author    Dimitry Andric <dim@FreeBSD.org>    2014-11-24 09:08:18 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2014-11-24 09:08:18 +0000
commit    5ca98fd98791947eba83a1ed3f2c8191ef7afa6c (patch)
tree      f5944309621cee4fe0976be6f9ac619b7ebfc4c2 /test
parent    68bcb7db193e4bc81430063148253d30a791023e (diff)
download  src-5ca98fd98791947eba83a1ed3f2c8191ef7afa6c.tar.gz
          src-5ca98fd98791947eba83a1ed3f2c8191ef7afa6c.zip
Vendor import of llvm RELEASE_350/final tag r216957 (effectively, 3.5.0 release): vendor/llvm/llvm-release_350-r216957
Notes:
svn path=/vendor/llvm/dist/; revision=274955
svn path=/vendor/llvm/llvm-release_35-r216957/; revision=274956; tag=vendor/llvm/llvm-release_350-r216957
Diffstat (limited to 'test')
-rw-r--r--test/Analysis/BasicAA/2007-11-05-SizeCrash.ll2
-rw-r--r--test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll2
-rw-r--r--test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll36
-rw-r--r--test/Analysis/BasicAA/cs-cs.ll236
-rw-r--r--test/Analysis/BasicAA/noalias-bugs.ll2
-rw-r--r--test/Analysis/BasicAA/pr18573.ll53
-rw-r--r--test/Analysis/BlockFrequencyInfo/bad_input.ll50
-rw-r--r--test/Analysis/BlockFrequencyInfo/basic.ll55
-rw-r--r--test/Analysis/BlockFrequencyInfo/double_backedge.ll27
-rw-r--r--test/Analysis/BlockFrequencyInfo/double_exit.ll165
-rw-r--r--test/Analysis/BlockFrequencyInfo/irreducible.ll421
-rw-r--r--test/Analysis/BlockFrequencyInfo/loop_with_branch.ll44
-rw-r--r--test/Analysis/BlockFrequencyInfo/nested_loop_with_branches.ll59
-rw-r--r--test/Analysis/BranchProbabilityInfo/loop.ll42
-rw-r--r--test/Analysis/BranchProbabilityInfo/pr18705.ll58
-rw-r--r--test/Analysis/CostModel/AArch64/lit.local.cfg2
-rw-r--r--test/Analysis/CostModel/AArch64/select.ll38
-rw-r--r--test/Analysis/CostModel/AArch64/store.ll22
-rw-r--r--test/Analysis/CostModel/ARM/cast.ll34
-rw-r--r--test/Analysis/CostModel/ARM/lit.local.cfg3
-rw-r--r--test/Analysis/CostModel/PowerPC/ext.ll21
-rw-r--r--test/Analysis/CostModel/PowerPC/insert_extract.ll4
-rw-r--r--test/Analysis/CostModel/PowerPC/lit.local.cfg3
-rw-r--r--test/Analysis/CostModel/PowerPC/load_store.ll11
-rw-r--r--test/Analysis/CostModel/X86/alternate-shuffle-cost.ll347
-rw-r--r--test/Analysis/CostModel/X86/cast.ll97
-rw-r--r--test/Analysis/CostModel/X86/cmp.ll4
-rw-r--r--test/Analysis/CostModel/X86/intrinsic-cost.ll28
-rw-r--r--test/Analysis/CostModel/X86/lit.local.cfg3
-rw-r--r--test/Analysis/CostModel/X86/scalarize.ll41
-rw-r--r--test/Analysis/CostModel/X86/vdiv-cost.ll92
-rw-r--r--test/Analysis/CostModel/X86/vselect-cost.ll126
-rw-r--r--test/Analysis/CostModel/X86/vshift-cost.ll167
-rw-r--r--test/Analysis/Delinearization/a.ll11
-rw-r--r--test/Analysis/Delinearization/gcd_multiply_expr.ll153
-rw-r--r--test/Analysis/Delinearization/himeno_1.ll10
-rw-r--r--test/Analysis/Delinearization/himeno_2.ll10
-rw-r--r--test/Analysis/Delinearization/iv_times_constant_in_subscript.ll45
-rw-r--r--test/Analysis/Delinearization/lit.local.cfg2
-rw-r--r--test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll10
-rw-r--r--test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll10
-rw-r--r--test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll10
-rw-r--r--test/Analysis/Delinearization/multidim_only_ivs_2d.ll17
-rw-r--r--test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll2
-rw-r--r--test/Analysis/Delinearization/multidim_only_ivs_3d.ll10
-rw-r--r--test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll10
-rw-r--r--test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll43
-rw-r--r--test/Analysis/Delinearization/undef.ll38
-rw-r--r--test/Analysis/DependenceAnalysis/Banerjee.ll107
-rw-r--r--test/Analysis/DependenceAnalysis/GCD.ll83
-rw-r--r--test/Analysis/LazyCallGraph/basic.ll176
-rw-r--r--test/Analysis/Lint/address-spaces.ll25
-rw-r--r--test/Analysis/ScalarEvolution/2009-04-22-TruncCast.ll2
-rw-r--r--test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll2
-rw-r--r--test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll2
-rw-r--r--test/Analysis/ScalarEvolution/and-xor.ll18
-rw-r--r--test/Analysis/ScalarEvolution/fold.ll26
-rw-r--r--test/Analysis/ScalarEvolution/max-trip-count.ll109
-rw-r--r--test/Analysis/ScalarEvolution/nsw-offset.ll2
-rw-r--r--test/Analysis/ScalarEvolution/trip-count-pow2.ll53
-rw-r--r--test/Analysis/ScalarEvolution/trip-count-switch.ll30
-rw-r--r--test/Analysis/ScalarEvolution/xor-and.ll13
-rw-r--r--test/Analysis/TypeBasedAliasAnalysis/memcpyopt.ll4
-rw-r--r--test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll18
-rw-r--r--test/Assembler/2007-09-10-AliasFwdRef.ll4
-rw-r--r--test/Assembler/2009-04-25-AliasGEP.ll8
-rw-r--r--test/Assembler/ConstantExprFoldSelect.ll8
-rw-r--r--test/Assembler/addrspacecast-alias.ll7
-rw-r--r--test/Assembler/alias-redefinition.ll7
-rw-r--r--test/Assembler/atomic.ll12
-rw-r--r--test/Assembler/functionlocal-metadata.ll2
-rw-r--r--test/Assembler/getInt.ll3
-rw-r--r--test/Assembler/half-constprop.ll2
-rw-r--r--test/Assembler/half-conv.ll2
-rw-r--r--test/Assembler/inalloca.ll16
-rw-r--r--test/Assembler/internal-hidden-alias.ll6
-rw-r--r--test/Assembler/internal-hidden-function.ll7
-rw-r--r--test/Assembler/internal-hidden-variable.ll4
-rw-r--r--test/Assembler/internal-protected-alias.ll6
-rw-r--r--test/Assembler/internal-protected-function.ll7
-rw-r--r--test/Assembler/internal-protected-variable.ll4
-rw-r--r--test/Assembler/invalid-comdat.ll4
-rw-r--r--test/Assembler/invalid-comdat2.ll5
-rw-r--r--test/Assembler/invalid-name.ll6
-rw-r--r--test/Assembler/invalid_cast3.ll7
-rw-r--r--test/Assembler/private-hidden-alias.ll6
-rw-r--r--test/Assembler/private-hidden-function.ll7
-rw-r--r--test/Assembler/private-hidden-variable.ll4
-rw-r--r--test/Assembler/private-protected-alias.ll6
-rw-r--r--test/Assembler/private-protected-function.ll7
-rw-r--r--test/Assembler/private-protected-variable.ll4
-rw-r--r--test/Assembler/upgrade-loop-metadata.ll41
-rw-r--r--test/Bindings/Ocaml/target.ml9
-rw-r--r--test/Bindings/Ocaml/vmcore.ml27
-rw-r--r--test/Bindings/llvm-c/lit.local.cfg5
-rw-r--r--test/Bitcode/aggregateInstructions.3.2.ll33
-rw-r--r--test/Bitcode/aggregateInstructions.3.2.ll.bcbin0 -> 452 bytes
-rw-r--r--test/Bitcode/atomic.ll17
-rw-r--r--test/Bitcode/attributes.ll32
-rw-r--r--test/Bitcode/binaryFloatInstructions.3.2.ll120
-rw-r--r--test/Bitcode/binaryFloatInstructions.3.2.ll.bcbin0 -> 992 bytes
-rw-r--r--test/Bitcode/binaryIntInstructions.3.2.ll177
-rw-r--r--test/Bitcode/binaryIntInstructions.3.2.ll.bcbin0 -> 1324 bytes
-rw-r--r--test/Bitcode/bitwiseInstructions.3.2.ll68
-rw-r--r--test/Bitcode/bitwiseInstructions.3.2.ll.bcbin0 -> 612 bytes
-rw-r--r--test/Bitcode/calling-conventions.3.2.ll150
-rw-r--r--test/Bitcode/calling-conventions.3.2.ll.bcbin0 -> 1236 bytes
-rw-r--r--test/Bitcode/cmpxchg-upgrade.ll23
-rw-r--r--test/Bitcode/cmpxchg-upgrade.ll.bcbin0 -> 360 bytes
-rw-r--r--test/Bitcode/conversionInstructions.3.2.ll104
-rw-r--r--test/Bitcode/conversionInstructions.3.2.ll.bcbin0 -> 996 bytes
-rw-r--r--test/Bitcode/deprecated-linker_private-linker_private_weak.ll17
-rw-r--r--test/Bitcode/drop-debug-info.ll4
-rw-r--r--test/Bitcode/global-variables.3.2.ll41
-rw-r--r--test/Bitcode/global-variables.3.2.ll.bcbin0 -> 536 bytes
-rw-r--r--test/Bitcode/inalloca.ll18
-rw-r--r--test/Bitcode/linkage-types-3.2.ll128
-rw-r--r--test/Bitcode/linkage-types-3.2.ll.bcbin0 -> 964 bytes
-rw-r--r--test/Bitcode/local-linkage-default-visibility.3.4.ll79
-rw-r--r--test/Bitcode/local-linkage-default-visibility.3.4.ll.bcbin0 -> 924 bytes
-rw-r--r--test/Bitcode/memInstructions.3.2.ll328
-rw-r--r--test/Bitcode/memInstructions.3.2.ll.bcbin0 -> 1728 bytes
-rw-r--r--test/Bitcode/miscInstructions.3.2.ll126
-rw-r--r--test/Bitcode/miscInstructions.3.2.ll.bcbin0 -> 908 bytes
-rw-r--r--test/Bitcode/old-aliases.ll22
-rw-r--r--test/Bitcode/old-aliases.ll.bcbin0 -> 368 bytes
-rw-r--r--test/Bitcode/pr18704.ll158
-rw-r--r--test/Bitcode/pr18704.ll.bcbin0 -> 880 bytes
-rw-r--r--test/Bitcode/select.ll2
-rw-r--r--test/Bitcode/tailcall.ll17
-rw-r--r--test/Bitcode/terminatorInstructions.3.2.ll47
-rw-r--r--test/Bitcode/terminatorInstructions.3.2.ll.bcbin0 -> 568 bytes
-rw-r--r--test/Bitcode/upgrade-global-ctors.ll3
-rw-r--r--test/Bitcode/upgrade-global-ctors.ll.bcbin0 -> 316 bytes
-rw-r--r--test/Bitcode/upgrade-loop-metadata.ll37
-rw-r--r--test/Bitcode/upgrade-loop-metadata.ll.bcbin0 -> 640 bytes
-rw-r--r--test/Bitcode/variableArgumentIntrinsic.3.2.ll33
-rw-r--r--test/Bitcode/variableArgumentIntrinsic.3.2.ll.bcbin0 -> 456 bytes
-rw-r--r--test/Bitcode/vectorInstructions.3.2.ll34
-rw-r--r--test/Bitcode/vectorInstructions.3.2.ll.bcbin0 -> 500 bytes
-rw-r--r--test/Bitcode/visibility-styles.3.2.ll23
-rw-r--r--test/Bitcode/visibility-styles.3.2.ll.bcbin0 -> 372 bytes
-rw-r--r--test/Bitcode/weak-cmpxchg-upgrade.ll15
-rw-r--r--test/Bitcode/weak-cmpxchg-upgrade.ll.bcbin0 -> 332 bytes
-rwxr-xr-xtest/BugPoint/compile-custom.ll12
-rwxr-xr-xtest/BugPoint/compile-custom.ll.py10
-rw-r--r--test/CMakeLists.txt6
-rw-r--r--test/CodeGen/AArch64/128bit_load_store.ll53
-rw-r--r--test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll55
-rw-r--r--test/CodeGen/AArch64/aarch64-address-type-promotion.ll28
-rw-r--r--test/CodeGen/AArch64/aarch64-neon-v1i1-setcc.ll69
-rw-r--r--test/CodeGen/AArch64/adc.ll33
-rw-r--r--test/CodeGen/AArch64/addsub-shifted.ll18
-rw-r--r--test/CodeGen/AArch64/addsub.ll22
-rw-r--r--test/CodeGen/AArch64/addsub_ext.ll8
-rw-r--r--test/CodeGen/AArch64/alloca.ll121
-rw-r--r--test/CodeGen/AArch64/analyze-branch.ll4
-rw-r--r--test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll47
-rw-r--r--test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll45
-rw-r--r--test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll12
-rw-r--r--test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll26
-rw-r--r--test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll31
-rw-r--r--test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll40
-rw-r--r--test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll20
-rw-r--r--test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll21
-rw-r--r--test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll22
-rw-r--r--test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll50
-rw-r--r--test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll67
-rw-r--r--test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll56
-rw-r--r--test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll19
-rw-r--r--test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll37
-rw-r--r--test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll67
-rw-r--r--test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll23
-rw-r--r--test/CodeGen/AArch64/arm64-EXT-undef-mask.ll23
-rw-r--r--test/CodeGen/AArch64/arm64-aapcs.ll125
-rw-r--r--test/CodeGen/AArch64/arm64-abi-varargs.ll191
-rw-r--r--test/CodeGen/AArch64/arm64-abi.ll239
-rw-r--r--test/CodeGen/AArch64/arm64-abi_align.ll532
-rw-r--r--test/CodeGen/AArch64/arm64-addp.ll32
-rw-r--r--test/CodeGen/AArch64/arm64-addr-mode-folding.ll171
-rw-r--r--test/CodeGen/AArch64/arm64-addr-type-promotion.ll82
-rw-r--r--test/CodeGen/AArch64/arm64-addrmode.ll72
-rw-r--r--test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll21
-rw-r--r--test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll29
-rw-r--r--test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll72
-rw-r--r--test/CodeGen/AArch64/arm64-ands-bad-peephole.ll31
-rw-r--r--test/CodeGen/AArch64/arm64-anyregcc-crash.ll19
-rw-r--r--test/CodeGen/AArch64/arm64-anyregcc.ll363
-rw-r--r--test/CodeGen/AArch64/arm64-arith-saturating.ll153
-rw-r--r--test/CodeGen/AArch64/arm64-arith.ll270
-rw-r--r--test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll16
-rw-r--r--test/CodeGen/AArch64/arm64-atomic-128.ll228
-rw-r--r--test/CodeGen/AArch64/arm64-atomic.ll335
-rw-r--r--test/CodeGen/AArch64/arm64-basic-pic.ll54
-rw-r--r--test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll1101
-rw-r--r--test/CodeGen/AArch64/arm64-big-endian-eh.ll73
-rw-r--r--test/CodeGen/AArch64/arm64-big-endian-varargs.ll58
-rw-r--r--test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll848
-rw-r--r--test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll1100
-rw-r--r--test/CodeGen/AArch64/arm64-big-imm-offsets.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-big-stack.ll21
-rw-r--r--test/CodeGen/AArch64/arm64-bitfield-extract.ll532
-rw-r--r--test/CodeGen/AArch64/arm64-blockaddress.ll30
-rw-r--r--test/CodeGen/AArch64/arm64-build-vector.ll59
-rw-r--r--test/CodeGen/AArch64/arm64-call-tailcalls.ll91
-rw-r--r--test/CodeGen/AArch64/arm64-cast-opt.ll31
-rw-r--r--test/CodeGen/AArch64/arm64-ccmp-heuristics.ll190
-rw-r--r--test/CodeGen/AArch64/arm64-ccmp.ll289
-rw-r--r--test/CodeGen/AArch64/arm64-clrsb.ll36
-rw-r--r--test/CodeGen/AArch64/arm64-coalesce-ext.ll17
-rw-r--r--test/CodeGen/AArch64/arm64-code-model-large-abs.ll72
-rw-r--r--test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll37
-rw-r--r--test/CodeGen/AArch64/arm64-collect-loh-str.ll23
-rw-r--r--test/CodeGen/AArch64/arm64-collect-loh.ll53
-rw-r--r--test/CodeGen/AArch64/arm64-complex-copy-noneon.ll21
-rw-r--r--test/CodeGen/AArch64/arm64-complex-ret.ll7
-rw-r--r--test/CodeGen/AArch64/arm64-const-addr.ll23
-rw-r--r--test/CodeGen/AArch64/arm64-convert-v4f64.ll33
-rw-r--r--test/CodeGen/AArch64/arm64-copy-tuple.ll146
-rw-r--r--test/CodeGen/AArch64/arm64-crc32.ll71
-rw-r--r--test/CodeGen/AArch64/arm64-crypto.ll135
-rw-r--r--test/CodeGen/AArch64/arm64-cse.ll59
-rw-r--r--test/CodeGen/AArch64/arm64-csel.ll230
-rw-r--r--test/CodeGen/AArch64/arm64-cvt.ll401
-rw-r--r--test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll19
-rw-r--r--test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll32
-rw-r--r--test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll46
-rw-r--r--test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll102
-rw-r--r--test/CodeGen/AArch64/arm64-dead-def-frame-index.ll18
-rw-r--r--test/CodeGen/AArch64/arm64-dead-register-def-bug.ll32
-rw-r--r--test/CodeGen/AArch64/arm64-dup.ll323
-rw-r--r--test/CodeGen/AArch64/arm64-early-ifcvt.ll423
-rw-r--r--test/CodeGen/AArch64/arm64-elf-calls.ll20
-rw-r--r--test/CodeGen/AArch64/arm64-elf-constpool.ll13
-rw-r--r--test/CodeGen/AArch64/arm64-elf-globals.ll115
-rw-r--r--test/CodeGen/AArch64/arm64-ext.ll118
-rw-r--r--test/CodeGen/AArch64/arm64-extend-int-to-fp.ll19
-rw-r--r--test/CodeGen/AArch64/arm64-extend.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-extern-weak.ll51
-rw-r--r--test/CodeGen/AArch64/arm64-extload-knownzero.ll28
-rw-r--r--test/CodeGen/AArch64/arm64-extract.ll58
-rw-r--r--test/CodeGen/AArch64/arm64-extract_subvector.ll51
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll47
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-alloca.ll25
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-br.ll155
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-call.ll100
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-conversion.ll442
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll146
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-gv.ll38
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-icmp.ll214
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll36
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll148
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-materialize.ll27
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll68
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-rem.ll44
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-ret.ll63
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-select.ll63
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel.ll95
-rw-r--r--test/CodeGen/AArch64/arm64-fastcc-tailcall.ll24
-rw-r--r--test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll18
-rw-r--r--test/CodeGen/AArch64/arm64-fcmp-opt.ll204
-rw-r--r--test/CodeGen/AArch64/arm64-fcopysign.ll51
-rw-r--r--test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-fmadd.ll92
-rw-r--r--test/CodeGen/AArch64/arm64-fmax.ll34
-rw-r--r--test/CodeGen/AArch64/arm64-fminv.ll101
-rw-r--r--test/CodeGen/AArch64/arm64-fmuladd.ll88
-rw-r--r--test/CodeGen/AArch64/arm64-fold-address.ll79
-rw-r--r--test/CodeGen/AArch64/arm64-fold-lsl.ll79
-rw-r--r--test/CodeGen/AArch64/arm64-fp-contract-zero.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-fp-imm.ll32
-rw-r--r--test/CodeGen/AArch64/arm64-fp.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-fp128-folding.ll17
-rw-r--r--test/CodeGen/AArch64/arm64-fp128.ll273
-rw-r--r--test/CodeGen/AArch64/arm64-frame-index.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-frameaddr.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-global-address.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-hello.ll38
-rw-r--r--test/CodeGen/AArch64/arm64-i16-subreg-extract.ll12
-rw-r--r--test/CodeGen/AArch64/arm64-icmp-opt.ll17
-rw-r--r--test/CodeGen/AArch64/arm64-illegal-float-ops.ll295
-rw-r--r--test/CodeGen/AArch64/arm64-indexed-memory.ll351
-rw-r--r--test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll40
-rw-r--r--test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll6174
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm-error-I.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm-error-J.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm-error-K.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm-error-L.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm-error-M.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm-error-N.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-inline-asm.ll230
-rw-r--r--test/CodeGen/AArch64/arm64-join-reserved.ll17
-rw-r--r--test/CodeGen/AArch64/arm64-jumptable.ll35
-rw-r--r--test/CodeGen/AArch64/arm64-large-frame.ll69
-rw-r--r--test/CodeGen/AArch64/arm64-ld1.ll1345
-rw-r--r--test/CodeGen/AArch64/arm64-ldp.ll149
-rw-r--r--test/CodeGen/AArch64/arm64-ldur.ll67
-rw-r--r--test/CodeGen/AArch64/arm64-ldxr-stxr.ll270
-rw-r--r--test/CodeGen/AArch64/arm64-leaf.ll13
-rw-r--r--test/CodeGen/AArch64/arm64-long-shift.ll59
-rw-r--r--test/CodeGen/AArch64/arm64-memcpy-inline.ll112
-rw-r--r--test/CodeGen/AArch64/arm64-memset-inline.ll27
-rw-r--r--test/CodeGen/AArch64/arm64-memset-to-bzero.ll108
-rw-r--r--test/CodeGen/AArch64/arm64-misched-basic-A53.ll203
-rw-r--r--test/CodeGen/AArch64/arm64-misched-basic-A57.ll112
-rw-r--r--test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll22
-rw-r--r--test/CodeGen/AArch64/arm64-movi.ll202
-rw-r--r--test/CodeGen/AArch64/arm64-mul.ll90
-rw-r--r--test/CodeGen/AArch64/arm64-named-reg-alloc.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-named-reg-notareg.ll13
-rw-r--r--test/CodeGen/AArch64/arm64-neg.ll71
-rw-r--r--test/CodeGen/AArch64/arm64-neon-2velem-high.ll341
-rw-r--r--test/CodeGen/AArch64/arm64-neon-2velem.ll2853
-rw-r--r--test/CodeGen/AArch64/arm64-neon-3vdiff.ll1829
-rw-r--r--test/CodeGen/AArch64/arm64-neon-aba-abd.ll236
-rw-r--r--test/CodeGen/AArch64/arm64-neon-across.ll460
-rw-r--r--test/CodeGen/AArch64/arm64-neon-add-pairwise.ll100
-rw-r--r--test/CodeGen/AArch64/arm64-neon-add-sub.ll237
-rw-r--r--test/CodeGen/AArch64/arm64-neon-compare-instructions.ll1191
-rw-r--r--test/CodeGen/AArch64/arm64-neon-copy.ll1445
-rw-r--r--test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll48
-rw-r--r--test/CodeGen/AArch64/arm64-neon-mul-div.ll797
-rw-r--r--test/CodeGen/AArch64/arm64-neon-scalar-by-elem-mul.ll124
-rw-r--r--test/CodeGen/AArch64/arm64-neon-select_cc.ll206
-rw-r--r--test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll482
-rw-r--r--test/CodeGen/AArch64/arm64-neon-simd-shift.ll663
-rw-r--r--test/CodeGen/AArch64/arm64-neon-simd-vget.ll225
-rw-r--r--test/CodeGen/AArch64/arm64-neon-v1i1-setcc.ll74
-rw-r--r--test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll175
-rw-r--r--test/CodeGen/AArch64/arm64-patchpoint.ll171
-rw-r--r--test/CodeGen/AArch64/arm64-pic-local-symbol.ll22
-rw-r--r--test/CodeGen/AArch64/arm64-platform-reg.ll26
-rw-r--r--test/CodeGen/AArch64/arm64-popcnt.ll43
-rw-r--r--test/CodeGen/AArch64/arm64-prefetch.ll88
-rw-r--r--test/CodeGen/AArch64/arm64-promote-const.ll255
-rw-r--r--test/CodeGen/AArch64/arm64-redzone.ll18
-rw-r--r--test/CodeGen/AArch64/arm64-reg-copy-noneon.ll20
-rw-r--r--test/CodeGen/AArch64/arm64-register-offset-addressing.ll145
-rw-r--r--test/CodeGen/AArch64/arm64-register-pairing.ll53
-rw-r--r--test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll27
-rw-r--r--test/CodeGen/AArch64/arm64-regress-interphase-shift.ll33
-rw-r--r--test/CodeGen/AArch64/arm64-return-vector.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-returnaddr.ll26
-rw-r--r--test/CodeGen/AArch64/arm64-rev.ll235
-rw-r--r--test/CodeGen/AArch64/arm64-rounding.ll208
-rw-r--r--test/CodeGen/AArch64/arm64-scaled_iv.ll38
-rw-r--r--test/CodeGen/AArch64/arm64-scvt.ll830
-rw-r--r--test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll27
-rw-r--r--test/CodeGen/AArch64/arm64-shifted-sext.ll277
-rw-r--r--test/CodeGen/AArch64/arm64-shrink-v1i64.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll22
-rw-r--r--test/CodeGen/AArch64/arm64-simplest-elf.ll18
-rw-r--r--test/CodeGen/AArch64/arm64-sincos.ll42
-rw-r--r--test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll22
-rw-r--r--test/CodeGen/AArch64/arm64-sli-sri-opt.ll41
-rw-r--r--test/CodeGen/AArch64/arm64-smaxv.ll74
-rw-r--r--test/CodeGen/AArch64/arm64-sminv.ll74
-rw-r--r--test/CodeGen/AArch64/arm64-spill-lr.ll74
-rw-r--r--test/CodeGen/AArch64/arm64-spill.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll19
-rw-r--r--test/CodeGen/AArch64/arm64-st1.ll676
-rw-r--r--test/CodeGen/AArch64/arm64-stack-no-frame.ll20
-rw-r--r--test/CodeGen/AArch64/arm64-stackmap.ll288
-rw-r--r--test/CodeGen/AArch64/arm64-stackpointer.ll24
-rw-r--r--test/CodeGen/AArch64/arm64-stacksave.ll20
-rw-r--r--test/CodeGen/AArch64/arm64-stp.ll101
-rw-r--r--test/CodeGen/AArch64/arm64-strict-align.ll26
-rw-r--r--test/CodeGen/AArch64/arm64-stur.ll98
-rw-r--r--test/CodeGen/AArch64/arm64-subsections.ll5
-rw-r--r--test/CodeGen/AArch64/arm64-subvector-extend.ll141
-rw-r--r--test/CodeGen/AArch64/arm64-swizzle-tbl-i16-layout.ll36
-rw-r--r--test/CodeGen/AArch64/arm64-tbl.ll132
-rw-r--r--test/CodeGen/AArch64/arm64-this-return.ll83
-rw-r--r--test/CodeGen/AArch64/arm64-tls-darwin.ll18
-rw-r--r--test/CodeGen/AArch64/arm64-tls-dynamic-together.ll18
-rw-r--r--test/CodeGen/AArch64/arm64-tls-dynamics.ll135
-rw-r--r--test/CodeGen/AArch64/arm64-tls-execs.ll63
-rw-r--r--test/CodeGen/AArch64/arm64-trap.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-trn.ll134
-rw-r--r--test/CodeGen/AArch64/arm64-trunc-store.ll75
-rw-r--r--test/CodeGen/AArch64/arm64-umaxv.ll92
-rw-r--r--test/CodeGen/AArch64/arm64-uminv.ll92
-rw-r--r--test/CodeGen/AArch64/arm64-umov.ll33
-rw-r--r--test/CodeGen/AArch64/arm64-unaligned_ldst.ll41
-rw-r--r--test/CodeGen/AArch64/arm64-uzp.ll107
-rw-r--r--test/CodeGen/AArch64/arm64-vaargs.ll20
-rw-r--r--test/CodeGen/AArch64/arm64-vabs.ll804
-rw-r--r--test/CodeGen/AArch64/arm64-vadd.ll941
-rw-r--r--test/CodeGen/AArch64/arm64-vaddlv.ll26
-rw-r--r--test/CodeGen/AArch64/arm64-vaddv.ll245
-rw-r--r--test/CodeGen/AArch64/arm64-variadic-aapcs.ll143
-rw-r--r--test/CodeGen/AArch64/arm64-vbitwise.ll91
-rw-r--r--test/CodeGen/AArch64/arm64-vclz.ll109
-rw-r--r--test/CodeGen/AArch64/arm64-vcmp.ll236
-rw-r--r--test/CodeGen/AArch64/arm64-vcnt.ll56
-rw-r--r--test/CodeGen/AArch64/arm64-vcombine.ll17
-rw-r--r--test/CodeGen/AArch64/arm64-vcvt.ll686
-rw-r--r--test/CodeGen/AArch64/arm64-vcvt_f.ll82
-rw-r--r--test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll73
-rw-r--r--test/CodeGen/AArch64/arm64-vcvt_n.ll49
-rw-r--r--test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll34
-rw-r--r--test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-vecCmpBr.ll207
-rw-r--r--test/CodeGen/AArch64/arm64-vecFold.ll145
-rw-r--r--test/CodeGen/AArch64/arm64-vector-ext.ll16
-rw-r--r--test/CodeGen/AArch64/arm64-vector-imm.ll134
-rw-r--r--test/CodeGen/AArch64/arm64-vector-insertion.ll33
-rw-r--r--test/CodeGen/AArch64/arm64-vector-ldst.ll601
-rw-r--r--test/CodeGen/AArch64/arm64-vext.ll464
-rw-r--r--test/CodeGen/AArch64/arm64-vext_reverse.ll172
-rw-r--r--test/CodeGen/AArch64/arm64-vfloatintrinsics.ll375
-rw-r--r--test/CodeGen/AArch64/arm64-vhadd.ll249
-rw-r--r--test/CodeGen/AArch64/arm64-vhsub.ll125
-rw-r--r--test/CodeGen/AArch64/arm64-virtual_base.ll51
-rw-r--r--test/CodeGen/AArch64/arm64-vmax.ll679
-rw-r--r--test/CodeGen/AArch64/arm64-vminmaxnm.ll68
-rw-r--r--test/CodeGen/AArch64/arm64-vmovn.ll242
-rw-r--r--test/CodeGen/AArch64/arm64-vmul.ll2036
-rw-r--r--test/CodeGen/AArch64/arm64-volatile.ll27
-rw-r--r--test/CodeGen/AArch64/arm64-vpopcnt.ll68
-rw-r--r--test/CodeGen/AArch64/arm64-vqadd.ll332
-rw-r--r--test/CodeGen/AArch64/arm64-vqsub.ll147
-rw-r--r--test/CodeGen/AArch64/arm64-vselect.ll25
-rw-r--r--test/CodeGen/AArch64/arm64-vsetcc_fp.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-vshift.ll1926
-rw-r--r--test/CodeGen/AArch64/arm64-vshr.ll63
-rw-r--r--test/CodeGen/AArch64/arm64-vshuffle.ll115
-rw-r--r--test/CodeGen/AArch64/arm64-vsqrt.ll232
-rw-r--r--test/CodeGen/AArch64/arm64-vsra.ll150
-rw-r--r--test/CodeGen/AArch64/arm64-vsub.ll417
-rw-r--r--test/CodeGen/AArch64/arm64-weak-reference.ll10
-rw-r--r--test/CodeGen/AArch64/arm64-xaluo.ll524
-rw-r--r--test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll17
-rw-r--r--test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll49
-rw-r--r--test/CodeGen/AArch64/arm64-zext.ll11
-rw-r--r--test/CodeGen/AArch64/arm64-zextload-unscaled.ll40
-rw-r--r--test/CodeGen/AArch64/arm64-zip.ll107
-rw-r--r--test/CodeGen/AArch64/asm-large-immediate.ll10
-rw-r--r--test/CodeGen/AArch64/assertion-rc-mismatch.ll24
-rw-r--r--test/CodeGen/AArch64/atomic-ops.ll499
-rw-r--r--test/CodeGen/AArch64/basic-pic.ll12
-rw-r--r--test/CodeGen/AArch64/bitfield-insert-0.ll2
-rw-r--r--test/CodeGen/AArch64/bitfield-insert.ll25
-rw-r--r--test/CodeGen/AArch64/bitfield.ll10
-rw-r--r--test/CodeGen/AArch64/blockaddress.ll6
-rw-r--r--test/CodeGen/AArch64/bool-loads.ll22
-rw-r--r--test/CodeGen/AArch64/branch-relax-asm.ll35
-rw-r--r--test/CodeGen/AArch64/breg.ll4
-rw-r--r--test/CodeGen/AArch64/callee-save.ll18
-rw-r--r--test/CodeGen/AArch64/cmpxchg-idioms.ll93
-rw-r--r--test/CodeGen/AArch64/code-model-large-abs.ll2
-rw-r--r--test/CodeGen/AArch64/compare-branch.ll2
-rw-r--r--test/CodeGen/AArch64/compiler-ident.ll12
-rw-r--r--test/CodeGen/AArch64/complex-fp-to-int.ll141
-rw-r--r--test/CodeGen/AArch64/complex-int-to-fp.ll164
-rw-r--r--test/CodeGen/AArch64/cond-sel.ll38
-rw-r--r--test/CodeGen/AArch64/cpus.ll14
-rw-r--r--test/CodeGen/AArch64/directcond.ll26
-rw-r--r--test/CodeGen/AArch64/dp-3source.ll2
-rw-r--r--test/CodeGen/AArch64/dp1.ll2
-rw-r--r--test/CodeGen/AArch64/dp2.ll24
-rw-r--r--test/CodeGen/AArch64/eliminate-trunc.ll39
-rw-r--r--test/CodeGen/AArch64/extern-weak.ll28
-rw-r--r--test/CodeGen/AArch64/extract.ll6
-rw-r--r--test/CodeGen/AArch64/f16-convert.ll251
-rw-r--r--test/CodeGen/AArch64/fast-isel-mul.ll40
-rw-r--r--test/CodeGen/AArch64/fastcc-reserved.ll16
-rw-r--r--test/CodeGen/AArch64/fastcc.ll47
-rw-r--r--test/CodeGen/AArch64/fcmp.ll2
-rw-r--r--test/CodeGen/AArch64/fcvt-fixed.ll6
-rw-r--r--test/CodeGen/AArch64/fcvt-int.ll4
-rw-r--r--test/CodeGen/AArch64/flags-multiuse.ll4
-rw-r--r--test/CodeGen/AArch64/floatdp_1source.ll2
-rw-r--r--test/CodeGen/AArch64/floatdp_2source.ll2
-rw-r--r--test/CodeGen/AArch64/fp-cond-sel.ll23
-rw-r--r--test/CodeGen/AArch64/fp-dp3.ll58
-rw-r--r--test/CodeGen/AArch64/fp128-folding.ll4
-rw-r--r--test/CodeGen/AArch64/fp128.ll279
-rw-r--r--test/CodeGen/AArch64/fpimm.ll6
-rw-r--r--test/CodeGen/AArch64/frameaddr.ll4
-rw-r--r--test/CodeGen/AArch64/free-zext.ll14
-rw-r--r--test/CodeGen/AArch64/func-argpassing.ll67
-rw-r--r--test/CodeGen/AArch64/func-calls.ll61
-rw-r--r--test/CodeGen/AArch64/funcptr_cast.ll13
-rw-r--r--test/CodeGen/AArch64/global-alignment.ll26
-rw-r--r--test/CodeGen/AArch64/global-merge-1.ll26
-rw-r--r--test/CodeGen/AArch64/global-merge-2.ll51
-rw-r--r--test/CodeGen/AArch64/global-merge-3.ll51
-rw-r--r--test/CodeGen/AArch64/global-merge-4.ll73
-rw-r--r--test/CodeGen/AArch64/global-merge.ll30
-rw-r--r--test/CodeGen/AArch64/got-abuse.ll6
-rw-r--r--test/CodeGen/AArch64/half.ll83
-rw-r--r--test/CodeGen/AArch64/hints.ll67
-rw-r--r--test/CodeGen/AArch64/i1-contents.ll55
-rw-r--r--test/CodeGen/AArch64/i128-align.ll6
-rw-r--r--test/CodeGen/AArch64/i128-fast-isel-fallback.ll18
-rw-r--r--test/CodeGen/AArch64/illegal-float-ops.ll2
-rw-r--r--test/CodeGen/AArch64/init-array.ll4
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badI.ll4
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badK.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badK2.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badL.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints.ll137
-rw-r--r--test/CodeGen/AArch64/inline-asm-modifiers.ll147
-rw-r--r--test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll26
-rw-r--r--test/CodeGen/AArch64/intrinsics-memory-barrier.ll57
-rw-r--r--test/CodeGen/AArch64/jump-table.ll22
-rw-r--r--test/CodeGen/AArch64/large-consts.ll9
-rw-r--r--test/CodeGen/AArch64/large-frame.ll119
-rw-r--r--test/CodeGen/AArch64/ldst-opt.ll767
-rw-r--r--test/CodeGen/AArch64/ldst-regoffset.ll80
-rw-r--r--test/CodeGen/AArch64/ldst-unscaledimm.ll6
-rw-r--r--test/CodeGen/AArch64/ldst-unsignedimm.ll62
-rw-r--r--test/CodeGen/AArch64/lit.local.cfg10
-rw-r--r--test/CodeGen/AArch64/literal_pools.ll103
-rw-r--r--test/CodeGen/AArch64/literal_pools_float.ll46
-rw-r--r--test/CodeGen/AArch64/local_vars.ll21
-rw-r--r--test/CodeGen/AArch64/logical-imm.ll2
-rw-r--r--test/CodeGen/AArch64/logical_shifted_reg.ll6
-rw-r--r--test/CodeGen/AArch64/mature-mc-support.ll12
-rw-r--r--test/CodeGen/AArch64/memcpy-f128.ll19
-rw-r--r--test/CodeGen/AArch64/movw-consts.ll32
-rw-r--r--test/CodeGen/AArch64/movw-shift-encoding.ll9
-rw-r--r--test/CodeGen/AArch64/mul-lohi.ll19
-rw-r--r--test/CodeGen/AArch64/mul_pow2.ll123
-rw-r--r--test/CodeGen/AArch64/neon-2velem-high.ll331
-rw-r--r--test/CodeGen/AArch64/neon-2velem.ll2550
-rw-r--r--test/CodeGen/AArch64/neon-3vdiff.ll1806
-rw-r--r--test/CodeGen/AArch64/neon-aba-abd.ll236
-rw-r--r--test/CodeGen/AArch64/neon-across.ll476
-rw-r--r--test/CodeGen/AArch64/neon-add-pairwise.ll92
-rw-r--r--test/CodeGen/AArch64/neon-add-sub.ll237
-rw-r--r--test/CodeGen/AArch64/neon-bitcast.ll12
-rw-r--r--test/CodeGen/AArch64/neon-bitwise-instructions.ll890
-rw-r--r--test/CodeGen/AArch64/neon-bsl.ll222
-rw-r--r--test/CodeGen/AArch64/neon-compare-instructions.ll957
-rw-r--r--test/CodeGen/AArch64/neon-copy.ll615
-rw-r--r--test/CodeGen/AArch64/neon-crypto.ll149
-rw-r--r--test/CodeGen/AArch64/neon-diagnostics.ll2
-rw-r--r--test/CodeGen/AArch64/neon-extract.ll122
-rw-r--r--test/CodeGen/AArch64/neon-facge-facgt.ll56
-rw-r--r--test/CodeGen/AArch64/neon-fma.ll50
-rw-r--r--test/CodeGen/AArch64/neon-fpround_f128.ll18
-rw-r--r--test/CodeGen/AArch64/neon-frsqrt-frecp.ll54
-rw-r--r--test/CodeGen/AArch64/neon-halving-add-sub.ll207
-rw-r--r--test/CodeGen/AArch64/neon-idiv.ll13
-rw-r--r--test/CodeGen/AArch64/neon-max-min-pairwise.ll310
-rw-r--r--test/CodeGen/AArch64/neon-max-min.ll310
-rw-r--r--test/CodeGen/AArch64/neon-misc-scalar.ll60
-rw-r--r--test/CodeGen/AArch64/neon-misc.ll1799
-rw-r--r--test/CodeGen/AArch64/neon-mla-mls.ll24
-rw-r--r--test/CodeGen/AArch64/neon-mov.ll127
-rw-r--r--test/CodeGen/AArch64/neon-mul-div.ll181
-rw-r--r--test/CodeGen/AArch64/neon-or-combine.ll29
-rw-r--r--test/CodeGen/AArch64/neon-perm.ll1919
-rw-r--r--test/CodeGen/AArch64/neon-rounding-halving-add.ll105
-rw-r--r--test/CodeGen/AArch64/neon-rounding-shift.ll121
-rw-r--r--test/CodeGen/AArch64/neon-saturating-add-sub.ll241
-rw-r--r--test/CodeGen/AArch64/neon-saturating-rounding-shift.ll121
-rw-r--r--test/CodeGen/AArch64/neon-saturating-shift.ll121
-rw-r--r--test/CodeGen/AArch64/neon-scalar-abs.ll61
-rw-r--r--test/CodeGen/AArch64/neon-scalar-add-sub.ll50
-rw-r--r--test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll48
-rw-r--r--test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll124
-rw-r--r--test/CodeGen/AArch64/neon-scalar-compare.ll343
-rw-r--r--test/CodeGen/AArch64/neon-scalar-copy.ll84
-rw-r--r--test/CodeGen/AArch64/neon-scalar-cvt.ll137
-rw-r--r--test/CodeGen/AArch64/neon-scalar-extract-narrow.ll104
-rw-r--r--test/CodeGen/AArch64/neon-scalar-fabd.ll26
-rw-r--r--test/CodeGen/AArch64/neon-scalar-fcvt.ll255
-rw-r--r--test/CodeGen/AArch64/neon-scalar-fp-compare.ll328
-rw-r--r--test/CodeGen/AArch64/neon-scalar-mul.ll143
-rw-r--r--test/CodeGen/AArch64/neon-scalar-neg.ll61
-rw-r--r--test/CodeGen/AArch64/neon-scalar-recip.ll116
-rw-r--r--test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll247
-rw-r--r--test/CodeGen/AArch64/neon-scalar-rounding-shift.ll39
-rw-r--r--test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll242
-rw-r--r--test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll94
-rw-r--r--test/CodeGen/AArch64/neon-scalar-saturating-shift.ll88
-rw-r--r--test/CodeGen/AArch64/neon-scalar-shift-imm.ll531
-rw-r--r--test/CodeGen/AArch64/neon-scalar-shift.ll38
-rw-r--r--test/CodeGen/AArch64/neon-shift-left-long.ll10
-rw-r--r--test/CodeGen/AArch64/neon-shift.ll171
-rw-r--r--test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll2314
-rw-r--r--test/CodeGen/AArch64/neon-simd-ldst-one.ll2113
-rw-r--r--test/CodeGen/AArch64/neon-simd-ldst.ll164
-rw-r--r--test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll354
-rw-r--r--test/CodeGen/AArch64/neon-simd-post-ldst-one.ll319
-rw-r--r--test/CodeGen/AArch64/neon-simd-shift.ll1556
-rw-r--r--test/CodeGen/AArch64/neon-simd-tbl.ll828
-rw-r--r--test/CodeGen/AArch64/neon-simd-vget.ll225
-rw-r--r--test/CodeGen/AArch64/neon-truncStore-extLoad.ll57
-rw-r--r--test/CodeGen/AArch64/nzcv-save.ll18
-rw-r--r--test/CodeGen/AArch64/pic-eh-stubs.ll5
-rw-r--r--test/CodeGen/AArch64/ragreedy-csr.ll297
-rw-r--r--test/CodeGen/AArch64/rbit.ll20
-rw-r--r--test/CodeGen/AArch64/regress-bitcast-formals.ll2
-rw-r--r--test/CodeGen/AArch64/regress-f128csel-flags.ll2
-rw-r--r--test/CodeGen/AArch64/regress-fp128-livein.ll2
-rw-r--r--test/CodeGen/AArch64/regress-tail-livereg.ll16
-rw-r--r--test/CodeGen/AArch64/regress-tblgen-chains.ll13
-rw-r--r--test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll15
-rw-r--r--test/CodeGen/AArch64/regress-wzr-allocatable.ll41
-rw-r--r--test/CodeGen/AArch64/returnaddr.ll2
-rw-r--r--test/CodeGen/AArch64/setcc-takes-i32.ll2
-rw-r--r--test/CodeGen/AArch64/sibling-call.ll14
-rw-r--r--test/CodeGen/AArch64/sincos-expansion.ll2
-rw-r--r--test/CodeGen/AArch64/sincospow-vector-expansion.ll96
-rw-r--r--test/CodeGen/AArch64/tail-call.ll34
-rw-r--r--test/CodeGen/AArch64/tls-dynamic-together.ll18
-rw-r--r--test/CodeGen/AArch64/tls-dynamics.ll121
-rw-r--r--test/CodeGen/AArch64/tls-execs.ll63
-rw-r--r--test/CodeGen/AArch64/trunc-v1i64.ll63
-rw-r--r--test/CodeGen/AArch64/tst-br.ll12
-rw-r--r--test/CodeGen/AArch64/variadic.ll199
-rw-r--r--test/CodeGen/AArch64/zero-reg.ll9
-rw-r--r--test/CodeGen/ARM/2006-11-10-CycleInDAG.ll2
-rw-r--r--test/CodeGen/ARM/2007-04-03-PEIBug.ll5
-rw-r--r--test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll2
-rw-r--r--test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll5
-rw-r--r--test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll7
-rw-r--r--test/CodeGen/ARM/2008-07-17-Fdiv.ll2
-rw-r--r--test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll2
-rw-r--r--test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll2
-rw-r--r--test/CodeGen/ARM/2009-03-09-AddrModeBug.ll2
-rw-r--r--test/CodeGen/ARM/2009-04-06-AsmModifier.ll5
-rw-r--r--test/CodeGen/ARM/2009-04-08-AggregateAddr.ll2
-rw-r--r--test/CodeGen/ARM/2009-04-08-FREM.ll2
-rw-r--r--test/CodeGen/ARM/2009-04-08-FloatUndef.ll2
-rw-r--r--test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll2
-rw-r--r--test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll3
-rw-r--r--test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll4
-rw-r--r--test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll2
-rw-r--r--test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll2
-rw-r--r--test/CodeGen/ARM/2009-08-23-linkerprivate.ll8
-rw-r--r--test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll3
-rw-r--r--test/CodeGen/ARM/2009-09-10-postdec.ll2
-rw-r--r--test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll2
-rw-r--r--test/CodeGen/ARM/2009-09-24-spill-align.ll2
-rw-r--r--test/CodeGen/ARM/2009-11-02-NegativeLane.ll2
-rw-r--r--test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll2
-rw-r--r--test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll2
-rw-r--r--test/CodeGen/ARM/2010-04-09-NeonSelect.ll4
-rw-r--r--test/CodeGen/ARM/2010-04-14-SplitVector.ll2
-rw-r--r--test/CodeGen/ARM/2010-05-18-PostIndexBug.ll18
-rw-r--r--test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll2
-rw-r--r--test/CodeGen/ARM/2010-05-21-BuildVector.ll2
-rw-r--r--test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll4
-rw-r--r--test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll2
-rw-r--r--test/CodeGen/ARM/2010-07-26-GlobalMerge.ll2
-rw-r--r--test/CodeGen/ARM/2010-08-04-StackVariable.ll4
-rw-r--r--test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll285
-rw-r--r--test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll4
-rw-r--r--test/CodeGen/ARM/2010-12-07-PEIBug.ll6
-rw-r--r--test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll6
-rw-r--r--test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll20
-rw-r--r--test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll6
-rw-r--r--test/CodeGen/ARM/2011-04-12-AlignBug.ll8
-rw-r--r--test/CodeGen/ARM/2011-06-09-TailCallByVal.ll2
-rw-r--r--test/CodeGen/ARM/2011-06-16-TailCallByVal.ll4
-rw-r--r--test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll6
-rw-r--r--test/CodeGen/ARM/2011-10-26-memset-inline.ll8
-rw-r--r--test/CodeGen/ARM/2011-10-26-memset-with-neon.ll2
-rw-r--r--test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll2
-rw-r--r--test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll2
-rw-r--r--test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll2
-rw-r--r--test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll2
-rw-r--r--test/CodeGen/ARM/2012-04-10-DAGCombine.ll2
-rw-r--r--test/CodeGen/ARM/2012-05-04-vmov.ll8
-rw-r--r--test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll2
-rw-r--r--test/CodeGen/ARM/2012-08-23-legalize-vmull.ll2
-rw-r--r--test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll2
-rw-r--r--test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll2
-rw-r--r--test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll2
-rw-r--r--test/CodeGen/ARM/2012-11-14-subs_carry.ll2
-rw-r--r--test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll4
-rw-r--r--test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll2
-rw-r--r--test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll6
-rw-r--r--test/CodeGen/ARM/2013-05-05-IfConvertBug.ll33
-rw-r--r--test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll2
-rw-r--r--test/CodeGen/ARM/2013-07-29-vector-or-combine.ll2
-rw-r--r--test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll56
-rw-r--r--test/CodeGen/ARM/2014-02-05-vfp-regs-after-stack.ll22
-rw-r--r--test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll114
-rw-r--r--test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll50
-rw-r--r--test/CodeGen/ARM/2014-07-18-earlyclobber-str-post.ll13
-rw-r--r--test/CodeGen/ARM/DbgValueOtherTargets.test2
-rw-r--r--test/CodeGen/ARM/Windows/aapcs.ll16
-rw-r--r--test/CodeGen/ARM/Windows/alloca.ll22
-rw-r--r--test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll27
-rw-r--r--test/CodeGen/ARM/Windows/chkstk.ll24
-rw-r--r--test/CodeGen/ARM/Windows/dllimport.ll61
-rw-r--r--test/CodeGen/ARM/Windows/frame-register.ll22
-rw-r--r--test/CodeGen/ARM/Windows/global-minsize.ll16
-rw-r--r--test/CodeGen/ARM/Windows/hard-float.ll10
-rw-r--r--test/CodeGen/ARM/Windows/integer-floating-point-conversion.ll74
-rw-r--r--test/CodeGen/ARM/Windows/long-calls.ll18
-rw-r--r--test/CodeGen/ARM/Windows/mangling.ll9
-rw-r--r--test/CodeGen/ARM/Windows/memset.ll18
-rw-r--r--test/CodeGen/ARM/Windows/mov32t-bundling.ll28
-rw-r--r--test/CodeGen/ARM/Windows/movw-movt-relocations.ll27
-rw-r--r--test/CodeGen/ARM/Windows/no-aeabi.ll32
-rw-r--r--test/CodeGen/ARM/Windows/no-arm-mode.ll5
-rw-r--r--test/CodeGen/ARM/Windows/no-ehabi.ll21
-rw-r--r--test/CodeGen/ARM/Windows/pic.ll16
-rw-r--r--test/CodeGen/ARM/Windows/read-only-data.ll15
-rw-r--r--test/CodeGen/ARM/Windows/structors.ll12
-rw-r--r--test/CodeGen/ARM/Windows/vla.ll31
-rw-r--r--test/CodeGen/ARM/a15-mla.ll3
-rw-r--r--test/CodeGen/ARM/a15.ll2
-rw-r--r--test/CodeGen/ARM/aapcs-hfa-code.ll111
-rw-r--r--test/CodeGen/ARM/aapcs-hfa.ll164
-rw-r--r--test/CodeGen/ARM/addrmode.ll5
-rw-r--r--test/CodeGen/ARM/addrspacecast.ll2
-rw-r--r--test/CodeGen/ARM/argaddr.ll2
-rw-r--r--test/CodeGen/ARM/arm-abi-attr.ll28
-rw-r--r--test/CodeGen/ARM/arm-and-tst-peephole.ll9
-rw-r--r--test/CodeGen/ARM/arm-asm.ll2
-rw-r--r--test/CodeGen/ARM/arm-modifier.ll2
-rw-r--r--test/CodeGen/ARM/arm-negative-stride.ll2
-rw-r--r--test/CodeGen/ARM/arm-ttype-target2.ll2
-rw-r--r--test/CodeGen/ARM/atomic-64bit.ll224
-rw-r--r--test/CodeGen/ARM/atomic-cmp.ll5
-rw-r--r--test/CodeGen/ARM/atomic-cmpxchg.ll50
-rw-r--r--test/CodeGen/ARM/atomic-load-store.ll39
-rw-r--r--test/CodeGen/ARM/atomic-op.ll39
-rw-r--r--test/CodeGen/ARM/atomic-ops-v8.ll467
-rw-r--r--test/CodeGen/ARM/atomicrmw_minmax.ll4
-rw-r--r--test/CodeGen/ARM/available_externally.ll6
-rw-r--r--test/CodeGen/ARM/bfc.ll2
-rw-r--r--test/CodeGen/ARM/bfi.ll2
-rw-r--r--test/CodeGen/ARM/bfx.ll2
-rw-r--r--test/CodeGen/ARM/bic.ll2
-rw-r--r--test/CodeGen/ARM/big-endian-eh-unwind.ll73
-rw-r--r--test/CodeGen/ARM/big-endian-neon-bitconv.ll392
-rw-r--r--test/CodeGen/ARM/big-endian-neon-extend.ll81
-rw-r--r--test/CodeGen/ARM/big-endian-neon-trunc-store.ll26
-rw-r--r--test/CodeGen/ARM/big-endian-ret-f64.ll12
-rw-r--r--test/CodeGen/ARM/big-endian-vector-callee.ll1172
-rw-r--r--test/CodeGen/ARM/big-endian-vector-caller.ll1369
-rw-r--r--test/CodeGen/ARM/bits.ll2
-rw-r--r--test/CodeGen/ARM/bswap16.ll42
-rw-r--r--test/CodeGen/ARM/build-attributes-encoding.s10
-rw-r--r--test/CodeGen/ARM/build-attributes.ll468
-rw-r--r--test/CodeGen/ARM/cache-intrinsic.ll26
-rw-r--r--test/CodeGen/ARM/call-tc.ll8
-rw-r--r--test/CodeGen/ARM/call.ll12
-rw-r--r--test/CodeGen/ARM/carry.ll2
-rw-r--r--test/CodeGen/ARM/clz.ll2
-rw-r--r--test/CodeGen/ARM/cmpxchg-idioms.ll107
-rw-r--r--test/CodeGen/ARM/cmpxchg-weak.ll43
-rw-r--r--test/CodeGen/ARM/coalesce-dbgvalue.ll2
-rw-r--r--test/CodeGen/ARM/compare-call.ll6
-rw-r--r--test/CodeGen/ARM/constantfp.ll12
-rw-r--r--test/CodeGen/ARM/crash-O0.ll2
-rw-r--r--test/CodeGen/ARM/cse-ldrlit.ll61
-rw-r--r--test/CodeGen/ARM/ctz.ll2
-rw-r--r--test/CodeGen/ARM/dagcombine-concatvector.ll11
-rw-r--r--test/CodeGen/ARM/data-in-code-annotations.ll2
-rw-r--r--test/CodeGen/ARM/debug-frame-large-stack.ll99
-rw-r--r--test/CodeGen/ARM/debug-frame-no-debug.ll97
-rw-r--r--test/CodeGen/ARM/debug-frame-vararg.ll142
-rw-r--r--test/CodeGen/ARM/debug-frame.ll575
-rw-r--r--test/CodeGen/ARM/debug-info-arg.ll2
-rw-r--r--test/CodeGen/ARM/debug-info-blocks.ll9
-rw-r--r--test/CodeGen/ARM/debug-info-qreg.ll10
-rw-r--r--test/CodeGen/ARM/debug-info-s16-reg.ll6
-rw-r--r--test/CodeGen/ARM/debug-info-sreg2.ll14
-rw-r--r--test/CodeGen/ARM/debug-segmented-stacks.ll82
-rw-r--r--test/CodeGen/ARM/default-float-abi.ll22
-rw-r--r--test/CodeGen/ARM/divmod-eabi.ll5
-rw-r--r--test/CodeGen/ARM/dwarf-eh.ll71
-rw-r--r--test/CodeGen/ARM/dyn-stackalloc.ll2
-rw-r--r--test/CodeGen/ARM/ehabi-filters.ll2
-rw-r--r--test/CodeGen/ARM/ehabi-handlerdata-nounwind.ll61
-rw-r--r--test/CodeGen/ARM/ehabi-handlerdata.ll59
-rw-r--r--test/CodeGen/ARM/ehabi-no-landingpad.ll3
-rw-r--r--test/CodeGen/ARM/ehabi-unwind.ll5
-rw-r--r--test/CodeGen/ARM/ehabi.ll253
-rw-r--r--test/CodeGen/ARM/extload-knownzero.ll2
-rw-r--r--test/CodeGen/ARM/extloadi1.ll3
-rw-r--r--test/CodeGen/ARM/fadds.ll23
-rw-r--r--test/CodeGen/ARM/fast-isel-call.ll2
-rw-r--r--test/CodeGen/ARM/fast-isel-crash2.ll4
-rw-r--r--test/CodeGen/ARM/fast-isel-frameaddr.ll16
-rw-r--r--test/CodeGen/ARM/fast-isel-inline-asm.ll18
-rw-r--r--test/CodeGen/ARM/fast-isel-intrinsic.ll42
-rw-r--r--test/CodeGen/ARM/fast-isel-static.ll8
-rw-r--r--test/CodeGen/ARM/fast-tail-call.ll2
-rw-r--r--test/CodeGen/ARM/fastcc-vfp.ll40
-rw-r--r--test/CodeGen/ARM/fastisel-thumb-litpool.ll11
-rw-r--r--test/CodeGen/ARM/fdivs.ll8
-rw-r--r--test/CodeGen/ARM/fixunsdfdi.ll7
-rw-r--r--test/CodeGen/ARM/fmacs.ll10
-rw-r--r--test/CodeGen/ARM/fmdrr-fmrrd.ll8
-rw-r--r--test/CodeGen/ARM/fmscs.ll6
-rw-r--r--test/CodeGen/ARM/fmuls.ll23
-rw-r--r--test/CodeGen/ARM/fnegs.ll23
-rw-r--r--test/CodeGen/ARM/fnmacs.ll6
-rw-r--r--test/CodeGen/ARM/fnmscs.ll23
-rw-r--r--test/CodeGen/ARM/fnmul.ll10
-rw-r--r--test/CodeGen/ARM/fnmuls.ll8
-rw-r--r--test/CodeGen/ARM/fold-const.ll2
-rw-r--r--test/CodeGen/ARM/fold-stack-adjust.ll66
-rw-r--r--test/CodeGen/ARM/formal.ll2
-rw-r--r--test/CodeGen/ARM/fp-arg-shuffle.ll2
-rw-r--r--test/CodeGen/ARM/fp-fast.ll3
-rw-r--r--test/CodeGen/ARM/fp.ll2
-rw-r--r--test/CodeGen/ARM/fp16.ll51
-rw-r--r--test/CodeGen/ARM/fp_convert.ll23
-rw-r--r--test/CodeGen/ARM/fpcmp-opt.ll4
-rw-r--r--test/CodeGen/ARM/fpcmp.ll2
-rw-r--r--test/CodeGen/ARM/fpconsts.ll2
-rw-r--r--test/CodeGen/ARM/fpconv.ll4
-rw-r--r--test/CodeGen/ARM/fpmem.ll2
-rw-r--r--test/CodeGen/ARM/fpow.ll2
-rw-r--r--test/CodeGen/ARM/fptoint.ll2
-rw-r--r--test/CodeGen/ARM/frame-register.ll38
-rw-r--r--test/CodeGen/ARM/fsubs.ll19
-rw-r--r--test/CodeGen/ARM/func-argpassing-endian.ll122
-rw-r--r--test/CodeGen/ARM/global-merge-1.ll85
-rw-r--r--test/CodeGen/ARM/half.ll74
-rw-r--r--test/CodeGen/ARM/hello.ll21
-rw-r--r--test/CodeGen/ARM/hfa-in-contiguous-registers.ll94
-rw-r--r--test/CodeGen/ARM/hints.ll69
-rw-r--r--test/CodeGen/ARM/iabs.ll2
-rw-r--r--test/CodeGen/ARM/ifconv-kills.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt-branch-weight-bug.ll63
-rw-r--r--test/CodeGen/ARM/ifcvt-branch-weight.ll42
-rw-r--r--test/CodeGen/ARM/ifcvt1.ll4
-rw-r--r--test/CodeGen/ARM/ifcvt10.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt2.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt3.ll14
-rw-r--r--test/CodeGen/ARM/ifcvt4.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt9.ll2
-rw-r--r--test/CodeGen/ARM/illegal-vector-bitcast.ll4
-rw-r--r--test/CodeGen/ARM/imm.ll5
-rw-r--r--test/CodeGen/ARM/indirect-hidden.ll22
-rw-r--r--test/CodeGen/ARM/indirect-reg-input.ll2
-rw-r--r--test/CodeGen/ARM/indirectbr-3.ll2
-rw-r--r--test/CodeGen/ARM/indirectbr.ll11
-rw-r--r--test/CodeGen/ARM/inline-diagnostics.ll16
-rw-r--r--test/CodeGen/ARM/inlineasm-64bit.ll4
-rw-r--r--test/CodeGen/ARM/inlineasm-imm-arm.ll2
-rw-r--r--test/CodeGen/ARM/inlineasm-ldr-pseudo.ll17
-rw-r--r--test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-arm.ll18
-rw-r--r--test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-thumb.ll18
-rw-r--r--test/CodeGen/ARM/inlineasm-switch-mode.ll22
-rw-r--r--test/CodeGen/ARM/inlineasm.ll2
-rw-r--r--test/CodeGen/ARM/inlineasm2.ll2
-rw-r--r--test/CodeGen/ARM/inlineasm3.ll3
-rw-r--r--test/CodeGen/ARM/inlineasm4.ll2
-rw-r--r--test/CodeGen/ARM/insn-sched1.ll11
-rw-r--r--test/CodeGen/ARM/integer_insertelement.ll2
-rw-r--r--test/CodeGen/ARM/interrupt-attr.ll56
-rw-r--r--test/CodeGen/ARM/intrinsics-crypto.ll58
-rw-r--r--test/CodeGen/ARM/intrinsics-memory-barrier.ll55
-rw-r--r--test/CodeGen/ARM/intrinsics-overflow.ll57
-rw-r--r--test/CodeGen/ARM/intrinsics-v8.ll4
-rw-r--r--test/CodeGen/ARM/ispositive.ll2
-rw-r--r--test/CodeGen/ARM/jump_tables.ll32
-rw-r--r--test/CodeGen/ARM/large-stack.ll2
-rw-r--r--test/CodeGen/ARM/ldaex-stlex.ll92
-rw-r--r--test/CodeGen/ARM/ldm.ll4
-rw-r--r--test/CodeGen/ARM/ldr.ll2
-rw-r--r--test/CodeGen/ARM/ldr_ext.ll2
-rw-r--r--test/CodeGen/ARM/ldr_frame.ll5
-rw-r--r--test/CodeGen/ARM/ldr_post.ll4
-rw-r--r--test/CodeGen/ARM/ldr_pre.ll4
-rw-r--r--test/CodeGen/ARM/ldrd.ll8
-rw-r--r--test/CodeGen/ARM/ldstrex-m.ll59
-rw-r--r--test/CodeGen/ARM/ldstrex.ll28
-rw-r--r--test/CodeGen/ARM/lit.local.cfg3
-rw-r--r--test/CodeGen/ARM/load.ll13
-rw-r--r--test/CodeGen/ARM/long-setcc.ll7
-rw-r--r--test/CodeGen/ARM/long.ll2
-rw-r--r--test/CodeGen/ARM/longMAC.ll35
-rw-r--r--test/CodeGen/ARM/long_shift.ll58
-rw-r--r--test/CodeGen/ARM/lsr-scale-addr-mode.ll5
-rw-r--r--test/CodeGen/ARM/lsr-unfolded-offset.ll4
-rw-r--r--test/CodeGen/ARM/machine-licm.ll10
-rw-r--r--test/CodeGen/ARM/mature-mc-support.ll12
-rw-r--r--test/CodeGen/ARM/mem.ll8
-rw-r--r--test/CodeGen/ARM/memcpy-inline.ll34
-rw-r--r--test/CodeGen/ARM/memfunc.ll3
-rw-r--r--test/CodeGen/ARM/metadata-default.ll16
-rw-r--r--test/CodeGen/ARM/metadata-short-enums.ll16
-rw-r--r--test/CodeGen/ARM/metadata-short-wchar.ll16
-rw-r--r--test/CodeGen/ARM/minsize-imms.ll57
-rw-r--r--test/CodeGen/ARM/minsize-litpools.ll26
-rw-r--r--test/CodeGen/ARM/misched-copy-arm.ll2
-rw-r--r--test/CodeGen/ARM/mls.ll5
-rw-r--r--test/CodeGen/ARM/movt-movw-global.ll8
-rw-r--r--test/CodeGen/ARM/movt.ll2
-rw-r--r--test/CodeGen/ARM/mul.ll14
-rw-r--r--test/CodeGen/ARM/mul_const.ll2
-rw-r--r--test/CodeGen/ARM/mulhi.ll6
-rw-r--r--test/CodeGen/ARM/mult-alt-generic-arm.ll2
-rw-r--r--test/CodeGen/ARM/mvn.ll16
-rw-r--r--test/CodeGen/ARM/named-reg-alloc.ll14
-rw-r--r--test/CodeGen/ARM/named-reg-notareg.ll13
-rw-r--r--test/CodeGen/ARM/neon_arith1.ll5
-rw-r--r--test/CodeGen/ARM/neon_cmp.ll3
-rw-r--r--test/CodeGen/ARM/neon_div.ll3
-rw-r--r--test/CodeGen/ARM/neon_fpconv.ll2
-rw-r--r--test/CodeGen/ARM/neon_ld1.ll2
-rw-r--r--test/CodeGen/ARM/neon_ld2.ll4
-rw-r--r--test/CodeGen/ARM/neon_minmax.ll2
-rw-r--r--test/CodeGen/ARM/neon_shift.ll2
-rw-r--r--test/CodeGen/ARM/neon_vabs.ll2
-rw-r--r--test/CodeGen/ARM/none-macho.ll99
-rw-r--r--test/CodeGen/ARM/noreturn.ll17
-rw-r--r--test/CodeGen/ARM/null-streamer.ll7
-rw-r--r--test/CodeGen/ARM/optimize-dmbs-v7.ll74
-rw-r--r--test/CodeGen/ARM/optselect-regclass.ll3
-rw-r--r--test/CodeGen/ARM/out-of-registers.ll42
-rw-r--r--test/CodeGen/ARM/pack.ll2
-rw-r--r--test/CodeGen/ARM/phi.ll4
-rw-r--r--test/CodeGen/ARM/popcnt.ll2
-rw-r--r--test/CodeGen/ARM/prefetch-thumb.ll22
-rw-r--r--test/CodeGen/ARM/prefetch.ll28
-rw-r--r--test/CodeGen/ARM/rbit.ll20
-rw-r--r--test/CodeGen/ARM/reg_sequence.ll12
-rw-r--r--test/CodeGen/ARM/ret0.ll2
-rw-r--r--test/CodeGen/ARM/ret_arg1.ll2
-rw-r--r--test/CodeGen/ARM/ret_arg2.ll2
-rw-r--r--test/CodeGen/ARM/ret_arg3.ll3
-rw-r--r--test/CodeGen/ARM/ret_arg4.ll2
-rw-r--r--test/CodeGen/ARM/ret_arg5.ll2
-rw-r--r--test/CodeGen/ARM/ret_f32_arg2.ll2
-rw-r--r--test/CodeGen/ARM/ret_f32_arg5.ll2
-rw-r--r--test/CodeGen/ARM/ret_f64_arg2.ll2
-rw-r--r--test/CodeGen/ARM/ret_f64_arg_reg_split.ll2
-rw-r--r--test/CodeGen/ARM/ret_f64_arg_split.ll2
-rw-r--r--test/CodeGen/ARM/ret_f64_arg_stack.ll2
-rw-r--r--test/CodeGen/ARM/ret_i128_arg2.ll2
-rw-r--r--test/CodeGen/ARM/ret_i64_arg2.ll2
-rw-r--r--test/CodeGen/ARM/ret_i64_arg3.ll2
-rw-r--r--test/CodeGen/ARM/ret_i64_arg_split.ll2
-rw-r--r--test/CodeGen/ARM/ret_void.ll2
-rw-r--r--test/CodeGen/ARM/returned-ext.ll4
-rw-r--r--test/CodeGen/ARM/returned-trunc-tail-calls.ll2
-rw-r--r--test/CodeGen/ARM/rev.ll2
-rw-r--r--test/CodeGen/ARM/saxpy10-a9.ll135
-rw-r--r--test/CodeGen/ARM/sbfx.ll2
-rw-r--r--test/CodeGen/ARM/segmented-stacks-dynamic.ll64
-rw-r--r--test/CodeGen/ARM/segmented-stacks.ll249
-rw-r--r--test/CodeGen/ARM/select-imm.ll10
-rw-r--r--test/CodeGen/ARM/select-undef.ll3
-rw-r--r--test/CodeGen/ARM/select.ll10
-rw-r--r--test/CodeGen/ARM/setcc-sentinals.ll4
-rw-r--r--test/CodeGen/ARM/sjljehprepare-lower-empty-struct.ll31
-rw-r--r--test/CodeGen/ARM/smml.ll3
-rw-r--r--test/CodeGen/ARM/smul.ll4
-rw-r--r--test/CodeGen/ARM/spill-q.ll2
-rw-r--r--test/CodeGen/ARM/ssp-data-layout.ll528
-rw-r--r--test/CodeGen/ARM/stack-frame.ll9
-rw-r--r--test/CodeGen/ARM/stackpointer.ll25
-rw-r--r--test/CodeGen/ARM/str_post.ll2
-rw-r--r--test/CodeGen/ARM/str_pre.ll8
-rw-r--r--test/CodeGen/ARM/str_trunc.ll12
-rw-r--r--test/CodeGen/ARM/struct-byval-frame-index.ll2
-rw-r--r--test/CodeGen/ARM/struct_byval_arm_t1_t2.ll2
-rw-r--r--test/CodeGen/ARM/sub.ll21
-rw-r--r--test/CodeGen/ARM/subreg-remat.ll2
-rw-r--r--test/CodeGen/ARM/sxt_rot.ll2
-rw-r--r--test/CodeGen/ARM/t2-imm.ll2
-rw-r--r--test/CodeGen/ARM/tail-call.ll21
-rw-r--r--test/CodeGen/ARM/taildup-branch-weight.ll54
-rw-r--r--test/CodeGen/ARM/this-return.ll4
-rw-r--r--test/CodeGen/ARM/thumb-litpool.ll15
-rw-r--r--test/CodeGen/ARM/thumb2-it-block.ll4
-rw-r--r--test/CodeGen/ARM/tls-models.ll32
-rw-r--r--test/CodeGen/ARM/tls1.ll2
-rw-r--r--test/CodeGen/ARM/tls2.ll4
-rw-r--r--test/CodeGen/ARM/trap.ll1
-rw-r--r--test/CodeGen/ARM/trunc_ldr.ll10
-rw-r--r--test/CodeGen/ARM/truncstore-dag-combine.ll7
-rw-r--r--test/CodeGen/ARM/tst_teq.ll7
-rw-r--r--test/CodeGen/ARM/twoaddrinstr.ll2
-rw-r--r--test/CodeGen/ARM/unaligned_load_store.ll11
-rw-r--r--test/CodeGen/ARM/unaligned_load_store_vector.ll2
-rw-r--r--test/CodeGen/ARM/undefined.ll14
-rw-r--r--test/CodeGen/ARM/unord.ll10
-rw-r--r--test/CodeGen/ARM/uxt_rot.ll14
-rw-r--r--test/CodeGen/ARM/v1-constant-fold.ll4
-rw-r--r--test/CodeGen/ARM/va_arg.ll4
-rw-r--r--test/CodeGen/ARM/vaba.ll2
-rw-r--r--test/CodeGen/ARM/vabd.ll2
-rw-r--r--test/CodeGen/ARM/vabs.ll10
-rw-r--r--test/CodeGen/ARM/vadd.ll2
-rw-r--r--test/CodeGen/ARM/varargs-spill-stack-align-nacl.ll31
-rw-r--r--test/CodeGen/ARM/vargs.ll3
-rw-r--r--test/CodeGen/ARM/vbits.ll2
-rw-r--r--test/CodeGen/ARM/vbsl.ll2
-rw-r--r--test/CodeGen/ARM/vceq.ll2
-rw-r--r--test/CodeGen/ARM/vcge.ll10
-rw-r--r--test/CodeGen/ARM/vcgt.ll12
-rw-r--r--test/CodeGen/ARM/vcnt.ll2
-rw-r--r--test/CodeGen/ARM/vcombine.ll39
-rw-r--r--test/CodeGen/ARM/vcvt.ll2
-rw-r--r--test/CodeGen/ARM/vdup.ll35
-rw-r--r--test/CodeGen/ARM/vector-spilling.ll34
-rw-r--r--test/CodeGen/ARM/vext.ll2
-rw-r--r--test/CodeGen/ARM/vfcmp.ll2
-rw-r--r--test/CodeGen/ARM/vfp-libcalls.ll11
-rw-r--r--test/CodeGen/ARM/vfp-regs-dwarf.ll44
-rw-r--r--test/CodeGen/ARM/vhadd.ll2
-rw-r--r--test/CodeGen/ARM/vhsub.ll2
-rw-r--r--test/CodeGen/ARM/vicmp.ll2
-rw-r--r--test/CodeGen/ARM/vld1.ll6
-rw-r--r--test/CodeGen/ARM/vld2.ll2
-rw-r--r--test/CodeGen/ARM/vld3.ll4
-rw-r--r--test/CodeGen/ARM/vld4.ll2
-rw-r--r--test/CodeGen/ARM/vlddup.ll2
-rw-r--r--test/CodeGen/ARM/vldlane.ll6
-rw-r--r--test/CodeGen/ARM/vldm-sched-a9.ll10
-rw-r--r--test/CodeGen/ARM/vminmax.ll2
-rw-r--r--test/CodeGen/ARM/vmla.ll2
-rw-r--r--test/CodeGen/ARM/vmls.ll2
-rw-r--r--test/CodeGen/ARM/vmov.ll2
-rw-r--r--test/CodeGen/ARM/vmul.ll2
-rw-r--r--test/CodeGen/ARM/vneg.ll2
-rw-r--r--test/CodeGen/ARM/vpadal.ll2
-rw-r--r--test/CodeGen/ARM/vpadd.ll13
-rw-r--r--test/CodeGen/ARM/vpminmax.ll2
-rw-r--r--test/CodeGen/ARM/vqadd.ll2
-rw-r--r--test/CodeGen/ARM/vqshl.ll2
-rw-r--r--test/CodeGen/ARM/vqshrn.ll2
-rw-r--r--test/CodeGen/ARM/vqsub.ll2
-rw-r--r--test/CodeGen/ARM/vrec.ll2
-rw-r--r--test/CodeGen/ARM/vrev.ll10
-rw-r--r--test/CodeGen/ARM/vsel.ll8
-rw-r--r--test/CodeGen/ARM/vselect_imax.ll2
-rw-r--r--test/CodeGen/ARM/vshift.ll34
-rw-r--r--test/CodeGen/ARM/vshiftins.ll2
-rw-r--r--test/CodeGen/ARM/vshl.ll2
-rw-r--r--test/CodeGen/ARM/vshll.ll101
-rw-r--r--test/CodeGen/ARM/vshrn.ll49
-rw-r--r--test/CodeGen/ARM/vsra.ll36
-rw-r--r--test/CodeGen/ARM/vst1.ll2
-rw-r--r--test/CodeGen/ARM/vst2.ll2
-rw-r--r--test/CodeGen/ARM/vst3.ll2
-rw-r--r--test/CodeGen/ARM/vst4.ll2
-rw-r--r--test/CodeGen/ARM/vstlane.ll2
-rw-r--r--test/CodeGen/ARM/vsub.ll2
-rw-r--r--test/CodeGen/ARM/vtbl.ll2
-rw-r--r--test/CodeGen/ARM/vtrn.ll2
-rw-r--r--test/CodeGen/ARM/vuzp.ll2
-rw-r--r--test/CodeGen/ARM/vzip.ll2
-rw-r--r--test/CodeGen/ARM/warn-stack.ll2
-rw-r--r--test/CodeGen/ARM/weak.ll6
-rw-r--r--test/CodeGen/ARM/weak2.ll5
-rw-r--r--test/CodeGen/ARM/widen-vmovs.ll6
-rw-r--r--test/CodeGen/ARM/zero-cycle-zero.ll70
-rw-r--r--test/CodeGen/ARM/zextload_demandedbits.ll2
-rw-r--r--test/CodeGen/CPP/atomic.ll89
-rw-r--r--test/CodeGen/CPP/attributes.ll7
-rw-r--r--test/CodeGen/CPP/lit.local.cfg3
-rw-r--r--test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll2
-rw-r--r--test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll2
-rw-r--r--test/CodeGen/Generic/2007-04-27-LargeMemObject.ll2
-rw-r--r--test/CodeGen/Generic/2007-12-17-InvokeAsm.ll2
-rw-r--r--test/CodeGen/Generic/2008-02-20-MatchingMem.ll2
-rw-r--r--test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll19
-rw-r--r--test/CodeGen/Generic/MachineBranchProb.ll3
-rw-r--r--test/CodeGen/Generic/asm-large-immediate.ll2
-rw-r--r--test/CodeGen/Generic/inline-asm-mem-clobber.ll2
-rw-r--r--test/CodeGen/Generic/inline-asm-special-strings.ll2
-rw-r--r--test/CodeGen/Generic/no-target.ll3
-rw-r--r--test/CodeGen/Generic/print-after.ll2
-rw-r--r--test/CodeGen/Generic/select.ll1
-rw-r--r--test/CodeGen/Generic/stop-after.ll2
-rw-r--r--test/CodeGen/Hexagon/hwloop-dbg.ll8
-rw-r--r--test/CodeGen/Hexagon/lit.local.cfg3
-rw-r--r--test/CodeGen/Hexagon/packetize_cond_inst.ll2
-rw-r--r--test/CodeGen/MSP430/fp.ll12
-rw-r--r--test/CodeGen/MSP430/lit.local.cfg3
-rw-r--r--test/CodeGen/MSP430/misched-msp430.ll20
-rw-r--r--test/CodeGen/Mips/2008-07-16-SignExtInReg.ll6
-rw-r--r--test/CodeGen/Mips/2008-08-01-AsmInline.ll2
-rw-r--r--test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll8
-rw-r--r--test/CodeGen/Mips/2010-07-20-Switch.ll8
-rw-r--r--test/CodeGen/Mips/2013-11-18-fp64-const0.ll2
-rw-r--r--test/CodeGen/Mips/Fast-ISel/loadstore2.ll83
-rw-r--r--test/CodeGen/Mips/Fast-ISel/nullvoid.ll9
-rw-r--r--test/CodeGen/Mips/Fast-ISel/simplestore.ll15
-rw-r--r--test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll38
-rw-r--r--test/CodeGen/Mips/Fast-ISel/simplestorei.ll65
-rw-r--r--test/CodeGen/Mips/abicalls.ll16
-rw-r--r--test/CodeGen/Mips/abiflags-xx.ll5
-rw-r--r--test/CodeGen/Mips/abiflags32.ll17
-rw-r--r--test/CodeGen/Mips/addi.ll2
-rw-r--r--test/CodeGen/Mips/align16.ll8
-rw-r--r--test/CodeGen/Mips/alloca16.ll4
-rw-r--r--test/CodeGen/Mips/analyzebranch.ll35
-rw-r--r--test/CodeGen/Mips/atomic.ll672
-rw-r--r--test/CodeGen/Mips/atomicops.ll3
-rw-r--r--test/CodeGen/Mips/blez_bgez.ll2
-rw-r--r--test/CodeGen/Mips/blockaddr.ll16
-rw-r--r--test/CodeGen/Mips/bswap.ll83
-rw-r--r--test/CodeGen/Mips/buildpairextractelementf64.ll34
-rw-r--r--test/CodeGen/Mips/cache-intrinsic.ll26
-rw-r--r--test/CodeGen/Mips/call-optimization.ll91
-rw-r--r--test/CodeGen/Mips/cconv/arguments-float.ll222
-rw-r--r--test/CodeGen/Mips/cconv/arguments-fp128.ll51
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll157
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-float.ll211
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-fp128.ll49
-rw-r--r--test/CodeGen/Mips/cconv/arguments.ll170
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved-float.ll111
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved-fpxx.ll58
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll24
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved.ll167
-rw-r--r--test/CodeGen/Mips/cconv/memory-layout.ll140
-rw-r--r--test/CodeGen/Mips/cconv/reserved-space.ll39
-rw-r--r--test/CodeGen/Mips/cconv/return-float.ll48
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-float.ll59
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-fp128.ll31
-rw-r--r--test/CodeGen/Mips/cconv/return.ll66
-rw-r--r--test/CodeGen/Mips/cconv/stack-alignment.ll28
-rw-r--r--test/CodeGen/Mips/cfi_offset.ll41
-rw-r--r--test/CodeGen/Mips/ci2.ll39
-rwxr-xr-xtest/CodeGen/Mips/cmov.ll710
-rw-r--r--test/CodeGen/Mips/const-mult.ll3
-rw-r--r--test/CodeGen/Mips/const4a.ll4
-rw-r--r--test/CodeGen/Mips/const6.ll2
-rw-r--r--test/CodeGen/Mips/const6a.ll2
-rw-r--r--test/CodeGen/Mips/countleading.ll90
-rw-r--r--test/CodeGen/Mips/ctlz.ll2
-rw-r--r--test/CodeGen/Mips/divrem.ll363
-rw-r--r--test/CodeGen/Mips/dsp-r1.ll2
-rw-r--r--test/CodeGen/Mips/eh-dwarf-cfa.ll2
-rw-r--r--test/CodeGen/Mips/eh-return32.ll14
-rw-r--r--test/CodeGen/Mips/eh-return64.ll16
-rw-r--r--test/CodeGen/Mips/ehframe-indirect.ll34
-rw-r--r--test/CodeGen/Mips/elf_eflags.ll86
-rw-r--r--test/CodeGen/Mips/ex2.ll9
-rw-r--r--test/CodeGen/Mips/f16abs.ll2
-rw-r--r--test/CodeGen/Mips/fabs.ll50
-rw-r--r--test/CodeGen/Mips/fastcc.ll97
-rw-r--r--test/CodeGen/Mips/fcmp.ll783
-rw-r--r--test/CodeGen/Mips/fcopysign-f32-f64.ll1
-rw-r--r--test/CodeGen/Mips/fcopysign.ll3
-rw-r--r--test/CodeGen/Mips/fixdfsf.ll4
-rw-r--r--test/CodeGen/Mips/fmadd1.ll323
-rw-r--r--test/CodeGen/Mips/fneg.ll27
-rw-r--r--test/CodeGen/Mips/fp-indexed-ls.ll194
-rw-r--r--test/CodeGen/Mips/fp16instrinsmc.ll4
-rw-r--r--test/CodeGen/Mips/fp16mix.ll20
-rw-r--r--test/CodeGen/Mips/fp16static.ll2
-rw-r--r--test/CodeGen/Mips/fp64a.ll197
-rw-r--r--test/CodeGen/Mips/fpbr.ll93
-rw-r--r--test/CodeGen/Mips/fpneeded.ll16
-rw-r--r--test/CodeGen/Mips/fpnotneeded.ll16
-rw-r--r--test/CodeGen/Mips/fptr2.ll2
-rw-r--r--test/CodeGen/Mips/fpxx.ll221
-rw-r--r--test/CodeGen/Mips/global-address.ll8
-rw-r--r--test/CodeGen/Mips/helloworld.ll9
-rw-r--r--test/CodeGen/Mips/hf16_1.ll4
-rw-r--r--test/CodeGen/Mips/hf16call32.ll2
-rw-r--r--test/CodeGen/Mips/hf16call32_body.ll2
-rw-r--r--test/CodeGen/Mips/hf1_body.ll2
-rw-r--r--test/CodeGen/Mips/hfptrcall.ll2
-rw-r--r--test/CodeGen/Mips/i32k.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-bad-I-1.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-bad-J.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-bad-L.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-bad-N.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-bad-O.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-bad-P.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll19
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll4
-rw-r--r--test/CodeGen/Mips/inlineasm-operand-code.ll28
-rw-r--r--test/CodeGen/Mips/inlineasm_constraint.ll30
-rw-r--r--test/CodeGen/Mips/int-to-float-conversion.ll1
-rw-r--r--test/CodeGen/Mips/l3mc.ll114
-rw-r--r--test/CodeGen/Mips/largefr1.ll74
-rw-r--r--test/CodeGen/Mips/largeimmprinting.ll4
-rw-r--r--test/CodeGen/Mips/lcb2.ll133
-rw-r--r--test/CodeGen/Mips/lcb3c.ll59
-rw-r--r--test/CodeGen/Mips/lcb4a.ll69
-rw-r--r--test/CodeGen/Mips/lcb5.ll240
-rw-r--r--test/CodeGen/Mips/lit.local.cfg3
-rw-r--r--test/CodeGen/Mips/llvm-ir/call.ll166
-rw-r--r--test/CodeGen/Mips/llvm-ir/indirectbr.ll34
-rw-r--r--test/CodeGen/Mips/llvm-ir/ret.ll205
-rw-r--r--test/CodeGen/Mips/load-store-left-right.ll434
-rw-r--r--test/CodeGen/Mips/longbranch.ll188
-rw-r--r--test/CodeGen/Mips/madd-msub.ll241
-rw-r--r--test/CodeGen/Mips/mature-mc-support.ll32
-rw-r--r--test/CodeGen/Mips/mbrsize4a.ll37
-rw-r--r--test/CodeGen/Mips/micromips-atomic.ll18
-rw-r--r--test/CodeGen/Mips/micromips-directives.ll16
-rw-r--r--test/CodeGen/Mips/micromips-jal.ll48
-rw-r--r--test/CodeGen/Mips/micromips-load-effective-address.ll29
-rw-r--r--test/CodeGen/Mips/mips16-hf-attr.ll45
-rw-r--r--test/CodeGen/Mips/mips16_32_1.ll5
-rw-r--r--test/CodeGen/Mips/mips16_32_10.ll9
-rw-r--r--test/CodeGen/Mips/mips16_32_3.ll21
-rw-r--r--test/CodeGen/Mips/mips16_32_4.ll24
-rw-r--r--test/CodeGen/Mips/mips16_32_5.ll18
-rw-r--r--test/CodeGen/Mips/mips16_32_6.ll15
-rw-r--r--test/CodeGen/Mips/mips16_32_7.ll21
-rw-r--r--test/CodeGen/Mips/mips16_32_8.ll9
-rw-r--r--test/CodeGen/Mips/mips16_32_9.ll12
-rw-r--r--test/CodeGen/Mips/mips16_fpret.ll8
-rw-r--r--test/CodeGen/Mips/mips16ex.ll4
-rw-r--r--test/CodeGen/Mips/mips16fpe.ll2
-rw-r--r--test/CodeGen/Mips/mips32r6/compatibility.ll9
-rw-r--r--test/CodeGen/Mips/mips64-f128.ll358
-rw-r--r--test/CodeGen/Mips/mips64-fp-indexed-ls.ll110
-rw-r--r--test/CodeGen/Mips/mips64-sret.ll25
-rw-r--r--test/CodeGen/Mips/mips64countleading.ll19
-rw-r--r--test/CodeGen/Mips/mips64directive.ll1
-rw-r--r--test/CodeGen/Mips/mips64ext.ll3
-rw-r--r--test/CodeGen/Mips/mips64fpimm0.ll1
-rw-r--r--test/CodeGen/Mips/mips64fpldst.ll6
-rw-r--r--test/CodeGen/Mips/mips64imm.ll1
-rw-r--r--test/CodeGen/Mips/mips64instrs.ll126
-rw-r--r--test/CodeGen/Mips/mips64intldst.ll6
-rw-r--r--test/CodeGen/Mips/mips64lea.ll1
-rw-r--r--test/CodeGen/Mips/mips64load-store-left-right.ll73
-rw-r--r--test/CodeGen/Mips/mips64muldiv.ll56
-rw-r--r--test/CodeGen/Mips/mips64r6/compatibility.ll9
-rw-r--r--test/CodeGen/Mips/mno-ldc1-sdc1.ll274
-rw-r--r--test/CodeGen/Mips/msa/2r_vector_scalar.ll69
-rw-r--r--test/CodeGen/Mips/msa/3r-s.ll86
-rw-r--r--test/CodeGen/Mips/msa/arithmetic_float.ll3
-rw-r--r--test/CodeGen/Mips/msa/basic_operations.ll388
-rw-r--r--test/CodeGen/Mips/msa/basic_operations_float.ll160
-rw-r--r--test/CodeGen/Mips/msa/bitwise.ll5
-rw-r--r--test/CodeGen/Mips/msa/compare.ll34
-rw-r--r--test/CodeGen/Mips/msa/compare_float.ll28
-rw-r--r--test/CodeGen/Mips/msa/elm_copy.ll136
-rw-r--r--test/CodeGen/Mips/msa/elm_insv.ll138
-rw-r--r--test/CodeGen/Mips/msa/elm_shift_slide.ll32
-rw-r--r--test/CodeGen/Mips/msa/frameindex.ll309
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll2
-rw-r--r--test/CodeGen/Mips/msa/shift-dagcombine.ll3
-rw-r--r--test/CodeGen/Mips/msa/shuffle.ll46
-rw-r--r--test/CodeGen/Mips/msa/special.ll44
-rw-r--r--test/CodeGen/Mips/msa/vec.ll168
-rw-r--r--test/CodeGen/Mips/nacl-align.ll96
-rw-r--r--test/CodeGen/Mips/nacl-branch-delay.ll71
-rw-r--r--test/CodeGen/Mips/nacl-reserved-regs.ll51
-rw-r--r--test/CodeGen/Mips/no-odd-spreg.ll58
-rw-r--r--test/CodeGen/Mips/nomips16.ll2
-rw-r--r--test/CodeGen/Mips/null-streamer.ll7
-rw-r--r--test/CodeGen/Mips/null.ll2
-rw-r--r--test/CodeGen/Mips/o32_cc.ll288
-rw-r--r--test/CodeGen/Mips/octeon.ll29
-rw-r--r--test/CodeGen/Mips/octeon_popcnt.ll47
-rw-r--r--test/CodeGen/Mips/optimize-fp-math.ll1
-rw-r--r--test/CodeGen/Mips/optimize-pic-o0.ll33
-rw-r--r--test/CodeGen/Mips/powif64_16.ll2
-rw-r--r--test/CodeGen/Mips/prevent-hoisting.ll144
-rw-r--r--test/CodeGen/Mips/remat-immed-load.ll1
-rw-r--r--test/CodeGen/Mips/rotate.ll2
-rw-r--r--test/CodeGen/Mips/s2rem.ll92
-rw-r--r--test/CodeGen/Mips/sel1c.ll2
-rw-r--r--test/CodeGen/Mips/sel2c.ll2
-rw-r--r--test/CodeGen/Mips/select.ll800
-rw-r--r--test/CodeGen/Mips/selectcc.ll14
-rw-r--r--test/CodeGen/Mips/sint-fp-store_pattern.ll1
-rw-r--r--test/CodeGen/Mips/sr1.ll60
-rw-r--r--test/CodeGen/Mips/start-asm-file.ll91
-rw-r--r--test/CodeGen/Mips/tail16.ll20
-rw-r--r--test/CodeGen/Mips/tls-alias.ll4
-rw-r--r--test/CodeGen/Mips/tls.ll6
-rw-r--r--test/CodeGen/Mips/trap1.ll2
-rw-r--r--test/CodeGen/Mips/unalignedload.ll82
-rw-r--r--test/CodeGen/Mips/zeroreg.ll98
-rw-r--r--test/CodeGen/NVPTX/access-non-generic.ll91
-rw-r--r--test/CodeGen/NVPTX/addrspacecast-gvar.ll9
-rw-r--r--test/CodeGen/NVPTX/addrspacecast.ll99
-rw-r--r--test/CodeGen/NVPTX/aggr-param.ll20
-rw-r--r--test/CodeGen/NVPTX/arg-lowering.ll13
-rw-r--r--test/CodeGen/NVPTX/arithmetic-fp-sm20.ll12
-rw-r--r--test/CodeGen/NVPTX/arithmetic-int.ll26
-rw-r--r--test/CodeGen/NVPTX/atomics.ll182
-rw-r--r--test/CodeGen/NVPTX/bfe.ll32
-rw-r--r--test/CodeGen/NVPTX/bug17709.ll2
-rw-r--r--test/CodeGen/NVPTX/call-with-alloca-buffer.ll66
-rw-r--r--test/CodeGen/NVPTX/compare-int.ll40
-rw-r--r--test/CodeGen/NVPTX/convert-fp.ll28
-rw-r--r--test/CodeGen/NVPTX/convert-int-sm20.ll8
-rw-r--r--test/CodeGen/NVPTX/div-ri.ll8
-rw-r--r--test/CodeGen/NVPTX/envreg.ll139
-rw-r--r--test/CodeGen/NVPTX/fma.ll4
-rw-r--r--test/CodeGen/NVPTX/fp-contract.ll33
-rw-r--r--test/CodeGen/NVPTX/fp-literals.ll7
-rw-r--r--test/CodeGen/NVPTX/fp16.ll45
-rw-r--r--test/CodeGen/NVPTX/gvar-init.ll5
-rw-r--r--test/CodeGen/NVPTX/half.ll70
-rw-r--r--test/CodeGen/NVPTX/imad.ll9
-rw-r--r--test/CodeGen/NVPTX/implicit-def.ll2
-rw-r--r--test/CodeGen/NVPTX/inline-asm.ll7
-rw-r--r--test/CodeGen/NVPTX/intrinsic-old.ll2
-rw-r--r--test/CodeGen/NVPTX/intrinsics.ll2
-rw-r--r--test/CodeGen/NVPTX/isspacep.ll35
-rw-r--r--test/CodeGen/NVPTX/ld-addrspace.ll48
-rw-r--r--test/CodeGen/NVPTX/ld-generic.ll16
-rw-r--r--test/CodeGen/NVPTX/ldparam-v4.ll10
-rw-r--r--test/CodeGen/NVPTX/ldu-i8.ll6
-rw-r--r--test/CodeGen/NVPTX/ldu-ldg.ll40
-rw-r--r--test/CodeGen/NVPTX/ldu-reg-plus-offset.ll6
-rw-r--r--test/CodeGen/NVPTX/lit.local.cfg3
-rw-r--r--test/CodeGen/NVPTX/local-stack-frame.ll18
-rw-r--r--test/CodeGen/NVPTX/managed.ll11
-rw-r--r--test/CodeGen/NVPTX/misaligned-vector-ldst.ll77
-rw-r--r--test/CodeGen/NVPTX/mulwide.ll46
-rw-r--r--test/CodeGen/NVPTX/noduplicate-syncthreads.ll74
-rw-r--r--test/CodeGen/NVPTX/nvvm-reflect.ll16
-rw-r--r--test/CodeGen/NVPTX/pr13291-i1-store.ll4
-rw-r--r--test/CodeGen/NVPTX/rotate.ll58
-rw-r--r--test/CodeGen/NVPTX/shift-parts.ll38
-rw-r--r--test/CodeGen/NVPTX/st-addrspace.ll48
-rw-r--r--test/CodeGen/NVPTX/st-generic.ll16
-rw-r--r--test/CodeGen/NVPTX/surf-read-cuda.ll53
-rw-r--r--test/CodeGen/NVPTX/surf-read.ll20
-rw-r--r--test/CodeGen/NVPTX/surf-write-cuda.ll42
-rw-r--r--test/CodeGen/NVPTX/surf-write.ll16
-rw-r--r--test/CodeGen/NVPTX/symbol-naming.ll31
-rw-r--r--test/CodeGen/NVPTX/tex-read-cuda.ll46
-rw-r--r--test/CodeGen/NVPTX/tex-read.ll20
-rw-r--r--test/CodeGen/NVPTX/texsurf-queries.ll103
-rw-r--r--test/CodeGen/NVPTX/vec-param-load.ll6
-rw-r--r--test/CodeGen/NVPTX/vector-call.ll12
-rw-r--r--test/CodeGen/NVPTX/weak-global.ll9
-rw-r--r--test/CodeGen/NVPTX/weak-linkage.ll12
-rw-r--r--test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll4
-rw-r--r--test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll2
-rw-r--r--test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll3
-rw-r--r--test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll1
-rw-r--r--test/CodeGen/PowerPC/2008-12-12-EH.ll9
-rw-r--r--test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll8
-rw-r--r--test/CodeGen/PowerPC/Atomics-32.ll48
-rw-r--r--test/CodeGen/PowerPC/Atomics-64.ll32
-rw-r--r--test/CodeGen/PowerPC/Frames-alloca.ll8
-rw-r--r--test/CodeGen/PowerPC/Frames-large.ll16
-rw-r--r--test/CodeGen/PowerPC/Frames-small.ll16
-rw-r--r--test/CodeGen/PowerPC/aa-tbaa.ll41
-rw-r--r--test/CodeGen/PowerPC/alias.ll31
-rw-r--r--test/CodeGen/PowerPC/anon_aggr.ll28
-rw-r--r--test/CodeGen/PowerPC/atomic-1.ll3
-rw-r--r--test/CodeGen/PowerPC/atomic-2.ll3
-rw-r--r--test/CodeGen/PowerPC/available-externally.ll9
-rw-r--r--test/CodeGen/PowerPC/bdzlr.ll9
-rw-r--r--test/CodeGen/PowerPC/cc.ll4
-rw-r--r--test/CodeGen/PowerPC/coalesce-ext.ll2
-rw-r--r--test/CodeGen/PowerPC/complex-return.ll4
-rw-r--r--test/CodeGen/PowerPC/crash.ll17
-rw-r--r--test/CodeGen/PowerPC/crbit-asm.ll59
-rw-r--r--test/CodeGen/PowerPC/crbits.ll192
-rw-r--r--test/CodeGen/PowerPC/ctrloop-large-ec.ll2
-rw-r--r--test/CodeGen/PowerPC/ctrloop-le.ll3
-rw-r--r--test/CodeGen/PowerPC/ctrloop-lt.ll3
-rw-r--r--test/CodeGen/PowerPC/ctrloop-sh.ll72
-rw-r--r--test/CodeGen/PowerPC/dbg.ll5
-rw-r--r--test/CodeGen/PowerPC/early-ret2.ll8
-rw-r--r--test/CodeGen/PowerPC/fast-isel-conversion-p5.ll23
-rw-r--r--test/CodeGen/PowerPC/fast-isel-conversion.ll104
-rw-r--r--test/CodeGen/PowerPC/float-to-int.ll49
-rw-r--r--test/CodeGen/PowerPC/fold-zero.ll13
-rw-r--r--test/CodeGen/PowerPC/func-addr.ll17
-rw-r--r--test/CodeGen/PowerPC/hello-reloc.s68
-rw-r--r--test/CodeGen/PowerPC/i1-to-double.ll21
-rw-r--r--test/CodeGen/PowerPC/i32-to-float.ll25
-rw-r--r--test/CodeGen/PowerPC/i64-to-float.ll25
-rw-r--r--test/CodeGen/PowerPC/indexed-load.ll22
-rw-r--r--test/CodeGen/PowerPC/inlineasm-copy.ll2
-rw-r--r--test/CodeGen/PowerPC/jaggedstructs.ll2
-rw-r--r--test/CodeGen/PowerPC/lit.local.cfg3
-rw-r--r--test/CodeGen/PowerPC/lsa.ll2
-rw-r--r--test/CodeGen/PowerPC/mature-mc-support.ll27
-rw-r--r--test/CodeGen/PowerPC/mcm-10.ll3
-rw-r--r--test/CodeGen/PowerPC/mcm-11.ll3
-rw-r--r--test/CodeGen/PowerPC/mcm-obj-2.ll4
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r0.ll15
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll18
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r1.ll20
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll18
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r13.ll18
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll17
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r2.ll18
-rw-r--r--test/CodeGen/PowerPC/optcmp.ll2
-rw-r--r--test/CodeGen/PowerPC/ppc32-i1-vaarg.ll20
-rw-r--r--test/CodeGen/PowerPC/ppc32-lshrti3.ll39
-rw-r--r--test/CodeGen/PowerPC/ppc32-pic.ll21
-rw-r--r--test/CodeGen/PowerPC/ppc32-vacopy.ll2
-rw-r--r--test/CodeGen/PowerPC/ppc64-altivec-abi.ll25
-rw-r--r--test/CodeGen/PowerPC/ppc64-byval-align.ll56
-rw-r--r--test/CodeGen/PowerPC/ppc64-calls.ll12
-rw-r--r--test/CodeGen/PowerPC/ppc64-smallarg.ll59
-rw-r--r--test/CodeGen/PowerPC/ppc64le-aggregates.ll329
-rw-r--r--test/CodeGen/PowerPC/ppc64le-calls.ll17
-rw-r--r--test/CodeGen/PowerPC/ppc64le-crsave.ll28
-rw-r--r--test/CodeGen/PowerPC/ppc64le-localentry.ll46
-rw-r--r--test/CodeGen/PowerPC/ppc64le-smallarg.ll59
-rw-r--r--test/CodeGen/PowerPC/ppcf128-endian.ll154
-rw-r--r--test/CodeGen/PowerPC/pr17168.ll2
-rw-r--r--test/CodeGen/PowerPC/pr18663-2.ll153
-rw-r--r--test/CodeGen/PowerPC/pr18663.ll298
-rw-r--r--test/CodeGen/PowerPC/pr20442.ll79
-rw-r--r--test/CodeGen/PowerPC/private.ll28
-rw-r--r--test/CodeGen/PowerPC/pwr7-gt-nop.ll31
-rw-r--r--test/CodeGen/PowerPC/resolvefi-basereg.ll362
-rw-r--r--test/CodeGen/PowerPC/resolvefi-disp.ll71
-rw-r--r--test/CodeGen/PowerPC/rlwimi-and.ll2
-rw-r--r--test/CodeGen/PowerPC/rlwimi-dyn-and.ll48
-rw-r--r--test/CodeGen/PowerPC/sdag-ppcf128.ll2
-rw-r--r--test/CodeGen/PowerPC/sections.ll4
-rw-r--r--test/CodeGen/PowerPC/setcc_no_zext.ll4
-rw-r--r--test/CodeGen/PowerPC/seteq-0.ll7
-rw-r--r--test/CodeGen/PowerPC/sjlj.ll4
-rw-r--r--test/CodeGen/PowerPC/splat-bug.ll18
-rw-r--r--test/CodeGen/PowerPC/srl-mask.ll16
-rw-r--r--test/CodeGen/PowerPC/stack-realign.ll53
-rw-r--r--test/CodeGen/PowerPC/stfiwx.ll35
-rw-r--r--test/CodeGen/PowerPC/structsinmem.ll2
-rw-r--r--test/CodeGen/PowerPC/structsinregs.ll2
-rw-r--r--test/CodeGen/PowerPC/subsumes-pred-regs.ll2
-rw-r--r--test/CodeGen/PowerPC/svr4-redzone.ll2
-rw-r--r--test/CodeGen/PowerPC/tls-2.ll15
-rw-r--r--test/CodeGen/PowerPC/tls-gd.ll23
-rw-r--r--test/CodeGen/PowerPC/tls-ie.ll22
-rw-r--r--test/CodeGen/PowerPC/tls-ld-2.ll24
-rw-r--r--test/CodeGen/PowerPC/tls-ld.ll24
-rw-r--r--test/CodeGen/PowerPC/tls-pic.ll55
-rw-r--r--test/CodeGen/PowerPC/tls.ll33
-rw-r--r--test/CodeGen/PowerPC/toc-load-sched-bug.ll534
-rw-r--r--test/CodeGen/PowerPC/unaligned.ll8
-rw-r--r--test/CodeGen/PowerPC/unwind-dw2-g.ll2
-rw-r--r--test/CodeGen/PowerPC/varargs-struct-float.ll4
-rw-r--r--test/CodeGen/PowerPC/vec_cmp.ll118
-rw-r--r--test/CodeGen/PowerPC/vec_misaligned.ll10
-rw-r--r--test/CodeGen/PowerPC/vec_mul.ll17
-rw-r--r--test/CodeGen/PowerPC/vec_shuffle_le.ll209
-rw-r--r--test/CodeGen/PowerPC/vec_urem_const.ll13
-rw-r--r--test/CodeGen/PowerPC/vperm-instcombine.ll17
-rw-r--r--test/CodeGen/PowerPC/vperm-lowering.ll66
-rw-r--r--test/CodeGen/PowerPC/vsx-args.ll26
-rw-r--r--test/CodeGen/PowerPC/vsx-fma-m.ll238
-rw-r--r--test/CodeGen/PowerPC/vsx-self-copy.ll27
-rw-r--r--test/CodeGen/PowerPC/vsx-spill.ll49
-rw-r--r--test/CodeGen/PowerPC/vsx.ll651
-rw-r--r--test/CodeGen/PowerPC/vtable-reloc.ll11
-rw-r--r--test/CodeGen/PowerPC/weak_def_can_be_hidden.ll24
-rw-r--r--test/CodeGen/R600/32-bit-local-address-space.ll64
-rw-r--r--test/CodeGen/R600/64bit-kernel-args.ll4
-rw-r--r--test/CodeGen/R600/add.ll127
-rw-r--r--test/CodeGen/R600/add_i64.ll39
-rw-r--r--test/CodeGen/R600/address-space.ll11
-rw-r--r--test/CodeGen/R600/and.ll115
-rw-r--r--test/CodeGen/R600/anyext.ll14
-rw-r--r--test/CodeGen/R600/array-ptr-calc-i32.ll44
-rw-r--r--test/CodeGen/R600/array-ptr-calc-i64.ll2
-rw-r--r--test/CodeGen/R600/atomic_cmp_swap_local.ll37
-rw-r--r--test/CodeGen/R600/atomic_load_add.ll45
-rw-r--r--test/CodeGen/R600/atomic_load_sub.ll45
-rw-r--r--test/CodeGen/R600/basic-branch.ll15
-rw-r--r--test/CodeGen/R600/basic-loop.ll18
-rw-r--r--test/CodeGen/R600/bfi_int.ll2
-rw-r--r--test/CodeGen/R600/big_alu.ll12
-rw-r--r--test/CodeGen/R600/bitcast.ll67
-rw-r--r--test/CodeGen/R600/bswap.ll50
-rw-r--r--test/CodeGen/R600/call.ll33
-rw-r--r--test/CodeGen/R600/cayman-loop-bug.ll32
-rw-r--r--test/CodeGen/R600/cf-stack-bug.ll227
-rw-r--r--test/CodeGen/R600/codegen-prepare-addrmode-sext.ll19
-rw-r--r--test/CodeGen/R600/concat_vectors.ll249
-rw-r--r--test/CodeGen/R600/copy-illegal-type.ll166
-rw-r--r--test/CodeGen/R600/ctlz_zero_undef.ll70
-rw-r--r--test/CodeGen/R600/ctpop.ll284
-rw-r--r--test/CodeGen/R600/ctpop64.ll122
-rw-r--r--test/CodeGen/R600/cttz_zero_undef.ll70
-rw-r--r--test/CodeGen/R600/cvt_f32_ubyte.ll175
-rw-r--r--test/CodeGen/R600/default-fp-mode.ll29
-rw-r--r--test/CodeGen/R600/elf.r600.ll2
-rw-r--r--test/CodeGen/R600/extload.ll94
-rw-r--r--test/CodeGen/R600/extract_vector_elt_i16.ll29
-rw-r--r--test/CodeGen/R600/fabs.ll11
-rw-r--r--test/CodeGen/R600/fadd.ll37
-rw-r--r--test/CodeGen/R600/fceil.ll131
-rw-r--r--test/CodeGen/R600/fceil64.ll103
-rw-r--r--test/CodeGen/R600/fcmp64.ll2
-rw-r--r--test/CodeGen/R600/fconst64.ll4
-rw-r--r--test/CodeGen/R600/fcopysign.f32.ll50
-rw-r--r--test/CodeGen/R600/fcopysign.f64.ll37
-rw-r--r--test/CodeGen/R600/fdiv.ll77
-rw-r--r--test/CodeGen/R600/ffloor.ll104
-rw-r--r--test/CodeGen/R600/fma.ll72
-rw-r--r--test/CodeGen/R600/fnearbyint.ll57
-rw-r--r--test/CodeGen/R600/fneg.ll13
-rw-r--r--test/CodeGen/R600/fp16_to_fp.ll28
-rw-r--r--test/CodeGen/R600/fp32_to_fp16.ll14
-rw-r--r--test/CodeGen/R600/fp_to_sint.ll213
-rw-r--r--test/CodeGen/R600/fp_to_uint.f64.ll9
-rw-r--r--test/CodeGen/R600/fp_to_uint.ll210
-rw-r--r--test/CodeGen/R600/fsub64.ll7
-rw-r--r--test/CodeGen/R600/ftrunc.ll119
-rw-r--r--test/CodeGen/R600/gep-address-space.ll16
-rw-r--r--test/CodeGen/R600/gv-const-addrspace-fail.ll58
-rw-r--r--test/CodeGen/R600/gv-const-addrspace.ll97
-rw-r--r--test/CodeGen/R600/half.ll61
-rw-r--r--test/CodeGen/R600/icmp64.ll92
-rw-r--r--test/CodeGen/R600/indirect-private-64.ll89
-rw-r--r--test/CodeGen/R600/infinite-loop-evergreen.ll10
-rw-r--r--test/CodeGen/R600/infinite-loop.ll17
-rw-r--r--test/CodeGen/R600/input-mods.ll26
-rw-r--r--test/CodeGen/R600/insert_vector_elt.ll203
-rw-r--r--test/CodeGen/R600/insert_vector_elt_f64.ll36
-rw-r--r--test/CodeGen/R600/jump-address.ll2
-rw-r--r--test/CodeGen/R600/kernel-args.ll32
-rw-r--r--test/CodeGen/R600/large-alloca.ll14
-rw-r--r--test/CodeGen/R600/large-constant-initializer.ll18
-rw-r--r--test/CodeGen/R600/lds-output-queue.ll4
-rw-r--r--test/CodeGen/R600/lds-size.ll2
-rw-r--r--test/CodeGen/R600/lit.local.cfg3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.abs.ll48
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll28
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll12
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll426
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll554
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfi.ll41
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfm.ll40
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.brev.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.clamp.ll28
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll42
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_scale.ll48
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.fract.ll27
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imad24.ll21
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imax.ll29
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imin.ll29
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imul24.ll15
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.kill.ll12
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll13
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll30
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rcp.ll65
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll11
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll14
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.ll13
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll29
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umad24.ll19
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umax.ll44
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umin.ll44
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umul24.ll17
-rw-r--r--test/CodeGen/R600/llvm.SI.gather4.ll508
-rw-r--r--test/CodeGen/R600/llvm.SI.getlod.ll44
-rw-r--r--test/CodeGen/R600/llvm.SI.image.ll49
-rw-r--r--test/CodeGen/R600/llvm.SI.image.sample.ll289
-rw-r--r--test/CodeGen/R600/llvm.SI.image.sample.o.ll289
-rw-r--r--test/CodeGen/R600/llvm.SI.sample-masked.ll16
-rw-r--r--test/CodeGen/R600/llvm.SI.sample.ll6
-rw-r--r--test/CodeGen/R600/llvm.SI.sampled.ll4
-rw-r--r--test/CodeGen/R600/llvm.SI.sendmsg.ll8
-rw-r--r--test/CodeGen/R600/llvm.SI.tbuffer.store.ll26
-rw-r--r--test/CodeGen/R600/llvm.amdgpu.dp4.ll11
-rw-r--r--test/CodeGen/R600/llvm.amdgpu.kilp.ll20
-rw-r--r--test/CodeGen/R600/llvm.amdgpu.lrp.ll12
-rw-r--r--test/CodeGen/R600/llvm.cos.ll43
-rw-r--r--test/CodeGen/R600/llvm.exp2.ll79
-rw-r--r--test/CodeGen/R600/llvm.log2.ll79
-rw-r--r--test/CodeGen/R600/llvm.pow.ll29
-rw-r--r--test/CodeGen/R600/llvm.rint.f64.ll45
-rw-r--r--test/CodeGen/R600/llvm.rint.ll77
-rw-r--r--test/CodeGen/R600/llvm.sin.ll66
-rw-r--r--test/CodeGen/R600/llvm.sqrt.ll2
-rw-r--r--test/CodeGen/R600/llvm.trunc.ll13
-rw-r--r--test/CodeGen/R600/load-i1.ll107
-rw-r--r--test/CodeGen/R600/load.ll187
-rw-r--r--test/CodeGen/R600/load64.ll20
-rw-r--r--test/CodeGen/R600/local-64.ll158
-rw-r--r--test/CodeGen/R600/local-atomics.ll254
-rw-r--r--test/CodeGen/R600/local-atomics64.ll251
-rw-r--r--test/CodeGen/R600/local-memory-two-objects.ll15
-rw-r--r--test/CodeGen/R600/local-memory.ll8
-rw-r--r--test/CodeGen/R600/loop-idiom.ll54
-rw-r--r--test/CodeGen/R600/mad_int24.ll17
-rw-r--r--test/CodeGen/R600/mad_uint24.ll73
-rw-r--r--test/CodeGen/R600/mubuf.ll98
-rw-r--r--test/CodeGen/R600/mul.ll75
-rw-r--r--test/CodeGen/R600/mul_int24.ll17
-rw-r--r--test/CodeGen/R600/mul_uint24.ll69
-rw-r--r--test/CodeGen/R600/mulhu.ll2
-rw-r--r--test/CodeGen/R600/no-initializer-constant-addrspace.ll20
-rw-r--r--test/CodeGen/R600/or.ll150
-rw-r--r--test/CodeGen/R600/parallelandifcollapse.ll6
-rw-r--r--test/CodeGen/R600/parallelorifcollapse.ll5
-rw-r--r--test/CodeGen/R600/private-memory-atomics.ll31
-rw-r--r--test/CodeGen/R600/private-memory-broken.ll20
-rw-r--r--test/CodeGen/R600/private-memory.ll236
-rw-r--r--test/CodeGen/R600/pv.ll6
-rw-r--r--test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll59
-rw-r--r--test/CodeGen/R600/register-count-comments.ll20
-rw-r--r--test/CodeGen/R600/reorder-stores.ll104
-rw-r--r--test/CodeGen/R600/rotl.i64.ll34
-rw-r--r--test/CodeGen/R600/rotl.ll54
-rw-r--r--test/CodeGen/R600/rotr.i64.ll58
-rw-r--r--test/CodeGen/R600/rotr.ll67
-rw-r--r--test/CodeGen/R600/rsq.ll28
-rw-r--r--test/CodeGen/R600/saddo.ll62
-rw-r--r--test/CodeGen/R600/salu-to-valu.ll90
-rw-r--r--test/CodeGen/R600/scalar_to_vector.ll80
-rw-r--r--test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll162
-rw-r--r--test/CodeGen/R600/sdiv.ll90
-rw-r--r--test/CodeGen/R600/select-i1.ll14
-rw-r--r--test/CodeGen/R600/select-vectors.ll155
-rw-r--r--test/CodeGen/R600/select.ll17
-rw-r--r--test/CodeGen/R600/select64.ll50
-rw-r--r--test/CodeGen/R600/selectcc-opt.ll34
-rw-r--r--test/CodeGen/R600/selectcc.ll19
-rw-r--r--test/CodeGen/R600/setcc-equivalent.ll31
-rw-r--r--test/CodeGen/R600/setcc-opt.ll15
-rw-r--r--test/CodeGen/R600/setcc.ll26
-rw-r--r--test/CodeGen/R600/setcc64.ll26
-rw-r--r--test/CodeGen/R600/seto.ll3
-rw-r--r--test/CodeGen/R600/setuo.ll3
-rw-r--r--test/CodeGen/R600/sext-in-reg.ll524
-rw-r--r--test/CodeGen/R600/sgpr-control-flow.ll27
-rw-r--r--test/CodeGen/R600/sgpr-copy-duplicate-operand.ll2
-rw-r--r--test/CodeGen/R600/sgpr-copy.ll6
-rw-r--r--test/CodeGen/R600/shl.ll117
-rw-r--r--test/CodeGen/R600/si-annotate-cf-assertion.ll3
-rw-r--r--test/CodeGen/R600/si-sgpr-spill.ll890
-rw-r--r--test/CodeGen/R600/sign_extend.ll63
-rw-r--r--test/CodeGen/R600/simplify-demanded-bits-build-pair.ll38
-rw-r--r--test/CodeGen/R600/sint_to_fp.ll22
-rw-r--r--test/CodeGen/R600/sint_to_fp64.ll32
-rw-r--r--test/CodeGen/R600/smrd.ll98
-rw-r--r--test/CodeGen/R600/sra.ll130
-rw-r--r--test/CodeGen/R600/srem.ll50
-rw-r--r--test/CodeGen/R600/srl.ll126
-rw-r--r--test/CodeGen/R600/ssubo.ll64
-rw-r--r--test/CodeGen/R600/store-v3i32.ll12
-rw-r--r--test/CodeGen/R600/store-v3i64.ll28
-rw-r--r--test/CodeGen/R600/store-vector-ptrs.ll3
-rw-r--r--test/CodeGen/R600/store.ll92
-rw-r--r--test/CodeGen/R600/sub.ll71
-rw-r--r--test/CodeGen/R600/trunc-store-i1.ll32
-rw-r--r--test/CodeGen/R600/trunc.ll29
-rw-r--r--test/CodeGen/R600/uaddo.ll69
-rw-r--r--test/CodeGen/R600/udivrem.ll358
-rw-r--r--test/CodeGen/R600/udivrem64.ll82
-rw-r--r--test/CodeGen/R600/uint_to_fp.f64.ll36
-rw-r--r--test/CodeGen/R600/uint_to_fp.ll76
-rw-r--r--test/CodeGen/R600/unaligned-load-store.ll2
-rw-r--r--test/CodeGen/R600/unhandled-loop-condition-assertion.ll114
-rw-r--r--test/CodeGen/R600/unroll.ll37
-rw-r--r--test/CodeGen/R600/usubo.ll66
-rw-r--r--test/CodeGen/R600/v1i64-kernel-arg.ll17
-rw-r--r--test/CodeGen/R600/v_cndmask.ll14
-rw-r--r--test/CodeGen/R600/valu-i1.ll39
-rw-r--r--test/CodeGen/R600/vector-alloca.ll75
-rw-r--r--test/CodeGen/R600/vop-shrink.ll41
-rw-r--r--test/CodeGen/R600/vtx-schedule.ll4
-rw-r--r--test/CodeGen/R600/work-item-intrinsics.ll26
-rw-r--r--test/CodeGen/R600/xor.ll104
-rw-r--r--test/CodeGen/R600/zero_extend.ll16
-rw-r--r--test/CodeGen/SPARC/2009-08-28-PIC.ll40
-rw-r--r--test/CodeGen/SPARC/2011-01-11-Call.ll8
-rw-r--r--test/CodeGen/SPARC/2011-01-11-FrameAddr.ll44
-rw-r--r--test/CodeGen/SPARC/2011-01-19-DelaySlot.ll27
-rw-r--r--test/CodeGen/SPARC/64abi.ll63
-rw-r--r--test/CodeGen/SPARC/64bit.ll32
-rw-r--r--test/CodeGen/SPARC/64cond.ll7
-rw-r--r--test/CodeGen/SPARC/64spill.ll116
-rw-r--r--test/CodeGen/SPARC/atomics.ll155
-rw-r--r--test/CodeGen/SPARC/constpool.ll16
-rw-r--r--test/CodeGen/SPARC/ctpop.ll25
-rw-r--r--test/CodeGen/SPARC/exception.ll85
-rw-r--r--test/CodeGen/SPARC/fp128.ll19
-rw-r--r--test/CodeGen/SPARC/globals.ll10
-rw-r--r--test/CodeGen/SPARC/inlineasm.ll45
-rw-r--r--test/CodeGen/SPARC/leafproc.ll18
-rw-r--r--test/CodeGen/SPARC/lit.local.cfg3
-rw-r--r--test/CodeGen/SPARC/mature-mc-support.ll20
-rw-r--r--test/CodeGen/SPARC/missinglabel.ll23
-rw-r--r--test/CodeGen/SPARC/obj-relocs.ll31
-rw-r--r--test/CodeGen/SPARC/parts.ll14
-rw-r--r--test/CodeGen/SPARC/rem.ll4
-rw-r--r--test/CodeGen/SPARC/setjmp.ll2
-rw-r--r--test/CodeGen/SPARC/spillsize.ll25
-rw-r--r--test/CodeGen/SPARC/sret-secondary.ll8
-rw-r--r--test/CodeGen/SPARC/tls.ll54
-rw-r--r--test/CodeGen/SPARC/trap.ll11
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-01.py4
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-02.py2
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-03.py4
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-04.py4
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-05.py4
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-06.py4
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-09.py4
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-10.py4
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-11.py8
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-12.py8
-rw-r--r--test/CodeGen/SystemZ/Large/lit.local.cfg3
-rw-r--r--test/CodeGen/SystemZ/atomic-load-01.ll5
-rw-r--r--test/CodeGen/SystemZ/atomic-load-02.ll5
-rw-r--r--test/CodeGen/SystemZ/atomic-load-03.ll8
-rw-r--r--test/CodeGen/SystemZ/atomic-load-04.ll8
-rw-r--r--test/CodeGen/SystemZ/atomic-store-01.ll5
-rw-r--r--test/CodeGen/SystemZ/atomic-store-02.ll5
-rw-r--r--test/CodeGen/SystemZ/atomic-store-03.ll8
-rw-r--r--test/CodeGen/SystemZ/atomic-store-04.ll8
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-add-05.ll64
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-add-06.ll64
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-and-05.ll64
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-and-06.ll64
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-or-05.ll64
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-or-06.ll64
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-sub-05.ll69
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-sub-06.ll69
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xor-05.ll64
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xor-06.ll64
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-01.ll6
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-02.ll6
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-03.ll36
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-04.ll27
-rw-r--r--test/CodeGen/SystemZ/cond-store-01.ll7
-rw-r--r--test/CodeGen/SystemZ/cond-store-02.ll7
-rw-r--r--test/CodeGen/SystemZ/cond-store-03.ll4
-rw-r--r--test/CodeGen/SystemZ/cond-store-04.ll4
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-04.ll61
-rw-r--r--test/CodeGen/SystemZ/fp-conv-06.ll4
-rw-r--r--test/CodeGen/SystemZ/fp-conv-08.ll4
-rw-r--r--test/CodeGen/SystemZ/fp-conv-10.ll4
-rw-r--r--test/CodeGen/SystemZ/fp-conv-12.ll4
-rw-r--r--test/CodeGen/SystemZ/fp-conv-13.ll64
-rw-r--r--test/CodeGen/SystemZ/fp-conv-14.ll63
-rw-r--r--test/CodeGen/SystemZ/frame-08.ll4
-rw-r--r--test/CodeGen/SystemZ/frame-11.ll13
-rw-r--r--test/CodeGen/SystemZ/frame-13.ll13
-rw-r--r--test/CodeGen/SystemZ/frame-14.ll14
-rw-r--r--test/CodeGen/SystemZ/insert-06.ll14
-rw-r--r--test/CodeGen/SystemZ/int-abs-01.ll64
-rw-r--r--test/CodeGen/SystemZ/int-cmp-05.ll17
-rw-r--r--test/CodeGen/SystemZ/int-cmp-06.ll30
-rw-r--r--test/CodeGen/SystemZ/int-cmp-44.ll92
-rw-r--r--test/CodeGen/SystemZ/int-cmp-45.ll2
-rw-r--r--test/CodeGen/SystemZ/int-cmp-47.ll109
-rw-r--r--test/CodeGen/SystemZ/int-neg-02.ll133
-rw-r--r--test/CodeGen/SystemZ/lit.local.cfg3
-rw-r--r--test/CodeGen/SystemZ/mature-mc-support.ll15
-rw-r--r--test/CodeGen/SystemZ/risbg-01.ll26
-rw-r--r--test/CodeGen/SystemZ/rnsbg-01.ll11
-rw-r--r--test/CodeGen/SystemZ/rosbg-01.ll11
-rw-r--r--test/CodeGen/SystemZ/rxsbg-01.ll11
-rw-r--r--test/CodeGen/SystemZ/selectcc-01.ll178
-rw-r--r--test/CodeGen/SystemZ/selectcc-02.ll178
-rw-r--r--test/CodeGen/SystemZ/selectcc-03.ll187
-rw-r--r--test/CodeGen/SystemZ/serialize-01.ll21
-rw-r--r--test/CodeGen/SystemZ/shift-04.ll101
-rw-r--r--test/CodeGen/SystemZ/shift-10.ll19
-rw-r--r--test/CodeGen/SystemZ/spill-01.ll2
-rw-r--r--test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll4
-rw-r--r--test/CodeGen/Thumb/2010-06-18-SibCallCrash.ll2
-rw-r--r--test/CodeGen/Thumb/2010-07-15-debugOrdering.ll2
-rw-r--r--test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll18
-rw-r--r--test/CodeGen/Thumb/DbgValueOtherTargets.test2
-rw-r--r--test/CodeGen/Thumb/barrier.ll6
-rw-r--r--test/CodeGen/Thumb/cortex-m0-unaligned-access.ll13
-rw-r--r--test/CodeGen/Thumb/fastcc.ll36
-rw-r--r--test/CodeGen/Thumb/fpconv.ll2
-rw-r--r--test/CodeGen/Thumb/fpow.ll2
-rw-r--r--test/CodeGen/Thumb/inlineasm-imm-thumb.ll2
-rw-r--r--test/CodeGen/Thumb/inlineasm-thumb.ll3
-rw-r--r--test/CodeGen/Thumb/ispositive.ll2
-rw-r--r--test/CodeGen/Thumb/ldr_ext.ll4
-rw-r--r--test/CodeGen/Thumb/ldr_frame.ll2
-rw-r--r--test/CodeGen/Thumb/lit.local.cfg3
-rw-r--r--test/CodeGen/Thumb/long-setcc.ll9
-rw-r--r--test/CodeGen/Thumb/long.ll20
-rw-r--r--test/CodeGen/Thumb/long_shift.ll2
-rw-r--r--test/CodeGen/Thumb/mature-mc-support.ll12
-rw-r--r--test/CodeGen/Thumb/mul.ll14
-rw-r--r--test/CodeGen/Thumb/rev.ll2
-rw-r--r--test/CodeGen/Thumb/segmented-stacks-dynamic.ll65
-rw-r--r--test/CodeGen/Thumb/segmented-stacks.ll261
-rw-r--r--test/CodeGen/Thumb/sjljehprepare-lower-vector.ll23
-rw-r--r--test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll2
-rw-r--r--test/CodeGen/Thumb/stack-frame.ll5
-rw-r--r--test/CodeGen/Thumb/thumb-imm.ll6
-rw-r--r--test/CodeGen/Thumb/thumb-ldm.ll43
-rw-r--r--test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll38
-rw-r--r--test/CodeGen/Thumb/trap.ll2
-rw-r--r--test/CodeGen/Thumb/triple.ll7
-rw-r--r--test/CodeGen/Thumb/tst_teq.ll5
-rw-r--r--test/CodeGen/Thumb/unord.ll11
-rw-r--r--test/CodeGen/Thumb/vargs.ll11
-rw-r--r--test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll2
-rw-r--r--test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll23
-rw-r--r--test/CodeGen/Thumb2/bfi.ll2
-rw-r--r--test/CodeGen/Thumb2/bfx.ll2
-rw-r--r--test/CodeGen/Thumb2/buildvector-crash.ll2
-rw-r--r--test/CodeGen/Thumb2/carry.ll2
-rw-r--r--test/CodeGen/Thumb2/cortex-fp.ll10
-rw-r--r--test/CodeGen/Thumb2/cross-rc-coalescing-2.ll2
-rw-r--r--test/CodeGen/Thumb2/div.ll8
-rw-r--r--test/CodeGen/Thumb2/ifcvt-neon.ll2
-rw-r--r--test/CodeGen/Thumb2/large-stack.ll6
-rw-r--r--test/CodeGen/Thumb2/ldr-str-imm12.ll2
-rw-r--r--test/CodeGen/Thumb2/lit.local.cfg3
-rw-r--r--test/CodeGen/Thumb2/longMACt.ll2
-rw-r--r--test/CodeGen/Thumb2/mul_const.ll2
-rw-r--r--test/CodeGen/Thumb2/segmented-stacks.ll34
-rw-r--r--test/CodeGen/Thumb2/tail-call-r9.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-adc.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-add.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-add2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-add3.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-add4.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-add5.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-add6.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-and.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-and2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-asr.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-asr2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-bcc.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-bfc.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-bic.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-branch.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-cbnz.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-clz.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmn.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmn2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmp.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmp2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-eor.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-eor2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt2.ll6
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt3.ll6
-rw-r--r--test/CodeGen/Thumb2/thumb2-jtb.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldm.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldr.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldr_ext.ll18
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldr_post.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldr_pre.ll12
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldrb.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldrh.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsl.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsl2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsr.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsr2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsr3.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-mla.ll5
-rw-r--r--test/CodeGen/Thumb2/thumb2-mls.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-mov.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-mul.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-mulhi.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-mvn.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-mvn2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-neg.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-orn.ll3
-rw-r--r--test/CodeGen/Thumb2/thumb2-orn2.ll3
-rw-r--r--test/CodeGen/Thumb2/thumb2-orr.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-orr2.ll3
-rw-r--r--test/CodeGen/Thumb2/thumb2-pack.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-rev.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-rev16.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ror.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-rsb.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-rsb2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sbc.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-select.ll3
-rw-r--r--test/CodeGen/Thumb2/thumb2-select_xform.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-shifter.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-smla.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-smul.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-spill-q.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-str.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-str_post.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-str_pre.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-strb.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-strh.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub3.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub4.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub5.ll3
-rw-r--r--test/CodeGen/Thumb2/thumb2-sxt-uxt.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sxt_rot.ll3
-rw-r--r--test/CodeGen/Thumb2/thumb2-teq.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-teq2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-tst.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-tst2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-uxt_rot.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-uxtb.ll4
-rw-r--r--test/CodeGen/Thumb2/tls1.ll2
-rw-r--r--test/CodeGen/Thumb2/tls2.ll4
-rw-r--r--test/CodeGen/Thumb2/tpsoft.ll54
-rw-r--r--test/CodeGen/Thumb2/v8_IT_3.ll8
-rw-r--r--test/CodeGen/Thumb2/v8_IT_5.ll7
-rw-r--r--test/CodeGen/Thumb2/v8_IT_6.ll100
-rw-r--r--test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll2
-rw-r--r--test/CodeGen/X86/2006-07-20-InlineAsm.ll2
-rw-r--r--test/CodeGen/X86/2006-07-31-SingleRegClass.ll2
-rw-r--r--test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll10
-rw-r--r--test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll2
-rw-r--r--test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll2
-rw-r--r--test/CodeGen/X86/2007-05-05-Personality.ll17
-rw-r--r--test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll67
-rw-r--r--test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll30
-rw-r--r--test/CodeGen/X86/2007-10-17-IllegalAsm.ll87
-rw-r--r--test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll2
-rw-r--r--test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll2
-rw-r--r--test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll2
-rw-r--r--test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll3
-rw-r--r--test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll2
-rw-r--r--test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll2
-rw-r--r--test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll8
-rw-r--r--test/CodeGen/X86/2008-03-14-SpillerCrash.ll2
-rw-r--r--test/CodeGen/X86/2008-04-02-unnamedEH.ll6
-rw-r--r--test/CodeGen/X86/2008-04-08-CoalescerCrash.ll2
-rw-r--r--test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll2
-rw-r--r--test/CodeGen/X86/2008-08-31-EH_RETURN64.ll2
-rw-r--r--test/CodeGen/X86/2008-09-18-inline-asm-2.ll6
-rw-r--r--test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll4
-rw-r--r--test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll4
-rw-r--r--test/CodeGen/X86/2008-12-12-PrivateEHSymbol.ll10
-rw-r--r--test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll2
-rw-r--r--test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll2
-rw-r--r--test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll2
-rw-r--r--test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll4
-rw-r--r--test/CodeGen/X86/2009-08-23-linkerprivate.ll8
-rw-r--r--test/CodeGen/X86/2009-09-19-earlyclobber.ll2
-rw-r--r--test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll2
-rw-r--r--test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll2
-rw-r--r--test/CodeGen/X86/2010-01-08-Atomic64Bug.ll4
-rw-r--r--test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll2
-rw-r--r--test/CodeGen/X86/2010-05-26-DotDebugLoc.ll4
-rw-r--r--test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll2
-rw-r--r--test/CodeGen/X86/2010-06-25-asm-RA-crash.ll2
-rw-r--r--test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll2
-rw-r--r--test/CodeGen/X86/2010-06-28-matched-g-constraint.ll2
-rw-r--r--test/CodeGen/X86/2010-07-02-asm-alignstack.ll2
-rw-r--r--test/CodeGen/X86/2010-07-06-asm-RIP.ll2
-rw-r--r--test/CodeGen/X86/2010-07-13-indirectXconstraint.ll2
-rw-r--r--test/CodeGen/X86/2010-08-04-StackVariable.ll2
-rw-r--r--test/CodeGen/X86/2010-09-16-EmptyFilename.ll2
-rw-r--r--test/CodeGen/X86/2010-10-08-cmpxchg8b.ll3
-rw-r--r--test/CodeGen/X86/2010-12-02-MC-Set.ll27
-rw-r--r--test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll20
-rw-r--r--test/CodeGen/X86/2011-05-09-loaduse.ll2
-rw-r--r--test/CodeGen/X86/2011-10-11-SpillDead.ll2
-rw-r--r--test/CodeGen/X86/2011-10-19-widen_vselect.ll18
-rw-r--r--test/CodeGen/X86/2011-12-28-vselecti8.ll19
-rw-r--r--test/CodeGen/X86/2012-08-17-legalizer-crash.ll2
-rw-r--r--test/CodeGen/X86/2012-11-30-handlemove-dbg.ll6
-rw-r--r--test/CodeGen/X86/2012-11-30-misched-dbg.ll23
-rw-r--r--test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll8
-rw-r--r--test/CodeGen/X86/2014-05-29-factorial.ll24
-rw-r--r--test/CodeGen/X86/2014-05-30-CombineAddNSW.ll20
-rw-r--r--test/CodeGen/X86/3addr-16bit.ll6
-rw-r--r--test/CodeGen/X86/Atomics-64.ll62
-rw-r--r--test/CodeGen/X86/GC/lit.local.cfg3
-rw-r--r--test/CodeGen/X86/GC/ocaml-gc.ll6
-rw-r--r--test/CodeGen/X86/MachineBranchProb.ll34
-rw-r--r--test/CodeGen/X86/MachineSink-DbgValue.ll4
-rw-r--r--test/CodeGen/X86/MergeConsecutiveStores.ll1
-rw-r--r--test/CodeGen/X86/add-of-carry.ll2
-rw-r--r--test/CodeGen/X86/address-type-promotion-constantexpr.ll16
-rw-r--r--test/CodeGen/X86/alias-error.ll5
-rw-r--r--test/CodeGen/X86/aliases.ll31
-rw-r--r--test/CodeGen/X86/anyregcc-crash.ll4
-rw-r--r--test/CodeGen/X86/anyregcc.ll184
-rw-r--r--test/CodeGen/X86/asm-block-labels.ll2
-rw-r--r--test/CodeGen/X86/asm-global-imm.ll2
-rw-r--r--test/CodeGen/X86/atom-bypass-slow-division-64.ll4
-rw-r--r--test/CodeGen/X86/atom-cmpb.ll36
-rw-r--r--test/CodeGen/X86/atom-fixup-lea4.ll23
-rw-r--r--test/CodeGen/X86/atomic-load-store-wide.ll2
-rw-r--r--test/CodeGen/X86/atomic-minmax-i6432.ll65
-rw-r--r--test/CodeGen/X86/atomic-ops-ancient-64.ll44
-rw-r--r--test/CodeGen/X86/atomic128.ll316
-rw-r--r--test/CodeGen/X86/atomic16.ll79
-rw-r--r--test/CodeGen/X86/atomic32.ll82
-rw-r--r--test/CodeGen/X86/atomic64.ll43
-rw-r--r--test/CodeGen/X86/atomic6432.ll94
-rw-r--r--test/CodeGen/X86/atomic8.ll81
-rw-r--r--test/CodeGen/X86/atomic_op.ll21
-rw-r--r--test/CodeGen/X86/avoid_complex_am.ll40
-rw-r--r--test/CodeGen/X86/avx-blend.ll120
-rw-r--r--test/CodeGen/X86/avx-cvt-2.ll43
-rw-r--r--test/CodeGen/X86/avx-intel-ocl.ll62
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86.ll24
-rw-r--r--test/CodeGen/X86/avx-shift.ll4
-rw-r--r--test/CodeGen/X86/avx-shuffle.ll43
-rw-r--r--test/CodeGen/X86/avx-splat.ll9
-rwxr-xr-xtest/CodeGen/X86/avx-trunc.ll8
-rw-r--r--test/CodeGen/X86/avx-vbroadcast.ll63
-rw-r--r--test/CodeGen/X86/avx-vperm2f128.ll2
-rw-r--r--test/CodeGen/X86/avx-vshufp.ll10
-rw-r--r--test/CodeGen/X86/avx-vzeroupper.ll37
-rw-r--r--test/CodeGen/X86/avx.ll136
-rw-r--r--test/CodeGen/X86/avx1-logical-load-folding.ll60
-rw-r--r--test/CodeGen/X86/avx2-blend.ll11
-rw-r--r--test/CodeGen/X86/avx2-gather.ll16
-rw-r--r--test/CodeGen/X86/avx2-intrinsics-x86.ll8
-rw-r--r--test/CodeGen/X86/avx2-shift.ll33
-rw-r--r--test/CodeGen/X86/avx2-shuffle.ll18
-rw-r--r--test/CodeGen/X86/avx2-vbroadcast.ll228
-rw-r--r--test/CodeGen/X86/avx2-vector-shifts.ll65
-rw-r--r--test/CodeGen/X86/avx512-arith.ll139
-rw-r--r--test/CodeGen/X86/avx512-build-vector.ll12
-rw-r--r--test/CodeGen/X86/avx512-cmp.ll67
-rw-r--r--test/CodeGen/X86/avx512-cvt.ll74
-rw-r--r--test/CodeGen/X86/avx512-gather-scatter-intrin.ll206
-rw-r--r--test/CodeGen/X86/avx512-inc-dec.ll13
-rw-r--r--test/CodeGen/X86/avx512-insert-extract.ll113
-rw-r--r--test/CodeGen/X86/avx512-intrinsics.ll495
-rw-r--r--test/CodeGen/X86/avx512-mask-op.ll49
-rw-r--r--test/CodeGen/X86/avx512-mov.ll60
-rw-r--r--test/CodeGen/X86/avx512-nontemporal.ll19
-rw-r--r--test/CodeGen/X86/avx512-select.ll19
-rw-r--r--test/CodeGen/X86/avx512-shuffle.ll110
-rw-r--r--test/CodeGen/X86/avx512-trunc-ext.ll31
-rw-r--r--test/CodeGen/X86/avx512-vbroadcast.ll19
-rw-r--r--test/CodeGen/X86/avx512-vec-cmp.ll87
-rw-r--r--test/CodeGen/X86/avx512-vselect-crash.ll11
-rw-r--r--test/CodeGen/X86/avx512-zext-load-crash.ll14
-rw-r--r--test/CodeGen/X86/barrier-sse.ll11
-rw-r--r--test/CodeGen/X86/blend-msb.ll31
-rw-r--r--test/CodeGen/X86/block-placement.ll2
-rw-r--r--test/CodeGen/X86/bmi.ll17
-rw-r--r--test/CodeGen/X86/br-fold.ll18
-rw-r--r--test/CodeGen/X86/bswap-vector.ll166
-rw-r--r--test/CodeGen/X86/bt.ll2
-rw-r--r--test/CodeGen/X86/cache-intrinsic.ll26
-rw-r--r--test/CodeGen/X86/call-imm.ll2
-rw-r--r--test/CodeGen/X86/cas.ll2
-rw-r--r--test/CodeGen/X86/catch.ll21
-rw-r--r--test/CodeGen/X86/cfi.ll27
-rw-r--r--test/CodeGen/X86/cfstring.ll6
-rw-r--r--test/CodeGen/X86/cmov.ll4
-rw-r--r--test/CodeGen/X86/cmp.ll58
-rw-r--r--test/CodeGen/X86/cmpxchg-i1.ll87
-rw-r--r--test/CodeGen/X86/cmpxchg-i128-i1.ll83
-rw-r--r--test/CodeGen/X86/cmpxchg16b.ll2
-rw-r--r--test/CodeGen/X86/coalescer-remat.ll3
-rw-r--r--test/CodeGen/X86/codegen-prepare-addrmode-sext.ll323
-rw-r--r--test/CodeGen/X86/codegen-prepare-cast.ll4
-rw-r--r--test/CodeGen/X86/codegen-prepare-crash.ll14
-rw-r--r--test/CodeGen/X86/codegen-prepare-extload.ll2
-rw-r--r--test/CodeGen/X86/codegen-prepare.ll1
-rw-r--r--test/CodeGen/X86/coff-comdat.ll92
-rw-r--r--test/CodeGen/X86/coff-comdat2.ll9
-rw-r--r--test/CodeGen/X86/coff-comdat3.ll8
-rw-r--r--test/CodeGen/X86/combine-64bit-vec-binop.ll273
-rw-r--r--test/CodeGen/X86/combine-avx-intrinsics.ll119
-rw-r--r--test/CodeGen/X86/combine-avx2-intrinsics.ll164
-rw-r--r--test/CodeGen/X86/combine-or.ll281
-rw-r--r--test/CodeGen/X86/combine-sse2-intrinsics.ll53
-rw-r--r--test/CodeGen/X86/combine-sse41-intrinsics.ll182
-rw-r--r--test/CodeGen/X86/combine-vec-shuffle-2.ll253
-rw-r--r--test/CodeGen/X86/combine-vec-shuffle-3.ll380
-rw-r--r--test/CodeGen/X86/combine-vec-shuffle-4.ll237
-rw-r--r--test/CodeGen/X86/combine-vec-shuffle-5.ll257
-rw-r--r--test/CodeGen/X86/combine-vec-shuffle.ll253
-rw-r--r--test/CodeGen/X86/computeKnownBits_urem.ll14
-rw-r--r--test/CodeGen/X86/const-base-addr.ll24
-rw-r--r--test/CodeGen/X86/constant-hoisting-shift-immediate.ll25
-rw-r--r--test/CodeGen/X86/constant-pool-remat-0.ll2
-rw-r--r--test/CodeGen/X86/constant-pool-sharing.ll11
-rw-r--r--test/CodeGen/X86/crash.ll6
-rw-r--r--test/CodeGen/X86/cse-add-with-overflow.ll43
-rw-r--r--test/CodeGen/X86/ctpop-combine.ll2
-rw-r--r--test/CodeGen/X86/cvt16.ll89
-rw-r--r--test/CodeGen/X86/dagcombine-and-setcc.ll47
-rw-r--r--test/CodeGen/X86/darwin-no-dead-strip.ll12
-rw-r--r--test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll109
-rw-r--r--test/CodeGen/X86/dbg-changes-codegen.ll83
-rw-r--r--test/CodeGen/X86/divide-by-constant.ll8
-rw-r--r--test/CodeGen/X86/dll-linkage.ll14
-rw-r--r--test/CodeGen/X86/dllexport-x86_64.ll108
-rw-r--r--test/CodeGen/X86/dllexport.ll130
-rw-r--r--test/CodeGen/X86/dllimport-x86_64.ll48
-rw-r--r--test/CodeGen/X86/dllimport.ll59
-rw-r--r--test/CodeGen/X86/dwarf-comp-dir.ll6
-rw-r--r--test/CodeGen/X86/dynamic-alloca-in-entry.ll19
-rw-r--r--test/CodeGen/X86/elf-comdat.ll11
-rw-r--r--test/CodeGen/X86/elf-comdat2.ll12
-rw-r--r--test/CodeGen/X86/exedepsfix-broadcast.ll128
-rw-r--r--test/CodeGen/X86/expand-opaque-const.ll21
-rw-r--r--test/CodeGen/X86/extract-store.ll22
-rw-r--r--test/CodeGen/X86/f16c-intrinsics.ll14
-rw-r--r--test/CodeGen/X86/fast-isel-args-fail.ll2
-rw-r--r--test/CodeGen/X86/fast-isel-args-fail2.ll10
-rw-r--r--test/CodeGen/X86/fast-isel-args.ll24
-rw-r--r--test/CodeGen/X86/fast-isel-branch_weights.ll19
-rw-r--r--test/CodeGen/X86/fast-isel-cmp-branch2.ll294
-rw-r--r--test/CodeGen/X86/fast-isel-cmp-branch3.ll470
-rw-r--r--test/CodeGen/X86/fast-isel-cmp.ll689
-rw-r--r--test/CodeGen/X86/fast-isel-fold-mem.ll12
-rw-r--r--test/CodeGen/X86/fast-isel-select-cmov.ll62
-rw-r--r--test/CodeGen/X86/fast-isel-select-cmov2.ll255
-rw-r--r--test/CodeGen/X86/fast-isel-select-cmp.ll50
-rw-r--r--test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll138
-rw-r--r--test/CodeGen/X86/fast-isel-select-sse.ll391
-rw-r--r--test/CodeGen/X86/fast-isel-select.ll16
-rw-r--r--test/CodeGen/X86/fast-isel-sse12-fptoint.ll54
-rw-r--r--test/CodeGen/X86/fast-isel-x86.ll6
-rw-r--r--test/CodeGen/X86/fast-isel.ll4
-rw-r--r--test/CodeGen/X86/fastcall-correct-mangling.ll25
-rw-r--r--test/CodeGen/X86/float-asmprint.ll5
-rw-r--r--test/CodeGen/X86/fma-do-not-commute.ll30
-rw-r--r--test/CodeGen/X86/fma.ll33
-rwxr-xr-xtest/CodeGen/X86/fma3-intrinsics.ll4
-rw-r--r--test/CodeGen/X86/fold-call-oper.ll48
-rw-r--r--test/CodeGen/X86/fold-load-vec.ll2
-rw-r--r--test/CodeGen/X86/fold-vector-sext-crash.ll12
-rw-r--r--test/CodeGen/X86/fold-vector-sext-zext.ll291
-rw-r--r--test/CodeGen/X86/fold-xmm-zero.ll2
-rw-r--r--test/CodeGen/X86/frameaddr.ll44
-rw-r--r--test/CodeGen/X86/gcc_except_table.ll39
-rw-r--r--test/CodeGen/X86/global-sections.ll106
-rw-r--r--test/CodeGen/X86/haddsub-2.ll802
-rw-r--r--test/CodeGen/X86/haddsub-undef.ll325
-rw-r--r--test/CodeGen/X86/half.ll69
-rw-r--r--test/CodeGen/X86/hidden-vis-pic.ll7
-rw-r--r--test/CodeGen/X86/i64-mem-copy.ll2
-rw-r--r--test/CodeGen/X86/i8-umulo.ll24
-rw-r--r--test/CodeGen/X86/inalloca-ctor.ll34
-rw-r--r--test/CodeGen/X86/inalloca-invoke.ll54
-rw-r--r--test/CodeGen/X86/inalloca-stdcall.ll26
-rw-r--r--test/CodeGen/X86/inalloca.ll65
-rw-r--r--test/CodeGen/X86/indirect-hidden.ll43
-rw-r--r--test/CodeGen/X86/inline-asm-flag-clobber.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-fpstack.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-h.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-modifier-n.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-modifier-q.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-mrv.ll8
-rw-r--r--test/CodeGen/X86/inline-asm-q-regs.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll17
-rw-r--r--test/CodeGen/X86/inline-asm-stack-realign.ll16
-rw-r--r--test/CodeGen/X86/inline-asm-stack-realign2.ll16
-rw-r--r--test/CodeGen/X86/inline-asm-stack-realign3.ll29
-rw-r--r--test/CodeGen/X86/inline-asm-tied.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-x-scalar.ll2
-rw-r--r--test/CodeGen/X86/inline-asm.ll17
-rw-r--r--test/CodeGen/X86/ins_split_regalloc.ll33
-rw-r--r--test/CodeGen/X86/isel-sink.ll1
-rw-r--r--test/CodeGen/X86/jump_table_alias.ll33
-rw-r--r--test/CodeGen/X86/jump_table_bitcast.ll46
-rw-r--r--test/CodeGen/X86/jump_tables.ll272
-rw-r--r--test/CodeGen/X86/large-constants.ll67
-rw-r--r--test/CodeGen/X86/libcall-sret.ll28
-rw-r--r--test/CodeGen/X86/lit.local.cfg5
-rw-r--r--test/CodeGen/X86/live-out-reg-info.ll2
-rw-r--r--test/CodeGen/X86/load-slice.ll2
-rw-r--r--test/CodeGen/X86/lower-bitcast.ll188
-rw-r--r--test/CodeGen/X86/lower-vec-shift.ll125
-rw-r--r--test/CodeGen/X86/lsr-interesting-step.ll14
-rw-r--r--test/CodeGen/X86/lsr-normalization.ll9
-rw-r--r--test/CodeGen/X86/lzcnt-tzcnt.ll447
-rw-r--r--test/CodeGen/X86/machine-cp.ll26
-rw-r--r--test/CodeGen/X86/macho-comdat.ll6
-rw-r--r--test/CodeGen/X86/masked-iv-safe.ll6
-rw-r--r--test/CodeGen/X86/mature-mc-support.ll18
-rw-r--r--test/CodeGen/X86/memcmp.ll22
-rw-r--r--test/CodeGen/X86/memset-2.ll4
-rw-r--r--test/CodeGen/X86/merge_store.ll1
-rw-r--r--test/CodeGen/X86/misched-aa-colored.ll189
-rw-r--r--test/CodeGen/X86/misched-aa-mmos.ll37
-rw-r--r--test/CodeGen/X86/misched-matmul.ll2
-rw-r--r--test/CodeGen/X86/mod128.ll26
-rw-r--r--test/CodeGen/X86/movbe.ll45
-rw-r--r--test/CodeGen/X86/ms-inline-asm.ll35
-rw-r--r--test/CodeGen/X86/mul128_sext_loop.ll32
-rw-r--r--test/CodeGen/X86/mult-alt-generic-i686.ll2
-rw-r--r--test/CodeGen/X86/mult-alt-generic-x86_64.ll2
-rw-r--r--test/CodeGen/X86/mult-alt-x86.ll2
-rw-r--r--test/CodeGen/X86/multiple-loop-post-inc.ll2
-rw-r--r--test/CodeGen/X86/musttail-indirect.ll124
-rw-r--r--test/CodeGen/X86/musttail-thiscall.ll31
-rw-r--r--test/CodeGen/X86/musttail.ll90
-rw-r--r--test/CodeGen/X86/named-reg-alloc.ll14
-rw-r--r--test/CodeGen/X86/named-reg-notareg.ll13
-rw-r--r--test/CodeGen/X86/negate-add-zero.ll17
-rw-r--r--test/CodeGen/X86/no-cfi.ll34
-rw-r--r--test/CodeGen/X86/no-elf-compact-unwind.ll48
-rw-r--r--test/CodeGen/X86/nocx16.ll2
-rw-r--r--test/CodeGen/X86/null-streamer.ll18
-rw-r--r--test/CodeGen/X86/opaque-constant-asm.ll13
-rw-r--r--test/CodeGen/X86/osx-private-labels.ll71
-rw-r--r--test/CodeGen/X86/patchpoint.ll76
-rw-r--r--test/CodeGen/X86/peep-test-4.ll76
-rw-r--r--test/CodeGen/X86/peephole-multiple-folds.ll29
-rw-r--r--test/CodeGen/X86/personality.ll15
-rw-r--r--test/CodeGen/X86/personality_size.ll4
-rw-r--r--test/CodeGen/X86/pic.ll3
-rw-r--r--test/CodeGen/X86/pr10420.ll67
-rw-r--r--test/CodeGen/X86/pr14090.ll70
-rw-r--r--test/CodeGen/X86/pr1462.ll3
-rw-r--r--test/CodeGen/X86/pr16031.ll2
-rw-r--r--test/CodeGen/X86/pr19049.ll7
-rw-r--r--test/CodeGen/X86/pr20020.ll73
-rw-r--r--test/CodeGen/X86/pr20088.ll9
-rw-r--r--test/CodeGen/X86/pr5145.ll16
-rw-r--r--test/CodeGen/X86/preserve_allcc64.ll104
-rw-r--r--test/CodeGen/X86/preserve_mostcc64.ll86
-rw-r--r--test/CodeGen/X86/private-2.ll2
-rw-r--r--test/CodeGen/X86/pshufd-combine-crash.ll14
-rw-r--r--test/CodeGen/X86/ragreedy-bug.ll292
-rw-r--r--test/CodeGen/X86/ragreedy-hoist-spill.ll389
-rw-r--r--test/CodeGen/X86/ragreedy-last-chance-recoloring.ll181
-rw-r--r--test/CodeGen/X86/rdpmc.ll22
-rw-r--r--test/CodeGen/X86/rdtsc.ll53
-rw-r--r--test/CodeGen/X86/remat-invalid-liveness.ll85
-rw-r--r--test/CodeGen/X86/ret-mmx.ll1
-rw-r--r--test/CodeGen/X86/rot16.ll2
-rw-r--r--test/CodeGen/X86/rotate4.ll134
-rw-r--r--test/CodeGen/X86/saddo-redundant-add.ll34
-rw-r--r--test/CodeGen/X86/segmented-stacks-dynamic.ll12
-rw-r--r--test/CodeGen/X86/segmented-stacks.ll140
-rw-r--r--test/CodeGen/X86/select.ll8
-rw-r--r--test/CodeGen/X86/setjmp-spills.ll141
-rw-r--r--test/CodeGen/X86/shift-combine-crash.ll57
-rw-r--r--test/CodeGen/X86/shift-double.ll2
-rw-r--r--test/CodeGen/X86/shift-parts.ll8
-rw-r--r--test/CodeGen/X86/shift-pcmp.ll30
-rw-r--r--test/CodeGen/X86/shl_undef.ll6
-rw-r--r--test/CodeGen/X86/shrink-compare.ll8
-rw-r--r--test/CodeGen/X86/shuffle-combine-crash.ll30
-rw-r--r--test/CodeGen/X86/sibcall-5.ll9
-rw-r--r--test/CodeGen/X86/sibcall.ll4
-rw-r--r--test/CodeGen/X86/sqrt.ll26
-rw-r--r--test/CodeGen/X86/sse-scalar-fp-arith-2.ll423
-rw-r--r--test/CodeGen/X86/sse-scalar-fp-arith.ll310
-rw-r--r--test/CodeGen/X86/sse1.ll14
-rw-r--r--test/CodeGen/X86/sse2-blend.ll22
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86.ll34
-rw-r--r--test/CodeGen/X86/sse2-vector-shifts.ll180
-rw-r--r--test/CodeGen/X86/sse2.ll24
-rw-r--r--test/CodeGen/X86/sse3-avx-addsub-2.ll318
-rw-r--r--test/CodeGen/X86/sse3-avx-addsub.ll296
-rw-r--r--test/CodeGen/X86/sse3.ll2
-rw-r--r--test/CodeGen/X86/sse41-blend.ll70
-rw-r--r--test/CodeGen/X86/sse41.ll487
-rw-r--r--test/CodeGen/X86/ssp-data-layout.ll510
-rw-r--r--test/CodeGen/X86/stack-align-memcpy.ll27
-rw-r--r--test/CodeGen/X86/stack-protector-dbginfo.ll4
-rw-r--r--test/CodeGen/X86/stack-protector.ll597
-rw-r--r--test/CodeGen/X86/stackmap-fast-isel.ll165
-rw-r--r--test/CodeGen/X86/stackmap-liveness.ll176
-rw-r--r--test/CodeGen/X86/stackmap-nops.ll230
-rw-r--r--test/CodeGen/X86/stackmap.ll333
-rw-r--r--test/CodeGen/X86/stackpointer.ll28
-rw-r--r--test/CodeGen/X86/stdcall-notailcall.ll10
-rw-r--r--test/CodeGen/X86/stdcall.ll4
-rw-r--r--test/CodeGen/X86/sunkaddr-ext.ll26
-rw-r--r--test/CodeGen/X86/swizzle-2.ll515
-rw-r--r--test/CodeGen/X86/swizzle-avx2.ll91
-rw-r--r--test/CodeGen/X86/tbm-intrinsics-x86_64.ll2
-rw-r--r--test/CodeGen/X86/testb-je-fusion.ll20
-rw-r--r--test/CodeGen/X86/tls.ll87
-rw-r--r--test/CodeGen/X86/v2f32.ll2
-rw-r--r--test/CodeGen/X86/v4i32load-crash.ll5
-rw-r--r--test/CodeGen/X86/vbinop-simplify-bug.ll23
-rw-r--r--test/CodeGen/X86/vec_cast2.ll27
-rw-r--r--test/CodeGen/X86/vec_extract-sse4.ll30
-rw-r--r--test/CodeGen/X86/vec_fabs.ll30
-rw-r--r--test/CodeGen/X86/vec_fpext.ll6
-rw-r--r--test/CodeGen/X86/vec_return.ll2
-rw-r--r--test/CodeGen/X86/vec_round.ll2
-rw-r--r--test/CodeGen/X86/vec_setcc-2.ll96
-rw-r--r--test/CodeGen/X86/vec_setcc.ll18
-rw-r--r--test/CodeGen/X86/vec_shift5.ll160
-rw-r--r--test/CodeGen/X86/vec_shift6.ll134
-rw-r--r--test/CodeGen/X86/vec_shuf-insert.ll29
-rw-r--r--test/CodeGen/X86/vec_shuffle-40.ll22
-rw-r--r--test/CodeGen/X86/vec_shuffle-41.ll21
-rw-r--r--test/CodeGen/X86/vec_splat.ll16
-rw-r--r--test/CodeGen/X86/vec_split.ll33
-rw-r--r--test/CodeGen/X86/vector-gep.ll33
-rw-r--r--test/CodeGen/X86/vector-idiv.ll218
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v16.ll196
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v2.ll219
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v4.ll170
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v8.ll493
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining.ll119
-rw-r--r--test/CodeGen/X86/viabs.ll87
-rw-r--r--test/CodeGen/X86/vselect-2.ll33
-rw-r--r--test/CodeGen/X86/vselect.ll278
-rw-r--r--test/CodeGen/X86/vshift-6.ll36
-rw-r--r--test/CodeGen/X86/warn-stack.ll2
-rw-r--r--test/CodeGen/X86/weak_def_can_be_hidden.ll24
-rw-r--r--test/CodeGen/X86/widen_cast-4.ll25
-rw-r--r--test/CodeGen/X86/widen_cast-6.ll6
-rw-r--r--test/CodeGen/X86/widen_conversions.ll18
-rw-r--r--test/CodeGen/X86/widen_load-2.ll2
-rw-r--r--test/CodeGen/X86/widen_shuffle-1.ll4
-rw-r--r--test/CodeGen/X86/win32_sret.ll181
-rw-r--r--test/CodeGen/X86/win64_alloca_dynalloca.ll6
-rw-r--r--test/CodeGen/X86/win64_eh.ll170
-rw-r--r--test/CodeGen/X86/win_chkstk.ll6
-rw-r--r--test/CodeGen/X86/win_cst_pool.ll66
-rw-r--r--test/CodeGen/X86/x86-64-double-precision-shift-left.ll77
-rw-r--r--test/CodeGen/X86/x86-64-double-precision-shift-right.ll74
-rw-r--r--test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll67
-rw-r--r--test/CodeGen/X86/x86-64-double-shifts-var.ll57
-rw-r--r--test/CodeGen/X86/x86-64-frameaddr.ll15
-rw-r--r--test/CodeGen/X86/x86-64-sret-return-2.ll18
-rw-r--r--test/CodeGen/X86/x86-64-static-relo-movl.ll24
-rw-r--r--test/CodeGen/X86/x86-frameaddr.ll9
-rw-r--r--test/CodeGen/X86/x86-frameaddr2.ll9
-rw-r--r--test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll56
-rw-r--r--test/CodeGen/X86/x86-shifts.ll2
-rw-r--r--test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll41
-rw-r--r--test/CodeGen/X86/xaluo.ll743
-rw-r--r--test/CodeGen/X86/zlib-longest-match.ll240
-rw-r--r--test/CodeGen/XCore/align.ll15
-rw-r--r--test/CodeGen/XCore/atomic.ll76
-rw-r--r--test/CodeGen/XCore/bigstructret.ll39
-rw-r--r--test/CodeGen/XCore/byVal.ll2
-rw-r--r--test/CodeGen/XCore/call.ll10
-rw-r--r--test/CodeGen/XCore/codemodel.ll213
-rw-r--r--test/CodeGen/XCore/dwarf_debug.ll39
-rw-r--r--test/CodeGen/XCore/epilogue_prologue.ll249
-rw-r--r--test/CodeGen/XCore/exception.ll3
-rw-r--r--test/CodeGen/XCore/globals.ll53
-rw-r--r--test/CodeGen/XCore/inline-asm.ll21
-rw-r--r--test/CodeGen/XCore/linkage.ll12
-rw-r--r--test/CodeGen/XCore/lit.local.cfg3
-rw-r--r--test/CodeGen/XCore/llvm-intrinsics.ll361
-rw-r--r--test/CodeGen/XCore/load.ll2
-rw-r--r--test/CodeGen/XCore/memcpy.ll32
-rw-r--r--test/CodeGen/XCore/resources.ll16
-rw-r--r--test/CodeGen/XCore/resources_combine.ll93
-rw-r--r--test/CodeGen/XCore/scavenging.ll69
-rw-r--r--test/DebugInfo/2009-11-05-DeadGlobalVariable.ll6
-rw-r--r--test/DebugInfo/2009-11-06-NamelessGlobalVariable.ll2
-rw-r--r--test/DebugInfo/2009-11-10-CurrentFn.ll2
-rw-r--r--test/DebugInfo/2010-01-19-DbgScope.ll35
-rw-r--r--test/DebugInfo/2010-03-19-DbgDeclare.ll4
-rw-r--r--test/DebugInfo/2010-03-24-MemberFn.ll2
-rw-r--r--test/DebugInfo/2010-04-06-NestedFnDbgInfo.ll20
-rw-r--r--test/DebugInfo/2010-04-19-FramePtr.ll4
-rw-r--r--test/DebugInfo/2010-05-10-MultipleCU.ll19
-rw-r--r--test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll9
-rw-r--r--test/DebugInfo/2010-07-19-Crash.ll2
-rw-r--r--test/DebugInfo/AArch64/cfi-frame.ll58
-rw-r--r--test/DebugInfo/AArch64/dwarfdump.ll10
-rw-r--r--test/DebugInfo/AArch64/eh_frame.s4
-rw-r--r--test/DebugInfo/AArch64/eh_frame_personality.ll4
-rw-r--r--test/DebugInfo/AArch64/lit.local.cfg3
-rw-r--r--test/DebugInfo/AArch64/struct_by_value.ll70
-rw-r--r--test/DebugInfo/AArch64/variable-loc.ll101
-rw-r--r--test/DebugInfo/ARM/PR16736.ll2
-rw-r--r--test/DebugInfo/ARM/lit.local.cfg3
-rw-r--r--test/DebugInfo/ARM/sectionorder.ll17
-rw-r--r--test/DebugInfo/ARM/tls.ll28
-rw-r--r--test/DebugInfo/COFF/asan-module-ctor.ll91
-rw-r--r--test/DebugInfo/COFF/asan-module-without-functions.ll53
-rw-r--r--test/DebugInfo/COFF/asm.ll184
-rw-r--r--test/DebugInfo/COFF/lit.local.cfg2
-rw-r--r--test/DebugInfo/COFF/multifile.ll257
-rw-r--r--test/DebugInfo/COFF/multifunction.ll378
-rw-r--r--test/DebugInfo/COFF/simple.ll167
-rw-r--r--test/DebugInfo/COFF/tail-call-without-lexical-scopes.ll78
-rw-r--r--test/DebugInfo/Inputs/arange-overlap.cc26
-rwxr-xr-xtest/DebugInfo/Inputs/arange-overlap.elf-x86_64bin0 -> 9824 bytes
-rw-r--r--test/DebugInfo/Inputs/arm-relocs.elf-armbin0 -> 3012 bytes
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-line-dwo.cc10
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-line-dwo.elf-x86-64bin0 -> 1377 bytes
-rw-r--r--test/DebugInfo/Inputs/fission-ranges.cc17
-rwxr-xr-xtest/DebugInfo/Inputs/fission-ranges.elf-x86_64bin0 -> 8693 bytes
-rwxr-xr-xtest/DebugInfo/Inputs/llvm-symbolizer-dwo-testbin0 -> 9579 bytes
-rw-r--r--test/DebugInfo/Inputs/llvm-symbolizer-dwo-test.cc18
-rw-r--r--test/DebugInfo/Inputs/llvm-symbolizer-test.c18
-rwxr-xr-xtest/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64bin0 -> 10693 bytes
-rw-r--r--test/DebugInfo/Inputs/shared-object-stripped.elf-i386bin0 -> 1280 bytes
-rw-r--r--test/DebugInfo/Mips/delay-slot.ll75
-rw-r--r--test/DebugInfo/Mips/lit.local.cfg2
-rw-r--r--test/DebugInfo/PR20038.ll168
-rw-r--r--test/DebugInfo/PowerPC/lit.local.cfg3
-rw-r--r--test/DebugInfo/PowerPC/tls-fission.ll2
-rw-r--r--test/DebugInfo/PowerPC/tls.ll2
-rw-r--r--test/DebugInfo/Sparc/gnu-window-save.ll71
-rw-r--r--test/DebugInfo/Sparc/lit.local.cfg2
-rw-r--r--test/DebugInfo/SystemZ/eh_frame.s53
-rw-r--r--test/DebugInfo/SystemZ/eh_frame_personality.s4
-rw-r--r--test/DebugInfo/SystemZ/lit.local.cfg3
-rw-r--r--test/DebugInfo/SystemZ/variable-loc.ll29
-rw-r--r--test/DebugInfo/X86/2010-08-10-DbgConstant.ll3
-rw-r--r--test/DebugInfo/X86/2011-09-26-GlobalVarContext.ll18
-rw-r--r--test/DebugInfo/X86/2011-12-16-BadStructRef.ll22
-rw-r--r--test/DebugInfo/X86/DW_AT_byte_size.ll6
-rw-r--r--test/DebugInfo/X86/DW_AT_linkage_name.ll116
-rw-r--r--test/DebugInfo/X86/DW_AT_location-reference.ll48
-rw-r--r--test/DebugInfo/X86/DW_AT_object_pointer.ll5
-rw-r--r--test/DebugInfo/X86/DW_AT_specification.ll23
-rw-r--r--test/DebugInfo/X86/DW_AT_stmt_list_sec_offset.ll11
-rw-r--r--test/DebugInfo/X86/DW_TAG_friend.ll4
-rw-r--r--test/DebugInfo/X86/aligned_stack_var.ll2
-rw-r--r--test/DebugInfo/X86/arange.ll46
-rw-r--r--test/DebugInfo/X86/arguments.ll11
-rw-r--r--test/DebugInfo/X86/array.ll101
-rw-r--r--test/DebugInfo/X86/array2.ll107
-rw-r--r--test/DebugInfo/X86/block-capture.ll38
-rw-r--r--test/DebugInfo/X86/byvalstruct.ll5
-rw-r--r--test/DebugInfo/X86/c-type-units.ll29
-rw-r--r--test/DebugInfo/X86/coff_debug_info_type.ll41
-rw-r--r--test/DebugInfo/X86/coff_relative_names.ll6
-rw-r--r--test/DebugInfo/X86/concrete_out_of_line.ll59
-rw-r--r--test/DebugInfo/X86/cu-ranges-odr.ll96
-rw-r--r--test/DebugInfo/X86/cu-ranges.ll73
-rw-r--r--test/DebugInfo/X86/data_member_location.ll11
-rw-r--r--test/DebugInfo/X86/dbg-asm.s22
-rw-r--r--test/DebugInfo/X86/dbg-at-specficiation.ll2
-rw-r--r--test/DebugInfo/X86/dbg-byval-parameter.ll2
-rw-r--r--test/DebugInfo/X86/dbg-const-int.ll10
-rw-r--r--test/DebugInfo/X86/dbg-const.ll2
-rw-r--r--test/DebugInfo/X86/dbg-declare-arg.ll2
-rw-r--r--test/DebugInfo/X86/dbg-declare.ll6
-rw-r--r--test/DebugInfo/X86/dbg-large-unsigned-const.ll62
-rw-r--r--test/DebugInfo/X86/dbg-merge-loc-entry.ll2
-rw-r--r--test/DebugInfo/X86/dbg-subrange.ll8
-rw-r--r--test/DebugInfo/X86/dbg-value-const-byref.ll106
-rw-r--r--test/DebugInfo/X86/dbg-value-inlined-parameter.ll50
-rw-r--r--test/DebugInfo/X86/dbg-value-isel.ll2
-rw-r--r--test/DebugInfo/X86/dbg-value-location.ll20
-rw-r--r--test/DebugInfo/X86/dbg-value-terminator.ll58
-rw-r--r--test/DebugInfo/X86/dbg_value_direct.ll5
-rw-r--r--test/DebugInfo/X86/debug-dead-local-var.ll51
-rw-r--r--test/DebugInfo/X86/debug-info-block-captured-self.ll26
-rw-r--r--test/DebugInfo/X86/debug-info-blocks.ll37
-rw-r--r--test/DebugInfo/X86/debug-info-static-member.ll42
-rw-r--r--test/DebugInfo/X86/debug-loc-asan.ll186
-rw-r--r--test/DebugInfo/X86/debug-loc-offset.ll153
-rw-r--r--test/DebugInfo/X86/debug-ranges-offset.ll241
-rw-r--r--test/DebugInfo/X86/decl-derived-member.ll144
-rw-r--r--test/DebugInfo/X86/discriminator.ll63
-rw-r--r--test/DebugInfo/X86/dwarf-aranges-no-dwarf-labels.ll4
-rw-r--r--test/DebugInfo/X86/dwarf-aranges.ll21
-rw-r--r--test/DebugInfo/X86/dwarf-public-names.ll22
-rw-r--r--test/DebugInfo/X86/dwarf-pubnames-split.ll4
-rw-r--r--test/DebugInfo/X86/eh_symbol.ll24
-rw-r--r--test/DebugInfo/X86/elf-names.ll12
-rw-r--r--test/DebugInfo/X86/empty-and-one-elem-array.ll2
-rw-r--r--test/DebugInfo/X86/empty-array.ll8
-rw-r--r--test/DebugInfo/X86/ending-run.ll12
-rw-r--r--test/DebugInfo/X86/enum-class.ll4
-rw-r--r--test/DebugInfo/X86/enum-fwd-decl.ll4
-rw-r--r--test/DebugInfo/X86/fission-cu.ll46
-rw-r--r--test/DebugInfo/X86/fission-hash.ll4
-rw-r--r--test/DebugInfo/X86/fission-ranges.ll59
-rw-r--r--test/DebugInfo/X86/formal_parameter.ll84
-rw-r--r--test/DebugInfo/X86/generate-odr-hash.ll270
-rw-r--r--test/DebugInfo/X86/gnu-public-names-empty.ll6
-rw-r--r--test/DebugInfo/X86/gnu-public-names.ll289
-rw-r--r--test/DebugInfo/X86/inline-member-function.ll95
-rw-r--r--test/DebugInfo/X86/inline-seldag-test.ll77
-rw-r--r--test/DebugInfo/X86/instcombine-instrinsics.ll157
-rw-r--r--test/DebugInfo/X86/lexical_block.ll59
-rw-r--r--test/DebugInfo/X86/line-info.ll4
-rw-r--r--test/DebugInfo/X86/linkage-name.ll12
-rw-r--r--test/DebugInfo/X86/lit.local.cfg3
-rw-r--r--test/DebugInfo/X86/low-pc-cu.ll40
-rw-r--r--test/DebugInfo/X86/misched-dbg-value.ll45
-rw-r--r--test/DebugInfo/X86/multiple-aranges.ll14
-rw-r--r--test/DebugInfo/X86/multiple-at-const-val.ll4
-rw-r--r--test/DebugInfo/X86/nondefault-subrange-array.ll10
-rw-r--r--test/DebugInfo/X86/objc-fwd-decl.ll4
-rw-r--r--test/DebugInfo/X86/objc-property-void.ll104
-rw-r--r--test/DebugInfo/X86/op_deref.ll23
-rw-r--r--test/DebugInfo/X86/parameters.ll8
-rw-r--r--test/DebugInfo/X86/pointer-type-size.ll4
-rw-r--r--test/DebugInfo/X86/pr11300.ll17
-rw-r--r--test/DebugInfo/X86/pr12831.ll12
-rw-r--r--test/DebugInfo/X86/pr13303.ll2
-rw-r--r--test/DebugInfo/X86/pr19307.ll147
-rw-r--r--test/DebugInfo/X86/pr9951.ll27
-rw-r--r--test/DebugInfo/X86/prologue-stack.ll2
-rw-r--r--test/DebugInfo/X86/ref_addr_relocation.ll2
-rw-r--r--test/DebugInfo/X86/reference-argument.ll2
-rw-r--r--test/DebugInfo/X86/rvalue-ref.ll2
-rw-r--r--test/DebugInfo/X86/sret.ll393
-rw-r--r--test/DebugInfo/X86/stmt-list-multiple-compile-units.ll48
-rw-r--r--test/DebugInfo/X86/stmt-list.ll2
-rw-r--r--test/DebugInfo/X86/stringpool.ll4
-rw-r--r--test/DebugInfo/X86/struct-loc.ll4
-rw-r--r--test/DebugInfo/X86/subrange-type.ll2
-rw-r--r--test/DebugInfo/X86/subreg.ll6
-rw-r--r--test/DebugInfo/X86/subregisters.ll117
-rw-r--r--test/DebugInfo/X86/template.ll4
-rw-r--r--test/DebugInfo/X86/tls-fission.ll32
-rw-r--r--test/DebugInfo/X86/tls.ll119
-rw-r--r--test/DebugInfo/X86/type_units_with_addresses.ll151
-rw-r--r--test/DebugInfo/X86/union-template.ll4
-rw-r--r--test/DebugInfo/X86/vector.ll4
-rw-r--r--test/DebugInfo/X86/vla.ll2
-rw-r--r--test/DebugInfo/arm-relocs.test5
-rw-r--r--test/DebugInfo/array.ll2
-rw-r--r--test/DebugInfo/constant-pointers.ll51
-rw-r--r--test/DebugInfo/cross-cu-inlining.ll130
-rw-r--r--test/DebugInfo/cross-cu-linkonce.ll73
-rw-r--r--test/DebugInfo/cu-line-tables.ll51
-rw-r--r--test/DebugInfo/cu-range-hole.ll74
-rw-r--r--test/DebugInfo/cu-ranges.ll71
-rw-r--r--test/DebugInfo/dead-argument-order.ll81
-rw-r--r--test/DebugInfo/debug-info-qualifiers.ll100
-rw-r--r--test/DebugInfo/dwarf-public-names.ll22
-rw-r--r--test/DebugInfo/dwarfdump-inlining.test28
-rw-r--r--test/DebugInfo/dwarfdump-line-dwo.test6
-rw-r--r--test/DebugInfo/dwarfdump-ranges.test10
-rw-r--r--test/DebugInfo/dwarfdump-test.test56
-rw-r--r--test/DebugInfo/dwarfdump-zlib.test12
-rw-r--r--test/DebugInfo/empty.ll31
-rw-r--r--test/DebugInfo/enum.ll4
-rw-r--r--test/DebugInfo/global.ll7
-rw-r--r--test/DebugInfo/incorrect-variable-debugloc.ll391
-rw-r--r--test/DebugInfo/inline-no-debug-info.ll69
-rw-r--r--test/DebugInfo/inline-scopes.ll130
-rw-r--r--test/DebugInfo/inlined-arguments.ll10
-rw-r--r--test/DebugInfo/inlined-vars.ll12
-rw-r--r--test/DebugInfo/llvm-symbolizer-zlib.test7
-rw-r--r--test/DebugInfo/llvm-symbolizer.test92
-rw-r--r--test/DebugInfo/lto-comp-dir.ll84
-rw-r--r--test/DebugInfo/member-order.ll4
-rw-r--r--test/DebugInfo/member-pointers.ll6
-rw-r--r--test/DebugInfo/missing-abstract-variable.ll191
-rw-r--r--test/DebugInfo/namespace.ll52
-rw-r--r--test/DebugInfo/namespace_function_definition.ll44
-rw-r--r--test/DebugInfo/namespace_inline_function_definition.ll92
-rw-r--r--test/DebugInfo/restrict.ll53
-rw-r--r--test/DebugInfo/sugared-constants.ll82
-rw-r--r--test/DebugInfo/template-recursive-void.ll4
-rw-r--r--test/DebugInfo/tu-composite.ll4
-rw-r--r--test/DebugInfo/tu-member-pointer.ll4
-rw-r--r--test/DebugInfo/two-cus-from-same-file.ll7
-rw-r--r--test/DebugInfo/typedef.ll32
-rw-r--r--test/DebugInfo/unconditional-branch.ll64
-rw-r--r--test/DebugInfo/varargs.ll99
-rw-r--r--test/DebugInfo/version.ll4
-rw-r--r--test/ExecutionEngine/MCJIT/eh-lg-pic.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/lit.local.cfg4
-rw-r--r--test/ExecutionEngine/MCJIT/load-object-a.ll24
-rw-r--r--test/ExecutionEngine/MCJIT/non-extern-addend-smallcodemodel.ll25
-rw-r--r--test/ExecutionEngine/MCJIT/non-extern-addend.ll21
-rw-r--r--test/ExecutionEngine/MCJIT/remote/Inputs/cross-module-b.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-b.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-c.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/remote/cross-module-a.ll7
-rw-r--r--test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/remote/lit.local.cfg7
-rw-r--r--test/ExecutionEngine/MCJIT/remote/multi-module-a.ll4
-rw-r--r--test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/remote/simpletest-remote.ll6
-rw-r--r--test/ExecutionEngine/MCJIT/remote/stubs-remote.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll2
-rw-r--r--test/ExecutionEngine/MCJIT/remote/test-data-align-remote.ll4
-rw-r--r--test/ExecutionEngine/MCJIT/remote/test-fp-no-external-funcs-remote.ll6
-rw-r--r--test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-remote.ll4
-rw-r--r--test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-remote.ll2
-rw-r--r--test/ExecutionEngine/RuntimeDyld/ARM/MachO_ARM_PIC_relocations.s27
-rw-r--r--test/ExecutionEngine/RuntimeDyld/ARM/lit.local.cfg3
-rw-r--r--test/ExecutionEngine/RuntimeDyld/Inputs/arm_secdiff_reloc.obin616 -> 0 bytes
-rw-r--r--test/ExecutionEngine/RuntimeDyld/X86/MachO_x86-64_PIC_relocations.s32
-rw-r--r--test/ExecutionEngine/RuntimeDyld/X86/lit.local.cfg3
-rw-r--r--test/ExecutionEngine/RuntimeDyld/arm_secdiff_reloc.test1
-rw-r--r--test/ExecutionEngine/lit.local.cfg8
-rw-r--r--test/Feature/alias2.ll28
-rw-r--r--test/Feature/aliases.ll15
-rw-r--r--test/Feature/comdat.ll21
-rw-r--r--test/Feature/globalvars.ll2
-rw-r--r--test/Feature/instructions.ll2
-rw-r--r--test/Feature/intrinsic-noduplicate.ll9
-rw-r--r--test/Feature/intrinsics.ll5
-rw-r--r--test/Feature/linker_private_linkages.ll6
-rw-r--r--test/Feature/optnone-llc.ll54
-rw-r--r--test/Feature/optnone-opt.ll74
-rw-r--r--test/FileCheck/check-multiple-prefixes-nomatch-2.txt10
-rw-r--r--test/FileCheck/implicit-check-not.txt44
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_attr.ll20
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_mov.ll146
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_mov.s64
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s24
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s59
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/bug_11395.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/X86/lit.local.cfg3
-rw-r--r--test/Instrumentation/AddressSanitizer/asan-vs-gvn.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/basic.ll68
-rw-r--r--test/Instrumentation/AddressSanitizer/coverage-dbg.ll67
-rw-r--r--test/Instrumentation/AddressSanitizer/coverage.ll65
-rw-r--r--test/Instrumentation/AddressSanitizer/debug_info.ll7
-rw-r--r--test/Instrumentation/AddressSanitizer/different_scale_and_offset.ll41
-rw-r--r--test/Instrumentation/AddressSanitizer/do-not-instrument-internal-globals.ll6
-rw-r--r--test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata.ll12
-rw-r--r--test/Instrumentation/AddressSanitizer/do-not-touch-comdat-global.ll14
-rw-r--r--test/Instrumentation/AddressSanitizer/do-not-touch-odr-global.ll11
-rw-r--r--test/Instrumentation/AddressSanitizer/do-not-touch-threadlocal.ll4
-rw-r--r--test/Instrumentation/AddressSanitizer/freebsd.ll29
-rw-r--r--test/Instrumentation/AddressSanitizer/global_metadata.ll71
-rw-r--r--test/Instrumentation/AddressSanitizer/instrument-no-return.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/instrument_global.ll5
-rw-r--r--test/Instrumentation/AddressSanitizer/instrument_initializer_metadata.ll10
-rw-r--r--test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll4
-rw-r--r--test/Instrumentation/AddressSanitizer/instrumentation-with-call-threshold.ll30
-rw-r--r--test/Instrumentation/AddressSanitizer/keep-instrumented_functions.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/lifetime-uar.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/lifetime.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/stack-poisoning.ll43
-rw-r--r--test/Instrumentation/AddressSanitizer/stack_layout.ll49
-rw-r--r--test/Instrumentation/AddressSanitizer/test64.ll18
-rw-r--r--test/Instrumentation/AddressSanitizer/ubsan.ll52
-rw-r--r--test/Instrumentation/BoundsChecking/phi.ll55
-rw-r--r--test/Instrumentation/BoundsChecking/simple.ll24
-rw-r--r--test/Instrumentation/DataFlowSanitizer/load.ll188
-rw-r--r--test/Instrumentation/DataFlowSanitizer/prefix-rename.ll8
-rw-r--r--test/Instrumentation/DataFlowSanitizer/store.ll179
-rw-r--r--test/Instrumentation/DataFlowSanitizer/union.ll52
-rw-r--r--test/Instrumentation/MemorySanitizer/atomics.ll16
-rw-r--r--test/Instrumentation/MemorySanitizer/check_access_address.ll28
-rw-r--r--test/Instrumentation/MemorySanitizer/do-not-emit-module-limits.ll21
-rw-r--r--test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll53
-rw-r--r--test/Instrumentation/MemorySanitizer/missing_origin.ll19
-rw-r--r--test/Instrumentation/MemorySanitizer/msan_basic.ll243
-rw-r--r--test/Instrumentation/MemorySanitizer/mul_by_constant.ll94
-rw-r--r--test/Instrumentation/MemorySanitizer/store-origin.ll73
-rw-r--r--test/Instrumentation/MemorySanitizer/vector_arith.ll65
-rw-r--r--test/Instrumentation/MemorySanitizer/vector_pack.ll60
-rw-r--r--test/Instrumentation/MemorySanitizer/vector_shift.ll100
-rw-r--r--test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll32
-rw-r--r--test/Instrumentation/ThreadSanitizer/atomic.ll50
-rw-r--r--test/Instrumentation/ThreadSanitizer/tsan_basic.ll6
-rw-r--r--test/Instrumentation/ThreadSanitizer/vptr_update.ll27
-rw-r--r--test/LTO/attrs.ll15
-rw-r--r--test/LTO/current-section.ll4
-rw-r--r--test/LTO/jump-table-type.ll23
-rw-r--r--test/LTO/keep-used-puts-during-instcombine.ll36
-rw-r--r--test/LTO/linkonce_odr_func.ll8
-rw-r--r--test/LTO/lit.local.cfg3
-rw-r--r--test/LTO/no-undefined-puts-when-implemented.ll40
-rw-r--r--test/LTO/private-symbol.ll7
-rw-r--r--test/LTO/symver-asm.ll16
-rw-r--r--test/LTO/triple-init.ll16
-rw-r--r--test/Linker/2011-08-22-ResolveAlias2.ll108
-rw-r--r--test/Linker/Inputs/alias.ll3
-rw-r--r--test/Linker/Inputs/comdat.ll20
-rw-r--r--test/Linker/Inputs/comdat2.ll2
-rw-r--r--test/Linker/Inputs/comdat3.ll2
-rw-r--r--test/Linker/Inputs/comdat4.ll5
-rw-r--r--test/Linker/Inputs/comdat5.ll15
-rw-r--r--test/Linker/Inputs/datalayout-a.ll1
-rw-r--r--test/Linker/Inputs/datalayout-b.ll1
-rw-r--r--test/Linker/Inputs/old_global_ctors.3.4.bcbin0 -> 368 bytes
-rw-r--r--test/Linker/Inputs/targettriple-a.ll1
-rw-r--r--test/Linker/Inputs/targettriple-b.ll1
-rw-r--r--test/Linker/Inputs/type-unique-simple2-a.ll2
-rw-r--r--test/Linker/Inputs/type-unique-simple2-b.ll2
-rw-r--r--test/Linker/alias.ll16
-rw-r--r--test/Linker/comdat.ll32
-rw-r--r--test/Linker/comdat2.ll7
-rw-r--r--test/Linker/comdat3.ll5
-rw-r--r--test/Linker/comdat4.ll5
-rw-r--r--test/Linker/comdat5.ll7
-rw-r--r--test/Linker/comdat6.ll13
-rw-r--r--test/Linker/comdat7.ll9
-rw-r--r--test/Linker/comdat8.ll10
-rw-r--r--test/Linker/datalayout.ll14
-rw-r--r--test/Linker/debug-info-version-a.ll16
-rw-r--r--test/Linker/debug-info-version-b.ll10
-rw-r--r--test/Linker/dllstorage-a.ll4
-rw-r--r--test/Linker/dllstorage-b.ll3
-rw-r--r--test/Linker/func-attrs-a.ll14
-rw-r--r--test/Linker/func-attrs-b.ll8
-rw-r--r--test/Linker/global_ctors.ll29
-rw-r--r--test/Linker/targettriple.ll14
-rw-r--r--test/Linker/type-unique-odr-a.ll102
-rw-r--r--test/Linker/type-unique-odr-b.ll86
-rw-r--r--test/Linker/type-unique-simple-a.ll4
-rw-r--r--test/Linker/type-unique-simple-b.ll2
-rw-r--r--test/Linker/type-unique-simple2-a.ll129
-rw-r--r--test/Linker/type-unique-simple2-b.ll88
-rw-r--r--test/Linker/type-unique-simple2.ll2
-rw-r--r--test/Linker/unnamed-addr1-a.ll11
-rw-r--r--test/Linker/unnamed-addr1-b.ll6
-rw-r--r--test/MC/AArch64/alias-logicalimm.s41
-rw-r--r--test/MC/AArch64/arm64-adr.s31
-rw-r--r--test/MC/AArch64/arm64-advsimd.s1997
-rw-r--r--test/MC/AArch64/arm64-aliases.s753
-rw-r--r--test/MC/AArch64/arm64-arithmetic-encoding.s615
-rw-r--r--test/MC/AArch64/arm64-arm64-fixup.s10
-rw-r--r--test/MC/AArch64/arm64-basic-a64-instructions.s18
-rw-r--r--test/MC/AArch64/arm64-be-datalayout.s4
-rw-r--r--test/MC/AArch64/arm64-bitfield-encoding.s38
-rw-r--r--test/MC/AArch64/arm64-branch-encoding.s159
-rw-r--r--test/MC/AArch64/arm64-condbr-without-dots.s37
-rw-r--r--test/MC/AArch64/arm64-crypto.s66
-rw-r--r--test/MC/AArch64/arm64-diagno-predicate.s24
-rw-r--r--test/MC/AArch64/arm64-diags.s428
-rw-r--r--test/MC/AArch64/arm64-directive_loh.s93
-rw-r--r--test/MC/AArch64/arm64-elf-reloc-condbr.s10
-rw-r--r--test/MC/AArch64/arm64-elf-relocs.s249
-rw-r--r--test/MC/AArch64/arm64-fp-encoding.s443
-rw-r--r--test/MC/AArch64/arm64-large-relocs.s38
-rw-r--r--test/MC/AArch64/arm64-leaf-compact-unwind.s208
-rw-r--r--test/MC/AArch64/arm64-logical-encoding.s224
-rw-r--r--test/MC/AArch64/arm64-mapping-across-sections.s28
-rw-r--r--test/MC/AArch64/arm64-mapping-within-section.s23
-rw-r--r--test/MC/AArch64/arm64-memory.s634
-rw-r--r--test/MC/AArch64/arm64-nv-cond.s11
-rw-r--r--test/MC/AArch64/arm64-optional-hash.s31
-rw-r--r--test/MC/AArch64/arm64-separator.s20
-rw-r--r--test/MC/AArch64/arm64-simd-ldst.s2404
-rw-r--r--test/MC/AArch64/arm64-small-data-fixups.s24
-rw-r--r--test/MC/AArch64/arm64-spsel-sysreg.s24
-rw-r--r--test/MC/AArch64/arm64-system-encoding.s623
-rw-r--r--test/MC/AArch64/arm64-target-specific-sysreg.s10
-rw-r--r--test/MC/AArch64/arm64-tls-modifiers-darwin.s13
-rw-r--r--test/MC/AArch64/arm64-tls-relocs.s320
-rw-r--r--test/MC/AArch64/arm64-v128_lo-diagnostics.s11
-rw-r--r--test/MC/AArch64/arm64-variable-exprs.s40
-rw-r--r--test/MC/AArch64/arm64-vector-lists.s20
-rw-r--r--test/MC/AArch64/arm64-verbose-vector-case.s19
-rw-r--r--test/MC/AArch64/basic-a64-diagnostics.s912
-rw-r--r--test/MC/AArch64/basic-a64-instructions.s1848
-rw-r--r--test/MC/AArch64/dot-req-case-insensitive.s18
-rw-r--r--test/MC/AArch64/dot-req-diagnostics.s37
-rw-r--r--test/MC/AArch64/dot-req.s37
-rw-r--r--test/MC/AArch64/elf-globaladdress.ll2
-rw-r--r--test/MC/AArch64/elf-reloc-addend.s8
-rw-r--r--test/MC/AArch64/elf-reloc-condbr.s10
-rw-r--r--test/MC/AArch64/gicv3-regs.s220
-rw-r--r--test/MC/AArch64/ldr-pseudo-diagnostics.s14
-rw-r--r--test/MC/AArch64/ldr-pseudo-obj-errors.s13
-rw-r--r--test/MC/AArch64/ldr-pseudo.s319
-rw-r--r--test/MC/AArch64/lit.local.cfg5
-rw-r--r--test/MC/AArch64/neon-2velem.s2
-rw-r--r--test/MC/AArch64/neon-3vdiff.s2
-rw-r--r--test/MC/AArch64/neon-across.s2
-rw-r--r--test/MC/AArch64/neon-compare-instructions.s100
-rw-r--r--test/MC/AArch64/neon-crypto.s5
-rw-r--r--test/MC/AArch64/neon-diagnostics.s740
-rw-r--r--test/MC/AArch64/neon-extract.s6
-rw-r--r--test/MC/AArch64/neon-mov.s143
-rw-r--r--test/MC/AArch64/neon-perm.s2
-rw-r--r--test/MC/AArch64/neon-scalar-compare.s10
-rw-r--r--test/MC/AArch64/neon-scalar-dup.s44
-rw-r--r--test/MC/AArch64/neon-scalar-fp-compare.s20
-rw-r--r--test/MC/AArch64/neon-simd-copy.s72
-rw-r--r--test/MC/AArch64/neon-simd-ldst-multi-elem.s786
-rw-r--r--test/MC/AArch64/neon-simd-ldst-one-elem.s514
-rw-r--r--test/MC/AArch64/neon-simd-misc.s6
-rw-r--r--test/MC/AArch64/neon-simd-post-ldst-multi-elem.s426
-rw-r--r--test/MC/AArch64/neon-sxtl.s26
-rw-r--r--test/MC/AArch64/neon-tbl.s97
-rw-r--r--test/MC/AArch64/neon-uxtl.s26
-rw-r--r--test/MC/AArch64/noneon-diagnostics.s13
-rw-r--r--test/MC/AArch64/optional-hash.s17
-rw-r--r--test/MC/AArch64/tls-relocs.s301
-rw-r--r--test/MC/AArch64/trace-regs.s765
-rw-r--r--test/MC/ARM/2013-03-18-Br-to-label-named-like-reg.s5
-rw-r--r--test/MC/ARM/AlignedBundling/lit.local.cfg3
-rw-r--r--test/MC/ARM/Windows/mov32t-range.s37
-rw-r--r--test/MC/ARM/Windows/multiple-text-sections.s58
-rw-r--r--test/MC/ARM/Windows/text-attributes.s30
-rw-r--r--test/MC/ARM/arm-elf-symver.s143
-rw-r--r--test/MC/ARM/arm-ldrd.s2
-rw-r--r--test/MC/ARM/arm-memory-instructions.s11
-rw-r--r--test/MC/ARM/arm-qualifier-diagnostics.s15
-rw-r--r--test/MC/ARM/arm-thumb-cpus-default.s23
-rw-r--r--test/MC/ARM/arm-thumb-cpus.s23
-rw-r--r--test/MC/ARM/arm_addrmode2.s8
-rw-r--r--test/MC/ARM/arm_fixups.s16
-rw-r--r--test/MC/ARM/arm_word_directive.s6
-rw-r--r--test/MC/ARM/basic-arm-instructions.s21
-rw-r--r--test/MC/ARM/basic-thumb-instructions.s25
-rw-r--r--test/MC/ARM/basic-thumb2-instructions.s60
-rw-r--r--test/MC/ARM/big-endian-arm-fixup.s107
-rw-r--r--test/MC/ARM/big-endian-thumb-fixup.s63
-rw-r--r--test/MC/ARM/big-endian-thumb2-fixup.s49
-rw-r--r--test/MC/ARM/bkpt.s32
-rw-r--r--test/MC/ARM/cmp-immediate-fixup-error.s7
-rw-r--r--test/MC/ARM/cmp-immediate-fixup-error2.s7
-rw-r--r--test/MC/ARM/cmp-immediate-fixup.s9
-rw-r--r--test/MC/ARM/cmp-immediate-fixup2.s9
-rw-r--r--test/MC/ARM/coff-debugging-secrel.ll49
-rw-r--r--test/MC/ARM/coff-file.s47
-rw-r--r--test/MC/ARM/coff-function-type-info.ll45
-rw-r--r--test/MC/ARM/coff-relocations.s101
-rw-r--r--test/MC/ARM/comment.s47
-rw-r--r--test/MC/ARM/complex-operands.s40
-rw-r--r--test/MC/ARM/data-in-code.ll10
-rw-r--r--test/MC/ARM/diagnostics.s26
-rw-r--r--test/MC/ARM/directive-align.s28
-rw-r--r--test/MC/ARM/directive-arch-armv2.s30
-rw-r--r--test/MC/ARM/directive-arch-armv2a.s30
-rw-r--r--test/MC/ARM/directive-arch-armv3.s30
-rw-r--r--test/MC/ARM/directive-arch-armv3m.s30
-rw-r--r--test/MC/ARM/directive-arch-armv4.s38
-rw-r--r--test/MC/ARM/directive-arch-armv4t.s34
-rw-r--r--test/MC/ARM/directive-arch-armv5.s30
-rw-r--r--test/MC/ARM/directive-arch-armv5t.s34
-rw-r--r--test/MC/ARM/directive-arch-armv5te.s34
-rw-r--r--test/MC/ARM/directive-arch-armv6-m.s30
-rw-r--r--test/MC/ARM/directive-arch-armv6.s34
-rw-r--r--test/MC/ARM/directive-arch-armv6j.s34
-rw-r--r--test/MC/ARM/directive-arch-armv6t2.s34
-rw-r--r--test/MC/ARM/directive-arch-armv6z.s38
-rw-r--r--test/MC/ARM/directive-arch-armv6zk.s38
-rw-r--r--test/MC/ARM/directive-arch-armv7-a.s38
-rw-r--r--test/MC/ARM/directive-arch-armv7-m.s34
-rw-r--r--test/MC/ARM/directive-arch-armv7-r.s38
-rw-r--r--test/MC/ARM/directive-arch-armv7.s30
-rw-r--r--test/MC/ARM/directive-arch-armv7a.s38
-rw-r--r--test/MC/ARM/directive-arch-armv7m.s34
-rw-r--r--test/MC/ARM/directive-arch-armv7r.s38
-rw-r--r--test/MC/ARM/directive-arch-armv8-a.s46
-rw-r--r--test/MC/ARM/directive-arch-armv8a.s46
-rw-r--r--test/MC/ARM/directive-arch-iwmmxt.s38
-rw-r--r--test/MC/ARM/directive-arch-iwmmxt2.s38
-rw-r--r--test/MC/ARM/directive-arch_extension-crc.s57
-rw-r--r--test/MC/ARM/directive-arch_extension-crypto.s108
-rw-r--r--test/MC/ARM/directive-arch_extension-fp.s344
-rw-r--r--test/MC/ARM/directive-arch_extension-idiv.s53
-rw-r--r--test/MC/ARM/directive-arch_extension-mp.s38
-rw-r--r--test/MC/ARM/directive-arch_extension-sec.s31
-rw-r--r--test/MC/ARM/directive-arch_extension-simd.s275
-rw-r--r--test/MC/ARM/directive-eabi_attribute-2.s98
-rw-r--r--test/MC/ARM/directive-eabi_attribute-diagnostics.s36
-rw-r--r--test/MC/ARM/directive-eabi_attribute-overwrite.s17
-rw-r--r--test/MC/ARM/directive-even.s70
-rw-r--r--test/MC/ARM/directive-fpu-multiple.s26
-rw-r--r--test/MC/ARM/directive-fpu-softvfp.s8
-rw-r--r--test/MC/ARM/directive-literals.s26
-rw-r--r--test/MC/ARM/directive-object_arch-2.s22
-rw-r--r--test/MC/ARM/directive-object_arch-3.s11
-rw-r--r--test/MC/ARM/directive-object_arch-diagnostics.s23
-rw-r--r--test/MC/ARM/directive-object_arch.s22
-rw-r--r--test/MC/ARM/directive-tlsdescseq-diagnostics.s35
-rw-r--r--test/MC/ARM/directive-tlsdescseq.s33
-rw-r--r--test/MC/ARM/directive-word-diagnostics.s12
-rw-r--r--test/MC/ARM/dot-req-case-insensitive.s20
-rw-r--r--test/MC/ARM/dwarf-asm-multiple-sections.s79
-rw-r--r--test/MC/ARM/dwarf-asm-no-code.s27
-rw-r--r--test/MC/ARM/dwarf-asm-nonstandard-section.s57
-rw-r--r--test/MC/ARM/dwarf-asm-single-section.s56
-rw-r--r--test/MC/ARM/dwarf-cfi-initial-state.s17
-rw-r--r--test/MC/ARM/eh-directive-cantunwind-diagnostics.s8
-rw-r--r--test/MC/ARM/eh-directive-fnstart-diagnostics.s2
-rw-r--r--test/MC/ARM/eh-directive-movsp-diagnostics.s102
-rw-r--r--test/MC/ARM/eh-directive-movsp.s44
-rw-r--r--test/MC/ARM/eh-directive-personalityindex-diagnostics.s122
-rw-r--r--test/MC/ARM/eh-directive-personalityindex.s202
-rw-r--r--test/MC/ARM/eh-directive-save-diagnostics.s (renamed from test/MC/ARM/eh-directive-save-diagnoatics.s)0
-rw-r--r--test/MC/ARM/eh-directive-setfp.s2
-rw-r--r--test/MC/ARM/eh-directive-unwind_raw-diagnostics.s73
-rw-r--r--test/MC/ARM/eh-directive-unwind_raw.s110
-rw-r--r--test/MC/ARM/elf-jump24-fixup.s2
-rw-r--r--test/MC/ARM/elf-thumbfunc-reloc.ll6
-rw-r--r--test/MC/ARM/elf-thumbfunc-reloc.s13
-rw-r--r--test/MC/ARM/elf-thumbfunc.s12
-rw-r--r--test/MC/ARM/fconst.s22
-rw-r--r--test/MC/ARM/fixup-cpu-mode.s9
-rw-r--r--test/MC/ARM/fp-const-errors.s22
-rw-r--r--test/MC/ARM/gas-compl-copr-reg.s14
-rw-r--r--test/MC/ARM/inst-arm-suffixes.s15
-rw-r--r--test/MC/ARM/inst-constant-required.s15
-rw-r--r--test/MC/ARM/inst-directive-emit.s20
-rw-r--r--test/MC/ARM/inst-directive.s81
-rw-r--r--test/MC/ARM/inst-overflow.s14
-rw-r--r--test/MC/ARM/inst-thumb-overflow-2.s13
-rw-r--r--test/MC/ARM/inst-thumb-overflow.s13
-rw-r--r--test/MC/ARM/inst-thumb-suffixes.s13
-rw-r--r--test/MC/ARM/invalid-vector-index.s5
-rw-r--r--test/MC/ARM/ldr-pseudo-darwin.s247
-rw-r--r--test/MC/ARM/ldr-pseudo-obj-errors.s17
-rw-r--r--test/MC/ARM/ldr-pseudo-parse-errors.s10
-rw-r--r--test/MC/ARM/ldr-pseudo.s221
-rw-r--r--test/MC/ARM/ldrd-strd-gnu-arm-bad-imm.s9
-rw-r--r--test/MC/ARM/ldrd-strd-gnu-arm.s20
-rw-r--r--test/MC/ARM/ldrd-strd-gnu-sp.s9
-rw-r--r--test/MC/ARM/ldrd-strd-gnu-thumb-bad-regs.s10
-rw-r--r--test/MC/ARM/ldrd-strd-gnu-thumb.s20
-rw-r--r--test/MC/ARM/lit.local.cfg3
-rw-r--r--test/MC/ARM/ltorg-darwin.s151
-rw-r--r--test/MC/ARM/ltorg.s138
-rw-r--r--test/MC/ARM/macho-relocs-with-addend.s34
-rw-r--r--test/MC/ARM/mul-v4.s39
-rw-r--r--test/MC/ARM/neon-vld-encoding.s4
-rw-r--r--test/MC/ARM/neon-vld-vst-align.s8354
-rw-r--r--test/MC/ARM/not-armv4.s8
-rw-r--r--test/MC/ARM/pool.s19
-rw-r--r--test/MC/ARM/simple-fp-encoding.s43
-rw-r--r--test/MC/ARM/symbol-variants-errors.s23
-rw-r--r--test/MC/ARM/symbol-variants.s91
-rw-r--r--test/MC/ARM/target-expressions.s80
-rw-r--r--test/MC/ARM/thumb-far-jump.s26
-rw-r--r--test/MC/ARM/thumb-st_other.s19
-rw-r--r--test/MC/ARM/thumb-types.s108
-rw-r--r--test/MC/ARM/thumb2-cbn-to-next-inst.s33
-rw-r--r--test/MC/ARM/thumb2-diagnostics.s18
-rw-r--r--test/MC/ARM/thumb2-ldrd.s15
-rw-r--r--test/MC/ARM/thumb2-mclass.s15
-rw-r--r--test/MC/ARM/thumb2-strd.s10
-rw-r--r--test/MC/ARM/thumb2be-b.w-encoding.s9
-rw-r--r--test/MC/ARM/thumb2be-beq.w-encoding.s9
-rw-r--r--test/MC/ARM/thumb2be-movt-encoding.s9
-rw-r--r--test/MC/ARM/thumb2be-movw-encoding.s9
-rw-r--r--test/MC/ARM/thumb_set-diagnostics.s43
-rw-r--r--test/MC/ARM/thumb_set.s154
-rw-r--r--test/MC/ARM/thumbv7m.s45
-rw-r--r--test/MC/ARM/udf-arm-diagnostics.s19
-rw-r--r--test/MC/ARM/udf-arm.s11
-rw-r--r--test/MC/ARM/udf-thumb-2-diagnostics.s25
-rw-r--r--test/MC/ARM/udf-thumb-2.s13
-rw-r--r--test/MC/ARM/udf-thumb-diagnostics.s19
-rw-r--r--test/MC/ARM/udf-thumb.s11
-rw-r--r--test/MC/ARM/unwind-stack-diagnostics.s30
-rw-r--r--test/MC/ARM/variant-diagnostics.s13
-rw-r--r--test/MC/ARM/vfp-aliases-diagnostics.s114
-rw-r--r--test/MC/ARM/vfp-aliases.s62
-rw-r--r--test/MC/ARM/vmov-vmvn-byte-replicate.s31
-rw-r--r--test/MC/ARM/vmov-vmvn-illegal-cases.s30
-rw-r--r--test/MC/ARM/vorr-vbic-illegal-cases.s42
-rw-r--r--test/MC/ARM/xscale-attributes.ll39
-rw-r--r--test/MC/AsmParser/conditional_asm.s69
-rw-r--r--test/MC/AsmParser/directive-err-diagnostics.s17
-rw-r--r--test/MC/AsmParser/directive-err.s30
-rw-r--r--test/MC/AsmParser/directive_end-2.s14
-rw-r--r--test/MC/AsmParser/directive_end.s11
-rw-r--r--test/MC/AsmParser/directive_file.s1
-rw-r--r--test/MC/AsmParser/directive_fill.s46
-rw-r--r--test/MC/AsmParser/directive_line.s1
-rw-r--r--test/MC/AsmParser/directive_loc.s10
-rw-r--r--test/MC/AsmParser/directive_rept-diagnostics.s41
-rw-r--r--test/MC/AsmParser/directive_rept.s30
-rw-r--r--test/MC/AsmParser/directive_seh.s33
-rw-r--r--test/MC/AsmParser/directive_values.s12
-rw-r--r--test/MC/AsmParser/dot-symbol-assignment-backwards.s12
-rw-r--r--test/MC/AsmParser/dot-symbol-assignment.s31
-rw-r--r--test/MC/AsmParser/dot-symbol-non-absolute.s9
-rw-r--r--test/MC/AsmParser/dot-symbol.s5
-rw-r--r--test/MC/AsmParser/exprs.s2
-rw-r--r--test/MC/AsmParser/if-diagnostics.s29
-rw-r--r--test/MC/AsmParser/ifc.s5
-rw-r--r--test/MC/AsmParser/ifeqs-diagnostics.s22
-rw-r--r--test/MC/AsmParser/ifeqs.s20
-rw-r--r--test/MC/AsmParser/invalid-input-assertion.s9
-rw-r--r--test/MC/AsmParser/lit.local.cfg3
-rw-r--r--test/MC/AsmParser/macro-def-in-instantiation.s20
-rw-r--r--test/MC/AsmParser/macro-err1.s2
-rw-r--r--test/MC/AsmParser/macro-irp.s2
-rw-r--r--test/MC/AsmParser/macro-qualifier-diagnostics.s64
-rw-r--r--test/MC/AsmParser/macro-qualifier.s16
-rw-r--r--test/MC/AsmParser/macros-argument-parsing-diagnostics.s24
-rw-r--r--test/MC/AsmParser/macros-argument-parsing.s91
-rw-r--r--test/MC/AsmParser/macros-darwin-vararg.s8
-rw-r--r--test/MC/AsmParser/macros-darwin.s90
-rw-r--r--test/MC/AsmParser/macros-gas.s105
-rw-r--r--test/MC/AsmParser/macros.s93
-rw-r--r--test/MC/AsmParser/vararg-default-value.s15
-rw-r--r--test/MC/AsmParser/vararg.s51
-rw-r--r--test/MC/COFF/alias.s11
-rw-r--r--test/MC/COFF/bad-expr.s7
-rw-r--r--test/MC/COFF/basic-coff-64.s4
-rw-r--r--test/MC/COFF/basic-coff.s4
-rw-r--r--test/MC/COFF/bss.s2
-rw-r--r--test/MC/COFF/bss_section.ll3
-rw-r--r--test/MC/COFF/comm.ll4
-rw-r--r--test/MC/COFF/comm.s25
-rw-r--r--test/MC/COFF/directive-section-characteristics.ll17
-rw-r--r--test/MC/COFF/early-dce.s16
-rw-r--r--test/MC/COFF/feat00.s2
-rw-r--r--test/MC/COFF/file.s47
-rw-r--r--test/MC/COFF/global_ctors_dtors.ll38
-rw-r--r--test/MC/COFF/initialised-data.ll7
-rw-r--r--test/MC/COFF/invalid-def.s5
-rw-r--r--test/MC/COFF/invalid-endef.s4
-rw-r--r--test/MC/COFF/invalid-scl-range.s6
-rw-r--r--test/MC/COFF/invalid-scl.s4
-rw-r--r--test/MC/COFF/invalid-type-range.s6
-rw-r--r--test/MC/COFF/invalid-type.s4
-rw-r--r--test/MC/COFF/ir-to-imgrel.ll6
-rwxr-xr-xtest/MC/COFF/linker-options.ll2
-rw-r--r--test/MC/COFF/linkonce-invalid.s14
-rw-r--r--test/MC/COFF/linkonce.s57
-rw-r--r--test/MC/COFF/lit.local.cfg3
-rwxr-xr-xtest/MC/COFF/lset0.s13
-rw-r--r--test/MC/COFF/offset.s19
-rw-r--r--test/MC/COFF/secidx-diagnostic.s8
-rw-r--r--test/MC/COFF/secidx.s16
-rw-r--r--test/MC/COFF/section-comdat-conflict.s13
-rw-r--r--test/MC/COFF/section-comdat-conflict2.s6
-rw-r--r--test/MC/COFF/section-comdat.s98
-rw-r--r--test/MC/COFF/section-name-encoding.s36
-rw-r--r--test/MC/COFF/seh-stackalloc-zero.s11
-rw-r--r--test/MC/COFF/seh.s4
-rw-r--r--test/MC/COFF/symbol-alias.s2
-rw-r--r--test/MC/COFF/symbol-fragment-offset-64.s4
-rw-r--r--test/MC/COFF/symbol-fragment-offset.s4
-rw-r--r--test/MC/COFF/timestamp.s4
-rw-r--r--test/MC/COFF/tricky-names.ll6
-rw-r--r--test/MC/COFF/weak-symbol.ll28
-rw-r--r--test/MC/COFF/weak.s2
-rw-r--r--test/MC/Disassembler/AArch64/a64-ignored-fields.txt1
-rw-r--r--test/MC/Disassembler/AArch64/arm64-advsimd.txt2283
-rw-r--r--test/MC/Disassembler/AArch64/arm64-arithmetic.txt526
-rw-r--r--test/MC/Disassembler/AArch64/arm64-basic-a64-undefined.txt31
-rw-r--r--test/MC/Disassembler/AArch64/arm64-bitfield.txt29
-rw-r--r--test/MC/Disassembler/AArch64/arm64-branch.txt75
-rw-r--r--test/MC/Disassembler/AArch64/arm64-canonical-form.txt21
-rw-r--r--test/MC/Disassembler/AArch64/arm64-crc32.txt18
-rw-r--r--test/MC/Disassembler/AArch64/arm64-crypto.txt47
-rw-r--r--test/MC/Disassembler/AArch64/arm64-invalid-logical.txt6
-rw-r--r--test/MC/Disassembler/AArch64/arm64-logical.txt223
-rw-r--r--test/MC/Disassembler/AArch64/arm64-memory.txt564
-rw-r--r--test/MC/Disassembler/AArch64/arm64-non-apple-fmov.txt7
-rw-r--r--test/MC/Disassembler/AArch64/arm64-scalar-fp.txt255
-rw-r--r--test/MC/Disassembler/AArch64/arm64-system.txt62
-rw-r--r--test/MC/Disassembler/AArch64/basic-a64-instructions.txt1381
-rw-r--r--test/MC/Disassembler/AArch64/basic-a64-undefined.txt67
-rw-r--r--test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt1
-rw-r--r--test/MC/Disassembler/AArch64/gicv3-regs.txt221
-rw-r--r--test/MC/Disassembler/AArch64/ldp-offset-predictable.txt1
-rw-r--r--test/MC/Disassembler/AArch64/ldp-postind.predictable.txt1
-rw-r--r--test/MC/Disassembler/AArch64/ldp-preind.predictable.txt1
-rw-r--r--test/MC/Disassembler/AArch64/lit.local.cfg3
-rw-r--r--test/MC/Disassembler/AArch64/neon-instructions.txt195
-rw-r--r--test/MC/Disassembler/AArch64/trace-regs.txt733
-rw-r--r--test/MC/Disassembler/ARM/addrmode2-reencoding.txt12
-rw-r--r--test/MC/Disassembler/ARM/hex-immediates.txt8
-rw-r--r--test/MC/Disassembler/ARM/invalid-thumbv7.txt39
-rw-r--r--test/MC/Disassembler/ARM/lit.local.cfg3
-rw-r--r--test/MC/Disassembler/Mips/lit.local.cfg3
-rw-r--r--test/MC/Disassembler/Mips/micromips.txt9
-rw-r--r--test/MC/Disassembler/Mips/micromips_le.txt9
-rw-r--r--test/MC/Disassembler/Mips/mips32.txt3
-rw-r--r--test/MC/Disassembler/Mips/mips32_le.txt3
-rw-r--r--test/MC/Disassembler/Mips/mips32r2.txt3
-rw-r--r--test/MC/Disassembler/Mips/mips32r2_le.txt3
-rw-r--r--test/MC/Disassembler/Mips/mips32r6.txt127
-rw-r--r--test/MC/Disassembler/Mips/mips64r6.txt145
-rw-r--r--test/MC/Disassembler/Mips/msa/test_2r.txt17
-rw-r--r--test/MC/Disassembler/Mips/msa/test_2r_msa64.txt3
-rw-r--r--test/MC/Disassembler/Mips/msa/test_2rf.txt34
-rw-r--r--test/MC/Disassembler/Mips/msa/test_3r.txt244
-rw-r--r--test/MC/Disassembler/Mips/msa/test_3rf.txt84
-rw-r--r--test/MC/Disassembler/Mips/msa/test_bit.txt50
-rw-r--r--test/MC/Disassembler/Mips/msa/test_ctrlregs.txt35
-rw-r--r--test/MC/Disassembler/Mips/msa/test_dlsa.txt6
-rw-r--r--test/MC/Disassembler/Mips/msa/test_elm.txt17
-rw-r--r--test/MC/Disassembler/Mips/msa/test_elm_insert.txt5
-rw-r--r--test/MC/Disassembler/Mips/msa/test_elm_insert_msa64.txt3
-rw-r--r--test/MC/Disassembler/Mips/msa/test_elm_insve.txt6
-rw-r--r--test/MC/Disassembler/Mips/msa/test_elm_msa64.txt6
-rw-r--r--test/MC/Disassembler/Mips/msa/test_i10.txt6
-rw-r--r--test/MC/Disassembler/Mips/msa/test_i5.txt46
-rw-r--r--test/MC/Disassembler/Mips/msa/test_i8.txt12
-rw-r--r--test/MC/Disassembler/Mips/msa/test_lsa.txt6
-rw-r--r--test/MC/Disassembler/Mips/msa/test_mi10.txt28
-rw-r--r--test/MC/Disassembler/Mips/msa/test_vec.txt9
-rw-r--r--test/MC/Disassembler/PowerPC/lit.local.cfg3
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-encoding-bookII.txt74
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-encoding-bookIII.txt107
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-encoding-ext.txt2253
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-encoding-fp.txt329
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-encoding-vmx.txt509
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-encoding.txt621
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-operands.txt94
-rw-r--r--test/MC/Disassembler/PowerPC/vsx.txt452
-rw-r--r--test/MC/Disassembler/Sparc/lit.local.cfg3
-rw-r--r--test/MC/Disassembler/Sparc/sparc-fp.txt148
-rw-r--r--test/MC/Disassembler/Sparc/sparc-mem.txt163
-rw-r--r--test/MC/Disassembler/Sparc/sparc.txt202
-rw-r--r--test/MC/Disassembler/SystemZ/insns.txt600
-rw-r--r--test/MC/Disassembler/SystemZ/lit.local.cfg3
-rw-r--r--test/MC/Disassembler/X86/avx-512.txt104
-rw-r--r--test/MC/Disassembler/X86/fp-stack.txt1037
-rw-r--r--test/MC/Disassembler/X86/hex-immediates.txt2
-rw-r--r--test/MC/Disassembler/X86/lit.local.cfg3
-rw-r--r--test/MC/Disassembler/X86/missing-sib.txt4
-rw-r--r--test/MC/Disassembler/X86/moffs.txt86
-rw-r--r--test/MC/Disassembler/X86/padlock.txt56
-rw-r--r--test/MC/Disassembler/X86/prefixes.txt4
-rw-r--r--test/MC/Disassembler/X86/simple-tests.txt12
-rw-r--r--test/MC/Disassembler/X86/x86-16.txt788
-rw-r--r--test/MC/Disassembler/X86/x86-32.txt15
-rw-r--r--test/MC/Disassembler/XCore/lit.local.cfg3
-rw-r--r--test/MC/ELF/ARM/bss-non-zero-value.s9
-rw-r--r--test/MC/ELF/ARM/gnu-type-hash-diagnostics.s9
-rw-r--r--test/MC/ELF/ARM/gnu-type-hash.s16
-rw-r--r--test/MC/ELF/ARM/lit.local.cfg3
-rw-r--r--test/MC/ELF/abs.s2
-rw-r--r--test/MC/ELF/alias-reloc.s2
-rw-r--r--test/MC/ELF/alias.s24
-rw-r--r--test/MC/ELF/bad-expr.s8
-rw-r--r--test/MC/ELF/bad-expr2.s12
-rw-r--r--test/MC/ELF/bad-expr3.s10
-rw-r--r--test/MC/ELF/basic-elf-32.s4
-rw-r--r--test/MC/ELF/cfi-adjust-cfa-offset.s2
-rw-r--r--test/MC/ELF/cfi-advance-loc2.s2
-rw-r--r--test/MC/ELF/cfi-def-cfa-offset.s2
-rw-r--r--test/MC/ELF/cfi-def-cfa-register.s2
-rw-r--r--test/MC/ELF/cfi-def-cfa.s2
-rw-r--r--test/MC/ELF/cfi-escape.s2
-rw-r--r--test/MC/ELF/cfi-offset.s2
-rw-r--r--test/MC/ELF/cfi-register.s2
-rw-r--r--test/MC/ELF/cfi-rel-offset.s2
-rw-r--r--test/MC/ELF/cfi-rel-offset2.s2
-rw-r--r--test/MC/ELF/cfi-remember.s2
-rw-r--r--test/MC/ELF/cfi-restore.s2
-rw-r--r--test/MC/ELF/cfi-same-value.s2
-rw-r--r--test/MC/ELF/cfi-sections.s4
-rw-r--r--test/MC/ELF/cfi-signal-frame.s4
-rw-r--r--test/MC/ELF/cfi-undefined.s2
-rw-r--r--test/MC/ELF/cfi-version.ll45
-rw-r--r--test/MC/ELF/cfi-window-save.s2
-rw-r--r--test/MC/ELF/cfi-zero-addr-delta.s2
-rw-r--r--test/MC/ELF/cfi.s84
-rw-r--r--test/MC/ELF/comdat.s8
-rw-r--r--test/MC/ELF/common.s18
-rw-r--r--test/MC/ELF/comp-dir.s1
-rw-r--r--test/MC/ELF/compression.s80
-rw-r--r--test/MC/ELF/discriminator.s61
-rw-r--r--test/MC/ELF/dot-symbol-assignment.s22
-rw-r--r--test/MC/ELF/file-double.s12
-rw-r--r--test/MC/ELF/file.s2
-rw-r--r--test/MC/ELF/gen-dwarf.s32
-rw-r--r--test/MC/ELF/gnu-type-diagnostics.s18
-rw-r--r--test/MC/ELF/gnu-type.s38
-rw-r--r--test/MC/ELF/ifunc-reloc.s16
-rw-r--r--test/MC/ELF/lcomm.s4
-rw-r--r--test/MC/ELF/lit.local.cfg3
-rw-r--r--test/MC/ELF/local-reloc.s2
-rw-r--r--test/MC/ELF/many-section.s93319
-rw-r--r--test/MC/ELF/many-sections-2.s65408
-rw-r--r--test/MC/ELF/many-sections.s106
-rw-r--r--test/MC/ELF/merge.s9
-rw-r--r--test/MC/ELF/no-reloc.s19
-rw-r--r--test/MC/ELF/nocompression.s5
-rw-r--r--test/MC/ELF/noexec.s2
-rw-r--r--test/MC/ELF/offset.s132
-rw-r--r--test/MC/ELF/pic-diff.s4
-rw-r--r--test/MC/ELF/pr19430.s14
-rw-r--r--test/MC/ELF/pr9292.s8
-rw-r--r--test/MC/ELF/relocation-386.s11
-rw-r--r--test/MC/ELF/relocation-pc.s4
-rw-r--r--test/MC/ELF/relocation.s17
-rw-r--r--test/MC/ELF/set.s8
-rw-r--r--test/MC/ELF/strtab-suffix-opt.s21
-rw-r--r--test/MC/ELF/subtraction-error.s8
-rw-r--r--test/MC/ELF/symref.s142
-rw-r--r--test/MC/ELF/symver.s142
-rw-r--r--test/MC/ELF/tls-i386.s56
-rw-r--r--test/MC/ELF/tls.s26
-rw-r--r--test/MC/ELF/type-propagate.s151
-rw-r--r--test/MC/ELF/type.s197
-rw-r--r--test/MC/ELF/undef.s79
-rw-r--r--test/MC/ELF/weak.s4
-rw-r--r--test/MC/ELF/weakref-reloc.s6
-rw-r--r--test/MC/ELF/weakref.s44
-rw-r--r--test/MC/MachO/AArch64/darwin-ARM64-local-label-diff.s21
-rw-r--r--test/MC/MachO/AArch64/darwin-ARM64-reloc.s157
-rw-r--r--test/MC/MachO/AArch64/lit.local.cfg3
-rw-r--r--test/MC/MachO/ARM/aliased-symbols.s115
-rw-r--r--test/MC/MachO/ARM/bad-darwin-ARM-reloc.s9
-rw-r--r--test/MC/MachO/ARM/bad-darwin-directives.s29
-rw-r--r--test/MC/MachO/ARM/ios-version-min-load-command.s10
-rw-r--r--test/MC/MachO/ARM/lit.local.cfg3
-rw-r--r--test/MC/MachO/ARM/version-min-diagnostics.s49
-rw-r--r--test/MC/MachO/ARM/version-min.s21
-rw-r--r--test/MC/MachO/bad-darwin-x86_64-reloc-expr.s6
-rw-r--r--test/MC/MachO/bss.s2
-rw-r--r--test/MC/MachO/debug_frame.s1
-rw-r--r--test/MC/MachO/eh-frame-reloc.s16
-rw-r--r--test/MC/MachO/eh_symbol.s14
-rw-r--r--test/MC/MachO/gen-dwarf-cpp.s13
-rw-r--r--test/MC/MachO/gen-dwarf.s14
-rw-r--r--test/MC/MachO/lit.local.cfg3
-rw-r--r--test/MC/MachO/osx-version-min-load-command.s10
-rw-r--r--test/MC/MachO/pr19185.s6
-rw-r--r--test/MC/MachO/temp-labels.s2
-rw-r--r--test/MC/MachO/variable-exprs.s8
-rw-r--r--test/MC/MachO/x86_32-scattered-reloc-fallback.s27
-rw-r--r--test/MC/Mips/abicalls.ll15
-rw-r--r--test/MC/Mips/cfi.s13
-rw-r--r--test/MC/Mips/cpload-bad.s15
-rw-r--r--test/MC/Mips/cpload.s33
-rw-r--r--test/MC/Mips/cpsetup-bad.s14
-rw-r--r--test/MC/Mips/cpsetup.s78
-rw-r--r--test/MC/Mips/do_switch.ll39
-rw-r--r--test/MC/Mips/do_switch1.s75
-rw-r--r--test/MC/Mips/do_switch2.s77
-rw-r--r--test/MC/Mips/do_switch3.s82
-rw-r--r--test/MC/Mips/eh-frame.s8
-rw-r--r--test/MC/Mips/elf-N64.ll26
-rw-r--r--test/MC/Mips/elf-N64.s65
-rw-r--r--test/MC/Mips/elf-gprel-32-64.ll40
-rw-r--r--test/MC/Mips/elf-gprel-32-64.s86
-rw-r--r--test/MC/Mips/elf-reginfo.ll34
-rw-r--r--test/MC/Mips/elf-relsym.ll39
-rw-r--r--test/MC/Mips/elf-relsym.s87
-rw-r--r--test/MC/Mips/elf-tls.ll40
-rw-r--r--test/MC/Mips/elf-tls.s134
-rw-r--r--test/MC/Mips/elf_eflags.ll69
-rw-r--r--test/MC/Mips/elf_eflags.s120
-rw-r--r--test/MC/Mips/elf_eflags_abicalls.s6
-rw-r--r--test/MC/Mips/elf_eflags_micromips.s8
-rw-r--r--test/MC/Mips/elf_eflags_mips16.s8
-rw-r--r--test/MC/Mips/elf_eflags_nan2008.s12
-rw-r--r--test/MC/Mips/elf_eflags_nanlegacy.s15
-rw-r--r--test/MC/Mips/elf_eflags_noreorder.s6
-rw-r--r--test/MC/Mips/elf_eflags_pic0.s7
-rw-r--r--test/MC/Mips/elf_eflags_pic2.s6
-rw-r--r--test/MC/Mips/elf_reginfo.s32
-rw-r--r--test/MC/Mips/elf_st_other.ll11
-rw-r--r--test/MC/Mips/elf_st_other.s35
-rw-r--r--test/MC/Mips/higher-highest-addressing.s54
-rw-r--r--test/MC/Mips/higher_highest.ll32
-rw-r--r--test/MC/Mips/hilo-addressing.s53
-rw-r--r--test/MC/Mips/lea_64.ll18
-rw-r--r--test/MC/Mips/lit.local.cfg3
-rw-r--r--test/MC/Mips/llvm-mc-fixup-endianness.s6
-rw-r--r--test/MC/Mips/micromips-16-bit-instructions.s27
-rw-r--r--test/MC/Mips/micromips-alias.s16
-rw-r--r--test/MC/Mips/micromips-alu-instructions.s2
-rw-r--r--test/MC/Mips/micromips-bad-branches.s225
-rw-r--r--test/MC/Mips/micromips-control-instructions.s60
-rw-r--r--test/MC/Mips/micromips-diagnostic-fixup.s10
-rw-r--r--test/MC/Mips/micromips-el-fixup-data.s25
-rw-r--r--test/MC/Mips/micromips-fpu-instructions.s193
-rw-r--r--test/MC/Mips/micromips-jump-instructions.s6
-rw-r--r--test/MC/Mips/micromips-loadstore-instructions.s15
-rw-r--r--test/MC/Mips/micromips-long-branch.ll16437
-rw-r--r--test/MC/Mips/micromips-pc16-fixup.s10
-rw-r--r--test/MC/Mips/micromips-relocations.s12
-rw-r--r--test/MC/Mips/mips-abi-bad.s20
-rw-r--r--test/MC/Mips/mips-alu-instructions.s29
-rw-r--r--test/MC/Mips/mips-bad-branches.s409
-rw-r--r--test/MC/Mips/mips-control-instructions.s4
-rw-r--r--test/MC/Mips/mips-data-directives.s36
-rw-r--r--test/MC/Mips/mips-diagnostic-fixup.s10
-rw-r--r--test/MC/Mips/mips-expansions-bad.s6
-rw-r--r--test/MC/Mips/mips-expansions.s13
-rw-r--r--test/MC/Mips/mips-jump-instructions.s12
-rw-r--r--test/MC/Mips/mips-noat.s29
-rw-r--r--test/MC/Mips/mips-pc16-fixup.s10
-rw-r--r--test/MC/Mips/mips-reginfo-fp32.s34
-rw-r--r--test/MC/Mips/mips-reginfo-fp64.s60
-rw-r--r--test/MC/Mips/mips-register-names-invalid.s8
-rw-r--r--test/MC/Mips/mips-register-names-o32.s40
-rw-r--r--test/MC/Mips/mips-register-names.s71
-rw-r--r--test/MC/Mips/mips1/invalid-mips2-wrong-error.s16
-rw-r--r--test/MC/Mips/mips1/invalid-mips2.s24
-rw-r--r--test/MC/Mips/mips1/invalid-mips3-wrong-error.s23
-rw-r--r--test/MC/Mips/mips1/invalid-mips3.s65
-rw-r--r--test/MC/Mips/mips1/invalid-mips32.s10
-rw-r--r--test/MC/Mips/mips1/invalid-mips4-wrong-error.s23
-rw-r--r--test/MC/Mips/mips1/invalid-mips4.s89
-rw-r--r--test/MC/Mips/mips1/invalid-mips5-wrong-error.s46
-rw-r--r--test/MC/Mips/mips1/invalid-mips5.s90
-rw-r--r--test/MC/Mips/mips1/valid-xfail.s11
-rw-r--r--test/MC/Mips/mips1/valid.s113
-rw-r--r--test/MC/Mips/mips2/invalid-mips3-wrong-error.s18
-rw-r--r--test/MC/Mips/mips2/invalid-mips3.s49
-rw-r--r--test/MC/Mips/mips2/invalid-mips32.s44
-rw-r--r--test/MC/Mips/mips2/invalid-mips32r2-xfail.s11
-rw-r--r--test/MC/Mips/mips2/invalid-mips32r2.s66
-rw-r--r--test/MC/Mips/mips2/invalid-mips4-wrong-error.s14
-rw-r--r--test/MC/Mips/mips2/invalid-mips4.s72
-rw-r--r--test/MC/Mips/mips2/invalid-mips5-wrong-error.s46
-rw-r--r--test/MC/Mips/mips2/invalid-mips5.s73
-rw-r--r--test/MC/Mips/mips2/valid.s138
-rw-r--r--test/MC/Mips/mips3/invalid-mips32.s10
-rw-r--r--test/MC/Mips/mips3/invalid-mips4.s30
-rw-r--r--test/MC/Mips/mips3/invalid-mips5-wrong-error.s46
-rw-r--r--test/MC/Mips/mips3/invalid-mips5.s32
-rw-r--r--test/MC/Mips/mips3/valid.s197
-rw-r--r--test/MC/Mips/mips32/abiflags.s36
-rw-r--r--test/MC/Mips/mips32/invalid-mips32r2-xfail.s11
-rw-r--r--test/MC/Mips/mips32/invalid-mips32r2.s34
-rw-r--r--test/MC/Mips/mips32/invalid-mips64.s9
-rw-r--r--test/MC/Mips/mips32/valid-xfail.s38
-rw-r--r--test/MC/Mips/mips32/valid.s166
-rw-r--r--test/MC/Mips/mips32r2/abiflags.s37
-rw-r--r--test/MC/Mips/mips32r2/invalid-mips64r2.s10
-rw-r--r--test/MC/Mips/mips32r2/invalid.s10
-rw-r--r--test/MC/Mips/mips32r2/valid-xfail.s309
-rw-r--r--test/MC/Mips/mips32r2/valid.s197
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips1-wrong-error.s17
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips1.s24
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips2-wrong-error.s20
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips2.s26
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips32-wrong-error.s20
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips32.s25
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips32r2.s15
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips4-wrong-error.s21
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips4.s11
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips5-wrong-error.s11
-rw-r--r--test/MC/Mips/mips32r6/invalid-mips5.s9
-rw-r--r--test/MC/Mips/mips32r6/invalid.s14
-rw-r--r--test/MC/Mips/mips32r6/relocations.s70
-rw-r--r--test/MC/Mips/mips32r6/valid-xfail.s19
-rw-r--r--test/MC/Mips/mips32r6/valid.s154
-rw-r--r--test/MC/Mips/mips4/invalid-mips32.s10
-rw-r--r--test/MC/Mips/mips4/invalid-mips5-wrong-error.s46
-rw-r--r--test/MC/Mips/mips4/invalid-mips5.s9
-rw-r--r--test/MC/Mips/mips4/invalid-mips64.s24
-rw-r--r--test/MC/Mips/mips4/invalid-mips64r2-xfail.s11
-rw-r--r--test/MC/Mips/mips4/invalid-mips64r2.s37
-rw-r--r--test/MC/Mips/mips4/valid-xfail.s49
-rw-r--r--test/MC/Mips/mips4/valid.s216
-rw-r--r--test/MC/Mips/mips5/invalid-mips32.s10
-rw-r--r--test/MC/Mips/mips5/invalid-mips64.s24
-rw-r--r--test/MC/Mips/mips5/invalid-mips64r2-xfail.s11
-rw-r--r--test/MC/Mips/mips5/invalid-mips64r2.s43
-rw-r--r--test/MC/Mips/mips5/valid-xfail.s87
-rw-r--r--test/MC/Mips/mips5/valid.s218
-rw-r--r--test/MC/Mips/mips64-alu-instructions.s43
-rw-r--r--test/MC/Mips/mips64-expansions.s209
-rw-r--r--test/MC/Mips/mips64-register-names-n32-n64.s49
-rw-r--r--test/MC/Mips/mips64-register-names-o32.s41
-rw-r--r--test/MC/Mips/mips64-register-names.s70
-rw-r--r--test/MC/Mips/mips64/abiflags.s36
-rw-r--r--test/MC/Mips/mips64/invalid-mips64r2-xfail.s11
-rw-r--r--test/MC/Mips/mips64/invalid-mips64r2.s29
-rw-r--r--test/MC/Mips/mips64/valid-xfail.s94
-rw-r--r--test/MC/Mips/mips64/valid.s235
-rw-r--r--test/MC/Mips/mips64eb-fixups.s43
-rw-r--r--test/MC/Mips/mips64r2/abi-bad.s9
-rw-r--r--test/MC/Mips/mips64r2/abiflags.s36
-rw-r--r--test/MC/Mips/mips64r2/invalid.s10
-rw-r--r--test/MC/Mips/mips64r2/valid-xfail.s311
-rw-r--r--test/MC/Mips/mips64r2/valid.s263
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips1-wrong-error.s17
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips1.s27
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips2.s29
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips3-wrong-error.s23
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips3.s33
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips32-wrong-error.s20
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips4-wrong-error.s21
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips4.s14
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips5-wrong-error.s48
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips5.s12
-rw-r--r--test/MC/Mips/mips64r6/invalid-mips64.s54
-rw-r--r--test/MC/Mips/mips64r6/invalid.s12
-rw-r--r--test/MC/Mips/mips64r6/relocations.s76
-rw-r--r--test/MC/Mips/mips64r6/valid-xfail.s19
-rw-r--r--test/MC/Mips/mips64r6/valid.s173
-rw-r--r--test/MC/Mips/mips_abi_flags_xx.s45
-rw-r--r--test/MC/Mips/mips_abi_flags_xx_set.s37
-rw-r--r--test/MC/Mips/mips_directives.s44
-rw-r--r--test/MC/Mips/mips_directives_bad.s59
-rw-r--r--test/MC/Mips/mips_gprel16.ll33
-rw-r--r--test/MC/Mips/mips_gprel16.s73
-rw-r--r--test/MC/Mips/msa/abiflags.s37
-rw-r--r--test/MC/Mips/msa/test_2r.s20
-rw-r--r--test/MC/Mips/msa/test_2r_msa64.s5
-rw-r--r--test/MC/Mips/msa/test_2rf.s37
-rw-r--r--test/MC/Mips/msa/test_3r.s247
-rw-r--r--test/MC/Mips/msa/test_3rf.s87
-rw-r--r--test/MC/Mips/msa/test_bit.s53
-rw-r--r--test/MC/Mips/msa/test_cbranch.s22
-rw-r--r--test/MC/Mips/msa/test_ctrlregs.s38
-rw-r--r--test/MC/Mips/msa/test_dlsa.s12
-rw-r--r--test/MC/Mips/msa/test_elm.s20
-rw-r--r--test/MC/Mips/msa/test_elm_insert.s8
-rw-r--r--test/MC/Mips/msa/test_elm_insert_msa64.s5
-rw-r--r--test/MC/Mips/msa/test_elm_insve.s9
-rw-r--r--test/MC/Mips/msa/test_elm_msa64.s7
-rw-r--r--test/MC/Mips/msa/test_i10.s10
-rw-r--r--test/MC/Mips/msa/test_i5.s49
-rw-r--r--test/MC/Mips/msa/test_i8.s15
-rw-r--r--test/MC/Mips/msa/test_lsa.s9
-rw-r--r--test/MC/Mips/msa/test_mi10.s79
-rw-r--r--test/MC/Mips/msa/test_vec.s12
-rw-r--r--test/MC/Mips/nabi-regs.s10
-rw-r--r--test/MC/Mips/nacl-mask.s319
-rw-r--r--test/MC/Mips/nooddspreg-cmdarg.s42
-rw-r--r--test/MC/Mips/nooddspreg-error.s14
-rw-r--r--test/MC/Mips/nooddspreg.s44
-rw-r--r--test/MC/Mips/octeon-instructions.s85
-rw-r--r--test/MC/Mips/oddspreg.s69
-rw-r--r--test/MC/Mips/r-mips-got-disp.ll19
-rw-r--r--test/MC/Mips/r-mips-got-disp.s65
-rw-r--r--test/MC/Mips/set-at-directive-explicit-at.s42
-rw-r--r--test/MC/Mips/set-at-directive.s218
-rw-r--r--test/MC/Mips/sym-expr.s14
-rw-r--r--test/MC/Mips/xgot.ll29
-rw-r--r--test/MC/Mips/xgot.s67
-rw-r--r--test/MC/PowerPC/deprecated-p7.s1
-rw-r--r--test/MC/PowerPC/lit.local.cfg3
-rw-r--r--test/MC/PowerPC/ppc-llong.s2
-rw-r--r--test/MC/PowerPC/ppc-machine.s1
-rw-r--r--test/MC/PowerPC/ppc-nop.s8
-rw-r--r--test/MC/PowerPC/ppc-reloc.s17
-rw-r--r--test/MC/PowerPC/ppc-word.s2
-rw-r--r--test/MC/PowerPC/ppc64-abiversion.s9
-rw-r--r--test/MC/PowerPC/ppc64-encoding-bookII.s161
-rw-r--r--test/MC/PowerPC/ppc64-encoding-bookIII.s178
-rw-r--r--test/MC/PowerPC/ppc64-encoding-ext.s5636
-rw-r--r--test/MC/PowerPC/ppc64-encoding-fp.s578
-rw-r--r--test/MC/PowerPC/ppc64-encoding-vmx.s882
-rw-r--r--test/MC/PowerPC/ppc64-encoding.s1216
-rw-r--r--test/MC/PowerPC/ppc64-errors.s2
-rw-r--r--test/MC/PowerPC/ppc64-fixup-apply.s19
-rw-r--r--test/MC/PowerPC/ppc64-fixup-explicit.s96
-rw-r--r--test/MC/PowerPC/ppc64-fixups.s1123
-rw-r--r--test/MC/PowerPC/ppc64-initial-cfa.s27
-rw-r--r--test/MC/PowerPC/ppc64-localentry-error1.s11
-rw-r--r--test/MC/PowerPC/ppc64-localentry-error2.s12
-rw-r--r--test/MC/PowerPC/ppc64-localentry.s70
-rw-r--r--test/MC/PowerPC/ppc64-operands.s177
-rw-r--r--test/MC/PowerPC/ppc64-regs.s1
-rw-r--r--test/MC/PowerPC/vsx.s447
-rw-r--r--test/MC/Sparc/lit.local.cfg3
-rw-r--r--test/MC/Sparc/sparc-alu-instructions.s128
-rw-r--r--test/MC/Sparc/sparc-atomic-instructions.s19
-rw-r--r--test/MC/Sparc/sparc-ctrl-instructions.s278
-rw-r--r--test/MC/Sparc/sparc-directive-xword.s10
-rw-r--r--test/MC/Sparc/sparc-directives.s19
-rw-r--r--test/MC/Sparc/sparc-fp-instructions.s140
-rw-r--r--test/MC/Sparc/sparc-mem-instructions.s58
-rw-r--r--test/MC/Sparc/sparc-nop-data.s11
-rw-r--r--test/MC/Sparc/sparc-pic.s49
-rw-r--r--test/MC/Sparc/sparc-relocations.s46
-rw-r--r--test/MC/Sparc/sparc-vis.s4
-rw-r--r--test/MC/Sparc/sparc64-alu-instructions.s38
-rw-r--r--test/MC/Sparc/sparc64-ctrl-instructions.s1226
-rw-r--r--test/MC/Sparc/sparcv8-instructions.s15
-rw-r--r--test/MC/Sparc/sparcv9-instructions.s23
-rw-r--r--test/MC/SystemZ/insn-bad-z196.s344
-rw-r--r--test/MC/SystemZ/insn-bad.s115
-rw-r--r--test/MC/SystemZ/insn-good-z196.s450
-rw-r--r--test/MC/SystemZ/lit.local.cfg3
-rw-r--r--test/MC/X86/AlignedBundling/lit.local.cfg3
-rw-r--r--test/MC/X86/address-size.s16
-rw-r--r--test/MC/X86/avx512-encodings.s3182
-rw-r--r--test/MC/X86/fixup-cpu-mode.s8
-rw-r--r--test/MC/X86/index-operations.s146
-rw-r--r--test/MC/X86/intel-syntax-avx512.s5
-rw-r--r--test/MC/X86/intel-syntax-bitwise-ops.s22
-rw-r--r--test/MC/X86/intel-syntax-directional-label.s17
-rw-r--r--test/MC/X86/intel-syntax-invalid-basereg.s7
-rw-r--r--test/MC/X86/intel-syntax-invalid-scale.s11
-rw-r--r--test/MC/X86/intel-syntax.s14
-rw-r--r--test/MC/X86/lit.local.cfg3
-rw-r--r--test/MC/X86/no-elf-compact-unwind.s16
-rw-r--r--test/MC/X86/padlock.s52
-rw-r--r--test/MC/X86/relax-insn.s5
-rw-r--r--test/MC/X86/reloc-undef-global.s20
-rw-r--r--test/MC/X86/ret.s114
-rw-r--r--test/MC/X86/stackmap-nops.ll47
-rw-r--r--test/MC/X86/variant-diagnostics.s11
-rw-r--r--test/MC/X86/x86-16.s949
-rw-r--r--test/MC/X86/x86-32.s74
-rw-r--r--test/MC/X86/x86-64.s58
-rw-r--r--test/MC/X86/x86-itanium.ll6
-rw-r--r--test/MC/X86/x86-target-directives.s7
-rw-r--r--test/MC/X86/x86-windows-itanium-libcalls.ll16
-rw-r--r--test/MC/X86/x86_64-avx-encoding.s2
-rw-r--r--test/MC/X86/x86_64-signed-reloc.s16
-rw-r--r--test/MC/X86/x86_64-tbm-encoding.s6
-rw-r--r--test/MC/X86/x86_errors.s17
-rw-r--r--test/MC/X86/x86_long_nop.s10
-rw-r--r--test/MC/X86/x86_nop.s1
-rw-r--r--test/Makefile11
-rw-r--r--test/Object/ARM/lit.local.cfg3
-rw-r--r--test/Object/Inputs/COFF/i386.yaml16
-rw-r--r--test/Object/Inputs/COFF/long-file-symbol.yaml14
-rw-r--r--test/Object/Inputs/COFF/weak-external.yaml43
-rw-r--r--test/Object/Inputs/COFF/x86-64.yaml33
-rw-r--r--test/Object/Inputs/absolute.elf-x86-64bin0 -> 711 bytes
-rw-r--r--test/Object/Inputs/common.coff-i386bin0 -> 520 bytes
-rw-r--r--test/Object/Inputs/corrupt-archive.abin0 -> 2698 bytes
-rw-r--r--test/Object/Inputs/darwin-m-test1.mach0-armv7bin0 -> 432 bytes
-rw-r--r--test/Object/Inputs/darwin-m-test2.macho-i386bin0 -> 88 bytes
-rwxr-xr-xtest/Object/Inputs/darwin-m-test3.macho-x86-64bin0 -> 9216 bytes
-rwxr-xr-xtest/Object/Inputs/hello-world.macho-x86_64bin0 -> 8496 bytes
-rw-r--r--test/Object/Inputs/macho-archive-x86_64.abin0 -> 1304 bytes
-rwxr-xr-xtest/Object/Inputs/macho-hello-g.macho-x86_64bin0 -> 8680 bytes
-rw-r--r--test/Object/Inputs/macho-text-data-bss.macho-x86_64bin0 -> 844 bytes
-rw-r--r--test/Object/Inputs/macho-universal-archive.x86_64.i386bin0 -> 1656 bytes
-rwxr-xr-xtest/Object/Inputs/no-sections.elf-x86-64bin0 -> 2912 bytes
-rw-r--r--test/Object/Inputs/program-headers.mips64bin0 -> 790 bytes
-rw-r--r--test/Object/Inputs/relocatable-with-section-address.elf-x86-64bin0 -> 1584 bytes
-rwxr-xr-xtest/Object/Inputs/relocation-dynamic.elf-i386bin0 -> 1504 bytes
-rw-r--r--test/Object/Inputs/relocation-relocatable.elf-i386bin0 -> 772 bytes
-rw-r--r--test/Object/Inputs/thumb-symbols.elf.armbin0 -> 481 bytes
-rw-r--r--test/Object/Inputs/trivial-object-test.coff-x86-64bin347 -> 437 bytes
-rw-r--r--test/Object/Inputs/trivial-object-test.elf-mipselbin0 -> 1124 bytes
-rw-r--r--test/Object/Inputs/trivial.ll4
-rw-r--r--test/Object/Inputs/unwind-section.elf-x86-64bin0 -> 2369 bytes
-rw-r--r--test/Object/Inputs/weak.elf-x86-64bin0 -> 896 bytes
-rw-r--r--test/Object/Mips/lit.local.cfg3
-rw-r--r--test/Object/X86/archive-ir-asm.ll20
-rw-r--r--test/Object/X86/lit.local.cfg3
-rw-r--r--test/Object/X86/nm-ir.ll45
-rw-r--r--test/Object/X86/objdump-cfg-invalid-opcode.yaml2
-rw-r--r--test/Object/X86/objdump-disassembly-inline-relocations.test35
-rw-r--r--test/Object/X86/objdump-disassembly-symbolic.test20
-rw-r--r--test/Object/X86/yaml2obj-elf-x86-rel.yaml41
-rw-r--r--test/Object/ar-error.test6
-rw-r--r--test/Object/archive-long-index.test28
-rw-r--r--test/Object/archive-symtab.test50
-rw-r--r--test/Object/archive-toc.test24
-rw-r--r--test/Object/coff-archive-short.test2
-rw-r--r--test/Object/coff-archive.test2
-rw-r--r--test/Object/directory.ll2
-rw-r--r--test/Object/extract.ll3
-rw-r--r--test/Object/mangle-ir.ll14
-rw-r--r--test/Object/nm-archive.test11
-rw-r--r--test/Object/nm-darwin-m.test53
-rw-r--r--test/Object/nm-error.test6
-rw-r--r--test/Object/nm-shared-object.test40
-rw-r--r--test/Object/nm-trivial-object.test131
-rw-r--r--test/Object/nm-universal-binary.test35
-rw-r--r--test/Object/obj2yaml-coff-long-file-symbol.test3
-rw-r--r--test/Object/obj2yaml-coff-weak-external.test3
-rw-r--r--test/Object/obj2yaml.test304
-rw-r--r--test/Object/objdump-no-sectionheaders.test6
-rw-r--r--test/Object/objdump-relocations.test12
-rw-r--r--test/Object/readobj-elf-versioning.test4
-rw-r--r--test/Object/readobj-shared-object.test18
-rw-r--r--test/Object/simple-archive.test2
-rw-r--r--test/Object/size-trivial-macho.test89
-rw-r--r--test/Object/yaml2obj-coff-multi-doc.test91
-rw-r--r--test/Object/yaml2obj-elf-file-headers-with-e_flags.yaml17
-rw-r--r--test/Object/yaml2obj-elf-multi-doc.test56
-rw-r--r--test/Object/yaml2obj-elf-rel.yaml118
-rw-r--r--test/Object/yaml2obj-elf-section-basic.yaml41
-rw-r--r--test/Object/yaml2obj-elf-section-invalid-size.yaml26
-rw-r--r--test/Object/yaml2obj-elf-symbol-basic.yaml2
-rw-r--r--test/Object/yaml2obj-elf-symbol-visibility.yaml126
-rw-r--r--test/Object/yaml2obj-readobj.test3
-rw-r--r--test/Other/Inputs/llvm_cov.gcdabin296 -> 0 bytes
-rw-r--r--test/Other/Inputs/llvm_cov.gcnobin984 -> 0 bytes
-rw-r--r--test/Other/X86/lit.local.cfg3
-rw-r--r--test/Other/constant-fold-gep.ll19
-rw-r--r--test/Other/extract-alias.ll6
-rw-r--r--test/Other/llvm-cov.test4
-rw-r--r--test/Other/llvm-nm-without-aliases.ll4
-rw-r--r--test/Other/new-pass-manager.ll69
-rw-r--r--test/Other/optimization-remarks-inline.ll40
-rw-r--r--test/Other/pass-pipeline-parsing.ll146
-rw-r--r--test/TableGen/ForeachLoop.td26
-rw-r--r--test/TableGen/GeneralList.td1
-rw-r--r--test/TableGen/MultiClassDefName.td25
-rw-r--r--test/TableGen/ValidIdentifiers.td16
-rw-r--r--test/TableGen/if-empty-list-arg.td7
-rw-r--r--test/TableGen/intrinsic-long-name.td32
-rw-r--r--test/TableGen/lisp.td1
-rw-r--r--test/TableGen/listconcat.td18
-rw-r--r--test/TableGen/math.td10
-rw-r--r--test/TableGen/strconcat.td14
-rw-r--r--test/Transforms/AddDiscriminators/basic.ll59
-rw-r--r--test/Transforms/AddDiscriminators/first-only.ll82
-rw-r--r--test/Transforms/AddDiscriminators/multiple.ll71
-rw-r--r--test/Transforms/AddDiscriminators/no-discriminators.ll71
-rw-r--r--test/Transforms/ArgumentPromotion/basictest.ll32
-rw-r--r--test/Transforms/ArgumentPromotion/byval-2.ll37
-rw-r--r--test/Transforms/ArgumentPromotion/byval.ll33
-rw-r--r--test/Transforms/ArgumentPromotion/dbg.ll22
-rw-r--r--test/Transforms/ArgumentPromotion/inalloca.ll49
-rw-r--r--test/Transforms/ArgumentPromotion/tail.ll20
-rw-r--r--test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll364
-rw-r--r--test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll226
-rw-r--r--test/Transforms/AtomicExpandLoadLinked/ARM/cmpxchg-weak.ll97
-rw-r--r--test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg3
-rw-r--r--test/Transforms/BBVectorize/lit.local.cfg3
-rw-r--r--test/Transforms/BBVectorize/simple-int.ll381
-rw-r--r--test/Transforms/BranchFolding/2007-10-19-InlineAsmDirectives.ll2
-rw-r--r--test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll64
-rw-r--r--test/Transforms/CodeGenPrepare/X86/lit.local.cfg3
-rw-r--r--test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll37
-rw-r--r--test/Transforms/CodeGenPrepare/X86/x86-shuffle-sink.ll105
-rw-r--r--test/Transforms/ConstProp/loads.ll47
-rw-r--r--test/Transforms/ConstantHoisting/AArch64/const-addr.ll23
-rw-r--r--test/Transforms/ConstantHoisting/AArch64/large-immediate.ll27
-rw-r--r--test/Transforms/ConstantHoisting/AArch64/lit.local.cfg2
-rw-r--r--test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll23
-rw-r--r--test/Transforms/ConstantHoisting/PowerPC/lit.local.cfg3
-rw-r--r--test/Transforms/ConstantHoisting/PowerPC/masks.ll66
-rw-r--r--test/Transforms/ConstantHoisting/X86/cast-inst.ll29
-rw-r--r--test/Transforms/ConstantHoisting/X86/const-base-addr.ll24
-rw-r--r--test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll22
-rw-r--r--test/Transforms/ConstantHoisting/X86/large-immediate.ll36
-rw-r--r--test/Transforms/ConstantHoisting/X86/lit.local.cfg3
-rw-r--r--test/Transforms/ConstantHoisting/X86/phi.ll116
-rw-r--r--test/Transforms/ConstantHoisting/X86/stackmap.ll17
-rw-r--r--test/Transforms/ConstantMerge/linker-private.ll23
-rw-r--r--test/Transforms/DeadArgElim/deadexternal.ll2
-rw-r--r--test/Transforms/DeadArgElim/keepalive.ll16
-rw-r--r--test/Transforms/DeadStoreElimination/PartialStore.ll22
-rw-r--r--test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll74
-rw-r--r--test/Transforms/DeadStoreElimination/simple.ll26
-rw-r--r--test/Transforms/FunctionAttrs/nocapture.ll17
-rw-r--r--test/Transforms/FunctionAttrs/readattrs.ll20
-rw-r--r--test/Transforms/GCOVProfiling/global-ctor.ll58
-rw-r--r--test/Transforms/GCOVProfiling/linezero.ll143
-rw-r--r--test/Transforms/GCOVProfiling/version.ll4
-rw-r--r--test/Transforms/GVN/2009-03-10-PREOnVoid.ll56
-rw-r--r--test/Transforms/GVN/calloc-load-removal.ll25
-rw-r--r--test/Transforms/GVN/invariant-load.ll31
-rw-r--r--test/Transforms/GVN/load-pre-nonlocal.ll87
-rw-r--r--test/Transforms/GVN/rle.ll13
-rw-r--r--test/Transforms/GVN/unreachable_block_infinite_loop.ll2
-rw-r--r--test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll14
-rw-r--r--test/Transforms/GlobalDCE/global_ctors.ll14
-rw-r--r--test/Transforms/GlobalDCE/global_ctors_integration.ll45
-rw-r--r--test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll2
-rw-r--r--test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll18
-rw-r--r--test/Transforms/GlobalOpt/alias-resolve.ll30
-rw-r--r--test/Transforms/GlobalOpt/alias-used-address-space.ll26
-rw-r--r--test/Transforms/GlobalOpt/alias-used-section.ll8
-rw-r--r--test/Transforms/GlobalOpt/atexit.ll2
-rw-r--r--test/Transforms/GlobalOpt/constantfold-initializers.ll38
-rw-r--r--test/Transforms/GlobalOpt/ctor-list-opt.ll19
-rw-r--r--test/Transforms/GlobalOpt/fastcc.ll46
-rw-r--r--test/Transforms/GlobalOpt/memset.ll19
-rw-r--r--test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll2
-rw-r--r--test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll57
-rw-r--r--test/Transforms/IndVarSimplify/iv-widen.ll40
-rw-r--r--test/Transforms/IndVarSimplify/lcssa-preservation.ll51
-rw-r--r--test/Transforms/IndVarSimplify/lftr-extend-const.ll4
-rw-r--r--test/Transforms/IndVarSimplify/lftr-reuse.ll10
-rw-r--r--test/Transforms/IndVarSimplify/overflowcheck.ll56
-rw-r--r--test/Transforms/IndVarSimplify/pr18223.ll30
-rw-r--r--test/Transforms/IndVarSimplify/tripcount_compute.ll31
-rw-r--r--test/Transforms/Inline/2010-05-31-ByvalTailcall.ll24
-rw-r--r--test/Transforms/Inline/always-inline.ll11
-rw-r--r--test/Transforms/Inline/blockaddress.ll5
-rw-r--r--test/Transforms/Inline/byval-tail-call.ll38
-rw-r--r--test/Transforms/Inline/byval_lifetime.ll26
-rw-r--r--test/Transforms/Inline/debug-invoke.ll37
-rw-r--r--test/Transforms/Inline/ignore-debug-info.ll55
-rw-r--r--test/Transforms/Inline/inline-cold.ll200
-rw-r--r--test/Transforms/Inline/inline-tail.ll185
-rw-r--r--test/Transforms/Inline/inline-vla.ll38
-rw-r--r--test/Transforms/Inline/inline_invoke.ll5
-rw-r--r--test/Transforms/Inline/inline_returns_twice.ll2
-rw-r--r--test/Transforms/Inline/invoke-cleanup.ll39
-rw-r--r--test/Transforms/Inline/invoke-combine-clauses.ll117
-rw-r--r--test/Transforms/Inline/null-function.ll9
-rw-r--r--test/Transforms/Inline/optimization-remarks.ll60
-rw-r--r--test/Transforms/Inline/ptr-diff.ll2
-rw-r--r--test/Transforms/Inline/switch.ll60
-rw-r--r--test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll4
-rw-r--r--test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll4
-rw-r--r--test/Transforms/InstCombine/2010-03-03-ExtElim.ll4
-rw-r--r--test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll69
-rw-r--r--test/Transforms/InstCombine/2012-07-30-addrsp-bitcast.ll2
-rw-r--r--test/Transforms/InstCombine/AddOverFlow.ll118
-rw-r--r--test/Transforms/InstCombine/OverlappingInsertvalues.ll36
-rw-r--r--test/Transforms/InstCombine/abs_abs.ll961
-rw-r--r--test/Transforms/InstCombine/add-shrink.ll10
-rw-r--r--test/Transforms/InstCombine/add-sitofp.ll2
-rw-r--r--test/Transforms/InstCombine/add2.ll272
-rw-r--r--test/Transforms/InstCombine/add4.ll23
-rw-r--r--test/Transforms/InstCombine/addrspacecast.ll80
-rw-r--r--test/Transforms/InstCombine/align-2d-gep.ll2
-rw-r--r--test/Transforms/InstCombine/alloca.ll21
-rw-r--r--test/Transforms/InstCombine/ashr-nop.ll8
-rw-r--r--test/Transforms/InstCombine/bitcast-store.ll18
-rw-r--r--test/Transforms/InstCombine/blend_x86.ll55
-rw-r--r--test/Transforms/InstCombine/call-cast-target-inalloca.ll22
-rw-r--r--test/Transforms/InstCombine/call-cast-target.ll12
-rw-r--r--test/Transforms/InstCombine/cast-call-combine.ll23
-rw-r--r--test/Transforms/InstCombine/cast-set.ll7
-rw-r--r--test/Transforms/InstCombine/cast.ll59
-rw-r--r--test/Transforms/InstCombine/ceil.ll56
-rw-r--r--test/Transforms/InstCombine/constant-fold-address-space-pointer.ll10
-rw-r--r--test/Transforms/InstCombine/constant-fold-math.ll47
-rw-r--r--test/Transforms/InstCombine/copysign.ll49
-rw-r--r--test/Transforms/InstCombine/descale-zero.ll21
-rw-r--r--test/Transforms/InstCombine/distribute.ll68
-rw-r--r--test/Transforms/InstCombine/div.ll43
-rw-r--r--test/Transforms/InstCombine/double-float-shrink-1.ll5
-rw-r--r--test/Transforms/InstCombine/exp2-1.ll24
-rw-r--r--test/Transforms/InstCombine/fast-math.ll53
-rw-r--r--test/Transforms/InstCombine/fdiv.ll26
-rw-r--r--test/Transforms/InstCombine/ffs-1.ll6
-rw-r--r--test/Transforms/InstCombine/float-shrink-compare.ll54
-rw-r--r--test/Transforms/InstCombine/fmul.ll34
-rw-r--r--test/Transforms/InstCombine/fpcast.ll9
-rw-r--r--test/Transforms/InstCombine/fpextend.ll14
-rw-r--r--test/Transforms/InstCombine/fpextend_x86.ll57
-rw-r--r--test/Transforms/InstCombine/fprintf-1.ll8
-rw-r--r--test/Transforms/InstCombine/gep-addrspace.ll17
-rw-r--r--test/Transforms/InstCombine/gepphigep.ll56
-rw-r--r--test/Transforms/InstCombine/getelementptr.ll37
-rw-r--r--test/Transforms/InstCombine/icmp.ll72
-rw-r--r--test/Transforms/InstCombine/insert-extract-shuffle.ll37
-rw-r--r--test/Transforms/InstCombine/intrinsics.ll13
-rw-r--r--test/Transforms/InstCombine/load-addrspace-cast.ll12
-rw-r--r--test/Transforms/InstCombine/load.ll48
-rw-r--r--test/Transforms/InstCombine/loadstore-alignment.ll114
-rw-r--r--test/Transforms/InstCombine/memcpy-from-global.ll68
-rw-r--r--test/Transforms/InstCombine/mul.ll16
-rw-r--r--test/Transforms/InstCombine/onehot_merge.ll4
-rw-r--r--test/Transforms/InstCombine/overflow-mul.ll175
-rw-r--r--test/Transforms/InstCombine/pow-1.ll29
-rw-r--r--test/Transforms/InstCombine/pr19420.ll67
-rw-r--r--test/Transforms/InstCombine/pr20059.ll16
-rw-r--r--test/Transforms/InstCombine/pr20079.ll9
-rw-r--r--test/Transforms/InstCombine/printf-1.ll8
-rw-r--r--test/Transforms/InstCombine/r600-intrinsics.ll47
-rw-r--r--test/Transforms/InstCombine/rem.ll11
-rw-r--r--test/Transforms/InstCombine/round.ll90
-rw-r--r--test/Transforms/InstCombine/select-2.ll10
-rw-r--r--test/Transforms/InstCombine/select-select.ll24
-rw-r--r--test/Transforms/InstCombine/select.ll213
-rw-r--r--test/Transforms/InstCombine/sext.ll4
-rw-r--r--test/Transforms/InstCombine/shift.ll94
-rw-r--r--test/Transforms/InstCombine/sign-test-and-or.ll38
-rw-r--r--test/Transforms/InstCombine/sincospi.ll9
-rw-r--r--test/Transforms/InstCombine/sprintf-1.ll8
-rw-r--r--test/Transforms/InstCombine/strchr-1.ll13
-rw-r--r--test/Transforms/InstCombine/strlen-1.ll12
-rw-r--r--test/Transforms/InstCombine/sub.ll74
-rw-r--r--test/Transforms/InstCombine/vec_demanded_elts.ll366
-rw-r--r--test/Transforms/InstCombine/vec_phi_extract.ll4
-rw-r--r--test/Transforms/InstCombine/vec_sext.ll23
-rw-r--r--test/Transforms/InstCombine/vec_shuffle.ll186
-rw-r--r--test/Transforms/InstCombine/zext-bool-add-sub.ll2
-rw-r--r--test/Transforms/InstCombine/zext.ll34
-rw-r--r--test/Transforms/InstMerge/ld_hoist_st_sink.ll84
-rw-r--r--test/Transforms/InstSimplify/2010-12-20-Distribute.ll62
-rw-r--r--test/Transforms/InstSimplify/apint-or.ll37
-rw-r--r--test/Transforms/InstSimplify/ashr-nop.ll10
-rw-r--r--test/Transforms/InstSimplify/compare.ll230
-rw-r--r--test/Transforms/InstSimplify/dead-code-removal.ll15
-rw-r--r--test/Transforms/InstSimplify/undef.ll7
-rw-r--r--test/Transforms/InstSimplify/vector_gep.ll49
-rw-r--r--test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll11
-rw-r--r--test/Transforms/Internalize/lists.ll11
-rw-r--r--test/Transforms/Internalize/local-visibility.ll25
-rw-r--r--test/Transforms/JumpThreading/phi-eq.ll2
-rw-r--r--test/Transforms/JumpThreading/pr15851_hang.ll22
-rw-r--r--test/Transforms/JumpThreading/select.ll2
-rw-r--r--test/Transforms/LICM/extra-copies.ll29
-rw-r--r--test/Transforms/LICM/hoist-bitcast-load.ll239
-rw-r--r--test/Transforms/LICM/hoist-deref-load.ll168
-rw-r--r--test/Transforms/LICM/lcssa-ssa-promoter.ll76
-rw-r--r--test/Transforms/LICM/scalar_promote.ll12
-rw-r--r--test/Transforms/LICM/sinking.ll104
-rw-r--r--test/Transforms/LICM/volatile-alias.ll2
-rw-r--r--test/Transforms/LoadCombine/load-combine.ll190
-rw-r--r--test/Transforms/LoopIdiom/R600/lit.local.cfg3
-rw-r--r--test/Transforms/LoopIdiom/R600/popcnt.ll104
-rw-r--r--test/Transforms/LoopIdiom/X86/lit.local.cfg3
-rw-r--r--test/Transforms/LoopRotate/PhiSelfReference-1.ll39
-rw-r--r--test/Transforms/LoopRotate/PhiSelfRefernce-1.ll39
-rw-r--r--test/Transforms/LoopRotate/dbgvalue.ll6
-rw-r--r--test/Transforms/LoopRotate/preserve-loop-simplify.ll65
-rw-r--r--test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll2
-rw-r--r--test/Transforms/LoopSimplify/ashr-crash.ll80
-rw-r--r--test/Transforms/LoopSimplify/notify-scev.ll110
-rw-r--r--test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg4
-rw-r--r--test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll33
-rw-r--r--test/Transforms/LoopStrengthReduce/AArch64/lsr-memset.ll101
-rw-r--r--test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll70
-rw-r--r--test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll2
-rw-r--r--test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll3
-rw-r--r--test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg3
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll2
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/lit.local.cfg3
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll50
-rw-r--r--test/Transforms/LoopStrengthReduce/X86/pr17473.ll2
-rw-r--r--test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll6
-rw-r--r--test/Transforms/LoopStrengthReduce/lsr-expand-quadratic.ll2
-rw-r--r--test/Transforms/LoopStrengthReduce/pr18165.ll2
-rw-r--r--test/Transforms/LoopUnroll/PowerPC/lit.local.cfg3
-rw-r--r--test/Transforms/LoopUnroll/X86/lit.local.cfg3
-rw-r--r--test/Transforms/LoopUnroll/X86/partial.ll127
-rw-r--r--test/Transforms/LoopUnroll/loop-remarks.ll25
-rw-r--r--test/Transforms/LoopUnroll/pr18861.ll43
-rw-r--r--test/Transforms/LoopUnroll/runtime-loop.ll6
-rw-r--r--test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll69
-rw-r--r--test/Transforms/LoopUnroll/unroll-pragmas.ll289
-rw-r--r--test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll42
-rw-r--r--test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll42
-rw-r--r--test/Transforms/LoopVectorize/AArch64/gather-cost.ll85
-rw-r--r--test/Transforms/LoopVectorize/AArch64/lit.local.cfg5
-rw-r--r--test/Transforms/LoopVectorize/ARM/arm-unroll.ll39
-rw-r--r--test/Transforms/LoopVectorize/ARM/lit.local.cfg3
-rw-r--r--test/Transforms/LoopVectorize/PowerPC/lit.local.cfg3
-rw-r--r--test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll51
-rw-r--r--test/Transforms/LoopVectorize/X86/already-vectorized.ll6
-rw-r--r--test/Transforms/LoopVectorize/X86/avx512.ll35
-rw-r--r--test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll39
-rw-r--r--test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll40
-rw-r--r--test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll25
-rw-r--r--test/Transforms/LoopVectorize/X86/lit.local.cfg3
-rw-r--r--test/Transforms/LoopVectorize/X86/metadata-enable.ll176
-rw-r--r--test/Transforms/LoopVectorize/X86/small-size.ll27
-rw-r--r--test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll26
-rw-r--r--test/Transforms/LoopVectorize/X86/unroll-small-loops.ll72
-rw-r--r--test/Transforms/LoopVectorize/X86/vect.omp.force.ll93
-rw-r--r--test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll73
-rw-r--r--test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll2
-rw-r--r--test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll161
-rw-r--r--test/Transforms/LoopVectorize/X86/vectorization-remarks.ll74
-rw-r--r--test/Transforms/LoopVectorize/XCore/lit.local.cfg3
-rw-r--r--test/Transforms/LoopVectorize/calloc.ll2
-rw-r--r--test/Transforms/LoopVectorize/control-flow.ll78
-rw-r--r--test/Transforms/LoopVectorize/flags.ll26
-rw-r--r--test/Transforms/LoopVectorize/float-reduction.ll2
-rw-r--r--test/Transforms/LoopVectorize/gcc-examples.ll4
-rw-r--r--test/Transforms/LoopVectorize/global_alias.ll8
-rw-r--r--test/Transforms/LoopVectorize/if-conversion.ll2
-rw-r--r--test/Transforms/LoopVectorize/if-pred-stores.ll126
-rw-r--r--test/Transforms/LoopVectorize/increment.ll2
-rw-r--r--test/Transforms/LoopVectorize/induction.ll65
-rw-r--r--test/Transforms/LoopVectorize/intrinsic.ll102
-rw-r--r--test/Transforms/LoopVectorize/metadata-unroll.ll2
-rw-r--r--test/Transforms/LoopVectorize/metadata-width.ll2
-rw-r--r--test/Transforms/LoopVectorize/metadata.ll44
-rw-r--r--test/Transforms/LoopVectorize/multi-use-reduction-bug.ll2
-rw-r--r--test/Transforms/LoopVectorize/multiple-address-spaces.ll2
-rw-r--r--test/Transforms/LoopVectorize/no_array_bounds.ll101
-rw-r--r--test/Transforms/LoopVectorize/no_switch.ll86
-rw-r--r--test/Transforms/LoopVectorize/ptr_loops.ll2
-rw-r--r--test/Transforms/LoopVectorize/runtime-check-address-space.ll2
-rw-r--r--test/Transforms/LoopVectorize/runtime-check-readonly.ll15
-rw-r--r--test/Transforms/LoopVectorize/store-shuffle-bug.ll19
-rw-r--r--test/Transforms/LoopVectorize/tbaa-nodep.ll102
-rw-r--r--test/Transforms/LoopVectorize/unroll_novec.ll12
-rw-r--r--test/Transforms/LoopVectorize/value-ptr-bug.ll2
-rw-r--r--test/Transforms/LoopVectorize/vect.omp.persistence.ll88
-rw-r--r--test/Transforms/LoopVectorize/vect.stats.ll65
-rw-r--r--test/Transforms/LoopVectorize/vectorize-once.ll6
-rw-r--r--test/Transforms/LoopVectorize/version-mem-access.ll87
-rw-r--r--test/Transforms/LowerAtomic/atomic-swap.ll17
-rw-r--r--test/Transforms/LowerExpectIntrinsic/basic.ll29
-rw-r--r--test/Transforms/LowerInvoke/2004-02-29-PHICrash.ll15
-rw-r--r--test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHI.ll17
-rw-r--r--test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHIUse.ll15
-rw-r--r--test/Transforms/LowerInvoke/2008-02-14-CritEdgePhiCrash.ll14
-rw-r--r--test/Transforms/LowerInvoke/basictest.ll30
-rw-r--r--test/Transforms/LowerInvoke/lowerinvoke.ll25
-rw-r--r--test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll27
-rw-r--r--test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll41
-rw-r--r--test/Transforms/LowerSwitch/2014-06-23-PHIlowering.ll40
-rw-r--r--test/Transforms/LowerSwitch/feature.ll114
-rw-r--r--test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll4
-rw-r--r--test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll2
-rw-r--r--test/Transforms/MemCpyOpt/capturing-func.ll22
-rw-r--r--test/Transforms/MemCpyOpt/form-memset.ll12
-rw-r--r--test/Transforms/MemCpyOpt/loadstore-sret.ll2
-rw-r--r--test/Transforms/MemCpyOpt/memcpy-undef.ll46
-rw-r--r--test/Transforms/MemCpyOpt/memcpy.ll21
-rw-r--r--test/Transforms/MemCpyOpt/sret.ll2
-rw-r--r--test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll91
-rw-r--r--test/Transforms/MergeFunc/crash.ll14
-rw-r--r--test/Transforms/MergeFunc/functions.ll27
-rw-r--r--test/Transforms/MergeFunc/inttoptr-address-space.ll6
-rw-r--r--test/Transforms/MergeFunc/inttoptr.ll14
-rw-r--r--test/Transforms/MergeFunc/mergefunc-struct-return.ll40
-rw-r--r--test/Transforms/MergeFunc/ranges.ll43
-rw-r--r--test/Transforms/MetaRenamer/metarenamer.ll4
-rw-r--r--test/Transforms/ObjCARC/allocas.ll4
-rw-r--r--test/Transforms/ObjCARC/contract-end-of-use-list.ll30
-rw-r--r--test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll4
-rw-r--r--test/Transforms/Reassociate/2002-05-15-AgressiveSubMove.ll15
-rw-r--r--test/Transforms/Reassociate/2002-05-15-MissedTree.ll14
-rw-r--r--test/Transforms/Reassociate/2002-05-15-SubReassociate.ll34
-rw-r--r--test/Transforms/Reassociate/2002-05-15-SubReassociate2.ll13
-rw-r--r--test/Transforms/Reassociate/2005-09-01-ArrayOutOfBounds.ll43
-rw-r--r--test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll16
-rw-r--r--test/Transforms/Reassociate/basictest.ll201
-rw-r--r--test/Transforms/Reassociate/fp-commute.ll21
-rw-r--r--test/Transforms/Reassociate/inverses.ll12
-rw-r--r--test/Transforms/Reassociate/looptest.ll1
-rw-r--r--test/Transforms/Reassociate/mightymul.ll4
-rw-r--r--test/Transforms/Reassociate/multistep.ll1
-rw-r--r--test/Transforms/Reassociate/negation.ll36
-rw-r--r--test/Transforms/Reassociate/otherops.ll42
-rw-r--r--test/Transforms/Reassociate/shift-factor.ll20
-rw-r--r--test/Transforms/Reassociate/subtest.ll31
-rw-r--r--test/Transforms/Reassociate/subtest2.ll13
-rw-r--r--test/Transforms/SCCP/atomic.ll9
-rw-r--r--test/Transforms/SLPVectorizer/AArch64/lit.local.cfg2
-rw-r--r--test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll18
-rw-r--r--test/Transforms/SLPVectorizer/ARM/lit.local.cfg3
-rw-r--r--test/Transforms/SLPVectorizer/R600/lit.local.cfg3
-rw-r--r--test/Transforms/SLPVectorizer/X86/addsub.ll181
-rw-r--r--test/Transforms/SLPVectorizer/X86/align.ll27
-rw-r--r--test/Transforms/SLPVectorizer/X86/call.ll128
-rw-r--r--test/Transforms/SLPVectorizer/X86/consecutive-access.ll175
-rw-r--r--test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll31
-rw-r--r--test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll65
-rw-r--r--test/Transforms/SLPVectorizer/X86/cse.ll30
-rw-r--r--test/Transforms/SLPVectorizer/X86/extractcost.ll30
-rw-r--r--test/Transforms/SLPVectorizer/X86/gep.ll41
-rw-r--r--test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll85
-rw-r--r--test/Transforms/SLPVectorizer/X86/intrinsic.ll386
-rw-r--r--test/Transforms/SLPVectorizer/X86/lit.local.cfg3
-rw-r--r--test/Transforms/SLPVectorizer/X86/metadata.ll61
-rw-r--r--test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll36
-rw-r--r--test/Transforms/SLPVectorizer/X86/phi.ll2
-rw-r--r--test/Transforms/SLPVectorizer/X86/pr19657.ll73
-rw-r--r--test/Transforms/SLPVectorizer/X86/tiny-tree.ll15
-rw-r--r--test/Transforms/SLPVectorizer/X86/value-bug.ll80
-rw-r--r--test/Transforms/SLPVectorizer/XCore/lit.local.cfg3
-rw-r--r--test/Transforms/SROA/address-spaces.ll68
-rw-r--r--test/Transforms/SROA/basictest.ll86
-rw-r--r--test/Transforms/SROA/slice-order-independence.ll37
-rw-r--r--test/Transforms/SROA/slice-width.ll25
-rw-r--r--test/Transforms/SROA/vector-promotion.ll47
-rw-r--r--test/Transforms/SampleProfile/Inputs/bad_discriminator_value.prof2
-rw-r--r--test/Transforms/SampleProfile/Inputs/bad_fn_header.prof3
-rw-r--r--test/Transforms/SampleProfile/Inputs/bad_line_values.prof2
-rw-r--r--test/Transforms/SampleProfile/Inputs/bad_mangle.prof3
-rw-r--r--test/Transforms/SampleProfile/Inputs/bad_sample_line.prof3
-rw-r--r--test/Transforms/SampleProfile/Inputs/bad_samples.prof2
-rw-r--r--test/Transforms/SampleProfile/Inputs/branch.prof5
-rw-r--r--test/Transforms/SampleProfile/Inputs/calls.prof10
-rw-r--r--test/Transforms/SampleProfile/Inputs/discriminator.prof8
-rw-r--r--test/Transforms/SampleProfile/Inputs/propagate.prof17
-rw-r--r--test/Transforms/SampleProfile/Inputs/syntax.prof3
-rw-r--r--test/Transforms/SampleProfile/branch.ll18
-rw-r--r--test/Transforms/SampleProfile/calls.ll116
-rw-r--r--test/Transforms/SampleProfile/discriminator.ll90
-rw-r--r--test/Transforms/SampleProfile/propagate.ll243
-rw-r--r--test/Transforms/SampleProfile/syntax.ll20
-rw-r--r--test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll6
-rw-r--r--test/Transforms/ScalarRepl/vector_memcpy.ll10
-rw-r--r--test/Transforms/Scalarizer/basic.ll451
-rw-r--r--test/Transforms/Scalarizer/dbginfo.ll86
-rw-r--r--test/Transforms/Scalarizer/no-data-layout.ll25
-rw-r--r--test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg3
-rw-r--r--test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll196
-rw-r--r--test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll236
-rw-r--r--test/Transforms/SimplifyCFG/PR17073.ll73
-rw-r--r--test/Transforms/SimplifyCFG/SPARC/lit.local.cfg3
-rw-r--r--test/Transforms/SimplifyCFG/X86/lit.local.cfg3
-rw-r--r--test/Transforms/SimplifyCFG/X86/switch-table-bug.ll41
-rw-r--r--test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll167
-rw-r--r--test/Transforms/SimplifyCFG/basictest.ll30
-rw-r--r--test/Transforms/SimplifyCFG/extract-cost.ll22
-rw-r--r--test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll40
-rw-r--r--test/Transforms/SimplifyCFG/preserve-branchweights.ll84
-rw-r--r--test/Transforms/SimplifyCFG/speculate-math.ll58
-rw-r--r--test/Transforms/SimplifyCFG/speculate-vector-ops.ll60
-rw-r--r--test/Transforms/SimplifyCFG/trapping-load-unreachable.ll2
-rw-r--r--test/Transforms/Sink/basic.ll79
-rw-r--r--test/Transforms/StripSymbols/2010-08-25-crash.ll2
-rw-r--r--test/Transforms/StripSymbols/strip-dead-debug-info.ll2
-rw-r--r--test/Transforms/TailCallElim/basic.ll45
-rw-r--r--test/Transforms/TailDup/X86/lit.local.cfg3
-rw-r--r--test/Transforms/TailDup/lit.local.cfg3
-rw-r--r--test/Unit/lit.cfg13
-rw-r--r--test/Unit/lit.site.cfg.in1
-rw-r--r--test/Verifier/2010-08-07-PointerIntrinsic.ll5
-rw-r--r--test/Verifier/alias.ll27
-rw-r--r--test/Verifier/aliasing-chain.ll6
-rw-r--r--test/Verifier/bitcast-address-space-nested-global-cycle.ll4
-rw-r--r--test/Verifier/bitcast-address-space-nested-global.ll4
-rw-r--r--test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll5
-rw-r--r--test/Verifier/bitcast-address-space-through-constant-inttoptr.ll4
-rw-r--r--test/Verifier/bitcast-address-space-through-gep-2.ll4
-rw-r--r--test/Verifier/bitcast-address-space-through-gep.ll4
-rw-r--r--test/Verifier/bitcast-address-space-through-inttoptr.ll3
-rw-r--r--test/Verifier/bitcast-address-spaces.ll3
-rw-r--r--test/Verifier/bitcast-alias-address-space.ll4
-rw-r--r--test/Verifier/bitcast-vector-pointer-as.ll4
-rw-r--r--test/Verifier/comdat.ll5
-rw-r--r--test/Verifier/comdat2.ll5
-rw-r--r--test/Verifier/global-ctors.ll11
-rwxr-xr-xtest/Verifier/inalloca-vararg.ll9
-rw-r--r--test/Verifier/inalloca1.ll22
-rw-r--r--test/Verifier/inalloca2.ll39
-rw-r--r--test/Verifier/inalloca3.ll13
-rw-r--r--test/Verifier/jumptable.ll8
-rw-r--r--test/Verifier/musttail-invalid.ll82
-rw-r--r--test/Verifier/musttail-valid.ll16
-rw-r--r--test/Verifier/range-1.ll2
-rw-r--r--test/Verifier/range-2.ll30
-rw-r--r--test/Verifier/recursive-type-1.ll12
-rw-r--r--test/Verifier/recursive-type-2.ll14
-rw-r--r--test/Verifier/recursive-type-3.ll11
-rw-r--r--test/Verifier/sret.ll7
-rw-r--r--test/Verifier/varargs-intrinsic.ll6
-rw-r--r--test/lit.cfg87
-rw-r--r--test/lit.site.cfg.in2
-rw-r--r--test/tools/llvm-cov/Inputs/copy_block_helper.gcdabin0 -> 432 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/copy_block_helper.gcnobin0 -> 1140 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/range_based_for.gcdabin0 -> 164 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/range_based_for.gcnobin0 -> 552 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/test.cpp5
-rw-r--r--test/tools/llvm-cov/Inputs/test.cpp.gcov82
-rw-r--r--test/tools/llvm-cov/Inputs/test.gcdabin824 -> 904 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/test.gcnobin3112 -> 3552 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/test.h3
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a.cpp.gcov111
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a.h.gcov10
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a_-b.cpp.gcov134
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a_-b.h.gcov12
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.cpp.gcov160
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.h.gcov14
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a_-b_-u.cpp.gcov160
-rw-r--r--test/tools/llvm-cov/Inputs/test_-a_-b_-u.h.gcov14
-rw-r--r--test/tools/llvm-cov/Inputs/test_-b.output13
-rw-r--r--test/tools/llvm-cov/Inputs/test_-b_-f.output65
-rw-r--r--test/tools/llvm-cov/Inputs/test_-f.output38
-rw-r--r--test/tools/llvm-cov/Inputs/test_file_checksum_fail.gcdabin0 -> 825 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/test_func_checksum_fail.gcdabin0 -> 825 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/test_long_file_names.output8
-rw-r--r--test/tools/llvm-cov/Inputs/test_long_paths.output8
-rw-r--r--test/tools/llvm-cov/Inputs/test_missing.cpp.gcov77
-rw-r--r--test/tools/llvm-cov/Inputs/test_missing.h.gcov6
-rw-r--r--test/tools/llvm-cov/Inputs/test_missing.output8
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_gcda.cpp.gcov79
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_gcda.h.gcov8
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_gcda.output8
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_options.cpp.gcov79
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_options.h.gcov8
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_options.output8
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_output.output6
-rw-r--r--test/tools/llvm-cov/Inputs/test_no_preserve_paths.output8
-rw-r--r--test/tools/llvm-cov/Inputs/test_objdir.cpp.gcov79
-rw-r--r--test/tools/llvm-cov/Inputs/test_objdir.h.gcov8
-rw-r--r--test/tools/llvm-cov/Inputs/test_paths.cpp.gcov79
-rw-r--r--test/tools/llvm-cov/Inputs/test_paths.gcdabin0 -> 904 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/test_paths.gcnobin0 -> 4476 bytes
-rw-r--r--test/tools/llvm-cov/Inputs/test_paths.h.gcov8
-rw-r--r--test/tools/llvm-cov/Inputs/test_preserve_paths.output8
-rw-r--r--test/tools/llvm-cov/Inputs/test_read_fail.gcnobin111 -> 71 bytes
-rw-r--r--test/tools/llvm-cov/copy_block_helper.m32
-rw-r--r--test/tools/llvm-cov/lit.local.cfg2
-rw-r--r--test/tools/llvm-cov/llvm-cov.test115
-rw-r--r--test/tools/llvm-cov/range_based_for.cpp29
-rw-r--r--test/tools/llvm-objdump/Inputs/export.dll.coff-i386bin0 -> 1052 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/file-aux-record.yaml21
-rwxr-xr-xtest/tools/llvm-objdump/Inputs/file.obj.coff-armbin0 -> 374 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/large-bss.obj.coff-i386bin0 -> 270 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/many-relocs.obj-i386bin0 -> 305 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/nop.exe.coff-i386bin7680 -> 6144 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/out-of-section-sym.elf-i386bin0 -> 4450 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/out-of-section-sym.s15
-rw-r--r--test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64.exebin0 -> 2560 bytes
-rw-r--r--test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64.obj (renamed from test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64)bin698 -> 698 bytes
-rw-r--r--test/tools/llvm-objdump/coff-file.test6
-rw-r--r--test/tools/llvm-objdump/coff-large-bss.test6
-rw-r--r--test/tools/llvm-objdump/coff-many-relocs.test14
-rw-r--r--test/tools/llvm-objdump/coff-non-null-terminated-file.test5
-rw-r--r--test/tools/llvm-objdump/coff-private-headers.test70
-rw-r--r--test/tools/llvm-objdump/hex-relocation-addr.test17
-rw-r--r--test/tools/llvm-objdump/lit.local.cfg3
-rw-r--r--test/tools/llvm-objdump/out-of-section-sym.test13
-rw-r--r--test/tools/llvm-objdump/win64-unwind-data.test155
-rw-r--r--test/tools/llvm-profdata/Inputs/bad-hash.profdata4
-rw-r--r--test/tools/llvm-profdata/Inputs/bar3-1.profdata6
-rw-r--r--test/tools/llvm-profdata/Inputs/c-general.profdatabin0 -> 1384 bytes
-rw-r--r--test/tools/llvm-profdata/Inputs/empty.profdata0
-rw-r--r--test/tools/llvm-profdata/Inputs/extra-word.profdata2
-rw-r--r--test/tools/llvm-profdata/Inputs/foo3-1.profdata6
-rw-r--r--test/tools/llvm-profdata/Inputs/foo3-2.profdata6
-rw-r--r--test/tools/llvm-profdata/Inputs/foo3bar3-1.profdata13
-rw-r--r--test/tools/llvm-profdata/Inputs/foo3bar3-2.profdata13
-rw-r--r--test/tools/llvm-profdata/Inputs/foo4-1.profdata7
-rw-r--r--test/tools/llvm-profdata/Inputs/foo4-2.profdata7
-rw-r--r--test/tools/llvm-profdata/Inputs/invalid-count-later.profdata4
-rw-r--r--test/tools/llvm-profdata/Inputs/no-counts.profdata3
-rw-r--r--test/tools/llvm-profdata/Inputs/overflow.profdata4
-rw-r--r--test/tools/llvm-profdata/c-general.test24
-rw-r--r--test/tools/llvm-profdata/errors.test16
-rw-r--r--test/tools/llvm-profdata/raw-32-bits-be.test42
-rw-r--r--test/tools/llvm-profdata/raw-32-bits-le.test42
-rw-r--r--test/tools/llvm-profdata/raw-64-bits-be.test42
-rw-r--r--test/tools/llvm-profdata/raw-64-bits-le.test42
-rw-r--r--test/tools/llvm-profdata/raw-magic-but-no-header.test6
-rw-r--r--test/tools/llvm-profdata/raw-two-profiles.test64
-rw-r--r--test/tools/llvm-profdata/simple.test77
-rw-r--r--test/tools/llvm-readobj/ARM/attributes.s287
-rw-r--r--test/tools/llvm-readobj/ARM/lit.local.cfg3
-rw-r--r--test/tools/llvm-readobj/ARM/unwind.s326
-rw-r--r--test/tools/llvm-readobj/Inputs/cxx-cli-aux.cpp2
-rw-r--r--test/tools/llvm-readobj/Inputs/cxx-cli-aux.obj.coff-i386bin0 -> 2682 bytes
-rwxr-xr-xtest/tools/llvm-readobj/Inputs/dynamic-table-exe.mipsbin0 -> 6333 bytes
-rwxr-xr-xtest/tools/llvm-readobj/Inputs/dynamic-table-exe.x86bin0 -> 6555 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/dynamic-table-so.mips (renamed from test/tools/llvm-readobj/Inputs/dynamic-table.mips)bin5395 -> 5395 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/dynamic-table.c6
-rw-r--r--test/tools/llvm-readobj/Inputs/file-aux-record.yaml21
-rw-r--r--test/tools/llvm-readobj/Inputs/file-multiple-aux-records.yaml21
-rwxr-xr-xtest/tools/llvm-readobj/Inputs/got-empty.exe.mipselbin0 -> 9400 bytes
-rwxr-xr-xtest/tools/llvm-readobj/Inputs/got-tls.so.elf-mips64elbin0 -> 7398 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-i368bin0 -> 1631 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-x86_64bin0 -> 1799 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-i368bin0 -> 2155 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-x86_64bin0 -> 2475 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/nop.exe.coff-x86-64bin0 -> 1024 bytes
-rwxr-xr-xtest/tools/llvm-readobj/Inputs/trivial.obj.coff-armbin0 -> 367 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/trivial.obj.coff-i386bin314 -> 350 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/trivial.obj.elf-mipselbin0 -> 629 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/zero-string-table.obj.coff-i386bin0 -> 704 bytes
-rw-r--r--test/tools/llvm-readobj/codeview-linetables.test282
-rw-r--r--test/tools/llvm-readobj/coff-file-sections-reading.test18
-rw-r--r--test/tools/llvm-readobj/coff-non-null-terminated-file.test20
-rw-r--r--test/tools/llvm-readobj/coff-zero-string-table.test8
-rw-r--r--test/tools/llvm-readobj/cxx-cli-aux.test42
-rw-r--r--test/tools/llvm-readobj/dynamic.test84
-rw-r--r--test/tools/llvm-readobj/file-headers.test19
-rw-r--r--test/tools/llvm-readobj/mips-got.test331
-rw-r--r--test/tools/llvm-readobj/peplus.test83
-rw-r--r--test/tools/llvm-readobj/program-headers.test28
-rw-r--r--test/tools/llvm-readobj/relocations.test52
-rw-r--r--test/tools/llvm-readobj/sections-ext.test79
-rw-r--r--test/tools/llvm-readobj/sections.test33
-rw-r--r--test/tools/llvm-readobj/symbols.test22
4117 files changed, 247125 insertions, 221032 deletions
diff --git a/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll b/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
index 563d3326367b..32d9930f4270 100644
--- a/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
+++ b/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.pci_device_id = type { i32, i32, i32, i32, i32, i32, i64 }
%struct.usb_bus = type { %struct.device* }
%struct.usb_hcd = type { %struct.usb_bus, i64, [0 x i64] }
-@uhci_pci_ids = external constant [1 x %struct.pci_device_id] ; <[1 x %struct.pci_device_id]*> [#uses=1]
+@uhci_pci_ids = constant [1 x %struct.pci_device_id] zeroinitializer
@__mod_pci_device_table = alias [1 x %struct.pci_device_id]* @uhci_pci_ids ; <[1 x %struct.pci_device_id]*> [#uses=0]
diff --git a/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll b/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
index 52d0af1b81ce..cd997ea52513 100644
--- a/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
+++ b/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.pci_device_id = type { i32, i32, i32, i32, i32, i32, i64 }
%struct.usb_bus = type { %struct.device* }
%struct.usb_hcd = type { %struct.usb_bus, [0 x i64] }
-@pci_ids = external constant [1 x %struct.pci_device_id] ; <[1 x %struct.pci_device_id]*> [#uses=1]
+@pci_ids = constant [1 x %struct.pci_device_id] zeroinitializer
@__mod_pci_device_table = alias [1 x %struct.pci_device_id]* @pci_ids ; <[1 x %struct.pci_device_id]*> [#uses=0]
diff --git a/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll b/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll
new file mode 100644
index 000000000000..bc2512eca0c9
--- /dev/null
+++ b/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -basicaa -gvn -S | FileCheck %s
+
+; PR15967
+; BasicAA claims no alias when there is (due to a problem when the MaxLookup
+; limit was reached).
+
+target datalayout = "e"
+
+%struct.foo = type { i32, i32 }
+
+define i32 @main() {
+ %t = alloca %struct.foo, align 4
+ %1 = getelementptr inbounds %struct.foo* %t, i32 0, i32 0
+ store i32 1, i32* %1, align 4
+ %2 = getelementptr inbounds %struct.foo* %t, i64 1
+ %3 = bitcast %struct.foo* %2 to i8*
+ %4 = getelementptr inbounds i8* %3, i32 -1
+ store i8 0, i8* %4
+ %5 = getelementptr inbounds i8* %4, i32 -1
+ store i8 0, i8* %5
+ %6 = getelementptr inbounds i8* %5, i32 -1
+ store i8 0, i8* %6
+ %7 = getelementptr inbounds i8* %6, i32 -1
+ store i8 0, i8* %7
+ %8 = getelementptr inbounds i8* %7, i32 -1
+ store i8 0, i8* %8
+ %9 = getelementptr inbounds i8* %8, i32 -1
+ store i8 0, i8* %9
+ %10 = getelementptr inbounds i8* %9, i32 -1
+ store i8 0, i8* %10
+ %11 = getelementptr inbounds i8* %10, i32 -1
+ store i8 0, i8* %11
+ %12 = load i32* %1, align 4
+ ret i32 %12
+; CHECK: ret i32 %12
+}
diff --git a/test/Analysis/BasicAA/cs-cs.ll b/test/Analysis/BasicAA/cs-cs.ll
new file mode 100644
index 000000000000..693634c0414d
--- /dev/null
+++ b/test/Analysis/BasicAA/cs-cs.ll
@@ -0,0 +1,236 @@
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
+target triple = "arm-apple-ios"
+
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+
+declare void @a_readonly_func(i8 *) noinline nounwind readonly
+
+define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
+entry:
+ %q = getelementptr i8* %p, i64 16
+ %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
+ call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+ %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
+ %c = add <8 x i16> %a, %b
+ ret <8 x i16> %c
+
+; CHECK-LABEL: Function: test1:
+
+; CHECK: NoAlias: i8* %p, i8* %q
+; CHECK: Just Ref: Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: Both ModRef: Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: Just Ref: Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: Ptr: i8* %q <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
+; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
+}
+
+define void @test2(i8* %P, i8* %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2:
+
+; CHECK: MayAlias: i8* %P, i8* %Q
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2a(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2a:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2b(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 12
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2b:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2c(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 11
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2c:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2d(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 -12
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2d:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test2e(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ %R = getelementptr i8* %P, i64 -11
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test2e:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: NoAlias: i8* %P, i8* %R
+; CHECK: NoAlias: i8* %Q, i8* %R
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test3(i8* %P, i8* %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test3:
+
+; CHECK: MayAlias: i8* %P, i8* %Q
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+}
+
+define void @test3a(i8* noalias %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test3a:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
+}
+
+define void @test4(i8* %P, i8* noalias %Q) nounwind ssp {
+ tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test4:
+
+; CHECK: NoAlias: i8* %P, i8* %Q
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+; CHECK: NoModRef: Ptr: i8* %Q <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
+}
+
+define void @test5(i8* %P, i8* %Q, i8* %R) nounwind ssp {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test5:
+
+; CHECK: MayAlias: i8* %P, i8* %Q
+; CHECK: MayAlias: i8* %P, i8* %R
+; CHECK: MayAlias: i8* %Q, i8* %R
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+}
+
+define void @test6(i8* %P) nounwind ssp {
+ call void @llvm.memset.p0i8.i64(i8* %P, i8 -51, i64 32, i32 8, i1 false)
+ call void @a_readonly_func(i8* %P)
+ ret void
+
+; CHECK-LABEL: Function: test6:
+
+; CHECK: Just Mod: Ptr: i8* %P <-> call void @llvm.memset.p0i8.i64(i8* %P, i8 -51, i64 32, i32 8, i1 false)
+; CHECK: Just Ref: Ptr: i8* %P <-> call void @a_readonly_func(i8* %P)
+; CHECK: Just Mod: call void @llvm.memset.p0i8.i64(i8* %P, i8 -51, i64 32, i32 8, i1 false) <-> call void @a_readonly_func(i8* %P)
+; CHECK: Just Ref: call void @a_readonly_func(i8* %P) <-> call void @llvm.memset.p0i8.i64(i8* %P, i8 -51, i64 32, i32 8, i1 false)
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Analysis/BasicAA/noalias-bugs.ll b/test/Analysis/BasicAA/noalias-bugs.ll
index c02a302c1950..2bcc14fd9397 100644
--- a/test/Analysis/BasicAA/noalias-bugs.ll
+++ b/test/Analysis/BasicAA/noalias-bugs.ll
@@ -1,6 +1,6 @@
; RUN: opt -S -basicaa -dse < %s | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; We incorrectly returned noalias in the example below for "ptr.64" and
diff --git a/test/Analysis/BasicAA/pr18573.ll b/test/Analysis/BasicAA/pr18573.ll
new file mode 100644
index 000000000000..1d2a316b6ffe
--- /dev/null
+++ b/test/Analysis/BasicAA/pr18573.ll
@@ -0,0 +1,53 @@
+; RUN: opt %s -O2 -S | FileCheck %s
+
+; Check that the second llvm.x86.avx2.gather.d.ps.256 intrinsic is not eliminated, since the gather and the store both access memory based on %arr.ptr.
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readonly
+declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>, <8 x float>, i8) #0
+
+; Function Attrs: nounwind
+define <8 x float> @foo1(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
+allocas:
+ %vix = load <8 x i32>* %vix.ptr, align 4
+ %t1.ptr = getelementptr i8* %arr.ptr, i8 4
+
+ %v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
+ store i8 1, i8* %t1.ptr, align 4
+
+ %v2 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
+ %res = fadd <8 x float> %v1, %v2
+
+ ret <8 x float> %res
+}
+; CHECK: foo1
+; CHECK: llvm.x86.avx2.gather.d.ps.256
+; CHECK: store
+; CHECK: llvm.x86.avx2.gather.d.ps.256
+
+; Check that the second gather is eliminated, since the gather and the store access memory through different non-aliasing pointers.
+
+; Function Attrs: nounwind
+define <8 x float> @foo2(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
+allocas:
+ %vix = load <8 x i32>* %vix.ptr, align 4
+ %t1.ptr = getelementptr i8* %arr.ptr, i8 4
+
+ %v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
+ store i8 1, i8* %t2.ptr, align 4
+
+ %v2 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
+ %res = fadd <8 x float> %v1, %v2
+
+ ret <8 x float> %res
+}
+; CHECK: foo2
+; CHECK: llvm.x86.avx2.gather.d.ps.256
+; CHECK: store
+; CHECK-NOT: llvm.x86.avx2.gather.d.ps.256
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind "target-cpu"="corei7-avx" "target-features"="+avx2,+popcnt,+cmov,+f16c,+rdrnd,+fma" }
+attributes #2 = { nounwind }
+
diff --git a/test/Analysis/BlockFrequencyInfo/bad_input.ll b/test/Analysis/BlockFrequencyInfo/bad_input.ll
new file mode 100644
index 000000000000..bcdc1e6f0bc0
--- /dev/null
+++ b/test/Analysis/BlockFrequencyInfo/bad_input.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -analyze -block-freq | FileCheck %s
+
+declare void @g(i32 %x)
+
+; CHECK-LABEL: Printing analysis {{.*}} for function 'branch_weight_0':
+; CHECK-NEXT: block-frequency-info: branch_weight_0
+define void @branch_weight_0(i32 %a) {
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+entry:
+ br label %for.body
+
+; Check that we get 1,4 instead of 0,3.
+; CHECK-NEXT: for.body: float = 4.0,
+for.body:
+ %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ call void @g(i32 %i)
+ %inc = add i32 %i, 1
+ %cmp = icmp ugt i32 %inc, %a
+ br i1 %cmp, label %for.end, label %for.body, !prof !0
+
+; CHECK-NEXT: for.end: float = 1.0, int = [[ENTRY]]
+for.end:
+ ret void
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 0, i32 3}
+
+; CHECK-LABEL: Printing analysis {{.*}} for function 'infinite_loop'
+; CHECK-NEXT: block-frequency-info: infinite_loop
+define void @infinite_loop(i1 %x) {
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+entry:
+ br i1 %x, label %for.body, label %for.end, !prof !1
+
+; Check that the loop scale maxes out at 4096, giving 2048 here.
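+; (The entry block branches to for.body with probability 1/2, and
+; 1/2 * 4096 = 2048.)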
+; CHECK-NEXT: for.body: float = 2048.0,
+for.body:
+ %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ call void @g(i32 %i)
+ %inc = add i32 %i, 1
+ br label %for.body
+
+; Check that the exit weight is half of entry, since half is lost in the
+; infinite loop above.
+; CHECK-NEXT: for.end: float = 0.5,
+for.end:
+ ret void
+}
+
+!1 = metadata !{metadata !"branch_weights", i32 1, i32 1}
diff --git a/test/Analysis/BlockFrequencyInfo/basic.ll b/test/Analysis/BlockFrequencyInfo/basic.ll
index ce29fb5ce1ba..006e6ab4d74e 100644
--- a/test/Analysis/BlockFrequencyInfo/basic.ll
+++ b/test/Analysis/BlockFrequencyInfo/basic.ll
@@ -1,13 +1,14 @@
; RUN: opt < %s -analyze -block-freq | FileCheck %s
define i32 @test1(i32 %i, i32* %a) {
-; CHECK: Printing analysis {{.*}} for function 'test1'
-; CHECK: entry = 1.0
+; CHECK-LABEL: Printing analysis {{.*}} for function 'test1':
+; CHECK-NEXT: block-frequency-info: test1
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br label %body
; Loop backedges are weighted and thus their bodies have a greater frequency.
-; CHECK: body = 32.0
+; CHECK-NEXT: body: float = 32.0,
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
@@ -18,29 +19,29 @@ body:
%exitcond = icmp eq i32 %next, %i
br i1 %exitcond, label %exit, label %body
-; CHECK: exit = 1.0
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
ret i32 %sum
}
define i32 @test2(i32 %i, i32 %a, i32 %b) {
-; CHECK: Printing analysis {{.*}} for function 'test2'
-; CHECK: entry = 1.0
+; CHECK-LABEL: Printing analysis {{.*}} for function 'test2':
+; CHECK-NEXT: block-frequency-info: test2
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
%cond = icmp ult i32 %i, 42
br i1 %cond, label %then, label %else, !prof !0
; The 'then' branch is predicted more likely via branch weight metadata.
-; CHECK: then = 0.94116
+; CHECK-NEXT: then: float = 0.9411{{[0-9]*}},
then:
br label %exit
-; CHECK: else = 0.05877
+; CHECK-NEXT: else: float = 0.05882{{[0-9]*}},
else:
br label %exit
-; FIXME: It may be a bug that we don't sum back to 1.0.
-; CHECK: exit = 0.99993
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
%result = phi i32 [ %a, %then ], [ %b, %else ]
ret i32 %result
@@ -49,37 +50,37 @@ exit:
!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
define i32 @test3(i32 %i, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
-; CHECK: Printing analysis {{.*}} for function 'test3'
-; CHECK: entry = 1.0
+; CHECK-LABEL: Printing analysis {{.*}} for function 'test3':
+; CHECK-NEXT: block-frequency-info: test3
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
switch i32 %i, label %case_a [ i32 1, label %case_b
i32 2, label %case_c
i32 3, label %case_d
i32 4, label %case_e ], !prof !1
-; CHECK: case_a = 0.04998
+; CHECK-NEXT: case_a: float = 0.05,
case_a:
br label %exit
-; CHECK: case_b = 0.04998
+; CHECK-NEXT: case_b: float = 0.05,
case_b:
br label %exit
; The 'case_c' branch is predicted more likely via branch weight metadata.
-; CHECK: case_c = 0.79998
+; CHECK-NEXT: case_c: float = 0.8,
case_c:
br label %exit
-; CHECK: case_d = 0.04998
+; CHECK-NEXT: case_d: float = 0.05,
case_d:
br label %exit
-; CHECK: case_e = 0.04998
+; CHECK-NEXT: case_e: float = 0.05,
case_e:
br label %exit
-; FIXME: It may be a bug that we don't sum back to 1.0.
-; CHECK: exit = 0.99993
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
%result = phi i32 [ %a, %case_a ],
[ %b, %case_b ],
@@ -91,44 +92,50 @@ exit:
!1 = metadata !{metadata !"branch_weights", i32 4, i32 4, i32 64, i32 4, i32 4}
-; CHECK: Printing analysis {{.*}} for function 'nested_loops'
-; CHECK: entry = 1.0
-; This test doesn't seem to be assigning sensible frequencies to nested loops.
define void @nested_loops(i32 %a) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'nested_loops':
+; CHECK-NEXT: block-frequency-info: nested_loops
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br label %for.cond1.preheader
+; CHECK-NEXT: for.cond1.preheader: float = 4001.0,
for.cond1.preheader:
%x.024 = phi i32 [ 0, %entry ], [ %inc12, %for.inc11 ]
br label %for.cond4.preheader
+; CHECK-NEXT: for.cond4.preheader: float = 16008001.0,
for.cond4.preheader:
%y.023 = phi i32 [ 0, %for.cond1.preheader ], [ %inc9, %for.inc8 ]
%add = add i32 %y.023, %x.024
br label %for.body6
+; CHECK-NEXT: for.body6: float = 64048012001.0,
for.body6:
%z.022 = phi i32 [ 0, %for.cond4.preheader ], [ %inc, %for.body6 ]
%add7 = add i32 %add, %z.022
- tail call void @g(i32 %add7) #2
+ tail call void @g(i32 %add7)
%inc = add i32 %z.022, 1
%cmp5 = icmp ugt i32 %inc, %a
br i1 %cmp5, label %for.inc8, label %for.body6, !prof !2
+; CHECK-NEXT: for.inc8: float = 16008001.0,
for.inc8:
%inc9 = add i32 %y.023, 1
%cmp2 = icmp ugt i32 %inc9, %a
br i1 %cmp2, label %for.inc11, label %for.cond4.preheader, !prof !2
+; CHECK-NEXT: for.inc11: float = 4001.0,
for.inc11:
%inc12 = add i32 %x.024, 1
%cmp = icmp ugt i32 %inc12, %a
br i1 %cmp, label %for.end13, label %for.cond1.preheader, !prof !2
+; CHECK-NEXT: for.end13: float = 1.0, int = [[ENTRY]]
for.end13:
ret void
}
-declare void @g(i32) #1
+declare void @g(i32)
!2 = metadata !{metadata !"branch_weights", i32 1, i32 4000}
diff --git a/test/Analysis/BlockFrequencyInfo/double_backedge.ll b/test/Analysis/BlockFrequencyInfo/double_backedge.ll
new file mode 100644
index 000000000000..df8217cfa1b1
--- /dev/null
+++ b/test/Analysis/BlockFrequencyInfo/double_backedge.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -analyze -block-freq | FileCheck %s
+
+define void @double_backedge(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'double_backedge':
+; CHECK-NEXT: block-frequency-info: double_backedge
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br label %loop
+
+loop:
+; CHECK-NEXT: loop: float = 10.0,
+ br i1 %x, label %exit, label %loop.1, !prof !0
+
+loop.1:
+; CHECK-NEXT: loop.1: float = 9.0,
+ br i1 %x, label %loop, label %loop.2, !prof !1
+
+loop.2:
+; CHECK-NEXT: loop.2: float = 5.0,
+ br label %loop
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!0 = metadata !{metadata !"branch_weights", i32 1, i32 9}
+!1 = metadata !{metadata !"branch_weights", i32 4, i32 5}
diff --git a/test/Analysis/BlockFrequencyInfo/double_exit.ll b/test/Analysis/BlockFrequencyInfo/double_exit.ll
new file mode 100644
index 000000000000..75f664d07ac4
--- /dev/null
+++ b/test/Analysis/BlockFrequencyInfo/double_exit.ll
@@ -0,0 +1,165 @@
+; RUN: opt < %s -analyze -block-freq | FileCheck %s
+
+; CHECK-LABEL: Printing analysis {{.*}} for function 'double_exit':
+; CHECK-NEXT: block-frequency-info: double_exit
+define i32 @double_exit(i32 %N) {
+; Mass = 1
+; Frequency = 1
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+entry:
+ br label %outer
+
+; Mass = 1
+; Backedge mass = 1/3, exit mass = 2/3
+; Loop scale = 3/2
+; Pseudo-edges = exit
+; Pseudo-mass = 1
+; Frequency = 1*3/2*1 = 3/2
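+; (In these annotations, the loop scale is the reciprocal of the exit mass,
+; e.g. 1/(2/3) = 3/2 here, and a header's frequency is the product of the
+; enclosing frequency, its mass, the loop scale, and the pseudo-mass.)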
+; CHECK-NEXT: outer: float = 1.5,
+outer:
+ %I.0 = phi i32 [ 0, %entry ], [ %inc6, %outer.inc ]
+ %Return.0 = phi i32 [ 0, %entry ], [ %Return.1, %outer.inc ]
+ %cmp = icmp slt i32 %I.0, %N
+ br i1 %cmp, label %inner, label %exit, !prof !2 ; 2:1
+
+; Mass = 1
+; Backedge mass = 3/5, exit mass = 2/5
+; Loop scale = 5/2
+; Pseudo-edges = outer.inc @ 1/5, exit @ 1/5
+; Pseudo-mass = 2/3
+; Frequency = 3/2*1*5/2*2/3 = 5/2
+; CHECK-NEXT: inner: float = 2.5,
+inner:
+ %Return.1 = phi i32 [ %Return.0, %outer ], [ %call4, %inner.inc ]
+ %J.0 = phi i32 [ %I.0, %outer ], [ %inc, %inner.inc ]
+ %cmp2 = icmp slt i32 %J.0, %N
+ br i1 %cmp2, label %inner.body, label %outer.inc, !prof !1 ; 4:1
+
+; Mass = 4/5
+; Frequency = 5/2*4/5 = 2
+; CHECK-NEXT: inner.body: float = 2.0,
+inner.body:
+ %call = call i32 @c2(i32 %I.0, i32 %J.0)
+ %tobool = icmp ne i32 %call, 0
+ br i1 %tobool, label %exit, label %inner.inc, !prof !0 ; 3:1
+
+; Mass = 3/5
+; Frequency = 5/2*3/5 = 3/2
+; CHECK-NEXT: inner.inc: float = 1.5,
+inner.inc:
+ %call4 = call i32 @logic2(i32 %Return.1, i32 %I.0, i32 %J.0)
+ %inc = add nsw i32 %J.0, 1
+ br label %inner
+
+; Mass = 1/3
+; Frequency = 3/2*1/3 = 1/2
+; CHECK-NEXT: outer.inc: float = 0.5,
+outer.inc:
+ %inc6 = add nsw i32 %I.0, 1
+ br label %outer
+
+; Mass = 1
+; Frequency = 1
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+exit:
+ %Return.2 = phi i32 [ %Return.1, %inner.body ], [ %Return.0, %outer ]
+ ret i32 %Return.2
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 1, i32 3}
+!1 = metadata !{metadata !"branch_weights", i32 4, i32 1}
+!2 = metadata !{metadata !"branch_weights", i32 2, i32 1}
+
+declare i32 @c2(i32, i32)
+declare i32 @logic2(i32, i32, i32)
+
+; CHECK-LABEL: Printing analysis {{.*}} for function 'double_exit_in_loop':
+; CHECK-NEXT: block-frequency-info: double_exit_in_loop
+define i32 @double_exit_in_loop(i32 %N) {
+; Mass = 1
+; Frequency = 1
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+entry:
+ br label %outer
+
+; Mass = 1
+; Backedge mass = 1/2, exit mass = 1/2
+; Loop scale = 2
+; Pseudo-edges = exit
+; Pseudo-mass = 1
+; Frequency = 1*2*1 = 2
+; CHECK-NEXT: outer: float = 2.0,
+outer:
+ %I.0 = phi i32 [ 0, %entry ], [ %inc12, %outer.inc ]
+ %Return.0 = phi i32 [ 0, %entry ], [ %Return.3, %outer.inc ]
+ %cmp = icmp slt i32 %I.0, %N
+ br i1 %cmp, label %middle, label %exit, !prof !3 ; 1:1
+
+; Mass = 1
+; Backedge mass = 1/3, exit mass = 2/3
+; Loop scale = 3/2
+; Pseudo-edges = outer.inc
+; Pseudo-mass = 1/2
+; Frequency = 2*1*3/2*1/2 = 3/2
+; CHECK-NEXT: middle: float = 1.5,
+middle:
+ %J.0 = phi i32 [ %I.0, %outer ], [ %inc9, %middle.inc ]
+ %Return.1 = phi i32 [ %Return.0, %outer ], [ %Return.2, %middle.inc ]
+ %cmp2 = icmp slt i32 %J.0, %N
+ br i1 %cmp2, label %inner, label %outer.inc, !prof !2 ; 2:1
+
+; Mass = 1
+; Backedge mass = 3/5, exit mass = 2/5
+; Loop scale = 5/2
+; Pseudo-edges = middle.inc @ 1/5, outer.inc @ 1/5
+; Pseudo-mass = 2/3
+; Frequency = 3/2*1*5/2*2/3 = 5/2
+; CHECK-NEXT: inner: float = 2.5,
+inner:
+ %Return.2 = phi i32 [ %Return.1, %middle ], [ %call7, %inner.inc ]
+ %K.0 = phi i32 [ %J.0, %middle ], [ %inc, %inner.inc ]
+ %cmp5 = icmp slt i32 %K.0, %N
+ br i1 %cmp5, label %inner.body, label %middle.inc, !prof !1 ; 4:1
+
+; Mass = 4/5
+; Frequency = 5/2*4/5 = 2
+; CHECK-NEXT: inner.body: float = 2.0,
+inner.body:
+ %call = call i32 @c3(i32 %I.0, i32 %J.0, i32 %K.0)
+ %tobool = icmp ne i32 %call, 0
+ br i1 %tobool, label %outer.inc, label %inner.inc, !prof !0 ; 3:1
+
+; Mass = 3/5
+; Frequency = 5/2*3/5 = 3/2
+; CHECK-NEXT: inner.inc: float = 1.5,
+inner.inc:
+ %call7 = call i32 @logic3(i32 %Return.2, i32 %I.0, i32 %J.0, i32 %K.0)
+ %inc = add nsw i32 %K.0, 1
+ br label %inner
+
+; Mass = 1/3
+; Frequency = 3/2*1/3 = 1/2
+; CHECK-NEXT: middle.inc: float = 0.5,
+middle.inc:
+ %inc9 = add nsw i32 %J.0, 1
+ br label %middle
+
+; Mass = 1/2
+; Frequency = 2*1/2 = 1
+; CHECK-NEXT: outer.inc: float = 1.0,
+outer.inc:
+ %Return.3 = phi i32 [ %Return.2, %inner.body ], [ %Return.1, %middle ]
+ %inc12 = add nsw i32 %I.0, 1
+ br label %outer
+
+; Mass = 1
+; Frequency = 1
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+exit:
+ ret i32 %Return.0
+}
+
+!3 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+
+declare i32 @c3(i32, i32, i32)
+declare i32 @logic3(i32, i32, i32, i32)
diff --git a/test/Analysis/BlockFrequencyInfo/irreducible.ll b/test/Analysis/BlockFrequencyInfo/irreducible.ll
new file mode 100644
index 000000000000..af4ad15d9c1d
--- /dev/null
+++ b/test/Analysis/BlockFrequencyInfo/irreducible.ll
@@ -0,0 +1,421 @@
+; RUN: opt < %s -analyze -block-freq | FileCheck %s
+
+; A loop with multiple exits isn't irreducible. It should be handled
+; correctly.
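+;
+; As a cross-check of the CHECK lines below: with the 1:7 and 3:4 branch
+; weights, the exit mass of the loop is 1/8 + 7/8*3/7 = 1/2, so the loop scale
+; is 2.  That gives loop.1 = 2.0, loop.2 = 7/8*2 = 1.75, exit.1 = 1/8*2 = 0.25,
+; and exit.2 = 3/8*2 = 0.75.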
+;
+; CHECK-LABEL: Printing analysis {{.*}} for function 'multiexit':
+; CHECK-NEXT: block-frequency-info: multiexit
+define void @multiexit(i1 %x) {
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+entry:
+ br label %loop.1
+
+; CHECK-NEXT: loop.1: float = 2.0,
+loop.1:
+ br i1 %x, label %exit.1, label %loop.2, !prof !0
+
+; CHECK-NEXT: loop.2: float = 1.75,
+loop.2:
+ br i1 %x, label %exit.2, label %loop.1, !prof !1
+
+; CHECK-NEXT: exit.1: float = 0.25,
+exit.1:
+ br label %return
+
+; CHECK-NEXT: exit.2: float = 0.75,
+exit.2:
+ br label %return
+
+; CHECK-NEXT: return: float = 1.0, int = [[ENTRY]]
+return:
+ ret void
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 1, i32 7}
+!1 = metadata !{metadata !"branch_weights", i32 3, i32 4}
+
+; Irreducible control flow
+; ========================
+;
+; LoopInfo defines a loop as a non-trivial SCC dominated by a single block,
+; called the header. A given loop, L, can have sub-loops, which are loops
+; within the subgraph of L that excludes the header.
+;
+; In addition to loops, -block-freq has limited support for irreducible SCCs,
+; which are SCCs with multiple entry blocks. Irreducible SCCs are discovered
+; on the fly, and modelled as loops with multiple headers.
+;
+; The headers of an irreducible sub-SCC consist of its entry blocks and all nodes
+; that are targets of a backedge within it (excluding backedges within true
+; sub-loops).
+;
+; -block-freq currently models an irreducible SCC as if a single block were
+; inserted that intercepts all the edges to the headers.  All backedges and
+; entries point to this block, and its successors are the headers, which split
+; the frequency evenly between them.
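+; For the 'multientry' testcase below, for example, this means c1 and c2 each
+; receive half of the combined entry and backedge frequency, so both are
+; reported as float = 2.0 rather than the exact 15/7 and 13/7 derived in
+; Testcase #1.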
+;
+; There are a number of testcases below. Only the first two have detailed
+; explanations.
+;
+; Testcase #1
+; ===========
+;
+; In this case c1 and c2 should have frequencies of 15/7 and 13/7,
+; respectively. To calculate this, consider assigning 1.0 to entry, and
+; distributing frequency iteratively (to infinity). At the first iteration,
+; entry gives 3/4 to c1 and 1/4 to c2. At every step after, c1 and c2 give 3/4
+; of what they have to each other and 1/4 to exit; in the limit, all of the
+; frequency flows out through exit.
+;
+; c1 = 3/4 + 1/4*3/4 + 3/4*3^2/4^2 + 1/4*3^3/4^3 + 3/4*3^3/4^3 + ...
+; c2 = 1/4 + 3/4*3/4 + 1/4*3^2/4^2 + 3/4*3^3/4^3 + 1/4*3^3/4^3 + ...
+;
+; Simplify by splitting up the odd and even terms of the series and taking out
+; factors so that the infinite series matches:
+;
+; c1 = 3/4 *(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
+; + 3/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
+; c2 = 1/4 *(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
+; + 9/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
+;
+; c1 = 15/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
+; c2 = 13/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
+;
+; Since this geometric series sums to 16/7:
+;
+; c1 = 15/7
+; c2 = 13/7
+;
+; If we treat c1 and c2 as members of the same loop, the exit frequency of the
+; loop as a whole is 1/4, so the loop scale should be 4. Summing c1 and c2
+; gives 28/7, or 4.0, which is a nice confirmation of the math above.
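+;
+; The same result falls out of the fixed-point equations implied by the 3:1
+; branch weights, without summing the series:
+;
+;   c1 = 3/4 + 3/4*c2
+;   c2 = 1/4 + 3/4*c1
+;
+; which solve to c1 = 15/7 and c2 = 13/7.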
+;
+; -block-freq currently treats the two nodes as equals.
+define void @multientry(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'multientry':
+; CHECK-NEXT: block-frequency-info: multientry
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br i1 %x, label %c1, label %c2, !prof !2
+
+c1:
+; CHECK-NEXT: c1: float = 2.0,
+; The "correct" answer is: float = 2.142857{{[0-9]*}},
+ br i1 %x, label %c2, label %exit, !prof !2
+
+c2:
+; CHECK-NEXT: c2: float = 2.0,
+; The "correct" answer is: float = 1.857142{{[0-9]*}},
+ br i1 %x, label %c1, label %exit, !prof !2
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+
+!2 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+
+; Testcase #2
+; ===========
+;
+; In this case c1 and c2 should be treated as equals in a single loop. The
+; exit frequency is 1/3, so the scaling factor for the loop should be 3.0. The
+; loop is entered 2/3 of the time, and c1 and c2 split the total loop frequency
+; evenly (1/2), so they should each have frequencies of 1.0 (3.0*2/3*1/2).
+; Another way of computing this result is by assigning 1.0 to entry and showing
+; that c1 and c2 should accumulate frequencies of:
+;
+; 1/3 + 2/9 + 4/27 + 8/81 + ...
+; 2^0/3^1 + 2^1/3^2 + 2^2/3^3 + 2^3/3^4 + ...
+;
+; At the first step, c1 and c2 each get 1/3 of the entry. At each subsequent
+; step, c1 and c2 each get 1/3 of what's left in c1 and c2 combined. This
+; infinite series sums to 1.
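+; (It is a geometric series with ratio 2/3, so it sums to (1/3)/(1 - 2/3) = 1.)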
+;
+; Since the current algorithm *always* assumes entry blocks are equal,
+; -block-freq gets the right answers here.
+define void @crossloops(i2 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'crossloops':
+; CHECK-NEXT: block-frequency-info: crossloops
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ switch i2 %x, label %exit [ i2 1, label %c1
+ i2 2, label %c2 ], !prof !3
+
+c1:
+; CHECK-NEXT: c1: float = 1.0,
+ switch i2 %x, label %exit [ i2 1, label %c1
+ i2 2, label %c2 ], !prof !3
+
+c2:
+; CHECK-NEXT: c2: float = 1.0,
+ switch i2 %x, label %exit [ i2 1, label %c1
+ i2 2, label %c2 ], !prof !3
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+
+!3 = metadata !{metadata !"branch_weights", i32 2, i32 2, i32 2}
+
+; A true loop with irreducible control flow inside.
+define void @loop_around_irreducible(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'loop_around_irreducible':
+; CHECK-NEXT: block-frequency-info: loop_around_irreducible
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br label %loop
+
+loop:
+; CHECK-NEXT: loop: float = 4.0, int = [[HEAD:[0-9]+]]
+ br i1 %x, label %left, label %right, !prof !4
+
+left:
+; CHECK-NEXT: left: float = 8.0,
+ br i1 %x, label %right, label %loop.end, !prof !5
+
+right:
+; CHECK-NEXT: right: float = 8.0,
+ br i1 %x, label %left, label %loop.end, !prof !5
+
+loop.end:
+; CHECK-NEXT: loop.end: float = 4.0, int = [[HEAD]]
+ br i1 %x, label %loop, label %exit, !prof !5
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!4 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+!5 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+
+; Two unrelated irreducible SCCs.
+define void @two_sccs(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'two_sccs':
+; CHECK-NEXT: block-frequency-info: two_sccs
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br i1 %x, label %a, label %b, !prof !6
+
+a:
+; CHECK-NEXT: a: float = 0.75,
+ br i1 %x, label %a.left, label %a.right, !prof !7
+
+a.left:
+; CHECK-NEXT: a.left: float = 1.5,
+ br i1 %x, label %a.right, label %exit, !prof !6
+
+a.right:
+; CHECK-NEXT: a.right: float = 1.5,
+ br i1 %x, label %a.left, label %exit, !prof !6
+
+b:
+; CHECK-NEXT: b: float = 0.25,
+ br i1 %x, label %b.left, label %b.right, !prof !7
+
+b.left:
+; CHECK-NEXT: b.left: float = 0.625,
+ br i1 %x, label %b.right, label %exit, !prof !8
+
+b.right:
+; CHECK-NEXT: b.right: float = 0.625,
+ br i1 %x, label %b.left, label %exit, !prof !8
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!6 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+!7 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+!8 = metadata !{metadata !"branch_weights", i32 4, i32 1}
+
+; A true loop inside irreducible control flow.
+define void @loop_inside_irreducible(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'loop_inside_irreducible':
+; CHECK-NEXT: block-frequency-info: loop_inside_irreducible
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br i1 %x, label %left, label %right, !prof !9
+
+left:
+; CHECK-NEXT: left: float = 2.0,
+ br i1 %x, label %right, label %exit, !prof !10
+
+right:
+; CHECK-NEXT: right: float = 2.0, int = [[RIGHT:[0-9]+]]
+ br label %loop
+
+loop:
+; CHECK-NEXT: loop: float = 6.0,
+ br i1 %x, label %loop, label %right.end, !prof !11
+
+right.end:
+; CHECK-NEXT: right.end: float = 2.0, int = [[RIGHT]]
+ br i1 %x, label %left, label %exit, !prof !10
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!9 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+!10 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+!11 = metadata !{metadata !"branch_weights", i32 2, i32 1}
+
+; Irreducible control flow in a branch that's in a true loop.
+define void @loop_around_branch_with_irreducible(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'loop_around_branch_with_irreducible':
+; CHECK-NEXT: block-frequency-info: loop_around_branch_with_irreducible
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br label %loop
+
+loop:
+; CHECK-NEXT: loop: float = 2.0, int = [[LOOP:[0-9]+]]
+ br i1 %x, label %normal, label %irreducible.entry, !prof !12
+
+normal:
+; CHECK-NEXT: normal: float = 1.5,
+ br label %loop.end
+
+irreducible.entry:
+; CHECK-NEXT: irreducible.entry: float = 0.5, int = [[IRREDUCIBLE:[0-9]+]]
+ br i1 %x, label %left, label %right, !prof !13
+
+left:
+; CHECK-NEXT: left: float = 1.0,
+ br i1 %x, label %right, label %irreducible.exit, !prof !12
+
+right:
+; CHECK-NEXT: right: float = 1.0,
+ br i1 %x, label %left, label %irreducible.exit, !prof !12
+
+irreducible.exit:
+; CHECK-NEXT: irreducible.exit: float = 0.5, int = [[IRREDUCIBLE]]
+ br label %loop.end
+
+loop.end:
+; CHECK-NEXT: loop.end: float = 2.0, int = [[LOOP]]
+ br i1 %x, label %loop, label %exit, !prof !13
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!12 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+!13 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+
+; Irreducible control flow between two true loops.
+define void @loop_around_branch_with_irreducible_around_loop(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'loop_around_branch_with_irreducible_around_loop':
+; CHECK-NEXT: block-frequency-info: loop_around_branch_with_irreducible_around_loop
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br label %loop
+
+loop:
+; CHECK-NEXT: loop: float = 3.0, int = [[LOOP:[0-9]+]]
+ br i1 %x, label %normal, label %irreducible, !prof !14
+
+normal:
+; CHECK-NEXT: normal: float = 2.0,
+ br label %loop.end
+
+irreducible:
+; CHECK-NEXT: irreducible: float = 1.0,
+ br i1 %x, label %left, label %right, !prof !15
+
+left:
+; CHECK-NEXT: left: float = 2.0,
+ br i1 %x, label %right, label %loop.end, !prof !16
+
+right:
+; CHECK-NEXT: right: float = 2.0, int = [[RIGHT:[0-9]+]]
+ br label %right.loop
+
+right.loop:
+; CHECK-NEXT: right.loop: float = 10.0,
+ br i1 %x, label %right.loop, label %right.end, !prof !17
+
+right.end:
+; CHECK-NEXT: right.end: float = 2.0, int = [[RIGHT]]
+ br i1 %x, label %left, label %loop.end, !prof !16
+
+loop.end:
+; CHECK-NEXT: loop.end: float = 3.0, int = [[LOOP]]
+ br i1 %x, label %loop, label %exit, !prof !14
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!14 = metadata !{metadata !"branch_weights", i32 2, i32 1}
+!15 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+!16 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+!17 = metadata !{metadata !"branch_weights", i32 4, i32 1}
+
+; An irreducible SCC with a non-header.
+define void @nonheader(i1 %x) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'nonheader':
+; CHECK-NEXT: block-frequency-info: nonheader
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br i1 %x, label %left, label %right, !prof !18
+
+left:
+; CHECK-NEXT: left: float = 1.0,
+ br i1 %x, label %bottom, label %exit, !prof !19
+
+right:
+; CHECK-NEXT: right: float = 1.0,
+ br i1 %x, label %bottom, label %exit, !prof !20
+
+bottom:
+; CHECK-NEXT: bottom: float = 1.0,
+ br i1 %x, label %left, label %right, !prof !18
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!18 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+!19 = metadata !{metadata !"branch_weights", i32 1, i32 3}
+!20 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+
+; An irreducible SCC with an irreducible sub-SCC. In the current version of
+; -block-freq, this means an extra header.
+;
+; This testcase uses non-trivial branch weights. The CHECK statements here
+; will start to fail if we change -block-freq to be more accurate. Currently,
+; we expect left, right and top to be treated as equal headers.
+define void @nonentry_header(i1 %x, i2 %y) {
+; CHECK-LABEL: Printing analysis {{.*}} for function 'nonentry_header':
+; CHECK-NEXT: block-frequency-info: nonentry_header
+entry:
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+ br i1 %x, label %left, label %right, !prof !21
+
+left:
+; CHECK-NEXT: left: float = 3.0,
+ br i1 %x, label %top, label %bottom, !prof !22
+
+right:
+; CHECK-NEXT: right: float = 3.0,
+ br i1 %x, label %top, label %bottom, !prof !22
+
+top:
+; CHECK-NEXT: top: float = 3.0,
+ switch i2 %y, label %exit [ i2 0, label %left
+ i2 1, label %right
+ i2 2, label %bottom ], !prof !23
+
+bottom:
+; CHECK-NEXT: bottom: float = 4.5,
+ br label %top
+
+exit:
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+ ret void
+}
+!21 = metadata !{metadata !"branch_weights", i32 2, i32 1}
+!22 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+!23 = metadata !{metadata !"branch_weights", i32 8, i32 1, i32 3, i32 12}
diff --git a/test/Analysis/BlockFrequencyInfo/loop_with_branch.ll b/test/Analysis/BlockFrequencyInfo/loop_with_branch.ll
new file mode 100644
index 000000000000..9d27b6bf0f20
--- /dev/null
+++ b/test/Analysis/BlockFrequencyInfo/loop_with_branch.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -analyze -block-freq | FileCheck %s
+
+; CHECK-LABEL: Printing analysis {{.*}} for function 'loop_with_branch':
+; CHECK-NEXT: block-frequency-info: loop_with_branch
+define void @loop_with_branch(i32 %a) {
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+entry:
+ %skip_loop = call i1 @foo0(i32 %a)
+ br i1 %skip_loop, label %skip, label %header, !prof !0
+
+; CHECK-NEXT: skip: float = 0.25,
+skip:
+ br label %exit
+
+; CHECK-NEXT: header: float = 4.5,
+header:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %back ]
+ %i.next = add i32 %i, 1
+ %choose = call i2 @foo1(i32 %i)
+ switch i2 %choose, label %exit [ i2 0, label %left
+ i2 1, label %right ], !prof !1
+
+; CHECK-NEXT: left: float = 1.5,
+left:
+ br label %back
+
+; CHECK-NEXT: right: float = 2.25,
+right:
+ br label %back
+
+; CHECK-NEXT: back: float = 3.75,
+back:
+ br label %header
+
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+exit:
+ ret void
+}
+
+declare i1 @foo0(i32)
+declare i2 @foo1(i32)
+
+!0 = metadata !{metadata !"branch_weights", i32 1, i32 3}
+!1 = metadata !{metadata !"branch_weights", i32 1, i32 2, i32 3}
diff --git a/test/Analysis/BlockFrequencyInfo/nested_loop_with_branches.ll b/test/Analysis/BlockFrequencyInfo/nested_loop_with_branches.ll
new file mode 100644
index 000000000000..d93ffceb5fab
--- /dev/null
+++ b/test/Analysis/BlockFrequencyInfo/nested_loop_with_branches.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -analyze -block-freq | FileCheck %s
+
+; CHECK-LABEL: Printing analysis {{.*}} for function 'nested_loop_with_branches'
+; CHECK-NEXT: block-frequency-info: nested_loop_with_branches
+define void @nested_loop_with_branches(i32 %a) {
+; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
+entry:
+ %v0 = call i1 @foo0(i32 %a)
+ br i1 %v0, label %exit, label %outer, !prof !0
+
+; CHECK-NEXT: outer: float = 12.0,
+outer:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %inner.end ], [ %i.next, %no_inner ]
+ %i.next = add i32 %i, 1
+ %do_inner = call i1 @foo1(i32 %i)
+ br i1 %do_inner, label %no_inner, label %inner, !prof !0
+
+; CHECK-NEXT: inner: float = 36.0,
+inner:
+ %j = phi i32 [ 0, %outer ], [ %j.next, %inner.end ]
+ %side = call i1 @foo3(i32 %j)
+ br i1 %side, label %left, label %right, !prof !0
+
+; CHECK-NEXT: left: float = 9.0,
+left:
+ %v4 = call i1 @foo4(i32 %j)
+ br label %inner.end
+
+; CHECK-NEXT: right: float = 27.0,
+right:
+ %v5 = call i1 @foo5(i32 %j)
+ br label %inner.end
+
+; CHECK-NEXT: inner.end: float = 36.0,
+inner.end:
+ %stay_inner = phi i1 [ %v4, %left ], [ %v5, %right ]
+ %j.next = add i32 %j, 1
+ br i1 %stay_inner, label %inner, label %outer, !prof !1
+
+; CHECK-NEXT: no_inner: float = 3.0,
+no_inner:
+ %continue = call i1 @foo6(i32 %i)
+ br i1 %continue, label %outer, label %exit, !prof !1
+
+; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
+exit:
+ ret void
+}
+
+declare i1 @foo0(i32)
+declare i1 @foo1(i32)
+declare i1 @foo2(i32)
+declare i1 @foo3(i32)
+declare i1 @foo4(i32)
+declare i1 @foo5(i32)
+declare i1 @foo6(i32)
+
+!0 = metadata !{metadata !"branch_weights", i32 1, i32 3}
+!1 = metadata !{metadata !"branch_weights", i32 3, i32 1}
diff --git a/test/Analysis/BranchProbabilityInfo/loop.ll b/test/Analysis/BranchProbabilityInfo/loop.ll
index b648cbb16a60..40f1111c6b03 100644
--- a/test/Analysis/BranchProbabilityInfo/loop.ll
+++ b/test/Analysis/BranchProbabilityInfo/loop.ll
@@ -15,7 +15,7 @@ do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc3, %do.end ]
call void @g1()
br label %do.body1
-; CHECK: edge do.body -> do.body1 probability is 124 / 124 = 100%
+; CHECK: edge do.body -> do.body1 probability is 16 / 16 = 100%
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %do.body1 ]
@@ -55,8 +55,8 @@ for.body:
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc5, %for.end ]
call void @g1()
br i1 %cmp27, label %for.body3, label %for.end
-; CHECK: edge for.body -> for.body3 probability is 62 / 124 = 50%
-; CHECK: edge for.body -> for.end probability is 62 / 124 = 50%
+; CHECK: edge for.body -> for.body3 probability is 20 / 32 = 62.5%
+; CHECK: edge for.body -> for.end probability is 12 / 32 = 37.5%
for.body3:
%j.08 = phi i32 [ %inc, %for.body3 ], [ 0, %for.body ]
@@ -91,8 +91,8 @@ do.body:
%0 = load i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %do.body1, label %if.end
-; CHECK: edge do.body -> do.body1 probability is 62 / 124 = 50%
-; CHECK: edge do.body -> if.end probability is 62 / 124 = 50%
+; CHECK: edge do.body -> do.body1 probability is 16 / 32 = 50%
+; CHECK: edge do.body -> if.end probability is 16 / 32 = 50%
do.body1:
%j.0 = phi i32 [ %inc, %do.body1 ], [ 0, %do.body ]
@@ -165,7 +165,7 @@ do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %do.end ]
call void @g1()
br label %do.body1
-; CHECK: edge do.body -> do.body1 probability is 124 / 124 = 100%
+; CHECK: edge do.body -> do.body1 probability is 16 / 16 = 100%
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %if.end ]
@@ -209,7 +209,7 @@ do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %do.end ]
call void @g1()
br label %do.body1
-; CHECK: edge do.body -> do.body1 probability is 124 / 124 = 100%
+; CHECK: edge do.body -> do.body1 probability is 16 / 16 = 100%
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %do.cond ]
@@ -261,14 +261,14 @@ for.body:
%0 = load i32* %c, align 4
%cmp1 = icmp eq i32 %0, %i.011
br i1 %cmp1, label %for.inc5, label %if.end
-; CHECK: edge for.body -> for.inc5 probability is 62 / 124 = 50%
-; CHECK: edge for.body -> if.end probability is 62 / 124 = 50%
+; CHECK: edge for.body -> for.inc5 probability is 16 / 32 = 50%
+; CHECK: edge for.body -> if.end probability is 16 / 32 = 50%
if.end:
call void @g1()
br i1 %cmp38, label %for.body4, label %for.end
-; CHECK: edge if.end -> for.body4 probability is 62 / 124 = 50%
-; CHECK: edge if.end -> for.end probability is 62 / 124 = 50%
+; CHECK: edge if.end -> for.body4 probability is 20 / 32 = 62.5%
+; CHECK: edge if.end -> for.end probability is 12 / 32 = 37.5%
for.body4:
%j.09 = phi i32 [ %inc, %for.body4 ], [ 0, %if.end ]
@@ -282,7 +282,7 @@ for.body4:
for.end:
call void @g3()
br label %for.inc5
-; CHECK: edge for.end -> for.inc5 probability is 124 / 124 = 100%
+; CHECK: edge for.end -> for.inc5 probability is 16 / 16 = 100%
for.inc5:
%inc6 = add nsw i32 %i.011, 1
@@ -314,35 +314,35 @@ for.body:
%i.019 = phi i32 [ 0, %for.body.lr.ph ], [ %inc14, %for.end ]
call void @g1()
br i1 %cmp216, label %for.body3, label %for.end
-; CHECK: edge for.body -> for.body3 probability is 62 / 124 = 50%
-; CHECK: edge for.body -> for.end probability is 62 / 124 = 50%
+; CHECK: edge for.body -> for.body3 probability is 20 / 32 = 62.5%
+; CHECK: edge for.body -> for.end probability is 12 / 32 = 37.5%
for.body3:
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ]
%0 = load i32* %c, align 4
%cmp4 = icmp eq i32 %0, %j.017
br i1 %cmp4, label %for.inc, label %if.end
-; CHECK: edge for.body3 -> for.inc probability is 62 / 124 = 50%
-; CHECK: edge for.body3 -> if.end probability is 62 / 124 = 50%
+; CHECK: edge for.body3 -> for.inc probability is 16 / 32 = 50%
+; CHECK: edge for.body3 -> if.end probability is 16 / 32 = 50%
if.end:
%1 = load i32* %arrayidx5, align 4
%cmp6 = icmp eq i32 %1, %j.017
br i1 %cmp6, label %for.inc, label %if.end8
-; CHECK: edge if.end -> for.inc probability is 62 / 124 = 50%
-; CHECK: edge if.end -> if.end8 probability is 62 / 124 = 50%
+; CHECK: edge if.end -> for.inc probability is 16 / 32 = 50%
+; CHECK: edge if.end -> if.end8 probability is 16 / 32 = 50%
if.end8:
%2 = load i32* %arrayidx9, align 4
%cmp10 = icmp eq i32 %2, %j.017
br i1 %cmp10, label %for.inc, label %if.end12
-; CHECK: edge if.end8 -> for.inc probability is 62 / 124 = 50%
-; CHECK: edge if.end8 -> if.end12 probability is 62 / 124 = 50%
+; CHECK: edge if.end8 -> for.inc probability is 16 / 32 = 50%
+; CHECK: edge if.end8 -> if.end12 probability is 16 / 32 = 50%
if.end12:
call void @g2()
br label %for.inc
-; CHECK: edge if.end12 -> for.inc probability is 124 / 124 = 100%
+; CHECK: edge if.end12 -> for.inc probability is 16 / 16 = 100%
for.inc:
%inc = add nsw i32 %j.017, 1
diff --git a/test/Analysis/BranchProbabilityInfo/pr18705.ll b/test/Analysis/BranchProbabilityInfo/pr18705.ll
new file mode 100644
index 000000000000..9f239b46058d
--- /dev/null
+++ b/test/Analysis/BranchProbabilityInfo/pr18705.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -analyze -branch-prob | FileCheck %s
+
+; Since neither of while.body's out-edges is an exit or a back edge,
+; calcLoopBranchHeuristics should return early without setting the weights.
+; calcFloatingPointHeuristics, which is run later, sets the weights.
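+; For the fcmp une condition below, the floating point heuristic treats the
+; "not equal" edge as likely, which is where the 20:12 split in the checked
+; probabilities comes from.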
+;
+; CHECK: edge while.body -> if.then probability is 20 / 32 = 62.5%
+; CHECK: edge while.body -> if.else probability is 12 / 32 = 37.5%
+
+define void @foo1(i32 %n, i32* nocapture %b, i32* nocapture %c, i32* nocapture %d, float* nocapture readonly %f0, float* nocapture readonly %f1) {
+entry:
+ %tobool8 = icmp eq i32 %n, 0
+ br i1 %tobool8, label %while.end, label %while.body.lr.ph
+
+while.body.lr.ph:
+ %0 = sext i32 %n to i64
+ br label %while.body
+
+while.body:
+ %indvars.iv = phi i64 [ %0, %while.body.lr.ph ], [ %indvars.iv.next, %if.end ]
+ %b.addr.011 = phi i32* [ %b, %while.body.lr.ph ], [ %b.addr.1, %if.end ]
+ %d.addr.010 = phi i32* [ %d, %while.body.lr.ph ], [ %incdec.ptr4, %if.end ]
+ %c.addr.09 = phi i32* [ %c, %while.body.lr.ph ], [ %c.addr.1, %if.end ]
+ %indvars.iv.next = add nsw i64 %indvars.iv, -1
+ %arrayidx = getelementptr inbounds float* %f0, i64 %indvars.iv.next
+ %1 = load float* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds float* %f1, i64 %indvars.iv.next
+ %2 = load float* %arrayidx2, align 4
+ %cmp = fcmp une float %1, %2
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %incdec.ptr = getelementptr inbounds i32* %b.addr.011, i64 1
+ %3 = load i32* %b.addr.011, align 4
+ %add = add nsw i32 %3, 12
+ store i32 %add, i32* %b.addr.011, align 4
+ br label %if.end
+
+if.else:
+ %incdec.ptr3 = getelementptr inbounds i32* %c.addr.09, i64 1
+ %4 = load i32* %c.addr.09, align 4
+ %sub = add nsw i32 %4, -13
+ store i32 %sub, i32* %c.addr.09, align 4
+ br label %if.end
+
+if.end:
+ %c.addr.1 = phi i32* [ %c.addr.09, %if.then ], [ %incdec.ptr3, %if.else ]
+ %b.addr.1 = phi i32* [ %incdec.ptr, %if.then ], [ %b.addr.011, %if.else ]
+ %incdec.ptr4 = getelementptr inbounds i32* %d.addr.010, i64 1
+ store i32 14, i32* %d.addr.010, align 4
+ %5 = trunc i64 %indvars.iv.next to i32
+ %tobool = icmp eq i32 %5, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
diff --git a/test/Analysis/CostModel/AArch64/lit.local.cfg b/test/Analysis/CostModel/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..7184443994b6
--- /dev/null
+++ b/test/Analysis/CostModel/AArch64/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Analysis/CostModel/AArch64/select.ll b/test/Analysis/CostModel/AArch64/select.ll
new file mode 100644
index 000000000000..216dc5ddc488
--- /dev/null
+++ b/test/Analysis/CostModel/AArch64/select.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=arm64-apple-ios -mcpu=cyclone | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+
+; CHECK-LABEL: select
+define void @select() {
+ ; Scalar values
+ ; CHECK: cost of 1 {{.*}} select
+ %v1 = select i1 undef, i8 undef, i8 undef
+ ; CHECK: cost of 1 {{.*}} select
+ %v2 = select i1 undef, i16 undef, i16 undef
+ ; CHECK: cost of 1 {{.*}} select
+ %v3 = select i1 undef, i32 undef, i32 undef
+ ; CHECK: cost of 1 {{.*}} select
+ %v4 = select i1 undef, i64 undef, i64 undef
+ ; CHECK: cost of 1 {{.*}} select
+ %v5 = select i1 undef, float undef, float undef
+ ; CHECK: cost of 1 {{.*}} select
+ %v6 = select i1 undef, double undef, double undef
+
+ ; Vector values - check for vectors that have a high cost because they end up
+ ; scalarized.
+ ; CHECK: cost of 320 {{.*}} select
+ %v13b = select <16 x i1> undef, <16 x i16> undef, <16 x i16> undef
+
+ ; CHECK: cost of 160 {{.*}} select
+ %v15b = select <8 x i1> undef, <8 x i32> undef, <8 x i32> undef
+ ; CHECK: cost of 320 {{.*}} select
+ %v15c = select <16 x i1> undef, <16 x i32> undef, <16 x i32> undef
+
+ ; CHECK: cost of 80 {{.*}} select
+ %v16a = select <4 x i1> undef, <4 x i64> undef, <4 x i64> undef
+ ; CHECK: cost of 160 {{.*}} select
+ %v16b = select <8 x i1> undef, <8 x i64> undef, <8 x i64> undef
+ ; CHECK: cost of 320 {{.*}} select
+ %v16c = select <16 x i1> undef, <16 x i64> undef, <16 x i64> undef
+
+ ret void
+}
diff --git a/test/Analysis/CostModel/AArch64/store.ll b/test/Analysis/CostModel/AArch64/store.ll
new file mode 100644
index 000000000000..0c9883cf2a2f
--- /dev/null
+++ b/test/Analysis/CostModel/AArch64/store.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=arm64-apple-ios -mcpu=cyclone | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+; CHECK-LABEL: store
+define void @store() {
+ ; Stores of <2 x i64> should be expensive because we don't split them and
+ ; unaligned 16b stores have bad performance.
+ ; CHECK: cost of 12 {{.*}} store
+ store <2 x i64> undef, <2 x i64> * undef
+
+ ; We scalarize the loads/stores because there is no vector register name for
+ ; these types (they get extended to v.4h/v.2s).
+ ; CHECK: cost of 16 {{.*}} store
+ store <2 x i8> undef, <2 x i8> * undef
+ ; CHECK: cost of 64 {{.*}} store
+ store <4 x i8> undef, <4 x i8> * undef
+ ; CHECK: cost of 16 {{.*}} load
+ load <2 x i8> * undef
+ ; CHECK: cost of 64 {{.*}} load
+ load <4 x i8> * undef
+
+ ret void
+}
diff --git a/test/Analysis/CostModel/ARM/cast.ll b/test/Analysis/CostModel/ARM/cast.ll
index 0cdd61cac4f7..662110f2720d 100644
--- a/test/Analysis/CostModel/ARM/cast.ll
+++ b/test/Analysis/CostModel/ARM/cast.ll
@@ -221,9 +221,9 @@ define i32 @casts() {
%r96 = fptoui <2 x float> undef to <2 x i32>
; CHECK: cost of 1 {{.*}} fptosi
%r97 = fptosi <2 x float> undef to <2 x i32>
- ; CHECK: cost of 24 {{.*}} fptoui
+ ; CHECK: cost of 28 {{.*}} fptoui
%r98 = fptoui <2 x float> undef to <2 x i64>
- ; CHECK: cost of 24 {{.*}} fptosi
+ ; CHECK: cost of 28 {{.*}} fptosi
%r99 = fptosi <2 x float> undef to <2 x i64>
; CHECK: cost of 8 {{.*}} fptoui
@@ -242,9 +242,9 @@ define i32 @casts() {
%r106 = fptoui <2 x double> undef to <2 x i32>
; CHECK: cost of 2 {{.*}} fptosi
%r107 = fptosi <2 x double> undef to <2 x i32>
- ; CHECK: cost of 24 {{.*}} fptoui
+ ; CHECK: cost of 28 {{.*}} fptoui
%r108 = fptoui <2 x double> undef to <2 x i64>
- ; CHECK: cost of 24 {{.*}} fptosi
+ ; CHECK: cost of 28 {{.*}} fptosi
%r109 = fptosi <2 x double> undef to <2 x i64>
; CHECK: cost of 16 {{.*}} fptoui
@@ -263,9 +263,9 @@ define i32 @casts() {
%r116 = fptoui <4 x float> undef to <4 x i32>
; CHECK: cost of 1 {{.*}} fptosi
%r117 = fptosi <4 x float> undef to <4 x i32>
- ; CHECK: cost of 48 {{.*}} fptoui
+ ; CHECK: cost of 56 {{.*}} fptoui
%r118 = fptoui <4 x float> undef to <4 x i64>
- ; CHECK: cost of 48 {{.*}} fptosi
+ ; CHECK: cost of 56 {{.*}} fptosi
%r119 = fptosi <4 x float> undef to <4 x i64>
; CHECK: cost of 16 {{.*}} fptoui
@@ -284,9 +284,9 @@ define i32 @casts() {
%r126 = fptoui <4 x double> undef to <4 x i32>
; CHECK: cost of 16 {{.*}} fptosi
%r127 = fptosi <4 x double> undef to <4 x i32>
- ; CHECK: cost of 48 {{.*}} fptoui
+ ; CHECK: cost of 56 {{.*}} fptoui
%r128 = fptoui <4 x double> undef to <4 x i64>
- ; CHECK: cost of 48 {{.*}} fptosi
+ ; CHECK: cost of 56 {{.*}} fptosi
%r129 = fptosi <4 x double> undef to <4 x i64>
; CHECK: cost of 32 {{.*}} fptoui
@@ -305,9 +305,9 @@ define i32 @casts() {
%r136 = fptoui <8 x float> undef to <8 x i32>
; CHECK: cost of 2 {{.*}} fptosi
%r137 = fptosi <8 x float> undef to <8 x i32>
- ; CHECK: cost of 96 {{.*}} fptoui
+ ; CHECK: cost of 112 {{.*}} fptoui
%r138 = fptoui <8 x float> undef to <8 x i64>
- ; CHECK: cost of 96 {{.*}} fptosi
+ ; CHECK: cost of 112 {{.*}} fptosi
%r139 = fptosi <8 x float> undef to <8 x i64>
; CHECK: cost of 32 {{.*}} fptoui
@@ -326,9 +326,9 @@ define i32 @casts() {
%r146 = fptoui <8 x double> undef to <8 x i32>
; CHECK: cost of 32 {{.*}} fptosi
%r147 = fptosi <8 x double> undef to <8 x i32>
- ; CHECK: cost of 96 {{.*}} fptoui
+ ; CHECK: cost of 112 {{.*}} fptoui
%r148 = fptoui <8 x double> undef to <8 x i64>
- ; CHECK: cost of 96 {{.*}} fptosi
+ ; CHECK: cost of 112 {{.*}} fptosi
%r149 = fptosi <8 x double> undef to <8 x i64>
; CHECK: cost of 64 {{.*}} fptoui
@@ -347,9 +347,9 @@ define i32 @casts() {
%r156 = fptoui <16 x float> undef to <16 x i32>
; CHECK: cost of 4 {{.*}} fptosi
%r157 = fptosi <16 x float> undef to <16 x i32>
- ; CHECK: cost of 192 {{.*}} fptoui
+ ; CHECK: cost of 224 {{.*}} fptoui
%r158 = fptoui <16 x float> undef to <16 x i64>
- ; CHECK: cost of 192 {{.*}} fptosi
+ ; CHECK: cost of 224 {{.*}} fptosi
%r159 = fptosi <16 x float> undef to <16 x i64>
; CHECK: cost of 64 {{.*}} fptoui
@@ -368,9 +368,9 @@ define i32 @casts() {
%r166 = fptoui <16 x double> undef to <16 x i32>
; CHECK: cost of 64 {{.*}} fptosi
%r167 = fptosi <16 x double> undef to <16 x i32>
- ; CHECK: cost of 192 {{.*}} fptoui
+ ; CHECK: cost of 224 {{.*}} fptoui
%r168 = fptoui <16 x double> undef to <16 x i64>
- ; CHECK: cost of 192 {{.*}} fptosi
+ ; CHECK: cost of 224 {{.*}} fptosi
%r169 = fptosi <16 x double> undef to <16 x i64>
; CHECK: cost of 8 {{.*}} uitofp
@@ -528,7 +528,7 @@ define i32 @casts() {
%r242 = uitofp <16 x i8> undef to <16 x double>
; CHECK: cost of 64 {{.*}} sitofp
%r243 = sitofp <16 x i8> undef to <16 x double>
- ; C4ECK: cost of 64 {{.*}} uitofp
+ ; CHECK: cost of 64 {{.*}} uitofp
%r244 = uitofp <16 x i16> undef to <16 x double>
; CHECK: cost of 64 {{.*}} sitofp
%r245 = sitofp <16 x i16> undef to <16 x double>
diff --git a/test/Analysis/CostModel/ARM/lit.local.cfg b/test/Analysis/CostModel/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/Analysis/CostModel/ARM/lit.local.cfg
+++ b/test/Analysis/CostModel/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/Analysis/CostModel/PowerPC/ext.ll b/test/Analysis/CostModel/PowerPC/ext.ll
new file mode 100644
index 000000000000..7d6a14e93cdf
--- /dev/null
+++ b/test/Analysis/CostModel/PowerPC/ext.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define void @exts() {
+
+ ; CHECK: cost of 1 {{.*}} sext
+ %v1 = sext i16 undef to i32
+
+ ; CHECK: cost of 1 {{.*}} sext
+ %v2 = sext <2 x i16> undef to <2 x i32>
+
+ ; CHECK: cost of 1 {{.*}} sext
+ %v3 = sext <4 x i16> undef to <4 x i32>
+
+ ; CHECK: cost of 112 {{.*}} sext
+ %v4 = sext <8 x i16> undef to <8 x i32>
+
+ ret void
+}
+
diff --git a/test/Analysis/CostModel/PowerPC/insert_extract.ll b/test/Analysis/CostModel/PowerPC/insert_extract.ll
index f51963d56fde..8dc003153a24 100644
--- a/test/Analysis/CostModel/PowerPC/insert_extract.ll
+++ b/test/Analysis/CostModel/PowerPC/insert_extract.ll
@@ -3,13 +3,13 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "powerpc64-unknown-linux-gnu"
define i32 @insert(i32 %arg) {
- ; CHECK: cost of 13 {{.*}} insertelement
+ ; CHECK: cost of 10 {{.*}} insertelement
%x = insertelement <4 x i32> undef, i32 %arg, i32 0
ret i32 undef
}
define i32 @extract(<4 x i32> %arg) {
- ; CHECK: cost of 13 {{.*}} extractelement
+ ; CHECK: cost of 3 {{.*}} extractelement
%x = extractelement <4 x i32> %arg, i32 0
ret i32 %x
}
diff --git a/test/Analysis/CostModel/PowerPC/lit.local.cfg b/test/Analysis/CostModel/PowerPC/lit.local.cfg
index 2e463005586f..5d33887ff0a4 100644
--- a/test/Analysis/CostModel/PowerPC/lit.local.cfg
+++ b/test/Analysis/CostModel/PowerPC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'PowerPC' in targets:
+if not 'PowerPC' in config.root.targets:
config.unsupported = True
diff --git a/test/Analysis/CostModel/PowerPC/load_store.ll b/test/Analysis/CostModel/PowerPC/load_store.ll
index c77cce955abf..368f0a73489b 100644
--- a/test/Analysis/CostModel/PowerPC/load_store.ll
+++ b/test/Analysis/CostModel/PowerPC/load_store.ll
@@ -29,6 +29,17 @@ define i32 @loads(i32 %arg) {
; CHECK: cost of 4 {{.*}} load
load i128* undef, align 4
+ ; FIXME: There actually are sub-vector Altivec loads, and so we could handle
+ ; this with a small expense, but we don't currently.
+ ; CHECK: cost of 48 {{.*}} load
+ load <4 x i16>* undef, align 2
+
+ ; CHECK: cost of 1 {{.*}} load
+ load <4 x i32>* undef, align 4
+
+ ; CHECK: cost of 46 {{.*}} load
+ load <3 x float>* undef, align 1
+
ret i32 undef
}
diff --git a/test/Analysis/CostModel/X86/alternate-shuffle-cost.ll b/test/Analysis/CostModel/X86/alternate-shuffle-cost.ll
new file mode 100644
index 000000000000..2e162f0f0005
--- /dev/null
+++ b/test/Analysis/CostModel/X86/alternate-shuffle-cost.ll
@@ -0,0 +1,347 @@
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,-ssse3 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,+sse3,+ssse3 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSSE3
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+; Verify the cost model for alternate shuffles.
+
+; shufflevector instructions with illegal 64-bit vector types.
+; 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
+; 64-bit packed float vectors (v2f32) are widened to type v4f32.
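+; This shows up in the checks below: the promoted v2i32 cases cost 1 on every
+; run line, while the widened v2f32 cases cost 2 on the SSE2 and SSSE3 run
+; lines.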
+
+define <2 x i32> @test_v2i32(<2 x i32> %a, <2 x i32> %b) {
+ %1 = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i32':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x float> @test_v2f32(<2 x float> %a, <2 x float> %b) {
+ %1 = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f32':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x i32> @test_v2i32_2(<2 x i32> %a, <2 x i32> %b) {
+ %1 = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i32_2':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x float> @test_v2f32_2(<2 x float> %a, <2 x float> %b) {
+ %1 = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f32_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+; Test shuffles on packed vectors of two elements.
+
+define <2 x i64> @test_v2i64(<2 x i64> %a, <2 x i64> %b) {
+ %1 = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i64':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <2 x double> @test_v2f64(<2 x double> %a, <2 x double> %b) {
+ %1 = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f64':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <2 x i64> @test_v2i64_2(<2 x i64> %a, <2 x i64> %b) {
+ %1 = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2i64_2':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <2 x double> @test_v2f64_2(<2 x double> %a, <2 x double> %b) {
+ %1 = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v2f64_2':
+; SSE2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+; Test shuffles on packed vectors of four elements.
+
+define <4 x i32> @test_v4i32(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i32':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x i32> @test_v4i32_2(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i32_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f32':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x float> @test_v4f32_2(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f32_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <4 x i64> @test_v4i64(<4 x i64> %a, <4 x i64> %b) {
+ %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i64':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x i64> @test_v4i64_2(<4 x i64> %a, <4 x i64> %b) {
+ %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x i64> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4i64_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b) {
+ %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f64':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <4 x double> @test_v4f64_2(<4 x double> %a, <4 x double> %b) {
+ %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x double> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v4f64_2':
+; SSE2: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+; Test shuffles on packed vectors of eight elements.
+define <8 x i16> @test_v8i16(<8 x i16> %a, <8 x i16> %b) {
+ %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i16':
+; SSE2: Cost Model: {{.*}} 8 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x i16> @test_v8i16_2(<8 x i16> %a, <8 x i16> %b) {
+ %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i16_2':
+; SSE2: Cost Model: {{.*}} 8 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x i32> @test_v8i32(<8 x i32> %a, <8 x i32> %b) {
+ %1 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i32':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x i32> @test_v8i32_2(<8 x i32> %a, <8 x i32> %b) {
+ %1 = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x i32> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8i32_2':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x float> @test_v8f32(<8 x float> %a, <8 x float> %b) {
+ %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8f32':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <8 x float> @test_v8f32_2(<8 x float> %a, <8 x float> %b) {
+ %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x float> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v8f32_2':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 4 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+; Test shuffles on packed vectors of sixteen elements.
+define <16 x i8> @test_v16i8(<16 x i8> %a, <16 x i8> %b) {
+ %1 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ ret <16 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i8':
+; SSE2: Cost Model: {{.*}} 48 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+
+
+define <16 x i8> @test_v16i8_2(<16 x i8> %a, <16 x i8> %b) {
+ %1 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i8_2':
+; SSE2: Cost Model: {{.*}} 48 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 3 for instruction: %1 = shufflevector
+
+
+define <16 x i16> @test_v16i16(<16 x i16> %a, <16 x i16> %b) {
+ %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ ret <16 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i16':
+; SSE2: Cost Model: {{.*}} 16 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 5 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+
+define <16 x i16> @test_v16i16_2(<16 x i16> %a, <16 x i16> %b) {
+ %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x i16> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v16i16_2':
+; SSE2: Cost Model: {{.*}} 16 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 2 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 5 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 1 for instruction: %1 = shufflevector
+
+define <32 x i8> @test_v32i8(<32 x i8> %a, <32 x i8> %b) {
+ %1 = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 33, i32 2, i32 35, i32 4, i32 37, i32 6, i32 39, i32 8, i32 41, i32 10, i32 43, i32 12, i32 45, i32 14, i32 47, i32 16, i32 49, i32 18, i32 51, i32 20, i32 53, i32 22, i32 55, i32 24, i32 57, i32 26, i32 59, i32 28, i32 61, i32 30, i32 63>
+ ret <32 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v32i8':
+; SSE2: Cost Model: {{.*}} 96 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+
+
+define <32 x i8> @test_v32i8_2(<32 x i8> %a, <32 x i8> %b) {
+ %1 = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
+ ret <32 x i8> %1
+}
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_v32i8_2':
+; SSE2: Cost Model: {{.*}} 96 for instruction: %1 = shufflevector
+; SSSE3: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; SSE41: Cost Model: {{.*}} 6 for instruction: %1 = shufflevector
+; AVX: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+; AVX2: Cost Model: {{.*}} 9 for instruction: %1 = shufflevector
+
diff --git a/test/Analysis/CostModel/X86/cast.ll b/test/Analysis/CostModel/X86/cast.ll
index f3c1283c7e32..7f97b176f7c1 100644
--- a/test/Analysis/CostModel/X86/cast.ll
+++ b/test/Analysis/CostModel/X86/cast.ll
@@ -1,10 +1,11 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AVX2
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AVX
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
define i32 @add(i32 %arg) {
-
+; CHECK-LABEL: for function 'add'
; -- Same size registers --
;CHECK: cost of 1 {{.*}} zext
%A = zext <4 x i1> undef to <4 x i32>
@@ -33,57 +34,106 @@ define i32 @add(i32 %arg) {
}
define i32 @zext_sext(<8 x i1> %in) {
- ;CHECK: cost of 6 {{.*}} zext
+; CHECK-AVX2-LABEL: for function 'zext_sext'
+; CHECK-AVX-LABEL: for function 'zext_sext'
+ ;CHECK-AVX2: cost of 3 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
%Z = zext <8 x i1> %in to <8 x i32>
- ;CHECK: cost of 9 {{.*}} sext
+ ;CHECK-AVX2: cost of 3 {{.*}} sext
+ ;CHECK-AVX: cost of 7 {{.*}} sext
%S = sext <8 x i1> %in to <8 x i32>
- ;CHECK: cost of 1 {{.*}} zext
+ ;CHECK-AVX2: cost of 1 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
%A1 = zext <16 x i8> undef to <16 x i16>
- ;CHECK: cost of 1 {{.*}} sext
+ ;CHECK-AVX2: cost of 1 {{.*}} sext
+ ;CHECK-AVX: cost of 4 {{.*}} sext
%A2 = sext <16 x i8> undef to <16 x i16>
- ;CHECK: cost of 1 {{.*}} sext
+ ;CHECK-AVX2: cost of 1 {{.*}} sext
+ ;CHECK-AVX: cost of 4 {{.*}} sext
%A = sext <8 x i16> undef to <8 x i32>
- ;CHECK: cost of 1 {{.*}} zext
+ ;CHECK-AVX2: cost of 1 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
%B = zext <8 x i16> undef to <8 x i32>
- ;CHECK: cost of 1 {{.*}} sext
+ ;CHECK-AVX2: cost of 1 {{.*}} sext
+ ;CHECK-AVX: cost of 4 {{.*}} sext
%C = sext <4 x i32> undef to <4 x i64>
- ;CHECK: cost of 6 {{.*}} sext
- %C1 = sext <4 x i8> undef to <4 x i64>
- ;CHECK: cost of 6 {{.*}} sext
- %C2 = sext <4 x i16> undef to <4 x i64>
- ;CHECK: cost of 1 {{.*}} zext
+ ;CHECK-AVX2: cost of 3 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
+ %C.v8i8.z = zext <8 x i8> undef to <8 x i32>
+ ;CHECK-AVX2: cost of 3 {{.*}} sext
+ ;CHECK-AVX: cost of 7 {{.*}} sext
+ %C.v8i8.s = sext <8 x i8> undef to <8 x i32>
+ ;CHECK-AVX2: cost of 3 {{.*}} zext
+ ;CHECK-AVX: cost of 3 {{.*}} zext
+ %C.v4i16.z = zext <4 x i16> undef to <4 x i64>
+ ;CHECK-AVX2: cost of 3 {{.*}} sext
+ ;CHECK-AVX: cost of 6 {{.*}} sext
+ %C.v4i16.s = sext <4 x i16> undef to <4 x i64>
+
+ ;CHECK-AVX2: cost of 3 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
+ %C.v4i8.z = zext <4 x i8> undef to <4 x i64>
+ ;CHECK-AVX2: cost of 3 {{.*}} sext
+ ;CHECK-AVX: cost of 6 {{.*}} sext
+ %C.v4i8.s = sext <4 x i8> undef to <4 x i64>
+
+ ;CHECK-AVX2: cost of 1 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
%D = zext <4 x i32> undef to <4 x i64>
- ;CHECK: cost of 1 {{.*}} trunc
+ ;CHECK-AVX2: cost of 2 {{.*}} trunc
+ ;CHECK-AVX: cost of 4 {{.*}} trunc
%E = trunc <4 x i64> undef to <4 x i32>
- ;CHECK: cost of 1 {{.*}} trunc
+ ;CHECK-AVX2: cost of 2 {{.*}} trunc
+ ;CHECK-AVX: cost of 5 {{.*}} trunc
%F = trunc <8 x i32> undef to <8 x i16>
- ;CHECK: cost of 2 {{.*}} trunc
+ ;CHECK-AVX2: cost of 4 {{.*}} trunc
+ ;CHECK-AVX: cost of 4 {{.*}} trunc
%F1 = trunc <16 x i16> undef to <16 x i8>
-
- ;CHECK: cost of 3 {{.*}} trunc
+ ;CHECK-AVX2: cost of 2 {{.*}} trunc
+ ;CHECK-AVX: cost of 4 {{.*}} trunc
+ %F2 = trunc <8 x i32> undef to <8 x i8>
+ ;CHECK-AVX2: cost of 2 {{.*}} trunc
+ ;CHECK-AVX: cost of 4 {{.*}} trunc
+ %F3 = trunc <4 x i64> undef to <4 x i8>
+
+ ;CHECK-AVX2: cost of 4 {{.*}} trunc
+ ;CHECK-AVX: cost of 9 {{.*}} trunc
%G = trunc <8 x i64> undef to <8 x i32>
ret i32 undef
}
define i32 @masks8(<8 x i1> %in) {
- ;CHECK: cost of 6 {{.*}} zext
+; CHECK-AVX2-LABEL: for function 'masks8'
+; CHECK-AVX-LABEL: for function 'masks8'
+
+ ;CHECK-AVX2: cost of 3 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
%Z = zext <8 x i1> %in to <8 x i32>
- ;CHECK: cost of 9 {{.*}} sext
+ ;CHECK-AVX2: cost of 3 {{.*}} sext
+ ;CHECK-AVX: cost of 7 {{.*}} sext
%S = sext <8 x i1> %in to <8 x i32>
ret i32 undef
}
define i32 @masks4(<4 x i1> %in) {
- ;CHECK: cost of 8 {{.*}} sext
+; CHECK-AVX2-LABEL: for function 'masks4'
+; CHECK-AVX-LABEL: for function 'masks4'
+
+ ;CHECK-AVX2: cost of 3 {{.*}} zext
+ ;CHECK-AVX: cost of 4 {{.*}} zext
+ %Z = zext <4 x i1> %in to <4 x i64>
+ ;CHECK-AVX2: cost of 3 {{.*}} sext
+ ;CHECK-AVX: cost of 6 {{.*}} sext
%S = sext <4 x i1> %in to <4 x i64>
ret i32 undef
}
define void @sitofp4(<4 x i1> %a, <4 x i8> %b, <4 x i16> %c, <4 x i32> %d) {
+; CHECK-LABEL: for function 'sitofp4'
; CHECK: cost of 3 {{.*}} sitofp
%A1 = sitofp <4 x i1> %a to <4 x float>
; CHECK: cost of 3 {{.*}} sitofp
@@ -107,6 +157,7 @@ define void @sitofp4(<4 x i1> %a, <4 x i8> %b, <4 x i16> %c, <4 x i32> %d) {
}
define void @sitofp8(<8 x i1> %a, <8 x i8> %b, <8 x i16> %c, <8 x i32> %d) {
+; CHECK-LABEL: for function 'sitofp8'
; CHECK: cost of 8 {{.*}} sitofp
%A1 = sitofp <8 x i1> %a to <8 x float>
@@ -122,6 +173,7 @@ define void @sitofp8(<8 x i1> %a, <8 x i8> %b, <8 x i16> %c, <8 x i32> %d) {
}
define void @uitofp4(<4 x i1> %a, <4 x i8> %b, <4 x i16> %c, <4 x i32> %d) {
+; CHECK-LABEL: for function 'uitofp4'
; CHECK: cost of 7 {{.*}} uitofp
%A1 = uitofp <4 x i1> %a to <4 x float>
; CHECK: cost of 7 {{.*}} uitofp
@@ -145,6 +197,7 @@ define void @uitofp4(<4 x i1> %a, <4 x i8> %b, <4 x i16> %c, <4 x i32> %d) {
}
define void @uitofp8(<8 x i1> %a, <8 x i8> %b, <8 x i16> %c, <8 x i32> %d) {
+; CHECK-LABEL: for function 'uitofp8'
; CHECK: cost of 6 {{.*}} uitofp
%A1 = uitofp <8 x i1> %a to <8 x float>
diff --git a/test/Analysis/CostModel/X86/cmp.ll b/test/Analysis/CostModel/X86/cmp.ll
index 713b3742e920..9f2bdb3c21b7 100644
--- a/test/Analysis/CostModel/X86/cmp.ll
+++ b/test/Analysis/CostModel/X86/cmp.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck --check-prefix=AVX1 %s
-; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core-avx2 | FileCheck --check-prefix=AVX2 %s
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck -check-prefix=CHECK -check-prefix=AVX1 %s
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core-avx2 | FileCheck -check-prefix=CHECK -check-prefix=AVX2 %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Analysis/CostModel/X86/intrinsic-cost.ll b/test/Analysis/CostModel/X86/intrinsic-cost.ll
index 8eeee8124d9a..3b27b52f7654 100644
--- a/test/Analysis/CostModel/X86/intrinsic-cost.ll
+++ b/test/Analysis/CostModel/X86/intrinsic-cost.ll
@@ -58,3 +58,31 @@ for.end: ; preds = %vector.body
}
declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
+
+define void @test3(float* nocapture %f, <4 x float> %b, <4 x float> %c) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds float* %f, i64 %index
+ %1 = bitcast float* %0 to <4 x float>*
+ %wide.load = load <4 x float>* %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
+ store <4 x float> %2, <4 x float>* %1, align 4
+ %index.next = add i64 %index, 4
+ %3 = icmp eq i64 %index.next, 1024
+ br i1 %3, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; CORE2: Printing analysis 'Cost Model Analysis' for function 'test3':
+; CORE2: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
+
+; COREI7: Printing analysis 'Cost Model Analysis' for function 'test3':
+; COREI7: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
+
+}
+
+declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
diff --git a/test/Analysis/CostModel/X86/lit.local.cfg b/test/Analysis/CostModel/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Analysis/CostModel/X86/lit.local.cfg
+++ b/test/Analysis/CostModel/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Analysis/CostModel/X86/scalarize.ll b/test/Analysis/CostModel/X86/scalarize.ll
new file mode 100644
index 000000000000..fc25fcbc563f
--- /dev/null
+++ b/test/Analysis/CostModel/X86/scalarize.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=i386 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK32
+; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK64
+
+; Test vector scalarization costs.
+; RUN: llc < %s -march=x86 -mcpu=i386
+; RUN: llc < %s -march=x86 -mcpu=yonah
+
+%i4 = type <4 x i32>
+%i8 = type <2 x i64>
+
+;;; TEST HANDLING OF VARIOUS VECTOR SIZES
+
+declare %i4 @llvm.bswap.v4i32(%i4)
+declare %i8 @llvm.bswap.v2i64(%i8)
+
+declare %i4 @llvm.ctpop.v4i32(%i4)
+declare %i8 @llvm.ctpop.v2i64(%i8)
+
+; CHECK32-LABEL: test_scalarized_intrinsics
+; CHECK64-LABEL: test_scalarized_intrinsics
+define void @test_scalarized_intrinsics() {
+ %r1 = add %i8 undef, undef
+
+; CHECK32: cost of 12 {{.*}}bswap.v4i32
+; CHECK64: cost of 12 {{.*}}bswap.v4i32
+ %r2 = call %i4 @llvm.bswap.v4i32(%i4 undef)
+; CHECK32: cost of 10 {{.*}}bswap.v2i64
+; CHECK64: cost of 6 {{.*}}bswap.v2i64
+ %r3 = call %i8 @llvm.bswap.v2i64(%i8 undef)
+
+; CHECK32: cost of 12 {{.*}}ctpop.v4i32
+; CHECK64: cost of 12 {{.*}}ctpop.v4i32
+ %r4 = call %i4 @llvm.ctpop.v4i32(%i4 undef)
+; CHECK32: cost of 10 {{.*}}ctpop.v2i64
+; CHECK64: cost of 6 {{.*}}ctpop.v2i64
+ %r5 = call %i8 @llvm.ctpop.v2i64(%i8 undef)
+
+; CHECK32: ret
+; CHECK64: ret
+ ret void
+}
diff --git a/test/Analysis/CostModel/X86/vdiv-cost.ll b/test/Analysis/CostModel/X86/vdiv-cost.ll
new file mode 100644
index 000000000000..c8e4557cbefd
--- /dev/null
+++ b/test/Analysis/CostModel/X86/vdiv-cost.ll
@@ -0,0 +1,92 @@
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+sse2,-sse4.1 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+define <4 x i32> @test1(<4 x i32> %a) {
+ %div = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test1':
+; SSE2: Found an estimated cost of 15 for instruction: %div
+; AVX2: Found an estimated cost of 15 for instruction: %div
+}
+
+define <8 x i32> @test2(<8 x i32> %a) {
+ %div = udiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test2':
+; SSE2: Found an estimated cost of 30 for instruction: %div
+; AVX2: Found an estimated cost of 15 for instruction: %div
+}
+
+define <8 x i16> @test3(<8 x i16> %a) {
+ %div = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test3':
+; SSE2: Found an estimated cost of 6 for instruction: %div
+; AVX2: Found an estimated cost of 6 for instruction: %div
+}
+
+define <16 x i16> @test4(<16 x i16> %a) {
+ %div = udiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test4':
+; SSE2: Found an estimated cost of 12 for instruction: %div
+; AVX2: Found an estimated cost of 6 for instruction: %div
+}
+
+define <8 x i16> @test5(<8 x i16> %a) {
+ %div = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test5':
+; SSE2: Found an estimated cost of 6 for instruction: %div
+; AVX2: Found an estimated cost of 6 for instruction: %div
+}
+
+define <16 x i16> @test6(<16 x i16> %a) {
+ %div = sdiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test6':
+; SSE2: Found an estimated cost of 12 for instruction: %div
+; AVX2: Found an estimated cost of 6 for instruction: %div
+}
+
+define <16 x i8> @test7(<16 x i8> %a) {
+ %div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test7':
+; SSE2: Found an estimated cost of 320 for instruction: %div
+; AVX2: Found an estimated cost of 320 for instruction: %div
+}
+
+define <4 x i32> @test8(<4 x i32> %a) {
+ %div = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test8':
+; SSE2: Found an estimated cost of 19 for instruction: %div
+; AVX2: Found an estimated cost of 15 for instruction: %div
+}
+
+define <8 x i32> @test9(<8 x i32> %a) {
+ %div = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test9':
+; SSE2: Found an estimated cost of 38 for instruction: %div
+; AVX2: Found an estimated cost of 15 for instruction: %div
+}
+
+define <8 x i32> @test10(<8 x i32> %a) {
+ %div = sdiv <8 x i32> %a, <i32 8, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %div
+
+; CHECK: 'Cost Model Analysis' for function 'test10':
+; SSE2: Found an estimated cost of 160 for instruction: %div
+; AVX2: Found an estimated cost of 160 for instruction: %div
+}
diff --git a/test/Analysis/CostModel/X86/vselect-cost.ll b/test/Analysis/CostModel/X86/vselect-cost.ll
new file mode 100644
index 000000000000..2416777506df
--- /dev/null
+++ b/test/Analysis/CostModel/X86/vselect-cost.ll
@@ -0,0 +1,126 @@
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+sse2,-sse4.1 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+; Verify the cost of vector select instructions.
+
+; SSE41 added blend instructions with an immediate for <2 x double> and
+; <4 x float>. Integers of the same size should also use those instructions.
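+; (With a constant condition, such a select can be emitted as a single blend
+; with an immediate mask, e.g. blendpd/blendps, or pblendw for the integer
+; cases, which is why the SSE41 cost below is expected to be 1.)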
+
+define <2 x i64> @test_2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_2i64':
+; SSE2: Cost Model: {{.*}} 4 for instruction: %sel = select <2 x i1>
+; SSE41: Cost Model: {{.*}} 1 for instruction: %sel = select <2 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <2 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <2 x i1>
+ %sel = select <2 x i1> <i1 true, i1 false>, <2 x i64> %a, <2 x i64> %b
+ ret <2 x i64> %sel
+}
+
+define <2 x double> @test_2double(<2 x double> %a, <2 x double> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_2double':
+; SSE2: Cost Model: {{.*}} 3 for instruction: %sel = select <2 x i1>
+; SSE41: Cost Model: {{.*}} 1 for instruction: %sel = select <2 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <2 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <2 x i1>
+ %sel = select <2 x i1> <i1 true, i1 false>, <2 x double> %a, <2 x double> %b
+ ret <2 x double> %sel
+}
+
+define <4 x i32> @test_4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_4i32':
+; SSE2: Cost Model: {{.*}} 8 for instruction: %sel = select <4 x i1>
+; SSE41: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+ %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %a, <4 x i32> %b
+ ret <4 x i32> %sel
+}
+
+define <4 x float> @test_4float(<4 x float> %a, <4 x float> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_4float':
+; SSE2: Cost Model: {{.*}} 7 for instruction: %sel = select <4 x i1>
+; SSE41: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+ %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %sel
+}
+
+define <16 x i8> @test_16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_16i8':
+; SSE2: Cost Model: {{.*}} 32 for instruction: %sel = select <16 x i1>
+; SSE41: Cost Model: {{.*}} 1 for instruction: %sel = select <16 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <16 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <16 x i1>
+ %sel = select <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true>, <16 x i8> %a, <16 x i8> %b
+ ret <16 x i8> %sel
+}
+
+; AVX added blend instructions with an immediate for <4 x double> and
+; <8 x float>. Integers of the same size should also use those instructions.
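+; (The 256-bit counterparts are vblendpd/vblendps, so a constant-condition
+; select on these types is again expected to cost 1 on AVX.)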
+define <4 x i64> @test_4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_4i64':
+; SSE2: Cost Model: {{.*}} 8 for instruction: %sel = select <4 x i1>
+; SSE41: Cost Model: {{.*}} 2 for instruction: %sel = select <4 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+ %sel = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i64> %a, <4 x i64> %b
+ ret <4 x i64> %sel
+}
+
+define <4 x double> @test_4double(<4 x double> %a, <4 x double> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_4double':
+; SSE2: Cost Model: {{.*}} 6 for instruction: %sel = select <4 x i1>
+; SSE41: Cost Model: {{.*}} 2 for instruction: %sel = select <4 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <4 x i1>
+ %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %a, <4 x double> %b
+ ret <4 x double> %sel
+}
+
+define <8 x i32> @test_8i32(<8 x i32> %a, <8 x i32> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_8i32':
+; SSE2: Cost Model: {{.*}} 16 for instruction: %sel = select <8 x i1>
+; SSE41: Cost Model: {{.*}} 2 for instruction: %sel = select <8 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <8 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <8 x i1>
+ %sel = select <8 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 false>, <8 x i32> %a, <8 x i32> %b
+ ret <8 x i32> %sel
+}
+
+define <8 x float> @test_8float(<8 x float> %a, <8 x float> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_8float':
+; SSE2: Cost Model: {{.*}} 14 for instruction: %sel = select <8 x i1>
+; SSE41: Cost Model: {{.*}} 2 for instruction: %sel = select <8 x i1>
+; AVX: Cost Model: {{.*}} 1 for instruction: %sel = select <8 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <8 x i1>
+ %sel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x float> %a, <8 x float> %b
+ ret <8 x float> %sel
+}
+
+; AVX2
+define <16 x i16> @test_16i16(<16 x i16> %a, <16 x i16> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_16i16':
+; SSE2: Cost Model: {{.*}} 32 for instruction: %sel = select <16 x i1>
+; SSE41: Cost Model: {{.*}} 2 for instruction: %sel = select <16 x i1>
+;;; FIXME: This AVX cost is obviously wrong. We shouldn't be scalarizing.
+; AVX: Cost Model: {{.*}} 32 for instruction: %sel = select <16 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <16 x i1>
+ %sel = select <16 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <16 x i16> %a, <16 x i16> %b
+ ret <16 x i16> %sel
+}
+
+define <32 x i8> @test_32i8(<32 x i8> %a, <32 x i8> %b) {
+; CHECK: Printing analysis 'Cost Model Analysis' for function 'test_32i8':
+; SSE2: Cost Model: {{.*}} 64 for instruction: %sel = select <32 x i1>
+; SSE41: Cost Model: {{.*}} 2 for instruction: %sel = select <32 x i1>
+;;; FIXME: This AVX cost is obviously wrong. We shouldn't be scalarizing.
+; AVX: Cost Model: {{.*}} 64 for instruction: %sel = select <32 x i1>
+; AVX2: Cost Model: {{.*}} 1 for instruction: %sel = select <32 x i1>
+ %sel = select <32 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true>, <32 x i8> %a, <32 x i8> %b
+ ret <32 x i8> %sel
+}
+
diff --git a/test/Analysis/CostModel/X86/vshift-cost.ll b/test/Analysis/CostModel/X86/vshift-cost.ll
new file mode 100644
index 000000000000..84d72463ac0d
--- /dev/null
+++ b/test/Analysis/CostModel/X86/vshift-cost.ll
@@ -0,0 +1,167 @@
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+sse2,-sse4.1 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+; Verify the cost of vector shift left instructions.
+
+; We always emit a single pmullw in the case of v8i16 vector shifts by a
+; non-uniform constant.
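+; (A shift left by constant amounts is just a multiply by the matching powers
+; of two, e.g. shl by <1, 1, 2, 3, ...> is mul by <2, 2, 4, 8, ...>, which is
+; what makes the single-pmullw lowering possible.)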
+
+define <8 x i16> @test1(<8 x i16> %a) {
+ %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
+ ret <8 x i16> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test1':
+; CHECK: Found an estimated cost of 1 for instruction: %shl
+
+
+define <8 x i16> @test2(<8 x i16> %a) {
+ %shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
+ ret <8 x i16> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test2':
+; CHECK: Found an estimated cost of 1 for instruction: %shl
+
+
+; With SSE4.1, v4i32 shifts can be lowered into a single pmulld instruction.
+; Make sure that the estimated cost is always 1 except for the case where
+; we only have SSE2 support. With SSE2, we are forced to specially lower the
+; v4i32 mul as a sequence of 2x shuffle, 2x pmuludq, 2x shuffle.
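+; (pmulld, the packed 32-bit multiply producing 32-bit results, only appears
+; with SSE4.1; with plain SSE2 the v4i32 multiply has to be built out of
+; pmuludq on the even and odd lanes, hence the higher SSE2 cost below.)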
+
+define <4 x i32> @test3(<4 x i32> %a) {
+ %shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
+ ret <4 x i32> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test3':
+; SSE2: Found an estimated cost of 6 for instruction: %shl
+; SSE41: Found an estimated cost of 1 for instruction: %shl
+; AVX: Found an estimated cost of 1 for instruction: %shl
+; AVX2: Found an estimated cost of 1 for instruction: %shl
+
+
+define <4 x i32> @test4(<4 x i32> %a) {
+ %shl = shl <4 x i32> %a, <i32 0, i32 0, i32 1, i32 1>
+ ret <4 x i32> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test4':
+; SSE2: Found an estimated cost of 6 for instruction: %shl
+; SSE41: Found an estimated cost of 1 for instruction: %shl
+; AVX: Found an estimated cost of 1 for instruction: %shl
+; AVX2: Found an estimated cost of 1 for instruction: %shl
+
+
+; On AVX2 we are able to lower the following shift into a single
+; vpsllvq. Therefore, the expected cost is only 1.
+; In all other cases, this shift is scalarized as the target does not support
+; vpsllv instructions.
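+; (vpsllvq is the AVX2 variable-shift instruction that shifts each 64-bit
+; element by its own amount, which is why no per-element scalarization is
+; needed there.)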
+
+define <2 x i64> @test5(<2 x i64> %a) {
+ %shl = shl <2 x i64> %a, <i64 2, i64 3>
+ ret <2 x i64> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test5':
+; SSE2: Found an estimated cost of 20 for instruction: %shl
+; SSE41: Found an estimated cost of 20 for instruction: %shl
+; AVX: Found an estimated cost of 20 for instruction: %shl
+; AVX2: Found an estimated cost of 1 for instruction: %shl
+
+
+; v16i16 and v8i32 shifts left by a non-uniform constant are lowered into
+; vector multiply instructions. With AVX (but not AVX2), the vector multiply
+; is lowered into a sequence of: 1 extract + 2 vpmullw + 1 insert.
+;
+; With AVX2, the vpmullw instruction works on 256-bit quantities, so there
+; is no need to split the resulting vector multiply into a sequence of two
+; multiplies.
+;
+; With SSE2 and SSE4.1, the vector shift cost for 'test6' is twice the cost
+; computed in the case of 'test1'. That is because the backend simply emits
+; 2 pmullw with no extract/insert.
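+; (That 1 extract + 2 vpmullw + 1 insert sequence is presumably what the AVX
+; cost of 4 on 'test6' accounts for.)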
+
+
+define <16 x i16> @test6(<16 x i16> %a) {
+ %shl = shl <16 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
+ ret <16 x i16> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test6':
+; SSE2: Found an estimated cost of 2 for instruction: %shl
+; SSE41: Found an estimated cost of 2 for instruction: %shl
+; AVX: Found an estimated cost of 4 for instruction: %shl
+; AVX2: Found an estimated cost of 1 for instruction: %shl
+
+
+; With SSE2 and SSE4.1, the vector shift cost for 'test7' is twice the cost
+; computed in the case of 'test3'. That is because the multiply is
+; type-legalized into two v4i32 vector multiplies.
+
+define <8 x i32> @test7(<8 x i32> %a) {
+ %shl = shl <8 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
+ ret <8 x i32> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test7':
+; SSE2: Found an estimated cost of 12 for instruction: %shl
+; SSE41: Found an estimated cost of 2 for instruction: %shl
+; AVX: Found an estimated cost of 4 for instruction: %shl
+; AVX2: Found an estimated cost of 1 for instruction: %shl
+
+
+; On AVX2 we are able to lower the following shift into a single
+; vpsllvq. Therefore, the expected cost is only 1.
+; In all other cases, this shift is scalarized as the target does not support
+; vpsllv instructions.
+
+define <4 x i64> @test8(<4 x i64> %a) {
+ %shl = shl <4 x i64> %a, <i64 1, i64 2, i64 3, i64 4>
+ ret <4 x i64> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test8':
+; SSE2: Found an estimated cost of 40 for instruction: %shl
+; SSE41: Found an estimated cost of 40 for instruction: %shl
+; AVX: Found an estimated cost of 40 for instruction: %shl
+; AVX2: Found an estimated cost of 1 for instruction: %shl
+
+
+; Same as 'test6', except that the cost is doubled.
+
+define <32 x i16> @test9(<32 x i16> %a) {
+ %shl = shl <32 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
+ ret <32 x i16> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test9':
+; SSE2: Found an estimated cost of 4 for instruction: %shl
+; SSE41: Found an estimated cost of 4 for instruction: %shl
+; AVX: Found an estimated cost of 8 for instruction: %shl
+; AVX2: Found an estimated cost of 2 for instruction: %shl
+
+
+; Same as 'test7', except that now the cost is doubled.
+
+define <16 x i32> @test10(<16 x i32> %a) {
+ %shl = shl <16 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
+ ret <16 x i32> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test10':
+; SSE2: Found an estimated cost of 24 for instruction: %shl
+; SSE41: Found an estimated cost of 4 for instruction: %shl
+; AVX: Found an estimated cost of 8 for instruction: %shl
+; AVX2: Found an estimated cost of 2 for instruction: %shl
+
+
+; On AVX2 we are able to lower the following shift into a sequence of
+; two vpsllvq instructions. Therefore, the expected cost is only 2.
+; In all other cases, this shift is scalarized as we don't have vpsllv
+; instructions.
+
+define <8 x i64> @test11(<8 x i64> %a) {
+ %shl = shl <8 x i64> %a, <i64 1, i64 1, i64 2, i64 3, i64 1, i64 1, i64 2, i64 3>
+ ret <8 x i64> %shl
+}
+; CHECK: 'Cost Model Analysis' for function 'test11':
+; SSE2: Found an estimated cost of 80 for instruction: %shl
+; SSE41: Found an estimated cost of 80 for instruction: %shl
+; AVX: Found an estimated cost of 80 for instruction: %shl
+; AVX2: Found an estimated cost of 2 for instruction: %shl
+
+
diff --git a/test/Analysis/Delinearization/a.ll b/test/Analysis/Delinearization/a.ll
index 9308749b2792..efebcc42ea27 100644
--- a/test/Analysis/Delinearization/a.ll
+++ b/test/Analysis/Delinearization/a.ll
@@ -12,17 +12,6 @@
; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(i32) bytes.
; CHECK: ArrayRef[{3,+,2}<%for.i>][{-4,+,3}<%for.j>][{7,+,5}<%for.k>]
-; AddRec: {{(8 + ((4 + (12 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(12 * %o)}<%for.j>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%o] with elements of sizeof(i32) bytes.
-; CHECK: ArrayRef[{(1 + (3 * %m)),+,(2 * %m)}<%for.i>][{2,+,(3 * %o)}<%for.j>]
-
-; AddRec: {(8 + ((-8 + (24 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize] with elements of 2 bytes.
-; CHECK: ArrayRef[{((1 + ((-1 + (3 * %m)) * %o)) * sizeof(i32)),+,(%m * %o * sizeof(i32))}<%for.i>]
-
-; Function Attrs: nounwind uwtable
define void @foo(i64 %n, i64 %m, i64 %o, i32* nocapture %A) #0 {
entry:
%cmp32 = icmp sgt i64 %n, 0
diff --git a/test/Analysis/Delinearization/gcd_multiply_expr.ll b/test/Analysis/Delinearization/gcd_multiply_expr.ll
new file mode 100644
index 000000000000..f962f6db7f2b
--- /dev/null
+++ b/test/Analysis/Delinearization/gcd_multiply_expr.ll
@@ -0,0 +1,153 @@
+; RUN: opt < %s -basicaa -da -analyze -delinearize
+;
+; a, b, c, d, g, h;
+; char *f;
+; static fn1(p1) {
+; char *e = p1;
+; for (; d;) {
+; a = 0;
+; for (;; ++a)
+; for (; b; ++b)
+; c = e[b + a];
+; }
+; }
+;
+; fn2() {
+; for (;;)
+; fn1(&f[g * h]);
+; }
+
+@g = common global i32 0, align 4
+@h = common global i32 0, align 4
+@f = common global i8* null, align 4
+@a = common global i32 0, align 4
+@b = common global i32 0, align 4
+@c = common global i32 0, align 4
+@d = common global i32 0, align 4
+
+define i32 @fn2() {
+entry:
+ %.pr = load i32* @d, align 4
+ %phitmp = icmp eq i32 %.pr, 0
+ br label %for.cond
+
+for.cond:
+ %0 = phi i1 [ true, %for.cond ], [ %phitmp, %entry ]
+ br i1 %0, label %for.cond, label %for.cond2thread-pre-split.preheader.i
+
+for.cond2thread-pre-split.preheader.i:
+ %1 = load i32* @g, align 4
+ %2 = load i32* @h, align 4
+ %mul = mul nsw i32 %2, %1
+ %3 = load i8** @f, align 4
+ %.pr.pre.i = load i32* @b, align 4
+ br label %for.cond2thread-pre-split.i
+
+for.cond2thread-pre-split.i:
+ %.pr.i = phi i32 [ 0, %for.inc5.i ], [ %.pr.pre.i, %for.cond2thread-pre-split.preheader.i ]
+ %storemerge.i = phi i32 [ %inc6.i, %for.inc5.i ], [ 0, %for.cond2thread-pre-split.preheader.i ]
+ store i32 %storemerge.i, i32* @a, align 4
+ %tobool31.i = icmp eq i32 %.pr.i, 0
+ br i1 %tobool31.i, label %for.inc5.i, label %for.body4.preheader.i
+
+for.body4.preheader.i:
+ %4 = icmp slt i32 %.pr.i, -7
+ %add.i = add i32 %storemerge.i, %mul
+ br i1 %4, label %for.body4.i.preheader, label %for.body4.ur.i.preheader
+
+for.body4.i.preheader:
+ %5 = sub i32 -8, %.pr.i
+ %6 = lshr i32 %5, 3
+ %7 = mul i32 %6, 8
+ br label %for.body4.i
+
+for.body4.i:
+ %8 = phi i32 [ %inc.7.i, %for.body4.i ], [ %.pr.i, %for.body4.i.preheader ]
+ %arrayidx.sum1 = add i32 %add.i, %8
+ %arrayidx.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum1
+ %9 = load i8* %arrayidx.i, align 1
+ %conv.i = sext i8 %9 to i32
+ store i32 %conv.i, i32* @c, align 4
+ %inc.i = add nsw i32 %8, 1
+ store i32 %inc.i, i32* @b, align 4
+ %arrayidx.sum2 = add i32 %add.i, %inc.i
+ %arrayidx.1.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum2
+ %10 = load i8* %arrayidx.1.i, align 1
+ %conv.1.i = sext i8 %10 to i32
+ store i32 %conv.1.i, i32* @c, align 4
+ %inc.1.i = add nsw i32 %8, 2
+ store i32 %inc.1.i, i32* @b, align 4
+ %arrayidx.sum3 = add i32 %add.i, %inc.1.i
+ %arrayidx.2.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum3
+ %11 = load i8* %arrayidx.2.i, align 1
+ %conv.2.i = sext i8 %11 to i32
+ store i32 %conv.2.i, i32* @c, align 4
+ %inc.2.i = add nsw i32 %8, 3
+ store i32 %inc.2.i, i32* @b, align 4
+ %arrayidx.sum4 = add i32 %add.i, %inc.2.i
+ %arrayidx.3.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum4
+ %12 = load i8* %arrayidx.3.i, align 1
+ %conv.3.i = sext i8 %12 to i32
+ store i32 %conv.3.i, i32* @c, align 4
+ %inc.3.i = add nsw i32 %8, 4
+ store i32 %inc.3.i, i32* @b, align 4
+ %arrayidx.sum5 = add i32 %add.i, %inc.3.i
+ %arrayidx.4.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum5
+ %13 = load i8* %arrayidx.4.i, align 1
+ %conv.4.i = sext i8 %13 to i32
+ store i32 %conv.4.i, i32* @c, align 4
+ %inc.4.i = add nsw i32 %8, 5
+ store i32 %inc.4.i, i32* @b, align 4
+ %arrayidx.sum6 = add i32 %add.i, %inc.4.i
+ %arrayidx.5.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum6
+ %14 = load i8* %arrayidx.5.i, align 1
+ %conv.5.i = sext i8 %14 to i32
+ store i32 %conv.5.i, i32* @c, align 4
+ %inc.5.i = add nsw i32 %8, 6
+ store i32 %inc.5.i, i32* @b, align 4
+ %arrayidx.sum7 = add i32 %add.i, %inc.5.i
+ %arrayidx.6.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum7
+ %15 = load i8* %arrayidx.6.i, align 1
+ %conv.6.i = sext i8 %15 to i32
+ store i32 %conv.6.i, i32* @c, align 4
+ %inc.6.i = add nsw i32 %8, 7
+ store i32 %inc.6.i, i32* @b, align 4
+ %arrayidx.sum8 = add i32 %add.i, %inc.6.i
+ %arrayidx.7.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum8
+ %16 = load i8* %arrayidx.7.i, align 1
+ %conv.7.i = sext i8 %16 to i32
+ store i32 %conv.7.i, i32* @c, align 4
+ %inc.7.i = add nsw i32 %8, 8
+ store i32 %inc.7.i, i32* @b, align 4
+ %tobool3.7.i = icmp sgt i32 %inc.7.i, -8
+ br i1 %tobool3.7.i, label %for.inc5.loopexit.ur-lcssa.i, label %for.body4.i
+
+for.inc5.loopexit.ur-lcssa.i:
+ %17 = add i32 %.pr.i, 8
+ %18 = add i32 %17, %7
+ %19 = icmp eq i32 %18, 0
+ br i1 %19, label %for.inc5.i, label %for.body4.ur.i.preheader
+
+for.body4.ur.i.preheader:
+ %.ph = phi i32 [ %18, %for.inc5.loopexit.ur-lcssa.i ], [ %.pr.i, %for.body4.preheader.i ]
+ br label %for.body4.ur.i
+
+for.body4.ur.i:
+ %20 = phi i32 [ %inc.ur.i, %for.body4.ur.i ], [ %.ph, %for.body4.ur.i.preheader ]
+ %arrayidx.sum = add i32 %add.i, %20
+ %arrayidx.ur.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum
+ %21 = load i8* %arrayidx.ur.i, align 1
+ %conv.ur.i = sext i8 %21 to i32
+ store i32 %conv.ur.i, i32* @c, align 4
+ %inc.ur.i = add nsw i32 %20, 1
+ store i32 %inc.ur.i, i32* @b, align 4
+ %tobool3.ur.i = icmp eq i32 %inc.ur.i, 0
+ br i1 %tobool3.ur.i, label %for.inc5.i.loopexit, label %for.body4.ur.i
+
+for.inc5.i.loopexit:
+ br label %for.inc5.i
+
+for.inc5.i:
+ %inc6.i = add nsw i32 %storemerge.i, 1
+ br label %for.cond2thread-pre-split.i
+}
diff --git a/test/Analysis/Delinearization/himeno_1.ll b/test/Analysis/Delinearization/himeno_1.ll
index 9458bd2e5261..c94ca7aff759 100644
--- a/test/Analysis/Delinearization/himeno_1.ll
+++ b/test/Analysis/Delinearization/himeno_1.ll
@@ -31,16 +31,6 @@
; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
-; AddRec: {{(-4 + (4 * (sext i32 (-1 + %p.deps) to i64)) + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>
-; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
-; CHECK: ArrayRef[{(1 + (sext i32 %a.cols to i64)),+,(sext i32 %a.cols to i64)}<%for.i>][{(-1 + (sext i32 (-1 + %p.deps) to i64)),+,(sext i32 %a.deps to i64)}<%for.j>]
-
-; AddRec: {(-4 + (4 * (sext i32 (-1 + %p.deps) to i64)) + ((sext i32 %a.deps to i64) * (-4 + (4 * (sext i32 (-1 + %p.cols) to i64)) + (4 * (sext i32 %a.cols to i64)))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>
-; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize] with elements of sizeof(float) bytes.
-; CHECK: ArrayRef[{(-1 + (sext i32 (-1 + %p.deps) to i64) + ((sext i32 %a.deps to i64) * (-1 + (sext i32 (-1 + %p.cols) to i64) + (sext i32 %a.cols to i64)))),+,((sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>]
-
%struct.Mat = type { float*, i32, i32, i32, i32 }
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
diff --git a/test/Analysis/Delinearization/himeno_2.ll b/test/Analysis/Delinearization/himeno_2.ll
index a29006606fab..c256384f201e 100644
--- a/test/Analysis/Delinearization/himeno_2.ll
+++ b/test/Analysis/Delinearization/himeno_2.ll
@@ -31,16 +31,6 @@
; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
-; AddRec: {{(-4 + (4 * (sext i32 (-1 + %p.deps) to i64)) + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>
-; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
-; CHECK: ArrayRef[{(1 + (sext i32 %a.cols to i64)),+,(sext i32 %a.cols to i64)}<%for.i>][{(-1 + (sext i32 (-1 + %p.deps) to i64)),+,(sext i32 %a.deps to i64)}<%for.j>]
-
-; AddRec: {(-4 + (4 * (sext i32 (-1 + %p.deps) to i64)) + ((sext i32 %a.deps to i64) * (-4 + (4 * (sext i32 (-1 + %p.cols) to i64)) + (4 * (sext i32 %a.cols to i64)))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>
-; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize] with elements of sizeof(float) bytes.
-; CHECK: ArrayRef[{(-1 + (sext i32 (-1 + %p.deps) to i64) + ((sext i32 %a.deps to i64) * (-1 + (sext i32 (-1 + %p.cols) to i64) + (sext i32 %a.cols to i64)))),+,((sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>]
-
%struct.Mat = type { float*, i32, i32, i32, i32 }
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
diff --git a/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll b/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
new file mode 100644
index 000000000000..01a4b96b11a0
--- /dev/null
+++ b/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
@@ -0,0 +1,45 @@
+; RUN: opt < %s -analyze -delinearize | FileCheck %s
+
+; Derived from the following code:
+;
+; void foo(long n, long m, long b, double A[n][m]) {
+; for (long i = 0; i < n; i++)
+; for (long j = 0; j < m; j++)
+;       A[2*i+b][2*j] = 1.0;
+; }
+
+; AddRec: {{((%m * %b * sizeof(double)) + %A),+,(2 * %m * sizeof(double))}<%for.i>,+,(2 * sizeof(double))}<%for.j>
+; CHECK: Base offset: %A
+; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; CHECK: ArrayRef[{%b,+,2}<%for.i>][{0,+,2}<%for.j>]
+
+
+define void @foo(i64 %n, i64 %m, i64 %b, double* %A) {
+entry:
+ br label %for.i
+
+for.i:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %for.i.inc ]
+ %outerdim = mul nsw i64 %i, 2
+ %outerdim2 = add nsw i64 %outerdim, %b
+ %tmp = mul nsw i64 %outerdim2, %m
+ br label %for.j
+
+for.j:
+ %j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
+ %prodj = mul i64 %j, 2
+ %vlaarrayidx.sum = add i64 %prodj, %tmp
+ %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
+ store double 1.0, double* %arrayidx
+ %j.inc = add nsw i64 %j, 1
+ %j.exitcond = icmp eq i64 %j.inc, %m
+ br i1 %j.exitcond, label %for.i.inc, label %for.j
+
+for.i.inc:
+ %i.inc = add nsw i64 %i, 1
+ %i.exitcond = icmp eq i64 %i.inc, %n
+ br i1 %i.exitcond, label %end, label %for.i
+
+end:
+ ret void
+}
diff --git a/test/Analysis/Delinearization/lit.local.cfg b/test/Analysis/Delinearization/lit.local.cfg
index 19eebc0ac7ac..c6106e4746f2 100644
--- a/test/Analysis/Delinearization/lit.local.cfg
+++ b/test/Analysis/Delinearization/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.ll', '.c', '.cpp']
+config.suffixes = ['.ll']
diff --git a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
index 82cab167c74f..ae80ebc52271 100644
--- a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
+++ b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
@@ -13,16 +13,6 @@
; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.i>][{-4,+,1}<nw><%for.j>][{7,+,1}<nw><%for.k>]
-; AddRec: {{(48 + ((-24 + (24 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%o] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(-3 + (3 * %m)),+,%m}<%for.i>][{6,+,%o}<%for.j>]
-
-; AddRec: {(48 + ((-32 + (32 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(6 + ((-4 + (4 * %m)) * %o)),+,(%m * %o)}<%for.i>]
-
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
entry:
br label %for.i
diff --git a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
index a1e779fff6c9..75080dad3af7 100644
--- a/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
+++ b/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
@@ -13,16 +13,6 @@
; CHECK: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of sizeof(double) bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.cond4.preheader.lr.ph.us>][{-4,+,1}<nw><%for.body6.lr.ph.us.us>][{7,+,1}<nw><%for.body6.us.us>]
-; AddRec: {{(48 + (8 * %o) + (8 * (-4 + (3 * %m)) * (%o + %p)) + %A),+,(8 * (%o + %p) * %m)}<%for.cond4.preheader.lr.ph.us>,+,(8 * (%o + %p))}<%for.body6.lr.ph.us.us>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][(%o + %p)] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(-4 + (3 * %m)),+,%m}<%for.cond4.preheader.lr.ph.us>][{(6 + %o),+,(%o + %p)}<%for.body6.lr.ph.us.us>]
-
-; AddRec: {(48 + (8 * %o) + ((-40 + (32 * %m)) * (%o + %p)) + %A),+,(8 * (%o + %p) * %m)}<%for.cond4.preheader.lr.ph.us>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(6 + ((-5 + (4 * %m)) * (%o + %p)) + %o),+,((%o + %p) * %m)}<%for.cond4.preheader.lr.ph.us>]
-
define void @foo(i64 %n, i64 %m, i64 %o, i64 %p, double* nocapture %A) nounwind uwtable {
entry:
%add = add nsw i64 %p, %o
diff --git a/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll b/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
index a52a4c93ce23..e921444668d0 100644
--- a/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
+++ b/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
@@ -13,16 +13,6 @@
; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
; CHECK: ArrayRef[{%p,+,1}<nw><%for.i>][{%q,+,1}<nw><%for.j>][{%r,+,1}<nw><%for.k>]
-; AddRec: {{(-8 + (8 * ((((%m * %p) + %q) * %o) + %r)) + (8 * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%o] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(1 + (%m * %p) + %q),+,%m}<%for.i>][{(-1 + %r),+,%o}<%for.j>]
-
-; AddRec: {(-8 + (8 * ((((%m * %p) + %q) * %o) + %r)) + (8 * %m * %o) + %A),+,(8 * %m * %o)}<%for.i>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(-1 + ((((1 + %p) * %m) + %q) * %o) + %r),+,(%m * %o)}<%for.i>]
-
define void @foo(i64 %n, i64 %m, i64 %o, double* %A, i64 %p, i64 %q, i64 %r) {
entry:
br label %for.i
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_2d.ll b/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
index d68a15883942..5a88c4ce4eb1 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
@@ -8,16 +8,20 @@
; A[i][j] = 1.0;
; }
+; Inst: %val = load double* %arrayidx
+; In Loop with Header: for.j
+; AddRec: {{0,+,(%m * sizeof(double))}<%for.i>,+,sizeof(double)}<%for.j>
+; Base offset: %A
+; ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
+
+; Inst: store double %val, double* %arrayidx
+; In Loop with Header: for.j
; AddRec: {{%A,+,(8 * %m)}<%for.i>,+,8}<%for.j>
; CHECK: Base offset: %A
; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
-; AddRec: {(-8 + (8 * %m) + %A),+,(8 * %m)}<%for.i>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(-1 + %m),+,%m}<%for.i>]
-
define void @foo(i64 %n, i64 %m, double* %A) {
entry:
br label %for.i
@@ -31,7 +35,8 @@ for.j:
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%vlaarrayidx.sum = add i64 %j, %tmp
%arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
- store double 1.0, double* %arrayidx
+ %val = load double* %arrayidx
+ store double %val, double* %arrayidx
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
br i1 %j.exitcond, label %for.i.inc, label %for.j
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll b/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll
index 7207420205aa..810188f7d552 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_2d_nested.ll
@@ -1,4 +1,6 @@
; RUN: opt < %s -analyze -delinearize | FileCheck %s
+; XFAIL: *
+; We do not recognize anymore variable size arrays.
; extern void bar(long n, long m, double A[n][m]);
;
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_3d.ll b/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
index 24f95837c860..aad0f0940840 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
@@ -13,16 +13,6 @@
; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>][{0,+,1}<nuw><nsw><%for.k>]
-; AddRec: {{(-8 + (8 * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][(%m * %o)] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{(-1 + %o),+,%o}<%for.j>]
-
-; AddRec: {(-8 + (8 * %m * %o) + %A),+,(8 * %m * %o)}<%for.i>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize] with elements of sizeof(double) bytes.
-; CHECK: ArrayRef[{(-1 + (%m * %o)),+,(%m * %o)}<%for.i>]
-
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
entry:
br label %for.i
diff --git a/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll b/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll
index e1516104ddfc..9e406d125f44 100644
--- a/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll
+++ b/test/Analysis/Delinearization/multidim_only_ivs_3d_cast.ll
@@ -12,16 +12,6 @@
; CHECK: ArrayDecl[UnknownSize][(zext i32 %m to i64)][(zext i32 %o to i64)] with elements of 8 bytes.
; CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
-; AddRec: {{((8 * (zext i32 (-1 + %o) to i64)) + %A),+,(8 * (zext i32 %m to i64) * (zext i32 %o to i64))}<%for.i>,+,(8 * (zext i32 %o to i64))}<%for.j>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][((zext i32 %m to i64) * (zext i32 %o to i64))] with elements of 8 bytes.
-; CHECK: ArrayRef[{0,+,1}<%for.i>][{(zext i32 (-1 + %o) to i64),+,(zext i32 %o to i64)}<%for.j>]
-
-; AddRec: {((8 * (zext i32 (-1 + %o) to i64)) + (8 * (zext i32 (-1 + %m) to i64) * (zext i32 %o to i64)) + %A),+,(8 * (zext i32 %m to i64) * (zext i32 %o to i64))}<%for.i>
-; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize] with elements of 8 bytes.
-; CHECK: ArrayRef[{((zext i32 (-1 + %o) to i64) + ((zext i32 (-1 + %m) to i64) * (zext i32 %o to i64))),+,((zext i32 %m to i64) * (zext i32 %o to i64))}<%for.i>]
-
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll b/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll
new file mode 100644
index 000000000000..6a98507340ac
--- /dev/null
+++ b/test/Analysis/Delinearization/multidim_two_accesses_different_delinearization.ll
@@ -0,0 +1,43 @@
+; RUN: opt -basicaa -da -analyze -da-delinearize < %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Derived from the following code:
+;
+; void foo(long n, long m, double *A) {
+; for (long i = 0; i < n; i++)
+; for (long j = 0; j < m; j++)
+; *(A + i * n + j) = 1.0;
+; *(A + j * m + i) = 2.0;
+; }
+
+define void @foo(i64 %n, i64 %m, double* %A) {
+entry:
+ br label %for.i
+
+for.i:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %for.i.inc ]
+ br label %for.j
+
+for.j:
+ %j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
+ %tmp = mul nsw i64 %i, %m
+ %vlaarrayidx.sum = add i64 %j, %tmp
+ %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
+ store double 1.0, double* %arrayidx
+ %tmp1 = mul nsw i64 %j, %n
+ %vlaarrayidx.sum1 = add i64 %i, %tmp1
+ %arrayidx1 = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum1
+ store double 1.0, double* %arrayidx1
+ %j.inc = add nsw i64 %j, 1
+ %j.exitcond = icmp eq i64 %j.inc, %m
+ br i1 %j.exitcond, label %for.i.inc, label %for.j
+
+for.i.inc:
+ %i.inc = add nsw i64 %i, 1
+ %i.exitcond = icmp eq i64 %i.inc, %n
+ br i1 %i.exitcond, label %end, label %for.i
+
+end:
+ ret void
+}
diff --git a/test/Analysis/Delinearization/undef.ll b/test/Analysis/Delinearization/undef.ll
new file mode 100644
index 000000000000..8ee64e3a2f40
--- /dev/null
+++ b/test/Analysis/Delinearization/undef.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -analyze -delinearize
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(double* %Ey) {
+entry:
+ br i1 undef, label %for.cond55.preheader, label %for.end324
+
+for.cond55.preheader:
+ %iz.069 = phi i64 [ %inc323, %for.inc322 ], [ 0, %entry ]
+ br i1 undef, label %for.cond58.preheader, label %for.inc322
+
+for.cond58.preheader:
+ %iy.067 = phi i64 [ %inc320, %for.end ], [ 0, %for.cond55.preheader ]
+ br i1 undef, label %for.body60, label %for.end
+
+for.body60:
+ %ix.062 = phi i64 [ %inc, %for.body60 ], [ 0, %for.cond58.preheader ]
+ %0 = mul i64 %iz.069, undef
+ %tmp5 = add i64 %iy.067, %0
+ %tmp6 = mul i64 %tmp5, undef
+ %arrayidx69.sum = add i64 undef, %tmp6
+ %arrayidx70 = getelementptr inbounds double* %Ey, i64 %arrayidx69.sum
+ %1 = load double* %arrayidx70, align 8
+ %inc = add nsw i64 %ix.062, 1
+ br i1 false, label %for.body60, label %for.end
+
+for.end:
+ %inc320 = add nsw i64 %iy.067, 1
+ br i1 undef, label %for.cond58.preheader, label %for.inc322
+
+for.inc322:
+ %inc323 = add nsw i64 %iz.069, 1
+ br i1 undef, label %for.cond55.preheader, label %for.end324
+
+for.end324:
+ ret void
+}
diff --git a/test/Analysis/DependenceAnalysis/Banerjee.ll b/test/Analysis/DependenceAnalysis/Banerjee.ll
index 09e8fd29dcc4..883a06d0bed5 100644
--- a/test/Analysis/DependenceAnalysis/Banerjee.ll
+++ b/test/Analysis/DependenceAnalysis/Banerjee.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -da -da-delinearize=false | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -da -da-delinearize | FileCheck %s -check-prefix=DELIN
; ModuleID = 'Banerjee.bc'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -21,6 +22,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee0':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [<= <>]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc7
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ]
%i.03 = phi i64 [ 1, %entry ], [ %inc8, %for.inc7 ]
@@ -73,6 +82,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - output [* *]!
+; DELIN: 'Dependence Analysis' for function 'banerjee1':
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - flow [* <>]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - output [* *]!
+
for.cond1.preheader.preheader: ; preds = %entry
%0 = add i64 %n, 1
br label %for.cond1.preheader
@@ -140,6 +157,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee2':
+; DELIN: da analyze - none!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc8
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ]
@@ -191,6 +216,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee3':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [> >]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc8
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ]
@@ -242,6 +275,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee4':
+; DELIN: da analyze - none!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc7
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ]
@@ -293,6 +334,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee5':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [< <]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc7
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ]
@@ -344,6 +393,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee6':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [=> <>]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc8
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ]
@@ -395,6 +452,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee7':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [> <=]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc8
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ]
@@ -446,6 +511,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee8':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [> <>]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc8
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ]
@@ -497,6 +570,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee9':
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - flow [<= =|<]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc8
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ]
@@ -549,6 +630,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee10':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [<> =]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc7
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ]
@@ -600,6 +689,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee11':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [<= <>]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc7
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ]
@@ -651,6 +748,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'banerjee12':
+; DELIN: da analyze - none!
+; DELIN: da analyze - flow [= <>]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc7
%B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ]
diff --git a/test/Analysis/DependenceAnalysis/GCD.ll b/test/Analysis/DependenceAnalysis/GCD.ll
index bb31d118857d..7eca18ed262c 100644
--- a/test/Analysis/DependenceAnalysis/GCD.ll
+++ b/test/Analysis/DependenceAnalysis/GCD.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -analyze -basicaa -da | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -da -da-delinearize=false | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -da -da-delinearize | FileCheck %s -check-prefix=DELIN
; ModuleID = 'GCD.bc'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -22,6 +23,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'gcd0'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - flow [=> *|<]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc8
%B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc8 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ]
@@ -75,6 +84,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'gcd1'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc9
%B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc9 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc10, %for.inc9 ]
@@ -129,6 +146,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'gcd2'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc9
%B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc9 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc10, %for.inc9 ]
@@ -183,6 +208,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'gcd3'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - flow [<> *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc7
%B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc7 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ]
@@ -235,6 +268,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'gcd4'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc17
%B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc17 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc18, %for.inc17 ]
@@ -297,6 +338,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
+; DELIN: 'Dependence Analysis' for function 'gcd5'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - flow [<> *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - none!
+
for.cond1.preheader: ; preds = %entry, %for.inc17
%B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc17 ]
%i.03 = phi i64 [ 0, %entry ], [ %inc18, %for.inc17 ]
@@ -360,6 +409,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - output [* *]!
+; DELIN: 'Dependence Analysis' for function 'gcd6'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - output [* *]!
+
for.cond1.preheader.preheader: ; preds = %entry
br label %for.cond1.preheader
@@ -432,6 +489,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - output [* *]!
+; DELIN: 'Dependence Analysis' for function 'gcd7'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - flow [* *|<]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - output [* *]!
+
for.cond1.preheader.preheader: ; preds = %entry
br label %for.cond1.preheader
@@ -516,6 +581,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - output [* *]!
+; DELIN: 'Dependence Analysis' for function 'gcd8'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - none!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - output [* *]!
+
for.cond1.preheader.preheader: ; preds = %entry
br label %for.cond1.preheader
@@ -595,6 +668,14 @@ entry:
; CHECK: da analyze - confused!
; CHECK: da analyze - output [* *]!
+; DELIN: 'Dependence Analysis' for function 'gcd9'
+; DELIN: da analyze - output [* *]!
+; DELIN: da analyze - flow [* *|<]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - input [* *]!
+; DELIN: da analyze - confused!
+; DELIN: da analyze - output [* *]!
+
for.cond1.preheader.preheader: ; preds = %entry
br label %for.cond1.preheader
diff --git a/test/Analysis/LazyCallGraph/basic.ll b/test/Analysis/LazyCallGraph/basic.ll
new file mode 100644
index 000000000000..b8108d99ed6f
--- /dev/null
+++ b/test/Analysis/LazyCallGraph/basic.ll
@@ -0,0 +1,176 @@
+; RUN: opt -disable-output -passes=print-cg %s 2>&1 | FileCheck %s
+;
+; Basic validation of the call graph analysis used in the new pass manager.
+
+define void @f() {
+; CHECK-LABEL: Call edges in function: f
+; CHECK-NOT: ->
+
+entry:
+ ret void
+}
+
+; A bunch more functions just to make it easier to test several call edges at once.
+define void @f1() {
+ ret void
+}
+define void @f2() {
+ ret void
+}
+define void @f3() {
+ ret void
+}
+define void @f4() {
+ ret void
+}
+define void @f5() {
+ ret void
+}
+define void @f6() {
+ ret void
+}
+define void @f7() {
+ ret void
+}
+define void @f8() {
+ ret void
+}
+define void @f9() {
+ ret void
+}
+define void @f10() {
+ ret void
+}
+define void @f11() {
+ ret void
+}
+define void @f12() {
+ ret void
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+define void @test0() {
+; CHECK-LABEL: Call edges in function: test0
+; CHECK-NEXT: -> f
+; CHECK-NOT: ->
+
+entry:
+ call void @f()
+ call void @f()
+ call void @f()
+ call void @f()
+ ret void
+}
+
+define void ()* @test1(void ()** %x) {
+; CHECK-LABEL: Call edges in function: test1
+; CHECK-NEXT: -> f12
+; CHECK-NEXT: -> f11
+; CHECK-NEXT: -> f10
+; CHECK-NEXT: -> f7
+; CHECK-NEXT: -> f9
+; CHECK-NEXT: -> f8
+; CHECK-NEXT: -> f6
+; CHECK-NEXT: -> f5
+; CHECK-NEXT: -> f4
+; CHECK-NEXT: -> f3
+; CHECK-NEXT: -> f2
+; CHECK-NEXT: -> f1
+; CHECK-NOT: ->
+
+entry:
+ br label %next
+
+dead:
+ br label %next
+
+next:
+ phi void ()* [ @f1, %entry ], [ @f2, %dead ]
+ select i1 true, void ()* @f3, void ()* @f4
+ store void ()* @f5, void ()** %x
+ call void @f6()
+ call void (void ()*, void ()*)* bitcast (void ()* @f7 to void (void ()*, void ()*)*)(void ()* @f8, void ()* @f9)
+ invoke void @f10() to label %exit unwind label %unwind
+
+exit:
+ ret void ()* @f11
+
+unwind:
+ %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ resume { i8*, i32 } { i8* bitcast (void ()* @f12 to i8*), i32 42 }
+}
+
+@g = global void ()* @f1
+@g1 = global [4 x void ()*] [void ()* @f2, void ()* @f3, void ()* @f4, void ()* @f5]
+@g2 = global {i8, void ()*, i8} {i8 1, void ()* @f6, i8 2}
+@h = constant void ()* @f7
+
+define void @test2() {
+; CHECK-LABEL: Call edges in function: test2
+; CHECK-NEXT: -> f7
+; CHECK-NEXT: -> f6
+; CHECK-NEXT: -> f5
+; CHECK-NEXT: -> f4
+; CHECK-NEXT: -> f3
+; CHECK-NEXT: -> f2
+; CHECK-NEXT: -> f1
+; CHECK-NOT: ->
+
+ load i8** bitcast (void ()** @g to i8**)
+ load i8** bitcast (void ()** getelementptr ([4 x void ()*]* @g1, i32 0, i32 2) to i8**)
+ load i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**)
+ load i8** bitcast (void ()** @h to i8**)
+ ret void
+}
+
+; Verify the SCCs formed.
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f7
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f6
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f5
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f4
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f3
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f2
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f1
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: test2
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f12
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f11
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f10
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f9
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f8
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: test1
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: f
+;
+; CHECK-LABEL: SCC with 1 functions:
+; CHECK-NEXT: test0
diff --git a/test/Analysis/Lint/address-spaces.ll b/test/Analysis/Lint/address-spaces.ll
new file mode 100644
index 000000000000..46ee1d734baf
--- /dev/null
+++ b/test/Analysis/Lint/address-spaces.ll
@@ -0,0 +1,25 @@
+; RUN: opt -lint < %s
+
+target datalayout = "p32:32:32-p1:16:16:16-n16:32"
+
+declare void @foo(i64) nounwind
+
+define i64 @test1(i32 addrspace(1)* %x) nounwind {
+ %y = ptrtoint i32 addrspace(1)* %x to i64
+ ret i64 %y
+}
+
+define <4 x i64> @test1_vector(<4 x i32 addrspace(1)*> %x) nounwind {
+ %y = ptrtoint <4 x i32 addrspace(1)*> %x to <4 x i64>
+ ret <4 x i64> %y
+}
+
+define i32 addrspace(1)* @test2(i64 %x) nounwind {
+ %y = inttoptr i64 %x to i32 addrspace(1)*
+ ret i32 addrspace(1)* %y
+}
+
+define <4 x i32 addrspace(1)*> @test2_vector(<4 x i64> %x) nounwind {
+ %y = inttoptr <4 x i64> %x to <4 x i32 addrspace(1)*>
+ ret <4 x i32 addrspace(1)*> %y
+}
\ No newline at end of file
diff --git a/test/Analysis/ScalarEvolution/2009-04-22-TruncCast.ll b/test/Analysis/ScalarEvolution/2009-04-22-TruncCast.ll
index 3dacfbb0a8d4..a845465a26b5 100644
--- a/test/Analysis/ScalarEvolution/2009-04-22-TruncCast.ll
+++ b/test/Analysis/ScalarEvolution/2009-04-22-TruncCast.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -analyze -scalar-evolution | grep "(trunc i" | not grep ext
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
; CHECK: Printing analysis 'Scalar Evolution Analysis' for function 'test1'
; CHECK-NOT: (trunc i{{.*}}ext
diff --git a/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll b/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
index b88e33f2bb8a..5746d1c5900c 100644
--- a/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
+++ b/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -basicaa -globalopt -instcombine -loop-rotate -licm -instcombine -indvars -loop-deletion -constmerge -S
+; RUN: opt < %s -basicaa -globalopt -instcombine -loop-rotate -licm -instcombine -indvars -loop-deletion -constmerge -S | FileCheck %s
; PR11882: ComputeLoadConstantCompareExitLimit crash.
;
; for.body is deleted leaving a loop-invariant load.
diff --git a/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll b/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll
index 52e6683c9f01..66df9d19234f 100644
--- a/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll
+++ b/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll
@@ -4,7 +4,7 @@
; getUDivExpr()->getZeroExtendExpr()->isLoopBackedgeGuardedBy()
;
; We actually want SCEV simplification to fail gracefully in this
-; case, so there's no output to check, just the absense of stack overflow.
+; case, so there's no output to check, just the absence of stack overflow.
@c = common global i8 0, align 1
diff --git a/test/Analysis/ScalarEvolution/and-xor.ll b/test/Analysis/ScalarEvolution/and-xor.ll
index 404ab91e269d..ad636da4d4d7 100644
--- a/test/Analysis/ScalarEvolution/and-xor.ll
+++ b/test/Analysis/ScalarEvolution/and-xor.ll
@@ -1,11 +1,27 @@
; RUN: opt < %s -scalar-evolution -analyze | FileCheck %s
+; CHECK-LABEL: @test1
; CHECK: --> (zext
; CHECK: --> (zext
; CHECK-NOT: --> (zext
-define i32 @foo(i32 %x) {
+define i32 @test1(i32 %x) {
%n = and i32 %x, 255
%y = xor i32 %n, 255
ret i32 %y
}
+
+; ScalarEvolution shouldn't try to analyze %z into something like
+; --> (zext i4 (-1 + (-1 * (trunc i64 (8 * %x) to i4))) to i64)
+; or
+; --> (8 * (zext i1 (trunc i64 ((8 * %x) /u 8) to i1) to i64))
+
+; CHECK-LABEL: @test2
+; CHECK: --> (8 * (zext i1 (trunc i64 %x to i1) to i64))
+
+define i64 @test2(i64 %x) {
+ %a = shl i64 %x, 3
+ %t = and i64 %a, 8
+ %z = xor i64 %t, 8
+ ret i64 %z
+}
diff --git a/test/Analysis/ScalarEvolution/fold.ll b/test/Analysis/ScalarEvolution/fold.ll
index 57006dd9bb42..ab5742557b33 100644
--- a/test/Analysis/ScalarEvolution/fold.ll
+++ b/test/Analysis/ScalarEvolution/fold.ll
@@ -60,3 +60,29 @@ loop:
exit:
ret void
}
+
+define void @test5(i32 %i) {
+; CHECK-LABEL: @test5
+ %A = and i32 %i, 1
+; CHECK: --> (zext i1 (trunc i32 %i to i1) to i32)
+ %B = and i32 %i, 2
+; CHECK: --> (2 * (zext i1 (trunc i32 (%i /u 2) to i1) to i32))
+ %C = and i32 %i, 63
+; CHECK: --> (zext i6 (trunc i32 %i to i6) to i32)
+ %D = and i32 %i, 126
+; CHECK: --> (2 * (zext i6 (trunc i32 (%i /u 2) to i6) to i32))
+ %E = and i32 %i, 64
+; CHECK: --> (64 * (zext i1 (trunc i32 (%i /u 64) to i1) to i32))
+ %F = and i32 %i, -2147483648
+; CHECK: --> (-2147483648 * (%i /u -2147483648))
+ ret void
+}
+
+define void @test6(i8 %x) {
+; CHECK-LABEL: @test6
+ %A = zext i8 %x to i16
+ %B = shl nuw i16 %A, 8
+ %C = and i16 %B, -2048
+; CHECK: --> (2048 * ((zext i8 %x to i16) /u 8))
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/max-trip-count.ll b/test/Analysis/ScalarEvolution/max-trip-count.ll
index 0cdbdf57a64c..31f06a46ad00 100644
--- a/test/Analysis/ScalarEvolution/max-trip-count.ll
+++ b/test/Analysis/ScalarEvolution/max-trip-count.ll
@@ -98,3 +98,112 @@ for.end: ; preds = %for.cond.for.end_cr
; CHECK: Determining loop execution counts for: @test
; CHECK-NEXT: backedge-taken count is
; CHECK-NEXT: max backedge-taken count is -1
+
+; PR19799: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
+; CHECK-LABEL: @pr19799
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: max backedge-taken count is 1
+@a = common global i32 0, align 4
+
+define i32 @pr19799() {
+entry:
+ store i32 -1, i32* @a, align 4
+ br label %for.body.i
+
+for.body.i: ; preds = %for.cond.i, %entry
+ %storemerge1.i = phi i32 [ -1, %entry ], [ %add.i.i, %for.cond.i ]
+ %tobool.i = icmp eq i32 %storemerge1.i, 0
+ %add.i.i = add nsw i32 %storemerge1.i, 2
+ br i1 %tobool.i, label %bar.exit, label %for.cond.i
+
+for.cond.i: ; preds = %for.body.i
+ store i32 %add.i.i, i32* @a, align 4
+ %cmp.i = icmp slt i32 %storemerge1.i, 0
+ br i1 %cmp.i, label %for.body.i, label %bar.exit
+
+bar.exit: ; preds = %for.cond.i, %for.body.i
+ ret i32 0
+}
+
+; PR18886: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
+; CHECK-LABEL: @pr18886
+; CHECK: Loop %for.body: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body: max backedge-taken count is 3
+@aa = global i64 0, align 8
+
+define i32 @pr18886() {
+entry:
+ store i64 -21, i64* @aa, align 8
+ br label %for.body
+
+for.body:
+ %storemerge1 = phi i64 [ -21, %entry ], [ %add, %for.cond ]
+ %tobool = icmp eq i64 %storemerge1, 0
+ %add = add nsw i64 %storemerge1, 8
+ br i1 %tobool, label %return, label %for.cond
+
+for.cond:
+ store i64 %add, i64* @aa, align 8
+ %cmp = icmp slt i64 %add, 9
+ br i1 %cmp, label %for.body, label %return
+
+return:
+ %retval.0 = phi i32 [ 1, %for.body ], [ 0, %for.cond ]
+ ret i32 %retval.0
+}
+
+; Here we have a must-exit loop latch that is not computable and a
+; may-exit early exit that can only have one non-exiting iteration
+; before the check is forever skipped.
+;
+; CHECK-LABEL: @cannot_compute_mustexit
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: Unpredictable max backedge-taken count.
+@b = common global i32 0, align 4
+
+define i32 @cannot_compute_mustexit() {
+entry:
+ store i32 -1, i32* @a, align 4
+ br label %for.body.i
+
+for.body.i: ; preds = %for.cond.i, %entry
+ %storemerge1.i = phi i32 [ -1, %entry ], [ %add.i.i, %for.cond.i ]
+ %tobool.i = icmp eq i32 %storemerge1.i, 0
+ %add.i.i = add nsw i32 %storemerge1.i, 2
+ br i1 %tobool.i, label %bar.exit, label %for.cond.i
+
+for.cond.i: ; preds = %for.body.i
+ store i32 %add.i.i, i32* @a, align 4
+ %ld = load volatile i32* @b
+ %cmp.i = icmp ne i32 %ld, 0
+ br i1 %cmp.i, label %for.body.i, label %bar.exit
+
+bar.exit: ; preds = %for.cond.i, %for.body.i
+ ret i32 0
+}
+
+; This loop has two must-exits, both of which dominate the latch. The
+; MaxBECount should be the minimum of them.
+;
+; CHECK-LABEL: @two_mustexit
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: max backedge-taken count is 1
+define i32 @two_mustexit() {
+entry:
+ store i32 -1, i32* @a, align 4
+ br label %for.body.i
+
+for.body.i: ; preds = %for.cond.i, %entry
+ %storemerge1.i = phi i32 [ -1, %entry ], [ %add.i.i, %for.cond.i ]
+ %tobool.i = icmp sgt i32 %storemerge1.i, 0
+ %add.i.i = add nsw i32 %storemerge1.i, 2
+ br i1 %tobool.i, label %bar.exit, label %for.cond.i
+
+for.cond.i: ; preds = %for.body.i
+ store i32 %add.i.i, i32* @a, align 4
+ %cmp.i = icmp slt i32 %storemerge1.i, 3
+ br i1 %cmp.i, label %for.body.i, label %bar.exit
+
+bar.exit: ; preds = %for.cond.i, %for.body.i
+ ret i32 0
+}
diff --git a/test/Analysis/ScalarEvolution/nsw-offset.ll b/test/Analysis/ScalarEvolution/nsw-offset.ll
index 8969a5ad4ceb..88cdcf23d9ed 100644
--- a/test/Analysis/ScalarEvolution/nsw-offset.ll
+++ b/test/Analysis/ScalarEvolution/nsw-offset.ll
@@ -73,5 +73,5 @@ return: ; preds = %bb1.return_crit_edg
ret void
}
-; CHECK: Loop %bb: backedge-taken count is ((-1 + %n) /u 2)
+; CHECK: Loop %bb: backedge-taken count is ((-1 + (2 * (%no /u 2))) /u 2)
; CHECK: Loop %bb: max backedge-taken count is 1073741822
diff --git a/test/Analysis/ScalarEvolution/trip-count-pow2.ll b/test/Analysis/ScalarEvolution/trip-count-pow2.ll
new file mode 100644
index 000000000000..2c5b72e49daf
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/trip-count-pow2.ll
@@ -0,0 +1,53 @@
+; RUN: opt < %s -scalar-evolution -analyze | FileCheck %s
+
+define void @test1(i32 %n) {
+entry:
+ %s = mul i32 %n, 96
+ br label %loop
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add i32 %i, 32
+ %t = icmp ne i32 %i.next, %s
+ br i1 %t, label %loop, label %exit
+exit:
+ ret void
+
+; CHECK-LABEL: @test1
+; CHECK: Loop %loop: backedge-taken count is ((-32 + (96 * %n)) /u 32)
+; CHECK: Loop %loop: max backedge-taken count is ((-32 + (96 * %n)) /u 32)
+}
+
+; PR19183
+define i32 @test2(i32 %n) {
+entry:
+ %s = and i32 %n, -32
+ br label %loop
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add i32 %i, 32
+ %t = icmp ne i32 %i.next, %s
+ br i1 %t, label %loop, label %exit
+exit:
+ ret i32 %i
+
+; CHECK-LABEL: @test2
+; CHECK: Loop %loop: backedge-taken count is ((-32 + (32 * (%n /u 32))) /u 32)
+; CHECK: Loop %loop: max backedge-taken count is ((-32 + (32 * (%n /u 32))) /u 32)
+}
+
+define void @test3(i32 %n) {
+entry:
+ %s = mul i32 %n, 96
+ br label %loop
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add i32 %i, 96
+ %t = icmp ne i32 %i.next, %s
+ br i1 %t, label %loop, label %exit
+exit:
+ ret void
+
+; CHECK-LABEL: @test3
+; CHECK: Loop %loop: Unpredictable backedge-taken count.
+; CHECK: Loop %loop: Unpredictable max backedge-taken count.
+}
diff --git a/test/Analysis/ScalarEvolution/trip-count-switch.ll b/test/Analysis/ScalarEvolution/trip-count-switch.ll
new file mode 100644
index 000000000000..2d2b6b499408
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/trip-count-switch.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
+
+declare void @foo()
+
+define void @test1() nounwind {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %if.end, %entry
+ %i.0 = phi i32 [ 2, %entry ], [ %dec, %if.end ]
+ switch i32 %i.0, label %if.end [
+ i32 0, label %for.end
+ i32 1, label %if.then
+ ]
+
+if.then: ; preds = %for.cond
+ tail call void @foo()
+ br label %if.end
+
+if.end: ; preds = %for.cond, %if.then
+ %dec = add nsw i32 %i.0, -1
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ ret void
+
+; CHECK-LABEL: @test1
+; CHECK: Loop %for.cond: backedge-taken count is 2
+; CHECK: Loop %for.cond: max backedge-taken count is 2
+}
diff --git a/test/Analysis/ScalarEvolution/xor-and.ll b/test/Analysis/ScalarEvolution/xor-and.ll
deleted file mode 100644
index 2616ea928a49..000000000000
--- a/test/Analysis/ScalarEvolution/xor-and.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: opt < %s -scalar-evolution -analyze | FileCheck %s
-
-; ScalarEvolution shouldn't try to analyze %z into something like
-; --> (zext i4 (-1 + (-1 * (trunc i64 (8 * %x) to i4))) to i64)
-
-; CHECK: --> (zext i4 (-8 + (trunc i64 (8 * %x) to i4)) to i64)
-
-define i64 @foo(i64 %x) {
- %a = shl i64 %x, 3
- %t = and i64 %a, 8
- %z = xor i64 %t, 8
- ret i64 %z
-}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/memcpyopt.ll b/test/Analysis/TypeBasedAliasAnalysis/memcpyopt.ll
index 6fd6eaca012e..cdf72811ce5f 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/memcpyopt.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/memcpyopt.ll
@@ -18,8 +18,8 @@ define void @foo(i8* nocapture %p, i8* nocapture %q, i8* nocapture %s) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
-; CHECK [[TAGA]] = metadata !{metadata [[TYPEA:!.*]], metadata [[TYPEA]], i64 0}
-; CHECK [[TYPEA]] = metadata !{metadata !"A", metadata !{{.*}}}
+; CHECK: [[TAGA]] = metadata !{metadata [[TYPEA:!.*]], metadata [[TYPEA]], i64 0}
+; CHECK: [[TYPEA]] = metadata !{metadata !"A", metadata !{{.*}}}
!0 = metadata !{metadata !"tbaa root", null}
!1 = metadata !{metadata !3, metadata !3, i64 0}
!2 = metadata !{metadata !4, metadata !4, i64 0}
diff --git a/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll b/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
index 0cd5c301842a..e1c5d4526470 100644
--- a/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
+++ b/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
@@ -43,7 +43,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i16 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%s.addr = alloca i32*, align 8
%A.addr = alloca %struct.StructA*, align 8
@@ -98,7 +98,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i16 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%A.addr = alloca %struct.StructA*, align 8
%B.addr = alloca %struct.StructB*, align 8
@@ -127,7 +127,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%A.addr = alloca %struct.StructA*, align 8
%B.addr = alloca %struct.StructB*, align 8
@@ -155,7 +155,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%A.addr = alloca %struct.StructA*, align 8
%B.addr = alloca %struct.StructB*, align 8
@@ -184,7 +184,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%A.addr = alloca %struct.StructA*, align 8
%S.addr = alloca %struct.StructS*, align 8
@@ -212,7 +212,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i16 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%A.addr = alloca %struct.StructA*, align 8
%S.addr = alloca %struct.StructS*, align 8
@@ -240,7 +240,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%S.addr = alloca %struct.StructS*, align 8
%S2.addr = alloca %struct.StructS2*, align 8
@@ -268,7 +268,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i16 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%S.addr = alloca %struct.StructS*, align 8
%S2.addr = alloca %struct.StructS2*, align 8
@@ -296,7 +296,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; Remove a load and propogate the value from store.
+; Remove a load and propagate the value from store.
; OPT: ret i32 1
%C.addr = alloca %struct.StructC*, align 8
%D.addr = alloca %struct.StructD*, align 8
diff --git a/test/Assembler/2007-09-10-AliasFwdRef.ll b/test/Assembler/2007-09-10-AliasFwdRef.ll
index b21491ba5a90..2ebfc2719e95 100644
--- a/test/Assembler/2007-09-10-AliasFwdRef.ll
+++ b/test/Assembler/2007-09-10-AliasFwdRef.ll
@@ -6,4 +6,6 @@
-declare extern_weak i32 @pthread_cancel(i32)
+define weak i32 @pthread_cancel(i32) {
+ ret i32 0
+}
diff --git a/test/Assembler/2009-04-25-AliasGEP.ll b/test/Assembler/2009-04-25-AliasGEP.ll
deleted file mode 100644
index 6d07208defe3..000000000000
--- a/test/Assembler/2009-04-25-AliasGEP.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis
-; PR4066
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9"
- %struct.i2c_device_id = type { }
-@w83l785ts_id = internal constant [0 x %struct.i2c_device_id] zeroinitializer, align 1 ; <[0 x %struct.i2c_device_id]*> [#uses=1]
-
-@__mod_i2c_device_table = alias getelementptr ([0 x %struct.i2c_device_id]* @w83l785ts_id, i32 0, i32 0) ; <%struct.i2c_device_id*> [#uses=0]
diff --git a/test/Assembler/ConstantExprFoldSelect.ll b/test/Assembler/ConstantExprFoldSelect.ll
new file mode 100644
index 000000000000..b000e02653c6
--- /dev/null
+++ b/test/Assembler/ConstantExprFoldSelect.ll
@@ -0,0 +1,8 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+; PR18319
+
+define void @function() {
+ %c = trunc <4 x i16> select (<4 x i1> <i1 undef, i1 undef, i1 false, i1 true>, <4 x i16> <i16 undef, i16 2, i16 3, i16 4>, <4 x i16> <i16 -1, i16 -2, i16 -3, i16 -4>) to <4 x i8>
+; CHECK: <i16 undef, i16 -2, i16 -3, i16 4>
+ ret void
+}
diff --git a/test/Assembler/addrspacecast-alias.ll b/test/Assembler/addrspacecast-alias.ll
new file mode 100644
index 000000000000..d7516599dfe2
--- /dev/null
+++ b/test/Assembler/addrspacecast-alias.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+; Test that global aliases are allowed to be constant addrspacecast
+
+@i = internal addrspace(1) global i8 42
+@ia = alias internal addrspacecast (i8 addrspace(1)* @i to i8 addrspace(2)* addrspace(3)*)
+; CHECK: @ia = alias internal addrspacecast (i8 addrspace(2)* addrspace(1)* bitcast (i8 addrspace(1)* @i to i8 addrspace(2)* addrspace(1)*) to i8 addrspace(2)* addrspace(3)*)
diff --git a/test/Assembler/alias-redefinition.ll b/test/Assembler/alias-redefinition.ll
new file mode 100644
index 000000000000..19ad85bf5f5b
--- /dev/null
+++ b/test/Assembler/alias-redefinition.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-as %s 2>&1 | FileCheck %s
+
+; CHECK: error: redefinition of global named '@bar'
+
+@foo = global i32 0
+@bar = alias i32* @foo
+@bar = alias i32* @foo
diff --git a/test/Assembler/atomic.ll b/test/Assembler/atomic.ll
index b245cdea75b1..d7ccd9900bd8 100644
--- a/test/Assembler/atomic.ll
+++ b/test/Assembler/atomic.ll
@@ -10,10 +10,14 @@ define void @f(i32* %x) {
store atomic i32 3, i32* %x release, align 4
; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
- ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
- cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
- ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
- cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+ ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
+ cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic monotonic
+ ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
+ cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
+ ; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
+ cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
+ ; CHECK: cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
+ cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
atomicrmw add i32* %x, i32 10 seq_cst
; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
diff --git a/test/Assembler/functionlocal-metadata.ll b/test/Assembler/functionlocal-metadata.ll
index 0d93bfdb275d..f9b1d7403796 100644
--- a/test/Assembler/functionlocal-metadata.ll
+++ b/test/Assembler/functionlocal-metadata.ll
@@ -53,4 +53,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
; CHECK: ![[ID0]] = metadata !{i32 662302, i32 26, metadata ![[ID1]], null}
; CHECK: ![[ID1]] = metadata !{i32 4, metadata !"foo"}
; CHECK: ![[ID2]] = metadata !{metadata !"bar"}
-; CHECK; ![[ID3]] = metadata !{metadata !"foo"}
+; CHECK: ![[ID3]] = metadata !{metadata !"foo"}
diff --git a/test/Assembler/getInt.ll b/test/Assembler/getInt.ll
new file mode 100644
index 000000000000..8e2537ae6cf1
--- /dev/null
+++ b/test/Assembler/getInt.ll
@@ -0,0 +1,3 @@
+; RUN: not opt < %s 2>&1 | grep 'not a number, or does not fit in an unsigned int'
+
+target datalayout = "p:4294967296:64:64"
diff --git a/test/Assembler/half-constprop.ll b/test/Assembler/half-constprop.ll
index 03ccdda97e0a..9e24f7242ba9 100644
--- a/test/Assembler/half-constprop.ll
+++ b/test/Assembler/half-constprop.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | opt -O3 | llvm-dis | FileCheck %s
+; RUN: opt < %s -O3 -S | FileCheck %s
; Testing half constant propagation.
define half @abc() nounwind {
diff --git a/test/Assembler/half-conv.ll b/test/Assembler/half-conv.ll
index bf9ae5713979..70a6b86c393f 100644
--- a/test/Assembler/half-conv.ll
+++ b/test/Assembler/half-conv.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | opt -O3 | llvm-dis | FileCheck %s
+; RUN: opt < %s -O3 -S | FileCheck %s
; Testing half to float conversion.
define float @abc() nounwind {
diff --git a/test/Assembler/inalloca.ll b/test/Assembler/inalloca.ll
new file mode 100644
index 000000000000..ff7a87e0a392
--- /dev/null
+++ b/test/Assembler/inalloca.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as %s -o /dev/null
+
+define void @a() {
+entry:
+ %0 = alloca inalloca i32
+ %1 = alloca inalloca [2 x i32]
+ %2 = alloca inalloca i32, i32 2
+ %3 = alloca inalloca i32, i32 2, align 16
+ %4 = alloca inalloca i32, i32 2, align 16, !foo !0
+ %5 = alloca i32, i32 2, align 16, !foo !0
+ %6 = alloca i32, i32 2, align 16
+ ret void
+}
+
+!0 = metadata !{i32 662302, null}
+!foo = !{ !0 }
diff --git a/test/Assembler/internal-hidden-alias.ll b/test/Assembler/internal-hidden-alias.ll
new file mode 100644
index 000000000000..660514bb1850
--- /dev/null
+++ b/test/Assembler/internal-hidden-alias.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@global = global i32 0
+
+@alias = hidden alias internal i32* @global
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/internal-hidden-function.ll b/test/Assembler/internal-hidden-function.ll
new file mode 100644
index 000000000000..193ed7c28919
--- /dev/null
+++ b/test/Assembler/internal-hidden-function.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+define internal hidden void @function() {
+; CHECK: symbol with local linkage must have default visibility
+entry:
+ ret void
+}
diff --git a/test/Assembler/internal-hidden-variable.ll b/test/Assembler/internal-hidden-variable.ll
new file mode 100644
index 000000000000..eddd06758a04
--- /dev/null
+++ b/test/Assembler/internal-hidden-variable.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@var = internal hidden global i32 0
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/internal-protected-alias.ll b/test/Assembler/internal-protected-alias.ll
new file mode 100644
index 000000000000..d78582684c50
--- /dev/null
+++ b/test/Assembler/internal-protected-alias.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@global = global i32 0
+
+@alias = protected alias internal i32* @global
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/internal-protected-function.ll b/test/Assembler/internal-protected-function.ll
new file mode 100644
index 000000000000..944cb75eec4c
--- /dev/null
+++ b/test/Assembler/internal-protected-function.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+define internal protected void @function() {
+; CHECK: symbol with local linkage must have default visibility
+entry:
+ ret void
+}
diff --git a/test/Assembler/internal-protected-variable.ll b/test/Assembler/internal-protected-variable.ll
new file mode 100644
index 000000000000..df02275bac79
--- /dev/null
+++ b/test/Assembler/internal-protected-variable.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@var = internal protected global i32 0
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/invalid-comdat.ll b/test/Assembler/invalid-comdat.ll
new file mode 100644
index 000000000000..987e1e1e7d92
--- /dev/null
+++ b/test/Assembler/invalid-comdat.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+@v = global i32 0, comdat $v
+; CHECK: use of undefined comdat '$v'
diff --git a/test/Assembler/invalid-comdat2.ll b/test/Assembler/invalid-comdat2.ll
new file mode 100644
index 000000000000..ed656ef2b112
--- /dev/null
+++ b/test/Assembler/invalid-comdat2.ll
@@ -0,0 +1,5 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+$v = comdat any
+$v = comdat any
+; CHECK: redefinition of comdat '$v'
diff --git a/test/Assembler/invalid-name.ll b/test/Assembler/invalid-name.ll
new file mode 100644
index 000000000000..d9d7a1108808
--- /dev/null
+++ b/test/Assembler/invalid-name.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as %s 2>&1 | FileCheck %s
+
+; CHECK: expected function name
+define void @"zed\00bar"() {
+ ret void
+}
diff --git a/test/Assembler/invalid_cast3.ll b/test/Assembler/invalid_cast3.ll
new file mode 100644
index 000000000000..cc956cee59a1
--- /dev/null
+++ b/test/Assembler/invalid_cast3.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+; CHECK: invalid cast opcode for cast from '<4 x i32*>' to '<2 x i32*>'
+define <2 x i32*> @illegal_vector_pointer_bitcast_num_elements(<4 x i32*> %c) {
+ %bc = bitcast <4 x i32*> %c to <2 x i32*>
+ ret <2 x i32*> %bc
+}
diff --git a/test/Assembler/private-hidden-alias.ll b/test/Assembler/private-hidden-alias.ll
new file mode 100644
index 000000000000..58be92a34f25
--- /dev/null
+++ b/test/Assembler/private-hidden-alias.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@global = global i32 0
+
+@alias = hidden alias private i32* @global
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/private-hidden-function.ll b/test/Assembler/private-hidden-function.ll
new file mode 100644
index 000000000000..dd73f0413b9f
--- /dev/null
+++ b/test/Assembler/private-hidden-function.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+define private hidden void @function() {
+; CHECK: symbol with local linkage must have default visibility
+entry:
+ ret void
+}
diff --git a/test/Assembler/private-hidden-variable.ll b/test/Assembler/private-hidden-variable.ll
new file mode 100644
index 000000000000..ce6bfa9bae68
--- /dev/null
+++ b/test/Assembler/private-hidden-variable.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@var = private hidden global i32 0
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/private-protected-alias.ll b/test/Assembler/private-protected-alias.ll
new file mode 100644
index 000000000000..a72c248f0b03
--- /dev/null
+++ b/test/Assembler/private-protected-alias.ll
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@global = global i32 0
+
+@alias = protected alias private i32* @global
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/private-protected-function.ll b/test/Assembler/private-protected-function.ll
new file mode 100644
index 000000000000..5dbb420a8253
--- /dev/null
+++ b/test/Assembler/private-protected-function.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+define private protected void @function() {
+; CHECK: symbol with local linkage must have default visibility
+entry:
+ ret void
+}
diff --git a/test/Assembler/private-protected-variable.ll b/test/Assembler/private-protected-variable.ll
new file mode 100644
index 000000000000..c4458f5b3f63
--- /dev/null
+++ b/test/Assembler/private-protected-variable.ll
@@ -0,0 +1,4 @@
+; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
+
+@var = private protected global i32 0
+; CHECK: symbol with local linkage must have default visibility
diff --git a/test/Assembler/upgrade-loop-metadata.ll b/test/Assembler/upgrade-loop-metadata.ll
new file mode 100644
index 000000000000..1c0311dd09e9
--- /dev/null
+++ b/test/Assembler/upgrade-loop-metadata.ll
@@ -0,0 +1,41 @@
+; Test to make sure loop vectorizer metadata is automatically upgraded.
+;
+; Run using opt as well to ensure that the metadata is upgraded when parsing
+; assembly.
+;
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+; RUN: opt -S < %s | FileCheck %s
+
+define void @_Z28loop_with_vectorize_metadatav() {
+entry:
+ %i = alloca i32, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 16
+ br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
+
+for.body: ; preds = %for.cond
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %1 = load i32* %i, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ ret void
+}
+
+; CHECK: !{metadata !"llvm.loop.interleave.count", i32 4}
+; CHECK: !{metadata !"llvm.loop.vectorize.width", i32 8}
+; CHECK: !{metadata !"llvm.loop.vectorize.enable", i1 true}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (trunk 211528)"}
+!1 = metadata !{metadata !1, metadata !2, metadata !3, metadata !4, metadata !4}
+!2 = metadata !{metadata !"llvm.vectorizer.unroll", i32 4}
+!3 = metadata !{metadata !"llvm.vectorizer.width", i32 8}
+!4 = metadata !{metadata !"llvm.vectorizer.enable", i1 true}
diff --git a/test/Bindings/Ocaml/target.ml b/test/Bindings/Ocaml/target.ml
index d69fb0e664fd..0a2283aa3ed5 100644
--- a/test/Bindings/Ocaml/target.ml
+++ b/test/Bindings/Ocaml/target.ml
@@ -43,12 +43,10 @@ let machine = TargetMachine.create (Target.default_triple ()) target
let test_target_data () =
let module DL = DataLayout in
- let layout = "e-p:32:32:32-S32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-" ^
- "f16:16:16-f32:32:32-f64:32:64-f128:128:128-v64:32:64-v128:32:128-" ^
- "a0:0:64-n32" in
+ let layout = "e-p:32:32-f64:32:64-v64:32:64-v128:32:128-n32-S32" in
let dl = DL.of_string layout in
let sty = struct_type context [| i32_type; i64_type |] in
-
+
assert_equal (DL.as_string dl) layout;
assert_equal (DL.byte_order dl) Endian.Little;
assert_equal (DL.pointer_size dl) 4;
@@ -88,7 +86,8 @@ let test_target_machine () =
assert_equal (TM.triple machine) (Target.default_triple ());
assert_equal (TM.cpu machine) "";
assert_equal (TM.features machine) "";
- ignore (TM.data_layout machine)
+ ignore (TM.data_layout machine);
+ TM.set_verbose_asm true machine
(*===-- Code Emission -----------------------------------------------------===*)
diff --git a/test/Bindings/Ocaml/vmcore.ml b/test/Bindings/Ocaml/vmcore.ml
index 167efce0b2b1..f014116ffe8e 100644
--- a/test/Bindings/Ocaml/vmcore.ml
+++ b/test/Bindings/Ocaml/vmcore.ml
@@ -85,11 +85,11 @@ let test_target () =
end;
begin group "layout";
- let layout = "bogus" in
+ let layout = "e" in
set_data_layout layout m;
insist (layout = data_layout m)
end
- (* CHECK: target datalayout = "bogus"
+ (* CHECK: target datalayout = "e"
* CHECK: target triple = "i686-apple-darwin8"
*)
@@ -413,7 +413,7 @@ let test_global_values () =
let test_global_variables () =
let (++) x f = f x; x in
- let fourty_two32 = const_int i32_type 42 in
+ let forty_two32 = const_int i32_type 42 in
group "declarations"; begin
(* CHECK: @GVar01 = external global i32
@@ -444,16 +444,16 @@ let test_global_variables () =
* CHECK: @QGVar02 = addrspace(3) global i32 42
* CHECK: @QGVar03 = addrspace(3) global i32 42
*)
- let g = define_global "GVar02" fourty_two32 m in
+ let g = define_global "GVar02" forty_two32 m in
let g2 = declare_global i32_type "GVar03" m ++
- set_initializer fourty_two32 in
+ set_initializer forty_two32 in
insist (not (is_declaration g));
insist (not (is_declaration g2));
insist ((global_initializer g) == (global_initializer g2));
- let g = define_qualified_global "QGVar02" fourty_two32 3 m in
+ let g = define_qualified_global "QGVar02" forty_two32 3 m in
let g2 = declare_qualified_global i32_type "QGVar03" 3 m ++
- set_initializer fourty_two32 in
+ set_initializer forty_two32 in
insist (not (is_declaration g));
insist (not (is_declaration g2));
insist ((global_initializer g) == (global_initializer g2));
@@ -462,34 +462,34 @@ let test_global_variables () =
(* CHECK: GVar04{{.*}}thread_local
*)
group "threadlocal";
- let g = define_global "GVar04" fourty_two32 m ++
+ let g = define_global "GVar04" forty_two32 m ++
set_thread_local true in
insist (is_thread_local g);
(* CHECK: GVar05{{.*}}thread_local(initialexec)
*)
group "threadlocal_mode";
- let g = define_global "GVar05" fourty_two32 m ++
+ let g = define_global "GVar05" forty_two32 m ++
set_thread_local_mode ThreadLocalMode.InitialExec in
insist ((thread_local_mode g) = ThreadLocalMode.InitialExec);
(* CHECK: GVar06{{.*}}externally_initialized
*)
group "externally_initialized";
- let g = define_global "GVar06" fourty_two32 m ++
+ let g = define_global "GVar06" forty_two32 m ++
set_externally_initialized true in
insist (is_externally_initialized g);
(* CHECK-NOWHERE-NOT: GVar07
*)
group "delete";
- let g = define_global "GVar07" fourty_two32 m in
+ let g = define_global "GVar07" forty_two32 m in
delete_global g;
(* CHECK: ConstGlobalVar{{.*}}constant
*)
group "constant";
- let g = define_global "ConstGlobalVar" fourty_two32 m in
+ let g = define_global "ConstGlobalVar" forty_two32 m in
insist (not (is_global_constant g));
set_global_constant true g;
insist (is_global_constant g);
@@ -581,7 +581,8 @@ let test_users () =
let test_aliases () =
(* CHECK: @alias = alias i32* @aliasee
*)
- let v = declare_global i32_type "aliasee" m in
+ let forty_two32 = const_int i32_type 42 in
+ let v = define_global "aliasee" forty_two32 m in
ignore (add_alias m (pointer_type i32_type) v "alias")
diff --git a/test/Bindings/llvm-c/lit.local.cfg b/test/Bindings/llvm-c/lit.local.cfg
index d83ebeed8e1c..75b22c06fb2f 100644
--- a/test/Bindings/llvm-c/lit.local.cfg
+++ b/test/Bindings/llvm-c/lit.local.cfg
@@ -1,5 +1,4 @@
-targets = set(config.root.targets_to_build.split())
-if not "X86" in targets:
+if not "X86" in config.root.targets:
config.unsupported = True
-if not "ARM" in targets:
+if not "ARM" in config.root.targets:
config.unsupported = True
diff --git a/test/Bitcode/aggregateInstructions.3.2.ll b/test/Bitcode/aggregateInstructions.3.2.ll
new file mode 100644
index 000000000000..9352390b131b
--- /dev/null
+++ b/test/Bitcode/aggregateInstructions.3.2.ll
@@ -0,0 +1,33 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; aggregateInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread instructions with aggregate operands
+; in older bitcode files.
+
+define void @extractvalue([4 x i8] %x1, [4 x [4 x i8]] %x2, {{i32, float}} %x3){
+entry:
+; CHECK: %res1 = extractvalue [4 x i8] %x1, 0
+ %res1 = extractvalue [4 x i8] %x1, 0
+
+; CHECK-NEXT: %res2 = extractvalue [4 x [4 x i8]] %x2, 1
+ %res2 = extractvalue [4 x [4 x i8 ]] %x2, 1
+
+; CHECK-NEXT: %res3 = extractvalue [4 x [4 x i8]] %x2, 0, 1
+ %res3 = extractvalue [4 x [4 x i8 ]] %x2, 0, 1
+
+; CHECK-NEXT: %res4 = extractvalue { { i32, float } } %x3, 0, 1
+ %res4 = extractvalue {{i32, float}} %x3, 0, 1
+
+ ret void
+}
+
+define void @insertvalue([4 x [4 x i8 ]] %x1){
+entry:
+; CHECK: %res1 = insertvalue [4 x [4 x i8]] %x1, i8 0, 0, 0
+ %res1 = insertvalue [4 x [4 x i8 ]] %x1, i8 0, 0, 0
+
+; CHECK-NEXT: %res2 = insertvalue [4 x [4 x i8]] undef, i8 0, 0, 0
+ %res2 = insertvalue [4 x [4 x i8 ]] undef, i8 0, 0, 0
+
+ ret void
+} \ No newline at end of file
diff --git a/test/Bitcode/aggregateInstructions.3.2.ll.bc b/test/Bitcode/aggregateInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..053f85fda645
--- /dev/null
+++ b/test/Bitcode/aggregateInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/atomic.ll b/test/Bitcode/atomic.ll
new file mode 100644
index 000000000000..37815a749b55
--- /dev/null
+++ b/test/Bitcode/atomic.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-as %s -o - | llvm-dis | FileCheck %s
+
+define void @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
+ cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ ; CHECK: cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+
+ cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ ; CHECK: cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+
+ cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
+ ; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
+
+ cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic
+ ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread release monotonic
+
+ ret void
+} \ No newline at end of file
diff --git a/test/Bitcode/attributes.ll b/test/Bitcode/attributes.ll
index 1789878e9f50..2490e5920726 100644
--- a/test/Bitcode/attributes.ll
+++ b/test/Bitcode/attributes.ll
@@ -203,7 +203,7 @@ define void @f34()
; CHECK: define void @f34()
{
call void @nobuiltin() nobuiltin
-; CHECK: call void @nobuiltin() #24
+; CHECK: call void @nobuiltin() #25
ret void;
}
@@ -213,6 +213,32 @@ define void @f35() optnone noinline
ret void;
}
+define void @f36(i8* inalloca) {
+; CHECK: define void @f36(i8* inalloca) {
+ ret void
+}
+
+define nonnull i8* @f37(i8* nonnull %a) {
+; CHECK: define nonnull i8* @f37(i8* nonnull %a) {
+ ret i8* %a
+}
+
+define void @f38() unnamed_addr jumptable {
+; CHECK: define void @f38() unnamed_addr #24
+ call void bitcast (void (i8*)* @f36 to void ()*)()
+ unreachable
+}
+
+define dereferenceable(2) i8* @f39(i8* dereferenceable(1) %a) {
+; CHECK: define dereferenceable(2) i8* @f39(i8* dereferenceable(1) %a) {
+ ret i8* %a
+}
+
+define dereferenceable(18446744073709551606) i8* @f40(i8* dereferenceable(18446744073709551615) %a) {
+; CHECK: define dereferenceable(18446744073709551606) i8* @f40(i8* dereferenceable(18446744073709551615) %a) {
+ ret i8* %a
+}
+
; CHECK: attributes #0 = { noreturn }
; CHECK: attributes #1 = { nounwind }
; CHECK: attributes #2 = { readnone }
@@ -237,5 +263,5 @@ define void @f35() optnone noinline
; CHECK: attributes #21 = { sspstrong }
; CHECK: attributes #22 = { minsize }
; CHECK: attributes #23 = { noinline optnone }
-; CHECK: attributes #24 = { nobuiltin }
-
+; CHECK: attributes #24 = { jumptable }
+; CHECK: attributes #25 = { nobuiltin }
diff --git a/test/Bitcode/binaryFloatInstructions.3.2.ll b/test/Bitcode/binaryFloatInstructions.3.2.ll
new file mode 100644
index 000000000000..f94d82d23c9f
--- /dev/null
+++ b/test/Bitcode/binaryFloatInstructions.3.2.ll
@@ -0,0 +1,120 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; binaryFloatInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread binary float instructions from
+; older bitcode files.
+
+define void @fadd(float %x1, double %x2 ,half %x3, fp128 %x4, x86_fp80 %x5, ppc_fp128 %x6){
+entry:
+; CHECK: %res1 = fadd float %x1, %x1
+ %res1 = fadd float %x1, %x1
+
+; CHECK-NEXT: %res2 = fadd double %x2, %x2
+ %res2 = fadd double %x2, %x2
+
+; CHECK-NEXT: %res3 = fadd half %x3, %x3
+ %res3 = fadd half %x3, %x3
+
+; CHECK-NEXT: %res4 = fadd fp128 %x4, %x4
+ %res4 = fadd fp128 %x4, %x4
+
+; CHECK-NEXT: %res5 = fadd x86_fp80 %x5, %x5
+ %res5 = fadd x86_fp80 %x5, %x5
+
+; CHECK-NEXT: %res6 = fadd ppc_fp128 %x6, %x6
+ %res6 = fadd ppc_fp128 %x6, %x6
+
+ ret void
+}
+
+define void @faddFloatVec(<2 x float> %x1, <3 x float> %x2 ,<4 x float> %x3, <8 x float> %x4, <16 x float> %x5){
+entry:
+; CHECK: %res1 = fadd <2 x float> %x1, %x1
+ %res1 = fadd <2 x float> %x1, %x1
+
+; CHECK-NEXT: %res2 = fadd <3 x float> %x2, %x2
+ %res2 = fadd <3 x float> %x2, %x2
+
+; CHECK-NEXT: %res3 = fadd <4 x float> %x3, %x3
+ %res3 = fadd <4 x float> %x3, %x3
+
+; CHECK-NEXT: %res4 = fadd <8 x float> %x4, %x4
+ %res4 = fadd <8 x float> %x4, %x4
+
+; CHECK-NEXT: %res5 = fadd <16 x float> %x5, %x5
+ %res5 = fadd <16 x float> %x5, %x5
+
+ ret void
+}
+
+define void @faddDoubleVec(<2 x double> %x1, <3 x double> %x2 ,<4 x double> %x3, <8 x double> %x4, <16 x double> %x5){
+entry:
+; CHECK: %res1 = fadd <2 x double> %x1, %x1
+ %res1 = fadd <2 x double> %x1, %x1
+
+; CHECK-NEXT: %res2 = fadd <3 x double> %x2, %x2
+ %res2 = fadd <3 x double> %x2, %x2
+
+; CHECK-NEXT: %res3 = fadd <4 x double> %x3, %x3
+ %res3 = fadd <4 x double> %x3, %x3
+
+; CHECK-NEXT: %res4 = fadd <8 x double> %x4, %x4
+ %res4 = fadd <8 x double> %x4, %x4
+
+; CHECK-NEXT: %res5 = fadd <16 x double> %x5, %x5
+ %res5 = fadd <16 x double> %x5, %x5
+
+ ret void
+}
+
+define void @faddHalfVec(<2 x half> %x1, <3 x half> %x2 ,<4 x half> %x3, <8 x half> %x4, <16 x half> %x5){
+entry:
+; CHECK: %res1 = fadd <2 x half> %x1, %x1
+ %res1 = fadd <2 x half> %x1, %x1
+
+; CHECK-NEXT: %res2 = fadd <3 x half> %x2, %x2
+ %res2 = fadd <3 x half> %x2, %x2
+
+; CHECK-NEXT: %res3 = fadd <4 x half> %x3, %x3
+ %res3 = fadd <4 x half> %x3, %x3
+
+; CHECK-NEXT: %res4 = fadd <8 x half> %x4, %x4
+ %res4 = fadd <8 x half> %x4, %x4
+
+; CHECK-NEXT: %res5 = fadd <16 x half> %x5, %x5
+ %res5 = fadd <16 x half> %x5, %x5
+
+ ret void
+}
+
+define void @fsub(float %x1){
+entry:
+; CHECK: %res1 = fsub float %x1, %x1
+ %res1 = fsub float %x1, %x1
+
+ ret void
+}
+
+define void @fmul(float %x1){
+entry:
+; CHECK: %res1 = fmul float %x1, %x1
+ %res1 = fmul float %x1, %x1
+
+ ret void
+}
+
+define void @fdiv(float %x1){
+entry:
+; CHECK: %res1 = fdiv float %x1, %x1
+ %res1 = fdiv float %x1, %x1
+
+ ret void
+}
+
+define void @frem(float %x1){
+entry:
+; CHECK: %res1 = frem float %x1, %x1
+ %res1 = frem float %x1, %x1
+
+ ret void
+}
diff --git a/test/Bitcode/binaryFloatInstructions.3.2.ll.bc b/test/Bitcode/binaryFloatInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..8dbb4e456a04
--- /dev/null
+++ b/test/Bitcode/binaryFloatInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/binaryIntInstructions.3.2.ll b/test/Bitcode/binaryIntInstructions.3.2.ll
new file mode 100644
index 000000000000..b08501ca932a
--- /dev/null
+++ b/test/Bitcode/binaryIntInstructions.3.2.ll
@@ -0,0 +1,177 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; binaryIntInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread binary integer instructions from
+; older bitcode files.
+
+define void @add(i1 %x1, i8 %x2 ,i16 %x3, i32 %x4, i64 %x5){
+entry:
+; CHECK: %res1 = add i1 %x1, %x1
+ %res1 = add i1 %x1, %x1
+
+; CHECK-NEXT: %res2 = add i8 %x2, %x2
+ %res2 = add i8 %x2, %x2
+
+; CHECK-NEXT: %res3 = add i16 %x3, %x3
+ %res3 = add i16 %x3, %x3
+
+; CHECK-NEXT: %res4 = add i32 %x4, %x4
+ %res4 = add i32 %x4, %x4
+
+; CHECK-NEXT: %res5 = add i64 %x5, %x5
+ %res5 = add i64 %x5, %x5
+
+; CHECK: %res6 = add nuw i1 %x1, %x1
+ %res6 = add nuw i1 %x1, %x1
+
+; CHECK: %res7 = add nsw i1 %x1, %x1
+ %res7 = add nsw i1 %x1, %x1
+
+; CHECK: %res8 = add nuw nsw i1 %x1, %x1
+ %res8 = add nuw nsw i1 %x1, %x1
+
+ ret void
+}
+
+define void @addvec8NuwNsw(<2 x i8> %x1, <3 x i8> %x2 ,<4 x i8> %x3, <8 x i8> %x4, <16 x i8> %x5){
+entry:
+; CHECK: %res1 = add nuw nsw <2 x i8> %x1, %x1
+ %res1 = add nuw nsw <2 x i8> %x1, %x1
+
+; CHECK-NEXT: %res2 = add nuw nsw <3 x i8> %x2, %x2
+ %res2 = add nuw nsw <3 x i8> %x2, %x2
+
+; CHECK-NEXT: %res3 = add nuw nsw <4 x i8> %x3, %x3
+ %res3 = add nuw nsw <4 x i8> %x3, %x3
+
+; CHECK-NEXT: %res4 = add nuw nsw <8 x i8> %x4, %x4
+ %res4 = add nuw nsw <8 x i8> %x4, %x4
+
+; CHECK-NEXT: %res5 = add nuw nsw <16 x i8> %x5, %x5
+ %res5 = add nuw nsw <16 x i8> %x5, %x5
+
+ ret void
+}
+
+define void @addvec16NuwNsw(<2 x i16> %x1, <3 x i16> %x2 ,<4 x i16> %x3, <8 x i16> %x4, <16 x i16> %x5){
+entry:
+; CHECK: %res1 = add nuw nsw <2 x i16> %x1, %x1
+ %res1 = add nuw nsw <2 x i16> %x1, %x1
+
+; CHECK-NEXT: %res2 = add nuw nsw <3 x i16> %x2, %x2
+ %res2 = add nuw nsw <3 x i16> %x2, %x2
+
+; CHECK-NEXT: %res3 = add nuw nsw <4 x i16> %x3, %x3
+ %res3 = add nuw nsw <4 x i16> %x3, %x3
+
+; CHECK-NEXT: %res4 = add nuw nsw <8 x i16> %x4, %x4
+ %res4 = add nuw nsw <8 x i16> %x4, %x4
+
+; CHECK-NEXT: %res5 = add nuw nsw <16 x i16> %x5, %x5
+ %res5 = add nuw nsw <16 x i16> %x5, %x5
+
+ ret void
+}
+
+define void @addvec32NuwNsw(<2 x i32> %x1, <3 x i32> %x2 ,<4 x i32> %x3, <8 x i32> %x4, <16 x i32> %x5){
+entry:
+; CHECK: %res1 = add nuw nsw <2 x i32> %x1, %x1
+ %res1 = add nuw nsw <2 x i32> %x1, %x1
+
+; CHECK-NEXT: %res2 = add nuw nsw <3 x i32> %x2, %x2
+ %res2 = add nuw nsw <3 x i32> %x2, %x2
+
+; CHECK-NEXT: %res3 = add nuw nsw <4 x i32> %x3, %x3
+ %res3 = add nuw nsw <4 x i32> %x3, %x3
+
+; CHECK-NEXT: %res4 = add nuw nsw <8 x i32> %x4, %x4
+ %res4 = add nuw nsw <8 x i32> %x4, %x4
+
+; CHECK-NEXT: %res5 = add nuw nsw <16 x i32> %x5, %x5
+ %res5 = add nuw nsw <16 x i32> %x5, %x5
+
+ ret void
+}
+
+define void @addvec64NuwNsw(<2 x i64> %x1, <3 x i64> %x2 ,<4 x i64> %x3, <8 x i64> %x4, <16 x i64> %x5){
+entry:
+; CHECK: %res1 = add nuw nsw <2 x i64> %x1, %x1
+ %res1 = add nuw nsw <2 x i64> %x1, %x1
+
+; CHECK-NEXT: %res2 = add nuw nsw <3 x i64> %x2, %x2
+ %res2 = add nuw nsw <3 x i64> %x2, %x2
+
+; CHECK-NEXT: %res3 = add nuw nsw <4 x i64> %x3, %x3
+ %res3 = add nuw nsw <4 x i64> %x3, %x3
+
+; CHECK-NEXT: %res4 = add nuw nsw <8 x i64> %x4, %x4
+ %res4 = add nuw nsw <8 x i64> %x4, %x4
+
+; CHECK-NEXT: %res5 = add nuw nsw <16 x i64> %x5, %x5
+ %res5 = add nuw nsw <16 x i64> %x5, %x5
+
+ ret void
+}
+
+define void @sub(i8 %x1){
+entry:
+; CHECK: %res1 = sub i8 %x1, %x1
+ %res1 = sub i8 %x1, %x1
+
+; CHECK: %res2 = sub nuw i8 %x1, %x1
+ %res2 = sub nuw i8 %x1, %x1
+
+; CHECK: %res3 = sub nsw i8 %x1, %x1
+ %res3 = sub nsw i8 %x1, %x1
+
+; CHECK: %res4 = sub nuw nsw i8 %x1, %x1
+ %res4 = sub nuw nsw i8 %x1, %x1
+
+ ret void
+}
+
+define void @mul(i8 %x1){
+entry:
+; CHECK: %res1 = mul i8 %x1, %x1
+ %res1 = mul i8 %x1, %x1
+
+ ret void
+}
+
+define void @udiv(i8 %x1){
+entry:
+; CHECK: %res1 = udiv i8 %x1, %x1
+ %res1 = udiv i8 %x1, %x1
+
+; CHECK-NEXT: %res2 = udiv exact i8 %x1, %x1
+ %res2 = udiv exact i8 %x1, %x1
+
+ ret void
+}
+
+define void @sdiv(i8 %x1){
+entry:
+; CHECK: %res1 = sdiv i8 %x1, %x1
+ %res1 = sdiv i8 %x1, %x1
+
+; CHECK-NEXT: %res2 = sdiv exact i8 %x1, %x1
+ %res2 = sdiv exact i8 %x1, %x1
+
+ ret void
+}
+
+define void @urem(i32 %x1){
+entry:
+; CHECK: %res1 = urem i32 %x1, %x1
+ %res1 = urem i32 %x1, %x1
+
+ ret void
+}
+
+define void @srem(i32 %x1){
+entry:
+; CHECK: %res1 = srem i32 %x1, %x1
+ %res1 = srem i32 %x1, %x1
+
+ ret void
+}
diff --git a/test/Bitcode/binaryIntInstructions.3.2.ll.bc b/test/Bitcode/binaryIntInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..749e0c34dc84
--- /dev/null
+++ b/test/Bitcode/binaryIntInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/bitwiseInstructions.3.2.ll b/test/Bitcode/bitwiseInstructions.3.2.ll
new file mode 100644
index 000000000000..6225a08f2064
--- /dev/null
+++ b/test/Bitcode/bitwiseInstructions.3.2.ll
@@ -0,0 +1,68 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; bitwiseInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread bitwise instructions from
+; older bitcode files.
+
+define void @shl(i8 %x1){
+entry:
+; CHECK: %res1 = shl i8 %x1, %x1
+ %res1 = shl i8 %x1, %x1
+
+; CHECK: %res2 = shl nuw i8 %x1, %x1
+ %res2 = shl nuw i8 %x1, %x1
+
+; CHECK: %res3 = shl nsw i8 %x1, %x1
+ %res3 = shl nsw i8 %x1, %x1
+
+; CHECK: %res4 = shl nuw nsw i8 %x1, %x1
+ %res4 = shl nuw nsw i8 %x1, %x1
+
+ ret void
+}
+
+define void @lshr(i8 %x1){
+entry:
+; CHECK: %res1 = lshr i8 %x1, %x1
+ %res1 = lshr i8 %x1, %x1
+
+; CHECK: %res2 = lshr exact i8 %x1, %x1
+ %res2 = lshr exact i8 %x1, %x1
+
+ ret void
+}
+
+define void @ashr(i8 %x1){
+entry:
+; CHECK: %res1 = ashr i8 %x1, %x1
+ %res1 = ashr i8 %x1, %x1
+
+; CHECK-NEXT: %res2 = ashr exact i8 %x1, %x1
+ %res2 = ashr exact i8 %x1, %x1
+
+ ret void
+}
+
+define void @and(i8 %x1){
+entry:
+; CHECK: %res1 = and i8 %x1, %x1
+ %res1 = and i8 %x1, %x1
+
+ ret void
+}
+
+define void @or(i8 %x1){
+entry:
+; CHECK: %res1 = or i8 %x1, %x1
+ %res1 = or i8 %x1, %x1
+
+ ret void
+}
+
+define void @xor(i8 %x1){
+entry:
+; CHECK: %res1 = xor i8 %x1, %x1
+ %res1 = xor i8 %x1, %x1
+
+ ret void
+} \ No newline at end of file
diff --git a/test/Bitcode/bitwiseInstructions.3.2.ll.bc b/test/Bitcode/bitwiseInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..136a7c9a0a2d
--- /dev/null
+++ b/test/Bitcode/bitwiseInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/calling-conventions.3.2.ll b/test/Bitcode/calling-conventions.3.2.ll
new file mode 100644
index 000000000000..aca9efd0892b
--- /dev/null
+++ b/test/Bitcode/calling-conventions.3.2.ll
@@ -0,0 +1,150 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; calling-conventions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not silently misread calling conventions of
+; older bitcode files.
+
+declare ccc void @ccc()
+; CHECK: declare void @ccc
+
+declare fastcc void @fastcc()
+; CHECK: declare fastcc void @fastcc
+
+declare coldcc void @coldcc()
+; CHECK: declare coldcc void @coldcc
+
+declare cc10 void @cc10()
+; CHECK: declare cc10 void @cc10
+
+declare spir_kernel void @spir_kernel()
+; CHECK: declare spir_kernel void @spir_kernel
+
+declare spir_func void @spir_func()
+; CHECK: declare spir_func void @spir_func
+
+declare intel_ocl_bicc void @intel_ocl_bicc()
+; CHECK: declare intel_ocl_bicc void @intel_ocl_bicc
+
+declare x86_stdcallcc void @x86_stdcallcc()
+; CHECK: declare x86_stdcallcc void @x86_stdcallcc
+
+declare x86_fastcallcc void @x86_fastcallcc()
+; CHECK: declare x86_fastcallcc void @x86_fastcallcc
+
+declare x86_thiscallcc void @x86_thiscallcc()
+; CHECK: declare x86_thiscallcc void @x86_thiscallcc
+
+declare arm_apcscc void @arm_apcscc()
+; CHECK: declare arm_apcscc void @arm_apcscc
+
+declare arm_aapcscc void @arm_aapcscc()
+; CHECK: declare arm_aapcscc void @arm_aapcscc
+
+declare arm_aapcs_vfpcc void @arm_aapcs_vfpcc()
+; CHECK: declare arm_aapcs_vfpcc void @arm_aapcs_vfpcc
+
+declare msp430_intrcc void @msp430_intrcc()
+; CHECK: declare msp430_intrcc void @msp430_intrcc
+
+declare ptx_kernel void @ptx_kernel()
+; CHECK: declare ptx_kernel void @ptx_kernel
+
+declare ptx_device void @ptx_device()
+; CHECK: declare ptx_device void @ptx_device
+
+define void @call_ccc() {
+; CHECK: call void @ccc
+ call ccc void @ccc()
+ ret void
+}
+
+define void @call_fastcc() {
+; CHECK: call fastcc void @fastcc
+ call fastcc void @fastcc()
+ ret void
+}
+
+define void @call_coldcc() {
+; CHECK: call coldcc void @coldcc
+ call coldcc void @coldcc()
+ ret void
+}
+
+define void @call_cc10 () {
+; CHECK: call cc10 void @cc10
+ call cc10 void @cc10 ()
+ ret void
+}
+
+define void @call_spir_kernel() {
+; CHECK: call spir_kernel void @spir_kernel
+ call spir_kernel void @spir_kernel()
+ ret void
+}
+
+define void @call_spir_func() {
+; CHECK: call spir_func void @spir_func
+ call spir_func void @spir_func()
+ ret void
+}
+
+define void @call_intel_ocl_bicc() {
+; CHECK: call intel_ocl_bicc void @intel_ocl_bicc
+ call intel_ocl_bicc void @intel_ocl_bicc()
+ ret void
+}
+
+define void @call_x86_stdcallcc() {
+; CHECK: call x86_stdcallcc void @x86_stdcallcc
+ call x86_stdcallcc void @x86_stdcallcc()
+ ret void
+}
+
+define void @call_x86_fastcallcc() {
+; CHECK: call x86_fastcallcc void @x86_fastcallcc
+ call x86_fastcallcc void @x86_fastcallcc()
+ ret void
+}
+
+define void @call_x86_thiscallcc() {
+; CHECK: call x86_thiscallcc void @x86_thiscallcc
+ call x86_thiscallcc void @x86_thiscallcc()
+ ret void
+}
+
+define void @call_arm_apcscc() {
+; CHECK: call arm_apcscc void @arm_apcscc
+ call arm_apcscc void @arm_apcscc()
+ ret void
+}
+
+define void @call_arm_aapcscc() {
+; CHECK: call arm_aapcscc void @arm_aapcscc
+ call arm_aapcscc void @arm_aapcscc()
+ ret void
+}
+
+define void @call_arm_aapcs_vfpcc() {
+; CHECK: call arm_aapcs_vfpcc void @arm_aapcs_vfpcc
+ call arm_aapcs_vfpcc void @arm_aapcs_vfpcc()
+ ret void
+}
+
+define void @call_msp430_intrcc() {
+; CHECK: call msp430_intrcc void @msp430_intrcc
+ call msp430_intrcc void @msp430_intrcc()
+ ret void
+}
+
+define void @call_ptx_kernel() {
+; CHECK: call ptx_kernel void @ptx_kernel
+ call ptx_kernel void @ptx_kernel()
+ ret void
+}
+
+define void @call_ptx_device() {
+; CHECK: call ptx_device void @ptx_device
+ call ptx_device void @ptx_device()
+ ret void
+}
+
diff --git a/test/Bitcode/calling-conventions.3.2.ll.bc b/test/Bitcode/calling-conventions.3.2.ll.bc
new file mode 100644
index 000000000000..b3fad967db0e
--- /dev/null
+++ b/test/Bitcode/calling-conventions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/cmpxchg-upgrade.ll b/test/Bitcode/cmpxchg-upgrade.ll
new file mode 100644
index 000000000000..d36ac1c17909
--- /dev/null
+++ b/test/Bitcode/cmpxchg-upgrade.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; cmpxchg-upgrade.ll.bc was produced by running a version of llvm-as from just
+; before the IR change on this file.
+
+define void @test(i32* %addr) {
+ cmpxchg i32* %addr, i32 42, i32 0 monotonic
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 monotonic monotonic
+
+ cmpxchg i32* %addr, i32 42, i32 0 acquire
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acquire acquire
+
+ cmpxchg i32* %addr, i32 42, i32 0 release
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 release monotonic
+
+ cmpxchg i32* %addr, i32 42, i32 0 acq_rel
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
+
+ cmpxchg i32* %addr, i32 42, i32 0 seq_cst
+; CHECK: cmpxchg i32* %addr, i32 42, i32 0 seq_cst seq_cst
+
+ ret void
+} \ No newline at end of file
diff --git a/test/Bitcode/cmpxchg-upgrade.ll.bc b/test/Bitcode/cmpxchg-upgrade.ll.bc
new file mode 100644
index 000000000000..922f2eb84edf
--- /dev/null
+++ b/test/Bitcode/cmpxchg-upgrade.ll.bc
Binary files differ
diff --git a/test/Bitcode/conversionInstructions.3.2.ll b/test/Bitcode/conversionInstructions.3.2.ll
new file mode 100644
index 000000000000..4b3f27386eff
--- /dev/null
+++ b/test/Bitcode/conversionInstructions.3.2.ll
@@ -0,0 +1,104 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; conversionInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread conversion instructions from
+; older bitcode files.
+
+define void @trunc(i32 %src){
+entry:
+; CHECK: %res1 = trunc i32 %src to i8
+ %res1 = trunc i32 %src to i8
+
+ ret void
+}
+
+define void @zext(i32 %src){
+entry:
+; CHECK: %res1 = zext i32 %src to i64
+ %res1 = zext i32 %src to i64
+
+ ret void
+}
+
+define void @sext(i32 %src){
+entry:
+; CHECK: %res1 = sext i32 %src to i64
+ %res1 = sext i32 %src to i64
+
+ ret void
+}
+
+define void @fptrunc(double %src){
+entry:
+; CHECK: %res1 = fptrunc double %src to float
+ %res1 = fptrunc double %src to float
+
+ ret void
+}
+
+define void @fpext(float %src){
+entry:
+; CHECK: %res1 = fpext float %src to double
+ %res1 = fpext float %src to double
+
+ ret void
+}
+
+define void @fptoui(float %src){
+entry:
+; CHECK: %res1 = fptoui float %src to i32
+ %res1 = fptoui float %src to i32
+
+ ret void
+}
+
+define void @fptosi(float %src){
+entry:
+; CHECK: %res1 = fptosi float %src to i32
+ %res1 = fptosi float %src to i32
+
+ ret void
+}
+
+define void @uitofp(i32 %src){
+entry:
+; CHECK: %res1 = uitofp i32 %src to float
+ %res1 = uitofp i32 %src to float
+
+ ret void
+}
+
+define void @sitofp(i32 %src){
+entry:
+; CHECK: %res1 = sitofp i32 %src to float
+ %res1 = sitofp i32 %src to float
+
+ ret void
+}
+
+define void @ptrtoint(i32* %src){
+entry:
+; CHECK: %res1 = ptrtoint i32* %src to i8
+ %res1 = ptrtoint i32* %src to i8
+
+ ret void
+}
+
+define void @inttoptr(i32 %src){
+entry:
+; CHECK: %res1 = inttoptr i32 %src to i32*
+ %res1 = inttoptr i32 %src to i32*
+
+ ret void
+}
+
+define void @bitcast(i32 %src1, i32* %src2){
+entry:
+; CHECK: %res1 = bitcast i32 %src1 to i32
+ %res1 = bitcast i32 %src1 to i32
+
+; CHECK: %res2 = bitcast i32* %src2 to i64*
+ %res2 = bitcast i32* %src2 to i64*
+
+ ret void
+} \ No newline at end of file
diff --git a/test/Bitcode/conversionInstructions.3.2.ll.bc b/test/Bitcode/conversionInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..fabf7dab6fb8
--- /dev/null
+++ b/test/Bitcode/conversionInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/deprecated-linker_private-linker_private_weak.ll b/test/Bitcode/deprecated-linker_private-linker_private_weak.ll
new file mode 100644
index 000000000000..12a527c7738f
--- /dev/null
+++ b/test/Bitcode/deprecated-linker_private-linker_private_weak.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-as -o - %s | llvm-dis | FileCheck %s
+; RUN: llvm-as -o /dev/null %s 2>&1 | FileCheck %s -check-prefix CHECK-WARNINGS
+
+@.linker_private = linker_private unnamed_addr constant [15 x i8] c"linker_private\00", align 64
+@.linker_private_weak = linker_private_weak unnamed_addr constant [20 x i8] c"linker_private_weak\00", align 64
+
+; CHECK: @.linker_private = private unnamed_addr constant [15 x i8] c"linker_private\00", align 64
+; CHECK: @.linker_private_weak = private unnamed_addr constant [20 x i8] c"linker_private_weak\00", align 64
+
+; CHECK-WARNINGS: warning: '.linker_private' is deprecated, treating as PrivateLinkage
+; CHECK-WARNINGS: @.linker_private = linker_private unnamed_addr constant [15 x i8] c"linker_private\00", align 64
+; CHECK-WARNINGS: ^
+
+; CHECK-WARNINGS: warning: '.linker_private_weak' is deprecated, treating as PrivateLinkage
+; CHECK-WARNINGS: @.linker_private_weak = linker_private_weak unnamed_addr constant [20 x i8] c"linker_private_weak\00", align 64
+; CHECK-WARNINGS: ^
+
diff --git a/test/Bitcode/drop-debug-info.ll b/test/Bitcode/drop-debug-info.ll
index da4ae0c541eb..5123018577ee 100644
--- a/test/Bitcode/drop-debug-info.ll
+++ b/test/Bitcode/drop-debug-info.ll
@@ -1,4 +1,5 @@
-; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+; RUN: llvm-as < %s -o %t.bc 2>&1 >/dev/null | FileCheck -check-prefix=WARN %s
+; RUN: llvm-dis < %t.bc | FileCheck %s
define i32 @main() {
entry:
@@ -22,5 +23,6 @@ entry:
!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
!12 = metadata !{i32 4, i32 0, metadata !4, null}
+; WARN: warning: ignoring debug info with an invalid version (0)
; CHECK-NOT: !dbg
; CHECK-NOT: !llvm.dbg.cu
diff --git a/test/Bitcode/global-variables.3.2.ll b/test/Bitcode/global-variables.3.2.ll
new file mode 100644
index 000000000000..549d025549be
--- /dev/null
+++ b/test/Bitcode/global-variables.3.2.ll
@@ -0,0 +1,41 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; global-variables.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not silently misread global variable attributes of
+; older bitcode files.
+
+@global.var = global i32 1
+; CHECK: @global.var = global i32 1
+
+@constant.var = constant i32 1
+; CHECK: @constant.var = constant i32 1
+
+@noinit.var = global float undef
+; CHECK: @noinit.var = global float undef
+
+@section.var = global i32 1, section "foo"
+; CHECK: @section.var = global i32 1, section "foo"
+
+@align.var = global i64 undef, align 8
+; CHECK: @align.var = global i64 undef, align 8
+
+@unnamed_addr.var = unnamed_addr global i8 1
+; CHECK: @unnamed_addr.var = unnamed_addr global i8 1
+
+@default_addrspace.var = addrspace(0) global i8 1
+; CHECK: @default_addrspace.var = global i8 1
+
+@non_default_addrspace.var = addrspace(1) global i8* undef
+; CHECK: @non_default_addrspace.var = addrspace(1) global i8* undef
+
+@initialexec.var = thread_local(initialexec) global i32 0, align 4
+; CHECK: @initialexec.var = thread_local(initialexec) global i32 0, align 4
+
+@localdynamic.var = thread_local(localdynamic) constant i32 0, align 4
+; CHECK: @localdynamic.var = thread_local(localdynamic) constant i32 0, align 4
+
+@localexec.var = thread_local(localexec) constant i32 0, align 4
+; CHECK: @localexec.var = thread_local(localexec) constant i32 0, align 4
+
+@string.var = private unnamed_addr constant [13 x i8] c"hello world\0A\00"
+; CHECK: @string.var = private unnamed_addr constant [13 x i8] c"hello world\0A\00"
diff --git a/test/Bitcode/global-variables.3.2.ll.bc b/test/Bitcode/global-variables.3.2.ll.bc
new file mode 100644
index 000000000000..c105f2fb1831
--- /dev/null
+++ b/test/Bitcode/global-variables.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/inalloca.ll b/test/Bitcode/inalloca.ll
new file mode 100644
index 000000000000..bad87a9b03f0
--- /dev/null
+++ b/test/Bitcode/inalloca.ll
@@ -0,0 +1,18 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+; inalloca should roundtrip.
+
+define void @foo(i32* inalloca %args) {
+ ret void
+}
+; CHECK-LABEL: define void @foo(i32* inalloca %args)
+
+define void @bar() {
+ ; Use the maximum alignment, since we stuff our bit with alignment.
+ %args = alloca inalloca i32, align 536870912
+ call void @foo(i32* inalloca %args)
+ ret void
+}
+; CHECK-LABEL: define void @bar() {
+; CHECK: %args = alloca inalloca i32, align 536870912
+; CHECK: call void @foo(i32* inalloca %args)
diff --git a/test/Bitcode/linkage-types-3.2.ll b/test/Bitcode/linkage-types-3.2.ll
new file mode 100644
index 000000000000..fd070efbd444
--- /dev/null
+++ b/test/Bitcode/linkage-types-3.2.ll
@@ -0,0 +1,128 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; linkage-types-3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not silently misread linkage types of
+; older bitcode files.
+
+@common.var = common global i32 0
+; CHECK: @common.var = common global i32 0
+
+@appending.var = appending global [8 x i32] undef
+; CHECK: @appending.var = appending global [8 x i32] undef
+
+@extern_weak.var = extern_weak global i32
+; CHECK: @extern_weak.var = extern_weak global i32
+
+@private.var = private constant i32 0
+; CHECK: @private.var = private constant i32 0
+
+@linker_private.var = linker_private constant i32 0
+; CHECK: @linker_private.var = private constant i32 0
+
+@linker_private_weak.var = linker_private_weak constant i32 0
+; CHECK: @linker_private_weak.var = private constant i32 0
+
+@linker_private_weak_def_auto.var = linker_private_weak_def_auto constant i32 0
+; CHECK: @linker_private_weak_def_auto.var = constant i32 0
+
+@internal.var = internal constant i32 0
+; CHECK: @internal.var = internal constant i32 0
+
+@available_externally.var = available_externally constant i32 0
+; CHECK: @available_externally.var = available_externally constant i32 0
+
+@linkonce.var = linkonce constant i32 0
+; CHECK: @linkonce.var = linkonce constant i32 0
+
+@weak.var = weak constant i32 0
+; CHECK: @weak.var = weak constant i32 0
+
+@linkonce_odr.var = linkonce_odr constant i32 0
+; CHECK: @linkonce_odr.var = linkonce_odr constant i32 0
+
+@linkonce_odr_auto_hide.var = linkonce_odr_auto_hide constant i32 0
+; CHECK: @linkonce_odr_auto_hide.var = constant i32 0
+
+@external.var = external constant i32
+; CHECK: @external.var = external constant i32
+
+@dllexport.var = dllexport global i32 0
+; CHECK: @dllexport.var = dllexport global i32 0
+
+@dllimport.var = dllimport global i32
+; CHECK: @dllimport.var = external dllimport global i32
+
+define private void @private()
+; CHECK: define private void @private
+{
+ ret void;
+}
+
+define linker_private void @linker_private()
+; CHECK: define private void @linker_private
+{
+ ret void;
+}
+
+define linker_private_weak void @linker_private_weak()
+; CHECK: define private void @linker_private_weak
+{
+ ret void;
+}
+
+define linker_private_weak_def_auto void @linker_private_weak_def_auto()
+; CHECK: define void @linker_private_weak_def_auto
+{
+ ret void;
+}
+
+define internal void @internal()
+; CHECK: define internal void @internal
+{
+ ret void;
+}
+
+define available_externally void @available_externally()
+; CHECK: define available_externally void @available_externally
+{
+ ret void;
+}
+
+define linkonce void @linkonce()
+; CHECK: define linkonce void @linkonce
+{
+ ret void;
+}
+
+define weak void @weak()
+; CHECK: define weak void @weak
+{
+ ret void;
+}
+
+define linkonce_odr void @linkonce_odr()
+; CHECK: define linkonce_odr void @linkonce_odr
+{
+ ret void;
+}
+
+define linkonce_odr_auto_hide void @linkonce_odr_auto_hide()
+; CHECK: define void @linkonce_odr_auto_hide
+{
+ ret void;
+}
+
+define external void @external()
+; CHECK: define void @external
+{
+ ret void;
+}
+
+declare dllimport void @dllimport()
+; CHECK: declare dllimport void @dllimport
+
+define dllexport void @dllexport()
+; CHECK: define dllexport void @dllexport()
+{
+ ret void;
+}
diff --git a/test/Bitcode/linkage-types-3.2.ll.bc b/test/Bitcode/linkage-types-3.2.ll.bc
new file mode 100644
index 000000000000..c856ddf7aa7b
--- /dev/null
+++ b/test/Bitcode/linkage-types-3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/local-linkage-default-visibility.3.4.ll b/test/Bitcode/local-linkage-default-visibility.3.4.ll
new file mode 100644
index 000000000000..45a7b1213a8c
--- /dev/null
+++ b/test/Bitcode/local-linkage-default-visibility.3.4.ll
@@ -0,0 +1,79 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; local-linkage-default-visibility.3.4.ll.bc was generated by passing this file
+; to llvm-as-3.4. The test checks that LLVM upgrades visibility of symbols
+; with local linkage to default visibility.
+
+@default.internal.var = internal global i32 0
+; CHECK: @default.internal.var = internal global i32 0
+
+@hidden.internal.var = internal hidden global i32 0
+; CHECK: @hidden.internal.var = internal global i32 0
+
+@protected.internal.var = internal protected global i32 0
+; CHECK: @protected.internal.var = internal global i32 0
+
+@default.private.var = private global i32 0
+; CHECK: @default.private.var = private global i32 0
+
+@hidden.private.var = private hidden global i32 0
+; CHECK: @hidden.private.var = private global i32 0
+
+@protected.private.var = private protected global i32 0
+; CHECK: @protected.private.var = private global i32 0
+
+@global = global i32 0
+
+@default.internal.alias = alias internal i32* @global
+; CHECK: @default.internal.alias = alias internal i32* @global
+
+@hidden.internal.alias = hidden alias internal i32* @global
+; CHECK: @hidden.internal.alias = alias internal i32* @global
+
+@protected.internal.alias = protected alias internal i32* @global
+; CHECK: @protected.internal.alias = alias internal i32* @global
+
+@default.private.alias = alias private i32* @global
+; CHECK: @default.private.alias = alias private i32* @global
+
+@hidden.private.alias = hidden alias private i32* @global
+; CHECK: @hidden.private.alias = alias private i32* @global
+
+@protected.private.alias = protected alias private i32* @global
+; CHECK: @protected.private.alias = alias private i32* @global
+
+define internal void @default.internal() {
+; CHECK: define internal void @default.internal
+entry:
+ ret void
+}
+
+define internal hidden void @hidden.internal() {
+; CHECK: define internal void @hidden.internal
+entry:
+ ret void
+}
+
+define internal protected void @protected.internal() {
+; CHECK: define internal void @protected.internal
+entry:
+ ret void
+}
+
+define private void @default.private() {
+; CHECK: define private void @default.private
+entry:
+ ret void
+}
+
+define private hidden void @hidden.private() {
+; CHECK: define private void @hidden.private
+entry:
+ ret void
+}
+
+define private protected void @protected.private() {
+; CHECK: define private void @protected.private
+entry:
+ ret void
+}
diff --git a/test/Bitcode/local-linkage-default-visibility.3.4.ll.bc b/test/Bitcode/local-linkage-default-visibility.3.4.ll.bc
new file mode 100644
index 000000000000..6e49f7e365b7
--- /dev/null
+++ b/test/Bitcode/local-linkage-default-visibility.3.4.ll.bc
Binary files differ
diff --git a/test/Bitcode/memInstructions.3.2.ll b/test/Bitcode/memInstructions.3.2.ll
new file mode 100644
index 000000000000..e4cb6bdbe96b
--- /dev/null
+++ b/test/Bitcode/memInstructions.3.2.ll
@@ -0,0 +1,328 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; memInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread memory related instructions of
+; older bitcode files.
+
+define void @alloca(){
+entry:
+; CHECK: %res1 = alloca i8
+ %res1 = alloca i8
+
+; CHECK-NEXT: %res2 = alloca i8, i32 2
+ %res2 = alloca i8, i32 2
+
+; CHECK-NEXT: %res3 = alloca i8, i32 2, align 4
+ %res3 = alloca i8, i32 2, align 4
+
+; CHECK-NEXT: %res4 = alloca i8, align 4
+ %res4 = alloca i8, align 4
+
+ ret void
+}
+
+define void @load(){
+entry:
+ %ptr1 = alloca i8
+ store i8 2, i8* %ptr1
+
+; CHECK: %res1 = load i8* %ptr1
+ %res1 = load i8* %ptr1
+
+; CHECK-NEXT: %res2 = load volatile i8* %ptr1
+ %res2 = load volatile i8* %ptr1
+
+; CHECK-NEXT: %res3 = load i8* %ptr1, align 1
+ %res3 = load i8* %ptr1, align 1
+
+; CHECK-NEXT: %res4 = load volatile i8* %ptr1, align 1
+ %res4 = load volatile i8* %ptr1, align 1
+
+; CHECK-NEXT: %res5 = load i8* %ptr1, !nontemporal !0
+ %res5 = load i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: %res6 = load volatile i8* %ptr1, !nontemporal !0
+ %res6 = load volatile i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: %res7 = load i8* %ptr1, align 1, !nontemporal !0
+ %res7 = load i8* %ptr1, align 1, !nontemporal !0
+
+; CHECK-NEXT: %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+ %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+
+; CHECK-NEXT: %res9 = load i8* %ptr1, !invariant.load !1
+ %res9 = load i8* %ptr1, !invariant.load !1
+
+; CHECK-NEXT: %res10 = load volatile i8* %ptr1, !invariant.load !1
+ %res10 = load volatile i8* %ptr1, !invariant.load !1
+
+; CHECK-NEXT: %res11 = load i8* %ptr1, align 1, !invariant.load !1
+ %res11 = load i8* %ptr1, align 1, !invariant.load !1
+
+; CHECK-NEXT: %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+ %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+
+; CHECK-NEXT: %res13 = load i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res13 = load i8* %ptr1, !nontemporal !0, !invariant.load !1
+
+; CHECK-NEXT: %res14 = load volatile i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res14 = load volatile i8* %ptr1, !nontemporal !0, !invariant.load !1
+
+; CHECK-NEXT: %res15 = load i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res15 = load i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+
+; CHECK-NEXT: %res16 = load volatile i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res16 = load volatile i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+
+ ret void
+}
+
+define void @loadAtomic(){
+entry:
+ %ptr1 = alloca i8
+ store i8 2, i8* %ptr1
+
+; CHECK: %res1 = load atomic i8* %ptr1 unordered, align 1
+ %res1 = load atomic i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: %res2 = load atomic i8* %ptr1 monotonic, align 1
+ %res2 = load atomic i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: %res3 = load atomic i8* %ptr1 acquire, align 1
+ %res3 = load atomic i8* %ptr1 acquire, align 1
+
+; CHECK-NEXT: %res4 = load atomic i8* %ptr1 seq_cst, align 1
+ %res4 = load atomic i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+ %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+ %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+ %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+
+; CHECK-NEXT: %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+ %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+ %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+ %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+ %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+
+; CHECK-NEXT: %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+ %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+
+; CHECK-NEXT: %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+ %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+ %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+ %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+
+; CHECK-NEXT: %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+ %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+
+ ret void
+}
+
+define void @store(){
+entry:
+ %ptr1 = alloca i8
+
+; CHECK: store i8 2, i8* %ptr1
+ store i8 2, i8* %ptr1
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1
+ store volatile i8 2, i8* %ptr1
+
+; CHECK-NEXT: store i8 2, i8* %ptr1, align 1
+ store i8 2, i8* %ptr1, align 1
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
+ store volatile i8 2, i8* %ptr1, align 1
+
+; CHECK-NEXT: store i8 2, i8* %ptr1, !nontemporal !0
+ store i8 2, i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1, !nontemporal !0
+ store volatile i8 2, i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
+ store i8 2, i8* %ptr1, align 1, !nontemporal !0
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+ store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+
+ ret void
+}
+
+define void @storeAtomic(){
+entry:
+ %ptr1 = alloca i8
+
+; CHECK: store atomic i8 2, i8* %ptr1 unordered, align 1
+ store atomic i8 2, i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 monotonic, align 1
+ store atomic i8 2, i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 release, align 1
+ store atomic i8 2, i8* %ptr1 release, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
+ store atomic i8 2, i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 unordered, align 1
+ store atomic volatile i8 2, i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
+ store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 release, align 1
+ store atomic volatile i8 2, i8* %ptr1 release, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
+ store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
+ store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
+ store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread release, align 1
+ store atomic i8 2, i8* %ptr1 singlethread release, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
+ store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
+ store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
+ store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
+ store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
+ store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
+
+ ret void
+}
+
+define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
+entry:
+ ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
+
+; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+; CHECK-NEXT: %res1 = extractvalue { i32, i1 } [[TMP]], 0
+ %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+; CHECK-NEXT: %res2 = extractvalue { i32, i1 } [[TMP]], 0
+ %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+; CHECK-NEXT: %res3 = extractvalue { i32, i1 } [[TMP]], 0
+ %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+; CHECK-NEXT: %res4 = extractvalue { i32, i1 } [[TMP]], 0
+ %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
+; CHECK-NEXT: %res5 = extractvalue { i32, i1 } [[TMP]], 0
+ %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
+; CHECK-NEXT: %res6 = extractvalue { i32, i1 } [[TMP]], 0
+ %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
+ %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+; CHECK-NEXT: %res8 = extractvalue { i32, i1 } [[TMP]], 0
+ %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
+; CHECK-NEXT: %res9 = extractvalue { i32, i1 } [[TMP]], 0
+ %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
+; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
+ %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+; CHECK-NEXT: %res11 = extractvalue { i32, i1 } [[TMP]], 0
+ %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+; CHECK-NEXT: %res12 = extractvalue { i32, i1 } [[TMP]], 0
+ %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+; CHECK-NEXT: %res13 = extractvalue { i32, i1 } [[TMP]], 0
+ %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+; CHECK-NEXT: %res14 = extractvalue { i32, i1 } [[TMP]], 0
+ %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+; CHECK-NEXT: %res15 = extractvalue { i32, i1 } [[TMP]], 0
+ %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+; CHECK-NEXT: %res16 = extractvalue { i32, i1 } [[TMP]], 0
+ %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK-NEXT: %res17 = extractvalue { i32, i1 } [[TMP]], 0
+ %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK-NEXT: %res18 = extractvalue { i32, i1 } [[TMP]], 0
+ %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+; CHECK-NEXT: %res19 = extractvalue { i32, i1 } [[TMP]], 0
+ %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+; CHECK-NEXT: %res20 = extractvalue { i32, i1 } [[TMP]], 0
+ %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+
+ ret void
+}
+
+define void @getelementptr({i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
+entry:
+; CHECK: %res1 = getelementptr { i8, i8 }* %s, i32 1, i32 1
+ %res1 = getelementptr {i8, i8}* %s, i32 1, i32 1
+
+; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }* %s, i32 1, i32 1
+ %res2 = getelementptr inbounds {i8, i8}* %s, i32 1, i32 1
+
+; CHECK-NEXT: %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
+ %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
+
+ ret void
+}
+
+!0 = metadata !{ i32 1 }
+!1 = metadata !{} \ No newline at end of file
diff --git a/test/Bitcode/memInstructions.3.2.ll.bc b/test/Bitcode/memInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..d75954a301b6
--- /dev/null
+++ b/test/Bitcode/memInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/miscInstructions.3.2.ll b/test/Bitcode/miscInstructions.3.2.ll
new file mode 100644
index 000000000000..bceae20109c3
--- /dev/null
+++ b/test/Bitcode/miscInstructions.3.2.ll
@@ -0,0 +1,126 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; miscInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread miscellaneous instructions of
+; older bitcode files.
+
+define void @icmp(i32 %x1, i32 %x2, i32* %ptr1, i32* %ptr2, <2 x i32> %vec1, <2 x i32> %vec2){
+entry:
+; CHECK: %res1 = icmp eq i32 %x1, %x2
+ %res1 = icmp eq i32 %x1, %x2
+
+; CHECK-NEXT: %res2 = icmp ne i32 %x1, %x2
+ %res2 = icmp ne i32 %x1, %x2
+
+; CHECK-NEXT: %res3 = icmp ugt i32 %x1, %x2
+ %res3 = icmp ugt i32 %x1, %x2
+
+; CHECK-NEXT: %res4 = icmp uge i32 %x1, %x2
+ %res4 = icmp uge i32 %x1, %x2
+
+; CHECK-NEXT: %res5 = icmp ult i32 %x1, %x2
+ %res5 = icmp ult i32 %x1, %x2
+
+; CHECK-NEXT: %res6 = icmp ule i32 %x1, %x2
+ %res6 = icmp ule i32 %x1, %x2
+
+; CHECK-NEXT: %res7 = icmp sgt i32 %x1, %x2
+ %res7 = icmp sgt i32 %x1, %x2
+
+; CHECK-NEXT: %res8 = icmp sge i32 %x1, %x2
+ %res8 = icmp sge i32 %x1, %x2
+
+; CHECK-NEXT: %res9 = icmp slt i32 %x1, %x2
+ %res9 = icmp slt i32 %x1, %x2
+
+; CHECK-NEXT: %res10 = icmp sle i32 %x1, %x2
+ %res10 = icmp sle i32 %x1, %x2
+
+; CHECK-NEXT: %res11 = icmp eq i32* %ptr1, %ptr2
+ %res11 = icmp eq i32* %ptr1, %ptr2
+
+; CHECK-NEXT: %res12 = icmp eq <2 x i32> %vec1, %vec2
+ %res12 = icmp eq <2 x i32> %vec1, %vec2
+
+ ret void
+}
+
+
+define void @fcmp(float %x1, float %x2, <2 x float> %vec1, <2 x float> %vec2){
+entry:
+; CHECK: %res1 = fcmp oeq float %x1, %x2
+ %res1 = fcmp oeq float %x1, %x2
+
+; CHECK-NEXT: %res2 = fcmp one float %x1, %x2
+ %res2 = fcmp one float %x1, %x2
+
+; CHECK-NEXT: %res3 = fcmp ugt float %x1, %x2
+ %res3 = fcmp ugt float %x1, %x2
+
+; CHECK-NEXT: %res4 = fcmp uge float %x1, %x2
+ %res4 = fcmp uge float %x1, %x2
+
+; CHECK-NEXT: %res5 = fcmp ult float %x1, %x2
+ %res5 = fcmp ult float %x1, %x2
+
+; CHECK-NEXT: %res6 = fcmp ule float %x1, %x2
+ %res6 = fcmp ule float %x1, %x2
+
+; CHECK-NEXT: %res7 = fcmp ogt float %x1, %x2
+ %res7 = fcmp ogt float %x1, %x2
+
+; CHECK-NEXT: %res8 = fcmp oge float %x1, %x2
+ %res8 = fcmp oge float %x1, %x2
+
+; CHECK-NEXT: %res9 = fcmp olt float %x1, %x2
+ %res9 = fcmp olt float %x1, %x2
+
+; CHECK-NEXT: %res10 = fcmp ole float %x1, %x2
+ %res10 = fcmp ole float %x1, %x2
+
+; CHECK-NEXT: %res11 = fcmp ord float %x1, %x2
+ %res11 = fcmp ord float %x1, %x2
+
+; CHECK-NEXT: %res12 = fcmp ueq float %x1, %x2
+ %res12 = fcmp ueq float %x1, %x2
+
+; CHECK-NEXT: %res13 = fcmp une float %x1, %x2
+ %res13 = fcmp une float %x1, %x2
+
+; CHECK-NEXT: %res14 = fcmp uno float %x1, %x2
+ %res14 = fcmp uno float %x1, %x2
+
+; CHECK-NEXT: %res15 = fcmp true float %x1, %x2
+ %res15 = fcmp true float %x1, %x2
+
+; CHECK-NEXT: %res16 = fcmp false float %x1, %x2
+ %res16 = fcmp false float %x1, %x2
+
+; CHECK-NEXT: %res17 = fcmp oeq <2 x float> %vec1, %vec2
+ %res17 = fcmp oeq <2 x float> %vec1, %vec2
+
+ ret void
+}
+
+declare i32 @printf(i8* noalias nocapture, ...)
+
+define void @call(i32 %x, i8* %msg ){
+entry:
+
+; CHECK: %res1 = call i32 @test(i32 %x)
+ %res1 = call i32 @test(i32 %x)
+
+; CHECK-NEXT: %res2 = tail call i32 @test(i32 %x)
+ %res2 = tail call i32 @test(i32 %x)
+
+; CHECK-NEXT: %res3 = call i32 (i8*, ...)* @printf(i8* %msg, i32 12, i8 42)
+ %res3 = call i32 (i8*, ...)* @printf(i8* %msg, i32 12, i8 42)
+
+ ret void
+}
+
+define i32 @test(i32 %x){
+entry:
+
+ ret i32 %x
+}
diff --git a/test/Bitcode/miscInstructions.3.2.ll.bc b/test/Bitcode/miscInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..9d479b506171
--- /dev/null
+++ b/test/Bitcode/miscInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/old-aliases.ll b/test/Bitcode/old-aliases.ll
new file mode 100644
index 000000000000..7a0eea2f3f24
--- /dev/null
+++ b/test/Bitcode/old-aliases.ll
@@ -0,0 +1,22 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; old-aliases.ll.bc consists of this file assembled with an old llvm-as (3.5 trunk)
+; from when aliases contained a ConstantExpr.
+
+@v1 = global i32 0
+; CHECK: @v1 = global i32 0
+
+@v2 = global [1 x i32] zeroinitializer
+; CHECK: @v2 = global [1 x i32] zeroinitializer
+
+@v3 = alias bitcast (i32* @v1 to i16*)
+; CHECK: @v3 = alias bitcast (i32* @v1 to i16*)
+
+@v4 = alias getelementptr ([1 x i32]* @v2, i32 0, i32 0)
+; CHECK: @v4 = alias getelementptr inbounds ([1 x i32]* @v2, i32 0, i32 0)
+
+@v5 = alias i32 addrspace(2)* addrspacecast (i32 addrspace(0)* @v1 to i32 addrspace(2)*)
+; CHECK: @v5 = alias addrspacecast (i32* @v1 to i32 addrspace(2)*)
+
+@v6 = alias i16* @v3
+; CHECK: @v6 = alias i16* @v3
diff --git a/test/Bitcode/old-aliases.ll.bc b/test/Bitcode/old-aliases.ll.bc
new file mode 100644
index 000000000000..1f157b2a0452
--- /dev/null
+++ b/test/Bitcode/old-aliases.ll.bc
Binary files differ
diff --git a/test/Bitcode/pr18704.ll b/test/Bitcode/pr18704.ll
new file mode 100644
index 000000000000..f05fe53259fa
--- /dev/null
+++ b/test/Bitcode/pr18704.ll
@@ -0,0 +1,158 @@
+; RUN: not llvm-dis < %s.bc 2>&1 | FileCheck %s
+
+; CHECK: llvm-dis{{(\.EXE|\.exe)?}}: Never resolved value found in function
+
+; pr18704.ll.bc has an instruction referring to an invalid type.
+; The test checks that LLVM reports the error and doesn't access freed memory
+; in doing so.
+
+;<MODULE_BLOCK NumWords=217 BlockCodeSize=3>
+; <VERSION op0=1/>
+; <BLOCKINFO_BLOCK/>
+; <TYPE_BLOCK_ID NumWords=23 BlockCodeSize=4>
+; <NUMENTRY op0=25/>
+; <INTEGER op0=8/>
+; <POINTER abbrevid=4 op0=0 op1=0/>
+; <POINTER abbrevid=4 op0=1 op1=0/>
+; <ARRAY abbrevid=9 op0=6 op1=0/>
+; <POINTER abbrevid=4 op0=3 op1=0/>
+; <ARRAY abbrevid=9 op0=10 op1=0/>
+; <POINTER abbrevid=4 op0=5 op1=0/>
+; <ARRAY abbrevid=9 op0=4 op1=0/>
+; <POINTER abbrevid=4 op0=7 op1=0/>
+; <ARRAY abbrevid=9 op0=5 op1=0/>
+; <POINTER abbrevid=4 op0=9 op1=0/>
+; <STRUCT_NAME abbrevid=7 op0=115 op1=116 op2=114 op3=117 op4=99 op5=116 op6=46 op7=112 op8=97 op9=105 op10=114 op11=46 op12=48/>
+; <STRUCT_NAMED abbrevid=8 op0=0 op1=1 op2=1/>
+; <ARRAY abbrevid=9 op0=2 op1=11/>
+; <POINTER abbrevid=4 op0=12 op1=0/>
+; <FUNCTION abbrevid=5 op0=0 op1=1 op2=1 op3=1/>
+; <POINTER abbrevid=4 op0=14 op1=0/>
+; <FUNCTION abbrevid=5 op0=0 op1=1 op2=1/>
+; <POINTER abbrevid=4 op0=16 op1=0/>
+; <INTEGER op0=64/>
+; <FUNCTION abbrevid=5 op0=0 op1=1 op2=18/>
+; <POINTER abbrevid=4 op0=19 op1=0/>
+; <INTEGER op0=32/>
+; <FUNCTION abbrevid=5 op0=0 op1=21/>
+; <POINTER abbrevid=4 op0=22 op1=0/>
+; <VOID/>
+; </TYPE_BLOCK_ID>
+; <GLOBALVAR abbrevid=4 op0=2 op1=0 op2=0 op3=0 op4=0 op5=0/>
+; <GLOBALVAR abbrevid=4 op0=2 op1=0 op2=0 op3=0 op4=0 op5=0/>
+; <GLOBALVAR abbrevid=4 op0=2 op1=0 op2=0 op3=0 op4=0 op5=0/>
+; <GLOBALVAR op0=4 op1=1 op2=25 op3=9 op4=0 op5=0 op6=0 op7=0 op8=1 op9=0/>
+; <GLOBALVAR op0=6 op1=1 op2=26 op3=9 op4=0 op5=0 op6=0 op7=0 op8=1 op9=0/>
+; <GLOBALVAR op0=8 op1=1 op2=27 op3=9 op4=0 op5=0 op6=0 op7=0 op8=1 op9=0/>
+; <GLOBALVAR abbrevid=4 op0=10 op1=1 op2=28 op3=3 op4=0 op5=0/>
+; <GLOBALVAR abbrevid=4 op0=6 op1=1 op2=26 op3=3 op4=0 op5=0/>
+; <GLOBALVAR abbrevid=4 op0=13 op1=1 op2=31 op3=3 op4=0 op5=0/>
+; <GLOBALVAR abbrevid=4 op0=2 op1=1 op2=23 op3=3 op4=0 op5=0/>
+; <GLOBALVAR abbrevid=4 op0=2 op1=0 op2=24 op3=0 op4=0 op5=0/>
+; <GLOBALVAR op0=10 op1=1 op2=28 op3=9 op4=0 op5=0 op6=0 op7=0 op8=1 op9=0/>
+; <FUNCTION op0=15 op1=0 op2=1 op3=0 op4=0 op5=0 op6=0 op7=0 op8=0 op9=0/>
+; <FUNCTION op0=17 op1=0 op2=1 op3=0 op4=0 op5=0 op6=0 op7=0 op8=0 op9=0/>
+; <FUNCTION op0=20 op1=0 op2=1 op3=0 op4=0 op5=0 op6=0 op7=0 op8=0 op9=0/>
+; <FUNCTION op0=15 op1=0 op2=0 op3=0 op4=0 op5=0 op6=0 op7=0 op8=0 op9=0/>
+; <FUNCTION op0=17 op1=0 op2=0 op3=0 op4=0 op5=0 op6=0 op7=0 op8=0 op9=0/>
+; <FUNCTION op0=23 op1=0 op2=0 op3=0 op4=0 op5=0 op6=0 op7=0 op8=0 op9=0/>
+; <CONSTANTS_BLOCK NumWords=20 BlockCodeSize=4>
+; <SETTYPE abbrevid=4 op0=21/>
+; <NULL/>
+; <SETTYPE abbrevid=4 op0=1/>
+; <CE_CAST abbrevid=6 op0=11 op1=17 op2=16/>
+; <CE_INBOUNDS_GEP op0=6 op1=7 op2=21 op3=18 op4=21 op5=18/>
+; <CE_CAST abbrevid=6 op0=11 op1=15 op2=15/>
+; <CE_CAST abbrevid=6 op0=11 op1=13 op2=8/>
+; <CE_CAST abbrevid=6 op0=11 op1=2 op2=9/>
+; <SETTYPE abbrevid=4 op0=3/>
+; <CSTRING abbrevid=11 op0=112 op1=114 op2=105 op3=110 op4=116/>
+; <SETTYPE abbrevid=4 op0=5/>
+; <CSTRING abbrevid=11 op0=115 op1=97 op2=121 op3=72 op4=105 op5=87 op6=105 op7=116 op8=104/>
+; <SETTYPE abbrevid=4 op0=7/>
+; <CSTRING abbrevid=11 op0=110 op1=101 op2=119/>
+; <SETTYPE abbrevid=4 op0=9/>
+; <CSTRING abbrevid=11 op0=109 op1=97 op2=105 op3=110/>
+; <SETTYPE abbrevid=4 op0=11/>
+; <AGGREGATE abbrevid=8 op0=31 op1=19/>
+; <AGGREGATE abbrevid=8 op0=20 op1=21/>
+; <SETTYPE abbrevid=4 op0=12/>
+; <AGGREGATE abbrevid=8 op0=28 op1=29/>
+; <SETTYPE abbrevid=4 op0=1/>
+; <CE_INBOUNDS_GEP op0=10 op1=6 op2=21 op3=18 op4=21 op5=18/>
+; </CONSTANTS_BLOCK>
+; <METADATA_BLOCK NumWords=23 BlockCodeSize=3>
+; <METADATA_KIND op0=0 op1=100 op2=98 op3=103/>
+; <METADATA_KIND op0=1 op1=116 op2=98 op3=97 op4=97/>
+; <METADATA_KIND op0=2 op1=112 op2=114 op3=111 op4=102/>
+; <METADATA_KIND op0=3 op1=102 op2=112 op3=109 op4=97 op5=116 op6=104/>
+; <METADATA_KIND op0=4 op1=114 op2=97 op3=110 op4=103 op5=101/>
+; <METADATA_KIND op0=5 op1=116 op2=98 op3=97 op4=97 op5=46 op6=115 op7=116 op8=114 op9=117 op10=99 op11=116/>
+; <METADATA_KIND op0=6 op1=105 op2=110 op3=118 op4=97 op5=114 op6=105 op7=97 op8=110 op9=116 op10=46 op11=108 op12=111 op13=97 op14=100/>
+; </METADATA_BLOCK>
+; <VALUE_SYMTAB NumWords=29 BlockCodeSize=4>
+; <ENTRY abbrevid=6 op0=16 op1=101 op2=120 op3=97 op4=109 op5=112 op6=108 op7=101 op8=95 op9=109 op10=97 op11=105 op12=110/>
+; <ENTRY abbrevid=6 op0=1 op1=99 op2=111 op3=110 op4=115 op5=111 op6=108 op7=101/>
+; <ENTRY abbrevid=6 op0=2 op1=103 op2=114 op3=101 op4=101 op5=116 op6=105 op7=110 op8=103/>
+; <ENTRY abbrevid=6 op0=15 op1=101 op2=120 op3=97 op4=109 op5=112 op6=108 op7=101 op8=95 op9=115 op10=97 op11=121 op12=72 op13=105 op14=87 op15=105 op16=116 op17=104/>
+; <ENTRY abbrevid=6 op0=0 op1=115 op2=116 op3=114 op4=105 op5=110 op6=103/>
+; <ENTRY abbrevid=6 op0=14 op1=109 op2=97 op3=108 op4=108 op5=111 op6=99/>
+; <ENTRY abbrevid=6 op0=8 op1=101 op2=120 op3=97 op4=109 op5=112 op6=108 op7=101 op8=95 op9=118 op10=116 op11=97 op12=98/>
+; <ENTRY abbrevid=6 op0=13 op1=115 op2=116 op3=114 op4=105 op5=110 op6=103 op7=95 op8=115 op9=116 op10=114 op11=105 op12=110 op13=103 op14=76 op15=105 op16=116 op17=101 op18=114 op19=97 op20=108/>
+; <ENTRY abbrevid=6 op0=9 op1=95 op2=95 op3=101 op4=120 op5=97 op6=109 op7=112 op8=108 op9=101/>
+; <ENTRY abbrevid=6 op0=12 op1=103 op2=101 op3=116 op4=102 op5=117 op6=110 op7=99/>
+; <ENTRY abbrevid=6 op0=10 op1=101 op2=120 op3=97 op4=109 op5=112 op6=108 op7=101/>
+; <ENTRY abbrevid=6 op0=17 op1=109 op2=97 op3=105 op4=110/>
+; </VALUE_SYMTAB>
+; <FUNCTION_BLOCK NumWords=18 BlockCodeSize=4>
+; <DECLAREBLOCKS op0=1/>
+; <CONSTANTS_BLOCK NumWords=3 BlockCodeSize=4>
+; <SETTYPE abbrevid=4 op0=1/>
+; <CE_INBOUNDS_GEP op0=4 op1=3 op2=21 op3=18 op4=21 op5=18/>
+; </CONSTANTS_BLOCK>
+; <INST_LOAD abbrevid=4 op0=34 op1=0 op2=0/>
+; <INST_CALL op0=0 op1=0 op2=24 op3=1 op4=2/>
+; <INST_CAST abbrevid=7 op0=1 op1=15 op2=11/>
+; <INST_CALL op0=0 op1=0 op2=1 op3=3 op4=5/>
+; <INST_RET abbrevid=9 op0=1/>
+; <VALUE_SYMTAB NumWords=4 BlockCodeSize=4>
+; <BBENTRY abbrevid=7 op0=0 op1=101 op2=110 op3=116 op4=114 op5=121/>
+; <ENTRY abbrevid=6 op0=33 op1=115 op2=97 op3=121 op4=105 op5=110 op6=103/>
+; </VALUE_SYMTAB>
+; </FUNCTION_BLOCK>
+; <FUNCTION_BLOCK NumWords=23 BlockCodeSize=4>
+; <DECLAREBLOCKS op0=1/>
+; <CONSTANTS_BLOCK NumWords=4 BlockCodeSize=4>
+; <SETTYPE abbrevid=4 op0=1/>
+; <CE_INBOUNDS_GEP op0=6 op1=4 op2=21 op3=18 op4=21 op5=18/>
+; <CE_INBOUNDS_GEP op0=8 op1=5 op2=21 op3=18 op4=21 op5=18/>
+; </CONSTANTS_BLOCK>
+; <INST_LOAD op0=4294966291 op1=2 op2=0 op3=0/>
+; <INST_CALL op0=0 op1=0 op2=24 op3=1 op4=3/>
+; <INST_CAST abbrevid=7 op0=1 op1=15 op2=11/>
+; <INST_LOAD abbrevid=4 op0=36 op1=0 op2=0/>
+; <INST_CALL op0=0 op1=0 op2=27 op3=1 op4=5/>
+; <INST_CAST abbrevid=7 op0=1 op1=17 op2=11/>
+; <INST_CALL op0=0 op1=0 op2=1 op3=3/>
+; <INST_CALL op0=0 op1=0 op2=5 op3=7 op4=1/>
+; <INST_RET abbrevid=9 op0=1/>
+; <VALUE_SYMTAB NumWords=2 BlockCodeSize=4>
+; <BBENTRY abbrevid=7 op0=0 op1=101 op2=110 op3=116 op4=114 op5=121/>
+; </VALUE_SYMTAB>
+; </FUNCTION_BLOCK>
+; <FUNCTION_BLOCK NumWords=15 BlockCodeSize=4>
+; <DECLAREBLOCKS op0=1/>
+; <CONSTANTS_BLOCK NumWords=3 BlockCodeSize=4>
+; <SETTYPE abbrevid=4 op0=1/>
+; <CE_INBOUNDS_GEP op0=10 op1=11 op2=21 op3=18 op4=21 op5=18/>
+; </CONSTANTS_BLOCK>
+; <INST_LOAD abbrevid=4 op0=23 op1=0 op2=0/>
+; <INST_CALL op0=0 op1=0 op2=22 op3=1 op4=2/>
+; <INST_CAST abbrevid=7 op0=1 op1=17 op2=11/>
+; <INST_CALL op0=0 op1=0 op2=1 op3=3/>
+; <INST_RET abbrevid=9 op0=19/>
+; <VALUE_SYMTAB NumWords=2 BlockCodeSize=4>
+; <BBENTRY abbrevid=7 op0=0 op1=101 op2=110 op3=116 op4=114 op5=121/>
+; </VALUE_SYMTAB>
+; </FUNCTION_BLOCK>
+;</MODULE_BLOCK>
diff --git a/test/Bitcode/pr18704.ll.bc b/test/Bitcode/pr18704.ll.bc
new file mode 100644
index 000000000000..dbfcf37b82ab
--- /dev/null
+++ b/test/Bitcode/pr18704.ll.bc
Binary files differ
diff --git a/test/Bitcode/select.ll b/test/Bitcode/select.ll
index 71e669a90cdc..08a3061394db 100644
--- a/test/Bitcode/select.ll
+++ b/test/Bitcode/select.ll
@@ -5,5 +5,5 @@ define <2 x i32> @main() {
}
; CHECK: define <2 x i32> @main() {
-; CHECK: ret <2 x i32> select (<2 x i1> <i1 false, i1 undef>, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 undef>)
+; CHECK: ret <2 x i32> <i32 0, i32 undef>
; CHECK: }
diff --git a/test/Bitcode/tailcall.ll b/test/Bitcode/tailcall.ll
new file mode 100644
index 000000000000..765b47054cad
--- /dev/null
+++ b/test/Bitcode/tailcall.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+; Check that musttail and tail roundtrip.
+
+declare cc8191 void @t1_callee()
+define cc8191 void @t1() {
+; CHECK: tail call cc8191 void @t1_callee()
+ tail call cc8191 void @t1_callee()
+ ret void
+}
+
+declare cc8191 void @t2_callee()
+define cc8191 void @t2() {
+; CHECK: musttail call cc8191 void @t2_callee()
+ musttail call cc8191 void @t2_callee()
+ ret void
+}
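As background for the round-trip above: `tail` is only a hint that the call may be lowered as a tail call, whereas `musttail` demands tail-call lowering and compatible caller/callee prototypes. A minimal sketch in plain LLVM IR, using hypothetical function names that are not part of the patch, which should survive llvm-as | llvm-dis with both markers intact:

declare void @sketch_callee()              ; hypothetical callee, for illustration only

define void @sketch_tail() {
  tail call void @sketch_callee()          ; hint: the optimizer may emit a tail call
  ret void
}

define void @sketch_musttail() {
  musttail call void @sketch_callee()      ; guarantee: codegen must emit a tail call
  ret void
}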
diff --git a/test/Bitcode/terminatorInstructions.3.2.ll b/test/Bitcode/terminatorInstructions.3.2.ll
new file mode 100644
index 000000000000..31e78967ee0c
--- /dev/null
+++ b/test/Bitcode/terminatorInstructions.3.2.ll
@@ -0,0 +1,47 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; terminatorInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread terminator instructions from
+; older bitcode files.
+
+define i32 @condbr(i1 %cond){
+entry:
+; CHECK: br i1 %cond, label %TrueLabel, label %FalseLabel
+ br i1 %cond, label %TrueLabel, label %FalseLabel
+
+ TrueLabel:
+ ret i32 1
+
+ FalseLabel:
+ ret i32 0
+}
+
+define i32 @uncondbr(){
+entry:
+; CHECK: br label %uncondLabel
+ br label %uncondLabel
+
+ uncondLabel:
+ ret i32 1
+}
+
+define i32 @indirectbr(i8* %Addr){
+entry:
+; CHECK: indirectbr i8* %Addr, [label %bb1, label %bb2]
+ indirectbr i8* %Addr, [ label %bb1, label %bb2 ]
+
+ bb1:
+ ret i32 1
+
+ bb2:
+ ret i32 0
+}
+
+define void @unreachable(){
+entry:
+; CHECK: unreachable
+ unreachable
+
+ ret void
+}
+
diff --git a/test/Bitcode/terminatorInstructions.3.2.ll.bc b/test/Bitcode/terminatorInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..9d92ead8ad18
--- /dev/null
+++ b/test/Bitcode/terminatorInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/upgrade-global-ctors.ll b/test/Bitcode/upgrade-global-ctors.ll
new file mode 100644
index 000000000000..bd253a81620f
--- /dev/null
+++ b/test/Bitcode/upgrade-global-ctors.ll
@@ -0,0 +1,3 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
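The upgrade exercised above widens each @llvm.global_ctors entry from the older two-field { i32, void ()* } layout to the three-field { i32, void ()*, i8* } layout that the CHECK line expects. A minimal sketch with a hypothetical constructor @my_ctor (illustrative only; the extra i8* field is shown defaulted to null):

; Pre-upgrade layout, as older bitcode encoded it:
;   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
;                          [{ i32, void ()* } { i32 65535, void ()* @my_ctor }]

; Post-upgrade layout, with the added i8* field:
@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
                     [{ i32, void ()*, i8* } { i32 65535, void ()* @my_ctor, i8* null }]

define void @my_ctor() {
  ret void
}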
diff --git a/test/Bitcode/upgrade-global-ctors.ll.bc b/test/Bitcode/upgrade-global-ctors.ll.bc
new file mode 100644
index 000000000000..927fd91867e9
--- /dev/null
+++ b/test/Bitcode/upgrade-global-ctors.ll.bc
Binary files differ
diff --git a/test/Bitcode/upgrade-loop-metadata.ll b/test/Bitcode/upgrade-loop-metadata.ll
new file mode 100644
index 000000000000..67a8d3935926
--- /dev/null
+++ b/test/Bitcode/upgrade-loop-metadata.ll
@@ -0,0 +1,37 @@
+; Test to make sure loop vectorizer metadata is automatically upgraded.
+;
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+define void @_Z28loop_with_vectorize_metadatav() {
+entry:
+ %i = alloca i32, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 16
+ br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
+
+for.body: ; preds = %for.cond
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %1 = load i32* %i, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ ret void
+}
+
+; CHECK: !{metadata !"llvm.loop.interleave.count", i32 4}
+; CHECK: !{metadata !"llvm.loop.vectorize.width", i32 8}
+; CHECK: !{metadata !"llvm.loop.vectorize.enable", i1 true}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (trunk 211528)"}
+!1 = metadata !{metadata !1, metadata !2, metadata !3, metadata !4, metadata !4}
+!2 = metadata !{metadata !"llvm.vectorizer.unroll", i32 4}
+!3 = metadata !{metadata !"llvm.vectorizer.width", i32 8}
+!4 = metadata !{metadata !"llvm.vectorizer.enable", i1 true}
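The auto-upgrade checked above is a renaming of the hint strings with the values carried over unchanged: llvm.vectorizer.unroll becomes llvm.loop.interleave.count, llvm.vectorizer.width becomes llvm.loop.vectorize.width, and llvm.vectorizer.enable becomes llvm.loop.vectorize.enable. A minimal sketch of the upgraded form, in the same 3.5-era metadata syntax and with a hypothetical loop (illustrative only, not part of the patch):

define void @sketch_loop() {
entry:
  br label %loop
loop:
  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  %i.next = add i32 %i, 1
  %cmp = icmp slt i32 %i.next, 16
  ; the latch branch carries the upgraded llvm.loop.* hints
  br i1 %cmp, label %loop, label %exit, !llvm.loop !1
exit:
  ret void
}

!1 = metadata !{metadata !1, metadata !2, metadata !3, metadata !4}
!2 = metadata !{metadata !"llvm.loop.interleave.count", i32 4}
!3 = metadata !{metadata !"llvm.loop.vectorize.width", i32 8}
!4 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}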
diff --git a/test/Bitcode/upgrade-loop-metadata.ll.bc b/test/Bitcode/upgrade-loop-metadata.ll.bc
new file mode 100644
index 000000000000..3f218cb7feb4
--- /dev/null
+++ b/test/Bitcode/upgrade-loop-metadata.ll.bc
Binary files differ
diff --git a/test/Bitcode/variableArgumentIntrinsic.3.2.ll b/test/Bitcode/variableArgumentIntrinsic.3.2.ll
new file mode 100644
index 000000000000..35fe0e252822
--- /dev/null
+++ b/test/Bitcode/variableArgumentIntrinsic.3.2.ll
@@ -0,0 +1,33 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; variableArgumentIntrinsic.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread variable argument intrinsic instructions
+; of older bitcode files.
+
+define i32 @varArgIntrinsic(i32 %X, ...) {
+
+ %ap = alloca i8*
+ %ap2 = bitcast i8** %ap to i8*
+
+; CHECK: call void @llvm.va_start(i8* %ap2)
+ call void @llvm.va_start(i8* %ap2)
+
+; CHECK-NEXT: %tmp = va_arg i8** %ap, i32
+ %tmp = va_arg i8** %ap, i32
+
+ %aq = alloca i8*
+ %aq2 = bitcast i8** %aq to i8*
+
+; CHECK: call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+ call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+; CHECK-NEXT: call void @llvm.va_end(i8* %aq2)
+ call void @llvm.va_end(i8* %aq2)
+
+; CHECK-NEXT: call void @llvm.va_end(i8* %ap2)
+ call void @llvm.va_end(i8* %ap2)
+ ret i32 %tmp
+}
+
+declare void @llvm.va_start(i8*)
+declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_end(i8*) \ No newline at end of file
diff --git a/test/Bitcode/variableArgumentIntrinsic.3.2.ll.bc b/test/Bitcode/variableArgumentIntrinsic.3.2.ll.bc
new file mode 100644
index 000000000000..066e102b1da9
--- /dev/null
+++ b/test/Bitcode/variableArgumentIntrinsic.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/vectorInstructions.3.2.ll b/test/Bitcode/vectorInstructions.3.2.ll
new file mode 100644
index 000000000000..b24ef75ef081
--- /dev/null
+++ b/test/Bitcode/vectorInstructions.3.2.ll
@@ -0,0 +1,34 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; vectorInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread vector operations of
+; older bitcode files.
+
+define void @extractelement(<2 x i8> %x1){
+entry:
+; CHECK: %res1 = extractelement <2 x i8> %x1, i32 0
+ %res1 = extractelement <2 x i8> %x1, i32 0
+
+ ret void
+}
+
+define void @insertelement(<2 x i8> %x1){
+entry:
+; CHECK: %res1 = insertelement <2 x i8> %x1, i8 0, i32 0
+ %res1 = insertelement <2 x i8> %x1, i8 0, i32 0
+
+ ret void
+}
+
+define void @shufflevector(<2 x i8> %x1){
+entry:
+; CHECK: %res1 = shufflevector <2 x i8> %x1, <2 x i8> %x1, <2 x i32> <i32 0, i32 1>
+ %res1 = shufflevector <2 x i8> %x1, <2 x i8> %x1, <2 x i32> <i32 0, i32 1>
+
+; CHECK-NEXT: %res2 = shufflevector <2 x i8> %x1, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
+ %res2 = shufflevector <2 x i8> %x1, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
+
+ ret void
+}
+
+
diff --git a/test/Bitcode/vectorInstructions.3.2.ll.bc b/test/Bitcode/vectorInstructions.3.2.ll.bc
new file mode 100644
index 000000000000..b1727031701b
--- /dev/null
+++ b/test/Bitcode/vectorInstructions.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/visibility-styles.3.2.ll b/test/Bitcode/visibility-styles.3.2.ll
new file mode 100644
index 000000000000..ec2ee6832063
--- /dev/null
+++ b/test/Bitcode/visibility-styles.3.2.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-dis < %s.bc| FileCheck %s
+
+; visibility-styles.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not silently misread visibility styles of
+; older bitcode files.
+
+@default.var = default global i32 0
+; CHECK: @default.var = global i32 0
+
+@hidden.var = hidden global i32 0
+; CHECK: @hidden.var = hidden global i32 0
+
+@protected.var = protected global i32 0
+; CHECK: @protected.var = protected global i32 0
+
+declare default void @default()
+; CHECK: declare void @default
+
+declare hidden void @hidden()
+; CHECK: declare hidden void @hidden
+
+declare protected void @protected()
+; CHECK: declare protected void @protected
diff --git a/test/Bitcode/visibility-styles.3.2.ll.bc b/test/Bitcode/visibility-styles.3.2.ll.bc
new file mode 100644
index 000000000000..e2f0b058cce6
--- /dev/null
+++ b/test/Bitcode/visibility-styles.3.2.ll.bc
Binary files differ
diff --git a/test/Bitcode/weak-cmpxchg-upgrade.ll b/test/Bitcode/weak-cmpxchg-upgrade.ll
new file mode 100644
index 000000000000..dbcd150633ed
--- /dev/null
+++ b/test/Bitcode/weak-cmpxchg-upgrade.ll
@@ -0,0 +1,17 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; weak-cmpxchg-upgrade.ll.bc was produced by running a version of llvm-as from just
+; before the IR change on this file.
+
+define i32 @test(i32* %addr, i32 %old, i32 %new) {
+; CHECK: [[TMP:%.*]] = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst monotonic
+; CHECK: %val = extractvalue { i32, i1 } [[TMP]], 0
+ %val = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst monotonic
+ ret i32 %val
+}
+
+define i1 @test_success(i32* %addr, i32 %old, i32 %new) {
+  %pair = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst monotonic
+  %val = extractvalue { i32, i1 } %pair, 1
+  ret i1 %val
+}
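The IR change referenced above moved cmpxchg's result from the bare loaded value to a { value, success } pair, so disassembling old bitcode inserts an extractvalue, which is what the CHECK lines verify. A minimal sketch of both shapes, with hypothetical value names (illustrative only):

define i1 @sketch_upgraded(i32* %addr, i32 %expected, i32 %desired) {
  ; current form, as printed after the bitcode upgrade:
  %pair = cmpxchg i32* %addr, i32 %expected, i32 %desired seq_cst monotonic  ; yields { i32, i1 }
  %ok = extractvalue { i32, i1 } %pair, 1                                    ; success flag
  ret i1 %ok
}
; Pre-change form, as the old llvm-as accepted the same operation:
;   %old = cmpxchg i32* %addr, i32 %expected, i32 %desired seq_cst monotonic ; yielded i32 directly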
diff --git a/test/Bitcode/weak-cmpxchg-upgrade.ll.bc b/test/Bitcode/weak-cmpxchg-upgrade.ll.bc
new file mode 100644
index 000000000000..f713c317d46f
--- /dev/null
+++ b/test/Bitcode/weak-cmpxchg-upgrade.ll.bc
Binary files differ
diff --git a/test/BugPoint/compile-custom.ll b/test/BugPoint/compile-custom.ll
new file mode 100755
index 000000000000..d152f08626f8
--- /dev/null
+++ b/test/BugPoint/compile-custom.ll
@@ -0,0 +1,12 @@
+; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext --compile-custom --compile-command="%python %s.py arg1 arg2" --output-prefix %t %s | FileCheck %s
+; REQUIRES: loadable_module
+
+; Test that arguments are correctly passed in --compile-command. The output
+; of bugpoint includes the output of the custom tool, so we just echo the args
+; in the tool and check here.
+
+; CHECK: Error: arg1 arg2
+
+define void @noop() {
+ ret void
+}
diff --git a/test/BugPoint/compile-custom.ll.py b/test/BugPoint/compile-custom.ll.py
new file mode 100755
index 000000000000..4b9b30caadc8
--- /dev/null
+++ b/test/BugPoint/compile-custom.ll.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+import sys
+
+# Currently any print-out from the custom tool is interpreted as a crash
+# (i.e. the test case is still considered interesting)
+
+print("Error: " + ' '.join(sys.argv[1:]))
+
+sys.exit(1)
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index d6f7dab1287e..3e08a1638945 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -15,9 +15,11 @@ endif()
# Set the depends list as a variable so that it can grow conditionally.
# NOTE: Sync the substitutions in test/lit.cfg when adding to this list.
set(LLVM_TEST_DEPENDS
+ llvm-config
UnitTests
BugpointPasses
LLVMHello
+ bugpoint
llc
lli
lli-child-target
@@ -35,13 +37,15 @@ set(LLVM_TEST_DEPENDS
llvm-mc
llvm-mcmarkup
llvm-nm
+ llvm-size
llvm-objdump
+ llvm-profdata
llvm-readobj
llvm-rtdyld
llvm-symbolizer
+ llvm-tblgen
macho-dump
opt
- profile_rt-shared
FileCheck
count
not
diff --git a/test/CodeGen/AArch64/128bit_load_store.ll b/test/CodeGen/AArch64/128bit_load_store.ll
new file mode 100644
index 000000000000..a6f077698e40
--- /dev/null
+++ b/test/CodeGen/AArch64/128bit_load_store.ll
@@ -0,0 +1,53 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=neon | FileCheck %s --check-prefix=CHECK
+
+define void @test_store_f128(fp128* %ptr, fp128 %val) #0 {
+; CHECK-LABEL: test_store_f128
+; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}]
+entry:
+ store fp128 %val, fp128* %ptr, align 16
+ ret void
+}
+
+define fp128 @test_load_f128(fp128* readonly %ptr) #2 {
+; CHECK-LABEL: test_load_f128
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
+entry:
+ %0 = load fp128* %ptr, align 16
+ ret fp128 %0
+}
+
+define void @test_vstrq_p128(i128* %ptr, i128 %val) #0 {
+; CHECK-LABEL: test_vstrq_p128
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}]
+
+entry:
+ %0 = bitcast i128* %ptr to fp128*
+ %1 = bitcast i128 %val to fp128
+ store fp128 %1, fp128* %0, align 16
+ ret void
+}
+
+define i128 @test_vldrq_p128(i128* readonly %ptr) #2 {
+; CHECK-LABEL: test_vldrq_p128
+; CHECK: ldp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}]
+
+entry:
+ %0 = bitcast i128* %ptr to fp128*
+ %1 = load fp128* %0, align 16
+ %2 = bitcast fp128 %1 to i128
+ ret i128 %2
+}
+
+define void @test_ld_st_p128(i128* nocapture %ptr) #0 {
+; CHECK-LABEL: test_ld_st_p128
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
+; CHECK-NEXT: str {{q[0-9]+}}, [{{x[0-9]+}}, #16]
+entry:
+ %0 = bitcast i128* %ptr to fp128*
+ %1 = load fp128* %0, align 16
+ %add.ptr = getelementptr inbounds i128* %ptr, i64 1
+ %2 = bitcast i128* %add.ptr to fp128*
+ store fp128 %1, fp128* %2, align 16
+ ret void
+}
+
diff --git a/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll b/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
new file mode 100644
index 000000000000..2df9c375bdce
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
@@ -0,0 +1,55 @@
+; RUN: llc -O3 -mcpu=cortex-a53 -mtriple=aarch64--linux-gnu %s -o - | FileCheck %s
+; PR20188: don't crash when merging sexts.
+
+; CHECK: foo:
+define void @foo() unnamed_addr align 2 {
+entry:
+ br label %invoke.cont145
+
+invoke.cont145:
+ %or.cond = and i1 undef, false
+ br i1 %or.cond, label %if.then274, label %invoke.cont145
+
+if.then274:
+ %0 = load i32* null, align 4
+ br i1 undef, label %invoke.cont291, label %if.else313
+
+invoke.cont291:
+ %idxprom.i.i.i605 = sext i32 %0 to i64
+ %arrayidx.i.i.i607 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i605
+ %idxprom.i.i.i596 = sext i32 %0 to i64
+ %arrayidx.i.i.i598 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i596
+ br label %if.end356
+
+if.else313:
+ %cmp314 = fcmp olt double undef, 0.000000e+00
+ br i1 %cmp314, label %invoke.cont317, label %invoke.cont353
+
+invoke.cont317:
+ br i1 undef, label %invoke.cont326, label %invoke.cont334
+
+invoke.cont326:
+ %idxprom.i.i.i587 = sext i32 %0 to i64
+ %arrayidx.i.i.i589 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i587
+ %sub329 = fsub fast double undef, undef
+ br label %invoke.cont334
+
+invoke.cont334:
+ %lo.1 = phi double [ %sub329, %invoke.cont326 ], [ undef, %invoke.cont317 ]
+ br i1 undef, label %invoke.cont342, label %if.end356
+
+invoke.cont342:
+ %idxprom.i.i.i578 = sext i32 %0 to i64
+ %arrayidx.i.i.i580 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i578
+ br label %if.end356
+
+invoke.cont353:
+ %idxprom.i.i.i572 = sext i32 %0 to i64
+ %arrayidx.i.i.i574 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i572
+ br label %if.end356
+
+if.end356:
+ %lo.2 = phi double [ 0.000000e+00, %invoke.cont291 ], [ %lo.1, %invoke.cont342 ], [ undef, %invoke.cont353 ], [ %lo.1, %invoke.cont334 ]
+ call void null(i32 %0, double %lo.2)
+ unreachable
+}
diff --git a/test/CodeGen/AArch64/aarch64-address-type-promotion.ll b/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
new file mode 100644
index 000000000000..ee90d199b458
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -o - | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
+target triple = "arm64-apple-macosx10.9"
+
+; Check that sexts get promoted above adds.
+define void @foo(i32* nocapture %a, i32 %i) {
+entry:
+; CHECK-LABEL: _foo:
+; CHECK: add
+; CHECK-NEXT: ldp
+; CHECK-NEXT: add
+; CHECK-NEXT: str
+; CHECK-NEXT: ret
+ %add = add nsw i32 %i, 1
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32* %a, i64 %idxprom
+ %0 = load i32* %arrayidx, align 4
+ %add1 = add nsw i32 %i, 2
+ %idxprom2 = sext i32 %add1 to i64
+ %arrayidx3 = getelementptr inbounds i32* %a, i64 %idxprom2
+ %1 = load i32* %arrayidx3, align 4
+ %add4 = add nsw i32 %1, %0
+ %idxprom5 = sext i32 %i to i64
+ %arrayidx6 = getelementptr inbounds i32* %a, i64 %idxprom5
+ store i32 %add4, i32* %arrayidx6, align 4
+ ret void
+}
diff --git a/test/CodeGen/AArch64/aarch64-neon-v1i1-setcc.ll b/test/CodeGen/AArch64/aarch64-neon-v1i1-setcc.ll
new file mode 100644
index 000000000000..c932253049e2
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-neon-v1i1-setcc.ll
@@ -0,0 +1,69 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+; arm64 has a separate copy as aarch64-neon-v1i1-setcc.ll
+
+; This file tests DAG nodes of the form "v1i1 SETCC v1i64, v1i64". As the v1i1
+; type is illegal in the AArch64 backend, the legalizer tries to scalarize this
+; node. As the v1i64 operands of the SETCC are legal types, they will not be
+; scalarized. Currently the type legalizer will hit an assertion failure, as it
+; assumes all operands of the SETCC have been legalized.
+; FIXME: If the type-scalarization algorithm is improved and can legalize
+; "v1i1 SETCC" correctly, these test cases will not be needed.
+
+define i64 @test_sext_extr_cmp_0(<1 x i64> %v1, <1 x i64> %v2) {
+; CHECK-LABEL: test_sext_extr_cmp_0:
+; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}
+ %1 = icmp sge <1 x i64> %v1, %v2
+ %2 = extractelement <1 x i1> %1, i32 0
+ %vget_lane = sext i1 %2 to i64
+ ret i64 %vget_lane
+}
+
+define i64 @test_sext_extr_cmp_1(<1 x double> %v1, <1 x double> %v2) {
+; CHECK-LABEL: test_sext_extr_cmp_1:
+; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
+ %1 = fcmp oeq <1 x double> %v1, %v2
+ %2 = extractelement <1 x i1> %1, i32 0
+ %vget_lane = sext i1 %2 to i64
+ ret i64 %vget_lane
+}
+
+define <1 x i64> @test_select_v1i1_0(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
+; CHECK-LABEL: test_select_v1i1_0:
+; CHECK: cmeq d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: bic v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %1 = icmp eq <1 x i64> %v1, %v2
+ %res = select <1 x i1> %1, <1 x i64> zeroinitializer, <1 x i64> %v3
+ ret <1 x i64> %res
+}
+
+define <1 x i64> @test_select_v1i1_1(<1 x double> %v1, <1 x double> %v2, <1 x i64> %v3) {
+; CHECK-LABEL: test_select_v1i1_1:
+; CHECK: fcmeq d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: bic v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %1 = fcmp oeq <1 x double> %v1, %v2
+ %res = select <1 x i1> %1, <1 x i64> zeroinitializer, <1 x i64> %v3
+ ret <1 x i64> %res
+}
+
+define <1 x double> @test_select_v1i1_2(<1 x i64> %v1, <1 x i64> %v2, <1 x double> %v3) {
+; CHECK-LABEL: test_select_v1i1_2:
+; CHECK: cmeq d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: bic v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %1 = icmp eq <1 x i64> %v1, %v2
+ %res = select <1 x i1> %1, <1 x double> zeroinitializer, <1 x double> %v3
+ ret <1 x double> %res
+}
+
+define i32 @test_br_extr_cmp(<1 x i64> %v1, <1 x i64> %v2) {
+; CHECK-LABEL: test_br_extr_cmp:
+; CHECK: cmp x{{[0-9]+}}, x{{[0-9]+}}
+ %1 = icmp eq <1 x i64> %v1, %v2
+ %2 = extractelement <1 x i1> %1, i32 0
+ br i1 %2, label %if.end, label %if.then
+
+if.then:
+ ret i32 0;
+
+if.end:
+ ret i32 1;
+}
diff --git a/test/CodeGen/AArch64/adc.ll b/test/CodeGen/AArch64/adc.ll
index 26fd3e66b798..892573ba06b1 100644
--- a/test/CodeGen/AArch64/adc.ll
+++ b/test/CodeGen/AArch64/adc.ll
@@ -1,15 +1,20 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-LE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64_be-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-BE %s
define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-LABEL: test_simple:
%valadd = add i128 %a, %b
-; CHECK: adds [[ADDLO:x[0-9]+]], x0, x2
-; CHECK-NEXT: adcs [[ADDHI:x[0-9]+]], x1, x3
+; CHECK-LE: adds [[ADDLO:x[0-9]+]], x0, x2
+; CHECK-LE-NEXT: adcs [[ADDHI:x[0-9]+]], x1, x3
+; CHECK-BE: adds [[ADDLO:x[0-9]+]], x1, x3
+; CHECK-BE-NEXT: adcs [[ADDHI:x[0-9]+]], x0, x2
%valsub = sub i128 %valadd, %c
-; CHECK: subs x0, [[ADDLO]], x4
-; CHECK: sbcs x1, [[ADDHI]], x5
+; CHECK-LE: subs x0, [[ADDLO]], x4
+; CHECK-LE: sbcs x1, [[ADDHI]], x5
+; CHECK-BE: subs x1, [[ADDLO]], x5
+; CHECK-BE: sbcs x0, [[ADDHI]], x4
ret i128 %valsub
; CHECK: ret
@@ -19,8 +24,10 @@ define i128 @test_imm(i128 %a) {
; CHECK-LABEL: test_imm:
%val = add i128 %a, 12
-; CHECK: adds x0, x0, #12
-; CHECK: adcs x1, x1, {{x[0-9]|xzr}}
+; CHECK-LE: adds x0, x0, #12
+; CHECK-LE: adcs x1, x1, {{x[0-9]|xzr}}
+; CHECK-BE: adds x1, x1, #12
+; CHECK-BE: adcs x0, x0, {{x[0-9]|xzr}}
ret i128 %val
; CHECK: ret
@@ -32,8 +39,10 @@ define i128 @test_shifted(i128 %a, i128 %b) {
%rhs = shl i128 %b, 45
%val = add i128 %a, %rhs
-; CHECK: adds x0, x0, x2, lsl #45
-; CHECK: adcs x1, x1, {{x[0-9]}}
+; CHECK-LE: adds x0, x0, x2, lsl #45
+; CHECK-LE: adcs x1, x1, {{x[0-9]}}
+; CHECK-BE: adds x1, x1, x3, lsl #45
+; CHECK-BE: adcs x0, x0, {{x[0-9]}}
ret i128 %val
; CHECK: ret
@@ -46,8 +55,10 @@ define i128 @test_extended(i128 %a, i16 %b) {
%rhs = shl i128 %ext, 3
%val = add i128 %a, %rhs
-; CHECK: adds x0, x0, w2, sxth #3
-; CHECK: adcs x1, x1, {{x[0-9]}}
+; CHECK-LE: adds x0, x0, w2, sxth #3
+; CHECK-LE: adcs x1, x1, {{x[0-9]}}
+; CHECK-BE: adds x1, x1, w2, sxth #3
+; CHECK-BE: adcs x0, x0, {{x[0-9]}}
ret i128 %val
; CHECK: ret
diff --git a/test/CodeGen/AArch64/addsub-shifted.ll b/test/CodeGen/AArch64/addsub-shifted.ll
index 269c1e8143b2..0a93edd8290a 100644
--- a/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/test/CodeGen/AArch64/addsub-shifted.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
@var32 = global i32 0
@var64 = global i64 0
@@ -35,7 +35,7 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift4a = shl i32 %lhs4a, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
-; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsl #15
+; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
%rhs5 = load volatile i64* @var64
%shift5 = shl i64 %rhs5, 18
@@ -66,7 +66,7 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift8a = shl i64 %lhs8a, 60
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
-; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsl #60
+; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
ret void
; CHECK: ret
@@ -99,7 +99,7 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift4a = lshr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
-; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsr #15
+; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
%shift5 = lshr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
@@ -125,7 +125,7 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift8a = lshr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
-; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsr #45
+; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
ret void
; CHECK: ret
@@ -158,7 +158,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift4a = ashr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
-; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, asr #15
+; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
%shift5 = ashr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
@@ -184,7 +184,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift8a = ashr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
-; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, asr #45
+; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
ret void
; CHECK: ret
@@ -245,7 +245,7 @@ define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
br i1 %tst1, label %t2, label %end
; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
; 0 then the results will differ.
-; CHECK: sub [[RHS:w[0-9]+]], wzr, {{w[0-9]+}}, lsl #13
+; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]
t2:
@@ -268,7 +268,7 @@ t4:
%tst4 = icmp slt i64 %lhs64, %val4
br i1 %tst4, label %t5, label %end
; Again, it's important that cmn isn't used here in case %rhs64 == 0.
-; CHECK: sub [[RHS:x[0-9]+]], xzr, {{x[0-9]+}}, lsl #43
+; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]
t5:
diff --git a/test/CodeGen/AArch64/addsub.ll b/test/CodeGen/AArch64/addsub.ll
index 4d46d04b80f1..b85fdbb14ce2 100644
--- a/test/CodeGen/AArch64/addsub.ll
+++ b/test/CodeGen/AArch64/addsub.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-linux-gnu | FileCheck %s
; Note that this should be refactored (for efficiency if nothing else)
; when the PCS is implemented so we don't have to worry about the
@@ -28,12 +28,12 @@ define void @add_small() {
define void @add_med() {
; CHECK-LABEL: add_med:
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
%val32 = load i32* @var_i32
%newval32 = add i32 %val32, 14610432 ; =0xdef000
store i32 %newval32, i32* @var_i32
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #4095, lsl #12
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}}
%val64 = load i64* @var_i64
%newval64 = add i64 %val64, 16773120 ; =0xfff000
store i64 %newval64, i64* @var_i64
@@ -62,12 +62,12 @@ define void @sub_small() {
define void @sub_med() {
; CHECK-LABEL: sub_med:
-; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
+; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
%val32 = load i32* @var_i32
%newval32 = sub i32 %val32, 14610432 ; =0xdef000
store i32 %newval32, i32* @var_i32
-; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, #4095, lsl #12
+; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}}
%val64 = load i64* @var_i64
%newval64 = sub i64 %val64, 16773120 ; =0xfff000
store i64 %newval64, i64* @var_i64
@@ -80,13 +80,13 @@ define void @testing() {
%val = load i32* @var_i32
; CHECK: cmp {{w[0-9]+}}, #4095
-; CHECK: b.ne .LBB4_6
+; CHECK: b.ne [[RET:.?LBB[0-9]+_[0-9]+]]
%cmp_pos_small = icmp ne i32 %val, 4095
br i1 %cmp_pos_small, label %ret, label %test2
test2:
-; CHECK: cmp {{w[0-9]+}}, #3567, lsl #12
-; CHECK: b.lo .LBB4_6
+; CHECK: cmp {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
+; CHECK: b.lo [[RET]]
%newval2 = add i32 %val, 1
store i32 %newval2, i32* @var_i32
%cmp_pos_big = icmp ult i32 %val, 14610432
@@ -94,7 +94,7 @@ test2:
test3:
; CHECK: cmp {{w[0-9]+}}, #123
-; CHECK: b.lt .LBB4_6
+; CHECK: b.lt [[RET]]
%newval3 = add i32 %val, 2
store i32 %newval3, i32* @var_i32
%cmp_pos_slt = icmp slt i32 %val, 123
@@ -102,7 +102,7 @@ test3:
test4:
; CHECK: cmp {{w[0-9]+}}, #321
-; CHECK: b.gt .LBB4_6
+; CHECK: b.gt [[RET]]
%newval4 = add i32 %val, 3
store i32 %newval4, i32* @var_i32
%cmp_pos_sgt = icmp sgt i32 %val, 321
@@ -110,7 +110,7 @@ test4:
test5:
; CHECK: cmn {{w[0-9]+}}, #444
-; CHECK: b.gt .LBB4_6
+; CHECK: b.gt [[RET]]
%newval5 = add i32 %val, 4
store i32 %newval5, i32* @var_i32
%cmp_neg_uge = icmp sgt i32 %val, -444
diff --git a/test/CodeGen/AArch64/addsub_ext.ll b/test/CodeGen/AArch64/addsub_ext.ll
index f0e11c652240..ceea8a08ecee 100644
--- a/test/CodeGen/AArch64/addsub_ext.ll
+++ b/test/CodeGen/AArch64/addsub_ext.ll
@@ -1,11 +1,11 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s
@var8 = global i8 0
@var16 = global i16 0
@var32 = global i32 0
@var64 = global i64 0
-define void @addsub_i8rhs() {
+define void @addsub_i8rhs() minsize {
; CHECK-LABEL: addsub_i8rhs:
%val8_tmp = load i8* @var8
%lhs32 = load i32* @var32
@@ -80,7 +80,7 @@ end:
ret void
}
-define void @addsub_i16rhs() {
+define void @addsub_i16rhs() minsize {
; CHECK-LABEL: addsub_i16rhs:
%val16_tmp = load i16* @var16
%lhs32 = load i32* @var32
@@ -158,7 +158,7 @@ end:
; N.b. we could probably check more here ("add w2, w3, w1, uxtw" for
; example), but the remaining instructions are probably not idiomatic
; in the face of "add/sub (shifted register)" so I don't intend to.
-define void @addsub_i32rhs() {
+define void @addsub_i32rhs() minsize {
; CHECK-LABEL: addsub_i32rhs:
%val32_tmp = load i32* @var32
%lhs64 = load i64* @var64
diff --git a/test/CodeGen/AArch64/alloca.ll b/test/CodeGen/AArch64/alloca.ll
index 1d3c0a02ac87..f93efbc42e65 100644
--- a/test/CodeGen/AArch64/alloca.ll
+++ b/test/CodeGen/AArch64/alloca.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOFP-ARM64 %s
declare void @use_addr(i8*)
@@ -8,23 +8,22 @@ define void @test_simple_alloca(i64 %n) {
%buf = alloca i8, i64 %n
; Make sure we align the stack change to 16 bytes:
-; CHECK-DAG: add [[SPDELTA:x[0-9]+]], x0, #15
-; CHECK-DAG: and x0, [[SPDELTA]], #0xfffffffffffffff0
+; CHECK: {{mov|add}} x29
+; CHECK: mov [[TMP:x[0-9]+]], sp
+; CHECK: add [[SPDELTA_TMP:x[0-9]+]], x0, #15
+; CHECK: and [[SPDELTA:x[0-9]+]], [[SPDELTA_TMP]], #0xfffffffffffffff0
; Make sure we change SP. It would be surprising if anything but x0 were used
; for the final sp, but it could be if it was then moved into x0.
-; CHECK-DAG: mov [[TMP:x[0-9]+]], sp
-; CHECK-DAG: sub x0, [[TMP]], [[SPDELTA]]
-; CHECK: mov sp, x0
+; CHECK: sub [[NEWSP:x[0-9]+]], [[TMP]], [[SPDELTA]]
+; CHECK: mov sp, [[NEWSP]]
call void @use_addr(i8* %buf)
; CHECK: bl use_addr
ret void
; Make sure epilogue restores sp from fp
-; CHECK: sub sp, x29, #16
-; CHECK: ldp x29, x30, [sp, #16]
-; CHECK: add sp, sp, #32
+; CHECK: {{sub|mov}} sp, x29
; CHECK: ret
}
@@ -32,57 +31,70 @@ declare void @use_addr_loc(i8*, i64*)
define i64 @test_alloca_with_local(i64 %n) {
; CHECK-LABEL: test_alloca_with_local:
-; CHECK: sub sp, sp, #32
-; CHECK: stp x29, x30, [sp, #16]
+; CHECK-DAG: sub sp, sp, [[LOCAL_STACK:#[0-9]+]]
+; CHECK-DAG: {{mov|add}} x29, sp
%loc = alloca i64
%buf = alloca i8, i64 %n
; Make sure we align the stack change to 16 bytes:
-; CHECK-DAG: add [[SPDELTA:x[0-9]+]], x0, #15
-; CHECK-DAG: and x0, [[SPDELTA]], #0xfffffffffffffff0
+; CHECK: mov [[TMP:x[0-9]+]], sp
+; CHECK: add [[SPDELTA_TMP:x[0-9]+]], x0, #15
+; CHECK: and [[SPDELTA:x[0-9]+]], [[SPDELTA_TMP]], #0xfffffffffffffff0
; Make sure we change SP. It would be surprising if anything but x0 were used
; for the final sp, but it could be if it was then moved into x0.
-; CHECK-DAG: mov [[TMP:x[0-9]+]], sp
-; CHECK-DAG: sub x0, [[TMP]], [[SPDELTA]]
-; CHECK: mov sp, x0
+; CHECK: sub [[NEWSP:x[0-9]+]], [[TMP]], [[SPDELTA]]
+; CHECK: mov sp, [[NEWSP]]
- ; Obviously suboptimal code here, but it to get &local in x1
-; CHECK: sub [[TMP:x[0-9]+]], x29, [[LOC_FROM_FP:#[0-9]+]]
-; CHECK: add x1, [[TMP]], #0
+; CHECK: sub {{x[0-9]+}}, x29, #[[LOC_FROM_FP:[0-9]+]]
call void @use_addr_loc(i8* %buf, i64* %loc)
; CHECK: bl use_addr
%val = load i64* %loc
-; CHECK: sub x[[TMP:[0-9]+]], x29, [[LOC_FROM_FP]]
-; CHECK: ldr x0, [x[[TMP]]]
+
+; CHECK: ldur x0, [x29, #-[[LOC_FROM_FP]]]
ret i64 %val
; Make sure epilogue restores sp from fp
-; CHECK: sub sp, x29, #16
-; CHECK: ldp x29, x30, [sp, #16]
-; CHECK: add sp, sp, #32
+; CHECK: {{sub|mov}} sp, x29
; CHECK: ret
}
define void @test_variadic_alloca(i64 %n, ...) {
-; CHECK: test_variadic_alloca:
-
-; CHECK: sub sp, sp, #208
-; CHECK: stp x29, x30, [sp, #192]
-; CHECK: add x29, sp, #192
-; CHECK: sub [[TMP:x[0-9]+]], x29, #192
-; CHECK: add x8, [[TMP]], #0
-; CHECK-FP: str q7, [x8, #112]
+; CHECK-LABEL: test_variadic_alloca:
+
; [...]
-; CHECK-FP: str q1, [x8, #16]
-; CHECK-NOFP: sub sp, sp, #80
-; CHECK-NOFP: stp x29, x30, [sp, #64]
-; CHECK-NOFP: add x29, sp, #64
-; CHECK-NOFP: sub [[TMP:x[0-9]+]], x29, #64
-; CHECK-NOFP: add x8, [[TMP]], #0
+
+; CHECK-NOFP-AARCH64: sub sp, sp, #80
+; CHECK-NOFP-AARCH64: stp x29, x30, [sp, #64]
+; CHECK-NOFP-AARCH64: add x29, sp, #64
+; CHECK-NOFP-AARCH64: sub [[TMP:x[0-9]+]], x29, #64
+; CHECK-NOFP-AARCH64: add x8, [[TMP]], #0
+
+
+; CHECK: stp x29, x30, [sp, #-16]!
+; CHECK: mov x29, sp
+; CHECK: sub sp, sp, #192
+; CHECK: stp q6, q7, [x29, #-96]
+; [...]
+; CHECK: stp q0, q1, [x29, #-192]
+
+; CHECK: stp x6, x7, [x29, #-16]
+; [...]
+; CHECK: stp x2, x3, [x29, #-48]
+
+; CHECK-NOFP-ARM64: stp x29, x30, [sp, #-16]!
+; CHECK-NOFP-ARM64: mov x29, sp
+; CHECK-NOFP-ARM64: sub sp, sp, #64
+; CHECK-NOFP-ARM64: stp x6, x7, [x29, #-16]
+; [...]
+; CHECK-NOFP-ARM64: stp x4, x5, [x29, #-32]
+; [...]
+; CHECK-NOFP-ARM64: stp x2, x3, [x29, #-48]
+; [...]
+; CHECK-NOFP-ARM64: mov x8, sp
%addr = alloca i8, i64 %n
@@ -90,23 +102,24 @@ define void @test_variadic_alloca(i64 %n, ...) {
; CHECK: bl use_addr
ret void
-; CHECK: sub sp, x29, #192
-; CHECK: ldp x29, x30, [sp, #192]
-; CHECK: add sp, sp, #208
-; CHECK-NOFP: sub sp, x29, #64
-; CHECK-NOFP: ldp x29, x30, [sp, #64]
-; CHECK-NOFP: add sp, sp, #80
+; CHECK-NOFP-AARCH64: sub sp, x29, #64
+; CHECK-NOFP-AARCH64: ldp x29, x30, [sp, #64]
+; CHECK-NOFP-AARCH64: add sp, sp, #80
+
+; CHECK-NOFP-ARM64: mov sp, x29
+; CHECK-NOFP-ARM64: ldp x29, x30, [sp], #16
}
define void @test_alloca_large_frame(i64 %n) {
; CHECK-LABEL: test_alloca_large_frame:
-; CHECK: sub sp, sp, #496
-; CHECK: stp x29, x30, [sp, #480]
-; CHECK: add x29, sp, #480
-; CHECK: sub sp, sp, #48
-; CHECK: sub sp, sp, #1953, lsl #12
+
+; CHECK: stp x20, x19, [sp, #-32]!
+; CHECK: stp x29, x30, [sp, #16]
+; CHECK: add x29, sp, #16
+; CHECK: sub sp, sp, #1953, lsl #12
+; CHECK: sub sp, sp, #512
%addr1 = alloca i8, i64 %n
%addr2 = alloca i64, i64 1000000
@@ -114,9 +127,10 @@ define void @test_alloca_large_frame(i64 %n) {
call void @use_addr_loc(i8* %addr1, i64* %addr2)
ret void
-; CHECK: sub sp, x29, #480
-; CHECK: ldp x29, x30, [sp, #480]
-; CHECK: add sp, sp, #496
+
+; CHECK: sub sp, x29, #16
+; CHECK: ldp x29, x30, [sp, #16]
+; CHECK: ldp x20, x19, [sp], #32
}
declare i8* @llvm.stacksave()
@@ -124,7 +138,6 @@ declare void @llvm.stackrestore(i8*)
define void @test_scoped_alloca(i64 %n) {
; CHECK-LABEL: test_scoped_alloca:
-; CHECK: sub sp, sp, #32
%sp = call i8* @llvm.stacksave()
; CHECK: mov [[SAVED_SP:x[0-9]+]], sp
diff --git a/test/CodeGen/AArch64/analyze-branch.ll b/test/CodeGen/AArch64/analyze-branch.ll
index 36bc2e00d238..6616b27c45b7 100644
--- a/test/CodeGen/AArch64/analyze-branch.ll
+++ b/test/CodeGen/AArch64/analyze-branch.ll
@@ -168,7 +168,7 @@ define void @test_TBZ_fallthrough_nottaken(i64 %in) nounwind {
%tst = icmp eq i64 %bit, 0
br i1 %tst, label %true, label %false, !prof !1
-; CHECK: tbz {{x[0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
+; CHECK: tbz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: // BB#
; CHECK-NEXT: bl test_false
@@ -213,7 +213,7 @@ define void @test_TBNZ_fallthrough_nottaken(i64 %in) nounwind {
%tst = icmp ne i64 %bit, 0
br i1 %tst, label %true, label %false, !prof !1
-; CHECK: tbnz {{x[0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
+; CHECK: tbnz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: // BB#
; CHECK-NEXT: bl test_false
diff --git a/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll b/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll
new file mode 100644
index 000000000000..6fb7c3fb5e0a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll
@@ -0,0 +1,47 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin
+
+; Can't copy or spill / restore CPSR.
+; rdar://9105206
+
+define fastcc void @t() ssp align 2 {
+entry:
+ br i1 undef, label %bb3.i, label %bb2.i
+
+bb2.i: ; preds = %entry
+ br label %bb3.i
+
+bb3.i: ; preds = %bb2.i, %entry
+ br i1 undef, label %_ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71, label %bb.i69
+
+bb.i69: ; preds = %bb3.i
+ br label %_ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71
+
+_ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71: ; preds = %bb.i69, %bb3.i
+ %0 = select i1 undef, float 0.000000e+00, float undef
+ %1 = fdiv float %0, undef
+ %2 = fcmp ult float %1, 0xBF847AE140000000
+ %storemerge9 = select i1 %2, float %1, float 0.000000e+00
+ store float %storemerge9, float* undef, align 4
+ br i1 undef, label %bb42, label %bb47
+
+bb42: ; preds = %_ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71
+ br i1 undef, label %bb46, label %bb53
+
+bb46: ; preds = %bb42
+ br label %bb48
+
+bb47: ; preds = %_ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71
+ br label %bb48
+
+bb48: ; preds = %bb47, %bb46
+ br i1 undef, label %bb1.i14, label %bb.i13
+
+bb.i13: ; preds = %bb48
+ br label %bb1.i14
+
+bb1.i14: ; preds = %bb.i13, %bb48
+ br label %bb53
+
+bb53: ; preds = %bb1.i14, %bb42
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll b/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
new file mode 100644
index 000000000000..2b083d804912
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
@@ -0,0 +1,45 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin
+
+; rdar://9146594
+
+define void @drt_vsprintf() nounwind ssp {
+entry:
+ %do_tab_convert = alloca i32, align 4
+ br i1 undef, label %if.then24, label %if.else295, !dbg !13
+
+if.then24: ; preds = %entry
+ unreachable
+
+if.else295: ; preds = %entry
+ call void @llvm.dbg.declare(metadata !{i32* %do_tab_convert}, metadata !16), !dbg !18
+ store i32 0, i32* %do_tab_convert, align 4, !dbg !19
+ unreachable
+}
+
+declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
+
+!llvm.dbg.gv = !{!0}
+!llvm.dbg.sp = !{!1, !7, !10, !11, !12}
+
+!0 = metadata !{i32 589876, i32 0, metadata !1, metadata !"vsplive", metadata !"vsplive", metadata !"", metadata !2, i32 617, metadata !6, i32 1, i32 1, null, null} ; [ DW_TAG_variable ]
+!1 = metadata !{i32 589870, metadata !20, metadata !2, metadata !"drt_vsprintf", metadata !"drt_vsprintf", metadata !"", i32 616, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 589865, metadata !20} ; [ DW_TAG_file_type ]
+!3 = metadata !{i32 589841, metadata !20, i32 12, metadata !"clang version 3.0 (http://llvm.org/git/clang.git git:/git/puzzlebox/clang.git/ c4d1aea01c4444eb81bdbf391f1be309127c3cf1)", i1 true, metadata !"", i32 0, metadata !21, metadata !21, null, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!4 = metadata !{i32 589845, metadata !20, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !5, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 589860, null, metadata !3, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!7 = metadata !{i32 589870, metadata !20, metadata !2, metadata !"putc_mem", metadata !"putc_mem", metadata !"", i32 30, metadata !8, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!8 = metadata !{i32 589845, metadata !20, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !9, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!9 = metadata !{null}
+!10 = metadata !{i32 589870, metadata !20, metadata !2, metadata !"print_double", metadata !"print_double", metadata !"", i32 203, metadata !4, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!11 = metadata !{i32 589870, metadata !20, metadata !2, metadata !"print_number", metadata !"print_number", metadata !"", i32 75, metadata !4, i1 true, i1 true, i32 0, i32 0, i32 0, i32 256, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!12 = metadata !{i32 589870, metadata !20, metadata !2, metadata !"get_flags", metadata !"get_flags", metadata !"", i32 508, metadata !8, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!13 = metadata !{i32 653, i32 5, metadata !14, null}
+!14 = metadata !{i32 589835, metadata !20, metadata !15, i32 652, i32 35, i32 2} ; [ DW_TAG_lexical_block ]
+!15 = metadata !{i32 589835, metadata !20, metadata !1, i32 616, i32 1, i32 0} ; [ DW_TAG_lexical_block ]
+!16 = metadata !{i32 590080, metadata !17, metadata !"do_tab_convert", metadata !2, i32 853, metadata !6, i32 0, null} ; [ DW_TAG_auto_variable ]
+!17 = metadata !{i32 589835, metadata !20, metadata !14, i32 850, i32 12, i32 33} ; [ DW_TAG_lexical_block ]
+!18 = metadata !{i32 853, i32 11, metadata !17, null}
+!19 = metadata !{i32 853, i32 29, metadata !17, null}
+!20 = metadata !{metadata !"print.i", metadata !"/Volumes/Ebi/echeng/radars/r9146594"}
+!21 = metadata !{i32 0}
diff --git a/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll b/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
new file mode 100644
index 000000000000..6f0ec34fc1dd
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+define void @foo(i64 %val) {
+; CHECK: foo
+; The stack frame store is not 64-bit aligned. Make sure we use an
+; instruction that can handle that.
+; CHECK: stur x0, [sp, #20]
+ %a = alloca [49 x i32], align 4
+ %p32 = getelementptr inbounds [49 x i32]* %a, i64 0, i64 2
+ %p = bitcast i32* %p32 to i64*
+ store i64 %val, i64* %p, align 8
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll b/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
new file mode 100644
index 000000000000..88232fcc0b4d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=arm64-apple-iOS5.0
+
+; CPSR is not allocatable, so the fast register allocator wouldn't mark it killed.
+; rdar://9313272
+
+define hidden void @t() nounwind {
+entry:
+ %cmp = icmp eq i32* null, undef
+ %frombool = zext i1 %cmp to i8
+ store i8 %frombool, i8* undef, align 1
+ %tmp4 = load i8* undef, align 1
+ %tobool = trunc i8 %tmp4 to i1
+ br i1 %tobool, label %land.lhs.true, label %if.end
+
+land.lhs.true: ; preds = %entry
+ unreachable
+
+if.end: ; preds = %entry
+ br i1 undef, label %land.lhs.true14, label %if.end33
+
+land.lhs.true14: ; preds = %if.end
+ unreachable
+
+if.end33: ; preds = %if.end
+ unreachable
+}
diff --git a/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll b/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
new file mode 100644
index 000000000000..8f99bc30a554
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
+
+; Can't fold the increment by 1<<12 into a post-increment load
+; rdar://10301335
+
+@test_data = common global i32 0, align 4
+
+define void @t() nounwind ssp {
+; CHECK-LABEL: t:
+entry:
+ br label %for.body
+
+for.body:
+; CHECK: for.body
+; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}]
+; CHECK: add x[[REG:[0-9]+]],
+; CHECK: x[[REG]], #1, lsl #12
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %0 = shl nsw i64 %indvars.iv, 12
+ %add = add nsw i64 %0, 34628173824
+ %1 = inttoptr i64 %add to i32*
+ %2 = load volatile i32* %1, align 4096
+ store volatile i32 %2, i32* @test_data, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 200
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll b/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
new file mode 100644
index 000000000000..d47dbb28164c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -march=arm64
+
+; The target lowering for integer comparisons was replacing some DAG nodes
+; during operation legalization, which resulted in dangling pointers,
+; cycles in DAGs, and eventually crashes. This is the testcase for
+; one of those crashes. (rdar://10653656)
+
+define void @test(i1 zeroext %IsArrow) nounwind ssp align 2 {
+entry:
+ br i1 undef, label %return, label %lor.lhs.false
+
+lor.lhs.false:
+ br i1 undef, label %return, label %if.end
+
+if.end:
+ %tmp.i = load i64* undef, align 8
+ %and.i.i.i = and i64 %tmp.i, -16
+ br i1 %IsArrow, label %if.else_crit_edge, label %if.end32
+
+if.else_crit_edge:
+ br i1 undef, label %if.end32, label %return
+
+if.end32:
+ %0 = icmp ult i32 undef, 3
+ %1 = zext i64 %tmp.i to i320
+ %.pn.v = select i1 %0, i320 128, i320 64
+ %.pn = shl i320 %1, %.pn.v
+ %ins346392 = or i320 %.pn, 0
+ store i320 %ins346392, i320* undef, align 8
+ br i1 undef, label %sw.bb.i.i, label %exit
+
+sw.bb.i.i:
+ unreachable
+
+exit:
+ unreachable
+
+return:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll b/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
new file mode 100644
index 000000000000..a4d37e48685f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define i32 @foo(<4 x i32> %a, i32 %n) nounwind {
+; CHECK-LABEL: foo:
+; CHECK: fmov w0, s0
+; CHECK-NEXT: ret
+ %b = bitcast <4 x i32> %a to i128
+ %c = trunc i128 %b to i32
+ ret i32 %c
+}
+
+define i64 @bar(<2 x i64> %a, i64 %n) nounwind {
+; CHECK-LABEL: bar:
+; CHECK: fmov x0, d0
+; CHECK-NEXT: ret
+ %b = bitcast <2 x i64> %a to i128
+ %c = trunc i128 %b to i64
+ ret i64 %c
+}
+
diff --git a/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll b/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
new file mode 100644
index 000000000000..d59b0d004380
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -march arm64 -mcpu=cyclone | FileCheck %s
+; <rdar://problem/11294426>
+
+@b = private unnamed_addr constant [3 x i32] [i32 1768775988, i32 1685481784, i32 1836253201], align 4
+
+; The important thing for this test is that we need an unaligned load of `l_b'
+; ("ldr w2, [x1, #8]" in this case).
+
+; CHECK: adrp x[[PAGE:[0-9]+]], {{l_b@PAGE|.Lb}}
+; CHECK: add x[[ADDR:[0-9]+]], x[[PAGE]], {{l_b@PAGEOFF|:lo12:.Lb}}
+; CHECK-NEXT: ldr [[VAL:w[0-9]+]], [x[[ADDR]], #8]
+; CHECK-NEXT: str [[VAL]], [x0, #8]
+; CHECK-NEXT: ldr [[VAL2:x[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: str [[VAL2]], [x0]
+
+define void @foo(i8* %a) {
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast ([3 x i32]* @b to i8*), i64 12, i32 4, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
diff --git a/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll b/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
new file mode 100644
index 000000000000..7da2d2ca513e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -relocation-model=pic < %s | FileCheck %s --check-prefix=CHECK-LINUX
+; <rdar://problem/11392109>
+
+define hidden void @t(i64* %addr) optsize ssp {
+entry:
+ store i64 zext (i32 ptrtoint (i64 (i32)* @x to i32) to i64), i64* %addr, align 8
+; CHECK: adrp x{{[0-9]+}}, _x@GOTPAGE
+; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, _x@GOTPAGEOFF]
+; CHECK-NEXT: and x{{[0-9]+}}, x{{[0-9]+}}, #0xffffffff
+; CHECK-NEXT: str x{{[0-9]+}}, [x{{[0-9]+}}]
+ ret void
+}
+
+declare i64 @x(i32) optsize
+
+; Worth checking that the Linux code is sensible too: the only way to access
+; the GOT is via a 64-bit load. Just loading wN is unacceptable
+; (there's no ELF relocation to do that).
+
+; CHECK-LINUX: adrp {{x[0-9]+}}, :got:x
+; CHECK-LINUX: ldr {{x[0-9]+}}, [{{x[0-9]+}}, :got_lo12:x]
diff --git a/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll b/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
new file mode 100644
index 000000000000..4b037db9c84b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios -verify-machineinstrs | FileCheck %s
+
+; LdStOpt bug created illegal instruction:
+; %D1<def>, %D2<def> = LDPSi %X0, 1
+; rdar://11512047
+
+%0 = type opaque
+%struct.CGRect = type { %struct.CGPoint, %struct.CGSize }
+%struct.CGPoint = type { double, double }
+%struct.CGSize = type { double, double }
+
+@"OBJC_IVAR_$_UIScreen._bounds" = external hidden global i64, section "__DATA, __objc_ivar", align 8
+
+define hidden %struct.CGRect @t(%0* nocapture %self, i8* nocapture %_cmd) nounwind readonly optsize ssp {
+entry:
+; CHECK-LABEL: t:
+; CHECK: ldp d{{[0-9]+}}, d{{[0-9]+}}
+ %ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+ %0 = bitcast %0* %self to i8*
+ %add.ptr = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr10.0 = bitcast i8* %add.ptr to double*
+ %tmp11 = load double* %add.ptr10.0, align 8
+ %add.ptr.sum = add i64 %ivar, 8
+ %add.ptr10.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum
+ %1 = bitcast i8* %add.ptr10.1 to double*
+ %tmp12 = load double* %1, align 8
+ %add.ptr.sum17 = add i64 %ivar, 16
+ %add.ptr4.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum17
+ %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
+ %tmp = load double* %add.ptr4.1.0, align 8
+ %add.ptr4.1.sum = add i64 %ivar, 24
+ %add.ptr4.1.1 = getelementptr inbounds i8* %0, i64 %add.ptr4.1.sum
+ %2 = bitcast i8* %add.ptr4.1.1 to double*
+ %tmp5 = load double* %2, align 8
+ %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
+ %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
+ %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0
+ %insert7 = insertvalue %struct.CGSize undef, double %tmp, 0
+ %insert9 = insertvalue %struct.CGSize %insert7, double %tmp5, 1
+ %insert3 = insertvalue %struct.CGRect %insert, %struct.CGSize %insert9, 1
+ ret %struct.CGRect %insert3
+}
+
+!llvm.module.flags = !{!0, !1, !2, !3}
+
+!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
+!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
+!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
+!4 = metadata !{}
diff --git a/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll b/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
new file mode 100644
index 000000000000..168e921bcc02
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
@@ -0,0 +1,67 @@
+; RUN: llc -march=arm64 -O0 < %s | FileCheck %s
+; RUN: llc -march=arm64 -O3 < %s | FileCheck %s
+
+@.str = private unnamed_addr constant [9 x i8] c"%lf %lu\0A\00", align 1
+@.str1 = private unnamed_addr constant [8 x i8] c"%lf %u\0A\00", align 1
+@.str2 = private unnamed_addr constant [8 x i8] c"%f %lu\0A\00", align 1
+@.str3 = private unnamed_addr constant [7 x i8] c"%f %u\0A\00", align 1
+
+define void @testDouble(double %d) ssp {
+; CHECK-LABEL: testDouble:
+; CHECK: fcvtzu x{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: fcvtzu w{{[0-9]+}}, d{{[0-9]+}}
+entry:
+ %d.addr = alloca double, align 8
+ store double %d, double* %d.addr, align 8
+ %0 = load double* %d.addr, align 8
+ %1 = load double* %d.addr, align 8
+ %conv = fptoui double %1 to i64
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), double %0, i64 %conv)
+ %2 = load double* %d.addr, align 8
+ %3 = load double* %d.addr, align 8
+ %conv1 = fptoui double %3 to i32
+ %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str1, i32 0, i32 0), double %2, i32 %conv1)
+ ret void
+}
+
+declare i32 @printf(i8*, ...)
+
+define void @testFloat(float %f) ssp {
+; CHECK-LABEL: testFloat:
+; CHECK: fcvtzu x{{[0-9]+}}, s{{[0-9]+}}
+; CHECK: fcvtzu w{{[0-9]+}}, s{{[0-9]+}}
+entry:
+ %f.addr = alloca float, align 4
+ store float %f, float* %f.addr, align 4
+ %0 = load float* %f.addr, align 4
+ %conv = fpext float %0 to double
+ %1 = load float* %f.addr, align 4
+ %conv1 = fptoui float %1 to i64
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str2, i32 0, i32 0), double %conv, i64 %conv1)
+ %2 = load float* %f.addr, align 4
+ %conv2 = fpext float %2 to double
+ %3 = load float* %f.addr, align 4
+ %conv3 = fptoui float %3 to i32
+ %call4 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str3, i32 0, i32 0), double %conv2, i32 %conv3)
+ ret void
+}
+
+define i32 @main(i32 %argc, i8** %argv) ssp {
+entry:
+ %retval = alloca i32, align 4
+ %argc.addr = alloca i32, align 4
+ %argv.addr = alloca i8**, align 8
+ store i32 0, i32* %retval
+ store i32 %argc, i32* %argc.addr, align 4
+ store i8** %argv, i8*** %argv.addr, align 8
+ call void @testDouble(double 1.159198e+01)
+ call void @testFloat(float 0x40272F1800000000)
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0, !1, !2, !3}
+
+!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
+!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
+!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
diff --git a/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll b/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
new file mode 100644
index 000000000000..55ecfb5d2bd6
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios
+; rdar://11849816
+
+@shlib_path_substitutions = external hidden unnamed_addr global i8**, align 8
+
+declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
+
+declare noalias i8* @xmalloc(i64) optsize
+
+declare i64 @strlen(i8* nocapture) nounwind readonly optsize
+
+declare i8* @__strcpy_chk(i8*, i8*, i64) nounwind optsize
+
+declare i8* @__strcat_chk(i8*, i8*, i64) nounwind optsize
+
+declare noalias i8* @xstrdup(i8*) optsize
+
+define i8* @dyld_fix_path(i8* %path) nounwind optsize ssp {
+entry:
+ br i1 undef, label %if.end56, label %for.cond
+
+for.cond: ; preds = %entry
+ br i1 undef, label %for.cond10, label %for.body
+
+for.body: ; preds = %for.cond
+ unreachable
+
+for.cond10: ; preds = %for.cond
+ br i1 undef, label %if.end56, label %for.body14
+
+for.body14: ; preds = %for.cond10
+ %call22 = tail call i64 @strlen(i8* undef) nounwind optsize
+ %sext = shl i64 %call22, 32
+ %conv30 = ashr exact i64 %sext, 32
+ %add29 = sub i64 0, %conv30
+ %sub = add i64 %add29, 0
+ %add31 = shl i64 %sub, 32
+ %sext59 = add i64 %add31, 4294967296
+ %conv33 = ashr exact i64 %sext59, 32
+ %call34 = tail call noalias i8* @xmalloc(i64 %conv33) nounwind optsize
+ br i1 undef, label %cond.false45, label %cond.true43
+
+cond.true43: ; preds = %for.body14
+ unreachable
+
+cond.false45: ; preds = %for.body14
+ %add.ptr = getelementptr inbounds i8* %path, i64 %conv30
+ unreachable
+
+if.end56: ; preds = %for.cond10, %entry
+ ret i8* null
+}
+
+declare i32 @strncmp(i8* nocapture, i8* nocapture, i64) nounwind readonly optsize
+
+declare i8* @strcpy(i8*, i8* nocapture) nounwind
diff --git a/test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll b/test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
new file mode 100644
index 000000000000..e2c43d953bb9
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -fp-contract=fast | FileCheck %s --check-prefix=FAST
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+;FAST-LABEL: _Z9example25v:
+;FAST: fcmgt.4s
+;FAST: ret
+
+;CHECK-LABEL: _Z9example25v:
+;CHECK: fcmgt.4s
+;CHECK: ret
+
+define <4 x i32> @_Z9example25v( <4 x float> %N0, <4 x float> %N1) {
+ %A = fcmp olt <4 x float> %N0, %N1
+ %B = zext <4 x i1> %A to <4 x i32>
+ ret <4 x i32> %B
+}
diff --git a/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll b/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
new file mode 100644
index 000000000000..94511243a49f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -march=arm64
+; Make sure we are not crashing on this test.
+
+define void @autogen_SD13158() {
+entry:
+ %B26 = frem float 0.000000e+00, undef
+ br i1 undef, label %CF, label %CF77
+
+CF: ; preds = %CF, %CF76
+ store float %B26, float* undef
+ br i1 undef, label %CF, label %CF77
+
+CF77: ; preds = %CF
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll b/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
new file mode 100644
index 000000000000..404027bfd5f3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -march=arm64
+
+; Make sure we are not crashing on this test.
+
+define void @autogen_SD12881() {
+BB:
+ %B17 = ashr <4 x i32> zeroinitializer, zeroinitializer
+ br label %CF
+
+CF: ; preds = %CF83, %CF, %BB
+ br i1 undef, label %CF, label %CF83
+
+CF83: ; preds = %CF
+ %FC70 = sitofp <4 x i32> %B17 to <4 x double>
+ br label %CF
+}
+
+
+define void @autogen_SD12881_2() {
+BB:
+ %B17 = ashr <4 x i32> zeroinitializer, zeroinitializer
+ br label %CF
+
+CF: ; preds = %CF83, %CF, %BB
+ br i1 undef, label %CF, label %CF83
+
+CF83: ; preds = %CF
+ %FC70 = uitofp <4 x i32> %B17 to <4 x double>
+ br label %CF
+}
+
+define void @_Z12my_example2bv() nounwind noinline ssp {
+entry:
+ %0 = fptosi <2 x double> undef to <2 x i32>
+ store <2 x i32> %0, <2 x i32>* undef, align 8
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll b/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
new file mode 100644
index 000000000000..a350ba1472c9
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple
+
+;CHECK-LABEL: Shuff:
+;CHECK: tbl.8b
+;CHECK: ret
+define <8 x i8 > @Shuff(<8 x i8> %in, <8 x i8>* %out) nounwind ssp {
+ %value = shufflevector <8 x i8> %in, <8 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %value
+}
+
+
diff --git a/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll b/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
new file mode 100644
index 000000000000..c4597d5a4815
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
@@ -0,0 +1,67 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false | FileCheck %s -check-prefix=GENERIC
+
+define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: bar:
+; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
+; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
+; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
+; GENERIC-LABEL: bar:
+; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
+; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
+; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
+ %add = add <2 x i64> %a, %b
+ %vgetq_lane = extractelement <2 x i64> %add, i32 0
+ %vgetq_lane2 = extractelement <2 x i64> %b, i32 0
+ %add3 = add i64 %vgetq_lane, %vgetq_lane2
+ %sub = sub i64 %vgetq_lane, %vgetq_lane2
+ %vecinit = insertelement <2 x i64> undef, i64 %add3, i32 0
+ %vecinit8 = insertelement <2 x i64> %vecinit, i64 %sub, i32 1
+ ret <2 x i64> %vecinit8
+}
+
+define double @subdd_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: subdd_su64:
+; CHECK: sub d0, d1, d0
+; CHECK-NEXT: ret
+; GENERIC-LABEL: subdd_su64:
+; GENERIC: sub d0, d1, d0
+; GENERIC-NEXT: ret
+ %vecext = extractelement <2 x i64> %a, i32 0
+ %vecext1 = extractelement <2 x i64> %b, i32 0
+ %sub.i = sub nsw i64 %vecext1, %vecext
+ %retval = bitcast i64 %sub.i to double
+ ret double %retval
+}
+
+define double @vaddd_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: vaddd_su64:
+; CHECK: add d0, d1, d0
+; CHECK-NEXT: ret
+; GENERIC-LABEL: vaddd_su64:
+; GENERIC: add d0, d1, d0
+; GENERIC-NEXT: ret
+ %vecext = extractelement <2 x i64> %a, i32 0
+ %vecext1 = extractelement <2 x i64> %b, i32 0
+ %add.i = add nsw i64 %vecext1, %vecext
+ %retval = bitcast i64 %add.i to double
+ ret double %retval
+}
+
+; sub MI doesn't access dsub register.
+define double @add_sub_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: add_sub_su64:
+; CHECK: add d0, d1, d0
+; CHECK: sub d0, {{d[0-9]+}}, d0
+; CHECK-NEXT: ret
+; GENERIC-LABEL: add_sub_su64:
+; GENERIC: add d0, d1, d0
+; GENERIC: sub d0, {{d[0-9]+}}, d0
+; GENERIC-NEXT: ret
+ %vecext = extractelement <2 x i64> %a, i32 0
+ %vecext1 = extractelement <2 x i64> %b, i32 0
+ %add.i = add i64 %vecext1, %vecext
+ %sub.i = sub i64 0, %add.i
+ %retval = bitcast i64 %sub.i to double
+ ret double %retval
+}
diff --git a/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll b/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
new file mode 100644
index 000000000000..a73b70718019
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -march=arm64
+
+; This test case exercises an infinite-loop bug in the DAG combiner, which
+; kept performing the following two replacements endlessly:
+; (1) Replacing.3 0x2c509f0: v4i32 = any_extend 0x2c4cd08 [ORD=4]
+; With: 0x2c4d128: v4i32 = sign_extend 0x2c4cd08 [ORD=4]
+;
+; (2) Replacing.2 0x2c4d128: v4i32 = sign_extend 0x2c4cd08 [ORD=4]
+; With: 0x2c509f0: v4i32 = any_extend 0x2c4cd08 [ORD=4]
+; Since optimization (2), rewriting SIGN_EXTEND as ANY_EXTEND, exists to
+; replace unused bits with undefined bits, we remove optimization (1): it
+; makes no sense to replace undefined bits with signed bits.
+
+define <4 x i32> @infiniteLoop(<4 x i32> %in0, <4 x i16> %in1) {
+entry:
+ %cmp.i = icmp sge <4 x i16> %in1, <i16 32767, i16 32767, i16 -1, i16 -32768>
+ %sext.i = sext <4 x i1> %cmp.i to <4 x i32>
+ %mul.i = mul <4 x i32> %in0, %sext.i
+ %sext = shl <4 x i32> %mul.i, <i32 16, i32 16, i32 16, i32 16>
+ %vmovl.i.i = ashr <4 x i32> %sext, <i32 16, i32 16, i32 16, i32 16>
+ ret <4 x i32> %vmovl.i.i
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll b/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
new file mode 100644
index 000000000000..1b2d54317c23
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -O0 -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+; The following test cases test shufflevector with a mask whose leading elements are UNDEF.
+define <8 x i16> @test_vext_undef_traverse(<8 x i16> %in) {
+;CHECK-LABEL: test_vext_undef_traverse:
+;CHECK: {{ext.16b.*v0, #4}}
+ %vext = shufflevector <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 0, i16 0>, <8 x i16> %in, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i16> %vext
+}
+
+define <8 x i16> @test_vext_undef_traverse2(<8 x i16> %in) {
+;CHECK-LABEL: test_vext_undef_traverse2:
+;CHECK: {{ext.16b.*v0, #6}}
+ %vext = shufflevector <8 x i16> %in, <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2>
+ ret <8 x i16> %vext
+}
+
+define <8 x i8> @test_vext_undef_traverse3(<8 x i8> %in) {
+;CHECK-LABEL: test_vext_undef_traverse3:
+;CHECK: {{ext.8b.*v0, #6}}
+ %vext = shufflevector <8 x i8> %in, <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 5>
+ ret <8 x i8> %vext
+}
diff --git a/test/CodeGen/AArch64/arm64-aapcs.ll b/test/CodeGen/AArch64/arm64-aapcs.ll
new file mode 100644
index 000000000000..127a7cc0a155
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-aapcs.ll
@@ -0,0 +1,125 @@
+; RUN: llc -mtriple=arm64-linux-gnu -enable-misched=false < %s | FileCheck %s
+
+@var = global i32 0, align 4
+
+define i128 @test_i128_align(i32, i128 %arg, i32 %after) {
+ store i32 %after, i32* @var, align 4
+; CHECK: str w4, [{{x[0-9]+}}, :lo12:var]
+
+ ret i128 %arg
+; CHECK: mov x0, x2
+; CHECK: mov x1, x3
+}
+
+@var64 = global i64 0, align 8
+
+ ; Check stack slots are 64-bit at all times.
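 ; A rough sketch of the incoming stack layout, as implied by the CHECK
 ; offsets below (an inference, not taken from an ABI document):
 ;   [sp]      i1  %bool
 ;   [sp, #8]  i8  %char
 ;   [sp, #16] i16 %short
 ;   [sp, #24] i32 %int
 ;   [sp, #32] i64 %long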
+define void @test_stack_slots([8 x i32], i1 %bool, i8 %char, i16 %short,
+ i32 %int, i64 %long) {
+ ; Part of last store. Blasted scheduler.
+; CHECK: ldr [[LONG:x[0-9]+]], [sp, #32]
+
+ %ext_bool = zext i1 %bool to i64
+ store volatile i64 %ext_bool, i64* @var64, align 8
+; CHECK: ldrb w[[EXT:[0-9]+]], [sp]
+; CHECK: and x[[EXTED:[0-9]+]], x[[EXT]], #0x1
+; CHECK: str x[[EXTED]], [{{x[0-9]+}}, :lo12:var64]
+
+ %ext_char = zext i8 %char to i64
+ store volatile i64 %ext_char, i64* @var64, align 8
+; CHECK: ldrb w[[EXT:[0-9]+]], [sp, #8]
+; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
+
+ %ext_short = zext i16 %short to i64
+ store volatile i64 %ext_short, i64* @var64, align 8
+; CHECK: ldrh w[[EXT:[0-9]+]], [sp, #16]
+; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
+
+ %ext_int = zext i32 %int to i64
+ store volatile i64 %ext_int, i64* @var64, align 8
+; CHECK: ldr{{b?}} w[[EXT:[0-9]+]], [sp, #24]
+; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
+
+ store volatile i64 %long, i64* @var64, align 8
+; CHECK: str [[LONG]], [{{x[0-9]+}}, :lo12:var64]
+
+ ret void
+}
+
+; Make sure the callee does extensions (in the absence of zext/sext
+; keyword on args) while we're here.
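+; Roughly, in C terms (an assumed sketch; parameter types inferred from the
+; sext/zext operations below, names invented):
+;   volatile long long var64;
+;   void test_extension(_Bool b, signed char c, unsigned short s, unsigned int i)
+;   { var64 = b; var64 = c; var64 = s; var64 = i; }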
+
+define void @test_extension(i1 %bool, i8 %char, i16 %short, i32 %int) {
+ %ext_bool = zext i1 %bool to i64
+ store volatile i64 %ext_bool, i64* @var64
+; CHECK: and [[EXT:x[0-9]+]], x0, #0x1
+; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
+
+ %ext_char = sext i8 %char to i64
+ store volatile i64 %ext_char, i64* @var64
+; CHECK: sxtb [[EXT:x[0-9]+]], w1
+; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
+
+ %ext_short = zext i16 %short to i64
+ store volatile i64 %ext_short, i64* @var64
+; CHECK: and [[EXT:x[0-9]+]], x2, #0xffff
+; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
+
+ %ext_int = zext i32 %int to i64
+ store volatile i64 %ext_int, i64* @var64
+; CHECK: ubfx [[EXT:x[0-9]+]], x3, #0, #32
+; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
+
+ ret void
+}
+
+declare void @variadic(i32 %a, ...)
+
+ ; Under AAPCS variadic functions have the same calling convention as
+ ; others. The extra arguments should go in registers rather than on the stack.
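 ; A rough C equivalent of the call below (an assumed sketch, for
 ; illustration only; on an LP64 target 1L is a 64-bit argument):
 ;   void variadic(int a, ...);
 ;   void test_variadic(void) { variadic(0, 1L, 2.0); }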
+define void @test_variadic() {
+ call void(i32, ...)* @variadic(i32 0, i64 1, double 2.0)
+; CHECK: fmov d0, #2.0
+; CHECK: orr w1, wzr, #0x1
+; CHECK: bl variadic
+ ret void
+}
+
+; We weren't marking x7 as used after deciding that the i128 didn't fit into
+; registers and putting the first half on the stack, so the *second* half went
+; into x7. Yuck!
+define i128 @test_i128_shadow([7 x i64] %x0_x6, i128 %sp) {
+; CHECK-LABEL: test_i128_shadow:
+; CHECK: ldp x0, x1, [sp]
+
+ ret i128 %sp
+}
+
+; This test checks that fp128 can be correctly handled on the stack.
+define fp128 @test_fp128([8 x float] %arg0, fp128 %arg1) {
+; CHECK-LABEL: test_fp128:
+; CHECK: ldr {{q[0-9]+}}, [sp]
+ ret fp128 %arg1
+}
+
+; Check that a VPR can be correctly passed on the stack.
+define <2 x double> @test_vreg_stack([8 x <2 x double>], <2 x double> %varg_stack) {
+entry:
+; CHECK-LABEL: test_vreg_stack:
+; CHECK: ldr {{q[0-9]+}}, [sp]
+ ret <2 x double> %varg_stack;
+}
+
+; Check that f16 can be passed and returned (ACLE 2.0 extension)
+define half @test_half(float, half %arg) {
+; CHECK-LABEL: test_half:
+; CHECK: mov v0.16b, v{{[0-9]+}}.16b
+ ret half %arg;
+}
+
+; Check that f16 constants are materialized correctly
+define half @test_half_const() {
+; CHECK-LABEL: test_half_const:
+; CHECK: ldr h0, [x{{[0-9]+}}, :lo12:{{.*}}]
+ ret half 0xH4248
+}
diff --git a/test/CodeGen/AArch64/arm64-abi-varargs.ll b/test/CodeGen/AArch64/arm64-abi-varargs.ll
new file mode 100644
index 000000000000..92db392cd041
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -0,0 +1,191 @@
+; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
+target triple = "arm64-apple-ios7.0.0"
+
+; rdar://13625505
+; Here we have 9 fixed integer arguments; the 9th argument is passed on the
+; stack, and the varargs start right after it at 8-byte alignment.
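+; A rough C view of this callee (an assumed sketch, not the original source):
+;   void fn9(int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8,
+;            int a9, ...);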
+define void @fn9(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp {
+; CHECK-LABEL: fn9:
+; 9th fixed argument
+; CHECK: ldr {{w[0-9]+}}, [sp, #64]
+; CHECK: add [[ARGS:x[0-9]+]], sp, #72
+; CHECK: add {{x[0-9]+}}, [[ARGS]], #8
+; First vararg
+; CHECK: ldr {{w[0-9]+}}, [sp, #72]
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8
+; Second vararg
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #8
+; Third vararg
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
+ %1 = alloca i32, align 4
+ %2 = alloca i32, align 4
+ %3 = alloca i32, align 4
+ %4 = alloca i32, align 4
+ %5 = alloca i32, align 4
+ %6 = alloca i32, align 4
+ %7 = alloca i32, align 4
+ %8 = alloca i32, align 4
+ %9 = alloca i32, align 4
+ %args = alloca i8*, align 8
+ %a10 = alloca i32, align 4
+ %a11 = alloca i32, align 4
+ %a12 = alloca i32, align 4
+ store i32 %a1, i32* %1, align 4
+ store i32 %a2, i32* %2, align 4
+ store i32 %a3, i32* %3, align 4
+ store i32 %a4, i32* %4, align 4
+ store i32 %a5, i32* %5, align 4
+ store i32 %a6, i32* %6, align 4
+ store i32 %a7, i32* %7, align 4
+ store i32 %a8, i32* %8, align 4
+ store i32 %a9, i32* %9, align 4
+ %10 = bitcast i8** %args to i8*
+ call void @llvm.va_start(i8* %10)
+ %11 = va_arg i8** %args, i32
+ store i32 %11, i32* %a10, align 4
+ %12 = va_arg i8** %args, i32
+ store i32 %12, i32* %a11, align 4
+ %13 = va_arg i8** %args, i32
+ store i32 %13, i32* %a12, align 4
+ ret void
+}
+
+declare void @llvm.va_start(i8*) nounwind
+
+define i32 @main() nounwind ssp {
+; CHECK-LABEL: main:
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
+; CHECK: str {{x[0-9]+}}, [sp, #8]
+; CHECK: str {{w[0-9]+}}, [sp]
+ %a1 = alloca i32, align 4
+ %a2 = alloca i32, align 4
+ %a3 = alloca i32, align 4
+ %a4 = alloca i32, align 4
+ %a5 = alloca i32, align 4
+ %a6 = alloca i32, align 4
+ %a7 = alloca i32, align 4
+ %a8 = alloca i32, align 4
+ %a9 = alloca i32, align 4
+ %a10 = alloca i32, align 4
+ %a11 = alloca i32, align 4
+ %a12 = alloca i32, align 4
+ store i32 1, i32* %a1, align 4
+ store i32 2, i32* %a2, align 4
+ store i32 3, i32* %a3, align 4
+ store i32 4, i32* %a4, align 4
+ store i32 5, i32* %a5, align 4
+ store i32 6, i32* %a6, align 4
+ store i32 7, i32* %a7, align 4
+ store i32 8, i32* %a8, align 4
+ store i32 9, i32* %a9, align 4
+ store i32 10, i32* %a10, align 4
+ store i32 11, i32* %a11, align 4
+ store i32 12, i32* %a12, align 4
+ %1 = load i32* %a1, align 4
+ %2 = load i32* %a2, align 4
+ %3 = load i32* %a3, align 4
+ %4 = load i32* %a4, align 4
+ %5 = load i32* %a5, align 4
+ %6 = load i32* %a6, align 4
+ %7 = load i32* %a7, align 4
+ %8 = load i32* %a8, align 4
+ %9 = load i32* %a9, align 4
+ %10 = load i32* %a10, align 4
+ %11 = load i32* %a11, align 4
+ %12 = load i32* %a12, align 4
+ call void (i32, i32, i32, i32, i32, i32, i32, i32, i32, ...)* @fn9(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
+ ret i32 0
+}
+
+;rdar://13668483
+@.str = private unnamed_addr constant [4 x i8] c"fmt\00", align 1
+define void @foo(i8* %fmt, ...) nounwind {
+entry:
+; CHECK-LABEL: foo:
+; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, #0x8
+; CHECK: ldr {{w[0-9]+}}, [sp, #48]
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #15
+; CHECK: and x[[ADDR:[0-9]+]], {{x[0-9]+}}, #0xfffffffffffffff0
+; CHECK: ldr {{q[0-9]+}}, [x[[ADDR]]]
+ %fmt.addr = alloca i8*, align 8
+ %args = alloca i8*, align 8
+ %vc = alloca i32, align 4
+ %vv = alloca <4 x i32>, align 16
+ store i8* %fmt, i8** %fmt.addr, align 8
+ %args1 = bitcast i8** %args to i8*
+ call void @llvm.va_start(i8* %args1)
+ %0 = va_arg i8** %args, i32
+ store i32 %0, i32* %vc, align 4
+ %1 = va_arg i8** %args, <4 x i32>
+ store <4 x i32> %1, <4 x i32>* %vv, align 16
+ ret void
+}
+
+define void @bar(i32 %x, <4 x i32> %y) nounwind {
+entry:
+; CHECK-LABEL: bar:
+; CHECK: str {{q[0-9]+}}, [sp, #16]
+; CHECK: str {{x[0-9]+}}, [sp]
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca <4 x i32>, align 16
+ store i32 %x, i32* %x.addr, align 4
+ store <4 x i32> %y, <4 x i32>* %y.addr, align 16
+ %0 = load i32* %x.addr, align 4
+ %1 = load <4 x i32>* %y.addr, align 16
+ call void (i8*, ...)* @foo(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %0, <4 x i32> %1)
+ ret void
+}
+
+; rdar://13668927
+; When passing 16-byte-aligned small structs as varargs, make sure the
+; caller-side stack slot is 16-byte aligned.
+%struct.s41 = type { i32, i16, i32, i16 }
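+; A plausible C shape for s41 (an assumed sketch; field names invented, the
+; 16-byte alignment taken from the comment above and the aligned allocas below):
+;   struct s41 { int a; short b; int c; short d; } __attribute__((aligned(16)));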
+define void @foo2(i8* %fmt, ...) nounwind {
+entry:
+; CHECK-LABEL: foo2:
+; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, #0x8
+; CHECK: ldr {{w[0-9]+}}, [sp, #48]
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #15
+; CHECK: and x[[ADDR:[0-9]+]], {{x[0-9]+}}, #0xfffffffffffffff0
+; CHECK: ldr {{q[0-9]+}}, [x[[ADDR]]]
+ %fmt.addr = alloca i8*, align 8
+ %args = alloca i8*, align 8
+ %vc = alloca i32, align 4
+ %vs = alloca %struct.s41, align 16
+ store i8* %fmt, i8** %fmt.addr, align 8
+ %args1 = bitcast i8** %args to i8*
+ call void @llvm.va_start(i8* %args1)
+ %0 = va_arg i8** %args, i32
+ store i32 %0, i32* %vc, align 4
+ %ap.cur = load i8** %args
+ %1 = getelementptr i8* %ap.cur, i32 15
+ %2 = ptrtoint i8* %1 to i64
+ %3 = and i64 %2, -16
+ %ap.align = inttoptr i64 %3 to i8*
+ %ap.next = getelementptr i8* %ap.align, i32 16
+ store i8* %ap.next, i8** %args
+ %4 = bitcast i8* %ap.align to %struct.s41*
+ %5 = bitcast %struct.s41* %vs to i8*
+ %6 = bitcast %struct.s41* %4 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* %6, i64 16, i32 16, i1 false)
+ ret void
+}
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+
+define void @bar2(i32 %x, i128 %s41.coerce) nounwind {
+entry:
+; CHECK-LABEL: bar2:
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
+; CHECK: str {{x[0-9]+}}, [sp]
+ %x.addr = alloca i32, align 4
+ %s41 = alloca %struct.s41, align 16
+ store i32 %x, i32* %x.addr, align 4
+ %0 = bitcast %struct.s41* %s41 to i128*
+ store i128 %s41.coerce, i128* %0, align 1
+ %1 = load i32* %x.addr, align 4
+ %2 = bitcast %struct.s41* %s41 to i128*
+ %3 = load i128* %2, align 1
+ call void (i8*, ...)* @foo2(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %1, i128 %3)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-abi.ll b/test/CodeGen/AArch64/arm64-abi.ll
new file mode 100644
index 000000000000..a955029b3725
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-abi.ll
@@ -0,0 +1,239 @@
+; RUN: llc < %s -debug -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
+; RUN: llc < %s -O0 | FileCheck -check-prefix=FAST %s
+; REQUIRES: asserts
+target triple = "arm64-apple-darwin"
+
+; rdar://9932559
+define i64 @i8i16callee(i64 %a1, i64 %a2, i64 %a3, i8 signext %a4, i16 signext %a5, i64 %a6, i64 %a7, i64 %a8, i8 signext %b1, i16 signext %b2, i8 signext %b3, i8 signext %b4) nounwind readnone noinline {
+entry:
+; CHECK-LABEL: i8i16callee:
+; The 9th, 10th, 11th and 12th arguments are passed at sp, sp+2, sp+4, sp+5.
+; They are i8, i16, i8 and i8.
+; CHECK-DAG: ldrsb {{w[0-9]+}}, [sp, #5]
+; CHECK-DAG: ldrsb {{w[0-9]+}}, [sp, #4]
+; CHECK-DAG: ldrsh {{w[0-9]+}}, [sp, #2]
+; CHECK-DAG: ldrsb {{w[0-9]+}}, [sp]
+; FAST-LABEL: i8i16callee:
+; FAST-DAG: ldrsb {{w[0-9]+}}, [sp, #5]
+; FAST-DAG: ldrsb {{w[0-9]+}}, [sp, #4]
+; FAST-DAG: ldrsh {{w[0-9]+}}, [sp, #2]
+; FAST-DAG: ldrsb {{w[0-9]+}}, [sp]
+ %conv = sext i8 %a4 to i64
+ %conv3 = sext i16 %a5 to i64
+ %conv8 = sext i8 %b1 to i64
+ %conv9 = sext i16 %b2 to i64
+ %conv11 = sext i8 %b3 to i64
+ %conv13 = sext i8 %b4 to i64
+ %add10 = add i64 %a2, %a1
+ %add12 = add i64 %add10, %a3
+ %add14 = add i64 %add12, %conv
+ %add = add i64 %add14, %conv3
+ %add1 = add i64 %add, %a6
+ %add2 = add i64 %add1, %a7
+ %add4 = add i64 %add2, %a8
+ %add5 = add i64 %add4, %conv8
+ %add6 = add i64 %add5, %conv9
+ %add7 = add i64 %add6, %conv11
+ %add15 = add i64 %add7, %conv13
+ %sext = shl i64 %add15, 32
+ %conv17 = ashr exact i64 %sext, 32
+ ret i64 %conv17
+}
+
+define i32 @i8i16caller() nounwind readnone {
+entry:
+; CHECK: i8i16caller
+; The 9th, 10th, 11th and 12th arguments are passed at sp, sp+2, sp+4, sp+5.
+; They are i8, i16, i8 and i8.
+; CHECK-DAG: strb {{w[0-9]+}}, [sp, #5]
+; CHECK-DAG: strb {{w[0-9]+}}, [sp, #4]
+; CHECK-DAG: strh {{w[0-9]+}}, [sp, #2]
+; CHECK-DAG: strb {{w[0-9]+}}, [sp]
+; CHECK: bl
+; FAST: i8i16caller
+; FAST: strb {{w[0-9]+}}, [sp]
+; FAST: strh {{w[0-9]+}}, [sp, #2]
+; FAST: strb {{w[0-9]+}}, [sp, #4]
+; FAST: strb {{w[0-9]+}}, [sp, #5]
+; FAST: bl
+ %call = tail call i64 @i8i16callee(i64 0, i64 1, i64 2, i8 signext 3, i16 signext 4, i64 5, i64 6, i64 7, i8 signext 97, i16 signext 98, i8 signext 99, i8 signext 100)
+ %conv = trunc i64 %call to i32
+ ret i32 %conv
+}
+
+; rdar://12651543
+define double @circle_center([2 x float] %a) nounwind ssp {
+ %call = tail call double @ext([2 x float] %a) nounwind
+; CHECK: circle_center
+; CHECK: bl
+ ret double %call
+}
+declare double @ext([2 x float])
+
+; rdar://12656141
+; A 16-byte vector should be 16-byte aligned when passed on the stack.
+; A double argument will be passed on the stack, so the vector should be at sp+16.
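+; Sketch of the outgoing stack arguments implied by the comment above and the
+; CHECK line below (an inference, not from an ABI document):
+;   [sp]       the double that no longer fits in a register (8 bytes)
+;   [sp, #16]  the <4 x i32>, rounded up to the next 16-byte boundary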
+define double @fixed_4i(<4 x i32>* nocapture %in) nounwind {
+entry:
+; CHECK: fixed_4i
+; CHECK: str [[REG_1:q[0-9]+]], [sp, #16]
+; FAST: fixed_4i
+; FAST: sub sp, sp, #64
+; FAST: mov x[[ADDR:[0-9]+]], sp
+; FAST: str [[REG_1:q[0-9]+]], [x[[ADDR]], #16]
+ %0 = load <4 x i32>* %in, align 16
+ %call = tail call double @args_vec_4i(double 3.000000e+00, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, double 3.000000e+00, <4 x i32> %0, i8 signext 3)
+ ret double %call
+}
+declare double @args_vec_4i(double, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, double, <4 x i32>, i8 signext)
+
+; rdar://12695237
+; d8 at sp, i in register w0.
+@g_d = common global double 0.000000e+00, align 8
+define void @test1(float %f1, double %d1, double %d2, double %d3, double %d4,
+ double %d5, double %d6, double %d7, double %d8, i32 %i) nounwind ssp {
+entry:
+; CHECK: test1
+; CHECK: ldr [[REG_1:d[0-9]+]], [sp]
+; CHECK: scvtf [[REG_2:s[0-9]+]], w0
+; CHECK: fadd s0, [[REG_2]], s0
+ %conv = sitofp i32 %i to float
+ %add = fadd float %conv, %f1
+ %conv1 = fpext float %add to double
+ %add2 = fadd double %conv1, %d7
+ %add3 = fadd double %add2, %d8
+ store double %add3, double* @g_d, align 8
+ ret void
+}
+
+; i9 at sp, d1 in register s0.
+define void @test2(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
+ i32 %i7, i32 %i8, i32 %i9, float %d1) nounwind ssp {
+entry:
+; CHECK: test2
+; CHECK: scvtf [[REG_2:s[0-9]+]], w0
+; CHECK: fadd s0, [[REG_2]], s0
+; CHECK: ldr [[REG_1:s[0-9]+]], [sp]
+ %conv = sitofp i32 %i1 to float
+ %add = fadd float %conv, %d1
+ %conv1 = fpext float %add to double
+ %conv2 = sitofp i32 %i8 to double
+ %add3 = fadd double %conv2, %conv1
+ %conv4 = sitofp i32 %i9 to double
+ %add5 = fadd double %conv4, %add3
+ store double %add5, double* @g_d, align 8
+ ret void
+}
+
+; rdar://12648441
+; Check alignment on stack for v64, f64, i64, f32, i32.
+define double @test3(<2 x i32>* nocapture %in) nounwind {
+entry:
+; CHECK: test3
+; CHECK: str [[REG_1:d[0-9]+]], [sp, #8]
+; FAST: test3
+; FAST: sub sp, sp, #32
+; FAST: mov x[[ADDR:[0-9]+]], sp
+; FAST: str [[REG_1:d[0-9]+]], [x[[ADDR]], #8]
+ %0 = load <2 x i32>* %in, align 8
+ %call = tail call double @args_vec_2i(double 3.000000e+00, <2 x i32> %0,
+ <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0,
+ <2 x i32> %0, float 3.000000e+00, <2 x i32> %0, i8 signext 3)
+ ret double %call
+}
+declare double @args_vec_2i(double, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>,
+ <2 x i32>, <2 x i32>, <2 x i32>, float, <2 x i32>, i8 signext)
+
+define double @test4(double* nocapture %in) nounwind {
+entry:
+; CHECK: test4
+; CHECK: str [[REG_1:d[0-9]+]], [sp, #8]
+; CHECK: str [[REG_2:w[0-9]+]], [sp]
+; CHECK: orr w0, wzr, #0x3
+ %0 = load double* %in, align 8
+ %call = tail call double @args_f64(double 3.000000e+00, double %0, double %0,
+ double %0, double %0, double %0, double %0, double %0,
+ float 3.000000e+00, double %0, i8 signext 3)
+ ret double %call
+}
+declare double @args_f64(double, double, double, double, double, double, double,
+ double, float, double, i8 signext)
+
+define i64 @test5(i64* nocapture %in) nounwind {
+entry:
+; CHECK: test5
+; CHECK: strb [[REG_3:w[0-9]+]], [sp, #16]
+; CHECK: str [[REG_1:x[0-9]+]], [sp, #8]
+; CHECK: str [[REG_2:w[0-9]+]], [sp]
+ %0 = load i64* %in, align 8
+ %call = tail call i64 @args_i64(i64 3, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0,
+ i64 %0, i64 %0, i32 3, i64 %0, i8 signext 3)
+ ret i64 %call
+}
+declare i64 @args_i64(i64, i64, i64, i64, i64, i64, i64, i64, i32, i64,
+ i8 signext)
+
+define i32 @test6(float* nocapture %in) nounwind {
+entry:
+; CHECK: test6
+; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
+; CHECK: str [[REG_1:s[0-9]+]], [sp, #4]
+; CHECK: strh [[REG_3:w[0-9]+]], [sp]
+ %0 = load float* %in, align 4
+ %call = tail call i32 @args_f32(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
+ i32 7, i32 8, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0,
+ float 6.0, float 7.0, float 8.0, i16 signext 3, float %0,
+ i8 signext 3)
+ ret i32 %call
+}
+declare i32 @args_f32(i32, i32, i32, i32, i32, i32, i32, i32,
+ float, float, float, float, float, float, float, float,
+ i16 signext, float, i8 signext)
+
+define i32 @test7(i32* nocapture %in) nounwind {
+entry:
+; CHECK: test7
+; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
+; CHECK: str [[REG_1:w[0-9]+]], [sp, #4]
+; CHECK: strh [[REG_3:w[0-9]+]], [sp]
+ %0 = load i32* %in, align 4
+ %call = tail call i32 @args_i32(i32 3, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0,
+ i32 %0, i32 %0, i16 signext 3, i32 %0, i8 signext 4)
+ ret i32 %call
+}
+declare i32 @args_i32(i32, i32, i32, i32, i32, i32, i32, i32, i16 signext, i32,
+ i8 signext)
+
+define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind {
+entry:
+; CHECK: test8
+; CHECK: strb {{w[0-9]+}}, [sp, #3]
+; CHECK: strb wzr, [sp, #2]
+; CHECK: strb {{w[0-9]+}}, [sp, #1]
+; CHECK: strb wzr, [sp]
+; CHECK: bl
+; FAST: test8
+; FAST: strb {{w[0-9]+}}, [sp]
+; FAST: strb {{w[0-9]+}}, [sp, #1]
+; FAST: strb {{w[0-9]+}}, [sp, #2]
+; FAST: strb {{w[0-9]+}}, [sp, #3]
+; FAST: bl
+ tail call void @args_i1(i1 zeroext false, i1 zeroext true, i1 zeroext false,
+ i1 zeroext true, i1 zeroext false, i1 zeroext true,
+ i1 zeroext false, i1 zeroext true, i1 zeroext false,
+ i1 zeroext true, i1 zeroext false, i1 zeroext true)
+ ret i32 0
+}
+
+declare void @args_i1(i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext,
+ i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext,
+ i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext)
+
+define i32 @i1_stack_incoming(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
+ i64 %g, i64 %h, i64 %i, i1 zeroext %j) {
+; CHECK-LABEL: i1_stack_incoming:
+; CHECK: ldrb w0, [sp, #8]
+; CHECK: ret
+ %v = zext i1 %j to i32
+ ret i32 %v
+}
diff --git a/test/CodeGen/AArch64/arm64-abi_align.ll b/test/CodeGen/AArch64/arm64-abi_align.ll
new file mode 100644
index 000000000000..44c5a07ce39a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-abi_align.ll
@@ -0,0 +1,532 @@
+; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
+; RUN: llc < %s -O0 | FileCheck -check-prefix=FAST %s
+target triple = "arm64-apple-darwin"
+
+; rdar://12648441
+; Generated from arm64-arguments.c with -O2.
+; Test passing structs with size < 8, < 16 and > 16 bytes,
+; with and without 16-byte alignment.
+
+; Structs with size < 8
+%struct.s38 = type { i32, i16 }
+; With an alignment of 16, the size will be padded to a multiple of 16 bytes.
+%struct.s39 = type { i32, i16, [10 x i8] }
+; Structs with size < 16
+%struct.s40 = type { i32, i16, i32, i16 }
+%struct.s41 = type { i32, i16, i32, i16 }
+; Structs with size > 16
+%struct.s42 = type { i32, i16, i32, i16, i32, i16 }
+%struct.s43 = type { i32, i16, i32, i16, i32, i16, [10 x i8] }
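+; Plausible C shapes for these structs (assumed sketches; field names are
+; invented, and the [10 x i8] members are read as padding from aligned(16)):
+;   struct s38 { int a; short b; };
+;   struct s39 { int a; short b; } __attribute__((aligned(16)));
+;   struct s40 { int a; short b; int c; short d; };
+;   struct s41 { int a; short b; int c; short d; } __attribute__((aligned(16)));
+;   struct s42 { int a; short b; int c; short d; int e; short f; };
+;   struct s43 { int a; short b; int c; short d; int e; short f; } __attribute__((aligned(16)));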
+
+@g38 = common global %struct.s38 zeroinitializer, align 4
+@g38_2 = common global %struct.s38 zeroinitializer, align 4
+@g39 = common global %struct.s39 zeroinitializer, align 16
+@g39_2 = common global %struct.s39 zeroinitializer, align 16
+@g40 = common global %struct.s40 zeroinitializer, align 4
+@g40_2 = common global %struct.s40 zeroinitializer, align 4
+@g41 = common global %struct.s41 zeroinitializer, align 16
+@g41_2 = common global %struct.s41 zeroinitializer, align 16
+@g42 = common global %struct.s42 zeroinitializer, align 4
+@g42_2 = common global %struct.s42 zeroinitializer, align 4
+@g43 = common global %struct.s43 zeroinitializer, align 16
+@g43_2 = common global %struct.s43 zeroinitializer, align 16
+
+; structs with size < 8 bytes, passed via i64 in x1 and x2
+define i32 @f38(i32 %i, i64 %s1.coerce, i64 %s2.coerce) #0 {
+entry:
+; CHECK: f38
+; CHECK: add w[[A:[0-9]+]], w1, w0
+; CHECK: add {{w[0-9]+}}, w[[A]], w2
+ %s1.sroa.0.0.extract.trunc = trunc i64 %s1.coerce to i32
+ %s1.sroa.1.4.extract.shift = lshr i64 %s1.coerce, 32
+ %s2.sroa.0.0.extract.trunc = trunc i64 %s2.coerce to i32
+ %s2.sroa.1.4.extract.shift = lshr i64 %s2.coerce, 32
+ %sext8 = shl nuw nsw i64 %s1.sroa.1.4.extract.shift, 16
+ %sext = trunc i64 %sext8 to i32
+ %conv = ashr exact i32 %sext, 16
+ %sext1011 = shl nuw nsw i64 %s2.sroa.1.4.extract.shift, 16
+ %sext10 = trunc i64 %sext1011 to i32
+ %conv6 = ashr exact i32 %sext10, 16
+ %add = add i32 %s1.sroa.0.0.extract.trunc, %i
+ %add3 = add i32 %add, %s2.sroa.0.0.extract.trunc
+ %add4 = add i32 %add3, %conv
+ %add7 = add i32 %add4, %conv6
+ ret i32 %add7
+}
+
+define i32 @caller38() #1 {
+entry:
+; CHECK: caller38
+; CHECK: ldr x1,
+; CHECK: ldr x2,
+ %0 = load i64* bitcast (%struct.s38* @g38 to i64*), align 4
+ %1 = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
+ %call = tail call i32 @f38(i32 3, i64 %0, i64 %1) #5
+ ret i32 %call
+}
+
+declare i32 @f38_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
+ i32 %i7, i32 %i8, i32 %i9, i64 %s1.coerce, i64 %s2.coerce) #0
+
+; structs with size < 8 bytes, passed on stack at [sp+8] and [sp+16]
+; i9 at [sp]
+define i32 @caller38_stack() #1 {
+entry:
+; CHECK: caller38_stack
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8]
+; CHECK: movz w[[C:[0-9]+]], #0x9
+; CHECK: str w[[C]], [sp]
+ %0 = load i64* bitcast (%struct.s38* @g38 to i64*), align 4
+ %1 = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
+ %call = tail call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
+ i32 7, i32 8, i32 9, i64 %0, i64 %1) #5
+ ret i32 %call
+}
+
+; structs with size < 8 bytes, alignment of 16
+; passed via i128 in x1 and x3
+define i32 @f39(i32 %i, i128 %s1.coerce, i128 %s2.coerce) #0 {
+entry:
+; CHECK: f39
+; CHECK: add w[[A:[0-9]+]], w1, w0
+; CHECK: add {{w[0-9]+}}, w[[A]], w3
+ %s1.sroa.0.0.extract.trunc = trunc i128 %s1.coerce to i32
+ %s1.sroa.1.4.extract.shift = lshr i128 %s1.coerce, 32
+ %s2.sroa.0.0.extract.trunc = trunc i128 %s2.coerce to i32
+ %s2.sroa.1.4.extract.shift = lshr i128 %s2.coerce, 32
+ %sext8 = shl nuw nsw i128 %s1.sroa.1.4.extract.shift, 16
+ %sext = trunc i128 %sext8 to i32
+ %conv = ashr exact i32 %sext, 16
+ %sext1011 = shl nuw nsw i128 %s2.sroa.1.4.extract.shift, 16
+ %sext10 = trunc i128 %sext1011 to i32
+ %conv6 = ashr exact i32 %sext10, 16
+ %add = add i32 %s1.sroa.0.0.extract.trunc, %i
+ %add3 = add i32 %add, %s2.sroa.0.0.extract.trunc
+ %add4 = add i32 %add3, %conv
+ %add7 = add i32 %add4, %conv6
+ ret i32 %add7
+}
+
+define i32 @caller39() #1 {
+entry:
+; CHECK: caller39
+; CHECK: ldp x1, x2,
+; CHECK: ldp x3, x4,
+ %0 = load i128* bitcast (%struct.s39* @g39 to i128*), align 16
+ %1 = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
+ %call = tail call i32 @f39(i32 3, i128 %0, i128 %1) #5
+ ret i32 %call
+}
+
+declare i32 @f39_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
+ i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce) #0
+
+; structs with size < 8 bytes, alignment 16
+; passed on stack at [sp+16] and [sp+32]
+define i32 @caller39_stack() #1 {
+entry:
+; CHECK: caller39_stack
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #32]
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
+; CHECK: movz w[[C:[0-9]+]], #0x9
+; CHECK: str w[[C]], [sp]
+ %0 = load i128* bitcast (%struct.s39* @g39 to i128*), align 16
+ %1 = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
+ %call = tail call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
+ i32 7, i32 8, i32 9, i128 %0, i128 %1) #5
+ ret i32 %call
+}
+
+; structs with size < 16 bytes
+; passed via i128 in x1 and x3
+define i32 @f40(i32 %i, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce) #0 {
+entry:
+; CHECK: f40
+; CHECK: add w[[A:[0-9]+]], w1, w0
+; CHECK: add {{w[0-9]+}}, w[[A]], w3
+ %s1.coerce.fca.0.extract = extractvalue [2 x i64] %s1.coerce, 0
+ %s2.coerce.fca.0.extract = extractvalue [2 x i64] %s2.coerce, 0
+ %s1.sroa.0.0.extract.trunc = trunc i64 %s1.coerce.fca.0.extract to i32
+ %s2.sroa.0.0.extract.trunc = trunc i64 %s2.coerce.fca.0.extract to i32
+ %s1.sroa.0.4.extract.shift = lshr i64 %s1.coerce.fca.0.extract, 32
+ %sext8 = shl nuw nsw i64 %s1.sroa.0.4.extract.shift, 16
+ %sext = trunc i64 %sext8 to i32
+ %conv = ashr exact i32 %sext, 16
+ %s2.sroa.0.4.extract.shift = lshr i64 %s2.coerce.fca.0.extract, 32
+ %sext1011 = shl nuw nsw i64 %s2.sroa.0.4.extract.shift, 16
+ %sext10 = trunc i64 %sext1011 to i32
+ %conv6 = ashr exact i32 %sext10, 16
+ %add = add i32 %s1.sroa.0.0.extract.trunc, %i
+ %add3 = add i32 %add, %s2.sroa.0.0.extract.trunc
+ %add4 = add i32 %add3, %conv
+ %add7 = add i32 %add4, %conv6
+ ret i32 %add7
+}
+
+define i32 @caller40() #1 {
+entry:
+; CHECK: caller40
+; CHECK: ldp x1, x2,
+; CHECK: ldp x3, x4,
+ %0 = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
+ %1 = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
+ %call = tail call i32 @f40(i32 3, [2 x i64] %0, [2 x i64] %1) #5
+ ret i32 %call
+}
+
+declare i32 @f40_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
+ i32 %i7, i32 %i8, i32 %i9, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce) #0
+
+; structs with size < 16 bytes
+; passed on stack at [sp+8] and [sp+24]
+define i32 @caller40_stack() #1 {
+entry:
+; CHECK: caller40_stack
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #24]
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8]
+; CHECK: movz w[[C:[0-9]+]], #0x9
+; CHECK: str w[[C]], [sp]
+ %0 = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
+ %1 = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
+ %call = tail call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
+ i32 7, i32 8, i32 9, [2 x i64] %0, [2 x i64] %1) #5
+ ret i32 %call
+}
+
+; structs with size < 16 bytes, alignment of 16
+; passed via i128 in x1 and x3
+define i32 @f41(i32 %i, i128 %s1.coerce, i128 %s2.coerce) #0 {
+entry:
+; CHECK: f41
+; CHECK: add w[[A:[0-9]+]], w1, w0
+; CHECK: add {{w[0-9]+}}, w[[A]], w3
+ %s1.sroa.0.0.extract.trunc = trunc i128 %s1.coerce to i32
+ %s1.sroa.1.4.extract.shift = lshr i128 %s1.coerce, 32
+ %s2.sroa.0.0.extract.trunc = trunc i128 %s2.coerce to i32
+ %s2.sroa.1.4.extract.shift = lshr i128 %s2.coerce, 32
+ %sext8 = shl nuw nsw i128 %s1.sroa.1.4.extract.shift, 16
+ %sext = trunc i128 %sext8 to i32
+ %conv = ashr exact i32 %sext, 16
+ %sext1011 = shl nuw nsw i128 %s2.sroa.1.4.extract.shift, 16
+ %sext10 = trunc i128 %sext1011 to i32
+ %conv6 = ashr exact i32 %sext10, 16
+ %add = add i32 %s1.sroa.0.0.extract.trunc, %i
+ %add3 = add i32 %add, %s2.sroa.0.0.extract.trunc
+ %add4 = add i32 %add3, %conv
+ %add7 = add i32 %add4, %conv6
+ ret i32 %add7
+}
+
+define i32 @caller41() #1 {
+entry:
+; CHECK: caller41
+; CHECK: ldp x1, x2,
+; CHECK: ldp x3, x4,
+ %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16
+ %1 = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
+ %call = tail call i32 @f41(i32 3, i128 %0, i128 %1) #5
+ ret i32 %call
+}
+
+declare i32 @f41_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
+ i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce) #0
+
+; structs with size < 16 bytes, alignment of 16
+; passed on stack at [sp+16] and [sp+32]
+define i32 @caller41_stack() #1 {
+entry:
+; CHECK: caller41_stack
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #32]
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
+; CHECK: movz w[[C:[0-9]+]], #0x9
+; CHECK: str w[[C]], [sp]
+ %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16
+ %1 = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
+ %call = tail call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
+ i32 7, i32 8, i32 9, i128 %0, i128 %1) #5
+ ret i32 %call
+}
+
+; structs with size of 22 bytes, passed indirectly in x1 and x2
+define i32 @f42(i32 %i, %struct.s42* nocapture %s1, %struct.s42* nocapture %s2) #2 {
+entry:
+; CHECK: f42
+; CHECK: ldr w[[A:[0-9]+]], [x1]
+; CHECK: ldr w[[B:[0-9]+]], [x2]
+; CHECK: add w[[C:[0-9]+]], w[[A]], w0
+; CHECK: add {{w[0-9]+}}, w[[C]], w[[B]]
+; FAST: f42
+; FAST: ldr w[[A:[0-9]+]], [x1]
+; FAST: ldr w[[B:[0-9]+]], [x2]
+; FAST: add w[[C:[0-9]+]], w[[A]], w0
+; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
+ %i1 = getelementptr inbounds %struct.s42* %s1, i64 0, i32 0
+ %0 = load i32* %i1, align 4, !tbaa !0
+ %i2 = getelementptr inbounds %struct.s42* %s2, i64 0, i32 0
+ %1 = load i32* %i2, align 4, !tbaa !0
+ %s = getelementptr inbounds %struct.s42* %s1, i64 0, i32 1
+ %2 = load i16* %s, align 2, !tbaa !3
+ %conv = sext i16 %2 to i32
+ %s5 = getelementptr inbounds %struct.s42* %s2, i64 0, i32 1
+ %3 = load i16* %s5, align 2, !tbaa !3
+ %conv6 = sext i16 %3 to i32
+ %add = add i32 %0, %i
+ %add3 = add i32 %add, %1
+ %add4 = add i32 %add3, %conv
+ %add7 = add i32 %add4, %conv6
+ ret i32 %add7
+}
+
+; For s1, we allocate a 22-byte space, pass its address via x1
+define i32 @caller42() #3 {
+entry:
+; CHECK: caller42
+; CHECK: str {{x[0-9]+}}, [sp, #48]
+; CHECK: str {{q[0-9]+}}, [sp, #32]
+; CHECK: str {{x[0-9]+}}, [sp, #16]
+; CHECK: str {{q[0-9]+}}, [sp]
+; CHECK: add x1, sp, #32
+; CHECK: mov x2, sp
+; Space for s1 is allocated at sp+32
+; Space for s2 is allocated at sp
+
+; FAST: caller42
+; FAST: sub sp, sp, #96
+; Space for s1 is allocated at fp-24 = sp+72
+; Space for s2 is allocated at sp+48
+; FAST: sub x[[A:[0-9]+]], x29, #24
+; FAST: add x[[A:[0-9]+]], sp, #48
+; Call memcpy with size = 24 (0x18)
+; FAST: orr {{x[0-9]+}}, xzr, #0x18
+ %tmp = alloca %struct.s42, align 4
+ %tmp1 = alloca %struct.s42, align 4
+ %0 = bitcast %struct.s42* %tmp to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s42* @g42 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ %1 = bitcast %struct.s42* %tmp1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s42* @g42_2 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ %call = call i32 @f42(i32 3, %struct.s42* %tmp, %struct.s42* %tmp1) #5
+ ret i32 %call
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #4
+
+declare i32 @f42_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
+ i32 %i7, i32 %i8, i32 %i9, %struct.s42* nocapture %s1,
+ %struct.s42* nocapture %s2) #2
+
+define i32 @caller42_stack() #3 {
+entry:
+; CHECK: caller42_stack
+; CHECK: mov x29, sp
+; CHECK: sub sp, sp, #96
+; CHECK: stur {{x[0-9]+}}, [x29, #-16]
+; CHECK: stur {{q[0-9]+}}, [x29, #-32]
+; CHECK: str {{x[0-9]+}}, [sp, #48]
+; CHECK: str {{q[0-9]+}}, [sp, #32]
+; Space for s1 is allocated at x29-32 = sp+64
+; Space for s2 is allocated at sp+32
+; CHECK: add x[[B:[0-9]+]], sp, #32
+; CHECK: str x[[B]], [sp, #16]
+; CHECK: sub x[[A:[0-9]+]], x29, #32
+; Address of s1 is passed on stack at sp+8
+; CHECK: str x[[A]], [sp, #8]
+; CHECK: movz w[[C:[0-9]+]], #0x9
+; CHECK: str w[[C]], [sp]
+
+; FAST: caller42_stack
+; Space for s1 is allocated at fp-24
+; Space for s2 is allocated at fp-48
+; FAST: sub x[[A:[0-9]+]], x29, #24
+; FAST: sub x[[B:[0-9]+]], x29, #48
+; Call memcpy with size = 24 (0x18)
+; FAST: orr {{x[0-9]+}}, xzr, #0x18
+; FAST: str {{w[0-9]+}}, [sp]
+; Address of s1 is passed on stack at sp+8
+; FAST: str {{x[0-9]+}}, [sp, #8]
+; FAST: str {{x[0-9]+}}, [sp, #16]
+ %tmp = alloca %struct.s42, align 4
+ %tmp1 = alloca %struct.s42, align 4
+ %0 = bitcast %struct.s42* %tmp to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s42* @g42 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ %1 = bitcast %struct.s42* %tmp1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s42* @g42_2 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ %call = call i32 @f42_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, %struct.s42* %tmp, %struct.s42* %tmp1) #5
+ ret i32 %call
+}
+
+; structs with size of 22 bytes, alignment of 16
+; passed indirectly in x1 and x2
+define i32 @f43(i32 %i, %struct.s43* nocapture %s1, %struct.s43* nocapture %s2) #2 {
+entry:
+; CHECK: f43
+; CHECK: ldr w[[A:[0-9]+]], [x1]
+; CHECK: ldr w[[B:[0-9]+]], [x2]
+; CHECK: add w[[C:[0-9]+]], w[[A]], w0
+; CHECK: add {{w[0-9]+}}, w[[C]], w[[B]]
+; FAST: f43
+; FAST: ldr w[[A:[0-9]+]], [x1]
+; FAST: ldr w[[B:[0-9]+]], [x2]
+; FAST: add w[[C:[0-9]+]], w[[A]], w0
+; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
+ %i1 = getelementptr inbounds %struct.s43* %s1, i64 0, i32 0
+ %0 = load i32* %i1, align 4, !tbaa !0
+ %i2 = getelementptr inbounds %struct.s43* %s2, i64 0, i32 0
+ %1 = load i32* %i2, align 4, !tbaa !0
+ %s = getelementptr inbounds %struct.s43* %s1, i64 0, i32 1
+ %2 = load i16* %s, align 2, !tbaa !3
+ %conv = sext i16 %2 to i32
+ %s5 = getelementptr inbounds %struct.s43* %s2, i64 0, i32 1
+ %3 = load i16* %s5, align 2, !tbaa !3
+ %conv6 = sext i16 %3 to i32
+ %add = add i32 %0, %i
+ %add3 = add i32 %add, %1
+ %add4 = add i32 %add3, %conv
+ %add7 = add i32 %add4, %conv6
+ ret i32 %add7
+}
+
+define i32 @caller43() #3 {
+entry:
+; CHECK: caller43
+; CHECK: str {{q[0-9]+}}, [sp, #48]
+; CHECK: str {{q[0-9]+}}, [sp, #32]
+; CHECK: str {{q[0-9]+}}, [sp, #16]
+; CHECK: str {{q[0-9]+}}, [sp]
+; CHECK: add x1, sp, #32
+; CHECK: mov x2, sp
+; Space for s1 is allocated at sp+32
+; Space for s2 is allocated at sp
+
+; FAST: caller43
+; FAST: mov x29, sp
+; Space for s1 is allocated at sp+32
+; Space for s2 is allocated at sp
+; FAST: add x1, sp, #32
+; FAST: mov x2, sp
+; FAST: str {{x[0-9]+}}, [sp, #32]
+; FAST: str {{x[0-9]+}}, [sp, #40]
+; FAST: str {{x[0-9]+}}, [sp, #48]
+; FAST: str {{x[0-9]+}}, [sp, #56]
+; FAST: str {{x[0-9]+}}, [sp]
+; FAST: str {{x[0-9]+}}, [sp, #8]
+; FAST: str {{x[0-9]+}}, [sp, #16]
+; FAST: str {{x[0-9]+}}, [sp, #24]
+ %tmp = alloca %struct.s43, align 16
+ %tmp1 = alloca %struct.s43, align 16
+ %0 = bitcast %struct.s43* %tmp to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s43* @g43 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ %1 = bitcast %struct.s43* %tmp1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s43* @g43_2 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ %call = call i32 @f43(i32 3, %struct.s43* %tmp, %struct.s43* %tmp1) #5
+ ret i32 %call
+}
+
+declare i32 @f43_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
+ i32 %i7, i32 %i8, i32 %i9, %struct.s43* nocapture %s1,
+ %struct.s43* nocapture %s2) #2
+
+define i32 @caller43_stack() #3 {
+entry:
+; CHECK: caller43_stack
+; CHECK: mov x29, sp
+; CHECK: sub sp, sp, #96
+; CHECK: stur {{q[0-9]+}}, [x29, #-16]
+; CHECK: stur {{q[0-9]+}}, [x29, #-32]
+; CHECK: str {{q[0-9]+}}, [sp, #48]
+; CHECK: str {{q[0-9]+}}, [sp, #32]
+; Space for s1 is allocated at x29-32 = sp+64
+; Space for s2 is allocated at sp+32
+; CHECK: add x[[B:[0-9]+]], sp, #32
+; CHECK: str x[[B]], [sp, #16]
+; CHECK: sub x[[A:[0-9]+]], x29, #32
+; Address of s1 is passed on stack at sp+8
+; CHECK: str x[[A]], [sp, #8]
+; CHECK: movz w[[C:[0-9]+]], #0x9
+; CHECK: str w[[C]], [sp]
+
+; FAST: caller43_stack
+; FAST: sub sp, sp, #96
+; Space for s1 is allocated at fp-32 = sp+64
+; Space for s2 is allocated at sp+32
+; FAST: sub x[[A:[0-9]+]], x29, #32
+; FAST: add x[[B:[0-9]+]], sp, #32
+; FAST: stur {{x[0-9]+}}, [x29, #-32]
+; FAST: stur {{x[0-9]+}}, [x29, #-24]
+; FAST: stur {{x[0-9]+}}, [x29, #-16]
+; FAST: stur {{x[0-9]+}}, [x29, #-8]
+; FAST: str {{x[0-9]+}}, [sp, #32]
+; FAST: str {{x[0-9]+}}, [sp, #40]
+; FAST: str {{x[0-9]+}}, [sp, #48]
+; FAST: str {{x[0-9]+}}, [sp, #56]
+; FAST: str {{w[0-9]+}}, [sp]
+; Address of s1 is passed on stack at sp+8
+; FAST: str {{x[0-9]+}}, [sp, #8]
+; FAST: str {{x[0-9]+}}, [sp, #16]
+ %tmp = alloca %struct.s43, align 16
+ %tmp1 = alloca %struct.s43, align 16
+ %0 = bitcast %struct.s43* %tmp to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s43* @g43 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ %1 = bitcast %struct.s43* %tmp1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s43* @g43_2 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ %call = call i32 @f43_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, %struct.s43* %tmp, %struct.s43* %tmp1) #5
+ ret i32 %call
+}
+
+; rdar://13668927
+; Check that we don't split an i128.
+declare i32 @callee_i128_split(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5,
+ i32 %i6, i32 %i7, i128 %s1, i32 %i8)
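+; A rough C view (an assumed sketch, for illustration): the __int128 argument
+; must not be split between registers and the stack; per the comments below it
+; is passed entirely on the stack at [sp], with the trailing i32 at [sp, #16].
+;   int callee_i128_split(int, int, int, int, int, int, int, __int128, int);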
+
+define i32 @i128_split() {
+entry:
+; CHECK: i128_split
+; "i128 %0" should be on stack at [sp].
+; "i32 8" should be on stack at [sp, #16].
+; CHECK: str {{w[0-9]+}}, [sp, #16]
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp]
+; FAST: i128_split
+; FAST: sub sp, sp, #48
+; FAST: mov x[[ADDR:[0-9]+]], sp
+; FAST: str {{w[0-9]+}}, [x[[ADDR]], #16]
+; Load/Store opt is disabled with -O0, so the i128 is split.
+; FAST: str {{x[0-9]+}}, [x[[ADDR]], #8]
+; FAST: str {{x[0-9]+}}, [x[[ADDR]]]
+ %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16
+ %call = tail call i32 @callee_i128_split(i32 1, i32 2, i32 3, i32 4, i32 5,
+ i32 6, i32 7, i128 %0, i32 8) #5
+ ret i32 %call
+}
+
+declare i32 @callee_i64(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5,
+ i32 %i6, i32 %i7, i64 %s1, i32 %i8)
+
+define i32 @i64_split() {
+entry:
+; CHECK: i64_split
+; "i64 %0" should be in register x7.
+; "i32 8" should be on stack at [sp].
+; CHECK: ldr x7, [{{x[0-9]+}}]
+; CHECK: str {{w[0-9]+}}, [sp]
+; FAST: i64_split
+; FAST: ldr x7, [{{x[0-9]+}}]
+; FAST: str {{w[0-9]+}}, [sp]
+ %0 = load i64* bitcast (%struct.s41* @g41 to i64*), align 16
+ %call = tail call i32 @callee_i64(i32 1, i32 2, i32 3, i32 4, i32 5,
+ i32 6, i32 7, i64 %0, i32 8) #5
+ ret i32 %call
+}
+
+attributes #0 = { noinline nounwind readnone "fp-contract-model"="standard" "relocation-model"="pic" "ssp-buffers-size"="8" }
+attributes #1 = { nounwind readonly "fp-contract-model"="standard" "relocation-model"="pic" "ssp-buffers-size"="8" }
+attributes #2 = { noinline nounwind readonly "fp-contract-model"="standard" "relocation-model"="pic" "ssp-buffers-size"="8" }
+attributes #3 = { nounwind "fp-contract-model"="standard" "relocation-model"="pic" "ssp-buffers-size"="8" }
+attributes #4 = { nounwind }
+attributes #5 = { nobuiltin }
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"short", metadata !1}
+!4 = metadata !{i64 0, i64 4, metadata !0, i64 4, i64 2, metadata !3, i64 8, i64 4, metadata !0, i64 12, i64 2, metadata !3, i64 16, i64 4, metadata !0, i64 20, i64 2, metadata !3}
diff --git a/test/CodeGen/AArch64/arm64-addp.ll b/test/CodeGen/AArch64/arm64-addp.ll
new file mode 100644
index 000000000000..3f1e5c5d44e3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-addp.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+
+define double @foo(<2 x double> %a) nounwind {
+; CHECK-LABEL: foo:
+; CHECK: faddp.2d d0, v0
+; CHECK-NEXT: ret
+ %lane0.i = extractelement <2 x double> %a, i32 0
+ %lane1.i = extractelement <2 x double> %a, i32 1
+ %vpaddd.i = fadd double %lane0.i, %lane1.i
+ ret double %vpaddd.i
+}
+
+define i64 @foo0(<2 x i64> %a) nounwind {
+; CHECK-LABEL: foo0:
+; CHECK: addp.2d d0, v0
+; CHECK-NEXT: fmov x0, d0
+; CHECK-NEXT: ret
+ %lane0.i = extractelement <2 x i64> %a, i32 0
+ %lane1.i = extractelement <2 x i64> %a, i32 1
+ %vpaddd.i = add i64 %lane0.i, %lane1.i
+ ret i64 %vpaddd.i
+}
+
+define float @foo1(<2 x float> %a) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK: faddp.2s
+; CHECK-NEXT: ret
+ %lane0.i = extractelement <2 x float> %a, i32 0
+ %lane1.i = extractelement <2 x float> %a, i32 1
+ %vpaddd.i = fadd float %lane0.i, %lane1.i
+ ret float %vpaddd.i
+}
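+
+; Note: extracting both lanes of a two-element vector and adding them is
+; recognized as a pairwise-add reduction, so it is selected to a single
+; faddp/addp on the low scalar register instead of two extracts and a
+; scalar add.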
diff --git a/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
new file mode 100644
index 000000000000..08fb8c90c484
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
@@ -0,0 +1,171 @@
+; RUN: llc -O3 -mtriple arm64-apple-ios3 %s -o - | FileCheck %s
+; <rdar://problem/13621857>
+
+@block = common global i8* null, align 8
+
+define i32 @fct(i32 %i1, i32 %i2) {
+; CHECK: @fct
+; The sign extension is used more than once, so it should not be folded into
+; the addressing mode. However, CodeGenPrepare does not currently share the
+; sext across its uses, so the fold happens anyway; that is why the check
+; below is left disabled.
+; _CHECK-NOT_: , sxtw]
+entry:
+ %idxprom = sext i32 %i1 to i64
+ %0 = load i8** @block, align 8
+ %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
+ %1 = load i8* %arrayidx, align 1
+ %idxprom1 = sext i32 %i2 to i64
+ %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
+ %2 = load i8* %arrayidx2, align 1
+ %cmp = icmp eq i8 %1, %2
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %cmp7 = icmp ugt i8 %1, %2
+ %conv8 = zext i1 %cmp7 to i32
+ br label %return
+
+if.end: ; preds = %entry
+ %inc = add nsw i32 %i1, 1
+ %inc9 = add nsw i32 %i2, 1
+ %idxprom10 = sext i32 %inc to i64
+ %arrayidx11 = getelementptr inbounds i8* %0, i64 %idxprom10
+ %3 = load i8* %arrayidx11, align 1
+ %idxprom12 = sext i32 %inc9 to i64
+ %arrayidx13 = getelementptr inbounds i8* %0, i64 %idxprom12
+ %4 = load i8* %arrayidx13, align 1
+ %cmp16 = icmp eq i8 %3, %4
+ br i1 %cmp16, label %if.end23, label %if.then18
+
+if.then18: ; preds = %if.end
+ %cmp21 = icmp ugt i8 %3, %4
+ %conv22 = zext i1 %cmp21 to i32
+ br label %return
+
+if.end23: ; preds = %if.end
+ %inc24 = add nsw i32 %i1, 2
+ %inc25 = add nsw i32 %i2, 2
+ %idxprom26 = sext i32 %inc24 to i64
+ %arrayidx27 = getelementptr inbounds i8* %0, i64 %idxprom26
+ %5 = load i8* %arrayidx27, align 1
+ %idxprom28 = sext i32 %inc25 to i64
+ %arrayidx29 = getelementptr inbounds i8* %0, i64 %idxprom28
+ %6 = load i8* %arrayidx29, align 1
+ %cmp32 = icmp eq i8 %5, %6
+ br i1 %cmp32, label %return, label %if.then34
+
+if.then34: ; preds = %if.end23
+ %cmp37 = icmp ugt i8 %5, %6
+ %conv38 = zext i1 %cmp37 to i32
+ br label %return
+
+return: ; preds = %if.end23, %if.then34, %if.then18, %if.then
+ %retval.0 = phi i32 [ %conv8, %if.then ], [ %conv22, %if.then18 ], [ %conv38, %if.then34 ], [ 1, %if.end23 ]
+ ret i32 %retval.0
+}
+
+define i32 @fct1(i32 %i1, i32 %i2) optsize {
+; CHECK: @fct1
+; Addressing modes are folded when optimizing for code size.
+; CHECK: , sxtw]
+; CHECK: , sxtw]
+entry:
+ %idxprom = sext i32 %i1 to i64
+ %0 = load i8** @block, align 8
+ %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
+ %1 = load i8* %arrayidx, align 1
+ %idxprom1 = sext i32 %i2 to i64
+ %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
+ %2 = load i8* %arrayidx2, align 1
+ %cmp = icmp eq i8 %1, %2
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %cmp7 = icmp ugt i8 %1, %2
+ %conv8 = zext i1 %cmp7 to i32
+ br label %return
+
+if.end: ; preds = %entry
+ %inc = add nsw i32 %i1, 1
+ %inc9 = add nsw i32 %i2, 1
+ %idxprom10 = sext i32 %inc to i64
+ %arrayidx11 = getelementptr inbounds i8* %0, i64 %idxprom10
+ %3 = load i8* %arrayidx11, align 1
+ %idxprom12 = sext i32 %inc9 to i64
+ %arrayidx13 = getelementptr inbounds i8* %0, i64 %idxprom12
+ %4 = load i8* %arrayidx13, align 1
+ %cmp16 = icmp eq i8 %3, %4
+ br i1 %cmp16, label %if.end23, label %if.then18
+
+if.then18: ; preds = %if.end
+ %cmp21 = icmp ugt i8 %3, %4
+ %conv22 = zext i1 %cmp21 to i32
+ br label %return
+
+if.end23: ; preds = %if.end
+ %inc24 = add nsw i32 %i1, 2
+ %inc25 = add nsw i32 %i2, 2
+ %idxprom26 = sext i32 %inc24 to i64
+ %arrayidx27 = getelementptr inbounds i8* %0, i64 %idxprom26
+ %5 = load i8* %arrayidx27, align 1
+ %idxprom28 = sext i32 %inc25 to i64
+ %arrayidx29 = getelementptr inbounds i8* %0, i64 %idxprom28
+ %6 = load i8* %arrayidx29, align 1
+ %cmp32 = icmp eq i8 %5, %6
+ br i1 %cmp32, label %return, label %if.then34
+
+if.then34: ; preds = %if.end23
+ %cmp37 = icmp ugt i8 %5, %6
+ %conv38 = zext i1 %cmp37 to i32
+ br label %return
+
+return: ; preds = %if.end23, %if.then34, %if.then18, %if.then
+ %retval.0 = phi i32 [ %conv8, %if.then ], [ %conv22, %if.then18 ], [ %conv38, %if.then34 ], [ 1, %if.end23 ]
+ ret i32 %retval.0
+}
+
+; CHECK: @test
+; CHECK-NOT: , uxtw #2]
+define i32 @test(i32* %array, i8 zeroext %c, i32 %arg) {
+entry:
+ %conv = zext i8 %c to i32
+ %add = sub i32 0, %arg
+ %tobool = icmp eq i32 %conv, %add
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %idxprom = zext i8 %c to i64
+ %arrayidx = getelementptr inbounds i32* %array, i64 %idxprom
+ %0 = load volatile i32* %arrayidx, align 4
+ %1 = load volatile i32* %arrayidx, align 4
+ %add3 = add nsw i32 %1, %0
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ %res.0 = phi i32 [ %add3, %if.then ], [ 0, %entry ]
+ ret i32 %res.0
+}
+
+
+; CHECK: @test2
+; CHECK: , uxtw #2]
+; CHECK: , uxtw #2]
+define i32 @test2(i32* %array, i8 zeroext %c, i32 %arg) optsize {
+entry:
+ %conv = zext i8 %c to i32
+ %add = sub i32 0, %arg
+ %tobool = icmp eq i32 %conv, %add
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %idxprom = zext i8 %c to i64
+ %arrayidx = getelementptr inbounds i32* %array, i64 %idxprom
+ %0 = load volatile i32* %arrayidx, align 4
+ %1 = load volatile i32* %arrayidx, align 4
+ %add3 = add nsw i32 %1, %0
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ %res.0 = phi i32 [ %add3, %if.then ], [ 0, %entry ]
+ ret i32 %res.0
+}
diff --git a/test/CodeGen/AArch64/arm64-addr-type-promotion.ll b/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
new file mode 100644
index 000000000000..1a3ca8bd5b8c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
@@ -0,0 +1,82 @@
+; RUN: llc -march arm64 < %s | FileCheck %s
+; rdar://13452552
+; ModuleID = 'reduced_test.ll'
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios3.0.0"
+
+@block = common global i8* null, align 8
+
+define zeroext i8 @fullGtU(i32 %i1, i32 %i2) {
+; CHECK: fullGtU
+; CHECK: adrp [[PAGE:x[0-9]+]], _block@GOTPAGE
+; CHECK: ldr [[ADDR:x[0-9]+]], {{\[}}[[PAGE]], _block@GOTPAGEOFF]
+; CHECK-NEXT: ldr [[BLOCKBASE:x[0-9]+]], {{\[}}[[ADDR]]]
+; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], w0, sxtw]
+; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], w1, sxtw]
+; CHECK-NEXT: cmp [[BLOCKVAL1]], [[BLOCKVAL2]]
+; CHECK-NEXT: b.ne
+; Next BB
+; CHECK: add [[BLOCKBASE2:x[0-9]+]], [[BLOCKBASE]], w1, sxtw
+; CHECK-NEXT: add [[BLOCKBASE1:x[0-9]+]], [[BLOCKBASE]], w0, sxtw
+; CHECK-NEXT: ldrb [[LOADEDVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE1]], #1]
+; CHECK-NEXT: ldrb [[LOADEDVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE2]], #1]
+; CHECK-NEXT: cmp [[LOADEDVAL1]], [[LOADEDVAL2]]
+; CHECK-NEXT: b.ne
+; Next BB
+; CHECK: ldrb [[LOADEDVAL3:w[0-9]+]], {{\[}}[[BLOCKBASE1]], #2]
+; CHECK-NEXT: ldrb [[LOADEDVAL4:w[0-9]+]], {{\[}}[[BLOCKBASE2]], #2]
+; CHECK-NEXT: cmp [[LOADEDVAL3]], [[LOADEDVAL4]]
+entry:
+ %idxprom = sext i32 %i1 to i64
+ %tmp = load i8** @block, align 8
+ %arrayidx = getelementptr inbounds i8* %tmp, i64 %idxprom
+ %tmp1 = load i8* %arrayidx, align 1
+ %idxprom1 = sext i32 %i2 to i64
+ %arrayidx2 = getelementptr inbounds i8* %tmp, i64 %idxprom1
+ %tmp2 = load i8* %arrayidx2, align 1
+ %cmp = icmp eq i8 %tmp1, %tmp2
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %cmp7 = icmp ugt i8 %tmp1, %tmp2
+ %conv9 = zext i1 %cmp7 to i8
+ br label %return
+
+if.end: ; preds = %entry
+ %inc = add nsw i32 %i1, 1
+ %inc10 = add nsw i32 %i2, 1
+ %idxprom11 = sext i32 %inc to i64
+ %arrayidx12 = getelementptr inbounds i8* %tmp, i64 %idxprom11
+ %tmp3 = load i8* %arrayidx12, align 1
+ %idxprom13 = sext i32 %inc10 to i64
+ %arrayidx14 = getelementptr inbounds i8* %tmp, i64 %idxprom13
+ %tmp4 = load i8* %arrayidx14, align 1
+ %cmp17 = icmp eq i8 %tmp3, %tmp4
+ br i1 %cmp17, label %if.end25, label %if.then19
+
+if.then19: ; preds = %if.end
+ %cmp22 = icmp ugt i8 %tmp3, %tmp4
+ %conv24 = zext i1 %cmp22 to i8
+ br label %return
+
+if.end25: ; preds = %if.end
+ %inc26 = add nsw i32 %i1, 2
+ %inc27 = add nsw i32 %i2, 2
+ %idxprom28 = sext i32 %inc26 to i64
+ %arrayidx29 = getelementptr inbounds i8* %tmp, i64 %idxprom28
+ %tmp5 = load i8* %arrayidx29, align 1
+ %idxprom30 = sext i32 %inc27 to i64
+ %arrayidx31 = getelementptr inbounds i8* %tmp, i64 %idxprom30
+ %tmp6 = load i8* %arrayidx31, align 1
+ %cmp34 = icmp eq i8 %tmp5, %tmp6
+ br i1 %cmp34, label %return, label %if.then36
+
+if.then36: ; preds = %if.end25
+ %cmp39 = icmp ugt i8 %tmp5, %tmp6
+ %conv41 = zext i1 %cmp39 to i8
+ br label %return
+
+return: ; preds = %if.then36, %if.end25, %if.then19, %if.then
+ %retval.0 = phi i8 [ %conv9, %if.then ], [ %conv24, %if.then19 ], [ %conv41, %if.then36 ], [ 0, %if.end25 ]
+ ret i8 %retval.0
+}
diff --git a/test/CodeGen/AArch64/arm64-addrmode.ll b/test/CodeGen/AArch64/arm64-addrmode.ll
new file mode 100644
index 000000000000..700fba80149a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-addrmode.ll
@@ -0,0 +1,72 @@
+; RUN: llc -march=arm64 < %s | FileCheck %s
+; rdar://10232252
+
+@object = external hidden global i64, section "__DATA, __objc_ivar", align 8
+
+; base + offset (imm9)
+; CHECK: @t1
+; CHECK: ldr xzr, [x{{[0-9]+}}, #8]
+; CHECK: ret
+define void @t1() {
+ %incdec.ptr = getelementptr inbounds i64* @object, i64 1
+ %tmp = load volatile i64* %incdec.ptr, align 8
+ ret void
+}
+
+; base + offset (> imm9)
+; CHECK: @t2
+; CHECK: sub [[ADDREG:x[0-9]+]], x{{[0-9]+}}, #264
+; CHECK: ldr xzr, [
+; CHECK: [[ADDREG]]]
+; CHECK: ret
+define void @t2() {
+ %incdec.ptr = getelementptr inbounds i64* @object, i64 -33
+ %tmp = load volatile i64* %incdec.ptr, align 8
+ ret void
+}
+
+; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
+; CHECK: @t3
+; CHECK: ldr xzr, [x{{[0-9]+}}, #32760]
+; CHECK: ret
+define void @t3() {
+ %incdec.ptr = getelementptr inbounds i64* @object, i64 4095
+ %tmp = load volatile i64* %incdec.ptr, align 8
+ ret void
+}
+
+; base + unsigned offset (> imm12 * size of type in bytes)
+; CHECK: @t4
+; CHECK: add [[ADDREG:x[0-9]+]], x{{[0-9]+}}, #8, lsl #12
+; CHECK: ldr xzr, [
+; CHECK: [[ADDREG]]]
+; CHECK: ret
+define void @t4() {
+ %incdec.ptr = getelementptr inbounds i64* @object, i64 4096
+ %tmp = load volatile i64* %incdec.ptr, align 8
+ ret void
+}
+
+; base + reg
+; CHECK: @t5
+; CHECK: ldr xzr, [x{{[0-9]+}}, x{{[0-9]+}}, lsl #3]
+; CHECK: ret
+define void @t5(i64 %a) {
+ %incdec.ptr = getelementptr inbounds i64* @object, i64 %a
+ %tmp = load volatile i64* %incdec.ptr, align 8
+ ret void
+}
+
+; base + reg + imm
+; CHECK: @t6
+; CHECK: add [[ADDREG:x[0-9]+]], x{{[0-9]+}}, x{{[0-9]+}}, lsl #3
+; CHECK-NEXT: add [[ADDREG]], [[ADDREG]], #8, lsl #12
+; CHECK: ldr xzr, [
+; CHECK: [[ADDREG]]]
+; CHECK: ret
+define void @t6(i64 %a) {
+ %tmp1 = getelementptr inbounds i64* @object, i64 %a
+ %incdec.ptr = getelementptr inbounds i64* %tmp1, i64 4096
+ %tmp = load volatile i64* %incdec.ptr, align 8
+ ret void
+}
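+
+; Summary of the addressing modes exercised above: a signed 9-bit unscaled
+; immediate covers offsets in [-256, 255]; an unsigned 12-bit immediate is
+; scaled by the access size (so up to #32760 for an 8-byte load); a register
+; offset may be shifted left by log2 of the access size (lsl #3 here); and
+; offsets outside those ranges are materialized with a separate add/sub
+; before the load.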
diff --git a/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll b/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
new file mode 100644
index 000000000000..f396bc991708
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin -enable-misched=false | FileCheck %s
+
+; rdar://12713765
+; Make sure we are not creating stack objects that are assumed to be 64-byte
+; aligned.
+@T3_retval = common global <16 x float> zeroinitializer, align 16
+
+define void @test(<16 x float>* noalias sret %agg.result) nounwind ssp {
+entry:
+; CHECK: test
+; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [sp, #32]
+; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [sp]
+; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], {{\[}}[[BASE:x[0-9]+]], #32]
+; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], {{\[}}[[BASE]]]
+ %retval = alloca <16 x float>, align 16
+ %0 = load <16 x float>* @T3_retval, align 16
+ store <16 x float> %0, <16 x float>* %retval
+ %1 = load <16 x float>* %retval
+ store <16 x float> %1, <16 x float>* %agg.result, align 16
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll b/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
new file mode 100644
index 000000000000..3750f31b3734
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=arm64 -mcpu=cyclone < %s | FileCheck %s
+
+; CHECK: foo
+; CHECK: ldr w[[REG:[0-9]+]], [x19, #264]
+; CHECK: str w[[REG]], [x19, #132]
+; CHECK: ldr w{{[0-9]+}}, [x19, #264]
+
+define i32 @foo(i32 %a) nounwind {
+ %retval = alloca i32, align 4
+ %a.addr = alloca i32, align 4
+ %arr = alloca [32 x i32], align 4
+ %i = alloca i32, align 4
+ %arr2 = alloca [32 x i32], align 4
+ %j = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ %tmp = load i32* %a.addr, align 4
+ %tmp1 = zext i32 %tmp to i64
+ %v = mul i64 4, %tmp1
+ %vla = alloca i8, i64 %v, align 4
+ %tmp2 = bitcast i8* %vla to i32*
+ %tmp3 = load i32* %a.addr, align 4
+ store i32 %tmp3, i32* %i, align 4
+ %tmp4 = load i32* %a.addr, align 4
+ store i32 %tmp4, i32* %j, align 4
+ %tmp5 = load i32* %j, align 4
+ store i32 %tmp5, i32* %retval
+ %x = load i32* %retval
+ ret i32 %x
+}
diff --git a/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll b/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
new file mode 100644
index 000000000000..419497722f4c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
@@ -0,0 +1,72 @@
+; RUN: llc -O1 -march=arm64 -enable-andcmp-sinking=true < %s | FileCheck %s
+; ModuleID = 'and-cbz-extr-mr.bc'
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, i8* %str1, i8* %str2, i8* %str3, i8* %str4, i8* %str5, i8* %str6, i8* %str7, i8* %str8, i8* %str9, i8* %str10, i8* %str11, i8* %str12, i8* %str13, i32 %int1, i8* %str14) unnamed_addr #0 align 2 {
+; CHECK: _foo:
+entry:
+ %tobool = icmp eq i8* %str14, null
+ br i1 %tobool, label %return, label %if.end
+
+; CHECK: %if.end
+; CHECK: tbz
+if.end: ; preds = %entry
+ %and.i.i.i = and i32 %int1, 4
+ %tobool.i.i.i = icmp eq i32 %and.i.i.i, 0
+ br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i
+
+land.rhs.i: ; preds = %if.end
+ %cmp.i.i.i = icmp eq i8* %str12, %str13
+ br i1 %cmp.i.i.i, label %if.then3, label %lor.rhs.i.i.i
+
+lor.rhs.i.i.i: ; preds = %land.rhs.i
+ %cmp.i13.i.i.i = icmp eq i8* %str10, %str11
+ br i1 %cmp.i13.i.i.i, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, label %if.end5
+
+_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit: ; preds = %lor.rhs.i.i.i
+ %cmp.i.i.i.i = icmp eq i8* %str8, %str9
+ br i1 %cmp.i.i.i.i, label %if.then3, label %if.end5
+
+if.then3: ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %land.rhs.i
+ %tmp11 = load i8* %str14, align 8
+ %tmp12 = and i8 %tmp11, 2
+ %tmp13 = icmp ne i8 %tmp12, 0
+ br label %return
+
+if.end5: ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %lor.rhs.i.i.i
+; CHECK: %if.end5
+; CHECK: tbz
+ br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i19
+
+land.rhs.i19: ; preds = %if.end5
+ %cmp.i.i.i18 = icmp eq i8* %str6, %str7
+ br i1 %cmp.i.i.i18, label %if.then7, label %lor.rhs.i.i.i23
+
+lor.rhs.i.i.i23: ; preds = %land.rhs.i19
+ %cmp.i13.i.i.i22 = icmp eq i8* %str3, %str4
+ br i1 %cmp.i13.i.i.i22, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, label %if.end12
+
+_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28: ; preds = %lor.rhs.i.i.i23
+ %cmp.i.i.i.i26 = icmp eq i8* %str1, %str2
+ br i1 %cmp.i.i.i.i26, label %if.then7, label %if.end12
+
+if.then7: ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, %land.rhs.i19
+ br i1 %isTextField, label %if.then9, label %if.end12
+
+if.then9: ; preds = %if.then7
+ %tmp23 = load i8* %str5, align 8
+ %tmp24 = and i8 %tmp23, 2
+ %tmp25 = icmp ne i8 %tmp24, 0
+ br label %return
+
+if.end12: ; preds = %if.then7, %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, %lor.rhs.i.i.i23, %if.end5, %if.end
+ %lnot = xor i1 %IsEditable, true
+ br label %return
+
+return: ; preds = %if.end12, %if.then9, %if.then3, %entry
+ %retval.0 = phi i1 [ %tmp13, %if.then3 ], [ %tmp25, %if.then9 ], [ %lnot, %if.end12 ], [ true, %entry ]
+ ret i1 %retval.0
+}
+
+attributes #0 = { nounwind ssp }
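+
+; Note: an 'and' with a single-bit mask followed by a compare against zero
+; and a conditional branch is exactly what tbz/tbnz encode directly, which is
+; why the checks above only expect tbz rather than an and/cmp/b.eq sequence.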
diff --git a/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll b/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll
new file mode 100644
index 000000000000..38661a5f38f3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll
@@ -0,0 +1,31 @@
+; RUN: llc %s -o - -aarch64-atomic-cfg-tidy=0 | FileCheck %s
+; Check that ANDS (tst) is not merged with ADD when the immediate
+; is not 0.
+; <rdar://problem/16693089>
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios"
+
+; CHECK-LABEL: tst1:
+; CHECK: add [[REG:w[0-9]+]], w{{[0-9]+}}, #1
+; CHECK: tst [[REG]], #0x1
+define void @tst1(i1 %tst, i32 %true) {
+entry:
+ br i1 %tst, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %result.09 = phi i32 [ %add2.result.0, %for.body ], [ 1, %entry ]
+ %i.08 = phi i32 [ %inc, %for.body ], [ 2, %entry ]
+ %and = and i32 %i.08, 1
+ %cmp1 = icmp eq i32 %and, 0
+ %add2.result.0 = select i1 %cmp1, i32 %true, i32 %result.09
+ %inc = add nsw i32 %i.08, 1
+ %cmp = icmp slt i32 %i.08, %true
+ br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge: ; preds = %for.body
+ %add2.result.0.lcssa = phi i32 [ %add2.result.0, %for.body ]
+ br label %for.end
+
+for.end: ; preds = %for.cond.for.end_crit_edge, %entry
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-anyregcc-crash.ll b/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
new file mode 100644
index 000000000000..241cf974c05b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
@@ -0,0 +1,19 @@
+; RUN: not llc < %s -mtriple=arm64-apple-darwin 2>&1 | FileCheck %s
+;
+; Check that misuse of anyregcc results in a compile time error.
+
+; CHECK: LLVM ERROR: ran out of registers during register allocation
+define i64 @anyreglimit(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
+ i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
+ i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
+ i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32) {
+entry:
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 32,
+ i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
+ i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
+ i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
+ i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32)
+ ret i64 %result
+}
+
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
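+
+; Note: anyregcc asks the register allocator to keep every patchpoint
+; argument in some register, and 32 live i64 values exceed the number of
+; general-purpose registers available, so allocation is expected to fail
+; with the error checked above.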
diff --git a/test/CodeGen/AArch64/arm64-anyregcc.ll b/test/CodeGen/AArch64/arm64-anyregcc.ll
new file mode 100644
index 000000000000..e26875d52f99
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-anyregcc.ll
@@ -0,0 +1,363 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+
+; Stackmap Header: no constants - 8 callsites
+; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 8
+; Num LargeConstants
+; CHECK-NEXT: .long 0
+; Num Callsites
+; CHECK-NEXT: .long 8
+
+; Functions and stack size
+; CHECK-NEXT: .quad _test
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _property_access1
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _property_access2
+; CHECK-NEXT: .quad 32
+; CHECK-NEXT: .quad _property_access3
+; CHECK-NEXT: .quad 32
+; CHECK-NEXT: .quad _anyreg_test1
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _anyreg_test2
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _patchpoint_spilldef
+; CHECK-NEXT: .quad 112
+; CHECK-NEXT: .quad _patchpoint_spillargs
+; CHECK-NEXT: .quad 128
+
+
+; test
+; CHECK-LABEL: .long L{{.*}}-_test
+; CHECK-NEXT: .short 0
+; 3 locations
+; CHECK-NEXT: .short 3
+; Loc 0: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Constant 3
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 3
+define i64 @test() nounwind ssp uwtable {
+entry:
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 0, i32 16, i8* null, i32 2, i32 1, i32 2, i64 3)
+ ret i64 0
+}
+
+; property access 1 - %obj is an anyreg call argument and should therefore be in a register
+; CHECK-LABEL: .long L{{.*}}-_property_access1
+; CHECK-NEXT: .short 0
+; 2 locations
+; CHECK-NEXT: .short 2
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
+entry:
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 1, i32 20, i8* %f, i32 1, i8* %obj)
+ ret i64 %ret
+}
+
+; property access 2 - %obj is an anyreg call argument and should therefore be in a register
+; CHECK-LABEL: .long L{{.*}}-_property_access2
+; CHECK-NEXT: .short 0
+; 2 locations
+; CHECK-NEXT: .short 2
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @property_access2() nounwind ssp uwtable {
+entry:
+ %obj = alloca i64, align 8
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %f, i32 1, i64* %obj)
+ ret i64 %ret
+}
+
+; property access 3 - %obj is a frame index
+; CHECK-LABEL: .long L{{.*}}-_property_access3
+; CHECK-NEXT: .short 0
+; 2 locations
+; CHECK-NEXT: .short 2
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Direct FP - 8
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 29
+; CHECK-NEXT: .long -8
+define i64 @property_access3() nounwind ssp uwtable {
+entry:
+ %obj = alloca i64, align 8
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 3, i32 20, i8* %f, i32 0, i64* %obj)
+ ret i64 %ret
+}
+
+; anyreg_test1
+; CHECK-LABEL: .long L{{.*}}-_anyreg_test1
+; CHECK-NEXT: .short 0
+; 14 locations
+; CHECK-NEXT: .short 14
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 3: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 4: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 5: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 6: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 7: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 8: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 9: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 10: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 11: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 12: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 13: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+entry:
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 4, i32 20, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ ret i64 %ret
+}
+
+; anyreg_test2
+; CHECK-LABEL: .long L{{.*}}-_anyreg_test2
+; CHECK-NEXT: .short 0
+; 14 locations
+; CHECK-NEXT: .short 14
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 3: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 4: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 5: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 6: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 7: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 8: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 9: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 10: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 11: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 12: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 13: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+entry:
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ ret i64 %ret
+}
+
+; Test spilling the return value of an anyregcc call.
+;
+; <rdar://problem/15432754> [JS] Assertion: "Folded a def to a non-store!"
+;
+; CHECK-LABEL: .long L{{.*}}-_patchpoint_spilldef
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 3
+; Loc 0: Register (some register that will be spilled to the stack)
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+ tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
+ ret i64 %result
+}
+
+; Test spilling the arguments of an anyregcc call.
+;
+; <rdar://problem/15487687> [JS] AnyRegCC argument ends up being spilled
+;
+; CHECK-LABEL: .long L{{.*}}-_patchpoint_spillargs
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 5
+; Loc 0: Return value in a Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Arg0 in a Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Arg1 in a Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 3: Arg2 spilled to FP -96
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 29
+; CHECK-NEXT: .long -96
+; Loc 4: Arg3 spilled to FP - 88
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 29
+; CHECK-NEXT: .long -88
+define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+ tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 13, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ ret i64 %result
+}
+
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
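+
+; Note on the location records checked above (stackmap format version 1, the
+; version asserted by the header checks): each record appears to consist of a
+; type byte (1 = Register, 2 = Direct, 3 = Indirect, 4 = Constant), a size
+; byte (8 for the 64-bit values here), a 16-bit DWARF register number, and a
+; 32-bit offset or small constant; DWARF register 29 is the AArch64 frame
+; pointer used by the FP-relative Direct/Indirect locations.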
diff --git a/test/CodeGen/AArch64/arm64-arith-saturating.ll b/test/CodeGen/AArch64/arm64-arith-saturating.ll
new file mode 100644
index 000000000000..78cd1fcb1a21
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-arith-saturating.ll
@@ -0,0 +1,153 @@
+; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+
+define i32 @qadds(<4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: qadds:
+; CHECK: sqadd s0, s0, s1
+ %vecext = extractelement <4 x i32> %b, i32 0
+ %vecext1 = extractelement <4 x i32> %c, i32 0
+ %vqadd.i = tail call i32 @llvm.aarch64.neon.sqadd.i32(i32 %vecext, i32 %vecext1) nounwind
+ ret i32 %vqadd.i
+}
+
+define i64 @qaddd(<2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: qaddd:
+; CHECK: sqadd d0, d0, d1
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vecext1 = extractelement <2 x i64> %c, i32 0
+ %vqadd.i = tail call i64 @llvm.aarch64.neon.sqadd.i64(i64 %vecext, i64 %vecext1) nounwind
+ ret i64 %vqadd.i
+}
+
+define i32 @uqadds(<4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: uqadds:
+; CHECK: uqadd s0, s0, s1
+ %vecext = extractelement <4 x i32> %b, i32 0
+ %vecext1 = extractelement <4 x i32> %c, i32 0
+ %vqadd.i = tail call i32 @llvm.aarch64.neon.uqadd.i32(i32 %vecext, i32 %vecext1) nounwind
+ ret i32 %vqadd.i
+}
+
+define i64 @uqaddd(<2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: uqaddd:
+; CHECK: uqadd d0, d0, d1
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vecext1 = extractelement <2 x i64> %c, i32 0
+ %vqadd.i = tail call i64 @llvm.aarch64.neon.uqadd.i64(i64 %vecext, i64 %vecext1) nounwind
+ ret i64 %vqadd.i
+}
+
+declare i64 @llvm.aarch64.neon.uqadd.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.uqadd.i32(i32, i32) nounwind readnone
+declare i64 @llvm.aarch64.neon.sqadd.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.sqadd.i32(i32, i32) nounwind readnone
+
+define i32 @qsubs(<4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: qsubs:
+; CHECK: sqsub s0, s0, s1
+ %vecext = extractelement <4 x i32> %b, i32 0
+ %vecext1 = extractelement <4 x i32> %c, i32 0
+ %vqsub.i = tail call i32 @llvm.aarch64.neon.sqsub.i32(i32 %vecext, i32 %vecext1) nounwind
+ ret i32 %vqsub.i
+}
+
+define i64 @qsubd(<2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: qsubd:
+; CHECK: sqsub d0, d0, d1
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vecext1 = extractelement <2 x i64> %c, i32 0
+ %vqsub.i = tail call i64 @llvm.aarch64.neon.sqsub.i64(i64 %vecext, i64 %vecext1) nounwind
+ ret i64 %vqsub.i
+}
+
+define i32 @uqsubs(<4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: uqsubs:
+; CHECK: uqsub s0, s0, s1
+ %vecext = extractelement <4 x i32> %b, i32 0
+ %vecext1 = extractelement <4 x i32> %c, i32 0
+ %vqsub.i = tail call i32 @llvm.aarch64.neon.uqsub.i32(i32 %vecext, i32 %vecext1) nounwind
+ ret i32 %vqsub.i
+}
+
+define i64 @uqsubd(<2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: uqsubd:
+; CHECK: uqsub d0, d0, d1
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vecext1 = extractelement <2 x i64> %c, i32 0
+ %vqsub.i = tail call i64 @llvm.aarch64.neon.uqsub.i64(i64 %vecext, i64 %vecext1) nounwind
+ ret i64 %vqsub.i
+}
+
+declare i64 @llvm.aarch64.neon.uqsub.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.uqsub.i32(i32, i32) nounwind readnone
+declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32) nounwind readnone
+
+define i32 @qabss(<4 x i32> %b, <4 x i32> %c) nounwind readnone {
+; CHECK-LABEL: qabss:
+; CHECK: sqabs s0, s0
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %b, i32 0
+ %vqabs.i = tail call i32 @llvm.aarch64.neon.sqabs.i32(i32 %vecext) nounwind
+ ret i32 %vqabs.i
+}
+
+define i64 @qabsd(<2 x i64> %b, <2 x i64> %c) nounwind readnone {
+; CHECK-LABEL: qabsd:
+; CHECK: sqabs d0, d0
+; CHECK: ret
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vqabs.i = tail call i64 @llvm.aarch64.neon.sqabs.i64(i64 %vecext) nounwind
+ ret i64 %vqabs.i
+}
+
+define i32 @qnegs(<4 x i32> %b, <4 x i32> %c) nounwind readnone {
+; CHECK-LABEL: qnegs:
+; CHECK: sqneg s0, s0
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %b, i32 0
+ %vqneg.i = tail call i32 @llvm.aarch64.neon.sqneg.i32(i32 %vecext) nounwind
+ ret i32 %vqneg.i
+}
+
+define i64 @qnegd(<2 x i64> %b, <2 x i64> %c) nounwind readnone {
+; CHECK-LABEL: qnegd:
+; CHECK: sqneg d0, d0
+; CHECK: ret
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vqneg.i = tail call i64 @llvm.aarch64.neon.sqneg.i64(i64 %vecext) nounwind
+ ret i64 %vqneg.i
+}
+
+declare i64 @llvm.aarch64.neon.sqneg.i64(i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.sqneg.i32(i32) nounwind readnone
+declare i64 @llvm.aarch64.neon.sqabs.i64(i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.sqabs.i32(i32) nounwind readnone
+
+
+define i32 @vqmovund(<2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: vqmovund:
+; CHECK: sqxtun s0, d0
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vqmovun.i = tail call i32 @llvm.aarch64.neon.scalar.sqxtun.i32.i64(i64 %vecext) nounwind
+ ret i32 %vqmovun.i
+}
+
+define i32 @vqmovnd_s(<2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: vqmovnd_s:
+; CHECK: sqxtn s0, d0
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vqmovn.i = tail call i32 @llvm.aarch64.neon.scalar.sqxtn.i32.i64(i64 %vecext) nounwind
+ ret i32 %vqmovn.i
+}
+
+define i32 @vqmovnd_u(<2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: vqmovnd_u:
+; CHECK: uqxtn s0, d0
+ %vecext = extractelement <2 x i64> %b, i32 0
+ %vqmovn.i = tail call i32 @llvm.aarch64.neon.scalar.uqxtn.i32.i64(i64 %vecext) nounwind
+ ret i32 %vqmovn.i
+}
+
+declare i32 @llvm.aarch64.neon.scalar.uqxtn.i32.i64(i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.scalar.sqxtn.i32.i64(i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.scalar.sqxtun.i32.i64(i64) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-arith.ll b/test/CodeGen/AArch64/arm64-arith.ll
new file mode 100644
index 000000000000..f36e706b15dd
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-arith.ll
@@ -0,0 +1,270 @@
+; RUN: llc < %s -march=arm64 -asm-verbose=false | FileCheck %s
+
+define i32 @t1(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: add w0, w1, w0
+; CHECK: ret
+ %add = add i32 %b, %a
+ ret i32 %add
+}
+
+define i32 @t2(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: udiv w0, w0, w1
+; CHECK: ret
+ %udiv = udiv i32 %a, %b
+ ret i32 %udiv
+}
+
+define i64 @t3(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: udiv x0, x0, x1
+; CHECK: ret
+ %udiv = udiv i64 %a, %b
+ ret i64 %udiv
+}
+
+define i32 @t4(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: sdiv w0, w0, w1
+; CHECK: ret
+ %sdiv = sdiv i32 %a, %b
+ ret i32 %sdiv
+}
+
+define i64 @t5(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t5:
+; CHECK: sdiv x0, x0, x1
+; CHECK: ret
+ %sdiv = sdiv i64 %a, %b
+ ret i64 %sdiv
+}
+
+define i32 @t6(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t6:
+; CHECK: lsl w0, w0, w1
+; CHECK: ret
+ %shl = shl i32 %a, %b
+ ret i32 %shl
+}
+
+define i64 @t7(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t7:
+; CHECK: lsl x0, x0, x1
+; CHECK: ret
+ %shl = shl i64 %a, %b
+ ret i64 %shl
+}
+
+define i32 @t8(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t8:
+; CHECK: lsr w0, w0, w1
+; CHECK: ret
+ %lshr = lshr i32 %a, %b
+ ret i32 %lshr
+}
+
+define i64 @t9(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t9:
+; CHECK: lsr x0, x0, x1
+; CHECK: ret
+ %lshr = lshr i64 %a, %b
+ ret i64 %lshr
+}
+
+define i32 @t10(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t10:
+; CHECK: asr w0, w0, w1
+; CHECK: ret
+ %ashr = ashr i32 %a, %b
+ ret i32 %ashr
+}
+
+define i64 @t11(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t11:
+; CHECK: asr x0, x0, x1
+; CHECK: ret
+ %ashr = ashr i64 %a, %b
+ ret i64 %ashr
+}
+
+define i32 @t12(i16 %a, i32 %x) nounwind ssp {
+entry:
+; CHECK-LABEL: t12:
+; CHECK: add w0, w1, w0, sxth
+; CHECK: ret
+ %c = sext i16 %a to i32
+ %e = add i32 %x, %c
+ ret i32 %e
+}
+
+define i32 @t13(i16 %a, i32 %x) nounwind ssp {
+entry:
+; CHECK-LABEL: t13:
+; CHECK: add w0, w1, w0, sxth #2
+; CHECK: ret
+ %c = sext i16 %a to i32
+ %d = shl i32 %c, 2
+ %e = add i32 %x, %d
+ ret i32 %e
+}
+
+define i64 @t14(i16 %a, i64 %x) nounwind ssp {
+entry:
+; CHECK-LABEL: t14:
+; CHECK: add x0, x1, w0, uxth #3
+; CHECK: ret
+ %c = zext i16 %a to i64
+ %d = shl i64 %c, 3
+ %e = add i64 %x, %d
+ ret i64 %e
+}
+
+; rdar://9160598
+define i64 @t15(i64 %a, i64 %x) nounwind ssp {
+entry:
+; CHECK-LABEL: t15:
+; CHECK: add x0, x1, w0, uxtw
+; CHECK: ret
+ %b = and i64 %a, 4294967295
+ %c = add i64 %x, %b
+ ret i64 %c
+}
+
+define i64 @t16(i64 %x) nounwind ssp {
+entry:
+; CHECK-LABEL: t16:
+; CHECK: lsl x0, x0, #1
+; CHECK: ret
+ %a = shl i64 %x, 1
+ ret i64 %a
+}
+
+; rdar://9166974
+define i64 @t17(i16 %a, i64 %x) nounwind ssp {
+entry:
+; CHECK-LABEL: t17:
+; CHECK: sxth [[REG:x[0-9]+]], w0
+; CHECK: neg x0, [[REG]], lsl #32
+; CHECK: ret
+ %tmp16 = sext i16 %a to i64
+ %tmp17 = mul i64 %tmp16, -4294967296
+ ret i64 %tmp17
+}
+
+define i32 @t18(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t18:
+; CHECK: sdiv w0, w0, w1
+; CHECK: ret
+ %sdiv = call i32 @llvm.aarch64.sdiv.i32(i32 %a, i32 %b)
+ ret i32 %sdiv
+}
+
+define i64 @t19(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t19:
+; CHECK: sdiv x0, x0, x1
+; CHECK: ret
+ %sdiv = call i64 @llvm.aarch64.sdiv.i64(i64 %a, i64 %b)
+ ret i64 %sdiv
+}
+
+define i32 @t20(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t20:
+; CHECK: udiv w0, w0, w1
+; CHECK: ret
+ %udiv = call i32 @llvm.aarch64.udiv.i32(i32 %a, i32 %b)
+ ret i32 %udiv
+}
+
+define i64 @t21(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t21:
+; CHECK: udiv x0, x0, x1
+; CHECK: ret
+ %udiv = call i64 @llvm.aarch64.udiv.i64(i64 %a, i64 %b)
+ ret i64 %udiv
+}
+
+declare i32 @llvm.aarch64.sdiv.i32(i32, i32) nounwind readnone
+declare i64 @llvm.aarch64.sdiv.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.udiv.i32(i32, i32) nounwind readnone
+declare i64 @llvm.aarch64.udiv.i64(i64, i64) nounwind readnone
+
+; 32-bit not.
+define i32 @inv_32(i32 %x) nounwind ssp {
+entry:
+; CHECK: inv_32
+; CHECK: mvn w0, w0
+; CHECK: ret
+ %inv = xor i32 %x, -1
+ ret i32 %inv
+}
+
+; 64-bit not.
+define i64 @inv_64(i64 %x) nounwind ssp {
+entry:
+; CHECK: inv_64
+; CHECK: mvn x0, x0
+; CHECK: ret
+ %inv = xor i64 %x, -1
+ ret i64 %inv
+}
+
+; Multiplying by a power of two plus or minus one is better done with a shift
+; and an add/sub than with the madd/msub instructions: the latter take 4+
+; cycles, while the former take two cycles in total for the two-instruction
+; subtract sequence (the plus-one case is a single add with a shifted
+; operand). An expanded IR version of the pattern is sketched after f4 below.
+define i32 @f0(i32 %a) nounwind readnone ssp {
+; CHECK-LABEL: f0:
+; CHECK-NEXT: add w0, w0, w0, lsl #3
+; CHECK-NEXT: ret
+ %res = mul i32 %a, 9
+ ret i32 %res
+}
+
+define i64 @f1(i64 %a) nounwind readnone ssp {
+; CHECK-LABEL: f1:
+; CHECK-NEXT: lsl x8, x0, #4
+; CHECK-NEXT: sub x0, x8, x0
+; CHECK-NEXT: ret
+ %res = mul i64 %a, 15
+ ret i64 %res
+}
+
+define i32 @f2(i32 %a) nounwind readnone ssp {
+; CHECK-LABEL: f2:
+; CHECK-NEXT: lsl w8, w0, #3
+; CHECK-NEXT: sub w0, w8, w0
+; CHECK-NEXT: ret
+ %res = mul nsw i32 %a, 7
+ ret i32 %res
+}
+
+define i64 @f3(i64 %a) nounwind readnone ssp {
+; CHECK-LABEL: f3:
+; CHECK-NEXT: add x0, x0, x0, lsl #4
+; CHECK-NEXT: ret
+ %res = mul nsw i64 %a, 17
+ ret i64 %res
+}
+
+define i32 @f4(i32 %a) nounwind readnone ssp {
+; CHECK-LABEL: f4:
+; CHECK-NEXT: add w0, w0, w0, lsl #1
+; CHECK-NEXT: ret
+ %res = mul i32 %a, 3
+ ret i32 %res
+}
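+
+; Illustrative sketch of the expansion described above (an editorial example,
+; not part of the upstream test and not exercised by any CHECK lines):
+; multiplying by 9 is (a << 3) + a and multiplying by 15 is (a << 4) - a,
+; which is the shape the checks on f0-f4 expect the backend to produce.
+define i32 @mul9_expanded(i32 %a) nounwind readnone ssp {
+  ; a*8 computed with a shift, then one add gives a*9.
+  %shl = shl i32 %a, 3
+  %res = add i32 %shl, %a
+  ret i32 %res
+}
+
+define i64 @mul15_expanded(i64 %a) nounwind readnone ssp {
+  ; a*16 computed with a shift, then one subtract gives a*15.
+  %shl = shl i64 %a, 4
+  %res = sub i64 %shl, %a
+  ret i64 %res
+}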
diff --git a/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll b/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
new file mode 100644
index 000000000000..0904b62c4032
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=arm64 -aarch64-dead-def-elimination=false < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @test1() #0 {
+ %tmp1 = alloca i8
+ %tmp2 = icmp eq i8* %tmp1, null
+ %tmp3 = zext i1 %tmp2 to i32
+
+ ret i32 %tmp3
+
+ ; CHECK-LABEL: test1
+ ; CHECK: adds {{x[0-9]+}}, sp, #15
+}
diff --git a/test/CodeGen/AArch64/arm64-atomic-128.ll b/test/CodeGen/AArch64/arm64-atomic-128.ll
new file mode 100644
index 000000000000..3377849f6698
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -0,0 +1,228 @@
+; RUN: llc < %s -march=arm64 -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+
+@var = global i128 0
+
+define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+; CHECK-LABEL: val_compare_and_swap:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[RESULTLO:x[0-9]+]], [[RESULTHI:x[0-9]+]], [x[[ADDR:[0-9]+]]]
+; CHECK-DAG: eor [[MISMATCH_LO:x[0-9]+]], [[RESULTLO]], x2
+; CHECK-DAG: eor [[MISMATCH_HI:x[0-9]+]], [[RESULTHI]], x3
+; CHECK: orr [[MISMATCH:x[0-9]+]], [[MISMATCH_LO]], [[MISMATCH_HI]]
+; CHECK: cbnz [[MISMATCH]], [[DONE:.LBB[0-9]+_[0-9]+]]
+; CHECK: stxp [[SCRATCH_RES:w[0-9]+]], x4, x5, [x[[ADDR]]]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+; CHECK: [[DONE]]:
+ %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+ %val = extractvalue { i128, i1 } %pair, 0
+ ret i128 %val
+}
+
+define void @fetch_and_nand(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_nand:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK-DAG: and [[TMP_REGLO:x[0-9]+]], [[DEST_REGLO]], x2
+; CHECK-DAG: and [[TMP_REGHI:x[0-9]+]], [[DEST_REGHI]], x3
+; CHECK-DAG: mvn [[SCRATCH_REGLO:x[0-9]+]], [[TMP_REGLO]]
+; CHECK-DAG: mvn [[SCRATCH_REGHI:x[0-9]+]], [[TMP_REGHI]]
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw nand i128* %p, i128 %bits release
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_or(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_or:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK-DAG: orr [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2
+; CHECK-DAG: orr [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw or i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_add(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_add:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK: adds [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2
+; CHECK: adcs [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw add i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_sub(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_sub:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK: subs [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2
+; CHECK: sbcs [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw sub i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_min(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_min:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK: cmp [[DEST_REGLO]], x2
+; CHECK: cset [[LOCMP:w[0-9]+]], ls
+; CHECK: cmp [[DEST_REGHI:x[0-9]+]], x3
+; CHECK: cset [[HICMP:w[0-9]+]], le
+; CHECK: csel [[CMP:w[0-9]+]], [[LOCMP]], [[HICMP]], eq
+; CHECK: cmp [[CMP]], #0
+; CHECK-DAG: csel [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3, ne
+; CHECK-DAG: csel [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2, ne
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw min i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_max(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_max:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK: cmp [[DEST_REGLO]], x2
+; CHECK: cset [[LOCMP:w[0-9]+]], hi
+; CHECK: cmp [[DEST_REGHI:x[0-9]+]], x3
+; CHECK: cset [[HICMP:w[0-9]+]], gt
+; CHECK: csel [[CMP:w[0-9]+]], [[LOCMP]], [[HICMP]], eq
+; CHECK: cmp [[CMP]], #0
+; CHECK-DAG: csel [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3, ne
+; CHECK-DAG: csel [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2, ne
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw max i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umin(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umin:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK: cmp [[DEST_REGLO]], x2
+; CHECK: cset [[LOCMP:w[0-9]+]], ls
+; CHECK: cmp [[DEST_REGHI:x[0-9]+]], x3
+; CHECK: cset [[HICMP:w[0-9]+]], ls
+; CHECK: csel [[CMP:w[0-9]+]], [[LOCMP]], [[HICMP]], eq
+; CHECK: cmp [[CMP]], #0
+; CHECK-DAG: csel [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3, ne
+; CHECK-DAG: csel [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2, ne
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw umin i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umax(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umax:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp [[DEST_REGLO:x[0-9]+]], [[DEST_REGHI:x[0-9]+]], [x0]
+; CHECK: cmp [[DEST_REGLO]], x2
+; CHECK: cset [[LOCMP:w[0-9]+]], hi
+; CHECK: cmp [[DEST_REGHI:x[0-9]+]], x3
+; CHECK: cset [[HICMP:w[0-9]+]], hi
+; CHECK: csel [[CMP:w[0-9]+]], [[LOCMP]], [[HICMP]], eq
+; CHECK: cmp [[CMP]], #0
+; CHECK-DAG: csel [[SCRATCH_REGHI:x[0-9]+]], [[DEST_REGHI]], x3, ne
+; CHECK-DAG: csel [[SCRATCH_REGLO:x[0-9]+]], [[DEST_REGLO]], x2, ne
+; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
+; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
+
+; CHECK-DAG: str [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]]
+ %val = atomicrmw umax i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define i128 @atomic_load_seq_cst(i128* %p) {
+; CHECK-LABEL: atomic_load_seq_cst:
+; CHECK-NOT: dmb
+; CHECK: ldaxp
+; CHECK-NOT: dmb
+ %r = load atomic i128* %p seq_cst, align 16
+ ret i128 %r
+}
+
+define i128 @atomic_load_relaxed(i128* %p) {
+; CHECK-LABEL: atomic_load_relaxed:
+; CHECK-NOT: dmb
+; CHECK: ldxp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
+; CHECK-NOT: dmb
+ %r = load atomic i128* %p monotonic, align 16
+ ret i128 %r
+}
+
+
+define void @atomic_store_seq_cst(i128 %in, i128* %p) {
+; CHECK-LABEL: atomic_store_seq_cst:
+; CHECK-NOT: dmb
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxp xzr, xzr, [x2]
+; CHECK: stlxp [[SUCCESS:w[0-9]+]], x0, x1, [x2]
+; CHECK: cbnz [[SUCCESS]], [[LABEL]]
+; CHECK-NOT: dmb
+ store atomic i128 %in, i128* %p seq_cst, align 16
+ ret void
+}
+
+define void @atomic_store_release(i128 %in, i128* %p) {
+; CHECK-LABEL: atomic_store_release:
+; CHECK-NOT: dmb
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldxp xzr, xzr, [x2]
+; CHECK: stlxp [[SUCCESS:w[0-9]+]], x0, x1, [x2]
+; CHECK: cbnz [[SUCCESS]], [[LABEL]]
+; CHECK-NOT: dmb
+ store atomic i128 %in, i128* %p release, align 16
+ ret void
+}
+
+define void @atomic_store_relaxed(i128 %in, i128* %p) {
+; CHECK-LABEL: atomic_store_relaxed:
+; CHECK-NOT: dmb
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldxp xzr, xzr, [x2]
+; CHECK: stxp [[SUCCESS:w[0-9]+]], x0, x1, [x2]
+; CHECK: cbnz [[SUCCESS]], [[LABEL]]
+; CHECK-NOT: dmb
+ store atomic i128 %in, i128* %p unordered, align 16
+ ret void
+}
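+
+; Note: with no single-copy 128-bit atomic instruction available here, every
+; operation above is expanded into a load-linked/store-conditional loop:
+; ldxp/ldaxp reads the pair, the update is computed in registers, stxp/stlxp
+; attempts the store, and cbnz retries the loop whenever the exclusive store
+; fails.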
diff --git a/test/CodeGen/AArch64/arm64-atomic.ll b/test/CodeGen/AArch64/arm64-atomic.ll
new file mode 100644
index 000000000000..b56f91ddd111
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-atomic.ll
@@ -0,0 +1,335 @@
+; RUN: llc < %s -march=arm64 -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+
+define i32 @val_compare_and_swap(i32* %p) {
+; CHECK-LABEL: val_compare_and_swap:
+; CHECK: orr [[NEWVAL_REG:w[0-9]+]], wzr, #0x4
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxr [[RESULT:w[0-9]+]], [x0]
+; CHECK: cmp [[RESULT]], #7
+; CHECK: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], [[NEWVAL_REG]], [x0]
+; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK: [[LABEL2]]:
+ %pair = cmpxchg i32* %p, i32 7, i32 4 acquire acquire
+ %val = extractvalue { i32, i1 } %pair, 0
+ ret i32 %val
+}
+
+define i64 @val_compare_and_swap_64(i64* %p) {
+; CHECK-LABEL: val_compare_and_swap_64:
+; CHECK: orr w[[NEWVAL_REG:[0-9]+]], wzr, #0x4
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldxr [[RESULT:x[0-9]+]], [x0]
+; CHECK: cmp [[RESULT]], #7
+; CHECK: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK-NOT: stxr x[[NEWVAL_REG]], x[[NEWVAL_REG]]
+; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], x[[NEWVAL_REG]], [x0]
+; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK: [[LABEL2]]:
+ %pair = cmpxchg i64* %p, i64 7, i64 4 monotonic monotonic
+ %val = extractvalue { i64, i1 } %pair, 0
+ ret i64 %val
+}
+
+define i32 @fetch_and_nand(i32* %p) {
+; CHECK-LABEL: fetch_and_nand:
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldxr w[[DEST_REG:[0-9]+]], [x0]
+; CHECK: mvn [[TMP_REG:w[0-9]+]], w[[DEST_REG]]
+; CHECK: orr [[SCRATCH2_REG:w[0-9]+]], [[TMP_REG]], #0xfffffff8
+; CHECK-NOT: stlxr [[SCRATCH2_REG]], [[SCRATCH2_REG]]
+; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
+; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK: mov x0, x[[DEST_REG]]
+ %val = atomicrmw nand i32* %p, i32 7 release
+ ret i32 %val
+}
+
+define i64 @fetch_and_nand_64(i64* %p) {
+; CHECK-LABEL: fetch_and_nand_64:
+; CHECK: mov x[[ADDR:[0-9]+]], x0
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxr x[[DEST_REG:[0-9]+]], [x[[ADDR]]]
+; CHECK: mvn w[[TMP_REG:[0-9]+]], w[[DEST_REG]]
+; CHECK: orr [[SCRATCH2_REG:x[0-9]+]], x[[TMP_REG]], #0xfffffffffffffff8
+; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
+; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
+
+ %val = atomicrmw nand i64* %p, i64 7 acq_rel
+ ret i64 %val
+}
+
+define i32 @fetch_and_or(i32* %p) {
+; CHECK-LABEL: fetch_and_or:
+; CHECK: movz [[OLDVAL_REG:w[0-9]+]], #0x5
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxr w[[DEST_REG:[0-9]+]], [x0]
+; CHECK: orr [[SCRATCH2_REG:w[0-9]+]], w[[DEST_REG]], [[OLDVAL_REG]]
+; CHECK-NOT: stlxr [[SCRATCH2_REG]], [[SCRATCH2_REG]]
+; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
+; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK: mov x0, x[[DEST_REG]]
+ %val = atomicrmw or i32* %p, i32 5 seq_cst
+ ret i32 %val
+}
+
+define i64 @fetch_and_or_64(i64* %p) {
+; CHECK-LABEL: fetch_and_or_64:
+; CHECK: mov x[[ADDR:[0-9]+]], x0
+; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: ldxr [[DEST_REG:x[0-9]+]], [x[[ADDR]]]
+; CHECK: orr [[SCRATCH2_REG:x[0-9]+]], [[DEST_REG]], #0x7
+; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
+; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
+ %val = atomicrmw or i64* %p, i64 7 monotonic
+ ret i64 %val
+}
+
+define void @acquire_fence() {
+ fence acquire
+ ret void
+ ; CHECK-LABEL: acquire_fence:
+ ; CHECK: dmb ishld
+}
+
+define void @release_fence() {
+ fence release
+ ret void
+ ; CHECK-LABEL: release_fence:
+ ; CHECK: dmb ish{{$}}
+}
+
+define void @seq_cst_fence() {
+ fence seq_cst
+ ret void
+ ; CHECK-LABEL: seq_cst_fence:
+ ; CHECK: dmb ish{{$}}
+}
+
+define i32 @atomic_load(i32* %p) {
+ %r = load atomic i32* %p seq_cst, align 4
+ ret i32 %r
+ ; CHECK-LABEL: atomic_load:
+ ; CHECK: ldar
+}
+
+define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
+; CHECK-LABEL: atomic_load_relaxed_8:
+ %ptr_unsigned = getelementptr i8* %p, i32 4095
+ %val_unsigned = load atomic i8* %ptr_unsigned monotonic, align 1
+; CHECK: ldrb {{w[0-9]+}}, [x0, #4095]
+
+ %ptr_regoff = getelementptr i8* %p, i32 %off32
+ %val_regoff = load atomic i8* %ptr_regoff unordered, align 1
+ %tot1 = add i8 %val_unsigned, %val_regoff
+; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]
+
+ %ptr_unscaled = getelementptr i8* %p, i32 -256
+ %val_unscaled = load atomic i8* %ptr_unscaled monotonic, align 1
+ %tot2 = add i8 %tot1, %val_unscaled
+; CHECK: ldurb {{w[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+ %val_random = load atomic i8* %ptr_random unordered, align 1
+ %tot3 = add i8 %tot2, %val_random
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]]
+
+ ret i8 %tot3
+}
+
+define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
+; CHECK-LABEL: atomic_load_relaxed_16:
+ %ptr_unsigned = getelementptr i16* %p, i32 4095
+ %val_unsigned = load atomic i16* %ptr_unsigned monotonic, align 2
+; CHECK: ldrh {{w[0-9]+}}, [x0, #8190]
+
+ %ptr_regoff = getelementptr i16* %p, i32 %off32
+ %val_regoff = load atomic i16* %ptr_regoff unordered, align 2
+ %tot1 = add i16 %val_unsigned, %val_regoff
+; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]
+
+ %ptr_unscaled = getelementptr i16* %p, i32 -128
+ %val_unscaled = load atomic i16* %ptr_unscaled monotonic, align 2
+ %tot2 = add i16 %tot1, %val_unscaled
+; CHECK: ldurh {{w[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+ %val_random = load atomic i16* %ptr_random unordered, align 2
+ %tot3 = add i16 %tot2, %val_random
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]]
+
+ ret i16 %tot3
+}
+
+define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
+; CHECK-LABEL: atomic_load_relaxed_32:
+ %ptr_unsigned = getelementptr i32* %p, i32 4095
+ %val_unsigned = load atomic i32* %ptr_unsigned monotonic, align 4
+; CHECK: ldr {{w[0-9]+}}, [x0, #16380]
+
+ %ptr_regoff = getelementptr i32* %p, i32 %off32
+ %val_regoff = load atomic i32* %ptr_regoff unordered, align 4
+ %tot1 = add i32 %val_unsigned, %val_regoff
+; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]
+
+ %ptr_unscaled = getelementptr i32* %p, i32 -64
+ %val_unscaled = load atomic i32* %ptr_unscaled monotonic, align 4
+ %tot2 = add i32 %tot1, %val_unscaled
+; CHECK: ldur {{w[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+ %val_random = load atomic i32* %ptr_random unordered, align 4
+ %tot3 = add i32 %tot2, %val_random
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]]
+
+ ret i32 %tot3
+}
+
+define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
+; CHECK-LABEL: atomic_load_relaxed_64:
+ %ptr_unsigned = getelementptr i64* %p, i32 4095
+ %val_unsigned = load atomic i64* %ptr_unsigned monotonic, align 8
+; CHECK: ldr {{x[0-9]+}}, [x0, #32760]
+
+ %ptr_regoff = getelementptr i64* %p, i32 %off32
+ %val_regoff = load atomic i64* %ptr_regoff unordered, align 8
+ %tot1 = add i64 %val_unsigned, %val_regoff
+; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]
+
+ %ptr_unscaled = getelementptr i64* %p, i32 -32
+ %val_unscaled = load atomic i64* %ptr_unscaled monotonic, align 8
+ %tot2 = add i64 %tot1, %val_unscaled
+; CHECK: ldur {{x[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+ %val_random = load atomic i64* %ptr_random unordered, align 8
+ %tot3 = add i64 %tot2, %val_random
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]]
+
+ ret i64 %tot3
+}
+
+
+define void @atomic_store(i32* %p) {
+ store atomic i32 4, i32* %p seq_cst, align 4
+ ret void
+ ; CHECK-LABEL: atomic_store:
+ ; CHECK: stlr
+}
+
+define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
+; CHECK-LABEL: atomic_store_relaxed_8:
+ %ptr_unsigned = getelementptr i8* %p, i32 4095
+ store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
+; CHECK: strb {{w[0-9]+}}, [x0, #4095]
+
+ %ptr_regoff = getelementptr i8* %p, i32 %off32
+ store atomic i8 %val, i8* %ptr_regoff unordered, align 1
+; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]
+
+ %ptr_unscaled = getelementptr i8* %p, i32 -256
+ store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
+; CHECK: sturb {{w[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+ store atomic i8 %val, i8* %ptr_random unordered, align 1
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: strb {{w[0-9]+}}, [x[[ADDR]]]
+
+ ret void
+}
+
+define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
+; CHECK-LABEL: atomic_store_relaxed_16:
+ %ptr_unsigned = getelementptr i16* %p, i32 4095
+ store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
+; CHECK: strh {{w[0-9]+}}, [x0, #8190]
+
+ %ptr_regoff = getelementptr i16* %p, i32 %off32
+ store atomic i16 %val, i16* %ptr_regoff unordered, align 2
+; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]
+
+ %ptr_unscaled = getelementptr i16* %p, i32 -128
+ store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
+; CHECK: sturh {{w[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+ store atomic i16 %val, i16* %ptr_random unordered, align 2
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: strh {{w[0-9]+}}, [x[[ADDR]]]
+
+ ret void
+}
+
+define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
+; CHECK-LABEL: atomic_store_relaxed_32:
+ %ptr_unsigned = getelementptr i32* %p, i32 4095
+ store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
+; CHECK: str {{w[0-9]+}}, [x0, #16380]
+
+ %ptr_regoff = getelementptr i32* %p, i32 %off32
+ store atomic i32 %val, i32* %ptr_regoff unordered, align 4
+; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]
+
+ %ptr_unscaled = getelementptr i32* %p, i32 -64
+ store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
+; CHECK: stur {{w[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+ store atomic i32 %val, i32* %ptr_random unordered, align 4
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: str {{w[0-9]+}}, [x[[ADDR]]]
+
+ ret void
+}
+
+define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {
+; CHECK-LABEL: atomic_store_relaxed_64:
+ %ptr_unsigned = getelementptr i64* %p, i32 4095
+ store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
+; CHECK: str {{x[0-9]+}}, [x0, #32760]
+
+ %ptr_regoff = getelementptr i64* %p, i32 %off32
+ store atomic i64 %val, i64* %ptr_regoff unordered, align 8
+; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]
+
+ %ptr_unscaled = getelementptr i64* %p, i32 -32
+ store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
+; CHECK: stur {{x[0-9]+}}, [x0, #-256]
+
+ %ptr_random = getelementptr i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+ store atomic i64 %val, i64* %ptr_random unordered, align 8
+; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
+; CHECK: str {{x[0-9]+}}, [x[[ADDR]]]
+
+ ret void
+}
+
+; rdar://11531169
+; rdar://11531308
+
+%"class.X::Atomic" = type { %struct.x_atomic_t }
+%struct.x_atomic_t = type { i32 }
+
+@counter = external hidden global %"class.X::Atomic", align 4
+
+define i32 @next_id() nounwind optsize ssp align 2 {
+entry:
+ %0 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
+ %add.i = add i32 %0, 1
+ %tobool = icmp eq i32 %add.i, 0
+ br i1 %tobool, label %if.else, label %return
+
+if.else: ; preds = %entry
+ %1 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
+ %add.i2 = add i32 %1, 1
+ br label %return
+
+return: ; preds = %if.else, %entry
+ %retval.0 = phi i32 [ %add.i2, %if.else ], [ %add.i, %entry ]
+ ret i32 %retval.0
+}
diff --git a/test/CodeGen/AArch64/arm64-basic-pic.ll b/test/CodeGen/AArch64/arm64-basic-pic.ll
new file mode 100644
index 000000000000..9fdb1e91385b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-basic-pic.ll
@@ -0,0 +1,54 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
+
+@var = global i32 0
+
+define i32 @get_globalvar() {
+; CHECK-LABEL: get_globalvar:
+
+ %val = load i32* @var
+; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
+; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], :got_lo12:var]
+; CHECK: ldr w0, [x[[GOTLOC]]]
+
+ ret i32 %val
+}
+
+define i32* @get_globalvaraddr() {
+; CHECK-LABEL: get_globalvaraddr:
+
+ %val = load i32* @var
+; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
+; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:var]
+
+ ret i32* @var
+}
+
+@hiddenvar = hidden global i32 0
+
+define i32 @get_hiddenvar() {
+; CHECK-LABEL: get_hiddenvar:
+
+ %val = load i32* @hiddenvar
+; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
+; CHECK: ldr w0, [x[[HI]], :lo12:hiddenvar]
+
+ ret i32 %val
+}
+
+define i32* @get_hiddenvaraddr() {
+; CHECK-LABEL: get_hiddenvaraddr:
+
+ %val = load i32* @hiddenvar
+; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
+; CHECK: add x0, [[HI]], :lo12:hiddenvar
+
+ ret i32* @hiddenvar
+}
+
+define void()* @get_func() {
+; CHECK-LABEL: get_func:
+
+ ret void()* bitcast(void()*()* @get_func to void()*)
+; CHECK: adrp x[[GOTHI:[0-9]+]], :got:get_func
+; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:get_func]
+}
diff --git a/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll b/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
new file mode 100644
index 000000000000..f0e968b2c177
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
@@ -0,0 +1,1101 @@
+; RUN: llc -mtriple arm64_be < %s -aarch64-load-store-opt=false -O1 -o - | FileCheck %s
+; RUN: llc -mtriple arm64_be < %s -aarch64-load-store-opt=false -O0 -fast-isel=true -o - | FileCheck %s
+
+; CHECK-LABEL: test_i64_f64:
+define void @test_i64_f64(double* %p, i64* %q) {
+; CHECK: ldr
+; CHECK: str
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = bitcast double %2 to i64
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v1i64:
+define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
+; CHECK: ldr
+; CHECK: str
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = bitcast <1 x i64> %2 to i64
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v2f32:
+define void @test_i64_v2f32(<2 x float>* %p, i64* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: str
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = bitcast <2 x float> %2 to i64
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v2i32:
+define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: str
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = bitcast <2 x i32> %2 to i64
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v4i16:
+define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4h }
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: str
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = bitcast <4 x i16> %2 to i64
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v8i8:
+define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8b }
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: str
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = bitcast <8 x i8> %2 to i64
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_i64:
+define void @test_f64_i64(i64* %p, double* %q) {
+; CHECK: ldr
+; CHECK: str
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = bitcast i64 %2 to double
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v1i64:
+define void @test_f64_v1i64(<1 x i64>* %p, double* %q) {
+; CHECK: ldr
+; CHECK: str
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = bitcast <1 x i64> %2 to double
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v2f32:
+define void @test_f64_v2f32(<2 x float>* %p, double* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: str
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = bitcast <2 x float> %2 to double
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v2i32:
+define void @test_f64_v2i32(<2 x i32>* %p, double* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: str
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = bitcast <2 x i32> %2 to double
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v4i16:
+define void @test_f64_v4i16(<4 x i16>* %p, double* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4h }
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: str
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = bitcast <4 x i16> %2 to double
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v8i8:
+define void @test_f64_v8i8(<8 x i8>* %p, double* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8b }
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: str
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = bitcast <8 x i8> %2 to double
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_i64:
+define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
+; CHECK: ldr
+; CHECK: str
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = bitcast i64 %2 to <1 x i64>
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_f64:
+define void @test_v1i64_f64(double* %p, <1 x i64>* %q) {
+; CHECK: ldr
+; CHECK: str
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = bitcast double %2 to <1 x i64>
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v2f32:
+define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: str
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = bitcast <2 x float> %2 to <1 x i64>
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v2i32:
+define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: str
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = bitcast <2 x i32> %2 to <1 x i64>
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v4i16:
+define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4h }
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: str
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = bitcast <4 x i16> %2 to <1 x i64>
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v8i8:
+define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8b }
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: str
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = bitcast <8 x i8> %2 to <1 x i64>
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_i64:
+define void @test_v2f32_i64(i64* %p, <2 x float>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = bitcast i64 %2 to <2 x float>
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_f64:
+define void @test_v2f32_f64(double* %p, <2 x float>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = bitcast double %2 to <2 x float>
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v1i64:
+define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = bitcast <1 x i64> %2 to <2 x float>
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v2i32:
+define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = bitcast <2 x i32> %2 to <2 x float>
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v4i16:
+define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4h }
+; CHECK: rev32 v{{[0-9]+}}.4h
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = bitcast <4 x i16> %2 to <2 x float>
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v8i8:
+define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8b }
+; CHECK: rev32 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = bitcast <8 x i8> %2 to <2 x float>
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_i64:
+define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = bitcast i64 %2 to <2 x i32>
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_f64:
+define void @test_v2i32_f64(double* %p, <2 x i32>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = bitcast double %2 to <2 x i32>
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v1i64:
+define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = bitcast <1 x i64> %2 to <2 x i32>
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v2f32:
+define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = bitcast <2 x float> %2 to <2 x i32>
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v4i16:
+define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4h }
+; CHECK: rev32 v{{[0-9]+}}.4h
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = bitcast <4 x i16> %2 to <2 x i32>
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v8i8:
+define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8b }
+; CHECK: rev32 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.2s }
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = bitcast <8 x i8> %2 to <2 x i32>
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_i64:
+define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: st1 { v{{[0-9]+}}.4h }
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = bitcast i64 %2 to <4 x i16>
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_f64:
+define void @test_v4i16_f64(double* %p, <4 x i16>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: st1 { v{{[0-9]+}}.4h }
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = bitcast double %2 to <4 x i16>
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v1i64:
+define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: st1 { v{{[0-9]+}}.4h }
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = bitcast <1 x i64> %2 to <4 x i16>
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v2f32:
+define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev32 v{{[0-9]+}}.4h
+; CHECK: st1 { v{{[0-9]+}}.4h }
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = bitcast <2 x float> %2 to <4 x i16>
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v2i32:
+define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev32 v{{[0-9]+}}.4h
+; CHECK: st1 { v{{[0-9]+}}.4h }
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = bitcast <2 x i32> %2 to <4 x i16>
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v8i8:
+define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8b }
+; CHECK: rev16 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.4h }
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = bitcast <8 x i8> %2 to <4 x i16>
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_i64:
+define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.8b }
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = bitcast i64 %2 to <8 x i8>
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_f64:
+define void @test_v8i8_f64(double* %p, <8 x i8>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.8b }
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = bitcast double %2 to <8 x i8>
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v1i64:
+define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.8b }
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = bitcast <1 x i64> %2 to <8 x i8>
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v2f32:
+define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev32 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.8b }
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = bitcast <2 x float> %2 to <8 x i8>
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v2i32:
+define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2s }
+; CHECK: rev32 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.8b }
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = bitcast <2 x i32> %2 to <8 x i8>
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v4i16:
+define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4h }
+; CHECK: rev16 v{{[0-9]+}}.8b
+; CHECK: st1 { v{{[0-9]+}}.8b }
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = bitcast <4 x i16> %2 to <8 x i8>
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v2f64:
+define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: ext
+; CHECK: str
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = bitcast <2 x double> %2 to fp128
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v2i64:
+define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: ext
+; CHECK: str
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = bitcast <2 x i64> %2 to fp128
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v4f32:
+define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: str q
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = bitcast <4 x float> %2 to fp128
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v4i32:
+define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4s }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: str
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = bitcast <4 x i32> %2 to fp128
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v8i16:
+define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8h }
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: str
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = bitcast <8 x i16> %2 to fp128
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v16i8:
+define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.16b }
+; CHECK: ext
+; CHECK: str q
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = bitcast <16 x i8> %2 to fp128
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_f128:
+define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) {
+; CHECK: ldr
+; CHECK: ext
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = bitcast fp128 %2 to <2 x double>
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v2i64:
+define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = bitcast <2 x i64> %2 to <2 x double>
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v4f32:
+define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = bitcast <4 x float> %2 to <2 x double>
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v4i32:
+define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4s }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = bitcast <4 x i32> %2 to <2 x double>
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v8i16:
+define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8h }
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = bitcast <8 x i16> %2 to <2 x double>
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v16i8:
+define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.16b }
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = bitcast <16 x i8> %2 to <2 x double>
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_f128:
+define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) {
+; CHECK: ldr
+; CHECK: ext
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = bitcast fp128 %2 to <2 x i64>
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v2f64:
+define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = bitcast <2 x double> %2 to <2 x i64>
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v4f32:
+define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = bitcast <4 x float> %2 to <2 x i64>
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v4i32:
+define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4s }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v8i16:
+define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8h }
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = bitcast <8 x i16> %2 to <2 x i64>
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v16i8:
+define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.16b }
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = bitcast <16 x i8> %2 to <2 x i64>
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_f128:
+define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) {
+; CHECK: ldr q
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = bitcast fp128 %2 to <4 x float>
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v2f64:
+define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = bitcast <2 x double> %2 to <4 x float>
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v2i64:
+define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = bitcast <2 x i64> %2 to <4 x float>
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v4i32:
+define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4s }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = bitcast <4 x i32> %2 to <4 x float>
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v8i16:
+define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8h }
+; CHECK: rev32 v{{[0-9]+}}.8h
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = bitcast <8 x i16> %2 to <4 x float>
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v16i8:
+define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.16b }
+; CHECK: rev32 v{{[0-9]+}}.16b
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.2d }
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = bitcast <16 x i8> %2 to <4 x float>
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_f128:
+define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: st1 { v{{[0-9]+}}.4s }
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = bitcast fp128 %2 to <4 x i32>
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v2f64:
+define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.4s }
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = bitcast <2 x double> %2 to <4 x i32>
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v2i64:
+define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.4s }
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = bitcast <2 x i64> %2 to <4 x i32>
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v4f32:
+define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: st1 { v{{[0-9]+}}.4s }
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = bitcast <4 x float> %2 to <4 x i32>
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v8i16:
+define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8h }
+; CHECK: rev32 v{{[0-9]+}}.8h
+; CHECK: st1 { v{{[0-9]+}}.4s }
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = bitcast <8 x i16> %2 to <4 x i32>
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v16i8:
+define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.16b }
+; CHECK: rev32 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.4s }
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = bitcast <16 x i8> %2 to <4 x i32>
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_f128:
+define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) {
+; CHECK: ldr
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: st1 { v{{[0-9]+}}.8h }
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = bitcast fp128 %2 to <8 x i16>
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v2f64:
+define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: st1 { v{{[0-9]+}}.8h }
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = bitcast <2 x double> %2 to <8 x i16>
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v2i64:
+define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: st1 { v{{[0-9]+}}.8h }
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = bitcast <2 x i64> %2 to <8 x i16>
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v4f32:
+define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: rev32 v{{[0-9]+}}.8h
+; CHECK: st1 { v{{[0-9]+}}.8h }
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = bitcast <4 x float> %2 to <8 x i16>
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v4i32:
+define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4s }
+; CHECK: rev32 v{{[0-9]+}}.8h
+; CHECK: st1 { v{{[0-9]+}}.8h }
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = bitcast <4 x i32> %2 to <8 x i16>
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v16i8:
+define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.16b }
+; CHECK: rev16 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.8h }
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = bitcast <16 x i8> %2 to <8 x i16>
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_f128:
+define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) {
+; CHECK: ldr q
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: st1 { v{{[0-9]+}}.16b }
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = bitcast fp128 %2 to <16 x i8>
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v2f64:
+define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.16b }
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = bitcast <2 x double> %2 to <16 x i8>
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v2i64:
+define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.16b }
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = bitcast <2 x i64> %2 to <16 x i8>
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v4f32:
+define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.2d }
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: rev32 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.16b }
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = bitcast <4 x float> %2 to <16 x i8>
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v4i32:
+define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.4s }
+; CHECK: rev32 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.16b }
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = bitcast <4 x i32> %2 to <16 x i8>
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v8i16:
+define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) {
+; CHECK: ld1 { v{{[0-9]+}}.8h }
+; CHECK: rev16 v{{[0-9]+}}.16b
+; CHECK: st1 { v{{[0-9]+}}.16b }
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = bitcast <8 x i16> %2 to <16 x i8>
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-big-endian-eh.ll b/test/CodeGen/AArch64/arm64-big-endian-eh.ll
new file mode 100644
index 000000000000..93e7da98de21
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-big-endian-eh.ll
@@ -0,0 +1,73 @@
+; RUN: llc -mtriple arm64_be-linux-gnu -filetype obj < %s | llvm-objdump -s - | FileCheck %s
+
+; Exception handling tables (.eh_frame) for big endian
+; This test case checks that the CIE length field is laid out in big-endian byte order.
+;
+; This is the LLVM assembly generated from the following C++ code:
+;
+; extern void foo(int);
+; void test(int a, int b) {
+; try {
+; foo(a);
+; } catch (...) {
+; foo(b);
+; }
+; }
+
+define void @_Z4testii(i32 %a, i32 %b) #0 {
+entry:
+ invoke void @_Z3fooi(i32 %a)
+ to label %try.cont unwind label %lpad
+
+lpad: ; preds = %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = tail call i8* @__cxa_begin_catch(i8* %1) #2
+ invoke void @_Z3fooi(i32 %b)
+ to label %invoke.cont2 unwind label %lpad1
+
+invoke.cont2: ; preds = %lpad
+ tail call void @__cxa_end_catch()
+ br label %try.cont
+
+try.cont: ; preds = %entry, %invoke.cont2
+ ret void
+
+lpad1: ; preds = %lpad
+ %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+
+eh.resume: ; preds = %lpad1
+ resume { i8*, i32 } %3
+
+terminate.lpad: ; preds = %lpad1
+ %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %5 = extractvalue { i8*, i32 } %4, 0
+ tail call void @__clang_call_terminate(i8* %5) #3
+ unreachable
+}
+
+declare void @_Z3fooi(i32) #0
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+; Function Attrs: noinline noreturn nounwind
+define linkonce_odr hidden void @__clang_call_terminate(i8*) #1 {
+ %2 = tail call i8* @__cxa_begin_catch(i8* %0) #2
+ tail call void @_ZSt9terminatev() #3
+ unreachable
+}
+
+declare void @_ZSt9terminatev()
+
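+; Note on the checks below: the first 4-byte word of .eh_frame is the CIE
+; length field (0x1c for this CIE), so on a big-endian target it should show
+; up in the section dump as the byte sequence 00 00 00 1c, not 1c 00 00 00.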
+; CHECK-LABEL: Contents of section .eh_frame:
+; CHECK-NEXT: 0000 0000001c
+
diff --git a/test/CodeGen/AArch64/arm64-big-endian-varargs.ll b/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
new file mode 100644
index 000000000000..d7b26b975231
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s | FileCheck %s
+
+; Vararg saving must save Q registers using the equivalent of STR/STP.
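+; (Presumably the concern is that st1 would store the Q registers lane by
+; lane, giving a different byte layout on a big-endian target than the
+; str/stp form that the va_arg sequence below reloads as scalars.)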
+
+target datalayout = "E-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "arm64_be-arm-none-eabi"
+
+%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
+
+declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_end(i8*) nounwind
+
+define double @callee(i32 %a, ...) {
+; CHECK: stp
+; CHECK: stp
+; CHECK: stp
+; CHECK: stp
+; CHECK: stp
+; CHECK: stp
+entry:
+ %vl = alloca %struct.__va_list, align 8
+ %vl1 = bitcast %struct.__va_list* %vl to i8*
+ call void @llvm.va_start(i8* %vl1)
+ %vr_offs_p = getelementptr inbounds %struct.__va_list* %vl, i64 0, i32 4
+ %vr_offs = load i32* %vr_offs_p, align 4
+ %0 = icmp sgt i32 %vr_offs, -1
+ br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
+
+vaarg.maybe_reg: ; preds = %entry
+ %new_reg_offs = add i32 %vr_offs, 16
+ store i32 %new_reg_offs, i32* %vr_offs_p, align 4
+ %inreg = icmp slt i32 %new_reg_offs, 1
+ br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack
+
+vaarg.in_reg: ; preds = %vaarg.maybe_reg
+ %reg_top_p = getelementptr inbounds %struct.__va_list* %vl, i64 0, i32 2
+ %reg_top = load i8** %reg_top_p, align 8
+ %1 = sext i32 %vr_offs to i64
+ %2 = getelementptr i8* %reg_top, i64 %1
+ %3 = ptrtoint i8* %2 to i64
+ %align_be = add i64 %3, 8
+ %4 = inttoptr i64 %align_be to i8*
+ br label %vaarg.end
+
+vaarg.on_stack: ; preds = %vaarg.maybe_reg, %entry
+ %stack_p = getelementptr inbounds %struct.__va_list* %vl, i64 0, i32 0
+ %stack = load i8** %stack_p, align 8
+ %new_stack = getelementptr i8* %stack, i64 8
+ store i8* %new_stack, i8** %stack_p, align 8
+ br label %vaarg.end
+
+vaarg.end: ; preds = %vaarg.on_stack, %vaarg.in_reg
+ %.sink = phi i8* [ %4, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
+ %5 = bitcast i8* %.sink to double*
+ %6 = load double* %5, align 8
+ call void @llvm.va_end(i8* %vl1)
+ ret double %6
+}
diff --git a/test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll b/test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll
new file mode 100644
index 000000000000..1dcccf106a29
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll
@@ -0,0 +1,848 @@
+; RUN: llc -mtriple arm64_be < %s -aarch64-load-store-opt=false -o - | FileCheck %s
+; RUN: llc -mtriple arm64_be < %s -fast-isel=true -aarch64-load-store-opt=false -o - | FileCheck %s
+
+; CHECK-LABEL: test_i64_f64:
+define i64 @test_i64_f64(double %p) {
+; CHECK-NOT: rev
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+}
+
+; CHECK-LABEL: test_i64_v1i64:
+define i64 @test_i64_v1i64(<1 x i64> %p) {
+; CHECK-NOT: rev
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+}
+
+; CHECK-LABEL: test_i64_v2f32:
+define i64 @test_i64_v2f32(<2 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+}
+
+; CHECK-LABEL: test_i64_v2i32:
+define i64 @test_i64_v2i32(<2 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+}
+
+; CHECK-LABEL: test_i64_v4i16:
+define i64 @test_i64_v4i16(<4 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+}
+
+; CHECK-LABEL: test_i64_v8i8:
+define i64 @test_i64_v8i8(<8 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+}
+
+; CHECK-LABEL: test_f64_i64:
+define double @test_f64_i64(i64 %p) {
+; CHECK-NOT: rev
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+}
+
+; CHECK-LABEL: test_f64_v1i64:
+define double @test_f64_v1i64(<1 x i64> %p) {
+; CHECK-NOT: rev
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+}
+
+; CHECK-LABEL: test_f64_v2f32:
+define double @test_f64_v2f32(<2 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+}
+
+; CHECK-LABEL: test_f64_v2i32:
+define double @test_f64_v2i32(<2 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+}
+
+; CHECK-LABEL: test_f64_v4i16:
+define double @test_f64_v4i16(<4 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+}
+
+; CHECK-LABEL: test_f64_v8i8:
+define double @test_f64_v8i8(<8 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+}
+
+; CHECK-LABEL: test_v1i64_i64:
+define <1 x i64> @test_v1i64_i64(i64 %p) {
+; CHECK-NOT: rev
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+}
+
+; CHECK-LABEL: test_v1i64_f64:
+define <1 x i64> @test_v1i64_f64(double %p) {
+; CHECK-NOT: rev
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+}
+
+; CHECK-LABEL: test_v1i64_v2f32:
+define <1 x i64> @test_v1i64_v2f32(<2 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+}
+
+; CHECK-LABEL: test_v1i64_v2i32:
+define <1 x i64> @test_v1i64_v2i32(<2 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+}
+
+; CHECK-LABEL: test_v1i64_v4i16:
+define <1 x i64> @test_v1i64_v4i16(<4 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+}
+
+; CHECK-LABEL: test_v1i64_v8i8:
+define <1 x i64> @test_v1i64_v8i8(<8 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+}
+
+; CHECK-LABEL: test_v2f32_i64:
+define <2 x float> @test_v2f32_i64(i64 %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+}
+
+; CHECK-LABEL: test_v2f32_f64:
+define <2 x float> @test_v2f32_f64(double %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+}
+
+; CHECK-LABEL: test_v2f32_v1i64:
+define <2 x float> @test_v2f32_v1i64(<1 x i64> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+}
+
+; CHECK-LABEL: test_v2f32_v2i32:
+define <2 x float> @test_v2f32_v2i32(<2 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+}
+
+; CHECK-LABEL: test_v2f32_v4i16:
+define <2 x float> @test_v2f32_v4i16(<4 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+}
+
+; CHECK-LABEL: test_v2f32_v8i8:
+define <2 x float> @test_v2f32_v8i8(<8 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+}
+
+; CHECK-LABEL: test_v2i32_i64:
+define <2 x i32> @test_v2i32_i64(i64 %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+}
+
+; CHECK-LABEL: test_v2i32_f64:
+define <2 x i32> @test_v2i32_f64(double %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+}
+
+; CHECK-LABEL: test_v2i32_v1i64:
+define <2 x i32> @test_v2i32_v1i64(<1 x i64> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+}
+
+; CHECK-LABEL: test_v2i32_v2f32:
+define <2 x i32> @test_v2i32_v2f32(<2 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+}
+
+; CHECK-LABEL: test_v2i32_v4i16:
+define <2 x i32> @test_v2i32_v4i16(<4 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+}
+
+; CHECK-LABEL: test_v2i32_v8i8:
+define <2 x i32> @test_v2i32_v8i8(<8 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+}
+
+; CHECK-LABEL: test_v4i16_i64:
+define <4 x i16> @test_v4i16_i64(i64 %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+}
+
+; CHECK-LABEL: test_v4i16_f64:
+define <4 x i16> @test_v4i16_f64(double %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+}
+
+; CHECK-LABEL: test_v4i16_v1i64:
+define <4 x i16> @test_v4i16_v1i64(<1 x i64> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+}
+
+; CHECK-LABEL: test_v4i16_v2f32:
+define <4 x i16> @test_v4i16_v2f32(<2 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+}
+
+; CHECK-LABEL: test_v4i16_v2i32:
+define <4 x i16> @test_v4i16_v2i32(<2 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+}
+
+; CHECK-LABEL: test_v4i16_v8i8:
+define <4 x i16> @test_v4i16_v8i8(<8 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+}
+
+; CHECK-LABEL: test_v8i8_i64:
+define <8 x i8> @test_v8i8_i64(i64 %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+}
+
+; CHECK-LABEL: test_v8i8_f64:
+define <8 x i8> @test_v8i8_f64(double %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+}
+
+; CHECK-LABEL: test_v8i8_v1i64:
+define <8 x i8> @test_v8i8_v1i64(<1 x i64> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+}
+
+; CHECK-LABEL: test_v8i8_v2f32:
+define <8 x i8> @test_v8i8_v2f32(<2 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+}
+
+; CHECK-LABEL: test_v8i8_v2i32:
+define <8 x i8> @test_v8i8_v2i32(<2 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+}
+
+; CHECK-LABEL: test_v8i8_v4i16:
+define <8 x i8> @test_v8i8_v4i16(<4 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+}
+
+; CHECK-LABEL: test_f128_v2f64:
+define fp128 @test_f128_v2f64(<2 x double> %p) {
+; CHECK: ext
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+}
+
+; CHECK-LABEL: test_f128_v2i64:
+define fp128 @test_f128_v2i64(<2 x i64> %p) {
+; CHECK: ext
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+}
+
+; CHECK-LABEL: test_f128_v4f32:
+define fp128 @test_f128_v4f32(<4 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+}
+
+; CHECK-LABEL: test_f128_v4i32:
+define fp128 @test_f128_v4i32(<4 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+}
+
+; CHECK-LABEL: test_f128_v8i16:
+define fp128 @test_f128_v8i16(<8 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+}
+
+; CHECK-LABEL: test_f128_v16i8:
+define fp128 @test_f128_v16i8(<16 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+}
+
+; CHECK-LABEL: test_v2f64_f128:
+define <2 x double> @test_v2f64_f128(fp128 %p) {
+; CHECK: ext
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_v2f64_v2i64:
+define <2 x double> @test_v2f64_v2i64(<2 x i64> %p) {
+; CHECK: ext
+; CHECK: ext
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_v2f64_v4f32:
+define <2 x double> @test_v2f64_v4f32(<4 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_v2f64_v4i32:
+define <2 x double> @test_v2f64_v4i32(<4 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_v2f64_v8i16:
+define <2 x double> @test_v2f64_v8i16(<8 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: ext
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_v2f64_v16i8:
+define <2 x double> @test_v2f64_v16i8(<16 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: ext
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_v2i64_f128:
+define <2 x i64> @test_v2i64_f128(fp128 %p) {
+; CHECK: ext
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+}
+
+; CHECK-LABEL: test_v2i64_v2f64:
+define <2 x i64> @test_v2i64_v2f64(<2 x double> %p) {
+; CHECK: ext
+; CHECK: ext
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+}
+
+; CHECK-LABEL: test_v2i64_v4f32:
+define <2 x i64> @test_v2i64_v4f32(<4 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+}
+
+; CHECK-LABEL: test_v2i64_v4i32:
+define <2 x i64> @test_v2i64_v4i32(<4 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+}
+
+; CHECK-LABEL: test_v2i64_v8i16:
+define <2 x i64> @test_v2i64_v8i16(<8 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: ext
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+}
+
+; CHECK-LABEL: test_v2i64_v16i8:
+define <2 x i64> @test_v2i64_v16i8(<16 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: ext
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+}
+
+; CHECK-LABEL: test_v4f32_f128:
+define <4 x float> @test_v4f32_f128(fp128 %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_v4f32_v2f64:
+define <4 x float> @test_v4f32_v2f64(<2 x double> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_v4f32_v2i64:
+define <4 x float> @test_v4f32_v2i64(<2 x i64> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_v4f32_v4i32:
+define <4 x float> @test_v4f32_v4i32(<4 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_v4f32_v8i16:
+define <4 x float> @test_v4f32_v8i16(<8 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_v4f32_v16i8:
+define <4 x float> @test_v4f32_v16i8(<16 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_v4i32_f128:
+define <4 x i32> @test_v4i32_f128(fp128 %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+}
+
+; CHECK-LABEL: test_v4i32_v2f64:
+define <4 x i32> @test_v4i32_v2f64(<2 x double> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+}
+
+; CHECK-LABEL: test_v4i32_v2i64:
+define <4 x i32> @test_v4i32_v2i64(<2 x i64> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+}
+
+; CHECK-LABEL: test_v4i32_v4f32:
+define <4 x i32> @test_v4i32_v4f32(<4 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+}
+
+; CHECK-LABEL: test_v4i32_v8i16:
+define <4 x i32> @test_v4i32_v8i16(<8 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+}
+
+; CHECK-LABEL: test_v4i32_v16i8:
+define <4 x i32> @test_v4i32_v16i8(<16 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+}
+
+; CHECK-LABEL: test_v8i16_f128:
+define <8 x i16> @test_v8i16_f128(fp128 %p) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; CHECK-LABEL: test_v8i16_v2f64:
+define <8 x i16> @test_v8i16_v2f64(<2 x double> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; CHECK-LABEL: test_v8i16_v2i64:
+define <8 x i16> @test_v8i16_v2i64(<2 x i64> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; CHECK-LABEL: test_v8i16_v4f32:
+define <8 x i16> @test_v8i16_v4f32(<4 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; CHECK-LABEL: test_v8i16_v4i32:
+define <8 x i16> @test_v8i16_v4i32(<4 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; CHECK-LABEL: test_v8i16_v16i8:
+define <8 x i16> @test_v8i16_v16i8(<16 x i8> %p) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; CHECK-LABEL: test_v16i8_f128:
+define <16 x i8> @test_v16i8_f128(fp128 %p) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+}
+
+; CHECK-LABEL: test_v16i8_v2f64:
+define <16 x i8> @test_v16i8_v2f64(<2 x double> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+}
+
+; CHECK-LABEL: test_v16i8_v2i64:
+define <16 x i8> @test_v16i8_v2i64(<2 x i64> %p) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+}
+
+; CHECK-LABEL: test_v16i8_v4f32:
+define <16 x i8> @test_v16i8_v4f32(<4 x float> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+}
+
+; CHECK-LABEL: test_v16i8_v4i32:
+define <16 x i8> @test_v16i8_v4i32(<4 x i32> %p) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+}
+
+; CHECK-LABEL: test_v16i8_v8i16:
+define <16 x i8> @test_v16i8_v8i16(<8 x i16> %p) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+}
diff --git a/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll b/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
new file mode 100644
index 000000000000..9a12b7a01153
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
@@ -0,0 +1,1100 @@
+; RUN: llc -mtriple arm64_be < %s -aarch64-load-store-opt=false -o - | FileCheck %s
+; RUN: llc -mtriple arm64_be < %s -aarch64-load-store-opt=false -fast-isel=true -O0 -o - | FileCheck %s
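+; Each test loads a value, passes it to an external helper that returns a
+; different type, and stores the result. On big-endian targets the NEON
+; in-register layout differs from the in-memory layout per element size, so a
+; rev64 (plus ext for the 128-bit cases) is expected around each call whenever
+; the element size involved is not 64 bits, which is what the checks spell out.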
+
+; CHECK-LABEL: test_i64_f64:
+declare i64 @test_i64_f64_helper(double %p)
+define void @test_i64_f64(double* %p, i64* %q) {
+; CHECK-NOT: rev
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call i64 @test_i64_f64_helper(double %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v1i64:
+declare i64 @test_i64_v1i64_helper(<1 x i64> %p)
+define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
+; CHECK-NOT: rev
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v2f32:
+declare i64 @test_i64_v2f32_helper(<2 x float> %p)
+define void @test_i64_v2f32(<2 x float>* %p, i64* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call i64 @test_i64_v2f32_helper(<2 x float> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v2i32:
+declare i64 @test_i64_v2i32_helper(<2 x i32> %p)
+define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v4i16:
+declare i64 @test_i64_v4i16_helper(<4 x i16> %p)
+define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_i64_v8i8:
+declare i64 @test_i64_v8i8_helper(<8 x i8> %p)
+define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_i64:
+declare double @test_f64_i64_helper(i64 %p)
+define void @test_f64_i64(i64* %p, double* %q) {
+; CHECK-NOT: rev
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call double @test_f64_i64_helper(i64 %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v1i64:
+declare double @test_f64_v1i64_helper(<1 x i64> %p)
+define void @test_f64_v1i64(<1 x i64>* %p, double* %q) {
+; CHECK-NOT: rev
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call double @test_f64_v1i64_helper(<1 x i64> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v2f32:
+declare double @test_f64_v2f32_helper(<2 x float> %p)
+define void @test_f64_v2f32(<2 x float>* %p, double* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call double @test_f64_v2f32_helper(<2 x float> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v2i32:
+declare double @test_f64_v2i32_helper(<2 x i32> %p)
+define void @test_f64_v2i32(<2 x i32>* %p, double* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call double @test_f64_v2i32_helper(<2 x i32> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v4i16:
+declare double @test_f64_v4i16_helper(<4 x i16> %p)
+define void @test_f64_v4i16(<4 x i16>* %p, double* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call double @test_f64_v4i16_helper(<4 x i16> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f64_v8i8:
+declare double @test_f64_v8i8_helper(<8 x i8> %p)
+define void @test_f64_v8i8(<8 x i8>* %p, double* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call double @test_f64_v8i8_helper(<8 x i8> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_i64:
+declare <1 x i64> @test_v1i64_i64_helper(i64 %p)
+define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
+; CHECK-NOT: rev
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_f64:
+declare <1 x i64> @test_v1i64_f64_helper(double %p)
+define void @test_v1i64_f64(double* %p, <1 x i64>* %q) {
+; CHECK-NOT: rev
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <1 x i64> @test_v1i64_f64_helper(double %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v2f32:
+declare <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %p)
+define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v2i32:
+declare <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %p)
+define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v4i16:
+declare <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %p)
+define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v1i64_v8i8:
+declare <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %p)
+define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_i64:
+declare <2 x float> @test_v2f32_i64_helper(i64 %p)
+define void @test_v2f32_i64(i64* %p, <2 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <2 x float> @test_v2f32_i64_helper(i64 %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_f64:
+declare <2 x float> @test_v2f32_f64_helper(double %p)
+define void @test_v2f32_f64(double* %p, <2 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <2 x float> @test_v2f32_f64_helper(double %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v1i64:
+declare <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %p)
+define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v2i32:
+declare <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %p)
+define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v4i16:
+declare <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %p)
+define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f32_v8i8:
+declare <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %p)
+define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_i64:
+declare <2 x i32> @test_v2i32_i64_helper(i64 %p)
+define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_f64:
+declare <2 x i32> @test_v2i32_f64_helper(double %p)
+define void @test_v2i32_f64(double* %p, <2 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <2 x i32> @test_v2i32_f64_helper(double %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v1i64:
+declare <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %p)
+define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v2f32:
+declare <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %p)
+define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v4i16:
+declare <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %p)
+define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i32_v8i8:
+declare <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %p)
+define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: rev64 v{{[0-9]+}}.2s
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_i64:
+declare <4 x i16> @test_v4i16_i64_helper(i64 %p)
+define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <4 x i16> @test_v4i16_i64_helper(i64 %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_f64:
+declare <4 x i16> @test_v4i16_f64_helper(double %p)
+define void @test_v4i16_f64(double* %p, <4 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <4 x i16> @test_v4i16_f64_helper(double %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v1i64:
+declare <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %p)
+define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v2f32:
+declare <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %p)
+define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v2i32:
+declare <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %p)
+define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i16_v8i8:
+declare <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %p)
+define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+; CHECK: rev64 v{{[0-9]+}}.4h
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_i64:
+declare <8 x i8> @test_v8i8_i64_helper(i64 %p)
+define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_f64:
+declare <8 x i8> @test_v8i8_f64_helper(double %p)
+define void @test_v8i8_f64(double* %p, <8 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <8 x i8> @test_v8i8_f64_helper(double %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v1i64:
+declare <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %p)
+define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v2f32:
+declare <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %p)
+define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v2i32:
+declare <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %p)
+define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.2s
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i8_v4i16:
+declare <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %p)
+define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4h
+; CHECK: rev64 v{{[0-9]+}}.8b
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v2f64:
+declare fp128 @test_f128_v2f64_helper(<2 x double> %p)
+define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) {
+; CHECK: ext
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v2i64:
+declare fp128 @test_f128_v2i64_helper(<2 x i64> %p)
+define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) {
+; CHECK: ext
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v4f32:
+declare fp128 @test_f128_v4f32_helper(<4 x float> %p)
+define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v4i32:
+declare fp128 @test_f128_v4i32_helper(<4 x i32> %p)
+define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v8i16:
+declare fp128 @test_f128_v8i16_helper(<8 x i16> %p)
+define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_f128_v16i8:
+declare fp128 @test_f128_v16i8_helper(<16 x i8> %p)
+define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_f128:
+declare <2 x double> @test_v2f64_f128_helper(fp128 %p)
+define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) {
+; CHECK: ext
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v2i64:
+declare <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %p)
+define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) {
+; CHECK: ext
+; CHECK: ext
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v4f32:
+declare <2 x double> @test_v2f64_v4f32_helper(<4 x float> %p)
+define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v4i32:
+declare <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %p)
+define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v8i16:
+declare <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %p)
+define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: ext
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2f64_v16i8:
+declare <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %p)
+define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: ext
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_f128:
+declare <2 x i64> @test_v2i64_f128_helper(fp128 %p)
+define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) {
+; CHECK: ext
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v2f64:
+declare <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %p)
+define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
+; CHECK: ext
+; CHECK: ext
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v4f32:
+declare <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %p)
+define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v4i32:
+declare <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %p)
+define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: ext
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v8i16:
+declare <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %p)
+define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: ext
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v2i64_v16i8:
+declare <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %p)
+define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: ext
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_f128:
+declare <4 x float> @test_v4f32_f128_helper(fp128 %p)
+define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v2f64:
+declare <4 x float> @test_v4f32_v2f64_helper(<2 x double> %p)
+define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v2i64:
+declare <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %p)
+define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v4i32:
+declare <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %p)
+define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v8i16:
+declare <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %p)
+define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4f32_v16i8:
+declare <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %p)
+define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_f128:
+declare <4 x i32> @test_v4i32_f128_helper(fp128 %p)
+define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v2f64:
+declare <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %p)
+define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v2i64:
+declare <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %p)
+define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v4f32:
+declare <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %p)
+define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v8i16:
+declare <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %p)
+define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v4i32_v16i8:
+declare <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %p)
+define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_f128:
+declare <8 x i16> @test_v8i16_f128_helper(fp128 %p)
+define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v2f64:
+declare <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %p)
+define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v2i64:
+declare <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %p)
+define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v4f32:
+declare <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %p)
+define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v4i32:
+declare <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %p)
+define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v8i16_v16i8:
+declare <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %p)
+define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_f128:
+declare <16 x i8> @test_v16i8_f128_helper(fp128 %p)
+define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v2f64:
+declare <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %p)
+define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v2i64:
+declare <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %p)
+define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) {
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v4f32:
+declare <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %p)
+define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v4i32:
+declare <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %p)
+define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.4s
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
+
+; CHECK-LABEL: test_v16i8_v8i16:
+declare <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %p)
+define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) {
+; CHECK: rev64 v{{[0-9]+}}.8h
+; CHECK: ext
+; CHECK: rev64 v{{[0-9]+}}.16b
+; CHECK: ext
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-big-imm-offsets.ll b/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
new file mode 100644
index 000000000000..a56df07a49ac
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=arm64 < %s
+
+
+; Make sure large offsets aren't mistaken for valid immediate offsets.
+; <rdar://problem/13190511>
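+; (25769803792 = 0x600000010, which is far outside the 12-bit scaled unsigned
+; and 9-bit signed unscaled immediate ranges of the AArch64 load/store
+; addressing modes, so the offset must be materialized in a register.)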
+define void @f(i32* nocapture %p) {
+entry:
+ %a = ptrtoint i32* %p to i64
+ %ao = add i64 %a, 25769803792
+ %b = inttoptr i64 %ao to i32*
+ store volatile i32 0, i32* %b, align 4
+ store volatile i32 0, i32* %b, align 4
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-big-stack.ll b/test/CodeGen/AArch64/arm64-big-stack.ll
new file mode 100644
index 000000000000..3f91bb3c2482
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-big-stack.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s | FileCheck %s
+target triple = "arm64-apple-macosx10"
+
+; Check that big stacks are generated correctly.
+; Currently, this is done by a sequence of sub instructions,
+; each of which can encode a 12-bit unsigned immediate, optionally
+; shifted left by 12; i.e., 16773120 is the biggest value per sub.
+; <rdar://12513931>
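+; For the 33554432-byte (32 MiB) buffer below, the expected sequence is
+; 2 * (4095 << 12) + (2 << 12) = 33546240 + 8192 = 33554432.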
+; CHECK-LABEL: foo:
+; CHECK: sub sp, sp, #4095, lsl #12
+; CHECK: sub sp, sp, #4095, lsl #12
+; CHECK: sub sp, sp, #2, lsl #12
+define void @foo() nounwind ssp {
+entry:
+ %buffer = alloca [33554432 x i8], align 1
+ %arraydecay = getelementptr inbounds [33554432 x i8]* %buffer, i64 0, i64 0
+ call void @doit(i8* %arraydecay) nounwind
+ ret void
+}
+
+declare void @doit(i8*)
diff --git a/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/test/CodeGen/AArch64/arm64-bitfield-extract.ll
new file mode 100644
index 000000000000..112efddd4fad
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -0,0 +1,532 @@
+; RUN: opt -codegenprepare -mtriple=arm64-apple-ios -S -o - %s | FileCheck --check-prefix=OPT %s
+; RUN: llc < %s -march=arm64 | FileCheck %s
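+
+; The tests below check that shift/and/or combinations in the IR are selected
+; to the AArch64 bitfield instructions (ubfx, sbfx, bfxil, bfi/bfm) instead of
+; separate shift-and-mask sequences.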
+%struct.X = type { i8, i8, [2 x i8] }
+%struct.Y = type { i32, i8 }
+%struct.Z = type { i8, i8, [2 x i8], i16 }
+%struct.A = type { i64, i8 }
+
+define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind optsize ssp {
+; CHECK-LABEL: foo:
+; CHECK: ubfx
+; CHECK-NOT: and
+; CHECK: ret
+
+ %tmp = bitcast %struct.X* %x to i32*
+ %tmp1 = load i32* %tmp, align 4
+ %b = getelementptr inbounds %struct.Y* %y, i64 0, i32 1
+ %bf.clear = lshr i32 %tmp1, 3
+ %bf.clear.lobit = and i32 %bf.clear, 1
+ %frombool = trunc i32 %bf.clear.lobit to i8
+ store i8 %frombool, i8* %b, align 1
+ ret void
+}
+
+define i32 @baz(i64 %cav1.coerce) nounwind {
+; CHECK-LABEL: baz:
+; CHECK: sbfx w0, w0, #0, #4
+ %tmp = trunc i64 %cav1.coerce to i32
+ %tmp1 = shl i32 %tmp, 28
+ %bf.val.sext = ashr exact i32 %tmp1, 28
+ ret i32 %bf.val.sext
+}
+
+define i32 @bar(i64 %cav1.coerce) nounwind {
+; CHECK-LABEL: bar:
+; CHECK: sbfx w0, w0, #4, #6
+ %tmp = trunc i64 %cav1.coerce to i32
+ %cav1.sroa.0.1.insert = shl i32 %tmp, 22
+ %tmp1 = ashr i32 %cav1.sroa.0.1.insert, 26
+ ret i32 %tmp1
+}
+
+define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind optsize ssp {
+; CHECK-LABEL: fct1:
+; CHECK: ubfx
+; CHECK-NOT: and
+; CHECK: ret
+
+ %tmp = bitcast %struct.Z* %x to i64*
+ %tmp1 = load i64* %tmp, align 4
+ %b = getelementptr inbounds %struct.A* %y, i64 0, i32 0
+ %bf.clear = lshr i64 %tmp1, 3
+ %bf.clear.lobit = and i64 %bf.clear, 1
+ store i64 %bf.clear.lobit, i64* %b, align 8
+ ret void
+}
+
+define i64 @fct2(i64 %cav1.coerce) nounwind {
+; CHECK-LABEL: fct2:
+; CHECK: sbfx x0, x0, #0, #36
+ %tmp = shl i64 %cav1.coerce, 28
+ %bf.val.sext = ashr exact i64 %tmp, 28
+ ret i64 %bf.val.sext
+}
+
+define i64 @fct3(i64 %cav1.coerce) nounwind {
+; CHECK-LABEL: fct3:
+; CHECK: sbfx x0, x0, #4, #38
+ %cav1.sroa.0.1.insert = shl i64 %cav1.coerce, 22
+ %tmp1 = ashr i64 %cav1.sroa.0.1.insert, 26
+ ret i64 %tmp1
+}
+
+define void @fct4(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct4:
+; CHECK: ldr [[REG1:x[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], x1, #16, #24
+; CHECK-NEXT: str [[REG1]],
+; CHECK-NEXT: ret
+ %0 = load i64* %y, align 8
+ %and = and i64 %0, -16777216
+ %shr = lshr i64 %x, 16
+ %and1 = and i64 %shr, 16777215
+ %or = or i64 %and, %and1
+ store i64 %or, i64* %y, align 8
+ ret void
+}
+
+define void @fct5(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct5:
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], w1, #16, #3
+; CHECK-NEXT: str [[REG1]],
+; CHECK-NEXT: ret
+ %0 = load i32* %y, align 8
+ %and = and i32 %0, -8
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 7
+ %or = or i32 %and, %and1
+ store i32 %or, i32* %y, align 8
+ ret void
+}
+
+; Check if we can still catch bfm instruction when we drop some low bits
+define void @fct6(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct6:
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], w1, #16, #3
+; lsr is an alias of ubfm
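+; (lsr Wd, Wn, #s is the preferred form of ubfm Wd, Wn, #s, #31, so the check
+;  below matches the alias mnemonic)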
+; CHECK-NEXT: lsr [[REG2:w[0-9]+]], [[REG1]], #2
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i32* %y, align 8
+ %and = and i32 %0, -8
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 7
+ %or = or i32 %and, %and1
+ %shr1 = lshr i32 %or, 2
+ store i32 %shr1, i32* %y, align 8
+ ret void
+}
+
+
+; Check if we can still catch bfm instruction when we drop some high bits
+define void @fct7(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct7:
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], w1, #16, #3
+; lsl is an alias of ubfm
+; CHECK-NEXT: lsl [[REG2:w[0-9]+]], [[REG1]], #2
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i32* %y, align 8
+ %and = and i32 %0, -8
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 7
+ %or = or i32 %and, %and1
+ %shl = shl i32 %or, 2
+ store i32 %shl, i32* %y, align 8
+ ret void
+}
+
+
+; Check if we can still catch bfm instruction when we drop some low bits
+; (i64 version)
+define void @fct8(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct8:
+; CHECK: ldr [[REG1:x[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], x1, #16, #3
+; lsr is an alias of ubfm
+; CHECK-NEXT: lsr [[REG2:x[0-9]+]], [[REG1]], #2
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i64* %y, align 8
+ %and = and i64 %0, -8
+ %shr = lshr i64 %x, 16
+ %and1 = and i64 %shr, 7
+ %or = or i64 %and, %and1
+ %shr1 = lshr i64 %or, 2
+ store i64 %shr1, i64* %y, align 8
+ ret void
+}
+
+
+; Check if we can still catch bfm instruction when we drop some high bits
+; (i64 version)
+define void @fct9(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct9:
+; CHECK: ldr [[REG1:x[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], x1, #16, #3
+; lsl is an alias of ubfm
+; CHECK-NEXT: lsl [[REG2:x[0-9]+]], [[REG1]], #2
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i64* %y, align 8
+ %and = and i64 %0, -8
+ %shr = lshr i64 %x, 16
+ %and1 = and i64 %shr, 7
+ %or = or i64 %and, %and1
+ %shl = shl i64 %or, 2
+ store i64 %shl, i64* %y, align 8
+ ret void
+}
+
+; Check if we can catch bfm instruction when lsb is 0 (i.e., no lshr)
+; (i32 version)
+define void @fct10(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct10:
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], w1, #0, #3
+; lsl is an alias of ubfm
+; CHECK-NEXT: lsl [[REG2:w[0-9]+]], [[REG1]], #2
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i32* %y, align 8
+ %and = and i32 %0, -8
+ %and1 = and i32 %x, 7
+ %or = or i32 %and, %and1
+ %shl = shl i32 %or, 2
+ store i32 %shl, i32* %y, align 8
+ ret void
+}
+
+; Check if we can catch bfm instruction when lsb is 0 (i.e., no lshr)
+; (i64 version)
+define void @fct11(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct11:
+; CHECK: ldr [[REG1:x[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], x1, #0, #3
+; lsl is an alias of ubfm
+; CHECK-NEXT: lsl [[REG2:x[0-9]+]], [[REG1]], #2
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i64* %y, align 8
+ %and = and i64 %0, -8
+ %and1 = and i64 %x, 7
+ %or = or i64 %and, %and1
+ %shl = shl i64 %or, 2
+ store i64 %shl, i64* %y, align 8
+ ret void
+}
+
+define zeroext i1 @fct12bis(i32 %tmp2) unnamed_addr nounwind ssp align 2 {
+; CHECK-LABEL: fct12bis:
+; CHECK-NOT: and
+; CHECK: ubfx w0, w0, #11, #1
+ %and.i.i = and i32 %tmp2, 2048
+ %tobool.i.i = icmp ne i32 %and.i.i, 0
+ ret i1 %tobool.i.i
+}
+
+; Check if we can still catch bfm instruction when we drop some high bits
+; and some low bits
+define void @fct12(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct12:
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], w1, #16, #3
+; ubfx is an alias of ubfm
+; CHECK-NEXT: ubfx [[REG2:w[0-9]+]], [[REG1]], #2, #28
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i32* %y, align 8
+ %and = and i32 %0, -8
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 7
+ %or = or i32 %and, %and1
+ %shl = shl i32 %or, 2
+ %shr2 = lshr i32 %shl, 4
+ store i32 %shr2, i32* %y, align 8
+ ret void
+}
+
+; Check if we can still catch bfm instruction when we drop some high bits
+; and some low bits
+; (i64 version)
+define void @fct13(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct13:
+; CHECK: ldr [[REG1:x[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], x1, #16, #3
+; ubfx is an alias of ubfm
+; CHECK-NEXT: ubfx [[REG2:x[0-9]+]], [[REG1]], #2, #60
+; CHECK-NEXT: str [[REG2]],
+; CHECK-NEXT: ret
+ %0 = load i64* %y, align 8
+ %and = and i64 %0, -8
+ %shr = lshr i64 %x, 16
+ %and1 = and i64 %shr, 7
+ %or = or i64 %and, %and1
+ %shl = shl i64 %or, 2
+ %shr2 = lshr i64 %shl, 4
+ store i64 %shr2, i64* %y, align 8
+ ret void
+}
+
+
+; Check if we can still catch bfm instruction when we drop some high bits
+; and some low bits
+define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct14:
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], w1, #16, #8
+; lsr is an alias of ubfm
+; CHECK-NEXT: lsr [[REG2:w[0-9]+]], [[REG1]], #4
+; CHECK-NEXT: bfxil [[REG2]], w2, #5, #3
+; lsl is an alias of ubfm
+; CHECK-NEXT: lsl [[REG3:w[0-9]+]], [[REG2]], #2
+; CHECK-NEXT: str [[REG3]],
+; CHECK-NEXT: ret
+ %0 = load i32* %y, align 8
+ %and = and i32 %0, -256
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 255
+ %or = or i32 %and, %and1
+ %shl = lshr i32 %or, 4
+ %and2 = and i32 %shl, -8
+ %shr1 = lshr i32 %x1, 5
+ %and3 = and i32 %shr1, 7
+ %or1 = or i32 %and2, %and3
+ %shl1 = shl i32 %or1, 2
+ store i32 %shl1, i32* %y, align 8
+ ret void
+}
+
+; Check if we can still catch bfm instruction when we drop some high bits
+; and some low bits
+; (i64 version)
+define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct15:
+; CHECK: ldr [[REG1:x[0-9]+]],
+; CHECK-NEXT: bfxil [[REG1]], x1, #16, #8
+; lsr is an alias of ubfm
+; CHECK-NEXT: lsr [[REG2:x[0-9]+]], [[REG1]], #4
+; CHECK-NEXT: bfxil [[REG2]], x2, #5, #3
+; lsl is an alias of ubfm
+; CHECK-NEXT: lsl [[REG3:x[0-9]+]], [[REG2]], #2
+; CHECK-NEXT: str [[REG3]],
+; CHECK-NEXT: ret
+ %0 = load i64* %y, align 8
+ %and = and i64 %0, -256
+ %shr = lshr i64 %x, 16
+ %and1 = and i64 %shr, 255
+ %or = or i64 %and, %and1
+ %shl = lshr i64 %or, 4
+ %and2 = and i64 %shl, -8
+ %shr1 = lshr i64 %x1, 5
+ %and3 = and i64 %shr1, 7
+ %or1 = or i64 %and2, %and3
+ %shl1 = shl i64 %or1, 2
+ store i64 %shl1, i64* %y, align 8
+ ret void
+}
+
+; Check if we can still catch bfm instruction when we drop some high bits
+; and some low bits and a masking operation has to be kept
+define void @fct16(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct16:
+; CHECK: ldr [[REG1:w[0-9]+]],
+; Create the constant
+; CHECK: movz [[REGCST:w[0-9]+]], #0x1a, lsl #16
+; CHECK: movk [[REGCST]], #0x8160
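+; ((0x1a << 16) | 0x8160) = 0x001a8160 = 1737056, the mask used in the IR below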
+; Do the masking
+; CHECK: and [[REG2:w[0-9]+]], [[REG1]], [[REGCST]]
+; CHECK-NEXT: bfxil [[REG2]], w1, #16, #3
+; ubfx is an alias of ubfm
+; CHECK-NEXT: ubfx [[REG3:w[0-9]+]], [[REG2]], #2, #28
+; CHECK-NEXT: str [[REG3]],
+; CHECK-NEXT: ret
+ %0 = load i32* %y, align 8
+ %and = and i32 %0, 1737056
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 7
+ %or = or i32 %and, %and1
+ %shl = shl i32 %or, 2
+ %shr2 = lshr i32 %shl, 4
+ store i32 %shr2, i32* %y, align 8
+ ret void
+}
+
+
+; Check if we can still catch bfm instruction when we drop some high bits
+; and some low bits and a masking operation has to be kept
+; (i64 version)
+define void @fct17(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+entry:
+; CHECK-LABEL: fct17:
+; CHECK: ldr [[REG1:x[0-9]+]],
+; Create the constant
+; CHECK: movz w[[REGCST:[0-9]+]], #0x1a, lsl #16
+; CHECK: movk w[[REGCST]], #0x8160
+; Do the masking
+; CHECK: and [[REG2:x[0-9]+]], [[REG1]], x[[REGCST]]
+; CHECK-NEXT: bfxil [[REG2]], x1, #16, #3
+; ubfx is an alias of ubfm
+; CHECK-NEXT: ubfx [[REG3:x[0-9]+]], [[REG2]], #2, #60
+; CHECK-NEXT: str [[REG3]],
+; CHECK-NEXT: ret
+ %0 = load i64* %y, align 8
+ %and = and i64 %0, 1737056
+ %shr = lshr i64 %x, 16
+ %and1 = and i64 %shr, 7
+ %or = or i64 %and, %and1
+ %shl = shl i64 %or, 2
+ %shr2 = lshr i64 %shl, 4
+ store i64 %shr2, i64* %y, align 8
+ ret void
+}
+
+define i64 @fct18(i32 %xor72) nounwind ssp {
+; CHECK-LABEL: fct18:
+; CHECK: ubfx x0, x0, #9, #8
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %result = and i64 %conv82, 255
+ ret i64 %result
+}
+
+; The accesses to the global array keep the instructions and the control flow
+; from being optimized away.
+@first_ones = external global [65536 x i8]
+
+; Function Attrs: nounwind readonly ssp
+define i32 @fct19(i64 %arg1) nounwind readonly ssp {
+; CHECK-LABEL: fct19:
+entry:
+ %x.sroa.1.0.extract.shift = lshr i64 %arg1, 16
+ %x.sroa.1.0.extract.trunc = trunc i64 %x.sroa.1.0.extract.shift to i16
+ %x.sroa.3.0.extract.shift = lshr i64 %arg1, 32
+ %x.sroa.5.0.extract.shift = lshr i64 %arg1, 48
+ %tobool = icmp eq i64 %x.sroa.5.0.extract.shift, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %arrayidx3 = getelementptr inbounds [65536 x i8]* @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift
+ %0 = load i8* %arrayidx3, align 1
+ %conv = zext i8 %0 to i32
+ br label %return
+
+; OPT-LABEL: if.end
+if.end: ; preds = %entry
+; OPT: lshr
+; CHECK: ubfx [[REG1:x[0-9]+]], [[REG2:x[0-9]+]], #32, #16
+ %x.sroa.3.0.extract.trunc = trunc i64 %x.sroa.3.0.extract.shift to i16
+ %tobool6 = icmp eq i16 %x.sroa.3.0.extract.trunc, 0
+; CHECK: cbz
+ br i1 %tobool6, label %if.end13, label %if.then7
+
+; OPT-LABEL: if.then7
+if.then7: ; preds = %if.end
+; OPT: lshr
+; "and" should be combined to "ubfm" while "ubfm" should be removed by cse.
+; So neither of them should be in the assemble code.
+; CHECK-NOT: and
+; CHECK-NOT: ubfm
+ %idxprom10 = and i64 %x.sroa.3.0.extract.shift, 65535
+ %arrayidx11 = getelementptr inbounds [65536 x i8]* @first_ones, i64 0, i64 %idxprom10
+ %1 = load i8* %arrayidx11, align 1
+ %conv12 = zext i8 %1 to i32
+ %add = add nsw i32 %conv12, 16
+ br label %return
+
+; OPT-LABEL: if.end13
+if.end13: ; preds = %if.end
+; OPT: lshr
+; OPT: trunc
+; CHECK: ubfx [[REG3:x[0-9]+]], [[REG4:x[0-9]+]], #16, #16
+ %tobool16 = icmp eq i16 %x.sroa.1.0.extract.trunc, 0
+; CHECK: cbz
+ br i1 %tobool16, label %return, label %if.then17
+
+; OPT-LABEL: if.then17
+if.then17: ; preds = %if.end13
+; OPT: lshr
+; "and" should be combined to "ubfm" while "ubfm" should be removed by cse.
+; So neither of them should be in the assemble code.
+; CHECK-NOT: and
+; CHECK-NOT: ubfm
+ %idxprom20 = and i64 %x.sroa.1.0.extract.shift, 65535
+ %arrayidx21 = getelementptr inbounds [65536 x i8]* @first_ones, i64 0, i64 %idxprom20
+ %2 = load i8* %arrayidx21, align 1
+ %conv22 = zext i8 %2 to i32
+ %add23 = add nsw i32 %conv22, 32
+ br label %return
+
+return: ; preds = %if.end13, %if.then17, %if.then7, %if.then
+; CHECK: ret
+ %retval.0 = phi i32 [ %conv, %if.then ], [ %add, %if.then7 ], [ %add23, %if.then17 ], [ 64, %if.end13 ]
+ ret i32 %retval.0
+}
+
+; Make sure we do not assert if the immediate in the 'and' is wider than i64.
+; PR19503.
+; OPT-LABEL: @fct20
+; OPT: lshr
+; OPT-NOT: lshr
+; OPT: ret
+; CHECK-LABEL: fct20:
+; CHECK: ret
+define i80 @fct20(i128 %a, i128 %b) {
+entry:
+ %shr = lshr i128 %a, 18
+ %conv = trunc i128 %shr to i80
+ %tobool = icmp eq i128 %b, 0
+ br i1 %tobool, label %then, label %end
+then:
+ %and = and i128 %shr, 483673642326615442599424
+ %conv2 = trunc i128 %and to i80
+ br label %end
+end:
+ %conv3 = phi i80 [%conv, %entry], [%conv2, %then]
+ ret i80 %conv3
+}
+
+; Check if we can still catch UBFX when "AND" is used by SHL.
+; CHECK-LABEL: fct21:
+; CHECK: ubfx
+@arr = external global [8 x [64 x i64]]
+define i64 @fct21(i64 %x) {
+entry:
+ %shr = lshr i64 %x, 4
+ %and = and i64 %shr, 15
+ %arrayidx = getelementptr inbounds [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 %and
+ %0 = load i64* %arrayidx, align 8
+ ret i64 %0
+}
+
+define i16 @test_ignored_rightbits(i32 %dst, i32 %in) {
+; CHECK-LABEL: test_ignored_rightbits:
+
+ %positioned_field = shl i32 %in, 3
+ %positioned_masked_field = and i32 %positioned_field, 120
+ %masked_dst = and i32 %dst, 7
+ %insertion = or i32 %masked_dst, %positioned_masked_field
+; CHECK: {{bfm|bfi|bfxil}}
+
+ %shl16 = shl i32 %insertion, 8
+ %or18 = or i32 %shl16, %insertion
+ %conv19 = trunc i32 %or18 to i16
+; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #8, #7
+
+ ret i16 %conv19
+}
diff --git a/test/CodeGen/AArch64/arm64-blockaddress.ll b/test/CodeGen/AArch64/arm64-blockaddress.ll
new file mode 100644
index 000000000000..ac4f19e65dff
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-blockaddress.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s --check-prefix=CHECK-LINUX
+; RUN: llc < %s -mtriple=arm64-linux-gnu -code-model=large | FileCheck %s --check-prefix=CHECK-LARGE
+
+; rdar://9188695
+
+define i64 @t() nounwind ssp {
+entry:
+; CHECK-LABEL: t:
+; CHECK: adrp [[REG:x[0-9]+]], Ltmp1@PAGE
+; CHECK: add {{x[0-9]+}}, [[REG]], Ltmp1@PAGEOFF
+
+; CHECK-LINUX-LABEL: t:
+; CHECK-LINUX: adrp [[REG:x[0-9]+]], .Ltmp1
+; CHECK-LINUX: add {{x[0-9]+}}, [[REG]], :lo12:.Ltmp1
+
+; CHECK-LARGE-LABEL: t:
+; CHECK-LARGE: movz [[ADDR_REG:x[0-9]+]], #:abs_g3:[[DEST_LBL:.Ltmp[0-9]+]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g2_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g1_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g0_nc:[[DEST_LBL]]
+
+ %recover = alloca i64, align 8
+ store volatile i64 ptrtoint (i8* blockaddress(@t, %mylabel) to i64), i64* %recover, align 8
+ br label %mylabel
+
+mylabel:
+ %tmp = load volatile i64* %recover, align 8
+ ret i64 %tmp
+}
diff --git a/test/CodeGen/AArch64/arm64-build-vector.ll b/test/CodeGen/AArch64/arm64-build-vector.ll
new file mode 100644
index 000000000000..d0f6db080551
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-build-vector.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+; Check that a vector with only one non-zero lane is built up
+; intelligently.
+define void @one_lane(i32* nocapture %out_int, i32 %skip0) nounwind {
+; CHECK-LABEL: one_lane:
+; CHECK: dup.16b v[[REG:[0-9]+]], wzr
+; CHECK-NEXT: ins.b v[[REG]][0], w1
+; v and q are aliases, and str is preferred over st.16b when possible
+; rdar://11246289
+; CHECK: str q[[REG]], [x0]
+; CHECK: ret
+ %conv = trunc i32 %skip0 to i8
+ %vset_lane = insertelement <16 x i8> <i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %conv, i32 0
+ %tmp = bitcast i32* %out_int to <4 x i32>*
+ %tmp1 = bitcast <16 x i8> %vset_lane to <4 x i32>
+ store <4 x i32> %tmp1, <4 x i32>* %tmp, align 16
+ ret void
+}
+
+; Check that building a vector from floats doesn't insert an unnecessary
+; copy for lane zero.
+define <4 x float> @foo(float %a, float %b, float %c, float %d) nounwind {
+; CHECK-LABEL: foo:
+; CHECK-NOT: ins.s v0[0], v0[0]
+; CHECK: ins.s v0[1], v1[0]
+; CHECK: ins.s v0[2], v2[0]
+; CHECK: ins.s v0[3], v3[0]
+; CHECK: ret
+ %1 = insertelement <4 x float> undef, float %a, i32 0
+ %2 = insertelement <4 x float> %1, float %b, i32 1
+ %3 = insertelement <4 x float> %2, float %c, i32 2
+ %4 = insertelement <4 x float> %3, float %d, i32 3
+ ret <4 x float> %4
+}
+
+define <8 x i16> @build_all_zero(<8 x i16> %a) #1 {
+; CHECK-LABEL: build_all_zero:
+; CHECK: movz w[[GREG:[0-9]+]], #0xae80
+; CHECK-NEXT: fmov s[[FREG:[0-9]+]], w[[GREG]]
+; CHECK-NEXT: mul.8h v0, v0, v[[FREG]]
+ %b = add <8 x i16> %a, <i16 -32768, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>
+ %c = mul <8 x i16> %b, <i16 -20864, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>
+ ret <8 x i16> %c
+}
+
+; There is an optimization in the DAG Combiner as follows:
+; fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
+; -> (BUILD_VECTOR A, B, ..., C, D, ...)
+; This case checks that, when A,B and C,D have different types, there is no
+; assertion failure.
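+; For example (illustrative only, not checked by this test): concatenating two
+; v4i16 BUILD_VECTORs <a,b,c,d> and <e,f,g,h> would become the single v8i16
+; BUILD_VECTOR <a,b,c,d,e,f,g,h>.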
+define <8 x i16> @concat_2_build_vector(<4 x i16> %in0) {
+; CHECK-LABEL: concat_2_build_vector:
+; CHECK: movi
+ %vshl_n = shl <4 x i16> %in0, <i16 8, i16 8, i16 8, i16 8>
+ %vshl_n2 = shl <4 x i16> %vshl_n, <i16 9, i16 9, i16 9, i16 9>
+ %shuffle.i = shufflevector <4 x i16> %vshl_n2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle.i
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/arm64-call-tailcalls.ll b/test/CodeGen/AArch64/arm64-call-tailcalls.ll
new file mode 100644
index 000000000000..487c1d9bec3b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-call-tailcalls.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+
+@t = weak global i32 ()* null
+@x = external global i32, align 4
+
+define void @t2() {
+; CHECK-LABEL: t2:
+; CHECK: adrp x[[GOTADDR:[0-9]+]], _t@GOTPAGE
+; CHECK: ldr x[[ADDR:[0-9]+]], [x[[GOTADDR]], _t@GOTPAGEOFF]
+; CHECK: ldr x[[DEST:[0-9]+]], [x[[ADDR]]]
+; CHECK: br x[[DEST]]
+ %tmp = load i32 ()** @t
+ %tmp.upgrd.2 = tail call i32 %tmp()
+ ret void
+}
+
+define void @t3() {
+; CHECK-LABEL: t3:
+; CHECK: b _t2
+ tail call void @t2()
+ ret void
+}
+
+define double @t4(double %a) nounwind readonly ssp {
+; CHECK-LABEL: t4:
+; CHECK: b _sin
+ %tmp = tail call double @sin(double %a) nounwind readonly
+ ret double %tmp
+}
+
+define float @t5(float %a) nounwind readonly ssp {
+; CHECK-LABEL: t5:
+; CHECK: b _sinf
+ %tmp = tail call float @sinf(float %a) nounwind readonly
+ ret float %tmp
+}
+
+define void @t7() nounwind {
+; CHECK-LABEL: t7:
+; CHECK: b _foo
+; CHECK: b _bar
+
+ br i1 undef, label %bb, label %bb1.lr.ph
+
+bb1.lr.ph: ; preds = %entry
+ tail call void @bar() nounwind
+ ret void
+
+bb: ; preds = %entry
+ tail call void @foo() nounwind
+ ret void
+}
+
+define i32 @t8(i32 %x) nounwind ssp {
+; CHECK-LABEL: t8:
+; CHECK: b _a
+; CHECK: b _b
+; CHECK: b _c
+ %and = and i32 %x, 1
+ %tobool = icmp eq i32 %and, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %call = tail call i32 @a(i32 %x) nounwind
+ br label %return
+
+if.end: ; preds = %entry
+ %and1 = and i32 %x, 2
+ %tobool2 = icmp eq i32 %and1, 0
+ br i1 %tobool2, label %if.end5, label %if.then3
+
+if.then3: ; preds = %if.end
+ %call4 = tail call i32 @b(i32 %x) nounwind
+ br label %return
+
+if.end5: ; preds = %if.end
+ %call6 = tail call i32 @c(i32 %x) nounwind
+ br label %return
+
+return: ; preds = %if.end5, %if.then3, %if.then
+ %retval.0 = phi i32 [ %call, %if.then ], [ %call4, %if.then3 ], [ %call6, %if.end5 ]
+ ret i32 %retval.0
+}
+
+declare float @sinf(float) nounwind readonly
+declare double @sin(double) nounwind readonly
+declare void @bar() nounwind
+declare void @foo() nounwind
+declare i32 @a(i32)
+declare i32 @b(i32)
+declare i32 @c(i32)
diff --git a/test/CodeGen/AArch64/arm64-cast-opt.ll b/test/CodeGen/AArch64/arm64-cast-opt.ll
new file mode 100644
index 000000000000..65a871d43685
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-cast-opt.ll
@@ -0,0 +1,31 @@
+; RUN: llc -O3 -march=arm64 -mtriple arm64-apple-ios5.0.0 < %s | FileCheck %s
+; <rdar://problem/15992732>
+; The zero truncation is not necessary when the values are already properly
+; extended.
+
+@block = common global i8* null, align 8
+
+define zeroext i8 @foo(i32 %i1, i32 %i2) {
+; CHECK-LABEL: foo:
+; CHECK: cset
+; CHECK-NOT: and
+entry:
+ %idxprom = sext i32 %i1 to i64
+ %0 = load i8** @block, align 8
+ %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
+ %1 = load i8* %arrayidx, align 1
+ %idxprom1 = sext i32 %i2 to i64
+ %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
+ %2 = load i8* %arrayidx2, align 1
+ %cmp = icmp eq i8 %1, %2
+ br i1 %cmp, label %return, label %if.then
+
+if.then: ; preds = %entry
+ %cmp7 = icmp ugt i8 %1, %2
+ %conv9 = zext i1 %cmp7 to i8
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i8 [ %conv9, %if.then ], [ 1, %entry ]
+ ret i8 %retval.0
+}
diff --git a/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll b/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
new file mode 100644
index 000000000000..664a26cafe4d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
@@ -0,0 +1,190 @@
+; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-ccmp | FileCheck %s
+target triple = "arm64-apple-ios7.0.0"
+
+@channelColumns = external global i64
+@channelTracks = external global i64
+@mazeRoute = external hidden unnamed_addr global i8*, align 8
+@TOP = external global i64*
+@BOT = external global i64*
+@netsAssign = external global i64*
+
+; Function from yacr2/maze.c
+; The branch at the end of %if.then is driven by %cmp5 and %cmp6.
+; Isel converts the and i1 into two branches, and arm64-ccmp should not convert
+; it back again. %cmp6 has much higher latency than %cmp5.
+; CHECK: Maze1
+; CHECK: %if.then
+; CHECK: cmp x{{[0-9]+}}, #2
+; CHECK-NEXT: b.cc
+; CHECK: %if.then
+; CHECK: cmp x{{[0-9]+}}, #2
+; CHECK-NEXT: b.cc
+define i32 @Maze1() nounwind ssp {
+entry:
+ %0 = load i64* @channelColumns, align 8, !tbaa !0
+ %cmp90 = icmp eq i64 %0, 0
+ br i1 %cmp90, label %for.end, label %for.body
+
+for.body: ; preds = %for.inc, %entry
+ %1 = phi i64 [ %0, %entry ], [ %37, %for.inc ]
+ %i.092 = phi i64 [ 1, %entry ], [ %inc53, %for.inc ]
+ %numLeft.091 = phi i32 [ 0, %entry ], [ %numLeft.1, %for.inc ]
+ %2 = load i8** @mazeRoute, align 8, !tbaa !3
+ %arrayidx = getelementptr inbounds i8* %2, i64 %i.092
+ %3 = load i8* %arrayidx, align 1, !tbaa !1
+ %tobool = icmp eq i8 %3, 0
+ br i1 %tobool, label %for.inc, label %if.then
+
+if.then: ; preds = %for.body
+ %4 = load i64** @TOP, align 8, !tbaa !3
+ %arrayidx1 = getelementptr inbounds i64* %4, i64 %i.092
+ %5 = load i64* %arrayidx1, align 8, !tbaa !0
+ %6 = load i64** @netsAssign, align 8, !tbaa !3
+ %arrayidx2 = getelementptr inbounds i64* %6, i64 %5
+ %7 = load i64* %arrayidx2, align 8, !tbaa !0
+ %8 = load i64** @BOT, align 8, !tbaa !3
+ %arrayidx3 = getelementptr inbounds i64* %8, i64 %i.092
+ %9 = load i64* %arrayidx3, align 8, !tbaa !0
+ %arrayidx4 = getelementptr inbounds i64* %6, i64 %9
+ %10 = load i64* %arrayidx4, align 8, !tbaa !0
+ %cmp5 = icmp ugt i64 %i.092, 1
+ %cmp6 = icmp ugt i64 %10, 1
+ %or.cond = and i1 %cmp5, %cmp6
+ br i1 %or.cond, label %land.lhs.true7, label %if.else
+
+land.lhs.true7: ; preds = %if.then
+ %11 = load i64* @channelTracks, align 8, !tbaa !0
+ %add = add i64 %11, 1
+ %call = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add, i64 %10, i64 0, i64 %7, i32 -1, i32 -1)
+ %tobool8 = icmp eq i32 %call, 0
+ br i1 %tobool8, label %land.lhs.true7.if.else_crit_edge, label %if.then9
+
+land.lhs.true7.if.else_crit_edge: ; preds = %land.lhs.true7
+ %.pre = load i64* @channelColumns, align 8, !tbaa !0
+ br label %if.else
+
+if.then9: ; preds = %land.lhs.true7
+ %12 = load i8** @mazeRoute, align 8, !tbaa !3
+ %arrayidx10 = getelementptr inbounds i8* %12, i64 %i.092
+ store i8 0, i8* %arrayidx10, align 1, !tbaa !1
+ %13 = load i64** @TOP, align 8, !tbaa !3
+ %arrayidx11 = getelementptr inbounds i64* %13, i64 %i.092
+ %14 = load i64* %arrayidx11, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %14)
+ %15 = load i64** @BOT, align 8, !tbaa !3
+ %arrayidx12 = getelementptr inbounds i64* %15, i64 %i.092
+ %16 = load i64* %arrayidx12, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %16)
+ br label %for.inc
+
+if.else: ; preds = %land.lhs.true7.if.else_crit_edge, %if.then
+ %17 = phi i64 [ %.pre, %land.lhs.true7.if.else_crit_edge ], [ %1, %if.then ]
+ %cmp13 = icmp ult i64 %i.092, %17
+ %or.cond89 = and i1 %cmp13, %cmp6
+ br i1 %or.cond89, label %land.lhs.true16, label %if.else24
+
+land.lhs.true16: ; preds = %if.else
+ %18 = load i64* @channelTracks, align 8, !tbaa !0
+ %add17 = add i64 %18, 1
+ %call18 = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add17, i64 %10, i64 0, i64 %7, i32 1, i32 -1)
+ %tobool19 = icmp eq i32 %call18, 0
+ br i1 %tobool19, label %if.else24, label %if.then20
+
+if.then20: ; preds = %land.lhs.true16
+ %19 = load i8** @mazeRoute, align 8, !tbaa !3
+ %arrayidx21 = getelementptr inbounds i8* %19, i64 %i.092
+ store i8 0, i8* %arrayidx21, align 1, !tbaa !1
+ %20 = load i64** @TOP, align 8, !tbaa !3
+ %arrayidx22 = getelementptr inbounds i64* %20, i64 %i.092
+ %21 = load i64* %arrayidx22, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %21)
+ %22 = load i64** @BOT, align 8, !tbaa !3
+ %arrayidx23 = getelementptr inbounds i64* %22, i64 %i.092
+ %23 = load i64* %arrayidx23, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %23)
+ br label %for.inc
+
+if.else24: ; preds = %land.lhs.true16, %if.else
+ br i1 %cmp5, label %land.lhs.true26, label %if.else36
+
+land.lhs.true26: ; preds = %if.else24
+ %24 = load i64* @channelTracks, align 8, !tbaa !0
+ %cmp27 = icmp ult i64 %7, %24
+ br i1 %cmp27, label %land.lhs.true28, label %if.else36
+
+land.lhs.true28: ; preds = %land.lhs.true26
+ %add29 = add i64 %24, 1
+ %call30 = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 0, i64 %7, i64 %add29, i64 %10, i32 -1, i32 1)
+ %tobool31 = icmp eq i32 %call30, 0
+ br i1 %tobool31, label %if.else36, label %if.then32
+
+if.then32: ; preds = %land.lhs.true28
+ %25 = load i8** @mazeRoute, align 8, !tbaa !3
+ %arrayidx33 = getelementptr inbounds i8* %25, i64 %i.092
+ store i8 0, i8* %arrayidx33, align 1, !tbaa !1
+ %26 = load i64** @TOP, align 8, !tbaa !3
+ %arrayidx34 = getelementptr inbounds i64* %26, i64 %i.092
+ %27 = load i64* %arrayidx34, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %27)
+ %28 = load i64** @BOT, align 8, !tbaa !3
+ %arrayidx35 = getelementptr inbounds i64* %28, i64 %i.092
+ %29 = load i64* %arrayidx35, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %29)
+ br label %for.inc
+
+if.else36: ; preds = %land.lhs.true28, %land.lhs.true26, %if.else24
+ %30 = load i64* @channelColumns, align 8, !tbaa !0
+ %cmp37 = icmp ult i64 %i.092, %30
+ br i1 %cmp37, label %land.lhs.true38, label %if.else48
+
+land.lhs.true38: ; preds = %if.else36
+ %31 = load i64* @channelTracks, align 8, !tbaa !0
+ %cmp39 = icmp ult i64 %7, %31
+ br i1 %cmp39, label %land.lhs.true40, label %if.else48
+
+land.lhs.true40: ; preds = %land.lhs.true38
+ %add41 = add i64 %31, 1
+ %call42 = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 0, i64 %7, i64 %add41, i64 %10, i32 1, i32 1)
+ %tobool43 = icmp eq i32 %call42, 0
+ br i1 %tobool43, label %if.else48, label %if.then44
+
+if.then44: ; preds = %land.lhs.true40
+ %32 = load i8** @mazeRoute, align 8, !tbaa !3
+ %arrayidx45 = getelementptr inbounds i8* %32, i64 %i.092
+ store i8 0, i8* %arrayidx45, align 1, !tbaa !1
+ %33 = load i64** @TOP, align 8, !tbaa !3
+ %arrayidx46 = getelementptr inbounds i64* %33, i64 %i.092
+ %34 = load i64* %arrayidx46, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %34)
+ %35 = load i64** @BOT, align 8, !tbaa !3
+ %arrayidx47 = getelementptr inbounds i64* %35, i64 %i.092
+ %36 = load i64* %arrayidx47, align 8, !tbaa !0
+ tail call fastcc void @CleanNet(i64 %36)
+ br label %for.inc
+
+if.else48: ; preds = %land.lhs.true40, %land.lhs.true38, %if.else36
+ %inc = add nsw i32 %numLeft.091, 1
+ br label %for.inc
+
+for.inc: ; preds = %if.else48, %if.then44, %if.then32, %if.then20, %if.then9, %for.body
+ %numLeft.1 = phi i32 [ %numLeft.091, %if.then9 ], [ %numLeft.091, %if.then20 ], [ %numLeft.091, %if.then32 ], [ %numLeft.091, %if.then44 ], [ %inc, %if.else48 ], [ %numLeft.091, %for.body ]
+ %inc53 = add i64 %i.092, 1
+ %37 = load i64* @channelColumns, align 8, !tbaa !0
+ %cmp = icmp ugt i64 %inc53, %37
+ br i1 %cmp, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ %numLeft.0.lcssa = phi i32 [ 0, %entry ], [ %numLeft.1, %for.inc ]
+ ret i32 %numLeft.0.lcssa
+}
+
+; Materializable
+declare hidden fastcc i32 @Maze1Mech(i64, i64, i64, i64, i64, i32, i32) nounwind ssp
+
+; Materializable
+declare hidden fastcc void @CleanNet(i64) nounwind ssp
+
+!0 = metadata !{metadata !"long", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"any pointer", metadata !1}
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
new file mode 100644
index 000000000000..63965f9538b5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -0,0 +1,289 @@
+; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-ccmp -aarch64-stress-ccmp | FileCheck %s
+target triple = "arm64-apple-ios"
+
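+; For reference: "ccmp Wn, #imm, #nzcv, cond" performs the compare only if
+; cond holds on the current flags; otherwise it sets NZCV to the literal
+; #nzcv value. This lets chained compares feed a single conditional branch,
+; as the tests below check.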
+; CHECK: single_same
+; CHECK: cmp w0, #5
+; CHECK-NEXT: ccmp w1, #17, #4, ne
+; CHECK-NEXT: b.ne
+; CHECK: %if.then
+; CHECK: bl _foo
+; CHECK: %if.end
+define i32 @single_same(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp eq i32 %a, 5
+ %cmp1 = icmp eq i32 %b, 17
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+
+; Different condition codes for the two compares.
+; CHECK: single_different
+; CHECK: cmp w0, #6
+; CHECK-NEXT: ccmp w1, #17, #0, ge
+; CHECK-NEXT: b.eq
+; CHECK: %if.then
+; CHECK: bl _foo
+; CHECK: %if.end
+define i32 @single_different(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp sle i32 %a, 5
+ %cmp1 = icmp ne i32 %b, 17
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+
+; Second block clobbers the flags, can't convert (easily).
+; CHECK: single_flagclobber
+; CHECK: cmp
+; CHECK: b.eq
+; CHECK: cmp
+; CHECK: b.gt
+define i32 @single_flagclobber(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp eq i32 %a, 5
+ br i1 %cmp, label %if.then, label %lor.lhs.false
+
+lor.lhs.false: ; preds = %entry
+ %cmp1 = icmp slt i32 %b, 7
+ %mul = shl nsw i32 %b, 1
+ %add = add nsw i32 %b, 1
+ %cond = select i1 %cmp1, i32 %mul, i32 %add
+ %cmp2 = icmp slt i32 %cond, 17
+ br i1 %cmp2, label %if.then, label %if.end
+
+if.then: ; preds = %lor.lhs.false, %entry
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end: ; preds = %if.then, %lor.lhs.false
+ ret i32 7
+}
+
+; Second block clobbers the flags and ends with a tbz terminator.
+; CHECK: single_flagclobber_tbz
+; CHECK: cmp
+; CHECK: b.eq
+; CHECK: cmp
+; CHECK: tbz
+define i32 @single_flagclobber_tbz(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp eq i32 %a, 5
+ br i1 %cmp, label %if.then, label %lor.lhs.false
+
+lor.lhs.false: ; preds = %entry
+ %cmp1 = icmp slt i32 %b, 7
+ %mul = shl nsw i32 %b, 1
+ %add = add nsw i32 %b, 1
+ %cond = select i1 %cmp1, i32 %mul, i32 %add
+ %and = and i32 %cond, 8
+ %cmp2 = icmp ne i32 %and, 0
+ br i1 %cmp2, label %if.then, label %if.end
+
+if.then: ; preds = %lor.lhs.false, %entry
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end: ; preds = %if.then, %lor.lhs.false
+ ret i32 7
+}
+
+; Speculatively execute a division even though the divisor may be zero.
+; The sdiv/udiv instructions do not trap when the divisor is zero, so they are
+; safe to speculate.
+; CHECK: speculate_division
+; CHECK-NOT: cmp
+; CHECK: sdiv
+; CHECK: cmp
+; CHECK-NEXT: ccmp
+define i32 @speculate_division(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ br i1 %cmp, label %land.lhs.true, label %if.end
+
+land.lhs.true:
+ %div = sdiv i32 %b, %a
+ %cmp1 = icmp slt i32 %div, 17
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+
+; Floating point compare.
+; CHECK: single_fcmp
+; CHECK: cmp
+; CHECK-NOT: b.
+; CHECK: fccmp {{.*}}, #8, ge
+; CHECK: b.lt
+define i32 @single_fcmp(i32 %a, float %b) nounwind ssp {
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ br i1 %cmp, label %land.lhs.true, label %if.end
+
+land.lhs.true:
+ %conv = sitofp i32 %a to float
+ %div = fdiv float %b, %conv
+ %cmp1 = fcmp oge float %div, 1.700000e+01
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+
+; Chain multiple compares.
+; CHECK: multi_different
+; CHECK: cmp
+; CHECK: ccmp
+; CHECK: ccmp
+; CHECK: b.
+define void @multi_different(i32 %a, i32 %b, i32 %c) nounwind ssp {
+entry:
+ %cmp = icmp sgt i32 %a, %b
+ br i1 %cmp, label %land.lhs.true, label %if.end
+
+land.lhs.true:
+ %div = sdiv i32 %b, %a
+ %cmp1 = icmp eq i32 %div, 5
+ %cmp4 = icmp sgt i32 %div, %c
+ %or.cond = and i1 %cmp1, %cmp4
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; Convert a cbz in the head block.
+; CHECK: cbz_head
+; CHECK: cmp w0, #0
+; CHECK: ccmp
+define i32 @cbz_head(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp ne i32 %b, 17
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+
+; Check that the immediate operand is in range. The ccmp instruction encodes a
+; smaller range of immediates than subs/adds.
+; The ccmp immediates must be in the range 0-31.
+; CHECK: immediate_range
+; CHECK-NOT: ccmp
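+; Here %b is compared against 32, which does not fit in the 5-bit (0-31) ccmp
+; immediate field, so no ccmp can be formed for this pattern.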
+define i32 @immediate_range(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp eq i32 %a, 5
+ %cmp1 = icmp eq i32 %b, 32
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+
+; Convert a cbz in the second block.
+; CHECK: cbz_second
+; CHECK: cmp w0, #0
+; CHECK: ccmp w1, #0, #0, ne
+; CHECK: b.eq
+define i32 @cbz_second(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp ne i32 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+
+; Convert a cbnz in the second block.
+; CHECK: cbnz_second
+; CHECK: cmp w0, #0
+; CHECK: ccmp w1, #0, #4, ne
+; CHECK: b.ne
+define i32 @cbnz_second(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp eq i32 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ %call = tail call i32 @foo() nounwind
+ br label %if.end
+
+if.end:
+ ret i32 7
+}
+declare i32 @foo()
+
+%str1 = type { %str2 }
+%str2 = type { [24 x i8], i8*, i32, %str1*, i32, [4 x i8], %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, i8*, i8, i8*, %str1*, i8* }
+
+; Test case distilled from 126.gcc.
+; The phi in sw.bb.i.i gets multiple operands for the %entry predecessor.
+; CHECK: build_modify_expr
+define void @build_modify_expr() nounwind ssp {
+entry:
+ switch i32 undef, label %sw.bb.i.i [
+ i32 69, label %if.end85
+ i32 70, label %if.end85
+ i32 71, label %if.end85
+ i32 72, label %if.end85
+ i32 73, label %if.end85
+ i32 105, label %if.end85
+ i32 106, label %if.end85
+ ]
+
+if.end85:
+ ret void
+
+sw.bb.i.i:
+ %ref.tr.i.i = phi %str1* [ %0, %sw.bb.i.i ], [ undef, %entry ]
+ %operands.i.i = getelementptr inbounds %str1* %ref.tr.i.i, i64 0, i32 0, i32 2
+ %arrayidx.i.i = bitcast i32* %operands.i.i to %str1**
+ %0 = load %str1** %arrayidx.i.i, align 8
+ %code1.i.i.phi.trans.insert = getelementptr inbounds %str1* %0, i64 0, i32 0, i32 0, i64 16
+ br label %sw.bb.i.i
+}
diff --git a/test/CodeGen/AArch64/arm64-clrsb.ll b/test/CodeGen/AArch64/arm64-clrsb.ll
new file mode 100644
index 000000000000..042e52e5e781
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-clrsb.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) #0
+declare i64 @llvm.ctlz.i64(i64, i1) #1
+
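+; The ashr/xor/shl/or/ctlz sequences below are the usual expansion of a
+; count-leading-redundant-sign-bits (clrsb) computation; the backend is
+; expected to match each of them to a single cls instruction.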
+; Function Attrs: nounwind ssp
+define i32 @clrsb32(i32 %x) #2 {
+entry:
+ %shr = ashr i32 %x, 31
+ %xor = xor i32 %shr, %x
+ %mul = shl i32 %xor, 1
+ %add = or i32 %mul, 1
+ %0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 false)
+
+ ret i32 %0
+; CHECK-LABEL: clrsb32
+; CHECK: cls [[TEMP:w[0-9]+]], [[TEMP]]
+}
+
+; Function Attrs: nounwind ssp
+define i64 @clrsb64(i64 %x) #3 {
+entry:
+ %shr = ashr i64 %x, 63
+ %xor = xor i64 %shr, %x
+ %mul = shl nsw i64 %xor, 1
+ %add = or i64 %mul, 1
+ %0 = tail call i64 @llvm.ctlz.i64(i64 %add, i1 false)
+
+ ret i64 %0
+; CHECK-LABEL: clrsb64
+; CHECK: cls [[TEMP:x[0-9]+]], [[TEMP]]
+}
diff --git a/test/CodeGen/AArch64/arm64-coalesce-ext.ll b/test/CodeGen/AArch64/arm64-coalesce-ext.ll
new file mode 100644
index 000000000000..9420bf3bb593
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-coalesce-ext.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=arm64 -mtriple=arm64-apple-darwin < %s | FileCheck %s
+; Check that the peephole optimizer knows about sext and zext instructions.
+; CHECK: test1sext
+define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
+ %C = add i64 %A, %B
+ ; CHECK: add x[[SUM:[0-9]+]], x0, x1
+ %D = trunc i64 %C to i32
+ %E = shl i64 %C, 32
+ %F = ashr i64 %E, 32
+ ; CHECK: sxtw x[[EXT:[0-9]+]], w[[SUM]]
+ store volatile i64 %F, i64 *%P2
+ ; CHECK: str x[[EXT]]
+ store volatile i32 %D, i32* %P
+ ; Reuse low bits of extended register, don't extend live range of SUM.
+ ; CHECK: str w[[SUM]]
+ ret i32 %D
+}
diff --git a/test/CodeGen/AArch64/arm64-code-model-large-abs.ll b/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
new file mode 100644
index 000000000000..264da2da25bc
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
@@ -0,0 +1,72 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large < %s | FileCheck %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define i8* @global_addr() {
+; CHECK-LABEL: global_addr:
+ ret i8* @var8
+ ; The movz/movk calculation should end up returned directly in x0.
+; CHECK: movz x0, #:abs_g3:var8
+; CHECK: movk x0, #:abs_g2_nc:var8
+; CHECK: movk x0, #:abs_g1_nc:var8
+; CHECK: movk x0, #:abs_g0_nc:var8
+; CHECK-NEXT: ret
+}
+
+define i8 @global_i8() {
+; CHECK-LABEL: global_i8:
+ %val = load i8* @var8
+ ret i8 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var8
+; CHECK: ldrb w0, [x[[ADDR_REG]]]
+}
+
+define i16 @global_i16() {
+; CHECK-LABEL: global_i16:
+ %val = load i16* @var16
+ ret i16 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var16
+; CHECK: ldrh w0, [x[[ADDR_REG]]]
+}
+
+define i32 @global_i32() {
+; CHECK-LABEL: global_i32:
+ %val = load i32* @var32
+ ret i32 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var32
+; CHECK: ldr w0, [x[[ADDR_REG]]]
+}
+
+define i64 @global_i64() {
+; CHECK-LABEL: global_i64:
+ %val = load i64* @var64
+ ret i64 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var64
+; CHECK: ldr x0, [x[[ADDR_REG]]]
+}
+
+define <2 x i64> @constpool() {
+; CHECK-LABEL: constpool:
+ ret <2 x i64> <i64 123456789, i64 987654321100>
+
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:[[CPADDR:.LCPI[0-9]+_[0-9]+]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:[[CPADDR]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:[[CPADDR]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:[[CPADDR]]
+; CHECK: ldr q0, [x[[ADDR_REG]]]
+}
diff --git a/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll b/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
new file mode 100644
index 000000000000..81cee38420aa
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mtriple=arm64-apple-ios -O3 -aarch64-collect-loh -aarch64-collect-loh-bb-only=true -aarch64-collect-loh-pre-collect-register=false < %s -o - | FileCheck %s
+; Check that the LOH analysis does not crash when the analysed chain
+; contains instructions that are filtered out.
+;
+; Before the fix for <rdar://problem/16041712>, these cases were removed
+; from the main container. Now, the deterministic container does not allow
+; removing arbitrary values, so we have to live with garbage values.
+; <rdar://problem/16041712>
+
+%"class.H4ISP::H4ISPDevice" = type { i32 (%"class.H4ISP::H4ISPDevice"*, i32, i8*, i8*)*, i8*, i32*, %"class.H4ISP::H4ISPCameraManager"* }
+
+%"class.H4ISP::H4ISPCameraManager" = type opaque
+
+declare i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"*)
+
+@pH4ISPDevice = hidden global %"class.H4ISP::H4ISPDevice"* null, align 8
+
+; CHECK-LABEL: _foo:
+; CHECK: ret
+; CHECK-NOT: .loh AdrpLdrGotLdr
+define void @foo() {
+entry:
+ br label %if.then83
+if.then83: ; preds = %if.end81
+ %tmp = load %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
+ %call84 = call i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"* %tmp) #19
+ tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27}"()
+ %tmp2 = load %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
+ tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x28}"()
+ %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice"* %tmp2, i64 0, i32 3
+ %tmp3 = load %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8
+ %tobool.i269 = icmp eq %"class.H4ISP::H4ISPCameraManager"* %tmp3, null
+ br i1 %tobool.i269, label %if.then83, label %end
+end:
+ ret void
+}
+
diff --git a/test/CodeGen/AArch64/arm64-collect-loh-str.ll b/test/CodeGen/AArch64/arm64-collect-loh-str.ll
new file mode 100644
index 000000000000..d7bc00e318f7
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-collect-loh-str.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s
+; Test case for <rdar://problem/15942912>.
+; AdrpAddStr cannot be used when the store uses the same
+; register as address and value. Indeed, the related LOH,
+; if applied, may completely remove the definition or
+; at least provide a wrong one (with the offset folded
+; into the definition).
+
+%struct.anon = type { i32*, i32** }
+
+@pptp_wan_head = internal global %struct.anon zeroinitializer, align 8
+
+; CHECK-LABEL: _pptp_wan_init
+; CHECK: ret
+; CHECK-NOT: AdrpAddStr
+define i32 @pptp_wan_init() {
+entry:
+ store i32* null, i32** getelementptr inbounds (%struct.anon* @pptp_wan_head, i64 0, i32 0), align 8
+ store i32** getelementptr inbounds (%struct.anon* @pptp_wan_head, i64 0, i32 0), i32*** getelementptr inbounds (%struct.anon* @pptp_wan_head, i64 0, i32 1), align 8
+ ret i32 0
+}
+
+
diff --git a/test/CodeGen/AArch64/arm64-collect-loh.ll b/test/CodeGen/AArch64/arm64-collect-loh.ll
new file mode 100644
index 000000000000..6d73daac6209
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-collect-loh.ll
@@ -0,0 +1,53 @@
+; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s --check-prefix=CHECK-ELF
+
+; CHECK-ELF-NOT: .loh
+; CHECK-ELF-NOT: AdrpAdrp
+; CHECK-ELF-NOT: AdrpAdd
+; CHECK-ELF-NOT: AdrpLdrGot
+
+@a = internal unnamed_addr global i32 0, align 4
+@b = external global i32
+
+; Function Attrs: noinline nounwind ssp
+define void @foo(i32 %t) {
+entry:
+ %tmp = load i32* @a, align 4
+ %add = add nsw i32 %tmp, %t
+ store i32 %add, i32* @a, align 4
+ ret void
+}
+
+; Function Attrs: nounwind ssp
+; Testcase for <rdar://problem/15438605>: AdrpAdrp reuse is valid only when the first adrp
+; dominates the second.
+; The first adrp comes from the loading of 'a' and the second from the loading of 'b'.
+; 'a' is loaded in if.then, 'b' in if.end4; if.then does not dominate if.end4.
+; CHECK-LABEL: _test
+; CHECK: ret
+; CHECK-NOT: .loh AdrpAdrp
+define i32 @test(i32 %t) {
+entry:
+ %cmp = icmp sgt i32 %t, 5
+ br i1 %cmp, label %if.then, label %if.end4
+
+if.then: ; preds = %entry
+ %tmp = load i32* @a, align 4
+ %add = add nsw i32 %tmp, %t
+ %cmp1 = icmp sgt i32 %add, 12
+ br i1 %cmp1, label %if.then2, label %if.end4
+
+if.then2: ; preds = %if.then
+ tail call void @foo(i32 %add)
+ %tmp1 = load i32* @a, align 4
+ br label %if.end4
+
+if.end4: ; preds = %if.then2, %if.then, %entry
+ %t.addr.0 = phi i32 [ %tmp1, %if.then2 ], [ %t, %if.then ], [ %t, %entry ]
+ %tmp2 = load i32* @b, align 4
+ %add5 = add nsw i32 %tmp2, %t.addr.0
+ tail call void @foo(i32 %add5)
+ %tmp3 = load i32* @b, align 4
+ %add6 = add nsw i32 %tmp3, %t.addr.0
+ ret i32 %add6
+}
diff --git a/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll b/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll
new file mode 100644
index 000000000000..f65b11612828
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-neon < %s
+
+; The DAG combiner previously decided to use a vector load/store for this
+; struct copy. This probably shouldn't happen without NEON, but the most
+; important thing is that it compiles.
+
+define void @store_combine() nounwind {
+ %src = alloca { double, double }, align 8
+ %dst = alloca { double, double }, align 8
+
+ %src.realp = getelementptr inbounds { double, double }* %src, i32 0, i32 0
+ %src.real = load double* %src.realp
+ %src.imagp = getelementptr inbounds { double, double }* %src, i32 0, i32 1
+ %src.imag = load double* %src.imagp
+
+ %dst.realp = getelementptr inbounds { double, double }* %dst, i32 0, i32 0
+ %dst.imagp = getelementptr inbounds { double, double }* %dst, i32 0, i32 1
+ store double %src.real, double* %dst.realp
+ store double %src.imag, double* %dst.imagp
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-complex-ret.ll b/test/CodeGen/AArch64/arm64-complex-ret.ll
new file mode 100644
index 000000000000..93d50a59861d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-complex-ret.ll
@@ -0,0 +1,7 @@
+; RUN: llc -march=arm64 -o - %s | FileCheck %s
+
+define { i192, i192, i21, i192 } @foo(i192) {
+; CHECK-LABEL: foo:
+; CHECK: stp xzr, xzr, [x8]
+ ret { i192, i192, i21, i192 } {i192 0, i192 1, i21 2, i192 3}
+}
diff --git a/test/CodeGen/AArch64/arm64-const-addr.ll b/test/CodeGen/AArch64/arm64-const-addr.ll
new file mode 100644
index 000000000000..c55a9226cc7a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-const-addr.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mtriple=arm64-darwin-unknown < %s | FileCheck %s
+
+%T = type { i32, i32, i32, i32 }
+
+; Test that the constant base address gets materialized only once.
+define i32 @test1() nounwind {
+; CHECK-LABEL: test1
+; CHECK: movz w8, #0x40f, lsl #16
+; CHECK-NEXT: movk w8, #0xc000
+; CHECK-NEXT: ldp w9, w10, [x8, #4]
+; CHECK: ldr w8, [x8, #12]
+ %at = inttoptr i64 68141056 to %T*
+ %o1 = getelementptr %T* %at, i32 0, i32 1
+ %t1 = load i32* %o1
+ %o2 = getelementptr %T* %at, i32 0, i32 2
+ %t2 = load i32* %o2
+ %a1 = add i32 %t1, %t2
+ %o3 = getelementptr %T* %at, i32 0, i32 3
+ %t3 = load i32* %o3
+ %a2 = add i32 %a1, %t3
+ ret i32 %a2
+}
+
diff --git a/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
new file mode 100644
index 000000000000..7123e5e0b235
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -march=arm64 | FileCheck %s
+
+
+define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) {
+; CHECK: fptosi_v4f64_to_v4i16
+; CHECK-DAG: fcvtzs v[[LHS:[0-9]+]].2d, v1.2d
+; CHECK-DAG: fcvtzs v[[RHS:[0-9]+]].2d, v0.2d
+; CHECK-DAG: xtn v[[LHS_NA:[0-9]+]].2s, v[[LHS]].2d
+; CHECK-DAG: xtn v[[RHS_NA:[0-9]+]].2s, v[[RHS]].2d
+; CHECK: uzp1 v0.4h, v[[RHS_NA]].4h, v[[LHS_NA]].4h
+ %tmp1 = load <4 x double>* %ptr
+ %tmp2 = fptosi <4 x double> %tmp1 to <4 x i16>
+ ret <4 x i16> %tmp2
+}
+
+define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) {
+; CHECK: fptosi_v4f64_to_v4i8
+; CHECK-DAG: fcvtzs v[[CONV3:[0-9]+]].2d, v3.2d
+; CHECK-DAG: fcvtzs v[[CONV2:[0-9]+]].2d, v2.2d
+; CHECK-DAG: fcvtzs v[[CONV1:[0-9]+]].2d, v1.2d
+; CHECK-DAG: fcvtzs v[[CONV0:[0-9]+]].2d, v0.2d
+; CHECK-DAG: xtn v[[NA3:[0-9]+]].2s, v[[CONV3]].2d
+; CHECK-DAG: xtn v[[NA2:[0-9]+]].2s, v[[CONV2]].2d
+; CHECK-DAG: xtn v[[NA1:[0-9]+]].2s, v[[CONV1]].2d
+; CHECK-DAG: xtn v[[NA0:[0-9]+]].2s, v[[CONV0]].2d
+; CHECK-DAG: uzp1 v[[TMP1:[0-9]+]].4h, v[[CONV2]].4h, v[[CONV3]].4h
+; CHECK-DAG: uzp1 v[[TMP2:[0-9]+]].4h, v[[CONV0]].4h, v[[CONV1]].4h
+; CHECK: uzp1 v0.8b, v[[TMP2]].8b, v[[TMP1]].8b
+ %tmp1 = load <8 x double>* %ptr
+ %tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
+ ret <8 x i8> %tmp2
+}
+
diff --git a/test/CodeGen/AArch64/arm64-copy-tuple.ll b/test/CodeGen/AArch64/arm64-copy-tuple.ll
new file mode 100644
index 000000000000..1803787d729f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-copy-tuple.ll
@@ -0,0 +1,146 @@
+; RUN: llc -mtriple=arm64-apple-ios -o - %s | FileCheck %s
+
+; The main purpose of this test is to find out whether copyPhysReg can deal with
+; the memmove-like situation arising in tuples, where an early copy can clobber
+; the value needed by a later one if the tuples overlap.
+
+; We use dummy inline asm to force LLVM to generate a COPY between the registers
+; we want by clobbering all the others.
+
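+; When the source and destination tuples overlap (e.g. copying D0_D1 into
+; D1_D2), the copies must be emitted in an order that never clobbers a source
+; register before it has been read; the mov.8b/mov.16b sequences checked below
+; enforce such an order.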
+define void @test_D1D2_from_D0D1(i8* %addr) #0 {
+; CHECK-LABEL: test_D1D2_from_D0D1:
+; CHECK: mov.8b v2, v1
+; CHECK: mov.8b v1, v0
+entry:
+ %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
+ %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+ %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
+ %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
+ tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+
+ tail call void asm sideeffect "", "~{v0},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+ ret void
+}
+
+define void @test_D0D1_from_D1D2(i8* %addr) #0 {
+; CHECK-LABEL: test_D0D1_from_D1D2:
+; CHECK: mov.8b v0, v1
+; CHECK: mov.8b v1, v2
+entry:
+ %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
+ %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+ %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
+ %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
+ tail call void asm sideeffect "", "~{v0},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+
+ tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+ ret void
+}
+
+define void @test_D0D1_from_D31D0(i8* %addr) #0 {
+; CHECK-LABEL: test_D0D1_from_D31D0:
+; CHECK: mov.8b v1, v0
+; CHECK: mov.8b v0, v31
+entry:
+ %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
+ %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+ %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
+ %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
+ tail call void asm sideeffect "", "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+
+ tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+ ret void
+}
+
+define void @test_D31D0_from_D0D1(i8* %addr) #0 {
+; CHECK-LABEL: test_D31D0_from_D0D1:
+; CHECK: mov.8b v31, v0
+; CHECK: mov.8b v0, v1
+entry:
+ %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
+ %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+ %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
+ %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
+ tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+
+ tail call void asm sideeffect "", "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30}"()
+ tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+ ret void
+}
+
+define void @test_D2D3D4_from_D0D1D2(i8* %addr) #0 {
+; CHECK-LABEL: test_D2D3D4_from_D0D1D2:
+; CHECK: mov.8b v4, v2
+; CHECK: mov.8b v3, v1
+; CHECK: mov.8b v2, v0
+entry:
+ %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
+ %vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+ %vec0 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 0
+ %vec1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 1
+ %vec2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 2
+
+ tail call void asm sideeffect "", "~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, i8* %addr)
+
+ tail call void asm sideeffect "", "~{v0},~{v1},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, i8* %addr)
+ ret void
+}
+
+define void @test_Q0Q1Q2_from_Q1Q2Q3(i8* %addr) #0 {
+; CHECK-LABEL: test_Q0Q1Q2_from_Q1Q2Q3:
+; CHECK: mov.16b v0, v1
+; CHECK: mov.16b v1, v2
+; CHECK: mov.16b v2, v3
+entry:
+ %addr_v16i8 = bitcast i8* %addr to <16 x i8>*
+ %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
+ %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0
+ %vec1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 1
+ %vec2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 2
+ tail call void asm sideeffect "", "~{v0},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, i8* %addr)
+
+ tail call void asm sideeffect "", "~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, i8* %addr)
+ ret void
+}
+
+define void @test_Q1Q2Q3Q4_from_Q30Q31Q0Q1(i8* %addr) #0 {
+; CHECK-LABEL: test_Q1Q2Q3Q4_from_Q30Q31Q0Q1:
+; CHECK: mov.16b v4, v1
+; CHECK: mov.16b v3, v0
+; CHECK: mov.16b v2, v31
+; CHECK: mov.16b v1, v30
+ %addr_v16i8 = bitcast i8* %addr to <16 x i8>*
+ %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
+ %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0
+ %vec1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 1
+ %vec2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 2
+ %vec3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 3
+
+ tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29}"()
+ tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, i8* %addr)
+
+ tail call void asm sideeffect "", "~{v0},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+ tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, i8* %addr)
+ ret void
+}
+
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>*)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>*)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i8>*)
+
+declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
diff --git a/test/CodeGen/AArch64/arm64-crc32.ll b/test/CodeGen/AArch64/arm64-crc32.ll
new file mode 100644
index 000000000000..d3099e6bb132
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-crc32.ll
@@ -0,0 +1,71 @@
+; RUN: llc -march=arm64 -mattr=+crc -o - %s | FileCheck %s
+
+define i32 @test_crc32b(i32 %cur, i8 %next) {
+; CHECK-LABEL: test_crc32b:
+; CHECK: crc32b w0, w0, w1
+ %bits = zext i8 %next to i32
+ %val = call i32 @llvm.aarch64.crc32b(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32h(i32 %cur, i16 %next) {
+; CHECK-LABEL: test_crc32h:
+; CHECK: crc32h w0, w0, w1
+ %bits = zext i16 %next to i32
+ %val = call i32 @llvm.aarch64.crc32h(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32w(i32 %cur, i32 %next) {
+; CHECK-LABEL: test_crc32w:
+; CHECK: crc32w w0, w0, w1
+ %val = call i32 @llvm.aarch64.crc32w(i32 %cur, i32 %next)
+ ret i32 %val
+}
+
+define i32 @test_crc32x(i32 %cur, i64 %next) {
+; CHECK-LABEL: test_crc32x:
+; CHECK: crc32x w0, w0, x1
+ %val = call i32 @llvm.aarch64.crc32x(i32 %cur, i64 %next)
+ ret i32 %val
+}
+
+define i32 @test_crc32cb(i32 %cur, i8 %next) {
+; CHECK-LABEL: test_crc32cb:
+; CHECK: crc32cb w0, w0, w1
+ %bits = zext i8 %next to i32
+ %val = call i32 @llvm.aarch64.crc32cb(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32ch(i32 %cur, i16 %next) {
+; CHECK-LABEL: test_crc32ch:
+; CHECK: crc32ch w0, w0, w1
+ %bits = zext i16 %next to i32
+ %val = call i32 @llvm.aarch64.crc32ch(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32cw(i32 %cur, i32 %next) {
+; CHECK-LABEL: test_crc32cw:
+; CHECK: crc32cw w0, w0, w1
+ %val = call i32 @llvm.aarch64.crc32cw(i32 %cur, i32 %next)
+ ret i32 %val
+}
+
+define i32 @test_crc32cx(i32 %cur, i64 %next) {
+; CHECK-LABEL: test_crc32cx:
+; CHECK: crc32cx w0, w0, x1
+ %val = call i32 @llvm.aarch64.crc32cx(i32 %cur, i64 %next)
+ ret i32 %val
+}
+
+declare i32 @llvm.aarch64.crc32b(i32, i32)
+declare i32 @llvm.aarch64.crc32h(i32, i32)
+declare i32 @llvm.aarch64.crc32w(i32, i32)
+declare i32 @llvm.aarch64.crc32x(i32, i64)
+
+declare i32 @llvm.aarch64.crc32cb(i32, i32)
+declare i32 @llvm.aarch64.crc32ch(i32, i32)
+declare i32 @llvm.aarch64.crc32cw(i32, i32)
+declare i32 @llvm.aarch64.crc32cx(i32, i64)
diff --git a/test/CodeGen/AArch64/arm64-crypto.ll b/test/CodeGen/AArch64/arm64-crypto.ll
new file mode 100644
index 000000000000..2908b336b1bd
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-crypto.ll
@@ -0,0 +1,135 @@
+; RUN: llc -march=arm64 -mattr=crypto -aarch64-neon-syntax=apple -o - %s | FileCheck %s
+
+declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)
+declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data, <16 x i8> %key)
+declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %data)
+declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %data)
+
+define <16 x i8> @test_aese(<16 x i8> %data, <16 x i8> %key) {
+; CHECK-LABEL: test_aese:
+; CHECK: aese.16b v0, v1
+ %res = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_aesd(<16 x i8> %data, <16 x i8> %key) {
+; CHECK-LABEL: test_aesd:
+; CHECK: aesd.16b v0, v1
+ %res = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data, <16 x i8> %key)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_aesmc(<16 x i8> %data) {
+; CHECK-LABEL: test_aesmc:
+; CHECK: aesmc.16b v0, v0
+ %res = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %data)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @test_aesimc(<16 x i8> %data) {
+; CHECK-LABEL: test_aesimc:
+; CHECK: aesimc.16b v0, v0
+ %res = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %data)
+ ret <16 x i8> %res
+}
+
+declare <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+declare <4 x i32> @llvm.aarch64.crypto.sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+declare <4 x i32> @llvm.aarch64.crypto.sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+declare i32 @llvm.aarch64.crypto.sha1h(i32 %hash_e)
+declare <4 x i32> @llvm.aarch64.crypto.sha1su0(<4 x i32> %wk0_3, <4 x i32> %wk4_7, <4 x i32> %wk8_11)
+declare <4 x i32> @llvm.aarch64.crypto.sha1su1(<4 x i32> %wk0_3, <4 x i32> %wk12_15)
+
+define <4 x i32> @test_sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
+; CHECK-LABEL: test_sha1c:
+; CHECK: fmov [[HASH_E:s[0-9]+]], w0
+; CHECK: sha1c.4s q0, [[HASH_E]], v1
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+ ret <4 x i32> %res
+}
+
+; <rdar://problem/14742333> Incomplete removal of unnecessary FMOV instructions in intrinsic SHA1
+define <4 x i32> @test_sha1c_in_a_row(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
+; CHECK-LABEL: test_sha1c_in_a_row:
+; CHECK: fmov [[HASH_E:s[0-9]+]], w0
+; CHECK: sha1c.4s q[[SHA1RES:[0-9]+]], [[HASH_E]], v1
+; CHECK-NOT: fmov
+; CHECK: sha1c.4s q0, s[[SHA1RES]], v1
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+ %extract = extractelement <4 x i32> %res, i32 0
+ %res2 = call <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %hash_abcd, i32 %extract, <4 x i32> %wk)
+ ret <4 x i32> %res2
+}
+
+define <4 x i32> @test_sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
+; CHECK-LABEL: test_sha1p:
+; CHECK: fmov [[HASH_E:s[0-9]+]], w0
+; CHECK: sha1p.4s q0, [[HASH_E]], v1
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
+; CHECK-LABEL: test_sha1m:
+; CHECK: fmov [[HASH_E:s[0-9]+]], w0
+; CHECK: sha1m.4s q0, [[HASH_E]], v1
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
+ ret <4 x i32> %res
+}
+
+define i32 @test_sha1h(i32 %hash_e) {
+; CHECK-LABEL: test_sha1h:
+; CHECK: fmov [[HASH_E:s[0-9]+]], w0
+; CHECK: sha1h [[RES:s[0-9]+]], [[HASH_E]]
+; CHECK: fmov w0, [[RES]]
+ %res = call i32 @llvm.aarch64.crypto.sha1h(i32 %hash_e)
+ ret i32 %res
+}
+
+define <4 x i32> @test_sha1su0(<4 x i32> %wk0_3, <4 x i32> %wk4_7, <4 x i32> %wk8_11) {
+; CHECK-LABEL: test_sha1su0:
+; CHECK: sha1su0.4s v0, v1, v2
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha1su0(<4 x i32> %wk0_3, <4 x i32> %wk4_7, <4 x i32> %wk8_11)
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_sha1su1(<4 x i32> %wk0_3, <4 x i32> %wk12_15) {
+; CHECK-LABEL: test_sha1su1:
+; CHECK: sha1su1.4s v0, v1
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha1su1(<4 x i32> %wk0_3, <4 x i32> %wk12_15)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.aarch64.crypto.sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
+declare <4 x i32> @llvm.aarch64.crypto.sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
+declare <4 x i32> @llvm.aarch64.crypto.sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7)
+declare <4 x i32> @llvm.aarch64.crypto.sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
+
+define <4 x i32> @test_sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk) {
+; CHECK-LABEL: test_sha256h:
+; CHECK: sha256h.4s q0, q1, v2
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk) {
+; CHECK-LABEL: test_sha256h2:
+; CHECK: sha256h2.4s q0, q1, v2
+
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7) {
+; CHECK-LABEL: test_sha256su0:
+; CHECK: sha256su0.4s v0, v1
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7)
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15) {
+; CHECK-LABEL: test_sha256su1:
+; CHECK: sha256su1.4s v0, v1, v2
+ %res = call <4 x i32> @llvm.aarch64.crypto.sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
+ ret <4 x i32> %res
+}
diff --git a/test/CodeGen/AArch64/arm64-cse.ll b/test/CodeGen/AArch64/arm64-cse.ll
new file mode 100644
index 000000000000..5d62cfe76a84
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-cse.ll
@@ -0,0 +1,59 @@
+; RUN: llc -O3 < %s -aarch64-atomic-cfg-tidy=0 | FileCheck %s
+target triple = "arm64-apple-ios"
+
+; rdar://12462006
+; CSE between "icmp reg reg" and "sub reg reg".
+; Both can be in the same basic block or in different basic blocks.
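+; A single flags-setting subtract can cover both uses. Illustrative sketch only
+; (the register numbers here are hypothetical, not required by the CHECK lines):
+;   subs w8, w9, w2    // w8 = %0 - %size, NZCV set for the slt compare
+;   b.ge ...           // the branch reuses those flags; no separate cmp/sub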
+define i8* @t1(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: subs
+; CHECK-NOT: cmp
+; CHECK-NOT: sub
+; CHECK: b.ge
+; CHECK: sub
+; CHECK: sub
+; CHECK-NOT: sub
+; CHECK: ret
+ %0 = load i32* %offset, align 4
+ %cmp = icmp slt i32 %0, %size
+ %s = sub nsw i32 %0, %size
+ br i1 %cmp, label %return, label %if.end
+
+if.end:
+ %sub = sub nsw i32 %0, %size
+ %s2 = sub nsw i32 %s, %size
+ %s3 = sub nsw i32 %sub, %s2
+ store i32 %s3, i32* %offset, align 4
+ %add.ptr = getelementptr inbounds i8* %base, i32 %sub
+ br label %return
+
+return:
+ %retval.0 = phi i8* [ %add.ptr, %if.end ], [ null, %entry ]
+ ret i8* %retval.0
+}
+
+; CSE between "icmp reg imm" and "sub reg imm".
+define i8* @t2(i8* %base, i32* nocapture %offset) nounwind {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: subs
+; CHECK-NOT: cmp
+; CHECK-NOT: sub
+; CHECK: b.lt
+; CHECK-NOT: sub
+; CHECK: ret
+ %0 = load i32* %offset, align 4
+ %cmp = icmp slt i32 %0, 1
+ br i1 %cmp, label %return, label %if.end
+
+if.end:
+ %sub = sub nsw i32 %0, 1
+ store i32 %sub, i32* %offset, align 4
+ %add.ptr = getelementptr inbounds i8* %base, i32 %sub
+ br label %return
+
+return:
+ %retval.0 = phi i8* [ %add.ptr, %if.end ], [ null, %entry ]
+ ret i8* %retval.0
+}
diff --git a/test/CodeGen/AArch64/arm64-csel.ll b/test/CodeGen/AArch64/arm64-csel.ll
new file mode 100644
index 000000000000..98eba30f119d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-csel.ll
@@ -0,0 +1,230 @@
+; RUN: llc -O3 < %s | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
+target triple = "arm64-unknown-unknown"
+
+; CHECK-LABEL: foo1
+; CHECK: cinc w{{[0-9]+}}, w{{[0-9]+}}, ne
+define i32 @foo1(i32 %b, i32 %c) nounwind readnone ssp {
+entry:
+ %not.tobool = icmp ne i32 %c, 0
+ %add = zext i1 %not.tobool to i32
+ %b.add = add i32 %c, %b
+ %add1 = add i32 %b.add, %add
+ ret i32 %add1
+}
+
+; CHECK-LABEL: foo2
+; CHECK: cneg w{{[0-9]+}}, w{{[0-9]+}}, ne
+define i32 @foo2(i32 %b, i32 %c) nounwind readnone ssp {
+entry:
+ %mul = sub i32 0, %b
+ %tobool = icmp eq i32 %c, 0
+ %b.mul = select i1 %tobool, i32 %b, i32 %mul
+ %add = add nsw i32 %b.mul, %c
+ ret i32 %add
+}
+
+; CHECK-LABEL: foo3
+; CHECK: cinv w{{[0-9]+}}, w{{[0-9]+}}, ne
+define i32 @foo3(i32 %b, i32 %c) nounwind readnone ssp {
+entry:
+ %not.tobool = icmp ne i32 %c, 0
+ %xor = sext i1 %not.tobool to i32
+ %b.xor = xor i32 %xor, %b
+ %add = add nsw i32 %b.xor, %c
+ ret i32 %add
+}
+
+; rdar://11632325
+define i32 @foo4(i32 %a) nounwind ssp {
+; CHECK-LABEL: foo4
+; CHECK: cneg
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %a, -1
+ %neg = sub nsw i32 0, %a
+ %cond = select i1 %cmp, i32 %a, i32 %neg
+ ret i32 %cond
+}
+
+define i32 @foo5(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK-LABEL: foo5
+; CHECK: subs
+; CHECK-NEXT: cneg
+; CHECK-NEXT: ret
+ %sub = sub nsw i32 %a, %b
+ %cmp = icmp sgt i32 %sub, -1
+ %sub3 = sub nsw i32 0, %sub
+ %cond = select i1 %cmp, i32 %sub, i32 %sub3
+ ret i32 %cond
+}
+
+; Make sure we can handle a branch instruction in optimizeCompare.
+define i32 @foo6(i32 %a, i32 %b) nounwind ssp {
+; CHECK-LABEL: foo6
+; CHECK: b
+ %sub = sub nsw i32 %a, %b
+ %cmp = icmp sgt i32 %sub, 0
+ br i1 %cmp, label %l.if, label %l.else
+
+l.if:
+ ret i32 1
+
+l.else:
+ ret i32 %sub
+}
+
+; If NZCV is used multiple times and the V flag is used, we don't remove the cmp.
+define i32 @foo7(i32 %a, i32 %b) nounwind {
+entry:
+; CHECK-LABEL: foo7:
+; CHECK: sub
+; CHECK-next: adds
+; CHECK-next: csneg
+; CHECK-next: b
+ %sub = sub nsw i32 %a, %b
+ %cmp = icmp sgt i32 %sub, -1
+ %sub3 = sub nsw i32 0, %sub
+ %cond = select i1 %cmp, i32 %sub, i32 %sub3
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %cmp2 = icmp slt i32 %sub, -1
+ %sel = select i1 %cmp2, i32 %cond, i32 %a
+ ret i32 %sel
+
+if.else:
+ ret i32 %cond
+}
+
+define i32 @foo8(i32 %v, i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: foo8:
+; CHECK: cmp w0, #0
+; CHECK: csinv w0, w1, w2, ne
+ %tobool = icmp eq i32 %v, 0
+ %neg = xor i32 -1, %b
+ %cond = select i1 %tobool, i32 %neg, i32 %a
+ ret i32 %cond
+}
+
+define i32 @foo9(i32 %v) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo9:
+; CHECK: cmp w0, #0
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x4
+; CHECK: cinv w0, w[[REG]], eq
+ %tobool = icmp ne i32 %v, 0
+ %cond = select i1 %tobool, i32 4, i32 -5
+ ret i32 %cond
+}
+
+define i64 @foo10(i64 %v) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo10:
+; CHECK: cmp x0, #0
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x4
+; CHECK: cinv x0, x[[REG]], eq
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 4, i64 -5
+ ret i64 %cond
+}
+
+define i32 @foo11(i32 %v) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo11:
+; CHECK: cmp w0, #0
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x4
+; CHECK: cneg w0, w[[REG]], eq
+ %tobool = icmp ne i32 %v, 0
+ %cond = select i1 %tobool, i32 4, i32 -4
+ ret i32 %cond
+}
+
+define i64 @foo12(i64 %v) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo12:
+; CHECK: cmp x0, #0
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x4
+; CHECK: cneg x0, x[[REG]], eq
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 4, i64 -4
+ ret i64 %cond
+}
+
+define i32 @foo13(i32 %v, i32 %a, i32 %b) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo13:
+; CHECK: cmp w0, #0
+; CHECK: csneg w0, w1, w2, ne
+ %tobool = icmp eq i32 %v, 0
+ %sub = sub i32 0, %b
+ %cond = select i1 %tobool, i32 %sub, i32 %a
+ ret i32 %cond
+}
+
+define i64 @foo14(i64 %v, i64 %a, i64 %b) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo14:
+; CHECK: cmp x0, #0
+; CHECK: csneg x0, x1, x2, ne
+ %tobool = icmp eq i64 %v, 0
+ %sub = sub i64 0, %b
+ %cond = select i1 %tobool, i64 %sub, i64 %a
+ ret i64 %cond
+}
+
+define i32 @foo15(i32 %a, i32 %b) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo15:
+; CHECK: cmp w0, w1
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x1
+; CHECK: cinc w0, w[[REG]], gt
+ %cmp = icmp sgt i32 %a, %b
+ %. = select i1 %cmp, i32 2, i32 1
+ ret i32 %.
+}
+
+define i32 @foo16(i32 %a, i32 %b) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo16:
+; CHECK: cmp w0, w1
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x1
+; CHECK: cinc w0, w[[REG]], le
+ %cmp = icmp sgt i32 %a, %b
+ %. = select i1 %cmp, i32 1, i32 2
+ ret i32 %.
+}
+
+define i64 @foo17(i64 %a, i64 %b) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo17:
+; CHECK: cmp x0, x1
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x1
+; CHECK: cinc x0, x[[REG]], gt
+ %cmp = icmp sgt i64 %a, %b
+ %. = select i1 %cmp, i64 2, i64 1
+ ret i64 %.
+}
+
+define i64 @foo18(i64 %a, i64 %b) nounwind readnone optsize ssp {
+entry:
+; CHECK-LABEL: foo18:
+; CHECK: cmp x0, x1
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x1
+; CHECK: cinc x0, x[[REG]], le
+ %cmp = icmp sgt i64 %a, %b
+ %. = select i1 %cmp, i64 1, i64 2
+ ret i64 %.
+}
+
+define i64 @foo19(i64 %a, i64 %b, i64 %c) {
+entry:
+; CHECK-LABEL: foo19:
+; CHECK: cinc x0, x2
+; CHECK-NOT: add
+ %cmp = icmp ult i64 %a, %b
+ %inc = zext i1 %cmp to i64
+ %inc.c = add i64 %inc, %c
+ ret i64 %inc.c
+}
diff --git a/test/CodeGen/AArch64/arm64-cvt.ll b/test/CodeGen/AArch64/arm64-cvt.ll
new file mode 100644
index 000000000000..420a8bc04833
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-cvt.ll
@@ -0,0 +1,401 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+;
+; Floating-point scalar convert to signed integer (to nearest with ties to away)
+;
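+; For example, fcvtas rounds 2.5 to 3 and -2.5 to -3 (ties go away from zero).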
+define i32 @fcvtas_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtas_1w1s:
+;CHECK: fcvtas w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtas_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtas_1x1s:
+;CHECK: fcvtas x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtas_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtas_1w1d:
+;CHECK: fcvtas w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtas_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtas_1x1d:
+;CHECK: fcvtas x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtas.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtas.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtas.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtas.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to unsigned integer (to nearest with ties to away)
+;
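+; For example, fcvtau rounds 2.5 to 3; negative inputs saturate to 0.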
+define i32 @fcvtau_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtau_1w1s:
+;CHECK: fcvtau w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtau_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtau_1x1s:
+;CHECK: fcvtau x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtau.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtau_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtau_1w1d:
+;CHECK: fcvtau w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtau_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtau_1x1d:
+;CHECK: fcvtau x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtau.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtau.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtau.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtau.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to signed integer (toward -Inf)
+;
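+; For example, fcvtms rounds 2.9 to 2 and -2.1 to -3 (floor).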
+define i32 @fcvtms_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtms_1w1s:
+;CHECK: fcvtms w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtms_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtms_1x1s:
+;CHECK: fcvtms x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtms.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtms_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtms_1w1d:
+;CHECK: fcvtms w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtms_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtms_1x1d:
+;CHECK: fcvtms x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtms.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtms.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtms.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtms.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to unsigned integer (toward -Inf)
+;
+define i32 @fcvtmu_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtmu_1w1s:
+;CHECK: fcvtmu w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtmu_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtmu_1x1s:
+;CHECK: fcvtmu x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtmu.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtmu_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtmu_1w1d:
+;CHECK: fcvtmu w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtmu_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtmu_1x1d:
+;CHECK: fcvtmu x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtmu.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to signed integer (to nearest with ties to even)
+;
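+; For example, fcvtns rounds 2.5 to 2 and 3.5 to 4 (ties go to the even value).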
+define i32 @fcvtns_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtns_1w1s:
+;CHECK: fcvtns w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtns_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtns_1x1s:
+;CHECK: fcvtns x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtns.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtns_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtns_1w1d:
+;CHECK: fcvtns w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtns.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtns_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtns_1x1d:
+;CHECK: fcvtns x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtns.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtns.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtns.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtns.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to unsigned integer (to nearest with ties to even)
+;
+define i32 @fcvtnu_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtnu_1w1s:
+;CHECK: fcvtnu w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtnu_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtnu_1x1s:
+;CHECK: fcvtnu x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtnu.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtnu_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtnu_1w1d:
+;CHECK: fcvtnu w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtnu.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtnu_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtnu_1x1d:
+;CHECK: fcvtnu x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtnu.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtnu.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to signed integer (toward +Inf)
+;
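+; For example, fcvtps rounds 2.1 to 3 and -2.9 to -2 (ceiling).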
+define i32 @fcvtps_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtps_1w1s:
+;CHECK: fcvtps w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtps_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtps_1x1s:
+;CHECK: fcvtps x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtps.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtps_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtps_1w1d:
+;CHECK: fcvtps w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtps.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtps_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtps_1x1d:
+;CHECK: fcvtps x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtps.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtps.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtps.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtps.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to unsigned integer (toward +Inf)
+;
+define i32 @fcvtpu_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtpu_1w1s:
+;CHECK: fcvtpu w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtpu_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtpu_1x1s:
+;CHECK: fcvtpu x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtpu.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtpu_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtpu_1w1d:
+;CHECK: fcvtpu w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtpu.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtpu_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtpu_1x1d:
+;CHECK: fcvtpu x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtpu.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtpu.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to signed integer (toward zero)
+;
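+; For example, fcvtzs rounds 2.9 to 2 and -2.9 to -2 (truncation toward zero).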
+define i32 @fcvtzs_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtzs_1w1s:
+;CHECK: fcvtzs w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtzs.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtzs_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtzs_1x1s:
+;CHECK: fcvtzs x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtzs.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtzs_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtzs_1w1d:
+;CHECK: fcvtzs w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtzs.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtzs_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtzs_1x1d:
+;CHECK: fcvtzs x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtzs.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtzs.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtzs.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double) nounwind readnone
+
+;
+; Floating-point scalar convert to unsigned integer (toward zero)
+;
+define i32 @fcvtzu_1w1s(float %A) nounwind {
+;CHECK-LABEL: fcvtzu_1w1s:
+;CHECK: fcvtzu w0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtzu.i32.f32(float %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtzu_1x1s(float %A) nounwind {
+;CHECK-LABEL: fcvtzu_1x1s:
+;CHECK: fcvtzu x0, s0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtzu.i64.f32(float %A)
+ ret i64 %tmp3
+}
+
+define i32 @fcvtzu_1w1d(double %A) nounwind {
+;CHECK-LABEL: fcvtzu_1w1d:
+;CHECK: fcvtzu w0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i32 @llvm.aarch64.neon.fcvtzu.i32.f64(double %A)
+ ret i32 %tmp3
+}
+
+define i64 @fcvtzu_1x1d(double %A) nounwind {
+;CHECK-LABEL: fcvtzu_1x1d:
+;CHECK: fcvtzu x0, d0
+;CHECK-NEXT: ret
+ %tmp3 = call i64 @llvm.aarch64.neon.fcvtzu.i64.f64(double %A)
+ ret i64 %tmp3
+}
+
+declare i32 @llvm.aarch64.neon.fcvtzu.i32.f32(float) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtzu.i64.f32(float) nounwind readnone
+declare i32 @llvm.aarch64.neon.fcvtzu.i32.f64(double) nounwind readnone
+declare i64 @llvm.aarch64.neon.fcvtzu.i64.f64(double) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll b/test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll
new file mode 100644
index 000000000000..a45e31320de8
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -o /dev/null
+; rdar://10795250
+; DAGCombiner should converge.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
+target triple = "arm64-apple-macosx10.8.0"
+
+define i64 @foo(i128 %Params.coerce, i128 %SelLocs.coerce) {
+entry:
+ %tmp = lshr i128 %Params.coerce, 61
+ %.tr38.i = trunc i128 %tmp to i64
+ %mul.i = and i64 %.tr38.i, 4294967288
+ %tmp1 = lshr i128 %SelLocs.coerce, 62
+ %.tr.i = trunc i128 %tmp1 to i64
+ %mul7.i = and i64 %.tr.i, 4294967292
+ %add.i = add i64 %mul7.i, %mul.i
+ %conv.i.i = and i64 %add.i, 4294967292
+ ret i64 %conv.i.i
+}
diff --git a/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll b/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
new file mode 100644
index 000000000000..6eed48bf62e3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
@@ -0,0 +1,32 @@
+; RUN: llc -mcpu=cyclone < %s | FileCheck %s
+
+; r208640 broke ppc64/Linux self-hosting; xfailing while this is worked on.
+; XFAIL: *
+
+target datalayout = "e-i64:64-n32:64-S128"
+target triple = "arm64-apple-ios"
+
+%"struct.SU" = type { i32, %"struct.SU"*, i32*, i32, i32, %"struct.BO", i32, [5 x i8] }
+%"struct.BO" = type { %"struct.RE" }
+
+%"struct.RE" = type { i32, i32, i32, i32 }
+
+; This is a read-modify-write of some bitfields combined into an i48. It gets
+; legalized into i32 and i16 accesses. Only a single store of zero to the low
+; i32 part should be live.
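+; (-4294967296 is 0xffff00000000 as an i48 mask: it keeps the high 16 bits and
+; clears the low 32, so the whole read-modify-write reduces to storing wzr to
+; the low 32-bit half.)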
+
+; CHECK-LABEL: test:
+; CHECK-NOT: ldr
+; CHECK: str wzr
+; CHECK-NOT: str
+define void @test(%"struct.SU"* nocapture %su) {
+entry:
+ %r1 = getelementptr inbounds %"struct.SU"* %su, i64 1, i32 5
+ %r2 = bitcast %"struct.BO"* %r1 to i48*
+ %r3 = load i48* %r2, align 8
+ %r4 = and i48 %r3, -4294967296
+ %r5 = or i48 0, %r4
+ store i48 %r5, i48* %r2, align 8
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll b/test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll
new file mode 100644
index 000000000000..ce132c6afa46
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-dagcombiner-indexed-load.ll
@@ -0,0 +1,46 @@
+; RUN: llc -O3 < %s | FileCheck %s
+; RUN: llc -O3 -addr-sink-using-gep=1 < %s | FileCheck %s
+; Test case for a DAG combiner bug where we combined an indexed load
+; with an extension (sext, zext, or any) into a regular extended load,
+; i.e., dropping the indexed value.
+; <rdar://problem/16389332>
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios"
+
+%class.A = type { i64, i64 }
+%class.C = type { i64 }
+
+; CHECK-LABEL: XX:
+; CHECK: ldr
+define i32 @XX(%class.A* %K, i1 %tst, i32* %addr, %class.C** %ppC, %class.C* %pC) {
+entry:
+ br i1 %tst, label %if.then, label %lor.rhs.i
+
+lor.rhs.i: ; preds = %entry
+ %tmp = load i32* %addr, align 4
+ %y.i.i.i = getelementptr inbounds %class.A* %K, i64 0, i32 1
+ %tmp1 = load i64* %y.i.i.i, align 8
+ %U.sroa.3.8.extract.trunc.i = trunc i64 %tmp1 to i32
+ %div11.i = sdiv i32 %U.sroa.3.8.extract.trunc.i, 17
+ %add12.i = add nsw i32 0, %div11.i
+ %U.sroa.3.12.extract.shift.i = lshr i64 %tmp1, 32
+ %U.sroa.3.12.extract.trunc.i = trunc i64 %U.sroa.3.12.extract.shift.i to i32
+ %div15.i = sdiv i32 %U.sroa.3.12.extract.trunc.i, 13
+ %add16.i = add nsw i32 %add12.i, %div15.i
+ %rem.i.i = srem i32 %add16.i, %tmp
+ %idxprom = sext i32 %rem.i.i to i64
+ %arrayidx = getelementptr inbounds %class.C** %ppC, i64 %idxprom
+ %tobool533 = icmp eq %class.C* %pC, null
+ br i1 %tobool533, label %while.end, label %while.body
+
+if.then: ; preds = %entry
+ ret i32 42
+
+while.body: ; preds = %lor.rhs.i
+ ret i32 5
+
+while.end: ; preds = %lor.rhs.i
+ %tmp3 = load %class.C** %arrayidx, align 8
+ ret i32 50
+}
diff --git a/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll b/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
new file mode 100644
index 000000000000..0679014e59ae
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
@@ -0,0 +1,102 @@
+; RUN: llc -mtriple arm64-apple-ios -O3 -o - < %s | FileCheck %s
+; <rdar://problem/14477220>
+
+%class.Complex = type { float, float }
+%class.Complex_int = type { i32, i32 }
+%class.Complex_long = type { i64, i64 }
+
+; CHECK-LABEL: @test
+; CHECK: add [[BASE:x[0-9]+]], x0, x1, lsl #3
+; CHECK: ldp [[CPLX1_I:s[0-9]+]], [[CPLX1_R:s[0-9]+]], {{\[}}[[BASE]]]
+; CHECK: ldp [[CPLX2_I:s[0-9]+]], [[CPLX2_R:s[0-9]+]], {{\[}}[[BASE]], #64]
+; CHECK: fadd {{s[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]]
+; CHECK: fadd {{s[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]]
+; CHECK: ret
+define void @test(%class.Complex* nocapture %out, i64 %out_start) {
+entry:
+ %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
+ %0 = bitcast %class.Complex* %arrayidx to i64*
+ %1 = load i64* %0, align 4
+ %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
+ %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
+ %t0.sroa.2.0.extract.shift = lshr i64 %1, 32
+ %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
+ %3 = bitcast i32 %t0.sroa.2.0.extract.trunc to float
+ %add = add i64 %out_start, 8
+ %arrayidx2 = getelementptr inbounds %class.Complex* %out, i64 %add
+ %i.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 0
+ %4 = load float* %i.i, align 4
+ %add.i = fadd float %4, %2
+ %retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
+ %r.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 1
+ %5 = load float* %r.i, align 4
+ %add5.i = fadd float %5, %3
+ %retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
+ %ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>*
+ store <2 x float> %retval.sroa.0.4.vec.insert.i, <2 x float>* %ref.tmp.sroa.0.0.cast, align 4
+ ret void
+}
+
+; CHECK-LABEL: @test_int
+; CHECK: add [[BASE:x[0-9]+]], x0, x1, lsl #3
+; CHECK: ldp [[CPLX1_I:w[0-9]+]], [[CPLX1_R:w[0-9]+]], {{\[}}[[BASE]]]
+; CHECK: ldp [[CPLX2_I:w[0-9]+]], [[CPLX2_R:w[0-9]+]], {{\[}}[[BASE]], #64]
+; CHECK: add {{w[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]]
+; CHECK: add {{w[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]]
+; CHECK: ret
+define void @test_int(%class.Complex_int* nocapture %out, i64 %out_start) {
+entry:
+ %arrayidx = getelementptr inbounds %class.Complex_int* %out, i64 %out_start
+ %0 = bitcast %class.Complex_int* %arrayidx to i64*
+ %1 = load i64* %0, align 4
+ %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
+ %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to i32
+ %t0.sroa.2.0.extract.shift = lshr i64 %1, 32
+ %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
+ %3 = bitcast i32 %t0.sroa.2.0.extract.trunc to i32
+ %add = add i64 %out_start, 8
+ %arrayidx2 = getelementptr inbounds %class.Complex_int* %out, i64 %add
+ %i.i = getelementptr inbounds %class.Complex_int* %arrayidx2, i64 0, i32 0
+ %4 = load i32* %i.i, align 4
+ %add.i = add i32 %4, %2
+ %retval.sroa.0.0.vec.insert.i = insertelement <2 x i32> undef, i32 %add.i, i32 0
+ %r.i = getelementptr inbounds %class.Complex_int* %arrayidx2, i64 0, i32 1
+ %5 = load i32* %r.i, align 4
+ %add5.i = add i32 %5, %3
+ %retval.sroa.0.4.vec.insert.i = insertelement <2 x i32> %retval.sroa.0.0.vec.insert.i, i32 %add5.i, i32 1
+ %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_int* %arrayidx to <2 x i32>*
+ store <2 x i32> %retval.sroa.0.4.vec.insert.i, <2 x i32>* %ref.tmp.sroa.0.0.cast, align 4
+ ret void
+}
+
+; CHECK-LABEL: @test_long
+; CHECK: add [[BASE:x[0-9]+]], x0, x1, lsl #4
+; CHECK: ldp [[CPLX1_I:x[0-9]+]], [[CPLX1_R:x[0-9]+]], {{\[}}[[BASE]]]
+; CHECK: ldp [[CPLX2_I:x[0-9]+]], [[CPLX2_R:x[0-9]+]], {{\[}}[[BASE]], #128]
+; CHECK: add {{x[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]]
+; CHECK: add {{x[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]]
+; CHECK: ret
+define void @test_long(%class.Complex_long* nocapture %out, i64 %out_start) {
+entry:
+ %arrayidx = getelementptr inbounds %class.Complex_long* %out, i64 %out_start
+ %0 = bitcast %class.Complex_long* %arrayidx to i128*
+ %1 = load i128* %0, align 4
+ %t0.sroa.0.0.extract.trunc = trunc i128 %1 to i64
+ %2 = bitcast i64 %t0.sroa.0.0.extract.trunc to i64
+ %t0.sroa.2.0.extract.shift = lshr i128 %1, 64
+ %t0.sroa.2.0.extract.trunc = trunc i128 %t0.sroa.2.0.extract.shift to i64
+ %3 = bitcast i64 %t0.sroa.2.0.extract.trunc to i64
+ %add = add i64 %out_start, 8
+ %arrayidx2 = getelementptr inbounds %class.Complex_long* %out, i64 %add
+ %i.i = getelementptr inbounds %class.Complex_long* %arrayidx2, i32 0, i32 0
+ %4 = load i64* %i.i, align 4
+ %add.i = add i64 %4, %2
+ %retval.sroa.0.0.vec.insert.i = insertelement <2 x i64> undef, i64 %add.i, i32 0
+ %r.i = getelementptr inbounds %class.Complex_long* %arrayidx2, i32 0, i32 1
+ %5 = load i64* %r.i, align 4
+ %add5.i = add i64 %5, %3
+ %retval.sroa.0.4.vec.insert.i = insertelement <2 x i64> %retval.sroa.0.0.vec.insert.i, i64 %add5.i, i32 1
+ %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_long* %arrayidx to <2 x i64>*
+ store <2 x i64> %retval.sroa.0.4.vec.insert.i, <2 x i64>* %ref.tmp.sroa.0.0.cast, align 4
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll b/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
new file mode 100644
index 000000000000..9bb4b7120763
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=arm64 < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @test1() #0 {
+ %tmp1 = alloca i8
+ %tmp2 = alloca i32, i32 4096
+ %tmp3 = icmp eq i8* %tmp1, null
+ %tmp4 = zext i1 %tmp3 to i32
+
+ ret i32 %tmp4
+
+ ; CHECK-LABEL: test1
+ ; CHECK: adds [[TEMP:[a-z0-9]+]], sp, #4, lsl #12
+ ; CHECK: adds [[TEMP]], [[TEMP]], #15
+}
diff --git a/test/CodeGen/AArch64/arm64-dead-register-def-bug.ll b/test/CodeGen/AArch64/arm64-dead-register-def-bug.ll
new file mode 100644
index 000000000000..1bbcf50ba73c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-dead-register-def-bug.ll
@@ -0,0 +1,32 @@
+; RUN: llc -mtriple="arm64-apple-ios" < %s | FileCheck %s
+;
+; Check that the dead register definition pass is considering implicit defs.
+; When rematerializing through truncates, the coalescer may produce instructions
+; with dead defs, but live implicit-defs of subregs:
+; E.g. %X1<def, dead> = MOVi64imm 2, %W1<imp-def>; %X1:GPR64, %W1:GPR32
+; These instructions are live, and their definitions should not be rewritten.
+;
+; <rdar://problem/16492408>
+
+define void @testcase() {
+; CHECK: testcase:
+; CHECK-NOT: orr xzr, xzr, #0x2
+
+bb1:
+ %tmp1 = tail call float @ceilf(float 2.000000e+00)
+ %tmp2 = fptoui float %tmp1 to i64
+ br i1 undef, label %bb2, label %bb3
+
+bb2:
+ tail call void @foo()
+ br label %bb3
+
+bb3:
+ %tmp3 = trunc i64 %tmp2 to i32
+ tail call void @bar(i32 %tmp3)
+ ret void
+}
+
+declare void @foo()
+declare void @bar(i32)
+declare float @ceilf(float) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-dup.ll b/test/CodeGen/AArch64/arm64-dup.ll
new file mode 100644
index 000000000000..0c56b46c4176
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-dup.ll
@@ -0,0 +1,323 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+
+define <8 x i8> @v_dup8(i8 %A) nounwind {
+;CHECK-LABEL: v_dup8:
+;CHECK: dup.8b
+ %tmp1 = insertelement <8 x i8> zeroinitializer, i8 %A, i32 0
+ %tmp2 = insertelement <8 x i8> %tmp1, i8 %A, i32 1
+ %tmp3 = insertelement <8 x i8> %tmp2, i8 %A, i32 2
+ %tmp4 = insertelement <8 x i8> %tmp3, i8 %A, i32 3
+ %tmp5 = insertelement <8 x i8> %tmp4, i8 %A, i32 4
+ %tmp6 = insertelement <8 x i8> %tmp5, i8 %A, i32 5
+ %tmp7 = insertelement <8 x i8> %tmp6, i8 %A, i32 6
+ %tmp8 = insertelement <8 x i8> %tmp7, i8 %A, i32 7
+ ret <8 x i8> %tmp8
+}
+
+define <4 x i16> @v_dup16(i16 %A) nounwind {
+;CHECK-LABEL: v_dup16:
+;CHECK: dup.4h
+ %tmp1 = insertelement <4 x i16> zeroinitializer, i16 %A, i32 0
+ %tmp2 = insertelement <4 x i16> %tmp1, i16 %A, i32 1
+ %tmp3 = insertelement <4 x i16> %tmp2, i16 %A, i32 2
+ %tmp4 = insertelement <4 x i16> %tmp3, i16 %A, i32 3
+ ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @v_dup32(i32 %A) nounwind {
+;CHECK-LABEL: v_dup32:
+;CHECK: dup.2s
+ %tmp1 = insertelement <2 x i32> zeroinitializer, i32 %A, i32 0
+ %tmp2 = insertelement <2 x i32> %tmp1, i32 %A, i32 1
+ ret <2 x i32> %tmp2
+}
+
+define <2 x float> @v_dupfloat(float %A) nounwind {
+;CHECK-LABEL: v_dupfloat:
+;CHECK: dup.2s
+ %tmp1 = insertelement <2 x float> zeroinitializer, float %A, i32 0
+ %tmp2 = insertelement <2 x float> %tmp1, float %A, i32 1
+ ret <2 x float> %tmp2
+}
+
+define <16 x i8> @v_dupQ8(i8 %A) nounwind {
+;CHECK-LABEL: v_dupQ8:
+;CHECK: dup.16b
+ %tmp1 = insertelement <16 x i8> zeroinitializer, i8 %A, i32 0
+ %tmp2 = insertelement <16 x i8> %tmp1, i8 %A, i32 1
+ %tmp3 = insertelement <16 x i8> %tmp2, i8 %A, i32 2
+ %tmp4 = insertelement <16 x i8> %tmp3, i8 %A, i32 3
+ %tmp5 = insertelement <16 x i8> %tmp4, i8 %A, i32 4
+ %tmp6 = insertelement <16 x i8> %tmp5, i8 %A, i32 5
+ %tmp7 = insertelement <16 x i8> %tmp6, i8 %A, i32 6
+ %tmp8 = insertelement <16 x i8> %tmp7, i8 %A, i32 7
+ %tmp9 = insertelement <16 x i8> %tmp8, i8 %A, i32 8
+ %tmp10 = insertelement <16 x i8> %tmp9, i8 %A, i32 9
+ %tmp11 = insertelement <16 x i8> %tmp10, i8 %A, i32 10
+ %tmp12 = insertelement <16 x i8> %tmp11, i8 %A, i32 11
+ %tmp13 = insertelement <16 x i8> %tmp12, i8 %A, i32 12
+ %tmp14 = insertelement <16 x i8> %tmp13, i8 %A, i32 13
+ %tmp15 = insertelement <16 x i8> %tmp14, i8 %A, i32 14
+ %tmp16 = insertelement <16 x i8> %tmp15, i8 %A, i32 15
+ ret <16 x i8> %tmp16
+}
+
+define <8 x i16> @v_dupQ16(i16 %A) nounwind {
+;CHECK-LABEL: v_dupQ16:
+;CHECK: dup.8h
+ %tmp1 = insertelement <8 x i16> zeroinitializer, i16 %A, i32 0
+ %tmp2 = insertelement <8 x i16> %tmp1, i16 %A, i32 1
+ %tmp3 = insertelement <8 x i16> %tmp2, i16 %A, i32 2
+ %tmp4 = insertelement <8 x i16> %tmp3, i16 %A, i32 3
+ %tmp5 = insertelement <8 x i16> %tmp4, i16 %A, i32 4
+ %tmp6 = insertelement <8 x i16> %tmp5, i16 %A, i32 5
+ %tmp7 = insertelement <8 x i16> %tmp6, i16 %A, i32 6
+ %tmp8 = insertelement <8 x i16> %tmp7, i16 %A, i32 7
+ ret <8 x i16> %tmp8
+}
+
+define <4 x i32> @v_dupQ32(i32 %A) nounwind {
+;CHECK-LABEL: v_dupQ32:
+;CHECK: dup.4s
+ %tmp1 = insertelement <4 x i32> zeroinitializer, i32 %A, i32 0
+ %tmp2 = insertelement <4 x i32> %tmp1, i32 %A, i32 1
+ %tmp3 = insertelement <4 x i32> %tmp2, i32 %A, i32 2
+ %tmp4 = insertelement <4 x i32> %tmp3, i32 %A, i32 3
+ ret <4 x i32> %tmp4
+}
+
+define <4 x float> @v_dupQfloat(float %A) nounwind {
+;CHECK-LABEL: v_dupQfloat:
+;CHECK: dup.4s
+ %tmp1 = insertelement <4 x float> zeroinitializer, float %A, i32 0
+ %tmp2 = insertelement <4 x float> %tmp1, float %A, i32 1
+ %tmp3 = insertelement <4 x float> %tmp2, float %A, i32 2
+ %tmp4 = insertelement <4 x float> %tmp3, float %A, i32 3
+ ret <4 x float> %tmp4
+}
+
+; Check to make sure it works with shuffles, too.
+
+define <8 x i8> @v_shuffledup8(i8 %A) nounwind {
+;CHECK-LABEL: v_shuffledup8:
+;CHECK: dup.8b
+ %tmp1 = insertelement <8 x i8> undef, i8 %A, i32 0
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
+ ret <8 x i8> %tmp2
+}
+
+define <4 x i16> @v_shuffledup16(i16 %A) nounwind {
+;CHECK-LABEL: v_shuffledup16:
+;CHECK: dup.4h
+ %tmp1 = insertelement <4 x i16> undef, i16 %A, i32 0
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
+ ret <4 x i16> %tmp2
+}
+
+define <2 x i32> @v_shuffledup32(i32 %A) nounwind {
+;CHECK-LABEL: v_shuffledup32:
+;CHECK: dup.2s
+ %tmp1 = insertelement <2 x i32> undef, i32 %A, i32 0
+ %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
+ ret <2 x i32> %tmp2
+}
+
+define <2 x float> @v_shuffledupfloat(float %A) nounwind {
+;CHECK-LABEL: v_shuffledupfloat:
+;CHECK: dup.2s
+ %tmp1 = insertelement <2 x float> undef, float %A, i32 0
+ %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
+ ret <2 x float> %tmp2
+}
+
+define <16 x i8> @v_shuffledupQ8(i8 %A) nounwind {
+;CHECK-LABEL: v_shuffledupQ8:
+;CHECK: dup.16b
+ %tmp1 = insertelement <16 x i8> undef, i8 %A, i32 0
+ %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> zeroinitializer
+ ret <16 x i8> %tmp2
+}
+
+define <8 x i16> @v_shuffledupQ16(i16 %A) nounwind {
+;CHECK-LABEL: v_shuffledupQ16:
+;CHECK: dup.8h
+ %tmp1 = insertelement <8 x i16> undef, i16 %A, i32 0
+ %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> zeroinitializer
+ ret <8 x i16> %tmp2
+}
+
+define <4 x i32> @v_shuffledupQ32(i32 %A) nounwind {
+;CHECK-LABEL: v_shuffledupQ32:
+;CHECK: dup.4s
+ %tmp1 = insertelement <4 x i32> undef, i32 %A, i32 0
+ %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %tmp2
+}
+
+define <4 x float> @v_shuffledupQfloat(float %A) nounwind {
+;CHECK-LABEL: v_shuffledupQfloat:
+;CHECK: dup.4s
+ %tmp1 = insertelement <4 x float> undef, float %A, i32 0
+ %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
+ ret <4 x float> %tmp2
+}
+
+define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: vduplane8:
+;CHECK: dup.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
+ ret <8 x i8> %tmp2
+}
+
+define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: vduplane16:
+;CHECK: dup.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
+ ret <4 x i16> %tmp2
+}
+
+define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: vduplane32:
+;CHECK: dup.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
+ ret <2 x i32> %tmp2
+}
+
+define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
+;CHECK-LABEL: vduplanefloat:
+;CHECK: dup.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 >
+ ret <2 x float> %tmp2
+}
+
+define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: vduplaneQ8:
+;CHECK: dup.16b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
+ ret <16 x i8> %tmp2
+}
+
+define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: vduplaneQ16:
+;CHECK: dup.8h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
+ ret <8 x i16> %tmp2
+}
+
+define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: vduplaneQ32:
+;CHECK: dup.4s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
+ ret <4 x i32> %tmp2
+}
+
+define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind {
+;CHECK-LABEL: vduplaneQfloat:
+;CHECK: dup.4s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
+ ret <4 x float> %tmp2
+}
+
+define <2 x i64> @foo(<2 x i64> %arg0_int64x1_t) nounwind readnone {
+;CHECK-LABEL: foo:
+;CHECK: dup.2d
+entry:
+ %0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @bar(<2 x i64> %arg0_int64x1_t) nounwind readnone {
+;CHECK-LABEL: bar:
+;CHECK: dup.2d
+entry:
+ %0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ ret <2 x i64> %0
+}
+
+define <2 x double> @baz(<2 x double> %arg0_int64x1_t) nounwind readnone {
+;CHECK-LABEL: baz:
+;CHECK: dup.2d
+entry:
+ %0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x double> %0
+}
+
+define <2 x double> @qux(<2 x double> %arg0_int64x1_t) nounwind readnone {
+;CHECK-LABEL: qux:
+;CHECK: dup.2d
+entry:
+ %0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %0
+}
+
+define <2 x i32> @f(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: f:
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: ins.s v0[1], w1
+; CHECK-NEXT: ret
+ %vecinit = insertelement <2 x i32> undef, i32 %a, i32 0
+ %vecinit1 = insertelement <2 x i32> %vecinit, i32 %b, i32 1
+ ret <2 x i32> %vecinit1
+}
+
+define <4 x i32> @g(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: g:
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: ins.s v0[1], w1
+; CHECK-NEXT: ins.s v0[2], w1
+; CHECK-NEXT: ins.s v0[3], w0
+; CHECK-NEXT: ret
+ %vecinit = insertelement <4 x i32> undef, i32 %a, i32 0
+ %vecinit1 = insertelement <4 x i32> %vecinit, i32 %b, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit1, i32 %b, i32 2
+ %vecinit3 = insertelement <4 x i32> %vecinit2, i32 %a, i32 3
+ ret <4 x i32> %vecinit3
+}
+
+define <2 x i64> @h(i64 %a, i64 %b) nounwind readnone {
+; CHECK-LABEL: h:
+; CHECK-NEXT: fmov d0, x0
+; CHECK-NEXT: ins.d v0[1], x1
+; CHECK-NEXT: ret
+ %vecinit = insertelement <2 x i64> undef, i64 %a, i32 0
+ %vecinit1 = insertelement <2 x i64> %vecinit, i64 %b, i32 1
+ ret <2 x i64> %vecinit1
+}
+
+; We used to spot this as a BUILD_VECTOR implementable by dup, but assumed that
+; the single value needed was of the same type as the vector. This is false if
+; the scalar corresponding to the vector type is illegal (e.g. a <4 x i16>
+; BUILD_VECTOR will have an i32 as its source). In that case, the operation is
+; not a simple "dup vD.4h, vN.h[idx]" after all, and we crashed.
+;
+; *However*, it is a dup vD.4h, vN.h[2*idx].
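+; (The extract below takes element 3 of a v4i32; viewed as v8i16 on this
+; little-endian target, its low half is halfword lane 2*3 = 6, hence v0[6].)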
+define <4 x i16> @test_build_illegal(<4 x i32> %in) {
+; CHECK-LABEL: test_build_illegal:
+; CHECK: dup.4h v0, v0[6]
+ %val = extractelement <4 x i32> %in, i32 3
+ %smallval = trunc i32 %val to i16
+  %vec = insertelement <4 x i16> undef, i16 %smallval, i32 3
+
+ ret <4 x i16> %vec
+}
+
+; We used to inherit an already extract_subvectored v4i16 from
+; SelectionDAGBuilder here. We then added a DUPLANE on top of that, preventing
+; the formation of an indexed-by-7 MLS.
+define <4 x i16> @test_high_splat(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) #0 {
+; CHECK-LABEL: test_high_splat:
+; CHECK: mls.4h v0, v1, v2[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
diff --git a/test/CodeGen/AArch64/arm64-early-ifcvt.ll b/test/CodeGen/AArch64/arm64-early-ifcvt.ll
new file mode 100644
index 000000000000..44150c29aeb0
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-early-ifcvt.ll
@@ -0,0 +1,423 @@
+; RUN: llc < %s -stress-early-ifcvt -aarch64-atomic-cfg-tidy=0 | FileCheck %s
+target triple = "arm64-apple-macosx"
+
+; CHECK: mm2
+define i32 @mm2(i32* nocapture %p, i32 %n) nounwind uwtable readonly ssp {
+entry:
+ br label %do.body
+
+; CHECK: do.body
+; Loop body has no branches before the backedge.
+; CHECK-NOT: LBB
+do.body:
+ %max.0 = phi i32 [ 0, %entry ], [ %max.1, %do.cond ]
+ %min.0 = phi i32 [ 0, %entry ], [ %min.1, %do.cond ]
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
+ %p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
+ %incdec.ptr = getelementptr inbounds i32* %p.addr.0, i64 1
+ %0 = load i32* %p.addr.0, align 4
+ %cmp = icmp sgt i32 %0, %max.0
+ br i1 %cmp, label %do.cond, label %if.else
+
+if.else:
+ %cmp1 = icmp slt i32 %0, %min.0
+ %.min.0 = select i1 %cmp1, i32 %0, i32 %min.0
+ br label %do.cond
+
+do.cond:
+ %max.1 = phi i32 [ %0, %do.body ], [ %max.0, %if.else ]
+ %min.1 = phi i32 [ %min.0, %do.body ], [ %.min.0, %if.else ]
+; CHECK: cbnz
+ %dec = add i32 %n.addr.0, -1
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %do.end, label %do.body
+
+do.end:
+ %sub = sub nsw i32 %max.1, %min.1
+ ret i32 %sub
+}
+
+; CHECK-LABEL: fold_inc_true_32:
+; CHECK: {{subs.*wzr,|cmp}} w2, #1
+; CHECK-NEXT: csinc w0, w1, w0, eq
+; CHECK-NEXT: ret
+define i32 @fold_inc_true_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i32 %c, 1
+ %inc = add nsw i32 %x, 1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %y, %eq_bb ], [ %inc, %entry ]
+ ret i32 %cond
+}
+
+; CHECK-LABEL: fold_inc_true_64:
+; CHECK: {{subs.*xzr,|cmp}} x2, #1
+; CHECK-NEXT: csinc x0, x1, x0, eq
+; CHECK-NEXT: ret
+define i64 @fold_inc_true_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i64 %c, 1
+ %inc = add nsw i64 %x, 1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %y, %eq_bb ], [ %inc, %entry ]
+ ret i64 %cond
+}
+
+; CHECK-LABEL: fold_inc_false_32:
+; CHECK: {{subs.*wzr,|cmp}} w2, #1
+; CHECK-NEXT: csinc w0, w1, w0, ne
+; CHECK-NEXT: ret
+define i32 @fold_inc_false_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i32 %c, 1
+ %inc = add nsw i32 %x, 1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %inc, %eq_bb ], [ %y, %entry ]
+ ret i32 %cond
+}
+
+; CHECK-LABEL: fold_inc_false_64:
+; CHECK: {{subs.*xzr,|cmp}} x2, #1
+; CHECK-NEXT: csinc x0, x1, x0, ne
+; CHECK-NEXT: ret
+define i64 @fold_inc_false_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i64 %c, 1
+ %inc = add nsw i64 %x, 1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %inc, %eq_bb ], [ %y, %entry ]
+ ret i64 %cond
+}
+
+; CHECK-LABEL: fold_inv_true_32:
+; CHECK: {{subs.*wzr,|cmp}} w2, #1
+; CHECK-NEXT: csinv w0, w1, w0, eq
+; CHECK-NEXT: ret
+define i32 @fold_inv_true_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i32 %c, 1
+ %inv = xor i32 %x, -1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %y, %eq_bb ], [ %inv, %entry ]
+ ret i32 %cond
+}
+
+; CHECK-LABEL: fold_inv_true_64:
+; CHECK: {{subs.*xzr,|cmp}} x2, #1
+; CHECK-NEXT: csinv x0, x1, x0, eq
+; CHECK-NEXT: ret
+define i64 @fold_inv_true_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i64 %c, 1
+ %inv = xor i64 %x, -1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %y, %eq_bb ], [ %inv, %entry ]
+ ret i64 %cond
+}
+
+; CHECK-LABEL: fold_inv_false_32:
+; CHECK: {{subs.*wzr,|cmp}} w2, #1
+; CHECK-NEXT: csinv w0, w1, w0, ne
+; CHECK-NEXT: ret
+define i32 @fold_inv_false_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i32 %c, 1
+ %inv = xor i32 %x, -1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %inv, %eq_bb ], [ %y, %entry ]
+ ret i32 %cond
+}
+
+; CHECK-LABEL: fold_inv_false_64:
+; CHECK: {{subs.*xzr,|cmp}} x2, #1
+; CHECK-NEXT: csinv x0, x1, x0, ne
+; CHECK-NEXT: ret
+define i64 @fold_inv_false_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i64 %c, 1
+ %inv = xor i64 %x, -1
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %inv, %eq_bb ], [ %y, %entry ]
+ ret i64 %cond
+}
+
+; CHECK-LABEL: fold_neg_true_32:
+; CHECK: {{subs.*wzr,|cmp}} w2, #1
+; CHECK-NEXT: csneg w0, w1, w0, eq
+; CHECK-NEXT: ret
+define i32 @fold_neg_true_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i32 %c, 1
+ %neg = sub nsw i32 0, %x
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %y, %eq_bb ], [ %neg, %entry ]
+ ret i32 %cond
+}
+
+; CHECK-LABEL: fold_neg_true_64:
+; CHECK: {{subs.*xzr,|cmp}} x2, #1
+; CHECK-NEXT: csneg x0, x1, x0, eq
+; CHECK-NEXT: ret
+define i64 @fold_neg_true_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i64 %c, 1
+ %neg = sub nsw i64 0, %x
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %y, %eq_bb ], [ %neg, %entry ]
+ ret i64 %cond
+}
+
+; CHECK-LABEL: fold_neg_false_32:
+; CHECK: {{subs.*wzr,|cmp}} w2, #1
+; CHECK-NEXT: csneg w0, w1, w0, ne
+; CHECK-NEXT: ret
+define i32 @fold_neg_false_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i32 %c, 1
+ %neg = sub nsw i32 0, %x
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %neg, %eq_bb ], [ %y, %entry ]
+ ret i32 %cond
+}
+
+; CHECK-LABEL: fold_neg_false_64:
+; CHECK: {{subs.*xzr,|cmp}} x2, #1
+; CHECK-NEXT: csneg x0, x1, x0, ne
+; CHECK-NEXT: ret
+define i64 @fold_neg_false_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i64 %c, 1
+ %neg = sub nsw i64 0, %x
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %neg, %eq_bb ], [ %y, %entry ]
+ ret i64 %cond
+}
+
+; CHECK: cbnz_32
+; CHECK: {{subs.*wzr,|cmp}} w2, #0
+; CHECK-NEXT: csel w0, w1, w0, ne
+; CHECK-NEXT: ret
+define i32 @cbnz_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i32 %c, 0
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %x, %eq_bb ], [ %y, %entry ]
+ ret i32 %cond
+}
+
+; CHECK: cbnz_64
+; CHECK: {{subs.*xzr,|cmp}} x2, #0
+; CHECK-NEXT: csel x0, x1, x0, ne
+; CHECK-NEXT: ret
+define i64 @cbnz_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp eq i64 %c, 0
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %x, %eq_bb ], [ %y, %entry ]
+ ret i64 %cond
+}
+
+; CHECK: cbz_32
+; CHECK: {{subs.*wzr,|cmp}} w2, #0
+; CHECK-NEXT: csel w0, w1, w0, eq
+; CHECK-NEXT: ret
+define i32 @cbz_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %tobool = icmp ne i32 %c, 0
+ br i1 %tobool, label %ne_bb, label %done
+
+ne_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %x, %ne_bb ], [ %y, %entry ]
+ ret i32 %cond
+}
+
+; CHECK: cbz_64
+; CHECK: {{subs.*xzr,|cmp}} x2, #0
+; CHECK-NEXT: csel x0, x1, x0, eq
+; CHECK-NEXT: ret
+define i64 @cbz_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %tobool = icmp ne i64 %c, 0
+ br i1 %tobool, label %ne_bb, label %done
+
+ne_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %x, %ne_bb ], [ %y, %entry ]
+ ret i64 %cond
+}
+
+; CHECK: tbnz_32
+; CHECK: {{ands.*xzr,|tst}} w2, #0x80
+; CHECK-NEXT: csel w0, w1, w0, ne
+; CHECK-NEXT: ret
+define i32 @tbnz_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %mask = and i32 %c, 128
+ %tobool = icmp eq i32 %mask, 0
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %x, %eq_bb ], [ %y, %entry ]
+ ret i32 %cond
+}
+
+; CHECK: tbnz_64
+; CHECK: {{ands.*xzr,|tst}} x2, #0x8000000000000000
+; CHECK-NEXT: csel x0, x1, x0, ne
+; CHECK-NEXT: ret
+define i64 @tbnz_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %mask = and i64 %c, 9223372036854775808
+ %tobool = icmp eq i64 %mask, 0
+ br i1 %tobool, label %eq_bb, label %done
+
+eq_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %x, %eq_bb ], [ %y, %entry ]
+ ret i64 %cond
+}
+
+; CHECK: tbz_32
+; CHECK: {{ands.*xzr,|tst}} w2, #0x80
+; CHECK-NEXT: csel w0, w1, w0, eq
+; CHECK-NEXT: ret
+define i32 @tbz_32(i32 %x, i32 %y, i32 %c) nounwind ssp {
+entry:
+ %mask = and i32 %c, 128
+ %tobool = icmp ne i32 %mask, 0
+ br i1 %tobool, label %ne_bb, label %done
+
+ne_bb:
+ br label %done
+
+done:
+ %cond = phi i32 [ %x, %ne_bb ], [ %y, %entry ]
+ ret i32 %cond
+}
+
+; CHECK: tbz_64
+; CHECK: {{ands.*xzr,|tst}} x2, #0x8000000000000000
+; CHECK-NEXT: csel x0, x1, x0, eq
+; CHECK-NEXT: ret
+define i64 @tbz_64(i64 %x, i64 %y, i64 %c) nounwind ssp {
+entry:
+ %mask = and i64 %c, 9223372036854775808
+ %tobool = icmp ne i64 %mask, 0
+ br i1 %tobool, label %ne_bb, label %done
+
+ne_bb:
+ br label %done
+
+done:
+ %cond = phi i64 [ %x, %ne_bb ], [ %y, %entry ]
+ ret i64 %cond
+}
+
+; This function from 175.vpr folds an ADDWri into a CSINC.
+; Remember to clear the kill flag on the ADDWri.
+define i32 @get_ytrack_to_xtracks() nounwind ssp {
+entry:
+ br label %for.body
+
+for.body:
+ %x0 = load i32* undef, align 4
+ br i1 undef, label %if.then.i146, label %is_sbox.exit155
+
+if.then.i146:
+ %add8.i143 = add nsw i32 0, %x0
+ %rem.i144 = srem i32 %add8.i143, %x0
+ %add9.i145 = add i32 %rem.i144, 1
+ br label %is_sbox.exit155
+
+is_sbox.exit155: ; preds = %if.then.i146, %for.body
+ %seg_offset.0.i151 = phi i32 [ %add9.i145, %if.then.i146 ], [ undef, %for.body ]
+ %idxprom15.i152 = sext i32 %seg_offset.0.i151 to i64
+ %arrayidx18.i154 = getelementptr inbounds i32* null, i64 %idxprom15.i152
+ %x1 = load i32* %arrayidx18.i154, align 4
+ br i1 undef, label %for.body51, label %for.body
+
+for.body51: ; preds = %is_sbox.exit155
+ call fastcc void @get_switch_type(i32 %x1, i32 undef, i16 signext undef, i16 signext undef, i16* undef)
+ unreachable
+}
+declare fastcc void @get_switch_type(i32, i32, i16 signext, i16 signext, i16* nocapture) nounwind ssp
diff --git a/test/CodeGen/AArch64/arm64-elf-calls.ll b/test/CodeGen/AArch64/arm64-elf-calls.ll
new file mode 100644
index 000000000000..8c4020327b91
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-elf-calls.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -filetype=obj -o - %s | llvm-objdump -triple=arm64-linux-gnu - -r | FileCheck %s --check-prefix=CHECK-OBJ
+
+declare void @callee()
+
+define void @caller() {
+ call void @callee()
+ ret void
+; CHECK-LABEL: caller:
+; CHECK: bl callee
+; CHECK-OBJ: R_AARCH64_CALL26 callee
+}
+
+define void @tail_caller() {
+ tail call void @callee()
+ ret void
+; CHECK-LABEL: tail_caller:
+; CHECK: b callee
+; CHECK-OBJ: R_AARCH64_JUMP26 callee
+}
diff --git a/test/CodeGen/AArch64/arm64-elf-constpool.ll b/test/CodeGen/AArch64/arm64-elf-constpool.ll
new file mode 100644
index 000000000000..95d334376b76
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-elf-constpool.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -O0 -o - %s | FileCheck %s
+
+; O0 is checked for FastISel purposes. It has a separate path which
+; creates a constant-pool entry for floating-point values.
+
+define double @needs_const() {
+ ret double 3.14159
+; CHECK: .LCPI0_0:
+
+; CHECK: adrp {{x[0-9]+}}, .LCPI0_0
+; CHECK: ldr d0, [{{x[0-9]+}}, :lo12:.LCPI0_0]
+}
diff --git a/test/CodeGen/AArch64/arm64-elf-globals.ll b/test/CodeGen/AArch64/arm64-elf-globals.ll
new file mode 100644
index 000000000000..4ed44e7c17af
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-elf-globals.ll
@@ -0,0 +1,115 @@
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s -mcpu=cyclone | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST
+; RUN: llc -mtriple=arm64-linux-gnu -relocation-model=pic -o - %s -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-PIC
+; RUN: llc -mtriple=arm64-linux-gnu -O0 -relocation-model=pic -o - %s -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST-PIC
+
+@var8 = external global i8, align 1
+@var16 = external global i16, align 2
+@var32 = external global i32, align 4
+@var64 = external global i64, align 8
+
+define i8 @test_i8(i8 %new) {
+ %val = load i8* @var8, align 1
+ store i8 %new, i8* @var8
+ ret i8 %val
+; CHECK-LABEL: test_i8:
+; CHECK: adrp x[[HIREG:[0-9]+]], var8
+; CHECK: ldrb {{w[0-9]+}}, [x[[HIREG]], :lo12:var8]
+; CHECK: strb {{w[0-9]+}}, [x[[HIREG]], :lo12:var8]
+
+; CHECK-PIC-LABEL: test_i8:
+; CHECK-PIC: adrp x[[HIREG:[0-9]+]], :got:var8
+; CHECK-PIC: ldr x[[VAR_ADDR:[0-9]+]], [x[[HIREG]], :got_lo12:var8]
+; CHECK-PIC: ldrb {{w[0-9]+}}, [x[[VAR_ADDR]]]
+
+; CHECK-FAST: adrp x[[HIREG:[0-9]+]], var8
+; CHECK-FAST: ldrb {{w[0-9]+}}, [x[[HIREG]], :lo12:var8]
+
+; CHECK-FAST-PIC: adrp x[[HIREG:[0-9]+]], :got:var8
+; CHECK-FAST-PIC: ldr x[[VARADDR:[0-9]+]], [x[[HIREG]], :got_lo12:var8]
+; CHECK-FAST-PIC: ldr {{w[0-9]+}}, [x[[VARADDR]]]
+}
+
+define i16 @test_i16(i16 %new) {
+ %val = load i16* @var16, align 2
+ store i16 %new, i16* @var16
+ ret i16 %val
+; CHECK-LABEL: test_i16:
+; CHECK: adrp x[[HIREG:[0-9]+]], var16
+; CHECK: ldrh {{w[0-9]+}}, [x[[HIREG]], :lo12:var16]
+; CHECK: strh {{w[0-9]+}}, [x[[HIREG]], :lo12:var16]
+
+; CHECK-FAST: adrp x[[HIREG:[0-9]+]], var16
+; CHECK-FAST: ldrh {{w[0-9]+}}, [x[[HIREG]], :lo12:var16]
+}
+
+define i32 @test_i32(i32 %new) {
+ %val = load i32* @var32, align 4
+ store i32 %new, i32* @var32
+ ret i32 %val
+; CHECK-LABEL: test_i32:
+; CHECK: adrp x[[HIREG:[0-9]+]], var32
+; CHECK: ldr {{w[0-9]+}}, [x[[HIREG]], :lo12:var32]
+; CHECK: str {{w[0-9]+}}, [x[[HIREG]], :lo12:var32]
+
+; CHECK-FAST: adrp x[[HIREG:[0-9]+]], var32
+; CHECK-FAST: add {{x[0-9]+}}, x[[HIREG]], :lo12:var32
+}
+
+define i64 @test_i64(i64 %new) {
+ %val = load i64* @var64, align 8
+ store i64 %new, i64* @var64
+ ret i64 %val
+; CHECK-LABEL: test_i64:
+; CHECK: adrp x[[HIREG:[0-9]+]], var64
+; CHECK: ldr {{x[0-9]+}}, [x[[HIREG]], :lo12:var64]
+; CHECK: str {{x[0-9]+}}, [x[[HIREG]], :lo12:var64]
+
+; CHECK-FAST: adrp x[[HIREG:[0-9]+]], var64
+; CHECK-FAST: add {{x[0-9]+}}, x[[HIREG]], :lo12:var64
+}
+
+define i64* @test_addr() {
+ ret i64* @var64
+; CHECK-LABEL: test_addr:
+; CHECK: adrp [[HIREG:x[0-9]+]], var64
+; CHECK: add x0, [[HIREG]], :lo12:var64
+
+; CHECK-FAST: adrp [[HIREG:x[0-9]+]], var64
+; CHECK-FAST: add x0, [[HIREG]], :lo12:var64
+}
+
+@hiddenvar = hidden global i32 0, align 4
+@protectedvar = protected global i32 0, align 4
+
+define i32 @test_vis() {
+ %lhs = load i32* @hiddenvar, align 4
+ %rhs = load i32* @protectedvar, align 4
+ %ret = add i32 %lhs, %rhs
+ ret i32 %ret
+; CHECK-PIC: adrp {{x[0-9]+}}, hiddenvar
+; CHECK-PIC: ldr {{w[0-9]+}}, [{{x[0-9]+}}, :lo12:hiddenvar]
+; CHECK-PIC: adrp {{x[0-9]+}}, protectedvar
+; CHECK-PIC: ldr {{w[0-9]+}}, [{{x[0-9]+}}, :lo12:protectedvar]
+}
+
+@var_default = external global [2 x i32]
+
+define i32 @test_default_align() {
+ %addr = getelementptr [2 x i32]* @var_default, i32 0, i32 0
+ %val = load i32* %addr
+ ret i32 %val
+; CHECK-LABEL: test_default_align:
+; CHECK: adrp x[[HIREG:[0-9]+]], var_default
+; CHECK: ldr w0, [x[[HIREG]], :lo12:var_default]
+}
+
+define i64 @test_default_unaligned() {
+ %addr = bitcast [2 x i32]* @var_default to i64*
+ %val = load i64* %addr
+ ret i64 %val
+; CHECK-LABEL: test_default_unaligned:
+; CHECK: adrp [[HIREG:x[0-9]+]], var_default
+; CHECK: add x[[ADDR:[0-9]+]], [[HIREG]], :lo12:var_default
+; CHECK: ldr x0, [x[[ADDR]]]
+}
diff --git a/test/CodeGen/AArch64/arm64-ext.ll b/test/CodeGen/AArch64/arm64-ext.ll
new file mode 100644
index 000000000000..67860de51b0f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ext.ll
@@ -0,0 +1,118 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: test_vextd:
+;CHECK: {{ext.8b.*#3}}
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i8> %tmp3
+}
+
+define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: test_vextRd:
+;CHECK: {{ext.8b.*#5}}
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: test_vextq:
+;CHECK: {{ext.16b.*3}}
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
+ ret <16 x i8> %tmp3
+}
+
+define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: test_vextRq:
+;CHECK: {{ext.16b.*7}}
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: test_vextd16:
+;CHECK: {{ext.8b.*#6}}
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %tmp3
+}
+
+define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: test_vextq32:
+;CHECK: {{ext.16b.*12}}
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i32> %tmp3
+}
+
+; Undef shuffle indices should not prevent matching to VEXT:
+
+define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: test_vextd_undef:
+;CHECK: {{ext.8b.*}}
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i8> %tmp3
+}
+
+define <8 x i8> @test_vextd_undef2(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: test_vextd_undef2:
+;CHECK: {{ext.8b.*#6}}
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 5>
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: test_vextRq_undef:
+;CHECK: {{ext.16b.*#7}}
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @test_vextRq_undef2(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: test_vextRq_undef2:
+;CHECK: {{ext.16b.*#10}}
+ %tmp1 = load <8 x i16>* %A
+ %vext = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 4>
+ ret <8 x i16> %vext
+}
+
+; Tests for the ReconstructShuffle function. Indices have to be carefully
+; chosen to reach the lowering phase as a BUILD_VECTOR.
+
+; One vector needs vext, the other can be handled by extract_subvector.
+; Also checks that interleaving of the sources is handled correctly.
+; Essence: a vext is used on %A, and something saner than a stack load/store is used for the final result.
+define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: test_interleaved:
+;CHECK: ext.8b
+;CHECK: zip1.4h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 3, i32 8, i32 5, i32 9>
+ ret <4 x i16> %tmp3
+}
+
+; An undef in the shuffle list should still be optimizable
+define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: test_undef:
+;CHECK: zip1.4h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
+ ret <4 x i16> %tmp3
+}
diff --git a/test/CodeGen/AArch64/arm64-extend-int-to-fp.ll b/test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
new file mode 100644
index 000000000000..048fdb083a41
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <4 x float> @foo(<4 x i16> %a) nounwind {
+; CHECK-LABEL: foo:
+; CHECK: ushll.4s v0, v0, #0
+; CHECK-NEXT: ucvtf.4s v0, v0
+; CHECK-NEXT: ret
+ %vcvt.i = uitofp <4 x i16> %a to <4 x float>
+ ret <4 x float> %vcvt.i
+}
+
+define <4 x float> @bar(<4 x i16> %a) nounwind {
+; CHECK-LABEL: bar:
+; CHECK: sshll.4s v0, v0, #0
+; CHECK-NEXT: scvtf.4s v0, v0
+; CHECK-NEXT: ret
+ %vcvt.i = sitofp <4 x i16> %a to <4 x float>
+ ret <4 x float> %vcvt.i
+}
diff --git a/test/CodeGen/AArch64/arm64-extend.ll b/test/CodeGen/AArch64/arm64-extend.ll
new file mode 100644
index 000000000000..afcaca2c4920
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-extend.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
+@array = external global [0 x i32]
+
+define i64 @foo(i32 %i) {
+; CHECK: foo
+; CHECK: adrp x[[REG:[0-9]+]], _array@GOTPAGE
+; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _array@GOTPAGEOFF]
+; CHECK: ldrsw x0, [x[[REG1]], w0, sxtw #2]
+; CHECK: ret
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds [0 x i32]* @array, i64 0, i64 %idxprom
+ %tmp1 = load i32* %arrayidx, align 4
+ %conv = sext i32 %tmp1 to i64
+ ret i64 %conv
+}
diff --git a/test/CodeGen/AArch64/arm64-extern-weak.ll b/test/CodeGen/AArch64/arm64-extern-weak.ll
new file mode 100644
index 000000000000..a239403befa5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-extern-weak.ll
@@ -0,0 +1,51 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -o - < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large -o - < %s | FileCheck --check-prefix=CHECK-LARGE %s
+
+declare extern_weak i32 @var()
+
+define i32()* @foo() {
+; The usual ADRP/ADD pair can't be used for a weak reference because it must
+; evaluate to 0 if the symbol is undefined. We use a GOT entry instead.
+ ret i32()* @var
+
+; CHECK: adrp x[[VAR:[0-9]+]], :got:var
+; CHECK: ldr x0, [x[[VAR]], :got_lo12:var]
+
+ ; In the large model, the usual relocations are absolute and can
+ ; materialise 0.
+; CHECK-LARGE: movz x0, #:abs_g3:var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:var
+; CHECK-LARGE: movk x0, #:abs_g1_nc:var
+; CHECK-LARGE: movk x0, #:abs_g0_nc:var
+}
+
+
+@arr_var = extern_weak global [10 x i32]
+
+define i32* @bar() {
+ %addr = getelementptr [10 x i32]* @arr_var, i32 0, i32 5
+; CHECK: adrp x[[ARR_VAR_HI:[0-9]+]], :got:arr_var
+; CHECK: ldr [[ARR_VAR:x[0-9]+]], [x[[ARR_VAR_HI]], :got_lo12:arr_var]
+; CHECK: add x0, [[ARR_VAR]], #20
+ ret i32* %addr
+
+ ; In the large model, the usual relocations are absolute and can
+ ; materialise 0.
+; CHECK-LARGE: movz [[ARR_VAR:x[0-9]+]], #:abs_g3:arr_var
+; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g2_nc:arr_var
+; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g1_nc:arr_var
+; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g0_nc:arr_var
+}
+
+@defined_weak_var = internal unnamed_addr global i32 0
+
+define i32* @wibble() {
+ ret i32* @defined_weak_var
+; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
+; CHECK: add x0, [[BASE]], :lo12:defined_weak_var
+
+; CHECK-LARGE: movz x0, #:abs_g3:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g0_nc:defined_weak_var
+}
diff --git a/test/CodeGen/AArch64/arm64-extload-knownzero.ll b/test/CodeGen/AArch64/arm64-extload-knownzero.ll
new file mode 100644
index 000000000000..14e5fd310d7b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-extload-knownzero.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+; rdar://12771555
+
+define void @foo(i16* %ptr, i32 %a) nounwind {
+entry:
+; CHECK-LABEL: foo:
+ %tmp1 = icmp ult i32 %a, 100
+ br i1 %tmp1, label %bb1, label %bb2
+bb1:
+; CHECK: %bb1
+; CHECK: ldrh [[REG:w[0-9]+]]
+ %tmp2 = load i16* %ptr, align 2
+ br label %bb2
+bb2:
+; CHECK: %bb2
+; CHECK-NOT: and {{w[0-9]+}}, [[REG]], #0xffff
+; CHECK: cmp [[REG]], #23
+ %tmp3 = phi i16 [ 0, %entry ], [ %tmp2, %bb1 ]
+ %cmp = icmp ult i16 %tmp3, 24
+ br i1 %cmp, label %bb3, label %exit
+bb3:
+ call void @bar() nounwind
+ br label %exit
+exit:
+ ret void
+}
+
+declare void @bar ()
diff --git a/test/CodeGen/AArch64/arm64-extract.ll b/test/CodeGen/AArch64/arm64-extract.ll
new file mode 100644
index 000000000000..01984662d23a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-extract.ll
@@ -0,0 +1,58 @@
+; RUN: llc -aarch64-extr-generation=true -verify-machineinstrs < %s \
+; RUN: -march=arm64 | FileCheck %s
+
+define i64 @ror_i64(i64 %in) {
+; CHECK-LABEL: ror_i64:
+ %left = shl i64 %in, 19
+ %right = lshr i64 %in, 45
+ %val5 = or i64 %left, %right
+; CHECK: ror {{x[0-9]+}}, x0, #45
+ ret i64 %val5
+}
+
+define i32 @ror_i32(i32 %in) {
+; CHECK-LABEL: ror_i32:
+ %left = shl i32 %in, 9
+ %right = lshr i32 %in, 23
+ %val5 = or i32 %left, %right
+; CHECK: ror {{w[0-9]+}}, w0, #23
+ ret i32 %val5
+}
+
+define i32 @extr_i32(i32 %lhs, i32 %rhs) {
+; CHECK-LABEL: extr_i32:
+ %left = shl i32 %lhs, 6
+ %right = lshr i32 %rhs, 26
+ %val = or i32 %left, %right
+ ; Order of lhs and rhs matters here. Regalloc would have to be very odd to use
+ ; something other than w0 and w1.
+; CHECK: extr {{w[0-9]+}}, w0, w1, #26
+
+ ret i32 %val
+}
+
+define i64 @extr_i64(i64 %lhs, i64 %rhs) {
+; CHECK-LABEL: extr_i64:
+ %right = lshr i64 %rhs, 40
+ %left = shl i64 %lhs, 24
+ %val = or i64 %right, %left
+ ; Order of lhs and rhs matters here. Regalloc would have to be very odd to use
+ ; something other than x0 and x1.
+; CHECK: extr {{x[0-9]+}}, x0, x1, #40
+
+ ret i64 %val
+}
+
+; Regression test: a bad experimental pattern crept into git which optimised
+; this pattern to a single EXTR.
+define i32 @extr_regress(i32 %a, i32 %b) {
+; CHECK-LABEL: extr_regress:
+
+ %sh1 = shl i32 %a, 14
+ %sh2 = lshr i32 %b, 14
+ %val = or i32 %sh2, %sh1
+; CHECK-NOT: extr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, #{{[0-9]+}}
+
+ ret i32 %val
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/arm64-extract_subvector.ll b/test/CodeGen/AArch64/arm64-extract_subvector.ll
new file mode 100644
index 000000000000..8b15a6453b2b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-extract_subvector.ll
@@ -0,0 +1,51 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+; Extracting the upper half of a vector is an "ext.16b v0, v0, v0, #8" insn.
+
+define <8 x i8> @v8i8(<16 x i8> %a) nounwind {
+; CHECK: v8i8
+; CHECK: ext.16b v0, v0, v0, #8
+; CHECK: ret
+ %ret = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %ret
+}
+
+define <4 x i16> @v4i16(<8 x i16> %a) nounwind {
+; CHECK-LABEL: v4i16:
+; CHECK: ext.16b v0, v0, v0, #8
+; CHECK: ret
+ %ret = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %ret
+}
+
+define <2 x i32> @v2i32(<4 x i32> %a) nounwind {
+; CHECK-LABEL: v2i32:
+; CHECK: ext.16b v0, v0, v0, #8
+; CHECK: ret
+ %ret = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> <i32 2, i32 3>
+ ret <2 x i32> %ret
+}
+
+define <1 x i64> @v1i64(<2 x i64> %a) nounwind {
+; CHECK-LABEL: v1i64:
+; CHECK: ext.16b v0, v0, v0, #8
+; CHECK: ret
+ %ret = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> <i32 1>
+ ret <1 x i64> %ret
+}
+
+define <2 x float> @v2f32(<4 x float> %a) nounwind {
+; CHECK-LABEL: v2f32:
+; CHECK: ext.16b v0, v0, v0, #8
+; CHECK: ret
+ %ret = shufflevector <4 x float> %a, <4 x float> %a, <2 x i32> <i32 2, i32 3>
+ ret <2 x float> %ret
+}
+
+define <1 x double> @v1f64(<2 x double> %a) nounwind {
+; CHECK-LABEL: v1f64:
+; CHECK: ext.16b v0, v0, v0, #8
+; CHECK: ret
+ %ret = shufflevector <2 x double> %a, <2 x double> %a, <1 x i32> <i32 1>
+ ret <1 x double> %ret
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll b/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
new file mode 100644
index 000000000000..ebd847e0f728
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
@@ -0,0 +1,47 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+@sortlist = common global [5001 x i32] zeroinitializer, align 16
+@sortlist2 = common global [5001 x i64] zeroinitializer, align 16
+
+; Load an address with an offset larger than the LDR immediate can handle
+define i32 @foo() nounwind {
+entry:
+; CHECK: @foo
+; CHECK: adrp x[[REG:[0-9]+]], _sortlist@GOTPAGE
+; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _sortlist@GOTPAGEOFF]
+; CHECK: movz x[[REG2:[0-9]+]], #0x4e20
+; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
+; CHECK: ldr w0, [x[[REG3]]]
+; CHECK: ret
+ %0 = load i32* getelementptr inbounds ([5001 x i32]* @sortlist, i32 0, i64 5000), align 4
+ ret i32 %0
+}
+
+define i64 @foo2() nounwind {
+entry:
+; CHECK: @foo2
+; CHECK: adrp x[[REG:[0-9]+]], _sortlist2@GOTPAGE
+; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _sortlist2@GOTPAGEOFF]
+; CHECK: movz x[[REG2:[0-9]+]], #0x9c40
+; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
+; CHECK: ldr x0, [x[[REG3]]]
+; CHECK: ret
+ %0 = load i64* getelementptr inbounds ([5001 x i64]* @sortlist2, i32 0, i64 5000), align 4
+ ret i64 %0
+}
+
+; Load an address with a ridiculously large offset.
+; rdar://12505553
+@pd2 = common global i8* null, align 8
+
+define signext i8 @foo3() nounwind ssp {
+entry:
+; CHECK: @foo3
+; CHECK: movz x[[REG:[0-9]+]], #0xb3a, lsl #32
+; CHECK: movk x[[REG]], #0x73ce, lsl #16
+; CHECK: movk x[[REG]], #0x2ff2
+ %0 = load i8** @pd2, align 8
+ %arrayidx = getelementptr inbounds i8* %0, i64 12345678901234
+ %1 = load i8* %arrayidx, align 1
+ ret i8 %1
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll b/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
new file mode 100644
index 000000000000..1706e9eba2bd
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
@@ -0,0 +1,25 @@
+; This test should cause TargetMaterializeAlloca to be invoked.
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+%struct.S1Ty = type { i64 }
+%struct.S2Ty = type { %struct.S1Ty, %struct.S1Ty }
+
+define void @takeS1(%struct.S1Ty* %V) nounwind {
+entry:
+ %V.addr = alloca %struct.S1Ty*, align 8
+ store %struct.S1Ty* %V, %struct.S1Ty** %V.addr, align 8
+ ret void
+}
+
+define void @main() nounwind {
+entry:
+; CHECK: main
+; CHECK: mov x29, sp
+; CHECK: mov x[[REG:[0-9]+]], sp
+; CHECK-NEXT: orr x[[REG1:[0-9]+]], xzr, #0x8
+; CHECK-NEXT: add x0, x[[REG]], x[[REG1]]
+ %E = alloca %struct.S2Ty, align 4
+ %B = getelementptr inbounds %struct.S2Ty* %E, i32 0, i32 1
+ call void @takeS1(%struct.S1Ty* %B)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-br.ll b/test/CodeGen/AArch64/arm64-fast-isel-br.ll
new file mode 100644
index 000000000000..37a8295c8931
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-br.ll
@@ -0,0 +1,155 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -mcpu=cyclone | FileCheck %s
+
+define void @branch1() nounwind uwtable ssp {
+ %x = alloca i32, align 4
+ store i32 0, i32* %x, align 4
+ %1 = load i32* %x, align 4
+ %2 = icmp ne i32 %1, 0
+ br i1 %2, label %3, label %4
+
+; <label>:3 ; preds = %0
+ br label %4
+
+; <label>:4 ; preds = %3, %0
+ ret void
+}
+
+define void @branch2() nounwind uwtable ssp {
+ %1 = alloca i32, align 4
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ store i32 0, i32* %1
+ store i32 1, i32* %y, align 4
+ store i32 1, i32* %x, align 4
+ store i32 0, i32* %z, align 4
+ %2 = load i32* %x, align 4
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+; <label>:4 ; preds = %0
+ store i32 0, i32* %1
+ br label %14
+
+; <label>:5 ; preds = %0
+ %6 = load i32* %y, align 4
+ %7 = icmp ne i32 %6, 0
+ br i1 %7, label %8, label %13
+
+; <label>:8 ; preds = %5
+ %9 = load i32* %z, align 4
+ %10 = icmp ne i32 %9, 0
+ br i1 %10, label %11, label %12
+
+; <label>:11 ; preds = %8
+ store i32 1, i32* %1
+ br label %14
+
+; <label>:12 ; preds = %8
+ store i32 0, i32* %1
+ br label %14
+
+; <label>:13 ; preds = %5
+ br label %14
+
+; <label>:14 ; preds = %4, %11, %12, %13
+ %15 = load i32* %1
+ ret void
+}
+
+define void @true_() nounwind uwtable ssp {
+; CHECK: @true_
+; CHECK: b LBB2_1
+ br i1 true, label %1, label %2
+
+; <label>:1
+; CHECK: LBB2_1
+ br label %2
+
+; <label>:2
+ ret void
+}
+
+define void @false_() nounwind uwtable ssp {
+; CHECK: @false_
+; CHECK: b LBB3_2
+ br i1 false, label %1, label %2
+
+; <label>:1
+ br label %2
+
+; <label>:2
+; CHECK: LBB3_2
+ ret void
+}
+
+define zeroext i8 @trunc_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) {
+entry:
+ %a.addr = alloca i8, align 1
+ %b.addr = alloca i16, align 2
+ %c.addr = alloca i32, align 4
+ %d.addr = alloca i64, align 8
+ store i8 %a, i8* %a.addr, align 1
+ store i16 %b, i16* %b.addr, align 2
+ store i32 %c, i32* %c.addr, align 4
+ store i64 %d, i64* %d.addr, align 8
+ %0 = load i16* %b.addr, align 2
+; CHECK: and w0, w0, #0x1
+; CHECK: subs w0, w0, #0
+; CHECK: b.eq LBB4_2
+ %conv = trunc i16 %0 to i1
+ br i1 %conv, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @foo1()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %1 = load i32* %c.addr, align 4
+; CHECK: and w[[REG:[0-9]+]], w{{[0-9]+}}, #0x1
+; CHECK: subs w{{[0-9]+}}, w[[REG]], #0
+; CHECK: b.eq LBB4_4
+ %conv1 = trunc i32 %1 to i1
+ br i1 %conv1, label %if.then3, label %if.end4
+
+if.then3: ; preds = %if.end
+ call void @foo1()
+ br label %if.end4
+
+if.end4: ; preds = %if.then3, %if.end
+ %2 = load i64* %d.addr, align 8
+; CHECK: subs w{{[0-9]+}}, w{{[0-9]+}}, #0
+; CHECK: b.eq LBB4_6
+ %conv5 = trunc i64 %2 to i1
+ br i1 %conv5, label %if.then7, label %if.end8
+
+if.then7: ; preds = %if.end4
+ call void @foo1()
+ br label %if.end8
+
+if.end8: ; preds = %if.then7, %if.end4
+ %3 = load i8* %a.addr, align 1
+ ret i8 %3
+}
+
+declare void @foo1()
+
+; rdar://15174028
+define i32 @trunc64(i64 %foo) nounwind {
+; CHECK: trunc64
+; CHECK: orr [[REG:x[0-9]+]], xzr, #0x1
+; CHECK: and [[REG2:x[0-9]+]], x0, [[REG]]
+; CHECK: mov x[[REG3:[0-9]+]], [[REG2]]
+; CHECK: and [[REG4:w[0-9]+]], w[[REG3]], #0x1
+; CHECK: subs {{w[0-9]+}}, [[REG4]], #0
+; CHECK: b.eq LBB5_2
+ %a = and i64 %foo, 1
+ %b = trunc i64 %a to i1
+ br i1 %b, label %if.then, label %if.else
+
+if.then:
+ ret i32 1
+
+if.else:
+ ret i32 0
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-call.ll b/test/CodeGen/AArch64/arm64-fast-isel-call.ll
new file mode 100644
index 000000000000..8d756ae54619
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-call.ll
@@ -0,0 +1,100 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64_be-linux-gnu | FileCheck %s --check-prefix=CHECK-BE
+
+define void @call0() nounwind {
+entry:
+ ret void
+}
+
+define void @foo0() nounwind {
+entry:
+; CHECK: foo0
+; CHECK: bl _call0
+ call void @call0()
+ ret void
+}
+
+define i32 @call1(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ %tmp = load i32* %a.addr, align 4
+ ret i32 %tmp
+}
+
+define i32 @foo1(i32 %a) nounwind {
+entry:
+; CHECK: foo1
+; CHECK: stur w0, [x29, #-4]
+; CHECK-NEXT: ldur w0, [x29, #-4]
+; CHECK-NEXT: bl _call1
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ %tmp = load i32* %a.addr, align 4
+ %call = call i32 @call1(i32 %tmp)
+ ret i32 %call
+}
+
+define i32 @sext_(i8 %a, i16 %b) nounwind {
+entry:
+; CHECK: @sext_
+; CHECK: sxtb w0, w0
+; CHECK: sxth w1, w1
+; CHECK: bl _foo_sext_
+ call void @foo_sext_(i8 signext %a, i16 signext %b)
+ ret i32 0
+}
+
+declare void @foo_sext_(i8 %a, i16 %b)
+
+define i32 @zext_(i8 %a, i16 %b) nounwind {
+entry:
+; CHECK: @zext_
+; CHECK: uxtb w0, w0
+; CHECK: uxth w1, w1
+ call void @foo_zext_(i8 zeroext %a, i16 zeroext %b)
+ ret i32 0
+}
+
+declare void @foo_zext_(i8 %a, i16 %b)
+
+define i32 @t1(i32 %argc, i8** nocapture %argv) {
+entry:
+; CHECK: @t1
+; The last parameter will be passed on the stack as an i8.
+; CHECK: strb w{{[0-9]+}}, [sp]
+; CHECK-NEXT: bl _bar
+ %call = call i32 @bar(i8 zeroext 0, i8 zeroext -8, i8 zeroext -69, i8 zeroext 28, i8 zeroext 40, i8 zeroext -70, i8 zeroext 28, i8 zeroext 39, i8 zeroext -41)
+ ret i32 0
+}
+
+declare i32 @bar(i8 zeroext, i8 zeroext, i8 zeroext, i8 zeroext, i8 zeroext, i8 zeroext, i8 zeroext, i8 zeroext, i8 zeroext)
+
+; Test materialization of integers. Target-independent selector handles this.
+define i32 @t2() {
+entry:
+; CHECK: @t2
+; CHECK: movz x0, #0
+; CHECK: orr w1, wzr, #0xfffffff8
+; CHECK: orr w[[REG:[0-9]+]], wzr, #0x3ff
+; CHECK: orr w[[REG2:[0-9]+]], wzr, #0x2
+; CHECK: movz w[[REG3:[0-9]+]], #0
+; CHECK: orr w[[REG4:[0-9]+]], wzr, #0x1
+; CHECK: uxth w2, w[[REG]]
+; CHECK: sxtb w3, w[[REG2]]
+; CHECK: and w4, w[[REG3]], #0x1
+; CHECK: and w5, w[[REG4]], #0x1
+; CHECK: bl _func2
+ %call = call i32 @func2(i64 zeroext 0, i32 signext -8, i16 zeroext 1023, i8 signext -254, i1 zeroext 0, i1 zeroext 1)
+ ret i32 0
+}
+
+declare i32 @func2(i64 zeroext, i32 signext, i16 zeroext, i8 signext, i1 zeroext, i1 zeroext)
+
+declare void @callee_b0f(i8 %bp10, i8 %bp11, i8 %bp12, i8 %bp13, i8 %bp14, i8 %bp15, i8 %bp17, i8 %bp18, i8 %bp19)
+define void @caller_b1f() {
+entry:
+ ; CHECK-BE: strb w{{.*}}, [sp, #7]
+ call void @callee_b0f(i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 42)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll b/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
new file mode 100644
index 000000000000..c5417de0ae97
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
@@ -0,0 +1,442 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -mcpu=cyclone | FileCheck %s
+
+;; Test various conversions.
+define zeroext i32 @trunc_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) nounwind ssp {
+entry:
+; CHECK: trunc_
+; CHECK: sub sp, sp, #16
+; CHECK: strb w0, [sp, #15]
+; CHECK: strh w1, [sp, #12]
+; CHECK: str w2, [sp, #8]
+; CHECK: str x3, [sp]
+; CHECK: ldr x3, [sp]
+; CHECK: mov x0, x3
+; CHECK: str w0, [sp, #8]
+; CHECK: ldr w0, [sp, #8]
+; CHECK: strh w0, [sp, #12]
+; CHECK: ldrh w0, [sp, #12]
+; CHECK: strb w0, [sp, #15]
+; CHECK: ldrb w0, [sp, #15]
+; CHECK: uxtb w0, w0
+; CHECK: add sp, sp, #16
+; CHECK: ret
+ %a.addr = alloca i8, align 1
+ %b.addr = alloca i16, align 2
+ %c.addr = alloca i32, align 4
+ %d.addr = alloca i64, align 8
+ store i8 %a, i8* %a.addr, align 1
+ store i16 %b, i16* %b.addr, align 2
+ store i32 %c, i32* %c.addr, align 4
+ store i64 %d, i64* %d.addr, align 8
+ %tmp = load i64* %d.addr, align 8
+ %conv = trunc i64 %tmp to i32
+ store i32 %conv, i32* %c.addr, align 4
+ %tmp1 = load i32* %c.addr, align 4
+ %conv2 = trunc i32 %tmp1 to i16
+ store i16 %conv2, i16* %b.addr, align 2
+ %tmp3 = load i16* %b.addr, align 2
+ %conv4 = trunc i16 %tmp3 to i8
+ store i8 %conv4, i8* %a.addr, align 1
+ %tmp5 = load i8* %a.addr, align 1
+ %conv6 = zext i8 %tmp5 to i32
+ ret i32 %conv6
+}
+
+define i64 @zext_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) nounwind ssp {
+entry:
+; CHECK: zext_
+; CHECK: sub sp, sp, #16
+; CHECK: strb w0, [sp, #15]
+; CHECK: strh w1, [sp, #12]
+; CHECK: str w2, [sp, #8]
+; CHECK: str x3, [sp]
+; CHECK: ldrb w0, [sp, #15]
+; CHECK: uxtb w0, w0
+; CHECK: strh w0, [sp, #12]
+; CHECK: ldrh w0, [sp, #12]
+; CHECK: uxth w0, w0
+; CHECK: str w0, [sp, #8]
+; CHECK: ldr w0, [sp, #8]
+; CHECK: mov x3, x0
+; CHECK: ubfx x3, x3, #0, #32
+; CHECK: str x3, [sp]
+; CHECK: ldr x0, [sp]
+; CHECK: ret
+ %a.addr = alloca i8, align 1
+ %b.addr = alloca i16, align 2
+ %c.addr = alloca i32, align 4
+ %d.addr = alloca i64, align 8
+ store i8 %a, i8* %a.addr, align 1
+ store i16 %b, i16* %b.addr, align 2
+ store i32 %c, i32* %c.addr, align 4
+ store i64 %d, i64* %d.addr, align 8
+ %tmp = load i8* %a.addr, align 1
+ %conv = zext i8 %tmp to i16
+ store i16 %conv, i16* %b.addr, align 2
+ %tmp1 = load i16* %b.addr, align 2
+ %conv2 = zext i16 %tmp1 to i32
+ store i32 %conv2, i32* %c.addr, align 4
+ %tmp3 = load i32* %c.addr, align 4
+ %conv4 = zext i32 %tmp3 to i64
+ store i64 %conv4, i64* %d.addr, align 8
+ %tmp5 = load i64* %d.addr, align 8
+ ret i64 %tmp5
+}
+
+define i32 @zext_i1_i32(i1 zeroext %a) nounwind ssp {
+entry:
+; CHECK: @zext_i1_i32
+; CHECK: and w0, w0, #0x1
+ %conv = zext i1 %a to i32
+ ret i32 %conv
+}
+
+define i64 @zext_i1_i64(i1 zeroext %a) nounwind ssp {
+entry:
+; CHECK: @zext_i1_i64
+; CHECK: and w0, w0, #0x1
+ %conv = zext i1 %a to i64
+ ret i64 %conv
+}
+
+define i64 @sext_(i8 signext %a, i16 signext %b, i32 %c, i64 %d) nounwind ssp {
+entry:
+; CHECK: sext_
+; CHECK: sub sp, sp, #16
+; CHECK: strb w0, [sp, #15]
+; CHECK: strh w1, [sp, #12]
+; CHECK: str w2, [sp, #8]
+; CHECK: str x3, [sp]
+; CHECK: ldrb w0, [sp, #15]
+; CHECK: sxtb w0, w0
+; CHECK: strh w0, [sp, #12]
+; CHECK: ldrh w0, [sp, #12]
+; CHECK: sxth w0, w0
+; CHECK: str w0, [sp, #8]
+; CHECK: ldr w0, [sp, #8]
+; CHECK: mov x3, x0
+; CHECK: sxtw x3, w3
+; CHECK: str x3, [sp]
+; CHECK: ldr x0, [sp]
+; CHECK: ret
+ %a.addr = alloca i8, align 1
+ %b.addr = alloca i16, align 2
+ %c.addr = alloca i32, align 4
+ %d.addr = alloca i64, align 8
+ store i8 %a, i8* %a.addr, align 1
+ store i16 %b, i16* %b.addr, align 2
+ store i32 %c, i32* %c.addr, align 4
+ store i64 %d, i64* %d.addr, align 8
+ %tmp = load i8* %a.addr, align 1
+ %conv = sext i8 %tmp to i16
+ store i16 %conv, i16* %b.addr, align 2
+ %tmp1 = load i16* %b.addr, align 2
+ %conv2 = sext i16 %tmp1 to i32
+ store i32 %conv2, i32* %c.addr, align 4
+ %tmp3 = load i32* %c.addr, align 4
+ %conv4 = sext i32 %tmp3 to i64
+ store i64 %conv4, i64* %d.addr, align 8
+ %tmp5 = load i64* %d.addr, align 8
+ ret i64 %tmp5
+}
+
+; Test sext i8 to i64
+
+define zeroext i64 @sext_i8_i64(i8 zeroext %in) {
+; CHECK-LABEL: sext_i8_i64:
+; CHECK: mov x[[TMP:[0-9]+]], x0
+; CHECK: sxtb x0, w[[TMP]]
+ %big = sext i8 %in to i64
+ ret i64 %big
+}
+
+define zeroext i64 @sext_i16_i64(i16 zeroext %in) {
+; CHECK-LABEL: sext_i16_i64:
+; CHECK: mov x[[TMP:[0-9]+]], x0
+; CHECK: sxth x0, w[[TMP]]
+ %big = sext i16 %in to i64
+ ret i64 %big
+}
+
+; Test sext i1 to i32
+define i32 @sext_i1_i32(i1 signext %a) nounwind ssp {
+entry:
+; CHECK: sext_i1_i32
+; CHECK: sbfx w0, w0, #0, #1
+ %conv = sext i1 %a to i32
+ ret i32 %conv
+}
+
+; Test sext i1 to i16
+define signext i16 @sext_i1_i16(i1 %a) nounwind ssp {
+entry:
+; CHECK: sext_i1_i16
+; CHECK: sbfx w0, w0, #0, #1
+ %conv = sext i1 %a to i16
+ ret i16 %conv
+}
+
+; Test sext i1 to i8
+define signext i8 @sext_i1_i8(i1 %a) nounwind ssp {
+entry:
+; CHECK: sext_i1_i8
+; CHECK: sbfx w0, w0, #0, #1
+ %conv = sext i1 %a to i8
+ ret i8 %conv
+}
+
+; Test fpext
+define double @fpext_(float %a) nounwind ssp {
+entry:
+; CHECK: fpext_
+; CHECK: fcvt d0, s0
+ %conv = fpext float %a to double
+ ret double %conv
+}
+
+; Test fptrunc
+define float @fptrunc_(double %a) nounwind ssp {
+entry:
+; CHECK: fptrunc_
+; CHECK: fcvt s0, d0
+ %conv = fptrunc double %a to float
+ ret float %conv
+}
+
+; Test fptosi
+define i32 @fptosi_ws(float %a) nounwind ssp {
+entry:
+; CHECK: fptosi_ws
+; CHECK: fcvtzs w0, s0
+ %conv = fptosi float %a to i32
+ ret i32 %conv
+}
+
+; Test fptosi
+define i32 @fptosi_wd(double %a) nounwind ssp {
+entry:
+; CHECK: fptosi_wd
+; CHECK: fcvtzs w0, d0
+ %conv = fptosi double %a to i32
+ ret i32 %conv
+}
+
+; Test fptoui
+define i32 @fptoui_ws(float %a) nounwind ssp {
+entry:
+; CHECK: fptoui_ws
+; CHECK: fcvtzu w0, s0
+ %conv = fptoui float %a to i32
+ ret i32 %conv
+}
+
+; Test fptoui
+define i32 @fptoui_wd(double %a) nounwind ssp {
+entry:
+; CHECK: fptoui_wd
+; CHECK: fcvtzu w0, d0
+ %conv = fptoui double %a to i32
+ ret i32 %conv
+}
+
+; Test sitofp
+define float @sitofp_sw_i1(i1 %a) nounwind ssp {
+entry:
+; CHECK: sitofp_sw_i1
+; CHECK: sbfx w0, w0, #0, #1
+; CHECK: scvtf s0, w0
+ %conv = sitofp i1 %a to float
+ ret float %conv
+}
+
+; Test sitofp
+define float @sitofp_sw_i8(i8 %a) nounwind ssp {
+entry:
+; CHECK: sitofp_sw_i8
+; CHECK: sxtb w0, w0
+; CHECK: scvtf s0, w0
+ %conv = sitofp i8 %a to float
+ ret float %conv
+}
+
+; Test sitofp
+define float @sitofp_sw_i16(i16 %a) nounwind ssp {
+entry:
+; CHECK: sitofp_sw_i16
+; CHECK: sxth w0, w0
+; CHECK: scvtf s0, w0
+ %conv = sitofp i16 %a to float
+ ret float %conv
+}
+
+; Test sitofp
+define float @sitofp_sw(i32 %a) nounwind ssp {
+entry:
+; CHECK: sitofp_sw
+; CHECK: scvtf s0, w0
+ %conv = sitofp i32 %a to float
+ ret float %conv
+}
+
+; Test sitofp
+define float @sitofp_sx(i64 %a) nounwind ssp {
+entry:
+; CHECK: sitofp_sx
+; CHECK: scvtf s0, x0
+ %conv = sitofp i64 %a to float
+ ret float %conv
+}
+
+; Test sitofp
+define double @sitofp_dw(i32 %a) nounwind ssp {
+entry:
+; CHECK: sitofp_dw
+; CHECK: scvtf d0, w0
+ %conv = sitofp i32 %a to double
+ ret double %conv
+}
+
+; Test sitofp
+define double @sitofp_dx(i64 %a) nounwind ssp {
+entry:
+; CHECK: sitofp_dx
+; CHECK: scvtf d0, x0
+ %conv = sitofp i64 %a to double
+ ret double %conv
+}
+
+; Test uitofp
+define float @uitofp_sw_i1(i1 %a) nounwind ssp {
+entry:
+; CHECK: uitofp_sw_i1
+; CHECK: and w0, w0, #0x1
+; CHECK: ucvtf s0, w0
+ %conv = uitofp i1 %a to float
+ ret float %conv
+}
+
+; Test uitofp
+define float @uitofp_sw_i8(i8 %a) nounwind ssp {
+entry:
+; CHECK: uitofp_sw_i8
+; CHECK: uxtb w0, w0
+; CHECK: ucvtf s0, w0
+ %conv = uitofp i8 %a to float
+ ret float %conv
+}
+
+; Test uitofp
+define float @uitofp_sw_i16(i16 %a) nounwind ssp {
+entry:
+; CHECK: uitofp_sw_i16
+; CHECK: uxth w0, w0
+; CHECK: ucvtf s0, w0
+ %conv = uitofp i16 %a to float
+ ret float %conv
+}
+
+; Test uitofp
+define float @uitofp_sw(i32 %a) nounwind ssp {
+entry:
+; CHECK: uitofp_sw
+; CHECK: ucvtf s0, w0
+ %conv = uitofp i32 %a to float
+ ret float %conv
+}
+
+; Test uitofp
+define float @uitofp_sx(i64 %a) nounwind ssp {
+entry:
+; CHECK: uitofp_sx
+; CHECK: ucvtf s0, x0
+ %conv = uitofp i64 %a to float
+ ret float %conv
+}
+
+; Test uitofp
+define double @uitofp_dw(i32 %a) nounwind ssp {
+entry:
+; CHECK: uitofp_dw
+; CHECK: ucvtf d0, w0
+ %conv = uitofp i32 %a to double
+ ret double %conv
+}
+
+; Test uitofp
+define double @uitofp_dx(i64 %a) nounwind ssp {
+entry:
+; CHECK: uitofp_dx
+; CHECK: ucvtf d0, x0
+ %conv = uitofp i64 %a to double
+ ret double %conv
+}
+
+define i32 @i64_trunc_i32(i64 %a) nounwind ssp {
+entry:
+; CHECK: i64_trunc_i32
+; CHECK: mov x1, x0
+ %conv = trunc i64 %a to i32
+ ret i32 %conv
+}
+
+define zeroext i16 @i64_trunc_i16(i64 %a) nounwind ssp {
+entry:
+; CHECK: i64_trunc_i16
+; CHECK: mov x[[REG:[0-9]+]], x0
+; CHECK: and [[REG2:w[0-9]+]], w[[REG]], #0xffff
+; CHECK: uxth w0, [[REG2]]
+ %conv = trunc i64 %a to i16
+ ret i16 %conv
+}
+
+define zeroext i8 @i64_trunc_i8(i64 %a) nounwind ssp {
+entry:
+; CHECK: i64_trunc_i8
+; CHECK: mov x[[REG:[0-9]+]], x0
+; CHECK: and [[REG2:w[0-9]+]], w[[REG]], #0xff
+; CHECK: uxtb w0, [[REG2]]
+ %conv = trunc i64 %a to i8
+ ret i8 %conv
+}
+
+define zeroext i1 @i64_trunc_i1(i64 %a) nounwind ssp {
+entry:
+; CHECK: i64_trunc_i1
+; CHECK: mov x[[REG:[0-9]+]], x0
+; CHECK: and [[REG2:w[0-9]+]], w[[REG]], #0x1
+; CHECK: and w0, [[REG2]], #0x1
+ %conv = trunc i64 %a to i1
+ ret i1 %conv
+}
+
+; rdar://15101939
+define void @stack_trunc() nounwind {
+; CHECK: stack_trunc
+; CHECK: sub sp, sp, #16
+; CHECK: ldr [[REG:x[0-9]+]], [sp]
+; CHECK: mov x[[REG2:[0-9]+]], [[REG]]
+; CHECK: and [[REG3:w[0-9]+]], w[[REG2]], #0xff
+; CHECK: strb [[REG3]], [sp, #15]
+; CHECK: add sp, sp, #16
+ %a = alloca i8, align 1
+ %b = alloca i64, align 8
+ %c = load i64* %b, align 8
+ %d = trunc i64 %c to i8
+ store i8 %d, i8* %a, align 1
+ ret void
+}
+
+define zeroext i64 @zext_i8_i64(i8 zeroext %in) {
+; CHECK-LABEL: zext_i8_i64:
+; CHECK: mov x[[TMP:[0-9]+]], x0
+; CHECK: ubfx x0, x[[TMP]], #0, #8
+ %big = zext i8 %in to i64
+ ret i64 %big
+}
+define zeroext i64 @zext_i16_i64(i16 zeroext %in) {
+; CHECK-LABEL: zext_i16_i64:
+; CHECK: mov x[[TMP:[0-9]+]], x0
+; CHECK: ubfx x0, x[[TMP]], #0, #16
+ %big = zext i16 %in to i64
+ ret i64 %big
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll b/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll
new file mode 100644
index 000000000000..f03059620768
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll
@@ -0,0 +1,146 @@
+; RUN: llc < %s -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin | FileCheck %s
+
+define zeroext i1 @fcmp_float1(float %a) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_float1
+; CHECK: fcmp s0, #0.0
+; CHECK: cset w{{[0-9]+}}, ne
+ %cmp = fcmp une float %a, 0.000000e+00
+ ret i1 %cmp
+}
+
+define zeroext i1 @fcmp_float2(float %a, float %b) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_float2
+; CHECK: fcmp s0, s1
+; CHECK: cset w{{[0-9]+}}, ne
+ %cmp = fcmp une float %a, %b
+ ret i1 %cmp
+}
+
+define zeroext i1 @fcmp_double1(double %a) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_double1
+; CHECK: fcmp d0, #0.0
+; CHECK: cset w{{[0-9]+}}, ne
+ %cmp = fcmp une double %a, 0.000000e+00
+ ret i1 %cmp
+}
+
+define zeroext i1 @fcmp_double2(double %a, double %b) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_double2
+; CHECK: fcmp d0, d1
+; CHECK: cset w{{[0-9]+}}, ne
+ %cmp = fcmp une double %a, %b
+ ret i1 %cmp
+}
+
+; Check each fcmp condition
+define float @fcmp_oeq(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_oeq
+; CHECK: fcmp s0, s1
+; CHECK: cset w{{[0-9]+}}, eq
+ %cmp = fcmp oeq float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ogt(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ogt
+; CHECK: fcmp s0, s1
+; CHECK: cset w{{[0-9]+}}, gt
+ %cmp = fcmp ogt float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_oge(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_oge
+; CHECK: fcmp s0, s1
+; CHECK: cset w{{[0-9]+}}, ge
+ %cmp = fcmp oge float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_olt(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_olt
+; CHECK: fcmp s0, s1
+; CHECK: cset w{{[0-9]+}}, mi
+ %cmp = fcmp olt float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ole(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ole
+; CHECK: fcmp s0, s1
+; CHECK: cset w{{[0-9]+}}, ls
+ %cmp = fcmp ole float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ord(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ord
+; CHECK: fcmp s0, s1
+; CHECK: cset {{w[0-9]+}}, vc
+ %cmp = fcmp ord float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_uno(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_uno
+; CHECK: fcmp s0, s1
+; CHECK: cset {{w[0-9]+}}, vs
+ %cmp = fcmp uno float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ugt(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ugt
+; CHECK: fcmp s0, s1
+; CHECK: cset {{w[0-9]+}}, hi
+ %cmp = fcmp ugt float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_uge(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_uge
+; CHECK: fcmp s0, s1
+; CHECK: cset {{w[0-9]+}}, pl
+ %cmp = fcmp uge float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ult(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ult
+; CHECK: fcmp s0, s1
+; CHECK: cset {{w[0-9]+}}, lt
+ %cmp = fcmp ult float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ule(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ule
+; CHECK: fcmp s0, s1
+; CHECK: cset {{w[0-9]+}}, le
+ %cmp = fcmp ule float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_une(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_une
+; CHECK: fcmp s0, s1
+; CHECK: cset {{w[0-9]+}}, ne
+ %cmp = fcmp une float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-gv.ll b/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
new file mode 100644
index 000000000000..dc4d8953c276
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+; Test load/store of global value from global offset table.
+@seed = common global i64 0, align 8
+
+define void @Initrand() nounwind {
+entry:
+; CHECK: @Initrand
+; CHECK: adrp x[[REG:[0-9]+]], _seed@GOTPAGE
+; CHECK: ldr x[[REG2:[0-9]+]], [x[[REG]], _seed@GOTPAGEOFF]
+; CHECK: str x{{[0-9]+}}, [x[[REG2]]]
+ store i64 74755, i64* @seed, align 8
+ ret void
+}
+
+define i32 @Rand() nounwind {
+entry:
+; CHECK: @Rand
+; CHECK: adrp x[[REG:[0-9]+]], _seed@GOTPAGE
+; CHECK: ldr x[[REG2:[0-9]+]], [x[[REG]], _seed@GOTPAGEOFF]
+; CHECK: movz x[[REG3:[0-9]+]], #0x51d
+; CHECK: ldr x[[REG4:[0-9]+]], [x[[REG2]]]
+; CHECK: mul x[[REG5:[0-9]+]], x[[REG4]], x[[REG3]]
+; CHECK: movz x[[REG6:[0-9]+]], #0x3619
+; CHECK: add x[[REG7:[0-9]+]], x[[REG5]], x[[REG6]]
+; CHECK: orr x[[REG8:[0-9]+]], xzr, #0xffff
+; CHECK: and x[[REG9:[0-9]+]], x[[REG7]], x[[REG8]]
+; CHECK: str x[[REG9]], [x[[REG]]]
+; CHECK: ldr x{{[0-9]+}}, [x[[REG]]]
+ %0 = load i64* @seed, align 8
+ %mul = mul nsw i64 %0, 1309
+ %add = add nsw i64 %mul, 13849
+ %and = and i64 %add, 65535
+ store i64 %and, i64* @seed, align 8
+ %1 = load i64* @seed, align 8
+ %conv = trunc i64 %1 to i32
+ ret i32 %conv
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll b/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
new file mode 100644
index 000000000000..971be5c43469
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
@@ -0,0 +1,214 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+define i32 @icmp_eq_imm(i32 %a) nounwind ssp {
+entry:
+; CHECK: icmp_eq_imm
+; CHECK: cmp w0, #31
+; CHECK: cset w0, eq
+ %cmp = icmp eq i32 %a, 31
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_eq_neg_imm(i32 %a) nounwind ssp {
+entry:
+; CHECK: icmp_eq_neg_imm
+; CHECK: cmn w0, #7
+; CHECK: cset w0, eq
+ %cmp = icmp eq i32 %a, -7
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_eq(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_eq
+; CHECK: cmp w0, w1
+; CHECK: cset w0, eq
+ %cmp = icmp eq i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_ne(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_ne
+; CHECK: cmp w0, w1
+; CHECK: cset w0, ne
+ %cmp = icmp ne i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_ugt(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_ugt
+; CHECK: cmp w0, w1
+; CHECK: cset w0, hi
+ %cmp = icmp ugt i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_uge(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_uge
+; CHECK: cmp w0, w1
+; CHECK: cset w0, hs
+ %cmp = icmp uge i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_ult(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_ult
+; CHECK: cmp w0, w1
+; CHECK: cset w0, lo
+ %cmp = icmp ult i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_ule(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_ule
+; CHECK: cmp w0, w1
+; CHECK: cset w0, ls
+ %cmp = icmp ule i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_sgt(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_sgt
+; CHECK: cmp w0, w1
+; CHECK: cset w0, gt
+ %cmp = icmp sgt i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_sge(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_sge
+; CHECK: cmp w0, w1
+; CHECK: cset w0, ge
+ %cmp = icmp sge i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_slt(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_slt
+; CHECK: cmp w0, w1
+; CHECK: cset w0, lt
+ %cmp = icmp slt i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_sle(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK: icmp_sle
+; CHECK: cmp w0, w1
+; CHECK: cset w0, le
+ %cmp = icmp sle i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @icmp_i64(i64 %a, i64 %b) nounwind ssp {
+entry:
+; CHECK: icmp_i64
+; CHECK: cmp x0, x1
+; CHECK: cset w{{[0-9]+}}, le
+ %cmp = icmp sle i64 %a, %b
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define zeroext i1 @icmp_eq_i16(i16 %a, i16 %b) nounwind ssp {
+entry:
+; CHECK: icmp_eq_i16
+; CHECK: sxth w0, w0
+; CHECK: sxth w1, w1
+; CHECK: cmp w0, w1
+; CHECK: cset w0, eq
+ %cmp = icmp eq i16 %a, %b
+ ret i1 %cmp
+}
+
+define zeroext i1 @icmp_eq_i8(i8 %a, i8 %b) nounwind ssp {
+entry:
+; CHECK: icmp_eq_i8
+; CHECK: sxtb w0, w0
+; CHECK: sxtb w1, w1
+; CHECK: cmp w0, w1
+; CHECK: cset w0, eq
+ %cmp = icmp eq i8 %a, %b
+ ret i1 %cmp
+}
+
+define i32 @icmp_i16_unsigned(i16 %a, i16 %b) nounwind {
+entry:
+; CHECK: icmp_i16_unsigned
+; CHECK: uxth w0, w0
+; CHECK: uxth w1, w1
+; CHECK: cmp w0, w1
+; CHECK: cset w0, lo
+ %cmp = icmp ult i16 %a, %b
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+define i32 @icmp_i8_signed(i8 %a, i8 %b) nounwind {
+entry:
+; CHECK: @icmp_i8_signed
+; CHECK: sxtb w0, w0
+; CHECK: sxtb w1, w1
+; CHECK: cmp w0, w1
+; CHECK: cset w0, gt
+ %cmp = icmp sgt i8 %a, %b
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+
+define i32 @icmp_i16_signed_const(i16 %a) nounwind {
+entry:
+; CHECK: icmp_i16_signed_const
+; CHECK: sxth w0, w0
+; CHECK: cmn w0, #233
+; CHECK: cset w0, lt
+; CHECK: and w0, w0, #0x1
+ %cmp = icmp slt i16 %a, -233
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+define i32 @icmp_i8_signed_const(i8 %a) nounwind {
+entry:
+; CHECK: icmp_i8_signed_const
+; CHECK: sxtb w0, w0
+; CHECK: cmp w0, #124
+; CHECK: cset w0, gt
+; CHECK: and w0, w0, #0x1
+ %cmp = icmp sgt i8 %a, 124
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
+
+define i32 @icmp_i1_unsigned_const(i1 %a) nounwind {
+entry:
+; CHECK: icmp_i1_unsigned_const
+; CHECK: and w0, w0, #0x1
+; CHECK: cmp w0, #0
+; CHECK: cset w0, lo
+; CHECK: and w0, w0, #0x1
+ %cmp = icmp ult i1 %a, 0
+ %conv2 = zext i1 %cmp to i32
+ ret i32 %conv2
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll b/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
new file mode 100644
index 000000000000..70335ace50c1
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+@fn.table = internal global [2 x i8*] [i8* blockaddress(@fn, %ZERO), i8* blockaddress(@fn, %ONE)], align 8
+
+define i32 @fn(i32 %target) nounwind {
+entry:
+; CHECK: @fn
+ %retval = alloca i32, align 4
+ %target.addr = alloca i32, align 4
+ store i32 %target, i32* %target.addr, align 4
+ %0 = load i32* %target.addr, align 4
+ %idxprom = zext i32 %0 to i64
+ %arrayidx = getelementptr inbounds [2 x i8*]* @fn.table, i32 0, i64 %idxprom
+ %1 = load i8** %arrayidx, align 8
+ br label %indirectgoto
+
+ZERO: ; preds = %indirectgoto
+; CHECK: LBB0_1
+ store i32 0, i32* %retval
+ br label %return
+
+ONE: ; preds = %indirectgoto
+; CHECK: LBB0_2
+ store i32 1, i32* %retval
+ br label %return
+
+return: ; preds = %ONE, %ZERO
+ %2 = load i32* %retval
+ ret i32 %2
+
+indirectgoto: ; preds = %entry
+; CHECK: ldr x0, [sp]
+; CHECK: br x0
+ %indirect.goto.dest = phi i8* [ %1, %entry ]
+ indirectbr i8* %indirect.goto.dest, [label %ZERO, label %ONE]
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
new file mode 100644
index 000000000000..115298805ac1
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
@@ -0,0 +1,148 @@
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=arm64-apple-ios | FileCheck %s --check-prefix=ARM64
+
+@message = global [80 x i8] c"The LLVM Compiler Infrastructure\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 16
+@temp = common global [80 x i8] zeroinitializer, align 16
+
+define void @t1() {
+; ARM64-LABEL: t1
+; ARM64: adrp x8, _message@PAGE
+; ARM64: add x0, x8, _message@PAGEOFF
+; ARM64: movz w9, #0
+; ARM64: movz x2, #0x50
+; ARM64: uxtb w1, w9
+; ARM64: bl _memset
+ call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i32 16, i1 false)
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+define void @t2() {
+; ARM64-LABEL: t2
+; ARM64: adrp x8, _temp@GOTPAGE
+; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF]
+; ARM64: adrp x8, _message@PAGE
+; ARM64: add x1, x8, _message@PAGEOFF
+; ARM64: movz x2, #0x50
+; ARM64: bl _memcpy
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 80, i32 16, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+
+define void @t3() {
+; ARM64-LABEL: t3
+; ARM64: adrp x8, _temp@GOTPAGE
+; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF]
+; ARM64: adrp x8, _message@PAGE
+; ARM64: add x1, x8, _message@PAGEOFF
+; ARM64: movz x2, #0x14
+; ARM64: bl _memmove
+ call void @llvm.memmove.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 20, i32 16, i1 false)
+ ret void
+}
+
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+
+define void @t4() {
+; ARM64-LABEL: t4
+; ARM64: adrp x8, _temp@GOTPAGE
+; ARM64: ldr x8, [x8, _temp@GOTPAGEOFF]
+; ARM64: adrp x9, _message@PAGE
+; ARM64: add x9, x9, _message@PAGEOFF
+; ARM64: ldr x10, [x9]
+; ARM64: str x10, [x8]
+; ARM64: ldr x10, [x9, #8]
+; ARM64: str x10, [x8, #8]
+; ARM64: ldrb w11, [x9, #16]
+; ARM64: strb w11, [x8, #16]
+; ARM64: ret
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 17, i32 16, i1 false)
+ ret void
+}
+
+define void @t5() {
+; ARM64-LABEL: t5
+; ARM64: adrp x8, _temp@GOTPAGE
+; ARM64: ldr x8, [x8, _temp@GOTPAGEOFF]
+; ARM64: adrp x9, _message@PAGE
+; ARM64: add x9, x9, _message@PAGEOFF
+; ARM64: ldr x10, [x9]
+; ARM64: str x10, [x8]
+; ARM64: ldr x10, [x9, #8]
+; ARM64: str x10, [x8, #8]
+; ARM64: ldrb w11, [x9, #16]
+; ARM64: strb w11, [x8, #16]
+; ARM64: ret
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 17, i32 8, i1 false)
+ ret void
+}
+
+define void @t6() {
+; ARM64-LABEL: t6
+; ARM64: adrp x8, _temp@GOTPAGE
+; ARM64: ldr x8, [x8, _temp@GOTPAGEOFF]
+; ARM64: adrp x9, _message@PAGE
+; ARM64: add x9, x9, _message@PAGEOFF
+; ARM64: ldr w10, [x9]
+; ARM64: str w10, [x8]
+; ARM64: ldr w10, [x9, #4]
+; ARM64: str w10, [x8, #4]
+; ARM64: ldrb w10, [x9, #8]
+; ARM64: strb w10, [x8, #8]
+; ARM64: ret
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 9, i32 4, i1 false)
+ ret void
+}
+
+define void @t7() {
+; ARM64-LABEL: t7
+; ARM64: adrp x8, _temp@GOTPAGE
+; ARM64: ldr x8, [x8, _temp@GOTPAGEOFF]
+; ARM64: adrp x9, _message@PAGE
+; ARM64: add x9, x9, _message@PAGEOFF
+; ARM64: ldrh w10, [x9]
+; ARM64: strh w10, [x8]
+; ARM64: ldrh w10, [x9, #2]
+; ARM64: strh w10, [x8, #2]
+; ARM64: ldrh w10, [x9, #4]
+; ARM64: strh w10, [x8, #4]
+; ARM64: ldrb w10, [x9, #6]
+; ARM64: strb w10, [x8, #6]
+; ARM64: ret
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 7, i32 2, i1 false)
+ ret void
+}
+
+define void @t8() {
+; ARM64-LABEL: t8
+; ARM64: adrp x8, _temp@GOTPAGE
+; ARM64: ldr x8, [x8, _temp@GOTPAGEOFF]
+; ARM64: adrp x9, _message@PAGE
+; ARM64: add x9, x9, _message@PAGEOFF
+; ARM64: ldrb w10, [x9]
+; ARM64: strb w10, [x8]
+; ARM64: ldrb w10, [x9, #1]
+; ARM64: strb w10, [x8, #1]
+; ARM64: ldrb w10, [x9, #2]
+; ARM64: strb w10, [x8, #2]
+; ARM64: ldrb w10, [x9, #3]
+; ARM64: strb w10, [x8, #3]
+; ARM64: ret
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8]* @message, i32 0, i32 0), i64 4, i32 1, i1 false)
+ ret void
+}
+
+define void @test_distant_memcpy(i8* %dst) {
+; ARM64-LABEL: test_distant_memcpy:
+; ARM64: mov [[ARRAY:x[0-9]+]], sp
+; ARM64: movz [[OFFSET:x[0-9]+]], #0x1f40
+; ARM64: add x[[ADDR:[0-9]+]], [[ARRAY]], [[OFFSET]]
+; ARM64: ldrb [[BYTE:w[0-9]+]], [x[[ADDR]]]
+; ARM64: strb [[BYTE]], [x0]
+ %array = alloca i8, i32 8192
+ %elem = getelementptr i8* %array, i32 8000
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i32 1, i1 false)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-materialize.ll b/test/CodeGen/AArch64/arm64-fast-isel-materialize.ll
new file mode 100644
index 000000000000..ffac131f0cab
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-materialize.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+; Materialize using fmov
+define void @float_(float* %value) {
+; CHECK: @float_
+; CHECK: fmov s0, #1.25000000
+ store float 1.250000e+00, float* %value, align 4
+ ret void
+}
+
+define void @double_(double* %value) {
+; CHECK: @double_
+; CHECK: fmov d0, #1.25000000
+ store double 1.250000e+00, double* %value, align 8
+ ret void
+}
+
+; Materialize from constant pool
+define float @float_cp() {
+; CHECK: @float_cp
+ ret float 0x400921FB60000000
+}
+
+define double @double_cp() {
+; CHECK: @double_cp
+ ret double 0x400921FB54442D18
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll b/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll
new file mode 100644
index 000000000000..483d1799f9c8
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll
@@ -0,0 +1,68 @@
+; RUN: llc -mtriple=arm64-apple-ios -O0 %s -o - | FileCheck %s
+
+; Fast-isel can't do vector conversions yet, but it was emitting some highly
+; suspect UCVTFUWDri MachineInstrs.
+define <4 x float> @test_uitofp(<4 x i32> %in) {
+; CHECK-LABEL: test_uitofp:
+; CHECK: ucvtf.4s v0, v0
+
+ %res = uitofp <4 x i32> %in to <4 x float>
+ ret <4 x float> %res
+}
+
+define <2 x double> @test_sitofp(<2 x i32> %in) {
+; CHECK-LABEL: test_sitofp:
+; CHECK: sshll.2d [[EXT:v[0-9]+]], v0, #0
+; CHECK: scvtf.2d v0, [[EXT]]
+
+ %res = sitofp <2 x i32> %in to <2 x double>
+ ret <2 x double> %res
+}
+
+define <2 x i32> @test_fptoui(<2 x float> %in) {
+; CHECK-LABEL: test_fptoui:
+; CHECK: fcvtzu.2s v0, v0
+
+ %res = fptoui <2 x float> %in to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <2 x i64> @test_fptosi(<2 x double> %in) {
+; CHECK-LABEL: test_fptosi:
+; CHECK: fcvtzs.2d v0, v0
+
+ %res = fptosi <2 x double> %in to <2 x i64>
+ ret <2 x i64> %res
+}
+
+define fp128 @uitofp_i32_fp128(i32 %a) {
+entry:
+; CHECK-LABEL: uitofp_i32_fp128
+; CHECK: bl ___floatunsitf
+ %conv = uitofp i32 %a to fp128
+ ret fp128 %conv
+}
+
+define fp128 @uitofp_i64_fp128(i64 %a) {
+entry:
+; CHECK-LABEL: uitofp_i64_fp128
+; CHECK: bl ___floatunditf
+ %conv = uitofp i64 %a to fp128
+ ret fp128 %conv
+}
+
+define i32 @uitofp_fp128_i32(fp128 %a) {
+entry:
+; CHECK-LABEL: uitofp_fp128_i32
+; CHECK: ___fixunstfsi
+ %conv = fptoui fp128 %a to i32
+ ret i32 %conv
+}
+
+define i64 @uitofp_fp128_i64(fp128 %a) {
+entry:
+; CHECK-LABEL: uitofp_fp128_i64
+; CHECK: ___fixunstfdi
+ %conv = fptoui fp128 %a to i64
+ ret i64 %conv
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-rem.ll b/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
new file mode 100644
index 000000000000..d5bdbaae9e75
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -print-machineinstrs=expand-isel-pseudos -o /dev/null 2> %t
+; RUN: FileCheck %s < %t --check-prefix=CHECK-SSA
+; REQUIRES: asserts
+
+; CHECK-SSA-LABEL: Machine code for function t1
+
+; CHECK-SSA: [[QUOTREG:%vreg[0-9]+]]<def> = SDIVWr
+; CHECK-SSA-NOT: [[QUOTREG]]<def> =
+; CHECK-SSA: {{%vreg[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
+
+; CHECK-SSA-LABEL: Machine code for function t2
+
+define i32 @t1(i32 %a, i32 %b) {
+; CHECK: @t1
+; CHECK: sdiv [[TMP:w[0-9]+]], w0, w1
+; CHECK: msub w0, [[TMP]], w1, w0
+ %1 = srem i32 %a, %b
+ ret i32 %1
+}
+
+define i64 @t2(i64 %a, i64 %b) {
+; CHECK: @t2
+; CHECK: sdiv [[TMP:x[0-9]+]], x0, x1
+; CHECK: msub x0, [[TMP]], x1, x0
+ %1 = srem i64 %a, %b
+ ret i64 %1
+}
+
+define i32 @t3(i32 %a, i32 %b) {
+; CHECK: @t3
+; CHECK: udiv [[TMP:w[0-9]+]], w0, w1
+; CHECK: msub w0, [[TMP]], w1, w0
+ %1 = urem i32 %a, %b
+ ret i32 %1
+}
+
+define i64 @t4(i64 %a, i64 %b) {
+; CHECK: @t4
+; CHECK: udiv [[TMP:x[0-9]+]], x0, x1
+; CHECK: msub x0, [[TMP]], x1, x0
+ %1 = urem i64 %a, %b
+ ret i64 %1
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-ret.ll b/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
new file mode 100644
index 000000000000..d91fd285d551
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+;; Test returns.
+define void @t0() nounwind ssp {
+entry:
+; CHECK: t0
+; CHECK: ret
+ ret void
+}
+
+define i32 @t1(i32 %a) nounwind ssp {
+entry:
+; CHECK: t1
+; CHECK: str w0, [sp, #12]
+; CHECK-NEXT: ldr w0, [sp, #12]
+; CHECK: ret
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ %tmp = load i32* %a.addr, align 4
+ ret i32 %tmp
+}
+
+define i64 @t2(i64 %a) nounwind ssp {
+entry:
+; CHECK: t2
+; CHECK: str x0, [sp, #8]
+; CHECK-NEXT: ldr x0, [sp, #8]
+; CHECK: ret
+ %a.addr = alloca i64, align 8
+ store i64 %a, i64* %a.addr, align 8
+ %tmp = load i64* %a.addr, align 8
+ ret i64 %tmp
+}
+
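+; The signext return attribute requires the callee to sign-extend the narrow
+; result into w0, hence the sxth/sxtb below.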
+define signext i16 @ret_i16(i16 signext %a) nounwind {
+entry:
+; CHECK: @ret_i16
+; CHECK: sxth w0, w0
+ %a.addr = alloca i16, align 1
+ store i16 %a, i16* %a.addr, align 1
+ %0 = load i16* %a.addr, align 1
+ ret i16 %0
+}
+
+define signext i8 @ret_i8(i8 signext %a) nounwind {
+entry:
+; CHECK: @ret_i8
+; CHECK: sxtb w0, w0
+ %a.addr = alloca i8, align 1
+ store i8 %a, i8* %a.addr, align 1
+ %0 = load i8* %a.addr, align 1
+ ret i8 %0
+}
+
+define signext i1 @ret_i1(i1 signext %a) nounwind {
+entry:
+; CHECK: @ret_i1
+; CHECK: and w0, w0, #0x1
+ %a.addr = alloca i1, align 1
+ store i1 %a, i1* %a.addr, align 1
+ %0 = load i1* %a.addr, align 1
+ ret i1 %0
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-select.ll b/test/CodeGen/AArch64/arm64-fast-isel-select.ll
new file mode 100644
index 000000000000..1cc207f59155
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel-select.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
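+; At -O0 fast-isel materializes the i1 condition with an and against #0x1,
+; compares it against zero with subs, and then selects with csel/fcsel.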
+define i32 @t1(i32 %c) nounwind readnone {
+entry:
+; CHECK: @t1
+; CHECK: and w0, w0, #0x1
+; CHECK: subs w0, w0, #0
+; CHECK: csel w0, w{{[0-9]+}}, w{{[0-9]+}}, ne
+ %0 = icmp sgt i32 %c, 1
+ %1 = select i1 %0, i32 123, i32 357
+ ret i32 %1
+}
+
+define i64 @t2(i32 %c) nounwind readnone {
+entry:
+; CHECK: @t2
+; CHECK: and w0, w0, #0x1
+; CHECK: subs w0, w0, #0
+; CHECK: csel x0, x{{[0-9]+}}, x{{[0-9]+}}, ne
+ %0 = icmp sgt i32 %c, 1
+ %1 = select i1 %0, i64 123, i64 357
+ ret i64 %1
+}
+
+define i32 @t3(i1 %c, i32 %a, i32 %b) nounwind readnone {
+entry:
+; CHECK: @t3
+; CHECK: and w0, w0, #0x1
+; CHECK: subs w0, w0, #0
+; CHECK: csel w0, w{{[0-9]+}}, w{{[0-9]+}}, ne
+ %0 = select i1 %c, i32 %a, i32 %b
+ ret i32 %0
+}
+
+define i64 @t4(i1 %c, i64 %a, i64 %b) nounwind readnone {
+entry:
+; CHECK: @t4
+; CHECK: and w0, w0, #0x1
+; CHECK: subs w0, w0, #0
+; CHECK: csel x0, x{{[0-9]+}}, x{{[0-9]+}}, ne
+ %0 = select i1 %c, i64 %a, i64 %b
+ ret i64 %0
+}
+
+define float @t5(i1 %c, float %a, float %b) nounwind readnone {
+entry:
+; CHECK: @t5
+; CHECK: and w0, w0, #0x1
+; CHECK: subs w0, w0, #0
+; CHECK: fcsel s0, s0, s1, ne
+ %0 = select i1 %c, float %a, float %b
+ ret float %0
+}
+
+define double @t6(i1 %c, double %a, double %b) nounwind readnone {
+entry:
+; CHECK: @t6
+; CHECK: and w0, w0, #0x1
+; CHECK: subs w0, w0, #0
+; CHECK: fcsel d0, d0, d1, ne
+ %0 = select i1 %c, double %a, double %b
+ ret double %0
+}
diff --git a/test/CodeGen/AArch64/arm64-fast-isel.ll b/test/CodeGen/AArch64/arm64-fast-isel.ll
new file mode 100644
index 000000000000..0194b3a6c2d4
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fast-isel.ll
@@ -0,0 +1,95 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+
+define void @t0(i32 %a) nounwind {
+entry:
+; CHECK: t0
+; CHECK: str {{w[0-9]+}}, [sp, #12]
+; CHECK-NEXT: ldr [[REGISTER:w[0-9]+]], [sp, #12]
+; CHECK-NEXT: str [[REGISTER]], [sp, #12]
+; CHECK: ret
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr
+ %tmp = load i32* %a.addr
+ store i32 %tmp, i32* %a.addr
+ ret void
+}
+
+define void @t1(i64 %a) nounwind {
+; CHECK: t1
+; CHECK: str {{x[0-9]+}}, [sp, #8]
+; CHECK-NEXT: ldr [[REGISTER:x[0-9]+]], [sp, #8]
+; CHECK-NEXT: str [[REGISTER]], [sp, #8]
+; CHECK: ret
+ %a.addr = alloca i64, align 4
+ store i64 %a, i64* %a.addr
+ %tmp = load i64* %a.addr
+ store i64 %tmp, i64* %a.addr
+ ret void
+}
+
+define zeroext i1 @i1(i1 %a) nounwind {
+entry:
+; CHECK: @i1
+; CHECK: and w0, w0, #0x1
+; CHECK: strb w0, [sp, #15]
+; CHECK: ldrb w0, [sp, #15]
+; CHECK: and w0, w0, #0x1
+; CHECK: and w0, w0, #0x1
+; CHECK: add sp, sp, #16
+; CHECK: ret
+ %a.addr = alloca i1, align 1
+ store i1 %a, i1* %a.addr, align 1
+ %0 = load i1* %a.addr, align 1
+ ret i1 %0
+}
+
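+; Negative offsets are not encodable as scaled unsigned immediates, so the
+; accesses below use the unscaled ldur/stur forms.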
+define i32 @t2(i32 *%ptr) nounwind {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: ldur w0, [x0, #-4]
+; CHECK: ret
+ %0 = getelementptr i32 *%ptr, i32 -1
+ %1 = load i32* %0, align 4
+ ret i32 %1
+}
+
+define i32 @t3(i32 *%ptr) nounwind {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: ldur w0, [x0, #-256]
+; CHECK: ret
+ %0 = getelementptr i32 *%ptr, i32 -64
+ %1 = load i32* %0, align 4
+ ret i32 %1
+}
+
+define void @t4(i32 *%ptr) nounwind {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: movz w8, #0
+; CHECK: stur w8, [x0, #-4]
+; CHECK: ret
+ %0 = getelementptr i32 *%ptr, i32 -1
+ store i32 0, i32* %0, align 4
+ ret void
+}
+
+define void @t5(i32 *%ptr) nounwind {
+entry:
+; CHECK-LABEL: t5:
+; CHECK: movz w8, #0
+; CHECK: stur w8, [x0, #-256]
+; CHECK: ret
+ %0 = getelementptr i32 *%ptr, i32 -64
+ store i32 0, i32* %0, align 4
+ ret void
+}
+
+define void @t6() nounwind {
+; CHECK: t6
+; CHECK: brk #0x1
+ tail call void @llvm.trap()
+ ret void
+}
+
+declare void @llvm.trap() nounwind
diff --git a/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll b/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
new file mode 100644
index 000000000000..8a744c513d7a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
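+; The fastcc tail call is lowered to a plain branch, so the caller needs no
+; callee-save spills or reloads (no stp/ldp) around it.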
+define void @caller(i32* nocapture %p, i32 %a, i32 %b) nounwind optsize ssp {
+; CHECK-NOT: stp
+; CHECK: b {{_callee|callee}}
+; CHECK-NOT: ldp
+; CHECK: ret
+ %1 = icmp eq i32 %b, 0
+ br i1 %1, label %3, label %2
+
+ tail call fastcc void @callee(i32* %p, i32 %a) optsize
+ br label %3
+
+ ret void
+}
+
+define internal fastcc void @callee(i32* nocapture %p, i32 %a) nounwind optsize noinline ssp {
+ store volatile i32 %a, i32* %p, align 4, !tbaa !0
+ ret void
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll b/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
new file mode 100644
index 000000000000..af9fe0561737
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
@@ -0,0 +1,18 @@
+; fastisel should not fold add with non-pointer bitwidth
+; sext(a) + sext(b) != sext(a + b)
+; RUN: llc -mtriple=arm64-apple-darwin %s -O0 -o - | FileCheck %s
+
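+; Concretely, 64 + 64 wraps to -128 in i8, so the GEP index must be the
+; sign-extension of the truncated sum, not 128 computed at pointer width.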
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+entry:
+ %ptr.addr = alloca i8*, align 8
+ %add = add i8 64, 64 ; 0x40 + 0x40
+ %0 = load i8** %ptr.addr, align 8
+
+ ; CHECK-LABEL: _gep_promotion:
+ ; CHECK: ldrb {{[a-z][0-9]+}}, {{\[[a-z][0-9]+\]}}
+ %arrayidx = getelementptr inbounds i8* %0, i8 %add
+
+ %1 = load i8* %arrayidx, align 1
+ ret i8 %1
+}
+
diff --git a/test/CodeGen/AArch64/arm64-fcmp-opt.ll b/test/CodeGen/AArch64/arm64-fcmp-opt.ll
new file mode 100644
index 000000000000..41027d4b5c74
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fcmp-opt.ll
@@ -0,0 +1,204 @@
+; RUN: llc < %s -march=arm64 -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
+; rdar://10263824
+
+define i1 @fcmp_float1(float %a) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_float1
+; CHECK: fcmp s0, #0.0
+; CHECK: cset w0, ne
+ %cmp = fcmp une float %a, 0.000000e+00
+ ret i1 %cmp
+}
+
+define i1 @fcmp_float2(float %a, float %b) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_float2
+; CHECK: fcmp s0, s1
+; CHECK: cset w0, ne
+ %cmp = fcmp une float %a, %b
+ ret i1 %cmp
+}
+
+define i1 @fcmp_double1(double %a) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_double1
+; CHECK: fcmp d0, #0.0
+; CHECK: cset w0, ne
+ %cmp = fcmp une double %a, 0.000000e+00
+ ret i1 %cmp
+}
+
+define i1 @fcmp_double2(double %a, double %b) nounwind ssp {
+entry:
+; CHECK-LABEL: @fcmp_double2
+; CHECK: fcmp d0, d1
+; CHECK: cset w0, ne
+ %cmp = fcmp une double %a, %b
+ ret i1 %cmp
+}
+
+; Check each fcmp condition
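+; A single fcmp sets NZCV and each IEEE predicate maps to one condition code
+; (oeq->eq, ogt->gt, oge->ge, olt->mi, ole->ls, ord->vc, uno->vs, ugt->hi,
+; uge->pl, ult->lt, ule->le, une->ne); only one/ueq below need two fcsels.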
+define float @fcmp_oeq(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_oeq
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], eq
+
+ %cmp = fcmp oeq float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ogt(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ogt
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], gt
+
+ %cmp = fcmp ogt float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_oge(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_oge
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], ge
+
+ %cmp = fcmp oge float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_olt(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_olt
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], mi
+
+ %cmp = fcmp olt float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ole(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ole
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], ls
+
+ %cmp = fcmp ole float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ord(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ord
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], vc
+ %cmp = fcmp ord float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_uno(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_uno
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], vs
+ %cmp = fcmp uno float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ugt(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ugt
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], hi
+ %cmp = fcmp ugt float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_uge(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_uge
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], pl
+ %cmp = fcmp uge float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ult(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ult
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], lt
+ %cmp = fcmp ult float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_ule(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ule
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], le
+ %cmp = fcmp ule float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+define float @fcmp_une(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_une
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], ne
+ %cmp = fcmp une float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+; Possible opportunity for improvement. See comment in
+; ARM64TargetLowering::LowerSETCC()
+define float @fcmp_one(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_one
+; fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel [[TMP:s[0-9]+]], s[[ONE]], s[[ZERO]], mi
+; CHECK: fcsel s0, s[[ONE]], [[TMP]], gt
+ %cmp = fcmp one float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
+
+; Possible opportunity for improvement. See comment in
+; ARM64TargetLowering::LowerSETCC()
+define float @fcmp_ueq(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: @fcmp_ueq
+; CHECK: fcmp s0, s1
+; CHECK-DAG: movi.2d v[[ZERO:[0-9]+]], #0
+; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
+; CHECK: fcsel [[TMP:s[0-9]+]], s[[ONE]], s[[ZERO]], eq
+; CHECK: fcsel s0, s[[ONE]], [[TMP]], vs
+ %cmp = fcmp ueq float %a, %b
+ %conv = uitofp i1 %cmp to float
+ ret float %conv
+}
diff --git a/test/CodeGen/AArch64/arm64-fcopysign.ll b/test/CodeGen/AArch64/arm64-fcopysign.ll
new file mode 100644
index 000000000000..66241df9444c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fcopysign.ll
@@ -0,0 +1,51 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+
+; rdar://9332258
+
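+; copysign is lowered to a NEON bit (bitwise insert if true) with a sign-bit
+; mask: movi builds 0x80000000 per lane for float, and movi #0 + fneg builds
+; 0x8000000000000000 per lane for double.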
+define float @test1(float %x, float %y) nounwind {
+entry:
+; CHECK-LABEL: test1:
+; CHECK: movi.4s v2, #0x80, lsl #24
+; CHECK: bit.16b v0, v1, v2
+ %0 = tail call float @copysignf(float %x, float %y) nounwind readnone
+ ret float %0
+}
+
+define double @test2(double %x, double %y) nounwind {
+entry:
+; CHECK-LABEL: test2:
+; CHECK: movi.2d v2, #0
+; CHECK: fneg.2d v2, v2
+; CHECK: bit.16b v0, v1, v2
+ %0 = tail call double @copysign(double %x, double %y) nounwind readnone
+ ret double %0
+}
+
+; rdar://9545768
+define double @test3(double %a, float %b, float %c) nounwind {
+; CHECK-LABEL: test3:
+; CHECK: fcvt d1, s1
+; CHECK: fneg.2d v2, v{{[0-9]+}}
+; CHECK: bit.16b v0, v1, v2
+ %tmp1 = fadd float %b, %c
+ %tmp2 = fpext float %tmp1 to double
+ %tmp = tail call double @copysign( double %a, double %tmp2 ) nounwind readnone
+ ret double %tmp
+}
+
+define float @test4() nounwind {
+entry:
+; CHECK-LABEL: test4:
+; CHECK: fcvt s0, d0
+; CHECK: movi.4s v[[CONST:[0-9]+]], #0x80, lsl #24
+; CHECK: bit.16b v{{[0-9]+}}, v0, v[[CONST]]
+ %0 = tail call double (...)* @bar() nounwind
+ %1 = fptrunc double %0 to float
+ %2 = tail call float @copysignf(float 5.000000e-01, float %1) nounwind readnone
+ %3 = fadd float %1, %2
+ ret float %3
+}
+
+declare double @bar(...)
+declare double @copysign(double, double) nounwind readnone
+declare float @copysignf(float, float) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll b/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
new file mode 100644
index 000000000000..e51c38b2b95e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+; DAGCombine to transform a conversion of an extract_vector_elt to an
+; extract_vector_elt of a conversion, which saves a round trip of copies
+; of the value to a GPR and back to an FPR.
+; rdar://11855286
+define double @foo0(<2 x i64> %a) nounwind {
+; CHECK: scvtf.2d [[REG:v[0-9]+]], v0, #9
+; CHECK-NEXT: ins.d v0[0], [[REG]][1]
+ %vecext = extractelement <2 x i64> %a, i32 1
+ %fcvt_n = tail call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %vecext, i32 9)
+ ret double %fcvt_n
+}
+
+declare double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64, i32) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-fmadd.ll b/test/CodeGen/AArch64/arm64-fmadd.ll
new file mode 100644
index 000000000000..c791900cc2ff
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fmadd.ll
@@ -0,0 +1,92 @@
+; RUN: llc -march=arm64 < %s | FileCheck %s
+
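+; An fmul by -1.0 feeding or consuming llvm.fma folds into the negating forms:
+; negated multiplicand -> fmsub, negated result -> fnmadd, negated addend -> fnmsub.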
+define float @fma32(float %a, float %b, float %c) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: fma32:
+; CHECK: fmadd s0, s0, s1, s2
+ %0 = tail call float @llvm.fma.f32(float %a, float %b, float %c)
+ ret float %0
+}
+
+define float @fnma32(float %a, float %b, float %c) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: fnma32:
+; CHECK: fnmadd s0, s0, s1, s2
+ %0 = tail call float @llvm.fma.f32(float %a, float %b, float %c)
+ %mul = fmul float %0, -1.000000e+00
+ ret float %mul
+}
+
+define float @fms32(float %a, float %b, float %c) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: fms32:
+; CHECK: fmsub s0, s0, s1, s2
+ %mul = fmul float %b, -1.000000e+00
+ %0 = tail call float @llvm.fma.f32(float %a, float %mul, float %c)
+ ret float %0
+}
+
+define float @fms32_com(float %a, float %b, float %c) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: fms32_com:
+; CHECK: fmsub s0, s1, s0, s2
+ %mul = fmul float %b, -1.000000e+00
+ %0 = tail call float @llvm.fma.f32(float %mul, float %a, float %c)
+ ret float %0
+}
+
+define float @fnms32(float %a, float %b, float %c) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: fnms32:
+; CHECK: fnmsub s0, s0, s1, s2
+ %mul = fmul float %c, -1.000000e+00
+ %0 = tail call float @llvm.fma.f32(float %a, float %b, float %mul)
+ ret float %0
+}
+
+define double @fma64(double %a, double %b, double %c) nounwind readnone ssp {
+; CHECK-LABEL: fma64:
+; CHECK: fmadd d0, d0, d1, d2
+entry:
+ %0 = tail call double @llvm.fma.f64(double %a, double %b, double %c)
+ ret double %0
+}
+
+define double @fnma64(double %a, double %b, double %c) nounwind readnone ssp {
+; CHECK-LABEL: fnma64:
+; CHECK: fnmadd d0, d0, d1, d2
+entry:
+ %0 = tail call double @llvm.fma.f64(double %a, double %b, double %c)
+ %mul = fmul double %0, -1.000000e+00
+ ret double %mul
+}
+
+define double @fms64(double %a, double %b, double %c) nounwind readnone ssp {
+; CHECK-LABEL: fms64:
+; CHECK: fmsub d0, d0, d1, d2
+entry:
+ %mul = fmul double %b, -1.000000e+00
+ %0 = tail call double @llvm.fma.f64(double %a, double %mul, double %c)
+ ret double %0
+}
+
+define double @fms64_com(double %a, double %b, double %c) nounwind readnone ssp {
+; CHECK-LABEL: fms64_com:
+; CHECK: fmsub d0, d1, d0, d2
+entry:
+ %mul = fmul double %b, -1.000000e+00
+ %0 = tail call double @llvm.fma.f64(double %mul, double %a, double %c)
+ ret double %0
+}
+
+define double @fnms64(double %a, double %b, double %c) nounwind readnone ssp {
+; CHECK-LABEL: fnms64:
+; CHECK: fnmsub d0, d0, d1, d2
+entry:
+ %mul = fmul double %c, -1.000000e+00
+ %0 = tail call double @llvm.fma.f64(double %a, double %b, double %mul)
+ ret double %0
+}
+
+declare float @llvm.fma.f32(float, float, float) nounwind readnone
+declare double @llvm.fma.f64(double, double, double) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-fmax.ll b/test/CodeGen/AArch64/arm64-fmax.ll
new file mode 100644
index 000000000000..94b745437bd3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fmax.ll
@@ -0,0 +1,34 @@
+; RUN: llc -march=arm64 -enable-no-nans-fp-math < %s | FileCheck %s
+
+define double @test_direct(float %in) #1 {
+; CHECK-LABEL: test_direct:
+ %cmp = fcmp olt float %in, 0.000000e+00
+ %longer = fpext float %in to double
+ %val = select i1 %cmp, double 0.000000e+00, double %longer
+ ret double %val
+
+; CHECK: fmax
+}
+
+define double @test_cross(float %in) #1 {
+; CHECK-LABEL: test_cross:
+ %cmp = fcmp olt float %in, 0.000000e+00
+ %longer = fpext float %in to double
+ %val = select i1 %cmp, double %longer, double 0.000000e+00
+ ret double %val
+
+; CHECK: fmin
+}
+
+; This isn't a min or a max, but passes the first condition for swapping the
+; results. Make sure they're put back before we resort to the normal fcsel.
+define float @test_cross_fail(float %lhs, float %rhs) {
+; CHECK-LABEL: test_cross_fail:
+ %tst = fcmp une float %lhs, %rhs
+ %res = select i1 %tst, float %rhs, float %lhs
+ ret float %res
+
+ ; The register allocator would have to decide to be deliberately obtuse before
+ ; other registers were used.
+; CHECK: fcsel s0, s1, s0, ne
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/arm64-fminv.ll b/test/CodeGen/AArch64/arm64-fminv.ll
new file mode 100644
index 000000000000..f4c97355dd19
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fminv.ll
@@ -0,0 +1,101 @@
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
+
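+; Two-element reductions use the pairwise fminp/fmaxp/fminnmp/fmaxnmp forms,
+; while the 4 x f32 cases use the across-lanes fminv/fmaxv/fminnmv/fmaxnmv.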
+define float @test_fminv_v2f32(<2 x float> %in) {
+; CHECK: test_fminv_v2f32:
+; CHECK: fminp s0, v0.2s
+ %min = call float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float> %in)
+ ret float %min
+}
+
+define float @test_fminv_v4f32(<4 x float> %in) {
+; CHECK: test_fminv_v4f32:
+; CHECK: fminv s0, v0.4s
+ %min = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %in)
+ ret float %min
+}
+
+define double @test_fminv_v2f64(<2 x double> %in) {
+; CHECK: test_fminv_v2f64:
+; CHECK: fminp d0, v0.2d
+ %min = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %in)
+ ret double %min
+}
+
+declare float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
+declare double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double>)
+
+define float @test_fmaxv_v2f32(<2 x float> %in) {
+; CHECK: test_fmaxv_v2f32:
+; CHECK: fmaxp s0, v0.2s
+ %max = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %in)
+ ret float %max
+}
+
+define float @test_fmaxv_v4f32(<4 x float> %in) {
+; CHECK: test_fmaxv_v4f32:
+; CHECK: fmaxv s0, v0.4s
+ %max = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %in)
+ ret float %max
+}
+
+define double @test_fmaxv_v2f64(<2 x double> %in) {
+; CHECK: test_fmaxv_v2f64:
+; CHECK: fmaxp d0, v0.2d
+ %max = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %in)
+ ret double %max
+}
+
+declare float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
+declare double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double>)
+
+define float @test_fminnmv_v2f32(<2 x float> %in) {
+; CHECK: test_fminnmv_v2f32:
+; CHECK: fminnmp s0, v0.2s
+ %minnm = call float @llvm.aarch64.neon.fminnmv.f32.v2f32(<2 x float> %in)
+ ret float %minnm
+}
+
+define float @test_fminnmv_v4f32(<4 x float> %in) {
+; CHECK: test_fminnmv_v4f32:
+; CHECK: fminnmv s0, v0.4s
+ %minnm = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %in)
+ ret float %minnm
+}
+
+define double @test_fminnmv_v2f64(<2 x double> %in) {
+; CHECK: test_fminnmv_v2f64:
+; CHECK: fminnmp d0, v0.2d
+ %minnm = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
+ ret double %minnm
+}
+
+declare float @llvm.aarch64.neon.fminnmv.f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
+declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
+
+define float @test_fmaxnmv_v2f32(<2 x float> %in) {
+; CHECK: test_fmaxnmv_v2f32:
+; CHECK: fmaxnmp s0, v0.2s
+ %maxnm = call float @llvm.aarch64.neon.fmaxnmv.f32.v2f32(<2 x float> %in)
+ ret float %maxnm
+}
+
+define float @test_fmaxnmv_v4f32(<4 x float> %in) {
+; CHECK: test_fmaxnmv_v4f32:
+; CHECK: fmaxnmv s0, v0.4s
+ %maxnm = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %in)
+ ret float %maxnm
+}
+
+define double @test_fmaxnmv_v2f64(<2 x double> %in) {
+; CHECK: test_fmaxnmv_v2f64:
+; CHECK: fmaxnmp d0, v0.2d
+ %maxnm = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
+ ret double %maxnm
+}
+
+declare float @llvm.aarch64.neon.fmaxnmv.f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
+declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
diff --git a/test/CodeGen/AArch64/arm64-fmuladd.ll b/test/CodeGen/AArch64/arm64-fmuladd.ll
new file mode 100644
index 000000000000..6c5eecabd755
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fmuladd.ll
@@ -0,0 +1,88 @@
+; RUN: llc -asm-verbose=false < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
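+; llvm.fmuladd is selected as a single fused operation: fmadd for scalars and
+; fmla for vectors; the 256-bit cases are split into two 128-bit fmla ops.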
+define float @test_f32(float* %A, float* %B, float* %C) nounwind {
+;CHECK-LABEL: test_f32:
+;CHECK: fmadd
+;CHECK-NOT: fmadd
+ %tmp1 = load float* %A
+ %tmp2 = load float* %B
+ %tmp3 = load float* %C
+ %tmp4 = call float @llvm.fmuladd.f32(float %tmp1, float %tmp2, float %tmp3)
+ ret float %tmp4
+}
+
+define <2 x float> @test_v2f32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+;CHECK-LABEL: test_v2f32:
+;CHECK: fmla.2s
+;CHECK-NOT: fmla.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = load <2 x float>* %C
+ %tmp4 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
+ ret <2 x float> %tmp4
+}
+
+define <4 x float> @test_v4f32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+;CHECK-LABEL: test_v4f32:
+;CHECK: fmla.4s
+;CHECK-NOT: fmla.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = load <4 x float>* %C
+ %tmp4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
+ ret <4 x float> %tmp4
+}
+
+define <8 x float> @test_v8f32(<8 x float>* %A, <8 x float>* %B, <8 x float>* %C) nounwind {
+;CHECK-LABEL: test_v8f32:
+;CHECK: fmla.4s
+;CHECK: fmla.4s
+;CHECK-NOT: fmla.4s
+ %tmp1 = load <8 x float>* %A
+ %tmp2 = load <8 x float>* %B
+ %tmp3 = load <8 x float>* %C
+ %tmp4 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %tmp1, <8 x float> %tmp2, <8 x float> %tmp3)
+ ret <8 x float> %tmp4
+}
+
+define double @test_f64(double* %A, double* %B, double* %C) nounwind {
+;CHECK-LABEL: test_f64:
+;CHECK: fmadd
+;CHECK-NOT: fmadd
+ %tmp1 = load double* %A
+ %tmp2 = load double* %B
+ %tmp3 = load double* %C
+ %tmp4 = call double @llvm.fmuladd.f64(double %tmp1, double %tmp2, double %tmp3)
+ ret double %tmp4
+}
+
+define <2 x double> @test_v2f64(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+;CHECK-LABEL: test_v2f64:
+;CHECK: fmla.2d
+;CHECK-NOT: fmla.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = load <2 x double>* %C
+ %tmp4 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %tmp1, <2 x double> %tmp2, <2 x double> %tmp3)
+ ret <2 x double> %tmp4
+}
+
+define <4 x double> @test_v4f64(<4 x double>* %A, <4 x double>* %B, <4 x double>* %C) nounwind {
+;CHECK-LABEL: test_v4f64:
+;CHECK: fmla.2d
+;CHECK: fmla.2d
+;CHECK-NOT: fmla.2d
+ %tmp1 = load <4 x double>* %A
+ %tmp2 = load <4 x double>* %B
+ %tmp3 = load <4 x double>* %C
+ %tmp4 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %tmp1, <4 x double> %tmp2, <4 x double> %tmp3)
+ ret <4 x double> %tmp4
+}
+
+declare float @llvm.fmuladd.f32(float, float, float) nounwind readnone
+declare <2 x float> @llvm.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
+declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
+declare double @llvm.fmuladd.f64(double, double, double) nounwind readnone
+declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
+declare <4 x double> @llvm.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-fold-address.ll b/test/CodeGen/AArch64/arm64-fold-address.ll
new file mode 100644
index 000000000000..96cc3e90f63a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fold-address.ll
@@ -0,0 +1,79 @@
+; RUN: llc < %s -O2 -mtriple=arm64-apple-darwin | FileCheck %s
+
+%0 = type opaque
+%struct.CGRect = type { %struct.CGPoint, %struct.CGSize }
+%struct.CGPoint = type { double, double }
+%struct.CGSize = type { double, double }
+
+@"OBJC_IVAR_$_UIScreen._bounds" = external hidden global i64, section "__DATA, __objc_ivar", align 8
+
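+; In @nofold the four loads use different offsets, so the base is computed once
+; with an add and the values come back as ldp pairs; in @fold every access uses
+; the same offset, which folds straight into the [reg, reg] addressing mode.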
+define hidden %struct.CGRect @nofold(%0* nocapture %self, i8* nocapture %_cmd) nounwind readonly optsize ssp {
+entry:
+; CHECK-LABEL: nofold:
+; CHECK: add x[[REG:[0-9]+]], x0, x{{[0-9]+}}
+; CHECK: ldp d0, d1, [x[[REG]]]
+; CHECK: ldp d2, d3, [x[[REG]], #16]
+; CHECK: ret
+ %ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+ %0 = bitcast %0* %self to i8*
+ %add.ptr = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr10.0 = bitcast i8* %add.ptr to double*
+ %tmp11 = load double* %add.ptr10.0, align 8
+ %add.ptr.sum = add i64 %ivar, 8
+ %add.ptr10.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum
+ %1 = bitcast i8* %add.ptr10.1 to double*
+ %tmp12 = load double* %1, align 8
+ %add.ptr.sum17 = add i64 %ivar, 16
+ %add.ptr4.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum17
+ %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
+ %tmp = load double* %add.ptr4.1.0, align 8
+ %add.ptr4.1.sum = add i64 %ivar, 24
+ %add.ptr4.1.1 = getelementptr inbounds i8* %0, i64 %add.ptr4.1.sum
+ %2 = bitcast i8* %add.ptr4.1.1 to double*
+ %tmp5 = load double* %2, align 8
+ %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
+ %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
+ %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0
+ %insert7 = insertvalue %struct.CGSize undef, double %tmp, 0
+ %insert9 = insertvalue %struct.CGSize %insert7, double %tmp5, 1
+ %insert3 = insertvalue %struct.CGRect %insert, %struct.CGSize %insert9, 1
+ ret %struct.CGRect %insert3
+}
+
+define hidden %struct.CGRect @fold(%0* nocapture %self, i8* nocapture %_cmd) nounwind readonly optsize ssp {
+entry:
+; CHECK-LABEL: fold:
+; CHECK: ldr d0, [x0, x{{[0-9]+}}]
+; CHECK-NOT: add x0, x0, x1
+; CHECK: ret
+ %ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+ %0 = bitcast %0* %self to i8*
+ %add.ptr = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr10.0 = bitcast i8* %add.ptr to double*
+ %tmp11 = load double* %add.ptr10.0, align 8
+ %add.ptr10.1 = getelementptr inbounds i8* %0, i64 %ivar
+ %1 = bitcast i8* %add.ptr10.1 to double*
+ %tmp12 = load double* %1, align 8
+ %add.ptr4.1 = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
+ %tmp = load double* %add.ptr4.1.0, align 8
+ %add.ptr4.1.1 = getelementptr inbounds i8* %0, i64 %ivar
+ %2 = bitcast i8* %add.ptr4.1.1 to double*
+ %tmp5 = load double* %2, align 8
+ %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
+ %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
+ %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0
+ %insert7 = insertvalue %struct.CGSize undef, double %tmp, 0
+ %insert9 = insertvalue %struct.CGSize %insert7, double %tmp5, 1
+ %insert3 = insertvalue %struct.CGRect %insert, %struct.CGSize %insert9, 1
+ ret %struct.CGRect %insert3
+}
+
+
+!llvm.module.flags = !{!0, !1, !2, !3}
+
+!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
+!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
+!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
+!4 = metadata !{}
diff --git a/test/CodeGen/AArch64/arm64-fold-lsl.ll b/test/CodeGen/AArch64/arm64-fold-lsl.ll
new file mode 100644
index 000000000000..ec65e467e37d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fold-lsl.ll
@@ -0,0 +1,79 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+;
+; <rdar://problem/14486451>
+
+%struct.a = type [256 x i16]
+%struct.b = type [256 x i32]
+%struct.c = type [256 x i64]
+
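+; The lshr + zext + and index computation is matched as a single ubfx, and the
+; element-size scaling folds into the addressing mode as a shifted register.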
+define i16 @load_halfword(%struct.a* %ctx, i32 %xor72) nounwind {
+; CHECK-LABEL: load_halfword:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: ldrh w0, [x0, [[REG]], lsl #1]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.a* %ctx, i64 0, i64 %idxprom83
+ %result = load i16* %arrayidx86, align 2
+ ret i16 %result
+}
+
+define i32 @load_word(%struct.b* %ctx, i32 %xor72) nounwind {
+; CHECK-LABEL: load_word:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: ldr w0, [x0, [[REG]], lsl #2]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.b* %ctx, i64 0, i64 %idxprom83
+ %result = load i32* %arrayidx86, align 4
+ ret i32 %result
+}
+
+define i64 @load_doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
+; CHECK-LABEL: load_doubleword:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: ldr x0, [x0, [[REG]], lsl #3]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.c* %ctx, i64 0, i64 %idxprom83
+ %result = load i64* %arrayidx86, align 8
+ ret i64 %result
+}
+
+define void @store_halfword(%struct.a* %ctx, i32 %xor72, i16 %val) nounwind {
+; CHECK-LABEL: store_halfword:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: strh w2, [x0, [[REG]], lsl #1]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.a* %ctx, i64 0, i64 %idxprom83
+ store i16 %val, i16* %arrayidx86, align 8
+ ret void
+}
+
+define void @store_word(%struct.b* %ctx, i32 %xor72, i32 %val) nounwind {
+; CHECK-LABEL: store_word:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: str w2, [x0, [[REG]], lsl #2]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.b* %ctx, i64 0, i64 %idxprom83
+ store i32 %val, i32* %arrayidx86, align 8
+ ret void
+}
+
+define void @store_doubleword(%struct.c* %ctx, i32 %xor72, i64 %val) nounwind {
+; CHECK-LABEL: store_doubleword:
+; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
+; CHECK: str x2, [x0, [[REG]], lsl #3]
+ %shr81 = lshr i32 %xor72, 9
+ %conv82 = zext i32 %shr81 to i64
+ %idxprom83 = and i64 %conv82, 255
+ %arrayidx86 = getelementptr inbounds %struct.c* %ctx, i64 0, i64 %idxprom83
+ store i64 %val, i64* %arrayidx86, align 8
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-fp-contract-zero.ll b/test/CodeGen/AArch64/arm64-fp-contract-zero.ll
new file mode 100644
index 000000000000..f982cbb7f5e0
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fp-contract-zero.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=arm64 -fp-contract=fast -o - %s | FileCheck %s
+
+
+; Make sure we don't try to fold an fneg into +0.0, creating an illegal constant
+; -0.0. It's also good, though not essential, that we don't resort to a litpool.
+define double @test_fms_fold(double %a, double %b) {
+; CHECK-LABEL: test_fms_fold:
+; CHECK: fmov {{d[0-9]+}}, xzr
+; CHECK: ret
+ %mul = fmul double %a, 0.000000e+00
+ %mul1 = fmul double %b, 0.000000e+00
+ %sub = fsub double %mul, %mul1
+ ret double %sub
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/arm64-fp-imm.ll b/test/CodeGen/AArch64/arm64-fp-imm.ll
new file mode 100644
index 000000000000..6e271e03d281
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fp-imm.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+
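+; Neither constant is encodable as an 8-bit fmov immediate, so both come from
+; the constant pool: 4614256656552045848 is the bit pattern of pi as a double
+; (0x400921FB54442D18) and 1078530011 is pi as a float (0x40490FDB).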
+; CHECK: literal8
+; CHECK: .quad 4614256656552045848
+define double @foo() {
+; CHECK: _foo:
+; CHECK: adrp x[[REG:[0-9]+]], lCPI0_0@PAGE
+; CHECK: ldr d0, [x[[REG]], lCPI0_0@PAGEOFF]
+; CHECK-NEXT: ret
+ ret double 0x400921FB54442D18
+}
+
+; CHECK: literal4
+; CHECK: .long 1078530011
+define float @bar() {
+; CHECK: _bar:
+; CHECK: adrp x[[REG:[0-9]+]], lCPI1_0@PAGE
+; CHECK: ldr s0, [x[[REG]], lCPI1_0@PAGEOFF]
+; CHECK-NEXT: ret
+ ret float 0x400921FB60000000
+}
+
+; CHECK: literal16
+; CHECK: .quad 0
+; CHECK: .quad 0
+define fp128 @baz() {
+; CHECK: _baz:
+; CHECK: adrp x[[REG:[0-9]+]], lCPI2_0@PAGE
+; CHECK: ldr q0, [x[[REG]], lCPI2_0@PAGEOFF]
+; CHECK-NEXT: ret
+ ret fp128 0xL00000000000000000000000000000000
+}
diff --git a/test/CodeGen/AArch64/arm64-fp.ll b/test/CodeGen/AArch64/arm64-fp.ll
new file mode 100644
index 000000000000..08b1b6754c2a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fp.ll
@@ -0,0 +1,8 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define float @t1(i1 %a, float %b, float %c) nounwind {
+; CHECK: t1
+; CHECK: fcsel s0, s0, s1, ne
+ %sel = select i1 %a, float %b, float %c
+ ret float %sel
+}
diff --git a/test/CodeGen/AArch64/arm64-fp128-folding.ll b/test/CodeGen/AArch64/arm64-fp128-folding.ll
new file mode 100644
index 000000000000..6a7d203f5b17
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fp128-folding.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=arm64 -verify-machineinstrs < %s | FileCheck %s
+declare void @bar(i8*, i8*, i32*)
+
+; SelectionDAG used to try to fold some fp128 operations using the ppc_fp128 type,
+; which is not supported.
+
+define fp128 @test_folding() {
+; CHECK-LABEL: test_folding:
+ %l = alloca i32
+ store i32 42, i32* %l
+ %val = load i32* %l
+ %fpval = sitofp i32 %val to fp128
+ ; If the value is loaded from a constant pool into an fp128, it's been folded
+ ; successfully.
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}},
+ ret fp128 %fpval
+}
diff --git a/test/CodeGen/AArch64/arm64-fp128.ll b/test/CodeGen/AArch64/arm64-fp128.ll
new file mode 100644
index 000000000000..b1d50102aa28
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-fp128.ll
@@ -0,0 +1,273 @@
+; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
+
+@lhs = global fp128 zeroinitializer, align 16
+@rhs = global fp128 zeroinitializer, align 16
+
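+; AArch64 has no fp128 arithmetic instructions, so every operation below is
+; lowered to a compiler-rt/libgcc libcall (__addtf3, __fixtfsi, __letf2, ...).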
+define fp128 @test_add() {
+; CHECK-LABEL: test_add:
+
+ %lhs = load fp128* @lhs, align 16
+ %rhs = load fp128* @rhs, align 16
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
+
+ %val = fadd fp128 %lhs, %rhs
+; CHECK: bl __addtf3
+ ret fp128 %val
+}
+
+define fp128 @test_sub() {
+; CHECK-LABEL: test_sub:
+
+ %lhs = load fp128* @lhs, align 16
+ %rhs = load fp128* @rhs, align 16
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
+
+ %val = fsub fp128 %lhs, %rhs
+; CHECK: bl __subtf3
+ ret fp128 %val
+}
+
+define fp128 @test_mul() {
+; CHECK-LABEL: test_mul:
+
+ %lhs = load fp128* @lhs, align 16
+ %rhs = load fp128* @rhs, align 16
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
+
+ %val = fmul fp128 %lhs, %rhs
+; CHECK: bl __multf3
+ ret fp128 %val
+}
+
+define fp128 @test_div() {
+; CHECK-LABEL: test_div:
+
+ %lhs = load fp128* @lhs, align 16
+ %rhs = load fp128* @rhs, align 16
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
+
+ %val = fdiv fp128 %lhs, %rhs
+; CHECK: bl __divtf3
+ ret fp128 %val
+}
+
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_fptosi() {
+; CHECK-LABEL: test_fptosi:
+ %val = load fp128* @lhs, align 16
+
+ %val32 = fptosi fp128 %val to i32
+ store i32 %val32, i32* @var32
+; CHECK: bl __fixtfsi
+
+ %val64 = fptosi fp128 %val to i64
+ store i64 %val64, i64* @var64
+; CHECK: bl __fixtfdi
+
+ ret void
+}
+
+define void @test_fptoui() {
+; CHECK-LABEL: test_fptoui:
+ %val = load fp128* @lhs, align 16
+
+ %val32 = fptoui fp128 %val to i32
+ store i32 %val32, i32* @var32
+; CHECK: bl __fixunstfsi
+
+ %val64 = fptoui fp128 %val to i64
+ store i64 %val64, i64* @var64
+; CHECK: bl __fixunstfdi
+
+ ret void
+}
+
+define void @test_sitofp() {
+; CHECK-LABEL: test_sitofp:
+
+ %src32 = load i32* @var32
+ %val32 = sitofp i32 %src32 to fp128
+ store volatile fp128 %val32, fp128* @lhs
+; CHECK: bl __floatsitf
+
+ %src64 = load i64* @var64
+ %val64 = sitofp i64 %src64 to fp128
+ store volatile fp128 %val64, fp128* @lhs
+; CHECK: bl __floatditf
+
+ ret void
+}
+
+define void @test_uitofp() {
+; CHECK-LABEL: test_uitofp:
+
+ %src32 = load i32* @var32
+ %val32 = uitofp i32 %src32 to fp128
+ store volatile fp128 %val32, fp128* @lhs
+; CHECK: bl __floatunsitf
+
+ %src64 = load i64* @var64
+ %val64 = uitofp i64 %src64 to fp128
+ store volatile fp128 %val64, fp128* @lhs
+; CHECK: bl __floatunditf
+
+ ret void
+}
+
+define i1 @test_setcc1() {
+; CHECK-LABEL: test_setcc1:
+
+ %lhs = load fp128* @lhs, align 16
+ %rhs = load fp128* @rhs, align 16
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
+
+; Technically, everything after the call to __letf2 is redundant, but we'll let
+; LLVM have its fun for now.
+ %val = fcmp ole fp128 %lhs, %rhs
+; CHECK: bl __letf2
+; CHECK: cmp w0, #0
+; CHECK: cset w0, le
+
+ ret i1 %val
+; CHECK: ret
+}
+
+define i1 @test_setcc2() {
+; CHECK-LABEL: test_setcc2:
+
+ %lhs = load fp128* @lhs, align 16
+ %rhs = load fp128* @rhs, align 16
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
+
+ %val = fcmp ugt fp128 %lhs, %rhs
+; CHECK: bl __gttf2
+; CHECK: cmp w0, #0
+; CHECK: cset [[GT:w[0-9]+]], gt
+
+; CHECK: bl __unordtf2
+; CHECK: cmp w0, #0
+; CHECK: cset [[UNORDERED:w[0-9]+]], ne
+; CHECK: orr w0, [[UNORDERED]], [[GT]]
+
+ ret i1 %val
+; CHECK: ret
+}
+
+define i32 @test_br_cc() {
+; CHECK-LABEL: test_br_cc:
+
+ %lhs = load fp128* @lhs, align 16
+ %rhs = load fp128* @rhs, align 16
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
+; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
+
+ ; olt == !uge, which is what LLVM unfortunately "optimizes" this to.
+ %cond = fcmp olt fp128 %lhs, %rhs
+; CHECK: bl __getf2
+; CHECK: cmp w0, #0
+; CHECK: cset [[OGE:w[0-9]+]], ge
+
+; CHECK: bl __unordtf2
+; CHECK: cmp w0, #0
+; CHECK: cset [[UNORDERED:w[0-9]+]], ne
+
+; CHECK: orr [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
+; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
+ br i1 %cond, label %iftrue, label %iffalse
+
+iftrue:
+ ret i32 42
+; CHECK-NEXT: BB#
+; CHECK-NEXT: movz w0, #0x2a
+; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]
+
+iffalse:
+ ret i32 29
+; CHECK: [[RET29]]:
+; CHECK-NEXT: movz w0, #0x1d
+; CHECK-NEXT: [[REALRET]]:
+; CHECK: ret
+}
+
+define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
+; CHECK-LABEL: test_select:
+
+ %val = select i1 %cond, fp128 %lhs, fp128 %rhs
+ store fp128 %val, fp128* @lhs, align 16
+; CHECK: tst w0, #0x1
+; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: BB#
+; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b
+; CHECK-NEXT: [[IFFALSE]]:
+; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs]
+ ret void
+; CHECK: ret
+}
+
+@varfloat = global float 0.0, align 4
+@vardouble = global double 0.0, align 8
+
+define void @test_round() {
+; CHECK-LABEL: test_round:
+
+ %val = load fp128* @lhs, align 16
+
+ %float = fptrunc fp128 %val to float
+ store float %float, float* @varfloat, align 4
+; CHECK: bl __trunctfsf2
+; CHECK: str s0, [{{x[0-9]+}}, :lo12:varfloat]
+
+ %double = fptrunc fp128 %val to double
+ store double %double, double* @vardouble, align 8
+; CHECK: bl __trunctfdf2
+; CHECK: str d0, [{{x[0-9]+}}, :lo12:vardouble]
+
+ ret void
+}
+
+define void @test_extend() {
+; CHECK-LABEL: test_extend:
+
+ %val = load fp128* @lhs, align 16
+
+ %float = load float* @varfloat
+ %fromfloat = fpext float %float to fp128
+ store volatile fp128 %fromfloat, fp128* @lhs, align 16
+; CHECK: bl __extendsftf2
+; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]
+
+ %double = load double* @vardouble
+ %fromdouble = fpext double %double to fp128
+ store volatile fp128 %fromdouble, fp128* @lhs, align 16
+; CHECK: bl __extenddftf2
+; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]
+
+ ret void
+; CHECK: ret
+}
+
+define fp128 @test_neg(fp128 %in) {
+; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
+; Make sure the weird hex constant below *is* -0.0
+; CHECK-NEXT: fp128 -0
+
+; CHECK-LABEL: test_neg:
+
+ ; Could in principle be optimized to fneg, which we can't select; this makes
+ ; sure that doesn't happen.
+ %ret = fsub fp128 0xL00000000000000008000000000000000, %in
+; CHECK: mov v1.16b, v0.16b
+; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:[[MINUS0]]]
+; CHECK: bl __subtf3
+
+ ret fp128 %ret
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/arm64-frame-index.ll b/test/CodeGen/AArch64/arm64-frame-index.ll
new file mode 100644
index 000000000000..321f3354ca21
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-frame-index.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=arm64 -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
+; rdar://11935841
+
+define void @t1() nounwind ssp {
+entry:
+; CHECK-LABEL: t1:
+; CHECK-NOT: add x{{[0-9]+}}, sp
+; CHECK: stp x28, x27, [sp, #-16]!
+ %v = alloca [288 x i32], align 4
+ unreachable
+}
diff --git a/test/CodeGen/AArch64/arm64-frameaddr.ll b/test/CodeGen/AArch64/arm64-frameaddr.ll
new file mode 100644
index 000000000000..469078c88143
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-frameaddr.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define i8* @t() nounwind {
+entry:
+; CHECK-LABEL: t:
+; CHECK: stp x29, x30, [sp, #-16]!
+; CHECK: mov x29, sp
+; CHECK: mov x0, x29
+; CHECK: ldp x29, x30, [sp], #16
+; CHECK: ret
+ %0 = call i8* @llvm.frameaddress(i32 0)
+ ret i8* %0
+}
+
+declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-global-address.ll b/test/CodeGen/AArch64/arm64-global-address.ll
new file mode 100644
index 000000000000..005f414f8752
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-global-address.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; rdar://9618644
+
+@G = external global i32
+
+define i32 @test(i32 %off) nounwind {
+; CHECK-LABEL: test:
+; CHECK: adrp x[[REG:[0-9]+]], _G@GOTPAGE
+; CHECK: ldr x[[REG2:[0-9]+]], [x[[REG]], _G@GOTPAGEOFF]
+; CHECK: add w0, w[[REG2]], w0
+ %tmp = ptrtoint i32* @G to i32
+ %tmp1 = add i32 %tmp, %off
+ ret i32 %tmp1
+}
diff --git a/test/CodeGen/AArch64/arm64-hello.ll b/test/CodeGen/AArch64/arm64-hello.ll
new file mode 100644
index 000000000000..a6346fb467fb
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-hello.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s --check-prefix=CHECK-LINUX
+
+; CHECK-LABEL: main:
+; CHECK: stp x29, x30, [sp, #-16]!
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: stur wzr, [x29, #-4]
+; CHECK: adrp x0, L_.str@PAGE
+; CHECK: add x0, x0, L_.str@PAGEOFF
+; CHECK-NEXT: bl _puts
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldp x29, x30, [sp], #16
+; CHECK-NEXT: ret
+
+; CHECK-LINUX-LABEL: main:
+; CHECK-LINUX: stp x29, x30, [sp, #-16]!
+; CHECK-LINUX-NEXT: mov x29, sp
+; CHECK-LINUX-NEXT: sub sp, sp, #16
+; CHECK-LINUX-NEXT: stur wzr, [x29, #-4]
+; CHECK-LINUX: adrp x0, .L.str
+; CHECK-LINUX: add x0, x0, :lo12:.L.str
+; CHECK-LINUX-NEXT: bl puts
+; CHECK-LINUX-NEXT: mov sp, x29
+; CHECK-LINUX-NEXT: ldp x29, x30, [sp], #16
+; CHECK-LINUX-NEXT: ret
+
+@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00"
+
+define i32 @main() nounwind ssp {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %call = call i32 @puts(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0))
+ ret i32 %call
+}
+
+declare i32 @puts(i8*)
diff --git a/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll b/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
new file mode 100644
index 000000000000..ba759e32aae5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define i32 @foo(<4 x i16>* %__a) nounwind {
+; CHECK-LABEL: foo:
+; CHECK: umov.h w{{[0-9]+}}, v{{[0-9]+}}[0]
+ %tmp18 = load <4 x i16>* %__a, align 8
+ %vget_lane = extractelement <4 x i16> %tmp18, i32 0
+ %conv = zext i16 %vget_lane to i32
+ %mul = mul nsw i32 3, %conv
+ ret i32 %mul
+}
+
diff --git a/test/CodeGen/AArch64/arm64-icmp-opt.ll b/test/CodeGen/AArch64/arm64-icmp-opt.ll
new file mode 100644
index 000000000000..7b12ed748617
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-icmp-opt.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+; Optimize (x > -1) to (x >= 0) etc.
+; Optimize (cmp (add / sub), 0): eliminate the subs used to update the flags
+; for comparison only.
+; rdar://10233472
+
+define i32 @t1(i64 %a) nounwind ssp {
+entry:
+; CHECK-LABEL: t1:
+; CHECK-NOT: movn
+; CHECK: cmp x0, #0
+; CHECK: cset w0, ge
+ %cmp = icmp sgt i64 %a, -1
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
diff --git a/test/CodeGen/AArch64/arm64-illegal-float-ops.ll b/test/CodeGen/AArch64/arm64-illegal-float-ops.ll
new file mode 100644
index 000000000000..9a35fe54d32e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-illegal-float-ops.ll
@@ -0,0 +1,295 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+@varfp128 = global fp128 zeroinitializer
+
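+; None of these operations have AArch64 instructions, so they become libm or
+; compiler-rt libcalls; the fp128 variants use the l-suffixed or __*tf* entry
+; points.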
+declare float @llvm.cos.f32(float)
+declare double @llvm.cos.f64(double)
+declare fp128 @llvm.cos.f128(fp128)
+
+define void @test_cos(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_cos:
+
+ %cosfloat = call float @llvm.cos.f32(float %float)
+ store float %cosfloat, float* @varfloat
+; CHECK: bl cosf
+
+ %cosdouble = call double @llvm.cos.f64(double %double)
+ store double %cosdouble, double* @vardouble
+; CHECK: bl cos
+
+ %cosfp128 = call fp128 @llvm.cos.f128(fp128 %fp128)
+ store fp128 %cosfp128, fp128* @varfp128
+; CHECK: bl cosl
+
+ ret void
+}
+
+declare float @llvm.exp.f32(float)
+declare double @llvm.exp.f64(double)
+declare fp128 @llvm.exp.f128(fp128)
+
+define void @test_exp(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_exp:
+
+ %expfloat = call float @llvm.exp.f32(float %float)
+ store float %expfloat, float* @varfloat
+; CHECK: bl expf
+
+ %expdouble = call double @llvm.exp.f64(double %double)
+ store double %expdouble, double* @vardouble
+; CHECK: bl exp
+
+ %expfp128 = call fp128 @llvm.exp.f128(fp128 %fp128)
+ store fp128 %expfp128, fp128* @varfp128
+; CHECK: bl expl
+
+ ret void
+}
+
+declare float @llvm.exp2.f32(float)
+declare double @llvm.exp2.f64(double)
+declare fp128 @llvm.exp2.f128(fp128)
+
+define void @test_exp2(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_exp2:
+
+ %exp2float = call float @llvm.exp2.f32(float %float)
+ store float %exp2float, float* @varfloat
+; CHECK: bl exp2f
+
+ %exp2double = call double @llvm.exp2.f64(double %double)
+ store double %exp2double, double* @vardouble
+; CHECK: bl exp2
+
+ %exp2fp128 = call fp128 @llvm.exp2.f128(fp128 %fp128)
+ store fp128 %exp2fp128, fp128* @varfp128
+; CHECK: bl exp2l
+ ret void
+
+}
+
+declare float @llvm.log.f32(float)
+declare double @llvm.log.f64(double)
+declare fp128 @llvm.log.f128(fp128)
+
+define void @test_log(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_log:
+
+ %logfloat = call float @llvm.log.f32(float %float)
+ store float %logfloat, float* @varfloat
+; CHECK: bl logf
+
+ %logdouble = call double @llvm.log.f64(double %double)
+ store double %logdouble, double* @vardouble
+; CHECK: bl log
+
+ %logfp128 = call fp128 @llvm.log.f128(fp128 %fp128)
+ store fp128 %logfp128, fp128* @varfp128
+; CHECK: bl logl
+
+ ret void
+}
+
+declare float @llvm.log2.f32(float)
+declare double @llvm.log2.f64(double)
+declare fp128 @llvm.log2.f128(fp128)
+
+define void @test_log2(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_log2:
+
+ %log2float = call float @llvm.log2.f32(float %float)
+ store float %log2float, float* @varfloat
+; CHECK: bl log2f
+
+ %log2double = call double @llvm.log2.f64(double %double)
+ store double %log2double, double* @vardouble
+; CHECK: bl log2
+
+ %log2fp128 = call fp128 @llvm.log2.f128(fp128 %fp128)
+ store fp128 %log2fp128, fp128* @varfp128
+; CHECK: bl log2l
+ ret void
+
+}
+
+declare float @llvm.log10.f32(float)
+declare double @llvm.log10.f64(double)
+declare fp128 @llvm.log10.f128(fp128)
+
+define void @test_log10(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_log10:
+
+ %log10float = call float @llvm.log10.f32(float %float)
+ store float %log10float, float* @varfloat
+; CHECK: bl log10f
+
+ %log10double = call double @llvm.log10.f64(double %double)
+ store double %log10double, double* @vardouble
+; CHECK: bl log10
+
+ %log10fp128 = call fp128 @llvm.log10.f128(fp128 %fp128)
+ store fp128 %log10fp128, fp128* @varfp128
+; CHECK: bl log10l
+
+ ret void
+}
+
+declare float @llvm.sin.f32(float)
+declare double @llvm.sin.f64(double)
+declare fp128 @llvm.sin.f128(fp128)
+
+define void @test_sin(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_sin:
+
+ %sinfloat = call float @llvm.sin.f32(float %float)
+ store float %sinfloat, float* @varfloat
+; CHECK: bl sinf
+
+ %sindouble = call double @llvm.sin.f64(double %double)
+ store double %sindouble, double* @vardouble
+; CHECK: bl sin
+
+ %sinfp128 = call fp128 @llvm.sin.f128(fp128 %fp128)
+ store fp128 %sinfp128, fp128* @varfp128
+; CHECK: bl sinl
+ ret void
+
+}
+
+declare float @llvm.pow.f32(float, float)
+declare double @llvm.pow.f64(double, double)
+declare fp128 @llvm.pow.f128(fp128, fp128)
+
+define void @test_pow(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_pow:
+
+ %powfloat = call float @llvm.pow.f32(float %float, float %float)
+ store float %powfloat, float* @varfloat
+; CHECK: bl powf
+
+ %powdouble = call double @llvm.pow.f64(double %double, double %double)
+ store double %powdouble, double* @vardouble
+; CHECK: bl pow
+
+ %powfp128 = call fp128 @llvm.pow.f128(fp128 %fp128, fp128 %fp128)
+ store fp128 %powfp128, fp128* @varfp128
+; CHECK: bl powl
+
+ ret void
+}
+
+declare float @llvm.powi.f32(float, i32)
+declare double @llvm.powi.f64(double, i32)
+declare fp128 @llvm.powi.f128(fp128, i32)
+
+define void @test_powi(float %float, double %double, i32 %exponent, fp128 %fp128) {
+; CHECK-LABEL: test_powi:
+
+ %powifloat = call float @llvm.powi.f32(float %float, i32 %exponent)
+ store float %powifloat, float* @varfloat
+; CHECK: bl __powisf2
+
+ %powidouble = call double @llvm.powi.f64(double %double, i32 %exponent)
+ store double %powidouble, double* @vardouble
+; CHECK: bl __powidf2
+
+ %powifp128 = call fp128 @llvm.powi.f128(fp128 %fp128, i32 %exponent)
+ store fp128 %powifp128, fp128* @varfp128
+; CHECK: bl __powitf2
+ ret void
+
+}
+
+define void @test_frem(float %float, double %double, fp128 %fp128) {
+; CHECK-LABEL: test_frem:
+
+ %fremfloat = frem float %float, %float
+ store float %fremfloat, float* @varfloat
+; CHECK: bl fmodf
+
+ %fremdouble = frem double %double, %double
+ store double %fremdouble, double* @vardouble
+; CHECK: bl fmod
+
+ %fremfp128 = frem fp128 %fp128, %fp128
+ store fp128 %fremfp128, fp128* @varfp128
+; CHECK: bl fmodl
+
+ ret void
+}
+
+declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
+
+define void @test_fma(fp128 %fp128) {
+; CHECK-LABEL: test_fma:
+
+ %fmafp128 = call fp128 @llvm.fma.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
+ store fp128 %fmafp128, fp128* @varfp128
+; CHECK: bl fmal
+
+ ret void
+}
+
+declare fp128 @llvm.fmuladd.f128(fp128, fp128, fp128)
+
+define void @test_fmuladd(fp128 %fp128) {
+; CHECK-LABEL: test_fmuladd:
+
+ %fmuladdfp128 = call fp128 @llvm.fmuladd.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
+ store fp128 %fmuladdfp128, fp128* @varfp128
+; CHECK-NOT: bl fmal
+; CHECK: bl __multf3
+; CHECK: bl __addtf3
+
+ ret void
+}
+
+define i32 @test_fptosi32(fp128 %a) {
+; CHECK-LABEL: test_fptosi32:
+; CHECK: bl __fixtfsi
+ %conv.i = fptosi fp128 %a to i32
+ %b = add nsw i32 %conv.i, 48
+ ret i32 %b
+}
+
+define i64 @test_fptosi64(fp128 %a) {
+; CHECK-LABEL: test_fptosi64:
+; CHECK: bl __fixtfdi
+ %conv.i = fptosi fp128 %a to i64
+ %b = add nsw i64 %conv.i, 48
+ ret i64 %b
+}
+
+define i128 @test_fptosi128(fp128 %a) {
+; CHECK-LABEL: test_fptosi128:
+; CHECK: bl __fixtfti
+ %conv.i = fptosi fp128 %a to i128
+ %b = add nsw i128 %conv.i, 48
+ ret i128 %b
+}
+
+define i32 @test_fptoui32(fp128 %a) {
+; CHECK-LABEL: test_fptoui32:
+; CHECK: bl __fixunstfsi
+ %conv.i = fptoui fp128 %a to i32
+ %b = add nsw i32 %conv.i, 48
+ ret i32 %b
+}
+
+define i64 @test_fptoui64(fp128 %a) {
+; CHECK-LABEL: test_fptoui64:
+; CHECK: bl __fixunstfdi
+ %conv.i = fptoui fp128 %a to i64
+ %b = add nsw i64 %conv.i, 48
+ ret i64 %b
+}
+
+define i128 @test_fptoui128(fp128 %a) {
+; CHECK-LABEL: test_fptoui128:
+; CHECK: bl __fixunstfti
+ %conv.i = fptoui fp128 %a to i128
+ %b = add nsw i128 %conv.i, 48
+ ret i128 %b
+}
diff --git a/test/CodeGen/AArch64/arm64-indexed-memory.ll b/test/CodeGen/AArch64/arm64-indexed-memory.ll
new file mode 100644
index 000000000000..e501c6e403bd
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-indexed-memory.ll
@@ -0,0 +1,351 @@
+; RUN: llc < %s -march=arm64 -aarch64-redzone | FileCheck %s
+
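+; The pointer increment from each getelementptr is folded into the memory
+; access as an indexed (writeback) addressing mode instead of a separate add.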
+define void @store64(i64** nocapture %out, i64 %index, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: store64:
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #8
+; CHECK: ret
+ %tmp = load i64** %out, align 8
+ %incdec.ptr = getelementptr inbounds i64* %tmp, i64 1
+ store i64 %spacing, i64* %tmp, align 4
+ store i64* %incdec.ptr, i64** %out, align 8
+ ret void
+}
+
+define void @store32(i32** nocapture %out, i32 %index, i32 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: store32:
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #4
+; CHECK: ret
+ %tmp = load i32** %out, align 8
+ %incdec.ptr = getelementptr inbounds i32* %tmp, i64 1
+ store i32 %spacing, i32* %tmp, align 4
+ store i32* %incdec.ptr, i32** %out, align 8
+ ret void
+}
+
+define void @store16(i16** nocapture %out, i16 %index, i16 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: store16:
+; CHECK: strh w{{[0-9]+}}, [x{{[0-9]+}}], #2
+; CHECK: ret
+ %tmp = load i16** %out, align 8
+ %incdec.ptr = getelementptr inbounds i16* %tmp, i64 1
+ store i16 %spacing, i16* %tmp, align 4
+ store i16* %incdec.ptr, i16** %out, align 8
+ ret void
+}
+
+define void @store8(i8** nocapture %out, i8 %index, i8 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: store8:
+; CHECK: strb w{{[0-9]+}}, [x{{[0-9]+}}], #1
+; CHECK: ret
+ %tmp = load i8** %out, align 8
+ %incdec.ptr = getelementptr inbounds i8* %tmp, i64 1
+ store i8 %spacing, i8* %tmp, align 4
+ store i8* %incdec.ptr, i8** %out, align 8
+ ret void
+}
+
+define void @truncst64to32(i32** nocapture %out, i32 %index, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: truncst64to32:
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #4
+; CHECK: ret
+ %tmp = load i32** %out, align 8
+ %incdec.ptr = getelementptr inbounds i32* %tmp, i64 1
+ %trunc = trunc i64 %spacing to i32
+ store i32 %trunc, i32* %tmp, align 4
+ store i32* %incdec.ptr, i32** %out, align 8
+ ret void
+}
+
+define void @truncst64to16(i16** nocapture %out, i16 %index, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: truncst64to16:
+; CHECK: strh w{{[0-9]+}}, [x{{[0-9]+}}], #2
+; CHECK: ret
+ %tmp = load i16** %out, align 8
+ %incdec.ptr = getelementptr inbounds i16* %tmp, i64 1
+ %trunc = trunc i64 %spacing to i16
+ store i16 %trunc, i16* %tmp, align 4
+ store i16* %incdec.ptr, i16** %out, align 8
+ ret void
+}
+
+define void @truncst64to8(i8** nocapture %out, i8 %index, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: truncst64to8:
+; CHECK: strb w{{[0-9]+}}, [x{{[0-9]+}}], #1
+; CHECK: ret
+ %tmp = load i8** %out, align 8
+ %incdec.ptr = getelementptr inbounds i8* %tmp, i64 1
+ %trunc = trunc i64 %spacing to i8
+ store i8 %trunc, i8* %tmp, align 4
+ store i8* %incdec.ptr, i8** %out, align 8
+ ret void
+}
+
+
+define void @storef32(float** nocapture %out, float %index, float %spacing) nounwind noinline ssp {
+; CHECK-LABEL: storef32:
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #4
+; CHECK: ret
+ %tmp = load float** %out, align 8
+ %incdec.ptr = getelementptr inbounds float* %tmp, i64 1
+ store float %spacing, float* %tmp, align 4
+ store float* %incdec.ptr, float** %out, align 8
+ ret void
+}
+
+define void @storef64(double** nocapture %out, double %index, double %spacing) nounwind noinline ssp {
+; CHECK-LABEL: storef64:
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #8
+; CHECK: ret
+ %tmp = load double** %out, align 8
+ %incdec.ptr = getelementptr inbounds double* %tmp, i64 1
+ store double %spacing, double* %tmp, align 4
+ store double* %incdec.ptr, double** %out, align 8
+ ret void
+}
+
+define double * @pref64(double** nocapture %out, double %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pref64:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: str d0, [x0, #32]!
+; CHECK-NEXT: ret
+ %tmp = load double** %out, align 8
+ %ptr = getelementptr inbounds double* %tmp, i64 4
+ store double %spacing, double* %ptr, align 4
+ ret double *%ptr
+}
+
+define float * @pref32(float** nocapture %out, float %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pref32:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: str s0, [x0, #12]!
+; CHECK-NEXT: ret
+ %tmp = load float** %out, align 8
+ %ptr = getelementptr inbounds float* %tmp, i64 3
+ store float %spacing, float* %ptr, align 4
+ ret float *%ptr
+}
+
+define i64 * @pre64(i64** nocapture %out, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pre64:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: str x1, [x0, #16]!
+; CHECK-NEXT: ret
+ %tmp = load i64** %out, align 8
+ %ptr = getelementptr inbounds i64* %tmp, i64 2
+ store i64 %spacing, i64* %ptr, align 4
+ ret i64 *%ptr
+}
+
+define i32 * @pre32(i32** nocapture %out, i32 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pre32:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: str w1, [x0, #8]!
+; CHECK-NEXT: ret
+ %tmp = load i32** %out, align 8
+ %ptr = getelementptr inbounds i32* %tmp, i64 2
+ store i32 %spacing, i32* %ptr, align 4
+ ret i32 *%ptr
+}
+
+define i16 * @pre16(i16** nocapture %out, i16 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pre16:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: strh w1, [x0, #4]!
+; CHECK-NEXT: ret
+ %tmp = load i16** %out, align 8
+ %ptr = getelementptr inbounds i16* %tmp, i64 2
+ store i16 %spacing, i16* %ptr, align 4
+ ret i16 *%ptr
+}
+
+define i8 * @pre8(i8** nocapture %out, i8 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pre8:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: strb w1, [x0, #2]!
+; CHECK-NEXT: ret
+ %tmp = load i8** %out, align 8
+ %ptr = getelementptr inbounds i8* %tmp, i64 2
+ store i8 %spacing, i8* %ptr, align 4
+ ret i8 *%ptr
+}
+
+define i32 * @pretrunc64to32(i32** nocapture %out, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pretrunc64to32:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: str w1, [x0, #8]!
+; CHECK-NEXT: ret
+ %tmp = load i32** %out, align 8
+ %ptr = getelementptr inbounds i32* %tmp, i64 2
+ %trunc = trunc i64 %spacing to i32
+ store i32 %trunc, i32* %ptr, align 4
+ ret i32 *%ptr
+}
+
+define i16 * @pretrunc64to16(i16** nocapture %out, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pretrunc64to16:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: strh w1, [x0, #4]!
+; CHECK-NEXT: ret
+ %tmp = load i16** %out, align 8
+ %ptr = getelementptr inbounds i16* %tmp, i64 2
+ %trunc = trunc i64 %spacing to i16
+ store i16 %trunc, i16* %ptr, align 4
+ ret i16 *%ptr
+}
+
+define i8 * @pretrunc64to8(i8** nocapture %out, i64 %spacing) nounwind noinline ssp {
+; CHECK-LABEL: pretrunc64to8:
+; CHECK: ldr x0, [x0]
+; CHECK-NEXT: strb w1, [x0, #2]!
+; CHECK-NEXT: ret
+ %tmp = load i8** %out, align 8
+ %ptr = getelementptr inbounds i8* %tmp, i64 2
+ %trunc = trunc i64 %spacing to i8
+ store i8 %trunc, i8* %ptr, align 4
+ ret i8 *%ptr
+}
+
+;-----
+; Pre-indexed loads
+;-----
+define double* @preidxf64(double* %src, double* %out) {
+; CHECK-LABEL: preidxf64:
+; CHECK: ldr d0, [x0, #8]!
+; CHECK: str d0, [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds double* %src, i64 1
+ %tmp = load double* %ptr, align 4
+ store double %tmp, double* %out, align 4
+ ret double* %ptr
+}
+
+define float* @preidxf32(float* %src, float* %out) {
+; CHECK-LABEL: preidxf32:
+; CHECK: ldr s0, [x0, #4]!
+; CHECK: str s0, [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds float* %src, i64 1
+ %tmp = load float* %ptr, align 4
+ store float %tmp, float* %out, align 4
+ ret float* %ptr
+}
+
+define i64* @preidx64(i64* %src, i64* %out) {
+; CHECK-LABEL: preidx64:
+; CHECK: ldr x[[REG:[0-9]+]], [x0, #8]!
+; CHECK: str x[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i64* %src, i64 1
+ %tmp = load i64* %ptr, align 4
+ store i64 %tmp, i64* %out, align 4
+ ret i64* %ptr
+}
+
+define i32* @preidx32(i32* %src, i32* %out) {
+; CHECK: ldr w[[REG:[0-9]+]], [x0, #4]!
+; CHECK: str w[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i32* %src, i64 1
+ %tmp = load i32* %ptr, align 4
+ store i32 %tmp, i32* %out, align 4
+ ret i32* %ptr
+}
+
+define i16* @preidx16zext32(i16* %src, i32* %out) {
+; CHECK: ldrh w[[REG:[0-9]+]], [x0, #2]!
+; CHECK: str w[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i16* %src, i64 1
+ %tmp = load i16* %ptr, align 4
+ %ext = zext i16 %tmp to i32
+ store i32 %ext, i32* %out, align 4
+ ret i16* %ptr
+}
+
+define i16* @preidx16zext64(i16* %src, i64* %out) {
+; CHECK: ldrh w[[REG:[0-9]+]], [x0, #2]!
+; CHECK: str x[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i16* %src, i64 1
+ %tmp = load i16* %ptr, align 4
+ %ext = zext i16 %tmp to i64
+ store i64 %ext, i64* %out, align 4
+ ret i16* %ptr
+}
+
+define i8* @preidx8zext32(i8* %src, i32* %out) {
+; CHECK: ldrb w[[REG:[0-9]+]], [x0, #1]!
+; CHECK: str w[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i8* %src, i64 1
+ %tmp = load i8* %ptr, align 4
+ %ext = zext i8 %tmp to i32
+ store i32 %ext, i32* %out, align 4
+ ret i8* %ptr
+}
+
+define i8* @preidx8zext64(i8* %src, i64* %out) {
+; CHECK: ldrb w[[REG:[0-9]+]], [x0, #1]!
+; CHECK: str x[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i8* %src, i64 1
+ %tmp = load i8* %ptr, align 4
+ %ext = zext i8 %tmp to i64
+ store i64 %ext, i64* %out, align 4
+ ret i8* %ptr
+}
+
+define i32* @preidx32sext64(i32* %src, i64* %out) {
+; CHECK: ldrsw x[[REG:[0-9]+]], [x0, #4]!
+; CHECK: str x[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i32* %src, i64 1
+ %tmp = load i32* %ptr, align 4
+ %ext = sext i32 %tmp to i64
+ store i64 %ext, i64* %out, align 8
+ ret i32* %ptr
+}
+
+define i16* @preidx16sext32(i16* %src, i32* %out) {
+; CHECK: ldrsh w[[REG:[0-9]+]], [x0, #2]!
+; CHECK: str w[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i16* %src, i64 1
+ %tmp = load i16* %ptr, align 4
+ %ext = sext i16 %tmp to i32
+ store i32 %ext, i32* %out, align 4
+ ret i16* %ptr
+}
+
+define i16* @preidx16sext64(i16* %src, i64* %out) {
+; CHECK: ldrsh x[[REG:[0-9]+]], [x0, #2]!
+; CHECK: str x[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i16* %src, i64 1
+ %tmp = load i16* %ptr, align 4
+ %ext = sext i16 %tmp to i64
+ store i64 %ext, i64* %out, align 4
+ ret i16* %ptr
+}
+
+define i8* @preidx8sext32(i8* %src, i32* %out) {
+; CHECK: ldrsb w[[REG:[0-9]+]], [x0, #1]!
+; CHECK: str w[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i8* %src, i64 1
+ %tmp = load i8* %ptr, align 4
+ %ext = sext i8 %tmp to i32
+ store i32 %ext, i32* %out, align 4
+ ret i8* %ptr
+}
+
+define i8* @preidx8sext64(i8* %src, i64* %out) {
+; CHECK: ldrsb x[[REG:[0-9]+]], [x0, #1]!
+; CHECK: str x[[REG]], [x1]
+; CHECK: ret
+ %ptr = getelementptr inbounds i8* %src, i64 1
+ %tmp = load i8* %ptr, align 4
+ %ext = sext i8 %tmp to i64
+ store i64 %ext, i64* %out, align 4
+ ret i8* %ptr
+}
diff --git a/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll b/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
new file mode 100644
index 000000000000..c118f109289b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s
+
+; This used to assert with "Overran sorted position" in AssignTopologicalOrder
+; due to a cycle created in performPostLD1Combine.
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+; Function Attrs: nounwind ssp
+define void @f(double* %P1) #0 {
+entry:
+ %arrayidx4 = getelementptr inbounds double* %P1, i64 1
+ %0 = load double* %arrayidx4, align 8, !tbaa !1
+ %1 = load double* %P1, align 8, !tbaa !1
+ %2 = insertelement <2 x double> undef, double %0, i32 0
+ %3 = insertelement <2 x double> %2, double %1, i32 1
+ %4 = fsub <2 x double> zeroinitializer, %3
+ %5 = fmul <2 x double> undef, %4
+ %6 = extractelement <2 x double> %5, i32 0
+ %cmp168 = fcmp olt double %6, undef
+ br i1 %cmp168, label %if.then172, label %return
+
+if.then172: ; preds = %cond.end90
+ %7 = tail call i64 @llvm.objectsize.i64.p0i8(i8* undef, i1 false)
+ br label %return
+
+return: ; preds = %if.then172, %cond.end90, %entry
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"double", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
new file mode 100644
index 000000000000..9ee4063658b2
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
@@ -0,0 +1,6174 @@
+; RUN: llc -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s
+
+@ptr = global i8* null
+
+define <8 x i8> @test_v8i8_pre_load(<8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %val = load <8 x i8>* %newaddr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret <8 x i8> %val
+}
+
+define <8 x i8> @test_v8i8_post_load(<8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %val = load <8 x i8>* %addr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret <8 x i8> %val
+}
+
+define void @test_v8i8_pre_store(<8 x i8> %in, <8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ store <8 x i8> %in, <8 x i8>* %newaddr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret void
+}
+
+define void @test_v8i8_post_store(<8 x i8> %in, <8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ store <8 x i8> %in, <8 x i8>* %addr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret void
+}
+
+define <4 x i16> @test_v4i16_pre_load(<4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %val = load <4 x i16>* %newaddr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret <4 x i16> %val
+}
+
+define <4 x i16> @test_v4i16_post_load(<4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %val = load <4 x i16>* %addr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret <4 x i16> %val
+}
+
+define void @test_v4i16_pre_store(<4 x i16> %in, <4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ store <4 x i16> %in, <4 x i16>* %newaddr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret void
+}
+
+define void @test_v4i16_post_store(<4 x i16> %in, <4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ store <4 x i16> %in, <4 x i16>* %addr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret void
+}
+
+define <2 x i32> @test_v2i32_pre_load(<2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %val = load <2 x i32>* %newaddr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret <2 x i32> %val
+}
+
+define <2 x i32> @test_v2i32_post_load(<2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %val = load <2 x i32>* %addr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret <2 x i32> %val
+}
+
+define void @test_v2i32_pre_store(<2 x i32> %in, <2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ store <2 x i32> %in, <2 x i32>* %newaddr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret void
+}
+
+define void @test_v2i32_post_store(<2 x i32> %in, <2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ store <2 x i32> %in, <2 x i32>* %addr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret void
+}
+
+define <2 x float> @test_v2f32_pre_load(<2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %val = load <2 x float>* %newaddr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret <2 x float> %val
+}
+
+define <2 x float> @test_v2f32_post_load(<2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %val = load <2 x float>* %addr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret <2 x float> %val
+}
+
+define void @test_v2f32_pre_store(<2 x float> %in, <2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ store <2 x float> %in, <2 x float>* %newaddr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret void
+}
+
+define void @test_v2f32_post_store(<2 x float> %in, <2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ store <2 x float> %in, <2 x float>* %addr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret void
+}
+
+define <1 x i64> @test_v1i64_pre_load(<1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %val = load <1 x i64>* %newaddr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret <1 x i64> %val
+}
+
+define <1 x i64> @test_v1i64_post_load(<1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %val = load <1 x i64>* %addr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret <1 x i64> %val
+}
+
+define void @test_v1i64_pre_store(<1 x i64> %in, <1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ store <1 x i64> %in, <1 x i64>* %newaddr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret void
+}
+
+define void @test_v1i64_post_store(<1 x i64> %in, <1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ store <1 x i64> %in, <1 x i64>* %addr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret void
+}
+
+define <16 x i8> @test_v16i8_pre_load(<16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %val = load <16 x i8>* %newaddr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret <16 x i8> %val
+}
+
+define <16 x i8> @test_v16i8_post_load(<16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %val = load <16 x i8>* %addr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret <16 x i8> %val
+}
+
+define void @test_v16i8_pre_store(<16 x i8> %in, <16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ store <16 x i8> %in, <16 x i8>* %newaddr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret void
+}
+
+define void @test_v16i8_post_store(<16 x i8> %in, <16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ store <16 x i8> %in, <16 x i8>* %addr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret void
+}
+
+define <8 x i16> @test_v8i16_pre_load(<8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %val = load <8 x i16>* %newaddr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret <8 x i16> %val
+}
+
+define <8 x i16> @test_v8i16_post_load(<8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %val = load <8 x i16>* %addr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret <8 x i16> %val
+}
+
+define void @test_v8i16_pre_store(<8 x i16> %in, <8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ store <8 x i16> %in, <8 x i16>* %newaddr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret void
+}
+
+define void @test_v8i16_post_store(<8 x i16> %in, <8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ store <8 x i16> %in, <8 x i16>* %addr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret void
+}
+
+define <4 x i32> @test_v4i32_pre_load(<4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %val = load <4 x i32>* %newaddr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret <4 x i32> %val
+}
+
+define <4 x i32> @test_v4i32_post_load(<4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %val = load <4 x i32>* %addr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret <4 x i32> %val
+}
+
+define void @test_v4i32_pre_store(<4 x i32> %in, <4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ store <4 x i32> %in, <4 x i32>* %newaddr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret void
+}
+
+define void @test_v4i32_post_store(<4 x i32> %in, <4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ store <4 x i32> %in, <4 x i32>* %addr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret void
+}
+
+
+define <4 x float> @test_v4f32_pre_load(<4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %val = load <4 x float>* %newaddr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret <4 x float> %val
+}
+
+define <4 x float> @test_v4f32_post_load(<4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %val = load <4 x float>* %addr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret <4 x float> %val
+}
+
+define void @test_v4f32_pre_store(<4 x float> %in, <4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ store <4 x float> %in, <4 x float>* %newaddr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret void
+}
+
+define void @test_v4f32_post_store(<4 x float> %in, <4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ store <4 x float> %in, <4 x float>* %addr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret void
+}
+
+
+define <2 x i64> @test_v2i64_pre_load(<2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %val = load <2 x i64>* %newaddr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret <2 x i64> %val
+}
+
+define <2 x i64> @test_v2i64_post_load(<2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %val = load <2 x i64>* %addr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret <2 x i64> %val
+}
+
+define void @test_v2i64_pre_store(<2 x i64> %in, <2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ store <2 x i64> %in, <2 x i64>* %newaddr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret void
+}
+
+define void @test_v2i64_post_store(<2 x i64> %in, <2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ store <2 x i64> %in, <2 x i64>* %addr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret void
+}
+
+
+define <2 x double> @test_v2f64_pre_load(<2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %val = load <2 x double>* %newaddr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret <2 x double> %val
+}
+
+define <2 x double> @test_v2f64_post_load(<2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %val = load <2 x double>* %addr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret <2 x double> %val
+}
+
+define void @test_v2f64_pre_store(<2 x double> %in, <2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ store <2 x double> %in, <2 x double>* %newaddr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret void
+}
+
+define void @test_v2f64_post_store(<2 x double> %in, <2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ store <2 x double> %in, <2 x double>* %addr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret void
+}
+
+define i8* @test_v16i8_post_imm_st1_lane(<16 x i8> %in, i8* %addr) {
+; CHECK-LABEL: test_v16i8_post_imm_st1_lane:
+; CHECK: st1.b { v0 }[3], [x0], #1
+ %elt = extractelement <16 x i8> %in, i32 3
+ store i8 %elt, i8* %addr
+
+ %newaddr = getelementptr i8* %addr, i32 1
+ ret i8* %newaddr
+}
+
+define i8* @test_v16i8_post_reg_st1_lane(<16 x i8> %in, i8* %addr) {
+; CHECK-LABEL: test_v16i8_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x2
+; CHECK: st1.b { v0 }[3], [x0], x[[OFFSET]]
+ %elt = extractelement <16 x i8> %in, i32 3
+ store i8 %elt, i8* %addr
+
+ %newaddr = getelementptr i8* %addr, i32 2
+ ret i8* %newaddr
+}
+
+
+define i16* @test_v8i16_post_imm_st1_lane(<8 x i16> %in, i16* %addr) {
+; CHECK-LABEL: test_v8i16_post_imm_st1_lane:
+; CHECK: st1.h { v0 }[3], [x0], #2
+ %elt = extractelement <8 x i16> %in, i32 3
+ store i16 %elt, i16* %addr
+
+ %newaddr = getelementptr i16* %addr, i32 1
+ ret i16* %newaddr
+}
+
+define i16* @test_v8i16_post_reg_st1_lane(<8 x i16> %in, i16* %addr) {
+; CHECK-LABEL: test_v8i16_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x4
+; CHECK: st1.h { v0 }[3], [x0], x[[OFFSET]]
+ %elt = extractelement <8 x i16> %in, i32 3
+ store i16 %elt, i16* %addr
+
+ %newaddr = getelementptr i16* %addr, i32 2
+ ret i16* %newaddr
+}
+
+define i32* @test_v4i32_post_imm_st1_lane(<4 x i32> %in, i32* %addr) {
+; CHECK-LABEL: test_v4i32_post_imm_st1_lane:
+; CHECK: st1.s { v0 }[3], [x0], #4
+ %elt = extractelement <4 x i32> %in, i32 3
+ store i32 %elt, i32* %addr
+
+ %newaddr = getelementptr i32* %addr, i32 1
+ ret i32* %newaddr
+}
+
+define i32* @test_v4i32_post_reg_st1_lane(<4 x i32> %in, i32* %addr) {
+; CHECK-LABEL: test_v4i32_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x8
+; CHECK: st1.s { v0 }[3], [x0], x[[OFFSET]]
+ %elt = extractelement <4 x i32> %in, i32 3
+ store i32 %elt, i32* %addr
+
+ %newaddr = getelementptr i32* %addr, i32 2
+ ret i32* %newaddr
+}
+
+define float* @test_v4f32_post_imm_st1_lane(<4 x float> %in, float* %addr) {
+; CHECK-LABEL: test_v4f32_post_imm_st1_lane:
+; CHECK: st1.s { v0 }[3], [x0], #4
+ %elt = extractelement <4 x float> %in, i32 3
+ store float %elt, float* %addr
+
+ %newaddr = getelementptr float* %addr, i32 1
+ ret float* %newaddr
+}
+
+define float* @test_v4f32_post_reg_st1_lane(<4 x float> %in, float* %addr) {
+; CHECK-LABEL: test_v4f32_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x8
+; CHECK: st1.s { v0 }[3], [x0], x[[OFFSET]]
+ %elt = extractelement <4 x float> %in, i32 3
+ store float %elt, float* %addr
+
+ %newaddr = getelementptr float* %addr, i32 2
+ ret float* %newaddr
+}
+
+define i64* @test_v2i64_post_imm_st1_lane(<2 x i64> %in, i64* %addr) {
+; CHECK-LABEL: test_v2i64_post_imm_st1_lane:
+; CHECK: st1.d { v0 }[1], [x0], #8
+ %elt = extractelement <2 x i64> %in, i64 1
+ store i64 %elt, i64* %addr
+
+ %newaddr = getelementptr i64* %addr, i64 1
+ ret i64* %newaddr
+}
+
+define i64* @test_v2i64_post_reg_st1_lane(<2 x i64> %in, i64* %addr) {
+; CHECK-LABEL: test_v2i64_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x10
+; CHECK: st1.d { v0 }[1], [x0], x[[OFFSET]]
+ %elt = extractelement <2 x i64> %in, i64 1
+ store i64 %elt, i64* %addr
+
+ %newaddr = getelementptr i64* %addr, i64 2
+ ret i64* %newaddr
+}
+
+define double* @test_v2f64_post_imm_st1_lane(<2 x double> %in, double* %addr) {
+; CHECK-LABEL: test_v2f64_post_imm_st1_lane:
+; CHECK: st1.d { v0 }[1], [x0], #8
+ %elt = extractelement <2 x double> %in, i32 1
+ store double %elt, double* %addr
+
+ %newaddr = getelementptr double* %addr, i32 1
+ ret double* %newaddr
+}
+
+define double* @test_v2f64_post_reg_st1_lane(<2 x double> %in, double* %addr) {
+; CHECK-LABEL: test_v2f64_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x10
+; CHECK: st1.d { v0 }[1], [x0], x[[OFFSET]]
+ %elt = extractelement <2 x double> %in, i32 1
+ store double %elt, double* %addr
+
+ %newaddr = getelementptr double* %addr, i32 2
+ ret double* %newaddr
+}
+
+define i8* @test_v8i8_post_imm_st1_lane(<8 x i8> %in, i8* %addr) {
+; CHECK-LABEL: test_v8i8_post_imm_st1_lane:
+; CHECK: st1.b { v0 }[3], [x0], #1
+ %elt = extractelement <8 x i8> %in, i32 3
+ store i8 %elt, i8* %addr
+
+ %newaddr = getelementptr i8* %addr, i32 1
+ ret i8* %newaddr
+}
+
+define i8* @test_v8i8_post_reg_st1_lane(<8 x i8> %in, i8* %addr) {
+; CHECK-LABEL: test_v8i8_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x2
+; CHECK: st1.b { v0 }[3], [x0], x[[OFFSET]]
+ %elt = extractelement <8 x i8> %in, i32 3
+ store i8 %elt, i8* %addr
+
+ %newaddr = getelementptr i8* %addr, i32 2
+ ret i8* %newaddr
+}
+
+define i16* @test_v4i16_post_imm_st1_lane(<4 x i16> %in, i16* %addr) {
+; CHECK-LABEL: test_v4i16_post_imm_st1_lane:
+; CHECK: st1.h { v0 }[3], [x0], #2
+ %elt = extractelement <4 x i16> %in, i32 3
+ store i16 %elt, i16* %addr
+
+ %newaddr = getelementptr i16* %addr, i32 1
+ ret i16* %newaddr
+}
+
+define i16* @test_v4i16_post_reg_st1_lane(<4 x i16> %in, i16* %addr) {
+; CHECK-LABEL: test_v4i16_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x4
+; CHECK: st1.h { v0 }[3], [x0], x[[OFFSET]]
+ %elt = extractelement <4 x i16> %in, i32 3
+ store i16 %elt, i16* %addr
+
+ %newaddr = getelementptr i16* %addr, i32 2
+ ret i16* %newaddr
+}
+
+define i32* @test_v2i32_post_imm_st1_lane(<2 x i32> %in, i32* %addr) {
+; CHECK-LABEL: test_v2i32_post_imm_st1_lane:
+; CHECK: st1.s { v0 }[1], [x0], #4
+ %elt = extractelement <2 x i32> %in, i32 1
+ store i32 %elt, i32* %addr
+
+ %newaddr = getelementptr i32* %addr, i32 1
+ ret i32* %newaddr
+}
+
+define i32* @test_v2i32_post_reg_st1_lane(<2 x i32> %in, i32* %addr) {
+; CHECK-LABEL: test_v2i32_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x8
+; CHECK: st1.s { v0 }[1], [x0], x[[OFFSET]]
+ %elt = extractelement <2 x i32> %in, i32 1
+ store i32 %elt, i32* %addr
+
+ %newaddr = getelementptr i32* %addr, i32 2
+ ret i32* %newaddr
+}
+
+define float* @test_v2f32_post_imm_st1_lane(<2 x float> %in, float* %addr) {
+; CHECK-LABEL: test_v2f32_post_imm_st1_lane:
+; CHECK: st1.s { v0 }[1], [x0], #4
+ %elt = extractelement <2 x float> %in, i32 1
+ store float %elt, float* %addr
+
+ %newaddr = getelementptr float* %addr, i32 1
+ ret float* %newaddr
+}
+
+define float* @test_v2f32_post_reg_st1_lane(<2 x float> %in, float* %addr) {
+; CHECK-LABEL: test_v2f32_post_reg_st1_lane:
+; CHECK: orr w[[OFFSET:[0-9]+]], wzr, #0x8
+; CHECK: st1.s { v0 }[1], [x0], x[[OFFSET]]
+ %elt = extractelement <2 x float> %in, i32 1
+ store float %elt, float* %addr
+
+ %newaddr = getelementptr float* %addr, i32 2
+ ret float* %newaddr
+}
+
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v16i8_post_imm_ld2:
+;CHECK: ld2.16b { v0, v1 }, [x0], #32
+ %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld2
+}
+
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v16i8_post_reg_ld2:
+;CHECK: ld2.16b { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld2
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
+
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v8i8_post_imm_ld2:
+;CHECK: ld2.8b { v0, v1 }, [x0], #16
+ %ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 16
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld2
+}
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i8_post_reg_ld2:
+;CHECK: ld2.8b { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld2
+}
+
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*)
+
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v8i16_post_imm_ld2:
+;CHECK: ld2.8h { v0, v1 }, [x0], #32
+ %ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld2
+}
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i16_post_reg_ld2:
+;CHECK: ld2.8h { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld2
+}
+
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16*)
+
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v4i16_post_imm_ld2:
+;CHECK: ld2.4h { v0, v1 }, [x0], #16
+ %ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 8
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld2
+}
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i16_post_reg_ld2:
+;CHECK: ld2.4h { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld2
+}
+
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16*)
+
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v4i32_post_imm_ld2:
+;CHECK: ld2.4s { v0, v1 }, [x0], #32
+ %ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld2
+}
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i32_post_reg_ld2:
+;CHECK: ld2.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld2
+}
+
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*)
+
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v2i32_post_imm_ld2:
+;CHECK: ld2.2s { v0, v1 }, [x0], #16
+ %ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld2
+}
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i32_post_reg_ld2:
+;CHECK: ld2.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld2
+}
+
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)
+
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v2i64_post_imm_ld2:
+;CHECK: ld2.2d { v0, v1 }, [x0], #32
+ %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld2
+}
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i64_post_reg_ld2:
+;CHECK: ld2.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld2
+}
+
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64*)
+
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v1i64_post_imm_ld2:
+;CHECK: ld1.1d { v0, v1 }, [x0], #16
+ %ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 2
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld2
+}
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1i64_post_reg_ld2:
+;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld2
+}
+
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64*)
+
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v4f32_post_imm_ld2:
+;CHECK: ld2.4s { v0, v1 }, [x0], #32
+ %ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld2
+}
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4f32_post_reg_ld2:
+;CHECK: ld2.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld2
+}
+
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float*)
+
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v2f32_post_imm_ld2:
+;CHECK: ld2.2s { v0, v1 }, [x0], #16
+ %ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld2
+}
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f32_post_reg_ld2:
+;CHECK: ld2.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld2
+}
+
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float*)
+
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v2f64_post_imm_ld2:
+;CHECK: ld2.2d { v0, v1 }, [x0], #32
+ %ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld2
+}
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f64_post_reg_ld2:
+;CHECK: ld2.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld2
+}
+
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double*)
+
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v1f64_post_imm_ld2:
+;CHECK: ld1.1d { v0, v1 }, [x0], #16
+ %ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 2
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld2
+}
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1f64_post_reg_ld2:
+;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld2
+}
+
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double*)
+
+
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v16i8_post_imm_ld3:
+;CHECK: ld3.16b { v0, v1, v2 }, [x0], #48
+ %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 48
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v16i8_post_reg_ld3:
+;CHECK: ld3.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*)
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v8i8_post_imm_ld3:
+;CHECK: ld3.8b { v0, v1, v2 }, [x0], #24
+ %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 24
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i8_post_reg_ld3:
+;CHECK: ld3.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8*)
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v8i16_post_imm_ld3:
+;CHECK: ld3.8h { v0, v1, v2 }, [x0], #48
+ %ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 24
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i16_post_reg_ld3:
+;CHECK: ld3.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16*)
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v4i16_post_imm_ld3:
+;CHECK: ld3.4h { v0, v1, v2 }, [x0], #24
+ %ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 12
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i16_post_reg_ld3:
+;CHECK: ld3.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v4i32_post_imm_ld3:
+;CHECK: ld3.4s { v0, v1, v2 }, [x0], #48
+ %ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 12
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i32_post_reg_ld3:
+;CHECK: ld3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32*)
+
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v2i32_post_imm_ld3:
+;CHECK: ld3.2s { v0, v1, v2 }, [x0], #24
+ %ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 6
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i32_post_reg_ld3:
+;CHECK: ld3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32*)
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v2i64_post_imm_ld3:
+;CHECK: ld3.2d { v0, v1, v2 }, [x0], #48
+ %ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 6
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i64_post_reg_ld3:
+;CHECK: ld3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64*)
+
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v1i64_post_imm_ld3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
+ %ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 3
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1i64_post_reg_ld3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64*)
+
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v4f32_post_imm_ld3:
+;CHECK: ld3.4s { v0, v1, v2 }, [x0], #48
+ %ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 12
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld3
+}
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4f32_post_reg_ld3:
+;CHECK: ld3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld3
+}
+
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
+
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v2f32_post_imm_ld3:
+;CHECK: ld3.2s { v0, v1, v2 }, [x0], #24
+ %ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 6
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld3
+}
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f32_post_reg_ld3:
+;CHECK: ld3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld3
+}
+
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float*)
+
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v2f64_post_imm_ld3:
+;CHECK: ld3.2d { v0, v1, v2 }, [x0], #48
+ %ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 6
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld3
+}
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f64_post_reg_ld3:
+;CHECK: ld3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld3
+}
+
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double*)
+
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v1f64_post_imm_ld3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
+ %ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 3
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld3
+}
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1f64_post_reg_ld3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld3
+}
+
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double*)
+
+
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v16i8_post_imm_ld4:
+;CHECK: ld4.16b { v0, v1, v2, v3 }, [x0], #64
+ %ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 64
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v16i8_post_reg_ld4:
+;CHECK: ld4.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*)
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v8i8_post_imm_ld4:
+;CHECK: ld4.8b { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i8_post_reg_ld4:
+;CHECK: ld4.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8*)
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v8i16_post_imm_ld4:
+;CHECK: ld4.8h { v0, v1, v2, v3 }, [x0], #64
+ %ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 32
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i16_post_reg_ld4:
+;CHECK: ld4.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16*)
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v4i16_post_imm_ld4:
+;CHECK: ld4.4h { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i16_post_reg_ld4:
+;CHECK: ld4.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*)
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v4i32_post_imm_ld4:
+;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], #64
+ %ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 16
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i32_post_reg_ld4:
+;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32*)
+
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v2i32_post_imm_ld4:
+;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i32_post_reg_ld4:
+;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32*)
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v2i64_post_imm_ld4:
+;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], #64
+ %ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 8
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i64_post_reg_ld4:
+;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64*)
+
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v1i64_post_imm_ld4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1i64_post_reg_ld4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64*)
+
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v4f32_post_imm_ld4:
+;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], #64
+ %ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 16
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
+}
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4f32_post_reg_ld4:
+;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
+}
+
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float*)
+
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v2f32_post_imm_ld4:
+;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
+}
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f32_post_reg_ld4:
+;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
+}
+
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float*)
+
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v2f64_post_imm_ld4:
+;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], #64
+ %ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 8
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
+}
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f64_post_reg_ld4:
+;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
+}
+
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double*)
+
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v1f64_post_imm_ld4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
+}
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1f64_post_reg_ld4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
+}
+
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double*)
+
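+; Post-indexed ld1x2 tests: a multi-register ld1 that loads two consecutive vectors without interleaving,
+; with immediate or register write-back of the base pointer.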
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x2(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v16i8_post_imm_ld1x2:
+;CHECK: ld1.16b { v0, v1 }, [x0], #32
+ %ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld1x2
+}
+
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x2(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v16i8_post_reg_ld1x2:
+;CHECK: ld1.16b { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld1x2
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8*)
+
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x2(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v8i8_post_imm_ld1x2:
+;CHECK: ld1.8b { v0, v1 }, [x0], #16
+ %ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 16
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld1x2
+}
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x2(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i8_post_reg_ld1x2:
+;CHECK: ld1.8b { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld1x2
+}
+
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8*)
+
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x2(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v8i16_post_imm_ld1x2:
+;CHECK: ld1.8h { v0, v1 }, [x0], #32
+ %ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld1x2
+}
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x2(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i16_post_reg_ld1x2:
+;CHECK: ld1.8h { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld1x2
+}
+
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16*)
+
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x2(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v4i16_post_imm_ld1x2:
+;CHECK: ld1.4h { v0, v1 }, [x0], #16
+ %ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 8
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld1x2
+}
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x2(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i16_post_reg_ld1x2:
+;CHECK: ld1.4h { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld1x2
+}
+
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16*)
+
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x2(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v4i32_post_imm_ld1x2:
+;CHECK: ld1.4s { v0, v1 }, [x0], #32
+ %ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld1x2
+}
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x2(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i32_post_reg_ld1x2:
+;CHECK: ld1.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld1x2
+}
+
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32*)
+
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x2(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v2i32_post_imm_ld1x2:
+;CHECK: ld1.2s { v0, v1 }, [x0], #16
+ %ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld1x2
+}
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x2(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i32_post_reg_ld1x2:
+;CHECK: ld1.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld1x2
+}
+
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32*)
+
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x2(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v2i64_post_imm_ld1x2:
+;CHECK: ld1.2d { v0, v1 }, [x0], #32
+ %ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld1x2
+}
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x2(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i64_post_reg_ld1x2:
+;CHECK: ld1.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld1x2
+}
+
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64*)
+
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x2(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v1i64_post_imm_ld1x2:
+;CHECK: ld1.1d { v0, v1 }, [x0], #16
+ %ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 2
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld1x2
+}
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x2(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1i64_post_reg_ld1x2:
+;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld1x2
+}
+
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64*)
+
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x2(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v4f32_post_imm_ld1x2:
+;CHECK: ld1.4s { v0, v1 }, [x0], #32
+ %ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld1x2
+}
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x2(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4f32_post_reg_ld1x2:
+;CHECK: ld1.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld1x2
+}
+
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float*)
+
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x2(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v2f32_post_imm_ld1x2:
+;CHECK: ld1.2s { v0, v1 }, [x0], #16
+ %ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld1x2
+}
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x2(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f32_post_reg_ld1x2:
+;CHECK: ld1.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld1x2
+}
+
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float*)
+
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x2(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v2f64_post_imm_ld1x2:
+;CHECK: ld1.2d { v0, v1 }, [x0], #32
+ %ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld1x2
+}
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x2(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f64_post_reg_ld1x2:
+;CHECK: ld1.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld1x2
+}
+
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double*)
+
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x2(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v1f64_post_imm_ld1x2:
+;CHECK: ld1.1d { v0, v1 }, [x0], #16
+ %ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 2
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld1x2
+}
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x2(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1f64_post_reg_ld1x2:
+;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld1x2
+}
+
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double*)
+
+
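+; Post-indexed ld1x3 tests: three consecutive vectors loaded without interleaving.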
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x3(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v16i8_post_imm_ld1x3:
+;CHECK: ld1.16b { v0, v1, v2 }, [x0], #48
+ %ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 48
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld1x3
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x3(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v16i8_post_reg_ld1x3:
+;CHECK: ld1.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld1x3
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8*)
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x3(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v8i8_post_imm_ld1x3:
+;CHECK: ld1.8b { v0, v1, v2 }, [x0], #24
+ %ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 24
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld1x3
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x3(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i8_post_reg_ld1x3:
+;CHECK: ld1.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld1x3
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8*)
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x3(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v8i16_post_imm_ld1x3:
+;CHECK: ld1.8h { v0, v1, v2 }, [x0], #48
+ %ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 24
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld1x3
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x3(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i16_post_reg_ld1x3:
+;CHECK: ld1.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld1x3
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16*)
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x3(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v4i16_post_imm_ld1x3:
+;CHECK: ld1.4h { v0, v1, v2 }, [x0], #24
+ %ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 12
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld1x3
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x3(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i16_post_reg_ld1x3:
+;CHECK: ld1.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld1x3
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16*)
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x3(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v4i32_post_imm_ld1x3:
+;CHECK: ld1.4s { v0, v1, v2 }, [x0], #48
+ %ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 12
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld1x3
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x3(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i32_post_reg_ld1x3:
+;CHECK: ld1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld1x3
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32*)
+
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x3(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v2i32_post_imm_ld1x3:
+;CHECK: ld1.2s { v0, v1, v2 }, [x0], #24
+ %ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 6
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld1x3
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x3(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i32_post_reg_ld1x3:
+;CHECK: ld1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld1x3
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32*)
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x3(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v2i64_post_imm_ld1x3:
+;CHECK: ld1.2d { v0, v1, v2 }, [x0], #48
+ %ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 6
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld1x3
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x3(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i64_post_reg_ld1x3:
+;CHECK: ld1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld1x3
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64*)
+
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x3(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v1i64_post_imm_ld1x3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
+ %ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 3
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld1x3
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x3(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1i64_post_reg_ld1x3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld1x3
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64*)
+
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x3(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v4f32_post_imm_ld1x3:
+;CHECK: ld1.4s { v0, v1, v2 }, [x0], #48
+ %ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 12
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld1x3
+}
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x3(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4f32_post_reg_ld1x3:
+;CHECK: ld1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld1x3
+}
+
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float*)
+
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x3(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v2f32_post_imm_ld1x3:
+;CHECK: ld1.2s { v0, v1, v2 }, [x0], #24
+ %ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 6
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld1x3
+}
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x3(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f32_post_reg_ld1x3:
+;CHECK: ld1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld1x3
+}
+
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float*)
+
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x3(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v2f64_post_imm_ld1x3:
+;CHECK: ld1.2d { v0, v1, v2 }, [x0], #48
+ %ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 6
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld1x3
+}
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x3(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f64_post_reg_ld1x3:
+;CHECK: ld1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld1x3
+}
+
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double*)
+
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x3(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v1f64_post_imm_ld1x3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
+ %ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 3
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld1x3
+}
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x3(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1f64_post_reg_ld1x3:
+;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld1x3
+}
+
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double*)
+
+
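+; Post-indexed ld1x4 tests: four consecutive vectors loaded without interleaving.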
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x4(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v16i8_post_imm_ld1x4:
+;CHECK: ld1.16b { v0, v1, v2, v3 }, [x0], #64
+ %ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 64
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld1x4
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x4(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v16i8_post_reg_ld1x4:
+;CHECK: ld1.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld1x4
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8*)
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x4(i8* %A, i8** %ptr) {
+;CHECK-LABEL: test_v8i8_post_imm_ld1x4:
+;CHECK: ld1.8b { v0, v1, v2, v3 }, [x0], #32
+ %ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld1x4
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x4(i8* %A, i8** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i8_post_reg_ld1x4:
+;CHECK: ld1.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld1x4
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8*)
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x4(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v8i16_post_imm_ld1x4:
+;CHECK: ld1.8h { v0, v1, v2, v3 }, [x0], #64
+ %ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 32
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld1x4
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x4(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v8i16_post_reg_ld1x4:
+;CHECK: ld1.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld1x4
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16*)
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x4(i16* %A, i16** %ptr) {
+;CHECK-LABEL: test_v4i16_post_imm_ld1x4:
+;CHECK: ld1.4h { v0, v1, v2, v3 }, [x0], #32
+ %ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld1x4
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x4(i16* %A, i16** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i16_post_reg_ld1x4:
+;CHECK: ld1.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld1x4
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16*)
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x4(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v4i32_post_imm_ld1x4:
+;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], #64
+ %ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 16
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld1x4
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x4(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4i32_post_reg_ld1x4:
+;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld1x4
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32*)
+
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x4(i32* %A, i32** %ptr) {
+;CHECK-LABEL: test_v2i32_post_imm_ld1x4:
+;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], #32
+ %ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld1x4
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x4(i32* %A, i32** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i32_post_reg_ld1x4:
+;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld1x4
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32*)
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x4(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v2i64_post_imm_ld1x4:
+;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], #64
+ %ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 8
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld1x4
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x4(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2i64_post_reg_ld1x4:
+;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld1x4
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64*)
+
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x4(i64* %A, i64** %ptr) {
+;CHECK-LABEL: test_v1i64_post_imm_ld1x4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
+ %ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld1x4
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x4(i64* %A, i64** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1i64_post_reg_ld1x4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld1x4
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64*)
+
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x4(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v4f32_post_imm_ld1x4:
+;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], #64
+ %ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 16
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld1x4
+}
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x4(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v4f32_post_reg_ld1x4:
+;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld1x4
+}
+
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float*)
+
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x4(float* %A, float** %ptr) {
+;CHECK-LABEL: test_v2f32_post_imm_ld1x4:
+;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], #32
+ %ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld1x4
+}
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x4(float* %A, float** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f32_post_reg_ld1x4:
+;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld1x4
+}
+
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float*)
+
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x4(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v2f64_post_imm_ld1x4:
+;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], #64
+ %ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 8
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld1x4
+}
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x4(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v2f64_post_reg_ld1x4:
+;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld1x4
+}
+
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double*)
+
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x4(double* %A, double** %ptr) {
+;CHECK-LABEL: test_v1f64_post_imm_ld1x4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
+ %ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld1x4
+}
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x4(double* %A, double** %ptr, i64 %inc) {
+;CHECK-LABEL: test_v1f64_post_reg_ld1x4:
+;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld1x4
+}
+
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double*)
+
+
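+; Post-indexed ld2r tests: load one two-element structure and replicate it to all lanes of two registers;
+; the immediate write-back equals the structure size in bytes (2, 4, 8, or 16).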
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2r(i8* %A, i8** %ptr) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_ld2r:
+;CHECK: ld2r.16b { v0, v1 }, [x0], #2
+ %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 2
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld2
+}
+
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_ld2r:
+;CHECK: ld2r.16b { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld2
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8*) nounwind readonly
+
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2r(i8* %A, i8** %ptr) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_ld2r:
+;CHECK: ld2r.8b { v0, v1 }, [x0], #2
+ %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 2
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld2
+}
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_ld2r:
+;CHECK: ld2r.8b { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld2
+}
+
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8*) nounwind readonly
+
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2r(i16* %A, i16** %ptr) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_ld2r:
+;CHECK: ld2r.8h { v0, v1 }, [x0], #4
+ %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 2
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld2
+}
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_ld2r:
+;CHECK: ld2r.8h { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld2
+}
+
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16*) nounwind readonly
+
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2r(i16* %A, i16** %ptr) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_ld2r:
+;CHECK: ld2r.4h { v0, v1 }, [x0], #4
+ %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 2
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld2
+}
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_ld2r:
+;CHECK: ld2r.4h { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld2
+}
+
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16*) nounwind readonly
+
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2r(i32* %A, i32** %ptr) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_ld2r:
+;CHECK: ld2r.4s { v0, v1 }, [x0], #8
+ %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 2
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld2
+}
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_ld2r:
+;CHECK: ld2r.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld2
+}
+
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32*) nounwind readonly
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2r(i32* %A, i32** %ptr) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_ld2r:
+;CHECK: ld2r.2s { v0, v1 }, [x0], #8
+ %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 2
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld2
+}
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_ld2r:
+;CHECK: ld2r.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld2
+}
+
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32*) nounwind readonly
+
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2r(i64* %A, i64** %ptr) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_ld2r:
+;CHECK: ld2r.2d { v0, v1 }, [x0], #16
+ %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 2
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld2
+}
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_ld2r:
+;CHECK: ld2r.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld2
+}
+
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64*) nounwind readonly
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2r(i64* %A, i64** %ptr) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_ld2r:
+;CHECK: ld2r.1d { v0, v1 }, [x0], #16
+ %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 2
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld2
+}
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_ld2r:
+;CHECK: ld2r.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld2
+}
+
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64*) nounwind readonly
+
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2r(float* %A, float** %ptr) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_ld2r:
+;CHECK: ld2r.4s { v0, v1 }, [x0], #8
+ %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 2
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld2
+}
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2r(float* %A, float** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_ld2r:
+;CHECK: ld2r.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld2
+}
+
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float*) nounwind readonly
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2r(float* %A, float** %ptr) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_ld2r:
+;CHECK: ld2r.2s { v0, v1 }, [x0], #8
+ %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 2
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld2
+}
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2r(float* %A, float** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_ld2r:
+;CHECK: ld2r.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld2
+}
+
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float*) nounwind readonly
+
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2r(double* %A, double** %ptr) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_ld2r:
+;CHECK: ld2r.2d { v0, v1 }, [x0], #16
+ %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 2
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld2
+}
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2r(double* %A, double** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_ld2r:
+;CHECK: ld2r.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld2
+}
+
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double*) nounwind readonly
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2r(double* %A, double** %ptr) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_ld2r:
+;CHECK: ld2r.1d { v0, v1 }, [x0], #16
+ %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 2
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld2
+}
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2r(double* %A, double** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_ld2r:
+;CHECK: ld2r.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld2
+}
+
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double*) nounwind readonly
+
+
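+; Post-indexed ld3r tests: load one three-element structure and replicate it to all lanes of three registers.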
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3r(i8* %A, i8** %ptr) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_ld3r:
+;CHECK: ld3r.16b { v0, v1, v2 }, [x0], #3
+ %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 3
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_ld3r:
+;CHECK: ld3r.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8*) nounwind readonly
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3r(i8* %A, i8** %ptr) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_ld3r:
+;CHECK: ld3r.8b { v0, v1, v2 }, [x0], #3
+ %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 3
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_ld3r:
+;CHECK: ld3r.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8*) nounwind readonly
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3r(i16* %A, i16** %ptr) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_ld3r:
+;CHECK: ld3r.8h { v0, v1, v2 }, [x0], #6
+ %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 3
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_ld3r:
+;CHECK: ld3r.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16*) nounwind readonly
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3r(i16* %A, i16** %ptr) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_ld3r:
+;CHECK: ld3r.4h { v0, v1, v2 }, [x0], #6
+ %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 3
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_ld3r:
+;CHECK: ld3r.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16*) nounwind readonly
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3r(i32* %A, i32** %ptr) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_ld3r:
+;CHECK: ld3r.4s { v0, v1, v2 }, [x0], #12
+ %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 3
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_ld3r:
+;CHECK: ld3r.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32*) nounwind readonly
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3r(i32* %A, i32** %ptr) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_ld3r:
+;CHECK: ld3r.2s { v0, v1, v2 }, [x0], #12
+ %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 3
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_ld3r:
+;CHECK: ld3r.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32*) nounwind readonly
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3r(i64* %A, i64** %ptr) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_ld3r:
+;CHECK: ld3r.2d { v0, v1, v2 }, [x0], #24
+ %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 3
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_ld3r:
+;CHECK: ld3r.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64*) nounwind readonly
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3r(i64* %A, i64** %ptr) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_ld3r:
+;CHECK: ld3r.1d { v0, v1, v2 }, [x0], #24
+ %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 3
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_ld3r:
+;CHECK: ld3r.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64*) nounwind readonly
+
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3r(float* %A, float** %ptr) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_ld3r:
+;CHECK: ld3r.4s { v0, v1, v2 }, [x0], #12
+ %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 3
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld3
+}
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3r(float* %A, float** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_ld3r:
+;CHECK: ld3r.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld3
+}
+
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float*) nounwind readonly
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3r(float* %A, float** %ptr) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_ld3r:
+;CHECK: ld3r.2s { v0, v1, v2 }, [x0], #12
+ %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 3
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld3
+}
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3r(float* %A, float** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_ld3r:
+;CHECK: ld3r.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld3
+}
+
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float*) nounwind readonly
+
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3r(double* %A, double** %ptr) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_ld3r:
+;CHECK: ld3r.2d { v0, v1, v2 }, [x0], #24
+ %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 3
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld3
+}
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3r(double* %A, double** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_ld3r:
+;CHECK: ld3r.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld3
+}
+
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double*) nounwind readonly
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3r(double* %A, double** %ptr) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_ld3r:
+;CHECK: ld3r.1d { v0, v1, v2 }, [x0], #24
+ %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 3
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld3
+}
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3r(double* %A, double** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_ld3r:
+;CHECK: ld3r.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld3
+}
+
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double*) nounwind readonly
+
+
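+; ld4r (load single 4-element structure and replicate to all lanes) with
+; post-increment writeback: the immediate form steps the base by 4 elements
+; (4 x element size bytes), the register form by %inc elements.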
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4r(i8* %A, i8** %ptr) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_ld4r:
+;CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0], #4
+ %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 4
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_ld4r:
+;CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8*) nounwind readonly
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4r(i8* %A, i8** %ptr) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_ld4r:
+;CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0], #4
+ %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 4
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_ld4r:
+;CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8*) nounwind readonly
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4r(i16* %A, i16** %ptr) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_ld4r:
+;CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0], #8
+ %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 4
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_ld4r:
+;CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16*) nounwind readonly
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4r(i16* %A, i16** %ptr) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_ld4r:
+;CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0], #8
+ %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i32 4
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_ld4r:
+;CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16*) nounwind readonly
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4r(i32* %A, i32** %ptr) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_ld4r:
+;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], #16
+ %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_ld4r:
+;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32*) nounwind readonly
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4r(i32* %A, i32** %ptr) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_ld4r:
+;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], #16
+ %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_ld4r:
+;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32*) nounwind readonly
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4r(i64* %A, i64** %ptr) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_ld4r:
+;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_ld4r:
+;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64*) nounwind readonly
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4r(i64* %A, i64** %ptr) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_ld4r:
+;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_ld4r:
+;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64*) nounwind readonly
+
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4r(float* %A, float** %ptr) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_ld4r:
+;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], #16
+ %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
+}
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4r(float* %A, float** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_ld4r:
+;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
+}
+
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float*) nounwind readonly
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4r(float* %A, float** %ptr) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_ld4r:
+;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], #16
+ %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
+}
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4r(float* %A, float** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_ld4r:
+;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
+}
+
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float*) nounwind readonly
+
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4r(double* %A, double** %ptr) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_ld4r:
+;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
+}
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4r(double* %A, double** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_ld4r:
+;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
+}
+
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double*) nounwind readonly
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4r(double* %A, double** %ptr) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_ld4r:
+;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], #32
+ %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
+}
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4r(double* %A, double** %ptr, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_ld4r:
+;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
+}
+
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double*) nounwind readonly
+
+
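+; ld2lane (load single 2-element structure into one lane) with post-increment
+; writeback. Every test loads lane 0; the immediate form advances the base by
+; 2 x element size bytes, the register form by %inc elements.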
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_ld2lane:
+;CHECK: ld2.b { v0, v1 }[0], [x0], #2
+ %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 2
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld2
+}
+
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2lane(i8* %A, i8** %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_ld2lane:
+;CHECK: ld2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld2
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_ld2lane:
+;CHECK: ld2.b { v0, v1 }[0], [x0], #2
+ %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 2
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld2
+}
+
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2lane(i8* %A, i8** %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_ld2lane:
+;CHECK: ld2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8> } %ld2
+}
+
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8>, <8 x i8>, i64, i8*) nounwind readonly
+
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_ld2lane:
+;CHECK: ld2.h { v0, v1 }[0], [x0], #4
+ %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 2
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld2
+}
+
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2lane(i16* %A, i16** %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_ld2lane:
+;CHECK: ld2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16> } %ld2
+}
+
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_ld2lane:
+;CHECK: ld2.h { v0, v1 }[0], [x0], #4
+ %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 2
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld2
+}
+
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2lane(i16* %A, i16** %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_ld2lane:
+;CHECK: ld2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16> } %ld2
+}
+
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16>, <4 x i16>, i64, i16*) nounwind readonly
+
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], #8
+ %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 2
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld2
+}
+
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2lane(i32* %A, i32** %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32> } %ld2
+}
+
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], #8
+ %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 2
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld2
+}
+
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2lane(i32* %A, i32** %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32> } %ld2
+}
+
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32>, <2 x i32>, i64, i32*) nounwind readonly
+
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], #16
+ %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i32 2
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld2
+}
+
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2lane(i64* %A, i64** %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64> } %ld2
+}
+
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], #16
+ %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i32 2
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld2
+}
+
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2lane(i64* %A, i64** %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64> } %ld2
+}
+
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*) nounwind readonly
+
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], #8
+ %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 2
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld2
+}
+
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2lane(float* %A, float** %ptr, i64 %inc, <4 x float> %B, <4 x float> %C) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float> } %ld2
+}
+
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float>, <4 x float>, i64, float*) nounwind readonly
+
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], #8
+ %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 2
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld2
+}
+
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2lane(float* %A, float** %ptr, i64 %inc, <2 x float> %B, <2 x float> %C) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_ld2lane:
+;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float> } %ld2
+}
+
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float>, <2 x float>, i64, float*) nounwind readonly
+
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], #16
+ %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i32 2
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld2
+}
+
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2lane(double* %A, double** %ptr, i64 %inc, <2 x double> %B, <2 x double> %C) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double> } %ld2
+}
+
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double>, <2 x double>, i64, double*) nounwind readonly
+
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], #16
+ %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i32 2
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld2
+}
+
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2lane(double* %A, double** %ptr, i64 %inc, <1 x double> %B, <1 x double> %C) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_ld2lane:
+;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double> } %ld2
+}
+
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double>, <1 x double>, i64, double*) nounwind readonly
+
+
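+; ld3lane: the same pattern for the 3-register lane loads, post-incrementing
+; by 3 x element size bytes (immediate form) or by a register (%inc).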
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_ld3lane:
+;CHECK: ld3.b { v0, v1, v2 }[0], [x0], #3
+ %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 3
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3lane(i8* %A, i8** %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_ld3lane:
+;CHECK: ld3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_ld3lane:
+;CHECK: ld3.b { v0, v1, v2 }[0], [x0], #3
+ %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 3
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3lane(i8* %A, i8** %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_ld3lane:
+;CHECK: ld3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i64, i8*) nounwind readonly
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_ld3lane:
+;CHECK: ld3.h { v0, v1, v2 }[0], [x0], #6
+ %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 3
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3lane(i16* %A, i16** %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_ld3lane:
+;CHECK: ld3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_ld3lane:
+;CHECK: ld3.h { v0, v1, v2 }[0], [x0], #6
+ %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 3
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3lane(i16* %A, i16** %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_ld3lane:
+;CHECK: ld3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i64, i16*) nounwind readonly
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
+ %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 3
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3lane(i32* %A, i32** %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
+ %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 3
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3lane(i32* %A, i32** %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i64, i32*) nounwind readonly
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
+ %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i32 3
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3lane(i64* %A, i64** %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
+ %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i32 3
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3lane(i64* %A, i64** %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*) nounwind readonly
+
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
+ %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 3
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld3
+}
+
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3lane(float* %A, float** %ptr, i64 %inc, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float> } %ld3
+}
+
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, i64, float*) nounwind readonly
+
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
+ %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 3
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld3
+}
+
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3lane(float* %A, float** %ptr, i64 %inc, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_ld3lane:
+;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float> } %ld3
+}
+
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, i64, float*) nounwind readonly
+
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
+ %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i32 3
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld3
+}
+
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3lane(double* %A, double** %ptr, i64 %inc, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double> } %ld3
+}
+
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, i64, double*) nounwind readonly
+
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
+ %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i32 3
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld3
+}
+
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3lane(double* %A, double** %ptr, i64 %inc, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_ld3lane:
+;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double> } %ld3
+}
+
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, i64, double*) nounwind readonly
+
+
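+; ld4lane: 4-register lane loads, post-incrementing by 4 x element size bytes
+; (immediate form) or by a register (%inc).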
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_ld4lane:
+;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], #4
+ %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 4
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
+}
+
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4lane(i8* %A, i8** %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_ld4lane:
+;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_ld4lane:
+;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], #4
+ %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 4
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
+}
+
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4lane(i8* %A, i8** %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_ld4lane:
+;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ store i8* %tmp, i8** %ptr
+ ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i64, i8*) nounwind readonly
+
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_ld4lane:
+;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], #8
+ %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 4
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
+}
+
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4lane(i16* %A, i16** %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_ld4lane:
+;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
+}
+
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_ld4lane:
+;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], #8
+ %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 4
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
+}
+
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4lane(i16* %A, i16** %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_ld4lane:
+;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ store i16* %tmp, i16** %ptr
+ ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
+}
+
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i64, i16*) nounwind readonly
+
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
+ %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
+}
+
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4lane(i32* %A, i32** %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
+}
+
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
+ %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
+}
+
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4lane(i32* %A, i32** %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ store i32* %tmp, i32** %ptr
+ ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
+}
+
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i64, i32*) nounwind readonly
+
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
+ %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
+}
+
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4lane(i64* %A, i64** %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
+ %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i32 4
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
+}
+
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4lane(i64* %A, i64** %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ store i64* %tmp, i64** %ptr
+ ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
+}
+
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, i64*) nounwind readonly
+
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
+ %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
+}
+
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4lane(float* %A, float** %ptr, i64 %inc, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
+}
+
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, float*) nounwind readonly
+
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
+ %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
+}
+
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4lane(float* %A, float** %ptr, i64 %inc, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_ld4lane:
+;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ store float* %tmp, float** %ptr
+ ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
+}
+
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, float*) nounwind readonly
+
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
+ %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
+}
+
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4lane(double* %A, double** %ptr, i64 %inc, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
+}
+
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, i64, double*) nounwind readonly
+
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
+ %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i32 4
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
+}
+
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4lane(double* %A, double** %ptr, i64 %inc, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_ld4lane:
+;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ store double* %tmp, double** %ptr
+ ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
+}
+
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, i64, double*) nounwind readonly
+
+
+define i8* @test_v16i8_post_imm_st2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st2:
+;CHECK: st2.16b { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st2:
+;CHECK: st2.16b { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*)
+
+
+define i8* @test_v8i8_post_imm_st2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st2:
+;CHECK: st2.8b { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i32 16
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st2:
+;CHECK: st2.8b { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
+
+
+define i16* @test_v8i16_post_imm_st2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st2:
+;CHECK: st2.8h { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st2:
+;CHECK: st2.8h { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*)
+
+
+define i16* @test_v4i16_post_imm_st2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st2:
+;CHECK: st2.4h { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i32 8
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st2:
+;CHECK: st2.4h { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*)
+
+
+define i32* @test_v4i32_post_imm_st2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st2:
+;CHECK: st2.4s { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st2:
+;CHECK: st2.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*)
+
+
+define i32* @test_v2i32_post_imm_st2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st2:
+;CHECK: st2.2s { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st2:
+;CHECK: st2.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*)
+
+
+define i64* @test_v2i64_post_imm_st2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st2:
+;CHECK: st2.2d { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 4
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st2:
+;CHECK: st2.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*)
+
+
+define i64* @test_v1i64_post_imm_st2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st2:
+;CHECK: st1.1d { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 2
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st2:
+;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*)
+
+
+define float* @test_v4f32_post_imm_st2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st2:
+;CHECK: st2.4s { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st2:
+;CHECK: st2.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
+
+
+define float* @test_v2f32_post_imm_st2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st2:
+;CHECK: st2.2s { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st2:
+;CHECK: st2.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float>, <2 x float>, float*)
+
+
+define double* @test_v2f64_post_imm_st2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st2:
+;CHECK: st2.2d { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 4
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st2:
+;CHECK: st2.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double>, <2 x double>, double*)
+
+
+define double* @test_v1f64_post_imm_st2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st2:
+;CHECK: st1.1d { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 2
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st2:
+;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double>, <1 x double>, double*)
+
+
+define i8* @test_v16i8_post_imm_st3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st3:
+;CHECK: st3.16b { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i32 48
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st3:
+;CHECK: st3.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
+
+
+define i8* @test_v8i8_post_imm_st3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st3:
+;CHECK: st3.8b { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i32 24
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st3:
+;CHECK: st3.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*)
+
+
+define i16* @test_v8i16_post_imm_st3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st3:
+;CHECK: st3.8h { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i32 24
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st3:
+;CHECK: st3.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*)
+
+
+define i16* @test_v4i16_post_imm_st3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st3:
+;CHECK: st3.4h { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i32 12
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st3:
+;CHECK: st3.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*)
+
+
+define i32* @test_v4i32_post_imm_st3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st3:
+;CHECK: st3.4s { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i32 12
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st3:
+;CHECK: st3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*)
+
+
+define i32* @test_v2i32_post_imm_st3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st3:
+;CHECK: st3.2s { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i32 6
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st3:
+;CHECK: st3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*)
+
+
+define i64* @test_v2i64_post_imm_st3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st3:
+;CHECK: st3.2d { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 6
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st3:
+;CHECK: st3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*)
+
+
+define i64* @test_v1i64_post_imm_st3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 3
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*)
+
+
+define float* @test_v4f32_post_imm_st3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st3:
+;CHECK: st3.4s { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i32 12
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st3:
+;CHECK: st3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
+
+
+define float* @test_v2f32_post_imm_st3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st3:
+;CHECK: st3.2s { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i32 6
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st3:
+;CHECK: st3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, float*)
+
+
+define double* @test_v2f64_post_imm_st3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st3:
+;CHECK: st3.2d { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 6
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st3:
+;CHECK: st3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, double*)
+
+
+define double* @test_v1f64_post_imm_st3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 3
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, double*)
+
+
+define i8* @test_v16i8_post_imm_st4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st4:
+;CHECK: st4.16b { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i32 64
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st4:
+;CHECK: st4.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
+
+
+define i8* @test_v8i8_post_imm_st4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st4:
+;CHECK: st4.8b { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st4:
+;CHECK: st4.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*)
+
+
+define i16* @test_v8i16_post_imm_st4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st4:
+;CHECK: st4.8h { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i32 32
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st4:
+;CHECK: st4.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*)
+
+
+define i16* @test_v4i16_post_imm_st4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st4:
+;CHECK: st4.4h { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st4:
+;CHECK: st4.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i16*)
+
+
+define i32* @test_v4i32_post_imm_st4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st4:
+;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i32 16
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st4:
+;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32*)
+
+
+define i32* @test_v2i32_post_imm_st4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st4:
+;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st4:
+;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*)
+
+
+define i64* @test_v2i64_post_imm_st4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st4:
+;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 8
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st4:
+;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64*)
+
+
+define i64* @test_v1i64_post_imm_st4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 4
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64*)
+
+
+define float* @test_v4f32_post_imm_st4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st4:
+;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i32 16
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st4:
+;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+
+
+define float* @test_v2f32_post_imm_st4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st4:
+;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st4:
+;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, float*)
+
+
+define double* @test_v2f64_post_imm_st4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st4:
+;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 8
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st4:
+;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, double*)
+
+
+define double* @test_v1f64_post_imm_st4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 4
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, double*)
+
+
+define i8* @test_v16i8_post_imm_st1x2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st1x2:
+;CHECK: st1.16b { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st1x2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st1x2:
+;CHECK: st1.16b { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*)
+
+
+define i8* @test_v8i8_post_imm_st1x2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st1x2:
+;CHECK: st1.8b { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i32 16
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st1x2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st1x2:
+;CHECK: st1.8b { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
+
+
+define i16* @test_v8i16_post_imm_st1x2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st1x2:
+;CHECK: st1.8h { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st1x2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st1x2:
+;CHECK: st1.8h { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*)
+
+
+define i16* @test_v4i16_post_imm_st1x2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st1x2:
+;CHECK: st1.4h { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i32 8
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st1x2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st1x2:
+;CHECK: st1.4h { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*)
+
+
+define i32* @test_v4i32_post_imm_st1x2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st1x2:
+;CHECK: st1.4s { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st1x2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st1x2:
+;CHECK: st1.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*)
+
+
+define i32* @test_v2i32_post_imm_st1x2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st1x2:
+;CHECK: st1.2s { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st1x2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st1x2:
+;CHECK: st1.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*)
+
+
+define i64* @test_v2i64_post_imm_st1x2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st1x2:
+;CHECK: st1.2d { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 4
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st1x2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st1x2:
+;CHECK: st1.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*)
+
+
+define i64* @test_v1i64_post_imm_st1x2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st1x2:
+;CHECK: st1.1d { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 2
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st1x2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st1x2:
+;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*)
+
+
+define float* @test_v4f32_post_imm_st1x2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st1x2:
+;CHECK: st1.4s { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st1x2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st1x2:
+;CHECK: st1.4s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
+
+
+define float* @test_v2f32_post_imm_st1x2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st1x2:
+;CHECK: st1.2s { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st1x2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st1x2:
+;CHECK: st1.2s { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float>, <2 x float>, float*)
+
+
+define double* @test_v2f64_post_imm_st1x2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st1x2:
+;CHECK: st1.2d { v0, v1 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 4
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st1x2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st1x2:
+;CHECK: st1.2d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double>, <2 x double>, double*)
+
+
+define double* @test_v1f64_post_imm_st1x2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st1x2:
+;CHECK: st1.1d { v0, v1 }, [x0], #16
+ call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 2
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st1x2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st1x2:
+;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double>, <1 x double>, double*)
+
+
+define i8* @test_v16i8_post_imm_st1x3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st1x3:
+;CHECK: st1.16b { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i32 48
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st1x3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st1x3:
+;CHECK: st1.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
+
+
+define i8* @test_v8i8_post_imm_st1x3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st1x3:
+;CHECK: st1.8b { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i32 24
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st1x3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st1x3:
+;CHECK: st1.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*)
+
+
+define i16* @test_v8i16_post_imm_st1x3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st1x3:
+;CHECK: st1.8h { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i32 24
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st1x3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st1x3:
+;CHECK: st1.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*)
+
+
+define i16* @test_v4i16_post_imm_st1x3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st1x3:
+;CHECK: st1.4h { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i32 12
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st1x3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st1x3:
+;CHECK: st1.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*)
+
+
+define i32* @test_v4i32_post_imm_st1x3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st1x3:
+;CHECK: st1.4s { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i32 12
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st1x3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st1x3:
+;CHECK: st1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*)
+
+
+define i32* @test_v2i32_post_imm_st1x3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st1x3:
+;CHECK: st1.2s { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i32 6
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st1x3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st1x3:
+;CHECK: st1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*)
+
+
+define i64* @test_v2i64_post_imm_st1x3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st1x3:
+;CHECK: st1.2d { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 6
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st1x3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st1x3:
+;CHECK: st1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*)
+
+
+define i64* @test_v1i64_post_imm_st1x3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st1x3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 3
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st1x3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st1x3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*)
+
+
+define float* @test_v4f32_post_imm_st1x3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st1x3:
+;CHECK: st1.4s { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i32 12
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st1x3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st1x3:
+;CHECK: st1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
+
+
+define float* @test_v2f32_post_imm_st1x3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st1x3:
+;CHECK: st1.2s { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i32 6
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st1x3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st1x3:
+;CHECK: st1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, float*)
+
+
+define double* @test_v2f64_post_imm_st1x3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st1x3:
+;CHECK: st1.2d { v0, v1, v2 }, [x0], #48
+ call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 6
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st1x3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st1x3:
+;CHECK: st1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, double*)
+
+
+define double* @test_v1f64_post_imm_st1x3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st1x3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
+ call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 3
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st1x3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st1x3:
+;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, double*)
+
+
+define i8* @test_v16i8_post_imm_st1x4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st1x4:
+;CHECK: st1.16b { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i32 64
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st1x4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st1x4:
+;CHECK: st1.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
+
+
+define i8* @test_v8i8_post_imm_st1x4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st1x4:
+;CHECK: st1.8b { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st1x4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st1x4:
+;CHECK: st1.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*)
+
+
+define i16* @test_v8i16_post_imm_st1x4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st1x4:
+;CHECK: st1.8h { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i32 32
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st1x4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st1x4:
+;CHECK: st1.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*)
+
+
+define i16* @test_v4i16_post_imm_st1x4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st1x4:
+;CHECK: st1.4h { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i32 16
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st1x4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st1x4:
+;CHECK: st1.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i16*)
+
+
+define i32* @test_v4i32_post_imm_st1x4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st1x4:
+;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i32 16
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st1x4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st1x4:
+;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32*)
+
+
+define i32* @test_v2i32_post_imm_st1x4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st1x4:
+;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i32 8
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st1x4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st1x4:
+;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*)
+
+
+define i64* @test_v2i64_post_imm_st1x4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st1x4:
+;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 8
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st1x4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st1x4:
+;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64*)
+
+
+define i64* @test_v1i64_post_imm_st1x4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st1x4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 4
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st1x4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st1x4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64*)
+
+
+define float* @test_v4f32_post_imm_st1x4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st1x4:
+;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i32 16
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st1x4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st1x4:
+;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+
+
+define float* @test_v2f32_post_imm_st1x4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st1x4:
+;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i32 8
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st1x4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st1x4:
+;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, float*)
+
+
+define double* @test_v2f64_post_imm_st1x4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st1x4:
+;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], #64
+ call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 8
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st1x4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st1x4:
+;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, double*)
+
+
+define double* @test_v1f64_post_imm_st1x4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st1x4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
+ call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 4
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st1x4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st1x4:
+;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, double*)
+
+
+define i8* @test_v16i8_post_imm_st2lanelane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) {
+ call void @llvm.aarch64.neon.st2lanelane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i64 1, i8* %A)
+ %tmp = getelementptr i8* %A, i32 2
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st2lanelane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) {
+ call void @llvm.aarch64.neon.st2lanelane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i64 1, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lanelane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i64, i8*) nounwind readnone
+
+
+define i8* @test_v16i8_post_imm_st2lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st2lane:
+;CHECK: st2.b { v0, v1 }[0], [x0], #2
+ call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 2
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st2lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st2lane:
+;CHECK: st2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*)
+
+
+define i8* @test_v8i8_post_imm_st2lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st2lane:
+;CHECK: st2.b { v0, v1 }[0], [x0], #2
+ call void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 2
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st2lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st2lane:
+;CHECK: st2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8>, <8 x i8>, i64, i8*)
+
+
+define i16* @test_v8i16_post_imm_st2lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st2lane:
+;CHECK: st2.h { v0, v1 }[0], [x0], #4
+ call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 2
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st2lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st2lane:
+;CHECK: st2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*)
+
+
+define i16* @test_v4i16_post_imm_st2lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st2lane:
+;CHECK: st2.h { v0, v1 }[0], [x0], #4
+ call void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 2
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st2lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st2lane:
+;CHECK: st2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16>, <4 x i16>, i64, i16*)
+
+
+define i32* @test_v4i32_post_imm_st2lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], #8
+ call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 2
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st2lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*)
+
+
+define i32* @test_v2i32_post_imm_st2lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], #8
+ call void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 2
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st2lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32>, <2 x i32>, i64, i32*)
+
+
+define i64* @test_v2i64_post_imm_st2lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 2
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st2lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*)
+
+
+define i64* @test_v1i64_post_imm_st2lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 2
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st2lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*)
+
+
+define float* @test_v4f32_post_imm_st2lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], #8
+ call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 2
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st2lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float>, <4 x float>, i64, float*)
+
+
+define float* @test_v2f32_post_imm_st2lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], #8
+ call void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 2
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st2lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st2lane:
+;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float>, <2 x float>, i64, float*)
+
+
+define double* @test_v2f64_post_imm_st2lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 2
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st2lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double>, <2 x double>, i64, double*)
+
+
+define double* @test_v1f64_post_imm_st2lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 2
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st2lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st2lane:
+;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double>, <1 x double>, i64, double*)
+
+
+define i8* @test_v16i8_post_imm_st3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st3lane:
+;CHECK: st3.b { v0, v1, v2 }[0], [x0], #3
+ call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 3
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st3lane:
+;CHECK: st3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*)
+
+
+define i8* @test_v8i8_post_imm_st3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st3lane:
+;CHECK: st3.b { v0, v1, v2 }[0], [x0], #3
+ call void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 3
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st3lane:
+;CHECK: st3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i64, i8*)
+
+
+define i16* @test_v8i16_post_imm_st3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st3lane:
+;CHECK: st3.h { v0, v1, v2 }[0], [x0], #6
+ call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 3
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st3lane:
+;CHECK: st3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*)
+
+
+define i16* @test_v4i16_post_imm_st3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st3lane:
+;CHECK: st3.h { v0, v1, v2 }[0], [x0], #6
+ call void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 3
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st3lane:
+;CHECK: st3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i64, i16*)
+
+
+define i32* @test_v4i32_post_imm_st3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
+ call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 3
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
+
+
+define i32* @test_v2i32_post_imm_st3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
+ call void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 3
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i64, i32*)
+
+
+define i64* @test_v2i64_post_imm_st3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
+ call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 3
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*)
+
+
+define i64* @test_v1i64_post_imm_st3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
+ call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 3
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
+
+
+define float* @test_v4f32_post_imm_st3lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
+ call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 3
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st3lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, i64, float*)
+
+
+define float* @test_v2f32_post_imm_st3lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
+ call void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 3
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st3lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st3lane:
+;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, i64, float*)
+
+
+define double* @test_v2f64_post_imm_st3lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
+ call void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 3
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st3lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, i64, double*)
+
+
+define double* @test_v1f64_post_imm_st3lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
+ call void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 3
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st3lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st3lane:
+;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, i64, double*)
+
+
+define i8* @test_v16i8_post_imm_st4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+;CHECK-LABEL: test_v16i8_post_imm_st4lane:
+;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], #4
+ call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 4
+ ret i8* %tmp
+}
+
+define i8* @test_v16i8_post_reg_st4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v16i8_post_reg_st4lane:
+;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*)
+
+
+define i8* @test_v8i8_post_imm_st4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+;CHECK-LABEL: test_v8i8_post_imm_st4lane:
+;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], #4
+ call void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i32 4
+ ret i8* %tmp
+}
+
+define i8* @test_v8i8_post_reg_st4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i8_post_reg_st4lane:
+;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
+ %tmp = getelementptr i8* %A, i64 %inc
+ ret i8* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i64, i8*)
+
+
+define i16* @test_v8i16_post_imm_st4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+;CHECK-LABEL: test_v8i16_post_imm_st4lane:
+;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], #8
+ call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 4
+ ret i16* %tmp
+}
+
+define i16* @test_v8i16_post_reg_st4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v8i16_post_reg_st4lane:
+;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*)
+
+
+define i16* @test_v4i16_post_imm_st4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+;CHECK-LABEL: test_v4i16_post_imm_st4lane:
+;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], #8
+ call void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i32 4
+ ret i16* %tmp
+}
+
+define i16* @test_v4i16_post_reg_st4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i16_post_reg_st4lane:
+;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
+ %tmp = getelementptr i16* %A, i64 %inc
+ ret i16* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i64, i16*)
+
+
+define i32* @test_v4i32_post_imm_st4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+;CHECK-LABEL: test_v4i32_post_imm_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ ret i32* %tmp
+}
+
+define i32* @test_v4i32_post_reg_st4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4i32_post_reg_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
+
+
+define i32* @test_v2i32_post_imm_st4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+;CHECK-LABEL: test_v2i32_post_imm_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i32 4
+ ret i32* %tmp
+}
+
+define i32* @test_v2i32_post_reg_st4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i32_post_reg_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
+ %tmp = getelementptr i32* %A, i64 %inc
+ ret i32* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i64, i32*)
+
+
+define i64* @test_v2i64_post_imm_st4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+;CHECK-LABEL: test_v2i64_post_imm_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
+ call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 4
+ ret i64* %tmp
+}
+
+define i64* @test_v2i64_post_reg_st4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2i64_post_reg_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*)
+
+
+define i64* @test_v1i64_post_imm_st4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+;CHECK-LABEL: test_v1i64_post_imm_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
+ call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 4
+ ret i64* %tmp
+}
+
+define i64* @test_v1i64_post_reg_st4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1i64_post_reg_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
+ %tmp = getelementptr i64* %A, i64 %inc
+ ret i64* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
+
+
+define float* @test_v4f32_post_imm_st4lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+;CHECK-LABEL: test_v4f32_post_imm_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ ret float* %tmp
+}
+
+define float* @test_v4f32_post_reg_st4lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v4f32_post_reg_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, float*)
+
+
+define float* @test_v2f32_post_imm_st4lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+;CHECK-LABEL: test_v2f32_post_imm_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
+ call void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i32 4
+ ret float* %tmp
+}
+
+define float* @test_v2f32_post_reg_st4lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f32_post_reg_st4lane:
+;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
+ %tmp = getelementptr float* %A, i64 %inc
+ ret float* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, float*)
+
+
+define double* @test_v2f64_post_imm_st4lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+;CHECK-LABEL: test_v2f64_post_imm_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
+ call void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 4
+ ret double* %tmp
+}
+
+define double* @test_v2f64_post_reg_st4lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v2f64_post_reg_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, i64, double*)
+
+
+define double* @test_v1f64_post_imm_st4lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+;CHECK-LABEL: test_v1f64_post_imm_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
+ call void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 4
+ ret double* %tmp
+}
+
+define double* @test_v1f64_post_reg_st4lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
+;CHECK-LABEL: test_v1f64_post_reg_st4lane:
+;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
+ call void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
+ %tmp = getelementptr double* %A, i64 %inc
+ ret double* %tmp
+}
+
+declare void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, i64, double*)
+
+define <16 x i8> @test_v16i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
+; CHECK-LABEL: test_v16i8_post_imm_ld1r:
+; CHECK: ld1r.16b { v0 }, [x0], #1
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
+ %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
+ %tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
+ %tmp5 = insertelement <16 x i8> %tmp4, i8 %tmp1, i32 3
+ %tmp6 = insertelement <16 x i8> %tmp5, i8 %tmp1, i32 4
+ %tmp7 = insertelement <16 x i8> %tmp6, i8 %tmp1, i32 5
+ %tmp8 = insertelement <16 x i8> %tmp7, i8 %tmp1, i32 6
+ %tmp9 = insertelement <16 x i8> %tmp8, i8 %tmp1, i32 7
+ %tmp10 = insertelement <16 x i8> %tmp9, i8 %tmp1, i32 8
+ %tmp11 = insertelement <16 x i8> %tmp10, i8 %tmp1, i32 9
+ %tmp12 = insertelement <16 x i8> %tmp11, i8 %tmp1, i32 10
+ %tmp13 = insertelement <16 x i8> %tmp12, i8 %tmp1, i32 11
+ %tmp14 = insertelement <16 x i8> %tmp13, i8 %tmp1, i32 12
+ %tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
+ %tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
+ %tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
+ %tmp18 = getelementptr i8* %bar, i64 1
+ store i8* %tmp18, i8** %ptr
+ ret <16 x i8> %tmp17
+}
+
+define <16 x i8> @test_v16i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v16i8_post_reg_ld1r:
+; CHECK: ld1r.16b { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
+ %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
+ %tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
+ %tmp5 = insertelement <16 x i8> %tmp4, i8 %tmp1, i32 3
+ %tmp6 = insertelement <16 x i8> %tmp5, i8 %tmp1, i32 4
+ %tmp7 = insertelement <16 x i8> %tmp6, i8 %tmp1, i32 5
+ %tmp8 = insertelement <16 x i8> %tmp7, i8 %tmp1, i32 6
+ %tmp9 = insertelement <16 x i8> %tmp8, i8 %tmp1, i32 7
+ %tmp10 = insertelement <16 x i8> %tmp9, i8 %tmp1, i32 8
+ %tmp11 = insertelement <16 x i8> %tmp10, i8 %tmp1, i32 9
+ %tmp12 = insertelement <16 x i8> %tmp11, i8 %tmp1, i32 10
+ %tmp13 = insertelement <16 x i8> %tmp12, i8 %tmp1, i32 11
+ %tmp14 = insertelement <16 x i8> %tmp13, i8 %tmp1, i32 12
+ %tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
+ %tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
+ %tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
+ %tmp18 = getelementptr i8* %bar, i64 %inc
+ store i8* %tmp18, i8** %ptr
+ ret <16 x i8> %tmp17
+}
+
+define <8 x i8> @test_v8i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
+; CHECK-LABEL: test_v8i8_post_imm_ld1r:
+; CHECK: ld1r.8b { v0 }, [x0], #1
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
+ %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
+ %tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
+ %tmp5 = insertelement <8 x i8> %tmp4, i8 %tmp1, i32 3
+ %tmp6 = insertelement <8 x i8> %tmp5, i8 %tmp1, i32 4
+ %tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
+ %tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
+ %tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
+ %tmp10 = getelementptr i8* %bar, i64 1
+ store i8* %tmp10, i8** %ptr
+ ret <8 x i8> %tmp9
+}
+
+define <8 x i8> @test_v8i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v8i8_post_reg_ld1r:
+; CHECK: ld1r.8b { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
+ %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
+ %tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
+ %tmp5 = insertelement <8 x i8> %tmp4, i8 %tmp1, i32 3
+ %tmp6 = insertelement <8 x i8> %tmp5, i8 %tmp1, i32 4
+ %tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
+ %tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
+ %tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
+ %tmp10 = getelementptr i8* %bar, i64 %inc
+ store i8* %tmp10, i8** %ptr
+ ret <8 x i8> %tmp9
+}
+
+define <8 x i16> @test_v8i16_post_imm_ld1r(i16* %bar, i16** %ptr) {
+; CHECK-LABEL: test_v8i16_post_imm_ld1r:
+; CHECK: ld1r.8h { v0 }, [x0], #2
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
+ %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
+ %tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
+ %tmp5 = insertelement <8 x i16> %tmp4, i16 %tmp1, i32 3
+ %tmp6 = insertelement <8 x i16> %tmp5, i16 %tmp1, i32 4
+ %tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
+ %tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
+ %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
+ %tmp10 = getelementptr i16* %bar, i64 1
+ store i16* %tmp10, i16** %ptr
+ ret <8 x i16> %tmp9
+}
+
+define <8 x i16> @test_v8i16_post_reg_ld1r(i16* %bar, i16** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v8i16_post_reg_ld1r:
+; CHECK: ld1r.8h { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
+ %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
+ %tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
+ %tmp5 = insertelement <8 x i16> %tmp4, i16 %tmp1, i32 3
+ %tmp6 = insertelement <8 x i16> %tmp5, i16 %tmp1, i32 4
+ %tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
+ %tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
+ %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
+ %tmp10 = getelementptr i16* %bar, i64 %inc
+ store i16* %tmp10, i16** %ptr
+ ret <8 x i16> %tmp9
+}
+
+define <4 x i16> @test_v4i16_post_imm_ld1r(i16* %bar, i16** %ptr) {
+; CHECK-LABEL: test_v4i16_post_imm_ld1r:
+; CHECK: ld1r.4h { v0 }, [x0], #2
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
+ %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
+ %tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
+ %tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
+ %tmp6 = getelementptr i16* %bar, i64 1
+ store i16* %tmp6, i16** %ptr
+ ret <4 x i16> %tmp5
+}
+
+define <4 x i16> @test_v4i16_post_reg_ld1r(i16* %bar, i16** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v4i16_post_reg_ld1r:
+; CHECK: ld1r.4h { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
+ %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
+ %tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
+ %tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
+ %tmp6 = getelementptr i16* %bar, i64 %inc
+ store i16* %tmp6, i16** %ptr
+ ret <4 x i16> %tmp5
+}
+
+define <4 x i32> @test_v4i32_post_imm_ld1r(i32* %bar, i32** %ptr) {
+; CHECK-LABEL: test_v4i32_post_imm_ld1r:
+; CHECK: ld1r.4s { v0 }, [x0], #4
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
+ %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
+ %tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
+ %tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
+ %tmp6 = getelementptr i32* %bar, i64 1
+ store i32* %tmp6, i32** %ptr
+ ret <4 x i32> %tmp5
+}
+
+define <4 x i32> @test_v4i32_post_reg_ld1r(i32* %bar, i32** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v4i32_post_reg_ld1r:
+; CHECK: ld1r.4s { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
+ %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
+ %tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
+ %tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
+ %tmp6 = getelementptr i32* %bar, i64 %inc
+ store i32* %tmp6, i32** %ptr
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i32> @test_v2i32_post_imm_ld1r(i32* %bar, i32** %ptr) {
+; CHECK-LABEL: test_v2i32_post_imm_ld1r:
+; CHECK: ld1r.2s { v0 }, [x0], #4
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
+ %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
+ %tmp4 = getelementptr i32* %bar, i64 1
+ store i32* %tmp4, i32** %ptr
+ ret <2 x i32> %tmp3
+}
+
+define <2 x i32> @test_v2i32_post_reg_ld1r(i32* %bar, i32** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v2i32_post_reg_ld1r:
+; CHECK: ld1r.2s { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
+ %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
+ %tmp4 = getelementptr i32* %bar, i64 %inc
+ store i32* %tmp4, i32** %ptr
+ ret <2 x i32> %tmp3
+}
+
+define <2 x i64> @test_v2i64_post_imm_ld1r(i64* %bar, i64** %ptr) {
+; CHECK-LABEL: test_v2i64_post_imm_ld1r:
+; CHECK: ld1r.2d { v0 }, [x0], #8
+ %tmp1 = load i64* %bar
+ %tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
+ %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
+ %tmp4 = getelementptr i64* %bar, i64 1
+ store i64* %tmp4, i64** %ptr
+ ret <2 x i64> %tmp3
+}
+
+define <2 x i64> @test_v2i64_post_reg_ld1r(i64* %bar, i64** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v2i64_post_reg_ld1r:
+; CHECK: ld1r.2d { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load i64* %bar
+ %tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
+ %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
+ %tmp4 = getelementptr i64* %bar, i64 %inc
+ store i64* %tmp4, i64** %ptr
+ ret <2 x i64> %tmp3
+}
+
+define <4 x float> @test_v4f32_post_imm_ld1r(float* %bar, float** %ptr) {
+; CHECK-LABEL: test_v4f32_post_imm_ld1r:
+; CHECK: ld1r.4s { v0 }, [x0], #4
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <4 x float> <float undef, float undef, float undef, float undef>, float %tmp1, i32 0
+ %tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
+ %tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
+ %tmp5 = insertelement <4 x float> %tmp4, float %tmp1, i32 3
+ %tmp6 = getelementptr float* %bar, i64 1
+ store float* %tmp6, float** %ptr
+ ret <4 x float> %tmp5
+}
+
+define <4 x float> @test_v4f32_post_reg_ld1r(float* %bar, float** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v4f32_post_reg_ld1r:
+; CHECK: ld1r.4s { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <4 x float> <float undef, float undef, float undef, float undef>, float %tmp1, i32 0
+ %tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
+ %tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
+ %tmp5 = insertelement <4 x float> %tmp4, float %tmp1, i32 3
+ %tmp6 = getelementptr float* %bar, i64 %inc
+ store float* %tmp6, float** %ptr
+ ret <4 x float> %tmp5
+}
+
+define <2 x float> @test_v2f32_post_imm_ld1r(float* %bar, float** %ptr) {
+; CHECK-LABEL: test_v2f32_post_imm_ld1r:
+; CHECK: ld1r.2s { v0 }, [x0], #4
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
+ %tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
+ %tmp4 = getelementptr float* %bar, i64 1
+ store float* %tmp4, float** %ptr
+ ret <2 x float> %tmp3
+}
+
+define <2 x float> @test_v2f32_post_reg_ld1r(float* %bar, float** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v2f32_post_reg_ld1r:
+; CHECK: ld1r.2s { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
+ %tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
+ %tmp4 = getelementptr float* %bar, i64 %inc
+ store float* %tmp4, float** %ptr
+ ret <2 x float> %tmp3
+}
+
+define <2 x double> @test_v2f64_post_imm_ld1r(double* %bar, double** %ptr) {
+; CHECK-LABEL: test_v2f64_post_imm_ld1r:
+; CHECK: ld1r.2d { v0 }, [x0], #8
+ %tmp1 = load double* %bar
+ %tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
+ %tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
+ %tmp4 = getelementptr double* %bar, i64 1
+ store double* %tmp4, double** %ptr
+ ret <2 x double> %tmp3
+}
+
+define <2 x double> @test_v2f64_post_reg_ld1r(double* %bar, double** %ptr, i64 %inc) {
+; CHECK-LABEL: test_v2f64_post_reg_ld1r:
+; CHECK: ld1r.2d { v0 }, [x0], x{{[0-9]+}}
+ %tmp1 = load double* %bar
+ %tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
+ %tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
+ %tmp4 = getelementptr double* %bar, i64 %inc
+ store double* %tmp4, double** %ptr
+ ret <2 x double> %tmp3
+}
+
+define <16 x i8> @test_v16i8_post_imm_ld1lane(i8* %bar, i8** %ptr, <16 x i8> %A) {
+; CHECK-LABEL: test_v16i8_post_imm_ld1lane:
+; CHECK: ld1.b { v0 }[1], [x0], #1
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
+ %tmp3 = getelementptr i8* %bar, i64 1
+ store i8* %tmp3, i8** %ptr
+ ret <16 x i8> %tmp2
+}
+
+define <16 x i8> @test_v16i8_post_reg_ld1lane(i8* %bar, i8** %ptr, i64 %inc, <16 x i8> %A) {
+; CHECK-LABEL: test_v16i8_post_reg_ld1lane:
+; CHECK: ld1.b { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
+ %tmp3 = getelementptr i8* %bar, i64 %inc
+ store i8* %tmp3, i8** %ptr
+ ret <16 x i8> %tmp2
+}
+
+define <8 x i8> @test_v8i8_post_imm_ld1lane(i8* %bar, i8** %ptr, <8 x i8> %A) {
+; CHECK-LABEL: test_v8i8_post_imm_ld1lane:
+; CHECK: ld1.b { v0 }[1], [x0], #1
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
+ %tmp3 = getelementptr i8* %bar, i64 1
+ store i8* %tmp3, i8** %ptr
+ ret <8 x i8> %tmp2
+}
+
+define <8 x i8> @test_v8i8_post_reg_ld1lane(i8* %bar, i8** %ptr, i64 %inc, <8 x i8> %A) {
+; CHECK-LABEL: test_v8i8_post_reg_ld1lane:
+; CHECK: ld1.b { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
+ %tmp3 = getelementptr i8* %bar, i64 %inc
+ store i8* %tmp3, i8** %ptr
+ ret <8 x i8> %tmp2
+}
+
+define <8 x i16> @test_v8i16_post_imm_ld1lane(i16* %bar, i16** %ptr, <8 x i16> %A) {
+; CHECK-LABEL: test_v8i16_post_imm_ld1lane:
+; CHECK: ld1.h { v0 }[1], [x0], #2
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
+ %tmp3 = getelementptr i16* %bar, i64 1
+ store i16* %tmp3, i16** %ptr
+ ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @test_v8i16_post_reg_ld1lane(i16* %bar, i16** %ptr, i64 %inc, <8 x i16> %A) {
+; CHECK-LABEL: test_v8i16_post_reg_ld1lane:
+; CHECK: ld1.h { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
+ %tmp3 = getelementptr i16* %bar, i64 %inc
+ store i16* %tmp3, i16** %ptr
+ ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @test_v4i16_post_imm_ld1lane(i16* %bar, i16** %ptr, <4 x i16> %A) {
+; CHECK-LABEL: test_v4i16_post_imm_ld1lane:
+; CHECK: ld1.h { v0 }[1], [x0], #2
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
+ %tmp3 = getelementptr i16* %bar, i64 1
+ store i16* %tmp3, i16** %ptr
+ ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @test_v4i16_post_reg_ld1lane(i16* %bar, i16** %ptr, i64 %inc, <4 x i16> %A) {
+; CHECK-LABEL: test_v4i16_post_reg_ld1lane:
+; CHECK: ld1.h { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
+ %tmp3 = getelementptr i16* %bar, i64 %inc
+ store i16* %tmp3, i16** %ptr
+ ret <4 x i16> %tmp2
+}
+
+define <4 x i32> @test_v4i32_post_imm_ld1lane(i32* %bar, i32** %ptr, <4 x i32> %A) {
+; CHECK-LABEL: test_v4i32_post_imm_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], #4
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
+ %tmp3 = getelementptr i32* %bar, i64 1
+ store i32* %tmp3, i32** %ptr
+ ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @test_v4i32_post_reg_ld1lane(i32* %bar, i32** %ptr, i64 %inc, <4 x i32> %A) {
+; CHECK-LABEL: test_v4i32_post_reg_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
+ %tmp3 = getelementptr i32* %bar, i64 %inc
+ store i32* %tmp3, i32** %ptr
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @test_v2i32_post_imm_ld1lane(i32* %bar, i32** %ptr, <2 x i32> %A) {
+; CHECK-LABEL: test_v2i32_post_imm_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], #4
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
+ %tmp3 = getelementptr i32* %bar, i64 1
+ store i32* %tmp3, i32** %ptr
+ ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @test_v2i32_post_reg_ld1lane(i32* %bar, i32** %ptr, i64 %inc, <2 x i32> %A) {
+; CHECK-LABEL: test_v2i32_post_reg_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
+ %tmp3 = getelementptr i32* %bar, i64 %inc
+ store i32* %tmp3, i32** %ptr
+ ret <2 x i32> %tmp2
+}
+
+define <2 x i64> @test_v2i64_post_imm_ld1lane(i64* %bar, i64** %ptr, <2 x i64> %A) {
+; CHECK-LABEL: test_v2i64_post_imm_ld1lane:
+; CHECK: ld1.d { v0 }[1], [x0], #8
+ %tmp1 = load i64* %bar
+ %tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
+ %tmp3 = getelementptr i64* %bar, i64 1
+ store i64* %tmp3, i64** %ptr
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @test_v2i64_post_reg_ld1lane(i64* %bar, i64** %ptr, i64 %inc, <2 x i64> %A) {
+; CHECK-LABEL: test_v2i64_post_reg_ld1lane:
+; CHECK: ld1.d { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load i64* %bar
+ %tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
+ %tmp3 = getelementptr i64* %bar, i64 %inc
+ store i64* %tmp3, i64** %ptr
+ ret <2 x i64> %tmp2
+}
+
+define <4 x float> @test_v4f32_post_imm_ld1lane(float* %bar, float** %ptr, <4 x float> %A) {
+; CHECK-LABEL: test_v4f32_post_imm_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], #4
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
+ %tmp3 = getelementptr float* %bar, i64 1
+ store float* %tmp3, float** %ptr
+ ret <4 x float> %tmp2
+}
+
+define <4 x float> @test_v4f32_post_reg_ld1lane(float* %bar, float** %ptr, i64 %inc, <4 x float> %A) {
+; CHECK-LABEL: test_v4f32_post_reg_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
+ %tmp3 = getelementptr float* %bar, i64 %inc
+ store float* %tmp3, float** %ptr
+ ret <4 x float> %tmp2
+}
+
+define <2 x float> @test_v2f32_post_imm_ld1lane(float* %bar, float** %ptr, <2 x float> %A) {
+; CHECK-LABEL: test_v2f32_post_imm_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], #4
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
+ %tmp3 = getelementptr float* %bar, i64 1
+ store float* %tmp3, float** %ptr
+ ret <2 x float> %tmp2
+}
+
+define <2 x float> @test_v2f32_post_reg_ld1lane(float* %bar, float** %ptr, i64 %inc, <2 x float> %A) {
+; CHECK-LABEL: test_v2f32_post_reg_ld1lane:
+; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
+ %tmp3 = getelementptr float* %bar, i64 %inc
+ store float* %tmp3, float** %ptr
+ ret <2 x float> %tmp2
+}
+
+define <2 x double> @test_v2f64_post_imm_ld1lane(double* %bar, double** %ptr, <2 x double> %A) {
+; CHECK-LABEL: test_v2f64_post_imm_ld1lane:
+; CHECK: ld1.d { v0 }[1], [x0], #8
+ %tmp1 = load double* %bar
+ %tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
+ %tmp3 = getelementptr double* %bar, i64 1
+ store double* %tmp3, double** %ptr
+ ret <2 x double> %tmp2
+}
+
+define <2 x double> @test_v2f64_post_reg_ld1lane(double* %bar, double** %ptr, i64 %inc, <2 x double> %A) {
+; CHECK-LABEL: test_v2f64_post_reg_ld1lane:
+; CHECK: ld1.d { v0 }[1], [x0], x{{[0-9]+}}
+ %tmp1 = load double* %bar
+ %tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
+ %tmp3 = getelementptr double* %bar, i64 %inc
+ store double* %tmp3, double** %ptr
+ ret <2 x double> %tmp2
+}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/arm64-inline-asm-error-I.ll b/test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
new file mode 100644
index 000000000000..a7aaf9e55d1b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
@@ -0,0 +1,11 @@
+; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+; Check for at least one invalid constant.
+; CHECK-ERRORS: error: invalid operand for inline asm constraint 'I'
+
+define i32 @constraint_I(i32 %i, i32 %j) nounwind ssp {
+entry:
+ %0 = tail call i32 asm sideeffect "add $0, $1, $2", "=r,r,I"(i32 %i, i32 4097) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll b/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
new file mode 100644
index 000000000000..077e1b80d93f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
@@ -0,0 +1,11 @@
+; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+; Check for at least one invalid constant.
+; CHECK-ERRORS: error: invalid operand for inline asm constraint 'J'
+
+define i32 @constraint_J(i32 %i, i32 %j) nounwind ssp {
+entry:
+ %0 = tail call i32 asm sideeffect "sub $0, $1, $2", "=r,r,J"(i32 %i, i32 2) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll b/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
new file mode 100644
index 000000000000..2a7f9619de55
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
@@ -0,0 +1,11 @@
+; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+; Check for at least one invalid constant.
+; CHECK-ERRORS: error: invalid operand for inline asm constraint 'K'
+
+define i32 @constraint_K(i32 %i, i32 %j) nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "eor $0, $1, $2", "=r,r,K"(i32 %i, i32 -1) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm-error-L.ll b/test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
new file mode 100644
index 000000000000..170194341951
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
@@ -0,0 +1,11 @@
+; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+; Check for at least one invalid constant.
+; CHECK-ERRORS: error: invalid operand for inline asm constraint 'L'
+
+define i32 @constraint_L(i32 %i, i32 %j) nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "eor $0, $1, $2", "=r,r,L"(i32 %i, i64 -1) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm-error-M.ll b/test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
new file mode 100644
index 000000000000..952bf6042c2d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
@@ -0,0 +1,11 @@
+; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+; Check for at least one invalid constant.
+; CHECK-ERRORS: error: invalid operand for inline asm constraint 'M'
+
+define i32 @constraint_M(i32 %i, i32 %j) nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "movk $0, $1", "=r,M"(i32 305418240) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll b/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
new file mode 100644
index 000000000000..b4a199f160ac
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
@@ -0,0 +1,11 @@
+; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+; Check for at least one invalid constant.
+; CHECK-ERRORS: error: invalid operand for inline asm constraint 'N'
+
+define i32 @constraint_N(i32 %i, i32 %j) nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "movk $0, $1", "=r,N"(i64 1311761352401879040) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll b/test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
new file mode 100644
index 000000000000..6bfce8f8f6a4
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
@@ -0,0 +1,11 @@
+; RUN: not llc < %s -march=arm64 2>&1 | FileCheck %s
+
+
+; The 'z' constraint allocates either xzr or wzr, but obviously an input of 1 is
+; incompatible.
+define void @test_bad_zero_reg() {
+ tail call void asm sideeffect "USE($0)", "z"(i32 1) nounwind
+; CHECK: error: invalid operand for inline asm constraint 'z'
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-inline-asm.ll b/test/CodeGen/AArch64/arm64-inline-asm.ll
new file mode 100644
index 000000000000..d76cca3f21c6
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-inline-asm.ll
@@ -0,0 +1,230 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -no-integrated-as | FileCheck %s
+
+; rdar://9167275
+
+define i32 @t1() nounwind ssp {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: mov {{w[0-9]+}}, 7
+ %0 = tail call i32 asm "mov ${0:w}, 7", "=r"() nounwind
+ ret i32 %0
+}
+
+define i64 @t2() nounwind ssp {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: mov {{x[0-9]+}}, 7
+ %0 = tail call i64 asm "mov $0, 7", "=r"() nounwind
+ ret i64 %0
+}
+
+define i64 @t3() nounwind ssp {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: mov {{w[0-9]+}}, 7
+ %0 = tail call i64 asm "mov ${0:w}, 7", "=r"() nounwind
+ ret i64 %0
+}
+
+; rdar://9281206
+
+define void @t4(i64 %op) nounwind {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: mov x0, {{x[0-9]+}}; svc #0
+ %0 = tail call i64 asm sideeffect "mov x0, $1; svc #0;", "=r,r,r,~{x0}"(i64 %op, i64 undef) nounwind
+ ret void
+}
+
+; rdar://9394290
+
+define float @t5(float %x) nounwind {
+entry:
+; CHECK-LABEL: t5:
+; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ %0 = tail call float asm "fadd ${0:s}, ${0:s}, ${0:s}", "=w,0"(float %x) nounwind
+ ret float %0
+}
+
+; rdar://9553599
+
+define zeroext i8 @t6(i8* %src) nounwind {
+entry:
+; CHECK-LABEL: t6:
+; CHECK: ldtrb {{w[0-9]+}}, [{{x[0-9]+}}]
+ %0 = tail call i8 asm "ldtrb ${0:w}, [$1]", "=r,r"(i8* %src) nounwind
+ ret i8 %0
+}
+
+define void @t7(i8* %f, i32 %g) nounwind {
+entry:
+ %f.addr = alloca i8*, align 8
+ store i8* %f, i8** %f.addr, align 8
+ ; CHECK-LABEL: t7:
+ ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+ call void asm "str ${1:w}, $0", "=*Q,r"(i8** %f.addr, i32 %g) nounwind
+ ret void
+}
+
+; rdar://10258229
+; ARM64TargetLowering::getRegForInlineAsmConstraint() should recognize 'v'
+; registers.
+define void @t8() nounwind ssp {
+entry:
+; CHECK-LABEL: t8:
+; CHECK: stp {{d[0-9]+}}, {{d[0-9]+}}, [sp, #-16]
+ tail call void asm sideeffect "nop", "~{v8}"() nounwind
+ ret void
+}
+
+define i32 @constraint_I(i32 %i, i32 %j) nounwind {
+entry:
+ ; CHECK-LABEL: constraint_I:
+ %0 = tail call i32 asm sideeffect "add ${0:w}, ${1:w}, $2", "=r,r,I"(i32 %i, i32 16773120) nounwind
+ ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #16773120
+ %1 = tail call i32 asm sideeffect "add ${0:w}, ${1:w}, $2", "=r,r,I"(i32 %i, i32 4096) nounwind
+ ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #4096
+ ret i32 %1
+}
+
+define i32 @constraint_J(i32 %i, i32 %j) nounwind {
+entry:
+ ; CHECK-LABEL: constraint_J:
+ %0 = tail call i32 asm sideeffect "sub ${0:w}, ${1:w}, $2", "=r,r,J"(i32 %i, i32 -16773120) nounwind
+ ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4278194176
+ %1 = tail call i32 asm sideeffect "sub ${0:w}, ${1:w}, $2", "=r,r,J"(i32 %i, i32 -1) nounwind
+ ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4294967295
+ ret i32 %1
+}
+
+define i32 @constraint_KL(i32 %i, i32 %j) nounwind {
+entry:
+ ; CHECK-LABEL: constraint_KL:
+ %0 = tail call i32 asm sideeffect "eor ${0:w}, ${1:w}, $2", "=r,r,K"(i32 %i, i32 255) nounwind
+ ; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, #255
+ %1 = tail call i32 asm sideeffect "eor ${0:w}, ${1:w}, $2", "=r,r,L"(i32 %i, i64 16711680) nounwind
+ ; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, #16711680
+ ret i32 %1
+}
+
+define i32 @constraint_MN(i32 %i, i32 %j) nounwind {
+entry:
+ ; CHECK-LABEL: constraint_MN:
+ %0 = tail call i32 asm sideeffect "movk ${0:w}, $1", "=r,M"(i32 65535) nounwind
+ ; CHECK: movk {{w[0-9]+}}, #65535
+ %1 = tail call i32 asm sideeffect "movz ${0:w}, $1", "=r,N"(i64 0) nounwind
+ ; CHECK: movz {{w[0-9]+}}, #0
+ ret i32 %1
+}
+
+define void @t9() nounwind {
+entry:
+ ; CHECK-LABEL: t9:
+ %data = alloca <2 x double>, align 16
+ %0 = load <2 x double>* %data, align 16
+ call void asm sideeffect "mov.2d v4, $0\0A", "w,~{v4}"(<2 x double> %0) nounwind
+ ; CHECK: mov.2d v4, {{v[0-9]+}}
+ ret void
+}
+
+define void @t10() nounwind {
+entry:
+ ; CHECK-LABEL: t10:
+ %data = alloca <2 x float>, align 8
+ %a = alloca [2 x float], align 4
+ %arraydecay = getelementptr inbounds [2 x float]* %a, i32 0, i32 0
+ %0 = load <2 x float>* %data, align 8
+ call void asm sideeffect "ldr ${1:q}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
+ ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
+ call void asm sideeffect "ldr ${1:d}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
+ ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}]
+ call void asm sideeffect "ldr ${1:s}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
+ ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}]
+ call void asm sideeffect "ldr ${1:h}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
+ ; CHECK: ldr {{h[0-9]+}}, [{{x[0-9]+}}]
+ call void asm sideeffect "ldr ${1:b}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
+ ; CHECK: ldr {{b[0-9]+}}, [{{x[0-9]+}}]
+ ret void
+}
+
+define void @t11() nounwind {
+entry:
+ ; CHECK-LABEL: t11:
+ %a = alloca i32, align 4
+ %0 = load i32* %a, align 4
+ call void asm sideeffect "mov ${1:x}, ${0:x}\0A", "r,i"(i32 %0, i32 0) nounwind
+ ; CHECK: mov xzr, {{x[0-9]+}}
+ %1 = load i32* %a, align 4
+ call void asm sideeffect "mov ${1:w}, ${0:w}\0A", "r,i"(i32 %1, i32 0) nounwind
+ ; CHECK: mov wzr, {{w[0-9]+}}
+ ret void
+}
+
+define void @t12() nounwind {
+entry:
+ ; CHECK-LABEL: t12:
+ %data = alloca <4 x float>, align 16
+ %0 = load <4 x float>* %data, align 16
+ call void asm sideeffect "mov.2d v4, $0\0A", "x,~{v4}"(<4 x float> %0) nounwind
+ ; CHECK: mov.2d v4, {{v([0-9])|(1[0-5])}}
+ ret void
+}
+
+define void @t13() nounwind {
+entry:
+ ; CHECK-LABEL: t13:
+ tail call void asm sideeffect "mov x4, $0\0A", "N"(i64 1311673391471656960) nounwind
+ ; CHECK: mov x4, #1311673391471656960
+ tail call void asm sideeffect "mov x4, $0\0A", "N"(i64 -4662) nounwind
+ ; CHECK: mov x4, #-4662
+ tail call void asm sideeffect "mov x4, $0\0A", "N"(i64 4660) nounwind
+ ; CHECK: mov x4, #4660
+ call void asm sideeffect "mov x4, $0\0A", "N"(i64 -71777214294589696) nounwind
+ ; CHECK: mov x4, #-71777214294589696
+ ret void
+}
+
+define void @t14() nounwind {
+entry:
+ ; CHECK-LABEL: t14:
+ tail call void asm sideeffect "mov w4, $0\0A", "M"(i32 305397760) nounwind
+ ; CHECK: mov w4, #305397760
+ tail call void asm sideeffect "mov w4, $0\0A", "M"(i32 -4662) nounwind
+ ; CHECK: mov w4, #4294962634
+ tail call void asm sideeffect "mov w4, $0\0A", "M"(i32 4660) nounwind
+ ; CHECK: mov w4, #4660
+ call void asm sideeffect "mov w4, $0\0A", "M"(i32 -16711936) nounwind
+ ; CHECK: mov w4, #4278255360
+ ret void
+}
+
+define void @t15() nounwind {
+entry:
+ %0 = tail call double asm sideeffect "fmov $0, d8", "=r"() nounwind
+ ; CHECK: fmov {{x[0-9]+}}, d8
+ ret void
+}
+
+; rdar://problem/14285178
+
+define void @test_zero_reg(i32* %addr) {
+; CHECK-LABEL: test_zero_reg:
+
+ tail call void asm sideeffect "USE($0)", "z"(i32 0) nounwind
+; CHECK: USE(xzr)
+
+ tail call void asm sideeffect "USE(${0:w})", "zr"(i32 0)
+; CHECK: USE(wzr)
+
+ tail call void asm sideeffect "USE(${0:w})", "zr"(i32 1)
+; CHECK: orr [[VAL1:w[0-9]+]], wzr, #0x1
+; CHECK: USE([[VAL1]])
+
+ tail call void asm sideeffect "USE($0), USE($1)", "z,z"(i32 0, i32 0) nounwind
+; CHECK: USE(xzr), USE(xzr)
+
+ tail call void asm sideeffect "USE($0), USE(${1:w})", "z,z"(i32 0, i32 0) nounwind
+; CHECK: USE(xzr), USE(wzr)
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-join-reserved.ll b/test/CodeGen/AArch64/arm64-join-reserved.ll
new file mode 100644
index 000000000000..e99168b5eba3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-join-reserved.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -verify-machineinstrs | FileCheck %s
+target triple = "arm64-apple-macosx10"
+
+; Make sure that a store to [sp] addresses off sp directly.
+; A move isn't necessary.
+; <rdar://problem/11492712>
+; CHECK-LABEL: g:
+; CHECK: str xzr, [sp]
+; CHECK: bl
+; CHECK: ret
+define void @g() nounwind ssp {
+entry:
+ tail call void (i32, ...)* @f(i32 0, i32 0) nounwind
+ ret void
+}
+
+declare void @f(i32, ...)
diff --git a/test/CodeGen/AArch64/arm64-jumptable.ll b/test/CodeGen/AArch64/arm64-jumptable.ll
new file mode 100644
index 000000000000..4635cfe5858d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-jumptable.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-LINUX
+; <rdar://11417675>
+
+define void @sum(i32* %to) {
+entry:
+ switch i32 undef, label %exit [
+ i32 1, label %bb1
+ i32 2, label %bb2
+ i32 3, label %bb3
+ i32 4, label %bb4
+ ]
+bb1:
+ store i32 undef, i32* %to
+ br label %exit
+bb2:
+ store i32 undef, i32* %to
+ br label %exit
+bb3:
+ store i32 undef, i32* %to
+ br label %exit
+bb4:
+ store i32 undef, i32* %to
+ br label %exit
+exit:
+ ret void
+}
+
+; CHECK-LABEL: sum:
+; CHECK: adrp {{x[0-9]+}}, LJTI0_0@PAGE
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, LJTI0_0@PAGEOFF
+
+; CHECK-LINUX-LABEL: sum:
+; CHECK-LINUX: adrp {{x[0-9]+}}, .LJTI0_0
+; CHECK-LINUX: add {{x[0-9]+}}, {{x[0-9]+}}, :lo12:.LJTI0_0
diff --git a/test/CodeGen/AArch64/arm64-large-frame.ll b/test/CodeGen/AArch64/arm64-large-frame.ll
new file mode 100644
index 000000000000..5a53da693882
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-large-frame.ll
@@ -0,0 +1,69 @@
+; RUN: llc -verify-machineinstrs -mtriple=arm64-none-linux-gnu -disable-fp-elim < %s | FileCheck %s
+declare void @use_addr(i8*)
+
+@addr = global i8* null
+
+define void @test_bigframe() {
+; CHECK-LABEL: test_bigframe:
+; CHECK: .cfi_startproc
+
+ %var1 = alloca i8, i32 20000000
+ %var2 = alloca i8, i32 16
+ %var3 = alloca i8, i32 20000000
+
+; CHECK: sub sp, sp, #4095, lsl #12
+; CHECK: sub sp, sp, #4095, lsl #12
+; CHECK: sub sp, sp, #1575, lsl #12
+; CHECK: sub sp, sp, #2576
+; CHECK: .cfi_def_cfa_offset 40000032
+
+
+; CHECK: add [[TMP:x[0-9]+]], sp, #4095, lsl #12
+; CHECK: add [[TMP1:x[0-9]+]], [[TMP]], #787, lsl #12
+; CHECK: add {{x[0-9]+}}, [[TMP1]], #3344
+ store volatile i8* %var1, i8** @addr
+
+ %var1plus2 = getelementptr i8* %var1, i32 2
+ store volatile i8* %var1plus2, i8** @addr
+
+; CHECK: add [[TMP:x[0-9]+]], sp, #4095, lsl #12
+; CHECK: add [[TMP1:x[0-9]+]], [[TMP]], #787, lsl #12
+; CHECK: add {{x[0-9]+}}, [[TMP1]], #3328
+ store volatile i8* %var2, i8** @addr
+
+ %var2plus2 = getelementptr i8* %var2, i32 2
+ store volatile i8* %var2plus2, i8** @addr
+
+ store volatile i8* %var3, i8** @addr
+
+ %var3plus2 = getelementptr i8* %var3, i32 2
+ store volatile i8* %var3plus2, i8** @addr
+
+; CHECK: add sp, sp, #4095, lsl #12
+; CHECK: add sp, sp, #4095, lsl #12
+; CHECK: add sp, sp, #1575, lsl #12
+; CHECK: add sp, sp, #2576
+; CHECK: .cfi_endproc
+ ret void
+}
+
+define void @test_mediumframe() {
+; CHECK-LABEL: test_mediumframe:
+ %var1 = alloca i8, i32 1000000
+ %var2 = alloca i8, i32 16
+ %var3 = alloca i8, i32 1000000
+; CHECK: sub sp, sp, #488, lsl #12
+; CHECK-NEXT: sub sp, sp, #1168
+
+ store volatile i8* %var1, i8** @addr
+; CHECK: add [[VAR1ADDR:x[0-9]+]], sp, #244, lsl #12
+; CHECK: add [[VAR1ADDR]], [[VAR1ADDR]], #592
+
+; CHECK: add [[VAR2ADDR:x[0-9]+]], sp, #244, lsl #12
+; CHECK: add [[VAR2ADDR]], [[VAR2ADDR]], #576
+
+ store volatile i8* %var2, i8** @addr
+; CHECK: add sp, sp, #488, lsl #12
+; CHECK: add sp, sp, #1168
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-ld1.ll b/test/CodeGen/AArch64/arm64-ld1.ll
new file mode 100644
index 000000000000..72d808ccc347
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ld1.ll
@@ -0,0 +1,1345 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+
+%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+
+define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind {
+; CHECK-LABEL: ld2_8b
+; Make sure we are loading into the results defined by the ABI (i.e., v0, v1)
+; and from the argument of the function, also defined by the ABI (i.e., x0)
+; CHECK ld2.8b { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x8x2_t %tmp2
+}
+
+define %struct.__neon_int8x8x3_t @ld3_8b(i8* %A) nounwind {
+; CHECK-LABEL: ld3_8b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3.8b { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x8x3_t %tmp2
+}
+
+define %struct.__neon_int8x8x4_t @ld4_8b(i8* %A) nounwind {
+; CHECK-LABEL: ld4_8b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4.8b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x8x4_t %tmp2
+}
+
+declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8*) nounwind readonly
+
+%struct.__neon_int8x16x2_t = type { <16 x i8>, <16 x i8> }
+%struct.__neon_int8x16x3_t = type { <16 x i8>, <16 x i8>, <16 x i8> }
+%struct.__neon_int8x16x4_t = type { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> }
+
+define %struct.__neon_int8x16x2_t @ld2_16b(i8* %A) nounwind {
+; CHECK-LABEL: ld2_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2.16b { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x16x2_t %tmp2
+}
+
+define %struct.__neon_int8x16x3_t @ld3_16b(i8* %A) nounwind {
+; CHECK-LABEL: ld3_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3.16b { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x16x3_t %tmp2
+}
+
+define %struct.__neon_int8x16x4_t @ld4_16b(i8* %A) nounwind {
+; CHECK-LABEL: ld4_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4.16b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x16x4_t %tmp2
+}
+
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*) nounwind readonly
+
+%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
+%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
+%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
+
+define %struct.__neon_int16x4x2_t @ld2_4h(i16* %A) nounwind {
+; CHECK-LABEL: ld2_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2.4h { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x4x2_t %tmp2
+}
+
+define %struct.__neon_int16x4x3_t @ld3_4h(i16* %A) nounwind {
+; CHECK-LABEL: ld3_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3.4h { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x4x3_t %tmp2
+}
+
+define %struct.__neon_int16x4x4_t @ld4_4h(i16* %A) nounwind {
+; CHECK-LABEL: ld4_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4.4h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x4x4_t %tmp2
+}
+
+declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*) nounwind readonly
+
+%struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
+%struct.__neon_int16x8x3_t = type { <8 x i16>, <8 x i16>, <8 x i16> }
+%struct.__neon_int16x8x4_t = type { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }
+
+define %struct.__neon_int16x8x2_t @ld2_8h(i16* %A) nounwind {
+; CHECK-LABEL: ld2_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2.8h { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x8x2_t %tmp2
+}
+
+define %struct.__neon_int16x8x3_t @ld3_8h(i16* %A) nounwind {
+; CHECK-LABEL: ld3_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3.8h { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x8x3_t %tmp2
+}
+
+define %struct.__neon_int16x8x4_t @ld4_8h(i16* %A) nounwind {
+; CHECK-LABEL: ld4_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4.8h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x8x4_t %tmp2
+}
+
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16*) nounwind readonly
+
+%struct.__neon_int32x2x2_t = type { <2 x i32>, <2 x i32> }
+%struct.__neon_int32x2x3_t = type { <2 x i32>, <2 x i32>, <2 x i32> }
+%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
+
+define %struct.__neon_int32x2x2_t @ld2_2s(i32* %A) nounwind {
+; CHECK-LABEL: ld2_2s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2.2s { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x2x2_t %tmp2
+}
+
+define %struct.__neon_int32x2x3_t @ld3_2s(i32* %A) nounwind {
+; CHECK-LABEL: ld3_2s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3.2s { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x2x3_t %tmp2
+}
+
+define %struct.__neon_int32x2x4_t @ld4_2s(i32* %A) nounwind {
+; CHECK-LABEL: ld4_2s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4.2s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x2x4_t %tmp2
+}
+
+declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32*) nounwind readonly
+
+%struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }
+%struct.__neon_int32x4x3_t = type { <4 x i32>, <4 x i32>, <4 x i32> }
+%struct.__neon_int32x4x4_t = type { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
+
+define %struct.__neon_int32x4x2_t @ld2_4s(i32* %A) nounwind {
+; CHECK-LABEL: ld2_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2.4s { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x4x2_t %tmp2
+}
+
+define %struct.__neon_int32x4x3_t @ld3_4s(i32* %A) nounwind {
+; CHECK-LABEL: ld3_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3.4s { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x4x3_t %tmp2
+}
+
+define %struct.__neon_int32x4x4_t @ld4_4s(i32* %A) nounwind {
+; CHECK-LABEL: ld4_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4.4s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x4x4_t %tmp2
+}
+
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32*) nounwind readonly
+
+%struct.__neon_int64x2x2_t = type { <2 x i64>, <2 x i64> }
+%struct.__neon_int64x2x3_t = type { <2 x i64>, <2 x i64>, <2 x i64> }
+%struct.__neon_int64x2x4_t = type { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> }
+
+define %struct.__neon_int64x2x2_t @ld2_2d(i64* %A) nounwind {
+; CHECK-LABEL: ld2_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2.2d { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x2x2_t %tmp2
+}
+
+define %struct.__neon_int64x2x3_t @ld3_2d(i64* %A) nounwind {
+; CHECK-LABEL: ld3_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3.2d { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x2x3_t %tmp2
+}
+
+define %struct.__neon_int64x2x4_t @ld4_2d(i64* %A) nounwind {
+; CHECK-LABEL: ld4_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4.2d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x2x4_t %tmp2
+}
+
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64*) nounwind readonly
+
+%struct.__neon_int64x1x2_t = type { <1 x i64>, <1 x i64> }
+%struct.__neon_int64x1x3_t = type { <1 x i64>, <1 x i64>, <1 x i64> }
+%struct.__neon_int64x1x4_t = type { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> }
+
+
+define %struct.__neon_int64x1x2_t @ld2_1di64(i64* %A) nounwind {
+; CHECK-LABEL: ld2_1di64
+; Make sure we are using the operands defined by the ABI
+; CHECK ld1.1d { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x1x2_t %tmp2
+}
+
+define %struct.__neon_int64x1x3_t @ld3_1di64(i64* %A) nounwind {
+; CHECK-LABEL: ld3_1di64
+; Make sure we are using the operands defined by the ABI
+; CHECK ld1.1d { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x1x3_t %tmp2
+}
+
+define %struct.__neon_int64x1x4_t @ld4_1di64(i64* %A) nounwind {
+; CHECK-LABEL: ld4_1di64
+; Make sure we are using the operands defined by the ABI
+; CHECK ld1.1d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x1x4_t %tmp2
+}
+
+
+declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64*) nounwind readonly
+
+%struct.__neon_float64x1x2_t = type { <1 x double>, <1 x double> }
+%struct.__neon_float64x1x3_t = type { <1 x double>, <1 x double>, <1 x double> }
+%struct.__neon_float64x1x4_t = type { <1 x double>, <1 x double>, <1 x double>, <1 x double> }
+
+
+define %struct.__neon_float64x1x2_t @ld2_1df64(double* %A) nounwind {
+; CHECK-LABEL: ld2_1df64
+; Make sure we are using the operands defined by the ABI
+; CHECK ld1.1d { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
+ ret %struct.__neon_float64x1x2_t %tmp2
+}
+
+define %struct.__neon_float64x1x3_t @ld3_1df64(double* %A) nounwind {
+; CHECK-LABEL: ld3_1df64
+; Make sure we are using the operands defined by the ABI
+; CHECK ld1.1d { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
+ ret %struct.__neon_float64x1x3_t %tmp2
+}
+
+define %struct.__neon_float64x1x4_t @ld4_1df64(double* %A) nounwind {
+; CHECK-LABEL: ld4_1df64
+; Make sure we are using the operands defined by the ABI
+; CHECK ld1.1d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
+ ret %struct.__neon_float64x1x4_t %tmp2
+}
+
+declare %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double*) nounwind readonly
+
+
+define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, i8* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld2lane_16b
+; CHECK ld2.b { v0, v1 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, i64 1, i8* %A)
+ ret %struct.__neon_int8x16x2_t %tmp2
+}
+
+define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i8* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld3lane_16b
+; CHECK ld3.b { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i64 1, i8* %A)
+ ret %struct.__neon_int8x16x3_t %tmp2
+}
+
+define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i8* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld4lane_16b
+; CHECK ld4.b { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i64 1, i8* %A)
+ ret %struct.__neon_int8x16x4_t %tmp2
+}
+
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+
+define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, i16* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld2lane_8h
+; CHECK ld2.h { v0, v1 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, i64 1, i16* %A)
+ ret %struct.__neon_int16x8x2_t %tmp2
+}
+
+define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i16* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld3lane_8h
+; CHECK ld3.h { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i64 1, i16* %A)
+ ret %struct.__neon_int16x8x3_t %tmp2
+}
+
+define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i16* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld4lane_8h
+; CHECK ld4.h { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i64 1, i16* %A)
+ ret %struct.__neon_int16x8x4_t %tmp2
+}
+
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+
+define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, i32* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld2lane_4s
+; CHECK ld2.s { v0, v1 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, i64 1, i32* %A)
+ ret %struct.__neon_int32x4x2_t %tmp2
+}
+
+define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i32* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld3lane_4s
+; CHECK ld3.s { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i64 1, i32* %A)
+ ret %struct.__neon_int32x4x3_t %tmp2
+}
+
+define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i32* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld4lane_4s
+; CHECK ld4.s { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i64 1, i32* %A)
+ ret %struct.__neon_int32x4x4_t %tmp2
+}
+
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+
+define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, i64* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld2lane_2d
+; CHECK ld2.d { v0, v1 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, i64 1, i64* %A)
+ ret %struct.__neon_int64x2x2_t %tmp2
+}
+
+define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld3lane_2d
+; CHECK ld3.d { v0, v1, v2 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64 1, i64* %A)
+ ret %struct.__neon_int64x2x3_t %tmp2
+}
+
+define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64* %A) nounwind {
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld4lane_2d
+; CHECK ld4.d { v0, v1, v2, v3 }[1], [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64 1, i64* %A)
+ ret %struct.__neon_int64x2x4_t %tmp2
+}
+
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+
+define <8 x i8> @ld1r_8b(i8* %bar) {
+; CHECK: ld1r_8b
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.8b { v0 }, [x0]
+; CHECK-NEXT ret
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
+ %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
+ %tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
+ %tmp5 = insertelement <8 x i8> %tmp4, i8 %tmp1, i32 3
+ %tmp6 = insertelement <8 x i8> %tmp5, i8 %tmp1, i32 4
+ %tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
+ %tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
+ %tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
+ ret <8 x i8> %tmp9
+}
+
+define <16 x i8> @ld1r_16b(i8* %bar) {
+; CHECK: ld1r_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.16b { v0 }, [x0]
+; CHECK-NEXT ret
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
+ %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
+ %tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
+ %tmp5 = insertelement <16 x i8> %tmp4, i8 %tmp1, i32 3
+ %tmp6 = insertelement <16 x i8> %tmp5, i8 %tmp1, i32 4
+ %tmp7 = insertelement <16 x i8> %tmp6, i8 %tmp1, i32 5
+ %tmp8 = insertelement <16 x i8> %tmp7, i8 %tmp1, i32 6
+ %tmp9 = insertelement <16 x i8> %tmp8, i8 %tmp1, i32 7
+ %tmp10 = insertelement <16 x i8> %tmp9, i8 %tmp1, i32 8
+ %tmp11 = insertelement <16 x i8> %tmp10, i8 %tmp1, i32 9
+ %tmp12 = insertelement <16 x i8> %tmp11, i8 %tmp1, i32 10
+ %tmp13 = insertelement <16 x i8> %tmp12, i8 %tmp1, i32 11
+ %tmp14 = insertelement <16 x i8> %tmp13, i8 %tmp1, i32 12
+ %tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
+ %tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
+ %tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
+ ret <16 x i8> %tmp17
+}
+
+define <4 x i16> @ld1r_4h(i16* %bar) {
+; CHECK: ld1r_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.4h { v0 }, [x0]
+; CHECK-NEXT ret
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
+ %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
+ %tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
+ %tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
+ ret <4 x i16> %tmp5
+}
+
+define <8 x i16> @ld1r_8h(i16* %bar) {
+; CHECK: ld1r_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.8h { v0 }, [x0]
+; CHECK-NEXT ret
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
+ %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
+ %tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
+ %tmp5 = insertelement <8 x i16> %tmp4, i16 %tmp1, i32 3
+ %tmp6 = insertelement <8 x i16> %tmp5, i16 %tmp1, i32 4
+ %tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
+ %tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
+ %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
+ ret <8 x i16> %tmp9
+}
+
+define <2 x i32> @ld1r_2s(i32* %bar) {
+; CHECK: ld1r_2s
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.2s { v0 }, [x0]
+; CHECK-NEXT ret
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
+ %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @ld1r_4s(i32* %bar) {
+; CHECK: ld1r_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.4s { v0 }, [x0]
+; CHECK-NEXT ret
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
+ %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
+ %tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
+ %tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @ld1r_2d(i64* %bar) {
+; CHECK: ld1r_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.2d { v0 }, [x0]
+; CHECK-NEXT ret
+ %tmp1 = load i64* %bar
+ %tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
+ %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
+ ret <2 x i64> %tmp3
+}
+
+define %struct.__neon_int8x8x2_t @ld2r_8b(i8* %A) nounwind {
+; CHECK: ld2r_8b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2r.8b { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x8x2_t %tmp2
+}
+
+define %struct.__neon_int8x8x3_t @ld3r_8b(i8* %A) nounwind {
+; CHECK: ld3r_8b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3r.8b { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x8x3_t %tmp2
+}
+
+define %struct.__neon_int8x8x4_t @ld4r_8b(i8* %A) nounwind {
+; CHECK: ld4r_8b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4r.8b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x8x4_t %tmp2
+}
+
+declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8*) nounwind readonly
+
+define %struct.__neon_int8x16x2_t @ld2r_16b(i8* %A) nounwind {
+; CHECK: ld2r_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2r.16b { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x16x2_t %tmp2
+}
+
+define %struct.__neon_int8x16x3_t @ld3r_16b(i8* %A) nounwind {
+; CHECK: ld3r_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3r.16b { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x16x3_t %tmp2
+}
+
+define %struct.__neon_int8x16x4_t @ld4r_16b(i8* %A) nounwind {
+; CHECK: ld4r_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4r.16b { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
+ ret %struct.__neon_int8x16x4_t %tmp2
+}
+
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8*) nounwind readonly
+
+define %struct.__neon_int16x4x2_t @ld2r_4h(i16* %A) nounwind {
+; CHECK: ld2r_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2r.4h { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x4x2_t %tmp2
+}
+
+define %struct.__neon_int16x4x3_t @ld3r_4h(i16* %A) nounwind {
+; CHECK: ld3r_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3r.4h { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x4x3_t %tmp2
+}
+
+define %struct.__neon_int16x4x4_t @ld4r_4h(i16* %A) nounwind {
+; CHECK: ld4r_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4r.4h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x4x4_t %tmp2
+}
+
+declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16*) nounwind readonly
+
+define %struct.__neon_int16x8x2_t @ld2r_8h(i16* %A) nounwind {
+; CHECK: ld2r_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2r.8h { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x8x2_t %tmp2
+}
+
+define %struct.__neon_int16x8x3_t @ld3r_8h(i16* %A) nounwind {
+; CHECK: ld3r_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3r.8h { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x8x3_t %tmp2
+}
+
+define %struct.__neon_int16x8x4_t @ld4r_8h(i16* %A) nounwind {
+; CHECK: ld4r_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4r.8h { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
+ ret %struct.__neon_int16x8x4_t %tmp2
+}
+
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16*) nounwind readonly
+
+define %struct.__neon_int32x2x2_t @ld2r_2s(i32* %A) nounwind {
+; CHECK: ld2r_2s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2r.2s { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x2x2_t %tmp2
+}
+
+define %struct.__neon_int32x2x3_t @ld3r_2s(i32* %A) nounwind {
+; CHECK: ld3r_2s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3r.2s { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x2x3_t %tmp2
+}
+
+define %struct.__neon_int32x2x4_t @ld4r_2s(i32* %A) nounwind {
+; CHECK: ld4r_2s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4r.2s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x2x4_t %tmp2
+}
+
+declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32*) nounwind readonly
+
+define %struct.__neon_int32x4x2_t @ld2r_4s(i32* %A) nounwind {
+; CHECK: ld2r_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2r.4s { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x4x2_t %tmp2
+}
+
+define %struct.__neon_int32x4x3_t @ld3r_4s(i32* %A) nounwind {
+; CHECK: ld3r_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3r.4s { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x4x3_t %tmp2
+}
+
+define %struct.__neon_int32x4x4_t @ld4r_4s(i32* %A) nounwind {
+; CHECK: ld4r_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4r.4s { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
+ ret %struct.__neon_int32x4x4_t %tmp2
+}
+
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32*) nounwind readonly
+
+define %struct.__neon_int64x1x2_t @ld2r_1d(i64* %A) nounwind {
+; CHECK: ld2r_1d
+; Make sure we are using the operands defined by the ABI
+; CHECK ld2r.1d { v0, v1 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x1x2_t %tmp2
+}
+
+define %struct.__neon_int64x1x3_t @ld3r_1d(i64* %A) nounwind {
+; CHECK: ld3r_1d
+; Make sure we are using the operands defined by the ABI
+; CHECK ld3r.1d { v0, v1, v2 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x1x3_t %tmp2
+}
+
+define %struct.__neon_int64x1x4_t @ld4r_1d(i64* %A) nounwind {
+; CHECK: ld4r_1d
+; Make sure we are using the operands defined by the ABI
+; CHECK ld4r.1d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT ret
+ %tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x1x4_t %tmp2
+}
+
+declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64*) nounwind readonly
+
+define %struct.__neon_int64x2x2_t @ld2r_2d(i64* %A) nounwind {
+; CHECK: ld2r_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld2r.2d { v0, v1 }, [x0]
+; CHECK-NEXT: ret
+ %tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x2x2_t %tmp2
+}
+
+define %struct.__neon_int64x2x3_t @ld3r_2d(i64* %A) nounwind {
+; CHECK: ld3r_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld3r.2d { v0, v1, v2 }, [x0]
+; CHECK-NEXT: ret
+ %tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x2x3_t %tmp2
+}
+
+define %struct.__neon_int64x2x4_t @ld4r_2d(i64* %A) nounwind {
+; CHECK: ld4r_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0]
+; CHECK-NEXT: ret
+ %tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
+ ret %struct.__neon_int64x2x4_t %tmp2
+}
+
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64*) nounwind readonly
+
+define <16 x i8> @ld1_16b(<16 x i8> %V, i8* %bar) {
+; CHECK-LABEL: ld1_16b
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.b { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <16 x i8> %V, i8 %tmp1, i32 0
+ ret <16 x i8> %tmp2
+}
+
+define <8 x i16> @ld1_8h(<8 x i16> %V, i16* %bar) {
+; CHECK-LABEL: ld1_8h
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.h { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <8 x i16> %V, i16 %tmp1, i32 0
+ ret <8 x i16> %tmp2
+}
+
+define <4 x i32> @ld1_4s(<4 x i32> %V, i32* %bar) {
+; CHECK-LABEL: ld1_4s
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.s { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <4 x i32> %V, i32 %tmp1, i32 0
+ ret <4 x i32> %tmp2
+}
+
+define <4 x float> @ld1_4s_float(<4 x float> %V, float* %bar) {
+; CHECK-LABEL: ld1_4s_float:
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.s { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <4 x float> %V, float %tmp1, i32 0
+ ret <4 x float> %tmp2
+}
+
+define <2 x i64> @ld1_2d(<2 x i64> %V, i64* %bar) {
+; CHECK-LABEL: ld1_2d
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.d { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load i64* %bar
+ %tmp2 = insertelement <2 x i64> %V, i64 %tmp1, i32 0
+ ret <2 x i64> %tmp2
+}
+
+define <2 x double> @ld1_2d_double(<2 x double> %V, double* %bar) {
+; CHECK-LABEL: ld1_2d_double:
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.d { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load double* %bar
+ %tmp2 = insertelement <2 x double> %V, double %tmp1, i32 0
+ ret <2 x double> %tmp2
+}
+
+define <1 x i64> @ld1_1d(<1 x i64>* %p) {
+; CHECK-LABEL: ld1_1d
+; Make sure we are using the operands defined by the ABI
+; CHECK: ldr [[REG:d[0-9]+]], [x0]
+; CHECK-NEXT: ret
+ %tmp = load <1 x i64>* %p, align 8
+ ret <1 x i64> %tmp
+}
+
+define <8 x i8> @ld1_8b(<8 x i8> %V, i8* %bar) {
+; CHECK-LABEL: ld1_8b
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.b { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load i8* %bar
+ %tmp2 = insertelement <8 x i8> %V, i8 %tmp1, i32 0
+ ret <8 x i8> %tmp2
+}
+
+define <4 x i16> @ld1_4h(<4 x i16> %V, i16* %bar) {
+; CHECK-LABEL: ld1_4h
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.h { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load i16* %bar
+ %tmp2 = insertelement <4 x i16> %V, i16 %tmp1, i32 0
+ ret <4 x i16> %tmp2
+}
+
+define <2 x i32> @ld1_2s(<2 x i32> %V, i32* %bar) {
+; CHECK-LABEL: ld1_2s:
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.s { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load i32* %bar
+ %tmp2 = insertelement <2 x i32> %V, i32 %tmp1, i32 0
+ ret <2 x i32> %tmp2
+}
+
+define <2 x float> @ld1_2s_float(<2 x float> %V, float* %bar) {
+; CHECK-LABEL: ld1_2s_float:
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1.s { v0 }[0], [x0]
+; CHECK-NEXT: ret
+ %tmp1 = load float* %bar
+ %tmp2 = insertelement <2 x float> %V, float %tmp1, i32 0
+ ret <2 x float> %tmp2
+}
+
+
+; Test case for rdar://13098923: vld1_dup_u32 doesn't generate ld1r.2s
+define void @ld1r_2s_from_dup(i8* nocapture %a, i8* nocapture %b, i16* nocapture %diff) nounwind ssp {
+entry:
+; CHECK: ld1r_2s_from_dup
+; CHECK: ld1r.2s { [[ARG1:v[0-9]+]] }, [x0]
+; CHECK-NEXT: ld1r.2s { [[ARG2:v[0-9]+]] }, [x1]
+; CHECK-NEXT: usubl.8h v[[RESREGNUM:[0-9]+]], [[ARG1]], [[ARG2]]
+; CHECK-NEXT: str d[[RESREGNUM]], [x2]
+; CHECK-NEXT: ret
+ %tmp = bitcast i8* %a to i32*
+ %tmp1 = load i32* %tmp, align 4
+ %tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
+ %lane = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp3 = bitcast <2 x i32> %lane to <8 x i8>
+ %tmp4 = bitcast i8* %b to i32*
+ %tmp5 = load i32* %tmp4, align 4
+ %tmp6 = insertelement <2 x i32> undef, i32 %tmp5, i32 0
+ %lane1 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp7 = bitcast <2 x i32> %lane1 to <8 x i8>
+ %vmovl.i.i = zext <8 x i8> %tmp3 to <8 x i16>
+ %vmovl.i4.i = zext <8 x i8> %tmp7 to <8 x i16>
+ %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i4.i
+ %tmp8 = bitcast <8 x i16> %sub.i to <2 x i64>
+ %shuffle.i = shufflevector <2 x i64> %tmp8, <2 x i64> undef, <1 x i32> zeroinitializer
+ %tmp9 = bitcast <1 x i64> %shuffle.i to <4 x i16>
+ %tmp10 = bitcast i16* %diff to <4 x i16>*
+ store <4 x i16> %tmp9, <4 x i16>* %tmp10, align 8
+ ret void
+}
+
+; Tests for rdar://11947069: vld1_dup_* and vld1q_dup_* code gen is suboptimal
+define <4 x float> @ld1r_4s_float(float* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_4s_float
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.4s { v0 }, [x0]
+; CHECK-NEXT: ret
+ %tmp = load float* %x, align 4
+ %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
+ %tmp2 = insertelement <4 x float> %tmp1, float %tmp, i32 1
+ %tmp3 = insertelement <4 x float> %tmp2, float %tmp, i32 2
+ %tmp4 = insertelement <4 x float> %tmp3, float %tmp, i32 3
+ ret <4 x float> %tmp4
+}
+
+define <2 x float> @ld1r_2s_float(float* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_2s_float
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.2s { v0 }, [x0]
+; CHECK-NEXT: ret
+ %tmp = load float* %x, align 4
+ %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
+ %tmp2 = insertelement <2 x float> %tmp1, float %tmp, i32 1
+ ret <2 x float> %tmp2
+}
+
+define <2 x double> @ld1r_2d_double(double* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_2d_double
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.2d { v0 }, [x0]
+; CHECK-NEXT: ret
+ %tmp = load double* %x, align 4
+ %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
+ %tmp2 = insertelement <2 x double> %tmp1, double %tmp, i32 1
+ ret <2 x double> %tmp2
+}
+
+define <1 x double> @ld1r_1d_double(double* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_1d_double
+; Make sure we are using the operands defined by the ABI
+; CHECK: ldr d0, [x0]
+; CHECK-NEXT: ret
+ %tmp = load double* %x, align 4
+ %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
+ ret <1 x double> %tmp1
+}
+
+define <4 x float> @ld1r_4s_float_shuff(float* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_4s_float_shuff
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.4s { v0 }, [x0]
+; CHECK-NEXT: ret
+ %tmp = load float* %x, align 4
+ %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
+ %lane = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
+ ret <4 x float> %lane
+}
+
+define <2 x float> @ld1r_2s_float_shuff(float* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_2s_float_shuff
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.2s { v0 }, [x0]
+; CHECK-NEXT: ret
+ %tmp = load float* %x, align 4
+ %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
+ %lane = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
+ ret <2 x float> %lane
+}
+
+define <2 x double> @ld1r_2d_double_shuff(double* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_2d_double_shuff
+; Make sure we are using the operands defined by the ABI
+; CHECK: ld1r.2d { v0 }, [x0]
+; CHECK-NEXT: ret
+ %tmp = load double* %x, align 4
+ %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
+ %lane = shufflevector <2 x double> %tmp1, <2 x double> undef, <2 x i32> zeroinitializer
+ ret <2 x double> %lane
+}
+
+define <1 x double> @ld1r_1d_double_shuff(double* nocapture %x) {
+entry:
+; CHECK-LABEL: ld1r_1d_double_shuff
+; Make sure we are using the operands defined by the ABI
+; CHECK: ldr d0, [x0]
+; CHECK-NEXT: ret
+ %tmp = load double* %x, align 4
+ %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
+ %lane = shufflevector <1 x double> %tmp1, <1 x double> undef, <1 x i32> zeroinitializer
+ ret <1 x double> %lane
+}
+
+%struct.__neon_float32x2x2_t = type { <2 x float>, <2 x float> }
+%struct.__neon_float32x2x3_t = type { <2 x float>, <2 x float>, <2 x float> }
+%struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
+
+declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float*) nounwind readonly
+declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double*) nounwind readonly
+
+define %struct.__neon_int8x8x2_t @ld1_x2_v8i8(i8* %addr) {
+; CHECK-LABEL: ld1_x2_v8i8:
+; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %addr)
+ ret %struct.__neon_int8x8x2_t %val
+}
+
+define %struct.__neon_int16x4x2_t @ld1_x2_v4i16(i16* %addr) {
+; CHECK-LABEL: ld1_x2_v4i16:
+; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %addr)
+ ret %struct.__neon_int16x4x2_t %val
+}
+
+define %struct.__neon_int32x2x2_t @ld1_x2_v2i32(i32* %addr) {
+; CHECK-LABEL: ld1_x2_v2i32:
+; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %addr)
+ ret %struct.__neon_int32x2x2_t %val
+}
+
+define %struct.__neon_float32x2x2_t @ld1_x2_v2f32(float* %addr) {
+; CHECK-LABEL: ld1_x2_v2f32:
+; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %addr)
+ ret %struct.__neon_float32x2x2_t %val
+}
+
+define %struct.__neon_int64x1x2_t @ld1_x2_v1i64(i64* %addr) {
+; CHECK-LABEL: ld1_x2_v1i64:
+; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %addr)
+ ret %struct.__neon_int64x1x2_t %val
+}
+
+define %struct.__neon_float64x1x2_t @ld1_x2_v1f64(double* %addr) {
+; CHECK-LABEL: ld1_x2_v1f64:
+; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %addr)
+ ret %struct.__neon_float64x1x2_t %val
+}
+
+
+%struct.__neon_float32x4x2_t = type { <4 x float>, <4 x float> }
+%struct.__neon_float32x4x3_t = type { <4 x float>, <4 x float>, <4 x float> }
+%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
+
+%struct.__neon_float64x2x2_t = type { <2 x double>, <2 x double> }
+%struct.__neon_float64x2x3_t = type { <2 x double>, <2 x double>, <2 x double> }
+%struct.__neon_float64x2x4_t = type { <2 x double>, <2 x double>, <2 x double>, <2 x double> }
+
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_float32x4x2_t @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float*) nounwind readonly
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_float64x2x2_t @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double*) nounwind readonly
+
+define %struct.__neon_int8x16x2_t @ld1_x2_v16i8(i8* %addr) {
+; CHECK-LABEL: ld1_x2_v16i8:
+; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %addr)
+ ret %struct.__neon_int8x16x2_t %val
+}
+
+define %struct.__neon_int16x8x2_t @ld1_x2_v8i16(i16* %addr) {
+; CHECK-LABEL: ld1_x2_v8i16:
+; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %addr)
+ ret %struct.__neon_int16x8x2_t %val
+}
+
+define %struct.__neon_int32x4x2_t @ld1_x2_v4i32(i32* %addr) {
+; CHECK-LABEL: ld1_x2_v4i32:
+; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %addr)
+ ret %struct.__neon_int32x4x2_t %val
+}
+
+define %struct.__neon_float32x4x2_t @ld1_x2_v4f32(float* %addr) {
+; CHECK-LABEL: ld1_x2_v4f32:
+; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float32x4x2_t @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %addr)
+ ret %struct.__neon_float32x4x2_t %val
+}
+
+define %struct.__neon_int64x2x2_t @ld1_x2_v2i64(i64* %addr) {
+; CHECK-LABEL: ld1_x2_v2i64:
+; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %addr)
+ ret %struct.__neon_int64x2x2_t %val
+}
+
+define %struct.__neon_float64x2x2_t @ld1_x2_v2f64(double* %addr) {
+; CHECK-LABEL: ld1_x2_v2f64:
+; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float64x2x2_t @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %addr)
+ ret %struct.__neon_float64x2x2_t %val
+}
+
+declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float*) nounwind readonly
+declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double*) nounwind readonly
+
+define %struct.__neon_int8x8x3_t @ld1_x3_v8i8(i8* %addr) {
+; CHECK-LABEL: ld1_x3_v8i8:
+; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %addr)
+ ret %struct.__neon_int8x8x3_t %val
+}
+
+define %struct.__neon_int16x4x3_t @ld1_x3_v4i16(i16* %addr) {
+; CHECK-LABEL: ld1_x3_v4i16:
+; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %addr)
+ ret %struct.__neon_int16x4x3_t %val
+}
+
+define %struct.__neon_int32x2x3_t @ld1_x3_v2i32(i32* %addr) {
+; CHECK-LABEL: ld1_x3_v2i32:
+; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %addr)
+ ret %struct.__neon_int32x2x3_t %val
+}
+
+define %struct.__neon_float32x2x3_t @ld1_x3_v2f32(float* %addr) {
+; CHECK-LABEL: ld1_x3_v2f32:
+; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %addr)
+ ret %struct.__neon_float32x2x3_t %val
+}
+
+define %struct.__neon_int64x1x3_t @ld1_x3_v1i64(i64* %addr) {
+; CHECK-LABEL: ld1_x3_v1i64:
+; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %addr)
+ ret %struct.__neon_int64x1x3_t %val
+}
+
+define %struct.__neon_float64x1x3_t @ld1_x3_v1f64(double* %addr) {
+; CHECK-LABEL: ld1_x3_v1f64:
+; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %addr)
+ ret %struct.__neon_float64x1x3_t %val
+}
+
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_float32x4x3_t @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float*) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_float64x2x3_t @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double*) nounwind readonly
+
+define %struct.__neon_int8x16x3_t @ld1_x3_v16i8(i8* %addr) {
+; CHECK-LABEL: ld1_x3_v16i8:
+; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %addr)
+ ret %struct.__neon_int8x16x3_t %val
+}
+
+define %struct.__neon_int16x8x3_t @ld1_x3_v8i16(i16* %addr) {
+; CHECK-LABEL: ld1_x3_v8i16:
+; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %addr)
+ ret %struct.__neon_int16x8x3_t %val
+}
+
+define %struct.__neon_int32x4x3_t @ld1_x3_v4i32(i32* %addr) {
+; CHECK-LABEL: ld1_x3_v4i32:
+; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %addr)
+ ret %struct.__neon_int32x4x3_t %val
+}
+
+define %struct.__neon_float32x4x3_t @ld1_x3_v4f32(float* %addr) {
+; CHECK-LABEL: ld1_x3_v4f32:
+; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float32x4x3_t @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %addr)
+ ret %struct.__neon_float32x4x3_t %val
+}
+
+define %struct.__neon_int64x2x3_t @ld1_x3_v2i64(i64* %addr) {
+; CHECK-LABEL: ld1_x3_v2i64:
+; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %addr)
+ ret %struct.__neon_int64x2x3_t %val
+}
+
+define %struct.__neon_float64x2x3_t @ld1_x3_v2f64(double* %addr) {
+; CHECK-LABEL: ld1_x3_v2f64:
+; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float64x2x3_t @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %addr)
+ ret %struct.__neon_float64x2x3_t %val
+}
+
+declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float*) nounwind readonly
+declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double*) nounwind readonly
+
+define %struct.__neon_int8x8x4_t @ld1_x4_v8i8(i8* %addr) {
+; CHECK-LABEL: ld1_x4_v8i8:
+; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %addr)
+ ret %struct.__neon_int8x8x4_t %val
+}
+
+define %struct.__neon_int16x4x4_t @ld1_x4_v4i16(i16* %addr) {
+; CHECK-LABEL: ld1_x4_v4i16:
+; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %addr)
+ ret %struct.__neon_int16x4x4_t %val
+}
+
+define %struct.__neon_int32x2x4_t @ld1_x4_v2i32(i32* %addr) {
+; CHECK-LABEL: ld1_x4_v2i32:
+; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %addr)
+ ret %struct.__neon_int32x2x4_t %val
+}
+
+define %struct.__neon_float32x2x4_t @ld1_x4_v2f32(float* %addr) {
+; CHECK-LABEL: ld1_x4_v2f32:
+; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %addr)
+ ret %struct.__neon_float32x2x4_t %val
+}
+
+define %struct.__neon_int64x1x4_t @ld1_x4_v1i64(i64* %addr) {
+; CHECK-LABEL: ld1_x4_v1i64:
+; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %addr)
+ ret %struct.__neon_int64x1x4_t %val
+}
+
+define %struct.__neon_float64x1x4_t @ld1_x4_v1f64(double* %addr) {
+; CHECK-LABEL: ld1_x4_v1f64:
+; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %addr)
+ ret %struct.__neon_float64x1x4_t %val
+}
+
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_float32x4x4_t @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float*) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_float64x2x4_t @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double*) nounwind readonly
+
+define %struct.__neon_int8x16x4_t @ld1_x4_v16i8(i8* %addr) {
+; CHECK-LABEL: ld1_x4_v16i8:
+; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %addr)
+ ret %struct.__neon_int8x16x4_t %val
+}
+
+define %struct.__neon_int16x8x4_t @ld1_x4_v8i16(i16* %addr) {
+; CHECK-LABEL: ld1_x4_v8i16:
+; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %addr)
+ ret %struct.__neon_int16x8x4_t %val
+}
+
+define %struct.__neon_int32x4x4_t @ld1_x4_v4i32(i32* %addr) {
+; CHECK-LABEL: ld1_x4_v4i32:
+; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %addr)
+ ret %struct.__neon_int32x4x4_t %val
+}
+
+define %struct.__neon_float32x4x4_t @ld1_x4_v4f32(float* %addr) {
+; CHECK-LABEL: ld1_x4_v4f32:
+; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float32x4x4_t @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %addr)
+ ret %struct.__neon_float32x4x4_t %val
+}
+
+define %struct.__neon_int64x2x4_t @ld1_x4_v2i64(i64* %addr) {
+; CHECK-LABEL: ld1_x4_v2i64:
+; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %addr)
+ ret %struct.__neon_int64x2x4_t %val
+}
+
+define %struct.__neon_float64x2x4_t @ld1_x4_v2f64(double* %addr) {
+; CHECK-LABEL: ld1_x4_v2f64:
+; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ %val = call %struct.__neon_float64x2x4_t @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %addr)
+ ret %struct.__neon_float64x2x4_t %val
+}
diff --git a/test/CodeGen/AArch64/arm64-ldp.ll b/test/CodeGen/AArch64/arm64-ldp.ll
new file mode 100644
index 000000000000..5a986261b31b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ldp.ll
@@ -0,0 +1,149 @@
+; RUN: llc < %s -march=arm64 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-unscaled-mem-op=true\
+; RUN: -verify-machineinstrs | FileCheck -check-prefix=LDUR_CHK %s
+
+; CHECK: ldp_int
+; CHECK: ldp
+define i32 @ldp_int(i32* %p) nounwind {
+ %tmp = load i32* %p, align 4
+ %add.ptr = getelementptr inbounds i32* %p, i64 1
+ %tmp1 = load i32* %add.ptr, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ ret i32 %add
+}
+
+; CHECK: ldp_long
+; CHECK: ldp
+define i64 @ldp_long(i64* %p) nounwind {
+ %tmp = load i64* %p, align 8
+ %add.ptr = getelementptr inbounds i64* %p, i64 1
+ %tmp1 = load i64* %add.ptr, align 8
+ %add = add nsw i64 %tmp1, %tmp
+ ret i64 %add
+}
+
+; CHECK: ldp_float
+; CHECK: ldp
+define float @ldp_float(float* %p) nounwind {
+ %tmp = load float* %p, align 4
+ %add.ptr = getelementptr inbounds float* %p, i64 1
+ %tmp1 = load float* %add.ptr, align 4
+ %add = fadd float %tmp, %tmp1
+ ret float %add
+}
+
+; CHECK: ldp_double
+; CHECK: ldp
+define double @ldp_double(double* %p) nounwind {
+ %tmp = load double* %p, align 8
+ %add.ptr = getelementptr inbounds double* %p, i64 1
+ %tmp1 = load double* %add.ptr, align 8
+ %add = fadd double %tmp, %tmp1
+ ret double %add
+}
+
+; Test the load/store optimizer---combine ldurs into an ldp, if appropriate
+define i32 @ldur_int(i32* %a) nounwind {
+; LDUR_CHK: ldur_int
+; LDUR_CHK: ldp [[DST1:w[0-9]+]], [[DST2:w[0-9]+]], [x0, #-8]
+; LDUR_CHK-NEXT: add w{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i32* %a, i32 -1
+ %tmp1 = load i32* %p1, align 2
+ %p2 = getelementptr inbounds i32* %a, i32 -2
+ %tmp2 = load i32* %p2, align 2
+ %tmp3 = add i32 %tmp1, %tmp2
+ ret i32 %tmp3
+}
+
+define i64 @ldur_long(i64* %a) nounwind ssp {
+; LDUR_CHK: ldur_long
+; LDUR_CHK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-16]
+; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i64* %a, i64 -1
+ %tmp1 = load i64* %p1, align 2
+ %p2 = getelementptr inbounds i64* %a, i64 -2
+ %tmp2 = load i64* %p2, align 2
+ %tmp3 = add i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+define float @ldur_float(float* %a) {
+; LDUR_CHK: ldur_float
+; LDUR_CHK: ldp [[DST1:s[0-9]+]], [[DST2:s[0-9]+]], [x0, #-8]
+; LDUR_CHK-NEXT: add s{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds float* %a, i64 -1
+ %tmp1 = load float* %p1, align 2
+ %p2 = getelementptr inbounds float* %a, i64 -2
+ %tmp2 = load float* %p2, align 2
+ %tmp3 = fadd float %tmp1, %tmp2
+ ret float %tmp3
+}
+
+define double @ldur_double(double* %a) {
+; LDUR_CHK: ldur_double
+; LDUR_CHK: ldp [[DST1:d[0-9]+]], [[DST2:d[0-9]+]], [x0, #-16]
+; LDUR_CHK-NEXT: add d{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds double* %a, i64 -1
+ %tmp1 = load double* %p1, align 2
+ %p2 = getelementptr inbounds double* %a, i64 -2
+ %tmp2 = load double* %p2, align 2
+ %tmp3 = fadd double %tmp1, %tmp2
+ ret double %tmp3
+}
+
+; Now check some boundary conditions
+define i64 @pairUpBarelyIn(i64* %a) nounwind ssp {
+; LDUR_CHK: pairUpBarelyIn
+; LDUR_CHK-NOT: ldur
+; LDUR_CHK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
+; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i64* %a, i64 -31
+ %tmp1 = load i64* %p1, align 2
+ %p2 = getelementptr inbounds i64* %a, i64 -32
+ %tmp2 = load i64* %p2, align 2
+ %tmp3 = add i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
+; LDUR_CHK: pairUpBarelyOut
+; LDUR_CHK-NOT: ldp
+; Don't be fragile about which loads or manipulations of the base register
+; are used---just check that there isn't an ldp before the add
+; LDUR_CHK: add
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i64* %a, i64 -32
+ %tmp1 = load i64* %p1, align 2
+ %p2 = getelementptr inbounds i64* %a, i64 -33
+ %tmp2 = load i64* %p2, align 2
+ %tmp3 = add i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
+; LDUR_CHK: pairUpNotAligned
+; LDUR_CHK-NOT: ldp
+; LDUR_CHK: ldur
+; LDUR_CHK-NEXT: ldur
+; LDUR_CHK-NEXT: add
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i64* %a, i64 -18
+ %bp1 = bitcast i64* %p1 to i8*
+ %bp1p1 = getelementptr inbounds i8* %bp1, i64 1
+ %dp1 = bitcast i8* %bp1p1 to i64*
+ %tmp1 = load i64* %dp1, align 1
+
+ %p2 = getelementptr inbounds i64* %a, i64 -17
+ %bp2 = bitcast i64* %p2 to i8*
+ %bp2p1 = getelementptr inbounds i8* %bp2, i64 1
+ %dp2 = bitcast i8* %bp2p1 to i64*
+ %tmp2 = load i64* %dp2, align 1
+
+ %tmp3 = add i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
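[Editorial sketch, not part of the imported test file] The cases above cover offsets, alignment, and pairing boundaries; one related property they leave implicit is that the load/store optimizer does not pair volatile accesses. The hypothetical function below (the name and values are invented for illustration) is written in the same IR style and would be expected to keep two separate loads rather than form an ldp, though it has not been run against this llc revision and no CHECK lines are claimed for it.

define i32 @sketch_no_ldp_volatile(i32* %p) nounwind {
  ; Same shape as @ldp_int above, but the accesses are volatile, which the
  ; pairing optimization is expected to leave alone.
  %tmp = load volatile i32* %p, align 4
  %add.ptr = getelementptr inbounds i32* %p, i64 1
  %tmp1 = load volatile i32* %add.ptr, align 4
  %add = add nsw i32 %tmp1, %tmp
  ret i32 %add
}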
diff --git a/test/CodeGen/AArch64/arm64-ldur.ll b/test/CodeGen/AArch64/arm64-ldur.ll
new file mode 100644
index 000000000000..2848c06f9bb0
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ldur.ll
@@ -0,0 +1,67 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define i64 @_f0(i64* %p) {
+; CHECK: f0:
+; CHECK: ldur x0, [x0, #-8]
+; CHECK-NEXT: ret
+ %tmp = getelementptr inbounds i64* %p, i64 -1
+ %ret = load i64* %tmp, align 2
+ ret i64 %ret
+}
+define i32 @_f1(i32* %p) {
+; CHECK: f1:
+; CHECK: ldur w0, [x0, #-4]
+; CHECK-NEXT: ret
+ %tmp = getelementptr inbounds i32* %p, i64 -1
+ %ret = load i32* %tmp, align 2
+ ret i32 %ret
+}
+define i16 @_f2(i16* %p) {
+; CHECK: f2:
+; CHECK: ldurh w0, [x0, #-2]
+; CHECK-NEXT: ret
+ %tmp = getelementptr inbounds i16* %p, i64 -1
+ %ret = load i16* %tmp, align 2
+ ret i16 %ret
+}
+define i8 @_f3(i8* %p) {
+; CHECK: f3:
+; CHECK: ldurb w0, [x0, #-1]
+; CHECK-NEXT: ret
+ %tmp = getelementptr inbounds i8* %p, i64 -1
+ %ret = load i8* %tmp, align 2
+ ret i8 %ret
+}
+
+define i64 @zext32(i8* %a) nounwind ssp {
+; CHECK-LABEL: zext32:
+; CHECK: ldur w0, [x0, #-12]
+; CHECK-NEXT: ret
+ %p = getelementptr inbounds i8* %a, i64 -12
+ %tmp1 = bitcast i8* %p to i32*
+ %tmp2 = load i32* %tmp1, align 4
+ %ret = zext i32 %tmp2 to i64
+
+ ret i64 %ret
+}
+define i64 @zext16(i8* %a) nounwind ssp {
+; CHECK-LABEL: zext16:
+; CHECK: ldurh w0, [x0, #-12]
+; CHECK-NEXT: ret
+ %p = getelementptr inbounds i8* %a, i64 -12
+ %tmp1 = bitcast i8* %p to i16*
+ %tmp2 = load i16* %tmp1, align 2
+ %ret = zext i16 %tmp2 to i64
+
+ ret i64 %ret
+}
+define i64 @zext8(i8* %a) nounwind ssp {
+; CHECK-LABEL: zext8:
+; CHECK: ldurb w0, [x0, #-12]
+; CHECK-NEXT: ret
+ %p = getelementptr inbounds i8* %a, i64 -12
+ %tmp2 = load i8* %p, align 1
+ %ret = zext i8 %tmp2 to i64
+
+ ret i64 %ret
+}
diff --git a/test/CodeGen/AArch64/arm64-ldxr-stxr.ll b/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
new file mode 100644
index 000000000000..9093df27cddc
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
@@ -0,0 +1,270 @@
+; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s
+
+%0 = type { i64, i64 }
+
+define i128 @f0(i8* %p) nounwind readonly {
+; CHECK-LABEL: f0:
+; CHECK: ldxp {{x[0-9]+}}, {{x[0-9]+}}, [x0]
+entry:
+ %ldrexd = tail call %0 @llvm.aarch64.ldxp(i8* %p)
+ %0 = extractvalue %0 %ldrexd, 1
+ %1 = extractvalue %0 %ldrexd, 0
+ %2 = zext i64 %0 to i128
+ %3 = zext i64 %1 to i128
+ %shl = shl nuw i128 %2, 64
+ %4 = or i128 %shl, %3
+ ret i128 %4
+}
+
+define i32 @f1(i8* %ptr, i128 %val) nounwind {
+; CHECK-LABEL: f1:
+; CHECK: stxp {{w[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, [x0]
+entry:
+ %tmp4 = trunc i128 %val to i64
+ %tmp6 = lshr i128 %val, 64
+ %tmp7 = trunc i128 %tmp6 to i64
+ %strexd = tail call i32 @llvm.aarch64.stxp(i64 %tmp4, i64 %tmp7, i8* %ptr)
+ ret i32 %strexd
+}
+
+declare %0 @llvm.aarch64.ldxp(i8*) nounwind
+declare i32 @llvm.aarch64.stxp(i64, i64, i8*) nounwind
+
+@var = global i64 0, align 8
+
+define void @test_load_i8(i8* %addr) {
+; CHECK-LABEL: test_load_i8:
+; CHECK: ldxrb w[[LOADVAL:[0-9]+]], [x0]
+; CHECK-NOT: uxtb
+; CHECK-NOT: and
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
+ %shortval = trunc i64 %val to i8
+ %extval = zext i8 %shortval to i64
+ store i64 %extval, i64* @var, align 8
+ ret void
+}
+
+define void @test_load_i16(i16* %addr) {
+; CHECK-LABEL: test_load_i16:
+; CHECK: ldxrh w[[LOADVAL:[0-9]+]], [x0]
+; CHECK-NOT: uxth
+; CHECK-NOT: and
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* %addr)
+ %shortval = trunc i64 %val to i16
+ %extval = zext i16 %shortval to i64
+ store i64 %extval, i64* @var, align 8
+ ret void
+}
+
+define void @test_load_i32(i32* %addr) {
+; CHECK-LABEL: test_load_i32:
+; CHECK: ldxr w[[LOADVAL:[0-9]+]], [x0]
+; CHECK-NOT: uxtw
+; CHECK-NOT: and
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+ %shortval = trunc i64 %val to i32
+ %extval = zext i32 %shortval to i64
+ store i64 %extval, i64* @var, align 8
+ ret void
+}
+
+define void @test_load_i64(i64* %addr) {
+; CHECK-LABEL: test_load_i64:
+; CHECK: ldxr x[[LOADVAL:[0-9]+]], [x0]
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr)
+ store i64 %val, i64* @var, align 8
+ ret void
+}
+
+
+declare i64 @llvm.aarch64.ldxr.p0i8(i8*) nounwind
+declare i64 @llvm.aarch64.ldxr.p0i16(i16*) nounwind
+declare i64 @llvm.aarch64.ldxr.p0i32(i32*) nounwind
+declare i64 @llvm.aarch64.ldxr.p0i64(i64*) nounwind
+
+define i32 @test_store_i8(i32, i8 %val, i8* %addr) {
+; CHECK-LABEL: test_store_i8:
+; CHECK-NOT: uxtb
+; CHECK-NOT: and
+; CHECK: stxrb w0, w1, [x2]
+ %extval = zext i8 %val to i64
+ %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* %addr)
+ ret i32 %res
+}
+
+define i32 @test_store_i16(i32, i16 %val, i16* %addr) {
+; CHECK-LABEL: test_store_i16:
+; CHECK-NOT: uxth
+; CHECK-NOT: and
+; CHECK: stxrh w0, w1, [x2]
+ %extval = zext i16 %val to i64
+ %res = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* %addr)
+ ret i32 %res
+}
+
+define i32 @test_store_i32(i32, i32 %val, i32* %addr) {
+; CHECK-LABEL: test_store_i32:
+; CHECK-NOT: uxtw
+; CHECK-NOT: and
+; CHECK: stxr w0, w1, [x2]
+ %extval = zext i32 %val to i64
+ %res = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* %addr)
+ ret i32 %res
+}
+
+define i32 @test_store_i64(i32, i64 %val, i64* %addr) {
+; CHECK-LABEL: test_store_i64:
+; CHECK: stxr w0, x1, [x2]
+ %res = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* %addr)
+ ret i32 %res
+}
+
+declare i32 @llvm.aarch64.stxr.p0i8(i64, i8*) nounwind
+declare i32 @llvm.aarch64.stxr.p0i16(i64, i16*) nounwind
+declare i32 @llvm.aarch64.stxr.p0i32(i64, i32*) nounwind
+declare i32 @llvm.aarch64.stxr.p0i64(i64, i64*) nounwind
+
+; CHECK: test_clear:
+; CHECK: clrex
+define void @test_clear() {
+ call void @llvm.aarch64.clrex()
+ ret void
+}
+
+declare void @llvm.aarch64.clrex() nounwind
+
+define i128 @test_load_acquire_i128(i8* %p) nounwind readonly {
+; CHECK-LABEL: test_load_acquire_i128:
+; CHECK: ldaxp {{x[0-9]+}}, {{x[0-9]+}}, [x0]
+entry:
+ %ldrexd = tail call %0 @llvm.aarch64.ldaxp(i8* %p)
+ %0 = extractvalue %0 %ldrexd, 1
+ %1 = extractvalue %0 %ldrexd, 0
+ %2 = zext i64 %0 to i128
+ %3 = zext i64 %1 to i128
+ %shl = shl nuw i128 %2, 64
+ %4 = or i128 %shl, %3
+ ret i128 %4
+}
+
+define i32 @test_store_release_i128(i8* %ptr, i128 %val) nounwind {
+; CHECK-LABEL: test_store_release_i128:
+; CHECK: stlxp {{w[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, [x0]
+entry:
+ %tmp4 = trunc i128 %val to i64
+ %tmp6 = lshr i128 %val, 64
+ %tmp7 = trunc i128 %tmp6 to i64
+ %strexd = tail call i32 @llvm.aarch64.stlxp(i64 %tmp4, i64 %tmp7, i8* %ptr)
+ ret i32 %strexd
+}
+
+declare %0 @llvm.aarch64.ldaxp(i8*) nounwind
+declare i32 @llvm.aarch64.stlxp(i64, i64, i8*) nounwind
+
+define void @test_load_acquire_i8(i8* %addr) {
+; CHECK-LABEL: test_load_acquire_i8:
+; CHECK: ldaxrb w[[LOADVAL:[0-9]+]], [x0]
+; CHECK-NOT: uxtb
+; CHECK-NOT: and
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
+ %shortval = trunc i64 %val to i8
+ %extval = zext i8 %shortval to i64
+ store i64 %extval, i64* @var, align 8
+ ret void
+}
+
+define void @test_load_acquire_i16(i16* %addr) {
+; CHECK-LABEL: test_load_acquire_i16:
+; CHECK: ldaxrh w[[LOADVAL:[0-9]+]], [x0]
+; CHECK-NOT: uxth
+; CHECK-NOT: and
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* %addr)
+ %shortval = trunc i64 %val to i16
+ %extval = zext i16 %shortval to i64
+ store i64 %extval, i64* @var, align 8
+ ret void
+}
+
+define void @test_load_acquire_i32(i32* %addr) {
+; CHECK-LABEL: test_load_acquire_i32:
+; CHECK: ldaxr w[[LOADVAL:[0-9]+]], [x0]
+; CHECK-NOT: uxtw
+; CHECK-NOT: and
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %addr)
+ %shortval = trunc i64 %val to i32
+ %extval = zext i32 %shortval to i64
+ store i64 %extval, i64* @var, align 8
+ ret void
+}
+
+define void @test_load_acquire_i64(i64* %addr) {
+; CHECK-LABEL: test_load_acquire_i64:
+; CHECK: ldaxr x[[LOADVAL:[0-9]+]], [x0]
+; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
+
+ %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr)
+ store i64 %val, i64* @var, align 8
+ ret void
+}
+
+
+declare i64 @llvm.aarch64.ldaxr.p0i8(i8*) nounwind
+declare i64 @llvm.aarch64.ldaxr.p0i16(i16*) nounwind
+declare i64 @llvm.aarch64.ldaxr.p0i32(i32*) nounwind
+declare i64 @llvm.aarch64.ldaxr.p0i64(i64*) nounwind
+
+define i32 @test_store_release_i8(i32, i8 %val, i8* %addr) {
+; CHECK-LABEL: test_store_release_i8:
+; CHECK-NOT: uxtb
+; CHECK-NOT: and
+; CHECK: stlxrb w0, w1, [x2]
+ %extval = zext i8 %val to i64
+ %res = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* %addr)
+ ret i32 %res
+}
+
+define i32 @test_store_release_i16(i32, i16 %val, i16* %addr) {
+; CHECK-LABEL: test_store_release_i16:
+; CHECK-NOT: uxth
+; CHECK-NOT: and
+; CHECK: stlxrh w0, w1, [x2]
+ %extval = zext i16 %val to i64
+ %res = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* %addr)
+ ret i32 %res
+}
+
+define i32 @test_store_release_i32(i32, i32 %val, i32* %addr) {
+; CHECK-LABEL: test_store_release_i32:
+; CHECK-NOT: uxtw
+; CHECK-NOT: and
+; CHECK: stlxr w0, w1, [x2]
+ %extval = zext i32 %val to i64
+ %res = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* %addr)
+ ret i32 %res
+}
+
+define i32 @test_store_release_i64(i32, i64 %val, i64* %addr) {
+; CHECK-LABEL: test_store_release_i64:
+; CHECK: stlxr w0, x1, [x2]
+ %res = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* %addr)
+ ret i32 %res
+}
+
+declare i32 @llvm.aarch64.stlxr.p0i8(i64, i8*) nounwind
+declare i32 @llvm.aarch64.stlxr.p0i16(i64, i16*) nounwind
+declare i32 @llvm.aarch64.stlxr.p0i32(i64, i32*) nounwind
+declare i32 @llvm.aarch64.stlxr.p0i64(i64, i64*) nounwind
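[Editorial sketch, not part of the imported test file] The tests above exercise the exclusive load/store intrinsics one at a time. For context, the sketch below shows how @llvm.aarch64.ldxr.p0i32 and @llvm.aarch64.stxr.p0i32 (declared earlier in this file) are typically composed into an exclusive read-modify-write retry loop, here a plain atomic increment of an i32. The function and value names are invented and no CHECK lines are claimed for it.

define i32 @sketch_atomic_inc(i32* %addr) {
entry:
  br label %retry

retry:
  ; ldxr returns the loaded value zero-extended to i64.
  %old64 = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
  %old = trunc i64 %old64 to i32
  %new = add i32 %old, 1
  %new64 = zext i32 %new to i64
  ; stxr returns 0 on success and non-zero if the exclusive monitor was lost.
  %status = call i32 @llvm.aarch64.stxr.p0i32(i64 %new64, i32* %addr)
  %failed = icmp ne i32 %status, 0
  br i1 %failed, label %retry, label %done

done:
  ret i32 %old
}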
diff --git a/test/CodeGen/AArch64/arm64-leaf.ll b/test/CodeGen/AArch64/arm64-leaf.ll
new file mode 100644
index 000000000000..d3b2031686e8
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-leaf.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=arm64 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; rdar://12829704
+
+define void @t8() nounwind ssp {
+; CHECK-LABEL: t8:
+; CHECK-NOT: stp fp, lr, [sp, #-16]!
+; CHECK-NOT: mov fp, sp
+; CHECK: nop
+; CHECK-NOT: mov sp, fp
+; CHECK-NOT: ldp fp, lr, [sp], #16
+ tail call void asm sideeffect "nop", "~{v8}"() nounwind
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-long-shift.ll b/test/CodeGen/AArch64/arm64-long-shift.ll
new file mode 100644
index 000000000000..d5baf16bdd5c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-long-shift.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+
+define i128 @shl(i128 %r, i128 %s) nounwind readnone {
+; CHECK-LABEL: shl:
+; CHECK: lsl [[XREG_0:x[0-9]+]], x1, x2
+; CHECK-NEXT: orr w[[XREG_1:[0-9]+]], wzr, #0x40
+; CHECK-NEXT: sub [[XREG_2:x[0-9]+]], x[[XREG_1]], x2
+; CHECK-NEXT: lsr [[XREG_3:x[0-9]+]], x0, [[XREG_2]]
+; CHECK-NEXT: orr [[XREG_6:x[0-9]+]], [[XREG_3]], [[XREG_0]]
+; CHECK-NEXT: sub [[XREG_4:x[0-9]+]], x2, #64
+; CHECK-NEXT: lsl [[XREG_5:x[0-9]+]], x0, [[XREG_4]]
+; CHECK-NEXT: cmp [[XREG_4]], #0
+; CHECK-NEXT: csel x1, [[XREG_5]], [[XREG_6]], ge
+; CHECK-NEXT: lsl [[SMALLSHIFT_LO:x[0-9]+]], x0, x2
+; CHECK-NEXT: csel x0, xzr, [[SMALLSHIFT_LO]], ge
+; CHECK-NEXT: ret
+
+ %shl = shl i128 %r, %s
+ ret i128 %shl
+}
+
+define i128 @ashr(i128 %r, i128 %s) nounwind readnone {
+; CHECK-LABEL: ashr:
+; CHECK: lsr [[XREG_0:x[0-9]+]], x0, x2
+; CHECK-NEXT: orr w[[XREG_1:[0-9]+]], wzr, #0x40
+; CHECK-NEXT: sub [[XREG_2:x[0-9]+]], x[[XREG_1]], x2
+; CHECK-NEXT: lsl [[XREG_3:x[0-9]+]], x1, [[XREG_2]]
+; CHECK-NEXT: orr [[XREG_4:x[0-9]+]], [[XREG_0]], [[XREG_3]]
+; CHECK-NEXT: sub [[XREG_5:x[0-9]+]], x2, #64
+; CHECK-NEXT: asr [[XREG_6:x[0-9]+]], x1, [[XREG_5]]
+; CHECK-NEXT: cmp [[XREG_5]], #0
+; CHECK-NEXT: csel x0, [[XREG_6]], [[XREG_4]], ge
+; CHECK-NEXT: asr [[SMALLSHIFT_HI:x[0-9]+]], x1, x2
+; CHECK-NEXT: asr [[BIGSHIFT_HI:x[0-9]+]], x1, #63
+; CHECK-NEXT: csel x1, [[BIGSHIFT_HI]], [[SMALLSHIFT_HI]], ge
+; CHECK-NEXT: ret
+
+ %shr = ashr i128 %r, %s
+ ret i128 %shr
+}
+
+define i128 @lshr(i128 %r, i128 %s) nounwind readnone {
+; CHECK-LABEL: lshr:
+; CHECK: lsr [[XREG_0:x[0-9]+]], x0, x2
+; CHECK-NEXT: orr w[[XREG_1:[0-9]+]], wzr, #0x40
+; CHECK-NEXT: sub [[XREG_2:x[0-9]+]], x[[XREG_1]], x2
+; CHECK-NEXT: lsl [[XREG_3:x[0-9]+]], x1, [[XREG_2]]
+; CHECK-NEXT: orr [[XREG_4:x[0-9]+]], [[XREG_0]], [[XREG_3]]
+; CHECK-NEXT: sub [[XREG_5:x[0-9]+]], x2, #64
+; CHECK-NEXT: lsr [[XREG_6:x[0-9]+]], x1, [[XREG_5]]
+; CHECK-NEXT: cmp [[XREG_5]], #0
+; CHECK-NEXT: csel x0, [[XREG_6]], [[XREG_4]], ge
+; CHECK-NEXT: lsr [[SMALLSHIFT_HI:x[0-9]+]], x1, x2
+; CHECK-NEXT: csel x1, xzr, [[SMALLSHIFT_HI]], ge
+; CHECK-NEXT: ret
+
+ %shr = lshr i128 %r, %s
+ ret i128 %shr
+}
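[Editorial sketch, not part of the imported test file] The CHECK sequences above implement the standard two-word shift expansion: for shl, the low half is lo << s (or 0 once s >= 64) and the high half is (hi << s) | (lo >> (64 - s)) for s < 64, or lo << (s - 64) otherwise, with csel selecting between the two cases on the sign of s - 64. As a reading aid, the hypothetical function below restates that computation directly in 64-bit IR; the name is invented, it assumes 0 < %s < 128, and it is not claimed to be codegen-equivalent to @shl above.

define { i64, i64 } @sketch_shl128(i64 %lo, i64 %hi, i64 %s) {
  ; Candidate high half for shifts below 64 bits.
  %hi.shifted = shl i64 %hi, %s
  %inv = sub i64 64, %s
  %carry = lshr i64 %lo, %inv
  %hi.small = or i64 %hi.shifted, %carry
  ; Candidate high half for shifts of 64 bits or more.
  %s.big = sub i64 %s, 64
  %hi.big = shl i64 %lo, %s.big
  ; Pick per the sign of s - 64; the out-of-range intermediate shift in the
  ; unselected candidate is never used.
  %is.big = icmp sge i64 %s.big, 0
  %new.hi = select i1 %is.big, i64 %hi.big, i64 %hi.small
  %lo.small = shl i64 %lo, %s
  %new.lo = select i1 %is.big, i64 0, i64 %lo.small
  %res.0 = insertvalue { i64, i64 } undef, i64 %new.lo, 0
  %res.1 = insertvalue { i64, i64 } %res.0, i64 %new.hi, 1
  ret { i64, i64 } %res.1
}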
diff --git a/test/CodeGen/AArch64/arm64-memcpy-inline.ll b/test/CodeGen/AArch64/arm64-memcpy-inline.ll
new file mode 100644
index 000000000000..f921a592451d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-memcpy-inline.ll
@@ -0,0 +1,112 @@
+; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+
+%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
+
+@src = external global %struct.x
+@dst = external global %struct.x
+
+@.str1 = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, SOME STRING\00", align 1
+@.str2 = private unnamed_addr constant [36 x i8] c"DHRYSTONE PROGRAM, SOME STRING BLAH\00", align 1
+@.str3 = private unnamed_addr constant [24 x i8] c"DHRYSTONE PROGRAM, SOME\00", align 1
+@.str4 = private unnamed_addr constant [18 x i8] c"DHRYSTONE PROGR \00", align 1
+@.str5 = private unnamed_addr constant [7 x i8] c"DHRYST\00", align 1
+@.str6 = private unnamed_addr constant [14 x i8] c"/tmp/rmXXXXXX\00", align 1
+@spool.splbuf = internal global [512 x i8] zeroinitializer, align 16
+
+define i32 @t0() {
+entry:
+; CHECK-LABEL: t0:
+; CHECK: ldrb [[REG0:w[0-9]+]], [x[[BASEREG:[0-9]+]], #10]
+; CHECK: strb [[REG0]], [x[[BASEREG2:[0-9]+]], #10]
+; CHECK: ldrh [[REG1:w[0-9]+]], [x[[BASEREG]], #8]
+; CHECK: strh [[REG1]], [x[[BASEREG2]], #8]
+; CHECK: ldr [[REG2:x[0-9]+]],
+; CHECK: str [[REG2]],
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.x* @dst, i32 0, i32 0), i8* getelementptr inbounds (%struct.x* @src, i32 0, i32 0), i32 11, i32 8, i1 false)
+ ret i32 0
+}
+
+define void @t1(i8* nocapture %C) nounwind {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: ldur [[DEST:q[0-9]+]], [x[[BASEREG:[0-9]+]], #15]
+; CHECK: stur [[DEST]], [x0, #15]
+; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
+; CHECK: str [[DEST]], [x0]
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8]* @.str1, i64 0, i64 0), i64 31, i32 1, i1 false)
+ ret void
+}
+
+define void @t2(i8* nocapture %C) nounwind {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: movz [[REG3:w[0-9]+]]
+; CHECK: movk [[REG3]],
+; CHECK: str [[REG3]], [x0, #32]
+; CHECK: ldp [[DEST1:q[0-9]+]], [[DEST2:q[0-9]+]], [x{{[0-9]+}}]
+; CHECK: stp [[DEST1]], [[DEST2]], [x0]
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false)
+ ret void
+}
+
+define void @t3(i8* nocapture %C) nounwind {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: ldr [[REG4:x[0-9]+]], [x[[BASEREG:[0-9]+]], #16]
+; CHECK: str [[REG4]], [x0, #16]
+; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
+; CHECK: str [[DEST]], [x0]
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false)
+ ret void
+}
+
+define void @t4(i8* nocapture %C) nounwind {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: orr [[REG5:w[0-9]+]], wzr, #0x20
+; CHECK: strh [[REG5]], [x0, #16]
+; CHECK: ldr [[REG6:q[0-9]+]], [x{{[0-9]+}}]
+; CHECK: str [[REG6]], [x0]
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false)
+ ret void
+}
+
+define void @t5(i8* nocapture %C) nounwind {
+entry:
+; CHECK-LABEL: t5:
+; CHECK: strb wzr, [x0, #6]
+; CHECK: movz [[REG7:w[0-9]+]], #0x5453
+; CHECK: strh [[REG7]], [x0, #4]
+; CHECK: movz [[REG8:w[0-9]+]],
+; CHECK: movk [[REG8]],
+; CHECK: str [[REG8]], [x0]
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8]* @.str5, i64 0, i64 0), i64 7, i32 1, i1 false)
+ ret void
+}
+
+define void @t6() nounwind {
+entry:
+; CHECK-LABEL: t6:
+; CHECK: ldur [[REG9:x[0-9]+]], [x{{[0-9]+}}, #6]
+; CHECK: stur [[REG9]], [x{{[0-9]+}}, #6]
+; CHECK: ldr
+; CHECK: str
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8]* @.str6, i64 0, i64 0), i64 14, i32 1, i1 false)
+ ret void
+}
+
+%struct.Foo = type { i32, i32, i32, i32 }
+
+define void @t7(%struct.Foo* nocapture %a, %struct.Foo* nocapture %b) nounwind {
+entry:
+; CHECK: t7
+; CHECK: ldr [[REG10:q[0-9]+]], [x1]
+; CHECK: str [[REG10]], [x0]
+ %0 = bitcast %struct.Foo* %a to i8*
+ %1 = bitcast %struct.Foo* %b to i8*
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 16, i32 4, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
diff --git a/test/CodeGen/AArch64/arm64-memset-inline.ll b/test/CodeGen/AArch64/arm64-memset-inline.ll
new file mode 100644
index 000000000000..2e237f4a882d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define void @t1(i8* nocapture %c) nounwind optsize {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: str wzr, [x0, #8]
+; CHECK: str xzr, [x0]
+ call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false)
+ ret void
+}
+
+define void @t2() nounwind ssp {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: strh wzr, [sp, #32]
+; CHECK: stp xzr, xzr, [sp, #16]
+; CHECK: str xzr, [sp, #8]
+ %buf = alloca [26 x i8], align 1
+ %0 = getelementptr inbounds [26 x i8]* %buf, i32 0, i32 0
+ call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false)
+ call void @something(i8* %0) nounwind
+ ret void
+}
+
+declare void @something(i8*) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
diff --git a/test/CodeGen/AArch64/arm64-memset-to-bzero.ll b/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
new file mode 100644
index 000000000000..29036caabf3a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
@@ -0,0 +1,108 @@
+; RUN: llc %s -mtriple=arm64-apple-darwin -o - | \
+; RUN: FileCheck --check-prefix=CHECK-DARWIN --check-prefix=CHECK %s
+; RUN: llc %s -mtriple=arm64-linux-gnu -o - | \
+; RUN: FileCheck --check-prefix=CHECK-LINUX --check-prefix=CHECK %s
+; <rdar://problem/14199482> ARM64: Calls to bzero() replaced with calls to memset()
+
+; CHECK: @fct1
+; For small size (<= 256), we do not change memset to bzero.
+; CHECK: memset
+define void @fct1(i8* nocapture %ptr) {
+entry:
+ tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i32 1, i1 false)
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+; CHECK: @fct2
+; When the size is bigger than 256, the memset is changed into bzero.
+; CHECK-DARWIN: bzero
+; CHECK-LINUX: memset
+define void @fct2(i8* nocapture %ptr) {
+entry:
+ tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 257, i32 1, i1 false)
+ ret void
+}
+
+; CHECK: @fct3
+; For an unknown size, the memset is changed to bzero.
+; CHECK-DARWIN: bzero
+; CHECK-LINUX: memset
+define void @fct3(i8* nocapture %ptr, i32 %unknown) {
+entry:
+ %conv = sext i32 %unknown to i64
+ tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i32 1, i1 false)
+ ret void
+}
+
+; CHECK: @fct4
+; Size <= 256, no change.
+; CHECK: memset
+define void @fct4(i8* %ptr) {
+entry:
+ %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
+ %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 256, i64 %tmp)
+ ret void
+}
+
+declare i8* @__memset_chk(i8*, i32, i64, i64)
+
+declare i64 @llvm.objectsize.i64(i8*, i1)
+
+; CHECK: @fct5
+; Size > 256, change.
+; CHECK-DARWIN: bzero
+; CHECK-LINUX: memset
+define void @fct5(i8* %ptr) {
+entry:
+ %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
+ %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 257, i64 %tmp)
+ ret void
+}
+
+; CHECK: @fct6
+; Size = unknown, change.
+; CHECK-DARWIN: bzero
+; CHECK-LINUX: memset
+define void @fct6(i8* %ptr, i32 %unknown) {
+entry:
+ %conv = sext i32 %unknown to i64
+ %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
+ %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 %conv, i64 %tmp)
+ ret void
+}
+
+; The next functions check that memset is not turned into bzero
+; when the value being set is non-zero, regardless of the size.
+
+; CHECK: @fct7
+; memset with a non-zero value, no change.
+; CHECK: memset
+define void @fct7(i8* %ptr) {
+entry:
+ %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
+ %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 256, i64 %tmp)
+ ret void
+}
+
+; CHECK: @fct8
+; memset with a non-zero value, no change.
+; CHECK: memset
+define void @fct8(i8* %ptr) {
+entry:
+ %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
+ %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 257, i64 %tmp)
+ ret void
+}
+
+; CHECK: @fct9
+; memset with a non-zero value, no change.
+; CHECK: memset
+define void @fct9(i8* %ptr, i32 %unknown) {
+entry:
+ %conv = sext i32 %unknown to i64
+ %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
+ %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 %conv, i64 %tmp)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
new file mode 100644
index 000000000000..bc7ed7fbdf83
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
@@ -0,0 +1,203 @@
+; REQUIRES: asserts
+; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a53 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+;
+; The Cortex-A53 machine model will cause the MADD instruction to be scheduled
+; much higher than the ADD instructions in order to hide latency. When not
+; specifying a subtarget, the MADD will remain near the end of the block.
+;
+; CHECK: ********** MI Scheduling **********
+; CHECK: main
+; CHECK: *** Final schedule for BB#2 ***
+; CHECK: MADDWrrr
+; CHECK: ADDWri
+; CHECK: ********** INTERVALS **********
+@main.x = private unnamed_addr constant [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 4
+@main.y = private unnamed_addr constant [8 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2], align 4
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ %x = alloca [8 x i32], align 4
+ %y = alloca [8 x i32], align 4
+ %i = alloca i32, align 4
+ %xx = alloca i32, align 4
+ %yy = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = bitcast [8 x i32]* %x to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false)
+ %1 = bitcast [8 x i32]* %y to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false)
+ store i32 0, i32* %xx, align 4
+ store i32 0, i32* %yy, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %2 = load i32* %i, align 4
+ %cmp = icmp slt i32 %2, 8
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %3 = load i32* %i, align 4
+ %idxprom = sext i32 %3 to i64
+ %arrayidx = getelementptr inbounds [8 x i32]* %x, i32 0, i64 %idxprom
+ %4 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %4, 1
+ store i32 %add, i32* %xx, align 4
+ %5 = load i32* %xx, align 4
+ %add1 = add nsw i32 %5, 12
+ store i32 %add1, i32* %xx, align 4
+ %6 = load i32* %xx, align 4
+ %add2 = add nsw i32 %6, 23
+ store i32 %add2, i32* %xx, align 4
+ %7 = load i32* %xx, align 4
+ %add3 = add nsw i32 %7, 34
+ store i32 %add3, i32* %xx, align 4
+ %8 = load i32* %i, align 4
+ %idxprom4 = sext i32 %8 to i64
+ %arrayidx5 = getelementptr inbounds [8 x i32]* %y, i32 0, i64 %idxprom4
+ %9 = load i32* %arrayidx5, align 4
+ %10 = load i32* %yy, align 4
+ %mul = mul nsw i32 %10, %9
+ store i32 %mul, i32* %yy, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %11 = load i32* %i, align 4
+ %inc = add nsw i32 %11, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %12 = load i32* %xx, align 4
+ %13 = load i32* %yy, align 4
+ %add6 = add nsw i32 %12, %13
+ ret i32 %add6
+}
+
+
+; The Cortex-A53 machine model will cause the FDIVv4f32 to be scheduled
+; earlier (raised) to hide its latency. Whereas normally there would only be
+; a single FADDv4f32 after it, this test checks that more than one appears.
+;
+; CHECK: ********** MI Scheduling **********
+; CHECK: neon4xfloat:BB#0
+; CHECK: *** Final schedule for BB#0 ***
+; CHECK: FDIVv4f32
+; CHECK: FADDv4f32
+; CHECK: FADDv4f32
+; CHECK: ********** INTERVALS **********
+define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) {
+ %tmp1 = fadd <4 x float> %A, %B;
+ %tmp2 = fadd <4 x float> %A, %tmp1;
+ %tmp3 = fadd <4 x float> %A, %tmp2;
+ %tmp4 = fadd <4 x float> %A, %tmp3;
+ %tmp5 = fadd <4 x float> %A, %tmp4;
+ %tmp6 = fadd <4 x float> %A, %tmp5;
+ %tmp7 = fadd <4 x float> %A, %tmp6;
+ %tmp8 = fadd <4 x float> %A, %tmp7;
+ %tmp9 = fdiv <4 x float> %A, %B;
+ %tmp10 = fadd <4 x float> %tmp8, %tmp9;
+
+ ret <4 x float> %tmp10
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+
+; Regression Test for PR19761
+; [ARM64] Cortex-a53 schedule mode can't handle NEON post-increment load
+;
+; Nothing explicit to check other than llc not crashing.
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(i8* %A, i8** %ptr) {
+ %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
+ %tmp = getelementptr i8* %A, i32 32
+ store i8* %tmp, i8** %ptr
+ ret { <16 x i8>, <16 x i8> } %ld2
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
+
+; Regression Test for PR20057.
+;
+; Cortex-A53 machine model stalls on A53UnitFPMDS contention. Instructions that
+; are otherwise ready are jammed in the pending queue.
+; CHECK: ********** MI Scheduling **********
+; CHECK: testResourceConflict
+; CHECK: *** Final schedule for BB#0 ***
+; CHECK: BRK
+; CHECK: ********** INTERVALS **********
+define void @testResourceConflict(float* %ptr) {
+entry:
+ %add1 = fadd float undef, undef
+ %mul2 = fmul float undef, undef
+ %add3 = fadd float %mul2, undef
+ %mul4 = fmul float undef, %add3
+ %add5 = fadd float %mul4, undef
+ %sub6 = fsub float 0.000000e+00, undef
+ %sub7 = fsub float %add5, undef
+ %div8 = fdiv float 1.000000e+00, undef
+ %mul9 = fmul float %div8, %sub7
+ %mul14 = fmul float %sub6, %div8
+ %mul10 = fsub float -0.000000e+00, %mul14
+ %mul15 = fmul float undef, %div8
+ %mul11 = fsub float -0.000000e+00, %mul15
+ %mul12 = fmul float 0.000000e+00, %div8
+ %mul13 = fmul float %add1, %mul9
+ %mul21 = fmul float %add5, %mul11
+ %add22 = fadd float %mul13, %mul21
+ store float %add22, float* %ptr, align 4
+ %mul28 = fmul float %add1, %mul10
+ %mul33 = fmul float %add5, %mul12
+ %add34 = fadd float %mul33, %mul28
+ store float %add34, float* %ptr, align 4
+ %mul240 = fmul float undef, %mul9
+ %add246 = fadd float %mul240, undef
+ store float %add246, float* %ptr, align 4
+ %mul52 = fmul float undef, %mul10
+ %mul57 = fmul float undef, %mul12
+ %add58 = fadd float %mul57, %mul52
+ store float %add58, float* %ptr, align 4
+ %mul27 = fmul float 0.000000e+00, %mul9
+ %mul81 = fmul float undef, %mul10
+ %add82 = fadd float %mul27, %mul81
+ store float %add82, float* %ptr, align 4
+ call void @llvm.trap()
+ unreachable
+}
+
+declare void @llvm.trap()
+
+; Regression test for PR20057: "permanent hazard"
+; Resource contention on LDST.
+; CHECK: ********** MI Scheduling **********
+; CHECK: testLdStConflict
+; CHECK: *** Final schedule for BB#1 ***
+; CHECK: LD4Fourv2d
+; CHECK: STRQui
+; CHECK: ********** INTERVALS **********
+define void @testLdStConflict() {
+entry:
+ br label %loop
+
+loop:
+ %0 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8* null)
+ %ptr = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr, align 4
+ %ptr1 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr1, align 4
+ %ptr2 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr2, align 4
+ %ptr3 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr3, align 4
+ %ptr4 = bitcast i8* undef to <2 x i64>*
+ store <2 x i64> zeroinitializer, <2 x i64>* %ptr4, align 4
+ br label %loop
+}
+
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8*)
diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
new file mode 100644
index 000000000000..238474a12c65
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
@@ -0,0 +1,112 @@
+; REQUIRES: asserts
+;
+; The Cortex-A57 machine model will avoid scheduling load instructions in
+; succession because loads on the A57 have a latency of 4 cycles and they all
+; issue to the same pipeline. Instead, it will move other instructions between
+; the loads to avoid unnecessary stalls. The generic machine model schedules 4
+; loads consecutively for this case and will cause stalls.
+;
+; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+; CHECK: ********** MI Scheduling **********
+; CHECK: main:BB#2
+; CHECK: LDR
+; CHECK: Latency : 4
+; CHECK: *** Final schedule for BB#2 ***
+; CHECK: LDR
+; CHECK: LDR
+; CHECK-NOT: LDR
+; CHECK: {{.*}}
+; CHECK: ********** MI Scheduling **********
+
+@main.x = private unnamed_addr constant [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 4
+@main.y = private unnamed_addr constant [8 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2], align 4
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ %x = alloca [8 x i32], align 4
+ %y = alloca [8 x i32], align 4
+ %i = alloca i32, align 4
+ %xx = alloca i32, align 4
+ %yy = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = bitcast [8 x i32]* %x to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false)
+ %1 = bitcast [8 x i32]* %y to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false)
+ store i32 0, i32* %xx, align 4
+ store i32 0, i32* %yy, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %2 = load i32* %i, align 4
+ %cmp = icmp slt i32 %2, 8
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %3 = load i32* %yy, align 4
+ %4 = load i32* %i, align 4
+ %idxprom = sext i32 %4 to i64
+ %arrayidx = getelementptr inbounds [8 x i32]* %x, i32 0, i64 %idxprom
+ %5 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %5, 1
+ store i32 %add, i32* %xx, align 4
+ %6 = load i32* %xx, align 4
+ %add1 = add nsw i32 %6, 12
+ store i32 %add1, i32* %xx, align 4
+ %7 = load i32* %xx, align 4
+ %add2 = add nsw i32 %7, 23
+ store i32 %add2, i32* %xx, align 4
+ %8 = load i32* %xx, align 4
+ %add3 = add nsw i32 %8, 34
+ store i32 %add3, i32* %xx, align 4
+ %9 = load i32* %i, align 4
+ %idxprom4 = sext i32 %9 to i64
+ %arrayidx5 = getelementptr inbounds [8 x i32]* %y, i32 0, i64 %idxprom4
+ %10 = load i32* %arrayidx5, align 4
+
+ %add4 = add nsw i32 %9, %add
+ %add5 = add nsw i32 %10, %add1
+ %add6 = add nsw i32 %add4, %add5
+
+ %add7 = add nsw i32 %9, %add3
+ %add8 = add nsw i32 %10, %add4
+ %add9 = add nsw i32 %add7, %add8
+
+ %add10 = add nsw i32 %9, %add6
+ %add11 = add nsw i32 %10, %add7
+ %add12 = add nsw i32 %add10, %add11
+
+ %add13 = add nsw i32 %9, %add9
+ %add14 = add nsw i32 %10, %add10
+ %add15 = add nsw i32 %add13, %add14
+
+ store i32 %add15, i32* %xx, align 4
+
+ %div = sdiv i32 %4, %5
+
+ store i32 %div, i32* %yy, align 4
+
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %11 = load i32* %i, align 4
+ %inc = add nsw i32 %11, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %12 = load i32* %xx, align 4
+ %13 = load i32* %yy, align 4
+ %add67 = add nsw i32 %12, %13
+ ret i32 %add67
+}
+
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
new file mode 100644
index 000000000000..07373ccedc5b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
@@ -0,0 +1,22 @@
+; REQUIRES: asserts
+; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a53 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+;
+; For Cortex-A53, shiftable operands that are not actually shifted
+; are not needed for an additional two cycles.
+;
+; CHECK: ********** MI Scheduling **********
+; CHECK: shiftable
+; CHECK: SU(2): %vreg2<def> = SUBXri %vreg1, 20, 0
+; CHECK: Successors:
+; CHECK-NEXT: val SU(4): Latency=1 Reg=%vreg2
+; CHECK-NEXT: val SU(3): Latency=2 Reg=%vreg2
+; CHECK: ********** INTERVALS **********
+define i64 @shiftable(i64 %A, i64 %B) {
+ %tmp0 = sub i64 %B, 20
+ %tmp1 = shl i64 %tmp0, 5;
+ %tmp2 = add i64 %A, %tmp1;
+ %tmp3 = add i64 %A, %tmp0
+ %tmp4 = mul i64 %tmp2, %tmp3
+
+ ret i64 %tmp4
+}
diff --git a/test/CodeGen/AArch64/arm64-movi.ll b/test/CodeGen/AArch64/arm64-movi.ll
new file mode 100644
index 000000000000..2cd368d909dc
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-movi.ll
@@ -0,0 +1,202 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+;==--------------------------------------------------------------------------==
+; Tests for MOV-immediate implemented with ORR-immediate.
+;==--------------------------------------------------------------------------==
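+
+; A logical (bitmask) immediate is a rotated run of ones in an element of
+; 2, 4, 8, 16, 32 or 64 bits, replicated to fill the register. Worked example
+; (explanatory note only, not a FileCheck directive): #0x700000007 is the
+; 32-bit element 0x00000007 (three ones, rotated by 0) replicated twice, i.e.
+; 7 * 2^32 + 7 = 30064771079, which is exactly what test64_32_rot0 returns.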
+
+; 64-bit immed with 32-bit pattern size, rotated by 0.
+define i64 @test64_32_rot0() nounwind {
+; CHECK-LABEL: test64_32_rot0:
+; CHECK: orr x0, xzr, #0x700000007
+ ret i64 30064771079
+}
+
+; 64-bit immed with 32-bit pattern size, rotated by 2.
+define i64 @test64_32_rot2() nounwind {
+; CHECK-LABEL: test64_32_rot2:
+; CHECK: orr x0, xzr, #0xc0000003c0000003
+ ret i64 13835058071388291075
+}
+
+; 64-bit immed with 4-bit pattern size, rotated by 3.
+define i64 @test64_4_rot3() nounwind {
+; CHECK-LABEL: test64_4_rot3:
+; CHECK: orr x0, xzr, #0xeeeeeeeeeeeeeeee
+ ret i64 17216961135462248174
+}
+
+; 32-bit immed with 32-bit pattern size, rotated by 16.
+define i32 @test32_32_rot16() nounwind {
+; CHECK-LABEL: test32_32_rot16:
+; CHECK: orr w0, wzr, #0xff0000
+ ret i32 16711680
+}
+
+; 32-bit immed with 2-bit pattern size, rotated by 1.
+define i32 @test32_2_rot1() nounwind {
+; CHECK-LABEL: test32_2_rot1:
+; CHECK: orr w0, wzr, #0xaaaaaaaa
+ ret i32 2863311530
+}
+
+;==--------------------------------------------------------------------------==
+; Tests for MOVZ with MOVK.
+;==--------------------------------------------------------------------------==
+
+define i32 @movz() nounwind {
+; CHECK-LABEL: movz:
+; CHECK: movz w0, #0x5
+ ret i32 5
+}
+
+define i64 @movz_3movk() nounwind {
+; CHECK-LABEL: movz_3movk:
+; CHECK: movz x0, #0x5, lsl #48
+; CHECK-NEXT: movk x0, #0x1234, lsl #32
+; CHECK-NEXT: movk x0, #0xabcd, lsl #16
+; CHECK-NEXT: movk x0, #0x5678
+ ret i64 1427392313513592
+}
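+
+; Worked example for movz_3movk above (explanatory note only, not a FileCheck
+; directive): 1427392313513592 is 0x0005_1234_abcd_5678, so it is built with
+; movz #0x5, lsl #48 followed by movk #0x1234, lsl #32, movk #0xabcd, lsl #16
+; and movk #0x5678, each movk filling in one 16-bit field of the register.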
+
+define i64 @movz_movk_skip1() nounwind {
+; CHECK-LABEL: movz_movk_skip1:
+; CHECK: movz x0, #0x5, lsl #32
+; CHECK-NEXT: movk x0, #0x4321, lsl #16
+ ret i64 22601072640
+}
+
+define i64 @movz_skip1_movk() nounwind {
+; CHECK-LABEL: movz_skip1_movk:
+; CHECK: movz x0, #0x8654, lsl #32
+; CHECK-NEXT: movk x0, #0x1234
+ ret i64 147695335379508
+}
+
+;==--------------------------------------------------------------------------==
+; Tests for MOVN with MOVK.
+;==--------------------------------------------------------------------------==
+
+define i64 @movn() nounwind {
+; CHECK-LABEL: movn:
+; CHECK: movn x0, #0x29
+ ret i64 -42
+}
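+
+; Explanatory note (not a FileCheck directive): MOVN materializes the bitwise
+; NOT of its (optionally shifted) 16-bit immediate, so movn x0, #0x29 above
+; yields ~0x29 = 0xffffffffffffffd6, i.e. -42.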
+
+define i64 @movn_skip1_movk() nounwind {
+; CHECK-LABEL: movn_skip1_movk:
+; CHECK: movn x0, #0x29, lsl #32
+; CHECK-NEXT: movk x0, #0x1234
+ ret i64 -176093720012
+}
+
+;==--------------------------------------------------------------------------==
+; Tests for ORR with MOVK.
+;==--------------------------------------------------------------------------==
+; rdar://14987673
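+
+; Explanatory note (not a FileCheck directive): MOVK inserts its 16-bit
+; immediate at the given shift position and leaves the remaining bits
+; unchanged, so an ORR of a bitmask immediate followed by one or two MOVKs can
+; materialize constants that are a repeating pattern with a few halfwords
+; overridden, as in the tests below.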
+
+define i64 @orr_movk1() nounwind {
+; CHECK-LABEL: orr_movk1:
+; CHECK: orr x0, xzr, #0xffff0000ffff0
+; CHECK: movk x0, #0xdead, lsl #16
+ ret i64 72056498262245120
+}
+
+define i64 @orr_movk2() nounwind {
+; CHECK-LABEL: orr_movk2:
+; CHECK: orr x0, xzr, #0xffff0000ffff0
+; CHECK: movk x0, #0xdead, lsl #48
+ ret i64 -2400982650836746496
+}
+
+define i64 @orr_movk3() nounwind {
+; CHECK-LABEL: orr_movk3:
+; CHECK: orr x0, xzr, #0xffff0000ffff0
+; CHECK: movk x0, #0xdead, lsl #32
+ ret i64 72020953688702720
+}
+
+define i64 @orr_movk4() nounwind {
+; CHECK-LABEL: orr_movk4:
+; CHECK: orr x0, xzr, #0xffff0000ffff0
+; CHECK: movk x0, #0xdead
+ ret i64 72056494543068845
+}
+
+; rdar://14987618
+define i64 @orr_movk5() nounwind {
+; CHECK-LABEL: orr_movk5:
+; CHECK: orr x0, xzr, #0xff00ff00ff00ff00
+; CHECK: movk x0, #0xdead, lsl #16
+ ret i64 -71777214836900096
+}
+
+define i64 @orr_movk6() nounwind {
+; CHECK-LABEL: orr_movk6:
+; CHECK: orr x0, xzr, #0xff00ff00ff00ff00
+; CHECK: movk x0, #0xdead, lsl #16
+; CHECK: movk x0, #0xdead, lsl #48
+ ret i64 -2400982647117578496
+}
+
+define i64 @orr_movk7() nounwind {
+; CHECK-LABEL: orr_movk7:
+; CHECK: orr x0, xzr, #0xff00ff00ff00ff00
+; CHECK: movk x0, #0xdead, lsl #48
+ ret i64 -2400982646575268096
+}
+
+define i64 @orr_movk8() nounwind {
+; CHECK-LABEL: orr_movk8:
+; CHECK: orr x0, xzr, #0xff00ff00ff00ff00
+; CHECK: movk x0, #0xdead
+; CHECK: movk x0, #0xdead, lsl #48
+ ret i64 -2400982646575276371
+}
+
+; rdar://14987715
+define i64 @orr_movk9() nounwind {
+; CHECK-LABEL: orr_movk9:
+; CHECK: orr x0, xzr, #0xffffff000000000
+; CHECK: movk x0, #0xff00
+; CHECK: movk x0, #0xdead, lsl #16
+ ret i64 1152921439623315200
+}
+
+define i64 @orr_movk10() nounwind {
+; CHECK-LABEL: orr_movk10:
+; CHECK: orr x0, xzr, #0xfffffffffffff00
+; CHECK: movk x0, #0xdead, lsl #16
+ ret i64 1152921504047824640
+}
+
+define i64 @orr_movk11() nounwind {
+; CHECK-LABEL: orr_movk11:
+; CHECK: orr x0, xzr, #0xfff00000000000ff
+; CHECK: movk x0, #0xdead, lsl #16
+; CHECK: movk x0, #0xffff, lsl #32
+ ret i64 -4222125209747201
+}
+
+define i64 @orr_movk12() nounwind {
+; CHECK-LABEL: orr_movk12:
+; CHECK: orr x0, xzr, #0xfff00000000000ff
+; CHECK: movk x0, #0xdead, lsl #32
+ ret i64 -4258765016661761
+}
+
+define i64 @orr_movk13() nounwind {
+; CHECK-LABEL: orr_movk13:
+; CHECK: orr x0, xzr, #0xfffff000000
+; CHECK: movk x0, #0xdead
+; CHECK: movk x0, #0xdead, lsl #48
+ ret i64 -2401245434149282131
+}
+
+; rdar://13944082
+define i64 @g() nounwind {
+; CHECK-LABEL: g:
+; CHECK: movz x0, #0xffff, lsl #48
+; CHECK: movk x0, #0x2
+entry:
+ ret i64 -281474976710654
+}
diff --git a/test/CodeGen/AArch64/arm64-mul.ll b/test/CodeGen/AArch64/arm64-mul.ll
new file mode 100644
index 000000000000..2e7986d67d9e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-mul.ll
@@ -0,0 +1,90 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+; rdar://9296808
+; rdar://9349137
+
+define i128 @t1(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: mul {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: umulh {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp1 = zext i64 %a to i128
+ %tmp2 = zext i64 %b to i128
+ %tmp3 = mul i128 %tmp1, %tmp2
+ ret i128 %tmp3
+}
+
+define i128 @t2(i64 %a, i64 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: mul {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: smulh {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp1 = sext i64 %a to i128
+ %tmp2 = sext i64 %b to i128
+ %tmp3 = mul i128 %tmp1, %tmp2
+ ret i128 %tmp3
+}
+
+define i64 @t3(i32 %a, i32 %b) nounwind {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: umull {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp1 = zext i32 %a to i64
+ %tmp2 = zext i32 %b to i64
+ %tmp3 = mul i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+define i64 @t4(i32 %a, i32 %b) nounwind {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: smull {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp1 = sext i32 %a to i64
+ %tmp2 = sext i32 %b to i64
+ %tmp3 = mul i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+define i64 @t5(i32 %a, i32 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: t5:
+; CHECK: umaddl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}}
+ %tmp1 = zext i32 %a to i64
+ %tmp2 = zext i32 %b to i64
+ %tmp3 = mul i64 %tmp1, %tmp2
+ %tmp4 = add i64 %c, %tmp3
+ ret i64 %tmp4
+}
+
+define i64 @t6(i32 %a, i32 %b, i64 %c) nounwind {
+entry:
+; CHECK-LABEL: t6:
+; CHECK: smsubl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}}
+ %tmp1 = sext i32 %a to i64
+ %tmp2 = sext i32 %b to i64
+ %tmp3 = mul i64 %tmp1, %tmp2
+ %tmp4 = sub i64 %c, %tmp3
+ ret i64 %tmp4
+}
+
+define i64 @t7(i32 %a, i32 %b) nounwind {
+entry:
+; CHECK-LABEL: t7:
+; CHECK: umnegl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp1 = zext i32 %a to i64
+ %tmp2 = zext i32 %b to i64
+ %tmp3 = mul i64 %tmp1, %tmp2
+ %tmp4 = sub i64 0, %tmp3
+ ret i64 %tmp4
+}
+
+define i64 @t8(i32 %a, i32 %b) nounwind {
+entry:
+; CHECK-LABEL: t8:
+; CHECK: smnegl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp1 = sext i32 %a to i64
+ %tmp2 = sext i32 %b to i64
+ %tmp3 = mul i64 %tmp1, %tmp2
+ %tmp4 = sub i64 0, %tmp3
+ ret i64 %tmp4
+}
diff --git a/test/CodeGen/AArch64/arm64-named-reg-alloc.ll b/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
new file mode 100644
index 000000000000..d86d2e617ee5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
@@ -0,0 +1,14 @@
+; RUN: not llc < %s -mtriple=arm64-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=arm64-linux-gnueabi 2>&1 | FileCheck %s
+
+define i32 @get_stack() nounwind {
+entry:
+; FIXME: Include an allocatable-specific error message
+; CHECK: Invalid register name global variable
+ %sp = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %sp
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"x5\00"}
diff --git a/test/CodeGen/AArch64/arm64-named-reg-notareg.ll b/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
new file mode 100644
index 000000000000..3ca14c408f4b
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
@@ -0,0 +1,13 @@
+; RUN: not llc < %s -mtriple=arm64-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=arm64-linux-gnueabi 2>&1 | FileCheck %s
+
+define i32 @get_stack() nounwind {
+entry:
+; CHECK: Invalid register name global variable
+ %sp = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %sp
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"notareg\00"}
diff --git a/test/CodeGen/AArch64/arm64-neg.ll b/test/CodeGen/AArch64/arm64-neg.ll
new file mode 100644
index 000000000000..659ce988a706
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neg.ll
@@ -0,0 +1,71 @@
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
+
+define i32 @test_neg_i32(i32 %in) {
+; CHECK-LABEL: test_neg_i32:
+; CHECK: neg w0, w0
+ %res = sub i32 0, %in
+ ret i32 %res
+}
+
+define i64 @test_neg_i64(i64 %in) {
+; CHECK-LABEL: test_neg_i64:
+; CHECK: neg x0, x0
+ %res = sub i64 0, %in
+ ret i64 %res
+}
+
+define <8 x i8> @test_neg_v8i8(<8 x i8> %in) {
+; CHECK-LABEL: test_neg_v8i8:
+; CHECK: neg v0.8b, v0.8b
+ %res = sub <8 x i8> zeroinitializer, %in
+ ret <8 x i8> %res
+}
+
+define <4 x i16> @test_neg_v4i16(<4 x i16> %in) {
+; CHECK-LABEL: test_neg_v4i16:
+; CHECK: neg v0.4h, v0.4h
+ %res = sub <4 x i16> zeroinitializer, %in
+ ret <4 x i16> %res
+}
+
+define <2 x i32> @test_neg_v2i32(<2 x i32> %in) {
+; CHECK-LABEL: test_neg_v2i32:
+; CHECK: neg v0.2s, v0.2s
+ %res = sub <2 x i32> zeroinitializer, %in
+ ret <2 x i32> %res
+}
+
+define <16 x i8> @test_neg_v16i8(<16 x i8> %in) {
+; CHECK-LABEL: test_neg_v16i8:
+; CHECK: neg v0.16b, v0.16b
+ %res = sub <16 x i8> zeroinitializer, %in
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_neg_v8i16(<8 x i16> %in) {
+; CHECK-LABEL: test_neg_v8i16:
+; CHECK: neg v0.8h, v0.8h
+ %res = sub <8 x i16> zeroinitializer, %in
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_neg_v4i32(<4 x i32> %in) {
+; CHECK-LABEL: test_neg_v4i32:
+; CHECK: neg v0.4s, v0.4s
+ %res = sub <4 x i32> zeroinitializer, %in
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_neg_v2i64(<2 x i64> %in) {
+; CHECK-LABEL: test_neg_v2i64:
+; CHECK: neg v0.2d, v0.2d
+ %res = sub <2 x i64> zeroinitializer, %in
+ ret <2 x i64> %res
+}
+
+define <1 x i64> @test_neg_v1i64(<1 x i64> %in) {
+; CHECK-LABEL: test_neg_v1i64:
+; CHECK: neg d0, d0
+ %res = sub <1 x i64> zeroinitializer, %in
+ ret <1 x i64> %res
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-2velem-high.ll b/test/CodeGen/AArch64/arm64-neon-2velem-high.ll
new file mode 100644
index 000000000000..58df094d1922
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-2velem-high.ll
@@ -0,0 +1,341 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
+
+define <4 x i32> @test_vmull_high_n_s16(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: test_vmull_high_n_s16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+ %vmull15.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ ret <4 x i32> %vmull15.i.i
+}
+
+define <2 x i64> @test_vmull_high_n_s32(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: test_vmull_high_n_s32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+ %vmull9.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ ret <2 x i64> %vmull9.i.i
+}
+
+define <4 x i32> @test_vmull_high_n_u16(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: test_vmull_high_n_u16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+ %vmull15.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ ret <4 x i32> %vmull15.i.i
+}
+
+define <2 x i64> @test_vmull_high_n_u32(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: test_vmull_high_n_u32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+ %vmull9.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ ret <2 x i64> %vmull9.i.i
+}
+
+define <4 x i32> @test_vqdmull_high_n_s16(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: test_vqdmull_high_n_s16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+ %vqdmull15.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ ret <4 x i32> %vqdmull15.i.i
+}
+
+define <2 x i64> @test_vqdmull_high_n_s32(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: test_vqdmull_high_n_s32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+ %vqdmull9.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ ret <2 x i64> %vqdmull9.i.i
+}
+
+define <4 x i32> @test_vmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlal_high_n_s16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlal_high_n_s32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <4 x i32> @test_vmlal_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlal_high_n_u16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlal_high_n_u32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <4 x i32> @test_vqdmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vqdmlal_high_n_s16:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vqdmlal15.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %vqdmlal17.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal15.i.i)
+ ret <4 x i32> %vqdmlal17.i.i
+}
+
+define <2 x i64> @test_vqdmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vqdmlal_high_n_s32:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vqdmlal9.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %vqdmlal11.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal9.i.i)
+ ret <2 x i64> %vqdmlal11.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vqdmlsl_high_n_s16:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vqdmlsl15.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %vqdmlsl17.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.i.i)
+ ret <4 x i32> %vqdmlsl17.i.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vqdmlsl_high_n_s32:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vqdmlsl9.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %vqdmlsl11.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i.i)
+ ret <2 x i64> %vqdmlsl11.i.i
+}
+
+define <2 x float> @test_vmul_n_f32(<2 x float> %a, float %b) {
+; CHECK-LABEL: test_vmul_n_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %vecinit.i = insertelement <2 x float> undef, float %b, i32 0
+ %vecinit1.i = insertelement <2 x float> %vecinit.i, float %b, i32 1
+ %mul.i = fmul <2 x float> %vecinit1.i, %a
+ ret <2 x float> %mul.i
+}
+
+define <4 x float> @test_vmulq_n_f32(<4 x float> %a, float %b) {
+; CHECK-LABEL: test_vmulq_n_f32:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %vecinit.i = insertelement <4 x float> undef, float %b, i32 0
+ %vecinit1.i = insertelement <4 x float> %vecinit.i, float %b, i32 1
+ %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %b, i32 2
+ %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %b, i32 3
+ %mul.i = fmul <4 x float> %vecinit3.i, %a
+ ret <4 x float> %mul.i
+}
+
+define <2 x double> @test_vmulq_n_f64(<2 x double> %a, double %b) {
+; CHECK-LABEL: test_vmulq_n_f64:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %vecinit.i = insertelement <2 x double> undef, double %b, i32 0
+ %vecinit1.i = insertelement <2 x double> %vecinit.i, double %b, i32 1
+ %mul.i = fmul <2 x double> %vecinit1.i, %a
+ ret <2 x double> %mul.i
+}
+
+define <2 x float> @test_vfma_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
+; CHECK-LABEL: test_vfma_n_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %b, <2 x float> %vecinit1.i, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
+; CHECK-LABEL: test_vfmaq_n_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
+ %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
+ %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %b, <4 x float> %vecinit3.i, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
+; CHECK-LABEL: test_vfms_n_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
+ %0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %b
+ %1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %0, <2 x float> %vecinit1.i, <2 x float> %a)
+ ret <2 x float> %1
+}
+
+define <4 x float> @test_vfmsq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
+; CHECK-LABEL: test_vfmsq_n_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
+ %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
+ %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
+ %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
+ %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %vecinit3.i, <4 x float> %a)
+ ret <4 x float> %1
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-2velem.ll b/test/CodeGen/AArch64/arm64-neon-2velem.ll
new file mode 100644
index 000000000000..869966caa3ae
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-2velem.ll
@@ -0,0 +1,2853 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double>, <2 x double>)
+
+declare <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float>, <4 x float>)
+
+declare <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float>, <2 x float>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32>, <2 x i32>)
+
+declare <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>)
+
+declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_vmla_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmla_lane_s16:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlaq_lane_s16:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmla_lane_s32:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlaq_lane_s32:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmla_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmla_laneq_s16:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlaq_laneq_s16:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmla_laneq_s32:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlaq_laneq_s32:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmls_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmls_lane_s16:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s16:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmls_lane_s32:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s32:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmls_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmls_laneq_s16:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s16:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmls_laneq_s32:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s32:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmul_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmul_lane_s16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmulq_lane_s16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmul_lane_s32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmulq_lane_s32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_lane_u16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmul_lane_u16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_u16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmulq_lane_u16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_u32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmul_lane_u32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_u32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmulq_lane_u32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmul_laneq_s16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmulq_laneq_s16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmul_laneq_s32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmulq_laneq_s32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmul_laneq_u16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmulq_laneq_u16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmul_laneq_u32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmulq_laneq_u32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <2 x float> @test_vfma_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfma_lane_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
+
+define <4 x float> @test_vfmaq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfmaq_lane_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+define <2 x float> @test_vfma_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfma_laneq_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmaq_laneq_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfms_lane_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfmsq_lane_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfms_laneq_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x double> @test_vfmaq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
+; CHECK-LABEL: test_vfmaq_lane_f64:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x double> @test_vfmaq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmaq_laneq_f64:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
+; CHECK-LABEL: test_vfmsq_lane_f64:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <1 x double> <double -0.000000e+00>, %v
+ %lane = shufflevector <1 x double> %sub, <1 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f64:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
+ %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define float @test_vfmas_laneq_f32(float %a, float %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmas_laneq_f32
+; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %extract = extractelement <4 x float> %v, i32 3
+ %0 = tail call float @llvm.fma.f32(float %b, float %extract, float %a)
+ ret float %0
+}
+
+declare float @llvm.fma.f32(float, float, float)
+
+define double @test_vfmsd_lane_f64(double %a, double %b, <1 x double> %v) {
+; CHECK-LABEL: test_vfmsd_lane_f64
+; CHECK: fmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NEXT: ret
+entry:
+ %extract.rhs = extractelement <1 x double> %v, i32 0
+ %extract = fsub double -0.000000e+00, %extract.rhs
+ %0 = tail call double @llvm.fma.f64(double %b, double %extract, double %a)
+ ret double %0
+}
+
+declare double @llvm.fma.f64(double, double, double)
+
+define float @test_vfmss_laneq_f32(float %a, float %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmss_laneq_f32:
+; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %extract.rhs = extractelement <4 x float> %v, i32 3
+ %extract = fsub float -0.000000e+00, %extract.rhs
+ %0 = tail call float @llvm.fma.f32(float %b, float %extract, float %a)
+ ret float %0
+}
+
+define double @test_vfmsd_laneq_f64(double %a, double %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmsd_laneq_f64:
+; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+ %extract.rhs = extractelement <2 x double> %v, i32 1
+ %extract = fsub double -0.000000e+00, %extract.rhs
+ %0 = tail call double @llvm.fma.f64(double %b, double %extract, double %a)
+ ret double %0
+}
+
+define <4 x i32> @test_vmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_lane_s16:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_lane_s32:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_laneq_s16:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_laneq_s32:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_s16:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_s32:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_s16:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_s32:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_lane_s16:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_lane_s32:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_s16:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_s32:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_s16:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_s32:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_s16:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_s32:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlal_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_lane_u16:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_lane_u32:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_laneq_u16:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_laneq_u32:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_u16:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_u32:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_u16:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_u32:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_lane_u16:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_lane_u32:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_u16:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_u32:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_u16:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_u32:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u16:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u32:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmull_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_s16:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_s32:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_lane_u16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_u16:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_u32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_u32:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s16:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s32:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_u16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u16:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_u32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u32:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_s16:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_s32:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_u16:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_u32:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s16:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s32:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u16:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u32:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vqdmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s16:
+; CHECK: qdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlal2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s32:
+; CHECK: qdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlal2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s16:
+; CHECK: qdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlal2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s32:
+; CHECK: qdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlal2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlsl_lane_s16:
+; CHECK: qdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlsl_lane_s32:
+; CHECK: qdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlsl_high_lane_s16:
+; CHECK: qdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlsl_high_lane_s32:
+; CHECK: qdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmull_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_lane_s16:
+; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_lane_s32:
+; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_laneq_s16:
+; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_laneq_s32:
+; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_high_lane_s16:
+; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_high_lane_s32:
+; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_high_laneq_s16:
+; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_high_laneq_s32:
+; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i16> @test_vqdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmulh_lane_s16:
+; CHECK: qdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmulh2.i = tail call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqdmulh2.i
+}
+
+define <8 x i16> @test_vqdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmulhq_lane_s16:
+; CHECK: qdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %vqdmulh2.i = tail call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqdmulh2.i
+}
+
+define <2 x i32> @test_vqdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmulh_lane_s32:
+; CHECK: qdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmulh2.i = tail call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqdmulh2.i
+}
+
+define <4 x i32> @test_vqdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmulhq_lane_s32:
+; CHECK: qdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vqdmulh2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqdmulh2.i
+}
+
+define <4 x i16> @test_vqrdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqrdmulh_lane_s16:
+; CHECK: qrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqrdmulh2.i = tail call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqrdmulh2.i
+}
+
+define <8 x i16> @test_vqrdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqrdmulhq_lane_s16:
+; CHECK: qrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %vqrdmulh2.i = tail call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqrdmulh2.i
+}
+
+define <2 x i32> @test_vqrdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqrdmulh_lane_s32:
+; CHECK: qrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqrdmulh2.i = tail call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqrdmulh2.i
+}
+
+define <4 x i32> @test_vqrdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqrdmulhq_lane_s32:
+; CHECK: qrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vqrdmulh2.i = tail call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqrdmulh2.i
+}
+
+define <2 x float> @test_vmul_lane_f32(<2 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmul_lane_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <1 x double> @test_vmul_lane_f64(<1 x double> %a, <1 x double> %v) {
+; CHECK-LABEL: test_vmul_lane_f64:
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NEXT: ret
+entry:
+ %0 = bitcast <1 x double> %a to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to double
+ %extract = extractelement <1 x double> %v, i32 0
+ %2 = fmul double %1, %extract
+ %3 = insertelement <1 x double> undef, double %2, i32 0
+ ret <1 x double> %3
+}
+
+define <4 x float> @test_vmulq_lane_f32(<4 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulq_lane_f32:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x double> @test_vmulq_lane_f64(<2 x double> %a, <1 x double> %v) {
+; CHECK-LABEL: test_vmulq_lane_f64:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x double> %shuffle, %a
+ ret <2 x double> %mul
+}
+
+define <2 x float> @test_vmul_laneq_f32(<2 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmul_laneq_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <1 x double> @test_vmul_laneq_f64(<1 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmul_laneq_f64:
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+ %0 = bitcast <1 x double> %a to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to double
+ %extract = extractelement <2 x double> %v, i32 1
+ %2 = fmul double %1, %extract
+ %3 = insertelement <1 x double> undef, double %2, i32 0
+ ret <1 x double> %3
+}
+
+define <4 x float> @test_vmulq_laneq_f32(<4 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulq_laneq_f32:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x double> @test_vmulq_laneq_f64(<2 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulq_laneq_f64:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %mul = fmul <2 x double> %shuffle, %a
+ ret <2 x double> %mul
+}
+
+define <2 x float> @test_vmulx_lane_f32(<2 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulx_lane_f32:
+; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_lane_f32(<4 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulxq_lane_f32:
+; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_lane_f64(<2 x double> %a, <1 x double> %v) {
+; CHECK-LABEL: test_vmulxq_lane_f64:
+; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
+define <2 x float> @test_vmulx_laneq_f32(<2 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulx_laneq_f32:
+; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_laneq_f32(<4 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulxq_laneq_f32:
+; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_laneq_f64(<2 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulxq_laneq_f64:
+; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
+define <4 x i16> @test_vmla_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmla_lane_s16_0:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlaq_lane_s16_0:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmla_lane_s32_0:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlaq_lane_s32_0:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmla_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmla_laneq_s16_0:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlaq_laneq_s16_0:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmla_laneq_s32_0:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_laneq_s32_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlaq_laneq_s32_0:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmls_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmls_lane_s16_0:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s16_0:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmls_lane_s32_0:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s32_0:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmls_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmls_laneq_s16_0:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s16_0:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmls_laneq_s32_0:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_laneq_s32_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s32_0:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmul_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmul_lane_s16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmulq_lane_s16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmul_lane_s32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmulq_lane_s32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_lane_u16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmul_lane_u16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_u16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmulq_lane_u16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_u32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmul_lane_u32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_u32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmulq_lane_u32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmul_laneq_s16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmulq_laneq_s16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmul_laneq_s32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmulq_laneq_s32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmul_laneq_u16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmulq_laneq_u16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmul_laneq_u32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmulq_laneq_u32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <2 x float> @test_vfma_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfma_lane_f32_0:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfmaq_lane_f32_0:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfma_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfma_laneq_f32_0:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmaq_laneq_f32_0:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfms_lane_f32_0:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfmsq_lane_f32_0:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfms_laneq_f32_0:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f32_0:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x double> @test_vfmaq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmaq_laneq_f64_0:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f64_0:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
+ %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
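+; Widening multiply-accumulate: an [s|u]mull intrinsic of a lane-0 splat,
+; followed by an add to (or subtract from) the accumulator, should select the
+; [s|u]mlal(2) / [s|u]mlsl(2) by-element forms.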
+define <4 x i32> @test_vmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_lane_s16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_lane_s32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_laneq_s16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_laneq_s32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_s16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_s32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_s16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_s32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_lane_s16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_lane_s32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_s16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_s32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_s16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_s32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_s16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_s32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlal_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_lane_u16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_lane_u32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_laneq_u16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_laneq_u32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_u16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_u32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_u16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_u32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_lane_u16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_lane_u32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_u16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_u32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_u16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_u32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
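+; Plain widening multiplies by element 0, for both 64-bit (lane) and 128-bit
+; (laneq) second operands, including the "_high" forms that use the upper half
+; of the first operand.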
+define <4 x i32> @test_vmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_s16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_s32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_lane_u16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_u16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_u32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_u32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_u16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_u32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_s16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_s32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_u16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_u32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
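+; Saturating doubling variants: sqdmull of a lane-0 splat combined with
+; sqadd/sqsub should select sqdmlal(2)/sqdmlsl(2), and the standalone sqdmull
+; calls should select sqdmull(2) by element.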
+define <4 x i32> @test_vqdmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s16_0:
+; CHECK: qdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s32_0:
+; CHECK: qdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s16_0:
+; CHECK: qdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s32_0:
+; CHECK: qdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlsl_lane_s16_0:
+; CHECK: qdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlsl_lane_s32_0:
+; CHECK: qdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlsl_high_lane_s16_0:
+; CHECK: qdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlsl_high_lane_s32_0:
+; CHECK: qdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_lane_s16_0:
+; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_lane_s32_0:
+; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_laneq_s16_0:
+; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_laneq_s32_0:
+; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_high_lane_s16_0:
+; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_high_lane_s32_0:
+; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_high_laneq_s16_0:
+; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_high_laneq_s32_0:
+; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
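+; Saturating (and rounding) doubling multiply-high by element 0.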
+define <4 x i16> @test_vqdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmulh_lane_s16_0:
+; CHECK: qdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqdmulh2.i
+}
+
+define <8 x i16> @test_vqdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmulhq_lane_s16_0:
+; CHECK: qdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqdmulh2.i
+}
+
+define <2 x i32> @test_vqdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmulh_lane_s32_0:
+; CHECK: qdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqdmulh2.i
+}
+
+define <4 x i32> @test_vqdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmulhq_lane_s32_0:
+; CHECK: qdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqdmulh2.i
+}
+
+define <4 x i16> @test_vqrdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqrdmulh_lane_s16_0:
+; CHECK: qrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqrdmulh2.i
+}
+
+define <8 x i16> @test_vqrdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqrdmulhq_lane_s16_0:
+; CHECK: qrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqrdmulh2.i
+}
+
+define <2 x i32> @test_vqrdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqrdmulh_lane_s32_0:
+; CHECK: qrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqrdmulh2.i
+}
+
+define <4 x i32> @test_vqrdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqrdmulhq_lane_s32_0:
+; CHECK: qrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqrdmulh2.i
+}
+
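+; Floating-point multiplies by element 0; the <1 x double> case reduces to a
+; scalar fmul against an extracted element.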
+define <2 x float> @test_vmul_lane_f32_0(<2 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmul_lane_f32_0:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <4 x float> @test_vmulq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulq_lane_f32_0:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x float> @test_vmul_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmul_laneq_f32_0:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <1 x double> @test_vmul_laneq_f64_0(<1 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmul_laneq_f64_0:
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %0 = bitcast <1 x double> %a to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to double
+ %extract = extractelement <2 x double> %v, i32 0
+ %2 = fmul double %1, %extract
+ %3 = insertelement <1 x double> undef, double %2, i32 0
+ ret <1 x double> %3
+}
+
+define <4 x float> @test_vmulq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulq_laneq_f32_0:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x double> @test_vmulq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulq_laneq_f64_0:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x double> %shuffle, %a
+ ret <2 x double> %mul
+}
+
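+; fmulx by element 0 via the llvm.aarch64.neon.fmulx intrinsics.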
+define <2 x float> @test_vmulx_lane_f32_0(<2 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulx_lane_f32_0:
+; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulxq_lane_f32_0:
+; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_lane_f64_0(<2 x double> %a, <1 x double> %v) {
+; CHECK-LABEL: test_vmulxq_lane_f64_0:
+; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
+define <2 x float> @test_vmulx_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulx_laneq_f32_0:
+; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulxq_laneq_f32_0:
+; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulxq_laneq_f64_0:
+; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
diff --git a/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
new file mode 100644
index 000000000000..cb9b36c4c183
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
@@ -0,0 +1,1829 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+
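+; Declarations for the AArch64 NEON intrinsics exercised by the tests below.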
+declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32>, <2 x i32>)
+
+declare <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16>, <4 x i16>)
+
+declare <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32>, <2 x i32>)
+
+declare <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16>, <4 x i16>)
+
+declare <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64>, <2 x i64>)
+
+declare <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16>, <8 x i16>)
+
+declare <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64>, <2 x i64>)
+
+declare <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16>, <8 x i16>)
+
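+; Widening adds: sign/zero extension of both narrow operands followed by an
+; add should select saddl/uaddl, and the "_high" variants (upper halves of
+; 128-bit inputs) should select saddl2/uaddl2.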
+define <8 x i16> @test_vaddl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddl_s8:
+; CHECK: saddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddl_s16:
+; CHECK: saddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddl_s32:
+; CHECK: saddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddl_u8:
+; CHECK: uaddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddl_u16:
+; CHECK: uaddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddl_u32:
+; CHECK: uaddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddl_high_s8:
+; CHECK: saddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %1
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddl_high_s16:
+; CHECK: saddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %1
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddl_high_s32:
+; CHECK: saddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %1
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddl_high_u8:
+; CHECK: uaddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %1
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddl_high_u16:
+; CHECK: uaddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %1
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddl_high_u32:
+; CHECK: uaddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %1
+ ret <2 x i64> %add.i
+}
+
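+; Widening add of a wide accumulator and one extended operand: saddw/uaddw,
+; plus the "2" forms for the high halves.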
+define <8 x i16> @test_vaddw_s8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddw_s8:
+; CHECK: saddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_s16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddw_s16:
+; CHECK: saddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_s32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddw_s32:
+; CHECK: saddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_u8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddw_u8:
+; CHECK: uaddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_u16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddw_u16:
+; CHECK: uaddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_u32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddw_u32:
+; CHECK: uaddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_high_s8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddw_high_s8:
+; CHECK: saddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_high_s16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddw_high_s16:
+; CHECK: saddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_high_s32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddw_high_s32:
+; CHECK: saddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_high_u8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddw_high_u8:
+; CHECK: uaddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_high_u16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddw_high_u16:
+; CHECK: uaddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_high_u32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddw_high_u32:
+; CHECK: uaddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %a
+ ret <2 x i64> %add.i
+}
+
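+; Widening subtracts, mirroring the vaddl tests above: ssubl/usubl and the
+; ssubl2/usubl2 high-half forms.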
+define <8 x i16> @test_vsubl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubl_s8:
+; CHECK: ssubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubl_s16:
+; CHECK: ssubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubl_s32:
+; CHECK: ssubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubl_u8:
+; CHECK: usubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubl_u16:
+; CHECK: usubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubl_u32:
+; CHECK: usubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubl_high_s8:
+; CHECK: ssubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %sub.i = sub <8 x i16> %0, %1
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubl_high_s16:
+; CHECK: ssubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %sub.i = sub <4 x i32> %0, %1
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubl_high_s32:
+; CHECK: ssubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %sub.i = sub <2 x i64> %0, %1
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubl_high_u8:
+; CHECK: usubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %sub.i = sub <8 x i16> %0, %1
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubl_high_u16:
+; CHECK: usubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %sub.i = sub <4 x i32> %0, %1
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubl_high_u32:
+; CHECK: usubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %sub.i = sub <2 x i64> %0, %1
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_s8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubw_s8:
+; CHECK: ssubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %vmovl.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_s16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubw_s16:
+; CHECK: ssubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %vmovl.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_s32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubw_s32:
+; CHECK: ssubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %vmovl.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_u8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubw_u8:
+; CHECK: usubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %vmovl.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_u16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubw_u16:
+; CHECK: usubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %vmovl.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_u32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubw_u32:
+; CHECK: usubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %vmovl.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_high_s8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubw_high_s8:
+; CHECK: ssubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %0
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_high_s16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubw_high_s16:
+; CHECK: ssubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %0
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_high_s32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubw_high_s32:
+; CHECK: ssubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %0
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_high_u8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubw_high_u8:
+; CHECK: usubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %0
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_high_u16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubw_high_u16:
+; CHECK: usubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %0
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_high_u32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubw_high_u32:
+; CHECK: usubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %0
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i8> @test_vaddhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_s16:
+; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i = add <8 x i16> %a, %b
+ %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
+ ret <8 x i8> %vaddhn2.i
+}
+
+define <4 x i16> @test_vaddhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_s32:
+; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i = add <4 x i32> %a, %b
+ %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
+ ret <4 x i16> %vaddhn2.i
+}
+
+define <2 x i32> @test_vaddhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_s64:
+; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i = add <2 x i64> %a, %b
+ %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
+ %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
+ ret <2 x i32> %vaddhn2.i
+}
+
+define <8 x i8> @test_vaddhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_u16:
+; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i = add <8 x i16> %a, %b
+ %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
+ ret <8 x i8> %vaddhn2.i
+}
+
+define <4 x i16> @test_vaddhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_u32:
+; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i = add <4 x i32> %a, %b
+ %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
+ ret <4 x i16> %vaddhn2.i
+}
+
+define <2 x i32> @test_vaddhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_u64:
+; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i = add <2 x i64> %a, %b
+ %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
+ %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
+ ret <2 x i32> %vaddhn2.i
+}
+
+define <16 x i8> @test_vaddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_high_s16:
+; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i.i = add <8 x i16> %a, %b
+ %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vaddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_high_s32:
+; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i.i = add <4 x i32> %a, %b
+ %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vaddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_high_s64:
+; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i.i = add <2 x i64> %a, %b
+ %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
+ %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vaddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_high_u16:
+; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i.i = add <8 x i16> %a, %b
+ %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vaddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_high_u32:
+; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i.i = add <4 x i32> %a, %b
+ %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vaddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_high_u64:
+; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i.i = add <2 x i64> %a, %b
+ %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
+ %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i8> @test_vraddhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_s16:
+; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i = tail call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vraddhn2.i
+}
+
+define <4 x i16> @test_vraddhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_s32:
+; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vraddhn2.i
+}
+
+define <2 x i32> @test_vraddhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_s64:
+; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i = tail call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vraddhn2.i
+}
+
+define <8 x i8> @test_vraddhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_u16:
+; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i = tail call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vraddhn2.i
+}
+
+define <4 x i16> @test_vraddhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_u32:
+; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vraddhn2.i
+}
+
+define <2 x i32> @test_vraddhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_u64:
+; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i = tail call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vraddhn2.i
+}
+
+define <16 x i8> @test_vraddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_high_s16:
+; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i.i = tail call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vraddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_high_s32:
+; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i.i = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vraddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_high_s64:
+; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i.i = tail call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vraddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_high_u16:
+; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i.i = tail call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vraddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_high_u32:
+; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i.i = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vraddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_high_u64:
+; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i.i = tail call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i8> @test_vsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubhn_s16:
+; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i = sub <8 x i16> %a, %b
+ %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
+ ret <8 x i8> %vsubhn2.i
+}
+
+define <4 x i16> @test_vsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubhn_s32:
+; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i = sub <4 x i32> %a, %b
+ %vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16>
+ ret <4 x i16> %vsubhn2.i
+}
+
+define <2 x i32> @test_vsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vsubhn_s64:
+; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i = sub <2 x i64> %a, %b
+ %vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
+ %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32>
+ ret <2 x i32> %vsubhn2.i
+}
+
+define <8 x i8> @test_vsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubhn_u16:
+; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i = sub <8 x i16> %a, %b
+ %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
+ ret <8 x i8> %vsubhn2.i
+}
+
+define <4 x i16> @test_vsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubhn_u32:
+; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i = sub <4 x i32> %a, %b
+ %vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16>
+ ret <4 x i16> %vsubhn2.i
+}
+
+define <2 x i32> @test_vsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vsubhn_u64:
+; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i = sub <2 x i64> %a, %b
+ %vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
+ %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32>
+ ret <2 x i32> %vsubhn2.i
+}
+
+define <16 x i8> @test_vsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubhn_high_s16:
+; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i.i = sub <8 x i16> %a, %b
+ %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubhn_high_s32:
+; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i.i = sub <4 x i32> %a, %b
+ %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vsubhn_high_s64:
+; CHECK: subhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i.i = sub <2 x i64> %a, %b
+ %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, <i64 32, i64 32>
+ %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubhn_high_u16:
+; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i.i = sub <8 x i16> %a, %b
+ %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubhn_high_u32:
+; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i.i = sub <4 x i32> %a, %b
+ %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vsubhn_high_u64:
+; CHECK: subhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i.i = sub <2 x i64> %a, %b
+ %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, <i64 32, i64 32>
+ %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i8> @test_vrsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vrsubhn_s16:
+; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vrsubhn2.i
+}
+
+define <4 x i16> @test_vrsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vrsubhn_s32:
+; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i = tail call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vrsubhn2.i
+}
+
+define <2 x i32> @test_vrsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vrsubhn_s64:
+; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i = tail call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vrsubhn2.i
+}
+
+define <8 x i8> @test_vrsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vrsubhn_u16:
+; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vrsubhn2.i
+}
+
+define <4 x i16> @test_vrsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vrsubhn_u32:
+; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i = tail call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vrsubhn2.i
+}
+
+define <2 x i32> @test_vrsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vrsubhn_u64:
+; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i = tail call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vrsubhn2.i
+}
+
+define <16 x i8> @test_vrsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vrsubhn_high_s16:
+; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i.i = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vrsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vrsubhn_high_s32:
+; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i.i = tail call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vrsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vrsubhn_high_s64:
+; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i.i = tail call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vrsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vrsubhn_high_u16:
+; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i.i = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vrsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vrsubhn_high_u32:
+; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i.i = tail call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vrsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vrsubhn_high_u64:
+; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i.i = tail call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @test_vabdl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vabdl_s8:
+; CHECK: sabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
+ %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i
+}
+
+define <4 x i32> @test_vabdl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vabdl_s16:
+; CHECK: sabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
+ %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i
+}
+
+define <2 x i64> @test_vabdl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vabdl_s32:
+; CHECK: sabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i = tail call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %a, <2 x i32> %b)
+ %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i
+}
+
+define <8 x i16> @test_vabdl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vabdl_u8:
+; CHECK: uabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
+ %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i
+}
+
+define <4 x i32> @test_vabdl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vabdl_u16:
+; CHECK: uabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b)
+ %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i
+}
+
+define <2 x i64> @test_vabdl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vabdl_u32:
+; CHECK: uabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b)
+ %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i
+}
+
+define <8 x i16> @test_vabal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vabal_s8:
+; CHECK: sabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i.i = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %b, <8 x i8> %c)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vabal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vabal_s16:
+; CHECK: sabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %b, <4 x i16> %c)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vabal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vabal_s32:
+; CHECK: sabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %b, <2 x i32> %c)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vabal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vabal_u8:
+; CHECK: uabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i.i = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %b, <8 x i8> %c)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vabal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vabal_u16:
+; CHECK: uabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %b, <4 x i16> %c)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vabal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vabal_u32:
+; CHECK: uabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %b, <2 x i32> %c)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vabdl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vabdl_high_s8:
+; CHECK: sabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i.i
+}
+
+define <4 x i32> @test_vabdl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vabdl_high_s16:
+; CHECK: sabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i.i
+}
+
+define <2 x i64> @test_vabdl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vabdl_high_s32:
+; CHECK: sabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i.i
+}
+
+define <8 x i16> @test_vabdl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vabdl_high_u8:
+; CHECK: uabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i.i
+}
+
+define <4 x i32> @test_vabdl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vabdl_high_u16:
+; CHECK: uabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i.i
+}
+
+define <2 x i64> @test_vabdl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vabdl_high_u32:
+; CHECK: uabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i.i
+}
+
+define <8 x i16> @test_vabal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vabal_high_s8:
+; CHECK: sabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i.i = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16>
+ %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vabal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vabal_high_s16:
+; CHECK: sabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i.i = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32>
+ %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vabal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vabal_high_s32:
+; CHECK: sabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i.i = tail call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64>
+ %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <8 x i16> @test_vabal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vabal_high_u8:
+; CHECK: uabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i.i = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16>
+ %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vabal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vabal_high_u16:
+; CHECK: uabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i.i = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32>
+ %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vabal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vabal_high_u32:
+; CHECK: uabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i.i = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64>
+ %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <8 x i16> @test_vmull_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vmull_s8:
+; CHECK: smull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %a, <8 x i8> %b)
+ ret <8 x i16> %vmull.i
+}
+
+define <4 x i32> @test_vmull_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vmull_s16:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %b)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vmull_s32:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b)
+ ret <2 x i64> %vmull2.i
+}
+
+define <8 x i16> @test_vmull_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vmull_u8:
+; CHECK: umull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %a, <8 x i8> %b)
+ ret <8 x i16> %vmull.i
+}
+
+define <4 x i32> @test_vmull_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vmull_u16:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %b)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vmull_u32:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %b)
+ ret <2 x i64> %vmull2.i
+}
+
+define <8 x i16> @test_vmull_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vmull_high_s8:
+; CHECK: smull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ ret <8 x i16> %vmull.i.i
+}
+
+define <4 x i32> @test_vmull_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vmull_high_s16:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @test_vmull_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vmull_high_s32:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <8 x i16> @test_vmull_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vmull_high_u8:
+; CHECK: umull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ ret <8 x i16> %vmull.i.i
+}
+
+define <4 x i32> @test_vmull_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vmull_high_u16:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @test_vmull_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vmull_high_u32:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <8 x i16> @test_vmlal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vmlal_s8:
+; CHECK: smlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %add.i = add <8 x i16> %vmull.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vmlal_s16:
+; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %add.i = add <4 x i32> %vmull2.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vmlal_s32:
+; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %add.i = add <2 x i64> %vmull2.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vmlal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vmlal_u8:
+; CHECK: umlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %add.i = add <8 x i16> %vmull.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vmlal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vmlal_u16:
+; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %add.i = add <4 x i32> %vmull2.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vmlal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vmlal_u32:
+; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %add.i = add <2 x i64> %vmull2.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vmlal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmlal_high_s8:
+; CHECK: smlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %add.i.i = add <8 x i16> %vmull.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vmlal_high_s16:
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vmlal_high_s32:
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <8 x i16> @test_vmlal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmlal_high_u8:
+; CHECK: umlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %add.i.i = add <8 x i16> %vmull.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vmlal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vmlal_high_u16:
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vmlal_high_u32:
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <8 x i16> @test_vmlsl_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vmlsl_s8:
+; CHECK: smlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %sub.i = sub <8 x i16> %a, %vmull.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vmlsl_s16:
+; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %sub.i = sub <4 x i32> %a, %vmull2.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vmlsl_s32:
+; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %sub.i = sub <2 x i64> %a, %vmull2.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vmlsl_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vmlsl_u8:
+; CHECK: umlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %sub.i = sub <8 x i16> %a, %vmull.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vmlsl_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vmlsl_u16:
+; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %sub.i = sub <4 x i32> %a, %vmull2.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vmlsl_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vmlsl_u32:
+; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %sub.i = sub <2 x i64> %a, %vmull2.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vmlsl_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmlsl_high_s8:
+; CHECK: smlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i
+ ret <8 x i16> %sub.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vmlsl_high_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vmlsl_high_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <8 x i16> @test_vmlsl_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vmlsl_high_u8:
+; CHECK: umlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i
+ ret <8 x i16> %sub.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vmlsl_high_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vmlsl_high_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vqdmull_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vqdmull_s16:
+; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vqdmull2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %b)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vqdmull_s32:
+; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vqdmull2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %b)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vqdmlal_s16:
+; CHECK: sqdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vqdmlal2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vqdmlal_s32:
+; CHECK: sqdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vqdmlal2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK-LABEL: test_vqdmlsl_s16:
+; CHECK: sqdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK-LABEL: test_vqdmlsl_s32:
+; CHECK: sqdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmull_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqdmull_high_s16:
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ ret <4 x i32> %vqdmull2.i.i
+}
+
+define <2 x i64> @test_vqdmull_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqdmull_high_s32:
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vqdmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ ret <2 x i64> %vqdmull2.i.i
+}
+
+define <4 x i32> @test_vqdmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vqdmlal_high_s16:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmlal2.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vqdmlal4.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i.i)
+ ret <4 x i32> %vqdmlal4.i.i
+}
+
+define <2 x i64> @test_vqdmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vqdmlal_high_s32:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vqdmlal2.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vqdmlal4.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i.i)
+ ret <2 x i64> %vqdmlal4.i.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: test_vqdmlsl_high_s16:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmlsl2.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vqdmlsl4.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i.i)
+ ret <4 x i32> %vqdmlsl4.i.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: test_vqdmlsl_high_s32:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vqdmlsl2.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vqdmlsl4.i.i = tail call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i.i)
+ ret <2 x i64> %vqdmlsl4.i.i
+}
+
+define <8 x i16> @test_vmull_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vmull_p8:
+; CHECK: pmull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i = tail call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %a, <8 x i8> %b)
+ ret <8 x i16> %vmull.i
+}
+
+define <8 x i16> @test_vmull_high_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vmull_high_p8:
+; CHECK: pmull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ ret <8 x i16> %vmull.i.i
+}
+
+define i128 @test_vmull_p64(i64 %a, i64 %b) #4 {
+; CHECK-LABEL: test_vmull_p64:
+; CHECK: pmull {{v[0-9]+}}.1q, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d
+entry:
+ %vmull2.i = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %a, i64 %b)
+ %vmull3.i = bitcast <16 x i8> %vmull2.i to i128
+ ret i128 %vmull3.i
+}
+
+define i128 @test_vmull_high_p64(<2 x i64> %a, <2 x i64> %b) #4 {
+; CHECK-LABEL: test_vmull_high_p64:
+; CHECK: pmull2 {{v[0-9]+}}.1q, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %0 = extractelement <2 x i64> %a, i32 1
+ %1 = extractelement <2 x i64> %b, i32 1
+ %vmull2.i.i = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %0, i64 %1) #1
+ %vmull3.i.i = bitcast <16 x i8> %vmull2.i.i to i128
+ ret i128 %vmull3.i.i
+}
+
+declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64) #5
+
+
diff --git a/test/CodeGen/AArch64/arm64-neon-aba-abd.ll b/test/CodeGen/AArch64/arm64-neon-aba-abd.ll
new file mode 100644
index 000000000000..6404ab728011
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-aba-abd.ll
@@ -0,0 +1,236 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uabd_v8i8:
+ %abd = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uabd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %abd
+}
+
+define <8 x i8> @test_uaba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uaba_v8i8:
+ %abd = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+ %aba = add <8 x i8> %lhs, %abd
+; CHECK: uaba v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %aba
+}
+
+define <8 x i8> @test_sabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_sabd_v8i8:
+ %abd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sabd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %abd
+}
+
+define <8 x i8> @test_saba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_saba_v8i8:
+ %abd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+ %aba = add <8 x i8> %lhs, %abd
+; CHECK: saba v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %aba
+}
+
+declare <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uabd_v16i8:
+ %abd = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uabd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %abd
+}
+
+define <16 x i8> @test_uaba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uaba_v16i8:
+ %abd = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+ %aba = add <16 x i8> %lhs, %abd
+; CHECK: uaba v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %aba
+}
+
+define <16 x i8> @test_sabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sabd_v16i8:
+ %abd = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sabd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %abd
+}
+
+define <16 x i8> @test_saba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_saba_v16i8:
+ %abd = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+ %aba = add <16 x i8> %lhs, %abd
+; CHECK: saba v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %aba
+}
+
+declare <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uabd_v4i16:
+ %abd = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uabd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %abd
+}
+
+define <4 x i16> @test_uaba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uaba_v4i16:
+ %abd = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %aba = add <4 x i16> %lhs, %abd
+; CHECK: uaba v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %aba
+}
+
+define <4 x i16> @test_sabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sabd_v4i16:
+ %abd = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sabd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %abd
+}
+
+define <4 x i16> @test_saba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_saba_v4i16:
+ %abd = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %aba = add <4 x i16> %lhs, %abd
+; CHECK: saba v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %aba
+}
+
+declare <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uabd_v8i16:
+ %abd = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uabd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %abd
+}
+
+define <8 x i16> @test_uaba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uaba_v8i16:
+ %abd = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %aba = add <8 x i16> %lhs, %abd
+; CHECK: uaba v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %aba
+}
+
+define <8 x i16> @test_sabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sabd_v8i16:
+ %abd = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sabd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %abd
+}
+
+define <8 x i16> @test_saba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_saba_v8i16:
+ %abd = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %aba = add <8 x i16> %lhs, %abd
+; CHECK: saba v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %aba
+}
+
+declare <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uabd_v2i32:
+ %abd = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uabd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %abd
+}
+
+define <2 x i32> @test_uaba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uaba_v2i32:
+ %abd = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ %aba = add <2 x i32> %lhs, %abd
+; CHECK: uaba v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %aba
+}
+
+define <2 x i32> @test_sabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sabd_v2i32:
+ %abd = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sabd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %abd
+}
+
+define <2 x i32> @test_sabd_v2i32_const() {
+; CHECK: test_sabd_v2i32_const:
+; CHECK: movi d1, #0x00ffffffff0000
+; CHECK-NEXT: sabd v0.2s, v0.2s, v1.2s
+ %1 = tail call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(
+ <2 x i32> <i32 -2147483648, i32 2147450880>,
+ <2 x i32> <i32 -65536, i32 65535>)
+ ret <2 x i32> %1
+}
+
+define <2 x i32> @test_saba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_saba_v2i32:
+ %abd = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ %aba = add <2 x i32> %lhs, %abd
+; CHECK: saba v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %aba
+}
+
+declare <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uabd_v4i32:
+ %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uabd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %abd
+}
+
+define <4 x i32> @test_uaba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uaba_v4i32:
+ %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ %aba = add <4 x i32> %lhs, %abd
+; CHECK: uaba v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %aba
+}
+
+define <4 x i32> @test_sabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sabd_v4i32:
+ %abd = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sabd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %abd
+}
+
+define <4 x i32> @test_saba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_saba_v4i32:
+ %abd = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ %aba = add <4 x i32> %lhs, %abd
+; CHECK: saba v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %aba
+}
+
+declare <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float>, <2 x float>)
+
+define <2 x float> @test_fabd_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fabd_v2f32:
+ %abd = call <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fabd v0.2s, v0.2s, v1.2s
+ ret <2 x float> %abd
+}
+
+declare <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float>, <4 x float>)
+
+define <4 x float> @test_fabd_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fabd_v4f32:
+ %abd = call <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fabd v0.4s, v0.4s, v1.4s
+ ret <4 x float> %abd
+}
+
+declare <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double>, <2 x double>)
+
+define <2 x double> @test_fabd_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fabd_v2f64:
+ %abd = call <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fabd v0.2d, v0.2d, v1.2d
+ ret <2 x double> %abd
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-across.ll b/test/CodeGen/AArch64/arm64-neon-across.ll
new file mode 100644
index 000000000000..3a63673f1209
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-across.ll
@@ -0,0 +1,460 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
+
+declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
+
+declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
+
+declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>)
+
+declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>)
+
+declare i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8>)
+
+declare i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>)
+
+declare i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>)
+
+declare i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8>)
+
+define i16 @test_vaddlv_s8(<8 x i8> %a) {
+; CHECK: test_vaddlv_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %saddlvv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_s16(<4 x i16> %a) {
+; CHECK: test_vaddlv_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a)
+ ret i32 %saddlvv.i
+}
+
+define i16 @test_vaddlv_u8(<8 x i8> %a) {
+; CHECK: test_vaddlv_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %uaddlvv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_u16(<4 x i16> %a) {
+; CHECK: test_vaddlv_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
+ ret i32 %uaddlvv.i
+}
+
+define i16 @test_vaddlvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %saddlvv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a)
+ ret i32 %saddlvv.i
+}
+
+define i64 @test_vaddlvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_s32:
+; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %saddlvv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a)
+ ret i64 %saddlvv.i
+}
+
+define i16 @test_vaddlvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %uaddlvv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
+ ret i32 %uaddlvv.i
+}
+
+define i64 @test_vaddlvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_u32:
+; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uaddlvv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a)
+ ret i64 %uaddlvv.i
+}
+
+define i8 @test_vmaxv_s8(<8 x i8> %a) {
+; CHECK: test_vmaxv_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %smaxv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_s16(<4 x i16> %a) {
+; CHECK: test_vmaxv_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a)
+ %0 = trunc i32 %smaxv.i to i16
+ ret i16 %0
+}
+
+define i8 @test_vmaxv_u8(<8 x i8> %a) {
+; CHECK: test_vmaxv_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %umaxv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_u16(<4 x i16> %a) {
+; CHECK: test_vmaxv_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a)
+ %0 = trunc i32 %umaxv.i to i16
+ ret i16 %0
+}
+
+define i8 @test_vmaxvq_s8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %smaxv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_s16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a)
+ %0 = trunc i32 %smaxv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_s32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_s32:
+; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a)
+ ret i32 %smaxv.i
+}
+
+define i8 @test_vmaxvq_u8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %umaxv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_u16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a)
+ %0 = trunc i32 %umaxv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_u32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_u32:
+; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a)
+ ret i32 %umaxv.i
+}
+
+define i8 @test_vminv_s8(<8 x i8> %a) {
+; CHECK: test_vminv_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %sminv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vminv_s16(<4 x i16> %a) {
+; CHECK: test_vminv_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a)
+ %0 = trunc i32 %sminv.i to i16
+ ret i16 %0
+}
+
+define i8 @test_vminv_u8(<8 x i8> %a) {
+; CHECK: test_vminv_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %uminv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vminv_u16(<4 x i16> %a) {
+; CHECK: test_vminv_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a)
+ %0 = trunc i32 %uminv.i to i16
+ ret i16 %0
+}
+
+define i8 @test_vminvq_s8(<16 x i8> %a) {
+; CHECK: test_vminvq_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %sminv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vminvq_s16(<8 x i16> %a) {
+; CHECK: test_vminvq_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a)
+ %0 = trunc i32 %sminv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vminvq_s32(<4 x i32> %a) {
+; CHECK: test_vminvq_s32:
+; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a)
+ ret i32 %sminv.i
+}
+
+define i8 @test_vminvq_u8(<16 x i8> %a) {
+; CHECK: test_vminvq_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %uminv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vminvq_u16(<8 x i16> %a) {
+; CHECK: test_vminvq_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a)
+ %0 = trunc i32 %uminv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vminvq_u32(<4 x i32> %a) {
+; CHECK: test_vminvq_u32:
+; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a)
+ ret i32 %uminv.i
+}
+
+define i8 @test_vaddv_s8(<8 x i8> %a) {
+; CHECK: test_vaddv_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vaddv_s16(<4 x i16> %a) {
+; CHECK: test_vaddv_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i8 @test_vaddv_u8(<8 x i8> %a) {
+; CHECK: test_vaddv_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vaddv_u16(<4 x i16> %a) {
+; CHECK: test_vaddv_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i8 @test_vaddvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddvq_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddvq_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddvq_s32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a)
+ ret i32 %vaddv.i
+}
+
+define i8 @test_vaddvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddvq_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddvq_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddvq_u32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a)
+ ret i32 %vaddv.i
+}
+
+define float @test_vmaxvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxvq_f32:
+; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %0 = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a)
+ ret float %0
+}
+
+define float @test_vminvq_f32(<4 x float> %a) {
+; CHECK: test_vminvq_f32:
+; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %0 = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a)
+ ret float %0
+}
+
+define float @test_vmaxnmvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxnmvq_f32:
+; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %0 = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a)
+ ret float %0
+}
+
+define float @test_vminnmvq_f32(<4 x float> %a) {
+; CHECK: test_vminnmvq_f32:
+; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %0 = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a)
+ ret float %0
+}
+
diff --git a/test/CodeGen/AArch64/arm64-neon-add-pairwise.ll b/test/CodeGen/AArch64/arm64-neon-add-pairwise.ll
new file mode 100644
index 000000000000..d3dc1b8d010f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-add-pairwise.ll
@@ -0,0 +1,100 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_addp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; CHECK: test_addp_v8i8:
+ %tmp1 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: addp v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_addp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_addp_v16i8:
+ %tmp1 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: addp v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_addp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_addp_v4i16:
+ %tmp1 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: addp v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_addp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_addp_v8i16:
+ %tmp1 = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: addp v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_addp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_addp_v2i32:
+ %tmp1 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: addp v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_addp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_addp_v4i32:
+ %tmp1 = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: addp v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+
+declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_addp_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_addp_v2i64:
+ %val = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: addp v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %val
+}
+
+declare <2 x float> @llvm.aarch64.neon.addp.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.addp.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.addp.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_faddp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_faddp_v2f32:
+ %val = call <2 x float> @llvm.aarch64.neon.addp.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: faddp v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_faddp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_faddp_v4f32:
+ %val = call <4 x float> @llvm.aarch64.neon.addp.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: faddp v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_faddp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_faddp_v2f64:
+ %val = call <2 x double> @llvm.aarch64.neon.addp.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: faddp v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
+define i32 @test_vaddv.v2i32(<2 x i32> %a) {
+; CHECK-LABEL: test_vaddv.v2i32
+; CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %1 = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a)
+ ret i32 %1
+}
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32>)
diff --git a/test/CodeGen/AArch64/arm64-neon-add-sub.ll b/test/CodeGen/AArch64/arm64-neon-add-sub.ll
new file mode 100644
index 000000000000..fbde606538ca
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-add-sub.ll
@@ -0,0 +1,237 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-simd-scalar | FileCheck %s
+
+define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = add <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: add {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = add <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: add {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = add <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: add {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = add <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: add {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = add <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: add {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = add <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = add <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = fadd <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = fadd <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = fadd <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: sub {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = sub <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: sub {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = sub <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: sub {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = sub <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: sub {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = sub <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: sub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = sub <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: sub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = sub <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: sub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = sub <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fsub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = fsub <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fsub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = fsub <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fsub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = fsub <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vadd_f64
+; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fadd <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmul_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fmul <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vdiv_f64
+; CHECK: fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fdiv <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vmla_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fmul <1 x double> %b, %c
+ %2 = fadd <1 x double> %1, %a
+ ret <1 x double> %2
+}
+
+define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vmls_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fmul <1 x double> %b, %c
+ %2 = fsub <1 x double> %a, %1
+ ret <1 x double> %2
+}
+
+define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vfms_f64
+; CHECK: fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fsub <1 x double> <double -0.000000e+00>, %b
+ %2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
+ ret <1 x double> %2
+}
+
+define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vfma_f64
+; CHECK: fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vsub_f64
+; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fsub <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vabd_f64
+; CHECK: fabd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmax_f64
+; CHECK: fmax d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmin_f64
+; CHECK: fmin d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmaxnm_f64
+; CHECK: fmaxnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vminnm_f64
+; CHECK: fminnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vabs_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vabs_f64
+; CHECK: fabs d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vneg_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vneg_f64
+; CHECK: fneg d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fsub <1 x double> <double -0.000000e+00>, %a
+ ret <1 x double> %1
+}
+
+declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
+declare <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)
diff --git a/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll b/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll
new file mode 100644
index 000000000000..cba81ef99b94
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll
@@ -0,0 +1,1191 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s
+
+define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp eq <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp eq <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp eq <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp eq <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp eq <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp eq <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp eq <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp sgt <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp sgt <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp sgt <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp sgt <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp sgt <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp sgt <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp sgt <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp slt <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp slt <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp slt <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp slt <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp slt <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp slt <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp slt <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp sge <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp sge <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp sge <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp sge <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp sge <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp sge <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp sge <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp sle <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp sle <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp sle <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp sle <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp sle <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp sle <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp sle <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ugt <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ugt <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp ugt <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp ugt <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp ugt <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp ugt <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp ugt <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp ult <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp ult <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp ult <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp ult <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp ult <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp ult <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp ult <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp uge <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp uge <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp uge <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp uge <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp uge <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp uge <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp uge <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp ule <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp ule <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp ule <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp ule <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp ule <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp ule <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp ule <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+ %tmp3 = icmp eq <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+ %tmp3 = icmp eq <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+ %tmp3 = icmp eq <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+ %tmp3 = icmp eq <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+ %tmp3 = icmp eq <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+ %tmp3 = icmp eq <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+ %tmp3 = icmp eq <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
+;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+ %tmp3 = icmp sge <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
+;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+ %tmp3 = icmp sge <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
+;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+ %tmp3 = icmp sge <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
+;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+ %tmp3 = icmp sge <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
+;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+ %tmp3 = icmp sge <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
+;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+ %tmp3 = icmp sge <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
+;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+ %tmp3 = icmp sge <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
+;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+ %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
+;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+ %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
+;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+ %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
+;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+ %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
+;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+ %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
+;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+ %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
+;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+ %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
+;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+ %tmp3 = icmp sle <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
+;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+ %tmp3 = icmp sle <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
+;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+ %tmp3 = icmp sle <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
+;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+ %tmp3 = icmp sle <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
+;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+ %tmp3 = icmp sle <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
+;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+ %tmp3 = icmp sle <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
+;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+ %tmp3 = icmp sle <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
+;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+ %tmp3 = icmp slt <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
+;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+ %tmp3 = icmp slt <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
+;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+ %tmp3 = icmp slt <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
+;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+ %tmp3 = icmp slt <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
+;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+ %tmp3 = icmp slt <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
+;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+ %tmp3 = icmp slt <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
+;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+ %tmp3 = icmp slt <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
+ %tmp3 = icmp uge <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
+ %tmp3 = icmp uge <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
+ %tmp3 = icmp uge <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
+ %tmp3 = icmp uge <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
+ %tmp3 = icmp uge <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
+ %tmp3 = icmp uge <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
+ %tmp3 = icmp uge <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
+ %tmp3 = icmp ugt <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
+ %tmp3 = icmp ugt <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
+ %tmp3 = icmp ugt <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
+ %tmp3 = icmp ugt <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
+ %tmp3 = icmp ugt <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
+ %tmp3 = icmp ugt <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
+ %tmp3 = icmp ugt <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v[[ZERO]].8b, v0.8b
+ %tmp3 = icmp ule <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
+ %tmp3 = icmp ule <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
+ %tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
+ %tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
+ %tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
+ %tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
+ %tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v[[ZERO]].8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ult <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
+ %tmp3 = icmp ult <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
+ %tmp3 = icmp ult <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
+ %tmp3 = icmp ult <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
+ %tmp3 = icmp ult <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
+ %tmp3 = icmp ult <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
+ %tmp3 = icmp ult <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <1 x i64> @cmeqz_v1i64(<1 x i64> %A) {
+; CHECK-LABEL: cmeqz_v1i64:
+; CHECK: cmeq d0, d0, #0
+ %tst = icmp eq <1 x i64> %A, <i64 0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @cmgez_v1i64(<1 x i64> %A) {
+; CHECK-LABEL: cmgez_v1i64:
+; CHECK: cmge d0, d0, #0
+ %tst = icmp sge <1 x i64> %A, <i64 0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @cmgtz_v1i64(<1 x i64> %A) {
+; CHECK-LABEL: cmgtz_v1i64:
+; CHECK: cmgt d0, d0, #0
+ %tst = icmp sgt <1 x i64> %A, <i64 0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @cmlez_v1i64(<1 x i64> %A) {
+; CHECK-LABEL: cmlez_v1i64:
+; CHECK: cmle d0, d0, #0
+ %tst = icmp sle <1 x i64> %A, <i64 0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @cmltz_v1i64(<1 x i64> %A) {
+; CHECK-LABEL: cmltz_v1i64:
+; CHECK: cmlt d0, d0, #0
+ %tst = icmp slt <1 x i64> %A, <i64 0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmeqz_v1f64(<1 x double> %A) {
+; CHECK-LABEL: fcmeqz_v1f64:
+; CHECK: fcmeq d0, d0, #0
+ %tst = fcmp oeq <1 x double> %A, <double 0.0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmgez_v1f64(<1 x double> %A) {
+; CHECK-LABEL: fcmgez_v1f64:
+; CHECK: fcmge d0, d0, #0
+ %tst = fcmp oge <1 x double> %A, <double 0.0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmgtz_v1f64(<1 x double> %A) {
+; CHECK-LABEL: fcmgtz_v1f64:
+; CHECK: fcmgt d0, d0, #0
+ %tst = fcmp ogt <1 x double> %A, <double 0.0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmlez_v1f64(<1 x double> %A) {
+; CHECK-LABEL: fcmlez_v1f64:
+; CHECK: fcmle d0, d0, #0
+ %tst = fcmp ole <1 x double> %A, <double 0.0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmltz_v1f64(<1 x double> %A) {
+; CHECK-LABEL: fcmltz_v1f64:
+; CHECK: fcmlt d0, d0, #0
+ %tst = fcmp olt <1 x double> %A, <double 0.0>
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-copy.ll b/test/CodeGen/AArch64/arm64-neon-copy.ll
new file mode 100644
index 000000000000..1cfba826d510
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -0,0 +1,1445 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+
+define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
+; CHECK-LABEL: ins16bw:
+; CHECK: ins {{v[0-9]+}}.b[15], {{w[0-9]+}}
+ %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 15
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
+; CHECK-LABEL: ins8hw:
+; CHECK: ins {{v[0-9]+}}.h[6], {{w[0-9]+}}
+ %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 6
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
+; CHECK-LABEL: ins4sw:
+; CHECK: ins {{v[0-9]+}}.s[2], {{w[0-9]+}}
+ %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 2
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
+; CHECK-LABEL: ins2dw:
+; CHECK: ins {{v[0-9]+}}.d[1], {{x[0-9]+}}
+ %tmp3 = insertelement <2 x i64> %tmp1, i64 %tmp2, i32 1
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
+; CHECK-LABEL: ins8bw:
+; CHECK: ins {{v[0-9]+}}.b[5], {{w[0-9]+}}
+ %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 5
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
+; CHECK-LABEL: ins4hw:
+; CHECK: ins {{v[0-9]+}}.h[3], {{w[0-9]+}}
+ %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 3
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
+; CHECK-LABEL: ins2sw:
+; CHECK: ins {{v[0-9]+}}.s[1], {{w[0-9]+}}
+ %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
+; CHECK-LABEL: ins16b16:
+; CHECK: ins {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
+; CHECK-LABEL: ins8h8:
+; CHECK: ins {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
+; CHECK-LABEL: ins4s4:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
+; CHECK-LABEL: ins2d2:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+ %tmp3 = extractelement <2 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
+ ret <2 x i64> %tmp4
+}
+
+define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
+; CHECK-LABEL: ins4f4:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+ %tmp3 = extractelement <4 x float> %tmp1, i32 2
+ %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
+ ret <4 x float> %tmp4
+}
+
+define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
+; CHECK-LABEL: ins2df2:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+ %tmp3 = extractelement <2 x double> %tmp1, i32 0
+ %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
+ ret <2 x double> %tmp4
+}
+
+define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
+; CHECK-LABEL: ins8b16:
+; CHECK: ins {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
+; CHECK-LABEL: ins4h8:
+; CHECK: ins {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
+; CHECK-LABEL: ins2s4:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+ %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
+; CHECK-LABEL: ins1d2:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+ %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
+ ret <2 x i64> %tmp4
+}
+
+define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
+; CHECK-LABEL: ins2f4:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
+ %tmp3 = extractelement <2 x float> %tmp1, i32 1
+ %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
+ ret <4 x float> %tmp4
+}
+
+define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
+; CHECK-LABEL: ins1f2:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+ %tmp3 = extractelement <1 x double> %tmp1, i32 0
+ %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
+ ret <2 x double> %tmp4
+}
+
+define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
+; CHECK-LABEL: ins16b8:
+; CHECK: ins {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[2]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
+ ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
+; CHECK-LABEL: ins8h4:
+; CHECK: ins {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
+ ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
+; CHECK-LABEL: ins4s2:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <2 x i32> %tmp4
+}
+
+define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
+; CHECK-LABEL: ins2d1:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
+ %tmp3 = extractelement <2 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
+ ret <1 x i64> %tmp4
+}
+
+define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
+; CHECK-LABEL: ins4f2:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+ %tmp3 = extractelement <4 x float> %tmp1, i32 2
+ %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
+ ret <2 x float> %tmp4
+}
+
+define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
+; CHECK-LABEL: ins2f1:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+ %tmp3 = extractelement <2 x double> %tmp1, i32 1
+ %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
+ ret <1 x double> %tmp4
+}
+
+define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
+; CHECK-LABEL: ins8b8:
+; CHECK: ins {{v[0-9]+}}.b[4], {{v[0-9]+}}.b[2]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
+ ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
+; CHECK-LABEL: ins4h4:
+; CHECK: ins {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
+ ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
+; CHECK-LABEL: ins2s2:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 0
+ %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <2 x i32> %tmp4
+}
+
+define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
+; CHECK-LABEL: ins1d1:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
+ %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
+ ret <1 x i64> %tmp4
+}
+
+define <2 x float> @ins2f2(<2 x float> %tmp1, <2 x float> %tmp2) {
+; CHECK-LABEL: ins2f2:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+ %tmp3 = extractelement <2 x float> %tmp1, i32 0
+ %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
+ ret <2 x float> %tmp4
+}
+
+define <1 x double> @ins1df1(<1 x double> %tmp1, <1 x double> %tmp2) {
+; CHECK-LABEL: ins1df1:
+; CHECK-NOT: ins {{v[0-9]+}}
+ %tmp3 = extractelement <1 x double> %tmp1, i32 0
+ %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
+ ret <1 x double> %tmp4
+}
+
+define i32 @umovw16b(<16 x i8> %tmp1) {
+; CHECK-LABEL: umovw16b:
+; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[8]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+ %tmp4 = zext i8 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw8h(<8 x i16> %tmp1) {
+; CHECK-LABEL: umovw8h:
+; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = zext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw4s(<4 x i32> %tmp1) {
+; CHECK-LABEL: umovw4s:
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ ret i32 %tmp3
+}
+
+define i64 @umovx2d(<2 x i64> %tmp1) {
+; CHECK-LABEL: umovx2d:
+; CHECK: mov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp3 = extractelement <2 x i64> %tmp1, i32 1
+ ret i64 %tmp3
+}
+
+define i32 @umovw8b(<8 x i8> %tmp1) {
+; CHECK-LABEL: umovw8b:
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 7
+ %tmp4 = zext i8 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw4h(<4 x i16> %tmp1) {
+; CHECK-LABEL: umovw4h:
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = zext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw2s(<2 x i32> %tmp1) {
+; CHECK-LABEL: umovw2s:
+; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+ ret i32 %tmp3
+}
+
+define i64 @umovx1d(<1 x i64> %tmp1) {
+; CHECK-LABEL: umovx1d:
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+ ret i64 %tmp3
+}
+
+define i32 @smovw16b(<16 x i8> %tmp1) {
+; CHECK-LABEL: smovw16b:
+; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.b[8]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+ %tmp4 = sext i8 %tmp3 to i32
+ %tmp5 = add i32 %tmp4, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovw8h(<8 x i16> %tmp1) {
+; CHECK-LABEL: smovw8h:
+; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ %tmp5 = add i32 %tmp4, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovx16b(<16 x i8> %tmp1) {
+; CHECK-LABEL: smovx16b:
+; CHECK: smov {{[xw][0-9]+}}, {{v[0-9]+}}.b[8]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+ %tmp4 = sext i8 %tmp3 to i32
+ %tmp5 = add i32 %tmp4, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovx8h(<8 x i16> %tmp1) {
+; CHECK-LABEL: smovx8h:
+; CHECK: smov {{[xw][0-9]+}}, {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i64 @smovx4s(<4 x i32> %tmp1) {
+; CHECK-LABEL: smovx4s:
+; CHECK: smov {{x[0-9]+}}, {{v[0-9]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ %tmp4 = sext i32 %tmp3 to i64
+ ret i64 %tmp4
+}
+
+define i32 @smovw8b(<8 x i8> %tmp1) {
+; CHECK-LABEL: smovw8b:
+; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.b[4]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 4
+ %tmp4 = sext i8 %tmp3 to i32
+ %tmp5 = add i32 %tmp4, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovw4h(<4 x i16> %tmp1) {
+; CHECK-LABEL: smovw4h:
+; CHECK: smov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ %tmp5 = add i32 %tmp4, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovx8b(<8 x i8> %tmp1) {
+; CHECK-LABEL: smovx8b:
+; CHECK: smov {{[xw][0-9]+}}, {{v[0-9]+}}.b[6]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 6
+ %tmp4 = sext i8 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @smovx4h(<4 x i16> %tmp1) {
+; CHECK-LABEL: smovx4h:
+; CHECK: smov {{[xw][0-9]+}}, {{v[0-9]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i64 @smovx2s(<2 x i32> %tmp1) {
+; CHECK-LABEL: smovx2s:
+; CHECK: smov {{x[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+ %tmp4 = sext i32 %tmp3 to i64
+ ret i64 %tmp4
+}
+
+define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
+; CHECK-LABEL: test_vcopy_lane_s8:
+; CHECK: ins {{v[0-9]+}}.b[5], {{v[0-9]+}}.b[3]
+ %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 11, i32 6, i32 7>
+ ret <8 x i8> %vset_lane
+}
+
+define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
+; CHECK-LABEL: test_vcopyq_laneq_s8:
+; CHECK: ins {{v[0-9]+}}.b[14], {{v[0-9]+}}.b[6]
+ %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 22, i32 15>
+ ret <16 x i8> %vset_lane
+}
+
+define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
+; CHECK-LABEL: test_vcopy_lane_swap_s8:
+; CHECK: ins {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[0]
+ %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 0>
+ ret <8 x i8> %vset_lane
+}
+
+define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
+; CHECK-LABEL: test_vcopyq_laneq_swap_s8:
+; CHECK: ins {{v[0-9]+}}.b[0], {{v[0-9]+}}.b[15]
+ %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ ret <16 x i8> %vset_lane
+}
+
+define <8 x i8> @test_vdup_n_u8(i8 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u8:
+; CHECK: dup {{v[0-9]+}}.8b, {{w[0-9]+}}
+ %vecinit.i = insertelement <8 x i8> undef, i8 %v1, i32 0
+ %vecinit1.i = insertelement <8 x i8> %vecinit.i, i8 %v1, i32 1
+ %vecinit2.i = insertelement <8 x i8> %vecinit1.i, i8 %v1, i32 2
+ %vecinit3.i = insertelement <8 x i8> %vecinit2.i, i8 %v1, i32 3
+ %vecinit4.i = insertelement <8 x i8> %vecinit3.i, i8 %v1, i32 4
+ %vecinit5.i = insertelement <8 x i8> %vecinit4.i, i8 %v1, i32 5
+ %vecinit6.i = insertelement <8 x i8> %vecinit5.i, i8 %v1, i32 6
+ %vecinit7.i = insertelement <8 x i8> %vecinit6.i, i8 %v1, i32 7
+ ret <8 x i8> %vecinit7.i
+}
+
+define <4 x i16> @test_vdup_n_u16(i16 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u16:
+; CHECK: dup {{v[0-9]+}}.4h, {{w[0-9]+}}
+ %vecinit.i = insertelement <4 x i16> undef, i16 %v1, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %v1, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %v1, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %v1, i32 3
+ ret <4 x i16> %vecinit3.i
+}
+
+define <2 x i32> @test_vdup_n_u32(i32 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u32:
+; CHECK: dup {{v[0-9]+}}.2s, {{w[0-9]+}}
+ %vecinit.i = insertelement <2 x i32> undef, i32 %v1, i32 0
+ %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %v1, i32 1
+ ret <2 x i32> %vecinit1.i
+}
+
+define <1 x i64> @test_vdup_n_u64(i64 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u64:
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ %vecinit.i = insertelement <1 x i64> undef, i64 %v1, i32 0
+ ret <1 x i64> %vecinit.i
+}
+
+define <16 x i8> @test_vdupq_n_u8(i8 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u8:
+; CHECK: dup {{v[0-9]+}}.16b, {{w[0-9]+}}
+ %vecinit.i = insertelement <16 x i8> undef, i8 %v1, i32 0
+ %vecinit1.i = insertelement <16 x i8> %vecinit.i, i8 %v1, i32 1
+ %vecinit2.i = insertelement <16 x i8> %vecinit1.i, i8 %v1, i32 2
+ %vecinit3.i = insertelement <16 x i8> %vecinit2.i, i8 %v1, i32 3
+ %vecinit4.i = insertelement <16 x i8> %vecinit3.i, i8 %v1, i32 4
+ %vecinit5.i = insertelement <16 x i8> %vecinit4.i, i8 %v1, i32 5
+ %vecinit6.i = insertelement <16 x i8> %vecinit5.i, i8 %v1, i32 6
+ %vecinit7.i = insertelement <16 x i8> %vecinit6.i, i8 %v1, i32 7
+ %vecinit8.i = insertelement <16 x i8> %vecinit7.i, i8 %v1, i32 8
+ %vecinit9.i = insertelement <16 x i8> %vecinit8.i, i8 %v1, i32 9
+ %vecinit10.i = insertelement <16 x i8> %vecinit9.i, i8 %v1, i32 10
+ %vecinit11.i = insertelement <16 x i8> %vecinit10.i, i8 %v1, i32 11
+ %vecinit12.i = insertelement <16 x i8> %vecinit11.i, i8 %v1, i32 12
+ %vecinit13.i = insertelement <16 x i8> %vecinit12.i, i8 %v1, i32 13
+ %vecinit14.i = insertelement <16 x i8> %vecinit13.i, i8 %v1, i32 14
+ %vecinit15.i = insertelement <16 x i8> %vecinit14.i, i8 %v1, i32 15
+ ret <16 x i8> %vecinit15.i
+}
+
+define <8 x i16> @test_vdupq_n_u16(i16 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u16:
+; CHECK: dup {{v[0-9]+}}.8h, {{w[0-9]+}}
+ %vecinit.i = insertelement <8 x i16> undef, i16 %v1, i32 0
+ %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %v1, i32 1
+ %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %v1, i32 2
+ %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %v1, i32 3
+ %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %v1, i32 4
+ %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %v1, i32 5
+ %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %v1, i32 6
+ %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %v1, i32 7
+ ret <8 x i16> %vecinit7.i
+}
+
+define <4 x i32> @test_vdupq_n_u32(i32 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u32:
+; CHECK: dup {{v[0-9]+}}.4s, {{w[0-9]+}}
+ %vecinit.i = insertelement <4 x i32> undef, i32 %v1, i32 0
+ %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %v1, i32 1
+ %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %v1, i32 2
+ %vecinit3.i = insertelement <4 x i32> %vecinit2.i, i32 %v1, i32 3
+ ret <4 x i32> %vecinit3.i
+}
+
+define <2 x i64> @test_vdupq_n_u64(i64 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u64:
+; CHECK: dup {{v[0-9]+}}.2d, {{x[0-9]+}}
+ %vecinit.i = insertelement <2 x i64> undef, i64 %v1, i32 0
+ %vecinit1.i = insertelement <2 x i64> %vecinit.i, i64 %v1, i32 1
+ ret <2 x i64> %vecinit1.i
+}
+
+define <8 x i8> @test_vdup_lane_s8(<8 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdup_lane_s8:
+; CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <8 x i8> %shuffle
+}
+
+define <4 x i16> @test_vdup_lane_s16(<4 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdup_lane_s16:
+; CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ ret <4 x i16> %shuffle
+}
+
+define <2 x i32> @test_vdup_lane_s32(<2 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdup_lane_s32:
+; CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x i32> %shuffle
+}
+
+define <16 x i8> @test_vdupq_lane_s8(<8 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s8:
+; CHECK: dup {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <16 x i8> %shuffle
+}
+
+define <8 x i16> @test_vdupq_lane_s16(<4 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s16:
+; CHECK: dup {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ ret <8 x i16> %shuffle
+}
+
+define <4 x i32> @test_vdupq_lane_s32(<2 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s32:
+; CHECK: dup {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %shuffle
+}
+
+define <2 x i64> @test_vdupq_lane_s64(<1 x i64> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s64:
+; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+ %shuffle = shufflevector <1 x i64> %v1, <1 x i64> undef, <2 x i32> zeroinitializer
+ ret <2 x i64> %shuffle
+}
+
+define <8 x i8> @test_vdup_laneq_s8(<16 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdup_laneq_s8:
+; CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <8 x i8> %shuffle
+}
+
+define <4 x i16> @test_vdup_laneq_s16(<8 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdup_laneq_s16:
+; CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ ret <4 x i16> %shuffle
+}
+
+define <2 x i32> @test_vdup_laneq_s32(<4 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdup_laneq_s32:
+; CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x i32> %shuffle
+}
+
+define <16 x i8> @test_vdupq_laneq_s8(<16 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s8:
+; CHECK: dup {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <16 x i8> %shuffle
+}
+
+define <8 x i16> @test_vdupq_laneq_s16(<8 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s16:
+; CHECK: dup {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ ret <8 x i16> %shuffle
+}
+
+define <4 x i32> @test_vdupq_laneq_s32(<4 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s32:
+; CHECK: dup {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %shuffle
+}
+
+define <2 x i64> @test_vdupq_laneq_s64(<2 x i64> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s64:
+; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+ %shuffle = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
+ ret <2 x i64> %shuffle
+}
+
+define i64 @test_bitcastv8i8toi64(<8 x i8> %in) {
+; CHECK-LABEL: test_bitcastv8i8toi64:
+ %res = bitcast <8 x i8> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv4i16toi64(<4 x i16> %in) {
+; CHECK-LABEL: test_bitcastv4i16toi64:
+ %res = bitcast <4 x i16> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv2i32toi64(<2 x i32> %in) {
+; CHECK-LABEL: test_bitcastv2i32toi64:
+ %res = bitcast <2 x i32> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv2f32toi64(<2 x float> %in) {
+; CHECK-LABEL: test_bitcastv2f32toi64:
+ %res = bitcast <2 x float> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv1i64toi64(<1 x i64> %in) {
+; CHECK-LABEL: test_bitcastv1i64toi64:
+ %res = bitcast <1 x i64> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv1f64toi64(<1 x double> %in) {
+; CHECK-LABEL: test_bitcastv1f64toi64:
+ %res = bitcast <1 x double> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define <8 x i8> @test_bitcasti64tov8i8(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov8i8:
+ %res = bitcast i64 %in to <8 x i8>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <8 x i8> %res
+}
+
+define <4 x i16> @test_bitcasti64tov4i16(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov4i16:
+ %res = bitcast i64 %in to <4 x i16>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <4 x i16> %res
+}
+
+define <2 x i32> @test_bitcasti64tov2i32(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov2i32:
+ %res = bitcast i64 %in to <2 x i32>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <2 x i32> %res
+}
+
+define <2 x float> @test_bitcasti64tov2f32(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov2f32:
+ %res = bitcast i64 %in to <2 x float>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <2 x float> %res
+}
+
+define <1 x i64> @test_bitcasti64tov1i64(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov1i64:
+ %res = bitcast i64 %in to <1 x i64>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <1 x i64> %res
+}
+
+define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov1f64:
+ %res = bitcast i64 %in to <1 x double>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <1 x double> %res
+}
+
+define <1 x i64> @test_bitcastv8i8tov1f64(<8 x i8> %a) #0 {
+; CHECK-LABEL: test_bitcastv8i8tov1f64:
+; CHECK: neg {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: fcvtzs {{[xd][0-9]+}}, {{d[0-9]+}}
+ %sub.i = sub <8 x i8> zeroinitializer, %a
+ %1 = bitcast <8 x i8> %sub.i to <1 x double>
+ %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+ ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv4i16tov1f64(<4 x i16> %a) #0 {
+; CHECK-LABEL: test_bitcastv4i16tov1f64:
+; CHECK: neg {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-NEXT: fcvtzs {{[dx][0-9]+}}, {{d[0-9]+}}
+ %sub.i = sub <4 x i16> zeroinitializer, %a
+ %1 = bitcast <4 x i16> %sub.i to <1 x double>
+ %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+ ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv2i32tov1f64(<2 x i32> %a) #0 {
+; CHECK-LABEL: test_bitcastv2i32tov1f64:
+; CHECK: neg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-NEXT: fcvtzs {{[xd][0-9]+}}, {{d[0-9]+}}
+ %sub.i = sub <2 x i32> zeroinitializer, %a
+ %1 = bitcast <2 x i32> %sub.i to <1 x double>
+ %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+ ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv1i64tov1f64(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1i64tov1f64:
+; CHECK: neg {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NEXT: fcvtzs {{[dx][0-9]+}}, {{d[0-9]+}}
+ %sub.i = sub <1 x i64> zeroinitializer, %a
+ %1 = bitcast <1 x i64> %sub.i to <1 x double>
+ %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+ ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv2f32tov1f64(<2 x float> %a) #0 {
+; CHECK-LABEL: test_bitcastv2f32tov1f64:
+; CHECK: fneg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-NEXT: fcvtzs {{[xd][0-9]+}}, {{d[0-9]+}}
+ %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %a
+ %1 = bitcast <2 x float> %sub.i to <1 x double>
+ %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+ ret <1 x i64> %vcvt.i
+}
+
+define <8 x i8> @test_bitcastv1f64tov8i8(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov8i8:
+; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
+; CHECK-NEXT: neg {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+ %1 = bitcast <1 x double> %vcvt.i to <8 x i8>
+ %sub.i = sub <8 x i8> zeroinitializer, %1
+ ret <8 x i8> %sub.i
+}
+
+define <4 x i16> @test_bitcastv1f64tov4i16(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov4i16:
+; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
+; CHECK-NEXT: neg {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+ %1 = bitcast <1 x double> %vcvt.i to <4 x i16>
+ %sub.i = sub <4 x i16> zeroinitializer, %1
+ ret <4 x i16> %sub.i
+}
+
+define <2 x i32> @test_bitcastv1f64tov2i32(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov2i32:
+; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
+; CHECK-NEXT: neg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+ %1 = bitcast <1 x double> %vcvt.i to <2 x i32>
+ %sub.i = sub <2 x i32> zeroinitializer, %1
+ ret <2 x i32> %sub.i
+}
+
+define <1 x i64> @test_bitcastv1f64tov1i64(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov1i64:
+; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
+; CHECK-NEXT: neg {{d[0-9]+}}, {{d[0-9]+}}
+ %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+ %1 = bitcast <1 x double> %vcvt.i to <1 x i64>
+ %sub.i = sub <1 x i64> zeroinitializer, %1
+ ret <1 x i64> %sub.i
+}
+
+define <2 x float> @test_bitcastv1f64tov2f32(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov2f32:
+; CHECK: scvtf {{d[0-9]+}}, {{[xd][0-9]+}}
+; CHECK-NEXT: fneg {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+ %1 = bitcast <1 x double> %vcvt.i to <2 x float>
+ %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %1
+ ret <2 x float> %sub.i
+}
+
+; Test insert element into an undef vector
+define <8 x i8> @scalar_to_vector.v8i8(i8 %a) {
+; CHECK-LABEL: scalar_to_vector.v8i8:
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+ %b = insertelement <8 x i8> undef, i8 %a, i32 0
+ ret <8 x i8> %b
+}
+
+define <16 x i8> @scalar_to_vector.v16i8(i8 %a) {
+; CHECK-LABEL: scalar_to_vector.v16i8:
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+ %b = insertelement <16 x i8> undef, i8 %a, i32 0
+ ret <16 x i8> %b
+}
+
+define <4 x i16> @scalar_to_vector.v4i16(i16 %a) {
+; CHECK-LABEL: scalar_to_vector.v4i16:
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+ %b = insertelement <4 x i16> undef, i16 %a, i32 0
+ ret <4 x i16> %b
+}
+
+define <8 x i16> @scalar_to_vector.v8i16(i16 %a) {
+; CHECK-LABEL: scalar_to_vector.v8i16:
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+ %b = insertelement <8 x i16> undef, i16 %a, i32 0
+ ret <8 x i16> %b
+}
+
+define <2 x i32> @scalar_to_vector.v2i32(i32 %a) {
+; CHECK-LABEL: scalar_to_vector.v2i32:
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+ %b = insertelement <2 x i32> undef, i32 %a, i32 0
+ ret <2 x i32> %b
+}
+
+define <4 x i32> @scalar_to_vector.v4i32(i32 %a) {
+; CHECK-LABEL: scalar_to_vector.v4i32:
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+ %b = insertelement <4 x i32> undef, i32 %a, i32 0
+ ret <4 x i32> %b
+}
+
+define <2 x i64> @scalar_to_vector.v2i64(i64 %a) {
+; CHECK-LABEL: scalar_to_vector.v2i64:
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ %b = insertelement <2 x i64> undef, i64 %a, i32 0
+ ret <2 x i64> %b
+}
+
+define <8 x i8> @testDUP.v1i8(<1 x i8> %a) {
+; CHECK-LABEL: testDUP.v1i8:
+; CHECK: dup v0.8b, v0.b[0]
+ %b = extractelement <1 x i8> %a, i32 0
+ %c = insertelement <8 x i8> undef, i8 %b, i32 0
+ %d = insertelement <8 x i8> %c, i8 %b, i32 1
+ %e = insertelement <8 x i8> %d, i8 %b, i32 2
+ %f = insertelement <8 x i8> %e, i8 %b, i32 3
+ %g = insertelement <8 x i8> %f, i8 %b, i32 4
+ %h = insertelement <8 x i8> %g, i8 %b, i32 5
+ %i = insertelement <8 x i8> %h, i8 %b, i32 6
+ %j = insertelement <8 x i8> %i, i8 %b, i32 7
+ ret <8 x i8> %j
+}
+
+define <8 x i16> @testDUP.v1i16(<1 x i16> %a) {
+; CHECK-LABEL: testDUP.v1i16:
+; CHECK: dup v0.8h, v0.h[0]
+ %b = extractelement <1 x i16> %a, i32 0
+ %c = insertelement <8 x i16> undef, i16 %b, i32 0
+ %d = insertelement <8 x i16> %c, i16 %b, i32 1
+ %e = insertelement <8 x i16> %d, i16 %b, i32 2
+ %f = insertelement <8 x i16> %e, i16 %b, i32 3
+ %g = insertelement <8 x i16> %f, i16 %b, i32 4
+ %h = insertelement <8 x i16> %g, i16 %b, i32 5
+ %i = insertelement <8 x i16> %h, i16 %b, i32 6
+ %j = insertelement <8 x i16> %i, i16 %b, i32 7
+ ret <8 x i16> %j
+}
+
+define <4 x i32> @testDUP.v1i32(<1 x i32> %a) {
+; CHECK-LABEL: testDUP.v1i32:
+; CHECK: dup v0.4s, v0.s[0]
+ %b = extractelement <1 x i32> %a, i32 0
+ %c = insertelement <4 x i32> undef, i32 %b, i32 0
+ %d = insertelement <4 x i32> %c, i32 %b, i32 1
+ %e = insertelement <4 x i32> %d, i32 %b, i32 2
+ %f = insertelement <4 x i32> %e, i32 %b, i32 3
+ ret <4 x i32> %f
+}
+
+define <8 x i8> @getl(<16 x i8> %x) #0 {
+; CHECK-LABEL: getl:
+; CHECK: ret
+ %vecext = extractelement <16 x i8> %x, i32 0
+ %vecinit = insertelement <8 x i8> undef, i8 %vecext, i32 0
+ %vecext1 = extractelement <16 x i8> %x, i32 1
+ %vecinit2 = insertelement <8 x i8> %vecinit, i8 %vecext1, i32 1
+ %vecext3 = extractelement <16 x i8> %x, i32 2
+ %vecinit4 = insertelement <8 x i8> %vecinit2, i8 %vecext3, i32 2
+ %vecext5 = extractelement <16 x i8> %x, i32 3
+ %vecinit6 = insertelement <8 x i8> %vecinit4, i8 %vecext5, i32 3
+ %vecext7 = extractelement <16 x i8> %x, i32 4
+ %vecinit8 = insertelement <8 x i8> %vecinit6, i8 %vecext7, i32 4
+ %vecext9 = extractelement <16 x i8> %x, i32 5
+ %vecinit10 = insertelement <8 x i8> %vecinit8, i8 %vecext9, i32 5
+ %vecext11 = extractelement <16 x i8> %x, i32 6
+ %vecinit12 = insertelement <8 x i8> %vecinit10, i8 %vecext11, i32 6
+ %vecext13 = extractelement <16 x i8> %x, i32 7
+ %vecinit14 = insertelement <8 x i8> %vecinit12, i8 %vecext13, i32 7
+ ret <8 x i8> %vecinit14
+}
+
+define <4 x i16> @test_dup_v2i32_v4i16(<2 x i32> %a) {
+; CHECK-LABEL: test_dup_v2i32_v4i16:
+; CHECK: dup v0.4h, v0.h[2]
+entry:
+ %x = extractelement <2 x i32> %a, i32 1
+ %vget_lane = trunc i32 %x to i16
+ %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ ret <4 x i16> %vecinit3.i
+}
+
+define <8 x i16> @test_dup_v4i32_v8i16(<4 x i32> %a) {
+; CHECK-LABEL: test_dup_v4i32_v8i16:
+; CHECK: dup v0.8h, v0.h[6]
+entry:
+ %x = extractelement <4 x i32> %a, i32 3
+ %vget_lane = trunc i32 %x to i16
+ %vecinit.i = insertelement <8 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %vget_lane, i32 4
+ %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %vget_lane, i32 5
+ %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %vget_lane, i32 6
+ %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %vget_lane, i32 7
+ ret <8 x i16> %vecinit7.i
+}
+
+define <4 x i16> @test_dup_v1i64_v4i16(<1 x i64> %a) {
+; CHECK-LABEL: test_dup_v1i64_v4i16:
+; CHECK: dup v0.4h, v0.h[0]
+entry:
+ %x = extractelement <1 x i64> %a, i32 0
+ %vget_lane = trunc i64 %x to i16
+ %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ ret <4 x i16> %vecinit3.i
+}
+
+define <2 x i32> @test_dup_v1i64_v2i32(<1 x i64> %a) {
+; CHECK-LABEL: test_dup_v1i64_v2i32:
+; CHECK: dup v0.2s, v0.s[0]
+entry:
+ %x = extractelement <1 x i64> %a, i32 0
+ %vget_lane = trunc i64 %x to i32
+ %vecinit.i = insertelement <2 x i32> undef, i32 %vget_lane, i32 0
+ %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %vget_lane, i32 1
+ ret <2 x i32> %vecinit1.i
+}
+
+define <8 x i16> @test_dup_v2i64_v8i16(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v8i16:
+; CHECK: dup v0.8h, v0.h[4]
+entry:
+ %x = extractelement <2 x i64> %a, i32 1
+ %vget_lane = trunc i64 %x to i16
+ %vecinit.i = insertelement <8 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %vget_lane, i32 4
+ %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %vget_lane, i32 5
+ %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %vget_lane, i32 6
+ %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %vget_lane, i32 7
+ ret <8 x i16> %vecinit7.i
+}
+
+define <4 x i32> @test_dup_v2i64_v4i32(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v4i32:
+; CHECK: dup v0.4s, v0.s[2]
+entry:
+ %x = extractelement <2 x i64> %a, i32 1
+ %vget_lane = trunc i64 %x to i32
+ %vecinit.i = insertelement <4 x i32> undef, i32 %vget_lane, i32 0
+ %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %vget_lane, i32 1
+ %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %vget_lane, i32 2
+ %vecinit3.i = insertelement <4 x i32> %vecinit2.i, i32 %vget_lane, i32 3
+ ret <4 x i32> %vecinit3.i
+}
+
+define <4 x i16> @test_dup_v4i32_v4i16(<4 x i32> %a) {
+; CHECK-LABEL: test_dup_v4i32_v4i16:
+; CHECK: dup v0.4h, v0.h[2]
+entry:
+ %x = extractelement <4 x i32> %a, i32 1
+ %vget_lane = trunc i32 %x to i16
+ %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ ret <4 x i16> %vecinit3.i
+}
+
+define <4 x i16> @test_dup_v2i64_v4i16(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v4i16:
+; CHECK: dup v0.4h, v0.h[0]
+entry:
+ %x = extractelement <2 x i64> %a, i32 0
+ %vget_lane = trunc i64 %x to i16
+ %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ ret <4 x i16> %vecinit3.i
+}
+
+define <2 x i32> @test_dup_v2i64_v2i32(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v2i32:
+; CHECK: dup v0.2s, v0.s[0]
+entry:
+ %x = extractelement <2 x i64> %a, i32 0
+ %vget_lane = trunc i64 %x to i32
+ %vecinit.i = insertelement <2 x i32> undef, i32 %vget_lane, i32 0
+ %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %vget_lane, i32 1
+ ret <2 x i32> %vecinit1.i
+}
+
+
+define <2 x float> @test_scalar_to_vector_f32_to_v2f32(<2 x float> %a) {
+; CHECK-LABEL: test_scalar_to_vector_f32_to_v2f32:
+; CHECK: fmaxp s{{[0-9]+}}, v{{[0-9]+}}.2s
+; CHECK-NEXT: ret
+entry:
+ %0 = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %a)
+ %1 = insertelement <1 x float> undef, float %0, i32 0
+ %2 = extractelement <1 x float> %1, i32 0
+ %vecinit1.i = insertelement <2 x float> undef, float %2, i32 0
+ ret <2 x float> %vecinit1.i
+}
+
+define <4 x float> @test_scalar_to_vector_f32_to_v4f32(<2 x float> %a) {
+; CHECK-LABEL: test_scalar_to_vector_f32_to_v4f32:
+; CHECK: fmaxp s{{[0-9]+}}, v{{[0-9]+}}.2s
+; CHECK-NEXT: ret
+entry:
+ %0 = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %a)
+ %1 = insertelement <1 x float> undef, float %0, i32 0
+ %2 = extractelement <1 x float> %1, i32 0
+ %vecinit1.i = insertelement <4 x float> undef, float %2, i32 0
+ ret <4 x float> %vecinit1.i
+}
+
+declare float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float>)
+
+define <2 x i32> @test_concat_undef_v1i32(<2 x i32> %a) {
+; CHECK-LABEL: test_concat_undef_v1i32:
+; CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %0 = extractelement <2 x i32> %a, i32 0
+ %vecinit1.i = insertelement <2 x i32> undef, i32 %0, i32 1
+ ret <2 x i32> %vecinit1.i
+}
+
+declare i32 @llvm.aarch64.neon.sqabs.i32(i32) #4
+
+define <2 x i32> @test_concat_v1i32_undef(i32 %a) {
+; CHECK-LABEL: test_concat_v1i32_undef:
+; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
+; CHECK-NEXT: ret
+entry:
+ %b = tail call i32 @llvm.aarch64.neon.sqabs.i32(i32 %a)
+ %vecinit.i432 = insertelement <2 x i32> undef, i32 %b, i32 0
+ ret <2 x i32> %vecinit.i432
+}
+
+define <2 x i32> @test_concat_same_v1i32_v1i32(<2 x i32> %a) {
+; CHECK-LABEL: test_concat_same_v1i32_v1i32:
+; CHECK: dup v{{[0-9]+}}.2s, v{{[0-9]+}}.s[0]
+entry:
+ %0 = extractelement <2 x i32> %a, i32 0
+ %vecinit.i = insertelement <2 x i32> undef, i32 %0, i32 0
+ %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %0, i32 1
+ ret <2 x i32> %vecinit1.i
+}
+
+define <2 x i32> @test_concat_diff_v1i32_v1i32(i32 %a, i32 %b) {
+; CHECK-LABEL: test_concat_diff_v1i32_v1i32:
+; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
+; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
+; CHECK-NEXT: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %c = tail call i32 @llvm.aarch64.neon.sqabs.i32(i32 %a)
+ %d = insertelement <2 x i32> undef, i32 %c, i32 0
+ %e = tail call i32 @llvm.aarch64.neon.sqabs.i32(i32 %b)
+ %f = insertelement <2 x i32> undef, i32 %e, i32 0
+ %h = shufflevector <2 x i32> %d, <2 x i32> %f, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32> %h
+}
+
+define <16 x i8> @test_concat_v16i8_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v16i8_v16i8:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecinit30 = shufflevector <16 x i8> %x, <16 x i8> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i8> %vecinit30
+}
+
+define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v8i8_v16i8:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <8 x i8> %x, i32 0
+ %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
+ %vecext1 = extractelement <8 x i8> %x, i32 1
+ %vecinit2 = insertelement <16 x i8> %vecinit, i8 %vecext1, i32 1
+ %vecext3 = extractelement <8 x i8> %x, i32 2
+ %vecinit4 = insertelement <16 x i8> %vecinit2, i8 %vecext3, i32 2
+ %vecext5 = extractelement <8 x i8> %x, i32 3
+ %vecinit6 = insertelement <16 x i8> %vecinit4, i8 %vecext5, i32 3
+ %vecext7 = extractelement <8 x i8> %x, i32 4
+ %vecinit8 = insertelement <16 x i8> %vecinit6, i8 %vecext7, i32 4
+ %vecext9 = extractelement <8 x i8> %x, i32 5
+ %vecinit10 = insertelement <16 x i8> %vecinit8, i8 %vecext9, i32 5
+ %vecext11 = extractelement <8 x i8> %x, i32 6
+ %vecinit12 = insertelement <16 x i8> %vecinit10, i8 %vecext11, i32 6
+ %vecext13 = extractelement <8 x i8> %x, i32 7
+ %vecinit14 = insertelement <16 x i8> %vecinit12, i8 %vecext13, i32 7
+ %vecinit30 = shufflevector <16 x i8> %vecinit14, <16 x i8> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i8> %vecinit30
+}
+
+define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v16i8_v8i8:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <16 x i8> %x, i32 0
+ %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
+ %vecext1 = extractelement <16 x i8> %x, i32 1
+ %vecinit2 = insertelement <16 x i8> %vecinit, i8 %vecext1, i32 1
+ %vecext3 = extractelement <16 x i8> %x, i32 2
+ %vecinit4 = insertelement <16 x i8> %vecinit2, i8 %vecext3, i32 2
+ %vecext5 = extractelement <16 x i8> %x, i32 3
+ %vecinit6 = insertelement <16 x i8> %vecinit4, i8 %vecext5, i32 3
+ %vecext7 = extractelement <16 x i8> %x, i32 4
+ %vecinit8 = insertelement <16 x i8> %vecinit6, i8 %vecext7, i32 4
+ %vecext9 = extractelement <16 x i8> %x, i32 5
+ %vecinit10 = insertelement <16 x i8> %vecinit8, i8 %vecext9, i32 5
+ %vecext11 = extractelement <16 x i8> %x, i32 6
+ %vecinit12 = insertelement <16 x i8> %vecinit10, i8 %vecext11, i32 6
+ %vecext13 = extractelement <16 x i8> %x, i32 7
+ %vecinit14 = insertelement <16 x i8> %vecinit12, i8 %vecext13, i32 7
+ %vecext15 = extractelement <8 x i8> %y, i32 0
+ %vecinit16 = insertelement <16 x i8> %vecinit14, i8 %vecext15, i32 8
+ %vecext17 = extractelement <8 x i8> %y, i32 1
+ %vecinit18 = insertelement <16 x i8> %vecinit16, i8 %vecext17, i32 9
+ %vecext19 = extractelement <8 x i8> %y, i32 2
+ %vecinit20 = insertelement <16 x i8> %vecinit18, i8 %vecext19, i32 10
+ %vecext21 = extractelement <8 x i8> %y, i32 3
+ %vecinit22 = insertelement <16 x i8> %vecinit20, i8 %vecext21, i32 11
+ %vecext23 = extractelement <8 x i8> %y, i32 4
+ %vecinit24 = insertelement <16 x i8> %vecinit22, i8 %vecext23, i32 12
+ %vecext25 = extractelement <8 x i8> %y, i32 5
+ %vecinit26 = insertelement <16 x i8> %vecinit24, i8 %vecext25, i32 13
+ %vecext27 = extractelement <8 x i8> %y, i32 6
+ %vecinit28 = insertelement <16 x i8> %vecinit26, i8 %vecext27, i32 14
+ %vecext29 = extractelement <8 x i8> %y, i32 7
+ %vecinit30 = insertelement <16 x i8> %vecinit28, i8 %vecext29, i32 15
+ ret <16 x i8> %vecinit30
+}
+
+define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v8i8_v8i8:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <8 x i8> %x, i32 0
+ %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
+ %vecext1 = extractelement <8 x i8> %x, i32 1
+ %vecinit2 = insertelement <16 x i8> %vecinit, i8 %vecext1, i32 1
+ %vecext3 = extractelement <8 x i8> %x, i32 2
+ %vecinit4 = insertelement <16 x i8> %vecinit2, i8 %vecext3, i32 2
+ %vecext5 = extractelement <8 x i8> %x, i32 3
+ %vecinit6 = insertelement <16 x i8> %vecinit4, i8 %vecext5, i32 3
+ %vecext7 = extractelement <8 x i8> %x, i32 4
+ %vecinit8 = insertelement <16 x i8> %vecinit6, i8 %vecext7, i32 4
+ %vecext9 = extractelement <8 x i8> %x, i32 5
+ %vecinit10 = insertelement <16 x i8> %vecinit8, i8 %vecext9, i32 5
+ %vecext11 = extractelement <8 x i8> %x, i32 6
+ %vecinit12 = insertelement <16 x i8> %vecinit10, i8 %vecext11, i32 6
+ %vecext13 = extractelement <8 x i8> %x, i32 7
+ %vecinit14 = insertelement <16 x i8> %vecinit12, i8 %vecext13, i32 7
+ %vecext15 = extractelement <8 x i8> %y, i32 0
+ %vecinit16 = insertelement <16 x i8> %vecinit14, i8 %vecext15, i32 8
+ %vecext17 = extractelement <8 x i8> %y, i32 1
+ %vecinit18 = insertelement <16 x i8> %vecinit16, i8 %vecext17, i32 9
+ %vecext19 = extractelement <8 x i8> %y, i32 2
+ %vecinit20 = insertelement <16 x i8> %vecinit18, i8 %vecext19, i32 10
+ %vecext21 = extractelement <8 x i8> %y, i32 3
+ %vecinit22 = insertelement <16 x i8> %vecinit20, i8 %vecext21, i32 11
+ %vecext23 = extractelement <8 x i8> %y, i32 4
+ %vecinit24 = insertelement <16 x i8> %vecinit22, i8 %vecext23, i32 12
+ %vecext25 = extractelement <8 x i8> %y, i32 5
+ %vecinit26 = insertelement <16 x i8> %vecinit24, i8 %vecext25, i32 13
+ %vecext27 = extractelement <8 x i8> %y, i32 6
+ %vecinit28 = insertelement <16 x i8> %vecinit26, i8 %vecext27, i32 14
+ %vecext29 = extractelement <8 x i8> %y, i32 7
+ %vecinit30 = insertelement <16 x i8> %vecinit28, i8 %vecext29, i32 15
+ ret <16 x i8> %vecinit30
+}
+
+define <8 x i16> @test_concat_v8i16_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v8i16_v8i16:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecinit14 = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+ ret <8 x i16> %vecinit14
+}
+
+define <8 x i16> @test_concat_v8i16_v4i16_v8i16(<4 x i16> %x, <8 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v4i16_v8i16:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <4 x i16> %x, i32 0
+ %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
+ %vecext1 = extractelement <4 x i16> %x, i32 1
+ %vecinit2 = insertelement <8 x i16> %vecinit, i16 %vecext1, i32 1
+ %vecext3 = extractelement <4 x i16> %x, i32 2
+ %vecinit4 = insertelement <8 x i16> %vecinit2, i16 %vecext3, i32 2
+ %vecext5 = extractelement <4 x i16> %x, i32 3
+ %vecinit6 = insertelement <8 x i16> %vecinit4, i16 %vecext5, i32 3
+ %vecinit14 = shufflevector <8 x i16> %vecinit6, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+ ret <8 x i16> %vecinit14
+}
+
+define <8 x i16> @test_concat_v8i16_v8i16_v4i16(<8 x i16> %x, <4 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v8i16_v4i16:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <8 x i16> %x, i32 0
+ %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
+ %vecext1 = extractelement <8 x i16> %x, i32 1
+ %vecinit2 = insertelement <8 x i16> %vecinit, i16 %vecext1, i32 1
+ %vecext3 = extractelement <8 x i16> %x, i32 2
+ %vecinit4 = insertelement <8 x i16> %vecinit2, i16 %vecext3, i32 2
+ %vecext5 = extractelement <8 x i16> %x, i32 3
+ %vecinit6 = insertelement <8 x i16> %vecinit4, i16 %vecext5, i32 3
+ %vecext7 = extractelement <4 x i16> %y, i32 0
+ %vecinit8 = insertelement <8 x i16> %vecinit6, i16 %vecext7, i32 4
+ %vecext9 = extractelement <4 x i16> %y, i32 1
+ %vecinit10 = insertelement <8 x i16> %vecinit8, i16 %vecext9, i32 5
+ %vecext11 = extractelement <4 x i16> %y, i32 2
+ %vecinit12 = insertelement <8 x i16> %vecinit10, i16 %vecext11, i32 6
+ %vecext13 = extractelement <4 x i16> %y, i32 3
+ %vecinit14 = insertelement <8 x i16> %vecinit12, i16 %vecext13, i32 7
+ ret <8 x i16> %vecinit14
+}
+
+define <8 x i16> @test_concat_v8i16_v4i16_v4i16(<4 x i16> %x, <4 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v4i16_v4i16:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <4 x i16> %x, i32 0
+ %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
+ %vecext1 = extractelement <4 x i16> %x, i32 1
+ %vecinit2 = insertelement <8 x i16> %vecinit, i16 %vecext1, i32 1
+ %vecext3 = extractelement <4 x i16> %x, i32 2
+ %vecinit4 = insertelement <8 x i16> %vecinit2, i16 %vecext3, i32 2
+ %vecext5 = extractelement <4 x i16> %x, i32 3
+ %vecinit6 = insertelement <8 x i16> %vecinit4, i16 %vecext5, i32 3
+ %vecext7 = extractelement <4 x i16> %y, i32 0
+ %vecinit8 = insertelement <8 x i16> %vecinit6, i16 %vecext7, i32 4
+ %vecext9 = extractelement <4 x i16> %y, i32 1
+ %vecinit10 = insertelement <8 x i16> %vecinit8, i16 %vecext9, i32 5
+ %vecext11 = extractelement <4 x i16> %y, i32 2
+ %vecinit12 = insertelement <8 x i16> %vecinit10, i16 %vecext11, i32 6
+ %vecext13 = extractelement <4 x i16> %y, i32 3
+ %vecinit14 = insertelement <8 x i16> %vecinit12, i16 %vecext13, i32 7
+ ret <8 x i16> %vecinit14
+}
+
+define <4 x i32> @test_concat_v4i32_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v4i32_v4i32:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecinit6 = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x i32> %vecinit6
+}
+
+define <4 x i32> @test_concat_v4i32_v2i32_v4i32(<2 x i32> %x, <4 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v2i32_v4i32:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <2 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecext1 = extractelement <2 x i32> %x, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+ %vecinit6 = shufflevector <4 x i32> %vecinit2, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x i32> %vecinit6
+}
+
+define <4 x i32> @test_concat_v4i32_v4i32_v2i32(<4 x i32> %x, <2 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v4i32_v2i32:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecext1 = extractelement <4 x i32> %x, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+ %vecext3 = extractelement <2 x i32> %y, i32 0
+ %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext3, i32 2
+ %vecext5 = extractelement <2 x i32> %y, i32 1
+ %vecinit6 = insertelement <4 x i32> %vecinit4, i32 %vecext5, i32 3
+ ret <4 x i32> %vecinit6
+}
+
+define <4 x i32> @test_concat_v4i32_v2i32_v2i32(<2 x i32> %x, <2 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v2i32_v2i32:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecinit6 = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %vecinit6
+}
+
+define <2 x i64> @test_concat_v2i64_v2i64_v2i64(<2 x i64> %x, <2 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v2i64_v2i64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vecinit2 = shufflevector <2 x i64> %x, <2 x i64> %y, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %vecinit2
+}
+
+define <2 x i64> @test_concat_v2i64_v1i64_v2i64(<1 x i64> %x, <2 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v1i64_v2i64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vecext = extractelement <1 x i64> %x, i32 0
+ %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
+ %vecinit2 = shufflevector <2 x i64> %vecinit, <2 x i64> %y, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %vecinit2
+}
+
+define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v2i64_v1i64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <2 x i64> %x, i32 0
+ %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
+ %vecext1 = extractelement <1 x i64> %y, i32 0
+ %vecinit2 = insertelement <2 x i64> %vecinit, i64 %vecext1, i32 1
+ ret <2 x i64> %vecinit2
+}
+
+define <2 x i64> @test_concat_v2i64_v1i64_v1i64(<1 x i64> %x, <1 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v1i64_v1i64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %vecext = extractelement <1 x i64> %x, i32 0
+ %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
+ %vecext1 = extractelement <1 x i64> %y, i32 0
+ %vecinit2 = insertelement <2 x i64> %vecinit, i64 %vecext1, i32 1
+ ret <2 x i64> %vecinit2
+}
+
+
+define <4 x i16> @concat_vector_v4i16_const() {
+; CHECK-LABEL: concat_vector_v4i16_const:
+; CHECK: movi {{d[0-9]+}}, #0
+ %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <4 x i32> zeroinitializer
+ ret <4 x i16> %r
+}
+
+define <4 x i16> @concat_vector_v4i16_const_one() {
+; CHECK-LABEL: concat_vector_v4i16_const_one:
+; CHECK: movi {{v[0-9]+}}.4h, #0x1
+ %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <4 x i32> zeroinitializer
+ ret <4 x i16> %r
+}
+
+define <4 x i32> @concat_vector_v4i32_const() {
+; CHECK-LABEL: concat_vector_v4i32_const:
+; CHECK: movi {{v[0-9]+}}.2d, #0
+ %r = shufflevector <1 x i32> zeroinitializer, <1 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %r
+}
+
+define <8 x i8> @concat_vector_v8i8_const() {
+; CHECK-LABEL: concat_vector_v8i8_const:
+; CHECK: movi {{d[0-9]+}}, #0
+ %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <8 x i32> zeroinitializer
+ ret <8 x i8> %r
+}
+
+define <8 x i16> @concat_vector_v8i16_const() {
+; CHECK-LABEL: concat_vector_v8i16_const:
+; CHECK: movi {{v[0-9]+}}.2d, #0
+ %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <8 x i32> zeroinitializer
+ ret <8 x i16> %r
+}
+
+define <8 x i16> @concat_vector_v8i16_const_one() {
+; CHECK-LABEL: concat_vector_v8i16_const_one:
+; CHECK: movi {{v[0-9]+}}.8h, #0x1
+ %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <8 x i32> zeroinitializer
+ ret <8 x i16> %r
+}
+
+define <16 x i8> @concat_vector_v16i8_const() {
+; CHECK-LABEL: concat_vector_v16i8_const:
+; CHECK: movi {{v[0-9]+}}.2d, #0
+ %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <16 x i32> zeroinitializer
+ ret <16 x i8> %r
+}
+
+define <4 x i16> @concat_vector_v4i16(<1 x i16> %a) {
+; CHECK-LABEL: concat_vector_v4i16:
+; CHECK: dup v0.4h, v0.h[0]
+ %r = shufflevector <1 x i16> %a, <1 x i16> undef, <4 x i32> zeroinitializer
+ ret <4 x i16> %r
+}
+
+define <4 x i32> @concat_vector_v4i32(<1 x i32> %a) {
+; CHECK-LABEL: concat_vector_v4i32:
+; CHECK: dup v0.4s, v0.s[0]
+ %r = shufflevector <1 x i32> %a, <1 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %r
+}
+
+define <8 x i8> @concat_vector_v8i8(<1 x i8> %a) {
+; CHECK-LABEL: concat_vector_v8i8:
+; CHECK: dup v0.8b, v0.b[0]
+ %r = shufflevector <1 x i8> %a, <1 x i8> undef, <8 x i32> zeroinitializer
+ ret <8 x i8> %r
+}
+
+define <8 x i16> @concat_vector_v8i16(<1 x i16> %a) {
+; CHECK-LABEL: concat_vector_v8i16:
+; CHECK: dup v0.8h, v0.h[0]
+ %r = shufflevector <1 x i16> %a, <1 x i16> undef, <8 x i32> zeroinitializer
+ ret <8 x i16> %r
+}
+
+define <16 x i8> @concat_vector_v16i8(<1 x i8> %a) {
+; CHECK-LABEL: concat_vector_v16i8:
+; CHECK: dup v0.16b, v0.b[0]
+ %r = shufflevector <1 x i8> %a, <1 x i8> undef, <16 x i32> zeroinitializer
+ ret <16 x i8> %r
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll b/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll
new file mode 100644
index 000000000000..276ac13da40e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+; arm64 has a separate copy due to intrinsics
+
+define <4 x i32> @copyTuple.QPair(i32* %a, i32* %b) {
+; CHECK-LABEL: copyTuple.QPair:
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: ld2 { {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
+entry:
+ %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, i64 1, i32* %a)
+ %extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
+ %vld1 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i64 1, i32* %b)
+ %vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld1, 0
+ ret <4 x i32> %vld1.fca.0.extract
+}
+
+define <4 x i32> @copyTuple.QTriple(i32* %a, i32* %b, <4 x i32> %c) {
+; CHECK-LABEL: copyTuple.QTriple:
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: ld3 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
+entry:
+ %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)
+ %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
+ %vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, i64 1, i32* %b)
+ %vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld1, 0
+ ret <4 x i32> %vld1.fca.0.extract
+}
+
+define <4 x i32> @copyTuple.QQuad(i32* %a, i32* %b, <4 x i32> %c) {
+; CHECK-LABEL: copyTuple.QQuad:
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: ld4 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
+entry:
+ %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)
+ %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
+ %vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %b)
+ %vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld1, 0
+ ret <4 x i32> %vld1.fca.0.extract
+}
+
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
diff --git a/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/test/CodeGen/AArch64/arm64-neon-mul-div.ll
new file mode 100644
index 000000000000..720f3eb6a4bf
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-mul-div.ll
@@ -0,0 +1,797 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+; arm64 has its own copy of this because of the intrinsics
+
+define <8 x i8> @mul8xi8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: mul8xi8:
+; CHECK: mul {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = mul <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @mul16xi8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: mul16xi8:
+; CHECK: mul {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = mul <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @mul4xi16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: mul4xi16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = mul <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @mul8xi16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: mul8xi16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = mul <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @mul2xi32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: mul2xi32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = mul <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @mul4x32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: mul4x32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = mul <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <1 x i64> @mul1xi64(<1 x i64> %A, <1 x i64> %B) {
+; CHECK-LABEL: mul1xi64:
+; CHECK: mul x{{[0-9]+}}, x{{[0-9]+}}, x{{[0-9]+}}
+ %tmp3 = mul <1 x i64> %A, %B;
+ ret <1 x i64> %tmp3
+}
+
+define <2 x i64> @mul2xi64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: mul2xi64:
+; CHECK: mul x{{[0-9]+}}, x{{[0-9]+}}, x{{[0-9]+}}
+; CHECK: mul x{{[0-9]+}}, x{{[0-9]+}}, x{{[0-9]+}}
+ %tmp3 = mul <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <2 x float> @mul2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: mul2xfloat:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = fmul <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @mul4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: mul4xfloat:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = fmul <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @mul2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: mul2xdouble:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = fmul <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+
+define <2 x float> @div2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: div2xfloat:
+; CHECK: fdiv {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = fdiv <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @div4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: div4xfloat:
+; CHECK: fdiv {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = fdiv <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @div2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: div2xdouble:
+; CHECK: fdiv {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = fdiv <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+define <1 x i8> @sdiv1x8(<1 x i8> %A, <1 x i8> %B) {
+; CHECK-LABEL: sdiv1x8:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <1 x i8> %A, %B;
+ ret <1 x i8> %tmp3
+}
+
+define <8 x i8> @sdiv8x8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: sdiv8x8:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @sdiv16x8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: sdiv16x8:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <1 x i16> @sdiv1x16(<1 x i16> %A, <1 x i16> %B) {
+; CHECK-LABEL: sdiv1x16:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <1 x i16> %A, %B;
+ ret <1 x i16> %tmp3
+}
+
+define <4 x i16> @sdiv4x16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: sdiv4x16:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sdiv8x16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: sdiv8x16:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <1 x i32> @sdiv1x32(<1 x i32> %A, <1 x i32> %B) {
+; CHECK-LABEL: sdiv1x32:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <1 x i32> %A, %B;
+ ret <1 x i32> %tmp3
+}
+
+define <2 x i32> @sdiv2x32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: sdiv2x32:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sdiv4x32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: sdiv4x32:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = sdiv <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <1 x i64> @sdiv1x64(<1 x i64> %A, <1 x i64> %B) {
+; CHECK-LABEL: sdiv1x64:
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = sdiv <1 x i64> %A, %B;
+ ret <1 x i64> %tmp3
+}
+
+define <2 x i64> @sdiv2x64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: sdiv2x64:
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = sdiv <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <1 x i8> @udiv1x8(<1 x i8> %A, <1 x i8> %B) {
+; CHECK-LABEL: udiv1x8:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <1 x i8> %A, %B;
+ ret <1 x i8> %tmp3
+}
+
+define <8 x i8> @udiv8x8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: udiv8x8:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @udiv16x8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: udiv16x8:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <1 x i16> @udiv1x16(<1 x i16> %A, <1 x i16> %B) {
+; CHECK-LABEL: udiv1x16:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <1 x i16> %A, %B;
+ ret <1 x i16> %tmp3
+}
+
+define <4 x i16> @udiv4x16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: udiv4x16:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @udiv8x16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: udiv8x16:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <1 x i32> @udiv1x32(<1 x i32> %A, <1 x i32> %B) {
+; CHECK-LABEL: udiv1x32:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <1 x i32> %A, %B;
+ ret <1 x i32> %tmp3
+}
+
+define <2 x i32> @udiv2x32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: udiv2x32:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @udiv4x32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: udiv4x32:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = udiv <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <1 x i64> @udiv1x64(<1 x i64> %A, <1 x i64> %B) {
+; CHECK-LABEL: udiv1x64:
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = udiv <1 x i64> %A, %B;
+ ret <1 x i64> %tmp3
+}
+
+define <2 x i64> @udiv2x64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: udiv2x64:
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = udiv <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <1 x i8> @srem1x8(<1 x i8> %A, <1 x i8> %B) {
+; CHECK-LABEL: srem1x8:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <1 x i8> %A, %B;
+ ret <1 x i8> %tmp3
+}
+
+define <8 x i8> @srem8x8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: srem8x8:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @srem16x8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: srem16x8:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <1 x i16> @srem1x16(<1 x i16> %A, <1 x i16> %B) {
+; CHECK-LABEL: srem1x16:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <1 x i16> %A, %B;
+ ret <1 x i16> %tmp3
+}
+
+define <4 x i16> @srem4x16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: srem4x16:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @srem8x16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: srem8x16:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <1 x i32> @srem1x32(<1 x i32> %A, <1 x i32> %B) {
+; CHECK-LABEL: srem1x32:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <1 x i32> %A, %B;
+ ret <1 x i32> %tmp3
+}
+
+define <2 x i32> @srem2x32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: srem2x32:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @srem4x32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: srem4x32:
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = srem <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <1 x i64> @srem1x64(<1 x i64> %A, <1 x i64> %B) {
+; CHECK-LABEL: srem1x64:
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = srem <1 x i64> %A, %B;
+ ret <1 x i64> %tmp3
+}
+
+define <2 x i64> @srem2x64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: srem2x64:
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = srem <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <1 x i8> @urem1x8(<1 x i8> %A, <1 x i8> %B) {
+; CHECK-LABEL: urem1x8:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <1 x i8> %A, %B;
+ ret <1 x i8> %tmp3
+}
+
+define <8 x i8> @urem8x8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: urem8x8:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @urem16x8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: urem16x8:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <1 x i16> @urem1x16(<1 x i16> %A, <1 x i16> %B) {
+; CHECK-LABEL: urem1x16:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <1 x i16> %A, %B;
+ ret <1 x i16> %tmp3
+}
+
+define <4 x i16> @urem4x16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: urem4x16:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @urem8x16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: urem8x16:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <1 x i32> @urem1x32(<1 x i32> %A, <1 x i32> %B) {
+; CHECK-LABEL: urem1x32:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <1 x i32> %A, %B;
+ ret <1 x i32> %tmp3
+}
+
+define <2 x i32> @urem2x32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: urem2x32:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @urem4x32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: urem4x32:
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ %tmp3 = urem <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <1 x i64> @urem1x64(<1 x i64> %A, <1 x i64> %B) {
+; CHECK-LABEL: urem1x64:
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = urem <1 x i64> %A, %B;
+ ret <1 x i64> %tmp3
+}
+
+define <2 x i64> @urem2x64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: urem2x64:
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+ %tmp3 = urem <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <2 x float> @frem2f32(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: frem2f32:
+; CHECK: bl fmodf
+; CHECK: bl fmodf
+ %tmp3 = frem <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frem4f32(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: frem4f32:
+; CHECK: bl fmodf
+; CHECK: bl fmodf
+; CHECK: bl fmodf
+; CHECK: bl fmodf
+ %tmp3 = frem <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+
+define <1 x double> @frem1d64(<1 x double> %A, <1 x double> %B) {
+; CHECK-LABEL: frem1d64:
+; CHECK: bl fmod
+ %tmp3 = frem <1 x double> %A, %B;
+ ret <1 x double> %tmp3
+}
+
+define <2 x double> @frem2d64(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: frem2d64:
+; CHECK: bl fmod
+; CHECK: bl fmod
+ %tmp3 = frem <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.pmul.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8>, <16 x i8>)
+
+define <8 x i8> @poly_mulv8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK-LABEL: poly_mulv8i8:
+ %prod = call <8 x i8> @llvm.aarch64.neon.pmul.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: pmul v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %prod
+}
+
+define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK-LABEL: poly_mulv16i8:
+ %prod = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: pmul v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %prod
+}
+
+declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i16> @test_sqdmulh_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK-LABEL: test_sqdmulh_v4i16:
+ %prod = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqdmulh v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %prod
+}
+
+define <8 x i16> @test_sqdmulh_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK-LABEL: test_sqdmulh_v8i16:
+ %prod = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqdmulh v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %prod
+}
+
+define <2 x i32> @test_sqdmulh_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK-LABEL: test_sqdmulh_v2i32:
+ %prod = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqdmulh v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %prod
+}
+
+define <4 x i32> @test_sqdmulh_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: test_sqdmulh_v4i32:
+ %prod = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqdmulh v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %prod
+}
+
+declare <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16>, <8 x i16>)
+declare <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i16> @test_sqrdmulh_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK-LABEL: test_sqrdmulh_v4i16:
+ %prod = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqrdmulh v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %prod
+}
+
+define <8 x i16> @test_sqrdmulh_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK-LABEL: test_sqrdmulh_v8i16:
+ %prod = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqrdmulh v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %prod
+}
+
+define <2 x i32> @test_sqrdmulh_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK-LABEL: test_sqrdmulh_v2i32:
+ %prod = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqrdmulh v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %prod
+}
+
+define <4 x i32> @test_sqrdmulh_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: test_sqrdmulh_v4i32:
+ %prod = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqrdmulh v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %prod
+}
+
+declare <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @fmulx_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK-LABEL: fmulx_v2f32:
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: fmulx v0.2s, v0.2s, v1.2s
+ %val = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+ ret <2 x float> %val
+}
+
+define <4 x float> @fmulx_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK-LABEL: fmulx_v4f32:
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: fmulx v0.4s, v0.4s, v1.4s
+ %val = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+ ret <4 x float> %val
+}
+
+define <2 x double> @fmulx_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK-LABEL: fmulx_v2f64:
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: fmulx v0.2d, v0.2d, v1.2d
+ %val = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+ ret <2 x double> %val
+}
+
diff --git a/test/CodeGen/AArch64/arm64-neon-scalar-by-elem-mul.ll b/test/CodeGen/AArch64/arm64-neon-scalar-by-elem-mul.ll
new file mode 100644
index 000000000000..92ed23995098
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-scalar-by-elem-mul.ll
@@ -0,0 +1,124 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+define float @test_fmul_lane_ss2S(float %a, <2 x float> %v) {
+ ; CHECK-LABEL: test_fmul_lane_ss2S
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fmul float %a, %tmp1;
+ ret float %tmp2;
+}
+
+define float @test_fmul_lane_ss2S_swap(float %a, <2 x float> %v) {
+ ; CHECK-LABEL: test_fmul_lane_ss2S_swap
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fmul float %tmp1, %a;
+ ret float %tmp2;
+}
+
+
+define float @test_fmul_lane_ss4S(float %a, <4 x float> %v) {
+ ; CHECK-LABEL: test_fmul_lane_ss4S
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fmul float %a, %tmp1;
+ ret float %tmp2;
+}
+
+define float @test_fmul_lane_ss4S_swap(float %a, <4 x float> %v) {
+ ; CHECK-LABEL: test_fmul_lane_ss4S_swap
+ ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fmul float %tmp1, %a;
+ ret float %tmp2;
+}
+
+
+define double @test_fmul_lane_ddD(double %a, <1 x double> %v) {
+ ; CHECK-LABEL: test_fmul_lane_ddD
+ ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0]|d[0-9]+}}
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = fmul double %a, %tmp1;
+ ret double %tmp2;
+}
+
+
+
+define double @test_fmul_lane_dd2D(double %a, <2 x double> %v) {
+ ; CHECK-LABEL: test_fmul_lane_dd2D
+ ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fmul double %a, %tmp1;
+ ret double %tmp2;
+}
+
+
+define double @test_fmul_lane_dd2D_swap(double %a, <2 x double> %v) {
+ ; CHECK-LABEL: test_fmul_lane_dd2D_swap
+ ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fmul double %tmp1, %a;
+ ret double %tmp2;
+}
+
+declare float @llvm.aarch64.neon.fmulx.f32(float, float)
+
+define float @test_fmulx_lane_f32(float %a, <2 x float> %v) {
+ ; CHECK-LABEL: test_fmulx_lane_f32
+ ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = call float @llvm.aarch64.neon.fmulx.f32(float %a, float %tmp1)
+ ret float %tmp2;
+}
+
+define float @test_fmulx_laneq_f32(float %a, <4 x float> %v) {
+ ; CHECK-LABEL: test_fmulx_laneq_f32
+ ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.aarch64.neon.fmulx.f32(float %a, float %tmp1)
+ ret float %tmp2;
+}
+
+define float @test_fmulx_laneq_f32_swap(float %a, <4 x float> %v) {
+ ; CHECK-LABEL: test_fmulx_laneq_f32_swap
+ ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.aarch64.neon.fmulx.f32(float %tmp1, float %a)
+ ret float %tmp2;
+}
+
+declare double @llvm.aarch64.neon.fmulx.f64(double, double)
+
+define double @test_fmulx_lane_f64(double %a, <1 x double> %v) {
+ ; CHECK-LABEL: test_fmulx_lane_f64
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+.d\[0]|d[0-9]+}}
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = call double @llvm.aarch64.neon.fmulx.f64(double %a, double %tmp1)
+ ret double %tmp2;
+}
+
+define double @test_fmulx_laneq_f64_0(double %a, <2 x double> %v) {
+ ; CHECK-LABEL: test_fmulx_laneq_f64_0
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+ %tmp1 = extractelement <2 x double> %v, i32 0
+ %tmp2 = call double @llvm.aarch64.neon.fmulx.f64(double %a, double %tmp1)
+ ret double %tmp2;
+}
+
+
+define double @test_fmulx_laneq_f64_1(double %a, <2 x double> %v) {
+ ; CHECK-LABEL: test_fmulx_laneq_f64_1
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.aarch64.neon.fmulx.f64(double %a, double %tmp1)
+ ret double %tmp2;
+}
+
+define double @test_fmulx_laneq_f64_1_swap(double %a, <2 x double> %v) {
+ ; CHECK-LABEL: test_fmulx_laneq_f64_1_swap
+ ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.aarch64.neon.fmulx.f64(double %tmp1, double %a)
+ ret double %tmp2;
+}
+
diff --git a/test/CodeGen/AArch64/arm64-neon-select_cc.ll b/test/CodeGen/AArch64/arm64-neon-select_cc.ll
new file mode 100644
index 000000000000..95c582a5348c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-select_cc.ll
@@ -0,0 +1,206 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+define <8x i8> @test_select_cc_v8i8_i8(i8 %a, i8 %b, <8x i8> %c, <8x i8> %d ) {
+; CHECK-LABEL: test_select_cc_v8i8_i8:
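+; The scalar operands are moved into SIMD registers with fmov, cmeq builds a
+; byte mask, dup splats lane 0 of that mask, and bsl performs the select.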
+; CHECK-DAG: fmov s[[LHS:[0-9]+]], w0
+; CHECK-DAG: fmov s[[RHS:[0-9]+]], w1
+; CHECK: cmeq [[MASK:v[0-9]+]].8b, v[[LHS]].8b, v[[RHS]].8b
+; CHECK: dup [[DUPMASK:v[0-9]+]].8b, [[MASK]].b[0]
+; CHECK: bsl [[DUPMASK]].8b, v0.8b, v1.8b
+ %cmp31 = icmp eq i8 %a, %b
+ %e = select i1 %cmp31, <8x i8> %c, <8x i8> %d
+ ret <8x i8> %e
+}
+
+define <8x i8> @test_select_cc_v8i8_f32(float %a, float %b, <8x i8> %c, <8x i8> %d ) {
+; CHECK-LABEL: test_select_cc_v8i8_f32:
+; CHECK: fcmeq [[MASK:v[0-9]+]].2s, v0.2s, v1.2s
+; CHECK-NEXT: dup [[DUPMASK:v[0-9]+]].2s, [[MASK]].s[0]
+; CHECK-NEXT: bsl [[DUPMASK]].8b, v2.8b, v3.8b
+ %cmp31 = fcmp oeq float %a, %b
+ %e = select i1 %cmp31, <8x i8> %c, <8x i8> %d
+ ret <8x i8> %e
+}
+
+define <8x i8> @test_select_cc_v8i8_f64(double %a, double %b, <8x i8> %c, <8x i8> %d ) {
+; CHECK-LABEL: test_select_cc_v8i8_f64:
+; CHECK: fcmeq d[[MASK:[0-9]+]], d0, d1
+; CHECK-NEXT: bsl v[[MASK]].8b, v2.8b, v3.8b
+ %cmp31 = fcmp oeq double %a, %b
+ %e = select i1 %cmp31, <8x i8> %c, <8x i8> %d
+ ret <8x i8> %e
+}
+
+define <16x i8> @test_select_cc_v16i8_i8(i8 %a, i8 %b, <16x i8> %c, <16x i8> %d ) {
+; CHECK-LABEL: test_select_cc_v16i8_i8:
+; CHECK-DAG: fmov s[[LHS:[0-9]+]], w0
+; CHECK-DAG: fmov s[[RHS:[0-9]+]], w1
+; CHECK: cmeq [[MASK:v[0-9]+]].16b, v[[LHS]].16b, v[[RHS]].16b
+; CHECK: dup [[DUPMASK:v[0-9]+]].16b, [[MASK]].b[0]
+; CHECK: bsl [[DUPMASK]].16b, v0.16b, v1.16b
+ %cmp31 = icmp eq i8 %a, %b
+ %e = select i1 %cmp31, <16x i8> %c, <16x i8> %d
+ ret <16x i8> %e
+}
+
+define <16x i8> @test_select_cc_v16i8_f32(float %a, float %b, <16x i8> %c, <16x i8> %d ) {
+; CHECK-LABEL: test_select_cc_v16i8_f32:
+; CHECK: fcmeq [[MASK:v[0-9]+]].4s, v0.4s, v1.4s
+; CHECK-NEXT: dup [[DUPMASK:v[0-9]+]].4s, [[MASK]].s[0]
+; CHECK-NEXT: bsl [[DUPMASK]].16b, v2.16b, v3.16b
+ %cmp31 = fcmp oeq float %a, %b
+ %e = select i1 %cmp31, <16x i8> %c, <16x i8> %d
+ ret <16x i8> %e
+}
+
+define <16x i8> @test_select_cc_v16i8_f64(double %a, double %b, <16x i8> %c, <16x i8> %d ) {
+; CHECK-LABEL: test_select_cc_v16i8_f64:
+; CHECK: fcmeq [[MASK:v[0-9]+]].2d, v0.2d, v1.2d
+; CHECK-NEXT: dup [[DUPMASK:v[0-9]+]].2d, [[MASK]].d[0]
+; CHECK-NEXT: bsl [[DUPMASK]].16b, v2.16b, v3.16b
+ %cmp31 = fcmp oeq double %a, %b
+ %e = select i1 %cmp31, <16x i8> %c, <16x i8> %d
+ ret <16x i8> %e
+}
+
+define <4x i16> @test_select_cc_v4i16(i16 %a, i16 %b, <4x i16> %c, <4x i16> %d ) {
+; CHECK-LABEL: test_select_cc_v4i16:
+; CHECK-DAG: fmov s[[LHS:[0-9]+]], w0
+; CHECK-DAG: fmov s[[RHS:[0-9]+]], w1
+; CHECK: cmeq [[MASK:v[0-9]+]].4h, v[[LHS]].4h, v[[RHS]].4h
+; CHECK: dup [[DUPMASK:v[0-9]+]].4h, [[MASK]].h[0]
+; CHECK: bsl [[DUPMASK]].8b, v0.8b, v1.8b
+ %cmp31 = icmp eq i16 %a, %b
+ %e = select i1 %cmp31, <4x i16> %c, <4x i16> %d
+ ret <4x i16> %e
+}
+
+define <8x i16> @test_select_cc_v8i16(i16 %a, i16 %b, <8x i16> %c, <8x i16> %d ) {
+; CHECK-LABEL: test_select_cc_v8i16:
+; CHECK-DAG: fmov s[[LHS:[0-9]+]], w0
+; CHECK-DAG: fmov s[[RHS:[0-9]+]], w1
+; CHECK: cmeq [[MASK:v[0-9]+]].8h, v[[LHS]].8h, v[[RHS]].8h
+; CHECK: dup [[DUPMASK:v[0-9]+]].8h, [[MASK]].h[0]
+; CHECK: bsl [[DUPMASK]].16b, v0.16b, v1.16b
+ %cmp31 = icmp eq i16 %a, %b
+ %e = select i1 %cmp31, <8x i16> %c, <8x i16> %d
+ ret <8x i16> %e
+}
+
+define <2x i32> @test_select_cc_v2i32(i32 %a, i32 %b, <2x i32> %c, <2x i32> %d ) {
+; CHECK-LABEL: test_select_cc_v2i32:
+; CHECK-DAG: fmov s[[LHS:[0-9]+]], w0
+; CHECK-DAG: fmov s[[RHS:[0-9]+]], w1
+; CHECK: cmeq [[MASK:v[0-9]+]].2s, v[[LHS]].2s, v[[RHS]].2s
+; CHECK: dup [[DUPMASK:v[0-9]+]].2s, [[MASK]].s[0]
+; CHECK: bsl [[DUPMASK]].8b, v0.8b, v1.8b
+ %cmp31 = icmp eq i32 %a, %b
+ %e = select i1 %cmp31, <2x i32> %c, <2x i32> %d
+ ret <2x i32> %e
+}
+
+define <4x i32> @test_select_cc_v4i32(i32 %a, i32 %b, <4x i32> %c, <4x i32> %d ) {
+; CHECK-LABEL: test_select_cc_v4i32:
+; CHECK-DAG: fmov s[[LHS:[0-9]+]], w0
+; CHECK-DAG: fmov s[[RHS:[0-9]+]], w1
+; CHECK: cmeq [[MASK:v[0-9]+]].4s, v[[LHS]].4s, v[[RHS]].4s
+; CHECK: dup [[DUPMASK:v[0-9]+]].4s, [[MASK]].s[0]
+; CHECK: bsl [[DUPMASK]].16b, v0.16b, v1.16b
+ %cmp31 = icmp eq i32 %a, %b
+ %e = select i1 %cmp31, <4x i32> %c, <4x i32> %d
+ ret <4x i32> %e
+}
+
+define <1x i64> @test_select_cc_v1i64(i64 %a, i64 %b, <1x i64> %c, <1x i64> %d ) {
+; CHECK-LABEL: test_select_cc_v1i64:
+; CHECK-DAG: fmov d[[LHS:[0-9]+]], x0
+; CHECK-DAG: fmov d[[RHS:[0-9]+]], x1
+; CHECK: cmeq d[[MASK:[0-9]+]], d[[LHS]], d[[RHS]]
+; CHECK: bsl v[[MASK]].8b, v0.8b, v1.8b
+ %cmp31 = icmp eq i64 %a, %b
+ %e = select i1 %cmp31, <1x i64> %c, <1x i64> %d
+ ret <1x i64> %e
+}
+
+define <2x i64> @test_select_cc_v2i64(i64 %a, i64 %b, <2x i64> %c, <2x i64> %d ) {
+; CHECK-LABEL: test_select_cc_v2i64:
+; CHECK-DAG: fmov d[[LHS:[0-9]+]], x0
+; CHECK-DAG: fmov d[[RHS:[0-9]+]], x1
+; CHECK: cmeq [[MASK:v[0-9]+]].2d, v[[LHS]].2d, v[[RHS]].2d
+; CHECK: dup [[DUPMASK:v[0-9]+]].2d, [[MASK]].d[0]
+; CHECK: bsl [[DUPMASK]].16b, v0.16b, v1.16b
+ %cmp31 = icmp eq i64 %a, %b
+ %e = select i1 %cmp31, <2x i64> %c, <2x i64> %d
+ ret <2x i64> %e
+}
+
+define <1 x float> @test_select_cc_v1f32(float %a, float %b, <1 x float> %c, <1 x float> %d ) {
+; CHECK-LABEL: test_select_cc_v1f32:
+; CHECK: fcmeq [[MASK:v[0-9]+]].2s, v0.2s, v1.2s
+; CHECK-NEXT: bsl [[MASK]].8b, v2.8b, v3.8b
+ %cmp31 = fcmp oeq float %a, %b
+ %e = select i1 %cmp31, <1 x float> %c, <1 x float> %d
+ ret <1 x float> %e
+}
+
+define <2 x float> @test_select_cc_v2f32(float %a, float %b, <2 x float> %c, <2 x float> %d ) {
+; CHECK-LABEL: test_select_cc_v2f32:
+; CHECK: fcmeq [[MASK:v[0-9]+]].2s, v0.2s, v1.2s
+; CHECK: dup [[DUPMASK:v[0-9]+]].2s, [[MASK]].s[0]
+; CHECK: bsl [[DUPMASK]].8b, v2.8b, v3.8b
+ %cmp31 = fcmp oeq float %a, %b
+ %e = select i1 %cmp31, <2 x float> %c, <2 x float> %d
+ ret <2 x float> %e
+}
+
+define <4x float> @test_select_cc_v4f32(float %a, float %b, <4x float> %c, <4x float> %d ) {
+; CHECK-LABEL: test_select_cc_v4f32:
+; CHECK: fcmeq [[MASK:v[0-9]+]].4s, v0.4s, v1.4s
+; CHECK: dup [[DUPMASK:v[0-9]+]].4s, [[MASK]].s[0]
+; CHECK: bsl [[DUPMASK]].16b, v2.16b, v3.16b
+ %cmp31 = fcmp oeq float %a, %b
+ %e = select i1 %cmp31, <4x float> %c, <4x float> %d
+ ret <4x float> %e
+}
+
+define <4x float> @test_select_cc_v4f32_icmp(i32 %a, i32 %b, <4x float> %c, <4x float> %d ) {
+; CHECK-LABEL: test_select_cc_v4f32_icmp:
+; CHECK-DAG: fmov s[[LHS:[0-9]+]], w0
+; CHECK-DAG: fmov s[[RHS:[0-9]+]], w1
+; CHECK: cmeq [[MASK:v[0-9]+]].4s, v[[LHS]].4s, v[[RHS]].4s
+; CHECK: dup [[DUPMASK:v[0-9]+]].4s, [[MASK]].s[0]
+; CHECK: bsl [[DUPMASK]].16b, v0.16b, v1.16b
+ %cmp31 = icmp eq i32 %a, %b
+ %e = select i1 %cmp31, <4x float> %c, <4x float> %d
+ ret <4x float> %e
+}
+
+define <1 x double> @test_select_cc_v1f64(double %a, double %b, <1 x double> %c, <1 x double> %d ) {
+; CHECK-LABEL: test_select_cc_v1f64:
+; CHECK: fcmeq d[[MASK:[0-9]+]], d0, d1
+; CHECK: bsl v[[MASK]].8b, v2.8b, v3.8b
+ %cmp31 = fcmp oeq double %a, %b
+ %e = select i1 %cmp31, <1 x double> %c, <1 x double> %d
+ ret <1 x double> %e
+}
+
+define <1 x double> @test_select_cc_v1f64_icmp(i64 %a, i64 %b, <1 x double> %c, <1 x double> %d ) {
+; CHECK-LABEL: test_select_cc_v1f64_icmp:
+; CHECK-DAG: fmov [[LHS:d[0-9]+]], x0
+; CHECK-DAG: fmov [[RHS:d[0-9]+]], x1
+; CHECK: cmeq d[[MASK:[0-9]+]], [[LHS]], [[RHS]]
+; CHECK: bsl v[[MASK]].8b, v0.8b, v1.8b
+ %cmp31 = icmp eq i64 %a, %b
+ %e = select i1 %cmp31, <1 x double> %c, <1 x double> %d
+ ret <1 x double> %e
+}
+
+define <2 x double> @test_select_cc_v2f64(double %a, double %b, <2 x double> %c, <2 x double> %d ) {
+; CHECK-LABEL: test_select_cc_v2f64:
+; CHECK: fcmeq [[MASK:v[0-9]+]].2d, v0.2d, v1.2d
+; CHECK: dup [[DUPMASK:v[0-9]+]].2d, [[MASK]].d[0]
+; CHECK: bsl [[DUPMASK]].16b, v2.16b, v3.16b
+ %cmp31 = fcmp oeq double %a, %b
+ %e = select i1 %cmp31, <2 x double> %c, <2 x double> %d
+ ret <2 x double> %e
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll b/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
new file mode 100644
index 000000000000..cca6bfef7307
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
@@ -0,0 +1,482 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+
+%struct.uint8x16x2_t = type { [2 x <16 x i8>] }
+%struct.poly8x16x2_t = type { [2 x <16 x i8>] }
+%struct.uint8x16x3_t = type { [3 x <16 x i8>] }
+%struct.int8x16x2_t = type { [2 x <16 x i8>] }
+%struct.int16x8x2_t = type { [2 x <8 x i16>] }
+%struct.int32x4x2_t = type { [2 x <4 x i32>] }
+%struct.int64x2x2_t = type { [2 x <2 x i64>] }
+%struct.float32x4x2_t = type { [2 x <4 x float>] }
+%struct.float64x2x2_t = type { [2 x <2 x double>] }
+%struct.int8x8x2_t = type { [2 x <8 x i8>] }
+%struct.int16x4x2_t = type { [2 x <4 x i16>] }
+%struct.int32x2x2_t = type { [2 x <2 x i32>] }
+%struct.int64x1x2_t = type { [2 x <1 x i64>] }
+%struct.float32x2x2_t = type { [2 x <2 x float>] }
+%struct.float64x1x2_t = type { [2 x <1 x double>] }
+%struct.int8x16x3_t = type { [3 x <16 x i8>] }
+%struct.int16x8x3_t = type { [3 x <8 x i16>] }
+%struct.int32x4x3_t = type { [3 x <4 x i32>] }
+%struct.int64x2x3_t = type { [3 x <2 x i64>] }
+%struct.float32x4x3_t = type { [3 x <4 x float>] }
+%struct.float64x2x3_t = type { [3 x <2 x double>] }
+%struct.int8x8x3_t = type { [3 x <8 x i8>] }
+%struct.int16x4x3_t = type { [3 x <4 x i16>] }
+%struct.int32x2x3_t = type { [3 x <2 x i32>] }
+%struct.int64x1x3_t = type { [3 x <1 x i64>] }
+%struct.float32x2x3_t = type { [3 x <2 x float>] }
+%struct.float64x1x3_t = type { [3 x <1 x double>] }
+%struct.int8x16x4_t = type { [4 x <16 x i8>] }
+%struct.int16x8x4_t = type { [4 x <8 x i16>] }
+%struct.int32x4x4_t = type { [4 x <4 x i32>] }
+%struct.int64x2x4_t = type { [4 x <2 x i64>] }
+%struct.float32x4x4_t = type { [4 x <4 x float>] }
+%struct.float64x2x4_t = type { [4 x <2 x double>] }
+%struct.int8x8x4_t = type { [4 x <8 x i8>] }
+%struct.int16x4x4_t = type { [4 x <4 x i16>] }
+%struct.int32x2x4_t = type { [4 x <2 x i32>] }
+%struct.int64x1x4_t = type { [4 x <1 x i64>] }
+%struct.float32x2x4_t = type { [4 x <2 x float>] }
+%struct.float64x1x4_t = type { [4 x <1 x double>] }
+
+define <16 x i8> @test_ld_from_poll_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: test_ld_from_poll_v16i8:
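+; The vector constant is materialized from the constant pool via adrp + ldr.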
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = add <16 x i8> %a, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 2, i8 13, i8 14, i8 15, i8 16>
+ ret <16 x i8> %b
+}
+
+define <8 x i16> @test_ld_from_poll_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: test_ld_from_poll_v8i16:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = add <8 x i16> %a, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
+ ret <8 x i16> %b
+}
+
+define <4 x i32> @test_ld_from_poll_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: test_ld_from_poll_v4i32:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = add <4 x i32> %a, <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %b
+}
+
+define <2 x i64> @test_ld_from_poll_v2i64(<2 x i64> %a) {
+; CHECK-LABEL: test_ld_from_poll_v2i64:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = add <2 x i64> %a, <i64 1, i64 2>
+ ret <2 x i64> %b
+}
+
+define <4 x float> @test_ld_from_poll_v4f32(<4 x float> %a) {
+; CHECK-LABEL: test_ld_from_poll_v4f32:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
+ ret <4 x float> %b
+}
+
+define <2 x double> @test_ld_from_poll_v2f64(<2 x double> %a) {
+; CHECK-LABEL: test_ld_from_poll_v2f64:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = fadd <2 x double> %a, <double 1.0, double 2.0>
+ ret <2 x double> %b
+}
+
+define <8 x i8> @test_ld_from_poll_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: test_ld_from_poll_v8i8:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = add <8 x i8> %a, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
+ ret <8 x i8> %b
+}
+
+define <4 x i16> @test_ld_from_poll_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: test_ld_from_poll_v4i16:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = add <4 x i16> %a, <i16 1, i16 2, i16 3, i16 4>
+ ret <4 x i16> %b
+}
+
+define <2 x i32> @test_ld_from_poll_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: test_ld_from_poll_v2i32:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+entry:
+ %b = add <2 x i32> %a, <i32 1, i32 2>
+ ret <2 x i32> %b
+}
+
+define <16 x i8> @test_vld1q_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld1q_dup_s8:
+; CHECK: ld1r {{{ ?v[0-9]+.16b ?}}}, [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %1 = insertelement <16 x i8> undef, i8 %0, i32 0
+ %lane = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
+ ret <16 x i8> %lane
+}
+
+define <8 x i16> @test_vld1q_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld1q_dup_s16:
+; CHECK: ld1r {{{ ?v[0-9]+.8h ?}}}, [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %1 = insertelement <8 x i16> undef, i16 %0, i32 0
+ %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
+ ret <8 x i16> %lane
+}
+
+define <4 x i32> @test_vld1q_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld1q_dup_s32:
+; CHECK: ld1r {{{ ?v[0-9]+.4s ?}}}, [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %1 = insertelement <4 x i32> undef, i32 %0, i32 0
+ %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %lane
+}
+
+define <2 x i64> @test_vld1q_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld1q_dup_s64:
+; CHECK: ld1r {{{ ?v[0-9]+.2d ?}}}, [x0]
+entry:
+ %0 = load i64* %a, align 8
+ %1 = insertelement <2 x i64> undef, i64 %0, i32 0
+ %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
+ ret <2 x i64> %lane
+}
+
+define <4 x float> @test_vld1q_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld1q_dup_f32:
+; CHECK: ld1r {{{ ?v[0-9]+.4s ?}}}, [x0]
+entry:
+ %0 = load float* %a, align 4
+ %1 = insertelement <4 x float> undef, float %0, i32 0
+ %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
+ ret <4 x float> %lane
+}
+
+define <2 x double> @test_vld1q_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld1q_dup_f64:
+; CHECK: ld1r {{{ ?v[0-9]+.2d ?}}}, [x0]
+entry:
+ %0 = load double* %a, align 8
+ %1 = insertelement <2 x double> undef, double %0, i32 0
+ %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
+ ret <2 x double> %lane
+}
+
+define <8 x i8> @test_vld1_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld1_dup_s8:
+; CHECK: ld1r {{{ ?v[0-9]+.8b ?}}}, [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %1 = insertelement <8 x i8> undef, i8 %0, i32 0
+ %lane = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
+ ret <8 x i8> %lane
+}
+
+define <4 x i16> @test_vld1_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld1_dup_s16:
+; CHECK: ld1r {{{ ?v[0-9]+.4h ?}}}, [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %1 = insertelement <4 x i16> undef, i16 %0, i32 0
+ %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
+ ret <4 x i16> %lane
+}
+
+define <2 x i32> @test_vld1_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld1_dup_s32:
+; CHECK: ld1r {{{ ?v[0-9]+.2s ?}}}, [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %1 = insertelement <2 x i32> undef, i32 %0, i32 0
+ %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
+ ret <2 x i32> %lane
+}
+
+define <1 x i64> @test_vld1_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld1_dup_s64:
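+; A <1 x i64> has a single lane, so the dup degenerates to a plain 64-bit ldr
+; rather than ld1r.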
+; CHECK: ldr {{d[0-9]+}}, [x0]
+entry:
+ %0 = load i64* %a, align 8
+ %1 = insertelement <1 x i64> undef, i64 %0, i32 0
+ ret <1 x i64> %1
+}
+
+define <2 x float> @test_vld1_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld1_dup_f32:
+; CHECK: ld1r {{{ ?v[0-9]+.2s ?}}}, [x0]
+entry:
+ %0 = load float* %a, align 4
+ %1 = insertelement <2 x float> undef, float %0, i32 0
+ %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
+ ret <2 x float> %lane
+}
+
+define <1 x double> @test_vld1_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld1_dup_f64:
+; CHECK: ldr {{d[0-9]+}}, [x0]
+entry:
+ %0 = load double* %a, align 8
+ %1 = insertelement <1 x double> undef, double %0, i32 0
+ ret <1 x double> %1
+}
+
+define <1 x i64> @testDUP.v1i64(i64* %a, i64* %b) #0 {
+; As there is a store operation depending on %1, the LD1R pattern can't be selected.
+; So LDR and FMOV should be emitted.
+; CHECK-LABEL: testDUP.v1i64:
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}]
+; CHECK-DAG: fmov {{d[0-9]+}}, {{x[0-9]+}}
+; CHECK-DAG: str {{x[0-9]+}}, [{{x[0-9]+}}]
+ %1 = load i64* %a, align 8
+ store i64 %1, i64* %b, align 8
+ %vecinit.i = insertelement <1 x i64> undef, i64 %1, i32 0
+ ret <1 x i64> %vecinit.i
+}
+
+define <1 x double> @testDUP.v1f64(double* %a, double* %b) #0 {
+; As there is a store operation depending on %1, the LD1R pattern can't be selected.
+; So LDR and STR should be emitted.
+; CHECK-LABEL: testDUP.v1f64:
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}]
+; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}]
+ %1 = load double* %a, align 8
+ store double %1, double* %b, align 8
+ %vecinit.i = insertelement <1 x double> undef, double %1, i32 0
+ ret <1 x double> %vecinit.i
+}
+
+define <16 x i8> @test_vld1q_lane_s8(i8* %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vld1q_lane_s8:
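+; Inserting into one lane of an existing vector uses the single-lane ld1 form.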
+; CHECK: ld1 { {{v[0-9]+}}.b }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %vld1_lane = insertelement <16 x i8> %b, i8 %0, i32 15
+ ret <16 x i8> %vld1_lane
+}
+
+define <8 x i16> @test_vld1q_lane_s16(i16* %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vld1q_lane_s16:
+; CHECK: ld1 { {{v[0-9]+}}.h }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %vld1_lane = insertelement <8 x i16> %b, i16 %0, i32 7
+ ret <8 x i16> %vld1_lane
+}
+
+define <4 x i32> @test_vld1q_lane_s32(i32* %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vld1q_lane_s32:
+; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %vld1_lane = insertelement <4 x i32> %b, i32 %0, i32 3
+ ret <4 x i32> %vld1_lane
+}
+
+define <2 x i64> @test_vld1q_lane_s64(i64* %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vld1q_lane_s64:
+; CHECK: ld1 { {{v[0-9]+}}.d }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i64* %a, align 8
+ %vld1_lane = insertelement <2 x i64> %b, i64 %0, i32 1
+ ret <2 x i64> %vld1_lane
+}
+
+define <4 x float> @test_vld1q_lane_f32(float* %a, <4 x float> %b) {
+; CHECK-LABEL: test_vld1q_lane_f32:
+; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load float* %a, align 4
+ %vld1_lane = insertelement <4 x float> %b, float %0, i32 3
+ ret <4 x float> %vld1_lane
+}
+
+define <2 x double> @test_vld1q_lane_f64(double* %a, <2 x double> %b) {
+; CHECK-LABEL: test_vld1q_lane_f64:
+; CHECK: ld1 { {{v[0-9]+}}.d }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load double* %a, align 8
+ %vld1_lane = insertelement <2 x double> %b, double %0, i32 1
+ ret <2 x double> %vld1_lane
+}
+
+define <8 x i8> @test_vld1_lane_s8(i8* %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vld1_lane_s8:
+; CHECK: ld1 { {{v[0-9]+}}.b }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %vld1_lane = insertelement <8 x i8> %b, i8 %0, i32 7
+ ret <8 x i8> %vld1_lane
+}
+
+define <4 x i16> @test_vld1_lane_s16(i16* %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vld1_lane_s16:
+; CHECK: ld1 { {{v[0-9]+}}.h }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %vld1_lane = insertelement <4 x i16> %b, i16 %0, i32 3
+ ret <4 x i16> %vld1_lane
+}
+
+define <2 x i32> @test_vld1_lane_s32(i32* %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vld1_lane_s32:
+; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %vld1_lane = insertelement <2 x i32> %b, i32 %0, i32 1
+ ret <2 x i32> %vld1_lane
+}
+
+define <1 x i64> @test_vld1_lane_s64(i64* %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vld1_lane_s64:
+; CHECK: ldr {{d[0-9]+}}, [x0]
+entry:
+ %0 = load i64* %a, align 8
+ %vld1_lane = insertelement <1 x i64> undef, i64 %0, i32 0
+ ret <1 x i64> %vld1_lane
+}
+
+define <2 x float> @test_vld1_lane_f32(float* %a, <2 x float> %b) {
+; CHECK-LABEL: test_vld1_lane_f32:
+; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = load float* %a, align 4
+ %vld1_lane = insertelement <2 x float> %b, float %0, i32 1
+ ret <2 x float> %vld1_lane
+}
+
+define <1 x double> @test_vld1_lane_f64(double* %a, <1 x double> %b) {
+; CHECK-LABEL: test_vld1_lane_f64:
+; CHECK: ldr {{d[0-9]+}}, [x0]
+entry:
+ %0 = load double* %a, align 8
+ %vld1_lane = insertelement <1 x double> undef, double %0, i32 0
+ ret <1 x double> %vld1_lane
+}
+
+define void @test_vst1q_lane_s8(i8* %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vst1q_lane_s8:
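+; Storing a single extracted lane uses the single-lane st1 form.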
+; CHECK: st1 { {{v[0-9]+}}.b }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <16 x i8> %b, i32 15
+ store i8 %0, i8* %a, align 1
+ ret void
+}
+
+define void @test_vst1q_lane_s16(i16* %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vst1q_lane_s16:
+; CHECK: st1 { {{v[0-9]+}}.h }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <8 x i16> %b, i32 7
+ store i16 %0, i16* %a, align 2
+ ret void
+}
+
+define void @test_vst1q_lane_s32(i32* %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vst1q_lane_s32:
+; CHECK: st1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <4 x i32> %b, i32 3
+ store i32 %0, i32* %a, align 4
+ ret void
+}
+
+define void @test_vst1q_lane_s64(i64* %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vst1q_lane_s64:
+; CHECK: st1 { {{v[0-9]+}}.d }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x i64> %b, i32 1
+ store i64 %0, i64* %a, align 8
+ ret void
+}
+
+define void @test_vst1q_lane_f32(float* %a, <4 x float> %b) {
+; CHECK-LABEL: test_vst1q_lane_f32:
+; CHECK: st1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <4 x float> %b, i32 3
+ store float %0, float* %a, align 4
+ ret void
+}
+
+define void @test_vst1q_lane_f64(double* %a, <2 x double> %b) {
+; CHECK-LABEL: test_vst1q_lane_f64:
+; CHECK: st1 { {{v[0-9]+}}.d }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x double> %b, i32 1
+ store double %0, double* %a, align 8
+ ret void
+}
+
+define void @test_vst1_lane_s8(i8* %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vst1_lane_s8:
+; CHECK: st1 { {{v[0-9]+}}.b }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <8 x i8> %b, i32 7
+ store i8 %0, i8* %a, align 1
+ ret void
+}
+
+define void @test_vst1_lane_s16(i16* %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vst1_lane_s16:
+; CHECK: st1 { {{v[0-9]+}}.h }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <4 x i16> %b, i32 3
+ store i16 %0, i16* %a, align 2
+ ret void
+}
+
+define void @test_vst1_lane_s32(i32* %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vst1_lane_s32:
+; CHECK: st1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x i32> %b, i32 1
+ store i32 %0, i32* %a, align 4
+ ret void
+}
+
+define void @test_vst1_lane_s64(i64* %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vst1_lane_s64:
+; CHECK: st1 { {{v[0-9]+}}.d }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <1 x i64> %b, i32 0
+ store i64 %0, i64* %a, align 8
+ ret void
+}
+
+define void @test_vst1_lane_f32(float* %a, <2 x float> %b) {
+; CHECK-LABEL: test_vst1_lane_f32:
+; CHECK: st1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x float> %b, i32 1
+ store float %0, float* %a, align 4
+ ret void
+}
+
+define void @test_vst1_lane_f64(double* %a, <1 x double> %b) {
+; CHECK-LABEL: test_vst1_lane_f64:
+; CHECK: str {{d[0-9]+}}, [x0]
+entry:
+ %0 = extractelement <1 x double> %b, i32 0
+ store double %0, double* %a, align 8
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-simd-shift.ll b/test/CodeGen/AArch64/arm64-neon-simd-shift.ll
new file mode 100644
index 000000000000..447fb6307f21
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-simd-shift.ll
@@ -0,0 +1,663 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @test_vshr_n_s8(<8 x i8> %a) {
+; CHECK: test_vshr_n_s8
+; CHECK: sshr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vshr_n = ashr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <8 x i8> %vshr_n
+}
+
+define <4 x i16> @test_vshr_n_s16(<4 x i16> %a) {
+; CHECK: test_vshr_n_s16
+; CHECK: sshr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vshr_n = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
+ ret <4 x i16> %vshr_n
+}
+
+define <2 x i32> @test_vshr_n_s32(<2 x i32> %a) {
+; CHECK: test_vshr_n_s32
+; CHECK: sshr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vshr_n = ashr <2 x i32> %a, <i32 3, i32 3>
+ ret <2 x i32> %vshr_n
+}
+
+define <16 x i8> @test_vshrq_n_s8(<16 x i8> %a) {
+; CHECK: test_vshrq_n_s8
+; CHECK: sshr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vshr_n = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <16 x i8> %vshr_n
+}
+
+define <8 x i16> @test_vshrq_n_s16(<8 x i16> %a) {
+; CHECK: test_vshrq_n_s16
+; CHECK: sshr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vshr_n = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %vshr_n
+}
+
+define <4 x i32> @test_vshrq_n_s32(<4 x i32> %a) {
+; CHECK: test_vshrq_n_s32
+; CHECK: sshr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vshr_n = ashr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %vshr_n
+}
+
+define <2 x i64> @test_vshrq_n_s64(<2 x i64> %a) {
+; CHECK: test_vshrq_n_s64
+; CHECK: sshr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vshr_n = ashr <2 x i64> %a, <i64 3, i64 3>
+ ret <2 x i64> %vshr_n
+}
+
+define <8 x i8> @test_vshr_n_u8(<8 x i8> %a) {
+; CHECK: test_vshr_n_u8
+; CHECK: ushr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vshr_n = lshr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <8 x i8> %vshr_n
+}
+
+define <4 x i16> @test_vshr_n_u16(<4 x i16> %a) {
+; CHECK: test_vshr_n_u16
+; CHECK: ushr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vshr_n = lshr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
+ ret <4 x i16> %vshr_n
+}
+
+define <2 x i32> @test_vshr_n_u32(<2 x i32> %a) {
+; CHECK: test_vshr_n_u32
+; CHECK: ushr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vshr_n = lshr <2 x i32> %a, <i32 3, i32 3>
+ ret <2 x i32> %vshr_n
+}
+
+define <16 x i8> @test_vshrq_n_u8(<16 x i8> %a) {
+; CHECK: test_vshrq_n_u8
+; CHECK: ushr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vshr_n = lshr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <16 x i8> %vshr_n
+}
+
+define <8 x i16> @test_vshrq_n_u16(<8 x i16> %a) {
+; CHECK: test_vshrq_n_u16
+; CHECK: ushr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vshr_n = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %vshr_n
+}
+
+define <4 x i32> @test_vshrq_n_u32(<4 x i32> %a) {
+; CHECK: test_vshrq_n_u32
+; CHECK: ushr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vshr_n = lshr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %vshr_n
+}
+
+define <2 x i64> @test_vshrq_n_u64(<2 x i64> %a) {
+; CHECK: test_vshrq_n_u64
+; CHECK: ushr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vshr_n = lshr <2 x i64> %a, <i64 3, i64 3>
+ ret <2 x i64> %vshr_n
+}
+
+define <8 x i8> @test_vsra_n_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsra_n_s8
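+; The ashr followed by add is folded into a single shift-right-accumulate.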
+; CHECK: ssra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsra_n = ashr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <8 x i8> %vsra_n, %a
+ ret <8 x i8> %1
+}
+
+define <4 x i16> @test_vsra_n_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsra_n_s16
+; CHECK: ssra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vsra_n = ashr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
+ %1 = add <4 x i16> %vsra_n, %a
+ ret <4 x i16> %1
+}
+
+define <2 x i32> @test_vsra_n_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsra_n_s32
+; CHECK: ssra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vsra_n = ashr <2 x i32> %b, <i32 3, i32 3>
+ %1 = add <2 x i32> %vsra_n, %a
+ ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vsraq_n_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsraq_n_s8
+; CHECK: ssra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsra_n = ashr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <16 x i8> %vsra_n, %a
+ ret <16 x i8> %1
+}
+
+define <8 x i16> @test_vsraq_n_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsraq_n_s16
+; CHECK: ssra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vsra_n = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %1 = add <8 x i16> %vsra_n, %a
+ ret <8 x i16> %1
+}
+
+define <4 x i32> @test_vsraq_n_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsraq_n_s32
+; CHECK: ssra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vsra_n = ashr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
+ %1 = add <4 x i32> %vsra_n, %a
+ ret <4 x i32> %1
+}
+
+define <2 x i64> @test_vsraq_n_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsraq_n_s64
+; CHECK: ssra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vsra_n = ashr <2 x i64> %b, <i64 3, i64 3>
+ %1 = add <2 x i64> %vsra_n, %a
+ ret <2 x i64> %1
+}
+
+define <8 x i8> @test_vsra_n_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsra_n_u8
+; CHECK: usra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsra_n = lshr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <8 x i8> %vsra_n, %a
+ ret <8 x i8> %1
+}
+
+define <4 x i16> @test_vsra_n_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsra_n_u16
+; CHECK: usra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vsra_n = lshr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
+ %1 = add <4 x i16> %vsra_n, %a
+ ret <4 x i16> %1
+}
+
+define <2 x i32> @test_vsra_n_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsra_n_u32
+; CHECK: usra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vsra_n = lshr <2 x i32> %b, <i32 3, i32 3>
+ %1 = add <2 x i32> %vsra_n, %a
+ ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vsraq_n_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsraq_n_u8
+; CHECK: usra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsra_n = lshr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <16 x i8> %vsra_n, %a
+ ret <16 x i8> %1
+}
+
+define <8 x i16> @test_vsraq_n_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsraq_n_u16
+; CHECK: usra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vsra_n = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %1 = add <8 x i16> %vsra_n, %a
+ ret <8 x i16> %1
+}
+
+define <4 x i32> @test_vsraq_n_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsraq_n_u32
+; CHECK: usra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vsra_n = lshr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
+ %1 = add <4 x i32> %vsra_n, %a
+ ret <4 x i32> %1
+}
+
+define <2 x i64> @test_vsraq_n_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsraq_n_u64
+; CHECK: usra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vsra_n = lshr <2 x i64> %b, <i64 3, i64 3>
+ %1 = add <2 x i64> %vsra_n, %a
+ ret <2 x i64> %1
+}
+
+define <8 x i8> @test_vshrn_n_s16(<8 x i16> %a) {
+; CHECK: test_vshrn_n_s16
+; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %1 = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ ret <8 x i8> %vshrn_n
+}
+
+define <4 x i16> @test_vshrn_n_s32(<4 x i32> %a) {
+; CHECK: test_vshrn_n_s32
+; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %1 = ashr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ ret <4 x i16> %vshrn_n
+}
+
+define <2 x i32> @test_vshrn_n_s64(<2 x i64> %a) {
+; CHECK: test_vshrn_n_s64
+; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %1 = ashr <2 x i64> %a, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %vshrn_n
+}
+
+define <8 x i8> @test_vshrn_n_u16(<8 x i16> %a) {
+; CHECK: test_vshrn_n_u16
+; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %1 = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ ret <8 x i8> %vshrn_n
+}
+
+define <4 x i16> @test_vshrn_n_u32(<4 x i32> %a) {
+; CHECK: test_vshrn_n_u32
+; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %1 = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ ret <4 x i16> %vshrn_n
+}
+
+define <2 x i32> @test_vshrn_n_u64(<2 x i64> %a) {
+; CHECK: test_vshrn_n_u64
+; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %1 = lshr <2 x i64> %a, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %vshrn_n
+}
+
+define <16 x i8> @test_vshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vshrn_high_n_s16
+; CHECK: shrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
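+; The bitcast/shufflevector sequence places the narrowed result in the high
+; half of the destination (a vcombine pattern), selecting the shrn2 form.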
+ %1 = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ %2 = bitcast <8 x i8> %a to <1 x i64>
+ %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %4
+}
+
+define <8 x i16> @test_vshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vshrn_high_n_s32
+; CHECK: shrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %1 = ashr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ %2 = bitcast <4 x i16> %a to <1 x i64>
+ %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %4
+}
+
+define <4 x i32> @test_vshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vshrn_high_n_s64
+; CHECK: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %2 = ashr <2 x i64> %b, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
+ %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %4
+}
+
+define <16 x i8> @test_vshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vshrn_high_n_u16
+; CHECK: shrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %1 = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ %2 = bitcast <8 x i8> %a to <1 x i64>
+ %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %4
+}
+
+define <8 x i16> @test_vshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vshrn_high_n_u32
+; CHECK: shrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %1 = lshr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ %2 = bitcast <4 x i16> %a to <1 x i64>
+ %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %4
+}
+
+define <4 x i32> @test_vshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vshrn_high_n_u64
+; CHECK: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %2 = lshr <2 x i64> %b, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
+ %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %4
+}
+
+define <16 x i8> @test_vqshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqshrun_high_n_s16
+; CHECK: sqshrun2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqshrun = tail call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqshrun_high_n_s32
+; CHECK: sqshrun2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqshrun = tail call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqshrun_high_n_s64
+; CHECK: sqshrun2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqshrun = tail call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vrshrn_high_n_s16
+; CHECK: rshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vrshrn = tail call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vrshrn_high_n_s32
+; CHECK: rshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vrshrn = tail call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vrshrn_high_n_s64
+; CHECK: rshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vrshrn = tail call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vqrshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqrshrun_high_n_s16
+; CHECK: sqrshrun2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqrshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqrshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqrshrun_high_n_s32
+; CHECK: sqrshrun2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqrshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqrshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqrshrun_high_n_s64
+; CHECK: sqrshrun2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqrshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vqshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqshrn_high_n_s16
+; CHECK: sqshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqshrn_high_n_s32
+; CHECK: sqshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqshrn_high_n_s64
+; CHECK: sqshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vqshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqshrn_high_n_u16
+; CHECK: uqshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqshrn_high_n_u32
+; CHECK: uqshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqshrn_high_n_u64
+; CHECK: uqshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vqrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqrshrn_high_n_s16
+; CHECK: sqrshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqrshrn_high_n_s32
+; CHECK: sqrshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqrshrn_high_n_s64
+; CHECK: sqrshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vqrshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqrshrn_high_n_u16
+; CHECK: uqrshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqrshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqrshrn_high_n_u32
+; CHECK: uqrshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqrshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqrshrn_high_n_u64
+; CHECK: uqrshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+
+
+declare <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64>, i32)
+
+declare <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32)
+
+declare <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32)
+
+declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32)
+
+declare <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32)
+
+declare <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32)
+
+declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float>, i32)
+
+declare <4 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32)
+
+declare <2 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float>, i32)
+
+declare <4 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32)
+
+declare <2 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double>, i32)
+
+define <1 x i64> @test_vcvt_n_s64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvt_n_s64_f64
+; CHECK: fcvtzs d{{[0-9]+}}, d{{[0-9]+}}, #64
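+; The i32 64 intrinsic operand is the fixed-point fraction width and becomes
+; the #64 immediate.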
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double> %a, i32 64)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvt_n_u64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvt_n_u64_f64
+; CHECK: fcvtzu d{{[0-9]+}}, d{{[0-9]+}}, #64
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double> %a, i32 64)
+ ret <1 x i64> %1
+}
+
+define <1 x double> @test_vcvt_n_f64_s64(<1 x i64> %a) {
+; CHECK-LABEL: test_vcvt_n_f64_s64
+; CHECK: scvtf d{{[0-9]+}}, d{{[0-9]+}}, #64
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vcvt_n_f64_u64(<1 x i64> %a) {
+; CHECK-LABEL: test_vcvt_n_f64_u64
+; CHECK: ucvtf d{{[0-9]+}}, d{{[0-9]+}}, #64
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
+ ret <1 x double> %1
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double>, i32)
+declare <1 x double> @llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64>, i32)
+declare <1 x double> @llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64>, i32)
diff --git a/test/CodeGen/AArch64/arm64-neon-simd-vget.ll b/test/CodeGen/AArch64/arm64-neon-simd-vget.ll
new file mode 100644
index 000000000000..87f3956eb20f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-simd-vget.ll
@@ -0,0 +1,225 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @test_vget_high_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_high_s8:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_s16:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_high_s32:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_high_s64:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_high_u8:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_u16:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_high_u32:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_high_u64:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_p64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_high_p64:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_f16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_f16:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_high_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vget_high_f32:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_high_p8:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_p16:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_high_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vget_high_f64:
+; CHECK: ext v0.16b, v0.16b, {{v[0-9]+}}.16b, #8
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> <i32 1>
+ ret <1 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_low_s8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_s16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_low_s32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_low_s64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_low_u8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_u16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_low_u32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_low_u64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_p64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_low_p64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_f16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_f16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_low_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vget_low_f32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_low_p8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_p16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_low_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vget_low_f64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> zeroinitializer
+ ret <1 x double> %shuffle.i
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-v1i1-setcc.ll b/test/CodeGen/AArch64/arm64-neon-v1i1-setcc.ll
new file mode 100644
index 000000000000..74e3af8206f5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-v1i1-setcc.ll
@@ -0,0 +1,74 @@
+; RUN: llc %s -o - -verify-machineinstrs -mtriple=arm64-none-linux-gnu | FileCheck %s
+
+; This is the analogue of AArch64's file of the same name. It mostly tests that
+; some form of correct lowering occurs; the tests are a little artificial, but I
+; strongly suspect there's room for improved CodeGen (FIXME).
+
+define i64 @test_sext_extr_cmp_0(<1 x i64> %v1, <1 x i64> %v2) {
+; CHECK-LABEL: test_sext_extr_cmp_0:
+; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: cset
+ %1 = icmp sge <1 x i64> %v1, %v2
+ %2 = extractelement <1 x i1> %1, i32 0
+ %vget_lane = sext i1 %2 to i64
+ ret i64 %vget_lane
+}
+
+define i64 @test_sext_extr_cmp_1(<1 x double> %v1, <1 x double> %v2) {
+; CHECK-LABEL: test_sext_extr_cmp_1:
+; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
+ %1 = fcmp oeq <1 x double> %v1, %v2
+ %2 = extractelement <1 x i1> %1, i32 0
+ %vget_lane = sext i1 %2 to i64
+ ret i64 %vget_lane
+}
+
+define <1 x i64> @test_select_v1i1_0(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
+; CHECK-LABEL: test_select_v1i1_0:
+; CHECK: cmeq d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: bic v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
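+; (Editorial note) select (%1, zeroinitializer, %v3) keeps %v3 only in the lanes where the
+; compare fails: the compare materializes an all-ones/all-zeros lane mask and bic clears the
+; lanes where the comparison held, which is what the two CHECK lines above expect.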
+ %1 = icmp eq <1 x i64> %v1, %v2
+ %res = select <1 x i1> %1, <1 x i64> zeroinitializer, <1 x i64> %v3
+ ret <1 x i64> %res
+}
+
+define <1 x i64> @test_select_v1i1_1(<1 x double> %v1, <1 x double> %v2, <1 x i64> %v3) {
+; CHECK-LABEL: test_select_v1i1_1:
+; CHECK: fcmeq d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: bic v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %1 = fcmp oeq <1 x double> %v1, %v2
+ %res = select <1 x i1> %1, <1 x i64> zeroinitializer, <1 x i64> %v3
+ ret <1 x i64> %res
+}
+
+define <1 x double> @test_select_v1i1_2(<1 x i64> %v1, <1 x i64> %v2, <1 x double> %v3) {
+; CHECK-LABEL: test_select_v1i1_2:
+; CHECK: cmeq d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: bic v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %1 = icmp eq <1 x i64> %v1, %v2
+ %res = select <1 x i1> %1, <1 x double> zeroinitializer, <1 x double> %v3
+ ret <1 x double> %res
+}
+
+define <1 x i64> @test_select_v1i1_3(i64 %lhs, i64 %rhs, <1 x i64> %v3) {
+; CHECK-LABEL: test_select_v1i1_3:
+; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}
+ %tst = icmp eq i64 %lhs, %rhs
+ %evil = insertelement <1 x i1> undef, i1 %tst, i32 0
+ %res = select <1 x i1> %evil, <1 x i64> zeroinitializer, <1 x i64> %v3
+ ret <1 x i64> %res
+}
+
+define i32 @test_br_extr_cmp(<1 x i64> %v1, <1 x i64> %v2) {
+; CHECK-LABEL: test_br_extr_cmp:
+; CHECK: cmp x{{[0-9]+}}, x{{[0-9]+}}
+ %1 = icmp eq <1 x i64> %v1, %v2
+ %2 = extractelement <1 x i1> %1, i32 0
+ br i1 %2, label %if.end, label %if.then
+
+if.then:
+ ret i32 0;
+
+if.end:
+ ret i32 1;
+}
diff --git a/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll b/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll
new file mode 100644
index 000000000000..8262fe43a66c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll
@@ -0,0 +1,175 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+; FIXME: We should not generate ld/st for register spills/fills like these, because
+; the test case is very simple and the register pressure is not high. If the
+; spill/fill algorithm is improved, this test case may no longer trigger, and it
+; can then be deleted.
+define i32 @spill.DPairReg(i32* %arg1, i32 %arg2) {
+; CHECK-LABEL: spill.DPairReg:
+; CHECK: ld2 { v{{[0-9]+}}.2s, v{{[0-9]+}}.2s }, [{{x[0-9]+|sp}}]
+; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+entry:
+ %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %arg1)
+ %cmp = icmp eq i32 %arg2, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo()
+ br label %if.end
+
+if.end:
+ %vld.extract = extractvalue { <2 x i32>, <2 x i32> } %vld, 0
+ %res = extractelement <2 x i32> %vld.extract, i32 1
+ ret i32 %res
+}
+
+define i16 @spill.DTripleReg(i16* %arg1, i32 %arg2) {
+; CHECK-LABEL: spill.DTripleReg:
+; CHECK: ld3 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
+; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+entry:
+ %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
+ %cmp = icmp eq i32 %arg2, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo()
+ br label %if.end
+
+if.end:
+ %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
+ %res = extractelement <4 x i16> %vld.extract, i32 1
+ ret i16 %res
+}
+
+define i16 @spill.DQuadReg(i16* %arg1, i32 %arg2) {
+; CHECK-LABEL: spill.DQuadReg:
+; CHECK: ld4 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
+; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+entry:
+ %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %arg1)
+ %cmp = icmp eq i32 %arg2, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo()
+ br label %if.end
+
+if.end:
+ %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
+ %res = extractelement <4 x i16> %vld.extract, i32 0
+ ret i16 %res
+}
+
+define i32 @spill.QPairReg(i32* %arg1, i32 %arg2) {
+; CHECK-LABEL: spill.QPairReg:
+; CHECK: ld2 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
+; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+entry:
+ %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %arg1)
+ %cmp = icmp eq i32 %arg2, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo()
+ br label %if.end
+
+if.end:
+ %vld.extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
+ %res = extractelement <4 x i32> %vld.extract, i32 1
+ ret i32 %res
+}
+
+define float @spill.QTripleReg(float* %arg1, i32 %arg2) {
+; CHECK-LABEL: spill.QTripleReg:
+; CHECK: ld3 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
+; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+entry:
+ %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %arg1)
+ %cmp = icmp eq i32 %arg2, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo()
+ br label %if.end
+
+if.end:
+ %vld3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 0
+ %res = extractelement <4 x float> %vld3.extract, i32 1
+ ret float %res
+}
+
+define i8 @spill.QQuadReg(i8* %arg1, i32 %arg2) {
+; CHECK-LABEL: spill.QQuadReg:
+; CHECK: ld4 { v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b }, [{{x[0-9]+|sp}}]
+; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
+entry:
+ %vld = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %arg1)
+ %cmp = icmp eq i32 %arg2, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo()
+ br label %if.end
+
+if.end:
+ %vld.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld, 0
+ %res = extractelement <16 x i8> %vld.extract, i32 1
+ ret i8 %res
+}
+
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*)
+
+declare void @foo()
+
+; FIXME: We should not generate ld/st for register spills/fills like these, because
+; the test case is very simple and the register pressure is not high. If the
+; spill/fill algorithm is improved, this test case may no longer trigger, and it
+; can then be deleted.
+; Check the spill for register class QPair_with_qsub_0_in_FPR128Lo
+define <8 x i16> @test_2xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
+ tail call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
+ tail call void @foo()
+ %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
+ %1 = bitcast <2 x i64> %sv to <8 x i16>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %3 = mul <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; Check the spill for register class QTriple_with_qsub_0_in_FPR128Lo
+define <8 x i16> @test_3xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
+ tail call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
+ tail call void @foo()
+ %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
+ %1 = bitcast <2 x i64> %sv to <8 x i16>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %3 = mul <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+; Check the spill for register class QQuad_with_qsub_0_in_FPR128Lo
+define <8 x i16> @test_4xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
+ tail call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
+ tail call void @foo()
+ %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
+ %1 = bitcast <2 x i64> %sv to <8 x i16>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %3 = mul <8 x i16> %2, %2
+ ret <8 x i16> %3
+}
+
+declare void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
diff --git a/test/CodeGen/AArch64/arm64-patchpoint.ll b/test/CodeGen/AArch64/arm64-patchpoint.ll
new file mode 100644
index 000000000000..039cdfcc3858
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-patchpoint.ll
@@ -0,0 +1,171 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin -enable-misched=0 -mcpu=cyclone | FileCheck %s
+
+; Trivial patchpoint codegen
+;
+define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+; CHECK-LABEL: trivial_patchpoint_codegen:
+; CHECK: movz x16, #0xdead, lsl #32
+; CHECK-NEXT: movk x16, #0xbeef, lsl #16
+; CHECK-NEXT: movk x16, #0xcafe
+; CHECK-NEXT: blr x16
+; CHECK: movz x16, #0xdead, lsl #32
+; CHECK-NEXT: movk x16, #0xbeef, lsl #16
+; CHECK-NEXT: movk x16, #0xcaff
+; CHECK-NEXT: blr x16
+; CHECK: ret
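+; (Editorial note) 244837814094590 is 0xdeadbeefcafe and 244837814094591 is 0xdeadbeefcaff,
+; which is why the movz/movk immediates checked above end in #0xcafe and #0xcaff.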
+ %resolveCall2 = inttoptr i64 244837814094590 to i8*
+ %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %resolveCall3 = inttoptr i64 244837814094591 to i8*
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 20, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ ret i64 %result
+}
+
+; Caller frame metadata with stackmaps. This should not be optimized
+; as a leaf function.
+;
+; CHECK-LABEL: caller_meta_leaf
+; CHECK: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK: Ltmp
+; CHECK: mov sp, x29
+; CHECK: ret
+
+define void @caller_meta_leaf() {
+entry:
+ %metadata = alloca i64, i32 3, align 8
+ store i64 11, i64* %metadata
+ store i64 12, i64* %metadata
+ store i64 13, i64* %metadata
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+ ret void
+}
+
+; Test the webkit_jscc calling convention.
+; One argument will be passed in a register, the other will be pushed on the stack.
+; The return value is in x0.
+define void @jscall_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+; CHECK-LABEL: jscall_patchpoint_codegen:
+; CHECK: Ltmp
+; CHECK: str x{{.+}}, [sp]
+; CHECK-NEXT: mov x0, x{{.+}}
+; CHECK: Ltmp
+; CHECK-NEXT: movz x16, #0xffff, lsl #32
+; CHECK-NEXT: movk x16, #0xdead, lsl #16
+; CHECK-NEXT: movk x16, #0xbeef
+; CHECK-NEXT: blr x16
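+; (Editorial note) 281474417671919 is 0xffffdeadbeef, matching the movz/movk sequence above.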
+ %resolveCall2 = inttoptr i64 281474417671919 to i8*
+ %result = tail call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveCall2, i32 2, i64 %p4, i64 %p2)
+ %resolveCall3 = inttoptr i64 244837814038255 to i8*
+ tail call webkit_jscc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveCall3, i32 2, i64 %p4, i64 %result)
+ ret void
+}
+
+; Test that the arguments are properly aligned and that we don't store undef arguments.
+define i64 @jscall_patchpoint_codegen2(i64 %callee) {
+entry:
+; CHECK-LABEL: jscall_patchpoint_codegen2:
+; CHECK: Ltmp
+; CHECK: orr w{{.+}}, wzr, #0x6
+; CHECK-NEXT: str x{{.+}}, [sp, #24]
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x4
+; CHECK-NEXT: str w{{.+}}, [sp, #16]
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x2
+; CHECK-NEXT: str x{{.+}}, [sp]
+; CHECK: Ltmp
+; CHECK-NEXT: movz x16, #0xffff, lsl #32
+; CHECK-NEXT: movk x16, #0xdead, lsl #16
+; CHECK-NEXT: movk x16, #0xbeef
+; CHECK-NEXT: blr x16
+ %call = inttoptr i64 281474417671919 to i8*
+ %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6)
+ ret i64 %result
+}
+
+; Test that the arguments are properly aligned and that we don't store undef arguments.
+define i64 @jscall_patchpoint_codegen3(i64 %callee) {
+entry:
+; CHECK-LABEL: jscall_patchpoint_codegen3:
+; CHECK: Ltmp
+; CHECK: movz w{{.+}}, #0xa
+; CHECK-NEXT: str x{{.+}}, [sp, #48]
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x8
+; CHECK-NEXT: str w{{.+}}, [sp, #36]
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x6
+; CHECK-NEXT: str x{{.+}}, [sp, #24]
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x4
+; CHECK-NEXT: str w{{.+}}, [sp, #16]
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x2
+; CHECK-NEXT: str x{{.+}}, [sp]
+; CHECK: Ltmp
+; CHECK-NEXT: movz x16, #0xffff, lsl #32
+; CHECK-NEXT: movk x16, #0xdead, lsl #16
+; CHECK-NEXT: movk x16, #0xbeef
+; CHECK-NEXT: blr x16
+ %call = inttoptr i64 281474417671919 to i8*
+ %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10)
+ ret i64 %result
+}
+
+; Test patchpoints reusing the same TargetConstant.
+; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
+; There is no way to verify this, since it depends on memory allocation.
+; But I think it's useful to include as a working example.
+define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
+entry:
+ %tmp80 = add i64 %tmp79, -16
+ %tmp81 = inttoptr i64 %tmp80 to i64*
+ %tmp82 = load i64* %tmp81, align 8
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+ %tmp83 = load i64* %tmp33, align 8
+ %tmp84 = add i64 %tmp83, -24
+ %tmp85 = inttoptr i64 %tmp84 to i64*
+ %tmp86 = load i64* %tmp85, align 8
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+ ret i64 10
+}
+
+; Test small patchpoints that don't emit calls.
+define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+; CHECK-LABEL: small_patchpoint_codegen:
+; CHECK: Ltmp
+; CHECK: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
+ ret void
+}
+
+; Test that scratch registers are spilled around patchpoints
+; CHECK: InlineAsm End
+; CHECK-NEXT: mov x{{[0-9]+}}, x16
+; CHECK-NEXT: mov x{{[0-9]+}}, x17
+; CHECK-NEXT: Ltmp
+; CHECK-NEXT: nop
+define void @clobberScratch(i32* %p) {
+ %v = load i32* %p
+ tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 5, i32 20, i8* null, i32 0, i32* %p, i32 %v)
+ store i32 %v, i32* %p
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+
+; CHECK-LABEL: test_i16:
+; CHECK: ldrh [[BREG:w[0-9]+]], [sp]
+; CHECK: add w0, w0, [[BREG]]
+define webkit_jscc i16 @test_i16(i16 zeroext %a, i16 zeroext %b) {
+ %sum = add i16 %a, %b
+ ret i16 %sum
+}
diff --git a/test/CodeGen/AArch64/arm64-pic-local-symbol.ll b/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
new file mode 100644
index 000000000000..627e741fc32d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mtriple=arm64-unknown-linux-gnu -relocation-model=pic < %s | FileCheck %s
+
+@a = internal unnamed_addr global i32 0, align 4
+@.str = private unnamed_addr constant [6 x i8] c"test\0A\00", align 1
+
+define i32 @get() {
+; CHECK: get:
+; CHECK: adrp x{{[0-9]+}}, a
+; CHECK-NEXT: ldr w{{[0-9]+}}, [x{{[0-9]}}, :lo12:a]
+ %res = load i32* @a, align 4
+ ret i32 %res
+}
+
+define void @foo() nounwind {
+; CHECK: foo:
+; CHECK: adrp x{{[0-9]}}, .L.str
+; CHECK-NEXT: add x{{[0-9]}}, x{{[0-9]}}, :lo12:.L.str
+ tail call void @bar(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0))
+ ret void
+}
+
+declare void @bar(i8*)
diff --git a/test/CodeGen/AArch64/arm64-platform-reg.ll b/test/CodeGen/AArch64/arm64-platform-reg.ll
new file mode 100644
index 000000000000..651c793f73a4
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-platform-reg.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=arm64-apple-ios -o - %s | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
+
+; x18 is reserved as a platform register on Darwin but not on other
+; systems. Create loads of register pressure and make sure this is respected.
+
+; Also, fp must always refer to a valid frame record, even if it's not the one
+; of the current function, so it shouldn't be used either.
+
+@var = global [30 x i64] zeroinitializer
+
+define void @keep_live() {
+ %val = load volatile [30 x i64]* @var
+ store volatile [30 x i64] %val, [30 x i64]* @var
+
+; CHECK: ldr x18
+; CHECK: str x18
+
+; CHECK-DARWIN-NOT: ldr fp
+; CHECK-DARWIN-NOT: ldr x18
+; CHECK-DARWIN: Spill
+; CHECK-DARWIN-NOT: ldr fp
+; CHECK-DARWIN-NOT: ldr x18
+; CHECK-DARWIN: ret
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-popcnt.ll b/test/CodeGen/AArch64/arm64-popcnt.ll
new file mode 100644
index 000000000000..2afade2ee750
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-popcnt.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
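+; (Editorial note) ctpop on a general-purpose register is lowered through the vector unit:
+; cnt.8b counts the set bits in each byte and uaddlv sums the byte counts, as the CHECK
+; lines below expect.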
+define i32 @cnt32_advsimd(i32 %x) nounwind readnone {
+ %cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
+ ret i32 %cnt
+; CHECK: fmov s0, w0
+; CHECK: cnt.8b v0, v0
+; CHECK: uaddlv.8b h0, v0
+; CHECK: fmov w0, s0
+; CHECK: ret
+}
+
+define i64 @cnt64_advsimd(i64 %x) nounwind readnone {
+ %cnt = tail call i64 @llvm.ctpop.i64(i64 %x)
+ ret i64 %cnt
+; CHECK: fmov d0, x0
+; CHECK: cnt.8b v0, v0
+; CHECK: uaddlv.8b h0, v0
+; CHECK: fmov w0, s0
+; CHECK: ret
+}
+
+; Do not use AdvSIMD when -mno-implicit-float is specified.
+; rdar://9473858
+
+define i32 @cnt32(i32 %x) nounwind readnone noimplicitfloat {
+ %cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
+ ret i32 %cnt
+; CHECK-LABEL: cnt32:
+; CHECK-NOT: 16b
+; CHECK: ret
+}
+
+define i64 @cnt64(i64 %x) nounwind readnone noimplicitfloat {
+ %cnt = tail call i64 @llvm.ctpop.i64(i64 %x)
+ ret i64 %cnt
+; CHECK-LABEL: cnt64:
+; CHECK-NOT: 16b
+; CHECK: ret
+}
+
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare i64 @llvm.ctpop.i64(i64) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-prefetch.ll b/test/CodeGen/AArch64/arm64-prefetch.ll
new file mode 100644
index 000000000000..b2e06edf931c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-prefetch.ll
@@ -0,0 +1,88 @@
+; RUN: llc %s -march arm64 -o - | FileCheck %s
+
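+; (Editorial note) The llvm.prefetch operands are (address, rw, locality, cache type): rw is
+; 0 for read and 1 for write, locality runs from 0 (streaming) to 3 (keep in cache), and
+; cache type 1 selects the data cache. The CHECK lines below pair each combination with the
+; corresponding prfum/prfm hint.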
+@a = common global i32* null, align 8
+
+define void @test(i32 %i, i32 %j) nounwind ssp {
+entry:
+ ; CHECK: @test
+ %j.addr = alloca i32, align 4
+ store i32 %j, i32* %j.addr, align 4, !tbaa !0
+ %tmp = bitcast i32* %j.addr to i8*
+ ; CHECK: prfum pldl1strm
+ call void @llvm.prefetch(i8* %tmp, i32 0, i32 0, i32 1)
+ ; CHECK: prfum pldl3keep
+ call void @llvm.prefetch(i8* %tmp, i32 0, i32 1, i32 1)
+ ; CHECK: prfum pldl2keep
+ call void @llvm.prefetch(i8* %tmp, i32 0, i32 2, i32 1)
+ ; CHECK: prfum pldl1keep
+ call void @llvm.prefetch(i8* %tmp, i32 0, i32 3, i32 1)
+
+ ; CHECK: prfum pstl1strm
+ call void @llvm.prefetch(i8* %tmp, i32 1, i32 0, i32 1)
+ ; CHECK: prfum pstl3keep
+ call void @llvm.prefetch(i8* %tmp, i32 1, i32 1, i32 1)
+ ; CHECK: prfum pstl2keep
+ call void @llvm.prefetch(i8* %tmp, i32 1, i32 2, i32 1)
+ ; CHECK: prfum pstl1keep
+ call void @llvm.prefetch(i8* %tmp, i32 1, i32 3, i32 1)
+
+ %tmp1 = load i32* %j.addr, align 4, !tbaa !0
+ %add = add nsw i32 %tmp1, %i
+ %idxprom = sext i32 %add to i64
+ %tmp2 = load i32** @a, align 8, !tbaa !3
+ %arrayidx = getelementptr inbounds i32* %tmp2, i64 %idxprom
+ %tmp3 = bitcast i32* %arrayidx to i8*
+
+ ; CHECK: prfm pldl1strm
+ call void @llvm.prefetch(i8* %tmp3, i32 0, i32 0, i32 1)
+ %tmp4 = load i32** @a, align 8, !tbaa !3
+ %arrayidx3 = getelementptr inbounds i32* %tmp4, i64 %idxprom
+ %tmp5 = bitcast i32* %arrayidx3 to i8*
+
+ ; CHECK: prfm pldl3keep
+ call void @llvm.prefetch(i8* %tmp5, i32 0, i32 1, i32 1)
+ %tmp6 = load i32** @a, align 8, !tbaa !3
+ %arrayidx6 = getelementptr inbounds i32* %tmp6, i64 %idxprom
+ %tmp7 = bitcast i32* %arrayidx6 to i8*
+
+ ; CHECK: prfm pldl2keep
+ call void @llvm.prefetch(i8* %tmp7, i32 0, i32 2, i32 1)
+ %tmp8 = load i32** @a, align 8, !tbaa !3
+ %arrayidx9 = getelementptr inbounds i32* %tmp8, i64 %idxprom
+ %tmp9 = bitcast i32* %arrayidx9 to i8*
+
+ ; CHECK: prfm pldl1keep
+ call void @llvm.prefetch(i8* %tmp9, i32 0, i32 3, i32 1)
+ %tmp10 = load i32** @a, align 8, !tbaa !3
+ %arrayidx12 = getelementptr inbounds i32* %tmp10, i64 %idxprom
+ %tmp11 = bitcast i32* %arrayidx12 to i8*
+
+ ; CHECK: prfm pstl1strm
+ call void @llvm.prefetch(i8* %tmp11, i32 1, i32 0, i32 1)
+ %tmp12 = load i32** @a, align 8, !tbaa !3
+ %arrayidx15 = getelementptr inbounds i32* %tmp12, i64 %idxprom
+ %tmp13 = bitcast i32* %arrayidx15 to i8*
+
+ ; CHECK: prfm pstl3keep
+ call void @llvm.prefetch(i8* %tmp13, i32 1, i32 1, i32 1)
+ %tmp14 = load i32** @a, align 8, !tbaa !3
+ %arrayidx18 = getelementptr inbounds i32* %tmp14, i64 %idxprom
+ %tmp15 = bitcast i32* %arrayidx18 to i8*
+
+ ; CHECK: prfm pstl2keep
+ call void @llvm.prefetch(i8* %tmp15, i32 1, i32 2, i32 1)
+ %tmp16 = load i32** @a, align 8, !tbaa !3
+ %arrayidx21 = getelementptr inbounds i32* %tmp16, i64 %idxprom
+ %tmp17 = bitcast i32* %arrayidx21 to i8*
+
+ ; CHECK: prfm pstl1keep
+ call void @llvm.prefetch(i8* %tmp17, i32 1, i32 3, i32 1)
+ ret void
+}
+
+declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"any pointer", metadata !1}
diff --git a/test/CodeGen/AArch64/arm64-promote-const.ll b/test/CodeGen/AArch64/arm64-promote-const.ll
new file mode 100644
index 000000000000..380ff55d6839
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-promote-const.ll
@@ -0,0 +1,255 @@
+; Disable machine CSE to stress the different paths of the algorithm.
+; Otherwise, we always fall into the simple case, i.e., only one definition.
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-stress-promote-const -mcpu=cyclone | FileCheck -check-prefix=PROMOTED %s
+; The REGULAR run just checks that the inputs passed to promote const expose
+; the appropriate patterns.
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-promote-const=false -mcpu=cyclone | FileCheck -check-prefix=REGULAR %s
+
+%struct.uint8x16x4_t = type { [4 x <16 x i8>] }
+
+; Constant is a structure
+define %struct.uint8x16x4_t @test1() {
+; PROMOTED-LABEL: test1:
+; The promote-constant pass has created a single big constant for the whole structure
+; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], __PromotedConst@PAGE
+; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], __PromotedConst@PAGEOFF
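+; (Editorial note) adrp materializes the 4 KiB page address of the promoted constant and the
+; @PAGEOFF add supplies the offset within that page, giving the full pointer in two instructions.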
+; Destination registers are defined by the ABI
+; PROMOTED-NEXT: ldp q0, q1, {{\[}}[[BASEADDR]]]
+; PROMOTED-NEXT: ldp q2, q3, {{\[}}[[BASEADDR]], #32]
+; PROMOTED-NEXT: ret
+
+; REGULAR-LABEL: test1:
+; Regular access is quite bad: it performs 4 loads, one for each chunk of
+; the structure.
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL:lCP.*]]@PAGE
+; Destination registers are defined by the ABI
+; REGULAR: ldr q0, {{\[}}[[PAGEADDR]], [[CSTLABEL]]@PAGEOFF]
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL:lCP.*]]@PAGE
+; REGULAR: ldr q1, {{\[}}[[PAGEADDR]], [[CSTLABEL]]@PAGEOFF]
+; REGULAR: adrp [[PAGEADDR2:x[0-9]+]], [[CSTLABEL2:lCP.*]]@PAGE
+; REGULAR: ldr q2, {{\[}}[[PAGEADDR2]], [[CSTLABEL2]]@PAGEOFF]
+; REGULAR: adrp [[PAGEADDR3:x[0-9]+]], [[CSTLABEL3:lCP.*]]@PAGE
+; REGULAR: ldr q3, {{\[}}[[PAGEADDR3]], [[CSTLABEL3]]@PAGEOFF]
+; REGULAR-NEXT: ret
+entry:
+ ret %struct.uint8x16x4_t { [4 x <16 x i8>] [<16 x i8> <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>, <16 x i8> <i8 32, i8 124, i8 121, i8 120, i8 8, i8 117, i8 -56, i8 113, i8 -76, i8 110, i8 -53, i8 107, i8 7, i8 105, i8 103, i8 102>, <16 x i8> <i8 -24, i8 99, i8 -121, i8 97, i8 66, i8 95, i8 24, i8 93, i8 6, i8 91, i8 12, i8 89, i8 39, i8 87, i8 86, i8 85>, <16 x i8> <i8 -104, i8 83, i8 -20, i8 81, i8 81, i8 80, i8 -59, i8 78, i8 73, i8 77, i8 -37, i8 75, i8 122, i8 74, i8 37, i8 73>] }
+}
+
+; Two different uses of the same constant in the same basic block
+define <16 x i8> @test2(<16 x i8> %arg) {
+entry:
+; PROMOTED-LABEL: test2:
+; In stress mode, constant vectors are promoted
+; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1:__PromotedConst[0-9]+]]@PAGE
+; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
+; PROMOTED: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
+; Destination register is defined by ABI
+; PROMOTED-NEXT: add.16b v0, v0, v[[REGNUM]]
+; PROMOTED-NEXT: mla.16b v0, v0, v[[REGNUM]]
+; PROMOTED-NEXT: ret
+
+; REGULAR-LABEL: test2:
+; Regular access is strictly the same as promoted access.
+; The difference is that the address (and thus the space in memory) is not
+; shared between constants
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL:lCP.*]]@PAGE
+; REGULAR: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL]]@PAGEOFF]
+; Destination register is defined by ABI
+; REGULAR-NEXT: add.16b v0, v0, v[[REGNUM]]
+; REGULAR-NEXT: mla.16b v0, v0, v[[REGNUM]]
+; REGULAR-NEXT: ret
+ %add.i = add <16 x i8> %arg, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ %mul.i = mul <16 x i8> %add.i, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ %add.i9 = add <16 x i8> %add.i, %mul.i
+ ret <16 x i8> %add.i9
+}
+
+; Two different uses of the same constant in two different basic blocks;
+; one dominates the other.
+define <16 x i8> @test3(<16 x i8> %arg, i32 %path) {
+; PROMOTED-LABEL: test3:
+; In stress mode, constant vectors are promoted.
+; Since the constant is the same as in the previous function,
+; the same address must be used.
+; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1]]@PAGE
+; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
+; PROMOTED-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
+; Destination register is defined by ABI
+; PROMOTED-NEXT: add.16b v0, v0, v[[REGNUM]]
+; PROMOTED-NEXT: cbnz w0, [[LABEL:LBB.*]]
+; Next BB
+; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV2:__PromotedConst[0-9]+]]@PAGE
+; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV2]]@PAGEOFF
+; PROMOTED-NEXT: ldr q[[REGNUM]], {{\[}}[[BASEADDR]]]
+; Next BB
+; PROMOTED-NEXT: [[LABEL]]:
+; PROMOTED-NEXT: mul.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
+; PROMOTED-NEXT: add.16b v0, v0, [[DESTV]]
+; PROMOTED-NEXT: ret
+
+; REGULAR-LABEL: test3:
+; Regular mode does not eliminate common subexpressions on its own.
+; In other words, the same loads appear several times.
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL1:lCP.*]]@PAGE
+; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL1]]@PAGEOFF]
+; Destination register is defined by ABI
+; REGULAR-NEXT: add.16b v0, v0, v[[REGNUM]]
+; REGULAR-NEXT: cbz w0, [[LABELelse:LBB.*]]
+; Next BB
+; Redundant load
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL1]]@PAGE
+; REGULAR-NEXT: ldr q[[REGNUM]], {{\[}}[[PAGEADDR]], [[CSTLABEL1]]@PAGEOFF]
+; REGULAR-NEXT: b [[LABELend:LBB.*]]
+; Next BB
+; REGULAR-NEXT: [[LABELelse]]
+; REGULAR-NEXT: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL2:lCP.*]]@PAGE
+; REGULAR-NEXT: ldr q[[REGNUM]], {{\[}}[[PAGEADDR]], [[CSTLABEL2]]@PAGEOFF]
+; Next BB
+; REGULAR-NEXT: [[LABELend]]:
+; REGULAR-NEXT: mul.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
+; REGULAR-NEXT: add.16b v0, v0, [[DESTV]]
+; REGULAR-NEXT: ret
+entry:
+ %add.i = add <16 x i8> %arg, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ %tobool = icmp eq i32 %path, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then: ; preds = %entry
+ %mul.i13 = mul <16 x i8> %add.i, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ br label %if.end
+
+if.else: ; preds = %entry
+ %mul.i = mul <16 x i8> %add.i, <i8 -24, i8 99, i8 -121, i8 97, i8 66, i8 95, i8 24, i8 93, i8 6, i8 91, i8 12, i8 89, i8 39, i8 87, i8 86, i8 85>
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %ret2.0 = phi <16 x i8> [ %mul.i13, %if.then ], [ %mul.i, %if.else ]
+ %add.i12 = add <16 x i8> %add.i, %ret2.0
+ ret <16 x i8> %add.i12
+}
+
+; Two different uses of the same constant in two different basic blocks;
+; neither dominates the other.
+define <16 x i8> @test4(<16 x i8> %arg, i32 %path) {
+; PROMOTED-LABEL: test4:
+; In stress mode, constant vectors are promoted.
+; Since the constant is the same as in the previous function,
+; the same address must be used.
+; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1]]@PAGE
+; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
+; PROMOTED-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
+; Destination register is defined by ABI
+; PROMOTED-NEXT: add.16b v0, v0, v[[REGNUM]]
+; PROMOTED-NEXT: cbz w0, [[LABEL:LBB.*]]
+; Next BB
+; PROMOTED: mul.16b v0, v0, v[[REGNUM]]
+; Next BB
+; PROMOTED-NEXT: [[LABEL]]:
+; PROMOTED-NEXT: ret
+
+
+; REGULAR-LABEL: test4:
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL3:lCP.*]]@PAGE
+; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL3]]@PAGEOFF]
+; Destination register is defined by ABI
+; REGULAR-NEXT: add.16b v0, v0, v[[REGNUM]]
+; REGULAR-NEXT: cbz w0, [[LABEL:LBB.*]]
+; Next BB
+; Redundant expression
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL3]]@PAGE
+; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL3]]@PAGEOFF]
+; Destination register is defined by ABI
+; REGULAR-NEXT: mul.16b v0, v0, v[[REGNUM]]
+; Next BB
+; REGULAR-NEXT: [[LABEL]]:
+; REGULAR-NEXT: ret
+entry:
+ %add.i = add <16 x i8> %arg, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ %tobool = icmp eq i32 %path, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %mul.i = mul <16 x i8> %add.i, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ %ret.0 = phi <16 x i8> [ %mul.i, %if.then ], [ %add.i, %entry ]
+ ret <16 x i8> %ret.0
+}
+
+; Two different uses of the same constant in two different basic blocks;
+; one is in a phi.
+define <16 x i8> @test5(<16 x i8> %arg, i32 %path) {
+; PROMOTED-LABEL: test5:
+; In stress mode, constant vectors are promoted.
+; Since the constant is the same as in the previous function,
+; the same address must be used.
+; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1]]@PAGE
+; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
+; PROMOTED-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
+; PROMOTED-NEXT: cbz w0, [[LABEL:LBB.*]]
+; Next BB
+; PROMOTED: add.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
+; PROMOTED-NEXT: mul.16b v[[REGNUM]], [[DESTV]], v[[REGNUM]]
+; Next BB
+; PROMOTED-NEXT: [[LABEL]]:
+; PROMOTED-NEXT: mul.16b [[TMP1:v[0-9]+]], v[[REGNUM]], v[[REGNUM]]
+; PROMOTED-NEXT: mul.16b [[TMP2:v[0-9]+]], [[TMP1]], [[TMP1]]
+; PROMOTED-NEXT: mul.16b [[TMP3:v[0-9]+]], [[TMP2]], [[TMP2]]
+; PROMOTED-NEXT: mul.16b v0, [[TMP3]], [[TMP3]]
+; PROMOTED-NEXT: ret
+
+; REGULAR-LABEL: test5:
+; REGULAR: cbz w0, [[LABELelse:LBB.*]]
+; Next BB
+; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL:lCP.*]]@PAGE
+; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL]]@PAGEOFF]
+; REGULAR-NEXT: add.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
+; REGULAR-NEXT: mul.16b v[[DESTREGNUM:[0-9]+]], [[DESTV]], v[[REGNUM]]
+; REGULAR-NEXT: b [[LABELend:LBB.*]]
+; Next BB
+; REGULAR-NEXT: [[LABELelse]]
+; REGULAR-NEXT: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL:lCP.*]]@PAGE
+; REGULAR-NEXT: ldr q[[DESTREGNUM]], {{\[}}[[PAGEADDR]], [[CSTLABEL]]@PAGEOFF]
+; Next BB
+; REGULAR-NEXT: [[LABELend]]:
+; REGULAR-NEXT: mul.16b [[TMP1:v[0-9]+]], v[[DESTREGNUM]], v[[DESTREGNUM]]
+; REGULAR-NEXT: mul.16b [[TMP2:v[0-9]+]], [[TMP1]], [[TMP1]]
+; REGULAR-NEXT: mul.16b [[TMP3:v[0-9]+]], [[TMP2]], [[TMP2]]
+; REGULAR-NEXT: mul.16b v0, [[TMP3]], [[TMP3]]
+; REGULAR-NEXT: ret
+entry:
+ %tobool = icmp eq i32 %path, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %add.i = add <16 x i8> %arg, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ %mul.i26 = mul <16 x i8> %add.i, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ %ret.0 = phi <16 x i8> [ %mul.i26, %if.then ], [ <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>, %entry ]
+ %mul.i25 = mul <16 x i8> %ret.0, %ret.0
+ %mul.i24 = mul <16 x i8> %mul.i25, %mul.i25
+ %mul.i23 = mul <16 x i8> %mul.i24, %mul.i24
+ %mul.i = mul <16 x i8> %mul.i23, %mul.i23
+ ret <16 x i8> %mul.i
+}
+
+define void @accessBig(i64* %storage) {
+; PROMOTED-LABEL: accessBig:
+; PROMOTED: adrp
+; PROMOTED: ret
+ %addr = bitcast i64* %storage to <1 x i80>*
+ store <1 x i80> <i80 483673642326615442599424>, <1 x i80>* %addr
+ ret void
+}
+
+define void @asmStatement() {
+; PROMOTED-LABEL: asmStatement:
+; PROMOTED-NOT: adrp
+; PROMOTED: ret
+ call void asm sideeffect "bfxil w0, w0, $0, $1", "i,i"(i32 28, i32 4)
+ ret void
+}
+
diff --git a/test/CodeGen/AArch64/arm64-redzone.ll b/test/CodeGen/AArch64/arm64-redzone.ll
new file mode 100644
index 000000000000..9b0c384c4d9e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-redzone.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -march=arm64 -aarch64-redzone | FileCheck %s
+
+define i32 @foo(i32 %a, i32 %b) nounwind ssp {
+; CHECK-LABEL: foo:
+; CHECK-NOT: sub sp, sp
+; CHECK: ret
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ %x = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 %b, i32* %b.addr, align 4
+ %tmp = load i32* %a.addr, align 4
+ %tmp1 = load i32* %b.addr, align 4
+ %add = add nsw i32 %tmp, %tmp1
+ store i32 %add, i32* %x, align 4
+ %tmp2 = load i32* %x, align 4
+ ret i32 %tmp2
+}
diff --git a/test/CodeGen/AArch64/arm64-reg-copy-noneon.ll b/test/CodeGen/AArch64/arm64-reg-copy-noneon.ll
new file mode 100644
index 000000000000..29255ef187c1
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-reg-copy-noneon.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-neon < %s | FileCheck %s
+
+define float @copy_FPR32(float %a, float %b) {
+;CHECK-LABEL: copy_FPR32:
+;CHECK: fmov s0, s1
+ ret float %b;
+}
+
+define double @copy_FPR64(double %a, double %b) {
+;CHECK-LABEL: copy_FPR64:
+;CHECK: fmov d0, d1
+ ret double %b;
+}
+
+define fp128 @copy_FPR128(fp128 %a, fp128 %b) {
+;CHECK-LABEL: copy_FPR128:
+;CHECK: str q1, [sp, #-16]!
+;CHECK-NEXT: ldr q0, [sp, #16]!
+ ret fp128 %b;
+}
diff --git a/test/CodeGen/AArch64/arm64-register-offset-addressing.ll b/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
new file mode 100644
index 000000000000..045712bea6ac
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
@@ -0,0 +1,145 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+
+define i8 @test_64bit_add(i16* %a, i64 %b) {
+; CHECK-LABEL: test_64bit_add:
+; CHECK: lsl [[REG:x[0-9]+]], x1, #1
+; CHECK: ldrb w0, [x0, [[REG]]]
+; CHECK: ret
+ %tmp1 = getelementptr inbounds i16* %a, i64 %b
+ %tmp2 = load i16* %tmp1
+ %tmp3 = trunc i16 %tmp2 to i8
+ ret i8 %tmp3
+}
+
+; These tests are trying to form SEXT and ZEXT operations that never leave i64
+; space, to make sure LLVM can adapt the offset register correctly.
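+; (Editorial note) In the bodies below, the shl-by-32/ashr-by-32 pairs model a sign extension
+; of the low 32 bits (the sxtw forms), while the and with 4294967295 (0xffffffff) models a
+; zero extension (the uxtw forms).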
+define void @ldst_8bit(i8* %base, i64 %offset) minsize {
+; CHECK-LABEL: ldst_8bit:
+
+ %off32.sext.tmp = shl i64 %offset, 32
+ %off32.sext = ashr i64 %off32.sext.tmp, 32
+ %addr8_sxtw = getelementptr i8* %base, i64 %off32.sext
+ %val8_sxtw = load volatile i8* %addr8_sxtw
+ %val32_signed = sext i8 %val8_sxtw to i32
+ store volatile i32 %val32_signed, i32* @var_32bit
+; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+ %addrint_uxtw = ptrtoint i8* %base to i64
+ %offset_uxtw = and i64 %offset, 4294967295
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
+ %val8_uxtw = load volatile i8* %addr_uxtw
+ %newval8 = add i8 %val8_uxtw, 1
+ store volatile i8 %newval8, i8* @var_8bit
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ ret void
+}
+
+
+define void @ldst_16bit(i16* %base, i64 %offset) minsize {
+; CHECK-LABEL: ldst_16bit:
+
+ %addrint_uxtw = ptrtoint i16* %base to i64
+ %offset_uxtw = and i64 %offset, 4294967295
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
+ %val8_uxtw = load volatile i16* %addr_uxtw
+ %newval8 = add i16 %val8_uxtw, 1
+ store volatile i16 %newval8, i16* @var_16bit
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint i16* %base to i64
+ %offset_sxtw.tmp = shl i64 %offset, 32
+ %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
+ %val16_sxtw = load volatile i16* %addr_sxtw
+ %val64_signed = sext i16 %val16_sxtw to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+
+ %base_uxtwN = ptrtoint i16* %base to i64
+ %offset_uxtwN = and i64 %offset, 4294967295
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 1
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
+ %val32 = load volatile i32* @var_32bit
+ %val16_trunc32 = trunc i32 %val32 to i16
+ store volatile i16 %val16_trunc32, i16* %addr_uxtwN
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
+ ret void
+}
+
+define void @ldst_32bit(i32* %base, i64 %offset) minsize {
+; CHECK-LABEL: ldst_32bit:
+
+ %addrint_uxtw = ptrtoint i32* %base to i64
+ %offset_uxtw = and i64 %offset, 4294967295
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
+ %val32_uxtw = load volatile i32* %addr_uxtw
+ %newval32 = add i32 %val32_uxtw, 1
+ store volatile i32 %newval32, i32* @var_32bit
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint i32* %base to i64
+ %offset_sxtw.tmp = shl i64 %offset, 32
+ %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
+ %val32_sxtw = load volatile i32* %addr_sxtw
+ %val64_signed = sext i32 %val32_sxtw to i64
+ store volatile i64 %val64_signed, i64* @var_64bit
+; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+
+ %base_uxtwN = ptrtoint i32* %base to i64
+ %offset_uxtwN = and i64 %offset, 4294967295
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 2
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
+ %val32 = load volatile i32* @var_32bit
+ store volatile i32 %val32, i32* %addr_uxtwN
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
+ ret void
+}
+
+define void @ldst_64bit(i64* %base, i64 %offset) minsize {
+; CHECK-LABEL: ldst_64bit:
+
+ %addrint_uxtw = ptrtoint i64* %base to i64
+ %offset_uxtw = and i64 %offset, 4294967295
+ %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
+ %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
+ %val64_uxtw = load volatile i64* %addr_uxtw
+ %newval8 = add i64 %val64_uxtw, 1
+ store volatile i64 %newval8, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+
+ %base_sxtw = ptrtoint i64* %base to i64
+ %offset_sxtw.tmp = shl i64 %offset, 32
+ %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
+ %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
+ %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
+ %val64_sxtw = load volatile i64* %addr_sxtw
+ store volatile i64 %val64_sxtw, i64* @var_64bit
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+
+
+ %base_uxtwN = ptrtoint i64* %base to i64
+ %offset_uxtwN = and i64 %offset, 4294967295
+ %offset2_uxtwN = shl i64 %offset_uxtwN, 3
+ %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
+ %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
+ %val64 = load volatile i64* @var_64bit
+ store volatile i64 %val64, i64* %addr_uxtwN
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
+ ret void
+}
+
+@var_8bit = global i8 0
+@var_16bit = global i16 0
+@var_32bit = global i32 0
+@var_64bit = global i64 0
diff --git a/test/CodeGen/AArch64/arm64-register-pairing.ll b/test/CodeGen/AArch64/arm64-register-pairing.ll
new file mode 100644
index 000000000000..99defb1aad7c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-register-pairing.ll
@@ -0,0 +1,53 @@
+; RUN: llc -mtriple=arm64-apple-ios < %s | FileCheck %s
+;
+; rdar://14075006
+
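+; (Editorial note) Each asm below clobbers only every other callee-saved register, yet the
+; prologue and epilogue still save and restore them as consecutive stp/ldp pairs, which is
+; what the CHECK lines verify.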
+define void @odd() nounwind {
+; CHECK-LABEL: odd:
+; CHECK: stp d15, d14, [sp, #-144]!
+; CHECK: stp d13, d12, [sp, #16]
+; CHECK: stp d11, d10, [sp, #32]
+; CHECK: stp d9, d8, [sp, #48]
+; CHECK: stp x28, x27, [sp, #64]
+; CHECK: stp x26, x25, [sp, #80]
+; CHECK: stp x24, x23, [sp, #96]
+; CHECK: stp x22, x21, [sp, #112]
+; CHECK: stp x20, x19, [sp, #128]
+; CHECK: movz x0, #0x2a
+; CHECK: ldp x20, x19, [sp, #128]
+; CHECK: ldp x22, x21, [sp, #112]
+; CHECK: ldp x24, x23, [sp, #96]
+; CHECK: ldp x26, x25, [sp, #80]
+; CHECK: ldp x28, x27, [sp, #64]
+; CHECK: ldp d9, d8, [sp, #48]
+; CHECK: ldp d11, d10, [sp, #32]
+; CHECK: ldp d13, d12, [sp, #16]
+; CHECK: ldp d15, d14, [sp], #144
+ call void asm sideeffect "mov x0, #42", "~{x0},~{x19},~{x21},~{x23},~{x25},~{x27},~{d8},~{d10},~{d12},~{d14}"() nounwind
+ ret void
+}
+
+define void @even() nounwind {
+; CHECK-LABEL: even:
+; CHECK: stp d15, d14, [sp, #-144]!
+; CHECK: stp d13, d12, [sp, #16]
+; CHECK: stp d11, d10, [sp, #32]
+; CHECK: stp d9, d8, [sp, #48]
+; CHECK: stp x28, x27, [sp, #64]
+; CHECK: stp x26, x25, [sp, #80]
+; CHECK: stp x24, x23, [sp, #96]
+; CHECK: stp x22, x21, [sp, #112]
+; CHECK: stp x20, x19, [sp, #128]
+; CHECK: movz x0, #0x2a
+; CHECK: ldp x20, x19, [sp, #128]
+; CHECK: ldp x22, x21, [sp, #112]
+; CHECK: ldp x24, x23, [sp, #96]
+; CHECK: ldp x26, x25, [sp, #80]
+; CHECK: ldp x28, x27, [sp, #64]
+; CHECK: ldp d9, d8, [sp, #48]
+; CHECK: ldp d11, d10, [sp, #32]
+; CHECK: ldp d13, d12, [sp, #16]
+; CHECK: ldp d15, d14, [sp], #144
+ call void asm sideeffect "mov x0, #42", "~{x0},~{x20},~{x22},~{x24},~{x26},~{x28},~{d9},~{d11},~{d13},~{d15}"() nounwind
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll b/test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
new file mode 100644
index 000000000000..a1daf03f4fa9
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=arm64 -verify-machineinstrs < %s | FileCheck %s
+
+; We used not to mark NZCV as being used in the continuation basic block
+; when lowering a 128-bit "select" to branches. This meant a subsequent use
+; of the same flags caused an internal fault here.
+
+declare void @foo(fp128)
+
+define double @test_f128csel_flags(i32 %lhs, fp128 %a, fp128 %b, double %l, double %r) nounwind {
+; CHECK: test_f128csel_flags
+
+ %tst = icmp ne i32 %lhs, 42
+ %val = select i1 %tst, fp128 %a, fp128 %b
+; CHECK: cmp w0, #42
+; CHECK: b.eq {{.?LBB0}}
+
+ call void @foo(fp128 %val)
+ %retval = select i1 %tst, double %l, double %r
+
+ ; It's also reasonably important that the actual fcsel comes before the
+ ; function call since bl may corrupt NZCV. We were doing the right thing anyway,
+ ; but just as well test it while we're here.
+; CHECK: fcsel {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, ne
+; CHECK: bl {{_?foo}}
+
+ ret double %retval
+}
diff --git a/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll b/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
new file mode 100644
index 000000000000..fec89334801e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=arm64 -o - %s | FileCheck %s
+
+; This is mostly a "don't assert" test. The type of the RHS of a shift depended
+; on the phase of legalization, which led to the creation of an unexpected and
+; unselectable "rotr" node: (i32 (rotr i32, i64)).
+
+; FIXME: This test is xfailed because it relies on an optimization that has
+; been reverted (see PR17975).
+; XFAIL: *
+
+define void @foo(i64* nocapture %d) {
+; CHECK-LABEL: foo:
+; CHECK: rorv
+ %tmp = load i64* undef, align 8
+ %sub397 = sub i64 0, %tmp
+ %and398 = and i64 %sub397, 4294967295
+ %shr404 = lshr i64 %and398, 0
+ %or405 = or i64 0, %shr404
+ %xor406 = xor i64 %or405, 0
+ %xor417 = xor i64 0, %xor406
+ %xor428 = xor i64 0, %xor417
+ %sub430 = sub i64 %xor417, 0
+ %and431 = and i64 %sub430, 4294967295
+ %and432 = and i64 %xor428, 31
+ %sub433 = sub i64 32, %and432
+ %shl434 = shl i64 %and431, %sub433
+ %shr437 = lshr i64 %and431, %and432
+ %or438 = or i64 %shl434, %shr437
+ %xor439 = xor i64 %or438, %xor428
+ %sub441 = sub i64 %xor439, 0
+ store i64 %sub441, i64* %d, align 8
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-return-vector.ll b/test/CodeGen/AArch64/arm64-return-vector.ll
new file mode 100644
index 000000000000..9457d8bc6d07
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-return-vector.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+; A 2x64-bit vector should be returned in Q0.
+
+define <2 x double> @test(<2 x double>* %p) nounwind {
+; CHECK: test
+; CHECK: ldr q0, [x0]
+; CHECK: ret
+ %tmp1 = load <2 x double>* %p, align 16
+ ret <2 x double> %tmp1
+}
diff --git a/test/CodeGen/AArch64/arm64-returnaddr.ll b/test/CodeGen/AArch64/arm64-returnaddr.ll
new file mode 100644
index 000000000000..285b29563c09
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-returnaddr.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define i8* @rt0(i32 %x) nounwind readnone {
+entry:
+; CHECK-LABEL: rt0:
+; CHECK: mov x0, x30
+; CHECK: ret
+ %0 = tail call i8* @llvm.returnaddress(i32 0)
+ ret i8* %0
+}
+
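+; returnaddress(2) walks two frames up the saved frame-pointer chain before
+; loading the return address stored at [fp, #8].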
+define i8* @rt2() nounwind readnone {
+entry:
+; CHECK-LABEL: rt2:
+; CHECK: stp x29, x30, [sp, #-16]!
+; CHECK: mov x29, sp
+; CHECK: ldr x[[REG:[0-9]+]], [x29]
+; CHECK: ldr x[[REG2:[0-9]+]], [x[[REG]]]
+; CHECK: ldr x0, [x[[REG2]], #8]
+; CHECK: ldp x29, x30, [sp], #16
+; CHECK: ret
+ %0 = tail call i8* @llvm.returnaddress(i32 2)
+ ret i8* %0
+}
+
+declare i8* @llvm.returnaddress(i32) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-rev.ll b/test/CodeGen/AArch64/arm64-rev.ll
new file mode 100644
index 000000000000..30d9f4f3e670
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-rev.ll
@@ -0,0 +1,235 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define i32 @test_rev_w(i32 %a) nounwind {
+entry:
+; CHECK-LABEL: test_rev_w:
+; CHECK: rev w0, w0
+ %0 = tail call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %0
+}
+
+define i64 @test_rev_x(i64 %a) nounwind {
+entry:
+; CHECK-LABEL: test_rev_x:
+; CHECK: rev x0, x0
+ %0 = tail call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %0
+}
+
+declare i32 @llvm.bswap.i32(i32) nounwind readnone
+declare i64 @llvm.bswap.i64(i64) nounwind readnone
+
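+; The shift-and-mask sequence below swaps the bytes within each 16-bit half of
+; the word, so it should be matched as a single REV16.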
+define i32 @test_rev16_w(i32 %X) nounwind {
+entry:
+; CHECK-LABEL: test_rev16_w:
+; CHECK: rev16 w0, w0
+ %tmp1 = lshr i32 %X, 8
+ %X15 = bitcast i32 %X to i32
+ %tmp4 = shl i32 %X15, 8
+ %tmp2 = and i32 %tmp1, 16711680
+ %tmp5 = and i32 %tmp4, -16777216
+ %tmp9 = and i32 %tmp1, 255
+ %tmp13 = and i32 %tmp4, 65280
+ %tmp6 = or i32 %tmp5, %tmp2
+ %tmp10 = or i32 %tmp6, %tmp13
+ %tmp14 = or i32 %tmp10, %tmp9
+ ret i32 %tmp14
+}
+
+; 64-bit REV16 is *not* a swap then a 16-bit rotation:
+; 01234567 ->(bswap) 76543210 ->(rotr) 10765432
+; 01234567 ->(rev16) 10325476
+define i64 @test_rev16_x(i64 %a) nounwind {
+entry:
+; CHECK-LABEL: test_rev16_x:
+; CHECK-NOT: rev16 x0, x0
+ %0 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %1 = lshr i64 %0, 16
+ %2 = shl i64 %0, 48
+ %3 = or i64 %1, %2
+ ret i64 %3
+}
+
+define i64 @test_rev32_x(i64 %a) nounwind {
+entry:
+; CHECK-LABEL: test_rev32_x:
+; CHECK: rev32 x0, x0
+ %0 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %1 = lshr i64 %0, 32
+ %2 = shl i64 %0, 32
+ %3 = or i64 %1, %2
+ ret i64 %3
+}
+
+define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: test_vrev64D8:
+;CHECK: rev64.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %tmp2
+}
+
+define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: test_vrev64D16:
+;CHECK: rev64.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i16> %tmp2
+}
+
+define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: test_vrev64D32:
+;CHECK: rev64.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+ ret <2 x i32> %tmp2
+}
+
+define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
+;CHECK-LABEL: test_vrev64Df:
+;CHECK: rev64.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+ ret <2 x float> %tmp2
+}
+
+define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: test_vrev64Q8:
+;CHECK: rev64.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+ ret <16 x i8> %tmp2
+}
+
+define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: test_vrev64Q16:
+;CHECK: rev64.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x i16> %tmp2
+}
+
+define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: test_vrev64Q32:
+;CHECK: rev64.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x i32> %tmp2
+}
+
+define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
+;CHECK-LABEL: test_vrev64Qf:
+;CHECK: rev64.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x float> %tmp2
+}
+
+define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: test_vrev32D8:
+;CHECK: rev32.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x i8> %tmp2
+}
+
+define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: test_vrev32D16:
+;CHECK: rev32.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x i16> %tmp2
+}
+
+define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: test_vrev32Q8:
+;CHECK: rev32.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+ ret <16 x i8> %tmp2
+}
+
+define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: test_vrev32Q16:
+;CHECK: rev32.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ ret <8 x i16> %tmp2
+}
+
+define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: test_vrev16D8:
+;CHECK: rev16.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ ret <8 x i8> %tmp2
+}
+
+define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: test_vrev16Q8:
+;CHECK: rev16.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ ret <16 x i8> %tmp2
+}
+
+; Undef shuffle indices should not prevent matching to VREV:
+
+define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: test_vrev64D8_undef:
+;CHECK: rev64.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %tmp2
+}
+
+define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: test_vrev32Q16_undef:
+;CHECK: rev32.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
+ ret <8 x i16> %tmp2
+}
+
+; vrev <4 x i16> should use REV32 and not REV64
+define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst) nounwind ssp {
+; CHECK-LABEL: test_vrev64:
+; CHECK: ldr [[DEST:q[0-9]+]],
+; CHECK: st1.h
+; CHECK: st1.h
+entry:
+ %0 = bitcast <4 x i16>* %source to <8 x i16>*
+ %tmp2 = load <8 x i16>* %0, align 4
+ %tmp3 = extractelement <8 x i16> %tmp2, i32 6
+ %tmp5 = insertelement <2 x i16> undef, i16 %tmp3, i32 0
+ %tmp9 = extractelement <8 x i16> %tmp2, i32 5
+ %tmp11 = insertelement <2 x i16> %tmp5, i16 %tmp9, i32 1
+ store <2 x i16> %tmp11, <2 x i16>* %dst, align 4
+ ret void
+}
+
+; Test vrev of float4
+define void @float_vrev64(float* nocapture %source, <4 x float>* nocapture %dest) nounwind noinline ssp {
+; CHECK: float_vrev64
+; CHECK: ldr [[DEST:q[0-9]+]],
+; CHECK: rev64.4s
+entry:
+ %0 = bitcast float* %source to <4 x float>*
+ %tmp2 = load <4 x float>* %0, align 4
+ %tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
+ %arrayidx8 = getelementptr inbounds <4 x float>* %dest, i32 11
+ store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4
+ ret void
+}
+
+
+define <4 x i32> @test_vrev32_bswap(<4 x i32> %source) nounwind {
+; CHECK-LABEL: test_vrev32_bswap:
+; CHECK: rev32.16b
+; CHECK-NOT: rev
+; CHECK: ret
+ %bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %source)
+ ret <4 x i32> %bswap
+}
+
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-rounding.ll b/test/CodeGen/AArch64/arm64-rounding.ll
new file mode 100644
index 000000000000..931114447adf
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-rounding.ll
@@ -0,0 +1,208 @@
+; RUN: llc -O3 < %s -mcpu=cyclone | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
+target triple = "arm64-apple-ios6.0.0"
+
+; CHECK: test1
+; CHECK: frintx
+; CHECK: frintm
+define float @test1(float %a) #0 {
+entry:
+ %call = tail call float @floorf(float %a) nounwind readnone
+ ret float %call
+}
+
+declare float @floorf(float) nounwind readnone
+
+; CHECK: test2
+; CHECK: frintx
+; CHECK: frintm
+define double @test2(double %a) #0 {
+entry:
+ %call = tail call double @floor(double %a) nounwind readnone
+ ret double %call
+}
+
+declare double @floor(double) nounwind readnone
+
+; CHECK: test3
+; CHECK: frinti
+define float @test3(float %a) #0 {
+entry:
+ %call = tail call float @nearbyintf(float %a) nounwind readnone
+ ret float %call
+}
+
+declare float @nearbyintf(float) nounwind readnone
+
+; CHECK: test4
+; CHECK: frinti
+define double @test4(double %a) #0 {
+entry:
+ %call = tail call double @nearbyint(double %a) nounwind readnone
+ ret double %call
+}
+
+declare double @nearbyint(double) nounwind readnone
+
+; CHECK: test5
+; CHECK: frintx
+; CHECK: frintp
+define float @test5(float %a) #0 {
+entry:
+ %call = tail call float @ceilf(float %a) nounwind readnone
+ ret float %call
+}
+
+declare float @ceilf(float) nounwind readnone
+
+; CHECK: test6
+; CHECK: frintx
+; CHECK: frintp
+define double @test6(double %a) #0 {
+entry:
+ %call = tail call double @ceil(double %a) nounwind readnone
+ ret double %call
+}
+
+declare double @ceil(double) nounwind readnone
+
+; CHECK: test7
+; CHECK: frintx
+define float @test7(float %a) #0 {
+entry:
+ %call = tail call float @rintf(float %a) nounwind readnone
+ ret float %call
+}
+
+declare float @rintf(float) nounwind readnone
+
+; CHECK: test8
+; CHECK: frintx
+define double @test8(double %a) #0 {
+entry:
+ %call = tail call double @rint(double %a) nounwind readnone
+ ret double %call
+}
+
+declare double @rint(double) nounwind readnone
+
+; CHECK: test9
+; CHECK: frintx
+; CHECK: frintz
+define float @test9(float %a) #0 {
+entry:
+ %call = tail call float @truncf(float %a) nounwind readnone
+ ret float %call
+}
+
+declare float @truncf(float) nounwind readnone
+
+; CHECK: test10
+; CHECK: frintx
+; CHECK: frintz
+define double @test10(double %a) #0 {
+entry:
+ %call = tail call double @trunc(double %a) nounwind readnone
+ ret double %call
+}
+
+declare double @trunc(double) nounwind readnone
+
+; CHECK: test11
+; CHECK: frintx
+; CHECK: frinta
+define float @test11(float %a) #0 {
+entry:
+ %call = tail call float @roundf(float %a) nounwind readnone
+ ret float %call
+}
+
+declare float @roundf(float %a) nounwind readnone
+
+; CHECK: test12
+; CHECK: frintx
+; CHECK: frinta
+define double @test12(double %a) #0 {
+entry:
+ %call = tail call double @round(double %a) nounwind readnone
+ ret double %call
+}
+
+declare double @round(double %a) nounwind readnone
+
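+; Tests 13-20 use attribute #1 (unsafe-fp-math), so the preliminary frintx is
+; not emitted.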
+; CHECK: test13
+; CHECK-NOT: frintx
+; CHECK: frintm
+define float @test13(float %a) #1 {
+entry:
+ %call = tail call float @floorf(float %a) nounwind readnone
+ ret float %call
+}
+
+; CHECK: test14
+; CHECK-NOT: frintx
+; CHECK: frintm
+define double @test14(double %a) #1 {
+entry:
+ %call = tail call double @floor(double %a) nounwind readnone
+ ret double %call
+}
+
+; CHECK: test15
+; CHECK-NOT: frintx
+; CHECK: frintp
+define float @test15(float %a) #1 {
+entry:
+ %call = tail call float @ceilf(float %a) nounwind readnone
+ ret float %call
+}
+
+; CHECK: test16
+; CHECK-NOT: frintx
+; CHECK: frintp
+define double @test16(double %a) #1 {
+entry:
+ %call = tail call double @ceil(double %a) nounwind readnone
+ ret double %call
+}
+
+; CHECK: test17
+; CHECK-NOT: frintx
+; CHECK: frintz
+define float @test17(float %a) #1 {
+entry:
+ %call = tail call float @truncf(float %a) nounwind readnone
+ ret float %call
+}
+
+; CHECK: test18
+; CHECK-NOT: frintx
+; CHECK: frintz
+define double @test18(double %a) #1 {
+entry:
+ %call = tail call double @trunc(double %a) nounwind readnone
+ ret double %call
+}
+
+; CHECK: test19
+; CHECK-NOT: frintx
+; CHECK: frinta
+define float @test19(float %a) #1 {
+entry:
+ %call = tail call float @roundf(float %a) nounwind readnone
+ ret float %call
+}
+
+; CHECK: test20
+; CHECK-NOT: frintx
+; CHECK: frinta
+define double @test20(double %a) #1 {
+entry:
+ %call = tail call double @round(double %a) nounwind readnone
+ ret double %call
+}
+
+
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind "unsafe-fp-math"="true" }
diff --git a/test/CodeGen/AArch64/arm64-scaled_iv.ll b/test/CodeGen/AArch64/arm64-scaled_iv.ll
new file mode 100644
index 000000000000..987373e542af
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-scaled_iv.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -loop-reduce < %s | FileCheck %s
+; Scaling factors in addressing modes are costly.
+; Make loop-reduce prefer unscaled accesses.
+; <rdar://problem/13806271>
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+; Function Attrs: nounwind ssp
+define void @mulDouble(double* nocapture %a, double* nocapture %b, double* nocapture %c) {
+; CHECK: @mulDouble
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+; CHECK: [[IV:%[^ ]+]] = phi i64 [ [[IVNEXT:%[^,]+]], %for.body ], [ 0, %entry ]
+; Only one induction variable should have been generated.
+; CHECK-NOT: phi
+ %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = add nsw i64 %indvars.iv, -1
+ %arrayidx = getelementptr inbounds double* %b, i64 %tmp
+ %tmp1 = load double* %arrayidx, align 8
+; The induction variable should carry the scaling factor: 1 * 8 = 8.
+; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
+ %tmp2 = load double* %arrayidx2, align 8
+ %mul = fmul double %tmp1, %tmp2
+ %arrayidx4 = getelementptr inbounds double* %a, i64 %indvars.iv
+ store double %mul, double* %arrayidx4, align 8
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+; Comparison should be 19 * 8 = 152.
+; CHECK: icmp eq i32 {{%[^,]+}}, 152
+ %exitcond = icmp eq i32 %lftr.wideiv, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-scvt.ll b/test/CodeGen/AArch64/arm64-scvt.ll
new file mode 100644
index 000000000000..2e006cff159a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-scvt.ll
@@ -0,0 +1,830 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; rdar://13082402
+
+define float @t1(i32* nocapture %src) nounwind ssp {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: ldr s0, [x0]
+; CHECK: scvtf s0, s0
+ %tmp1 = load i32* %src, align 4
+ %tmp2 = sitofp i32 %tmp1 to float
+ ret float %tmp2
+}
+
+define float @t2(i32* nocapture %src) nounwind ssp {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: ldr s0, [x0]
+; CHECK: ucvtf s0, s0
+ %tmp1 = load i32* %src, align 4
+ %tmp2 = uitofp i32 %tmp1 to float
+ ret float %tmp2
+}
+
+define double @t3(i64* nocapture %src) nounwind ssp {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: ldr d0, [x0]
+; CHECK: scvtf d0, d0
+ %tmp1 = load i64* %src, align 4
+ %tmp2 = sitofp i64 %tmp1 to double
+ ret double %tmp2
+}
+
+define double @t4(i64* nocapture %src) nounwind ssp {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: ldr d0, [x0]
+; CHECK: ucvtf d0, d0
+ %tmp1 = load i64* %src, align 4
+ %tmp2 = uitofp i64 %tmp1 to double
+ ret double %tmp2
+}
+
+; rdar://13136456
+define double @t5(i32* nocapture %src) nounwind ssp optsize {
+entry:
+; CHECK-LABEL: t5:
+; CHECK: ldr [[REG:w[0-9]+]], [x0]
+; CHECK: scvtf d0, [[REG]]
+ %tmp1 = load i32* %src, align 4
+ %tmp2 = sitofp i32 %tmp1 to double
+ ret double %tmp2
+}
+
+; Check that we load into an FP register when we want to convert into a
+; floating-point value.
+; This is much faster than loading into a GPR and then converting
+; GPR -> FPR.
+; <rdar://problem/14599607>
+;
+; Check the following patterns for signed/unsigned:
+; 1. load with scaled imm to float.
+; 2. load with scaled register to float.
+; 3. load with scaled imm to double.
+; 4. load with scaled register to double.
+; 5. load with unscaled imm to float.
+; 6. load with unscaled imm to double.
+; With loading size: 8, 16, 32, and 64-bits.
+
+; ********* 1. load with scaled imm to float. *********
+define float @fct1(i8* nocapture %sp0) {
+; CHECK-LABEL: fct1:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 1
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = uitofp i8 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @fct2(i16* nocapture %sp0) {
+; CHECK-LABEL: fct2:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 1
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = uitofp i16 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @fct3(i32* nocapture %sp0) {
+; CHECK-LABEL: fct3:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 1
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = uitofp i32 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; i64 -> f32 is not supported on the floating-point unit.
+define float @fct4(i64* nocapture %sp0) {
+; CHECK-LABEL: fct4:
+; CHECK: ldr x[[REGNUM:[0-9]+]], [x0, #8]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], x[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 1
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = uitofp i64 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; ********* 2. load with scaled register to float. *********
+define float @fct5(i8* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct5:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, x1]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = uitofp i8 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @fct6(i16* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct6:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = uitofp i16 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @fct7(i32* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct7:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = uitofp i32 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; i64 -> f32 is not supported on the floating-point unit.
+define float @fct8(i64* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct8:
+; CHECK: ldr x[[REGNUM:[0-9]+]], [x0, x1, lsl #3]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], x[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = uitofp i64 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+
+; ********* 3. load with scaled imm to double. *********
+define double @fct9(i8* nocapture %sp0) {
+; CHECK-LABEL: fct9:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 1
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = uitofp i8 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct10(i16* nocapture %sp0) {
+; CHECK-LABEL: fct10:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 1
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = uitofp i16 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct11(i32* nocapture %sp0) {
+; CHECK-LABEL: fct11:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 1
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = uitofp i32 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct12(i64* nocapture %sp0) {
+; CHECK-LABEL: fct12:
+; CHECK: ldr d[[REGNUM:[0-9]+]], [x0, #8]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 1
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = uitofp i64 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+; ********* 4. load with scaled register to double. *********
+define double @fct13(i8* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct13:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, x1]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = uitofp i8 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct14(i16* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct14:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = uitofp i16 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct15(i32* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct15:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = uitofp i32 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct16(i64* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct16:
+; CHECK: ldr d[[REGNUM:[0-9]+]], [x0, x1, lsl #3]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = uitofp i64 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+; ********* 5. load with unscaled imm to float. *********
+define float @fct17(i8* nocapture %sp0) {
+entry:
+; CHECK-LABEL: fct17:
+; CHECK: ldur b[[REGNUM:[0-9]+]], [x0, #-1]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i8* %sp0 to i64
+ %add = add i64 %bitcast, -1
+ %addr = inttoptr i64 %add to i8*
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = uitofp i8 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @fct18(i16* nocapture %sp0) {
+; CHECK-LABEL: fct18:
+; CHECK: ldur h[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i16* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i16*
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = uitofp i16 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @fct19(i32* nocapture %sp0) {
+; CHECK-LABEL: fct19:
+; CHECK: ldur s[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i32* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i32*
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = uitofp i32 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; i64 -> f32 is not supported on the floating-point unit.
+define float @fct20(i64* nocapture %sp0) {
+; CHECK-LABEL: fct20:
+; CHECK: ldur x[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], x[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i64* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i64*
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = uitofp i64 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+
+}
+
+; ********* 6. load with unscaled imm to double. *********
+define double @fct21(i8* nocapture %sp0) {
+entry:
+; CHECK-LABEL: fct21:
+; CHECK: ldur b[[REGNUM:[0-9]+]], [x0, #-1]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i8* %sp0 to i64
+ %add = add i64 %bitcast, -1
+ %addr = inttoptr i64 %add to i8*
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = uitofp i8 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct22(i16* nocapture %sp0) {
+; CHECK-LABEL: fct22:
+; CHECK: ldur h[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i16* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i16*
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = uitofp i16 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct23(i32* nocapture %sp0) {
+; CHECK-LABEL: fct23:
+; CHECK: ldur s[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i32* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i32*
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = uitofp i32 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @fct24(i64* nocapture %sp0) {
+; CHECK-LABEL: fct24:
+; CHECK: ldur d[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i64* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i64*
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = uitofp i64 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+
+}
+
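+; The same six patterns again, this time for signed conversions (sitofp).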
+; ********* 1s. load with scaled imm to float. *********
+define float @sfct1(i8* nocapture %sp0) {
+; CHECK-LABEL: sfct1:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: sshll.8h [[SEXTREG1:v[0-9]+]], v[[REGNUM]], #0
+; CHECK-NEXT: sshll.4s v[[SEXTREG:[0-9]+]], [[SEXTREG1]], #0
+; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 1
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = sitofp i8 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @sfct2(i16* nocapture %sp0) {
+; CHECK-LABEL: sfct2:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
+; CHECK-NEXT: sshll.4s v[[SEXTREG:[0-9]+]], v[[REGNUM]], #0
+; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 1
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = sitofp i16 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @sfct3(i32* nocapture %sp0) {
+; CHECK-LABEL: sfct3:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
+; CHECK-NEXT: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 1
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = sitofp i32 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; i64 -> f32 is not supported on the floating-point unit.
+define float @sfct4(i64* nocapture %sp0) {
+; CHECK-LABEL: sfct4:
+; CHECK: ldr x[[REGNUM:[0-9]+]], [x0, #8]
+; CHECK-NEXT: scvtf [[REG:s[0-9]+]], x[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 1
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = sitofp i64 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; ********* 2s. load with scaled register to float. *********
+define float @sfct5(i8* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct5:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, x1]
+; CHECK-NEXT: sshll.8h [[SEXTREG1:v[0-9]+]], v[[REGNUM]], #0
+; CHECK-NEXT: sshll.4s v[[SEXTREG:[0-9]+]], [[SEXTREG1]], #0
+; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = sitofp i8 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @sfct6(i16* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct6:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: sshll.4s v[[SEXTREG:[0-9]+]], v[[REGNUM]], #0
+; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = sitofp i16 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @sfct7(i32* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct7:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
+; CHECK-NEXT: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = sitofp i32 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; i64 -> f32 is not supported on the floating-point unit.
+define float @sfct8(i64* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct8:
+; CHECK: ldr x[[REGNUM:[0-9]+]], [x0, x1, lsl #3]
+; CHECK-NEXT: scvtf [[REG:s[0-9]+]], x[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = sitofp i64 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; ********* 3s. load with scaled imm to double. *********
+define double @sfct9(i8* nocapture %sp0) {
+; CHECK-LABEL: sfct9:
+; CHECK: ldrsb w[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 1
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = sitofp i8 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct10(i16* nocapture %sp0) {
+; CHECK-LABEL: sfct10:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
+; CHECK-NEXT: sshll.4s [[SEXTREG1:v[0-9]+]], v[[REGNUM]], #0
+; CHECK-NEXT: sshll.2d v[[SEXTREG:[0-9]+]], [[SEXTREG1]], #0
+; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 1
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = sitofp i16 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct11(i32* nocapture %sp0) {
+; CHECK-LABEL: sfct11:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
+; CHECK-NEXT: sshll.2d v[[SEXTREG:[0-9]+]], v[[REGNUM]], #0
+; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 1
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = sitofp i32 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct12(i64* nocapture %sp0) {
+; CHECK-LABEL: sfct12:
+; CHECK: ldr d[[REGNUM:[0-9]+]], [x0, #8]
+; CHECK-NEXT: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 1
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = sitofp i64 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+; ********* 4s. load with scaled register to double. *********
+define double @sfct13(i8* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct13:
+; CHECK: ldrsb w[[REGNUM:[0-9]+]], [x0, x1]
+; CHECK-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = sitofp i8 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct14(i16* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct14:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: sshll.4s [[SEXTREG1:v[0-9]+]], v[[REGNUM]], #0
+; CHECK-NEXT: sshll.2d v[[SEXTREG:[0-9]+]], [[SEXTREG1]], #0
+; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = sitofp i16 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct15(i32* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct15:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
+; CHECK-NEXT: sshll.2d v[[SEXTREG:[0-9]+]], v[[REGNUM]], #0
+; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = sitofp i32 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct16(i64* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: sfct16:
+; CHECK: ldr d[[REGNUM:[0-9]+]], [x0, x1, lsl #3]
+; CHECK-NEXT: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i64* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = sitofp i64 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+; ********* 5s. load with unscaled imm to float. *********
+define float @sfct17(i8* nocapture %sp0) {
+entry:
+; CHECK-LABEL: sfct17:
+; CHECK: ldur b[[REGNUM:[0-9]+]], [x0, #-1]
+; CHECK-NEXT: sshll.8h [[SEXTREG1:v[0-9]+]], v[[REGNUM]], #0
+; CHECK-NEXT: sshll.4s v[[SEXTREG:[0-9]+]], [[SEXTREG1]], #0
+; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i8* %sp0 to i64
+ %add = add i64 %bitcast, -1
+ %addr = inttoptr i64 %add to i8*
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = sitofp i8 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @sfct18(i16* nocapture %sp0) {
+; CHECK-LABEL: sfct18:
+; CHECK: ldur h[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: sshll.4s v[[SEXTREG:[0-9]+]], v[[REGNUM]], #0
+; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i16* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i16*
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = sitofp i16 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define float @sfct19(i32* nocapture %sp0) {
+; CHECK-LABEL: sfct19:
+; CHECK: ldur s[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i32* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i32*
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = sitofp i32 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+; i64 -> f32 is not supported on the floating-point unit.
+define float @sfct20(i64* nocapture %sp0) {
+; CHECK-LABEL: sfct20:
+; CHECK: ldur x[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: scvtf [[REG:s[0-9]+]], x[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i64* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i64*
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = sitofp i64 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+
+}
+
+; ********* 6s. load with unscaled imm to double. *********
+define double @sfct21(i8* nocapture %sp0) {
+entry:
+; CHECK-LABEL: sfct21:
+; CHECK: ldursb w[[REGNUM:[0-9]+]], [x0, #-1]
+; CHECK-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i8* %sp0 to i64
+ %add = add i64 %bitcast, -1
+ %addr = inttoptr i64 %add to i8*
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = sitofp i8 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct22(i16* nocapture %sp0) {
+; CHECK-LABEL: sfct22:
+; CHECK: ldur h[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: sshll.4s [[SEXTREG1:v[0-9]+]], v[[REGNUM]], #0
+; CHECK-NEXT: sshll.2d v[[SEXTREG:[0-9]+]], [[SEXTREG1]], #0
+; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i16* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i16*
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %val = sitofp i16 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct23(i32* nocapture %sp0) {
+; CHECK-LABEL: sfct23:
+; CHECK: ldur s[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: sshll.2d v[[SEXTREG:[0-9]+]], v[[REGNUM]], #0
+; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i32* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i32*
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = sitofp i32 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+define double @sfct24(i64* nocapture %sp0) {
+; CHECK-LABEL: sfct24:
+; CHECK: ldur d[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i64* %sp0 to i64
+ %add = add i64 %bitcast, 1
+ %addr = inttoptr i64 %add to i64*
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %val = sitofp i64 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+
+}
+
+; Check that we do not use the SSHLL code sequence when code size is a concern.
+define float @codesize_sfct17(i8* nocapture %sp0) optsize {
+entry:
+; CHECK-LABEL: codesize_sfct17:
+; CHECK: ldursb w[[REGNUM:[0-9]+]], [x0, #-1]
+; CHECK-NEXT: scvtf [[REG:s[0-9]+]], w[[REGNUM]]
+; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
+ %bitcast = ptrtoint i8* %sp0 to i64
+ %add = add i64 %bitcast, -1
+ %addr = inttoptr i64 %add to i8*
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %val = sitofp i8 %pix_sp0.0.copyload to float
+ %vmull.i = fmul float %val, %val
+ ret float %vmull.i
+}
+
+define double @codesize_sfct11(i32* nocapture %sp0) minsize {
+; CHECK-LABEL: codesize_sfct11:
+; CHECK: ldr w[[REGNUM:[0-9]+]], [x0, #4]
+; CHECK-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
+; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 1
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %val = sitofp i32 %pix_sp0.0.copyload to double
+ %vmull.i = fmul double %val, %val
+ ret double %vmull.i
+}
+
+; Adding fp128 custom lowering makes these a little fragile since we have to
+; return the correct mix of Legal/Expand from the custom method.
+;
+; rdar://problem/14991489
+
+define float @float_from_i128(i128 %in) {
+; CHECK-LABEL: float_from_i128:
+; CHECK: bl {{_?__floatuntisf}}
+ %conv = uitofp i128 %in to float
+ ret float %conv
+}
+
+define double @double_from_i128(i128 %in) {
+; CHECK-LABEL: double_from_i128:
+; CHECK: bl {{_?__floattidf}}
+ %conv = sitofp i128 %in to double
+ ret double %conv
+}
+
+define fp128 @fp128_from_i128(i128 %in) {
+; CHECK-LABEL: fp128_from_i128:
+; CHECK: bl {{_?__floatuntitf}}
+ %conv = uitofp i128 %in to fp128
+ ret fp128 %conv
+}
+
+define i128 @i128_from_float(float %in) {
+; CHECK-LABEL: i128_from_float
+; CHECK: bl {{_?__fixsfti}}
+ %conv = fptosi float %in to i128
+ ret i128 %conv
+}
+
+define i128 @i128_from_double(double %in) {
+; CHECK-LABEL: i128_from_double
+; CHECK: bl {{_?__fixunsdfti}}
+ %conv = fptoui double %in to i128
+ ret i128 %conv
+}
+
+define i128 @i128_from_fp128(fp128 %in) {
+; CHECK-LABEL: i128_from_fp128
+; CHECK: bl {{_?__fixtfti}}
+ %conv = fptosi fp128 %in to i128
+ ret i128 %conv
+}
+
diff --git a/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll b/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
new file mode 100644
index 000000000000..045c9cd9aeb7
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -asm-verbose=false -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
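+; The zext of the i1 compare result is either 0 or 1, so the sitofp folds into
+; masking a splat of 1.0 with the compare mask.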
+define <4 x float> @foo(<4 x float> %val, <4 x float> %test) nounwind {
+; CHECK-LABEL: foo:
+; CHECK-NEXT: fcmeq.4s v0, v0, v1
+; CHECK-NEXT: fmov.4s v1, #1.00000000
+; CHECK-NEXT: and.16b v0, v0, v1
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq <4 x float> %val, %test
+ %ext = zext <4 x i1> %cmp to <4 x i32>
+ %result = sitofp <4 x i32> %ext to <4 x float>
+ ret <4 x float> %result
+}
+; Make sure the operation doesn't try to get folded when the sizes don't match,
+; as that ends up crashing later when trying to form a bitcast operation for
+; the folded nodes.
+define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK: movi.4s
+; CHECK: scvtf.2d
+; CHECK: scvtf.2d
+ %cmp = fcmp oeq <4 x float> %val, %test
+ %ext = zext <4 x i1> %cmp to <4 x i32>
+ %result = sitofp <4 x i32> %ext to <4 x double>
+ store <4 x double> %result, <4 x double>* %p
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-shifted-sext.ll b/test/CodeGen/AArch64/arm64-shifted-sext.ll
new file mode 100644
index 000000000000..b7b4e5de1d5c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-shifted-sext.ll
@@ -0,0 +1,277 @@
+; RUN: llc -march=arm64 -mtriple=arm64-apple-ios < %s | FileCheck %s
+;
+; <rdar://problem/13820218>
+
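+; A sign extension followed by a constant shift should be combined into a
+; single SBFIZ (shift left) or SBFX (shift right) bitfield instruction where
+; possible.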
+define signext i16 @extendedLeftShiftcharToshortBy4(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftcharToshortBy4:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfiz w0, [[REG]], #4, #8
+ %inc = add i8 %a, 1
+ %conv1 = sext i8 %inc to i32
+ %shl = shl nsw i32 %conv1, 4
+ %conv2 = trunc i32 %shl to i16
+ ret i16 %conv2
+}
+
+define signext i16 @extendedRightShiftcharToshortBy4(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftcharToshortBy4:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfx w0, [[REG]], #4, #4
+ %inc = add i8 %a, 1
+ %conv1 = sext i8 %inc to i32
+ %shr4 = lshr i32 %conv1, 4
+ %conv2 = trunc i32 %shr4 to i16
+ ret i16 %conv2
+}
+
+define signext i16 @extendedLeftShiftcharToshortBy8(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftcharToshortBy8:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfiz w0, [[REG]], #8, #8
+ %inc = add i8 %a, 1
+ %conv1 = sext i8 %inc to i32
+ %shl = shl nsw i32 %conv1, 8
+ %conv2 = trunc i32 %shl to i16
+ ret i16 %conv2
+}
+
+define signext i16 @extendedRightShiftcharToshortBy8(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftcharToshortBy8:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sxtb [[REG]], [[REG]]
+; CHECK: asr w0, [[REG]], #8
+ %inc = add i8 %a, 1
+ %conv1 = sext i8 %inc to i32
+ %shr4 = lshr i32 %conv1, 8
+ %conv2 = trunc i32 %shr4 to i16
+ ret i16 %conv2
+}
+
+define i32 @extendedLeftShiftcharTointBy4(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftcharTointBy4:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfiz w0, [[REG]], #4, #8
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i32
+ %shl = shl nsw i32 %conv, 4
+ ret i32 %shl
+}
+
+define i32 @extendedRightShiftcharTointBy4(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftcharTointBy4:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfx w0, [[REG]], #4, #4
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i32
+ %shr = ashr i32 %conv, 4
+ ret i32 %shr
+}
+
+define i32 @extendedLeftShiftcharTointBy8(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftcharTointBy8:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfiz w0, [[REG]], #8, #8
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i32
+ %shl = shl nsw i32 %conv, 8
+ ret i32 %shl
+}
+
+define i32 @extendedRightShiftcharTointBy8(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftcharTointBy8:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sxtb [[REG]], [[REG]]
+; CHECK: asr w0, [[REG]], #8
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i32
+ %shr = ashr i32 %conv, 8
+ ret i32 %shr
+}
+
+define i64 @extendedLeftShiftcharToint64By4(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftcharToint64By4:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfiz x0, x[[REG]], #4, #8
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i64
+ %shl = shl nsw i64 %conv, 4
+ ret i64 %shl
+}
+
+define i64 @extendedRightShiftcharToint64By4(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftcharToint64By4:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfx x0, x[[REG]], #4, #4
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i64
+ %shr = ashr i64 %conv, 4
+ ret i64 %shr
+}
+
+define i64 @extendedLeftShiftcharToint64By8(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftcharToint64By8:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfiz x0, x[[REG]], #8, #8
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i64
+ %shl = shl nsw i64 %conv, 8
+ ret i64 %shl
+}
+
+define i64 @extendedRightShiftcharToint64By8(i8 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftcharToint64By8:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sxtb x[[REG]], w[[REG]]
+; CHECK: asr x0, x[[REG]], #8
+ %inc = add i8 %a, 1
+ %conv = sext i8 %inc to i64
+ %shr = ashr i64 %conv, 8
+ ret i64 %shr
+}
+
+define i32 @extendedLeftShiftshortTointBy4(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftshortTointBy4:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfiz w0, [[REG]], #4, #16
+ %inc = add i16 %a, 1
+ %conv = sext i16 %inc to i32
+ %shl = shl nsw i32 %conv, 4
+ ret i32 %shl
+}
+
+define i32 @extendedRightShiftshortTointBy4(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftshortTointBy4:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sbfx w0, [[REG]], #4, #12
+ %inc = add i16 %a, 1
+ %conv = sext i16 %inc to i32
+ %shr = ashr i32 %conv, 4
+ ret i32 %shr
+}
+
+define i32 @extendedLeftShiftshortTointBy16(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftshortTointBy16:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: lsl w0, [[REG]], #16
+ %inc = add i16 %a, 1
+ %conv2 = zext i16 %inc to i32
+ %shl = shl nuw i32 %conv2, 16
+ ret i32 %shl
+}
+
+define i32 @extendedRightShiftshortTointBy16(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftshortTointBy16:
+; CHECK: add [[REG:w[0-9]+]], w0, #1
+; CHECK: sxth [[REG]], [[REG]]
+; CHECK: asr w0, [[REG]], #16
+ %inc = add i16 %a, 1
+ %conv = sext i16 %inc to i32
+ %shr = ashr i32 %conv, 16
+ ret i32 %shr
+}
+
+define i64 @extendedLeftShiftshortToint64By4(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftshortToint64By4:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfiz x0, x[[REG]], #4, #16
+ %inc = add i16 %a, 1
+ %conv = sext i16 %inc to i64
+ %shl = shl nsw i64 %conv, 4
+ ret i64 %shl
+}
+
+define i64 @extendedRightShiftshortToint64By4(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftshortToint64By4:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfx x0, x[[REG]], #4, #12
+ %inc = add i16 %a, 1
+ %conv = sext i16 %inc to i64
+ %shr = ashr i64 %conv, 4
+ ret i64 %shr
+}
+
+define i64 @extendedLeftShiftshortToint64By16(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftshortToint64By16:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfiz x0, x[[REG]], #16, #16
+ %inc = add i16 %a, 1
+ %conv = sext i16 %inc to i64
+ %shl = shl nsw i64 %conv, 16
+ ret i64 %shl
+}
+
+define i64 @extendedRightShiftshortToint64By16(i16 signext %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftshortToint64By16:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sxth x[[REG]], w[[REG]]
+; CHECK: asr x0, x[[REG]], #16
+ %inc = add i16 %a, 1
+ %conv = sext i16 %inc to i64
+ %shr = ashr i64 %conv, 16
+ ret i64 %shr
+}
+
+define i64 @extendedLeftShiftintToint64By4(i32 %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftintToint64By4:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfiz x0, x[[REG]], #4, #32
+ %inc = add nsw i32 %a, 1
+ %conv = sext i32 %inc to i64
+ %shl = shl nsw i64 %conv, 4
+ ret i64 %shl
+}
+
+define i64 @extendedRightShiftintToint64By4(i32 %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftintToint64By4:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sbfx x0, x[[REG]], #4, #28
+ %inc = add nsw i32 %a, 1
+ %conv = sext i32 %inc to i64
+ %shr = ashr i64 %conv, 4
+ ret i64 %shr
+}
+
+define i64 @extendedLeftShiftintToint64By32(i32 %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedLeftShiftintToint64By32:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: lsl x0, x[[REG]], #32
+ %inc = add nsw i32 %a, 1
+ %conv2 = zext i32 %inc to i64
+ %shl = shl nuw i64 %conv2, 32
+ ret i64 %shl
+}
+
+define i64 @extendedRightShiftintToint64By32(i32 %a) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: extendedRightShiftintToint64By32:
+; CHECK: add w[[REG:[0-9]+]], w0, #1
+; CHECK: sxtw x[[REG]], w[[REG]]
+; CHECK: asr x0, x[[REG]], #32
+ %inc = add nsw i32 %a, 1
+ %conv = sext i32 %inc to i64
+ %shr = ashr i64 %conv, 32
+ ret i64 %shr
+}
diff --git a/test/CodeGen/AArch64/arm64-shrink-v1i64.ll b/test/CodeGen/AArch64/arm64-shrink-v1i64.ll
new file mode 100644
index 000000000000..f31a5702761c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-shrink-v1i64.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=arm64 < %s
+
+; The DAGCombiner tries to do the following shrink:
+; Convert x+y to (VT)((SmallVT)x+(SmallVT)y)
+; But currently it cannot handle vector types and will trigger an assertion
+; failure when it tries to generate an add that mixes a vector type and a
+; scalar type. This test checks that no such assertion failure happens.
+define <1 x i64> @dotest(<1 x i64> %in0) {
+entry:
+ %0 = add <1 x i64> %in0, %in0
+ %vshl_n = shl <1 x i64> %0, <i64 32>
+ %vsra_n = ashr <1 x i64> %vshl_n, <i64 32>
+ ret <1 x i64> %vsra_n
+}
diff --git a/test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll b/test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
new file mode 100644
index 000000000000..aed39e7ed8cb
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST
+
+define <16 x i8> @foo(<16 x i8> %a) nounwind optsize readnone ssp {
+; CHECK: uaddlv.16b h0, v0
+; CHECK: rshrn.8b v0, v0, #4
+; CHECK: dup.16b v0, v0[0]
+; CHECK: ret
+
+; CHECK-FAST: uaddlv.16b
+; CHECK-FAST: rshrn.8b
+; CHECK-FAST: dup.16b
+ %tmp = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) nounwind
+ %tmp1 = trunc i32 %tmp to i16
+ %tmp2 = insertelement <8 x i16> undef, i16 %tmp1, i32 0
+ %tmp3 = tail call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp2, i32 4)
+ %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <16 x i32> zeroinitializer
+ ret <16 x i8> %tmp4
+}
+
+declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-simplest-elf.ll b/test/CodeGen/AArch64/arm64-simplest-elf.ll
new file mode 100644
index 000000000000..1254365b8205
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-simplest-elf.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=arm64-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-linux-gnu -filetype=obj < %s | llvm-objdump - -r -d --triple=arm64-linux-gnu | FileCheck --check-prefix=CHECK-ELF %s
+
+define void @foo() nounwind {
+ ret void
+}
+
+ ; Check source looks ELF-like: no leading underscore, comments with //
+; CHECK: foo: // @foo
+; CHECK: ret
+
+ ; Similarly make sure ELF output works and is vaguely sane: aarch64 target
+ ; machine with correct section & symbol names.
+; CHECK-ELF: file format ELF64-aarch64
+
+; CHECK-ELF: Disassembly of section .text
+; CHECK-ELF-LABEL: foo:
+; CHECK-ELF: ret
diff --git a/test/CodeGen/AArch64/arm64-sincos.ll b/test/CodeGen/AArch64/arm64-sincos.ll
new file mode 100644
index 000000000000..06157b2580c4
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-sincos.ll
@@ -0,0 +1,42 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7 | FileCheck %s --check-prefix CHECK-IOS
+; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s --check-prefix CHECK-LINUX
+
+; Combine sin / cos into a single call.
+; rdar://12856873
+
+define float @test1(float %x) nounwind {
+entry:
+; CHECK-IOS-LABEL: test1:
+; CHECK-IOS: bl ___sincosf_stret
+; CHECK-IOS: fadd s0, s0, s1
+
+; CHECK-LINUX-LABEL: test1:
+; CHECK-LINUX: bl sinf
+; CHECK-LINUX: bl cosf
+
+ %call = tail call float @sinf(float %x) nounwind readnone
+ %call1 = tail call float @cosf(float %x) nounwind readnone
+ %add = fadd float %call, %call1
+ ret float %add
+}
+
+define double @test2(double %x) nounwind {
+entry:
+; CHECK-IOS-LABEL: test2:
+; CHECK-IOS: bl ___sincos_stret
+; CHECK-IOS: fadd d0, d0, d1
+
+; CHECK-LINUX-LABEL: test2:
+; CHECK-LINUX: bl sin
+; CHECK-LINUX: bl cos
+
+ %call = tail call double @sin(double %x) nounwind readnone
+ %call1 = tail call double @cos(double %x) nounwind readnone
+ %add = fadd double %call, %call1
+ ret double %add
+}
+
+declare float @sinf(float) readonly
+declare double @sin(double) readonly
+declare float @cosf(float) readonly
+declare double @cos(double) readonly
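
As a contrast to the tests above, a hedged sketch (editorial note, not part of the imported test; names are illustrative) of a case where no single sincos call can be formed, because the two calls take different arguments. The declarations mirror those used in the test.

; sin(%x) and cos(%y) are independent, so combining them into a single
; __sincosf_stret call is not expected to apply here.
declare float @sinf(float) readonly
declare float @cosf(float) readonly

define float @no_combine(float %x, float %y) nounwind {
entry:
  %s = tail call float @sinf(float %x) nounwind readnone
  %c = tail call float @cosf(float %y) nounwind readnone
  %add = fadd float %s, %c
  ret float %add
}
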
diff --git a/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll b/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
new file mode 100644
index 000000000000..10b433b97757
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
@@ -0,0 +1,22 @@
+; RUN: llc -march=arm64 -o - %s | FileCheck %s
+
+; ARM64ISelLowering.cpp was creating a new (floating-point) load for efficiency
+; but not updating the chain successors of the old one. As a result, the two
+; memory operations in this function both ended up as direct successors of the
+; EntryToken and could be reordered.
+
+@var = global i32 0, align 4
+
+define float @foo() {
+; CHECK-LABEL: foo:
+ ; Load must come before we clobber @var
+; CHECK: adrp x[[VARBASE:[0-9]+]], {{_?var}}
+; CHECK: ldr [[SREG:s[0-9]+]], [x[[VARBASE]],
+; CHECK: str wzr, [x[[VARBASE]],
+
+ %val = load i32* @var, align 4
+ store i32 0, i32* @var, align 4
+
+ %fltval = sitofp i32 %val to float
+ ret float %fltval
+}
diff --git a/test/CodeGen/AArch64/arm64-sli-sri-opt.ll b/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
new file mode 100644
index 000000000000..7fec53993bc1
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
@@ -0,0 +1,41 @@
+; RUN: llc -aarch64-shift-insert-generation=true -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+define void @testLeftGood(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+; CHECK-LABEL: testLeftGood:
+; CHECK: sli.16b v0, v1, #3
+ %and.i = and <16 x i8> %src1, <i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252>
+ %vshl_n = shl <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %result = or <16 x i8> %and.i, %vshl_n
+ store <16 x i8> %result, <16 x i8>* %dest, align 16
+ ret void
+}
+
+define void @testLeftBad(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+; CHECK-LABEL: testLeftBad:
+; CHECK-NOT: sli
+ %and.i = and <16 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
+ %vshl_n = shl <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %result = or <16 x i8> %and.i, %vshl_n
+ store <16 x i8> %result, <16 x i8>* %dest, align 16
+ ret void
+}
+
+define void @testRightGood(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+; CHECK-LABEL: testRightGood:
+; CHECK: sri.16b v0, v1, #3
+ %and.i = and <16 x i8> %src1, <i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252>
+ %vshl_n = lshr <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %result = or <16 x i8> %and.i, %vshl_n
+ store <16 x i8> %result, <16 x i8>* %dest, align 16
+ ret void
+}
+
+define void @testRightBad(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+; CHECK-LABEL: testRightBad:
+; CHECK-NOT: sri
+ %and.i = and <16 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
+ %vshl_n = lshr <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %result = or <16 x i8> %and.i, %vshl_n
+ store <16 x i8> %result, <16 x i8>* %dest, align 16
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-smaxv.ll b/test/CodeGen/AArch64/arm64-smaxv.ll
new file mode 100644
index 000000000000..183e667643cc
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-smaxv.ll
@@ -0,0 +1,74 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+define signext i8 @test_vmaxv_s8(<8 x i8> %a1) {
+; CHECK: test_vmaxv_s8
+; CHECK: smaxv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a1)
+ %0 = trunc i32 %vmaxv.i to i8
+ ret i8 %0
+}
+
+define signext i16 @test_vmaxv_s16(<4 x i16> %a1) {
+; CHECK: test_vmaxv_s16
+; CHECK: smaxv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a1)
+ %0 = trunc i32 %vmaxv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vmaxv_s32(<2 x i32> %a1) {
+; CHECK: test_vmaxv_s32
+; 2 x i32 is not supported by the ISA, so this is a special case
+; CHECK: smaxp.2s v[[REGNUM:[0-9]+]], v0, v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> %a1)
+ ret i32 %vmaxv.i
+}
+
+define signext i8 @test_vmaxvq_s8(<16 x i8> %a1) {
+; CHECK: test_vmaxvq_s8
+; CHECK: smaxv.16b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a1)
+ %0 = trunc i32 %vmaxv.i to i8
+ ret i8 %0
+}
+
+define signext i16 @test_vmaxvq_s16(<8 x i16> %a1) {
+; CHECK: test_vmaxvq_s16
+; CHECK: smaxv.8h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a1)
+ %0 = trunc i32 %vmaxv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_s32(<4 x i32> %a1) {
+; CHECK: test_vmaxvq_s32
+; CHECK: smaxv.4s [[REGNUM:s[0-9]+]], v0
+; CHECK-NEXT: fmov w0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a1)
+ ret i32 %vmaxv.i
+}
+
+declare i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>)
+declare i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8>)
+
diff --git a/test/CodeGen/AArch64/arm64-sminv.ll b/test/CodeGen/AArch64/arm64-sminv.ll
new file mode 100644
index 000000000000..195c4e59dc41
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-sminv.ll
@@ -0,0 +1,74 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+define signext i8 @test_vminv_s8(<8 x i8> %a1) {
+; CHECK: test_vminv_s8
+; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a1)
+ %0 = trunc i32 %vminv.i to i8
+ ret i8 %0
+}
+
+define signext i16 @test_vminv_s16(<4 x i16> %a1) {
+; CHECK: test_vminv_s16
+; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a1)
+ %0 = trunc i32 %vminv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vminv_s32(<2 x i32> %a1) {
+; CHECK: test_vminv_s32
+; 2 x i32 is not supported by the ISA, so this is a special case
+; CHECK: sminp.2s v[[REGNUM:[0-9]+]], v0, v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a1)
+ ret i32 %vminv.i
+}
+
+define signext i8 @test_vminvq_s8(<16 x i8> %a1) {
+; CHECK: test_vminvq_s8
+; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a1)
+ %0 = trunc i32 %vminv.i to i8
+ ret i8 %0
+}
+
+define signext i16 @test_vminvq_s16(<8 x i16> %a1) {
+; CHECK: test_vminvq_s16
+; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a1)
+ %0 = trunc i32 %vminv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vminvq_s32(<4 x i32> %a1) {
+; CHECK: test_vminvq_s32
+; CHECK: sminv.4s [[REGNUM:s[0-9]+]], v0
+; CHECK-NEXT: fmov w0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a1)
+ ret i32 %vminv.i
+}
+
+declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
+declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
+declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
+declare i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32>)
+declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
+declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)
+
diff --git a/test/CodeGen/AArch64/arm64-spill-lr.ll b/test/CodeGen/AArch64/arm64-spill-lr.ll
new file mode 100644
index 000000000000..fb6588e6ae46
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-spill-lr.ll
@@ -0,0 +1,74 @@
+; RUN: llc -mtriple=arm64-apple-ios < %s
+@bar = common global i32 0, align 4
+
+; Leaf function which uses all callee-saved registers and allocates >= 256 bytes on the stack.
+; This will cause processFunctionBeforeCalleeSavedScan() to spill LR as an additional scratch
+; register.
+;
+; This is a crash-only regression test for rdar://15124582.
+define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) nounwind {
+entry:
+ %stack = alloca [128 x i32], align 4
+ %0 = bitcast [128 x i32]* %stack to i8*
+ %idxprom = sext i32 %a to i64
+ %arrayidx = getelementptr inbounds [128 x i32]* %stack, i64 0, i64 %idxprom
+ store i32 %b, i32* %arrayidx, align 4
+ %1 = load volatile i32* @bar, align 4
+ %2 = load volatile i32* @bar, align 4
+ %3 = load volatile i32* @bar, align 4
+ %4 = load volatile i32* @bar, align 4
+ %5 = load volatile i32* @bar, align 4
+ %6 = load volatile i32* @bar, align 4
+ %7 = load volatile i32* @bar, align 4
+ %8 = load volatile i32* @bar, align 4
+ %9 = load volatile i32* @bar, align 4
+ %10 = load volatile i32* @bar, align 4
+ %11 = load volatile i32* @bar, align 4
+ %12 = load volatile i32* @bar, align 4
+ %13 = load volatile i32* @bar, align 4
+ %14 = load volatile i32* @bar, align 4
+ %15 = load volatile i32* @bar, align 4
+ %16 = load volatile i32* @bar, align 4
+ %17 = load volatile i32* @bar, align 4
+ %18 = load volatile i32* @bar, align 4
+ %19 = load volatile i32* @bar, align 4
+ %20 = load volatile i32* @bar, align 4
+ %idxprom1 = sext i32 %c to i64
+ %arrayidx2 = getelementptr inbounds [128 x i32]* %stack, i64 0, i64 %idxprom1
+ %21 = load i32* %arrayidx2, align 4
+ %factor = mul i32 %h, -2
+ %factor67 = mul i32 %g, -2
+ %factor68 = mul i32 %f, -2
+ %factor69 = mul i32 %e, -2
+ %factor70 = mul i32 %d, -2
+ %factor71 = mul i32 %c, -2
+ %factor72 = mul i32 %b, -2
+ %sum = add i32 %2, %1
+ %sum73 = add i32 %sum, %3
+ %sum74 = add i32 %sum73, %4
+ %sum75 = add i32 %sum74, %5
+ %sum76 = add i32 %sum75, %6
+ %sum77 = add i32 %sum76, %7
+ %sum78 = add i32 %sum77, %8
+ %sum79 = add i32 %sum78, %9
+ %sum80 = add i32 %sum79, %10
+ %sum81 = add i32 %sum80, %11
+ %sum82 = add i32 %sum81, %12
+ %sum83 = add i32 %sum82, %13
+ %sum84 = add i32 %sum83, %14
+ %sum85 = add i32 %sum84, %15
+ %sum86 = add i32 %sum85, %16
+ %sum87 = add i32 %sum86, %17
+ %sum88 = add i32 %sum87, %18
+ %sum89 = add i32 %sum88, %19
+ %sum90 = add i32 %sum89, %20
+ %sub15 = sub i32 %21, %sum90
+ %sub16 = add i32 %sub15, %factor
+ %sub17 = add i32 %sub16, %factor67
+ %sub18 = add i32 %sub17, %factor68
+ %sub19 = add i32 %sub18, %factor69
+ %sub20 = add i32 %sub19, %factor70
+ %sub21 = add i32 %sub20, %factor71
+ %add = add i32 %sub21, %factor72
+ ret i32 %add
+}
diff --git a/test/CodeGen/AArch64/arm64-spill.ll b/test/CodeGen/AArch64/arm64-spill.ll
new file mode 100644
index 000000000000..47cdc2bd95e4
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-spill.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -verify-machineinstrs
+
+; CHECK: fpr128
+; CHECK: ld1.2d
+; CHECK: str q
+; CHECK: inlineasm
+; CHECK: ldr q
+; CHECK: st1.2d
+define void @fpr128(<4 x float>* %p) nounwind ssp {
+entry:
+ %x = load <4 x float>* %p, align 16
+ call void asm sideeffect "; inlineasm", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{q31},~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
+ store <4 x float> %x, <4 x float>* %p, align 16
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll b/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
new file mode 100644
index 000000000000..3949b85fbd32
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -verify-machineinstrs -march=arm64 | FileCheck %s
+
+; Check that sqshl/uqshl with a constant shift amount can be selected.
+define i64 @test_vqshld_s64_i(i64 %a) {
+; CHECK-LABEL: test_vqshld_s64_i:
+; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #36
+ %1 = tail call i64 @llvm.aarch64.neon.sqshl.i64(i64 %a, i64 36)
+ ret i64 %1
+}
+
+define i64 @test_vqshld_u64_i(i64 %a) {
+; CHECK-LABEL: test_vqshld_u64_i:
+; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #36
+ %1 = tail call i64 @llvm.aarch64.neon.uqshl.i64(i64 %a, i64 36)
+ ret i64 %1
+}
+
+declare i64 @llvm.aarch64.neon.uqshl.i64(i64, i64)
+declare i64 @llvm.aarch64.neon.sqshl.i64(i64, i64)
diff --git a/test/CodeGen/AArch64/arm64-st1.ll b/test/CodeGen/AArch64/arm64-st1.ll
new file mode 100644
index 000000000000..4370484478c0
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-st1.ll
@@ -0,0 +1,676 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+
+define void @st1lane_16b(<16 x i8> %A, i8* %D) {
+; CHECK-LABEL: st1lane_16b
+; CHECK: st1.b
+ %tmp = extractelement <16 x i8> %A, i32 1
+ store i8 %tmp, i8* %D
+ ret void
+}
+
+define void @st1lane_8h(<8 x i16> %A, i16* %D) {
+; CHECK-LABEL: st1lane_8h
+; CHECK: st1.h
+ %tmp = extractelement <8 x i16> %A, i32 1
+ store i16 %tmp, i16* %D
+ ret void
+}
+
+define void @st1lane_4s(<4 x i32> %A, i32* %D) {
+; CHECK-LABEL: st1lane_4s
+; CHECK: st1.s
+ %tmp = extractelement <4 x i32> %A, i32 1
+ store i32 %tmp, i32* %D
+ ret void
+}
+
+define void @st1lane_4s_float(<4 x float> %A, float* %D) {
+; CHECK-LABEL: st1lane_4s_float
+; CHECK: st1.s
+ %tmp = extractelement <4 x float> %A, i32 1
+ store float %tmp, float* %D
+ ret void
+}
+
+define void @st1lane_2d(<2 x i64> %A, i64* %D) {
+; CHECK-LABEL: st1lane_2d
+; CHECK: st1.d
+ %tmp = extractelement <2 x i64> %A, i32 1
+ store i64 %tmp, i64* %D
+ ret void
+}
+
+define void @st1lane_2d_double(<2 x double> %A, double* %D) {
+; CHECK-LABEL: st1lane_2d_double
+; CHECK: st1.d
+ %tmp = extractelement <2 x double> %A, i32 1
+ store double %tmp, double* %D
+ ret void
+}
+
+define void @st1lane_8b(<8 x i8> %A, i8* %D) {
+; CHECK-LABEL: st1lane_8b
+; CHECK: st1.b
+ %tmp = extractelement <8 x i8> %A, i32 1
+ store i8 %tmp, i8* %D
+ ret void
+}
+
+define void @st1lane_4h(<4 x i16> %A, i16* %D) {
+; CHECK-LABEL: st1lane_4h
+; CHECK: st1.h
+ %tmp = extractelement <4 x i16> %A, i32 1
+ store i16 %tmp, i16* %D
+ ret void
+}
+
+define void @st1lane_2s(<2 x i32> %A, i32* %D) {
+; CHECK-LABEL: st1lane_2s
+; CHECK: st1.s
+ %tmp = extractelement <2 x i32> %A, i32 1
+ store i32 %tmp, i32* %D
+ ret void
+}
+
+define void @st1lane_2s_float(<2 x float> %A, float* %D) {
+; CHECK-LABEL: st1lane_2s_float
+; CHECK: st1.s
+ %tmp = extractelement <2 x float> %A, i32 1
+ store float %tmp, float* %D
+ ret void
+}
+
+define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, i8* %D) {
+; CHECK-LABEL: st2lane_16b
+; CHECK: st2.b
+ call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, i64 1, i8* %D)
+ ret void
+}
+
+define void @st2lane_8h(<8 x i16> %A, <8 x i16> %B, i16* %D) {
+; CHECK-LABEL: st2lane_8h
+; CHECK: st2.h
+ call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, i64 1, i16* %D)
+ ret void
+}
+
+define void @st2lane_4s(<4 x i32> %A, <4 x i32> %B, i32* %D) {
+; CHECK-LABEL: st2lane_4s
+; CHECK: st2.s
+ call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, i64 1, i32* %D)
+ ret void
+}
+
+define void @st2lane_2d(<2 x i64> %A, <2 x i64> %B, i64* %D) {
+; CHECK-LABEL: st2lane_2d
+; CHECK: st2.d
+ call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64 1, i64* %D)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*) nounwind readnone
+declare void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*) nounwind readnone
+declare void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*) nounwind readnone
+declare void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
+
+define void @st3lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %D) {
+; CHECK-LABEL: st3lane_16b
+; CHECK: st3.b
+ call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i64 1, i8* %D)
+ ret void
+}
+
+define void @st3lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %D) {
+; CHECK-LABEL: st3lane_8h
+; CHECK: st3.h
+ call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i64 1, i16* %D)
+ ret void
+}
+
+define void @st3lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %D) {
+; CHECK-LABEL: st3lane_4s
+; CHECK: st3.s
+ call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i64 1, i32* %D)
+ ret void
+}
+
+define void @st3lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %D) {
+; CHECK-LABEL: st3lane_2d
+; CHECK: st3.d
+ call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64 1, i64* %D)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readnone
+declare void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readnone
+declare void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readnone
+declare void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
+
+define void @st4lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %E) {
+; CHECK-LABEL: st4lane_16b
+; CHECK: st4.b
+ call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 1, i8* %E)
+ ret void
+}
+
+define void @st4lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %E) {
+; CHECK-LABEL: st4lane_8h
+; CHECK: st4.h
+ call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 1, i16* %E)
+ ret void
+}
+
+define void @st4lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %E) {
+; CHECK-LABEL: st4lane_4s
+; CHECK: st4.s
+ call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 1, i32* %E)
+ ret void
+}
+
+define void @st4lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %E) {
+; CHECK-LABEL: st4lane_2d
+; CHECK: st4.d
+ call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 1, i64* %E)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readnone
+declare void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readnone
+declare void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readnone
+declare void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
+
+
+define void @st2_8b(<8 x i8> %A, <8 x i8> %B, i8* %P) nounwind {
+; CHECK-LABEL: st2_8b
+; CHECK st2.8b
+ call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, i8* %P)
+ ret void
+}
+
+define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %P) nounwind {
+; CHECK-LABEL: st3_8b
+; CHECK st3.8b
+ call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %P)
+ ret void
+}
+
+define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %P) nounwind {
+; CHECK-LABEL: st4_8b
+; CHECK st4.8b
+ call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
+
+define void @st2_16b(<16 x i8> %A, <16 x i8> %B, i8* %P) nounwind {
+; CHECK-LABEL: st2_16b
+; CHECK st2.16b
+ call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, i8* %P)
+ ret void
+}
+
+define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %P) nounwind {
+; CHECK-LABEL: st3_16b
+; CHECK st3.16b
+ call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %P)
+ ret void
+}
+
+define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %P) nounwind {
+; CHECK-LABEL: st4_16b
+; CHECK st4.16b
+ call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
+
+define void @st2_4h(<4 x i16> %A, <4 x i16> %B, i16* %P) nounwind {
+; CHECK-LABEL: st2_4h
+; CHECK st2.4h
+ call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, i16* %P)
+ ret void
+}
+
+define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %P) nounwind {
+; CHECK-LABEL: st3_4h
+; CHECK st3.4h
+ call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %P)
+ ret void
+}
+
+define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %P) nounwind {
+; CHECK-LABEL: st4_4h
+; CHECK st4.4h
+ call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
+
+define void @st2_8h(<8 x i16> %A, <8 x i16> %B, i16* %P) nounwind {
+; CHECK-LABEL: st2_8h
+; CHECK st2.8h
+ call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, i16* %P)
+ ret void
+}
+
+define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %P) nounwind {
+; CHECK-LABEL: st3_8h
+; CHECK st3.8h
+ call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %P)
+ ret void
+}
+
+define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %P) nounwind {
+; CHECK-LABEL: st4_8h
+; CHECK st4.8h
+ call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
+
+define void @st2_2s(<2 x i32> %A, <2 x i32> %B, i32* %P) nounwind {
+; CHECK-LABEL: st2_2s
+; CHECK st2.2s
+ call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, i32* %P)
+ ret void
+}
+
+define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %P) nounwind {
+; CHECK-LABEL: st3_2s
+; CHECK st3.2s
+ call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %P)
+ ret void
+}
+
+define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %P) nounwind {
+; CHECK-LABEL: st4_2s
+; CHECK st4.2s
+ call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
+
+define void @st2_4s(<4 x i32> %A, <4 x i32> %B, i32* %P) nounwind {
+; CHECK-LABEL: st2_4s
+; CHECK st2.4s
+ call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, i32* %P)
+ ret void
+}
+
+define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %P) nounwind {
+; CHECK-LABEL: st3_4s
+; CHECK st3.4s
+ call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %P)
+ ret void
+}
+
+define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %P) nounwind {
+; CHECK-LABEL: st4_4s
+; CHECK st4.4s
+ call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
+
+define void @st2_1d(<1 x i64> %A, <1 x i64> %B, i64* %P) nounwind {
+; CHECK-LABEL: st2_1d
+; CHECK st1.2d
+ call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, i64* %P)
+ ret void
+}
+
+define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %P) nounwind {
+; CHECK-LABEL: st3_1d
+; CHECK st1.3d
+ call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %P)
+ ret void
+}
+
+define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %P) nounwind {
+; CHECK-LABEL: st4_1d
+; CHECK st1.4d
+ call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
+
+define void @st2_2d(<2 x i64> %A, <2 x i64> %B, i64* %P) nounwind {
+; CHECK-LABEL: st2_2d
+; CHECK st2.2d
+ call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64* %P)
+ ret void
+}
+
+define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %P) nounwind {
+; CHECK-LABEL: st3_2d
+; CHECK st2.3d
+ call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %P)
+ ret void
+}
+
+define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %P) nounwind {
+; CHECK-LABEL: st4_2d
+; CHECK st2.4d
+ call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %P)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
+
+declare void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float>, <2 x float>, float*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double>, <1 x double>, double*) nounwind readonly
+
+define void @st1_x2_v8i8(<8 x i8> %A, <8 x i8> %B, i8* %addr) {
+; CHECK-LABEL: st1_x2_v8i8:
+; CHECK: st1.8b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, i8* %addr)
+ ret void
+}
+
+define void @st1_x2_v4i16(<4 x i16> %A, <4 x i16> %B, i16* %addr) {
+; CHECK-LABEL: st1_x2_v4i16:
+; CHECK: st1.4h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, i16* %addr)
+ ret void
+}
+
+define void @st1_x2_v2i32(<2 x i32> %A, <2 x i32> %B, i32* %addr) {
+; CHECK-LABEL: st1_x2_v2i32:
+; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, i32* %addr)
+ ret void
+}
+
+define void @st1_x2_v2f32(<2 x float> %A, <2 x float> %B, float* %addr) {
+; CHECK-LABEL: st1_x2_v2f32:
+; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %A, <2 x float> %B, float* %addr)
+ ret void
+}
+
+define void @st1_x2_v1i64(<1 x i64> %A, <1 x i64> %B, i64* %addr) {
+; CHECK-LABEL: st1_x2_v1i64:
+; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, i64* %addr)
+ ret void
+}
+
+define void @st1_x2_v1f64(<1 x double> %A, <1 x double> %B, double* %addr) {
+; CHECK-LABEL: st1_x2_v1f64:
+; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %A, <1 x double> %B, double* %addr)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float>, <4 x float>, float*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double>, <2 x double>, double*) nounwind readonly
+
+define void @st1_x2_v16i8(<16 x i8> %A, <16 x i8> %B, i8* %addr) {
+; CHECK-LABEL: st1_x2_v16i8:
+; CHECK: st1.16b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, i8* %addr)
+ ret void
+}
+
+define void @st1_x2_v8i16(<8 x i16> %A, <8 x i16> %B, i16* %addr) {
+; CHECK-LABEL: st1_x2_v8i16:
+; CHECK: st1.8h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, i16* %addr)
+ ret void
+}
+
+define void @st1_x2_v4i32(<4 x i32> %A, <4 x i32> %B, i32* %addr) {
+; CHECK-LABEL: st1_x2_v4i32:
+; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, i32* %addr)
+ ret void
+}
+
+define void @st1_x2_v4f32(<4 x float> %A, <4 x float> %B, float* %addr) {
+; CHECK-LABEL: st1_x2_v4f32:
+; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %A, <4 x float> %B, float* %addr)
+ ret void
+}
+
+define void @st1_x2_v2i64(<2 x i64> %A, <2 x i64> %B, i64* %addr) {
+; CHECK-LABEL: st1_x2_v2i64:
+; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64* %addr)
+ ret void
+}
+
+define void @st1_x2_v2f64(<2 x double> %A, <2 x double> %B, double* %addr) {
+; CHECK-LABEL: st1_x2_v2f64:
+; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %A, <2 x double> %B, double* %addr)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, float*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, double*) nounwind readonly
+
+define void @st1_x3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %addr) {
+; CHECK-LABEL: st1_x3_v8i8:
+; CHECK: st1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %addr)
+ ret void
+}
+
+define void @st1_x3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %addr) {
+; CHECK-LABEL: st1_x3_v4i16:
+; CHECK: st1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %addr)
+ ret void
+}
+
+define void @st1_x3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %addr) {
+; CHECK-LABEL: st1_x3_v2i32:
+; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %addr)
+ ret void
+}
+
+define void @st1_x3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, float* %addr) {
+; CHECK-LABEL: st1_x3_v2f32:
+; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, float* %addr)
+ ret void
+}
+
+define void @st1_x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %addr) {
+; CHECK-LABEL: st1_x3_v1i64:
+; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %addr)
+ ret void
+}
+
+define void @st1_x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, double* %addr) {
+; CHECK-LABEL: st1_x3_v1f64:
+; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, double* %addr)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, double*) nounwind readonly
+
+define void @st1_x3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %addr) {
+; CHECK-LABEL: st1_x3_v16i8:
+; CHECK: st1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %addr)
+ ret void
+}
+
+define void @st1_x3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %addr) {
+; CHECK-LABEL: st1_x3_v8i16:
+; CHECK: st1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %addr)
+ ret void
+}
+
+define void @st1_x3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %addr) {
+; CHECK-LABEL: st1_x3_v4i32:
+; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %addr)
+ ret void
+}
+
+define void @st1_x3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, float* %addr) {
+; CHECK-LABEL: st1_x3_v4f32:
+; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, float* %addr)
+ ret void
+}
+
+define void @st1_x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %addr) {
+; CHECK-LABEL: st1_x3_v2i64:
+; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %addr)
+ ret void
+}
+
+define void @st1_x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, double* %addr) {
+; CHECK-LABEL: st1_x3_v2f64:
+; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, double* %addr)
+ ret void
+}
+
+
+declare void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, float*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, double*) nounwind readonly
+
+define void @st1_x4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %addr) {
+; CHECK-LABEL: st1_x4_v8i8:
+; CHECK: st1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %addr)
+ ret void
+}
+
+define void @st1_x4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %addr) {
+; CHECK-LABEL: st1_x4_v4i16:
+; CHECK: st1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %addr)
+ ret void
+}
+
+define void @st1_x4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %addr) {
+; CHECK-LABEL: st1_x4_v2i32:
+; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %addr)
+ ret void
+}
+
+define void @st1_x4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, float* %addr) {
+; CHECK-LABEL: st1_x4_v2f32:
+; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, float* %addr)
+ ret void
+}
+
+define void @st1_x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %addr) {
+; CHECK-LABEL: st1_x4_v1i64:
+; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %addr)
+ ret void
+}
+
+define void @st1_x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, double* %addr) {
+; CHECK-LABEL: st1_x4_v1f64:
+; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, double* %addr)
+ ret void
+}
+
+declare void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, double*) nounwind readonly
+
+define void @st1_x4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %addr) {
+; CHECK-LABEL: st1_x4_v16i8:
+; CHECK: st1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %addr)
+ ret void
+}
+
+define void @st1_x4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %addr) {
+; CHECK-LABEL: st1_x4_v8i16:
+; CHECK: st1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %addr)
+ ret void
+}
+
+define void @st1_x4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %addr) {
+; CHECK-LABEL: st1_x4_v4i32:
+; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %addr)
+ ret void
+}
+
+define void @st1_x4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, float* %addr) {
+; CHECK-LABEL: st1_x4_v4f32:
+; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, float* %addr)
+ ret void
+}
+
+define void @st1_x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %addr) {
+; CHECK-LABEL: st1_x4_v2i64:
+; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %addr)
+ ret void
+}
+
+define void @st1_x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, double* %addr) {
+; CHECK-LABEL: st1_x4_v2f64:
+; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
+ call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, double* %addr)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-stack-no-frame.ll b/test/CodeGen/AArch64/arm64-stack-no-frame.ll
new file mode 100644
index 000000000000..b5970c00ff94
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-stack-no-frame.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s
+
+@global = global [20 x i64] zeroinitializer, align 8
+
+; The following function has enough locals to need some restoring, but not a
+; frame record. In an intermediate frame refactoring, the prologue and epilogue
+; were inconsistent about how much to move SP.
+define void @test_stack_no_frame() {
+; CHECK: test_stack_no_frame
+; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+ %local = alloca [20 x i64]
+ %val = load volatile [20 x i64]* @global, align 8
+ store volatile [20 x i64] %val, [20 x i64]* %local, align 8
+
+ %val2 = load volatile [20 x i64]* %local, align 8
+ store volatile [20 x i64] %val2, [20 x i64]* @global, align 8
+
+; CHECK: add sp, sp, #[[STACKSIZE]]
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-stackmap.ll b/test/CodeGen/AArch64/arm64-stackmap.ll
new file mode 100644
index 000000000000..2c7c6ae5d6d5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-stackmap.ll
@@ -0,0 +1,288 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+;
+; Note: Print verbose stackmaps using -debug-only=stackmaps.
+
+; We are not getting the correct stack alignment when cross-compiling for arm64,
+; so specify a datalayout here.
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 11
+; Num LargeConstants
+; CHECK-NEXT: .long 2
+; Num Callsites
+; CHECK-NEXT: .long 11
+
+; Functions and stack size
+; CHECK-NEXT: .quad _constantargs
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _osrinline
+; CHECK-NEXT: .quad 32
+; CHECK-NEXT: .quad _osrcold
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _propertyRead
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _propertyWrite
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _jsVoidCall
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _jsIntCall
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _spilledValue
+; CHECK-NEXT: .quad 160
+; CHECK-NEXT: .quad _spilledStackMapValue
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad _liveConstant
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad _clobberLR
+; CHECK-NEXT: .quad 112
+
+; Num LargeConstants
+; CHECK-NEXT: .quad 4294967295
+; CHECK-NEXT: .quad 4294967296
+
+; Constant arguments
+;
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .long L{{.*}}-_constantargs
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 4
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 65535
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 65536
+; SmallConstant
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 0
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 1
+
+define void @constantargs() {
+entry:
+ %0 = inttoptr i64 244837814094590 to i8*
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 20, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
+ ret void
+}
+
+; Inline OSR Exit
+;
+; CHECK-LABEL: .long L{{.*}}-_osrinline
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @osrinline(i64 %a, i64 %b) {
+entry:
+ ; Runtime void->void call.
+ call void inttoptr (i64 244837814094590 to void ()*)()
+ ; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
+ ret void
+}
+
+; Cold OSR Exit
+;
+; 2 live variables in register.
+;
+; CHECK-LABEL: .long L{{.*}}-_osrcold
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @osrcold(i64 %a, i64 %b) {
+entry:
+ %test = icmp slt i64 %a, %b
+ br i1 %test, label %ret, label %cold
+cold:
+ ; OSR patchpoint with 12-byte nop-slide and 2 live vars.
+ %thunk = inttoptr i64 244837814094590 to i8*
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4, i32 20, i8* %thunk, i32 0, i64 %a, i64 %b)
+ unreachable
+ret:
+ ret void
+}
+
+; Property Read
+; CHECK-LABEL: .long L{{.*}}-_propertyRead
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 0
+;
+; FIXME: There are currently no stackmap entries. After moving to
+; AnyRegCC, we will have entries for the object and return value.
+define i64 @propertyRead(i64* %obj) {
+entry:
+ %resolveRead = inttoptr i64 244837814094590 to i8*
+ %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveRead, i32 1, i64* %obj)
+ %add = add i64 %result, 3
+ ret i64 %add
+}
+
+; Property Write
+; CHECK-LABEL: .long L{{.*}}-_propertyWrite
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
+entry:
+ %resolveWrite = inttoptr i64 244837814094590 to i8*
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ ret void
+}
+
+; Void JS Call
+;
+; 2 live variables in registers.
+;
+; CHECK-LABEL: .long L{{.*}}-_jsVoidCall
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+entry:
+ %resolveCall = inttoptr i64 244837814094590 to i8*
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 7, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ ret void
+}
+
+; i64 JS Call
+;
+; 2 live variables in registers.
+;
+; CHECK-LABEL: .long L{{.*}}-_jsIntCall
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+entry:
+ %resolveCall = inttoptr i64 244837814094590 to i8*
+ %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 8, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %add = add i64 %result, 3
+ ret i64 %add
+}
+
+; Spilled stack map values.
+;
+; Verify 28 stack map entries.
+;
+; CHECK-LABEL: .long L{{.*}}-_spilledValue
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 28
+;
+; Check that at least one is a spilled entry from RBP.
+; Location: Indirect FP + ...
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 29
+define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
+entry:
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 11, i32 20, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+ ret void
+}
+
+; Spilled stack map values.
+;
+; Verify 30 stack map entries.
+;
+; CHECK-LABEL: .long L{{.*}}-_spilledStackMapValue
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 30
+;
+; Check that at least one is a spilled entry from RBP.
+; Location: Indirect FP + ...
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 29
+define webkit_jscc void @spilledStackMapValue(i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29) {
+entry:
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 16, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29)
+ ret void
+}
+
+
+; Map a constant value.
+;
+; CHECK-LABEL: .long L{{.*}}-_liveConstant
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 33
+
+define void @liveConstant() {
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 8, i32 33)
+ ret void
+}
+
+; Map a value when LR is the only free register.
+;
+; CHECK-LABEL: .long L{{.*}}-_clobberLR
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Indirect FP (r29) - offset
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short 29
+; CHECK-NEXT: .long -{{[0-9]+}}
+define void @clobberLR(i32 %a) {
+ tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x31}"() nounwind
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/AArch64/arm64-stackpointer.ll b/test/CodeGen/AArch64/arm64-stackpointer.ll
new file mode 100644
index 000000000000..581faf130f10
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-stackpointer.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s
+
+define i64 @get_stack() nounwind {
+entry:
+; CHECK-LABEL: get_stack:
+; CHECK: mov x0, sp
+ %sp = call i64 @llvm.read_register.i64(metadata !0)
+ ret i64 %sp
+}
+
+define void @set_stack(i64 %val) nounwind {
+entry:
+; CHECK-LABEL: set_stack:
+; CHECK: mov sp, x0
+ call void @llvm.write_register.i64(metadata !0, i64 %val)
+ ret void
+}
+
+declare i64 @llvm.read_register.i64(metadata) nounwind
+declare void @llvm.write_register.i64(metadata, i64) nounwind
+
+; register unsigned long current_stack_pointer asm("sp");
+; CHECK-NOT: .asciz "sp"
+!0 = metadata !{metadata !"sp\00"}
diff --git a/test/CodeGen/AArch64/arm64-stacksave.ll b/test/CodeGen/AArch64/arm64-stacksave.ll
new file mode 100644
index 000000000000..a79e99ba3234
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-stacksave.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -verify-coalescing
+; <rdar://problem/11522048>
+target triple = "arm64-apple-macosx10.8.0"
+
+; Verify that we can handle spilling the stack pointer without attempting
+; to spill it directly.
+; CHECK: f
+; CHECK: mov [[X0:x[0-9]+]], sp
+; CHECK: str [[X0]]
+; CHECK: inlineasm
+define void @f() nounwind ssp {
+entry:
+ %savedstack = call i8* @llvm.stacksave() nounwind
+ call void asm sideeffect "; inlineasm", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
+ call void @llvm.stackrestore(i8* %savedstack) nounwind
+ ret void
+}
+
+declare i8* @llvm.stacksave() nounwind
+declare void @llvm.stackrestore(i8*) nounwind
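
For context, a sketch (editorial note, not part of the imported test; the function name is illustrative) of the usual stacksave/stackrestore pairing around a dynamically sized alloca; this is the kind of IR in which the copy of sp has to be spillable.

declare i8* @llvm.stacksave() nounwind
declare void @llvm.stackrestore(i8*) nounwind

; Save sp, carve out %n bytes, then restore sp at the end of the scope.
define void @scoped_alloca(i64 %n) nounwind {
entry:
  %saved = call i8* @llvm.stacksave() nounwind
  %buf = alloca i8, i64 %n, align 16
  call void @llvm.stackrestore(i8* %saved) nounwind
  ret void
}
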
diff --git a/test/CodeGen/AArch64/arm64-stp.ll b/test/CodeGen/AArch64/arm64-stp.ll
new file mode 100644
index 000000000000..40bdf22c995c
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-stp.ll
@@ -0,0 +1,101 @@
+; RUN: llc < %s -march=arm64 -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-unscaled-mem-op=true\
+; RUN: -verify-machineinstrs -mcpu=cyclone | FileCheck -check-prefix=STUR_CHK %s
+
+; CHECK: stp_int
+; CHECK: stp w0, w1, [x2]
+define void @stp_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
+ store i32 %a, i32* %p, align 4
+ %add.ptr = getelementptr inbounds i32* %p, i64 1
+ store i32 %b, i32* %add.ptr, align 4
+ ret void
+}
+
+; CHECK: stp_long
+; CHECK: stp x0, x1, [x2]
+define void @stp_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
+ store i64 %a, i64* %p, align 8
+ %add.ptr = getelementptr inbounds i64* %p, i64 1
+ store i64 %b, i64* %add.ptr, align 8
+ ret void
+}
+
+; CHECK: stp_float
+; CHECK: stp s0, s1, [x0]
+define void @stp_float(float %a, float %b, float* nocapture %p) nounwind {
+ store float %a, float* %p, align 4
+ %add.ptr = getelementptr inbounds float* %p, i64 1
+ store float %b, float* %add.ptr, align 4
+ ret void
+}
+
+; CHECK: stp_double
+; CHECK: stp d0, d1, [x0]
+define void @stp_double(double %a, double %b, double* nocapture %p) nounwind {
+ store double %a, double* %p, align 8
+ %add.ptr = getelementptr inbounds double* %p, i64 1
+ store double %b, double* %add.ptr, align 8
+ ret void
+}
+
+; Test the load/store optimizer: combine sturs into an stp, if appropriate.
+define void @stur_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
+; STUR_CHK: stur_int
+; STUR_CHK: stp w{{[0-9]+}}, {{w[0-9]+}}, [x{{[0-9]+}}, #-8]
+; STUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i32* %p, i32 -1
+ store i32 %a, i32* %p1, align 2
+ %p2 = getelementptr inbounds i32* %p, i32 -2
+ store i32 %b, i32* %p2, align 2
+ ret void
+}
+
+define void @stur_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
+; STUR_CHK: stur_long
+; STUR_CHK: stp x{{[0-9]+}}, {{x[0-9]+}}, [x{{[0-9]+}}, #-16]
+; STUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i64* %p, i32 -1
+ store i64 %a, i64* %p1, align 2
+ %p2 = getelementptr inbounds i64* %p, i32 -2
+ store i64 %b, i64* %p2, align 2
+ ret void
+}
+
+define void @stur_float(float %a, float %b, float* nocapture %p) nounwind {
+; STUR_CHK: stur_float
+; STUR_CHK: stp s{{[0-9]+}}, {{s[0-9]+}}, [x{{[0-9]+}}, #-8]
+; STUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds float* %p, i32 -1
+ store float %a, float* %p1, align 2
+ %p2 = getelementptr inbounds float* %p, i32 -2
+ store float %b, float* %p2, align 2
+ ret void
+}
+
+define void @stur_double(double %a, double %b, double* nocapture %p) nounwind {
+; STUR_CHK: stur_double
+; STUR_CHK: stp d{{[0-9]+}}, {{d[0-9]+}}, [x{{[0-9]+}}, #-16]
+; STUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds double* %p, i32 -1
+ store double %a, double* %p1, align 2
+ %p2 = getelementptr inbounds double* %p, i32 -2
+ store double %b, double* %p2, align 2
+ ret void
+}
+
+define void @splat_v4i32(i32 %v, i32 *%p) {
+entry:
+
+; CHECK-LABEL: splat_v4i32
+; CHECK-DAG: stp w0, w0, [x1]
+; CHECK-DAG: stp w0, w0, [x1, #8]
+; CHECK: ret
+
+ %p17 = insertelement <4 x i32> undef, i32 %v, i32 0
+ %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
+ %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
+ %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
+ %p21 = bitcast i32* %p to <4 x i32>*
+ store <4 x i32> %p20, <4 x i32>* %p21, align 4
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-strict-align.ll b/test/CodeGen/AArch64/arm64-strict-align.ll
new file mode 100644
index 000000000000..5d137043a691
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-strict-align.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin -aarch64-no-strict-align | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin -aarch64-strict-align | FileCheck %s --check-prefix=CHECK-STRICT
+
+define i32 @f0(i32* nocapture %p) nounwind {
+; CHECK-STRICT: ldrh [[HIGH:w[0-9]+]], [x0, #2]
+; CHECK-STRICT: ldrh [[LOW:w[0-9]+]], [x0]
+; CHECK-STRICT: bfi [[LOW]], [[HIGH]], #16, #16
+; CHECK-STRICT: ret
+
+; CHECK: ldr w0, [x0]
+; CHECK: ret
+ %tmp = load i32* %p, align 2
+ ret i32 %tmp
+}
+
+define i64 @f1(i64* nocapture %p) nounwind {
+; CHECK-STRICT: ldp w[[LOW:[0-9]+]], w[[HIGH:[0-9]+]], [x0]
+; CHECK-STRICT: bfi x[[LOW]], x[[HIGH]], #32, #32
+; CHECK-STRICT: ret
+
+; CHECK: ldr x0, [x0]
+; CHECK: ret
+ %tmp = load i64* %p, align 4
+ ret i64 %tmp
+}
diff --git a/test/CodeGen/AArch64/arm64-stur.ll b/test/CodeGen/AArch64/arm64-stur.ll
new file mode 100644
index 000000000000..a2e684dc9528
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-stur.ll
@@ -0,0 +1,98 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+%struct.X = type <{ i32, i64, i64 }>
+
+define void @foo1(i32* %p, i64 %val) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK: stur w1, [x0, #-4]
+; CHECK-NEXT: ret
+ %tmp1 = trunc i64 %val to i32
+ %ptr = getelementptr inbounds i32* %p, i64 -1
+ store i32 %tmp1, i32* %ptr, align 4
+ ret void
+}
+define void @foo2(i16* %p, i64 %val) nounwind {
+; CHECK-LABEL: foo2:
+; CHECK: sturh w1, [x0, #-2]
+; CHECK-NEXT: ret
+ %tmp1 = trunc i64 %val to i16
+ %ptr = getelementptr inbounds i16* %p, i64 -1
+ store i16 %tmp1, i16* %ptr, align 2
+ ret void
+}
+define void @foo3(i8* %p, i64 %val) nounwind {
+; CHECK-LABEL: foo3:
+; CHECK: sturb w1, [x0, #-1]
+; CHECK-NEXT: ret
+ %tmp1 = trunc i64 %val to i8
+ %ptr = getelementptr inbounds i8* %p, i64 -1
+ store i8 %tmp1, i8* %ptr, align 1
+ ret void
+}
+define void @foo4(i16* %p, i32 %val) nounwind {
+; CHECK-LABEL: foo4:
+; CHECK: sturh w1, [x0, #-2]
+; CHECK-NEXT: ret
+ %tmp1 = trunc i32 %val to i16
+ %ptr = getelementptr inbounds i16* %p, i32 -1
+ store i16 %tmp1, i16* %ptr, align 2
+ ret void
+}
+define void @foo5(i8* %p, i32 %val) nounwind {
+; CHECK-LABEL: foo5:
+; CHECK: sturb w1, [x0, #-1]
+; CHECK-NEXT: ret
+ %tmp1 = trunc i32 %val to i8
+ %ptr = getelementptr inbounds i8* %p, i32 -1
+ store i8 %tmp1, i8* %ptr, align 1
+ ret void
+}
+
+define void @foo(%struct.X* nocapture %p) nounwind optsize ssp {
+; CHECK-LABEL: foo:
+; CHECK-NOT: str
+; CHECK: stur xzr, [x0, #12]
+; CHECK-NEXT: stur xzr, [x0, #4]
+; CHECK-NEXT: ret
+ %B = getelementptr inbounds %struct.X* %p, i64 0, i32 1
+ %val = bitcast i64* %B to i8*
+ call void @llvm.memset.p0i8.i64(i8* %val, i8 0, i64 16, i32 1, i1 false)
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+; Unaligned 16b stores are split into 8b stores for performance.
+; radar://15424193
+
+; CHECK-LABEL: unaligned:
+; CHECK-NOT: str q0
+; CHECK: str d[[REG:[0-9]+]], [x0]
+; CHECK: ext.16b v[[REG2:[0-9]+]], v[[REG]], v[[REG]], #8
+; CHECK: str d[[REG2]], [x0, #8]
+define void @unaligned(<4 x i32>* %p, <4 x i32> %v) nounwind {
+ store <4 x i32> %v, <4 x i32>* %p, align 4
+ ret void
+}
+
+; CHECK-LABEL: aligned:
+; CHECK: str q0
+define void @aligned(<4 x i32>* %p, <4 x i32> %v) nounwind {
+ store <4 x i32> %v, <4 x i32>* %p
+ ret void
+}
+
+; Don't split one and two byte aligned stores.
+; radar://16349308
+
+; CHECK-LABEL: twobytealign:
+; CHECK: str q0
+define void @twobytealign(<4 x i32>* %p, <4 x i32> %v) nounwind {
+ store <4 x i32> %v, <4 x i32>* %p, align 2
+ ret void
+}
+; CHECK-LABEL: onebytealign:
+; CHECK: str q0
+define void @onebytealign(<4 x i32>* %p, <4 x i32> %v) nounwind {
+ store <4 x i32> %v, <4 x i32>* %p, align 1
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-subsections.ll b/test/CodeGen/AArch64/arm64-subsections.ll
new file mode 100644
index 000000000000..316e7c3a8ebd
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-subsections.ll
@@ -0,0 +1,5 @@
+; RUN: llc -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s --check-prefix=CHECK-MACHO
+; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s --check-prefix=CHECK-ELF
+
+; CHECK-MACHO: .subsections_via_symbols
+; CHECK-ELF-NOT: .subsections_via_symbols
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/arm64-subvector-extend.ll b/test/CodeGen/AArch64/arm64-subvector-extend.ll
new file mode 100644
index 000000000000..d5a178a9e656
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-subvector-extend.ll
@@ -0,0 +1,141 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+
+; Test efficient codegen of vector extends from a legal source type up to
+; 128-bit and 256-bit vector types.
+
+;-----
+; Vectors of i16.
+;-----
+define <8 x i16> @func1(<8 x i8> %v0) nounwind {
+; CHECK-LABEL: func1:
+; CHECK-NEXT: ushll.8h v0, v0, #0
+; CHECK-NEXT: ret
+ %r = zext <8 x i8> %v0 to <8 x i16>
+ ret <8 x i16> %r
+}
+
+define <8 x i16> @func2(<8 x i8> %v0) nounwind {
+; CHECK-LABEL: func2:
+; CHECK-NEXT: sshll.8h v0, v0, #0
+; CHECK-NEXT: ret
+ %r = sext <8 x i8> %v0 to <8 x i16>
+ ret <8 x i16> %r
+}
+
+define <16 x i16> @func3(<16 x i8> %v0) nounwind {
+; CHECK-LABEL: func3:
+; CHECK-NEXT: ushll2.8h v1, v0, #0
+; CHECK-NEXT: ushll.8h v0, v0, #0
+; CHECK-NEXT: ret
+ %r = zext <16 x i8> %v0 to <16 x i16>
+ ret <16 x i16> %r
+}
+
+define <16 x i16> @func4(<16 x i8> %v0) nounwind {
+; CHECK-LABEL: func4:
+; CHECK-NEXT: sshll2.8h v1, v0, #0
+; CHECK-NEXT: sshll.8h v0, v0, #0
+; CHECK-NEXT: ret
+ %r = sext <16 x i8> %v0 to <16 x i16>
+ ret <16 x i16> %r
+}
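+
+; The 256-bit results above do not fit in a single Q register, so the extend is
+; split per 128-bit half: {u,s}shll2 widens the high eight bytes into v1 while
+; {u,s}shll widens the low eight bytes into v0, giving the pairs checked above.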
+
+;-----
+; Vectors of i32.
+;-----
+
+define <4 x i32> @afunc1(<4 x i16> %v0) nounwind {
+; CHECK-LABEL: afunc1:
+; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: ret
+ %r = zext <4 x i16> %v0 to <4 x i32>
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @afunc2(<4 x i16> %v0) nounwind {
+; CHECK-LABEL: afunc2:
+; CHECK-NEXT: sshll.4s v0, v0, #0
+; CHECK-NEXT: ret
+ %r = sext <4 x i16> %v0 to <4 x i32>
+ ret <4 x i32> %r
+}
+
+define <8 x i32> @afunc3(<8 x i16> %v0) nounwind {
+; CHECK-LABEL: afunc3:
+; CHECK-NEXT: ushll2.4s v1, v0, #0
+; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: ret
+ %r = zext <8 x i16> %v0 to <8 x i32>
+ ret <8 x i32> %r
+}
+
+define <8 x i32> @afunc4(<8 x i16> %v0) nounwind {
+; CHECK-LABEL: afunc4:
+; CHECK-NEXT: sshll2.4s v1, v0, #0
+; CHECK-NEXT: sshll.4s v0, v0, #0
+; CHECK-NEXT: ret
+ %r = sext <8 x i16> %v0 to <8 x i32>
+ ret <8 x i32> %r
+}
+
+define <8 x i32> @bfunc1(<8 x i8> %v0) nounwind {
+; CHECK-LABEL: bfunc1:
+; CHECK-NEXT: ushll.8h v0, v0, #0
+; CHECK-NEXT: ushll2.4s v1, v0, #0
+; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: ret
+ %r = zext <8 x i8> %v0 to <8 x i32>
+ ret <8 x i32> %r
+}
+
+define <8 x i32> @bfunc2(<8 x i8> %v0) nounwind {
+; CHECK-LABEL: bfunc2:
+; CHECK-NEXT: sshll.8h v0, v0, #0
+; CHECK-NEXT: sshll2.4s v1, v0, #0
+; CHECK-NEXT: sshll.4s v0, v0, #0
+; CHECK-NEXT: ret
+ %r = sext <8 x i8> %v0 to <8 x i32>
+ ret <8 x i32> %r
+}
+
+;-----
+; Vectors of i64.
+;-----
+
+define <4 x i64> @zfunc1(<4 x i32> %v0) nounwind {
+; CHECK-LABEL: zfunc1:
+; CHECK-NEXT: ushll2.2d v1, v0, #0
+; CHECK-NEXT: ushll.2d v0, v0, #0
+; CHECK-NEXT: ret
+ %r = zext <4 x i32> %v0 to <4 x i64>
+ ret <4 x i64> %r
+}
+
+define <4 x i64> @zfunc2(<4 x i32> %v0) nounwind {
+; CHECK-LABEL: zfunc2:
+; CHECK-NEXT: sshll2.2d v1, v0, #0
+; CHECK-NEXT: sshll.2d v0, v0, #0
+; CHECK-NEXT: ret
+ %r = sext <4 x i32> %v0 to <4 x i64>
+ ret <4 x i64> %r
+}
+
+define <4 x i64> @bfunc3(<4 x i16> %v0) nounwind {
+; CHECK-LABEL: bfunc3:
+; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: ushll2.2d v1, v0, #0
+; CHECK-NEXT: ushll.2d v0, v0, #0
+; CHECK-NEXT: ret
+ %r = zext <4 x i16> %v0 to <4 x i64>
+ ret <4 x i64> %r
+}
+
+define <4 x i64> @cfunc4(<4 x i16> %v0) nounwind {
+; CHECK-LABEL: cfunc4:
+; CHECK-NEXT: sshll.4s v0, v0, #0
+; CHECK-NEXT: sshll2.2d v1, v0, #0
+; CHECK-NEXT: sshll.2d v0, v0, #0
+; CHECK-NEXT: ret
+ %r = sext <4 x i16> %v0 to <4 x i64>
+ ret <4 x i64> %r
+}
diff --git a/test/CodeGen/AArch64/arm64-swizzle-tbl-i16-layout.ll b/test/CodeGen/AArch64/arm64-swizzle-tbl-i16-layout.ll
new file mode 100644
index 000000000000..4ab2bee0ed16
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-swizzle-tbl-i16-layout.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; rdar://13214163 - Make sure we generate a correct lookup table for the TBL
+; instruction when the element size of the vector is not 8 bits. We were
+; getting both the endianness wrong and the element indexing wrong.
+define <8 x i16> @foo(<8 x i16> %a) nounwind readnone {
+; CHECK: .section __TEXT,__literal16,16byte_literals
+; CHECK: .align 4
+; CHECK:lCPI0_0:
+; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 1 ; 0x1
+; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 1 ; 0x1
+; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 1 ; 0x1
+; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 1 ; 0x1
+; CHECK: .byte 8 ; 0x8
+; CHECK: .byte 9 ; 0x9
+; CHECK: .byte 8 ; 0x8
+; CHECK: .byte 9 ; 0x9
+; CHECK: .byte 8 ; 0x8
+; CHECK: .byte 9 ; 0x9
+; CHECK: .byte 8 ; 0x8
+; CHECK: .byte 9 ; 0x9
+; CHECK: .section __TEXT,__text,regular,pure_instructions
+; CHECK: .globl _foo
+; CHECK: .align 2
+; CHECK:_foo: ; @foo
+; CHECK: adrp [[BASE:x[0-9]+]], lCPI0_0@PAGE
+; CHECK: ldr q[[REG:[0-9]+]], {{\[}}[[BASE]], lCPI0_0@PAGEOFF]
+; CHECK: tbl.16b v0, { v0 }, v[[REG]]
+; CHECK: ret
+
+ %val = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %val
+}
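+
+; Worked expansion of the table above (for illustration): the i16 shuffle mask
+; <0,0,0,0,4,4,4,4> selects half-word lanes, and on this little-endian target
+; each lane index k expands to the byte pair (2k, 2k+1) in the TBL table:
+;   lanes 0,0,0,0 -> bytes 0,1, 0,1, 0,1, 0,1
+;   lanes 4,4,4,4 -> bytes 8,9, 8,9, 8,9, 8,9
+; which is exactly the lCPI0_0 constant checked above.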
diff --git a/test/CodeGen/AArch64/arm64-tbl.ll b/test/CodeGen/AArch64/arm64-tbl.ll
new file mode 100644
index 000000000000..b1ce15a1e19a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-tbl.ll
@@ -0,0 +1,132 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @tbl1_8b(<16 x i8> %A, <8 x i8> %B) nounwind {
+; CHECK: tbl1_8b
+; CHECK: tbl.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %A, <8 x i8> %B)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbl1_16b(<16 x i8> %A, <16 x i8> %B) nounwind {
+; CHECK: tbl1_16b
+; CHECK: tbl.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %A, <16 x i8> %B)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @tbl2_8b(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C) {
+; CHECK: tbl2_8b
+; CHECK: tbl.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbl2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
+; CHECK: tbl2_16b
+; CHECK: tbl.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @tbl3_8b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D) {
+; CHECK: tbl3_8b
+; CHECK: tbl.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbl3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) {
+; CHECK: tbl3_16b
+; CHECK: tbl.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @tbl4_8b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <8 x i8> %E) {
+; CHECK: tbl4_8b
+; CHECK: tbl.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <8 x i8> %E)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbl4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) {
+; CHECK: tbl4_16b
+; CHECK: tbl.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E)
+ ret <16 x i8> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i8> @tbx1_8b(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C) nounwind {
+; CHECK: tbx1_8b
+; CHECK: tbx.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbx1_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) nounwind {
+; CHECK: tbx1_16b
+; CHECK: tbx.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @tbx2_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D) {
+; CHECK: tbx2_8b
+; CHECK: tbx.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbx2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) {
+; CHECK: tbx2_16b
+; CHECK: tbx.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @tbx3_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <8 x i8> %E) {
+; CHECK: tbx3_8b
+; CHECK: tbx.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx3.v8i8(< 8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <8 x i8> %E)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbx3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) {
+; CHECK: tbx3_16b
+; CHECK: tbx.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx3.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @tbx4_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, <8 x i8> %F) {
+; CHECK: tbx4_8b
+; CHECK: tbx.8b
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbx4.v8i8(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, <8 x i8> %F)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @tbx4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, <16 x i8> %F) {
+; CHECK: tbx4_16b
+; CHECK: tbx.16b
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx4.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, <16 x i8> %F)
+ ret <16 x i8> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbx2.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.tbx3.v8i8(<8 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbx3.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.tbx4.v8i8(<8 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.tbx4.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+
diff --git a/test/CodeGen/AArch64/arm64-this-return.ll b/test/CodeGen/AArch64/arm64-this-return.ll
new file mode 100644
index 000000000000..30f5b9b064a3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-this-return.ll
@@ -0,0 +1,83 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+%struct.A = type { i8 }
+%struct.B = type { i32 }
+%struct.C = type { %struct.B }
+%struct.D = type { %struct.B }
+%struct.E = type { %struct.B, %struct.B }
+
+declare %struct.A* @A_ctor_base(%struct.A* returned)
+declare %struct.B* @B_ctor_base(%struct.B* returned, i32)
+declare %struct.B* @B_ctor_complete(%struct.B* returned, i32)
+
+declare %struct.A* @A_ctor_base_nothisret(%struct.A*)
+declare %struct.B* @B_ctor_base_nothisret(%struct.B*, i32)
+declare %struct.B* @B_ctor_complete_nothisret(%struct.B*, i32)
+
+define %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x) {
+entry:
+; CHECK-LABEL: C_ctor_base:
+; CHECK-NOT: mov {{x[0-9]+}}, x0
+; CHECK: bl {{_?A_ctor_base}}
+; CHECK-NOT: mov x0, {{x[0-9]+}}
+; CHECK: b {{_?B_ctor_base}}
+ %0 = bitcast %struct.C* %this to %struct.A*
+ %call = tail call %struct.A* @A_ctor_base(%struct.A* %0)
+ %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %call2 = tail call %struct.B* @B_ctor_base(%struct.B* %1, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x) {
+entry:
+; CHECK-LABEL: C_ctor_base_nothisret:
+; CHECK: mov [[SAVETHIS:x[0-9]+]], x0
+; CHECK: bl {{_?A_ctor_base_nothisret}}
+; CHECK: mov x0, [[SAVETHIS]]
+; CHECK-NOT: b {{_?B_ctor_base_nothisret}}
+ %0 = bitcast %struct.C* %this to %struct.A*
+ %call = tail call %struct.A* @A_ctor_base_nothisret(%struct.A* %0)
+ %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %call2 = tail call %struct.B* @B_ctor_base_nothisret(%struct.B* %1, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
+entry:
+; CHECK-LABEL: C_ctor_complete:
+; CHECK: b {{_?C_ctor_base}}
+ %call = tail call %struct.C* @C_ctor_base(%struct.C* %this, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.C* @C_ctor_complete_nothisret(%struct.C* %this, i32 %x) {
+entry:
+; CHECK-LABEL: C_ctor_complete_nothisret:
+; CHECK-NOT: b {{_?C_ctor_base_nothisret}}
+ %call = tail call %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.D* @D_ctor_base(%struct.D* %this, i32 %x) {
+entry:
+; CHECK-LABEL: D_ctor_base:
+; CHECK-NOT: mov {{x[0-9]+}}, x0
+; CHECK: bl {{_?B_ctor_complete}}
+; CHECK-NOT: mov x0, {{x[0-9]+}}
+; CHECK: b {{_?B_ctor_complete}}
+ %b = getelementptr inbounds %struct.D* %this, i32 0, i32 0
+ %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+ %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+ ret %struct.D* %this
+}
+
+define %struct.E* @E_ctor_base(%struct.E* %this, i32 %x) {
+entry:
+; CHECK-LABEL: E_ctor_base:
+; CHECK-NOT: b {{_?B_ctor_complete}}
+ %b = getelementptr inbounds %struct.E* %this, i32 0, i32 0
+ %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+ %b2 = getelementptr inbounds %struct.E* %this, i32 0, i32 1
+ %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b2, i32 %x)
+ ret %struct.E* %this
+}
diff --git a/test/CodeGen/AArch64/arm64-tls-darwin.ll b/test/CodeGen/AArch64/arm64-tls-darwin.ll
new file mode 100644
index 000000000000..5e8ec33ba417
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-tls-darwin.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=arm64-apple-ios7.0 %s -o - | FileCheck %s
+
+@var = thread_local global i8 0
+
+; N.b. x0 must be the result of the first load (i.e. the address of the
+; descriptor) when tlv_get_addr is called. Likewise the result is returned in
+; x0.
+define i8 @get_var() {
+; CHECK-LABEL: get_var:
+; CHECK: adrp x[[TLVPDESC_SLOT_HI:[0-9]+]], _var@TLVPPAGE
+; CHECK: ldr x0, [x[[TLVPDESC_SLOT_HI]], _var@TLVPPAGEOFF]
+; CHECK: ldr [[TLV_GET_ADDR:x[0-9]+]], [x0]
+; CHECK: blr [[TLV_GET_ADDR]]
+; CHECK: ldrb w0, [x0]
+
+ %val = load i8* @var, align 1
+ ret i8 %val
+}
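+
+; For reference, a rough sketch of the Darwin TLV descriptor this sequence
+; walks (field names and trailing fields are illustrative):
+;   struct TLVDescriptor { void *(*thunk)(struct TLVDescriptor *); long key, offset; };
+; The code loads the descriptor's address into x0, loads the thunk out of the
+; descriptor's first slot, and calls it; the thunk hands back the variable's
+; address in x0, which is then dereferenced by the ldrb.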
diff --git a/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll b/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
new file mode 100644
index 000000000000..3daae625c84a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
@@ -0,0 +1,18 @@
+; RUN: llc -O0 -mtriple=arm64-none-linux-gnu -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s
+
+; If the .tlsdesccall and blr parts are emitted completely separately (even with
+; glue) then LLVM will separate them quite happily (with a spill at O0, hence
+; the option). This is definitely wrong, so we make sure they are emitted
+; together.
+
+@general_dynamic_var = external thread_local global i32
+
+define i32 @test_generaldynamic() {
+; CHECK-LABEL: test_generaldynamic:
+
+ %val = load i32* @general_dynamic_var
+ ret i32 %val
+
+; CHECK: .tlsdesccall general_dynamic_var
+; CHECK-NEXT: blr {{x[0-9]+}}
+}
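+
+; i.e. the pair is expected to stay back to back, roughly:
+;   .tlsdesccall general_dynamic_var
+;   blr xN                  ; xN = whichever register holds the resolver
+; so that the R_AARCH64_TLSDESC_CALL relocation produced by .tlsdesccall
+; annotates the blr itself, with no spill or reload scheduled between the two.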
diff --git a/test/CodeGen/AArch64/arm64-tls-dynamics.ll b/test/CodeGen/AArch64/arm64-tls-dynamics.ll
new file mode 100644
index 000000000000..e8a83fd7db3a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-tls-dynamics.ll
@@ -0,0 +1,135 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
+
+@general_dynamic_var = external thread_local global i32
+
+define i32 @test_generaldynamic() {
+; CHECK-LABEL: test_generaldynamic:
+
+ %val = load i32* @general_dynamic_var
+ ret i32 %val
+
+ ; FIXME: the adrp instructions are redundant (if harmless).
+; CHECK: adrp [[TLSDESC_HI:x[0-9]+]], :tlsdesc:general_dynamic_var
+; CHECK: add x0, [[TLSDESC_HI]], :tlsdesc_lo12:general_dynamic_var
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], :tlsdesc_lo12:general_dynamic_var]
+; CHECK: .tlsdesccall general_dynamic_var
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: mrs x[[TP:[0-9]+]], TPIDR_EL0
+; CHECK: ldr w0, [x[[TP]], x0]
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+
+}
+
+define i32* @test_generaldynamic_addr() {
+; CHECK-LABEL: test_generaldynamic_addr:
+
+ ret i32* @general_dynamic_var
+
+ ; FIXME: the adrp instructions are redundant (if harmless).
+; CHECK: adrp [[TLSDESC_HI:x[0-9]+]], :tlsdesc:general_dynamic_var
+; CHECK: add x0, [[TLSDESC_HI]], :tlsdesc_lo12:general_dynamic_var
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], :tlsdesc_lo12:general_dynamic_var]
+; CHECK: .tlsdesccall general_dynamic_var
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: mrs [[TP:x[0-9]+]], TPIDR_EL0
+; CHECK: add x0, [[TP]], x0
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+}
+
+@local_dynamic_var = external thread_local(localdynamic) global i32
+
+define i32 @test_localdynamic() {
+; CHECK-LABEL: test_localdynamic:
+
+ %val = load i32* @local_dynamic_var
+ ret i32 %val
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: add x0, x[[TLSDESC_HI]], :tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], :tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK: .tlsdesccall _TLS_MODULE_BASE_
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: movz [[DTP_OFFSET:x[0-9]+]], #:dtprel_g1:local_dynamic_var
+; CHECK: movk [[DTP_OFFSET]], #:dtprel_g0_nc:local_dynamic_var
+
+; CHECK: add x[[TPREL:[0-9]+]], x0, [[DTP_OFFSET]]
+
+; CHECK: mrs x[[TPIDR:[0-9]+]], TPIDR_EL0
+
+; CHECK: ldr w0, [x[[TPIDR]], x[[TPREL]]]
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+
+}
+
+define i32* @test_localdynamic_addr() {
+; CHECK-LABEL: test_localdynamic_addr:
+
+ ret i32* @local_dynamic_var
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: add x0, x[[TLSDESC_HI]], :tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], :tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK: .tlsdesccall _TLS_MODULE_BASE_
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK: movz [[DTP_OFFSET:x[0-9]+]], #:dtprel_g1:local_dynamic_var
+; CHECK: movk [[DTP_OFFSET]], #:dtprel_g0_nc:local_dynamic_var
+
+; CHECK: add [[TPREL:x[0-9]+]], x0, [[DTP_OFFSET]]
+
+; CHECK: mrs [[TPIDR:x[0-9]+]], TPIDR_EL0
+
+; CHECK: add x0, [[TPIDR]], [[TPREL]]
+
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
+
+}
+
+; The entire point of the local-dynamic access model is to have a single call to
+; the expensive resolver. Make sure we achieve that goal.
+
+@local_dynamic_var2 = external thread_local(localdynamic) global i32
+
+define i32 @test_localdynamic_deduplicate() {
+; CHECK-LABEL: test_localdynamic_deduplicate:
+
+ %val = load i32* @local_dynamic_var
+ %val2 = load i32* @local_dynamic_var2
+
+ %sum = add i32 %val, %val2
+ ret i32 %sum
+
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: add x0, x[[TLSDESC_HI]], :tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
+; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], :tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK: .tlsdesccall _TLS_MODULE_BASE_
+; CHECK-NEXT: blr [[CALLEE]]
+
+; CHECK-NOT: _TLS_MODULE_BASE_
+
+; CHECK: ret
+}
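+
+; Rough C-level equivalent of the deduplication test above (an illustrative
+; sketch, assuming both variables use the local-dynamic TLS model, e.g. via
+; -ftls-model=local-dynamic):
+;   extern __thread int local_dynamic_var, local_dynamic_var2;
+;   int test_localdynamic_deduplicate(void) {
+;     return local_dynamic_var + local_dynamic_var2;
+;   }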
diff --git a/test/CodeGen/AArch64/arm64-tls-execs.ll b/test/CodeGen/AArch64/arm64-tls-execs.ll
new file mode 100644
index 000000000000..f0130d858896
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-tls-execs.ll
@@ -0,0 +1,63 @@
+; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -show-mc-encoding < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
+
+@initial_exec_var = external thread_local(initialexec) global i32
+
+define i32 @test_initial_exec() {
+; CHECK-LABEL: test_initial_exec:
+ %val = load i32* @initial_exec_var
+
+; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
+; CHECK: ldr x[[TP_OFFSET:[0-9]+]], [x[[GOTADDR]], :gottprel_lo12:initial_exec_var]
+; CHECK: mrs x[[TP:[0-9]+]], TPIDR_EL0
+; CHECK: ldr w0, [x[[TP]], x[[TP_OFFSET]]]
+
+; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
+; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
+
+ ret i32 %val
+}
+
+define i32* @test_initial_exec_addr() {
+; CHECK-LABEL: test_initial_exec_addr:
+ ret i32* @initial_exec_var
+
+; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
+; CHECK: ldr [[TP_OFFSET:x[0-9]+]], [x[[GOTADDR]], :gottprel_lo12:initial_exec_var]
+; CHECK: mrs [[TP:x[0-9]+]], TPIDR_EL0
+; CHECK: add x0, [[TP]], [[TP_OFFSET]]
+
+; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
+; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
+
+}
+
+@local_exec_var = thread_local(localexec) global i32 0
+
+define i32 @test_local_exec() {
+; CHECK-LABEL: test_local_exec:
+ %val = load i32* @local_exec_var
+
+; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var // encoding: [0bAAA{{[01]+}},A,0b101AAAAA,0x92]
+; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
+; CHECK: mrs x[[TP:[0-9]+]], TPIDR_EL0
+; CHECK: ldr w0, [x[[TP]], [[TP_OFFSET]]]
+
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G1
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
+
+ ret i32 %val
+}
+
+define i32* @test_local_exec_addr() {
+; CHECK-LABEL: test_local_exec_addr:
+ ret i32* @local_exec_var
+
+; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var
+; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
+; CHECK: mrs [[TP:x[0-9]+]], TPIDR_EL0
+; CHECK: add x0, [[TP]], [[TP_OFFSET]]
+
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G1
+; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
+}
diff --git a/test/CodeGen/AArch64/arm64-trap.ll b/test/CodeGen/AArch64/arm64-trap.ll
new file mode 100644
index 000000000000..5e99c32c57b3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-trap.ll
@@ -0,0 +1,8 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+define void @foo() nounwind {
+; CHECK: foo
+; CHECK: brk #0x1
+ tail call void @llvm.trap()
+ ret void
+}
+declare void @llvm.trap() nounwind
diff --git a/test/CodeGen/AArch64/arm64-trn.ll b/test/CodeGen/AArch64/arm64-trn.ll
new file mode 100644
index 000000000000..2db7a14e7549
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-trn.ll
@@ -0,0 +1,134 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vtrni8:
+;CHECK: trn1.8b
+;CHECK: trn2.8b
+;CHECK-NEXT: add.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vtrni16:
+;CHECK: trn1.4h
+;CHECK: trn2.4h
+;CHECK-NEXT: add.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+; 2xi32 TRN is redundant with ZIP
+define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: vtrni32:
+;CHECK: zip1.2s
+;CHECK: zip2.2s
+;CHECK-NEXT: add.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
+ %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
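+
+; This is expected: for two-element vectors trn1 and zip1 both produce
+; {a[0], b[0]}, and trn2 and zip2 both produce {a[1], b[1]}, so the zip form
+; is emitted.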
+
+define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: vtrnf:
+;CHECK: zip1.2s
+;CHECK: zip2.2s
+;CHECK-NEXT: fadd.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
+ %tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
+ %tmp5 = fadd <2 x float> %tmp3, %tmp4
+ ret <2 x float> %tmp5
+}
+
+define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vtrnQi8:
+;CHECK: trn1.16b
+;CHECK: trn2.16b
+;CHECK-NEXT: add.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vtrnQi16:
+;CHECK: trn1.8h
+;CHECK: trn2.8h
+;CHECK-NEXT: add.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vtrnQi32:
+;CHECK: trn1.4s
+;CHECK: trn2.4s
+;CHECK-NEXT: add.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: vtrnQf:
+;CHECK: trn1.4s
+;CHECK: trn2.4s
+;CHECK-NEXT: fadd.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = fadd <4 x float> %tmp3, %tmp4
+ ret <4 x float> %tmp5
+}
+
+; Undef shuffle indices should not prevent matching to VTRN:
+
+define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vtrni8_undef:
+;CHECK: trn1.8b
+;CHECK: trn2.8b
+;CHECK-NEXT: add.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vtrnQi16_undef:
+;CHECK: trn1.8h
+;CHECK: trn2.8h
+;CHECK-NEXT: add.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
diff --git a/test/CodeGen/AArch64/arm64-trunc-store.ll b/test/CodeGen/AArch64/arm64-trunc-store.ll
new file mode 100644
index 000000000000..cf15247e1524
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-trunc-store.ll
@@ -0,0 +1,75 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+
+define void @bar(<8 x i16> %arg, <8 x i8>* %p) nounwind {
+; CHECK-LABEL: bar:
+; CHECK: xtn.8b v[[REG:[0-9]+]], v0
+; CHECK-NEXT: str d[[REG]], [x0]
+; CHECK-NEXT: ret
+ %tmp = trunc <8 x i16> %arg to <8 x i8>
+ store <8 x i8> %tmp, <8 x i8>* %p, align 8
+ ret void
+}
+
+@zptr8 = common global i8* null, align 8
+@zptr16 = common global i16* null, align 8
+@zptr32 = common global i32* null, align 8
+
+define void @fct32(i32 %arg, i64 %var) {
+; CHECK: fct32
+; CHECK: adrp [[GLOBALPAGE:x[0-9]+]], _zptr32@GOTPAGE
+; CHECK: ldr [[GLOBALOFF:x[0-9]+]], {{\[}}[[GLOBALPAGE]], _zptr32@GOTPAGEOFF]
+; CHECK: ldr [[GLOBALADDR:x[0-9]+]], {{\[}}[[GLOBALOFF]]]
+; w0 is %arg
+; CHECK-NEXT: sub w[[OFFSETREGNUM:[0-9]+]], w0, #1
+; w1 is %var truncated
+; CHECK-NEXT: str w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #2]
+; CHECK-NEXT: ret
+bb:
+ %.pre37 = load i32** @zptr32, align 8
+ %dec = add nsw i32 %arg, -1
+ %idxprom8 = sext i32 %dec to i64
+ %arrayidx9 = getelementptr inbounds i32* %.pre37, i64 %idxprom8
+ %tmp = trunc i64 %var to i32
+ store i32 %tmp, i32* %arrayidx9, align 4
+ ret void
+}
+
+define void @fct16(i32 %arg, i64 %var) {
+; CHECK: fct16
+; CHECK: adrp [[GLOBALPAGE:x[0-9]+]], _zptr16@GOTPAGE
+; CHECK: ldr [[GLOBALOFF:x[0-9]+]], {{\[}}[[GLOBALPAGE]], _zptr16@GOTPAGEOFF]
+; CHECK: ldr [[GLOBALADDR:x[0-9]+]], {{\[}}[[GLOBALOFF]]]
+; w0 is %arg
+; CHECK-NEXT: sub w[[OFFSETREGNUM:[0-9]+]], w0, #1
+; w1 is %var truncated
+; CHECK-NEXT: strh w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #1]
+; CHECK-NEXT: ret
+bb:
+ %.pre37 = load i16** @zptr16, align 8
+ %dec = add nsw i32 %arg, -1
+ %idxprom8 = sext i32 %dec to i64
+ %arrayidx9 = getelementptr inbounds i16* %.pre37, i64 %idxprom8
+ %tmp = trunc i64 %var to i16
+ store i16 %tmp, i16* %arrayidx9, align 4
+ ret void
+}
+
+define void @fct8(i32 %arg, i64 %var) {
+; CHECK: fct8
+; CHECK: adrp [[GLOBALPAGE:x[0-9]+]], _zptr8@GOTPAGE
+; CHECK: ldr [[GLOBALOFF:x[0-9]+]], {{\[}}[[GLOBALPAGE]], _zptr8@GOTPAGEOFF]
+; CHECK: ldr [[BASEADDR:x[0-9]+]], {{\[}}[[GLOBALOFF]]]
+; w0 is %arg
+; CHECK-NEXT: add [[ADDR:x[0-9]+]], [[BASEADDR]], w0, sxtw
+; w1 is %var truncated
+; CHECK-NEXT: sturb w1, {{\[}}[[ADDR]], #-1]
+; CHECK-NEXT: ret
+bb:
+ %.pre37 = load i8** @zptr8, align 8
+ %dec = add nsw i32 %arg, -1
+ %idxprom8 = sext i32 %dec to i64
+ %arrayidx9 = getelementptr inbounds i8* %.pre37, i64 %idxprom8
+ %tmp = trunc i64 %var to i8
+ store i8 %tmp, i8* %arrayidx9, align 4
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-umaxv.ll b/test/CodeGen/AArch64/arm64-umaxv.ll
new file mode 100644
index 000000000000..d523f317d087
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-umaxv.ll
@@ -0,0 +1,92 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp {
+; CHECK-LABEL: vmax_u8x8:
+; CHECK: umaxv.8b b[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a) nounwind
+ %tmp = trunc i32 %vmaxv.i to i8
+ %tobool = icmp eq i8 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+declare i32 @bar(...)
+
+define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp {
+; CHECK-LABEL: vmax_u4x16:
+; CHECK: umaxv.4h h[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a) nounwind
+ %tmp = trunc i32 %vmaxv.i to i16
+ %tobool = icmp eq i16 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp {
+; CHECK-LABEL: vmax_u8x16:
+; CHECK: umaxv.8h h[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a) nounwind
+ %tmp = trunc i32 %vmaxv.i to i16
+ %tobool = icmp eq i16 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp {
+; CHECK-LABEL: vmax_u16x8:
+; CHECK: umaxv.16b b[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a) nounwind
+ %tmp = trunc i32 %vmaxv.i to i8
+ %tobool = icmp eq i8 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>) nounwind readnone
+declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>) nounwind readnone
+declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>) nounwind readnone
+declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-uminv.ll b/test/CodeGen/AArch64/arm64-uminv.ll
new file mode 100644
index 000000000000..3bade4b28b8f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-uminv.ll
@@ -0,0 +1,92 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp {
+; CHECK-LABEL: vmin_u8x8:
+; CHECK: uminv.8b b[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a) nounwind
+ %tmp = trunc i32 %vminv.i to i8
+ %tobool = icmp eq i8 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+declare i32 @bar(...)
+
+define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp {
+; CHECK-LABEL: vmin_u4x16:
+; CHECK: uminv.4h h[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a) nounwind
+ %tmp = trunc i32 %vminv.i to i16
+ %tobool = icmp eq i16 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp {
+; CHECK-LABEL: vmin_u8x16:
+; CHECK: uminv.8h h[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a) nounwind
+ %tmp = trunc i32 %vminv.i to i16
+ %tobool = icmp eq i16 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp {
+; CHECK-LABEL: vmin_u16x8:
+; CHECK: uminv.16b b[[REG:[0-9]+]], v0
+; CHECK: fmov [[REG2:w[0-9]+]], s[[REG]]
+; CHECK-NOT: and
+; CHECK: cbz [[REG2]],
+entry:
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a) nounwind
+ %tmp = trunc i32 %vminv.i to i8
+ %tobool = icmp eq i8 %tmp, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then:
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>) nounwind readnone
+declare i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16>) nounwind readnone
+declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>) nounwind readnone
+declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-umov.ll b/test/CodeGen/AArch64/arm64-umov.ll
new file mode 100644
index 000000000000..a1ef9908646a
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-umov.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define zeroext i8 @f1(<16 x i8> %a) {
+; CHECK-LABEL: f1:
+; CHECK: mov.b w0, v0[3]
+; CHECK-NEXT: ret
+ %vecext = extractelement <16 x i8> %a, i32 3
+ ret i8 %vecext
+}
+
+define zeroext i16 @f2(<4 x i16> %a) {
+; CHECK-LABEL: f2:
+; CHECK: mov.h w0, v0[2]
+; CHECK-NEXT: ret
+ %vecext = extractelement <4 x i16> %a, i32 2
+ ret i16 %vecext
+}
+
+define i32 @f3(<2 x i32> %a) {
+; CHECK-LABEL: f3:
+; CHECK: mov.s w0, v0[1]
+; CHECK-NEXT: ret
+ %vecext = extractelement <2 x i32> %a, i32 1
+ ret i32 %vecext
+}
+
+define i64 @f4(<2 x i64> %a) {
+; CHECK-LABEL: f4:
+; CHECK: mov.d x0, v0[1]
+; CHECK-NEXT: ret
+ %vecext = extractelement <2 x i64> %a, i32 1
+ ret i64 %vecext
+}
diff --git a/test/CodeGen/AArch64/arm64-unaligned_ldst.ll b/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
new file mode 100644
index 000000000000..20b80c09f72f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
@@ -0,0 +1,41 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+; rdar://r11231896
+
+define void @t1(i8* nocapture %a, i8* nocapture %b) nounwind {
+entry:
+; CHECK-LABEL: t1:
+; CHECK-NOT: orr
+; CHECK: ldr [[X0:x[0-9]+]], [x1]
+; CHECK: str [[X0]], [x0]
+ %tmp1 = bitcast i8* %b to i64*
+ %tmp2 = bitcast i8* %a to i64*
+ %tmp3 = load i64* %tmp1, align 1
+ store i64 %tmp3, i64* %tmp2, align 1
+ ret void
+}
+
+define void @t2(i8* nocapture %a, i8* nocapture %b) nounwind {
+entry:
+; CHECK-LABEL: t2:
+; CHECK-NOT: orr
+; CHECK: ldr [[W0:w[0-9]+]], [x1]
+; CHECK: str [[W0]], [x0]
+ %tmp1 = bitcast i8* %b to i32*
+ %tmp2 = bitcast i8* %a to i32*
+ %tmp3 = load i32* %tmp1, align 1
+ store i32 %tmp3, i32* %tmp2, align 1
+ ret void
+}
+
+define void @t3(i8* nocapture %a, i8* nocapture %b) nounwind {
+entry:
+; CHECK-LABEL: t3:
+; CHECK-NOT: orr
+; CHECK: ldrh [[W0:w[0-9]+]], [x1]
+; CHECK: strh [[W0]], [x0]
+ %tmp1 = bitcast i8* %b to i16*
+ %tmp2 = bitcast i8* %a to i16*
+ %tmp3 = load i16* %tmp1, align 1
+ store i16 %tmp3, i16* %tmp2, align 1
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-uzp.ll b/test/CodeGen/AArch64/arm64-uzp.ll
new file mode 100644
index 000000000000..cdd8d31c9981
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-uzp.ll
@@ -0,0 +1,107 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vuzpi8:
+;CHECK: uzp1.8b
+;CHECK: uzp2.8b
+;CHECK-NEXT: add.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vuzpi16:
+;CHECK: uzp1.4h
+;CHECK: uzp2.4h
+;CHECK-NEXT: add.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vuzpQi8:
+;CHECK: uzp1.16b
+;CHECK: uzp2.16b
+;CHECK-NEXT: add.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vuzpQi16:
+;CHECK: uzp1.8h
+;CHECK: uzp2.8h
+;CHECK-NEXT: add.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vuzpQi32:
+;CHECK: uzp1.4s
+;CHECK: uzp2.4s
+;CHECK-NEXT: add.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: vuzpQf:
+;CHECK: uzp1.4s
+;CHECK: uzp2.4s
+;CHECK-NEXT: fadd.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %tmp5 = fadd <4 x float> %tmp3, %tmp4
+ ret <4 x float> %tmp5
+}
+
+; Undef shuffle indices should not prevent matching to VUZP:
+
+define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vuzpi8_undef:
+;CHECK: uzp1.8b
+;CHECK: uzp2.8b
+;CHECK-NEXT: add.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vuzpQi16_undef:
+;CHECK: uzp1.8h
+;CHECK: uzp2.8h
+;CHECK-NEXT: add.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 undef, i32 11, i32 13, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
diff --git a/test/CodeGen/AArch64/arm64-vaargs.ll b/test/CodeGen/AArch64/arm64-vaargs.ll
new file mode 100644
index 000000000000..ce07635a5c87
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vaargs.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
+target triple = "arm64-apple-darwin11.0.0"
+
+define float @t1(i8* nocapture %fmt, ...) nounwind ssp {
+entry:
+; CHECK: t1
+; CHECK: fcvt
+ %argp = alloca i8*, align 8
+ %argp1 = bitcast i8** %argp to i8*
+ call void @llvm.va_start(i8* %argp1)
+ %0 = va_arg i8** %argp, i32
+ %1 = va_arg i8** %argp, float
+ call void @llvm.va_end(i8* %argp1)
+ ret float %1
+}
+
+declare void @llvm.va_start(i8*) nounwind
+
+declare void @llvm.va_end(i8*) nounwind
diff --git a/test/CodeGen/AArch64/arm64-vabs.ll b/test/CodeGen/AArch64/arm64-vabs.ll
new file mode 100644
index 000000000000..5afc8d9f3f49
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vabs.ll
@@ -0,0 +1,804 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+
+define <8 x i16> @sabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sabdl8h:
+;CHECK: sabdl.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @sabdl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sabdl4s:
+;CHECK: sabdl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @sabdl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sabdl2d:
+;CHECK: sabdl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i16> @sabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sabdl2_8h:
+;CHECK: sabdl2.8h
+ %load1 = load <16 x i8>* %A
+ %load2 = load <16 x i8>* %B
+ %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @sabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sabdl2_4s:
+;CHECK: sabdl2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @sabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sabdl2_2d:
+;CHECK: sabdl2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i16> @uabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uabdl8h:
+;CHECK: uabdl.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @uabdl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uabdl4s:
+;CHECK: uabdl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @uabdl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uabdl2d:
+;CHECK: uabdl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i16> @uabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uabdl2_8h:
+;CHECK: uabdl2.8h
+ %load1 = load <16 x i8>* %A
+ %load2 = load <16 x i8>* %B
+ %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @uabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uabdl2_4s:
+;CHECK: uabdl2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @uabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uabdl2_2d:
+;CHECK: uabdl2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x float> @fabd_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fabd_2s:
+;CHECK: fabd.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fabd_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fabd_4s:
+;CHECK: fabd.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fabd_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fabd_2d:
+;CHECK: fabd.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <8 x i8> @sabd_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sabd_8b:
+;CHECK: sabd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @sabd_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sabd_16b:
+;CHECK: sabd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @sabd_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sabd_4h:
+;CHECK: sabd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sabd_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sabd_8h:
+;CHECK: sabd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sabd_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sabd_2s:
+;CHECK: sabd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sabd_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sabd_4s:
+;CHECK: sabd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @uabd_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uabd_8b:
+;CHECK: uabd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @uabd_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uabd_16b:
+;CHECK: uabd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @uabd_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uabd_4h:
+;CHECK: uabd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @uabd_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uabd_8h:
+;CHECK: uabd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @uabd_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uabd_2s:
+;CHECK: uabd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @uabd_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uabd_4s:
+;CHECK: uabd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @sqabs_8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: sqabs_8b:
+;CHECK: sqabs.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqabs.v8i8(<8 x i8> %tmp1)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @sqabs_16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: sqabs_16b:
+;CHECK: sqabs.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqabs.v16i8(<16 x i8> %tmp1)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @sqabs_4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: sqabs_4h:
+;CHECK: sqabs.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqabs.v4i16(<4 x i16> %tmp1)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sqabs_8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqabs_8h:
+;CHECK: sqabs.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqabs.v8i16(<8 x i16> %tmp1)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sqabs_2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: sqabs_2s:
+;CHECK: sqabs.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqabs.v2i32(<2 x i32> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sqabs_4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqabs_4s:
+;CHECK: sqabs.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqabs.v4i32(<4 x i32> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqabs.v8i8(<8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.sqabs.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqabs.v4i16(<4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqabs.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqabs.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqabs.v4i32(<4 x i32>) nounwind readnone
+
+define <8 x i8> @sqneg_8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: sqneg_8b:
+;CHECK: sqneg.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqneg.v8i8(<8 x i8> %tmp1)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @sqneg_16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: sqneg_16b:
+;CHECK: sqneg.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqneg.v16i8(<16 x i8> %tmp1)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @sqneg_4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: sqneg_4h:
+;CHECK: sqneg.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqneg.v4i16(<4 x i16> %tmp1)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sqneg_8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqneg_8h:
+;CHECK: sqneg.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> %tmp1)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sqneg_2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: sqneg_2s:
+;CHECK: sqneg.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqneg.v2i32(<2 x i32> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sqneg_4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqneg_4s:
+;CHECK: sqneg.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqneg.v4i32(<4 x i32> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqneg.v8i8(<8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.sqneg.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqneg.v4i16(<4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqneg.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqneg.v4i32(<4 x i32>) nounwind readnone
+
+define <8 x i8> @abs_8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: abs_8b:
+;CHECK: abs.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.abs.v8i8(<8 x i8> %tmp1)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @abs_16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: abs_16b:
+;CHECK: abs.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.abs.v16i8(<16 x i8> %tmp1)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @abs_4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: abs_4h:
+;CHECK: abs.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.abs.v4i16(<4 x i16> %tmp1)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @abs_8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: abs_8h:
+;CHECK: abs.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.abs.v8i16(<8 x i16> %tmp1)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @abs_2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: abs_2s:
+;CHECK: abs.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.abs.v2i32(<2 x i32> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @abs_4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: abs_4s:
+;CHECK: abs.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.abs.v4i32(<4 x i32> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+define <1 x i64> @abs_1d(<1 x i64> %A) nounwind {
+; CHECK-LABEL: abs_1d:
+; CHECK: abs d0, d0
+ %abs = call <1 x i64> @llvm.aarch64.neon.abs.v1i64(<1 x i64> %A)
+ ret <1 x i64> %abs
+}
+
+define i64 @abs_1d_honestly(i64 %A) nounwind {
+; CHECK-LABEL: abs_1d_honestly:
+; CHECK: abs d0, d0
+ %abs = call i64 @llvm.aarch64.neon.abs.i64(i64 %A)
+ ret i64 %abs
+}
+
+declare <8 x i8> @llvm.aarch64.neon.abs.v8i8(<8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.abs.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.abs.v4i16(<4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.abs.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.abs.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.abs.v4i32(<4 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.abs.v1i64(<1 x i64>) nounwind readnone
+declare i64 @llvm.aarch64.neon.abs.i64(i64) nounwind readnone
+
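+; The accumulate tests below (sabal/uabal, then saba/uaba) add the
+; absolute-difference result into a third operand; the *abal variants also
+; widen the difference before accumulating.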
+define <8 x i16> @sabal8h(<8 x i8>* %A, <8 x i8>* %B, <8 x i16>* %C) nounwind {
+;CHECK-LABEL: sabal8h:
+;CHECK: sabal.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = load <8 x i16>* %C
+ %tmp4 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4.1
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @sabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sabal4s:
+;CHECK: sabal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4.1
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @sabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sabal2d:
+;CHECK: sabal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
+ %tmp5 = add <2 x i64> %tmp3, %tmp4.1
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i16> @sabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwind {
+;CHECK-LABEL: sabal2_8h:
+;CHECK: sabal2.8h
+ %load1 = load <16 x i8>* %A
+ %load2 = load <16 x i8>* %B
+ %tmp3 = load <8 x i16>* %C
+ %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp4 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4.1
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @sabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sabal2_4s:
+;CHECK: sabal2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp4 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4.1
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @sabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sabal2_2d:
+;CHECK: sabal2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp4 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
+ %tmp5 = add <2 x i64> %tmp3, %tmp4.1
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i16> @uabal8h(<8 x i8>* %A, <8 x i8>* %B, <8 x i16>* %C) nounwind {
+;CHECK-LABEL: uabal8h:
+;CHECK: uabal.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = load <8 x i16>* %C
+ %tmp4 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4.1
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @uabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: uabal4s:
+;CHECK: uabal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4.1
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @uabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: uabal2d:
+;CHECK: uabal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
+ %tmp5 = add <2 x i64> %tmp3, %tmp4.1
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i16> @uabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwind {
+;CHECK-LABEL: uabal2_8h:
+;CHECK: uabal2.8h
+ %load1 = load <16 x i8>* %A
+ %load2 = load <16 x i8>* %B
+ %tmp3 = load <8 x i16>* %C
+ %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp4 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4.1
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @uabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: uabal2_4s:
+;CHECK: uabal2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp4 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4.1
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @uabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: uabal2_2d:
+;CHECK: uabal2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp4 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
+ %tmp5 = add <2 x i64> %tmp3, %tmp4.1
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i8> @saba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+;CHECK-LABEL: saba_8b:
+;CHECK: saba.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = load <8 x i8>* %C
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <16 x i8> @saba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
+;CHECK-LABEL: saba_16b:
+;CHECK: saba.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ %tmp4 = load <16 x i8>* %C
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <4 x i16> @saba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+;CHECK-LABEL: saba_4h:
+;CHECK: saba.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4 = load <4 x i16>* %C
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <8 x i16> @saba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
+;CHECK-LABEL: saba_8h:
+;CHECK: saba.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ %tmp4 = load <8 x i16>* %C
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <2 x i32> @saba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+;CHECK-LABEL: saba_2s:
+;CHECK: saba.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4 = load <2 x i32>* %C
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <4 x i32> @saba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: saba_4s:
+;CHECK: saba.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ %tmp4 = load <4 x i32>* %C
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <8 x i8> @uaba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+;CHECK-LABEL: uaba_8b:
+;CHECK: uaba.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = load <8 x i8>* %C
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <16 x i8> @uaba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
+;CHECK-LABEL: uaba_16b:
+;CHECK: uaba.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ %tmp4 = load <16 x i8>* %C
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <4 x i16> @uaba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+;CHECK-LABEL: uaba_4h:
+;CHECK: uaba.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp4 = load <4 x i16>* %C
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <8 x i16> @uaba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
+;CHECK-LABEL: uaba_8h:
+;CHECK: uaba.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ %tmp4 = load <8 x i16>* %C
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <2 x i32> @uaba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+;CHECK-LABEL: uaba_2s:
+;CHECK: uaba.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp4 = load <2 x i32>* %C
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <4 x i32> @uaba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: uaba_4s:
+;CHECK: uaba.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ %tmp4 = load <4 x i32>* %C
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+; Scalar FABD
+define float @fabds(float %a, float %b) nounwind {
+; CHECK-LABEL: fabds:
+; CHECK: fabd s0, s0, s1
+ %vabd.i = tail call float @llvm.aarch64.sisd.fabd.f32(float %a, float %b) nounwind
+ ret float %vabd.i
+}
+
+define double @fabdd(double %a, double %b) nounwind {
+; CHECK-LABEL: fabdd:
+; CHECK: fabd d0, d0, d1
+ %vabd.i = tail call double @llvm.aarch64.sisd.fabd.f64(double %a, double %b) nounwind
+ ret double %vabd.i
+}
+
+declare double @llvm.aarch64.sisd.fabd.f64(double, double) nounwind readnone
+declare float @llvm.aarch64.sisd.fabd.f32(float, float) nounwind readnone
+
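+; When the high half is extracted from a 128-bit vector and the other operand
+; is a duplicated scalar, the *abdl2 form should be selected directly, with no
+; ext.16b in between.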
+define <2 x i64> @uabdl_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
+; CHECK-LABEL: uabdl_from_extract_dup:
+; CHECK-NOT: ext.16b
+; CHECK: uabdl2.2d
+ %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
+ %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %res = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %lhs.high, <2 x i32> %rhsvec) nounwind
+ %res1 = zext <2 x i32> %res to <2 x i64>
+ ret <2 x i64> %res1
+}
+
+define <2 x i64> @sabdl_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
+; CHECK-LABEL: sabdl_from_extract_dup:
+; CHECK-NOT: ext.16b
+; CHECK: sabdl2.2d
+ %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
+ %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %res = tail call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %lhs.high, <2 x i32> %rhsvec) nounwind
+ %res1 = zext <2 x i32> %res to <2 x i64>
+ ret <2 x i64> %res1
+}
diff --git a/test/CodeGen/AArch64/arm64-vadd.ll b/test/CodeGen/AArch64/arm64-vadd.ll
new file mode 100644
index 000000000000..9ed8aa6d7c5d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vadd.ll
@@ -0,0 +1,941 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+
+define <8 x i8> @addhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: addhn8b:
+;CHECK: addhn.8b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @addhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: addhn4h:
+;CHECK: addhn.4h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @addhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: addhn2s:
+;CHECK: addhn.2s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.addhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @addhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind {
+;CHECK-LABEL: addhn2_16b:
+;CHECK: addhn.8b
+;CHECK-NEXT: addhn2.16b
+ %vaddhn2.i = tail call <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %vaddhn_high2.i = tail call <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %res = shufflevector <8 x i8> %vaddhn2.i, <8 x i8> %vaddhn_high2.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @addhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind {
+;CHECK-LABEL: addhn2_8h:
+;CHECK: addhn.4h
+;CHECK-NEXT: addhn2.8h
+ %vaddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %vaddhn_high3.i = tail call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %res = shufflevector <4 x i16> %vaddhn2.i, <4 x i16> %vaddhn_high3.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @addhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind {
+;CHECK-LABEL: addhn2_4s:
+;CHECK: addhn.2s
+;CHECK-NEXT: addhn2.4s
+ %vaddhn2.i = tail call <2 x i32> @llvm.aarch64.neon.addhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %vaddhn_high3.i = tail call <2 x i32> @llvm.aarch64.neon.addhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %res = shufflevector <2 x i32> %vaddhn2.i, <2 x i32> %vaddhn_high3.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+declare <2 x i32> @llvm.aarch64.neon.addhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+
+
+define <8 x i8> @raddhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: raddhn8b:
+;CHECK: raddhn.8b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @raddhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: raddhn4h:
+;CHECK: raddhn.4h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @raddhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: raddhn2s:
+;CHECK: raddhn.2s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @raddhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind {
+;CHECK-LABEL: raddhn2_16b:
+;CHECK: raddhn.8b
+;CHECK-NEXT: raddhn2.16b
+ %vraddhn2.i = tail call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %vraddhn_high2.i = tail call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %res = shufflevector <8 x i8> %vraddhn2.i, <8 x i8> %vraddhn_high2.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @raddhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind {
+;CHECK-LABEL: raddhn2_8h:
+;CHECK: raddhn.4h
+;CHECK-NEXT: raddhn2.8h
+ %vraddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %vraddhn_high3.i = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %res = shufflevector <4 x i16> %vraddhn2.i, <4 x i16> %vraddhn_high3.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @raddhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind {
+;CHECK-LABEL: raddhn2_4s:
+;CHECK: raddhn.2s
+;CHECK-NEXT: raddhn2.4s
+ %vraddhn2.i = tail call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %vraddhn_high3.i = tail call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %res = shufflevector <2 x i32> %vraddhn2.i, <2 x i32> %vraddhn_high3.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+declare <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+
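+; The widening-add tests below express saddl/uaddl (and the high-half "2"
+; forms) as sext/zext followed by a plain add; the instructions should be
+; selected without going through intrinsics.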
+define <8 x i16> @saddl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: saddl8h:
+;CHECK: saddl.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
+ %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @saddl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: saddl4s:
+;CHECK: saddl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
+ %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @saddl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: saddl2d:
+;CHECK: saddl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
+ %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i16> @saddl2_8h(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: saddl2_8h:
+; CHECK-NEXT: saddl2.8h v0, v0, v1
+; CHECK-NEXT: ret
+ %tmp = bitcast <16 x i8> %a to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <8 x i8>
+ %vmovl.i.i.i = sext <8 x i8> %tmp1 to <8 x i16>
+ %tmp2 = bitcast <16 x i8> %b to <2 x i64>
+ %shuffle.i.i4.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i.i4.i to <8 x i8>
+ %vmovl.i.i5.i = sext <8 x i8> %tmp3 to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i.i, %vmovl.i.i5.i
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @saddl2_4s(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: saddl2_4s:
+; CHECK-NEXT: saddl2.4s v0, v0, v1
+; CHECK-NEXT: ret
+ %tmp = bitcast <8 x i16> %a to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <4 x i16>
+ %vmovl.i.i.i = sext <4 x i16> %tmp1 to <4 x i32>
+ %tmp2 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i4.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i.i4.i to <4 x i16>
+ %vmovl.i.i5.i = sext <4 x i16> %tmp3 to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i.i, %vmovl.i.i5.i
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @saddl2_2d(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: saddl2_2d:
+; CHECK-NEXT: saddl2.2d v0, v0, v1
+; CHECK-NEXT: ret
+ %tmp = bitcast <4 x i32> %a to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <2 x i32>
+ %vmovl.i.i.i = sext <2 x i32> %tmp1 to <2 x i64>
+ %tmp2 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i4.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i.i4.i to <2 x i32>
+ %vmovl.i.i5.i = sext <2 x i32> %tmp3 to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i.i, %vmovl.i.i5.i
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @uaddl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uaddl8h:
+;CHECK: uaddl.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
+ %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @uaddl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uaddl4s:
+;CHECK: uaddl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
+ %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @uaddl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uaddl2d:
+;CHECK: uaddl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
+ %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+
+define <8 x i16> @uaddl2_8h(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: uaddl2_8h:
+; CHECK-NEXT: uaddl2.8h v0, v0, v1
+; CHECK-NEXT: ret
+ %tmp = bitcast <16 x i8> %a to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <8 x i8>
+ %vmovl.i.i.i = zext <8 x i8> %tmp1 to <8 x i16>
+ %tmp2 = bitcast <16 x i8> %b to <2 x i64>
+ %shuffle.i.i4.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i.i4.i to <8 x i8>
+ %vmovl.i.i5.i = zext <8 x i8> %tmp3 to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i.i, %vmovl.i.i5.i
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @uaddl2_4s(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: uaddl2_4s:
+; CHECK-NEXT: uaddl2.4s v0, v0, v1
+; CHECK-NEXT: ret
+ %tmp = bitcast <8 x i16> %a to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <4 x i16>
+ %vmovl.i.i.i = zext <4 x i16> %tmp1 to <4 x i32>
+ %tmp2 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i4.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i.i4.i to <4 x i16>
+ %vmovl.i.i5.i = zext <4 x i16> %tmp3 to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i.i, %vmovl.i.i5.i
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @uaddl2_2d(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: uaddl2_2d:
+; CHECK-NEXT: uaddl2.2d v0, v0, v1
+; CHECK-NEXT: ret
+ %tmp = bitcast <4 x i32> %a to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <2 x i32>
+ %vmovl.i.i.i = zext <2 x i32> %tmp1 to <2 x i64>
+ %tmp2 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i4.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i.i4.i to <2 x i32>
+ %vmovl.i.i5.i = zext <2 x i32> %tmp3 to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i.i, %vmovl.i.i5.i
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @uaddw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uaddw8h:
+;CHECK: uaddw.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
+ %tmp4 = add <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @uaddw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uaddw4s:
+;CHECK: uaddw.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
+ %tmp4 = add <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @uaddw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uaddw2d:
+;CHECK: uaddw.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
+ %tmp4 = add <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i16> @uaddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uaddw2_8h:
+;CHECK: uaddw2.8h
+ %tmp1 = load <8 x i16>* %A
+
+ %tmp2 = load <16 x i8>* %B
+ %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext2 = zext <8 x i8> %high2 to <8 x i16>
+
+ %res = add <8 x i16> %tmp1, %ext2
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @uaddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uaddw2_4s:
+;CHECK: uaddw2.4s
+ %tmp1 = load <4 x i32>* %A
+
+ %tmp2 = load <8 x i16>* %B
+ %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext2 = zext <4 x i16> %high2 to <4 x i32>
+
+ %res = add <4 x i32> %tmp1, %ext2
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @uaddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uaddw2_2d:
+;CHECK: uaddw2.2d
+ %tmp1 = load <2 x i64>* %A
+
+ %tmp2 = load <4 x i32>* %B
+ %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext2 = zext <2 x i32> %high2 to <2 x i64>
+
+ %res = add <2 x i64> %tmp1, %ext2
+ ret <2 x i64> %res
+}
+
+define <8 x i16> @saddw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: saddw8h:
+;CHECK: saddw.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
+ %tmp4 = add <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @saddw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: saddw4s:
+;CHECK: saddw.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
+ %tmp4 = add <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @saddw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: saddw2d:
+;CHECK: saddw.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
+ %tmp4 = add <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i16> @saddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: saddw2_8h:
+;CHECK: saddw2.8h
+ %tmp1 = load <8 x i16>* %A
+
+ %tmp2 = load <16 x i8>* %B
+ %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext2 = sext <8 x i8> %high2 to <8 x i16>
+
+ %res = add <8 x i16> %tmp1, %ext2
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @saddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: saddw2_4s:
+;CHECK: saddw2.4s
+ %tmp1 = load <4 x i32>* %A
+
+ %tmp2 = load <8 x i16>* %B
+ %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext2 = sext <4 x i16> %high2 to <4 x i32>
+
+ %res = add <4 x i32> %tmp1, %ext2
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @saddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: saddw2_2d:
+;CHECK: saddw2.2d
+ %tmp1 = load <2 x i64>* %A
+
+ %tmp2 = load <4 x i32>* %B
+ %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext2 = sext <2 x i32> %high2 to <2 x i64>
+
+ %res = add <2 x i64> %tmp1, %ext2
+ ret <2 x i64> %res
+}
+
+define <4 x i16> @saddlp4h(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: saddlp4h:
+;CHECK: saddlp.4h
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @saddlp2s(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: saddlp2s:
+;CHECK: saddlp.2s
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @saddlp1d(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: saddlp1d:
+;CHECK: saddlp.1d
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> %tmp1)
+ ret <1 x i64> %tmp3
+}
+
+define <8 x i16> @saddlp8h(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: saddlp8h:
+;CHECK: saddlp.8h
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @saddlp4s(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: saddlp4s:
+;CHECK: saddlp.4s
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @saddlp2d(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: saddlp2d:
+;CHECK: saddlp.2d
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
+ ret <2 x i64> %tmp3
+}
+
+declare <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32>) nounwind readnone
+
+declare <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
+
+define <4 x i16> @uaddlp4h(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: uaddlp4h:
+;CHECK: uaddlp.4h
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uaddlp2s(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: uaddlp2s:
+;CHECK: uaddlp.2s
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @uaddlp1d(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: uaddlp1d:
+;CHECK: uaddlp.1d
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %tmp1)
+ ret <1 x i64> %tmp3
+}
+
+define <8 x i16> @uaddlp8h(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: uaddlp8h:
+;CHECK: uaddlp.8h
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @uaddlp4s(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: uaddlp4s:
+;CHECK: uaddlp.4s
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @uaddlp2d(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: uaddlp2d:
+;CHECK: uaddlp.2d
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
+ ret <2 x i64> %tmp3
+}
+
+declare <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32>) nounwind readnone
+
+declare <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
+
+define <4 x i16> @sadalp4h(<8 x i8>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sadalp4h:
+;CHECK: sadalp.4h
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
+ %tmp4 = load <4 x i16>* %B
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @sadalp2s(<4 x i16>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sadalp2s:
+;CHECK: sadalp.2s
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
+ %tmp4 = load <2 x i32>* %B
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <8 x i16> @sadalp8h(<16 x i8>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sadalp8h:
+;CHECK: sadalp.8h
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
+ %tmp4 = load <8 x i16>* %B
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @sadalp4s(<8 x i16>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sadalp4s:
+;CHECK: sadalp.4s
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
+ %tmp4 = load <4 x i32>* %B
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @sadalp2d(<4 x i32>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: sadalp2d:
+;CHECK: sadalp.2d
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
+ %tmp4 = load <2 x i64>* %B
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <4 x i16> @uadalp4h(<8 x i8>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uadalp4h:
+;CHECK: uadalp.4h
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
+ %tmp4 = load <4 x i16>* %B
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @uadalp2s(<4 x i16>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uadalp2s:
+;CHECK: uadalp.2s
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
+ %tmp4 = load <2 x i32>* %B
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <8 x i16> @uadalp8h(<16 x i8>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uadalp8h:
+;CHECK: uadalp.8h
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
+ %tmp4 = load <8 x i16>* %B
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @uadalp4s(<8 x i16>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uadalp4s:
+;CHECK: uadalp.4s
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
+ %tmp4 = load <4 x i32>* %B
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @uadalp2d(<4 x i32>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: uadalp2d:
+;CHECK: uadalp.2d
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
+ %tmp4 = load <2 x i64>* %B
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i8> @addp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: addp_8b:
+;CHECK: addp.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @addp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: addp_16b:
+;CHECK: addp.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @addp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: addp_4h:
+;CHECK: addp.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @addp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: addp_8h:
+;CHECK: addp.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @addp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: addp_2s:
+;CHECK: addp.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @addp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: addp_4s:
+;CHECK: addp.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @addp_2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: addp_2d:
+;CHECK: addp.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x float> @faddp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: faddp_2s:
+;CHECK: faddp.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.addp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @faddp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: faddp_4s:
+;CHECK: faddp.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.addp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @faddp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: faddp_2d:
+;CHECK: faddp.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.addp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.addp.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.addp.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.addp.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
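+; Widening long operations on an extracted high half paired with a duplicated
+; scalar should use the "2" (high-half) forms directly, without an
+; intervening ext.16b.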
+define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
+; CHECK-LABEL: uaddl2_duprhs:
+; CHECK-NOT: ext.16b
+; CHECK: uaddl2.2d
+ %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
+ %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %lhs.ext = zext <2 x i32> %lhs.high to <2 x i64>
+ %rhs.ext = zext <2 x i32> %rhsvec to <2 x i64>
+
+ %res = add <2 x i64> %lhs.ext, %rhs.ext
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: saddl2_duplhs:
+; CHECK-NOT: ext.16b
+; CHECK: saddl2.2d
+ %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
+ %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
+
+ %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %lhs.ext = sext <2 x i32> %lhsvec to <2 x i64>
+ %rhs.ext = sext <2 x i32> %rhs.high to <2 x i64>
+
+ %res = add <2 x i64> %lhs.ext, %rhs.ext
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
+; CHECK-LABEL: usubl2_duprhs:
+; CHECK-NOT: ext.16b
+; CHECK: usubl2.2d
+ %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
+ %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %lhs.ext = zext <2 x i32> %lhs.high to <2 x i64>
+ %rhs.ext = zext <2 x i32> %rhsvec to <2 x i64>
+
+ %res = sub <2 x i64> %lhs.ext, %rhs.ext
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: ssubl2_duplhs:
+; CHECK-NOT: ext.16b
+; CHECK: ssubl2.2d
+ %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
+ %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
+
+ %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %lhs.ext = sext <2 x i32> %lhsvec to <2 x i64>
+ %rhs.ext = sext <2 x i32> %rhs.high to <2 x i64>
+
+ %res = sub <2 x i64> %lhs.ext, %rhs.ext
+ ret <2 x i64> %res
+}
+
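+; The "_natural" tests express the narrowing-high operations in plain IR
+; (add/sub, lshr by half the element width, trunc) rather than via intrinsics;
+; addhn/subhn and their "2" forms should still be selected.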
+define <8 x i8> @addhn8b_natural(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: addhn8b_natural:
+;CHECK: addhn.8b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %sum = add <8 x i16> %tmp1, %tmp2
+ %high_bits = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
+ ret <8 x i8> %narrowed
+}
+
+define <4 x i16> @addhn4h_natural(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: addhn4h_natural:
+;CHECK: addhn.4h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %sum = add <4 x i32> %tmp1, %tmp2
+ %high_bits = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
+ %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
+ ret <4 x i16> %narrowed
+}
+
+define <2 x i32> @addhn2s_natural(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: addhn2s_natural:
+;CHECK: addhn.2s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %sum = add <2 x i64> %tmp1, %tmp2
+ %high_bits = lshr <2 x i64> %sum, <i64 32, i64 32>
+ %narrowed = trunc <2 x i64> %high_bits to <2 x i32>
+ ret <2 x i32> %narrowed
+}
+
+define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: addhn2_16b_natural:
+;CHECK: addhn2.16b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %sum = add <8 x i16> %tmp1, %tmp2
+ %high_bits = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
+ %res = shufflevector <8 x i8> %low, <8 x i8> %narrowed, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: addhn2_8h_natural:
+;CHECK: addhn2.8h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %sum = add <4 x i32> %tmp1, %tmp2
+ %high_bits = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
+ %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
+ %res = shufflevector <4 x i16> %low, <4 x i16> %narrowed, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, <2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: addhn2_4s_natural:
+;CHECK: addhn2.4s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %sum = add <2 x i64> %tmp1, %tmp2
+ %high_bits = lshr <2 x i64> %sum, <i64 32, i64 32>
+ %narrowed = trunc <2 x i64> %high_bits to <2 x i32>
+ %res = shufflevector <2 x i32> %low, <2 x i32> %narrowed, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+define <8 x i8> @subhn8b_natural(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: subhn8b_natural:
+;CHECK: subhn.8b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %diff = sub <8 x i16> %tmp1, %tmp2
+ %high_bits = lshr <8 x i16> %diff, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
+ ret <8 x i8> %narrowed
+}
+
+define <4 x i16> @subhn4h_natural(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: subhn4h_natural:
+;CHECK: subhn.4h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %diff = sub <4 x i32> %tmp1, %tmp2
+ %high_bits = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
+ %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
+ ret <4 x i16> %narrowed
+}
+
+define <2 x i32> @subhn2s_natural(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: subhn2s_natural:
+;CHECK: subhn.2s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %diff = sub <2 x i64> %tmp1, %tmp2
+ %high_bits = lshr <2 x i64> %diff, <i64 32, i64 32>
+ %narrowed = trunc <2 x i64> %high_bits to <2 x i32>
+ ret <2 x i32> %narrowed
+}
+
+define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: subhn2_16b_natural:
+;CHECK: subhn2.16b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %diff = sub <8 x i16> %tmp1, %tmp2
+ %high_bits = lshr <8 x i16> %diff, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
+ %res = shufflevector <8 x i8> %low, <8 x i8> %narrowed, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: subhn2_8h_natural:
+;CHECK: subhn2.8h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %diff = sub <4 x i32> %tmp1, %tmp2
+ %high_bits = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
+ %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
+ %res = shufflevector <4 x i16> %low, <4 x i16> %narrowed, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, <2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: subhn2_4s_natural:
+;CHECK: subhn2.4s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %diff = sub <2 x i64> %tmp1, %tmp2
+ %high_bits = lshr <2 x i64> %diff, <i64 32, i64 32>
+ %narrowed = trunc <2 x i64> %high_bits to <2 x i32>
+ %res = shufflevector <2 x i32> %low, <2 x i32> %narrowed, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
diff --git a/test/CodeGen/AArch64/arm64-vaddlv.ll b/test/CodeGen/AArch64/arm64-vaddlv.ll
new file mode 100644
index 000000000000..2d6413812ec8
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vaddlv.ll
@@ -0,0 +1,26 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+define i64 @test_vaddlv_s32(<2 x i32> %a1) nounwind readnone {
+; CHECK: test_vaddlv_s32
+; CHECK: saddlp.1d v[[REGNUM:[0-9]+]], v[[INREG:[0-9]+]]
+; CHECK-NEXT: fmov x[[OUTREG:[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddlv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> %a1) nounwind
+ ret i64 %vaddlv.i
+}
+
+define i64 @test_vaddlv_u32(<2 x i32> %a1) nounwind readnone {
+; CHECK: test_vaddlv_u32
+; CHECK: uaddlp.1d v[[REGNUM:[0-9]+]], v[[INREG:[0-9]+]]
+; CHECK-NEXT: fmov x[[OUTREG:[0-9]+]], d[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddlv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32> %a1) nounwind
+ ret i64 %vaddlv.i
+}
+
+declare i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32>) nounwind readnone
+
+declare i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32>) nounwind readnone
+
diff --git a/test/CodeGen/AArch64/arm64-vaddv.ll b/test/CodeGen/AArch64/arm64-vaddv.ll
new file mode 100644
index 000000000000..2d92ce6ea570
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vaddv.ll
@@ -0,0 +1,245 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s
+
+define signext i8 @test_vaddv_s8(<8 x i8> %a1) {
+; CHECK-LABEL: test_vaddv_s8:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a1)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define signext i16 @test_vaddv_s16(<4 x i16> %a1) {
+; CHECK-LABEL: test_vaddv_s16:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a1)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddv_s32(<2 x i32> %a1) {
+; CHECK-LABEL: test_vaddv_s32:
+; 2 x i32 is not supported by addv in the ISA, so this is a special case lowered to addp
+; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a1)
+ ret i32 %vaddv.i
+}
+
+define i64 @test_vaddv_s64(<2 x i64> %a1) {
+; CHECK-LABEL: test_vaddv_s64:
+; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
+; CHECK-NEXT: fmov x0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a1)
+ ret i64 %vaddv.i
+}
+
+define zeroext i8 @test_vaddv_u8(<8 x i8> %a1) {
+; CHECK-LABEL: test_vaddv_u8:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a1)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define i32 @test_vaddv_u8_masked(<8 x i8> %a1) {
+; CHECK-LABEL: test_vaddv_u8_masked:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a1)
+ %0 = and i32 %vaddv.i, 511 ; 0x1ff
+ ret i32 %0
+}
+
+define zeroext i16 @test_vaddv_u16(<4 x i16> %a1) {
+; CHECK-LABEL: test_vaddv_u16:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a1)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddv_u16_masked(<4 x i16> %a1) {
+; CHECK-LABEL: test_vaddv_u16_masked:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a1)
+ %0 = and i32 %vaddv.i, 3276799 ; 0x31ffff
+ ret i32 %0
+}
+
+define i32 @test_vaddv_u32(<2 x i32> %a1) {
+; CHECK-LABEL: test_vaddv_u32:
+; 2 x i32 is not supported by addv in the ISA, so this is a special case lowered to addp
+; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> %a1)
+ ret i32 %vaddv.i
+}
+
+define float @test_vaddv_f32(<2 x float> %a1) {
+; CHECK-LABEL: test_vaddv_f32:
+; CHECK: faddp.2s s0, v0
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call float @llvm.aarch64.neon.faddv.f32.v2f32(<2 x float> %a1)
+ ret float %vaddv.i
+}
+
+define float @test_vaddv_v4f32(<4 x float> %a1) {
+; CHECK-LABEL: test_vaddv_v4f32:
+; CHECK: faddp.4s [[REGNUM:v[0-9]+]], v0, v0
+; CHECK: faddp.2s s0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call float @llvm.aarch64.neon.faddv.f32.v4f32(<4 x float> %a1)
+ ret float %vaddv.i
+}
+
+define double @test_vaddv_f64(<2 x double> %a1) {
+; CHECK-LABEL: test_vaddv_f64:
+; CHECK: faddp.2d d0, v0
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call double @llvm.aarch64.neon.faddv.f64.v2f64(<2 x double> %a1)
+ ret double %vaddv.i
+}
+
+define i64 @test_vaddv_u64(<2 x i64> %a1) {
+; CHECK-LABEL: test_vaddv_u64:
+; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
+; CHECK-NEXT: fmov x0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a1)
+ ret i64 %vaddv.i
+}
+
+define <1 x i64> @test_vaddv_u64_to_vec(<2 x i64> %a1) {
+; CHECK-LABEL: test_vaddv_u64_to_vec:
+; CHECK: addp.2d d0, v0
+; CHECK-NOT: fmov
+; CHECK-NOT: ins
+; CHECK: ret
+entry:
+ %vaddv.i = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a1)
+ %vec = insertelement <1 x i64> undef, i64 %vaddv.i, i32 0
+ ret <1 x i64> %vec
+}
+
+define signext i8 @test_vaddvq_s8(<16 x i8> %a1) {
+; CHECK-LABEL: test_vaddvq_s8:
+; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a1)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define signext i16 @test_vaddvq_s16(<8 x i16> %a1) {
+; CHECK-LABEL: test_vaddvq_s16:
+; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a1)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_s32(<4 x i32> %a1) {
+; CHECK-LABEL: test_vaddvq_s32:
+; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
+; CHECK-NEXT: fmov w0, [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a1)
+ ret i32 %vaddv.i
+}
+
+define zeroext i8 @test_vaddvq_u8(<16 x i8> %a1) {
+; CHECK-LABEL: test_vaddvq_u8:
+; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a1)
+ %0 = trunc i32 %vaddv.i to i8
+ ret i8 %0
+}
+
+define zeroext i16 @test_vaddvq_u16(<8 x i16> %a1) {
+; CHECK-LABEL: test_vaddvq_u16:
+; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
+; CHECK-NEXT: fmov w0, s[[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a1)
+ %0 = trunc i32 %vaddv.i to i16
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_u32(<4 x i32> %a1) {
+; CHECK-LABEL: test_vaddvq_u32:
+; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
+; CHECK-NEXT: fmov [[FMOVRES:w[0-9]+]], [[REGNUM]]
+; CHECK-NEXT: ret
+entry:
+ %vaddv.i = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a1)
+ ret i32 %vaddv.i
+}
+
+declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
+
+declare i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64>)
+
+declare i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32>)
+
+declare i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32>)
+
+declare i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
+
+declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
+
+declare float @llvm.aarch64.neon.faddv.f32.v2f32(<2 x float> %a1)
+declare float @llvm.aarch64.neon.faddv.f32.v4f32(<4 x float> %a1)
+declare double @llvm.aarch64.neon.faddv.f64.v2f64(<2 x double> %a1)
diff --git a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
new file mode 100644
index 000000000000..36a7bfd92520
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
@@ -0,0 +1,143 @@
+; RUN: llc -verify-machineinstrs -mtriple=arm64-linux-gnu -pre-RA-sched=linearize -enable-misched=false < %s | FileCheck %s
+
+%va_list = type {i8*, i8*, i8*, i32, i32}
+
+@var = global %va_list zeroinitializer, align 8
+
+declare void @llvm.va_start(i8*)
+
+define void @test_simple(i32 %n, ...) {
+; CHECK-LABEL: test_simple:
+; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
+
+; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
+
+; CHECK: stp x1, x2, [sp, #[[GR_BASE:[0-9]+]]]
+; ... omit middle ones ...
+; CHECK: str x7, [sp, #
+
+; CHECK: stp q0, q1, [sp]
+; ... omit middle ones ...
+; CHECK: stp q6, q7, [sp, #
+
+; CHECK: str [[STACK_TOP]], [x[[VA_LIST_HI]], :lo12:var]
+
+; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
+; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #56
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
+; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
+
+; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
+; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #128
+; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
+
+; CHECK: movn [[GR_OFFS:w[0-9]+]], #0x37
+; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24]
+
+; CHECK: orr [[VR_OFFS:w[0-9]+]], wzr, #0xffffff80
+; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+
+ ret void
+}
+
+define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
+; CHECK-LABEL: test_fewargs:
+; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
+
+; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
+
+; CHECK: stp x3, x4, [sp, #[[GR_BASE:[0-9]+]]]
+; ... omit middle ones ...
+; CHECK: str x7, [sp, #
+
+; CHECK: stp q1, q2, [sp]
+; ... omit middle ones ...
+; CHECK: str q7, [sp, #
+
+; CHECK: str [[STACK_TOP]], [x[[VA_LIST_HI]], :lo12:var]
+
+; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
+; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #40
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
+; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
+
+; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
+; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #112
+; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
+
+; CHECK: movn [[GR_OFFS:w[0-9]+]], #0x27
+; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24]
+
+; CHECK: movn [[VR_OFFS:w[0-9]+]], #0x6f
+; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+
+ ret void
+}
+
+define void @test_nospare([8 x i64], [8 x float], ...) {
+; CHECK-LABEL: test_nospare:
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+; CHECK-NOT: sub sp, sp
+; CHECK: mov [[STACK:x[0-9]+]], sp
+; CHECK: str [[STACK]], [{{x[0-9]+}}, :lo12:var]
+
+ ret void
+}
+
+; If there are non-variadic arguments on the stack (here two i64s), then the
+; __stack field should point just past them.
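+; Eight of the ten i64s go in x0-x7, leaving 16 bytes on the stack; the 80-byte
+; register save area plus those 16 bytes gives the sp + 96 offset checked below.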
+define void @test_offsetstack([10 x i64], [3 x float], ...) {
+; CHECK-LABEL: test_offsetstack:
+; CHECK: sub sp, sp, #80
+; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #96
+; CHECK: str [[STACK_TOP]], [{{x[0-9]+}}, :lo12:var]
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_start(i8* %addr)
+ ret void
+}
+
+declare void @llvm.va_end(i8*)
+
+define void @test_va_end() nounwind {
+; CHECK-LABEL: test_va_end:
+; CHECK-NEXT: BB#0
+
+ %addr = bitcast %va_list* @var to i8*
+ call void @llvm.va_end(i8* %addr)
+
+ ret void
+; CHECK-NEXT: ret
+}
+
+declare void @llvm.va_copy(i8* %dest, i8* %src)
+
+@second_list = global %va_list zeroinitializer
+
+define void @test_va_copy() {
+; CHECK-LABEL: test_va_copy:
+ %srcaddr = bitcast %va_list* @var to i8*
+ %dstaddr = bitcast %va_list* @second_list to i8*
+ call void @llvm.va_copy(i8* %dstaddr, i8* %srcaddr)
+
+; CHECK: add x[[SRC:[0-9]+]], {{x[0-9]+}}, :lo12:var
+
+; CHECK: ldr [[BLOCK:q[0-9]+]], [x[[SRC]]]
+; CHECK: add x[[DST:[0-9]+]], {{x[0-9]+}}, :lo12:second_list
+; CHECK: str [[BLOCK]], [x[[DST]]]
+
+; CHECK: ldr [[BLOCK:q[0-9]+]], [x[[SRC]], #16]
+; CHECK: str [[BLOCK]], [x[[DST]], #16]
+ ret void
+; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/arm64-vbitwise.ll b/test/CodeGen/AArch64/arm64-vbitwise.ll
new file mode 100644
index 000000000000..93de95e52e53
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vbitwise.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @rbit_8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: rbit_8b:
+;CHECK: rbit.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8> %tmp1)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @rbit_16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: rbit_16b:
+;CHECK: rbit.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %tmp1)
+ ret <16 x i8> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8>) nounwind readnone
+
+define <8 x i16> @sxtl8h(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: sxtl8h:
+;CHECK: sshll.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
+ ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @uxtl8h(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: uxtl8h:
+;CHECK: ushll.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
+ ret <8 x i16> %tmp2
+}
+
+define <4 x i32> @sxtl4s(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: sxtl4s:
+;CHECK: sshll.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
+ ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @uxtl4s(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: uxtl4s:
+;CHECK: ushll.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @sxtl2d(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: sxtl2d:
+;CHECK: sshll.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @uxtl2d(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: uxtl2d:
+;CHECK: ushll.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
+ ret <2 x i64> %tmp2
+}
+
+; Check for incorrect use of vector bic.
+; rdar://11553859
+define void @test_vsliq(i8* nocapture %src, i8* nocapture %dest) nounwind noinline ssp {
+entry:
+; CHECK-LABEL: test_vsliq:
+; CHECK-NOT: bic
+; CHECK: movi.2d [[REG1:v[0-9]+]], #0x0000ff000000ff
+; CHECK: and.16b v{{[0-9]+}}, v{{[0-9]+}}, [[REG1]]
+ %0 = bitcast i8* %src to <16 x i8>*
+ %1 = load <16 x i8>* %0, align 16
+ %and.i = and <16 x i8> %1, <i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0>
+ %2 = bitcast <16 x i8> %and.i to <8 x i16>
+ %vshl_n = shl <8 x i16> %2, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %3 = or <8 x i16> %2, %vshl_n
+ %4 = bitcast <8 x i16> %3 to <4 x i32>
+ %vshl_n8 = shl <4 x i32> %4, <i32 16, i32 16, i32 16, i32 16>
+ %5 = or <4 x i32> %4, %vshl_n8
+ %6 = bitcast <4 x i32> %5 to <16 x i8>
+ %7 = bitcast i8* %dest to <16 x i8>*
+ store <16 x i8> %6, <16 x i8>* %7, align 16
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-vclz.ll b/test/CodeGen/AArch64/arm64-vclz.ll
new file mode 100644
index 000000000000..cf5670a0354f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vclz.ll
@@ -0,0 +1,109 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+define <8 x i8> @test_vclz_u8(<8 x i8> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclz_u8:
+ ; CHECK: clz.8b v0, v0
+ ; CHECK-NEXT: ret
+ %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
+ ret <8 x i8> %vclz.i
+}
+
+define <8 x i8> @test_vclz_s8(<8 x i8> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclz_s8:
+ ; CHECK: clz.8b v0, v0
+ ; CHECK-NEXT: ret
+ %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) nounwind
+ ret <8 x i8> %vclz.i
+}
+
+define <4 x i16> @test_vclz_u16(<4 x i16> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclz_u16:
+ ; CHECK: clz.4h v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
+ ret <4 x i16> %vclz1.i
+}
+
+define <4 x i16> @test_vclz_s16(<4 x i16> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclz_s16:
+ ; CHECK: clz.4h v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) nounwind
+ ret <4 x i16> %vclz1.i
+}
+
+define <2 x i32> @test_vclz_u32(<2 x i32> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclz_u32:
+ ; CHECK: clz.2s v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
+ ret <2 x i32> %vclz1.i
+}
+
+define <2 x i32> @test_vclz_s32(<2 x i32> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclz_s32:
+ ; CHECK: clz.2s v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) nounwind
+ ret <2 x i32> %vclz1.i
+}
+
+define <16 x i8> @test_vclzq_u8(<16 x i8> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclzq_u8:
+ ; CHECK: clz.16b v0, v0
+ ; CHECK-NEXT: ret
+ %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
+ ret <16 x i8> %vclz.i
+}
+
+define <16 x i8> @test_vclzq_s8(<16 x i8> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclzq_s8:
+ ; CHECK: clz.16b v0, v0
+ ; CHECK-NEXT: ret
+ %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) nounwind
+ ret <16 x i8> %vclz.i
+}
+
+define <8 x i16> @test_vclzq_u16(<8 x i16> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclzq_u16:
+ ; CHECK: clz.8h v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) nounwind
+ ret <8 x i16> %vclz1.i
+}
+
+define <8 x i16> @test_vclzq_s16(<8 x i16> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclzq_s16:
+ ; CHECK: clz.8h v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) nounwind
+ ret <8 x i16> %vclz1.i
+}
+
+define <4 x i32> @test_vclzq_u32(<4 x i32> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclzq_u32:
+ ; CHECK: clz.4s v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false) nounwind
+ ret <4 x i32> %vclz1.i
+}
+
+define <4 x i32> @test_vclzq_s32(<4 x i32> %a) nounwind readnone ssp {
+ ; CHECK-LABEL: test_vclzq_s32:
+ ; CHECK: clz.4s v0, v0
+ ; CHECK-NEXT: ret
+ %vclz1.i = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false) nounwind
+ ret <4 x i32> %vclz1.i
+}
+
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
+
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
+
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) nounwind readnone
+
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
+
+declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>, i1) nounwind readnone
+
+declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>, i1) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vcmp.ll b/test/CodeGen/AArch64/arm64-vcmp.ll
new file mode 100644
index 000000000000..982ab09ee69e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcmp.ll
@@ -0,0 +1,236 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+
+define void @fcmltz_4s(<4 x float> %a, <4 x i16>* %p) nounwind {
+;CHECK-LABEL: fcmltz_4s:
+;CHECK: fcmlt.4s [[REG:v[0-9]+]], v0, #0
+;CHECK-NEXT: xtn.4h v[[REG_1:[0-9]+]], [[REG]]
+;CHECK-NEXT: str d[[REG_1]], [x0]
+;CHECK-NEXT: ret
+ %tmp = fcmp olt <4 x float> %a, zeroinitializer
+ %tmp2 = sext <4 x i1> %tmp to <4 x i16>
+ store <4 x i16> %tmp2, <4 x i16>* %p, align 8
+ ret void
+}
+
+define <2 x i32> @facge_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: facge_2s:
+;CHECK: facge.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @facge_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: facge_4s:
+;CHECK: facge.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @facge_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: facge_2d:
+;CHECK: facge.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <2 x i32> @facgt_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: facgt_2s:
+;CHECK: facgt.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.facgt.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @facgt_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: facgt_4s:
+;CHECK: facgt.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.facgt.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @facgt_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: facgt_2d:
+;CHECK: facgt.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.facgt.v2i64.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.facgt.v2i32.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.facgt.v4i32.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.facgt.v2i64.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define i32 @facge_s(float %A, float %B) nounwind {
+; CHECK-LABEL: facge_s:
+; CHECK: facge {{s[0-9]+}}, s0, s1
+ %mask = call i32 @llvm.aarch64.neon.facge.i32.f32(float %A, float %B)
+ ret i32 %mask
+}
+
+define i64 @facge_d(double %A, double %B) nounwind {
+; CHECK-LABEL: facge_d:
+; CHECK: facge {{d[0-9]+}}, d0, d1
+ %mask = call i64 @llvm.aarch64.neon.facge.i64.f64(double %A, double %B)
+ ret i64 %mask
+}
+
+declare i64 @llvm.aarch64.neon.facge.i64.f64(double, double)
+declare i32 @llvm.aarch64.neon.facge.i32.f32(float, float)
+
+define i32 @facgt_s(float %A, float %B) nounwind {
+; CHECK-LABEL: facgt_s:
+; CHECK: facgt {{s[0-9]+}}, s0, s1
+ %mask = call i32 @llvm.aarch64.neon.facgt.i32.f32(float %A, float %B)
+ ret i32 %mask
+}
+
+define i64 @facgt_d(double %A, double %B) nounwind {
+; CHECK-LABEL: facgt_d:
+; CHECK: facgt {{d[0-9]+}}, d0, d1
+ %mask = call i64 @llvm.aarch64.neon.facgt.i64.f64(double %A, double %B)
+ ret i64 %mask
+}
+
+declare i64 @llvm.aarch64.neon.facgt.i64.f64(double, double)
+declare i32 @llvm.aarch64.neon.facgt.i32.f32(float, float)
+
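+; cmtst (compare bitwise test bits nonzero): each lane is set to all ones when
+; (a & b) != 0, matching the and/icmp ne/sext pattern below.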
+define <8 x i8> @cmtst_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: cmtst_8b:
+;CHECK: cmtst.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %commonbits = and <8 x i8> %tmp1, %tmp2
+ %mask = icmp ne <8 x i8> %commonbits, zeroinitializer
+ %res = sext <8 x i1> %mask to <8 x i8>
+ ret <8 x i8> %res
+}
+
+define <16 x i8> @cmtst_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: cmtst_16b:
+;CHECK: cmtst.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %commonbits = and <16 x i8> %tmp1, %tmp2
+ %mask = icmp ne <16 x i8> %commonbits, zeroinitializer
+ %res = sext <16 x i1> %mask to <16 x i8>
+ ret <16 x i8> %res
+}
+
+define <4 x i16> @cmtst_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: cmtst_4h:
+;CHECK: cmtst.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %commonbits = and <4 x i16> %tmp1, %tmp2
+ %mask = icmp ne <4 x i16> %commonbits, zeroinitializer
+ %res = sext <4 x i1> %mask to <4 x i16>
+ ret <4 x i16> %res
+}
+
+define <8 x i16> @cmtst_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: cmtst_8h:
+;CHECK: cmtst.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %commonbits = and <8 x i16> %tmp1, %tmp2
+ %mask = icmp ne <8 x i16> %commonbits, zeroinitializer
+ %res = sext <8 x i1> %mask to <8 x i16>
+ ret <8 x i16> %res
+}
+
+define <2 x i32> @cmtst_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: cmtst_2s:
+;CHECK: cmtst.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %commonbits = and <2 x i32> %tmp1, %tmp2
+ %mask = icmp ne <2 x i32> %commonbits, zeroinitializer
+ %res = sext <2 x i1> %mask to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <4 x i32> @cmtst_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: cmtst_4s:
+;CHECK: cmtst.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %commonbits = and <4 x i32> %tmp1, %tmp2
+ %mask = icmp ne <4 x i32> %commonbits, zeroinitializer
+ %res = sext <4 x i1> %mask to <4 x i32>
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @cmtst_2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: cmtst_2d:
+;CHECK: cmtst.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %commonbits = and <2 x i64> %tmp1, %tmp2
+ %mask = icmp ne <2 x i64> %commonbits, zeroinitializer
+ %res = sext <2 x i1> %mask to <2 x i64>
+ ret <2 x i64> %res
+}
+
+define <1 x i64> @fcmeq_d(<1 x double> %A, <1 x double> %B) nounwind {
+; CHECK-LABEL: fcmeq_d:
+; CHECK: fcmeq {{d[0-9]+}}, d0, d1
+ %tst = fcmp oeq <1 x double> %A, %B
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmge_d(<1 x double> %A, <1 x double> %B) nounwind {
+; CHECK-LABEL: fcmge_d:
+; CHECK: fcmge {{d[0-9]+}}, d0, d1
+ %tst = fcmp oge <1 x double> %A, %B
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmle_d(<1 x double> %A, <1 x double> %B) nounwind {
+; CHECK-LABEL: fcmle_d:
+; CHECK: fcmge {{d[0-9]+}}, d1, d0
+ %tst = fcmp ole <1 x double> %A, %B
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmgt_d(<1 x double> %A, <1 x double> %B) nounwind {
+; CHECK-LABEL: fcmgt_d:
+; CHECK: fcmgt {{d[0-9]+}}, d0, d1
+ %tst = fcmp ogt <1 x double> %A, %B
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @fcmlt_d(<1 x double> %A, <1 x double> %B) nounwind {
+; CHECK-LABEL: fcmlt_d:
+; CHECK: fcmgt {{d[0-9]+}}, d1, d0
+ %tst = fcmp olt <1 x double> %A, %B
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
+
+define <1 x i64> @cmnez_d(<1 x i64> %A) nounwind {
+; CHECK-LABEL: cmnez_d:
+; CHECK: cmeq d[[EQ:[0-9]+]], d0, #0
+; CHECK: mvn.8b v0, v[[EQ]]
+ %tst = icmp ne <1 x i64> %A, zeroinitializer
+ %mask = sext <1 x i1> %tst to <1 x i64>
+ ret <1 x i64> %mask
+}
diff --git a/test/CodeGen/AArch64/arm64-vcnt.ll b/test/CodeGen/AArch64/arm64-vcnt.ll
new file mode 100644
index 000000000000..903501ec16a9
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcnt.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @cls_8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: cls_8b:
+;CHECK: cls.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> %tmp1)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @cls_16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: cls_16b:
+;CHECK: cls.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> %tmp1)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @cls_4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: cls_4h:
+;CHECK: cls.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> %tmp1)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @cls_8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: cls_8h:
+;CHECK: cls.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> %tmp1)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @cls_2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: cls_2s:
+;CHECK: cls.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @cls_4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: cls_4s:
+;CHECK: cls.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vcombine.ll b/test/CodeGen/AArch64/arm64-vcombine.ll
new file mode 100644
index 000000000000..fa1299603af3
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcombine.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+; LowerCONCAT_VECTORS() was reversing the order of the two parts.
+; rdar://11558157
+; rdar://11559553
+define <16 x i8> @test(<16 x i8> %q0, <16 x i8> %q1, i8* nocapture %dest) nounwind {
+entry:
+; CHECK-LABEL: test:
+; CHECK: ins.d v0[1], v1[0]
+ %0 = bitcast <16 x i8> %q0 to <2 x i64>
+ %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> zeroinitializer
+ %1 = bitcast <16 x i8> %q1 to <2 x i64>
+ %shuffle.i4 = shufflevector <2 x i64> %1, <2 x i64> undef, <1 x i32> zeroinitializer
+ %shuffle.i3 = shufflevector <1 x i64> %shuffle.i, <1 x i64> %shuffle.i4, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i3 to <16 x i8>
+ ret <16 x i8> %2
+}
diff --git a/test/CodeGen/AArch64/arm64-vcvt.ll b/test/CodeGen/AArch64/arm64-vcvt.ll
new file mode 100644
index 000000000000..6570f0e3e7eb
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcvt.ll
@@ -0,0 +1,686 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtas_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtas.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtas_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtas_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtas.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtas_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtas_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtas.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtau_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtau_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtau.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtau_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtau_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtau.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtau_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtau_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtau.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtms_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtms_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtms.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtms_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtms_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtms.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtms_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtms_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtms.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtmu_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtmu_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtmu.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtmu_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtmu_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtmu.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtmu_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtmu_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtmu.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtps_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtps_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtps.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtps_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtps_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtps.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtps_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtps_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtps.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtpu_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtpu_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtpu.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtpu_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtpu_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtpu.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtpu_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtpu_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtpu.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtns_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtns_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtns.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtns_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtns_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtns.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtns_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtns_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtns.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtnu_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtnu_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtnu.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtnu_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtnu_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtnu.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float> %A)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtnu_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtnu_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtnu.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double> %A)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtzs_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzs_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzs.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = fptosi <2 x float> %A to <2 x i32>
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtzs_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzs_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzs.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = fptosi <4 x float> %A to <4 x i32>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtzs_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtzs_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtzs.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = fptosi <2 x double> %A to <2 x i64>
+ ret <2 x i64> %tmp3
+}
+
+
+define <2 x i32> @fcvtzu_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzu_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzu.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = fptoui <2 x float> %A to <2 x i32>
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtzu_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzu_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzu.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = fptoui <4 x float> %A to <4 x i32>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtzu_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtzu_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtzu.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = fptoui <2 x double> %A to <2 x i64>
+ ret <2 x i64> %tmp3
+}
+
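+; llvm.round (round to nearest, ties away from zero) lowers to frinta.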
+define <2 x float> @frinta_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: frinta_2s:
+;CHECK-NOT: ld1
+;CHECK: frinta.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frinta_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: frinta_4s:
+;CHECK-NOT: ld1
+;CHECK: frinta.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frinta_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: frinta_2d:
+;CHECK-NOT: ld1
+;CHECK: frinta.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.round.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.round.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.round.v2f64(<2 x double>) nounwind readnone
+
+define <2 x float> @frinti_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: frinti_2s:
+;CHECK-NOT: ld1
+;CHECK: frinti.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frinti_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: frinti_4s:
+;CHECK-NOT: ld1
+;CHECK: frinti.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %A)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frinti_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: frinti_2d:
+;CHECK-NOT: ld1
+;CHECK: frinti.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %A)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) nounwind readnone
+
+define <2 x float> @frintm_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: frintm_2s:
+;CHECK-NOT: ld1
+;CHECK: frintm.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frintm_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: frintm_4s:
+;CHECK-NOT: ld1
+;CHECK: frintm.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frintm_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: frintm_2d:
+;CHECK-NOT: ld1
+;CHECK: frintm.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.floor.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
+
+define <2 x float> @frintn_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: frintn_2s:
+;CHECK-NOT: ld1
+;CHECK: frintn.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frintn_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: frintn_4s:
+;CHECK-NOT: ld1
+;CHECK: frintn.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float> %A)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frintn_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: frintn_2d:
+;CHECK-NOT: ld1
+;CHECK: frintn.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double> %A)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double>) nounwind readnone
+
+define <2 x float> @frintp_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: frintp_2s:
+;CHECK-NOT: ld1
+;CHECK: frintp.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frintp_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: frintp_4s:
+;CHECK-NOT: ld1
+;CHECK: frintp.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frintp_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: frintp_2d:
+;CHECK-NOT: ld1
+;CHECK: frintp.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.ceil.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>) nounwind readnone
+
+define <2 x float> @frintx_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: frintx_2s:
+;CHECK-NOT: ld1
+;CHECK: frintx.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.rint.v2f32(<2 x float> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frintx_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: frintx_4s:
+;CHECK-NOT: ld1
+;CHECK: frintx.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.rint.v4f32(<4 x float> %A)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frintx_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: frintx_2d:
+;CHECK-NOT: ld1
+;CHECK: frintx.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.rint.v2f64(<2 x double> %A)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.rint.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.rint.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.rint.v2f64(<2 x double>) nounwind readnone
+
+define <2 x float> @frintz_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: frintz_2s:
+;CHECK-NOT: ld1
+;CHECK: frintz.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frintz_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: frintz_4s:
+;CHECK-NOT: ld1
+;CHECK: frintz.4s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frintz_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: frintz_2d:
+;CHECK-NOT: ld1
+;CHECK: frintz.2d v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.trunc.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.trunc.v2f64(<2 x double>) nounwind readnone
+
+define <2 x float> @fcvtxn_2s(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtxn_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtxn v0.2s, v0.2d
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %A)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fcvtxn_4s(<2 x float> %ret, <2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtxn_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtxn2 v0.4s, v1.2d
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %A)
+ %res = shufflevector <2 x float> %ret, <2 x float> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x float> %res
+}
+
+declare <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double>) nounwind readnone
+
+define <2 x i32> @fcvtzsc_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzsc_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzs.2s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> %A, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtzsc_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzsc_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzs.4s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %A, i32 1)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtzsc_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtzsc_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtzs.2d v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double> %A, i32 1)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float>, i32) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double>, i32) nounwind readnone
+
+define <2 x i32> @fcvtzuc_2s(<2 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzuc_2s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzu.2s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> %A, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @fcvtzuc_4s(<4 x float> %A) nounwind {
+;CHECK-LABEL: fcvtzuc_4s:
+;CHECK-NOT: ld1
+;CHECK: fcvtzu.4s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %A, i32 1)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @fcvtzuc_2d(<2 x double> %A) nounwind {
+;CHECK-LABEL: fcvtzuc_2d:
+;CHECK-NOT: ld1
+;CHECK: fcvtzu.2d v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double> %A, i32 1)
+ ret <2 x i64> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float>, i32) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double>, i32) nounwind readnone
+
+define <2 x float> @scvtf_2sc(<2 x i32> %A) nounwind {
+;CHECK-LABEL: scvtf_2sc:
+;CHECK-NOT: ld1
+;CHECK: scvtf.2s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %A, i32 1)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @scvtf_4sc(<4 x i32> %A) nounwind {
+;CHECK-LABEL: scvtf_4sc:
+;CHECK-NOT: ld1
+;CHECK: scvtf.4s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %A, i32 1)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @scvtf_2dc(<2 x i64> %A) nounwind {
+;CHECK-LABEL: scvtf_2dc:
+;CHECK-NOT: ld1
+;CHECK: scvtf.2d v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %A, i32 1)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
+
+define <2 x float> @ucvtf_2sc(<2 x i32> %A) nounwind {
+;CHECK-LABEL: ucvtf_2sc:
+;CHECK-NOT: ld1
+;CHECK: ucvtf.2s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %A, i32 1)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @ucvtf_4sc(<4 x i32> %A) nounwind {
+;CHECK-LABEL: ucvtf_4sc:
+;CHECK-NOT: ld1
+;CHECK: ucvtf.4s v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %A, i32 1)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @ucvtf_2dc(<2 x i64> %A) nounwind {
+;CHECK-LABEL: ucvtf_2dc:
+;CHECK-NOT: ld1
+;CHECK: ucvtf.2d v0, v0, #1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %A, i32 1)
+ ret <2 x double> %tmp3
+}
+
+
+;CHECK-LABEL: autogen_SD28458:
+;CHECK: fcvt
+;CHECK: ret
+define void @autogen_SD28458(<8 x double> %val.f64, <8 x float>* %addr.f32) {
+ %Tr53 = fptrunc <8 x double> %val.f64 to <8 x float>
+ store <8 x float> %Tr53, <8 x float>* %addr.f32
+ ret void
+}
+
+;CHECK-LABEL: autogen_SD19225:
+;CHECK: fcvt
+;CHECK: ret
+define void @autogen_SD19225(<8 x double>* %addr.f64, <8 x float>* %addr.f32) {
+ %A = load <8 x float>* %addr.f32
+ %Tr53 = fpext <8 x float> %A to <8 x double>
+ store <8 x double> %Tr53, <8 x double>* %addr.f64
+ ret void
+}
+
+declare <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vcvt_f.ll b/test/CodeGen/AArch64/arm64-vcvt_f.ll
new file mode 100644
index 000000000000..1f393c21a1a1
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcvt_f.ll
@@ -0,0 +1,82 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -O0 -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x double> @test_vcvt_f64_f32(<2 x float> %x) nounwind readnone ssp {
+; CHECK-LABEL: test_vcvt_f64_f32:
+ %vcvt1.i = fpext <2 x float> %x to <2 x double>
+; CHECK: fcvtl v0.2d, v0.2s
+ ret <2 x double> %vcvt1.i
+; CHECK: ret
+}
+
+define <2 x double> @test_vcvt_high_f64_f32(<4 x float> %x) nounwind readnone ssp {
+; CHECK-LABEL: test_vcvt_high_f64_f32:
+ %cvt_in = shufflevector <4 x float> %x, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+ %vcvt1.i = fpext <2 x float> %cvt_in to <2 x double>
+; CHECK: fcvtl2 v0.2d, v0.4s
+ ret <2 x double> %vcvt1.i
+; CHECK: ret
+}
+
+define <2 x float> @test_vcvt_f32_f64(<2 x double> %v) nounwind readnone ssp {
+; CHECK-LABEL: test_vcvt_f32_f64:
+ %vcvt1.i = fptrunc <2 x double> %v to <2 x float>
+; CHECK: fcvtn
+ ret <2 x float> %vcvt1.i
+; CHECK: ret
+}
+
+define <4 x float> @test_vcvt_high_f32_f64(<2 x float> %x, <2 x double> %v) nounwind readnone ssp {
+; CHECK-LABEL: test_vcvt_high_f32_f64:
+
+ %cvt = fptrunc <2 x double> %v to <2 x float>
+ %vcvt2.i = shufflevector <2 x float> %x, <2 x float> %cvt, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK: fcvtn2
+ ret <4 x float> %vcvt2.i
+; CHECK: ret
+}
+
+define <2 x float> @test_vcvtx_f32_f64(<2 x double> %v) nounwind readnone ssp {
+; CHECK-LABEL: test_vcvtx_f32_f64:
+ %vcvtx1.i = tail call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %v) nounwind
+; CHECK: fcvtxn
+ ret <2 x float> %vcvtx1.i
+; CHECK: ret
+}
+
+define <4 x float> @test_vcvtx_high_f32_f64(<2 x float> %x, <2 x double> %v) nounwind readnone ssp {
+; CHECK-LABEL: test_vcvtx_high_f32_f64:
+ %vcvtx2.i = tail call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %v) nounwind
+ %res = shufflevector <2 x float> %x, <2 x float> %vcvtx2.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK: fcvtxn2
+ ret <4 x float> %res
+; CHECK: ret
+}
+
+
+declare <2 x double> @llvm.aarch64.neon.vcvthighfp2df(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.vcvtfp2df(<2 x float>) nounwind readnone
+
+declare <2 x float> @llvm.aarch64.neon.vcvtdf2fp(<2 x double>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.vcvthighdf2fp(<2 x float>, <2 x double>) nounwind readnone
+
+declare <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double>) nounwind readnone
+
+define i16 @to_half(float %in) {
+; CHECK-LABEL: to_half:
+; CHECK: fcvt h[[HALFVAL:[0-9]+]], s0
+; CHECK: fmov {{w[0-9]+}}, {{s[0-9]+}}
+ %res = call i16 @llvm.convert.to.fp16.f32(float %in)
+ ret i16 %res
+}
+
+define float @from_half(i16 %in) {
+; CHECK-LABEL: from_half:
+; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
+; CHECK: fcvt s0, {{h[0-9]+}}
+ %res = call float @llvm.convert.from.fp16.f32(i16 %in)
+ ret float %res
+}
+
+declare float @llvm.convert.from.fp16.f32(i16) #1
+declare i16 @llvm.convert.to.fp16.f32(float) #1
diff --git a/test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll b/test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
new file mode 100644
index 000000000000..1eb7b43d5755
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
@@ -0,0 +1,73 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x float> @ucvt(<2 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: ucvt:
+; CHECK: ucvtf.2s v0, v0
+; CHECK: ret
+
+ %vcvt.i = uitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %vcvt.i
+}
+
+define <2 x float> @scvt(<2 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: scvt:
+; CHECK: scvtf.2s v0, v0
+; CHECK: ret
+ %vcvt.i = sitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %vcvt.i
+}
+
+define <4 x float> @ucvtq(<4 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: ucvtq:
+; CHECK: ucvtf.4s v0, v0
+; CHECK: ret
+ %vcvt.i = uitofp <4 x i32> %a to <4 x float>
+ ret <4 x float> %vcvt.i
+}
+
+define <4 x float> @scvtq(<4 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: scvtq:
+; CHECK: scvtf.4s v0, v0
+; CHECK: ret
+ %vcvt.i = sitofp <4 x i32> %a to <4 x float>
+ ret <4 x float> %vcvt.i
+}
+
+define <4 x float> @cvtf16(<4 x i16> %a) nounwind readnone ssp {
+; CHECK-LABEL: cvtf16:
+; CHECK: fcvtl v0.4s, v0.4h
+; CHECK-NEXT: ret
+ %vcvt1.i = tail call <4 x float> @llvm.aarch64.neon.vcvthf2fp(<4 x i16> %a) nounwind
+ ret <4 x float> %vcvt1.i
+}
+
+define <4 x float> @cvtf16_high(<8 x i16> %a) nounwind readnone ssp {
+; CHECK-LABEL: cvtf16_high:
+; CHECK: fcvtl2 v0.4s, v0.8h
+; CHECK-NEXT: ret
+ %in = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vcvt1.i = tail call <4 x float> @llvm.aarch64.neon.vcvthf2fp(<4 x i16> %in) nounwind
+ ret <4 x float> %vcvt1.i
+}
+
+
+
+define <4 x i16> @cvtf16f32(<4 x float> %a) nounwind readnone ssp {
+; CHECK-LABEL: cvtf16f32:
+; CHECK: fcvtn v0.4h, v0.4s
+; CHECK-NEXT: ret
+ %vcvt1.i = tail call <4 x i16> @llvm.aarch64.neon.vcvtfp2hf(<4 x float> %a) nounwind
+ ret <4 x i16> %vcvt1.i
+}
+
+define <8 x i16> @cvtf16f32_high(<4 x i16> %low, <4 x float> %high_big) {
+; CHECK-LABEL: cvtf16f32_high:
+; CHECK: fcvtn2 v0.8h, v1.4s
+; CHECK-NEXT: ret
+ %high = call <4 x i16> @llvm.aarch64.neon.vcvtfp2hf(<4 x float> %high_big)
+ %res = shufflevector <4 x i16> %low, <4 x i16> %high, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+declare <4 x float> @llvm.aarch64.neon.vcvthf2fp(<4 x i16>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.vcvtfp2hf(<4 x float>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vcvt_n.ll b/test/CodeGen/AArch64/arm64-vcvt_n.ll
new file mode 100644
index 000000000000..7ed5be6e8af9
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcvt_n.ll
@@ -0,0 +1,49 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x float> @cvtf32fxpu(<2 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: cvtf32fxpu:
+; CHECK: ucvtf.2s v0, v0, #9
+; CHECK: ret
+ %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %a, i32 9)
+ ret <2 x float> %vcvt_n1
+}
+
+define <2 x float> @cvtf32fxps(<2 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: cvtf32fxps:
+; CHECK: scvtf.2s v0, v0, #12
+; CHECK: ret
+ %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %a, i32 12)
+ ret <2 x float> %vcvt_n1
+}
+
+define <4 x float> @cvtqf32fxpu(<4 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: cvtqf32fxpu:
+; CHECK: ucvtf.4s v0, v0, #18
+; CHECK: ret
+ %vcvt_n1 = tail call <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %a, i32 18)
+ ret <4 x float> %vcvt_n1
+}
+
+define <4 x float> @cvtqf32fxps(<4 x i32> %a) nounwind readnone ssp {
+; CHECK-LABEL: cvtqf32fxps:
+; CHECK: scvtf.4s v0, v0, #30
+; CHECK: ret
+ %vcvt_n1 = tail call <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %a, i32 30)
+ ret <4 x float> %vcvt_n1
+}
+define <2 x double> @f1(<2 x i64> %a) nounwind readnone ssp {
+ %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 12)
+ ret <2 x double> %vcvt_n1
+}
+
+define <2 x double> @f2(<2 x i64> %a) nounwind readnone ssp {
+ %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 9)
+ ret <2 x double> %vcvt_n1
+}
+
+declare <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
+declare <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
+declare <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll b/test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
new file mode 100644
index 000000000000..985a5f762439
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x i32> @c1(<2 x float> %a) nounwind readnone ssp {
+; CHECK: c1
+; CHECK: fcvtzs.2s v0, v0
+; CHECK: ret
+ %vcvt.i = fptosi <2 x float> %a to <2 x i32>
+ ret <2 x i32> %vcvt.i
+}
+
+define <2 x i32> @c2(<2 x float> %a) nounwind readnone ssp {
+; CHECK: c2
+; CHECK: fcvtzu.2s v0, v0
+; CHECK: ret
+ %vcvt.i = fptoui <2 x float> %a to <2 x i32>
+ ret <2 x i32> %vcvt.i
+}
+
+define <4 x i32> @c3(<4 x float> %a) nounwind readnone ssp {
+; CHECK: c3
+; CHECK: fcvtzs.4s v0, v0
+; CHECK: ret
+ %vcvt.i = fptosi <4 x float> %a to <4 x i32>
+ ret <4 x i32> %vcvt.i
+}
+
+define <4 x i32> @c4(<4 x float> %a) nounwind readnone ssp {
+; CHECK: c4
+; CHECK: fcvtzu.4s v0, v0
+; CHECK: ret
+ %vcvt.i = fptoui <4 x float> %a to <4 x i32>
+ ret <4 x i32> %vcvt.i
+}
+
diff --git a/test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll b/test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll
new file mode 100644
index 000000000000..b29c22cbfda5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define float @fcvtxn(double %a) {
+; CHECK-LABEL: fcvtxn:
+; CHECK: fcvtxn s0, d0
+; CHECK-NEXT: ret
+ %vcvtxd.i = tail call float @llvm.aarch64.sisd.fcvtxn(double %a) nounwind
+ ret float %vcvtxd.i
+}
+
+declare float @llvm.aarch64.sisd.fcvtxn(double) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vecCmpBr.ll b/test/CodeGen/AArch64/arm64-vecCmpBr.ll
new file mode 100644
index 000000000000..c7321e4b7d07
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vecCmpBr.ll
@@ -0,0 +1,207 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s
+; ModuleID = 'arm64_vecCmpBr.c'
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios3.0.0"
+
+
+define i32 @anyZero64(<4 x i16> %a) #0 {
+; CHECK: _anyZero64:
+; CHECK: uminv.8b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: b _bar
+entry:
+ %0 = bitcast <4 x i16> %a to <8 x i8>
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %0) #3
+ %1 = trunc i32 %vminv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %if.then, label %return
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+declare i32 @bar(...) #1
+
+define i32 @anyZero128(<8 x i16> %a) #0 {
+; CHECK: _anyZero128:
+; CHECK: uminv.16b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: b _bar
+
+entry:
+ %0 = bitcast <8 x i16> %a to <16 x i8>
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %0) #3
+ %1 = trunc i32 %vminv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %if.then, label %return
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @anyNonZero64(<4 x i16> %a) #0 {
+; CHECK: _anyNonZero64:
+; CHECK: umaxv.8b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: movz w0, #0
+
+entry:
+ %0 = bitcast <4 x i16> %a to <8 x i8>
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %0) #3
+ %1 = trunc i32 %vmaxv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @anyNonZero128(<8 x i16> %a) #0 {
+; CHECK: _anyNonZero128:
+; CHECK: umaxv.16b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: movz w0, #0
+entry:
+ %0 = bitcast <8 x i16> %a to <16 x i8>
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %0) #3
+ %1 = trunc i32 %vmaxv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @allZero64(<4 x i16> %a) #0 {
+; CHECK: _allZero64:
+; CHECK: umaxv.8b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: b _bar
+entry:
+ %0 = bitcast <4 x i16> %a to <8 x i8>
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %0) #3
+ %1 = trunc i32 %vmaxv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %if.then, label %return
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @allZero128(<8 x i16> %a) #0 {
+; CHECK: _allZero128:
+; CHECK: umaxv.16b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: b _bar
+entry:
+ %0 = bitcast <8 x i16> %a to <16 x i8>
+ %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %0) #3
+ %1 = trunc i32 %vmaxv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %if.then, label %return
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @allNonZero64(<4 x i16> %a) #0 {
+; CHECK: _allNonZero64:
+; CHECK: uminv.8b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: movz w0, #0
+entry:
+ %0 = bitcast <4 x i16> %a to <8 x i8>
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %0) #3
+ %1 = trunc i32 %vminv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+define i32 @allNonZero128(<8 x i16> %a) #0 {
+; CHECK: _allNonZero128:
+; CHECK: uminv.16b b[[REGNO1:[0-9]+]], v0
+; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
+; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]]
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: movz w0, #0
+entry:
+ %0 = bitcast <8 x i16> %a to <16 x i8>
+ %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %0) #3
+ %1 = trunc i32 %vminv.i to i8
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then: ; preds = %entry
+ %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval.0 = phi i32 [ %call1, %if.then ], [ 0, %entry ]
+ ret i32 %retval.0
+}
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>) #2
+
+declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>) #2
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>) #2
+
+declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>) #2
+
+attributes #0 = { nounwind ssp "target-cpu"="cyclone" }
+attributes #1 = { "target-cpu"="cyclone" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
+attributes #4 = { nobuiltin nounwind }
diff --git a/test/CodeGen/AArch64/arm64-vecFold.ll b/test/CodeGen/AArch64/arm64-vecFold.ll
new file mode 100644
index 000000000000..aeacfccab3c4
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vecFold.ll
@@ -0,0 +1,145 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -o - %s| FileCheck %s
+
+define <16 x i8> @foov16i8(<8 x i16> %a0, <8 x i16> %b0) nounwind readnone ssp {
+; CHECK-LABEL: foov16i8:
+ %vshrn_low_shift = lshr <8 x i16> %a0, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %vshrn_low = trunc <8 x i16> %vshrn_low_shift to <8 x i8>
+ %vshrn_high_shift = lshr <8 x i16> %b0, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ %vshrn_high = trunc <8 x i16> %vshrn_high_shift to <8 x i8>
+; CHECK: shrn.8b v0, v0, #5
+; CHECK-NEXT: shrn2.16b v0, v1, #5
+; CHECK-NEXT: ret
+ %1 = bitcast <8 x i8> %vshrn_low to <1 x i64>
+ %2 = bitcast <8 x i8> %vshrn_high to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @foov8i16(<4 x i32> %a0, <4 x i32> %b0) nounwind readnone ssp {
+; CHECK-LABEL: foov8i16:
+ %vshrn_low_shift = lshr <4 x i32> %a0, <i32 5, i32 5, i32 5, i32 5>
+ %vshrn_low = trunc <4 x i32> %vshrn_low_shift to <4 x i16>
+ %vshrn_high_shift = lshr <4 x i32> %b0, <i32 5, i32 5, i32 5, i32 5>
+ %vshrn_high = trunc <4 x i32> %vshrn_high_shift to <4 x i16>
+; CHECK: shrn.4h v0, v0, #5
+; CHECK-NEXT: shrn2.8h v0, v1, #5
+; CHECK-NEXT: ret
+ %1 = bitcast <4 x i16> %vshrn_low to <1 x i64>
+ %2 = bitcast <4 x i16> %vshrn_high to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @foov4i32(<2 x i64> %a0, <2 x i64> %b0) nounwind readnone ssp {
+; CHECK-LABEL: foov4i32:
+ %vshrn_low_shift = lshr <2 x i64> %a0, <i64 5, i64 5>
+ %vshrn_low = trunc <2 x i64> %vshrn_low_shift to <2 x i32>
+ %vshrn_high_shift = lshr <2 x i64> %b0, <i64 5, i64 5>
+ %vshrn_high = trunc <2 x i64> %vshrn_high_shift to <2 x i32>
+; CHECK: shrn.2s v0, v0, #5
+; CHECK-NEXT: shrn2.4s v0, v1, #5
+; CHECK-NEXT: ret
+ %1 = bitcast <2 x i32> %vshrn_low to <1 x i64>
+ %2 = bitcast <2 x i32> %vshrn_high to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <8 x i16> @bar(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %b0, <4 x i32> %b1) nounwind readnone ssp {
+; CHECK-LABEL: bar:
+ %vaddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %a0, <4 x i32> %a1) nounwind
+ %vaddhn2.i10 = tail call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %b0, <4 x i32> %b1) nounwind
+; CHECK: addhn.4h v0, v0, v1
+; CHECK-NEXT: addhn2.8h v0, v2, v3
+; CHECK-NEXT: ret
+ %1 = bitcast <4 x i16> %vaddhn2.i to <1 x i64>
+ %2 = bitcast <4 x i16> %vaddhn2.i10 to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @baz(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %b0, <4 x i32> %b1) nounwind readnone ssp {
+; CHECK-LABEL: baz:
+ %vaddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %a0, <4 x i32> %a1) nounwind
+ %vshrn_high_shift = ashr <4 x i32> %b0, <i32 5, i32 5, i32 5, i32 5>
+ %vshrn_high = trunc <4 x i32> %vshrn_high_shift to <4 x i16>
+; CHECK: addhn.4h v0, v0, v1
+; CHECK-NEXT: shrn2.8h v0, v2, #5
+; CHECK-NEXT: ret
+ %1 = bitcast <4 x i16> %vaddhn2.i to <1 x i64>
+ %2 = bitcast <4 x i16> %vshrn_high to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @raddhn(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %b0, <4 x i32> %b1) nounwind readnone ssp {
+; CHECK-LABEL: raddhn:
+entry:
+; CHECK: raddhn.4h v0, v0, v1
+; CHECK-NEXT: raddhn2.8h v0, v2, v3
+; CHECK-NEXT: ret
+ %vraddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a0, <4 x i32> %a1) nounwind
+ %vraddhn2.i10 = tail call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %b0, <4 x i32> %b1) nounwind
+ %0 = bitcast <4 x i16> %vraddhn2.i to <1 x i64>
+ %1 = bitcast <4 x i16> %vraddhn2.i10 to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @vrshrn(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %b0, <8 x i16> %b1) nounwind readnone ssp {
+; CHECK-LABEL: vrshrn:
+; CHECK: rshrn.8b v0, v0, #5
+; CHECK-NEXT: rshrn2.16b v0, v2, #6
+; CHECK-NEXT: ret
+ %vrshrn_n1 = tail call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %a0, i32 5)
+ %vrshrn_n4 = tail call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %b0, i32 6)
+ %1 = bitcast <8 x i8> %vrshrn_n1 to <1 x i64>
+ %2 = bitcast <8 x i8> %vrshrn_n4 to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @vrsubhn(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %b0, <8 x i16> %b1) nounwind readnone ssp {
+; CHECK-LABEL: vrsubhn:
+; CHECK: rsubhn.8b v0, v0, v1
+; CHECK: rsubhn2.16b v0, v2, v3
+; CHECK-NEXT: ret
+ %vrsubhn2.i = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a0, <8 x i16> %a1) nounwind
+ %vrsubhn2.i10 = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %b0, <8 x i16> %b1) nounwind
+ %1 = bitcast <8 x i8> %vrsubhn2.i to <1 x i64>
+ %2 = bitcast <8 x i8> %vrsubhn2.i10 to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @noOpt1(<2 x i32> %a0, <2 x i32> %a1, <4 x i32> %b0, <4 x i32> %b1) nounwind readnone ssp {
+; CHECK-LABEL: noOpt1:
+ %vqsub2.i = tail call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %a0, <2 x i32> %a1) nounwind
+ %vaddhn2.i = tail call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %b0, <4 x i32> %b1) nounwind
+; CHECK: sqsub.2s v0, v0, v1
+; CHECK-NEXT: addhn2.8h v0, v2, v3
+ %1 = bitcast <2 x i32> %vqsub2.i to <1 x i64>
+ %2 = bitcast <4 x i16> %vaddhn2.i to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.shrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.shrn.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.shrn.v2i32(<2 x i64>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+
diff --git a/test/CodeGen/AArch64/arm64-vector-ext.ll b/test/CodeGen/AArch64/arm64-vector-ext.ll
new file mode 100644
index 000000000000..650ff1e14f02
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vector-ext.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+;CHECK: @func30
+;CHECK: ushll.4s v0, v0, #0
+;CHECK: movi.4s v1, #0x1
+;CHECK: and.16b v0, v0, v1
+;CHECK: str q0, [x0]
+;CHECK: ret
+
+%T0_30 = type <4 x i1>
+%T1_30 = type <4 x i32>
+define void @func30(%T0_30 %v0, %T1_30* %p1) {
+ %r = zext %T0_30 %v0 to %T1_30
+ store %T1_30 %r, %T1_30* %p1
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-vector-imm.ll b/test/CodeGen/AArch64/arm64-vector-imm.ll
new file mode 100644
index 000000000000..9fb088b9a497
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vector-imm.ll
@@ -0,0 +1,134 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: v_orrimm:
+; CHECK-NOT: mov
+; CHECK-NOT: mvn
+; CHECK: orr
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = or <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind {
+; CHECK: v_orrimmQ
+; CHECK-NOT: mov
+; CHECK-NOT: mvn
+; CHECK: orr
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = or <16 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: v_bicimm:
+; CHECK-NOT: mov
+; CHECK-NOT: mvn
+; CHECK: bic
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = and <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind {
+; CHECK-LABEL: v_bicimmQ:
+; CHECK-NOT: mov
+; CHECK-NOT: mvn
+; CHECK: bic
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = and <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
+ ret <16 x i8> %tmp3
+}
+
+define <2 x double> @foo(<2 x double> %bar) nounwind {
+; CHECK: foo
+; CHECK: fmov.2d v1, #1.0000000
+ %add = fadd <2 x double> %bar, <double 1.0, double 1.0>
+ ret <2 x double> %add
+}
+
+define <4 x i32> @movi_4s_imm_t1() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_4s_imm_t1:
+; CHECK: movi.4s v0, #0x4b
+ ret <4 x i32> <i32 75, i32 75, i32 75, i32 75>
+}
+
+define <4 x i32> @movi_4s_imm_t2() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_4s_imm_t2:
+; CHECK: movi.4s v0, #0x4b, lsl #8
+ ret <4 x i32> <i32 19200, i32 19200, i32 19200, i32 19200>
+}
+
+define <4 x i32> @movi_4s_imm_t3() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_4s_imm_t3:
+; CHECK: movi.4s v0, #0x4b, lsl #16
+ ret <4 x i32> <i32 4915200, i32 4915200, i32 4915200, i32 4915200>
+}
+
+define <4 x i32> @movi_4s_imm_t4() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_4s_imm_t4:
+; CHECK: movi.4s v0, #0x4b, lsl #24
+ ret <4 x i32> <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
+}
+
+define <8 x i16> @movi_8h_imm_t5() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_8h_imm_t5:
+; CHECK: movi.8h v0, #0x4b
+ ret <8 x i16> <i16 75, i16 75, i16 75, i16 75, i16 75, i16 75, i16 75, i16 75>
+}
+
+; rdar://11989841
+define <8 x i16> @movi_8h_imm_t6() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_8h_imm_t6:
+; CHECK: movi.8h v0, #0x4b, lsl #8
+ ret <8 x i16> <i16 19200, i16 19200, i16 19200, i16 19200, i16 19200, i16 19200, i16 19200, i16 19200>
+}
+
+define <4 x i32> @movi_4s_imm_t7() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_4s_imm_t7:
+; CHECK: movi.4s v0, #0x4b, msl #8
+ret <4 x i32> <i32 19455, i32 19455, i32 19455, i32 19455>
+}
+
+define <4 x i32> @movi_4s_imm_t8() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_4s_imm_t8:
+; CHECK: movi.4s v0, #0x4b, msl #16
+ret <4 x i32> <i32 4980735, i32 4980735, i32 4980735, i32 4980735>
+}
+
+define <16 x i8> @movi_16b_imm_t9() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_16b_imm_t9:
+; CHECK: movi.16b v0, #0x4b
+ret <16 x i8> <i8 75, i8 75, i8 75, i8 75, i8 75, i8 75, i8 75, i8 75,
+ i8 75, i8 75, i8 75, i8 75, i8 75, i8 75, i8 75, i8 75>
+}
+
+define <2 x i64> @movi_2d_imm_t10() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_2d_imm_t10:
+; CHECK: movi.2d v0, #0xff00ff00ff00ff
+ret <2 x i64> <i64 71777214294589695, i64 71777214294589695>
+}
+
+define <4 x i32> @movi_4s_imm_t11() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_4s_imm_t11:
+; CHECK: fmov.4s v0, #-0.32812500
+ret <4 x i32> <i32 3198681088, i32 3198681088, i32 3198681088, i32 3198681088>
+}
+
+define <2 x i64> @movi_2d_imm_t12() nounwind readnone ssp {
+entry:
+; CHECK-LABEL: movi_2d_imm_t12:
+; CHECK: fmov.2d v0, #-0.17187500
+ret <2 x i64> <i64 13818732506632945664, i64 13818732506632945664>
+}
diff --git a/test/CodeGen/AArch64/arm64-vector-insertion.ll b/test/CodeGen/AArch64/arm64-vector-insertion.ll
new file mode 100644
index 000000000000..8fbff71f9fc2
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vector-insertion.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=arm64 -mcpu=generic -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+define void @test0f(float* nocapture %x, float %a) #0 {
+entry:
+ %0 = insertelement <4 x float> <float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %a, i32 0
+ %1 = bitcast float* %x to <4 x float>*
+ store <4 x float> %0, <4 x float>* %1, align 16
+ ret void
+
+ ; CHECK-LABEL: test0f
+ ; CHECK: movi.2d v[[TEMP:[0-9]+]], #0000000000000000
+ ; CHECK: ins.s v[[TEMP]][0], v{{[0-9]+}}[0]
+ ; CHECK: str q[[TEMP]], [x0]
+ ; CHECK: ret
+
+
+}
+
+
+define void @test1f(float* nocapture %x, float %a) #0 {
+entry:
+ %0 = insertelement <4 x float> <float undef, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, float %a, i32 0
+ %1 = bitcast float* %x to <4 x float>*
+ store <4 x float> %0, <4 x float>* %1, align 16
+ ret void
+
+ ; CHECK-LABEL: test1f
+ ; CHECK: fmov s[[TEMP:[0-9]+]], #1.0000000
+ ; CHECK: dup.4s v[[TEMP2:[0-9]+]], v[[TEMP]][0]
+ ; CHECK: ins.s v[[TEMP2]][0], v0[0]
+ ; CHECK: str q[[TEMP2]], [x0]
+ ; CHECK: ret
+}
diff --git a/test/CodeGen/AArch64/arm64-vector-ldst.ll b/test/CodeGen/AArch64/arm64-vector-ldst.ll
new file mode 100644
index 000000000000..c00191577d17
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vector-ldst.ll
@@ -0,0 +1,601 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+
+; rdar://9428579
+
+%type1 = type { <16 x i8> }
+%type2 = type { <8 x i8> }
+%type3 = type { <4 x i16> }
+
+
+define hidden fastcc void @t1(%type1** %argtable) nounwind {
+entry:
+; CHECK-LABEL: t1:
+; CHECK: ldr x[[REG:[0-9]+]], [x0]
+; CHECK: str q0, [x[[REG]]]
+ %tmp1 = load %type1** %argtable, align 8
+ %tmp2 = getelementptr inbounds %type1* %tmp1, i64 0, i32 0
+ store <16 x i8> zeroinitializer, <16 x i8>* %tmp2, align 16
+ ret void
+}
+
+define hidden fastcc void @t2(%type2** %argtable) nounwind {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: ldr x[[REG:[0-9]+]], [x0]
+; CHECK: str d0, [x[[REG]]]
+ %tmp1 = load %type2** %argtable, align 8
+ %tmp2 = getelementptr inbounds %type2* %tmp1, i64 0, i32 0
+ store <8 x i8> zeroinitializer, <8 x i8>* %tmp2, align 8
+ ret void
+}
+
+; add a bunch of tests for rdar://11246289
+
+@globalArray64x2 = common global <2 x i64>* null, align 8
+@globalArray32x4 = common global <4 x i32>* null, align 8
+@globalArray16x8 = common global <8 x i16>* null, align 8
+@globalArray8x16 = common global <16 x i8>* null, align 8
+@globalArray64x1 = common global <1 x i64>* null, align 8
+@globalArray32x2 = common global <2 x i32>* null, align 8
+@globalArray16x4 = common global <4 x i16>* null, align 8
+@globalArray8x8 = common global <8 x i8>* null, align 8
+@floatglobalArray64x2 = common global <2 x double>* null, align 8
+@floatglobalArray32x4 = common global <4 x float>* null, align 8
+@floatglobalArray64x1 = common global <1 x double>* null, align 8
+@floatglobalArray32x2 = common global <2 x float>* null, align 8
+
+define void @fct1_64x2(<2 x i64>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_64x2:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #4
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <2 x i64>* %array, i64 %offset
+ %tmp = load <2 x i64>* %arrayidx, align 16
+ %tmp1 = load <2 x i64>** @globalArray64x2, align 8
+ %arrayidx1 = getelementptr inbounds <2 x i64>* %tmp1, i64 %offset
+ store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct2_64x2(<2 x i64>* nocapture %array) nounwind ssp {
+entry:
+; CHECK-LABEL: fct2_64x2:
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
+ %arrayidx = getelementptr inbounds <2 x i64>* %array, i64 3
+ %tmp = load <2 x i64>* %arrayidx, align 16
+ %tmp1 = load <2 x i64>** @globalArray64x2, align 8
+ %arrayidx1 = getelementptr inbounds <2 x i64>* %tmp1, i64 5
+ store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct1_32x4(<4 x i32>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_32x4:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #4
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <4 x i32>* %array, i64 %offset
+ %tmp = load <4 x i32>* %arrayidx, align 16
+ %tmp1 = load <4 x i32>** @globalArray32x4, align 8
+ %arrayidx1 = getelementptr inbounds <4 x i32>* %tmp1, i64 %offset
+ store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct2_32x4(<4 x i32>* nocapture %array) nounwind ssp {
+entry:
+; CHECK-LABEL: fct2_32x4:
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
+ %arrayidx = getelementptr inbounds <4 x i32>* %array, i64 3
+ %tmp = load <4 x i32>* %arrayidx, align 16
+ %tmp1 = load <4 x i32>** @globalArray32x4, align 8
+ %arrayidx1 = getelementptr inbounds <4 x i32>* %tmp1, i64 5
+ store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct1_16x8(<8 x i16>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_16x8:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #4
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <8 x i16>* %array, i64 %offset
+ %tmp = load <8 x i16>* %arrayidx, align 16
+ %tmp1 = load <8 x i16>** @globalArray16x8, align 8
+ %arrayidx1 = getelementptr inbounds <8 x i16>* %tmp1, i64 %offset
+ store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct2_16x8(<8 x i16>* nocapture %array) nounwind ssp {
+entry:
+; CHECK-LABEL: fct2_16x8:
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
+ %arrayidx = getelementptr inbounds <8 x i16>* %array, i64 3
+ %tmp = load <8 x i16>* %arrayidx, align 16
+ %tmp1 = load <8 x i16>** @globalArray16x8, align 8
+ %arrayidx1 = getelementptr inbounds <8 x i16>* %tmp1, i64 5
+ store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct1_8x16(<16 x i8>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_8x16:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #4
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <16 x i8>* %array, i64 %offset
+ %tmp = load <16 x i8>* %arrayidx, align 16
+ %tmp1 = load <16 x i8>** @globalArray8x16, align 8
+ %arrayidx1 = getelementptr inbounds <16 x i8>* %tmp1, i64 %offset
+ store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct2_8x16(<16 x i8>* nocapture %array) nounwind ssp {
+entry:
+; CHECK-LABEL: fct2_8x16:
+; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
+ %arrayidx = getelementptr inbounds <16 x i8>* %array, i64 3
+ %tmp = load <16 x i8>* %arrayidx, align 16
+ %tmp1 = load <16 x i8>** @globalArray8x16, align 8
+ %arrayidx1 = getelementptr inbounds <16 x i8>* %tmp1, i64 5
+ store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
+ ret void
+}
+
+define void @fct1_64x1(<1 x i64>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_64x1:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #3
+; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <1 x i64>* %array, i64 %offset
+ %tmp = load <1 x i64>* %arrayidx, align 8
+ %tmp1 = load <1 x i64>** @globalArray64x1, align 8
+ %arrayidx1 = getelementptr inbounds <1 x i64>* %tmp1, i64 %offset
+ store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
+ ret void
+}
+
+define void @fct2_64x1(<1 x i64>* nocapture %array) nounwind ssp {
+entry:
+; CHECK-LABEL: fct2_64x1:
+; CHECK: ldr [[DEST:d[0-9]+]], [x0, #24]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
+ %arrayidx = getelementptr inbounds <1 x i64>* %array, i64 3
+ %tmp = load <1 x i64>* %arrayidx, align 8
+ %tmp1 = load <1 x i64>** @globalArray64x1, align 8
+ %arrayidx1 = getelementptr inbounds <1 x i64>* %tmp1, i64 5
+ store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
+ ret void
+}
+
+define void @fct1_32x2(<2 x i32>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_32x2:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #3
+; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <2 x i32>* %array, i64 %offset
+ %tmp = load <2 x i32>* %arrayidx, align 8
+ %tmp1 = load <2 x i32>** @globalArray32x2, align 8
+ %arrayidx1 = getelementptr inbounds <2 x i32>* %tmp1, i64 %offset
+ store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
+ ret void
+}
+
+define void @fct2_32x2(<2 x i32>* nocapture %array) nounwind ssp {
+entry:
+; CHECK-LABEL: fct2_32x2:
+; CHECK: ldr [[DEST:d[0-9]+]], [x0, #24]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
+ %arrayidx = getelementptr inbounds <2 x i32>* %array, i64 3
+ %tmp = load <2 x i32>* %arrayidx, align 8
+ %tmp1 = load <2 x i32>** @globalArray32x2, align 8
+ %arrayidx1 = getelementptr inbounds <2 x i32>* %tmp1, i64 5
+ store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
+ ret void
+}
+
+define void @fct1_16x4(<4 x i16>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_16x4:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #3
+; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <4 x i16>* %array, i64 %offset
+ %tmp = load <4 x i16>* %arrayidx, align 8
+ %tmp1 = load <4 x i16>** @globalArray16x4, align 8
+ %arrayidx1 = getelementptr inbounds <4 x i16>* %tmp1, i64 %offset
+ store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
+ ret void
+}
+
+define void @fct2_16x4(<4 x i16>* nocapture %array) nounwind ssp {
+entry:
+; CHECK-LABEL: fct2_16x4:
+; CHECK: ldr [[DEST:d[0-9]+]], [x0, #24]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
+ %arrayidx = getelementptr inbounds <4 x i16>* %array, i64 3
+ %tmp = load <4 x i16>* %arrayidx, align 8
+ %tmp1 = load <4 x i16>** @globalArray16x4, align 8
+ %arrayidx1 = getelementptr inbounds <4 x i16>* %tmp1, i64 5
+ store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
+ ret void
+}
+
+define void @fct1_8x8(<8 x i8>* nocapture %array, i64 %offset) nounwind ssp {
+entry:
+; CHECK-LABEL: fct1_8x8:
+; CHECK: lsl [[SHIFTEDOFFSET:x[0-9]+]], x1, #3
+; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
+; CHECK: ldr [[BASE:x[0-9]+]],
+; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
+ %arrayidx = getelementptr inbounds <8 x i8>* %array, i64 %offset
+ %tmp = load <8 x i8>* %arrayidx, align 8
+ %tmp1 = load <8 x i8>** @globalArray8x8, align 8
+ %arrayidx1 = getelementptr inbounds <8 x i8>* %tmp1, i64 %offset
+ store <8 x i8> %tmp, <8 x i8>* %arrayidx1, align 8
+ ret void
+}
+
+; Add a bunch of tests for rdar://13258794: Match LDUR/STUR for D and Q
+; registers for unscaled vector accesses
+@str = global [63 x i8] c"Test case for rdar://13258794: LDUR/STUR for D and Q registers\00", align 1
+
+define <1 x i64> @fct0() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct0:
+; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
+ ret <1 x i64> %0
+}
+
+define <2 x i32> @fct1() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct1:
+; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
+ ret <2 x i32> %0
+}
+
+define <4 x i16> @fct2() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct2:
+; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
+ ret <4 x i16> %0
+}
+
+define <8 x i8> @fct3() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct3:
+; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
+ ret <8 x i8> %0
+}
+
+define <2 x i64> @fct4() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct4:
+; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
+ ret <2 x i64> %0
+}
+
+define <4 x i32> @fct5() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct5:
+; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
+ ret <4 x i32> %0
+}
+
+define <8 x i16> @fct6() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct6:
+; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
+ ret <8 x i16> %0
+}
+
+define <16 x i8> @fct7() nounwind readonly ssp {
+entry:
+; CHECK-LABEL: fct7:
+; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
+ %0 = load <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
+ ret <16 x i8> %0
+}
+
+define void @fct8() nounwind ssp {
+entry:
+; CHECK-LABEL: fct8:
+; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
+ store <1 x i64> %0, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <1 x i64>*), align 8
+ ret void
+}
+
+define void @fct9() nounwind ssp {
+entry:
+; CHECK-LABEL: fct9:
+; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
+ store <2 x i32> %0, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <2 x i32>*), align 8
+ ret void
+}
+
+define void @fct10() nounwind ssp {
+entry:
+; CHECK-LABEL: fct10:
+; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
+ store <4 x i16> %0, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <4 x i16>*), align 8
+ ret void
+}
+
+define void @fct11() nounwind ssp {
+entry:
+; CHECK-LABEL: fct11:
+; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
+ store <8 x i8> %0, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <8 x i8>*), align 8
+ ret void
+}
+
+define void @fct12() nounwind ssp {
+entry:
+; CHECK-LABEL: fct12:
+; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
+ store <2 x i64> %0, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <2 x i64>*), align 16
+ ret void
+}
+
+define void @fct13() nounwind ssp {
+entry:
+; CHECK-LABEL: fct13:
+; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
+ store <4 x i32> %0, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <4 x i32>*), align 16
+ ret void
+}
+
+define void @fct14() nounwind ssp {
+entry:
+; CHECK-LABEL: fct14:
+; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
+ store <8 x i16> %0, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <8 x i16>*), align 16
+ ret void
+}
+
+define void @fct15() nounwind ssp {
+entry:
+; CHECK-LABEL: fct15:
+; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
+; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
+ %0 = load <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
+ store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <16 x i8>*), align 16
+ ret void
+}
+
+; Check the building of vector from a single loaded value.
+; Part of <rdar://problem/14170854>
+;
+; Single loads with immediate offset.
+define <8 x i8> @fct16(i8* nocapture %sp0) {
+; CHECK-LABEL: fct16:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: mul.8b v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 1
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <8 x i8> %vec, %vec
+ ret <8 x i8> %vmull.i
+}
+
+define <16 x i8> @fct17(i8* nocapture %sp0) {
+; CHECK-LABEL: fct17:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, #1]
+; CHECK-NEXT: mul.16b v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 1
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <16 x i8> %vec, %vec
+ ret <16 x i8> %vmull.i
+}
+
+define <4 x i16> @fct18(i16* nocapture %sp0) {
+; CHECK-LABEL: fct18:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
+; CHECK-NEXT: mul.4h v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 1
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <4 x i16> %vec, %vec
+ ret <4 x i16> %vmull.i
+}
+
+define <8 x i16> @fct19(i16* nocapture %sp0) {
+; CHECK-LABEL: fct19:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
+; CHECK-NEXT: mul.8h v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 1
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <8 x i16> %vec, %vec
+ ret <8 x i16> %vmull.i
+}
+
+define <2 x i32> @fct20(i32* nocapture %sp0) {
+; CHECK-LABEL: fct20:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
+; CHECK-NEXT: mul.2s v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 1
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <2 x i32> %vec, %vec
+ ret <2 x i32> %vmull.i
+}
+
+define <4 x i32> @fct21(i32* nocapture %sp0) {
+; CHECK-LABEL: fct21:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
+; CHECK-NEXT: mul.4s v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 1
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <4 x i32> %vec, %vec
+ ret <4 x i32> %vmull.i
+}
+
+define <1 x i64> @fct22(i64* nocapture %sp0) {
+; CHECK-LABEL: fct22:
+; CHECK: ldr d0, [x0, #8]
+entry:
+ %addr = getelementptr i64* %sp0, i64 1
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
+ ret <1 x i64> %vec
+}
+
+define <2 x i64> @fct23(i64* nocapture %sp0) {
+; CHECK-LABEL: fct23:
+; CHECK: ldr d[[REGNUM:[0-9]+]], [x0, #8]
+entry:
+ %addr = getelementptr i64* %sp0, i64 1
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
+ ret <2 x i64> %vec
+}
+
+;
+; Single loads with register offset.
+define <8 x i8> @fct24(i8* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct24:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, x1]
+; CHECK-NEXT: mul.8b v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <8 x i8> %vec, %vec
+ ret <8 x i8> %vmull.i
+}
+
+define <16 x i8> @fct25(i8* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct25:
+; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, x1]
+; CHECK-NEXT: mul.16b v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i8* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i8* %addr, align 1
+ %vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <16 x i8> %vec, %vec
+ ret <16 x i8> %vmull.i
+}
+
+define <4 x i16> @fct26(i16* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct26:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: mul.4h v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <4 x i16> %vec, %vec
+ ret <4 x i16> %vmull.i
+}
+
+define <8 x i16> @fct27(i16* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct27:
+; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: mul.8h v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i16* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i16* %addr, align 1
+ %vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <8 x i16> %vec, %vec
+ ret <8 x i16> %vmull.i
+}
+
+define <2 x i32> @fct28(i32* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct28:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
+; CHECK-NEXT: mul.2s v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <2 x i32> %vec, %vec
+ ret <2 x i32> %vmull.i
+}
+
+define <4 x i32> @fct29(i32* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct29:
+; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
+; CHECK-NEXT: mul.4s v0, v[[REGNUM]], v[[REGNUM]]
+entry:
+ %addr = getelementptr i32* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i32* %addr, align 1
+ %vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
+ %vmull.i = mul <4 x i32> %vec, %vec
+ ret <4 x i32> %vmull.i
+}
+
+define <1 x i64> @fct30(i64* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct30:
+; CHECK: ldr d0, [x0, x1, lsl #3]
+entry:
+ %addr = getelementptr i64* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
+ ret <1 x i64> %vec
+}
+
+define <2 x i64> @fct31(i64* nocapture %sp0, i64 %offset) {
+; CHECK-LABEL: fct31:
+; CHECK: ldr d0, [x0, x1, lsl #3]
+entry:
+ %addr = getelementptr i64* %sp0, i64 %offset
+ %pix_sp0.0.copyload = load i64* %addr, align 1
+ %vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
+ ret <2 x i64> %vec
+}
diff --git a/test/CodeGen/AArch64/arm64-vext.ll b/test/CodeGen/AArch64/arm64-vext.ll
new file mode 100644
index 000000000000..2240dfd5a1ae
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vext.ll
@@ -0,0 +1,464 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+define void @test_vext_s8() nounwind ssp {
+ ; CHECK-LABEL: test_vext_s8:
+ ; CHECK: {{ext.8.*#1}}
+ %xS8x8 = alloca <8 x i8>, align 8
+ %__a = alloca <8 x i8>, align 8
+ %__b = alloca <8 x i8>, align 8
+ %tmp = load <8 x i8>* %xS8x8, align 8
+ store <8 x i8> %tmp, <8 x i8>* %__a, align 8
+ %tmp1 = load <8 x i8>* %xS8x8, align 8
+ store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
+ %tmp2 = load <8 x i8>* %__a, align 8
+ %tmp3 = load <8 x i8>* %__b, align 8
+ %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+ store <8 x i8> %vext, <8 x i8>* %xS8x8, align 8
+ ret void
+}
+
+define void @test_vext_u8() nounwind ssp {
+ ; CHECK-LABEL: test_vext_u8:
+ ; CHECK: {{ext.8.*#2}}
+ %xU8x8 = alloca <8 x i8>, align 8
+ %__a = alloca <8 x i8>, align 8
+ %__b = alloca <8 x i8>, align 8
+ %tmp = load <8 x i8>* %xU8x8, align 8
+ store <8 x i8> %tmp, <8 x i8>* %__a, align 8
+ %tmp1 = load <8 x i8>* %xU8x8, align 8
+ store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
+ %tmp2 = load <8 x i8>* %__a, align 8
+ %tmp3 = load <8 x i8>* %__b, align 8
+ %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ store <8 x i8> %vext, <8 x i8>* %xU8x8, align 8
+ ret void
+}
+
+define void @test_vext_p8() nounwind ssp {
+ ; CHECK-LABEL: test_vext_p8:
+ ; CHECK: {{ext.8.*#3}}
+ %xP8x8 = alloca <8 x i8>, align 8
+ %__a = alloca <8 x i8>, align 8
+ %__b = alloca <8 x i8>, align 8
+ %tmp = load <8 x i8>* %xP8x8, align 8
+ store <8 x i8> %tmp, <8 x i8>* %__a, align 8
+ %tmp1 = load <8 x i8>* %xP8x8, align 8
+ store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
+ %tmp2 = load <8 x i8>* %__a, align 8
+ %tmp3 = load <8 x i8>* %__b, align 8
+ %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ store <8 x i8> %vext, <8 x i8>* %xP8x8, align 8
+ ret void
+}
+
+define void @test_vext_s16() nounwind ssp {
+ ; CHECK-LABEL: test_vext_s16:
+ ; CHECK: {{ext.8.*#2}}
+ %xS16x4 = alloca <4 x i16>, align 8
+ %__a = alloca <4 x i16>, align 8
+ %__b = alloca <4 x i16>, align 8
+ %tmp = load <4 x i16>* %xS16x4, align 8
+ store <4 x i16> %tmp, <4 x i16>* %__a, align 8
+ %tmp1 = load <4 x i16>* %xS16x4, align 8
+ store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
+ %tmp2 = load <4 x i16>* %__a, align 8
+ %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
+ %tmp4 = load <4 x i16>* %__b, align 8
+ %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
+ %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ store <4 x i16> %vext, <4 x i16>* %xS16x4, align 8
+ ret void
+}
+
+define void @test_vext_u16() nounwind ssp {
+ ; CHECK-LABEL: test_vext_u16:
+ ; CHECK: {{ext.8.*#4}}
+ %xU16x4 = alloca <4 x i16>, align 8
+ %__a = alloca <4 x i16>, align 8
+ %__b = alloca <4 x i16>, align 8
+ %tmp = load <4 x i16>* %xU16x4, align 8
+ store <4 x i16> %tmp, <4 x i16>* %__a, align 8
+ %tmp1 = load <4 x i16>* %xU16x4, align 8
+ store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
+ %tmp2 = load <4 x i16>* %__a, align 8
+ %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
+ %tmp4 = load <4 x i16>* %__b, align 8
+ %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
+ %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ store <4 x i16> %vext, <4 x i16>* %xU16x4, align 8
+ ret void
+}
+
+define void @test_vext_p16() nounwind ssp {
+ ; CHECK-LABEL: test_vext_p16:
+ ; CHECK: {{ext.8.*#6}}
+ %xP16x4 = alloca <4 x i16>, align 8
+ %__a = alloca <4 x i16>, align 8
+ %__b = alloca <4 x i16>, align 8
+ %tmp = load <4 x i16>* %xP16x4, align 8
+ store <4 x i16> %tmp, <4 x i16>* %__a, align 8
+ %tmp1 = load <4 x i16>* %xP16x4, align 8
+ store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
+ %tmp2 = load <4 x i16>* %__a, align 8
+ %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
+ %tmp4 = load <4 x i16>* %__b, align 8
+ %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
+ %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ store <4 x i16> %vext, <4 x i16>* %xP16x4, align 8
+ ret void
+}
+
+define void @test_vext_s32() nounwind ssp {
+ ; CHECK-LABEL: test_vext_s32:
+ ; CHECK: {{ext.8.*#4}}
+ %xS32x2 = alloca <2 x i32>, align 8
+ %__a = alloca <2 x i32>, align 8
+ %__b = alloca <2 x i32>, align 8
+ %tmp = load <2 x i32>* %xS32x2, align 8
+ store <2 x i32> %tmp, <2 x i32>* %__a, align 8
+ %tmp1 = load <2 x i32>* %xS32x2, align 8
+ store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
+ %tmp2 = load <2 x i32>* %__a, align 8
+ %tmp3 = bitcast <2 x i32> %tmp2 to <8 x i8>
+ %tmp4 = load <2 x i32>* %__b, align 8
+ %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <2 x i32>
+ %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
+ store <2 x i32> %vext, <2 x i32>* %xS32x2, align 8
+ ret void
+}
+
+define void @test_vext_u32() nounwind ssp {
+ ; CHECK-LABEL: test_vext_u32:
+ ; CHECK: {{ext.8.*#4}}
+ %xU32x2 = alloca <2 x i32>, align 8
+ %__a = alloca <2 x i32>, align 8
+ %__b = alloca <2 x i32>, align 8
+ %tmp = load <2 x i32>* %xU32x2, align 8
+ store <2 x i32> %tmp, <2 x i32>* %__a, align 8
+ %tmp1 = load <2 x i32>* %xU32x2, align 8
+ store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
+ %tmp2 = load <2 x i32>* %__a, align 8
+ %tmp3 = bitcast <2 x i32> %tmp2 to <8 x i8>
+ %tmp4 = load <2 x i32>* %__b, align 8
+ %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <2 x i32>
+ %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
+ store <2 x i32> %vext, <2 x i32>* %xU32x2, align 8
+ ret void
+}
+
+define void @test_vext_f32() nounwind ssp {
+ ; CHECK-LABEL: test_vext_f32:
+ ; CHECK: {{ext.8.*#4}}
+ %xF32x2 = alloca <2 x float>, align 8
+ %__a = alloca <2 x float>, align 8
+ %__b = alloca <2 x float>, align 8
+ %tmp = load <2 x float>* %xF32x2, align 8
+ store <2 x float> %tmp, <2 x float>* %__a, align 8
+ %tmp1 = load <2 x float>* %xF32x2, align 8
+ store <2 x float> %tmp1, <2 x float>* %__b, align 8
+ %tmp2 = load <2 x float>* %__a, align 8
+ %tmp3 = bitcast <2 x float> %tmp2 to <8 x i8>
+ %tmp4 = load <2 x float>* %__b, align 8
+ %tmp5 = bitcast <2 x float> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <2 x float>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <2 x float>
+ %vext = shufflevector <2 x float> %tmp6, <2 x float> %tmp7, <2 x i32> <i32 1, i32 2>
+ store <2 x float> %vext, <2 x float>* %xF32x2, align 8
+ ret void
+}
+
+define void @test_vext_s64() nounwind ssp {
+ ; CHECK-LABEL: test_vext_s64:
+ ; CHECK_FIXME: {{ext.8.*#1}}
+ ; this just turns into a load of the second element
+ %xS64x1 = alloca <1 x i64>, align 8
+ %__a = alloca <1 x i64>, align 8
+ %__b = alloca <1 x i64>, align 8
+ %tmp = load <1 x i64>* %xS64x1, align 8
+ store <1 x i64> %tmp, <1 x i64>* %__a, align 8
+ %tmp1 = load <1 x i64>* %xS64x1, align 8
+ store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
+ %tmp2 = load <1 x i64>* %__a, align 8
+ %tmp3 = bitcast <1 x i64> %tmp2 to <8 x i8>
+ %tmp4 = load <1 x i64>* %__b, align 8
+ %tmp5 = bitcast <1 x i64> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <1 x i64>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <1 x i64>
+ %vext = shufflevector <1 x i64> %tmp6, <1 x i64> %tmp7, <1 x i32> <i32 1>
+ store <1 x i64> %vext, <1 x i64>* %xS64x1, align 8
+ ret void
+}
+
+define void @test_vext_u64() nounwind ssp {
+ ; CHECK-LABEL: test_vext_u64:
+ ; CHECK_FIXME: {{ext.8.*#1}}
+ ; this is turned into a simple load of the second element
+ %xU64x1 = alloca <1 x i64>, align 8
+ %__a = alloca <1 x i64>, align 8
+ %__b = alloca <1 x i64>, align 8
+ %tmp = load <1 x i64>* %xU64x1, align 8
+ store <1 x i64> %tmp, <1 x i64>* %__a, align 8
+ %tmp1 = load <1 x i64>* %xU64x1, align 8
+ store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
+ %tmp2 = load <1 x i64>* %__a, align 8
+ %tmp3 = bitcast <1 x i64> %tmp2 to <8 x i8>
+ %tmp4 = load <1 x i64>* %__b, align 8
+ %tmp5 = bitcast <1 x i64> %tmp4 to <8 x i8>
+ %tmp6 = bitcast <8 x i8> %tmp3 to <1 x i64>
+ %tmp7 = bitcast <8 x i8> %tmp5 to <1 x i64>
+ %vext = shufflevector <1 x i64> %tmp6, <1 x i64> %tmp7, <1 x i32> <i32 1>
+ store <1 x i64> %vext, <1 x i64>* %xU64x1, align 8
+ ret void
+}
+
+define void @test_vextq_s8() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_s8:
+ ; CHECK: {{ext.16.*#4}}
+ %xS8x16 = alloca <16 x i8>, align 16
+ %__a = alloca <16 x i8>, align 16
+ %__b = alloca <16 x i8>, align 16
+ %tmp = load <16 x i8>* %xS8x16, align 16
+ store <16 x i8> %tmp, <16 x i8>* %__a, align 16
+ %tmp1 = load <16 x i8>* %xS8x16, align 16
+ store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
+ %tmp2 = load <16 x i8>* %__a, align 16
+ %tmp3 = load <16 x i8>* %__b, align 16
+ %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+ store <16 x i8> %vext, <16 x i8>* %xS8x16, align 16
+ ret void
+}
+
+define void @test_vextq_u8() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_u8:
+ ; CHECK: {{ext.16.*#5}}
+ %xU8x16 = alloca <16 x i8>, align 16
+ %__a = alloca <16 x i8>, align 16
+ %__b = alloca <16 x i8>, align 16
+ %tmp = load <16 x i8>* %xU8x16, align 16
+ store <16 x i8> %tmp, <16 x i8>* %__a, align 16
+ %tmp1 = load <16 x i8>* %xU8x16, align 16
+ store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
+ %tmp2 = load <16 x i8>* %__a, align 16
+ %tmp3 = load <16 x i8>* %__b, align 16
+ %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
+ store <16 x i8> %vext, <16 x i8>* %xU8x16, align 16
+ ret void
+}
+
+define void @test_vextq_p8() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_p8:
+ ; CHECK: {{ext.16.*#6}}
+ %xP8x16 = alloca <16 x i8>, align 16
+ %__a = alloca <16 x i8>, align 16
+ %__b = alloca <16 x i8>, align 16
+ %tmp = load <16 x i8>* %xP8x16, align 16
+ store <16 x i8> %tmp, <16 x i8>* %__a, align 16
+ %tmp1 = load <16 x i8>* %xP8x16, align 16
+ store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
+ %tmp2 = load <16 x i8>* %__a, align 16
+ %tmp3 = load <16 x i8>* %__b, align 16
+ %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21>
+ store <16 x i8> %vext, <16 x i8>* %xP8x16, align 16
+ ret void
+}
+
+define void @test_vextq_s16() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_s16:
+ ; CHECK: {{ext.16.*#14}}
+ %xS16x8 = alloca <8 x i16>, align 16
+ %__a = alloca <8 x i16>, align 16
+ %__b = alloca <8 x i16>, align 16
+ %tmp = load <8 x i16>* %xS16x8, align 16
+ store <8 x i16> %tmp, <8 x i16>* %__a, align 16
+ %tmp1 = load <8 x i16>* %xS16x8, align 16
+ store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
+ %tmp2 = load <8 x i16>* %__a, align 16
+ %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
+ %tmp4 = load <8 x i16>* %__b, align 16
+ %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
+ %vext = shufflevector <8 x i16> %tmp6, <8 x i16> %tmp7, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
+ store <8 x i16> %vext, <8 x i16>* %xS16x8, align 16
+ ret void
+}
+
+define void @test_vextq_u16() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_u16:
+ ; CHECK: {{ext.16.*#8}}
+ %xU16x8 = alloca <8 x i16>, align 16
+ %__a = alloca <8 x i16>, align 16
+ %__b = alloca <8 x i16>, align 16
+ %tmp = load <8 x i16>* %xU16x8, align 16
+ store <8 x i16> %tmp, <8 x i16>* %__a, align 16
+ %tmp1 = load <8 x i16>* %xU16x8, align 16
+ store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
+ %tmp2 = load <8 x i16>* %__a, align 16
+ %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
+ %tmp4 = load <8 x i16>* %__b, align 16
+ %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
+ %vext = shufflevector <8 x i16> %tmp6, <8 x i16> %tmp7, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+ store <8 x i16> %vext, <8 x i16>* %xU16x8, align 16
+ ret void
+}
+
+define void @test_vextq_p16() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_p16:
+ ; CHECK: {{ext.16.*#10}}
+ %xP16x8 = alloca <8 x i16>, align 16
+ %__a = alloca <8 x i16>, align 16
+ %__b = alloca <8 x i16>, align 16
+ %tmp = load <8 x i16>* %xP16x8, align 16
+ store <8 x i16> %tmp, <8 x i16>* %__a, align 16
+ %tmp1 = load <8 x i16>* %xP16x8, align 16
+ store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
+ %tmp2 = load <8 x i16>* %__a, align 16
+ %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
+ %tmp4 = load <8 x i16>* %__b, align 16
+ %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
+ %vext = shufflevector <8 x i16> %tmp6, <8 x i16> %tmp7, <8 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
+ store <8 x i16> %vext, <8 x i16>* %xP16x8, align 16
+ ret void
+}
+
+define void @test_vextq_s32() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_s32:
+ ; CHECK: {{ext.16.*#4}}
+ %xS32x4 = alloca <4 x i32>, align 16
+ %__a = alloca <4 x i32>, align 16
+ %__b = alloca <4 x i32>, align 16
+ %tmp = load <4 x i32>* %xS32x4, align 16
+ store <4 x i32> %tmp, <4 x i32>* %__a, align 16
+ %tmp1 = load <4 x i32>* %xS32x4, align 16
+ store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
+ %tmp2 = load <4 x i32>* %__a, align 16
+ %tmp3 = bitcast <4 x i32> %tmp2 to <16 x i8>
+ %tmp4 = load <4 x i32>* %__b, align 16
+ %tmp5 = bitcast <4 x i32> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <4 x i32>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <4 x i32>
+ %vext = shufflevector <4 x i32> %tmp6, <4 x i32> %tmp7, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ store <4 x i32> %vext, <4 x i32>* %xS32x4, align 16
+ ret void
+}
+
+define void @test_vextq_u32() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_u32:
+ ; CHECK: {{ext.16.*#8}}
+ %xU32x4 = alloca <4 x i32>, align 16
+ %__a = alloca <4 x i32>, align 16
+ %__b = alloca <4 x i32>, align 16
+ %tmp = load <4 x i32>* %xU32x4, align 16
+ store <4 x i32> %tmp, <4 x i32>* %__a, align 16
+ %tmp1 = load <4 x i32>* %xU32x4, align 16
+ store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
+ %tmp2 = load <4 x i32>* %__a, align 16
+ %tmp3 = bitcast <4 x i32> %tmp2 to <16 x i8>
+ %tmp4 = load <4 x i32>* %__b, align 16
+ %tmp5 = bitcast <4 x i32> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <4 x i32>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <4 x i32>
+ %vext = shufflevector <4 x i32> %tmp6, <4 x i32> %tmp7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ store <4 x i32> %vext, <4 x i32>* %xU32x4, align 16
+ ret void
+}
+
+define void @test_vextq_f32() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_f32:
+ ; CHECK: {{ext.16.*#12}}
+ %xF32x4 = alloca <4 x float>, align 16
+ %__a = alloca <4 x float>, align 16
+ %__b = alloca <4 x float>, align 16
+ %tmp = load <4 x float>* %xF32x4, align 16
+ store <4 x float> %tmp, <4 x float>* %__a, align 16
+ %tmp1 = load <4 x float>* %xF32x4, align 16
+ store <4 x float> %tmp1, <4 x float>* %__b, align 16
+ %tmp2 = load <4 x float>* %__a, align 16
+ %tmp3 = bitcast <4 x float> %tmp2 to <16 x i8>
+ %tmp4 = load <4 x float>* %__b, align 16
+ %tmp5 = bitcast <4 x float> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <4 x float>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <4 x float>
+ %vext = shufflevector <4 x float> %tmp6, <4 x float> %tmp7, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ store <4 x float> %vext, <4 x float>* %xF32x4, align 16
+ ret void
+}
+
+define void @test_vextq_s64() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_s64:
+ ; CHECK: {{ext.16.*#8}}
+ %xS64x2 = alloca <2 x i64>, align 16
+ %__a = alloca <2 x i64>, align 16
+ %__b = alloca <2 x i64>, align 16
+ %tmp = load <2 x i64>* %xS64x2, align 16
+ store <2 x i64> %tmp, <2 x i64>* %__a, align 16
+ %tmp1 = load <2 x i64>* %xS64x2, align 16
+ store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
+ %tmp2 = load <2 x i64>* %__a, align 16
+ %tmp3 = bitcast <2 x i64> %tmp2 to <16 x i8>
+ %tmp4 = load <2 x i64>* %__b, align 16
+ %tmp5 = bitcast <2 x i64> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <2 x i64>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <2 x i64>
+ %vext = shufflevector <2 x i64> %tmp6, <2 x i64> %tmp7, <2 x i32> <i32 1, i32 2>
+ store <2 x i64> %vext, <2 x i64>* %xS64x2, align 16
+ ret void
+}
+
+define void @test_vextq_u64() nounwind ssp {
+ ; CHECK-LABEL: test_vextq_u64:
+ ; CHECK: {{ext.16.*#8}}
+ %xU64x2 = alloca <2 x i64>, align 16
+ %__a = alloca <2 x i64>, align 16
+ %__b = alloca <2 x i64>, align 16
+ %tmp = load <2 x i64>* %xU64x2, align 16
+ store <2 x i64> %tmp, <2 x i64>* %__a, align 16
+ %tmp1 = load <2 x i64>* %xU64x2, align 16
+ store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
+ %tmp2 = load <2 x i64>* %__a, align 16
+ %tmp3 = bitcast <2 x i64> %tmp2 to <16 x i8>
+ %tmp4 = load <2 x i64>* %__b, align 16
+ %tmp5 = bitcast <2 x i64> %tmp4 to <16 x i8>
+ %tmp6 = bitcast <16 x i8> %tmp3 to <2 x i64>
+ %tmp7 = bitcast <16 x i8> %tmp5 to <2 x i64>
+ %vext = shufflevector <2 x i64> %tmp6, <2 x i64> %tmp7, <2 x i32> <i32 1, i32 2>
+ store <2 x i64> %vext, <2 x i64>* %xU64x2, align 16
+ ret void
+}
+
+; Shuffles with an undef second operand can also use an EXT, so long as the
+; indices wrap and stay sequential.
+; rdar://12051674
+define <16 x i8> @vext1(<16 x i8> %_a) nounwind {
+; CHECK-LABEL: vext1:
+; CHECK: ext.16b v0, v0, v0, #8
+ %vext = shufflevector <16 x i8> %_a, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <16 x i8> %vext
+}
+
+; <rdar://problem/12212062>
+define <2 x i64> @vext2(<2 x i64> %p0, <2 x i64> %p1) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: vext2:
+; CHECK: ext.16b v1, v1, v1, #8
+; CHECK: ext.16b v0, v0, v0, #8
+; CHECK: add.2d v0, v0, v1
+ %t0 = shufflevector <2 x i64> %p1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+ %t1 = shufflevector <2 x i64> %p0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+ %t2 = add <2 x i64> %t1, %t0
+ ret <2 x i64> %t2
+}
diff --git a/test/CodeGen/AArch64/arm64-vext_reverse.ll b/test/CodeGen/AArch64/arm64-vext_reverse.ll
new file mode 100644
index 000000000000..c45e55edeca5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vext_reverse.ll
@@ -0,0 +1,172 @@
+; RUN: llc -mtriple=arm64-linux-gnuabi < %s | FileCheck %s
+
+; The following tests check the correctness of reversing the input operands
+; of vext by enumerating all cases of using two undefs in shuffle masks.
+
+define <4 x i16> @vext_6701_0(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_0:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_12(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_12:
+; CHECK: ext v0.8b, v0.8b, v0.8b, #4
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_13(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_13:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 7, i32 undef, i32 1>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_14(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_14:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 7, i32 0, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_23(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_23:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 undef, i32 undef, i32 1>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_24(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_24:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 undef, i32 0, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_34(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_34:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 7, i32 undef, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_0(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_0:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 6, i32 7, i32 0>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_12(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_12:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 7, i32 0>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_13(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_13:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 6, i32 undef, i32 0>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_14(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_14:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 6, i32 7, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_23(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_23:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 undef, i32 undef, i32 0>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_24(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_24:
+; CHECK: rev32 v0.4h, v1.4h
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 undef, i32 7, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_34(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_34:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 6, i32 undef, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_0(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_0:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 0, i32 1, i32 2>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_12(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_12:
+; CHECK: ext v0.8b, v0.8b, v0.8b, #6
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 1, i32 2>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_13(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_13:
+; CHECK: rev32 v0.4h, v0.4h
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 0, i32 undef, i32 2>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_14(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_14:
+; CHECK: ext v0.8b, v0.8b, v0.8b, #6
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_23(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_23:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 undef, i32 undef, i32 2>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_24(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_24:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 undef, i32 1, i32 undef>
+ ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_34(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_34:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+ %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 0, i32 undef, i32 undef>
+ ret <4 x i16> %x
+}
diff --git a/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll b/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
new file mode 100644
index 000000000000..255a18216de5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll
@@ -0,0 +1,375 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+
+;;; Float vectors
+
+%v2f32 = type <2 x float>
+; CHECK: test_v2f32.sqrt:
+define %v2f32 @test_v2f32.sqrt(%v2f32 %a) {
+ ; CHECK: fsqrt.2s
+ %1 = call %v2f32 @llvm.sqrt.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.powi:
+define %v2f32 @test_v2f32.powi(%v2f32 %a, i32 %b) {
+ ; CHECK: pow
+ %1 = call %v2f32 @llvm.powi.v2f32(%v2f32 %a, i32 %b)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.sin:
+define %v2f32 @test_v2f32.sin(%v2f32 %a) {
+ ; CHECK: sin
+ %1 = call %v2f32 @llvm.sin.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.cos:
+define %v2f32 @test_v2f32.cos(%v2f32 %a) {
+ ; CHECK: cos
+ %1 = call %v2f32 @llvm.cos.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.pow:
+define %v2f32 @test_v2f32.pow(%v2f32 %a, %v2f32 %b) {
+ ; CHECK: pow
+ %1 = call %v2f32 @llvm.pow.v2f32(%v2f32 %a, %v2f32 %b)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.exp:
+define %v2f32 @test_v2f32.exp(%v2f32 %a) {
+ ; CHECK: exp
+ %1 = call %v2f32 @llvm.exp.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.exp2:
+define %v2f32 @test_v2f32.exp2(%v2f32 %a) {
+ ; CHECK: exp
+ %1 = call %v2f32 @llvm.exp2.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.log:
+define %v2f32 @test_v2f32.log(%v2f32 %a) {
+ ; CHECK: log
+ %1 = call %v2f32 @llvm.log.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.log10:
+define %v2f32 @test_v2f32.log10(%v2f32 %a) {
+ ; CHECK: log
+ %1 = call %v2f32 @llvm.log10.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.log2:
+define %v2f32 @test_v2f32.log2(%v2f32 %a) {
+ ; CHECK: log
+ %1 = call %v2f32 @llvm.log2.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.fma:
+define %v2f32 @test_v2f32.fma(%v2f32 %a, %v2f32 %b, %v2f32 %c) {
+ ; CHECK: fma
+ %1 = call %v2f32 @llvm.fma.v2f32(%v2f32 %a, %v2f32 %b, %v2f32 %c)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.fabs:
+define %v2f32 @test_v2f32.fabs(%v2f32 %a) {
+ ; CHECK: fabs
+ %1 = call %v2f32 @llvm.fabs.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.floor:
+define %v2f32 @test_v2f32.floor(%v2f32 %a) {
+ ; CHECK: frintm.2s
+ %1 = call %v2f32 @llvm.floor.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.ceil:
+define %v2f32 @test_v2f32.ceil(%v2f32 %a) {
+ ; CHECK: frintp.2s
+ %1 = call %v2f32 @llvm.ceil.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.trunc:
+define %v2f32 @test_v2f32.trunc(%v2f32 %a) {
+ ; CHECK: frintz.2s
+ %1 = call %v2f32 @llvm.trunc.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.rint:
+define %v2f32 @test_v2f32.rint(%v2f32 %a) {
+ ; CHECK: frintx.2s
+ %1 = call %v2f32 @llvm.rint.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+; CHECK: test_v2f32.nearbyint:
+define %v2f32 @test_v2f32.nearbyint(%v2f32 %a) {
+ ; CHECK: frinti.2s
+ %1 = call %v2f32 @llvm.nearbyint.v2f32(%v2f32 %a)
+ ret %v2f32 %1
+}
+
+declare %v2f32 @llvm.sqrt.v2f32(%v2f32) #0
+declare %v2f32 @llvm.powi.v2f32(%v2f32, i32) #0
+declare %v2f32 @llvm.sin.v2f32(%v2f32) #0
+declare %v2f32 @llvm.cos.v2f32(%v2f32) #0
+declare %v2f32 @llvm.pow.v2f32(%v2f32, %v2f32) #0
+declare %v2f32 @llvm.exp.v2f32(%v2f32) #0
+declare %v2f32 @llvm.exp2.v2f32(%v2f32) #0
+declare %v2f32 @llvm.log.v2f32(%v2f32) #0
+declare %v2f32 @llvm.log10.v2f32(%v2f32) #0
+declare %v2f32 @llvm.log2.v2f32(%v2f32) #0
+declare %v2f32 @llvm.fma.v2f32(%v2f32, %v2f32, %v2f32) #0
+declare %v2f32 @llvm.fabs.v2f32(%v2f32) #0
+declare %v2f32 @llvm.floor.v2f32(%v2f32) #0
+declare %v2f32 @llvm.ceil.v2f32(%v2f32) #0
+declare %v2f32 @llvm.trunc.v2f32(%v2f32) #0
+declare %v2f32 @llvm.rint.v2f32(%v2f32) #0
+declare %v2f32 @llvm.nearbyint.v2f32(%v2f32) #0
+
+;;;
+
+%v4f32 = type <4 x float>
+; CHECK: test_v4f32.sqrt:
+define %v4f32 @test_v4f32.sqrt(%v4f32 %a) {
+ ; CHECK: fsqrt.4s
+ %1 = call %v4f32 @llvm.sqrt.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.powi:
+define %v4f32 @test_v4f32.powi(%v4f32 %a, i32 %b) {
+ ; CHECK: pow
+ %1 = call %v4f32 @llvm.powi.v4f32(%v4f32 %a, i32 %b)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.sin:
+define %v4f32 @test_v4f32.sin(%v4f32 %a) {
+ ; CHECK: sin
+ %1 = call %v4f32 @llvm.sin.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.cos:
+define %v4f32 @test_v4f32.cos(%v4f32 %a) {
+ ; CHECK: cos
+ %1 = call %v4f32 @llvm.cos.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.pow:
+define %v4f32 @test_v4f32.pow(%v4f32 %a, %v4f32 %b) {
+ ; CHECK: pow
+ %1 = call %v4f32 @llvm.pow.v4f32(%v4f32 %a, %v4f32 %b)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.exp:
+define %v4f32 @test_v4f32.exp(%v4f32 %a) {
+ ; CHECK: exp
+ %1 = call %v4f32 @llvm.exp.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.exp2:
+define %v4f32 @test_v4f32.exp2(%v4f32 %a) {
+ ; CHECK: exp
+ %1 = call %v4f32 @llvm.exp2.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.log:
+define %v4f32 @test_v4f32.log(%v4f32 %a) {
+ ; CHECK: log
+ %1 = call %v4f32 @llvm.log.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.log10:
+define %v4f32 @test_v4f32.log10(%v4f32 %a) {
+ ; CHECK: log
+ %1 = call %v4f32 @llvm.log10.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.log2:
+define %v4f32 @test_v4f32.log2(%v4f32 %a) {
+ ; CHECK: log
+ %1 = call %v4f32 @llvm.log2.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.fma:
+define %v4f32 @test_v4f32.fma(%v4f32 %a, %v4f32 %b, %v4f32 %c) {
+ ; CHECK: fma
+ %1 = call %v4f32 @llvm.fma.v4f32(%v4f32 %a, %v4f32 %b, %v4f32 %c)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.fabs:
+define %v4f32 @test_v4f32.fabs(%v4f32 %a) {
+ ; CHECK: fabs
+ %1 = call %v4f32 @llvm.fabs.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.floor:
+define %v4f32 @test_v4f32.floor(%v4f32 %a) {
+ ; CHECK: frintm.4s
+ %1 = call %v4f32 @llvm.floor.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.ceil:
+define %v4f32 @test_v4f32.ceil(%v4f32 %a) {
+ ; CHECK: frintp.4s
+ %1 = call %v4f32 @llvm.ceil.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.trunc:
+define %v4f32 @test_v4f32.trunc(%v4f32 %a) {
+ ; CHECK: frintz.4s
+ %1 = call %v4f32 @llvm.trunc.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.rint:
+define %v4f32 @test_v4f32.rint(%v4f32 %a) {
+ ; CHECK: frintx.4s
+ %1 = call %v4f32 @llvm.rint.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+; CHECK: test_v4f32.nearbyint:
+define %v4f32 @test_v4f32.nearbyint(%v4f32 %a) {
+ ; CHECK: frinti.4s
+ %1 = call %v4f32 @llvm.nearbyint.v4f32(%v4f32 %a)
+ ret %v4f32 %1
+}
+
+declare %v4f32 @llvm.sqrt.v4f32(%v4f32) #0
+declare %v4f32 @llvm.powi.v4f32(%v4f32, i32) #0
+declare %v4f32 @llvm.sin.v4f32(%v4f32) #0
+declare %v4f32 @llvm.cos.v4f32(%v4f32) #0
+declare %v4f32 @llvm.pow.v4f32(%v4f32, %v4f32) #0
+declare %v4f32 @llvm.exp.v4f32(%v4f32) #0
+declare %v4f32 @llvm.exp2.v4f32(%v4f32) #0
+declare %v4f32 @llvm.log.v4f32(%v4f32) #0
+declare %v4f32 @llvm.log10.v4f32(%v4f32) #0
+declare %v4f32 @llvm.log2.v4f32(%v4f32) #0
+declare %v4f32 @llvm.fma.v4f32(%v4f32, %v4f32, %v4f32) #0
+declare %v4f32 @llvm.fabs.v4f32(%v4f32) #0
+declare %v4f32 @llvm.floor.v4f32(%v4f32) #0
+declare %v4f32 @llvm.ceil.v4f32(%v4f32) #0
+declare %v4f32 @llvm.trunc.v4f32(%v4f32) #0
+declare %v4f32 @llvm.rint.v4f32(%v4f32) #0
+declare %v4f32 @llvm.nearbyint.v4f32(%v4f32) #0
+
+;;; Double vector
+
+%v2f64 = type <2 x double>
+; CHECK: test_v2f64.sqrt:
+define %v2f64 @test_v2f64.sqrt(%v2f64 %a) {
+ ; CHECK: fsqrt.2d
+ %1 = call %v2f64 @llvm.sqrt.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.powi:
+define %v2f64 @test_v2f64.powi(%v2f64 %a, i32 %b) {
+ ; CHECK: pow
+ %1 = call %v2f64 @llvm.powi.v2f64(%v2f64 %a, i32 %b)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.sin:
+define %v2f64 @test_v2f64.sin(%v2f64 %a) {
+ ; CHECK: sin
+ %1 = call %v2f64 @llvm.sin.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.cos:
+define %v2f64 @test_v2f64.cos(%v2f64 %a) {
+ ; CHECK: cos
+ %1 = call %v2f64 @llvm.cos.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.pow:
+define %v2f64 @test_v2f64.pow(%v2f64 %a, %v2f64 %b) {
+ ; CHECK: pow
+ %1 = call %v2f64 @llvm.pow.v2f64(%v2f64 %a, %v2f64 %b)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.exp:
+define %v2f64 @test_v2f64.exp(%v2f64 %a) {
+ ; CHECK: exp
+ %1 = call %v2f64 @llvm.exp.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.exp2:
+define %v2f64 @test_v2f64.exp2(%v2f64 %a) {
+ ; CHECK: exp
+ %1 = call %v2f64 @llvm.exp2.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.log:
+define %v2f64 @test_v2f64.log(%v2f64 %a) {
+ ; CHECK: log
+ %1 = call %v2f64 @llvm.log.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.log10:
+define %v2f64 @test_v2f64.log10(%v2f64 %a) {
+ ; CHECK: log
+ %1 = call %v2f64 @llvm.log10.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.log2:
+define %v2f64 @test_v2f64.log2(%v2f64 %a) {
+ ; CHECK: log
+ %1 = call %v2f64 @llvm.log2.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.fma:
+define %v2f64 @test_v2f64.fma(%v2f64 %a, %v2f64 %b, %v2f64 %c) {
+ ; CHECK: fma
+ %1 = call %v2f64 @llvm.fma.v2f64(%v2f64 %a, %v2f64 %b, %v2f64 %c)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.fabs:
+define %v2f64 @test_v2f64.fabs(%v2f64 %a) {
+ ; CHECK: fabs
+ %1 = call %v2f64 @llvm.fabs.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.floor:
+define %v2f64 @test_v2f64.floor(%v2f64 %a) {
+ ; CHECK: frintm.2d
+ %1 = call %v2f64 @llvm.floor.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.ceil:
+define %v2f64 @test_v2f64.ceil(%v2f64 %a) {
+ ; CHECK: frintp.2d
+ %1 = call %v2f64 @llvm.ceil.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.trunc:
+define %v2f64 @test_v2f64.trunc(%v2f64 %a) {
+ ; CHECK: frintz.2d
+ %1 = call %v2f64 @llvm.trunc.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.rint:
+define %v2f64 @test_v2f64.rint(%v2f64 %a) {
+ ; CHECK: frintx.2d
+ %1 = call %v2f64 @llvm.rint.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+; CHECK: test_v2f64.nearbyint:
+define %v2f64 @test_v2f64.nearbyint(%v2f64 %a) {
+ ; CHECK: frinti.2d
+ %1 = call %v2f64 @llvm.nearbyint.v2f64(%v2f64 %a)
+ ret %v2f64 %1
+}
+
+declare %v2f64 @llvm.sqrt.v2f64(%v2f64) #0
+declare %v2f64 @llvm.powi.v2f64(%v2f64, i32) #0
+declare %v2f64 @llvm.sin.v2f64(%v2f64) #0
+declare %v2f64 @llvm.cos.v2f64(%v2f64) #0
+declare %v2f64 @llvm.pow.v2f64(%v2f64, %v2f64) #0
+declare %v2f64 @llvm.exp.v2f64(%v2f64) #0
+declare %v2f64 @llvm.exp2.v2f64(%v2f64) #0
+declare %v2f64 @llvm.log.v2f64(%v2f64) #0
+declare %v2f64 @llvm.log10.v2f64(%v2f64) #0
+declare %v2f64 @llvm.log2.v2f64(%v2f64) #0
+declare %v2f64 @llvm.fma.v2f64(%v2f64, %v2f64, %v2f64) #0
+declare %v2f64 @llvm.fabs.v2f64(%v2f64) #0
+declare %v2f64 @llvm.floor.v2f64(%v2f64) #0
+declare %v2f64 @llvm.ceil.v2f64(%v2f64) #0
+declare %v2f64 @llvm.trunc.v2f64(%v2f64) #0
+declare %v2f64 @llvm.rint.v2f64(%v2f64) #0
+declare %v2f64 @llvm.nearbyint.v2f64(%v2f64) #0
+
+attributes #0 = { nounwind readonly }
diff --git a/test/CodeGen/AArch64/arm64-vhadd.ll b/test/CodeGen/AArch64/arm64-vhadd.ll
new file mode 100644
index 000000000000..6178bf9809dd
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vhadd.ll
@@ -0,0 +1,249 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @shadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: shadd8b:
+;CHECK: shadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @shadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: shadd16b:
+;CHECK: shadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @shadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: shadd4h:
+;CHECK: shadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @shadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: shadd8h:
+;CHECK: shadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @shadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: shadd2s:
+;CHECK: shadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @shadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: shadd4s:
+;CHECK: shadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <8 x i8> @uhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uhadd8b:
+;CHECK: uhadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @uhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uhadd16b:
+;CHECK: uhadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @uhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uhadd4h:
+;CHECK: uhadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @uhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uhadd8h:
+;CHECK: uhadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @uhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uhadd2s:
+;CHECK: uhadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @uhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uhadd4s:
+;CHECK: uhadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @srhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: srhadd8b:
+;CHECK: srhadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @srhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: srhadd16b:
+;CHECK: srhadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @srhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: srhadd4h:
+;CHECK: srhadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @srhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: srhadd8h:
+;CHECK: srhadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @srhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: srhadd2s:
+;CHECK: srhadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @srhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: srhadd4s:
+;CHECK: srhadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <8 x i8> @urhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: urhadd8b:
+;CHECK: urhadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @urhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: urhadd16b:
+;CHECK: urhadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @urhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: urhadd4h:
+;CHECK: urhadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @urhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: urhadd8h:
+;CHECK: urhadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @urhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: urhadd2s:
+;CHECK: urhadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @urhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: urhadd4s:
+;CHECK: urhadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vhsub.ll b/test/CodeGen/AArch64/arm64-vhsub.ll
new file mode 100644
index 000000000000..13bfda3899e5
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vhsub.ll
@@ -0,0 +1,125 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @shsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: shsub8b:
+;CHECK: shsub.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @shsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: shsub16b:
+;CHECK: shsub.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @shsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: shsub4h:
+;CHECK: shsub.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @shsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: shsub8h:
+;CHECK: shsub.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @shsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: shsub2s:
+;CHECK: shsub.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @shsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: shsub4s:
+;CHECK: shsub.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <8 x i8> @uhsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uhsub8b:
+;CHECK: uhsub.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @uhsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uhsub16b:
+;CHECK: uhsub.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @uhsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uhsub4h:
+;CHECK: uhsub.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @uhsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uhsub8h:
+;CHECK: uhsub.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uhsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @uhsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uhsub2s:
+;CHECK: uhsub.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uhsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @uhsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uhsub4s:
+;CHECK: uhsub.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uhsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uhsub.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uhsub.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uhsub.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-virtual_base.ll b/test/CodeGen/AArch64/arm64-virtual_base.ll
new file mode 100644
index 000000000000..cb9595453348
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-virtual_base.ll
@@ -0,0 +1,51 @@
+; RUN: llc < %s -O3 -march arm64 | FileCheck %s
+; <rdar://13463602>
+
+%struct.Counter_Struct = type { i64, i64 }
+%struct.Bicubic_Patch_Struct = type { %struct.Method_Struct*, i32, %struct.Object_Struct*, %struct.Texture_Struct*, %struct.Interior_Struct*, %struct.Object_Struct*, %struct.Object_Struct*, %struct.Bounding_Box_Struct, i64, i32, i32, i32, [4 x [4 x [3 x double]]], [3 x double], double, double, %struct.Bezier_Node_Struct* }
+%struct.Method_Struct = type { i32 (%struct.Object_Struct*, %struct.Ray_Struct*, %struct.istack_struct*)*, i32 (double*, %struct.Object_Struct*)*, void (double*, %struct.Object_Struct*, %struct.istk_entry*)*, i8* (%struct.Object_Struct*)*, void (%struct.Object_Struct*, double*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*, double*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*, double*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*)*, void (%struct.Object_Struct*)* }
+%struct.Object_Struct = type { %struct.Method_Struct*, i32, %struct.Object_Struct*, %struct.Texture_Struct*, %struct.Interior_Struct*, %struct.Object_Struct*, %struct.Object_Struct*, %struct.Bounding_Box_Struct, i64 }
+%struct.Texture_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.9, %struct.Texture_Struct*, %struct.Pigment_Struct*, %struct.Tnormal_Struct*, %struct.Finish_Struct*, %struct.Texture_Struct*, i32 }
+%struct.Warps_Struct = type { i16, %struct.Warps_Struct* }
+%struct.Pattern_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.6 }
+%struct.Blend_Map_Struct = type { i16, i16, i16, i64, %struct.Blend_Map_Entry* }
+%struct.Blend_Map_Entry = type { float, i8, %union.anon }
+%union.anon = type { [2 x double], [8 x i8] }
+%union.anon.6 = type { %struct.anon.7 }
+%struct.anon.7 = type { float, [3 x double] }
+%union.anon.9 = type { %struct.anon.10 }
+%struct.anon.10 = type { float, [3 x double] }
+%struct.Pigment_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.0, [5 x float] }
+%union.anon.0 = type { %struct.anon }
+%struct.anon = type { float, [3 x double] }
+%struct.Tnormal_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.3, float }
+%union.anon.3 = type { %struct.anon.4 }
+%struct.anon.4 = type { float, [3 x double] }
+%struct.Finish_Struct = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, [3 x float], [3 x float] }
+%struct.Interior_Struct = type { i64, i32, float, float, float, float, float, %struct.Media_Struct* }
+%struct.Media_Struct = type { i32, i32, i32, i32, i32, double, double, i32, i32, i32, i32, [5 x float], [5 x float], [5 x float], [5 x float], double, double, double, double*, %struct.Pigment_Struct*, %struct.Media_Struct* }
+%struct.Bounding_Box_Struct = type { [3 x float], [3 x float] }
+%struct.Ray_Struct = type { [3 x double], [3 x double], i32, [100 x %struct.Interior_Struct*] }
+%struct.istack_struct = type { %struct.istack_struct*, %struct.istk_entry*, i32 }
+%struct.istk_entry = type { double, [3 x double], [3 x double], %struct.Object_Struct*, i32, i32, double, double, i8* }
+%struct.Transform_Struct = type { [4 x [4 x double]], [4 x [4 x double]] }
+%struct.Bezier_Node_Struct = type { i32, [3 x double], double, i32, i8* }
+
+define void @Precompute_Patch_Values(%struct.Bicubic_Patch_Struct* %Shape) {
+; CHECK: Precompute_Patch_Values
+; CHECK: ldr [[VAL:x[0-9]+]], [x0, #288]
+; CHECK-NEXT: str [[VAL]], [sp, #232]
+; CHECK-NEXT: ldr [[VAL2:q[0-9]+]], [x0, #272]
+; CHECK-NEXT: stur [[VAL2]], {{\[}}sp, #216]
+entry:
+ %Control_Points = alloca [16 x [3 x double]], align 8
+ %arraydecay5.3.1 = getelementptr inbounds [16 x [3 x double]]* %Control_Points, i64 0, i64 9, i64 0
+ %tmp14 = bitcast double* %arraydecay5.3.1 to i8*
+ %arraydecay11.3.1 = getelementptr inbounds %struct.Bicubic_Patch_Struct* %Shape, i64 0, i32 12, i64 1, i64 3, i64 0
+ %tmp15 = bitcast double* %arraydecay11.3.1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i32 1, i1 false)
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
diff --git a/test/CodeGen/AArch64/arm64-vmax.ll b/test/CodeGen/AArch64/arm64-vmax.ll
new file mode 100644
index 000000000000..3f2c134dec6e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vmax.ll
@@ -0,0 +1,679 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @smax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: smax_8b:
+;CHECK: smax.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @smax_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: smax_16b:
+;CHECK: smax.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @smax_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: smax_4h:
+;CHECK: smax.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @smax_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: smax_8h:
+;CHECK: smax.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @smax_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: smax_2s:
+;CHECK: smax.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @smax_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: smax_4s:
+;CHECK: smax.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @umax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: umax_8b:
+;CHECK: umax.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @umax_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: umax_16b:
+;CHECK: umax.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @umax_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: umax_4h:
+;CHECK: umax.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @umax_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: umax_8h:
+;CHECK: umax.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @umax_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: umax_2s:
+;CHECK: umax.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @umax_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: umax_4s:
+;CHECK: umax.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @smin_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: smin_8b:
+;CHECK: smin.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @smin_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: smin_16b:
+;CHECK: smin.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @smin_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: smin_4h:
+;CHECK: smin.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @smin_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: smin_8h:
+;CHECK: smin.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @smin_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: smin_2s:
+;CHECK: smin.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @smin_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: smin_4s:
+;CHECK: smin.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @umin_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: umin_8b:
+;CHECK: umin.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @umin_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: umin_16b:
+;CHECK: umin.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @umin_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: umin_4h:
+;CHECK: umin.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @umin_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: umin_8h:
+;CHECK: umin.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @umin_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: umin_2s:
+;CHECK: umin.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @umin_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: umin_4s:
+;CHECK: umin.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @smaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: smaxp_8b:
+;CHECK: smaxp.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.smaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @smaxp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: smaxp_16b:
+;CHECK: smaxp.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.smaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @smaxp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: smaxp_4h:
+;CHECK: smaxp.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @smaxp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: smaxp_8h:
+;CHECK: smaxp.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.smaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @smaxp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: smaxp_2s:
+;CHECK: smaxp.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.smaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @smaxp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: smaxp_4s:
+;CHECK: smaxp.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.smaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.smaxp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.smaxp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.smaxp.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.smaxp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.smaxp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @umaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: umaxp_8b:
+;CHECK: umaxp.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.umaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @umaxp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: umaxp_16b:
+;CHECK: umaxp.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.umaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @umaxp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: umaxp_4h:
+;CHECK: umaxp.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.umaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @umaxp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: umaxp_8h:
+;CHECK: umaxp.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.umaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @umaxp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: umaxp_2s:
+;CHECK: umaxp.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @umaxp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: umaxp_4s:
+;CHECK: umaxp.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.umaxp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.umaxp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.umaxp.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.umaxp.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @sminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sminp_8b:
+;CHECK: sminp.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @sminp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sminp_16b:
+;CHECK: sminp.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @sminp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sminp_4h:
+;CHECK: sminp.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sminp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sminp_8h:
+;CHECK: sminp.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sminp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sminp_2s:
+;CHECK: sminp.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sminp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sminp_4s:
+;CHECK: sminp.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sminp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.sminp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sminp.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sminp.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sminp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sminp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i8> @uminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uminp_8b:
+;CHECK: uminp.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @uminp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uminp_16b:
+;CHECK: uminp.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @uminp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uminp_4h:
+;CHECK: uminp.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @uminp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uminp_8h:
+;CHECK: uminp.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @uminp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uminp_2s:
+;CHECK: uminp.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @uminp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uminp_4s:
+;CHECK: uminp.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.uminp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <16 x i8> @llvm.aarch64.neon.uminp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uminp.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uminp.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uminp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uminp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x float> @fmax_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fmax_2s:
+;CHECK: fmax.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fmax_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fmax_4s:
+;CHECK: fmax.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fmax_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fmax_2d:
+;CHECK: fmax.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <2 x float> @fmaxp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fmaxp_2s:
+;CHECK: fmaxp.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fmaxp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fmaxp_4s:
+;CHECK: fmaxp.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fmaxp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fmaxp_2d:
+;CHECK: fmaxp.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fmaxp.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fmaxp.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fmaxp.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <2 x float> @fmin_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fmin_2s:
+;CHECK: fmin.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fmin_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fmin_4s:
+;CHECK: fmin.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fmin_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fmin_2d:
+;CHECK: fmin.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <2 x float> @fminp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fminp_2s:
+;CHECK: fminp.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fminp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fminp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fminp_4s:
+;CHECK: fminp.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fminp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fminp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fminp_2d:
+;CHECK: fminp.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fminp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fminp.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fminp.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fminp.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <2 x float> @fminnmp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fminnmp_2s:
+;CHECK: fminnmp.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fminnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fminnmp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fminnmp_4s:
+;CHECK: fminnmp.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fminnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fminnmp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fminnmp_2d:
+;CHECK: fminnmp.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fminnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fminnmp.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fminnmp.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fminnmp.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <2 x float> @fmaxnmp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fmaxnmp_2s:
+;CHECK: fmaxnmp.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fmaxnmp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fmaxnmp_4s:
+;CHECK: fmaxnmp.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fmaxnmp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fmaxnmp_2d:
+;CHECK: fmaxnmp.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fmaxnmp.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double>, <2 x double>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vminmaxnm.ll b/test/CodeGen/AArch64/arm64-vminmaxnm.ll
new file mode 100644
index 000000000000..b5aca45cd479
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vminmaxnm.ll
@@ -0,0 +1,68 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x float> @f1(<2 x float> %a, <2 x float> %b) nounwind readnone ssp {
+; CHECK: fmaxnm.2s v0, v0, v1
+; CHECK: ret
+ %vmaxnm2.i = tail call <2 x float> @llvm.aarch64.neon.fmaxnm.v2f32(<2 x float> %a, <2 x float> %b) nounwind
+ ret <2 x float> %vmaxnm2.i
+}
+
+define <4 x float> @f2(<4 x float> %a, <4 x float> %b) nounwind readnone ssp {
+; CHECK: fmaxnm.4s v0, v0, v1
+; CHECK: ret
+ %vmaxnm2.i = tail call <4 x float> @llvm.aarch64.neon.fmaxnm.v4f32(<4 x float> %a, <4 x float> %b) nounwind
+ ret <4 x float> %vmaxnm2.i
+}
+
+define <2 x double> @f3(<2 x double> %a, <2 x double> %b) nounwind readnone ssp {
+; CHECK: fmaxnm.2d v0, v0, v1
+; CHECK: ret
+ %vmaxnm2.i = tail call <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double> %a, <2 x double> %b) nounwind
+ ret <2 x double> %vmaxnm2.i
+}
+
+define <2 x float> @f4(<2 x float> %a, <2 x float> %b) nounwind readnone ssp {
+; CHECK: fminnm.2s v0, v0, v1
+; CHECK: ret
+ %vminnm2.i = tail call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> %a, <2 x float> %b) nounwind
+ ret <2 x float> %vminnm2.i
+}
+
+define <4 x float> @f5(<4 x float> %a, <4 x float> %b) nounwind readnone ssp {
+; CHECK: fminnm.4s v0, v0, v1
+; CHECK: ret
+ %vminnm2.i = tail call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> %a, <4 x float> %b) nounwind
+ ret <4 x float> %vminnm2.i
+}
+
+define <2 x double> @f6(<2 x double> %a, <2 x double> %b) nounwind readnone ssp {
+; CHECK: fminnm.2d v0, v0, v1
+; CHECK: ret
+ %vminnm2.i = tail call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> %a, <2 x double> %b) nounwind
+ ret <2 x double> %vminnm2.i
+}
+
+declare <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double>, <2 x double>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fmaxnm.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x float> @llvm.aarch64.neon.fmaxnm.v2f32(<2 x float>, <2 x float>) nounwind readnone
+
+
+define double @test_fmaxnmv(<2 x double> %in) {
+; CHECK-LABEL: test_fmaxnmv:
+; CHECK: fmaxnmp.2d d0, v0
+ %max = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %in)
+ ret double %max
+}
+
+define double @test_fminnmv(<2 x double> %in) {
+; CHECK-LABEL: test_fminnmv:
+; CHECK: fminnmp.2d d0, v0
+ %min = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %in)
+ ret double %min
+}
+
+declare double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double>)
diff --git a/test/CodeGen/AArch64/arm64-vmovn.ll b/test/CodeGen/AArch64/arm64-vmovn.ll
new file mode 100644
index 000000000000..67e2816a7f5f
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vmovn.ll
@@ -0,0 +1,242 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @xtn8b(<8 x i16> %A) nounwind {
+;CHECK-LABEL: xtn8b:
+;CHECK-NOT: ld1
+;CHECK: xtn.8b v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = trunc <8 x i16> %A to <8 x i8>
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @xtn4h(<4 x i32> %A) nounwind {
+;CHECK-LABEL: xtn4h:
+;CHECK-NOT: ld1
+;CHECK: xtn.4h v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = trunc <4 x i32> %A to <4 x i16>
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @xtn2s(<2 x i64> %A) nounwind {
+;CHECK-LABEL: xtn2s:
+;CHECK-NOT: ld1
+;CHECK: xtn.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = trunc <2 x i64> %A to <2 x i32>
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @xtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
+;CHECK-LABEL: xtn2_16b:
+;CHECK-NOT: ld1
+;CHECK: xtn2.16b v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = trunc <8 x i16> %A to <8 x i8>
+ %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @xtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
+;CHECK-LABEL: xtn2_8h:
+;CHECK-NOT: ld1
+;CHECK: xtn2.8h v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = trunc <4 x i32> %A to <4 x i16>
+ %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @xtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
+;CHECK-LABEL: xtn2_4s:
+;CHECK-NOT: ld1
+;CHECK: xtn2.4s v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = trunc <2 x i64> %A to <2 x i32>
+ %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+define <8 x i8> @sqxtn8b(<8 x i16> %A) nounwind {
+;CHECK-LABEL: sqxtn8b:
+;CHECK-NOT: ld1
+;CHECK: sqxtn.8b v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %A)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqxtn4h(<4 x i32> %A) nounwind {
+;CHECK-LABEL: sqxtn4h:
+;CHECK-NOT: ld1
+;CHECK: sqxtn.4h v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %A)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqxtn2s(<2 x i64> %A) nounwind {
+;CHECK-LABEL: sqxtn2s:
+;CHECK-NOT: ld1
+;CHECK: sqxtn.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
+;CHECK-LABEL: sqxtn2_16b:
+;CHECK-NOT: ld1
+;CHECK: sqxtn2.16b v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %A)
+ %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @sqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
+;CHECK-LABEL: sqxtn2_8h:
+;CHECK-NOT: ld1
+;CHECK: sqxtn2.8h v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %A)
+ %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @sqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
+;CHECK-LABEL: sqxtn2_4s:
+;CHECK-NOT: ld1
+;CHECK: sqxtn2.4s v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %A)
+ %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64>) nounwind readnone
+
+define <8 x i8> @uqxtn8b(<8 x i16> %A) nounwind {
+;CHECK-LABEL: uqxtn8b:
+;CHECK-NOT: ld1
+;CHECK: uqxtn.8b v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %A)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqxtn4h(<4 x i32> %A) nounwind {
+;CHECK-LABEL: uqxtn4h:
+;CHECK-NOT: ld1
+;CHECK: uqxtn.4h v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %A)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqxtn2s(<2 x i64> %A) nounwind {
+;CHECK-LABEL: uqxtn2s:
+;CHECK-NOT: ld1
+;CHECK: uqxtn.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @uqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
+;CHECK-LABEL: uqxtn2_16b:
+;CHECK-NOT: ld1
+;CHECK: uqxtn2.16b v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %A)
+ %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @uqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
+;CHECK-LABEL: uqxtn2_8h:
+;CHECK-NOT: ld1
+;CHECK: uqxtn2.8h v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %A)
+ %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @uqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
+;CHECK-LABEL: uqxtn2_4s:
+;CHECK-NOT: ld1
+;CHECK: uqxtn2.4s v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %A)
+ %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+declare <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64>) nounwind readnone
+
+define <8 x i8> @sqxtun8b(<8 x i16> %A) nounwind {
+;CHECK-LABEL: sqxtun8b:
+;CHECK-NOT: ld1
+;CHECK: sqxtun.8b v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %A)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqxtun4h(<4 x i32> %A) nounwind {
+;CHECK-LABEL: sqxtun4h:
+;CHECK-NOT: ld1
+;CHECK: sqxtun.4h v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %A)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqxtun2s(<2 x i64> %A) nounwind {
+;CHECK-LABEL: sqxtun2s:
+;CHECK-NOT: ld1
+;CHECK: sqxtun.2s v0, v0
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %A)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqxtun2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
+;CHECK-LABEL: sqxtun2_16b:
+;CHECK-NOT: ld1
+;CHECK: sqxtun2.16b v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %A)
+ %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @sqxtun2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
+;CHECK-LABEL: sqxtun2_8h:
+;CHECK-NOT: ld1
+;CHECK: sqxtun2.8h v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %A)
+ %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @sqxtun2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
+;CHECK-LABEL: sqxtun2_4s:
+;CHECK-NOT: ld1
+;CHECK: sqxtun2.4s v0, v1
+;CHECK-NEXT: ret
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %A)
+ %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64>) nounwind readnone
+
diff --git a/test/CodeGen/AArch64/arm64-vmul.ll b/test/CodeGen/AArch64/arm64-vmul.ll
new file mode 100644
index 000000000000..6fa60fe346af
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vmul.ll
@@ -0,0 +1,2036 @@
+; RUN: llc -asm-verbose=false < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+
+define <8 x i16> @smull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: smull8h:
+;CHECK: smull.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @smull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: smull4s:
+;CHECK: smull.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @smull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: smull2d:
+;CHECK: smull.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+
+define <8 x i16> @umull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: umull8h:
+;CHECK: umull.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @umull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: umull4s:
+;CHECK: umull.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @umull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: umull2d:
+;CHECK: umull.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+
+define <4 x i32> @sqdmull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmull4s:
+;CHECK: sqdmull.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqdmull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmull2d:
+;CHECK: sqdmull.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <4 x i32> @sqdmull2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmull2_4s:
+;CHECK: sqdmull2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqdmull2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmull2_2d:
+;CHECK: sqdmull2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+
+declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+
+define <8 x i16> @pmull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: pmull8h:
+;CHECK: pmull.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
+
+define <4 x i16> @sqdmulh_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_4h:
+;CHECK: sqdmulh.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sqdmulh_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_8h:
+;CHECK: sqdmulh.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sqdmulh_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_2s:
+;CHECK: sqdmulh.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sqdmulh_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_4s:
+;CHECK: sqdmulh.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define i32 @sqdmulh_1s(i32* %A, i32* %B) nounwind {
+;CHECK-LABEL: sqdmulh_1s:
+;CHECK: sqdmulh s0, {{s[0-9]+}}, {{s[0-9]+}}
+ %tmp1 = load i32* %A
+ %tmp2 = load i32* %B
+ %tmp3 = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %tmp1, i32 %tmp2)
+ ret i32 %tmp3
+}
+
+declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare i32 @llvm.aarch64.neon.sqdmulh.i32(i32, i32) nounwind readnone
+
+define <4 x i16> @sqrdmulh_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_4h:
+;CHECK: sqrdmulh.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sqrdmulh_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_8h:
+;CHECK: sqrdmulh.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sqrdmulh_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_2s:
+;CHECK: sqrdmulh.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sqrdmulh_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_4s:
+;CHECK: sqrdmulh.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define i32 @sqrdmulh_1s(i32* %A, i32* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_1s:
+;CHECK: sqrdmulh s0, {{s[0-9]+}}, {{s[0-9]+}}
+ %tmp1 = load i32* %A
+ %tmp2 = load i32* %B
+ %tmp3 = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %tmp1, i32 %tmp2)
+ ret i32 %tmp3
+}
+
+declare <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare i32 @llvm.aarch64.neon.sqrdmulh.i32(i32, i32) nounwind readnone
+
+define <2 x float> @fmulx_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fmulx_2s:
+;CHECK: fmulx.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @fmulx_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fmulx_4s:
+;CHECK: fmulx.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @fmulx_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fmulx_2d:
+;CHECK: fmulx.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x i32> @smlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: smlal4s:
+;CHECK: smlal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @smlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: smlal2d:
+;CHECK: smlal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <4 x i32> @smlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: smlsl4s:
+;CHECK: smlsl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = sub <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @smlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: smlsl2d:
+;CHECK: smlsl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = sub <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
+declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @sqdmlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlal4s:
+;CHECK: sqdmlal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @sqdmlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlal2d:
+;CHECK: sqdmlal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
+ ret <2 x i64> %tmp5
+}
+
+define <4 x i32> @sqdmlal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlal2_4s:
+;CHECK: sqdmlal2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @sqdmlal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlal2_2d:
+;CHECK: sqdmlal2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
+ ret <2 x i64> %tmp5
+}
+
+define <4 x i32> @sqdmlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl4s:
+;CHECK: sqdmlsl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @sqdmlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl2d:
+;CHECK: sqdmlsl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
+ ret <2 x i64> %tmp5
+}
+
+define <4 x i32> @sqdmlsl2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl2_4s:
+;CHECK: sqdmlsl2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @sqdmlsl2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl2_2d:
+;CHECK: sqdmlsl2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
+ ret <2 x i64> %tmp5
+}
+
+define <4 x i32> @umlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: umlal4s:
+;CHECK: umlal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @umlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: umlal2d:
+;CHECK: umlal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <4 x i32> @umlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: umlsl4s:
+;CHECK: umlsl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp5 = sub <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @umlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: umlsl2d:
+;CHECK: umlsl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp5 = sub <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <2 x float> @fmla_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+;CHECK-LABEL: fmla_2s:
+;CHECK: fmla.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = load <2 x float>* %C
+ %tmp4 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
+ ret <2 x float> %tmp4
+}
+
+define <4 x float> @fmla_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+;CHECK-LABEL: fmla_4s:
+;CHECK: fmla.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = load <4 x float>* %C
+ %tmp4 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
+ ret <4 x float> %tmp4
+}
+
+define <2 x double> @fmla_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+;CHECK-LABEL: fmla_2d:
+;CHECK: fmla.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = load <2 x double>* %C
+ %tmp4 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp1, <2 x double> %tmp2, <2 x double> %tmp3)
+ ret <2 x double> %tmp4
+}
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
+
+define <2 x float> @fmls_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+;CHECK-LABEL: fmls_2s:
+;CHECK: fmls.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = load <2 x float>* %C
+ %tmp4 = fsub <2 x float> <float -0.0, float -0.0>, %tmp2
+ %tmp5 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp1, <2 x float> %tmp4, <2 x float> %tmp3)
+ ret <2 x float> %tmp5
+}
+
+define <4 x float> @fmls_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+;CHECK-LABEL: fmls_4s:
+;CHECK: fmls.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = load <4 x float>* %C
+ %tmp4 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %tmp2
+ %tmp5 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp1, <4 x float> %tmp4, <4 x float> %tmp3)
+ ret <4 x float> %tmp5
+}
+
+define <2 x double> @fmls_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+;CHECK-LABEL: fmls_2d:
+;CHECK: fmls.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = load <2 x double>* %C
+ %tmp4 = fsub <2 x double> <double -0.0, double -0.0>, %tmp2
+ %tmp5 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp1, <2 x double> %tmp4, <2 x double> %tmp3)
+ ret <2 x double> %tmp5
+}
+
+define <2 x float> @fmls_commuted_neg_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+;CHECK-LABEL: fmls_commuted_neg_2s:
+;CHECK: fmls.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = load <2 x float>* %C
+ %tmp4 = fsub <2 x float> <float -0.0, float -0.0>, %tmp2
+ %tmp5 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp4, <2 x float> %tmp1, <2 x float> %tmp3)
+ ret <2 x float> %tmp5
+}
+
+define <4 x float> @fmls_commuted_neg_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+;CHECK-LABEL: fmls_commuted_neg_4s:
+;CHECK: fmls.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = load <4 x float>* %C
+ %tmp4 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %tmp2
+ %tmp5 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp4, <4 x float> %tmp1, <4 x float> %tmp3)
+ ret <4 x float> %tmp5
+}
+
+define <2 x double> @fmls_commuted_neg_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+;CHECK-LABEL: fmls_commuted_neg_2d:
+;CHECK: fmls.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = load <2 x double>* %C
+ %tmp4 = fsub <2 x double> <double -0.0, double -0.0>, %tmp2
+ %tmp5 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp4, <2 x double> %tmp1, <2 x double> %tmp3)
+ ret <2 x double> %tmp5
+}
+
+define <2 x float> @fmls_indexed_2s(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone ssp {
+;CHECK-LABEL: fmls_indexed_2s:
+;CHECK: fmls.2s
+entry:
+ %0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %c
+ %lane = shufflevector <2 x float> %b, <2 x float> undef, <2 x i32> zeroinitializer
+ %fmls1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %0, <2 x float> %lane, <2 x float> %a)
+ ret <2 x float> %fmls1
+}
+
+define <4 x float> @fmls_indexed_4s(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp {
+;CHECK-LABEL: fmls_indexed_4s:
+;CHECK: fmls.4s
+entry:
+ %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+ %lane = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
+ %fmls1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %lane, <4 x float> %a)
+ ret <4 x float> %fmls1
+}
+
+define <2 x double> @fmls_indexed_2d(<2 x double> %a, <2 x double> %b, <2 x double> %c) nounwind readnone ssp {
+;CHECK-LABEL: fmls_indexed_2d:
+;CHECK: fmls.2d
+entry:
+ %0 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
+ %lane = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> zeroinitializer
+ %fmls1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %0, <2 x double> %lane, <2 x double> %a)
+ ret <2 x double> %fmls1
+}
+
+define <2 x float> @fmla_indexed_scalar_2s(<2 x float> %a, <2 x float> %b, float %c) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: fmla_indexed_scalar_2s:
+; CHECK-NEXT: fmla.2s
+; CHECK-NEXT: ret
+ %v1 = insertelement <2 x float> undef, float %c, i32 0
+ %v2 = insertelement <2 x float> %v1, float %c, i32 1
+ %fmla1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %v1, <2 x float> %b, <2 x float> %a) nounwind
+ ret <2 x float> %fmla1
+}
+
+define <4 x float> @fmla_indexed_scalar_4s(<4 x float> %a, <4 x float> %b, float %c) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: fmla_indexed_scalar_4s:
+; CHECK-NEXT: fmla.4s
+; CHECK-NEXT: ret
+ %v1 = insertelement <4 x float> undef, float %c, i32 0
+ %v2 = insertelement <4 x float> %v1, float %c, i32 1
+ %v3 = insertelement <4 x float> %v2, float %c, i32 2
+ %v4 = insertelement <4 x float> %v3, float %c, i32 3
+ %fmla1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %v4, <4 x float> %b, <4 x float> %a) nounwind
+ ret <4 x float> %fmla1
+}
+
+define <2 x double> @fmla_indexed_scalar_2d(<2 x double> %a, <2 x double> %b, double %c) nounwind readnone ssp {
+; CHECK-LABEL: fmla_indexed_scalar_2d:
+; CHECK-NEXT: fmla.2d
+; CHECK-NEXT: ret
+entry:
+ %v1 = insertelement <2 x double> undef, double %c, i32 0
+ %v2 = insertelement <2 x double> %v1, double %c, i32 1
+ %fmla1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %v2, <2 x double> %b, <2 x double> %a) nounwind
+ ret <2 x double> %fmla1
+}
+
+define <4 x i16> @mul_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: mul_4h:
+;CHECK-NOT: dup
+;CHECK: mul.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = mul <4 x i16> %tmp1, %tmp3
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @mul_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: mul_8h:
+;CHECK-NOT: dup
+;CHECK: mul.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = mul <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @mul_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: mul_2s:
+;CHECK-NOT: dup
+;CHECK: mul.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = mul <2 x i32> %tmp1, %tmp3
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @mul_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: mul_4s:
+;CHECK-NOT: dup
+;CHECK: mul.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = mul <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK-LABEL: mul_2d:
+; CHECK: mul
+; CHECK: mul
+ %tmp1 = mul <2 x i64> %A, %B
+ ret <2 x i64> %tmp1
+}
+
+define <2 x float> @fmul_lane_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fmul_lane_2s:
+;CHECK-NOT: dup
+;CHECK: fmul.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = shufflevector <2 x float> %tmp2, <2 x float> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = fmul <2 x float> %tmp1, %tmp3
+ ret <2 x float> %tmp4
+}
+
+define <4 x float> @fmul_lane_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fmul_lane_4s:
+;CHECK-NOT: dup
+;CHECK: fmul.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp2, <4 x float> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = fmul <4 x float> %tmp1, %tmp3
+ ret <4 x float> %tmp4
+}
+
+define <2 x double> @fmul_lane_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fmul_lane_2d:
+;CHECK-NOT: dup
+;CHECK: fmul.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = shufflevector <2 x double> %tmp2, <2 x double> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = fmul <2 x double> %tmp1, %tmp3
+ ret <2 x double> %tmp4
+}
+
+define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind {
+;CHECK-LABEL: fmul_lane_s:
+;CHECK-NOT: dup
+;CHECK: fmul.s s0, s0, v1[3]
+ %B = extractelement <4 x float> %vec, i32 3
+ %res = fmul float %A, %B
+ ret float %res
+}
+
+define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind {
+;CHECK-LABEL: fmul_lane_d:
+;CHECK-NOT: dup
+;CHECK: fmul.d d0, d0, v1[1]
+ %B = extractelement <2 x double> %vec, i32 1
+ %res = fmul double %A, %B
+ ret double %res
+}
+
+
+
+define <2 x float> @fmulx_lane_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: fmulx_lane_2s:
+;CHECK-NOT: dup
+;CHECK: fmulx.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = shufflevector <2 x float> %tmp2, <2 x float> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %tmp1, <2 x float> %tmp3)
+ ret <2 x float> %tmp4
+}
+
+define <4 x float> @fmulx_lane_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: fmulx_lane_4s:
+;CHECK-NOT: dup
+;CHECK: fmulx.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp2, <4 x float> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %tmp1, <4 x float> %tmp3)
+ ret <4 x float> %tmp4
+}
+
+define <2 x double> @fmulx_lane_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: fmulx_lane_2d:
+;CHECK-NOT: dup
+;CHECK: fmulx.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = shufflevector <2 x double> %tmp2, <2 x double> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %tmp1, <2 x double> %tmp3)
+ ret <2 x double> %tmp4
+}
+
+define <4 x i16> @sqdmulh_lane_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_lane_4h:
+;CHECK-NOT: dup
+;CHECK: sqdmulh.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp3)
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @sqdmulh_lane_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_lane_8h:
+;CHECK-NOT: dup
+;CHECK: sqdmulh.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp3)
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @sqdmulh_lane_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_lane_2s:
+;CHECK-NOT: dup
+;CHECK: sqdmulh.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp3)
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @sqdmulh_lane_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmulh_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqdmulh.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp3)
+ ret <4 x i32> %tmp4
+}
+
+define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind {
+;CHECK-LABEL: sqdmulh_lane_1s:
+;CHECK-NOT: dup
+;CHECK: sqdmulh.s s0, {{s[0-9]+}}, {{v[0-9]+}}[1]
+ %tmp1 = extractelement <4 x i32> %B, i32 1
+ %tmp2 = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %A, i32 %tmp1)
+ ret i32 %tmp2
+}
+
+define <4 x i16> @sqrdmulh_lane_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_lane_4h:
+;CHECK-NOT: dup
+;CHECK: sqrdmulh.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp3)
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @sqrdmulh_lane_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_lane_8h:
+;CHECK-NOT: dup
+;CHECK: sqrdmulh.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp3)
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @sqrdmulh_lane_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_lane_2s:
+;CHECK-NOT: dup
+;CHECK: sqrdmulh.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp3)
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @sqrdmulh_lane_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqrdmulh_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqrdmulh.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp3)
+ ret <4 x i32> %tmp4
+}
+
+define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind {
+;CHECK-LABEL: sqrdmulh_lane_1s:
+;CHECK-NOT: dup
+;CHECK: sqrdmulh.s s0, {{s[0-9]+}}, {{v[0-9]+}}[1]
+ %tmp1 = extractelement <4 x i32> %B, i32 1
+ %tmp2 = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %A, i32 %tmp1)
+ ret i32 %tmp2
+}
+
+define <4 x i32> @sqdmull_lane_4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmull_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqdmull.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @sqdmull_lane_2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmull_lane_2d:
+;CHECK-NOT: dup
+;CHECK: sqdmull.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
+ ret <2 x i64> %tmp4
+}
+
+define <4 x i32> @sqdmull2_lane_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqdmull2_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqdmull2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @sqdmull2_lane_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqdmull2_lane_2d:
+;CHECK-NOT: dup
+;CHECK: sqdmull2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i64> %tmp4
+}
+
+define <4 x i32> @umull_lane_4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: umull_lane_4s:
+;CHECK-NOT: dup
+;CHECK: umull.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @umull_lane_2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: umull_lane_2d:
+;CHECK-NOT: dup
+;CHECK: umull.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
+ ret <2 x i64> %tmp4
+}
+
+define <4 x i32> @smull_lane_4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: smull_lane_4s:
+;CHECK-NOT: dup
+;CHECK: smull.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @smull_lane_2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: smull_lane_2d:
+;CHECK-NOT: dup
+;CHECK: smull.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
+ ret <2 x i64> %tmp4
+}
+
+define <4 x i32> @smlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: smlal_lane_4s:
+;CHECK-NOT: dup
+;CHECK: smlal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
+ %tmp6 = add <4 x i32> %tmp3, %tmp5
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @smlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: smlal_lane_2d:
+;CHECK-NOT: dup
+;CHECK: smlal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
+ %tmp6 = add <2 x i64> %tmp3, %tmp5
+ ret <2 x i64> %tmp6
+}
+
+define <4 x i32> @sqdmlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlal_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqdmlal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
+ %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @sqdmlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlal_lane_2d:
+;CHECK-NOT: dup
+;CHECK: sqdmlal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
+ %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
+ ret <2 x i64> %tmp6
+}
+
+define <4 x i32> @sqdmlal2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlal2_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqdmlal2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @sqdmlal2_lane_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlal2_lane_2d:
+;CHECK-NOT: dup
+;CHECK: sqdmlal2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
+ ret <2 x i64> %tmp6
+}
+
+define i32 @sqdmlal_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind {
+;CHECK-LABEL: sqdmlal_lane_1s:
+;CHECK: sqdmlal.4s
+ %lhs = insertelement <4 x i16> undef, i16 %B, i32 0
+ %rhs = shufflevector <4 x i16> %C, <4 x i16> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %prod.vec = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %lhs, <4 x i16> %rhs)
+ %prod = extractelement <4 x i32> %prod.vec, i32 0
+ %res = call i32 @llvm.aarch64.neon.sqadd.i32(i32 %A, i32 %prod)
+ ret i32 %res
+}
+declare i32 @llvm.aarch64.neon.sqadd.i32(i32, i32)
+
+define i32 @sqdmlsl_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind {
+;CHECK-LABEL: sqdmlsl_lane_1s:
+;CHECK: sqdmlsl.4s
+ %lhs = insertelement <4 x i16> undef, i16 %B, i32 0
+ %rhs = shufflevector <4 x i16> %C, <4 x i16> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %prod.vec = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %lhs, <4 x i16> %rhs)
+ %prod = extractelement <4 x i32> %prod.vec, i32 0
+ %res = call i32 @llvm.aarch64.neon.sqsub.i32(i32 %A, i32 %prod)
+ ret i32 %res
+}
+declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32)
+
+define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
+;CHECK-LABEL: sqdmlal_lane_1d:
+;CHECK: sqdmlal.s
+ %rhs = extractelement <2 x i32> %C, i32 1
+ %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %B, i32 %rhs)
+ %res = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %A, i64 %prod)
+ ret i64 %res
+}
+declare i64 @llvm.aarch64.neon.sqdmulls.scalar(i32, i32)
+declare i64 @llvm.aarch64.neon.sqadd.i64(i64, i64)
+
+define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
+;CHECK-LABEL: sqdmlsl_lane_1d:
+;CHECK: sqdmlsl.s
+ %rhs = extractelement <2 x i32> %C, i32 1
+ %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %B, i32 %rhs)
+ %res = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %A, i64 %prod)
+ ret i64 %res
+}
+declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64)
+
+
+define <4 x i32> @umlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: umlal_lane_4s:
+;CHECK-NOT: dup
+;CHECK: umlal.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
+ %tmp6 = add <4 x i32> %tmp3, %tmp5
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @umlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: umlal_lane_2d:
+;CHECK-NOT: dup
+;CHECK: umlal.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
+ %tmp6 = add <2 x i64> %tmp3, %tmp5
+ ret <2 x i64> %tmp6
+}
+
+
+define <4 x i32> @smlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: smlsl_lane_4s:
+;CHECK-NOT: dup
+;CHECK: smlsl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
+ %tmp6 = sub <4 x i32> %tmp3, %tmp5
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @smlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: smlsl_lane_2d:
+;CHECK-NOT: dup
+;CHECK: smlsl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
+ %tmp6 = sub <2 x i64> %tmp3, %tmp5
+ ret <2 x i64> %tmp6
+}
+
+define <4 x i32> @sqdmlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqdmlsl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
+ %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @sqdmlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl_lane_2d:
+;CHECK-NOT: dup
+;CHECK: sqdmlsl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
+ %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
+ ret <2 x i64> %tmp6
+}
+
+define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl2_lane_4s:
+;CHECK-NOT: dup
+;CHECK: sqdmlsl2.4s
+ %load1 = load <8 x i16>* %A
+ %load2 = load <8 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: sqdmlsl2_lane_2d:
+;CHECK-NOT: dup
+;CHECK: sqdmlsl2.2d
+ %load1 = load <4 x i32>* %A
+ %load2 = load <4 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
+ ret <2 x i64> %tmp6
+}
+
+define <4 x i32> @umlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+;CHECK-LABEL: umlsl_lane_4s:
+;CHECK-NOT: dup
+;CHECK: umlsl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i32>* %C
+ %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
+ %tmp6 = sub <4 x i32> %tmp3, %tmp5
+ ret <4 x i32> %tmp6
+}
+
+define <2 x i64> @umlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+;CHECK-LABEL: umlsl_lane_2d:
+;CHECK-NOT: dup
+;CHECK: umlsl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i64>* %C
+ %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
+ %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
+ %tmp6 = sub <2 x i64> %tmp3, %tmp5
+ ret <2 x i64> %tmp6
+}
+
+; Scalar FMULX
+define float @fmulxs(float %a, float %b) nounwind {
+; CHECK-LABEL: fmulxs:
+; CHECK-NEXT: fmulx s0, s0, s1
+ %fmulx.i = tail call float @llvm.aarch64.neon.fmulx.f32(float %a, float %b) nounwind
+; CHECK-NEXT: ret
+ ret float %fmulx.i
+}
+
+define double @fmulxd(double %a, double %b) nounwind {
+; CHECK-LABEL: fmulxd:
+; CHECK-NEXT: fmulx d0, d0, d1
+ %fmulx.i = tail call double @llvm.aarch64.neon.fmulx.f64(double %a, double %b) nounwind
+; CHECK-NEXT: ret
+ ret double %fmulx.i
+}
+
+define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind {
+; CHECK-LABEL: fmulxs_lane:
+; CHECK-NEXT: fmulx.s s0, s0, v1[3]
+ %b = extractelement <4 x float> %vec, i32 3
+ %fmulx.i = tail call float @llvm.aarch64.neon.fmulx.f32(float %a, float %b) nounwind
+; CHECK-NEXT: ret
+ ret float %fmulx.i
+}
+
+define double @fmulxd_lane(double %a, <2 x double> %vec) nounwind {
+; CHECK-LABEL: fmulxd_lane:
+; CHECK-NEXT: fmulx.d d0, d0, v1[1]
+ %b = extractelement <2 x double> %vec, i32 1
+ %fmulx.i = tail call double @llvm.aarch64.neon.fmulx.f64(double %a, double %b) nounwind
+; CHECK-NEXT: ret
+ ret double %fmulx.i
+}
+
+declare double @llvm.aarch64.neon.fmulx.f64(double, double) nounwind readnone
+declare float @llvm.aarch64.neon.fmulx.f32(float, float) nounwind readnone
+
+
+define <8 x i16> @smull2_8h_simple(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: smull2_8h_simple:
+; CHECK-NEXT: smull2.8h v0, v0, v1
+; CHECK-NEXT: ret
+ %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %3 = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %1, <8 x i8> %2) #2
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @foo0(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: foo0:
+; CHECK: smull2.8h v0, v0, v1
+ %tmp = bitcast <16 x i8> %a to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <8 x i8>
+ %tmp2 = bitcast <16 x i8> %b to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <8 x i8>
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp3) nounwind
+ ret <8 x i16> %vmull.i.i
+}
+
+define <4 x i32> @foo1(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK: smull2.4s v0, v0, v1
+ %tmp = bitcast <8 x i16> %a to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <4 x i16>
+ %tmp2 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <4 x i16>
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3) nounwind
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @foo2(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: foo2:
+; CHECK: smull2.2d v0, v0, v1
+ %tmp = bitcast <4 x i32> %a to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <2 x i32>
+ %tmp2 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <2 x i32>
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3) nounwind
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <8 x i16> @foo3(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: foo3:
+; CHECK: umull2.8h v0, v0, v1
+ %tmp = bitcast <16 x i8> %a to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <8 x i8>
+ %tmp2 = bitcast <16 x i8> %b to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <8 x i8>
+ %vmull.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp3) nounwind
+ ret <8 x i16> %vmull.i.i
+}
+
+define <4 x i32> @foo4(<8 x i16> %a, <8 x i16> %b) nounwind {
+; CHECK-LABEL: foo4:
+; CHECK: umull2.4s v0, v0, v1
+ %tmp = bitcast <8 x i16> %a to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <4 x i16>
+ %tmp2 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <4 x i16>
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3) nounwind
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @foo5(<4 x i32> %a, <4 x i32> %b) nounwind {
+; CHECK-LABEL: foo5:
+; CHECK: umull2.2d v0, v0, v1
+ %tmp = bitcast <4 x i32> %a to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <2 x i32>
+ %tmp2 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <2 x i32>
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3) nounwind
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <4 x i32> @foo6(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: foo6:
+; CHECK-NEXT: smull2.4s v0, v1, v2[1]
+; CHECK-NEXT: ret
+entry:
+ %0 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i to <4 x i16>
+ %shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %1, <4 x i16> %shuffle) nounwind
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @foo7(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: foo7:
+; CHECK-NEXT: smull2.2d v0, v1, v2[1]
+; CHECK-NEXT: ret
+entry:
+ %0 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i to <2 x i32>
+ %shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %1, <2 x i32> %shuffle) nounwind
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @foo8(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: foo8:
+; CHECK-NEXT: umull2.4s v0, v1, v2[1]
+; CHECK-NEXT: ret
+entry:
+ %0 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i to <4 x i16>
+ %shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %1, <4 x i16> %shuffle) nounwind
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @foo9(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
+; CHECK-LABEL: foo9:
+; CHECK-NEXT: umull2.2d v0, v1, v2[1]
+; CHECK-NEXT: ret
+entry:
+ %0 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i to <2 x i32>
+ %shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %1, <2 x i32> %shuffle) nounwind
+ ret <2 x i64> %vmull2.i
+}
+
+define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind {
+; CHECK-LABEL: bar0:
+; CHECK: smlal2.8h v0, v1, v2
+; CHECK-NEXT: ret
+
+ %tmp = bitcast <16 x i8> %b to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <8 x i8>
+ %tmp2 = bitcast <16 x i8> %c to <2 x i64>
+ %shuffle.i3.i.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i.i to <8 x i8>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp3) nounwind
+ %add.i = add <8 x i16> %vmull.i.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind {
+; CHECK-LABEL: bar1:
+; CHECK: smlal2.4s v0, v1, v2
+; CHECK-NEXT: ret
+
+ %tmp = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <4 x i16>
+ %tmp2 = bitcast <8 x i16> %c to <2 x i64>
+ %shuffle.i3.i.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i.i to <4 x i16>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3) nounwind
+ %add.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind {
+; CHECK-LABEL: bar2:
+; CHECK: smlal2.2d v0, v1, v2
+; CHECK-NEXT: ret
+
+ %tmp = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <2 x i32>
+ %tmp2 = bitcast <4 x i32> %c to <2 x i64>
+ %shuffle.i3.i.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i.i to <2 x i32>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3) nounwind
+ %add.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind {
+; CHECK-LABEL: bar3:
+; CHECK: umlal2.8h v0, v1, v2
+; CHECK-NEXT: ret
+
+ %tmp = bitcast <16 x i8> %b to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <8 x i8>
+ %tmp2 = bitcast <16 x i8> %c to <2 x i64>
+ %shuffle.i3.i.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i.i to <8 x i8>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp3) nounwind
+ %add.i = add <8 x i16> %vmull.i.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind {
+; CHECK-LABEL: bar4:
+; CHECK: umlal2.4s v0, v1, v2
+; CHECK-NEXT: ret
+
+ %tmp = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <4 x i16>
+ %tmp2 = bitcast <8 x i16> %c to <2 x i64>
+ %shuffle.i3.i.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i.i to <4 x i16>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3) nounwind
+ %add.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind {
+; CHECK-LABEL: bar5:
+; CHECK: umlal2.2d v0, v1, v2
+; CHECK-NEXT: ret
+
+ %tmp = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i.i to <2 x i32>
+ %tmp2 = bitcast <4 x i32> %c to <2 x i64>
+ %shuffle.i3.i.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i.i to <2 x i32>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3) nounwind
+ %add.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind {
+; CHECK-LABEL: mlal2_1:
+; CHECK: smlal2.4s v0, v1, v2[3]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %tmp = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <4 x i16>
+ %tmp2 = bitcast <8 x i16> %shuffle to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <4 x i16>
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3) nounwind
+ %add = add <4 x i32> %vmull2.i.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind {
+; CHECK-LABEL: mlal2_2:
+; CHECK: smlal2.2d v0, v1, v2[1]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %tmp = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <2 x i32>
+ %tmp2 = bitcast <4 x i32> %shuffle to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <2 x i32>
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3) nounwind
+ %add = add <2 x i64> %vmull2.i.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind {
+; CHECK-LABEL: mlal2_4:
+; CHECK: umlal2.4s v0, v1, v2[2]
+; CHECK-NEXT: ret
+
+ %shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %tmp = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <4 x i16>
+ %tmp2 = bitcast <8 x i16> %shuffle to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <4 x i16>
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3) nounwind
+ %add = add <4 x i32> %vmull2.i.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @mlal2_5(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind {
+; CHECK-LABEL: mlal2_5:
+; CHECK: umlal2.2d v0, v1, v2[0]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <4 x i32> zeroinitializer
+ %tmp = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp1 = bitcast <1 x i64> %shuffle.i.i to <2 x i32>
+ %tmp2 = bitcast <4 x i32> %shuffle to <2 x i64>
+ %shuffle.i3.i = shufflevector <2 x i64> %tmp2, <2 x i64> undef, <1 x i32> <i32 1>
+ %tmp3 = bitcast <1 x i64> %shuffle.i3.i to <2 x i32>
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3) nounwind
+ %add = add <2 x i64> %vmull2.i.i, %a
+ ret <2 x i64> %add
+}
+
+; rdar://12328502
+define <2 x double> @vmulq_n_f64(<2 x double> %x, double %y) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: vmulq_n_f64:
+; CHECK-NOT: dup.2d
+; CHECK: fmul.2d v0, v0, v1[0]
+ %vecinit.i = insertelement <2 x double> undef, double %y, i32 0
+ %vecinit1.i = insertelement <2 x double> %vecinit.i, double %y, i32 1
+ %mul.i = fmul <2 x double> %vecinit1.i, %x
+ ret <2 x double> %mul.i
+}
+
+define <4 x float> @vmulq_n_f32(<4 x float> %x, float %y) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: vmulq_n_f32:
+; CHECK-NOT: dup.4s
+; CHECK: fmul.4s v0, v0, v1[0]
+ %vecinit.i = insertelement <4 x float> undef, float %y, i32 0
+ %vecinit1.i = insertelement <4 x float> %vecinit.i, float %y, i32 1
+ %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %y, i32 2
+ %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %y, i32 3
+ %mul.i = fmul <4 x float> %vecinit3.i, %x
+ ret <4 x float> %mul.i
+}
+
+define <2 x float> @vmul_n_f32(<2 x float> %x, float %y) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: vmul_n_f32:
+; CHECK-NOT: dup.2s
+; CHECK: fmul.2s v0, v0, v1[0]
+ %vecinit.i = insertelement <2 x float> undef, float %y, i32 0
+ %vecinit1.i = insertelement <2 x float> %vecinit.i, float %y, i32 1
+ %mul.i = fmul <2 x float> %vecinit1.i, %x
+ ret <2 x float> %mul.i
+}
+
+define <4 x i16> @vmla_laneq_s16_test(<4 x i16> %a, <4 x i16> %b, <8 x i16> %c) nounwind readnone ssp {
+entry:
+; CHECK: vmla_laneq_s16_test
+; CHECK-NOT: ext
+; CHECK: mla.4h v0, v1, v2[6]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6>
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <2 x i32> @vmla_laneq_s32_test(<2 x i32> %a, <2 x i32> %b, <4 x i32> %c) nounwind readnone ssp {
+entry:
+; CHECK: vmla_laneq_s32_test
+; CHECK-NOT: ext
+; CHECK: mla.2s v0, v1, v2[3]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <8 x i16> @not_really_vmlaq_laneq_s16_test(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone ssp {
+entry:
+; CHECK: not_really_vmlaq_laneq_s16_test
+; CHECK-NOT: ext
+; CHECK: mla.8h v0, v1, v2[5]
+; CHECK-NEXT: ret
+ %shuffle1 = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle2 = shufflevector <4 x i16> %shuffle1, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <8 x i16> %shuffle2, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <4 x i32> @not_really_vmlaq_laneq_s32_test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone ssp {
+entry:
+; CHECK: not_really_vmlaq_laneq_s32_test
+; CHECK-NOT: ext
+; CHECK: mla.4s v0, v1, v2[3]
+; CHECK-NEXT: ret
+ %shuffle1 = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle2 = shufflevector <2 x i32> %shuffle1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle2, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @vmull_laneq_s16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp {
+entry:
+; CHECK: vmull_laneq_s16_test
+; CHECK-NOT: ext
+; CHECK: smull.4s v0, v0, v1[6]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle) #2
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @vmull_laneq_s32_test(<2 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
+entry:
+; CHECK: vmull_laneq_s32_test
+; CHECK-NOT: ext
+; CHECK: smull.2d v0, v0, v1[2]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 2>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle) #2
+ ret <2 x i64> %vmull2.i
+}
+define <4 x i32> @vmull_laneq_u16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp {
+entry:
+; CHECK: vmull_laneq_u16_test
+; CHECK-NOT: ext
+; CHECK: umull.4s v0, v0, v1[6]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6>
+ %vmull2.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle) #2
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @vmull_laneq_u32_test(<2 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
+entry:
+; CHECK: vmull_laneq_u32_test
+; CHECK-NOT: ext
+; CHECK: umull.2d v0, v0, v1[2]
+; CHECK-NEXT: ret
+ %shuffle = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 2>
+ %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle) #2
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @vmull_high_n_s16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp {
+entry:
+; CHECK: vmull_high_n_s16_test
+; CHECK-NOT: ext
+; CHECK: smull2.4s
+; CHECK-NEXT: ret
+ %conv = trunc i32 %d to i16
+ %0 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i.i to <4 x i16>
+ %vecinit.i = insertelement <4 x i16> undef, i16 %conv, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %conv, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %conv, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %conv, i32 3
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %1, <4 x i16> %vecinit3.i) nounwind
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @vmull_high_n_s32_test(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c, i32 %d) nounwind readnone optsize ssp {
+entry:
+; CHECK: vmull_high_n_s32_test
+; CHECK-NOT: ext
+; CHECK: smull2.2d
+; CHECK-NEXT: ret
+ %0 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i.i to <2 x i32>
+ %vecinit.i = insertelement <2 x i32> undef, i32 %d, i32 0
+ %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %d, i32 1
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %1, <2 x i32> %vecinit1.i) nounwind
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <4 x i32> @vmull_high_n_u16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp {
+entry:
+; CHECK: vmull_high_n_u16_test
+; CHECK-NOT: ext
+; CHECK: umull2.4s
+; CHECK-NEXT: ret
+ %conv = trunc i32 %d to i16
+ %0 = bitcast <8 x i16> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i.i to <4 x i16>
+ %vecinit.i = insertelement <4 x i16> undef, i16 %conv, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %conv, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %conv, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %conv, i32 3
+ %vmull2.i.i = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %1, <4 x i16> %vecinit3.i) nounwind
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @vmull_high_n_u32_test(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c, i32 %d) nounwind readnone optsize ssp {
+entry:
+; CHECK: vmull_high_n_u32_test
+; CHECK-NOT: ext
+; CHECK: umull2.2d
+; CHECK-NEXT: ret
+ %0 = bitcast <4 x i32> %b to <2 x i64>
+ %shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1>
+ %1 = bitcast <1 x i64> %shuffle.i.i to <2 x i32>
+ %vecinit.i = insertelement <2 x i32> undef, i32 %d, i32 0
+ %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %d, i32 1
+ %vmull2.i.i = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %1, <2 x i32> %vecinit1.i) nounwind
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <4 x i32> @vmul_built_dup_test(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vmul_built_dup_test:
+; CHECK-NOT: ins
+; CHECK-NOT: dup
+; CHECK: mul.4s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[1]
+ %vget_lane = extractelement <4 x i32> %b, i32 1
+ %vecinit.i = insertelement <4 x i32> undef, i32 %vget_lane, i32 0
+ %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %vget_lane, i32 1
+ %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %vget_lane, i32 2
+ %vecinit3.i = insertelement <4 x i32> %vecinit2.i, i32 %vget_lane, i32 3
+ %prod = mul <4 x i32> %a, %vecinit3.i
+ ret <4 x i32> %prod
+}
+
+define <4 x i16> @vmul_built_dup_fromsmall_test(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: vmul_built_dup_fromsmall_test:
+; CHECK-NOT: ins
+; CHECK-NOT: dup
+; CHECK: mul.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[3]
+ %vget_lane = extractelement <4 x i16> %b, i32 3
+ %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ %prod = mul <4 x i16> %a, %vecinit3.i
+ ret <4 x i16> %prod
+}
+
+define <8 x i16> @vmulq_built_dup_fromsmall_test(<8 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: vmulq_built_dup_fromsmall_test:
+; CHECK-NOT: ins
+; CHECK-NOT: dup
+; CHECK: mul.8h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
+ %vget_lane = extractelement <4 x i16> %b, i32 0
+ %vecinit.i = insertelement <8 x i16> undef, i16 %vget_lane, i32 0
+ %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %vget_lane, i32 1
+ %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+ %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+ %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %vget_lane, i32 4
+ %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %vget_lane, i32 5
+ %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %vget_lane, i32 6
+ %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %vget_lane, i32 7
+ %prod = mul <8 x i16> %a, %vecinit7.i
+ ret <8 x i16> %prod
+}
+
+define <2 x i64> @mull_from_two_extracts(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: mull_from_two_extracts:
+; CHECK-NOT: ext
+; CHECK: sqdmull2.2d
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %res = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %lhs.high, <2 x i32> %rhs.high) nounwind
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @mlal_from_two_extracts(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: mlal_from_two_extracts:
+; CHECK-NOT: ext
+; CHECK: sqdmlal2.2d
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %res = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %lhs.high, <2 x i32> %rhs.high) nounwind
+ %sum = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %accum, <2 x i64> %res)
+ ret <2 x i64> %sum
+}
+
+define <2 x i64> @mull_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
+; CHECK-LABEL: mull_from_extract_dup:
+; CHECK-NOT: ext
+; CHECK: sqdmull2.2d
+ %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
+ %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+
+ %res = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %lhs.high, <2 x i32> %rhsvec) nounwind
+ ret <2 x i64> %res
+}
+
+define <8 x i16> @pmull_from_extract_dup(<16 x i8> %lhs, i8 %rhs) {
+; CHECK-LABEL: pmull_from_extract_dup:
+; CHECK-NOT: ext
+; CHECK: pmull2.8h
+ %rhsvec.0 = insertelement <8 x i8> undef, i8 %rhs, i32 0
+ %rhsvec = shufflevector <8 x i8> %rhsvec.0, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+
+ %lhs.high = shufflevector <16 x i8> %lhs, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+
+ %res = tail call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %lhs.high, <8 x i8> %rhsvec) nounwind
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @pmull_from_extract_duplane(<16 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK-LABEL: pmull_from_extract_duplane:
+; CHECK-NOT: ext
+; CHECK: pmull2.8h
+
+ %lhs.high = shufflevector <16 x i8> %lhs, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %rhs.high = shufflevector <8 x i8> %rhs, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+
+ %res = tail call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %lhs.high, <8 x i8> %rhs.high) nounwind
+ ret <8 x i16> %res
+}
+
+define <2 x i64> @sqdmull_from_extract_duplane(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: sqdmull_from_extract_duplane:
+; CHECK-NOT: ext
+; CHECK: sqdmull2.2d
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
+
+ %res = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %lhs.high, <2 x i32> %rhs.high) nounwind
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @sqdmlal_from_extract_duplane(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: sqdmlal_from_extract_duplane:
+; CHECK-NOT: ext
+; CHECK: sqdmlal2.2d
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
+
+ %res = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %lhs.high, <2 x i32> %rhs.high) nounwind
+ %sum = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %accum, <2 x i64> %res)
+ ret <2 x i64> %sum
+}
+
+define <2 x i64> @umlal_from_extract_duplane(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK-LABEL: umlal_from_extract_duplane:
+; CHECK-NOT: ext
+; CHECK: umlal2.2d
+
+ %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0>
+
+ %res = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %lhs.high, <2 x i32> %rhs.high) nounwind
+ %sum = add <2 x i64> %accum, %res
+ ret <2 x i64> %sum
+}
+
+define float @scalar_fmla_from_extract_v4f32(float %accum, float %lhs, <4 x float> %rvec) {
+; CHECK-LABEL: scalar_fmla_from_extract_v4f32:
+; CHECK: fmla.s s0, s1, v2[3]
+ %rhs = extractelement <4 x float> %rvec, i32 3
+ %res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum)
+ ret float %res
+}
+
+define float @scalar_fmla_from_extract_v2f32(float %accum, float %lhs, <2 x float> %rvec) {
+; CHECK-LABEL: scalar_fmla_from_extract_v2f32:
+; CHECK: fmla.s s0, s1, v2[1]
+ %rhs = extractelement <2 x float> %rvec, i32 1
+ %res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum)
+ ret float %res
+}
+
+define float @scalar_fmls_from_extract_v4f32(float %accum, float %lhs, <4 x float> %rvec) {
+; CHECK-LABEL: scalar_fmls_from_extract_v4f32:
+; CHECK: fmls.s s0, s1, v2[3]
+ %rhs.scal = extractelement <4 x float> %rvec, i32 3
+ %rhs = fsub float -0.0, %rhs.scal
+ %res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum)
+ ret float %res
+}
+
+define float @scalar_fmls_from_extract_v2f32(float %accum, float %lhs, <2 x float> %rvec) {
+; CHECK-LABEL: scalar_fmls_from_extract_v2f32:
+; CHECK: fmls.s s0, s1, v2[1]
+ %rhs.scal = extractelement <2 x float> %rvec, i32 1
+ %rhs = fsub float -0.0, %rhs.scal
+ %res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum)
+ ret float %res
+}
+
+declare float @llvm.fma.f32(float, float, float)
+
+define double @scalar_fmla_from_extract_v2f64(double %accum, double %lhs, <2 x double> %rvec) {
+; CHECK-LABEL: scalar_fmla_from_extract_v2f64:
+; CHECK: fmla.d d0, d1, v2[1]
+ %rhs = extractelement <2 x double> %rvec, i32 1
+ %res = call double @llvm.fma.f64(double %lhs, double %rhs, double %accum)
+ ret double %res
+}
+
+define double @scalar_fmls_from_extract_v2f64(double %accum, double %lhs, <2 x double> %rvec) {
+; CHECK-LABEL: scalar_fmls_from_extract_v2f64:
+; CHECK: fmls.d d0, d1, v2[1]
+ %rhs.scal = extractelement <2 x double> %rvec, i32 1
+ %rhs = fsub double -0.0, %rhs.scal
+ %res = call double @llvm.fma.f64(double %lhs, double %rhs, double %accum)
+ ret double %res
+}
+
+declare double @llvm.fma.f64(double, double, double)
+
+define <2 x float> @fmls_with_fneg_before_extract_v2f32(<2 x float> %accum, <2 x float> %lhs, <4 x float> %rhs) {
+; CHECK-LABEL: fmls_with_fneg_before_extract_v2f32:
+; CHECK: fmls.2s v0, v1, v2[3]
+ %rhs_neg = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %rhs
+ %splat = shufflevector <4 x float> %rhs_neg, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %res = call <2 x float> @llvm.fma.v2f32(<2 x float> %lhs, <2 x float> %splat, <2 x float> %accum)
+ ret <2 x float> %res
+}
+
+define <2 x float> @fmls_with_fneg_before_extract_v2f32_1(<2 x float> %accum, <2 x float> %lhs, <2 x float> %rhs) {
+; CHECK-LABEL: fmls_with_fneg_before_extract_v2f32_1:
+; CHECK: fmls.2s v0, v1, v2[1]
+ %rhs_neg = fsub <2 x float> <float -0.0, float -0.0>, %rhs
+ %splat = shufflevector <2 x float> %rhs_neg, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %res = call <2 x float> @llvm.fma.v2f32(<2 x float> %lhs, <2 x float> %splat, <2 x float> %accum)
+ ret <2 x float> %res
+}
+
+define <4 x float> @fmls_with_fneg_before_extract_v4f32(<4 x float> %accum, <4 x float> %lhs, <4 x float> %rhs) {
+; CHECK-LABEL: fmls_with_fneg_before_extract_v4f32:
+; CHECK: fmls.4s v0, v1, v2[3]
+ %rhs_neg = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %rhs
+ %splat = shufflevector <4 x float> %rhs_neg, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %res = call <4 x float> @llvm.fma.v4f32(<4 x float> %lhs, <4 x float> %splat, <4 x float> %accum)
+ ret <4 x float> %res
+}
+
+define <4 x float> @fmls_with_fneg_before_extract_v4f32_1(<4 x float> %accum, <4 x float> %lhs, <2 x float> %rhs) {
+; CHECK-LABEL: fmls_with_fneg_before_extract_v4f32_1:
+; CHECK: fmls.4s v0, v1, v2[1]
+ %rhs_neg = fsub <2 x float> <float -0.0, float -0.0>, %rhs
+ %splat = shufflevector <2 x float> %rhs_neg, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %res = call <4 x float> @llvm.fma.v4f32(<4 x float> %lhs, <4 x float> %splat, <4 x float> %accum)
+ ret <4 x float> %res
+}
+
+define <2 x double> @fmls_with_fneg_before_extract_v2f64(<2 x double> %accum, <2 x double> %lhs, <2 x double> %rhs) {
+; CHECK-LABEL: fmls_with_fneg_before_extract_v2f64:
+; CHECK: fmls.2d v0, v1, v2[1]
+ %rhs_neg = fsub <2 x double> <double -0.0, double -0.0>, %rhs
+ %splat = shufflevector <2 x double> %rhs_neg, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %res = call <2 x double> @llvm.fma.v2f64(<2 x double> %lhs, <2 x double> %splat, <2 x double> %accum)
+ ret <2 x double> %res
+}
+
+define <1 x double> @test_fmul_v1f64(<1 x double> %L, <1 x double> %R) nounwind {
+; CHECK-LABEL: test_fmul_v1f64:
+; CHECK: fmul
+ %prod = fmul <1 x double> %L, %R
+ ret <1 x double> %prod
+}
+
+define <1 x double> @test_fdiv_v1f64(<1 x double> %L, <1 x double> %R) nounwind {
+; CHECK-LABEL: test_fdiv_v1f64:
+; CHECK: fdiv
+ %prod = fdiv <1 x double> %L, %R
+ ret <1 x double> %prod
+}
+
+define i64 @sqdmlal_d(i32 %A, i32 %B, i64 %C) nounwind {
+;CHECK-LABEL: sqdmlal_d:
+;CHECK: sqdmlal
+ %tmp4 = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %A, i32 %B)
+ %tmp5 = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %C, i64 %tmp4)
+ ret i64 %tmp5
+}
+
+define i64 @sqdmlsl_d(i32 %A, i32 %B, i64 %C) nounwind {
+;CHECK-LABEL: sqdmlsl_d:
+;CHECK: sqdmlsl
+ %tmp4 = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %A, i32 %B)
+ %tmp5 = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %C, i64 %tmp4)
+ ret i64 %tmp5
+}
+
+define <16 x i8> @test_pmull_64(i64 %l, i64 %r) nounwind {
+; CHECK-LABEL: test_pmull_64:
+; CHECK: pmull.1q
+ %val = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r)
+ ret <16 x i8> %val
+}
+
+define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind {
+; CHECK-LABEL: test_pmull_high_64:
+; CHECK: pmull2.1q
+ %l_hi = extractelement <2 x i64> %l, i32 1
+ %r_hi = extractelement <2 x i64> %r, i32 1
+ %val = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l_hi, i64 %r_hi)
+ ret <16 x i8> %val
+}
+
+declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
+
+define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind {
+; CHECK-LABEL: test_mul_v1i64:
+; CHECK: mul
+ %prod = mul <1 x i64> %lhs, %rhs
+ ret <1 x i64> %prod
+}
diff --git a/test/CodeGen/AArch64/arm64-volatile.ll b/test/CodeGen/AArch64/arm64-volatile.ll
new file mode 100644
index 000000000000..e00ac5acb5fb
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-volatile.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+define i64 @normal_load(i64* nocapture %bar) nounwind readonly {
+; CHECK: normal_load
+; CHECK: ldp
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+ %add.ptr = getelementptr inbounds i64* %bar, i64 1
+ %tmp = load i64* %add.ptr, align 8
+ %add.ptr1 = getelementptr inbounds i64* %bar, i64 2
+ %tmp1 = load i64* %add.ptr1, align 8
+ %add = add nsw i64 %tmp1, %tmp
+ ret i64 %add
+}
+
+define i64 @volatile_load(i64* nocapture %bar) nounwind {
+; CHECK: volatile_load
+; CHECK: ldr
+; CHECK-NEXT: ldr
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+ %add.ptr = getelementptr inbounds i64* %bar, i64 1
+ %tmp = load volatile i64* %add.ptr, align 8
+ %add.ptr1 = getelementptr inbounds i64* %bar, i64 2
+ %tmp1 = load volatile i64* %add.ptr1, align 8
+ %add = add nsw i64 %tmp1, %tmp
+ ret i64 %add
+}
diff --git a/test/CodeGen/AArch64/arm64-vpopcnt.ll b/test/CodeGen/AArch64/arm64-vpopcnt.ll
new file mode 100644
index 000000000000..25306eba4917
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vpopcnt.ll
@@ -0,0 +1,68 @@
+; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+target triple = "arm64-apple-ios"
+
+; The non-byte ones used to fail with "Cannot select"
+
+; CHECK-LABEL: ctpopv8i8
+; CHECK: cnt.8b
+define <8 x i8> @ctpopv8i8(<8 x i8> %x) nounwind readnone {
+ %cnt = tail call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %x)
+ ret <8 x i8> %cnt
+}
+
+declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) nounwind readnone
+
+; CHECK-LABEL: ctpopv4i16
+; CHECK: cnt.8b
+define <4 x i16> @ctpopv4i16(<4 x i16> %x) nounwind readnone {
+ %cnt = tail call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %x)
+ ret <4 x i16> %cnt
+}
+
+declare <4 x i16> @llvm.ctpop.v4i16(<4 x i16>) nounwind readnone
+
+; CHECK-LABEL: ctpopv2i32
+; CHECK: cnt.8b
+define <2 x i32> @ctpopv2i32(<2 x i32> %x) nounwind readnone {
+ %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %x)
+ ret <2 x i32> %cnt
+}
+
+declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) nounwind readnone
+
+
+; CHECK-LABEL: ctpopv16i8
+; CHECK: cnt.16b
+define <16 x i8> @ctpopv16i8(<16 x i8> %x) nounwind readnone {
+ %cnt = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %x)
+ ret <16 x i8> %cnt
+}
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
+
+; CHECK-LABEL: ctpopv8i16
+; CHECK: cnt.8b
+define <8 x i16> @ctpopv8i16(<8 x i16> %x) nounwind readnone {
+ %cnt = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %x)
+ ret <8 x i16> %cnt
+}
+
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) nounwind readnone
+
+; CHECK-LABEL: ctpopv4i32
+; CHECK: cnt.8b
+define <4 x i32> @ctpopv4i32(<4 x i32> %x) nounwind readnone {
+ %cnt = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %x)
+ ret <4 x i32> %cnt
+}
+
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
+
+; CHECK-LABEL: ctpopv2i64
+; CHECK: cnt.8b
+define <2 x i64> @ctpopv2i64(<2 x i64> %x) nounwind readnone {
+ %cnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
+ ret <2 x i64> %cnt
+}
+
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vqadd.ll b/test/CodeGen/AArch64/arm64-vqadd.ll
new file mode 100644
index 000000000000..20f7e2c7a893
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vqadd.ll
@@ -0,0 +1,332 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @sqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sqadd8b:
+;CHECK: sqadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqadd4h:
+;CHECK: sqadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqadd2s:
+;CHECK: sqadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <8 x i8> @uqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uqadd8b:
+;CHECK: uqadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uqadd4h:
+;CHECK: uqadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uqadd2s:
+;CHECK: uqadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sqadd16b:
+;CHECK: sqadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqadd8h:
+;CHECK: sqadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqadd4s:
+;CHECK: sqadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: sqadd2d:
+;CHECK: sqadd.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <16 x i8> @uqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uqadd16b:
+;CHECK: uqadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @uqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uqadd8h:
+;CHECK: uqadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @uqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uqadd4s:
+;CHECK: uqadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @uqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: uqadd2d:
+;CHECK: uqadd.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i8> @usqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: usqadd8b:
+;CHECK: usqadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @usqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: usqadd4h:
+;CHECK: usqadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @usqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: usqadd2s:
+;CHECK: usqadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.usqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @usqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: usqadd16b:
+;CHECK: usqadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.usqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @usqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: usqadd8h:
+;CHECK: usqadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @usqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: usqadd4s:
+;CHECK: usqadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @usqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: usqadd2d:
+;CHECK: usqadd.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define i64 @usqadd_d(i64 %l, i64 %r) nounwind {
+; CHECK-LABEL: usqadd_d:
+; CHECK: usqadd {{d[0-9]+}}, {{d[0-9]+}}
+ %sum = call i64 @llvm.aarch64.neon.usqadd.i64(i64 %l, i64 %r)
+ ret i64 %sum
+}
+
+define i32 @usqadd_s(i32 %l, i32 %r) nounwind {
+; CHECK-LABEL: usqadd_s:
+; CHECK: usqadd {{s[0-9]+}}, {{s[0-9]+}}
+ %sum = call i32 @llvm.aarch64.neon.usqadd.i32(i32 %l, i32 %r)
+ ret i32 %sum
+}
+
+declare <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.usqadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+declare i64 @llvm.aarch64.neon.usqadd.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.usqadd.i32(i32, i32) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.usqadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i8> @suqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: suqadd8b:
+;CHECK: suqadd.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @suqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: suqadd4h:
+;CHECK: suqadd.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @suqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: suqadd2s:
+;CHECK: suqadd.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @suqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: suqadd16b:
+;CHECK: suqadd.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @suqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: suqadd8h:
+;CHECK: suqadd.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @suqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: suqadd4s:
+;CHECK: suqadd.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @suqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: suqadd2d:
+;CHECK: suqadd.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <1 x i64> @suqadd_1d(<1 x i64> %l, <1 x i64> %r) nounwind {
+; CHECK-LABEL: suqadd_1d:
+; CHECK: suqadd {{d[0-9]+}}, {{d[0-9]+}}
+ %sum = call <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64> %l, <1 x i64> %r)
+ ret <1 x i64> %sum
+}
+
+define i64 @suqadd_d(i64 %l, i64 %r) nounwind {
+; CHECK-LABEL: suqadd_d:
+; CHECK: suqadd {{d[0-9]+}}, {{d[0-9]+}}
+ %sum = call i64 @llvm.aarch64.neon.suqadd.i64(i64 %l, i64 %r)
+ ret i64 %sum
+}
+
+define i32 @suqadd_s(i32 %l, i32 %r) nounwind {
+; CHECK-LABEL: suqadd_s:
+; CHECK: suqadd {{s[0-9]+}}, {{s[0-9]+}}
+ %sum = call i32 @llvm.aarch64.neon.suqadd.i32(i32 %l, i32 %r)
+ ret i32 %sum
+}
+
+declare <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+declare i64 @llvm.aarch64.neon.suqadd.i64(i64, i64) nounwind readnone
+declare i32 @llvm.aarch64.neon.suqadd.i32(i32, i32) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vqsub.ll b/test/CodeGen/AArch64/arm64-vqsub.ll
new file mode 100644
index 000000000000..dde3ac3478e4
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vqsub.ll
@@ -0,0 +1,147 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @sqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sqsub8b:
+;CHECK: sqsub.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqsub4h:
+;CHECK: sqsub.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqsub2s:
+;CHECK: sqsub.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <8 x i8> @uqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uqsub8b:
+;CHECK: uqsub.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uqsub4h:
+;CHECK: uqsub.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uqsub2s:
+;CHECK: uqsub.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sqsub16b:
+;CHECK: sqsub.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sqsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqsub8h:
+;CHECK: sqsub.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sqsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqsub4s:
+;CHECK: sqsub.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqsub2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: sqsub2d:
+;CHECK: sqsub.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <16 x i8> @uqsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uqsub16b:
+;CHECK: uqsub.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @uqsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uqsub8h:
+;CHECK: uqsub.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @uqsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uqsub4s:
+;CHECK: uqsub.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @uqsub2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: uqsub2d:
+;CHECK: uqsub.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.sqsub.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.uqsub.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vselect.ll b/test/CodeGen/AArch64/arm64-vselect.ll
new file mode 100644
index 000000000000..9988512f530e
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vselect.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+;CHECK: @func63
+;CHECK: cmeq.4h v0, v0, v1
+
+;FIXME: currently, it will generate 3 instructions:
+; ushll.4s v0, v0, #0
+; shl.4s v0, v0, #31
+; sshr.4s v0, v0, #31
+;But these instructions can be optimized into 1 instruction:
+; sshll.4s v0, v0, #0
+
+;CHECK: bsl.16b v0, v2, v3
+;CHECK: str q0, [x0]
+;CHECK: ret
+
+%T0_63 = type <4 x i16>
+%T1_63 = type <4 x i32>
+%T2_63 = type <4 x i1>
+define void @func63(%T1_63* %out, %T0_63 %v0, %T0_63 %v1, %T1_63 %v2, %T1_63 %v3) {
+ %cond = icmp eq %T0_63 %v0, %v1
+ %r = select %T2_63 %cond, %T1_63 %v2, %T1_63 %v3
+ store %T1_63 %r, %T1_63* %out
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-vsetcc_fp.ll b/test/CodeGen/AArch64/arm64-vsetcc_fp.ll
new file mode 100644
index 000000000000..f4f4714dde4d
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vsetcc_fp.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+define <2 x i32> @fcmp_one(<2 x float> %x, <2 x float> %y) nounwind optsize readnone {
+; CHECK-LABEL: fcmp_one:
+; CHECK-NEXT: fcmgt.2s [[REG:v[0-9]+]], v0, v1
+; CHECK-NEXT: fcmgt.2s [[REG2:v[0-9]+]], v1, v0
+; CHECK-NEXT: orr.8b v0, [[REG2]], [[REG]]
+; CHECK-NEXT: ret
+ %tmp = fcmp one <2 x float> %x, %y
+ %or = sext <2 x i1> %tmp to <2 x i32>
+ ret <2 x i32> %or
+}
diff --git a/test/CodeGen/AArch64/arm64-vshift.ll b/test/CodeGen/AArch64/arm64-vshift.ll
new file mode 100644
index 000000000000..65bd50cbe9d0
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vshift.ll
@@ -0,0 +1,1926 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -enable-misched=false | FileCheck %s
+
+define <8 x i8> @sqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sqshl8b:
+;CHECK: sqshl.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqshl4h:
+;CHECK: sqshl.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqshl2s:
+;CHECK: sqshl.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <8 x i8> @uqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uqshl8b:
+;CHECK: uqshl.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uqshl4h:
+;CHECK: uqshl.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uqshl2s:
+;CHECK: uqshl.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sqshl16b:
+;CHECK: sqshl.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sqshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqshl8h:
+;CHECK: sqshl.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sqshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqshl4s:
+;CHECK: sqshl.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: sqshl2d:
+;CHECK: sqshl.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <16 x i8> @uqshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uqshl16b:
+;CHECK: uqshl.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @uqshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uqshl8h:
+;CHECK: uqshl.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @uqshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uqshl4s:
+;CHECK: uqshl.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @uqshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: uqshl2d:
+;CHECK: uqshl.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i8> @srshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: srshl8b:
+;CHECK: srshl.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @srshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: srshl4h:
+;CHECK: srshl.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @srshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: srshl2s:
+;CHECK: srshl.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <8 x i8> @urshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: urshl8b:
+;CHECK: urshl.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @urshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: urshl4h:
+;CHECK: urshl.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @urshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: urshl2s:
+;CHECK: urshl.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @srshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: srshl16b:
+;CHECK: srshl.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @srshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: srshl8h:
+;CHECK: srshl.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @srshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: srshl4s:
+;CHECK: srshl.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @srshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: srshl2d:
+;CHECK: srshl.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <16 x i8> @urshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: urshl16b:
+;CHECK: urshl.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @urshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: urshl8h:
+;CHECK: urshl.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @urshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: urshl4s:
+;CHECK: urshl.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @urshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: urshl2d:
+;CHECK: urshl.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i8> @sqrshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sqrshl8b:
+;CHECK: sqrshl.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqrshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sqrshl4h:
+;CHECK: sqrshl.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqrshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sqrshl2s:
+;CHECK: sqrshl.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <8 x i8> @uqrshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: uqrshl8b:
+;CHECK: uqrshl.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqrshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: uqrshl4h:
+;CHECK: uqrshl.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqrshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: uqrshl2s:
+;CHECK: uqrshl.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqrshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sqrshl16b:
+;CHECK: sqrshl.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sqrshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sqrshl8h:
+;CHECK: sqrshl.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sqrshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sqrshl4s:
+;CHECK: sqrshl.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqrshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: sqrshl2d:
+;CHECK: sqrshl.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <16 x i8> @uqrshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: uqrshl16b:
+;CHECK: uqrshl.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @uqrshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: uqrshl8h:
+;CHECK: uqrshl.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @uqrshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: uqrshl4s:
+;CHECK: uqrshl.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @uqrshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: uqrshl2d:
+;CHECK: uqrshl.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqrshl.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uqrshl.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.sqrshl.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqrshl.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqrshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.sqrshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.uqrshl.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i8> @urshr8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: urshr8b:
+;CHECK: urshr.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @urshr4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: urshr4h:
+;CHECK: urshr.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @urshr2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: urshr2s:
+;CHECK: urshr.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @urshr16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: urshr16b:
+;CHECK: urshr.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @urshr8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: urshr8h:
+;CHECK: urshr.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @urshr4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: urshr4s:
+;CHECK: urshr.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @urshr2d(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: urshr2d:
+;CHECK: urshr.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @srshr8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: srshr8b:
+;CHECK: srshr.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @srshr4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: srshr4h:
+;CHECK: srshr.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @srshr2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: srshr2s:
+;CHECK: srshr.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @srshr16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: srshr16b:
+;CHECK: srshr.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @srshr8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: srshr8h:
+;CHECK: srshr.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @srshr4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: srshr4s:
+;CHECK: srshr.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @srshr2d(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: srshr2d:
+;CHECK: srshr.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @sqshlu8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: sqshlu8b:
+;CHECK: sqshlu.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqshlu4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshlu4h:
+;CHECK: sqshlu.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqshlu2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshlu2s:
+;CHECK: sqshlu.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqshlu16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: sqshlu16b:
+;CHECK: sqshlu.16b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sqshlu8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshlu8h:
+;CHECK: sqshlu.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sqshlu4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshlu4s:
+;CHECK: sqshlu.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqshlu2d(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqshlu2d:
+;CHECK: sqshlu.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i8> @rshrn8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: rshrn8b:
+;CHECK: rshrn.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @rshrn4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: rshrn4h:
+;CHECK: rshrn.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @rshrn2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: rshrn2s:
+;CHECK: rshrn.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @rshrn16b(<8 x i8> *%ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: rshrn16b:
+;CHECK: rshrn2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @rshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: rshrn8h:
+;CHECK: rshrn2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @rshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: rshrn4s:
+;CHECK: rshrn2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64>, i32) nounwind readnone
+
+define <8 x i8> @shrn8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: shrn8b:
+;CHECK: shrn.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @shrn4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: shrn4h:
+;CHECK: shrn.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+ %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @shrn2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: shrn2s:
+;CHECK: shrn.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+ %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @shrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: shrn16b:
+;CHECK: shrn2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @shrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: shrn8h:
+;CHECK: shrn2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+ %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @shrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: shrn4s:
+;CHECK: shrn2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+ %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare <8 x i8> @llvm.aarch64.neon.shrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.shrn.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.shrn.v2i32(<2 x i64>, i32) nounwind readnone
+
+define i32 @sqshrn1s(i64 %A) nounwind {
+; CHECK-LABEL: sqshrn1s:
+; CHECK: sqshrn {{s[0-9]+}}, d0, #1
+ %tmp = call i32 @llvm.aarch64.neon.sqshrn.i32(i64 %A, i32 1)
+ ret i32 %tmp
+}
+
+define <8 x i8> @sqshrn8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshrn8b:
+;CHECK: sqshrn.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqshrn4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshrn4h:
+;CHECK: sqshrn.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqshrn2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqshrn2s:
+;CHECK: sqshrn.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+
+define <16 x i8> @sqshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshrn16b:
+;CHECK: sqshrn2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @sqshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshrn8h:
+;CHECK: sqshrn2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @sqshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqshrn4s:
+;CHECK: sqshrn2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare i32 @llvm.aarch64.neon.sqshrn.i32(i64, i32) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64>, i32) nounwind readnone
+
+define i32 @sqshrun1s(i64 %A) nounwind {
+; CHECK-LABEL: sqshrun1s:
+; CHECK: sqshrun {{s[0-9]+}}, d0, #1
+ %tmp = call i32 @llvm.aarch64.neon.sqshrun.i32(i64 %A, i32 1)
+ ret i32 %tmp
+}
+
+define <8 x i8> @sqshrun8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshrun8b:
+;CHECK: sqshrun.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqshrun4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshrun4h:
+;CHECK: sqshrun.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqshrun2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqshrun2s:
+;CHECK: sqshrun.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqshrun16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshrun16b:
+;CHECK: sqshrun2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @sqshrun8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshrun8h:
+;CHECK: sqshrun2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @sqshrun4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqshrun4s:
+;CHECK: sqshrun2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare i32 @llvm.aarch64.neon.sqshrun.i32(i64, i32) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64>, i32) nounwind readnone
+
+define i32 @sqrshrn1s(i64 %A) nounwind {
+; CHECK-LABEL: sqrshrn1s:
+; CHECK: sqrshrn {{s[0-9]+}}, d0, #1
+ %tmp = call i32 @llvm.aarch64.neon.sqrshrn.i32(i64 %A, i32 1)
+ ret i32 %tmp
+}
+
+define <8 x i8> @sqrshrn8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqrshrn8b:
+;CHECK: sqrshrn.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqrshrn4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqrshrn4h:
+;CHECK: sqrshrn.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqrshrn2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqrshrn2s:
+;CHECK: sqrshrn.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqrshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqrshrn16b:
+;CHECK: sqrshrn2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @sqrshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqrshrn8h:
+;CHECK: sqrshrn2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @sqrshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqrshrn4s:
+;CHECK: sqrshrn2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare i32 @llvm.aarch64.neon.sqrshrn.i32(i64, i32) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64>, i32) nounwind readnone
+
+define i32 @sqrshrun1s(i64 %A) nounwind {
+; CHECK-LABEL: sqrshrun1s:
+; CHECK: sqrshrun {{s[0-9]+}}, d0, #1
+ %tmp = call i32 @llvm.aarch64.neon.sqrshrun.i32(i64 %A, i32 1)
+ ret i32 %tmp
+}
+
+define <8 x i8> @sqrshrun8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqrshrun8b:
+;CHECK: sqrshrun.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqrshrun4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqrshrun4h:
+;CHECK: sqrshrun.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqrshrun2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqrshrun2s:
+;CHECK: sqrshrun.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqrshrun16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqrshrun16b:
+;CHECK: sqrshrun2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @sqrshrun8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqrshrun8h:
+;CHECK: sqrshrun2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @sqrshrun4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqrshrun4s:
+;CHECK: sqrshrun2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare i32 @llvm.aarch64.neon.sqrshrun.i32(i64, i32) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64>, i32) nounwind readnone
+
+define i32 @uqrshrn1s(i64 %A) nounwind {
+; CHECK-LABEL: uqrshrn1s:
+; CHECK: uqrshrn {{s[0-9]+}}, d0, #1
+ %tmp = call i32 @llvm.aarch64.neon.uqrshrn.i32(i64 %A, i32 1)
+ ret i32 %tmp
+}
+
+define <8 x i8> @uqrshrn8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: uqrshrn8b:
+;CHECK: uqrshrn.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqrshrn4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: uqrshrn4h:
+;CHECK: uqrshrn.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqrshrn2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: uqrshrn2s:
+;CHECK: uqrshrn.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @uqrshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: uqrshrn16b:
+;CHECK: uqrshrn2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @uqrshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: uqrshrn8h:
+;CHECK: uqrshrn2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @uqrshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: uqrshrn4s:
+;CHECK: uqrshrn2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare i32 @llvm.aarch64.neon.uqrshrn.i32(i64, i32) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64>, i32) nounwind readnone
+
+define i32 @uqshrn1s(i64 %A) nounwind {
+; CHECK-LABEL: uqshrn1s:
+; CHECK: uqshrn {{s[0-9]+}}, d0, #1
+ %tmp = call i32 @llvm.aarch64.neon.uqshrn.i32(i64 %A, i32 1)
+ ret i32 %tmp
+}
+
+define <8 x i8> @uqshrn8b(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: uqshrn8b:
+;CHECK: uqshrn.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqshrn4h(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: uqshrn4h:
+;CHECK: uqshrn.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqshrn2s(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: uqshrn2s:
+;CHECK: uqshrn.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @uqshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+;CHECK-LABEL: uqshrn16b:
+;CHECK: uqshrn2.16b v0, {{v[0-9]+}}, #1
+ %out = load <8 x i8>* %ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
+ %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @uqshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+;CHECK-LABEL: uqshrn8h:
+;CHECK: uqshrn2.8h v0, {{v[0-9]+}}, #1
+ %out = load <4 x i16>* %ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
+ %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @uqshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+;CHECK-LABEL: uqshrn4s:
+;CHECK: uqshrn2.4s v0, {{v[0-9]+}}, #1
+ %out = load <2 x i32>* %ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
+ %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %tmp4
+}
+
+declare i32 @llvm.aarch64.neon.uqshrn.i32(i64, i32) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64>, i32) nounwind readnone
+
+define <8 x i16> @ushll8h(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: ushll8h:
+;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
+ %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @ushll4s(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: ushll4s:
+;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
+ %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @ushll2d(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: ushll2d:
+;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
+ %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i16> @ushll2_8h(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: ushll2_8h:
+;CHECK: ushll2.8h v0, {{v[0-9]+}}, #1
+ %load1 = load <16 x i8>* %A
+ %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
+ %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @ushll2_4s(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: ushll2_4s:
+;CHECK: ushll2.4s v0, {{v[0-9]+}}, #1
+ %load1 = load <8 x i16>* %A
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
+ %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @ushll2_2d(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: ushll2_2d:
+;CHECK: ushll2.2d v0, {{v[0-9]+}}, #1
+ %load1 = load <4 x i32>* %A
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
+ %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i16> @sshll8h(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: sshll8h:
+;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
+ %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sshll4s(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: sshll4s:
+;CHECK: sshll.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
+ %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sshll2d(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: sshll2d:
+;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
+ %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i16> @sshll2_8h(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: sshll2_8h:
+;CHECK: sshll2.8h v0, {{v[0-9]+}}, #1
+ %load1 = load <16 x i8>* %A
+ %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
+ %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sshll2_4s(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sshll2_4s:
+;CHECK: sshll2.4s v0, {{v[0-9]+}}, #1
+ %load1 = load <8 x i16>* %A
+ %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
+ %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sshll2_2d(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sshll2_2d:
+;CHECK: sshll2.2d v0, {{v[0-9]+}}, #1
+ %load1 = load <4 x i32>* %A
+ %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
+ %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @sqshli8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: sqshli8b:
+;CHECK: sqshl.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sqshli4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshli4h:
+;CHECK: sqshl.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sqshli2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshli2s:
+;CHECK: sqshl.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @sqshli16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: sqshli16b:
+;CHECK: sqshl.16b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sqshli8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: sqshli8h:
+;CHECK: sqshl.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sqshli4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: sqshli4s:
+;CHECK: sqshl.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sqshli2d(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: sqshli2d:
+;CHECK: sqshl.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @uqshli8b(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: uqshli8b:
+;CHECK: uqshl.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+ ret <8 x i8> %tmp3
+}
+
+define <8 x i8> @uqshli8b_1(<8 x i8>* %A) nounwind {
+;CHECK-LABEL: uqshli8b_1:
+;CHECK: movi.8b [[REG:v[0-9]+]], #0x8
+;CHECK: uqshl.8b v0, v0, [[REG]]
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @uqshli4h(<4 x i16>* %A) nounwind {
+;CHECK-LABEL: uqshli4h:
+;CHECK: uqshl.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @uqshli2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: uqshli2s:
+;CHECK: uqshl.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @uqshli16b(<16 x i8>* %A) nounwind {
+;CHECK-LABEL: uqshli16b:
+;CHECK: uqshl.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @uqshli8h(<8 x i16>* %A) nounwind {
+;CHECK-LABEL: uqshli8h:
+;CHECK: uqshl.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @uqshli4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: uqshli4s:
+;CHECK: uqshl.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @uqshli2d(<2 x i64>* %A) nounwind {
+;CHECK-LABEL: uqshli2d:
+;CHECK: uqshl.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @ursra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: ursra8b:
+;CHECK: ursra.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %tmp4 = load <8 x i8>* %B
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @ursra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: ursra4h:
+;CHECK: ursra.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+ %tmp4 = load <4 x i16>* %B
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @ursra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: ursra2s:
+;CHECK: ursra.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+ %tmp4 = load <2 x i32>* %B
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <16 x i8> @ursra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: ursra16b:
+;CHECK: ursra.16b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %tmp4 = load <16 x i8>* %B
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @ursra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: ursra8h:
+;CHECK: ursra.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+ %tmp4 = load <8 x i16>* %B
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @ursra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: ursra4s:
+;CHECK: ursra.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+ %tmp4 = load <4 x i32>* %B
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @ursra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: ursra2d:
+;CHECK: ursra.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+ %tmp4 = load <2 x i64>* %B
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i8> @srsra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: srsra8b:
+;CHECK: srsra.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %tmp4 = load <8 x i8>* %B
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @srsra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: srsra4h:
+;CHECK: srsra.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
+ %tmp4 = load <4 x i16>* %B
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @srsra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: srsra2s:
+;CHECK: srsra.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
+ %tmp4 = load <2 x i32>* %B
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <16 x i8> @srsra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: srsra16b:
+;CHECK: srsra.16b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %tmp4 = load <16 x i8>* %B
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @srsra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: srsra8h:
+;CHECK: srsra.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
+ %tmp4 = load <8 x i16>* %B
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @srsra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: srsra4s:
+;CHECK: srsra.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
+ %tmp4 = load <4 x i32>* %B
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @srsra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: srsra2d:
+;CHECK: srsra.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
+ %tmp4 = load <2 x i64>* %B
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i8> @usra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: usra8b:
+;CHECK: usra.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp4 = load <8 x i8>* %B
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @usra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: usra4h:
+;CHECK: usra.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+ %tmp4 = load <4 x i16>* %B
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @usra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: usra2s:
+;CHECK: usra.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
+ %tmp4 = load <2 x i32>* %B
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <16 x i8> @usra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: usra16b:
+;CHECK: usra.16b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp4 = load <16 x i8>* %B
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @usra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: usra8h:
+;CHECK: usra.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %tmp4 = load <8 x i16>* %B
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @usra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: usra4s:
+;CHECK: usra.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = load <4 x i32>* %B
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @usra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: usra2d:
+;CHECK: usra.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+ %tmp4 = load <2 x i64>* %B
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i8> @ssra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: ssra8b:
+;CHECK: ssra.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = ashr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp4 = load <8 x i8>* %B
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @ssra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: ssra4h:
+;CHECK: ssra.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp3 = ashr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+ %tmp4 = load <4 x i16>* %B
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @ssra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: ssra2s:
+;CHECK: ssra.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = ashr <2 x i32> %tmp1, <i32 1, i32 1>
+ %tmp4 = load <2 x i32>* %B
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <16 x i8> @ssra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: ssra16b:
+;CHECK: ssra.16b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = ashr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp4 = load <16 x i8>* %B
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @ssra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: ssra8h:
+;CHECK: ssra.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp3 = ashr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %tmp4 = load <8 x i16>* %B
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @ssra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: ssra4s:
+;CHECK: ssra.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = ashr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+ %tmp4 = load <4 x i32>* %B
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @ssra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: ssra2d:
+;CHECK: ssra.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp3 = ashr <2 x i64> %tmp1, <i64 1, i64 1>
+ %tmp4 = load <2 x i64>* %B
+ %tmp5 = add <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i8> @shr_orr8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: shr_orr8b:
+;CHECK: shr.8b v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.8b
+;CHECK-NEXT: ret
+ %tmp1 = load <8 x i8>* %A
+ %tmp4 = load <8 x i8>* %B
+ %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp5 = or <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @shr_orr4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: shr_orr4h:
+;CHECK: shr.4h v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.8b
+;CHECK-NEXT: ret
+ %tmp1 = load <4 x i16>* %A
+ %tmp4 = load <4 x i16>* %B
+ %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+ %tmp5 = or <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @shr_orr2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: shr_orr2s:
+;CHECK: shr.2s v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.8b
+;CHECK-NEXT: ret
+ %tmp1 = load <2 x i32>* %A
+ %tmp4 = load <2 x i32>* %B
+ %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
+ %tmp5 = or <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <16 x i8> @shr_orr16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: shr_orr16b:
+;CHECK: shr.16b v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <16 x i8>* %A
+ %tmp4 = load <16 x i8>* %B
+ %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp5 = or <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @shr_orr8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: shr_orr8h:
+;CHECK: shr.8h v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp4 = load <8 x i16>* %B
+ %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %tmp5 = or <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @shr_orr4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: shr_orr4s:
+;CHECK: shr.4s v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp4 = load <4 x i32>* %B
+ %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = or <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @shr_orr2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: shr_orr2d:
+;CHECK: shr.2d v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp4 = load <2 x i64>* %B
+ %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
+ %tmp5 = or <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i8> @shl_orr8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: shl_orr8b:
+;CHECK: shl.8b v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.8b
+;CHECK-NEXT: ret
+ %tmp1 = load <8 x i8>* %A
+ %tmp4 = load <8 x i8>* %B
+ %tmp3 = shl <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp5 = or <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @shl_orr4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: shl_orr4h:
+;CHECK: shl.4h v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.8b
+;CHECK-NEXT: ret
+ %tmp1 = load <4 x i16>* %A
+ %tmp4 = load <4 x i16>* %B
+ %tmp3 = shl <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
+ %tmp5 = or <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @shl_orr2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: shl_orr2s:
+;CHECK: shl.2s v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.8b
+;CHECK-NEXT: ret
+ %tmp1 = load <2 x i32>* %A
+ %tmp4 = load <2 x i32>* %B
+ %tmp3 = shl <2 x i32> %tmp1, <i32 1, i32 1>
+ %tmp5 = or <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <16 x i8> @shl_orr16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: shl_orr16b:
+;CHECK: shl.16b v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <16 x i8>* %A
+ %tmp4 = load <16 x i8>* %B
+ %tmp3 = shl <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp5 = or <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @shl_orr8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: shl_orr8h:
+;CHECK: shl.8h v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <8 x i16>* %A
+ %tmp4 = load <8 x i16>* %B
+ %tmp3 = shl <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %tmp5 = or <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @shl_orr4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: shl_orr4s:
+;CHECK: shl.4s v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <4 x i32>* %A
+ %tmp4 = load <4 x i32>* %B
+ %tmp3 = shl <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
+ %tmp5 = or <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @shl_orr2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: shl_orr2d:
+;CHECK: shl.2d v0, {{v[0-9]+}}, #1
+;CHECK-NEXT: orr.16b
+;CHECK-NEXT: ret
+ %tmp1 = load <2 x i64>* %A
+ %tmp4 = load <2 x i64>* %B
+ %tmp3 = shl <2 x i64> %tmp1, <i64 1, i64 1>
+ %tmp5 = or <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i16> @shll(<8 x i8> %in) {
+; CHECK-LABEL: shll:
+; CHECK: shll.8h v0, {{v[0-9]+}}, #8
+ %ext = zext <8 x i8> %in to <8 x i16>
+ %res = shl <8 x i16> %ext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @shll_high(<8 x i16> %in) {
+; CHECK-LABEL: shll_high:
+; CHECK: shll2.4s v0, {{v[0-9]+}}, #16
+ %extract = shufflevector <8 x i16> %in, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext = zext <4 x i16> %extract to <4 x i32>
+ %res = shl <4 x i32> %ext, <i32 16, i32 16, i32 16, i32 16>
+ ret <4 x i32> %res
+}
+
+define <8 x i8> @sli8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: sli8b:
+;CHECK: sli.8b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, i32 1)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sli4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: sli4h:
+;CHECK: sli.4h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, i32 1)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @sli2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: sli2s:
+;CHECK: sli.2s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, i32 1)
+ ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @sli1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+;CHECK-LABEL: sli1d:
+;CHECK: sli d0, {{d[0-9]+}}, #1
+ %tmp1 = load <1 x i64>* %A
+ %tmp2 = load <1 x i64>* %B
+ %tmp3 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, i32 1)
+ ret <1 x i64> %tmp3
+}
+
+define <16 x i8> @sli16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: sli16b:
+;CHECK: sli.16b v0, {{v[0-9]+}}, #1
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, i32 1)
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @sli8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: sli8h:
+;CHECK: sli.8h v0, {{v[0-9]+}}, #1
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, i32 1)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @sli4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: sli4s:
+;CHECK: sli.4s v0, {{v[0-9]+}}, #1
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, i32 1)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sli2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: sli2d:
+;CHECK: sli.2d v0, {{v[0-9]+}}, #1
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, i32 1)
+ ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8>, <8 x i8>, i32) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16>, <4 x i16>, i32) nounwind readnone
+declare <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32>, <2 x i32>, i32) nounwind readnone
+declare <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64>, <1 x i64>, i32) nounwind readnone
+
+declare <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8>, <16 x i8>, i32) nounwind readnone
+declare <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16>, <8 x i16>, i32) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32>, <4 x i32>, i32) nounwind readnone
+declare <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64>, <2 x i64>, i32) nounwind readnone
+
+define <1 x i64> @ashr_v1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: ashr_v1i64:
+; CHECK: neg d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: sshl d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %c = ashr <1 x i64> %a, %b
+ ret <1 x i64> %c
+}
diff --git a/test/CodeGen/AArch64/arm64-vshr.ll b/test/CodeGen/AArch64/arm64-vshr.ll
new file mode 100644
index 000000000000..21eb579f2522
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vshr.ll
@@ -0,0 +1,63 @@
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s
+
+define <8 x i16> @testShiftRightArith_v8i16(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: testShiftRightArith_v8i16:
+; CHECK: neg.8h [[REG1:v[0-9]+]], [[REG1]]
+; CHECK-NEXT: sshl.8h [[REG2:v[0-9]+]], [[REG2]], [[REG1]]
+
+entry:
+ %a.addr = alloca <8 x i16>, align 16
+ %b.addr = alloca <8 x i16>, align 16
+ store <8 x i16> %a, <8 x i16>* %a.addr, align 16
+ store <8 x i16> %b, <8 x i16>* %b.addr, align 16
+ %0 = load <8 x i16>* %a.addr, align 16
+ %1 = load <8 x i16>* %b.addr, align 16
+ %shr = ashr <8 x i16> %0, %1
+ ret <8 x i16> %shr
+}
+
+define <4 x i32> @testShiftRightArith_v4i32(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: testShiftRightArith_v4i32:
+; CHECK: neg.4s [[REG3:v[0-9]+]], [[REG3]]
+; CHECK-NEXT: sshl.4s [[REG4:v[0-9]+]], [[REG4]], [[REG3]]
+entry:
+ %a.addr = alloca <4 x i32>, align 32
+ %b.addr = alloca <4 x i32>, align 32
+ store <4 x i32> %a, <4 x i32>* %a.addr, align 32
+ store <4 x i32> %b, <4 x i32>* %b.addr, align 32
+ %0 = load <4 x i32>* %a.addr, align 32
+ %1 = load <4 x i32>* %b.addr, align 32
+ %shr = ashr <4 x i32> %0, %1
+ ret <4 x i32> %shr
+}
+
+define <8 x i16> @testShiftRightLogical(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: testShiftRightLogical:
+; CHECK: neg.8h [[REG5:v[0-9]+]], [[REG5]]
+; CHECK-NEXT: ushl.8h [[REG6:v[0-9]+]], [[REG6]], [[REG5]]
+entry:
+ %a.addr = alloca <8 x i16>, align 16
+ %b.addr = alloca <8 x i16>, align 16
+ store <8 x i16> %a, <8 x i16>* %a.addr, align 16
+ store <8 x i16> %b, <8 x i16>* %b.addr, align 16
+ %0 = load <8 x i16>* %a.addr, align 16
+ %1 = load <8 x i16>* %b.addr, align 16
+ %shr = lshr <8 x i16> %0, %1
+ ret <8 x i16> %shr
+}
+
+define <1 x i64> @sshr_v1i64(<1 x i64> %A) nounwind {
+; CHECK-LABEL: sshr_v1i64:
+; CHECK: sshr d0, d0, #63
+ %tmp3 = ashr <1 x i64> %A, < i64 63 >
+ ret <1 x i64> %tmp3
+}
+
+define <1 x i64> @ushr_v1i64(<1 x i64> %A) nounwind {
+; CHECK-LABEL: ushr_v1i64:
+; CHECK: ushr d0, d0, #63
+ %tmp3 = lshr <1 x i64> %A, < i64 63 >
+ ret <1 x i64> %tmp3
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AArch64/arm64-vshuffle.ll b/test/CodeGen/AArch64/arm64-vshuffle.ll
new file mode 100644
index 000000000000..62fd96102d01
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vshuffle.ll
@@ -0,0 +1,115 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -mcpu=cyclone | FileCheck %s
+
+
+; The mask:
+; CHECK: lCPI0_0:
+; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 255 ; 0xff
+; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 255 ; 0xff
+; The second vector is legalized to undef and the elements of the first vector
+; are used instead.
+; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 4 ; 0x4
+; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 0 ; 0x0
+; CHECK: test1
+; CHECK: ldr d[[REG0:[0-9]+]], [{{.*}}, lCPI0_0
+; CHECK: movi.8h v[[REG1:[0-9]+]], #0x1, lsl #8
+; CHECK: tbl.8b v{{[0-9]+}}, { v[[REG1]] }, v[[REG0]]
+define <8 x i1> @test1() {
+entry:
+ %Shuff = shufflevector <8 x i1> <i1 0, i1 1, i1 2, i1 3, i1 4, i1 5, i1 6,
+ i1 7>,
+ <8 x i1> <i1 0, i1 1, i1 2, i1 3, i1 4, i1 5, i1 6,
+ i1 7>,
+ <8 x i32> <i32 2, i32 undef, i32 6, i32 undef, i32 10,
+ i32 12, i32 14, i32 0>
+ ret <8 x i1> %Shuff
+}
+
+; CHECK: lCPI1_0:
+; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 255 ; 0xff
+; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 255 ; 0xff
+; CHECK: .byte 10 ; 0xa
+; CHECK: .byte 12 ; 0xc
+; CHECK: .byte 14 ; 0xe
+; CHECK: .byte 0 ; 0x0
+; CHECK: test2
+; CHECK: ldr d[[REG0:[0-9]+]], [{{.*}}, lCPI1_0@PAGEOFF]
+; CHECK: adrp x[[REG2:[0-9]+]], lCPI1_1@PAGE
+; CHECK: ldr q[[REG1:[0-9]+]], [x[[REG2]], lCPI1_1@PAGEOFF]
+; CHECK: tbl.8b v{{[0-9]+}}, { v[[REG1]] }, v[[REG0]]
+define <8 x i1>@test2() {
+bb:
+ %Shuff = shufflevector <8 x i1> zeroinitializer,
+ <8 x i1> <i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 0, i1 0>,
+ <8 x i32> <i32 2, i32 undef, i32 6, i32 undef, i32 10, i32 12, i32 14,
+ i32 0>
+ ret <8 x i1> %Shuff
+}
+
+; CHECK: lCPI2_0:
+; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 255 ; 0xff
+; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 255 ; 0xff
+; CHECK: .byte 10 ; 0xa
+; CHECK: .byte 12 ; 0xc
+; CHECK: .byte 14 ; 0xe
+; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 255 ; 0xff
+; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 255 ; 0xff
+; CHECK: .byte 10 ; 0xa
+; CHECK: .byte 12 ; 0xc
+; CHECK: .byte 14 ; 0xe
+; CHECK: .byte 0 ; 0x0
+; CHECK: test3
+; CHECK: adrp x[[REG3:[0-9]+]], lCPI2_0@PAGE
+; CHECK: ldr q[[REG0:[0-9]+]], [x[[REG3]], lCPI2_0@PAGEOFF]
+; CHECK: ldr q[[REG1:[0-9]+]], [x[[REG3]], lCPI2_1@PAGEOFF]
+; CHECK: tbl.16b v{{[0-9]+}}, { v[[REG1]] }, v[[REG0]]
+define <16 x i1> @test3(i1* %ptr, i32 %v) {
+bb:
+ %Shuff = shufflevector <16 x i1> <i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 0, i1 0>, <16 x i1> undef,
+ <16 x i32> <i32 2, i32 undef, i32 6, i32 undef, i32 10, i32 12, i32 14,
+ i32 0, i32 2, i32 undef, i32 6, i32 undef, i32 10, i32 12,
+ i32 14, i32 0>
+ ret <16 x i1> %Shuff
+}
+; CHECK: lCPI3_1:
+; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 1 ; 0x1
+; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 18 ; 0x12
+; CHECK: .byte 10 ; 0xa
+; CHECK: .byte 12 ; 0xc
+; CHECK: .byte 14 ; 0xe
+; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 31 ; 0x1f
+; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 30 ; 0x1e
+; CHECK: .byte 10 ; 0xa
+; CHECK: .byte 12 ; 0xc
+; CHECK: .byte 14 ; 0xe
+; CHECK: .byte 0 ; 0x0
+; CHECK: _test4:
+; CHECK: ldr q[[REG1:[0-9]+]]
+; CHECK: movi.2d v[[REG0:[0-9]+]], #0000000000000000
+; CHECK: adrp x[[REG3:[0-9]+]], lCPI3_1@PAGE
+; CHECK: ldr q[[REG2:[0-9]+]], [x[[REG3]], lCPI3_1@PAGEOFF]
+; CHECK: tbl.16b v{{[0-9]+}}, { v[[REG0]], v[[REG1]] }, v[[REG2]]
+define <16 x i1> @test4(i1* %ptr, i32 %v) {
+bb:
+ %Shuff = shufflevector <16 x i1> zeroinitializer,
+ <16 x i1> <i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 0, i1 0, i1 0, i1 1,
+ i1 1, i1 0, i1 0, i1 1, i1 0, i1 0>,
+ <16 x i32> <i32 2, i32 1, i32 6, i32 18, i32 10, i32 12, i32 14, i32 0,
+ i32 2, i32 31, i32 6, i32 30, i32 10, i32 12, i32 14, i32 0>
+ ret <16 x i1> %Shuff
+}
diff --git a/test/CodeGen/AArch64/arm64-vsqrt.ll b/test/CodeGen/AArch64/arm64-vsqrt.ll
new file mode 100644
index 000000000000..02b7c7ec5d80
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vsqrt.ll
@@ -0,0 +1,232 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x float> @frecps_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: frecps_2s:
+;CHECK: frecps.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frecps_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: frecps_4s:
+;CHECK: frecps.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frecps_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: frecps_2d:
+;CHECK: frecps.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+
+define <2 x float> @frsqrts_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK-LABEL: frsqrts_2s:
+;CHECK: frsqrts.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frsqrts_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: frsqrts_4s:
+;CHECK: frsqrts.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frsqrts_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: frsqrts_2d:
+;CHECK: frsqrts.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp2 = load <2 x double>* %B
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
+ ret <2 x double> %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double>, <2 x double>) nounwind readnone
+
+define <2 x float> @frecpe_2s(<2 x float>* %A) nounwind {
+;CHECK-LABEL: frecpe_2s:
+;CHECK: frecpe.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.frecpe.v2f32(<2 x float> %tmp1)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frecpe_4s(<4 x float>* %A) nounwind {
+;CHECK-LABEL: frecpe_4s:
+;CHECK: frecpe.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.frecpe.v4f32(<4 x float> %tmp1)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frecpe_2d(<2 x double>* %A) nounwind {
+;CHECK-LABEL: frecpe_2d:
+;CHECK: frecpe.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.frecpe.v2f64(<2 x double> %tmp1)
+ ret <2 x double> %tmp3
+}
+
+define float @frecpe_s(float* %A) nounwind {
+;CHECK-LABEL: frecpe_s:
+;CHECK: frecpe s0, {{s[0-9]+}}
+ %tmp1 = load float* %A
+ %tmp3 = call float @llvm.aarch64.neon.frecpe.f32(float %tmp1)
+ ret float %tmp3
+}
+
+define double @frecpe_d(double* %A) nounwind {
+;CHECK-LABEL: frecpe_d:
+;CHECK: frecpe d0, {{d[0-9]+}}
+ %tmp1 = load double* %A
+ %tmp3 = call double @llvm.aarch64.neon.frecpe.f64(double %tmp1)
+ ret double %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.frecpe.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.frecpe.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.frecpe.v2f64(<2 x double>) nounwind readnone
+declare float @llvm.aarch64.neon.frecpe.f32(float) nounwind readnone
+declare double @llvm.aarch64.neon.frecpe.f64(double) nounwind readnone
+
+define float @frecpx_s(float* %A) nounwind {
+;CHECK-LABEL: frecpx_s:
+;CHECK: frecpx s0, {{s[0-9]+}}
+ %tmp1 = load float* %A
+ %tmp3 = call float @llvm.aarch64.neon.frecpx.f32(float %tmp1)
+ ret float %tmp3
+}
+
+define double @frecpx_d(double* %A) nounwind {
+;CHECK-LABEL: frecpx_d:
+;CHECK: frecpx d0, {{d[0-9]+}}
+ %tmp1 = load double* %A
+ %tmp3 = call double @llvm.aarch64.neon.frecpx.f64(double %tmp1)
+ ret double %tmp3
+}
+
+declare float @llvm.aarch64.neon.frecpx.f32(float) nounwind readnone
+declare double @llvm.aarch64.neon.frecpx.f64(double) nounwind readnone
+
+define <2 x float> @frsqrte_2s(<2 x float>* %A) nounwind {
+;CHECK-LABEL: frsqrte_2s:
+;CHECK: frsqrte.2s
+ %tmp1 = load <2 x float>* %A
+ %tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrte.v2f32(<2 x float> %tmp1)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @frsqrte_4s(<4 x float>* %A) nounwind {
+;CHECK-LABEL: frsqrte_4s:
+;CHECK: frsqrte.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrte.v4f32(<4 x float> %tmp1)
+ ret <4 x float> %tmp3
+}
+
+define <2 x double> @frsqrte_2d(<2 x double>* %A) nounwind {
+;CHECK-LABEL: frsqrte_2d:
+;CHECK: frsqrte.2d
+ %tmp1 = load <2 x double>* %A
+ %tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrte.v2f64(<2 x double> %tmp1)
+ ret <2 x double> %tmp3
+}
+
+define float @frsqrte_s(float* %A) nounwind {
+;CHECK-LABEL: frsqrte_s:
+;CHECK: frsqrte s0, {{s[0-9]+}}
+ %tmp1 = load float* %A
+ %tmp3 = call float @llvm.aarch64.neon.frsqrte.f32(float %tmp1)
+ ret float %tmp3
+}
+
+define double @frsqrte_d(double* %A) nounwind {
+;CHECK-LABEL: frsqrte_d:
+;CHECK: frsqrte d0, {{d[0-9]+}}
+ %tmp1 = load double* %A
+ %tmp3 = call double @llvm.aarch64.neon.frsqrte.f64(double %tmp1)
+ ret double %tmp3
+}
+
+declare <2 x float> @llvm.aarch64.neon.frsqrte.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.aarch64.neon.frsqrte.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.aarch64.neon.frsqrte.v2f64(<2 x double>) nounwind readnone
+declare float @llvm.aarch64.neon.frsqrte.f32(float) nounwind readnone
+declare double @llvm.aarch64.neon.frsqrte.f64(double) nounwind readnone
+
+define <2 x i32> @urecpe_2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: urecpe_2s:
+;CHECK: urecpe.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.urecpe.v2i32(<2 x i32> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @urecpe_4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: urecpe_4s:
+;CHECK: urecpe.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.urecpe.v4i32(<4 x i32> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.urecpe.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.urecpe.v4i32(<4 x i32>) nounwind readnone
+
+define <2 x i32> @ursqrte_2s(<2 x i32>* %A) nounwind {
+;CHECK-LABEL: ursqrte_2s:
+;CHECK: ursqrte.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.ursqrte.v2i32(<2 x i32> %tmp1)
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @ursqrte_4s(<4 x i32>* %A) nounwind {
+;CHECK-LABEL: ursqrte_4s:
+;CHECK: ursqrte.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp3 = call <4 x i32> @llvm.aarch64.neon.ursqrte.v4i32(<4 x i32> %tmp1)
+ ret <4 x i32> %tmp3
+}
+
+declare <2 x i32> @llvm.aarch64.neon.ursqrte.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.ursqrte.v4i32(<4 x i32>) nounwind readnone
+
+define float @f1(float %a, float %b) nounwind readnone optsize ssp {
+; CHECK-LABEL: f1:
+; CHECK: frsqrts s0, s0, s1
+; CHECK-NEXT: ret
+ %vrsqrtss.i = tail call float @llvm.aarch64.neon.frsqrts.f32(float %a, float %b) nounwind
+ ret float %vrsqrtss.i
+}
+
+define double @f2(double %a, double %b) nounwind readnone optsize ssp {
+; CHECK-LABEL: f2:
+; CHECK: frsqrts d0, d0, d1
+; CHECK-NEXT: ret
+ %vrsqrtsd.i = tail call double @llvm.aarch64.neon.frsqrts.f64(double %a, double %b) nounwind
+ ret double %vrsqrtsd.i
+}
+
+declare double @llvm.aarch64.neon.frsqrts.f64(double, double) nounwind readnone
+declare float @llvm.aarch64.neon.frsqrts.f32(float, float) nounwind readnone
diff --git a/test/CodeGen/AArch64/arm64-vsra.ll b/test/CodeGen/AArch64/arm64-vsra.ll
new file mode 100644
index 000000000000..5e9cef3e7e28
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vsra.ll
@@ -0,0 +1,150 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vsras8:
+;CHECK: ssra.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
+ %tmp4 = add <8 x i8> %tmp1, %tmp3
+ ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vsras16:
+;CHECK: ssra.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
+ %tmp4 = add <4 x i16> %tmp1, %tmp3
+ ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: vsras32:
+;CHECK: ssra.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
+ %tmp4 = add <2 x i32> %tmp1, %tmp3
+ ret <2 x i32> %tmp4
+}
+
+define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vsraQs8:
+;CHECK: ssra.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
+ %tmp4 = add <16 x i8> %tmp1, %tmp3
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vsraQs16:
+;CHECK: ssra.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
+ %tmp4 = add <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vsraQs32:
+;CHECK: ssra.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
+ %tmp4 = add <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: vsraQs64:
+;CHECK: ssra.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
+ %tmp4 = add <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vsrau8:
+;CHECK: usra.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
+ %tmp4 = add <8 x i8> %tmp1, %tmp3
+ ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vsrau16:
+;CHECK: usra.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
+ %tmp4 = add <4 x i16> %tmp1, %tmp3
+ ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: vsrau32:
+;CHECK: usra.2s
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
+ %tmp4 = add <2 x i32> %tmp1, %tmp3
+ ret <2 x i32> %tmp4
+}
+
+
+define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vsraQu8:
+;CHECK: usra.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
+ %tmp4 = add <16 x i8> %tmp1, %tmp3
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vsraQu16:
+;CHECK: usra.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
+ %tmp4 = add <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vsraQu32:
+;CHECK: usra.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
+ %tmp4 = add <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: vsraQu64:
+;CHECK: usra.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
+ %tmp4 = add <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}
+
+define <1 x i64> @vsra_v1i64(<1 x i64> %A, <1 x i64> %B) nounwind {
+; CHECK-LABEL: vsra_v1i64:
+; CHECK: ssra d0, d1, #63
+ %tmp3 = ashr <1 x i64> %B, < i64 63 >
+ %tmp4 = add <1 x i64> %A, %tmp3
+ ret <1 x i64> %tmp4
+}
diff --git a/test/CodeGen/AArch64/arm64-vsub.ll b/test/CodeGen/AArch64/arm64-vsub.ll
new file mode 100644
index 000000000000..c2c8755c0669
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-vsub.ll
@@ -0,0 +1,417 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @subhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: subhn8b:
+;CHECK: subhn.8b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @subhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: subhn4h:
+;CHECK: subhn.4h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @subhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: subhn2s:
+;CHECK: subhn.2s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @subhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind {
+;CHECK-LABEL: subhn2_16b:
+;CHECK: subhn.8b
+;CHECK-NEXT: subhn2.16b
+ %vsubhn2.i = tail call <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %vsubhn_high2.i = tail call <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %res = shufflevector <8 x i8> %vsubhn2.i, <8 x i8> %vsubhn_high2.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @subhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind {
+;CHECK-LABEL: subhn2_8h:
+;CHECK: subhn.4h
+;CHECK-NEXT: subhn2.8h
+ %vsubhn2.i = tail call <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %vsubhn_high3.i = tail call <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %res = shufflevector <4 x i16> %vsubhn2.i, <4 x i16> %vsubhn_high3.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @subhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind {
+;CHECK-LABEL: subhn2_4s:
+;CHECK: subhn.2s
+;CHECK-NEXT: subhn2.4s
+ %vsubhn2.i = tail call <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %vsubhn_high3.i = tail call <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %res = shufflevector <2 x i32> %vsubhn2.i, <2 x i32> %vsubhn_high3.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+declare <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i8> @rsubhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: rsubhn8b:
+;CHECK: rsubhn.8b
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @rsubhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: rsubhn4h:
+;CHECK: rsubhn.4h
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @rsubhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK-LABEL: rsubhn2s:
+;CHECK: rsubhn.2s
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i64>* %B
+ %tmp3 = call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @rsubhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind {
+;CHECK-LABEL: rsubhn2_16b:
+;CHECK: rsubhn.8b
+;CHECK-NEXT: rsubhn2.16b
+ %vrsubhn2.i = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %vrsubhn_high2.i = tail call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) nounwind
+ %res = shufflevector <8 x i8> %vrsubhn2.i, <8 x i8> %vrsubhn_high2.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @rsubhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind {
+;CHECK-LABEL: rsubhn2_8h:
+;CHECK: rsubhn.4h
+;CHECK-NEXT: rsubhn2.8h
+ %vrsubhn2.i = tail call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %vrsubhn_high3.i = tail call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) nounwind
+ %res = shufflevector <4 x i16> %vrsubhn2.i, <4 x i16> %vrsubhn_high3.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @rsubhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind {
+;CHECK-LABEL: rsubhn2_4s:
+;CHECK: rsubhn.2s
+;CHECK-NEXT: rsubhn2.4s
+ %vrsubhn2.i = tail call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %vrsubhn_high3.i = tail call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) nounwind
+ %res = shufflevector <2 x i32> %vrsubhn2.i, <2 x i32> %vrsubhn_high3.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %res
+}
+
+declare <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+declare <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
+declare <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @ssubl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: ssubl8h:
+;CHECK: ssubl.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
+ %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+ %tmp5 = sub <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @ssubl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: ssubl4s:
+;CHECK: ssubl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
+ %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+ %tmp5 = sub <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @ssubl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: ssubl2d:
+;CHECK: ssubl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
+ %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+ %tmp5 = sub <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i16> @ssubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: ssubl2_8h:
+;CHECK: ssubl2.8h
+ %tmp1 = load <16 x i8>* %A
+ %high1 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext1 = sext <8 x i8> %high1 to <8 x i16>
+
+ %tmp2 = load <16 x i8>* %B
+ %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext2 = sext <8 x i8> %high2 to <8 x i16>
+
+ %res = sub <8 x i16> %ext1, %ext2
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @ssubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: ssubl2_4s:
+;CHECK: ssubl2.4s
+ %tmp1 = load <8 x i16>* %A
+ %high1 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext1 = sext <4 x i16> %high1 to <4 x i32>
+
+ %tmp2 = load <8 x i16>* %B
+ %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext2 = sext <4 x i16> %high2 to <4 x i32>
+
+ %res = sub <4 x i32> %ext1, %ext2
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @ssubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: ssubl2_2d:
+;CHECK: ssubl2.2d
+ %tmp1 = load <4 x i32>* %A
+ %high1 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext1 = sext <2 x i32> %high1 to <2 x i64>
+
+ %tmp2 = load <4 x i32>* %B
+ %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext2 = sext <2 x i32> %high2 to <2 x i64>
+
+ %res = sub <2 x i64> %ext1, %ext2
+ ret <2 x i64> %res
+}
+
+define <8 x i16> @usubl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: usubl8h:
+;CHECK: usubl.8h
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
+ %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+ %tmp5 = sub <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @usubl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: usubl4s:
+;CHECK: usubl.4s
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
+ %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+ %tmp5 = sub <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @usubl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: usubl2d:
+;CHECK: usubl.2d
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
+ %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+ %tmp5 = sub <2 x i64> %tmp3, %tmp4
+ ret <2 x i64> %tmp5
+}
+
+define <8 x i16> @usubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: usubl2_8h:
+;CHECK: usubl2.8h
+ %tmp1 = load <16 x i8>* %A
+ %high1 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext1 = zext <8 x i8> %high1 to <8 x i16>
+
+ %tmp2 = load <16 x i8>* %B
+ %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext2 = zext <8 x i8> %high2 to <8 x i16>
+
+ %res = sub <8 x i16> %ext1, %ext2
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @usubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: usubl2_4s:
+;CHECK: usubl2.4s
+ %tmp1 = load <8 x i16>* %A
+ %high1 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext1 = zext <4 x i16> %high1 to <4 x i32>
+
+ %tmp2 = load <8 x i16>* %B
+ %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext2 = zext <4 x i16> %high2 to <4 x i32>
+
+ %res = sub <4 x i32> %ext1, %ext2
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @usubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: usubl2_2d:
+;CHECK: usubl2.2d
+ %tmp1 = load <4 x i32>* %A
+ %high1 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext1 = zext <2 x i32> %high1 to <2 x i64>
+
+ %tmp2 = load <4 x i32>* %B
+ %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext2 = zext <2 x i32> %high2 to <2 x i64>
+
+ %res = sub <2 x i64> %ext1, %ext2
+ ret <2 x i64> %res
+}
+
+define <8 x i16> @ssubw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: ssubw8h:
+;CHECK: ssubw.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
+ %tmp4 = sub <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @ssubw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: ssubw4s:
+;CHECK: ssubw.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
+ %tmp4 = sub <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @ssubw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: ssubw2d:
+;CHECK: ssubw.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
+ %tmp4 = sub <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i16> @ssubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: ssubw2_8h:
+;CHECK: ssubw2.8h
+ %tmp1 = load <8 x i16>* %A
+
+ %tmp2 = load <16 x i8>* %B
+ %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext2 = sext <8 x i8> %high2 to <8 x i16>
+
+ %res = sub <8 x i16> %tmp1, %ext2
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @ssubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: ssubw2_4s:
+;CHECK: ssubw2.4s
+ %tmp1 = load <4 x i32>* %A
+
+ %tmp2 = load <8 x i16>* %B
+ %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext2 = sext <4 x i16> %high2 to <4 x i32>
+
+ %res = sub <4 x i32> %tmp1, %ext2
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @ssubw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: ssubw2_2d:
+;CHECK: ssubw2.2d
+ %tmp1 = load <2 x i64>* %A
+
+ %tmp2 = load <4 x i32>* %B
+ %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext2 = sext <2 x i32> %high2 to <2 x i64>
+
+ %res = sub <2 x i64> %tmp1, %ext2
+ ret <2 x i64> %res
+}
+
+define <8 x i16> @usubw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: usubw8h:
+;CHECK: usubw.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
+ %tmp4 = sub <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @usubw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: usubw4s:
+;CHECK: usubw.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
+ %tmp4 = sub <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @usubw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+;CHECK-LABEL: usubw2d:
+;CHECK: usubw.2d
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
+ %tmp4 = sub <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i16> @usubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: usubw2_8h:
+;CHECK: usubw2.8h
+ %tmp1 = load <8 x i16>* %A
+
+ %tmp2 = load <16 x i8>* %B
+ %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %ext2 = zext <8 x i8> %high2 to <8 x i16>
+
+ %res = sub <8 x i16> %tmp1, %ext2
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @usubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: usubw2_4s:
+;CHECK: usubw2.4s
+ %tmp1 = load <4 x i32>* %A
+
+ %tmp2 = load <8 x i16>* %B
+ %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %ext2 = zext <4 x i16> %high2 to <4 x i32>
+
+ %res = sub <4 x i32> %tmp1, %ext2
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @usubw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: usubw2_2d:
+;CHECK: usubw2.2d
+ %tmp1 = load <2 x i64>* %A
+
+ %tmp2 = load <4 x i32>* %B
+ %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %ext2 = zext <2 x i32> %high2 to <2 x i64>
+
+ %res = sub <2 x i64> %tmp1, %ext2
+ ret <2 x i64> %res
+}
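For context: arm64-vsub.ll covers three families, subhn/rsubhn (subtract and keep the high half of each widened lane), ssubl/usubl (sign- or zero-extend both operands, then subtract), and ssubw/usubw (extend only the second operand). A minimal C sketch of two of these shapes via the ACLE intrinsics, illustrative only and not part of the imported test file:

#include <arm_neon.h>

/* zero-extend both 8-bit inputs to 16 bits, then subtract: the usubl.8h pattern above */
uint16x8_t usubl_example(uint8x8_t a, uint8x8_t b) {
    return vsubl_u8(a, b);
}

/* subtract 16-bit lanes and keep the high 8 bits of each result: the subhn.8b pattern */
uint8x8_t subhn_example(uint16x8_t a, uint16x8_t b) {
    return vsubhn_u16(a, b);
}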
diff --git a/test/CodeGen/AArch64/arm64-weak-reference.ll b/test/CodeGen/AArch64/arm64-weak-reference.ll
new file mode 100644
index 000000000000..b2135e0960cb
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-weak-reference.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
+
+@x = extern_weak global i32
+
+define i32 @fn() nounwind ssp {
+; CHECK-LABEL: fn:
+; CHECK: .weak_reference
+ %val = load i32* @x, align 4
+ ret i32 %val
+}
diff --git a/test/CodeGen/AArch64/arm64-xaluo.ll b/test/CodeGen/AArch64/arm64-xaluo.ll
new file mode 100644
index 000000000000..0c300de802b7
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -0,0 +1,524 @@
+; RUN: llc < %s -march=arm64 -aarch64-atomic-cfg-tidy=0 | FileCheck %s
+
+;
+; Get the actual value of the overflow bit.
+;
+define i1 @saddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; CHECK-LABEL: saddo.i32
+; CHECK: adds w8, w0, w1
+; CHECK-NEXT: cset w0, vs
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define i1 @saddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; CHECK-LABEL: saddo.i64
+; CHECK: adds x8, x0, x1
+; CHECK-NEXT: cset w0, vs
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; CHECK-LABEL: uaddo.i32
+; CHECK: adds w8, w0, w1
+; CHECK-NEXT: cset w0, hs
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; CHECK-LABEL: uaddo.i64
+; CHECK: adds x8, x0, x1
+; CHECK-NEXT: cset w0, hs
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define i1 @ssubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; CHECK-LABEL: ssubo.i32
+; CHECK: subs w8, w0, w1
+; CHECK-NEXT: cset w0, vs
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; CHECK-LABEL: ssubo.i64
+; CHECK: subs x8, x0, x1
+; CHECK-NEXT: cset w0, vs
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; CHECK-LABEL: usubo.i32
+; CHECK: subs w8, w0, w1
+; CHECK-NEXT: cset w0, lo
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; CHECK-LABEL: usubo.i64
+; CHECK: subs x8, x0, x1
+; CHECK-NEXT: cset w0, lo
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; CHECK-LABEL: smulo.i32
+; CHECK: smull x8, w0, w1
+; CHECK-NEXT: lsr x9, x8, #32
+; CHECK-NEXT: cmp w9, w8, asr #31
+; CHECK-NEXT: cset w0, ne
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; CHECK-LABEL: smulo.i64
+; CHECK: mul x8, x0, x1
+; CHECK-NEXT: smulh x9, x0, x1
+; CHECK-NEXT: cmp x9, x8, asr #63
+; CHECK-NEXT: cset w0, ne
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; CHECK-LABEL: umulo.i32
+; CHECK: umull x8, w0, w1
+; CHECK-NEXT: cmp xzr, x8, lsr #32
+; CHECK-NEXT: cset w0, ne
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; CHECK-LABEL: umulo.i64
+; CHECK: umulh x8, x0, x1
+; CHECK-NEXT: cmp xzr, x8
+; CHECK-NEXT: cset w8, ne
+; CHECK-NEXT: mul x9, x0, x1
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+
+;
+; Check the use of the overflow bit in combination with a select instruction.
+;
+define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i32
+; CHECK: cmn w0, w1
+; CHECK-NEXT: csel w0, w0, w1, vs
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i64
+; CHECK: cmn x0, x1
+; CHECK-NEXT: csel x0, x0, x1, vs
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i32
+; CHECK: cmn w0, w1
+; CHECK-NEXT: csel w0, w0, w1, hs
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i64
+; CHECK: cmn x0, x1
+; CHECK-NEXT: csel x0, x0, x1, hs
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i32
+; CHECK: cmp w0, w1
+; CHECK-NEXT: csel w0, w0, w1, vs
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i64
+; CHECK: cmp x0, x1
+; CHECK-NEXT: csel x0, x0, x1, vs
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i32
+; CHECK: cmp w0, w1
+; CHECK-NEXT: csel w0, w0, w1, lo
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i64
+; CHECK: cmp x0, x1
+; CHECK-NEXT: csel x0, x0, x1, lo
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i32
+; CHECK: smull x8, w0, w1
+; CHECK-NEXT: lsr x9, x8, #32
+; CHECK-NEXT: cmp w9, w8, asr #31
+; CHECK-NEXT: csel w0, w0, w1, ne
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i64
+; CHECK: mul x8, x0, x1
+; CHECK-NEXT: smulh x9, x0, x1
+; CHECK-NEXT: cmp x9, x8, asr #63
+; CHECK-NEXT: csel x0, x0, x1, ne
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i32
+; CHECK: umull x8, w0, w1
+; CHECK-NEXT: cmp xzr, x8, lsr #32
+; CHECK-NEXT: csel w0, w0, w1, ne
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i64
+; CHECK: umulh x8, x0, x1
+; CHECK-NEXT: cmp xzr, x8
+; CHECK-NEXT: csel x0, x0, x1, ne
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+
+;
+; Check the use of the overflow bit in combination with a branch instruction.
+;
+define i1 @saddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i32
+; CHECK: cmn w0, w1
+; CHECK-NEXT: b.vc
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @saddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i64
+; CHECK: cmn x0, x1
+; CHECK-NEXT: b.vc
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i32
+; CHECK: cmn w0, w1
+; CHECK-NEXT: b.lo
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i64
+; CHECK: cmn x0, x1
+; CHECK-NEXT: b.lo
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i32
+; CHECK: cmp w0, w1
+; CHECK-NEXT: b.vc
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i64
+; CHECK: cmp x0, x1
+; CHECK-NEXT: b.vc
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @usubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i32
+; CHECK: cmp w0, w1
+; CHECK-NEXT: b.hs
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @usubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i64
+; CHECK: cmp x0, x1
+; CHECK-NEXT: b.hs
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @smulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i32
+; CHECK: smull x8, w0, w1
+; CHECK-NEXT: lsr x9, x8, #32
+; CHECK-NEXT: cmp w9, w8, asr #31
+; CHECK-NEXT: b.eq
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @smulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i64
+; CHECK: mul x8, x0, x1
+; CHECK-NEXT: smulh x9, x0, x1
+; CHECK-NEXT: cmp x9, x8, asr #63
+; CHECK-NEXT: b.eq
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @umulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i32
+; CHECK: umull x8, w0, w1
+; CHECK-NEXT: cmp xzr, x8, lsr #32
+; CHECK-NEXT: b.eq
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define i1 @umulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i64
+; CHECK: umulh x8, x0, x1
+; CHECK-NEXT: cbz
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
+
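For context: the llvm.*.with.overflow intrinsics exercised in arm64-xaluo.ll are what front ends typically emit for checked integer arithmetic, for example the GCC/Clang overflow builtins. A minimal C sketch, illustrative only; the exact instruction sequences depend on the compiler version:

#include <limits.h>
#include <stdbool.h>

/* signed add with an overflow flag; typically lowered through llvm.sadd.with.overflow.i32,
   i.e. the adds + cset vs sequence checked in saddo.i32 above */
bool checked_add(int a, int b, int *res) {
    return __builtin_sadd_overflow(a, b, res);
}

/* branching on the overflow bit instead of materialising it (cf. saddo.br.i32) */
int saturating_add(int a, int b) {
    int r;
    if (__builtin_sadd_overflow(a, b, &r))
        return a < 0 ? INT_MIN : INT_MAX;
    return r;
}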
diff --git a/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll b/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll
new file mode 100644
index 000000000000..c56d607aa812
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple=arm64-apple-ios -mcpu=cyclone < %s | FileCheck %s
+; rdar://12254953
+
+define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
+entry:
+; CHECK-LABEL: t:
+; CHECK: mov x0, [[REG1:x[0-9]+]]
+; CHECK: mov x1, [[REG2:x[0-9]+]]
+; CHECK: bl _foo
+; CHECK: mov x0, [[REG1]]
+; CHECK: mov x1, [[REG2]]
+ %call = call i32 @foo(i32 %c, i32 %d) nounwind
+ %call1 = call i32 @foo(i32 %c, i32 %d) nounwind
+ unreachable
+}
+
+declare i32 @foo(i32, i32)
diff --git a/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll b/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
new file mode 100644
index 000000000000..349bb6fd78af
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
@@ -0,0 +1,49 @@
+; RUN: llc -mtriple=arm64-apple-ios -mcpu=cyclone < %s | FileCheck %s
+; rdar://11481771
+; rdar://13713797
+
+define void @t1() nounwind ssp {
+entry:
+; CHECK-LABEL: t1:
+; CHECK-NOT: fmov
+; CHECK: movi.2d v0, #0000000000000000
+; CHECK: movi.2d v1, #0000000000000000
+; CHECK: movi.2d v2, #0000000000000000
+; CHECK: movi.2d v3, #0000000000000000
+ tail call void @bar(double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00) nounwind
+ ret void
+}
+
+define void @t2() nounwind ssp {
+entry:
+; CHECK-LABEL: t2:
+; CHECK-NOT: mov w0, wzr
+; CHECK: movz w0, #0
+; CHECK: movz w1, #0
+ tail call void @bari(i32 0, i32 0) nounwind
+ ret void
+}
+
+define void @t3() nounwind ssp {
+entry:
+; CHECK-LABEL: t3:
+; CHECK-NOT: mov x0, xzr
+; CHECK: movz x0, #0
+; CHECK: movz x1, #0
+ tail call void @barl(i64 0, i64 0) nounwind
+ ret void
+}
+
+define void @t4() nounwind ssp {
+; CHECK-LABEL: t4:
+; CHECK-NOT: fmov
+; CHECK: movi.2d v0, #0000000000000000
+; CHECK: movi.2d v1, #0000000000000000
+ tail call void @barf(float 0.000000e+00, float 0.000000e+00) nounwind
+ ret void
+}
+
+declare void @bar(double, double, double, double)
+declare void @bari(i32, i32)
+declare void @barl(i64, i64)
+declare void @barf(float, float)
diff --git a/test/CodeGen/AArch64/arm64-zext.ll b/test/CodeGen/AArch64/arm64-zext.ll
new file mode 100644
index 000000000000..8d9e5ea040ee
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-zext.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=arm64 | FileCheck %s
+
+define i64 @foo(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+; CHECK-LABEL: foo:
+; CHECK: add w0, w1, w0
+; CHECK: ret
+ %add = add i32 %b, %a
+ %conv = zext i32 %add to i64
+ ret i64 %conv
+}
diff --git a/test/CodeGen/AArch64/arm64-zextload-unscaled.ll b/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
new file mode 100644
index 000000000000..c475dbd21eee
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=arm64 < %s | FileCheck %s
+
+@var32 = global i32 0
+
+define void @test_zextloadi1_unscaled(i1* %base) {
+; CHECK-LABEL: test_zextloadi1_unscaled:
+; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
+
+ %addr = getelementptr i1* %base, i32 -7
+ %val = load i1* %addr, align 1
+
+ %extended = zext i1 %val to i32
+ store i32 %extended, i32* @var32, align 4
+ ret void
+}
+
+define void @test_zextloadi8_unscaled(i8* %base) {
+; CHECK-LABEL: test_zextloadi8_unscaled:
+; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
+
+ %addr = getelementptr i8* %base, i32 -7
+ %val = load i8* %addr, align 1
+
+ %extended = zext i8 %val to i32
+ store i32 %extended, i32* @var32, align 4
+ ret void
+}
+
+define void @test_zextloadi16_unscaled(i16* %base) {
+; CHECK-LABEL: test_zextloadi16_unscaled:
+; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-14]
+
+ %addr = getelementptr i16* %base, i32 -7
+ %val = load i16* %addr, align 2
+
+ %extended = zext i16 %val to i32
+ store i32 %extended, i32* @var32, align 4
+ ret void
+}
+
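For context: the ldurb/ldurh checks in arm64-zextload-unscaled.ll cover zero-extending loads at small negative offsets, which cannot use the scaled unsigned-immediate ldrb/ldrh forms. A minimal C sketch of the source-level pattern, illustrative only:

#include <stdint.h>

extern uint32_t var32;

/* byte load seven bytes below the pointer, zero-extended to 32 bits; the negative
   offset is what forces the unscaled ldurb form (cf. test_zextloadi8_unscaled) */
void load_below_base(const uint8_t *base) {
    var32 = base[-7];
}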
diff --git a/test/CodeGen/AArch64/arm64-zip.ll b/test/CodeGen/AArch64/arm64-zip.ll
new file mode 100644
index 000000000000..304b28099432
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-zip.ll
@@ -0,0 +1,107 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vzipi8:
+;CHECK: zip1.8b
+;CHECK: zip2.8b
+;CHECK-NEXT: add.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK-LABEL: vzipi16:
+;CHECK: zip1.4h
+;CHECK: zip2.4h
+;CHECK-NEXT: add.4h
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vzipQi8:
+;CHECK: zip1.16b
+;CHECK: zip2.16b
+;CHECK-NEXT: add.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LABEL: vzipQi16:
+;CHECK: zip1.8h
+;CHECK: zip2.8h
+;CHECK-NEXT: add.8h
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LABEL: vzipQi32:
+;CHECK: zip1.4s
+;CHECK: zip2.4s
+;CHECK-NEXT: add.4s
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK-LABEL: vzipQf:
+;CHECK: zip1.4s
+;CHECK: zip2.4s
+;CHECK-NEXT: fadd.4s
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %tmp5 = fadd <4 x float> %tmp3, %tmp4
+ ret <4 x float> %tmp5
+}
+
+; Undef shuffle indices should not prevent matching to VZIP:
+
+define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK-LABEL: vzipi8_undef:
+;CHECK: zip1.8b
+;CHECK: zip2.8b
+;CHECK-NEXT: add.8b
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 1, i32 9, i32 undef, i32 10, i32 3, i32 11>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 undef, i32 undef, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LABEL: vzipQi8_undef:
+;CHECK: zip1.16b
+;CHECK: zip2.16b
+;CHECK-NEXT: add.16b
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 undef, i32 undef, i32 undef, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 undef, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 undef, i32 14, i32 30, i32 undef, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
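For context: the shuffle masks in arm64-zip.ll (0,8,1,9,... and 4,12,5,13,...) are the low and high interleave patterns that select zip1/zip2, and undef lanes in a mask must not block that match. A minimal C sketch of the interleave-then-add shape via the ACLE vzip intrinsic, illustrative only:

#include <arm_neon.h>

/* interleave the two inputs (zip1.8b gives the low halves, zip2.8b the high halves),
   then add the two results, mirroring vzipi8 above */
uint8x8_t zip_and_add(uint8x8_t a, uint8x8_t b) {
    uint8x8x2_t z = vzip_u8(a, b);
    return vadd_u8(z.val[0], z.val[1]);
}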
diff --git a/test/CodeGen/AArch64/asm-large-immediate.ll b/test/CodeGen/AArch64/asm-large-immediate.ll
new file mode 100644
index 000000000000..05e4dddc7a7f
--- /dev/null
+++ b/test/CodeGen/AArch64/asm-large-immediate.ll
@@ -0,0 +1,10 @@
+; RUN: llc -march=aarch64 -no-integrated-as < %s | FileCheck %s
+
+define void @test() {
+entry:
+; CHECK: /* result: 68719476738 */
+ tail call void asm sideeffect "/* result: ${0:c} */", "i,~{dirflag},~{fpsr},~{flags}"( i64 68719476738 )
+; CHECK: /* result: -68719476738 */
+ tail call void asm sideeffect "/* result: ${0:n} */", "i,~{dirflag},~{fpsr},~{flags}"( i64 68719476738 )
+ ret void
+}
diff --git a/test/CodeGen/AArch64/assertion-rc-mismatch.ll b/test/CodeGen/AArch64/assertion-rc-mismatch.ll
new file mode 100644
index 000000000000..bcf206ec9bed
--- /dev/null
+++ b/test/CodeGen/AArch64/assertion-rc-mismatch.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; Test case related to <rdar://problem/15633429>.
+
+; CHECK-LABEL: small
+define i64 @small(i64 %encodedBase) {
+cmp:
+ %lnot.i.i = icmp eq i64 %encodedBase, 0
+ br i1 %lnot.i.i, label %if, label %else
+if:
+ %tmp1 = call i8* @llvm.returnaddress(i32 0)
+ br label %end
+else:
+ %tmp3 = call i8* @llvm.returnaddress(i32 0)
+ %ptr = getelementptr inbounds i8* %tmp3, i64 -16
+ %ld = load i8* %ptr, align 4
+ %tmp2 = inttoptr i8 %ld to i8*
+ br label %end
+end:
+ %tmp = phi i8* [ %tmp1, %if ], [ %tmp2, %else ]
+ %coerce.val.pi56 = ptrtoint i8* %tmp to i64
+ ret i64 %coerce.val.pi56
+}
+
+declare i8* @llvm.returnaddress(i32)
diff --git a/test/CodeGen/AArch64/atomic-ops.ll b/test/CodeGen/AArch64/atomic-ops.ll
index 5857faf80a16..26301b92f9fe 100644
--- a/test/CodeGen/AArch64/atomic-ops.ll
+++ b/test/CodeGen/AArch64/atomic-ops.ll
@@ -1,5 +1,11 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-REG %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-REG
+
+
+; Point of CHECK-REG is to make sure UNPREDICTABLE instructions aren't created
+; (i.e. reusing a register for status & data in store exclusive).
+; CHECK-REG-NOT: stlxrb w[[NEW:[0-9]+]], w[[NEW]], [x{{[0-9]+}}]
+; CHECK-REG-NOT: stlxrb w[[NEW:[0-9]+]], x[[NEW]], [x{{[0-9]+}}]
@var8 = global i8 0
@var16 = global i16 0
@@ -11,20 +17,18 @@ define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
%old = atomicrmw add i8* @var8, i8 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: add w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stlxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -33,20 +37,18 @@ define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
%old = atomicrmw add i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: add w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -55,20 +57,18 @@ define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
%old = atomicrmw add i32* @var32, i32 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: add w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stlxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -77,15 +77,13 @@ define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
%old = atomicrmw add i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add [[NEW:x[0-9]+]], x[[OLD]], x0
-; CHECK-REG: add x[[NEW:[0-9]+]], x{{[0-9]+}}, x0
-; CHECK-REG-NOT: stxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -99,20 +97,18 @@ define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
%old = atomicrmw sub i8* @var8, i8 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: sub w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -121,20 +117,18 @@ define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
%old = atomicrmw sub i16* @var16, i16 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: sub w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stlxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -143,20 +137,18 @@ define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
%old = atomicrmw sub i32* @var32, i32 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: sub w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -165,15 +157,13 @@ define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
%old = atomicrmw sub i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub [[NEW:x[0-9]+]], x[[OLD]], x0
-; CHECK-REG: sub x[[NEW:[0-9]+]], x{{[0-9]+}}, x0
-; CHECK-REG-NOT: stlxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -187,20 +177,18 @@ define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
%old = atomicrmw and i8* @var8, i8 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: and w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stlxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -209,20 +197,18 @@ define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
%old = atomicrmw and i16* @var16, i16 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: and w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -231,20 +217,18 @@ define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
%old = atomicrmw and i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: and w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stlxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -253,15 +237,13 @@ define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
%old = atomicrmw and i64* @var64, i64 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and [[NEW:x[0-9]+]], x[[OLD]], x0
-; CHECK-REG: and x[[NEW:[0-9]+]], x{{[0-9]+}}, x0
-; CHECK-REG-NOT: stxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -275,20 +257,18 @@ define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
%old = atomicrmw or i8* @var8, i8 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: orr w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stlxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -297,20 +277,18 @@ define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
%old = atomicrmw or i16* @var16, i16 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: orr w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -319,20 +297,18 @@ define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
%old = atomicrmw or i32* @var32, i32 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: orr w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -341,15 +317,13 @@ define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
%old = atomicrmw or i64* @var64, i64 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr [[NEW:x[0-9]+]], x[[OLD]], x0
-; CHECK-REG: orr x[[NEW:[0-9]+]], x{{[0-9]+}}, x0
-; CHECK-REG-NOT: stlxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -363,20 +337,18 @@ define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
%old = atomicrmw xor i8* @var8, i8 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: eor w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -385,20 +357,18 @@ define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
%old = atomicrmw xor i16* @var16, i16 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: eor w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -407,20 +377,18 @@ define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
%old = atomicrmw xor i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-REG: eor w[[NEW:[0-9]+]], w{{[0-9]+}}, w0
-; CHECK-REG-NOT: stlxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -429,15 +397,13 @@ define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
%old = atomicrmw xor i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor [[NEW:x[0-9]+]], x[[OLD]], x0
-; CHECK-REG: eor x[[NEW:[0-9]+]], x{{[0-9]+}}, x0
-; CHECK-REG-NOT: stxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -451,18 +417,17 @@ define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
%old = atomicrmw xchg i8* @var8, i8 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-REG-NOT: stxrb w0, w0, [x{{[0-9]+}}]
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -471,18 +436,17 @@ define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
%old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-REG-NOT: stlxrh w0, w0, [x{{[0-9]+}}]
; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -491,18 +455,17 @@ define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
%old = atomicrmw xchg i32* @var32, i32 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-REG-NOT: stlxr w0, w0, [x{{[0-9]+}}]
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -511,13 +474,12 @@ define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
%old = atomicrmw xchg i64* @var64, i64 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; ; CHECK: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-REG-NOT: stxr w0, x0, [x{{[0-9]+}}]
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], x0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -532,21 +494,22 @@ define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
%old = atomicrmw min i8* @var8, i8 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, gt
-; CHECK-REG-NOT: stxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: sxtb w[[OLD_EXT:[0-9]+]], w[[OLD]]
+; CHECK-NEXT: cmp w[[OLD_EXT]], w0, sxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, le
+
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -555,21 +518,23 @@ define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
%old = atomicrmw min i16* @var16, i16 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], sxth
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, gt
-; CHECK-REG-NOT: stlxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: sxth w[[OLD_EXT:[0-9]+]], w[[OLD]]
+; CHECK-NEXT: cmp w[[OLD_EXT]], w0, sxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, le
+
+
; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -578,21 +543,22 @@ define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
%old = atomicrmw min i32* @var32, i32 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]]
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, gt
-; CHECK-REG-NOT: stxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, le
+
+
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -601,16 +567,17 @@ define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
%old = atomicrmw min i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp x0, x[[OLD]]
-; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, gt
-; CHECK-REG: csel x[[NEW:[0-9]+]], x{{[0-9]+}}, x0, gt
-; CHECK-REG-NOT: stlxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp x[[OLD]], x0
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, le
+
+
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -624,21 +591,23 @@ define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
%old = atomicrmw max i8* @var8, i8 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, lt
-; CHECK-REG-NOT: stlxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: sxtb w[[OLD_EXT:[0-9]+]], w[[OLD]]
+; CHECK-NEXT: cmp w[[OLD_EXT]], w0, sxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
+
+
; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -647,21 +616,23 @@ define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
%old = atomicrmw max i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], sxth
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, lt
-; CHECK-REG-NOT: stxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: sxth w[[OLD_EXT:[0-9]+]], w[[OLD]]
+; CHECK-NEXT: cmp w[[OLD_EXT]], w0, sxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
+
+
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -670,21 +641,22 @@ define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
%old = atomicrmw max i32* @var32, i32 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]]
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, lt
-; CHECK-REG-NOT: stlxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
+
+
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -693,16 +665,17 @@ define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
%old = atomicrmw max i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp x0, x[[OLD]]
-; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lt
-; CHECK-REG: csel x[[NEW:[0-9]+]], x{{[0-9]+}}, x0, lt
-; CHECK-REG-NOT: stlxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp x[[OLD]], x0
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, gt
+
+
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -716,21 +689,22 @@ define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
%old = atomicrmw umin i8* @var8, i8 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], uxtb
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, hi
-; CHECK-REG-NOT: stlxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0, uxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, ls
+
+
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -739,21 +713,22 @@ define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
%old = atomicrmw umin i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], uxth
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, hi
-; CHECK-REG-NOT: stxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0, uxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, ls
+
+
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -762,21 +737,22 @@ define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
%old = atomicrmw umin i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]]
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, hi
-; CHECK-REG-NOT: stlxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, ls
+
+
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -785,16 +761,17 @@ define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
%old = atomicrmw umin i64* @var64, i64 %offset acq_rel
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp x0, x[[OLD]]
-; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, hi
-; CHECK-REG: csel x[[NEW:[0-9]+]], x{{[0-9]+}}, x0, hi
-; CHECK-REG-NOT: stlxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp x[[OLD]], x0
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, ls
+
+
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -808,21 +785,22 @@ define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
%old = atomicrmw umax i8* @var8, i8 %offset acq_rel
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], uxtb
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, lo
-; CHECK-REG-NOT: stlxrb w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0, uxtb
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
+
+
; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
@@ -831,21 +809,22 @@ define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
%old = atomicrmw umax i16* @var16, i16 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]], uxth
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, lo
-; CHECK-REG-NOT: stxrh w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0, uxth
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
+
+
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
@@ -854,21 +833,22 @@ define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
%old = atomicrmw umax i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp w0, w[[OLD]]
-; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
-; CHECK-REG: csel w[[NEW:[0-9]+]], w{{[0-9]+}}, w0, lo
-; CHECK-REG-NOT: stlxr w[[NEW]], w[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp w[[OLD]], w0
+; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
+
+
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
@@ -877,16 +857,17 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
%old = atomicrmw umax i64* @var64, i64 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: cmp x0, x[[OLD]]
-; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lo
-; CHECK-REG: csel x[[NEW:[0-9]+]], x{{[0-9]+}}, x0, lo
-; CHECK-REG-NOT: stlxr w[[NEW]], x[[NEW]], [x{{[0-9]+}}]
+
+; CHECK-NEXT: cmp x[[OLD]], x0
+; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, hi
+
+
; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -897,94 +878,96 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i8:
- %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
+ %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+ %old = extractvalue { i8, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
- ; As above, w1 is a reasonable guess.
-; CHECK-REG-NOT: stxrb w1, w1, [x{{[0-9]+}}]
-; CHECK: stxrb [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stxrb [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i8 %old
}
define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i16:
- %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
+ %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+ %old = extractvalue { i16, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
- ; As above, w1 is a reasonable guess.
-; CHECK-REG-NOT: stlxrh w1, w1, [x{{[0-9]+}}]
-; CHECK: stlxrh [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stlxrh [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i16 %old
}
define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i32:
- %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
+ %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
+ %old = extractvalue { i32, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
- ; As above, w1 is a reasonable guess.
-; CHECK-REG-NOT: stlxr w1, w1, [x{{[0-9]+}}]
-; CHECK: stlxr [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stlxr [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
+; CHECK: mov {{[xw]}}0, {{[xw]}}[[OLD]]
ret i32 %old
}
-define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
+define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i64:
- %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
+ %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+ %old = extractvalue { i64, i1 } %pair, 0
+
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp x[[OLD]], x0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
; As above, w1 is a reasonable guess.
-; CHECK-REG-NOT: stxr w1, x1, [x{{[0-9]+}}]
; CHECK: stxr [[STATUS:w[0-9]+]], x1, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
; CHECK-NOT: dmb
-; CHECK: mov x0, x[[OLD]]
- ret i64 %old
+; CHECK: str x[[OLD]],
+ store i64 %old, i64* @var64
+ ret void
}
define i8 @test_atomic_load_monotonic_i8() nounwind {
@@ -992,7 +975,7 @@ define i8 @test_atomic_load_monotonic_i8() nounwind {
%val = load atomic i8* @var8 monotonic, align 1
; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var8
-; CHECK: ldrb w0, [x[[HIADDR]], #:lo12:var8]
+; CHECK: ldrb w0, [x[[HIADDR]], {{#?}}:lo12:var8]
; CHECK-NOT: dmb
ret i8 %val
@@ -1017,7 +1000,7 @@ define i8 @test_atomic_load_acquire_i8() nounwind {
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK-NOT: dmb
-; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
; CHECK-NOT: dmb
; CHECK: ldarb w0, [x[[ADDR]]]
; CHECK-NOT: dmb
@@ -1030,7 +1013,7 @@ define i8 @test_atomic_load_seq_cst_i8() nounwind {
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
; CHECK-NOT: dmb
-; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], {{#?}}:lo12:var8
; CHECK-NOT: dmb
; CHECK: ldarb w0, [x[[ADDR]]]
; CHECK-NOT: dmb
@@ -1043,7 +1026,7 @@ define i16 @test_atomic_load_monotonic_i16() nounwind {
; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var16
; CHECK-NOT: dmb
-; CHECK: ldrh w0, [x[[HIADDR]], #:lo12:var16]
+; CHECK: ldrh w0, [x[[HIADDR]], {{#?}}:lo12:var16]
; CHECK-NOT: dmb
ret i16 %val
@@ -1068,7 +1051,7 @@ define i64 @test_atomic_load_seq_cst_i64() nounwind {
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var64
; CHECK-NOT: dmb
-; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], {{#?}}:lo12:var64
; CHECK-NOT: dmb
; CHECK: ldar x0, [x[[ADDR]]]
; CHECK-NOT: dmb
@@ -1079,7 +1062,7 @@ define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
; CHECK-LABEL: test_atomic_store_monotonic_i8:
store atomic i8 %val, i8* @var8 monotonic, align 1
; CHECK: adrp x[[HIADDR:[0-9]+]], var8
-; CHECK: strb w0, [x[[HIADDR]], #:lo12:var8]
+; CHECK: strb w0, [x[[HIADDR]], {{#?}}:lo12:var8]
ret void
}
@@ -1101,7 +1084,7 @@ define void @test_atomic_store_release_i8(i8 %val) nounwind {
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
; CHECK-NOT: dmb
-; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], {{#?}}:lo12:var8
; CHECK-NOT: dmb
; CHECK: stlrb w0, [x[[ADDR]]]
; CHECK-NOT: dmb
@@ -1114,7 +1097,7 @@ define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
; CHECK-NOT: dmb
-; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], {{#?}}:lo12:var8
; CHECK-NOT: dmb
; CHECK: stlrb w0, [x[[ADDR]]]
; CHECK-NOT: dmb
@@ -1128,7 +1111,7 @@ define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var16
; CHECK-NOT: dmb
-; CHECK: strh w0, [x[[HIADDR]], #:lo12:var16]
+; CHECK: strh w0, [x[[HIADDR]], {{#?}}:lo12:var16]
; CHECK-NOT: dmb
ret void
}
@@ -1153,7 +1136,7 @@ define void @test_atomic_store_release_i64(i64 %val) nounwind {
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var64
; CHECK-NOT: dmb
-; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var64
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], {{#?}}:lo12:var64
; CHECK-NOT: dmb
; CHECK: stlr x0, [x[[ADDR]]]
; CHECK-NOT: dmb
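
For reference, the cmpxchg hunks in the atomic-ops.ll diff above track an IR change in this release: the instruction now takes separate success and failure orderings and returns a { value, i1 } pair instead of a bare value, so the old result is recovered with extractvalue. A minimal standalone sketch of the new form, written in the same 3.5-era IR syntax; the function name and global below are illustrative and not part of the patch:

@var32 = global i32 0

define i32 @cmpxchg_sketch(i32 %wanted, i32 %new) nounwind {
  ; New form: <success ordering> <failure ordering>, result is { i32, i1 }.
  %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
  %old = extractvalue { i32, i1 } %pair, 0   ; previously stored value
  %ok  = extractvalue { i32, i1 } %pair, 1   ; i1 success flag
  ret i32 %old
}

The i1 success flag is what the new cmpxchg-idioms.ll test further down branches on.
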
diff --git a/test/CodeGen/AArch64/basic-pic.ll b/test/CodeGen/AArch64/basic-pic.ll
index 682b7ba69d95..62d41bcead6b 100644
--- a/test/CodeGen/AArch64/basic-pic.ll
+++ b/test/CodeGen/AArch64/basic-pic.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
@var = global i32 0
@@ -7,7 +7,7 @@ define i32 @get_globalvar() {
%val = load i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
-; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], #:got_lo12:var]
+; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], {{#?}}:got_lo12:var]
; CHECK: ldr w0, [x[[GOTLOC]]]
ret i32 %val
@@ -18,7 +18,7 @@ define i32* @get_globalvaraddr() {
%val = load i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
-; CHECK: ldr x0, [x[[GOTHI]], #:got_lo12:var]
+; CHECK: ldr x0, [x[[GOTHI]], {{#?}}:got_lo12:var]
ret i32* @var
}
@@ -30,7 +30,7 @@ define i32 @get_hiddenvar() {
%val = load i32* @hiddenvar
; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
-; CHECK: ldr w0, [x[[HI]], #:lo12:hiddenvar]
+; CHECK: ldr w0, [x[[HI]], {{#?}}:lo12:hiddenvar]
ret i32 %val
}
@@ -40,7 +40,7 @@ define i32* @get_hiddenvaraddr() {
%val = load i32* @hiddenvar
; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
-; CHECK: add x0, [[HI]], #:lo12:hiddenvar
+; CHECK: add x0, [[HI]], {{#?}}:lo12:hiddenvar
ret i32* @hiddenvar
}
@@ -50,5 +50,5 @@ define void()* @get_func() {
ret void()* bitcast(void()*()* @get_func to void()*)
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:get_func
-; CHECK: ldr x0, [x[[GOTHI]], #:got_lo12:get_func]
+; CHECK: ldr x0, [x[[GOTHI]], {{#?}}:got_lo12:get_func]
}
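
The recurring edits from literal #:lo12: and #:got_lo12: to {{#?}}:lo12: and {{#?}}:got_lo12: in this and the surrounding files relax the FileCheck patterns: {{#?}} is an inline regex for an optional '#', so each check accepts the relocation operand whether or not the printed assembly carries the leading '#'. A small illustrative test written against the relaxed pattern; the global and function here are made up for the sketch, not taken from the patch:

; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s

@var = global i32 0

define i32 @load_var() {
; CHECK-LABEL: load_var:
; CHECK: adrp x[[HI:[0-9]+]], var
; Matches "ldr w0, [x8, #:lo12:var]" as well as "ldr w0, [x8, :lo12:var]".
; CHECK: ldr {{w[0-9]+}}, [x[[HI]], {{#?}}:lo12:var]
  %val = load i32* @var
  ret i32 %val
}
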
diff --git a/test/CodeGen/AArch64/bitfield-insert-0.ll b/test/CodeGen/AArch64/bitfield-insert-0.ll
index 37a18b7fb613..da0ed8af3126 100644
--- a/test/CodeGen/AArch64/bitfield-insert-0.ll
+++ b/test/CodeGen/AArch64/bitfield-insert-0.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -filetype=obj < %s | llvm-objdump -disassemble - | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -filetype=obj -o - %s | llvm-objdump -disassemble - | FileCheck %s
; The encoding of lsb -> immr in the CGed bitfield instructions was wrong at one
; point, in the edge case where lsb = 0. Just make sure.
diff --git a/test/CodeGen/AArch64/bitfield-insert.ll b/test/CodeGen/AArch64/bitfield-insert.ll
index 1f046087abc0..2369a55aa92d 100644
--- a/test/CodeGen/AArch64/bitfield-insert.ll
+++ b/test/CodeGen/AArch64/bitfield-insert.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefix=CHECK
; First, a simple example from Clang. The registers could plausibly be
; different, but probably won't be.
@@ -7,8 +7,7 @@
define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
; CHECK-LABEL: from_clang:
-; CHECK: bfi w0, w1, #3, #4
-; CHECK-NEXT: ret
+; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
entry:
%f.coerce.fca.0.extract = extractvalue [1 x i64] %f.coerce, 0
@@ -26,6 +25,7 @@ entry:
define void @test_whole32(i32* %existing, i32* %new) {
; CHECK-LABEL: test_whole32:
+
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #26, #5
%oldval = load volatile i32* %existing
@@ -62,8 +62,10 @@ define void @test_whole64(i64* %existing, i64* %new) {
define void @test_whole32_from64(i64* %existing, i64* %new) {
; CHECK-LABEL: test_whole32_from64:
-; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #{{0|16}}, #16
-; CHECK-NOT: and
+
+
+; CHECK: bfxil {{x[0-9]+}}, {{x[0-9]+}}, #0, #16
+
; CHECK: ret
%oldval = load volatile i64* %existing
@@ -80,8 +82,9 @@ define void @test_whole32_from64(i64* %existing, i64* %new) {
define void @test_32bit_masked(i32 *%existing, i32 *%new) {
; CHECK-LABEL: test_32bit_masked:
+
+; CHECK: and
; CHECK: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
-; CHECK: and {{w[0-9]+}}, [[INSERT]], #0xff
%oldval = load volatile i32* %existing
%oldval_keep = and i32 %oldval, 135 ; = 0x87
@@ -98,8 +101,8 @@ define void @test_32bit_masked(i32 *%existing, i32 *%new) {
define void @test_64bit_masked(i64 *%existing, i64 *%new) {
; CHECK-LABEL: test_64bit_masked:
+; CHECK: and
; CHECK: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
-; CHECK: and {{x[0-9]+}}, [[INSERT]], #0xffff00000000
%oldval = load volatile i64* %existing
%oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000
@@ -117,8 +120,9 @@ define void @test_64bit_masked(i64 *%existing, i64 *%new) {
; Mask is too complicated for literal ANDwwi, make sure other avenues are tried.
define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
; CHECK-LABEL: test_32bit_complexmask:
+
+; CHECK: and
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
-; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
%oldval = load volatile i32* %existing
%oldval_keep = and i32 %oldval, 647 ; = 0x287
@@ -137,6 +141,7 @@ define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
; CHECK-LABEL: test_32bit_badmask:
; CHECK-NOT: bfi
+; CHECK-NOT: bfm
; CHECK: ret
%oldval = load volatile i32* %existing
@@ -156,6 +161,7 @@ define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
; CHECK-LABEL: test_64bit_badmask:
; CHECK-NOT: bfi
+; CHECK-NOT: bfm
; CHECK: ret
%oldval = load volatile i64* %existing
@@ -186,8 +192,7 @@ define void @test_32bit_with_shr(i32* %existing, i32* %new) {
%combined = or i32 %oldval_keep, %newval_masked
store volatile i32 %combined, i32* %existing
; CHECK: lsr [[BIT:w[0-9]+]], {{w[0-9]+}}, #14
-; CHECK: bfi {{w[0-9]}}, [[BIT]], #26, #5
+; CHECK: bfi {{w[0-9]+}}, [[BIT]], #26, #5
ret void
}
-
diff --git a/test/CodeGen/AArch64/bitfield.ll b/test/CodeGen/AArch64/bitfield.ll
index 1c84f5d57854..0e1265372bd8 100644
--- a/test/CodeGen/AArch64/bitfield.ll
+++ b/test/CodeGen/AArch64/bitfield.ll
@@ -1,5 +1,4 @@
-
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK
@var32 = global i32 0
@var64 = global i64 0
@@ -24,7 +23,7 @@ define void @test_extendb(i8 %var) {
%uxt64 = zext i8 %var to i64
store volatile i64 %uxt64, i64* @var64
-; CHECK: uxtb {{x[0-9]+}}, {{w[0-9]+}}
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xff
ret void
}
@@ -48,7 +47,7 @@ define void @test_extendh(i16 %var) {
%uxt64 = zext i16 %var to i64
store volatile i64 %uxt64, i64* @var64
-; CHECK: uxth {{x[0-9]+}}, {{w[0-9]+}}
+; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffff
ret void
}
@@ -61,7 +60,7 @@ define void @test_extendw(i32 %var) {
%uxt64 = zext i32 %var to i64
store volatile i64 %uxt64, i64* @var64
-; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #0, #32
+; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #32
ret void
}
@@ -190,7 +189,6 @@ define i32 @test_ubfx32(i32* %addr) {
define i64 @test_ubfx64(i64* %addr) {
; CHECK-LABEL: test_ubfx64:
; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
-
%fields = load i64* %addr
%shifted = lshr i64 %fields, 25
%masked = and i64 %shifted, 1023
diff --git a/test/CodeGen/AArch64/blockaddress.ll b/test/CodeGen/AArch64/blockaddress.ll
index 8cda431b8e92..3a5dbdc945ca 100644
--- a/test/CodeGen/AArch64/blockaddress.ll
+++ b/test/CodeGen/AArch64/blockaddress.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
@addr = global i8* null
@@ -9,7 +9,7 @@ define void @test_blockaddress() {
%val = load volatile i8** @addr
indirectbr i8* %val, [label %block]
; CHECK: adrp [[DEST_HI:x[0-9]+]], [[DEST_LBL:.Ltmp[0-9]+]]
-; CHECK: add [[DEST:x[0-9]+]], [[DEST_HI]], #:lo12:[[DEST_LBL]]
+; CHECK: add [[DEST:x[0-9]+]], [[DEST_HI]], {{#?}}:lo12:[[DEST_LBL]]
; CHECK: str [[DEST]],
; CHECK: ldr [[NEWDEST:x[0-9]+]]
; CHECK: br [[NEWDEST]]
diff --git a/test/CodeGen/AArch64/bool-loads.ll b/test/CodeGen/AArch64/bool-loads.ll
index 5c7640bc4218..881aeaa15dd5 100644
--- a/test/CodeGen/AArch64/bool-loads.ll
+++ b/test/CodeGen/AArch64/bool-loads.ll
@@ -1,54 +1,54 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
@var = global i1 0
define i32 @test_sextloadi32() {
-; CHECK: test_sextloadi32
+; CHECK-LABEL: test_sextloadi32
%val = load i1* @var
%ret = sext i1 %val to i32
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
-; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
+; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1|sbfx w[0-9]+, w[0-9]+, #0, #1}}
ret i32 %ret
; CHECK: ret
}
define i64 @test_sextloadi64() {
-; CHECK: test_sextloadi64
+; CHECK-LABEL: test_sextloadi64
%val = load i1* @var
%ret = sext i1 %val to i64
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
-; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
+; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1}}
ret i64 %ret
; CHECK: ret
}
define i32 @test_zextloadi32() {
-; CHECK: test_zextloadi32
+; CHECK-LABEL: test_zextloadi32
; It's not actually necessary that "ret" is next, but as far as LLVM
; is concerned only 0 or 1 should be loadable so no extension is
; necessary.
%val = load i1* @var
%ret = zext i1 %val to i32
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
ret i32 %ret
; CHECK-NEXT: ret
}
define i64 @test_zextloadi64() {
-; CHECK: test_zextloadi64
+; CHECK-LABEL: test_zextloadi64
; It's not actually necessary that "ret" is next, but as far as LLVM
; is concerned only 0 or 1 should be loadable so no extension is
; necessary.
%val = load i1* @var
%ret = zext i1 %val to i64
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
ret i64 %ret
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/branch-relax-asm.ll b/test/CodeGen/AArch64/branch-relax-asm.ll
new file mode 100644
index 000000000000..7409c84e6180
--- /dev/null
+++ b/test/CodeGen/AArch64/branch-relax-asm.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=aarch64-apple-ios7.0 -disable-block-placement -aarch64-tbz-offset-bits=4 -o - %s | FileCheck %s
+define i32 @test_asm_length(i32 %in) {
+; CHECK-LABEL: test_asm_length:
+
+ ; It would be more natural to use just one "tbnz %false" here, but if the
+ ; number of instructions in the asm is counted reasonably, that block is out
+ ; of the limited range we gave tbz. So branch relaxation has to invert the
+ ; condition.
+; CHECK: tbz w0, #0, [[TRUE:LBB[0-9]+_[0-9]+]]
+; CHECK: b [[FALSE:LBB[0-9]+_[0-9]+]]
+
+; CHECK: [[TRUE]]:
+; CHECK: orr w0, wzr, #0x4
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+
+; CHECK: [[FALSE]]:
+; CHECK: ret
+
+ %val = and i32 %in, 1
+ %tst = icmp eq i32 %val, 0
+ br i1 %tst, label %true, label %false
+
+true:
+ call void asm sideeffect "nop\0A\09nop\0A\09nop\0A\09nop\0A\09nop\0A\09nop", ""()
+ ret i32 4
+
+false:
+ ret i32 0
+}
diff --git a/test/CodeGen/AArch64/breg.ll b/test/CodeGen/AArch64/breg.ll
index 1ed5b9b755dd..952404495ce5 100644
--- a/test/CodeGen/AArch64/breg.ll
+++ b/test/CodeGen/AArch64/breg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s
@stored_label = global i8* null
@@ -7,7 +7,7 @@ define void @foo() {
%lab = load i8** @stored_label
indirectbr i8* %lab, [label %otherlab, label %retlab]
; CHECK: adrp {{x[0-9]+}}, stored_label
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:stored_label]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:stored_label]
; CHECK: br {{x[0-9]+}}
otherlab:
diff --git a/test/CodeGen/AArch64/callee-save.ll b/test/CodeGen/AArch64/callee-save.ll
index 52243b05b4b9..046e6ceac077 100644
--- a/test/CodeGen/AArch64/callee-save.ll
+++ b/test/CodeGen/AArch64/callee-save.ll
@@ -1,14 +1,14 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
@var = global float 0.0
define void @foo() {
; CHECK-LABEL: foo:
-; CHECK: stp d14, d15, [sp
-; CHECK: stp d12, d13, [sp
-; CHECK: stp d10, d11, [sp
-; CHECK: stp d8, d9, [sp
+; CHECK: stp d15, d14, [sp
+; CHECK: stp d13, d12, [sp
+; CHECK: stp d11, d10, [sp
+; CHECK: stp d9, d8, [sp
; Create lots of live variables to exhaust the supply of
; caller-saved registers
@@ -78,9 +78,9 @@ define void @foo() {
store volatile float %val31, float* @var
store volatile float %val32, float* @var
-; CHECK: ldp d8, d9, [sp
-; CHECK: ldp d10, d11, [sp
-; CHECK: ldp d12, d13, [sp
-; CHECK: ldp d14, d15, [sp
+; CHECK: ldp d9, d8, [sp
+; CHECK: ldp d11, d10, [sp
+; CHECK: ldp d13, d12, [sp
+; CHECK: ldp d15, d14, [sp
ret void
}
diff --git a/test/CodeGen/AArch64/cmpxchg-idioms.ll b/test/CodeGen/AArch64/cmpxchg-idioms.ll
new file mode 100644
index 000000000000..0c008c269794
--- /dev/null
+++ b/test/CodeGen/AArch64/cmpxchg-idioms.ll
@@ -0,0 +1,93 @@
+; RUN: llc -mtriple=aarch64-apple-ios7.0 -o - %s | FileCheck %s
+
+define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_return:
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxr [[LOADED:w[0-9]+]], [x0]
+; CHECK: cmp [[LOADED]], w1
+; CHECK: b.ne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: stlxr [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x0]
+; CHECK: cbnz [[STATUS]], [[LOOP]]
+
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: orr w0, wzr, #0x1
+; CHECK: ret
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: mov w0, wzr
+; CHECK: ret
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %conv = zext i1 %success to i32
+ ret i32 %conv
+}
+
+define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
+; CHECK-LABEL: test_return_bool:
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxrb [[LOADED:w[0-9]+]], [x0]
+; CHECK: cmp [[LOADED]], w1, uxtb
+; CHECK: b.ne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: stlxrb [[STATUS:w[0-9]+]], {{w[0-9]+}}, [x0]
+; CHECK: cbnz [[STATUS]], [[LOOP]]
+
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+ ; FIXME: DAG combine should be able to deal with this.
+; CHECK: orr [[TMP:w[0-9]+]], wzr, #0x1
+; CHECK: eor w0, [[TMP]], #0x1
+; CHECK: ret
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: mov [[TMP:w[0-9]+]], wzr
+; CHECK: eor w0, [[TMP]], #0x1
+; CHECK: ret
+
+ %pair = cmpxchg i8* %value, i8 %oldValue, i8 %newValue acq_rel monotonic
+ %success = extractvalue { i8, i1 } %pair, 1
+ %failure = xor i1 %success, 1
+ ret i1 %failure
+}
+
+define void @test_conditional(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_conditional:
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldaxr [[LOADED:w[0-9]+]], [x0]
+; CHECK: cmp [[LOADED]], w1
+; CHECK: b.ne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: stlxr [[STATUS:w[0-9]+]], w2, [x0]
+; CHECK: cbnz [[STATUS]], [[LOOP]]
+
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: b _bar
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: b _baz
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ tail call void @bar() #2
+ br label %end
+
+false:
+ tail call void @baz() #2
+ br label %end
+
+end:
+ ret void
+}
+
+declare void @bar()
+declare void @baz()
diff --git a/test/CodeGen/AArch64/code-model-large-abs.ll b/test/CodeGen/AArch64/code-model-large-abs.ll
index b387f285d1d4..ca92500855b4 100644
--- a/test/CodeGen/AArch64/code-model-large-abs.ll
+++ b/test/CodeGen/AArch64/code-model-large-abs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -code-model=large -o - %s | FileCheck %s
@var8 = global i8 0
@var16 = global i16 0
diff --git a/test/CodeGen/AArch64/compare-branch.ll b/test/CodeGen/AArch64/compare-branch.ll
index 75efd9d4a0d6..a1a87cf51a1a 100644
--- a/test/CodeGen/AArch64/compare-branch.ll
+++ b/test/CodeGen/AArch64/compare-branch.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
@var32 = global i32 0
@var64 = global i64 0
diff --git a/test/CodeGen/AArch64/compiler-ident.ll b/test/CodeGen/AArch64/compiler-ident.ll
new file mode 100644
index 000000000000..035057194a06
--- /dev/null
+++ b/test/CodeGen/AArch64/compiler-ident.ll
@@ -0,0 +1,12 @@
+; RUN: llc -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
+
+; ModuleID = 'compiler-ident.c'
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+; CHECK: .ident "some LLVM version"
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"some LLVM version"}
+
diff --git a/test/CodeGen/AArch64/complex-fp-to-int.ll b/test/CodeGen/AArch64/complex-fp-to-int.ll
new file mode 100644
index 000000000000..13cf762c3d2e
--- /dev/null
+++ b/test/CodeGen/AArch64/complex-fp-to-int.ll
@@ -0,0 +1,141 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+define <2 x i64> @test_v2f32_to_signed_v2i64(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_signed_v2i64:
+; CHECK: fcvtl [[VAL64:v[0-9]+]].2d, v0.2s
+; CHECK: fcvtzs.2d v0, [[VAL64]]
+
+ %val = fptosi <2 x float> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <2 x i64> @test_v2f32_to_unsigned_v2i64(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_unsigned_v2i64:
+; CHECK: fcvtl [[VAL64:v[0-9]+]].2d, v0.2s
+; CHECK: fcvtzu.2d v0, [[VAL64]]
+
+ %val = fptoui <2 x float> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <2 x i16> @test_v2f32_to_signed_v2i16(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_signed_v2i16:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptosi <2 x float> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i16> @test_v2f32_to_unsigned_v2i16(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_unsigned_v2i16:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptoui <2 x float> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i8> @test_v2f32_to_signed_v2i8(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_signed_v2i8:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptosi <2 x float> %in to <2 x i8>
+ ret <2 x i8> %val
+}
+
+define <2 x i8> @test_v2f32_to_unsigned_v2i8(<2 x float> %in) {
+; CHECK-LABEL: test_v2f32_to_unsigned_v2i8:
+; CHECK: fcvtzs.2s v0, v0
+
+ %val = fptoui <2 x float> %in to <2 x i8>
+ ret <2 x i8> %val
+}
+
+define <4 x i16> @test_v4f32_to_signed_v4i16(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_signed_v4i16:
+; CHECK: fcvtzs.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptosi <4 x float> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <4 x i16> @test_v4f32_to_unsigned_v4i16(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_unsigned_v4i16:
+; CHECK: fcvtzu.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptoui <4 x float> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <4 x i8> @test_v4f32_to_signed_v4i8(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_signed_v4i8:
+; CHECK: fcvtzs.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptosi <4 x float> %in to <4 x i8>
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @test_v4f32_to_unsigned_v4i8(<4 x float> %in) {
+; CHECK-LABEL: test_v4f32_to_unsigned_v4i8:
+; CHECK: fcvtzs.4s [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.4h v0, [[VAL64]]
+
+ %val = fptoui <4 x float> %in to <4 x i8>
+ ret <4 x i8> %val
+}
+
+define <2 x i32> @test_v2f64_to_signed_v2i32(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_signed_v2i32:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptosi <2 x double> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x i32> @test_v2f64_to_unsigned_v2i32(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_unsigned_v2i32:
+; CHECK: fcvtzu.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptoui <2 x double> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x i16> @test_v2f64_to_signed_v2i16(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_signed_v2i16:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptosi <2 x double> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i16> @test_v2f64_to_unsigned_v2i16(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_unsigned_v2i16:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptoui <2 x double> %in to <2 x i16>
+ ret <2 x i16> %val
+}
+
+define <2 x i8> @test_v2f64_to_signed_v2i8(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_signed_v2i8:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptosi <2 x double> %in to <2 x i8>
+ ret <2 x i8> %val
+}
+
+define <2 x i8> @test_v2f64_to_unsigned_v2i8(<2 x double> %in) {
+; CHECK-LABEL: test_v2f64_to_unsigned_v2i8:
+; CHECK: fcvtzs.2d [[VAL64:v[0-9]+]], v0
+; CHECK: xtn.2s v0, [[VAL64]]
+
+ %val = fptoui <2 x double> %in to <2 x i8>
+ ret <2 x i8> %val
+}
diff --git a/test/CodeGen/AArch64/complex-int-to-fp.ll b/test/CodeGen/AArch64/complex-int-to-fp.ll
new file mode 100644
index 000000000000..5c943f95c355
--- /dev/null
+++ b/test/CodeGen/AArch64/complex-int-to-fp.ll
@@ -0,0 +1,164 @@
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+
+; CHECK: autogen_SD19655
+; CHECK: scvtf
+; CHECK: ret
+define void @autogen_SD19655(<2 x i64>* %addr, <2 x float>* %addrfloat) {
+ %T = load <2 x i64>* %addr
+ %F = sitofp <2 x i64> %T to <2 x float>
+ store <2 x float> %F, <2 x float>* %addrfloat
+ ret void
+}
+
+define <2 x double> @test_signed_v2i32_to_v2f64(<2 x i32> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i32_to_v2f64:
+; CHECK: sshll.2d [[VAL64:v[0-9]+]], v0, #0
+; CHECK-NEXT: scvtf.2d v0, [[VAL64]]
+; CHECK-NEXT: ret
+ %conv = sitofp <2 x i32> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x double> @test_unsigned_v2i32_to_v2f64(<2 x i32> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i32_to_v2f64
+; CHECK: ushll.2d [[VAL64:v[0-9]+]], v0, #0
+; CHECK-NEXT: ucvtf.2d v0, [[VAL64]]
+; CHECK-NEXT: ret
+ %conv = uitofp <2 x i32> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x double> @test_signed_v2i16_to_v2f64(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i16_to_v2f64:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #16
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #16
+; CHECK: sshll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: scvtf.2d v0, [[VAL64]]
+
+ %conv = sitofp <2 x i16> %v to <2 x double>
+ ret <2 x double> %conv
+}
+define <2 x double> @test_unsigned_v2i16_to_v2f64(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i16_to_v2f64
+; CHECK: movi d[[MASK:[0-9]+]], #0x00ffff0000ffff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ushll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: ucvtf.2d v0, [[VAL64]]
+
+ %conv = uitofp <2 x i16> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x double> @test_signed_v2i8_to_v2f64(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i8_to_v2f64:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #24
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #24
+; CHECK: sshll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: scvtf.2d v0, [[VAL64]]
+
+ %conv = sitofp <2 x i8> %v to <2 x double>
+ ret <2 x double> %conv
+}
+define <2 x double> @test_unsigned_v2i8_to_v2f64(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i8_to_v2f64
+; CHECK: movi d[[MASK:[0-9]+]], #0x0000ff000000ff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ushll.2d [[VAL64:v[0-9]+]], [[VAL32]], #0
+; CHECK: ucvtf.2d v0, [[VAL64]]
+
+ %conv = uitofp <2 x i8> %v to <2 x double>
+ ret <2 x double> %conv
+}
+
+define <2 x float> @test_signed_v2i64_to_v2f32(<2 x i64> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i64_to_v2f32:
+; CHECK: scvtf.2d [[VAL64:v[0-9]+]], v0
+; CHECK: fcvtn v0.2s, [[VAL64]].2d
+
+ %conv = sitofp <2 x i64> %v to <2 x float>
+ ret <2 x float> %conv
+}
+define <2 x float> @test_unsigned_v2i64_to_v2f32(<2 x i64> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i64_to_v2f32
+; CHECK: ucvtf.2d [[VAL64:v[0-9]+]], v0
+; CHECK: fcvtn v0.2s, [[VAL64]].2d
+
+ %conv = uitofp <2 x i64> %v to <2 x float>
+ ret <2 x float> %conv
+}
+
+define <2 x float> @test_signed_v2i16_to_v2f32(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i16_to_v2f32:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #16
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #16
+; CHECK: scvtf.2s v0, [[VAL32]]
+
+ %conv = sitofp <2 x i16> %v to <2 x float>
+ ret <2 x float> %conv
+}
+define <2 x float> @test_unsigned_v2i16_to_v2f32(<2 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i16_to_v2f32
+; CHECK: movi d[[MASK:[0-9]+]], #0x00ffff0000ffff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ucvtf.2s v0, [[VAL32]]
+
+ %conv = uitofp <2 x i16> %v to <2 x float>
+ ret <2 x float> %conv
+}
+
+define <2 x float> @test_signed_v2i8_to_v2f32(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v2i8_to_v2f32:
+; CHECK: shl.2s [[TMP:v[0-9]+]], v0, #24
+; CHECK: sshr.2s [[VAL32:v[0-9]+]], [[TMP]], #24
+; CHECK: scvtf.2s v0, [[VAL32]]
+
+ %conv = sitofp <2 x i8> %v to <2 x float>
+ ret <2 x float> %conv
+}
+define <2 x float> @test_unsigned_v2i8_to_v2f32(<2 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v2i8_to_v2f32
+; CHECK: movi d[[MASK:[0-9]+]], #0x0000ff000000ff
+; CHECK: and.8b [[VAL32:v[0-9]+]], v0, v[[MASK]]
+; CHECK: ucvtf.2s v0, [[VAL32]]
+
+ %conv = uitofp <2 x i8> %v to <2 x float>
+ ret <2 x float> %conv
+}
+
+define <4 x float> @test_signed_v4i16_to_v4f32(<4 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v4i16_to_v4f32:
+; CHECK: sshll.4s [[VAL32:v[0-9]+]], v0, #0
+; CHECK: scvtf.4s v0, [[VAL32]]
+
+ %conv = sitofp <4 x i16> %v to <4 x float>
+ ret <4 x float> %conv
+}
+
+define <4 x float> @test_unsigned_v4i16_to_v4f32(<4 x i16> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v4i16_to_v4f32
+; CHECK: ushll.4s [[VAL32:v[0-9]+]], v0, #0
+; CHECK: ucvtf.4s v0, [[VAL32]]
+
+ %conv = uitofp <4 x i16> %v to <4 x float>
+ ret <4 x float> %conv
+}
+
+define <4 x float> @test_signed_v4i8_to_v4f32(<4 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_signed_v4i8_to_v4f32:
+; CHECK: shl.4h [[TMP:v[0-9]+]], v0, #8
+; CHECK: sshr.4h [[VAL16:v[0-9]+]], [[TMP]], #8
+; CHECK: sshll.4s [[VAL32:v[0-9]+]], [[VAL16]], #0
+; CHECK: scvtf.4s v0, [[VAL32]]
+
+ %conv = sitofp <4 x i8> %v to <4 x float>
+ ret <4 x float> %conv
+}
+define <4 x float> @test_unsigned_v4i8_to_v4f32(<4 x i8> %v) nounwind readnone {
+; CHECK-LABEL: test_unsigned_v4i8_to_v4f32
+; CHECK: bic.4h v0, #0xff, lsl #8
+; CHECK: ushll.4s [[VAL32:v[0-9]+]], v0, #0
+; CHECK: ucvtf.4s v0, [[VAL32]]
+
+ %conv = uitofp <4 x i8> %v to <4 x float>
+ ret <4 x float> %conv
+}
diff --git a/test/CodeGen/AArch64/cond-sel.ll b/test/CodeGen/AArch64/cond-sel.ll
index 9c1dfeb3c8d3..5f81cba66cbc 100644
--- a/test/CodeGen/AArch64/cond-sel.ll
+++ b/test/CodeGen/AArch64/cond-sel.ll
@@ -1,25 +1,25 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mcpu=cyclone | FileCheck %s --check-prefix=CHECK
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var32 = global i32 0
@var64 = global i64 0
-define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csel:
%tst1 = icmp ugt i32 %lhs32, %rhs32
%val1 = select i1 %tst1, i32 42, i32 52
store i32 %val1, i32* @var32
-; CHECK-DAG: movz [[W52:w[0-9]+]], #52
-; CHECK-DAG: movz [[W42:w[0-9]+]], #42
+; CHECK-DAG: movz [[W52:w[0-9]+]], #{{52|0x34}}
+; CHECK-DAG: movz [[W42:w[0-9]+]], #{{42|0x2a}}
; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi
%rhs64 = sext i32 %rhs32 to i64
%tst2 = icmp sle i64 %lhs64, %rhs64
%val2 = select i1 %tst2, i64 %lhs64, i64 %rhs64
store i64 %val2, i64* @var64
-; CHECK-DAG: cmp [[LHS:x[0-9]+]], [[RHS:w[0-9]+]], sxtw
-; CHECK-DAG: sxtw [[EXT_RHS:x[0-9]+]], [[RHS]]
+; CHECK: sxtw [[EXT_RHS:x[0-9]+]], {{[wx]}}[[RHS:[0-9]+]]
+; CHECK: cmp [[LHS:x[0-9]+]], w[[RHS]], sxtw
; CHECK: csel {{x[0-9]+}}, [[LHS]], [[EXT_RHS]], le
ret void
@@ -34,8 +34,8 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r
; CHECK-NOFP-NOT: fcmp
%val1 = select i1 %tst1, i32 42, i32 52
store i32 %val1, i32* @var32
-; CHECK: movz [[W52:w[0-9]+]], #52
-; CHECK: movz [[W42:w[0-9]+]], #42
+; CHECK: movz [[W52:w[0-9]+]], #{{52|0x34}}
+; CHECK: movz [[W42:w[0-9]+]], #{{42|0x2a}}
; CHECK: csel [[MAYBETRUE:w[0-9]+]], [[W42]], [[W52]], mi
; CHECK: csel {{w[0-9]+}}, [[W42]], [[MAYBETRUE]], gt
@@ -45,17 +45,17 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r
; CHECK-NOFP-NOT: fcmp
%val2 = select i1 %tst2, i64 9, i64 15
store i64 %val2, i64* @var64
-; CHECK: movz [[CONST15:x[0-9]+]], #15
-; CHECK: movz [[CONST9:x[0-9]+]], #9
-; CHECK: csel [[MAYBETRUE:x[0-9]+]], [[CONST9]], [[CONST15]], eq
-; CHECK: csel {{x[0-9]+}}, [[CONST9]], [[MAYBETRUE]], vs
+; CHECK: orr w[[CONST15:[0-9]+]], wzr, #0xf
+; CHECK: movz {{[wx]}}[[CONST9:[0-9]+]], #{{9|0x9}}
+; CHECK: csel [[MAYBETRUE:x[0-9]+]], x[[CONST9]], x[[CONST15]], eq
+; CHECK: csel {{x[0-9]+}}, x[[CONST9]], [[MAYBETRUE]], vs
ret void
; CHECK: ret
}
-define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csinc:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
@@ -95,7 +95,7 @@ define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK: ret
}
-define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csinv:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
@@ -135,7 +135,7 @@ define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK: ret
}
-define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
+define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csneg:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
@@ -184,13 +184,13 @@ define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
%val1 = zext i1 %tst1 to i32
store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: csinc {{w[0-9]+}}, wzr, wzr, ne
+; CHECK: cset {{w[0-9]+}}, eq
%rhs64 = sext i32 %rhs to i64
%tst2 = icmp ule i64 %lhs64, %rhs64
%val2 = zext i1 %tst2 to i64
store i64 %val2, i64* @var64
-; CHECK: csinc {{w[0-9]+}}, wzr, wzr, hi
+; CHECK: cset {{w[0-9]+}}, ls
ret void
; CHECK: ret
@@ -203,13 +203,13 @@ define void @test_csetm(i32 %lhs, i32 %rhs, i64 %lhs64) {
%val1 = sext i1 %tst1 to i32
store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: csinv {{w[0-9]+}}, wzr, wzr, ne
+; CHECK: csetm {{w[0-9]+}}, eq
%rhs64 = sext i32 %rhs to i64
%tst2 = icmp ule i64 %lhs64, %rhs64
%val2 = sext i1 %tst2 to i64
store i64 %val2, i64* @var64
-; CHECK: csinv {{x[0-9]+}}, xzr, xzr, hi
+; CHECK: csetm {{x[0-9]+}}, ls
ret void
; CHECK: ret
diff --git a/test/CodeGen/AArch64/cpus.ll b/test/CodeGen/AArch64/cpus.ll
new file mode 100644
index 000000000000..f0f36bd5cea5
--- /dev/null
+++ b/test/CodeGen/AArch64/cpus.ll
@@ -0,0 +1,14 @@
+; This tests that llc accepts all valid AArch64 CPUs
+
+
+; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a53 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a57 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
+
+; CHECK-NOT: {{.*}} is not a recognized processor for this target
+; INVALID: {{.*}} is not a recognized processor for this target
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
diff --git a/test/CodeGen/AArch64/directcond.ll b/test/CodeGen/AArch64/directcond.ll
index 12c7b6aed643..fbea4a6e5838 100644
--- a/test/CodeGen/AArch64/directcond.ll
+++ b/test/CodeGen/AArch64/directcond.ll
@@ -1,11 +1,10 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s --check-prefix=CHECK
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-NOFP %s
define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) {
; CHECK-LABEL: test_select_i32:
%val = select i1 %bit, i32 %a, i32 %b
-; CHECK: movz [[ONE:w[0-9]+]], #1
-; CHECK: tst w0, [[ONE]]
+; CHECK: tst w0, #0x1
; CHECK-NEXT: csel w0, w1, w2, ne
ret i32 %val
@@ -14,8 +13,7 @@ define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) {
define i64 @test_select_i64(i1 %bit, i64 %a, i64 %b) {
; CHECK-LABEL: test_select_i64:
%val = select i1 %bit, i64 %a, i64 %b
-; CHECK: movz [[ONE:w[0-9]+]], #1
-; CHECK: tst w0, [[ONE]]
+; CHECK: tst w0, #0x1
; CHECK-NEXT: csel x0, x1, x2, ne
ret i64 %val
@@ -24,8 +22,7 @@ define i64 @test_select_i64(i1 %bit, i64 %a, i64 %b) {
define float @test_select_float(i1 %bit, float %a, float %b) {
; CHECK-LABEL: test_select_float:
%val = select i1 %bit, float %a, float %b
-; CHECK: movz [[ONE:w[0-9]+]], #1
-; CHECK: tst w0, [[ONE]]
+; CHECK: tst w0, #0x1
; CHECK-NEXT: fcsel s0, s0, s1, ne
; CHECK-NOFP-NOT: fcsel
ret float %val
@@ -34,8 +31,7 @@ define float @test_select_float(i1 %bit, float %a, float %b) {
define double @test_select_double(i1 %bit, double %a, double %b) {
; CHECK-LABEL: test_select_double:
%val = select i1 %bit, double %a, double %b
-; CHECK: movz [[ONE:w[0-9]+]], #1
-; CHECK: tst w0, [[ONE]]
+; CHECK: tst w0, #0x1
; CHECK-NEXT: fcsel d0, d0, d1, ne
; CHECK-NOFP-NOT: fcsel
@@ -45,7 +41,7 @@ define double @test_select_double(i1 %bit, double %a, double %b) {
define i32 @test_brcond(i1 %bit) {
; CHECK-LABEL: test_brcond:
br i1 %bit, label %true, label %false
-; CHECK: tbz {{w[0-9]+}}, #0, .LBB
+; CHECK: tbz {{w[0-9]+}}, #0, {{.?LBB}}
true:
ret i32 0
@@ -57,7 +53,7 @@ define i1 @test_setcc_float(float %lhs, float %rhs) {
; CHECK: test_setcc_float
%val = fcmp oeq float %lhs, %rhs
; CHECK: fcmp s0, s1
-; CHECK: csinc w0, wzr, wzr, ne
+; CHECK: cset w0, eq
; CHECK-NOFP-NOT: fcmp
ret i1 %val
}
@@ -66,7 +62,7 @@ define i1 @test_setcc_double(double %lhs, double %rhs) {
; CHECK: test_setcc_double
%val = fcmp oeq double %lhs, %rhs
; CHECK: fcmp d0, d1
-; CHECK: csinc w0, wzr, wzr, ne
+; CHECK: cset w0, eq
; CHECK-NOFP-NOT: fcmp
ret i1 %val
}
@@ -75,7 +71,7 @@ define i1 @test_setcc_i32(i32 %lhs, i32 %rhs) {
; CHECK: test_setcc_i32
%val = icmp ugt i32 %lhs, %rhs
; CHECK: cmp w0, w1
-; CHECK: csinc w0, wzr, wzr, ls
+; CHECK: cset w0, hi
ret i1 %val
}
@@ -83,6 +79,6 @@ define i1 @test_setcc_i64(i64 %lhs, i64 %rhs) {
; CHECK: test_setcc_i64
%val = icmp ne i64 %lhs, %rhs
; CHECK: cmp x0, x1
-; CHECK: csinc w0, wzr, wzr, eq
+; CHECK: cset w0, ne
ret i1 %val
}
diff --git a/test/CodeGen/AArch64/dp-3source.ll b/test/CodeGen/AArch64/dp-3source.ll
index 81d9e15532fa..22bd4a844e1a 100644
--- a/test/CodeGen/AArch64/dp-3source.ll
+++ b/test/CodeGen/AArch64/dp-3source.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
define i32 @test_madd32(i32 %val0, i32 %val1, i32 %val2) {
; CHECK-LABEL: test_madd32:
diff --git a/test/CodeGen/AArch64/dp1.ll b/test/CodeGen/AArch64/dp1.ll
index 6a8d55cdc7ea..662b41588541 100644
--- a/test/CodeGen/AArch64/dp1.ll
+++ b/test/CodeGen/AArch64/dp1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
@var32 = global i32 0
@var64 = global i64 0
diff --git a/test/CodeGen/AArch64/dp2.ll b/test/CodeGen/AArch64/dp2.ll
index 48b0701ad1fa..71b31696372a 100644
--- a/test/CodeGen/AArch64/dp2.ll
+++ b/test/CodeGen/AArch64/dp2.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64 | FileCheck %s
@var32_0 = global i32 0
@var32_1 = global i32 0
@@ -13,7 +13,7 @@ define void @rorv_i64() {
%val3_tmp = shl i64 %val0_tmp, %val2_tmp
%val4_tmp = lshr i64 %val0_tmp, %val1_tmp
%val5_tmp = or i64 %val3_tmp, %val4_tmp
-; CHECK: ror {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: {{ror|rorv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val5_tmp, i64* @var64_0
ret void
}
@@ -23,7 +23,7 @@ define void @asrv_i64() {
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = ashr i64 %val0_tmp, %val1_tmp
-; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: {{asr|asrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_1
ret void
}
@@ -33,7 +33,7 @@ define void @lsrv_i64() {
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = lshr i64 %val0_tmp, %val1_tmp
-; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: {{lsr|lsrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_0
ret void
}
@@ -43,7 +43,7 @@ define void @lslv_i64() {
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = shl i64 %val0_tmp, %val1_tmp
-; CHECK: lsl {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: {{lsl|lslv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_1
ret void
}
@@ -75,7 +75,7 @@ define void @lsrv_i32() {
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
%val4_tmp = lshr i32 %val0_tmp, %val2_tmp
-; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_0
ret void
}
@@ -86,7 +86,7 @@ define void @lslv_i32() {
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
%val4_tmp = shl i32 %val0_tmp, %val2_tmp
-; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_1
ret void
}
@@ -100,7 +100,7 @@ define void @rorv_i32() {
%val3_tmp = shl i32 %val0_tmp, %val2_tmp
%val4_tmp = lshr i32 %val0_tmp, %val1_tmp
%val5_tmp = or i32 %val3_tmp, %val4_tmp
-; CHECK: ror {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: {{ror|rorv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val5_tmp, i32* @var32_0
ret void
}
@@ -111,7 +111,7 @@ define void @asrv_i32() {
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
%val4_tmp = ashr i32 %val0_tmp, %val2_tmp
-; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_1
ret void
}
@@ -143,7 +143,7 @@ define i32 @test_lsl32() {
%val = load i32* @var32_0
%ret = shl i32 1, %val
-; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i32 %ret
}
@@ -153,7 +153,7 @@ define i32 @test_lsr32() {
%val = load i32* @var32_0
%ret = lshr i32 1, %val
-; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i32 %ret
}
@@ -163,7 +163,7 @@ define i32 @test_asr32(i32 %in) {
%val = load i32* @var32_0
%ret = ashr i32 %in, %val
-; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i32 %ret
}
diff --git a/test/CodeGen/AArch64/eliminate-trunc.ll b/test/CodeGen/AArch64/eliminate-trunc.ll
new file mode 100644
index 000000000000..ea86a084cb42
--- /dev/null
+++ b/test/CodeGen/AArch64/eliminate-trunc.ll
@@ -0,0 +1,39 @@
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-apple-ios7.0 -mcpu=cyclone | FileCheck %s
+
+; Check trunc i64 operation is translated as a subregister access
+; eliminating an i32 induction variable.
+
+; CHECK-NOT: add {{x[0-9]+}}, {{x[0-9]+}}, #1
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #1
+; CHECK-NEXT: cmp {{w[0-9]+}}, {{w[0-9]+}}
+define void @test1_signed([8 x i8]* nocapture %a, i8* nocapture readonly %box, i8 %limit) minsize {
+entry:
+ %conv = zext i8 %limit to i32
+ %cmp223 = icmp eq i8 %limit, 0
+ br i1 %cmp223, label %for.end15, label %for.body4.lr.ph.us
+
+for.body4.us:
+ %indvars.iv = phi i64 [ 0, %for.body4.lr.ph.us ], [ %indvars.iv.next, %for.body4.us ]
+ %arrayidx6.us = getelementptr inbounds [8 x i8]* %a, i64 %indvars.iv26, i64 %indvars.iv
+ %0 = load i8* %arrayidx6.us, align 1
+ %idxprom7.us = zext i8 %0 to i64
+ %arrayidx8.us = getelementptr inbounds i8* %box, i64 %idxprom7.us
+ %1 = load i8* %arrayidx8.us, align 1
+ store i8 %1, i8* %arrayidx6.us, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %2 = trunc i64 %indvars.iv.next to i32
+ %cmp2.us = icmp slt i32 %2, %conv
+ br i1 %cmp2.us, label %for.body4.us, label %for.cond1.for.inc13_crit_edge.us
+
+for.body4.lr.ph.us:
+ %indvars.iv26 = phi i64 [ %indvars.iv.next27, %for.cond1.for.inc13_crit_edge.us ], [ 0, %entry ]
+ br label %for.body4.us
+
+for.cond1.for.inc13_crit_edge.us:
+ %indvars.iv.next27 = add nuw nsw i64 %indvars.iv26, 1
+ %exitcond28 = icmp eq i64 %indvars.iv26, 3
+ br i1 %exitcond28, label %for.end15, label %for.body4.lr.ph.us
+
+for.end15:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/extern-weak.ll b/test/CodeGen/AArch64/extern-weak.ll
index 322b3f4522d6..ce5c0f686615 100644
--- a/test/CodeGen/AArch64/extern-weak.ll
+++ b/test/CodeGen/AArch64/extern-weak.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -o - < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large -o - < %s | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large -o - %s | FileCheck --check-prefix=CHECK-LARGE %s
declare extern_weak i32 @var()
@@ -7,10 +7,10 @@ define i32()* @foo() {
; The usual ADRP/ADD pair can't be used for a weak reference because it must
; evaluate to 0 if the symbol is undefined. We use a litpool entry.
ret i32()* @var
-; CHECK: .LCPI0_0:
-; CHECK-NEXT: .xword var
-; CHECK: ldr x0, [{{x[0-9]+}}, #:lo12:.LCPI0_0]
+
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:var
+; CHECK: ldr x0, [x[[ADDRHI]], :got_lo12:var]
; In the large model, the usual relocations are absolute and can
; materialise 0.
@@ -25,27 +25,29 @@ define i32()* @foo() {
define i32* @bar() {
%addr = getelementptr [10 x i32]* @arr_var, i32 0, i32 5
-; CHECK: .LCPI1_0:
-; CHECK-NEXT: .xword arr_var
-; CHECK: ldr [[BASE:x[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI1_0]
+
+; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:arr_var
+; CHECK: ldr [[BASE:x[0-9]+]], [x[[ADDRHI]], :got_lo12:arr_var]
; CHECK: add x0, [[BASE]], #20
+
ret i32* %addr
; In the large model, the usual relocations are absolute and can
; materialise 0.
-; CHECK-LARGE: movz x0, #:abs_g3:arr_var
-; CHECK-LARGE: movk x0, #:abs_g2_nc:arr_var
-; CHECK-LARGE: movk x0, #:abs_g1_nc:arr_var
-; CHECK-LARGE: movk x0, #:abs_g0_nc:arr_var
+; CHECK-LARGE: movz [[ADDR:x[0-9]+]], #:abs_g3:arr_var
+; CHECK-LARGE: movk [[ADDR]], #:abs_g2_nc:arr_var
+; CHECK-LARGE: movk [[ADDR]], #:abs_g1_nc:arr_var
+; CHECK-LARGE: movk [[ADDR]], #:abs_g0_nc:arr_var
}
@defined_weak_var = internal unnamed_addr global i32 0
define i32* @wibble() {
ret i32* @defined_weak_var
+
; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
-; CHECK: add x0, [[BASE]], #:lo12:defined_weak_var
+; CHECK: add x0, [[BASE]], :lo12:defined_weak_var
; CHECK-LARGE: movz x0, #:abs_g3:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
diff --git a/test/CodeGen/AArch64/extract.ll b/test/CodeGen/AArch64/extract.ll
index 62d9ed2fc9d9..1fc9387fecc0 100644
--- a/test/CodeGen/AArch64/extract.ll
+++ b/test/CodeGen/AArch64/extract.ll
@@ -1,11 +1,11 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
define i64 @ror_i64(i64 %in) {
; CHECK-LABEL: ror_i64:
%left = shl i64 %in, 19
%right = lshr i64 %in, 45
%val5 = or i64 %left, %right
-; CHECK: extr {{x[0-9]+}}, x0, x0, #45
+; CHECK: ror {{x[0-9]+}}, x0, #45
ret i64 %val5
}
@@ -14,7 +14,7 @@ define i32 @ror_i32(i32 %in) {
%left = shl i32 %in, 9
%right = lshr i32 %in, 23
%val5 = or i32 %left, %right
-; CHECK: extr {{w[0-9]+}}, w0, w0, #23
+; CHECK: ror {{w[0-9]+}}, w0, #23
ret i32 %val5
}
diff --git a/test/CodeGen/AArch64/f16-convert.ll b/test/CodeGen/AArch64/f16-convert.ll
new file mode 100644
index 000000000000..12412d45aa6e
--- /dev/null
+++ b/test/CodeGen/AArch64/f16-convert.ll
@@ -0,0 +1,251 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios -asm-verbose=false | FileCheck %s
+
+define float @load0(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load0:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %tmp = load i16* %a, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load1(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load1:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %tmp = load i16* %a, align 2
+ %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
+ ret double %conv
+}
+
+define float @load2(i16* nocapture readonly %a, i32 %i) nounwind {
+; CHECK-LABEL: load2:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, w1, sxtw #1]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load3(i16* nocapture readonly %a, i32 %i) nounwind {
+; CHECK-LABEL: load3:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, w1, sxtw #1]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %tmp = load i16* %arrayidx, align 2
+ %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
+ ret double %conv
+}
+
+define float @load4(i16* nocapture readonly %a, i64 %i) nounwind {
+; CHECK-LABEL: load4:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load5(i16* nocapture readonly %a, i64 %i) nounwind {
+; CHECK-LABEL: load5:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, x1, lsl #1]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %tmp = load i16* %arrayidx, align 2
+ %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
+ ret double %conv
+}
+
+define float @load6(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load6:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, #20]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load7(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load7:
+; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, #20]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %tmp = load i16* %arrayidx, align 2
+ %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
+ ret double %conv
+}
+
+define float @load8(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load8:
+; CHECK-NEXT: ldur [[HREG:h[0-9]+]], [x0, #-20]
+; CHECK-NEXT: fcvt s0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %tmp = load i16* %arrayidx, align 2
+ %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
+ ret float %tmp1
+}
+
+define double @load9(i16* nocapture readonly %a) nounwind {
+; CHECK-LABEL: load9:
+; CHECK-NEXT: ldur [[HREG:h[0-9]+]], [x0, #-20]
+; CHECK-NEXT: fcvt d0, [[HREG]]
+; CHECK-NEXT: ret
+
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %tmp = load i16* %arrayidx, align 2
+ %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
+ ret double %conv
+}
+
+define void @store0(i16* nocapture %a, float %val) nounwind {
+; CHECK-LABEL: store0:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
+ store i16 %tmp, i16* %a, align 2
+ ret void
+}
+
+define void @store1(i16* nocapture %a, double %val) nounwind {
+; CHECK-LABEL: store1:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
+ store i16 %tmp, i16* %a, align 2
+ ret void
+}
+
+define void @store2(i16* nocapture %a, i32 %i, float %val) nounwind {
+; CHECK-LABEL: store2:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0, w1, sxtw #1]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store3(i16* nocapture %a, i32 %i, double %val) nounwind {
+; CHECK-LABEL: store3:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0, w1, sxtw #1]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
+ %idxprom = sext i32 %i to i64
+ %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store4(i16* nocapture %a, i64 %i, float %val) nounwind {
+; CHECK-LABEL: store4:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store5(i16* nocapture %a, i64 %i, double %val) nounwind {
+; CHECK-LABEL: store5:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
+ %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store6(i16* nocapture %a, float %val) nounwind {
+; CHECK-LABEL: store6:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: str h0, [x0, #20]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store7(i16* nocapture %a, double %val) nounwind {
+; CHECK-LABEL: store7:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: str h0, [x0, #20]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
+ %arrayidx = getelementptr inbounds i16* %a, i64 10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store8(i16* nocapture %a, float %val) nounwind {
+; CHECK-LABEL: store8:
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: stur h0, [x0, #-20]
+; CHECK-NEXT: ret
+
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+define void @store9(i16* nocapture %a, double %val) nounwind {
+; CHECK-LABEL: store9:
+; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: stur h0, [x0, #-20]
+; CHECK-NEXT: ret
+
+ %conv = fptrunc double %val to float
+ %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
+ %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ store i16 %tmp, i16* %arrayidx, align 2
+ ret void
+}
+
+declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
+declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
+declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone
+declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
diff --git a/test/CodeGen/AArch64/fast-isel-mul.ll b/test/CodeGen/AArch64/fast-isel-mul.ll
new file mode 100644
index 000000000000..d02c67f52f8d
--- /dev/null
+++ b/test/CodeGen/AArch64/fast-isel-mul.ll
@@ -0,0 +1,40 @@
+; RUN: llc -fast-isel -fast-isel-abort -mtriple=aarch64 -o - %s | FileCheck %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define void @test_mul8(i8 %lhs, i8 %rhs) {
+; CHECK-LABEL: test_mul8:
+; CHECK: mul w0, w0, w1
+; %lhs = load i8* @var8
+; %rhs = load i8* @var8
+ %prod = mul i8 %lhs, %rhs
+ store i8 %prod, i8* @var8
+ ret void
+}
+
+define void @test_mul16(i16 %lhs, i16 %rhs) {
+; CHECK-LABEL: test_mul16:
+; CHECK: mul w0, w0, w1
+ %prod = mul i16 %lhs, %rhs
+ store i16 %prod, i16* @var16
+ ret void
+}
+
+define void @test_mul32(i32 %lhs, i32 %rhs) {
+; CHECK-LABEL: test_mul32:
+; CHECK: mul w0, w0, w1
+ %prod = mul i32 %lhs, %rhs
+ store i32 %prod, i32* @var32
+ ret void
+}
+
+define void @test_mul64(i64 %lhs, i64 %rhs) {
+; CHECK-LABEL: test_mul64:
+; CHECK: mul x0, x0, x1
+ %prod = mul i64 %lhs, %rhs
+ store i64 %prod, i64* @var64
+ ret void
+}
diff --git a/test/CodeGen/AArch64/fastcc-reserved.ll b/test/CodeGen/AArch64/fastcc-reserved.ll
index c6c050570dd6..a392619a768d 100644
--- a/test/CodeGen/AArch64/fastcc-reserved.ll
+++ b/test/CodeGen/AArch64/fastcc-reserved.ll
@@ -12,8 +12,8 @@ define fastcc void @foo(i32 %in) {
%addr = alloca i8, i32 %in
; Normal frame setup stuff:
-; CHECK: sub sp, sp,
-; CHECK: stp x29, x30
+; CHECK: stp x29, x30, [sp, #-16]!
+; CHECK: mov x29, sp
; Reserve space for call-frame:
; CHECK: sub sp, sp, #16
@@ -26,8 +26,8 @@ define fastcc void @foo(i32 %in) {
; CHECK-NOT: sub sp, sp, #16
; CHECK-NOT: add sp, sp,
-; CHECK: ldp x29, x30
-; CHECK: add sp, sp,
+; CHECK: mov sp, x29
+; CHECK: ldp x29, x30, [sp], #16
ret void
}
@@ -38,8 +38,8 @@ define void @foo1(i32 %in) {
%addr = alloca i8, i32 %in
; Normal frame setup again
-; CHECK: sub sp, sp,
-; CHECK: stp x29, x30
+; CHECK: stp x29, x30, [sp, #-16]!
+; CHECK: mov x29, sp
; Reserve space for call-frame
; CHECK: sub sp, sp, #16
@@ -52,7 +52,7 @@ define void @foo1(i32 %in) {
; Check for epilogue (primarily to make sure sp spotted above wasn't
; part of it).
-; CHECK: ldp x29, x30
-; CHECK: add sp, sp,
+; CHECK: mov sp, x29
+; CHECK: ldp x29, x30, [sp], #16
ret void
}
diff --git a/test/CodeGen/AArch64/fastcc.ll b/test/CodeGen/AArch64/fastcc.ll
index a4cd37858ee4..9917fcd044fd 100644
--- a/test/CodeGen/AArch64/fastcc.ll
+++ b/test/CodeGen/AArch64/fastcc.ll
@@ -6,10 +6,13 @@
define fastcc void @func_stack0() {
; CHECK-LABEL: func_stack0:
-; CHECK: sub sp, sp, #48
+; CHECK: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-TAIL-LABEL: func_stack0:
-; CHECK-TAIL: sub sp, sp, #48
+; CHECK-TAIL: stp x29, x30, [sp, #-16]!
+; CHECK-TAIL-NEXT: mov x29, sp
+; CHECK-TAIL-NEXT: sub sp, sp, #32
call fastcc void @func_stack8([8 x i32] undef, i32 42)
@@ -24,6 +27,7 @@ define fastcc void @func_stack0() {
; CHECK: bl func_stack32
; CHECK-NOT: sub sp, sp,
+
; CHECK-TAIL: bl func_stack32
; CHECK-TAIL: sub sp, sp, #32
@@ -32,30 +36,39 @@ define fastcc void @func_stack0() {
; CHECK: bl func_stack0
; CHECK-NOT: sub sp, sp
+
; CHECK-TAIL: bl func_stack0
; CHECK-TAIL-NOT: sub sp, sp
ret void
-; CHECK: add sp, sp, #48
+; CHECK: mov sp, x29
+; CHECK-NEXT: ldp x29, x30, [sp], #16
; CHECK-NEXT: ret
-; CHECK-TAIL: add sp, sp, #48
-; CHECK-TAIL-NEXT: ret
+; CHECK-TAIL: mov sp, x29
+; CHECK-TAIL-NEXT: ldp x29, x30, [sp], #16
+; CHECK-TAIL-NEXT: ret
}
define fastcc void @func_stack8([8 x i32], i32 %stacked) {
; CHECK-LABEL: func_stack8:
-; CHECK: sub sp, sp, #48
+; CHECK: stp x29, x30, [sp, #-16]!
+; CHECK: mov x29, sp
+; CHECK: sub sp, sp, #32
+
; CHECK-TAIL-LABEL: func_stack8:
-; CHECK-TAIL: sub sp, sp, #48
+; CHECK-TAIL: stp x29, x30, [sp, #-16]!
+; CHECK-TAIL: mov x29, sp
+; CHECK-TAIL: sub sp, sp, #32
call fastcc void @func_stack8([8 x i32] undef, i32 42)
; CHECK: bl func_stack8
; CHECK-NOT: sub sp, sp,
+
; CHECK-TAIL: bl func_stack8
; CHECK-TAIL: sub sp, sp, #16
@@ -64,6 +77,7 @@ define fastcc void @func_stack8([8 x i32], i32 %stacked) {
; CHECK: bl func_stack32
; CHECK-NOT: sub sp, sp,
+
; CHECK-TAIL: bl func_stack32
; CHECK-TAIL: sub sp, sp, #32
@@ -76,19 +90,22 @@ define fastcc void @func_stack8([8 x i32], i32 %stacked) {
; CHECK-TAIL-NOT: sub sp, sp
ret void
-; CHECK: add sp, sp, #48
+; CHECK: mov sp, x29
+; CHECK-NEXT: ldp x29, x30, [sp], #16
; CHECK-NEXT: ret
-; CHECK-TAIL: add sp, sp, #64
+
+; CHECK-TAIL: mov sp, x29
+; CHECK-TAIL-NEXT: ldp x29, x30, [sp], #16
; CHECK-TAIL-NEXT: ret
}
define fastcc void @func_stack32([8 x i32], i128 %stacked0, i128 %stacked1) {
; CHECK-LABEL: func_stack32:
-; CHECK: sub sp, sp, #48
+; CHECK: mov x29, sp
; CHECK-TAIL-LABEL: func_stack32:
-; CHECK-TAIL: sub sp, sp, #48
+; CHECK-TAIL: mov x29, sp
call fastcc void @func_stack8([8 x i32] undef, i32 42)
@@ -103,6 +120,7 @@ define fastcc void @func_stack32([8 x i32], i128 %stacked0, i128 %stacked1) {
; CHECK: bl func_stack32
; CHECK-NOT: sub sp, sp,
+
; CHECK-TAIL: bl func_stack32
; CHECK-TAIL: sub sp, sp, #32
@@ -111,13 +129,16 @@ define fastcc void @func_stack32([8 x i32], i128 %stacked0, i128 %stacked1) {
; CHECK: bl func_stack0
; CHECK-NOT: sub sp, sp
+
; CHECK-TAIL: bl func_stack0
; CHECK-TAIL-NOT: sub sp, sp
ret void
-; CHECK: add sp, sp, #48
+; CHECK: mov sp, x29
+; CHECK-NEXT: ldp x29, x30, [sp], #16
; CHECK-NEXT: ret
-; CHECK-TAIL: add sp, sp, #80
+; CHECK-TAIL: mov sp, x29
+; CHECK-TAIL-NEXT: ldp x29, x30, [sp], #16
; CHECK-TAIL-NEXT: ret
}
diff --git a/test/CodeGen/AArch64/fcmp.ll b/test/CodeGen/AArch64/fcmp.ll
index a9518eabb754..3c74508bb12b 100644
--- a/test/CodeGen/AArch64/fcmp.ll
+++ b/test/CodeGen/AArch64/fcmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
declare void @bar(i32)
diff --git a/test/CodeGen/AArch64/fcvt-fixed.ll b/test/CodeGen/AArch64/fcvt-fixed.ll
index 9d66da49437b..ccb3616b70bf 100644
--- a/test/CodeGen/AArch64/fcvt-fixed.ll
+++ b/test/CodeGen/AArch64/fcvt-fixed.ll
@@ -1,4 +1,8 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -O0 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 -O0
+
+; (The O0 test is to make sure FastISel still constrains its operands properly
+; and the verifier doesn't trigger).
@var32 = global i32 0
@var64 = global i64 0
diff --git a/test/CodeGen/AArch64/fcvt-int.ll b/test/CodeGen/AArch64/fcvt-int.ll
index b28eb3ea1bef..d549c7e78421 100644
--- a/test/CodeGen/AArch64/fcvt-int.ll
+++ b/test/CodeGen/AArch64/fcvt-int.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
define i32 @test_floattoi32(float %in) {
; CHECK-LABEL: test_floattoi32:
@@ -69,7 +69,7 @@ define float @test_i32tofloat(i32 %in) {
; CHECK-DAG: scvtf [[SIG:s[0-9]+]], {{w[0-9]+}}
%res = fsub float %signed, %unsigned
-; CHECL: fsub {{s[0-9]+}}, [[SIG]], [[UNSIG]]
+; CHECK: fsub {{s[0-9]+}}, [[SIG]], [[UNSIG]]
ret float %res
; CHECK: ret
}
diff --git a/test/CodeGen/AArch64/flags-multiuse.ll b/test/CodeGen/AArch64/flags-multiuse.ll
index e99c72833997..77bbcddc4926 100644
--- a/test/CodeGen/AArch64/flags-multiuse.ll
+++ b/test/CodeGen/AArch64/flags-multiuse.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s
; LLVM should be able to cope with multiple uses of the same flag-setting
; instruction at different points of a routine. Either by rematerializing the
@@ -15,7 +15,7 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
%val = zext i1 %test to i32
-; CHECK: csinc {{[xw][0-9]+}}, {{xzr|wzr}}, {{xzr|wzr}}, eq
+; CHECK: cset {{[xw][0-9]+}}, ne
store i32 %val, i32* @var
diff --git a/test/CodeGen/AArch64/floatdp_1source.ll b/test/CodeGen/AArch64/floatdp_1source.ll
index 3d7f8f0369fc..8c02787a2340 100644
--- a/test/CodeGen/AArch64/floatdp_1source.ll
+++ b/test/CodeGen/AArch64/floatdp_1source.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
@varhalf = global half 0.0
@varfloat = global float 0.0
diff --git a/test/CodeGen/AArch64/floatdp_2source.ll b/test/CodeGen/AArch64/floatdp_2source.ll
index bb655285ac54..262271784ec6 100644
--- a/test/CodeGen/AArch64/floatdp_2source.ll
+++ b/test/CodeGen/AArch64/floatdp_2source.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -mcpu=cyclone | FileCheck %s
@varfloat = global float 0.0
@vardouble = global double 0.0
diff --git a/test/CodeGen/AArch64/fp-cond-sel.ll b/test/CodeGen/AArch64/fp-cond-sel.ll
index 572f42e210b1..b4f4d77cd0bc 100644
--- a/test/CodeGen/AArch64/fp-cond-sel.ll
+++ b/test/CodeGen/AArch64/fp-cond-sel.ll
@@ -1,25 +1,34 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -mcpu=cyclone | FileCheck %s --check-prefix=CHECK
@varfloat = global float 0.0
@vardouble = global double 0.0
+declare void @use_float(float)
+declare void @use_double(double)
+
define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK-LABEL: test_csel:
%tst1 = icmp ugt i32 %lhs32, %rhs32
%val1 = select i1 %tst1, float 0.0, float 1.0
store float %val1, float* @varfloat
-; CHECK: ldr [[FLT0:s[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
-; CHECK: fmov [[FLT1:s[0-9]+]], #1.0
-; CHECK: fcsel {{s[0-9]+}}, [[FLT0]], [[FLT1]], hi
+; CHECK: movi v[[FLT0:[0-9]+]].2d, #0
+; CHECK: fmov s[[FLT1:[0-9]+]], #1.0
+; CHECK: fcsel {{s[0-9]+}}, s[[FLT0]], s[[FLT1]], hi
%rhs64 = sext i32 %rhs32 to i64
%tst2 = icmp sle i64 %lhs64, %rhs64
%val2 = select i1 %tst2, double 1.0, double 0.0
store double %val2, double* @vardouble
-; CHECK: ldr [[FLT0:d[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
-; CHECK: fmov [[FLT1:d[0-9]+]], #1.0
-; CHECK: fcsel {{d[0-9]+}}, [[FLT1]], [[FLT0]], le
+; FLT0 is reused from above on ARM64.
+; CHECK: fmov d[[FLT1:[0-9]+]], #1.0
+; CHECK: fcsel {{d[0-9]+}}, d[[FLT1]], d[[FLT0]], le
+
+ call void @use_float(float 0.0)
+ call void @use_float(float 1.0)
+
+ call void @use_double(double 0.0)
+ call void @use_double(double 1.0)
ret void
; CHECK: ret
diff --git a/test/CodeGen/AArch64/fp-dp3.ll b/test/CodeGen/AArch64/fp-dp3.ll
index 590557f1e8ed..10f88fdbbe96 100644
--- a/test/CodeGen/AArch64/fp-dp3.ll
+++ b/test/CodeGen/AArch64/fp-dp3.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -fp-contract=fast | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s -check-prefix=CHECK-NOFAST
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -fp-contract=fast | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s -check-prefix=CHECK-NOFAST
declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
@@ -26,8 +26,9 @@ define float @test_fmsub(float %a, float %b, float %c) {
define float @test_fnmadd(float %a, float %b, float %c) {
; CHECK-LABEL: test_fnmadd:
; CHECK-NOFAST-LABEL: test_fnmadd:
+ %nega = fsub float -0.0, %a
%negc = fsub float -0.0, %c
- %val = call float @llvm.fma.f32(float %a, float %b, float %negc)
+ %val = call float @llvm.fma.f32(float %nega, float %b, float %negc)
; CHECK: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %val
@@ -36,9 +37,8 @@ define float @test_fnmadd(float %a, float %b, float %c) {
define float @test_fnmsub(float %a, float %b, float %c) {
; CHECK-LABEL: test_fnmsub:
; CHECK-NOFAST-LABEL: test_fnmsub:
- %nega = fsub float -0.0, %a
%negc = fsub float -0.0, %c
- %val = call float @llvm.fma.f32(float %nega, float %b, float %negc)
+ %val = call float @llvm.fma.f32(float %a, float %b, float %negc)
; CHECK: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %val
@@ -66,8 +66,9 @@ define double @testd_fmsub(double %a, double %b, double %c) {
define double @testd_fnmadd(double %a, double %b, double %c) {
; CHECK-LABEL: testd_fnmadd:
; CHECK-NOFAST-LABEL: testd_fnmadd:
+ %nega = fsub double -0.0, %a
%negc = fsub double -0.0, %c
- %val = call double @llvm.fma.f64(double %a, double %b, double %negc)
+ %val = call double @llvm.fma.f64(double %nega, double %b, double %negc)
; CHECK: fnmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
; CHECK-NOFAST: fnmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
ret double %val
@@ -76,9 +77,8 @@ define double @testd_fnmadd(double %a, double %b, double %c) {
define double @testd_fnmsub(double %a, double %b, double %c) {
; CHECK-LABEL: testd_fnmsub:
; CHECK-NOFAST-LABEL: testd_fnmsub:
- %nega = fsub double -0.0, %a
%negc = fsub double -0.0, %c
- %val = call double @llvm.fma.f64(double %nega, double %b, double %negc)
+ %val = call double @llvm.fma.f64(double %a, double %b, double %negc)
; CHECK: fnmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
; CHECK-NOFAST: fnmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
ret double %val
@@ -113,12 +113,13 @@ define float @test_fnmadd_unfused(float %a, float %b, float %c) {
; CHECK-NOFAST-LABEL: test_fnmadd_unfused:
%nega = fsub float -0.0, %a
%prod = fmul float %b, %c
- %sum = fadd float %nega, %prod
+ %diff = fsub float %nega, %prod
; CHECK: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST-NOT: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
- ret float %sum
+; CHECK-NOFAST: ret
+ ret float %diff
}
define float @test_fnmsub_unfused(float %a, float %b, float %c) {
@@ -126,12 +127,37 @@ define float @test_fnmsub_unfused(float %a, float %b, float %c) {
; CHECK-NOFAST-LABEL: test_fnmsub_unfused:
%nega = fsub float -0.0, %a
%prod = fmul float %b, %c
- %diff = fsub float %nega, %prod
+ %sum = fadd float %nega, %prod
; CHECK: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST-NOT: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-; CHECK-NOFAST-DAG: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-; CHECK-NOFAST-DAG: fneg {{s[0-9]+}}, {{s[0-9]+}}
-; CHECK-NOFAST-DAG: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-; CHECK-NOFAST: ret
- ret float %diff
+; CHECK-NOFAST: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %sum
}
+
+; Another set of tests that check for a single use of the multiply
+
+define float @test_fmadd_unfused_su(float %a, float %b, float %c) {
+; CHECK-LABEL: test_fmadd_unfused_su:
+ %prod = fmul float %b, %c
+ %sum = fadd float %a, %prod
+ %res = fadd float %sum, %prod
+; CHECK-NOT: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %res
+}
+
+define float @test_fmsub_unfused_su(float %a, float %b, float %c) {
+; CHECK-LABEL: test_fmsub_unfused_su:
+ %prod = fmul float %b, %c
+ %diff = fsub float %a, %prod
+ %res = fsub float %diff, %prod
+; CHECK-NOT: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret float %res
+}
+
diff --git a/test/CodeGen/AArch64/fp128-folding.ll b/test/CodeGen/AArch64/fp128-folding.ll
index b1c560d2b648..892b19c5cf33 100644
--- a/test/CodeGen/AArch64/fp128-folding.ll
+++ b/test/CodeGen/AArch64/fp128-folding.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
declare void @bar(i8*, i8*, i32*)
; SelectionDAG used to try to fold some fp128 operations using the ppc128 type,
@@ -12,6 +12,6 @@ define fp128 @test_folding() {
%fpval = sitofp i32 %val to fp128
; If the value is loaded from a constant pool into an fp128, it's been folded
; successfully.
-; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
ret fp128 %fpval
}
diff --git a/test/CodeGen/AArch64/fp128.ll b/test/CodeGen/AArch64/fp128.ll
deleted file mode 100644
index c312bb1917ab..000000000000
--- a/test/CodeGen/AArch64/fp128.ll
+++ /dev/null
@@ -1,279 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-
-@lhs = global fp128 zeroinitializer
-@rhs = global fp128 zeroinitializer
-
-define fp128 @test_add() {
-; CHECK-LABEL: test_add:
-
- %lhs = load fp128* @lhs
- %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
-
- %val = fadd fp128 %lhs, %rhs
-; CHECK: bl __addtf3
- ret fp128 %val
-}
-
-define fp128 @test_sub() {
-; CHECK-LABEL: test_sub:
-
- %lhs = load fp128* @lhs
- %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
-
- %val = fsub fp128 %lhs, %rhs
-; CHECK: bl __subtf3
- ret fp128 %val
-}
-
-define fp128 @test_mul() {
-; CHECK-LABEL: test_mul:
-
- %lhs = load fp128* @lhs
- %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
-
- %val = fmul fp128 %lhs, %rhs
-; CHECK: bl __multf3
- ret fp128 %val
-}
-
-define fp128 @test_div() {
-; CHECK-LABEL: test_div:
-
- %lhs = load fp128* @lhs
- %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
-
- %val = fdiv fp128 %lhs, %rhs
-; CHECK: bl __divtf3
- ret fp128 %val
-}
-
-@var32 = global i32 0
-@var64 = global i64 0
-
-define void @test_fptosi() {
-; CHECK-LABEL: test_fptosi:
- %val = load fp128* @lhs
-
- %val32 = fptosi fp128 %val to i32
- store i32 %val32, i32* @var32
-; CHECK: bl __fixtfsi
-
- %val64 = fptosi fp128 %val to i64
- store i64 %val64, i64* @var64
-; CHECK: bl __fixtfdi
-
- ret void
-}
-
-define void @test_fptoui() {
-; CHECK-LABEL: test_fptoui:
- %val = load fp128* @lhs
-
- %val32 = fptoui fp128 %val to i32
- store i32 %val32, i32* @var32
-; CHECK: bl __fixunstfsi
-
- %val64 = fptoui fp128 %val to i64
- store i64 %val64, i64* @var64
-; CHECK: bl __fixunstfdi
-
- ret void
-}
-
-define void @test_sitofp() {
-; CHECK-LABEL: test_sitofp:
-
- %src32 = load i32* @var32
- %val32 = sitofp i32 %src32 to fp128
- store volatile fp128 %val32, fp128* @lhs
-; CHECK: bl __floatsitf
-
- %src64 = load i64* @var64
- %val64 = sitofp i64 %src64 to fp128
- store volatile fp128 %val64, fp128* @lhs
-; CHECK: bl __floatditf
-
- ret void
-}
-
-define void @test_uitofp() {
-; CHECK-LABEL: test_uitofp:
-
- %src32 = load i32* @var32
- %val32 = uitofp i32 %src32 to fp128
- store volatile fp128 %val32, fp128* @lhs
-; CHECK: bl __floatunsitf
-
- %src64 = load i64* @var64
- %val64 = uitofp i64 %src64 to fp128
- store volatile fp128 %val64, fp128* @lhs
-; CHECK: bl __floatunditf
-
- ret void
-}
-
-define i1 @test_setcc1() {
-; CHECK-LABEL: test_setcc1:
-
- %lhs = load fp128* @lhs
- %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
-
-; Technically, everything after the call to __letf2 is redundant, but we'll let
-; LLVM have its fun for now.
- %val = fcmp ole fp128 %lhs, %rhs
-; CHECK: bl __letf2
-; CHECK: cmp w0, #0
-; CHECK: csinc w0, wzr, wzr, gt
-
- ret i1 %val
-; CHECK: ret
-}
-
-define i1 @test_setcc2() {
-; CHECK-LABEL: test_setcc2:
-
- %lhs = load fp128* @lhs
- %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
-
-; Technically, everything after the call to __letf2 is redundant, but we'll let
-; LLVM have its fun for now.
- %val = fcmp ugt fp128 %lhs, %rhs
-; CHECK: bl __gttf2
-; CHECK: cmp w0, #0
-; CHECK: csinc [[GT:w[0-9]+]], wzr, wzr, le
-
-; CHECK: bl __unordtf2
-; CHECK: cmp w0, #0
-; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
-
-; CHECK: orr w0, [[UNORDERED]], [[GT]]
-
- ret i1 %val
-; CHECK: ret
-}
-
-define i32 @test_br_cc() {
-; CHECK-LABEL: test_br_cc:
-
- %lhs = load fp128* @lhs
- %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
-
- ; olt == !uge, which LLVM unfortunately "optimizes" this to.
- %cond = fcmp olt fp128 %lhs, %rhs
-; CHECK: bl __getf2
-; CHECK: cmp w0, #0
-; CHECK: csinc [[OGE:w[0-9]+]], wzr, wzr, lt
-
-; CHECK: bl __unordtf2
-; CHECK: cmp w0, #0
-; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
-
-; CHECK: orr [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
-; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
- br i1 %cond, label %iftrue, label %iffalse
-
-iftrue:
- ret i32 42
-; CHECK-NEXT: BB#
-; CHECK-NEXT: movz x0, #42
-; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]
-
-iffalse:
- ret i32 29
-; CHECK: [[RET29]]:
-; CHECK-NEXT: movz x0, #29
-; CHECK-NEXT: [[REALRET]]:
-; CHECK: ret
-}
-
-define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
-; CHECK-LABEL: test_select:
-
- %val = select i1 %cond, fp128 %lhs, fp128 %rhs
- store fp128 %val, fp128* @lhs
-; CHECK: cmp w0, #0
-; CHECK: str q1, [sp]
-; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: BB#
-; CHECK-NEXT: str q0, [sp]
-; CHECK-NEXT: [[IFFALSE]]:
-; CHECK-NEXT: ldr q0, [sp]
-; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
- ret void
-; CHECK: ret
-}
-
-@varfloat = global float 0.0
-@vardouble = global double 0.0
-
-define void @test_round() {
-; CHECK-LABEL: test_round:
-
- %val = load fp128* @lhs
-
- %float = fptrunc fp128 %val to float
- store float %float, float* @varfloat
-; CHECK: bl __trunctfsf2
-; CHECK: str s0, [{{x[0-9]+}}, #:lo12:varfloat]
-
- %double = fptrunc fp128 %val to double
- store double %double, double* @vardouble
-; CHECK: bl __trunctfdf2
-; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]
-
- ret void
-}
-
-define void @test_extend() {
-; CHECK-LABEL: test_extend:
-
- %val = load fp128* @lhs
-
- %float = load float* @varfloat
- %fromfloat = fpext float %float to fp128
- store volatile fp128 %fromfloat, fp128* @lhs
-; CHECK: bl __extendsftf2
-; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
-
- %double = load double* @vardouble
- %fromdouble = fpext double %double to fp128
- store volatile fp128 %fromdouble, fp128* @lhs
-; CHECK: bl __extenddftf2
-; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
-
- ret void
-; CHECK: ret
-}
-
-define fp128 @test_neg(fp128 %in) {
-; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
-; Make sure the weird hex constant below *is* -0.0
-; CHECK-NEXT: fp128 -0
-
-; CHECK-LABEL: test_neg:
-
- ; Could in principle be optimized to fneg which we can't select, this makes
- ; sure that doesn't happen.
- %ret = fsub fp128 0xL00000000000000008000000000000000, %in
-; CHECK: str q0, [sp, #-16]
-; CHECK-NEXT: ldr q1, [sp], #16
-; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:[[MINUS0]]]
-; CHECK: bl __subtf3
-
- ret fp128 %ret
-; CHECK: ret
-}
diff --git a/test/CodeGen/AArch64/fpimm.ll b/test/CodeGen/AArch64/fpimm.ll
index b8f716959449..e59520c4dc95 100644
--- a/test/CodeGen/AArch64/fpimm.ll
+++ b/test/CodeGen/AArch64/fpimm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
@varf32 = global float 0.0
@varf64 = global double 0.0
@@ -13,7 +13,7 @@ define void @check_float() {
%newval2 = fadd float %val, 128.0
store volatile float %newval2, float* @varf32
-; CHECK-DAG: ldr [[HARD:s[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI0_0
+; CHECK-DAG: ldr [[HARD:s[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI0_0
; CHECK: ret
ret void
@@ -29,7 +29,7 @@ define void @check_double() {
%newval2 = fadd double %val, 128.0
store volatile double %newval2, double* @varf64
-; CHECK-DAG: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI1_0
+; CHECK-DAG: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.LCPI1_0
; CHECK: ret
ret void
diff --git a/test/CodeGen/AArch64/frameaddr.ll b/test/CodeGen/AArch64/frameaddr.ll
index 182704bd6541..85d95e21c9b7 100644
--- a/test/CodeGen/AArch64/frameaddr.ll
+++ b/test/CodeGen/AArch64/frameaddr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
define i8* @t() nounwind {
entry:
@@ -12,7 +12,7 @@ define i8* @t2() nounwind {
entry:
; CHECK-LABEL: t2:
; CHECK: ldr x[[reg:[0-9]+]], [x29]
-; CHECK: ldr x[[reg]], [x[[reg]]]
+; CHECK: ldr {{x[0-9]+}}, [x[[reg]]]
%0 = call i8* @llvm.frameaddress(i32 2)
ret i8* %0
}
diff --git a/test/CodeGen/AArch64/free-zext.ll b/test/CodeGen/AArch64/free-zext.ll
new file mode 100644
index 000000000000..d69105eec381
--- /dev/null
+++ b/test/CodeGen/AArch64/free-zext.ll
@@ -0,0 +1,14 @@
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+
+define i64 @test_free_zext(i8* %a, i16* %b) {
+; CHECK-LABEL: test_free_zext
+; CHECK-DAG: ldrb w[[A:[0-9]+]], [x0]
+; CHECK: ldrh w[[B:[0-9]+]], [x1]
+; CHECK: add x0, x[[B]], x[[A]]
+ %1 = load i8* %a, align 1
+ %conv = zext i8 %1 to i64
+ %2 = load i16* %b, align 2
+ %conv1 = zext i16 %2 to i64
+ %add = add nsw i64 %conv1, %conv
+ ret i64 %add
+}
diff --git a/test/CodeGen/AArch64/func-argpassing.ll b/test/CodeGen/AArch64/func-argpassing.ll
index 430d77f9e932..abb732ccf43a 100644
--- a/test/CodeGen/AArch64/func-argpassing.ll
+++ b/test/CodeGen/AArch64/func-argpassing.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK %s
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
%myStruct = type { i64 , i8, i32 }
@@ -16,7 +16,7 @@ define void @take_i8s(i8 %val1, i8 %val2) {
store i8 %val2, i8* @var8
; Not using w1 may be technically allowed, but it would indicate a
; problem in itself.
-; CHECK: strb w1, [{{x[0-9]+}}, #:lo12:var8]
+; CHECK: strb w1, [{{x[0-9]+}}, {{#?}}:lo12:var8]
ret void
}
@@ -26,7 +26,7 @@ define void @add_floats(float %val1, float %val2) {
; CHECK: fadd [[ADDRES:s[0-9]+]], s0, s1
; CHECK-NOFP-NOT: fadd
store float %newval, float* @varfloat
-; CHECK: str [[ADDRES]], [{{x[0-9]+}}, #:lo12:varfloat]
+; CHECK: str [[ADDRES]], [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
ret void
}
@@ -41,12 +41,12 @@ define void @take_struct(%myStruct* byval %structval) {
; Some weird move means x0 is used for one access
; CHECK: ldr [[REG32:w[0-9]+]], [{{x[0-9]+|sp}}, #12]
store volatile i32 %val0, i32* @var32
-; CHECK: str [[REG32]], [{{x[0-9]+}}, #:lo12:var32]
+; CHECK: str [[REG32]], [{{x[0-9]+}}, {{#?}}:lo12:var32]
%val1 = load volatile i64* %addr1
; CHECK: ldr [[REG64:x[0-9]+]], [{{x[0-9]+|sp}}]
store volatile i64 %val1, i64* @var64
-; CHECK: str [[REG64]], [{{x[0-9]+}}, #:lo12:var64]
+; CHECK: str [[REG64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
ret void
}
@@ -60,15 +60,14 @@ define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %st
%val0 = load volatile i32* %addr0
; Some weird move means x0 is used for one access
-; CHECK: add x[[STRUCTVAL_ADDR:[0-9]+]], sp, #16
-; CHECK: ldr [[REG32:w[0-9]+]], [x[[STRUCTVAL_ADDR]], #12]
+; CHECK: ldr [[REG32:w[0-9]+]], [sp, #28]
store i32 %val0, i32* @var32
-; CHECK: str [[REG32]], [{{x[0-9]+}}, #:lo12:var32]
+; CHECK: str [[REG32]], [{{x[0-9]+}}, {{#?}}:lo12:var32]
%val1 = load volatile i64* %addr1
; CHECK: ldr [[REG64:x[0-9]+]], [sp, #16]
store i64 %val1, i64* @var64
-; CHECK: str [[REG64]], [{{x[0-9]+}}, #:lo12:var64]
+; CHECK: str [[REG64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
ret void
}
@@ -77,7 +76,7 @@ define i32 @return_int() {
; CHECK-LABEL: return_int:
%val = load i32* @var32
ret i32 %val
-; CHECK: ldr w0, [{{x[0-9]+}}, #:lo12:var32]
+; CHECK: ldr w0, [{{x[0-9]+}}, {{#?}}:lo12:var32]
; Make sure epilogue follows
; CHECK-NEXT: ret
}
@@ -85,7 +84,7 @@ define i32 @return_int() {
define double @return_double() {
; CHECK-LABEL: return_double:
ret double 3.14
-; CHECK: ldr d0, [{{x[0-9]+}}, #:lo12:.LCPI
+; CHECK: ldr d0, [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
; CHECK-NOFP-NOT: ldr d0,
}
@@ -97,10 +96,10 @@ define [2 x i64] @return_struct() {
%addr = bitcast %myStruct* @varstruct to [2 x i64]*
%val = load [2 x i64]* %addr
ret [2 x i64] %val
-; CHECK: ldr x0, [{{x[0-9]+}}, #:lo12:varstruct]
+; CHECK-DAG: ldr x0, [{{x[0-9]+}}, {{#?}}:lo12:varstruct]
; Odd register regex below disallows x0 which we want to be live now.
-; CHECK: add {{x[1-9][0-9]*}}, {{x[1-9][0-9]*}}, #:lo12:varstruct
-; CHECK-NEXT: ldr x1, [{{x[1-9][0-9]*}}, #8]
+; CHECK-DAG: add {{x[1-9][0-9]*}}, {{x[1-9][0-9]*}}, {{#?}}:lo12:varstruct
+; CHECK: ldr x1, [{{x[1-9][0-9]*}}, #8]
; Make sure epilogue immediately follows
; CHECK-NEXT: ret
}
@@ -137,16 +136,16 @@ define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var
store volatile i64 %val64, i64* @var64
; Currently nothing on local stack, so struct should be at sp
; CHECK: ldr [[VAL64:x[0-9]+]], [sp]
-; CHECK: str [[VAL64]], [{{x[0-9]+}}, #:lo12:var64]
+; CHECK: str [[VAL64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
store volatile double %notstacked, double* @vardouble
; CHECK-NOT: ldr d0
-; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble
+; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble
; CHECK-NOFP-NOT: str d0,
%retval = load volatile i32* %stacked
ret i32 %retval
-; CHECK: ldr w0, [sp, #16]
+; CHECK-LE: ldr w0, [sp, #16]
}
define void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
@@ -156,34 +155,36 @@ define void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
store float %var8, float* @varfloat
; Beware as above: the offset would be different on big-endian
; machines if the first ldr were changed to use s-registers.
-; CHECK: ldr d[[VALFLOAT:[0-9]+]], [sp]
-; CHECK: str s[[VALFLOAT]], [{{x[0-9]+}}, #:lo12:varfloat]
+; CHECK: ldr {{[ds]}}[[VALFLOAT:[0-9]+]], [sp]
+; CHECK: str s[[VALFLOAT]], [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
ret void
}
; 128-bit integer types should be passed in xEVEN, xODD rather than
; the reverse. In this case x2 and x3. Nothing should use x1.
-define i32 @check_i128_regalign(i32 %val0, i128 %val1, i32 %val2) {
-; CHECK: check_i128_regalign
+define i64 @check_i128_regalign(i32 %val0, i128 %val1, i64 %val2) {
+; CHECK-LABEL: check_i128_regalign
store i128 %val1, i128* @var128
-; CHECK: str x2, [{{x[0-9]+}}, #:lo12:var128]
-; CHECK: str x3, [{{x[0-9]+}}, #8]
+; CHECK-DAG: str x2, [{{x[0-9]+}}, {{#?}}:lo12:var128]
+; CHECK-DAG: str x3, [{{x[0-9]+}}, #8]
- ret i32 %val2
+ ret i64 %val2
; CHECK: mov x0, x4
}
define void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i32 %val3,
i32 %val4, i32 %val5, i32 %val6, i32 %val7,
i32 %stack1, i128 %stack2) {
-; CHECK: check_i128_stackalign
+; CHECK-LABEL: check_i128_stackalign
store i128 %stack2, i128* @var128
; Nothing local on stack in current codegen, so first stack is 16 away
-; CHECK: add x[[REG:[0-9]+]], sp, #16
-; CHECK: ldr {{x[0-9]+}}, [x[[REG]], #8]
+; CHECK-LE: add x[[REG:[0-9]+]], sp, #16
+; CHECK-LE: ldr {{x[0-9]+}}, [x[[REG]], #8]
+
; Important point is that we address sp+24 for second dword
-; CHECK: ldr {{x[0-9]+}}, [sp, #16]
+
+; CHECK: ldp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
ret void
}
@@ -195,3 +196,13 @@ define i32 @test_extern() {
; CHECK: bl memcpy
ret i32 0
}
+
+
+; A sub-i32 stack argument must be loaded on big endian with ldr{h,b}, not just
+; implicitly extended to a 32-bit load.
+define i16 @stacked_i16(i32 %val0, i32 %val1, i32 %val2, i32 %val3,
+ i32 %val4, i32 %val5, i32 %val6, i32 %val7,
+ i16 %stack1) {
+; CHECK-LABEL: stacked_i16
+ ret i16 %stack1
+}
diff --git a/test/CodeGen/AArch64/func-calls.ll b/test/CodeGen/AArch64/func-calls.ll
index ac188bb3bb57..422c5765ec48 100644
--- a/test/CodeGen/AArch64/func-calls.ll
+++ b/test/CodeGen/AArch64/func-calls.ll
@@ -1,5 +1,7 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-neon | FileCheck --check-prefix=CHECK-NONEON %s
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64_be-none-linux-gnu | FileCheck --check-prefix=CHECK-BE %s
%myStruct = type { i64 , i8, i32 }
@@ -22,15 +24,15 @@ define void @simple_args() {
%char1 = load i8* @var8
%char2 = load i8* @var8_2
call void @take_i8s(i8 %char1, i8 %char2)
-; CHECK-DAG: ldrb w0, [{{x[0-9]+}}, #:lo12:var8]
-; CHECK-DAG: ldrb w1, [{{x[0-9]+}}, #:lo12:var8_2]
+; CHECK-DAG: ldrb w0, [{{x[0-9]+}}, {{#?}}:lo12:var8]
+; CHECK-DAG: ldrb w1, [{{x[0-9]+}}, {{#?}}:lo12:var8_2]
; CHECK: bl take_i8s
%float1 = load float* @varfloat
%float2 = load float* @varfloat_2
call void @take_floats(float %float1, float %float2)
-; CHECK-DAG: ldr s1, [{{x[0-9]+}}, #:lo12:varfloat_2]
-; CHECK-DAG: ldr s0, [{{x[0-9]+}}, #:lo12:varfloat]
+; CHECK-DAG: ldr s1, [{{x[0-9]+}}, {{#?}}:lo12:varfloat_2]
+; CHECK-DAG: ldr s0, [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
; CHECK: bl take_floats
; CHECK-NOFP-NOT: ldr s1,
; CHECK-NOFP-NOT: ldr s0,
@@ -49,22 +51,22 @@ define void @simple_rets() {
%int = call i32 @return_int()
store i32 %int, i32* @var32
; CHECK: bl return_int
-; CHECK: str w0, [{{x[0-9]+}}, #:lo12:var32]
+; CHECK: str w0, [{{x[0-9]+}}, {{#?}}:lo12:var32]
%dbl = call double @return_double()
store double %dbl, double* @vardouble
; CHECK: bl return_double
-; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]
+; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble]
; CHECK-NOFP-NOT: str d0,
%arr = call [2 x i64] @return_smallstruct()
store [2 x i64] %arr, [2 x i64]* @varsmallstruct
; CHECK: bl return_smallstruct
; CHECK: str x1, [{{x[0-9]+}}, #8]
-; CHECK: str x0, [{{x[0-9]+}}, #:lo12:varsmallstruct]
+; CHECK: str x0, [{{x[0-9]+}}, {{#?}}:lo12:varsmallstruct]
call void @return_large_struct(%myStruct* sret @varstruct)
-; CHECK: add x8, {{x[0-9]+}}, #:lo12:varstruct
+; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
; CHECK: bl return_large_struct
ret void
@@ -86,19 +88,28 @@ define void @check_stack_args() {
; Want to check that the final double is passed in registers and
; that varstruct is passed on the stack. Rather dependent on how a
; memcpy gets created, but the following works for now.
-; CHECK: mov x[[SPREG:[0-9]+]], sp
-; CHECK-DAG: str {{w[0-9]+}}, [x[[SPREG]]]
-; CHECK-DAG: str {{w[0-9]+}}, [x[[SPREG]], #12]
-; CHECK-DAG: fmov d0,
+
+; CHECK-DAG: str {{q[0-9]+}}, [sp]
+; CHECK-DAG: fmov d[[FINAL_DOUBLE:[0-9]+]], #1.0
+; CHECK: mov v0.16b, v[[FINAL_DOUBLE]].16b
+
+; CHECK-NONEON-DAG: str {{q[0-9]+}}, [sp]
+; CHECK-NONEON-DAG: fmov d[[FINAL_DOUBLE:[0-9]+]], #1.0
+; CHECK-NONEON: fmov d0, d[[FINAL_DOUBLE]]
+
; CHECK: bl struct_on_stack
; CHECK-NOFP-NOT: fmov
call void @stacked_fpu(float -1.0, double 1.0, float 4.0, float 2.0,
float -2.0, float -8.0, float 16.0, float 1.0,
float 64.0)
-; CHECK: ldr s[[STACKEDREG:[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
-; CHECK: mov x0, sp
-; CHECK: str d[[STACKEDREG]], [x0]
+
+; CHECK: movz [[SIXTY_FOUR:w[0-9]+]], #0x4280, lsl #16
+; CHECK: str [[SIXTY_FOUR]], [sp]
+
+; CHECK-NONEON: movz [[SIXTY_FOUR:w[0-9]+]], #0x4280, lsl #16
+; CHECK-NONEON: str [[SIXTY_FOUR]], [sp]
+
; CHECK: bl stacked_fpu
ret void
}
@@ -117,17 +128,21 @@ define void @check_i128_align() {
call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
i32 4, i32 5, i32 6, i32 7,
i32 42, i128 %val)
-; CHECK: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, #:lo12:var128]
+; CHECK: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:var128]
; CHECK: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, #8]
-; CHECK: mov x[[SPREG:[0-9]+]], sp
-; CHECK: str [[I128HI]], [x[[SPREG]], #24]
-; CHECK: str [[I128LO]], [x[[SPREG]], #16]
+; CHECK: stp [[I128LO]], [[I128HI]], [sp, #16]
+
+; CHECK-NONEON: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128]
+; CHECK-NONEON: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, #8]
+; CHECK-NONEON: stp [[I128LO]], [[I128HI]], [sp, #16]
; CHECK: bl check_i128_stackalign
call void @check_i128_regalign(i32 0, i128 42)
; CHECK-NOT: mov x1
-; CHECK: movz x2, #42
-; CHECK: mov x3, xzr
+; CHECK-LE: movz x2, #{{0x2a|42}}
+; CHECK-LE: mov x3, xzr
+; CHECK-BE: movz {{x|w}}3, #{{0x2a|42}}
+; CHECK-BE: mov x2, xzr
; CHECK: bl check_i128_regalign
ret void
@@ -139,7 +154,7 @@ define void @check_indirect_call() {
; CHECK-LABEL: check_indirect_call:
%func = load void()** @fptr
call void %func()
-; CHECK: ldr [[FPTR:x[0-9]+]], [{{x[0-9]+}}, #:lo12:fptr]
+; CHECK: ldr [[FPTR:x[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:fptr]
; CHECK: blr [[FPTR]]
ret void
diff --git a/test/CodeGen/AArch64/funcptr_cast.ll b/test/CodeGen/AArch64/funcptr_cast.ll
new file mode 100644
index 000000000000..a00b7bcaf6a2
--- /dev/null
+++ b/test/CodeGen/AArch64/funcptr_cast.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+
+define i8 @test() {
+; CHECK-LABEL: @test
+; CHECK: adrp {{x[0-9]+}}, foo
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, :lo12:foo
+; CHECK: ldrb w0, [{{x[0-9]+}}]
+entry:
+ %0 = load i8* bitcast (void (...)* @foo to i8*), align 1
+ ret i8 %0
+}
+
+declare void @foo(...)
diff --git a/test/CodeGen/AArch64/global-alignment.ll b/test/CodeGen/AArch64/global-alignment.ll
index 56e5cba519c1..451b9d6741ee 100644
--- a/test/CodeGen/AArch64/global-alignment.ll
+++ b/test/CodeGen/AArch64/global-alignment.ll
@@ -1,8 +1,9 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
@var32 = global [3 x i32] zeroinitializer
@var64 = global [3 x i64] zeroinitializer
@var32_align64 = global [3 x i32] zeroinitializer, align 8
+@alias = alias [3 x i32]* @var32_align64
define i64 @test_align32() {
; CHECK-LABEL: test_align32:
@@ -12,7 +13,7 @@ define i64 @test_align32() {
; emit an "LDR x0, [x0, #:lo12:var32]" instruction to implement this load.
%val = load i64* %addr
; CHECK: adrp [[HIBITS:x[0-9]+]], var32
-; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], #:lo12:var32
+; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:var32
; CHECK: ldr x0, [x[[ADDR]]]
ret i64 %val
@@ -27,7 +28,7 @@ define i64 @test_align64() {
%val = load i64* %addr
; CHECK: adrp x[[HIBITS:[0-9]+]], var64
; CHECK-NOT: add x[[HIBITS]]
-; CHECK: ldr x0, [x[[HIBITS]], #:lo12:var64]
+; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var64]
ret i64 %val
}
@@ -41,7 +42,20 @@ define i64 @test_var32_align64() {
%val = load i64* %addr
; CHECK: adrp x[[HIBITS:[0-9]+]], var32_align64
; CHECK-NOT: add x[[HIBITS]]
-; CHECK: ldr x0, [x[[HIBITS]], #:lo12:var32_align64]
+; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var32_align64]
+
+ ret i64 %val
+}
+
+define i64 @test_var32_alias() {
+; CHECK-LABEL: test_var32_alias:
+ %addr = bitcast [3 x i32]* @alias to i64*
+
+ ; Test that we can find the alignment for aliases.
+ %val = load i64* %addr
+; CHECK: adrp x[[HIBITS:[0-9]+]], alias
+; CHECK-NOT: add x[[HIBITS]]
+; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:alias]
ret i64 %val
}
@@ -56,7 +70,7 @@ define i64 @test_yet_another_var() {
; so we can't fold the load.
%val = load i64* bitcast({i32, i32}* @yet_another_var to i64*)
; CHECK: adrp [[HIBITS:x[0-9]+]], yet_another_var
-; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], #:lo12:yet_another_var
+; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:yet_another_var
; CHECK: ldr x0, [x[[ADDR]]]
ret i64 %val
}
@@ -65,5 +79,5 @@ define i64()* @test_functions() {
; CHECK-LABEL: test_functions:
ret i64()* @test_yet_another_var
; CHECK: adrp [[HIBITS:x[0-9]+]], test_yet_another_var
-; CHECK: add x0, [[HIBITS]], #:lo12:test_yet_another_var
+; CHECK: add x0, [[HIBITS]], {{#?}}:lo12:test_yet_another_var
}
diff --git a/test/CodeGen/AArch64/global-merge-1.ll b/test/CodeGen/AArch64/global-merge-1.ll
new file mode 100644
index 000000000000..68aba5ebe065
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-1.ll
@@ -0,0 +1,26 @@
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+
+@m = internal global i32 0, align 4
+@n = internal global i32 0, align 4
+
+define void @f1(i32 %a1, i32 %a2) {
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals@PAGE
+;CHECK-APPLE-IOS-NOT: adrp
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals@PAGEOFF
+ store i32 %a1, i32* @m, align 4
+ store i32 %a2, i32* @n, align 4
+ ret void
+}
+
+;CHECK: .type _MergedGlobals,@object // @_MergedGlobals
+;CHECK: .local _MergedGlobals
+;CHECK: .comm _MergedGlobals,8,8
+
+;CHECK-APPLE-IOS: .zerofill __DATA,__bss,__MergedGlobals,8,3 ; @_MergedGlobals
diff --git a/test/CodeGen/AArch64/global-merge-2.ll b/test/CodeGen/AArch64/global-merge-2.ll
new file mode 100644
index 000000000000..a7735667b359
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-2.ll
@@ -0,0 +1,51 @@
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+
+@x = global i32 0, align 4
+@y = global i32 0, align 4
+@z = global i32 0, align 4
+
+define void @f1(i32 %a1, i32 %a2) {
+;CHECK-APPLE-IOS-LABEL: _f1:
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals_x@PAGE
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
+;CHECK-APPLE-IOS-NOT: adrp
+ store i32 %a1, i32* @x, align 4
+ store i32 %a2, i32* @y, align 4
+ ret void
+}
+
+define void @g1(i32 %a1, i32 %a2) {
+;CHECK-APPLE-IOS-LABEL: _g1:
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals_x@PAGE
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
+;CHECK-APPLE-IOS-NOT: adrp
+ store i32 %a1, i32* @y, align 4
+ store i32 %a2, i32* @z, align 4
+ ret void
+}
+
+;CHECK: .type _MergedGlobals_x,@object // @_MergedGlobals_x
+;CHECK: .globl _MergedGlobals_x
+;CHECK: .align 3
+;CHECK: _MergedGlobals_x:
+;CHECK: .size _MergedGlobals_x, 12
+
+;CHECK: .globl x
+;CHECK: x = _MergedGlobals_x
+;CHECK: .globl y
+;CHECK: y = _MergedGlobals_x+4
+;CHECK: .globl z
+;CHECK: z = _MergedGlobals_x+8
+
+;CHECK-APPLE-IOS: .globl __MergedGlobals_x ; @_MergedGlobals_x
+;CHECK-APPLE-IOS: .zerofill __DATA,__common,__MergedGlobals_x,12,3
+
+;CHECK-APPLE-IOS: .globl _x
+;CHECK-APPLE-IOS: _x = __MergedGlobals_x
+;CHECK-APPLE-IOS: .globl _y
+;CHECK-APPLE-IOS: _y = __MergedGlobals_x+4
+;CHECK-APPLE-IOS: .globl _z
+;CHECK-APPLE-IOS: _z = __MergedGlobals_x+8
+;CHECK-APPLE-IOS: .subsections_via_symbols
diff --git a/test/CodeGen/AArch64/global-merge-3.ll b/test/CodeGen/AArch64/global-merge-3.ll
new file mode 100644
index 000000000000..d455d40edcc2
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-3.ll
@@ -0,0 +1,51 @@
+; RUN: llc %s -mtriple=aarch64-none-linux-gnu -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -global-merge-on-external -o - | FileCheck %s
+; RUN: llc %s -mtriple=aarch64-apple-ios -enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS
+
+@x = global [1000 x i32] zeroinitializer, align 1
+@y = global [1000 x i32] zeroinitializer, align 1
+@z = internal global i32 1, align 4
+
+define void @f1(i32 %a1, i32 %a2, i32 %a3) {
+;CHECK-APPLE-IOS: adrp x8, __MergedGlobals_x@PAGE
+;CHECK-APPLE-IOS-NOT: adrp
+;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
+;CHECK-APPLE-IOS: adrp x9, __MergedGlobals_y@PAGE
+;CHECK-APPLE-IOS: add x9, x9, __MergedGlobals_y@PAGEOFF
+ %x3 = getelementptr inbounds [1000 x i32]* @x, i32 0, i64 3
+ %y3 = getelementptr inbounds [1000 x i32]* @y, i32 0, i64 3
+ store i32 %a1, i32* %x3, align 4
+ store i32 %a2, i32* %y3, align 4
+ store i32 %a3, i32* @z, align 4
+ ret void
+}
+
+;CHECK: .type _MergedGlobals_x,@object // @_MergedGlobals_x
+;CHECK: .globl _MergedGlobals_x
+;CHECK: .align 4
+;CHECK: _MergedGlobals_x:
+;CHECK: .size _MergedGlobals_x, 4004
+
+;CHECK: .type _MergedGlobals_y,@object // @_MergedGlobals_y
+;CHECK: .globl _MergedGlobals_y
+;CHECK: _MergedGlobals_y:
+;CHECK: .size _MergedGlobals_y, 4000
+
+;CHECK-APPLE-IOS: .globl __MergedGlobals_x ; @_MergedGlobals_x
+;CHECK-APPLE-IOS: .align 4
+;CHECK-APPLE-IOS: __MergedGlobals_x:
+;CHECK-APPLE-IOS: .long 1
+;CHECK-APPLE-IOS: .space 4000
+
+;CHECK-APPLE-IOS: .globl __MergedGlobals_y ; @_MergedGlobals_y
+;CHECK-APPLE-IOS: .zerofill __DATA,__common,__MergedGlobals_y,4000,4
+
+;CHECK: .globl x
+;CHECK: x = _MergedGlobals_x+4
+;CHECK: .globl y
+;CHECK: y = _MergedGlobals_y
+
+;CHECK-APPLE-IOS:.globl _x
+;CHECK-APPLE-IOS: _x = __MergedGlobals_x+4
+;CHECK-APPLE-IOS:.globl _y
+;CHECK-APPLE-IOS: _y = __MergedGlobals_y
diff --git a/test/CodeGen/AArch64/global-merge-4.ll b/test/CodeGen/AArch64/global-merge-4.ll
new file mode 100644
index 000000000000..a525ccd8dee3
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge-4.ll
@@ -0,0 +1,73 @@
+; RUN: llc %s -mtriple=aarch64-linux-gnuabi -enable-global-merge -o - | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios7.0.0"
+
+@bar = internal global [5 x i32] zeroinitializer, align 4
+@baz = internal global [5 x i32] zeroinitializer, align 4
+@foo = internal global [5 x i32] zeroinitializer, align 4
+
+; Function Attrs: nounwind ssp
+define internal void @initialize() #0 {
+ %1 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %1, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 0), align 4
+ %2 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %2, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 0), align 4
+ %3 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %3, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 1), align 4
+ %4 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %4, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 1), align 4
+ %5 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %5, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 2), align 4
+ %6 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 2), align 4
+ %7 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %7, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 3), align 4
+ %8 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %8, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 3), align 4
+ %9 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %9, i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 4), align 4
+ %10 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
+ store i32 %10, i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 4), align 4
+ ret void
+}
+
+declare i32 @calc(...)
+
+; Function Attrs: nounwind ssp
+define internal void @calculate() #0 {
+ %1 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 0), align 4
+ %2 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 0), align 4
+ %3 = mul nsw i32 %2, %1
+ store i32 %3, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 0), align 4
+ %4 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 1), align 4
+ %5 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 1), align 4
+ %6 = mul nsw i32 %5, %4
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 1), align 4
+ %7 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 2), align 4
+ %8 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 2), align 4
+ %9 = mul nsw i32 %8, %7
+ store i32 %9, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 2), align 4
+ %10 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 3), align 4
+ %11 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 3), align 4
+ %12 = mul nsw i32 %11, %10
+ store i32 %12, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 3), align 4
+ %13 = load i32* getelementptr inbounds ([5 x i32]* @bar, i64 0, i64 4), align 4
+ %14 = load i32* getelementptr inbounds ([5 x i32]* @baz, i64 0, i64 4), align 4
+ %15 = mul nsw i32 %14, %13
+ store i32 %15, i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 4), align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone ssp
+define internal i32* @returnFoo() #1 {
+ ret i32* getelementptr inbounds ([5 x i32]* @foo, i64 0, i64 0)
+}
+
+;CHECK: .type _MergedGlobals,@object // @_MergedGlobals
+;CHECK: .local _MergedGlobals
+;CHECK: .comm _MergedGlobals,60,16
+
+attributes #0 = { nounwind ssp }
+attributes #1 = { nounwind readnone ssp }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/AArch64/global-merge.ll b/test/CodeGen/AArch64/global-merge.ll
new file mode 100644
index 000000000000..aed1dc4d1c7b
--- /dev/null
+++ b/test/CodeGen/AArch64/global-merge.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -O0 | FileCheck --check-prefix=NO-MERGE %s
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -O0 -global-merge-on-external=true | FileCheck --check-prefix=NO-MERGE %s
+
+; RUN: llc < %s -mtriple=aarch64-apple-ios -O0 | FileCheck %s --check-prefix=CHECK-APPLE-IOS-NO-MERGE
+; RUN: llc < %s -mtriple=aarch64-apple-ios -O0 -global-merge-on-external=true | FileCheck %s --check-prefix=CHECK-APPLE-IOS-NO-MERGE
+
+; FIXME: add O1/O2 test for aarch64-none-linux-gnu and aarch64-apple-ios
+
+@m = internal global i32 0, align 4
+@n = internal global i32 0, align 4
+
+define void @f1(i32 %a1, i32 %a2) {
+; CHECK-LABEL: f1:
+; CHECK: adrp x{{[0-9]+}}, _MergedGlobals
+; CHECK-NOT: adrp
+
+; CHECK-APPLE-IOS-LABEL: f1:
+; CHECK-APPLE-IOS: adrp x{{[0-9]+}}, __MergedGlobals
+; CHECK-APPLE-IOS-NOT: adrp
+ store i32 %a1, i32* @m, align 4
+ store i32 %a2, i32* @n, align 4
+ ret void
+}
+
+; CHECK: .local _MergedGlobals
+; CHECK: .comm _MergedGlobals,8,8
+; NO-MERGE-NOT: .local _MergedGlobals
+
+; CHECK-APPLE-IOS: .zerofill __DATA,__bss,__MergedGlobals,8,3
+; CHECK-APPLE-IOS-NO-MERGE-NOT: .zerofill __DATA,__bss,__MergedGlobals,8,3
diff --git a/test/CodeGen/AArch64/got-abuse.ll b/test/CodeGen/AArch64/got-abuse.ll
index 8b06031c88f7..7a02b104e777 100644
--- a/test/CodeGen/AArch64/got-abuse.ll
+++ b/test/CodeGen/AArch64/got-abuse.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -filetype=obj < %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -filetype=obj -o - %s
; LLVM gives well-defined semantics to this horrible construct (though C says
; it's undefined). Regardless, we shouldn't crash. The important feature here is
@@ -17,7 +17,7 @@ define void @foo() nounwind {
entry:
call void @consume(i32 ptrtoint (void ()* @func to i32))
; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:func
-; CHECK: ldr {{x[0-9]+}}, [x[[ADDRHI]], #:got_lo12:func]
+; CHECK: ldr {{x[0-9]+}}, [x[[ADDRHI]], {{#?}}:got_lo12:func]
ret void
}
diff --git a/test/CodeGen/AArch64/half.ll b/test/CodeGen/AArch64/half.ll
new file mode 100644
index 000000000000..a46094b9fb85
--- /dev/null
+++ b/test/CodeGen/AArch64/half.ll
@@ -0,0 +1,83 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+
+define void @test_load_store(half* %in, half* %out) {
+; CHECK-LABEL: test_load_store:
+; CHECK: ldr [[TMP:h[0-9]+]], [x0]
+; CHECK: str [[TMP]], [x1]
+ %val = load half* %in
+ store half %val, half* %out
+ ret void
+}
+
+define i16 @test_bitcast_from_half(half* %addr) {
+; CHECK-LABEL: test_bitcast_from_half:
+; CHECK: ldrh w0, [x0]
+ %val = load half* %addr
+ %val_int = bitcast half %val to i16
+ ret i16 %val_int
+}
+
+define i16 @test_reg_bitcast_from_half(half %in) {
+; CHECK-LABEL: test_reg_bitcast_from_half:
+; CHECK-NOT: str
+; CHECK-NOT: ldr
+; CHECK-DAG: fmov w0, s0
+; CHECK: ret
+ %val = bitcast half %in to i16
+ ret i16 %val
+}
+
+define void @test_bitcast_to_half(half* %addr, i16 %in) {
+; CHECK-LABEL: test_bitcast_to_half:
+; CHECK: strh w1, [x0]
+ %val_fp = bitcast i16 %in to half
+ store half %val_fp, half* %addr
+ ret void
+}
+
+define half @test_reg_bitcast_to_half(i16 %in) {
+; CHECK-LABEL: test_reg_bitcast_to_half:
+; CHECK-NOT: str
+; CHECK-NOT: ldr
+; CHECK-DAG: fmov s0, w0
+; CHECK: ret
+
+ %val = bitcast i16 %in to half
+ ret half %val
+}
+
+define float @test_extend32(half* %addr) {
+; CHECK-LABEL: test_extend32:
+; CHECK: fcvt {{s[0-9]+}}, {{h[0-9]+}}
+
+ %val16 = load half* %addr
+ %val32 = fpext half %val16 to float
+ ret float %val32
+}
+
+define double @test_extend64(half* %addr) {
+; CHECK-LABEL: test_extend64:
+; CHECK: fcvt {{d[0-9]+}}, {{h[0-9]+}}
+
+ %val16 = load half* %addr
+ %val32 = fpext half %val16 to double
+ ret double %val32
+}
+
+define void @test_trunc32(float %in, half* %addr) {
+; CHECK-LABEL: test_trunc32:
+; CHECK: fcvt {{h[0-9]+}}, {{s[0-9]+}}
+
+ %val16 = fptrunc float %in to half
+ store half %val16, half* %addr
+ ret void
+}
+
+define void @test_trunc64(double %in, half* %addr) {
+; CHECK-LABEL: test_trunc64:
+; CHECK: fcvt {{h[0-9]+}}, {{d[0-9]+}}
+
+ %val16 = fptrunc double %in to half
+ store half %val16, half* %addr
+ ret void
+}
diff --git a/test/CodeGen/AArch64/hints.ll b/test/CodeGen/AArch64/hints.ll
new file mode 100644
index 000000000000..d7d9e23af1f1
--- /dev/null
+++ b/test/CodeGen/AArch64/hints.ll
@@ -0,0 +1,67 @@
+; RUN: llc -mtriple aarch64-eabi -o - %s | FileCheck %s
+
+declare void @llvm.aarch64.hint(i32) nounwind
+
+define void @hint_nop() {
+entry:
+ tail call void @llvm.aarch64.hint(i32 0) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_nop
+; CHECK: nop
+
+define void @hint_yield() {
+entry:
+ tail call void @llvm.aarch64.hint(i32 1) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_yield
+; CHECK: yield
+
+define void @hint_wfe() {
+entry:
+ tail call void @llvm.aarch64.hint(i32 2) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_wfe
+; CHECK: wfe
+
+define void @hint_wfi() {
+entry:
+ tail call void @llvm.aarch64.hint(i32 3) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_wfi
+; CHECK: wfi
+
+define void @hint_sev() {
+entry:
+ tail call void @llvm.aarch64.hint(i32 4) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_sev
+; CHECK: sev
+
+define void @hint_sevl() {
+entry:
+ tail call void @llvm.aarch64.hint(i32 5) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_sevl
+; CHECK: sevl
+
+define void @hint_undefined() {
+entry:
+ tail call void @llvm.aarch64.hint(i32 8) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_undefined
+; CHECK: hint #0x8
+
diff --git a/test/CodeGen/AArch64/i1-contents.ll b/test/CodeGen/AArch64/i1-contents.ll
new file mode 100644
index 000000000000..7f133fc3ea83
--- /dev/null
+++ b/test/CodeGen/AArch64/i1-contents.ll
@@ -0,0 +1,55 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
+%big = type i32
+
+@var = global %big 0
+
+; AAPCS: low 8 bits of %in (== w0) will be either 0 or 1. Need to extend to
+; 32-bits.
+define void @consume_i1_arg(i1 %in) {
+; CHECK-LABEL: consume_i1_arg:
+; CHECK: and [[BOOL32:w[0-9]+]], w0, #{{0x1|0xff}}
+; CHECK: str [[BOOL32]], [{{x[0-9]+}}, :lo12:var]
+ %val = zext i1 %in to %big
+ store %big %val, %big* @var
+ ret void
+}
+
+; AAPCS: low 8 bits of %val1 (== w0) will be either 0 or 1. Need to extend to
+; 32-bits (doesn't really matter if it's from 1 or 8 bits).
+define void @consume_i1_ret() {
+; CHECK-LABEL: consume_i1_ret:
+; CHECK: bl produce_i1_ret
+; CHECK: and [[BOOL32:w[0-9]+]], w0, #{{0x1|0xff}}
+; CHECK: str [[BOOL32]], [{{x[0-9]+}}, :lo12:var]
+ %val1 = call i1 @produce_i1_ret()
+ %val = zext i1 %val1 to %big
+ store %big %val, %big* @var
+ ret void
+}
+
+; AAPCS: low 8 bits of w0 must be either 0 or 1. Need to mask them off.
+define i1 @produce_i1_ret() {
+; CHECK-LABEL: produce_i1_ret:
+; CHECK: ldr [[VAR32:w[0-9]+]], [{{x[0-9]+}}, :lo12:var]
+; CHECK: and w0, [[VAR32]], #{{0x1|0xff}}
+ %val = load %big* @var
+ %val1 = trunc %big %val to i1
+ ret i1 %val1
+}
+
+define void @produce_i1_arg() {
+; CHECK-LABEL: produce_i1_arg:
+; CHECK: ldr [[VAR32:w[0-9]+]], [{{x[0-9]+}}, :lo12:var]
+; CHECK: and w0, [[VAR32]], #{{0x1|0xff}}
+; CHECK: bl consume_i1_arg
+ %val = load %big* @var
+ %val1 = trunc %big %val to i1
+ call void @consume_i1_arg(i1 %val1)
+ ret void
+}
+
+
+;define zeroext i1 @foo(i8 %in) {
+; %val = trunc i8 %in to i1
+; ret i1 %val
+;}
diff --git a/test/CodeGen/AArch64/i128-align.ll b/test/CodeGen/AArch64/i128-align.ll
index 21ca7eda66bb..a1b4d6f5a446 100644
--- a/test/CodeGen/AArch64/i128-align.ll
+++ b/test/CodeGen/AArch64/i128-align.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-ios7.0 -verify-machineinstrs -o - %s | FileCheck %s
%struct = type { i32, i128, i8 }
@@ -13,7 +13,7 @@ define i64 @check_size() {
%diff = sub i64 %endi, %starti
ret i64 %diff
-; CHECK: movz x0, #48
+; CHECK: {{movz x0, #48|orr w0, wzr, #0x30}}
}
define i64 @check_field() {
@@ -25,5 +25,5 @@ define i64 @check_field() {
%diff = sub i64 %endi, %starti
ret i64 %diff
-; CHECK: movz x0, #16
+; CHECK: {{movz x0, #16|orr w0, wzr, #0x10}}
}
diff --git a/test/CodeGen/AArch64/i128-fast-isel-fallback.ll b/test/CodeGen/AArch64/i128-fast-isel-fallback.ll
new file mode 100644
index 000000000000..1cffbf3de052
--- /dev/null
+++ b/test/CodeGen/AArch64/i128-fast-isel-fallback.ll
@@ -0,0 +1,18 @@
+; RUN: llc -O0 -mtriple=arm64-apple-ios7.0 -mcpu=generic < %s | FileCheck %s
+
+; Function Attrs: nounwind ssp
+define void @test1() {
+ %1 = sext i32 0 to i128
+ call void @test2(i128 %1)
+ ret void
+
+; The i128 is 0 so we can test to make sure it is propagated into the x
+; registers that make up the i128 pair
+
+; CHECK: mov x0, xzr
+; CHECK: mov x1, x0
+; CHECK: bl _test2
+
+}
+
+declare void @test2(i128)
diff --git a/test/CodeGen/AArch64/illegal-float-ops.ll b/test/CodeGen/AArch64/illegal-float-ops.ll
index 03c6d8d10087..9f7dd998bc21 100644
--- a/test/CodeGen/AArch64/illegal-float-ops.ll
+++ b/test/CodeGen/AArch64/illegal-float-ops.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
@varfloat = global float 0.0
@vardouble = global double 0.0
diff --git a/test/CodeGen/AArch64/init-array.ll b/test/CodeGen/AArch64/init-array.ll
index 076ae27721df..f47b490baebd 100644
--- a/test/CodeGen/AArch64/init-array.ll
+++ b/test/CodeGen/AArch64/init-array.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -use-init-array < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-none-eabi -verify-machineinstrs -use-init-array < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -use-init-array -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-none-eabi -verify-machineinstrs -use-init-array -o - %s | FileCheck %s
define internal void @_GLOBAL__I_a() section ".text.startup" {
ret void
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badI.ll b/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
index 61bbfc201354..9d833d936c06 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
@@ -1,7 +1,7 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
+; RUN: not llc -mtriple=aarch64-none-linux-gnu -o - %s
define void @foo() {
; Out of range immediate for I.
- call void asm sideeffect "add x0, x0, $0", "I"(i32 4096)
+ call void asm sideeffect "add x0, x0, $0", "I"(i32 4097)
ret void
}
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badK.ll b/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
index 40746e1528ce..6ffc05dcbde1 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
+; RUN: not llc -mtriple=arm64-apple-ios7.0 -o - %s
define void @foo() {
; 32-bit bitpattern ending in 1101 can't be produced.
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll b/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
index 2c5338191fde..172601301993 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
+; RUN: not llc -mtriple=aarch64-none-linux-gnu -o - %s
define void @foo() {
; 32-bit bitpattern ending in 1101 can't be produced.
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badL.ll b/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
index d82d5a2ee4d0..3c2f60c1f837 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
+; RUN: not llc -mtriple=arm64-apple-ios7.0 -o - %s
define void @foo() {
; 32-bit bitpattern ending in 1101 can't be produced.
diff --git a/test/CodeGen/AArch64/inline-asm-constraints.ll b/test/CodeGen/AArch64/inline-asm-constraints.ll
deleted file mode 100644
index 18a3b37b41d1..000000000000
--- a/test/CodeGen/AArch64/inline-asm-constraints.ll
+++ /dev/null
@@ -1,137 +0,0 @@
-;RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-define i64 @test_inline_constraint_r(i64 %base, i32 %offset) {
-; CHECK-LABEL: test_inline_constraint_r:
- %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 %base, i32 %offset)
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw
- ret i64 %val
-}
-
-define i16 @test_small_reg(i16 %lhs, i16 %rhs) {
-; CHECK-LABEL: test_small_reg:
- %val = call i16 asm sideeffect "add $0, $1, $2, sxth", "=r,r,r"(i16 %lhs, i16 %rhs)
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth
- ret i16 %val
-}
-
-define i64 @test_inline_constraint_r_imm(i64 %base, i32 %offset) {
-; CHECK-LABEL: test_inline_constraint_r_imm:
- %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 4, i32 12)
-; CHECK: movz [[FOUR:x[0-9]+]], #4
-; CHECK: movz [[TWELVE:w[0-9]+]], #12
-; CHECK: add {{x[0-9]+}}, [[FOUR]], [[TWELVE]], sxtw
- ret i64 %val
-}
-
-; m is permitted to have a base/offset form. We don't do that
-; currently though.
-define i32 @test_inline_constraint_m(i32 *%ptr) {
-; CHECK-LABEL: test_inline_constraint_m:
- %val = call i32 asm "ldr $0, $1", "=r,m"(i32 *%ptr)
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
- ret i32 %val
-}
-
-@arr = global [8 x i32] zeroinitializer
-
-; Q should *never* have base/offset form even if given the chance.
-define i32 @test_inline_constraint_Q(i32 *%ptr) {
-; CHECK-LABEL: test_inline_constraint_Q:
- %val = call i32 asm "ldr $0, $1", "=r,Q"(i32* getelementptr([8 x i32]* @arr, i32 0, i32 1))
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
- ret i32 %val
-}
-
-@dump = global fp128 zeroinitializer
-
-define void @test_inline_constraint_w(<8 x i8> %vec64, <4 x float> %vec128, half %hlf, float %flt, double %dbl, fp128 %quad) {
-; CHECK: test_inline_constraint_w:
- call <8 x i8> asm sideeffect "add $0.8b, $1.8b, $1.8b", "=w,w"(<8 x i8> %vec64)
- call <8 x i8> asm sideeffect "fadd $0.4s, $1.4s, $1.4s", "=w,w"(<4 x float> %vec128)
-; CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-; CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-
- ; Arguably semantically dodgy to output "vN", but it's what GCC does
- ; so purely for compatibility we want vector registers to be output.
- call float asm sideeffect "fcvt ${0:s}, ${1:h}", "=w,w"(half undef)
- call float asm sideeffect "fadd $0.2s, $0.2s, $0.2s", "=w,w"(float %flt)
- call double asm sideeffect "fadd $0.2d, $0.2d, $0.2d", "=w,w"(double %dbl)
- call fp128 asm sideeffect "fadd $0.2d, $0.2d, $0.2d", "=w,w"(fp128 %quad)
-; CHECK: fcvt {{s[0-9]+}}, {{h[0-9]+}}
-; CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-; CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-; CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- ret void
-}
-
-define void @test_inline_constraint_I() {
-; CHECK-LABEL: test_inline_constraint_I:
- call void asm sideeffect "add x0, x0, $0", "I"(i32 0)
- call void asm sideeffect "add x0, x0, $0", "I"(i64 4095)
-; CHECK: add x0, x0, #0
-; CHECK: add x0, x0, #4095
-
- ret void
-}
-
-; Skip J because it's useless
-
-define void @test_inline_constraint_K() {
-; CHECK-LABEL: test_inline_constraint_K:
- call void asm sideeffect "and w0, w0, $0", "K"(i32 2863311530) ; = 0xaaaaaaaa
- call void asm sideeffect "and w0, w0, $0", "K"(i32 65535)
-; CHECK: and w0, w0, #-1431655766
-; CHECK: and w0, w0, #65535
-
- ret void
-}
-
-define void @test_inline_constraint_L() {
-; CHECK-LABEL: test_inline_constraint_L:
- call void asm sideeffect "and x0, x0, $0", "L"(i64 4294967296) ; = 0xaaaaaaaa
- call void asm sideeffect "and x0, x0, $0", "L"(i64 65535)
-; CHECK: and x0, x0, #4294967296
-; CHECK: and x0, x0, #65535
-
- ret void
-}
-
-; Skip M and N because we don't support MOV pseudo-instructions yet.
-
-@var = global i32 0
-
-define void @test_inline_constraint_S() {
-; CHECK-LABEL: test_inline_constraint_S:
- call void asm sideeffect "adrp x0, $0", "S"(i32* @var)
- call void asm sideeffect "adrp x0, ${0:A}", "S"(i32* @var)
- call void asm sideeffect "add x0, x0, ${0:L}", "S"(i32* @var)
-; CHECK: adrp x0, var
-; CHECK: adrp x0, var
-; CHECK: add x0, x0, #:lo12:var
- ret void
-}
-
-define i32 @test_inline_constraint_S_label(i1 %in) {
-; CHECK-LABEL: test_inline_constraint_S_label:
- call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label, %loc))
-; CHECK: adr x0, .Ltmp{{[0-9]+}}
- br i1 %in, label %loc, label %loc2
-loc:
- ret i32 0
-loc2:
- ret i32 42
-}
-
-define void @test_inline_constraint_Y() {
-; CHECK-LABEL: test_inline_constraint_Y:
- call void asm sideeffect "fcmp s0, $0", "Y"(float 0.0)
-; CHECK: fcmp s0, #0.0
- ret void
-}
-
-define void @test_inline_constraint_Z() {
-; CHECK-LABEL: test_inline_constraint_Z:
- call void asm sideeffect "cmp w0, $0", "Z"(i32 0)
-; CHECK: cmp w0, #0
- ret void
-}
diff --git a/test/CodeGen/AArch64/inline-asm-modifiers.ll b/test/CodeGen/AArch64/inline-asm-modifiers.ll
deleted file mode 100644
index b7f4d3c57ba3..000000000000
--- a/test/CodeGen/AArch64/inline-asm-modifiers.ll
+++ /dev/null
@@ -1,147 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic < %s | FileCheck %s
-
-@var_simple = hidden global i32 0
-@var_got = global i32 0
-@var_tlsgd = thread_local global i32 0
-@var_tlsld = thread_local(localdynamic) global i32 0
-@var_tlsie = thread_local(initialexec) global i32 0
-@var_tlsle = thread_local(localexec) global i32 0
-
-define void @test_inline_modifier_L() nounwind {
-; CHECK-LABEL: test_inline_modifier_L:
- call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_simple)
- call void asm sideeffect "ldr x0, [x0, ${0:L}]", "S,~{x0}"(i32* @var_got)
- call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsgd)
- call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsld)
- call void asm sideeffect "ldr x0, [x0, ${0:L}]", "S,~{x0}"(i32* @var_tlsie)
- call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsle)
-; CHECK: add x0, x0, #:lo12:var_simple
-; CHECK: ldr x0, [x0, #:got_lo12:var_got]
-; CHECK: add x0, x0, #:tlsdesc_lo12:var_tlsgd
-; CHECK: add x0, x0, #:dtprel_lo12:var_tlsld
-; CHECK: ldr x0, [x0, #:gottprel_lo12:var_tlsie]
-; CHECK: add x0, x0, #:tprel_lo12:var_tlsle
-
- call void asm sideeffect "add x0, x0, ${0:L}", "Si,~{x0}"(i32 64)
- call void asm sideeffect "ldr x0, [x0, ${0:L}]", "Si,~{x0}"(i32 64)
-; CHECK: add x0, x0, #64
-; CHECK: ldr x0, [x0, #64]
-
- ret void
-}
-
-define void @test_inline_modifier_G() nounwind {
-; CHECK-LABEL: test_inline_modifier_G:
- call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsld)
- call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsle)
-; CHECK: add x0, x0, #:dtprel_hi12:var_tlsld, lsl #12
-; CHECK: add x0, x0, #:tprel_hi12:var_tlsle, lsl #12
-
- call void asm sideeffect "add x0, x0, ${0:G}", "Si,~{x0}"(i32 42)
-; CHECK: add x0, x0, #42
- ret void
-}
-
-define void @test_inline_modifier_A() nounwind {
-; CHECK-LABEL: test_inline_modifier_A:
- call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_simple)
- call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_got)
- call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_tlsgd)
- call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_tlsie)
- ; N.b. All tprel and dtprel relocs are modified: lo12 or granules.
-; CHECK: adrp x0, var_simple
-; CHECK: adrp x0, :got:var_got
-; CHECK: adrp x0, :tlsdesc:var_tlsgd
-; CHECK: adrp x0, :gottprel:var_tlsie
-
- call void asm sideeffect "adrp x0, ${0:A}", "Si,~{x0}"(i32 40)
-; CHECK: adrp x0, #40
-
- ret void
-}
-
-define void @test_inline_modifier_wx(i32 %small, i64 %big) nounwind {
-; CHECK-LABEL: test_inline_modifier_wx:
- call i32 asm sideeffect "add $0, $0, $0", "=r,0"(i32 %small)
- call i32 asm sideeffect "add ${0:w}, ${0:w}, ${0:w}", "=r,0"(i32 %small)
- call i32 asm sideeffect "add ${0:x}, ${0:x}, ${0:x}", "=r,0"(i32 %small)
-; CHECK: //APP
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-
- call i64 asm sideeffect "add $0, $0, $0", "=r,0"(i64 %big)
- call i64 asm sideeffect "add ${0:w}, ${0:w}, ${0:w}", "=r,0"(i64 %big)
- call i64 asm sideeffect "add ${0:x}, ${0:x}, ${0:x}", "=r,0"(i64 %big)
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-
- call i32 asm sideeffect "add ${0:w}, ${1:w}, ${1:w}", "=r,r"(i32 0)
- call i32 asm sideeffect "add ${0:x}, ${1:x}, ${1:x}", "=r,r"(i32 0)
-; CHECK: add {{w[0-9]+}}, wzr, wzr
-; CHECK: add {{x[0-9]+}}, xzr, xzr
-
- call i32 asm sideeffect "add ${0:w}, ${0:w}, ${1:w}", "=r,Ir,0"(i32 123, i32 %small)
- call i64 asm sideeffect "add ${0:x}, ${0:x}, ${1:x}", "=r,Ir,0"(i32 456, i64 %big)
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #123
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #456
-
- ret void
-}
-
-define void @test_inline_modifier_bhsdq() nounwind {
-; CHECK-LABEL: test_inline_modifier_bhsdq:
- call float asm sideeffect "ldr ${0:b}, [sp]", "=w"()
- call float asm sideeffect "ldr ${0:h}, [sp]", "=w"()
- call float asm sideeffect "ldr ${0:s}, [sp]", "=w"()
- call float asm sideeffect "ldr ${0:d}, [sp]", "=w"()
- call float asm sideeffect "ldr ${0:q}, [sp]", "=w"()
-; CHECK: ldr b0, [sp]
-; CHECK: ldr h0, [sp]
-; CHECK: ldr s0, [sp]
-; CHECK: ldr d0, [sp]
-; CHECK: ldr q0, [sp]
-
- call double asm sideeffect "ldr ${0:b}, [sp]", "=w"()
- call double asm sideeffect "ldr ${0:h}, [sp]", "=w"()
- call double asm sideeffect "ldr ${0:s}, [sp]", "=w"()
- call double asm sideeffect "ldr ${0:d}, [sp]", "=w"()
- call double asm sideeffect "ldr ${0:q}, [sp]", "=w"()
-; CHECK: ldr b0, [sp]
-; CHECK: ldr h0, [sp]
-; CHECK: ldr s0, [sp]
-; CHECK: ldr d0, [sp]
-; CHECK: ldr q0, [sp]
-
- call void asm sideeffect "fcmp b0, ${0:b}", "Yw"(float 0.0)
- call void asm sideeffect "fcmp h0, ${0:h}", "Yw"(float 0.0)
- call void asm sideeffect "fcmp s0, ${0:s}", "Yw"(float 0.0)
- call void asm sideeffect "fcmp d0, ${0:d}", "Yw"(float 0.0)
- call void asm sideeffect "fcmp q0, ${0:q}", "Yw"(float 0.0)
-; CHECK: fcmp b0, #0
-; CHECK: fcmp h0, #0
-; CHECK: fcmp s0, #0
-; CHECK: fcmp d0, #0
-; CHECK: fcmp q0, #0
-
- ret void
-}
-
-define void @test_inline_modifier_c() nounwind {
-; CHECK-LABEL: test_inline_modifier_c:
- call void asm sideeffect "adr x0, ${0:c}", "i"(i32 3)
-; CHECK: adr x0, 3
-
- ret void
-}
-
-define void @test_inline_modifier_a() nounwind {
-; CHECK-LABEL: test_inline_modifier_a:
- call void asm sideeffect "prfm pldl1keep, ${0:a}", "r"(i32* @var_simple)
-; CHECK: adrp [[VARHI:x[0-9]+]], var_simple
-; CHECK: add x[[VARADDR:[0-9]+]], [[VARHI]], #:lo12:var_simple
-; CHECK: prfm pldl1keep, [x[[VARADDR]]]
- ret void
-}
-
diff --git a/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll b/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll
new file mode 100644
index 000000000000..645214ac8ec7
--- /dev/null
+++ b/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll
@@ -0,0 +1,26 @@
+; We actually need to use -filetype=obj in this test because if we output
+; assembly, the current code path will bypass the parser and just write the
+; raw text out to the Streamer. We need to actually parse the inlineasm to
+; demonstrate the bug. Going the asm->obj route does not show the issue.
+; RUN: llc -mtriple=aarch64 < %s -filetype=obj | llvm-objdump -arch=aarch64 -d - | FileCheck %s
+
+; CHECK-LABEL: foo:
+; CHECK: a0 79 95 d2 movz x0, #0xabcd
+; CHECK: c0 03 5f d6 ret
+define i32 @foo() nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "ldr $0,=0xabcd", "=r"() nounwind
+ ret i32 %0
+}
+; CHECK-LABEL: bar:
+; CHECK: 40 00 00 58 ldr x0, #8
+; CHECK: c0 03 5f d6 ret
+; Make sure the constant pool entry comes after the return
+; CHECK-LABEL: $d.1:
+define i32 @bar() nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "ldr $0,=0x10001", "=r"() nounwind
+ ret i32 %0
+}
+
+
diff --git a/test/CodeGen/AArch64/intrinsics-memory-barrier.ll b/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
new file mode 100644
index 000000000000..09e34ae2d2ed
--- /dev/null
+++ b/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -mtriple=aarch64-eabi -O=3 | FileCheck %s
+
+define void @test() {
+ ; CHECK: dmb sy
+ call void @llvm.aarch64.dmb(i32 15)
+ ; CHECK: dmb osh
+ call void @llvm.aarch64.dmb(i32 3)
+ ; CHECK: dsb sy
+ call void @llvm.aarch64.dsb(i32 15)
+ ; CHECK: dsb ishld
+ call void @llvm.aarch64.dsb(i32 9)
+ ; CHECK: isb
+ call void @llvm.aarch64.isb(i32 15)
+ ret void
+}
+
+; The important point is that the compiler should not reorder memory access
+; instructions around the DMB.
+; If it did, the two STRs would collapse into one STP.
+define void @test_dmb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+
+ call void @llvm.aarch64.dmb(i32 15); CHECK: dmb sy
+
+ %d1 = getelementptr i32* %d, i64 1
+ store i32 %b, i32* %d1 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+
+ ret void
+}
+
+; Similarly for DSB.
+define void @test_dsb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+
+ call void @llvm.aarch64.dsb(i32 15); CHECK: dsb sy
+
+ %d1 = getelementptr i32* %d, i64 1
+ store i32 %b, i32* %d1 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+
+ ret void
+}
+
+; And ISB.
+define void @test_isb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+
+ call void @llvm.aarch64.isb(i32 15); CHECK: isb
+
+ %d1 = getelementptr i32* %d, i64 1
+ store i32 %b, i32* %d1 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+
+ ret void
+}
+
+declare void @llvm.aarch64.dmb(i32)
+declare void @llvm.aarch64.dsb(i32)
+declare void @llvm.aarch64.isb(i32)
diff --git a/test/CodeGen/AArch64/jump-table.ll b/test/CodeGen/AArch64/jump-table.ll
index 4bb094217af3..69fbd9972b87 100644
--- a/test/CodeGen/AArch64/jump-table.ll
+++ b/test/CodeGen/AArch64/jump-table.ll
@@ -1,5 +1,6 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -code-model=large -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -aarch64-atomic-cfg-tidy=0 -o - %s | FileCheck --check-prefix=CHECK-PIC %s
define i32 @test_jumptable(i32 %in) {
; CHECK: test_jumptable
@@ -11,7 +12,7 @@ define i32 @test_jumptable(i32 %in) {
i32 4, label %lbl4
]
; CHECK: adrp [[JTPAGE:x[0-9]+]], .LJTI0_0
-; CHECK: add x[[JT:[0-9]+]], [[JTPAGE]], #:lo12:.LJTI0_0
+; CHECK: add x[[JT:[0-9]+]], [[JTPAGE]], {{#?}}:lo12:.LJTI0_0
; CHECK: ldr [[DEST:x[0-9]+]], [x[[JT]], {{x[0-9]+}}, lsl #3]
; CHECK: br [[DEST]]
@@ -22,6 +23,12 @@ define i32 @test_jumptable(i32 %in) {
; CHECK-LARGE: ldr [[DEST:x[0-9]+]], [x[[JTADDR]], {{x[0-9]+}}, lsl #3]
; CHECK-LARGE: br [[DEST]]
+; CHECK-PIC: adrp [[JTPAGE:x[0-9]+]], .LJTI0_0
+; CHECK-PIC: add x[[JT:[0-9]+]], [[JTPAGE]], {{#?}}:lo12:.LJTI0_0
+; CHECK-PIC: ldrsw [[DEST:x[0-9]+]], [x[[JT]], {{x[0-9]+}}, lsl #2]
+; CHECK-PIC: add [[TABLE:x[0-9]+]], [[DEST]], x[[JT]]
+; CHECK-PIC: br [[TABLE]]
+
def:
ret i32 0
@@ -47,3 +54,12 @@ lbl4:
; CHECK-NEXT: .xword
; CHECK-NEXT: .xword
; CHECK-NEXT: .xword
+
+; CHECK-PIC-NOT: .data_region
+; CHECK-PIC: .LJTI0_0:
+; CHECK-PIC-NEXT: .word
+; CHECK-PIC-NEXT: .word
+; CHECK-PIC-NEXT: .word
+; CHECK-PIC-NEXT: .word
+; CHECK-PIC-NEXT: .word
+; CHECK-PIC-NOT: .end_data_region
diff --git a/test/CodeGen/AArch64/large-consts.ll b/test/CodeGen/AArch64/large-consts.ll
index 1b769c6e350d..6bf85e829f61 100644
--- a/test/CodeGen/AArch64/large-consts.ll
+++ b/test/CodeGen/AArch64/large-consts.ll
@@ -4,10 +4,11 @@
; it's not the linker's job to put it there.
define double @foo() {
-; CHECK: movz [[CPADDR:x[0-9]+]], #:abs_g3:.LCPI0_0 // encoding: [A,A,0xe0'A',0xd2'A']
-; CHECK: movk [[CPADDR]], #:abs_g2_nc:.LCPI0_0 // encoding: [A,A,0xc0'A',0xf2'A']
-; CHECK: movk [[CPADDR]], #:abs_g1_nc:.LCPI0_0 // encoding: [A,A,0xa0'A',0xf2'A']
-; CHECK: movk [[CPADDR]], #:abs_g0_nc:.LCPI0_0 // encoding: [A,A,0x80'A',0xf2'A']
+
+; CHECK: movz [[CPADDR:x[0-9]+]], #:abs_g3:.LCPI0_0 // encoding: [0bAAA01000,A,0b111AAAAA,0xd2]
+; CHECK: movk [[CPADDR]], #:abs_g2_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b110AAAAA,0xf2]
+; CHECK: movk [[CPADDR]], #:abs_g1_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b101AAAAA,0xf2]
+; CHECK: movk [[CPADDR]], #:abs_g0_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b100AAAAA,0xf2]
ret double 3.14159
}
diff --git a/test/CodeGen/AArch64/large-frame.ll b/test/CodeGen/AArch64/large-frame.ll
deleted file mode 100644
index fde3036aef4a..000000000000
--- a/test/CodeGen/AArch64/large-frame.ll
+++ /dev/null
@@ -1,119 +0,0 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
-declare void @use_addr(i8*)
-
-@addr = global i8* null
-
-define void @test_bigframe() {
-; CHECK-LABEL: test_bigframe:
-; CHECK: .cfi_startproc
-
- %var1 = alloca i8, i32 20000000
- %var2 = alloca i8, i32 16
- %var3 = alloca i8, i32 20000000
-; CHECK: sub sp, sp, #496
-; CHECK: .cfi_def_cfa sp, 496
-; CHECK: str x30, [sp, #488]
- ; Total adjust is 39999536
-; CHECK: movz [[SUBCONST:x[0-9]+]], #22576
-; CHECK: movk [[SUBCONST]], #610, lsl #16
-; CHECK: sub sp, sp, [[SUBCONST]]
-; CHECK: .cfi_def_cfa sp, 40000032
-; CHECK: .cfi_offset x30, -8
-
- ; Total offset is 20000024
-; CHECK: movz [[VAR1OFFSET:x[0-9]+]], #11544
-; CHECK: movk [[VAR1OFFSET]], #305, lsl #16
-; CHECK: add {{x[0-9]+}}, sp, [[VAR1OFFSET]]
- store volatile i8* %var1, i8** @addr
-
- %var1plus2 = getelementptr i8* %var1, i32 2
- store volatile i8* %var1plus2, i8** @addr
-
-; CHECK: movz [[VAR2OFFSET:x[0-9]+]], #11528
-; CHECK: movk [[VAR2OFFSET]], #305, lsl #16
-; CHECK: add {{x[0-9]+}}, sp, [[VAR2OFFSET]]
- store volatile i8* %var2, i8** @addr
-
- %var2plus2 = getelementptr i8* %var2, i32 2
- store volatile i8* %var2plus2, i8** @addr
-
- store volatile i8* %var3, i8** @addr
-
- %var3plus2 = getelementptr i8* %var3, i32 2
- store volatile i8* %var3plus2, i8** @addr
-
-; CHECK: movz [[ADDCONST:x[0-9]+]], #22576
-; CHECK: movk [[ADDCONST]], #610, lsl #16
-; CHECK: add sp, sp, [[ADDCONST]]
-; CHECK: .cfi_endproc
- ret void
-}
-
-define void @test_mediumframe() {
-; CHECK-LABEL: test_mediumframe:
- %var1 = alloca i8, i32 1000000
- %var2 = alloca i8, i32 16
- %var3 = alloca i8, i32 1000000
-; CHECK: sub sp, sp, #496
-; CHECK: str x30, [sp, #488]
-; CHECK: sub sp, sp, #688
-; CHECK-NEXT: sub sp, sp, #488, lsl #12
-
- store volatile i8* %var1, i8** @addr
-; CHECK: add [[VAR1ADDR:x[0-9]+]], sp, #600
-; CHECK: add [[VAR1ADDR]], [[VAR1ADDR]], #244, lsl #12
-
- %var1plus2 = getelementptr i8* %var1, i32 2
- store volatile i8* %var1plus2, i8** @addr
-; CHECK: add [[VAR1PLUS2:x[0-9]+]], {{x[0-9]+}}, #2
-
- store volatile i8* %var2, i8** @addr
-; CHECK: add [[VAR2ADDR:x[0-9]+]], sp, #584
-; CHECK: add [[VAR2ADDR]], [[VAR2ADDR]], #244, lsl #12
-
- %var2plus2 = getelementptr i8* %var2, i32 2
- store volatile i8* %var2plus2, i8** @addr
-; CHECK: add [[VAR2PLUS2:x[0-9]+]], {{x[0-9]+}}, #2
-
- store volatile i8* %var3, i8** @addr
-
- %var3plus2 = getelementptr i8* %var3, i32 2
- store volatile i8* %var3plus2, i8** @addr
-
-; CHECK: add sp, sp, #688
-; CHECK: add sp, sp, #488, lsl #12
-; CHECK: ldr x30, [sp, #488]
-; CHECK: add sp, sp, #496
- ret void
-}
-
-
-@bigspace = global [8 x i64] zeroinitializer
-
-; If temporary registers are allocated for adjustment, they should *not* clobber
-; argument registers.
-define void @test_tempallocation([8 x i64] %val) nounwind {
-; CHECK-LABEL: test_tempallocation:
- %var = alloca i8, i32 1000000
-; CHECK: sub sp, sp,
-
-; Make sure the prologue is reasonably efficient
-; CHECK-NEXT: stp x29, x30, [sp,
-; CHECK-NEXT: stp x25, x26, [sp,
-; CHECK-NEXT: stp x23, x24, [sp,
-; CHECK-NEXT: stp x21, x22, [sp,
-; CHECK-NEXT: stp x19, x20, [sp,
-
-; Make sure we don't trash an argument register
-; CHECK-NOT: movz {{x[0-7],}}
-; CHECK: sub sp, sp,
-
-; CHECK-NOT: movz {{x[0-7],}}
-
-; CHECK: bl use_addr
- call void @use_addr(i8* %var)
-
- store [8 x i64] %val, [8 x i64]* @bigspace
- ret void
-; CHECK: ret
-}
diff --git a/test/CodeGen/AArch64/ldst-opt.ll b/test/CodeGen/AArch64/ldst-opt.ll
new file mode 100644
index 000000000000..e4f4295c8503
--- /dev/null
+++ b/test/CodeGen/AArch64/ldst-opt.ll
@@ -0,0 +1,767 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s
+
+; This file contains tests for the AArch64 load/store optimizer.
+
+%padding = type { i8*, i8*, i8*, i8* }
+%s.word = type { i32, i32 }
+%s.doubleword = type { i64, i32 }
+%s.quadword = type { fp128, i32 }
+%s.float = type { float, i32 }
+%s.double = type { double, i32 }
+%struct.word = type { %padding, %s.word }
+%struct.doubleword = type { %padding, %s.doubleword }
+%struct.quadword = type { %padding, %s.quadword }
+%struct.float = type { %padding, %s.float }
+%struct.double = type { %padding, %s.double }
+
+; Check the following transform:
+;
+; (ldr|str) X, [x0, #32]
+; ...
+; add x0, x0, #32
+; ->
+; (ldr|str) X, [x0, #32]!
+;
+; with X being either w1, x1, s0, d0 or q0.
+
+declare void @bar_word(%s.word*, i32)
+
+define void @load-pre-indexed-word(%struct.word* %ptr) nounwind {
+; CHECK-LABEL: load-pre-indexed-word
+; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1, i32 0
+ %add = load i32* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1
+ tail call void @bar_word(%s.word* %c, i32 %add)
+ ret void
+}
+
+define void @store-pre-indexed-word(%struct.word* %ptr, i32 %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-word
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1, i32 0
+ store i32 %val, i32* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1
+ tail call void @bar_word(%s.word* %c, i32 %val)
+ ret void
+}
+
+declare void @bar_doubleword(%s.doubleword*, i64)
+
+define void @load-pre-indexed-doubleword(%struct.doubleword* %ptr) nounwind {
+; CHECK-LABEL: load-pre-indexed-doubleword
+; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+ %add = load i64* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1
+ tail call void @bar_doubleword(%s.doubleword* %c, i64 %add)
+ ret void
+}
+
+define void @store-pre-indexed-doubleword(%struct.doubleword* %ptr, i64 %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-doubleword
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+ store i64 %val, i64* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1
+ tail call void @bar_doubleword(%s.doubleword* %c, i64 %val)
+ ret void
+}
+
+declare void @bar_quadword(%s.quadword*, fp128)
+
+define void @load-pre-indexed-quadword(%struct.quadword* %ptr) nounwind {
+; CHECK-LABEL: load-pre-indexed-quadword
+; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1, i32 0
+ %add = load fp128* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1
+ tail call void @bar_quadword(%s.quadword* %c, fp128 %add)
+ ret void
+}
+
+define void @store-pre-indexed-quadword(%struct.quadword* %ptr, fp128 %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-quadword
+; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1, i32 0
+ store fp128 %val, fp128* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1
+ tail call void @bar_quadword(%s.quadword* %c, fp128 %val)
+ ret void
+}
+
+declare void @bar_float(%s.float*, float)
+
+define void @load-pre-indexed-float(%struct.float* %ptr) nounwind {
+; CHECK-LABEL: load-pre-indexed-float
+; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1, i32 0
+ %add = load float* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1
+ tail call void @bar_float(%s.float* %c, float %add)
+ ret void
+}
+
+define void @store-pre-indexed-float(%struct.float* %ptr, float %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-float
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1, i32 0
+ store float %val, float* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1
+ tail call void @bar_float(%s.float* %c, float %val)
+ ret void
+}
+
+declare void @bar_double(%s.double*, double)
+
+define void @load-pre-indexed-double(%struct.double* %ptr) nounwind {
+; CHECK-LABEL: load-pre-indexed-double
+; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1, i32 0
+ %add = load double* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1
+ tail call void @bar_double(%s.double* %c, double %add)
+ ret void
+}
+
+define void @store-pre-indexed-double(%struct.double* %ptr, double %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-double
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #32]!
+entry:
+ %a = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1, i32 0
+ store double %val, double* %a, align 4
+ br label %bar
+bar:
+ %c = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1
+ tail call void @bar_double(%s.double* %c, double %val)
+ ret void
+}
+
+; Check the following transform:
+;
+; add x8, x8, #16
+; ...
+; ldr X, [x8]
+; ->
+; ldr X, [x8, #16]!
+;
+; with X being either w0, x0, s0, d0 or q0.
+
+%pre.struct.i32 = type { i32, i32, i32}
+%pre.struct.i64 = type { i32, i64, i64}
+%pre.struct.i128 = type { i32, <2 x i64>, <2 x i64>}
+%pre.struct.float = type { i32, float, float}
+%pre.struct.double = type { i32, double, double}
+
+define i32 @load-pre-indexed-word2(%pre.struct.i32** %this, i1 %cond,
+ %pre.struct.i32* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-word2
+; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i32** %this
+ %gep1 = getelementptr inbounds %pre.struct.i32* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i32* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load i32* %retptr
+ ret i32 %ret
+}
+
+define i64 @load-pre-indexed-doubleword2(%pre.struct.i64** %this, i1 %cond,
+ %pre.struct.i64* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-doubleword2
+; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i64** %this
+ %gep1 = getelementptr inbounds %pre.struct.i64* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i64* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load i64* %retptr
+ ret i64 %ret
+}
+
+define <2 x i64> @load-pre-indexed-quadword2(%pre.struct.i128** %this, i1 %cond,
+ %pre.struct.i128* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-quadword2
+; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i128** %this
+ %gep1 = getelementptr inbounds %pre.struct.i128* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i128* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load <2 x i64>* %retptr
+ ret <2 x i64> %ret
+}
+
+define float @load-pre-indexed-float2(%pre.struct.float** %this, i1 %cond,
+ %pre.struct.float* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-float2
+; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.float** %this
+ %gep1 = getelementptr inbounds %pre.struct.float* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.float* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load float* %retptr
+ ret float %ret
+}
+
+define double @load-pre-indexed-double2(%pre.struct.double** %this, i1 %cond,
+ %pre.struct.double* %load2) nounwind {
+; CHECK-LABEL: load-pre-indexed-double2
+; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.double** %this
+ %gep1 = getelementptr inbounds %pre.struct.double* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.double* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ %ret = load double* %retptr
+ ret double %ret
+}
+
+; Check the following transform:
+;
+; add x8, x8, #16
+; ...
+; str X, [x8]
+; ->
+; str X, [x8, #16]!
+;
+; with X being either w0, x0, s0, d0 or q0.
+
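+; Illustrative sketch, not part of the original test: the store form behaves
+; the same way, with the writeback leaving the bumped address in the base
+; register for later use (the helper name is hypothetical):
+;
+;   declare void @take_word_addr(i32*)
+;
+;   define void @pre_indexed_store_sketch(i32* %base, i32 %val) nounwind {
+;     %addr = getelementptr i32* %base, i64 1      ; base + 4 bytes
+;     store i32 %val, i32* %addr                   ; str with a #4 writeback
+;     call void @take_word_addr(i32* %addr)        ; bumped address reused
+;     ret void
+;   }
+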
+define void @store-pre-indexed-word2(%pre.struct.i32** %this, i1 %cond,
+ %pre.struct.i32* %load2,
+ i32 %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-word2
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i32** %this
+ %gep1 = getelementptr inbounds %pre.struct.i32* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i32* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store i32 %val, i32* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-doubleword2(%pre.struct.i64** %this, i1 %cond,
+ %pre.struct.i64* %load2,
+ i64 %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-doubleword2
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i64** %this
+ %gep1 = getelementptr inbounds %pre.struct.i64* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i64* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store i64 %val, i64* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-quadword2(%pre.struct.i128** %this, i1 %cond,
+ %pre.struct.i128* %load2,
+ <2 x i64> %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-quadword2
+; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.i128** %this
+ %gep1 = getelementptr inbounds %pre.struct.i128* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.i128* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store <2 x i64> %val, <2 x i64>* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-float2(%pre.struct.float** %this, i1 %cond,
+ %pre.struct.float* %load2,
+ float %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-float2
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.float** %this
+ %gep1 = getelementptr inbounds %pre.struct.float* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.float* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store float %val, float* %retptr
+ ret void
+}
+
+define void @store-pre-indexed-double2(%pre.struct.double** %this, i1 %cond,
+ %pre.struct.double* %load2,
+ double %val) nounwind {
+; CHECK-LABEL: store-pre-indexed-double2
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
+ br i1 %cond, label %if.then, label %if.end
+if.then:
+ %load1 = load %pre.struct.double** %this
+ %gep1 = getelementptr inbounds %pre.struct.double* %load1, i64 0, i32 1
+ br label %return
+if.end:
+ %gep2 = getelementptr inbounds %pre.struct.double* %load2, i64 0, i32 2
+ br label %return
+return:
+ %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
+ store double %val, double* %retptr
+ ret void
+}
+
+; Check the following transform:
+;
+; ldr X, [x20]
+; ...
+; add x20, x20, #32
+; ->
+; ldr X, [x20], #32
+;
+; with X being either w0, x0, s0, d0 or q0.
+
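+; Illustrative sketch, not part of the original test: the post-indexed form
+; loads from the current address and then adds the immediate to the base, so
+; "ldr x0, [x20], #32" is "ldr x0, [x20]" plus "add x20, x20, #32" in one
+; instruction. The natural producer is a pointer advanced on every loop
+; iteration (names are hypothetical):
+;
+;   define i64 @post_indexed_load_sketch(i64* %p, i64 %n) nounwind {
+;   entry:
+;     br label %loop
+;   loop:
+;     %ptr = phi i64* [ %p, %entry ], [ %ptr.next, %loop ]
+;     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
+;     %sum = phi i64 [ 0, %entry ], [ %sum.next, %loop ]
+;     %v = load i64* %ptr                        ; ldr with a ", #32" update
+;     %sum.next = add i64 %sum, %v
+;     %ptr.next = getelementptr i64* %ptr, i64 4 ; advance by 32 bytes
+;     %i.next = add i64 %i, 1
+;     %done = icmp eq i64 %i.next, %n
+;     br i1 %done, label %exit, label %loop
+;   exit:
+;     ret i64 %sum.next
+;   }
+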
+define void @load-post-indexed-word(i32* %array, i64 %count) nounwind {
+; CHECK-LABEL: load-post-indexed-word
+; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}], #16
+entry:
+ %gep1 = getelementptr i32* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr i32* %iv2, i64 -1
+ %load = load i32* %gep2
+ call void @use-word(i32 %load)
+ %load2 = load i32* %iv2
+ call void @use-word(i32 %load2)
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr i32* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @load-post-indexed-doubleword(i64* %array, i64 %count) nounwind {
+; CHECK-LABEL: load-post-indexed-doubleword
+; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #32
+entry:
+ %gep1 = getelementptr i64* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr i64* %iv2, i64 -1
+ %load = load i64* %gep2
+ call void @use-doubleword(i64 %load)
+ %load2 = load i64* %iv2
+ call void @use-doubleword(i64 %load2)
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr i64* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @load-post-indexed-quadword(<2 x i64>* %array, i64 %count) nounwind {
+; CHECK-LABEL: load-post-indexed-quadword
+; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}], #64
+entry:
+ %gep1 = getelementptr <2 x i64>* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr <2 x i64>* %iv2, i64 -1
+ %load = load <2 x i64>* %gep2
+ call void @use-quadword(<2 x i64> %load)
+ %load2 = load <2 x i64>* %iv2
+ call void @use-quadword(<2 x i64> %load2)
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr <2 x i64>* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @load-post-indexed-float(float* %array, i64 %count) nounwind {
+; CHECK-LABEL: load-post-indexed-float
+; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}], #16
+entry:
+ %gep1 = getelementptr float* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr float* %iv2, i64 -1
+ %load = load float* %gep2
+ call void @use-float(float %load)
+ %load2 = load float* %iv2
+ call void @use-float(float %load2)
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr float* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @load-post-indexed-double(double* %array, i64 %count) nounwind {
+; CHECK-LABEL: load-post-indexed-double
+; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}], #32
+entry:
+ %gep1 = getelementptr double* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr double* %iv2, i64 -1
+ %load = load double* %gep2
+ call void @use-double(double %load)
+ %load2 = load double* %iv2
+ call void @use-double(double %load2)
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr double* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+; Check the following transform:
+;
+; str X, [x20]
+; ...
+; add x20, x20, #32
+; ->
+; str X, [x20], #32
+;
+; with X being either w0, x0, s0, d0 or q0.
+
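+; Illustrative sketch, not part of the original test: the store variant is
+; symmetric, e.g. a strided fill loop leaves the pointer increment to the str
+; itself (names are hypothetical):
+;
+;   define void @post_indexed_store_sketch(i32* %p, i64 %n, i32 %val) nounwind {
+;   entry:
+;     br label %loop
+;   loop:
+;     %ptr = phi i32* [ %p, %entry ], [ %ptr.next, %loop ]
+;     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
+;     store i32 %val, i32* %ptr                  ; str with a ", #16" update
+;     %ptr.next = getelementptr i32* %ptr, i64 4 ; advance by 16 bytes
+;     %i.next = add i64 %i, 1
+;     %done = icmp eq i64 %i.next, %n
+;     br i1 %done, label %exit, label %loop
+;   exit:
+;     ret void
+;   }
+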
+define void @store-post-indexed-word(i32* %array, i64 %count, i32 %val) nounwind {
+; CHECK-LABEL: store-post-indexed-word
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #16
+entry:
+ %gep1 = getelementptr i32* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr i32* %iv2, i64 -1
+ %load = load i32* %gep2
+ call void @use-word(i32 %load)
+ store i32 %val, i32* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr i32* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-doubleword(i64* %array, i64 %count, i64 %val) nounwind {
+; CHECK-LABEL: store-post-indexed-doubleword
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #32
+entry:
+ %gep1 = getelementptr i64* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr i64* %iv2, i64 -1
+ %load = load i64* %gep2
+ call void @use-doubleword(i64 %load)
+ store i64 %val, i64* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr i64* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-quadword(<2 x i64>* %array, i64 %count, <2 x i64> %val) nounwind {
+; CHECK-LABEL: store-post-indexed-quadword
+; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}], #64
+entry:
+ %gep1 = getelementptr <2 x i64>* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr <2 x i64>* %iv2, i64 -1
+ %load = load <2 x i64>* %gep2
+ call void @use-quadword(<2 x i64> %load)
+ store <2 x i64> %val, <2 x i64>* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr <2 x i64>* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-float(float* %array, i64 %count, float %val) nounwind {
+; CHECK-LABEL: store-post-indexed-float
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #16
+entry:
+ %gep1 = getelementptr float* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr float* %iv2, i64 -1
+ %load = load float* %gep2
+ call void @use-float(float %load)
+ store float %val, float* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr float* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+define void @store-post-indexed-double(double* %array, i64 %count, double %val) nounwind {
+; CHECK-LABEL: store-post-indexed-double
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #32
+entry:
+ %gep1 = getelementptr double* %array, i64 2
+ br label %body
+
+body:
+ %iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
+ %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
+ %gep2 = getelementptr double* %iv2, i64 -1
+ %load = load double* %gep2
+ call void @use-double(double %load)
+ store double %val, double* %iv2
+ %iv.next = add i64 %iv, -4
+ %gep3 = getelementptr double* %iv2, i64 4
+ %cond = icmp eq i64 %iv.next, 0
+ br i1 %cond, label %exit, label %body
+
+exit:
+ ret void
+}
+
+declare void @use-word(i32)
+declare void @use-doubleword(i64)
+declare void @use-quadword(<2 x i64>)
+declare void @use-float(float)
+declare void @use-double(double)
+
+; Check the following transform:
+;
+; (ldr|str) X, [x20]
+; ...
+; sub x20, x20, #16
+; ->
+; (ldr|str) X, [x20], #-16
+;
+; with X being either w0, x0, s0, d0 or q0.
+
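+; Illustrative sketch, not part of the original test: a decrement of the base
+; is simply a negative post-index, so a backwards copy can keep both pointer
+; updates inside the memory operations themselves (names are hypothetical):
+;
+;   define void @post_indexed_sub_sketch(i32* %dst, i32* %src, i64 %n) nounwind {
+;   entry:
+;     br label %loop
+;   loop:
+;     %d = phi i32* [ %dst, %entry ], [ %d.next, %loop ]
+;     %s = phi i32* [ %src, %entry ], [ %s.next, %loop ]
+;     %i = phi i64 [ %n, %entry ], [ %i.next, %loop ]
+;     %v = load i32* %s                          ; ldr with a ", #-4" update
+;     store i32 %v, i32* %d                      ; str with a ", #-4" update
+;     %s.next = getelementptr i32* %s, i64 -1    ; both pointers step backwards
+;     %d.next = getelementptr i32* %d, i64 -1
+;     %i.next = add i64 %i, -1
+;     %done = icmp eq i64 %i.next, 0
+;     br i1 %done, label %exit, label %loop
+;   exit:
+;     ret void
+;   }
+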
+define void @post-indexed-sub-word(i32* %a, i32* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-word
+; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}], #-8
+; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #-8
+ br label %for.body
+for.body:
+ %phi1 = phi i32* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi i32* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
+ %gep1 = getelementptr i32* %phi1, i64 -1
+ %load1 = load i32* %gep1
+ %gep2 = getelementptr i32* %phi2, i64 -1
+ store i32 %load1, i32* %gep2
+ %load2 = load i32* %phi1
+ store i32 %load2, i32* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr i32* %phi2, i64 -2
+ %gep4 = getelementptr i32* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-doubleword(i64* %a, i64* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-doubleword
+; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #-16
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #-16
+ br label %for.body
+for.body:
+ %phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
+ %gep1 = getelementptr i64* %phi1, i64 -1
+ %load1 = load i64* %gep1
+ %gep2 = getelementptr i64* %phi2, i64 -1
+ store i64 %load1, i64* %gep2
+ %load2 = load i64* %phi1
+ store i64 %load2, i64* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr i64* %phi2, i64 -2
+ %gep4 = getelementptr i64* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-quadword(<2 x i64>* %a, <2 x i64>* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-quadword
+; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}], #-32
+; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}], #-32
+ br label %for.body
+for.body:
+ %phi1 = phi <2 x i64>* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi <2 x i64>* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
+ %gep1 = getelementptr <2 x i64>* %phi1, i64 -1
+ %load1 = load <2 x i64>* %gep1
+ %gep2 = getelementptr <2 x i64>* %phi2, i64 -1
+ store <2 x i64> %load1, <2 x i64>* %gep2
+ %load2 = load <2 x i64>* %phi1
+ store <2 x i64> %load2, <2 x i64>* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr <2 x i64>* %phi2, i64 -2
+ %gep4 = getelementptr <2 x i64>* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-float(float* %a, float* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-float
+; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}], #-8
+; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #-8
+ br label %for.body
+for.body:
+ %phi1 = phi float* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi float* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
+ %gep1 = getelementptr float* %phi1, i64 -1
+ %load1 = load float* %gep1
+ %gep2 = getelementptr float* %phi2, i64 -1
+ store float %load1, float* %gep2
+ %load2 = load float* %phi1
+ store float %load2, float* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr float* %phi2, i64 -2
+ %gep4 = getelementptr float* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
+
+define void @post-indexed-sub-double(double* %a, double* %b, i64 %count) nounwind {
+; CHECK-LABEL: post-indexed-sub-double
+; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}], #-16
+; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #-16
+ br label %for.body
+for.body:
+ %phi1 = phi double* [ %gep4, %for.body ], [ %b, %0 ]
+ %phi2 = phi double* [ %gep3, %for.body ], [ %a, %0 ]
+ %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
+ %gep1 = getelementptr double* %phi1, i64 -1
+ %load1 = load double* %gep1
+ %gep2 = getelementptr double* %phi2, i64 -1
+ store double %load1, double* %gep2
+ %load2 = load double* %phi1
+ store double %load2, double* %phi2
+ %dec.i = add nsw i64 %i, -1
+ %gep3 = getelementptr double* %phi2, i64 -2
+ %gep4 = getelementptr double* %phi1, i64 -2
+ %cond = icmp sgt i64 %dec.i, 0
+ br i1 %cond, label %for.body, label %end
+end:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/ldst-regoffset.ll b/test/CodeGen/AArch64/ldst-regoffset.ll
index db30fd915fb0..e2fa08bcce69 100644
--- a/test/CodeGen/AArch64/ldst-regoffset.ll
+++ b/test/CodeGen/AArch64/ldst-regoffset.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var_8bit = global i8 0
@@ -9,14 +9,14 @@
@var_float = global float 0.0
@var_double = global double 0.0
-define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
+define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_8bit:
%addr8_sxtw = getelementptr i8* %base, i32 %off32
%val8_sxtw = load volatile i8* %addr8_sxtw
%val32_signed = sext i8 %val8_sxtw to i32
store volatile i32 %val32_signed, i32* @var_32bit
-; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{[wx][0-9]+}}, sxtw]
%addr_lsl = getelementptr i8* %base, i64 %off64
%val8_lsl = load volatile i8* %addr_lsl
@@ -31,20 +31,20 @@ define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
%val8_uxtw = load volatile i8* %addr_uxtw
%newval8 = add i8 %val8_uxtw, 1
store volatile i8 %newval8, i8* @var_8bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
ret void
}
-define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
+define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_16bit:
%addr8_sxtwN = getelementptr i16* %base, i32 %off32
%val8_sxtwN = load volatile i16* %addr8_sxtwN
%val32_signed = sext i16 %val8_sxtwN to i32
store volatile i32 %val32_signed, i32* @var_32bit
-; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #1]
+; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #1]
%addr_lslN = getelementptr i16* %base, i64 %off64
%val8_lslN = load volatile i16* %addr_lslN
@@ -59,7 +59,7 @@ define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
%val8_uxtw = load volatile i16* %addr_uxtw
%newval8 = add i16 %val8_uxtw, 1
store volatile i16 %newval8, i16* @var_16bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
%base_sxtw = ptrtoint i16* %base to i64
%offset_sxtw = sext i32 %off32 to i64
@@ -68,7 +68,7 @@ define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
%val16_sxtw = load volatile i16* %addr_sxtw
%val64_signed = sext i16 %val16_sxtw to i64
store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{[wx][0-9]+}}, sxtw]
%base_lsl = ptrtoint i16* %base to i64
@@ -87,17 +87,17 @@ define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
%val32 = load volatile i32* @var_32bit
%val16_trunc32 = trunc i32 %val32 to i16
store volatile i16 %val16_trunc32, i16* %addr_uxtwN
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #1]
ret void
}
-define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
+define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_32bit:
%addr_sxtwN = getelementptr i32* %base, i32 %off32
%val_sxtwN = load volatile i32* %addr_sxtwN
store volatile i32 %val_sxtwN, i32* @var_32bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #2]
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
%addr_lslN = getelementptr i32* %base, i64 %off64
%val_lslN = load volatile i32* %addr_lslN
@@ -111,7 +111,7 @@ define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
%val_uxtw = load volatile i32* %addr_uxtw
%newval8 = add i32 %val_uxtw, 1
store volatile i32 %newval8, i32* @var_32bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
%base_sxtw = ptrtoint i32* %base to i64
@@ -121,7 +121,7 @@ define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
%val16_sxtw = load volatile i32* %addr_sxtw
%val64_signed = sext i32 %val16_sxtw to i64
store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
%base_lsl = ptrtoint i32* %base to i64
@@ -139,17 +139,17 @@ define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
%val32 = load volatile i32* @var_32bit
store volatile i32 %val32, i32* %addr_uxtwN
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #2]
ret void
}
-define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
+define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_64bit:
%addr_sxtwN = getelementptr i64* %base, i32 %off32
%val_sxtwN = load volatile i64* %addr_sxtwN
store volatile i64 %val_sxtwN, i64* @var_64bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #3]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
%addr_lslN = getelementptr i64* %base, i64 %off64
%val_lslN = load volatile i64* %addr_lslN
@@ -163,7 +163,7 @@ define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
%val8_uxtw = load volatile i64* %addr_uxtw
%newval8 = add i64 %val8_uxtw, 1
store volatile i64 %newval8, i64* @var_64bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
%base_sxtw = ptrtoint i64* %base to i64
%offset_sxtw = sext i32 %off32 to i64
@@ -171,7 +171,7 @@ define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
%addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
%val64_sxtw = load volatile i64* %addr_sxtw
store volatile i64 %val64_sxtw, i64* @var_64bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
%base_lsl = ptrtoint i64* %base to i64
%addrint_lsl = add i64 %base_lsl, %off64
@@ -187,17 +187,17 @@ define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
%val64 = load volatile i64* @var_64bit
store volatile i64 %val64, i64* %addr_uxtwN
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #3]
ret void
}
-define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
+define void @ldst_float(float* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_float:
%addr_sxtwN = getelementptr float* %base, i32 %off32
%val_sxtwN = load volatile float* %addr_sxtwN
store volatile float %val_sxtwN, float* @var_float
-; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #2]
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%addr_lslN = getelementptr float* %base, i64 %off64
@@ -212,7 +212,7 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
%addr_uxtw = inttoptr i64 %addrint1_uxtw to float*
%val_uxtw = load volatile float* %addr_uxtw
store volatile float %val_uxtw, float* @var_float
-; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%base_sxtw = ptrtoint float* %base to i64
@@ -221,7 +221,7 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
%addr_sxtw = inttoptr i64 %addrint_sxtw to float*
%val64_sxtw = load volatile float* %addr_sxtw
store volatile float %val64_sxtw, float* @var_float
-; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%base_lsl = ptrtoint float* %base to i64
@@ -239,18 +239,18 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to float*
%val64 = load volatile float* @var_float
store volatile float %val64, float* %addr_uxtwN
-; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
+; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #2]
; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
ret void
}
-define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
+define void @ldst_double(double* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_double:
%addr_sxtwN = getelementptr double* %base, i32 %off32
%val_sxtwN = load volatile double* %addr_sxtwN
store volatile double %val_sxtwN, double* @var_double
-; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #3]
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%addr_lslN = getelementptr double* %base, i64 %off64
@@ -265,7 +265,7 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
%addr_uxtw = inttoptr i64 %addrint1_uxtw to double*
%val_uxtw = load volatile double* %addr_uxtw
store volatile double %val_uxtw, double* @var_double
-; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%base_sxtw = ptrtoint double* %base to i64
@@ -274,7 +274,7 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
%addr_sxtw = inttoptr i64 %addrint_sxtw to double*
%val64_sxtw = load volatile double* %addr_sxtw
store volatile double %val64_sxtw, double* @var_double
-; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%base_lsl = ptrtoint double* %base to i64
@@ -292,26 +292,26 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to double*
%val64 = load volatile double* @var_double
store volatile double %val64, double* %addr_uxtwN
-; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
+; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #3]
; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
ret void
}
-define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
+define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_128bit:
%addr_sxtwN = getelementptr fp128* %base, i32 %off32
%val_sxtwN = load volatile fp128* %addr_sxtwN
store volatile fp128 %val_sxtwN, fp128* %base
-; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
-; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
%addr_lslN = getelementptr fp128* %base, i64 %off64
%val_lslN = load volatile fp128* %addr_lslN
store volatile fp128 %val_lslN, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #4]
-; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
%addrint_uxtw = ptrtoint fp128* %base to i64
%offset_uxtw = zext i32 %off32 to i64
@@ -319,8 +319,8 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%addr_uxtw = inttoptr i64 %addrint1_uxtw to fp128*
%val_uxtw = load volatile fp128* %addr_uxtw
store volatile fp128 %val_uxtw, fp128* %base
-; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
-; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
%base_sxtw = ptrtoint fp128* %base to i64
%offset_sxtw = sext i32 %off32 to i64
@@ -328,8 +328,8 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%addr_sxtw = inttoptr i64 %addrint_sxtw to fp128*
%val64_sxtw = load volatile fp128* %addr_sxtw
store volatile fp128 %val64_sxtw, fp128* %base
-; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
-; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
%base_lsl = ptrtoint fp128* %base to i64
%addrint_lsl = add i64 %base_lsl, %off64
@@ -337,7 +337,7 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%val64_lsl = load volatile fp128* %addr_lsl
store volatile fp128 %val64_lsl, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
-; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
%base_uxtwN = ptrtoint fp128* %base to i64
%offset_uxtwN = zext i32 %off32 to i64
@@ -346,7 +346,7 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to fp128*
%val64 = load volatile fp128* %base
store volatile fp128 %val64, fp128* %addr_uxtwN
-; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #4]
-; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #4]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
ret void
}
diff --git a/test/CodeGen/AArch64/ldst-unscaledimm.ll b/test/CodeGen/AArch64/ldst-unscaledimm.ll
index bea5bb5d6dd6..1de8443d9ed2 100644
--- a/test/CodeGen/AArch64/ldst-unscaledimm.ll
+++ b/test/CodeGen/AArch64/ldst-unscaledimm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var_8bit = global i8 0
@@ -160,7 +160,7 @@ define void @ldst_32bit() {
%val64_unsigned = zext i32 %val32_zext to i64
store volatile i64 %val64_unsigned, i64* @var_64bit
; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
; Sign-extension to 64-bits
%addr32_8_sext = getelementptr i8* %addr_8bit, i64 -12
@@ -169,7 +169,7 @@ define void @ldst_32bit() {
%val64_signed = sext i32 %val32_sext to i64
store volatile i64 %val64_signed, i64* @var_64bit
; CHECK: ldursw {{x[0-9]+}}, [{{x[0-9]+}}, #-12]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
; Truncation from 64-bits
%addr64_8_trunc = getelementptr i8* %addr_8bit, i64 255
diff --git a/test/CodeGen/AArch64/ldst-unsignedimm.ll b/test/CodeGen/AArch64/ldst-unsignedimm.ll
index 44c1586e1ec7..e171d22b6c7c 100644
--- a/test/CodeGen/AArch64/ldst-unsignedimm.ll
+++ b/test/CodeGen/AArch64/ldst-unsignedimm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var_8bit = global i8 0
@@ -20,25 +20,25 @@ define void @ldst_8bit() {
%val32_signed = sext i8 %val8_sext32 to i32
store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: adrp {{x[0-9]+}}, var_8bit
-; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
; match a zero-extending load volatile 8-bit -> 32-bit
%val8_zext32 = load volatile i8* @var_8bit
%val32_unsigned = zext i8 %val8_zext32 to i32
store volatile i32 %val32_unsigned, i32* @var_32bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
; match an any-extending load volatile 8-bit -> 32-bit
%val8_anyext = load volatile i8* @var_8bit
%newval8 = add i8 %val8_anyext, 1
store volatile i8 %newval8, i8* @var_8bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
; match a sign-extending load volatile 8-bit -> 64-bit
%val8_sext64 = load volatile i8* @var_8bit
%val64_signed = sext i8 %val8_sext64 to i64
store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
; match a zero-extending load volatile 8-bit -> 64-bit.
; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
@@ -46,19 +46,19 @@ define void @ldst_8bit() {
%val8_zext64 = load volatile i8* @var_8bit
%val64_unsigned = zext i8 %val8_zext64 to i64
store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
; truncating store volatile 32-bits to 8-bits
%val32 = load volatile i32* @var_32bit
%val8_trunc32 = trunc i32 %val32 to i8
store volatile i8 %val8_trunc32, i8* @var_8bit
-; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
; truncating store volatile 64-bits to 8-bits
%val64 = load volatile i64* @var_64bit
%val8_trunc64 = trunc i64 %val64 to i8
store volatile i8 %val8_trunc64, i8* @var_8bit
-; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_8bit]
+; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
ret void
}
@@ -74,25 +74,25 @@ define void @ldst_16bit() {
%val32_signed = sext i16 %val16_sext32 to i32
store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: adrp {{x[0-9]+}}, var_16bit
-; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
; match a zero-extending load volatile 16-bit -> 32-bit
%val16_zext32 = load volatile i16* @var_16bit
%val32_unsigned = zext i16 %val16_zext32 to i32
store volatile i32 %val32_unsigned, i32* @var_32bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
; match an any-extending load volatile 16-bit -> 32-bit
%val16_anyext = load volatile i16* @var_16bit
%newval16 = add i16 %val16_anyext, 1
store volatile i16 %newval16, i16* @var_16bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
; match a sign-extending load volatile 16-bit -> 64-bit
%val16_sext64 = load volatile i16* @var_16bit
%val64_signed = sext i16 %val16_sext64 to i64
store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
; match a zero-extending load volatile 16-bit -> 64-bit.
; This uses the fact that ldrh w0, [x0] will zero out the high 32-bits
@@ -100,19 +100,19 @@ define void @ldst_16bit() {
%val16_zext64 = load volatile i16* @var_16bit
%val64_unsigned = zext i16 %val16_zext64 to i64
store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
; truncating store volatile 32-bits to 16-bits
%val32 = load volatile i32* @var_32bit
%val16_trunc32 = trunc i32 %val32 to i16
store volatile i16 %val16_trunc32, i16* @var_16bit
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
; truncating store volatile 64-bits to 16-bits
%val64 = load volatile i64* @var_64bit
%val16_trunc64 = trunc i64 %val64 to i16
store volatile i16 %val16_trunc64, i16* @var_16bit
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_16bit]
+; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
ret void
}
@@ -124,29 +124,29 @@ define void @ldst_32bit() {
%val32_noext = load volatile i32* @var_32bit
store volatile i32 %val32_noext, i32* @var_32bit
; CHECK: adrp {{x[0-9]+}}, var_32bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
; Zero-extension to 64-bits
%val32_zext = load volatile i32* @var_32bit
%val64_unsigned = zext i32 %val32_zext to i64
store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
; Sign-extension to 64-bits
%val32_sext = load volatile i32* @var_32bit
%val64_signed = sext i32 %val32_sext to i64
store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
+; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
; Truncation from 64-bits
%val64_trunc = load volatile i64* @var_64bit
%val32_trunc = trunc i64 %val64_trunc to i32
store volatile i32 %val32_trunc, i32* @var_32bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_32bit]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
ret void
}
@@ -165,7 +165,7 @@ define void @ldst_complex_offsets() {
; CHECK: ldst_complex_offsets
%arr8_addr = load volatile i8** @arr8
; CHECK: adrp {{x[0-9]+}}, arr8
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr8]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr8]
%arr8_sub1_addr = getelementptr i8* %arr8_addr, i64 1
%arr8_sub1 = load volatile i8* %arr8_sub1_addr
@@ -180,7 +180,7 @@ define void @ldst_complex_offsets() {
%arr16_addr = load volatile i16** @arr16
; CHECK: adrp {{x[0-9]+}}, arr16
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr16]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr16]
%arr16_sub1_addr = getelementptr i16* %arr16_addr, i64 1
%arr16_sub1 = load volatile i16* %arr16_sub1_addr
@@ -195,7 +195,7 @@ define void @ldst_complex_offsets() {
%arr32_addr = load volatile i32** @arr32
; CHECK: adrp {{x[0-9]+}}, arr32
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr32]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr32]
%arr32_sub1_addr = getelementptr i32* %arr32_addr, i64 1
%arr32_sub1 = load volatile i32* %arr32_sub1_addr
@@ -210,7 +210,7 @@ define void @ldst_complex_offsets() {
%arr64_addr = load volatile i64** @arr64
; CHECK: adrp {{x[0-9]+}}, arr64
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:arr64]
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr64]
%arr64_sub1_addr = getelementptr i64* %arr64_addr, i64 1
%arr64_sub1 = load volatile i64* %arr64_sub1_addr
@@ -230,11 +230,11 @@ define void @ldst_float() {
%valfp = load volatile float* @var_float
; CHECK: adrp {{x[0-9]+}}, var_float
-; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float]
; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
store volatile float %valfp, float* @var_float
-; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float]
; CHECK-NOFP-NOT: str {{s[0-9]+}},
ret void
@@ -245,11 +245,11 @@ define void @ldst_double() {
%valfp = load volatile double* @var_double
; CHECK: adrp {{x[0-9]+}}, var_double
-; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double]
; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
store volatile double %valfp, double* @var_double
-; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double]
; CHECK-NOFP-NOT: str {{d[0-9]+}},
ret void
diff --git a/test/CodeGen/AArch64/lit.local.cfg b/test/CodeGen/AArch64/lit.local.cfg
index 9a66a00189ea..125995cebf11 100644
--- a/test/CodeGen/AArch64/lit.local.cfg
+++ b/test/CodeGen/AArch64/lit.local.cfg
@@ -1,4 +1,10 @@
-targets = set(config.root.targets_to_build.split())
-if not 'AArch64' in targets:
+import re
+
+config.suffixes = ['.ll']
+
+if not 'AArch64' in config.root.targets:
config.unsupported = True
+# For now we don't test arm64-win32.
+if re.search(r'cygwin|mingw32|win32', config.target_triple):
+ config.unsupported = True
diff --git a/test/CodeGen/AArch64/literal_pools.ll b/test/CodeGen/AArch64/literal_pools.ll
deleted file mode 100644
index fc33aee10d84..000000000000
--- a/test/CodeGen/AArch64/literal_pools.ll
+++ /dev/null
@@ -1,103 +0,0 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK-LARGE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-LARGE %s
-
-@var32 = global i32 0
-@var64 = global i64 0
-
-define void @foo() {
-; CHECK-LABEL: foo:
- %val32 = load i32* @var32
- %val64 = load i64* @var64
-
- %val32_lit32 = and i32 %val32, 123456785
- store volatile i32 %val32_lit32, i32* @var32
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{w[0-9]+}}, [x[[LITADDR]]]
-
- %val64_lit32 = and i64 %val64, 305402420
- store volatile i64 %val64_lit32, i64* @var64
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{w[0-9]+}}, [x[[LITADDR]]]
-
- %val64_lit32signed = and i64 %val64, -12345678
- store volatile i64 %val64_lit32signed, i64* @var64
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldrsw {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldrsw {{x[0-9]+}}, [x[[LITADDR]]]
-
- %val64_lit64 = and i64 %val64, 1234567898765432
- store volatile i64 %val64_lit64, i64* @var64
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
-; CHECK: ldr {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{x[0-9]+}}, [x[[LITADDR]]]
-
- ret void
-}
-
-@varfloat = global float 0.0
-@vardouble = global double 0.0
-
-define void @floating_lits() {
-; CHECK-LABEL: floating_lits:
-
- %floatval = load float* @varfloat
- %newfloat = fadd float %floatval, 128.0
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
-; CHECK-LARGE: fadd
-; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
-; CHECK-NOFP-LARGE-NOT: fadd
-
- store float %newfloat, float* @varfloat
-
- %doubleval = load double* @vardouble
- %newdouble = fadd double %doubleval, 129.0
-; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, [[LIT128]]
-; CHECK: fadd {{d[0-9]+}}, {{d[0-9]+}}, [[LIT129]]
-; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
-; CHECK-NOFP-NOT: fadd
-
-; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
-; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
-; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
-; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
-
- store double %newdouble, double* @vardouble
-
- ret void
-}
diff --git a/test/CodeGen/AArch64/literal_pools_float.ll b/test/CodeGen/AArch64/literal_pools_float.ll
new file mode 100644
index 000000000000..e53b8b62c6f3
--- /dev/null
+++ b/test/CodeGen/AArch64/literal_pools_float.ll
@@ -0,0 +1,46 @@
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -mcpu=cyclone | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -code-model=large -mcpu=cyclone | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-LARGE %s
+
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+
+define void @floating_lits() {
+; CHECK-LABEL: floating_lits:
+
+ %floatval = load float* @varfloat
+ %newfloat = fadd float %floatval, 128.0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
+
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
+; CHECK-LARGE: fadd
+; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
+; CHECK-NOFP-LARGE-NOT: fadd
+
+ store float %newfloat, float* @varfloat
+
+ %doubleval = load double* @vardouble
+ %newdouble = fadd double %doubleval, 129.0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
+; CHECK-NOFP-NOT: fadd
+
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI[0-9]+_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
+; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
+
+ store double %newdouble, double* @vardouble
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/local_vars.ll b/test/CodeGen/AArch64/local_vars.ll
index b5cef859e35f..2f5b9f2adb48 100644
--- a/test/CodeGen/AArch64/local_vars.ll
+++ b/test/CodeGen/AArch64/local_vars.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -O0 | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -O0 -disable-fp-elim | FileCheck -check-prefix CHECK-WITHFP %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -disable-fp-elim | FileCheck -check-prefix CHECK-WITHFP-ARM64 %s
; Make sure a reasonably sane prologue and epilogue are
; generated. This test is not robust in the face of a frame-handling
@@ -16,7 +16,7 @@
declare void @foo()
define void @trivial_func() nounwind {
-; CHECK: trivial_func: // @trivial_func
+; CHECK-LABEL: trivial_func: // @trivial_func
; CHECK-NEXT: // BB#0
; CHECK-NEXT: ret
@@ -24,11 +24,14 @@ define void @trivial_func() nounwind {
}
define void @trivial_fp_func() {
-; CHECK-WITHFP-LABEL: trivial_fp_func:
+; CHECK-WITHFP-AARCH64-LABEL: trivial_fp_func:
+; CHECK-WITHFP-AARCH64: sub sp, sp, #16
+; CHECK-WITHFP-AARCH64: stp x29, x30, [sp]
+; CHECK-WITHFP-AARCH64-NEXT: mov x29, sp
-; CHECK-WITHFP: sub sp, sp, #16
-; CHECK-WITHFP: stp x29, x30, [sp]
-; CHECK-WITHFP-NEXT: mov x29, sp
+; CHECK-WITHFP-ARM64-LABEL: trivial_fp_func:
+; CHECK-WITHFP-ARM64: stp x29, x30, [sp, #-16]!
+; CHECK-WITHFP-ARM64-NEXT: mov x29, sp
; Don't really care, but it would be a Bad Thing if this came after the epilogue.
; CHECK: bl foo
@@ -48,10 +51,10 @@ define void @stack_local() {
%val = load i64* @var
store i64 %val, i64* %local_var
-; CHECK: str {{x[0-9]+}}, [sp, #{{[0-9]+}}]
+; CHECK-DAG: str {{x[0-9]+}}, [sp, #{{[0-9]+}}]
store i64* %local_var, i64** @local_addr
-; CHECK: add {{x[0-9]+}}, sp, #{{[0-9]+}}
+; CHECK-DAG: add {{x[0-9]+}}, sp, #{{[0-9]+}}
ret void
}
diff --git a/test/CodeGen/AArch64/logical-imm.ll b/test/CodeGen/AArch64/logical-imm.ll
index e04bb510ebf2..a5e4a9956de7 100644
--- a/test/CodeGen/AArch64/logical-imm.ll
+++ b/test/CodeGen/AArch64/logical-imm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
@var32 = global i32 0
@var64 = global i64 0
diff --git a/test/CodeGen/AArch64/logical_shifted_reg.ll b/test/CodeGen/AArch64/logical_shifted_reg.ll
index a08ba20c7f11..b249d72e0f90 100644
--- a/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -O0 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
@var1_32 = global i32 0
@var2_32 = global i32 0
@@ -6,7 +6,7 @@
@var1_64 = global i64 0
@var2_64 = global i64 0
-define void @logical_32bit() {
+define void @logical_32bit() minsize {
; CHECK-LABEL: logical_32bit:
%val1 = load i32* @var1_32
%val2 = load i32* @var2_32
@@ -96,7 +96,7 @@ define void @logical_32bit() {
ret void
}
-define void @logical_64bit() {
+define void @logical_64bit() minsize {
; CHECK-LABEL: logical_64bit:
%val1 = load i64* @var1_64
%val2 = load i64* @var2_64
diff --git a/test/CodeGen/AArch64/mature-mc-support.ll b/test/CodeGen/AArch64/mature-mc-support.ll
new file mode 100644
index 000000000000..276c54d2cc4e
--- /dev/null
+++ b/test/CodeGen/AArch64/mature-mc-support.ll
@@ -0,0 +1,12 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
+
+; RUN: not llc -mtriple=aarch64-pc-linux < %s > /dev/null 2> %t3
+; RUN: FileCheck %s < %t3
+
+; RUN: not llc -mtriple=aarch64-pc-linux -filetype=obj < %s > /dev/null 2> %t4
+; RUN: FileCheck %s < %t4
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/AArch64/memcpy-f128.ll b/test/CodeGen/AArch64/memcpy-f128.ll
new file mode 100644
index 000000000000..76db2974ab4d
--- /dev/null
+++ b/test/CodeGen/AArch64/memcpy-f128.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
+
+%structA = type { i128 }
+@stubA = internal unnamed_addr constant %structA zeroinitializer, align 8
+
+; Make sure we don't hit llvm_unreachable.
+
+define void @test1() {
+; CHECK-LABEL: @test1
+; CHECK: adrp
+; CHECK: ldr q0
+; CHECK: str q0
+; CHECK: ret
+entry:
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* bitcast (%structA* @stubA to i8*), i64 48, i32 8, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
diff --git a/test/CodeGen/AArch64/movw-consts.ll b/test/CodeGen/AArch64/movw-consts.ll
index 38e37db7b58c..93c181271755 100644
--- a/test/CodeGen/AArch64/movw-consts.ll
+++ b/test/CodeGen/AArch64/movw-consts.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -O0 < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefix=CHECK
define i64 @test0() {
; CHECK-LABEL: test0:
@@ -9,43 +9,43 @@ define i64 @test0() {
define i64 @test1() {
; CHECK-LABEL: test1:
-; CHECK: movz x0, #1
+; CHECK: orr w0, wzr, #0x1
ret i64 1
}
define i64 @test2() {
; CHECK-LABEL: test2:
-; CHECK: movz x0, #65535
+; CHECK: orr w0, wzr, #0xffff
ret i64 65535
}
define i64 @test3() {
; CHECK-LABEL: test3:
-; CHECK: movz x0, #1, lsl #16
+; CHECK: orr w0, wzr, #0x10000
ret i64 65536
}
define i64 @test4() {
; CHECK-LABEL: test4:
-; CHECK: movz x0, #65535, lsl #16
+; CHECK: orr w0, wzr, #0xffff0000
ret i64 4294901760
}
define i64 @test5() {
; CHECK-LABEL: test5:
-; CHECK: movz x0, #1, lsl #32
+; CHECK: orr x0, xzr, #0x100000000
ret i64 4294967296
}
define i64 @test6() {
; CHECK-LABEL: test6:
-; CHECK: movz x0, #65535, lsl #32
+; CHECK: orr x0, xzr, #0xffff00000000
ret i64 281470681743360
}
define i64 @test7() {
; CHECK-LABEL: test7:
-; CHECK: movz x0, #1, lsl #48
+; CHECK: orr x0, xzr, #0x1000000000000
ret i64 281474976710656
}
@@ -53,7 +53,7 @@ define i64 @test7() {
; couldn't. Useful even for i64
define i64 @test8() {
; CHECK-LABEL: test8:
-; CHECK: movn w0, #60875
+; CHECK: movn w0, #{{60875|0xedcb}}
ret i64 4294906420
}
@@ -65,7 +65,7 @@ define i64 @test9() {
define i64 @test10() {
; CHECK-LABEL: test10:
-; CHECK: movn x0, #60875, lsl #16
+; CHECK: movn x0, #{{60875|0xedcb}}, lsl #16
ret i64 18446744069720047615
}
@@ -75,35 +75,35 @@ define i64 @test10() {
define void @test11() {
; CHECK-LABEL: test11:
-; CHECK: mov {{w[0-9]+}}, wzr
+; CHECK: str wzr
store i32 0, i32* @var32
ret void
}
define void @test12() {
; CHECK-LABEL: test12:
-; CHECK: movz {{w[0-9]+}}, #1
+; CHECK: orr {{w[0-9]+}}, wzr, #0x1
store i32 1, i32* @var32
ret void
}
define void @test13() {
; CHECK-LABEL: test13:
-; CHECK: movz {{w[0-9]+}}, #65535
+; CHECK: orr {{w[0-9]+}}, wzr, #0xffff
store i32 65535, i32* @var32
ret void
}
define void @test14() {
; CHECK-LABEL: test14:
-; CHECK: movz {{w[0-9]+}}, #1, lsl #16
+; CHECK: orr {{w[0-9]+}}, wzr, #0x10000
store i32 65536, i32* @var32
ret void
}
define void @test15() {
; CHECK-LABEL: test15:
-; CHECK: movz {{w[0-9]+}}, #65535, lsl #16
+; CHECK: orr {{w[0-9]+}}, wzr, #0xffff0000
store i32 4294901760, i32* @var32
ret void
}
@@ -119,6 +119,6 @@ define i64 @test17() {
; CHECK-LABEL: test17:
; Mustn't MOVN w0 here.
-; CHECK: movn x0, #2
+; CHECK: orr x0, xzr, #0xfffffffffffffffd
ret i64 -3
}
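
Every check rewritten above to expect an orr from wzr/xzr involves a constant (0x1, 0xffff, 0x10000, 0xffff0000, 0x100000000, 0xffff00000000, 0x1000000000000, 0xfffffffffffffffd) that forms a contiguous or repeating run of set bits, i.e. an encodable AArch64 logical immediate, so it can be materialised in one instruction without a movz/movn sequence. As an additional, hypothetical illustration not taken from the patch, a repeating 16-bit pattern is expected to follow the same rule:

; 0x00ff00ff00ff00ff repeats a 16-bit element, so it is a valid logical
; immediate and should not need a movz/movk chain.
define i64 @repeating_bitmask() {
  ret i64 71777214294589695 ; 0x00ff00ff00ff00ff
}
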
diff --git a/test/CodeGen/AArch64/movw-shift-encoding.ll b/test/CodeGen/AArch64/movw-shift-encoding.ll
index ec133bd706b1..178fccce333b 100644
--- a/test/CodeGen/AArch64/movw-shift-encoding.ll
+++ b/test/CodeGen/AArch64/movw-shift-encoding.ll
@@ -7,8 +7,9 @@
define i32* @get_var() {
ret i32* @var
-; CHECK: movz x0, #:abs_g3:var // encoding: [A,A,0xe0'A',0xd2'A']
-; CHECK: movk x0, #:abs_g2_nc:var // encoding: [A,A,0xc0'A',0xf2'A']
-; CHECK: movk x0, #:abs_g1_nc:var // encoding: [A,A,0xa0'A',0xf2'A']
-; CHECK: movk x0, #:abs_g0_nc:var // encoding: [A,A,0x80'A',0xf2'A']
+
+; CHECK: movz x0, #:abs_g3:var // encoding: [0bAAA00000,A,0b111AAAAA,0xd2]
+; CHECK: movk x0, #:abs_g2_nc:var // encoding: [0bAAA00000,A,0b110AAAAA,0xf2]
+; CHECK: movk x0, #:abs_g1_nc:var // encoding: [0bAAA00000,A,0b101AAAAA,0xf2]
+; CHECK: movk x0, #:abs_g0_nc:var // encoding: [0bAAA00000,A,0b100AAAAA,0xf2]
}
diff --git a/test/CodeGen/AArch64/mul-lohi.ll b/test/CodeGen/AArch64/mul-lohi.ll
new file mode 100644
index 000000000000..0689fbdcc078
--- /dev/null
+++ b/test/CodeGen/AArch64/mul-lohi.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mtriple=arm64-apple-ios7.0 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm64_be-linux-gnu %s -o - | FileCheck --check-prefix=CHECK-BE %s
+
+define i128 @test_128bitmul(i128 %lhs, i128 %rhs) {
+; CHECK-LABEL: test_128bitmul:
+; CHECK-DAG: umulh [[CARRY:x[0-9]+]], x0, x2
+; CHECK-DAG: madd [[PART1:x[0-9]+]], x0, x3, [[CARRY]]
+; CHECK: madd x1, x1, x2, [[PART1]]
+; CHECK: mul x0, x0, x2
+
+; CHECK-BE-LABEL: test_128bitmul:
+; CHECK-BE-DAG: umulh [[CARRY:x[0-9]+]], x1, x3
+; CHECK-BE-DAG: madd [[PART1:x[0-9]+]], x1, x2, [[CARRY]]
+; CHECK-BE: madd x0, x0, x3, [[PART1]]
+; CHECK-BE: mul x1, x1, x3
+
+ %prod = mul i128 %lhs, %rhs
+ ret i128 %prod
+}
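
The CHECK lines above spell out the schoolbook split of a 128-bit product into 64-bit halves: the low half is lo(a)*lo(b) (the final mul), and the high half is umulh(lo(a), lo(b)) plus lo(a)*hi(b) plus hi(a)*lo(b) (the two madd instructions); the CHECK-BE lines show the same computation with the half registers swapped for big-endian argument passing. The umulh step is simply the top half of a widened multiply; a small sketch of that correspondence, as a hypothetical helper that the backend is expected to select to a single umulh:

define i64 @high_half(i64 %x, i64 %y) {
  ; Widen to 128 bits, multiply, and keep only the top 64 bits.
  %xw = zext i64 %x to i128
  %yw = zext i64 %y to i128
  %p = mul i128 %xw, %yw
  %hi = lshr i128 %p, 64
  %r = trunc i128 %hi to i64
  ret i64 %r
}
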
diff --git a/test/CodeGen/AArch64/mul_pow2.ll b/test/CodeGen/AArch64/mul_pow2.ll
new file mode 100644
index 000000000000..efc0ec8c40e3
--- /dev/null
+++ b/test/CodeGen/AArch64/mul_pow2.ll
@@ -0,0 +1,123 @@
+; RUN: llc < %s -march=aarch64 | FileCheck %s
+
+; Convert mul x, pow2 to shift.
+; Convert mul x, pow2 +/- 1 to shift + add/sub.
+
+define i32 @test2(i32 %x) {
+; CHECK-LABEL: test2
+; CHECK: lsl w0, w0, #1
+
+ %mul = shl nsw i32 %x, 1
+ ret i32 %mul
+}
+
+define i32 @test3(i32 %x) {
+; CHECK-LABEL: test3
+; CHECK: add w0, w0, w0, lsl #1
+
+ %mul = mul nsw i32 %x, 3
+ ret i32 %mul
+}
+
+define i32 @test4(i32 %x) {
+; CHECK-LABEL: test4
+; CHECK: lsl w0, w0, #2
+
+ %mul = shl nsw i32 %x, 2
+ ret i32 %mul
+}
+
+define i32 @test5(i32 %x) {
+; CHECK-LABEL: test5
+; CHECK: add w0, w0, w0, lsl #2
+
+
+ %mul = mul nsw i32 %x, 5
+ ret i32 %mul
+}
+
+define i32 @test7(i32 %x) {
+; CHECK-LABEL: test7
+; CHECK: lsl {{w[0-9]+}}, w0, #3
+; CHECK: sub w0, {{w[0-9]+}}, w0
+
+ %mul = mul nsw i32 %x, 7
+ ret i32 %mul
+}
+
+define i32 @test8(i32 %x) {
+; CHECK-LABEL: test8
+; CHECK: lsl w0, w0, #3
+
+ %mul = shl nsw i32 %x, 3
+ ret i32 %mul
+}
+
+define i32 @test9(i32 %x) {
+; CHECK-LABEL: test9
+; CHECK: add w0, w0, w0, lsl #3
+
+ %mul = mul nsw i32 %x, 9
+ ret i32 %mul
+}
+
+; Convert mul x, -pow2 to shift.
+; Convert mul x, -(pow2 +/- 1) to shift + add/sub.
+
+define i32 @ntest2(i32 %x) {
+; CHECK-LABEL: ntest2
+; CHECK: neg w0, w0, lsl #1
+
+ %mul = mul nsw i32 %x, -2
+ ret i32 %mul
+}
+
+define i32 @ntest3(i32 %x) {
+; CHECK-LABEL: ntest3
+; CHECK: add {{w[0-9]+}}, w0, w0, lsl #1
+; CHECK: neg w0, {{w[0-9]+}}
+
+ %mul = mul nsw i32 %x, -3
+ ret i32 %mul
+}
+
+define i32 @ntest4(i32 %x) {
+; CHECK-LABEL: ntest4
+; CHECK: neg w0, w0, lsl #2
+
+ %mul = mul nsw i32 %x, -4
+ ret i32 %mul
+}
+
+define i32 @ntest5(i32 %x) {
+; CHECK-LABEL: ntest5
+; CHECK: add {{w[0-9]+}}, w0, w0, lsl #2
+; CHECK: neg w0, {{w[0-9]+}}
+ %mul = mul nsw i32 %x, -5
+ ret i32 %mul
+}
+
+define i32 @ntest7(i32 %x) {
+; CHECK-LABEL: ntest7
+; CHECK: sub w0, w0, w0, lsl #3
+
+ %mul = mul nsw i32 %x, -7
+ ret i32 %mul
+}
+
+define i32 @ntest8(i32 %x) {
+; CHECK-LABEL: ntest8
+; CHECK: neg w0, w0, lsl #3
+
+ %mul = mul nsw i32 %x, -8
+ ret i32 %mul
+}
+
+define i32 @ntest9(i32 %x) {
+; CHECK-LABEL: ntest9
+; CHECK: add {{w[0-9]+}}, w0, w0, lsl #3
+; CHECK: neg w0, {{w[0-9]+}}
+
+ %mul = mul nsw i32 %x, -9
+ ret i32 %mul
+}
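
The two comments at the top of this file summarise the expansion strategy the checks verify: multiplying by plus or minus a power of two becomes a (possibly negated) shift, and multiplying by plus or minus (2^n +/- 1) becomes a shift folded into an add or sub via a shifted operand. A minimal sketch of the identity behind the test9 case, written out explicitly in IR as a hypothetical function rather than taken from the patch:

; x * 9 == x + (x << 3), the form the "add ..., lsl #3" check expects.
define i32 @mul9_expanded(i32 %x) {
  %shl = shl i32 %x, 3
  %r = add i32 %shl, %x
  ret i32 %r
}
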
diff --git a/test/CodeGen/AArch64/neon-2velem-high.ll b/test/CodeGen/AArch64/neon-2velem-high.ll
deleted file mode 100644
index 97031d98b7c0..000000000000
--- a/test/CodeGen/AArch64/neon-2velem-high.ll
+++ /dev/null
@@ -1,331 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
-
-declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
-
-declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
-
-declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
-
-declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>)
-
-declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
-
-declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>)
-
-declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>)
-
-declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>)
-
-declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>)
-
-define <4 x i32> @test_vmull_high_n_s16(<8 x i16> %a, i16 %b) {
-; CHECK: test_vmull_high_n_s16:
-; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
- %vmull15.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- ret <4 x i32> %vmull15.i.i
-}
-
-define <2 x i64> @test_vmull_high_n_s32(<4 x i32> %a, i32 %b) {
-; CHECK: test_vmull_high_n_s32:
-; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
- %vmull9.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- ret <2 x i64> %vmull9.i.i
-}
-
-define <4 x i32> @test_vmull_high_n_u16(<8 x i16> %a, i16 %b) {
-; CHECK: test_vmull_high_n_u16:
-; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
- %vmull15.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- ret <4 x i32> %vmull15.i.i
-}
-
-define <2 x i64> @test_vmull_high_n_u32(<4 x i32> %a, i32 %b) {
-; CHECK: test_vmull_high_n_u32:
-; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
- %vmull9.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- ret <2 x i64> %vmull9.i.i
-}
-
-define <4 x i32> @test_vqdmull_high_n_s16(<8 x i16> %a, i16 %b) {
-; CHECK: test_vqdmull_high_n_s16:
-; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
- %vqdmull15.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- ret <4 x i32> %vqdmull15.i.i
-}
-
-define <2 x i64> @test_vqdmull_high_n_s32(<4 x i32> %a, i32 %b) {
-; CHECK: test_vqdmull_high_n_s32:
-; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
- %vqdmull9.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- ret <2 x i64> %vqdmull9.i.i
-}
-
-define <4 x i32> @test_vmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
-; CHECK: test_vmlal_high_n_s16:
-; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
- ret <4 x i32> %add.i.i
-}
-
-define <2 x i64> @test_vmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
-; CHECK: test_vmlal_high_n_s32:
-; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
- ret <2 x i64> %add.i.i
-}
-
-define <4 x i32> @test_vmlal_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
-; CHECK: test_vmlal_high_n_u16:
-; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
- ret <4 x i32> %add.i.i
-}
-
-define <2 x i64> @test_vmlal_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
-; CHECK: test_vmlal_high_n_u32:
-; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
- ret <2 x i64> %add.i.i
-}
-
-define <4 x i32> @test_vqdmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
-; CHECK: test_vqdmlal_high_n_s16:
-; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
- %vqdmlal15.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- %vqdmlal17.i.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal15.i.i)
- ret <4 x i32> %vqdmlal17.i.i
-}
-
-define <2 x i64> @test_vqdmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
-; CHECK: test_vqdmlal_high_n_s32:
-; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
- %vqdmlal9.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- %vqdmlal11.i.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal9.i.i)
- ret <2 x i64> %vqdmlal11.i.i
-}
-
-define <4 x i32> @test_vmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
-; CHECK: test_vmlsl_high_n_s16:
-; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
- ret <4 x i32> %sub.i.i
-}
-
-define <2 x i64> @test_vmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
-; CHECK: test_vmlsl_high_n_s32:
-; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
- ret <2 x i64> %sub.i.i
-}
-
-define <4 x i32> @test_vmlsl_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
-; CHECK: test_vmlsl_high_n_u16:
-; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
- ret <4 x i32> %sub.i.i
-}
-
-define <2 x i64> @test_vmlsl_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
-; CHECK: test_vmlsl_high_n_u32:
-; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
- ret <2 x i64> %sub.i.i
-}
-
-define <4 x i32> @test_vqdmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
-; CHECK: test_vqdmlsl_high_n_s16:
-; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
- %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
- %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
- %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
- %vqdmlsl15.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
- %vqdmlsl17.i.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.i.i)
- ret <4 x i32> %vqdmlsl17.i.i
-}
-
-define <2 x i64> @test_vqdmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
-; CHECK: test_vqdmlsl_high_n_s32:
-; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
- %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
- %vqdmlsl9.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
- %vqdmlsl11.i.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i.i)
- ret <2 x i64> %vqdmlsl11.i.i
-}
-
-define <2 x float> @test_vmul_n_f32(<2 x float> %a, float %b) {
-; CHECK: test_vmul_n_f32:
-; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %vecinit.i = insertelement <2 x float> undef, float %b, i32 0
- %vecinit1.i = insertelement <2 x float> %vecinit.i, float %b, i32 1
- %mul.i = fmul <2 x float> %vecinit1.i, %a
- ret <2 x float> %mul.i
-}
-
-define <4 x float> @test_vmulq_n_f32(<4 x float> %a, float %b) {
-; CHECK: test_vmulq_n_f32:
-; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %vecinit.i = insertelement <4 x float> undef, float %b, i32 0
- %vecinit1.i = insertelement <4 x float> %vecinit.i, float %b, i32 1
- %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %b, i32 2
- %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %b, i32 3
- %mul.i = fmul <4 x float> %vecinit3.i, %a
- ret <4 x float> %mul.i
-}
-
-define <2 x double> @test_vmulq_n_f64(<2 x double> %a, double %b) {
-; CHECK: test_vmulq_n_f64:
-; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %vecinit.i = insertelement <2 x double> undef, double %b, i32 0
- %vecinit1.i = insertelement <2 x double> %vecinit.i, double %b, i32 1
- %mul.i = fmul <2 x double> %vecinit1.i, %a
- ret <2 x double> %mul.i
-}
-
-define <2 x float> @test_vfma_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
-; CHECK: test_vfma_n_f32:
-; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
- %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %b, <2 x float> %vecinit1.i, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmaq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
-; CHECK: test_vfmaq_n_f32:
-; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
- %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
- %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
- %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %b, <4 x float> %vecinit3.i, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x float> @test_vfms_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
-; CHECK: test_vfms_n_f32:
-; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
- %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
- %0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %b
- %1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %0, <2 x float> %vecinit1.i, <2 x float> %a)
- ret <2 x float> %1
-}
-
-define <4 x float> @test_vfmsq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
-; CHECK: test_vfmsq_n_f32:
-; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
-entry:
- %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
- %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
- %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
- %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
- %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
- %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %vecinit3.i, <4 x float> %a)
- ret <4 x float> %1
-}
diff --git a/test/CodeGen/AArch64/neon-2velem.ll b/test/CodeGen/AArch64/neon-2velem.ll
deleted file mode 100644
index 9d6184243713..000000000000
--- a/test/CodeGen/AArch64/neon-2velem.ll
+++ /dev/null
@@ -1,2550 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
-
-declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)
-
-declare <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float>, <4 x float>)
-
-declare <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float>, <2 x float>)
-
-declare <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32>, <2 x i32>)
-
-declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>)
-
-declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
-
-declare <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32>, <2 x i32>)
-
-declare <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16>, <8 x i16>)
-
-declare <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16>, <4 x i16>)
-
-declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>)
-
-declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>)
-
-declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
-
-declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
-
-declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>)
-
-declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>)
-
-declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>)
-
-declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_vmla_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmla_lane_s16:
-; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i16> %shuffle, %b
- %add = add <4 x i16> %mul, %a
- ret <4 x i16> %add
-}
-
-define <8 x i16> @test_vmlaq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlaq_lane_s16:
-; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
- %mul = mul <8 x i16> %shuffle, %b
- %add = add <8 x i16> %mul, %a
- ret <8 x i16> %add
-}
-
-define <2 x i32> @test_vmla_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmla_lane_s32:
-; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %mul = mul <2 x i32> %shuffle, %b
- %add = add <2 x i32> %mul, %a
- ret <2 x i32> %add
-}
-
-define <4 x i32> @test_vmlaq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlaq_lane_s32:
-; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %mul = mul <4 x i32> %shuffle, %b
- %add = add <4 x i32> %mul, %a
- ret <4 x i32> %add
-}
-
-define <4 x i16> @test_vmla_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmla_laneq_s16:
-; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %mul = mul <4 x i16> %shuffle, %b
- %add = add <4 x i16> %mul, %a
- ret <4 x i16> %add
-}
-
-define <8 x i16> @test_vmlaq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlaq_laneq_s16:
-; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
- %mul = mul <8 x i16> %shuffle, %b
- %add = add <8 x i16> %mul, %a
- ret <8 x i16> %add
-}
-
-define <2 x i32> @test_vmla_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmla_laneq_s32:
-; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %mul = mul <2 x i32> %shuffle, %b
- %add = add <2 x i32> %mul, %a
- ret <2 x i32> %add
-}
-
-define <4 x i32> @test_vmlaq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlaq_laneq_s32:
-; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i32> %shuffle, %b
- %add = add <4 x i32> %mul, %a
- ret <4 x i32> %add
-}
-
-define <4 x i16> @test_vmls_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmls_lane_s16:
-; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i16> %shuffle, %b
- %sub = sub <4 x i16> %a, %mul
- ret <4 x i16> %sub
-}
-
-define <8 x i16> @test_vmlsq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsq_lane_s16:
-; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
- %mul = mul <8 x i16> %shuffle, %b
- %sub = sub <8 x i16> %a, %mul
- ret <8 x i16> %sub
-}
-
-define <2 x i32> @test_vmls_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmls_lane_s32:
-; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %mul = mul <2 x i32> %shuffle, %b
- %sub = sub <2 x i32> %a, %mul
- ret <2 x i32> %sub
-}
-
-define <4 x i32> @test_vmlsq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsq_lane_s32:
-; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %mul = mul <4 x i32> %shuffle, %b
- %sub = sub <4 x i32> %a, %mul
- ret <4 x i32> %sub
-}
-
-define <4 x i16> @test_vmls_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmls_laneq_s16:
-; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %mul = mul <4 x i16> %shuffle, %b
- %sub = sub <4 x i16> %a, %mul
- ret <4 x i16> %sub
-}
-
-define <8 x i16> @test_vmlsq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsq_laneq_s16:
-; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
- %mul = mul <8 x i16> %shuffle, %b
- %sub = sub <8 x i16> %a, %mul
- ret <8 x i16> %sub
-}
-
-define <2 x i32> @test_vmls_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmls_laneq_s32:
-; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %mul = mul <2 x i32> %shuffle, %b
- %sub = sub <2 x i32> %a, %mul
- ret <2 x i32> %sub
-}
-
-define <4 x i32> @test_vmlsq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsq_laneq_s32:
-; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i32> %shuffle, %b
- %sub = sub <4 x i32> %a, %mul
- ret <4 x i32> %sub
-}
-
-define <4 x i16> @test_vmul_lane_s16(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmul_lane_s16:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmulq_lane_s16:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_lane_s32(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmul_lane_s32:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmulq_lane_s32:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <4 x i16> @test_vmul_lane_u16(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmul_lane_u16:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_lane_u16(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmulq_lane_u16:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_lane_u32(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmul_lane_u32:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_lane_u32(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmulq_lane_u32:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <4 x i16> @test_vmul_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmul_laneq_s16:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmulq_laneq_s16:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmul_laneq_s32:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmulq_laneq_s32:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <4 x i16> @test_vmul_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmul_laneq_u16:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmulq_laneq_u16:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmul_laneq_u32:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmulq_laneq_u32:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <2 x float> @test_vfma_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
-; CHECK: test_vfma_lane_f32:
-; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
-
-define <4 x float> @test_vfmaq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
-; CHECK: test_vfmaq_lane_f32:
-; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
-
-define <2 x float> @test_vfma_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
-; CHECK: test_vfma_laneq_f32:
-; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmaq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
-; CHECK: test_vfmaq_laneq_f32:
-; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x float> @test_vfms_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
-; CHECK: test_vfms_lane_f32:
-; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> <i32 1, i32 1>
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmsq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
-; CHECK: test_vfmsq_lane_f32:
-; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x float> @test_vfms_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
-; CHECK: test_vfms_laneq_f32:
-; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> <i32 3, i32 3>
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmsq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
-; CHECK: test_vfmsq_laneq_f32:
-; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x double> @test_vfmaq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
-; CHECK: test_vfmaq_lane_f64:
-; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %lane = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
- ret <2 x double> %0
-}
-
-declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
-
-define <2 x double> @test_vfmaq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
-; CHECK: test_vfmaq_laneq_f64:
-; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
-entry:
- %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
- %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
- ret <2 x double> %0
-}
-
-define <2 x double> @test_vfmsq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
-; CHECK: test_vfmsq_lane_f64:
-; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %sub = fsub <1 x double> <double -0.000000e+00>, %v
- %lane = shufflevector <1 x double> %sub, <1 x double> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
- ret <2 x double> %0
-}
-
-define <2 x double> @test_vfmsq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
-; CHECK: test_vfmsq_laneq_f64:
-; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
-entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
- %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> <i32 1, i32 1>
- %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
- ret <2 x double> %0
-}
-
-define <4 x i32> @test_vmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_lane_s16:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_lane_s32:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_laneq_s16:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_laneq_s32:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_high_lane_s16:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_high_lane_s32:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_high_laneq_s16:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_high_laneq_s32:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_lane_s16:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_lane_s32:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_laneq_s16:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_laneq_s32:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_high_lane_s16:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_high_lane_s32:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_high_laneq_s16:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_high_laneq_s32:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlal_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_lane_u16:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_lane_u32:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_laneq_u16:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_laneq_u32:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_high_lane_u16:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_high_lane_u32:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_high_laneq_u16:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_high_laneq_u32:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlsl_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_lane_u16:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_lane_u32:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_laneq_u16:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_laneq_u32:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_high_lane_u16:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_high_lane_u32:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_high_laneq_u16:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_high_laneq_u32:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmull_lane_s16(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_lane_s16:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_lane_s32(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_lane_s32:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_lane_u16(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_lane_u16:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_lane_u32(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_lane_u32:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_high_lane_s16:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_high_lane_s32:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_lane_u16(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_high_lane_u16:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_lane_u32(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_high_lane_u32:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_laneq_s16:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_laneq_s32:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_laneq_u16:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_laneq_u32:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_high_laneq_s16:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_high_laneq_s32:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_high_laneq_u16:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_high_laneq_u32:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vqdmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlal_lane_s16:
-; CHECK: qdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
- ret <4 x i32> %vqdmlal4.i
-}
-
-define <2 x i64> @test_vqdmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlal_lane_s32:
-; CHECK: qdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
- ret <2 x i64> %vqdmlal4.i
-}
-
-define <4 x i32> @test_vqdmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlal_high_lane_s16:
-; CHECK: qdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
- ret <4 x i32> %vqdmlal4.i
-}
-
-define <2 x i64> @test_vqdmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlal_high_lane_s32:
-; CHECK: qdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
- ret <2 x i64> %vqdmlal4.i
-}
-
-define <4 x i32> @test_vqdmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlsl_lane_s16:
-; CHECK: qdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
- ret <4 x i32> %vqdmlsl4.i
-}
-
-define <2 x i64> @test_vqdmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlsl_lane_s32:
-; CHECK: qdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
- ret <2 x i64> %vqdmlsl4.i
-}
-
-define <4 x i32> @test_vqdmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlsl_high_lane_s16:
-; CHECK: qdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
- ret <4 x i32> %vqdmlsl4.i
-}
-
-define <2 x i64> @test_vqdmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlsl_high_lane_s32:
-; CHECK: qdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
- ret <2 x i64> %vqdmlsl4.i
-}
-
-define <4 x i32> @test_vqdmull_lane_s16(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmull_lane_s16:
-; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_lane_s32(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmull_lane_s32:
-; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i32> @test_vqdmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vqdmull_laneq_s16:
-; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vqdmull_laneq_s32:
-; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i32> @test_vqdmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmull_high_lane_s16:
-; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmull_high_lane_s32:
-; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i32> @test_vqdmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vqdmull_high_laneq_s16:
-; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vqdmull_high_laneq_s32:
-; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i16> @test_vqdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmulh_lane_s16:
-; CHECK: qdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i16> %vqdmulh2.i
-}
-
-define <8 x i16> @test_vqdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmulhq_lane_s16:
-; CHECK: qdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
- %vqdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
- ret <8 x i16> %vqdmulh2.i
-}
-
-define <2 x i32> @test_vqdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmulh_lane_s32:
-; CHECK: qdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i32> %vqdmulh2.i
-}
-
-define <4 x i32> @test_vqdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmulhq_lane_s32:
-; CHECK: qdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %vqdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
- ret <4 x i32> %vqdmulh2.i
-}
-
-define <4 x i16> @test_vqrdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqrdmulh_lane_s16:
-; CHECK: qrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vqrdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i16> %vqrdmulh2.i
-}
-
-define <8 x i16> @test_vqrdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqrdmulhq_lane_s16:
-; CHECK: qrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
- %vqrdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
- ret <8 x i16> %vqrdmulh2.i
-}
-
-define <2 x i32> @test_vqrdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqrdmulh_lane_s32:
-; CHECK: qrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %vqrdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i32> %vqrdmulh2.i
-}
-
-define <4 x i32> @test_vqrdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqrdmulhq_lane_s32:
-; CHECK: qrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %vqrdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
- ret <4 x i32> %vqrdmulh2.i
-}
-
-define <2 x float> @test_vmul_lane_f32(<2 x float> %a, <2 x float> %v) {
-; CHECK: test_vmul_lane_f32:
-; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
- %mul = fmul <2 x float> %shuffle, %a
- ret <2 x float> %mul
-}
-
-define <1 x double> @test_vmul_lane_f64(<1 x double> %a, <1 x double> %v) {
-; CHECK: test_vmul_lane_f64:
-; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
-entry:
- %0 = bitcast <1 x double> %a to <8 x i8>
- %1 = bitcast <8 x i8> %0 to double
- %extract = extractelement <1 x double> %v, i32 0
- %2 = fmul double %1, %extract
- %3 = insertelement <1 x double> undef, double %2, i32 0
- ret <1 x double> %3
-}
-
-define <4 x float> @test_vmulq_lane_f32(<4 x float> %a, <2 x float> %v) {
-; CHECK: test_vmulq_lane_f32:
-; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %mul = fmul <4 x float> %shuffle, %a
- ret <4 x float> %mul
-}
-
-define <2 x double> @test_vmulq_lane_f64(<2 x double> %a, <1 x double> %v) {
-; CHECK: test_vmulq_lane_f64:
-; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
- %mul = fmul <2 x double> %shuffle, %a
- ret <2 x double> %mul
-}
-
-define <2 x float> @test_vmul_laneq_f32(<2 x float> %a, <4 x float> %v) {
-; CHECK: test_vmul_laneq_f32:
-; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
- %mul = fmul <2 x float> %shuffle, %a
- ret <2 x float> %mul
-}
-
-define <1 x double> @test_vmul_laneq_f64(<1 x double> %a, <2 x double> %v) {
-; CHECK: test_vmul_laneq_f64:
-; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
-entry:
- %0 = bitcast <1 x double> %a to <8 x i8>
- %1 = bitcast <8 x i8> %0 to double
- %extract = extractelement <2 x double> %v, i32 1
- %2 = fmul double %1, %extract
- %3 = insertelement <1 x double> undef, double %2, i32 0
- ret <1 x double> %3
-}
-
-define <4 x float> @test_vmulq_laneq_f32(<4 x float> %a, <4 x float> %v) {
-; CHECK: test_vmulq_laneq_f32:
-; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %mul = fmul <4 x float> %shuffle, %a
- ret <4 x float> %mul
-}
-
-define <2 x double> @test_vmulq_laneq_f64(<2 x double> %a, <2 x double> %v) {
-; CHECK: test_vmulq_laneq_f64:
-; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
-entry:
- %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
- %mul = fmul <2 x double> %shuffle, %a
- ret <2 x double> %mul
-}
-
-define <2 x float> @test_vmulx_lane_f32(<2 x float> %a, <2 x float> %v) {
-; CHECK: test_vmulx_lane_f32:
-; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
- %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
- ret <2 x float> %vmulx2.i
-}
-
-define <4 x float> @test_vmulxq_lane_f32(<4 x float> %a, <2 x float> %v) {
-; CHECK: test_vmulxq_lane_f32:
-; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
- ret <4 x float> %vmulx2.i
-}
-
-define <2 x double> @test_vmulxq_lane_f64(<2 x double> %a, <1 x double> %v) {
-; CHECK: test_vmulxq_lane_f64:
-; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
- %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
- ret <2 x double> %vmulx2.i
-}
-
-define <2 x float> @test_vmulx_laneq_f32(<2 x float> %a, <4 x float> %v) {
-; CHECK: test_vmulx_laneq_f32:
-; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
- %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
- ret <2 x float> %vmulx2.i
-}
-
-define <4 x float> @test_vmulxq_laneq_f32(<4 x float> %a, <4 x float> %v) {
-; CHECK: test_vmulxq_laneq_f32:
-; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
- %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
- ret <4 x float> %vmulx2.i
-}
-
-define <2 x double> @test_vmulxq_laneq_f64(<2 x double> %a, <2 x double> %v) {
-; CHECK: test_vmulxq_laneq_f64:
-; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
-entry:
- %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
- %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
- ret <2 x double> %vmulx2.i
-}
-
-define <4 x i16> @test_vmla_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmla_lane_s16_0:
-; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %b
- %add = add <4 x i16> %mul, %a
- ret <4 x i16> %add
-}
-
-define <8 x i16> @test_vmlaq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlaq_lane_s16_0:
-; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %b
- %add = add <8 x i16> %mul, %a
- ret <8 x i16> %add
-}
-
-define <2 x i32> @test_vmla_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmla_lane_s32_0:
-; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %b
- %add = add <2 x i32> %mul, %a
- ret <2 x i32> %add
-}
-
-define <4 x i32> @test_vmlaq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlaq_lane_s32_0:
-; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %b
- %add = add <4 x i32> %mul, %a
- ret <4 x i32> %add
-}
-
-define <4 x i16> @test_vmla_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmla_laneq_s16_0:
-; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %b
- %add = add <4 x i16> %mul, %a
- ret <4 x i16> %add
-}
-
-define <8 x i16> @test_vmlaq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlaq_laneq_s16_0:
-; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %b
- %add = add <8 x i16> %mul, %a
- ret <8 x i16> %add
-}
-
-define <2 x i32> @test_vmla_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmla_laneq_s32_0:
-; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %b
- %add = add <2 x i32> %mul, %a
- ret <2 x i32> %add
-}
-
-define <4 x i32> @test_vmlaq_laneq_s32_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlaq_laneq_s32_0:
-; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %b
- %add = add <4 x i32> %mul, %a
- ret <4 x i32> %add
-}
-
-define <4 x i16> @test_vmls_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmls_lane_s16_0:
-; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %b
- %sub = sub <4 x i16> %a, %mul
- ret <4 x i16> %sub
-}
-
-define <8 x i16> @test_vmlsq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsq_lane_s16_0:
-; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %b
- %sub = sub <8 x i16> %a, %mul
- ret <8 x i16> %sub
-}
-
-define <2 x i32> @test_vmls_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmls_lane_s32_0:
-; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %b
- %sub = sub <2 x i32> %a, %mul
- ret <2 x i32> %sub
-}
-
-define <4 x i32> @test_vmlsq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsq_lane_s32_0:
-; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %b
- %sub = sub <4 x i32> %a, %mul
- ret <4 x i32> %sub
-}
-
-define <4 x i16> @test_vmls_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmls_laneq_s16_0:
-; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %b
- %sub = sub <4 x i16> %a, %mul
- ret <4 x i16> %sub
-}
-
-define <8 x i16> @test_vmlsq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsq_laneq_s16_0:
-; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %b
- %sub = sub <8 x i16> %a, %mul
- ret <8 x i16> %sub
-}
-
-define <2 x i32> @test_vmls_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmls_laneq_s32_0:
-; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %b
- %sub = sub <2 x i32> %a, %mul
- ret <2 x i32> %sub
-}
-
-define <4 x i32> @test_vmlsq_laneq_s32_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsq_laneq_s32_0:
-; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %b
- %sub = sub <4 x i32> %a, %mul
- ret <4 x i32> %sub
-}
-
-define <4 x i16> @test_vmul_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmul_lane_s16_0:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmulq_lane_s16_0:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmul_lane_s32_0:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmulq_lane_s32_0:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <4 x i16> @test_vmul_lane_u16_0(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmul_lane_u16_0:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_lane_u16_0(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmulq_lane_u16_0:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_lane_u32_0(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmul_lane_u32_0:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_lane_u32_0(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmulq_lane_u32_0:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <4 x i16> @test_vmul_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmul_laneq_s16_0:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmulq_laneq_s16_0:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmul_laneq_s32_0:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmulq_laneq_s32_0:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <4 x i16> @test_vmul_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmul_laneq_u16_0:
-; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i16> %shuffle, %a
- ret <4 x i16> %mul
-}
-
-define <8 x i16> @test_vmulq_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmulq_laneq_u16_0:
-; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
- %mul = mul <8 x i16> %shuffle, %a
- ret <8 x i16> %mul
-}
-
-define <2 x i32> @test_vmul_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmul_laneq_u32_0:
-; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %mul = mul <2 x i32> %shuffle, %a
- ret <2 x i32> %mul
-}
-
-define <4 x i32> @test_vmulq_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmulq_laneq_u32_0:
-; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
- %mul = mul <4 x i32> %shuffle, %a
- ret <4 x i32> %mul
-}
-
-define <2 x float> @test_vfma_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
-; CHECK: test_vfma_lane_f32_0:
-; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmaq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
-; CHECK: test_vfmaq_lane_f32_0:
-; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x float> @test_vfma_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
-; CHECK: test_vfma_laneq_f32_0:
-; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmaq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
-; CHECK: test_vfmaq_laneq_f32_0:
-; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x float> @test_vfms_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
-; CHECK: test_vfms_lane_f32_0:
-; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmsq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
-; CHECK: test_vfmsq_lane_f32_0:
-; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> zeroinitializer
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x float> @test_vfms_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
-; CHECK: test_vfms_laneq_f32_0:
-; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
- ret <2 x float> %0
-}
-
-define <4 x float> @test_vfmsq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
-; CHECK: test_vfmsq_laneq_f32_0:
-; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
- %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> zeroinitializer
- %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
- ret <4 x float> %0
-}
-
-define <2 x double> @test_vfmaq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
-; CHECK: test_vfmaq_laneq_f64_0:
-; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
- ret <2 x double> %0
-}
-
-define <2 x double> @test_vfmsq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
-; CHECK: test_vfmsq_laneq_f64_0:
-; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
- %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> zeroinitializer
- %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
- ret <2 x double> %0
-}
-
-define <4 x i32> @test_vmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_lane_s16_0:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_lane_s32_0:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_laneq_s16_0:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_laneq_s32_0:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_high_lane_s16_0:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_high_lane_s32_0:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_high_laneq_s16_0:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_high_laneq_s32_0:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_lane_s16_0:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_lane_s32_0:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_laneq_s16_0:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_laneq_s32_0:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_high_lane_s16_0:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_high_lane_s32_0:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_high_laneq_s16_0:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_high_laneq_s32_0:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlal_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_lane_u16_0:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_lane_u32_0:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_laneq_u16_0:
-; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_laneq_u32_0:
-; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlal_high_lane_u16_0:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlal_high_lane_u32_0:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlal_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlal_high_laneq_u16_0:
-; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %add = add <4 x i32> %vmull2.i, %a
- ret <4 x i32> %add
-}
-
-define <2 x i64> @test_vmlal_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlal_high_laneq_u32_0:
-; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %add = add <2 x i64> %vmull2.i, %a
- ret <2 x i64> %add
-}
-
-define <4 x i32> @test_vmlsl_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_lane_u16_0:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_lane_u32_0:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_laneq_u16_0:
-; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_laneq_u32_0:
-; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vmlsl_high_lane_u16_0:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vmlsl_high_lane_u32_0:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmlsl_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
-; CHECK: test_vmlsl_high_laneq_u16_0:
-; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %sub = sub <4 x i32> %a, %vmull2.i
- ret <4 x i32> %sub
-}
-
-define <2 x i64> @test_vmlsl_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
-; CHECK: test_vmlsl_high_laneq_u32_0:
-; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %sub = sub <2 x i64> %a, %vmull2.i
- ret <2 x i64> %sub
-}
-
-define <4 x i32> @test_vmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_lane_s16_0:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_lane_s32_0:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_lane_u16_0(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_lane_u16_0:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_lane_u32_0(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_lane_u32_0:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_high_lane_s16_0:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_high_lane_s32_0:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_lane_u16_0(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vmull_high_lane_u16_0:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_lane_u32_0(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vmull_high_lane_u32_0:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_laneq_s16_0:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_laneq_s32_0:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_laneq_u16_0:
-; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_laneq_u32_0:
-; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_high_laneq_s16_0:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_high_laneq_s32_0:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vmull_high_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vmull_high_laneq_u16_0:
-; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_high_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vmull_high_laneq_u32_0:
-; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vmull2.i
-}
-
-define <4 x i32> @test_vqdmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlal_lane_s16_0:
-; CHECK: qdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
- ret <4 x i32> %vqdmlal4.i
-}
-
-define <2 x i64> @test_vqdmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlal_lane_s32_0:
-; CHECK: qdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
- ret <2 x i64> %vqdmlal4.i
-}
-
-define <4 x i32> @test_vqdmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlal_high_lane_s16_0:
-; CHECK: qdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
- ret <4 x i32> %vqdmlal4.i
-}
-
-define <2 x i64> @test_vqdmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlal_high_lane_s32_0:
-; CHECK: qdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
- ret <2 x i64> %vqdmlal4.i
-}
-
-define <4 x i32> @test_vqdmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlsl_lane_s16_0:
-; CHECK: qdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
- %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
- ret <4 x i32> %vqdmlsl4.i
-}
-
-define <2 x i64> @test_vqdmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlsl_lane_s32_0:
-; CHECK: qdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
- %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
- ret <2 x i64> %vqdmlsl4.i
-}
-
-define <4 x i32> @test_vqdmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
-; CHECK: test_vqdmlsl_high_lane_s16_0:
-; CHECK: qdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
- ret <4 x i32> %vqdmlsl4.i
-}
-
-define <2 x i64> @test_vqdmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
-; CHECK: test_vqdmlsl_high_lane_s32_0:
-; CHECK: qdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
- ret <2 x i64> %vqdmlsl4.i
-}
-
-define <4 x i32> @test_vqdmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmull_lane_s16_0:
-; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmull_lane_s32_0:
-; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i32> @test_vqdmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vqdmull_laneq_s16_0:
-; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vqdmull_laneq_s32_0:
-; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i32> @test_vqdmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmull_high_lane_s16_0:
-; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmull_high_lane_s32_0:
-; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i32> @test_vqdmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
-; CHECK: test_vqdmull_high_laneq_s16_0:
-; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
-; CHECK: test_vqdmull_high_laneq_s32_0:
-; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i16> @test_vqdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmulh_lane_s16_0:
-; CHECK: qdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i16> %vqdmulh2.i
-}
-
-define <8 x i16> @test_vqdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqdmulhq_lane_s16_0:
-; CHECK: qdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
- %vqdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
- ret <8 x i16> %vqdmulh2.i
-}
-
-define <2 x i32> @test_vqdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmulh_lane_s32_0:
-; CHECK: qdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i32> %vqdmulh2.i
-}
-
-define <4 x i32> @test_vqdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqdmulhq_lane_s32_0:
-; CHECK: qdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
- %vqdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
- ret <4 x i32> %vqdmulh2.i
-}
-
-define <4 x i16> @test_vqrdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqrdmulh_lane_s16_0:
-; CHECK: qrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
- %vqrdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
- ret <4 x i16> %vqrdmulh2.i
-}
-
-define <8 x i16> @test_vqrdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
-; CHECK: test_vqrdmulhq_lane_s16_0:
-; CHECK: qrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
-entry:
- %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
- %vqrdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
- ret <8 x i16> %vqrdmulh2.i
-}
-
-define <2 x i32> @test_vqrdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqrdmulh_lane_s32_0:
-; CHECK: qrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
- %vqrdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
- ret <2 x i32> %vqrdmulh2.i
-}
-
-define <4 x i32> @test_vqrdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
-; CHECK: test_vqrdmulhq_lane_s32_0:
-; CHECK: qrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
- %vqrdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
- ret <4 x i32> %vqrdmulh2.i
-}
-
-define <2 x float> @test_vmul_lane_f32_0(<2 x float> %a, <2 x float> %v) {
-; CHECK: test_vmul_lane_f32_0:
-; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
- %mul = fmul <2 x float> %shuffle, %a
- ret <2 x float> %mul
-}
-
-define <4 x float> @test_vmulq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
-; CHECK: test_vmulq_lane_f32_0:
-; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
- %mul = fmul <4 x float> %shuffle, %a
- ret <4 x float> %mul
-}
-
-define <2 x float> @test_vmul_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
-; CHECK: test_vmul_laneq_f32_0:
-; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
- %mul = fmul <2 x float> %shuffle, %a
- ret <2 x float> %mul
-}
-
-define <1 x double> @test_vmul_laneq_f64_0(<1 x double> %a, <2 x double> %v) {
-; CHECK: test_vmul_laneq_f64_0:
-; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
-entry:
- %0 = bitcast <1 x double> %a to <8 x i8>
- %1 = bitcast <8 x i8> %0 to double
- %extract = extractelement <2 x double> %v, i32 0
- %2 = fmul double %1, %extract
- %3 = insertelement <1 x double> undef, double %2, i32 0
- ret <1 x double> %3
-}
-
-define <4 x float> @test_vmulq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
-; CHECK: test_vmulq_laneq_f32_0:
-; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
- %mul = fmul <4 x float> %shuffle, %a
- ret <4 x float> %mul
-}
-
-define <2 x double> @test_vmulq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
-; CHECK: test_vmulq_laneq_f64_0:
-; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
- %mul = fmul <2 x double> %shuffle, %a
- ret <2 x double> %mul
-}
-
-define <2 x float> @test_vmulx_lane_f32_0(<2 x float> %a, <2 x float> %v) {
-; CHECK: test_vmulx_lane_f32_0:
-; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
- %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
- ret <2 x float> %vmulx2.i
-}
-
-define <4 x float> @test_vmulxq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
-; CHECK: test_vmulxq_lane_f32_0:
-; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
- %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
- ret <4 x float> %vmulx2.i
-}
-
-define <2 x double> @test_vmulxq_lane_f64_0(<2 x double> %a, <1 x double> %v) {
-; CHECK: test_vmulxq_lane_f64_0:
-; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
- %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
- ret <2 x double> %vmulx2.i
-}
-
-define <2 x float> @test_vmulx_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
-; CHECK: test_vmulx_laneq_f32_0:
-; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
- %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
- ret <2 x float> %vmulx2.i
-}
-
-define <4 x float> @test_vmulxq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
-; CHECK: test_vmulxq_laneq_f32_0:
-; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
-entry:
- %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
- %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
- ret <4 x float> %vmulx2.i
-}
-
-define <2 x double> @test_vmulxq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
-; CHECK: test_vmulxq_laneq_f64_0:
-; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
-entry:
- %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
- %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
- ret <2 x double> %vmulx2.i
-}
-
diff --git a/test/CodeGen/AArch64/neon-3vdiff.ll b/test/CodeGen/AArch64/neon-3vdiff.ll
deleted file mode 100644
index 171e2b2edad0..000000000000
--- a/test/CodeGen/AArch64/neon-3vdiff.ll
+++ /dev/null
@@ -1,1806 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>)
-
-declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
-
-declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>)
-
-declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
-
-declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>)
-
-declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>)
-
-declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>)
-
-declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>)
-
-declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>)
-
-declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>)
-
-declare <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32>, <2 x i32>)
-
-declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>)
-
-declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>)
-
-declare <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32>, <2 x i32>)
-
-declare <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16>, <4 x i16>)
-
-declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>)
-
-declare <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64>, <2 x i64>)
-
-declare <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32>, <4 x i32>)
-
-declare <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16>, <8 x i16>)
-
-declare <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64>, <2 x i64>)
-
-declare <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32>, <4 x i32>)
-
-declare <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_vaddl_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vaddl_s8:
-; CHECK: saddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
- %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
- %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddl_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vaddl_s16:
-; CHECK: saddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
- %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
- %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddl_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vaddl_s32:
-; CHECK: saddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
- %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
- %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vaddl_u8:
-; CHECK: uaddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
- %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
- %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddl_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vaddl_u16:
-; CHECK: uaddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
- %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
- %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddl_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vaddl_u32:
-; CHECK: uaddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
- %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
- %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vaddl_high_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vaddl_high_s8:
-; CHECK: saddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
- %add.i = add <8 x i16> %0, %1
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddl_high_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vaddl_high_s16:
-; CHECK: saddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
- %add.i = add <4 x i32> %0, %1
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddl_high_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vaddl_high_s32:
-; CHECK: saddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
- %add.i = add <2 x i64> %0, %1
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vaddl_high_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vaddl_high_u8:
-; CHECK: uaddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
- %add.i = add <8 x i16> %0, %1
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddl_high_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vaddl_high_u16:
-; CHECK: uaddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
- %add.i = add <4 x i32> %0, %1
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddl_high_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vaddl_high_u32:
-; CHECK: uaddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
- %add.i = add <2 x i64> %0, %1
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vaddw_s8(<8 x i16> %a, <8 x i8> %b) {
-; CHECK: test_vaddw_s8:
-; CHECK: saddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
- %add.i = add <8 x i16> %vmovl.i.i, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddw_s16(<4 x i32> %a, <4 x i16> %b) {
-; CHECK: test_vaddw_s16:
-; CHECK: saddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
- %add.i = add <4 x i32> %vmovl.i.i, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddw_s32(<2 x i64> %a, <2 x i32> %b) {
-; CHECK: test_vaddw_s32:
-; CHECK: saddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
- %add.i = add <2 x i64> %vmovl.i.i, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vaddw_u8(<8 x i16> %a, <8 x i8> %b) {
-; CHECK: test_vaddw_u8:
-; CHECK: uaddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
- %add.i = add <8 x i16> %vmovl.i.i, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddw_u16(<4 x i32> %a, <4 x i16> %b) {
-; CHECK: test_vaddw_u16:
-; CHECK: uaddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
- %add.i = add <4 x i32> %vmovl.i.i, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddw_u32(<2 x i64> %a, <2 x i32> %b) {
-; CHECK: test_vaddw_u32:
-; CHECK: uaddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
- %add.i = add <2 x i64> %vmovl.i.i, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vaddw_high_s8(<8 x i16> %a, <16 x i8> %b) {
-; CHECK: test_vaddw_high_s8:
-; CHECK: saddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %add.i = add <8 x i16> %0, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddw_high_s16(<4 x i32> %a, <8 x i16> %b) {
-; CHECK: test_vaddw_high_s16:
-; CHECK: saddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %add.i = add <4 x i32> %0, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddw_high_s32(<2 x i64> %a, <4 x i32> %b) {
-; CHECK: test_vaddw_high_s32:
-; CHECK: saddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %add.i = add <2 x i64> %0, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vaddw_high_u8(<8 x i16> %a, <16 x i8> %b) {
-; CHECK: test_vaddw_high_u8:
-; CHECK: uaddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %add.i = add <8 x i16> %0, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vaddw_high_u16(<4 x i32> %a, <8 x i16> %b) {
-; CHECK: test_vaddw_high_u16:
-; CHECK: uaddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %add.i = add <4 x i32> %0, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vaddw_high_u32(<2 x i64> %a, <4 x i32> %b) {
-; CHECK: test_vaddw_high_u32:
-; CHECK: uaddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %add.i = add <2 x i64> %0, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vsubl_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsubl_s8:
-; CHECK: ssubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
- %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
- %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubl_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsubl_s16:
-; CHECK: ssubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
- %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
- %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubl_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vsubl_s32:
-; CHECK: ssubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
- %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
- %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vsubl_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsubl_u8:
-; CHECK: usubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
- %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
- %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubl_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsubl_u16:
-; CHECK: usubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
- %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
- %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubl_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vsubl_u32:
-; CHECK: usubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
- %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
- %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vsubl_high_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsubl_high_s8:
-; CHECK: ssubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
- %sub.i = sub <8 x i16> %0, %1
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubl_high_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsubl_high_s16:
-; CHECK: ssubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
- %sub.i = sub <4 x i32> %0, %1
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubl_high_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsubl_high_s32:
-; CHECK: ssubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
- %sub.i = sub <2 x i64> %0, %1
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vsubl_high_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsubl_high_u8:
-; CHECK: usubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
- %sub.i = sub <8 x i16> %0, %1
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubl_high_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsubl_high_u16:
-; CHECK: usubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
- %sub.i = sub <4 x i32> %0, %1
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubl_high_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsubl_high_u32:
-; CHECK: usubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
- %sub.i = sub <2 x i64> %0, %1
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vsubw_s8(<8 x i16> %a, <8 x i8> %b) {
-; CHECK: test_vsubw_s8:
-; CHECK: ssubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
- %sub.i = sub <8 x i16> %a, %vmovl.i.i
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubw_s16(<4 x i32> %a, <4 x i16> %b) {
-; CHECK: test_vsubw_s16:
-; CHECK: ssubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
- %sub.i = sub <4 x i32> %a, %vmovl.i.i
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubw_s32(<2 x i64> %a, <2 x i32> %b) {
-; CHECK: test_vsubw_s32:
-; CHECK: ssubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
- %sub.i = sub <2 x i64> %a, %vmovl.i.i
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vsubw_u8(<8 x i16> %a, <8 x i8> %b) {
-; CHECK: test_vsubw_u8:
-; CHECK: usubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
-entry:
- %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
- %sub.i = sub <8 x i16> %a, %vmovl.i.i
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubw_u16(<4 x i32> %a, <4 x i16> %b) {
-; CHECK: test_vsubw_u16:
-; CHECK: usubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
-entry:
- %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
- %sub.i = sub <4 x i32> %a, %vmovl.i.i
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubw_u32(<2 x i64> %a, <2 x i32> %b) {
-; CHECK: test_vsubw_u32:
-; CHECK: usubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
-entry:
- %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
- %sub.i = sub <2 x i64> %a, %vmovl.i.i
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vsubw_high_s8(<8 x i16> %a, <16 x i8> %b) {
-; CHECK: test_vsubw_high_s8:
-; CHECK: ssubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %sub.i = sub <8 x i16> %a, %0
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubw_high_s16(<4 x i32> %a, <8 x i16> %b) {
-; CHECK: test_vsubw_high_s16:
-; CHECK: ssubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %sub.i = sub <4 x i32> %a, %0
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubw_high_s32(<2 x i64> %a, <4 x i32> %b) {
-; CHECK: test_vsubw_high_s32:
-; CHECK: ssubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %sub.i = sub <2 x i64> %a, %0
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vsubw_high_u8(<8 x i16> %a, <16 x i8> %b) {
-; CHECK: test_vsubw_high_u8:
-; CHECK: usubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
- %sub.i = sub <8 x i16> %a, %0
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vsubw_high_u16(<4 x i32> %a, <8 x i16> %b) {
-; CHECK: test_vsubw_high_u16:
-; CHECK: usubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
- %sub.i = sub <4 x i32> %a, %0
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vsubw_high_u32(<2 x i64> %a, <4 x i32> %b) {
-; CHECK: test_vsubw_high_u32:
-; CHECK: usubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
- %sub.i = sub <2 x i64> %a, %0
- ret <2 x i64> %sub.i
-}
-
-define <8 x i8> @test_vaddhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vaddhn_s16:
-; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vaddhn.i = add <8 x i16> %a, %b
- %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
- ret <8 x i8> %vaddhn2.i
-}
-
-define <4 x i16> @test_vaddhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vaddhn_s32:
-; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vaddhn.i = add <4 x i32> %a, %b
- %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
- %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
- ret <4 x i16> %vaddhn2.i
-}
-
-define <2 x i32> @test_vaddhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vaddhn_s64:
-; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vaddhn.i = add <2 x i64> %a, %b
- %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
- %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
- ret <2 x i32> %vaddhn2.i
-}
-
-define <8 x i8> @test_vaddhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vaddhn_u16:
-; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vaddhn.i = add <8 x i16> %a, %b
- %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
- ret <8 x i8> %vaddhn2.i
-}
-
-define <4 x i16> @test_vaddhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vaddhn_u32:
-; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vaddhn.i = add <4 x i32> %a, %b
- %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
- %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
- ret <4 x i16> %vaddhn2.i
-}
-
-define <2 x i32> @test_vaddhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vaddhn_u64:
-; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vaddhn.i = add <2 x i64> %a, %b
- %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
- %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
- ret <2 x i32> %vaddhn2.i
-}
-
-define <16 x i8> @test_vaddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vaddhn_high_s16:
-; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vaddhn.i.i = add <8 x i16> %a, %b
- %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vaddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vaddhn_high_s32:
-; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vaddhn.i.i = add <4 x i32> %a, %b
- %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
- %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vaddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vaddhn_high_s64:
-; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vaddhn.i.i = add <2 x i64> %a, %b
- %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
- %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <16 x i8> @test_vaddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vaddhn_high_u16:
-; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vaddhn.i.i = add <8 x i16> %a, %b
- %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vaddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vaddhn_high_u32:
-; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vaddhn.i.i = add <4 x i32> %a, %b
- %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
- %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vaddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vaddhn_high_u64:
-; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vaddhn.i.i = add <2 x i64> %a, %b
- %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
- %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <8 x i8> @test_vraddhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vraddhn_s16:
-; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vraddhn2.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- ret <8 x i8> %vraddhn2.i
-}
-
-define <4 x i16> @test_vraddhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vraddhn_s32:
-; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vraddhn2.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- ret <4 x i16> %vraddhn2.i
-}
-
-define <2 x i32> @test_vraddhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vraddhn_s64:
-; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vraddhn2.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- ret <2 x i32> %vraddhn2.i
-}
-
-define <8 x i8> @test_vraddhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vraddhn_u16:
-; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vraddhn2.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- ret <8 x i8> %vraddhn2.i
-}
-
-define <4 x i16> @test_vraddhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vraddhn_u32:
-; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vraddhn2.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- ret <4 x i16> %vraddhn2.i
-}
-
-define <2 x i32> @test_vraddhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vraddhn_u64:
-; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vraddhn2.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- ret <2 x i32> %vraddhn2.i
-}
-
-define <16 x i8> @test_vraddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vraddhn_high_s16:
-; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vraddhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vraddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vraddhn_high_s32:
-; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vraddhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vraddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vraddhn_high_s64:
-; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vraddhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <16 x i8> @test_vraddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vraddhn_high_u16:
-; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vraddhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vraddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vraddhn_high_u32:
-; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vraddhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vraddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vraddhn_high_u64:
-; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vraddhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <8 x i8> @test_vsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsubhn_s16:
-; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vsubhn.i = sub <8 x i16> %a, %b
- %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
- ret <8 x i8> %vsubhn2.i
-}
-
-define <4 x i16> @test_vsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsubhn_s32:
-; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vsubhn.i = sub <4 x i32> %a, %b
- %vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
- %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16>
- ret <4 x i16> %vsubhn2.i
-}
-
-define <2 x i32> @test_vsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsubhn_s64:
-; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vsubhn.i = sub <2 x i64> %a, %b
- %vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
- %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32>
- ret <2 x i32> %vsubhn2.i
-}
-
-define <8 x i8> @test_vsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsubhn_u16:
-; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vsubhn.i = sub <8 x i16> %a, %b
- %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
- ret <8 x i8> %vsubhn2.i
-}
-
-define <4 x i16> @test_vsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsubhn_u32:
-; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vsubhn.i = sub <4 x i32> %a, %b
- %vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
- %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16>
- ret <4 x i16> %vsubhn2.i
-}
-
-define <2 x i32> @test_vsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsubhn_u64:
-; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vsubhn.i = sub <2 x i64> %a, %b
- %vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
- %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32>
- ret <2 x i32> %vsubhn2.i
-}
-
-define <16 x i8> @test_vsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsubhn_high_s16:
-; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vsubhn.i.i = sub <8 x i16> %a, %b
- %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8>
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsubhn_high_s32:
-; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vsubhn.i.i = sub <4 x i32> %a, %b
- %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, <i32 16, i32 16, i32 16, i32 16>
- %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16>
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsubhn_high_s64:
-; CHECK: subhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vsubhn.i.i = sub <2 x i64> %a, %b
- %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, <i64 32, i64 32>
- %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32>
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <16 x i8> @test_vsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsubhn_high_u16:
-; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vsubhn.i.i = sub <8 x i16> %a, %b
- %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8>
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsubhn_high_u32:
-; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vsubhn.i.i = sub <4 x i32> %a, %b
- %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, <i32 16, i32 16, i32 16, i32 16>
- %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16>
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsubhn_high_u64:
-; CHECK: subhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vsubhn.i.i = sub <2 x i64> %a, %b
- %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, <i64 32, i64 32>
- %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32>
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <8 x i8> @test_vrsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vrsubhn_s16:
-; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vrsubhn2.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- ret <8 x i8> %vrsubhn2.i
-}
-
-define <4 x i16> @test_vrsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vrsubhn_s32:
-; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vrsubhn2.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- ret <4 x i16> %vrsubhn2.i
-}
-
-define <2 x i32> @test_vrsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vrsubhn_s64:
-; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vrsubhn2.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- ret <2 x i32> %vrsubhn2.i
-}
-
-define <8 x i8> @test_vrsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vrsubhn_u16:
-; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vrsubhn2.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- ret <8 x i8> %vrsubhn2.i
-}
-
-define <4 x i16> @test_vrsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vrsubhn_u32:
-; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vrsubhn2.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- ret <4 x i16> %vrsubhn2.i
-}
-
-define <2 x i32> @test_vrsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vrsubhn_u64:
-; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vrsubhn2.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- ret <2 x i32> %vrsubhn2.i
-}
-
-define <16 x i8> @test_vrsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vrsubhn_high_s16:
-; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vrsubhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vrsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vrsubhn_high_s32:
-; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vrsubhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vrsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vrsubhn_high_s64:
-; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vrsubhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <16 x i8> @test_vrsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vrsubhn_high_u16:
-; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %vrsubhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
- %0 = bitcast <8 x i8> %r to <1 x i64>
- %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vrsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vrsubhn_high_u32:
-; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %vrsubhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
- %0 = bitcast <4 x i16> %r to <1 x i64>
- %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
- ret <8 x i16> %2
-}
-
-define <4 x i32> @test_vrsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vrsubhn_high_u64:
-; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-entry:
- %vrsubhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
- %0 = bitcast <2 x i32> %r to <1 x i64>
- %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64>
- %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
- %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
- ret <4 x i32> %2
-}
-
-define <8 x i16> @test_vabdl_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vabdl_s8:
-; CHECK: sabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vabd.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %a, <8 x i8> %b)
- %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16>
- ret <8 x i16> %vmovl.i.i
-}
-
-define <4 x i32> @test_vabdl_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vabdl_s16:
-; CHECK: sabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vabd2.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %a, <4 x i16> %b)
- %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32>
- ret <4 x i32> %vmovl.i.i
-}
-
-define <2 x i64> @test_vabdl_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vabdl_s32:
-; CHECK: sabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vabd2.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %a, <2 x i32> %b)
- %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64>
- ret <2 x i64> %vmovl.i.i
-}
-
-define <8 x i16> @test_vabdl_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vabdl_u8:
-; CHECK: uabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vabd.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %a, <8 x i8> %b)
- %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16>
- ret <8 x i16> %vmovl.i.i
-}
-
-define <4 x i32> @test_vabdl_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vabdl_u16:
-; CHECK: uabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vabd2.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %a, <4 x i16> %b)
- %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32>
- ret <4 x i32> %vmovl.i.i
-}
-
-define <2 x i64> @test_vabdl_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vabdl_u32:
-; CHECK: uabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vabd2.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %a, <2 x i32> %b)
- %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64>
- ret <2 x i64> %vmovl.i.i
-}
-
-define <8 x i16> @test_vabal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vabal_s8:
-; CHECK: sabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %b, <8 x i8> %c)
- %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
- %add.i = add <8 x i16> %vmovl.i.i.i, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vabal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vabal_s16:
-; CHECK: sabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %b, <4 x i16> %c)
- %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
- %add.i = add <4 x i32> %vmovl.i.i.i, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vabal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vabal_s32:
-; CHECK: sabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %b, <2 x i32> %c)
- %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
- %add.i = add <2 x i64> %vmovl.i.i.i, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vabal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vabal_u8:
-; CHECK: uabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %b, <8 x i8> %c)
- %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
- %add.i = add <8 x i16> %vmovl.i.i.i, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vabal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vabal_u16:
-; CHECK: uabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %b, <4 x i16> %c)
- %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
- %add.i = add <4 x i32> %vmovl.i.i.i, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vabal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vabal_u32:
-; CHECK: uabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %b, <2 x i32> %c)
- %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
- %add.i = add <2 x i64> %vmovl.i.i.i, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vabdl_high_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vabdl_high_s8:
-; CHECK: sabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
- ret <8 x i16> %vmovl.i.i.i
-}
-
-define <4 x i32> @test_vabdl_high_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vabdl_high_s16:
-; CHECK: sabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
- ret <4 x i32> %vmovl.i.i.i
-}
-
-define <2 x i64> @test_vabdl_high_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vabdl_high_s32:
-; CHECK: sabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
- ret <2 x i64> %vmovl.i.i.i
-}
-
-define <8 x i16> @test_vabdl_high_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vabdl_high_u8:
-; CHECK: uabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
- ret <8 x i16> %vmovl.i.i.i
-}
-
-define <4 x i32> @test_vabdl_high_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vabdl_high_u16:
-; CHECK: uabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
- ret <4 x i32> %vmovl.i.i.i
-}
-
-define <2 x i64> @test_vabdl_high_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vabdl_high_u32:
-; CHECK: uabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
- ret <2 x i64> %vmovl.i.i.i
-}
-
-define <8 x i16> @test_vabal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vabal_high_s8:
-; CHECK: sabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vabd.i.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16>
- %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a
- ret <8 x i16> %add.i.i
-}
-
-define <4 x i32> @test_vabal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vabal_high_s16:
-; CHECK: sabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vabd2.i.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32>
- %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a
- ret <4 x i32> %add.i.i
-}
-
-define <2 x i64> @test_vabal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vabal_high_s32:
-; CHECK: sabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vabd2.i.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64>
- %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a
- ret <2 x i64> %add.i.i
-}
-
-define <8 x i16> @test_vabal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vabal_high_u8:
-; CHECK: uabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vabd.i.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16>
- %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a
- ret <8 x i16> %add.i.i
-}
-
-define <4 x i32> @test_vabal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vabal_high_u16:
-; CHECK: uabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vabd2.i.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32>
- %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a
- ret <4 x i32> %add.i.i
-}
-
-define <2 x i64> @test_vabal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vabal_high_u32:
-; CHECK: uabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vabd2.i.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64>
- %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a
- ret <2 x i64> %add.i.i
-}
-
-define <8 x i16> @test_vmull_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vmull_s8:
-; CHECK: smull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %a, <8 x i8> %b)
- ret <8 x i16> %vmull.i
-}
-
-define <4 x i32> @test_vmull_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vmull_s16:
-; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %b)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vmull_s32:
-; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %b)
- ret <2 x i64> %vmull2.i
-}
-
-define <8 x i16> @test_vmull_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vmull_u8:
-; CHECK: umull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %a, <8 x i8> %b)
- ret <8 x i16> %vmull.i
-}
-
-define <4 x i32> @test_vmull_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vmull_u16:
-; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %b)
- ret <4 x i32> %vmull2.i
-}
-
-define <2 x i64> @test_vmull_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vmull_u32:
-; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %b)
- ret <2 x i64> %vmull2.i
-}
-
-define <8 x i16> @test_vmull_high_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vmull_high_s8:
-; CHECK: smull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- ret <8 x i16> %vmull.i.i
-}
-
-define <4 x i32> @test_vmull_high_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vmull_high_s16:
-; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- ret <4 x i32> %vmull2.i.i
-}
-
-define <2 x i64> @test_vmull_high_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vmull_high_s32:
-; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- ret <2 x i64> %vmull2.i.i
-}
-
-define <8 x i16> @test_vmull_high_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vmull_high_u8:
-; CHECK: umull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- ret <8 x i16> %vmull.i.i
-}
-
-define <4 x i32> @test_vmull_high_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vmull_high_u16:
-; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- ret <4 x i32> %vmull2.i.i
-}
-
-define <2 x i64> @test_vmull_high_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vmull_high_u32:
-; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- ret <2 x i64> %vmull2.i.i
-}
-
-define <8 x i16> @test_vmlal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vmlal_s8:
-; CHECK: smlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %b, <8 x i8> %c)
- %add.i = add <8 x i16> %vmull.i.i, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vmlal_s16:
-; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %c)
- %add.i = add <4 x i32> %vmull2.i.i, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vmlal_s32:
-; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %c)
- %add.i = add <2 x i64> %vmull2.i.i, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vmlal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vmlal_u8:
-; CHECK: umlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %b, <8 x i8> %c)
- %add.i = add <8 x i16> %vmull.i.i, %a
- ret <8 x i16> %add.i
-}
-
-define <4 x i32> @test_vmlal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vmlal_u16:
-; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %c)
- %add.i = add <4 x i32> %vmull2.i.i, %a
- ret <4 x i32> %add.i
-}
-
-define <2 x i64> @test_vmlal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vmlal_u32:
-; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %c)
- %add.i = add <2 x i64> %vmull2.i.i, %a
- ret <2 x i64> %add.i
-}
-
-define <8 x i16> @test_vmlal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vmlal_high_s8:
-; CHECK: smlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %add.i.i = add <8 x i16> %vmull.i.i.i, %a
- ret <8 x i16> %add.i.i
-}
-
-define <4 x i32> @test_vmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vmlal_high_s16:
-; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
- ret <4 x i32> %add.i.i
-}
-
-define <2 x i64> @test_vmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vmlal_high_s32:
-; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
- ret <2 x i64> %add.i.i
-}
-
-define <8 x i16> @test_vmlal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vmlal_high_u8:
-; CHECK: umlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %add.i.i = add <8 x i16> %vmull.i.i.i, %a
- ret <8 x i16> %add.i.i
-}
-
-define <4 x i32> @test_vmlal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vmlal_high_u16:
-; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
- ret <4 x i32> %add.i.i
-}
-
-define <2 x i64> @test_vmlal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vmlal_high_u32:
-; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
- ret <2 x i64> %add.i.i
-}
-
-define <8 x i16> @test_vmlsl_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vmlsl_s8:
-; CHECK: smlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %b, <8 x i8> %c)
- %sub.i = sub <8 x i16> %a, %vmull.i.i
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vmlsl_s16:
-; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %c)
- %sub.i = sub <4 x i32> %a, %vmull2.i.i
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vmlsl_s32:
-; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %c)
- %sub.i = sub <2 x i64> %a, %vmull2.i.i
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vmlsl_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vmlsl_u8:
-; CHECK: umlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %b, <8 x i8> %c)
- %sub.i = sub <8 x i16> %a, %vmull.i.i
- ret <8 x i16> %sub.i
-}
-
-define <4 x i32> @test_vmlsl_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vmlsl_u16:
-; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %c)
- %sub.i = sub <4 x i32> %a, %vmull2.i.i
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vmlsl_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vmlsl_u32:
-; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %c)
- %sub.i = sub <2 x i64> %a, %vmull2.i.i
- ret <2 x i64> %sub.i
-}
-
-define <8 x i16> @test_vmlsl_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vmlsl_high_s8:
-; CHECK: smlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i
- ret <8 x i16> %sub.i.i
-}
-
-define <4 x i32> @test_vmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vmlsl_high_s16:
-; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
- ret <4 x i32> %sub.i.i
-}
-
-define <2 x i64> @test_vmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vmlsl_high_s32:
-; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
- ret <2 x i64> %sub.i.i
-}
-
-define <8 x i16> @test_vmlsl_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vmlsl_high_u8:
-; CHECK: umlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i
- ret <8 x i16> %sub.i.i
-}
-
-define <4 x i32> @test_vmlsl_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vmlsl_high_u16:
-; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
- ret <4 x i32> %sub.i.i
-}
-
-define <2 x i64> @test_vmlsl_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vmlsl_high_u32:
-; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
- ret <2 x i64> %sub.i.i
-}
-
-define <4 x i32> @test_vqdmull_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vqdmull_s16:
-; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %b)
- ret <4 x i32> %vqdmull2.i
-}
-
-define <2 x i64> @test_vqdmull_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vqdmull_s32:
-; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %b)
- ret <2 x i64> %vqdmull2.i
-}
-
-define <4 x i32> @test_vqdmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vqdmlal_s16:
-; CHECK: sqdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %c)
- %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
- ret <4 x i32> %vqdmlal4.i
-}
-
-define <2 x i64> @test_vqdmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vqdmlal_s32:
-; CHECK: sqdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %c)
- %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
- ret <2 x i64> %vqdmlal4.i
-}
-
-define <4 x i32> @test_vqdmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
-; CHECK: test_vqdmlsl_s16:
-; CHECK: sqdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-entry:
- %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %c)
- %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
- ret <4 x i32> %vqdmlsl4.i
-}
-
-define <2 x i64> @test_vqdmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
-; CHECK: test_vqdmlsl_s32:
-; CHECK: sqdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-entry:
- %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %c)
- %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
- ret <2 x i64> %vqdmlsl4.i
-}
-
-define <4 x i32> @test_vqdmull_high_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vqdmull_high_s16:
-; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vqdmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- ret <4 x i32> %vqdmull2.i.i
-}
-
-define <2 x i64> @test_vqdmull_high_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vqdmull_high_s32:
-; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vqdmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- ret <2 x i64> %vqdmull2.i.i
-}
-
-define <4 x i32> @test_vqdmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vqdmlal_high_s16:
-; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vqdmlal2.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %vqdmlal4.i.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i.i)
- ret <4 x i32> %vqdmlal4.i.i
-}
-
-define <2 x i64> @test_vqdmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vqdmlal_high_s32:
-; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vqdmlal2.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %vqdmlal4.i.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i.i)
- ret <2 x i64> %vqdmlal4.i.i
-}
-
-define <4 x i32> @test_vqdmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
-; CHECK: test_vqdmlsl_high_s16:
-; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-entry:
- %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vqdmlsl2.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
- %vqdmlsl4.i.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i.i)
- ret <4 x i32> %vqdmlsl4.i.i
-}
-
-define <2 x i64> @test_vqdmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
-; CHECK: test_vqdmlsl_high_s32:
-; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %vqdmlsl2.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
- %vqdmlsl4.i.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i.i)
- ret <2 x i64> %vqdmlsl4.i.i
-}
-
-define <8 x i16> @test_vmull_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vmull_p8:
-; CHECK: pmull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %a, <8 x i8> %b)
- ret <8 x i16> %vmull.i
-}
-
-define <8 x i16> @test_vmull_high_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vmull_high_p8:
-; CHECK: pmull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
- ret <8 x i16> %vmull.i.i
-}
-
diff --git a/test/CodeGen/AArch64/neon-aba-abd.ll b/test/CodeGen/AArch64/neon-aba-abd.ll
deleted file mode 100644
index 54009849ef60..000000000000
--- a/test/CodeGen/AArch64/neon-aba-abd.ll
+++ /dev/null
@@ -1,236 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uabd_v8i8:
- %abd = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uabd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %abd
-}
-
-define <8 x i8> @test_uaba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uaba_v8i8:
- %abd = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
- %aba = add <8 x i8> %lhs, %abd
-; CHECK: uaba v0.8b, v0.8b, v1.8b
- ret <8 x i8> %aba
-}
-
-define <8 x i8> @test_sabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_sabd_v8i8:
- %abd = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: sabd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %abd
-}
-
-define <8 x i8> @test_saba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_saba_v8i8:
- %abd = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
- %aba = add <8 x i8> %lhs, %abd
-; CHECK: saba v0.8b, v0.8b, v1.8b
- ret <8 x i8> %aba
-}
-
-declare <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_uabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uabd_v16i8:
- %abd = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uabd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %abd
-}
-
-define <16 x i8> @test_uaba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uaba_v16i8:
- %abd = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
- %aba = add <16 x i8> %lhs, %abd
-; CHECK: uaba v0.16b, v0.16b, v1.16b
- ret <16 x i8> %aba
-}
-
-define <16 x i8> @test_sabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_sabd_v16i8:
- %abd = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: sabd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %abd
-}
-
-define <16 x i8> @test_saba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_saba_v16i8:
- %abd = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
- %aba = add <16 x i8> %lhs, %abd
-; CHECK: saba v0.16b, v0.16b, v1.16b
- ret <16 x i8> %aba
-}
-
-declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_uabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uabd_v4i16:
- %abd = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uabd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %abd
-}
-
-define <4 x i16> @test_uaba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uaba_v4i16:
- %abd = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
- %aba = add <4 x i16> %lhs, %abd
-; CHECK: uaba v0.4h, v0.4h, v1.4h
- ret <4 x i16> %aba
-}
-
-define <4 x i16> @test_sabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sabd_v4i16:
- %abd = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sabd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %abd
-}
-
-define <4 x i16> @test_saba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_saba_v4i16:
- %abd = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
- %aba = add <4 x i16> %lhs, %abd
-; CHECK: saba v0.4h, v0.4h, v1.4h
- ret <4 x i16> %aba
-}
-
-declare <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_uabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uabd_v8i16:
- %abd = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uabd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %abd
-}
-
-define <8 x i16> @test_uaba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uaba_v8i16:
- %abd = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
- %aba = add <8 x i16> %lhs, %abd
-; CHECK: uaba v0.8h, v0.8h, v1.8h
- ret <8 x i16> %aba
-}
-
-define <8 x i16> @test_sabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sabd_v8i16:
- %abd = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sabd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %abd
-}
-
-define <8 x i16> @test_saba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_saba_v8i16:
- %abd = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
- %aba = add <8 x i16> %lhs, %abd
-; CHECK: saba v0.8h, v0.8h, v1.8h
- ret <8 x i16> %aba
-}
-
-declare <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_uabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uabd_v2i32:
- %abd = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uabd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %abd
-}
-
-define <2 x i32> @test_uaba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uaba_v2i32:
- %abd = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
- %aba = add <2 x i32> %lhs, %abd
-; CHECK: uaba v0.2s, v0.2s, v1.2s
- ret <2 x i32> %aba
-}
-
-define <2 x i32> @test_sabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sabd_v2i32:
- %abd = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sabd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %abd
-}
-
-define <2 x i32> @test_sabd_v2i32_const() {
-; CHECK: test_sabd_v2i32_const:
-; CHECK: movi d1, #0xffffffff0000
-; CHECK-NEXT: sabd v0.2s, v0.2s, v1.2s
- %1 = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(
- <2 x i32> <i32 -2147483648, i32 2147450880>,
- <2 x i32> <i32 -65536, i32 65535>)
- ret <2 x i32> %1
-}
-
-define <2 x i32> @test_saba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_saba_v2i32:
- %abd = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
- %aba = add <2 x i32> %lhs, %abd
-; CHECK: saba v0.2s, v0.2s, v1.2s
- ret <2 x i32> %aba
-}
-
-declare <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_uabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uabd_v4i32:
- %abd = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uabd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %abd
-}
-
-define <4 x i32> @test_uaba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uaba_v4i32:
- %abd = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
- %aba = add <4 x i32> %lhs, %abd
-; CHECK: uaba v0.4s, v0.4s, v1.4s
- ret <4 x i32> %aba
-}
-
-define <4 x i32> @test_sabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sabd_v4i32:
- %abd = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sabd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %abd
-}
-
-define <4 x i32> @test_saba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_saba_v4i32:
- %abd = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
- %aba = add <4 x i32> %lhs, %abd
-; CHECK: saba v0.4s, v0.4s, v1.4s
- ret <4 x i32> %aba
-}
-
-declare <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float>, <2 x float>)
-
-define <2 x float> @test_fabd_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fabd_v2f32:
- %abd = call <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fabd v0.2s, v0.2s, v1.2s
- ret <2 x float> %abd
-}
-
-declare <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float>, <4 x float>)
-
-define <4 x float> @test_fabd_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fabd_v4f32:
- %abd = call <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fabd v0.4s, v0.4s, v1.4s
- ret <4 x float> %abd
-}
-
-declare <2 x double> @llvm.arm.neon.vabds.v2f64(<2 x double>, <2 x double>)
-
-define <2 x double> @test_fabd_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fabd_v2f64:
- %abd = call <2 x double> @llvm.arm.neon.vabds.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fabd v0.2d, v0.2d, v1.2d
- ret <2 x double> %abd
-}
diff --git a/test/CodeGen/AArch64/neon-across.ll b/test/CodeGen/AArch64/neon-across.ll
deleted file mode 100644
index 733db970cf33..000000000000
--- a/test/CodeGen/AArch64/neon-across.ll
+++ /dev/null
@@ -1,476 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float>)
-
-declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float>)
-
-declare <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32>)
-
-declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8>)
-
-declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8>)
-
-declare <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32>)
-
-declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8>)
-
-declare <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32>)
-
-declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8>)
-
-declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8>)
-
-declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8>)
-
-declare <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32>)
-
-declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8>)
-
-declare <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32>)
-
-declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8>)
-
-declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8>)
-
-declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16>)
-
-declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8>)
-
-declare <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32>)
-
-declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16>)
-
-declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8>)
-
-declare <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32>)
-
-declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16>)
-
-declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8>)
-
-declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16>)
-
-declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8>)
-
-declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16>)
-
-declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)
-
-define i16 @test_vaddlv_s8(<8 x i8> %a) {
-; CHECK: test_vaddlv_s8:
-; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i16> %saddlv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vaddlv_s16(<4 x i16> %a) {
-; CHECK: test_vaddlv_s16:
-; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i32> %saddlv.i, i32 0
- ret i32 %0
-}
-
-define i16 @test_vaddlv_u8(<8 x i8> %a) {
-; CHECK: test_vaddlv_u8:
-; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i16> %uaddlv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vaddlv_u16(<4 x i16> %a) {
-; CHECK: test_vaddlv_u16:
-; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i32> %uaddlv.i, i32 0
- ret i32 %0
-}
-
-define i16 @test_vaddlvq_s8(<16 x i8> %a) {
-; CHECK: test_vaddlvq_s8:
-; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i16> %saddlv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vaddlvq_s16(<8 x i16> %a) {
-; CHECK: test_vaddlvq_s16:
-; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i32> %saddlv.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vaddlvq_s32(<4 x i32> %a) {
-; CHECK: test_vaddlvq_s32:
-; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %saddlv.i = tail call <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i64> %saddlv.i, i32 0
- ret i64 %0
-}
-
-define i16 @test_vaddlvq_u8(<16 x i8> %a) {
-; CHECK: test_vaddlvq_u8:
-; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i16> %uaddlv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vaddlvq_u16(<8 x i16> %a) {
-; CHECK: test_vaddlvq_u16:
-; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i32> %uaddlv.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vaddlvq_u32(<4 x i32> %a) {
-; CHECK: test_vaddlvq_u32:
-; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %uaddlv.i = tail call <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i64> %uaddlv.i, i32 0
- ret i64 %0
-}
-
-define i8 @test_vmaxv_s8(<8 x i8> %a) {
-; CHECK: test_vmaxv_s8:
-; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i8> %smaxv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vmaxv_s16(<4 x i16> %a) {
-; CHECK: test_vmaxv_s16:
-; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i16> %smaxv.i, i32 0
- ret i16 %0
-}
-
-define i8 @test_vmaxv_u8(<8 x i8> %a) {
-; CHECK: test_vmaxv_u8:
-; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i8> %umaxv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vmaxv_u16(<4 x i16> %a) {
-; CHECK: test_vmaxv_u16:
-; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i16> %umaxv.i, i32 0
- ret i16 %0
-}
-
-define i8 @test_vmaxvq_s8(<16 x i8> %a) {
-; CHECK: test_vmaxvq_s8:
-; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i8> %smaxv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vmaxvq_s16(<8 x i16> %a) {
-; CHECK: test_vmaxvq_s16:
-; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i16> %smaxv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vmaxvq_s32(<4 x i32> %a) {
-; CHECK: test_vmaxvq_s32:
-; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %smaxv.i = tail call <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i32> %smaxv.i, i32 0
- ret i32 %0
-}
-
-define i8 @test_vmaxvq_u8(<16 x i8> %a) {
-; CHECK: test_vmaxvq_u8:
-; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i8> %umaxv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vmaxvq_u16(<8 x i16> %a) {
-; CHECK: test_vmaxvq_u16:
-; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i16> %umaxv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vmaxvq_u32(<4 x i32> %a) {
-; CHECK: test_vmaxvq_u32:
-; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %umaxv.i = tail call <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i32> %umaxv.i, i32 0
- ret i32 %0
-}
-
-define i8 @test_vminv_s8(<8 x i8> %a) {
-; CHECK: test_vminv_s8:
-; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i8> %sminv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vminv_s16(<4 x i16> %a) {
-; CHECK: test_vminv_s16:
-; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i16> %sminv.i, i32 0
- ret i16 %0
-}
-
-define i8 @test_vminv_u8(<8 x i8> %a) {
-; CHECK: test_vminv_u8:
-; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i8> %uminv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vminv_u16(<4 x i16> %a) {
-; CHECK: test_vminv_u16:
-; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i16> %uminv.i, i32 0
- ret i16 %0
-}
-
-define i8 @test_vminvq_s8(<16 x i8> %a) {
-; CHECK: test_vminvq_s8:
-; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i8> %sminv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vminvq_s16(<8 x i16> %a) {
-; CHECK: test_vminvq_s16:
-; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i16> %sminv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vminvq_s32(<4 x i32> %a) {
-; CHECK: test_vminvq_s32:
-; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %sminv.i = tail call <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i32> %sminv.i, i32 0
- ret i32 %0
-}
-
-define i8 @test_vminvq_u8(<16 x i8> %a) {
-; CHECK: test_vminvq_u8:
-; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i8> %uminv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vminvq_u16(<8 x i16> %a) {
-; CHECK: test_vminvq_u16:
-; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i16> %uminv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vminvq_u32(<4 x i32> %a) {
-; CHECK: test_vminvq_u32:
-; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %uminv.i = tail call <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i32> %uminv.i, i32 0
- ret i32 %0
-}
-
-define i8 @test_vaddv_s8(<8 x i8> %a) {
-; CHECK: test_vaddv_s8:
-; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i8> %vaddv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vaddv_s16(<4 x i16> %a) {
-; CHECK: test_vaddv_s16:
-; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i16> %vaddv.i, i32 0
- ret i16 %0
-}
-
-define i8 @test_vaddv_u8(<8 x i8> %a) {
-; CHECK: test_vaddv_u8:
-; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
-entry:
- %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
- %0 = extractelement <1 x i8> %vaddv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vaddv_u16(<4 x i16> %a) {
-; CHECK: test_vaddv_u16:
-; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
-entry:
- %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
- %0 = extractelement <1 x i16> %vaddv.i, i32 0
- ret i16 %0
-}
-
-define i8 @test_vaddvq_s8(<16 x i8> %a) {
-; CHECK: test_vaddvq_s8:
-; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i8> %vaddv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vaddvq_s16(<8 x i16> %a) {
-; CHECK: test_vaddvq_s16:
-; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i16> %vaddv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vaddvq_s32(<4 x i32> %a) {
-; CHECK: test_vaddvq_s32:
-; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i32> %vaddv.i, i32 0
- ret i32 %0
-}
-
-define i8 @test_vaddvq_u8(<16 x i8> %a) {
-; CHECK: test_vaddvq_u8:
-; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
-entry:
- %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
- %0 = extractelement <1 x i8> %vaddv.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vaddvq_u16(<8 x i16> %a) {
-; CHECK: test_vaddvq_u16:
-; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
-entry:
- %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
- %0 = extractelement <1 x i16> %vaddv.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vaddvq_u32(<4 x i32> %a) {
-; CHECK: test_vaddvq_u32:
-; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
- %0 = extractelement <1 x i32> %vaddv.i, i32 0
- ret i32 %0
-}
-
-define float @test_vmaxvq_f32(<4 x float> %a) {
-; CHECK: test_vmaxvq_f32:
-; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %vmaxv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vmaxv.i, i32 0
- ret float %0
-}
-
-define float @test_vminvq_f32(<4 x float> %a) {
-; CHECK: test_vminvq_f32:
-; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %vminv.i = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vminv.i, i32 0
- ret float %0
-}
-
-define float @test_vmaxnmvq_f32(<4 x float> %a) {
-; CHECK: test_vmaxnmvq_f32:
-; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %vmaxnmv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vmaxnmv.i, i32 0
- ret float %0
-}
-
-define float @test_vminnmvq_f32(<4 x float> %a) {
-; CHECK: test_vminnmvq_f32:
-; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %vminnmv.i = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vminnmv.i, i32 0
- ret float %0
-}
-
diff --git a/test/CodeGen/AArch64/neon-add-pairwise.ll b/test/CodeGen/AArch64/neon-add-pairwise.ll
deleted file mode 100644
index 1abfed31908c..000000000000
--- a/test/CodeGen/AArch64/neon-add-pairwise.ll
+++ /dev/null
@@ -1,92 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_addp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; Using registers other than v0, v1 is possible, but would be odd.
-; CHECK: test_addp_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: addp v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vpadd.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_addp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_addp_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vpadd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: addp v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_addp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_addp_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: addp v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vpadd.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_addp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_addp_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vpadd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: addp v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_addp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_addp_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: addp v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vpadd.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_addp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_addp_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vpadd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: addp v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-
-declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
-
-define <2 x i64> @test_addp_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_addp_v2i64:
- %val = call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: addp v0.2d, v0.2d, v1.2d
- ret <2 x i64> %val
-}
-
-declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.arm.neon.vpadd.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.arm.neon.vpadd.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_faddp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_faddp_v2f32:
- %val = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: faddp v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_faddp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_faddp_v4f32:
- %val = call <4 x float> @llvm.arm.neon.vpadd.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: faddp v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_faddp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_faddp_v2f64:
- %val = call <2 x double> @llvm.arm.neon.vpadd.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: faddp v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
diff --git a/test/CodeGen/AArch64/neon-add-sub.ll b/test/CodeGen/AArch64/neon-add-sub.ll
deleted file mode 100644
index 078ba14bd87a..000000000000
--- a/test/CodeGen/AArch64/neon-add-sub.ll
+++ /dev/null
@@ -1,237 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: add {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
- %tmp3 = add <8 x i8> %A, %B;
- ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: add {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
- %tmp3 = add <16 x i8> %A, %B;
- ret <16 x i8> %tmp3
-}
-
-define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: add {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
- %tmp3 = add <4 x i16> %A, %B;
- ret <4 x i16> %tmp3
-}
-
-define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: add {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
- %tmp3 = add <8 x i16> %A, %B;
- ret <8 x i16> %tmp3
-}
-
-define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: add {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
- %tmp3 = add <2 x i32> %A, %B;
- ret <2 x i32> %tmp3
-}
-
-define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: add {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
- %tmp3 = add <4 x i32> %A, %B;
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: add {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
- %tmp3 = add <2 x i64> %A, %B;
- ret <2 x i64> %tmp3
-}
-
-define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fadd {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
- %tmp3 = fadd <2 x float> %A, %B;
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fadd {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
- %tmp3 = fadd <4 x float> %A, %B;
- ret <4 x float> %tmp3
-}
-define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: add {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
- %tmp3 = fadd <2 x double> %A, %B;
- ret <2 x double> %tmp3
-}
-
-define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: sub {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
- %tmp3 = sub <8 x i8> %A, %B;
- ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: sub {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
- %tmp3 = sub <16 x i8> %A, %B;
- ret <16 x i8> %tmp3
-}
-
-define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: sub {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
- %tmp3 = sub <4 x i16> %A, %B;
- ret <4 x i16> %tmp3
-}
-
-define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: sub {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
- %tmp3 = sub <8 x i16> %A, %B;
- ret <8 x i16> %tmp3
-}
-
-define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: sub {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
- %tmp3 = sub <2 x i32> %A, %B;
- ret <2 x i32> %tmp3
-}
-
-define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: sub {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
- %tmp3 = sub <4 x i32> %A, %B;
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: sub {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
- %tmp3 = sub <2 x i64> %A, %B;
- ret <2 x i64> %tmp3
-}
-
-define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fsub {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
- %tmp3 = fsub <2 x float> %A, %B;
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fsub {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
- %tmp3 = fsub <4 x float> %A, %B;
- ret <4 x float> %tmp3
-}
-define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: sub {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
- %tmp3 = fsub <2 x double> %A, %B;
- ret <2 x double> %tmp3
-}
-
-define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vadd_f64
-; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fadd <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmul_f64
-; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fmul <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vdiv_f64
-; CHECK: fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fdiv <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vmla_f64
-; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
-; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fmul <1 x double> %b, %c
- %2 = fadd <1 x double> %1, %a
- ret <1 x double> %2
-}
-
-define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vmls_f64
-; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
-; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fmul <1 x double> %b, %c
- %2 = fsub <1 x double> %a, %1
- ret <1 x double> %2
-}
-
-define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vfms_f64
-; CHECK: fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fsub <1 x double> <double -0.000000e+00>, %b
- %2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
- ret <1 x double> %2
-}
-
-define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vfma_f64
-; CHECK: fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vsub_f64
-; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fsub <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vabd_f64
-; CHECK: fabd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vabds.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmax_f64
-; CHECK: fmax d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vmaxs.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmin_f64
-; CHECK: fmin d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vmins.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmaxnm_f64
-; CHECK: fmaxnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vminnm_f64
-; CHECK: fminnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.aarch64.neon.vminnm.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vabs_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vabs_f64
-; CHECK: fabs d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vneg_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vneg_f64
-; CHECK: fneg d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fsub <1 x double> <double -0.000000e+00>, %a
- ret <1 x double> %1
-}
-
-declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
-declare <1 x double> @llvm.aarch64.neon.vminnm.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.aarch64.neon.vmaxnm.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.arm.neon.vmins.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.arm.neon.vmaxs.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.arm.neon.vabds.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>) \ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-bitcast.ll b/test/CodeGen/AArch64/neon-bitcast.ll
index f9ec70484024..61099d48fdd2 100644
--- a/test/CodeGen/AArch64/neon-bitcast.ll
+++ b/test/CodeGen/AArch64/neon-bitcast.ll
@@ -20,8 +20,8 @@ define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind {
ret <2 x i32> %val
}
-define <2 x float> @test_v8i8_to_v1f32(<8 x i8> %in) nounwind{
-; CHECK: test_v8i8_to_v1f32:
+define <2 x float> @test_v8i8_to_v2f32(<8 x i8> %in) nounwind{
+; CHECK: test_v8i8_to_v2f32:
; CHECK-NEXT: // BB#0:
; CHECK-NEXT: ret
@@ -67,8 +67,8 @@ define <2 x i32> @test_v4i16_to_v2i32(<4 x i16> %in) nounwind {
ret <2 x i32> %val
}
-define <2 x float> @test_v4i16_to_v1f32(<4 x i16> %in) nounwind{
-; CHECK: test_v4i16_to_v1f32:
+define <2 x float> @test_v4i16_to_v2f32(<4 x i16> %in) nounwind{
+; CHECK: test_v4i16_to_v2f32:
; CHECK-NEXT: // BB#0:
; CHECK-NEXT: ret
@@ -114,8 +114,8 @@ define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind {
ret <2 x i32> %val
}
-define <2 x float> @test_v2i32_to_v1f32(<2 x i32> %in) nounwind{
-; CHECK: test_v2i32_to_v1f32:
+define <2 x float> @test_v2i32_to_v2f32(<2 x i32> %in) nounwind{
+; CHECK: test_v2i32_to_v2f32:
; CHECK-NEXT: // BB#0:
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 1c43b979fc44..6497856c7d36 100644
--- a/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1,502 +1,579 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
define <8 x i8> @and8xi8(<8 x i8> %a, <8 x i8> %b) {
-;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: and8xi8:
+; CHECK: and {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = and <8 x i8> %a, %b;
ret <8 x i8> %tmp1
}
define <16 x i8> @and16xi8(<16 x i8> %a, <16 x i8> %b) {
-;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: and16xi8:
+; CHECK: and {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = and <16 x i8> %a, %b;
ret <16 x i8> %tmp1
}
define <8 x i8> @orr8xi8(<8 x i8> %a, <8 x i8> %b) {
-;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orr8xi8:
+; CHECK: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = or <8 x i8> %a, %b;
ret <8 x i8> %tmp1
}
define <16 x i8> @orr16xi8(<16 x i8> %a, <16 x i8> %b) {
-;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orr16xi8:
+; CHECK: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = or <16 x i8> %a, %b;
ret <16 x i8> %tmp1
}
define <8 x i8> @xor8xi8(<8 x i8> %a, <8 x i8> %b) {
-;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: xor8xi8:
+; CHECK: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <8 x i8> %a, %b;
ret <8 x i8> %tmp1
}
define <16 x i8> @xor16xi8(<16 x i8> %a, <16 x i8> %b) {
-;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: xor16xi8:
+; CHECK: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <16 x i8> %a, %b;
ret <16 x i8> %tmp1
}
define <8 x i8> @bsl8xi8_const(<8 x i8> %a, <8 x i8> %b) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
- %tmp1 = and <8 x i8> %a, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp2 = and <8 x i8> %b, < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0 >
+; CHECK-LABEL: bsl8xi8_const:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp1 = and <8 x i8> %a, < i8 -1, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 0, i8 0 >
+ %tmp2 = and <8 x i8> %b, < i8 0, i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 -1, i8 -1 >
%tmp3 = or <8 x i8> %tmp1, %tmp2
ret <8 x i8> %tmp3
}
define <16 x i8> @bsl16xi8_const(<16 x i8> %a, <16 x i8> %b) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
- %tmp1 = and <16 x i8> %a, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp2 = and <16 x i8> %b, < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0 >
+; CHECK-LABEL: bsl16xi8_const:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp1 = and <16 x i8> %a, < i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 0 >
+ %tmp2 = and <16 x i8> %b, < i8 0, i8 0, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 -1 >
%tmp3 = or <16 x i8> %tmp1, %tmp2
ret <16 x i8> %tmp3
}
define <8 x i8> @orn8xi8(<8 x i8> %a, <8 x i8> %b) {
-;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orn8xi8:
+; CHECK: orn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <8 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
%tmp2 = or <8 x i8> %a, %tmp1
ret <8 x i8> %tmp2
}
define <16 x i8> @orn16xi8(<16 x i8> %a, <16 x i8> %b) {
-;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orn16xi8:
+; CHECK: orn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <16 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
%tmp2 = or <16 x i8> %a, %tmp1
ret <16 x i8> %tmp2
}
define <8 x i8> @bic8xi8(<8 x i8> %a, <8 x i8> %b) {
-;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bic8xi8:
+; CHECK: bic {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <8 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
%tmp2 = and <8 x i8> %a, %tmp1
ret <8 x i8> %tmp2
}
define <16 x i8> @bic16xi8(<16 x i8> %a, <16 x i8> %b) {
-;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bic16xi8:
+; CHECK: bic {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <16 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
%tmp2 = and <16 x i8> %a, %tmp1
ret <16 x i8> %tmp2
}
define <2 x i32> @orrimm2s_lsl0(<2 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.2s, #0xff
+; CHECK-LABEL: orrimm2s_lsl0:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}
%tmp1 = or <2 x i32> %a, < i32 255, i32 255>
ret <2 x i32> %tmp1
}
define <2 x i32> @orrimm2s_lsl8(<2 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.2s, #0xff, lsl #8
+; CHECK-LABEL: orrimm2s_lsl8:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #8
%tmp1 = or <2 x i32> %a, < i32 65280, i32 65280>
ret <2 x i32> %tmp1
}
define <2 x i32> @orrimm2s_lsl16(<2 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.2s, #0xff, lsl #16
+; CHECK-LABEL: orrimm2s_lsl16:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #16
%tmp1 = or <2 x i32> %a, < i32 16711680, i32 16711680>
ret <2 x i32> %tmp1
}
define <2 x i32> @orrimm2s_lsl24(<2 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.2s, #0xff, lsl #24
+; CHECK-LABEL: orrimm2s_lsl24:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #24
%tmp1 = or <2 x i32> %a, < i32 4278190080, i32 4278190080>
ret <2 x i32> %tmp1
}
define <4 x i32> @orrimm4s_lsl0(<4 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.4s, #0xff
+; CHECK-LABEL: orrimm4s_lsl0:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}
%tmp1 = or <4 x i32> %a, < i32 255, i32 255, i32 255, i32 255>
ret <4 x i32> %tmp1
}
define <4 x i32> @orrimm4s_lsl8(<4 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.4s, #0xff, lsl #8
+; CHECK-LABEL: orrimm4s_lsl8:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #8
%tmp1 = or <4 x i32> %a, < i32 65280, i32 65280, i32 65280, i32 65280>
ret <4 x i32> %tmp1
}
define <4 x i32> @orrimm4s_lsl16(<4 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.4s, #0xff, lsl #16
+; CHECK-LABEL: orrimm4s_lsl16:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #16
%tmp1 = or <4 x i32> %a, < i32 16711680, i32 16711680, i32 16711680, i32 16711680>
ret <4 x i32> %tmp1
}
define <4 x i32> @orrimm4s_lsl24(<4 x i32> %a) {
-;CHECK: orr {{v[0-31]+}}.4s, #0xff, lsl #24
+; CHECK-LABEL: orrimm4s_lsl24:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #24
%tmp1 = or <4 x i32> %a, < i32 4278190080, i32 4278190080, i32 4278190080, i32 4278190080>
ret <4 x i32> %tmp1
}
define <4 x i16> @orrimm4h_lsl0(<4 x i16> %a) {
-;CHECK: orr {{v[0-31]+}}.4h, #0xff
+; CHECK-LABEL: orrimm4h_lsl0:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}
%tmp1 = or <4 x i16> %a, < i16 255, i16 255, i16 255, i16 255 >
ret <4 x i16> %tmp1
}
define <4 x i16> @orrimm4h_lsl8(<4 x i16> %a) {
-;CHECK: orr {{v[0-31]+}}.4h, #0xff, lsl #8
+; CHECK-LABEL: orrimm4h_lsl8:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
%tmp1 = or <4 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280 >
ret <4 x i16> %tmp1
}
define <8 x i16> @orrimm8h_lsl0(<8 x i16> %a) {
-;CHECK: orr {{v[0-31]+}}.8h, #0xff
+; CHECK-LABEL: orrimm8h_lsl0:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}
%tmp1 = or <8 x i16> %a, < i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255 >
ret <8 x i16> %tmp1
}
define <8 x i16> @orrimm8h_lsl8(<8 x i16> %a) {
-;CHECK: orr {{v[0-31]+}}.8h, #0xff, lsl #8
+; CHECK-LABEL: orrimm8h_lsl8:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
%tmp1 = or <8 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280 >
ret <8 x i16> %tmp1
}
define <2 x i32> @bicimm2s_lsl0(<2 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.2s, #0x10
+; CHECK-LABEL: bicimm2s_lsl0:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0x10|16}}
%tmp1 = and <2 x i32> %a, < i32 4294967279, i32 4294967279 >
ret <2 x i32> %tmp1
}
define <2 x i32> @bicimm2s_lsl8(<2 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.2s, #0x10, lsl #8
- %tmp1 = and <2 x i32> %a, < i32 18446744073709547519, i32 18446744073709547519 >
+; CHECK-LABEL: bicimm2s_lsl8:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0x10|16}}, lsl #8
+ %tmp1 = and <2 x i32> %a, < i32 4294963199, i32 4294963199 >
ret <2 x i32> %tmp1
}
define <2 x i32> @bicimm2s_lsl16(<2 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.2s, #0x10, lsl #16
- %tmp1 = and <2 x i32> %a, < i32 18446744073708503039, i32 18446744073708503039 >
+; CHECK-LABEL: bicimm2s_lsl16:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0x10|16}}, lsl #16
+ %tmp1 = and <2 x i32> %a, < i32 4293918719, i32 4293918719 >
ret <2 x i32> %tmp1
}
define <2 x i32> @bicimm2s_lsl124(<2 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.2s, #0x10, lsl #24
- %tmp1 = and <2 x i32> %a, < i32 18446744073441116159, i32 18446744073441116159>
+; CHECK-LABEL: bicimm2s_lsl124:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0x10|16}}, lsl #24
+ %tmp1 = and <2 x i32> %a, < i32 4026531839, i32 4026531839>
ret <2 x i32> %tmp1
}
define <4 x i32> @bicimm4s_lsl0(<4 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.4s, #0x10
+; CHECK-LABEL: bicimm4s_lsl0:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0x10|16}}
%tmp1 = and <4 x i32> %a, < i32 4294967279, i32 4294967279, i32 4294967279, i32 4294967279 >
ret <4 x i32> %tmp1
}
define <4 x i32> @bicimm4s_lsl8(<4 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.4s, #0x10, lsl #8
- %tmp1 = and <4 x i32> %a, < i32 18446744073709547519, i32 18446744073709547519, i32 18446744073709547519, i32 18446744073709547519 >
+; CHECK-LABEL: bicimm4s_lsl8:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0x10|16}}, lsl #8
+ %tmp1 = and <4 x i32> %a, < i32 4294963199, i32 4294963199, i32 4294963199, i32 4294963199 >
ret <4 x i32> %tmp1
}
define <4 x i32> @bicimm4s_lsl16(<4 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.4s, #0x10, lsl #16
- %tmp1 = and <4 x i32> %a, < i32 18446744073708503039, i32 18446744073708503039, i32 18446744073708503039, i32 18446744073708503039 >
+; CHECK-LABEL: bicimm4s_lsl16:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0x10|16}}, lsl #16
+ %tmp1 = and <4 x i32> %a, < i32 4293918719, i32 4293918719, i32 4293918719, i32 4293918719 >
ret <4 x i32> %tmp1
}
define <4 x i32> @bicimm4s_lsl124(<4 x i32> %a) {
-;CHECK: bic {{v[0-31]+}}.4s, #0x10, lsl #24
- %tmp1 = and <4 x i32> %a, < i32 18446744073441116159, i32 18446744073441116159, i32 18446744073441116159, i32 18446744073441116159>
+; CHECK-LABEL: bicimm4s_lsl124:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0x10|16}}, lsl #24
+ %tmp1 = and <4 x i32> %a, < i32 4026531839, i32 4026531839, i32 4026531839, i32 4026531839>
ret <4 x i32> %tmp1
}
define <4 x i16> @bicimm4h_lsl0_a(<4 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.4h, #0x10
- %tmp1 = and <4 x i16> %a, < i16 18446744073709551599, i16 18446744073709551599, i16 18446744073709551599, i16 18446744073709551599 >
+; CHECK-LABEL: bicimm4h_lsl0_a:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0x10|16}}
+ %tmp1 = and <4 x i16> %a, < i16 4294967279, i16 4294967279, i16 4294967279, i16 4294967279 >
ret <4 x i16> %tmp1
}
define <4 x i16> @bicimm4h_lsl0_b(<4 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.4h, #0x0
+; CHECK-LABEL: bicimm4h_lsl0_b:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}
%tmp1 = and <4 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280 >
ret <4 x i16> %tmp1
}
define <4 x i16> @bicimm4h_lsl8_a(<4 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.4h, #0x10, lsl #8
- %tmp1 = and <4 x i16> %a, < i16 18446744073709547519, i16 18446744073709547519, i16 18446744073709547519, i16 18446744073709547519>
+; CHECK-LABEL: bicimm4h_lsl8_a:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0x10|16}}, lsl #8
+ %tmp1 = and <4 x i16> %a, < i16 4294963199, i16 4294963199, i16 4294963199, i16 4294963199>
ret <4 x i16> %tmp1
}
define <4 x i16> @bicimm4h_lsl8_b(<4 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.4h, #0x0, lsl #8
+; CHECK-LABEL: bicimm4h_lsl8_b:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
%tmp1 = and <4 x i16> %a, < i16 255, i16 255, i16 255, i16 255>
ret <4 x i16> %tmp1
}
define <8 x i16> @bicimm8h_lsl0_a(<8 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.8h, #0x10
- %tmp1 = and <8 x i16> %a, < i16 18446744073709551599, i16 18446744073709551599, i16 18446744073709551599, i16 18446744073709551599,
- i16 18446744073709551599, i16 18446744073709551599, i16 18446744073709551599, i16 18446744073709551599 >
+; CHECK-LABEL: bicimm8h_lsl0_a:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0x10|16}}
+ %tmp1 = and <8 x i16> %a, < i16 4294967279, i16 4294967279, i16 4294967279, i16 4294967279,
+ i16 4294967279, i16 4294967279, i16 4294967279, i16 4294967279 >
ret <8 x i16> %tmp1
}
define <8 x i16> @bicimm8h_lsl0_b(<8 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.8h, #0x0
+; CHECK-LABEL: bicimm8h_lsl0_b:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}
%tmp1 = and <8 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280 >
ret <8 x i16> %tmp1
}
define <8 x i16> @bicimm8h_lsl8_a(<8 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.8h, #0x10, lsl #8
- %tmp1 = and <8 x i16> %a, < i16 18446744073709547519, i16 18446744073709547519, i16 18446744073709547519, i16 18446744073709547519,
- i16 18446744073709547519, i16 18446744073709547519, i16 18446744073709547519, i16 18446744073709547519>
+; CHECK-LABEL: bicimm8h_lsl8_a:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0x10|16}}, lsl #8
+ %tmp1 = and <8 x i16> %a, < i16 4294963199, i16 4294963199, i16 4294963199, i16 4294963199,
+ i16 4294963199, i16 4294963199, i16 4294963199, i16 4294963199>
ret <8 x i16> %tmp1
}
define <8 x i16> @bicimm8h_lsl8_b(<8 x i16> %a) {
-;CHECK: bic {{v[0-31]+}}.8h, #0x0, lsl #8
+; CHECK-LABEL: bicimm8h_lsl8_b:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
%tmp1 = and <8 x i16> %a, < i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
ret <8 x i16> %tmp1
}
define <2 x i32> @and2xi32(<2 x i32> %a, <2 x i32> %b) {
-;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: and2xi32:
+; CHECK: and {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = and <2 x i32> %a, %b;
ret <2 x i32> %tmp1
}
define <4 x i16> @and4xi16(<4 x i16> %a, <4 x i16> %b) {
-;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: and4xi16:
+; CHECK: and {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = and <4 x i16> %a, %b;
ret <4 x i16> %tmp1
}
define <1 x i64> @and1xi64(<1 x i64> %a, <1 x i64> %b) {
-;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: and1xi64:
+; CHECK: and {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = and <1 x i64> %a, %b;
ret <1 x i64> %tmp1
}
define <4 x i32> @and4xi32(<4 x i32> %a, <4 x i32> %b) {
-;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: and4xi32:
+; CHECK: and {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = and <4 x i32> %a, %b;
ret <4 x i32> %tmp1
}
define <8 x i16> @and8xi16(<8 x i16> %a, <8 x i16> %b) {
-;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: and8xi16:
+; CHECK: and {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = and <8 x i16> %a, %b;
ret <8 x i16> %tmp1
}
define <2 x i64> @and2xi64(<2 x i64> %a, <2 x i64> %b) {
-;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: and2xi64:
+; CHECK: and {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = and <2 x i64> %a, %b;
ret <2 x i64> %tmp1
}
define <2 x i32> @orr2xi32(<2 x i32> %a, <2 x i32> %b) {
-;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orr2xi32:
+; CHECK: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = or <2 x i32> %a, %b;
ret <2 x i32> %tmp1
}
define <4 x i16> @orr4xi16(<4 x i16> %a, <4 x i16> %b) {
-;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orr4xi16:
+; CHECK: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = or <4 x i16> %a, %b;
ret <4 x i16> %tmp1
}
define <1 x i64> @orr1xi64(<1 x i64> %a, <1 x i64> %b) {
-;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orr1xi64:
+; CHECK: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = or <1 x i64> %a, %b;
ret <1 x i64> %tmp1
}
define <4 x i32> @orr4xi32(<4 x i32> %a, <4 x i32> %b) {
-;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orr4xi32:
+; CHECK: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = or <4 x i32> %a, %b;
ret <4 x i32> %tmp1
}
define <8 x i16> @orr8xi16(<8 x i16> %a, <8 x i16> %b) {
-;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orr8xi16:
+; CHECK: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = or <8 x i16> %a, %b;
ret <8 x i16> %tmp1
}
define <2 x i64> @orr2xi64(<2 x i64> %a, <2 x i64> %b) {
-;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orr2xi64:
+; CHECK: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = or <2 x i64> %a, %b;
ret <2 x i64> %tmp1
}
define <2 x i32> @eor2xi32(<2 x i32> %a, <2 x i32> %b) {
-;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: eor2xi32:
+; CHECK: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <2 x i32> %a, %b;
ret <2 x i32> %tmp1
}
define <4 x i16> @eor4xi16(<4 x i16> %a, <4 x i16> %b) {
-;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: eor4xi16:
+; CHECK: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <4 x i16> %a, %b;
ret <4 x i16> %tmp1
}
define <1 x i64> @eor1xi64(<1 x i64> %a, <1 x i64> %b) {
-;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: eor1xi64:
+; CHECK: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <1 x i64> %a, %b;
ret <1 x i64> %tmp1
}
define <4 x i32> @eor4xi32(<4 x i32> %a, <4 x i32> %b) {
-;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: eor4xi32:
+; CHECK: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <4 x i32> %a, %b;
ret <4 x i32> %tmp1
}
define <8 x i16> @eor8xi16(<8 x i16> %a, <8 x i16> %b) {
-;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: eor8xi16:
+; CHECK: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <8 x i16> %a, %b;
ret <8 x i16> %tmp1
}
define <2 x i64> @eor2xi64(<2 x i64> %a, <2 x i64> %b) {
-;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: eor2xi64:
+; CHECK: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <2 x i64> %a, %b;
ret <2 x i64> %tmp1
}
define <2 x i32> @bic2xi32(<2 x i32> %a, <2 x i32> %b) {
-;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bic2xi32:
+; CHECK: bic {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <2 x i32> %b, < i32 -1, i32 -1 >
%tmp2 = and <2 x i32> %a, %tmp1
ret <2 x i32> %tmp2
}
define <4 x i16> @bic4xi16(<4 x i16> %a, <4 x i16> %b) {
-;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bic4xi16:
+; CHECK: bic {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <4 x i16> %b, < i16 -1, i16 -1, i16 -1, i16-1 >
%tmp2 = and <4 x i16> %a, %tmp1
ret <4 x i16> %tmp2
}
define <1 x i64> @bic1xi64(<1 x i64> %a, <1 x i64> %b) {
-;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bic1xi64:
+; CHECK: bic {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <1 x i64> %b, < i64 -1>
%tmp2 = and <1 x i64> %a, %tmp1
ret <1 x i64> %tmp2
}
define <4 x i32> @bic4xi32(<4 x i32> %a, <4 x i32> %b) {
-;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bic4xi32:
+; CHECK: bic {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <4 x i32> %b, < i32 -1, i32 -1, i32 -1, i32 -1>
%tmp2 = and <4 x i32> %a, %tmp1
ret <4 x i32> %tmp2
}
define <8 x i16> @bic8xi16(<8 x i16> %a, <8 x i16> %b) {
-;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bic8xi16:
+; CHECK: bic {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <8 x i16> %b, < i16 -1, i16 -1, i16 -1, i16-1, i16 -1, i16 -1, i16 -1, i16 -1 >
%tmp2 = and <8 x i16> %a, %tmp1
ret <8 x i16> %tmp2
}
define <2 x i64> @bic2xi64(<2 x i64> %a, <2 x i64> %b) {
-;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bic2xi64:
+; CHECK: bic {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <2 x i64> %b, < i64 -1, i64 -1>
%tmp2 = and <2 x i64> %a, %tmp1
ret <2 x i64> %tmp2
}
define <2 x i32> @orn2xi32(<2 x i32> %a, <2 x i32> %b) {
-;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orn2xi32:
+; CHECK: orn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <2 x i32> %b, < i32 -1, i32 -1 >
%tmp2 = or <2 x i32> %a, %tmp1
ret <2 x i32> %tmp2
}
define <4 x i16> @orn4xi16(<4 x i16> %a, <4 x i16> %b) {
-;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orn4xi16:
+; CHECK: orn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <4 x i16> %b, < i16 -1, i16 -1, i16 -1, i16-1 >
%tmp2 = or <4 x i16> %a, %tmp1
ret <4 x i16> %tmp2
}
define <1 x i64> @orn1xi64(<1 x i64> %a, <1 x i64> %b) {
-;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: orn1xi64:
+; CHECK: orn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = xor <1 x i64> %b, < i64 -1>
%tmp2 = or <1 x i64> %a, %tmp1
ret <1 x i64> %tmp2
}
define <4 x i32> @orn4xi32(<4 x i32> %a, <4 x i32> %b) {
-;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orn4xi32:
+; CHECK: orn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <4 x i32> %b, < i32 -1, i32 -1, i32 -1, i32 -1>
%tmp2 = or <4 x i32> %a, %tmp1
ret <4 x i32> %tmp2
}
define <8 x i16> @orn8xi16(<8 x i16> %a, <8 x i16> %b) {
-;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orn8xi16:
+; CHECK: orn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <8 x i16> %b, < i16 -1, i16 -1, i16 -1, i16-1, i16 -1, i16 -1, i16 -1, i16 -1 >
%tmp2 = or <8 x i16> %a, %tmp1
ret <8 x i16> %tmp2
}
define <2 x i64> @orn2xi64(<2 x i64> %a, <2 x i64> %b) {
-;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: orn2xi64:
+; CHECK: orn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = xor <2 x i64> %b, < i64 -1, i64 -1>
%tmp2 = or <2 x i64> %a, %tmp1
ret <2 x i64> %tmp2
}
+
define <2 x i32> @bsl2xi32_const(<2 x i32> %a, <2 x i32> %b) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
- %tmp1 = and <2 x i32> %a, < i32 -1, i32 -1 >
- %tmp2 = and <2 x i32> %b, < i32 0, i32 0 >
+; CHECK-LABEL: bsl2xi32_const:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp1 = and <2 x i32> %a, < i32 -1, i32 0 >
+ %tmp2 = and <2 x i32> %b, < i32 0, i32 -1 >
%tmp3 = or <2 x i32> %tmp1, %tmp2
ret <2 x i32> %tmp3
}
define <4 x i16> @bsl4xi16_const(<4 x i16> %a, <4 x i16> %b) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
- %tmp1 = and <4 x i16> %a, < i16 -1, i16 -1, i16 -1,i16 -1 >
- %tmp2 = and <4 x i16> %b, < i16 0, i16 0,i16 0, i16 0 >
+; CHECK-LABEL: bsl4xi16_const:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp1 = and <4 x i16> %a, < i16 -1, i16 0, i16 -1,i16 0 >
+ %tmp2 = and <4 x i16> %b, < i16 0, i16 -1,i16 0, i16 -1 >
%tmp3 = or <4 x i16> %tmp1, %tmp2
ret <4 x i16> %tmp3
}
define <1 x i64> @bsl1xi64_const(<1 x i64> %a, <1 x i64> %b) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
- %tmp1 = and <1 x i64> %a, < i64 -1 >
- %tmp2 = and <1 x i64> %b, < i64 0 >
+; CHECK-LABEL: bsl1xi64_const:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp1 = and <1 x i64> %a, < i64 -16 >
+ %tmp2 = and <1 x i64> %b, < i64 15 >
%tmp3 = or <1 x i64> %tmp1, %tmp2
ret <1 x i64> %tmp3
}
define <4 x i32> @bsl4xi32_const(<4 x i32> %a, <4 x i32> %b) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
- %tmp1 = and <4 x i32> %a, < i32 -1, i32 -1, i32 -1, i32 -1 >
- %tmp2 = and <4 x i32> %b, < i32 0, i32 0, i32 0, i32 0 >
+; CHECK-LABEL: bsl4xi32_const:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp1 = and <4 x i32> %a, < i32 -1, i32 0, i32 -1, i32 0 >
+ %tmp2 = and <4 x i32> %b, < i32 0, i32 -1, i32 0, i32 -1 >
%tmp3 = or <4 x i32> %tmp1, %tmp2
ret <4 x i32> %tmp3
}
define <8 x i16> @bsl8xi16_const(<8 x i16> %a, <8 x i16> %b) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
- %tmp1 = and <8 x i16> %a, < i16 -1, i16 -1, i16 -1,i16 -1, i16 -1, i16 -1, i16 -1,i16 -1 >
- %tmp2 = and <8 x i16> %b, < i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0 >
+; CHECK-LABEL: bsl8xi16_const:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp1 = and <8 x i16> %a, < i16 -1, i16 -1, i16 0,i16 0, i16 -1, i16 -1, i16 0,i16 0 >
+ %tmp2 = and <8 x i16> %b, < i16 0, i16 0, i16 -1, i16 -1, i16 0, i16 0, i16 -1, i16 -1 >
%tmp3 = or <8 x i16> %tmp1, %tmp2
ret <8 x i16> %tmp3
}
define <2 x i64> @bsl2xi64_const(<2 x i64> %a, <2 x i64> %b) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
- %tmp1 = and <2 x i64> %a, < i64 -1, i64 -1 >
- %tmp2 = and <2 x i64> %b, < i64 0, i64 0 >
+; CHECK-LABEL: bsl2xi64_const:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp1 = and <2 x i64> %a, < i64 -1, i64 0 >
+ %tmp2 = and <2 x i64> %b, < i64 0, i64 -1 >
%tmp3 = or <2 x i64> %tmp1, %tmp2
ret <2 x i64> %tmp3
}
define <8 x i8> @bsl8xi8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bsl8xi8:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%1 = and <8 x i8> %v1, %v2
%2 = xor <8 x i8> %v1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%3 = and <8 x i8> %2, %v3
@@ -505,7 +582,8 @@ define <8 x i8> @bsl8xi8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
}
define <4 x i16> @bsl4xi16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bsl4xi16:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%1 = and <4 x i16> %v1, %v2
%2 = xor <4 x i16> %v1, <i16 -1, i16 -1, i16 -1, i16 -1>
%3 = and <4 x i16> %2, %v3
@@ -514,7 +592,8 @@ define <4 x i16> @bsl4xi16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
}
define <2 x i32> @bsl2xi32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bsl2xi32:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%1 = and <2 x i32> %v1, %v2
%2 = xor <2 x i32> %v1, <i32 -1, i32 -1>
%3 = and <2 x i32> %2, %v3
@@ -523,7 +602,8 @@ define <2 x i32> @bsl2xi32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
}
define <1 x i64> @bsl1xi64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
-;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+; CHECK-LABEL: bsl1xi64:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%1 = and <1 x i64> %v1, %v2
%2 = xor <1 x i64> %v1, <i64 -1>
%3 = and <1 x i64> %2, %v3
@@ -532,7 +612,8 @@ define <1 x i64> @bsl1xi64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
}
define <16 x i8> @bsl16xi8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bsl16xi8:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%1 = and <16 x i8> %v1, %v2
%2 = xor <16 x i8> %v1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%3 = and <16 x i8> %2, %v3
@@ -541,7 +622,8 @@ define <16 x i8> @bsl16xi8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
}
define <8 x i16> @bsl8xi16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bsl8xi16:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%1 = and <8 x i16> %v1, %v2
%2 = xor <8 x i16> %v1, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%3 = and <8 x i16> %2, %v3
@@ -550,7 +632,8 @@ define <8 x i16> @bsl8xi16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
}
define <4 x i32> @bsl4xi32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bsl4xi32:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%1 = and <4 x i32> %v1, %v2
%2 = xor <4 x i32> %v1, <i32 -1, i32 -1, i32 -1, i32 -1>
%3 = and <4 x i32> %2, %v3
@@ -558,8 +641,73 @@ define <4 x i32> @bsl4xi32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
ret <4 x i32> %4
}
+define <8 x i8> @vselect_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: vselect_v8i8:
+; CHECK: movi {{d[0-9]+}}, #0x{{0*}}ffff
+; CHECK-NEXT: {{bsl v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b|and v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b}}
+ %b = select <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i8> %a, <8 x i8> <i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ ret <8 x i8> %b
+}
+
+define <4 x i16> @vselect_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: vselect_v4i16:
+; CHECK: movi {{d[0-9]+}}, #0x{{0*}}ffff
+; CHECK-NEXT: {{bsl v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b|and v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b}}
+ %b = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i16> %a, <4 x i16> <i16 undef, i16 0, i16 0, i16 0>
+ ret <4 x i16> %b
+}
+
+define <8 x i8> @vselect_cmp_ne(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: vselect_cmp_ne:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %cmp = icmp ne <8 x i8> %a, %b
+ %d = select <8 x i1> %cmp, <8 x i8> %b, <8 x i8> %c
+ ret <8 x i8> %d
+}
+
+define <8 x i8> @vselect_cmp_eq(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: vselect_cmp_eq:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %cmp = icmp eq <8 x i8> %a, %b
+ %d = select <8 x i1> %cmp, <8 x i8> %b, <8 x i8> %c
+ ret <8 x i8> %d
+}
+
+define <8 x i8> @vselect_cmpz_ne(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: vselect_cmpz_ne:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %cmp = icmp ne <8 x i8> %a, zeroinitializer
+ %d = select <8 x i1> %cmp, <8 x i8> %b, <8 x i8> %c
+ ret <8 x i8> %d
+}
+
+define <8 x i8> @vselect_cmpz_eq(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: vselect_cmpz_eq:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+; CHECK-NEXT: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %cmp = icmp eq <8 x i8> %a, zeroinitializer
+ %d = select <8 x i1> %cmp, <8 x i8> %b, <8 x i8> %c
+ ret <8 x i8> %d
+}
+
+define <8 x i8> @vselect_tst(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: vselect_tst:
+; CHECK: cmtst {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = and <8 x i8> %a, %b
+ %tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer
+ %d = select <8 x i1> %tmp4, <8 x i8> %b, <8 x i8> %c
+ ret <8 x i8> %d
+}
+
define <2 x i64> @bsl2xi64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3) {
-;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+; CHECK-LABEL: bsl2xi64:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%1 = and <2 x i64> %v1, %v2
%2 = xor <2 x i64> %v1, <i64 -1, i64 -1>
%3 = and <2 x i64> %2, %v3
@@ -568,27 +716,535 @@ define <2 x i64> @bsl2xi64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3) {
}
define <8 x i8> @orrimm8b_as_orrimm4h_lsl0(<8 x i8> %a) {
-;CHECK: orr {{v[0-31]+}}.4h, #0xff
+; CHECK-LABEL: orrimm8b_as_orrimm4h_lsl0:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}
%val = or <8 x i8> %a, <i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
ret <8 x i8> %val
}
define <8 x i8> @orrimm8b_as_orimm4h_lsl8(<8 x i8> %a) {
-;CHECK: orr {{v[0-31]+}}.4h, #0xff, lsl #8
+; CHECK-LABEL: orrimm8b_as_orimm4h_lsl8:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
%val = or <8 x i8> %a, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
ret <8 x i8> %val
}
define <16 x i8> @orimm16b_as_orrimm8h_lsl0(<16 x i8> %a) {
-;CHECK: orr {{v[0-31]+}}.8h, #0xff
+; CHECK-LABEL: orimm16b_as_orrimm8h_lsl0:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}
%val = or <16 x i8> %a, <i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
ret <16 x i8> %val
}
define <16 x i8> @orimm16b_as_orrimm8h_lsl8(<16 x i8> %a) {
-;CHECK: orr {{v[0-31]+}}.8h, #0xff, lsl #8
+; CHECK-LABEL: orimm16b_as_orrimm8h_lsl8:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
%val = or <16 x i8> %a, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
ret <16 x i8> %val
}
+define <8 x i8> @and8imm2s_lsl0(<8 x i8> %a) {
+; CHECK-LABEL: and8imm2s_lsl0:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}
+ %tmp1 = and <8 x i8> %a, < i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm2s_lsl8(<8 x i8> %a) {
+; CHECK-LABEL: and8imm2s_lsl8:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #8
+ %tmp1 = and <8 x i8> %a, < i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm2s_lsl16(<8 x i8> %a) {
+; CHECK-LABEL: and8imm2s_lsl16:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #16
+ %tmp1 = and <8 x i8> %a, < i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm2s_lsl24(<8 x i8> %a) {
+; CHECK-LABEL: and8imm2s_lsl24:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xfe|254}}, lsl #24
+ %tmp1 = and <8 x i8> %a, < i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1>
+ ret <8 x i8> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl0(<4 x i16> %a) {
+; CHECK-LABEL: and16imm2s_lsl0:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}
+ %tmp1 = and <4 x i16> %a, < i16 65280, i16 65535, i16 65280, i16 65535>
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl8(<4 x i16> %a) {
+; CHECK-LABEL: and16imm2s_lsl8:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #8
+ %tmp1 = and <4 x i16> %a, < i16 255, i16 65535, i16 255, i16 65535>
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl16(<4 x i16> %a) {
+; CHECK-LABEL: and16imm2s_lsl16:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #16
+ %tmp1 = and <4 x i16> %a, < i16 65535, i16 65280, i16 65535, i16 65280>
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl24(<4 x i16> %a) {
+; CHECK-LABEL: and16imm2s_lsl24:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xfe|254}}, lsl #24
+ %tmp1 = and <4 x i16> %a, < i16 65535, i16 511, i16 65535, i16 511>
+ ret <4 x i16> %tmp1
+}
+
+
+define <1 x i64> @and64imm2s_lsl0(<1 x i64> %a) {
+; CHECK-LABEL: and64imm2s_lsl0:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}
+ %tmp1 = and <1 x i64> %a, < i64 -1095216660736>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm2s_lsl8(<1 x i64> %a) {
+; CHECK-LABEL: and64imm2s_lsl8:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #8
+ %tmp1 = and <1 x i64> %a, < i64 -280375465148161>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm2s_lsl16(<1 x i64> %a) {
+; CHECK-LABEL: and64imm2s_lsl16:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #16
+ %tmp1 = and <1 x i64> %a, < i64 -71776119077928961>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm2s_lsl24(<1 x i64> %a) {
+; CHECK-LABEL: and64imm2s_lsl24:
+; CHECK: bic {{v[0-9]+}}.2s, #{{0xfe|254}}, lsl #24
+ %tmp1 = and <1 x i64> %a, < i64 144115183814443007>
+ ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl0(<16 x i8> %a) {
+; CHECK-LABEL: and8imm4s_lsl0:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}
+ %tmp1 = and <16 x i8> %a, < i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255>
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl8(<16 x i8> %a) {
+; CHECK-LABEL: and8imm4s_lsl8:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #8
+ %tmp1 = and <16 x i8> %a, < i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255>
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl16(<16 x i8> %a) {
+; CHECK-LABEL: and8imm4s_lsl16:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #16
+ %tmp1 = and <16 x i8> %a, < i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255>
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl24(<16 x i8> %a) {
+; CHECK-LABEL: and8imm4s_lsl24:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xfe|254}}, lsl #24
+ %tmp1 = and <16 x i8> %a, < i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1>
+ ret <16 x i8> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl0(<8 x i16> %a) {
+; CHECK-LABEL: and16imm4s_lsl0:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}
+ %tmp1 = and <8 x i16> %a, < i16 65280, i16 65535, i16 65280, i16 65535, i16 65280, i16 65535, i16 65280, i16 65535>
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl8(<8 x i16> %a) {
+; CHECK-LABEL: and16imm4s_lsl8:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #8
+ %tmp1 = and <8 x i16> %a, < i16 255, i16 65535, i16 255, i16 65535, i16 255, i16 65535, i16 255, i16 65535>
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl16(<8 x i16> %a) {
+; CHECK-LABEL: and16imm4s_lsl16:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #16
+ %tmp1 = and <8 x i16> %a, < i16 65535, i16 65280, i16 65535, i16 65280, i16 65535, i16 65280, i16 65535, i16 65280>
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl24(<8 x i16> %a) {
+; CHECK-LABEL: and16imm4s_lsl24:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xfe|254}}, lsl #24
+ %tmp1 = and <8 x i16> %a, < i16 65535, i16 511, i16 65535, i16 511, i16 65535, i16 511, i16 65535, i16 511>
+ ret <8 x i16> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl0(<2 x i64> %a) {
+; CHECK-LABEL: and64imm4s_lsl0:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}
+ %tmp1 = and <2 x i64> %a, < i64 -1095216660736, i64 -1095216660736>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl8(<2 x i64> %a) {
+; CHECK-LABEL: and64imm4s_lsl8:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #8
+ %tmp1 = and <2 x i64> %a, < i64 -280375465148161, i64 -280375465148161>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl16(<2 x i64> %a) {
+; CHECK-LABEL: and64imm4s_lsl16:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #16
+ %tmp1 = and <2 x i64> %a, < i64 -71776119077928961, i64 -71776119077928961>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl24(<2 x i64> %a) {
+; CHECK-LABEL: and64imm4s_lsl24:
+; CHECK: bic {{v[0-9]+}}.4s, #{{0xfe|254}}, lsl #24
+ %tmp1 = and <2 x i64> %a, < i64 144115183814443007, i64 144115183814443007>
+ ret <2 x i64> %tmp1
+}
+
+define <8 x i8> @and8imm4h_lsl0(<8 x i8> %a) {
+; CHECK-LABEL: and8imm4h_lsl0:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}
+ %tmp1 = and <8 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm4h_lsl8(<8 x i8> %a) {
+; CHECK-LABEL: and8imm4h_lsl8:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
+ %tmp1 = and <8 x i8> %a, < i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+ ret <8 x i8> %tmp1
+}
+
+define <2 x i32> @and16imm4h_lsl0(<2 x i32> %a) {
+; CHECK-LABEL: and16imm4h_lsl0:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}
+ %tmp1 = and <2 x i32> %a, < i32 4278255360, i32 4278255360>
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @and16imm4h_lsl8(<2 x i32> %a) {
+; CHECK-LABEL: and16imm4h_lsl8:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
+ %tmp1 = and <2 x i32> %a, < i32 16711935, i32 16711935>
+ ret <2 x i32> %tmp1
+}
+
+define <1 x i64> @and64imm4h_lsl0(<1 x i64> %a) {
+; CHECK-LABEL: and64imm4h_lsl0:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}
+ %tmp1 = and <1 x i64> %a, < i64 -71777214294589696>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm4h_lsl8(<1 x i64> %a) {
+; CHECK-LABEL: and64imm4h_lsl8:
+; CHECK: bic {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
+ %tmp1 = and <1 x i64> %a, < i64 71777214294589695>
+ ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @and8imm8h_lsl0(<16 x i8> %a) {
+; CHECK-LABEL: and8imm8h_lsl0:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}
+ %tmp1 = and <16 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255 >
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm8h_lsl8(<16 x i8> %a) {
+; CHECK-LABEL: and8imm8h_lsl8:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
+ %tmp1 = and <16 x i8> %a, <i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0 >
+ ret <16 x i8> %tmp1
+}
+
+define <4 x i32> @and16imm8h_lsl0(<4 x i32> %a) {
+; CHECK-LABEL: and16imm8h_lsl0:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}
+ %tmp1 = and <4 x i32> %a, < i32 4278255360, i32 4278255360, i32 4278255360, i32 4278255360>
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @and16imm8h_lsl8(<4 x i32> %a) {
+; CHECK-LABEL: and16imm8h_lsl8:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
+ %tmp1 = and <4 x i32> %a, < i32 16711935, i32 16711935, i32 16711935, i32 16711935>
+ ret <4 x i32> %tmp1
+}
+
+define <2 x i64> @and64imm8h_lsl0(<2 x i64> %a) {
+; CHECK-LABEL: and64imm8h_lsl0:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}
+ %tmp1 = and <2 x i64> %a, < i64 -71777214294589696, i64 -71777214294589696>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm8h_lsl8(<2 x i64> %a) {
+; CHECK-LABEL: and64imm8h_lsl8:
+; CHECK: bic {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
+ %tmp1 = and <2 x i64> %a, < i64 71777214294589695, i64 71777214294589695>
+ ret <2 x i64> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl0(<8 x i8> %a) {
+; CHECK-LABEL: orr8imm2s_lsl0:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}
+ %tmp1 = or <8 x i8> %a, < i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl8(<8 x i8> %a) {
+; CHECK-LABEL: orr8imm2s_lsl8:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #8
+ %tmp1 = or <8 x i8> %a, < i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl16(<8 x i8> %a) {
+; CHECK-LABEL: orr8imm2s_lsl16:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #16
+ %tmp1 = or <8 x i8> %a, < i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl24(<8 x i8> %a) {
+; CHECK-LABEL: orr8imm2s_lsl24:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #24
+ %tmp1 = or <8 x i8> %a, < i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255>
+ ret <8 x i8> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl0(<4 x i16> %a) {
+; CHECK-LABEL: orr16imm2s_lsl0:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}
+ %tmp1 = or <4 x i16> %a, < i16 255, i16 0, i16 255, i16 0>
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl8(<4 x i16> %a) {
+; CHECK-LABEL: orr16imm2s_lsl8:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #8
+ %tmp1 = or <4 x i16> %a, < i16 65280, i16 0, i16 65280, i16 0>
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl16(<4 x i16> %a) {
+; CHECK-LABEL: orr16imm2s_lsl16:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #16
+ %tmp1 = or <4 x i16> %a, < i16 0, i16 255, i16 0, i16 255>
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl24(<4 x i16> %a) {
+; CHECK-LABEL: orr16imm2s_lsl24:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #24
+ %tmp1 = or <4 x i16> %a, < i16 0, i16 65280, i16 0, i16 65280>
+ ret <4 x i16> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl0(<1 x i64> %a) {
+; CHECK-LABEL: orr64imm2s_lsl0:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}
+ %tmp1 = or <1 x i64> %a, < i64 1095216660735>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl8(<1 x i64> %a) {
+; CHECK-LABEL: orr64imm2s_lsl8:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #8
+ %tmp1 = or <1 x i64> %a, < i64 280375465148160>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl16(<1 x i64> %a) {
+; CHECK-LABEL: orr64imm2s_lsl16:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #16
+ %tmp1 = or <1 x i64> %a, < i64 71776119077928960>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl24(<1 x i64> %a) {
+; CHECK-LABEL: orr64imm2s_lsl24:
+; CHECK: orr {{v[0-9]+}}.2s, #{{0xff|255}}, lsl #24
+ %tmp1 = or <1 x i64> %a, < i64 -72057589759737856>
+ ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl0(<16 x i8> %a) {
+; CHECK-LABEL: orr8imm4s_lsl0:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}
+ %tmp1 = or <16 x i8> %a, < i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0>
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl8(<16 x i8> %a) {
+; CHECK-LABEL: orr8imm4s_lsl8:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #8
+ %tmp1 = or <16 x i8> %a, < i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0>
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl16(<16 x i8> %a) {
+; CHECK-LABEL: orr8imm4s_lsl16:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #16
+ %tmp1 = or <16 x i8> %a, < i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0>
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl24(<16 x i8> %a) {
+; CHECK-LABEL: orr8imm4s_lsl24:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #24
+ %tmp1 = or <16 x i8> %a, < i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255>
+ ret <16 x i8> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl0(<8 x i16> %a) {
+; CHECK-LABEL: orr16imm4s_lsl0:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}
+ %tmp1 = or <8 x i16> %a, < i16 255, i16 0, i16 255, i16 0, i16 255, i16 0, i16 255, i16 0>
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl8(<8 x i16> %a) {
+; CHECK-LABEL: orr16imm4s_lsl8:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #8
+ %tmp1 = or <8 x i16> %a, < i16 65280, i16 0, i16 65280, i16 0, i16 65280, i16 0, i16 65280, i16 0>
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl16(<8 x i16> %a) {
+; CHECK-LABEL: orr16imm4s_lsl16:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #16
+ %tmp1 = or <8 x i16> %a, < i16 0, i16 255, i16 0, i16 255, i16 0, i16 255, i16 0, i16 255>
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl24(<8 x i16> %a) {
+; CHECK-LABEL: orr16imm4s_lsl24:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #24
+ %tmp1 = or <8 x i16> %a, < i16 0, i16 65280, i16 0, i16 65280, i16 0, i16 65280, i16 0, i16 65280>
+ ret <8 x i16> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl0(<2 x i64> %a) {
+; CHECK-LABEL: orr64imm4s_lsl0:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}
+ %tmp1 = or <2 x i64> %a, < i64 1095216660735, i64 1095216660735>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl8(<2 x i64> %a) {
+; CHECK-LABEL: orr64imm4s_lsl8:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #8
+ %tmp1 = or <2 x i64> %a, < i64 280375465148160, i64 280375465148160>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl16(<2 x i64> %a) {
+; CHECK-LABEL: orr64imm4s_lsl16:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #16
+ %tmp1 = or <2 x i64> %a, < i64 71776119077928960, i64 71776119077928960>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl24(<2 x i64> %a) {
+; CHECK-LABEL: orr64imm4s_lsl24:
+; CHECK: orr {{v[0-9]+}}.4s, #{{0xff|255}}, lsl #24
+ %tmp1 = or <2 x i64> %a, < i64 -72057589759737856, i64 -72057589759737856>
+ ret <2 x i64> %tmp1
+}
+
+define <8 x i8> @orr8imm4h_lsl0(<8 x i8> %a) {
+; CHECK-LABEL: orr8imm4h_lsl0:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}
+ %tmp1 = or <8 x i8> %a, < i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm4h_lsl8(<8 x i8> %a) {
+; CHECK-LABEL: orr8imm4h_lsl8:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
+ %tmp1 = or <8 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <8 x i8> %tmp1
+}
+
+define <2 x i32> @orr16imm4h_lsl0(<2 x i32> %a) {
+; CHECK-LABEL: orr16imm4h_lsl0:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}
+ %tmp1 = or <2 x i32> %a, < i32 16711935, i32 16711935>
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @orr16imm4h_lsl8(<2 x i32> %a) {
+; CHECK-LABEL: orr16imm4h_lsl8:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
+ %tmp1 = or <2 x i32> %a, < i32 4278255360, i32 4278255360>
+ ret <2 x i32> %tmp1
+}
+
+define <1 x i64> @orr64imm4h_lsl0(<1 x i64> %a) {
+; CHECK-LABEL: orr64imm4h_lsl0:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}
+ %tmp1 = or <1 x i64> %a, < i64 71777214294589695>
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm4h_lsl8(<1 x i64> %a) {
+; CHECK-LABEL: orr64imm4h_lsl8:
+; CHECK: orr {{v[0-9]+}}.4h, #{{0xff|255}}, lsl #8
+ %tmp1 = or <1 x i64> %a, < i64 -71777214294589696>
+ ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @orr8imm8h_lsl0(<16 x i8> %a) {
+; CHECK-LABEL: orr8imm8h_lsl0:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}
+ %tmp1 = or <16 x i8> %a, < i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm8h_lsl8(<16 x i8> %a) {
+; CHECK-LABEL: orr8imm8h_lsl8:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
+ %tmp1 = or <16 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <16 x i8> %tmp1
+}
+
+define <4 x i32> @orr16imm8h_lsl0(<4 x i32> %a) {
+; CHECK-LABEL: orr16imm8h_lsl0:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}
+ %tmp1 = or <4 x i32> %a, < i32 16711935, i32 16711935, i32 16711935, i32 16711935>
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @orr16imm8h_lsl8(<4 x i32> %a) {
+; CHECK-LABEL: orr16imm8h_lsl8:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
+ %tmp1 = or <4 x i32> %a, < i32 4278255360, i32 4278255360, i32 4278255360, i32 4278255360>
+ ret <4 x i32> %tmp1
+}
+
+define <2 x i64> @orr64imm8h_lsl0(<2 x i64> %a) {
+; CHECK-LABEL: orr64imm8h_lsl0:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}
+ %tmp1 = or <2 x i64> %a, < i64 71777214294589695, i64 71777214294589695>
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm8h_lsl8(<2 x i64> %a) {
+; CHECK-LABEL: orr64imm8h_lsl8:
+; CHECK: orr {{v[0-9]+}}.8h, #{{0xff|255}}, lsl #8
+ %tmp1 = or <2 x i64> %a, < i64 -71777214294589696, i64 -71777214294589696>
+ ret <2 x i64> %tmp1
+}
diff --git a/test/CodeGen/AArch64/neon-bsl.ll b/test/CodeGen/AArch64/neon-bsl.ll
deleted file mode 100644
index 6bd923dc2cca..000000000000
--- a/test/CodeGen/AArch64/neon-bsl.ll
+++ /dev/null
@@ -1,222 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-declare <2 x double> @llvm.arm.neon.vbsl.v2f64(<2 x double>, <2 x double>, <2 x double>)
-
-declare <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
-
-declare <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float>, <4 x float>, <4 x float>)
-
-declare <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
-
-declare <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-
-declare <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>)
-
-declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)
-
-declare <1 x double> @llvm.arm.neon.vbsl.v1f64(<1 x double>, <1 x double>, <1 x double>)
-
-declare <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float>, <2 x float>, <2 x float>)
-
-declare <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64>, <1 x i64>, <1 x i64>)
-
-declare <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>)
-
-define <8 x i8> @test_vbsl_s8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
-; CHECK-LABEL: test_vbsl_s8:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3)
- ret <8 x i8> %vbsl.i
-}
-
-define <8 x i8> @test_vbsl_s16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
-; CHECK-LABEL: test_vbsl_s16:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3)
- %0 = bitcast <4 x i16> %vbsl3.i to <8 x i8>
- ret <8 x i8> %0
-}
-
-define <2 x i32> @test_vbsl_s32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
-; CHECK-LABEL: test_vbsl_s32:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3)
- ret <2 x i32> %vbsl3.i
-}
-
-define <1 x i64> @test_vbsl_s64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
-; CHECK-LABEL: test_vbsl_s64:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3)
- ret <1 x i64> %vbsl3.i
-}
-
-define <8 x i8> @test_vbsl_u8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
-; CHECK-LABEL: test_vbsl_u8:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3)
- ret <8 x i8> %vbsl.i
-}
-
-define <4 x i16> @test_vbsl_u16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
-; CHECK-LABEL: test_vbsl_u16:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3)
- ret <4 x i16> %vbsl3.i
-}
-
-define <2 x i32> @test_vbsl_u32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
-; CHECK-LABEL: test_vbsl_u32:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3)
- ret <2 x i32> %vbsl3.i
-}
-
-define <1 x i64> @test_vbsl_u64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
-; CHECK-LABEL: test_vbsl_u64:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3)
- ret <1 x i64> %vbsl3.i
-}
-
-define <2 x float> @test_vbsl_f32(<2 x float> %v1, <2 x float> %v2, <2 x float> %v3) {
-; CHECK-LABEL: test_vbsl_f32:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float> %v1, <2 x float> %v2, <2 x float> %v3)
- ret <2 x float> %vbsl3.i
-}
-
-define <1 x double> @test_vbsl_f64(<1 x i64> %v1, <1 x double> %v2, <1 x double> %v3) {
-; CHECK-LABEL: test_vbsl_f64:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl.i = bitcast <1 x i64> %v1 to <1 x double>
- %vbsl3.i = tail call <1 x double> @llvm.arm.neon.vbsl.v1f64(<1 x double> %vbsl.i, <1 x double> %v2, <1 x double> %v3)
- ret <1 x double> %vbsl3.i
-}
-
-define <8 x i8> @test_vbsl_p8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
-; CHECK-LABEL: test_vbsl_p8:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3)
- ret <8 x i8> %vbsl.i
-}
-
-define <4 x i16> @test_vbsl_p16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
-; CHECK-LABEL: test_vbsl_p16:
-; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-entry:
- %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3)
- ret <4 x i16> %vbsl3.i
-}
-
-define <16 x i8> @test_vbslq_s8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
-; CHECK-LABEL: test_vbslq_s8:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3)
- ret <16 x i8> %vbsl.i
-}
-
-define <8 x i16> @test_vbslq_s16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
-; CHECK-LABEL: test_vbslq_s16:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3)
- ret <8 x i16> %vbsl3.i
-}
-
-define <4 x i32> @test_vbslq_s32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
-; CHECK-LABEL: test_vbslq_s32:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3)
- ret <4 x i32> %vbsl3.i
-}
-
-define <2 x i64> @test_vbslq_s64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3) {
-; CHECK-LABEL: test_vbslq_s64:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3)
- ret <2 x i64> %vbsl3.i
-}
-
-define <16 x i8> @test_vbslq_u8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
-; CHECK-LABEL: test_vbslq_u8:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3)
- ret <16 x i8> %vbsl.i
-}
-
-define <8 x i16> @test_vbslq_u16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
-; CHECK-LABEL: test_vbslq_u16:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3)
- ret <8 x i16> %vbsl3.i
-}
-
-define <4 x i32> @test_vbslq_u32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
-; CHECK-LABEL: test_vbslq_u32:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3)
- ret <4 x i32> %vbsl3.i
-}
-
-define <2 x i64> @test_vbslq_u64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3) {
-; CHECK-LABEL: test_vbslq_u64:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3)
- ret <2 x i64> %vbsl3.i
-}
-
-define <4 x float> @test_vbslq_f32(<4 x i32> %v1, <4 x float> %v2, <4 x float> %v3) {
-; CHECK-LABEL: test_vbslq_f32:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl.i = bitcast <4 x i32> %v1 to <4 x float>
- %vbsl3.i = tail call <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float> %vbsl.i, <4 x float> %v2, <4 x float> %v3)
- ret <4 x float> %vbsl3.i
-}
-
-define <16 x i8> @test_vbslq_p8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
-; CHECK-LABEL: test_vbslq_p8:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3)
- ret <16 x i8> %vbsl.i
-}
-
-define <8 x i16> @test_vbslq_p16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
-; CHECK-LABEL: test_vbslq_p16:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3)
- ret <8 x i16> %vbsl3.i
-}
-
-define <2 x double> @test_vbslq_f64(<2 x i64> %v1, <2 x double> %v2, <2 x double> %v3) {
-; CHECK-LABEL: test_vbslq_f64:
-; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %vbsl.i = bitcast <2 x i64> %v1 to <2 x double>
- %vbsl3.i = tail call <2 x double> @llvm.arm.neon.vbsl.v2f64(<2 x double> %vbsl.i, <2 x double> %v2, <2 x double> %v3)
- ret <2 x double> %vbsl3.i
-}
-
diff --git a/test/CodeGen/AArch64/neon-compare-instructions.ll b/test/CodeGen/AArch64/neon-compare-instructions.ll
index 68f03425b276..6d89dfbacf41 100644
--- a/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -1,560 +1,631 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmeq8xi8:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp eq <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmeq16xi8:
+; CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp eq <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmeq4xi16:
+; CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp eq <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmeq8xi16:
+; CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp eq <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmeq2xi32:
+; CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp eq <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmeq4xi32:
+; CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp eq <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmeq2xi64:
+; CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp eq <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmne8xi8:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmne16xi8:
+; CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmne4xi16:
+; CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmne8xi16:
+; CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmne2xi32:
+; CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmne4xi32:
+; CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmne2xi64:
+; CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmgt8xi8:
+; CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp sgt <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmgt16xi8:
+; CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp sgt <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmgt4xi16:
+; CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp sgt <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmgt8xi16:
+; CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp sgt <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmgt2xi32:
+; CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp sgt <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmgt4xi32:
+; CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp sgt <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmgt2xi64:
+; CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp sgt <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: cmlt8xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
+; CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp slt <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: cmlt16xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
+; CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp slt <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: cmlt4xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
+; CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp slt <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: cmlt8xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
+; CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp slt <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: cmlt2xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp slt <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: cmlt4xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp slt <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: cmlt2xi64:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp slt <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmge8xi8:
+; CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp sge <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmge16xi8:
+; CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp sge <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmge4xi16:
+; CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp sge <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmge8xi16:
+; CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp sge <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmge2xi32:
+; CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp sge <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmge4xi32:
+; CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp sge <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmge2xi64:
+; CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp sge <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: cmle8xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
+; CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp sle <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: cmle16xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
+; CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp sle <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: cmle4xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
+; CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp sle <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: cmle8xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
+; CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp sle <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: cmle2xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp sle <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: cmle4xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp sle <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: cmle2xi64:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp sle <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmhi8xi8:
+; CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ugt <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmhi16xi8:
+; CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ugt <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmhi4xi16:
+; CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp ugt <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmhi8xi16:
+; CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp ugt <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmhi2xi32:
+; CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp ugt <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmhi4xi32:
+; CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp ugt <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmhi2xi64:
+; CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp ugt <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: cmlo8xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
+; CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp ult <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: cmlo16xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
+; CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp ult <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: cmlo4xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
+; CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp ult <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: cmlo8xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
+; CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp ult <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: cmlo2xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp ult <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: cmlo4xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp ult <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: cmlo2xi64:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp ult <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmhs8xi8:
+; CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp uge <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmhs16xi8:
+; CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp uge <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmhs4xi16:
+; CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp uge <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmhs8xi16:
+; CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp uge <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmhs2xi32:
+; CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp uge <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmhs4xi32:
+; CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp uge <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmhs2xi64:
+; CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp uge <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
+; CHECK-LABEL: cmls8xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
+; CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp ule <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: cmls16xi8:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
+; CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp ule <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
+; CHECK-LABEL: cmls4xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
+; CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp ule <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
+; CHECK-LABEL: cmls8xi16:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
+; CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp ule <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: cmls2xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp ule <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: cmls4xi32:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp ule <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
+; CHECK-LABEL: cmls2xi64:
 ; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp ule <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmtst8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmtst {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmtst8xi8:
+; CHECK: cmtst {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = and <8 x i8> %A, %B
%tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer
%tmp5 = sext <8 x i1> %tmp4 to <8 x i8>
@@ -562,7 +633,8 @@ define <8 x i8> @cmtst8xi8(<8 x i8> %A, <8 x i8> %B) {
}
define <16 x i8> @cmtst16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmtst {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmtst16xi8:
+; CHECK: cmtst {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = and <16 x i8> %A, %B
%tmp4 = icmp ne <16 x i8> %tmp3, zeroinitializer
%tmp5 = sext <16 x i1> %tmp4 to <16 x i8>
@@ -570,7 +642,8 @@ define <16 x i8> @cmtst16xi8(<16 x i8> %A, <16 x i8> %B) {
}
define <4 x i16> @cmtst4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmtst {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmtst4xi16:
+; CHECK: cmtst {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = and <4 x i16> %A, %B
%tmp4 = icmp ne <4 x i16> %tmp3, zeroinitializer
%tmp5 = sext <4 x i1> %tmp4 to <4 x i16>
@@ -578,7 +651,8 @@ define <4 x i16> @cmtst4xi16(<4 x i16> %A, <4 x i16> %B) {
}
define <8 x i16> @cmtst8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmtst {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmtst8xi16:
+; CHECK: cmtst {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = and <8 x i16> %A, %B
%tmp4 = icmp ne <8 x i16> %tmp3, zeroinitializer
%tmp5 = sext <8 x i1> %tmp4 to <8 x i16>
@@ -586,7 +660,8 @@ define <8 x i16> @cmtst8xi16(<8 x i16> %A, <8 x i16> %B) {
}
define <2 x i32> @cmtst2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmtst {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmtst2xi32:
+; CHECK: cmtst {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = and <2 x i32> %A, %B
%tmp4 = icmp ne <2 x i32> %tmp3, zeroinitializer
%tmp5 = sext <2 x i1> %tmp4 to <2 x i32>
@@ -594,7 +669,8 @@ define <2 x i32> @cmtst2xi32(<2 x i32> %A, <2 x i32> %B) {
}
define <4 x i32> @cmtst4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmtst {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmtst4xi32:
+; CHECK: cmtst {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = and <4 x i32> %A, %B
%tmp4 = icmp ne <4 x i32> %tmp3, zeroinitializer
%tmp5 = sext <4 x i1> %tmp4 to <4 x i32>
@@ -602,7 +678,8 @@ define <4 x i32> @cmtst4xi32(<4 x i32> %A, <4 x i32> %B) {
}
define <2 x i64> @cmtst2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmtst {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmtst2xi64:
+; CHECK: cmtst {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = and <2 x i64> %A, %B
%tmp4 = icmp ne <2 x i64> %tmp3, zeroinitializer
%tmp5 = sext <2 x i1> %tmp4 to <2 x i64>
@@ -612,49 +689,56 @@ define <2 x i64> @cmtst2xi64(<2 x i64> %A, <2 x i64> %B) {
define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+; CHECK-LABEL: cmeqz8xi8:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
%tmp3 = icmp eq <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+; CHECK-LABEL: cmeqz16xi8:
+; CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
%tmp3 = icmp eq <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+; CHECK-LABEL: cmeqz4xi16:
+; CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
%tmp3 = icmp eq <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+; CHECK-LABEL: cmeqz8xi16:
+; CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
%tmp3 = icmp eq <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+; CHECK-LABEL: cmeqz2xi32:
+; CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
%tmp3 = icmp eq <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+; CHECK-LABEL: cmeqz4xi32:
+; CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
%tmp3 = icmp eq <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+; CHECK-LABEL: cmeqz2xi64:
+; CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
%tmp3 = icmp eq <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -662,49 +746,56 @@ define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
-;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+; CHECK-LABEL: cmgez8xi8:
+; CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
%tmp3 = icmp sge <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
-;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+; CHECK-LABEL: cmgez16xi8:
+; CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
%tmp3 = icmp sge <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
-;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+; CHECK-LABEL: cmgez4xi16:
+; CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
%tmp3 = icmp sge <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
-;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+; CHECK-LABEL: cmgez8xi16:
+; CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
%tmp3 = icmp sge <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
-;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+; CHECK-LABEL: cmgez2xi32:
+; CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
%tmp3 = icmp sge <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
-;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+; CHECK-LABEL: cmgez4xi32:
+; CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
%tmp3 = icmp sge <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
-;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+; CHECK-LABEL: cmgez2xi64:
+; CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
%tmp3 = icmp sge <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -712,259 +803,294 @@ define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
-;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+; CHECK-LABEL: cmgtz8xi8:
+; CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
%tmp3 = icmp sgt <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
-;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+; CHECK-LABEL: cmgtz16xi8:
+; CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
%tmp3 = icmp sgt <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
-;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+; CHECK-LABEL: cmgtz4xi16:
+; CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
%tmp3 = icmp sgt <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
-;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+; CHECK-LABEL: cmgtz8xi16:
+; CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
%tmp3 = icmp sgt <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
-;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+; CHECK-LABEL: cmgtz2xi32:
+; CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
%tmp3 = icmp sgt <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
-;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+; CHECK-LABEL: cmgtz4xi32:
+; CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
%tmp3 = icmp sgt <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
-;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+; CHECK-LABEL: cmgtz2xi64:
+; CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
%tmp3 = icmp sgt <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
-;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+; CHECK-LABEL: cmlez8xi8:
+; CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
%tmp3 = icmp sle <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
-;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+; CHECK-LABEL: cmlez16xi8:
+; CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
%tmp3 = icmp sle <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
-;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+; CHECK-LABEL: cmlez4xi16:
+; CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
%tmp3 = icmp sle <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
-;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+; CHECK-LABEL: cmlez8xi16:
+; CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
%tmp3 = icmp sle <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
-;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+; CHECK-LABEL: cmlez2xi32:
+; CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
%tmp3 = icmp sle <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
-;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+; CHECK-LABEL: cmlez4xi32:
+; CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
%tmp3 = icmp sle <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
-;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+; CHECK-LABEL: cmlez2xi64:
+; CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
%tmp3 = icmp sle <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
-;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+; CHECK-LABEL: cmltz8xi8:
+; CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
%tmp3 = icmp slt <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
-;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+; CHECK-LABEL: cmltz16xi8:
+; CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
%tmp3 = icmp slt <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
-;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+; CHECK-LABEL: cmltz4xi16:
+; CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
%tmp3 = icmp slt <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
-;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+; CHECK-LABEL: cmltz8xi16:
+; CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
%tmp3 = icmp slt <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
-;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+; CHECK-LABEL: cmltz2xi32:
+; CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
%tmp3 = icmp slt <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
-;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+; CHECK-LABEL: cmltz4xi32:
+; CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
%tmp3 = icmp slt <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
-;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+; CHECK-LABEL: cmltz2xi64:
+; CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
%tmp3 = icmp slt <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmneqz8xi8:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmneqz16xi8:
+; CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmneqz4xi16:
+; CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmneqz8xi16:
+; CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmneqz2xi32:
+; CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmneqz4xi32:
+; CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmneqz2xi64:
+; CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
-;CHECK: movi {{v[0-9]+}}.8b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmhsz8xi8:
+; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp uge <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmhsz16xi8:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp uge <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
-;CHECK: movi {{v[0-9]+}}.8b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmhsz4xi16:
+; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp uge <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmhsz8xi16:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp uge <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
-;CHECK: movi {{v[0-9]+}}.8b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmhsz2xi32:
+; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp uge <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmhsz4xi32:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp uge <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmhsz2xi64:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp uge <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -972,196 +1098,217 @@ define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
-;CHECK: movi {{v[0-9]+}}.8b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-LABEL: cmhiz8xi8:
+; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ugt <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: cmhiz16xi8:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ugt <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
-;CHECK: movi {{v[0-9]+}}.8b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK-LABEL: cmhiz4xi16:
+; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = icmp ugt <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK-LABEL: cmhiz8xi16:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = icmp ugt <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
-;CHECK: movi {{v[0-9]+}}.8b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: cmhiz2xi32:
+; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = icmp ugt <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: cmhiz4xi32:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = icmp ugt <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
-;CHECK: movi {{v[0-9]+}}.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: cmhiz2xi64:
+; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = icmp ugt <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
+; CHECK-LABEL: cmlsz8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: movi v1.8b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
+; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
%tmp3 = icmp ule <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
+; CHECK-LABEL: cmlsz16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp ule <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
+; CHECK-LABEL: cmlsz4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: movi v1.8b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
+; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
+; CHECK-LABEL: cmlsz8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
+; CHECK-LABEL: cmlsz2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: movi v1.8b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
+; CHECK-LABEL: cmlsz4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
+; CHECK-LABEL: cmlsz2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LS implemented as HS, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
+; CHECK-LABEL: cmloz8xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: movi v1.8b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v1.8b, {{v[0-9]+}}.8b
+; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v1.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ult <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
}
define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
+; CHECK-LABEL: cmloz16xi8:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
%tmp3 = icmp ult <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
}
define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
+; CHECK-LABEL: cmloz4xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: movi v1.8b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
+; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
%tmp3 = icmp ult <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
}
define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
+; CHECK-LABEL: cmloz8xi16:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
%tmp3 = icmp ult <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
}
define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
+; CHECK-LABEL: cmloz2xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: movi v1.8b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = icmp ult <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
+; CHECK-LABEL: cmloz4xi32:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = icmp ult <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
+; CHECK-LABEL: cmloz2xi64:
; Using registers other than v0 and v1 is possible, but would be odd.
; LO implemented as HI, so check reversed operands.
-;CHECK: movi v1.16b, #0x0
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = icmp ult <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -1169,144 +1316,162 @@ define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
define <2 x i32> @fcmoeq2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: fcmoeq2xfloat:
+; CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = fcmp oeq <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmoeq4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: fcmoeq4xfloat:
+; CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = fcmp oeq <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmoeq2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: fcmoeq2xdouble:
+; CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = fcmp oeq <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmoge2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: fcmoge2xfloat:
+; CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = fcmp oge <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmoge4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: fcmoge4xfloat:
+; CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = fcmp oge <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmoge2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: fcmoge2xdouble:
+; CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = fcmp oge <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmogt2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK-LABEL: fcmogt2xfloat:
+; CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = fcmp ogt <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmogt4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK-LABEL: fcmogt4xfloat:
+; CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = fcmp ogt <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmogt2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK-LABEL: fcmogt2xdouble:
+; CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = fcmp ogt <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmole2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmole2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; OLE implemented as OGE, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = fcmp ole <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmole4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmole4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; OLE implemented as OGE, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = fcmp ole <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmole2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmole2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; OLE implemented as OGE, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = fcmp ole <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmolt2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmolt2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
%tmp3 = fcmp olt <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmolt4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmolt4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
%tmp3 = fcmp olt <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmolt2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmolt2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
%tmp3 = fcmp olt <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmone2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmone2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
-;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp one <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmone4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmone4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
-;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp one <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmone2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmone2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
-;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; todo check reversed operands
%tmp3 = fcmp one <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
@@ -1315,11 +1480,12 @@ define <2 x i64> @fcmone2xdouble(<2 x double> %A, <2 x double> %B) {
define <2 x i32> @fcmord2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmord2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ord <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@@ -1327,22 +1493,24 @@ define <2 x i32> @fcmord2xfloat(<2 x float> %A, <2 x float> %B) {
define <4 x i32> @fcmord4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmord4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ord <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmord2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmord2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ord <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -1350,236 +1518,260 @@ define <2 x i64> @fcmord2xdouble(<2 x double> %A, <2 x double> %B) {
define <2 x i32> @fcmuno2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmuno2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uno <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmuno4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmuno4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmuno2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmuno2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
-;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmueq2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmueq2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
-;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ueq <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmueq4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmueq4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
-;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmueq2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmueq2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
-;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
-;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
+; CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmuge2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmuge2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UGE = ULE with swapped operands, ULE implemented as !OGT.
-;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uge <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmuge4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmuge4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UGE = ULE with swapped operands, ULE implemented as !OGT.
-;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmuge2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmuge2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; UGE = ULE with swapped operands, ULE implemented as !OGT.
-;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmugt2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmugt2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UGT = ULT with swapped operands, ULT implemented as !OGE.
-;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ugt <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmugt4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmugt4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UGT = ULT with swapped operands, ULT implemented as !OGE.
-;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmugt2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: fcmugt2xdouble:
+; CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmule2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmule2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ULE implemented as !OGT.
-;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ule <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmule4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmule4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ULE implemented as !OGT.
-;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmule2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmule2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; ULE implemented as !OGT.
-;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmult2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmult2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ULT implemented as !OGE.
-;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ult <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmult4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmult4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; ULT implemented as !OGE.
-;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmult2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmult2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; ULT implemented as !OGE.
-;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmune2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmune2xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UNE = !OEQ.
-;CHECK: fcmeq {{v[0-9]+}}.2s, v0.2s, v1.2s
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmeq {{v[0-9]+}}.2s, v0.2s, v1.2s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp une <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmune4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-LABEL: fcmune4xfloat:
; Using registers other than v0 and v1 is possible, but would be odd.
; UNE = !OEQ.
-;CHECK: fcmeq {{v[0-9]+}}.4s, v0.4s, v1.4s
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmeq {{v[0-9]+}}.4s, v0.4s, v1.4s
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmune2xdouble:
; Using registers other than v0 and v1 is possible, but would be odd.
; UNE = !OEQ.
-;CHECK: fcmeq {{v[0-9]+}}.2d, v0.2d, v1.2d
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmeq {{v[0-9]+}}.2d, v0.2d, v1.2d
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) {
-;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+; CHECK-LABEL: fcmoeqz2xfloat:
+; CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
%tmp3 = fcmp oeq <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmoeqz4xfloat(<4 x float> %A) {
-;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+; CHECK-LABEL: fcmoeqz4xfloat:
+; CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
%tmp3 = fcmp oeq <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmoeqz2xdouble(<2 x double> %A) {
-;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+; CHECK-LABEL: fcmoeqz2xdouble:
+; CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
%tmp3 = fcmp oeq <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -1587,250 +1779,280 @@ define <2 x i64> @fcmoeqz2xdouble(<2 x double> %A) {
define <2 x i32> @fcmogez2xfloat(<2 x float> %A) {
-;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+; CHECK-LABEL: fcmogez2xfloat:
+; CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
%tmp3 = fcmp oge <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmogez4xfloat(<4 x float> %A) {
-;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+; CHECK-LABEL: fcmogez4xfloat:
+; CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
%tmp3 = fcmp oge <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmogez2xdouble(<2 x double> %A) {
-;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+; CHECK-LABEL: fcmogez2xdouble:
+; CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
%tmp3 = fcmp oge <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmogtz2xfloat(<2 x float> %A) {
-;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+; CHECK-LABEL: fcmogtz2xfloat:
+; CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
%tmp3 = fcmp ogt <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmogtz4xfloat(<4 x float> %A) {
-;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+; CHECK-LABEL: fcmogtz4xfloat:
+; CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
%tmp3 = fcmp ogt <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmogtz2xdouble(<2 x double> %A) {
-;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+; CHECK-LABEL: fcmogtz2xdouble:
+; CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
%tmp3 = fcmp ogt <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmoltz2xfloat(<2 x float> %A) {
-;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+; CHECK-LABEL: fcmoltz2xfloat:
+; CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
%tmp3 = fcmp olt <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmoltz4xfloat(<4 x float> %A) {
-;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+; CHECK-LABEL: fcmoltz4xfloat:
+; CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
%tmp3 = fcmp olt <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmoltz2xdouble(<2 x double> %A) {
-;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+; CHECK-LABEL: fcmoltz2xdouble:
+; CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
%tmp3 = fcmp olt <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmolez2xfloat(<2 x float> %A) {
-;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+; CHECK-LABEL: fcmolez2xfloat:
+; CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
%tmp3 = fcmp ole <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmolez4xfloat(<4 x float> %A) {
-;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+; CHECK-LABEL: fcmolez4xfloat:
+; CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
%tmp3 = fcmp ole <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmolez2xdouble(<2 x double> %A) {
-;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+; CHECK-LABEL: fcmolez2xdouble:
+; CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
%tmp3 = fcmp ole <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmonez2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmonez2xfloat:
; ONE with zero = OLT | OGT
-;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp one <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmonez4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmonez4xfloat:
; ONE with zero = OLT | OGT
-;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp one <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmonez2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmonez2xdouble:
; ONE with zero = OLT | OGT
-;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp one <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmordz2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmordz2xfloat:
; ORD with zero = OLT | OGE
-;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ord <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmordz4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmordz4xfloat:
; ORD with zero = OLT | OGE
-;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ord <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmordz2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmordz2xdouble:
; ORD with zero = OLT | OGE
-;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ord <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmueqz2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmueqz2xfloat:
; UEQ with zero = !ONE = !(OLT | OGT)
-;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ueq <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmueqz4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmueqz4xfloat:
; UEQ with zero = !ONE = !(OLT | OGT)
-;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmueqz2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmueqz2xdouble:
; UEQ with zero = !ONE = !(OLT | OGT)
-;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmugez2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmugez2xfloat:
; UGE with zero = !OLT
-;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uge <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmugez4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmugez4xfloat:
; UGE with zero = !OLT
-;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmugez2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmugez2xdouble:
; UGE with zero = !OLT
-;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmugtz2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmugtz2xfloat:
; UGT with zero = !OLE
-;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ugt <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmugtz4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmugtz4xfloat:
; UGT with zero = !OLE
-;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmugtz2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmugtz2xdouble:
; UGT with zero = !OLE
-;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmultz2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmultz2xfloat:
; ULT with zero = !OGE
-;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ult <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmultz4xfloat(<4 x float> %A) {
-;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: fcmultz4xfloat:
+; CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmultz2xdouble(<2 x double> %A) {
-;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: fcmultz2xdouble:
+; CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -1838,53 +2060,59 @@ define <2 x i64> @fcmultz2xdouble(<2 x double> %A) {
define <2 x i32> @fcmulez2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmulez2xfloat:
; ULE with zero = !OGT
-;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ule <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmulez4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmulez4xfloat:
; ULE with zero = !OGT
-;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmulez2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmulez2xdouble:
; ULE with zero = !OGT
-;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
}
define <2 x i32> @fcmunez2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmunez2xfloat:
; UNE with zero = !OEQ with zero
-;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp une <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmunez4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmunez4xfloat:
; UNE with zero = !OEQ with zero
-;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmunez2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmunez2xdouble:
; UNE with zero = !OEQ with zero
-;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -1892,33 +2120,36 @@ define <2 x i64> @fcmunez2xdouble(<2 x double> %A) {
define <2 x i32> @fcmunoz2xfloat(<2 x float> %A) {
+; CHECK-LABEL: fcmunoz2xfloat:
; UNO with zero = !ORD = !(OLT | OGE)
-;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uno <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
}
define <4 x i32> @fcmunoz4xfloat(<4 x float> %A) {
+; CHECK-LABEL: fcmunoz4xfloat:
; UNO with zero = !ORD = !(OLT | OGE)
-;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
define <2 x i64> @fcmunoz2xdouble(<2 x double> %A) {
+; CHECK-LABEL: fcmunoz2xdouble:
; UNO with zero = !ORD = !(OLT | OGE)
-;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
-;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
diff --git a/test/CodeGen/AArch64/neon-copy.ll b/test/CodeGen/AArch64/neon-copy.ll
deleted file mode 100644
index e18530e6ff8e..000000000000
--- a/test/CodeGen/AArch64/neon-copy.ll
+++ /dev/null
@@ -1,615 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
-
-
-define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
-;CHECK: ins {{v[0-31]+}}.b[15], {{w[0-31]+}}
- %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 15
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
-;CHECK: ins {{v[0-31]+}}.h[6], {{w[0-31]+}}
- %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 6
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[2], {{w[0-31]+}}
- %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[1], {{x[0-31]+}}
- %tmp3 = insertelement <2 x i64> %tmp1, i64 %tmp2, i32 1
- ret <2 x i64> %tmp3
-}
-
-define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
-;CHECK: ins {{v[0-31]+}}.b[5], {{w[0-31]+}}
- %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 5
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
-;CHECK: ins {{v[0-31]+}}.h[3], {{w[0-31]+}}
- %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 3
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{w[0-31]+}}
- %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
- ret <2 x i32> %tmp3
-}
-
-define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.b[15], {{v[0-31]+}}.b[2]
- %tmp3 = extractelement <16 x i8> %tmp1, i32 2
- %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.h[7], {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <8 x i16> %tmp1, i32 2
- %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
- %tmp3 = extractelement <4 x i32> %tmp1, i32 2
- %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <2 x i64> %tmp1, i32 0
- %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
- ret <2 x i64> %tmp4
-}
-
-define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
- %tmp3 = extractelement <4 x float> %tmp1, i32 2
- %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
- ret <4 x float> %tmp4
-}
-
-define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <2 x double> %tmp1, i32 0
- %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
- ret <2 x double> %tmp4
-}
-
-define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.b[15], {{v[0-31]+}}.b[2]
- %tmp3 = extractelement <8 x i8> %tmp1, i32 2
- %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.h[7], {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <4 x i16> %tmp1, i32 2
- %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[1]
- %tmp3 = extractelement <2 x i32> %tmp1, i32 1
- %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <1 x i64> %tmp1, i32 0
- %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
- ret <2 x i64> %tmp4
-}
-
-define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[1]
- %tmp3 = extractelement <2 x float> %tmp1, i32 1
- %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
- ret <4 x float> %tmp4
-}
-
-define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <1 x double> %tmp1, i32 0
- %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
- ret <2 x double> %tmp4
-}
-
-define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.b[7], {{v[0-31]+}}.b[2]
- %tmp3 = extractelement <16 x i8> %tmp1, i32 2
- %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.h[3], {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <8 x i16> %tmp1, i32 2
- %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
- %tmp3 = extractelement <4 x i32> %tmp1, i32 2
- %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <2 x i64> %tmp1, i32 0
- %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
- ret <1 x i64> %tmp4
-}
-
-define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
- %tmp3 = extractelement <4 x float> %tmp1, i32 2
- %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
- ret <2 x float> %tmp4
-}
-
-define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <2 x double> %tmp1, i32 0
- %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
- ret <1 x double> %tmp4
-}
-
-define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.b[4], {{v[0-31]+}}.b[2]
- %tmp3 = extractelement <8 x i8> %tmp1, i32 2
- %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.h[3], {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <4 x i16> %tmp1, i32 2
- %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[0]
- %tmp3 = extractelement <2 x i32> %tmp1, i32 0
- %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <1 x i64> %tmp1, i32 0
- %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
- ret <1 x i64> %tmp4
-}
-
-define <2 x float> @ins2f2(<2 x float> %tmp1, <2 x float> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[0]
- %tmp3 = extractelement <2 x float> %tmp1, i32 0
- %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
- ret <2 x float> %tmp4
-}
-
-define <1 x double> @ins1df1(<1 x double> %tmp1, <1 x double> %tmp2) {
-;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <1 x double> %tmp1, i32 0
- %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
- ret <1 x double> %tmp4
-}
-
-define i32 @umovw16b(<16 x i8> %tmp1) {
-;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.b[8]
- %tmp3 = extractelement <16 x i8> %tmp1, i32 8
- %tmp4 = zext i8 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i32 @umovw8h(<8 x i16> %tmp1) {
-;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <8 x i16> %tmp1, i32 2
- %tmp4 = zext i16 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i32 @umovw4s(<4 x i32> %tmp1) {
-;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.s[2]
- %tmp3 = extractelement <4 x i32> %tmp1, i32 2
- ret i32 %tmp3
-}
-
-define i64 @umovx2d(<2 x i64> %tmp1) {
-;CHECK: umov {{x[0-31]+}}, {{v[0-31]+}}.d[0]
- %tmp3 = extractelement <2 x i64> %tmp1, i32 0
- ret i64 %tmp3
-}
-
-define i32 @umovw8b(<8 x i8> %tmp1) {
-;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.b[7]
- %tmp3 = extractelement <8 x i8> %tmp1, i32 7
- %tmp4 = zext i8 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i32 @umovw4h(<4 x i16> %tmp1) {
-;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <4 x i16> %tmp1, i32 2
- %tmp4 = zext i16 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i32 @umovw2s(<2 x i32> %tmp1) {
-;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.s[1]
- %tmp3 = extractelement <2 x i32> %tmp1, i32 1
- ret i32 %tmp3
-}
-
-define i64 @umovx1d(<1 x i64> %tmp1) {
-;CHECK: fmov {{x[0-31]+}}, {{d[0-31]+}}
- %tmp3 = extractelement <1 x i64> %tmp1, i32 0
- ret i64 %tmp3
-}
-
-define i32 @smovw16b(<16 x i8> %tmp1) {
-;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.b[8]
- %tmp3 = extractelement <16 x i8> %tmp1, i32 8
- %tmp4 = sext i8 %tmp3 to i32
- %tmp5 = add i32 5, %tmp4
- ret i32 %tmp5
-}
-
-define i32 @smovw8h(<8 x i16> %tmp1) {
-;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <8 x i16> %tmp1, i32 2
- %tmp4 = sext i16 %tmp3 to i32
- %tmp5 = add i32 5, %tmp4
- ret i32 %tmp5
-}
-
-define i32 @smovx16b(<16 x i8> %tmp1) {
-;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.b[8]
- %tmp3 = extractelement <16 x i8> %tmp1, i32 8
- %tmp4 = sext i8 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i32 @smovx8h(<8 x i16> %tmp1) {
-;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <8 x i16> %tmp1, i32 2
- %tmp4 = sext i16 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i64 @smovx4s(<4 x i32> %tmp1) {
-;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.s[2]
- %tmp3 = extractelement <4 x i32> %tmp1, i32 2
- %tmp4 = sext i32 %tmp3 to i64
- ret i64 %tmp4
-}
-
-define i32 @smovw8b(<8 x i8> %tmp1) {
-;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.b[4]
- %tmp3 = extractelement <8 x i8> %tmp1, i32 4
- %tmp4 = sext i8 %tmp3 to i32
- %tmp5 = add i32 5, %tmp4
- ret i32 %tmp5
-}
-
-define i32 @smovw4h(<4 x i16> %tmp1) {
-;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <4 x i16> %tmp1, i32 2
- %tmp4 = sext i16 %tmp3 to i32
- %tmp5 = add i32 5, %tmp4
- ret i32 %tmp5
-}
-
-define i32 @smovx8b(<8 x i8> %tmp1) {
-;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.b[6]
- %tmp3 = extractelement <8 x i8> %tmp1, i32 6
- %tmp4 = sext i8 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i32 @smovx4h(<4 x i16> %tmp1) {
-;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.h[2]
- %tmp3 = extractelement <4 x i16> %tmp1, i32 2
- %tmp4 = sext i16 %tmp3 to i32
- ret i32 %tmp4
-}
-
-define i64 @smovx2s(<2 x i32> %tmp1) {
-;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.s[1]
- %tmp3 = extractelement <2 x i32> %tmp1, i32 1
- %tmp4 = sext i32 %tmp3 to i64
- ret i64 %tmp4
-}
-
-define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
-;CHECK: ins {{v[0-9]+}}.b[5], {{v[0-9]+}}.b[3]
- %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 11, i32 6, i32 7>
- ret <8 x i8> %vset_lane
-}
-
-define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
-;CHECK: ins {{v[0-9]+}}.b[14], {{v[0-9]+}}.b[6]
- %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 22, i32 15>
- ret <16 x i8> %vset_lane
-}
-
-define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
-;CHECK: ins {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[0]
- %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 0>
- ret <8 x i8> %vset_lane
-}
-
-define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
-;CHECK: ins {{v[0-9]+}}.b[0], {{v[0-9]+}}.b[15]
- %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
- ret <16 x i8> %vset_lane
-}
-
-define <8 x i8> @test_vdup_n_u8(i8 %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.8b, {{w[0-9]+}}
- %vecinit.i = insertelement <8 x i8> undef, i8 %v1, i32 0
- %vecinit1.i = insertelement <8 x i8> %vecinit.i, i8 %v1, i32 1
- %vecinit2.i = insertelement <8 x i8> %vecinit1.i, i8 %v1, i32 2
- %vecinit3.i = insertelement <8 x i8> %vecinit2.i, i8 %v1, i32 3
- %vecinit4.i = insertelement <8 x i8> %vecinit3.i, i8 %v1, i32 4
- %vecinit5.i = insertelement <8 x i8> %vecinit4.i, i8 %v1, i32 5
- %vecinit6.i = insertelement <8 x i8> %vecinit5.i, i8 %v1, i32 6
- %vecinit7.i = insertelement <8 x i8> %vecinit6.i, i8 %v1, i32 7
- ret <8 x i8> %vecinit7.i
-}
-
-define <4 x i16> @test_vdup_n_u16(i16 %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.4h, {{w[0-9]+}}
- %vecinit.i = insertelement <4 x i16> undef, i16 %v1, i32 0
- %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %v1, i32 1
- %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %v1, i32 2
- %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %v1, i32 3
- ret <4 x i16> %vecinit3.i
-}
-
-define <2 x i32> @test_vdup_n_u32(i32 %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.2s, {{w[0-9]+}}
- %vecinit.i = insertelement <2 x i32> undef, i32 %v1, i32 0
- %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %v1, i32 1
- ret <2 x i32> %vecinit1.i
-}
-
-define <1 x i64> @test_vdup_n_u64(i64 %v1) #0 {
-;CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
- %vecinit.i = insertelement <1 x i64> undef, i64 %v1, i32 0
- ret <1 x i64> %vecinit.i
-}
-
-define <16 x i8> @test_vdupq_n_u8(i8 %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.16b, {{w[0-9]+}}
- %vecinit.i = insertelement <16 x i8> undef, i8 %v1, i32 0
- %vecinit1.i = insertelement <16 x i8> %vecinit.i, i8 %v1, i32 1
- %vecinit2.i = insertelement <16 x i8> %vecinit1.i, i8 %v1, i32 2
- %vecinit3.i = insertelement <16 x i8> %vecinit2.i, i8 %v1, i32 3
- %vecinit4.i = insertelement <16 x i8> %vecinit3.i, i8 %v1, i32 4
- %vecinit5.i = insertelement <16 x i8> %vecinit4.i, i8 %v1, i32 5
- %vecinit6.i = insertelement <16 x i8> %vecinit5.i, i8 %v1, i32 6
- %vecinit7.i = insertelement <16 x i8> %vecinit6.i, i8 %v1, i32 7
- %vecinit8.i = insertelement <16 x i8> %vecinit7.i, i8 %v1, i32 8
- %vecinit9.i = insertelement <16 x i8> %vecinit8.i, i8 %v1, i32 9
- %vecinit10.i = insertelement <16 x i8> %vecinit9.i, i8 %v1, i32 10
- %vecinit11.i = insertelement <16 x i8> %vecinit10.i, i8 %v1, i32 11
- %vecinit12.i = insertelement <16 x i8> %vecinit11.i, i8 %v1, i32 12
- %vecinit13.i = insertelement <16 x i8> %vecinit12.i, i8 %v1, i32 13
- %vecinit14.i = insertelement <16 x i8> %vecinit13.i, i8 %v1, i32 14
- %vecinit15.i = insertelement <16 x i8> %vecinit14.i, i8 %v1, i32 15
- ret <16 x i8> %vecinit15.i
-}
-
-define <8 x i16> @test_vdupq_n_u16(i16 %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.8h, {{w[0-9]+}}
- %vecinit.i = insertelement <8 x i16> undef, i16 %v1, i32 0
- %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %v1, i32 1
- %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %v1, i32 2
- %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %v1, i32 3
- %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %v1, i32 4
- %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %v1, i32 5
- %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %v1, i32 6
- %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %v1, i32 7
- ret <8 x i16> %vecinit7.i
-}
-
-define <4 x i32> @test_vdupq_n_u32(i32 %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.4s, {{w[0-9]+}}
- %vecinit.i = insertelement <4 x i32> undef, i32 %v1, i32 0
- %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %v1, i32 1
- %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %v1, i32 2
- %vecinit3.i = insertelement <4 x i32> %vecinit2.i, i32 %v1, i32 3
- ret <4 x i32> %vecinit3.i
-}
-
-define <2 x i64> @test_vdupq_n_u64(i64 %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.2d, {{x[0-9]+}}
- %vecinit.i = insertelement <2 x i64> undef, i64 %v1, i32 0
- %vecinit1.i = insertelement <2 x i64> %vecinit.i, i64 %v1, i32 1
- ret <2 x i64> %vecinit1.i
-}
-
-define <8 x i8> @test_vdup_lane_s8(<8 x i8> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
- %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
- ret <8 x i8> %shuffle
-}
-
-define <4 x i16> @test_vdup_lane_s16(<4 x i16> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
- %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
- ret <4 x i16> %shuffle
-}
-
-define <2 x i32> @test_vdup_lane_s32(<2 x i32> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
- %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- ret <2 x i32> %shuffle
-}
-
-define <16 x i8> @test_vdupq_lane_s8(<8 x i8> %v1) #0 {
-;CHECK: {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
- %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
- ret <16 x i8> %shuffle
-}
-
-define <8 x i16> @test_vdupq_lane_s16(<4 x i16> %v1) #0 {
-;CHECK: {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
- %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
- ret <8 x i16> %shuffle
-}
-
-define <4 x i32> @test_vdupq_lane_s32(<2 x i32> %v1) #0 {
-;CHECK: {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
- %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- ret <4 x i32> %shuffle
-}
-
-define <2 x i64> @test_vdupq_lane_s64(<1 x i64> %v1) #0 {
-;CHECK: {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
- %shuffle = shufflevector <1 x i64> %v1, <1 x i64> undef, <2 x i32> zeroinitializer
- ret <2 x i64> %shuffle
-}
-
-define <8 x i8> @test_vdup_laneq_s8(<16 x i8> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
- %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
- ret <8 x i8> %shuffle
-}
-
-define <4 x i16> @test_vdup_laneq_s16(<8 x i16> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
- %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
- ret <4 x i16> %shuffle
-}
-
-define <2 x i32> @test_vdup_laneq_s32(<4 x i32> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
- %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
- ret <2 x i32> %shuffle
-}
-
-define <16 x i8> @test_vdupq_laneq_s8(<16 x i8> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
- %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
- ret <16 x i8> %shuffle
-}
-
-define <8 x i16> @test_vdupq_laneq_s16(<8 x i16> %v1) #0 {
-;CHECK: {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
- %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
- ret <8 x i16> %shuffle
-}
-
-define <4 x i32> @test_vdupq_laneq_s32(<4 x i32> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
- %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- ret <4 x i32> %shuffle
-}
-
-define <2 x i64> @test_vdupq_laneq_s64(<2 x i64> %v1) #0 {
-;CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
- %shuffle = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
- ret <2 x i64> %shuffle
-}
-
-define i64 @test_bitcastv8i8toi64(<8 x i8> %in) {
-; CHECK-LABEL: test_bitcastv8i8toi64:
- %res = bitcast <8 x i8> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
- ret i64 %res
-}
-
-define i64 @test_bitcastv4i16toi64(<4 x i16> %in) {
-; CHECK-LABEL: test_bitcastv4i16toi64:
- %res = bitcast <4 x i16> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
- ret i64 %res
-}
-
-define i64 @test_bitcastv2i32toi64(<2 x i32> %in) {
-; CHECK-LABEL: test_bitcastv2i32toi64:
- %res = bitcast <2 x i32> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
- ret i64 %res
-}
-
-define i64 @test_bitcastv2f32toi64(<2 x float> %in) {
-; CHECK-LABEL: test_bitcastv2f32toi64:
- %res = bitcast <2 x float> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
- ret i64 %res
-}
-
-define i64 @test_bitcastv1i64toi64(<1 x i64> %in) {
-; CHECK-LABEL: test_bitcastv1i64toi64:
- %res = bitcast <1 x i64> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
- ret i64 %res
-}
-
-define i64 @test_bitcastv1f64toi64(<1 x double> %in) {
-; CHECK-LABEL: test_bitcastv1f64toi64:
- %res = bitcast <1 x double> %in to i64
-; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
- ret i64 %res
-}
-
-define <8 x i8> @test_bitcasti64tov8i8(i64 %in) {
-; CHECK-LABEL: test_bitcasti64tov8i8:
- %res = bitcast i64 %in to <8 x i8>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
- ret <8 x i8> %res
-}
-
-define <4 x i16> @test_bitcasti64tov4i16(i64 %in) {
-; CHECK-LABEL: test_bitcasti64tov4i16:
- %res = bitcast i64 %in to <4 x i16>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
- ret <4 x i16> %res
-}
-
-define <2 x i32> @test_bitcasti64tov2i32(i64 %in) {
-; CHECK-LABEL: test_bitcasti64tov2i32:
- %res = bitcast i64 %in to <2 x i32>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
- ret <2 x i32> %res
-}
-
-define <2 x float> @test_bitcasti64tov2f32(i64 %in) {
-; CHECK-LABEL: test_bitcasti64tov2f32:
- %res = bitcast i64 %in to <2 x float>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
- ret <2 x float> %res
-}
-
-define <1 x i64> @test_bitcasti64tov1i64(i64 %in) {
-; CHECK-LABEL: test_bitcasti64tov1i64:
- %res = bitcast i64 %in to <1 x i64>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
- ret <1 x i64> %res
-}
-
-define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
-; CHECK-LABEL: test_bitcasti64tov1f64:
- %res = bitcast i64 %in to <1 x double>
-; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
- ret <1 x double> %res
-}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-crypto.ll b/test/CodeGen/AArch64/neon-crypto.ll
deleted file mode 100644
index 0283e0e7ca2e..000000000000
--- a/test/CodeGen/AArch64/neon-crypto.ll
+++ /dev/null
@@ -1,149 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -mattr=+crypto | FileCheck %s
-; RUN: not llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon 2>&1 | FileCheck --check-prefix=CHECK-NO-CRYPTO %s
-
-declare <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.aarch64.neon.sha1m(<4 x i32>, <1 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.aarch64.neon.sha1p(<4 x i32>, <1 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.aarch64.neon.sha1c(<4 x i32>, <1 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32>, <4 x i32>) #1
-
-declare <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32>, <4 x i32>) #1
-
-declare <1 x i32> @llvm.arm.neon.sha1h.v1i32(<1 x i32>) #1
-
-declare <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8>) #1
-
-declare <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8>) #1
-
-declare <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8>, <16 x i8>) #1
-
-declare <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8>, <16 x i8>) #1
-
-define <16 x i8> @test_vaeseq_u8(<16 x i8> %data, <16 x i8> %key) {
-; CHECK: test_vaeseq_u8:
-; CHECK: aese {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-; CHECK-NO-CRYPTO: Cannot select: intrinsic %llvm.arm.neon.aese
-entry:
- %aese.i = tail call <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8> %data, <16 x i8> %key)
- ret <16 x i8> %aese.i
-}
-
-define <16 x i8> @test_vaesdq_u8(<16 x i8> %data, <16 x i8> %key) {
-; CHECK: test_vaesdq_u8:
-; CHECK: aesd {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %aesd.i = tail call <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8> %data, <16 x i8> %key)
- ret <16 x i8> %aesd.i
-}
-
-define <16 x i8> @test_vaesmcq_u8(<16 x i8> %data) {
-; CHECK: test_vaesmcq_u8:
-; CHECK: aesmc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %aesmc.i = tail call <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8> %data)
- ret <16 x i8> %aesmc.i
-}
-
-define <16 x i8> @test_vaesimcq_u8(<16 x i8> %data) {
-; CHECK: test_vaesimcq_u8:
-; CHECK: aesimc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-entry:
- %aesimc.i = tail call <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8> %data)
- ret <16 x i8> %aesimc.i
-}
-
-define i32 @test_vsha1h_u32(i32 %hash_e) {
-; CHECK: test_vsha1h_u32:
-; CHECK: sha1h {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %sha1h.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
- %sha1h1.i = tail call <1 x i32> @llvm.arm.neon.sha1h.v1i32(<1 x i32> %sha1h.i)
- %0 = extractelement <1 x i32> %sha1h1.i, i32 0
- ret i32 %0
-}
-
-define <4 x i32> @test_vsha1su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w12_15) {
-; CHECK: test_vsha1su1q_u32:
-; CHECK: sha1su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %sha1su12.i = tail call <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32> %tw0_3, <4 x i32> %w12_15)
- ret <4 x i32> %sha1su12.i
-}
-
-define <4 x i32> @test_vsha256su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7) {
-; CHECK: test_vsha256su0q_u32:
-; CHECK: sha256su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %sha256su02.i = tail call <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32> %w0_3, <4 x i32> %w4_7)
- ret <4 x i32> %sha256su02.i
-}
-
-define <4 x i32> @test_vsha1cq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
-; CHECK: test_vsha1cq_u32:
-; CHECK: sha1c {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %sha1c.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
- %sha1c1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1c(<4 x i32> %hash_abcd, <1 x i32> %sha1c.i, <4 x i32> %wk)
- ret <4 x i32> %sha1c1.i
-}
-
-define <4 x i32> @test_vsha1pq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
-; CHECK: test_vsha1pq_u32:
-; CHECK: sha1p {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %sha1p.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
- %sha1p1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1p(<4 x i32> %hash_abcd, <1 x i32> %sha1p.i, <4 x i32> %wk)
- ret <4 x i32> %sha1p1.i
-}
-
-define <4 x i32> @test_vsha1mq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
-; CHECK: test_vsha1mq_u32:
-; CHECK: sha1m {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %sha1m.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
- %sha1m1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1m(<4 x i32> %hash_abcd, <1 x i32> %sha1m.i, <4 x i32> %wk)
- ret <4 x i32> %sha1m1.i
-}
-
-define <4 x i32> @test_vsha1su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11) {
-; CHECK: test_vsha1su0q_u32:
-; CHECK: sha1su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %sha1su03.i = tail call <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11)
- ret <4 x i32> %sha1su03.i
-}
-
-define <4 x i32> @test_vsha256hq_u32(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk) {
-; CHECK: test_vsha256hq_u32:
-; CHECK: sha256h {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %sha256h3.i = tail call <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
- ret <4 x i32> %sha256h3.i
-}
-
-define <4 x i32> @test_vsha256h2q_u32(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk) {
-; CHECK: test_vsha256h2q_u32:
-; CHECK: sha256h2 {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
-entry:
- %sha256h23.i = tail call <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
- ret <4 x i32> %sha256h23.i
-}
-
-define <4 x i32> @test_vsha256su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15) {
-; CHECK: test_vsha256su1q_u32:
-; CHECK: sha256su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-entry:
- %sha256su13.i = tail call <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
- ret <4 x i32> %sha256su13.i
-}
-
diff --git a/test/CodeGen/AArch64/neon-diagnostics.ll b/test/CodeGen/AArch64/neon-diagnostics.ll
index f546aa7d3341..099b6856cec0 100644
--- a/test/CodeGen/AArch64/neon-diagnostics.ll
+++ b/test/CodeGen/AArch64/neon-diagnostics.ll
@@ -21,4 +21,4 @@ define <4 x i32> @test_vshrn_not_match(<2 x i32> %a, <2 x i64> %b) {
%shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
%4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
ret <4 x i32> %4
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/neon-extract.ll b/test/CodeGen/AArch64/neon-extract.ll
index 5c52cd30676a..f270b54abb46 100644
--- a/test/CodeGen/AArch64/neon-extract.ll
+++ b/test/CodeGen/AArch64/neon-extract.ll
@@ -1,190 +1,222 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
define <8 x i8> @test_vext_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vext_s8:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+; CHECK-LABEL: test_vext_s8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x2|2}}
entry:
%vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
ret <8 x i8> %vext
}
define <4 x i16> @test_vext_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vext_s16:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+; CHECK-LABEL: test_vext_s16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x6|6}}
entry:
%vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
ret <4 x i16> %vext
}
define <2 x i32> @test_vext_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vext_s32:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+; CHECK-LABEL: test_vext_s32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x4|4}}
entry:
%vext = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 2>
ret <2 x i32> %vext
}
define <1 x i64> @test_vext_s64(<1 x i64> %a, <1 x i64> %b) {
-; CHECK: test_vext_s64:
+; CHECK-LABEL: test_vext_s64:
entry:
%vext = shufflevector <1 x i64> %a, <1 x i64> %b, <1 x i32> <i32 0>
ret <1 x i64> %vext
}
define <16 x i8> @test_vextq_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vextq_s8:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+; CHECK-LABEL: test_vextq_s8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x2|2}}
entry:
%vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
ret <16 x i8> %vext
}
define <8 x i16> @test_vextq_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vextq_s16:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+; CHECK-LABEL: test_vextq_s16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x6|6}}
entry:
%vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
ret <8 x i16> %vext
}
define <4 x i32> @test_vextq_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vextq_s32:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+; CHECK-LABEL: test_vextq_s32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x4|4}}
entry:
%vext = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
ret <4 x i32> %vext
}
define <2 x i64> @test_vextq_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vextq_s64:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+; CHECK-LABEL: test_vextq_s64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x8|8}}
entry:
%vext = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
ret <2 x i64> %vext
}
define <8 x i8> @test_vext_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vext_u8:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+; CHECK-LABEL: test_vext_u8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x2|2}}
entry:
%vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
ret <8 x i8> %vext
}
define <4 x i16> @test_vext_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vext_u16:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+; CHECK-LABEL: test_vext_u16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x6|6}}
entry:
%vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
ret <4 x i16> %vext
}
define <2 x i32> @test_vext_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vext_u32:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+; CHECK-LABEL: test_vext_u32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x4|4}}
entry:
%vext = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 2>
ret <2 x i32> %vext
}
define <1 x i64> @test_vext_u64(<1 x i64> %a, <1 x i64> %b) {
-; CHECK: test_vext_u64:
+; CHECK-LABEL: test_vext_u64:
entry:
%vext = shufflevector <1 x i64> %a, <1 x i64> %b, <1 x i32> <i32 0>
ret <1 x i64> %vext
}
define <16 x i8> @test_vextq_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vextq_u8:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+; CHECK-LABEL: test_vextq_u8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x2|2}}
entry:
%vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
ret <16 x i8> %vext
}
define <8 x i16> @test_vextq_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vextq_u16:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+; CHECK-LABEL: test_vextq_u16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x6|6}}
entry:
%vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
ret <8 x i16> %vext
}
define <4 x i32> @test_vextq_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vextq_u32:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+; CHECK-LABEL: test_vextq_u32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x4|4}}
entry:
%vext = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
ret <4 x i32> %vext
}
define <2 x i64> @test_vextq_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vextq_u64:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+; CHECK-LABEL: test_vextq_u64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x8|8}}
entry:
%vext = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
ret <2 x i64> %vext
}
define <2 x float> @test_vext_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vext_f32:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+; CHECK-LABEL: test_vext_f32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x4|4}}
entry:
%vext = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 2>
ret <2 x float> %vext
}
define <1 x double> @test_vext_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK: test_vext_f64:
+; CHECK-LABEL: test_vext_f64:
entry:
%vext = shufflevector <1 x double> %a, <1 x double> %b, <1 x i32> <i32 0>
ret <1 x double> %vext
}
define <4 x float> @test_vextq_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vextq_f32:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+; CHECK-LABEL: test_vextq_f32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x4|4}}
entry:
%vext = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
ret <4 x float> %vext
}
define <2 x double> @test_vextq_f64(<2 x double> %a, <2 x double> %b) {
-; CHECK: test_vextq_f64:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+; CHECK-LABEL: test_vextq_f64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x8|8}}
entry:
%vext = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 2>
ret <2 x double> %vext
}
define <8 x i8> @test_vext_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vext_p8:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+; CHECK-LABEL: test_vext_p8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x2|2}}
entry:
%vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
ret <8 x i8> %vext
}
define <4 x i16> @test_vext_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vext_p16:
-; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+; CHECK-LABEL: test_vext_p16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x6|6}}
entry:
%vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
ret <4 x i16> %vext
}
define <16 x i8> @test_vextq_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vextq_p8:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+; CHECK-LABEL: test_vextq_p8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x2|2}}
entry:
%vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
ret <16 x i8> %vext
}
define <8 x i16> @test_vextq_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vextq_p16:
-; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+; CHECK-LABEL: test_vextq_p16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x6|6}}
entry:
%vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
ret <8 x i16> %vext
}
+
+define <8 x i8> @test_undef_vext_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vext_s8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x2|2}}
+entry:
+ %vext = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 10, i32 10, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i8> %vext
+}
+
+define <16 x i8> @test_undef_vextq_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vextq_s8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x6|6}}
+entry:
+ %vext = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 20, i32 20, i32 20, i32 20, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 20, i32 20, i32 20, i32 20, i32 20>
+ ret <16 x i8> %vext
+}
+
+define <4 x i16> @test_undef_vext_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vext_s16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x4|4}}
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x i16> %vext
+}
+
+define <8 x i16> @test_undef_vextq_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vextq_s16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x6|6}}
+entry:
+ %vext = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 10, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i16> %vext
+}
diff --git a/test/CodeGen/AArch64/neon-facge-facgt.ll b/test/CodeGen/AArch64/neon-facge-facgt.ll
deleted file mode 100644
index 146256e4be11..000000000000
--- a/test/CodeGen/AArch64/neon-facge-facgt.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <2 x i32> @llvm.arm.neon.vacged(<2 x float>, <2 x float>)
-declare <4 x i32> @llvm.arm.neon.vacgeq(<4 x float>, <4 x float>)
-declare <2 x i64> @llvm.aarch64.neon.vacgeq(<2 x double>, <2 x double>)
-
-define <2 x i32> @facge_from_intr_v2i32(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: facge_from_intr_v2i32:
- %val = call <2 x i32> @llvm.arm.neon.vacged(<2 x float> %A, <2 x float> %B)
-; CHECK: facge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- ret <2 x i32> %val
-}
-define <4 x i32> @facge_from_intr_v4i32( <4 x float> %A, <4 x float> %B) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: facge_from_intr_v4i32:
- %val = call <4 x i32> @llvm.arm.neon.vacgeq(<4 x float> %A, <4 x float> %B)
-; CHECK: facge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
- ret <4 x i32> %val
-}
-
-define <2 x i64> @facge_from_intr_v2i64(<2 x double> %A, <2 x double> %B) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: facge_from_intr_v2i64:
- %val = call <2 x i64> @llvm.aarch64.neon.vacgeq(<2 x double> %A, <2 x double> %B)
-; CHECK: facge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- ret <2 x i64> %val
-}
-
-declare <2 x i32> @llvm.arm.neon.vacgtd(<2 x float>, <2 x float>)
-declare <4 x i32> @llvm.arm.neon.vacgtq(<4 x float>, <4 x float>)
-declare <2 x i64> @llvm.aarch64.neon.vacgtq(<2 x double>, <2 x double>)
-
-define <2 x i32> @facgt_from_intr_v2i32(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: facgt_from_intr_v2i32:
- %val = call <2 x i32> @llvm.arm.neon.vacgtd(<2 x float> %A, <2 x float> %B)
-; CHECK: facgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- ret <2 x i32> %val
-}
-define <4 x i32> @facgt_from_intr_v4i32( <4 x float> %A, <4 x float> %B) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: facgt_from_intr_v4i32:
- %val = call <4 x i32> @llvm.arm.neon.vacgtq(<4 x float> %A, <4 x float> %B)
-; CHECK: facgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
- ret <4 x i32> %val
-}
-
-define <2 x i64> @facgt_from_intr_v2i64(<2 x double> %A, <2 x double> %B) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: facgt_from_intr_v2i64:
- %val = call <2 x i64> @llvm.aarch64.neon.vacgtq(<2 x double> %A, <2 x double> %B)
-; CHECK: facgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- ret <2 x i64> %val
-}
-
diff --git a/test/CodeGen/AArch64/neon-fma.ll b/test/CodeGen/AArch64/neon-fma.ll
index dcf4e2878068..af70302ca939 100644
--- a/test/CodeGen/AArch64/neon-fma.ll
+++ b/test/CodeGen/AArch64/neon-fma.ll
@@ -1,21 +1,21 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
define <2 x float> @fmla2xfloat(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
-;CHECK: fmla {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+;CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp1 = fmul <2 x float> %A, %B;
%tmp2 = fadd <2 x float> %C, %tmp1;
ret <2 x float> %tmp2
}
define <4 x float> @fmla4xfloat(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
-;CHECK: fmla {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+;CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp1 = fmul <4 x float> %A, %B;
%tmp2 = fadd <4 x float> %C, %tmp1;
ret <4 x float> %tmp2
}
define <2 x double> @fmla2xdouble(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
-;CHECK: fmla {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+;CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp1 = fmul <2 x double> %A, %B;
%tmp2 = fadd <2 x double> %C, %tmp1;
ret <2 x double> %tmp2
@@ -23,21 +23,21 @@ define <2 x double> @fmla2xdouble(<2 x double> %A, <2 x double> %B, <2 x double>
define <2 x float> @fmls2xfloat(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
-;CHECK: fmls {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+;CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp1 = fmul <2 x float> %A, %B;
%tmp2 = fsub <2 x float> %C, %tmp1;
ret <2 x float> %tmp2
}
define <4 x float> @fmls4xfloat(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
-;CHECK: fmls {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+;CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp1 = fmul <4 x float> %A, %B;
%tmp2 = fsub <4 x float> %C, %tmp1;
ret <4 x float> %tmp2
}
define <2 x double> @fmls2xdouble(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
-;CHECK: fmls {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+;CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp1 = fmul <2 x double> %A, %B;
%tmp2 = fsub <2 x double> %C, %tmp1;
ret <2 x double> %tmp2
@@ -51,39 +51,39 @@ declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
define <2 x float> @fmla2xfloat_fused(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
-;CHECK: fmla {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+;CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%val = call <2 x float> @llvm.fma.v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C)
ret <2 x float> %val
}
define <4 x float> @fmla4xfloat_fused(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
-;CHECK: fmla {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+;CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%val = call <4 x float> @llvm.fma.v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C)
ret <4 x float> %val
}
define <2 x double> @fmla2xdouble_fused(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
-;CHECK: fmla {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+;CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%val = call <2 x double> @llvm.fma.v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C)
ret <2 x double> %val
}
define <2 x float> @fmls2xfloat_fused(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
-;CHECK: fmls {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+;CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%negA = fsub <2 x float> <float -0.0, float -0.0>, %A
%val = call <2 x float> @llvm.fma.v2f32(<2 x float> %negA, <2 x float> %B, <2 x float> %C)
ret <2 x float> %val
}
define <4 x float> @fmls4xfloat_fused(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
-;CHECK: fmls {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+;CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%negA = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %A
%val = call <4 x float> @llvm.fma.v4f32(<4 x float> %negA, <4 x float> %B, <4 x float> %C)
ret <4 x float> %val
}
define <2 x double> @fmls2xdouble_fused(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
-;CHECK: fmls {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+;CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%negA = fsub <2 x double> <double -0.0, double -0.0>, %A
%val = call <2 x double> @llvm.fma.v2f64(<2 x double> %negA, <2 x double> %B, <2 x double> %C)
ret <2 x double> %val
@@ -94,19 +94,39 @@ declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
define <2 x float> @fmuladd2xfloat(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
-;CHECK: fmla {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+;CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%val = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C)
ret <2 x float> %val
}
define <4 x float> @fmuladd4xfloat_fused(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
-;CHECK: fmla {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+;CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%val = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C)
ret <4 x float> %val
}
define <2 x double> @fmuladd2xdouble_fused(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
-;CHECK: fmla {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+;CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%val = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C)
ret <2 x double> %val
}
+
+
+; Another set of tests that check for a single use of the multiply
+
+define <2 x float> @fmla2xfloati_su(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+;CHECK-NOT: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp1 = fmul <2 x float> %A, %B;
+ %tmp2 = fadd <2 x float> %C, %tmp1;
+ %tmp3 = fadd <2 x float> %tmp2, %tmp1;
+ ret <2 x float> %tmp3
+}
+
+define <2 x double> @fmls2xdouble_su(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
+;CHECK-NOT: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp1 = fmul <2 x double> %A, %B;
+ %tmp2 = fsub <2 x double> %C, %tmp1;
+ %tmp3 = fsub <2 x double> %tmp2, %tmp1;
+ ret <2 x double> %tmp3
+}
+
diff --git a/test/CodeGen/AArch64/neon-fpround_f128.ll b/test/CodeGen/AArch64/neon-fpround_f128.ll
new file mode 100644
index 000000000000..a93f3f2723c3
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-fpround_f128.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+define <1 x double> @test_fpround_v1f128(<1 x fp128>* %a) {
+; CHECK-LABEL: test_fpround_v1f128:
+; CHECK: bl __trunctfdf2
+ %b = load <1 x fp128>* %a
+ %c = fptrunc <1 x fp128> %b to <1 x double>
+ ret <1 x double> %c
+}
+
+define <2 x double> @test_fpround_v2f128(<2 x fp128>* %a) {
+; CHECK-LABEL: test_fpround_v2f128:
+; CHECK: bl __trunctfdf2
+; CHECK: bl __trunctfdf2
+ %b = load <2 x fp128>* %a
+ %c = fptrunc <2 x fp128> %b to <2 x double>
+ ret <2 x double> %c
+}
diff --git a/test/CodeGen/AArch64/neon-frsqrt-frecp.ll b/test/CodeGen/AArch64/neon-frsqrt-frecp.ll
deleted file mode 100644
index 46fe25d74d9d..000000000000
--- a/test/CodeGen/AArch64/neon-frsqrt-frecp.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-; Set of tests for when the intrinsic is used.
-
-declare <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.arm.neon.vrsqrts.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @frsqrts_from_intr_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; CHECK: frsqrts v0.2s, v0.2s, v1.2s
- %val = call <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float> %lhs, <2 x float> %rhs)
- ret <2 x float> %val
-}
-
-define <4 x float> @frsqrts_from_intr_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; CHECK: frsqrts v0.4s, v0.4s, v1.4s
- %val = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %lhs, <4 x float> %rhs)
- ret <4 x float> %val
-}
-
-define <2 x double> @frsqrts_from_intr_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; CHECK: frsqrts v0.2d, v0.2d, v1.2d
- %val = call <2 x double> @llvm.arm.neon.vrsqrts.v2f64(<2 x double> %lhs, <2 x double> %rhs)
- ret <2 x double> %val
-}
-
-declare <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.arm.neon.vrecps.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @frecps_from_intr_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; CHECK: frecps v0.2s, v0.2s, v1.2s
- %val = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %lhs, <2 x float> %rhs)
- ret <2 x float> %val
-}
-
-define <4 x float> @frecps_from_intr_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; CHECK: frecps v0.4s, v0.4s, v1.4s
- %val = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %lhs, <4 x float> %rhs)
- ret <4 x float> %val
-}
-
-define <2 x double> @frecps_from_intr_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; CHECK: frecps v0.2d, v0.2d, v1.2d
- %val = call <2 x double> @llvm.arm.neon.vrecps.v2f64(<2 x double> %lhs, <2 x double> %rhs)
- ret <2 x double> %val
-}
-
diff --git a/test/CodeGen/AArch64/neon-halving-add-sub.ll b/test/CodeGen/AArch64/neon-halving-add-sub.ll
deleted file mode 100644
index a8f59dbdb0ad..000000000000
--- a/test/CodeGen/AArch64/neon-halving-add-sub.ll
+++ /dev/null
@@ -1,207 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uhadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uhadd_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uhadd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_shadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_shadd_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: shadd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_uhadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uhadd_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uhadd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_shadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_shadd_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: shadd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_uhadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uhadd_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uhadd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_shadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_shadd_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: shadd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_uhadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uhadd_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uhadd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_shadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_shadd_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: shadd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_uhadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uhadd_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uhadd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_shadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_shadd_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: shadd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_uhadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uhadd_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uhadd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_shadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_shadd_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: shadd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-
-declare <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uhsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uhsub_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uhsub v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_shsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_shsub_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: shsub v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_uhsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uhsub_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uhsub v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_shsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_shsub_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: shsub v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_uhsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uhsub_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uhsub v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_shsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_shsub_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: shsub v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_uhsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uhsub_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uhsub v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_shsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_shsub_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: shsub v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_uhsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uhsub_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uhsub v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_shsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_shsub_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: shsub v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vhsubu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_uhsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uhsub_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vhsubu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uhsub v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_shsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_shsub_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: shsub v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
diff --git a/test/CodeGen/AArch64/neon-idiv.ll b/test/CodeGen/AArch64/neon-idiv.ll
new file mode 100644
index 000000000000..de402c4780be
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-idiv.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -mattr=+neon | FileCheck %s
+
+define <4 x i32> @test1(<4 x i32> %a) {
+ %rem = srem <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %rem
+; CHECK-LABEL: test1
+; FIXME: Can we lower this more efficiently?
+; CHECK: mul
+; CHECK: mul
+; CHECK: mul
+; CHECK: mul
+}
+
diff --git a/test/CodeGen/AArch64/neon-max-min-pairwise.ll b/test/CodeGen/AArch64/neon-max-min-pairwise.ll
deleted file mode 100644
index d757aca86a69..000000000000
--- a/test/CodeGen/AArch64/neon-max-min-pairwise.ll
+++ /dev/null
@@ -1,310 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_smaxp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; Using registers other than v0 and v1 is possible, but would be odd.
-; CHECK: test_smaxp_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: smaxp v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_umaxp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
- %tmp1 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: umaxp v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vpmaxs.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vpmaxu.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_smaxp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_smaxp_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vpmaxs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: smaxp v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_umaxp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_umaxp_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vpmaxu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: umaxp v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_smaxp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_smaxp_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: smaxp v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_umaxp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_umaxp_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: umaxp v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-
-declare <8 x i16> @llvm.arm.neon.vpmaxs.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vpmaxu.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_smaxp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_smaxp_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vpmaxs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: smaxp v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_umaxp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_umaxp_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vpmaxu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: umaxp v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-
-declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_smaxp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_smaxp_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: smaxp v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_umaxp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_umaxp_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: umaxp v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vpmaxs.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vpmaxu.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_smaxp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_smaxp_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vpmaxs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: smaxp v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_umaxp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_umaxp_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vpmaxu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: umaxp v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_sminp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; Using registers other than v0 and v1 is possible, but would be odd.
-; CHECK: test_sminp_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: sminp v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_uminp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
- %tmp1 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uminp v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vpmins.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vpminu.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_sminp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_sminp_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vpmins.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: sminp v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_uminp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uminp_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vpminu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uminp v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_sminp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sminp_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sminp v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_uminp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uminp_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uminp v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-
-declare <8 x i16> @llvm.arm.neon.vpmins.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vpminu.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_sminp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sminp_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vpmins.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sminp v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_uminp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uminp_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vpminu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uminp v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-
-declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_sminp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sminp_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sminp v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_uminp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uminp_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uminp v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vpmins.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vpminu.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_sminp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sminp_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vpmins.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sminp v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_uminp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uminp_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vpminu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uminp v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.arm.neon.vpmaxs.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.arm.neon.vpmaxs.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fmaxp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fmaxp_v2f32:
- %val = call <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fmaxp v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fmaxp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fmaxp_v4f32:
- %val = call <4 x float> @llvm.arm.neon.vpmaxs.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fmaxp v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fmaxp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fmaxp_v2f64:
- %val = call <2 x double> @llvm.arm.neon.vpmaxs.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fmaxp v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
-declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.arm.neon.vpmins.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.arm.neon.vpmins.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fminp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fminp_v2f32:
- %val = call <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fminp v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fminp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fminp_v4f32:
- %val = call <4 x float> @llvm.arm.neon.vpmins.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fminp v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fminp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fminp_v2f64:
- %val = call <2 x double> @llvm.arm.neon.vpmins.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fminp v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
-declare <2 x float> @llvm.aarch64.neon.vpmaxnm.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.aarch64.neon.vpmaxnm.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.aarch64.neon.vpmaxnm.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fmaxnmp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fmaxnmp_v2f32:
- %val = call <2 x float> @llvm.aarch64.neon.vpmaxnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fmaxnmp v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fmaxnmp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fmaxnmp_v4f32:
- %val = call <4 x float> @llvm.aarch64.neon.vpmaxnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fmaxnmp v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fmaxnmp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fmaxnmp_v2f64:
- %val = call <2 x double> @llvm.aarch64.neon.vpmaxnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fmaxnmp v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
-declare <2 x float> @llvm.aarch64.neon.vpminnm.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.aarch64.neon.vpminnm.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.aarch64.neon.vpminnm.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fminnmp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fminnmp_v2f32:
- %val = call <2 x float> @llvm.aarch64.neon.vpminnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fminnmp v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fminnmp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fminnmp_v4f32:
- %val = call <4 x float> @llvm.aarch64.neon.vpminnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fminnmp v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fminnmp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fminnmp_v2f64:
- %val = call <2 x double> @llvm.aarch64.neon.vpminnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fminnmp v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
diff --git a/test/CodeGen/AArch64/neon-max-min.ll b/test/CodeGen/AArch64/neon-max-min.ll
deleted file mode 100644
index 7889c77e37f1..000000000000
--- a/test/CodeGen/AArch64/neon-max-min.ll
+++ /dev/null
@@ -1,310 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_smax_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; Using registers other than v0 and v1 is possible, but would be odd.
-; CHECK: test_smax_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: smax v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_umax_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
- %tmp1 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: umax v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_smax_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_smax_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: smax v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_umax_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_umax_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: umax v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_smax_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_smax_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: smax v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_umax_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_umax_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: umax v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-
-declare <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_smax_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_smax_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: smax v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_umax_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_umax_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: umax v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-
-declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_smax_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_smax_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: smax v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_umax_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_umax_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: umax v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_smax_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_smax_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: smax v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_umax_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_umax_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: umax v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_smin_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; Using registers other than v0 and v1 is possible, but would be odd.
-; CHECK: test_smin_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: smin v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_umin_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
- %tmp1 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: umin v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_smin_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_smin_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: smin v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_umin_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_umin_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: umin v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_smin_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_smin_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: smin v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_umin_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_umin_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: umin v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-
-declare <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_smin_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_smin_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: smin v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_umin_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_umin_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: umin v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-
-declare <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_smin_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_smin_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: smin v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_umin_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_umin_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: umin v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_smin_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_smin_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: smin v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_umin_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_umin_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: umin v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fmax_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fmax_v2f32:
- %val = call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fmax v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fmax_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fmax_v4f32:
- %val = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fmax v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fmax_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fmax_v2f64:
- %val = call <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fmax v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
-declare <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fmin_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fmin_v2f32:
- %val = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fmin v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fmin_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fmin_v4f32:
- %val = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fmin v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fmin_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fmin_v2f64:
- %val = call <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fmin v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
-
-declare <2 x float> @llvm.aarch64.neon.vmaxnm.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.aarch64.neon.vmaxnm.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fmaxnm_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fmaxnm_v2f32:
- %val = call <2 x float> @llvm.aarch64.neon.vmaxnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fmaxnm v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fmaxnm_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fmaxnm_v4f32:
- %val = call <4 x float> @llvm.aarch64.neon.vmaxnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fmaxnm v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fmaxnm_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fmaxnm_v2f64:
- %val = call <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fmaxnm v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
-
-declare <2 x float> @llvm.aarch64.neon.vminnm.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.aarch64.neon.vminnm.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @test_fminnm_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; CHECK: test_fminnm_v2f32:
- %val = call <2 x float> @llvm.aarch64.neon.vminnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
-; CHECK: fminnm v0.2s, v0.2s, v1.2s
- ret <2 x float> %val
-}
-
-define <4 x float> @test_fminnm_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; CHECK: test_fminnm_v4f32:
- %val = call <4 x float> @llvm.aarch64.neon.vminnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
-; CHECK: fminnm v0.4s, v0.4s, v1.4s
- ret <4 x float> %val
-}
-
-define <2 x double> @test_fminnm_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; CHECK: test_fminnm_v2f64:
- %val = call <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
-; CHECK: fminnm v0.2d, v0.2d, v1.2d
- ret <2 x double> %val
-}
diff --git a/test/CodeGen/AArch64/neon-misc-scalar.ll b/test/CodeGen/AArch64/neon-misc-scalar.ll
deleted file mode 100644
index cca8deb45cba..000000000000
--- a/test/CodeGen/AArch64/neon-misc-scalar.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-;RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-declare <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64>)
-
-declare <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64>)
-
-declare <1 x i64> @llvm.arm.neon.vabs.v1i64(<1 x i64>)
-
-declare <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64>, <1 x i64>)
-
-declare <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_vuqadd_s64(<1 x i64> %a, <1 x i64> %b) {
-entry:
- ; CHECK: test_vuqadd_s64
- %vuqadd2.i = tail call <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64> %a, <1 x i64> %b)
- ; CHECK: suqadd d{{[0-9]+}}, d{{[0-9]+}}
- ret <1 x i64> %vuqadd2.i
-}
-
-define <1 x i64> @test_vsqadd_u64(<1 x i64> %a, <1 x i64> %b) {
-entry:
- ; CHECK: test_vsqadd_u64
- %vsqadd2.i = tail call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %a, <1 x i64> %b)
- ; CHECK: usqadd d{{[0-9]+}}, d{{[0-9]+}}
- ret <1 x i64> %vsqadd2.i
-}
-
-define <1 x i64> @test_vabs_s64(<1 x i64> %a) {
- ; CHECK: test_vabs_s64
-entry:
- %vabs1.i = tail call <1 x i64> @llvm.arm.neon.vabs.v1i64(<1 x i64> %a)
- ; CHECK: abs d{{[0-9]+}}, d{{[0-9]+}}
- ret <1 x i64> %vabs1.i
-}
-
-define <1 x i64> @test_vqabs_s64(<1 x i64> %a) {
- ; CHECK: test_vqabs_s64
-entry:
- %vqabs1.i = tail call <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64> %a)
- ; CHECK: sqabs d{{[0-9]+}}, d{{[0-9]+}}
- ret <1 x i64> %vqabs1.i
-}
-
-define <1 x i64> @test_vqneg_s64(<1 x i64> %a) {
- ; CHECK: test_vqneg_s64
-entry:
- %vqneg1.i = tail call <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64> %a)
- ; CHECK: sqneg d{{[0-9]+}}, d{{[0-9]+}}
- ret <1 x i64> %vqneg1.i
-}
-
-define <1 x i64> @test_vneg_s64(<1 x i64> %a) {
- ; CHECK: test_vneg_s64
-entry:
- %sub.i = sub <1 x i64> zeroinitializer, %a
- ; CHECK: neg d{{[0-9]+}}, d{{[0-9]+}}
- ret <1 x i64> %sub.i
-}
-
diff --git a/test/CodeGen/AArch64/neon-misc.ll b/test/CodeGen/AArch64/neon-misc.ll
deleted file mode 100644
index 9660bf2c7a30..000000000000
--- a/test/CodeGen/AArch64/neon-misc.ll
+++ /dev/null
@@ -1,1799 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
-
-
-define <8 x i8> @test_vrev16_s8(<8 x i8> %a) #0 {
-; CHECK: rev16 v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
- ret <8 x i8> %shuffle.i
-}
-
-define <16 x i8> @test_vrev16q_s8(<16 x i8> %a) #0 {
-; CHECK: rev16 v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
- ret <16 x i8> %shuffle.i
-}
-
-define <8 x i8> @test_vrev32_s8(<8 x i8> %a) #0 {
-; CHECK: rev32 v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vrev32_s16(<4 x i16> %a) #0 {
-; CHECK: rev32 v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
- ret <4 x i16> %shuffle.i
-}
-
-define <16 x i8> @test_vrev32q_s8(<16 x i8> %a) #0 {
-; CHECK: rev32 v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
- ret <16 x i8> %shuffle.i
-}
-
-define <8 x i16> @test_vrev32q_s16(<8 x i16> %a) #0 {
-; CHECK: rev32 v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
- ret <8 x i16> %shuffle.i
-}
-
-define <8 x i8> @test_vrev64_s8(<8 x i8> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vrev64_s16(<4 x i16> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i16> %shuffle.i
-}
-
-define <2 x i32> @test_vrev64_s32(<2 x i32> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
- ret <2 x i32> %shuffle.i
-}
-
-define <2 x float> @test_vrev64_f32(<2 x float> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %shuffle.i = shufflevector <2 x float> %a, <2 x float> undef, <2 x i32> <i32 1, i32 0>
- ret <2 x float> %shuffle.i
-}
-
-define <16 x i8> @test_vrev64q_s8(<16 x i8> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
- ret <16 x i8> %shuffle.i
-}
-
-define <8 x i16> @test_vrev64q_s16(<8 x i16> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
- ret <8 x i16> %shuffle.i
-}
-
-define <4 x i32> @test_vrev64q_s32(<4 x i32> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
- ret <4 x i32> %shuffle.i
-}
-
-define <4 x float> @test_vrev64q_f32(<4 x float> %a) #0 {
-; CHECK: rev64 v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
- ret <4 x float> %shuffle.i
-}
-
-define <4 x i16> @test_vpaddl_s8(<8 x i8> %a) #0 {
-; CHECK: saddlp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
- %vpaddl.i = tail call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %a) #4
- ret <4 x i16> %vpaddl.i
-}
-
-define <2 x i32> @test_vpaddl_s16(<4 x i16> %a) #0 {
-; CHECK: saddlp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
- %vpaddl1.i = tail call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %a) #4
- ret <2 x i32> %vpaddl1.i
-}
-
-define <1 x i64> @test_vpaddl_s32(<2 x i32> %a) #0 {
-; CHECK: saddlp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
- %vpaddl1.i = tail call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %a) #4
- ret <1 x i64> %vpaddl1.i
-}
-
-define <4 x i16> @test_vpaddl_u8(<8 x i8> %a) #0 {
-; CHECK: uaddlp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
- %vpaddl.i = tail call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %a) #4
- ret <4 x i16> %vpaddl.i
-}
-
-define <2 x i32> @test_vpaddl_u16(<4 x i16> %a) #0 {
-; CHECK: uaddlp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
- %vpaddl1.i = tail call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %a) #4
- ret <2 x i32> %vpaddl1.i
-}
-
-define <1 x i64> @test_vpaddl_u32(<2 x i32> %a) #0 {
-; CHECK: uaddlp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
- %vpaddl1.i = tail call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %a) #4
- ret <1 x i64> %vpaddl1.i
-}
-
-define <8 x i16> @test_vpaddlq_s8(<16 x i8> %a) #0 {
-; CHECK: saddlp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
- %vpaddl.i = tail call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %a) #4
- ret <8 x i16> %vpaddl.i
-}
-
-define <4 x i32> @test_vpaddlq_s16(<8 x i16> %a) #0 {
-; CHECK: saddlp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
- %vpaddl1.i = tail call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %a) #4
- ret <4 x i32> %vpaddl1.i
-}
-
-define <2 x i64> @test_vpaddlq_s32(<4 x i32> %a) #0 {
-; CHECK: saddlp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
- %vpaddl1.i = tail call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %a) #4
- ret <2 x i64> %vpaddl1.i
-}
-
-define <8 x i16> @test_vpaddlq_u8(<16 x i8> %a) #0 {
-; CHECK: uaddlp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
- %vpaddl.i = tail call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %a) #4
- ret <8 x i16> %vpaddl.i
-}
-
-define <4 x i32> @test_vpaddlq_u16(<8 x i16> %a) #0 {
-; CHECK: uaddlp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
- %vpaddl1.i = tail call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %a) #4
- ret <4 x i32> %vpaddl1.i
-}
-
-define <2 x i64> @test_vpaddlq_u32(<4 x i32> %a) #0 {
-; CHECK: uaddlp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
- %vpaddl1.i = tail call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %a) #4
- ret <2 x i64> %vpaddl1.i
-}
-
-define <4 x i16> @test_vpadal_s8(<4 x i16> %a, <8 x i8> %b) #0 {
-; CHECK: sadalp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
- %vpadal1.i = tail call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %a, <8 x i8> %b) #4
- ret <4 x i16> %vpadal1.i
-}
-
-define <2 x i32> @test_vpadal_s16(<2 x i32> %a, <4 x i16> %b) #0 {
-; CHECK: sadalp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
- %vpadal2.i = tail call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %a, <4 x i16> %b) #4
- ret <2 x i32> %vpadal2.i
-}
-
-define <1 x i64> @test_vpadal_s32(<1 x i64> %a, <2 x i32> %b) #0 {
-; CHECK: sadalp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
- %vpadal2.i = tail call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %a, <2 x i32> %b) #4
- ret <1 x i64> %vpadal2.i
-}
-
-define <4 x i16> @test_vpadal_u8(<4 x i16> %a, <8 x i8> %b) #0 {
-; CHECK: uadalp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
- %vpadal1.i = tail call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %a, <8 x i8> %b) #4
- ret <4 x i16> %vpadal1.i
-}
-
-define <2 x i32> @test_vpadal_u16(<2 x i32> %a, <4 x i16> %b) #0 {
-; CHECK: uadalp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
- %vpadal2.i = tail call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %a, <4 x i16> %b) #4
- ret <2 x i32> %vpadal2.i
-}
-
-define <1 x i64> @test_vpadal_u32(<1 x i64> %a, <2 x i32> %b) #0 {
-; CHECK: uadalp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
- %vpadal2.i = tail call <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64> %a, <2 x i32> %b) #4
- ret <1 x i64> %vpadal2.i
-}
-
-define <8 x i16> @test_vpadalq_s8(<8 x i16> %a, <16 x i8> %b) #0 {
-; CHECK: sadalp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
- %vpadal1.i = tail call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %a, <16 x i8> %b) #4
- ret <8 x i16> %vpadal1.i
-}
-
-define <4 x i32> @test_vpadalq_s16(<4 x i32> %a, <8 x i16> %b) #0 {
-; CHECK: sadalp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
- %vpadal2.i = tail call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %a, <8 x i16> %b) #4
- ret <4 x i32> %vpadal2.i
-}
-
-define <2 x i64> @test_vpadalq_s32(<2 x i64> %a, <4 x i32> %b) #0 {
-; CHECK: sadalp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
- %vpadal2.i = tail call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %a, <4 x i32> %b) #4
- ret <2 x i64> %vpadal2.i
-}
-
-define <8 x i16> @test_vpadalq_u8(<8 x i16> %a, <16 x i8> %b) #0 {
-; CHECK: uadalp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
- %vpadal1.i = tail call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %a, <16 x i8> %b) #4
- ret <8 x i16> %vpadal1.i
-}
-
-define <4 x i32> @test_vpadalq_u16(<4 x i32> %a, <8 x i16> %b) #0 {
-; CHECK: uadalp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
- %vpadal2.i = tail call <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32> %a, <8 x i16> %b) #4
- ret <4 x i32> %vpadal2.i
-}
-
-define <2 x i64> @test_vpadalq_u32(<2 x i64> %a, <4 x i32> %b) #0 {
-; CHECK: uadalp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
- %vpadal2.i = tail call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %a, <4 x i32> %b) #4
- ret <2 x i64> %vpadal2.i
-}
-
-define <8 x i8> @test_vqabs_s8(<8 x i8> %a) #0 {
-; CHECK: sqabs v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vqabs.i = tail call <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8> %a) #4
- ret <8 x i8> %vqabs.i
-}
-
-define <16 x i8> @test_vqabsq_s8(<16 x i8> %a) #0 {
-; CHECK: sqabs v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vqabs.i = tail call <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8> %a) #4
- ret <16 x i8> %vqabs.i
-}
-
-define <4 x i16> @test_vqabs_s16(<4 x i16> %a) #0 {
-; CHECK: sqabs v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %vqabs1.i = tail call <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16> %a) #4
- ret <4 x i16> %vqabs1.i
-}
-
-define <8 x i16> @test_vqabsq_s16(<8 x i16> %a) #0 {
-; CHECK: sqabs v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %vqabs1.i = tail call <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16> %a) #4
- ret <8 x i16> %vqabs1.i
-}
-
-define <2 x i32> @test_vqabs_s32(<2 x i32> %a) #0 {
-; CHECK: sqabs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vqabs1.i = tail call <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32> %a) #4
- ret <2 x i32> %vqabs1.i
-}
-
-define <4 x i32> @test_vqabsq_s32(<4 x i32> %a) #0 {
-; CHECK: sqabs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vqabs1.i = tail call <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32> %a) #4
- ret <4 x i32> %vqabs1.i
-}
-
-define <2 x i64> @test_vqabsq_s64(<2 x i64> %a) #0 {
-; CHECK: sqabs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vqabs1.i = tail call <2 x i64> @llvm.arm.neon.vqabs.v2i64(<2 x i64> %a) #4
- ret <2 x i64> %vqabs1.i
-}
-
-define <8 x i8> @test_vqneg_s8(<8 x i8> %a) #0 {
-; CHECK: sqneg v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vqneg.i = tail call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %a) #4
- ret <8 x i8> %vqneg.i
-}
-
-define <16 x i8> @test_vqnegq_s8(<16 x i8> %a) #0 {
-; CHECK: sqneg v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vqneg.i = tail call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %a) #4
- ret <16 x i8> %vqneg.i
-}
-
-define <4 x i16> @test_vqneg_s16(<4 x i16> %a) #0 {
-; CHECK: sqneg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %vqneg1.i = tail call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %a) #4
- ret <4 x i16> %vqneg1.i
-}
-
-define <8 x i16> @test_vqnegq_s16(<8 x i16> %a) #0 {
-; CHECK: sqneg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %vqneg1.i = tail call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %a) #4
- ret <8 x i16> %vqneg1.i
-}
-
-define <2 x i32> @test_vqneg_s32(<2 x i32> %a) #0 {
-; CHECK: sqneg v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vqneg1.i = tail call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %a) #4
- ret <2 x i32> %vqneg1.i
-}
-
-define <4 x i32> @test_vqnegq_s32(<4 x i32> %a) #0 {
-; CHECK: sqneg v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vqneg1.i = tail call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %a) #4
- ret <4 x i32> %vqneg1.i
-}
-
-define <2 x i64> @test_vqnegq_s64(<2 x i64> %a) #0 {
-; CHECK: sqneg v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vqneg1.i = tail call <2 x i64> @llvm.arm.neon.vqneg.v2i64(<2 x i64> %a) #4
- ret <2 x i64> %vqneg1.i
-}
-
-define <8 x i8> @test_vneg_s8(<8 x i8> %a) #0 {
-; CHECK: neg v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %sub.i = sub <8 x i8> zeroinitializer, %a
- ret <8 x i8> %sub.i
-}
-
-define <16 x i8> @test_vnegq_s8(<16 x i8> %a) #0 {
-; CHECK: neg v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %sub.i = sub <16 x i8> zeroinitializer, %a
- ret <16 x i8> %sub.i
-}
-
-define <4 x i16> @test_vneg_s16(<4 x i16> %a) #0 {
-; CHECK: neg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %sub.i = sub <4 x i16> zeroinitializer, %a
- ret <4 x i16> %sub.i
-}
-
-define <8 x i16> @test_vnegq_s16(<8 x i16> %a) #0 {
-; CHECK: neg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %sub.i = sub <8 x i16> zeroinitializer, %a
- ret <8 x i16> %sub.i
-}
-
-define <2 x i32> @test_vneg_s32(<2 x i32> %a) #0 {
-; CHECK: neg v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %sub.i = sub <2 x i32> zeroinitializer, %a
- ret <2 x i32> %sub.i
-}
-
-define <4 x i32> @test_vnegq_s32(<4 x i32> %a) #0 {
-; CHECK: neg v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %sub.i = sub <4 x i32> zeroinitializer, %a
- ret <4 x i32> %sub.i
-}
-
-define <2 x i64> @test_vnegq_s64(<2 x i64> %a) #0 {
-; CHECK: neg v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %sub.i = sub <2 x i64> zeroinitializer, %a
- ret <2 x i64> %sub.i
-}
-
-define <2 x float> @test_vneg_f32(<2 x float> %a) #0 {
-; CHECK: fneg v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %a
- ret <2 x float> %sub.i
-}
-
-define <4 x float> @test_vnegq_f32(<4 x float> %a) #0 {
-; CHECK: fneg v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
- ret <4 x float> %sub.i
-}
-
-define <2 x double> @test_vnegq_f64(<2 x double> %a) #0 {
-; CHECK: fneg v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
- ret <2 x double> %sub.i
-}
-
-define <8 x i8> @test_vabs_s8(<8 x i8> %a) #0 {
-; CHECK: abs v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vabs.i = tail call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %a) #4
- ret <8 x i8> %vabs.i
-}
-
-define <16 x i8> @test_vabsq_s8(<16 x i8> %a) #0 {
-; CHECK: abs v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vabs.i = tail call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %a) #4
- ret <16 x i8> %vabs.i
-}
-
-define <4 x i16> @test_vabs_s16(<4 x i16> %a) #0 {
-; CHECK: abs v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %vabs1.i = tail call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %a) #4
- ret <4 x i16> %vabs1.i
-}
-
-define <8 x i16> @test_vabsq_s16(<8 x i16> %a) #0 {
-; CHECK: abs v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %vabs1.i = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %a) #4
- ret <8 x i16> %vabs1.i
-}
-
-define <2 x i32> @test_vabs_s32(<2 x i32> %a) #0 {
-; CHECK: abs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vabs1.i = tail call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %a) #4
- ret <2 x i32> %vabs1.i
-}
-
-define <4 x i32> @test_vabsq_s32(<4 x i32> %a) #0 {
-; CHECK: abs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vabs1.i = tail call <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32> %a) #4
- ret <4 x i32> %vabs1.i
-}
-
-define <2 x i64> @test_vabsq_s64(<2 x i64> %a) #0 {
-; CHECK: abs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vabs1.i = tail call <2 x i64> @llvm.arm.neon.vabs.v2i64(<2 x i64> %a) #4
- ret <2 x i64> %vabs1.i
-}
-
-define <2 x float> @test_vabs_f32(<2 x float> %a) #1 {
-; CHECK: fabs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vabs1.i = tail call <2 x float> @llvm.fabs.v2f32(<2 x float> %a) #4
- ret <2 x float> %vabs1.i
-}
-
-define <4 x float> @test_vabsq_f32(<4 x float> %a) #1 {
-; CHECK: fabs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vabs1.i = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a) #4
- ret <4 x float> %vabs1.i
-}
-
-define <2 x double> @test_vabsq_f64(<2 x double> %a) #1 {
-; CHECK: fabs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vabs1.i = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %a) #4
- ret <2 x double> %vabs1.i
-}
-
-define <8 x i8> @test_vuqadd_s8(<8 x i8> %a, <8 x i8> %b) #0 {
-; CHECK: suqadd v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vuqadd.i = tail call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %a, <8 x i8> %b) #4
- ret <8 x i8> %vuqadd.i
-}
-
-define <16 x i8> @test_vuqaddq_s8(<16 x i8> %a, <16 x i8> %b) #0 {
-; CHECK: suqadd v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vuqadd.i = tail call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %a, <16 x i8> %b) #4
- ret <16 x i8> %vuqadd.i
-}
-
-define <4 x i16> @test_vuqadd_s16(<4 x i16> %a, <4 x i16> %b) #0 {
-; CHECK: suqadd v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %vuqadd2.i = tail call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %a, <4 x i16> %b) #4
- ret <4 x i16> %vuqadd2.i
-}
-
-define <8 x i16> @test_vuqaddq_s16(<8 x i16> %a, <8 x i16> %b) #0 {
-; CHECK: suqadd v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %vuqadd2.i = tail call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %a, <8 x i16> %b) #4
- ret <8 x i16> %vuqadd2.i
-}
-
-define <2 x i32> @test_vuqadd_s32(<2 x i32> %a, <2 x i32> %b) #0 {
-; CHECK: suqadd v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vuqadd2.i = tail call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %a, <2 x i32> %b) #4
- ret <2 x i32> %vuqadd2.i
-}
-
-define <4 x i32> @test_vuqaddq_s32(<4 x i32> %a, <4 x i32> %b) #0 {
-; CHECK: suqadd v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vuqadd2.i = tail call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %a, <4 x i32> %b) #4
- ret <4 x i32> %vuqadd2.i
-}
-
-define <2 x i64> @test_vuqaddq_s64(<2 x i64> %a, <2 x i64> %b) #0 {
-; CHECK: suqadd v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vuqadd2.i = tail call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %a, <2 x i64> %b) #4
- ret <2 x i64> %vuqadd2.i
-}
-
-define <8 x i8> @test_vcls_s8(<8 x i8> %a) #0 {
-; CHECK: cls v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vcls.i = tail call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %a) #4
- ret <8 x i8> %vcls.i
-}
-
-define <16 x i8> @test_vclsq_s8(<16 x i8> %a) #0 {
-; CHECK: cls v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vcls.i = tail call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %a) #4
- ret <16 x i8> %vcls.i
-}
-
-define <4 x i16> @test_vcls_s16(<4 x i16> %a) #0 {
-; CHECK: cls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %vcls1.i = tail call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %a) #4
- ret <4 x i16> %vcls1.i
-}
-
-define <8 x i16> @test_vclsq_s16(<8 x i16> %a) #0 {
-; CHECK: cls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %vcls1.i = tail call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %a) #4
- ret <8 x i16> %vcls1.i
-}
-
-define <2 x i32> @test_vcls_s32(<2 x i32> %a) #0 {
-; CHECK: cls v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcls1.i = tail call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %a) #4
- ret <2 x i32> %vcls1.i
-}
-
-define <4 x i32> @test_vclsq_s32(<4 x i32> %a) #0 {
-; CHECK: cls v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcls1.i = tail call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %a) #4
- ret <4 x i32> %vcls1.i
-}
-
-define <8 x i8> @test_vclz_s8(<8 x i8> %a) #0 {
-; CHECK: clz v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) #4
- ret <8 x i8> %vclz.i
-}
-
-define <16 x i8> @test_vclzq_s8(<16 x i8> %a) #0 {
-; CHECK: clz v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) #4
- ret <16 x i8> %vclz.i
-}
-
-define <4 x i16> @test_vclz_s16(<4 x i16> %a) #0 {
-; CHECK: clz v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
- %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) #4
- ret <4 x i16> %vclz1.i
-}
-
-define <8 x i16> @test_vclzq_s16(<8 x i16> %a) #0 {
-; CHECK: clz v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
- %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) #4
- ret <8 x i16> %vclz1.i
-}
-
-define <2 x i32> @test_vclz_s32(<2 x i32> %a) #0 {
-; CHECK: clz v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) #4
- ret <2 x i32> %vclz1.i
-}
-
-define <4 x i32> @test_vclzq_s32(<4 x i32> %a) #0 {
-; CHECK: clz v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vclz1.i = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false) #4
- ret <4 x i32> %vclz1.i
-}
-
-define <8 x i8> @test_vcnt_s8(<8 x i8> %a) #0 {
-; CHECK: cnt v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vctpop.i = tail call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %a) #4
- ret <8 x i8> %vctpop.i
-}
-
-define <16 x i8> @test_vcntq_s8(<16 x i8> %a) #0 {
-; CHECK: cnt v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vctpop.i = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a) #4
- ret <16 x i8> %vctpop.i
-}
-
-define <8 x i8> @test_vmvn_s8(<8 x i8> %a) #0 {
-; CHECK: not v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %neg.i = xor <8 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- ret <8 x i8> %neg.i
-}
-
-define <16 x i8> @test_vmvnq_s8(<16 x i8> %a) #0 {
-; CHECK: not v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %neg.i = xor <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- ret <16 x i8> %neg.i
-}
-
-define <4 x i16> @test_vmvn_s16(<4 x i16> %a) #0 {
-; CHECK: not v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %neg.i = xor <4 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1>
- ret <4 x i16> %neg.i
-}
-
-define <8 x i16> @test_vmvnq_s16(<8 x i16> %a) #0 {
-; CHECK: not v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %neg.i = xor <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
- ret <8 x i16> %neg.i
-}
-
-define <2 x i32> @test_vmvn_s32(<2 x i32> %a) #0 {
-; CHECK: not v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %neg.i = xor <2 x i32> %a, <i32 -1, i32 -1>
- ret <2 x i32> %neg.i
-}
-
-define <4 x i32> @test_vmvnq_s32(<4 x i32> %a) #0 {
-; CHECK: not v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %neg.i = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
- ret <4 x i32> %neg.i
-}
-
-define <8 x i8> @test_vrbit_s8(<8 x i8> %a) #0 {
-; CHECK: rbit v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
- %vrbit.i = tail call <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8> %a) #4
- ret <8 x i8> %vrbit.i
-}
-
-define <16 x i8> @test_vrbitq_s8(<16 x i8> %a) #0 {
-; CHECK: rbit v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
- %vrbit.i = tail call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %a) #4
- ret <16 x i8> %vrbit.i
-}
-
-define <8 x i8> @test_vmovn_s16(<8 x i16> %a) #0 {
-; CHECK: xtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
- %vmovn.i = trunc <8 x i16> %a to <8 x i8>
- ret <8 x i8> %vmovn.i
-}
-
-define <4 x i16> @test_vmovn_s32(<4 x i32> %a) #0 {
-; CHECK: xtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
- %vmovn.i = trunc <4 x i32> %a to <4 x i16>
- ret <4 x i16> %vmovn.i
-}
-
-define <2 x i32> @test_vmovn_s64(<2 x i64> %a) #0 {
-; CHECK: xtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
- %vmovn.i = trunc <2 x i64> %a to <2 x i32>
- ret <2 x i32> %vmovn.i
-}
-
-define <16 x i8> @test_vmovn_high_s16(<8 x i8> %a, <8 x i16> %b) #0 {
-; CHECK: xtn2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
- %vmovn.i.i = trunc <8 x i16> %b to <8 x i8>
- %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vmovn.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <16 x i8> %shuffle.i
-}
-
-define <8 x i16> @test_vmovn_high_s32(<4 x i16> %a, <4 x i32> %b) #0 {
-; CHECK: xtn2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
- %vmovn.i.i = trunc <4 x i32> %b to <4 x i16>
- %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vmovn.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %shuffle.i
-}
-
-define <4 x i32> @test_vmovn_high_s64(<2 x i32> %a, <2 x i64> %b) #0 {
-; CHECK: xtn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
- %vmovn.i.i = trunc <2 x i64> %b to <2 x i32>
- %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vmovn.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i32> %shuffle.i
-}
-
-define <8 x i8> @test_vqmovun_s16(<8 x i16> %a) #0 {
-; CHECK: sqxtun v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
- %vqdmull1.i = tail call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %a) #4
- ret <8 x i8> %vqdmull1.i
-}
-
-define <4 x i16> @test_vqmovun_s32(<4 x i32> %a) #0 {
-; CHECK: sqxtun v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
- %vqdmull1.i = tail call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %a) #4
- ret <4 x i16> %vqdmull1.i
-}
-
-define <2 x i32> @test_vqmovun_s64(<2 x i64> %a) #0 {
-; CHECK: sqxtun v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
- %vqdmull1.i = tail call <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64> %a) #4
- ret <2 x i32> %vqdmull1.i
-}
-
-define <16 x i8> @test_vqmovun_high_s16(<8 x i8> %a, <8 x i16> %b) #0 {
-; CHECK: sqxtun2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
- %vqdmull1.i.i = tail call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %b) #4
- %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vqdmull1.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <16 x i8> %shuffle.i
-}
-
-define <8 x i16> @test_vqmovun_high_s32(<4 x i16> %a, <4 x i32> %b) #0 {
-; CHECK: sqxtun2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
- %vqdmull1.i.i = tail call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %b) #4
- %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vqdmull1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %shuffle.i
-}
-
-define <4 x i32> @test_vqmovun_high_s64(<2 x i32> %a, <2 x i64> %b) #0 {
-; CHECK: sqxtun2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
- %vqdmull1.i.i = tail call <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64> %b) #4
- %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vqdmull1.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i32> %shuffle.i
-}
-
-define <8 x i8> @test_vqmovn_s16(<8 x i16> %a) #0 {
-; CHECK: sqxtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
- %vqmovn1.i = tail call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %a) #4
- ret <8 x i8> %vqmovn1.i
-}
-
-define <4 x i16> @test_vqmovn_s32(<4 x i32> %a) #0 {
-; CHECK: sqxtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
- %vqmovn1.i = tail call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %a) #4
- ret <4 x i16> %vqmovn1.i
-}
-
-define <2 x i32> @test_vqmovn_s64(<2 x i64> %a) #0 {
-; CHECK: sqxtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
- %vqmovn1.i = tail call <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64> %a) #4
- ret <2 x i32> %vqmovn1.i
-}
-
-define <16 x i8> @test_vqmovn_high_s16(<8 x i8> %a, <8 x i16> %b) #0 {
-; CHECK: sqxtn2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
- %vqmovn1.i.i = tail call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %b) #4
- %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vqmovn1.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <16 x i8> %shuffle.i
-}
-
-define <8 x i16> @test_vqmovn_high_s32(<4 x i16> %a, <4 x i32> %b) #0 {
-; CHECK: test_vqmovn_high_s32
- %vqmovn1.i.i = tail call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %b) #4
- %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vqmovn1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %shuffle.i
-}
-
-define <4 x i32> @test_vqmovn_high_s64(<2 x i32> %a, <2 x i64> %b) #0 {
-; CHECK: test_vqmovn_high_s64
- %vqmovn1.i.i = tail call <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64> %b) #4
- %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vqmovn1.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i32> %shuffle.i
-}
-
-define <8 x i8> @test_vqmovn_u16(<8 x i16> %a) #0 {
-; CHECK: uqxtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
- %vqmovn1.i = tail call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %a) #4
- ret <8 x i8> %vqmovn1.i
-}
-
-define <4 x i16> @test_vqmovn_u32(<4 x i32> %a) #0 {
-; CHECK: uqxtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
- %vqmovn1.i = tail call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %a) #4
- ret <4 x i16> %vqmovn1.i
-}
-
-define <2 x i32> @test_vqmovn_u64(<2 x i64> %a) #0 {
-; CHECK: uqxtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
- %vqmovn1.i = tail call <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64> %a) #4
- ret <2 x i32> %vqmovn1.i
-}
-
-define <16 x i8> @test_vqmovn_high_u16(<8 x i8> %a, <8 x i16> %b) #0 {
-; CHECK: uqxtn2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
- %vqmovn1.i.i = tail call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %b) #4
- %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vqmovn1.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <16 x i8> %shuffle.i
-}
-
-define <8 x i16> @test_vqmovn_high_u32(<4 x i16> %a, <4 x i32> %b) #0 {
-; CHECK: uqxtn2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
- %vqmovn1.i.i = tail call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %b) #4
- %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vqmovn1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %shuffle.i
-}
-
-define <4 x i32> @test_vqmovn_high_u64(<2 x i32> %a, <2 x i64> %b) #0 {
-; CHECK: uqxtn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
- %vqmovn1.i.i = tail call <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64> %b) #4
- %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vqmovn1.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i32> %shuffle.i
-}
-
-define <8 x i16> @test_vshll_n_s8(<8 x i8> %a) #0 {
-; CHECK: shll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #8
- %1 = sext <8 x i8> %a to <8 x i16>
- %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- ret <8 x i16> %vshll_n
-}
-
-define <4 x i32> @test_vshll_n_s16(<4 x i16> %a) #0 {
-; CHECK: shll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #16
- %1 = sext <4 x i16> %a to <4 x i32>
- %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
- ret <4 x i32> %vshll_n
-}
-
-define <2 x i64> @test_vshll_n_s32(<2 x i32> %a) #0 {
-; CHECK: shll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #32
- %1 = sext <2 x i32> %a to <2 x i64>
- %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
- ret <2 x i64> %vshll_n
-}
-
-define <8 x i16> @test_vshll_n_u8(<8 x i8> %a) #0 {
-; CHECK: shll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #8
- %1 = zext <8 x i8> %a to <8 x i16>
- %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- ret <8 x i16> %vshll_n
-}
-
-define <4 x i32> @test_vshll_n_u16(<4 x i16> %a) #0 {
-; CHECK: shll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #16
- %1 = zext <4 x i16> %a to <4 x i32>
- %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
- ret <4 x i32> %vshll_n
-}
-
-define <2 x i64> @test_vshll_n_u32(<2 x i32> %a) #0 {
-; CHECK: shll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #32
- %1 = zext <2 x i32> %a to <2 x i64>
- %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
- ret <2 x i64> %vshll_n
-}
-
-define <8 x i16> @test_vshll_high_n_s8(<16 x i8> %a) #0 {
-; CHECK: shll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #8
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %1 = sext <8 x i8> %shuffle.i to <8 x i16>
- %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- ret <8 x i16> %vshll_n
-}
-
-define <4 x i32> @test_vshll_high_n_s16(<8 x i16> %a) #0 {
-; CHECK: shll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #16
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %1 = sext <4 x i16> %shuffle.i to <4 x i32>
- %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
- ret <4 x i32> %vshll_n
-}
-
-define <2 x i64> @test_vshll_high_n_s32(<4 x i32> %a) #0 {
-; CHECK: shll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #32
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %1 = sext <2 x i32> %shuffle.i to <2 x i64>
- %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
- ret <2 x i64> %vshll_n
-}
-
-define <8 x i16> @test_vshll_high_n_u8(<16 x i8> %a) #0 {
-; CHECK: shll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #8
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %1 = zext <8 x i8> %shuffle.i to <8 x i16>
- %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
- ret <8 x i16> %vshll_n
-}
-
-define <4 x i32> @test_vshll_high_n_u16(<8 x i16> %a) #0 {
-; CHECK: shll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #16
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %1 = zext <4 x i16> %shuffle.i to <4 x i32>
- %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
- ret <4 x i32> %vshll_n
-}
-
-define <2 x i64> @test_vshll_high_n_u32(<4 x i32> %a) #0 {
-; CHECK: shll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #32
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- %1 = zext <2 x i32> %shuffle.i to <2 x i64>
- %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
- ret <2 x i64> %vshll_n
-}
-
-define <4 x i16> @test_vcvt_f16_f32(<4 x float> %a) #0 {
-; CHECK: fcvtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
- %vcvt1.i = tail call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %a) #4
- ret <4 x i16> %vcvt1.i
-}
-
-define <8 x i16> @test_vcvt_high_f16_f32(<4 x i16> %a, <4 x float> %b) #0 {
-; CHECK: fcvtn2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
- %vcvt1.i.i = tail call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %b) #4
- %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vcvt1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %shuffle.i
-}
-
-define <4 x float> @test_vcvt_f32_f16(<4 x i16> %a) #0 {
-; CHECK: fcvtl v{{[0-9]+}}.4s, v{{[0-9]+}}.4h
- %vcvt1.i = tail call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %a) #4
- ret <4 x float> %vcvt1.i
-}
-
-define <4 x float> @test_vcvt_high_f32_f16(<8 x i16> %a) #0 {
-; CHECK: fcvtl2 v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
- %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- %vcvt1.i.i = tail call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %shuffle.i.i) #4
- ret <4 x float> %vcvt1.i.i
-}
-
-define <2 x float> @test_vcvt_f32_f64(<2 x double> %a) #0 {
-; CHECK: fcvtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
- %vcvt.i = fptrunc <2 x double> %a to <2 x float>
- ret <2 x float> %vcvt.i
-}
-
-define <4 x float> @test_vcvt_high_f32_f64(<2 x float> %a, <2 x double> %b) #0 {
-; CHECK: fcvtn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
- %vcvt.i.i = fptrunc <2 x double> %b to <2 x float>
- %shuffle.i = shufflevector <2 x float> %a, <2 x float> %vcvt.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x float> %shuffle.i
-}
-
-define <2 x float> @test_vcvtx_f32_f64(<2 x double> %a) #0 {
-; CHECK: fcvtxn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
- %vcvtx_f32_f641.i = tail call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %a) #4
- ret <2 x float> %vcvtx_f32_f641.i
-}
-
-define <4 x float> @test_vcvtx_high_f32_f64(<2 x float> %a, <2 x double> %b) #0 {
-; CHECK: fcvtxn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
- %vcvtx_f32_f641.i.i = tail call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %b) #4
- %shuffle.i = shufflevector <2 x float> %a, <2 x float> %vcvtx_f32_f641.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x float> %shuffle.i
-}
-
-define <2 x double> @test_vcvt_f64_f32(<2 x float> %a) #0 {
-; CHECK: fcvtl v{{[0-9]+}}.2d, v{{[0-9]+}}.2s
- %vcvt.i = fpext <2 x float> %a to <2 x double>
- ret <2 x double> %vcvt.i
-}
-
-define <2 x double> @test_vcvt_high_f64_f32(<4 x float> %a) #0 {
-; CHECK: fcvtl2 v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
- %shuffle.i.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
- %vcvt.i.i = fpext <2 x float> %shuffle.i.i to <2 x double>
- ret <2 x double> %vcvt.i.i
-}
-
-define <2 x float> @test_vrndn_f32(<2 x float> %a) #0 {
-; CHECK: frintn v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrndn1.i = tail call <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrndn1.i
-}
-
-define <4 x float> @test_vrndnq_f32(<4 x float> %a) #0 {
-; CHECK: frintn v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrndn1.i = tail call <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrndn1.i
-}
-
-define <2 x double> @test_vrndnq_f64(<2 x double> %a) #0 {
-; CHECK: frintn v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrndn1.i = tail call <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrndn1.i
-}
-
-define <2 x float> @test_vrnda_f32(<2 x float> %a) #0 {
-; CHECK: frinta v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrnda1.i = tail call <2 x float> @llvm.round.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrnda1.i
-}
-
-define <4 x float> @test_vrndaq_f32(<4 x float> %a) #0 {
-; CHECK: frinta v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrnda1.i = tail call <4 x float> @llvm.round.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrnda1.i
-}
-
-define <2 x double> @test_vrndaq_f64(<2 x double> %a) #0 {
-; CHECK: frinta v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrnda1.i = tail call <2 x double> @llvm.round.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrnda1.i
-}
-
-define <2 x float> @test_vrndp_f32(<2 x float> %a) #0 {
-; CHECK: frintp v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrndp1.i = tail call <2 x float> @llvm.ceil.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrndp1.i
-}
-
-define <4 x float> @test_vrndpq_f32(<4 x float> %a) #0 {
-; CHECK: frintp v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrndp1.i = tail call <4 x float> @llvm.ceil.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrndp1.i
-}
-
-define <2 x double> @test_vrndpq_f64(<2 x double> %a) #0 {
-; CHECK: frintp v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrndp1.i = tail call <2 x double> @llvm.ceil.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrndp1.i
-}
-
-define <2 x float> @test_vrndm_f32(<2 x float> %a) #0 {
-; CHECK: frintm v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrndm1.i = tail call <2 x float> @llvm.floor.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrndm1.i
-}
-
-define <4 x float> @test_vrndmq_f32(<4 x float> %a) #0 {
-; CHECK: frintm v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrndm1.i = tail call <4 x float> @llvm.floor.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrndm1.i
-}
-
-define <2 x double> @test_vrndmq_f64(<2 x double> %a) #0 {
-; CHECK: frintm v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrndm1.i = tail call <2 x double> @llvm.floor.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrndm1.i
-}
-
-define <2 x float> @test_vrndx_f32(<2 x float> %a) #0 {
-; CHECK: frintx v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrndx1.i = tail call <2 x float> @llvm.rint.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrndx1.i
-}
-
-define <4 x float> @test_vrndxq_f32(<4 x float> %a) #0 {
-; CHECK: frintx v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrndx1.i = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrndx1.i
-}
-
-define <2 x double> @test_vrndxq_f64(<2 x double> %a) #0 {
-; CHECK: frintx v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrndx1.i = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrndx1.i
-}
-
-define <2 x float> @test_vrnd_f32(<2 x float> %a) #0 {
-; CHECK: frintz v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrnd1.i = tail call <2 x float> @llvm.trunc.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrnd1.i
-}
-
-define <4 x float> @test_vrndq_f32(<4 x float> %a) #0 {
-; CHECK: frintz v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrnd1.i = tail call <4 x float> @llvm.trunc.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrnd1.i
-}
-
-define <2 x double> @test_vrndq_f64(<2 x double> %a) #0 {
-; CHECK: frintz v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrnd1.i = tail call <2 x double> @llvm.trunc.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrnd1.i
-}
-
-define <2 x float> @test_vrndi_f32(<2 x float> %a) #0 {
-; CHECK: frinti v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrndi1.i = tail call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrndi1.i
-}
-
-define <4 x float> @test_vrndiq_f32(<4 x float> %a) #0 {
-; CHECK: frinti v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrndi1.i = tail call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrndi1.i
-}
-
-define <2 x double> @test_vrndiq_f64(<2 x double> %a) #0 {
-; CHECK: frinti v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrndi1.i = tail call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrndi1.i
-}
-
-define <2 x i32> @test_vcvt_s32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtzs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvt.i = fptosi <2 x float> %a to <2 x i32>
- ret <2 x i32> %vcvt.i
-}
-
-define <4 x i32> @test_vcvtq_s32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtzs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvt.i = fptosi <4 x float> %a to <4 x i32>
- ret <4 x i32> %vcvt.i
-}
-
-define <2 x i64> @test_vcvtq_s64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtzs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvt.i = fptosi <2 x double> %a to <2 x i64>
- ret <2 x i64> %vcvt.i
-}
-
-define <2 x i32> @test_vcvt_u32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtzu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvt.i = fptoui <2 x float> %a to <2 x i32>
- ret <2 x i32> %vcvt.i
-}
-
-define <4 x i32> @test_vcvtq_u32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtzu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvt.i = fptoui <4 x float> %a to <4 x i32>
- ret <4 x i32> %vcvt.i
-}
-
-define <2 x i64> @test_vcvtq_u64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtzu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvt.i = fptoui <2 x double> %a to <2 x i64>
- ret <2 x i64> %vcvt.i
-}
-
-define <2 x i32> @test_vcvtn_s32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtns v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtns_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtns_f321.i
-}
-
-define <4 x i32> @test_vcvtnq_s32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtns v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtns_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtns_f321.i
-}
-
-define <2 x i64> @test_vcvtnq_s64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtns v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtns_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtns_f641.i
-}
-
-define <2 x i32> @test_vcvtn_u32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtnu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtnu_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtnu_f321.i
-}
-
-define <4 x i32> @test_vcvtnq_u32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtnu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtnu_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtnu_f321.i
-}
-
-define <2 x i64> @test_vcvtnq_u64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtnu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtnu_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtnu_f641.i
-}
-
-define <2 x i32> @test_vcvtp_s32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtps v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtps_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtps_f321.i
-}
-
-define <4 x i32> @test_vcvtpq_s32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtps v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtps_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtps_f321.i
-}
-
-define <2 x i64> @test_vcvtpq_s64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtps v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtps_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtps_f641.i
-}
-
-define <2 x i32> @test_vcvtp_u32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtpu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtpu_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtpu_f321.i
-}
-
-define <4 x i32> @test_vcvtpq_u32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtpu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtpu_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtpu_f321.i
-}
-
-define <2 x i64> @test_vcvtpq_u64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtpu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtpu_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtpu_f641.i
-}
-
-define <2 x i32> @test_vcvtm_s32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtms v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtms_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtms_f321.i
-}
-
-define <4 x i32> @test_vcvtmq_s32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtms v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtms_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtms_f321.i
-}
-
-define <2 x i64> @test_vcvtmq_s64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtms v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtms_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtms_f641.i
-}
-
-define <2 x i32> @test_vcvtm_u32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtmu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtmu_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtmu_f321.i
-}
-
-define <4 x i32> @test_vcvtmq_u32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtmu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtmu_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtmu_f321.i
-}
-
-define <2 x i64> @test_vcvtmq_u64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtmu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtmu_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtmu_f641.i
-}
-
-define <2 x i32> @test_vcvta_s32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtas v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtas_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtas_f321.i
-}
-
-define <4 x i32> @test_vcvtaq_s32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtas v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtas_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtas_f321.i
-}
-
-define <2 x i64> @test_vcvtaq_s64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtas v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtas_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtas_f641.i
-}
-
-define <2 x i32> @test_vcvta_u32_f32(<2 x float> %a) #0 {
-; CHECK: fcvtau v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvtau_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float> %a) #4
- ret <2 x i32> %vcvtau_f321.i
-}
-
-define <4 x i32> @test_vcvtaq_u32_f32(<4 x float> %a) #0 {
-; CHECK: fcvtau v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvtau_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float> %a) #4
- ret <4 x i32> %vcvtau_f321.i
-}
-
-define <2 x i64> @test_vcvtaq_u64_f64(<2 x double> %a) #0 {
-; CHECK: fcvtau v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvtau_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %a) #4
- ret <2 x i64> %vcvtau_f641.i
-}
-
-define <2 x float> @test_vrsqrte_f32(<2 x float> %a) #0 {
-; CHECK: frsqrte v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrsqrte1.i = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrsqrte1.i
-}
-
-define <4 x float> @test_vrsqrteq_f32(<4 x float> %a) #0 {
-; CHECK: frsqrte v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrsqrte1.i = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrsqrte1.i
-}
-
-define <2 x double> @test_vrsqrteq_f64(<2 x double> %a) #0 {
-; CHECK: frsqrte v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrsqrte1.i = tail call <2 x double> @llvm.arm.neon.vrsqrte.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrsqrte1.i
-}
-
-define <2 x float> @test_vrecpe_f32(<2 x float> %a) #0 {
-; CHECK: frecpe v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrecpe1.i = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %a) #4
- ret <2 x float> %vrecpe1.i
-}
-
-define <4 x float> @test_vrecpeq_f32(<4 x float> %a) #0 {
-; CHECK: frecpe v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrecpe1.i = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %a) #4
- ret <4 x float> %vrecpe1.i
-}
-
-define <2 x double> @test_vrecpeq_f64(<2 x double> %a) #0 {
-; CHECK: frecpe v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vrecpe1.i = tail call <2 x double> @llvm.arm.neon.vrecpe.v2f64(<2 x double> %a) #4
- ret <2 x double> %vrecpe1.i
-}
-
-define <2 x i32> @test_vrecpe_u32(<2 x i32> %a) #0 {
-; CHECK: urecpe v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vrecpe1.i = tail call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %a) #4
- ret <2 x i32> %vrecpe1.i
-}
-
-define <4 x i32> @test_vrecpeq_u32(<4 x i32> %a) #0 {
-; CHECK: urecpe v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vrecpe1.i = tail call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %a) #4
- ret <4 x i32> %vrecpe1.i
-}
-
-define <2 x float> @test_vsqrt_f32(<2 x float> %a) #0 {
-; CHECK: fsqrt v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vsqrt1.i = tail call <2 x float> @llvm.sqrt.v2f32(<2 x float> %a) #4
- ret <2 x float> %vsqrt1.i
-}
-
-define <4 x float> @test_vsqrtq_f32(<4 x float> %a) #0 {
-; CHECK: fsqrt v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vsqrt1.i = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a) #4
- ret <4 x float> %vsqrt1.i
-}
-
-define <2 x double> @test_vsqrtq_f64(<2 x double> %a) #0 {
-; CHECK: fsqrt v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vsqrt1.i = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a) #4
- ret <2 x double> %vsqrt1.i
-}
-
-define <2 x float> @test_vcvt_f32_s32(<2 x i32> %a) #0 {
-; CHECK: scvtf v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvt.i = sitofp <2 x i32> %a to <2 x float>
- ret <2 x float> %vcvt.i
-}
-
-define <2 x float> @test_vcvt_f32_u32(<2 x i32> %a) #0 {
-; CHECK: ucvtf v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
- %vcvt.i = uitofp <2 x i32> %a to <2 x float>
- ret <2 x float> %vcvt.i
-}
-
-define <4 x float> @test_vcvtq_f32_s32(<4 x i32> %a) #0 {
-; CHECK: scvtf v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvt.i = sitofp <4 x i32> %a to <4 x float>
- ret <4 x float> %vcvt.i
-}
-
-define <4 x float> @test_vcvtq_f32_u32(<4 x i32> %a) #0 {
-; CHECK: ucvtf v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
- %vcvt.i = uitofp <4 x i32> %a to <4 x float>
- ret <4 x float> %vcvt.i
-}
-
-define <2 x double> @test_vcvtq_f64_s64(<2 x i64> %a) #0 {
-; CHECK: scvtf v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvt.i = sitofp <2 x i64> %a to <2 x double>
- ret <2 x double> %vcvt.i
-}
-
-define <2 x double> @test_vcvtq_f64_u64(<2 x i64> %a) #0 {
-; CHECK: ucvtf v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
- %vcvt.i = uitofp <2 x i64> %a to <2 x double>
- ret <2 x double> %vcvt.i
-}
-
-declare <2 x double> @llvm.sqrt.v2f64(<2 x double>) #2
-
-declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) #2
-
-declare <2 x float> @llvm.sqrt.v2f32(<2 x float>) #2
-
-declare <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32>) #2
-
-declare <2 x double> @llvm.arm.neon.vrecpe.v2f64(<2 x double>) #2
-
-declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) #2
-
-declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>) #2
-
-declare <2 x double> @llvm.arm.neon.vrsqrte.v2f64(<2 x double>) #2
-
-declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) #2
-
-declare <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float>) #2
-
-declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #3
-
-declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) #3
-
-declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>) #3
-
-declare <2 x double> @llvm.trunc.v2f64(<2 x double>) #3
-
-declare <4 x float> @llvm.trunc.v4f32(<4 x float>) #3
-
-declare <2 x float> @llvm.trunc.v2f32(<2 x float>) #3
-
-declare <2 x double> @llvm.rint.v2f64(<2 x double>) #3
-
-declare <4 x float> @llvm.rint.v4f32(<4 x float>) #3
-
-declare <2 x float> @llvm.rint.v2f32(<2 x float>) #3
-
-declare <2 x double> @llvm.floor.v2f64(<2 x double>) #3
-
-declare <4 x float> @llvm.floor.v4f32(<4 x float>) #3
-
-declare <2 x float> @llvm.floor.v2f32(<2 x float>) #3
-
-declare <2 x double> @llvm.ceil.v2f64(<2 x double>) #3
-
-declare <4 x float> @llvm.ceil.v4f32(<4 x float>) #3
-
-declare <2 x float> @llvm.ceil.v2f32(<2 x float>) #3
-
-declare <2 x double> @llvm.round.v2f64(<2 x double>) #3
-
-declare <4 x float> @llvm.round.v4f32(<4 x float>) #3
-
-declare <2 x float> @llvm.round.v2f32(<2 x float>) #3
-
-declare <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double>) #2
-
-declare <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float>) #2
-
-declare <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float>) #2
-
-declare <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double>) #2
-
-declare <2 x float> @llvm.aarch64.neon.fcvtn.v2f32.v2f64(<2 x double>) #2
-
-declare <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64>) #2
-
-declare <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32>) #2
-
-declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) #2
-
-declare <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64>) #2
-
-declare <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32>) #2
-
-declare <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16>) #2
-
-declare <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64>) #2
-
-declare <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32>) #2
-
-declare <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16>) #2
-
-declare <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8>) #2
-
-declare <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8>) #2
-
-declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) #2
-
-declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) #2
-
-declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) #2
-
-declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) #2
-
-declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) #2
-
-declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>, i1) #2
-
-declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) #2
-
-declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>, i1) #2
-
-declare <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32>) #2
-
-declare <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16>) #2
-
-declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) #2
-
-declare <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8>) #2
-
-declare <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64>, <2 x i64>) #2
-
-declare <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32>, <4 x i32>) #2
-
-declare <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32>, <2 x i32>) #2
-
-declare <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16>, <8 x i16>) #2
-
-declare <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16>, <4 x i16>) #2
-
-declare <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8>, <16 x i8>) #2
-
-declare <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8>, <8 x i8>) #2
-
-declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #3
-
-declare <4 x float> @llvm.fabs.v4f32(<4 x float>) #3
-
-declare <2 x float> @llvm.fabs.v2f32(<2 x float>) #3
-
-declare <2 x i64> @llvm.arm.neon.vabs.v2i64(<2 x i64>) #2
-
-declare <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32>) #2
-
-declare <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16>) #2
-
-declare <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8>) #2
-
-declare <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8>) #2
-
-declare <2 x i64> @llvm.arm.neon.vqneg.v2i64(<2 x i64>) #2
-
-declare <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32>) #2
-
-declare <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16>) #2
-
-declare <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8>) #2
-
-declare <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8>) #2
-
-declare <2 x i64> @llvm.arm.neon.vqabs.v2i64(<2 x i64>) #2
-
-declare <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32>) #2
-
-declare <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16>) #2
-
-declare <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8>) #2
-
-declare <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8>) #2
-
-declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) #2
-
-declare <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32>, <8 x i16>) #2
-
-declare <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16>, <16 x i8>) #2
-
-declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) #2
-
-declare <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32>, <8 x i16>) #2
-
-declare <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16>, <16 x i8>) #2
-
-declare <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64>, <2 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32>, <4 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16>, <8 x i8>) #2
-
-declare <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64>, <2 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32>, <4 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16>, <8 x i8>) #2
-
-declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) #2
-
-declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) #2
-
-declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) #2
-
-declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) #2
-
-declare <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16>) #2
-
-declare <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8>) #2
-
-declare <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8>) #2
-
-declare <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32>) #2
-
-declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) #2
-
-declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) #2
-
-declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) #2
-
-
-define <1 x i64> @test_vcvt_s64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvt_s64_f64
-; CHECK: fcvtzs d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fptosi <1 x double> %a to <1 x i64>
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvt_u64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvt_u64_f64
-; CHECK: fcvtzu d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fptoui <1 x double> %a to <1 x i64>
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvtn_s64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvtn_s64_f64
-; CHECK: fcvtns d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvtn_u64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvtn_u64_f64
-; CHECK: fcvtnu d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvtp_s64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvtp_s64_f64
-; CHECK: fcvtps d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvtp_u64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvtp_u64_f64
-; CHECK: fcvtpu d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvtm_s64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvtm_s64_f64
-; CHECK: fcvtms d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvtm_u64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvtm_u64_f64
-; CHECK: fcvtmu d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvta_s64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvta_s64_f64
-; CHECK: fcvtas d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvta_u64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvta_u64_f64
-; CHECK: fcvtau d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double> %a)
- ret <1 x i64> %1
-}
-
-define <1 x double> @test_vcvt_f64_s64(<1 x i64> %a) {
-; CHECK-LABEL: test_vcvt_f64_s64
-; CHECK: scvtf d{{[0-9]+}}, d{{[0-9]+}}
- %1 = sitofp <1 x i64> %a to <1 x double>
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vcvt_f64_u64(<1 x i64> %a) {
-; CHECK-LABEL: test_vcvt_f64_u64
-; CHECK: ucvtf d{{[0-9]+}}, d{{[0-9]+}}
- %1 = uitofp <1 x i64> %a to <1 x double>
- ret <1 x double> %1
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double>)
-
-define <1 x double> @test_vrndn_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrndn_f64
-; CHECK: frintn d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.aarch64.neon.frintn.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrnda_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrnda_f64
-; CHECK: frinta d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.round.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrndp_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrndp_f64
-; CHECK: frintp d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.ceil.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrndm_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrndm_f64
-; CHECK: frintm d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.floor.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrndx_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrndx_f64
-; CHECK: frintx d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.rint.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrnd_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrnd_f64
-; CHECK: frintz d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrndi_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrndi_f64
-; CHECK: frinti d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-declare <1 x double> @llvm.nearbyint.v1f64(<1 x double>)
-declare <1 x double> @llvm.trunc.v1f64(<1 x double>)
-declare <1 x double> @llvm.rint.v1f64(<1 x double>)
-declare <1 x double> @llvm.floor.v1f64(<1 x double>)
-declare <1 x double> @llvm.ceil.v1f64(<1 x double>)
-declare <1 x double> @llvm.round.v1f64(<1 x double>)
-declare <1 x double> @llvm.aarch64.neon.frintn.v1f64(<1 x double>)
-
-define <1 x double> @test_vrsqrte_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrsqrte_f64
-; CHECK: frsqrte d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrecpe_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vrecpe_f64
-; CHECK: frecpe d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vsqrt_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vsqrt_f64
-; CHECK: fsqrt d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.sqrt.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrecps_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vrecps_f64
-; CHECK: frecps d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vrsqrts_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vrsqrts_f64
-; CHECK: frsqrts d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-declare <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.sqrt.v1f64(<1 x double>)
-declare <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double>)
-declare <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double>)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-mla-mls.ll b/test/CodeGen/AArch64/neon-mla-mls.ll
index 23e9223a8b7b..71bb0e70abfa 100644
--- a/test/CodeGen/AArch64/neon-mla-mls.ll
+++ b/test/CodeGen/AArch64/neon-mla-mls.ll
@@ -2,84 +2,84 @@
define <8 x i8> @mla8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
-;CHECK: mla {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+;CHECK: mla {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = mul <8 x i8> %A, %B;
%tmp2 = add <8 x i8> %C, %tmp1;
ret <8 x i8> %tmp2
}
define <16 x i8> @mla16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
-;CHECK: mla {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+;CHECK: mla {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = mul <16 x i8> %A, %B;
%tmp2 = add <16 x i8> %C, %tmp1;
ret <16 x i8> %tmp2
}
define <4 x i16> @mla4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
-;CHECK: mla {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
+;CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp1 = mul <4 x i16> %A, %B;
%tmp2 = add <4 x i16> %C, %tmp1;
ret <4 x i16> %tmp2
}
define <8 x i16> @mla8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
-;CHECK: mla {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
+;CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp1 = mul <8 x i16> %A, %B;
%tmp2 = add <8 x i16> %C, %tmp1;
ret <8 x i16> %tmp2
}
define <2 x i32> @mla2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
-;CHECK: mla {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+;CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp1 = mul <2 x i32> %A, %B;
%tmp2 = add <2 x i32> %C, %tmp1;
ret <2 x i32> %tmp2
}
define <4 x i32> @mla4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
-;CHECK: mla {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+;CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp1 = mul <4 x i32> %A, %B;
%tmp2 = add <4 x i32> %C, %tmp1;
ret <4 x i32> %tmp2
}
define <8 x i8> @mls8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
-;CHECK: mls {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+;CHECK: mls {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp1 = mul <8 x i8> %A, %B;
%tmp2 = sub <8 x i8> %C, %tmp1;
ret <8 x i8> %tmp2
}
define <16 x i8> @mls16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
-;CHECK: mls {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+;CHECK: mls {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp1 = mul <16 x i8> %A, %B;
%tmp2 = sub <16 x i8> %C, %tmp1;
ret <16 x i8> %tmp2
}
define <4 x i16> @mls4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
-;CHECK: mls {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
+;CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp1 = mul <4 x i16> %A, %B;
%tmp2 = sub <4 x i16> %C, %tmp1;
ret <4 x i16> %tmp2
}
define <8 x i16> @mls8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
-;CHECK: mls {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
+;CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp1 = mul <8 x i16> %A, %B;
%tmp2 = sub <8 x i16> %C, %tmp1;
ret <8 x i16> %tmp2
}
define <2 x i32> @mls2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
-;CHECK: mls {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+;CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp1 = mul <2 x i32> %A, %B;
%tmp2 = sub <2 x i32> %C, %tmp1;
ret <2 x i32> %tmp2
}
define <4 x i32> @mls4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
-;CHECK: mls {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+;CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp1 = mul <4 x i32> %A, %B;
%tmp2 = sub <4 x i32> %C, %tmp1;
ret <4 x i32> %tmp2
diff --git a/test/CodeGen/AArch64/neon-mov.ll b/test/CodeGen/AArch64/neon-mov.ll
index 60b13b8b9a0e..40649aeb1b8e 100644
--- a/test/CodeGen/AArch64/neon-mov.ll
+++ b/test/CodeGen/AArch64/neon-mov.ll
@@ -1,216 +1,259 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
define <8 x i8> @movi8b() {
-;CHECK: movi {{v[0-31]+}}.8b, #0x8
+; CHECK-LABEL: movi8b:
+; CHECK: movi {{v[0-9]+}}.8b, #{{0x8|8}}
ret <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
}
define <16 x i8> @movi16b() {
-;CHECK: movi {{v[0-31]+}}.16b, #0x8
+; CHECK-LABEL: movi16b:
+; CHECK: movi {{v[0-9]+}}.16b, #{{0x8|8}}
ret <16 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
}
define <2 x i32> @movi2s_lsl0() {
-;CHECK: movi {{v[0-31]+}}.2s, #0xff
+; CHECK-LABEL: movi2s_lsl0:
+; CHECK: movi {{d[0-9]+}}, #0x0000ff000000ff
ret <2 x i32> < i32 255, i32 255 >
}
define <2 x i32> @movi2s_lsl8() {
-;CHECK: movi {{v[0-31]+}}.2s, #0xff, lsl #8
+; CHECK-LABEL: movi2s_lsl8:
+; CHECK: movi {{d[0-9]+}}, #0x00ff000000ff00
ret <2 x i32> < i32 65280, i32 65280 >
}
define <2 x i32> @movi2s_lsl16() {
-;CHECK: movi {{v[0-31]+}}.2s, #0xff, lsl #16
+; CHECK-LABEL: movi2s_lsl16:
+; CHECK: movi {{d[0-9]+}}, #0xff000000ff0000
ret <2 x i32> < i32 16711680, i32 16711680 >
}
define <2 x i32> @movi2s_lsl24() {
-;CHECK: movi {{v[0-31]+}}.2s, #0xff, lsl #24
+; CHECK-LABEL: movi2s_lsl24:
+; CHECK: movi {{d[0-9]+}}, #0xff000000ff000000
ret <2 x i32> < i32 4278190080, i32 4278190080 >
}
define <4 x i32> @movi4s_lsl0() {
-;CHECK: movi {{v[0-31]+}}.4s, #0xff
+; CHECK-LABEL: movi4s_lsl0:
+; CHECK: movi {{v[0-9]+}}.2d, #0x0000ff000000ff
ret <4 x i32> < i32 255, i32 255, i32 255, i32 255 >
}
define <4 x i32> @movi4s_lsl8() {
-;CHECK: movi {{v[0-31]+}}.4s, #0xff, lsl #8
+; CHECK-LABEL: movi4s_lsl8:
+; CHECK: movi {{v[0-9]+}}.2d, #0x00ff000000ff00
ret <4 x i32> < i32 65280, i32 65280, i32 65280, i32 65280 >
}
define <4 x i32> @movi4s_lsl16() {
-;CHECK: movi {{v[0-31]+}}.4s, #0xff, lsl #16
+; CHECK-LABEL: movi4s_lsl16:
+; CHECK: movi {{v[0-9]+}}.2d, #0xff000000ff0000
ret <4 x i32> < i32 16711680, i32 16711680, i32 16711680, i32 16711680 >
}
define <4 x i32> @movi4s_lsl24() {
-;CHECK: movi {{v[0-31]+}}.4s, #0xff, lsl #24
+; CHECK-LABEL: movi4s_lsl24:
+; CHECK: movi {{v[0-9]+}}.2d, #0xff000000ff000000
ret <4 x i32> < i32 4278190080, i32 4278190080, i32 4278190080, i32 4278190080 >
}
define <4 x i16> @movi4h_lsl0() {
-;CHECK: movi {{v[0-31]+}}.4h, #0xff
+; CHECK-LABEL: movi4h_lsl0:
+; CHECK: movi {{d[0-9]+}}, #0xff00ff00ff00ff
ret <4 x i16> < i16 255, i16 255, i16 255, i16 255 >
}
define <4 x i16> @movi4h_lsl8() {
-;CHECK: movi {{v[0-31]+}}.4h, #0xff, lsl #8
+; CHECK-LABEL: movi4h_lsl8:
+; CHECK: movi d0, #0xff00ff00ff00ff00
ret <4 x i16> < i16 65280, i16 65280, i16 65280, i16 65280 >
}
define <8 x i16> @movi8h_lsl0() {
-;CHECK: movi {{v[0-31]+}}.8h, #0xff
+; CHECK-LABEL: movi8h_lsl0:
+; CHECK: movi v0.2d, #0xff00ff00ff00ff
ret <8 x i16> < i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255 >
}
define <8 x i16> @movi8h_lsl8() {
-;CHECK: movi {{v[0-31]+}}.8h, #0xff, lsl #8
+; CHECK-LABEL: movi8h_lsl8:
+; CHECK: movi v0.2d, #0xff00ff00ff00ff00
ret <8 x i16> < i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280 >
}
define <2 x i32> @mvni2s_lsl0() {
-;CHECK: mvni {{v[0-31]+}}.2s, #0x10
+; CHECK-LABEL: mvni2s_lsl0:
+; CHECK: mvni {{v[0-9]+}}.2s, #{{0x10|16}}
ret <2 x i32> < i32 4294967279, i32 4294967279 >
}
define <2 x i32> @mvni2s_lsl8() {
-;CHECK: mvni {{v[0-31]+}}.2s, #0x10, lsl #8
+; CHECK-LABEL: mvni2s_lsl8:
+; CHECK: mvni {{v[0-9]+}}.2s, #{{0x10|16}}, lsl #8
ret <2 x i32> < i32 4294963199, i32 4294963199 >
}
define <2 x i32> @mvni2s_lsl16() {
-;CHECK: mvni {{v[0-31]+}}.2s, #0x10, lsl #16
+; CHECK-LABEL: mvni2s_lsl16:
+; CHECK: mvni {{v[0-9]+}}.2s, #{{0x10|16}}, lsl #16
ret <2 x i32> < i32 4293918719, i32 4293918719 >
}
define <2 x i32> @mvni2s_lsl24() {
-;CHECK: mvni {{v[0-31]+}}.2s, #0x10, lsl #24
+; CHECK-LABEL: mvni2s_lsl24:
+; CHECK: mvni {{v[0-9]+}}.2s, #{{0x10|16}}, lsl #24
ret <2 x i32> < i32 4026531839, i32 4026531839 >
}
define <4 x i32> @mvni4s_lsl0() {
-;CHECK: mvni {{v[0-31]+}}.4s, #0x10
+; CHECK-LABEL: mvni4s_lsl0:
+; CHECK: mvni {{v[0-9]+}}.4s, #{{0x10|16}}
ret <4 x i32> < i32 4294967279, i32 4294967279, i32 4294967279, i32 4294967279 >
}
define <4 x i32> @mvni4s_lsl8() {
-;CHECK: mvni {{v[0-31]+}}.4s, #0x10, lsl #8
+; CHECK-LABEL: mvni4s_lsl8:
+; CHECK: mvni {{v[0-9]+}}.4s, #{{0x10|16}}, lsl #8
ret <4 x i32> < i32 4294963199, i32 4294963199, i32 4294963199, i32 4294963199 >
}
define <4 x i32> @mvni4s_lsl16() {
-;CHECK: mvni {{v[0-31]+}}.4s, #0x10, lsl #16
+; CHECK-LABEL: mvni4s_lsl16:
+; CHECK: mvni {{v[0-9]+}}.4s, #{{0x10|16}}, lsl #16
ret <4 x i32> < i32 4293918719, i32 4293918719, i32 4293918719, i32 4293918719 >
}
define <4 x i32> @mvni4s_lsl24() {
-;CHECK: mvni {{v[0-31]+}}.4s, #0x10, lsl #24
+; CHECK-LABEL: mvni4s_lsl24:
+; CHECK: mvni {{v[0-9]+}}.4s, #{{0x10|16}}, lsl #24
ret <4 x i32> < i32 4026531839, i32 4026531839, i32 4026531839, i32 4026531839 >
}
define <4 x i16> @mvni4h_lsl0() {
-;CHECK: mvni {{v[0-31]+}}.4h, #0x10
+; CHECK-LABEL: mvni4h_lsl0:
+; CHECK: mvni {{v[0-9]+}}.4h, #{{0x10|16}}
ret <4 x i16> < i16 65519, i16 65519, i16 65519, i16 65519 >
}
define <4 x i16> @mvni4h_lsl8() {
-;CHECK: mvni {{v[0-31]+}}.4h, #0x10, lsl #8
+; CHECK-LABEL: mvni4h_lsl8:
+; CHECK: mvni {{v[0-9]+}}.4h, #{{0x10|16}}, lsl #8
ret <4 x i16> < i16 61439, i16 61439, i16 61439, i16 61439 >
}
define <8 x i16> @mvni8h_lsl0() {
-;CHECK: mvni {{v[0-31]+}}.8h, #0x10
+; CHECK-LABEL: mvni8h_lsl0:
+; CHECK: mvni {{v[0-9]+}}.8h, #{{0x10|16}}
ret <8 x i16> < i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519 >
}
define <8 x i16> @mvni8h_lsl8() {
-;CHECK: mvni {{v[0-31]+}}.8h, #0x10, lsl #8
+; CHECK-LABEL: mvni8h_lsl8:
+; CHECK: mvni {{v[0-9]+}}.8h, #{{0x10|16}}, lsl #8
ret <8 x i16> < i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439 >
}
define <2 x i32> @movi2s_msl8(<2 x i32> %a) {
-;CHECK: movi {{v[0-31]+}}.2s, #0xff, msl #8
+; CHECK-LABEL: movi2s_msl8:
+; CHECK: movi {{d[0-9]+}}, #0x00ffff0000ffff
ret <2 x i32> < i32 65535, i32 65535 >
}
define <2 x i32> @movi2s_msl16() {
-;CHECK: movi {{v[0-31]+}}.2s, #0xff, msl #16
+; CHECK-LABEL: movi2s_msl16:
+; CHECK: movi d0, #0xffffff00ffffff
ret <2 x i32> < i32 16777215, i32 16777215 >
}
define <4 x i32> @movi4s_msl8() {
-;CHECK: movi {{v[0-31]+}}.4s, #0xff, msl #8
+; CHECK-LABEL: movi4s_msl8:
+; CHECK: movi v0.2d, #0x00ffff0000ffff
ret <4 x i32> < i32 65535, i32 65535, i32 65535, i32 65535 >
}
define <4 x i32> @movi4s_msl16() {
-;CHECK: movi {{v[0-31]+}}.4s, #0xff, msl #16
+; CHECK-LABEL: movi4s_msl16:
+; CHECK: movi v0.2d, #0xffffff00ffffff
ret <4 x i32> < i32 16777215, i32 16777215, i32 16777215, i32 16777215 >
}
define <2 x i32> @mvni2s_msl8() {
-;CHECK: mvni {{v[0-31]+}}.2s, #0x10, msl #8
+; CHECK-LABEL: mvni2s_msl8:
+; CHECK: mvni {{v[0-9]+}}.2s, #{{0x10|16}}, msl #8
ret <2 x i32> < i32 18446744073709547264, i32 18446744073709547264>
}
define <2 x i32> @mvni2s_msl16() {
-;CHECK: mvni {{v[0-31]+}}.2s, #0x10, msl #16
+; CHECK-LABEL: mvni2s_msl16:
+; CHECK: mvni {{v[0-9]+}}.2s, #{{0x10|16}}, msl #16
ret <2 x i32> < i32 18446744073708437504, i32 18446744073708437504>
}
define <4 x i32> @mvni4s_msl8() {
-;CHECK: mvni {{v[0-31]+}}.4s, #0x10, msl #8
+; CHECK-LABEL: mvni4s_msl8:
+; CHECK: mvni {{v[0-9]+}}.4s, #{{0x10|16}}, msl #8
ret <4 x i32> < i32 18446744073709547264, i32 18446744073709547264, i32 18446744073709547264, i32 18446744073709547264>
}
define <4 x i32> @mvni4s_msl16() {
-;CHECK: mvni {{v[0-31]+}}.4s, #0x10, msl #16
+; CHECK-LABEL: mvni4s_msl16:
+; CHECK: mvni {{v[0-9]+}}.4s, #{{0x10|16}}, msl #16
ret <4 x i32> < i32 18446744073708437504, i32 18446744073708437504, i32 18446744073708437504, i32 18446744073708437504>
}
define <2 x i64> @movi2d() {
-;CHECK: movi {{v[0-31]+}}.2d, #0xff0000ff0000ffff
+; CHECK-LABEL: movi2d:
+; CHECK: movi {{v[0-9]+}}.2d, #0xff0000ff0000ffff
ret <2 x i64> < i64 18374687574888349695, i64 18374687574888349695 >
}
define <1 x i64> @movid() {
-;CHECK: movi {{d[0-31]+}}, #0xff0000ff0000ffff
+; CHECK-LABEL: movid:
+; CHECK: movi {{d[0-9]+}}, #0xff0000ff0000ffff
ret <1 x i64> < i64 18374687574888349695 >
}
define <2 x float> @fmov2s() {
-;CHECK: fmov {{v[0-31]+}}.2s, #-12.00000000
+; CHECK-LABEL: fmov2s:
+; CHECK: fmov {{v[0-9]+}}.2s, #{{-12.00000000|-1.200000e\+01}}
ret <2 x float> < float -1.2e1, float -1.2e1>
}
define <4 x float> @fmov4s() {
-;CHECK: fmov {{v[0-31]+}}.4s, #-12.00000000
+; CHECK-LABEL: fmov4s:
+; CHECK: fmov {{v[0-9]+}}.4s, #{{-12.00000000|-1.200000e\+01}}
ret <4 x float> < float -1.2e1, float -1.2e1, float -1.2e1, float -1.2e1>
}
define <2 x double> @fmov2d() {
-;CHECK: fmov {{v[0-31]+}}.2d, #-12.00000000
+; CHECK-LABEL: fmov2d:
+; CHECK: fmov {{v[0-9]+}}.2d, #{{-12.00000000|-1.200000e\+01}}
ret <2 x double> < double -1.2e1, double -1.2e1>
}
define <2 x i32> @movi1d_1() {
-; CHECK: movi d0, #0xffffffff0000
+; CHECK-LABEL: movi1d_1:
+; CHECK: movi d0, #0x{{0*}}ffffffff0000
ret <2 x i32> < i32 -65536, i32 65535>
}
declare <2 x i32> @test_movi1d(<2 x i32>, <2 x i32>)
define <2 x i32> @movi1d() {
-; CHECK: movi d1, #0xffffffff0000
+; CHECK-LABEL: movi1d:
+; CHECK: adrp {{x[0-9]+}}, .{{[A-Z0-9_]+}}
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.{{[A-Z0-9_]+}}]
+; CHECK-NEXT: movi d1, #0x{{0*}}ffffffff0000
%1 = tail call <2 x i32> @test_movi1d(<2 x i32> <i32 -2147483648, i32 2147450880>, <2 x i32> <i32 -65536, i32 65535>)
ret <2 x i32> %1
}
diff --git a/test/CodeGen/AArch64/neon-mul-div.ll b/test/CodeGen/AArch64/neon-mul-div.ll
deleted file mode 100644
index e1be31326638..000000000000
--- a/test/CodeGen/AArch64/neon-mul-div.ll
+++ /dev/null
@@ -1,181 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-
-define <8 x i8> @mul8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: mul {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
- %tmp3 = mul <8 x i8> %A, %B;
- ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @mul16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: mul {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
- %tmp3 = mul <16 x i8> %A, %B;
- ret <16 x i8> %tmp3
-}
-
-define <4 x i16> @mul4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: mul {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
- %tmp3 = mul <4 x i16> %A, %B;
- ret <4 x i16> %tmp3
-}
-
-define <8 x i16> @mul8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: mul {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
- %tmp3 = mul <8 x i16> %A, %B;
- ret <8 x i16> %tmp3
-}
-
-define <2 x i32> @mul2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: mul {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
- %tmp3 = mul <2 x i32> %A, %B;
- ret <2 x i32> %tmp3
-}
-
-define <4 x i32> @mul4x32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: mul {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
- %tmp3 = mul <4 x i32> %A, %B;
- ret <4 x i32> %tmp3
-}
-
- define <2 x float> @mul2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fmul {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
- %tmp3 = fmul <2 x float> %A, %B;
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @mul4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fmul {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
- %tmp3 = fmul <4 x float> %A, %B;
- ret <4 x float> %tmp3
-}
-define <2 x double> @mul2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: fmul {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
- %tmp3 = fmul <2 x double> %A, %B;
- ret <2 x double> %tmp3
-}
-
-
- define <2 x float> @div2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fdiv {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
- %tmp3 = fdiv <2 x float> %A, %B;
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @div4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fdiv {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
- %tmp3 = fdiv <4 x float> %A, %B;
- ret <4 x float> %tmp3
-}
-define <2 x double> @div2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: fdiv {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
- %tmp3 = fdiv <2 x double> %A, %B;
- ret <2 x double> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8>, <8 x i8>)
-declare <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8>, <16 x i8>)
-
-define <8 x i8> @poly_mulv8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: poly_mulv8i8:
- %prod = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: pmul v0.8b, v0.8b, v1.8b
- ret <8 x i8> %prod
-}
-
-define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: poly_mulv16i8:
- %prod = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: pmul v0.16b, v0.16b, v1.16b
- ret <16 x i8> %prod
-}
-
-declare <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16>, <8 x i16>)
-declare <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i16> @test_sqdmulh_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sqdmulh_v4i16:
- %prod = call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sqdmulh v0.4h, v0.4h, v1.4h
- ret <4 x i16> %prod
-}
-
-define <8 x i16> @test_sqdmulh_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sqdmulh_v8i16:
- %prod = call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sqdmulh v0.8h, v0.8h, v1.8h
- ret <8 x i16> %prod
-}
-
-define <2 x i32> @test_sqdmulh_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sqdmulh_v2i32:
- %prod = call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sqdmulh v0.2s, v0.2s, v1.2s
- ret <2 x i32> %prod
-}
-
-define <4 x i32> @test_sqdmulh_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sqdmulh_v4i32:
- %prod = call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sqdmulh v0.4s, v0.4s, v1.4s
- ret <4 x i32> %prod
-}
-
-declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
-declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>)
-declare <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32>, <2 x i32>)
-declare <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i16> @test_sqrdmulh_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sqrdmulh_v4i16:
- %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sqrdmulh v0.4h, v0.4h, v1.4h
- ret <4 x i16> %prod
-}
-
-define <8 x i16> @test_sqrdmulh_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sqrdmulh_v8i16:
- %prod = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sqrdmulh v0.8h, v0.8h, v1.8h
- ret <8 x i16> %prod
-}
-
-define <2 x i32> @test_sqrdmulh_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sqrdmulh_v2i32:
- %prod = call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sqrdmulh v0.2s, v0.2s, v1.2s
- ret <2 x i32> %prod
-}
-
-define <4 x i32> @test_sqrdmulh_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sqrdmulh_v4i32:
- %prod = call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sqrdmulh v0.4s, v0.4s, v1.4s
- ret <4 x i32> %prod
-}
-
-declare <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)
-
-define <2 x float> @fmulx_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: fmulx v0.2s, v0.2s, v1.2s
- %val = call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %lhs, <2 x float> %rhs)
- ret <2 x float> %val
-}
-
-define <4 x float> @fmulx_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: fmulx v0.4s, v0.4s, v1.4s
- %val = call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %lhs, <4 x float> %rhs)
- ret <4 x float> %val
-}
-
-define <2 x double> @fmulx_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
-; Using registers other than v0, v1 and v2 are possible, but would be odd.
-; CHECK: fmulx v0.2d, v0.2d, v1.2d
- %val = call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %lhs, <2 x double> %rhs)
- ret <2 x double> %val
-}
diff --git a/test/CodeGen/AArch64/neon-or-combine.ll b/test/CodeGen/AArch64/neon-or-combine.ll
new file mode 100644
index 000000000000..260f6935ddef
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-or-combine.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+; Check that the DAGCombiner does not crash with an assertion failure
+; when performing a target-specific combine to simplify an 'or' dag node
+; according to the following rule:
+; (or (and B, A), (and C, ~A)) => (VBSL A, B, C)
+; The assertion failure was caused by an invalid comparison between APInt
+; values with different 'BitWidth'.
+
+define <8 x i8> @test1(<8 x i8> %a, <8 x i8> %b) {
+ %tmp1 = and <8 x i8> %a, < i8 -1, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 0, i8 0 >
+ %tmp2 = and <8 x i8> %b, < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0 >
+ %tmp3 = or <8 x i8> %tmp1, %tmp2
+ ret <8 x i8> %tmp3
+}
+
+; CHECK-LABEL: test1
+; CHECK: ret
+
+define <16 x i8> @test2(<16 x i8> %a, <16 x i8> %b) {
+ %tmp1 = and <16 x i8> %a, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp2 = and <16 x i8> %b, < i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0 >
+ %tmp3 = or <16 x i8> %tmp1, %tmp2
+ ret <16 x i8> %tmp3
+}
+
+; CHECK-LABEL: test2
+; CHECK: ret
+
diff --git a/test/CodeGen/AArch64/neon-perm.ll b/test/CodeGen/AArch64/neon-perm.ll
index fa4d54dc745f..4f8571db7480 100644
--- a/test/CodeGen/AArch64/neon-perm.ll
+++ b/test/CodeGen/AArch64/neon-perm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
%struct.int8x8x2_t = type { [2 x <8 x i8>] }
%struct.int16x4x2_t = type { [2 x <4 x i16>] }
@@ -20,7 +20,7 @@
%struct.poly16x8x2_t = type { [2 x <8 x i16>] }
define <8 x i8> @test_vuzp1_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp1_s8:
+; CHECK-LABEL: test_vuzp1_s8:
; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -28,7 +28,7 @@ entry:
}
define <16 x i8> @test_vuzp1q_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzp1q_s8:
+; CHECK-LABEL: test_vuzp1q_s8:
; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -36,7 +36,7 @@ entry:
}
define <4 x i16> @test_vuzp1_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp1_s16:
+; CHECK-LABEL: test_vuzp1_s16:
; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -44,7 +44,7 @@ entry:
}
define <8 x i16> @test_vuzp1q_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzp1q_s16:
+; CHECK-LABEL: test_vuzp1q_s16:
; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -52,15 +52,15 @@ entry:
}
define <2 x i32> @test_vuzp1_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vuzp1_s32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vuzp1_s32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vuzp1q_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vuzp1q_s32:
+; CHECK-LABEL: test_vuzp1q_s32:
; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -68,15 +68,15 @@ entry:
}
define <2 x i64> @test_vuzp1q_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vuzp1q_s64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vuzp1q_s64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle.i
}
define <8 x i8> @test_vuzp1_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp1_u8:
+; CHECK-LABEL: test_vuzp1_u8:
; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -84,7 +84,7 @@ entry:
}
define <16 x i8> @test_vuzp1q_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzp1q_u8:
+; CHECK-LABEL: test_vuzp1q_u8:
; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -92,7 +92,7 @@ entry:
}
define <4 x i16> @test_vuzp1_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp1_u16:
+; CHECK-LABEL: test_vuzp1_u16:
; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -100,7 +100,7 @@ entry:
}
define <8 x i16> @test_vuzp1q_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzp1q_u16:
+; CHECK-LABEL: test_vuzp1q_u16:
; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -108,15 +108,15 @@ entry:
}
define <2 x i32> @test_vuzp1_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vuzp1_u32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vuzp1_u32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vuzp1q_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vuzp1q_u32:
+; CHECK-LABEL: test_vuzp1q_u32:
; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -124,23 +124,23 @@ entry:
}
define <2 x i64> @test_vuzp1q_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vuzp1q_u64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vuzp1q_u64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle.i
}
define <2 x float> @test_vuzp1_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vuzp1_f32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vuzp1_f32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
ret <2 x float> %shuffle.i
}
define <4 x float> @test_vuzp1q_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vuzp1q_f32:
+; CHECK-LABEL: test_vuzp1q_f32:
; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -148,15 +148,15 @@ entry:
}
define <2 x double> @test_vuzp1q_f64(<2 x double> %a, <2 x double> %b) {
-; CHECK: test_vuzp1q_f64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vuzp1q_f64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
ret <2 x double> %shuffle.i
}
define <8 x i8> @test_vuzp1_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp1_p8:
+; CHECK-LABEL: test_vuzp1_p8:
; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -164,7 +164,7 @@ entry:
}
define <16 x i8> @test_vuzp1q_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzp1q_p8:
+; CHECK-LABEL: test_vuzp1q_p8:
; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -172,7 +172,7 @@ entry:
}
define <4 x i16> @test_vuzp1_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp1_p16:
+; CHECK-LABEL: test_vuzp1_p16:
; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -180,7 +180,7 @@ entry:
}
define <8 x i16> @test_vuzp1q_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzp1q_p16:
+; CHECK-LABEL: test_vuzp1q_p16:
; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -188,7 +188,7 @@ entry:
}
define <8 x i8> @test_vuzp2_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp2_s8:
+; CHECK-LABEL: test_vuzp2_s8:
; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -196,7 +196,7 @@ entry:
}
define <16 x i8> @test_vuzp2q_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzp2q_s8:
+; CHECK-LABEL: test_vuzp2q_s8:
; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -204,7 +204,7 @@ entry:
}
define <4 x i16> @test_vuzp2_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp2_s16:
+; CHECK-LABEL: test_vuzp2_s16:
; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -212,7 +212,7 @@ entry:
}
define <8 x i16> @test_vuzp2q_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzp2q_s16:
+; CHECK-LABEL: test_vuzp2q_s16:
; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -220,15 +220,15 @@ entry:
}
define <2 x i32> @test_vuzp2_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vuzp2_s32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vuzp2_s32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vuzp2q_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vuzp2q_s32:
+; CHECK-LABEL: test_vuzp2q_s32:
; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -236,16 +236,15 @@ entry:
}
define <2 x i64> @test_vuzp2q_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vuzp2q_s64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
-; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: test_vuzp2q_s64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle.i
}
define <8 x i8> @test_vuzp2_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp2_u8:
+; CHECK-LABEL: test_vuzp2_u8:
; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -253,7 +252,7 @@ entry:
}
define <16 x i8> @test_vuzp2q_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzp2q_u8:
+; CHECK-LABEL: test_vuzp2q_u8:
; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -261,7 +260,7 @@ entry:
}
define <4 x i16> @test_vuzp2_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp2_u16:
+; CHECK-LABEL: test_vuzp2_u16:
; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -269,7 +268,7 @@ entry:
}
define <8 x i16> @test_vuzp2q_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzp2q_u16:
+; CHECK-LABEL: test_vuzp2q_u16:
; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -277,15 +276,15 @@ entry:
}
define <2 x i32> @test_vuzp2_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vuzp2_u32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vuzp2_u32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vuzp2q_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vuzp2q_u32:
+; CHECK-LABEL: test_vuzp2q_u32:
; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -293,24 +292,23 @@ entry:
}
define <2 x i64> @test_vuzp2q_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vuzp2q_u64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
-; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: test_vuzp2q_u64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle.i
}
define <2 x float> @test_vuzp2_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vuzp2_f32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vuzp2_f32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
ret <2 x float> %shuffle.i
}
define <4 x float> @test_vuzp2q_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vuzp2q_f32:
+; CHECK-LABEL: test_vuzp2q_f32:
; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -318,16 +316,15 @@ entry:
}
define <2 x double> @test_vuzp2q_f64(<2 x double> %a, <2 x double> %b) {
-; CHECK: test_vuzp2q_f64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
-; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-LABEL: test_vuzp2q_f64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
ret <2 x double> %shuffle.i
}
define <8 x i8> @test_vuzp2_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp2_p8:
+; CHECK-LABEL: test_vuzp2_p8:
; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -335,7 +332,7 @@ entry:
}
define <16 x i8> @test_vuzp2q_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzp2q_p8:
+; CHECK-LABEL: test_vuzp2q_p8:
; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -343,7 +340,7 @@ entry:
}
define <4 x i16> @test_vuzp2_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp2_p16:
+; CHECK-LABEL: test_vuzp2_p16:
; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -351,7 +348,7 @@ entry:
}
define <8 x i16> @test_vuzp2q_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzp2q_p16:
+; CHECK-LABEL: test_vuzp2q_p16:
; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -359,7 +356,7 @@ entry:
}
define <8 x i8> @test_vzip1_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip1_s8:
+; CHECK-LABEL: test_vzip1_s8:
; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -367,7 +364,7 @@ entry:
}
define <16 x i8> @test_vzip1q_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzip1q_s8:
+; CHECK-LABEL: test_vzip1q_s8:
; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -375,7 +372,7 @@ entry:
}
define <4 x i16> @test_vzip1_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip1_s16:
+; CHECK-LABEL: test_vzip1_s16:
; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -383,7 +380,7 @@ entry:
}
define <8 x i16> @test_vzip1q_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzip1q_s16:
+; CHECK-LABEL: test_vzip1q_s16:
; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -391,15 +388,15 @@ entry:
}
define <2 x i32> @test_vzip1_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vzip1_s32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vzip1_s32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vzip1q_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vzip1q_s32:
+; CHECK-LABEL: test_vzip1q_s32:
; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -407,15 +404,15 @@ entry:
}
define <2 x i64> @test_vzip1q_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vzip1q_s64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vzip1q_s64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle.i
}
define <8 x i8> @test_vzip1_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip1_u8:
+; CHECK-LABEL: test_vzip1_u8:
; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -423,7 +420,7 @@ entry:
}
define <16 x i8> @test_vzip1q_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzip1q_u8:
+; CHECK-LABEL: test_vzip1q_u8:
; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -431,7 +428,7 @@ entry:
}
define <4 x i16> @test_vzip1_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip1_u16:
+; CHECK-LABEL: test_vzip1_u16:
; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -439,7 +436,7 @@ entry:
}
define <8 x i16> @test_vzip1q_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzip1q_u16:
+; CHECK-LABEL: test_vzip1q_u16:
; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -447,15 +444,15 @@ entry:
}
define <2 x i32> @test_vzip1_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vzip1_u32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vzip1_u32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vzip1q_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vzip1q_u32:
+; CHECK-LABEL: test_vzip1q_u32:
; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -463,23 +460,23 @@ entry:
}
define <2 x i64> @test_vzip1q_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vzip1q_u64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vzip1q_u64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle.i
}
define <2 x float> @test_vzip1_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vzip1_f32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vzip1_f32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
ret <2 x float> %shuffle.i
}
define <4 x float> @test_vzip1q_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vzip1q_f32:
+; CHECK-LABEL: test_vzip1q_f32:
; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -487,15 +484,15 @@ entry:
}
define <2 x double> @test_vzip1q_f64(<2 x double> %a, <2 x double> %b) {
-; CHECK: test_vzip1q_f64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vzip1q_f64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
ret <2 x double> %shuffle.i
}
define <8 x i8> @test_vzip1_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip1_p8:
+; CHECK-LABEL: test_vzip1_p8:
; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -503,7 +500,7 @@ entry:
}
define <16 x i8> @test_vzip1q_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzip1q_p8:
+; CHECK-LABEL: test_vzip1q_p8:
; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -511,7 +508,7 @@ entry:
}
define <4 x i16> @test_vzip1_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip1_p16:
+; CHECK-LABEL: test_vzip1_p16:
; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -519,7 +516,7 @@ entry:
}
define <8 x i16> @test_vzip1q_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzip1q_p16:
+; CHECK-LABEL: test_vzip1q_p16:
; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -527,7 +524,7 @@ entry:
}
define <8 x i8> @test_vzip2_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip2_s8:
+; CHECK-LABEL: test_vzip2_s8:
; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -535,7 +532,7 @@ entry:
}
define <16 x i8> @test_vzip2q_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzip2q_s8:
+; CHECK-LABEL: test_vzip2q_s8:
; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -543,7 +540,7 @@ entry:
}
define <4 x i16> @test_vzip2_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip2_s16:
+; CHECK-LABEL: test_vzip2_s16:
; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -551,7 +548,7 @@ entry:
}
define <8 x i16> @test_vzip2q_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzip2q_s16:
+; CHECK-LABEL: test_vzip2q_s16:
; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -559,15 +556,15 @@ entry:
}
define <2 x i32> @test_vzip2_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vzip2_s32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vzip2_s32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vzip2q_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vzip2q_s32:
+; CHECK-LABEL: test_vzip2q_s32:
; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -575,15 +572,15 @@ entry:
}
define <2 x i64> @test_vzip2q_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vzip2q_s64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_vzip2q_s64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle.i
}
define <8 x i8> @test_vzip2_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip2_u8:
+; CHECK-LABEL: test_vzip2_u8:
; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -591,7 +588,7 @@ entry:
}
define <16 x i8> @test_vzip2q_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzip2q_u8:
+; CHECK-LABEL: test_vzip2q_u8:
; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -599,7 +596,7 @@ entry:
}
define <4 x i16> @test_vzip2_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip2_u16:
+; CHECK-LABEL: test_vzip2_u16:
; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -607,7 +604,7 @@ entry:
}
define <8 x i16> @test_vzip2q_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzip2q_u16:
+; CHECK-LABEL: test_vzip2q_u16:
; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -615,15 +612,15 @@ entry:
}
define <2 x i32> @test_vzip2_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vzip2_u32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vzip2_u32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vzip2q_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vzip2q_u32:
+; CHECK-LABEL: test_vzip2q_u32:
; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -631,23 +628,23 @@ entry:
}
define <2 x i64> @test_vzip2q_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vzip2q_u64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_vzip2q_u64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle.i
}
define <2 x float> @test_vzip2_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vzip2_f32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vzip2_f32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
ret <2 x float> %shuffle.i
}
define <4 x float> @test_vzip2q_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vzip2q_f32:
+; CHECK-LABEL: test_vzip2q_f32:
; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -655,15 +652,15 @@ entry:
}
define <2 x double> @test_vzip2q_f64(<2 x double> %a, <2 x double> %b) {
-; CHECK: test_vzip2q_f64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_vzip2q_f64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
ret <2 x double> %shuffle.i
}
define <8 x i8> @test_vzip2_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip2_p8:
+; CHECK-LABEL: test_vzip2_p8:
; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -671,7 +668,7 @@ entry:
}
define <16 x i8> @test_vzip2q_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzip2q_p8:
+; CHECK-LABEL: test_vzip2q_p8:
; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -679,7 +676,7 @@ entry:
}
define <4 x i16> @test_vzip2_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip2_p16:
+; CHECK-LABEL: test_vzip2_p16:
; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -687,7 +684,7 @@ entry:
}
define <8 x i16> @test_vzip2q_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzip2q_p16:
+; CHECK-LABEL: test_vzip2q_p16:
; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -695,7 +692,7 @@ entry:
}
define <8 x i8> @test_vtrn1_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn1_s8:
+; CHECK-LABEL: test_vtrn1_s8:
; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -703,7 +700,7 @@ entry:
}
define <16 x i8> @test_vtrn1q_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrn1q_s8:
+; CHECK-LABEL: test_vtrn1q_s8:
; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
@@ -711,7 +708,7 @@ entry:
}
define <4 x i16> @test_vtrn1_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn1_s16:
+; CHECK-LABEL: test_vtrn1_s16:
; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -719,7 +716,7 @@ entry:
}
define <8 x i16> @test_vtrn1q_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrn1q_s16:
+; CHECK-LABEL: test_vtrn1q_s16:
; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -727,15 +724,15 @@ entry:
}
define <2 x i32> @test_vtrn1_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vtrn1_s32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vtrn1_s32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vtrn1q_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vtrn1q_s32:
+; CHECK-LABEL: test_vtrn1q_s32:
; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -743,15 +740,15 @@ entry:
}
define <2 x i64> @test_vtrn1q_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vtrn1q_s64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vtrn1q_s64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle.i
}
define <8 x i8> @test_vtrn1_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn1_u8:
+; CHECK-LABEL: test_vtrn1_u8:
; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -759,7 +756,7 @@ entry:
}
define <16 x i8> @test_vtrn1q_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrn1q_u8:
+; CHECK-LABEL: test_vtrn1q_u8:
; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
@@ -767,7 +764,7 @@ entry:
}
define <4 x i16> @test_vtrn1_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn1_u16:
+; CHECK-LABEL: test_vtrn1_u16:
; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -775,7 +772,7 @@ entry:
}
define <8 x i16> @test_vtrn1q_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrn1q_u16:
+; CHECK-LABEL: test_vtrn1q_u16:
; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -783,15 +780,15 @@ entry:
}
define <2 x i32> @test_vtrn1_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vtrn1_u32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vtrn1_u32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vtrn1q_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vtrn1q_u32:
+; CHECK-LABEL: test_vtrn1q_u32:
; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -799,23 +796,23 @@ entry:
}
define <2 x i64> @test_vtrn1q_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vtrn1q_u64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vtrn1q_u64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle.i
}
define <2 x float> @test_vtrn1_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vtrn1_f32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK-LABEL: test_vtrn1_f32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
ret <2 x float> %shuffle.i
}
define <4 x float> @test_vtrn1q_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vtrn1q_f32:
+; CHECK-LABEL: test_vtrn1q_f32:
; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -823,15 +820,15 @@ entry:
}
define <2 x double> @test_vtrn1q_f64(<2 x double> %a, <2 x double> %b) {
-; CHECK: test_vtrn1q_f64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK-LABEL: test_vtrn1q_f64:
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
ret <2 x double> %shuffle.i
}
define <8 x i8> @test_vtrn1_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn1_p8:
+; CHECK-LABEL: test_vtrn1_p8:
; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -839,7 +836,7 @@ entry:
}
define <16 x i8> @test_vtrn1q_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrn1q_p8:
+; CHECK-LABEL: test_vtrn1q_p8:
; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
@@ -847,7 +844,7 @@ entry:
}
define <4 x i16> @test_vtrn1_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn1_p16:
+; CHECK-LABEL: test_vtrn1_p16:
; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -855,7 +852,7 @@ entry:
}
define <8 x i16> @test_vtrn1q_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrn1q_p16:
+; CHECK-LABEL: test_vtrn1q_p16:
; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -863,7 +860,7 @@ entry:
}
define <8 x i8> @test_vtrn2_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn2_s8:
+; CHECK-LABEL: test_vtrn2_s8:
; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -871,7 +868,7 @@ entry:
}
define <16 x i8> @test_vtrn2q_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrn2q_s8:
+; CHECK-LABEL: test_vtrn2q_s8:
; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
@@ -879,7 +876,7 @@ entry:
}
define <4 x i16> @test_vtrn2_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn2_s16:
+; CHECK-LABEL: test_vtrn2_s16:
; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -887,7 +884,7 @@ entry:
}
define <8 x i16> @test_vtrn2q_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrn2q_s16:
+; CHECK-LABEL: test_vtrn2q_s16:
; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -895,15 +892,15 @@ entry:
}
define <2 x i32> @test_vtrn2_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vtrn2_s32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vtrn2_s32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vtrn2q_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vtrn2q_s32:
+; CHECK-LABEL: test_vtrn2q_s32:
; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -911,15 +908,15 @@ entry:
}
define <2 x i64> @test_vtrn2q_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vtrn2q_s64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_vtrn2q_s64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle.i
}
define <8 x i8> @test_vtrn2_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn2_u8:
+; CHECK-LABEL: test_vtrn2_u8:
; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -927,7 +924,7 @@ entry:
}
define <16 x i8> @test_vtrn2q_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrn2q_u8:
+; CHECK-LABEL: test_vtrn2q_u8:
; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
@@ -935,7 +932,7 @@ entry:
}
define <4 x i16> @test_vtrn2_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn2_u16:
+; CHECK-LABEL: test_vtrn2_u16:
; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -943,7 +940,7 @@ entry:
}
define <8 x i16> @test_vtrn2q_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrn2q_u16:
+; CHECK-LABEL: test_vtrn2q_u16:
; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -951,15 +948,15 @@ entry:
}
define <2 x i32> @test_vtrn2_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vtrn2_u32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vtrn2_u32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i32> %shuffle.i
}
define <4 x i32> @test_vtrn2q_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vtrn2q_u32:
+; CHECK-LABEL: test_vtrn2q_u32:
; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -967,23 +964,23 @@ entry:
}
define <2 x i64> @test_vtrn2q_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vtrn2q_u64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_vtrn2q_u64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle.i
}
define <2 x float> @test_vtrn2_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vtrn2_f32:
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vtrn2_f32:
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
ret <2 x float> %shuffle.i
}
define <4 x float> @test_vtrn2q_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vtrn2q_f32:
+; CHECK-LABEL: test_vtrn2q_f32:
; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -991,15 +988,15 @@ entry:
}
define <2 x double> @test_vtrn2q_f64(<2 x double> %a, <2 x double> %b) {
-; CHECK: test_vtrn2q_f64:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_vtrn2q_f64:
+; CHECK: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
entry:
%shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
ret <2 x double> %shuffle.i
}
define <8 x i8> @test_vtrn2_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn2_p8:
+; CHECK-LABEL: test_vtrn2_p8:
; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
%shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
@@ -1007,7 +1004,7 @@ entry:
}
define <16 x i8> @test_vtrn2q_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrn2q_p8:
+; CHECK-LABEL: test_vtrn2q_p8:
; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
%shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
@@ -1015,7 +1012,7 @@ entry:
}
define <4 x i16> @test_vtrn2_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn2_p16:
+; CHECK-LABEL: test_vtrn2_p16:
; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
%shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -1023,15 +1020,1456 @@ entry:
}
define <8 x i16> @test_vtrn2q_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrn2q_p16:
+; CHECK-LABEL: test_vtrn2q_p16:
; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
ret <8 x i16> %shuffle.i
}
+define <8 x i8> @test_same_vuzp1_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp1_s8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vuzp1q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp1q_s8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vuzp1_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp1_s16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vuzp1q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp1q_s16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vuzp1q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vuzp1q_s32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_same_vuzp1_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp1_u8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vuzp1q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp1q_u8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vuzp1_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp1_u16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vuzp1q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp1q_u16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vuzp1q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vuzp1q_u32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_same_vuzp1q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_same_vuzp1q_f32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_same_vuzp1_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp1_p8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vuzp1q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp1q_p8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vuzp1_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp1_p16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vuzp1q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp1q_p16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_same_vuzp2_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp2_s8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vuzp2q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp2q_s8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vuzp2_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp2_s16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vuzp2q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp2q_s16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vuzp2q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vuzp2q_s32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_same_vuzp2_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp2_u8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vuzp2q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp2q_u8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vuzp2_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp2_u16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vuzp2q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp2q_u16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vuzp2q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vuzp2q_u32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_same_vuzp2q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_same_vuzp2q_f32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_same_vuzp2_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp2_p8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vuzp2q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vuzp2q_p8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vuzp2_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp2_p16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vuzp2q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vuzp2q_p16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_same_vzip1_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vzip1_s8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vzip1q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vzip1q_s8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vzip1_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vzip1_s16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vzip1q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vzip1q_s16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vzip1q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vzip1q_s32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_same_vzip1_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vzip1_u8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vzip1q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vzip1q_u8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vzip1_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vzip1_u16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vzip1q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vzip1q_u16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vzip1q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vzip1q_u32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_same_vzip1q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_same_vzip1q_f32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_same_vzip1_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vzip1_p8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vzip1q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vzip1q_p8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vzip1_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vzip1_p16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vzip1q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vzip1q_p16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_same_vzip2_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vzip2_s8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vzip2q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vzip2q_s8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vzip2_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vzip2_s16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vzip2q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vzip2q_s16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vzip2q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vzip2q_s32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_same_vzip2_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vzip2_u8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vzip2q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vzip2q_u8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vzip2_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vzip2_u16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vzip2q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vzip2q_u16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vzip2q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vzip2q_u32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_same_vzip2q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_same_vzip2q_f32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_same_vzip2_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vzip2_p8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vzip2q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vzip2q_p8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vzip2_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vzip2_p16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vzip2q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vzip2q_p16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_same_vtrn1_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn1_s8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vtrn1q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn1q_s8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vtrn1_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn1_s16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vtrn1q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn1q_s16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vtrn1q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vtrn1q_s32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_same_vtrn1_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn1_u8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vtrn1q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn1q_u8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vtrn1_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn1_u16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vtrn1q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn1q_u16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vtrn1q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vtrn1q_u32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_same_vtrn1q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_same_vtrn1q_f32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_same_vtrn1_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn1_p8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vtrn1q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn1q_p8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vtrn1_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn1_p16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vtrn1q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn1q_p16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_same_vtrn2_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn2_s8:
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vtrn2q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn2q_s8:
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vtrn2_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn2_s16:
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vtrn2q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn2q_s16:
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vtrn2q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vtrn2q_s32:
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_same_vtrn2_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn2_u8:
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vtrn2q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn2q_u8:
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vtrn2_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn2_u16:
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vtrn2q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn2q_u16:
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_same_vtrn2q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_same_vtrn2q_u32:
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %a, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_same_vtrn2q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_same_vtrn2q_f32:
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_same_vtrn2_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn2_p8:
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %a, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_same_vtrn2q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_same_vtrn2q_p8:
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %a, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_same_vtrn2_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn2_p16:
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %a, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_same_vtrn2q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_same_vtrn2q_p16:
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+
+define <8 x i8> @test_undef_vuzp1_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp1_s8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vuzp1q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_s8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vuzp1_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp1_s16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vuzp1q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_s16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vuzp1q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_s32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vuzp1_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp1_u8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vuzp1q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_u8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vuzp1_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp1_u16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vuzp1q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_u16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vuzp1q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_u32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_undef_vuzp1q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_f32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vuzp1_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp1_p8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vuzp1q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_p8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vuzp1_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp1_p16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vuzp1q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp1q_p16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vuzp2_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp2_s8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vuzp2q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_s8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vuzp2_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp2_s16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vuzp2q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_s16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vuzp2q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_s32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vuzp2_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp2_u8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vuzp2q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_u8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vuzp2_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp2_u16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vuzp2q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_u16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vuzp2q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_u32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_undef_vuzp2q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_f32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vuzp2_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp2_p8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vuzp2q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_p8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vuzp2_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp2_p16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vuzp2q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vuzp2q_p16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vzip1_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip1_s8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vzip1q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip1q_s8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vzip1_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip1_s16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vzip1q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip1q_s16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vzip1q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vzip1q_s32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vzip1_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip1_u8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vzip1q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip1q_u8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vzip1_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip1_u16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vzip1q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip1q_u16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vzip1q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vzip1q_u32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_undef_vzip1q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_undef_vzip1q_f32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vzip1_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip1_p8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vzip1q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip1q_p8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vzip1_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip1_p16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vzip1q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip1q_p16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vzip2_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip2_s8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vzip2q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip2q_s8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vzip2_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip2_s16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vzip2q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip2q_s16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vzip2q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vzip2q_s32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vzip2_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip2_u8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vzip2q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip2q_u8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vzip2_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip2_u16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vzip2q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip2q_u16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vzip2q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vzip2q_u32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_undef_vzip2q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_undef_vzip2q_f32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vzip2_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip2_p8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vzip2q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vzip2q_p8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vzip2_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip2_p16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vzip2q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vzip2q_p16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vtrn1_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn1_s8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vtrn1q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_s8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vtrn1_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn1_s16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vtrn1q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_s16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vtrn1q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_s32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vtrn1_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn1_u8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vtrn1q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_u8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vtrn1_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn1_u16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vtrn1q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_u16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vtrn1q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_u32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_undef_vtrn1q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_f32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vtrn1_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn1_p8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vtrn1q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_p8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vtrn1_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn1_p16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vtrn1q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn1q_p16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vtrn2_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn2_s8:
+; CHECK: rev16 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vtrn2q_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_s8:
+; CHECK: rev16 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vtrn2_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn2_s16:
+; CHECK: rev32 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vtrn2q_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_s16:
+; CHECK: rev32 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vtrn2q_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_s32:
+; CHECK: rev64 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vtrn2_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn2_u8:
+; CHECK: rev16 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vtrn2q_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_u8:
+; CHECK: rev16 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vtrn2_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn2_u16:
+; CHECK: rev32 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vtrn2q_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_u16:
+; CHECK: rev32 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_undef_vtrn2q_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_u32:
+; CHECK: rev64 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_undef_vtrn2q_f32(<4 x float> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_f32:
+; CHECK: rev64 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <8 x i8> @test_undef_vtrn2_p8(<8 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn2_p8:
+; CHECK: rev16 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_undef_vtrn2q_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_p8:
+; CHECK: rev16 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_undef_vtrn2_p16(<4 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn2_p16:
+; CHECK: rev32 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_undef_vtrn2q_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_undef_vtrn2q_p16:
+; CHECK: rev32 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
define %struct.int8x8x2_t @test_vuzp_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp_s8:
+; CHECK-LABEL: test_vuzp_s8:
; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1043,7 +2481,7 @@ entry:
}
define %struct.int16x4x2_t @test_vuzp_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp_s16:
+; CHECK-LABEL: test_vuzp_s16:
; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1055,9 +2493,9 @@ entry:
}
define %struct.int32x2x2_t @test_vuzp_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vuzp_s32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vuzp_s32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vuzp.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
%vuzp1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
@@ -1067,7 +2505,7 @@ entry:
}
define %struct.uint8x8x2_t @test_vuzp_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp_u8:
+; CHECK-LABEL: test_vuzp_u8:
; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1079,7 +2517,7 @@ entry:
}
define %struct.uint16x4x2_t @test_vuzp_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp_u16:
+; CHECK-LABEL: test_vuzp_u16:
; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1091,9 +2529,9 @@ entry:
}
define %struct.uint32x2x2_t @test_vuzp_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vuzp_u32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vuzp_u32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vuzp.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
%vuzp1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
@@ -1103,9 +2541,9 @@ entry:
}
define %struct.float32x2x2_t @test_vuzp_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vuzp_f32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vuzp_f32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vuzp.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
%vuzp1.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
@@ -1115,7 +2553,7 @@ entry:
}
define %struct.poly8x8x2_t @test_vuzp_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vuzp_p8:
+; CHECK-LABEL: test_vuzp_p8:
; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1127,7 +2565,7 @@ entry:
}
define %struct.poly16x4x2_t @test_vuzp_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vuzp_p16:
+; CHECK-LABEL: test_vuzp_p16:
; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1139,7 +2577,7 @@ entry:
}
define %struct.int8x16x2_t @test_vuzpq_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzpq_s8:
+; CHECK-LABEL: test_vuzpq_s8:
; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1151,7 +2589,7 @@ entry:
}
define %struct.int16x8x2_t @test_vuzpq_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzpq_s16:
+; CHECK-LABEL: test_vuzpq_s16:
; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1163,7 +2601,7 @@ entry:
}
define %struct.int32x4x2_t @test_vuzpq_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vuzpq_s32:
+; CHECK-LABEL: test_vuzpq_s32:
; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1175,7 +2613,7 @@ entry:
}
define %struct.uint8x16x2_t @test_vuzpq_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzpq_u8:
+; CHECK-LABEL: test_vuzpq_u8:
; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1187,7 +2625,7 @@ entry:
}
define %struct.uint16x8x2_t @test_vuzpq_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzpq_u16:
+; CHECK-LABEL: test_vuzpq_u16:
; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1199,7 +2637,7 @@ entry:
}
define %struct.uint32x4x2_t @test_vuzpq_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vuzpq_u32:
+; CHECK-LABEL: test_vuzpq_u32:
; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1211,7 +2649,7 @@ entry:
}
define %struct.float32x4x2_t @test_vuzpq_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vuzpq_f32:
+; CHECK-LABEL: test_vuzpq_f32:
; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1223,7 +2661,7 @@ entry:
}
define %struct.poly8x16x2_t @test_vuzpq_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vuzpq_p8:
+; CHECK-LABEL: test_vuzpq_p8:
; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1235,7 +2673,7 @@ entry:
}
define %struct.poly16x8x2_t @test_vuzpq_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vuzpq_p16:
+; CHECK-LABEL: test_vuzpq_p16:
; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1247,7 +2685,7 @@ entry:
}
define %struct.int8x8x2_t @test_vzip_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip_s8:
+; CHECK-LABEL: test_vzip_s8:
; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1259,7 +2697,7 @@ entry:
}
define %struct.int16x4x2_t @test_vzip_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip_s16:
+; CHECK-LABEL: test_vzip_s16:
; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1271,9 +2709,9 @@ entry:
}
define %struct.int32x2x2_t @test_vzip_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vzip_s32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vzip_s32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vzip.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
%vzip1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
@@ -1283,7 +2721,7 @@ entry:
}
define %struct.uint8x8x2_t @test_vzip_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip_u8:
+; CHECK-LABEL: test_vzip_u8:
; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1295,7 +2733,7 @@ entry:
}
define %struct.uint16x4x2_t @test_vzip_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip_u16:
+; CHECK-LABEL: test_vzip_u16:
; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1307,9 +2745,9 @@ entry:
}
define %struct.uint32x2x2_t @test_vzip_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vzip_u32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vzip_u32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vzip.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
%vzip1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
@@ -1319,9 +2757,9 @@ entry:
}
define %struct.float32x2x2_t @test_vzip_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vzip_f32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vzip_f32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vzip.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
%vzip1.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
@@ -1331,7 +2769,7 @@ entry:
}
define %struct.poly8x8x2_t @test_vzip_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vzip_p8:
+; CHECK-LABEL: test_vzip_p8:
; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1343,7 +2781,7 @@ entry:
}
define %struct.poly16x4x2_t @test_vzip_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vzip_p16:
+; CHECK-LABEL: test_vzip_p16:
; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1355,7 +2793,7 @@ entry:
}
define %struct.int8x16x2_t @test_vzipq_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzipq_s8:
+; CHECK-LABEL: test_vzipq_s8:
; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1367,7 +2805,7 @@ entry:
}
define %struct.int16x8x2_t @test_vzipq_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzipq_s16:
+; CHECK-LABEL: test_vzipq_s16:
; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1379,7 +2817,7 @@ entry:
}
define %struct.int32x4x2_t @test_vzipq_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vzipq_s32:
+; CHECK-LABEL: test_vzipq_s32:
; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1391,7 +2829,7 @@ entry:
}
define %struct.uint8x16x2_t @test_vzipq_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzipq_u8:
+; CHECK-LABEL: test_vzipq_u8:
; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1403,7 +2841,7 @@ entry:
}
define %struct.uint16x8x2_t @test_vzipq_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzipq_u16:
+; CHECK-LABEL: test_vzipq_u16:
; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1415,7 +2853,7 @@ entry:
}
define %struct.uint32x4x2_t @test_vzipq_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vzipq_u32:
+; CHECK-LABEL: test_vzipq_u32:
; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1427,7 +2865,7 @@ entry:
}
define %struct.float32x4x2_t @test_vzipq_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vzipq_f32:
+; CHECK-LABEL: test_vzipq_f32:
; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1439,7 +2877,7 @@ entry:
}
define %struct.poly8x16x2_t @test_vzipq_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vzipq_p8:
+; CHECK-LABEL: test_vzipq_p8:
; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1451,7 +2889,7 @@ entry:
}
define %struct.poly16x8x2_t @test_vzipq_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vzipq_p16:
+; CHECK-LABEL: test_vzipq_p16:
; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1463,7 +2901,7 @@ entry:
}
define %struct.int8x8x2_t @test_vtrn_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn_s8:
+; CHECK-LABEL: test_vtrn_s8:
; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1475,7 +2913,7 @@ entry:
}
define %struct.int16x4x2_t @test_vtrn_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn_s16:
+; CHECK-LABEL: test_vtrn_s16:
; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1487,9 +2925,9 @@ entry:
}
define %struct.int32x2x2_t @test_vtrn_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vtrn_s32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vtrn_s32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vtrn.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
%vtrn1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
@@ -1499,7 +2937,7 @@ entry:
}
define %struct.uint8x8x2_t @test_vtrn_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn_u8:
+; CHECK-LABEL: test_vtrn_u8:
; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1511,7 +2949,7 @@ entry:
}
define %struct.uint16x4x2_t @test_vtrn_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn_u16:
+; CHECK-LABEL: test_vtrn_u16:
; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1523,9 +2961,9 @@ entry:
}
define %struct.uint32x2x2_t @test_vtrn_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vtrn_u32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vtrn_u32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vtrn.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
%vtrn1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
@@ -1535,9 +2973,9 @@ entry:
}
define %struct.float32x2x2_t @test_vtrn_f32(<2 x float> %a, <2 x float> %b) {
-; CHECK: test_vtrn_f32:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
-; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_vtrn_f32:
+; CHECK: zip1 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: zip2 {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
entry:
%vtrn.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
%vtrn1.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
@@ -1547,7 +2985,7 @@ entry:
}
define %struct.poly8x8x2_t @test_vtrn_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtrn_p8:
+; CHECK-LABEL: test_vtrn_p8:
; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
entry:
@@ -1559,7 +2997,7 @@ entry:
}
define %struct.poly16x4x2_t @test_vtrn_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vtrn_p16:
+; CHECK-LABEL: test_vtrn_p16:
; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
entry:
@@ -1571,7 +3009,7 @@ entry:
}
define %struct.int8x16x2_t @test_vtrnq_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrnq_s8:
+; CHECK-LABEL: test_vtrnq_s8:
; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1583,7 +3021,7 @@ entry:
}
define %struct.int16x8x2_t @test_vtrnq_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrnq_s16:
+; CHECK-LABEL: test_vtrnq_s16:
; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1595,7 +3033,7 @@ entry:
}
define %struct.int32x4x2_t @test_vtrnq_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vtrnq_s32:
+; CHECK-LABEL: test_vtrnq_s32:
; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1607,7 +3045,7 @@ entry:
}
define %struct.uint8x16x2_t @test_vtrnq_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrnq_u8:
+; CHECK-LABEL: test_vtrnq_u8:
; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1619,7 +3057,7 @@ entry:
}
define %struct.uint16x8x2_t @test_vtrnq_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrnq_u16:
+; CHECK-LABEL: test_vtrnq_u16:
; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1631,7 +3069,7 @@ entry:
}
define %struct.uint32x4x2_t @test_vtrnq_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vtrnq_u32:
+; CHECK-LABEL: test_vtrnq_u32:
; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1643,7 +3081,7 @@ entry:
}
define %struct.float32x4x2_t @test_vtrnq_f32(<4 x float> %a, <4 x float> %b) {
-; CHECK: test_vtrnq_f32:
+; CHECK-LABEL: test_vtrnq_f32:
; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
@@ -1655,7 +3093,7 @@ entry:
}
define %struct.poly8x16x2_t @test_vtrnq_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vtrnq_p8:
+; CHECK-LABEL: test_vtrnq_p8:
; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
entry:
@@ -1667,7 +3105,7 @@ entry:
}
define %struct.poly16x8x2_t @test_vtrnq_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vtrnq_p16:
+; CHECK-LABEL: test_vtrnq_p16:
; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
@@ -1679,7 +3117,7 @@ entry:
}
define %struct.uint8x8x2_t @test_uzp(<16 x i8> %y) {
-; CHECK: test_uzp:
+; CHECK-LABEL: test_uzp:
%vuzp.i = shufflevector <16 x i8> %y, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
%vuzp1.i = shufflevector <16 x i8> %y, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -1687,7 +3125,4 @@ define %struct.uint8x8x2_t @test_uzp(<16 x i8> %y) {
%.fca.0.1.insert = insertvalue %struct.uint8x8x2_t %.fca.0.0.insert, <8 x i8> %vuzp1.i, 0, 1
ret %struct.uint8x8x2_t %.fca.0.1.insert
-; CHECK: dup {{d[0-9]+}}, {{v[0-9]+}}.d[1]
-; CHECK-NEXT: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-; CHECK-NEXT: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
}
diff --git a/test/CodeGen/AArch64/neon-rounding-halving-add.ll b/test/CodeGen/AArch64/neon-rounding-halving-add.ll
deleted file mode 100644
index 009da3b51a83..000000000000
--- a/test/CodeGen/AArch64/neon-rounding-halving-add.ll
+++ /dev/null
@@ -1,105 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_urhadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_urhadd_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: urhadd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_srhadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_srhadd_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: srhadd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_urhadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_urhadd_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: urhadd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_srhadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_srhadd_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: srhadd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_urhadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_urhadd_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: urhadd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_srhadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_srhadd_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: srhadd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_urhadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_urhadd_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: urhadd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_srhadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_srhadd_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: srhadd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_urhadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_urhadd_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: urhadd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_srhadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_srhadd_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: srhadd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_urhadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_urhadd_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: urhadd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_srhadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_srhadd_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: srhadd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-
diff --git a/test/CodeGen/AArch64/neon-rounding-shift.ll b/test/CodeGen/AArch64/neon-rounding-shift.ll
deleted file mode 100644
index 5b4ec2862c79..000000000000
--- a/test/CodeGen/AArch64/neon-rounding-shift.ll
+++ /dev/null
@@ -1,121 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_urshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_urshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: urshl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_srshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_srshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: srshl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_urshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_urshl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: urshl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_srshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_srshl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: srshl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_urshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_urshl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: urshl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_srshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_srshl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: srshl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_urshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_urshl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: urshl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_srshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_srshl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: srshl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_urshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_urshl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: urshl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_srshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_srshl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: srshl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_urshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_urshl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: urshl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_srshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_srshl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: srshl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>)
-
-define <2 x i64> @test_urshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_urshl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: urshl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-define <2 x i64> @test_srshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_srshl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: srshl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
diff --git a/test/CodeGen/AArch64/neon-saturating-add-sub.ll b/test/CodeGen/AArch64/neon-saturating-add-sub.ll
deleted file mode 100644
index fc60d900e4db..000000000000
--- a/test/CodeGen/AArch64/neon-saturating-add-sub.ll
+++ /dev/null
@@ -1,241 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uqadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uqadd_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uqadd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_sqadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_sqadd_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: sqadd v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_uqadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uqadd_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uqadd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_sqadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_sqadd_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: sqadd v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_uqadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uqadd_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uqadd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_sqadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sqadd_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sqadd v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_uqadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uqadd_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uqadd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_sqadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sqadd_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sqadd v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_uqadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uqadd_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uqadd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_sqadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sqadd_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sqadd v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_uqadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uqadd_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uqadd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_sqadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sqadd_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sqadd v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-
-
-declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
-
-define <2 x i64> @test_uqadd_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_uqadd_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: uqadd v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-define <2 x i64> @test_sqadd_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_sqadd_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: sqadd v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-declare <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uqsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uqsub_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uqsub v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_sqsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_sqsub_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: sqsub v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_uqsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uqsub_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uqsub v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_sqsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_sqsub_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: sqsub v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_uqsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uqsub_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uqsub v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_sqsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sqsub_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sqsub v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_uqsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uqsub_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uqsub v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_sqsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sqsub_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sqsub v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_uqsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uqsub_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uqsub v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_sqsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sqsub_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sqsub v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_uqsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uqsub_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uqsub v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_sqsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sqsub_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sqsub v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
-
-define <2 x i64> @test_uqsub_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_uqsub_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: uqsub v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-define <2 x i64> @test_sqsub_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_sqsub_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: sqsub v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
diff --git a/test/CodeGen/AArch64/neon-saturating-rounding-shift.ll b/test/CodeGen/AArch64/neon-saturating-rounding-shift.ll
deleted file mode 100644
index d89262c2abaa..000000000000
--- a/test/CodeGen/AArch64/neon-saturating-rounding-shift.ll
+++ /dev/null
@@ -1,121 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uqrshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uqrshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uqrshl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_sqrshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_sqrshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: sqrshl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_uqrshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uqrshl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uqrshl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_sqrshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_sqrshl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: sqrshl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_uqrshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uqrshl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uqrshl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_sqrshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sqrshl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sqrshl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_uqrshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uqrshl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uqrshl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_sqrshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sqrshl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sqrshl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_uqrshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uqrshl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uqrshl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_sqrshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sqrshl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sqrshl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_uqrshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uqrshl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uqrshl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_sqrshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sqrshl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sqrshl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64>, <2 x i64>)
-
-define <2 x i64> @test_uqrshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_uqrshl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: uqrshl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-define <2 x i64> @test_sqrshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_sqrshl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: sqrshl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
diff --git a/test/CodeGen/AArch64/neon-saturating-shift.ll b/test/CodeGen/AArch64/neon-saturating-shift.ll
deleted file mode 100644
index 11009fba7511..000000000000
--- a/test/CodeGen/AArch64/neon-saturating-shift.ll
+++ /dev/null
@@ -1,121 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uqshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uqshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: uqshl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_sqshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_sqshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: sqshl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_uqshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_uqshl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: uqshl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_sqshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_sqshl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: sqshl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_uqshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_uqshl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: uqshl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_sqshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sqshl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sqshl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_uqshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_uqshl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: uqshl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_sqshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sqshl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sqshl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_uqshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_uqshl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: uqshl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_sqshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sqshl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sqshl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_uqshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_uqshl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: uqshl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_sqshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sqshl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sqshl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>)
-
-define <2 x i64> @test_uqshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_uqshl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: uqshl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-define <2 x i64> @test_sqshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_sqshl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: sqshl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
diff --git a/test/CodeGen/AArch64/neon-scalar-abs.ll b/test/CodeGen/AArch64/neon-scalar-abs.ll
deleted file mode 100644
index 03a89e043e50..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-abs.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define i64 @test_vabsd_s64(i64 %a) {
-; CHECK: test_vabsd_s64
-; CHECK: abs {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vabs.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vabs1.i = tail call <1 x i64> @llvm.aarch64.neon.vabs(<1 x i64> %vabs.i)
- %0 = extractelement <1 x i64> %vabs1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vabs(<1 x i64>)
-
-define i8 @test_vqabsb_s8(i8 %a) {
-; CHECK: test_vqabsb_s8
-; CHECK: sqabs {{b[0-9]+}}, {{b[0-9]+}}
-entry:
- %vqabs.i = insertelement <1 x i8> undef, i8 %a, i32 0
- %vqabs1.i = call <1 x i8> @llvm.arm.neon.vqabs.v1i8(<1 x i8> %vqabs.i)
- %0 = extractelement <1 x i8> %vqabs1.i, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.arm.neon.vqabs.v1i8(<1 x i8>)
-
-define i16 @test_vqabsh_s16(i16 %a) {
-; CHECK: test_vqabsh_s16
-; CHECK: sqabs {{h[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqabs.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vqabs1.i = call <1 x i16> @llvm.arm.neon.vqabs.v1i16(<1 x i16> %vqabs.i)
- %0 = extractelement <1 x i16> %vqabs1.i, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.arm.neon.vqabs.v1i16(<1 x i16>)
-
-define i32 @test_vqabss_s32(i32 %a) {
-; CHECK: test_vqabss_s32
-; CHECK: sqabs {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqabs.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqabs1.i = call <1 x i32> @llvm.arm.neon.vqabs.v1i32(<1 x i32> %vqabs.i)
- %0 = extractelement <1 x i32> %vqabs1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.arm.neon.vqabs.v1i32(<1 x i32>)
-
-define i64 @test_vqabsd_s64(i64 %a) {
-; CHECK: test_vqabsd_s64
-; CHECK: sqabs {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vqabs.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vqabs1.i = call <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64> %vqabs.i)
- %0 = extractelement <1 x i64> %vqabs1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-add-sub.ll b/test/CodeGen/AArch64/neon-scalar-add-sub.ll
deleted file mode 100644
index 09ca880c8053..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-add-sub.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define <1 x i64> @add1xi64(<1 x i64> %A, <1 x i64> %B) {
-;CHECK: add {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- %tmp3 = add <1 x i64> %A, %B;
- ret <1 x i64> %tmp3
-}
-
-define <1 x i64> @sub1xi64(<1 x i64> %A, <1 x i64> %B) {
-;CHECK: sub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- %tmp3 = sub <1 x i64> %A, %B;
- ret <1 x i64> %tmp3
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vaddds(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vadddu(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_add_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_add_v1i64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vaddds(<1 x i64> %lhs, <1 x i64> %rhs)
-; CHECK: add {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_uadd_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_uadd_v1i64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vadddu(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: add {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsubds(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vsubdu(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_sub_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sub_v1i64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vsubds(<1 x i64> %lhs, <1 x i64> %rhs)
-; CHECK: sub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_usub_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_usub_v1i64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vsubdu(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: sub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-
-
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
index 8ce42def409a..32f59626b381 100644
--- a/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
+++ b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
@@ -4,56 +4,56 @@ declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
define float @test_fmla_ss4S(float %a, float %b, <4 x float> %v) {
- ; CHECK: test_fmla_ss4S
- ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ ; CHECK-LABEL: test_fmla_ss4S
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
ret float %tmp2
}
define float @test_fmla_ss4S_swap(float %a, float %b, <4 x float> %v) {
- ; CHECK: test_fmla_ss4S_swap
- ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ ; CHECK-LABEL: test_fmla_ss4S_swap
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
ret float %tmp2
}
define float @test_fmla_ss2S(float %a, float %b, <2 x float> %v) {
- ; CHECK: test_fmla_ss2S
- ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[1]
+ ; CHECK-LABEL: test_fmla_ss2S
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
ret float %tmp2
}
define double @test_fmla_ddD(double %a, double %b, <1 x double> %v) {
- ; CHECK: test_fmla_ddD
- ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[0]
+ ; CHECK-LABEL: test_fmla_ddD
+ ; CHECK: {{fmla d[0-9]+, d[0-9]+, v[0-9]+.d\[0]|fmadd d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
ret double %tmp2
}
define double @test_fmla_dd2D(double %a, double %b, <2 x double> %v) {
- ; CHECK: test_fmla_dd2D
- ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ ; CHECK-LABEL: test_fmla_dd2D
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
ret double %tmp2
}
define double @test_fmla_dd2D_swap(double %a, double %b, <2 x double> %v) {
- ; CHECK: test_fmla_dd2D_swap
- ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ ; CHECK-LABEL: test_fmla_dd2D_swap
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
ret double %tmp2
}
define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
- ; CHECK: test_fmls_ss4S
- ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ ; CHECK-LABEL: test_fmls_ss4S
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fsub float -0.0, %tmp1
%tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
@@ -61,8 +61,8 @@ define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
}
define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
- ; CHECK: test_fmls_ss4S_swap
- ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ ; CHECK-LABEL: test_fmls_ss4S_swap
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fsub float -0.0, %tmp1
%tmp3 = call float @llvm.fma.f32(float %tmp1, float %tmp2, float %a)
@@ -71,8 +71,8 @@ define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
- ; CHECK: test_fmls_ss2S
- ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[1]
+ ; CHECK-LABEL: test_fmls_ss2S
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = fsub float -0.0, %tmp1
%tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
@@ -80,8 +80,8 @@ define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
}
define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
- ; CHECK: test_fmls_ddD
- ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[0]
+ ; CHECK-LABEL: test_fmls_ddD
+ ; CHECK: {{fmls d[0-9]+, d[0-9]+, v[0-9]+.d\[0]|fmsub d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = fsub double -0.0, %tmp1
%tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
@@ -89,8 +89,8 @@ define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
}
define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
- ; CHECK: test_fmls_dd2D
- ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ ; CHECK-LABEL: test_fmls_dd2D
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fsub double -0.0, %tmp1
%tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
@@ -98,8 +98,8 @@ define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
}
define double @test_fmls_dd2D_swap(double %a, double %b, <2 x double> %v) {
- ; CHECK: test_fmls_dd2D_swap
- ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ ; CHECK-LABEL: test_fmls_dd2D_swap
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fsub double -0.0, %tmp1
%tmp3 = call double @llvm.fma.f64(double %tmp1, double %tmp2, double %a)
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
deleted file mode 100644
index 968ad3e8cf71..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
-
-define float @test_fmul_lane_ss2S(float %a, <2 x float> %v) {
- ; CHECK: test_fmul_lane_ss2S
- ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[1]
- %tmp1 = extractelement <2 x float> %v, i32 1
- %tmp2 = fmul float %a, %tmp1;
- ret float %tmp2;
-}
-
-define float @test_fmul_lane_ss2S_swap(float %a, <2 x float> %v) {
- ; CHECK: test_fmul_lane_ss2S_swap
- ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[1]
- %tmp1 = extractelement <2 x float> %v, i32 1
- %tmp2 = fmul float %tmp1, %a;
- ret float %tmp2;
-}
-
-
-define float @test_fmul_lane_ss4S(float %a, <4 x float> %v) {
- ; CHECK: test_fmul_lane_ss4S
- ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
- %tmp1 = extractelement <4 x float> %v, i32 3
- %tmp2 = fmul float %a, %tmp1;
- ret float %tmp2;
-}
-
-define float @test_fmul_lane_ss4S_swap(float %a, <4 x float> %v) {
- ; CHECK: test_fmul_lane_ss4S_swap
- ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
- %tmp1 = extractelement <4 x float> %v, i32 3
- %tmp2 = fmul float %tmp1, %a;
- ret float %tmp2;
-}
-
-
-define double @test_fmul_lane_ddD(double %a, <1 x double> %v) {
- ; CHECK: test_fmul_lane_ddD
- ; CHECK: fmul {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[0]
- %tmp1 = extractelement <1 x double> %v, i32 0
- %tmp2 = fmul double %a, %tmp1;
- ret double %tmp2;
-}
-
-
-
-define double @test_fmul_lane_dd2D(double %a, <2 x double> %v) {
- ; CHECK: test_fmul_lane_dd2D
- ; CHECK: fmul {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
- %tmp1 = extractelement <2 x double> %v, i32 1
- %tmp2 = fmul double %a, %tmp1;
- ret double %tmp2;
-}
-
-
-define double @test_fmul_lane_dd2D_swap(double %a, <2 x double> %v) {
- ; CHECK: test_fmul_lane_dd2D_swap
- ; CHECK: fmul {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
- %tmp1 = extractelement <2 x double> %v, i32 1
- %tmp2 = fmul double %tmp1, %a;
- ret double %tmp2;
-}
-
-declare float @llvm.aarch64.neon.vmulx.f32(float, float)
-
-define float @test_fmulx_lane_f32(float %a, <2 x float> %v) {
- ; CHECK: test_fmulx_lane_f32
- ; CHECK: fmulx {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[1]
- %tmp1 = extractelement <2 x float> %v, i32 1
- %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
- ret float %tmp2;
-}
-
-define float @test_fmulx_laneq_f32(float %a, <4 x float> %v) {
- ; CHECK: test_fmulx_laneq_f32
- ; CHECK: fmulx {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
- %tmp1 = extractelement <4 x float> %v, i32 3
- %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
- ret float %tmp2;
-}
-
-define float @test_fmulx_laneq_f32_swap(float %a, <4 x float> %v) {
- ; CHECK: test_fmulx_laneq_f32_swap
- ; CHECK: fmulx {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
- %tmp1 = extractelement <4 x float> %v, i32 3
- %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %tmp1, float %a)
- ret float %tmp2;
-}
-
-declare double @llvm.aarch64.neon.vmulx.f64(double, double)
-
-define double @test_fmulx_lane_f64(double %a, <1 x double> %v) {
- ; CHECK: test_fmulx_lane_f64
- ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[0]
- %tmp1 = extractelement <1 x double> %v, i32 0
- %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
- ret double %tmp2;
-}
-
-define double @test_fmulx_laneq_f64_0(double %a, <2 x double> %v) {
- ; CHECK: test_fmulx_laneq_f64_0
- ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[0]
- %tmp1 = extractelement <2 x double> %v, i32 0
- %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
- ret double %tmp2;
-}
-
-
-define double @test_fmulx_laneq_f64_1(double %a, <2 x double> %v) {
- ; CHECK: test_fmulx_laneq_f64_1
- ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
- %tmp1 = extractelement <2 x double> %v, i32 1
- %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
- ret double %tmp2;
-}
-
-define double @test_fmulx_laneq_f64_1_swap(double %a, <2 x double> %v) {
- ; CHECK: test_fmulx_laneq_f64_1_swap
- ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
- %tmp1 = extractelement <2 x double> %v, i32 1
- %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %tmp1, double %a)
- ret double %tmp2;
-}
-
diff --git a/test/CodeGen/AArch64/neon-scalar-compare.ll b/test/CodeGen/AArch64/neon-scalar-compare.ll
deleted file mode 100644
index 5f10cbbab2a6..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-compare.ll
+++ /dev/null
@@ -1,343 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-;; Scalar Integer Compare
-
-define i64 @test_vceqd(i64 %a, i64 %b) {
-; CHECK: test_vceqd
-; CHECK: cmeq {{d[0-9]+}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vceq.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vceq1.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64> %vceq.i, <1 x i64> %vceq1.i)
- %0 = extractelement <1 x i64> %vceq2.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vceqzd(i64 %a) {
-; CHECK: test_vceqzd
-; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
-entry:
- %vceqz.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vceqz1.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64> %vceqz.i, <1 x i64> zeroinitializer)
- %0 = extractelement <1 x i64> %vceqz1.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vcged(i64 %a, i64 %b) {
-; CHECK: test_vcged
-; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcge.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcge1.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcge.i, <1 x i64> %vcge1.i)
- %0 = extractelement <1 x i64> %vcge2.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vcgezd(i64 %a) {
-; CHECK: test_vcgezd
-; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, #0x0
-entry:
- %vcgez.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcgez1.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcgez.i, <1 x i64> zeroinitializer)
- %0 = extractelement <1 x i64> %vcgez1.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vcgtd(i64 %a, i64 %b) {
-; CHECK: test_vcgtd
-; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcgt.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcgt1.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i)
- %0 = extractelement <1 x i64> %vcgt2.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vcgtzd(i64 %a) {
-; CHECK: test_vcgtzd
-; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, #0x0
-entry:
- %vcgtz.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcgtz1.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgtz.i, <1 x i64> zeroinitializer)
- %0 = extractelement <1 x i64> %vcgtz1.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vcled(i64 %a, i64 %b) {
-; CHECK: test_vcled
-; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcgt.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vcgt1.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i)
- %0 = extractelement <1 x i64> %vcgt2.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vclezd(i64 %a) {
-; CHECK: test_vclezd
-; CHECK: cmle {{d[0-9]}}, {{d[0-9]}}, #0x0
-entry:
- %vclez.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vclez1.i = call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1i64.v1i64(<1 x i64> %vclez.i, <1 x i64> zeroinitializer)
- %0 = extractelement <1 x i64> %vclez1.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vcltd(i64 %a, i64 %b) {
-; CHECK: test_vcltd
-; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcge.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vcge1.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcge.i, <1 x i64> %vcge1.i)
- %0 = extractelement <1 x i64> %vcge2.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vcltzd(i64 %a) {
-; CHECK: test_vcltzd
-; CHECK: cmlt {{d[0-9]}}, {{d[0-9]}}, #0x0
-entry:
- %vcltz.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcltz1.i = call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1i64.v1i64(<1 x i64> %vcltz.i, <1 x i64> zeroinitializer)
- %0 = extractelement <1 x i64> %vcltz1.i, i32 0
- ret i64 %0
-}
-
-define i64 @test_vtstd(i64 %a, i64 %b) {
-; CHECK: test_vtstd
-; CHECK: cmtst {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vtst.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vtst1.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vtst2.i = call <1 x i64> @llvm.aarch64.neon.vtstd.v1i64.v1i64.v1i64(<1 x i64> %vtst.i, <1 x i64> %vtst1.i)
- %0 = extractelement <1 x i64> %vtst2.i, i32 0
- ret i64 %0
-}
-
-
-define <1 x i64> @test_vcage_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vcage_f64
-; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %vcage2.i = tail call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %a, <1 x double> %b) #2
- ret <1 x i64> %vcage2.i
-}
-
-define <1 x i64> @test_vcagt_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vcagt_f64
-; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %vcagt2.i = tail call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %a, <1 x double> %b) #2
- ret <1 x i64> %vcagt2.i
-}
-
-define <1 x i64> @test_vcale_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vcale_f64
-; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %vcage2.i = tail call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %b, <1 x double> %a) #2
- ret <1 x i64> %vcage2.i
-}
-
-define <1 x i64> @test_vcalt_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vcalt_f64
-; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %vcagt2.i = tail call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %b, <1 x double> %a) #2
- ret <1 x i64> %vcagt2.i
-}
-
-define <1 x i64> @test_vceq_s64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vceq_s64
-; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp eq <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vceq_u64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vceq_u64
-; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp eq <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vceq_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vceq_f64
-; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = fcmp oeq <1 x double> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcge_s64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vcge_s64
-; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp sge <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcge_u64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vcge_u64
-; CHECK: cmhs {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp uge <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcge_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vcge_f64
-; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = fcmp oge <1 x double> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcle_s64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vcle_s64
-; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp sle <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcle_u64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vcle_u64
-; CHECK: cmhs {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp ule <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcle_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vcle_f64
-; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = fcmp ole <1 x double> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcgt_s64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vcgt_s64
-; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp sgt <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcgt_u64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vcgt_u64
-; CHECK: cmhi {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp ugt <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vcgt_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vcgt_f64
-; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = fcmp ogt <1 x double> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vclt_s64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vclt_s64
-; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp slt <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vclt_u64(<1 x i64> %a, <1 x i64> %b) #0 {
-; CHECK: test_vclt_u64
-; CHECK: cmhi {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = icmp ult <1 x i64> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vclt_f64(<1 x double> %a, <1 x double> %b) #0 {
-; CHECK: test_vclt_f64
-; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
- %cmp.i = fcmp olt <1 x double> %a, %b
- %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
- ret <1 x i64> %sext.i
-}
-
-define <1 x i64> @test_vceqz_s64(<1 x i64> %a) #0 {
-; CHECK: test_vceqz_s64
-; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
- %1 = icmp eq <1 x i64> %a, zeroinitializer
- %vceqz.i = zext <1 x i1> %1 to <1 x i64>
- ret <1 x i64> %vceqz.i
-}
-
-define <1 x i64> @test_vceqz_u64(<1 x i64> %a) #0 {
-; CHECK: test_vceqz_u64
-; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
- %1 = icmp eq <1 x i64> %a, zeroinitializer
- %vceqz.i = zext <1 x i1> %1 to <1 x i64>
- ret <1 x i64> %vceqz.i
-}
-
-define <1 x i64> @test_vceqz_p64(<1 x i64> %a) #0 {
-; CHECK: test_vceqz_p64
-; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
- %1 = icmp eq <1 x i64> %a, zeroinitializer
- %vceqz.i = zext <1 x i1> %1 to <1 x i64>
- ret <1 x i64> %vceqz.i
-}
-
-define <2 x i64> @test_vceqzq_p64(<2 x i64> %a) #0 {
-; CHECK: test_vceqzq_p64
-; CHECK: cmeq {{v[0-9]}}.2d, {{v[0-9]}}.2d, #0
- %1 = icmp eq <2 x i64> %a, zeroinitializer
- %vceqz.i = zext <2 x i1> %1 to <2 x i64>
- ret <2 x i64> %vceqz.i
-}
-
-define <1 x i64> @test_vcgez_s64(<1 x i64> %a) #0 {
-; CHECK: test_vcgez_s64
-; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, #0x0
- %1 = icmp sge <1 x i64> %a, zeroinitializer
- %vcgez.i = zext <1 x i1> %1 to <1 x i64>
- ret <1 x i64> %vcgez.i
-}
-
-define <1 x i64> @test_vclez_s64(<1 x i64> %a) #0 {
-; CHECK: test_vclez_s64
-; CHECK: cmle {{d[0-9]}}, {{d[0-9]}}, #0x0
- %1 = icmp sle <1 x i64> %a, zeroinitializer
- %vclez.i = zext <1 x i1> %1 to <1 x i64>
- ret <1 x i64> %vclez.i
-}
-
-define <1 x i64> @test_vcgtz_s64(<1 x i64> %a) #0 {
-; CHECK: test_vcgtz_s64
-; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, #0x0
- %1 = icmp sgt <1 x i64> %a, zeroinitializer
- %vcgtz.i = zext <1 x i1> %1 to <1 x i64>
- ret <1 x i64> %vcgtz.i
-}
-
-define <1 x i64> @test_vcltz_s64(<1 x i64> %a) #0 {
-; CHECK: test_vcltz_s64
-; CHECK: cmlt {{d[0-9]}}, {{d[0-9]}}, #0
- %1 = icmp slt <1 x i64> %a, zeroinitializer
- %vcltz.i = zext <1 x i1> %1 to <1 x i64>
- ret <1 x i64> %vcltz.i
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.vtstd.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vchs.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vchi.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-copy.ll b/test/CodeGen/AArch64/neon-scalar-copy.ll
index d433ff595d1c..6afac315a961 100644
--- a/test/CodeGen/AArch64/neon-scalar-copy.ll
+++ b/test/CodeGen/AArch64/neon-scalar-copy.ll
@@ -1,88 +1,120 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s --check-prefix=CHECK
+
define float @test_dup_sv2S(<2 x float> %v) {
- ;CHECK: test_dup_sv2S
- ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[1]
+ ; CHECK-LABEL: test_dup_sv2S
+ ; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
ret float %tmp1
}
+define float @test_dup_sv2S_0(<2 x float> %v) {
+ ; CHECK-LABEL: test_dup_sv2S_0
+ ; CHECK-NOT: dup {{[vsd][0-9]+}}
+ ; CHECK-NOT: ins {{[vsd][0-9]+}}
+ ; CHECK: ret
+ %tmp1 = extractelement <2 x float> %v, i32 0
+ ret float %tmp1
+}
+
define float @test_dup_sv4S(<4 x float> %v) {
- ;CHECK: test_dup_sv4S
- ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[0]
+ ; CHECK-LABEL: test_dup_sv4S
+ ; CHECK-NOT: dup {{[vsd][0-9]+}}
+ ; CHECK-NOT: ins {{[vsd][0-9]+}}
+ ; CHECK: ret
%tmp1 = extractelement <4 x float> %v, i32 0
ret float %tmp1
}
define double @test_dup_dvD(<1 x double> %v) {
- ;CHECK: test_dup_dvD
- ;CHECK-NOT: dup {{d[0-31]+}}, {{v[0-31]+}}.d[0]
- ;CHECK: ret
+ ; CHECK-LABEL: test_dup_dvD
+ ; CHECK-NOT: dup {{[vsd][0-9]+}}
+ ; CHECK-NOT: ins {{[vsd][0-9]+}}
+ ; CHECK: ret
%tmp1 = extractelement <1 x double> %v, i32 0
ret double %tmp1
}
define double @test_dup_dv2D(<2 x double> %v) {
- ;CHECK: test_dup_dv2D
- ;CHECK: dup {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ ; CHECK-LABEL: test_dup_dv2D
+ ; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ ret double %tmp1
+}
+
+define double @test_dup_dv2D_0(<2 x double> %v) {
+ ; CHECK-LABEL: test_dup_dv2D_0
+ ; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+ ; CHECK: ret
%tmp1 = extractelement <2 x double> %v, i32 1
ret double %tmp1
}
define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) {
- ;CHECK: test_vector_dup_bv16B
- ;CHECK: dup {{b[0-31]+}}, {{v[0-31]+}}.b[14]
+ ; CHECK-LABEL: test_vector_dup_bv16B
%shuffle.i = shufflevector <16 x i8> %v1, <16 x i8> undef, <1 x i32> <i32 14>
ret <1 x i8> %shuffle.i
}
define <1 x i8> @test_vector_dup_bv8B(<8 x i8> %v1) {
- ;CHECK: test_vector_dup_bv8B
- ;CHECK: dup {{b[0-31]+}}, {{v[0-31]+}}.b[7]
+ ; CHECK-LABEL: test_vector_dup_bv8B
%shuffle.i = shufflevector <8 x i8> %v1, <8 x i8> undef, <1 x i32> <i32 7>
ret <1 x i8> %shuffle.i
}
define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) {
- ;CHECK: test_vector_dup_hv8H
- ;CHECK: dup {{h[0-31]+}}, {{v[0-31]+}}.h[7]
+ ; CHECK-LABEL: test_vector_dup_hv8H
%shuffle.i = shufflevector <8 x i16> %v1, <8 x i16> undef, <1 x i32> <i32 7>
ret <1 x i16> %shuffle.i
}
define <1 x i16> @test_vector_dup_hv4H(<4 x i16> %v1) {
- ;CHECK: test_vector_dup_hv4H
- ;CHECK: dup {{h[0-31]+}}, {{v[0-31]+}}.h[3]
+ ; CHECK-LABEL: test_vector_dup_hv4H
%shuffle.i = shufflevector <4 x i16> %v1, <4 x i16> undef, <1 x i32> <i32 3>
ret <1 x i16> %shuffle.i
}
define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) {
- ;CHECK: test_vector_dup_sv4S
- ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[3]
+ ; CHECK-LABEL: test_vector_dup_sv4S
%shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <1 x i32> <i32 3>
ret <1 x i32> %shuffle
}
define <1 x i32> @test_vector_dup_sv2S(<2 x i32> %v1) {
- ;CHECK: test_vector_dup_sv2S
- ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[1]
+ ; CHECK-LABEL: test_vector_dup_sv2S
%shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <1 x i32> <i32 1>
ret <1 x i32> %shuffle
}
define <1 x i64> @test_vector_dup_dv2D(<2 x i64> %v1) {
- ;CHECK: test_vector_dup_dv2D
- ;CHECK: dup {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ ; CHECK-LABEL: test_vector_dup_dv2D
+ ; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #8
%shuffle.i = shufflevector <2 x i64> %v1, <2 x i64> undef, <1 x i32> <i32 1>
ret <1 x i64> %shuffle.i
}
define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) {
- ;CHECK: test_vector_copy_dup_dv2D
- ;CHECK: dup {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ ; CHECK-LABEL: test_vector_copy_dup_dv2D
+ ; CHECK: {{dup|mov}} {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%vget_lane = extractelement <2 x i64> %c, i32 1
%vset_lane = insertelement <1 x i64> undef, i64 %vget_lane, i32 0
ret <1 x i64> %vset_lane
}
+; Undefined behaviour, so we really don't care what actually gets emitted, just
+; as long as we don't crash (since it could be dynamically unreachable).
+define i32 @test_out_of_range_extract(<4 x i32> %vec) {
+; CHECK-LABEL: test_out_of_range_extract:
+; CHECK: ret
+ %elt = extractelement <4 x i32> %vec, i32 4
+ ret i32 %elt
+}
+
+; Undefined behaviour, so we really don't care what actually gets emitted, just
+; as long as we don't crash (since it could be dynamically unreachable).
+define void @test_out_of_range_insert(<4 x i32> %vec, i32 %elt) {
+; CHECK-LABEL: test_out_of_range_insert:
+; CHECK: ret
+ insertelement <4 x i32> %vec, i32 %elt, i32 4
+ ret void
+}
diff --git a/test/CodeGen/AArch64/neon-scalar-cvt.ll b/test/CodeGen/AArch64/neon-scalar-cvt.ll
deleted file mode 100644
index a06d5d60a85b..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-cvt.ll
+++ /dev/null
@@ -1,137 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-define float @test_vcvts_f32_s32(i32 %a) {
-; CHECK: test_vcvts_f32_s32
-; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %0 = call float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
- ret float %0
-}
-
-declare float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
-
-define double @test_vcvtd_f64_s64(i64 %a) {
-; CHECK: test_vcvtd_f64_s64
-; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %0 = call double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
- ret double %0
-}
-
-declare double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
-
-define float @test_vcvts_f32_u32(i32 %a) {
-; CHECK: test_vcvts_f32_u32
-; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %0 = call float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
- ret float %0
-}
-
-declare float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
-
-define double @test_vcvtd_f64_u64(i64 %a) {
-; CHECK: test_vcvtd_f64_u64
-; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %0 = call double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
- ret double %0
-}
-
-declare double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
-
-define float @test_vcvts_n_f32_s32(i32 %a) {
-; CHECK: test_vcvts_n_f32_s32
-; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
-entry:
- %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
- %0 = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
- ret float %0
-}
-
-declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
-
-define double @test_vcvtd_n_f64_s64(i64 %a) {
-; CHECK: test_vcvtd_n_f64_s64
-; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
-entry:
- %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
- %0 = call double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
- ret double %0
-}
-
-declare double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
-
-define float @test_vcvts_n_f32_u32(i32 %a) {
-; CHECK: test_vcvts_n_f32_u32
-; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
-entry:
- %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
- %0 = call float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
- ret float %0
-}
-
-declare float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
-
-define double @test_vcvtd_n_f64_u64(i64 %a) {
-; CHECK: test_vcvtd_n_f64_u64
-; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
-entry:
- %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
- %0 = call double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
- ret double %0
-}
-
-declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
-
-define i32 @test_vcvts_n_s32_f32(float %a) {
-; CHECK: test_vcvts_n_s32_f32
-; CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #1
-entry:
- %fcvtzs = insertelement <1 x float> undef, float %a, i32 0
- %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float> %fcvtzs, i32 1)
- %0 = extractelement <1 x i32> %fcvtzs1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float>, i32)
-
-define i64 @test_vcvtd_n_s64_f64(double %a) {
-; CHECK: test_vcvtd_n_s64_f64
-; CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #1
-entry:
- %fcvtzs = insertelement <1 x double> undef, double %a, i32 0
- %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double> %fcvtzs, i32 1)
- %0 = extractelement <1 x i64> %fcvtzs1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double>, i32)
-
-define i32 @test_vcvts_n_u32_f32(float %a) {
-; CHECK: test_vcvts_n_u32_f32
-; CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #32
-entry:
- %fcvtzu = insertelement <1 x float> undef, float %a, i32 0
- %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float> %fcvtzu, i32 32)
- %0 = extractelement <1 x i32> %fcvtzu1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float>, i32)
-
-define i64 @test_vcvtd_n_u64_f64(double %a) {
-; CHECK: test_vcvtd_n_u64_f64
-; CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #64
-entry:
- %fcvtzu = insertelement <1 x double> undef, double %a, i32 0
- %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double> %fcvtzu, i32 64)
- %0 = extractelement <1 x i64> %fcvtzu1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double>, i32)
diff --git a/test/CodeGen/AArch64/neon-scalar-extract-narrow.ll b/test/CodeGen/AArch64/neon-scalar-extract-narrow.ll
deleted file mode 100644
index faf521bc889a..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-extract-narrow.ll
+++ /dev/null
@@ -1,104 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-define i8 @test_vqmovunh_s16(i16 %a) {
-; CHECK: test_vqmovunh_s16
-; CHECK: sqxtun {{b[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqmovun.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vqmovun1.i = call <1 x i8> @llvm.arm.neon.vqmovnsu.v1i8(<1 x i16> %vqmovun.i)
- %0 = extractelement <1 x i8> %vqmovun1.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vqmovuns_s32(i32 %a) {
-; CHECK: test_vqmovuns_s32
-; CHECK: sqxtun {{h[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqmovun.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqmovun1.i = call <1 x i16> @llvm.arm.neon.vqmovnsu.v1i16(<1 x i32> %vqmovun.i)
- %0 = extractelement <1 x i16> %vqmovun1.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vqmovund_s64(i64 %a) {
-; CHECK: test_vqmovund_s64
-; CHECK: sqxtun {{s[0-9]+}}, {{d[0-9]+}}
-entry:
- %vqmovun.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vqmovun1.i = call <1 x i32> @llvm.arm.neon.vqmovnsu.v1i32(<1 x i64> %vqmovun.i)
- %0 = extractelement <1 x i32> %vqmovun1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i8> @llvm.arm.neon.vqmovnsu.v1i8(<1 x i16>)
-declare <1 x i16> @llvm.arm.neon.vqmovnsu.v1i16(<1 x i32>)
-declare <1 x i32> @llvm.arm.neon.vqmovnsu.v1i32(<1 x i64>)
-
-define i8 @test_vqmovnh_s16(i16 %a) {
-; CHECK: test_vqmovnh_s16
-; CHECK: sqxtn {{b[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqmovn.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vqmovn1.i = call <1 x i8> @llvm.arm.neon.vqmovns.v1i8(<1 x i16> %vqmovn.i)
- %0 = extractelement <1 x i8> %vqmovn1.i, i32 0
- ret i8 %0
-}
-
-define i16 @test_vqmovns_s32(i32 %a) {
-; CHECK: test_vqmovns_s32
-; CHECK: sqxtn {{h[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqmovn.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqmovn1.i = call <1 x i16> @llvm.arm.neon.vqmovns.v1i16(<1 x i32> %vqmovn.i)
- %0 = extractelement <1 x i16> %vqmovn1.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vqmovnd_s64(i64 %a) {
-; CHECK: test_vqmovnd_s64
-; CHECK: sqxtn {{s[0-9]+}}, {{d[0-9]+}}
-entry:
- %vqmovn.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vqmovn1.i = call <1 x i32> @llvm.arm.neon.vqmovns.v1i32(<1 x i64> %vqmovn.i)
- %0 = extractelement <1 x i32> %vqmovn1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i8> @llvm.arm.neon.vqmovns.v1i8(<1 x i16>)
-declare <1 x i16> @llvm.arm.neon.vqmovns.v1i16(<1 x i32>)
-declare <1 x i32> @llvm.arm.neon.vqmovns.v1i32(<1 x i64>)
-
-define i8 @test_vqmovnh_u16(i16 %a) {
-; CHECK: test_vqmovnh_u16
-; CHECK: uqxtn {{b[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqmovn.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vqmovn1.i = call <1 x i8> @llvm.arm.neon.vqmovnu.v1i8(<1 x i16> %vqmovn.i)
- %0 = extractelement <1 x i8> %vqmovn1.i, i32 0
- ret i8 %0
-}
-
-
-define i16 @test_vqmovns_u32(i32 %a) {
-; CHECK: test_vqmovns_u32
-; CHECK: uqxtn {{h[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqmovn.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqmovn1.i = call <1 x i16> @llvm.arm.neon.vqmovnu.v1i16(<1 x i32> %vqmovn.i)
- %0 = extractelement <1 x i16> %vqmovn1.i, i32 0
- ret i16 %0
-}
-
-define i32 @test_vqmovnd_u64(i64 %a) {
-; CHECK: test_vqmovnd_u64
-; CHECK: uqxtn {{s[0-9]+}}, {{d[0-9]+}}
-entry:
- %vqmovn.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vqmovn1.i = call <1 x i32> @llvm.arm.neon.vqmovnu.v1i32(<1 x i64> %vqmovn.i)
- %0 = extractelement <1 x i32> %vqmovn1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i8> @llvm.arm.neon.vqmovnu.v1i8(<1 x i16>)
-declare <1 x i16> @llvm.arm.neon.vqmovnu.v1i16(<1 x i32>)
-declare <1 x i32> @llvm.arm.neon.vqmovnu.v1i32(<1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-fabd.ll b/test/CodeGen/AArch64/neon-scalar-fabd.ll
deleted file mode 100644
index 75686d32064b..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-fabd.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define float @test_vabds_f32(float %a, float %b) {
-; CHECK-LABEL: test_vabds_f32
-; CHECK: fabd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vabd.i = insertelement <1 x float> undef, float %a, i32 0
- %vabd1.i = insertelement <1 x float> undef, float %b, i32 0
- %vabd2.i = call <1 x float> @llvm.aarch64.neon.vabd.v1f32(<1 x float> %vabd.i, <1 x float> %vabd1.i)
- %0 = extractelement <1 x float> %vabd2.i, i32 0
- ret float %0
-}
-
-define double @test_vabdd_f64(double %a, double %b) {
-; CHECK-LABEL: test_vabdd_f64
-; CHECK: fabd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vabd.i = insertelement <1 x double> undef, double %a, i32 0
- %vabd1.i = insertelement <1 x double> undef, double %b, i32 0
- %vabd2.i = call <1 x double> @llvm.aarch64.neon.vabd.v1f64(<1 x double> %vabd.i, <1 x double> %vabd1.i)
- %0 = extractelement <1 x double> %vabd2.i, i32 0
- ret double %0
-}
-
-declare <1 x double> @llvm.aarch64.neon.vabd.v1f64(<1 x double>, <1 x double>)
-declare <1 x float> @llvm.aarch64.neon.vabd.v1f32(<1 x float>, <1 x float>)
diff --git a/test/CodeGen/AArch64/neon-scalar-fcvt.ll b/test/CodeGen/AArch64/neon-scalar-fcvt.ll
deleted file mode 100644
index d7b84fae7375..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-fcvt.ll
+++ /dev/null
@@ -1,255 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-;; Scalar Floating-point Convert
-
-define float @test_vcvtxn(double %a) {
-; CHECK: test_vcvtxn
-; CHECK: fcvtxn {{s[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtf.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtf1.i = tail call <1 x float> @llvm.aarch64.neon.fcvtxn.v1f32.v1f64(<1 x double> %vcvtf.i)
- %0 = extractelement <1 x float> %vcvtf1.i, i32 0
- ret float %0
-}
-
-declare <1 x float> @llvm.aarch64.neon.fcvtxn.v1f32.v1f64(<1 x double>)
-
-define i32 @test_vcvtass(float %a) {
-; CHECK: test_vcvtass
-; CHECK: fcvtas {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtas.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtas1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtas.v1i32.v1f32(<1 x float> %vcvtas.i)
- %0 = extractelement <1 x i32> %vcvtas1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtas.v1i32.v1f32(<1 x float>)
-
-define i64 @test_test_vcvtasd(double %a) {
-; CHECK: test_test_vcvtasd
-; CHECK: fcvtas {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtas.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtas1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double> %vcvtas.i)
- %0 = extractelement <1 x i64> %vcvtas1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtaus(float %a) {
-; CHECK: test_vcvtaus
-; CHECK: fcvtau {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtau.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtau1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtau.v1i32.v1f32(<1 x float> %vcvtau.i)
- %0 = extractelement <1 x i32> %vcvtau1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtau.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtaud(double %a) {
-; CHECK: test_vcvtaud
-; CHECK: fcvtau {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtau.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtau1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double> %vcvtau.i)
- %0 = extractelement <1 x i64> %vcvtau1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtmss(float %a) {
-; CHECK: test_vcvtmss
-; CHECK: fcvtms {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtms.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtms1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtms.v1i32.v1f32(<1 x float> %vcvtms.i)
- %0 = extractelement <1 x i32> %vcvtms1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtms.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtmd_s64_f64(double %a) {
-; CHECK: test_vcvtmd_s64_f64
-; CHECK: fcvtms {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtms.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtms1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double> %vcvtms.i)
- %0 = extractelement <1 x i64> %vcvtms1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtmus(float %a) {
-; CHECK: test_vcvtmus
-; CHECK: fcvtmu {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtmu.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtmu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtmu.v1i32.v1f32(<1 x float> %vcvtmu.i)
- %0 = extractelement <1 x i32> %vcvtmu1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtmu.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtmud(double %a) {
-; CHECK: test_vcvtmud
-; CHECK: fcvtmu {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtmu.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtmu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double> %vcvtmu.i)
- %0 = extractelement <1 x i64> %vcvtmu1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtnss(float %a) {
-; CHECK: test_vcvtnss
-; CHECK: fcvtns {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtns.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtns1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtns.v1i32.v1f32(<1 x float> %vcvtns.i)
- %0 = extractelement <1 x i32> %vcvtns1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtns.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtnd_s64_f64(double %a) {
-; CHECK: test_vcvtnd_s64_f64
-; CHECK: fcvtns {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtns.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtns1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double> %vcvtns.i)
- %0 = extractelement <1 x i64> %vcvtns1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtnus(float %a) {
-; CHECK: test_vcvtnus
-; CHECK: fcvtnu {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtnu.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtnu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtnu.v1i32.v1f32(<1 x float> %vcvtnu.i)
- %0 = extractelement <1 x i32> %vcvtnu1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtnu.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtnud(double %a) {
-; CHECK: test_vcvtnud
-; CHECK: fcvtnu {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtnu.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtnu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double> %vcvtnu.i)
- %0 = extractelement <1 x i64> %vcvtnu1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtpss(float %a) {
-; CHECK: test_vcvtpss
-; CHECK: fcvtps {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtps.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtps1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtps.v1i32.v1f32(<1 x float> %vcvtps.i)
- %0 = extractelement <1 x i32> %vcvtps1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtps.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtpd_s64_f64(double %a) {
-; CHECK: test_vcvtpd_s64_f64
-; CHECK: fcvtps {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtps.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtps1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double> %vcvtps.i)
- %0 = extractelement <1 x i64> %vcvtps1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtpus(float %a) {
-; CHECK: test_vcvtpus
-; CHECK: fcvtpu {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtpu.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtpu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtpu.v1i32.v1f32(<1 x float> %vcvtpu.i)
- %0 = extractelement <1 x i32> %vcvtpu1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtpu.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtpud(double %a) {
-; CHECK: test_vcvtpud
-; CHECK: fcvtpu {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtpu.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtpu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double> %vcvtpu.i)
- %0 = extractelement <1 x i64> %vcvtpu1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtss(float %a) {
-; CHECK: test_vcvtss
-; CHECK: fcvtzs {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtzs.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtzs1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtzs.v1i32.v1f32(<1 x float> %vcvtzs.i)
- %0 = extractelement <1 x i32> %vcvtzs1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtzs.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtd_s64_f64(double %a) {
-; CHECK: test_vcvtd_s64_f64
-; CHECK: fcvtzs {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvzs.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvzs1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> %vcvzs.i)
- %0 = extractelement <1 x i64> %vcvzs1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double>)
-
-define i32 @test_vcvtus(float %a) {
-; CHECK: test_vcvtus
-; CHECK: fcvtzu {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcvtzu.i = insertelement <1 x float> undef, float %a, i32 0
- %vcvtzu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtzu.v1i32.v1f32(<1 x float> %vcvtzu.i)
- %0 = extractelement <1 x i32> %vcvtzu1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.fcvtzu.v1i32.v1f32(<1 x float>)
-
-define i64 @test_vcvtud(double %a) {
-; CHECK: test_vcvtud
-; CHECK: fcvtzu {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcvtzu.i = insertelement <1 x double> undef, double %a, i32 0
- %vcvtzu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> %vcvtzu.i)
- %0 = extractelement <1 x i64> %vcvtzu1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double>)
diff --git a/test/CodeGen/AArch64/neon-scalar-fp-compare.ll b/test/CodeGen/AArch64/neon-scalar-fp-compare.ll
deleted file mode 100644
index a6e58599acdb..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-fp-compare.ll
+++ /dev/null
@@ -1,328 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-;; Scalar Floating-point Compare
-
-define i32 @test_vceqs_f32(float %a, float %b) {
-; CHECK: test_vceqs_f32
-; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vceq.i = insertelement <1 x float> undef, float %a, i32 0
- %vceq1.i = insertelement <1 x float> undef, float %b, i32 0
- %vceq2.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> %vceq1.i)
- %0 = extractelement <1 x i32> %vceq2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vceqd_f64(double %a, double %b) {
-; CHECK: test_vceqd_f64
-; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vceq.i = insertelement <1 x double> undef, double %a, i32 0
- %vceq1.i = insertelement <1 x double> undef, double %b, i32 0
- %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double> %vceq.i, <1 x double> %vceq1.i)
- %0 = extractelement <1 x i64> %vceq2.i, i32 0
- ret i64 %0
-}
-
-define <1 x i64> @test_vceqz_f64(<1 x double> %a) #0 {
-; CHECK: test_vceqz_f64
-; CHECK: fcmeq {{d[0-9]+}}, {{d[0-9]+}}, #0.0
-entry:
- %0 = fcmp oeq <1 x double> %a, zeroinitializer
- %vceqz.i = zext <1 x i1> %0 to <1 x i64>
- ret <1 x i64> %vceqz.i
-}
-
-define i32 @test_vceqzs_f32(float %a) {
-; CHECK: test_vceqzs_f32
-; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, #0.0
-entry:
- %vceq.i = insertelement <1 x float> undef, float %a, i32 0
- %vceq1.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vceq1.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vceqzd_f64(double %a) {
-; CHECK: test_vceqzd_f64
-; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, #0.0
-entry:
- %vceq.i = insertelement <1 x double> undef, double %a, i32 0
- %vceq1.i = tail call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double> %vceq.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vceq1.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcges_f32(float %a, float %b) {
-; CHECK: test_vcges_f32
-; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcge.i = insertelement <1 x float> undef, float %a, i32 0
- %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
- %0 = extractelement <1 x i32> %vcge2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcged_f64(double %a, double %b) {
-; CHECK: test_vcged_f64
-; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcge.i = insertelement <1 x double> undef, double %a, i32 0
- %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
- %0 = extractelement <1 x i64> %vcge2.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcgezs_f32(float %a) {
-; CHECK: test_vcgezs_f32
-; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, #0.0
-entry:
- %vcge.i = insertelement <1 x float> undef, float %a, i32 0
- %vcge1.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vcge1.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcgezd_f64(double %a) {
-; CHECK: test_vcgezd_f64
-; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, #0.0
-entry:
- %vcge.i = insertelement <1 x double> undef, double %a, i32 0
- %vcge1.i = tail call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double> %vcge.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vcge1.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcgts_f32(float %a, float %b) {
-; CHECK: test_vcgts_f32
-; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
- %vcgt1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
- %0 = extractelement <1 x i32> %vcgt2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcgtd_f64(double %a, double %b) {
-; CHECK: test_vcgtd_f64
-; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
- %vcgt1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
- %0 = extractelement <1 x i64> %vcgt2.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcgtzs_f32(float %a) {
-; CHECK: test_vcgtzs_f32
-; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, #0.0
-entry:
- %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
- %vcgt1.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vcgt1.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcgtzd_f64(double %a) {
-; CHECK: test_vcgtzd_f64
-; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, #0.0
-entry:
- %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
- %vcgt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double> %vcgt.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vcgt1.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcles_f32(float %a, float %b) {
-; CHECK: test_vcles_f32
-; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcge.i = insertelement <1 x float> undef, float %a, i32 0
- %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
- %0 = extractelement <1 x i32> %vcge2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcled_f64(double %a, double %b) {
-; CHECK: test_vcled_f64
-; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcge.i = insertelement <1 x double> undef, double %a, i32 0
- %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
- %0 = extractelement <1 x i64> %vcge2.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vclezs_f32(float %a) {
-; CHECK: test_vclezs_f32
-; CHECK: fcmle {{s[0-9]}}, {{s[0-9]}}, #0.0
-entry:
- %vcle.i = insertelement <1 x float> undef, float %a, i32 0
- %vcle1.i = call <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float> %vcle.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vcle1.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vclezd_f64(double %a) {
-; CHECK: test_vclezd_f64
-; CHECK: fcmle {{d[0-9]}}, {{d[0-9]}}, #0.0
-entry:
- %vcle.i = insertelement <1 x double> undef, double %a, i32 0
- %vcle1.i = tail call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double> %vcle.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vcle1.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vclts_f32(float %a, float %b) {
-; CHECK: test_vclts_f32
-; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcgt.i = insertelement <1 x float> undef, float %b, i32 0
- %vcgt1.i = insertelement <1 x float> undef, float %a, i32 0
- %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
- %0 = extractelement <1 x i32> %vcgt2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcltd_f64(double %a, double %b) {
-; CHECK: test_vcltd_f64
-; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcgt.i = insertelement <1 x double> undef, double %b, i32 0
- %vcgt1.i = insertelement <1 x double> undef, double %a, i32 0
- %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
- %0 = extractelement <1 x i64> %vcgt2.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcltzs_f32(float %a) {
-; CHECK: test_vcltzs_f32
-; CHECK: fcmlt {{s[0-9]}}, {{s[0-9]}}, #0.0
-entry:
- %vclt.i = insertelement <1 x float> undef, float %a, i32 0
- %vclt1.i = call <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float> %vclt.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vclt1.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcltzd_f64(double %a) {
-; CHECK: test_vcltzd_f64
-; CHECK: fcmlt {{d[0-9]}}, {{d[0-9]}}, #0.0
-entry:
- %vclt.i = insertelement <1 x double> undef, double %a, i32 0
- %vclt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double> %vclt.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vclt1.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcages_f32(float %a, float %b) {
-; CHECK: test_vcages_f32
-; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcage.i = insertelement <1 x float> undef, float %a, i32 0
- %vcage1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
- %0 = extractelement <1 x i32> %vcage2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcaged_f64(double %a, double %b) {
-; CHECK: test_vcaged_f64
-; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcage.i = insertelement <1 x double> undef, double %a, i32 0
- %vcage1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
- %0 = extractelement <1 x i64> %vcage2.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcagts_f32(float %a, float %b) {
-; CHECK: test_vcagts_f32
-; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcagt.i = insertelement <1 x float> undef, float %a, i32 0
- %vcagt1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcagt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcagt.i, <1 x float> %vcagt1.i)
- %0 = extractelement <1 x i32> %vcagt2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcagtd_f64(double %a, double %b) {
-; CHECK: test_vcagtd_f64
-; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcagt.i = insertelement <1 x double> undef, double %a, i32 0
- %vcagt1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcagt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcagt.i, <1 x double> %vcagt1.i)
- %0 = extractelement <1 x i64> %vcagt2.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcales_f32(float %a, float %b) {
-; CHECK: test_vcales_f32
-; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcage.i = insertelement <1 x float> undef, float %b, i32 0
- %vcage1.i = insertelement <1 x float> undef, float %a, i32 0
- %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
- %0 = extractelement <1 x i32> %vcage2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcaled_f64(double %a, double %b) {
-; CHECK: test_vcaled_f64
-; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcage.i = insertelement <1 x double> undef, double %b, i32 0
- %vcage1.i = insertelement <1 x double> undef, double %a, i32 0
- %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
- %0 = extractelement <1 x i64> %vcage2.i, i32 0
- ret i64 %0
-}
-
-define i32 @test_vcalts_f32(float %a, float %b) {
-; CHECK: test_vcalts_f32
-; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
-entry:
- %vcalt.i = insertelement <1 x float> undef, float %b, i32 0
- %vcalt1.i = insertelement <1 x float> undef, float %a, i32 0
- %vcalt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcalt.i, <1 x float> %vcalt1.i)
- %0 = extractelement <1 x i32> %vcalt2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vcaltd_f64(double %a, double %b) {
-; CHECK: test_vcaltd_f64
-; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
-entry:
- %vcalt.i = insertelement <1 x double> undef, double %b, i32 0
- %vcalt1.i = insertelement <1 x double> undef, double %a, i32 0
- %vcalt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcalt.i, <1 x double> %vcalt1.i)
- %0 = extractelement <1 x i64> %vcalt2.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
diff --git a/test/CodeGen/AArch64/neon-scalar-mul.ll b/test/CodeGen/AArch64/neon-scalar-mul.ll
deleted file mode 100644
index 991037f6cb88..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-mul.ll
+++ /dev/null
@@ -1,143 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-define i16 @test_vqdmulhh_s16(i16 %a, i16 %b) {
-; CHECK: test_vqdmulhh_s16
-; CHECK: sqdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
- %1 = insertelement <1 x i16> undef, i16 %a, i32 0
- %2 = insertelement <1 x i16> undef, i16 %b, i32 0
- %3 = call <1 x i16> @llvm.arm.neon.vqdmulh.v1i16(<1 x i16> %1, <1 x i16> %2)
- %4 = extractelement <1 x i16> %3, i32 0
- ret i16 %4
-}
-
-define i32 @test_vqdmulhs_s32(i32 %a, i32 %b) {
-; CHECK: test_vqdmulhs_s32
-; CHECK: sqdmulh {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
- %1 = insertelement <1 x i32> undef, i32 %a, i32 0
- %2 = insertelement <1 x i32> undef, i32 %b, i32 0
- %3 = call <1 x i32> @llvm.arm.neon.vqdmulh.v1i32(<1 x i32> %1, <1 x i32> %2)
- %4 = extractelement <1 x i32> %3, i32 0
- ret i32 %4
-}
-
-declare <1 x i16> @llvm.arm.neon.vqdmulh.v1i16(<1 x i16>, <1 x i16>)
-declare <1 x i32> @llvm.arm.neon.vqdmulh.v1i32(<1 x i32>, <1 x i32>)
-
-define i16 @test_vqrdmulhh_s16(i16 %a, i16 %b) {
-; CHECK: test_vqrdmulhh_s16
-; CHECK: sqrdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
- %1 = insertelement <1 x i16> undef, i16 %a, i32 0
- %2 = insertelement <1 x i16> undef, i16 %b, i32 0
- %3 = call <1 x i16> @llvm.arm.neon.vqrdmulh.v1i16(<1 x i16> %1, <1 x i16> %2)
- %4 = extractelement <1 x i16> %3, i32 0
- ret i16 %4
-}
-
-define i32 @test_vqrdmulhs_s32(i32 %a, i32 %b) {
-; CHECK: test_vqrdmulhs_s32
-; CHECK: sqrdmulh {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
- %1 = insertelement <1 x i32> undef, i32 %a, i32 0
- %2 = insertelement <1 x i32> undef, i32 %b, i32 0
- %3 = call <1 x i32> @llvm.arm.neon.vqrdmulh.v1i32(<1 x i32> %1, <1 x i32> %2)
- %4 = extractelement <1 x i32> %3, i32 0
- ret i32 %4
-}
-
-declare <1 x i16> @llvm.arm.neon.vqrdmulh.v1i16(<1 x i16>, <1 x i16>)
-declare <1 x i32> @llvm.arm.neon.vqrdmulh.v1i32(<1 x i32>, <1 x i32>)
-
-define float @test_vmulxs_f32(float %a, float %b) {
-; CHECK: test_vmulxs_f32
-; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
- %1 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %b)
- ret float %1
-}
-
-define double @test_vmulxd_f64(double %a, double %b) {
-; CHECK: test_vmulxd_f64
-; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
- %1 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %b)
- ret double %1
-}
-
-declare float @llvm.aarch64.neon.vmulx.f32(float, float)
-declare double @llvm.aarch64.neon.vmulx.f64(double, double)
-
-define i32 @test_vqdmlalh_s16(i32 %a, i16 %b, i16 %c) {
-; CHECK: test_vqdmlalh_s16
-; CHECK: sqdmlal {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqdmlal.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqdmlal1.i = insertelement <1 x i16> undef, i16 %b, i32 0
- %vqdmlal2.i = insertelement <1 x i16> undef, i16 %c, i32 0
- %vqdmlal3.i = call <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i32> %vqdmlal.i, <1 x i16> %vqdmlal1.i, <1 x i16> %vqdmlal2.i)
- %0 = extractelement <1 x i32> %vqdmlal3.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vqdmlals_s32(i64 %a, i32 %b, i32 %c) {
-; CHECK: test_vqdmlals_s32
-; CHECK: sqdmlal {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqdmlal.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vqdmlal1.i = insertelement <1 x i32> undef, i32 %b, i32 0
- %vqdmlal2.i = insertelement <1 x i32> undef, i32 %c, i32 0
- %vqdmlal3.i = call <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i64> %vqdmlal.i, <1 x i32> %vqdmlal1.i, <1 x i32> %vqdmlal2.i)
- %0 = extractelement <1 x i64> %vqdmlal3.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i32>, <1 x i16>, <1 x i16>)
-declare <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i64>, <1 x i32>, <1 x i32>)
-
-define i32 @test_vqdmlslh_s16(i32 %a, i16 %b, i16 %c) {
-; CHECK: test_vqdmlslh_s16
-; CHECK: sqdmlsl {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqdmlsl.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqdmlsl1.i = insertelement <1 x i16> undef, i16 %b, i32 0
- %vqdmlsl2.i = insertelement <1 x i16> undef, i16 %c, i32 0
- %vqdmlsl3.i = call <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i32> %vqdmlsl.i, <1 x i16> %vqdmlsl1.i, <1 x i16> %vqdmlsl2.i)
- %0 = extractelement <1 x i32> %vqdmlsl3.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vqdmlsls_s32(i64 %a, i32 %b, i32 %c) {
-; CHECK: test_vqdmlsls_s32
-; CHECK: sqdmlsl {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqdmlsl.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vqdmlsl1.i = insertelement <1 x i32> undef, i32 %b, i32 0
- %vqdmlsl2.i = insertelement <1 x i32> undef, i32 %c, i32 0
- %vqdmlsl3.i = call <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i64> %vqdmlsl.i, <1 x i32> %vqdmlsl1.i, <1 x i32> %vqdmlsl2.i)
- %0 = extractelement <1 x i64> %vqdmlsl3.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i32>, <1 x i16>, <1 x i16>)
-declare <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i64>, <1 x i32>, <1 x i32>)
-
-define i32 @test_vqdmullh_s16(i16 %a, i16 %b) {
-; CHECK: test_vqdmullh_s16
-; CHECK: sqdmull {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqdmull.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vqdmull1.i = insertelement <1 x i16> undef, i16 %b, i32 0
- %vqdmull2.i = call <1 x i32> @llvm.arm.neon.vqdmull.v1i32(<1 x i16> %vqdmull.i, <1 x i16> %vqdmull1.i)
- %0 = extractelement <1 x i32> %vqdmull2.i, i32 0
- ret i32 %0
-}
-
-define i64 @test_vqdmulls_s32(i32 %a, i32 %b) {
-; CHECK: test_vqdmulls_s32
-; CHECK: sqdmull {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqdmull.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqdmull1.i = insertelement <1 x i32> undef, i32 %b, i32 0
- %vqdmull2.i = call <1 x i64> @llvm.arm.neon.vqdmull.v1i64(<1 x i32> %vqdmull.i, <1 x i32> %vqdmull1.i)
- %0 = extractelement <1 x i64> %vqdmull2.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i32> @llvm.arm.neon.vqdmull.v1i32(<1 x i16>, <1 x i16>)
-declare <1 x i64> @llvm.arm.neon.vqdmull.v1i64(<1 x i32>, <1 x i32>)
diff --git a/test/CodeGen/AArch64/neon-scalar-neg.ll b/test/CodeGen/AArch64/neon-scalar-neg.ll
deleted file mode 100644
index 4dc9d519783d..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-neg.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define i64 @test_vnegd_s64(i64 %a) {
-; CHECK: test_vnegd_s64
-; CHECK: neg {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vneg.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vneg1.i = tail call <1 x i64> @llvm.aarch64.neon.vneg(<1 x i64> %vneg.i)
- %0 = extractelement <1 x i64> %vneg1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vneg(<1 x i64>)
-
-define i8 @test_vqnegb_s8(i8 %a) {
-; CHECK: test_vqnegb_s8
-; CHECK: sqneg {{b[0-9]+}}, {{b[0-9]+}}
-entry:
- %vqneg.i = insertelement <1 x i8> undef, i8 %a, i32 0
- %vqneg1.i = call <1 x i8> @llvm.arm.neon.vqneg.v1i8(<1 x i8> %vqneg.i)
- %0 = extractelement <1 x i8> %vqneg1.i, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.arm.neon.vqneg.v1i8(<1 x i8>)
-
-define i16 @test_vqnegh_s16(i16 %a) {
-; CHECK: test_vqnegh_s16
-; CHECK: sqneg {{h[0-9]+}}, {{h[0-9]+}}
-entry:
- %vqneg.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vqneg1.i = call <1 x i16> @llvm.arm.neon.vqneg.v1i16(<1 x i16> %vqneg.i)
- %0 = extractelement <1 x i16> %vqneg1.i, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.arm.neon.vqneg.v1i16(<1 x i16>)
-
-define i32 @test_vqnegs_s32(i32 %a) {
-; CHECK: test_vqnegs_s32
-; CHECK: sqneg {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vqneg.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vqneg1.i = call <1 x i32> @llvm.arm.neon.vqneg.v1i32(<1 x i32> %vqneg.i)
- %0 = extractelement <1 x i32> %vqneg1.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.arm.neon.vqneg.v1i32(<1 x i32>)
-
-define i64 @test_vqnegd_s64(i64 %a) {
-; CHECK: test_vqnegd_s64
-; CHECK: sqneg {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vqneg.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vqneg1.i = call <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64> %vqneg.i)
- %0 = extractelement <1 x i64> %vqneg1.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64>) \ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-scalar-recip.ll b/test/CodeGen/AArch64/neon-scalar-recip.ll
deleted file mode 100644
index f21c27bee435..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-recip.ll
+++ /dev/null
@@ -1,116 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-define float @test_vrecpss_f32(float %a, float %b) {
-; CHECK: test_vrecpss_f32
-; CHECK: frecps {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
- %1 = insertelement <1 x float> undef, float %a, i32 0
- %2 = insertelement <1 x float> undef, float %b, i32 0
- %3 = call <1 x float> @llvm.arm.neon.vrecps.v1f32(<1 x float> %1, <1 x float> %2)
- %4 = extractelement <1 x float> %3, i32 0
- ret float %4
-}
-
-define double @test_vrecpsd_f64(double %a, double %b) {
-; CHECK: test_vrecpsd_f64
-; CHECK: frecps {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
- %1 = insertelement <1 x double> undef, double %a, i32 0
- %2 = insertelement <1 x double> undef, double %b, i32 0
- %3 = call <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double> %1, <1 x double> %2)
- %4 = extractelement <1 x double> %3, i32 0
- ret double %4
-}
-
-declare <1 x float> @llvm.arm.neon.vrecps.v1f32(<1 x float>, <1 x float>)
-declare <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double>, <1 x double>)
-
-define float @test_vrsqrtss_f32(float %a, float %b) {
-; CHECK: test_vrsqrtss_f32
-; CHECK: frsqrts {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
- %1 = insertelement <1 x float> undef, float %a, i32 0
- %2 = insertelement <1 x float> undef, float %b, i32 0
- %3 = call <1 x float> @llvm.arm.neon.vrsqrts.v1f32(<1 x float> %1, <1 x float> %2)
- %4 = extractelement <1 x float> %3, i32 0
- ret float %4
-}
-
-define double @test_vrsqrtsd_f64(double %a, double %b) {
-; CHECK: test_vrsqrtsd_f64
-; CHECK: frsqrts {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
- %1 = insertelement <1 x double> undef, double %a, i32 0
- %2 = insertelement <1 x double> undef, double %b, i32 0
- %3 = call <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double> %1, <1 x double> %2)
- %4 = extractelement <1 x double> %3, i32 0
- ret double %4
-}
-
-declare <1 x float> @llvm.arm.neon.vrsqrts.v1f32(<1 x float>, <1 x float>)
-declare <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double>, <1 x double>)
-
-define float @test_vrecpes_f32(float %a) {
-; CHECK: test_vrecpes_f32
-; CHECK: frecpe {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vrecpe.i = insertelement <1 x float> undef, float %a, i32 0
- %vrecpe1.i = tail call <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float> %vrecpe.i)
- %0 = extractelement <1 x float> %vrecpe1.i, i32 0
- ret float %0
-}
-
-define double @test_vrecped_f64(double %a) {
-; CHECK: test_vrecped_f64
-; CHECK: frecpe {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vrecpe.i = insertelement <1 x double> undef, double %a, i32 0
- %vrecpe1.i = tail call <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double> %vrecpe.i)
- %0 = extractelement <1 x double> %vrecpe1.i, i32 0
- ret double %0
-}
-
-declare <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double>)
-
-define float @test_vrecpxs_f32(float %a) {
-; CHECK: test_vrecpxs_f32
-; CHECK: frecpx {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vrecpx.i = insertelement <1 x float> undef, float %a, i32 0
- %vrecpx1.i = tail call <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float> %vrecpx.i)
- %0 = extractelement <1 x float> %vrecpx1.i, i32 0
- ret float %0
-}
-
-define double @test_vrecpxd_f64(double %a) {
-; CHECK: test_vrecpxd_f64
-; CHECK: frecpx {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vrecpx.i = insertelement <1 x double> undef, double %a, i32 0
- %vrecpx1.i = tail call <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double> %vrecpx.i)
- %0 = extractelement <1 x double> %vrecpx1.i, i32 0
- ret double %0
-}
-
-declare <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float>)
-declare <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double>)
-
-define float @test_vrsqrtes_f32(float %a) {
-; CHECK: test_vrsqrtes_f32
-; CHECK: frsqrte {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vrsqrte.i = insertelement <1 x float> undef, float %a, i32 0
- %vrsqrte1.i = tail call <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float> %vrsqrte.i)
- %0 = extractelement <1 x float> %vrsqrte1.i, i32 0
- ret float %0
-}
-
-define double @test_vrsqrted_f64(double %a) {
-; CHECK: test_vrsqrted_f64
-; CHECK: frsqrte {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vrsqrte.i = insertelement <1 x double> undef, double %a, i32 0
- %vrsqrte1.i = tail call <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double> %vrsqrte.i)
- %0 = extractelement <1 x double> %vrsqrte1.i, i32 0
- ret double %0
-}
-
-declare <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double>)
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
deleted file mode 100644
index 80e8dc339d68..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ /dev/null
@@ -1,247 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <1 x i64> @llvm.aarch64.neon.vpadd(<2 x i64>)
-
-define <1 x i64> @test_addp_v1i64(<2 x i64> %a) {
-; CHECK: test_addp_v1i64:
- %val = call <1 x i64> @llvm.aarch64.neon.vpadd(<2 x i64> %a)
-; CHECK: addp d0, v0.2d
- ret <1 x i64> %val
-}
-
-declare <1 x float> @llvm.aarch64.neon.vpfadd(<2 x float>)
-
-define <1 x float> @test_faddp_v1f32(<2 x float> %a) {
-; CHECK: test_faddp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpfadd(<2 x float> %a)
-; CHECK: faddp s0, v0.2s
- ret <1 x float> %val
-}
-
-declare <1 x double> @llvm.aarch64.neon.vpfaddq(<2 x double>)
-
-define <1 x double> @test_faddp_v1f64(<2 x double> %a) {
-; CHECK: test_faddp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpfaddq(<2 x double> %a)
-; CHECK: faddp d0, v0.2d
- ret <1 x double> %val
-}
-
-
-declare <1 x float> @llvm.aarch64.neon.vpmax(<2 x float>)
-
-define <1 x float> @test_fmaxp_v1f32(<2 x float> %a) {
-; CHECK: test_fmaxp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpmax(<2 x float> %a)
-; CHECK: fmaxp s0, v0.2s
- ret <1 x float> %val
-}
-
-declare <1 x double> @llvm.aarch64.neon.vpmaxq(<2 x double>)
-
-define <1 x double> @test_fmaxp_v1f64(<2 x double> %a) {
-; CHECK: test_fmaxp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpmaxq(<2 x double> %a)
-; CHECK: fmaxp d0, v0.2d
- ret <1 x double> %val
-}
-
-
-declare <1 x float> @llvm.aarch64.neon.vpmin(<2 x float>)
-
-define <1 x float> @test_fminp_v1f32(<2 x float> %a) {
-; CHECK: test_fminp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpmin(<2 x float> %a)
-; CHECK: fminp s0, v0.2s
- ret <1 x float> %val
-}
-
-declare <1 x double> @llvm.aarch64.neon.vpminq(<2 x double>)
-
-define <1 x double> @test_fminp_v1f64(<2 x double> %a) {
-; CHECK: test_fminp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpminq(<2 x double> %a)
-; CHECK: fminp d0, v0.2d
- ret <1 x double> %val
-}
-
-declare <1 x float> @llvm.aarch64.neon.vpfmaxnm(<2 x float>)
-
-define <1 x float> @test_fmaxnmp_v1f32(<2 x float> %a) {
-; CHECK: test_fmaxnmp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpfmaxnm(<2 x float> %a)
-; CHECK: fmaxnmp s0, v0.2s
- ret <1 x float> %val
-}
-
-declare <1 x double> @llvm.aarch64.neon.vpfmaxnmq(<2 x double>)
-
-define <1 x double> @test_fmaxnmp_v1f64(<2 x double> %a) {
-; CHECK: test_fmaxnmp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpfmaxnmq(<2 x double> %a)
-; CHECK: fmaxnmp d0, v0.2d
- ret <1 x double> %val
-}
-
-declare <1 x float> @llvm.aarch64.neon.vpfminnm(<2 x float>)
-
-define <1 x float> @test_fminnmp_v1f32(<2 x float> %a) {
-; CHECK: test_fminnmp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpfminnm(<2 x float> %a)
-; CHECK: fminnmp s0, v0.2s
- ret <1 x float> %val
-}
-
-declare <1 x double> @llvm.aarch64.neon.vpfminnmq(<2 x double>)
-
-define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
-; CHECK: test_fminnmp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpfminnmq(<2 x double> %a)
-; CHECK: fminnmp d0, v0.2d
- ret <1 x double> %val
-}
-
-define float @test_vaddv_f32(<2 x float> %a) {
-; CHECK-LABEL: test_vaddv_f32
-; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
-}
-
-define float @test_vaddvq_f32(<4 x float> %a) {
-; CHECK-LABEL: test_vaddvq_f32
-; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
-}
-
-define double @test_vaddvq_f64(<2 x double> %a) {
-; CHECK-LABEL: test_vaddvq_f64
-; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
-}
-
-define float @test_vmaxv_f32(<2 x float> %a) {
-; CHECK-LABEL: test_vmaxv_f32
-; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
-}
-
-define double @test_vmaxvq_f64(<2 x double> %a) {
-; CHECK-LABEL: test_vmaxvq_f64
-; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
-}
-
-define float @test_vminv_f32(<2 x float> %a) {
-; CHECK-LABEL: test_vminv_f32
-; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
-}
-
-define double @test_vminvq_f64(<2 x double> %a) {
-; CHECK-LABEL: test_vminvq_f64
-; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
-}
-
-define double @test_vmaxnmvq_f64(<2 x double> %a) {
-; CHECK-LABEL: test_vmaxnmvq_f64
-; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
-}
-
-define float @test_vmaxnmv_f32(<2 x float> %a) {
-; CHECK-LABEL: test_vmaxnmv_f32
-; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
-}
-
-define double @test_vminnmvq_f64(<2 x double> %a) {
-; CHECK-LABEL: test_vminnmvq_f64
-; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
-}
-
-define float @test_vminnmv_f32(<2 x float> %a) {
-; CHECK-LABEL: test_vminnmv_f32
-; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
-}
-
-define <2 x i64> @test_vpaddq_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-LABEL: test_vpaddq_s64
-; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
- ret <2 x i64> %1
-}
-
-define <2 x i64> @test_vpaddq_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-LABEL: test_vpaddq_u64
-; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
- ret <2 x i64> %1
-}
-
-define i64 @test_vaddvq_s64(<2 x i64> %a) {
-; CHECK-LABEL: test_vaddvq_s64
-; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
- %2 = extractelement <1 x i64> %1, i32 0
- ret i64 %2
-}
-
-define i64 @test_vaddvq_u64(<2 x i64> %a) {
-; CHECK-LABEL: test_vaddvq_u64
-; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
- %2 = extractelement <1 x i64> %1, i32 0
- ret i64 %2
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
-
-declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
-
-declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>) \ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-scalar-rounding-shift.ll b/test/CodeGen/AArch64/neon-scalar-rounding-shift.ll
deleted file mode 100644
index 83ceb4ebdad5..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-rounding-shift.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-
-declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_urshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_urshl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: urshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_srshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_srshl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: srshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vrshldu(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vrshlds(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_urshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_urshl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vrshldu(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: urshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_srshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_srshl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vrshlds(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: srshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-
-
diff --git a/test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll b/test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll
deleted file mode 100644
index bd66f80cebb6..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll
+++ /dev/null
@@ -1,242 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <1 x i8> @llvm.arm.neon.vqaddu.v1i8(<1 x i8>, <1 x i8>)
-declare <1 x i8> @llvm.arm.neon.vqadds.v1i8(<1 x i8>, <1 x i8>)
-
-define <1 x i8> @test_uqadd_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_uqadd_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.arm.neon.vqaddu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: uqadd {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
- ret <1 x i8> %tmp1
-}
-
-define <1 x i8> @test_sqadd_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_sqadd_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.arm.neon.vqadds.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: sqadd {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
- ret <1 x i8> %tmp1
-}
-
-declare <1 x i8> @llvm.arm.neon.vqsubu.v1i8(<1 x i8>, <1 x i8>)
-declare <1 x i8> @llvm.arm.neon.vqsubs.v1i8(<1 x i8>, <1 x i8>)
-
-define <1 x i8> @test_uqsub_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_uqsub_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.arm.neon.vqsubu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: uqsub {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
- ret <1 x i8> %tmp1
-}
-
-define <1 x i8> @test_sqsub_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_sqsub_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.arm.neon.vqsubs.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: sqsub {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
- ret <1 x i8> %tmp1
-}
-
-declare <1 x i16> @llvm.arm.neon.vqaddu.v1i16(<1 x i16>, <1 x i16>)
-declare <1 x i16> @llvm.arm.neon.vqadds.v1i16(<1 x i16>, <1 x i16>)
-
-define <1 x i16> @test_uqadd_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_uqadd_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.arm.neon.vqaddu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: uqadd {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
- ret <1 x i16> %tmp1
-}
-
-define <1 x i16> @test_sqadd_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_sqadd_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.arm.neon.vqadds.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: sqadd {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
- ret <1 x i16> %tmp1
-}
-
-declare <1 x i16> @llvm.arm.neon.vqsubu.v1i16(<1 x i16>, <1 x i16>)
-declare <1 x i16> @llvm.arm.neon.vqsubs.v1i16(<1 x i16>, <1 x i16>)
-
-define <1 x i16> @test_uqsub_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_uqsub_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.arm.neon.vqsubu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: uqsub {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
- ret <1 x i16> %tmp1
-}
-
-define <1 x i16> @test_sqsub_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_sqsub_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.arm.neon.vqsubs.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: sqsub {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
- ret <1 x i16> %tmp1
-}
-
-declare <1 x i32> @llvm.arm.neon.vqaddu.v1i32(<1 x i32>, <1 x i32>)
-declare <1 x i32> @llvm.arm.neon.vqadds.v1i32(<1 x i32>, <1 x i32>)
-
-define <1 x i32> @test_uqadd_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_uqadd_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.arm.neon.vqaddu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: uqadd {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
- ret <1 x i32> %tmp1
-}
-
-define <1 x i32> @test_sqadd_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_sqadd_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.arm.neon.vqadds.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: sqadd {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
- ret <1 x i32> %tmp1
-}
-
-declare <1 x i32> @llvm.arm.neon.vqsubu.v1i32(<1 x i32>, <1 x i32>)
-declare <1 x i32> @llvm.arm.neon.vqsubs.v1i32(<1 x i32>, <1 x i32>)
-
-define <1 x i32> @test_uqsub_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_uqsub_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.arm.neon.vqsubu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: uqsub {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
- ret <1 x i32> %tmp1
-}
-
-
-define <1 x i32> @test_sqsub_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_sqsub_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.arm.neon.vqsubs.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: sqsub {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
- ret <1 x i32> %tmp1
-}
-
-declare <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_uqadd_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_uqadd_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: uqadd {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sqadd_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sqadd_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: sqadd {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-declare <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_uqsub_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_uqsub_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: uqsub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sqsub_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sqsub_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: sqsub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define i8 @test_vuqaddb_s8(i8 %a, i8 %b) {
-; CHECK: test_vuqaddb_s8
-; CHECK: suqadd {{b[0-9]+}}, {{b[0-9]+}}
-entry:
- %vuqadd.i = insertelement <1 x i8> undef, i8 %a, i32 0
- %vuqadd1.i = insertelement <1 x i8> undef, i8 %b, i32 0
- %vuqadd2.i = call <1 x i8> @llvm.aarch64.neon.vuqadd.v1i8(<1 x i8> %vuqadd.i, <1 x i8> %vuqadd1.i)
- %0 = extractelement <1 x i8> %vuqadd2.i, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vsqadd.v1i8(<1 x i8>, <1 x i8>)
-
-define i16 @test_vuqaddh_s16(i16 %a, i16 %b) {
-; CHECK: test_vuqaddh_s16
-; CHECK: suqadd {{h[0-9]+}}, {{h[0-9]+}}
-entry:
- %vuqadd.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vuqadd1.i = insertelement <1 x i16> undef, i16 %b, i32 0
- %vuqadd2.i = call <1 x i16> @llvm.aarch64.neon.vuqadd.v1i16(<1 x i16> %vuqadd.i, <1 x i16> %vuqadd1.i)
- %0 = extractelement <1 x i16> %vuqadd2.i, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vsqadd.v1i16(<1 x i16>, <1 x i16>)
-
-define i32 @test_vuqadds_s32(i32 %a, i32 %b) {
-; CHECK: test_vuqadds_s32
-; CHECK: suqadd {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vuqadd.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vuqadd1.i = insertelement <1 x i32> undef, i32 %b, i32 0
- %vuqadd2.i = call <1 x i32> @llvm.aarch64.neon.vuqadd.v1i32(<1 x i32> %vuqadd.i, <1 x i32> %vuqadd1.i)
- %0 = extractelement <1 x i32> %vuqadd2.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vsqadd.v1i32(<1 x i32>, <1 x i32>)
-
-define i64 @test_vuqaddd_s64(i64 %a, i64 %b) {
-; CHECK: test_vuqaddd_s64
-; CHECK: suqadd {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vuqadd.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vuqadd1.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vuqadd2.i = call <1 x i64> @llvm.aarch64.neon.vuqadd.v1i64(<1 x i64> %vuqadd.i, <1 x i64> %vuqadd1.i)
- %0 = extractelement <1 x i64> %vuqadd2.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsqadd.v1i64(<1 x i64>, <1 x i64>)
-
-define i8 @test_vsqaddb_u8(i8 %a, i8 %b) {
-; CHECK: test_vsqaddb_u8
-; CHECK: usqadd {{b[0-9]+}}, {{b[0-9]+}}
-entry:
- %vsqadd.i = insertelement <1 x i8> undef, i8 %a, i32 0
- %vsqadd1.i = insertelement <1 x i8> undef, i8 %b, i32 0
- %vsqadd2.i = call <1 x i8> @llvm.aarch64.neon.vsqadd.v1i8(<1 x i8> %vsqadd.i, <1 x i8> %vsqadd1.i)
- %0 = extractelement <1 x i8> %vsqadd2.i, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vuqadd.v1i8(<1 x i8>, <1 x i8>)
-
-define i16 @test_vsqaddh_u16(i16 %a, i16 %b) {
-; CHECK: test_vsqaddh_u16
-; CHECK: usqadd {{h[0-9]+}}, {{h[0-9]+}}
-entry:
- %vsqadd.i = insertelement <1 x i16> undef, i16 %a, i32 0
- %vsqadd1.i = insertelement <1 x i16> undef, i16 %b, i32 0
- %vsqadd2.i = call <1 x i16> @llvm.aarch64.neon.vsqadd.v1i16(<1 x i16> %vsqadd.i, <1 x i16> %vsqadd1.i)
- %0 = extractelement <1 x i16> %vsqadd2.i, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vuqadd.v1i16(<1 x i16>, <1 x i16>)
-
-define i32 @test_vsqadds_u32(i32 %a, i32 %b) {
-; CHECK: test_vsqadds_u32
-; CHECK: usqadd {{s[0-9]+}}, {{s[0-9]+}}
-entry:
- %vsqadd.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vsqadd1.i = insertelement <1 x i32> undef, i32 %b, i32 0
- %vsqadd2.i = call <1 x i32> @llvm.aarch64.neon.vsqadd.v1i32(<1 x i32> %vsqadd.i, <1 x i32> %vsqadd1.i)
- %0 = extractelement <1 x i32> %vsqadd2.i, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vuqadd.v1i32(<1 x i32>, <1 x i32>)
-
-define i64 @test_vsqaddd_u64(i64 %a, i64 %b) {
-; CHECK: test_vsqaddd_u64
-; CHECK: usqadd {{d[0-9]+}}, {{d[0-9]+}}
-entry:
- %vsqadd.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsqadd1.i = insertelement <1 x i64> undef, i64 %b, i32 0
- %vsqadd2.i = call <1 x i64> @llvm.aarch64.neon.vsqadd.v1i64(<1 x i64> %vsqadd.i, <1 x i64> %vsqadd1.i)
- %0 = extractelement <1 x i64> %vsqadd2.i, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vuqadd.v1i64(<1 x i64>, <1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll b/test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll
deleted file mode 100644
index 0fd67dfa901c..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll
+++ /dev/null
@@ -1,94 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_uqrshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_uqrshl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: uqrshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
-
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sqrshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sqrshl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: sqrshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vqrshlu.v1i8(<1 x i8>, <1 x i8>)
-declare <1 x i8> @llvm.aarch64.neon.vqrshls.v1i8(<1 x i8>, <1 x i8>)
-
-define <1 x i8> @test_uqrshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_uqrshl_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqrshlu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: uqrshl {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
-
- ret <1 x i8> %tmp1
-}
-
-define <1 x i8> @test_sqrshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_sqrshl_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqrshls.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: sqrshl {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
- ret <1 x i8> %tmp1
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vqrshlu.v1i16(<1 x i16>, <1 x i16>)
-declare <1 x i16> @llvm.aarch64.neon.vqrshls.v1i16(<1 x i16>, <1 x i16>)
-
-define <1 x i16> @test_uqrshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_uqrshl_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqrshlu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: uqrshl {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
-
- ret <1 x i16> %tmp1
-}
-
-define <1 x i16> @test_sqrshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_sqrshl_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqrshls.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: sqrshl {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
- ret <1 x i16> %tmp1
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vqrshlu.v1i32(<1 x i32>, <1 x i32>)
-declare <1 x i32> @llvm.aarch64.neon.vqrshls.v1i32(<1 x i32>, <1 x i32>)
-
-define <1 x i32> @test_uqrshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_uqrshl_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqrshlu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: uqrshl {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
-
- ret <1 x i32> %tmp1
-}
-
-define <1 x i32> @test_sqrshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_sqrshl_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqrshls.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: sqrshl {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
- ret <1 x i32> %tmp1
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vqrshlu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vqrshls.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_uqrshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_uqrshl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqrshlu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: uqrshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
-
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sqrshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sqrshl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqrshls.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: sqrshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-
-
diff --git a/test/CodeGen/AArch64/neon-scalar-saturating-shift.ll b/test/CodeGen/AArch64/neon-scalar-saturating-shift.ll
deleted file mode 100644
index 8fdea24a36d7..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-saturating-shift.ll
+++ /dev/null
@@ -1,88 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
-
-declare <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_uqshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_uqshl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: uqshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sqshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sqshl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: sqshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vqshlu.v1i8(<1 x i8>, <1 x i8>)
-declare <1 x i8> @llvm.aarch64.neon.vqshls.v1i8(<1 x i8>, <1 x i8>)
-
-define <1 x i8> @test_uqshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_uqshl_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqshlu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: uqshl {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
- ret <1 x i8> %tmp1
-}
-
-define <1 x i8> @test_sqshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
-; CHECK: test_sqshl_v1i8_aarch64:
- %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqshls.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
-;CHECK: sqshl {{b[0-31]+}}, {{b[0-31]+}}, {{b[0-31]+}}
- ret <1 x i8> %tmp1
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vqshlu.v1i16(<1 x i16>, <1 x i16>)
-declare <1 x i16> @llvm.aarch64.neon.vqshls.v1i16(<1 x i16>, <1 x i16>)
-
-define <1 x i16> @test_uqshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_uqshl_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqshlu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: uqshl {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
- ret <1 x i16> %tmp1
-}
-
-define <1 x i16> @test_sqshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
-; CHECK: test_sqshl_v1i16_aarch64:
- %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqshls.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
-;CHECK: sqshl {{h[0-31]+}}, {{h[0-31]+}}, {{h[0-31]+}}
- ret <1 x i16> %tmp1
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vqshlu.v1i32(<1 x i32>, <1 x i32>)
-declare <1 x i32> @llvm.aarch64.neon.vqshls.v1i32(<1 x i32>, <1 x i32>)
-
-define <1 x i32> @test_uqshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_uqshl_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqshlu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: uqshl {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
- ret <1 x i32> %tmp1
-}
-
-define <1 x i32> @test_sqshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
-; CHECK: test_sqshl_v1i32_aarch64:
- %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqshls.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
-;CHECK: sqshl {{s[0-31]+}}, {{s[0-31]+}}, {{s[0-31]+}}
- ret <1 x i32> %tmp1
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vqshlu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vqshls.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_uqshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_uqshl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqshlu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: uqshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sqshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sqshl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqshls.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-;CHECK: sqshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-
diff --git a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
deleted file mode 100644
index 62243618171a..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
+++ /dev/null
@@ -1,531 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define i64 @test_vshrd_n_s64(i64 %a) {
-; CHECK: test_vshrd_n_s64
-; CHECK: sshr {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsshr = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsshr1 = call <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64> %vsshr, i32 63)
- %0 = extractelement <1 x i64> %vsshr1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64>, i32)
-
-define i64 @test_vshrd_n_u64(i64 %a) {
-; CHECK: test_vshrd_n_u64
-; CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vushr = insertelement <1 x i64> undef, i64 %a, i32 0
- %vushr1 = call <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64> %vushr, i32 63)
- %0 = extractelement <1 x i64> %vushr1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64>, i32)
-
-define i64 @test_vrshrd_n_s64(i64 %a) {
-; CHECK: test_vrshrd_n_s64
-; CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsrshr = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsrshr1 = call <1 x i64> @llvm.aarch64.neon.vsrshr.v1i64(<1 x i64> %vsrshr, i32 63)
- %0 = extractelement <1 x i64> %vsrshr1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsrshr.v1i64(<1 x i64>, i32)
-
-define i64 @test_vrshrd_n_u64(i64 %a) {
-; CHECK: test_vrshrd_n_u64
-; CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vurshr = insertelement <1 x i64> undef, i64 %a, i32 0
- %vurshr1 = call <1 x i64> @llvm.aarch64.neon.vurshr.v1i64(<1 x i64> %vurshr, i32 63)
- %0 = extractelement <1 x i64> %vurshr1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vurshr.v1i64(<1 x i64>, i32)
-
-define i64 @test_vsrad_n_s64(i64 %a, i64 %b) {
-; CHECK: test_vsrad_n_s64
-; CHECK: ssra {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vssra = insertelement <1 x i64> undef, i64 %a, i32 0
- %vssra1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vssra2 = call <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64> %vssra, <1 x i64> %vssra1, i32 63)
- %0 = extractelement <1 x i64> %vssra2, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64>, <1 x i64>, i32)
-
-define i64 @test_vsrad_n_u64(i64 %a, i64 %b) {
-; CHECK: test_vsrad_n_u64
-; CHECK: usra {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vusra = insertelement <1 x i64> undef, i64 %a, i32 0
- %vusra1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vusra2 = call <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64> %vusra, <1 x i64> %vusra1, i32 63)
- %0 = extractelement <1 x i64> %vusra2, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64>, <1 x i64>, i32)
-
-define i64 @test_vrsrad_n_s64(i64 %a, i64 %b) {
-; CHECK: test_vrsrad_n_s64
-; CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsrsra = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsrsra1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vsrsra2 = call <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64> %vsrsra, <1 x i64> %vsrsra1, i32 63)
- %0 = extractelement <1 x i64> %vsrsra2, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64>, <1 x i64>, i32)
-
-define i64 @test_vrsrad_n_u64(i64 %a, i64 %b) {
-; CHECK: test_vrsrad_n_u64
-; CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vursra = insertelement <1 x i64> undef, i64 %a, i32 0
- %vursra1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vursra2 = call <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64> %vursra, <1 x i64> %vursra1, i32 63)
- %0 = extractelement <1 x i64> %vursra2, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64>, <1 x i64>, i32)
-
-define i64 @test_vshld_n_s64(i64 %a) {
-; CHECK: test_vshld_n_s64
-; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
- %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
- %0 = extractelement <1 x i64> %vshl1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64>, i32)
-
-define i64 @test_vshld_n_u64(i64 %a) {
-; CHECK: test_vshld_n_u64
-; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
- %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
- %0 = extractelement <1 x i64> %vshl1, i32 0
- ret i64 %0
-}
-
-define i8 @test_vqshlb_n_s8(i8 %a) {
-; CHECK: test_vqshlb_n_s8
-; CHECK: sqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
-entry:
- %vsqshl = insertelement <1 x i8> undef, i8 %a, i32 0
- %vsqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8> %vsqshl, i32 7)
- %0 = extractelement <1 x i8> %vsqshl1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8>, i32)
-
-define i16 @test_vqshlh_n_s16(i16 %a) {
-; CHECK: test_vqshlh_n_s16
-; CHECK: sqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
-entry:
- %vsqshl = insertelement <1 x i16> undef, i16 %a, i32 0
- %vsqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16> %vsqshl, i32 15)
- %0 = extractelement <1 x i16> %vsqshl1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16>, i32)
-
-define i32 @test_vqshls_n_s32(i32 %a) {
-; CHECK: test_vqshls_n_s32
-; CHECK: sqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
-entry:
- %vsqshl = insertelement <1 x i32> undef, i32 %a, i32 0
- %vsqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32> %vsqshl, i32 31)
- %0 = extractelement <1 x i32> %vsqshl1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32>, i32)
-
-define i64 @test_vqshld_n_s64(i64 %a) {
-; CHECK: test_vqshld_n_s64
-; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsqshl = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64> %vsqshl, i32 63)
- %0 = extractelement <1 x i64> %vsqshl1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64>, i32)
-
-define i8 @test_vqshlb_n_u8(i8 %a) {
-; CHECK: test_vqshlb_n_u8
-; CHECK: uqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
-entry:
- %vuqshl = insertelement <1 x i8> undef, i8 %a, i32 0
- %vuqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8> %vuqshl, i32 7)
- %0 = extractelement <1 x i8> %vuqshl1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8>, i32)
-
-define i16 @test_vqshlh_n_u16(i16 %a) {
-; CHECK: test_vqshlh_n_u16
-; CHECK: uqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
-entry:
- %vuqshl = insertelement <1 x i16> undef, i16 %a, i32 0
- %vuqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16> %vuqshl, i32 15)
- %0 = extractelement <1 x i16> %vuqshl1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16>, i32)
-
-define i32 @test_vqshls_n_u32(i32 %a) {
-; CHECK: test_vqshls_n_u32
-; CHECK: uqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
-entry:
- %vuqshl = insertelement <1 x i32> undef, i32 %a, i32 0
- %vuqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32> %vuqshl, i32 31)
- %0 = extractelement <1 x i32> %vuqshl1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32>, i32)
-
-define i64 @test_vqshld_n_u64(i64 %a) {
-; CHECK: test_vqshld_n_u64
-; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vuqshl = insertelement <1 x i64> undef, i64 %a, i32 0
- %vuqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64> %vuqshl, i32 63)
- %0 = extractelement <1 x i64> %vuqshl1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64>, i32)
-
-define i8 @test_vqshlub_n_s8(i8 %a) {
-; CHECK: test_vqshlub_n_s8
-; CHECK: sqshlu {{b[0-9]+}}, {{b[0-9]+}}, #7
-entry:
- %vsqshlu = insertelement <1 x i8> undef, i8 %a, i32 0
- %vsqshlu1 = call <1 x i8> @llvm.aarch64.neon.vsqshlu.v1i8(<1 x i8> %vsqshlu, i32 7)
- %0 = extractelement <1 x i8> %vsqshlu1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vsqshlu.v1i8(<1 x i8>, i32)
-
-define i16 @test_vqshluh_n_s16(i16 %a) {
-; CHECK: test_vqshluh_n_s16
-; CHECK: sqshlu {{h[0-9]+}}, {{h[0-9]+}}, #15
-entry:
- %vsqshlu = insertelement <1 x i16> undef, i16 %a, i32 0
- %vsqshlu1 = call <1 x i16> @llvm.aarch64.neon.vsqshlu.v1i16(<1 x i16> %vsqshlu, i32 15)
- %0 = extractelement <1 x i16> %vsqshlu1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vsqshlu.v1i16(<1 x i16>, i32)
-
-define i32 @test_vqshlus_n_s32(i32 %a) {
-; CHECK: test_vqshlus_n_s32
-; CHECK: sqshlu {{s[0-9]+}}, {{s[0-9]+}}, #31
-entry:
- %vsqshlu = insertelement <1 x i32> undef, i32 %a, i32 0
- %vsqshlu1 = call <1 x i32> @llvm.aarch64.neon.vsqshlu.v1i32(<1 x i32> %vsqshlu, i32 31)
- %0 = extractelement <1 x i32> %vsqshlu1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vsqshlu.v1i32(<1 x i32>, i32)
-
-define i64 @test_vqshlud_n_s64(i64 %a) {
-; CHECK: test_vqshlud_n_s64
-; CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsqshlu = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsqshlu1 = call <1 x i64> @llvm.aarch64.neon.vsqshlu.v1i64(<1 x i64> %vsqshlu, i32 63)
- %0 = extractelement <1 x i64> %vsqshlu1, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsqshlu.v1i64(<1 x i64>, i32)
-
-define i64 @test_vsrid_n_s64(i64 %a, i64 %b) {
-; CHECK: test_vsrid_n_s64
-; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
- %0 = extractelement <1 x i64> %vsri2, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64>, <1 x i64>, i32)
-
-define i64 @test_vsrid_n_u64(i64 %a, i64 %b) {
-; CHECK: test_vsrid_n_u64
-; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
- %0 = extractelement <1 x i64> %vsri2, i32 0
- ret i64 %0
-}
-
-define i64 @test_vslid_n_s64(i64 %a, i64 %b) {
-; CHECK: test_vslid_n_s64
-; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vsli2 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
- %0 = extractelement <1 x i64> %vsli2, i32 0
- ret i64 %0
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64>, <1 x i64>, i32)
-
-define i64 @test_vslid_n_u64(i64 %a, i64 %b) {
-; CHECK: test_vslid_n_u64
-; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
-entry:
- %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
- %vsli2 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
- %0 = extractelement <1 x i64> %vsli2, i32 0
- ret i64 %0
-}
-
-define i8 @test_vqshrnh_n_s16(i16 %a) {
-; CHECK: test_vqshrnh_n_s16
-; CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
-entry:
- %vsqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
- %vsqshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16> %vsqshrn, i32 8)
- %0 = extractelement <1 x i8> %vsqshrn1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16>, i32)
-
-define i16 @test_vqshrns_n_s32(i32 %a) {
-; CHECK: test_vqshrns_n_s32
-; CHECK: sqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
-entry:
- %vsqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
- %vsqshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32> %vsqshrn, i32 16)
- %0 = extractelement <1 x i16> %vsqshrn1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32>, i32)
-
-define i32 @test_vqshrnd_n_s64(i64 %a) {
-; CHECK: test_vqshrnd_n_s64
-; CHECK: sqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
-entry:
- %vsqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsqshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64> %vsqshrn, i32 32)
- %0 = extractelement <1 x i32> %vsqshrn1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64>, i32)
-
-define i8 @test_vqshrnh_n_u16(i16 %a) {
-; CHECK: test_vqshrnh_n_u16
-; CHECK: uqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
-entry:
- %vuqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
- %vuqshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16> %vuqshrn, i32 8)
- %0 = extractelement <1 x i8> %vuqshrn1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16>, i32)
-
-define i16 @test_vqshrns_n_u32(i32 %a) {
-; CHECK: test_vqshrns_n_u32
-; CHECK: uqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
-entry:
- %vuqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
- %vuqshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32> %vuqshrn, i32 16)
- %0 = extractelement <1 x i16> %vuqshrn1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32>, i32)
-
-define i32 @test_vqshrnd_n_u64(i64 %a) {
-; CHECK: test_vqshrnd_n_u64
-; CHECK: uqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
-entry:
- %vuqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
- %vuqshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64> %vuqshrn, i32 32)
- %0 = extractelement <1 x i32> %vuqshrn1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64>, i32)
-
-define i8 @test_vqrshrnh_n_s16(i16 %a) {
-; CHECK: test_vqrshrnh_n_s16
-; CHECK: sqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
-entry:
- %vsqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
- %vsqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16> %vsqrshrn, i32 8)
- %0 = extractelement <1 x i8> %vsqrshrn1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16>, i32)
-
-define i16 @test_vqrshrns_n_s32(i32 %a) {
-; CHECK: test_vqrshrns_n_s32
-; CHECK: sqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
-entry:
- %vsqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
- %vsqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32> %vsqrshrn, i32 16)
- %0 = extractelement <1 x i16> %vsqrshrn1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32>, i32)
-
-define i32 @test_vqrshrnd_n_s64(i64 %a) {
-; CHECK: test_vqrshrnd_n_s64
-; CHECK: sqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
-entry:
- %vsqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64> %vsqrshrn, i32 32)
- %0 = extractelement <1 x i32> %vsqrshrn1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64>, i32)
-
-define i8 @test_vqrshrnh_n_u16(i16 %a) {
-; CHECK: test_vqrshrnh_n_u16
-; CHECK: uqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
-entry:
- %vuqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
- %vuqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16> %vuqrshrn, i32 8)
- %0 = extractelement <1 x i8> %vuqrshrn1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16>, i32)
-
-define i16 @test_vqrshrns_n_u32(i32 %a) {
-; CHECK: test_vqrshrns_n_u32
-; CHECK: uqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
-entry:
- %vuqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
- %vuqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %vuqrshrn, i32 16)
- %0 = extractelement <1 x i16> %vuqrshrn1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32>, i32)
-
-define i32 @test_vqrshrnd_n_u64(i64 %a) {
-; CHECK: test_vqrshrnd_n_u64
-; CHECK: uqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
-entry:
- %vuqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
- %vuqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64> %vuqrshrn, i32 32)
- %0 = extractelement <1 x i32> %vuqrshrn1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64>, i32)
-
-define i8 @test_vqshrunh_n_s16(i16 %a) {
-; CHECK: test_vqshrunh_n_s16
-; CHECK: sqshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
-entry:
- %vsqshrun = insertelement <1 x i16> undef, i16 %a, i32 0
- %vsqshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16> %vsqshrun, i32 8)
- %0 = extractelement <1 x i8> %vsqshrun1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16>, i32)
-
-define i16 @test_vqshruns_n_s32(i32 %a) {
-; CHECK: test_vqshruns_n_s32
-; CHECK: sqshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
-entry:
- %vsqshrun = insertelement <1 x i32> undef, i32 %a, i32 0
- %vsqshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32> %vsqshrun, i32 16)
- %0 = extractelement <1 x i16> %vsqshrun1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32>, i32)
-
-define i32 @test_vqshrund_n_s64(i64 %a) {
-; CHECK: test_vqshrund_n_s64
-; CHECK: sqshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
-entry:
- %vsqshrun = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsqshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64> %vsqshrun, i32 32)
- %0 = extractelement <1 x i32> %vsqshrun1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64>, i32)
-
-define i8 @test_vqrshrunh_n_s16(i16 %a) {
-; CHECK: test_vqrshrunh_n_s16
-; CHECK: sqrshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
-entry:
- %vsqrshrun = insertelement <1 x i16> undef, i16 %a, i32 0
- %vsqrshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16> %vsqrshrun, i32 8)
- %0 = extractelement <1 x i8> %vsqrshrun1, i32 0
- ret i8 %0
-}
-
-declare <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16>, i32)
-
-define i16 @test_vqrshruns_n_s32(i32 %a) {
-; CHECK: test_vqrshruns_n_s32
-; CHECK: sqrshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
-entry:
- %vsqrshrun = insertelement <1 x i32> undef, i32 %a, i32 0
- %vsqrshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32> %vsqrshrun, i32 16)
- %0 = extractelement <1 x i16> %vsqrshrun1, i32 0
- ret i16 %0
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32>, i32)
-
-define i32 @test_vqrshrund_n_s64(i64 %a) {
-; CHECK: test_vqrshrund_n_s64
-; CHECK: sqrshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
-entry:
- %vsqrshrun = insertelement <1 x i64> undef, i64 %a, i32 0
- %vsqrshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64> %vsqrshrun, i32 32)
- %0 = extractelement <1 x i32> %vsqrshrun1, i32 0
- ret i32 %0
-}
-
-declare <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64>, i32)
diff --git a/test/CodeGen/AArch64/neon-scalar-shift.ll b/test/CodeGen/AArch64/neon-scalar-shift.ll
deleted file mode 100644
index 1222be50cf4b..000000000000
--- a/test/CodeGen/AArch64/neon-scalar-shift.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_ushl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_ushl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-; CHECK: ushl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
-
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sshl_v1i64:
- %tmp1 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
-; CHECK: sshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-declare <1 x i64> @llvm.aarch64.neon.vshldu(<1 x i64>, <1 x i64>)
-declare <1 x i64> @llvm.aarch64.neon.vshlds(<1 x i64>, <1 x i64>)
-
-define <1 x i64> @test_ushl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_ushl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vshldu(<1 x i64> %lhs, <1 x i64> %rhs)
-; CHECK: ushl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-define <1 x i64> @test_sshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
-; CHECK: test_sshl_v1i64_aarch64:
- %tmp1 = call <1 x i64> @llvm.aarch64.neon.vshlds(<1 x i64> %lhs, <1 x i64> %rhs)
-; CHECK: sshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
- ret <1 x i64> %tmp1
-}
-
-
diff --git a/test/CodeGen/AArch64/neon-shift-left-long.ll b/test/CodeGen/AArch64/neon-shift-left-long.ll
index d45c47685b0f..d10d551805a6 100644
--- a/test/CodeGen/AArch64/neon-shift-left-long.ll
+++ b/test/CodeGen/AArch64/neon-shift-left-long.ll
@@ -191,3 +191,13 @@ define <2 x i64> @test_ushll2_shl0_v4i32(<4 x i32> %a) {
%tmp = zext <2 x i32> %1 to <2 x i64>
ret <2 x i64> %tmp
}
+
+define <8 x i16> @test_ushll_cmp(<8 x i8> %a, <8 x i8> %b) #0 {
+; CHECK: test_ushll_cmp:
+; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: ushll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #0
+ %cmp.i = icmp eq <8 x i8> %a, %b
+ %vcgtz.i.i = sext <8 x i1> %cmp.i to <8 x i8>
+ %vmovl.i.i.i = zext <8 x i8> %vcgtz.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i.i
+}
diff --git a/test/CodeGen/AArch64/neon-shift.ll b/test/CodeGen/AArch64/neon-shift.ll
deleted file mode 100644
index 33b04ceb4895..000000000000
--- a/test/CodeGen/AArch64/neon-shift.ll
+++ /dev/null
@@ -1,171 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-declare <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8>, <8 x i8>)
-declare <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8>, <8 x i8>)
-
-define <8 x i8> @test_uqshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_uqshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: ushl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-define <8 x i8> @test_sqshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
-; CHECK: test_sqshl_v8i8:
- %tmp1 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
-; CHECK: sshl v0.8b, v0.8b, v1.8b
- ret <8 x i8> %tmp1
-}
-
-declare <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8>, <16 x i8>)
-
-define <16 x i8> @test_ushl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_ushl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: ushl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @test_sshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
-; CHECK: test_sshl_v16i8:
- %tmp1 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
-; CHECK: sshl v0.16b, v0.16b, v1.16b
- ret <16 x i8> %tmp1
-}
-
-declare <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16>, <4 x i16>)
-
-define <4 x i16> @test_ushl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_ushl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: ushl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-define <4 x i16> @test_sshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: test_sshl_v4i16:
- %tmp1 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
-; CHECK: sshl v0.4h, v0.4h, v1.4h
- ret <4 x i16> %tmp1
-}
-
-declare <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16>, <8 x i16>)
-declare <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16>, <8 x i16>)
-
-define <8 x i16> @test_ushl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_ushl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: ushl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-define <8 x i16> @test_sshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
-; CHECK: test_sshl_v8i16:
- %tmp1 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
-; CHECK: sshl v0.8h, v0.8h, v1.8h
- ret <8 x i16> %tmp1
-}
-
-declare <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32>, <2 x i32>)
-declare <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32>, <2 x i32>)
-
-define <2 x i32> @test_ushl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_ushl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: ushl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-define <2 x i32> @test_sshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
-; CHECK: test_sshl_v2i32:
- %tmp1 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
-; CHECK: sshl v0.2s, v0.2s, v1.2s
- ret <2 x i32> %tmp1
-}
-
-declare <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32>, <4 x i32>)
-
-define <4 x i32> @test_ushl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_ushl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: ushl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-define <4 x i32> @test_sshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
-; CHECK: test_sshl_v4i32:
- %tmp1 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
-; CHECK: sshl v0.4s, v0.4s, v1.4s
- ret <4 x i32> %tmp1
-}
-
-declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>)
-
-define <2 x i64> @test_ushl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_ushl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: ushl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-define <2 x i64> @test_sshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
-; CHECK: test_sshl_v2i64:
- %tmp1 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
-; CHECK: sshl v0.2d, v0.2d, v1.2d
- ret <2 x i64> %tmp1
-}
-
-
-define <8 x i8> @test_shl_v8i8(<8 x i8> %a) {
-; CHECK: test_shl_v8i8:
-; CHECK: shl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %tmp = shl <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <8 x i8> %tmp
-}
-
-define <4 x i16> @test_shl_v4i16(<4 x i16> %a) {
-; CHECK: test_shl_v4i16:
-; CHECK: shl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %tmp = shl <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
- ret <4 x i16> %tmp
-}
-
-define <2 x i32> @test_shl_v2i32(<2 x i32> %a) {
-; CHECK: test_shl_v2i32:
-; CHECK: shl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %tmp = shl <2 x i32> %a, <i32 3, i32 3>
- ret <2 x i32> %tmp
-}
-
-define <16 x i8> @test_shl_v16i8(<16 x i8> %a) {
-; CHECK: test_shl_v16i8:
-; CHECK: shl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %tmp = shl <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <16 x i8> %tmp
-}
-
-define <8 x i16> @test_shl_v8i16(<8 x i16> %a) {
-; CHECK: test_shl_v8i16:
-; CHECK: shl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %tmp = shl <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- ret <8 x i16> %tmp
-}
-
-define <4 x i32> @test_shl_v4i32(<4 x i32> %a) {
-; CHECK: test_shl_v4i32:
-; CHECK: shl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %tmp = shl <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
- ret <4 x i32> %tmp
-}
-
-define <2 x i64> @test_shl_v2i64(<2 x i64> %a) {
-; CHECK: test_shl_v2i64:
-; CHECK: shl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #63
- %tmp = shl <2 x i64> %a, <i64 63, i64 63>
- ret <2 x i64> %tmp
-}
-
diff --git a/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll b/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
deleted file mode 100644
index d5557c0c8562..000000000000
--- a/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
+++ /dev/null
@@ -1,2314 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define void @test_ldst1_v16i8(<16 x i8>* %ptr, <16 x i8>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v16i8:
-; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
- %tmp = load <16 x i8>* %ptr
- store <16 x i8> %tmp, <16 x i8>* %ptr2
- ret void
-}
-
-define void @test_ldst1_v8i16(<8 x i16>* %ptr, <8 x i16>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v8i16:
-; CHECK: ld1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
- %tmp = load <8 x i16>* %ptr
- store <8 x i16> %tmp, <8 x i16>* %ptr2
- ret void
-}
-
-define void @test_ldst1_v4i32(<4 x i32>* %ptr, <4 x i32>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v4i32:
-; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %tmp = load <4 x i32>* %ptr
- store <4 x i32> %tmp, <4 x i32>* %ptr2
- ret void
-}
-
-define void @test_ldst1_v2i64(<2 x i64>* %ptr, <2 x i64>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v2i64:
-; CHECK: ld1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %tmp = load <2 x i64>* %ptr
- store <2 x i64> %tmp, <2 x i64>* %ptr2
- ret void
-}
-
-define void @test_ldst1_v8i8(<8 x i8>* %ptr, <8 x i8>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v8i8:
-; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
- %tmp = load <8 x i8>* %ptr
- store <8 x i8> %tmp, <8 x i8>* %ptr2
- ret void
-}
-
-define void @test_ldst1_v4i16(<4 x i16>* %ptr, <4 x i16>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v4i16:
-; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
- %tmp = load <4 x i16>* %ptr
- store <4 x i16> %tmp, <4 x i16>* %ptr2
- ret void
-}
-
-define void @test_ldst1_v2i32(<2 x i32>* %ptr, <2 x i32>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v2i32:
-; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %tmp = load <2 x i32>* %ptr
- store <2 x i32> %tmp, <2 x i32>* %ptr2
- ret void
-}
-
-define void @test_ldst1_v1i64(<1 x i64>* %ptr, <1 x i64>* %ptr2) {
-; CHECK-LABEL: test_ldst1_v1i64:
-; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
-; CHECK: st1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %tmp = load <1 x i64>* %ptr
- store <1 x i64> %tmp, <1 x i64>* %ptr2
- ret void
-}
-
-%struct.int8x16x2_t = type { [2 x <16 x i8>] }
-%struct.int16x8x2_t = type { [2 x <8 x i16>] }
-%struct.int32x4x2_t = type { [2 x <4 x i32>] }
-%struct.int64x2x2_t = type { [2 x <2 x i64>] }
-%struct.float32x4x2_t = type { [2 x <4 x float>] }
-%struct.float64x2x2_t = type { [2 x <2 x double>] }
-%struct.int8x8x2_t = type { [2 x <8 x i8>] }
-%struct.int16x4x2_t = type { [2 x <4 x i16>] }
-%struct.int32x2x2_t = type { [2 x <2 x i32>] }
-%struct.int64x1x2_t = type { [2 x <1 x i64>] }
-%struct.float32x2x2_t = type { [2 x <2 x float>] }
-%struct.float64x1x2_t = type { [2 x <1 x double>] }
-%struct.int8x16x3_t = type { [3 x <16 x i8>] }
-%struct.int16x8x3_t = type { [3 x <8 x i16>] }
-%struct.int32x4x3_t = type { [3 x <4 x i32>] }
-%struct.int64x2x3_t = type { [3 x <2 x i64>] }
-%struct.float32x4x3_t = type { [3 x <4 x float>] }
-%struct.float64x2x3_t = type { [3 x <2 x double>] }
-%struct.int8x8x3_t = type { [3 x <8 x i8>] }
-%struct.int16x4x3_t = type { [3 x <4 x i16>] }
-%struct.int32x2x3_t = type { [3 x <2 x i32>] }
-%struct.int64x1x3_t = type { [3 x <1 x i64>] }
-%struct.float32x2x3_t = type { [3 x <2 x float>] }
-%struct.float64x1x3_t = type { [3 x <1 x double>] }
-%struct.int8x16x4_t = type { [4 x <16 x i8>] }
-%struct.int16x8x4_t = type { [4 x <8 x i16>] }
-%struct.int32x4x4_t = type { [4 x <4 x i32>] }
-%struct.int64x2x4_t = type { [4 x <2 x i64>] }
-%struct.float32x4x4_t = type { [4 x <4 x float>] }
-%struct.float64x2x4_t = type { [4 x <2 x double>] }
-%struct.int8x8x4_t = type { [4 x <8 x i8>] }
-%struct.int16x4x4_t = type { [4 x <4 x i16>] }
-%struct.int32x2x4_t = type { [4 x <2 x i32>] }
-%struct.int64x1x4_t = type { [4 x <1 x i64>] }
-%struct.float32x2x4_t = type { [4 x <2 x float>] }
-%struct.float64x1x4_t = type { [4 x <1 x double>] }
-
-
-define <16 x i8> @test_vld1q_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld1q_s8
-; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
- %vld1 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %a, i32 1)
- ret <16 x i8> %vld1
-}
-
-define <8 x i16> @test_vld1q_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld1q_s16
-; CHECK: ld1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld1 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %1, i32 2)
- ret <8 x i16> %vld1
-}
-
-define <4 x i32> @test_vld1q_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld1q_s32
-; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld1 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %1, i32 4)
- ret <4 x i32> %vld1
-}
-
-define <2 x i64> @test_vld1q_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld1q_s64
-; CHECK: ld1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld1 = tail call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %1, i32 8)
- ret <2 x i64> %vld1
-}
-
-define <4 x float> @test_vld1q_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld1q_f32
-; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %1, i32 4)
- ret <4 x float> %vld1
-}
-
-define <2 x double> @test_vld1q_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld1q_f64
-; CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld1 = tail call <2 x double> @llvm.arm.neon.vld1.v2f64(i8* %1, i32 8)
- ret <2 x double> %vld1
-}
-
-define <8 x i8> @test_vld1_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld1_s8
-; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
- %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
- ret <8 x i8> %vld1
-}
-
-define <4 x i16> @test_vld1_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld1_s16
-; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
- ret <4 x i16> %vld1
-}
-
-define <2 x i32> @test_vld1_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld1_s32
-; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld1 = tail call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %1, i32 4)
- ret <2 x i32> %vld1
-}
-
-define <1 x i64> @test_vld1_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld1_s64
-; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld1 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %1, i32 8)
- ret <1 x i64> %vld1
-}
-
-define <2 x float> @test_vld1_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld1_f32
-; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %1, i32 4)
- ret <2 x float> %vld1
-}
-
-define <1 x double> @test_vld1_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld1_f64
-; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld1 = tail call <1 x double> @llvm.arm.neon.vld1.v1f64(i8* %1, i32 8)
- ret <1 x double> %vld1
-}
-
-define <8 x i8> @test_vld1_p8(i8* readonly %a) {
-; CHECK-LABEL: test_vld1_p8
-; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
- %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
- ret <8 x i8> %vld1
-}
-
-define <4 x i16> @test_vld1_p16(i16* readonly %a) {
-; CHECK-LABEL: test_vld1_p16
-; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
- ret <4 x i16> %vld1
-}
-
-define %struct.int8x16x2_t @test_vld2q_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld2q_s8
-; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
- %vld2 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %a, i32 1)
- %vld2.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %vld2.fca.1.extract, 0, 1
- ret %struct.int8x16x2_t %.fca.0.1.insert
-}
-
-define %struct.int16x8x2_t @test_vld2q_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld2q_s16
-; CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld2 = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8* %1, i32 2)
- %vld2.fca.0.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vld2.fca.1.extract, 0, 1
- ret %struct.int16x8x2_t %.fca.0.1.insert
-}
-
-define %struct.int32x4x2_t @test_vld2q_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld2q_s32
-; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8* %1, i32 4)
- %vld2.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vld2.fca.1.extract, 0, 1
- ret %struct.int32x4x2_t %.fca.0.1.insert
-}
-
-define %struct.int64x2x2_t @test_vld2q_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld2q_s64
-; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld2 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8* %1, i32 8)
- %vld2.fca.0.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int64x2x2_t undef, <2 x i64> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x2_t %.fca.0.0.insert, <2 x i64> %vld2.fca.1.extract, 0, 1
- ret %struct.int64x2x2_t %.fca.0.1.insert
-}
-
-define %struct.float32x4x2_t @test_vld2q_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld2q_f32
-; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
- %vld2.fca.0.extract = extractvalue { <4 x float>, <4 x float> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <4 x float>, <4 x float> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vld2.fca.1.extract, 0, 1
- ret %struct.float32x4x2_t %.fca.0.1.insert
-}
-
-define %struct.float64x2x2_t @test_vld2q_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld2q_f64
-; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld2 = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8* %1, i32 8)
- %vld2.fca.0.extract = extractvalue { <2 x double>, <2 x double> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <2 x double>, <2 x double> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.float64x2x2_t undef, <2 x double> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x2_t %.fca.0.0.insert, <2 x double> %vld2.fca.1.extract, 0, 1
- ret %struct.float64x2x2_t %.fca.0.1.insert
-}
-
-define %struct.int8x8x2_t @test_vld2_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld2_s8
-; CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
- %vld2 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8* %a, i32 1)
- %vld2.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vld2.fca.1.extract, 0, 1
- ret %struct.int8x8x2_t %.fca.0.1.insert
-}
-
-define %struct.int16x4x2_t @test_vld2_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld2_s16
-; CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld2 = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8* %1, i32 2)
- %vld2.fca.0.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vld2.fca.1.extract, 0, 1
- ret %struct.int16x4x2_t %.fca.0.1.insert
-}
-
-define %struct.int32x2x2_t @test_vld2_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld2_s32
-; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld2 = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8* %1, i32 4)
- %vld2.fca.0.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vld2.fca.1.extract, 0, 1
- ret %struct.int32x2x2_t %.fca.0.1.insert
-}
-
-define %struct.int64x1x2_t @test_vld2_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld2_s64
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld2 = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %1, i32 8)
- %vld2.fca.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.int64x1x2_t undef, <1 x i64> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x2_t %.fca.0.0.insert, <1 x i64> %vld2.fca.1.extract, 0, 1
- ret %struct.int64x1x2_t %.fca.0.1.insert
-}
-
-define %struct.float32x2x2_t @test_vld2_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld2_f32
-; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld2 = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %1, i32 4)
- %vld2.fca.0.extract = extractvalue { <2 x float>, <2 x float> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <2 x float>, <2 x float> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vld2.fca.1.extract, 0, 1
- ret %struct.float32x2x2_t %.fca.0.1.insert
-}
-
-define %struct.float64x1x2_t @test_vld2_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld2_f64
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld2 = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8* %1, i32 8)
- %vld2.fca.0.extract = extractvalue { <1 x double>, <1 x double> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <1 x double>, <1 x double> } %vld2, 1
- %.fca.0.0.insert = insertvalue %struct.float64x1x2_t undef, <1 x double> %vld2.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x2_t %.fca.0.0.insert, <1 x double> %vld2.fca.1.extract, 0, 1
- ret %struct.float64x1x2_t %.fca.0.1.insert
-}
-
-define %struct.int8x16x3_t @test_vld3q_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld3q_s8
-; CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
- %vld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %a, i32 1)
- %vld3.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int8x16x3_t undef, <16 x i8> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x16x3_t %.fca.0.0.insert, <16 x i8> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x16x3_t %.fca.0.1.insert, <16 x i8> %vld3.fca.2.extract, 0, 2
- ret %struct.int8x16x3_t %.fca.0.2.insert
-}
-
-define %struct.int16x8x3_t @test_vld3q_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld3q_s16
-; CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8* %1, i32 2)
- %vld3.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int16x8x3_t undef, <8 x i16> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x3_t %.fca.0.0.insert, <8 x i16> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x8x3_t %.fca.0.1.insert, <8 x i16> %vld3.fca.2.extract, 0, 2
- ret %struct.int16x8x3_t %.fca.0.2.insert
-}
-
-define %struct.int32x4x3_t @test_vld3q_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld3q_s32
-; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %1, i32 4)
- %vld3.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int32x4x3_t undef, <4 x i32> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x3_t %.fca.0.0.insert, <4 x i32> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x4x3_t %.fca.0.1.insert, <4 x i32> %vld3.fca.2.extract, 0, 2
- ret %struct.int32x4x3_t %.fca.0.2.insert
-}
-
-define %struct.int64x2x3_t @test_vld3q_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld3q_s64
-; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8* %1, i32 8)
- %vld3.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int64x2x3_t undef, <2 x i64> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x3_t %.fca.0.0.insert, <2 x i64> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x2x3_t %.fca.0.1.insert, <2 x i64> %vld3.fca.2.extract, 0, 2
- ret %struct.int64x2x3_t %.fca.0.2.insert
-}
-
-define %struct.float32x4x3_t @test_vld3q_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld3q_f32
-; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8* %1, i32 4)
- %vld3.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.float32x4x3_t undef, <4 x float> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x3_t %.fca.0.0.insert, <4 x float> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x4x3_t %.fca.0.1.insert, <4 x float> %vld3.fca.2.extract, 0, 2
- ret %struct.float32x4x3_t %.fca.0.2.insert
-}
-
-define %struct.float64x2x3_t @test_vld3q_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld3q_f64
-; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8* %1, i32 8)
- %vld3.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.float64x2x3_t undef, <2 x double> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x3_t %.fca.0.0.insert, <2 x double> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x2x3_t %.fca.0.1.insert, <2 x double> %vld3.fca.2.extract, 0, 2
- ret %struct.float64x2x3_t %.fca.0.2.insert
-}
-
-define %struct.int8x8x3_t @test_vld3_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld3_s8
-; CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
- %vld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8* %a, i32 1)
- %vld3.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int8x8x3_t undef, <8 x i8> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x3_t %.fca.0.0.insert, <8 x i8> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x8x3_t %.fca.0.1.insert, <8 x i8> %vld3.fca.2.extract, 0, 2
- ret %struct.int8x8x3_t %.fca.0.2.insert
-}
-
-define %struct.int16x4x3_t @test_vld3_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld3_s16
-; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %1, i32 2)
- %vld3.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int16x4x3_t undef, <4 x i16> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x3_t %.fca.0.0.insert, <4 x i16> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x4x3_t %.fca.0.1.insert, <4 x i16> %vld3.fca.2.extract, 0, 2
- ret %struct.int16x4x3_t %.fca.0.2.insert
-}
-
-define %struct.int32x2x3_t @test_vld3_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld3_s32
-; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8* %1, i32 4)
- %vld3.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int32x2x3_t undef, <2 x i32> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x3_t %.fca.0.0.insert, <2 x i32> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x2x3_t %.fca.0.1.insert, <2 x i32> %vld3.fca.2.extract, 0, 2
- ret %struct.int32x2x3_t %.fca.0.2.insert
-}
-
-define %struct.int64x1x3_t @test_vld3_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld3_s64
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8* %1, i32 8)
- %vld3.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.int64x1x3_t undef, <1 x i64> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x3_t %.fca.0.0.insert, <1 x i64> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x1x3_t %.fca.0.1.insert, <1 x i64> %vld3.fca.2.extract, 0, 2
- ret %struct.int64x1x3_t %.fca.0.2.insert
-}
-
-define %struct.float32x2x3_t @test_vld3_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld3_f32
-; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8* %1, i32 4)
- %vld3.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.float32x2x3_t undef, <2 x float> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x3_t %.fca.0.0.insert, <2 x float> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x2x3_t %.fca.0.1.insert, <2 x float> %vld3.fca.2.extract, 0, 2
- ret %struct.float32x2x3_t %.fca.0.2.insert
-}
-
-define %struct.float64x1x3_t @test_vld3_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld3_f64
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8* %1, i32 8)
- %vld3.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 2
- %.fca.0.0.insert = insertvalue %struct.float64x1x3_t undef, <1 x double> %vld3.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x3_t %.fca.0.0.insert, <1 x double> %vld3.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x1x3_t %.fca.0.1.insert, <1 x double> %vld3.fca.2.extract, 0, 2
- ret %struct.float64x1x3_t %.fca.0.2.insert
-}
-
-define %struct.int8x16x4_t @test_vld4q_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld4q_s8
-; CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
- %vld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %a, i32 1)
- %vld4.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int8x16x4_t undef, <16 x i8> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x16x4_t %.fca.0.0.insert, <16 x i8> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x16x4_t %.fca.0.1.insert, <16 x i8> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int8x16x4_t %.fca.0.2.insert, <16 x i8> %vld4.fca.3.extract, 0, 3
- ret %struct.int8x16x4_t %.fca.0.3.insert
-}
-
-define %struct.int16x8x4_t @test_vld4q_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld4q_s16
-; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %1, i32 2)
- %vld4.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int16x8x4_t undef, <8 x i16> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x4_t %.fca.0.0.insert, <8 x i16> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x8x4_t %.fca.0.1.insert, <8 x i16> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int16x8x4_t %.fca.0.2.insert, <8 x i16> %vld4.fca.3.extract, 0, 3
- ret %struct.int16x8x4_t %.fca.0.3.insert
-}
-
-define %struct.int32x4x4_t @test_vld4q_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld4q_s32
-; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8* %1, i32 4)
- %vld4.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int32x4x4_t undef, <4 x i32> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x4_t %.fca.0.0.insert, <4 x i32> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x4x4_t %.fca.0.1.insert, <4 x i32> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int32x4x4_t %.fca.0.2.insert, <4 x i32> %vld4.fca.3.extract, 0, 3
- ret %struct.int32x4x4_t %.fca.0.3.insert
-}
-
-define %struct.int64x2x4_t @test_vld4q_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld4q_s64
-; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8* %1, i32 8)
- %vld4.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int64x2x4_t undef, <2 x i64> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x4_t %.fca.0.0.insert, <2 x i64> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x2x4_t %.fca.0.1.insert, <2 x i64> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int64x2x4_t %.fca.0.2.insert, <2 x i64> %vld4.fca.3.extract, 0, 3
- ret %struct.int64x2x4_t %.fca.0.3.insert
-}
-
-define %struct.float32x4x4_t @test_vld4q_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld4q_f32
-; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8* %1, i32 4)
- %vld4.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.float32x4x4_t undef, <4 x float> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x4_t %.fca.0.0.insert, <4 x float> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x4x4_t %.fca.0.1.insert, <4 x float> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float32x4x4_t %.fca.0.2.insert, <4 x float> %vld4.fca.3.extract, 0, 3
- ret %struct.float32x4x4_t %.fca.0.3.insert
-}
-
-define %struct.float64x2x4_t @test_vld4q_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld4q_f64
-; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8* %1, i32 8)
- %vld4.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.float64x2x4_t undef, <2 x double> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x4_t %.fca.0.0.insert, <2 x double> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x2x4_t %.fca.0.1.insert, <2 x double> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float64x2x4_t %.fca.0.2.insert, <2 x double> %vld4.fca.3.extract, 0, 3
- ret %struct.float64x2x4_t %.fca.0.3.insert
-}
-
-define %struct.int8x8x4_t @test_vld4_s8(i8* readonly %a) {
-; CHECK-LABEL: test_vld4_s8
-; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
- %vld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %a, i32 1)
- %vld4.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int8x8x4_t undef, <8 x i8> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x4_t %.fca.0.0.insert, <8 x i8> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x8x4_t %.fca.0.1.insert, <8 x i8> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int8x8x4_t %.fca.0.2.insert, <8 x i8> %vld4.fca.3.extract, 0, 3
- ret %struct.int8x8x4_t %.fca.0.3.insert
-}
-
-define %struct.int16x4x4_t @test_vld4_s16(i16* readonly %a) {
-; CHECK-LABEL: test_vld4_s16
-; CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %vld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8* %1, i32 2)
- %vld4.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int16x4x4_t undef, <4 x i16> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x4_t %.fca.0.0.insert, <4 x i16> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x4x4_t %.fca.0.1.insert, <4 x i16> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int16x4x4_t %.fca.0.2.insert, <4 x i16> %vld4.fca.3.extract, 0, 3
- ret %struct.int16x4x4_t %.fca.0.3.insert
-}
-
-define %struct.int32x2x4_t @test_vld4_s32(i32* readonly %a) {
-; CHECK-LABEL: test_vld4_s32
-; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %vld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8* %1, i32 4)
- %vld4.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int32x2x4_t undef, <2 x i32> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x4_t %.fca.0.0.insert, <2 x i32> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x2x4_t %.fca.0.1.insert, <2 x i32> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int32x2x4_t %.fca.0.2.insert, <2 x i32> %vld4.fca.3.extract, 0, 3
- ret %struct.int32x2x4_t %.fca.0.3.insert
-}
-
-define %struct.int64x1x4_t @test_vld4_s64(i64* readonly %a) {
-; CHECK-LABEL: test_vld4_s64
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %vld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8* %1, i32 8)
- %vld4.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.int64x1x4_t undef, <1 x i64> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x4_t %.fca.0.0.insert, <1 x i64> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x1x4_t %.fca.0.1.insert, <1 x i64> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int64x1x4_t %.fca.0.2.insert, <1 x i64> %vld4.fca.3.extract, 0, 3
- ret %struct.int64x1x4_t %.fca.0.3.insert
-}
-
-define %struct.float32x2x4_t @test_vld4_f32(float* readonly %a) {
-; CHECK-LABEL: test_vld4_f32
-; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %vld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8* %1, i32 4)
- %vld4.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.float32x2x4_t undef, <2 x float> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x4_t %.fca.0.0.insert, <2 x float> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x2x4_t %.fca.0.1.insert, <2 x float> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float32x2x4_t %.fca.0.2.insert, <2 x float> %vld4.fca.3.extract, 0, 3
- ret %struct.float32x2x4_t %.fca.0.3.insert
-}
-
-define %struct.float64x1x4_t @test_vld4_f64(double* readonly %a) {
-; CHECK-LABEL: test_vld4_f64
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %vld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8* %1, i32 8)
- %vld4.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 3
- %.fca.0.0.insert = insertvalue %struct.float64x1x4_t undef, <1 x double> %vld4.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x4_t %.fca.0.0.insert, <1 x double> %vld4.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x1x4_t %.fca.0.1.insert, <1 x double> %vld4.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float64x1x4_t %.fca.0.2.insert, <1 x double> %vld4.fca.3.extract, 0, 3
- ret %struct.float64x1x4_t %.fca.0.3.insert
-}
-
-declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32)
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32)
-declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32)
-declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32)
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32)
-declare <2 x double> @llvm.arm.neon.vld1.v2f64(i8*, i32)
-declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*, i32)
-declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32)
-declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32)
-declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32)
-declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32)
-declare <1 x double> @llvm.arm.neon.vld1.v1f64(i8*, i32)
-declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
-declare { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8*, i32)
-declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8*, i32)
-declare { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8*, i32)
-declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32)
-declare { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8*, i32)
-declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8*, i32)
-declare { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8*, i32)
-declare { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8*, i32)
-declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8*, i32)
-declare { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8*, i32)
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8*, i32)
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8*, i32)
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8*, i32)
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8*, i32)
-declare { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8*, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8*, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
-declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8*, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8*, i32)
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8*, i32)
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8*, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8*, i32)
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8*, i32)
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8*, i32)
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8*, i32)
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8*, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8*, i32)
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8*, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8*, i32)
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8*, i32)
-
-define void @test_vst1q_s8(i8* %a, <16 x i8> %b) {
-; CHECK-LABEL: test_vst1q_s8
-; CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- tail call void @llvm.arm.neon.vst1.v16i8(i8* %a, <16 x i8> %b, i32 1)
- ret void
-}
-
-define void @test_vst1q_s16(i16* %a, <8 x i16> %b) {
-; CHECK-LABEL: test_vst1q_s16
-; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst1.v8i16(i8* %1, <8 x i16> %b, i32 2)
- ret void
-}
-
-define void @test_vst1q_s32(i32* %a, <4 x i32> %b) {
-; CHECK-LABEL: test_vst1q_s32
-; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst1.v4i32(i8* %1, <4 x i32> %b, i32 4)
- ret void
-}
-
-define void @test_vst1q_s64(i64* %a, <2 x i64> %b) {
-; CHECK-LABEL: test_vst1q_s64
-; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst1.v2i64(i8* %1, <2 x i64> %b, i32 8)
- ret void
-}
-
-define void @test_vst1q_f32(float* %a, <4 x float> %b) {
-; CHECK-LABEL: test_vst1q_f32
-; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst1.v4f32(i8* %1, <4 x float> %b, i32 4)
- ret void
-}
-
-define void @test_vst1q_f64(double* %a, <2 x double> %b) {
-; CHECK-LABEL: test_vst1q_f64
-; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst1.v2f64(i8* %1, <2 x double> %b, i32 8)
- ret void
-}
-
-define void @test_vst1_s8(i8* %a, <8 x i8> %b) {
-; CHECK-LABEL: test_vst1_s8
-; CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- tail call void @llvm.arm.neon.vst1.v8i8(i8* %a, <8 x i8> %b, i32 1)
- ret void
-}
-
-define void @test_vst1_s16(i16* %a, <4 x i16> %b) {
-; CHECK-LABEL: test_vst1_s16
-; CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst1.v4i16(i8* %1, <4 x i16> %b, i32 2)
- ret void
-}
-
-define void @test_vst1_s32(i32* %a, <2 x i32> %b) {
-; CHECK-LABEL: test_vst1_s32
-; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst1.v2i32(i8* %1, <2 x i32> %b, i32 4)
- ret void
-}
-
-define void @test_vst1_s64(i64* %a, <1 x i64> %b) {
-; CHECK-LABEL: test_vst1_s64
-; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst1.v1i64(i8* %1, <1 x i64> %b, i32 8)
- ret void
-}
-
-define void @test_vst1_f32(float* %a, <2 x float> %b) {
-; CHECK-LABEL: test_vst1_f32
-; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst1.v2f32(i8* %1, <2 x float> %b, i32 4)
- ret void
-}
-
-define void @test_vst1_f64(double* %a, <1 x double> %b) {
-; CHECK-LABEL: test_vst1_f64
-; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst1.v1f64(i8* %1, <1 x double> %b, i32 8)
- ret void
-}
-
-define void @test_vst2q_s8(i8* %a, [2 x <16 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_s8
-; CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %b.coerce, 1
- tail call void @llvm.arm.neon.vst2.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, i32 1)
- ret void
-}
-
-define void @test_vst2q_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_s16
-; CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst2.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, i32 2)
- ret void
-}
-
-define void @test_vst2q_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_s32
-; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst2.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, i32 4)
- ret void
-}
-
-define void @test_vst2q_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_s64
-; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst2.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, i32 8)
- ret void
-}
-
-define void @test_vst2q_f32(float* %a, [2 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_f32
-; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, i32 4)
- ret void
-}
-
-define void @test_vst2q_f64(double* %a, [2 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_f64
-; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst2.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, i32 8)
- ret void
-}
-
-define void @test_vst2_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst2_s8
-; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
- tail call void @llvm.arm.neon.vst2.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, i32 1)
- ret void
-}
-
-define void @test_vst2_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst2_s16
-; CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst2.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, i32 2)
- ret void
-}
-
-define void @test_vst2_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst2_s32
-; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst2.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, i32 4)
- ret void
-}
-
-define void @test_vst2_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst2_s64
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst2.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, i32 8)
- ret void
-}
-
-define void @test_vst2_f32(float* %a, [2 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst2_f32
-; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst2.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, i32 4)
- ret void
-}
-
-define void @test_vst2_f64(double* %a, [2 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst2_f64
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst2.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, i32 8)
- ret void
-}
-
-define void @test_vst3q_s8(i8* %a, [3 x <16 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_s8
-; CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <16 x i8>] %b.coerce, 2
- tail call void @llvm.arm.neon.vst3.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, i32 1)
- ret void
-}
-
-define void @test_vst3q_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_s16
-; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <8 x i16>] %b.coerce, 2
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst3.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, i32 2)
- ret void
-}
-
-define void @test_vst3q_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_s32
-; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x i32>] %b.coerce, 2
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst3.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, i32 4)
- ret void
-}
-
-define void @test_vst3q_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_s64
-; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x i64>] %b.coerce, 2
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst3.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, i32 8)
- ret void
-}
-
-define void @test_vst3q_f32(float* %a, [3 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_f32
-; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x float>] %b.coerce, 2
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst3.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, i32 4)
- ret void
-}
-
-define void @test_vst3q_f64(double* %a, [3 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_f64
-; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x double>] %b.coerce, 2
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst3.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, i32 8)
- ret void
-}
-
-define void @test_vst3_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst3_s8
-; CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <8 x i8>] %b.coerce, 2
- tail call void @llvm.arm.neon.vst3.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, i32 1)
- ret void
-}
-
-define void @test_vst3_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst3_s16
-; CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x i16>] %b.coerce, 2
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst3.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, i32 2)
- ret void
-}
-
-define void @test_vst3_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst3_s32
-; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x i32>] %b.coerce, 2
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst3.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, i32 4)
- ret void
-}
-
-define void @test_vst3_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst3_s64
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <1 x i64>] %b.coerce, 2
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst3.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, i32 8)
- ret void
-}
-
-define void @test_vst3_f32(float* %a, [3 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst3_f32
-; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x float>] %b.coerce, 2
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst3.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, i32 4)
- ret void
-}
-
-define void @test_vst3_f64(double* %a, [3 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst3_f64
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <1 x double>] %b.coerce, 2
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst3.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, i32 8)
- ret void
-}
-
-define void @test_vst4q_s8(i8* %a, [4 x <16 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_s8
-; CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <16 x i8>] %b.coerce, 3
- tail call void @llvm.arm.neon.vst4.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, <16 x i8> %b.coerce.fca.3.extract, i32 1)
- ret void
-}
-
-define void @test_vst4q_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_s16
-; CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <8 x i16>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <8 x i16>] %b.coerce, 3
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst4.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, <8 x i16> %b.coerce.fca.3.extract, i32 2)
- ret void
-}
-
-define void @test_vst4q_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_s32
-; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x i32>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x i32>] %b.coerce, 3
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst4.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, <4 x i32> %b.coerce.fca.3.extract, i32 4)
- ret void
-}
-
-define void @test_vst4q_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_s64
-; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x i64>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x i64>] %b.coerce, 3
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst4.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, <2 x i64> %b.coerce.fca.3.extract, i32 8)
- ret void
-}
-
-define void @test_vst4q_f32(float* %a, [4 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_f32
-; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x float>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x float>] %b.coerce, 3
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst4.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, <4 x float> %b.coerce.fca.3.extract, i32 4)
- ret void
-}
-
-define void @test_vst4q_f64(double* %a, [4 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_f64
-; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x double>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x double>] %b.coerce, 3
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst4.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, <2 x double> %b.coerce.fca.3.extract, i32 8)
- ret void
-}
-
-define void @test_vst4_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst4_s8
-; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <8 x i8>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <8 x i8>] %b.coerce, 3
- tail call void @llvm.arm.neon.vst4.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, <8 x i8> %b.coerce.fca.3.extract, i32 1)
- ret void
-}
-
-define void @test_vst4_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst4_s16
-; CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x i16>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x i16>] %b.coerce, 3
- %1 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst4.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, <4 x i16> %b.coerce.fca.3.extract, i32 2)
- ret void
-}
-
-define void @test_vst4_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst4_s32
-; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x i32>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x i32>] %b.coerce, 3
- %1 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst4.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, <2 x i32> %b.coerce.fca.3.extract, i32 4)
- ret void
-}
-
-define void @test_vst4_s64(i64* %a, [4 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst4_s64
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <1 x i64>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <1 x i64>] %b.coerce, 3
- %1 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst4.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, <1 x i64> %b.coerce.fca.3.extract, i32 8)
- ret void
-}
-
-define void @test_vst4_f32(float* %a, [4 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst4_f32
-; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x float>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x float>] %b.coerce, 3
- %1 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst4.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, <2 x float> %b.coerce.fca.3.extract, i32 4)
- ret void
-}
-
-define void @test_vst4_f64(double* %a, [4 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst4_f64
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <1 x double>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <1 x double>] %b.coerce, 3
- %1 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst4.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, <1 x double> %b.coerce.fca.3.extract, i32 8)
- ret void
-}
-
-declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32)
-declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32)
-declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32)
-declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>, i32)
-declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32)
-declare void @llvm.arm.neon.vst1.v2f64(i8*, <2 x double>, i32)
-declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32)
-declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>, i32)
-declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32)
-declare void @llvm.arm.neon.vst1.v1i64(i8*, <1 x i64>, i32)
-declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32)
-declare void @llvm.arm.neon.vst1.v1f64(i8*, <1 x double>, i32)
-declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
-declare void @llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32)
-declare void @llvm.arm.neon.vst2.v2i64(i8*, <2 x i64>, <2 x i64>, i32)
-declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32)
-declare void @llvm.arm.neon.vst2.v2f64(i8*, <2 x double>, <2 x double>, i32)
-declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
-declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>, i32)
-declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>, i32)
-declare void @llvm.arm.neon.vst2.v1f64(i8*, <1 x double>, <1 x double>, i32)
-declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
-declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.arm.neon.vst3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32)
-declare void @llvm.arm.neon.vst3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32)
-declare void @llvm.arm.neon.vst3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32)
-declare void @llvm.arm.neon.vst3.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32)
-declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
-declare void @llvm.arm.neon.vst3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32)
-declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.arm.neon.vst3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.arm.neon.vst3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32)
-declare void @llvm.arm.neon.vst3.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32)
-declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
-declare void @llvm.arm.neon.vst4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32)
-declare void @llvm.arm.neon.vst4.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32)
-declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
-declare void @llvm.arm.neon.vst4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32)
-declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
-declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32)
-declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32)
-declare void @llvm.arm.neon.vst4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
-
-define %struct.int8x16x2_t @test_vld1q_s8_x2(i8* %a) {
-; CHECK-LABEL: test_vld1q_s8_x2
-; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- %1 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
- %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
- %3 = extractvalue { <16 x i8>, <16 x i8> } %1, 1
- %4 = insertvalue %struct.int8x16x2_t undef, <16 x i8> %2, 0, 0
- %5 = insertvalue %struct.int8x16x2_t %4, <16 x i8> %3, 0, 1
- ret %struct.int8x16x2_t %5
-}
-
-define %struct.int16x8x2_t @test_vld1q_s16_x2(i16* %a) {
-; CHECK-LABEL: test_vld1q_s16_x2
-; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
- %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
- %4 = extractvalue { <8 x i16>, <8 x i16> } %2, 1
- %5 = insertvalue %struct.int16x8x2_t undef, <8 x i16> %3, 0, 0
- %6 = insertvalue %struct.int16x8x2_t %5, <8 x i16> %4, 0, 1
- ret %struct.int16x8x2_t %6
-}
-
-define %struct.int32x4x2_t @test_vld1q_s32_x2(i32* %a) {
-; CHECK-LABEL: test_vld1q_s32_x2
-; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8* %1, i32 4)
- %3 = extractvalue { <4 x i32>, <4 x i32> } %2, 0
- %4 = extractvalue { <4 x i32>, <4 x i32> } %2, 1
- %5 = insertvalue %struct.int32x4x2_t undef, <4 x i32> %3, 0, 0
- %6 = insertvalue %struct.int32x4x2_t %5, <4 x i32> %4, 0, 1
- ret %struct.int32x4x2_t %6
-}
-
-define %struct.int64x2x2_t @test_vld1q_s64_x2(i64* %a) {
-; CHECK-LABEL: test_vld1q_s64_x2
-; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8* %1, i32 8)
- %3 = extractvalue { <2 x i64>, <2 x i64> } %2, 0
- %4 = extractvalue { <2 x i64>, <2 x i64> } %2, 1
- %5 = insertvalue %struct.int64x2x2_t undef, <2 x i64> %3, 0, 0
- %6 = insertvalue %struct.int64x2x2_t %5, <2 x i64> %4, 0, 1
- ret %struct.int64x2x2_t %6
-}
-
-define %struct.float32x4x2_t @test_vld1q_f32_x2(float* %a) {
-; CHECK-LABEL: test_vld1q_f32_x2
-; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8* %1, i32 4)
- %3 = extractvalue { <4 x float>, <4 x float> } %2, 0
- %4 = extractvalue { <4 x float>, <4 x float> } %2, 1
- %5 = insertvalue %struct.float32x4x2_t undef, <4 x float> %3, 0, 0
- %6 = insertvalue %struct.float32x4x2_t %5, <4 x float> %4, 0, 1
- ret %struct.float32x4x2_t %6
-}
-
-
-define %struct.float64x2x2_t @test_vld1q_f64_x2(double* %a) {
-; CHECK-LABEL: test_vld1q_f64_x2
-; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8* %1, i32 8)
- %3 = extractvalue { <2 x double>, <2 x double> } %2, 0
- %4 = extractvalue { <2 x double>, <2 x double> } %2, 1
- %5 = insertvalue %struct.float64x2x2_t undef, <2 x double> %3, 0, 0
- %6 = insertvalue %struct.float64x2x2_t %5, <2 x double> %4, 0, 1
- ret %struct.float64x2x2_t %6
-}
-
-define %struct.int8x8x2_t @test_vld1_s8_x2(i8* %a) {
-; CHECK-LABEL: test_vld1_s8_x2
-; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- %1 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8* %a, i32 1)
- %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
- %3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
- %4 = insertvalue %struct.int8x8x2_t undef, <8 x i8> %2, 0, 0
- %5 = insertvalue %struct.int8x8x2_t %4, <8 x i8> %3, 0, 1
- ret %struct.int8x8x2_t %5
-}
-
-define %struct.int16x4x2_t @test_vld1_s16_x2(i16* %a) {
-; CHECK-LABEL: test_vld1_s16_x2
-; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8* %1, i32 2)
- %3 = extractvalue { <4 x i16>, <4 x i16> } %2, 0
- %4 = extractvalue { <4 x i16>, <4 x i16> } %2, 1
- %5 = insertvalue %struct.int16x4x2_t undef, <4 x i16> %3, 0, 0
- %6 = insertvalue %struct.int16x4x2_t %5, <4 x i16> %4, 0, 1
- ret %struct.int16x4x2_t %6
-}
-
-define %struct.int32x2x2_t @test_vld1_s32_x2(i32* %a) {
-; CHECK-LABEL: test_vld1_s32_x2
-; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8* %1, i32 4)
- %3 = extractvalue { <2 x i32>, <2 x i32> } %2, 0
- %4 = extractvalue { <2 x i32>, <2 x i32> } %2, 1
- %5 = insertvalue %struct.int32x2x2_t undef, <2 x i32> %3, 0, 0
- %6 = insertvalue %struct.int32x2x2_t %5, <2 x i32> %4, 0, 1
- ret %struct.int32x2x2_t %6
-}
-
-define %struct.int64x1x2_t @test_vld1_s64_x2(i64* %a) {
-; CHECK-LABEL: test_vld1_s64_x2
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8* %1, i32 8)
- %3 = extractvalue { <1 x i64>, <1 x i64> } %2, 0
- %4 = extractvalue { <1 x i64>, <1 x i64> } %2, 1
- %5 = insertvalue %struct.int64x1x2_t undef, <1 x i64> %3, 0, 0
- %6 = insertvalue %struct.int64x1x2_t %5, <1 x i64> %4, 0, 1
- ret %struct.int64x1x2_t %6
-}
-
-define %struct.float32x2x2_t @test_vld1_f32_x2(float* %a) {
-; CHECK-LABEL: test_vld1_f32_x2
-; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8* %1, i32 4)
- %3 = extractvalue { <2 x float>, <2 x float> } %2, 0
- %4 = extractvalue { <2 x float>, <2 x float> } %2, 1
- %5 = insertvalue %struct.float32x2x2_t undef, <2 x float> %3, 0, 0
- %6 = insertvalue %struct.float32x2x2_t %5, <2 x float> %4, 0, 1
- ret %struct.float32x2x2_t %6
-}
-
-define %struct.float64x1x2_t @test_vld1_f64_x2(double* %a) {
-; CHECK-LABEL: test_vld1_f64_x2
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8* %1, i32 8)
- %3 = extractvalue { <1 x double>, <1 x double> } %2, 0
- %4 = extractvalue { <1 x double>, <1 x double> } %2, 1
- %5 = insertvalue %struct.float64x1x2_t undef, <1 x double> %3, 0, 0
- %6 = insertvalue %struct.float64x1x2_t %5, <1 x double> %4, 0, 1
- ret %struct.float64x1x2_t %6
-}
-
-define %struct.int8x16x3_t @test_vld1q_s8_x3(i8* %a) {
-; CHECK-LABEL: test_vld1q_s8_x3
-; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b},
-; [{{x[0-9]+|sp}}]
- %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8* %a, i32 1)
- %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 0
- %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 1
- %4 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
- %5 = insertvalue %struct.int8x16x3_t undef, <16 x i8> %2, 0, 0
- %6 = insertvalue %struct.int8x16x3_t %5, <16 x i8> %3, 0, 1
- %7 = insertvalue %struct.int8x16x3_t %6, <16 x i8> %4, 0, 2
- ret %struct.int8x16x3_t %7
-}
-
-define %struct.int16x8x3_t @test_vld1q_s16_x3(i16* %a) {
-; CHECK-LABEL: test_vld1q_s16_x3
-; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
- %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
- %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 1
- %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 2
- %6 = insertvalue %struct.int16x8x3_t undef, <8 x i16> %3, 0, 0
- %7 = insertvalue %struct.int16x8x3_t %6, <8 x i16> %4, 0, 1
- %8 = insertvalue %struct.int16x8x3_t %7, <8 x i16> %5, 0, 2
- ret %struct.int16x8x3_t %8
-}
-
-define %struct.int32x4x3_t @test_vld1q_s32_x3(i32* %a) {
-; CHECK-LABEL: test_vld1q_s32_x3
-; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8* %1, i32 4)
- %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 0
- %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 1
- %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 2
- %6 = insertvalue %struct.int32x4x3_t undef, <4 x i32> %3, 0, 0
- %7 = insertvalue %struct.int32x4x3_t %6, <4 x i32> %4, 0, 1
- %8 = insertvalue %struct.int32x4x3_t %7, <4 x i32> %5, 0, 2
- ret %struct.int32x4x3_t %8
-}
-
-define %struct.int64x2x3_t @test_vld1q_s64_x3(i64* %a) {
-; CHECK-LABEL: test_vld1q_s64_x3
-; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
- %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
- %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 1
- %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 2
- %6 = insertvalue %struct.int64x2x3_t undef, <2 x i64> %3, 0, 0
- %7 = insertvalue %struct.int64x2x3_t %6, <2 x i64> %4, 0, 1
- %8 = insertvalue %struct.int64x2x3_t %7, <2 x i64> %5, 0, 2
- ret %struct.int64x2x3_t %8
-}
-
-define %struct.float32x4x3_t @test_vld1q_f32_x3(float* %a) {
-; CHECK-LABEL: test_vld1q_f32_x3
-; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %2 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8* %1, i32 4)
- %3 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 0
- %4 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 1
- %5 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 2
- %6 = insertvalue %struct.float32x4x3_t undef, <4 x float> %3, 0, 0
- %7 = insertvalue %struct.float32x4x3_t %6, <4 x float> %4, 0, 1
- %8 = insertvalue %struct.float32x4x3_t %7, <4 x float> %5, 0, 2
- ret %struct.float32x4x3_t %8
-}
-
-
-define %struct.float64x2x3_t @test_vld1q_f64_x3(double* %a) {
-; CHECK-LABEL: test_vld1q_f64_x3
-; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %2 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8* %1, i32 8)
- %3 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 0
- %4 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 1
- %5 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 2
- %6 = insertvalue %struct.float64x2x3_t undef, <2 x double> %3, 0, 0
- %7 = insertvalue %struct.float64x2x3_t %6, <2 x double> %4, 0, 1
- %8 = insertvalue %struct.float64x2x3_t %7, <2 x double> %5, 0, 2
- ret %struct.float64x2x3_t %8
-}
-
-define %struct.int8x8x3_t @test_vld1_s8_x3(i8* %a) {
-; CHECK-LABEL: test_vld1_s8_x3
-; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b},
-; [{{x[0-9]+|sp}}]
- %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8* %a, i32 1)
- %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
- %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
- %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
- %5 = insertvalue %struct.int8x8x3_t undef, <8 x i8> %2, 0, 0
- %6 = insertvalue %struct.int8x8x3_t %5, <8 x i8> %3, 0, 1
- %7 = insertvalue %struct.int8x8x3_t %6, <8 x i8> %4, 0, 2
- ret %struct.int8x8x3_t %7
-}
-
-define %struct.int16x4x3_t @test_vld1_s16_x3(i16* %a) {
-; CHECK-LABEL: test_vld1_s16_x3
-; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8* %1, i32 2)
- %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
- %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
- %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
- %6 = insertvalue %struct.int16x4x3_t undef, <4 x i16> %3, 0, 0
- %7 = insertvalue %struct.int16x4x3_t %6, <4 x i16> %4, 0, 1
- %8 = insertvalue %struct.int16x4x3_t %7, <4 x i16> %5, 0, 2
- ret %struct.int16x4x3_t %8
-}
-
-define %struct.int32x2x3_t @test_vld1_s32_x3(i32* %a) {
-; CHECK-LABEL: test_vld1_s32_x3
-; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8* %1, i32 4)
- %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
- %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
- %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
- %6 = insertvalue %struct.int32x2x3_t undef, <2 x i32> %3, 0, 0
- %7 = insertvalue %struct.int32x2x3_t %6, <2 x i32> %4, 0, 1
- %8 = insertvalue %struct.int32x2x3_t %7, <2 x i32> %5, 0, 2
- ret %struct.int32x2x3_t %8
-}
-
-define %struct.int64x1x3_t @test_vld1_s64_x3(i64* %a) {
-; CHECK-LABEL: test_vld1_s64_x3
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8* %1, i32 8)
- %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 0
- %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 1
- %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 2
- %6 = insertvalue %struct.int64x1x3_t undef, <1 x i64> %3, 0, 0
- %7 = insertvalue %struct.int64x1x3_t %6, <1 x i64> %4, 0, 1
- %8 = insertvalue %struct.int64x1x3_t %7, <1 x i64> %5, 0, 2
- ret %struct.int64x1x3_t %8
-}
-
-define %struct.float32x2x3_t @test_vld1_f32_x3(float* %a) {
-; CHECK-LABEL: test_vld1_f32_x3
-; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %2 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8* %1, i32 4)
- %3 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 0
- %4 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 1
- %5 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 2
- %6 = insertvalue %struct.float32x2x3_t undef, <2 x float> %3, 0, 0
- %7 = insertvalue %struct.float32x2x3_t %6, <2 x float> %4, 0, 1
- %8 = insertvalue %struct.float32x2x3_t %7, <2 x float> %5, 0, 2
- ret %struct.float32x2x3_t %8
-}
-
-
-define %struct.float64x1x3_t @test_vld1_f64_x3(double* %a) {
-; CHECK-LABEL: test_vld1_f64_x3
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d},
-; [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %2 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8* %1, i32 8)
- %3 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 0
- %4 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 1
- %5 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 2
- %6 = insertvalue %struct.float64x1x3_t undef, <1 x double> %3, 0, 0
- %7 = insertvalue %struct.float64x1x3_t %6, <1 x double> %4, 0, 1
- %8 = insertvalue %struct.float64x1x3_t %7, <1 x double> %5, 0, 2
- ret %struct.float64x1x3_t %8
-}
-
-define %struct.int8x16x4_t @test_vld1q_s8_x4(i8* %a) {
-; CHECK-LABEL: test_vld1q_s8_x4
-; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b,
-; v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8* %a, i32 1)
- %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 0
- %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 1
- %4 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
- %5 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 3
- %6 = insertvalue %struct.int8x16x4_t undef, <16 x i8> %2, 0, 0
- %7 = insertvalue %struct.int8x16x4_t %6, <16 x i8> %3, 0, 1
- %8 = insertvalue %struct.int8x16x4_t %7, <16 x i8> %4, 0, 2
- %9 = insertvalue %struct.int8x16x4_t %8, <16 x i8> %5, 0, 3
- ret %struct.int8x16x4_t %9
-}
-
-define %struct.int16x8x4_t @test_vld1q_s16_x4(i16* %a) {
-; CHECK-LABEL: test_vld1q_s16_x4
-; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h,
-; v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8* %1, i32 2)
- %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
- %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 1
- %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 2
- %6 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 3
- %7 = insertvalue %struct.int16x8x4_t undef, <8 x i16> %3, 0, 0
- %8 = insertvalue %struct.int16x8x4_t %7, <8 x i16> %4, 0, 1
- %9 = insertvalue %struct.int16x8x4_t %8, <8 x i16> %5, 0, 2
- %10 = insertvalue %struct.int16x8x4_t %9, <8 x i16> %6, 0, 3
- ret %struct.int16x8x4_t %10
-}
-
-define %struct.int32x4x4_t @test_vld1q_s32_x4(i32* %a) {
-; CHECK-LABEL: test_vld1q_s32_x4
-; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s,
-; v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8* %1, i32 4)
- %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 0
- %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 1
- %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 2
- %6 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 3
- %7 = insertvalue %struct.int32x4x4_t undef, <4 x i32> %3, 0, 0
- %8 = insertvalue %struct.int32x4x4_t %7, <4 x i32> %4, 0, 1
- %9 = insertvalue %struct.int32x4x4_t %8, <4 x i32> %5, 0, 2
- %10 = insertvalue %struct.int32x4x4_t %9, <4 x i32> %6, 0, 3
- ret %struct.int32x4x4_t %10
-}
-
-define %struct.int64x2x4_t @test_vld1q_s64_x4(i64* %a) {
-; CHECK-LABEL: test_vld1q_s64_x4
-; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d,
-; v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8* %1, i32 8)
- %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
- %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 1
- %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 2
- %6 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 3
- %7 = insertvalue %struct.int64x2x4_t undef, <2 x i64> %3, 0, 0
- %8 = insertvalue %struct.int64x2x4_t %7, <2 x i64> %4, 0, 1
- %9 = insertvalue %struct.int64x2x4_t %8, <2 x i64> %5, 0, 2
- %10 = insertvalue %struct.int64x2x4_t %9, <2 x i64> %6, 0, 3
- ret %struct.int64x2x4_t %10
-}
-
-define %struct.float32x4x4_t @test_vld1q_f32_x4(float* %a) {
-; CHECK-LABEL: test_vld1q_f32_x4
-; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s,
-; v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
- %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
- %4 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 1
- %5 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 2
- %6 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 3
- %7 = insertvalue %struct.float32x4x4_t undef, <4 x float> %3, 0, 0
- %8 = insertvalue %struct.float32x4x4_t %7, <4 x float> %4, 0, 1
- %9 = insertvalue %struct.float32x4x4_t %8, <4 x float> %5, 0, 2
- %10 = insertvalue %struct.float32x4x4_t %9, <4 x float> %6, 0, 3
- ret %struct.float32x4x4_t %10
-}
-
-define %struct.float64x2x4_t @test_vld1q_f64_x4(double* %a) {
-; CHECK-LABEL: test_vld1q_f64_x4
-; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d,
-; v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %2 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8* %1, i32 8)
- %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 0
- %4 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 1
- %5 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 2
- %6 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 3
- %7 = insertvalue %struct.float64x2x4_t undef, <2 x double> %3, 0, 0
- %8 = insertvalue %struct.float64x2x4_t %7, <2 x double> %4, 0, 1
- %9 = insertvalue %struct.float64x2x4_t %8, <2 x double> %5, 0, 2
- %10 = insertvalue %struct.float64x2x4_t %9, <2 x double> %6, 0, 3
- ret %struct.float64x2x4_t %10
-}
-
-define %struct.int8x8x4_t @test_vld1_s8_x4(i8* %a) {
-; CHECK-LABEL: test_vld1_s8_x4
-; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b,
-; v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
- %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
- %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
- %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
- %5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
- %6 = insertvalue %struct.int8x8x4_t undef, <8 x i8> %2, 0, 0
- %7 = insertvalue %struct.int8x8x4_t %6, <8 x i8> %3, 0, 1
- %8 = insertvalue %struct.int8x8x4_t %7, <8 x i8> %4, 0, 2
- %9 = insertvalue %struct.int8x8x4_t %8, <8 x i8> %5, 0, 3
- ret %struct.int8x8x4_t %9
-}
-
-define %struct.int16x4x4_t @test_vld1_s16_x4(i16* %a) {
-; CHECK-LABEL: test_vld1_s16_x4
-; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h,
-; v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8* %1, i32 2)
- %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
- %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
- %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
- %6 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 3
- %7 = insertvalue %struct.int16x4x4_t undef, <4 x i16> %3, 0, 0
- %8 = insertvalue %struct.int16x4x4_t %7, <4 x i16> %4, 0, 1
- %9 = insertvalue %struct.int16x4x4_t %8, <4 x i16> %5, 0, 2
- %10 = insertvalue %struct.int16x4x4_t %9, <4 x i16> %6, 0, 3
- ret %struct.int16x4x4_t %10
-}
-
-define %struct.int32x2x4_t @test_vld1_s32_x4(i32* %a) {
-; CHECK-LABEL: test_vld1_s32_x4
-; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s,
-; v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8* %1, i32 4)
- %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
- %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
- %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
- %6 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 3
- %7 = insertvalue %struct.int32x2x4_t undef, <2 x i32> %3, 0, 0
- %8 = insertvalue %struct.int32x2x4_t %7, <2 x i32> %4, 0, 1
- %9 = insertvalue %struct.int32x2x4_t %8, <2 x i32> %5, 0, 2
- %10 = insertvalue %struct.int32x2x4_t %9, <2 x i32> %6, 0, 3
- ret %struct.int32x2x4_t %10
-}
-
-define %struct.int64x1x4_t @test_vld1_s64_x4(i64* %a) {
-; CHECK-LABEL: test_vld1_s64_x4
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d,
-; v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = bitcast i64* %a to i8*
- %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8* %1, i32 8)
- %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 0
- %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 1
- %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 2
- %6 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 3
- %7 = insertvalue %struct.int64x1x4_t undef, <1 x i64> %3, 0, 0
- %8 = insertvalue %struct.int64x1x4_t %7, <1 x i64> %4, 0, 1
- %9 = insertvalue %struct.int64x1x4_t %8, <1 x i64> %5, 0, 2
- %10 = insertvalue %struct.int64x1x4_t %9, <1 x i64> %6, 0, 3
- ret %struct.int64x1x4_t %10
-}
-
-define %struct.float32x2x4_t @test_vld1_f32_x4(float* %a) {
-; CHECK-LABEL: test_vld1_f32_x4
-; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s,
-; v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = bitcast float* %a to i8*
- %2 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8* %1, i32 4)
- %3 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 0
- %4 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 1
- %5 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 2
- %6 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 3
- %7 = insertvalue %struct.float32x2x4_t undef, <2 x float> %3, 0, 0
- %8 = insertvalue %struct.float32x2x4_t %7, <2 x float> %4, 0, 1
- %9 = insertvalue %struct.float32x2x4_t %8, <2 x float> %5, 0, 2
- %10 = insertvalue %struct.float32x2x4_t %9, <2 x float> %6, 0, 3
- ret %struct.float32x2x4_t %10
-}
-
-
-define %struct.float64x1x4_t @test_vld1_f64_x4(double* %a) {
-; CHECK-LABEL: test_vld1_f64_x4
-; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d,
-; v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = bitcast double* %a to i8*
- %2 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8* %1, i32 8)
- %3 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 0
- %4 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 1
- %5 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 2
- %6 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 3
- %7 = insertvalue %struct.float64x1x4_t undef, <1 x double> %3, 0, 0
- %8 = insertvalue %struct.float64x1x4_t %7, <1 x double> %4, 0, 1
- %9 = insertvalue %struct.float64x1x4_t %8, <1 x double> %5, 0, 2
- %10 = insertvalue %struct.float64x1x4_t %9, <1 x double> %6, 0, 3
- ret %struct.float64x1x4_t %10
-}
-
-define void @test_vst1q_s8_x2(i8* %a, [2 x <16 x i8>] %b) {
-; CHECK-LABEL: test_vst1q_s8_x2
-; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <16 x i8>] %b, 0
- %2 = extractvalue [2 x <16 x i8>] %b, 1
- tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
- ret void
-}
-
-define void @test_vst1q_s16_x2(i16* %a, [2 x <8 x i16>] %b) {
-; CHECK-LABEL: test_vst1q_s16_x2
-; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <8 x i16>] %b, 0
- %2 = extractvalue [2 x <8 x i16>] %b, 1
- %3 = bitcast i16* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
- ret void
-}
-
-define void @test_vst1q_s32_x2(i32* %a, [2 x <4 x i32>] %b) {
-; CHECK-LABEL: test_vst1q_s32_x2
-; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <4 x i32>] %b, 0
- %2 = extractvalue [2 x <4 x i32>] %b, 1
- %3 = bitcast i32* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v4i32(i8* %3, <4 x i32> %1, <4 x i32> %2, i32 4)
- ret void
-}
-
-define void @test_vst1q_s64_x2(i64* %a, [2 x <2 x i64>] %b) {
-; CHECK-LABEL: test_vst1q_s64_x2
-; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <2 x i64>] %b, 0
- %2 = extractvalue [2 x <2 x i64>] %b, 1
- %3 = bitcast i64* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v2i64(i8* %3, <2 x i64> %1, <2 x i64> %2, i32 8)
- ret void
-}
-
-define void @test_vst1q_f32_x2(float* %a, [2 x <4 x float>] %b) {
-; CHECK-LABEL: test_vst1q_f32_x2
-; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <4 x float>] %b, 0
- %2 = extractvalue [2 x <4 x float>] %b, 1
- %3 = bitcast float* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v4f32(i8* %3, <4 x float> %1, <4 x float> %2, i32 4)
- ret void
-}
-
-
-define void @test_vst1q_f64_x2(double* %a, [2 x <2 x double>] %b) {
-; CHECK-LABEL: test_vst1q_f64_x2
-; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <2 x double>] %b, 0
- %2 = extractvalue [2 x <2 x double>] %b, 1
- %3 = bitcast double* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v2f64(i8* %3, <2 x double> %1, <2 x double> %2, i32 8)
- ret void
-}
-
-define void @test_vst1_s8_x2(i8* %a, [2 x <8 x i8>] %b) {
-; CHECK-LABEL: test_vst1_s8_x2
-; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <8 x i8>] %b, 0
- %2 = extractvalue [2 x <8 x i8>] %b, 1
- tail call void @llvm.aarch64.neon.vst1x2.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 1)
- ret void
-}
-
-define void @test_vst1_s16_x2(i16* %a, [2 x <4 x i16>] %b) {
-; CHECK-LABEL: test_vst1_s16_x2
-; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <4 x i16>] %b, 0
- %2 = extractvalue [2 x <4 x i16>] %b, 1
- %3 = bitcast i16* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v4i16(i8* %3, <4 x i16> %1, <4 x i16> %2, i32 2)
- ret void
-}
-
-define void @test_vst1_s32_x2(i32* %a, [2 x <2 x i32>] %b) {
-; CHECK-LABEL: test_vst1_s32_x2
-; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <2 x i32>] %b, 0
- %2 = extractvalue [2 x <2 x i32>] %b, 1
- %3 = bitcast i32* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v2i32(i8* %3, <2 x i32> %1, <2 x i32> %2, i32 4)
- ret void
-}
-
-define void @test_vst1_s64_x2(i64* %a, [2 x <1 x i64>] %b) {
-; CHECK-LABEL: test_vst1_s64_x2
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <1 x i64>] %b, 0
- %2 = extractvalue [2 x <1 x i64>] %b, 1
- %3 = bitcast i64* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v1i64(i8* %3, <1 x i64> %1, <1 x i64> %2, i32 8)
- ret void
-}
-
-define void @test_vst1_f32_x2(float* %a, [2 x <2 x float>] %b) {
-; CHECK-LABEL: test_vst1_f32_x2
-; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <2 x float>] %b, 0
- %2 = extractvalue [2 x <2 x float>] %b, 1
- %3 = bitcast float* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v2f32(i8* %3, <2 x float> %1, <2 x float> %2, i32 4)
- ret void
-}
-
-define void @test_vst1_f64_x2(double* %a, [2 x <1 x double>] %b) {
-; CHECK-LABEL: test_vst1_f64_x2
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [2 x <1 x double>] %b, 0
- %2 = extractvalue [2 x <1 x double>] %b, 1
- %3 = bitcast double* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v1f64(i8* %3, <1 x double> %1, <1 x double> %2, i32 8)
- ret void
-}
-
-define void @test_vst1q_s8_x3(i8* %a, [3 x <16 x i8>] %b) {
-; CHECK-LABEL: test_vst1q_s8_x3
-; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <16 x i8>] %b, 0
- %2 = extractvalue [3 x <16 x i8>] %b, 1
- %3 = extractvalue [3 x <16 x i8>] %b, 2
- tail call void @llvm.aarch64.neon.vst1x3.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, i32 1)
- ret void
-}
-
-define void @test_vst1q_s16_x3(i16* %a, [3 x <8 x i16>] %b) {
-; CHECK-LABEL: test_vst1q_s16_x3
-; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <8 x i16>] %b, 0
- %2 = extractvalue [3 x <8 x i16>] %b, 1
- %3 = extractvalue [3 x <8 x i16>] %b, 2
- %4 = bitcast i16* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v8i16(i8* %4, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, i32 2)
- ret void
-}
-
-define void @test_vst1q_s32_x3(i32* %a, [3 x <4 x i32>] %b) {
-; CHECK-LABEL: test_vst1q_s32_x3
-; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <4 x i32>] %b, 0
- %2 = extractvalue [3 x <4 x i32>] %b, 1
- %3 = extractvalue [3 x <4 x i32>] %b, 2
- %4 = bitcast i32* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v4i32(i8* %4, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, i32 4)
- ret void
-}
-
-define void @test_vst1q_s64_x3(i64* %a, [3 x <2 x i64>] %b) {
-; CHECK-LABEL: test_vst1q_s64_x3
-; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <2 x i64>] %b, 0
- %2 = extractvalue [3 x <2 x i64>] %b, 1
- %3 = extractvalue [3 x <2 x i64>] %b, 2
- %4 = bitcast i64* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v2i64(i8* %4, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, i32 8)
- ret void
-}
-
-define void @test_vst1q_f32_x3(float* %a, [3 x <4 x float>] %b) {
-; CHECK-LABEL: test_vst1q_f32_x3
-; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <4 x float>] %b, 0
- %2 = extractvalue [3 x <4 x float>] %b, 1
- %3 = extractvalue [3 x <4 x float>] %b, 2
- %4 = bitcast float* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v4f32(i8* %4, <4 x float> %1, <4 x float> %2, <4 x float> %3, i32 4)
- ret void
-}
-
-define void @test_vst1q_f64_x3(double* %a, [3 x <2 x double>] %b) {
-; CHECK-LABEL: test_vst1q_f64_x3
-; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <2 x double>] %b, 0
- %2 = extractvalue [3 x <2 x double>] %b, 1
- %3 = extractvalue [3 x <2 x double>] %b, 2
- %4 = bitcast double* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v2f64(i8* %4, <2 x double> %1, <2 x double> %2, <2 x double> %3, i32 8)
- ret void
-}
-
-define void @test_vst1_s8_x3(i8* %a, [3 x <8 x i8>] %b) {
-; CHECK-LABEL: test_vst1_s8_x3
-; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <8 x i8>] %b, 0
- %2 = extractvalue [3 x <8 x i8>] %b, 1
- %3 = extractvalue [3 x <8 x i8>] %b, 2
- tail call void @llvm.aarch64.neon.vst1x3.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, i32 1)
- ret void
-}
-
-define void @test_vst1_s16_x3(i16* %a, [3 x <4 x i16>] %b) {
-; CHECK-LABEL: test_vst1_s16_x3
-; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <4 x i16>] %b, 0
- %2 = extractvalue [3 x <4 x i16>] %b, 1
- %3 = extractvalue [3 x <4 x i16>] %b, 2
- %4 = bitcast i16* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 2)
- ret void
-}
-
-define void @test_vst1_s32_x3(i32* %a, [3 x <2 x i32>] %b) {
-; CHECK-LABEL: test_vst1_s32_x3
-; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <2 x i32>] %b, 0
- %2 = extractvalue [3 x <2 x i32>] %b, 1
- %3 = extractvalue [3 x <2 x i32>] %b, 2
- %4 = bitcast i32* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
- ret void
-}
-
-define void @test_vst1_s64_x3(i64* %a, [3 x <1 x i64>] %b) {
-; CHECK-LABEL: test_vst1_s64_x3
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d},
-; [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <1 x i64>] %b, 0
- %2 = extractvalue [3 x <1 x i64>] %b, 1
- %3 = extractvalue [3 x <1 x i64>] %b, 2
- %4 = bitcast i64* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
- ret void
-}
-
-define void @test_vst1_f32_x3(float* %a, [3 x <2 x float>] %b) {
-; CHECK-LABEL: test_vst1_f32_x3
-; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <2 x float>] %b, 0
- %2 = extractvalue [3 x <2 x float>] %b, 1
- %3 = extractvalue [3 x <2 x float>] %b, 2
- %4 = bitcast float* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v2f32(i8* %4, <2 x float> %1, <2 x float> %2, <2 x float> %3, i32 4)
- ret void
-}
-
-define void @test_vst1_f64_x3(double* %a, [3 x <1 x double>] %b) {
-; CHECK-LABEL: test_vst1_f64_x3
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [3 x <1 x double>] %b, 0
- %2 = extractvalue [3 x <1 x double>] %b, 1
- %3 = extractvalue [3 x <1 x double>] %b, 2
- %4 = bitcast double* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v1f64(i8* %4, <1 x double> %1, <1 x double> %2, <1 x double> %3, i32 8)
- ret void
-}
-
-define void @test_vst1q_s8_x4(i8* %a, [4 x <16 x i8>] %b) {
-; CHECK-LABEL: test_vst1q_s8_x4
-; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <16 x i8>] %b, 0
- %2 = extractvalue [4 x <16 x i8>] %b, 1
- %3 = extractvalue [4 x <16 x i8>] %b, 2
- %4 = extractvalue [4 x <16 x i8>] %b, 3
- tail call void @llvm.aarch64.neon.vst1x4.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, <16 x i8> %4, i32 1)
- ret void
-}
-
-define void @test_vst1q_s16_x4(i16* %a, [4 x <8 x i16>] %b) {
-; CHECK-LABEL: test_vst1q_s16_x4
-; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <8 x i16>] %b, 0
- %2 = extractvalue [4 x <8 x i16>] %b, 1
- %3 = extractvalue [4 x <8 x i16>] %b, 2
- %4 = extractvalue [4 x <8 x i16>] %b, 3
- %5 = bitcast i16* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v8i16(i8* %5, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, <8 x i16> %4, i32 2)
- ret void
-}
-
-define void @test_vst1q_s32_x4(i32* %a, [4 x <4 x i32>] %b) {
-; CHECK-LABEL: test_vst1q_s32_x4
-; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <4 x i32>] %b, 0
- %2 = extractvalue [4 x <4 x i32>] %b, 1
- %3 = extractvalue [4 x <4 x i32>] %b, 2
- %4 = extractvalue [4 x <4 x i32>] %b, 3
- %5 = bitcast i32* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v4i32(i8* %5, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, i32 4)
- ret void
-}
-
-define void @test_vst1q_s64_x4(i64* %a, [4 x <2 x i64>] %b) {
-; CHECK-LABEL: test_vst1q_s64_x4
-; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <2 x i64>] %b, 0
- %2 = extractvalue [4 x <2 x i64>] %b, 1
- %3 = extractvalue [4 x <2 x i64>] %b, 2
- %4 = extractvalue [4 x <2 x i64>] %b, 3
- %5 = bitcast i64* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v2i64(i8* %5, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, <2 x i64> %4, i32 8)
- ret void
-}
-
-define void @test_vst1q_f32_x4(float* %a, [4 x <4 x float>] %b) {
-; CHECK-LABEL: test_vst1q_f32_x4
-; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <4 x float>] %b, 0
- %2 = extractvalue [4 x <4 x float>] %b, 1
- %3 = extractvalue [4 x <4 x float>] %b, 2
- %4 = extractvalue [4 x <4 x float>] %b, 3
- %5 = bitcast float* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
- ret void
-}
-
-define void @test_vst1q_f64_x4(double* %a, [4 x <2 x double>] %b) {
-; CHECK-LABEL: test_vst1q_f64_x4
-; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <2 x double>] %b, 0
- %2 = extractvalue [4 x <2 x double>] %b, 1
- %3 = extractvalue [4 x <2 x double>] %b, 2
- %4 = extractvalue [4 x <2 x double>] %b, 3
- %5 = bitcast double* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
- ret void
-}
-
-define void @test_vst1_s8_x4(i8* %a, [4 x <8 x i8>] %b) {
-; CHECK-LABEL: test_vst1_s8_x4
-; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <8 x i8>] %b, 0
- %2 = extractvalue [4 x <8 x i8>] %b, 1
- %3 = extractvalue [4 x <8 x i8>] %b, 2
- %4 = extractvalue [4 x <8 x i8>] %b, 3
- tail call void @llvm.aarch64.neon.vst1x4.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, <8 x i8> %4, i32 1)
- ret void
-}
-
-define void @test_vst1_s16_x4(i16* %a, [4 x <4 x i16>] %b) {
-; CHECK-LABEL: test_vst1_s16_x4
-; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <4 x i16>] %b, 0
- %2 = extractvalue [4 x <4 x i16>] %b, 1
- %3 = extractvalue [4 x <4 x i16>] %b, 2
- %4 = extractvalue [4 x <4 x i16>] %b, 3
- %5 = bitcast i16* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v4i16(i8* %5, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, <4 x i16> %4, i32 2)
- ret void
-}
-
-define void @test_vst1_s32_x4(i32* %a, [4 x <2 x i32>] %b) {
-; CHECK-LABEL: test_vst1_s32_x4
-; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <2 x i32>] %b, 0
- %2 = extractvalue [4 x <2 x i32>] %b, 1
- %3 = extractvalue [4 x <2 x i32>] %b, 2
- %4 = extractvalue [4 x <2 x i32>] %b, 3
- %5 = bitcast i32* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v2i32(i8* %5, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, <2 x i32> %4, i32 4)
- ret void
-}
-
-define void @test_vst1_s64_x4(i64* %a, [4 x <1 x i64>] %b) {
-; CHECK-LABEL: test_vst1_s64_x4
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <1 x i64>] %b, 0
- %2 = extractvalue [4 x <1 x i64>] %b, 1
- %3 = extractvalue [4 x <1 x i64>] %b, 2
- %4 = extractvalue [4 x <1 x i64>] %b, 3
- %5 = bitcast i64* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v1i64(i8* %5, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, <1 x i64> %4, i32 8)
- ret void
-}
-
-define void @test_vst1_f32_x4(float* %a, [4 x <2 x float>] %b) {
-; CHECK-LABEL: test_vst1_f32_x4
-; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <2 x float>] %b, 0
- %2 = extractvalue [4 x <2 x float>] %b, 1
- %3 = extractvalue [4 x <2 x float>] %b, 2
- %4 = extractvalue [4 x <2 x float>] %b, 3
- %5 = bitcast float* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v2f32(i8* %5, <2 x float> %1, <2 x float> %2, <2 x float> %3, <2 x float> %4, i32 4)
- ret void
-}
-
-define void @test_vst1_f64_x4(double* %a, [4 x <1 x double>] %b) {
-; CHECK-LABEL: test_vst1_f64_x4
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
- %1 = extractvalue [4 x <1 x double>] %b, 0
- %2 = extractvalue [4 x <1 x double>] %b, 1
- %3 = extractvalue [4 x <1 x double>] %b, 2
- %4 = extractvalue [4 x <1 x double>] %b, 3
- %5 = bitcast double* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v1f64(i8* %5, <1 x double> %1, <1 x double> %2, <1 x double> %3, <1 x double> %4, i32 8)
- ret void
-}
-
-declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
-declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8*, i32)
-declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8*, i32)
-declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8*, i32)
-declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8*, i32)
-declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8*, i32)
-declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8*, i32)
-declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8*, i32)
-declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8*, i32)
-declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8*, i32)
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8*, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8*, i32)
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8*, i32)
-declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8*, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8*, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8*, i32)
-declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8*, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8*, i32)
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8*, i32)
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8*, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8*, i32)
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8*, i32)
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8*, i32)
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8*, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8*, i32)
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8*, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8*, i32)
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8*, i32)
-declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v4i32(i8*, <4 x i32>, <4 x i32>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v2i64(i8*, <2 x i64>, <2 x i64>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v4f32(i8*, <4 x float>, <4 x float>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v2f64(i8*, <2 x double>, <2 x double>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v4i16(i8*, <4 x i16>, <4 x i16>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v2i32(i8*, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v2f32(i8*, <2 x float>, <2 x float>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v1f64(i8*, <1 x double>, <1 x double>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
diff --git a/test/CodeGen/AArch64/neon-simd-ldst-one.ll b/test/CodeGen/AArch64/neon-simd-ldst-one.ll
deleted file mode 100644
index 3f28320f23d5..000000000000
--- a/test/CodeGen/AArch64/neon-simd-ldst-one.ll
+++ /dev/null
@@ -1,2113 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-%struct.int8x16x2_t = type { [2 x <16 x i8>] }
-%struct.int16x8x2_t = type { [2 x <8 x i16>] }
-%struct.int32x4x2_t = type { [2 x <4 x i32>] }
-%struct.int64x2x2_t = type { [2 x <2 x i64>] }
-%struct.float32x4x2_t = type { [2 x <4 x float>] }
-%struct.float64x2x2_t = type { [2 x <2 x double>] }
-%struct.int8x8x2_t = type { [2 x <8 x i8>] }
-%struct.int16x4x2_t = type { [2 x <4 x i16>] }
-%struct.int32x2x2_t = type { [2 x <2 x i32>] }
-%struct.int64x1x2_t = type { [2 x <1 x i64>] }
-%struct.float32x2x2_t = type { [2 x <2 x float>] }
-%struct.float64x1x2_t = type { [2 x <1 x double>] }
-%struct.int8x16x3_t = type { [3 x <16 x i8>] }
-%struct.int16x8x3_t = type { [3 x <8 x i16>] }
-%struct.int32x4x3_t = type { [3 x <4 x i32>] }
-%struct.int64x2x3_t = type { [3 x <2 x i64>] }
-%struct.float32x4x3_t = type { [3 x <4 x float>] }
-%struct.float64x2x3_t = type { [3 x <2 x double>] }
-%struct.int8x8x3_t = type { [3 x <8 x i8>] }
-%struct.int16x4x3_t = type { [3 x <4 x i16>] }
-%struct.int32x2x3_t = type { [3 x <2 x i32>] }
-%struct.int64x1x3_t = type { [3 x <1 x i64>] }
-%struct.float32x2x3_t = type { [3 x <2 x float>] }
-%struct.float64x1x3_t = type { [3 x <1 x double>] }
-%struct.int8x16x4_t = type { [4 x <16 x i8>] }
-%struct.int16x8x4_t = type { [4 x <8 x i16>] }
-%struct.int32x4x4_t = type { [4 x <4 x i32>] }
-%struct.int64x2x4_t = type { [4 x <2 x i64>] }
-%struct.float32x4x4_t = type { [4 x <4 x float>] }
-%struct.float64x2x4_t = type { [4 x <2 x double>] }
-%struct.int8x8x4_t = type { [4 x <8 x i8>] }
-%struct.int16x4x4_t = type { [4 x <4 x i16>] }
-%struct.int32x2x4_t = type { [4 x <2 x i32>] }
-%struct.int64x1x4_t = type { [4 x <1 x i64>] }
-%struct.float32x2x4_t = type { [4 x <2 x float>] }
-%struct.float64x1x4_t = type { [4 x <1 x double>] }
-
-define <16 x i8> @test_vld1q_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld1q_dup_s8
-; CHECK: ld1r {{{v[0-9]+}}.16b}, [x0]
-entry:
- %0 = load i8* %a, align 1
- %1 = insertelement <16 x i8> undef, i8 %0, i32 0
- %lane = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
- ret <16 x i8> %lane
-}
-
-define <8 x i16> @test_vld1q_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld1q_dup_s16
-; CHECK: ld1r {{{v[0-9]+}}.8h}, [x0]
-entry:
- %0 = load i16* %a, align 2
- %1 = insertelement <8 x i16> undef, i16 %0, i32 0
- %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
- ret <8 x i16> %lane
-}
-
-define <4 x i32> @test_vld1q_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld1q_dup_s32
-; CHECK: ld1r {{{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = load i32* %a, align 4
- %1 = insertelement <4 x i32> undef, i32 %0, i32 0
- %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
- ret <4 x i32> %lane
-}
-
-define <2 x i64> @test_vld1q_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld1q_dup_s64
-; CHECK: ld1r {{{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = load i64* %a, align 8
- %1 = insertelement <2 x i64> undef, i64 %0, i32 0
- %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
- ret <2 x i64> %lane
-}
-
-define <4 x float> @test_vld1q_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld1q_dup_f32
-; CHECK: ld1r {{{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = load float* %a, align 4
- %1 = insertelement <4 x float> undef, float %0, i32 0
- %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
- ret <4 x float> %lane
-}
-
-define <2 x double> @test_vld1q_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld1q_dup_f64
-; CHECK: ld1r {{{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = load double* %a, align 8
- %1 = insertelement <2 x double> undef, double %0, i32 0
- %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
- ret <2 x double> %lane
-}
-
-define <8 x i8> @test_vld1_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld1_dup_s8
-; CHECK: ld1r {{{v[0-9]+}}.8b}, [x0]
-entry:
- %0 = load i8* %a, align 1
- %1 = insertelement <8 x i8> undef, i8 %0, i32 0
- %lane = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
- ret <8 x i8> %lane
-}
-
-define <4 x i16> @test_vld1_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld1_dup_s16
-; CHECK: ld1r {{{v[0-9]+}}.4h}, [x0]
-entry:
- %0 = load i16* %a, align 2
- %1 = insertelement <4 x i16> undef, i16 %0, i32 0
- %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
- ret <4 x i16> %lane
-}
-
-define <2 x i32> @test_vld1_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld1_dup_s32
-; CHECK: ld1r {{{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = load i32* %a, align 4
- %1 = insertelement <2 x i32> undef, i32 %0, i32 0
- %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
- ret <2 x i32> %lane
-}
-
-define <1 x i64> @test_vld1_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld1_dup_s64
-; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = load i64* %a, align 8
- %1 = insertelement <1 x i64> undef, i64 %0, i32 0
- ret <1 x i64> %1
-}
-
-define <2 x float> @test_vld1_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld1_dup_f32
-; CHECK: ld1r {{{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = load float* %a, align 4
- %1 = insertelement <2 x float> undef, float %0, i32 0
- %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
- ret <2 x float> %lane
-}
-
-define <1 x double> @test_vld1_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld1_dup_f64
-; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = load double* %a, align 8
- %1 = insertelement <1 x double> undef, double %0, i32 0
- ret <1 x double> %1
-}
-
-define %struct.int8x16x2_t @test_vld2q_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld2q_dup_s8
-; CHECK: ld2r {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, [x0]
-entry:
- %vld_dup = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
- %0 = extractvalue { <16 x i8>, <16 x i8> } %vld_dup, 0
- %lane = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
- %1 = extractvalue { <16 x i8>, <16 x i8> } %vld_dup, 1
- %lane1 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %lane1, 0, 1
- ret %struct.int8x16x2_t %.fca.0.1.insert
-}
-
-define %struct.int16x8x2_t @test_vld2q_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld2q_dup_s16
-; CHECK: ld2r {{{v[0-9]+}}.8h, {{v[0-9]+}}.8h}, [x0]
-entry:
- %0 = bitcast i16* %a to i8*
- %vld_dup = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2lane.v8i16(i8* %0, <8 x i16> undef, <8 x i16> undef, i32 0, i32 2)
- %1 = extractvalue { <8 x i16>, <8 x i16> } %vld_dup, 0
- %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
- %2 = extractvalue { <8 x i16>, <8 x i16> } %vld_dup, 1
- %lane1 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %lane1, 0, 1
- ret %struct.int16x8x2_t %.fca.0.1.insert
-}
-
-define %struct.int32x4x2_t @test_vld2q_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld2q_dup_s32
-; CHECK: ld2r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = bitcast i32* %a to i8*
- %vld_dup = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8* %0, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
- %1 = extractvalue { <4 x i32>, <4 x i32> } %vld_dup, 0
- %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x i32>, <4 x i32> } %vld_dup, 1
- %lane1 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %lane1, 0, 1
- ret %struct.int32x4x2_t %.fca.0.1.insert
-}
-
-define %struct.int64x2x2_t @test_vld2q_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld2q_dup_s64
-; CHECK: ld2r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = bitcast i64* %a to i8*
- %vld_dup = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2lane.v2i64(i8* %0, <2 x i64> undef, <2 x i64> undef, i32 0, i32 8)
- %1 = extractvalue { <2 x i64>, <2 x i64> } %vld_dup, 0
- %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x i64>, <2 x i64> } %vld_dup, 1
- %lane1 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int64x2x2_t undef, <2 x i64> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x2_t %.fca.0.0.insert, <2 x i64> %lane1, 0, 1
- ret %struct.int64x2x2_t %.fca.0.1.insert
-}
-
-define %struct.float32x4x2_t @test_vld2q_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld2q_dup_f32
-; CHECK: ld2r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = bitcast float* %a to i8*
- %vld_dup = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2lane.v4f32(i8* %0, <4 x float> undef, <4 x float> undef, i32 0, i32 4)
- %1 = extractvalue { <4 x float>, <4 x float> } %vld_dup, 0
- %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x float>, <4 x float> } %vld_dup, 1
- %lane1 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %lane1, 0, 1
- ret %struct.float32x4x2_t %.fca.0.1.insert
-}
-
-define %struct.float64x2x2_t @test_vld2q_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld2q_dup_f64
-; CHECK: ld2r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = bitcast double* %a to i8*
- %vld_dup = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2lane.v2f64(i8* %0, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
- %1 = extractvalue { <2 x double>, <2 x double> } %vld_dup, 0
- %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x double>, <2 x double> } %vld_dup, 1
- %lane1 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float64x2x2_t undef, <2 x double> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x2_t %.fca.0.0.insert, <2 x double> %lane1, 0, 1
- ret %struct.float64x2x2_t %.fca.0.1.insert
-}
-
-define %struct.int8x8x2_t @test_vld2_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld2_dup_s8
-; CHECK: ld2r {{{v[0-9]+}}.8b, {{v[0-9]+}}.8b}, [x0]
-entry:
- %vld_dup = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
- %0 = extractvalue { <8 x i8>, <8 x i8> } %vld_dup, 0
- %lane = shufflevector <8 x i8> %0, <8 x i8> undef, <8 x i32> zeroinitializer
- %1 = extractvalue { <8 x i8>, <8 x i8> } %vld_dup, 1
- %lane1 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %lane1, 0, 1
- ret %struct.int8x8x2_t %.fca.0.1.insert
-}
-
-define %struct.int16x4x2_t @test_vld2_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld2_dup_s16
-; CHECK: ld2r {{{v[0-9]+}}.4h, {{v[0-9]+}}.4h}, [x0]
-entry:
- %0 = bitcast i16* %a to i8*
- %vld_dup = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2lane.v4i16(i8* %0, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
- %1 = extractvalue { <4 x i16>, <4 x i16> } %vld_dup, 0
- %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x i16>, <4 x i16> } %vld_dup, 1
- %lane1 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %lane1, 0, 1
- ret %struct.int16x4x2_t %.fca.0.1.insert
-}
-
-define %struct.int32x2x2_t @test_vld2_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld2_dup_s32
-; CHECK: ld2r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = bitcast i32* %a to i8*
- %vld_dup = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2lane.v2i32(i8* %0, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
- %1 = extractvalue { <2 x i32>, <2 x i32> } %vld_dup, 0
- %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x i32>, <2 x i32> } %vld_dup, 1
- %lane1 = shufflevector <2 x i32> %2, <2 x i32> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %lane1, 0, 1
- ret %struct.int32x2x2_t %.fca.0.1.insert
-}
-
-define %struct.int64x1x2_t @test_vld2_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld2_dup_s64
-; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = bitcast i64* %a to i8*
- %vld_dup = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %0, i32 8)
- %vld_dup.fca.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld_dup, 0
- %vld_dup.fca.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld_dup, 1
- %.fca.0.0.insert = insertvalue %struct.int64x1x2_t undef, <1 x i64> %vld_dup.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x2_t %.fca.0.0.insert, <1 x i64> %vld_dup.fca.1.extract, 0, 1
- ret %struct.int64x1x2_t %.fca.0.1.insert
-}
-
-define %struct.float32x2x2_t @test_vld2_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld2_dup_f32
-; CHECK: ld2r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = bitcast float* %a to i8*
- %vld_dup = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2lane.v2f32(i8* %0, <2 x float> undef, <2 x float> undef, i32 0, i32 4)
- %1 = extractvalue { <2 x float>, <2 x float> } %vld_dup, 0
- %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x float>, <2 x float> } %vld_dup, 1
- %lane1 = shufflevector <2 x float> %2, <2 x float> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %lane1, 0, 1
- ret %struct.float32x2x2_t %.fca.0.1.insert
-}
-
-define %struct.float64x1x2_t @test_vld2_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld2_dup_f64
-; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = bitcast double* %a to i8*
- %vld_dup = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8* %0, i32 8)
- %vld_dup.fca.0.extract = extractvalue { <1 x double>, <1 x double> } %vld_dup, 0
- %vld_dup.fca.1.extract = extractvalue { <1 x double>, <1 x double> } %vld_dup, 1
- %.fca.0.0.insert = insertvalue %struct.float64x1x2_t undef, <1 x double> %vld_dup.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x2_t %.fca.0.0.insert, <1 x double> %vld_dup.fca.1.extract, 0, 1
- ret %struct.float64x1x2_t %.fca.0.1.insert
-}
-
-define %struct.int8x16x3_t @test_vld3q_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld3q_dup_s8
-; CHECK: ld3r {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, [x0]
-entry:
- %vld_dup = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
- %0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 0
- %lane = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
- %1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 1
- %lane1 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
- %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 2
- %lane2 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int8x16x3_t undef, <16 x i8> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x16x3_t %.fca.0.0.insert, <16 x i8> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x16x3_t %.fca.0.1.insert, <16 x i8> %lane2, 0, 2
- ret %struct.int8x16x3_t %.fca.0.2.insert
-}
-
-define %struct.int16x8x3_t @test_vld3q_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld3q_dup_s16
-; CHECK: ld3r {{{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h}, [x0]
-entry:
- %0 = bitcast i16* %a to i8*
- %vld_dup = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8* %0, <8 x i16> undef, <8 x i16> undef, <8 x i16> undef, i32 0, i32 2)
- %1 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 0
- %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
- %2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 1
- %lane1 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> zeroinitializer
- %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 2
- %lane2 = shufflevector <8 x i16> %3, <8 x i16> undef, <8 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int16x8x3_t undef, <8 x i16> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x3_t %.fca.0.0.insert, <8 x i16> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x8x3_t %.fca.0.1.insert, <8 x i16> %lane2, 0, 2
- ret %struct.int16x8x3_t %.fca.0.2.insert
-}
-
-define %struct.int32x4x3_t @test_vld3q_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld3q_dup_s32
-; CHECK: ld3r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = bitcast i32* %a to i8*
- %vld_dup = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3lane.v4i32(i8* %0, <4 x i32> undef, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
- %1 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 0
- %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 1
- %lane1 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
- %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 2
- %lane2 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int32x4x3_t undef, <4 x i32> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x3_t %.fca.0.0.insert, <4 x i32> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x4x3_t %.fca.0.1.insert, <4 x i32> %lane2, 0, 2
- ret %struct.int32x4x3_t %.fca.0.2.insert
-}
-
-define %struct.int64x2x3_t @test_vld3q_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld3q_dup_s64
-; CHECK: ld3r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = bitcast i64* %a to i8*
- %vld_dup = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3lane.v2i64(i8* %0, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, i32 0, i32 8)
- %1 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 0
- %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 1
- %lane1 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 2
- %lane2 = shufflevector <2 x i64> %3, <2 x i64> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int64x2x3_t undef, <2 x i64> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x3_t %.fca.0.0.insert, <2 x i64> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x2x3_t %.fca.0.1.insert, <2 x i64> %lane2, 0, 2
- ret %struct.int64x2x3_t %.fca.0.2.insert
-}
-
-define %struct.float32x4x3_t @test_vld3q_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld3q_dup_f32
-; CHECK: ld3r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = bitcast float* %a to i8*
- %vld_dup = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3lane.v4f32(i8* %0, <4 x float> undef, <4 x float> undef, <4 x float> undef, i32 0, i32 4)
- %1 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld_dup, 0
- %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld_dup, 1
- %lane1 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
- %3 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld_dup, 2
- %lane2 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float32x4x3_t undef, <4 x float> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x3_t %.fca.0.0.insert, <4 x float> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x4x3_t %.fca.0.1.insert, <4 x float> %lane2, 0, 2
- ret %struct.float32x4x3_t %.fca.0.2.insert
-}
-
-define %struct.float64x2x3_t @test_vld3q_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld3q_dup_f64
-; CHECK: ld3r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = bitcast double* %a to i8*
- %vld_dup = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3lane.v2f64(i8* %0, <2 x double> undef, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
- %1 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld_dup, 0
- %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld_dup, 1
- %lane1 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld_dup, 2
- %lane2 = shufflevector <2 x double> %3, <2 x double> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float64x2x3_t undef, <2 x double> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x3_t %.fca.0.0.insert, <2 x double> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x2x3_t %.fca.0.1.insert, <2 x double> %lane2, 0, 2
- ret %struct.float64x2x3_t %.fca.0.2.insert
-}
-
-define %struct.int8x8x3_t @test_vld3_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld3_dup_s8
-; CHECK: ld3r {{{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b}, [x0]
-entry:
- %vld_dup = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
- %0 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 0
- %lane = shufflevector <8 x i8> %0, <8 x i8> undef, <8 x i32> zeroinitializer
- %1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 1
- %lane1 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
- %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 2
- %lane2 = shufflevector <8 x i8> %2, <8 x i8> undef, <8 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int8x8x3_t undef, <8 x i8> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x3_t %.fca.0.0.insert, <8 x i8> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x8x3_t %.fca.0.1.insert, <8 x i8> %lane2, 0, 2
- ret %struct.int8x8x3_t %.fca.0.2.insert
-}
-
-define %struct.int16x4x3_t @test_vld3_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld3_dup_s16
-; CHECK: ld3r {{{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h}, [x0]
-entry:
- %0 = bitcast i16* %a to i8*
- %vld_dup = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %0, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
- %1 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 0
- %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 1
- %lane1 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> zeroinitializer
- %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 2
- %lane2 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int16x4x3_t undef, <4 x i16> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x3_t %.fca.0.0.insert, <4 x i16> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x4x3_t %.fca.0.1.insert, <4 x i16> %lane2, 0, 2
- ret %struct.int16x4x3_t %.fca.0.2.insert
-}
-
-define %struct.int32x2x3_t @test_vld3_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld3_dup_s32
-; CHECK: ld3r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = bitcast i32* %a to i8*
- %vld_dup = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3lane.v2i32(i8* %0, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
- %1 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 0
- %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 1
- %lane1 = shufflevector <2 x i32> %2, <2 x i32> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 2
- %lane2 = shufflevector <2 x i32> %3, <2 x i32> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int32x2x3_t undef, <2 x i32> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x3_t %.fca.0.0.insert, <2 x i32> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x2x3_t %.fca.0.1.insert, <2 x i32> %lane2, 0, 2
- ret %struct.int32x2x3_t %.fca.0.2.insert
-}
-
-define %struct.int64x1x3_t @test_vld3_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld3_dup_s64
-; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = bitcast i64* %a to i8*
- %vld_dup = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8* %0, i32 8)
- %vld_dup.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 0
- %vld_dup.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 1
- %vld_dup.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 2
- %.fca.0.0.insert = insertvalue %struct.int64x1x3_t undef, <1 x i64> %vld_dup.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x3_t %.fca.0.0.insert, <1 x i64> %vld_dup.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x1x3_t %.fca.0.1.insert, <1 x i64> %vld_dup.fca.2.extract, 0, 2
- ret %struct.int64x1x3_t %.fca.0.2.insert
-}
-
-define %struct.float32x2x3_t @test_vld3_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld3_dup_f32
-; CHECK: ld3r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = bitcast float* %a to i8*
- %vld_dup = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8* %0, <2 x float> undef, <2 x float> undef, <2 x float> undef, i32 0, i32 4)
- %1 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld_dup, 0
- %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld_dup, 1
- %lane1 = shufflevector <2 x float> %2, <2 x float> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld_dup, 2
- %lane2 = shufflevector <2 x float> %3, <2 x float> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float32x2x3_t undef, <2 x float> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x3_t %.fca.0.0.insert, <2 x float> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x2x3_t %.fca.0.1.insert, <2 x float> %lane2, 0, 2
- ret %struct.float32x2x3_t %.fca.0.2.insert
-}
-
-define %struct.float64x1x3_t @test_vld3_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld3_dup_f64
-; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = bitcast double* %a to i8*
- %vld_dup = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8* %0, i32 8)
- %vld_dup.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld_dup, 0
- %vld_dup.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld_dup, 1
- %vld_dup.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld_dup, 2
- %.fca.0.0.insert = insertvalue %struct.float64x1x3_t undef, <1 x double> %vld_dup.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x3_t %.fca.0.0.insert, <1 x double> %vld_dup.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x1x3_t %.fca.0.1.insert, <1 x double> %vld_dup.fca.2.extract, 0, 2
- ret %struct.float64x1x3_t %.fca.0.2.insert
-}
-
-define %struct.int8x16x4_t @test_vld4q_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld4q_dup_s8
-; CHECK: ld4r {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, [x0]
-entry:
- %vld_dup = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
- %0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 0
- %lane = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
- %1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 1
- %lane1 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
- %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 2
- %lane2 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> zeroinitializer
- %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 3
- %lane3 = shufflevector <16 x i8> %3, <16 x i8> undef, <16 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int8x16x4_t undef, <16 x i8> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x16x4_t %.fca.0.0.insert, <16 x i8> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x16x4_t %.fca.0.1.insert, <16 x i8> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int8x16x4_t %.fca.0.2.insert, <16 x i8> %lane3, 0, 3
- ret %struct.int8x16x4_t %.fca.0.3.insert
-}
-
-define %struct.int16x8x4_t @test_vld4q_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld4q_dup_s16
-; CHECK: ld4r {{{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h}, [x0]
-entry:
- %0 = bitcast i16* %a to i8*
- %vld_dup = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4lane.v8i16(i8* %0, <8 x i16> undef, <8 x i16> undef, <8 x i16> undef, <8 x i16> undef, i32 0, i32 2)
- %1 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 0
- %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
- %2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 1
- %lane1 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> zeroinitializer
- %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 2
- %lane2 = shufflevector <8 x i16> %3, <8 x i16> undef, <8 x i32> zeroinitializer
- %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 3
- %lane3 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int16x8x4_t undef, <8 x i16> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x4_t %.fca.0.0.insert, <8 x i16> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x8x4_t %.fca.0.1.insert, <8 x i16> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int16x8x4_t %.fca.0.2.insert, <8 x i16> %lane3, 0, 3
- ret %struct.int16x8x4_t %.fca.0.3.insert
-}
-
-define %struct.int32x4x4_t @test_vld4q_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld4q_dup_s32
-; CHECK: ld4r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = bitcast i32* %a to i8*
- %vld_dup = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4lane.v4i32(i8* %0, <4 x i32> undef, <4 x i32> undef, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
- %1 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 0
- %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 1
- %lane1 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
- %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 2
- %lane2 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer
- %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 3
- %lane3 = shufflevector <4 x i32> %4, <4 x i32> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int32x4x4_t undef, <4 x i32> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x4_t %.fca.0.0.insert, <4 x i32> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x4x4_t %.fca.0.1.insert, <4 x i32> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int32x4x4_t %.fca.0.2.insert, <4 x i32> %lane3, 0, 3
- ret %struct.int32x4x4_t %.fca.0.3.insert
-}
-
-define %struct.int64x2x4_t @test_vld4q_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld4q_dup_s64
-; CHECK: ld4r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = bitcast i64* %a to i8*
- %vld_dup = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4lane.v2i64(i8* %0, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, i32 0, i32 8)
- %1 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 0
- %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 1
- %lane1 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 2
- %lane2 = shufflevector <2 x i64> %3, <2 x i64> undef, <2 x i32> zeroinitializer
- %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 3
- %lane3 = shufflevector <2 x i64> %4, <2 x i64> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int64x2x4_t undef, <2 x i64> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x4_t %.fca.0.0.insert, <2 x i64> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x2x4_t %.fca.0.1.insert, <2 x i64> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int64x2x4_t %.fca.0.2.insert, <2 x i64> %lane3, 0, 3
- ret %struct.int64x2x4_t %.fca.0.3.insert
-}
-
-define %struct.float32x4x4_t @test_vld4q_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld4q_dup_f32
-; CHECK: ld4r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
-entry:
- %0 = bitcast float* %a to i8*
- %vld_dup = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4lane.v4f32(i8* %0, <4 x float> undef, <4 x float> undef, <4 x float> undef, <4 x float> undef, i32 0, i32 4)
- %1 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 0
- %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 1
- %lane1 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
- %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 2
- %lane2 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> zeroinitializer
- %4 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 3
- %lane3 = shufflevector <4 x float> %4, <4 x float> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float32x4x4_t undef, <4 x float> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x4_t %.fca.0.0.insert, <4 x float> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x4x4_t %.fca.0.1.insert, <4 x float> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float32x4x4_t %.fca.0.2.insert, <4 x float> %lane3, 0, 3
- ret %struct.float32x4x4_t %.fca.0.3.insert
-}
-
-define %struct.float64x2x4_t @test_vld4q_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld4q_dup_f64
-; CHECK: ld4r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
-entry:
- %0 = bitcast double* %a to i8*
- %vld_dup = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %0, <2 x double> undef, <2 x double> undef, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
- %1 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 0
- %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 1
- %lane1 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 2
- %lane2 = shufflevector <2 x double> %3, <2 x double> undef, <2 x i32> zeroinitializer
- %4 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 3
- %lane3 = shufflevector <2 x double> %4, <2 x double> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float64x2x4_t undef, <2 x double> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x4_t %.fca.0.0.insert, <2 x double> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x2x4_t %.fca.0.1.insert, <2 x double> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float64x2x4_t %.fca.0.2.insert, <2 x double> %lane3, 0, 3
- ret %struct.float64x2x4_t %.fca.0.3.insert
-}
-
-define %struct.int8x8x4_t @test_vld4_dup_s8(i8* %a) {
-; CHECK-LABEL: test_vld4_dup_s8
-; CHECK: ld4r {{{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b}, [x0]
-entry:
- %vld_dup = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
- %0 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 0
- %lane = shufflevector <8 x i8> %0, <8 x i8> undef, <8 x i32> zeroinitializer
- %1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 1
- %lane1 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
- %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 2
- %lane2 = shufflevector <8 x i8> %2, <8 x i8> undef, <8 x i32> zeroinitializer
- %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 3
- %lane3 = shufflevector <8 x i8> %3, <8 x i8> undef, <8 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int8x8x4_t undef, <8 x i8> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x4_t %.fca.0.0.insert, <8 x i8> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x8x4_t %.fca.0.1.insert, <8 x i8> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int8x8x4_t %.fca.0.2.insert, <8 x i8> %lane3, 0, 3
- ret %struct.int8x8x4_t %.fca.0.3.insert
-}
-
-define %struct.int16x4x4_t @test_vld4_dup_s16(i16* %a) {
-; CHECK-LABEL: test_vld4_dup_s16
-; CHECK: ld4r {{{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h}, [x0]
-entry:
- %0 = bitcast i16* %a to i8*
- %vld_dup = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4lane.v4i16(i8* %0, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
- %1 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 0
- %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
- %2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 1
- %lane1 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> zeroinitializer
- %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 2
- %lane2 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer
- %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 3
- %lane3 = shufflevector <4 x i16> %4, <4 x i16> undef, <4 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int16x4x4_t undef, <4 x i16> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x4_t %.fca.0.0.insert, <4 x i16> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x4x4_t %.fca.0.1.insert, <4 x i16> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int16x4x4_t %.fca.0.2.insert, <4 x i16> %lane3, 0, 3
- ret %struct.int16x4x4_t %.fca.0.3.insert
-}
-
-define %struct.int32x2x4_t @test_vld4_dup_s32(i32* %a) {
-; CHECK-LABEL: test_vld4_dup_s32
-; CHECK: ld4r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = bitcast i32* %a to i8*
- %vld_dup = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %0, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
- %1 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 0
- %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 1
- %lane1 = shufflevector <2 x i32> %2, <2 x i32> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 2
- %lane2 = shufflevector <2 x i32> %3, <2 x i32> undef, <2 x i32> zeroinitializer
- %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 3
- %lane3 = shufflevector <2 x i32> %4, <2 x i32> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.int32x2x4_t undef, <2 x i32> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x4_t %.fca.0.0.insert, <2 x i32> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x2x4_t %.fca.0.1.insert, <2 x i32> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int32x2x4_t %.fca.0.2.insert, <2 x i32> %lane3, 0, 3
- ret %struct.int32x2x4_t %.fca.0.3.insert
-}
-
-define %struct.int64x1x4_t @test_vld4_dup_s64(i64* %a) {
-; CHECK-LABEL: test_vld4_dup_s64
-; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = bitcast i64* %a to i8*
- %vld_dup = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8* %0, i32 8)
- %vld_dup.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 0
- %vld_dup.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 1
- %vld_dup.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 2
- %vld_dup.fca.3.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 3
- %.fca.0.0.insert = insertvalue %struct.int64x1x4_t undef, <1 x i64> %vld_dup.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x4_t %.fca.0.0.insert, <1 x i64> %vld_dup.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x1x4_t %.fca.0.1.insert, <1 x i64> %vld_dup.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int64x1x4_t %.fca.0.2.insert, <1 x i64> %vld_dup.fca.3.extract, 0, 3
- ret %struct.int64x1x4_t %.fca.0.3.insert
-}
-
-define %struct.float32x2x4_t @test_vld4_dup_f32(float* %a) {
-; CHECK-LABEL: test_vld4_dup_f32
-; CHECK: ld4r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
-entry:
- %0 = bitcast float* %a to i8*
- %vld_dup = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4lane.v2f32(i8* %0, <2 x float> undef, <2 x float> undef, <2 x float> undef, <2 x float> undef, i32 0, i32 4)
- %1 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 0
- %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
- %2 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 1
- %lane1 = shufflevector <2 x float> %2, <2 x float> undef, <2 x i32> zeroinitializer
- %3 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 2
- %lane2 = shufflevector <2 x float> %3, <2 x float> undef, <2 x i32> zeroinitializer
- %4 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 3
- %lane3 = shufflevector <2 x float> %4, <2 x float> undef, <2 x i32> zeroinitializer
- %.fca.0.0.insert = insertvalue %struct.float32x2x4_t undef, <2 x float> %lane, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x4_t %.fca.0.0.insert, <2 x float> %lane1, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x2x4_t %.fca.0.1.insert, <2 x float> %lane2, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float32x2x4_t %.fca.0.2.insert, <2 x float> %lane3, 0, 3
- ret %struct.float32x2x4_t %.fca.0.3.insert
-}
-
-define %struct.float64x1x4_t @test_vld4_dup_f64(double* %a) {
-; CHECK-LABEL: test_vld4_dup_f64
-; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = bitcast double* %a to i8*
- %vld_dup = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8* %0, i32 8)
- %vld_dup.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 0
- %vld_dup.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 1
- %vld_dup.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 2
- %vld_dup.fca.3.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 3
- %.fca.0.0.insert = insertvalue %struct.float64x1x4_t undef, <1 x double> %vld_dup.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x4_t %.fca.0.0.insert, <1 x double> %vld_dup.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x1x4_t %.fca.0.1.insert, <1 x double> %vld_dup.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float64x1x4_t %.fca.0.2.insert, <1 x double> %vld_dup.fca.3.extract, 0, 3
- ret %struct.float64x1x4_t %.fca.0.3.insert
-}
-
-define <16 x i8> @test_vld1q_lane_s8(i8* %a, <16 x i8> %b) {
-; CHECK-LABEL: test_vld1q_lane_s8
-; CHECK: ld1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %0 = load i8* %a, align 1
- %vld1_lane = insertelement <16 x i8> %b, i8 %0, i32 15
- ret <16 x i8> %vld1_lane
-}
-
-define <8 x i16> @test_vld1q_lane_s16(i16* %a, <8 x i16> %b) {
-; CHECK-LABEL: test_vld1q_lane_s16
-; CHECK: ld1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %0 = load i16* %a, align 2
- %vld1_lane = insertelement <8 x i16> %b, i16 %0, i32 7
- ret <8 x i16> %vld1_lane
-}
-
-define <4 x i32> @test_vld1q_lane_s32(i32* %a, <4 x i32> %b) {
-; CHECK-LABEL: test_vld1q_lane_s32
-; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = load i32* %a, align 4
- %vld1_lane = insertelement <4 x i32> %b, i32 %0, i32 3
- ret <4 x i32> %vld1_lane
-}
-
-define <2 x i64> @test_vld1q_lane_s64(i64* %a, <2 x i64> %b) {
-; CHECK-LABEL: test_vld1q_lane_s64
-; CHECK: ld1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %0 = load i64* %a, align 8
- %vld1_lane = insertelement <2 x i64> %b, i64 %0, i32 1
- ret <2 x i64> %vld1_lane
-}
-
-define <4 x float> @test_vld1q_lane_f32(float* %a, <4 x float> %b) {
-; CHECK-LABEL: test_vld1q_lane_f32
-; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = load float* %a, align 4
- %vld1_lane = insertelement <4 x float> %b, float %0, i32 3
- ret <4 x float> %vld1_lane
-}
-
-define <2 x double> @test_vld1q_lane_f64(double* %a, <2 x double> %b) {
-; CHECK-LABEL: test_vld1q_lane_f64
-; CHECK: ld1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %0 = load double* %a, align 8
- %vld1_lane = insertelement <2 x double> %b, double %0, i32 1
- ret <2 x double> %vld1_lane
-}
-
-define <8 x i8> @test_vld1_lane_s8(i8* %a, <8 x i8> %b) {
-; CHECK-LABEL: test_vld1_lane_s8
-; CHECK: ld1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %0 = load i8* %a, align 1
- %vld1_lane = insertelement <8 x i8> %b, i8 %0, i32 7
- ret <8 x i8> %vld1_lane
-}
-
-define <4 x i16> @test_vld1_lane_s16(i16* %a, <4 x i16> %b) {
-; CHECK-LABEL: test_vld1_lane_s16
-; CHECK: ld1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %0 = load i16* %a, align 2
- %vld1_lane = insertelement <4 x i16> %b, i16 %0, i32 3
- ret <4 x i16> %vld1_lane
-}
-
-define <2 x i32> @test_vld1_lane_s32(i32* %a, <2 x i32> %b) {
-; CHECK-LABEL: test_vld1_lane_s32
-; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = load i32* %a, align 4
- %vld1_lane = insertelement <2 x i32> %b, i32 %0, i32 1
- ret <2 x i32> %vld1_lane
-}
-
-define <1 x i64> @test_vld1_lane_s64(i64* %a, <1 x i64> %b) {
-; CHECK-LABEL: test_vld1_lane_s64
-; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = load i64* %a, align 8
- %vld1_lane = insertelement <1 x i64> undef, i64 %0, i32 0
- ret <1 x i64> %vld1_lane
-}
-
-define <2 x float> @test_vld1_lane_f32(float* %a, <2 x float> %b) {
-; CHECK-LABEL: test_vld1_lane_f32
-; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = load float* %a, align 4
- %vld1_lane = insertelement <2 x float> %b, float %0, i32 1
- ret <2 x float> %vld1_lane
-}
-
-define <1 x double> @test_vld1_lane_f64(double* %a, <1 x double> %b) {
-; CHECK-LABEL: test_vld1_lane_f64
-; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
-entry:
- %0 = load double* %a, align 8
- %vld1_lane = insertelement <1 x double> undef, double %0, i32 0
- ret <1 x double> %vld1_lane
-}
-
-define %struct.int16x8x2_t @test_vld2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vld2q_lane_s16
-; CHECK: ld2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
- %0 = bitcast i16* %a to i8*
- %vld2_lane = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, i32 7, i32 2)
- %vld2_lane.fca.0.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.int16x8x2_t %.fca.0.1.insert
-}
-
-define %struct.int32x4x2_t @test_vld2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vld2q_lane_s32
-; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
- %0 = bitcast i32* %a to i8*
- %vld2_lane = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, i32 3, i32 4)
- %vld2_lane.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.int32x4x2_t %.fca.0.1.insert
-}
-
-define %struct.int64x2x2_t @test_vld2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vld2q_lane_s64
-; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
- %0 = bitcast i64* %a to i8*
- %vld2_lane = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, i32 1, i32 8)
- %vld2_lane.fca.0.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.int64x2x2_t undef, <2 x i64> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x2_t %.fca.0.0.insert, <2 x i64> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.int64x2x2_t %.fca.0.1.insert
-}
-
-define %struct.float32x4x2_t @test_vld2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vld2q_lane_f32
-; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
- %0 = bitcast float* %a to i8*
- %vld2_lane = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, i32 3, i32 4)
- %vld2_lane.fca.0.extract = extractvalue { <4 x float>, <4 x float> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <4 x float>, <4 x float> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.float32x4x2_t %.fca.0.1.insert
-}
-
-define %struct.float64x2x2_t @test_vld2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vld2q_lane_f64
-; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
- %0 = bitcast double* %a to i8*
- %vld2_lane = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, i32 1, i32 8)
- %vld2_lane.fca.0.extract = extractvalue { <2 x double>, <2 x double> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <2 x double>, <2 x double> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.float64x2x2_t undef, <2 x double> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x2_t %.fca.0.0.insert, <2 x double> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.float64x2x2_t %.fca.0.1.insert
-}
-
-define %struct.int8x8x2_t @test_vld2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vld2_lane_s8
-; CHECK: ld2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
- %vld2_lane = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, i32 7, i32 1)
- %vld2_lane.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.int8x8x2_t %.fca.0.1.insert
-}
-
-define %struct.int16x4x2_t @test_vld2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vld2_lane_s16
-; CHECK: ld2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
- %0 = bitcast i16* %a to i8*
- %vld2_lane = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, i32 3, i32 2)
- %vld2_lane.fca.0.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.int16x4x2_t %.fca.0.1.insert
-}
-
-define %struct.int32x2x2_t @test_vld2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vld2_lane_s32
-; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
- %0 = bitcast i32* %a to i8*
- %vld2_lane = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, i32 1, i32 4)
- %vld2_lane.fca.0.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.int32x2x2_t %.fca.0.1.insert
-}
-
-define %struct.int64x1x2_t @test_vld2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vld2_lane_s64
-; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
- %0 = bitcast i64* %a to i8*
- %vld2_lane = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, i32 0, i32 8)
- %vld2_lane.fca.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.int64x1x2_t undef, <1 x i64> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x2_t %.fca.0.0.insert, <1 x i64> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.int64x1x2_t %.fca.0.1.insert
-}
-
-define %struct.float32x2x2_t @test_vld2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vld2_lane_f32
-; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
- %0 = bitcast float* %a to i8*
- %vld2_lane = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, i32 1, i32 4)
- %vld2_lane.fca.0.extract = extractvalue { <2 x float>, <2 x float> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <2 x float>, <2 x float> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.float32x2x2_t %.fca.0.1.insert
-}
-
-define %struct.float64x1x2_t @test_vld2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vld2_lane_f64
-; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
- %0 = bitcast double* %a to i8*
- %vld2_lane = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, i32 0, i32 8)
- %vld2_lane.fca.0.extract = extractvalue { <1 x double>, <1 x double> } %vld2_lane, 0
- %vld2_lane.fca.1.extract = extractvalue { <1 x double>, <1 x double> } %vld2_lane, 1
- %.fca.0.0.insert = insertvalue %struct.float64x1x2_t undef, <1 x double> %vld2_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x2_t %.fca.0.0.insert, <1 x double> %vld2_lane.fca.1.extract, 0, 1
- ret %struct.float64x1x2_t %.fca.0.1.insert
-}
-
-define %struct.int16x8x3_t @test_vld3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vld3q_lane_s16
-; CHECK: ld3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <8 x i16>] %b.coerce, 2
- %0 = bitcast i16* %a to i8*
- %vld3_lane = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, i32 7, i32 2)
- %vld3_lane.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.int16x8x3_t undef, <8 x i16> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x3_t %.fca.0.0.insert, <8 x i16> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x8x3_t %.fca.0.1.insert, <8 x i16> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.int16x8x3_t %.fca.0.2.insert
-}
-
-define %struct.int32x4x3_t @test_vld3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vld3q_lane_s32
-; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x i32>] %b.coerce, 2
- %0 = bitcast i32* %a to i8*
- %vld3_lane = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, i32 3, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.int32x4x3_t undef, <4 x i32> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x3_t %.fca.0.0.insert, <4 x i32> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x4x3_t %.fca.0.1.insert, <4 x i32> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.int32x4x3_t %.fca.0.2.insert
-}
-
-define %struct.int64x2x3_t @test_vld3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vld3q_lane_s64
-; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x i64>] %b.coerce, 2
- %0 = bitcast i64* %a to i8*
- %vld3_lane = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, i32 1, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.int64x2x3_t undef, <2 x i64> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x3_t %.fca.0.0.insert, <2 x i64> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x2x3_t %.fca.0.1.insert, <2 x i64> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.int64x2x3_t %.fca.0.2.insert
-}
-
-define %struct.float32x4x3_t @test_vld3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vld3q_lane_f32
-; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x float>] %b.coerce, 2
- %0 = bitcast float* %a to i8*
- %vld3_lane = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, i32 3, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.float32x4x3_t undef, <4 x float> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x3_t %.fca.0.0.insert, <4 x float> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x4x3_t %.fca.0.1.insert, <4 x float> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.float32x4x3_t %.fca.0.2.insert
-}
-
-define %struct.float64x2x3_t @test_vld3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vld3q_lane_f64
-; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x double>] %b.coerce, 2
- %0 = bitcast double* %a to i8*
- %vld3_lane = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, i32 1, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.float64x2x3_t undef, <2 x double> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x3_t %.fca.0.0.insert, <2 x double> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x2x3_t %.fca.0.1.insert, <2 x double> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.float64x2x3_t %.fca.0.2.insert
-}
-
-define %struct.int8x8x3_t @test_vld3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vld3_lane_s8
-; CHECK: ld3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <8 x i8>] %b.coerce, 2
- %vld3_lane = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, i32 7, i32 1)
- %vld3_lane.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.int8x8x3_t undef, <8 x i8> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x3_t %.fca.0.0.insert, <8 x i8> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x8x3_t %.fca.0.1.insert, <8 x i8> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.int8x8x3_t %.fca.0.2.insert
-}
-
-define %struct.int16x4x3_t @test_vld3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vld3_lane_s16
-; CHECK: ld3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x i16>] %b.coerce, 2
- %0 = bitcast i16* %a to i8*
- %vld3_lane = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, i32 3, i32 2)
- %vld3_lane.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.int16x4x3_t undef, <4 x i16> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x3_t %.fca.0.0.insert, <4 x i16> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x4x3_t %.fca.0.1.insert, <4 x i16> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.int16x4x3_t %.fca.0.2.insert
-}
-
-define %struct.int32x2x3_t @test_vld3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vld3_lane_s32
-; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x i32>] %b.coerce, 2
- %0 = bitcast i32* %a to i8*
- %vld3_lane = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, i32 1, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.int32x2x3_t undef, <2 x i32> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x3_t %.fca.0.0.insert, <2 x i32> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x2x3_t %.fca.0.1.insert, <2 x i32> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.int32x2x3_t %.fca.0.2.insert
-}
-
-define %struct.int64x1x3_t @test_vld3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vld3_lane_s64
-; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <1 x i64>] %b.coerce, 2
- %0 = bitcast i64* %a to i8*
- %vld3_lane = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, i32 0, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.int64x1x3_t undef, <1 x i64> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x3_t %.fca.0.0.insert, <1 x i64> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x1x3_t %.fca.0.1.insert, <1 x i64> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.int64x1x3_t %.fca.0.2.insert
-}
-
-define %struct.float32x2x3_t @test_vld3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vld3_lane_f32
-; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x float>] %b.coerce, 2
- %0 = bitcast float* %a to i8*
- %vld3_lane = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, i32 1, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.float32x2x3_t undef, <2 x float> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x3_t %.fca.0.0.insert, <2 x float> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x2x3_t %.fca.0.1.insert, <2 x float> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.float32x2x3_t %.fca.0.2.insert
-}
-
-define %struct.float64x1x3_t @test_vld3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vld3_lane_f64
-; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <1 x double>] %b.coerce, 2
- %0 = bitcast double* %a to i8*
- %vld3_lane = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, i32 0, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 2
- %.fca.0.0.insert = insertvalue %struct.float64x1x3_t undef, <1 x double> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x3_t %.fca.0.0.insert, <1 x double> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x1x3_t %.fca.0.1.insert, <1 x double> %vld3_lane.fca.2.extract, 0, 2
- ret %struct.float64x1x3_t %.fca.0.2.insert
-}
-
-define %struct.int8x16x4_t @test_vld4q_lane_s8(i8* %a, [4 x <16 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vld4q_lane_s8
-; CHECK: ld4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %vld3_lane = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4lane.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, <16 x i8> %b.coerce.fca.3.extract, i32 15, i32 1)
- %vld3_lane.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int8x16x4_t undef, <16 x i8> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x16x4_t %.fca.0.0.insert, <16 x i8> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x16x4_t %.fca.0.1.insert, <16 x i8> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int8x16x4_t %.fca.0.2.insert, <16 x i8> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int8x16x4_t %.fca.0.3.insert
-}
-
-define %struct.int16x8x4_t @test_vld4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vld4q_lane_s16
-; CHECK: ld4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <8 x i16>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <8 x i16>] %b.coerce, 3
- %0 = bitcast i16* %a to i8*
- %vld3_lane = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, <8 x i16> %b.coerce.fca.3.extract, i32 7, i32 2)
- %vld3_lane.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int16x8x4_t undef, <8 x i16> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x8x4_t %.fca.0.0.insert, <8 x i16> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x8x4_t %.fca.0.1.insert, <8 x i16> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int16x8x4_t %.fca.0.2.insert, <8 x i16> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int16x8x4_t %.fca.0.3.insert
-}
-
-define %struct.int32x4x4_t @test_vld4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vld4q_lane_s32
-; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x i32>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x i32>] %b.coerce, 3
- %0 = bitcast i32* %a to i8*
- %vld3_lane = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, <4 x i32> %b.coerce.fca.3.extract, i32 3, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int32x4x4_t undef, <4 x i32> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x4x4_t %.fca.0.0.insert, <4 x i32> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x4x4_t %.fca.0.1.insert, <4 x i32> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int32x4x4_t %.fca.0.2.insert, <4 x i32> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int32x4x4_t %.fca.0.3.insert
-}
-
-define %struct.int64x2x4_t @test_vld4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vld4q_lane_s64
-; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x i64>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x i64>] %b.coerce, 3
- %0 = bitcast i64* %a to i8*
- %vld3_lane = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, <2 x i64> %b.coerce.fca.3.extract, i32 1, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int64x2x4_t undef, <2 x i64> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x2x4_t %.fca.0.0.insert, <2 x i64> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x2x4_t %.fca.0.1.insert, <2 x i64> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int64x2x4_t %.fca.0.2.insert, <2 x i64> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int64x2x4_t %.fca.0.3.insert
-}
-
-define %struct.float32x4x4_t @test_vld4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vld4q_lane_f32
-; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x float>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x float>] %b.coerce, 3
- %0 = bitcast float* %a to i8*
- %vld3_lane = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, <4 x float> %b.coerce.fca.3.extract, i32 3, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.float32x4x4_t undef, <4 x float> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x4x4_t %.fca.0.0.insert, <4 x float> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x4x4_t %.fca.0.1.insert, <4 x float> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float32x4x4_t %.fca.0.2.insert, <4 x float> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.float32x4x4_t %.fca.0.3.insert
-}
-
-define %struct.float64x2x4_t @test_vld4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vld4q_lane_f64
-; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x double>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x double>] %b.coerce, 3
- %0 = bitcast double* %a to i8*
- %vld3_lane = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, <2 x double> %b.coerce.fca.3.extract, i32 1, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.float64x2x4_t undef, <2 x double> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x2x4_t %.fca.0.0.insert, <2 x double> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x2x4_t %.fca.0.1.insert, <2 x double> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float64x2x4_t %.fca.0.2.insert, <2 x double> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.float64x2x4_t %.fca.0.3.insert
-}
-
-define %struct.int8x8x4_t @test_vld4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vld4_lane_s8
-; CHECK: ld4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <8 x i8>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <8 x i8>] %b.coerce, 3
- %vld3_lane = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, <8 x i8> %b.coerce.fca.3.extract, i32 7, i32 1)
- %vld3_lane.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int8x8x4_t undef, <8 x i8> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int8x8x4_t %.fca.0.0.insert, <8 x i8> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int8x8x4_t %.fca.0.1.insert, <8 x i8> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int8x8x4_t %.fca.0.2.insert, <8 x i8> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int8x8x4_t %.fca.0.3.insert
-}
-
-define %struct.int16x4x4_t @test_vld4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vld4_lane_s16
-; CHECK: ld4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x i16>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x i16>] %b.coerce, 3
- %0 = bitcast i16* %a to i8*
- %vld3_lane = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, <4 x i16> %b.coerce.fca.3.extract, i32 3, i32 2)
- %vld3_lane.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int16x4x4_t undef, <4 x i16> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int16x4x4_t %.fca.0.0.insert, <4 x i16> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int16x4x4_t %.fca.0.1.insert, <4 x i16> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int16x4x4_t %.fca.0.2.insert, <4 x i16> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int16x4x4_t %.fca.0.3.insert
-}
-
-define %struct.int32x2x4_t @test_vld4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vld4_lane_s32
-; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x i32>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x i32>] %b.coerce, 3
- %0 = bitcast i32* %a to i8*
- %vld3_lane = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, <2 x i32> %b.coerce.fca.3.extract, i32 1, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int32x2x4_t undef, <2 x i32> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int32x2x4_t %.fca.0.0.insert, <2 x i32> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int32x2x4_t %.fca.0.1.insert, <2 x i32> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int32x2x4_t %.fca.0.2.insert, <2 x i32> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int32x2x4_t %.fca.0.3.insert
-}
-
-define %struct.int64x1x4_t @test_vld4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vld4_lane_s64
-; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <1 x i64>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <1 x i64>] %b.coerce, 3
- %0 = bitcast i64* %a to i8*
- %vld3_lane = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, <1 x i64> %b.coerce.fca.3.extract, i32 0, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.int64x1x4_t undef, <1 x i64> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.int64x1x4_t %.fca.0.0.insert, <1 x i64> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.int64x1x4_t %.fca.0.1.insert, <1 x i64> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.int64x1x4_t %.fca.0.2.insert, <1 x i64> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.int64x1x4_t %.fca.0.3.insert
-}
-
-define %struct.float32x2x4_t @test_vld4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vld4_lane_f32
-; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x float>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x float>] %b.coerce, 3
- %0 = bitcast float* %a to i8*
- %vld3_lane = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, <2 x float> %b.coerce.fca.3.extract, i32 1, i32 4)
- %vld3_lane.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.float32x2x4_t undef, <2 x float> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float32x2x4_t %.fca.0.0.insert, <2 x float> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float32x2x4_t %.fca.0.1.insert, <2 x float> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float32x2x4_t %.fca.0.2.insert, <2 x float> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.float32x2x4_t %.fca.0.3.insert
-}
-
-define %struct.float64x1x4_t @test_vld4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vld4_lane_f64
-; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <1 x double>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <1 x double>] %b.coerce, 3
- %0 = bitcast double* %a to i8*
- %vld3_lane = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, <1 x double> %b.coerce.fca.3.extract, i32 0, i32 8)
- %vld3_lane.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 0
- %vld3_lane.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 1
- %vld3_lane.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 2
- %vld3_lane.fca.3.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 3
- %.fca.0.0.insert = insertvalue %struct.float64x1x4_t undef, <1 x double> %vld3_lane.fca.0.extract, 0, 0
- %.fca.0.1.insert = insertvalue %struct.float64x1x4_t %.fca.0.0.insert, <1 x double> %vld3_lane.fca.1.extract, 0, 1
- %.fca.0.2.insert = insertvalue %struct.float64x1x4_t %.fca.0.1.insert, <1 x double> %vld3_lane.fca.2.extract, 0, 2
- %.fca.0.3.insert = insertvalue %struct.float64x1x4_t %.fca.0.2.insert, <1 x double> %vld3_lane.fca.3.extract, 0, 3
- ret %struct.float64x1x4_t %.fca.0.3.insert
-}
-
-define void @test_vst1q_lane_s8(i8* %a, <16 x i8> %b) {
-; CHECK-LABEL: test_vst1q_lane_s8
-; CHECK: st1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <16 x i8> %b, i32 15
- store i8 %0, i8* %a, align 1
- ret void
-}
-
-define void @test_vst1q_lane_s16(i16* %a, <8 x i16> %b) {
-; CHECK-LABEL: test_vst1q_lane_s16
-; CHECK: st1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <8 x i16> %b, i32 7
- store i16 %0, i16* %a, align 2
- ret void
-}
-
-define void @test_vst1q_lane_s32(i32* %a, <4 x i32> %b) {
-; CHECK-LABEL: test_vst1q_lane_s32
-; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <4 x i32> %b, i32 3
- store i32 %0, i32* %a, align 4
- ret void
-}
-
-define void @test_vst1q_lane_s64(i64* %a, <2 x i64> %b) {
-; CHECK-LABEL: test_vst1q_lane_s64
-; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <2 x i64> %b, i32 1
- store i64 %0, i64* %a, align 8
- ret void
-}
-
-define void @test_vst1q_lane_f32(float* %a, <4 x float> %b) {
-; CHECK-LABEL: test_vst1q_lane_f32
-; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <4 x float> %b, i32 3
- store float %0, float* %a, align 4
- ret void
-}
-
-define void @test_vst1q_lane_f64(double* %a, <2 x double> %b) {
-; CHECK-LABEL: test_vst1q_lane_f64
-; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <2 x double> %b, i32 1
- store double %0, double* %a, align 8
- ret void
-}
-
-define void @test_vst1_lane_s8(i8* %a, <8 x i8> %b) {
-; CHECK-LABEL: test_vst1_lane_s8
-; CHECK: st1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <8 x i8> %b, i32 7
- store i8 %0, i8* %a, align 1
- ret void
-}
-
-define void @test_vst1_lane_s16(i16* %a, <4 x i16> %b) {
-; CHECK-LABEL: test_vst1_lane_s16
-; CHECK: st1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <4 x i16> %b, i32 3
- store i16 %0, i16* %a, align 2
- ret void
-}
-
-define void @test_vst1_lane_s32(i32* %a, <2 x i32> %b) {
-; CHECK-LABEL: test_vst1_lane_s32
-; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <2 x i32> %b, i32 1
- store i32 %0, i32* %a, align 4
- ret void
-}
-
-define void @test_vst1_lane_s64(i64* %a, <1 x i64> %b) {
-; CHECK-LABEL: test_vst1_lane_s64
-; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <1 x i64> %b, i32 0
- store i64 %0, i64* %a, align 8
- ret void
-}
-
-define void @test_vst1_lane_f32(float* %a, <2 x float> %b) {
-; CHECK-LABEL: test_vst1_lane_f32
-; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <2 x float> %b, i32 1
- store float %0, float* %a, align 4
- ret void
-}
-
-define void @test_vst1_lane_f64(double* %a, <1 x double> %b) {
-; CHECK-LABEL: test_vst1_lane_f64
-; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %0 = extractelement <1 x double> %b, i32 0
- store double %0, double* %a, align 8
- ret void
-}
-
-define void @test_vst2q_lane_s8(i8* %a, [2 x <16 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_lane_s8
-; CHECK: st2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %b.coerce, 1
- tail call void @llvm.arm.neon.vst2lane.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, i32 15, i32 1)
- ret void
-}
-
-define void @test_vst2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_lane_s16
-; CHECK: st2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
- %0 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, i32 7, i32 2)
- ret void
-}
-
-define void @test_vst2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_lane_s32
-; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
- %0 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, i32 3, i32 4)
- ret void
-}
-
-define void @test_vst2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_lane_s64
-; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
- %0 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, i32 1, i32 8)
- ret void
-}
-
-define void @test_vst2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_lane_f32
-; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
- %0 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, i32 3, i32 4)
- ret void
-}
-
-define void @test_vst2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst2q_lane_f64
-; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
- %0 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, i32 1, i32 8)
- ret void
-}
-
-define void @test_vst2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst2_lane_s8
-; CHECK: st2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
- tail call void @llvm.arm.neon.vst2lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, i32 7, i32 1)
- ret void
-}
-
-define void @test_vst2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst2_lane_s16
-; CHECK: st2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
- %0 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, i32 3, i32 2)
- ret void
-}
-
-define void @test_vst2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst2_lane_s32
-; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
- %0 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, i32 1, i32 4)
- ret void
-}
-
-define void @test_vst2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst2_lane_s64
-; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
- %0 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, i32 0, i32 8)
- ret void
-}
-
-define void @test_vst2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst2_lane_f32
-; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
- %0 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, i32 1, i32 4)
- ret void
-}
-
-define void @test_vst2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst2_lane_f64
-; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
- %0 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, i32 0, i32 8)
- ret void
-}
-
-define void @test_vst3q_lane_s8(i8* %a, [3 x <16 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_lane_s8
-; CHECK: st3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <16 x i8>] %b.coerce, 2
- tail call void @llvm.arm.neon.vst3lane.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, i32 15, i32 1)
- ret void
-}
-
-define void @test_vst3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_lane_s16
-; CHECK: st3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <8 x i16>] %b.coerce, 2
- %0 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, i32 7, i32 2)
- ret void
-}
-
-define void @test_vst3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_lane_s32
-; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x i32>] %b.coerce, 2
- %0 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, i32 3, i32 4)
- ret void
-}
-
-define void @test_vst3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_lane_s64
-; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x i64>] %b.coerce, 2
- %0 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, i32 1, i32 8)
- ret void
-}
-
-define void @test_vst3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_lane_f32
-; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x float>] %b.coerce, 2
- %0 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, i32 3, i32 4)
- ret void
-}
-
-define void @test_vst3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst3q_lane_f64
-; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x double>] %b.coerce, 2
- %0 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, i32 1, i32 8)
- ret void
-}
-
-define void @test_vst3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst3_lane_s8
-; CHECK: st3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <8 x i8>] %b.coerce, 2
- tail call void @llvm.arm.neon.vst3lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, i32 7, i32 1)
- ret void
-}
-
-define void @test_vst3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst3_lane_s16
-; CHECK: st3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <4 x i16>] %b.coerce, 2
- %0 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, i32 3, i32 2)
- ret void
-}
-
-define void @test_vst3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst3_lane_s32
-; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x i32>] %b.coerce, 2
- %0 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, i32 1, i32 4)
- ret void
-}
-
-define void @test_vst3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst3_lane_s64
-; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <1 x i64>] %b.coerce, 2
- %0 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, i32 0, i32 8)
- ret void
-}
-
-define void @test_vst3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst3_lane_f32
-; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <2 x float>] %b.coerce, 2
- %0 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, i32 1, i32 4)
- ret void
-}
-
-define void @test_vst3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst3_lane_f64
-; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [3 x <1 x double>] %b.coerce, 2
- %0 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, i32 0, i32 8)
- ret void
-}
-
-define void @test_vst4q_lane_s8(i16* %a, [4 x <16 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_lane_s8
-; CHECK: st4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %0 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v16i8(i8* %0, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, <16 x i8> %b.coerce.fca.3.extract, i32 15, i32 2)
- ret void
-}
-
-define void @test_vst4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_lane_s16
-; CHECK: st4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <8 x i16>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <8 x i16>] %b.coerce, 3
- %0 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, <8 x i16> %b.coerce.fca.3.extract, i32 7, i32 2)
- ret void
-}
-
-define void @test_vst4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_lane_s32
-; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x i32>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x i32>] %b.coerce, 3
- %0 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, <4 x i32> %b.coerce.fca.3.extract, i32 3, i32 4)
- ret void
-}
-
-define void @test_vst4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_lane_s64
-; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x i64>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x i64>] %b.coerce, 3
- %0 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, <2 x i64> %b.coerce.fca.3.extract, i32 1, i32 8)
- ret void
-}
-
-define void @test_vst4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_lane_f32
-; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x float>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x float>] %b.coerce, 3
- %0 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, <4 x float> %b.coerce.fca.3.extract, i32 3, i32 4)
- ret void
-}
-
-define void @test_vst4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst4q_lane_f64
-; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x double>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x double>] %b.coerce, 3
- %0 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, <2 x double> %b.coerce.fca.3.extract, i32 1, i32 8)
- ret void
-}
-
-define void @test_vst4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
-; CHECK-LABEL: test_vst4_lane_s8
-; CHECK: st4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <8 x i8>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <8 x i8>] %b.coerce, 3
- tail call void @llvm.arm.neon.vst4lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, <8 x i8> %b.coerce.fca.3.extract, i32 7, i32 1)
- ret void
-}
-
-define void @test_vst4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
-; CHECK-LABEL: test_vst4_lane_s16
-; CHECK: st4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <4 x i16>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <4 x i16>] %b.coerce, 3
- %0 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, <4 x i16> %b.coerce.fca.3.extract, i32 3, i32 2)
- ret void
-}
-
-define void @test_vst4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
-; CHECK-LABEL: test_vst4_lane_s32
-; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x i32>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x i32>] %b.coerce, 3
- %0 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, <2 x i32> %b.coerce.fca.3.extract, i32 1, i32 4)
- ret void
-}
-
-define void @test_vst4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) {
-; CHECK-LABEL: test_vst4_lane_s64
-; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <1 x i64>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <1 x i64>] %b.coerce, 3
- %0 = bitcast i64* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, <1 x i64> %b.coerce.fca.3.extract, i32 0, i32 8)
- ret void
-}
-
-define void @test_vst4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) {
-; CHECK-LABEL: test_vst4_lane_f32
-; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <2 x float>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <2 x float>] %b.coerce, 3
- %0 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, <2 x float> %b.coerce.fca.3.extract, i32 1, i32 4)
- ret void
-}
-
-define void @test_vst4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) {
-; CHECK-LABEL: test_vst4_lane_f64
-; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
-entry:
- %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0
- %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1
- %b.coerce.fca.2.extract = extractvalue [4 x <1 x double>] %b.coerce, 2
- %b.coerce.fca.3.extract = extractvalue [4 x <1 x double>] %b.coerce, 3
- %0 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, <1 x double> %b.coerce.fca.3.extract, i32 0, i32 8)
- ret void
-}
-
-declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8*, <16 x i8>, <16 x i8>, i32, i32)
-declare { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32)
-declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
-declare { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2lane.v2i64(i8*, <2 x i64>, <2 x i64>, i32, i32)
-declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2lane.v4f32(i8*, <4 x float>, <4 x float>, i32, i32)
-declare { <2 x double>, <2 x double> } @llvm.arm.neon.vld2lane.v2f64(i8*, <2 x double>, <2 x double>, i32, i32)
-declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
-declare { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32)
-declare { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32)
-declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2lane.v2f32(i8*, <2 x float>, <2 x float>, i32, i32)
-declare { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8*, i32)
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32)
-declare { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
-declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32)
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8*, i32)
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32)
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8*, i32)
-declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32)
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8*, i32)
-declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2lane.v1i64(i8*, <1 x i64>, <1 x i64>, i32, i32)
-declare { <1 x double>, <1 x double> } @llvm.arm.neon.vld2lane.v1f64(i8*, <1 x double>, <1 x double>, i32, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32, i32)
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v16i8(i8*, <16 x i8>, <16 x i8>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v2i64(i8*, <2 x i64>, <2 x i64>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v2f64(i8*, <2 x double>, <2 x double>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v1i64(i8*, <1 x i64>, <1 x i64>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v1f64(i8*, <1 x double>, <1 x double>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32, i32)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-simd-ldst.ll b/test/CodeGen/AArch64/neon-simd-ldst.ll
deleted file mode 100644
index afc0901bbc0b..000000000000
--- a/test/CodeGen/AArch64/neon-simd-ldst.ll
+++ /dev/null
@@ -1,164 +0,0 @@
-; RUN: llc < %s -O2 -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define void @test_ldstq_4v(i8* noalias %io, i32 %count) {
-; CHECK-LABEL: test_ldstq_4v
-; CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
-; CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
-entry:
- %tobool62 = icmp eq i32 %count, 0
- br i1 %tobool62, label %while.end, label %while.body
-
-while.body: ; preds = %entry, %while.body
- %count.addr.063 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
- %dec = add i32 %count.addr.063, -1
- %vld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %io, i32 1)
- %vld4.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 3
- tail call void @llvm.arm.neon.vst4.v16i8(i8* %io, <16 x i8> %vld4.fca.0.extract, <16 x i8> %vld4.fca.1.extract, <16 x i8> %vld4.fca.2.extract, <16 x i8> %vld4.fca.3.extract, i32 1)
- %tobool = icmp eq i32 %dec, 0
- br i1 %tobool, label %while.end, label %while.body
-
-while.end: ; preds = %while.body, %entry
- ret void
-}
-
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8*, i32)
-
-declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
-
-define void @test_ldstq_3v(i8* noalias %io, i32 %count) {
-; CHECK-LABEL: test_ldstq_3v
-; CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0]
-; CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0]
-entry:
- %tobool47 = icmp eq i32 %count, 0
- br i1 %tobool47, label %while.end, label %while.body
-
-while.body: ; preds = %entry, %while.body
- %count.addr.048 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
- %dec = add i32 %count.addr.048, -1
- %vld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %io, i32 1)
- %vld3.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 2
- tail call void @llvm.arm.neon.vst3.v16i8(i8* %io, <16 x i8> %vld3.fca.0.extract, <16 x i8> %vld3.fca.1.extract, <16 x i8> %vld3.fca.2.extract, i32 1)
- %tobool = icmp eq i32 %dec, 0
- br i1 %tobool, label %while.end, label %while.body
-
-while.end: ; preds = %while.body, %entry
- ret void
-}
-
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32)
-
-declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
-
-define void @test_ldstq_2v(i8* noalias %io, i32 %count) {
-; CHECK-LABEL: test_ldstq_2v
-; CHECK: ld2 {v0.16b, v1.16b}, [x0]
-; CHECK: st2 {v0.16b, v1.16b}, [x0]
-entry:
- %tobool22 = icmp eq i32 %count, 0
- br i1 %tobool22, label %while.end, label %while.body
-
-while.body: ; preds = %entry, %while.body
- %count.addr.023 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
- %dec = add i32 %count.addr.023, -1
- %vld2 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %io, i32 1)
- %vld2.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 1
- tail call void @llvm.arm.neon.vst2.v16i8(i8* %io, <16 x i8> %vld2.fca.0.extract, <16 x i8> %vld2.fca.1.extract, i32 1)
- %tobool = icmp eq i32 %dec, 0
- br i1 %tobool, label %while.end, label %while.body
-
-while.end: ; preds = %while.body, %entry
- ret void
-}
-
-declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
-
-declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
-
-define void @test_ldst_4v(i8* noalias %io, i32 %count) {
-; CHECK-LABEL: test_ldst_4v
-; CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
-; CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
-entry:
- %tobool42 = icmp eq i32 %count, 0
- br i1 %tobool42, label %while.end, label %while.body
-
-while.body: ; preds = %entry, %while.body
- %count.addr.043 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
- %dec = add i32 %count.addr.043, -1
- %vld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %io, i32 1)
- %vld4.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 0
- %vld4.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 1
- %vld4.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 2
- %vld4.fca.3.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 3
- tail call void @llvm.arm.neon.vst4.v8i8(i8* %io, <8 x i8> %vld4.fca.0.extract, <8 x i8> %vld4.fca.1.extract, <8 x i8> %vld4.fca.2.extract, <8 x i8> %vld4.fca.3.extract, i32 1)
- %tobool = icmp eq i32 %dec, 0
- br i1 %tobool, label %while.end, label %while.body
-
-while.end: ; preds = %while.body, %entry
- ret void
-}
-
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
-
-declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
-
-define void @test_ldst_3v(i8* noalias %io, i32 %count) {
-; CHECK-LABEL: test_ldst_3v
-; CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0]
-; CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0]
-entry:
- %tobool32 = icmp eq i32 %count, 0
- br i1 %tobool32, label %while.end, label %while.body
-
-while.body: ; preds = %entry, %while.body
- %count.addr.033 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
- %dec = add i32 %count.addr.033, -1
- %vld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8* %io, i32 1)
- %vld3.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 0
- %vld3.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 1
- %vld3.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 2
- tail call void @llvm.arm.neon.vst3.v8i8(i8* %io, <8 x i8> %vld3.fca.0.extract, <8 x i8> %vld3.fca.1.extract, <8 x i8> %vld3.fca.2.extract, i32 1)
- %tobool = icmp eq i32 %dec, 0
- br i1 %tobool, label %while.end, label %while.body
-
-while.end: ; preds = %while.body, %entry
- ret void
-}
-
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8*, i32)
-
-declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
-
-define void @test_ldst_2v(i8* noalias %io, i32 %count) {
-; CHECK-LABEL: test_ldst_2v
-; CHECK: ld2 {v0.8b, v1.8b}, [x0]
-; CHECK: st2 {v0.8b, v1.8b}, [x0]
-entry:
- %tobool22 = icmp eq i32 %count, 0
- br i1 %tobool22, label %while.end, label %while.body
-
-while.body: ; preds = %entry, %while.body
- %count.addr.023 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
- %dec = add i32 %count.addr.023, -1
- %vld2 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8* %io, i32 1)
- %vld2.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 0
- %vld2.fca.1.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 1
- tail call void @llvm.arm.neon.vst2.v8i8(i8* %io, <8 x i8> %vld2.fca.0.extract, <8 x i8> %vld2.fca.1.extract, i32 1)
- %tobool = icmp eq i32 %dec, 0
- br i1 %tobool, label %while.end, label %while.body
-
-while.end: ; preds = %while.body, %entry
- ret void
-}
-
-declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8*, i32)
-
-declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
-
diff --git a/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll b/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
deleted file mode 100644
index 156fe1db0ff5..000000000000
--- a/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
+++ /dev/null
@@ -1,354 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-;Check for a post-increment updating load.
-define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
-; CHECK: test_vld1_fx_update
-; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
- %A = load i16** %ptr
- %tmp0 = bitcast i16* %A to i8*
- %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 2)
- %tmp2 = getelementptr i16* %A, i32 4
- store i16* %tmp2, i16** %ptr
- ret <4 x i16> %tmp1
-}
-
-;Check for a post-increment updating load with register increment.
-define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
-; CHECK: test_vld1_reg_update
-; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i32** %ptr
- %tmp0 = bitcast i32* %A to i8*
- %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 4)
- %tmp2 = getelementptr i32* %A, i32 %inc
- store i32* %tmp2, i32** %ptr
- ret <2 x i32> %tmp1
-}
-
-define <2 x float> @test_vld2_fx_update(float** %ptr) nounwind {
-; CHECK: test_vld2_fx_update
-; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #16
- %A = load float** %ptr
- %tmp0 = bitcast float* %A to i8*
- %tmp1 = call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 4)
- %tmp2 = extractvalue { <2 x float>, <2 x float> } %tmp1, 0
- %tmp3 = getelementptr float* %A, i32 4
- store float* %tmp3, float** %ptr
- ret <2 x float> %tmp2
-}
-
-define <16 x i8> @test_vld2_reg_update(i8** %ptr, i32 %inc) nounwind {
-; CHECK: test_vld2_reg_update
-; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i8** %ptr
- %tmp0 = call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %A, i32 1)
- %tmp1 = extractvalue { <16 x i8>, <16 x i8> } %tmp0, 0
- %tmp2 = getelementptr i8* %A, i32 %inc
- store i8* %tmp2, i8** %ptr
- ret <16 x i8> %tmp1
-}
-
-define <4 x i32> @test_vld3_fx_update(i32** %ptr) nounwind {
-; CHECK: test_vld3_fx_update
-; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #48
- %A = load i32** %ptr
- %tmp0 = bitcast i32* %A to i8*
- %tmp1 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %tmp0, i32 4)
- %tmp2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %tmp1, 0
- %tmp3 = getelementptr i32* %A, i32 12
- store i32* %tmp3, i32** %ptr
- ret <4 x i32> %tmp2
-}
-
-define <4 x i16> @test_vld3_reg_update(i16** %ptr, i32 %inc) nounwind {
-; CHECK: test_vld3_reg_update
-; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i16** %ptr
- %tmp0 = bitcast i16* %A to i8*
- %tmp1 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 2)
- %tmp2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %tmp1, 0
- %tmp3 = getelementptr i16* %A, i32 %inc
- store i16* %tmp3, i16** %ptr
- ret <4 x i16> %tmp2
-}
-
-define <8 x i16> @test_vld4_fx_update(i16** %ptr) nounwind {
-; CHECK: test_vld4_fx_update
-; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], #64
- %A = load i16** %ptr
- %tmp0 = bitcast i16* %A to i8*
- %tmp1 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 8)
- %tmp2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %tmp1, 0
- %tmp3 = getelementptr i16* %A, i32 32
- store i16* %tmp3, i16** %ptr
- ret <8 x i16> %tmp2
-}
-
-define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
-; CHECK: test_vld4_reg_update
-; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i8** %ptr
- %tmp0 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %A, i32 1)
- %tmp1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %tmp0, 0
- %tmp2 = getelementptr i8* %A, i32 %inc
- store i8* %tmp2, i8** %ptr
- ret <8 x i8> %tmp1
-}
-
-define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
-; CHECK: test_vst1_fx_update
-; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
- %A = load float** %ptr
- %tmp0 = bitcast float* %A to i8*
- call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %B, i32 4)
- %tmp2 = getelementptr float* %A, i32 2
- store float* %tmp2, float** %ptr
- ret void
-}
-
-define void @test_vst1_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
-; CHECK: test_vst1_reg_update
-; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i16** %ptr
- %tmp0 = bitcast i16* %A to i8*
- call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %B, i32 2)
- %tmp1 = getelementptr i16* %A, i32 %inc
- store i16* %tmp1, i16** %ptr
- ret void
-}
-
-define void @test_vst2_fx_update(i64** %ptr, <1 x i64> %B) nounwind {
-; CHECK: test_vst2_fx_update
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}], #16
- %A = load i64** %ptr
- %tmp0 = bitcast i64* %A to i8*
- call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %B, <1 x i64> %B, i32 8)
- %tmp1 = getelementptr i64* %A, i32 2
- store i64* %tmp1, i64** %ptr
- ret void
-}
-
-define void @test_vst2_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
-; CHECK: test_vst2_reg_update
-; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i8** %ptr
- call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, i32 4)
- %tmp0 = getelementptr i8* %A, i32 %inc
- store i8* %tmp0, i8** %ptr
- ret void
-}
-
-define void @test_vst3_fx_update(i32** %ptr, <2 x i32> %B) nounwind {
-; CHECK: test_vst3_fx_update
-; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #24
- %A = load i32** %ptr
- %tmp0 = bitcast i32* %A to i8*
- call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %B, <2 x i32> %B, <2 x i32> %B, i32 4)
- %tmp1 = getelementptr i32* %A, i32 6
- store i32* %tmp1, i32** %ptr
- ret void
-}
-
-define void @test_vst3_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
-; CHECK: test_vst3_reg_update
-; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i16** %ptr
- %tmp0 = bitcast i16* %A to i8*
- call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %B, <8 x i16> %B, <8 x i16> %B, i32 2)
- %tmp1 = getelementptr i16* %A, i32 %inc
- store i16* %tmp1, i16** %ptr
- ret void
-}
-
-define void @test_vst4_fx_update(float** %ptr, <4 x float> %B) nounwind {
-; CHECK: test_vst4_fx_update
-; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}], #64
- %A = load float** %ptr
- %tmp0 = bitcast float* %A to i8*
- call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %B, <4 x float> %B, <4 x float> %B, <4 x float> %B, i32 4)
- %tmp1 = getelementptr float* %A, i32 16
- store float* %tmp1, float** %ptr
- ret void
-}
-
-define void @test_vst4_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
-; CHECK: test_vst4_reg_update
-; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
- %A = load i8** %ptr
- call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, i32 1)
- %tmp0 = getelementptr i8* %A, i32 %inc
- store i8* %tmp0, i8** %ptr
- ret void
-}
-
-
-declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32)
-declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32)
-declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
-declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8*, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8*, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8*, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
-
-declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32)
-declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32)
-declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
-declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
-declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
-
-define <16 x i8> @test_vld1x2_fx_update(i8* %a, i8** %ptr) {
-; CHECK: test_vld1x2_fx_update
-; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
- %1 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
- %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
- %tmp1 = getelementptr i8* %a, i32 32
- store i8* %tmp1, i8** %ptr
- ret <16 x i8> %2
-}
-
-define <8 x i16> @test_vld1x2_reg_update(i16* %a, i16** %ptr, i32 %inc) {
-; CHECK: test_vld1x2_reg_update
-; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
- %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
- %tmp1 = getelementptr i16* %a, i32 %inc
- store i16* %tmp1, i16** %ptr
- ret <8 x i16> %3
-}
-
-define <2 x i64> @test_vld1x3_fx_update(i64* %a, i64** %ptr) {
-; CHECK: test_vld1x3_fx_update
-; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], #48
- %1 = bitcast i64* %a to i8*
- %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
- %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
- %tmp1 = getelementptr i64* %a, i32 6
- store i64* %tmp1, i64** %ptr
- ret <2 x i64> %3
-}
-
-define <8 x i16> @test_vld1x3_reg_update(i16* %a, i16** %ptr, i32 %inc) {
-; CHECK: test_vld1x3_reg_update
-; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
- %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
- %tmp1 = getelementptr i16* %a, i32 %inc
- store i16* %tmp1, i16** %ptr
- ret <8 x i16> %3
-}
-
-define <4 x float> @test_vld1x4_fx_update(float* %a, float** %ptr) {
-; CHECK: test_vld1x4_fx_update
-; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
- %1 = bitcast float* %a to i8*
- %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
- %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
- %tmp1 = getelementptr float* %a, i32 16
- store float* %tmp1, float** %ptr
- ret <4 x float> %3
-}
-
-define <8 x i8> @test_vld1x4_reg_update(i8* readonly %a, i8** %ptr, i32 %inc) #0 {
-; CHECK: test_vld1x4_reg_update
-; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
- %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
- %tmp1 = getelementptr i8* %a, i32 %inc
- store i8* %tmp1, i8** %ptr
- ret <8 x i8> %2
-}
-
-define void @test_vst1x2_fx_update(i8* %a, [2 x <16 x i8>] %b.coerce, i8** %ptr) #2 {
-; CHECK: test_vst1x2_fx_update
-; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
- %1 = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %2 = extractvalue [2 x <16 x i8>] %b.coerce, 1
- tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
- %tmp1 = getelementptr i8* %a, i32 32
- store i8* %tmp1, i8** %ptr
- ret void
-}
-
-define void @test_vst1x2_reg_update(i16* %a, [2 x <8 x i16>] %b.coerce, i16** %ptr, i32 %inc) #2 {
-; CHECK: test_vst1x2_reg_update
-; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [2 x <8 x i16>] %b.coerce, 0
- %2 = extractvalue [2 x <8 x i16>] %b.coerce, 1
- %3 = bitcast i16* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
- %tmp1 = getelementptr i16* %a, i32 %inc
- store i16* %tmp1, i16** %ptr
- ret void
-}
-
-define void @test_vst1x3_fx_update(i32* %a, [3 x <2 x i32>] %b.coerce, i32** %ptr) #2 {
-; CHECK: test_vst1x3_fx_update
-; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #24
- %1 = extractvalue [3 x <2 x i32>] %b.coerce, 0
- %2 = extractvalue [3 x <2 x i32>] %b.coerce, 1
- %3 = extractvalue [3 x <2 x i32>] %b.coerce, 2
- %4 = bitcast i32* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
- %tmp1 = getelementptr i32* %a, i32 6
- store i32* %tmp1, i32** %ptr
- ret void
-}
-
-define void @test_vst1x3_reg_update(i64* %a, [3 x <1 x i64>] %b.coerce, i64** %ptr, i32 %inc) #2 {
-; CHECK: test_vst1x3_reg_update
-; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [3 x <1 x i64>] %b.coerce, 0
- %2 = extractvalue [3 x <1 x i64>] %b.coerce, 1
- %3 = extractvalue [3 x <1 x i64>] %b.coerce, 2
- %4 = bitcast i64* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
- %tmp1 = getelementptr i64* %a, i32 %inc
- store i64* %tmp1, i64** %ptr
- ret void
-}
-
-define void @test_vst1x4_fx_update(float* %a, [4 x <4 x float>] %b.coerce, float** %ptr) #2 {
-; CHECK: test_vst1x4_fx_update
-; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
- %1 = extractvalue [4 x <4 x float>] %b.coerce, 0
- %2 = extractvalue [4 x <4 x float>] %b.coerce, 1
- %3 = extractvalue [4 x <4 x float>] %b.coerce, 2
- %4 = extractvalue [4 x <4 x float>] %b.coerce, 3
- %5 = bitcast float* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
- %tmp1 = getelementptr float* %a, i32 16
- store float* %tmp1, float** %ptr
- ret void
-}
-
-define void @test_vst1x4_reg_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr, i32 %inc) #2 {
-; CHECK: test_vst1x4_reg_update
-; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
- %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
- %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
- %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
- %5 = bitcast double* %a to i8*
- tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
- %tmp1 = getelementptr double* %a, i32 %inc
- store double* %tmp1, double** %ptr
- ret void
-}
-
-declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
-declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
-declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
-declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
-declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
-declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) #3
-declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32) #3
diff --git a/test/CodeGen/AArch64/neon-simd-post-ldst-one.ll b/test/CodeGen/AArch64/neon-simd-post-ldst-one.ll
deleted file mode 100644
index 80a934700c6b..000000000000
--- a/test/CodeGen/AArch64/neon-simd-post-ldst-one.ll
+++ /dev/null
@@ -1,319 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define { [2 x <16 x i8>] } @test_vld2q_dup_fx_update(i8* %a, i8** %ptr) {
-; CHECK-LABEL: test_vld2q_dup_fx_update
-; CHECK: ld2r {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #2
- %1 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
- %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
- %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> zeroinitializer
- %4 = extractvalue { <16 x i8>, <16 x i8> } %1, 1
- %5 = shufflevector <16 x i8> %4, <16 x i8> undef, <16 x i32> zeroinitializer
- %6 = insertvalue { [2 x <16 x i8>] } undef, <16 x i8> %3, 0, 0
- %7 = insertvalue { [2 x <16 x i8>] } %6, <16 x i8> %5, 0, 1
- %tmp1 = getelementptr i8* %a, i32 2
- store i8* %tmp1, i8** %ptr
- ret { [2 x <16 x i8>] } %7
-}
-
-define { [2 x <4 x i32>] } @test_vld2q_dup_reg_update(i32* %a, i32** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vld2q_dup_reg_update
-; CHECK: ld2r {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8* %1, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
- %3 = extractvalue { <4 x i32>, <4 x i32> } %2, 0
- %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer
- %5 = extractvalue { <4 x i32>, <4 x i32> } %2, 1
- %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> zeroinitializer
- %7 = insertvalue { [2 x <4 x i32>] } undef, <4 x i32> %4, 0, 0
- %8 = insertvalue { [2 x <4 x i32>] } %7, <4 x i32> %6, 0, 1
- %tmp1 = getelementptr i32* %a, i32 %inc
- store i32* %tmp1, i32** %ptr
- ret { [2 x <4 x i32>] } %8
-}
-
-define { [3 x <4 x i16>] } @test_vld3_dup_fx_update(i16* %a, i16** %ptr) {
-; CHECK-LABEL: test_vld3_dup_fx_update
-; CHECK: ld3r {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #6
- %1 = bitcast i16* %a to i8*
- %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %1, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
- %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
- %4 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer
- %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
- %6 = shufflevector <4 x i16> %5, <4 x i16> undef, <4 x i32> zeroinitializer
- %7 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
- %8 = shufflevector <4 x i16> %7, <4 x i16> undef, <4 x i32> zeroinitializer
- %9 = insertvalue { [3 x <4 x i16>] } undef, <4 x i16> %4, 0, 0
- %10 = insertvalue { [3 x <4 x i16>] } %9, <4 x i16> %6, 0, 1
- %11 = insertvalue { [3 x <4 x i16>] } %10, <4 x i16> %8, 0, 2
- %tmp1 = getelementptr i16* %a, i32 3
- store i16* %tmp1, i16** %ptr
- ret { [3 x <4 x i16>] } %11
-}
-
-define { [3 x <8 x i8>] } @test_vld3_dup_reg_update(i8* %a, i8** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vld3_dup_reg_update
-; CHECK: ld3r {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
- %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
- %3 = shufflevector <8 x i8> %2, <8 x i8> undef, <8 x i32> zeroinitializer
- %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
- %5 = shufflevector <8 x i8> %4, <8 x i8> undef, <8 x i32> zeroinitializer
- %6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
- %7 = shufflevector <8 x i8> %6, <8 x i8> undef, <8 x i32> zeroinitializer
- %8 = insertvalue { [3 x <8 x i8>] } undef, <8 x i8> %3, 0, 0
- %9 = insertvalue { [3 x <8 x i8>] } %8, <8 x i8> %5, 0, 1
- %10 = insertvalue { [3 x <8 x i8>] } %9, <8 x i8> %7, 0, 2
- %tmp1 = getelementptr i8* %a, i32 %inc
- store i8* %tmp1, i8** %ptr
- ret { [3 x <8 x i8>] } %10
-}
-
-define { [4 x <2 x i32>] } @test_vld4_dup_fx_update(i32* %a, i32** %ptr) #0 {
-; CHECK-LABEL: test_vld4_dup_fx_update
-; CHECK: ld4r {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #16
- %1 = bitcast i32* %a to i8*
- %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %1, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
- %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
- %4 = shufflevector <2 x i32> %3, <2 x i32> undef, <2 x i32> zeroinitializer
- %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
- %6 = shufflevector <2 x i32> %5, <2 x i32> undef, <2 x i32> zeroinitializer
- %7 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
- %8 = shufflevector <2 x i32> %7, <2 x i32> undef, <2 x i32> zeroinitializer
- %9 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 3
- %10 = shufflevector <2 x i32> %9, <2 x i32> undef, <2 x i32> zeroinitializer
- %11 = insertvalue { [4 x <2 x i32>] } undef, <2 x i32> %4, 0, 0
- %12 = insertvalue { [4 x <2 x i32>] } %11, <2 x i32> %6, 0, 1
- %13 = insertvalue { [4 x <2 x i32>] } %12, <2 x i32> %8, 0, 2
- %14 = insertvalue { [4 x <2 x i32>] } %13, <2 x i32> %10, 0, 3
- %tmp1 = getelementptr i32* %a, i32 4
- store i32* %tmp1, i32** %ptr
- ret { [4 x <2 x i32>] } %14
-}
-
-define { [4 x <2 x double>] } @test_vld4_dup_reg_update(double* %a, double** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vld4_dup_reg_update
-; CHECK: ld4r {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = bitcast double* %a to i8*
- %2 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %1, <2 x double> undef, <2 x double> undef, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
- %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 0
- %4 = shufflevector <2 x double> %3, <2 x double> undef, <2 x i32> zeroinitializer
- %5 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 1
- %6 = shufflevector <2 x double> %5, <2 x double> undef, <2 x i32> zeroinitializer
- %7 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 2
- %8 = shufflevector <2 x double> %7, <2 x double> undef, <2 x i32> zeroinitializer
- %9 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 3
- %10 = shufflevector <2 x double> %9, <2 x double> undef, <2 x i32> zeroinitializer
- %11 = insertvalue { [4 x <2 x double>] } undef, <2 x double> %4, 0, 0
- %12 = insertvalue { [4 x <2 x double>] } %11, <2 x double> %6, 0, 1
- %13 = insertvalue { [4 x <2 x double>] } %12, <2 x double> %8, 0, 2
- %14 = insertvalue { [4 x <2 x double>] } %13, <2 x double> %10, 0, 3
- %tmp1 = getelementptr double* %a, i32 %inc
- store double* %tmp1, double** %ptr
- ret { [4 x <2 x double>] } %14
-}
-
-define { [2 x <8 x i8>] } @test_vld2_lane_fx_update(i8* %a, [2 x <8 x i8>] %b, i8** %ptr) {
-; CHECK-LABEL: test_vld2_lane_fx_update
-; CHECK: ld2 {v{{[0-9]+}}.b, v{{[0-9]+}}.b}[7], [x{{[0-9]+|sp}}], #2
- %1 = extractvalue [2 x <8 x i8>] %b, 0
- %2 = extractvalue [2 x <8 x i8>] %b, 1
- %3 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 7, i32 1)
- %4 = extractvalue { <8 x i8>, <8 x i8> } %3, 0
- %5 = extractvalue { <8 x i8>, <8 x i8> } %3, 1
- %6 = insertvalue { [2 x <8 x i8>] } undef, <8 x i8> %4, 0, 0
- %7 = insertvalue { [2 x <8 x i8>] } %6, <8 x i8> %5, 0, 1
- %tmp1 = getelementptr i8* %a, i32 2
- store i8* %tmp1, i8** %ptr
- ret { [2 x <8 x i8>] } %7
-}
-
-define { [2 x <8 x i8>] } @test_vld2_lane_reg_update(i8* %a, [2 x <8 x i8>] %b, i8** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vld2_lane_reg_update
-; CHECK: ld2 {v{{[0-9]+}}.b, v{{[0-9]+}}.b}[6], [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [2 x <8 x i8>] %b, 0
- %2 = extractvalue [2 x <8 x i8>] %b, 1
- %3 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 6, i32 1)
- %4 = extractvalue { <8 x i8>, <8 x i8> } %3, 0
- %5 = extractvalue { <8 x i8>, <8 x i8> } %3, 1
- %6 = insertvalue { [2 x <8 x i8>] } undef, <8 x i8> %4, 0, 0
- %7 = insertvalue { [2 x <8 x i8>] } %6, <8 x i8> %5, 0, 1
- %tmp1 = getelementptr i8* %a, i32 %inc
- store i8* %tmp1, i8** %ptr
- ret { [2 x <8 x i8>] } %7
-}
-
-define { [3 x <2 x float>] } @test_vld3_lane_fx_update(float* %a, [3 x <2 x float>] %b, float** %ptr) {
-; CHECK-LABEL: test_vld3_lane_fx_update
-; CHECK: ld3 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], #12
- %1 = extractvalue [3 x <2 x float>] %b, 0
- %2 = extractvalue [3 x <2 x float>] %b, 1
- %3 = extractvalue [3 x <2 x float>] %b, 2
- %4 = bitcast float* %a to i8*
- %5 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8* %4, <2 x float> %1, <2 x float> %2, <2 x float> %3, i32 1, i32 4)
- %6 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %5, 0
- %7 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %5, 1
- %8 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %5, 2
- %9 = insertvalue { [3 x <2 x float>] } undef, <2 x float> %6, 0, 0
- %10 = insertvalue { [3 x <2 x float>] } %9, <2 x float> %7, 0, 1
- %11 = insertvalue { [3 x <2 x float>] } %10, <2 x float> %8, 0, 2
- %tmp1 = getelementptr float* %a, i32 3
- store float* %tmp1, float** %ptr
- ret { [3 x <2 x float>] } %11
-}
-
-define { [3 x <4 x i16>] } @test_vld3_lane_reg_update(i16* %a, [3 x <4 x i16>] %b, i16** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vld3_lane_reg_update
-; CHECK: ld3 {v{{[0-9]+}}.h, v{{[0-9]+}}.h, v{{[0-9]+}}.h}[3], [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [3 x <4 x i16>] %b, 0
- %2 = extractvalue [3 x <4 x i16>] %b, 1
- %3 = extractvalue [3 x <4 x i16>] %b, 2
- %4 = bitcast i16* %a to i8*
- %5 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 3, i32 2)
- %6 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %5, 0
- %7 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %5, 1
- %8 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %5, 2
- %9 = insertvalue { [3 x <4 x i16>] } undef, <4 x i16> %6, 0, 0
- %10 = insertvalue { [3 x <4 x i16>] } %9, <4 x i16> %7, 0, 1
- %11 = insertvalue { [3 x <4 x i16>] } %10, <4 x i16> %8, 0, 2
- %tmp1 = getelementptr i16* %a, i32 %inc
- store i16* %tmp1, i16** %ptr
- ret { [3 x <4 x i16>] } %11
-}
-
-define { [4 x <2 x i32>] } @test_vld4_lane_fx_update(i32* readonly %a, [4 x <2 x i32>] %b, i32** %ptr) {
-; CHECK-LABEL: test_vld4_lane_fx_update
-; CHECK: ld4 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], #16
- %1 = extractvalue [4 x <2 x i32>] %b, 0
- %2 = extractvalue [4 x <2 x i32>] %b, 1
- %3 = extractvalue [4 x <2 x i32>] %b, 2
- %4 = extractvalue [4 x <2 x i32>] %b, 3
- %5 = bitcast i32* %a to i8*
- %6 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %5, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, <2 x i32> %4, i32 1, i32 4)
- %7 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 0
- %8 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 1
- %9 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 2
- %10 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 3
- %11 = insertvalue { [4 x <2 x i32>] } undef, <2 x i32> %7, 0, 0
- %12 = insertvalue { [4 x <2 x i32>] } %11, <2 x i32> %8, 0, 1
- %13 = insertvalue { [4 x <2 x i32>] } %12, <2 x i32> %9, 0, 2
- %14 = insertvalue { [4 x <2 x i32>] } %13, <2 x i32> %10, 0, 3
- %tmp1 = getelementptr i32* %a, i32 4
- store i32* %tmp1, i32** %ptr
- ret { [4 x <2 x i32>] } %14
-}
-
-define { [4 x <2 x double>] } @test_vld4_lane_reg_update(double* readonly %a, [4 x <2 x double>] %b, double** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vld4_lane_reg_update
-; CHECK: ld4 {v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d}[1], [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [4 x <2 x double>] %b, 0
- %2 = extractvalue [4 x <2 x double>] %b, 1
- %3 = extractvalue [4 x <2 x double>] %b, 2
- %4 = extractvalue [4 x <2 x double>] %b, 3
- %5 = bitcast double* %a to i8*
- %6 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 1, i32 8)
- %7 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 0
- %8 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 1
- %9 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 2
- %10 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 3
- %11 = insertvalue { [4 x <2 x double>] } undef, <2 x double> %7, 0, 0
- %12 = insertvalue { [4 x <2 x double>] } %11, <2 x double> %8, 0, 1
- %13 = insertvalue { [4 x <2 x double>] } %12, <2 x double> %9, 0, 2
- %14 = insertvalue { [4 x <2 x double>] } %13, <2 x double> %10, 0, 3
- %tmp1 = getelementptr double* %a, i32 %inc
- store double* %tmp1, double** %ptr
- ret { [4 x <2 x double>] } %14
-}
-
-define void @test_vst2_lane_fx_update(i8* %a, [2 x <8 x i8>] %b, i8** %ptr) {
-; CHECK-LABEL: test_vst2_lane_fx_update
-; CHECK: st2 {v{{[0-9]+}}.b, v{{[0-9]+}}.b}[7], [x{{[0-9]+|sp}}], #2
- %1 = extractvalue [2 x <8 x i8>] %b, 0
- %2 = extractvalue [2 x <8 x i8>] %b, 1
- call void @llvm.arm.neon.vst2lane.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 7, i32 1)
- %tmp1 = getelementptr i8* %a, i32 2
- store i8* %tmp1, i8** %ptr
- ret void
-}
-
-define void @test_vst2_lane_reg_update(i32* %a, [2 x <2 x i32>] %b.coerce, i32** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vst2_lane_reg_update
-; CHECK: st2 {v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [2 x <2 x i32>] %b.coerce, 0
- %2 = extractvalue [2 x <2 x i32>] %b.coerce, 1
- %3 = bitcast i32* %a to i8*
- tail call void @llvm.arm.neon.vst2lane.v2i32(i8* %3, <2 x i32> %1, <2 x i32> %2, i32 1, i32 4)
- %tmp1 = getelementptr i32* %a, i32 %inc
- store i32* %tmp1, i32** %ptr
- ret void
-}
-
-define void @test_vst3_lane_fx_update(float* %a, [3 x <4 x float>] %b, float** %ptr) {
-; CHECK-LABEL: test_vst3_lane_fx_update
-; CHECK: st3 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[3], [x{{[0-9]+|sp}}], #12
- %1 = extractvalue [3 x <4 x float>] %b, 0
- %2 = extractvalue [3 x <4 x float>] %b, 1
- %3 = extractvalue [3 x <4 x float>] %b, 2
- %4 = bitcast float* %a to i8*
- call void @llvm.arm.neon.vst3lane.v4f32(i8* %4, <4 x float> %1, <4 x float> %2, <4 x float> %3, i32 3, i32 4)
- %tmp1 = getelementptr float* %a, i32 3
- store float* %tmp1, float** %ptr
- ret void
-}
-
-; Function Attrs: nounwind
-define void @test_vst3_lane_reg_update(i16* %a, [3 x <4 x i16>] %b, i16** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vst3_lane_reg_update
-; CHECK: st3 {v{{[0-9]+}}.h, v{{[0-9]+}}.h, v{{[0-9]+}}.h}[3], [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [3 x <4 x i16>] %b, 0
- %2 = extractvalue [3 x <4 x i16>] %b, 1
- %3 = extractvalue [3 x <4 x i16>] %b, 2
- %4 = bitcast i16* %a to i8*
- tail call void @llvm.arm.neon.vst3lane.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 3, i32 2)
- %tmp1 = getelementptr i16* %a, i32 %inc
- store i16* %tmp1, i16** %ptr
- ret void
-}
-
-define void @test_vst4_lane_fx_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr) {
-; CHECK-LABEL: test_vst4_lane_fx_update
-; CHECK: st4 {v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d}[1], [x{{[0-9]+|sp}}], #32
- %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
- %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
- %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
- %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
- %5 = bitcast double* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 1, i32 8)
- %tmp1 = getelementptr double* %a, i32 4
- store double* %tmp1, double** %ptr
- ret void
-}
-
-
-define void @test_vst4_lane_reg_update(float* %a, [4 x <2 x float>] %b.coerce, float** %ptr, i32 %inc) {
-; CHECK-LABEL: test_vst4_lane_reg_update
-; CHECK: st4 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], x{{[0-9]+}}
- %1 = extractvalue [4 x <2 x float>] %b.coerce, 0
- %2 = extractvalue [4 x <2 x float>] %b.coerce, 1
- %3 = extractvalue [4 x <2 x float>] %b.coerce, 2
- %4 = extractvalue [4 x <2 x float>] %b.coerce, 3
- %5 = bitcast float* %a to i8*
- tail call void @llvm.arm.neon.vst4lane.v2f32(i8* %5, <2 x float> %1, <2 x float> %2, <2 x float> %3, <2 x float> %4, i32 1, i32 4)
- %tmp1 = getelementptr float* %a, i32 %inc
- store float* %tmp1, float** %ptr
- ret void
-}
-
-declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
-declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8*, <16 x i8>, <16 x i8>, i32, i32)
-declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32)
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
-declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32)
-declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
diff --git a/test/CodeGen/AArch64/neon-simd-shift.ll b/test/CodeGen/AArch64/neon-simd-shift.ll
deleted file mode 100644
index fd762656e56e..000000000000
--- a/test/CodeGen/AArch64/neon-simd-shift.ll
+++ /dev/null
@@ -1,1556 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define <8 x i8> @test_vshr_n_s8(<8 x i8> %a) {
-; CHECK: test_vshr_n_s8
-; CHECK: sshr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vshr_n = ashr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <8 x i8> %vshr_n
-}
-
-define <4 x i16> @test_vshr_n_s16(<4 x i16> %a) {
-; CHECK: test_vshr_n_s16
-; CHECK: sshr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vshr_n = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
- ret <4 x i16> %vshr_n
-}
-
-define <2 x i32> @test_vshr_n_s32(<2 x i32> %a) {
-; CHECK: test_vshr_n_s32
-; CHECK: sshr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vshr_n = ashr <2 x i32> %a, <i32 3, i32 3>
- ret <2 x i32> %vshr_n
-}
-
-define <16 x i8> @test_vshrq_n_s8(<16 x i8> %a) {
-; CHECK: test_vshrq_n_s8
-; CHECK: sshr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vshr_n = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <16 x i8> %vshr_n
-}
-
-define <8 x i16> @test_vshrq_n_s16(<8 x i16> %a) {
-; CHECK: test_vshrq_n_s16
-; CHECK: sshr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vshr_n = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- ret <8 x i16> %vshr_n
-}
-
-define <4 x i32> @test_vshrq_n_s32(<4 x i32> %a) {
-; CHECK: test_vshrq_n_s32
-; CHECK: sshr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vshr_n = ashr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
- ret <4 x i32> %vshr_n
-}
-
-define <2 x i64> @test_vshrq_n_s64(<2 x i64> %a) {
-; CHECK: test_vshrq_n_s64
-; CHECK: sshr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vshr_n = ashr <2 x i64> %a, <i64 3, i64 3>
- ret <2 x i64> %vshr_n
-}
-
-define <8 x i8> @test_vshr_n_u8(<8 x i8> %a) {
-; CHECK: test_vshr_n_u8
-; CHECK: ushr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vshr_n = lshr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <8 x i8> %vshr_n
-}
-
-define <4 x i16> @test_vshr_n_u16(<4 x i16> %a) {
-; CHECK: test_vshr_n_u16
-; CHECK: ushr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vshr_n = lshr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
- ret <4 x i16> %vshr_n
-}
-
-define <2 x i32> @test_vshr_n_u32(<2 x i32> %a) {
-; CHECK: test_vshr_n_u32
-; CHECK: ushr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vshr_n = lshr <2 x i32> %a, <i32 3, i32 3>
- ret <2 x i32> %vshr_n
-}
-
-define <16 x i8> @test_vshrq_n_u8(<16 x i8> %a) {
-; CHECK: test_vshrq_n_u8
-; CHECK: ushr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vshr_n = lshr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <16 x i8> %vshr_n
-}
-
-define <8 x i16> @test_vshrq_n_u16(<8 x i16> %a) {
-; CHECK: test_vshrq_n_u16
-; CHECK: ushr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vshr_n = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- ret <8 x i16> %vshr_n
-}
-
-define <4 x i32> @test_vshrq_n_u32(<4 x i32> %a) {
-; CHECK: test_vshrq_n_u32
-; CHECK: ushr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vshr_n = lshr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
- ret <4 x i32> %vshr_n
-}
-
-define <2 x i64> @test_vshrq_n_u64(<2 x i64> %a) {
-; CHECK: test_vshrq_n_u64
-; CHECK: ushr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vshr_n = lshr <2 x i64> %a, <i64 3, i64 3>
- ret <2 x i64> %vshr_n
-}
-
-define <8 x i8> @test_vsra_n_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsra_n_s8
-; CHECK: ssra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vsra_n = ashr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- %1 = add <8 x i8> %vsra_n, %a
- ret <8 x i8> %1
-}
-
-define <4 x i16> @test_vsra_n_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsra_n_s16
-; CHECK: ssra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vsra_n = ashr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
- %1 = add <4 x i16> %vsra_n, %a
- ret <4 x i16> %1
-}
-
-define <2 x i32> @test_vsra_n_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vsra_n_s32
-; CHECK: ssra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vsra_n = ashr <2 x i32> %b, <i32 3, i32 3>
- %1 = add <2 x i32> %vsra_n, %a
- ret <2 x i32> %1
-}
-
-define <16 x i8> @test_vsraq_n_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsraq_n_s8
-; CHECK: ssra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vsra_n = ashr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- %1 = add <16 x i8> %vsra_n, %a
- ret <16 x i8> %1
-}
-
-define <8 x i16> @test_vsraq_n_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsraq_n_s16
-; CHECK: ssra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vsra_n = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- %1 = add <8 x i16> %vsra_n, %a
- ret <8 x i16> %1
-}
-
-define <4 x i32> @test_vsraq_n_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsraq_n_s32
-; CHECK: ssra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vsra_n = ashr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
- %1 = add <4 x i32> %vsra_n, %a
- ret <4 x i32> %1
-}
-
-define <2 x i64> @test_vsraq_n_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsraq_n_s64
-; CHECK: ssra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vsra_n = ashr <2 x i64> %b, <i64 3, i64 3>
- %1 = add <2 x i64> %vsra_n, %a
- ret <2 x i64> %1
-}
-
-define <8 x i8> @test_vsra_n_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsra_n_u8
-; CHECK: usra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vsra_n = lshr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- %1 = add <8 x i8> %vsra_n, %a
- ret <8 x i8> %1
-}
-
-define <4 x i16> @test_vsra_n_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsra_n_u16
-; CHECK: usra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vsra_n = lshr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
- %1 = add <4 x i16> %vsra_n, %a
- ret <4 x i16> %1
-}
-
-define <2 x i32> @test_vsra_n_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vsra_n_u32
-; CHECK: usra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vsra_n = lshr <2 x i32> %b, <i32 3, i32 3>
- %1 = add <2 x i32> %vsra_n, %a
- ret <2 x i32> %1
-}
-
-define <16 x i8> @test_vsraq_n_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsraq_n_u8
-; CHECK: usra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vsra_n = lshr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- %1 = add <16 x i8> %vsra_n, %a
- ret <16 x i8> %1
-}
-
-define <8 x i16> @test_vsraq_n_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsraq_n_u16
-; CHECK: usra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vsra_n = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- %1 = add <8 x i16> %vsra_n, %a
- ret <8 x i16> %1
-}
-
-define <4 x i32> @test_vsraq_n_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsraq_n_u32
-; CHECK: usra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vsra_n = lshr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
- %1 = add <4 x i32> %vsra_n, %a
- ret <4 x i32> %1
-}
-
-define <2 x i64> @test_vsraq_n_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsraq_n_u64
-; CHECK: usra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vsra_n = lshr <2 x i64> %b, <i64 3, i64 3>
- %1 = add <2 x i64> %vsra_n, %a
- ret <2 x i64> %1
-}
-
-define <8 x i8> @test_vrshr_n_s8(<8 x i8> %a) {
-; CHECK: test_vrshr_n_s8
-; CHECK: srshr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vrshr_n = tail call <8 x i8> @llvm.aarch64.neon.vsrshr.v8i8(<8 x i8> %a, i32 3)
- ret <8 x i8> %vrshr_n
-}
-
-
-define <4 x i16> @test_vrshr_n_s16(<4 x i16> %a) {
-; CHECK: test_vrshr_n_s16
-; CHECK: srshr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vrshr_n = tail call <4 x i16> @llvm.aarch64.neon.vsrshr.v4i16(<4 x i16> %a, i32 3)
- ret <4 x i16> %vrshr_n
-}
-
-
-define <2 x i32> @test_vrshr_n_s32(<2 x i32> %a) {
-; CHECK: test_vrshr_n_s32
-; CHECK: srshr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vrshr_n = tail call <2 x i32> @llvm.aarch64.neon.vsrshr.v2i32(<2 x i32> %a, i32 3)
- ret <2 x i32> %vrshr_n
-}
-
-
-define <16 x i8> @test_vrshrq_n_s8(<16 x i8> %a) {
-; CHECK: test_vrshrq_n_s8
-; CHECK: srshr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vrshr_n = tail call <16 x i8> @llvm.aarch64.neon.vsrshr.v16i8(<16 x i8> %a, i32 3)
- ret <16 x i8> %vrshr_n
-}
-
-
-define <8 x i16> @test_vrshrq_n_s16(<8 x i16> %a) {
-; CHECK: test_vrshrq_n_s16
-; CHECK: srshr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vrshr_n = tail call <8 x i16> @llvm.aarch64.neon.vsrshr.v8i16(<8 x i16> %a, i32 3)
- ret <8 x i16> %vrshr_n
-}
-
-
-define <4 x i32> @test_vrshrq_n_s32(<4 x i32> %a) {
-; CHECK: test_vrshrq_n_s32
-; CHECK: srshr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vrshr_n = tail call <4 x i32> @llvm.aarch64.neon.vsrshr.v4i32(<4 x i32> %a, i32 3)
- ret <4 x i32> %vrshr_n
-}
-
-
-define <2 x i64> @test_vrshrq_n_s64(<2 x i64> %a) {
-; CHECK: test_vrshrq_n_s64
-; CHECK: srshr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vrshr_n = tail call <2 x i64> @llvm.aarch64.neon.vsrshr.v2i64(<2 x i64> %a, i32 3)
- ret <2 x i64> %vrshr_n
-}
-
-
-define <8 x i8> @test_vrshr_n_u8(<8 x i8> %a) {
-; CHECK: test_vrshr_n_u8
-; CHECK: urshr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vrshr_n = tail call <8 x i8> @llvm.aarch64.neon.vurshr.v8i8(<8 x i8> %a, i32 3)
- ret <8 x i8> %vrshr_n
-}
-
-
-define <4 x i16> @test_vrshr_n_u16(<4 x i16> %a) {
-; CHECK: test_vrshr_n_u16
-; CHECK: urshr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vrshr_n = tail call <4 x i16> @llvm.aarch64.neon.vurshr.v4i16(<4 x i16> %a, i32 3)
- ret <4 x i16> %vrshr_n
-}
-
-
-define <2 x i32> @test_vrshr_n_u32(<2 x i32> %a) {
-; CHECK: test_vrshr_n_u32
-; CHECK: urshr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vrshr_n = tail call <2 x i32> @llvm.aarch64.neon.vurshr.v2i32(<2 x i32> %a, i32 3)
- ret <2 x i32> %vrshr_n
-}
-
-
-define <16 x i8> @test_vrshrq_n_u8(<16 x i8> %a) {
-; CHECK: test_vrshrq_n_u8
-; CHECK: urshr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vrshr_n = tail call <16 x i8> @llvm.aarch64.neon.vurshr.v16i8(<16 x i8> %a, i32 3)
- ret <16 x i8> %vrshr_n
-}
-
-
-define <8 x i16> @test_vrshrq_n_u16(<8 x i16> %a) {
-; CHECK: test_vrshrq_n_u16
-; CHECK: urshr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vrshr_n = tail call <8 x i16> @llvm.aarch64.neon.vurshr.v8i16(<8 x i16> %a, i32 3)
- ret <8 x i16> %vrshr_n
-}
-
-
-define <4 x i32> @test_vrshrq_n_u32(<4 x i32> %a) {
-; CHECK: test_vrshrq_n_u32
-; CHECK: urshr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vrshr_n = tail call <4 x i32> @llvm.aarch64.neon.vurshr.v4i32(<4 x i32> %a, i32 3)
- ret <4 x i32> %vrshr_n
-}
-
-
-define <2 x i64> @test_vrshrq_n_u64(<2 x i64> %a) {
-; CHECK: test_vrshrq_n_u64
-; CHECK: urshr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vrshr_n = tail call <2 x i64> @llvm.aarch64.neon.vurshr.v2i64(<2 x i64> %a, i32 3)
- ret <2 x i64> %vrshr_n
-}
-
-
-define <8 x i8> @test_vrsra_n_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vrsra_n_s8
-; CHECK: srsra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %1 = tail call <8 x i8> @llvm.aarch64.neon.vsrshr.v8i8(<8 x i8> %b, i32 3)
- %vrsra_n = add <8 x i8> %1, %a
- ret <8 x i8> %vrsra_n
-}
-
-define <4 x i16> @test_vrsra_n_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vrsra_n_s16
-; CHECK: srsra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %1 = tail call <4 x i16> @llvm.aarch64.neon.vsrshr.v4i16(<4 x i16> %b, i32 3)
- %vrsra_n = add <4 x i16> %1, %a
- ret <4 x i16> %vrsra_n
-}
-
-define <2 x i32> @test_vrsra_n_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vrsra_n_s32
-; CHECK: srsra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %1 = tail call <2 x i32> @llvm.aarch64.neon.vsrshr.v2i32(<2 x i32> %b, i32 3)
- %vrsra_n = add <2 x i32> %1, %a
- ret <2 x i32> %vrsra_n
-}
-
-define <16 x i8> @test_vrsraq_n_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vrsraq_n_s8
-; CHECK: srsra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %1 = tail call <16 x i8> @llvm.aarch64.neon.vsrshr.v16i8(<16 x i8> %b, i32 3)
- %vrsra_n = add <16 x i8> %1, %a
- ret <16 x i8> %vrsra_n
-}
-
-define <8 x i16> @test_vrsraq_n_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vrsraq_n_s16
-; CHECK: srsra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %1 = tail call <8 x i16> @llvm.aarch64.neon.vsrshr.v8i16(<8 x i16> %b, i32 3)
- %vrsra_n = add <8 x i16> %1, %a
- ret <8 x i16> %vrsra_n
-}
-
-define <4 x i32> @test_vrsraq_n_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vrsraq_n_s32
-; CHECK: srsra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %1 = tail call <4 x i32> @llvm.aarch64.neon.vsrshr.v4i32(<4 x i32> %b, i32 3)
- %vrsra_n = add <4 x i32> %1, %a
- ret <4 x i32> %vrsra_n
-}
-
-define <2 x i64> @test_vrsraq_n_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vrsraq_n_s64
-; CHECK: srsra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %1 = tail call <2 x i64> @llvm.aarch64.neon.vsrshr.v2i64(<2 x i64> %b, i32 3)
- %vrsra_n = add <2 x i64> %1, %a
- ret <2 x i64> %vrsra_n
-}
-
-define <8 x i8> @test_vrsra_n_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vrsra_n_u8
-; CHECK: ursra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %1 = tail call <8 x i8> @llvm.aarch64.neon.vurshr.v8i8(<8 x i8> %b, i32 3)
- %vrsra_n = add <8 x i8> %1, %a
- ret <8 x i8> %vrsra_n
-}
-
-define <4 x i16> @test_vrsra_n_u16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vrsra_n_u16
-; CHECK: ursra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %1 = tail call <4 x i16> @llvm.aarch64.neon.vurshr.v4i16(<4 x i16> %b, i32 3)
- %vrsra_n = add <4 x i16> %1, %a
- ret <4 x i16> %vrsra_n
-}
-
-define <2 x i32> @test_vrsra_n_u32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vrsra_n_u32
-; CHECK: ursra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %1 = tail call <2 x i32> @llvm.aarch64.neon.vurshr.v2i32(<2 x i32> %b, i32 3)
- %vrsra_n = add <2 x i32> %1, %a
- ret <2 x i32> %vrsra_n
-}
-
-define <16 x i8> @test_vrsraq_n_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vrsraq_n_u8
-; CHECK: ursra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %1 = tail call <16 x i8> @llvm.aarch64.neon.vurshr.v16i8(<16 x i8> %b, i32 3)
- %vrsra_n = add <16 x i8> %1, %a
- ret <16 x i8> %vrsra_n
-}
-
-define <8 x i16> @test_vrsraq_n_u16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vrsraq_n_u16
-; CHECK: ursra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %1 = tail call <8 x i16> @llvm.aarch64.neon.vurshr.v8i16(<8 x i16> %b, i32 3)
- %vrsra_n = add <8 x i16> %1, %a
- ret <8 x i16> %vrsra_n
-}
-
-define <4 x i32> @test_vrsraq_n_u32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vrsraq_n_u32
-; CHECK: ursra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %1 = tail call <4 x i32> @llvm.aarch64.neon.vurshr.v4i32(<4 x i32> %b, i32 3)
- %vrsra_n = add <4 x i32> %1, %a
- ret <4 x i32> %vrsra_n
-}
-
-define <2 x i64> @test_vrsraq_n_u64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vrsraq_n_u64
-; CHECK: ursra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %1 = tail call <2 x i64> @llvm.aarch64.neon.vurshr.v2i64(<2 x i64> %b, i32 3)
- %vrsra_n = add <2 x i64> %1, %a
- ret <2 x i64> %vrsra_n
-}
-
-define <8 x i8> @test_vsri_n_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsri_n_s8
-; CHECK: sri {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vsri_n = tail call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
- ret <8 x i8> %vsri_n
-}
-
-
-define <4 x i16> @test_vsri_n_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsri_n_s16
-; CHECK: sri {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vsri = tail call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> %a, <4 x i16> %b, i32 3)
- ret <4 x i16> %vsri
-}
-
-
-define <2 x i32> @test_vsri_n_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vsri_n_s32
-; CHECK: sri {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vsri = tail call <2 x i32> @llvm.aarch64.neon.vsri.v2i32(<2 x i32> %a, <2 x i32> %b, i32 3)
- ret <2 x i32> %vsri
-}
-
-
-define <16 x i8> @test_vsriq_n_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsriq_n_s8
-; CHECK: sri {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vsri_n = tail call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
- ret <16 x i8> %vsri_n
-}
-
-
-define <8 x i16> @test_vsriq_n_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsriq_n_s16
-; CHECK: sri {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vsri = tail call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> %a, <8 x i16> %b, i32 3)
- ret <8 x i16> %vsri
-}
-
-
-define <4 x i32> @test_vsriq_n_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsriq_n_s32
-; CHECK: sri {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vsri = tail call <4 x i32> @llvm.aarch64.neon.vsri.v4i32(<4 x i32> %a, <4 x i32> %b, i32 3)
- ret <4 x i32> %vsri
-}
-
-
-define <2 x i64> @test_vsriq_n_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsriq_n_s64
-; CHECK: sri {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vsri = tail call <2 x i64> @llvm.aarch64.neon.vsri.v2i64(<2 x i64> %a, <2 x i64> %b, i32 3)
- ret <2 x i64> %vsri
-}
-
-define <8 x i8> @test_vsri_n_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsri_n_p8
-; CHECK: sri {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vsri_n = tail call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
- ret <8 x i8> %vsri_n
-}
-
-define <4 x i16> @test_vsri_n_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsri_n_p16
-; CHECK: sri {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #15
- %vsri = tail call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> %a, <4 x i16> %b, i32 15)
- ret <4 x i16> %vsri
-}
-
-define <16 x i8> @test_vsriq_n_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsriq_n_p8
-; CHECK: sri {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vsri_n = tail call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
- ret <16 x i8> %vsri_n
-}
-
-define <8 x i16> @test_vsriq_n_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsriq_n_p16
-; CHECK: sri {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #15
- %vsri = tail call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> %a, <8 x i16> %b, i32 15)
- ret <8 x i16> %vsri
-}
-
-define <8 x i8> @test_vsli_n_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsli_n_s8
-; CHECK: sli {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vsli_n = tail call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
- ret <8 x i8> %vsli_n
-}
-
-define <4 x i16> @test_vsli_n_s16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsli_n_s16
-; CHECK: sli {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vsli = tail call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %a, <4 x i16> %b, i32 3)
- ret <4 x i16> %vsli
-}
-
-define <2 x i32> @test_vsli_n_s32(<2 x i32> %a, <2 x i32> %b) {
-; CHECK: test_vsli_n_s32
-; CHECK: sli {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vsli = tail call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> %a, <2 x i32> %b, i32 3)
- ret <2 x i32> %vsli
-}
-
-define <16 x i8> @test_vsliq_n_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsliq_n_s8
-; CHECK: sli {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vsli_n = tail call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
- ret <16 x i8> %vsli_n
-}
-
-define <8 x i16> @test_vsliq_n_s16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsliq_n_s16
-; CHECK: sli {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vsli = tail call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %a, <8 x i16> %b, i32 3)
- ret <8 x i16> %vsli
-}
-
-define <4 x i32> @test_vsliq_n_s32(<4 x i32> %a, <4 x i32> %b) {
-; CHECK: test_vsliq_n_s32
-; CHECK: sli {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vsli = tail call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> %a, <4 x i32> %b, i32 3)
- ret <4 x i32> %vsli
-}
-
-define <2 x i64> @test_vsliq_n_s64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK: test_vsliq_n_s64
-; CHECK: sli {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vsli = tail call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %a, <2 x i64> %b, i32 3)
- ret <2 x i64> %vsli
-}
-
-define <8 x i8> @test_vsli_n_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vsli_n_p8
-; CHECK: sli {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vsli_n = tail call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
- ret <8 x i8> %vsli_n
-}
-
-define <4 x i16> @test_vsli_n_p16(<4 x i16> %a, <4 x i16> %b) {
-; CHECK: test_vsli_n_p16
-; CHECK: sli {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #15
- %vsli = tail call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %a, <4 x i16> %b, i32 15)
- ret <4 x i16> %vsli
-}
-
-define <16 x i8> @test_vsliq_n_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vsliq_n_p8
-; CHECK: sli {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vsli_n = tail call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
- ret <16 x i8> %vsli_n
-}
-
-define <8 x i16> @test_vsliq_n_p16(<8 x i16> %a, <8 x i16> %b) {
-; CHECK: test_vsliq_n_p16
-; CHECK: sli {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #15
- %vsli = tail call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %a, <8 x i16> %b, i32 15)
- ret <8 x i16> %vsli
-}
-
-define <8 x i8> @test_vqshl_n_s8(<8 x i8> %a) {
-; CHECK: test_vqshl_n_s8
-; CHECK: sqshl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vqshl = tail call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %a, <8 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
- ret <8 x i8> %vqshl
-}
-
-
-define <4 x i16> @test_vqshl_n_s16(<4 x i16> %a) {
-; CHECK: test_vqshl_n_s16
-; CHECK: sqshl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vqshl = tail call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %a, <4 x i16> <i16 3, i16 3, i16 3, i16 3>)
- ret <4 x i16> %vqshl
-}
-
-
-define <2 x i32> @test_vqshl_n_s32(<2 x i32> %a) {
-; CHECK: test_vqshl_n_s32
-; CHECK: sqshl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vqshl = tail call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %a, <2 x i32> <i32 3, i32 3>)
- ret <2 x i32> %vqshl
-}
-
-
-define <16 x i8> @test_vqshlq_n_s8(<16 x i8> %a) {
-; CHECK: test_vqshlq_n_s8
-; CHECK: sqshl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vqshl_n = tail call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %a, <16 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
- ret <16 x i8> %vqshl_n
-}
-
-
-define <8 x i16> @test_vqshlq_n_s16(<8 x i16> %a) {
-; CHECK: test_vqshlq_n_s16
-; CHECK: sqshl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vqshl = tail call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %a, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
- ret <8 x i16> %vqshl
-}
-
-
-define <4 x i32> @test_vqshlq_n_s32(<4 x i32> %a) {
-; CHECK: test_vqshlq_n_s32
-; CHECK: sqshl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vqshl = tail call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %a, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
- ret <4 x i32> %vqshl
-}
-
-
-define <2 x i64> @test_vqshlq_n_s64(<2 x i64> %a) {
-; CHECK: test_vqshlq_n_s64
-; CHECK: sqshl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vqshl = tail call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %a, <2 x i64> <i64 3, i64 3>)
- ret <2 x i64> %vqshl
-}
-
-
-define <8 x i8> @test_vqshl_n_u8(<8 x i8> %a) {
-; CHECK: test_vqshl_n_u8
-; CHECK: uqshl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vqshl_n = tail call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %a, <8 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
- ret <8 x i8> %vqshl_n
-}
-
-
-define <4 x i16> @test_vqshl_n_u16(<4 x i16> %a) {
-; CHECK: test_vqshl_n_u16
-; CHECK: uqshl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vqshl = tail call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %a, <4 x i16> <i16 3, i16 3, i16 3, i16 3>)
- ret <4 x i16> %vqshl
-}
-
-
-define <2 x i32> @test_vqshl_n_u32(<2 x i32> %a) {
-; CHECK: test_vqshl_n_u32
-; CHECK: uqshl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vqshl = tail call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %a, <2 x i32> <i32 3, i32 3>)
- ret <2 x i32> %vqshl
-}
-
-
-define <16 x i8> @test_vqshlq_n_u8(<16 x i8> %a) {
-; CHECK: test_vqshlq_n_u8
-; CHECK: uqshl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vqshl_n = tail call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %a, <16 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
- ret <16 x i8> %vqshl_n
-}
-
-
-define <8 x i16> @test_vqshlq_n_u16(<8 x i16> %a) {
-; CHECK: test_vqshlq_n_u16
-; CHECK: uqshl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vqshl = tail call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %a, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
- ret <8 x i16> %vqshl
-}
-
-
-define <4 x i32> @test_vqshlq_n_u32(<4 x i32> %a) {
-; CHECK: test_vqshlq_n_u32
-; CHECK: uqshl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vqshl = tail call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %a, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
- ret <4 x i32> %vqshl
-}
-
-
-define <2 x i64> @test_vqshlq_n_u64(<2 x i64> %a) {
-; CHECK: test_vqshlq_n_u64
-; CHECK: uqshl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vqshl = tail call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %a, <2 x i64> <i64 3, i64 3>)
- ret <2 x i64> %vqshl
-}
-
-define <8 x i8> @test_vqshlu_n_s8(<8 x i8> %a) {
-; CHECK: test_vqshlu_n_s8
-; CHECK: sqshlu {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
- %vqshlu = tail call <8 x i8> @llvm.aarch64.neon.vsqshlu.v8i8(<8 x i8> %a, i32 3)
- ret <8 x i8> %vqshlu
-}
-
-
-define <4 x i16> @test_vqshlu_n_s16(<4 x i16> %a) {
-; CHECK: test_vqshlu_n_s16
-; CHECK: sqshlu {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
- %vqshlu = tail call <4 x i16> @llvm.aarch64.neon.vsqshlu.v4i16(<4 x i16> %a, i32 3)
- ret <4 x i16> %vqshlu
-}
-
-
-define <2 x i32> @test_vqshlu_n_s32(<2 x i32> %a) {
-; CHECK: test_vqshlu_n_s32
-; CHECK: sqshlu {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
- %vqshlu = tail call <2 x i32> @llvm.aarch64.neon.vsqshlu.v2i32(<2 x i32> %a, i32 3)
- ret <2 x i32> %vqshlu
-}
-
-
-define <16 x i8> @test_vqshluq_n_s8(<16 x i8> %a) {
-; CHECK: test_vqshluq_n_s8
-; CHECK: sqshlu {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
- %vqshlu = tail call <16 x i8> @llvm.aarch64.neon.vsqshlu.v16i8(<16 x i8> %a, i32 3)
- ret <16 x i8> %vqshlu
-}
-
-
-define <8 x i16> @test_vqshluq_n_s16(<8 x i16> %a) {
-; CHECK: test_vqshluq_n_s16
-; CHECK: sqshlu {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
- %vqshlu = tail call <8 x i16> @llvm.aarch64.neon.vsqshlu.v8i16(<8 x i16> %a, i32 3)
- ret <8 x i16> %vqshlu
-}
-
-
-define <4 x i32> @test_vqshluq_n_s32(<4 x i32> %a) {
-; CHECK: test_vqshluq_n_s32
-; CHECK: sqshlu {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
- %vqshlu = tail call <4 x i32> @llvm.aarch64.neon.vsqshlu.v4i32(<4 x i32> %a, i32 3)
- ret <4 x i32> %vqshlu
-}
-
-
-define <2 x i64> @test_vqshluq_n_s64(<2 x i64> %a) {
-; CHECK: test_vqshluq_n_s64
-; CHECK: sqshlu {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
- %vqshlu = tail call <2 x i64> @llvm.aarch64.neon.vsqshlu.v2i64(<2 x i64> %a, i32 3)
- ret <2 x i64> %vqshlu
-}
-
-
-define <8 x i8> @test_vshrn_n_s16(<8 x i16> %a) {
-; CHECK: test_vshrn_n_s16
-; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %1 = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
- ret <8 x i8> %vshrn_n
-}
-
-define <4 x i16> @test_vshrn_n_s32(<4 x i32> %a) {
-; CHECK: test_vshrn_n_s32
-; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %1 = ashr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
- %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
- ret <4 x i16> %vshrn_n
-}
-
-define <2 x i32> @test_vshrn_n_s64(<2 x i64> %a) {
-; CHECK: test_vshrn_n_s64
-; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %1 = ashr <2 x i64> %a, <i64 19, i64 19>
- %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
- ret <2 x i32> %vshrn_n
-}
-
-define <8 x i8> @test_vshrn_n_u16(<8 x i16> %a) {
-; CHECK: test_vshrn_n_u16
-; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %1 = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
- ret <8 x i8> %vshrn_n
-}
-
-define <4 x i16> @test_vshrn_n_u32(<4 x i32> %a) {
-; CHECK: test_vshrn_n_u32
-; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %1 = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
- %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
- ret <4 x i16> %vshrn_n
-}
-
-define <2 x i32> @test_vshrn_n_u64(<2 x i64> %a) {
-; CHECK: test_vshrn_n_u64
-; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %1 = lshr <2 x i64> %a, <i64 19, i64 19>
- %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
- ret <2 x i32> %vshrn_n
-}
-
-define <16 x i8> @test_vshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vshrn_high_n_s16
-; CHECK: shrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %1 = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
- %2 = bitcast <8 x i8> %a to <1 x i64>
- %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
- %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %4
-}
-
-define <8 x i16> @test_vshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vshrn_high_n_s32
-; CHECK: shrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %1 = ashr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
- %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
- %2 = bitcast <4 x i16> %a to <1 x i64>
- %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
- %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %4
-}
-
-define <4 x i32> @test_vshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vshrn_high_n_s64
-; CHECK: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %2 = ashr <2 x i64> %b, <i64 19, i64 19>
- %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
- %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
- %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %4
-}
-
-define <16 x i8> @test_vshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vshrn_high_n_u16
-; CHECK: shrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %1 = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
- %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
- %2 = bitcast <8 x i8> %a to <1 x i64>
- %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
- %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %4
-}
-
-define <8 x i16> @test_vshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vshrn_high_n_u32
-; CHECK: shrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %1 = lshr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
- %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
- %2 = bitcast <4 x i16> %a to <1 x i64>
- %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
- %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %4
-}
-
-define <4 x i32> @test_vshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vshrn_high_n_u64
-; CHECK: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %2 = lshr <2 x i64> %b, <i64 19, i64 19>
- %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
- %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
- %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %4
-}
-
-define <8 x i8> @test_vqshrun_n_s16(<8 x i16> %a) {
-; CHECK: test_vqshrun_n_s16
-; CHECK: sqshrun {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %vqshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqshrun.v8i8(<8 x i16> %a, i32 3)
- ret <8 x i8> %vqshrun
-}
-
-
-define <4 x i16> @test_vqshrun_n_s32(<4 x i32> %a) {
-; CHECK: test_vqshrun_n_s32
-; CHECK: sqshrun {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %vqshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqshrun.v4i16(<4 x i32> %a, i32 9)
- ret <4 x i16> %vqshrun
-}
-
-define <2 x i32> @test_vqshrun_n_s64(<2 x i64> %a) {
-; CHECK: test_vqshrun_n_s64
-; CHECK: sqshrun {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %vqshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqshrun.v2i32(<2 x i64> %a, i32 19)
- ret <2 x i32> %vqshrun
-}
-
-define <16 x i8> @test_vqshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vqshrun_high_n_s16
-; CHECK: sqshrun2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %vqshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqshrun.v8i8(<8 x i16> %b, i32 3)
- %1 = bitcast <8 x i8> %a to <1 x i64>
- %2 = bitcast <8 x i8> %vqshrun to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %3
-}
-
-define <8 x i16> @test_vqshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vqshrun_high_n_s32
-; CHECK: sqshrun2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %vqshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqshrun.v4i16(<4 x i32> %b, i32 9)
- %1 = bitcast <4 x i16> %a to <1 x i64>
- %2 = bitcast <4 x i16> %vqshrun to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <4 x i32> @test_vqshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vqshrun_high_n_s64
-; CHECK: sqshrun2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %vqshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqshrun.v2i32(<2 x i64> %b, i32 19)
- %2 = bitcast <2 x i32> %vqshrun to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <8 x i8> @test_vrshrn_n_s16(<8 x i16> %a) {
-; CHECK: test_vrshrn_n_s16
-; CHECK: rshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %vrshrn = tail call <8 x i8> @llvm.aarch64.neon.vrshrn.v8i8(<8 x i16> %a, i32 3)
- ret <8 x i8> %vrshrn
-}
-
-
-define <4 x i16> @test_vrshrn_n_s32(<4 x i32> %a) {
-; CHECK: test_vrshrn_n_s32
-; CHECK: rshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %vrshrn = tail call <4 x i16> @llvm.aarch64.neon.vrshrn.v4i16(<4 x i32> %a, i32 9)
- ret <4 x i16> %vrshrn
-}
-
-
-define <2 x i32> @test_vrshrn_n_s64(<2 x i64> %a) {
-; CHECK: test_vrshrn_n_s64
-; CHECK: rshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %vrshrn = tail call <2 x i32> @llvm.aarch64.neon.vrshrn.v2i32(<2 x i64> %a, i32 19)
- ret <2 x i32> %vrshrn
-}
-
-define <16 x i8> @test_vrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vrshrn_high_n_s16
-; CHECK: rshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %vrshrn = tail call <8 x i8> @llvm.aarch64.neon.vrshrn.v8i8(<8 x i16> %b, i32 3)
- %1 = bitcast <8 x i8> %a to <1 x i64>
- %2 = bitcast <8 x i8> %vrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %3
-}
-
-define <8 x i16> @test_vrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vrshrn_high_n_s32
-; CHECK: rshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %vrshrn = tail call <4 x i16> @llvm.aarch64.neon.vrshrn.v4i16(<4 x i32> %b, i32 9)
- %1 = bitcast <4 x i16> %a to <1 x i64>
- %2 = bitcast <4 x i16> %vrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <4 x i32> @test_vrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vrshrn_high_n_s64
-; CHECK: rshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %vrshrn = tail call <2 x i32> @llvm.aarch64.neon.vrshrn.v2i32(<2 x i64> %b, i32 19)
- %2 = bitcast <2 x i32> %vrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <8 x i8> @test_vqrshrun_n_s16(<8 x i16> %a) {
-; CHECK: test_vqrshrun_n_s16
-; CHECK: sqrshrun {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrun.v8i8(<8 x i16> %a, i32 3)
- ret <8 x i8> %vqrshrun
-}
-
-define <4 x i16> @test_vqrshrun_n_s32(<4 x i32> %a) {
-; CHECK: test_vqrshrun_n_s32
-; CHECK: sqrshrun {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrun.v4i16(<4 x i32> %a, i32 9)
- ret <4 x i16> %vqrshrun
-}
-
-define <2 x i32> @test_vqrshrun_n_s64(<2 x i64> %a) {
-; CHECK: test_vqrshrun_n_s64
-; CHECK: sqrshrun {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrun.v2i32(<2 x i64> %a, i32 19)
- ret <2 x i32> %vqrshrun
-}
-
-define <16 x i8> @test_vqrshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vqrshrun_high_n_s16
-; CHECK: sqrshrun2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrun.v8i8(<8 x i16> %b, i32 3)
- %1 = bitcast <8 x i8> %a to <1 x i64>
- %2 = bitcast <8 x i8> %vqrshrun to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %3
-}
-
-define <8 x i16> @test_vqrshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vqrshrun_high_n_s32
-; CHECK: sqrshrun2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrun.v4i16(<4 x i32> %b, i32 9)
- %1 = bitcast <4 x i16> %a to <1 x i64>
- %2 = bitcast <4 x i16> %vqrshrun to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <4 x i32> @test_vqrshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vqrshrun_high_n_s64
-; CHECK: sqrshrun2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrun.v2i32(<2 x i64> %b, i32 19)
- %2 = bitcast <2 x i32> %vqrshrun to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <8 x i8> @test_vqshrn_n_s16(<8 x i16> %a) {
-; CHECK: test_vqshrn_n_s16
-; CHECK: sqshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqshrn.v8i8(<8 x i16> %a, i32 3)
- ret <8 x i8> %vqshrn
-}
-
-
-define <4 x i16> @test_vqshrn_n_s32(<4 x i32> %a) {
-; CHECK: test_vqshrn_n_s32
-; CHECK: sqshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqshrn.v4i16(<4 x i32> %a, i32 9)
- ret <4 x i16> %vqshrn
-}
-
-
-define <2 x i32> @test_vqshrn_n_s64(<2 x i64> %a) {
-; CHECK: test_vqshrn_n_s64
-; CHECK: sqshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqshrn.v2i32(<2 x i64> %a, i32 19)
- ret <2 x i32> %vqshrn
-}
-
-
-define <8 x i8> @test_vqshrn_n_u16(<8 x i16> %a) {
-; CHECK: test_vqshrn_n_u16
-; CHECK: uqshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqshrn.v8i8(<8 x i16> %a, i32 3)
- ret <8 x i8> %vqshrn
-}
-
-
-define <4 x i16> @test_vqshrn_n_u32(<4 x i32> %a) {
-; CHECK: test_vqshrn_n_u32
-; CHECK: uqshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqshrn.v4i16(<4 x i32> %a, i32 9)
- ret <4 x i16> %vqshrn
-}
-
-
-define <2 x i32> @test_vqshrn_n_u64(<2 x i64> %a) {
-; CHECK: test_vqshrn_n_u64
-; CHECK: uqshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqshrn.v2i32(<2 x i64> %a, i32 19)
- ret <2 x i32> %vqshrn
-}
-
-
-define <16 x i8> @test_vqshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vqshrn_high_n_s16
-; CHECK: sqshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqshrn.v8i8(<8 x i16> %b, i32 3)
- %1 = bitcast <8 x i8> %a to <1 x i64>
- %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %3
-}
-
-define <8 x i16> @test_vqshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vqshrn_high_n_s32
-; CHECK: sqshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqshrn.v4i16(<4 x i32> %b, i32 9)
- %1 = bitcast <4 x i16> %a to <1 x i64>
- %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <4 x i32> @test_vqshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vqshrn_high_n_s64
-; CHECK: sqshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqshrn.v2i32(<2 x i64> %b, i32 19)
- %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <16 x i8> @test_vqshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vqshrn_high_n_u16
-; CHECK: uqshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqshrn.v8i8(<8 x i16> %b, i32 3)
- %1 = bitcast <8 x i8> %a to <1 x i64>
- %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %3
-}
-
-define <8 x i16> @test_vqshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vqshrn_high_n_u32
-; CHECK: uqshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqshrn.v4i16(<4 x i32> %b, i32 9)
- %1 = bitcast <4 x i16> %a to <1 x i64>
- %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <4 x i32> @test_vqshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vqshrn_high_n_u64
-; CHECK: uqshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqshrn.v2i32(<2 x i64> %b, i32 19)
- %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <8 x i8> @test_vqrshrn_n_s16(<8 x i16> %a) {
-; CHECK: test_vqrshrn_n_s16
-; CHECK: sqrshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrn.v8i8(<8 x i16> %a, i32 3)
- ret <8 x i8> %vqrshrn
-}
-
-
-define <4 x i16> @test_vqrshrn_n_s32(<4 x i32> %a) {
-; CHECK: test_vqrshrn_n_s32
-; CHECK: sqrshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrn.v4i16(<4 x i32> %a, i32 9)
- ret <4 x i16> %vqrshrn
-}
-
-
-define <2 x i32> @test_vqrshrn_n_s64(<2 x i64> %a) {
-; CHECK: test_vqrshrn_n_s64
-; CHECK: sqrshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrn.v2i32(<2 x i64> %a, i32 19)
- ret <2 x i32> %vqrshrn
-}
-
-
-define <8 x i8> @test_vqrshrn_n_u16(<8 x i16> %a) {
-; CHECK: test_vqrshrn_n_u16
-; CHECK: uqrshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
- %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16> %a, i32 3)
- ret <8 x i8> %vqrshrn
-}
-
-
-define <4 x i16> @test_vqrshrn_n_u32(<4 x i32> %a) {
-; CHECK: test_vqrshrn_n_u32
-; CHECK: uqrshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
- %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqrshrn.v4i16(<4 x i32> %a, i32 9)
- ret <4 x i16> %vqrshrn
-}
-
-
-define <2 x i32> @test_vqrshrn_n_u64(<2 x i64> %a) {
-; CHECK: test_vqrshrn_n_u64
-; CHECK: uqrshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
- %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqrshrn.v2i32(<2 x i64> %a, i32 19)
- ret <2 x i32> %vqrshrn
-}
-
-
-define <16 x i8> @test_vqrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vqrshrn_high_n_s16
-; CHECK: sqrshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrn.v8i8(<8 x i16> %b, i32 3)
- %1 = bitcast <8 x i8> %a to <1 x i64>
- %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %3
-}
-
-define <8 x i16> @test_vqrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vqrshrn_high_n_s32
-; CHECK: sqrshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrn.v4i16(<4 x i32> %b, i32 9)
- %1 = bitcast <4 x i16> %a to <1 x i64>
- %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <4 x i32> @test_vqrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vqrshrn_high_n_s64
-; CHECK: sqrshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrn.v2i32(<2 x i64> %b, i32 19)
- %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <16 x i8> @test_vqrshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
-; CHECK: test_vqrshrn_high_n_u16
-; CHECK: uqrshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
- %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16> %b, i32 3)
- %1 = bitcast <8 x i8> %a to <1 x i64>
- %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
- ret <16 x i8> %3
-}
-
-define <8 x i16> @test_vqrshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
-; CHECK: test_vqrshrn_high_n_u32
-; CHECK: uqrshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
- %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqrshrn.v4i16(<4 x i32> %b, i32 9)
- %1 = bitcast <4 x i16> %a to <1 x i64>
- %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
- ret <8 x i16> %3
-}
-
-define <4 x i32> @test_vqrshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
-; CHECK: test_vqrshrn_high_n_u64
-; CHECK: uqrshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
- %1 = bitcast <2 x i32> %a to <1 x i64>
- %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqrshrn.v2i32(<2 x i64> %b, i32 19)
- %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
- %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
- %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
- ret <4 x i32> %3
-}
-
-define <2 x float> @test_vcvt_n_f32_s32(<2 x i32> %a) {
-; CHECK: test_vcvt_n_f32_s32
-; CHECK: scvtf {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
- %vcvt = tail call <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %a, i32 31)
- ret <2 x float> %vcvt
-}
-
-define <4 x float> @test_vcvtq_n_f32_s32(<4 x i32> %a) {
-; CHECK: test_vcvtq_n_f32_s32
-; CHECK: scvtf {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
- %vcvt = tail call <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %a, i32 31)
- ret <4 x float> %vcvt
-}
-
-define <2 x double> @test_vcvtq_n_f64_s64(<2 x i64> %a) {
-; CHECK: test_vcvtq_n_f64_s64
-; CHECK: scvtf {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
- %vcvt = tail call <2 x double> @llvm.arm.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 50)
- ret <2 x double> %vcvt
-}
-
-define <2 x float> @test_vcvt_n_f32_u32(<2 x i32> %a) {
-; CHECK: test_vcvt_n_f32_u32
-; CHECK: ucvtf {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
- %vcvt = tail call <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %a, i32 31)
- ret <2 x float> %vcvt
-}
-
-define <4 x float> @test_vcvtq_n_f32_u32(<4 x i32> %a) {
-; CHECK: test_vcvtq_n_f32_u32
-; CHECK: ucvtf {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
- %vcvt = tail call <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %a, i32 31)
- ret <4 x float> %vcvt
-}
-
-define <2 x double> @test_vcvtq_n_f64_u64(<2 x i64> %a) {
-; CHECK: test_vcvtq_n_f64_u64
-; CHECK: ucvtf {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
- %vcvt = tail call <2 x double> @llvm.arm.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 50)
- ret <2 x double> %vcvt
-}
-
-define <2 x i32> @test_vcvt_n_s32_f32(<2 x float> %a) {
-; CHECK: test_vcvt_n_s32_f32
-; CHECK: fcvtzs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
- %vcvt = tail call <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> %a, i32 31)
- ret <2 x i32> %vcvt
-}
-
-define <4 x i32> @test_vcvtq_n_s32_f32(<4 x float> %a) {
-; CHECK: test_vcvtq_n_s32_f32
-; CHECK: fcvtzs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
- %vcvt = tail call <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %a, i32 31)
- ret <4 x i32> %vcvt
-}
-
-define <2 x i64> @test_vcvtq_n_s64_f64(<2 x double> %a) {
-; CHECK: test_vcvtq_n_s64_f64
-; CHECK: fcvtzs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
- %vcvt = tail call <2 x i64> @llvm.arm.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double> %a, i32 50)
- ret <2 x i64> %vcvt
-}
-
-define <2 x i32> @test_vcvt_n_u32_f32(<2 x float> %a) {
-; CHECK: test_vcvt_n_u32_f32
-; CHECK: fcvtzu {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
- %vcvt = tail call <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> %a, i32 31)
- ret <2 x i32> %vcvt
-}
-
-define <4 x i32> @test_vcvtq_n_u32_f32(<4 x float> %a) {
-; CHECK: test_vcvtq_n_u32_f32
-; CHECK: fcvtzu {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
- %vcvt = tail call <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %a, i32 31)
- ret <4 x i32> %vcvt
-}
-
-define <2 x i64> @test_vcvtq_n_u64_f64(<2 x double> %a) {
-; CHECK: test_vcvtq_n_u64_f64
-; CHECK: fcvtzu {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
- %vcvt = tail call <2 x i64> @llvm.arm.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double> %a, i32 50)
- ret <2 x i64> %vcvt
-}
-
-declare <8 x i8> @llvm.aarch64.neon.vsrshr.v8i8(<8 x i8>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsrshr.v4i16(<4 x i16>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsrshr.v2i32(<2 x i32>, i32)
-
-declare <16 x i8> @llvm.aarch64.neon.vsrshr.v16i8(<16 x i8>, i32)
-
-declare <8 x i16> @llvm.aarch64.neon.vsrshr.v8i16(<8 x i16>, i32)
-
-declare <4 x i32> @llvm.aarch64.neon.vsrshr.v4i32(<4 x i32>, i32)
-
-declare <2 x i64> @llvm.aarch64.neon.vsrshr.v2i64(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vurshr.v8i8(<8 x i8>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vurshr.v4i16(<4 x i16>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vurshr.v2i32(<2 x i32>, i32)
-
-declare <16 x i8> @llvm.aarch64.neon.vurshr.v16i8(<16 x i8>, i32)
-
-declare <8 x i16> @llvm.aarch64.neon.vurshr.v8i16(<8 x i16>, i32)
-
-declare <4 x i32> @llvm.aarch64.neon.vurshr.v4i32(<4 x i32>, i32)
-
-declare <2 x i64> @llvm.aarch64.neon.vurshr.v2i64(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8>, <8 x i8>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16>, <4 x i16>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsri.v2i32(<2 x i32>, <2 x i32>, i32)
-
-declare <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8>, <16 x i8>, i32)
-
-declare <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16>, <8 x i16>, i32)
-
-declare <4 x i32> @llvm.aarch64.neon.vsri.v4i32(<4 x i32>, <4 x i32>, i32)
-
-declare <2 x i64> @llvm.aarch64.neon.vsri.v2i64(<2 x i64>, <2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8>, <8 x i8>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16>, <4 x i16>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32>, <2 x i32>, i32)
-
-declare <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8>, <16 x i8>, i32)
-
-declare <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16>, <8 x i16>, i32)
-
-declare <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32>, <4 x i32>, i32)
-
-declare <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64>, <2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vsqshlu.v8i8(<8 x i8>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsqshlu.v4i16(<4 x i16>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsqshlu.v2i32(<2 x i32>, i32)
-
-declare <16 x i8> @llvm.aarch64.neon.vsqshlu.v16i8(<16 x i8>, i32)
-
-declare <8 x i16> @llvm.aarch64.neon.vsqshlu.v8i16(<8 x i16>, i32)
-
-declare <4 x i32> @llvm.aarch64.neon.vsqshlu.v4i32(<4 x i32>, i32)
-
-declare <2 x i64> @llvm.aarch64.neon.vsqshlu.v2i64(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8>, <8 x i8>)
-
-declare <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16>, <4 x i16>)
-
-declare <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32>, <2 x i32>)
-
-declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>)
-
-declare <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16>, <8 x i16>)
-
-declare <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>)
-
-declare <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8>, <8 x i8>)
-
-declare <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16>, <4 x i16>)
-
-declare <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32>, <2 x i32>)
-
-declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>)
-
-declare <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16>, <8 x i16>)
-
-declare <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32>, <4 x i32>)
-
-declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>)
-
-declare <8 x i8> @llvm.aarch64.neon.vsqshrun.v8i8(<8 x i16>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsqshrun.v4i16(<4 x i32>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsqshrun.v2i32(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vrshrn.v8i8(<8 x i16>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vrshrn.v4i16(<4 x i32>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vrshrn.v2i32(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vsqrshrun.v8i8(<8 x i16>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsqrshrun.v4i16(<4 x i32>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsqrshrun.v2i32(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vsqshrn.v8i8(<8 x i16>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsqshrn.v4i16(<4 x i32>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsqshrn.v2i32(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vuqshrn.v8i8(<8 x i16>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vuqshrn.v4i16(<4 x i32>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vuqshrn.v2i32(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vsqrshrn.v8i8(<8 x i16>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vsqrshrn.v4i16(<4 x i32>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vsqrshrn.v2i32(<2 x i64>, i32)
-
-declare <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16>, i32)
-
-declare <4 x i16> @llvm.aarch64.neon.vuqrshrn.v4i16(<4 x i32>, i32)
-
-declare <2 x i32> @llvm.aarch64.neon.vuqrshrn.v2i32(<2 x i64>, i32)
-
-declare <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32)
-
-declare <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32)
-
-declare <2 x double> @llvm.arm.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32)
-
-declare <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32)
-
-declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32)
-
-declare <2 x double> @llvm.arm.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32)
-
-declare <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float>, i32)
-
-declare <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32)
-
-declare <2 x i64> @llvm.arm.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double>, i32)
-
-declare <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float>, i32)
-
-declare <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32)
-
-declare <2 x i64> @llvm.arm.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double>, i32)
-
-define <1 x i64> @test_vcvt_n_s64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvt_n_s64_f64
-; CHECK: fcvtzs d{{[0-9]+}}, d{{[0-9]+}}, #64
- %1 = tail call <1 x i64> @llvm.arm.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double> %a, i32 64)
- ret <1 x i64> %1
-}
-
-define <1 x i64> @test_vcvt_n_u64_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vcvt_n_u64_f64
-; CHECK: fcvtzu d{{[0-9]+}}, d{{[0-9]+}}, #64
- %1 = tail call <1 x i64> @llvm.arm.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double> %a, i32 64)
- ret <1 x i64> %1
-}
-
-define <1 x double> @test_vcvt_n_f64_s64(<1 x i64> %a) {
-; CHECK-LABEL: test_vcvt_n_f64_s64
-; CHECK: scvtf d{{[0-9]+}}, d{{[0-9]+}}, #64
- %1 = tail call <1 x double> @llvm.arm.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vcvt_n_f64_u64(<1 x i64> %a) {
-; CHECK-LABEL: test_vcvt_n_f64_u64
-; CHECK: ucvtf d{{[0-9]+}}, d{{[0-9]+}}, #64
- %1 = tail call <1 x double> @llvm.arm.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
- ret <1 x double> %1
-}
-
-declare <1 x i64> @llvm.arm.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double>, i32)
-declare <1 x i64> @llvm.arm.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double>, i32)
-declare <1 x double> @llvm.arm.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64>, i32)
-declare <1 x double> @llvm.arm.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64>, i32)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-simd-tbl.ll b/test/CodeGen/AArch64/neon-simd-tbl.ll
deleted file mode 100644
index 8eac1e88c4a5..000000000000
--- a/test/CodeGen/AArch64/neon-simd-tbl.ll
+++ /dev/null
@@ -1,828 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-declare <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8>, <16 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8>, <16 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8>, <8 x i8>)
-
-declare <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
-
-declare <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8>, <16 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
-
-define <8 x i8> @test_vtbl1_s8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtbl1_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
- ret <8 x i8> %vtbl11.i
-}
-
-define <8 x i8> @test_vqtbl1_s8(<16 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vqtbl1_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %a, <8 x i8> %b)
- ret <8 x i8> %vtbl1.i
-}
-
-define <8 x i8> @test_vtbl2_s8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl2_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 1
- %vtbl1.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
- ret <8 x i8> %vtbl17.i
-}
-
-define <8 x i8> @test_vqtbl2_s8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl2_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
- %vtbl2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl2.i
-}
-
-define <8 x i8> @test_vtbl3_s8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl3_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 2
- %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl211.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %b)
- ret <8 x i8> %vtbl212.i
-}
-
-define <8 x i8> @test_vqtbl3_s8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl3_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
- %vtbl3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl3.i
-}
-
-define <8 x i8> @test_vtbl4_s8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl4_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 3
- %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl215.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %__a.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl215.i, <8 x i8> %b)
- ret <8 x i8> %vtbl216.i
-}
-
-define <8 x i8> @test_vqtbl4_s8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl4_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
- %vtbl4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl4.i
-}
-
-define <16 x i8> @test_vqtbl1q_s8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vqtbl1q_s8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %vtbl1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b)
- ret <16 x i8> %vtbl1.i
-}
-
-define <16 x i8> @test_vqtbl2q_s8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl2q_s8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
- %vtbl2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl2.i
-}
-
-define <16 x i8> @test_vqtbl3q_s8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl3q_s8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
- %vtbl3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl3.i
-}
-
-define <16 x i8> @test_vqtbl4q_s8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl4q_s8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
- %vtbl4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl4.i
-}
-
-define <8 x i8> @test_vtbx1_s8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vtbx1_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %c)
- %0 = icmp uge <8 x i8> %c, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
- %1 = sext <8 x i1> %0 to <8 x i8>
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl11.i)
- ret <8 x i8> %vbsl.i
-}
-
-define <8 x i8> @test_vtbx2_s8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx2_s8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 1
- %vtbx1.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx1.i, <8 x i8> %c)
- ret <8 x i8> %vtbx17.i
-}
-
-define <8 x i8> @test_vtbx3_s8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx3_s8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 2
- %vtbl2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl211.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %c)
- %0 = icmp uge <8 x i8> %c, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
- %1 = sext <8 x i1> %0 to <8 x i8>
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl212.i)
- ret <8 x i8> %vbsl.i
-}
-
-define <8 x i8> @test_vtbx4_s8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx4_s8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 3
- %vtbx2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx215.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %__b.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx2.i, <16 x i8> %vtbx215.i, <8 x i8> %c)
- ret <8 x i8> %vtbx216.i
-}
-
-define <8 x i8> @test_vqtbx1_s8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vqtbx1_s8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbx1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c)
- ret <8 x i8> %vtbx1.i
-}
-
-define <8 x i8> @test_vqtbx2_s8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx2_s8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
- %vtbx2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx2.i
-}
-
-define <8 x i8> @test_vqtbx3_s8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx3_s8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
- %vtbx3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx3.i
-}
-
-define <8 x i8> @test_vqtbx4_s8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx4_s8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %vtbx4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx4.i
-}
-
-define <16 x i8> @test_vqtbx1q_s8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vqtbx1q_s8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %vtbx1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c)
- ret <16 x i8> %vtbx1.i
-}
-
-define <16 x i8> @test_vqtbx2q_s8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx2q_s8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
- %vtbx2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx2.i
-}
-
-define <16 x i8> @test_vqtbx3q_s8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx3q_s8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
- %vtbx3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx3.i
-}
-
-define <16 x i8> @test_vqtbx4q_s8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx4q_s8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %vtbx4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx4.i
-}
-
-define <8 x i8> @test_vtbl1_u8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtbl1_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
- ret <8 x i8> %vtbl11.i
-}
-
-define <8 x i8> @test_vqtbl1_u8(<16 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vqtbl1_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %a, <8 x i8> %b)
- ret <8 x i8> %vtbl1.i
-}
-
-define <8 x i8> @test_vtbl2_u8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl2_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 1
- %vtbl1.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
- ret <8 x i8> %vtbl17.i
-}
-
-define <8 x i8> @test_vqtbl2_u8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl2_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
- %vtbl2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl2.i
-}
-
-define <8 x i8> @test_vtbl3_u8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl3_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 2
- %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl211.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %b)
- ret <8 x i8> %vtbl212.i
-}
-
-define <8 x i8> @test_vqtbl3_u8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl3_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
- %vtbl3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl3.i
-}
-
-define <8 x i8> @test_vtbl4_u8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl4_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 3
- %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl215.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %__a.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl215.i, <8 x i8> %b)
- ret <8 x i8> %vtbl216.i
-}
-
-define <8 x i8> @test_vqtbl4_u8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl4_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
- %vtbl4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl4.i
-}
-
-define <16 x i8> @test_vqtbl1q_u8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vqtbl1q_u8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %vtbl1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b)
- ret <16 x i8> %vtbl1.i
-}
-
-define <16 x i8> @test_vqtbl2q_u8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl2q_u8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
- %vtbl2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl2.i
-}
-
-define <16 x i8> @test_vqtbl3q_u8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl3q_u8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
- %vtbl3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl3.i
-}
-
-define <16 x i8> @test_vqtbl4q_u8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl4q_u8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
- %vtbl4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl4.i
-}
-
-define <8 x i8> @test_vtbx1_u8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vtbx1_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %c)
- %0 = icmp uge <8 x i8> %c, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
- %1 = sext <8 x i1> %0 to <8 x i8>
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl11.i)
- ret <8 x i8> %vbsl.i
-}
-
-define <8 x i8> @test_vtbx2_u8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx2_u8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 1
- %vtbx1.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx1.i, <8 x i8> %c)
- ret <8 x i8> %vtbx17.i
-}
-
-define <8 x i8> @test_vtbx3_u8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx3_u8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 2
- %vtbl2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl211.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %c)
- %0 = icmp uge <8 x i8> %c, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
- %1 = sext <8 x i1> %0 to <8 x i8>
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl212.i)
- ret <8 x i8> %vbsl.i
-}
-
-define <8 x i8> @test_vtbx4_u8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx4_u8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 3
- %vtbx2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx215.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %__b.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx2.i, <16 x i8> %vtbx215.i, <8 x i8> %c)
- ret <8 x i8> %vtbx216.i
-}
-
-define <8 x i8> @test_vqtbx1_u8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vqtbx1_u8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbx1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c)
- ret <8 x i8> %vtbx1.i
-}
-
-define <8 x i8> @test_vqtbx2_u8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx2_u8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
- %vtbx2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx2.i
-}
-
-define <8 x i8> @test_vqtbx3_u8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx3_u8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
- %vtbx3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx3.i
-}
-
-define <8 x i8> @test_vqtbx4_u8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx4_u8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %vtbx4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx4.i
-}
-
-define <16 x i8> @test_vqtbx1q_u8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vqtbx1q_u8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %vtbx1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c)
- ret <16 x i8> %vtbx1.i
-}
-
-define <16 x i8> @test_vqtbx2q_u8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx2q_u8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
- %vtbx2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx2.i
-}
-
-define <16 x i8> @test_vqtbx3q_u8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx3q_u8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
- %vtbx3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx3.i
-}
-
-define <16 x i8> @test_vqtbx4q_u8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx4q_u8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %vtbx4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx4.i
-}
-
-define <8 x i8> @test_vtbl1_p8(<8 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vtbl1_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
- ret <8 x i8> %vtbl11.i
-}
-
-define <8 x i8> @test_vqtbl1_p8(<16 x i8> %a, <8 x i8> %b) {
-; CHECK: test_vqtbl1_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %a, <8 x i8> %b)
- ret <8 x i8> %vtbl1.i
-}
-
-define <8 x i8> @test_vtbl2_p8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl2_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 1
- %vtbl1.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
- ret <8 x i8> %vtbl17.i
-}
-
-define <8 x i8> @test_vqtbl2_p8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl2_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
- %vtbl2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl2.i
-}
-
-define <8 x i8> @test_vtbl3_p8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl3_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 2
- %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl211.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %b)
- ret <8 x i8> %vtbl212.i
-}
-
-define <8 x i8> @test_vqtbl3_p8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl3_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
- %vtbl3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl3.i
-}
-
-define <8 x i8> @test_vtbl4_p8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vtbl4_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 3
- %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl215.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %__a.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl215.i, <8 x i8> %b)
- ret <8 x i8> %vtbl216.i
-}
-
-define <8 x i8> @test_vqtbl4_p8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) {
-; CHECK: test_vqtbl4_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
- %vtbl4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <8 x i8> %b)
- ret <8 x i8> %vtbl4.i
-}
-
-define <16 x i8> @test_vqtbl1q_p8(<16 x i8> %a, <16 x i8> %b) {
-; CHECK: test_vqtbl1q_p8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %vtbl1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b)
- ret <16 x i8> %vtbl1.i
-}
-
-define <16 x i8> @test_vqtbl2q_p8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl2q_p8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
- %vtbl2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl2.i
-}
-
-define <16 x i8> @test_vqtbl3q_p8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl3q_p8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
- %vtbl3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl3.i
-}
-
-define <16 x i8> @test_vqtbl4q_p8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) {
-; CHECK: test_vqtbl4q_p8:
-; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
- %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
- %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
- %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
- %vtbl4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <16 x i8> %b)
- ret <16 x i8> %vtbl4.i
-}
-
-define <8 x i8> @test_vtbx1_p8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vtbx1_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbl1.i = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %c)
- %0 = icmp uge <8 x i8> %c, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
- %1 = sext <8 x i1> %0 to <8 x i8>
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl11.i)
- ret <8 x i8> %vbsl.i
-}
-
-define <8 x i8> @test_vtbx2_p8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx2_p8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 1
- %vtbx1.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx1.i, <8 x i8> %c)
- ret <8 x i8> %vtbx17.i
-}
-
-define <8 x i8> @test_vtbx3_p8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx3_p8:
-; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 2
- %vtbl2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl211.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %c)
- %0 = icmp uge <8 x i8> %c, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
- %1 = sext <8 x i1> %0 to <8 x i8>
- %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl212.i)
- ret <8 x i8> %vbsl.i
-}
-
-define <8 x i8> @test_vtbx4_p8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vtbx4_p8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 3
- %vtbx2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx215.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %__b.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %vtbx216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx2.i, <16 x i8> %vtbx215.i, <8 x i8> %c)
- ret <8 x i8> %vtbx216.i
-}
-
-define <8 x i8> @test_vqtbx1_p8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) {
-; CHECK: test_vqtbx1_p8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %vtbx1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c)
- ret <8 x i8> %vtbx1.i
-}
-
-define <8 x i8> @test_vqtbx2_p8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx2_p8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
- %vtbx2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx2.i
-}
-
-define <8 x i8> @test_vqtbx3_p8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx3_p8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
- %vtbx3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx3.i
-}
-
-define <8 x i8> @test_vqtbx4_p8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) {
-; CHECK: test_vqtbx4_p8:
-; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %vtbx4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <8 x i8> %c)
- ret <8 x i8> %vtbx4.i
-}
-
-define <16 x i8> @test_vqtbx1q_p8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
-; CHECK: test_vqtbx1q_p8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %vtbx1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c)
- ret <16 x i8> %vtbx1.i
-}
-
-define <16 x i8> @test_vqtbx2q_p8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx2q_p8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
- %vtbx2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx2.i
-}
-
-define <16 x i8> @test_vqtbx3q_p8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx3q_p8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
- %vtbx3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx3.i
-}
-
-define <16 x i8> @test_vqtbx4q_p8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) {
-; CHECK: test_vqtbx4q_p8:
-; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
-entry:
- %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
- %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
- %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
- %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
- %vtbx4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <16 x i8> %c)
- ret <16 x i8> %vtbx4.i
-}
-
diff --git a/test/CodeGen/AArch64/neon-simd-vget.ll b/test/CodeGen/AArch64/neon-simd-vget.ll
deleted file mode 100644
index 6474499e4ff1..000000000000
--- a/test/CodeGen/AArch64/neon-simd-vget.ll
+++ /dev/null
@@ -1,225 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-
-define <8 x i8> @test_vget_high_s8(<16 x i8> %a) {
-; CHECK-LABEL: test_vget_high_s8:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vget_high_s16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_high_s16:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- ret <4 x i16> %shuffle.i
-}
-
-define <2 x i32> @test_vget_high_s32(<4 x i32> %a) {
-; CHECK-LABEL: test_vget_high_s32:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- ret <2 x i32> %shuffle.i
-}
-
-define <1 x i64> @test_vget_high_s64(<2 x i64> %a) {
-; CHECK-LABEL: test_vget_high_s64:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
- ret <1 x i64> %shuffle.i
-}
-
-define <8 x i8> @test_vget_high_u8(<16 x i8> %a) {
-; CHECK-LABEL: test_vget_high_u8:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vget_high_u16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_high_u16:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- ret <4 x i16> %shuffle.i
-}
-
-define <2 x i32> @test_vget_high_u32(<4 x i32> %a) {
-; CHECK-LABEL: test_vget_high_u32:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
- ret <2 x i32> %shuffle.i
-}
-
-define <1 x i64> @test_vget_high_u64(<2 x i64> %a) {
-; CHECK-LABEL: test_vget_high_u64:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
- ret <1 x i64> %shuffle.i
-}
-
-define <1 x i64> @test_vget_high_p64(<2 x i64> %a) {
-; CHECK-LABEL: test_vget_high_p64:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
- ret <1 x i64> %shuffle.i
-}
-
-define <4 x i16> @test_vget_high_f16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_high_f16:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- ret <4 x i16> %shuffle.i
-}
-
-define <2 x float> @test_vget_high_f32(<4 x float> %a) {
-; CHECK-LABEL: test_vget_high_f32:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
- ret <2 x float> %shuffle.i
-}
-
-define <8 x i8> @test_vget_high_p8(<16 x i8> %a) {
-; CHECK-LABEL: test_vget_high_p8:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vget_high_p16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_high_p16:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- ret <4 x i16> %shuffle.i
-}
-
-define <1 x double> @test_vget_high_f64(<2 x double> %a) {
-; CHECK-LABEL: test_vget_high_f64:
-; CHECK: dup d0, {{v[0-9]+}}.d[1]
-entry:
- %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> <i32 1>
- ret <1 x double> %shuffle.i
-}
-
-define <8 x i8> @test_vget_low_s8(<16 x i8> %a) {
-; CHECK-LABEL: test_vget_low_s8:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vget_low_s16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_low_s16:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i16> %shuffle.i
-}
-
-define <2 x i32> @test_vget_low_s32(<4 x i32> %a) {
-; CHECK-LABEL: test_vget_low_s32:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
- ret <2 x i32> %shuffle.i
-}
-
-define <1 x i64> @test_vget_low_s64(<2 x i64> %a) {
-; CHECK-LABEL: test_vget_low_s64:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
- ret <1 x i64> %shuffle.i
-}
-
-define <8 x i8> @test_vget_low_u8(<16 x i8> %a) {
-; CHECK-LABEL: test_vget_low_u8:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vget_low_u16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_low_u16:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i16> %shuffle.i
-}
-
-define <2 x i32> @test_vget_low_u32(<4 x i32> %a) {
-; CHECK-LABEL: test_vget_low_u32:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
- ret <2 x i32> %shuffle.i
-}
-
-define <1 x i64> @test_vget_low_u64(<2 x i64> %a) {
-; CHECK-LABEL: test_vget_low_u64:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
- ret <1 x i64> %shuffle.i
-}
-
-define <1 x i64> @test_vget_low_p64(<2 x i64> %a) {
-; CHECK-LABEL: test_vget_low_p64:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
- ret <1 x i64> %shuffle.i
-}
-
-define <4 x i16> @test_vget_low_f16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_low_f16:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i16> %shuffle.i
-}
-
-define <2 x float> @test_vget_low_f32(<4 x float> %a) {
-; CHECK-LABEL: test_vget_low_f32:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
- ret <2 x float> %shuffle.i
-}
-
-define <8 x i8> @test_vget_low_p8(<16 x i8> %a) {
-; CHECK-LABEL: test_vget_low_p8:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i8> %shuffle.i
-}
-
-define <4 x i16> @test_vget_low_p16(<8 x i16> %a) {
-; CHECK-LABEL: test_vget_low_p16:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i16> %shuffle.i
-}
-
-define <1 x double> @test_vget_low_f64(<2 x double> %a) {
-; CHECK-LABEL: test_vget_low_f64:
-; CHECK: ret
-entry:
- %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> zeroinitializer
- ret <1 x double> %shuffle.i
-}
diff --git a/test/CodeGen/AArch64/neon-truncStore-extLoad.ll b/test/CodeGen/AArch64/neon-truncStore-extLoad.ll
new file mode 100644
index 000000000000..1df3719c8867
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-truncStore-extLoad.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+; A vector TruncStore cannot be selected.
+; Test that a trunc IR followed by a vector store IR can be selected correctly.
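+; For example, truncating <2 x i64> to <2 x i32> keeps the low 32 bits of each
+; lane, which corresponds to a single XTN from a .2d source to a .2s destination.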
+define void @truncStore.v2i64(<2 x i64> %a, <2 x i32>* %result) {
+; CHECK-LABEL: truncStore.v2i64:
+; CHECK: xtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
+; CHECK: {{st1 { v[0-9]+.2s }|str d[0-9]+}}, [x{{[0-9]+|sp}}]
+ %b = trunc <2 x i64> %a to <2 x i32>
+ store <2 x i32> %b, <2 x i32>* %result
+ ret void
+}
+
+define void @truncStore.v4i32(<4 x i32> %a, <4 x i16>* %result) {
+; CHECK-LABEL: truncStore.v4i32:
+; CHECK: xtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
+; CHECK: {{st1 { v[0-9]+.4h }|str d[0-9]+}}, [x{{[0-9]+|sp}}]
+ %b = trunc <4 x i32> %a to <4 x i16>
+ store <4 x i16> %b, <4 x i16>* %result
+ ret void
+}
+
+define void @truncStore.v8i16(<8 x i16> %a, <8 x i8>* %result) {
+; CHECK-LABEL: truncStore.v8i16:
+; CHECK: xtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
+; CHECK: {{st1 { v[0-9]+.8b }|str d[0-9]+}}, [x{{[0-9]+|sp}}]
+ %b = trunc <8 x i16> %a to <8 x i8>
+ store <8 x i8> %b, <8 x i8>* %result
+ ret void
+}
+
+; A vector LoadExt cannot be selected.
+; Test that a vector load IR followed by a sext/zext IR can be selected correctly.
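+; The checks below only require scalar ldrsb/ldrb loads to appear, i.e. that
+; selection succeeds instead of tripping over an extending vector load.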
+define <4 x i32> @loadSExt.v4i8(<4 x i8>* %ref) {
+; CHECK-LABEL: loadSExt.v4i8:
+; CHECK: ldrsb
+ %a = load <4 x i8>* %ref
+ %conv = sext <4 x i8> %a to <4 x i32>
+ ret <4 x i32> %conv
+}
+
+define <4 x i32> @loadZExt.v4i8(<4 x i8>* %ref) {
+; CHECK-LABEL: loadZExt.v4i8:
+; CHECK: ldrb
+ %a = load <4 x i8>* %ref
+ %conv = zext <4 x i8> %a to <4 x i32>
+ ret <4 x i32> %conv
+}
+
+define i32 @loadExt.i32(<4 x i8>* %ref) {
+; CHECK-LABEL: loadExt.i32:
+; CHECK: ldrb
+ %a = load <4 x i8>* %ref
+ %vecext = extractelement <4 x i8> %a, i32 0
+ %conv = zext i8 %vecext to i32
+ ret i32 %conv
+}
diff --git a/test/CodeGen/AArch64/nzcv-save.ll b/test/CodeGen/AArch64/nzcv-save.ll
new file mode 100644
index 000000000000..32baff3dbe64
--- /dev/null
+++ b/test/CodeGen/AArch64/nzcv-save.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=aarch64 < %s | FileCheck %s
+
+; CHECK: mrs [[NZCV_SAVE:x[0-9]+]], NZCV
+; CHECK: msr NZCV, [[NZCV_SAVE]]
+
+; The DAG ends up with two uses of the flags from an ADCS node, which means they
+; must be saved for later.
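+; (The OR with 2^200 leaves the low three 64-bit limbs of %or identical to %c,
+; which is what allows the two additions to share an ADCS whose carry then has
+; two users.)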
+define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
+entry:
+ %c = load i256* %cc
+ %d = load i256* %dd
+ %add = add nsw i256 %c, %d
+ store i256 %add, i256* %a, align 8
+ %or = or i256 %c, 1606938044258990275541962092341162602522202993782792835301376
+ %add6 = add nsw i256 %or, %d
+ store i256 %add6, i256* %b, align 8
+ ret void
+}
diff --git a/test/CodeGen/AArch64/pic-eh-stubs.ll b/test/CodeGen/AArch64/pic-eh-stubs.ll
index 6ec4b19a1204..e8c762504fc9 100644
--- a/test/CodeGen/AArch64/pic-eh-stubs.ll
+++ b/test/CodeGen/AArch64/pic-eh-stubs.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64_be-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
; Make sure exception-handling PIC code can be linked correctly. An alternative
; to the sequence described below would have .gcc_except_table itself writable
@@ -10,8 +11,8 @@
; ... referring indirectly to stubs for its typeinfo ...
; CHECK: // @TType Encoding = indirect pcrel sdata8
; ... one of which is "int"'s typeinfo
-; CHECK: .Ltmp9:
-; CHECK-NEXT: .xword .L_ZTIi.DW.stub-.Ltmp9
+; CHECK: [[TYPEINFO_LBL:.Ltmp[0-9]+]]: // TypeInfo 1
+; CHECK-NEXT: .xword .L_ZTIi.DW.stub-[[TYPEINFO_LBL]]
; .. and which is properly defined (in a writable section for the dynamic loader) later.
; CHECK: .section .data.rel,"aw"
diff --git a/test/CodeGen/AArch64/ragreedy-csr.ll b/test/CodeGen/AArch64/ragreedy-csr.ll
new file mode 100644
index 000000000000..de29b1baa8d5
--- /dev/null
+++ b/test/CodeGen/AArch64/ragreedy-csr.ll
@@ -0,0 +1,297 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -regalloc=greedy -regalloc-csr-first-time-cost=15 | FileCheck %s
+
+; This test case is reduced from the prune_match function in 197.parser.
+; We make sure that we do not use callee-saved registers (x19 to x25).
+; rdar://16162005
+
+; CHECK-LABEL: prune_match:
+; CHECK: entry
+; CHECK: {{str x30|stp x29, x30}}, [sp
+; CHECK-NOT: stp x25,
+; CHECK-NOT: stp x23, x24
+; CHECK-NOT: stp x21, x22
+; CHECK-NOT: stp x19, x20
+; CHECK: if.end
+; CHECK: return
+; CHECK: {{ldr x30|ldp x29, x30}}, [sp
+; CHECK-NOT: ldp x19, x20
+; CHECK-NOT: ldp x21, x22
+; CHECK-NOT: ldp x23, x24
+; CHECK-NOT: ldp x25,
+
+%struct.List_o_links_struct = type { i32, i32, i32, %struct.List_o_links_struct* }
+%struct.Connector_struct = type { i16, i16, i8, i8, %struct.Connector_struct*, i8* }
+%struct._RuneLocale = type { [8 x i8], [32 x i8], i32 (i8*, i64, i8**)*, i32 (i32, i8*, i64, i8**)*, i32, [256 x i32], [256 x i32], [256 x i32], %struct._RuneRange, %struct._RuneRange, %struct._RuneRange, i8*, i32, i32, %struct._RuneCharClass* }
+%struct._RuneRange = type { i32, %struct._RuneEntry* }
+%struct._RuneEntry = type { i32, i32, i32, i32* }
+%struct._RuneCharClass = type { [14 x i8], i32 }
+%struct.Exp_struct = type { i8, i8, i8, i8, %union.anon }
+%union.anon = type { %struct.E_list_struct* }
+%struct.E_list_struct = type { %struct.E_list_struct*, %struct.Exp_struct* }
+%struct.domain_struct = type { i8*, i32, %struct.List_o_links_struct*, i32, i32, %struct.d_tree_leaf_struct*, %struct.domain_struct* }
+%struct.d_tree_leaf_struct = type { %struct.domain_struct*, i32, %struct.d_tree_leaf_struct* }
+@_DefaultRuneLocale = external global %struct._RuneLocale
+declare i32 @__maskrune(i32, i64) #7
+define fastcc i32 @prune_match(%struct.Connector_struct* nocapture readonly %a, %struct.Connector_struct* nocapture readonly %b) #9 {
+entry:
+ %label56 = bitcast %struct.Connector_struct* %a to i16*
+ %0 = load i16* %label56, align 2
+ %label157 = bitcast %struct.Connector_struct* %b to i16*
+ %1 = load i16* %label157, align 2
+ %cmp = icmp eq i16 %0, %1
+ br i1 %cmp, label %if.end, label %return, !prof !988
+if.end:
+ %priority = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 2
+ %2 = load i8* %priority, align 1
+ %priority5 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 2
+ %3 = load i8* %priority5, align 1
+ %string = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 5
+ %4 = load i8** %string, align 8
+ %string7 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 5
+ %5 = load i8** %string7, align 8
+ br label %while.cond
+while.cond:
+ %lsr.iv27 = phi i64 [ %lsr.iv.next28, %if.end17 ], [ 0, %if.end ]
+ %scevgep55 = getelementptr i8* %4, i64 %lsr.iv27
+ %6 = load i8* %scevgep55, align 1
+ %idxprom.i.i = sext i8 %6 to i64
+ %isascii.i.i224 = icmp sgt i8 %6, -1
+ br i1 %isascii.i.i224, label %cond.true.i.i, label %cond.false.i.i, !prof !181
+cond.true.i.i:
+ %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
+ %7 = load i32* %arrayidx.i.i, align 4
+ %and.i.i = and i32 %7, 32768
+ br label %isupper.exit
+cond.false.i.i:
+ %8 = trunc i64 %idxprom.i.i to i8
+ %conv8 = sext i8 %8 to i32
+ %call3.i.i = tail call i32 @__maskrune(i32 %conv8, i64 32768) #3
+ br label %isupper.exit
+isupper.exit:
+ %tobool1.sink.i.in.i = phi i32 [ %and.i.i, %cond.true.i.i ], [ %call3.i.i, %cond.false.i.i ]
+ %tobool1.sink.i.i = icmp eq i32 %tobool1.sink.i.in.i, 0
+ br i1 %tobool1.sink.i.i, label %lor.rhs, label %while.body, !prof !989
+lor.rhs:
+ %sunkaddr = ptrtoint i8* %5 to i64
+ %sunkaddr58 = add i64 %sunkaddr, %lsr.iv27
+ %sunkaddr59 = inttoptr i64 %sunkaddr58 to i8*
+ %9 = load i8* %sunkaddr59, align 1
+ %idxprom.i.i214 = sext i8 %9 to i64
+ %isascii.i.i213225 = icmp sgt i8 %9, -1
+ br i1 %isascii.i.i213225, label %cond.true.i.i217, label %cond.false.i.i219, !prof !181
+cond.true.i.i217:
+ %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
+ %10 = load i32* %arrayidx.i.i215, align 4
+ %and.i.i216 = and i32 %10, 32768
+ br label %isupper.exit223
+cond.false.i.i219:
+ %11 = trunc i64 %idxprom.i.i214 to i8
+ %conv9 = sext i8 %11 to i32
+ %call3.i.i218 = tail call i32 @__maskrune(i32 %conv9, i64 32768) #3
+ br label %isupper.exit223
+isupper.exit223:
+ %tobool1.sink.i.in.i220 = phi i32 [ %and.i.i216, %cond.true.i.i217 ], [ %call3.i.i218, %cond.false.i.i219 ]
+ %tobool1.sink.i.i221 = icmp eq i32 %tobool1.sink.i.in.i220, 0
+ br i1 %tobool1.sink.i.i221, label %while.end, label %while.body, !prof !990
+while.body:
+ %sunkaddr60 = ptrtoint i8* %4 to i64
+ %sunkaddr61 = add i64 %sunkaddr60, %lsr.iv27
+ %sunkaddr62 = inttoptr i64 %sunkaddr61 to i8*
+ %12 = load i8* %sunkaddr62, align 1
+ %sunkaddr63 = ptrtoint i8* %5 to i64
+ %sunkaddr64 = add i64 %sunkaddr63, %lsr.iv27
+ %sunkaddr65 = inttoptr i64 %sunkaddr64 to i8*
+ %13 = load i8* %sunkaddr65, align 1
+ %cmp14 = icmp eq i8 %12, %13
+ br i1 %cmp14, label %if.end17, label %return, !prof !991
+if.end17:
+ %lsr.iv.next28 = add i64 %lsr.iv27, 1
+ br label %while.cond
+while.end:
+ %14 = or i8 %3, %2
+ %15 = icmp eq i8 %14, 0
+ br i1 %15, label %if.then23, label %if.else88, !prof !992
+if.then23:
+ %sunkaddr66 = ptrtoint %struct.Connector_struct* %a to i64
+ %sunkaddr67 = add i64 %sunkaddr66, 16
+ %sunkaddr68 = inttoptr i64 %sunkaddr67 to i8**
+ %16 = load i8** %sunkaddr68, align 8
+ %17 = load i8* %16, align 1
+ %cmp26 = icmp eq i8 %17, 83
+ %sunkaddr69 = ptrtoint i8* %4 to i64
+ %sunkaddr70 = add i64 %sunkaddr69, %lsr.iv27
+ %sunkaddr71 = inttoptr i64 %sunkaddr70 to i8*
+ %18 = load i8* %sunkaddr71, align 1
+ br i1 %cmp26, label %land.lhs.true28, label %while.cond59.preheader, !prof !993
+land.lhs.true28:
+ switch i8 %18, label %land.rhs.preheader [
+ i8 112, label %land.lhs.true35
+ i8 0, label %return
+ ], !prof !994
+land.lhs.true35:
+ %sunkaddr72 = ptrtoint i8* %5 to i64
+ %sunkaddr73 = add i64 %sunkaddr72, %lsr.iv27
+ %sunkaddr74 = inttoptr i64 %sunkaddr73 to i8*
+ %19 = load i8* %sunkaddr74, align 1
+ switch i8 %19, label %land.rhs.preheader [
+ i8 112, label %land.lhs.true43
+ ], !prof !995
+land.lhs.true43:
+ %20 = ptrtoint i8* %16 to i64
+ %21 = sub i64 0, %20
+ %scevgep52 = getelementptr i8* %4, i64 %21
+ %scevgep53 = getelementptr i8* %scevgep52, i64 %lsr.iv27
+ %scevgep54 = getelementptr i8* %scevgep53, i64 -1
+ %cmp45 = icmp eq i8* %scevgep54, null
+ br i1 %cmp45, label %return, label %lor.lhs.false47, !prof !996
+lor.lhs.false47:
+ %22 = ptrtoint i8* %16 to i64
+ %23 = sub i64 0, %22
+ %scevgep47 = getelementptr i8* %4, i64 %23
+ %scevgep48 = getelementptr i8* %scevgep47, i64 %lsr.iv27
+ %scevgep49 = getelementptr i8* %scevgep48, i64 -2
+ %cmp50 = icmp eq i8* %scevgep49, null
+ br i1 %cmp50, label %land.lhs.true52, label %while.cond59.preheader, !prof !997
+land.lhs.true52:
+ %sunkaddr75 = ptrtoint i8* %4 to i64
+ %sunkaddr76 = add i64 %sunkaddr75, %lsr.iv27
+ %sunkaddr77 = add i64 %sunkaddr76, -1
+ %sunkaddr78 = inttoptr i64 %sunkaddr77 to i8*
+ %24 = load i8* %sunkaddr78, align 1
+ %cmp55 = icmp eq i8 %24, 73
+ %cmp61233 = icmp eq i8 %18, 0
+ %or.cond265 = or i1 %cmp55, %cmp61233
+ br i1 %or.cond265, label %return, label %land.rhs.preheader, !prof !998
+while.cond59.preheader:
+ %cmp61233.old = icmp eq i8 %18, 0
+ br i1 %cmp61233.old, label %return, label %land.rhs.preheader, !prof !999
+land.rhs.preheader:
+ %scevgep33 = getelementptr i8* %5, i64 %lsr.iv27
+ %scevgep43 = getelementptr i8* %4, i64 %lsr.iv27
+ br label %land.rhs
+land.rhs:
+ %lsr.iv = phi i64 [ 0, %land.rhs.preheader ], [ %lsr.iv.next, %if.then83 ]
+ %25 = phi i8 [ %27, %if.then83 ], [ %18, %land.rhs.preheader ]
+ %scevgep34 = getelementptr i8* %scevgep33, i64 %lsr.iv
+ %26 = load i8* %scevgep34, align 1
+ %cmp64 = icmp eq i8 %26, 0
+ br i1 %cmp64, label %return, label %while.body66, !prof !1000
+while.body66:
+ %cmp68 = icmp eq i8 %25, 42
+ %cmp72 = icmp eq i8 %26, 42
+ %or.cond = or i1 %cmp68, %cmp72
+ br i1 %or.cond, label %if.then83, label %lor.lhs.false74, !prof !1001
+lor.lhs.false74:
+ %cmp77 = icmp ne i8 %25, %26
+ %cmp81 = icmp eq i8 %25, 94
+ %or.cond208 = or i1 %cmp77, %cmp81
+ br i1 %or.cond208, label %return, label %if.then83, !prof !1002
+if.then83:
+ %scevgep44 = getelementptr i8* %scevgep43, i64 %lsr.iv
+ %scevgep45 = getelementptr i8* %scevgep44, i64 1
+ %27 = load i8* %scevgep45, align 1
+ %cmp61 = icmp eq i8 %27, 0
+ %lsr.iv.next = add i64 %lsr.iv, 1
+ br i1 %cmp61, label %return, label %land.rhs, !prof !999
+if.else88:
+ %cmp89 = icmp eq i8 %2, 1
+ %cmp92 = icmp eq i8 %3, 2
+ %or.cond159 = and i1 %cmp89, %cmp92
+ br i1 %or.cond159, label %while.cond95.preheader, label %if.else123, !prof !1003
+while.cond95.preheader:
+ %sunkaddr79 = ptrtoint i8* %4 to i64
+ %sunkaddr80 = add i64 %sunkaddr79, %lsr.iv27
+ %sunkaddr81 = inttoptr i64 %sunkaddr80 to i8*
+ %28 = load i8* %sunkaddr81, align 1
+ %cmp97238 = icmp eq i8 %28, 0
+ br i1 %cmp97238, label %return, label %land.rhs99.preheader, !prof !1004
+land.rhs99.preheader:
+ %scevgep31 = getelementptr i8* %5, i64 %lsr.iv27
+ %scevgep40 = getelementptr i8* %4, i64 %lsr.iv27
+ br label %land.rhs99
+land.rhs99:
+ %lsr.iv17 = phi i64 [ 0, %land.rhs99.preheader ], [ %lsr.iv.next18, %if.then117 ]
+ %29 = phi i8 [ %31, %if.then117 ], [ %28, %land.rhs99.preheader ]
+ %scevgep32 = getelementptr i8* %scevgep31, i64 %lsr.iv17
+ %30 = load i8* %scevgep32, align 1
+ %cmp101 = icmp eq i8 %30, 0
+ br i1 %cmp101, label %return, label %while.body104, !prof !1005
+while.body104:
+ %cmp107 = icmp eq i8 %29, %30
+ %cmp111 = icmp eq i8 %29, 42
+ %or.cond209 = or i1 %cmp107, %cmp111
+ %cmp115 = icmp eq i8 %30, 94
+ %or.cond210 = or i1 %or.cond209, %cmp115
+ br i1 %or.cond210, label %if.then117, label %return, !prof !1006
+if.then117:
+ %scevgep41 = getelementptr i8* %scevgep40, i64 %lsr.iv17
+ %scevgep42 = getelementptr i8* %scevgep41, i64 1
+ %31 = load i8* %scevgep42, align 1
+ %cmp97 = icmp eq i8 %31, 0
+ %lsr.iv.next18 = add i64 %lsr.iv17, 1
+ br i1 %cmp97, label %return, label %land.rhs99, !prof !1004
+if.else123:
+ %cmp124 = icmp eq i8 %3, 1
+ %cmp127 = icmp eq i8 %2, 2
+ %or.cond160 = and i1 %cmp124, %cmp127
+ br i1 %or.cond160, label %while.cond130.preheader, label %return, !prof !1007
+while.cond130.preheader:
+ %sunkaddr82 = ptrtoint i8* %4 to i64
+ %sunkaddr83 = add i64 %sunkaddr82, %lsr.iv27
+ %sunkaddr84 = inttoptr i64 %sunkaddr83 to i8*
+ %32 = load i8* %sunkaddr84, align 1
+ %cmp132244 = icmp eq i8 %32, 0
+ br i1 %cmp132244, label %return, label %land.rhs134.preheader, !prof !1008
+land.rhs134.preheader:
+ %scevgep29 = getelementptr i8* %5, i64 %lsr.iv27
+ %scevgep37 = getelementptr i8* %4, i64 %lsr.iv27
+ br label %land.rhs134
+land.rhs134:
+ %lsr.iv22 = phi i64 [ 0, %land.rhs134.preheader ], [ %lsr.iv.next23, %if.then152 ]
+ %33 = phi i8 [ %35, %if.then152 ], [ %32, %land.rhs134.preheader ]
+ %scevgep30 = getelementptr i8* %scevgep29, i64 %lsr.iv22
+ %34 = load i8* %scevgep30, align 1
+ %cmp136 = icmp eq i8 %34, 0
+ br i1 %cmp136, label %return, label %while.body139, !prof !1009
+while.body139:
+ %cmp142 = icmp eq i8 %33, %34
+ %cmp146 = icmp eq i8 %34, 42
+ %or.cond211 = or i1 %cmp142, %cmp146
+ %cmp150 = icmp eq i8 %33, 94
+ %or.cond212 = or i1 %or.cond211, %cmp150
+ br i1 %or.cond212, label %if.then152, label %return, !prof !1010
+if.then152:
+ %scevgep38 = getelementptr i8* %scevgep37, i64 %lsr.iv22
+ %scevgep39 = getelementptr i8* %scevgep38, i64 1
+ %35 = load i8* %scevgep39, align 1
+ %cmp132 = icmp eq i8 %35, 0
+ %lsr.iv.next23 = add i64 %lsr.iv22, 1
+ br i1 %cmp132, label %return, label %land.rhs134, !prof !1008
+return:
+ %retval.0 = phi i32 [ 0, %entry ], [ 1, %land.lhs.true52 ], [ 1, %land.lhs.true43 ], [ 0, %if.else123 ], [ 1, %while.cond59.preheader ], [ 1, %while.cond95.preheader ], [ 1, %while.cond130.preheader ], [ 1, %land.lhs.true28 ], [ 1, %if.then83 ], [ 0, %lor.lhs.false74 ], [ 1, %land.rhs ], [ 1, %if.then117 ], [ 0, %while.body104 ], [ 1, %land.rhs99 ], [ 1, %if.then152 ], [ 0, %while.body139 ], [ 1, %land.rhs134 ], [ 0, %while.body ]
+ ret i32 %retval.0
+}
+!181 = metadata !{metadata !"branch_weights", i32 662038, i32 1}
+!988 = metadata !{metadata !"branch_weights", i32 12091450, i32 1916}
+!989 = metadata !{metadata !"branch_weights", i32 7564670, i32 4526781}
+!990 = metadata !{metadata !"branch_weights", i32 7484958, i32 13283499}
+!991 = metadata !{metadata !"branch_weights", i32 8677007, i32 4606493}
+!992 = metadata !{metadata !"branch_weights", i32 -1172426948, i32 145094705}
+!993 = metadata !{metadata !"branch_weights", i32 1468914, i32 5683688}
+!994 = metadata !{metadata !"branch_weights", i32 114025221, i32 -1217548794, i32 -1199521551, i32 87712616}
+!995 = metadata !{metadata !"branch_weights", i32 1853716452, i32 -444717951, i32 932776759}
+!996 = metadata !{metadata !"branch_weights", i32 1004870, i32 20259}
+!997 = metadata !{metadata !"branch_weights", i32 20071, i32 189}
+!998 = metadata !{metadata !"branch_weights", i32 -1020255939, i32 572177766}
+!999 = metadata !{metadata !"branch_weights", i32 2666513, i32 3466431}
+!1000 = metadata !{metadata !"branch_weights", i32 5117635, i32 1859780}
+!1001 = metadata !{metadata !"branch_weights", i32 354902465, i32 -1444604407}
+!1002 = metadata !{metadata !"branch_weights", i32 -1762419279, i32 1592770684}
+!1003 = metadata !{metadata !"branch_weights", i32 1435905930, i32 -1951930624}
+!1004 = metadata !{metadata !"branch_weights", i32 1, i32 504888}
+!1005 = metadata !{metadata !"branch_weights", i32 94662, i32 504888}
+!1006 = metadata !{metadata !"branch_weights", i32 -1897793104, i32 160196332}
+!1007 = metadata !{metadata !"branch_weights", i32 2074643678, i32 -29579071}
+!1008 = metadata !{metadata !"branch_weights", i32 1, i32 226163}
+!1009 = metadata !{metadata !"branch_weights", i32 58357, i32 226163}
+!1010 = metadata !{metadata !"branch_weights", i32 -2072848646, i32 92907517}
diff --git a/test/CodeGen/AArch64/rbit.ll b/test/CodeGen/AArch64/rbit.ll
new file mode 100644
index 000000000000..3404ae4b6bee
--- /dev/null
+++ b/test/CodeGen/AArch64/rbit.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s
+
+; CHECK-LABEL: rbit32
+; CHECK: rbit w0, w0
+define i32 @rbit32(i32 %t) {
+entry:
+ %rbit.i = call i32 @llvm.aarch64.rbit.i32(i32 %t)
+ ret i32 %rbit.i
+}
+
+; CHECK-LABEL: rbit64
+; CHECK: rbit x0, x0
+define i64 @rbit64(i64 %t) {
+entry:
+ %rbit.i = call i64 @llvm.aarch64.rbit.i64(i64 %t)
+ ret i64 %rbit.i
+}
+
+declare i64 @llvm.aarch64.rbit.i64(i64)
+declare i32 @llvm.aarch64.rbit.i32(i32)
diff --git a/test/CodeGen/AArch64/regress-bitcast-formals.ll b/test/CodeGen/AArch64/regress-bitcast-formals.ll
index 9655f90d826d..58e0542d84f5 100644
--- a/test/CodeGen/AArch64/regress-bitcast-formals.ll
+++ b/test/CodeGen/AArch64/regress-bitcast-formals.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-ios7.0 -verify-machineinstrs < %s | FileCheck %s
; CallingConv.td requires a bitcast for vector arguments. Make sure we're
; actually capable of that (the test was omitted from LowerFormalArguments).
diff --git a/test/CodeGen/AArch64/regress-f128csel-flags.ll b/test/CodeGen/AArch64/regress-f128csel-flags.ll
index b35185ccd6f3..25b5e0c5f776 100644
--- a/test/CodeGen/AArch64/regress-f128csel-flags.ll
+++ b/test/CodeGen/AArch64/regress-f128csel-flags.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
; We used to not mark NZCV as being used in the continuation basic-block
; when lowering a 128-bit "select" to branches. This meant a subsequent use
diff --git a/test/CodeGen/AArch64/regress-fp128-livein.ll b/test/CodeGen/AArch64/regress-fp128-livein.ll
index cb8432a7e4e4..5e6ab0a9675b 100644
--- a/test/CodeGen/AArch64/regress-fp128-livein.ll
+++ b/test/CodeGen/AArch64/regress-fp128-livein.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s
; Regression test for NZCV reg live-in not being added to fp128csel IfTrue BB,
; causing a crash during live range calc.
diff --git a/test/CodeGen/AArch64/regress-tail-livereg.ll b/test/CodeGen/AArch64/regress-tail-livereg.ll
index 053249c6855f..03c3f33d9477 100644
--- a/test/CodeGen/AArch64/regress-tail-livereg.ll
+++ b/test/CodeGen/AArch64/regress-tail-livereg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s
@var = global void()* zeroinitializer
declare void @bar()
@@ -17,3 +17,17 @@ define void @foo() {
; CHECK: br {{x([0-79]|1[0-8])}}
ret void
}
+
+; No matter how tempting it is, LLVM should not use x30 since that'll be
+; restored to its incoming value before the "br".
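+; llvm.returnaddress makes x30 hold the obvious branch target here, so the
+; checks require the value to be copied into another register before the "br".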
+define void @test_x30_tail() {
+; CHECK-LABEL: test_x30_tail:
+; CHECK: mov [[DEST:x[0-9]+]], x30
+; CHECK: br [[DEST]]
+ %addr = call i8* @llvm.returnaddress(i32 0)
+ %faddr = bitcast i8* %addr to void()*
+ tail call void %faddr()
+ ret void
+}
+
+declare i8* @llvm.returnaddress(i32)
diff --git a/test/CodeGen/AArch64/regress-tblgen-chains.ll b/test/CodeGen/AArch64/regress-tblgen-chains.ll
index ff77fb4e48f7..477d99625eec 100644
--- a/test/CodeGen/AArch64/regress-tblgen-chains.ll
+++ b/test/CodeGen/AArch64/regress-tblgen-chains.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s
; When generating DAG selection tables, TableGen used to only flag an
; instruction as needing a chain on its own account if it had a built-in pattern
@@ -17,17 +17,18 @@ define i64 @test_chains() {
%locvar = alloca i8
call void @bar(i8* %locvar)
-; CHECK: bl bar
+; CHECK: bl {{_?bar}}
%inc.1 = load i8* %locvar
%inc.2 = zext i8 %inc.1 to i64
%inc.3 = add i64 %inc.2, 1
%inc.4 = trunc i64 %inc.3 to i8
store i8 %inc.4, i8* %locvar
-; CHECK: ldrb {{w[0-9]+}}, [sp, [[LOCADDR:#[0-9]+]]]
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #1
-; CHECK: strb {{w[0-9]+}}, [sp, [[LOCADDR]]]
-; CHECK: ldrb {{w[0-9]+}}, [sp, [[LOCADDR]]]
+
+; CHECK: ldurb {{w[0-9]+}}, [x29, [[LOCADDR:#-?[0-9]+]]]
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #1
+; CHECK: sturb {{w[0-9]+}}, [x29, [[LOCADDR]]]
+; CHECK: ldurb {{w[0-9]+}}, [x29, [[LOCADDR]]]
%ret.1 = load i8* %locvar
%ret.2 = zext i8 %ret.1 to i64
diff --git a/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
index 0ef981819ec3..c3167e4f4bdd 100644
--- a/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
+++ b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
@@ -5,22 +5,7 @@ declare void @bar()
define void @test_w29_reserved() {
; CHECK-LABEL: test_w29_reserved:
-; CHECK: .cfi_startproc
-; CHECK: .cfi_def_cfa sp, 96
; CHECK: add x29, sp, #{{[0-9]+}}
-; CHECK: .cfi_def_cfa x29, 16
-; CHECK: .cfi_offset x30, -8
-; CHECK: .cfi_offset x29, -16
-; CHECK: .cfi_offset x28, -24
-; CHECK: .cfi_offset x27, -32
-; CHECK: .cfi_offset x26, -40
-; CHECK: .cfi_offset x25, -48
-; CHECK: .cfi_offset x24, -56
-; CHECK: .cfi_offset x23, -64
-; CHECK: .cfi_offset x22, -72
-; CHECK: .cfi_offset x21, -80
-; CHECK: .cfi_offset x20, -88
-; CHECK: .cfi_offset x19, -96
%val1 = load volatile i32* @var
%val2 = load volatile i32* @var
diff --git a/test/CodeGen/AArch64/regress-wzr-allocatable.ll b/test/CodeGen/AArch64/regress-wzr-allocatable.ll
deleted file mode 100644
index 764d2bc44f0d..000000000000
--- a/test/CodeGen/AArch64/regress-wzr-allocatable.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -O0
-
-; When WZR wasn't marked as reserved, this function tried to allocate
-; it at O0 and then generated an internal fault (mostly incidentally)
-; when it discovered that it was already in use for a multiplication.
-
-; I'm not really convinced this is a good test since it could easily
-; stop testing what it does now with no-one any the wiser. However, I
-; can't think of a better way to force the allocator to use WZR
-; specifically.
-
-define void @test() nounwind {
-entry:
- br label %for.cond
-
-for.cond: ; preds = %for.body, %entry
- br i1 undef, label %for.body, label %for.end
-
-for.body: ; preds = %for.cond
- br label %for.cond
-
-for.end: ; preds = %for.cond
- br label %for.cond6
-
-for.cond6: ; preds = %for.body9, %for.end
- br i1 undef, label %for.body9, label %while.cond30
-
-for.body9: ; preds = %for.cond6
- store i16 0, i16* undef, align 2
- %0 = load i32* undef, align 4
- %1 = load i32* undef, align 4
- %mul15 = mul i32 %0, %1
- %add16 = add i32 %mul15, 32768
- %div = udiv i32 %add16, 65535
- %add17 = add i32 %div, 1
- store i32 %add17, i32* undef, align 4
- br label %for.cond6
-
-while.cond30: ; preds = %for.cond6
- ret void
-}
diff --git a/test/CodeGen/AArch64/returnaddr.ll b/test/CodeGen/AArch64/returnaddr.ll
index c85f9ec4ffd5..b136f044cad8 100644
--- a/test/CodeGen/AArch64/returnaddr.ll
+++ b/test/CodeGen/AArch64/returnaddr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
define i8* @rt0(i32 %x) nounwind readnone {
entry:
diff --git a/test/CodeGen/AArch64/setcc-takes-i32.ll b/test/CodeGen/AArch64/setcc-takes-i32.ll
index bd79685d34b4..ec8615910cf0 100644
--- a/test/CodeGen/AArch64/setcc-takes-i32.ll
+++ b/test/CodeGen/AArch64/setcc-takes-i32.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -o - %s | FileCheck %s
; Most important point here is that the promotion of the i1 works
; correctly. Previously LLVM thought that i64 was the appropriate SetCC output,
diff --git a/test/CodeGen/AArch64/sibling-call.ll b/test/CodeGen/AArch64/sibling-call.ll
index 20f1062a44dc..34e3bb410e8c 100644
--- a/test/CodeGen/AArch64/sibling-call.ll
+++ b/test/CodeGen/AArch64/sibling-call.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-load-store-opt=0 | FileCheck %s
declare void @callee_stack0()
declare void @callee_stack8([8 x i32], i64)
@@ -73,10 +73,10 @@ define void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
tail call void @callee_stack16([8 x i32] undef, i64 %b, i64 %a)
ret void
-; CHECK: ldr x0,
-; CHECK: ldr x1,
-; CHECK: str x1,
-; CHECK: str x0,
+; CHECK: ldr [[VAL0:x[0-9]+]],
+; CHECK: ldr [[VAL1:x[0-9]+]],
+; CHECK: str [[VAL1]],
+; CHECK: str [[VAL0]],
; CHECK-NOT: add sp, sp,
; CHECK: b callee_stack16
@@ -91,7 +91,7 @@ define void @indirect_tail() {
%fptr = load void(i32)** @func
tail call void %fptr(i32 42)
ret void
-; CHECK: ldr [[FPTR:x[1-9]+]], [{{x[0-9]+}}, #:lo12:func]
-; CHECK: movz w0, #42
+; CHECK: ldr [[FPTR:x[1-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:func]
+; CHECK: movz w0, #{{42|0x2a}}
; CHECK: br [[FPTR]]
}
diff --git a/test/CodeGen/AArch64/sincos-expansion.ll b/test/CodeGen/AArch64/sincos-expansion.ll
index 4cd44494d545..c3a172dfb427 100644
--- a/test/CodeGen/AArch64/sincos-expansion.ll
+++ b/test/CodeGen/AArch64/sincos-expansion.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
define float @test_sincos_f32(float %f) {
%sin = call float @sinf(float %f) readnone
diff --git a/test/CodeGen/AArch64/sincospow-vector-expansion.ll b/test/CodeGen/AArch64/sincospow-vector-expansion.ll
new file mode 100644
index 000000000000..22f33a83394b
--- /dev/null
+++ b/test/CodeGen/AArch64/sincospow-vector-expansion.ll
@@ -0,0 +1,96 @@
+; RUN: llc -o - %s -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+neon | FileCheck %s
+
+
+define <2 x float> @test_cos_v2f64(<2 x double> %v1) {
+; CHECK-LABEL: test_cos_v2f64:
+; CHECK: bl cos
+; CHECK: bl cos
+ %1 = call <2 x double> @llvm.cos.v2f64(<2 x double> %v1)
+ %2 = fptrunc <2 x double> %1 to <2 x float>
+ ret <2 x float> %2
+}
+
+define <2 x float> @test_sin_v2f64(<2 x double> %v1) {
+; CHECK-LABEL: test_sin_v2f64:
+; CHECK: bl sin
+; CHECK: bl sin
+ %1 = call <2 x double> @llvm.sin.v2f64(<2 x double> %v1)
+ %2 = fptrunc <2 x double> %1 to <2 x float>
+ ret <2 x float> %2
+}
+
+define <2 x float> @test_pow_v2f64(<2 x double> %v1, <2 x double> %v2) {
+; CHECK-LABEL: test_pow_v2f64:
+; CHECK: bl pow
+; CHECK: bl pow
+ %1 = call <2 x double> @llvm.pow.v2f64(<2 x double> %v1, <2 x double> %v2)
+ %2 = fptrunc <2 x double> %1 to <2 x float>
+ ret <2 x float> %2
+}
+
+declare <2 x double> @llvm.cos.v2f64(<2 x double>)
+declare <2 x double> @llvm.sin.v2f64(<2 x double>)
+declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_cos_v2f32(<2 x float> %v1) {
+; CHECK-LABEL: test_cos_v2f32:
+; CHECK: bl cos
+; CHECK: bl cos
+ %1 = call <2 x float> @llvm.cos.v2f32(<2 x float> %v1)
+ ret <2 x float> %1
+}
+
+define <2 x float> @test_sin_v2f32(<2 x float> %v1) {
+; CHECK-LABEL: test_sin_v2f32:
+; CHECK: bl sin
+; CHECK: bl sin
+ %1 = call <2 x float> @llvm.sin.v2f32(<2 x float> %v1)
+ ret <2 x float> %1
+}
+
+define <2 x float> @test_pow_v2f32(<2 x float> %v1, <2 x float> %v2) {
+; CHECK-LABEL: test_pow_v2f32:
+; CHECK: bl pow
+; CHECK: bl pow
+ %1 = call <2 x float> @llvm.pow.v2f32(<2 x float> %v1, <2 x float> %v2)
+ ret <2 x float> %1
+}
+
+declare <2 x float> @llvm.cos.v2f32(<2 x float>)
+declare <2 x float> @llvm.sin.v2f32(<2 x float>)
+declare <2 x float> @llvm.pow.v2f32(<2 x float>, <2 x float>)
+
+define <4 x float> @test_cos_v4f32(<4 x float> %v1) {
+; CHECK-LABEL: test_cos_v4f32:
+; CHECK: bl cos
+; CHECK: bl cos
+; CHECK: bl cos
+; CHECK: bl cos
+ %1 = call <4 x float> @llvm.cos.v4f32(<4 x float> %v1)
+ ret <4 x float> %1
+}
+
+define <4 x float> @test_sin_v4f32(<4 x float> %v1) {
+; CHECK-LABEL: test_sin_v4f32:
+; CHECK: bl sin
+; CHECK: bl sin
+; CHECK: bl sin
+; CHECK: bl sin
+ %1 = call <4 x float> @llvm.sin.v4f32(<4 x float> %v1)
+ ret <4 x float> %1
+}
+
+define <4 x float> @test_pow_v4f32(<4 x float> %v1, <4 x float> %v2) {
+; CHECK-LABEL: test_pow_v4f32:
+; CHECK: bl pow
+; CHECK: bl pow
+; CHECK: bl pow
+; CHECK: bl pow
+ %1 = call <4 x float> @llvm.pow.v4f32(<4 x float> %v1, <4 x float> %v2)
+ ret <4 x float> %1
+}
+
+declare <4 x float> @llvm.cos.v4f32(<4 x float>)
+declare <4 x float> @llvm.sin.v4f32(<4 x float>)
+declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
+
diff --git a/test/CodeGen/AArch64/tail-call.ll b/test/CodeGen/AArch64/tail-call.ll
index 81885f108512..8aab84215260 100644
--- a/test/CodeGen/AArch64/tail-call.ll
+++ b/test/CodeGen/AArch64/tail-call.ll
@@ -7,8 +7,10 @@ declare fastcc void @callee_stack16([8 x i32], i64, i64)
define fastcc void @caller_to0_from0() nounwind {
; CHECK-LABEL: caller_to0_from0:
; CHECK-NEXT: // BB
+
tail call fastcc void @callee_stack0()
ret void
+
; CHECK-NEXT: b callee_stack0
}
@@ -17,6 +19,7 @@ define fastcc void @caller_to0_from8([8 x i32], i64) {
tail call fastcc void @callee_stack0()
ret void
+
; CHECK: add sp, sp, #16
; CHECK-NEXT: b callee_stack0
}
@@ -29,8 +32,8 @@ define fastcc void @caller_to8_from0() {
; pointer (we didn't have arg space to reuse).
tail call fastcc void @callee_stack8([8 x i32] undef, i64 42)
ret void
-; CHECK: str {{x[0-9]+}}, [sp, #16]
-; CHECK-NEXT: add sp, sp, #16
+
+; CHECK: str {{x[0-9]+}}, [sp, #16]!
; CHECK-NEXT: b callee_stack8
}
@@ -41,8 +44,8 @@ define fastcc void @caller_to8_from8([8 x i32], i64 %a) {
; Key point is that the "%a" should go at SP on entry.
tail call fastcc void @callee_stack8([8 x i32] undef, i64 42)
ret void
-; CHECK: str {{x[0-9]+}}, [sp, #16]
-; CHECK-NEXT: add sp, sp, #16
+
+; CHECK: str {{x[0-9]+}}, [sp, #16]!
; CHECK-NEXT: b callee_stack8
}
@@ -54,10 +57,10 @@ define fastcc void @caller_to16_from8([8 x i32], i64 %a) {
; above %a on the stack. If it tries to go below incoming-SP then the
; callee will not deallocate the space, even in fastcc.
tail call fastcc void @callee_stack16([8 x i32] undef, i64 42, i64 2)
-; CHECK: str {{x[0-9]+}}, [sp, #24]
-; CHECK: str {{x[0-9]+}}, [sp, #16]
-; CHECK: add sp, sp, #16
-; CHECK: b callee_stack16
+
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: b callee_stack16
ret void
}
@@ -69,8 +72,8 @@ define fastcc void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
; Key point is that the "%a" should go at #16 above SP on entry.
tail call fastcc void @callee_stack8([8 x i32] undef, i64 42)
ret void
-; CHECK: str {{x[0-9]+}}, [sp, #32]
-; CHECK-NEXT: add sp, sp, #32
+
+; CHECK: str {{x[0-9]+}}, [sp, #32]!
; CHECK-NEXT: b callee_stack8
}
@@ -84,11 +87,8 @@ define fastcc void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
tail call fastcc void @callee_stack16([8 x i32] undef, i64 %b, i64 %a)
ret void
-; CHECK: ldr x0,
-; CHECK: ldr x1,
-; CHECK: str x1,
-; CHECK: str x0,
-
-; CHECK: add sp, sp, #16
-; CHECK: b callee_stack16
+; CHECK: ldp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: b callee_stack16
}
diff --git a/test/CodeGen/AArch64/tls-dynamic-together.ll b/test/CodeGen/AArch64/tls-dynamic-together.ll
deleted file mode 100644
index b5d7d8938444..000000000000
--- a/test/CodeGen/AArch64/tls-dynamic-together.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc -O0 -mtriple=aarch64-none-linux-gnu -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s
-
-; If the .tlsdesccall and blr parts are emitted completely separately (even with
-; glue) then LLVM will separate them quite happily (with a spill at O0, hence
-; the option). This is definitely wrong, so we make sure they are emitted
-; together.
-
-@general_dynamic_var = external thread_local global i32
-
-define i32 @test_generaldynamic() {
-; CHECK-LABEL: test_generaldynamic:
-
- %val = load i32* @general_dynamic_var
- ret i32 %val
-
-; CHECK: .tlsdesccall general_dynamic_var
-; CHECK-NEXT: blr {{x[0-9]+}}
-}
diff --git a/test/CodeGen/AArch64/tls-dynamics.ll b/test/CodeGen/AArch64/tls-dynamics.ll
deleted file mode 100644
index 68c481ce98b6..000000000000
--- a/test/CodeGen/AArch64/tls-dynamics.ll
+++ /dev/null
@@ -1,121 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
-
-@general_dynamic_var = external thread_local global i32
-
-define i32 @test_generaldynamic() {
-; CHECK-LABEL: test_generaldynamic:
-
- %val = load i32* @general_dynamic_var
- ret i32 %val
-
-; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
-; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
-; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
-; CHECK: .tlsdesccall general_dynamic_var
-; CHECK-NEXT: blr [[CALLEE]]
-
-; CHECK: mrs x[[TP:[0-9]+]], tpidr_el0
-; CHECK: ldr w0, [x[[TP]], x0]
-
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
-
-}
-
-define i32* @test_generaldynamic_addr() {
-; CHECK-LABEL: test_generaldynamic_addr:
-
- ret i32* @general_dynamic_var
-
-; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
-; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
-; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
-; CHECK: .tlsdesccall general_dynamic_var
-; CHECK-NEXT: blr [[CALLEE]]
-
-; CHECK: mrs [[TP:x[0-9]+]], tpidr_el0
-; CHECK: add x0, [[TP]], x0
-
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
-
-}
-
-@local_dynamic_var = external thread_local(localdynamic) global i32
-
-define i32 @test_localdynamic() {
-; CHECK-LABEL: test_localdynamic:
-
- %val = load i32* @local_dynamic_var
- ret i32 %val
-
-; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
-; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
-; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
-; CHECK: .tlsdesccall _TLS_MODULE_BASE_
-; CHECK-NEXT: blr [[CALLEE]]
-
-; CHECK: movz [[DTP_OFFSET:x[0-9]+]], #:dtprel_g1:local_dynamic_var
-; CHECK: movk [[DTP_OFFSET]], #:dtprel_g0_nc:local_dynamic_var
-
-; CHECK: ldr w0, [x0, [[DTP_OFFSET]]]
-
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
-
-}
-
-define i32* @test_localdynamic_addr() {
-; CHECK-LABEL: test_localdynamic_addr:
-
- ret i32* @local_dynamic_var
-
-; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
-; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
-; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
-; CHECK: .tlsdesccall _TLS_MODULE_BASE_
-; CHECK-NEXT: blr [[CALLEE]]
-
-; CHECK: movz [[DTP_OFFSET:x[0-9]+]], #:dtprel_g1:local_dynamic_var
-; CHECK: movk [[DTP_OFFSET]], #:dtprel_g0_nc:local_dynamic_var
-
-; CHECK: add x0, x0, [[DTP_OFFSET]]
-
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
-
-}
-
-; The entire point of the local-dynamic access model is to have a single call to
-; the expensive resolver. Make sure we achieve that goal.
-
-@local_dynamic_var2 = external thread_local(localdynamic) global i32
-
-define i32 @test_localdynamic_deduplicate() {
-; CHECK-LABEL: test_localdynamic_deduplicate:
-
- %val = load i32* @local_dynamic_var
- %val2 = load i32* @local_dynamic_var2
-
- %sum = add i32 %val, %val2
- ret i32 %sum
-
-; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
-; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
-; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
-; CHECK: .tlsdesccall _TLS_MODULE_BASE_
-; CHECK-NEXT: blr [[CALLEE]]
-
-; CHECK-NOT: _TLS_MODULE_BASE_
-
-; CHECK: ret
-}
diff --git a/test/CodeGen/AArch64/tls-execs.ll b/test/CodeGen/AArch64/tls-execs.ll
deleted file mode 100644
index 39ceb9a4795c..000000000000
--- a/test/CodeGen/AArch64/tls-execs.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -show-mc-encoding < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
-
-@initial_exec_var = external thread_local(initialexec) global i32
-
-define i32 @test_initial_exec() {
-; CHECK-LABEL: test_initial_exec:
- %val = load i32* @initial_exec_var
-
-; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
-; CHECK: ldr x[[TP_OFFSET:[0-9]+]], [x[[GOTADDR]], #:gottprel_lo12:initial_exec_var]
-; CHECK: mrs x[[TP:[0-9]+]], tpidr_el0
-; CHECK: ldr w0, [x[[TP]], x[[TP_OFFSET]]]
-
-; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
-; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
-
- ret i32 %val
-}
-
-define i32* @test_initial_exec_addr() {
-; CHECK-LABEL: test_initial_exec_addr:
- ret i32* @initial_exec_var
-
-; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
-; CHECK: ldr [[TP_OFFSET:x[0-9]+]], [x[[GOTADDR]], #:gottprel_lo12:initial_exec_var]
-; CHECK: mrs [[TP:x[0-9]+]], tpidr_el0
-; CHECK: add x0, [[TP]], [[TP_OFFSET]]
-
-; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
-; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
-
-}
-
-@local_exec_var = thread_local(initialexec) global i32 0
-
-define i32 @test_local_exec() {
-; CHECK-LABEL: test_local_exec:
- %val = load i32* @local_exec_var
-
-; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var // encoding: [A,A,0xa0'A',0x92'A']
-; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
-; CHECK: mrs x[[TP:[0-9]+]], tpidr_el0
-; CHECK: ldr w0, [x[[TP]], [[TP_OFFSET]]]
-
-; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G1
-; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
-
- ret i32 %val
-}
-
-define i32* @test_local_exec_addr() {
-; CHECK-LABEL: test_local_exec_addr:
- ret i32* @local_exec_var
-
-; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var
-; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
-; CHECK: mrs [[TP:x[0-9]+]], tpidr_el0
-; CHECK: add x0, [[TP]], [[TP_OFFSET]]
-
-; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G1
-; CHECK-RELOC: R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
-}
diff --git a/test/CodeGen/AArch64/trunc-v1i64.ll b/test/CodeGen/AArch64/trunc-v1i64.ll
new file mode 100644
index 000000000000..159b8e0cff33
--- /dev/null
+++ b/test/CodeGen/AArch64/trunc-v1i64.ll
@@ -0,0 +1,63 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -verify-machineinstrs < %s | FileCheck %s
+
+; An optimization in DAG Combiner to fold
+; (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...))
+; will generate nodes like:
+; v1i32 trunc v1i64, v1i16 trunc v1i64, v1i8 trunc v1i64.
+; Such nodes are scalarized by default during type legalization, but that
+; scalarization triggers an assertion failure because v1i64 is a legal type in
+; AArch64. We therefore change the default behaviour from scalarizing to widening.
+
+; FIXME: Currently an XTN is generated for v1i32, but it could be optimized away;
+; as with v1i16 and v1i8, no XTN should be generated.
+
+define <2 x i32> @test_v1i32_0(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i32_0:
+; CHECK: xtn v0.2s, v0.2d
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <2 x i32> <i32 0, i32 undef>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %2
+}
+
+define <2 x i32> @test_v1i32_1(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i32_1:
+; CHECK: xtn v0.2s, v0.2d
+; CHECK-NEXT: dup v0.2s, v0.s[0]
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <2 x i32> <i32 undef, i32 0>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %2
+}
+
+define <4 x i16> @test_v1i16_0(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i16_0:
+; CHECK-NOT: xtn
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
+ %2 = trunc <4 x i64> %1 to <4 x i16>
+ ret <4 x i16> %2
+}
+
+define <4 x i16> @test_v1i16_1(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i16_1:
+; CHECK-NOT: xtn
+; CHECK: dup v0.4h, v0.h[0]
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
+ %2 = trunc <4 x i64> %1 to <4 x i16>
+ ret <4 x i16> %2
+}
+
+define <8 x i8> @test_v1i8_0(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i8_0:
+; CHECK-NOT: xtn
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = trunc <8 x i64> %1 to <8 x i8>
+ ret <8 x i8> %2
+}
+
+define <8 x i8> @test_v1i8_1(<1 x i64> %in0) {
+; CHECK-LABEL: test_v1i8_1:
+; CHECK-NOT: xtn
+; CHECK: dup v0.8b, v0.b[0]
+ %1 = shufflevector <1 x i64> %in0, <1 x i64> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %2 = trunc <8 x i64> %1 to <8 x i8>
+ ret <8 x i8> %2
+}
\ No newline at end of file
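
The comment block at the top of trunc-v1i64.ll describes the fold in terms of DAG nodes. As a rough, hypothetical reduction (not part of the imported test; the function name is invented for illustration), IR of the following shape reaches the same v1iN-trunc-of-v1i64 nodes:

define <2 x i16> @widen_then_trunc(<1 x i64> %in) {
  ; The shufflevector becomes a concat_vectors of the v1i64 operand with undef.
  %wide = shufflevector <1 x i64> %in, <1 x i64> undef, <2 x i32> <i32 0, i32 undef>
  ; Folding the trunc through that concat produces a v1i16 trunc of a v1i64;
  ; widening this node avoids the scalarization path that asserts because
  ; v1i64 is already a legal type on AArch64.
  %narrow = trunc <2 x i64> %wide to <2 x i16>
  ret <2 x i16> %narrow
}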
diff --git a/test/CodeGen/AArch64/tst-br.ll b/test/CodeGen/AArch64/tst-br.ll
index 154bc08c144c..5dc7b5df475a 100644
--- a/test/CodeGen/AArch64/tst-br.ll
+++ b/test/CodeGen/AArch64/tst-br.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s
; We've got the usual issues with LLVM reordering blocks here. The
; tests are correct for the current order, but who knows when that
@@ -15,7 +15,7 @@ define i32 @test_tbz() {
%tbit0 = and i32 %val, 32768
%tst0 = icmp ne i32 %tbit0, 0
br i1 %tst0, label %test1, label %end1
-; CHECK: tbz {{w[0-9]+}}, #15, [[LBL_end1:.LBB0_[0-9]+]]
+; CHECK: tbz {{w[0-9]+}}, #15, [[LBL_end1:.?LBB0_[0-9]+]]
test1:
%tbit1 = and i32 %val, 4096
@@ -27,22 +27,22 @@ test2:
%tbit2 = and i64 %val64, 32768
%tst2 = icmp ne i64 %tbit2, 0
br i1 %tst2, label %test3, label %end1
-; CHECK: tbz {{x[0-9]+}}, #15, [[LBL_end1]]
+; CHECK: tbz {{[wx][0-9]+}}, #15, [[LBL_end1]]
test3:
%tbit3 = and i64 %val64, 4096
%tst3 = icmp ne i64 %tbit3, 0
br i1 %tst3, label %end2, label %end1
-; CHECK: tbz {{x[0-9]+}}, #12, [[LBL_end1]]
+; CHECK: tbz {{[wx][0-9]+}}, #12, [[LBL_end1]]
end2:
-; CHECK: movz x0, #1
+; CHECK: {{movz x0, #1|orr w0, wzr, #0x1}}
; CHECK-NEXT: ret
ret i32 1
end1:
; CHECK: [[LBL_end1]]:
-; CHECK-NEXT: mov x0, xzr
+; CHECK-NEXT: {{mov x0, xzr|mov w0, wzr}}
; CHECK-NEXT: ret
ret i32 0
}
diff --git a/test/CodeGen/AArch64/variadic.ll b/test/CodeGen/AArch64/variadic.ll
deleted file mode 100644
index 4c219eb83788..000000000000
--- a/test/CodeGen/AArch64/variadic.ll
+++ /dev/null
@@ -1,199 +0,0 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 < %s | FileCheck --check-prefix=CHECK-NOFP %s
-
-%va_list = type {i8*, i8*, i8*, i32, i32}
-
-@var = global %va_list zeroinitializer
-
-declare void @llvm.va_start(i8*)
-
-define void @test_simple(i32 %n, ...) {
-; CHECK-LABEL: test_simple:
-; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK: mov x[[FPRBASE:[0-9]+]], sp
-; CHECK: str q7, [x[[FPRBASE]], #112]
-; CHECK: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
-; CHECK: str x7, [x[[GPRBASE]], #48]
-
-; CHECK-NOFP: sub sp, sp, #[[STACKSIZE:[0-9]+]]
-; CHECK-NOFP: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK-NOFP: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
-; CHECK-NOFP: str x7, [x[[GPRBASE]], #48]
-; CHECK-NOFP-NOT: str q7,
-; CHECK-NOFP: str x1, [sp, #[[GPRFROMSP]]]
-
-; Omit the middle ones
-
-; CHECK: str q0, [sp]
-; CHECK: str x1, [sp, #[[GPRFROMSP]]]
-
-; CHECK-NOFP-NOT: str q0, [sp]
-
- %addr = bitcast %va_list* @var to i8*
- call void @llvm.va_start(i8* %addr)
-; CHECK: movn [[VR_OFFS:w[0-9]+]], #127
-; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
-; CHECK: movn [[GR_OFFS:w[0-9]+]], #55
-; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24]
-; CHECK: add [[VR_TOP:x[0-9]+]], x[[FPRBASE]], #128
-; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
-; CHECK: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #56
-; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
-; CHECK: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
-; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
-
-; CHECK-NOFP: str wzr, [x[[VA_LIST]], #28]
-; CHECK-NOFP: movn [[GR_OFFS:w[0-9]+]], #55
-; CHECK-NOFP: str [[GR_OFFS]], [x[[VA_LIST]], #24]
-; CHECK-NOFP: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #56
-; CHECK-NOFP: str [[GR_TOP]], [x[[VA_LIST]], #8]
-; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
-; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
-
- ret void
-}
-
-define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
-; CHECK-LABEL: test_fewargs:
-; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
-; CHECK: mov x[[FPRBASE:[0-9]+]], sp
-; CHECK: str q7, [x[[FPRBASE]], #96]
-; CHECK: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
-; CHECK: str x7, [x[[GPRBASE]], #32]
-
-; CHECK-NOFP: sub sp, sp, #[[STACKSIZE:[0-9]+]]
-; CHECK-NOFP-NOT: str q7,
-; CHECK-NOFP: mov x[[GPRBASE:[0-9]+]], sp
-; CHECK-NOFP: str x7, [x[[GPRBASE]], #24]
-
-; Omit the middle ones
-
-; CHECK: str q1, [sp]
-; CHECK: str x3, [sp, #[[GPRFROMSP]]]
-
-; CHECK-NOFP-NOT: str q1, [sp]
-; CHECK-NOFP: str x4, [sp]
-
- %addr = bitcast %va_list* @var to i8*
- call void @llvm.va_start(i8* %addr)
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK: movn [[VR_OFFS:w[0-9]+]], #111
-; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
-; CHECK: movn [[GR_OFFS:w[0-9]+]], #39
-; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24]
-; CHECK: add [[VR_TOP:x[0-9]+]], x[[FPRBASE]], #112
-; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
-; CHECK: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #40
-; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
-; CHECK: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
-; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
-
-; CHECK-NOFP: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK-NOFP: str wzr, [x[[VA_LIST]], #28]
-; CHECK-NOFP: movn [[GR_OFFS:w[0-9]+]], #31
-; CHECK-NOFP: str [[GR_OFFS]], [x[[VA_LIST]], #24]
-; CHECK-NOFP: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #32
-; CHECK-NOFP: str [[GR_TOP]], [x[[VA_LIST]], #8]
-; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
-; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
-
- ret void
-}
-
-define void @test_nospare([8 x i64], [8 x float], ...) {
-; CHECK-LABEL: test_nospare:
-
- %addr = bitcast %va_list* @var to i8*
- call void @llvm.va_start(i8* %addr)
-; CHECK-NOT: sub sp, sp
-; CHECK: mov [[STACK:x[0-9]+]], sp
-; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
-
-; CHECK-NOFP-NOT: sub sp, sp
-; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #64
-; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
- ret void
-}
-
-; If there are non-variadic arguments on the stack (here two i64s) then the
-; __stack field should point just past them.
-define void @test_offsetstack([10 x i64], [3 x float], ...) {
-; CHECK-LABEL: test_offsetstack:
-; CHECK: sub sp, sp, #80
-; CHECK: mov x[[FPRBASE:[0-9]+]], sp
-; CHECK: str q7, [x[[FPRBASE]], #64]
-
-; CHECK-NOT: str x{{[0-9]+}},
-
-; CHECK-NOFP-NOT: str q7,
-; CHECK-NOT: str x7,
-
-; Omit the middle ones
-
-; CHECK: str q3, [sp]
-
- %addr = bitcast %va_list* @var to i8*
- call void @llvm.va_start(i8* %addr)
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK: movn [[VR_OFFS:w[0-9]+]], #79
-; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
-; CHECK: str wzr, [x[[VA_LIST]], #24]
-; CHECK: add [[VR_TOP:x[0-9]+]], x[[FPRBASE]], #80
-; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
-; CHECK: add [[STACK:x[0-9]+]], sp, #96
-; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
-
-; CHECK-NOFP: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #40
-; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
-; CHECK-NOFP: str wzr, [x[[VA_LIST]], #28]
-; CHECK-NOFP: str wzr, [x[[VA_LIST]], #24]
- ret void
-}
-
-declare void @llvm.va_end(i8*)
-
-define void @test_va_end() nounwind {
-; CHECK-LABEL: test_va_end:
-; CHECK-NEXT: BB#0
-; CHECK-NOFP: BB#0
-
- %addr = bitcast %va_list* @var to i8*
- call void @llvm.va_end(i8* %addr)
-
- ret void
-; CHECK-NEXT: ret
-; CHECK-NOFP-NEXT: ret
-}
-
-declare void @llvm.va_copy(i8* %dest, i8* %src)
-
-@second_list = global %va_list zeroinitializer
-
-define void @test_va_copy() {
-; CHECK-LABEL: test_va_copy:
- %srcaddr = bitcast %va_list* @var to i8*
- %dstaddr = bitcast %va_list* @second_list to i8*
- call void @llvm.va_copy(i8* %dstaddr, i8* %srcaddr)
-
-; Check beginning and end again:
-
-; CHECK: add x[[SRC_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK: add x[[DEST_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:second_list
-; CHECK: ldr [[BLOCK1:x[0-9]+]], [{{x[0-9]+}}, #:lo12:var]
-; CHECK: ldr [[BLOCK2:x[0-9]+]], [x[[SRC_LIST]], #24]
-; CHECK: str [[BLOCK1]], [{{x[0-9]+}}, #:lo12:second_list]
-; CHECK: str [[BLOCK2]], [x[[DEST_LIST]], #24]
-
-; CHECK-NOFP: add x[[SRC_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK-NOFP: add x[[DEST_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:second_list
-; CHECK-NOFP: ldr [[BLOCK1:x[0-9]+]], [{{x[0-9]+}}, #:lo12:var]
-; CHECK-NOFP: ldr [[BLOCK2:x[0-9]+]], [x[[SRC_LIST]], #24]
-; CHECK-NOFP: str [[BLOCK1]], [{{x[0-9]+}}, #:lo12:second_list]
-; CHECK-NOFP: str [[BLOCK2]], [x[[DEST_LIST]], #24]
-
- ret void
-; CHECK: ret
-; CHECK-NOFP: ret
-}
diff --git a/test/CodeGen/AArch64/zero-reg.ll b/test/CodeGen/AArch64/zero-reg.ll
index 9b1e52770ce4..bc112ab8db98 100644
--- a/test/CodeGen/AArch64/zero-reg.ll
+++ b/test/CodeGen/AArch64/zero-reg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
@var32 = global i32 0
@var64 = global i64 0
@@ -7,9 +7,9 @@ define void @test_zr() {
; CHECK-LABEL: test_zr:
store i32 0, i32* @var32
-; CHECK: str wzr, [{{x[0-9]+}}, #:lo12:var32]
+; CHECK: str wzr, [{{x[0-9]+}}, {{#?}}:lo12:var32]
store i64 0, i64* @var64
-; CHECK: str xzr, [{{x[0-9]+}}, #:lo12:var64]
+; CHECK: str xzr, [{{x[0-9]+}}, {{#?}}:lo12:var64]
ret void
; CHECK: ret
@@ -23,8 +23,7 @@ define void @test_sp(i32 %val) {
; instruction (0b11111 in the Rn field would mean "sp").
%addr = getelementptr i32* null, i64 0
store i32 %val, i32* %addr
-; CHECK: mov x[[NULL:[0-9]+]], xzr
-; CHECK: str {{w[0-9]+}}, [x[[NULL]]]
+; CHECK: str {{w[0-9]+}}, [{{x[0-9]+|sp}}]
ret void
; CHECK: ret
diff --git a/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll b/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll
index a0235f787061..f8bd886aa9e1 100644
--- a/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll
+++ b/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o /dev/null
%struct.layer_data = type { i32, [2048 x i8], i8*, [16 x i8], i32, i8*, i32, i32, [64 x i32], [64 x i32], [64 x i32], [64 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [12 x [64 x i16]] }
@ld = external global %struct.layer_data* ; <%struct.layer_data**> [#uses=1]
diff --git a/test/CodeGen/ARM/2007-04-03-PEIBug.ll b/test/CodeGen/ARM/2007-04-03-PEIBug.ll
index 8d3337c29fcf..cf5094fb3800 100644
--- a/test/CodeGen/ARM/2007-04-03-PEIBug.ll
+++ b/test/CodeGen/ARM/2007-04-03-PEIBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | not grep "add.*#0"
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @foo() {
entry:
@@ -10,3 +10,6 @@ entry:
}
declare i32 @bar(...)
+
+; CHECK-NOT: add{{.*}}#0
+
diff --git a/test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll b/test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll
index b3b0769347f1..99e67d501ca0 100644
--- a/test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll
+++ b/test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o /dev/null
define i32 @test3() {
tail call void asm sideeffect "/* number: ${0:c} */", "i"( i32 1 )
diff --git a/test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll b/test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll
index 670048bf25c4..5988c65dae6c 100644
--- a/test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll
+++ b/test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | not grep "str.*\!"
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
%struct.shape_edge_t = type { %struct.shape_edge_t*, %struct.shape_edge_t*, i32, i32, i32, i32 }
%struct.shape_path_t = type { %struct.shape_edge_t*, %struct.shape_edge_t*, i32, i32, i32, i32, i32, i32 }
@@ -32,3 +32,6 @@ bb140: ; preds = %bb140, %cond_false
bb174: ; preds = %bb140, %cond_false
ret %struct.shape_path_t* null
}
+
+; CHECK-NOT: str{{.*}}!
+
diff --git a/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll b/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll
index a604c5cd574e..dabe62003d9f 100644
--- a/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll
+++ b/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | not grep 255
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
define i32 @main(i32 %argc, i8** %argv) {
entry:
@@ -12,3 +12,8 @@ bb2: ; preds = %bb1
bb3: ; preds = %bb1
ret i32 0
}
+
+; CHECK-NOT: 255
+; CHECK: .file{{.*}}SxtInRegBug.ll
+; CHECK-NOT: 255
+
diff --git a/test/CodeGen/ARM/2008-07-17-Fdiv.ll b/test/CodeGen/ARM/2008-07-17-Fdiv.ll
index 4cb768ef5b6d..9f50d92a4d8e 100644
--- a/test/CodeGen/ARM/2008-07-17-Fdiv.ll
+++ b/test/CodeGen/ARM/2008-07-17-Fdiv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define float @f(float %a, float %b) nounwind {
%tmp = fdiv float %a, %b
diff --git a/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll b/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll
index 83fde07779bc..e86bc1ba5ccc 100644
--- a/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll
+++ b/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
; PR2589
define void @main({ i32 }*) {
diff --git a/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll b/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
index 601a516eb09a..d16ad8cfbabf 100644
--- a/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
+++ b/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o /dev/null
define hidden i64 @__muldi3(i64 %u, i64 %v) nounwind {
entry:
diff --git a/test/CodeGen/ARM/2009-03-09-AddrModeBug.ll b/test/CodeGen/ARM/2009-03-09-AddrModeBug.ll
index a1ce384b5345..7bb1429872b9 100644
--- a/test/CodeGen/ARM/2009-03-09-AddrModeBug.ll
+++ b/test/CodeGen/ARM/2009-03-09-AddrModeBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
%struct.hit_t = type { %struct.v_t, double }
%struct.node_t = type { %struct.hit_t, %struct.hit_t, i32 }
diff --git a/test/CodeGen/ARM/2009-04-06-AsmModifier.ll b/test/CodeGen/ARM/2009-04-06-AsmModifier.ll
index 7342f69631e6..e90c5b322db7 100644
--- a/test/CodeGen/ARM/2009-04-06-AsmModifier.ll
+++ b/test/CodeGen/ARM/2009-04-06-AsmModifier.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | grep "swi 107"
+; RUN: llc -mtriple=arm-eabi -no-integrated-as %s -o - | FileCheck %s
define i32 @_swilseek(i32) nounwind {
entry:
@@ -18,3 +18,6 @@ return: ; preds = %entry
%4 = load i32* %retval ; <i32> [#uses=1]
ret i32 %4
}
+
+; CHECK: swi 107
+
diff --git a/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll b/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll
index f6b3d2c0147b..ade6a10afef6 100644
--- a/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll
+++ b/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
; PR3795
define fastcc void @_D3foo3fooFAriZv({ i32, { double, double }* } %d_arg, i32 %x_arg) {
diff --git a/test/CodeGen/ARM/2009-04-08-FREM.ll b/test/CodeGen/ARM/2009-04-08-FREM.ll
index 99907fc697bd..606c6b1471b4 100644
--- a/test/CodeGen/ARM/2009-04-08-FREM.ll
+++ b/test/CodeGen/ARM/2009-04-08-FREM.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
declare i32 @printf(i8*, ...)
diff --git a/test/CodeGen/ARM/2009-04-08-FloatUndef.ll b/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
index 05d2f26be0b7..9e32e05b040b 100644
--- a/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
+++ b/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>* %CONST) {
entry:
diff --git a/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll b/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll
index deb092bbf86e..5b1746301f4e 100644
--- a/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll
+++ b/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
; PR3954
define void @foo(...) nounwind {
diff --git a/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll b/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll
index 7046fccb5ee9..2bc7df028534 100644
--- a/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll
+++ b/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
+
%struct.List = type { %struct.List*, i32 }
@Node5 = external constant %struct.List ; <%struct.List*> [#uses=1]
@"\01LC" = external constant [7 x i8] ; <[7 x i8]*> [#uses=1]
diff --git a/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll b/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
index 1e2707f7b5bb..5d59fc64d922 100644
--- a/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
+++ b/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-; RUN: llc < %s -march=thumb | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
; PR4091
define void @foo(i32 %i, i32* %p) nounwind {
diff --git a/test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll b/test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll
index e1e94b641214..3cef0aa546a5 100644
--- a/test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll
+++ b/test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o /dev/null
define void @test(i8* %x) nounwind {
entry:
diff --git a/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll b/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
index 67616877beb2..bc4a95c3e00b 100644
--- a/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
+++ b/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
%struct.cli_ac_alt = type { i8, i8*, i16, i16, %struct.cli_ac_alt* }
%struct.cli_ac_node = type { i8, i8, %struct.cli_ac_patt*, %struct.cli_ac_node**, %struct.cli_ac_node* }
diff --git a/test/CodeGen/ARM/2009-08-23-linkerprivate.ll b/test/CodeGen/ARM/2009-08-23-linkerprivate.ll
deleted file mode 100644
index 392c70a9fd3e..000000000000
--- a/test/CodeGen/ARM/2009-08-23-linkerprivate.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | FileCheck %s
-
-; ModuleID = '/Volumes/MacOS9/tests/WebKit/JavaScriptCore/profiler/ProfilerServer.mm'
-
-@"\01l_objc_msgSend_fixup_alloc" = linker_private_weak hidden global i32 0, section "__DATA, __objc_msgrefs, coalesced", align 16
-
-; CHECK: .globl l_objc_msgSend_fixup_alloc
-; CHECK: .weak_definition l_objc_msgSend_fixup_alloc
diff --git a/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll b/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
index ee99c70ff0e6..b078ec06dbb8 100644
--- a/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
+++ b/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; pr4843
+
define <4 x i16> @v2regbug(<4 x i16>* %B) nounwind {
;CHECK-LABEL: v2regbug:
;CHECK: vzip.16
diff --git a/test/CodeGen/ARM/2009-09-10-postdec.ll b/test/CodeGen/ARM/2009-09-10-postdec.ll
index 10653b51c146..66ffe6a1a0fb 100644
--- a/test/CodeGen/ARM/2009-09-10-postdec.ll
+++ b/test/CodeGen/ARM/2009-09-10-postdec.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
; Radar 7213850
define i32 @test(i8* %d, i32 %x, i32 %y) nounwind {
diff --git a/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll b/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll
index 758b59a4638d..dd9a6fd12d7e 100644
--- a/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll
+++ b/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon -mcpu=cortex-a9
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -mcpu=cortex-a9 %s -o /dev/null
define arm_aapcs_vfpcc <4 x float> @foo(i8* nocapture %pBuffer, i32 %numItems) nounwind {
%1 = ptrtoint i8* %pBuffer to i32
diff --git a/test/CodeGen/ARM/2009-09-24-spill-align.ll b/test/CodeGen/ARM/2009-09-24-spill-align.ll
index eb9c2d0f7f8f..224bd019481d 100644
--- a/test/CodeGen/ARM/2009-09-24-spill-align.ll
+++ b/test/CodeGen/ARM/2009-09-24-spill-align.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; pr4926
define void @test_vget_lanep16() nounwind {
diff --git a/test/CodeGen/ARM/2009-11-02-NegativeLane.ll b/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
index ca5ae8b62e8b..2597b413ec7c 100644
--- a/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
+++ b/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=cortex-a8 < %s | FileCheck %s
+; RUN: llc -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "armv7-eabi"
diff --git a/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll b/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
index 4fb2be02ce9a..38eb0ea2c891 100644
--- a/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
+++ b/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=cortex-a8 -mattr=-neonfp < %s | FileCheck %s
+; RUN: llc -mcpu=cortex-a8 -mattr=-neonfp -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
; PR5423
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll b/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll
index b0b4cb37d1a1..5e75d460aa7f 100644
--- a/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll
+++ b/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define void @"java.lang.String::getChars"([84 x i8]* %method, i32 %base_pc, [788 x i8]* %thread) {
%1 = sub i32 undef, 48 ; <i32> [#uses=1]
diff --git a/test/CodeGen/ARM/2010-04-09-NeonSelect.ll b/test/CodeGen/ARM/2010-04-09-NeonSelect.ll
index 89d6a68fcaeb..ceef0830fd2e 100644
--- a/test/CodeGen/ARM/2010-04-09-NeonSelect.ll
+++ b/test/CodeGen/ARM/2010-04-09-NeonSelect.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=arm -mattr=+neon < %s
-; Radar 7770501: Don't crash on SELECT and SELECT_CC with NEON vector values.
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o /dev/null
+; rdar://7770501: Don't crash on SELECT and SELECT_CC with NEON vector values.
define void @vDSP_FFT16_copv(float* nocapture %O, float* nocapture %I, i32 %Direction) nounwind {
entry:
diff --git a/test/CodeGen/ARM/2010-04-14-SplitVector.ll b/test/CodeGen/ARM/2010-04-14-SplitVector.ll
index 5d0c3cf74aa5..cb3e04259ae3 100644
--- a/test/CodeGen/ARM/2010-04-14-SplitVector.ll
+++ b/test/CodeGen/ARM/2010-04-14-SplitVector.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=arm1136jf-s
+; RUN: llc -mtriple=arm-eabi -mcpu=arm1136jf-s %s -o /dev/null
; Radar 7854640
define void @test() nounwind {
diff --git a/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll b/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
index 35995b77c5bc..b040b2d91cd6 100644
--- a/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
+++ b/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
@@ -4,22 +4,26 @@
%struct.foo = type { i64, i64 }
-define zeroext i8 @t(%struct.foo* %this) noreturn optsize {
+define zeroext i8 @t(%struct.foo* %this, i1 %tst) noreturn optsize {
entry:
; ARM-LABEL: t:
-; ARM: str r2, [r1], r0
+; ARM-DAG: mov r[[ADDR:[0-9]+]], #8
+; ARM-DAG: mov [[VAL:r[0-9]+]], #0
+; ARM: str [[VAL]], [r[[ADDR]]], r0
; THUMB-LABEL: t:
-; THUMB-NOT: str r0, [r1], r0
-; THUMB: str r1, [r0]
+; THUMB-DAG: movs r[[ADDR:[0-9]+]], #8
+; THUMB-DAG: movs [[VAL:r[0-9]+]], #0
+; THUMB-NOT: str {{[a-z0-9]+}}, [{{[a-z0-9]+}}], {{[a-z0-9]+}}
+; THUMB: str [[VAL]], [r[[ADDR]]]
%0 = getelementptr inbounds %struct.foo* %this, i32 0, i32 1 ; <i64*> [#uses=1]
store i32 0, i32* inttoptr (i32 8 to i32*), align 8
- br i1 undef, label %bb.nph96, label %bb3
+ br i1 %tst, label %bb.nph96, label %bb3
bb3: ; preds = %entry
%1 = load i64* %0, align 4 ; <i64> [#uses=0]
- unreachable
+ ret i8 42
bb.nph96: ; preds = %entry
- unreachable
+ ret i8 3
}
diff --git a/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll b/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll
index e0f50c97ba52..cfaffd8234ba 100644
--- a/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll
+++ b/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon -O0 -optimize-regalloc -regalloc=basic
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -O0 -optimize-regalloc -regalloc=basic %s -o /dev/null
; This test would crash the rewriter when trying to handle a spill after one of
; the @llvm.arm.neon.vld3.v8i8 calls defined three parts of a register.
diff --git a/test/CodeGen/ARM/2010-05-21-BuildVector.ll b/test/CodeGen/ARM/2010-05-21-BuildVector.ll
index a400b7b288ce..5bc08b037a1c 100644
--- a/test/CodeGen/ARM/2010-05-21-BuildVector.ll
+++ b/test/CodeGen/ARM/2010-05-21-BuildVector.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
; Radar 7872877
define void @test(float* %fltp, i32 %packedValue, float* %table) nounwind {
diff --git a/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll b/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
index 6f487962310f..f7ceb6e7e480 100644
--- a/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
+++ b/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon
-; Radar 8084742
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o /dev/null
+; rdar://8084742
%struct.__int8x8x2_t = type { [2 x <8 x i8>] }
diff --git a/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll b/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll
index 984583e80688..fcabc900afa6 100644
--- a/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll
+++ b/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o /dev/null
@.str271 = external constant [21 x i8], align 4 ; <[21 x i8]*> [#uses=1]
@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (i32, i8**)* @main to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
diff --git a/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll b/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll
index 2842437e7e42..80822c2c426b 100644
--- a/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll
+++ b/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll
@@ -1,4 +1,4 @@
-; RUN: llc -enable-correct-eh-support < %s
+; RUN: llc < %s
; PR7716
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10.0.0"
diff --git a/test/CodeGen/ARM/2010-08-04-StackVariable.ll b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
index 7aacd1aa70ca..48de24497189 100644
--- a/test/CodeGen/ARM/2010-08-04-StackVariable.ll
+++ b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
@@ -123,7 +123,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!43 = metadata !{i32 26, i32 0, metadata !39, null}
!44 = metadata !{i32 786688, metadata !39, metadata !"k", metadata !2, i32 26, metadata !13, i32 0, i32 0} ; [ DW_TAG_auto_variable ]
!45 = metadata !{i32 27, i32 0, metadata !39, null}
-!46 = metadata !{metadata !0, metadata !9, metadata !16, metadata !17, metadata !20}
-!47 = metadata !{i32 0}
+!46 = metadata !{metadata !16, metadata !17, metadata !20}
+!47 = metadata !{}
!48 = metadata !{metadata !"small.cc", metadata !"/Users/manav/R8248330"}
!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll b/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll
deleted file mode 100644
index 305369435138..000000000000
--- a/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll
+++ /dev/null
@@ -1,285 +0,0 @@
-; This tests that MC/asm header conversion is smooth and that the
-; build attributes are correct
-
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi | FileCheck %s --check-prefix=V6
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi | FileCheck %s --check-prefix=V6M
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s | FileCheck %s --check-prefix=ARM1156T2F-S
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8
-; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-neon,-crypto | FileCheck %s --check-prefix=V8-FPARMv8
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-fp-armv8,-crypto | FileCheck %s --check-prefix=V8-NEON
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-crypto | FileCheck %s --check-prefix=V8-FPARMv8-NEON
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8-FPARMv8-NEON-CRYPTO
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9-mp | FileCheck %s --check-prefix=CORTEX-A9-MP
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD
-; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57
-
-; V6: .eabi_attribute 6, 6
-; V6: .eabi_attribute 8, 1
-; V6: .eabi_attribute 24, 1
-; V6: .eabi_attribute 25, 1
-; V6-NOT: .eabi_attribute 27
-; V6-NOT: .eabi_attribute 28
-; V6-NOT: .eabi_attribute 36
-; V6-NOT: .eabi_attribute 42
-; V6-NOT: .eabi_attribute 68
-
-; V6M: .eabi_attribute 6, 12
-; V6M: .eabi_attribute 7, 77
-; V6M: .eabi_attribute 8, 0
-; V6M: .eabi_attribute 9, 1
-; V6M: .eabi_attribute 24, 1
-; V6M: .eabi_attribute 25, 1
-; V6M-NOT: .eabi_attribute 27
-; V6M-NOT: .eabi_attribute 28
-; V6M-NOT: .eabi_attribute 36
-; V6M-NOT: .eabi_attribute 42
-; V6M-NOT: .eabi_attribute 68
-
-; ARM1156T2F-S: .cpu arm1156t2f-s
-; ARM1156T2F-S: .eabi_attribute 6, 8
-; ARM1156T2F-S: .eabi_attribute 8, 1
-; ARM1156T2F-S: .eabi_attribute 9, 2
-; ARM1156T2F-S: .fpu vfpv2
-; ARM1156T2F-S: .eabi_attribute 20, 1
-; ARM1156T2F-S: .eabi_attribute 21, 1
-; ARM1156T2F-S: .eabi_attribute 23, 3
-; ARM1156T2F-S: .eabi_attribute 24, 1
-; ARM1156T2F-S: .eabi_attribute 25, 1
-; ARM1156T2F-S-NOT: .eabi_attribute 27
-; ARM1156T2F-S-NOT: .eabi_attribute 28
-; ARM1156T2F-S-NOT: .eabi_attribute 36
-; ARM1156T2F-S-NOT: .eabi_attribute 42
-; ARM1156T2F-S-NOT: .eabi_attribute 68
-
-; V7M: .eabi_attribute 6, 10
-; V7M: .eabi_attribute 7, 77
-; V7M: .eabi_attribute 8, 0
-; V7M: .eabi_attribute 9, 2
-; V7M: .eabi_attribute 24, 1
-; V7M: .eabi_attribute 25, 1
-; V7M-NOT: .eabi_attribute 27
-; V7M-NOT: .eabi_attribute 28
-; V7M-NOT: .eabi_attribute 36
-; V7M-NOT: .eabi_attribute 42
-; V7M: .eabi_attribute 44, 0
-; V7M-NOT: .eabi_attribute 68
-
-; V7: .syntax unified
-; V7: .eabi_attribute 6, 10
-; V7: .eabi_attribute 20, 1
-; V7: .eabi_attribute 21, 1
-; V7: .eabi_attribute 23, 3
-; V7: .eabi_attribute 24, 1
-; V7: .eabi_attribute 25, 1
-; V7-NOT: .eabi_attribute 27
-; V7-NOT: .eabi_attribute 28
-; V7-NOT: .eabi_attribute 36
-; V7-NOT: .eabi_attribute 42
-; V7-NOT: .eabi_attribute 68
-
-; V8: .syntax unified
-; V8: .eabi_attribute 6, 14
-
-; Vt8: .syntax unified
-; Vt8: .eabi_attribute 6, 14
-
-; V8-FPARMv8: .syntax unified
-; V8-FPARMv8: .eabi_attribute 6, 14
-; V8-FPARMv8: .fpu fp-armv8
-
-; V8-NEON: .syntax unified
-; V8-NEON: .eabi_attribute 6, 14
-; V8-NEON: .fpu neon
-; V8-NEON: .eabi_attribute 12, 3
-
-; V8-FPARMv8-NEON: .syntax unified
-; V8-FPARMv8-NEON: .eabi_attribute 6, 14
-; V8-FPARMv8-NEON: .fpu neon-fp-armv8
-; V8-FPARMv8-NEON: .eabi_attribute 12, 3
-
-; V8-FPARMv8-NEON-CRYPTO: .syntax unified
-; V8-FPARMv8-NEON-CRYPTO: .eabi_attribute 6, 14
-; V8-FPARMv8-NEON-CRYPTO: .fpu crypto-neon-fp-armv8
-; V8-FPARMv8-NEON-CRYPTO: .eabi_attribute 12, 3
-
-; CORTEX-A9-SOFT: .cpu cortex-a9
-; CORTEX-A9-SOFT: .eabi_attribute 6, 10
-; CORTEX-A9-SOFT: .eabi_attribute 7, 65
-; CORTEX-A9-SOFT: .eabi_attribute 8, 1
-; CORTEX-A9-SOFT: .eabi_attribute 9, 2
-; CORTEX-A9-SOFT: .fpu neon
-; CORTEX-A9-SOFT: .eabi_attribute 20, 1
-; CORTEX-A9-SOFT: .eabi_attribute 21, 1
-; CORTEX-A9-SOFT: .eabi_attribute 23, 3
-; CORTEX-A9-SOFT: .eabi_attribute 24, 1
-; CORTEX-A9-SOFT: .eabi_attribute 25, 1
-; CORTEX-A9-SOFT-NOT: .eabi_attribute 27
-; CORTEX-A9-SOFT-NOT: .eabi_attribute 28
-; CORTEX-A9-SOFT: .eabi_attribute 36, 1
-; CORTEX-A9-SOFT-NOT: .eabi_attribute 42
-; CORTEX-A9-SOFT: .eabi_attribute 68, 1
-
-; CORTEX-A9-HARD: .cpu cortex-a9
-; CORTEX-A9-HARD: .eabi_attribute 6, 10
-; CORTEX-A9-HARD: .eabi_attribute 7, 65
-; CORTEX-A9-HARD: .eabi_attribute 8, 1
-; CORTEX-A9-HARD: .eabi_attribute 9, 2
-; CORTEX-A9-HARD: .fpu neon
-; CORTEX-A9-HARD: .eabi_attribute 20, 1
-; CORTEX-A9-HARD: .eabi_attribute 21, 1
-; CORTEX-A9-HARD: .eabi_attribute 23, 3
-; CORTEX-A9-HARD: .eabi_attribute 24, 1
-; CORTEX-A9-HARD: .eabi_attribute 25, 1
-; CORTEX-A9-HARD-NOT: .eabi_attribute 27
-; CORTEX-A9-HARD: .eabi_attribute 28, 1
-; CORTEX-A9-HARD: .eabi_attribute 36, 1
-; CORTEX-A9-HARD-NOT: .eabi_attribute 42
-; CORTEX-A9-HARD: .eabi_attribute 68, 1
-
-; CORTEX-A9-MP: .cpu cortex-a9-mp
-; CORTEX-A9-MP: .eabi_attribute 6, 10
-; CORTEX-A9-MP: .eabi_attribute 7, 65
-; CORTEX-A9-MP: .eabi_attribute 8, 1
-; CORTEX-A9-MP: .eabi_attribute 9, 2
-; CORTEX-A9-MP: .fpu neon
-; CORTEX-A9-MP: .eabi_attribute 20, 1
-; CORTEX-A9-MP: .eabi_attribute 21, 1
-; CORTEX-A9-MP: .eabi_attribute 23, 3
-; CORTEX-A9-MP: .eabi_attribute 24, 1
-; CORTEX-A9-MP: .eabi_attribute 25, 1
-; CORTEX-A9-NOT: .eabi_attribute 27
-; CORTEX-A9-NOT: .eabi_attribute 28
-; CORTEX-A9-MP: .eabi_attribute 36, 1
-; CORTEX-A9-MP: .eabi_attribute 42, 1
-; CORTEX-A9-MP: .eabi_attribute 68, 1
-
-; CORTEX-A15: .cpu cortex-a15
-; CORTEX-A15: .eabi_attribute 6, 10
-; CORTEX-A15: .eabi_attribute 7, 65
-; CORTEX-A15: .eabi_attribute 8, 1
-; CORTEX-A15: .eabi_attribute 9, 2
-; CORTEX-A15: .fpu neon-vfpv4
-; CORTEX-A15: .eabi_attribute 20, 1
-; CORTEX-A15: .eabi_attribute 21, 1
-; CORTEX-A15: .eabi_attribute 23, 3
-; CORTEX-A15: .eabi_attribute 24, 1
-; CORTEX-A15: .eabi_attribute 25, 1
-; CORTEX-A15-NOT: .eabi_attribute 27
-; CORTEX-A15-NOT: .eabi_attribute 28
-; CORTEX-A15: .eabi_attribute 36, 1
-; CORTEX-A15: .eabi_attribute 42, 1
-; CORTEX-A15: .eabi_attribute 44, 2
-; CORTEX-A15: .eabi_attribute 68, 3
-
-; CORTEX-M0: .cpu cortex-m0
-; CORTEX-M0: .eabi_attribute 6, 12
-; CORTEX-M0: .eabi_attribute 7, 77
-; CORTEX-M0: .eabi_attribute 8, 0
-; CORTEX-M0: .eabi_attribute 9, 1
-; CORTEX-M0: .eabi_attribute 24, 1
-; CORTEX-M0: .eabi_attribute 25, 1
-; CORTEX-M0-NOT: .eabi_attribute 27
-; CORTEX-M0-NOT: .eabi_attribute 28
-; CORTEX-M0-NOT: .eabi_attribute 36
-; CORTEX-M0-NOT: .eabi_attribute 42
-; CORTEX-M0-NOT: .eabi_attribute 68
-
-; CORTEX-M4-SOFT: .cpu cortex-m4
-; CORTEX-M4-SOFT: .eabi_attribute 6, 13
-; CORTEX-M4-SOFT: .eabi_attribute 7, 77
-; CORTEX-M4-SOFT: .eabi_attribute 8, 0
-; CORTEX-M4-SOFT: .eabi_attribute 9, 2
-; CORTEX-M4-SOFT: .fpu vfpv4-d16
-; CORTEX-M4-SOFT: .eabi_attribute 20, 1
-; CORTEX-M4-SOFT: .eabi_attribute 21, 1
-; CORTEX-M4-SOFT: .eabi_attribute 23, 3
-; CORTEX-M4-SOFT: .eabi_attribute 24, 1
-; CORTEX-M4-SOFT: .eabi_attribute 25, 1
-; CORTEX-M4-SOFT: .eabi_attribute 27, 1
-; CORTEX-M4-SOFT-NOT: .eabi_attribute 28
-; CORTEX-M4-SOFT: .eabi_attribute 36, 1
-; CORTEX-M4-SOFT-NOT: .eabi_attribute 42
-; CORTEX-M4-SOFT: .eabi_attribute 44, 0
-; CORTEX-M4-SOFT-NOT: .eabi_attribute 68
-
-; CORTEX-M4-HARD: .cpu cortex-m4
-; CORTEX-M4-HARD: .eabi_attribute 6, 13
-; CORTEX-M4-HARD: .eabi_attribute 7, 77
-; CORTEX-M4-HARD: .eabi_attribute 8, 0
-; CORTEX-M4-HARD: .eabi_attribute 9, 2
-; CORTEX-M4-HARD: .fpu vfpv4-d16
-; CORTEX-M4-HARD: .eabi_attribute 20, 1
-; CORTEX-M4-HARD: .eabi_attribute 21, 1
-; CORTEX-M4-HARD: .eabi_attribute 23, 3
-; CORTEX-M4-HARD: .eabi_attribute 24, 1
-; CORTEX-M4-HARD: .eabi_attribute 25, 1
-; CORTEX-M4-HARD: .eabi_attribute 27, 1
-; CORTEX-M4-HARD: .eabi_attribute 28, 1
-; CORTEX-M4-HARD: .eabi_attribute 36, 1
-; CORTEX-M4-HARD-NOT: .eabi_attribute 42
-; CORTEX-M4-HARD: .eabi_attribute 44, 0
-; CORTEX-M4-HRAD-NOT: .eabi_attribute 68
-
-; CORTEX-R5: .cpu cortex-r5
-; CORTEX-R5: .eabi_attribute 6, 10
-; CORTEX-R5: .eabi_attribute 7, 82
-; CORTEX-R5: .eabi_attribute 8, 1
-; CORTEX-R5: .eabi_attribute 9, 2
-; CORTEX-R5: .fpu vfpv3-d16
-; CORTEX-R5: .eabi_attribute 20, 1
-; CORTEX-R5: .eabi_attribute 21, 1
-; CORTEX-R5: .eabi_attribute 23, 3
-; CORTEX-R5: .eabi_attribute 24, 1
-; CORTEX-R5: .eabi_attribute 25, 1
-; CORTEX-R5: .eabi_attribute 27, 1
-; CORTEX-R5-NOT: .eabi_attribute 28
-; CORTEX-R5-NOT: .eabi_attribute 36
-; CORTEX-R5-NOT: .eabi_attribute 42
-; CORTEX-R5: .eabi_attribute 44, 2
-; CORTEX-R5-NOT: .eabi_attribute 68
-
-; CORTEX-A53: .cpu cortex-a53
-; CORTEX-A53: .eabi_attribute 6, 14
-; CORTEX-A53: .eabi_attribute 7, 65
-; CORTEX-A53: .eabi_attribute 8, 1
-; CORTEX-A53: .eabi_attribute 9, 2
-; CORTEX-A53: .fpu crypto-neon-fp-armv8
-; CORTEX-A53: .eabi_attribute 12, 3
-; CORTEX-A53: .eabi_attribute 24, 1
-; CORTEX-A53: .eabi_attribute 25, 1
-; CORTEX-A53-NOT: .eabi_attribute 27
-; CORTEX-A53-NOT: .eabi_attribute 28
-; CORTEX-A53: .eabi_attribute 36, 1
-; CORTEX-A53: .eabi_attribute 42, 1
-; CORTEX-A53: .eabi_attribute 44, 2
-; CORTEX-A53: .eabi_attribute 68, 3
-
-; CORTEX-A57: .cpu cortex-a57
-; CORTEX-A57: .eabi_attribute 6, 14
-; CORTEX-A57: .eabi_attribute 7, 65
-; CORTEX-A57: .eabi_attribute 8, 1
-; CORTEX-A57: .eabi_attribute 9, 2
-; CORTEX-A57: .fpu crypto-neon-fp-armv8
-; CORTEX-A57: .eabi_attribute 12, 3
-; CORTEX-A57: .eabi_attribute 24, 1
-; CORTEX-A57: .eabi_attribute 25, 1
-; CORTEX-A57-NOT: .eabi_attribute 27
-; CORTEX-A57-NOT: .eabi_attribute 28
-; CORTEX-A57: .eabi_attribute 36, 1
-; CORTEX-A57: .eabi_attribute 42, 1
-; CORTEX-A57: .eabi_attribute 44, 2
-; CORTEX-A57: .eabi_attribute 68, 3
-
-define i32 @f(i64 %z) {
- ret i32 0
-}
diff --git a/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll b/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
index 32d350e9c8b1..e7e0580179c4 100644
--- a/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
+++ b/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=arm1136jf-s | FileCheck %s
+; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=arm1136jf-s -arm-atomic-cfg-tidy=0 | FileCheck %s
; Radar 8589805: Counting the number of microcoded operations, such as for an
; LDM instruction, was causing an assertion failure because the microop count
; was being treated as an instruction count.
@@ -11,7 +11,7 @@
define i32 @test(i32 %x) {
entry:
%0 = tail call signext i16 undef(i32* undef)
- switch i32 undef, label %bb3 [
+ switch i32 %x, label %bb3 [
i32 0, label %bb4
i32 1, label %bb1
i32 2, label %bb2
diff --git a/test/CodeGen/ARM/2010-12-07-PEIBug.ll b/test/CodeGen/ARM/2010-12-07-PEIBug.ll
index eef6abd96451..4baee64962c8 100644
--- a/test/CodeGen/ARM/2010-12-07-PEIBug.ll
+++ b/test/CodeGen/ARM/2010-12-07-PEIBug.ll
@@ -5,11 +5,11 @@ define hidden void @foo() nounwind ssp {
entry:
; CHECK-LABEL: foo:
; CHECK: mov r7, sp
-; CHECK-NEXT: vpush {d8}
; CHECK-NEXT: vpush {d10, d11}
+; CHECK-NEXT: vpush {d8}
tail call void asm sideeffect "","~{d8},~{d10},~{d11}"() nounwind
-; CHECK: vpop {d10, d11}
-; CHECK-NEXT: vpop {d8}
+; CHECK: vpop {d8}
+; CHECK-NEXT: vpop {d10, d11}
ret void
}
diff --git a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
index f57411bb2c56..b1d59aa0fde8 100644
--- a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
@@ -17,7 +17,7 @@ target triple = "thumbv7-apple-darwin10"
; DW_OP_constu
; offset
-;CHECK: .long Lset6
+;CHECK: .long Lset7
;CHECK-NEXT: @ DW_AT_type
;CHECK-NEXT: @ DW_AT_decl_file
;CHECK-NEXT: @ DW_AT_decl_line
@@ -80,7 +80,7 @@ entry:
!0 = metadata !{i32 786478, metadata !47, metadata !1, metadata !"get1", metadata !"get1", metadata !"get1", i32 4, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get1, null, null, metadata !42, i32 4} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !47} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !47, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)", i1 true, metadata !"", i32 0, metadata !48, metadata !48, metadata !40, metadata !41, metadata !41, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !47, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)", i1 true, metadata !"", i32 0, metadata !48, metadata !48, metadata !40, metadata !41, metadata !48, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !47, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5, metadata !5}
!5 = metadata !{i32 786468, metadata !47, metadata !1, metadata !"_Bool", i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
@@ -126,5 +126,5 @@ entry:
!45 = metadata !{metadata !24, metadata !25}
!46 = metadata !{metadata !27, metadata !28}
!47 = metadata !{metadata !"foo.c", metadata !"/tmp/"}
-!48 = metadata !{i32 0}
+!48 = metadata !{}
!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll b/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
index 85a113755bf4..3950c9e081f7 100644
--- a/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
+++ b/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -O3 -mtriple=armv6-apple-darwin -relocation-model=pic -mcpu=arm1136jf-s | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -O3 -mtriple=armv6-apple-darwin -relocation-model=pic -mcpu=arm1136jf-s -arm-atomic-cfg-tidy=0 | FileCheck %s
; rdar://8959122 illegal register operands for UMULL instruction
; in cfrac nightly test.
; Armv6 generates a umull that must write to two distinct destination regs.
@@ -7,7 +7,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:64-n32"
target triple = "armv6-apple-darwin10"
-define void @ptoa() nounwind {
+define void @ptoa(i1 %tst, i8* %p8, i8 %val8) nounwind {
entry:
br i1 false, label %bb3, label %bb
@@ -16,7 +16,7 @@ bb: ; preds = %entry
bb3: ; preds = %bb, %entry
%0 = call noalias i8* @malloc() nounwind
- br i1 undef, label %bb46, label %bb8
+ br i1 %tst, label %bb46, label %bb8
bb8: ; preds = %bb3
%1 = getelementptr inbounds i8* %0, i32 0
@@ -35,7 +35,7 @@ bb8: ; preds = %bb3
%7 = or i8 %6, 48
%8 = add i8 %6, 87
%iftmp.5.0.1 = select i1 %5, i8 %7, i8 %8
- store i8 %iftmp.5.0.1, i8* undef, align 1
+ store i8 %iftmp.5.0.1, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -49,7 +49,7 @@ bb8: ; preds = %bb3
%13 = or i8 %12, 48
%14 = add i8 %12, 87
%iftmp.5.0.2 = select i1 %11, i8 %13, i8 %14
- store i8 %iftmp.5.0.2, i8* undef, align 1
+ store i8 %iftmp.5.0.2, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -73,8 +73,8 @@ bb8: ; preds = %bb3
%21 = udiv i32 %2, 100000
%22 = urem i32 %21, 10
%23 = icmp ult i32 %22, 10
- %iftmp.5.0.5 = select i1 %23, i8 0, i8 undef
- store i8 %iftmp.5.0.5, i8* undef, align 1
+ %iftmp.5.0.5 = select i1 %23, i8 0, i8 %val8
+ store i8 %iftmp.5.0.5, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -88,7 +88,7 @@ bb8: ; preds = %bb3
%28 = or i8 %27, 48
%29 = add i8 %27, 87
%iftmp.5.0.6 = select i1 %26, i8 %28, i8 %29
- store i8 %iftmp.5.0.6, i8* undef, align 1
+ store i8 %iftmp.5.0.6, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -102,7 +102,7 @@ bb8: ; preds = %bb3
%34 = or i8 %33, 48
%35 = add i8 %33, 87
%iftmp.5.0.7 = select i1 %32, i8 %34, i8 %35
- store i8 %iftmp.5.0.7, i8* undef, align 1
+ store i8 %iftmp.5.0.7, i8* %p8, align 1
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
@@ -117,7 +117,7 @@ bb8: ; preds = %bb3
%41 = add i8 %39, 87
%iftmp.5.0.8 = select i1 %38, i8 %40, i8 %41
store i8 %iftmp.5.0.8, i8* null, align 1
- unreachable
+ br label %bb46
bb46: ; preds = %bb3
ret void
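
The CHECK lines in the hunks above rely on FileCheck's variable capture together with CHECK-NOT to require that UMULL writes two distinct destination registers. A minimal, hypothetical sketch of that idiom (not taken from the test itself; register names are only illustrative) is:

; CHECK: umull [[RDLO:r[0-9]+]],
; CHECK-NOT: [[RDLO]],
; CHECK: {{r[0-9]+}}, {{r[0-9]+$}}

The first directive captures RdLo, the CHECK-NOT rejects any reuse of it before the next match (i.e. as RdHi), and the final directive consumes the remaining source operands up to the end of the line.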
diff --git a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
index bc72e126b407..837feb6e85c2 100644
--- a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
+++ b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
@@ -8,7 +8,7 @@
@oStruct = external global %struct.Outer, align 4
-define void @main() nounwind {
+define void @main(i8 %val8) nounwind {
; CHECK-LABEL: main:
; CHECK-NOT: ldrd
; CHECK: mul
@@ -28,7 +28,7 @@ for.body: ; preds = %_Z14printIsNotZeroi
br i1 %tobool.i14, label %_Z14printIsNotZeroi.exit17, label %if.then.i16
if.then.i16: ; preds = %_Z14printIsNotZeroi.exit
- unreachable
+ ret void
_Z14printIsNotZeroi.exit17: ; preds = %_Z14printIsNotZeroi.exit
br label %_Z14printIsNotZeroi.exit17.for.body_crit_edge
@@ -36,7 +36,7 @@ _Z14printIsNotZeroi.exit17: ; preds = %_Z14printIsNotZeroi
_Z14printIsNotZeroi.exit17.for.body_crit_edge: ; preds = %_Z14printIsNotZeroi.exit17
%b.phi.trans.insert = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %inc, i32 3
%tmp3.pre = load i8* %b.phi.trans.insert, align 1
- %phitmp27 = icmp eq i8 undef, 0
+ %phitmp27 = icmp eq i8 %val8, 0
br label %for.body
for.end: ; preds = %_Z14printIsNotZeroi.exit17
diff --git a/test/CodeGen/ARM/2011-04-12-AlignBug.ll b/test/CodeGen/ARM/2011-04-12-AlignBug.ll
index 317be94e86b0..97297f78c7e6 100644
--- a/test/CodeGen/ARM/2011-04-12-AlignBug.ll
+++ b/test/CodeGen/ARM/2011-04-12-AlignBug.ll
@@ -3,9 +3,9 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
target triple = "thumbv7-apple-darwin10.0.0"
; CHECK: align 3
-@.v = linker_private unnamed_addr constant <4 x i32> <i32 1, i32 2, i32 3, i32 4>, align 8
+@.v = private unnamed_addr constant <4 x i32> <i32 1, i32 2, i32 3, i32 4>, align 8
; CHECK: align 2
-@.strA = linker_private unnamed_addr constant [4 x i8] c"bar\00"
+@.strA = private unnamed_addr constant [4 x i8] c"bar\00"
; CHECK-NOT: align
-@.strB = linker_private unnamed_addr constant [4 x i8] c"foo\00", align 1
-@.strC = linker_private unnamed_addr constant [4 x i8] c"baz\00", section "__TEXT,__cstring,cstring_literals", align 1
+@.strB = private unnamed_addr constant [4 x i8] c"foo\00", align 1
+@.strC = private unnamed_addr constant [4 x i8] c"baz\00", section "__TEXT,__cstring,cstring_literals", align 1
diff --git a/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll b/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll
index 7f0f795486f8..12cdd04b7bb7 100644
--- a/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll
+++ b/test/CodeGen/ARM/2011-06-09-TailCallByVal.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic -mcpu=cortex-a8 -arm-tail-calls=1 | FileCheck %s
+; RUN: llc < %s -relocation-model=pic -mcpu=cortex-a8 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"
diff --git a/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll b/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll
index 101a91396eb7..d93cc57574b4 100644
--- a/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll
+++ b/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -arm-tail-calls=1 | FileCheck %s
+; RUN: llc < %s | FileCheck %s
; A tail call inside a function whose byval argument is split between
; registers and the stack is currently unsupported.
; XFAIL: *
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
-target triple = "thumbv7-apple-ios"
+target triple = "thumbv7-apple-ios5.0"
%struct.A = type <{ i16, i16, i32, i16, i16, i32, i16, [8 x %struct.B], [418 x i8], %struct.C }>
%struct.B = type <{ i32, i16, i16 }>
diff --git a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
index bb7870764c50..ed2840bbff59 100644
--- a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
@@ -8,7 +8,7 @@
; DW_OP_constu
; offset
-;CHECK: .long Lset8
+;CHECK: .long Lset9
;CHECK-NEXT: @ DW_AT_type
;CHECK-NEXT: @ DW_AT_decl_file
;CHECK-NEXT: @ DW_AT_decl_line
@@ -75,7 +75,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!49}
-!0 = metadata !{i32 786449, metadata !47, i32 12, metadata !"clang", i1 true, metadata !"", i32 0, metadata !48, metadata !48, metadata !40, metadata !41, metadata !41, null} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, metadata !47, i32 12, metadata !"clang", i1 true, metadata !"", i32 0, metadata !48, metadata !48, metadata !40, metadata !41, metadata !48, null} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 786478, metadata !47, metadata !2, metadata !"get1", metadata !"get1", metadata !"", i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @get1, null, null, metadata !42, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [get1]
!2 = metadata !{i32 786473, metadata !47} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 786453, metadata !47, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
@@ -123,5 +123,5 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!45 = metadata !{metadata !19, metadata !20}
!46 = metadata !{metadata !27, metadata !28}
!47 = metadata !{metadata !"ss3.c", metadata !"/private/tmp"}
-!48 = metadata !{i32 0}
+!48 = metadata !{}
!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/2011-10-26-memset-inline.ll b/test/CodeGen/ARM/2011-10-26-memset-inline.ll
index 03614eddbf70..17bd291a6b55 100644
--- a/test/CodeGen/ARM/2011-10-26-memset-inline.ll
+++ b/test/CodeGen/ARM/2011-10-26-memset-inline.ll
@@ -6,10 +6,10 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-
target triple = "thumbv7-apple-ios5.0.0"
; CHECK-GENERIC: strb
-; CHECK-GENERIT-NEXT: strb
-; CHECK-GENERIT-NEXT: strb
-; CHECK-GENERIT-NEXT: strb
-; CHECK-GENERIT-NEXT: strb
+; CHECK-GENERIC-NEXT: strb
+; CHECK-GENERIC-NEXT: strb
+; CHECK-GENERIC-NEXT: strb
+; CHECK-GENERIC-NEXT: strb
; CHECK-UNALIGNED: strb
; CHECK-UNALIGNED: str
define void @foo(i8* nocapture %c) nounwind optsize {
diff --git a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
index 850c51133f3e..c8e08c22ab19 100644
--- a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
+++ b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm -mcpu=cortex-a8 < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
; Trigger multiple NEON stores.
; CHECK: vst1.64
diff --git a/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll b/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
index 8a65f2e82b75..a707a92c9fa0 100644
--- a/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
+++ b/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; PR11319
@i8_res = global <2 x i8> <i8 0, i8 0>
diff --git a/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll b/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
index 42eb32d14c74..c1554d848c44 100644
--- a/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
+++ b/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; PR11319
@src1_v2i16 = global <2 x i16> <i16 0, i16 1>
diff --git a/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll b/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll
index 719571b3d1fd..c50461a42d8b 100644
--- a/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll
+++ b/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <2 x i32> @test1(<2 x double>* %A) {
; CHECK: test1
diff --git a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
index a263c9c8d678..86b58c8186b0 100644
--- a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
+++ b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mcpu=cortex-a9 %s -o - | FileCheck %s
@A = global <4 x float> <float 0., float 1., float 2., float 3.>
diff --git a/test/CodeGen/ARM/2012-04-10-DAGCombine.ll b/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
index 089dc9153afa..9b71be23b7eb 100644
--- a/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
+++ b/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 -enable-unsafe-fp-math
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 -enable-unsafe-fp-math %s -o /dev/null
;target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
;target triple = "armv7-none-linux-gnueabi"
diff --git a/test/CodeGen/ARM/2012-05-04-vmov.ll b/test/CodeGen/ARM/2012-05-04-vmov.ll
index 14dbf7ff4ac9..c604eed3dd00 100644
--- a/test/CodeGen/ARM/2012-05-04-vmov.ll
+++ b/test/CodeGen/ARM/2012-05-04-vmov.ll
@@ -1,5 +1,9 @@
-; RUN: llc -O1 -march=arm -mcpu=cortex-a9 < %s | FileCheck -check-prefix=A9-CHECK %s
-; RUN: llc -O1 -march=arm -mcpu=swift < %s | FileCheck -check-prefix=SWIFT-CHECK %s
+; RUN: llc -O1 -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - \
+; RUN: | FileCheck -check-prefix=A9-CHECK %s
+
+; RUN: llc -O1 -mtriple=arm-eabi -mcpu=swift %s -o - \
+; RUN: | FileCheck -check-prefix=SWIFT-CHECK %s
+
; Check that swift doesn't use vmov.32. <rdar://problem/10453003>.
define <2 x i32> @testuvec(<2 x i32> %A, <2 x i32> %B) nounwind {
diff --git a/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll b/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll
index dd678436c04e..7f30ae10e436 100644
--- a/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll
+++ b/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm -mcpu=swift < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=swift %s -o - | FileCheck %s
; <rdar://problem/10451892>
define void @f(i32 %x, i32* %p) nounwind ssp {
diff --git a/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll b/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
index 647ebd6bdfd4..e8d4fb22a59f 100644
--- a/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
+++ b/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; PR12281
; Test generataion of code for vmull instruction when multiplying 128-bit
diff --git a/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll b/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll
index 3bdbb3cf5801..8d777634f6c5 100644
--- a/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll
+++ b/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=arm7tdmi | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=arm7tdmi %s -o - | FileCheck %s
; movw is only legal for V6T2 and later.
; rdar://12300648
diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
index 38624e0641f2..5235e9cb2034 100644
--- a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
+++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
@@ -1,4 +1,4 @@
-; RUN: not llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
+; RUN: not llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - 2>&1 | FileCheck %s
; Check for error message:
; CHECK: non-trivial scalar-to-vector conversion, possible invalid constraint for vector type
diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
index 7ba693d6df4a..d389b5c5c1cf 100644
--- a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
+++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
@@ -1,4 +1,4 @@
-; RUN: not llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
+; RUN: not llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - 2>&1 | FileCheck %s
; Check for error message:
; CHECK: scalar-to-vector conversion failed, possible invalid constraint for vector type
diff --git a/test/CodeGen/ARM/2012-11-14-subs_carry.ll b/test/CodeGen/ARM/2012-11-14-subs_carry.ll
index 8df295a2f658..33083303a3d4 100644
--- a/test/CodeGen/ARM/2012-11-14-subs_carry.ll
+++ b/test/CodeGen/ARM/2012-11-14-subs_carry.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
;CHECK-LABEL: foo:
;CHECK: adds
diff --git a/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll b/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
index 127429bc31e3..c5eba7d4773c 100644
--- a/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
+++ b/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
@@ -4,8 +4,8 @@
;CHECK-LABEL: foo:
;CHECK: sub sp, sp, #8
;CHECK: push {r11, lr}
-;CHECK: str r0, [sp, #8]
-;CHECK: add r0, sp, #8
+;CHECK: str r0, [sp, #12]
+;CHECK: add r0, sp, #12
;CHECK: bl fooUseParam
;CHECK: pop {r11, lr}
;CHECK: add sp, sp, #8
diff --git a/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll b/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
index 08bf99b31f54..6bd23b102e79 100644
--- a/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
+++ b/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
@@ -72,7 +72,7 @@ define void @foo(double %p0, ; --> D0
double %p8, ; --> Stack
i32 %p9) #0 { ; --> R0, not Stack+8
entry:
- tail call void @fooUseI32(i32 %p9)
+ call void @fooUseI32(i32 %p9)
ret void
}
diff --git a/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll b/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll
index 6db71fed958e..e79a3ba741ec 100644
--- a/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll
+++ b/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll
@@ -23,9 +23,9 @@ define void @foo(double %vfp0, ; --> D0, NSAA=SP
entry:
;CHECK: sub sp, #8
;CHECK: push.w {r11, lr}
- ;CHECK: add r0, sp, #16
- ;CHECK: str r2, [sp, #20]
- ;CHECK: str r1, [sp, #16]
+ ;CHECK: add r0, sp, #8
+ ;CHECK: str r2, [sp, #12]
+ ;CHECK: str r1, [sp, #8]
;CHECK: bl fooUseStruct
call void @fooUseStruct(%st_t* %p1)
ret void
diff --git a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
index c4f5f54c3af0..162f86306ff4 100644
--- a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
+++ b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
@@ -42,36 +42,57 @@ UnifiedReturnBlock:
ret i32 %tmp13
}
-define hidden fastcc void @t3(i8** %retaddr) {
+define hidden fastcc void @t3(i8** %retaddr, i1 %tst, i8* %p8) {
; CHECK-LABEL: t3:
; CHECK: Block address taken
; CHECK-NOT: Address of block that was removed by CodeGen
bb:
store i8* blockaddress(@t3, %KBBlockZero_return_1), i8** %retaddr
- br i1 undef, label %bb77, label %bb7.i
+ br i1 %tst, label %bb77, label %bb7.i
bb7.i: ; preds = %bb35
br label %bb2.i
KBBlockZero_return_1: ; preds = %KBBlockZero.exit
- unreachable
+ ret void
KBBlockZero_return_0: ; preds = %KBBlockZero.exit
- unreachable
+ ret void
bb77: ; preds = %bb26, %bb12, %bb
ret void
bb2.i: ; preds = %bb6.i350, %bb7.i
- br i1 undef, label %bb6.i350, label %KBBlockZero.exit
+ br i1 %tst, label %bb6.i350, label %KBBlockZero.exit
bb6.i350: ; preds = %bb2.i
br label %bb2.i
KBBlockZero.exit: ; preds = %bb2.i
- indirectbr i8* undef, [label %KBBlockZero_return_1, label %KBBlockZero_return_0]
+ indirectbr i8* %p8, [label %KBBlockZero_return_1, label %KBBlockZero_return_0]
}
+@foo = global i32 ()* null
+define i32 @t4(i32 %x, i32 ()* %p_foo) {
+entry:
+;CHECK-LABEL: t4:
+;CHECK-V8-LABEL: t4:
+ %cmp = icmp slt i32 %x, 60
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ %tmp.2 = call i32 %p_foo()
+ %sub = add nsw i32 %x, -1
+ br label %return
+
+if.else: ; preds = %entry
+ %sub1 = add nsw i32 %x, -120
+ br label %return
+
+return: ; preds = %if.end5, %if.then4, %if.then
+ %retval.0 = phi i32 [ %sub, %if.then ], [ %sub1, %if.else ]
+ ret i32 %retval.0
+}
; If-converter was checking for the wrong predicate subsumes pattern when doing
; nested predicates.
diff --git a/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll b/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll
index defb94601141..efb82027b70d 100644
--- a/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll
+++ b/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+v7,+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mattr=+v7,+thumb2 %s -o - | FileCheck %s
define i8 @f1(i8* %call1, i8* %call3, i32 %h, i32 %w, i32 %Width) {
; CHECK: f1:
diff --git a/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll b/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
index a438c1f4556a..05a4ef05e958 100644
--- a/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
+++ b/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
; ModuleID = 'bugpoint-reduced-simplified.bc'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
target triple = "armv7--linux-gnueabi"
diff --git a/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll b/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
new file mode 100644
index 000000000000..6c0fbd00bd1a
--- /dev/null
+++ b/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
@@ -0,0 +1,56 @@
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -print-before=post-RA-sched %s -o - 2>&1 \
+; RUN: | FileCheck %s
+
+define void @vst(i8* %m, [4 x i64] %v) {
+entry:
+; CHECK: vst:
+; CHECK: VST1d64Q %R{{[0-9]+}}<kill>, 8, %D{{[0-9]+}}, pred:14, pred:%noreg, %Q{{[0-9]+}}_Q{{[0-9]+}}<imp-use>
+
+ %v0 = extractvalue [4 x i64] %v, 0
+ %v1 = extractvalue [4 x i64] %v, 1
+ %v2 = extractvalue [4 x i64] %v, 2
+ %v3 = extractvalue [4 x i64] %v, 3
+
+ %t0 = bitcast i64 %v0 to <8 x i8>
+ %t1 = bitcast i64 %v1 to <8 x i8>
+ %t2 = bitcast i64 %v2 to <8 x i8>
+ %t3 = bitcast i64 %v3 to <8 x i8>
+
+ %s0 = bitcast <8 x i8> %t0 to <1 x i64>
+ %s1 = bitcast <8 x i8> %t1 to <1 x i64>
+ %s2 = bitcast <8 x i8> %t2 to <1 x i64>
+ %s3 = bitcast <8 x i8> %t3 to <1 x i64>
+
+ %tmp0 = bitcast <1 x i64> %s2 to i64
+ %tmp1 = bitcast <1 x i64> %s3 to i64
+
+ %n0 = insertelement <2 x i64> undef, i64 %tmp0, i32 0
+ %n1 = insertelement <2 x i64> %n0, i64 %tmp1, i32 1
+
+ call void @llvm.arm.neon.vst4.v1i64(i8* %m, <1 x i64> %s0, <1 x i64> %s1, <1 x i64> %s2, <1 x i64> %s3, i32 8)
+
+ call void @bar(<2 x i64> %n1)
+
+ ret void
+}
+
+%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
+; CHECK: vtbx4:
+; CHECK: VTBX4 {{.*}}, pred:14, pred:%noreg, %Q{{[0-9]+}}_Q{{[0-9]+}}<imp-use>
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load %struct.__neon_int8x8x4_t* %B
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
+ %tmp7 = load <8 x i8>* %C
+ %tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
+ call void @bar2(%struct.__neon_int8x8x4_t %tmp2, <8 x i8> %tmp8)
+ ret <8 x i8> %tmp8
+}
+
+declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+declare void @bar2(%struct.__neon_int8x8x4_t, <8 x i8>)
+declare void @bar(<2 x i64> %arg)
diff --git a/test/CodeGen/ARM/2014-02-05-vfp-regs-after-stack.ll b/test/CodeGen/ARM/2014-02-05-vfp-regs-after-stack.ll
new file mode 100644
index 000000000000..4c36a2a6a5ed
--- /dev/null
+++ b/test/CodeGen/ARM/2014-02-05-vfp-regs-after-stack.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -o - -filetype=asm | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv8-none--eabi"
+
+; CHECK-LABEL: fn1:
+define arm_aapcs_vfpcc float @fn1(double %a, double %b, double %c, double %d, double %e, double %f, double %g, float %h, double %i, float %j) {
+ ret float %j
+; CHECK: vldr s0, [sp, #8]
+}
+
+; CHECK-LABEL: fn2:
+define arm_aapcs_vfpcc float @fn2(double %a, double %b, double %c, double %d, double %e, double %f, float %h, <4 x float> %i, float %j) {
+ ret float %j
+; CHECK: vldr s0, [sp, #16]
+}
+
+; CHECK-LABEL: fn3:
+define arm_aapcs_vfpcc float @fn3(float %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, float %j) #0 {
+ ret float %j
+; CHECK: vldr s0, [sp, #8]
+}
diff --git a/test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll b/test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll
new file mode 100644
index 000000000000..33bfa2fa61cd
--- /dev/null
+++ b/test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll
@@ -0,0 +1,114 @@
+; RUN: llc -mtriple=arm-linux-gnueabihf < %s | FileCheck %s
+
+%struct4bytes = type { i32 }
+%struct8bytes8align = type { i64 }
+%struct12bytes = type { i32, i32, i32 }
+
+declare void @useIntPtr(%struct4bytes*)
+declare void @useLong(i64)
+declare void @usePtr(%struct8bytes8align*)
+
+; a -> r0
+; b -> r1..r3
+; c -> sp+0..sp+7
+define void @foo1(i32 %a, %struct12bytes* byval %b, i64 %c) {
+; CHECK-LABEL: foo1
+; CHECK: sub sp, sp, #16
+; CHECK: push {r11, lr}
+; CHECK: add [[SCRATCH:r[0-9]+]], sp, #12
+; CHECK: stm [[SCRATCH]], {r1, r2, r3}
+; CHECK: ldr r0, [sp, #24]
+; CHECK: ldr r1, [sp, #28]
+; CHECK: bl useLong
+; CHECK: pop {r11, lr}
+; CHECK: add sp, sp, #16
+
+ call void @useLong(i64 %c)
+ ret void
+}
+
+; a -> r0
+; b -> r2..r3
+define void @foo2(i32 %a, %struct8bytes8align* byval %b) {
+; CHECK-LABEL: foo2
+; CHECK: sub sp, sp, #8
+; CHECK: push {r11, lr}
+; CHECK: add r0, sp, #8
+; CHECK: str r3, [sp, #12]
+; CHECK: str r2, [sp, #8]
+; CHECK: bl usePtr
+; CHECK: pop {r11, lr}
+; CHECK: add sp, sp, #8
+
+ call void @usePtr(%struct8bytes8align* %b)
+ ret void
+}
+
+; a -> r0..r1
+; b -> r2
+define void @foo3(%struct8bytes8align* byval %a, %struct4bytes* byval %b) {
+; CHECK-LABEL: foo3
+; CHECK: sub sp, sp, #16
+; CHECK: push {r11, lr}
+; CHECK: add [[SCRATCH:r[0-9]+]], sp, #8
+; CHECK: stm [[SCRATCH]], {r0, r1, r2}
+; CHECK: add r0, sp, #8
+; CHECK: bl usePtr
+; CHECK: pop {r11, lr}
+; CHECK: add sp, sp, #16
+
+ call void @usePtr(%struct8bytes8align* %a)
+ ret void
+}
+
+; a -> r0
+; b -> r2..r3
+define void @foo4(%struct4bytes* byval %a, %struct8bytes8align* byval %b) {
+; CHECK-LABEL: foo4
+; CHECK: sub sp, sp, #16
+; CHECK: push {r11, lr}
+; CHECK: str r0, [sp, #8]
+; CHECK: add r0, sp, #16
+; CHECK: str r3, [sp, #20]
+; CHECK: str r2, [sp, #16]
+; CHECK: bl usePtr
+; CHECK: pop {r11, lr}
+; CHECK: add sp, sp, #16
+; CHECK: mov pc, lr
+
+ call void @usePtr(%struct8bytes8align* %b)
+ ret void
+}
+
+; a -> r0..r1
+; b -> r2
+; c -> r3
+define void @foo5(%struct8bytes8align* byval %a, %struct4bytes* byval %b, %struct4bytes* byval %c) {
+; CHECK-LABEL: foo5
+; CHECK: sub sp, sp, #16
+; CHECK: push {r11, lr}
+; CHECK: add [[SCRATCH:r[0-9]+]], sp, #8
+; CHECK: stm [[SCRATCH]], {r0, r1, r2, r3}
+; CHECK: add r0, sp, #8
+; CHECK: bl usePtr
+; CHECK: pop {r11, lr}
+; CHECK: add sp, sp, #16
+; CHECK: mov pc, lr
+
+ call void @usePtr(%struct8bytes8align* %a)
+ ret void
+}
+
+; a..c -> r0..r2
+; d -> sp+0..sp+7
+define void @foo6(i32 %a, i32 %b, i32 %c, %struct8bytes8align* byval %d) {
+; CHECK-LABEL: foo6
+; CHECK: push {r11, lr}
+; CHECK: add r0, sp, #8
+; CHECK: bl usePtr
+; CHECK: pop {r11, lr}
+; CHECK: mov pc, lr
+
+ call void @usePtr(%struct8bytes8align* %d)
+ ret void
+}
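As a rough C-level sketch of the layouts checked above (assuming AAPCS argument passing; the struct and function names are illustrative, not taken from the test): an 8-byte-aligned aggregate passed by value after an i32 cannot start in r1, so it is expected to occupy the even pair r2:r3, with any remainder spilled to the stack.

    /* Illustrative sketch only: an 8-byte-aligned aggregate passed by value. */
    struct S8 { long long v; };      /* 8-byte size and 8-byte alignment      */

    void use_struct(struct S8 b);    /* hypothetical callee                   */

    /* After 'a' takes r0, 'b' must start in an even register, so r1 is
       expected to be skipped and the pair r2:r3 used instead.                */
    void pass_after_int(int a, struct S8 b) {
        use_struct(b);
    }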
diff --git a/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll b/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll
new file mode 100644
index 000000000000..1e40e4afe5c0
--- /dev/null
+++ b/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll
@@ -0,0 +1,50 @@
+; Assertion `Encoding == DW_EH_PE_absptr && "Can handle absptr encoding only"' failed.
+; Broken in r208166, fixed in 208715.
+
+; RUN: llc -mtriple=arm-linux-androideabi -o - -filetype=asm -relocation-model=pic %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv4t--linux-androideabi"
+
+@_ZTIi = external constant i8*
+
+define void @_Z3fn2v() #0 {
+entry:
+ invoke void @_Z3fn1v()
+ to label %try.cont unwind label %lpad
+
+lpad: ; preds = %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* bitcast (i8** @_ZTIi to i8*)
+ %1 = extractvalue { i8*, i32 } %0, 1
+ %2 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #2
+ %matches = icmp eq i32 %1, %2
+ br i1 %matches, label %catch, label %eh.resume
+
+catch: ; preds = %lpad
+ %3 = extractvalue { i8*, i32 } %0, 0
+ %4 = tail call i8* @__cxa_begin_catch(i8* %3) #2
+ tail call void @__cxa_end_catch() #2
+ br label %try.cont
+
+try.cont: ; preds = %entry, %catch
+ ret void
+
+eh.resume: ; preds = %lpad
+ resume { i8*, i32 } %0
+}
+
+declare void @_Z3fn1v() #0
+
+declare i32 @__gxx_personality_v0(...)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.eh.typeid.for(i8*) #1
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/ARM/2014-07-18-earlyclobber-str-post.ll b/test/CodeGen/ARM/2014-07-18-earlyclobber-str-post.ll
new file mode 100644
index 000000000000..9ea762ae9bff
--- /dev/null
+++ b/test/CodeGen/ARM/2014-07-18-earlyclobber-str-post.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=armv7-linux-gnueabihf %s -o - | FileCheck %s
+
+; Check that we don't create an unpredictable STR instruction,
+; e.g. str r0, [r0], #4
+
+define i32* @earlyclobber-str-post(i32* %addr) nounwind {
+; CHECK: earlyclobber-str-post
+; CHECK-NOT: str r[[REG:[0-9]+]], [r[[REG]]], #4
+ %val = ptrtoint i32* %addr to i32
+ store i32 %val, i32* %addr
+ %new = getelementptr i32* %addr, i32 1
+ ret i32* %new
+}
diff --git a/test/CodeGen/ARM/DbgValueOtherTargets.test b/test/CodeGen/ARM/DbgValueOtherTargets.test
index bf90891de0a7..9ce2459cd811 100644
--- a/test/CodeGen/ARM/DbgValueOtherTargets.test
+++ b/test/CodeGen/ARM/DbgValueOtherTargets.test
@@ -1 +1 @@
-RUN: llc -O0 -march=arm -asm-verbose < %S/../Inputs/DbgValueOtherTargets.ll | FileCheck %S/../Inputs/DbgValueOtherTargets.ll
+RUN: llc -O0 -mtriple=arm-eabi -asm-verbose %S/../Inputs/DbgValueOtherTargets.ll -o - | FileCheck %S/../Inputs/DbgValueOtherTargets.ll
diff --git a/test/CodeGen/ARM/Windows/aapcs.ll b/test/CodeGen/ARM/Windows/aapcs.ll
new file mode 100644
index 000000000000..3f9a09f8e7f5
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/aapcs.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -o - %s | FileCheck %s
+
+; AAPCS mandates an 8-byte stack alignment. The alloca is implicitly aligned,
+; and no bic is required.
+
+declare void @callee(i8 *%i)
+
+define void @caller() {
+ %i = alloca i8, align 8
+ call void @callee(i8* %i)
+ ret void
+}
+
+; CHECK: sub sp, #8
+; CHECK-NOT: bic
+
diff --git a/test/CodeGen/ARM/Windows/alloca.ll b/test/CodeGen/ARM/Windows/alloca.ll
new file mode 100644
index 000000000000..6a3d002ab3b3
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/alloca.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -mtriple thumbv7-windows-itanium -filetype asm -o - %s | FileCheck %s
+
+declare arm_aapcs_vfpcc i32 @num_entries()
+
+define arm_aapcs_vfpcc void @test___builtin_alloca() {
+entry:
+ %array = alloca i8*, align 4
+ %call = call arm_aapcs_vfpcc i32 @num_entries()
+ %mul = mul i32 4, %call
+ %0 = alloca i8, i32 %mul
+ store i8* %0, i8** %array, align 4
+ ret void
+}
+
+; CHECK: bl num_entries
+; CHECK: movs [[R1:r[0-9]+]], #7
+; CHECK: add.w [[R0:r[0-9]+]], [[R1]], [[R0]], lsl #2
+; CHECK: bic [[R0]], [[R0]], #7
+; CHECK: lsrs r4, [[R0]], #2
+; CHECK: bl __chkstk
+; CHECK: sub.w sp, sp, r4
+
diff --git a/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
new file mode 100644
index 000000000000..a82f6141dbb3
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mtriple thumbv7--windows-itanium -code-model large -filetype obj -o - %s \
+; RUN: | llvm-objdump -no-show-raw-insn -d - | FileCheck %s
+
+; ModuleID = 'reduced.c'
+target datalayout = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv7--windows-itanium"
+
+define arm_aapcs_vfpcc i8 @isel(i32 %i) {
+entry:
+ %i.addr = alloca i32, align 4
+ %buffer = alloca [4096 x i8], align 1
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32* %i.addr, align 4
+ %rem = urem i32 %0, 4096
+ %arrayidx = getelementptr inbounds [4096 x i8]* %buffer, i32 0, i32 %rem
+ %1 = load volatile i8* %arrayidx, align 1
+ ret i8 %1
+}
+
+; CHECK-LABEL: isel
+; CHECK: push {r4, r5}
+; CHECK: movw r4, #{{\d*}}
+; CHECK: movw r12, #0
+; CHECK: movt r12, #0
+; CHECK: blx r12
+; CHECK: sub.w sp, sp, r4
+
diff --git a/test/CodeGen/ARM/Windows/chkstk.ll b/test/CodeGen/ARM/Windows/chkstk.ll
new file mode 100644
index 000000000000..cb787e14b5ba
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/chkstk.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=thumbv7-windows -mcpu=cortex-a9 %s -o - \
+; RUN: | FileCheck -check-prefix CHECK-DEFAULT-CODE-MODEL %s
+
+; RUN: llc -mtriple=thumbv7-windows -mcpu=cortex-a9 -code-model=large %s -o - \
+; RUN: | FileCheck -check-prefix CHECK-LARGE-CODE-MODEL %s
+
+define arm_aapcs_vfpcc void @check_watermark() {
+entry:
+ %buffer = alloca [4096 x i8], align 1
+ ret void
+}
+
+; CHECK-DEFAULT-CODE-MODEL: check_watermark:
+; CHECK-DEFAULT-CODE-MODEL: movw r4, #1024
+; CHECK-DEFAULT-CODE-MODEL: bl __chkstk
+; CHECK-DEFAULT-CODE-MODEL: sub.w sp, sp, r4
+
+; CHECK-LARGE-CODE-MODEL: check_watermark:
+; CHECK-LARGE-CODE-MODEL: movw r12, :lower16:__chkstk
+; CHECK-LARGE-CODE-MODEL: movt r12, :upper16:__chkstk
+; CHECK-LARGE-CODE-MODEL: movw r4, #1024
+; CHECK-LARGE-CODE-MODEL: blx r12
+; CHECK-LARGE-CODE-MODEL: sub.w sp, sp, r4
+
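A hedged C-level sketch of what drives this test (the names are illustrative): a fixed-size frame of at least one 4 KiB page, which the Windows-on-ARM backend is expected to probe through __chkstk, with the size passed in 4-byte units in r4 before sp is adjusted.

    /* Illustrative sketch only: a frame of one full 4 KiB page.              */
    void touch(char *p);             /* hypothetical sink to keep the buffer  */

    void big_frame(void) {
        char buffer[4096];           /* 4096 bytes == 1024 four-byte words,
                                        expected to match 'movw r4, #1024'    */
        touch(buffer);
    }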
diff --git a/test/CodeGen/ARM/Windows/dllimport.ll b/test/CodeGen/ARM/Windows/dllimport.ll
new file mode 100644
index 000000000000..bc737bd41827
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/dllimport.ll
@@ -0,0 +1,61 @@
+; RUN: llc -mtriple thumbv7-windows -filetype asm -o - %s | FileCheck %s
+
+; ModuleID = 'dllimport.c'
+
+@var = external dllimport global i32
+@ext = external global i32
+declare dllimport arm_aapcs_vfpcc i32 @external()
+declare arm_aapcs_vfpcc i32 @internal()
+
+define arm_aapcs_vfpcc i32 @get_var() {
+ %1 = load i32* @var, align 4
+ ret i32 %1
+}
+
+; CHECK-LABEL: get_var
+; CHECK: movw r0, :lower16:__imp_var
+; CHECK: movt r0, :upper16:__imp_var
+; CHECK: ldr r0, [r0]
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+
+define arm_aapcs_vfpcc i32 @get_ext() {
+ %1 = load i32* @ext, align 4
+ ret i32 %1
+}
+
+; CHECK-LABEL: get_ext
+; CHECK: movw r0, :lower16:ext
+; CHECK: movt r0, :upper16:ext
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+
+define arm_aapcs_vfpcc i32* @get_var_pointer() {
+ ret i32* @var
+}
+
+; CHECK-LABEL: get_var_pointer
+; CHECK: movw r0, :lower16:__imp_var
+; CHECK: movt r0, :upper16:__imp_var
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+
+define arm_aapcs_vfpcc i32 @call_external() {
+ %call = tail call arm_aapcs_vfpcc i32 @external()
+ ret i32 %call
+}
+
+; CHECK-LABEL: call_external
+; CHECK: movw r0, :lower16:__imp_external
+; CHECK: movt r0, :upper16:__imp_external
+; CHECK: ldr r0, [r0]
+; CHECK: bx r0
+
+define arm_aapcs_vfpcc i32 @call_internal() {
+ %call = tail call arm_aapcs_vfpcc i32 @internal()
+ ret i32 %call
+}
+
+; CHECK-LABEL: call_internal
+; CHECK: b internal
+
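For context, a minimal C-level sketch of the source constructs behind these checks, assuming a thumbv7-windows target (identifiers reuse the test's names; the __declspec spelling is the usual MSVC/clang one): dllimport'ed data and functions are reached through the __imp_ pointer slot, which accounts for the extra load before the final access.

    /* Illustrative sketch only, MSVC/clang __declspec spelling.              */
    __declspec(dllimport) extern int var;        /* load __imp_var, then the value      */
    __declspec(dllimport) int external(void);    /* load __imp_external, then branch    */
    extern int ext;                              /* ordinary external: direct movw/movt */

    int get_var(void)       { return var; }
    int get_ext(void)       { return ext; }
    int call_external(void) { return external(); }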
diff --git a/test/CodeGen/ARM/Windows/frame-register.ll b/test/CodeGen/ARM/Windows/frame-register.ll
new file mode 100644
index 000000000000..31167d7352e3
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/frame-register.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mtriple thumbv7-windows -disable-fp-elim -filetype asm -o - %s \
+; RUN: | FileCheck %s
+
+declare void @callee(i32)
+
+define i32 @calleer(i32 %i) {
+entry:
+ %i.addr = alloca i32, align 4
+ %j = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32* %i.addr, align 4
+ %add = add nsw i32 %0, 1
+ store i32 %add, i32* %j, align 4
+ %1 = load i32* %j, align 4
+ call void @callee(i32 %1)
+ %2 = load i32* %j, align 4
+ %add1 = add nsw i32 %2, 1
+ ret i32 %add1
+}
+
+; CHECK: push.w {r11, lr}
+
diff --git a/test/CodeGen/ARM/Windows/global-minsize.ll b/test/CodeGen/ARM/Windows/global-minsize.ll
new file mode 100644
index 000000000000..c0be36caa6c4
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/global-minsize.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple=thumbv7-windows -filetype asm -o - %s | FileCheck %s
+
+@i = internal global i32 0, align 4
+
+; Function Attrs: minsize
+define arm_aapcs_vfpcc i32* @function() #0 {
+entry:
+ ret i32* @i
+}
+
+attributes #0 = { minsize }
+
+; CHECK: function:
+; CHECK: movw r0, :lower16:i
+; CHECK: movt r0, :upper16:i
+; CHECK: bx lr
diff --git a/test/CodeGen/ARM/Windows/hard-float.ll b/test/CodeGen/ARM/Windows/hard-float.ll
new file mode 100644
index 000000000000..f7b7ec273ce8
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/hard-float.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -o - %s | FileCheck %s
+
+define float @function(float %f, float %g) nounwind {
+entry:
+ %h = fadd float %f, %g
+ ret float %h
+}
+
+; CHECK: vadd.f32 s0, s0, s1
+
diff --git a/test/CodeGen/ARM/Windows/integer-floating-point-conversion.ll b/test/CodeGen/ARM/Windows/integer-floating-point-conversion.ll
new file mode 100644
index 000000000000..acf21a1caad3
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/integer-floating-point-conversion.ll
@@ -0,0 +1,74 @@
+; RUN: llc -mtriple thumbv7-windows -filetype asm -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc i64 @stoi64(float %f) {
+entry:
+ %conv = fptosi float %f to i64
+ ret i64 %conv
+}
+
+; CHECK-LABEL: stoi64
+; CHECK: bl __stoi64
+
+define arm_aapcs_vfpcc i64 @stou64(float %f) {
+entry:
+ %conv = fptoui float %f to i64
+ ret i64 %conv
+}
+
+; CHECK-LABEL: stou64
+; CHECK: bl __stou64
+
+define arm_aapcs_vfpcc float @i64tos(i64 %i64) {
+entry:
+ %conv = sitofp i64 %i64 to float
+ ret float %conv
+}
+
+; CHECK-LABEL: i64tos
+; CHECK: bl __i64tos
+
+define arm_aapcs_vfpcc float @u64tos(i64 %u64) {
+entry:
+ %conv = uitofp i64 %u64 to float
+ ret float %conv
+}
+
+; CHECK-LABEL: u64tos
+; CHECK: bl __u64tos
+
+define arm_aapcs_vfpcc i64 @dtoi64(double %d) {
+entry:
+ %conv = fptosi double %d to i64
+ ret i64 %conv
+}
+
+; CHECK-LABEL: dtoi64
+; CHECK: bl __dtoi64
+
+define arm_aapcs_vfpcc i64 @dtou64(double %d) {
+entry:
+ %conv = fptoui double %d to i64
+ ret i64 %conv
+}
+
+; CHECK-LABEL: dtou64
+; CHECK: bl __dtou64
+
+define arm_aapcs_vfpcc double @i64tod(i64 %i64) {
+entry:
+ %conv = sitofp i64 %i64 to double
+ ret double %conv
+}
+
+; CHECK-LABEL: i64tod
+; CHECK: bl __i64tod
+
+define arm_aapcs_vfpcc double @u64tod(i64 %i64) {
+entry:
+ %conv = uitofp i64 %i64 to double
+ ret double %conv
+}
+
+; CHECK-LABEL: u64tod
+; CHECK: bl __u64tod
+
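A brief C-level sketch of these conversions, assuming a thumbv7-windows target (function names are illustrative): 64-bit integer/floating-point conversions have no single ARM instruction, so they are expected to lower to the MSVC-style helpers checked above rather than the __aeabi_* equivalents.

    /* Illustrative sketch only; names do not come from the test.             */
    long long to_i64(float f)             { return (long long)f;          } /* __stoi64 */
    unsigned long long to_u64(float f)    { return (unsigned long long)f; } /* __stou64 */
    double from_u64(unsigned long long u) { return (double)u;             } /* __u64tod */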
diff --git a/test/CodeGen/ARM/Windows/long-calls.ll b/test/CodeGen/ARM/Windows/long-calls.ll
new file mode 100644
index 000000000000..e35f414579af
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/long-calls.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=thumbv7-windows -mcpu=cortex-a9 -arm-long-calls -o - %s \
+; RUN: | FileCheck %s
+
+declare arm_aapcs_vfpcc void @callee()
+
+define arm_aapcs_vfpcc void @caller() nounwind {
+entry:
+ tail call void @callee()
+ ret void
+}
+
+; CHECK-LABEL: caller
+; CHECK: ldr [[REG:r[0-9]+]], [[CPI:.LCPI[_0-9]+]]
+; CHECK: bx [[REG]]
+; CHECK: .align 2
+; CHECK: [[CPI]]:
+; CHECK: .long callee
+
diff --git a/test/CodeGen/ARM/Windows/mangling.ll b/test/CodeGen/ARM/Windows/mangling.ll
new file mode 100644
index 000000000000..ce1fe2ee7e16
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/mangling.ll
@@ -0,0 +1,9 @@
+; RUN: llc -mtriple=thumbv7-windows -mcpu=cortex-a9 -o - %s | FileCheck %s
+
+define void @function() nounwind {
+entry:
+ ret void
+}
+
+; CHECK-LABEL: function
+
diff --git a/test/CodeGen/ARM/Windows/memset.ll b/test/CodeGen/ARM/Windows/memset.ll
new file mode 100644
index 000000000000..500e25e259c6
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/memset.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple thumbv7--windows-itanium -filetype asm -o - %s | FileCheck %s
+
+@source = common global [512 x i8] zeroinitializer, align 4
+
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+
+define void @function() {
+entry:
+ call void @llvm.memset.p0i8.i32(i8* bitcast ([512 x i8]* @source to i8*), i8 0, i32 512, i32 0, i1 false)
+ unreachable
+}
+
+; CHECK: movw r0, :lower16:source
+; CHECK: movt r0, :upper16:source
+; CHECK: movs r1, #0
+; CHECK: mov.w r2, #512
+; CHECK: memset
+
diff --git a/test/CodeGen/ARM/Windows/mov32t-bundling.ll b/test/CodeGen/ARM/Windows/mov32t-bundling.ll
new file mode 100644
index 000000000000..5f838378fa87
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/mov32t-bundling.ll
@@ -0,0 +1,28 @@
+; RUN: llc -mtriple thumbv7-windows-itanium -filetype asm -o - %s | FileCheck %s
+
+@_begin = external global i8
+@_end = external global i8
+
+declare arm_aapcs_vfpcc void @force_emission()
+
+define arm_aapcs_vfpcc void @bundle() {
+entry:
+ br i1 icmp uge (i32 sub (i32 ptrtoint (i8* @_end to i32), i32 ptrtoint (i8* @_begin to i32)), i32 4), label %if.then, label %if.end
+
+if.then:
+ tail call arm_aapcs_vfpcc void @force_emission()
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: bundle
+; CHECK-NOT: subs r0, r1, r0
+; CHECK: movw r0, :lower16:_begin
+; CHECK-NEXT: movt r0, :upper16:_begin
+; CHECK-NEXT: movw r1, :lower16:_end
+; CHECK-NEXT: movt r1, :upper16:_end
+; CHECK-NEXT: subs r0, r1, r0
+; CHECK-NEXT: cmp r0, #4
+
diff --git a/test/CodeGen/ARM/Windows/movw-movt-relocations.ll b/test/CodeGen/ARM/Windows/movw-movt-relocations.ll
new file mode 100644
index 000000000000..3ae6428d3a6b
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/movw-movt-relocations.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mtriple=thumbv7-windows -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-WINDOWS
+
+; RUN: llc -mtriple=thumbv7-eabi -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-EABI
+
+@i = common global i32 0, align 4
+@j = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize readonly
+define i32 @relocation(i32 %j, i32 %k) {
+entry:
+ %0 = load i32* @i, align 4
+ %1 = load i32* @j, align 4
+ %add = add nsw i32 %1, %0
+ ret i32 %add
+}
+
+; CHECK-WINDOWS: movw r[[i:[0-4]]], :lower16:i
+; CHECK-WINDOWS-NEXT: movt r[[i]], :upper16:i
+; CHECK-WINDOWS: movw r[[j:[0-4]]], :lower16:j
+; CHECK-WINDOWS-NEXT: movt r[[j]], :upper16:j
+
+; CHECK-EABI: movw r[[i:[0-4]]], :lower16:i
+; CHECK-EABI: movw r[[j:[0-4]]], :lower16:j
+; CHECK-EABI-NEXT: movt r[[i]], :upper16:i
+; CHECK-EABI-NEXT: movt r[[j]], :upper16:j
diff --git a/test/CodeGen/ARM/Windows/no-aeabi.ll b/test/CodeGen/ARM/Windows/no-aeabi.ll
new file mode 100644
index 000000000000..3971b9ccf580
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/no-aeabi.ll
@@ -0,0 +1,32 @@
+; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -o - %s | FileCheck %s
+
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+
+@source = common global [512 x i8] zeroinitializer, align 4
+@target = common global [512 x i8] zeroinitializer, align 4
+
+define void @move() nounwind {
+entry:
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* bitcast ([512 x i8]* @target to i8*), i8* bitcast ([512 x i8]* @source to i8*), i32 512, i32 0, i1 false)
+ unreachable
+}
+
+; CHECK-NOT: __aeabi_memmove
+
+define void @copy() nounwind {
+entry:
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([512 x i8]* @target to i8*), i8* bitcast ([512 x i8]* @source to i8*), i32 512, i32 0, i1 false)
+ unreachable
+}
+
+; CHECK-NOT: __aeabi_memcpy
+
+define i32 @divide(i32 %i, i32 %j) nounwind {
+entry:
+ %quotient = sdiv i32 %i, %j
+ ret i32 %quotient
+}
+
+; CHECK-NOT: __aeabi_idiv
+
diff --git a/test/CodeGen/ARM/Windows/no-arm-mode.ll b/test/CodeGen/ARM/Windows/no-arm-mode.ll
new file mode 100644
index 000000000000..6db031fc9169
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/no-arm-mode.ll
@@ -0,0 +1,5 @@
+; RUN: not llc -mtriple=armv7-windows-itanium -mcpu=cortex-a9 -o /dev/null %s 2>&1 \
+; RUN: | FileCheck %s
+
+; CHECK: does not support ARM mode execution
+
diff --git a/test/CodeGen/ARM/Windows/no-ehabi.ll b/test/CodeGen/ARM/Windows/no-ehabi.ll
new file mode 100644
index 000000000000..4119b6da968e
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/no-ehabi.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=thumbv7-windows -mcpu=cortex-a9 -o - %s | FileCheck %s
+
+declare void @callee(i32 %i)
+
+define i32 @caller(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n, i32 %o,
+ i32 %p) {
+entry:
+ %q = add nsw i32 %j, %i
+ %r = add nsw i32 %q, %k
+ %s = add nsw i32 %r, %l
+ call void @callee(i32 %s)
+ %t = add nsw i32 %n, %m
+ %u = add nsw i32 %t, %o
+ %v = add nsw i32 %u, %p
+ call void @callee(i32 %v)
+ %w = add nsw i32 %v, %s
+ ret i32 %w
+}
+
+; CHECK-NOT: .save {{{.*}}}
+
diff --git a/test/CodeGen/ARM/Windows/pic.ll b/test/CodeGen/ARM/Windows/pic.ll
new file mode 100644
index 000000000000..28d371f45217
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/pic.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple thumbv7-windows-itanium -relocation-model pic -filetype asm -o - %s \
+; RUN: | FileCheck %s
+
+@external = external global i8
+
+define arm_aapcs_vfpcc i8 @return_external() {
+entry:
+ %0 = load i8* @external, align 1
+ ret i8 %0
+}
+
+; CHECK-LABEL: return_external
+; CHECK: movw r0, :lower16:external
+; CHECK: movt r0, :upper16:external
+; CHECK: ldrb r0, [r0]
+
diff --git a/test/CodeGen/ARM/Windows/read-only-data.ll b/test/CodeGen/ARM/Windows/read-only-data.ll
new file mode 100644
index 000000000000..0ccb5ededff2
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/read-only-data.ll
@@ -0,0 +1,15 @@
+; RUN: llc -mtriple thumbv7-windows -filetype asm -o - %s | FileCheck %s
+
+@.str = private unnamed_addr constant [7 x i8] c"string\00", align 1
+
+declare arm_aapcs_vfpcc void @callee(i8*)
+
+define arm_aapcs_vfpcc void @function() {
+entry:
+ call arm_aapcs_vfpcc void @callee(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0))
+ ret void
+}
+
+; CHECK: .section .rdata,"rd"
+; CHECK-NOT: .section ".rodata.str1.1"
+
diff --git a/test/CodeGen/ARM/Windows/structors.ll b/test/CodeGen/ARM/Windows/structors.ll
new file mode 100644
index 000000000000..a1a90265c03a
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/structors.ll
@@ -0,0 +1,12 @@
+; RUN: llc -mtriple thumbv7-windows-itanium -o - %s | FileCheck %s
+
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @function, i8* null }]
+
+define arm_aapcs_vfpcc void @function() {
+entry:
+ ret void
+}
+
+; CHECK: .section .CRT$XCU,"rd"
+; CHECK: .long function
+
diff --git a/test/CodeGen/ARM/Windows/vla.ll b/test/CodeGen/ARM/Windows/vla.ll
new file mode 100644
index 000000000000..56901dee0dfa
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/vla.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-SMALL-CODE
+; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -code-model=large -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-LARGE-CODE
+; RUN: llc -mtriple=thumbv7-windows-msvc -mcpu=cortex-a9 -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-MSVC
+
+define arm_aapcs_vfpcc i8 @function(i32 %sz, i32 %idx) {
+entry:
+ %vla = alloca i8, i32 %sz, align 1
+ %arrayidx = getelementptr inbounds i8* %vla, i32 %idx
+ %0 = load volatile i8* %arrayidx, align 1
+ ret i8 %0
+}
+
+; CHECK-SMALL-CODE: adds [[R4:r[0-9]+]], #7
+; CHECK-SMALL-CODE: bic [[R4]], [[R4]], #7
+; CHECK-SMALL-CODE: lsrs r4, [[R4]], #2
+; CHECK-SMALL-CODE: bl __chkstk
+; CHECK-SMALL-CODE: sub.w sp, sp, r4
+
+; CHECK-LARGE-CODE: adds [[R4:r[0-9]+]], #7
+; CHECK-LARGE-CODE: bic [[R4]], [[R4]], #7
+; CHECK-LARGE-CODE: lsrs r4, [[R4]], #2
+; CHECK-LARGE-CODE: movw [[IP:r[0-9]+]], :lower16:__chkstk
+; CHECK-LARGE-CODE: movt [[IP]], :upper16:__chkstk
+; CHECK-LARGE-CODE: blx [[IP]]
+; CHECK-LARGE-CODE: sub.w sp, sp, r4
+
+; CHECK-MSVC-NOT: __chkstk
+
diff --git a/test/CodeGen/ARM/a15-mla.ll b/test/CodeGen/ARM/a15-mla.ll
index b233cc27c4ba..9867e27ba7fe 100644
--- a/test/CodeGen/ARM/a15-mla.ll
+++ b/test/CodeGen/ARM/a15-mla.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -float-abi=hard -mcpu=cortex-a15 -mattr=+neon,+neonfp | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=hard -mcpu=cortex-a15 -mattr=+neon,+neonfp %s -o - \
+; RUN: | FileCheck %s
; This test checks that the VMLxForwarting feature is disabled for A15.
; CHECK: fun_a:
diff --git a/test/CodeGen/ARM/a15.ll b/test/CodeGen/ARM/a15.ll
index 6f816c1c2c53..9f0b2809a952 100644
--- a/test/CodeGen/ARM/a15.ll
+++ b/test/CodeGen/ARM/a15.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=cortex-a15 | FileCheck %s
+; RUN: llc -mtriple=arm -mcpu=cortex-a15 %s -o - | FileCheck %s
; CHECK: a
define i32 @a(i32 %x) {
diff --git a/test/CodeGen/ARM/aapcs-hfa-code.ll b/test/CodeGen/ARM/aapcs-hfa-code.ll
new file mode 100644
index 000000000000..396e83816ccf
--- /dev/null
+++ b/test/CodeGen/ARM/aapcs-hfa-code.ll
@@ -0,0 +1,111 @@
+; RUN: llc < %s -mtriple=armv7-linux-gnueabihf -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7em-none-eabi -mcpu=cortex-m4 | FileCheck %s --check-prefix=CHECK-M4F
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+
+define arm_aapcs_vfpcc void @test_1float({ float } %a) {
+ call arm_aapcs_vfpcc void @test_1float({ float } { float 1.0 })
+ ret void
+
+; CHECK-LABEL: test_1float:
+; CHECK-DAG: vmov.f32 s0, #1.{{0+}}e+00
+; CHECK: bl test_1float
+
+; CHECK-M4F-LABEL: test_1float:
+; CHECK-M4F-DAG: vmov.f32 s0, #1.{{0+}}e+00
+; CHECK-M4F: bl test_1float
+}
+
+define arm_aapcs_vfpcc void @test_2float({ float, float } %a) {
+ call arm_aapcs_vfpcc void @test_2float({ float, float } { float 1.0, float 2.0 })
+ ret void
+
+; CHECK-LABEL: test_2float:
+; CHECK-DAG: vmov.f32 s0, #1.{{0+}}e+00
+; CHECK-DAG: vmov.f32 s1, #2.{{0+}}e+00
+; CHECK: bl test_2float
+
+; CHECK-M4F-LABEL: test_2float:
+; CHECK-M4F-DAG: vmov.f32 s0, #1.{{0+}}e+00
+; CHECK-M4F-DAG: vmov.f32 s1, #2.{{0+}}e+00
+; CHECK-M4F: bl test_2float
+}
+
+define arm_aapcs_vfpcc void @test_3float({ float, float, float } %a) {
+ call arm_aapcs_vfpcc void @test_3float({ float, float, float } { float 1.0, float 2.0, float 3.0 })
+ ret void
+
+; CHECK-LABEL: test_3float:
+; CHECK-DAG: vmov.f32 s0, #1.{{0+}}e+00
+; CHECK-DAG: vmov.f32 s1, #2.{{0+}}e+00
+; CHECK-DAG: vmov.f32 s2, #3.{{0+}}e+00
+; CHECK: bl test_3float
+
+; CHECK-M4F-LABEL: test_3float:
+; CHECK-M4F-DAG: vmov.f32 s0, #1.{{0+}}e+00
+; CHECK-M4F-DAG: vmov.f32 s1, #2.{{0+}}e+00
+; CHECK-M4F-DAG: vmov.f32 s2, #3.{{0+}}e+00
+; CHECK-M4F: bl test_3float
+}
+
+define arm_aapcs_vfpcc void @test_1double({ double } %a) {
+; CHECK-LABEL: test_1double:
+; CHECK-DAG: vmov.f64 d0, #1.{{0+}}e+00
+; CHECK: bl test_1double
+
+; CHECK-M4F-LABEL: test_1double:
+; CHECK-M4F: movs [[ONEHI:r[0-9]+]], #0
+; CHECK-M4F: movs [[ONELO:r[0-9]+]], #0
+; CHECK-M4F: movt [[ONEHI]], #16368
+; CHECK-M4F-DAG: vmov s0, [[ONELO]]
+; CHECK-M4F-DAG: vmov s1, [[ONEHI]]
+; CHECK-M4F: bl test_1double
+
+ call arm_aapcs_vfpcc void @test_1double({ double } { double 1.0 })
+ ret void
+}
+
+; Final double argument might be put in s15 & [sp] if we're careless. It should
+; go all on the stack.
+define arm_aapcs_vfpcc void @test_1double_nosplit([4 x float], [4 x double], [3 x float], double %a) {
+; CHECK-LABEL: test_1double_nosplit:
+; CHECK-DAG: mov [[ONELO:r[0-9]+]], #0
+; CHECK-DAG: movw [[ONEHI:r[0-9]+]], #0
+; CHECK-DAG: movt [[ONEHI]], #16368
+; CHECK: strd [[ONELO]], [[ONEHI]], [sp]
+; CHECK: bl test_1double_nosplit
+
+; CHECK-M4F-LABEL: test_1double_nosplit:
+; CHECK-M4F: movs [[ONELO:r[0-9]+]], #0
+; CHECK-M4F: movs [[ONEHI:r[0-9]+]], #0
+; CHECK-M4F: movt [[ONEHI]], #16368
+; CHECK-M4F-DAG: str [[ONELO]], [sp]
+; CHECK-M4F-DAG: str [[ONEHI]], [sp, #4]
+; CHECK-M4F: bl test_1double_nosplit
+ call arm_aapcs_vfpcc void @test_1double_nosplit([4 x float] undef, [4 x double] undef, [3 x float] undef, double 1.0)
+ ret void
+}
+
+; Final double argument might go at [sp, #4] if we're careless. Should go at
+; [sp, #8] to preserve alignment.
+define arm_aapcs_vfpcc void @test_1double_misaligned([4 x double], [4 x double], float, double) {
+ call arm_aapcs_vfpcc void @test_1double_misaligned([4 x double] undef, [4 x double] undef, float undef, double 1.0)
+
+; CHECK-LABEL: test_1double_misaligned:
+; CHECK-DAG: mov [[ONELO:r[0-9]+]], #0
+; CHECK-DAG: mov r[[BASE:[0-9]+]], sp
+; CHECK-DAG: movw [[ONEHI:r[0-9]+]], #0
+; CHECK-DAG: movt [[ONEHI]], #16368
+; CHECK-DAG: str [[ONELO]], [r[[BASE]], #8]!
+; CHECK-DAG: str [[ONEHI]], [r[[BASE]], #4]
+
+; CHECK-M4F-LABEL: test_1double_misaligned:
+; CHECK-M4F: movs [[ONELO:r[0-9]+]], #0
+; CHECK-M4F: movs [[ONEHI:r[0-9]+]], #0
+; CHECK-M4F: movt [[ONEHI]], #16368
+; CHECK-M4F-DAG: str [[ONELO]], [sp, #8]
+; CHECK-M4F-DAG: str [[ONEHI]], [sp, #12]
+; CHECK-M4F: bl test_1double_misaligned
+
+ ret void
+}
diff --git a/test/CodeGen/ARM/aapcs-hfa.ll b/test/CodeGen/ARM/aapcs-hfa.ll
new file mode 100644
index 000000000000..6448e00b3e7b
--- /dev/null
+++ b/test/CodeGen/ARM/aapcs-hfa.ll
@@ -0,0 +1,164 @@
+; RUN: llc < %s -float-abi=hard -debug-only arm-isel 2>&1 | FileCheck %s
+; RUN: llc < %s -float-abi=soft -debug-only arm-isel 2>&1 | FileCheck %s --check-prefix=SOFT
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7-none--eabi"
+
+; SOFT-NOT: isHA
+
+; CHECK: isHA: 1 { float }
+define void @f0b({ float } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { float, float }
+define void @f1({ float, float } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { float, float, float }
+define void @f1b({ float, float, float } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { float, float, float, float }
+define void @f1c({ float, float, float, float } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { float, float, float, float, float }
+define void @f2({ float, float, float, float, float } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { double }
+define void @f3({ double } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { double, double, double, double }
+define void @f4({ double, double, double, double } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { double, double, double, double, double }
+define void @f5({ double, double, double, double, double } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { i32, i32 }
+define void @f5b({ i32, i32 } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { [1 x float] }
+define void @f6({ [1 x float] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { [4 x float] }
+define void @f7({ [4 x float] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { [5 x float] }
+define void @f8({ [5 x float] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 [1 x float]
+define void @f6b([1 x float] %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 [4 x float]
+define void @f7b([4 x float] %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 [5 x float]
+define void @f8b([5 x float] %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { [2 x float], [2 x float] }
+define void @f9({ [2 x float], [2 x float] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { [1 x float], [3 x float] }
+define void @f9b({ [1 x float], [3 x float] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { [3 x float], [3 x float] }
+define void @f10({ [3 x float], [3 x float] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { <2 x float> }
+define void @f11({ <2 x float> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { <3 x float> }
+define void @f12({ <3 x float> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { <4 x float> }
+define void @f13({ <4 x float> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { <2 x float>, <2 x float> }
+define void @f15({ <2 x float>, <2 x float> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { <2 x float>, float }
+define void @f15b({ <2 x float>, float } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { <2 x float>, [2 x float] }
+define void @f15c({ <2 x float>, [2 x float] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { <2 x float>, <4 x float> }
+define void @f16({ <2 x float>, <4 x float> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { <2 x double> }
+define void @f17({ <2 x double> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { <2 x i32> }
+define void @f18({ <2 x i32> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { <2 x i64>, <4 x i32> }
+define void @f19({ <2 x i64>, <4 x i32> } %a) {
+ ret void
+}
+
+; CHECK: isHA: 1 { [4 x <4 x float>] }
+define void @f20({ [4 x <4 x float>] } %a) {
+ ret void
+}
+
+; CHECK: isHA: 0 { [5 x <4 x float>] }
+define void @f21({ [5 x <4 x float>] } %a) {
+ ret void
+}
+
+; CHECK-NOT: isHA
+define void @f22({ float } %a, ...) {
+ ret void
+}
+
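As a hedged C-level illustration of the isHA classification above (struct and function names are invented for the example): a homogeneous floating-point aggregate has one to four members of a single floating-point or vector type and, under the hard-float AAPCS variant, is expected to travel in consecutive VFP registers.

    /* Illustrative sketch only: HFA vs. non-HFA under hard-float AAPCS.      */
    struct hfa4    { float a, b, c, d; };   /* isHA: 1 -> expected in s0-s3   */
    struct not_hfa { float a; int b;   };   /* mixed member types -> isHA: 0  */

    float sum(struct hfa4 h) {              /* 'h' arrives in VFP registers   */
        return h.a + h.b + h.c + h.d;
    }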
diff --git a/test/CodeGen/ARM/addrmode.ll b/test/CodeGen/ARM/addrmode.ll
index 748d25804447..8fd1da791f1f 100644
--- a/test/CodeGen/ARM/addrmode.ll
+++ b/test/CodeGen/ARM/addrmode.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc < %s -march=arm -stats 2>&1 | grep asm-printer | grep 4
+; RUN: llc -mtriple=arm-eabi -stats %s -o - 2>&1 | FileCheck %s
define i32 @t1(i32 %a) {
%b = mul i32 %a, 9
@@ -14,3 +14,6 @@ define i32 @t2(i32 %a) {
%d = load i32* %c
ret i32 %d
}
+
+; CHECK: 4 asm-printer
+
diff --git a/test/CodeGen/ARM/addrspacecast.ll b/test/CodeGen/ARM/addrspacecast.ll
index 2e98ba53c67a..7b6237d719d1 100644
--- a/test/CodeGen/ARM/addrspacecast.ll
+++ b/test/CodeGen/ARM/addrspacecast.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
; Check that codegen for an addrspace cast succeeds without error.
define <4 x i32 addrspace(1)*> @f (<4 x i32*> %x) {
diff --git a/test/CodeGen/ARM/argaddr.ll b/test/CodeGen/ARM/argaddr.ll
index 116a32f9c74d..40bc5e0b82a7 100644
--- a/test/CodeGen/ARM/argaddr.ll
+++ b/test/CodeGen/ARM/argaddr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define void @f(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
entry:
diff --git a/test/CodeGen/ARM/arm-abi-attr.ll b/test/CodeGen/ARM/arm-abi-attr.ll
new file mode 100644
index 000000000000..f3923ae5cc82
--- /dev/null
+++ b/test/CodeGen/ARM/arm-abi-attr.ll
@@ -0,0 +1,28 @@
+; RUN: llc -mtriple=arm-linux < %s | FileCheck %s --check-prefix=APCS
+; RUN: llc -mtriple=arm-linux -mattr=apcs < %s | \
+; RUN: FileCheck %s --check-prefix=APCS
+; RUN: llc -mtriple=arm-linux-gnueabi -mattr=apcs < %s | \
+; RUN: FileCheck %s --check-prefix=APCS
+
+; RUN: llc -mtriple=arm-linux-gnueabi < %s | FileCheck %s --check-prefix=AAPCS
+; RUN: llc -mtriple=arm-linux-gnueabi -mattr=aapcs < %s | \
+; RUN: FileCheck %s --check-prefix=AAPCS
+; RUN: llc -mtriple=arm-linux-gnu -mattr=aapcs < %s | \
+; RUN: FileCheck %s --check-prefix=AAPCS
+
+; The stack is 8 byte aligned on AAPCS and 4 on APCS, so we should get a BIC
+; only on APCS.
+
+define void @g() {
+; APCS: sub sp, sp, #8
+; APCS: bic sp, sp, #7
+
+; AAPCS: sub sp, sp, #8
+; AAPCS-NOT: bic
+
+ %c = alloca i8, align 8
+ call void @f(i8* %c)
+ ret void
+}
+
+declare void @f(i8*)
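A small C sketch of the same scenario (reusing the test's f/g names; _Alignas requires C11): an 8-byte-aligned stack object is what forces the bic under APCS, while AAPCS already guarantees 8-byte stack alignment at public interfaces, so no bic is expected there.

    /* Illustrative sketch only, reusing the test's f/g names (C11).          */
    void f(char *p);

    void g(void) {
        _Alignas(8) char c[8];       /* 8-byte-aligned stack object: needs
                                        'bic sp, sp, #7' under APCS only      */
        f(c);
    }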
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 88d797e83648..14eef832e693 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -1,7 +1,8 @@
-; RUN: llc < %s -march=arm | FileCheck -check-prefix=ARM %s
-; RUN: llc < %s -march=thumb | FileCheck -check-prefix=THUMB %s
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck -check-prefix=T2 %s
-; RUN: llc < %s -mtriple=thumbv8 | FileCheck -check-prefix=V8 %s
+; RUN: llc -mtriple=arm-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=ARM %s
+; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=THUMB %s
+; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
+; RUN: | FileCheck -check-prefix=T2 %s
+; RUN: llc -mtriple=thumbv8-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=V8 %s
; FIXME: The -march=thumb test doesn't change if -disable-peephole is specified.
diff --git a/test/CodeGen/ARM/arm-asm.ll b/test/CodeGen/ARM/arm-asm.ll
index 2e35e3953f7e..e869abeb2dd6 100644
--- a/test/CodeGen/ARM/arm-asm.ll
+++ b/test/CodeGen/ARM/arm-asm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define void @frame_dummy() {
entry:
diff --git a/test/CodeGen/ARM/arm-modifier.ll b/test/CodeGen/ARM/arm-modifier.ll
index 854864277720..580f7e7a90c7 100644
--- a/test/CodeGen/ARM/arm-modifier.ll
+++ b/test/CodeGen/ARM/arm-modifier.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 -no-integrated-as %s -o - | FileCheck %s
define i32 @foo(float %scale, float %scale2) nounwind {
entry:
diff --git a/test/CodeGen/ARM/arm-negative-stride.ll b/test/CodeGen/ARM/arm-negative-stride.ll
index fb0f8ff87906..7decb974e268 100644
--- a/test/CodeGen/ARM/arm-negative-stride.ll
+++ b/test/CodeGen/ARM/arm-negative-stride.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
; This loop is rewritten with an indvar which counts down, which
; frees up a register from holding the trip count.
diff --git a/test/CodeGen/ARM/arm-ttype-target2.ll b/test/CodeGen/ARM/arm-ttype-target2.ll
index 8b5087f89c04..4d61cb5bb3a5 100644
--- a/test/CodeGen/ARM/arm-ttype-target2.ll
+++ b/test/CodeGen/ARM/arm-ttype-target2.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=armv7-none-linux-gnueabi -arm-enable-ehabi -arm-enable-ehabi-descriptors < %s | FileCheck %s
+; RUN: llc -mtriple=armv7-none-linux-gnueabi < %s | FileCheck %s
@_ZTVN10__cxxabiv117__class_type_infoE = external global i8*
@_ZTS3Foo = linkonce_odr constant [5 x i8] c"3Foo\00"
diff --git a/test/CodeGen/ARM/atomic-64bit.ll b/test/CodeGen/ARM/atomic-64bit.ll
index 0477d4f40160..462c1859dc91 100644
--- a/test/CodeGen/ARM/atomic-64bit.ll
+++ b/test/CodeGen/ARM/atomic-64bit.ll
@@ -1,12 +1,16 @@
-; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabihf -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-THUMB
+; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
+; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabihf | FileCheck %s --check-prefix=CHECK-THUMB --check-prefix=CHECK-THUMB-LE
+; RUN: llc < %s -mtriple=armebv7 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
+; RUN: llc < %s -mtriple=thumbebv7-none-linux-gnueabihf | FileCheck %s --check-prefix=CHECK-THUMB --check-prefix=CHECK-THUMB-BE
define i64 @test1(i64* %ptr, i64 %val) {
; CHECK-LABEL: test1:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: adds [[REG3:(r[0-9]?[02468])]], [[REG1]]
-; CHECK: adc [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-LE: adds [[REG3:(r[0-9]?[02468])]], [[REG1]]
+; CHECK-LE: adc [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE: adds [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE: adc [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -15,8 +19,10 @@ define i64 @test1(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test1:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: adds.w [[REG3:[a-z0-9]+]], [[REG1]]
-; CHECK-THUMB: adc.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-LE: adds.w [[REG3:[a-z0-9]+]], [[REG1]]
+; CHECK-THUMB-LE: adc.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE: adds.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE: adc.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -30,8 +36,10 @@ define i64 @test2(i64* %ptr, i64 %val) {
; CHECK-LABEL: test2:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: subs [[REG3:(r[0-9]?[02468])]], [[REG1]]
-; CHECK: sbc [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-LE: subs [[REG3:(r[0-9]?[02468])]], [[REG1]]
+; CHECK-LE: sbc [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE: subs [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE: sbc [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -40,8 +48,10 @@ define i64 @test2(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test2:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: subs.w [[REG3:[a-z0-9]+]], [[REG1]]
-; CHECK-THUMB: sbc.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-LE: subs.w [[REG3:[a-z0-9]+]], [[REG1]]
+; CHECK-THUMB-LE: sbc.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE: subs.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE: sbc.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -55,8 +65,10 @@ define i64 @test3(i64* %ptr, i64 %val) {
; CHECK-LABEL: test3:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: and [[REG3:(r[0-9]?[02468])]], [[REG1]]
-; CHECK: and [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-LE-DAG: and [[REG3:(r[0-9]?[02468])]], [[REG1]]
+; CHECK-LE-DAG: and [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE-DAG: and [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE-DAG: and [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -65,8 +77,10 @@ define i64 @test3(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test3:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: and.w [[REG3:[a-z0-9]+]], [[REG1]]
-; CHECK-THUMB: and.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-LE-DAG: and.w [[REG3:[a-z0-9]+]], [[REG1]]
+; CHECK-THUMB-LE-DAG: and.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE-DAG: and.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE-DAG: and.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -80,8 +94,10 @@ define i64 @test4(i64* %ptr, i64 %val) {
; CHECK-LABEL: test4:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: orr [[REG3:(r[0-9]?[02468])]], [[REG1]]
-; CHECK: orr [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-LE-DAG: orr [[REG3:(r[0-9]?[02468])]], [[REG1]]
+; CHECK-LE-DAG: orr [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE-DAG: orr [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE-DAG: orr [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -90,8 +106,10 @@ define i64 @test4(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test4:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: orr.w [[REG3:[a-z0-9]+]], [[REG1]]
-; CHECK-THUMB: orr.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-LE-DAG: orr.w [[REG3:[a-z0-9]+]], [[REG1]]
+; CHECK-THUMB-LE-DAG: orr.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE-DAG: orr.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE-DAG: orr.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -105,8 +123,10 @@ define i64 @test5(i64* %ptr, i64 %val) {
; CHECK-LABEL: test5:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: eor [[REG3:(r[0-9]?[02468])]], [[REG1]]
-; CHECK: eor [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-LE-DAG: eor [[REG3:(r[0-9]?[02468])]], [[REG1]]
+; CHECK-LE-DAG: eor [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE-DAG: eor [[REG4:(r[0-9]?[13579])]], [[REG2]]
+; CHECK-BE-DAG: eor [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -115,8 +135,10 @@ define i64 @test5(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test5:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: eor.w [[REG3:[a-z0-9]+]], [[REG1]]
-; CHECK-THUMB: eor.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-LE-DAG: eor.w [[REG3:[a-z0-9]+]], [[REG1]]
+; CHECK-THUMB-LE-DAG: eor.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE-DAG: eor.w [[REG4:[a-z0-9]+]], [[REG2]]
+; CHECK-THUMB-BE-DAG: eor.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -149,10 +171,14 @@ define i64 @test6(i64* %ptr, i64 %val) {
define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK-LABEL: test7:
-; CHECK: dmb {{ish$}}
+; CHECK-DAG: mov [[VAL1LO:r[0-9]+]], r1
+; CHECK-DAG: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: cmp [[REG1]]
-; CHECK: cmpeq [[REG2]]
+; CHECK-LE-DAG: eor [[MISMATCH_LO:r[0-9]+]], [[REG1]], [[VAL1LO]]
+; CHECK-LE-DAG: eor [[MISMATCH_HI:r[0-9]+]], [[REG2]], r2
+; CHECK-BE-DAG: eor [[MISMATCH_LO:r[0-9]+]], [[REG2]], r2
+; CHECK-BE-DAG: eor [[MISMATCH_HI:r[0-9]+]], [[REG1]], r1
+; CHECK: orrs {{r[0-9]+}}, [[MISMATCH_LO]], [[MISMATCH_HI]]
; CHECK: bne
; CHECK: strexd {{[a-z0-9]+}}, {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
; CHECK: cmp
@@ -162,16 +188,19 @@ define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK-THUMB-LABEL: test7:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: cmp [[REG1]]
-; CHECK-THUMB: it eq
-; CHECK-THUMB: cmpeq [[REG2]]
+; CHECK-THUMB-LE-DAG: eor.w [[MISMATCH_LO:[a-z0-9]+]], [[REG1]], r2
+; CHECK-THUMB-LE-DAG: eor.w [[MISMATCH_HI:[a-z0-9]+]], [[REG2]], r3
+; CHECK-THUMB-BE-DAG: eor.w [[MISMATCH_HI:[a-z0-9]+]], [[REG1]], r2
+; CHECK-THUMB-BE-DAG: eor.w [[MISMATCH_LO:[a-z0-9]+]], [[REG2]], r3
+; CHECK-THUMB-LE: orrs [[MISMATCH_HI]], [[MISMATCH_LO]]
; CHECK-THUMB: bne
; CHECK-THUMB: strexd {{[a-z0-9]+}}, {{[a-z0-9]+}}, {{[a-z0-9]+}}
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
; CHECK-THUMB: dmb {{ish$}}
- %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst
+ %pair = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst seq_cst
+ %r = extractvalue { i64, i1 } %pair, 0
ret i64 %r
}
@@ -216,9 +245,20 @@ define i64 @test10(i64* %ptr, i64 %val) {
; CHECK-LABEL: test10:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
-; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
-; CHECK: blt
+; CHECK: mov [[CARRY_LO:[a-z0-9]+]], #0
+; CHECK: mov [[CARRY_HI:[a-z0-9]+]], #0
+; CHECK: mov [[OUT_HI:[a-z0-9]+]], r2
+; CHECK-LE: cmp [[REG1]], r1
+; CHECK-BE: cmp [[REG2]], r2
+; CHECK: movwls [[CARRY_LO]], #1
+; CHECK-LE: cmp [[REG2]], r2
+; CHECK-BE: cmp [[REG1]], r1
+; CHECK: movwle [[CARRY_HI]], #1
+; CHECK: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK: cmp [[CARRY_HI]], #0
+; CHECK: movne [[OUT_HI]], [[REG2]]
+; CHECK: mov [[OUT_LO:[a-z0-9]+]], r1
+; CHECK: movne [[OUT_LO]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -227,9 +267,20 @@ define i64 @test10(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test10:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
-; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
-; CHECK-THUMB: blt
+; CHECK-THUMB: mov.w [[CARRY_LO:[a-z0-9]+|lr]], #0
+; CHECK-THUMB: movs [[CARRY_HI:[a-z0-9]+|lr]], #0
+; CHECK-THUMB-LE: cmp [[REG1]], r2
+; CHECK-THUMB-BE: cmp [[REG2]], r3
+; CHECK-THUMB: movls.w [[CARRY_LO]], #1
+; CHECK-THUMB-LE: cmp [[REG2]], r3
+; CHECK-THUMB-BE: cmp [[REG1]], r2
+; CHECK-THUMB: movle [[CARRY_HI]], #1
+; CHECK-THUMB: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK-THUMB: mov [[OUT_HI:[a-z0-9]+]], r3
+; CHECK-THUMB: cmp [[CARRY_HI]], #0
+; CHECK-THUMB: mov [[OUT_LO:[a-z0-9]+]], r2
+; CHECK-THUMB: movne [[OUT_HI]], [[REG2]]
+; CHECK-THUMB: movne [[OUT_LO]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -243,21 +294,42 @@ define i64 @test11(i64* %ptr, i64 %val) {
; CHECK-LABEL: test11:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
-; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
-; CHECK: blo
+; CHECK: mov [[CARRY_LO:[a-z0-9]+]], #0
+; CHECK: mov [[CARRY_HI:[a-z0-9]+]], #0
+; CHECK: mov [[OUT_HI:[a-z0-9]+]], r2
+; CHECK-LE: cmp [[REG1]], r1
+; CHECK-BE: cmp [[REG2]], r2
+; CHECK: movwls [[CARRY_LO]], #1
+; CHECK-LE: cmp [[REG2]], r2
+; CHECK-BE: cmp [[REG1]], r1
+; CHECK: movwls [[CARRY_HI]], #1
+; CHECK: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK: cmp [[CARRY_HI]], #0
+; CHECK: movne [[OUT_HI]], [[REG2]]
+; CHECK: mov [[OUT_LO:[a-z0-9]+]], r1
+; CHECK: movne [[OUT_LO]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
; CHECK: dmb {{ish$}}
-
; CHECK-THUMB-LABEL: test11:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
-; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
-; CHECK-THUMB: blo
+; CHECK-THUMB: mov.w [[CARRY_LO:[a-z0-9]+]], #0
+; CHECK-THUMB: movs [[CARRY_HI:[a-z0-9]+]], #0
+; CHECK-THUMB-LE: cmp [[REG1]], r2
+; CHECK-THUMB-BE: cmp [[REG2]], r3
+; CHECK-THUMB: movls.w [[CARRY_LO]], #1
+; CHECK-THUMB-LE: cmp [[REG2]], r3
+; CHECK-THUMB-BE: cmp [[REG1]], r2
+; CHECK-THUMB: movls [[CARRY_HI]], #1
+; CHECK-THUMB: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK-THUMB: mov [[OUT_HI:[a-z0-9]+]], r3
+; CHECK-THUMB: cmp [[CARRY_HI]], #0
+; CHECK-THUMB: mov [[OUT_LO:[a-z0-9]+]], r2
+; CHECK-THUMB: movne [[OUT_HI]], [[REG2]]
+; CHECK-THUMB: movne [[OUT_LO]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -271,9 +343,20 @@ define i64 @test12(i64* %ptr, i64 %val) {
; CHECK-LABEL: test12:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
-; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
-; CHECK: bge
+; CHECK: mov [[CARRY_LO:[a-z0-9]+]], #0
+; CHECK: mov [[CARRY_HI:[a-z0-9]+]], #0
+; CHECK: mov [[OUT_HI:[a-z0-9]+]], r2
+; CHECK-LE: cmp [[REG1]], r1
+; CHECK-BE: cmp [[REG2]], r2
+; CHECK: movwhi [[CARRY_LO]], #1
+; CHECK-LE: cmp [[REG2]], r2
+; CHECK-BE: cmp [[REG1]], r1
+; CHECK: movwgt [[CARRY_HI]], #1
+; CHECK: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK: cmp [[CARRY_HI]], #0
+; CHECK: movne [[OUT_HI]], [[REG2]]
+; CHECK: mov [[OUT_LO:[a-z0-9]+]], r1
+; CHECK: movne [[OUT_LO]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -282,9 +365,20 @@ define i64 @test12(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test12:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
-; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
-; CHECK-THUMB: bge
+; CHECK-THUMB: mov.w [[CARRY_LO:[a-z0-9]+]], #0
+; CHECK-THUMB: movs [[CARRY_HI:[a-z0-9]+]], #0
+; CHECK-THUMB-LE: cmp [[REG1]], r2
+; CHECK-THUMB-BE: cmp [[REG2]], r3
+; CHECK-THUMB: movhi.w [[CARRY_LO]], #1
+; CHECK-THUMB-LE: cmp [[REG2]], r3
+; CHECK-THUMB-BE: cmp [[REG1]], r2
+; CHECK-THUMB: movgt [[CARRY_HI]], #1
+; CHECK-THUMB: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK-THUMB: mov [[OUT_HI:[a-z0-9]+]], r3
+; CHECK-THUMB: cmp [[CARRY_HI]], #0
+; CHECK-THUMB: mov [[OUT_LO:[a-z0-9]+]], r2
+; CHECK-THUMB: movne [[OUT_HI]], [[REG2]]
+; CHECK-THUMB: movne [[OUT_LO]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
@@ -298,9 +392,20 @@ define i64 @test13(i64* %ptr, i64 %val) {
; CHECK-LABEL: test13:
; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
-; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
-; CHECK: bhs
+; CHECK: mov [[CARRY_LO:[a-z0-9]+]], #0
+; CHECK: mov [[CARRY_HI:[a-z0-9]+]], #0
+; CHECK: mov [[OUT_HI:[a-z0-9]+]], r2
+; CHECK-LE: cmp [[REG1]], r1
+; CHECK-BE: cmp [[REG2]], r2
+; CHECK: movwhi [[CARRY_LO]], #1
+; CHECK-LE: cmp [[REG2]], r2
+; CHECK-BE: cmp [[REG1]], r1
+; CHECK: movwhi [[CARRY_HI]], #1
+; CHECK: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK: cmp [[CARRY_HI]], #0
+; CHECK: movne [[OUT_HI]], [[REG2]]
+; CHECK: mov [[OUT_LO:[a-z0-9]+]], r1
+; CHECK: movne [[OUT_LO]], [[REG1]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
@@ -309,9 +414,20 @@ define i64 @test13(i64* %ptr, i64 %val) {
; CHECK-THUMB-LABEL: test13:
; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
-; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
-; CHECK-THUMB: bhs
+; CHECK-THUMB: mov.w [[CARRY_LO:[a-z0-9]+]], #0
+; CHECK-THUMB: movs [[CARRY_HI:[a-z0-9]+]], #0
+; CHECK-THUMB-LE: cmp [[REG1]], r2
+; CHECK-THUMB-BE: cmp [[REG2]], r3
+; CHECK-THUMB: movhi.w [[CARRY_LO]], #1
+; CHECK-THUMB-LE: cmp [[REG2]], r3
+; CHECK-THUMB-BE: cmp [[REG1]], r2
+; CHECK-THUMB: movhi [[CARRY_HI]], #1
+; CHECK-THUMB: moveq [[CARRY_HI]], [[CARRY_LO]]
+; CHECK-THUMB: mov [[OUT_HI:[a-z0-9]+]], r3
+; CHECK-THUMB: cmp [[CARRY_HI]], #0
+; CHECK-THUMB: mov [[OUT_LO:[a-z0-9]+]], r2
+; CHECK-THUMB: movne [[OUT_HI]], [[REG2]]
+; CHECK-THUMB: movne [[OUT_LO]], [[REG1]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
diff --git a/test/CodeGen/ARM/atomic-cmp.ll b/test/CodeGen/ARM/atomic-cmp.ll
index 51ada693d0b8..629b16d86ab5 100644
--- a/test/CodeGen/ARM/atomic-cmp.ll
+++ b/test/CodeGen/ARM/atomic-cmp.ll
@@ -10,6 +10,7 @@ define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
; T2-LABEL: t:
; T2: ldrexb
; T2: strexb
- %tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic
- ret i8 %tmp0
+ %tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic monotonic
+ %tmp1 = extractvalue { i8, i1 } %tmp0, 0
+ ret i8 %tmp1
}
diff --git a/test/CodeGen/ARM/atomic-cmpxchg.ll b/test/CodeGen/ARM/atomic-cmpxchg.ll
new file mode 100644
index 000000000000..4b79fa25145b
--- /dev/null
+++ b/test/CodeGen/ARM/atomic-cmpxchg.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -mtriple=arm-linux-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-ARM
+; RUN: llc < %s -mtriple=thumb-linux-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-THUMB
+
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-ARMV7
+; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-THUMBV7
+
+define zeroext i1 @test_cmpxchg_res_i8(i8* %addr, i8 %desired, i8 zeroext %new) {
+entry:
+ %0 = cmpxchg i8* %addr, i8 %desired, i8 %new monotonic monotonic
+ %1 = extractvalue { i8, i1 } %0, 1
+ ret i1 %1
+}
+
+; CHECK-ARM-LABEL: test_cmpxchg_res_i8
+; CHECK-ARM: bl __sync_val_compare_and_swap_1
+; CHECK-ARM: mov [[REG:r[0-9]+]], #0
+; CHECK-ARM: cmp r0, {{r[0-9]+}}
+; CHECK-ARM: moveq [[REG]], #1
+; CHECK-ARM: mov r0, [[REG]]
+
+; CHECK-THUMB-LABEL: test_cmpxchg_res_i8
+; CHECK-THUMB: bl __sync_val_compare_and_swap_1
+; CHECK-THUMB: mov [[R1:r[0-9]+]], r0
+; CHECK-THUMB: movs r0, #1
+; CHECK-THUMB: movs [[R2:r[0-9]+]], #0
+; CHECK-THUMB: cmp [[R1]], {{r[0-9]+}}
+; CHECK-THUMB: beq
+; CHECK-THUMB: mov r0, [[R2]]
+
+; CHECK-ARMV7-LABEL: test_cmpxchg_res_i8
+; CHECK-ARMV7: ldrexb [[R3:r[0-9]+]], [r0]
+; CHECK-ARMV7: mov [[R1:r[0-9]+]], #0
+; CHECK-ARMV7: cmp [[R3]], {{r[0-9]+}}
+; CHECK-ARMV7: bne
+; CHECK-ARMV7: strexb [[R3]], {{r[0-9]+}}, [{{r[0-9]+}}]
+; CHECK-ARMV7: mov [[R1]], #1
+; CHECK-ARMV7: cmp [[R3]], #0
+; CHECK-ARMV7: bne
+; CHECK-ARMV7: mov r0, [[R1]]
+
+; CHECK-THUMBV7-LABEL: test_cmpxchg_res_i8
+; CHECK-THUMBV7: ldrexb [[R3:r[0-9]+]], [r0]
+; CHECK-THUMBV7: cmp [[R3]], {{r[0-9]+}}
+; CHECK-THUMBV7: movne r0, #0
+; CHECK-THUMBV7: bxne lr
+; CHECK-THUMBV7: strexb [[R3]], {{r[0-9]+}}, [{{r[0-9]+}}]
+; CHECK-THUMBV7: cmp [[R3]], #0
+; CHECK-THUMBV7: itt eq
+; CHECK-THUMBV7: moveq r0, #1
+; CHECK-THUMBV7: bxeq lr
diff --git a/test/CodeGen/ARM/atomic-load-store.ll b/test/CodeGen/ARM/atomic-load-store.ll
index 53c7184d2a84..49342d2d1bfe 100644
--- a/test/CodeGen/ARM/atomic-load-store.ll
+++ b/test/CodeGen/ARM/atomic-load-store.ll
@@ -2,16 +2,16 @@
; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=ARM
; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s -check-prefix=THUMBTWO
; RUN: llc < %s -mtriple=thumbv6-apple-ios | FileCheck %s -check-prefix=THUMBONE
-; RUN llc < %s -mtriple=armv4-apple-ios | FileCheck %s -check-prefix=ARMV4
+; RUN: llc < %s -mtriple=armv4-apple-ios | FileCheck %s -check-prefix=ARMV4
define void @test1(i32* %ptr, i32 %val1) {
-; ARM: test1
+; ARM-LABEL: test1
; ARM: dmb {{ish$}}
; ARM-NEXT: str
; ARM-NEXT: dmb {{ish$}}
-; THUMBONE: test1
+; THUMBONE-LABEL: test1
; THUMBONE: __sync_lock_test_and_set_4
-; THUMBTWO: test1
+; THUMBTWO-LABEL: test1
; THUMBTWO: dmb {{ish$}}
; THUMBTWO-NEXT: str
; THUMBTWO-NEXT: dmb {{ish$}}
@@ -20,12 +20,12 @@ define void @test1(i32* %ptr, i32 %val1) {
}
define i32 @test2(i32* %ptr) {
-; ARM: test2
+; ARM-LABEL: test2
; ARM: ldr
; ARM-NEXT: dmb {{ish$}}
-; THUMBONE: test2
+; THUMBONE-LABEL: test2
; THUMBONE: __sync_val_compare_and_swap_4
-; THUMBTWO: test2
+; THUMBTWO-LABEL: test2
; THUMBTWO: ldr
; THUMBTWO-NEXT: dmb {{ish$}}
%val = load atomic i32* %ptr seq_cst, align 4
@@ -33,22 +33,35 @@ define i32 @test2(i32* %ptr) {
}
define void @test3(i8* %ptr1, i8* %ptr2) {
-; ARM: test3
+; ARM-LABEL: test3
+; ARM-NOT: dmb
; ARM: ldrb
+; ARM-NOT: dmb
; ARM: strb
-; THUMBTWO: test3
+; ARM-NOT: dmb
+; ARM: bx lr
+
+; THUMBTWO-LABEL: test3
+; THUMBTWO-NOT: dmb
; THUMBTWO: ldrb
+; THUMBTWO-NOT: dmb
; THUMBTWO: strb
-; THUMBONE: test3
+; THUMBTWO-NOT: dmb
+; THUMBTWO: bx lr
+
+; THUMBONE-LABEL: test3
+; THUMBONE-NOT: dmb
; THUMBONE: ldrb
+; THUMBONE-NOT: dmb
; THUMBONE: strb
+; THUMBONE-NOT: dmb
%val = load atomic i8* %ptr1 unordered, align 1
store atomic i8 %val, i8* %ptr2 unordered, align 1
ret void
}
define void @test4(i8* %ptr1, i8* %ptr2) {
-; THUMBONE: test4
+; THUMBONE-LABEL: test4
; THUMBONE: ___sync_val_compare_and_swap_1
; THUMBONE: ___sync_lock_test_and_set_1
%val = load atomic i8* %ptr1 seq_cst, align 1
@@ -57,14 +70,14 @@ define void @test4(i8* %ptr1, i8* %ptr2) {
}
define i64 @test_old_load_64bit(i64* %p) {
-; ARMV4: test_old_load_64bit
+; ARMV4-LABEL: test_old_load_64bit
; ARMV4: ___sync_val_compare_and_swap_8
%1 = load atomic i64* %p seq_cst, align 8
ret i64 %1
}
define void @test_old_store_64bit(i64* %p, i64 %v) {
-; ARMV4: test_old_store_64bit
+; ARMV4-LABEL: test_old_store_64bit
; ARMV4: ___sync_lock_test_and_set_8
store atomic i64 %v, i64* %p seq_cst, align 8
ret void
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index 9a79c9fd7b1b..b988242ae57e 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -194,3 +194,42 @@ entry:
%0 = atomicrmw add i32* %p, i32 1 monotonic
ret i32 %0
}
+
+define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: test_cmpxchg_fail_order:
+
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %oldval = extractvalue { i32, i1 } %pair, 0
+; CHECK: dmb ish
+; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
+; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
+; CHECK: cmp [[OLDVAL]], r1
+; CHECK: bxne lr
+; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
+; CHECK: cmp [[SUCCESS]], #0
+; CHECK: bne [[LOOP_BB]]
+; CHECK: dmb ish
+; CHECK: bx lr
+
+ ret i32 %oldval
+}
+
+define i32 @test_cmpxchg_fail_order1(i32 *%addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: test_cmpxchg_fail_order1:
+
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new acquire acquire
+ %oldval = extractvalue { i32, i1 } %pair, 0
+; CHECK-NOT: dmb ish
+; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
+; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
+; CHECK: cmp [[OLDVAL]], r1
+; CHECK: bne [[END_BB:\.?LBB[0-9]+_[0-9]+]]
+; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
+; CHECK: cmp [[SUCCESS]], #0
+; CHECK: bne [[LOOP_BB]]
+; CHECK: [[END_BB]]:
+; CHECK: dmb ish
+; CHECK: bx lr
+
+ ret i32 %oldval
+}
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index 3f93929fd19d..7072aaaf733d 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -1,5 +1,7 @@
-; RUN: llc -mtriple=armv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=thumbv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=armv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE --check-prefix=CHECK-ARM --check-prefix=CHECK-ARM-LE
+; RUN: llc -mtriple=armebv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE --check-prefix=CHECK-ARM --check-prefix=CHECK-ARM-BE
+; RUN: llc -mtriple=thumbv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE --check-prefix=CHECK-THUMB --check-prefix=CHECK-THUMB-LE
+; RUN: llc -mtriple=thumbebv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE --check-prefix=CHECK-THUMB --check-prefix=CHECK-THUMB-BE
@var8 = global i8 0
@var16 = global i16 0
@@ -15,7 +17,7 @@ define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -38,7 +40,7 @@ define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -61,7 +63,7 @@ define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -75,7 +77,7 @@ define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
+define void @test_atomic_load_add_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_add_i64:
%old = atomicrmw add i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
@@ -84,20 +86,22 @@ define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: adds [[NEW1:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: adc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-LE-NEXT: adds{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK-LE-NEXT: adc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-BE-NEXT: adds{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-NEXT: adc{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
@@ -109,7 +113,7 @@ define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -132,7 +136,7 @@ define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -155,7 +159,7 @@ define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -169,7 +173,7 @@ define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
+define void @test_atomic_load_sub_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_sub_i64:
%old = atomicrmw sub i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
@@ -178,20 +182,22 @@ define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: subs [[NEW1:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: sbc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-LE-NEXT: subs{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK-LE-NEXT: sbc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-BE-NEXT: subs{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-NEXT: sbc{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
@@ -203,7 +209,7 @@ define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -226,7 +232,7 @@ define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -249,7 +255,7 @@ define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -263,7 +269,7 @@ define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
+define void @test_atomic_load_and_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_and_i64:
%old = atomicrmw and i64* @var64, i64 %offset acquire
; CHECK-NOT: dmb
@@ -272,20 +278,22 @@ define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: and{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: and{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
-; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-LE-DAG: and{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK-LE-DAG: and{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-DAG: and{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-DAG: and{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
@@ -297,7 +305,7 @@ define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -320,7 +328,7 @@ define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -343,7 +351,7 @@ define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -357,7 +365,7 @@ define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
+define void @test_atomic_load_or_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_or_i64:
%old = atomicrmw or i64* @var64, i64 %offset release
; CHECK-NOT: dmb
@@ -366,20 +374,22 @@ define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: orr{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: orr{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
-; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-LE-DAG: orr{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK-LE-DAG: orr{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-DAG: orr{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-DAG: orr{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
@@ -391,7 +401,7 @@ define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -414,7 +424,7 @@ define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -437,7 +447,7 @@ define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
@@ -451,7 +461,7 @@ define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
+define void @test_atomic_load_xor_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_xor_i64:
%old = atomicrmw xor i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
@@ -460,20 +470,22 @@ define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: eor{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: eor{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
-; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-LE-DAG: eor{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK-LE-DAG: eor{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-DAG: eor{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
+; CHECK-BE-DAG: eor{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
+; CHECK: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
@@ -485,7 +497,7 @@ define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
@@ -507,7 +519,7 @@ define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
@@ -529,7 +541,7 @@ define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
@@ -542,7 +554,7 @@ define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
+define void @test_atomic_load_xchg_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_xchg_i64:
%old = atomicrmw xchg i64* @var64, i64 %offset acquire
; CHECK-NOT: dmb
@@ -551,7 +563,7 @@ define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
@@ -560,28 +572,28 @@ define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
-define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
+define i8 @test_atomic_load_min_i8(i8 signext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_min_i8:
%old = atomicrmw min i8* @var8, i8 %offset acquire
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
-; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK-DAG: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
+; CHECK-DAG: movt [[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLDX]], r0
-; Thumb mode: it ge
-; CHECK: movge r[[OLDX]], r0
-; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
+; Thumb mode: it le
+; CHECK: movle r[[OLDX]], r[[OLD]]
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[OLDX]], {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -591,23 +603,23 @@ define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
ret i8 %old
}
-define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
+define i16 @test_atomic_load_min_i16(i16 signext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_min_i16:
%old = atomicrmw min i16* @var16, i16 %offset release
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
-; CHECK: movt r[[ADDR]], :upper16:var16
+; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var16
+; CHECK: movt [[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexh r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLDX]], r0
-; Thumb mode: it ge
-; CHECK: movge r[[OLDX]], r0
-; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
+; Thumb mode: it le
+; CHECK: movle r[[OLDX]], r[[OLD]]
+; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r[[OLDX]], {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -626,13 +638,13 @@ define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
; CHECK-NEXT: cmp r[[OLD]], r0
-; Thumb mode: it lt
-; CHECK: movlt r[[NEW]], r[[OLD]]
+; Thumb mode: it le
+; CHECK: movle r[[NEW]], r[[OLD]]
; CHECK-NEXT: strex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
@@ -643,7 +655,7 @@ define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
+define void @test_atomic_load_min_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_min_i64:
%old = atomicrmw min i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
@@ -652,41 +664,54 @@ define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
-; CHECK-NEXT: blt .LBB{{[0-9]+}}_3
-; CHECK-NEXT: BB#2:
-; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
+; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
+; CHECK-ARM-LE: cmp [[OLD1]], r0
+; CHECK-ARM-LE: movwls [[LOCARRY]], #1
+; CHECK-ARM-LE: cmp [[OLD2]], r1
+; CHECK-ARM-LE: movwle [[HICARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD2]], r1
+; CHECK-ARM-BE: movwls [[LOCARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD1]], r0
+; CHECK-ARM-BE: movwle [[HICARRY]], #1
+; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
+; CHECK-ARM: cmp [[HICARRY]], #0
+; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
+; CHECK-ARM: movne [[MINHI]], [[OLD2]]
+; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
+; CHECK-ARM: movne [[MINLO]], [[OLD1]]
+; CHECK-ARM: stlexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
+; CHECK-THUMB: stlexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
-define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
+define i8 @test_atomic_load_max_i8(i8 signext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_max_i8:
%old = atomicrmw max i8* @var8, i8 %offset seq_cst
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
-; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
+; CHECK: movt [[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLDX]], r0
-; Thumb mode: it le
-; CHECK: movle r[[OLDX]], r0
-; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
+; Thumb mode: it gt
+; CHECK: movgt r[[OLDX]], r[[OLD]]
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[OLDX]], {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -696,7 +721,7 @@ define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
ret i8 %old
}
-define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
+define i16 @test_atomic_load_max_i16(i16 signext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_max_i16:
%old = atomicrmw max i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
@@ -705,13 +730,13 @@ define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLDX]], r0
-; Thumb mode: it le
-; CHECK: movle r[[OLDX]], r0
+; Thumb mode: it gt
+; CHECK: movgt r[[OLDX]], r[[OLD]]
; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
@@ -731,7 +756,7 @@ define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
@@ -748,7 +773,7 @@ define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
+define void @test_atomic_load_max_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_max_i64:
%old = atomicrmw max i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
@@ -757,41 +782,54 @@ define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
-; CHECK-NEXT: bge .LBB{{[0-9]+}}_3
-; CHECK-NEXT: BB#2:
-; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
+; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
+; CHECK-ARM-LE: cmp [[OLD1]], r0
+; CHECK-ARM-LE: movwhi [[LOCARRY]], #1
+; CHECK-ARM-LE: cmp [[OLD2]], r1
+; CHECK-ARM-LE: movwgt [[HICARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD2]], r1
+; CHECK-ARM-BE: movwhi [[LOCARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD1]], r0
+; CHECK-ARM-BE: movwgt [[HICARRY]], #1
+; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
+; CHECK-ARM: cmp [[HICARRY]], #0
+; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
+; CHECK-ARM: movne [[MINHI]], [[OLD2]]
+; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
+; CHECK-ARM: movne [[MINLO]], [[OLD1]]
+; CHECK-ARM: strexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
+; CHECK-THUMB: strexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
-define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
+define i8 @test_atomic_load_umin_i8(i8 zeroext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umin_i8:
%old = atomicrmw umin i8* @var8, i8 %offset monotonic
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
-; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
+; CHECK: movt [[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
; CHECK-NEXT: cmp r[[OLD]], r0
-; Thumb mode: it lo
-; CHECK: movlo r[[NEW]], r[[OLD]]
-; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; Thumb mode: it ls
+; CHECK: movls r[[NEW]], r[[OLD]]
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -801,23 +839,23 @@ define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
ret i8 %old
}
-define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
+define i16 @test_atomic_load_umin_i16(i16 zeroext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umin_i16:
%old = atomicrmw umin i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
-; CHECK: movt r[[ADDR]], :upper16:var16
+; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var16
+; CHECK: movt [[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexh r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
; CHECK-NEXT: cmp r[[OLD]], r0
-; Thumb mode: it lo
-; CHECK: movlo r[[NEW]], r[[OLD]]
-; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; Thumb mode: it ls
+; CHECK: movls r[[NEW]], r[[OLD]]
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -836,13 +874,13 @@ define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
; CHECK-NEXT: cmp r[[OLD]], r0
-; Thumb mode: it lo
-; CHECK: movlo r[[NEW]], r[[OLD]]
+; Thumb mode: it ls
+; CHECK: movls r[[NEW]], r[[OLD]]
; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
@@ -853,50 +891,63 @@ define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
+define void @test_atomic_load_umin_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umin_i64:
- %old = atomicrmw umin i64* @var64, i64 %offset acq_rel
+ %old = atomicrmw umin i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
-; CHECK-NEXT: blo .LBB{{[0-9]+}}_3
-; CHECK-NEXT: BB#2:
-; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
+; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
+; CHECK-ARM-LE: cmp [[OLD1]], r0
+; CHECK-ARM-LE: movwls [[LOCARRY]], #1
+; CHECK-ARM-LE: cmp [[OLD2]], r1
+; CHECK-ARM-LE: movwls [[HICARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD2]], r1
+; CHECK-ARM-BE: movwls [[LOCARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD1]], r0
+; CHECK-ARM-BE: movwls [[HICARRY]], #1
+; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
+; CHECK-ARM: cmp [[HICARRY]], #0
+; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
+; CHECK-ARM: movne [[MINHI]], [[OLD2]]
+; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
+; CHECK-ARM: movne [[MINLO]], [[OLD1]]
+; CHECK-ARM: stlexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
+; CHECK-THUMB: stlexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
-define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
+define i8 @test_atomic_load_umax_i8(i8 zeroext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umax_i8:
%old = atomicrmw umax i8* @var8, i8 %offset acq_rel
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
-; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
+; CHECK: movt [[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
; CHECK-NEXT: cmp r[[OLD]], r0
; Thumb mode: it hi
; CHECK: movhi r[[NEW]], r[[OLD]]
-; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -906,23 +957,23 @@ define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
ret i8 %old
}
-define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
+define i16 @test_atomic_load_umax_i16(i16 zeroext %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umax_i16:
%old = atomicrmw umax i16* @var16, i16 %offset monotonic
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
-; CHECK: movt r[[ADDR]], :upper16:var16
+; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var16
+; CHECK: movt [[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrexh r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
; CHECK-NEXT: cmp r[[OLD]], r0
; Thumb mode: it hi
; CHECK: movhi r[[NEW]], r[[OLD]]
-; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -941,7 +992,7 @@ define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
@@ -958,50 +1009,64 @@ define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
ret i32 %old
}
-define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
+define void @test_atomic_load_umax_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umax_i64:
- %old = atomicrmw umax i64* @var64, i64 %offset release
+ %old = atomicrmw umax i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
; r0, r1 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
-; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
-; CHECK-NEXT: bhs .LBB{{[0-9]+}}_3
-; CHECK-NEXT: BB#2:
-; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
+; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
+; CHECK-ARM-LE: cmp [[OLD1]], r0
+; CHECK-ARM-LE: movwhi [[LOCARRY]], #1
+; CHECK-ARM-LE: cmp [[OLD2]], r1
+; CHECK-ARM-LE: movwhi [[HICARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD2]], r1
+; CHECK-ARM-BE: movwhi [[LOCARRY]], #1
+; CHECK-ARM-BE: cmp [[OLD1]], r0
+; CHECK-ARM-BE: movwhi [[HICARRY]], #1
+; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
+; CHECK-ARM: cmp [[HICARRY]], #0
+; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
+; CHECK-ARM: movne [[MINHI]], [[OLD2]]
+; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
+; CHECK-ARM: movne [[MINLO]], [[OLD1]]
+; CHECK-ARM: stlexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
+; CHECK-THUMB: stlexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD1]]
-; CHECK-NEXT: mov r1, r[[OLD2]]
- ret i64 %old
+; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
-define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
+define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i8:
- %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
+ %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+ %old = extractvalue { i8, i1 } %pair, 0
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
; CHECK: movt r[[ADDR]], :upper16:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLD]], r0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
-; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
+; CHECK: strexb [[STATUS:r[0-9]+]], r1, {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -1011,23 +1076,24 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
ret i8 %old
}
-define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
+define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i16:
- %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
+ %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+ %old = extractvalue { i16, i1 } %pair, 0
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
; CHECK: movt r[[ADDR]], :upper16:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLD]], r0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
-; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
+; CHECK: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -1037,59 +1103,65 @@ define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
ret i16 %old
}
-define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
+define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i32:
- %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
+ %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
+ %old = extractvalue { i32, i1 } %pair, 0
+ store i32 %old, i32* @var32
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
; CHECK: movt r[[ADDR]], :upper16:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
; r0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp r[[OLD]], r0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r1 is a reasonable guess.
-; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
+; CHECK: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, r[[OLD]]
- ret i32 %old
+; CHECK: str{{(.w)?}} r[[OLD]],
+ ret void
}
-define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
+define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK-LABEL: test_atomic_cmpxchg_i64:
- %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
+ %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+ %old = extractvalue { i64, i1 } %pair, 0
; CHECK-NOT: dmb
; CHECK-NOT: mcr
; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
; CHECK: movt r[[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldrexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
+; CHECK: ldrexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
; r0 and r1 below are reasonable guesses but could change: they certainly come into
; the function there.
-; CHECK-NEXT: cmp [[OLD1]], r0
-; Thumb mode: it eq
-; CHECK: cmpeq [[OLD2]], r1
+; CHECK-LE-DAG: eor{{(\.w)?}} [[MISMATCH_LO:r[0-9]+|lr]], [[OLD1]], r0
+; CHECK-LE-DAG: eor{{(\.w)?}} [[MISMATCH_HI:r[0-9]+|lr]], [[OLD2]], r1
+; CHECK-LE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_LO]], [[MISMATCH_HI]]
+; CHECK-BE-DAG: eor{{(\.w)?}} [[MISMATCH_HI:r[0-9]+|lr]], [[OLD2]], r1
+; CHECK-BE-DAG: eor{{(\.w)?}} [[MISMATCH_LO:r[0-9]+|lr]], [[OLD1]], r0
+; CHECK-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]]
; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
; CHECK-NEXT: BB#2:
; As above, r2 and r3 are reasonable guesses.
-; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
+; CHECK: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: mov r0, [[OLD1]]
-; CHECK-NEXT: mov r1, [[OLD2]]
- ret i64 %old
+; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
+ store i64 %old, i64* @var64
+ ret void
}
define i8 @test_atomic_load_monotonic_i8() nounwind {
@@ -1114,7 +1186,8 @@ define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
%val = load atomic i8* %addr monotonic, align 1
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: ldrb r0, [r0, r2]
+; CHECK-LE: ldrb r0, [r0, r2]
+; CHECK-BE: ldrb r0, [r1, r3]
; CHECK-NOT: dmb
; CHECK-NOT: mcr
@@ -1181,7 +1254,8 @@ define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind
%val = load atomic i32* %addr monotonic, align 4
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: ldr r0, [r0, r2]
+; CHECK-LE: ldr r0, [r0, r2]
+; CHECK-BE: ldr r0, [r1, r3]
; CHECK-NOT: dmb
; CHECK-NOT: mcr
@@ -1222,8 +1296,10 @@ define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val)
%addr = inttoptr i64 %addr_int to i8*
store atomic i8 %val, i8* %addr monotonic, align 1
-; CHECK: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp]
-; CHECK: strb [[VAL]], [r0, r2]
+; CHECK-LE: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp]
+; CHECK-LE: strb [[VAL]], [r0, r2]
+; CHECK-BE: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp, #3]
+; CHECK-BE: strb [[VAL]], [r1, r3]
ret void
}
@@ -1291,7 +1367,8 @@ define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %va
; CHECK: ldr [[VAL:r[0-9]+]], [sp]
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: str [[VAL]], [r0, r2]
+; CHECK-LE: str [[VAL]], [r0, r2]
+; CHECK-BE: str [[VAL]], [r1, r3]
; CHECK-NOT: dmb
; CHECK-NOT: mcr
@@ -1303,13 +1380,13 @@ define void @test_atomic_store_release_i64(i64 %val) nounwind {
store atomic i64 %val, i64* @var64 release, align 8
; CHECK-NOT: dmb
; CHECK-NOT: mcr
-; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
-; CHECK: movt r[[ADDR]], :upper16:var64
+; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var64
+; CHECK: movt [[ADDR]], :upper16:var64
; CHECK: .LBB{{[0-9]+}}_1:
; r0 and r1 below are reasonable guesses but could change: they certainly come into
; the function there.
-; CHECK: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK: stlexd [[STATUS:r[0-9]+]], r0, r1, {{.*}}[[ADDR]]
; CHECK-NEXT: cmp [[STATUS]], #0
; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
; CHECK-NOT: dmb
@@ -1337,7 +1414,7 @@ atomic_ver:
; The key point here is that the second dmb isn't immediately followed by the
; simple_ver basic block, which LLVM attempted to do when DMB had been marked
; with isBarrier. For now, look for something that looks like "somewhere".
-; CHECK-NEXT: mov
+; CHECK-NEXT: {{mov|bx}}
somewhere:
%combined = phi i32 [ %val, %atomic_ver ], [ %newval, %simple_ver]
ret i32 %combined
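
The hunks above repeat one IR migration: cmpxchg is now written with separate success and failure orderings and yields a { value, success } pair, so each test extracts field 0 to recover the old value. A minimal standalone sketch of that form, with hypothetical names and not taken from the imported patch:

@counter = global i32 0

define i32 @example_cmpxchg(i32 %expected, i32 %new) nounwind {
; Two orderings: the first applies on success, the second on failure.
  %pair = cmpxchg i32* @counter, i32 %expected, i32 %new seq_cst seq_cst
; The result is { i32, i1 }; field 0 is the value that was in memory.
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}
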
diff --git a/test/CodeGen/ARM/atomicrmw_minmax.ll b/test/CodeGen/ARM/atomicrmw_minmax.ll
index 5befc228e03c..68bf71486a23 100644
--- a/test/CodeGen/ARM/atomicrmw_minmax.ll
+++ b/test/CodeGen/ARM/atomicrmw_minmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm -mcpu=cortex-a9 < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s
; CHECK-LABEL: max:
define i32 @max(i8 %ctx, i32* %ptr, i32 %val)
@@ -15,7 +15,7 @@ define i32 @min(i8 %ctx, i32* %ptr, i32 %val)
{
; CHECK: ldrex
; CHECK: cmp [[old:r[0-9]*]], [[val:r[0-9]*]]
-; CHECK: movlo {{r[0-9]*}}, [[old]]
+; CHECK: movls {{r[0-9]*}}, [[old]]
%old = atomicrmw umin i32* %ptr, i32 %val monotonic
ret i32 %old
}
diff --git a/test/CodeGen/ARM/available_externally.ll b/test/CodeGen/ARM/available_externally.ll
index 0f646d582e71..d925b5c77706 100644
--- a/test/CodeGen/ARM/available_externally.ll
+++ b/test/CodeGen/ARM/available_externally.ll
@@ -11,6 +11,8 @@ define i32 @t1() {
}
; CHECK: L_A$non_lazy_ptr:
-; CHECK-NEXT: .long _A
+; CHECK-NEXT: .indirect_symbol _A
+; CHECK-NEXT: .long 0
; CHECK: L_B$non_lazy_ptr:
-; CHECK-NEXT: .long _B
+; CHECK-NEXT: .indirect_symbol _B
+; CHECK-NEXT: .long 0
diff --git a/test/CodeGen/ARM/bfc.ll b/test/CodeGen/ARM/bfc.ll
index 3a17d2b8cf99..1162aacee664 100644
--- a/test/CodeGen/ARM/bfc.ll
+++ b/test/CodeGen/ARM/bfc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6t2 %s -o - | FileCheck %s
; 4278190095 = 0xff00000f
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/ARM/bfi.ll b/test/CodeGen/ARM/bfi.ll
index 72a467809978..bce09da7618a 100644
--- a/test/CodeGen/ARM/bfi.ll
+++ b/test/CodeGen/ARM/bfi.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm -mattr=+v6t2 < %s | FileCheck %s
+; RUN: llc -mtriple=arm -mattr=+v6t2 %s -o - | FileCheck %s
%struct.F = type { [3 x i8], i8 }
diff --git a/test/CodeGen/ARM/bfx.ll b/test/CodeGen/ARM/bfx.ll
index 394da9e157ff..46f49e9fab20 100644
--- a/test/CodeGen/ARM/bfx.ll
+++ b/test/CodeGen/ARM/bfx.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v7 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v7 %s -o - | FileCheck %s
define i32 @sbfx1(i32 %a) {
; CHECK: sbfx1
diff --git a/test/CodeGen/ARM/bic.ll b/test/CodeGen/ARM/bic.ll
index 1dfd6278287d..691f8be4ab66 100644
--- a/test/CodeGen/ARM/bic.ll
+++ b/test/CodeGen/ARM/bic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
%tmp = xor i32 %b, 4294967295
diff --git a/test/CodeGen/ARM/big-endian-eh-unwind.ll b/test/CodeGen/ARM/big-endian-eh-unwind.ll
new file mode 100644
index 000000000000..630dfed4467c
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-eh-unwind.ll
@@ -0,0 +1,73 @@
+; RUN: llc < %s -mtriple armeb-eabi -mattr v7 -filetype obj -o - | llvm-objdump -s - | FileCheck %s
+
+; ARM EHABI for big endian
+; This test case checks whether frame unwinding instructions are laid out in big endian format.
+;
+; This is the LLVM assembly generated from the following C++ code:
+;
+; extern void foo(int);
+; void test(int a, int b) {
+; try {
+; foo(a);
+; } catch (...) {
+; foo(b);
+; }
+;}
+
+define void @_Z4testii(i32 %a, i32 %b) #0 {
+entry:
+ invoke void @_Z3fooi(i32 %a)
+ to label %try.cont unwind label %lpad
+
+lpad: ; preds = %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = tail call i8* @__cxa_begin_catch(i8* %1) #2
+ invoke void @_Z3fooi(i32 %b)
+ to label %invoke.cont2 unwind label %lpad1
+
+invoke.cont2: ; preds = %lpad
+ tail call void @__cxa_end_catch()
+ br label %try.cont
+
+try.cont: ; preds = %entry, %invoke.cont2
+ ret void
+
+lpad1: ; preds = %lpad
+ %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+
+eh.resume: ; preds = %lpad1
+ resume { i8*, i32 } %3
+
+terminate.lpad: ; preds = %lpad1
+ %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %5 = extractvalue { i8*, i32 } %4, 0
+ tail call void @__clang_call_terminate(i8* %5) #3
+ unreachable
+}
+
+declare void @_Z3fooi(i32) #0
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+; Function Attrs: noinline noreturn nounwind
+define linkonce_odr hidden void @__clang_call_terminate(i8*) #1 {
+ %2 = tail call i8* @__cxa_begin_catch(i8* %0) #2
+ tail call void @_ZSt9terminatev() #3
+ unreachable
+}
+
+declare void @_ZSt9terminatev()
+
+; CHECK-LABEL: Contents of section .ARM.extab:
+; CHECK-NEXT: 0000 00000000 00a8b0b0
+
diff --git a/test/CodeGen/ARM/big-endian-neon-bitconv.ll b/test/CodeGen/ARM/big-endian-neon-bitconv.ll
new file mode 100644
index 000000000000..427d2e731428
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-neon-bitconv.ll
@@ -0,0 +1,392 @@
+; RUN: llc < %s -march armeb -mtriple arm-eabi -mattr v7,neon -float-abi soft -o - | FileCheck %s
+; RUN: llc < %s -march armeb -mtriple arm-eabi -mattr v7,neon -float-abi hard -o - | FileCheck %s -check-prefix CHECK-HARD
+
+@v2i64 = global <2 x i64> zeroinitializer
+@v2i32 = global <2 x i32> zeroinitializer
+@v4i32 = global <4 x i32> zeroinitializer
+@v4i16 = global <4 x i16> zeroinitializer
+@v8i16 = global <8 x i16> zeroinitializer
+@v8i8 = global <8 x i8> zeroinitializer
+@v16i8 = global <16 x i8> zeroinitializer
+
+@v2f32 = global <2 x float> zeroinitializer
+@v2f64 = global <2 x double> zeroinitializer
+@v4f32 = global <4 x float> zeroinitializer
+
+
+; 64 bit conversions
+define void @conv_i64_to_v8i8( i64 %val, <8 x i8>* %store ) {
+; CHECK-LABEL: conv_i64_to_v8i8:
+; CHECK: vrev64.8
+ %v = bitcast i64 %val to <8 x i8>
+ %w = load <8 x i8>* @v8i8
+ %a = add <8 x i8> %v, %w
+ store <8 x i8> %a, <8 x i8>* %store
+ ret void
+}
+
+define void @conv_v8i8_to_i64( <8 x i8>* %load, <8 x i8>* %store ) {
+; CHECK-LABEL: conv_v8i8_to_i64:
+; CHECK: vrev64.8
+ %v = load <8 x i8>* %load
+ %w = load <8 x i8>* @v8i8
+ %a = add <8 x i8> %v, %w
+ %f = bitcast <8 x i8> %a to i64
+ call void @conv_i64_to_v8i8( i64 %f, <8 x i8>* %store )
+ ret void
+}
+
+define void @conv_i64_to_v4i16( i64 %val, <4 x i16>* %store ) {
+; CHECK-LABEL: conv_i64_to_v4i16:
+; CHECK: vrev64.16
+ %v = bitcast i64 %val to <4 x i16>
+ %w = load <4 x i16>* @v4i16
+ %a = add <4 x i16> %v, %w
+ store <4 x i16> %a, <4 x i16>* %store
+ ret void
+}
+
+define void @conv_v4i16_to_i64( <4 x i16>* %load, <4 x i16>* %store ) {
+; CHECK-LABEL: conv_v4i16_to_i64:
+; CHECK: vrev64.16
+ %v = load <4 x i16>* %load
+ %w = load <4 x i16>* @v4i16
+ %a = add <4 x i16> %v, %w
+ %f = bitcast <4 x i16> %a to i64
+ call void @conv_i64_to_v4i16( i64 %f, <4 x i16>* %store )
+ ret void
+}
+
+define void @conv_i64_to_v2i32( i64 %val, <2 x i32>* %store ) {
+; CHECK-LABEL: conv_i64_to_v2i32:
+; CHECK: vrev64.32
+ %v = bitcast i64 %val to <2 x i32>
+ %w = load <2 x i32>* @v2i32
+ %a = add <2 x i32> %v, %w
+ store <2 x i32> %a, <2 x i32>* %store
+ ret void
+}
+
+define void @conv_v2i32_to_i64( <2 x i32>* %load, <2 x i32>* %store ) {
+; CHECK-LABEL: conv_v2i32_to_i64:
+; CHECK: vrev64.32
+ %v = load <2 x i32>* %load
+ %w = load <2 x i32>* @v2i32
+ %a = add <2 x i32> %v, %w
+ %f = bitcast <2 x i32> %a to i64
+ call void @conv_i64_to_v2i32( i64 %f, <2 x i32>* %store )
+ ret void
+}
+
+define void @conv_i64_to_v2f32( i64 %val, <2 x float>* %store ) {
+; CHECK-LABEL: conv_i64_to_v2f32:
+; CHECK: vrev64.32
+ %v = bitcast i64 %val to <2 x float>
+ %w = load <2 x float>* @v2f32
+ %a = fadd <2 x float> %v, %w
+ store <2 x float> %a, <2 x float>* %store
+ ret void
+}
+
+define void @conv_v2f32_to_i64( <2 x float>* %load, <2 x float>* %store ) {
+; CHECK-LABEL: conv_v2f32_to_i64:
+; CHECK: vrev64.32
+ %v = load <2 x float>* %load
+ %w = load <2 x float>* @v2f32
+ %a = fadd <2 x float> %v, %w
+ %f = bitcast <2 x float> %a to i64
+ call void @conv_i64_to_v2f32( i64 %f, <2 x float>* %store )
+ ret void
+}
+
+define void @conv_f64_to_v8i8( double %val, <8 x i8>* %store ) {
+; CHECK-LABEL: conv_f64_to_v8i8:
+; CHECK: vrev64.8
+ %v = bitcast double %val to <8 x i8>
+ %w = load <8 x i8>* @v8i8
+ %a = add <8 x i8> %v, %w
+ store <8 x i8> %a, <8 x i8>* %store
+ ret void
+}
+
+define void @conv_v8i8_to_f64( <8 x i8>* %load, <8 x i8>* %store ) {
+; CHECK-LABEL: conv_v8i8_to_f64:
+; CHECK: vrev64.8
+ %v = load <8 x i8>* %load
+ %w = load <8 x i8>* @v8i8
+ %a = add <8 x i8> %v, %w
+ %f = bitcast <8 x i8> %a to double
+ call void @conv_f64_to_v8i8( double %f, <8 x i8>* %store )
+ ret void
+}
+
+define void @conv_f64_to_v4i16( double %val, <4 x i16>* %store ) {
+; CHECK-LABEL: conv_f64_to_v4i16:
+; CHECK: vrev64.16
+ %v = bitcast double %val to <4 x i16>
+ %w = load <4 x i16>* @v4i16
+ %a = add <4 x i16> %v, %w
+ store <4 x i16> %a, <4 x i16>* %store
+ ret void
+}
+
+define void @conv_v4i16_to_f64( <4 x i16>* %load, <4 x i16>* %store ) {
+; CHECK-LABEL: conv_v4i16_to_f64:
+; CHECK: vrev64.16
+ %v = load <4 x i16>* %load
+ %w = load <4 x i16>* @v4i16
+ %a = add <4 x i16> %v, %w
+ %f = bitcast <4 x i16> %a to double
+ call void @conv_f64_to_v4i16( double %f, <4 x i16>* %store )
+ ret void
+}
+
+define void @conv_f64_to_v2i32( double %val, <2 x i32>* %store ) {
+; CHECK-LABEL: conv_f64_to_v2i32:
+; CHECK: vrev64.32
+ %v = bitcast double %val to <2 x i32>
+ %w = load <2 x i32>* @v2i32
+ %a = add <2 x i32> %v, %w
+ store <2 x i32> %a, <2 x i32>* %store
+ ret void
+}
+
+define void @conv_v2i32_to_f64( <2 x i32>* %load, <2 x i32>* %store ) {
+; CHECK-LABEL: conv_v2i32_to_f64:
+; CHECK: vrev64.32
+ %v = load <2 x i32>* %load
+ %w = load <2 x i32>* @v2i32
+ %a = add <2 x i32> %v, %w
+ %f = bitcast <2 x i32> %a to double
+ call void @conv_f64_to_v2i32( double %f, <2 x i32>* %store )
+ ret void
+}
+
+define void @conv_f64_to_v2f32( double %val, <2 x float>* %store ) {
+; CHECK-LABEL: conv_f64_to_v2f32:
+; CHECK: vrev64.32
+ %v = bitcast double %val to <2 x float>
+ %w = load <2 x float>* @v2f32
+ %a = fadd <2 x float> %v, %w
+ store <2 x float> %a, <2 x float>* %store
+ ret void
+}
+
+define void @conv_v2f32_to_f64( <2 x float>* %load, <2 x float>* %store ) {
+; CHECK-LABEL: conv_v2f32_to_f64:
+; CHECK: vrev64.32
+ %v = load <2 x float>* %load
+ %w = load <2 x float>* @v2f32
+ %a = fadd <2 x float> %v, %w
+ %f = bitcast <2 x float> %a to double
+ call void @conv_f64_to_v2f32( double %f, <2 x float>* %store )
+ ret void
+}
+
+; 128 bit conversions
+
+
+define void @conv_i128_to_v16i8( i128 %val, <16 x i8>* %store ) {
+; CHECK-LABEL: conv_i128_to_v16i8:
+; CHECK: vrev32.8
+ %v = bitcast i128 %val to <16 x i8>
+ %w = load <16 x i8>* @v16i8
+ %a = add <16 x i8> %v, %w
+ store <16 x i8> %a, <16 x i8>* %store
+ ret void
+}
+
+define void @conv_v16i8_to_i128( <16 x i8>* %load, <16 x i8>* %store ) {
+; CHECK-LABEL: conv_v16i8_to_i128:
+; CHECK: vrev32.8
+ %v = load <16 x i8>* %load
+ %w = load <16 x i8>* @v16i8
+ %a = add <16 x i8> %v, %w
+ %f = bitcast <16 x i8> %a to i128
+ call void @conv_i128_to_v16i8( i128 %f, <16 x i8>* %store )
+ ret void
+}
+
+define void @conv_i128_to_v8i16( i128 %val, <8 x i16>* %store ) {
+; CHECK-LABEL: conv_i128_to_v8i16:
+; CHECK: vrev32.16
+ %v = bitcast i128 %val to <8 x i16>
+ %w = load <8 x i16>* @v8i16
+ %a = add <8 x i16> %v, %w
+ store <8 x i16> %a, <8 x i16>* %store
+ ret void
+}
+
+define void @conv_v8i16_to_i128( <8 x i16>* %load, <8 x i16>* %store ) {
+; CHECK-LABEL: conv_v8i16_to_i128:
+; CHECK: vrev32.16
+ %v = load <8 x i16>* %load
+ %w = load <8 x i16>* @v8i16
+ %a = add <8 x i16> %v, %w
+ %f = bitcast <8 x i16> %a to i128
+ call void @conv_i128_to_v8i16( i128 %f, <8 x i16>* %store )
+ ret void
+}
+
+define void @conv_i128_to_v4i32( i128 %val, <4 x i32>* %store ) {
+; CHECK-LABEL: conv_i128_to_v4i32:
+; CHECK: vrev64.32
+ %v = bitcast i128 %val to <4 x i32>
+ %w = load <4 x i32>* @v4i32
+ %a = add <4 x i32> %v, %w
+ store <4 x i32> %a, <4 x i32>* %store
+ ret void
+}
+
+define void @conv_v4i32_to_i128( <4 x i32>* %load, <4 x i32>* %store ) {
+; CHECK-LABEL: conv_v4i32_to_i128:
+; CHECK: vrev64.32
+ %v = load <4 x i32>* %load
+ %w = load <4 x i32>* @v4i32
+ %a = add <4 x i32> %v, %w
+ %f = bitcast <4 x i32> %a to i128
+ call void @conv_i128_to_v4i32( i128 %f, <4 x i32>* %store )
+ ret void
+}
+
+define void @conv_i128_to_v4f32( i128 %val, <4 x float>* %store ) {
+; CHECK-LABEL: conv_i128_to_v4f32:
+; CHECK: vrev64.32
+ %v = bitcast i128 %val to <4 x float>
+ %w = load <4 x float>* @v4f32
+ %a = fadd <4 x float> %v, %w
+ store <4 x float> %a, <4 x float>* %store
+ ret void
+}
+
+define void @conv_v4f32_to_i128( <4 x float>* %load, <4 x float>* %store ) {
+; CHECK-LABEL: conv_v4f32_to_i128:
+; CHECK: vrev64.32
+ %v = load <4 x float>* %load
+ %w = load <4 x float>* @v4f32
+ %a = fadd <4 x float> %v, %w
+ %f = bitcast <4 x float> %a to i128
+ call void @conv_i128_to_v4f32( i128 %f, <4 x float>* %store )
+ ret void
+}
+
+define void @conv_f128_to_v2f64( fp128 %val, <2 x double>* %store ) {
+; CHECK-LABEL: conv_f128_to_v2f64:
+; CHECK: vrev64.32
+ %v = bitcast fp128 %val to <2 x double>
+ %w = load <2 x double>* @v2f64
+ %a = fadd <2 x double> %v, %w
+ store <2 x double> %a, <2 x double>* %store
+ ret void
+}
+
+define void @conv_v2f64_to_f128( <2 x double>* %load, <2 x double>* %store ) {
+; CHECK-LABEL: conv_v2f64_to_f128:
+; CHECK: vrev64.32
+ %v = load <2 x double>* %load
+ %w = load <2 x double>* @v2f64
+ %a = fadd <2 x double> %v, %w
+ %f = bitcast <2 x double> %a to fp128
+ call void @conv_f128_to_v2f64( fp128 %f, <2 x double>* %store )
+ ret void
+}
+
+define void @conv_f128_to_v16i8( fp128 %val, <16 x i8>* %store ) {
+; CHECK-LABEL: conv_f128_to_v16i8:
+; CHECK: vrev32.8
+ %v = bitcast fp128 %val to <16 x i8>
+ %w = load <16 x i8>* @v16i8
+ %a = add <16 x i8> %v, %w
+ store <16 x i8> %a, <16 x i8>* %store
+ ret void
+}
+
+define void @conv_v16i8_to_f128( <16 x i8>* %load, <16 x i8>* %store ) {
+; CHECK-LABEL: conv_v16i8_to_f128:
+; CHECK: vrev32.8
+ %v = load <16 x i8>* %load
+ %w = load <16 x i8>* @v16i8
+ %a = add <16 x i8> %v, %w
+ %f = bitcast <16 x i8> %a to fp128
+ call void @conv_f128_to_v16i8( fp128 %f, <16 x i8>* %store )
+ ret void
+}
+
+define void @conv_f128_to_v8i16( fp128 %val, <8 x i16>* %store ) {
+; CHECK-LABEL: conv_f128_to_v8i16:
+; CHECK: vrev32.16
+ %v = bitcast fp128 %val to <8 x i16>
+ %w = load <8 x i16>* @v8i16
+ %a = add <8 x i16> %v, %w
+ store <8 x i16> %a, <8 x i16>* %store
+ ret void
+}
+
+define void @conv_v8i16_to_f128( <8 x i16>* %load, <8 x i16>* %store ) {
+; CHECK-LABEL: conv_v8i16_to_f128:
+; CHECK: vrev32.16
+ %v = load <8 x i16>* %load
+ %w = load <8 x i16>* @v8i16
+ %a = add <8 x i16> %v, %w
+ %f = bitcast <8 x i16> %a to fp128
+ call void @conv_f128_to_v8i16( fp128 %f, <8 x i16>* %store )
+ ret void
+}
+
+define void @conv_f128_to_v4f32( fp128 %val, <4 x float>* %store ) {
+; CHECK-LABEL: conv_f128_to_v4f32:
+; CHECK: vrev64.32
+ %v = bitcast fp128 %val to <4 x float>
+ %w = load <4 x float>* @v4f32
+ %a = fadd <4 x float> %v, %w
+ store <4 x float> %a, <4 x float>* %store
+ ret void
+}
+
+define void @conv_v4f32_to_f128( <4 x float>* %load, <4 x float>* %store ) {
+; CHECK-LABEL: conv_v4f32_to_f128:
+; CHECK: vrev64.32
+ %v = load <4 x float>* %load
+ %w = load <4 x float>* @v4f32
+ %a = fadd <4 x float> %v, %w
+ %f = bitcast <4 x float> %a to fp128
+ call void @conv_f128_to_v4f32( fp128 %f, <4 x float>* %store )
+ ret void
+}
+
+define void @arg_v4i32( <4 x i32> %var, <4 x i32>* %store ) {
+; CHECK-LABEL: arg_v4i32:
+; CHECK: vmov [[REG2:d[0-9]+]], r3, r2
+; CHECK: vmov [[REG1:d[0-9]+]], r1, r0
+; CHECK: vst1.64 {[[REG1]], [[REG2]]},
+; CHECK-HARD-LABEL: arg_v4i32:
+; CHECK-HARD-NOT: vmov
+; CHECK-HARD: vst1.64 {d0, d1}
+ store <4 x i32> %var, <4 x i32>* %store
+ ret void
+}
+
+define void @arg_v8i16( <8 x i16> %var, <8 x i16>* %store ) {
+; CHECK-LABEL: arg_v8i16:
+; CHECK: vmov [[REG2:d[0-9]+]], r3, r2
+; CHECK: vmov [[REG1:d[0-9]+]], r1, r0
+; CHECK: vst1.64 {[[REG1]], [[REG2]]},
+; CHECK-HARD-LABEL: arg_v8i16:
+; CHECK-HARD-NOT: vmov
+; CHECK-HARD: vst1.64 {d0, d1}
+ store <8 x i16> %var, <8 x i16>* %store
+ ret void
+}
+
+define void @arg_v16i8( <16 x i8> %var, <16 x i8>* %store ) {
+; CHECK-LABEL: arg_v16i8:
+; CHECK: vmov [[REG2:d[0-9]+]], r3, r2
+; CHECK: vmov [[REG1:d[0-9]+]], r1, r0
+; CHECK: vst1.64 {[[REG1]], [[REG2]]},
+; CHECK-HARD-LABEL: arg_v16i8:
+; CHECK-HARD-NOT: vmov
+; CHECK-HARD: vst1.64 {d0, d1}
+ store <16 x i8> %var, <16 x i8>* %store
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/big-endian-neon-extend.ll b/test/CodeGen/ARM/big-endian-neon-extend.ll
new file mode 100644
index 000000000000..931c6c3979c6
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-neon-extend.ll
@@ -0,0 +1,81 @@
+; RUN: llc < %s -mtriple armeb-eabi -mattr v7,neon -o - | FileCheck %s
+
+define void @vector_ext_2i8_to_2i64( <2 x i8>* %loadaddr, <2 x i64>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i8_to_2i64:
+; CHECK: vld1.16 {[[REG:d[0-9]+]]
+; CHECK: vmov.i64 {{q[0-9]+}}, #0xff
+; CHECK: vrev16.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i8>* %loadaddr
+ %2 = zext <2 x i8> %1 to <2 x i64>
+ store <2 x i64> %2, <2 x i64>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_2i16_to_2i64( <2 x i16>* %loadaddr, <2 x i64>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i16_to_2i64:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vmov.i64 {{q[0-9]+}}, #0xffff
+; CHECK: vrev32.16 [[REG]], [[REG]]
+; CHECK: vmovl.u16 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i16>* %loadaddr
+ %2 = zext <2 x i16> %1 to <2 x i64>
+ store <2 x i64> %2, <2 x i64>* %storeaddr
+ ret void
+}
+
+
+define void @vector_ext_2i8_to_2i32( <2 x i8>* %loadaddr, <2 x i32>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i8_to_2i32:
+; CHECK: vld1.16 {[[REG:d[0-9]+]]
+; CHECK: vrev16.8 [[REG]], [[REG]]
+ %1 = load <2 x i8>* %loadaddr
+ %2 = zext <2 x i8> %1 to <2 x i32>
+ store <2 x i32> %2, <2 x i32>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_2i16_to_2i32( <2 x i16>* %loadaddr, <2 x i32>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i16_to_2i32:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vrev32.16 [[REG]], [[REG]]
+; CHECK: vmovl.u16 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i16>* %loadaddr
+ %2 = zext <2 x i16> %1 to <2 x i32>
+ store <2 x i32> %2, <2 x i32>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_2i8_to_2i16( <2 x i8>* %loadaddr, <2 x i16>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_2i8_to_2i16:
+; CHECK: vld1.16 {[[REG:d[0-9]+]]
+; CHECK: vrev16.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <2 x i8>* %loadaddr
+ %2 = zext <2 x i8> %1 to <2 x i16>
+ store <2 x i16> %2, <2 x i16>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_4i8_to_4i32( <4 x i8>* %loadaddr, <4 x i32>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_4i8_to_4i32:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vrev32.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <4 x i8>* %loadaddr
+ %2 = zext <4 x i8> %1 to <4 x i32>
+ store <4 x i32> %2, <4 x i32>* %storeaddr
+ ret void
+}
+
+define void @vector_ext_4i8_to_4i16( <4 x i8>* %loadaddr, <4 x i16>* %storeaddr ) {
+; CHECK-LABEL: vector_ext_4i8_to_4i16:
+; CHECK: vld1.32 {[[REG:d[0-9]+]]
+; CHECK: vrev32.8 [[REG]], [[REG]]
+; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+ %1 = load <4 x i8>* %loadaddr
+ %2 = zext <4 x i8> %1 to <4 x i16>
+ store <4 x i16> %2, <4 x i16>* %storeaddr
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/big-endian-neon-trunc-store.ll b/test/CodeGen/ARM/big-endian-neon-trunc-store.ll
new file mode 100644
index 000000000000..65147ad5d3f7
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-neon-trunc-store.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple armeb-eabi -mattr v7,neon -o - | FileCheck %s
+
+define void @vector_trunc_store_2i64_to_2i16( <2 x i64>* %loadaddr, <2 x i16>* %storeaddr ) {
+; CHECK-LABEL: vector_trunc_store_2i64_to_2i16:
+; CHECK: vmovn.i64 [[REG:d[0-9]+]]
+; CHECK: vrev32.16 [[REG]], [[REG]]
+; CHECK: vuzp.16 [[REG]], [[REG2:d[0-9]+]]
+; CHECK: vrev32.16 [[REG]], [[REG2]]
+ %1 = load <2 x i64>* %loadaddr
+ %2 = trunc <2 x i64> %1 to <2 x i16>
+ store <2 x i16> %2, <2 x i16>* %storeaddr
+ ret void
+}
+
+define void @vector_trunc_store_4i32_to_4i8( <4 x i32>* %loadaddr, <4 x i8>* %storeaddr ) {
+; CHECK-LABEL: vector_trunc_store_4i32_to_4i8:
+; CHECK: vmovn.i32 [[REG:d[0-9]+]]
+; CHECK: vrev16.8 [[REG]], [[REG]]
+; CHECK: vuzp.8 [[REG]], [[REG2:d[0-9]+]]
+; CHECK: vrev32.8 [[REG]], [[REG2]]
+ %1 = load <4 x i32>* %loadaddr
+ %2 = trunc <4 x i32> %1 to <4 x i8>
+ store <4 x i8> %2, <4 x i8>* %storeaddr
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/big-endian-ret-f64.ll b/test/CodeGen/ARM/big-endian-ret-f64.ll
new file mode 100644
index 000000000000..614bfc0a5b3a
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-ret-f64.ll
@@ -0,0 +1,12 @@
+; RUN: llc -mtriple=armebv7a-eabi %s -O0 -o - | FileCheck %s
+; RUN: llc -mtriple=armebv8a-eabi %s -O0 -o - | FileCheck %s
+
+define double @fn() {
+; CHECK-LABEL: fn
+; CHECK: ldr r0, [sp]
+; CHECK: ldr r1, [sp, #4]
+ %r = alloca double, align 8
+ %1 = load double* %r, align 8
+ ret double %1
+}
+
diff --git a/test/CodeGen/ARM/big-endian-vector-callee.ll b/test/CodeGen/ARM/big-endian-vector-callee.ll
new file mode 100644
index 000000000000..4db8bdec8da1
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-vector-callee.ll
@@ -0,0 +1,1172 @@
+; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi soft %s -o - | FileCheck %s -check-prefix CHECK -check-prefix SOFT
+; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi hard %s -o - | FileCheck %s -check-prefix CHECK -check-prefix HARD
+
+; CHECK-LABEL: test_i64_f64:
+define i64 @test_i64_f64(double %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v1i64:
+define i64 @test_i64_v1i64(<1 x i64> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 d{{[0-9]+}}, d0
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v2f32:
+define i64 @test_i64_v2f32(<2 x float> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v2i32:
+define i64 @test_i64_v2i32(<2 x i32> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v4i16:
+define i64 @test_i64_v4i16(<4 x i16> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 d{{[0-9]+}}, d0
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v8i8:
+define i64 @test_i64_v8i8(<8 x i8> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 d{{[0-9]+}}, d0
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to i64
+ %3 = add i64 %2, %2
+ ret i64 %3
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_f64_i64:
+define double @test_f64_i64(i64 %p) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_f64_v1i64:
+define double @test_f64_v1i64(<1 x i64> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 d{{[0-9]+}}, d0
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_f64_v2f32:
+define double @test_f64_v2f32(<2 x float> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_f64_v2i32:
+define double @test_f64_v2i32(<2 x i32> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_f64_v4i16:
+define double @test_f64_v4i16(<4 x i16> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 d{{[0-9]+}}, d0
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_f64_v8i8:
+define double @test_f64_v8i8(<8 x i8> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 d{{[0-9]+}}, d0
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to double
+ %3 = fadd double %2, %2
+ ret double %3
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_v1i64_i64:
+define <1 x i64> @test_v1i64_i64(i64 %p) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+}
+
+; CHECK-LABEL: test_v1i64_f64:
+define <1 x i64> @test_v1i64_f64(double %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+}
+
+; CHECK-LABEL: test_v1i64_v2f32:
+define <1 x i64> @test_v1i64_v2f32(<2 x float> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+}
+
+; CHECK-LABEL: test_v1i64_v2i32:
+define <1 x i64> @test_v1i64_v2i32(<2 x i32> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+}
+
+; CHECK-LABEL: test_v1i64_v4i16:
+define <1 x i64> @test_v1i64_v4i16(<4 x i16> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 d{{[0-9]+}}, d0
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+}
+
+; CHECK-LABEL: test_v1i64_v8i8:
+define <1 x i64> @test_v1i64_v8i8(<8 x i8> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 d{{[0-9]+}}, d0
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <1 x i64>
+ %3 = add <1 x i64> %2, %2
+ ret <1 x i64> %3
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+}
+
+; CHECK-LABEL: test_v2f32_i64:
+define <2 x float> @test_v2f32_i64(i64 %p) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2f32_f64:
+define <2 x float> @test_v2f32_f64(double %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2f32_v1i64:
+define <2 x float> @test_v2f32_v1i64(<1 x i64> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 d{{[0-9]+}}, d0
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2f32_v2i32:
+define <2 x float> @test_v2f32_v2i32(<2 x i32> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2f32_v4i16:
+define <2 x float> @test_v2f32_v4i16(<4 x i16> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 d{{[0-9]+}}, d0
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2f32_v8i8:
+define <2 x float> @test_v2f32_v8i8(<8 x i8> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 d{{[0-9]+}}, d0
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <2 x float>
+ %3 = fadd <2 x float> %2, %2
+ ret <2 x float> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2i32_i64:
+define <2 x i32> @test_v2i32_i64(i64 %p) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2i32_f64:
+define <2 x i32> @test_v2i32_f64(double %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2i32_v1i64:
+define <2 x i32> @test_v2i32_v1i64(<1 x i64> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 d{{[0-9]+}}, d0
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2i32_v2f32:
+define <2 x i32> @test_v2i32_v2f32(<2 x float> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2i32_v4i16:
+define <2 x i32> @test_v2i32_v4i16(<4 x i16> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 d{{[0-9]+}}, d0
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v2i32_v8i8:
+define <2 x i32> @test_v2i32_v8i8(<8 x i8> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 d{{[0-9]+}}, d0
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <2 x i32>
+ %3 = add <2 x i32> %2, %2
+ ret <2 x i32> %3
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+}
+
+; CHECK-LABEL: test_v4i16_i64:
+define <4 x i16> @test_v4i16_i64(i64 %p) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+}
+
+; CHECK-LABEL: test_v4i16_f64:
+define <4 x i16> @test_v4i16_f64(double %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+}
+
+; CHECK-LABEL: test_v4i16_v1i64:
+define <4 x i16> @test_v4i16_v1i64(<1 x i64> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 d{{[0-9]+}}, d0
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+}
+
+; CHECK-LABEL: test_v4i16_v2f32:
+define <4 x i16> @test_v4i16_v2f32(<2 x float> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+}
+
+; CHECK-LABEL: test_v4i16_v2i32:
+define <4 x i16> @test_v4i16_v2i32(<2 x i32> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+}
+
+; CHECK-LABEL: test_v4i16_v8i8:
+define <4 x i16> @test_v4i16_v8i8(<8 x i8> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 d{{[0-9]+}}, d0
+ %1 = add <8 x i8> %p, %p
+ %2 = bitcast <8 x i8> %1 to <4 x i16>
+ %3 = add <4 x i16> %2, %2
+ ret <4 x i16> %3
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+}
+
+; CHECK-LABEL: test_v8i8_i64:
+define <8 x i8> @test_v8i8_i64(i64 %p) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = add i64 %p, %p
+ %2 = bitcast i64 %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+}
+
+; CHECK-LABEL: test_v8i8_f64:
+define <8 x i8> @test_v8i8_f64(double %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd double %p, %p
+ %2 = bitcast double %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+}
+
+; CHECK-LABEL: test_v8i8_v1i64:
+define <8 x i8> @test_v8i8_v1i64(<1 x i64> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 d{{[0-9]+}}, d0
+ %1 = add <1 x i64> %p, %p
+ %2 = bitcast <1 x i64> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+}
+
+; CHECK-LABEL: test_v8i8_v2f32:
+define <8 x i8> @test_v8i8_v2f32(<2 x float> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = fadd <2 x float> %p, %p
+ %2 = bitcast <2 x float> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+}
+
+; CHECK-LABEL: test_v8i8_v2i32:
+define <8 x i8> @test_v8i8_v2i32(<2 x i32> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 d{{[0-9]+}}, d0
+ %1 = add <2 x i32> %p, %p
+ %2 = bitcast <2 x i32> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+}
+
+; CHECK-LABEL: test_v8i8_v4i16:
+define <8 x i8> @test_v8i8_v4i16(<4 x i16> %p) {
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 d{{[0-9]+}}, d0
+ %1 = add <4 x i16> %p, %p
+ %2 = bitcast <4 x i16> %1 to <8 x i8>
+ %3 = add <8 x i8> %2, %2
+ ret <8 x i8> %3
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+}
+
+; CHECK-LABEL: test_f128_v2f64:
+define fp128 @test_f128_v2f64(<2 x double> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
+; HARD: vadd.f64 d{{[0-9]+}}, d1
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
+; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
+}
+
+; CHECK-LABEL: test_f128_v2i64:
+define fp128 @test_f128_v2i64(<2 x i64> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vadd.i64 q{{[0-9]+}}, q0
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
+; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
+}
+
+; CHECK-LABEL: test_f128_v4f32:
+define fp128 @test_f128_v4f32(<4 x float> %p) {
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
+; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
+}
+
+; CHECK-LABEL: test_f128_v4i32:
+define fp128 @test_f128_v4i32(<4 x i32> %p) {
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
+; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
+}
+
+; CHECK-LABEL: test_f128_v8i16:
+define fp128 @test_f128_v8i16(<8 x i16> %p) {
+; HARD: vrev64.16 q{{[0-9]+}}, q0
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
+; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
+}
+
+; CHECK-LABEL: test_f128_v16i8:
+define fp128 @test_f128_v16i8(<16 x i8> %p) {
+; HARD: vrev64.8 q{{[0-9]+}}, q0
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to fp128
+ %3 = fadd fp128 %2, %2
+ ret fp128 %3
+; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
+; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
+}
+
+; CHECK-LABEL: test_v2f64_f128:
+define <2 x double> @test_v2f64_f128(fp128 %p) {
+; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG1]][1], r1
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
+; CHECK: vmov.32 [[REG2]][1], r3
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+; SOFT: vadd.f64 [[REG1:d[0-9]+]]
+; SOFT: vadd.f64 [[REG2:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG2]]
+; SOFT: vmov r3, r2, [[REG1]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_v2f64_v2i64:
+define <2 x double> @test_v2f64_v2i64(<2 x i64> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vadd.i64 q{{[0-9]+}}, q0
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+; SOFT: vadd.f64 [[REG1:d[0-9]+]]
+; SOFT: vadd.f64 [[REG2:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG2]]
+; SOFT: vmov r3, r2, [[REG1]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_v2f64_v4f32:
+define <2 x double> @test_v2f64_v4f32(<4 x float> %p) {
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+; SOFT: vadd.f64 [[REG1:d[0-9]+]]
+; SOFT: vadd.f64 [[REG2:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG2]]
+; SOFT: vmov r3, r2, [[REG1]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_v2f64_v4i32:
+define <2 x double> @test_v2f64_v4i32(<4 x i32> %p) {
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+; SOFT: vadd.f64 [[REG1:d[0-9]+]]
+; SOFT: vadd.f64 [[REG2:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG2]]
+; SOFT: vmov r3, r2, [[REG1]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_v2f64_v8i16:
+define <2 x double> @test_v2f64_v8i16(<8 x i16> %p) {
+; HARD: vrev64.16 q{{[0-9]+}}, q0
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+; SOFT: vadd.f64 [[REG1:d[0-9]+]]
+; SOFT: vadd.f64 [[REG2:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG2]]
+; SOFT: vmov r3, r2, [[REG1]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_v2f64_v16i8:
+define <2 x double> @test_v2f64_v16i8(<16 x i8> %p) {
+; HARD: vrev64.8 q{{[0-9]+}}, q0
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <2 x double>
+ %3 = fadd <2 x double> %2, %2
+ ret <2 x double> %3
+; SOFT: vadd.f64 [[REG1:d[0-9]+]]
+; SOFT: vadd.f64 [[REG2:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG2]]
+; SOFT: vmov r3, r2, [[REG1]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+}
+
+; CHECK-LABEL: test_v2i64_f128:
+define <2 x i64> @test_v2i64_f128(fp128 %p) {
+; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG1]][1], r1
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
+; CHECK: vmov.32 [[REG2]][1], r3
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+}
+
+; CHECK-LABEL: test_v2i64_v2f64:
+define <2 x i64> @test_v2i64_v2f64(<2 x double> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
+; HARD: vadd.f64 d{{[0-9]+}}, d1
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+}
+
+; CHECK-LABEL: test_v2i64_v4f32:
+define <2 x i64> @test_v2i64_v4f32(<4 x float> %p) {
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+}
+
+; CHECK-LABEL: test_v2i64_v4i32:
+define <2 x i64> @test_v2i64_v4i32(<4 x i32> %p) {
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+}
+
+; CHECK-LABEL: test_v2i64_v8i16:
+define <2 x i64> @test_v2i64_v8i16(<8 x i16> %p) {
+; HARD: vrev64.16 q{{[0-9]+}}, q0
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+}
+
+; CHECK-LABEL: test_v2i64_v16i8:
+define <2 x i64> @test_v2i64_v16i8(<16 x i8> %p) {
+; HARD: vrev64.8 q{{[0-9]+}}, q0
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <2 x i64>
+ %3 = add <2 x i64> %2, %2
+ ret <2 x i64> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+}
+
+; CHECK-LABEL: test_v4f32_f128:
+define <4 x float> @test_v4f32_f128(fp128 %p) {
+; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG1]][1], r1
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
+; CHECK: vmov.32 [[REG2]][1], r3
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4f32_v2f64:
+define <4 x float> @test_v4f32_v2f64(<2 x double> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
+; HARD: vadd.f64 d{{[0-9]+}}, d1
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4f32_v2i64:
+define <4 x float> @test_v4f32_v2i64(<2 x i64> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vadd.i64 q{{[0-9]+}}, q0
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4f32_v4i32:
+define <4 x float> @test_v4f32_v4i32(<4 x i32> %p) {
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4f32_v8i16:
+define <4 x float> @test_v4f32_v8i16(<8 x i16> %p) {
+; HARD: vrev64.16 q{{[0-9]+}}, q0
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4f32_v16i8:
+define <4 x float> @test_v4f32_v16i8(<16 x i8> %p) {
+; HARD: vrev64.8 q{{[0-9]+}}, q0
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <4 x float>
+ %3 = fadd <4 x float> %2, %2
+ ret <4 x float> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4i32_f128:
+define <4 x i32> @test_v4i32_f128(fp128 %p) {
+; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG1]][1], r1
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
+; CHECK: vmov.32 [[REG2]][1], r3
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4i32_v2f64:
+define <4 x i32> @test_v4i32_v2f64(<2 x double> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
+; HARD: vadd.f64 d{{[0-9]+}}, d1
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4i32_v2i64:
+define <4 x i32> @test_v4i32_v2i64(<2 x i64> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vadd.i64 q{{[0-9]+}}, q0
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4i32_v4f32:
+define <4 x i32> @test_v4i32_v4f32(<4 x float> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4i32_v8i16:
+define <4 x i32> @test_v4i32_v8i16(<8 x i16> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.16 q{{[0-9]+}}, q0
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v4i32_v16i8:
+define <4 x i32> @test_v4i32_v16i8(<16 x i8> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.8 q{{[0-9]+}}, q0
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <4 x i32>
+ %3 = add <4 x i32> %2, %2
+ ret <4 x i32> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+}
+
+; CHECK-LABEL: test_v8i16_f128:
+define <8 x i16> @test_v8i16_f128(fp128 %p) {
+; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG1]][1], r1
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
+; CHECK: vmov.32 [[REG2]][1], r3
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+}
+
+; CHECK-LABEL: test_v8i16_v2f64:
+define <8 x i16> @test_v8i16_v2f64(<2 x double> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
+; HARD: vadd.f64 d{{[0-9]+}}, d1
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+}
+
+; CHECK-LABEL: test_v8i16_v2i64:
+define <8 x i16> @test_v8i16_v2i64(<2 x i64> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vadd.i64 q{{[0-9]+}}, q0
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+}
+
+; CHECK-LABEL: test_v8i16_v4f32:
+define <8 x i16> @test_v8i16_v4f32(<4 x float> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+}
+
+; CHECK-LABEL: test_v8i16_v4i32:
+define <8 x i16> @test_v8i16_v4i32(<4 x i32> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+}
+
+; CHECK-LABEL: test_v8i16_v16i8:
+define <8 x i16> @test_v8i16_v16i8(<16 x i8> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.8 q{{[0-9]+}}, q0
+ %1 = add <16 x i8> %p, %p
+ %2 = bitcast <16 x i8> %1 to <8 x i16>
+ %3 = add <8 x i16> %2, %2
+ ret <8 x i16> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+}
+
+; CHECK-LABEL: test_v16i8_f128:
+define <16 x i8> @test_v16i8_f128(fp128 %p) {
+; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG1]][1], r1
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
+; CHECK: vmov.32 [[REG2]][1], r3
+ %1 = fadd fp128 %p, %p
+ %2 = bitcast fp128 %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+}
+
+; CHECK-LABEL: test_v16i8_v2f64:
+define <16 x i8> @test_v16i8_v2f64(<2 x double> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
+; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
+; HARD: vadd.f64 d{{[0-9]+}}, d1
+; HARD: vadd.f64 d{{[0-9]+}}, d0
+ %1 = fadd <2 x double> %p, %p
+ %2 = bitcast <2 x double> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+}
+
+; CHECK-LABEL: test_v16i8_v2i64:
+define <16 x i8> @test_v16i8_v2i64(<2 x i64> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vadd.i64 q{{[0-9]+}}, q0
+ %1 = add <2 x i64> %p, %p
+ %2 = bitcast <2 x i64> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+}
+
+; CHECK-LABEL: test_v16i8_v4f32:
+define <16 x i8> @test_v16i8_v4f32(<4 x float> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = fadd <4 x float> %p, %p
+ %2 = bitcast <4 x float> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+}
+
+; CHECK-LABEL: test_v16i8_v4i32:
+define <16 x i8> @test_v16i8_v4i32(<4 x i32> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.32 q{{[0-9]+}}, q0
+ %1 = add <4 x i32> %p, %p
+ %2 = bitcast <4 x i32> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+}
+
+; CHECK-LABEL: test_v16i8_v8i16:
+define <16 x i8> @test_v16i8_v8i16(<8 x i16> %p) {
+; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
+; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
+; HARD: vrev64.16 q{{[0-9]+}}, q0
+ %1 = add <8 x i16> %p, %p
+ %2 = bitcast <8 x i16> %1 to <16 x i8>
+ %3 = add <16 x i8> %2, %2
+ ret <16 x i8> %3
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+}
diff --git a/test/CodeGen/ARM/big-endian-vector-caller.ll b/test/CodeGen/ARM/big-endian-vector-caller.ll
new file mode 100644
index 000000000000..d01b0a7c974f
--- /dev/null
+++ b/test/CodeGen/ARM/big-endian-vector-caller.ll
@@ -0,0 +1,1369 @@
+; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi soft %s -o - | FileCheck %s -check-prefix CHECK -check-prefix SOFT
+; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi hard %s -o - | FileCheck %s -check-prefix CHECK -check-prefix HARD
+
+; CHECK-LABEL: test_i64_f64:
+declare i64 @test_i64_f64_helper(double %p)
+define void @test_i64_f64(double* %p, i64* %q) {
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call i64 @test_i64_f64_helper(double %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v1i64:
+declare i64 @test_i64_v1i64_helper(<1 x i64> %p)
+define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v2f32:
+declare i64 @test_i64_v2f32_helper(<2 x float> %p)
+define void @test_i64_v2f32(<2 x float>* %p, i64* %q) {
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call i64 @test_i64_v2f32_helper(<2 x float> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v2i32:
+declare i64 @test_i64_v2i32_helper(<2 x i32> %p)
+define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) {
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v4i16:
+declare i64 @test_i64_v4i16_helper(<4 x i16> %p)
+define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) {
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_i64_v8i8:
+declare i64 @test_i64_v8i8_helper(<8 x i8> %p)
+define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) {
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2)
+ %4 = add i64 %3, %3
+ store i64 %4, i64* %q
+ ret void
+; CHECK: adds r1
+; CHECK: adc r0
+}
+
+; CHECK-LABEL: test_f64_i64:
+declare double @test_f64_i64_helper(i64 %p)
+define void @test_f64_i64(i64* %p, double* %q) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call double @test_f64_i64_helper(i64 %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_f64_v1i64:
+declare double @test_f64_v1i64_helper(<1 x i64> %p)
+define void @test_f64_v1i64(<1 x i64>* %p, double* %q) {
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call double @test_f64_v1i64_helper(<1 x i64> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_f64_v2f32:
+declare double @test_f64_v2f32_helper(<2 x float> %p)
+define void @test_f64_v2f32(<2 x float>* %p, double* %q) {
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call double @test_f64_v2f32_helper(<2 x float> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_f64_v2i32:
+declare double @test_f64_v2i32_helper(<2 x i32> %p)
+define void @test_f64_v2i32(<2 x i32>* %p, double* %q) {
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call double @test_f64_v2i32_helper(<2 x i32> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_f64_v4i16:
+declare double @test_f64_v4i16_helper(<4 x i16> %p)
+define void @test_f64_v4i16(<4 x i16>* %p, double* %q) {
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call double @test_f64_v4i16_helper(<4 x i16> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_f64_v8i8:
+declare double @test_f64_v8i8_helper(<8 x i8> %p)
+define void @test_f64_v8i8(<8 x i8>* %p, double* %q) {
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call double @test_f64_v8i8_helper(<8 x i8> %2)
+ %4 = fadd double %3, %3
+ store double %4, double* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.f64 [[REG]]
+; HARD: vadd.f64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v1i64_i64:
+declare <1 x i64> @test_v1i64_i64_helper(i64 %p)
+define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v1i64_f64:
+declare <1 x i64> @test_v1i64_f64_helper(double %p)
+define void @test_v1i64_f64(double* %p, <1 x i64>* %q) {
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <1 x i64> @test_v1i64_f64_helper(double %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v1i64_v2f32:
+declare <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %p)
+define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) {
+; HARD: vrev64.32 d0
+; SOFT: vadd.f32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v1i64_v2i32:
+declare <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %p)
+define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) {
+; HARD: vrev64.32 d0
+; SOFT: vadd.i32 [[REG:d[0-9]+]]
+; SOFT: vrev64.32 [[REG]]
+; SOFT: vmov r1, r0, [[REG]]
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v1i64_v4i16:
+declare <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %p)
+define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) {
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v1i64_v8i8:
+declare <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %p)
+define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) {
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2)
+ %4 = add <1 x i64> %3, %3
+ store <1 x i64> %4, <1 x i64>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vadd.i64 [[REG]]
+; HARD: vadd.i64 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2f32_i64:
+declare <2 x float> @test_v2f32_i64_helper(i64 %p)
+define void @test_v2f32_i64(i64* %p, <2 x float>* %q) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <2 x float> @test_v2f32_i64_helper(i64 %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2f32_f64:
+declare <2 x float> @test_v2f32_f64_helper(double %p)
+define void @test_v2f32_f64(double* %p, <2 x float>* %q) {
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <2 x float> @test_v2f32_f64_helper(double %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2f32_v1i64:
+declare <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %p)
+define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) {
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2f32_v2i32:
+declare <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %p)
+define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) {
+; HARD: vrev64.32 d0
+; SOFT: vadd.i32 [[REG:d[0-9]+]]
+; SOFT: vrev64.32 [[REG]]
+; SOFT: vmov r1, r0, [[REG]]
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2f32_v4i16:
+declare <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %p)
+define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) {
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2f32_v8i8:
+declare <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %p)
+define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) {
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %2)
+ %4 = fadd <2 x float> %3, %3
+ store <2 x float> %4, <2 x float>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2i32_i64:
+declare <2 x i32> @test_v2i32_i64_helper(i64 %p)
+define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2i32_f64:
+declare <2 x i32> @test_v2i32_f64_helper(double %p)
+define void @test_v2i32_f64(double* %p, <2 x i32>* %q) {
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <2 x i32> @test_v2i32_f64_helper(double %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2i32_v1i64:
+declare <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %p)
+define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) {
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2i32_v2f32:
+declare <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %p)
+define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) {
+; HARD: vadd.f32 [[REG:d[0-9]+]]
+; HARD: vrev64.32 d0, [[REG]]
+; SOFT: vadd.f32 [[REG:d[0-9]+]]
+; SOFT: vrev64.32 [[REG]]
+; SOFT: vmov r1, r0, [[REG]]
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2i32_v4i16:
+declare <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %p)
+define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) {
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v2i32_v8i8:
+declare <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %p)
+define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) {
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2)
+ %4 = add <2 x i32> %3, %3
+ store <2 x i32> %4, <2 x i32>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.32 [[REG]]
+; HARD: vrev64.32 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v4i16_i64:
+declare <4 x i16> @test_v4i16_i64_helper(i64 %p)
+define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <4 x i16> @test_v4i16_i64_helper(i64 %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v4i16_f64:
+declare <4 x i16> @test_v4i16_f64_helper(double %p)
+define void @test_v4i16_f64(double* %p, <4 x i16>* %q) {
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <4 x i16> @test_v4i16_f64_helper(double %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v4i16_v1i64:
+declare <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %p)
+define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) {
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v4i16_v2f32:
+declare <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %p)
+define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) {
+; HARD: vadd.f32 [[REG:d[0-9]+]]
+; HARD: vrev64.32 d0, [[REG]]
+; SOFT: vadd.f32 [[REG:d[0-9]+]]
+; SOFT: vrev64.32 [[REG]]
+; SOFT: vmov r1, r0, [[REG]]
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v4i16_v2i32:
+declare <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %p)
+define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) {
+; HARD: vadd.i32 [[REG:d[0-9]+]]
+; HARD: vrev64.32 d0, [[REG]]
+; SOFT: vadd.i32 [[REG:d[0-9]+]]
+; SOFT: vrev64.32 [[REG]]
+; SOFT: vmov r1, r0, [[REG]]
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v4i16_v8i8:
+declare <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %p)
+define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) {
+; SOFT: vrev64.8 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.8 d0
+ %1 = load <8 x i8>* %p
+ %2 = add <8 x i8> %1, %1
+ %3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2)
+ %4 = add <4 x i16> %3, %3
+ store <4 x i16> %4, <4 x i16>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.16 [[REG]]
+; HARD: vrev64.16 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v8i8_i64:
+declare <8 x i8> @test_v8i8_i64_helper(i64 %p)
+define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) {
+; CHECK: adds r1
+; CHECK: adc r0
+ %1 = load i64* %p
+ %2 = add i64 %1, %1
+ %3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v8i8_f64:
+declare <8 x i8> @test_v8i8_f64_helper(double %p)
+define void @test_v8i8_f64(double* %p, <8 x i8>* %q) {
+; SOFT: vadd.f64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.f64 d0
+ %1 = load double* %p
+ %2 = fadd double %1, %1
+ %3 = call <8 x i8> @test_v8i8_f64_helper(double %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v8i8_v1i64:
+declare <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %p)
+define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) {
+; SOFT: vadd.i64 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vadd.i64 d0
+ %1 = load <1 x i64>* %p
+ %2 = add <1 x i64> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v8i8_v2f32:
+declare <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %p)
+define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) {
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+ %1 = load <2 x float>* %p
+ %2 = fadd <2 x float> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v8i8_v2i32:
+declare <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %p)
+define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) {
+; SOFT: vrev64.32 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.32 d0
+ %1 = load <2 x i32>* %p
+ %2 = add <2 x i32> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_v8i8_v4i16:
+declare <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %p)
+define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) {
+; SOFT: vrev64.16 [[REG:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG]]
+; HARD: vrev64.16 d0
+ %1 = load <4 x i16>* %p
+ %2 = add <4 x i16> %1, %1
+ %3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2)
+ %4 = add <8 x i8> %3, %3
+ store <8 x i8> %4, <8 x i8>* %q
+ ret void
+; SOFT: vmov [[REG:d[0-9]+]], r1, r0
+; SOFT: vrev64.8 [[REG]]
+; HARD: vrev64.8 {{d[0-9]+}}, d0
+}
+
+; CHECK-LABEL: test_f128_v2f64:
+declare fp128 @test_f128_v2f64_helper(<2 x double> %p)
+define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) {
+; SOFT: vadd.f64 [[REG2:d[0-9]+]]
+; SOFT: vadd.f64 [[REG1:d[0-9]+]]
+; SOFT: vmov r1, r0, [[REG1]]
+; SOFT: vmov r3, r2, [[REG2]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+; CHECK: stm sp, {r0, r1, r2, r3}
+}
+
+; CHECK-LABEL: test_f128_v2i64:
+declare fp128 @test_f128_v2i64_helper(<2 x i64> %p)
+define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+; CHECK: stm sp, {r0, r1, r2, r3}
+}
+
+; CHECK-LABEL: test_f128_v4f32:
+declare fp128 @test_f128_v4f32_helper(<4 x float> %p)
+define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+; CHECK: stm sp, {r0, r1, r2, r3}
+}
+
+; CHECK-LABEL: test_f128_v4i32:
+declare fp128 @test_f128_v4i32_helper(<4 x i32> %p)
+define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+; CHECK: stm sp, {r0, r1, r2, r3}
+}
+
+; CHECK-LABEL: test_f128_v8i16:
+declare fp128 @test_f128_v8i16_helper(<8 x i16> %p)
+define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+; CHECK: stm sp, {r0, r1, r2, r3}
+}
+
+; CHECK-LABEL: test_f128_v16i8:
+declare fp128 @test_f128_v16i8_helper(<16 x i8> %p)
+define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2)
+ %4 = fadd fp128 %3, %3
+ store fp128 %4, fp128* %q
+ ret void
+; CHECK: stm sp, {r0, r1, r2, r3}
+}
+
+; CHECK-LABEL: test_v2f64_f128:
+declare <2 x double> @test_v2f64_f128_helper(fp128 %p)
+define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) {
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+
+}
+
+; CHECK-LABEL: test_v2f64_v2i64:
+declare <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %p)
+define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2f64_v4f32:
+declare <2 x double> @test_v2f64_v4f32_helper(<4 x float> %p)
+define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2f64_v4i32:
+declare <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %p)
+define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2f64_v8i16:
+declare <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %p)
+define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2f64_v16i8:
+declare <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %p)
+define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2)
+ %4 = fadd <2 x double> %3, %3
+ store <2 x double> %4, <2 x double>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2i64_f128:
+declare <2 x i64> @test_v2i64_f128_helper(fp128 %p)
+define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) {
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2i64_v2f64:
+declare <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %p)
+define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
+; SOFT: vmov r1, r0, [[REG1]]
+; SOFT: vmov r3, r2, [[REG2]]
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2i64_v4f32:
+declare <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %p)
+define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2i64_v4i32:
+declare <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %p)
+define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2i64_v8i16:
+declare <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %p)
+define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v2i64_v16i8:
+declare <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %p)
+define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2)
+ %4 = add <2 x i64> %3, %3
+ store <2 x i64> %4, <2 x i64>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4f32_f128:
+declare <4 x float> @test_v4f32_f128_helper(fp128 %p)
+define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) {
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4f32_v2f64:
+declare <4 x float> @test_v4f32_v2f64_helper(<2 x double> %p)
+define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4f32_v2i64:
+declare <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %p)
+define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4f32_v4i32:
+declare <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %p)
+define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4f32_v8i16:
+declare <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %p)
+define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4f32_v16i8:
+declare <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %p)
+define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2)
+ %4 = fadd <4 x float> %3, %3
+ store <4 x float> %4, <4 x float>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4i32_f128:
+declare <4 x i32> @test_v4i32_f128_helper(fp128 %p)
+define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) {
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4i32_v2f64:
+declare <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %p)
+define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4i32_v2i64:
+declare <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %p)
+define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4i32_v4f32:
+declare <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %p)
+define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4i32_v8i16:
+declare <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %p)
+define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v4i32_v16i8:
+declare <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %p)
+define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2)
+ %4 = add <4 x i32> %3, %3
+ store <4 x i32> %4, <4 x i32>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v8i16_f128:
+declare <8 x i16> @test_v8i16_f128_helper(fp128 %p)
+define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) {
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v8i16_v2f64:
+declare <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %p)
+define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v8i16_v2i64:
+declare <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %p)
+define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v8i16_v4f32:
+declare <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %p)
+define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v8i16_v4i32:
+declare <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %p)
+define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v8i16_v16i8:
+declare <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %p)
+define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.8 q0
+ %1 = load <16 x i8>* %p
+ %2 = add <16 x i8> %1, %1
+ %3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2)
+ %4 = add <8 x i16> %3, %3
+ store <8 x i16> %4, <8 x i16>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v16i8_f128:
+declare <16 x i8> @test_v16i8_f128_helper(fp128 %p)
+define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) {
+ %1 = load fp128* %p
+ %2 = fadd fp128 %1, %1
+ %3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v16i8_v2f64:
+declare <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %p)
+define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.f64 d1
+; HARD: vadd.f64 d0
+ %1 = load <2 x double>* %p
+ %2 = fadd <2 x double> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v16i8_v2i64:
+declare <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %p)
+define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vadd.i64 q0
+ %1 = load <2 x i64>* %p
+ %2 = add <2 x i64> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v16i8_v4f32:
+declare <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %p)
+define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x float>* %p
+ %2 = fadd <4 x float> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v16i8_v4i32:
+declare <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %p)
+define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.32 q0
+ %1 = load <4 x i32>* %p
+ %2 = add <4 x i32> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
+
+; CHECK-LABEL: test_v16i8_v8i16:
+declare <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %p)
+define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) {
+; SOFT: vmov r1, r0
+; SOFT: vmov r3, r2
+; HARD: vrev64.16 q0
+ %1 = load <8 x i16>* %p
+ %2 = add <8 x i16> %1, %1
+ %3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2)
+ %4 = add <16 x i8> %3, %3
+ store <16 x i8> %4, <16 x i8>* %q
+ ret void
+; SOFT: vmov {{d[0-9]+}}, r3, r2
+; SOFT: vmov {{d[0-9]+}}, r1, r0
+}
diff --git a/test/CodeGen/ARM/bits.ll b/test/CodeGen/ARM/bits.ll
index ce1b2ad5fad3..14aa27e90b64 100644
--- a/test/CodeGen/ARM/bits.ll
+++ b/test/CodeGen/ARM/bits.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
entry:
diff --git a/test/CodeGen/ARM/bswap16.ll b/test/CodeGen/ARM/bswap16.ll
new file mode 100644
index 000000000000..70c62d294eec
--- /dev/null
+++ b/test/CodeGen/ARM/bswap16.ll
@@ -0,0 +1,42 @@
+; RUN: llc -mtriple=arm-darwin -mattr=v6 < %s | FileCheck %s
+; RUN: llc -mtriple=thumb-darwin -mattr=v6 < %s | FileCheck %s
+
+
+define void @test1(i16* nocapture %data) {
+entry:
+ %0 = load i16* %data, align 2
+ %1 = tail call i16 @llvm.bswap.i16(i16 %0)
+ store i16 %1, i16* %data, align 2
+ ret void
+
+ ; CHECK-LABEL: test1:
+ ; CHECK: ldrh r[[R1:[0-9]+]], [r0]
+ ; CHECK: rev16 r[[R1]], r[[R1]]
+ ; CHECK: strh r[[R1]], [r0]
+}
+
+
+define void @test2(i16* nocapture %data, i16 zeroext %in) {
+entry:
+ %0 = tail call i16 @llvm.bswap.i16(i16 %in)
+ store i16 %0, i16* %data, align 2
+ ret void
+
+ ; CHECK-LABEL: test2:
+ ; CHECK: rev16 r[[R1:[0-9]+]], r1
+ ; CHECK: strh r[[R1]], [r0]
+}
+
+
+define i16 @test3(i16* nocapture %data) {
+entry:
+ %0 = load i16* %data, align 2
+ %1 = tail call i16 @llvm.bswap.i16(i16 %0)
+ ret i16 %1
+
+ ; CHECK-LABEL: test3:
+ ; CHECK: ldrh r[[R0:[0-9]+]], [r0]
+ ; CHECK: rev16 r[[R0]], r0
+}
+
+declare i16 @llvm.bswap.i16(i16)
diff --git a/test/CodeGen/ARM/build-attributes-encoding.s b/test/CodeGen/ARM/build-attributes-encoding.s
index 5ad51b284113..34a1ad38fb17 100644
--- a/test/CodeGen/ARM/build-attributes-encoding.s
+++ b/test/CodeGen/ARM/build-attributes-encoding.s
@@ -4,7 +4,7 @@
// RUN: | llvm-readobj -s -sd | FileCheck %s
// Tag_CPU_name (=5)
-.cpu Cortex-A8
+.cpu cortex-a8
// Tag_CPU_arch (=6)
.eabi_attribute 6, 10
@@ -61,7 +61,7 @@
.eabi_attribute 110, 160
// Check that tags > 128 are encoded properly
-.eabi_attribute 129, 1
+.eabi_attribute 129, "1"
.eabi_attribute 250, 1
// CHECK: Section {
@@ -71,15 +71,15 @@
// CHECK-NEXT: ]
// CHECK-NEXT: Address: 0x0
// CHECK-NEXT: Offset: 0x34
-// CHECK-NEXT: Size: 70
+// CHECK-NEXT: Size: 71
// CHECK-NEXT: Link: 0
// CHECK-NEXT: Info: 0
// CHECK-NEXT: AddressAlignment: 1
// CHECK-NEXT: EntrySize: 0
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 41450000 00616561 62690001 3B000000
+// CHECK-NEXT: 0000: 41460000 00616561 62690001 3C000000
// CHECK-NEXT: 0010: 05434F52 5445582D 41380006 0A074108
// CHECK-NEXT: 0020: 0109020A 030C0214 01150117 01180119
// CHECK-NEXT: 0030: 011B001C 0124012A 012C0244 036EA001
-// CHECK-NEXT: 0040: 810101FA 0101
+// CHECK-NEXT: 0040: 81013100 FA0101
// CHECK-NEXT: )
diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll
new file mode 100644
index 000000000000..d75d55d0fa68
--- /dev/null
+++ b/test/CodeGen/ARM/build-attributes.ll
@@ -0,0 +1,468 @@
+; This tests that MC/asm header conversion is smooth and that the
+; build attributes are correct
+
+; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale | FileCheck %s --check-prefix=XSCALE
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi | FileCheck %s --check-prefix=V6
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi | FileCheck %s --check-prefix=V6M
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s | FileCheck %s --check-prefix=ARM1156T2F-S
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-neon,-crypto | FileCheck %s --check-prefix=V8-FPARMv8
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-fp-armv8,-crypto | FileCheck %s --check-prefix=V8-NEON
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-crypto | FileCheck %s --check-prefix=V8-FPARMv8-NEON
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8-FPARMv8-NEON-CRYPTO
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,+d16 | FileCheck %s --check-prefix=CORTEX-A5-NONEON
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A5-NOFPU
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A12-NOFPU
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9-mp | FileCheck %s --check-prefix=CORTEX-A9-MP
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-NOFPU
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,+d16,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
+; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC
+; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER
+; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -relocation-model=default | FileCheck %s --check-prefix=RELOC-OTHER
+; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -relocation-model=dynamic-no-pic | FileCheck %s --check-prefix=RELOC-OTHER
+; RUN: llc < %s -mtriple=arm-none-linux-gnueabi | FileCheck %s --check-prefix=RELOC-OTHER
+
+; XSCALE: .eabi_attribute 6, 5
+; XSCALE: .eabi_attribute 8, 1
+; XSCALE: .eabi_attribute 9, 1
+
+; V6: .eabi_attribute 6, 6
+; V6: .eabi_attribute 8, 1
+; V6: .eabi_attribute 24, 1
+; V6: .eabi_attribute 25, 1
+; V6-NOT: .eabi_attribute 27
+; V6-NOT: .eabi_attribute 28
+; V6-NOT: .eabi_attribute 36
+; V6-NOT: .eabi_attribute 42
+; V6-NOT: .eabi_attribute 68
+
+; V6M: .eabi_attribute 6, 12
+; V6M-NOT: .eabi_attribute 7
+; V6M: .eabi_attribute 8, 0
+; V6M: .eabi_attribute 9, 1
+; V6M: .eabi_attribute 24, 1
+; V6M: .eabi_attribute 25, 1
+; V6M-NOT: .eabi_attribute 27
+; V6M-NOT: .eabi_attribute 28
+; V6M-NOT: .eabi_attribute 36
+; V6M-NOT: .eabi_attribute 42
+; V6M-NOT: .eabi_attribute 68
+
+; ARM1156T2F-S: .cpu arm1156t2f-s
+; ARM1156T2F-S: .eabi_attribute 6, 8
+; ARM1156T2F-S: .eabi_attribute 8, 1
+; ARM1156T2F-S: .eabi_attribute 9, 2
+; ARM1156T2F-S: .fpu vfpv2
+; ARM1156T2F-S: .eabi_attribute 20, 1
+; ARM1156T2F-S: .eabi_attribute 21, 1
+; ARM1156T2F-S: .eabi_attribute 23, 3
+; ARM1156T2F-S: .eabi_attribute 24, 1
+; ARM1156T2F-S: .eabi_attribute 25, 1
+; ARM1156T2F-S-NOT: .eabi_attribute 27
+; ARM1156T2F-S-NOT: .eabi_attribute 28
+; ARM1156T2F-S-NOT: .eabi_attribute 36
+; ARM1156T2F-S-NOT: .eabi_attribute 42
+; ARM1156T2F-S-NOT: .eabi_attribute 68
+
+; V7M: .eabi_attribute 6, 10
+; V7M: .eabi_attribute 7, 77
+; V7M: .eabi_attribute 8, 0
+; V7M: .eabi_attribute 9, 2
+; V7M: .eabi_attribute 24, 1
+; V7M: .eabi_attribute 25, 1
+; V7M-NOT: .eabi_attribute 27
+; V7M-NOT: .eabi_attribute 28
+; V7M-NOT: .eabi_attribute 36
+; V7M-NOT: .eabi_attribute 42
+; V7M-NOT: .eabi_attribute 44
+; V7M-NOT: .eabi_attribute 68
+
+; V7: .syntax unified
+; V7: .eabi_attribute 6, 10
+; V7: .eabi_attribute 20, 1
+; V7: .eabi_attribute 21, 1
+; V7: .eabi_attribute 23, 3
+; V7: .eabi_attribute 24, 1
+; V7: .eabi_attribute 25, 1
+; V7-NOT: .eabi_attribute 27
+; V7-NOT: .eabi_attribute 28
+; V7-NOT: .eabi_attribute 36
+; V7-NOT: .eabi_attribute 42
+; V7-NOT: .eabi_attribute 68
+
+; V8: .syntax unified
+; V8: .eabi_attribute 6, 14
+
+; Vt8: .syntax unified
+; Vt8: .eabi_attribute 6, 14
+
+; V8-FPARMv8: .syntax unified
+; V8-FPARMv8: .eabi_attribute 6, 14
+; V8-FPARMv8: .fpu fp-armv8
+
+; V8-NEON: .syntax unified
+; V8-NEON: .eabi_attribute 6, 14
+; V8-NEON: .fpu neon
+; V8-NEON: .eabi_attribute 12, 3
+
+; V8-FPARMv8-NEON: .syntax unified
+; V8-FPARMv8-NEON: .eabi_attribute 6, 14
+; V8-FPARMv8-NEON: .fpu neon-fp-armv8
+; V8-FPARMv8-NEON: .eabi_attribute 12, 3
+
+; V8-FPARMv8-NEON-CRYPTO: .syntax unified
+; V8-FPARMv8-NEON-CRYPTO: .eabi_attribute 6, 14
+; V8-FPARMv8-NEON-CRYPTO: .fpu crypto-neon-fp-armv8
+; V8-FPARMv8-NEON-CRYPTO: .eabi_attribute 12, 3
+
+; Tag_CPU_arch 'ARMv7'
+; CORTEX-A7-CHECK: .eabi_attribute 6, 10
+; CORTEX-A7-NOFPU: .eabi_attribute 6, 10
+; CORTEX-A7-FPUV4: .eabi_attribute 6, 10
+
+; Tag_CPU_arch_profile 'A'
+; CORTEX-A7-CHECK: .eabi_attribute 7, 65
+; CORTEX-A7-NOFPU: .eabi_attribute 7, 65
+; CORTEX-A7-FPUV4: .eabi_attribute 7, 65
+
+; Tag_ARM_ISA_use
+; CORTEX-A7-CHECK: .eabi_attribute 8, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 8, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 8, 1
+
+; Tag_THUMB_ISA_use
+; CORTEX-A7-CHECK: .eabi_attribute 9, 2
+; CORTEX-A7-NOFPU: .eabi_attribute 9, 2
+; CORTEX-A7-FPUV4: .eabi_attribute 9, 2
+
+; CORTEX-A7-CHECK: .fpu neon-vfpv4
+; CORTEX-A7-NOFPU-NOT: .fpu
+; CORTEX-A7-FPUV4: .fpu vfpv4
+
+; Tag_ABI_FP_denormal
+; CORTEX-A7-CHECK: .eabi_attribute 20, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 20, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 20, 1
+
+; Tag_ABI_FP_exceptions
+; CORTEX-A7-CHECK: .eabi_attribute 21, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 21, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 21, 1
+
+; Tag_ABI_FP_number_model
+; CORTEX-A7-CHECK: .eabi_attribute 23, 3
+; CORTEX-A7-NOFPU: .eabi_attribute 23, 3
+; CORTEX-A7-FPUV4: .eabi_attribute 23, 3
+
+; Tag_ABI_align_needed
+; CORTEX-A7-CHECK: .eabi_attribute 24, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 24, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 24, 1
+
+; Tag_ABI_align_preserved
+; CORTEX-A7-CHECK: .eabi_attribute 25, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 25, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 25, 1
+
+; Tag_FP_HP_extension
+; CORTEX-A7-CHECK: .eabi_attribute 36, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 36, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 36, 1
+
+; Tag_MPextension_use
+; CORTEX-A7-CHECK: .eabi_attribute 42, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 42, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 42, 1
+
+; Tag_DIV_use
+; CORTEX-A7-CHECK: .eabi_attribute 44, 2
+; CORTEX-A7-NOFPU: .eabi_attribute 44, 2
+; CORTEX-A7-FPUV4: .eabi_attribute 44, 2
+
+; Tag_Virtualization_use
+; CORTEX-A7-CHECK: .eabi_attribute 68, 3
+; CORTEX-A7-NOFPU: .eabi_attribute 68, 3
+; CORTEX-A7-FPUV4: .eabi_attribute 68, 3
+
+; CORTEX-A5-DEFAULT: .cpu cortex-a5
+; CORTEX-A5-DEFAULT: .eabi_attribute 6, 10
+; CORTEX-A5-DEFAULT: .eabi_attribute 7, 65
+; CORTEX-A5-DEFAULT: .eabi_attribute 8, 1
+; CORTEX-A5-DEFAULT: .eabi_attribute 9, 2
+; CORTEX-A5-DEFAULT: .fpu neon-vfpv4
+; CORTEX-A5-DEFAULT: .eabi_attribute 20, 1
+; CORTEX-A5-DEFAULT: .eabi_attribute 21, 1
+; CORTEX-A5-DEFAULT: .eabi_attribute 23, 3
+; CORTEX-A5-DEFAULT: .eabi_attribute 24, 1
+; CORTEX-A5-DEFAULT: .eabi_attribute 25, 1
+; CORTEX-A5-DEFAULT: .eabi_attribute 42, 1
+; CORTEX-A5-DEFAULT: .eabi_attribute 68, 1
+
+; CORTEX-A5-NONEON: .cpu cortex-a5
+; CORTEX-A5-NONEON: .eabi_attribute 6, 10
+; CORTEX-A5-NONEON: .eabi_attribute 7, 65
+; CORTEX-A5-NONEON: .eabi_attribute 8, 1
+; CORTEX-A5-NONEON: .eabi_attribute 9, 2
+; CORTEX-A5-NONEON: .fpu vfpv4-d16
+; CORTEX-A5-NONEON: .eabi_attribute 20, 1
+; CORTEX-A5-NONEON: .eabi_attribute 21, 1
+; CORTEX-A5-NONEON: .eabi_attribute 23, 3
+; CORTEX-A5-NONEON: .eabi_attribute 24, 1
+; CORTEX-A5-NONEON: .eabi_attribute 25, 1
+; CORTEX-A5-NONEON: .eabi_attribute 42, 1
+; CORTEX-A5-NONEON: .eabi_attribute 68, 1
+
+; CORTEX-A5-NOFPU: .cpu cortex-a5
+; CORTEX-A5-NOFPU: .eabi_attribute 6, 10
+; CORTEX-A5-NOFPU: .eabi_attribute 7, 65
+; CORTEX-A5-NOFPU: .eabi_attribute 8, 1
+; CORTEX-A5-NOFPU: .eabi_attribute 9, 2
+; CORTEX-A5-NOFPU-NOT: .fpu
+; CORTEX-A5-NOFPU: .eabi_attribute 20, 1
+; CORTEX-A5-NOFPU: .eabi_attribute 21, 1
+; CORTEX-A5-NOFPU: .eabi_attribute 23, 3
+; CORTEX-A5-NOFPU: .eabi_attribute 24, 1
+; CORTEX-A5-NOFPU: .eabi_attribute 25, 1
+; CORTEX-A5-NOFPU: .eabi_attribute 42, 1
+; CORTEX-A5-NOFPU: .eabi_attribute 68, 1
+
+; CORTEX-A9-SOFT: .cpu cortex-a9
+; CORTEX-A9-SOFT: .eabi_attribute 6, 10
+; CORTEX-A9-SOFT: .eabi_attribute 7, 65
+; CORTEX-A9-SOFT: .eabi_attribute 8, 1
+; CORTEX-A9-SOFT: .eabi_attribute 9, 2
+; CORTEX-A9-SOFT: .fpu neon
+; CORTEX-A9-SOFT: .eabi_attribute 20, 1
+; CORTEX-A9-SOFT: .eabi_attribute 21, 1
+; CORTEX-A9-SOFT: .eabi_attribute 23, 3
+; CORTEX-A9-SOFT: .eabi_attribute 24, 1
+; CORTEX-A9-SOFT: .eabi_attribute 25, 1
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 27
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 28
+; CORTEX-A9-SOFT: .eabi_attribute 36, 1
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 42
+; CORTEX-A9-SOFT: .eabi_attribute 68, 1
+
+; CORTEX-A9-HARD: .cpu cortex-a9
+; CORTEX-A9-HARD: .eabi_attribute 6, 10
+; CORTEX-A9-HARD: .eabi_attribute 7, 65
+; CORTEX-A9-HARD: .eabi_attribute 8, 1
+; CORTEX-A9-HARD: .eabi_attribute 9, 2
+; CORTEX-A9-HARD: .fpu neon
+; CORTEX-A9-HARD: .eabi_attribute 20, 1
+; CORTEX-A9-HARD: .eabi_attribute 21, 1
+; CORTEX-A9-HARD: .eabi_attribute 23, 3
+; CORTEX-A9-HARD: .eabi_attribute 24, 1
+; CORTEX-A9-HARD: .eabi_attribute 25, 1
+; CORTEX-A9-HARD-NOT: .eabi_attribute 27
+; CORTEX-A9-HARD: .eabi_attribute 28, 1
+; CORTEX-A9-HARD: .eabi_attribute 36, 1
+; CORTEX-A9-HARD-NOT: .eabi_attribute 42
+; CORTEX-A9-HARD: .eabi_attribute 68, 1
+
+; CORTEX-A9-MP: .cpu cortex-a9-mp
+; CORTEX-A9-MP: .eabi_attribute 6, 10
+; CORTEX-A9-MP: .eabi_attribute 7, 65
+; CORTEX-A9-MP: .eabi_attribute 8, 1
+; CORTEX-A9-MP: .eabi_attribute 9, 2
+; CORTEX-A9-MP: .fpu neon
+; CORTEX-A9-MP: .eabi_attribute 20, 1
+; CORTEX-A9-MP: .eabi_attribute 21, 1
+; CORTEX-A9-MP: .eabi_attribute 23, 3
+; CORTEX-A9-MP: .eabi_attribute 24, 1
+; CORTEX-A9-MP: .eabi_attribute 25, 1
+; CORTEX-A9-MP-NOT: .eabi_attribute 27
+; CORTEX-A9-MP-NOT: .eabi_attribute 28
+; CORTEX-A9-MP: .eabi_attribute 36, 1
+; CORTEX-A9-MP: .eabi_attribute 42, 1
+; CORTEX-A9-MP: .eabi_attribute 68, 1
+
+; CORTEX-A12-DEFAULT: .cpu cortex-a12
+; CORTEX-A12-DEFAULT: .eabi_attribute 6, 10
+; CORTEX-A12-DEFAULT: .eabi_attribute 7, 65
+; CORTEX-A12-DEFAULT: .eabi_attribute 8, 1
+; CORTEX-A12-DEFAULT: .eabi_attribute 9, 2
+; CORTEX-A12-DEFAULT: .fpu neon-vfpv4
+; CORTEX-A12-DEFAULT: .eabi_attribute 20, 1
+; CORTEX-A12-DEFAULT: .eabi_attribute 21, 1
+; CORTEX-A12-DEFAULT: .eabi_attribute 23, 3
+; CORTEX-A12-DEFAULT: .eabi_attribute 24, 1
+; CORTEX-A12-DEFAULT: .eabi_attribute 25, 1
+; CORTEX-A12-DEFAULT: .eabi_attribute 42, 1
+; CORTEX-A12-DEFAULT: .eabi_attribute 44, 2
+; CORTEX-A12-DEFAULT: .eabi_attribute 68, 3
+
+; CORTEX-A12-NOFPU: .cpu cortex-a12
+; CORTEX-A12-NOFPU: .eabi_attribute 6, 10
+; CORTEX-A12-NOFPU: .eabi_attribute 7, 65
+; CORTEX-A12-NOFPU: .eabi_attribute 8, 1
+; CORTEX-A12-NOFPU: .eabi_attribute 9, 2
+; CORTEX-A12-NOFPU-NOT: .fpu
+; CORTEX-A12-NOFPU: .eabi_attribute 20, 1
+; CORTEX-A12-NOFPU: .eabi_attribute 21, 1
+; CORTEX-A12-NOFPU: .eabi_attribute 23, 3
+; CORTEX-A12-NOFPU: .eabi_attribute 24, 1
+; CORTEX-A12-NOFPU: .eabi_attribute 25, 1
+; CORTEX-A12-NOFPU: .eabi_attribute 42, 1
+; CORTEX-A12-NOFPU: .eabi_attribute 44, 2
+; CORTEX-A12-NOFPU: .eabi_attribute 68, 3
+
+; CORTEX-A15: .cpu cortex-a15
+; CORTEX-A15: .eabi_attribute 6, 10
+; CORTEX-A15: .eabi_attribute 7, 65
+; CORTEX-A15: .eabi_attribute 8, 1
+; CORTEX-A15: .eabi_attribute 9, 2
+; CORTEX-A15: .fpu neon-vfpv4
+; CORTEX-A15: .eabi_attribute 20, 1
+; CORTEX-A15: .eabi_attribute 21, 1
+; CORTEX-A15: .eabi_attribute 23, 3
+; CORTEX-A15: .eabi_attribute 24, 1
+; CORTEX-A15: .eabi_attribute 25, 1
+; CORTEX-A15-NOT: .eabi_attribute 27
+; CORTEX-A15-NOT: .eabi_attribute 28
+; CORTEX-A15: .eabi_attribute 36, 1
+; CORTEX-A15: .eabi_attribute 42, 1
+; CORTEX-A15: .eabi_attribute 44, 2
+; CORTEX-A15: .eabi_attribute 68, 3
+
+; CORTEX-M0: .cpu cortex-m0
+; CORTEX-M0: .eabi_attribute 6, 12
+; CORTEX-M0-NOT: .eabi_attribute 7
+; CORTEX-M0: .eabi_attribute 8, 0
+; CORTEX-M0: .eabi_attribute 9, 1
+; CORTEX-M0: .eabi_attribute 24, 1
+; CORTEX-M0: .eabi_attribute 25, 1
+; CORTEX-M0-NOT: .eabi_attribute 27
+; CORTEX-M0-NOT: .eabi_attribute 28
+; CORTEX-M0-NOT: .eabi_attribute 36
+; CORTEX-M0-NOT: .eabi_attribute 42
+; CORTEX-M0-NOT: .eabi_attribute 68
+
+; CORTEX-M3: .cpu cortex-m3
+; CORTEX-M3: .eabi_attribute 6, 10
+; CORTEX-M3: .eabi_attribute 7, 77
+; CORTEX-M3: .eabi_attribute 8, 0
+; CORTEX-M3: .eabi_attribute 9, 2
+; CORTEX-M3: .eabi_attribute 20, 1
+; CORTEX-M3: .eabi_attribute 21, 1
+; CORTEX-M3: .eabi_attribute 23, 3
+; CORTEX-M3: .eabi_attribute 24, 1
+; CORTEX-M3: .eabi_attribute 25, 1
+; CORTEX-M3-NOT: .eabi_attribute 27
+; CORTEX-M3-NOT: .eabi_attribute 28
+; CORTEX-M3-NOT: .eabi_attribute 36
+; CORTEX-M3-NOT: .eabi_attribute 42
+; CORTEX-M3-NOT: .eabi_attribute 44
+; CORTEX-M3-NOT: .eabi_attribute 68
+
+; CORTEX-M4-SOFT: .cpu cortex-m4
+; CORTEX-M4-SOFT: .eabi_attribute 6, 13
+; CORTEX-M4-SOFT: .eabi_attribute 7, 77
+; CORTEX-M4-SOFT: .eabi_attribute 8, 0
+; CORTEX-M4-SOFT: .eabi_attribute 9, 2
+; CORTEX-M4-SOFT: .fpu vfpv4-d16
+; CORTEX-M4-SOFT: .eabi_attribute 20, 1
+; CORTEX-M4-SOFT: .eabi_attribute 21, 1
+; CORTEX-M4-SOFT: .eabi_attribute 23, 3
+; CORTEX-M4-SOFT: .eabi_attribute 24, 1
+; CORTEX-M4-SOFT: .eabi_attribute 25, 1
+; CORTEX-M4-SOFT: .eabi_attribute 27, 1
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 28
+; CORTEX-M4-SOFT: .eabi_attribute 36, 1
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 42
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 44
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 68
+
+; CORTEX-M4-HARD: .cpu cortex-m4
+; CORTEX-M4-HARD: .eabi_attribute 6, 13
+; CORTEX-M4-HARD: .eabi_attribute 7, 77
+; CORTEX-M4-HARD: .eabi_attribute 8, 0
+; CORTEX-M4-HARD: .eabi_attribute 9, 2
+; CORTEX-M4-HARD: .fpu vfpv4-d16
+; CORTEX-M4-HARD: .eabi_attribute 20, 1
+; CORTEX-M4-HARD: .eabi_attribute 21, 1
+; CORTEX-M4-HARD: .eabi_attribute 23, 3
+; CORTEX-M4-HARD: .eabi_attribute 24, 1
+; CORTEX-M4-HARD: .eabi_attribute 25, 1
+; CORTEX-M4-HARD: .eabi_attribute 27, 1
+; CORTEX-M4-HARD: .eabi_attribute 28, 1
+; CORTEX-M4-HARD: .eabi_attribute 36, 1
+; CORTEX-M4-HARD-NOT: .eabi_attribute 42
+; CORTEX-M4-HARD-NOT: .eabi_attribute 44
+; CORTEX-M4-HARD-NOT: .eabi_attribute 68
+
+; CORTEX-R5: .cpu cortex-r5
+; CORTEX-R5: .eabi_attribute 6, 10
+; CORTEX-R5: .eabi_attribute 7, 82
+; CORTEX-R5: .eabi_attribute 8, 1
+; CORTEX-R5: .eabi_attribute 9, 2
+; CORTEX-R5: .fpu vfpv3-d16
+; CORTEX-R5: .eabi_attribute 20, 1
+; CORTEX-R5: .eabi_attribute 21, 1
+; CORTEX-R5: .eabi_attribute 23, 3
+; CORTEX-R5: .eabi_attribute 24, 1
+; CORTEX-R5: .eabi_attribute 25, 1
+; CORTEX-R5: .eabi_attribute 27, 1
+; CORTEX-R5-NOT: .eabi_attribute 28
+; CORTEX-R5-NOT: .eabi_attribute 36
+; CORTEX-R5-NOT: .eabi_attribute 42
+; CORTEX-R5: .eabi_attribute 44, 2
+; CORTEX-R5-NOT: .eabi_attribute 68
+
+; CORTEX-A53: .cpu cortex-a53
+; CORTEX-A53: .eabi_attribute 6, 14
+; CORTEX-A53: .eabi_attribute 7, 65
+; CORTEX-A53: .eabi_attribute 8, 1
+; CORTEX-A53: .eabi_attribute 9, 2
+; CORTEX-A53: .fpu crypto-neon-fp-armv8
+; CORTEX-A53: .eabi_attribute 12, 3
+; CORTEX-A53: .eabi_attribute 24, 1
+; CORTEX-A53: .eabi_attribute 25, 1
+; CORTEX-A53-NOT: .eabi_attribute 27
+; CORTEX-A53-NOT: .eabi_attribute 28
+; CORTEX-A53: .eabi_attribute 36, 1
+; CORTEX-A53: .eabi_attribute 42, 1
+; CORTEX-A53-NOT: .eabi_attribute 44
+; CORTEX-A53: .eabi_attribute 68, 3
+
+; CORTEX-A57: .cpu cortex-a57
+; CORTEX-A57: .eabi_attribute 6, 14
+; CORTEX-A57: .eabi_attribute 7, 65
+; CORTEX-A57: .eabi_attribute 8, 1
+; CORTEX-A57: .eabi_attribute 9, 2
+; CORTEX-A57: .fpu crypto-neon-fp-armv8
+; CORTEX-A57: .eabi_attribute 12, 3
+; CORTEX-A57: .eabi_attribute 24, 1
+; CORTEX-A57: .eabi_attribute 25, 1
+; CORTEX-A57-NOT: .eabi_attribute 27
+; CORTEX-A57-NOT: .eabi_attribute 28
+; CORTEX-A57: .eabi_attribute 36, 1
+; CORTEX-A57: .eabi_attribute 42, 1
+; CORTEX-A57-NOT: .eabi_attribute 44
+; CORTEX-A57: .eabi_attribute 68, 3
+
+; RELOC-PIC: .eabi_attribute 15, 1
+; RELOC-PIC: .eabi_attribute 16, 1
+; RELOC-PIC: .eabi_attribute 17, 2
+; RELOC-OTHER: .eabi_attribute 17, 1
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/cache-intrinsic.ll b/test/CodeGen/ARM/cache-intrinsic.ll
new file mode 100644
index 000000000000..6048917ee9a3
--- /dev/null
+++ b/test/CodeGen/ARM/cache-intrinsic.ll
@@ -0,0 +1,26 @@
+; RUN: llc %s -o - | FileCheck %s
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7--linux-gnueabihf"
+
+@buffer = global [32 x i8] c"This is a largely unused buffer\00", align 1
+@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
+@.str1 = private unnamed_addr constant [25 x i8] c"Still, largely unused...\00", align 1
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
+ %call1 = call i8* @strcpy(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds ([25 x i8]* @.str1, i32 0, i32 0)) #3
+ call void @llvm.clear_cache(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds (i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i32 32)) #3
+ %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
+ ret i32 0
+}
+
+; CHECK: __clear_cache
+
+declare i32 @printf(i8*, ...)
+
+declare i8* @strcpy(i8*, i8*)
+
+declare void @llvm.clear_cache(i8*, i8*)
diff --git a/test/CodeGen/ARM/call-tc.ll b/test/CodeGen/ARM/call-tc.ll
index d4636021b599..a35fd7476465 100644
--- a/test/CodeGen/ARM/call-tc.ll
+++ b/test/CodeGen/ARM/call-tc.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -mtriple=armv6-apple-ios -mattr=+vfp2 -arm-tail-calls | FileCheck %s -check-prefix=CHECKV6
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -relocation-model=pic -mattr=+vfp2 -arm-tail-calls | FileCheck %s -check-prefix=CHECKELF
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-tail-calls | FileCheck %s -check-prefix=CHECKT2D
-; RUN: llc < %s -mtriple=thumbv7-apple-ios5.0 | FileCheck %s -check-prefix=CHECKT2D
+; RUN: llc < %s -mtriple=armv6-apple-ios5.0 -mattr=+vfp2 -arm-atomic-cfg-tidy=0 | FileCheck %s -check-prefix=CHECKV6
+; RUN: llc < %s -mtriple=thumbv7-apple-ios5.0 -arm-atomic-cfg-tidy=0 | FileCheck %s -check-prefix=CHECKT2D
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -relocation-model=pic -mattr=+vfp2 -arm-atomic-cfg-tidy=0 \
+; RUN: | FileCheck %s -check-prefix=CHECKELF
; Enable tailcall optimization for iOS 5.0
; rdar://9120031
diff --git a/test/CodeGen/ARM/call.ll b/test/CodeGen/ARM/call.ll
index 107e79a9e01e..f6301cf02032 100644
--- a/test/CodeGen/ARM/call.ll
+++ b/test/CodeGen/ARM/call.ll
@@ -1,7 +1,11 @@
-; RUN: llc < %s -march=arm -mattr=+v4t | FileCheck %s -check-prefix=CHECKV4
-; RUN: llc < %s -march=arm -mattr=+v5t | FileCheck %s -check-prefix=CHECKV5
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi\
-; RUN: -relocation-model=pic | FileCheck %s -check-prefix=CHECKELF
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - \
+; RUN: | FileCheck %s -check-prefix=CHECKV4
+
+; RUN: llc -mtriple=arm-eabi -mattr=+v5t %s -o - \
+; RUN: | FileCheck %s -check-prefix=CHECKV5
+
+; RUN: llc -mtriple=armv6-linux-gnueabi -relocation-model=pic %s -o - \
+; RUN: | FileCheck %s -check-prefix=CHECKELF
@t = weak global i32 ()* null ; <i32 ()**> [#uses=1]
diff --git a/test/CodeGen/ARM/carry.ll b/test/CodeGen/ARM/carry.ll
index f67987f8eb61..e344b08a8aeb 100644
--- a/test/CodeGen/ARM/carry.ll
+++ b/test/CodeGen/ARM/carry.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i64 @f1(i64 %a, i64 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/clz.ll b/test/CodeGen/ARM/clz.ll
index 5b6a584bbee8..68e8c7cef1bc 100644
--- a/test/CodeGen/ARM/clz.ll
+++ b/test/CodeGen/ARM/clz.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v5t | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v5t %s -o - | FileCheck %s
declare i32 @llvm.ctlz.i32(i32, i1)
diff --git a/test/CodeGen/ARM/cmpxchg-idioms.ll b/test/CodeGen/ARM/cmpxchg-idioms.ll
new file mode 100644
index 000000000000..fb88575cab3b
--- /dev/null
+++ b/test/CodeGen/ARM/cmpxchg-idioms.ll
@@ -0,0 +1,107 @@
+; RUN: llc -mtriple=thumbv7s-apple-ios7.0 -o - %s | FileCheck %s
+
+define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_return:
+
+; CHECK: dmb ishst
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], r1
+; CHECK: bne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: strex [[STATUS:r[0-9]+]], {{r[0-9]+}}, [r0]
+; CHECK: cmp [[STATUS]], #0
+; CHECK: bne [[LOOP]]
+
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: movs r0, #1
+; CHECK: dmb ish
+; CHECK: bx lr
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: movs r0, #0
+; CHECK: dmb ish
+; CHECK: bx lr
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %conv = zext i1 %success to i32
+ ret i32 %conv
+}
+
+define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
+; CHECK-LABEL: test_return_bool:
+
+; CHECK: uxtb [[OLDBYTE:r[0-9]+]], r1
+; CHECK: dmb ishst
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldrexb [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], [[OLDBYTE]]
+; CHECK: bne [[FAIL:LBB[0-9]+_[0-9]+]]
+
+; CHECK: strexb [[STATUS:r[0-9]+]], {{r[0-9]+}}, [r0]
+; CHECK: cmp [[STATUS]], #0
+; CHECK: bne [[LOOP]]
+
+ ; FIXME: this eor is redundant. Need to teach DAG combine that.
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: movs [[TMP:r[0-9]+]], #1
+; CHECK: eor r0, [[TMP]], #1
+; CHECK: bx lr
+
+; CHECK: [[FAIL]]:
+; CHECK: movs [[TMP:r[0-9]+]], #0
+; CHECK: eor r0, [[TMP]], #1
+; CHECK: bx lr
+
+
+ %pair = cmpxchg i8* %value, i8 %oldValue, i8 %newValue acq_rel monotonic
+ %success = extractvalue { i8, i1 } %pair, 1
+ %failure = xor i1 %success, 1
+ ret i1 %failure
+}
+
+define void @test_conditional(i32* %p, i32 %oldval, i32 %newval) {
+; CHECK-LABEL: test_conditional:
+
+; CHECK: dmb ishst
+
+; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], r1
+; CHECK: bne [[FAILED:LBB[0-9]+_[0-9]+]]
+
+; CHECK: strex [[STATUS:r[0-9]+]], r2, [r0]
+; CHECK: cmp [[STATUS]], #0
+; CHECK: bne [[LOOP]]
+
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: dmb ish
+; CHECK: b.w _bar
+
+; CHECK: [[FAILED]]:
+; CHECK-NOT: cmp {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: dmb ish
+; CHECK: b.w _baz
+
+ %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ tail call void @bar() #2
+ br label %end
+
+false:
+ tail call void @baz() #2
+ br label %end
+
+end:
+ ret void
+}
+
+declare void @bar()
+declare void @baz()
diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll
new file mode 100644
index 000000000000..126e33062623
--- /dev/null
+++ b/test/CodeGen/ARM/cmpxchg-weak.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
+
+define void @test_cmpxchg_weak(i32 *%addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: test_cmpxchg_weak:
+
+ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %oldval = extractvalue { i32, i1 } %pair, 0
+; CHECK: dmb ish
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r0]
+; CHECK: cmp [[LOADED]], r1
+; CHECK: strexeq [[SUCCESS:r[0-9]+]], r2, [r0]
+; CHECK: cmpeq [[SUCCESS]], #0
+; CHECK: bne [[DONE:LBB[0-9]+_[0-9]+]]
+; CHECK: dmb ish
+; CHECK: [[DONE]]:
+; CHECK: str r3, [r0]
+; CHECK: bx lr
+
+ store i32 %oldval, i32* %addr
+ ret void
+}
+
+
+define i1 @test_cmpxchg_weak_to_bool(i32, i32 *%addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: test_cmpxchg_weak_to_bool:
+
+ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %success = extractvalue { i32, i1 } %pair, 1
+
+; CHECK: dmb ish
+; CHECK: mov r0, #0
+; CHECK: ldrex [[LOADED:r[0-9]+]], [r1]
+; CHECK: cmp [[LOADED]], r2
+; CHECK: strexeq [[STATUS:r[0-9]+]], r3, [r1]
+; CHECK: cmpeq [[STATUS]], #0
+; CHECK: bne [[DONE:LBB[0-9]+_[0-9]+]]
+; CHECK: dmb ish
+; CHECK: mov r0, #1
+; CHECK: [[DONE]]:
+; CHECK: bx lr
+
+ ret i1 %success
+}
diff --git a/test/CodeGen/ARM/coalesce-dbgvalue.ll b/test/CodeGen/ARM/coalesce-dbgvalue.ll
index 86106a045201..606c9bc52d64 100644
--- a/test/CodeGen/ARM/coalesce-dbgvalue.ll
+++ b/test/CodeGen/ARM/coalesce-dbgvalue.ll
@@ -81,7 +81,7 @@ attributes #3 = { nounwind }
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 182024) (llvm/trunk 182023)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !15, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/d/b/pr16110.c] [DW_LANG_C99]
!1 = metadata !{metadata !"pr16110.c", metadata !"/d/b"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"pr16110", metadata !"pr16110", metadata !"", i32 7, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 ()* @pr16110, null, null, metadata !9, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [pr16110]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/d/b/pr16110.c]
diff --git a/test/CodeGen/ARM/compare-call.ll b/test/CodeGen/ARM/compare-call.ll
index fac2bc5e432f..323eb1f2dd3c 100644
--- a/test/CodeGen/ARM/compare-call.ll
+++ b/test/CodeGen/ARM/compare-call.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | \
-; RUN: grep vcmpe.f32
+; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s
define void @test3(float* %glob, i32 %X) {
entry:
@@ -18,3 +17,6 @@ UnifiedReturnBlock: ; preds = %entry
}
declare i32 @bar(...)
+
+; CHECK: vcmpe.f32
+
diff --git a/test/CodeGen/ARM/constantfp.ll b/test/CodeGen/ARM/constantfp.ll
index 974bdd729efc..27b6e9b904d2 100644
--- a/test/CodeGen/ARM/constantfp.ll
+++ b/test/CodeGen/ARM/constantfp.ll
@@ -15,7 +15,7 @@ define arm_aapcs_vfpcc float @test_vmov_imm() {
; CHECK: vmov.i32 d0, #0
; CHECK-NONEON-LABEL: test_vmov_imm:
-; CHECK_NONEON: vldr s0, {{.?LCPI[0-9]+_[0-9]+}}
+; CHECK-NONEON: vldr s0, {{.?LCPI[0-9]+_[0-9]+}}
ret float 0.0
}
@@ -24,7 +24,7 @@ define arm_aapcs_vfpcc float @test_vmvn_imm() {
; CHECK: vmvn.i32 d0, #0xb0000000
; CHECK-NONEON-LABEL: test_vmvn_imm:
-; CHECK_NONEON: vldr s0, {{.?LCPI[0-9]+_[0-9]+}}
+; CHECK-NONEON: vldr s0, {{.?LCPI[0-9]+_[0-9]+}}
ret float 8589934080.0
}
@@ -33,7 +33,7 @@ define arm_aapcs_vfpcc double @test_vmov_f64() {
; CHECK: vmov.f64 d0, #1.0
; CHECK-NONEON-LABEL: test_vmov_f64:
-; CHECK_NONEON: vmov.f64 d0, #1.0
+; CHECK-NONEON: vmov.f64 d0, #1.0
ret double 1.0
}
@@ -43,7 +43,7 @@ define arm_aapcs_vfpcc double @test_vmov_double_imm() {
; CHECK: vmov.i32 d0, #0
; CHECK-NONEON-LABEL: test_vmov_double_imm:
-; CHECK_NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
+; CHECK-NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
ret double 0.0
}
@@ -52,7 +52,7 @@ define arm_aapcs_vfpcc double @test_vmvn_double_imm() {
; CHECK: vmvn.i32 d0, #0xb0000000
; CHECK-NONEON-LABEL: test_vmvn_double_imm:
-; CHECK_NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
+; CHECK-NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
ret double 0x4fffffff4fffffff
}
@@ -63,6 +63,6 @@ define arm_aapcs_vfpcc double @test_notvmvn_double_imm() {
; CHECK: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
; CHECK-NONEON-LABEL: test_notvmvn_double_imm:
-; CHECK_NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
+; CHECK-NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
ret double 0x4fffffffffffffff
}
diff --git a/test/CodeGen/ARM/crash-O0.ll b/test/CodeGen/ARM/crash-O0.ll
index 8bce4e0097fa..8855bb99aaf9 100644
--- a/test/CodeGen/ARM/crash-O0.ll
+++ b/test/CodeGen/ARM/crash-O0.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -relocation-model=pic -disable-fp-elim
+; RUN: llc < %s -O0 -relocation-model=pic -disable-fp-elim -no-integrated-as
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64-n32"
target triple = "armv6-apple-darwin10"
diff --git a/test/CodeGen/ARM/cse-ldrlit.ll b/test/CodeGen/ARM/cse-ldrlit.ll
new file mode 100644
index 000000000000..ea8c0ca8560d
--- /dev/null
+++ b/test/CodeGen/ARM/cse-ldrlit.ll
@@ -0,0 +1,61 @@
+; RUN: llc -mtriple=thumbv6m-apple-none-macho -relocation-model=pic -o - %s | FileCheck %s --check-prefix=CHECK-THUMB-PIC
+; RUN: llc -mtriple=arm-apple-none-macho -relocation-model=pic -o - %s | FileCheck %s --check-prefix=CHECK-ARM-PIC
+; RUN: llc -mtriple=thumbv6m-apple-none-macho -relocation-model=dynamic-no-pic -o - %s | FileCheck %s --check-prefix=CHECK-DYNAMIC
+; RUN: llc -mtriple=arm-apple-none-macho -relocation-model=dynamic-no-pic -o - %s | FileCheck %s --check-prefix=CHECK-DYNAMIC
+; RUN: llc -mtriple=thumbv6m-apple-none-macho -relocation-model=static -o - %s | FileCheck %s --check-prefix=CHECK-STATIC
+; RUN: llc -mtriple=arm-apple-none-macho -relocation-model=static -o - %s | FileCheck %s --check-prefix=CHECK-STATIC
+@var = global [16 x i32] zeroinitializer
+
+declare void @bar(i32*)
+
+define void @foo() {
+ %flag = load i32* getelementptr inbounds([16 x i32]* @var, i32 0, i32 1)
+ %tst = icmp eq i32 %flag, 0
+ br i1 %tst, label %true, label %false
+true:
+ tail call void @bar(i32* getelementptr inbounds([16 x i32]* @var, i32 0, i32 4))
+ ret void
+false:
+ ret void
+}
+
+; CHECK-THUMB-PIC-LABEL: foo:
+; CHECK-THUMB-PIC: ldr r0, LCPI0_0
+; CHECK-THUMB-PIC: LPC0_0:
+; CHECK-THUMB-PIC-NEXT: add r0, pc
+; CHECK-THUMB-PIC: ldr {{r[1-9][0-9]?}}, [r0, #4]
+
+; CHECK-THUMB-PIC: LCPI0_0:
+; CHECK-THUMB-PIC-NEXT: .long _var-(LPC0_0+4)
+; CHECK-THUMB-PIC-NOT: LCPI0_1
+
+
+; CHECK-ARM-PIC-LABEL: foo:
+; CHECK-ARM-PIC: ldr [[VAR_OFFSET:r[0-9]+]], LCPI0_0
+; CHECK-ARM-PIC: LPC0_0:
+; CHECK-ARM-PIC-NEXT: ldr r0, [pc, [[VAR_OFFSET]]]
+; CHECK-ARM-PIC: ldr {{r[1-9][0-9]?}}, [r0, #4]
+
+; CHECK-ARM-PIC: LCPI0_0:
+; CHECK-ARM-PIC-NEXT: .long _var-(LPC0_0+8)
+; CHECK-ARM-PIC-NOT: LCPI0_1
+
+
+; CHECK-DYNAMIC-LABEL: foo:
+; CHECK-DYNAMIC: ldr r0, LCPI0_0
+; CHECK-DYNAMIC: ldr {{r[1-9][0-9]?}}, [r0, #4]
+
+; CHECK-DYNAMIC: LCPI0_0:
+; CHECK-DYNAMIC-NEXT: .long _var
+; CHECK-DYNAMIC-NOT: LCPI0_1
+
+
+; CHECK-STATIC-LABEL: foo:
+; CHECK-STATIC: ldr r0, LCPI0_0
+; CHECK-STATIC: ldr {{r[1-9][0-9]?}}, [r0, #4]
+
+; CHECK-STATIC: LCPI0_0:
+; CHECK-STATIC-NEXT: .long _var{{$}}
+; CHECK-STATIC-NOT: LCPI0_1
+
+
diff --git a/test/CodeGen/ARM/ctz.ll b/test/CodeGen/ARM/ctz.ll
index 2c7efc7c5da5..2d88b0351cf3 100644
--- a/test/CodeGen/ARM/ctz.ll
+++ b/test/CodeGen/ARM/ctz.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6t2 %s -o - | FileCheck %s
declare i32 @llvm.cttz.i32(i32, i1)
diff --git a/test/CodeGen/ARM/dagcombine-concatvector.ll b/test/CodeGen/ARM/dagcombine-concatvector.ll
index 2927ea2f3ca9..62ed87fd7871 100644
--- a/test/CodeGen/ARM/dagcombine-concatvector.ll
+++ b/test/CodeGen/ARM/dagcombine-concatvector.ll
@@ -1,11 +1,14 @@
-; RUN: llc < %s -mtriple=thumbv7s-apple-ios3.0.0 -mcpu=generic | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7s-apple-ios3.0.0 -mcpu=generic | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc < %s -mtriple=thumbeb -mattr=v7,neon | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
; PR15525
; CHECK-LABEL: test1:
; CHECK: ldr.w [[REG:r[0-9]+]], [sp]
-; CHECK-NEXT: vmov {{d[0-9]+}}, r1, r2
-; CHECK-NEXT: vmov {{d[0-9]+}}, r3, [[REG]]
-; CHECK-NEXT: vst1.8 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0]
+; CHECK-LE-NEXT: vmov {{d[0-9]+}}, r1, r2
+; CHECK-LE-NEXT: vmov {{d[0-9]+}}, r3, [[REG]]
+; CHECK-BE-NEXT: vmov {{d[0-9]+}}, r2, r1
+; CHECK-BE: vmov {{d[0-9]+}}, [[REG]], r3
+; CHECK: vst1.8 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0]
; CHECK-NEXT: bx lr
define void @test1(i8* %arg, [4 x i64] %vec.coerce) {
bb:
diff --git a/test/CodeGen/ARM/data-in-code-annotations.ll b/test/CodeGen/ARM/data-in-code-annotations.ll
index da70178225eb..5eb81b24de0f 100644
--- a/test/CodeGen/ARM/data-in-code-annotations.ll
+++ b/test/CodeGen/ARM/data-in-code-annotations.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-apple-darwin -arm-atomic-cfg-tidy=0 | FileCheck %s
define double @f1() nounwind {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/debug-frame-large-stack.ll b/test/CodeGen/ARM/debug-frame-large-stack.ll
new file mode 100644
index 000000000000..5bafce9407e5
--- /dev/null
+++ b/test/CodeGen/ARM/debug-frame-large-stack.ll
@@ -0,0 +1,99 @@
+; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-none-eabi -disable-fp-elim | FileCheck %s --check-prefix=CHECK-ARM
+; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-none-eabi | FileCheck %s --check-prefix=CHECK-ARM-FP-ELIM
+
+define void @test1() {
+ %tmp = alloca [ 64 x i32 ] , align 4
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/large.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"large.c", metadata !"/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test1", metadata !"test1", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, void ()* @test1, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [test1]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/large.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5 "}
+!11 = metadata !{i32 2, i32 0, metadata !4, null}
+
+; CHECK-ARM-LABEL: test1:
+; CHECK-ARM: .cfi_startproc
+; CHECK-ARM: sub sp, sp, #256
+; CHECK-ARM: .cfi_endproc
+
+; CHECK-ARM-FP-ELIM-LABEL: test1:
+; CHECK-ARM-FP-ELIM: .cfi_startproc
+; CHECK-ARM-FP-ELIM: sub sp, sp, #256
+; CHECK-ARM-FP-ELIM: .cfi_endproc
+
+define void @test2() {
+ %tmp = alloca [ 4168 x i8 ] , align 4
+ ret void
+}
+
+; CHECK-ARM-LABEL: test2:
+; CHECK-ARM: .cfi_startproc
+; CHECK-ARM: push {r4, r5}
+; CHECK-ARM: .cfi_def_cfa_offset 8
+; CHECK-ARM: .cfi_offset r5, -4
+; CHECK-ARM: .cfi_offset r4, -8
+; CHECK-ARM: sub sp, sp, #72
+; CHECK-ARM: sub sp, sp, #4096
+; CHECK-ARM: .cfi_def_cfa_offset 4176
+; CHECK-ARM: .cfi_endproc
+
+; CHECK-ARM-FP-ELIM-LABEL: test2:
+; CHECK-ARM-FP-ELIM: .cfi_startproc
+; CHECK-ARM-FP-ELIM: push {r4, r5}
+; CHECK-ARM-FP-ELIM: .cfi_def_cfa_offset 8
+; CHECK-ARM-FP-ELIM: .cfi_offset r5, -4
+; CHECK-ARM-FP-ELIM: .cfi_offset r4, -8
+; CHECK-ARM-FP-ELIM: sub sp, sp, #72
+; CHECK-ARM-FP-ELIM: sub sp, sp, #4096
+; CHECK-ARM-FP-ELIM: .cfi_def_cfa_offset 4176
+; CHECK-ARM-FP-ELIM: .cfi_endproc
+
+define i32 @test3() {
+ %retval = alloca i32, align 4
+ %tmp = alloca i32, align 4
+ %a = alloca [805306369 x i8], align 16
+ store i32 0, i32* %tmp
+ %tmp1 = load i32* %tmp
+ ret i32 %tmp1
+}
+
+; CHECK-ARM-LABEL: test3:
+; CHECK-ARM: .cfi_startproc
+; CHECK-ARM: push {r4, r5, r11}
+; CHECK-ARM: .cfi_def_cfa_offset 12
+; CHECK-ARM: .cfi_offset r11, -4
+; CHECK-ARM: .cfi_offset r5, -8
+; CHECK-ARM: .cfi_offset r4, -12
+; CHECK-ARM: add r11, sp, #8
+; CHECK-ARM: .cfi_def_cfa r11, 4
+; CHECK-ARM: sub sp, sp, #20
+; CHECK-ARM: sub sp, sp, #805306368
+; CHECK-ARM: bic sp, sp, #15
+; CHECK-ARM: .cfi_endproc
+
+; CHECK-ARM-FP-ELIM-LABEL: test3:
+; CHECK-ARM-FP-ELIM: .cfi_startproc
+; CHECK-ARM-FP-ELIM: push {r4, r5, r11}
+; CHECK-ARM-FP-ELIM: .cfi_def_cfa_offset 12
+; CHECK-ARM-FP-ELIM: .cfi_offset r11, -4
+; CHECK-ARM-FP-ELIM: .cfi_offset r5, -8
+; CHECK-ARM-FP-ELIM: .cfi_offset r4, -12
+; CHECK-ARM-FP-ELIM: add r11, sp, #8
+; CHECK-ARM-FP-ELIM: .cfi_def_cfa r11, 4
+; CHECK-ARM-FP-ELIM: sub sp, sp, #20
+; CHECK-ARM-FP-ELIM: sub sp, sp, #805306368
+; CHECK-ARM-FP-ELIM: bic sp, sp, #15
+; CHECK-ARM-FP-ELIM: .cfi_endproc
+
diff --git a/test/CodeGen/ARM/debug-frame-no-debug.ll b/test/CodeGen/ARM/debug-frame-no-debug.ll
new file mode 100644
index 000000000000..81702c6e7491
--- /dev/null
+++ b/test/CodeGen/ARM/debug-frame-no-debug.ll
@@ -0,0 +1,97 @@
+; ARM EHABI integrated test
+
+; This test case checks that the ARM DWARF stack frame directives
+; are not generated when compiling without debug information.
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
+
+; RUN: llc -mtriple thumb-unknown-linux-gnueabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-FP
+
+;-------------------------------------------------------------------------------
+; Test 1
+;-------------------------------------------------------------------------------
+; This is the LLVM assembly generated from following C++ code:
+;
+; extern void print(int, int, int, int, int);
+; extern void print(double, double, double, double, double);
+;
+; void test(int a, int b, int c, int d, int e,
+; double m, double n, double p, double q, double r) {
+; try {
+; print(a, b, c, d, e);
+; } catch (...) {
+; print(m, n, p, q, r);
+; }
+; }
+
+declare void @_Z5printiiiii(i32, i32, i32, i32, i32)
+
+declare void @_Z5printddddd(double, double, double, double, double)
+
+define void @_Z4testiiiiiddddd(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e,
+ double %m, double %n, double %p,
+ double %q, double %r) {
+entry:
+ invoke void @_Z5printiiiii(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e)
+ to label %try.cont unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = tail call i8* @__cxa_begin_catch(i8* %1)
+ invoke void @_Z5printddddd(double %m, double %n, double %p,
+ double %q, double %r)
+ to label %invoke.cont2 unwind label %lpad1
+
+invoke.cont2:
+ tail call void @__cxa_end_catch()
+ br label %try.cont
+
+try.cont:
+ ret void
+
+lpad1:
+ %3 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+
+eh.resume:
+ resume { i8*, i32 } %3
+
+terminate.lpad:
+ %4 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %5 = extractvalue { i8*, i32 } %4, 0
+ tail call void @__clang_call_terminate(i8* %5)
+ unreachable
+}
+
+declare void @__clang_call_terminate(i8*)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+declare void @_ZSt9terminatev()
+
+; CHECK-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; CHECK-FP-ELIM-NOT: .cfi_startproc
+; CHECK-FP-ELIM: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-FP-ELIM-NOT: .cfi_def_cfa_offset 36
+
+; CHECK-THUMB-FP-LABEL: _Z4testiiiiiddddd:
+; CHECK-THUMB-FP-NOT: .cfi_startproc
+; CHECK-THUMB-FP: push {r4, r5, r6, r7, lr}
+; CHECK-THUMB-FP-NOT: .cfi_def_cfa_offset 20
+
diff --git a/test/CodeGen/ARM/debug-frame-vararg.ll b/test/CodeGen/ARM/debug-frame-vararg.ll
new file mode 100644
index 000000000000..42ff82d81539
--- /dev/null
+++ b/test/CodeGen/ARM/debug-frame-vararg.ll
@@ -0,0 +1,142 @@
+; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s | FileCheck %s --check-prefix=CHECK-FP
+; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-FP-ELIM
+; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s | FileCheck %s --check-prefix=CHECK-THUMB-FP
+; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
+
+; Tests that the initial space allocated to the varargs on the stack is
+; taken into account in the .cfi_ directives.
+
+; Generated from the C program:
+; #include <stdarg.h>
+;
+; extern int foo(int);
+;
+; int sum(int count, ...) {
+; va_list vl;
+; va_start(vl, count);
+; int sum = 0;
+; for (int i = 0; i < count; i++) {
+; sum += foo(va_arg(vl, int));
+; }
+; va_end(vl);
+; }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/var.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"var.c", metadata !"/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"sum", metadata !"sum", metadata !"", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32, ...)* @sum, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [sum]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/var.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5 "}
+!12 = metadata !{i32 786689, metadata !4, metadata !"count", metadata !5, i32 16777221, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [count] [line 5]
+!13 = metadata !{i32 5, i32 0, metadata !4, null}
+!14 = metadata !{i32 786688, metadata !4, metadata !"vl", metadata !5, i32 6, metadata !15, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [vl] [line 6]
+!15 = metadata !{i32 786454, metadata !16, null, metadata !"va_list", i32 30, i64 0, i64 0, i64 0, i32 0, metadata !17} ; [ DW_TAG_typedef ] [va_list] [line 30, size 0, align 0, offset 0] [from __builtin_va_list]
+!16 = metadata !{metadata !"/linux-x86_64-high/gcc_4.7.2/dbg/llvm/bin/../lib/clang/3.5/include/stdarg.h", metadata !"/tmp"}
+!17 = metadata !{i32 786454, metadata !1, null, metadata !"__builtin_va_list", i32 6, i64 0, i64 0, i64 0, i32 0, metadata !18} ; [ DW_TAG_typedef ] [__builtin_va_list] [line 6, size 0, align 0, offset 0] [from __va_list]
+!18 = metadata !{i32 786451, metadata !1, null, metadata !"__va_list", i32 6, i64 32, i64 32, i32 0, i32 0, null, metadata !19, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [__va_list] [line 6, size 32, align 32, offset 0] [def] [from ]
+!19 = metadata !{metadata !20}
+!20 = metadata !{i32 786445, metadata !1, metadata !18, metadata !"__ap", i32 6, i64 32, i64 32, i64 0, i32 0, metadata !21} ; [ DW_TAG_member ] [__ap] [line 6, size 32, align 32, offset 0] [from ]
+!21 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from ]
+!22 = metadata !{i32 6, i32 0, metadata !4, null}
+!23 = metadata !{i32 7, i32 0, metadata !4, null}
+!24 = metadata !{i32 786688, metadata !4, metadata !"sum", metadata !5, i32 8, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [sum] [line 8]
+!25 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!26 = metadata !{i32 786688, metadata !27, metadata !"i", metadata !5, i32 9, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [i] [line 9]
+!27 = metadata !{i32 786443, metadata !1, metadata !4, i32 9, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!28 = metadata !{i32 9, i32 0, metadata !27, null}
+!29 = metadata !{i32 10, i32 0, metadata !30, null}
+!30 = metadata !{i32 786443, metadata !1, metadata !27, i32 9, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!31 = metadata !{i32 11, i32 0, metadata !30, null}
+!32 = metadata !{i32 12, i32 0, metadata !4, null}
+!33 = metadata !{i32 13, i32 0, metadata !4, null}
+
+; CHECK-FP-LABEL: sum
+; CHECK-FP: .cfi_startproc
+; CHECK-FP: sub sp, sp, #16
+; CHECK-FP: .cfi_def_cfa_offset 16
+; CHECK-FP: push {r4, lr}
+; CHECK-FP: .cfi_def_cfa_offset 24
+; CHECK-FP: .cfi_offset lr, -20
+; CHECK-FP: .cfi_offset r4, -24
+; CHECK-FP: sub sp, sp, #8
+; CHECK-FP: .cfi_def_cfa_offset 32
+
+; CHECK-FP-ELIM-LABEL: sum
+; CHECK-FP-ELIM: .cfi_startproc
+; CHECK-FP-ELIM: sub sp, sp, #16
+; CHECK-FP-ELIM: .cfi_def_cfa_offset 16
+; CHECK-FP-ELIM: push {r4, r10, r11, lr}
+; CHECK-FP-ELIM: .cfi_def_cfa_offset 32
+; CHECK-FP-ELIM: .cfi_offset lr, -20
+; CHECK-FP-ELIM: .cfi_offset r11, -24
+; CHECK-FP-ELIM: .cfi_offset r10, -28
+; CHECK-FP-ELIM: .cfi_offset r4, -32
+; CHECK-FP-ELIM: add r11, sp, #8
+; CHECK-FP-ELIM: .cfi_def_cfa r11, 24
+
+; CHECK-THUMB-FP-LABEL: sum
+; CHECK-THUMB-FP: .cfi_startproc
+; CHECK-THUMB-FP: sub sp, #16
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 16
+; CHECK-THUMB-FP: push {r4, r5, r7, lr}
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 32
+; CHECK-THUMB-FP: .cfi_offset lr, -20
+; CHECK-THUMB-FP: .cfi_offset r7, -24
+; CHECK-THUMB-FP: .cfi_offset r5, -28
+; CHECK-THUMB-FP: .cfi_offset r4, -32
+; CHECK-THUMB-FP: sub sp, #8
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 40
+
+; CHECK-THUMB-FP-ELIM-LABEL: sum
+; CHECK-THUMB-FP-ELIM: .cfi_startproc
+; CHECK-THUMB-FP-ELIM: sub sp, #16
+; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 16
+; CHECK-THUMB-FP-ELIM: push {r4, r5, r7, lr}
+; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 32
+; CHECK-THUMB-FP-ELIM: .cfi_offset lr, -20
+; CHECK-THUMB-FP-ELIM: .cfi_offset r7, -24
+; CHECK-THUMB-FP-ELIM: .cfi_offset r5, -28
+; CHECK-THUMB-FP-ELIM: .cfi_offset r4, -32
+; CHECK-THUMB-FP-ELIM: add r7, sp, #8
+; CHECK-THUMB-FP-ELIM: .cfi_def_cfa r7, 24
+
+define i32 @sum(i32 %count, ...) {
+entry:
+ %vl = alloca i8*, align 4
+ %vl1 = bitcast i8** %vl to i8*
+ call void @llvm.va_start(i8* %vl1)
+ %cmp4 = icmp sgt i32 %count, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %ap.cur = load i8** %vl, align 4
+ %ap.next = getelementptr i8* %ap.cur, i32 4
+ store i8* %ap.next, i8** %vl, align 4
+ %0 = bitcast i8* %ap.cur to i32*
+ %1 = load i32* %0, align 4
+ %call = call i32 @foo(i32 %1) #1
+ %inc = add nsw i32 %i.05, 1
+ %exitcond = icmp eq i32 %inc, %count
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ call void @llvm.va_end(i8* %vl1)
+ ret i32 undef
+}
+
+declare void @llvm.va_start(i8*) nounwind
+
+declare i32 @foo(i32)
+
+declare void @llvm.va_end(i8*) nounwind
diff --git a/test/CodeGen/ARM/debug-frame.ll b/test/CodeGen/ARM/debug-frame.ll
new file mode 100644
index 000000000000..cb54aa8aec73
--- /dev/null
+++ b/test/CodeGen/ARM/debug-frame.ll
@@ -0,0 +1,575 @@
+; ARM EHABI integrated test
+
+; This test case checks whether the ARM DWARF stack frame directives
+; are properly generated or not.
+
+; We have to check several cases:
+; (1) arm with -disable-fp-elim
+; (2) arm without -disable-fp-elim
+; (3) armv7 with -disable-fp-elim
+; (4) armv7 without -disable-fp-elim
+; (5) thumb with -disable-fp-elim
+; (6) thumb without -disable-fp-elim
+; (7) thumbv7 with -disable-fp-elim
+; (8) thumbv7 without -disable-fp-elim
+; (9) thumbv7 with -no-integrated-as
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
+
+; RUN: llc -mtriple thumb-unknown-linux-gnueabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-FP
+
+; RUN: llc -mtriple thumb-unknown-linux-gnueabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
+
+; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP
+
+; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP-ELIM
+
+; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
+; RUN: -disable-fp-elim -no-integrated-as -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP-NOIAS
+
+;-------------------------------------------------------------------------------
+; Test 1
+;-------------------------------------------------------------------------------
+; This is the LLVM assembly generated from following C++ code:
+;
+; extern void print(int, int, int, int, int);
+; extern void print(double, double, double, double, double);
+;
+; void test(int a, int b, int c, int d, int e,
+; double m, double n, double p, double q, double r) {
+; try {
+; print(a, b, c, d, e);
+; } catch (...) {
+; print(m, n, p, q, r);
+; }
+; }
+
+declare void @_Z5printiiiii(i32, i32, i32, i32, i32)
+
+declare void @_Z5printddddd(double, double, double, double, double)
+
+define void @_Z4testiiiiiddddd(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e,
+ double %m, double %n, double %p,
+ double %q, double %r) {
+entry:
+ invoke void @_Z5printiiiii(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e)
+ to label %try.cont unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = tail call i8* @__cxa_begin_catch(i8* %1)
+ invoke void @_Z5printddddd(double %m, double %n, double %p,
+ double %q, double %r)
+ to label %invoke.cont2 unwind label %lpad1
+
+invoke.cont2:
+ tail call void @__cxa_end_catch()
+ br label %try.cont
+
+try.cont:
+ ret void
+
+lpad1:
+ %3 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+
+eh.resume:
+ resume { i8*, i32 } %3
+
+terminate.lpad:
+ %4 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %5 = extractvalue { i8*, i32 } %4, 0
+ tail call void @__clang_call_terminate(i8* %5)
+ unreachable
+}
+
+declare void @__clang_call_terminate(i8*)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+declare void @_ZSt9terminatev()
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/exp.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"exp.cpp", metadata !"/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test", metadata !"test", metadata !"_Z4testiiiiiddddd", i32 4, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32, i32, i32, i32, i32, double, double, double, double, double)* @_Z4testiiiiiddddd, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 4] [def] [scope 5] [test]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/exp.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8, metadata !8, metadata !8, metadata !8, metadata !8, metadata !9, metadata !9, metadata !9, metadata !9, metadata !9}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786468, null, null, metadata !"double", i32 0, i64 64, i64 64, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
+!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{metadata !"clang version 3.5 "}
+!13 = metadata !{i32 786689, metadata !4, metadata !"a", metadata !5, i32 16777220, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [a] [line 4]
+!14 = metadata !{i32 4, i32 0, metadata !4, null}
+!15 = metadata !{i32 786689, metadata !4, metadata !"b", metadata !5, i32 33554436, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [b] [line 4]
+!16 = metadata !{i32 786689, metadata !4, metadata !"c", metadata !5, i32 50331652, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [c] [line 4]
+!17 = metadata !{i32 786689, metadata !4, metadata !"d", metadata !5, i32 67108868, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [d] [line 4]
+!18 = metadata !{i32 786689, metadata !4, metadata !"e", metadata !5, i32 83886084, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [e] [line 4]
+!19 = metadata !{i32 786689, metadata !4, metadata !"m", metadata !5, i32 100663301, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [m] [line 5]
+!20 = metadata !{i32 5, i32 0, metadata !4, null}
+!21 = metadata !{i32 786689, metadata !4, metadata !"n", metadata !5, i32 117440517, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [n] [line 5]
+!22 = metadata !{i32 786689, metadata !4, metadata !"p", metadata !5, i32 134217733, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [p] [line 5]
+!23 = metadata !{i32 786689, metadata !4, metadata !"q", metadata !5, i32 150994949, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [q] [line 5]
+!24 = metadata !{i32 786689, metadata !4, metadata !"r", metadata !5, i32 167772165, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [r] [line 5]
+!25 = metadata !{i32 7, i32 0, metadata !26, null}
+!26 = metadata !{i32 786443, metadata !1, metadata !4, i32 6, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/exp.cpp]
+!27 = metadata !{i32 8, i32 0, metadata !26, null} ; [ DW_TAG_imported_declaration ]
+!28 = metadata !{i32 11, i32 0, metadata !26, null}
+!29 = metadata !{i32 9, i32 0, metadata !30, null}
+!30 = metadata !{i32 786443, metadata !1, metadata !4, i32 8, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/exp.cpp]
+!31 = metadata !{i32 10, i32 0, metadata !30, null}
+!32 = metadata !{i32 10, i32 0, metadata !4, null}
+!33 = metadata !{i32 11, i32 0, metadata !4, null}
+!34 = metadata !{i32 11, i32 0, metadata !30, null}
+
+; CHECK-FP-LABEL: _Z4testiiiiiddddd:
+; CHECK-FP: .cfi_startproc
+; CHECK-FP: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-FP: .cfi_def_cfa_offset 36
+; CHECK-FP: .cfi_offset lr, -4
+; CHECK-FP: .cfi_offset r11, -8
+; CHECK-FP: .cfi_offset r10, -12
+; CHECK-FP: .cfi_offset r9, -16
+; CHECK-FP: .cfi_offset r8, -20
+; CHECK-FP: .cfi_offset r7, -24
+; CHECK-FP: .cfi_offset r6, -28
+; CHECK-FP: .cfi_offset r5, -32
+; CHECK-FP: .cfi_offset r4, -36
+; CHECK-FP: add r11, sp, #28
+; CHECK-FP: .cfi_def_cfa r11, 8
+; CHECK-FP: sub sp, sp, #28
+; CHECK-FP: .cfi_endproc
+
+; CHECK-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; CHECK-FP-ELIM: .cfi_startproc
+; CHECK-FP-ELIM: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-FP-ELIM: .cfi_def_cfa_offset 36
+; CHECK-FP-ELIM: .cfi_offset lr, -4
+; CHECK-FP-ELIM: .cfi_offset r11, -8
+; CHECK-FP-ELIM: .cfi_offset r10, -12
+; CHECK-FP-ELIM: .cfi_offset r9, -16
+; CHECK-FP-ELIM: .cfi_offset r8, -20
+; CHECK-FP-ELIM: .cfi_offset r7, -24
+; CHECK-FP-ELIM: .cfi_offset r6, -28
+; CHECK-FP-ELIM: .cfi_offset r5, -32
+; CHECK-FP-ELIM: .cfi_offset r4, -36
+; CHECK-FP-ELIM: sub sp, sp, #28
+; CHECK-FP-ELIM: .cfi_def_cfa_offset 64
+; CHECK-FP-ELIM: .cfi_endproc
+
+; CHECK-V7-FP-LABEL: _Z4testiiiiiddddd:
+; CHECK-V7-FP: .cfi_startproc
+; CHECK-V7-FP: push {r4, r10, r11, lr}
+; CHECK-V7-FP: .cfi_def_cfa_offset 16
+; CHECK-V7-FP: .cfi_offset lr, -4
+; CHECK-V7-FP: .cfi_offset r11, -8
+; CHECK-V7-FP: .cfi_offset r10, -12
+; CHECK-V7-FP: .cfi_offset r4, -16
+; CHECK-V7-FP: add r11, sp, #8
+; CHECK-V7-FP: .cfi_def_cfa r11, 8
+; CHECK-V7-FP: vpush {d8, d9, d10, d11, d12}
+; CHECK-V7-FP: .cfi_offset d12, -24
+; CHECK-V7-FP: .cfi_offset d11, -32
+; CHECK-V7-FP: .cfi_offset d10, -40
+; CHECK-V7-FP: .cfi_offset d9, -48
+; CHECK-V7-FP: .cfi_offset d8, -56
+; CHECK-V7-FP: sub sp, sp, #24
+; CHECK-V7-FP: .cfi_endproc
+
+; CHECK-V7-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; CHECK-V7-FP-ELIM: .cfi_startproc
+; CHECK-V7-FP-ELIM: push {r4, lr}
+; CHECK-V7-FP-ELIM: .cfi_def_cfa_offset 8
+; CHECK-V7-FP-ELIM: .cfi_offset lr, -4
+; CHECK-V7-FP-ELIM: .cfi_offset r4, -8
+; CHECK-V7-FP-ELIM: vpush {d8, d9, d10, d11, d12}
+; CHECK-V7-FP-ELIM: .cfi_def_cfa_offset 48
+; CHECK-V7-FP-ELIM: .cfi_offset d12, -16
+; CHECK-V7-FP-ELIM: .cfi_offset d11, -24
+; CHECK-V7-FP-ELIM: .cfi_offset d10, -32
+; CHECK-V7-FP-ELIM: .cfi_offset d9, -40
+; CHECK-V7-FP-ELIM: .cfi_offset d8, -48
+; CHECK-V7-FP-ELIM: sub sp, sp, #24
+; CHECK-V7-FP-ELIM: .cfi_def_cfa_offset 72
+; CHECK-V7-FP-ELIM: .cfi_endproc
+
+; CHECK-THUMB-FP-LABEL: _Z4testiiiiiddddd:
+; CHECK-THUMB-FP: .cfi_startproc
+; CHECK-THUMB-FP: push {r4, r5, r6, r7, lr}
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 20
+; CHECK-THUMB-FP: .cfi_offset lr, -4
+; CHECK-THUMB-FP: .cfi_offset r7, -8
+; CHECK-THUMB-FP: .cfi_offset r6, -12
+; CHECK-THUMB-FP: .cfi_offset r5, -16
+; CHECK-THUMB-FP: .cfi_offset r4, -20
+; CHECK-THUMB-FP: add r7, sp, #12
+; CHECK-THUMB-FP: .cfi_def_cfa r7, 8
+; CHECK-THUMB-FP: sub sp, #60
+; CHECK-THUMB-FP: .cfi_endproc
+
+; CHECK-THUMB-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; CHECK-THUMB-FP-ELIM: .cfi_startproc
+; CHECK-THUMB-FP-ELIM: push {r4, r5, r6, r7, lr}
+; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 20
+; CHECK-THUMB-FP-ELIM: .cfi_offset lr, -4
+; CHECK-THUMB-FP-ELIM: .cfi_offset r7, -8
+; CHECK-THUMB-FP-ELIM: .cfi_offset r6, -12
+; CHECK-THUMB-FP-ELIM: .cfi_offset r5, -16
+; CHECK-THUMB-FP-ELIM: .cfi_offset r4, -20
+; CHECK-THUMB-FP-ELIM: sub sp, #60
+; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 80
+; CHECK-THUMB-FP-ELIM: .cfi_endproc
+
+; CHECK-THUMB-V7-FP-LABEL: _Z4testiiiiiddddd:
+; CHECK-THUMB-V7-FP: .cfi_startproc
+; CHECK-THUMB-V7-FP: push.w {r4, r7, r11, lr}
+; CHECK-THUMB-V7-FP: .cfi_def_cfa_offset 16
+; CHECK-THUMB-V7-FP: .cfi_offset lr, -4
+; CHECK-THUMB-V7-FP: .cfi_offset r11, -8
+; CHECK-THUMB-V7-FP: .cfi_offset r7, -12
+; CHECK-THUMB-V7-FP: .cfi_offset r4, -16
+; CHECK-THUMB-V7-FP: add r7, sp, #4
+; CHECK-THUMB-V7-FP: .cfi_def_cfa r7, 12
+; CHECK-THUMB-V7-FP: vpush {d8, d9, d10, d11, d12}
+; CHECK-THUMB-V7-FP: .cfi_offset d12, -24
+; CHECK-THUMB-V7-FP: .cfi_offset d11, -32
+; CHECK-THUMB-V7-FP: .cfi_offset d10, -40
+; CHECK-THUMB-V7-FP: .cfi_offset d9, -48
+; CHECK-THUMB-V7-FP: .cfi_offset d8, -56
+; CHECK-THUMB-V7-FP: sub sp, #24
+; CHECK-THUMB-V7-FP: .cfi_endproc
+
+; CHECK-THUMB-V7-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; CHECK-THUMB-V7-FP-ELIM: .cfi_startproc
+; CHECK-THUMB-V7-FP-ELIM: push {r4, lr}
+; CHECK-THUMB-V7-FP-ELIM: .cfi_def_cfa_offset 8
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset lr, -4
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset r4, -8
+; CHECK-THUMB-V7-FP-ELIM: vpush {d8, d9, d10, d11, d12}
+; CHECK-THUMB-V7-FP-ELIM: .cfi_def_cfa_offset 48
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset d12, -16
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset d11, -24
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset d10, -32
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset d9, -40
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset d8, -48
+; CHECK-THUMB-V7-FP-ELIM: sub sp, #24
+; CHECK-THUMB-V7-FP-ELIM: .cfi_def_cfa_offset 72
+; CHECK-THUMB-V7-FP-ELIM: .cfi_endproc
+
+; CHECK-THUMB-V7-FP-NOIAS-LABEL: _Z4testiiiiiddddd:
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_startproc
+; CHECK-THUMB-V7-FP-NOIAS: push.w {r4, r7, r11, lr}
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_def_cfa_offset 16
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 14, -4
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 11, -8
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 7, -12
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 4, -16
+; CHECK-THUMB-V7-FP-NOIAS: add r7, sp, #4
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_def_cfa 7, 12
+; CHECK-THUMB-V7-FP-NOIAS: vpush {d8, d9, d10, d11, d12}
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 268, -24
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 267, -32
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 266, -40
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 265, -48
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_offset 264, -56
+; CHECK-THUMB-V7-FP-NOIAS: sub sp, #24
+; CHECK-THUMB-V7-FP-NOIAS: .cfi_endproc
+
+;-------------------------------------------------------------------------------
+; Test 2
+;-------------------------------------------------------------------------------
+
+declare void @throw_exception_2()
+
+define void @test2() {
+entry:
+ call void @throw_exception_2()
+ ret void
+}
+
+; CHECK-FP-LABEL: test2:
+; CHECK-FP: .cfi_startproc
+; CHECK-FP: push {r11, lr}
+; CHECK-FP: .cfi_def_cfa_offset 8
+; CHECK-FP: .cfi_offset lr, -4
+; CHECK-FP: .cfi_offset r11, -8
+; CHECK-FP: mov r11, sp
+; CHECK-FP: .cfi_def_cfa_register r11
+; CHECK-FP: pop {r11, lr}
+; CHECK-FP: mov pc, lr
+; CHECK-FP: .cfi_endproc
+
+; CHECK-FP-ELIM-LABEL: test2:
+; CHECK-FP-ELIM: .cfi_startproc
+; CHECK-FP-ELIM: push {r11, lr}
+; CHECK-FP-ELIM: .cfi_def_cfa_offset 8
+; CHECK-FP-ELIM: .cfi_offset lr, -4
+; CHECK-FP-ELIM: .cfi_offset r11, -8
+; CHECK-FP-ELIM: pop {r11, lr}
+; CHECK-FP-ELIM: mov pc, lr
+; CHECK-FP-ELIM: .cfi_endproc
+
+; CHECK-V7-FP-LABEL: test2:
+; CHECK-V7-FP: .cfi_startproc
+; CHECK-V7-FP: push {r11, lr}
+; CHECK-V7-FP: .cfi_def_cfa_offset 8
+; CHECK-V7-FP: .cfi_offset lr, -4
+; CHECK-V7-FP: .cfi_offset r11, -8
+; CHECK-V7-FP: mov r11, sp
+; CHECK-V7-FP: .cfi_def_cfa_register r11
+; CHECK-V7-FP: pop {r11, pc}
+; CHECK-V7-FP: .cfi_endproc
+
+; CHECK-V7-FP-ELIM-LABEL: test2:
+; CHECK-V7-FP-ELIM: .cfi_startproc
+; CHECK-V7-FP-ELIM: push {r11, lr}
+; CHECK-V7-FP-ELIM: .cfi_def_cfa_offset 8
+; CHECK-V7-FP-ELIM: .cfi_offset lr, -4
+; CHECK-V7-FP-ELIM: .cfi_offset r11, -8
+; CHECK-V7-FP-ELIM: pop {r11, pc}
+; CHECK-V7-FP-ELIM: .cfi_endproc
+
+; CHECK-THUMB-FP-LABEL: test2:
+; CHECK-THUMB-FP: .cfi_startproc
+; CHECK-THUMB-FP: push {r7, lr}
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 8
+; CHECK-THUMB-FP: .cfi_offset lr, -4
+; CHECK-THUMB-FP: .cfi_offset r7, -8
+; CHECK-THUMB-FP: add r7, sp, #0
+; CHECK-THUMB-FP: .cfi_def_cfa_register r7
+; CHECK-THUMB-FP: pop {r7, pc}
+; CHECK-THUMB-FP: .cfi_endproc
+
+; CHECK-THUMB-FP-ELIM-LABEL: test2:
+; CHECK-THUMB-FP-ELIM: .cfi_startproc
+; CHECK-THUMB-FP-ELIM: push {r7, lr}
+; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 8
+; CHECK-THUMB-FP-ELIM: .cfi_offset lr, -4
+; CHECK-THUMB-FP-ELIM: .cfi_offset r7, -8
+; CHECK-THUMB-FP-ELIM: pop {r7, pc}
+; CHECK-THUMB-FP-ELIM: .cfi_endproc
+
+; CHECK-THUMB-V7-FP-LABEL: test2:
+; CHECK-THUMB-V7-FP: .cfi_startproc
+; CHECK-THUMB-V7-FP: push {r7, lr}
+; CHECK-THUMB-V7-FP: .cfi_def_cfa_offset 8
+; CHECK-THUMB-V7-FP: .cfi_offset lr, -4
+; CHECK-THUMB-V7-FP: .cfi_offset r7, -8
+; CHECK-THUMB-V7-FP: mov r7, sp
+; CHECK-THUMB-V7-FP: .cfi_def_cfa_register r7
+; CHECK-THUMB-V7-FP: pop {r7, pc}
+; CHECK-THUMB-V7-FP: .cfi_endproc
+
+; CHECK-THUMB-V7-FP-ELIM-LABEL: test2:
+; CHECK-THUMB-V7-FP-ELIM: .cfi_startproc
+; CHECK-THUMB-V7-FP-ELIM: push.w {r11, lr}
+; CHECK-THUMB-V7-FP-ELIM: .cfi_def_cfa_offset 8
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset lr, -4
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset r11, -8
+; CHECK-THUMB-V7-FP-ELIM: pop.w {r11, pc}
+; CHECK-THUMB-V7-FP-ELIM: .cfi_endproc
+
+
+;-------------------------------------------------------------------------------
+; Test 3
+;-------------------------------------------------------------------------------
+
+declare void @throw_exception_3(i32)
+
+define i32 @test3(i32 %a, i32 %b, i32 %c, i32 %d,
+ i32 %e, i32 %f, i32 %g, i32 %h) {
+entry:
+ %add = add nsw i32 %b, %a
+ %add1 = add nsw i32 %add, %c
+ %add2 = add nsw i32 %add1, %d
+ tail call void @throw_exception_3(i32 %add2)
+ %add3 = add nsw i32 %f, %e
+ %add4 = add nsw i32 %add3, %g
+ %add5 = add nsw i32 %add4, %h
+ tail call void @throw_exception_3(i32 %add5)
+ %add6 = add nsw i32 %add5, %add2
+ ret i32 %add6
+}
+
+; CHECK-FP-LABEL: test3:
+; CHECK-FP: .cfi_startproc
+; CHECK-FP: push {r4, r5, r11, lr}
+; CHECK-FP: .cfi_def_cfa_offset 16
+; CHECK-FP: .cfi_offset lr, -4
+; CHECK-FP: .cfi_offset r11, -8
+; CHECK-FP: .cfi_offset r5, -12
+; CHECK-FP: .cfi_offset r4, -16
+; CHECK-FP: add r11, sp, #8
+; CHECK-FP: .cfi_def_cfa r11, 8
+; CHECK-FP: pop {r4, r5, r11, lr}
+; CHECK-FP: mov pc, lr
+; CHECK-FP: .cfi_endproc
+
+; CHECK-FP-ELIM-LABEL: test3:
+; CHECK-FP-ELIM: .cfi_startproc
+; CHECK-FP-ELIM: push {r4, r5, r11, lr}
+; CHECK-FP-ELIM: .cfi_def_cfa_offset 16
+; CHECK-FP-ELIM: .cfi_offset lr, -4
+; CHECK-FP-ELIM: .cfi_offset r11, -8
+; CHECK-FP-ELIM: .cfi_offset r5, -12
+; CHECK-FP-ELIM: .cfi_offset r4, -16
+; CHECK-FP-ELIM: pop {r4, r5, r11, lr}
+; CHECK-FP-ELIM: mov pc, lr
+; CHECK-FP-ELIM: .cfi_endproc
+
+; CHECK-V7-FP-LABEL: test3:
+; CHECK-V7-FP: .cfi_startproc
+; CHECK-V7-FP: push {r4, r5, r11, lr}
+; CHECK-V7-FP: .cfi_def_cfa_offset 16
+; CHECK-V7-FP: .cfi_offset lr, -4
+; CHECK-V7-FP: .cfi_offset r11, -8
+; CHECK-V7-FP: .cfi_offset r5, -12
+; CHECK-V7-FP: .cfi_offset r4, -16
+; CHECK-V7-FP: add r11, sp, #8
+; CHECK-V7-FP: .cfi_def_cfa r11, 8
+; CHECK-V7-FP: pop {r4, r5, r11, pc}
+; CHECK-V7-FP: .cfi_endproc
+
+; CHECK-V7-FP-ELIM-LABEL: test3:
+; CHECK-V7-FP-ELIM: .cfi_startproc
+; CHECK-V7-FP-ELIM: push {r4, r5, r11, lr}
+; CHECK-V7-FP-ELIM: .cfi_def_cfa_offset 16
+; CHECK-V7-FP-ELIM: .cfi_offset lr, -4
+; CHECK-V7-FP-ELIM: .cfi_offset r11, -8
+; CHECK-V7-FP-ELIM: .cfi_offset r5, -12
+; CHECK-V7-FP-ELIM: .cfi_offset r4, -16
+; CHECK-V7-FP-ELIM: pop {r4, r5, r11, pc}
+; CHECK-V7-FP-ELIM: .cfi_endproc
+
+; CHECK-THUMB-FP-LABEL: test3:
+; CHECK-THUMB-FP: .cfi_startproc
+; CHECK-THUMB-FP: push {r4, r5, r7, lr}
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 16
+; CHECK-THUMB-FP: .cfi_offset lr, -4
+; CHECK-THUMB-FP: .cfi_offset r7, -8
+; CHECK-THUMB-FP: .cfi_offset r5, -12
+; CHECK-THUMB-FP: .cfi_offset r4, -16
+; CHECK-THUMB-FP: add r7, sp, #8
+; CHECK-THUMB-FP: .cfi_def_cfa r7, 8
+; CHECK-THUMB-FP: pop {r4, r5, r7, pc}
+; CHECK-THUMB-FP: .cfi_endproc
+
+; CHECK-THUMB-FP-ELIM-LABEL: test3:
+; CHECK-THUMB-FP-ELIM: .cfi_startproc
+; CHECK-THUMB-FP-ELIM: push {r4, r5, r7, lr}
+; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 16
+; CHECK-THUMB-FP-ELIM: .cfi_offset lr, -4
+; CHECK-THUMB-FP-ELIM: .cfi_offset r7, -8
+; CHECK-THUMB-FP-ELIM: .cfi_offset r5, -12
+; CHECK-THUMB-FP-ELIM: .cfi_offset r4, -16
+; CHECK-THUMB-FP-ELIM: pop {r4, r5, r7, pc}
+; CHECK-THUMB-FP-ELIM: .cfi_endproc
+
+; CHECK-THUMB-V7-FP-LABEL: test3:
+; CHECK-THUMB-V7-FP: .cfi_startproc
+; CHECK-THUMB-V7-FP: push {r4, r5, r7, lr}
+; CHECK-THUMB-V7-FP: .cfi_def_cfa_offset 16
+; CHECK-THUMB-V7-FP: .cfi_offset lr, -4
+; CHECK-THUMB-V7-FP: .cfi_offset r7, -8
+; CHECK-THUMB-V7-FP: .cfi_offset r5, -12
+; CHECK-THUMB-V7-FP: .cfi_offset r4, -16
+; CHECK-THUMB-V7-FP: add r7, sp, #8
+; CHECK-THUMB-V7-FP: .cfi_def_cfa r7, 8
+; CHECK-THUMB-V7-FP: pop {r4, r5, r7, pc}
+; CHECK-THUMB-V7-FP: .cfi_endproc
+
+; CHECK-THUMB-V7-FP-ELIM-LABEL: test3:
+; CHECK-THUMB-V7-FP-ELIM: .cfi_startproc
+; CHECK-THUMB-V7-FP-ELIM: push.w {r4, r5, r11, lr}
+; CHECK-THUMB-V7-FP-ELIM: .cfi_def_cfa_offset 16
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset lr, -4
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset r11, -8
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset r5, -12
+; CHECK-THUMB-V7-FP-ELIM: .cfi_offset r4, -16
+; CHECK-THUMB-V7-FP-ELIM: pop.w {r4, r5, r11, pc}
+; CHECK-THUMB-V7-FP-ELIM: .cfi_endproc
+
+
+;-------------------------------------------------------------------------------
+; Test 4
+;-------------------------------------------------------------------------------
+
+define void @test4() nounwind {
+entry:
+ ret void
+}
+
+; CHECK-FP-LABEL: test4:
+; CHECK-FP: mov pc, lr
+; CHECK-FP-NOT: .cfi_def_cfa_offset
+
+; CHECK-FP-ELIM-LABEL: test4:
+; CHECK-FP-ELIM: mov pc, lr
+; CHECK-FP-ELIM-NOT: .cfi_def_cfa_offset
+
+; CHECK-V7-FP-LABEL: test4:
+; CHECK-V7-FP: bx lr
+; CHECK-V7-FP-NOT: .cfi_def_cfa_offset
+
+; CHECK-V7-FP-ELIM-LABEL: test4:
+; CHECK-V7-FP-ELIM: bx lr
+; CHECK-V7-FP-ELIM-NOT: .cfi_def_cfa_offset
+
+; CHECK-THUMB-FP-LABEL: test4:
+; CHECK-THUMB-FP: bx lr
+; CHECK-THUMB-FP-NOT: .cfi_def_cfa_offset
+
+; CHECK-THUMB-FP-ELIM-LABEL: test4:
+; CHECK-THUMB-FP-ELIM: bx lr
+; CHECK-THUMB-FP-ELIM-NOT: .cfi_def_cfa_offset
+
+; CHECK-THUMB-V7-FP-LABEL: test4:
+; CHECK-THUMB-V7-FP: bx lr
+; CHECK-THUMB-V7-FP-NOT: .cfi_def_cfa_offset
+
+; CHECK-THUMB-V7-FP-ELIM-LABEL: test4:
+; CHECK-THUMB-V7-FP-ELIM: bx lr
+; CHECK-THUMB-V7-FP-ELIM-NOT: .cfi_def_cfa_offset
+
diff --git a/test/CodeGen/ARM/debug-info-arg.ll b/test/CodeGen/ARM/debug-info-arg.ll
index e8bf3ba9d61f..31d0324de689 100644
--- a/test/CodeGen/ARM/debug-info-arg.ll
+++ b/test/CodeGen/ARM/debug-info-arg.ll
@@ -59,7 +59,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!24 = metadata !{i32 11, i32 81, metadata !1, null}
!25 = metadata !{i32 11, i32 101, metadata !1, null}
!26 = metadata !{i32 12, i32 3, metadata !27, null}
-!27 = metadata !{i32 786443, metadata !1, i32 11, i32 107, metadata !2, i32 0} ; [ DW_TAG_lexical_block ]
+!27 = metadata !{i32 786443, metadata !2, metadata !1, i32 11, i32 107, i32 0} ; [ DW_TAG_lexical_block ]
!28 = metadata !{i32 13, i32 5, metadata !27, null}
!29 = metadata !{i32 14, i32 1, metadata !27, null}
!30 = metadata !{metadata !1}
diff --git a/test/CodeGen/ARM/debug-info-blocks.ll b/test/CodeGen/ARM/debug-info-blocks.ll
index 6cbe4b4727cd..5ad5e59b880e 100644
--- a/test/CodeGen/ARM/debug-info-blocks.ll
+++ b/test/CodeGen/ARM/debug-info-blocks.ll
@@ -231,10 +231,10 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!133 = metadata !{i32 609, i32 175, metadata !23, null}
!134 = metadata !{i32 786689, metadata !23, metadata !"data", metadata !24, i32 67109473, metadata !108, i32 0, null} ; [ DW_TAG_arg_variable ]
!135 = metadata !{i32 609, i32 190, metadata !23, null}
-!136 = metadata !{i32 786688, metadata !23, metadata !"mydata", metadata !24, i32 604, metadata !50, i32 0, null, i64 1, i64 20, i64 2, i64 1, i64 4, i64 2, i64 1, i64 24} ; [ DW_TAG_auto_variable ]
+!136 = metadata !{i32 786688, metadata !23, metadata !"mydata", metadata !24, i32 604, metadata !50, i32 0, null, metadata !163} ; [ DW_TAG_auto_variable ]
!137 = metadata !{i32 604, i32 49, metadata !23, null}
-!138 = metadata !{i32 786688, metadata !23, metadata !"self", metadata !40, i32 604, metadata !90, i32 0, null, i64 1, i64 24} ; [ DW_TAG_auto_variable ]
-!139 = metadata !{i32 786688, metadata !23, metadata !"semi", metadata !24, i32 607, metadata !125, i32 0, null, i64 1, i64 28} ; [ DW_TAG_auto_variable ]
+!138 = metadata !{i32 786688, metadata !23, metadata !"self", metadata !40, i32 604, metadata !90, i32 0, null, metadata !164} ; [ DW_TAG_auto_variable ]
+!139 = metadata !{i32 786688, metadata !23, metadata !"semi", metadata !24, i32 607, metadata !125, i32 0, null, metadata !165} ; [ DW_TAG_auto_variable ]
!140 = metadata !{i32 607, i32 30, metadata !23, null}
!141 = metadata !{i32 610, i32 17, metadata !142, null}
!142 = metadata !{i32 786443, metadata !152, metadata !23, i32 609, i32 200, i32 94} ; [ DW_TAG_lexical_block ]
@@ -258,3 +258,6 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!160 = metadata !{metadata !"header.h", metadata !"/Volumes/Sandbox/llvm"}
!161 = metadata !{metadata !"header2.h", metadata !"/Volumes/Sandbox/llvm"}
!162 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!163 = metadata !{i64 1, i64 20, i64 2, i64 1, i64 4, i64 2, i64 1, i64 24}
+!164 = metadata !{i64 1, i64 24}
+!165 = metadata !{i64 1, i64 28}
diff --git a/test/CodeGen/ARM/debug-info-qreg.ll b/test/CodeGen/ARM/debug-info-qreg.ll
index ee515fd55c81..03ce312a9013 100644
--- a/test/CodeGen/ARM/debug-info-qreg.ll
+++ b/test/CodeGen/ARM/debug-info-qreg.ll
@@ -2,13 +2,15 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-macosx10.6.7"
-;CHECK: DW_OP_regx for Q register: D1
+;CHECK: sub-register
+;CHECK-NEXT: DW_OP_regx
;CHECK-NEXT: ascii
-;CHECK-NEXT: DW_OP_piece 8
+;CHECK-NEXT: DW_OP_piece
;CHECK-NEXT: byte 8
-;CHECK-NEXT: DW_OP_regx for Q register: D2
+;CHECK-NEXT: sub-register
+;CHECK-NEXT: DW_OP_regx
;CHECK-NEXT: ascii
-;CHECK-NEXT: DW_OP_piece 8
+;CHECK-NEXT: DW_OP_piece
;CHECK-NEXT: byte 8
@.str = external constant [13 x i8]
diff --git a/test/CodeGen/ARM/debug-info-s16-reg.ll b/test/CodeGen/ARM/debug-info-s16-reg.ll
index e92d9776db8c..ee9faf833ccf 100644
--- a/test/CodeGen/ARM/debug-info-s16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-s16-reg.ll
@@ -1,9 +1,11 @@
; RUN: llc < %s - | FileCheck %s
; Radar 9309221
; Test dwarf reg no for s16
-;CHECK: DW_OP_regx for S register
+;CHECK: super-register
+;CHECK-NEXT: DW_OP_regx
;CHECK-NEXT: ascii
-;CHECK-NEXT: DW_OP_bit_piece 32 0
+;CHECK-NEXT: DW_OP_piece
+;CHECK-NEXT: 4
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-macosx10.6.7"
diff --git a/test/CodeGen/ARM/debug-info-sreg2.ll b/test/CodeGen/ARM/debug-info-sreg2.ll
index 854fcabbae87..71a696a6a4a9 100644
--- a/test/CodeGen/ARM/debug-info-sreg2.ll
+++ b/test/CodeGen/ARM/debug-info-sreg2.ll
@@ -3,13 +3,19 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-macosx10.6.7"
-;CHECK: Ldebug_loc0:
-;CHECK-NEXT: .long Ltmp0
-;CHECK-NEXT: .long Ltmp1
+;CHECK-LABEL: Lfunc_begin0:
+;CHECK: Ltmp[[K:[0-9]+]]:
+;CHECK: Ltmp[[L:[0-9]+]]:
+;CHECK-LABEL: Ldebug_loc0:
+;CHECK-NEXT: .long Ltmp[[K]]
+;CHECK-NEXT: .long Ltmp[[L]]
;CHECK-NEXT: Lset[[N:[0-9]+]] = Ltmp{{[0-9]+}}-Ltmp[[M:[0-9]+]] @ Loc expr size
;CHECK-NEXT: .short Lset[[N]]
;CHECK-NEXT: Ltmp[[M]]:
-;CHECK-NEXT: .byte 144 @ DW_OP_regx for S register
+;CHECK-NEXT: .byte 144 @ super-register
+;CHECK-NEXT: @ DW_OP_regx
+;CHECK-NEXT: .ascii
+;CHECK-NEXT: .byte {{[0-9]+}} @ DW_OP_{{.*}}piece
define void @_Z3foov() optsize ssp {
entry:
diff --git a/test/CodeGen/ARM/debug-segmented-stacks.ll b/test/CodeGen/ARM/debug-segmented-stacks.ll
new file mode 100644
index 000000000000..e866b4e124d8
--- /dev/null
+++ b/test/CodeGen/ARM/debug-segmented-stacks.ll
@@ -0,0 +1,82 @@
+; RUN: llc < %s -mtriple=arm-linux-unknown-gnueabi -verify-machineinstrs -filetype=asm | FileCheck %s -check-prefix=ARM-linux
+; RUN: llc < %s -mtriple=arm-linux-unknown-gnueabi -filetype=obj
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+define void @test_basic() #0 {
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret void
+
+; ARM-linux: test_basic:
+
+; ARM-linux: push {r4, r5}
+; ARM-linux: .cfi_def_cfa_offset 8
+; ARM-linux: .cfi_offset r5, -4
+; ARM-linux: .cfi_offset r4, -8
+; ARM-linux-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-linux-NEXT: mov r5, sp
+; ARM-linux-NEXT: ldr r4, [r4, #4]
+; ARM-linux-NEXT: cmp r4, r5
+; ARM-linux-NEXT: blo .LBB0_2
+
+; ARM-linux: mov r4, #48
+; ARM-linux-NEXT: mov r5, #0
+; ARM-linux-NEXT: stmdb sp!, {lr}
+; ARM-linux: .cfi_def_cfa_offset 12
+; ARM-linux: .cfi_offset lr, -12
+; ARM-linux-NEXT: bl __morestack
+; ARM-linux-NEXT: ldm sp!, {lr}
+; ARM-linux-NEXT: pop {r4, r5}
+; ARM-linux: .cfi_def_cfa_offset 0
+; ARM-linux-NEXT: bx lr
+
+; ARM-linux: pop {r4, r5}
+; ARM-linux: .cfi_def_cfa_offset 0
+; ARM-linux: .cfi_same_value r4
+; ARM-linux: .cfi_same_value r5
+}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/var.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"var.c", metadata !"/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test_basic",
+ metadata !"test_basic", metadata !"", i32 5, metadata !6, i1 false, i1 true,
+ i32 0, i32 0, null, i32 256, i1 false, void ()* @test_basic, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [sum]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/var.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5 "}
+!12 = metadata !{i32 786689, metadata !4, metadata !"count", metadata !5, i32 16777221, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [count] [line 5]
+!13 = metadata !{i32 5, i32 0, metadata !4, null}
+!14 = metadata !{i32 786688, metadata !4, metadata !"vl", metadata !5, i32 6, metadata !15, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [vl] [line 6]
+!15 = metadata !{i32 786454, metadata !16, null, metadata !"va_list", i32 30, i64 0, i64 0, i64 0, i32 0, metadata !17} ; [ DW_TAG_typedef ] [va_list] [line 30, size 0, align 0, offset 0] [from __builtin_va_list]
+!16 = metadata !{metadata !"/linux-x86_64-high/gcc_4.7.2/dbg/llvm/bin/../lib/clang/3.5/include/stdarg.h", metadata !"/tmp"}
+!17 = metadata !{i32 786454, metadata !1, null, metadata !"__builtin_va_list", i32 6, i64 0, i64 0, i64 0, i32 0, metadata !18} ; [ DW_TAG_typedef ] [__builtin_va_list] [line 6, size 0, align 0, offset 0] [from __va_list]
+!18 = metadata !{i32 786451, metadata !1, null, metadata !"__va_list", i32 6, i64 32, i64 32, i32 0, i32 0, null, metadata !19, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [__va_list] [line 6, size 32, align 32, offset 0] [def] [from ]
+!19 = metadata !{metadata !20}
+!20 = metadata !{i32 786445, metadata !1, metadata !18, metadata !"__ap", i32 6, i64 32, i64 32, i64 0, i32 0, metadata !21} ; [ DW_TAG_member ] [__ap] [line 6, size 32, align 32, offset 0] [from ]
+!21 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from ]
+!22 = metadata !{i32 6, i32 0, metadata !4, null}
+!23 = metadata !{i32 7, i32 0, metadata !4, null}
+!24 = metadata !{i32 786688, metadata !4, metadata !"test_basic", metadata !5, i32 8, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [sum] [line 8]
+!25 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!26 = metadata !{i32 786688, metadata !27, metadata !"i", metadata !5, i32 9, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [i] [line 9]
+!27 = metadata !{i32 786443, metadata !1, metadata !4, i32 9, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!28 = metadata !{i32 9, i32 0, metadata !27, null}
+!29 = metadata !{i32 10, i32 0, metadata !30, null}
+!30 = metadata !{i32 786443, metadata !1, metadata !27, i32 9, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!31 = metadata !{i32 11, i32 0, metadata !30, null}
+!32 = metadata !{i32 12, i32 0, metadata !4, null}
+!33 = metadata !{i32 13, i32 0, metadata !4, null}
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+attributes #0 = { "split-stack" }
diff --git a/test/CodeGen/ARM/default-float-abi.ll b/test/CodeGen/ARM/default-float-abi.ll
new file mode 100644
index 000000000000..1b26bbdd9259
--- /dev/null
+++ b/test/CodeGen/ARM/default-float-abi.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mtriple=armv7-linux-gnueabihf %s -o - | FileCheck %s --check-prefix=CHECK-HARD
+; RUN: llc -mtriple=armv7-linux-eabihf %s -o - | FileCheck %s --check-prefix=CHECK-HARD
+; RUN: llc -mtriple=armv7-linux-gnueabihf -float-abi=soft %s -o - | FileCheck %s --check-prefix=CHECK-SOFT
+; RUN: llc -mtriple=armv7-linux-gnueabi %s -o - | FileCheck %s --check-prefix=CHECK-SOFT
+; RUN: llc -mtriple=armv7-linux-eabi -float-abi=hard %s -o - | FileCheck %s --check-prefix=CHECK-HARD
+; RUN: llc -mtriple=thumbv7-apple-ios6.0 %s -o - | FileCheck %s --check-prefix=CHECK-SOFT
+
+define float @test_abi(float %lhs, float %rhs) {
+ %sum = fadd float %lhs, %rhs
+ ret float %sum
+
+; CHECK-HARD-LABEL: test_abi:
+; CHECK-HARD-NOT: vmov
+; CHECK-HARD: vadd.f32 s0, s0, s1
+; CHECK-HARD-NOT: vmov
+
+; CHECK-SOFT-LABEL: test_abi:
+; CHECK-SOFT-DAG: vmov [[LHS:s[0-9]+]], r0
+; CHECK-SOFT-DAG: vmov [[RHS:s[0-9]+]], r1
+; CHECK-SOFT: vadd.f32 [[DEST:s[0-9]+]], [[LHS]], [[RHS]]
+; CHECK-SOFT: vmov r0, [[DEST]]
+}
diff --git a/test/CodeGen/ARM/divmod-eabi.ll b/test/CodeGen/ARM/divmod-eabi.ll
index 404cae0da2b2..7f72048d391e 100644
--- a/test/CodeGen/ARM/divmod-eabi.ll
+++ b/test/CodeGen/ARM/divmod-eabi.ll
@@ -1,6 +1,9 @@
; RUN: llc -mtriple armv7-none-eabi %s -o - | FileCheck %s --check-prefix=EABI
+; RUN: llc -mtriple armv7-none-eabihf %s -o - | FileCheck %s --check-prefix=EABI
; RUN: llc -mtriple armv7-linux-gnueabi %s -o - | FileCheck %s --check-prefix=GNU
; RUN: llc -mtriple armv7-apple-darwin %s -o - | FileCheck %s --check-prefix=DARWIN
+; FIXME: long-term, we will use "-apple-macho" and won't need this exception:
+; RUN: llc -mtriple armv7-apple-darwin-eabi %s -o - | FileCheck %s --check-prefix=DARWIN
define signext i16 @f16(i16 signext %a, i16 signext %b) {
; EABI-LABEL: f16:
@@ -186,7 +189,7 @@ entry:
%div = sdiv i32 %a, %b
; EABI: __aeabi_idivmod
; EABI: mov [[div:r[0-9]+]], r0
-; GNU __aeabi_idiv
+; GNU: __aeabi_idiv
; GNU: mov [[sum:r[0-9]+]], r0
; DARWIN: ___divsi3
; DARWIN: mov [[sum:r[0-9]+]], r0
diff --git a/test/CodeGen/ARM/dwarf-eh.ll b/test/CodeGen/ARM/dwarf-eh.ll
new file mode 100644
index 000000000000..0b8a072ec95c
--- /dev/null
+++ b/test/CodeGen/ARM/dwarf-eh.ll
@@ -0,0 +1,71 @@
+; RUN: llc -mtriple=arm-netbsd-eabi -o - -filetype=asm %s | \
+; RUN: FileCheck %s
+; RUN: llc -mtriple=arm-netbsd-eabi -o - -filetype=asm %s \
+; RUN: -relocation-model=pic | FileCheck -check-prefix=CHECK-PIC %s
+
+; ModuleID = 'test.cc'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv5e--netbsd-eabi"
+
+%struct.exception = type { i8 }
+
+@_ZTVN10__cxxabiv117__class_type_infoE = external global i8*
+@_ZTS9exception = linkonce_odr constant [11 x i8] c"9exception\00"
+@_ZTI9exception = linkonce_odr unnamed_addr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8** @_ZTVN10__cxxabiv117__class_type_infoE, i32 2) to i8*), i8* getelementptr inbounds ([11 x i8]* @_ZTS9exception, i32 0, i32 0) }
+
+define void @f() uwtable {
+ %1 = alloca i8*
+ %2 = alloca i32
+ %e = alloca %struct.exception*, align 4
+ invoke void @g()
+ to label %3 unwind label %4
+
+ br label %16
+
+ %5 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* bitcast ({ i8*, i8* }* @_ZTI9exception to i8*)
+ %6 = extractvalue { i8*, i32 } %5, 0
+ store i8* %6, i8** %1
+ %7 = extractvalue { i8*, i32 } %5, 1
+ store i32 %7, i32* %2
+ br label %8
+
+ %9 = load i32* %2
+ %10 = call i32 @llvm.eh.typeid.for(i8* bitcast ({ i8*, i8* }* @_ZTI9exception to i8*)) nounwind
+ %11 = icmp eq i32 %9, %10
+ br i1 %11, label %12, label %17
+
+ %13 = load i8** %1
+ %14 = call i8* @__cxa_begin_catch(i8* %13) #3
+ %15 = bitcast i8* %14 to %struct.exception*
+ store %struct.exception* %15, %struct.exception** %e
+ call void @__cxa_end_catch()
+ br label %16
+
+ ret void
+
+ %18 = load i8** %1
+ %19 = load i32* %2
+ %20 = insertvalue { i8*, i32 } undef, i8* %18, 0
+ %21 = insertvalue { i8*, i32 } %20, i32 %19, 1
+ resume { i8*, i32 } %21
+}
+
+declare void @g()
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+; CHECK: .cfi_personality 0,
+; CHECK: .cfi_lsda 0,
+; CHECK: @TType Encoding = absptr
+; CHECK: @ Call site Encoding = udata4
+; CHECK-PIC: .cfi_personality 155,
+; CHECK-PIC: .cfi_lsda 27,
+; CHECK-PIC: @TType Encoding = indirect pcrel sdata4
+; CHECK-PIC: @ Call site Encoding = udata4
diff --git a/test/CodeGen/ARM/dyn-stackalloc.ll b/test/CodeGen/ARM/dyn-stackalloc.ll
index de2820e98a1d..4ac5b8a31e5f 100644
--- a/test/CodeGen/ARM/dyn-stackalloc.ll
+++ b/test/CodeGen/ARM/dyn-stackalloc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
%struct.comment = type { i8**, i32*, i32, i8* }
%struct.info = type { i32, i32, i32, i32, i32, i32, i32, i8* }
diff --git a/test/CodeGen/ARM/ehabi-filters.ll b/test/CodeGen/ARM/ehabi-filters.ll
index cb5291b20e62..f86b66c30c5d 100644
--- a/test/CodeGen/ARM/ehabi-filters.ll
+++ b/test/CodeGen/ARM/ehabi-filters.ll
@@ -1,4 +1,4 @@
-; RUN: llc -arm-enable-ehabi -arm-enable-ehabi-descriptors < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
target triple = "armv7-none-linux-gnueabi"
diff --git a/test/CodeGen/ARM/ehabi-handlerdata-nounwind.ll b/test/CodeGen/ARM/ehabi-handlerdata-nounwind.ll
new file mode 100644
index 000000000000..42ca9888abbc
--- /dev/null
+++ b/test/CodeGen/ARM/ehabi-handlerdata-nounwind.ll
@@ -0,0 +1,61 @@
+; Test for handlerdata when the function has landingpad and nounwind.
+
+; This test case checks whether the handlerdata is generated for a function
+; with a landingpad instruction, even if the function has the "nounwind" attribute.
+;
+; For example, although the following function never throws any exception,
+; the LSDA must still be generated; otherwise, we cannot catch the exception
+; properly.
+;
+; void test1() noexcept {
+; try {
+; throw_exception();
+; } catch (...) {
+; }
+; }
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype=asm -o - %s \
+; RUN: | FileCheck %s
+
+declare void @throw_exception()
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+define void @test1() nounwind {
+entry:
+ invoke void @throw_exception() to label %try.cont unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = tail call i8* @__cxa_begin_catch(i8* %1)
+ tail call void @__cxa_end_catch()
+ br label %try.cont
+
+try.cont:
+ ret void
+}
+
+; CHECK: .globl test1
+; CHECK: .align 2
+; CHECK: .type test1,%function
+; CHECK-LABEL: test1:
+; CHECK: .fnstart
+
+; CHECK-NOT: .cantunwind
+
+; CHECK: .personality __gxx_personality_v0
+; CHECK: .handlerdata
+; CHECK: .align 2
+; CHECK-LABEL: GCC_except_table0:
+; CHECK-LABEL: .Lexception0:
+; CHECK: .byte 255 @ @LPStart Encoding = omit
+; CHECK: .byte 0 @ @TType Encoding = absptr
+; CHECK: .asciz
+; CHECK: .byte 3 @ Call site Encoding = udata4
+; CHECK: .fnend
diff --git a/test/CodeGen/ARM/ehabi-handlerdata.ll b/test/CodeGen/ARM/ehabi-handlerdata.ll
new file mode 100644
index 000000000000..7045902f99cd
--- /dev/null
+++ b/test/CodeGen/ARM/ehabi-handlerdata.ll
@@ -0,0 +1,59 @@
+; ARM EHABI test for the handlerdata.
+
+; This test case checks whether the handlerdata for exception
+; handling is generated properly.
+;
+; (1) The handlerdata must not be empty.
+; (2) LPStartEncoding == DW_EH_PE_omit
+; (3) TTypeEncoding == DW_EH_PE_absptr
+; (4) CallSiteEncoding == DW_EH_PE_udata4
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype=asm -o - %s \
+; RUN: | FileCheck %s
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype=asm -o - %s \
+; RUN: -relocation-model=pic \
+; RUN: | FileCheck %s
+
+declare void @throw_exception()
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+define void @test1() {
+entry:
+ invoke void @throw_exception() to label %try.cont unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = tail call i8* @__cxa_begin_catch(i8* %1)
+ tail call void @__cxa_end_catch()
+ br label %try.cont
+
+try.cont:
+ ret void
+}
+
+; CHECK: .globl test1
+; CHECK: .align 2
+; CHECK: .type test1,%function
+; CHECK-LABEL: test1:
+; CHECK: .fnstart
+; CHECK: .personality __gxx_personality_v0
+; CHECK: .handlerdata
+; CHECK: .align 2
+; CHECK-LABEL: GCC_except_table0:
+; CHECK-LABEL: .Lexception0:
+; CHECK: .byte 255 @ @LPStart Encoding = omit
+; CHECK: .byte 0 @ @TType Encoding = absptr
+; CHECK: .asciz
+; CHECK: .byte 3 @ Call site Encoding = udata4
+; CHECK: .long
+; CHECK: .long
+; CHECK: .long
+; CHECK: .fnend
diff --git a/test/CodeGen/ARM/ehabi-no-landingpad.ll b/test/CodeGen/ARM/ehabi-no-landingpad.ll
index ac0dff421a6f..d5c74c5764dd 100644
--- a/test/CodeGen/ARM/ehabi-no-landingpad.ll
+++ b/test/CodeGen/ARM/ehabi-no-landingpad.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-unknown-linux-gnueabi | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
target triple = "armv7-unknown-linux-gnueabi"
diff --git a/test/CodeGen/ARM/ehabi-unwind.ll b/test/CodeGen/ARM/ehabi-unwind.ll
index fd7d0e63f3b8..a86f340d74e6 100644
--- a/test/CodeGen/ARM/ehabi-unwind.ll
+++ b/test/CodeGen/ARM/ehabi-unwind.ll
@@ -1,8 +1,7 @@
; Test that the EHABI unwind instruction generator does not encounter any
; unfamiliar instructions.
-; RUN: llc < %s -mtriple=thumbv7 -arm-enable-ehabi -disable-fp-elim
-; RUN: llc < %s -mtriple=thumbv7 -arm-enable-ehabi
-; RUN: llc < %s -mtriple=thumbv7 -arm-enable-ehabi -arm-enable-ehabi-descriptors
+; RUN: llc < %s -mtriple=thumbv7 -disable-fp-elim
+; RUN: llc < %s -mtriple=thumbv7
define void @_Z1fv() nounwind {
entry:
diff --git a/test/CodeGen/ARM/ehabi.ll b/test/CodeGen/ARM/ehabi.ll
index 66446528c31a..ebf0c2a00330 100644
--- a/test/CodeGen/ARM/ehabi.ll
+++ b/test/CodeGen/ARM/ehabi.ll
@@ -19,25 +19,53 @@
; (4) armv7 without -disable-fp-elim
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -disable-fp-elim -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-FP
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -disable-fp-elim -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
+; RUN: llc -mtriple arm-unknown-linux-androideabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP
+
+; RUN: llc -mtriple arm-unknown-linux-androideabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
+
+; RUN: llc -mtriple armv7-unknown-linux-androideabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
+
+; RUN: llc -mtriple armv7-unknown-linux-androideabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
+
+; RUN: llc -mtriple arm-unknown-netbsd-eabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=DWARF-FP
+
+; RUN: llc -mtriple arm-unknown-netbsd-eabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=DWARF-FP-ELIM
+
+; RUN: llc -mtriple armv7-unknown-netbsd-eabi \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=DWARF-V7-FP
+
+; RUN: llc -mtriple armv7-unknown-netbsd-eabi \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=DWARF-V7-FP-ELIM
+
;-------------------------------------------------------------------------------
; Test 1
;-------------------------------------------------------------------------------
@@ -136,14 +164,14 @@ declare void @_ZSt9terminatev()
; CHECK-V7-FP-LABEL: _Z4testiiiiiddddd:
; CHECK-V7-FP: .fnstart
-; CHECK-V7-FP: .save {r4, r11, lr}
-; CHECK-V7-FP: push {r4, r11, lr}
-; CHECK-V7-FP: .setfp r11, sp, #4
-; CHECK-V7-FP: add r11, sp, #4
+; CHECK-V7-FP: .save {r4, r10, r11, lr}
+; CHECK-V7-FP: push {r4, r10, r11, lr}
+; CHECK-V7-FP: .setfp r11, sp, #8
+; CHECK-V7-FP: add r11, sp, #8
; CHECK-V7-FP: .vsave {d8, d9, d10, d11, d12}
; CHECK-V7-FP: vpush {d8, d9, d10, d11, d12}
-; CHECK-V7-FP: .pad #28
-; CHECK-V7-FP: sub sp, sp, #28
+; CHECK-V7-FP: .pad #24
+; CHECK-V7-FP: sub sp, sp, #24
; CHECK-V7-FP: .personality __gxx_personality_v0
; CHECK-V7-FP: .handlerdata
; CHECK-V7-FP: .fnend
@@ -160,6 +188,93 @@ declare void @_ZSt9terminatev()
; CHECK-V7-FP-ELIM: .handlerdata
; CHECK-V7-FP-ELIM: .fnend
+; DWARF-FP-LABEL: _Z4testiiiiiddddd:
+; DWARF-FP: .cfi_startproc
+; DWARF-FP: .cfi_personality 0, __gxx_personality_v0
+; DWARF-FP: .cfi_lsda 0, .Lexception0
+; DWARF-FP: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; DWARF-FP: .cfi_def_cfa_offset 36
+; DWARF-FP: .cfi_offset lr, -4
+; DWARF-FP: .cfi_offset r11, -8
+; DWARF-FP: .cfi_offset r10, -12
+; DWARF-FP: .cfi_offset r9, -16
+; DWARF-FP: .cfi_offset r8, -20
+; DWARF-FP: .cfi_offset r7, -24
+; DWARF-FP: .cfi_offset r6, -28
+; DWARF-FP: .cfi_offset r5, -32
+; DWARF-FP: .cfi_offset r4, -36
+; DWARF-FP: add r11, sp, #28
+; DWARF-FP: .cfi_def_cfa r11, 8
+; DWARF-FP: sub sp, sp, #28
+; DWARF-FP: sub sp, r11, #28
+; DWARF-FP: pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; DWARF-FP: mov pc, lr
+; DWARF-FP: .cfi_endproc
+
+; DWARF-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; DWARF-FP-ELIM: .cfi_startproc
+; DWARF-FP-ELIM: .cfi_personality 0, __gxx_personality_v0
+; DWARF-FP-ELIM: .cfi_lsda 0, .Lexception0
+; DWARF-FP-ELIM: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; DWARF-FP-ELIM: .cfi_def_cfa_offset 36
+; DWARF-FP-ELIM: .cfi_offset lr, -4
+; DWARF-FP-ELIM: .cfi_offset r11, -8
+; DWARF-FP-ELIM: .cfi_offset r10, -12
+; DWARF-FP-ELIM: .cfi_offset r9, -16
+; DWARF-FP-ELIM: .cfi_offset r8, -20
+; DWARF-FP-ELIM: .cfi_offset r7, -24
+; DWARF-FP-ELIM: .cfi_offset r6, -28
+; DWARF-FP-ELIM: .cfi_offset r5, -32
+; DWARF-FP-ELIM: .cfi_offset r4, -36
+; DWARF-FP-ELIM: sub sp, sp, #28
+; DWARF-FP-ELIM: .cfi_def_cfa_offset 64
+; DWARF-FP-ELIM: add sp, sp, #28
+; DWARF-FP-ELIM: pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; DWARF-FP-ELIM: mov pc, lr
+; DWARF-FP-ELIM: .cfi_endproc
+
+; DWARF-V7-FP-LABEL: _Z4testiiiiiddddd:
+; DWARF-V7-FP: .cfi_startproc
+; DWARF-V7-FP: .cfi_personality 0, __gxx_personality_v0
+; DWARF-V7-FP: .cfi_lsda 0, .Lexception0
+; DWARF-V7-FP: push {r4, r10, r11, lr}
+; DWARF-V7-FP: .cfi_def_cfa_offset 16
+; DWARF-V7-FP: .cfi_offset lr, -4
+; DWARF-V7-FP: .cfi_offset r11, -8
+; DWARF-V7-FP: .cfi_offset r10, -12
+; DWARF-V7-FP: .cfi_offset r4, -16
+; DWARF-V7-FP: add r11, sp, #8
+; DWARF-V7-FP: .cfi_def_cfa r11, 8
+; DWARF-V7-FP: vpush {d8, d9, d10, d11, d12}
+; DWARF-V7-FP: .cfi_offset d12, -24
+; DWARF-V7-FP: .cfi_offset d11, -32
+; DWARF-V7-FP: .cfi_offset d10, -40
+; DWARF-V7-FP: .cfi_offset d9, -48
+; DWARF-V7-FP: sub sp, sp, #24
+; DWARF-V7-FP: sub sp, r11, #48
+; DWARF-V7-FP: vpop {d8, d9, d10, d11, d12}
+; DWARF-V7-FP: pop {r4, r10, r11, pc}
+; DWARF-V7-FP: .cfi_endproc
+
+; DWARF-V7-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; DWARF-V7-FP-ELIM: .cfi_startproc
+; DWARF-V7-FP-ELIM: .cfi_personality 0, __gxx_personality_v0
+; DWARF-V7-FP-ELIM: .cfi_lsda 0, .Lexception0
+; DWARF-V7-FP-ELIM: push {r4, lr}
+; DWARF-V7-FP-ELIM: .cfi_def_cfa_offset 8
+; DWARF-V7-FP-ELIM: .cfi_offset lr, -4
+; DWARF-V7-FP-ELIM: .cfi_offset r4, -8
+; DWARF-V7-FP-ELIM: vpush {d8, d9, d10, d11, d12}
+; DWARF-V7-FP-ELIM: .cfi_offset d12, -16
+; DWARF-V7-FP-ELIM: .cfi_offset d11, -24
+; DWARF-V7-FP-ELIM: .cfi_offset d10, -32
+; DWARF-V7-FP-ELIM: .cfi_offset d9, -40
+; DWARF-V7-FP-ELIM: sub sp, sp, #24
+; DWARF-V7-FP-ELIM: .cfi_def_cfa_offset 72
+; DWARF-V7-FP-ELIM: add sp, sp, #24
+; DWARF-V7-FP-ELIM: vpop {d8, d9, d10, d11, d12}
+; DWARF-V7-FP-ELIM: pop {r4, pc}
+; DWARF-V7-FP-ELIM: .cfi_endproc
;-------------------------------------------------------------------------------
; Test 2
@@ -169,7 +284,7 @@ declare void @throw_exception_2()
define void @test2() {
entry:
- tail call void @throw_exception_2()
+ call void @throw_exception_2()
ret void
}
@@ -207,6 +322,48 @@ entry:
; CHECK-V7-FP-ELIM: pop {r11, pc}
; CHECK-V7-FP-ELIM: .fnend
+; DWARF-FP-LABEL: test2:
+; DWARF-FP: .cfi_startproc
+; DWARF-FP: push {r11, lr}
+; DWARF-FP: .cfi_def_cfa_offset 8
+; DWARF-FP: .cfi_offset lr, -4
+; DWARF-FP: .cfi_offset r11, -8
+; DWARF-FP: mov r11, sp
+; DWARF-FP: .cfi_def_cfa_register r11
+; DWARF-FP: pop {r11, lr}
+; DWARF-FP: mov pc, lr
+; DWARF-FP: .cfi_endproc
+
+; DWARF-FP-ELIM-LABEL: test2:
+; DWARF-FP-ELIM: .cfi_startproc
+; DWARF-FP-ELIM: push {r11, lr}
+; DWARF-FP-ELIM: .cfi_def_cfa_offset 8
+; DWARF-FP-ELIM: .cfi_offset lr, -4
+; DWARF-FP-ELIM: .cfi_offset r11, -8
+; DWARF-FP-ELIM: pop {r11, lr}
+; DWARF-FP-ELIM: mov pc, lr
+; DWARF-FP-ELIM: .cfi_endproc
+
+; DWARF-V7-FP-LABEL: test2:
+; DWARF-V7-FP: .cfi_startproc
+; DWARF-V7-FP: push {r11, lr}
+; DWARF-V7-FP: .cfi_def_cfa_offset 8
+; DWARF-V7-FP: .cfi_offset lr, -4
+; DWARF-V7-FP: .cfi_offset r11, -8
+; DWARF-V7-FP: mov r11, sp
+; DWARF-V7-FP: .cfi_def_cfa_register r11
+; DWARF-V7-FP: pop {r11, pc}
+; DWARF-V7-FP: .cfi_endproc
+
+; DWARF-V7-FP-ELIM-LABEL: test2:
+; DWARF-V7-FP-ELIM: .cfi_startproc
+; DWARF-V7-FP-ELIM: push {r11, lr}
+; DWARF-V7-FP-ELIM: .cfi_def_cfa_offset 8
+; DWARF-V7-FP-ELIM: .cfi_offset lr, -4
+; DWARF-V7-FP-ELIM: .cfi_offset r11, -8
+; DWARF-V7-FP-ELIM: pop {r11, pc}
+; DWARF-V7-FP-ELIM: .cfi_endproc
+
;-------------------------------------------------------------------------------
; Test 3
@@ -263,6 +420,56 @@ entry:
; CHECK-V7-FP-ELIM: pop {r4, r5, r11, pc}
; CHECK-V7-FP-ELIM: .fnend
+; DWARF-FP-LABEL: test3:
+; DWARF-FP: .cfi_startproc
+; DWARF-FP: push {r4, r5, r11, lr}
+; DWARF-FP: .cfi_def_cfa_offset 16
+; DWARF-FP: .cfi_offset lr, -4
+; DWARF-FP: .cfi_offset r11, -8
+; DWARF-FP: .cfi_offset r5, -12
+; DWARF-FP: .cfi_offset r4, -16
+; DWARF-FP: add r11, sp, #8
+; DWARF-FP: .cfi_def_cfa r11, 8
+; DWARF-FP: pop {r4, r5, r11, lr}
+; DWARF-FP: mov pc, lr
+; DWARF-FP: .cfi_endproc
+
+; DWARF-FP-ELIM-LABEL: test3:
+; DWARF-FP-ELIM: .cfi_startproc
+; DWARF-FP-ELIM: push {r4, r5, r11, lr}
+; DWARF-FP-ELIM: .cfi_def_cfa_offset 16
+; DWARF-FP-ELIM: .cfi_offset lr, -4
+; DWARF-FP-ELIM: .cfi_offset r11, -8
+; DWARF-FP-ELIM: .cfi_offset r5, -12
+; DWARF-FP-ELIM: .cfi_offset r4, -16
+; DWARF-FP-ELIM: pop {r4, r5, r11, lr}
+; DWARF-FP-ELIM: mov pc, lr
+; DWARF-FP-ELIM: .cfi_endproc
+
+; DWARF-V7-FP-LABEL: test3:
+; DWARF-V7-FP: .cfi_startproc
+; DWARF-V7-FP: push {r4, r5, r11, lr}
+; DWARF-V7-FP: .cfi_def_cfa_offset 16
+; DWARF-V7-FP: .cfi_offset lr, -4
+; DWARF-V7-FP: .cfi_offset r11, -8
+; DWARF-V7-FP: .cfi_offset r5, -12
+; DWARF-V7-FP: .cfi_offset r4, -16
+; DWARF-V7-FP: add r11, sp, #8
+; DWARF-V7-FP: .cfi_def_cfa r11, 8
+; DWARF-V7-FP: pop {r4, r5, r11, pc}
+; DWARF-V7-FP: .cfi_endproc
+
+; DWARF-V7-FP-ELIM-LABEL: test3:
+; DWARF-V7-FP-ELIM: .cfi_startproc
+; DWARF-V7-FP-ELIM: push {r4, r5, r11, lr}
+; DWARF-V7-FP-ELIM: .cfi_def_cfa_offset 16
+; DWARF-V7-FP-ELIM: .cfi_offset lr, -4
+; DWARF-V7-FP-ELIM: .cfi_offset r11, -8
+; DWARF-V7-FP-ELIM: .cfi_offset r5, -12
+; DWARF-V7-FP-ELIM: .cfi_offset r4, -16
+; DWARF-V7-FP-ELIM: pop {r4, r5, r11, pc}
+; DWARF-V7-FP-ELIM: .cfi_endproc
+
;-------------------------------------------------------------------------------
; Test 4
@@ -296,3 +503,27 @@ entry:
; CHECK-V7-FP-ELIM: bx lr
; CHECK-V7-FP-ELIM: .cantunwind
; CHECK-V7-FP-ELIM: .fnend
+
+; DWARF-FP-LABEL: test4:
+; DWARF-FP-NOT: .cfi_startproc
+; DWARF-FP: mov pc, lr
+; DWARF-FP-NOT: .cfi_endproc
+; DWARF-FP: .size test4,
+
+; DWARF-FP-ELIM-LABEL: test4:
+; DWARF-FP-ELIM-NOT: .cfi_startproc
+; DWARF-FP-ELIM: mov pc, lr
+; DWARF-FP-ELIM-NOT: .cfi_endproc
+; DWARF-FP-ELIM: .size test4,
+
+; DWARF-V7-FP-LABEL: test4:
+; DWARF-V7-FP-NOT: .cfi_startproc
+; DWARF-V7-FP: bx lr
+; DWARF-V7-FP-NOT: .cfi_endproc
+; DWARF-V7-FP: .size test4,
+
+; DWARF-V7-FP-ELIM-LABEL: test4:
+; DWARF-V7-FP-ELIM-NOT: .cfi_startproc
+; DWARF-V7-FP-ELIM: bx lr
+; DWARF-V7-FP-ELIM-NOT: .cfi_endproc
+; DWARF-V7-FP-ELIM: .size test4,
diff --git a/test/CodeGen/ARM/extload-knownzero.ll b/test/CodeGen/ARM/extload-knownzero.ll
index 8ccf58c39170..f55b95104b86 100644
--- a/test/CodeGen/ARM/extload-knownzero.ll
+++ b/test/CodeGen/ARM/extload-knownzero.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
; rdar://12771555
define void @foo(i16* %ptr, i32 %a) nounwind {
diff --git a/test/CodeGen/ARM/extloadi1.ll b/test/CodeGen/ARM/extloadi1.ll
index dc45ce705f44..2504c6c61e3e 100644
--- a/test/CodeGen/ARM/extloadi1.ll
+++ b/test/CodeGen/ARM/extloadi1.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
+
@handler_installed.6144.b = external global i1 ; <i1*> [#uses=1]
define void @__mf_sigusr1_respond() {
diff --git a/test/CodeGen/ARM/fadds.ll b/test/CodeGen/ARM/fadds.ll
index 21219ce18e26..b5d3bdae1f9d 100644
--- a/test/CodeGen/ARM/fadds.ll
+++ b/test/CodeGen/ARM/fadds.ll
@@ -1,9 +1,20 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CORTEXA8U
-; RUN: llc < %s -mtriple=arm-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8U
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - \
+; RUN: | FileCheck %s -check-prefix=NFP0
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8U
+
+; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8U
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA9
define float @test(float %a, float %b) {
entry:
diff --git a/test/CodeGen/ARM/fast-isel-call.ll b/test/CodeGen/ARM/fast-isel-call.ll
index 917a15d28bd7..2d7378e47f2f 100644
--- a/test/CodeGen/ARM/fast-isel-call.ll
+++ b/test/CodeGen/ARM/fast-isel-call.ll
@@ -8,8 +8,6 @@
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -mattr=-vfp2 | FileCheck %s --check-prefix=ARM-NOVFP
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -mattr=-vfp2 | FileCheck %s --check-prefix=THUMB-NOVFP
-; XFAIL: vg_leak
-
; Note that some of these tests assume that relocations are either
; movw/movt or constant pool loads. Different platforms will select
; different approaches.
diff --git a/test/CodeGen/ARM/fast-isel-crash2.ll b/test/CodeGen/ARM/fast-isel-crash2.ll
index d606877673dc..cccd9eb951ba 100644
--- a/test/CodeGen/ARM/fast-isel-crash2.ll
+++ b/test/CodeGen/ARM/fast-isel-crash2.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-apple-darwin
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-linux-gnueabi
+; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=thumbv7-apple-darwin
+; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=armv7-linux-gnueabi
; rdar://9515076
; (Make sure this doesn't crash.)
diff --git a/test/CodeGen/ARM/fast-isel-frameaddr.ll b/test/CodeGen/ARM/fast-isel-frameaddr.ll
index 8542bb5e27d2..93cdbbbbd86e 100644
--- a/test/CodeGen/ARM/fast-isel-frameaddr.ll
+++ b/test/CodeGen/ARM/fast-isel-frameaddr.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=DARWIN-ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=DARWIN-ARM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=LINUX-ARM
-; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=DARWIN-THUMB2
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=DARWIN-THUMB2
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-linux-gnueabi | FileCheck %s --check-prefix=LINUX-THUMB2
define i8* @frameaddr_index0() nounwind {
@@ -34,14 +34,12 @@ entry:
; DARWIN-ARM-LABEL: frameaddr_index1:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
-; DARWIN-ARM: mov r0, r7
-; DARWIN-ARM: ldr r0, [r0]
+; DARWIN-ARM: ldr r0, [r7]
; DARWIN-THUMB2-LABEL: frameaddr_index1:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
-; DARWIN-THUMB2: mov r0, r7
-; DARWIN-THUMB2: ldr r0, [r0]
+; DARWIN-THUMB2: ldr r0, [r7]
; LINUX-ARM-LABEL: frameaddr_index1:
; LINUX-ARM: push {r11}
@@ -63,16 +61,14 @@ entry:
; DARWIN-ARM-LABEL: frameaddr_index3:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
-; DARWIN-ARM: mov r0, r7
-; DARWIN-ARM: ldr r0, [r0]
+; DARWIN-ARM: ldr r0, [r7]
; DARWIN-ARM: ldr r0, [r0]
; DARWIN-ARM: ldr r0, [r0]
; DARWIN-THUMB2-LABEL: frameaddr_index3:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
-; DARWIN-THUMB2: mov r0, r7
-; DARWIN-THUMB2: ldr r0, [r0]
+; DARWIN-THUMB2: ldr r0, [r7]
; DARWIN-THUMB2: ldr r0, [r0]
; DARWIN-THUMB2: ldr r0, [r0]
diff --git a/test/CodeGen/ARM/fast-isel-inline-asm.ll b/test/CodeGen/ARM/fast-isel-inline-asm.ll
new file mode 100644
index 000000000000..2eb25ec7738b
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-inline-asm.ll
@@ -0,0 +1,18 @@
+; RUN: llc -fast-isel < %s | FileCheck %s
+target datalayout = "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+target triple = "thumbv7-apple-ios5.0.0"
+
+%0 = type opaque
+
+; Make sure that the inline asm starts right after the call to bar.
+define void @test_inline_asm_sideeffect(%0* %call) {
+; CHECK: bl _bar
+; CHECK-NEXT: InlineAsm Start
+ call void @bar()
+ call void asm sideeffect "mov\09r7, r7\09\09@ marker", ""()
+ %1 = call %0* bitcast (i8* (i8*)* @foo to %0* (%0*)*)(%0* %call)
+ ret void
+}
+
+declare i8* @foo(i8*)
+declare void @bar()
diff --git a/test/CodeGen/ARM/fast-isel-intrinsic.ll b/test/CodeGen/ARM/fast-isel-intrinsic.ll
index b08b72baa61e..089209e45fc3 100644
--- a/test/CodeGen/ARM/fast-isel-intrinsic.ll
+++ b/test/CodeGen/ARM/fast-isel-intrinsic.ll
@@ -5,8 +5,6 @@
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -arm-long-calls -verify-machineinstrs | FileCheck %s --check-prefix=ARM-LONG
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -arm-long-calls -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-LONG
-; XFAIL: vg_leak
-
; Note that some of these tests assume that relocations are either
; movw/movt or constant pool loads. Different platforms will select
; different approaches.
@@ -15,7 +13,7 @@
@temp = common global [60 x i8] zeroinitializer, align 1
define void @t1() nounwind ssp {
-; ARM: t1
+; ARM-LABEL: t1:
; ARM: {{(movw r0, :lower16:_?message1)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:_?message1)|(ldr r0, \[r0\])}}
; ARM: add r0, r0, #5
@@ -23,12 +21,12 @@ define void @t1() nounwind ssp {
; ARM: movw r2, #10
; ARM: and r1, r1, #255
; ARM: bl {{_?}}memset
-; ARM-LONG: t1
+; ARM-LONG-LABEL: t1:
; ARM-LONG: {{(movw r3, :lower16:L_memset\$non_lazy_ptr)|(ldr r3, .LCPI)}}
; ARM-LONG: {{(movt r3, :upper16:L_memset\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
-; THUMB: t1
+; THUMB-LABEL: t1:
; THUMB: {{(movw r0, :lower16:_?message1)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:_?message1)|(ldr r0, \[r0\])}}
; THUMB: adds r0, #5
@@ -38,7 +36,7 @@ define void @t1() nounwind ssp {
; THUMB: movt r2, #0
; THUMB: and r1, r1, #255
; THUMB: bl {{_?}}memset
-; THUMB-LONG: t1
+; THUMB-LONG-LABEL: t1:
; THUMB-LONG: movw r3, :lower16:L_memset$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memset$non_lazy_ptr
; THUMB-LONG: ldr r3, [r3]
@@ -50,7 +48,7 @@ define void @t1() nounwind ssp {
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
define void @t2() nounwind ssp {
-; ARM: t2
+; ARM-LABEL: t2:
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
@@ -61,12 +59,12 @@ define void @t2() nounwind ssp {
; ARM: mov r0, r1
; ARM: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
; ARM: bl {{_?}}memcpy
-; ARM-LONG: t2
+; ARM-LONG-LABEL: t2:
; ARM-LONG: {{(movw r3, :lower16:L_memcpy\$non_lazy_ptr)|(ldr r3, .LCPI)}}
; ARM-LONG: {{(movt r3, :upper16:L_memcpy\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
-; THUMB: t2
+; THUMB-LABEL: t2:
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
@@ -78,7 +76,7 @@ define void @t2() nounwind ssp {
; THUMB: mov r0, r1
; THUMB: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
; THUMB: bl {{_?}}memcpy
-; THUMB-LONG: t2
+; THUMB-LONG-LABEL: t2:
; THUMB-LONG: movw r3, :lower16:L_memcpy$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memcpy$non_lazy_ptr
; THUMB-LONG: ldr r3, [r3]
@@ -90,7 +88,7 @@ define void @t2() nounwind ssp {
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
define void @t3() nounwind ssp {
-; ARM: t3
+; ARM-LABEL: t3:
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
@@ -99,12 +97,12 @@ define void @t3() nounwind ssp {
; ARM: movw r2, #10
; ARM: mov r0, r1
; ARM: bl {{_?}}memmove
-; ARM-LONG: t3
+; ARM-LONG-LABEL: t3:
; ARM-LONG: {{(movw r3, :lower16:L_memmove\$non_lazy_ptr)|(ldr r3, .LCPI)}}
; ARM-LONG: {{(movt r3, :upper16:L_memmove\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
-; THUMB: t3
+; THUMB-LABEL: t3:
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
@@ -116,7 +114,7 @@ define void @t3() nounwind ssp {
; THUMB: mov r0, r1
; THUMB: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
; THUMB: bl {{_?}}memmove
-; THUMB-LONG: t3
+; THUMB-LONG-LABEL: t3:
; THUMB-LONG: movw r3, :lower16:L_memmove$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memmove$non_lazy_ptr
; THUMB-LONG: ldr r3, [r3]
@@ -126,7 +124,7 @@ define void @t3() nounwind ssp {
}
define void @t4() nounwind ssp {
-; ARM: t4
+; ARM-LABEL: t4:
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
@@ -137,7 +135,7 @@ define void @t4() nounwind ssp {
; ARM: ldrh r1, [r0, #24]
; ARM: strh r1, [r0, #12]
; ARM: bx lr
-; THUMB: t4
+; THUMB-LABEL: t4:
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
@@ -155,7 +153,7 @@ define void @t4() nounwind ssp {
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
define void @t5() nounwind ssp {
-; ARM: t5
+; ARM-LABEL: t5:
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
@@ -170,7 +168,7 @@ define void @t5() nounwind ssp {
; ARM: ldrh r1, [r0, #24]
; ARM: strh r1, [r0, #12]
; ARM: bx lr
-; THUMB: t5
+; THUMB-LABEL: t5:
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
@@ -190,7 +188,7 @@ define void @t5() nounwind ssp {
}
define void @t6() nounwind ssp {
-; ARM: t6
+; ARM-LABEL: t6:
; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
@@ -215,7 +213,7 @@ define void @t6() nounwind ssp {
; ARM: ldrb r1, [r0, #25]
; ARM: strb r1, [r0, #13]
; ARM: bx lr
-; THUMB: t6
+; THUMB-LABEL: t6:
; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
@@ -253,9 +251,9 @@ define void @t7() nounwind ssp {
define i32 @t8(i32 %x) nounwind {
entry:
-; ARM: t8
+; ARM-LABEL: t8:
; ARM-NOT: FastISel missed call: %expval = call i32 @llvm.expect.i32(i32 %x, i32 1)
-; THUMB: t8
+; THUMB-LABEL: t8:
; THUMB-NOT: FastISel missed call: %expval = call i32 @llvm.expect.i32(i32 %x, i32 1)
%expval = call i32 @llvm.expect.i32(i32 %x, i32 1)
ret i32 %expval
diff --git a/test/CodeGen/ARM/fast-isel-static.ll b/test/CodeGen/ARM/fast-isel-static.ll
index 93c14a09205e..9bd0a51e7120 100644
--- a/test/CodeGen/ARM/fast-isel-static.ll
+++ b/test/CodeGen/ARM/fast-isel-static.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static -arm-long-calls | FileCheck -check-prefix=CHECK-LONG %s
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static -arm-long-calls | FileCheck -check-prefix=CHECK-LONG %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static | FileCheck -check-prefix=CHECK-NORM %s
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static | FileCheck -check-prefix=CHECK-NORM %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static -arm-long-calls | FileCheck -check-prefix=CHECK-LONG %s
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static -arm-long-calls | FileCheck -check-prefix=CHECK-LONG %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static | FileCheck -check-prefix=CHECK-NORM %s
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static | FileCheck -check-prefix=CHECK-NORM %s
define void @myadd(float* %sum, float* %addend) nounwind {
entry:
diff --git a/test/CodeGen/ARM/fast-tail-call.ll b/test/CodeGen/ARM/fast-tail-call.ll
index 9fbdc9d24b01..6472016c0572 100644
--- a/test/CodeGen/ARM/fast-tail-call.ll
+++ b/test/CodeGen/ARM/fast-tail-call.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv7-linux-gnueabi -O0 -arm-tail-calls < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -O0 < %s | FileCheck %s
; Primarily a non-crash test: Thumbv7 Linux does not have FastISel support,
; which led (via a convoluted route) to DAG nodes after a TC_RETURN that
diff --git a/test/CodeGen/ARM/fastcc-vfp.ll b/test/CodeGen/ARM/fastcc-vfp.ll
new file mode 100644
index 000000000000..4c98150c7081
--- /dev/null
+++ b/test/CodeGen/ARM/fastcc-vfp.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios -mattr=+vfp2 | FileCheck %s
+
+define fastcc double @t1(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, float %a, float %b) {
+entry:
+; CHECK-LABEL: t1:
+; CHECK-NOT: vmov
+; CHECK: vldr
+ %add = fadd float %a, %b
+ %conv = fpext float %add to double
+ ret double %conv
+}
+
+define fastcc double @t2(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %a, float %b, double %c) {
+entry:
+; CHECK-LABEL: t2:
+; CHECK-NOT: vmov
+; CHECK: vldr
+ %add = fadd double %a, %c
+ ret double %add
+}
+
+define fastcc float @t3(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, float %a, double %b, float %c) {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: vldr
+ %add = fadd float %a, %c
+ ret float %add
+}
+
+define fastcc double @t4(double %a, double %b) #0 {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: vstr
+ %add = fadd double %a, %b
+ %sub = fsub double %a, %b
+ %call = tail call fastcc double @x(double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double %add, float 0.000000e+00, double %sub) #2
+ ret double %call
+}
+
+declare fastcc double @x(double, double, double, double, double, double, double, float, double)
diff --git a/test/CodeGen/ARM/fastisel-thumb-litpool.ll b/test/CodeGen/ARM/fastisel-thumb-litpool.ll
new file mode 100644
index 000000000000..aa9e7260fb2e
--- /dev/null
+++ b/test/CodeGen/ARM/fastisel-thumb-litpool.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple=thumbv7-apple-ios -O0 -o - %s | FileCheck %s
+
+; We used to accidentally create both an ARM and a Thumb ldr here. It led to an
+; assertion failure at the time, but could go all the way through to emission,
+; hence the CHECK-NOT.
+
+define i32 @test_thumb_ldrlit() minsize {
+; CHECK: ldr r0, LCPI0_0
+; CHECK-NOT: ldr
+ ret i32 12345678
+}
diff --git a/test/CodeGen/ARM/fdivs.ll b/test/CodeGen/ARM/fdivs.ll
index a4fecfe14588..7cab7668900a 100644
--- a/test/CodeGen/ARM/fdivs.ll
+++ b/test/CodeGen/ARM/fdivs.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s -check-prefix=VFP2
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s -check-prefix=NFP0
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=CORTEXA8
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=CORTEXA9
define float @test(float %a, float %b) {
entry:
diff --git a/test/CodeGen/ARM/fixunsdfdi.ll b/test/CodeGen/ARM/fixunsdfdi.ll
index 6db2385a63eb..f3406cc55fbb 100644
--- a/test/CodeGen/ARM/fixunsdfdi.ll
+++ b/test/CodeGen/ARM/fixunsdfdi.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-; RUN: llc < %s -march=arm -mattr=vfp2 | not grep vstr.64
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
+; RUN: llc -mtriple=arm-eabi -mattr=vfp2 %s -o - | FileCheck %s
define hidden i64 @__fixunsdfdi(double %x) nounwind readnone {
entry:
@@ -27,3 +27,6 @@ bb7: ; preds = %bb3
bb10: ; preds = %entry
ret i64 0
}
+
+; CHECK-NOT: vstr.64
+
diff --git a/test/CodeGen/ARM/fmacs.ll b/test/CodeGen/ARM/fmacs.ll
index f2486c65d3a2..6f8c0fe13c9d 100644
--- a/test/CodeGen/ARM/fmacs.ll
+++ b/test/CodeGen/ARM/fmacs.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s -check-prefix=HARD
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s -check-prefix=VFP2
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s -check-prefix=NEON
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=A8
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
+; RUN: llc -mtriple=arm-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard %s -o - | FileCheck %s -check-prefix=HARD
define float @t1(float %acc, float %a, float %b) {
entry:
diff --git a/test/CodeGen/ARM/fmdrr-fmrrd.ll b/test/CodeGen/ARM/fmdrr-fmrrd.ll
index eb72faf8d811..a3669b42dc6d 100644
--- a/test/CodeGen/ARM/fmdrr-fmrrd.ll
+++ b/test/CodeGen/ARM/fmdrr-fmrrd.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=vfp2 | not grep fmdrr
-; RUN: llc < %s -march=arm -mattr=vfp2 | not grep fmrrd
+; RUN: llc -mtriple=arm-eabi -mattr=vfp2 %s -o - | FileCheck %s
; naive codegen for this is:
; _i:
@@ -11,3 +10,8 @@ define i64 @test(double %X) {
%Y = bitcast double %X to i64
ret i64 %Y
}
+
+; CHECK-LABEL: test:
+; CHECK-NOT: fmdrr
+; CHECK-NOT: fmrrd
+
diff --git a/test/CodeGen/ARM/fmscs.ll b/test/CodeGen/ARM/fmscs.ll
index f16ec172cb70..5aff74c4cc2b 100644
--- a/test/CodeGen/ARM/fmscs.ll
+++ b/test/CodeGen/ARM/fmscs.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s -check-prefix=VFP2
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s -check-prefix=NEON
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=A8
define float @t1(float %acc, float %a, float %b) {
entry:
diff --git a/test/CodeGen/ARM/fmuls.ll b/test/CodeGen/ARM/fmuls.ll
index d11f6bd1bd99..b24d867a7e86 100644
--- a/test/CodeGen/ARM/fmuls.ll
+++ b/test/CodeGen/ARM/fmuls.ll
@@ -1,9 +1,20 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CORTEXA8U
-; RUN: llc < %s -mtriple=arm-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8U
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - \
+; RUN: | FileCheck %s -check-prefix=NFP0
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8U
+
+; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8U
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA9
define float @test(float %a, float %b) {
entry:
diff --git a/test/CodeGen/ARM/fnegs.ll b/test/CodeGen/ARM/fnegs.ll
index dc4c2e33e491..36af8352433e 100644
--- a/test/CodeGen/ARM/fnegs.ll
+++ b/test/CodeGen/ARM/fnegs.ll
@@ -1,9 +1,20 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CORTEXA8U
-; RUN: llc < %s -mtriple=arm-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8U
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - \
+; RUN: | FileCheck %s -check-prefix=NFP0
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8U
+
+; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA8U
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - \
+; RUN: | FileCheck %s -check-prefix=CORTEXA9
define float @test1(float* %a) {
entry:
diff --git a/test/CodeGen/ARM/fnmacs.ll b/test/CodeGen/ARM/fnmacs.ll
index 825feaa0453f..ab35a9769b05 100644
--- a/test/CodeGen/ARM/fnmacs.ll
+++ b/test/CodeGen/ARM/fnmacs.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s -check-prefix=VFP2
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s -check-prefix=NEON
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=A8
define float @t1(float %acc, float %a, float %b) {
entry:
diff --git a/test/CodeGen/ARM/fnmscs.ll b/test/CodeGen/ARM/fnmscs.ll
index 78ccb6095e05..5fa6b219388d 100644
--- a/test/CodeGen/ARM/fnmscs.ll
+++ b/test/CodeGen/ARM/fnmscs.ll
@@ -1,9 +1,20 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 -regalloc=basic | FileCheck %s -check-prefix=A8
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=A8U
-; RUN: llc < %s -mtriple=arm-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8U
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - \
+; RUN: | FileCheck %s -check-prefix=NEON
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=A8
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -regalloc=basic %s -o - \
+; RUN: | FileCheck %s -check-prefix=A8
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s -check-prefix=A8U
+
+; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=A8U
define float @t1(float %acc, float %a, float %b) nounwind {
entry:
diff --git a/test/CodeGen/ARM/fnmul.ll b/test/CodeGen/ARM/fnmul.ll
index 6d7bc05ffa94..e14e5baeb8ab 100644
--- a/test/CodeGen/ARM/fnmul.ll
+++ b/test/CodeGen/ARM/fnmul.ll
@@ -1,5 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | grep vnmul.f64
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 -enable-sign-dependent-rounding-fp-math | grep vmul.f64
+; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s
+
+; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 -enable-sign-dependent-rounding-fp-math %s -o - \
+; RUN: | FileCheck %s -check-prefix CHECK-ROUNDING
+
define double @t1(double %a, double %b) {
@@ -9,3 +12,6 @@ entry:
ret double %tmp4
}
+; CHECK: vnmul.f64
+; CHECK-ROUNDING: vmul.f64
+
diff --git a/test/CodeGen/ARM/fnmuls.ll b/test/CodeGen/ARM/fnmuls.ll
index 3223885feda9..de3b053bfc51 100644
--- a/test/CodeGen/ARM/fnmuls.ll
+++ b/test/CodeGen/ARM/fnmuls.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s
define arm_aapcs_vfpcc float @test1(float %a, float %b) nounwind {
; CHECK: vnmul.f32 s0, s0, s1
diff --git a/test/CodeGen/ARM/fold-const.ll b/test/CodeGen/ARM/fold-const.ll
index 1ba561dd70b0..dc5419f24e2a 100644
--- a/test/CodeGen/ARM/fold-const.ll
+++ b/test/CodeGen/ARM/fold-const.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v7 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v7 %s -o - | FileCheck %s
define i32 @f(i32 %a) nounwind readnone optsize ssp {
entry:
diff --git a/test/CodeGen/ARM/fold-stack-adjust.ll b/test/CodeGen/ARM/fold-stack-adjust.ll
index 67fd129fd1c9..eb0120f7c1bb 100644
--- a/test/CodeGen/ARM/fold-stack-adjust.ll
+++ b/test/CodeGen/ARM/fold-stack-adjust.ll
@@ -1,6 +1,7 @@
-; RUN: llc -mtriple=thumbv7-apple-darwin-eabi < %s | FileCheck %s
-; RUN: llc -mtriple=thumbv6m-apple-darwin-eabi -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-T1
+; RUN: llc -mtriple=thumbv7-apple-none-macho < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-apple-none-macho -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-T1
; RUN: llc -mtriple=thumbv7-apple-darwin-ios -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-IOS
+; RUN: llc -mtriple=thumbv7--linux-gnueabi -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-LINUX
declare void @bar(i8*)
@@ -92,16 +93,16 @@ define void @check_vfp_fold() minsize {
; folded in except that doing so would clobber the value being returned.
define i64 @check_no_return_clobber() minsize {
; CHECK-LABEL: check_no_return_clobber:
-; CHECK: push.w {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK: push.w {r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NOT: sub sp,
; ...
-; CHECK: add sp, #40
+; CHECK: add sp, #24
; CHECK: pop.w {r11, pc}
; Just to keep iOS FileCheck within previous function:
; CHECK-IOS-LABEL: check_no_return_clobber:
- %var = alloca i8, i32 40
+ %var = alloca i8, i32 20
call void @bar(i8* %var)
ret i64 0
}
@@ -161,4 +162,57 @@ end:
; We want the epilogue to be the only thing in a basic block so that we hit
; the correct edge-case (first inst in block is correct one to adjust).
ret void
-} \ No newline at end of file
+}
+
+define void @test_varsize(...) minsize {
+; CHECK-T1-LABEL: test_varsize:
+; CHECK-T1: sub sp, #16
+; CHECK-T1: push {r2, r3, r4, r5, r7, lr}
+; ...
+; CHECK-T1: pop {r2, r3, r4, r5, r7}
+; CHECK-T1: pop {r3}
+; CHECK-T1: add sp, #16
+; CHECK-T1: bx r3
+
+; CHECK-LABEL: test_varsize:
+; CHECK: sub sp, #16
+; CHECK: push.w {r9, r10, r11, lr}
+; ...
+; CHECK: pop.w {r2, r3, r11, lr}
+; CHECK: add sp, #16
+; CHECK: bx lr
+
+ %var = alloca i8, i32 8
+ call void @bar(i8* %var)
+ ret void
+}
+
+%"MyClass" = type { i8*, i32, i32, float, float, float, [2 x i8], i32, i32* }
+
+declare float @foo()
+
+declare void @bar3()
+
+declare %"MyClass"* @bar2(%"MyClass"* returned, i16*, i32, float, float, i32, i32, i1 zeroext, i1 zeroext, i32)
+
+define fastcc float @check_vfp_no_return_clobber2(i16* %r, i16* %chars, i32 %length, i1 zeroext %flag) minsize {
+entry:
+; CHECK-LINUX-LABEL: check_vfp_no_return_clobber2
+; CHECK-LINUX: vpush {d0, d1, d2, d3, d4, d5, d6, d7, d8}
+; CHECK-NOT: sub sp,
+; ...
+; CHECK-LINUX: add sp
+; CHECK-LINUX: vpop {d8}
+ %run = alloca %"MyClass", align 4
+ %call = call %"MyClass"* @bar2(%"MyClass"* %run, i16* %chars, i32 %length, float 0.000000e+00, float 0.000000e+00, i32 1, i32 1, i1 zeroext false, i1 zeroext true, i32 3)
+ %call1 = call float @foo()
+ %cmp = icmp eq %"MyClass"* %run, null
+ br i1 %cmp, label %exit, label %if.then
+
+if.then: ; preds = %entry
+ call void @bar3()
+ br label %exit
+
+exit: ; preds = %if.then, %entry
+ ret float %call1
+}
diff --git a/test/CodeGen/ARM/formal.ll b/test/CodeGen/ARM/formal.ll
index 4ac10badea97..05a6be1075a8 100644
--- a/test/CodeGen/ARM/formal.ll
+++ b/test/CodeGen/ARM/formal.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
declare void @bar(i64 %x, i64 %y)
diff --git a/test/CodeGen/ARM/fp-arg-shuffle.ll b/test/CodeGen/ARM/fp-arg-shuffle.ll
index ae02b792e4d6..4996cc8ecbf0 100644
--- a/test/CodeGen/ARM/fp-arg-shuffle.ll
+++ b/test/CodeGen/ARM/fp-arg-shuffle.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon -float-abi=soft | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -float-abi=soft %s -o - | FileCheck %s
; CHECK: function1
; CHECK-NOT: vmov
diff --git a/test/CodeGen/ARM/fp-fast.ll b/test/CodeGen/ARM/fp-fast.ll
index ec5718738177..7d95a5efe905 100644
--- a/test/CodeGen/ARM/fp-fast.ll
+++ b/test/CodeGen/ARM/fp-fast.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=arm -mcpu=cortex-a9 -mattr=+vfp4 -enable-unsafe-fp-math < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 -mattr=+vfp4 -enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s
; CHECK: test1
define float @test1(float %x) {
diff --git a/test/CodeGen/ARM/fp.ll b/test/CodeGen/ARM/fp.ll
index fbf3a4a56ad5..7e1f000e88d9 100644
--- a/test/CodeGen/ARM/fp.ll
+++ b/test/CodeGen/ARM/fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+vfp2 %s -o - | FileCheck %s
define float @f(i32 %a) {
;CHECK-LABEL: f:
diff --git a/test/CodeGen/ARM/fp16.ll b/test/CodeGen/ARM/fp16.ll
index a5c1aed277bb..d3f32556a093 100644
--- a/test/CodeGen/ARM/fp16.ll
+++ b/test/CodeGen/ARM/fp16.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s | FileCheck %s
; RUN: llc -mattr=+vfp3,+fp16 < %s | FileCheck --check-prefix=CHECK-FP16 %s
+; RUN: llc -mtriple=armv8-eabi < %s | FileCheck --check-prefix=CHECK-ARMV8 %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32"
target triple = "armv7-eabi"
@@ -9,24 +10,62 @@ target triple = "armv7-eabi"
define arm_aapcs_vfpcc void @foo() nounwind {
; CHECK-LABEL: foo:
-; CHECK-FP6-LABEL: foo:
+; CHECK-FP16-LABEL: foo:
+; CHECK-ARMV8-LABEL: foo:
entry:
%0 = load i16* @x, align 2
%1 = load i16* @y, align 2
- %2 = tail call float @llvm.convert.from.fp16(i16 %0)
+ %2 = tail call float @llvm.convert.from.fp16.f32(i16 %0)
; CHECK: __gnu_h2f_ieee
; CHECK-FP16: vcvtb.f32.f16
- %3 = tail call float @llvm.convert.from.fp16(i16 %1)
+; CHECK-ARMV8: vcvtb.f32.f16
+ %3 = tail call float @llvm.convert.from.fp16.f32(i16 %1)
; CHECK: __gnu_h2f_ieee
; CHECK-FP16: vcvtb.f32.f16
+; CHECK-ARMV8: vcvtb.f32.f16
%4 = fadd float %2, %3
- %5 = tail call i16 @llvm.convert.to.fp16(float %4)
+ %5 = tail call i16 @llvm.convert.to.fp16.f32(float %4)
; CHECK: __gnu_f2h_ieee
; CHECK-FP16: vcvtb.f16.f32
+; CHECK-ARMV8: vcvtb.f16.f32
store i16 %5, i16* @x, align 2
ret void
}
-declare float @llvm.convert.from.fp16(i16) nounwind readnone
+define arm_aapcs_vfpcc double @test_from_fp16(i16 %in) {
+; CHECK-LABEL: test_from_fp16:
+; CHECK-FP16-LABEL: test_from_fp16:
+; CHECK-ARMV8-LABEL: test_from_fp16:
+ %val = call double @llvm.convert.from.fp16.f64(i16 %in)
+; CHECK: bl __gnu_h2f_ieee
+; CHECK: vmov [[TMP:s[0-9]+]], r0
+; CHECK: vcvt.f64.f32 d0, [[TMP]]
-declare i16 @llvm.convert.to.fp16(float) nounwind readnone
+; CHECK-FP16: vmov [[TMP16:s[0-9]+]], r0
+; CHECK-FP16: vcvtb.f32.f16 [[TMP32:s[0-9]+]], [[TMP16]]
+; CHECK-FP16: vcvt.f64.f32 d0, [[TMP32]]
+
+; CHECK-ARMV8: vmov [[TMP:s[0-9]+]], r0
+; CHECK-ARMV8: vcvtb.f64.f16 d0, [[TMP]]
+ ret double %val
+}
+
+define arm_aapcs_vfpcc i16 @test_to_fp16(double %in) {
+; CHECK-LABEL: test_to_fp16:
+; CHECK-FP16-LABEL: test_to_fp16:
+; CHECK-ARMV8-LABEL: test_to_fp16:
+ %val = call i16 @llvm.convert.to.fp16.f64(double %in)
+; CHECK: bl __truncdfhf2
+
+; CHECK-FP16: bl __truncdfhf2
+
+; CHECK-ARMV8: vcvtb.f16.f64 [[TMP:s[0-9]+]], d0
+; CHECK-ARMV8: vmov r0, [[TMP]]
+ ret i16 %val
+}
+
+declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
+declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
+
+declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
+declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone
diff --git a/test/CodeGen/ARM/fp_convert.ll b/test/CodeGen/ARM/fp_convert.ll
index f0d910052a4d..6f4707573fb5 100644
--- a/test/CodeGen/ARM/fp_convert.ll
+++ b/test/CodeGen/ARM/fp_convert.ll
@@ -1,9 +1,20 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -mtriple=arm-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=VFP2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s -check-prefix=NEON
+
+; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=NEON
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
define i32 @test1(float %a, float %b) {
; VFP2-LABEL: test1:
diff --git a/test/CodeGen/ARM/fpcmp-opt.ll b/test/CodeGen/ARM/fpcmp-opt.ll
index 3a0af16bf6d6..eab5988e3eb4 100644
--- a/test/CodeGen/ARM/fpcmp-opt.ll
+++ b/test/CodeGen/ARM/fpcmp-opt.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 -mattr=+vfp2 -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -mattr=+vfp2 -enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s
+
; rdar://7461510
; rdar://10964603
diff --git a/test/CodeGen/ARM/fpcmp.ll b/test/CodeGen/ARM/fpcmp.ll
index 916a1ae4952a..e3ffd45a396d 100644
--- a/test/CodeGen/ARM/fpcmp.ll
+++ b/test/CodeGen/ARM/fpcmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s
define i32 @f1(float %a) {
;CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/fpconsts.ll b/test/CodeGen/ARM/fpconsts.ll
index 0679a47ded7b..5a45a9bd2ba5 100644
--- a/test/CodeGen/ARM/fpconsts.ll
+++ b/test/CodeGen/ARM/fpconsts.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp3 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp3 %s -o - | FileCheck %s
define float @t1(float %x) nounwind readnone optsize {
entry:
diff --git a/test/CodeGen/ARM/fpconv.ll b/test/CodeGen/ARM/fpconv.ll
index 326e0628b4e5..eadf9afd4764 100644
--- a/test/CodeGen/ARM/fpconv.ll
+++ b/test/CodeGen/ARM/fpconv.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s --check-prefix=CHECK-VFP
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefix=CHECK-VFP
+; RUN: llc -mtriple=arm-apple-darwin %s -o - | FileCheck %s
define float @f1(double %x) {
;CHECK-VFP-LABEL: f1:
diff --git a/test/CodeGen/ARM/fpmem.ll b/test/CodeGen/ARM/fpmem.ll
index 8fbd1d805840..3a454ed9631c 100644
--- a/test/CodeGen/ARM/fpmem.ll
+++ b/test/CodeGen/ARM/fpmem.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
+; RUN: llc -mtriple=arm -float-abi=soft -mattr=+vfp2 %s -o - | FileCheck %s
define float @f1(float %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/fpow.ll b/test/CodeGen/ARM/fpow.ll
index 6d487927ee61..3e37724d1c57 100644
--- a/test/CodeGen/ARM/fpow.ll
+++ b/test/CodeGen/ARM/fpow.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define double @t(double %x, double %y) nounwind optsize {
entry:
diff --git a/test/CodeGen/ARM/fptoint.ll b/test/CodeGen/ARM/fptoint.ll
index 740868725e90..f50d0b96fe99 100644
--- a/test/CodeGen/ARM/fptoint.ll
+++ b/test/CodeGen/ARM/fptoint.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -arm-atomic-cfg-tidy=0 -mattr=+v6,+vfp2 %s -o - | FileCheck %s
@i = weak global i32 0 ; <i32*> [#uses=2]
@u = weak global i32 0 ; <i32*> [#uses=2]
diff --git a/test/CodeGen/ARM/frame-register.ll b/test/CodeGen/ARM/frame-register.ll
new file mode 100644
index 000000000000..e6a55bddaf1c
--- /dev/null
+++ b/test/CodeGen/ARM/frame-register.ll
@@ -0,0 +1,38 @@
+; RUN: llc -mtriple arm-eabi -disable-fp-elim -filetype asm -o - %s \
+; RUN: | FileCheck -check-prefix CHECK-ARM %s
+
+; RUN: llc -mtriple thumb-eabi -disable-fp-elim -filetype asm -o - %s \
+; RUN: | FileCheck -check-prefix CHECK-THUMB %s
+
+; RUN: llc -mtriple arm-darwin -disable-fp-elim -filetype asm -o - %s \
+; RUN: | FileCheck -check-prefix CHECK-DARWIN-ARM %s
+
+; RUN: llc -mtriple thumb-darwin -disable-fp-elim -filetype asm -o - %s \
+; RUN: | FileCheck -check-prefix CHECK-DARWIN-THUMB %s
+
+declare void @callee(i32)
+
+define i32 @calleer(i32 %i) {
+entry:
+ %i.addr = alloca i32, align 4
+ %j = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32* %i.addr, align 4
+ %add = add nsw i32 %0, 1
+ store i32 %add, i32* %j, align 4
+ %1 = load i32* %j, align 4
+ call void @callee(i32 %1)
+ %2 = load i32* %j, align 4
+ %add1 = add nsw i32 %2, 1
+ ret i32 %add1
+}
+
+; CHECK-ARM: push {r11, lr}
+; CHECK-ARM: mov r11, sp
+
+; CHECK-THUMB: push {r4, r6, r7, lr}
+; CHECK-THUMB: add r7, sp, #8
+
+; CHECK-DARWIN-ARM: push {r7, lr}
+; CHECK-DARWIN-THUMB: push {r4, r7, lr}
+
diff --git a/test/CodeGen/ARM/fsubs.ll b/test/CodeGen/ARM/fsubs.ll
index 617b01881a2e..baff34ab31fc 100644
--- a/test/CodeGen/ARM/fsubs.ll
+++ b/test/CodeGen/ARM/fsubs.ll
@@ -1,8 +1,17 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=NFP1U
-; RUN: llc < %s -mtriple=arm-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=NFP1U
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NFP0
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
+; RUN: | FileCheck %s -check-prefix=VFP2
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=NFP1
+
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
+; RUN: | FileCheck %s -check-prefix=NFP1U
+
+; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=NFP1U
+
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - \
+; RUN: | FileCheck %s -check-prefix=NFP0
define float @test(float %a, float %b) {
entry:
diff --git a/test/CodeGen/ARM/func-argpassing-endian.ll b/test/CodeGen/ARM/func-argpassing-endian.ll
new file mode 100644
index 000000000000..26f0597a05a7
--- /dev/null
+++ b/test/CodeGen/ARM/func-argpassing-endian.ll
@@ -0,0 +1,122 @@
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm-eabi -mattr=v7,neon | FileCheck --check-prefix=CHECK --check-prefix=CHECK-LE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=armeb-eabi -mattr=v7,neon | FileCheck --check-prefix=CHECK --check-prefix=CHECK-BE %s
+
+@var32 = global i32 0
+@vardouble = global double 0.0
+
+define void @arg_longint( i64 %val ) {
+; CHECK-LABEL: arg_longint:
+; CHECK-LE: str r0, [r1]
+; CHECK-BE: str r1, [r0]
+ %tmp = trunc i64 %val to i32
+ store i32 %tmp, i32* @var32
+ ret void
+}
+
+define void @arg_double( double %val ) {
+; CHECK-LABEL: arg_double:
+; CHECK: strd r0, r1, [r2]
+ store double %val, double* @vardouble
+ ret void
+}
+
+define void @arg_v4i32(<4 x i32> %vec ) {
+; CHECK-LABEL: arg_v4i32:
+; CHECK-LE: vmov {{d[0-9]+}}, r2, r3
+; CHECK-LE: vmov [[ARG_V4I32_REG:d[0-9]+]], r0, r1
+; CHECK-BE: vmov {{d[0-9]+}}, r3, r2
+; CHECK-BE: vmov [[ARG_V4I32_REG:d[0-9]+]], r1, r0
+; CHECK: vst1.32 {[[ARG_V4I32_REG]][0]}, [r0:32]
+ %tmp = extractelement <4 x i32> %vec, i32 0
+ store i32 %tmp, i32* @var32
+ ret void
+}
+
+define void @arg_v2f64(<2 x double> %vec ) {
+; CHECK-LABEL: arg_v2f64:
+; CHECK: strd r0, r1, [r2]
+ %tmp = extractelement <2 x double> %vec, i32 0
+ store double %tmp, double* @vardouble
+ ret void
+}
+
+define i64 @return_longint() {
+; CHECK-LABEL: return_longint:
+; CHECK-LE: mov r0, #42
+; CHECK-LE: mov r1, #0
+; CHECK-BE: mov r0, #0
+; CHECK-BE: mov r1, #42
+ ret i64 42
+}
+
+define double @return_double() {
+; CHECK-LABEL: return_double:
+; CHECK-LE: vmov r0, r1, {{d[0-9]+}}
+; CHECK-BE: vmov r1, r0, {{d[0-9]+}}
+ ret double 1.0
+}
+
+define <4 x i32> @return_v4i32() {
+; CHECK-LABEL: return_v4i32:
+; CHECK-LE: vmov r0, r1, {{d[0-9]+}}
+; CHECK-LE: vmov r2, r3, {{d[0-9]+}}
+; CHECK-BE: vmov r1, r0, {{d[0-9]+}}
+; CHECK-BE: vmov r3, r2, {{d[0-9]+}}
+ ret < 4 x i32> < i32 42, i32 43, i32 44, i32 45 >
+}
+
+define <2 x double> @return_v2f64() {
+; CHECK-LABEL: return_v2f64:
+; CHECK-LE: vmov r0, r1, {{d[0-9]+}}
+; CHECK-LE: vmov r2, r3, {{d[0-9]+}}
+; CHECK-BE: vmov r1, r0, {{d[0-9]+}}
+; CHECK-BE: vmov r3, r2, {{d[0-9]+}}
+ ret <2 x double> < double 3.14, double 6.28 >
+}
+
+define void @caller_arg_longint() {
+; CHECK-LABEL: caller_arg_longint:
+; CHECK-LE: mov r0, #42
+; CHECK-LE: mov r1, #0
+; CHECK-BE: mov r0, #0
+; CHECK-BE: mov r1, #42
+ call void @arg_longint( i64 42 )
+ ret void
+}
+
+define void @caller_arg_double() {
+; CHECK-LABEL: caller_arg_double:
+; CHECK-LE: vmov r0, r1, {{d[0-9]+}}
+; CHECK-BE: vmov r1, r0, {{d[0-9]+}}
+ call void @arg_double( double 1.0 )
+ ret void
+}
+
+define void @caller_return_longint() {
+; CHECK-LABEL: caller_return_longint:
+; CHECK-LE: str r0, [r1]
+; CHECK-BE: str r1, [r0]
+ %val = call i64 @return_longint()
+ %tmp = trunc i64 %val to i32
+ store i32 %tmp, i32* @var32
+ ret void
+}
+
+define void @caller_return_double() {
+; CHECK-LABEL: caller_return_double:
+; CHECK-LE: vmov {{d[0-9]+}}, r0, r1
+; CHECK-BE: vmov {{d[0-9]+}}, r1, r0
+ %val = call double @return_double( )
+ %tmp = fadd double %val, 3.14
+ store double %tmp, double* @vardouble
+ ret void
+}
+
+define void @caller_return_v2f64() {
+; CHECK-LABEL: caller_return_v2f64:
+; CHECK: strd r0, r1, [r2]
+ %val = call <2 x double> @return_v2f64( )
+ %tmp = extractelement <2 x double> %val, i32 0
+ store double %tmp, double* @vardouble
+ ret void
+}
diff --git a/test/CodeGen/ARM/global-merge-1.ll b/test/CodeGen/ARM/global-merge-1.ll
new file mode 100644
index 000000000000..341597e6188c
--- /dev/null
+++ b/test/CodeGen/ARM/global-merge-1.ll
@@ -0,0 +1,85 @@
+; RUN: llc %s -O0 -o - | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O0 -o - -enable-global-merge=false | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O0 -o - -enable-global-merge=true | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O1 -o - | FileCheck -check-prefix=MERGE %s
+; RUN: llc %s -O1 -o - -enable-global-merge=false | FileCheck -check-prefix=NO-MERGE %s
+; RUN: llc %s -O1 -o - -enable-global-merge=true | FileCheck -check-prefix=MERGE %s
+
+; MERGE-NOT: .zerofill __DATA,__bss,_bar,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_baz,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_foo,20,2
+; MERGE: .zerofill __DATA,__bss,__MergedGlobals,60,4
+; MERGE-NOT: .zerofill __DATA,__bss,_bar,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_baz,20,2
+; MERGE-NOT: .zerofill __DATA,__bss,_foo,20,2
+
+; NO-MERGE-NOT: .zerofill __DATA,__bss,__MergedGlobals,60,4
+; NO-MERGE: .zerofill __DATA,__bss,_bar,20,2
+; NO-MERGE: .zerofill __DATA,__bss,_baz,20,2
+; NO-MERGE: .zerofill __DATA,__bss,_foo,20,2
+; NO-MERGE-NOT: .zerofill __DATA,__bss,__MergedGlobals,60,4
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+@bar = internal global [5 x i32] zeroinitializer, align 4
+@baz = internal global [5 x i32] zeroinitializer, align 4
+@foo = internal global [5 x i32] zeroinitializer, align 4
+
+; Function Attrs: nounwind ssp
+define internal void @initialize() #0 {
+ %1 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %1, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 0), align 4, !tbaa !1
+ %2 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %2, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 0), align 4, !tbaa !1
+ %3 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %3, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 1), align 4, !tbaa !1
+ %4 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %4, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 1), align 4, !tbaa !1
+ %5 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %5, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 2), align 4, !tbaa !1
+ %6 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 2), align 4, !tbaa !1
+ %7 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %7, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 3), align 4, !tbaa !1
+ %8 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %8, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 3), align 4, !tbaa !1
+ %9 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %9, i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 4), align 4, !tbaa !1
+ %10 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #3
+ store i32 %10, i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 4), align 4, !tbaa !1
+ ret void
+}
+
+declare i32 @calc(...) #1
+
+; Function Attrs: nounwind ssp
+define internal void @calculate() #0 {
+ %1 = load <4 x i32>* bitcast ([5 x i32]* @bar to <4 x i32>*), align 4
+ %2 = load <4 x i32>* bitcast ([5 x i32]* @baz to <4 x i32>*), align 4
+ %3 = mul <4 x i32> %2, %1
+ store <4 x i32> %3, <4 x i32>* bitcast ([5 x i32]* @foo to <4 x i32>*), align 4
+ %4 = load i32* getelementptr inbounds ([5 x i32]* @bar, i32 0, i32 4), align 4, !tbaa !1
+ %5 = load i32* getelementptr inbounds ([5 x i32]* @baz, i32 0, i32 4), align 4, !tbaa !1
+ %6 = mul nsw i32 %5, %4
+ store i32 %6, i32* getelementptr inbounds ([5 x i32]* @foo, i32 0, i32 4), align 4, !tbaa !1
+ ret void
+}
+
+; Function Attrs: nounwind readnone ssp
+define internal i32* @returnFoo() #2 {
+ ret i32* getelementptr inbounds ([5 x i32]* @foo, i32 0, i32 0)
+}
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"LLVM version 3.4 "}
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"int", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/ARM/half.ll b/test/CodeGen/ARM/half.ll
new file mode 100644
index 000000000000..10cebb38c565
--- /dev/null
+++ b/test/CodeGen/ARM/half.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-OLD
+; RUN: llc < %s -mtriple=thumbv7s-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-F16
+; RUN: llc < %s -mtriple=thumbv8-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8
+
+define void @test_load_store(half* %in, half* %out) {
+; CHECK-LABEL: test_load_store:
+; CHECK: ldrh [[TMP:r[0-9]+]], [r0]
+; CHECK: strh [[TMP]], [r1]
+ %val = load half* %in
+ store half %val, half* %out
+ ret void
+}
+
+define i16 @test_bitcast_from_half(half* %addr) {
+; CHECK-LABEL: test_bitcast_from_half:
+; CHECK: ldrh r0, [r0]
+ %val = load half* %addr
+ %val_int = bitcast half %val to i16
+ ret i16 %val_int
+}
+
+define void @test_bitcast_to_half(half* %addr, i16 %in) {
+; CHECK-LABEL: test_bitcast_to_half:
+; CHECK: strh r1, [r0]
+ %val_fp = bitcast i16 %in to half
+ store half %val_fp, half* %addr
+ ret void
+}
+
+define float @test_extend32(half* %addr) {
+; CHECK-LABEL: test_extend32:
+
+; CHECK-OLD: b.w ___gnu_h2f_ieee
+; CHECK-F16: vcvtb.f32.f16
+; CHECK-V8: vcvtb.f32.f16
+ %val16 = load half* %addr
+ %val32 = fpext half %val16 to float
+ ret float %val32
+}
+
+define double @test_extend64(half* %addr) {
+; CHECK-LABEL: test_extend64:
+
+; CHECK-OLD: blx ___gnu_h2f_ieee
+; CHECK-OLD: vcvt.f64.f32
+; CHECK-F16: vcvtb.f32.f16
+; CHECK-F16: vcvt.f64.f32
+; CHECK-V8: vcvtb.f64.f16
+ %val16 = load half* %addr
+ %val32 = fpext half %val16 to double
+ ret double %val32
+}
+
+define void @test_trunc32(float %in, half* %addr) {
+; CHECK-LABEL: test_trunc32:
+
+; CHECK-OLD: blx ___gnu_f2h_ieee
+; CHECK-F16: vcvtb.f16.f32
+; CHECK-V8: vcvtb.f16.f32
+ %val16 = fptrunc float %in to half
+ store half %val16, half* %addr
+ ret void
+}
+
+define void @test_trunc64(double %in, half* %addr) {
+; CHECK-LABEL: test_trunc64:
+
+; CHECK-OLD: blx ___truncdfhf2
+; CHECK-F16: blx ___truncdfhf2
+; CHECK-V8: vcvtb.f16.f64
+ %val16 = fptrunc double %in to half
+ store half %val16, half* %addr
+ ret void
+}
diff --git a/test/CodeGen/ARM/hello.ll b/test/CodeGen/ARM/hello.ll
index 893b4266ac3c..d2685854dba0 100644
--- a/test/CodeGen/ARM/hello.ll
+++ b/test/CodeGen/ARM/hello.ll
@@ -1,8 +1,11 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi | grep mov | count 1
-; RUN: llc < %s -mtriple=armv6-linux-gnu --disable-fp-elim | \
-; RUN: grep mov | count 2
-; RUN: llc < %s -mtriple=armv6-apple-ios | grep mov | count 2
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
+; RUN: llc -mtriple=armv6-linux-gnueabi %s -o - | FileCheck %s
+
+; RUN: llc -mtriple=armv6-linux-gnu --disable-fp-elim %s -o - \
+; RUN: | FileCheck %s -check-prefix CHECK-FP-ELIM
+
+; RUN: llc -mtriple=armv6-apple-ios %s -o - \
+; RUN: | FileCheck %s -check-prefix CHECK-FP-ELIM
@str = internal constant [12 x i8] c"Hello World\00"
@@ -12,3 +15,11 @@ define i32 @main() {
}
declare i32 @puts(i8*)
+
+; CHECK: mov
+; CHECK-NOT: mov
+
+; CHECK-FP-ELIM: mov
+; CHECK-FP-ELIM: mov
+; CHECK-FP-ELIM-NOT: mov
+
diff --git a/test/CodeGen/ARM/hfa-in-contiguous-registers.ll b/test/CodeGen/ARM/hfa-in-contiguous-registers.ll
new file mode 100644
index 000000000000..f9ec6e0c645a
--- /dev/null
+++ b/test/CodeGen/ARM/hfa-in-contiguous-registers.ll
@@ -0,0 +1,94 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7-none--gnueabihf"
+
+%struct.s = type { float, float }
+%union.t = type { [4 x float] }
+
+; Equivalent C code:
+; struct s { float a; float b; };
+; float foo(float a, double b, struct s c) { return c.a; }
+; Argument allocation:
+; a -> s0
+; b -> d1
+; c -> s4, s5
+; s1 is unused
+; return in s0
+define float @test1(float %a, double %b, %struct.s %c) {
+entry:
+; CHECK-LABEL: test1
+; CHECK: vmov.f32 s0, s4
+; CHECK-NOT: vmov.f32 s0, s1
+
+ %result = extractvalue %struct.s %c, 0
+ ret float %result
+}
+
+; Equivalent C code:
+; union t { float a[4]; };
+; float foo(float a, double b, union t c) { return c.a[0]; }
+; Argument allocation:
+; a -> s0
+; b -> d1
+; c -> s4..s7
+define float @test2(float %a, double %b, %union.t %c) #0 {
+entry:
+; CHECK-LABEL: test2
+; CHECK: vmov.f32 s0, s4
+; CHECK-NOT: vmov.f32 s0, s1
+
+ %result = extractvalue %union.t %c, 0, 0
+ ret float %result
+}
+
+; Equivalent C code:
+; struct s { float a; float b; };
+; float foo(float a, double b, struct s c, float d) { return d; }
+; Argument allocation:
+; a -> s0
+; b -> d1
+; c -> s4, s5
+; d -> s1
+; return in s0
+define float @test3(float %a, double %b, %struct.s %c, float %d) {
+entry:
+; CHECK-LABEL: test3
+; CHECK: vmov.f32 s0, s1
+; CHECK-NOT: vmov.f32 s0, s5
+
+ ret float %d
+}
+
+; Equivalent C code:
+; struct s { float a; float b; };
+; float foo(struct s a, struct s b) { return b.b; }
+; Argument allocation:
+; a -> s0, s1
+; b -> s2, s3
+; return in s0
+define float @test4(%struct.s %a, %struct.s %b) {
+entry:
+; CHECK-LABEL: test4
+; CHECK: vmov.f32 s0, s3
+
+ %result = extractvalue %struct.s %b, 1
+ ret float %result
+}
+
+; Equivalent C code:
+; struct s { float a; float b; };
+; float foo(struct s a, float b, struct s c) { return c.a; }
+; Argument allocation:
+; a -> s0, s1
+; b -> s2
+; c -> s3, s4
+; return in s0
+define float @test5(%struct.s %a, float %b, %struct.s %c) {
+entry:
+; CHECK-LABEL: test5
+; CHECK: vmov.f32 s0, s3
+
+ %result = extractvalue %struct.s %c, 0
+ ret float %result
+}
diff --git a/test/CodeGen/ARM/hints.ll b/test/CodeGen/ARM/hints.ll
new file mode 100644
index 000000000000..18abbbecaaf3
--- /dev/null
+++ b/test/CodeGen/ARM/hints.ll
@@ -0,0 +1,69 @@
+; RUN: llc -mtriple armv7-eabi -o - %s | FileCheck %s
+; RUN: llc -mtriple thumbv6m-eabi -o - %s | FileCheck %s
+; RUN: llc -mtriple thumbv7-eabi -o - %s | FileCheck %s
+
+declare void @llvm.arm.hint(i32) nounwind
+
+define void @hint_nop() {
+entry:
+ tail call void @llvm.arm.hint(i32 0) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_nop
+; CHECK: nop
+
+define void @hint_yield() {
+entry:
+ tail call void @llvm.arm.hint(i32 1) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_yield
+; CHECK: yield
+
+define void @hint_wfe() {
+entry:
+ tail call void @llvm.arm.hint(i32 2) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_wfe
+; CHECK: wfe
+
+define void @hint_wfi() {
+entry:
+ tail call void @llvm.arm.hint(i32 3) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_wfi
+; CHECK: wfi
+
+define void @hint_sev() {
+entry:
+ tail call void @llvm.arm.hint(i32 4) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_sev
+; CHECK: sev
+
+define void @hint_sevl() {
+entry:
+ tail call void @llvm.arm.hint(i32 5) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_sevl
+; CHECK: hint #5
+
+define void @hint_undefined() {
+entry:
+ tail call void @llvm.arm.hint(i32 8) nounwind
+ ret void
+}
+
+; CHECK-LABEL: hint_undefined
+; CHECK: hint #8
+
diff --git a/test/CodeGen/ARM/iabs.ll b/test/CodeGen/ARM/iabs.ll
index 600a8c29ea91..c52caf605dd1 100644
--- a/test/CodeGen/ARM/iabs.ll
+++ b/test/CodeGen/ARM/iabs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v4t | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s
;; Integer absolute value, should produce something as good as: ARM:
;; movs r0, r0
diff --git a/test/CodeGen/ARM/ifconv-kills.ll b/test/CodeGen/ARM/ifconv-kills.ll
index bf54ba2f730c..de80c927cea5 100644
--- a/test/CodeGen/ARM/ifconv-kills.ll
+++ b/test/CodeGen/ARM/ifconv-kills.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march arm -mcpu swift -verify-machineinstrs
+; RUN: llc -mtriple arm-eabi -mcpu swift -verify-machineinstrs %s -o /dev/null
declare i32 @f(i32 %p0, i32 %p1)
diff --git a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
new file mode 100644
index 000000000000..5d8e477d681e
--- /dev/null
+++ b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -mtriple=armv4t--linux-androideabi -print-machineinstrs=if-converter -o /dev/null 2>&1 | FileCheck %s
+; Fix a bug triggered in IfConverterTriangle when CvtBB has multiple
+; predecessors.
+; PR18752
+
+%classK = type { i8, %classF }
+%classF = type { i8 }
+%classL = type { %classG, i32, i32 }
+%classG = type { %classL* }
+%classM2 = type { %classL }
+
+define zeroext i1 @test(%classK* %this, %classM2* nocapture readnone %p1, %classM2* nocapture readnone %p2) align 2 {
+entry:
+ br i1 undef, label %for.end, label %for.body
+
+; Before if conversion, we have
+; for.body -> lor.lhs.false.i (62)
+; -> for.cond.backedge (62)
+; lor.lhs.false.i -> for.cond.backedge (1048575)
+; -> cond.false.i (1)
+; After if conversion, we have
+; for.body -> for.cond.backedge (130023362)
+; -> cond.false.i (62)
+; CHECK: BB#1: derived from LLVM BB %for.body
+; CHECK: Successors according to CFG: BB#2(130023362) BB#4(62)
+for.body:
+ br i1 undef, label %for.cond.backedge, label %lor.lhs.false.i, !prof !1
+
+for.cond.backedge:
+ %tobool = icmp eq %classL* undef, null
+ br i1 %tobool, label %for.end, label %for.body
+
+lor.lhs.false.i:
+ %tobool.i.i7 = icmp eq i32 undef, 0
+ br i1 %tobool.i.i7, label %for.cond.backedge, label %cond.false.i
+
+cond.false.i:
+ call void @_Z3fn1v()
+ unreachable
+
+for.end:
+ br i1 undef, label %if.else.i.i, label %if.then.i.i
+
+if.then.i.i:
+ store %classL* null, %classL** undef, align 4
+ br label %_ZN1M6spliceEv.exit
+
+if.else.i.i:
+ store %classL* null, %classL** null, align 4
+ br label %_ZN1M6spliceEv.exit
+
+_ZN1M6spliceEv.exit:
+ %LIS = getelementptr inbounds %classK* %this, i32 0, i32 1
+ call void @_ZN1F10handleMoveEb(%classF* %LIS, i1 zeroext false)
+ unreachable
+}
+
+declare %classL* @_ZN1M1JI1LS1_EcvPS1_Ev(%classM2*)
+declare void @_ZN1F10handleMoveEb(%classF*, i1 zeroext)
+declare void @_Z3fn1v()
+
+!0 = metadata !{metadata !"clang version 3.5"}
+!1 = metadata !{metadata !"branch_weights", i32 62, i32 62}
diff --git a/test/CodeGen/ARM/ifcvt-branch-weight.ll b/test/CodeGen/ARM/ifcvt-branch-weight.ll
new file mode 100644
index 000000000000..a994d3d01ae8
--- /dev/null
+++ b/test/CodeGen/ARM/ifcvt-branch-weight.ll
@@ -0,0 +1,42 @@
+; RUN: llc < %s -mtriple=thumbv8 -print-machineinstrs=if-converter -arm-atomic-cfg-tidy=0 -o /dev/null 2>&1 | FileCheck %s
+
+%struct.S = type { i8* (i8*)*, [1 x i8] }
+define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
+entry:
+ %0 = getelementptr inbounds %struct.S* %x, i32 0, i32 1, i32 0
+ %1 = load i8* %0, align 1
+ %2 = zext i8 %1 to i32
+ %3 = and i32 %2, 112
+ %4 = icmp eq i32 %3, 0
+ br i1 %4, label %return, label %bb
+
+bb:
+ %5 = getelementptr inbounds %struct.S* %y, i32 0, i32 1, i32 0
+ %6 = load i8* %5, align 1
+ %7 = zext i8 %6 to i32
+ %8 = and i32 %7, 112
+ %9 = icmp eq i32 %8, 0
+ br i1 %9, label %return, label %bb2
+
+; CHECK: BB#2: derived from LLVM BB %bb2
+; CHECK: Successors according to CFG: BB#3(192) BB#4(192)
+
+bb2:
+ %v10 = icmp eq i32 %3, 16
+ br i1 %v10, label %bb4, label %bb3, !prof !0
+
+bb3:
+ %v11 = icmp eq i32 %8, 16
+ br i1 %v11, label %bb4, label %return, !prof !1
+
+bb4:
+ %v12 = ptrtoint %struct.S* %x to i32
+ %phitmp = trunc i32 %v12 to i8
+ ret i8 %phitmp
+
+return:
+ ret i8 1
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 4, i32 12}
+!1 = metadata !{metadata !"branch_weights", i32 8, i32 16}
diff --git a/test/CodeGen/ARM/ifcvt1.ll b/test/CodeGen/ARM/ifcvt1.ll
index 5a55653239d1..cae2399d8736 100644
--- a/test/CodeGen/ARM/ifcvt1.ll
+++ b/test/CodeGen/ARM/ifcvt1.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
-; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s -check-prefix=SWIFT
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=A8
+; RUN: llc -mtriple=arm-eabi -mcpu=swift %s -o - | FileCheck %s -check-prefix=SWIFT
define i32 @t1(i32 %a, i32 %b) {
; A8-LABEL: t1:
diff --git a/test/CodeGen/ARM/ifcvt10.ll b/test/CodeGen/ARM/ifcvt10.ll
index 26c72723b287..509c182fc973 100644
--- a/test/CodeGen/ARM/ifcvt10.ll
+++ b/test/CodeGen/ARM/ifcvt10.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a9 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -arm-atomic-cfg-tidy=0 -mcpu=cortex-a9 | FileCheck %s
; rdar://8402126
; Make sure if-converter is not predicating vldmia and ldmia. These are
; micro-coded and would have long issue latency even if predicated on
diff --git a/test/CodeGen/ARM/ifcvt2.ll b/test/CodeGen/ARM/ifcvt2.ll
index e34edecf57ee..e445416f6e5d 100644
--- a/test/CodeGen/ARM/ifcvt2.ll
+++ b/test/CodeGen/ARM/ifcvt2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v4t | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s
define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: t1:
diff --git a/test/CodeGen/ARM/ifcvt3.ll b/test/CodeGen/ARM/ifcvt3.ll
index fa7d61887d9d..5da63dc5f022 100644
--- a/test/CodeGen/ARM/ifcvt3.ll
+++ b/test/CodeGen/ARM/ifcvt3.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+v4t | grep cmpne | count 1
-; RUN: llc < %s -march=arm -mattr=+v4t | grep bx | count 2
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s -check-prefix CHECK-V4-CMP
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s -check-prefix CHECK-V4-BX
define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: t1:
@@ -22,3 +22,11 @@ cond_next:
%tmp15 = add i32 %b, %a
ret i32 %tmp15
}
+
+; CHECK-V4-CMP: cmpne
+; CHECK-V4-CMP-NOT: cmpne
+
+; CHECK-V4-BX: bx
+; CHECK-V4-BX: bx
+; CHECK-V4-BX-NOT: bx
+
diff --git a/test/CodeGen/ARM/ifcvt4.ll b/test/CodeGen/ARM/ifcvt4.ll
index 53c789d184f6..8c6825aeda97 100644
--- a/test/CodeGen/ARM/ifcvt4.ll
+++ b/test/CodeGen/ARM/ifcvt4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
; Do not if-convert when branches go to the different loops.
; CHECK-LABEL: t:
diff --git a/test/CodeGen/ARM/ifcvt9.ll b/test/CodeGen/ARM/ifcvt9.ll
index 05bdc459c83f..119171627d1c 100644
--- a/test/CodeGen/ARM/ifcvt9.ll
+++ b/test/CodeGen/ARM/ifcvt9.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define fastcc void @t() nounwind {
entry:
diff --git a/test/CodeGen/ARM/illegal-vector-bitcast.ll b/test/CodeGen/ARM/illegal-vector-bitcast.ll
index febe6f56b66c..7208fffbcc85 100644
--- a/test/CodeGen/ARM/illegal-vector-bitcast.ll
+++ b/test/CodeGen/ARM/illegal-vector-bitcast.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -mtriple=arm-linux
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
+; RUN: llc -mtriple=arm-linux %s -o /dev/null
define void @foo(<8 x float>* %f, <8 x float>* %g, <4 x i64>* %y)
{
diff --git a/test/CodeGen/ARM/imm.ll b/test/CodeGen/ARM/imm.ll
index 6f25f9dcb323..e7bc0afff70e 100644
--- a/test/CodeGen/ARM/imm.ll
+++ b/test/CodeGen/ARM/imm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | not grep CPI
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @test1(i32 %A) {
%B = add i32 %A, -268435441 ; <i32> [#uses=1]
@@ -14,3 +14,6 @@ define i32 @test3(i32 %A) {
ret i32 %B
}
+; CHECK-NOT: CPI
+
+
diff --git a/test/CodeGen/ARM/indirect-hidden.ll b/test/CodeGen/ARM/indirect-hidden.ll
new file mode 100644
index 000000000000..ae1c505bb683
--- /dev/null
+++ b/test/CodeGen/ARM/indirect-hidden.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mtriple=thumbv7s-apple-ios7.0 -o - %s | FileCheck %s
+
+@var = external global i32
+@var_hidden = external hidden global i32
+
+define i32* @get_var() {
+ ret i32* @var
+}
+
+define i32* @get_var_hidden() {
+ ret i32* @var_hidden
+}
+
+; CHECK: .section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+
+; CHECK: .indirect_symbol _var
+; CHECK-NEXT: .long 0
+
+; CHECK-NOT: __DATA,__data
+
+; CHECK: .indirect_symbol _var_hidden
+; CHECK-NEXT: .long 0 \ No newline at end of file
diff --git a/test/CodeGen/ARM/indirect-reg-input.ll b/test/CodeGen/ARM/indirect-reg-input.ll
index b936455975c6..17f6a9c96f68 100644
--- a/test/CodeGen/ARM/indirect-reg-input.ll
+++ b/test/CodeGen/ARM/indirect-reg-input.ll
@@ -1,4 +1,4 @@
-; RUN: not llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
+; RUN: not llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - 2>&1 | FileCheck %s
; Check for error message:
; CHECK: error: inline asm not supported yet: don't know how to handle tied indirect register inputs
diff --git a/test/CodeGen/ARM/indirectbr-3.ll b/test/CodeGen/ARM/indirectbr-3.ll
index 5a9c45902edc..291fedb81104 100644
--- a/test/CodeGen/ARM/indirectbr-3.ll
+++ b/test/CodeGen/ARM/indirectbr-3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
; If ARMBaseInstrInfo::AnalyzeBlocks returns the wrong value, which was possible
; for blocks with indirect branches, the IfConverter could end up deleting
diff --git a/test/CodeGen/ARM/indirectbr.ll b/test/CodeGen/ARM/indirectbr.ll
index 1aeeb916e489..7c49cb310f39 100644
--- a/test/CodeGen/ARM/indirectbr.ll
+++ b/test/CodeGen/ARM/indirectbr.ll
@@ -11,6 +11,11 @@ define internal i32 @foo(i32 %i) nounwind {
; THUMB-LABEL: foo:
; THUMB2-LABEL: foo:
entry:
+ ; _nextaddr gets CSEed for use later on.
+; THUMB: ldr r[[NEXTADDR_REG:[0-9]+]], [[NEXTADDR_CPI:LCPI0_[0-9]+]]
+; THUMB: [[NEXTADDR_PCBASE:LPC0_[0-9]]]:
+; THUMB: add r[[NEXTADDR_REG]], pc
+
%0 = load i8** @nextaddr, align 4 ; <i8*> [#uses=2]
%1 = icmp eq i8* %0, null ; <i1> [#uses=1]
; indirect branch gets duplicated here
@@ -53,12 +58,11 @@ L1: ; preds = %L2, %bb2
; ARM: ldr [[R1:r[0-9]+]], LCPI
; ARM: add [[R1b:r[0-9]+]], pc, [[R1]]
; ARM: str [[R1b]]
+
; THUMB-LABEL: %L1
-; THUMB: ldr
-; THUMB: add
; THUMB: ldr [[R2:r[0-9]+]], LCPI
; THUMB: add [[R2]], pc
-; THUMB: str [[R2]]
+; THUMB: str [[R2]], [r[[NEXTADDR_REG]]]
; THUMB2-LABEL: %L1
; THUMB2: ldr [[R2:r[0-9]+]], LCPI
; THUMB2-NEXT: str{{(.w)?}} [[R2]]
@@ -67,4 +71,5 @@ L1: ; preds = %L2, %bb2
}
; ARM: .long Ltmp0-(LPC{{.*}}+8)
; THUMB: .long Ltmp0-(LPC{{.*}}+4)
+; THUMB: .long _nextaddr-([[NEXTADDR_PCBASE]]+4)
; THUMB2: .long Ltmp0
diff --git a/test/CodeGen/ARM/inline-diagnostics.ll b/test/CodeGen/ARM/inline-diagnostics.ll
new file mode 100644
index 000000000000..7b77da22d5f5
--- /dev/null
+++ b/test/CodeGen/ARM/inline-diagnostics.ll
@@ -0,0 +1,16 @@
+; RUN: not llc < %s -verify-machineinstrs -mtriple=armv7-none-linux-gnu -mattr=+neon 2>&1 | FileCheck %s
+
+%struct.float4 = type { float, float, float, float }
+
+; CHECK: error: Don't know how to handle indirect register inputs yet for constraint 'w'
+define float @inline_func(float %f1, float %f2) #0 {
+ %c1 = alloca %struct.float4, align 4
+ %c2 = alloca %struct.float4, align 4
+ %c3 = alloca %struct.float4, align 4
+ call void asm sideeffect "vmul.f32 ${2:q}, ${0:q}, ${1:q}", "=*r,=*r,*w"(%struct.float4* %c1, %struct.float4* %c2, %struct.float4* %c3) #1, !srcloc !1
+ %x = getelementptr inbounds %struct.float4* %c3, i32 0, i32 0
+ %1 = load float* %x, align 4
+ ret float %1
+}
+
+!1 = metadata !{i32 271, i32 305}
diff --git a/test/CodeGen/ARM/inlineasm-64bit.ll b/test/CodeGen/ARM/inlineasm-64bit.ll
index 683a0c4b7d30..d098a4383bc6 100644
--- a/test/CodeGen/ARM/inlineasm-64bit.ll
+++ b/test/CodeGen/ARM/inlineasm-64bit.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -O3 -mtriple=arm-linux-gnueabi | FileCheck %s
-; RUN: llc -mtriple=thumbv7-none-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=arm-linux-gnueabi -no-integrated-as | FileCheck %s
+; RUN: llc -mtriple=thumbv7-none-linux-gnueabi -verify-machineinstrs -no-integrated-as < %s | FileCheck %s
; check if regs are passing correctly
define void @i64_write(i64* %p, i64 %val) nounwind {
; CHECK-LABEL: i64_write:
diff --git a/test/CodeGen/ARM/inlineasm-imm-arm.ll b/test/CodeGen/ARM/inlineasm-imm-arm.ll
index 45dfcf0b82a5..603e52dd2d73 100644
--- a/test/CodeGen/ARM/inlineasm-imm-arm.ll
+++ b/test/CodeGen/ARM/inlineasm-imm-arm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi -no-integrated-as %s -o /dev/null
; Test ARM-mode "I" constraint, for any Data Processing immediate.
define i32 @testI(i32 %x) {
diff --git a/test/CodeGen/ARM/inlineasm-ldr-pseudo.ll b/test/CodeGen/ARM/inlineasm-ldr-pseudo.ll
new file mode 100644
index 000000000000..f63e4b0b3a17
--- /dev/null
+++ b/test/CodeGen/ARM/inlineasm-ldr-pseudo.ll
@@ -0,0 +1,17 @@
+; PR18354
+; We actually need to use -filetype=obj in this test because if we output
+; assembly, the current code path will bypass the parser and just write the
+; raw text out to the Streamer. We need to actually parse the inlineasm to
+; demonstrate the bug. Going the asm->obj route does not show the issue.
+; RUN: llc -mtriple=arm-none-linux < %s -filetype=obj | llvm-objdump -d - | FileCheck %s
+; RUN: llc -mtriple=arm-apple-darwin < %s -filetype=obj | llvm-objdump -d - | FileCheck %s
+; CHECK-LABEL: foo:
+; CHECK: 0: 00 00 9f e5 ldr r0, [pc]
+; CHECK: 4: 0e f0 a0 e1 mov pc, lr
+; Make sure the constant pool entry comes after the return
+; CHECK: 8: 01 00 00 00
+define i32 @foo() nounwind {
+entry:
+ %0 = tail call i32 asm sideeffect "ldr $0,=1", "=r"() nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-arm.ll b/test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-arm.ll
new file mode 100644
index 000000000000..3be378d8adc5
--- /dev/null
+++ b/test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-arm.ll
@@ -0,0 +1,18 @@
+;RUN: llc -mtriple=armv7-linux-gnueabi < %s | llvm-mc -triple=armv7-linux-gnueabi -filetype=obj | llvm-objdump -triple=armv7 -d - | FileCheck %s
+;RUN: llc -mtriple=armv7-linux-gnueabi < %s | FileCheck %s -check-prefix=ASM
+;RUN: llc -mtriple=armv7-apple-darwin < %s | FileCheck %s -check-prefix=ASM
+
+define hidden i32 @bah(i8* %start) #0 align 2 {
+ %1 = ptrtoint i8* %start to i32
+ %2 = tail call i32 asm sideeffect "@ Enter THUMB Mode\0A\09adr r3, 2f+1 \0A\09bx r3 \0A\09.code 16 \0A2: push {r7} \0A\09mov r7, $4 \0A\09svc 0x0 \0A\09pop {r7} \0A\09", "={r0},{r0},{r1},{r2},r,~{r3}"(i32 %1, i32 %1, i32 0, i32 983042) #3
+ %3 = add i32 %1, 1
+ ret i32 %3
+}
+; CHECK: $t
+; CHECK: $a
+; CHECK: 01 00 81 e2 add r0, r1, #1
+
+; .code 32 is implicit
+; ASM-LABEL: bah:
+; ASM: .code 16
+; ASM: .code 32
diff --git a/test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-thumb.ll b/test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-thumb.ll
new file mode 100644
index 000000000000..b9bd4c24da0f
--- /dev/null
+++ b/test/CodeGen/ARM/inlineasm-switch-mode-oneway-from-thumb.ll
@@ -0,0 +1,18 @@
+;RUN: llc -mtriple=thumbv7-linux-gnueabi < %s | llvm-mc -triple=thumbv7-linux-gnueabi -filetype=obj | llvm-objdump -triple=thumbv7 -d - | FileCheck %s
+;RUN: llc -mtriple=thumbv7-linux-gnueabi < %s | FileCheck %s -check-prefix=ASM
+;RUN: llc -mtriple=thumbv7-apple-darwin < %s | FileCheck %s -check-prefix=ASM
+
+define hidden i32 @bah(i8* %start) #0 align 2 {
+ %1 = ptrtoint i8* %start to i32
+ %2 = tail call i32 asm sideeffect "@ Enter ARM Mode \0A\09adr r3, 1f \0A\09bx r3 \0A\09.align 2 \0A\09.code 32 \0A1: push {r7} \0A\09mov r7, $4 \0A\09svc 0x0 \0A\09pop {r7} \0A\09", "={r0},{r0},{r1},{r2},r,~{r3}"(i32 %1, i32 %1, i32 0, i32 983042) #3
+ %3 = add i32 %1, 1
+ ret i32 %3
+}
+; CHECK: $a
+; CHECK: $t
+; CHECK: 48 1c adds r0, r1, #1
+
+; ASM: .code 16
+; ASM-LABEL: bah:
+; ASM: .code 32
+; ASM: .code 16
diff --git a/test/CodeGen/ARM/inlineasm-switch-mode.ll b/test/CodeGen/ARM/inlineasm-switch-mode.ll
new file mode 100644
index 000000000000..65fea114d7de
--- /dev/null
+++ b/test/CodeGen/ARM/inlineasm-switch-mode.ll
@@ -0,0 +1,22 @@
+;RUN: llc -mtriple=thumbv7-linux-gnueabi < %s | llvm-mc -triple=thumbv7-linux-gnueabi -filetype=obj > %t
+; Two pass decoding needed because llvm-objdump does not respect mapping symbols
+;RUN: llvm-objdump -triple=armv7 -d %t | FileCheck %s --check-prefix=ARM
+;RUN: llvm-objdump -triple=thumbv7 -d %t | FileCheck %s --check-prefix=THUMB
+
+define hidden i32 @bah(i8* %start) #0 align 2 {
+ %1 = ptrtoint i8* %start to i32
+ %2 = tail call i32 asm sideeffect "@ Enter ARM Mode \0A\09adr r3, 1f \0A\09bx r3 \0A\09.align 2 \0A\09.code 32 \0A1: push {r7} \0A\09mov r7, $4 \0A\09svc 0x0 \0A\09pop {r7} \0A\09@ Enter THUMB Mode\0A\09adr r3, 2f+1 \0A\09bx r3 \0A\09.code 16 \0A2: \0A\09", "={r0},{r0},{r1},{r2},r,~{r3}"(i32 %1, i32 %1, i32 0, i32 983042) #3
+ %3 = add i32 %1, 1
+ ret i32 %3
+}
+
+; ARM: $a
+; ARM-NEXT: 04 70 2d e5 str r7, [sp, #-4]!
+; ARM: $t
+; ARM-NEXT: 48 1c
+
+; THUMB: $a
+; THUMB-NEXT: 04 70
+; THUMB-NEXT: 2d e5
+; THUMB: $t
+; THUMB-NEXT: 48 1c adds r0, r1, #1
diff --git a/test/CodeGen/ARM/inlineasm.ll b/test/CodeGen/ARM/inlineasm.ll
index cca3c696b4a8..39962e08cdd6 100644
--- a/test/CodeGen/ARM/inlineasm.ll
+++ b/test/CodeGen/ARM/inlineasm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o /dev/null
define i32 @test1(i32 %tmp54) {
%tmp56 = tail call i32 asm "uxtb16 $0,$1", "=r,r"( i32 %tmp54 ) ; <i32> [#uses=1]
diff --git a/test/CodeGen/ARM/inlineasm2.ll b/test/CodeGen/ARM/inlineasm2.ll
index a99bccf5a654..5918738cbb2b 100644
--- a/test/CodeGen/ARM/inlineasm2.ll
+++ b/test/CodeGen/ARM/inlineasm2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define double @__ieee754_sqrt(double %x) {
%tmp2 = tail call double asm "fsqrtd ${0:P}, ${1:P}", "=w,w"( double %x )
diff --git a/test/CodeGen/ARM/inlineasm3.ll b/test/CodeGen/ARM/inlineasm3.ll
index 390a44e375b9..eb7ba59b69bf 100644
--- a/test/CodeGen/ARM/inlineasm3.ll
+++ b/test/CodeGen/ARM/inlineasm3.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon,+v6t2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon,+v6t2 -no-integrated-as %s -o - \
+; RUN: | FileCheck %s
; Radar 7449043
%struct.int32x4_t = type { <4 x i32> }
diff --git a/test/CodeGen/ARM/inlineasm4.ll b/test/CodeGen/ARM/inlineasm4.ll
index 4a1bccaf61c5..a117cd2618f9 100644
--- a/test/CodeGen/ARM/inlineasm4.ll
+++ b/test/CodeGen/ARM/inlineasm4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define double @f(double %x) {
entry:
diff --git a/test/CodeGen/ARM/insn-sched1.ll b/test/CodeGen/ARM/insn-sched1.ll
index d188fae70340..2749a8e7cd2f 100644
--- a/test/CodeGen/ARM/insn-sched1.ll
+++ b/test/CodeGen/ARM/insn-sched1.ll
@@ -1,6 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+v6 |\
-; RUN: grep mov | count 3
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o /dev/null
+; RUN: llc -mtriple=arm-apple-ios -mattr=+v6 %s -o - | FileCheck %s
define i32 @test(i32 %x) {
%tmp = trunc i32 %x to i16 ; <i16> [#uses=1]
@@ -9,3 +8,9 @@ define i32 @test(i32 %x) {
}
declare i32 @f(i32, i16)
+
+; CHECK: mov
+; CHECK: mov
+; CHECK: mov
+; CHECK-NOT: mov
+
diff --git a/test/CodeGen/ARM/integer_insertelement.ll b/test/CodeGen/ARM/integer_insertelement.ll
index 1d72afefb5b8..bf403b92806b 100644
--- a/test/CodeGen/ARM/integer_insertelement.ll
+++ b/test/CodeGen/ARM/integer_insertelement.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -o - -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; This test checks that when inserting one (integer) element into a vector,
; the vector is not spuriously copied. "vorr dX, dY, dY" is the way of moving
diff --git a/test/CodeGen/ARM/interrupt-attr.ll b/test/CodeGen/ARM/interrupt-attr.ll
index 217fd696237e..cb67dd929f41 100644
--- a/test/CodeGen/ARM/interrupt-attr.ll
+++ b/test/CodeGen/ARM/interrupt-attr.ll
@@ -1,6 +1,6 @@
; RUN: llc -mtriple=arm-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A %s
; RUN: llc -mtriple=thumb-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A-THUMB %s
-; RUN: llc -mtriple=thumb-apple-darwin -mcpu=cortex-m3 -o - %s | FileCheck --check-prefix=CHECK-M %s
+; RUN: llc -mtriple=thumb-apple-none-macho -mcpu=cortex-m3 -o - %s | FileCheck --check-prefix=CHECK-M %s
declare arm_aapcscc void @bar()
@@ -12,42 +12,44 @@ define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
; Also need special function return setting pc and CPSR simultaneously.
; CHECK-A-LABEL: irq_fn:
-; CHECK-A: push {r0, r1, r2, r3, r11, lr}
-; CHECK-A: add r11, sp, #16
-; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: push {r0, r1, r2, r3, r10, r11, r12, lr}
+; CHECK-A: add r11, sp, #20
+; CHECK-A-NOT: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; CHECK-A: bl bar
-; CHECK-A: sub sp, r11, #16
-; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
+; CHECK-A: sub sp, r11, #20
+; CHECK-A: pop {r0, r1, r2, r3, r10, r11, r12, lr}
; CHECK-A: subs pc, lr, #4
; CHECK-A-THUMB-LABEL: irq_fn:
-; CHECK-A-THUMB: push {r0, r1, r2, r3, r4, r7, lr}
-; CHECK-A-THUMB: mov r4, sp
+; CHECK-A-THUMB: push.w {r0, r1, r2, r3, r4, r7, r12, lr}
; CHECK-A-THUMB: add r7, sp, #20
+; CHECK-A-THUMB: mov r4, sp
; CHECK-A-THUMB: bic r4, r4, #7
; CHECK-A-THUMB: bl bar
; CHECK-A-THUMB: sub.w r4, r7, #20
; CHECK-A-THUMB: mov sp, r4
-; CHECK-A-THUMB: pop.w {r0, r1, r2, r3, r4, r7, lr}
+; CHECK-A-THUMB: pop.w {r0, r1, r2, r3, r4, r7, r12, lr}
; CHECK-A-THUMB: subs pc, lr, #4
; Normal AAPCS function (r0-r3 pushed onto stack by hardware, lr set to
; appropriate sentinel so no special return needed).
-; CHECK-M: push {r4, r7, lr}
-; CHECK-M: add r7, sp, #4
-; CHECK-M: sub sp, #4
+; CHECK-M-LABEL: irq_fn:
+; CHECK-M: push.w {r4, r10, r11, lr}
+; CHECK-M: add.w r11, sp, #8
; CHECK-M: mov r4, sp
+; CHECK-M: bic r4, r4, #7
; CHECK-M: mov sp, r4
; CHECK-M: blx _bar
-; CHECK-M: subs r4, r7, #4
+; CHECK-M: sub.w r4, r11, #8
; CHECK-M: mov sp, r4
-; CHECK-M: pop {r4, r7, pc}
+; CHECK-M: pop.w {r4, r10, r11, pc}
call arm_aapcscc void @bar()
ret void
}
+; We don't push/pop r12, as it is banked for FIQ
define arm_aapcscc void @fiq_fn() alignstack(8) "interrupt"="FIQ" {
; CHECK-A-LABEL: fiq_fn:
; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
@@ -61,6 +63,8 @@ define arm_aapcscc void @fiq_fn() alignstack(8) "interrupt"="FIQ" {
; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
; CHECK-A: subs pc, lr, #4
+; CHECK-A-THUMB-LABEL: fiq_fn:
+; CHECK-M-LABEL: fiq_fn:
%val = load volatile [16 x i32]* @bigvar
store volatile [16 x i32] %val, [16 x i32]* @bigvar
ret void
@@ -68,13 +72,13 @@ define arm_aapcscc void @fiq_fn() alignstack(8) "interrupt"="FIQ" {
define arm_aapcscc void @swi_fn() alignstack(8) "interrupt"="SWI" {
; CHECK-A-LABEL: swi_fn:
-; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
; CHECK-A: add r11, sp, #44
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
; CHECK-A: sub sp, r11, #44
-; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
; CHECK-A: subs pc, lr, #0
%val = load volatile [16 x i32]* @bigvar
@@ -84,13 +88,13 @@ define arm_aapcscc void @swi_fn() alignstack(8) "interrupt"="SWI" {
define arm_aapcscc void @undef_fn() alignstack(8) "interrupt"="UNDEF" {
; CHECK-A-LABEL: undef_fn:
-; CHECK-A: push {r0, r1, r2, r3, r11, lr}
-; CHECK-A: add r11, sp, #16
-; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: push {r0, r1, r2, r3, r10, r11, r12, lr}
+; CHECK-A: add r11, sp, #20
+; CHECK-A-NOT: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
-; CHECK-A: sub sp, r11, #16
-; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
+; CHECK-A: sub sp, r11, #20
+; CHECK-A: pop {r0, r1, r2, r3, r10, r11, r12, lr}
; CHECK-A: subs pc, lr, #0
call void @bar()
@@ -99,13 +103,13 @@ define arm_aapcscc void @undef_fn() alignstack(8) "interrupt"="UNDEF" {
define arm_aapcscc void @abort_fn() alignstack(8) "interrupt"="ABORT" {
; CHECK-A-LABEL: abort_fn:
-; CHECK-A: push {r0, r1, r2, r3, r11, lr}
-; CHECK-A: add r11, sp, #16
-; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: push {r0, r1, r2, r3, r10, r11, r12, lr}
+; CHECK-A: add r11, sp, #20
+; CHECK-A-NOT: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
-; CHECK-A: sub sp, r11, #16
-; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
+; CHECK-A: sub sp, r11, #20
+; CHECK-A: pop {r0, r1, r2, r3, r10, r11, r12, lr}
; CHECK-A: subs pc, lr, #4
call void @bar()
diff --git a/test/CodeGen/ARM/intrinsics-crypto.ll b/test/CodeGen/ARM/intrinsics-crypto.ll
index c038fe6da84a..96413d341e4c 100644
--- a/test/CodeGen/ARM/intrinsics-crypto.ll
+++ b/test/CodeGen/ARM/intrinsics-crypto.ll
@@ -3,13 +3,13 @@
define arm_aapcs_vfpcc <16 x i8> @test_aesde(<16 x i8>* %a, <16 x i8> *%b) {
%tmp = load <16 x i8>* %a
%tmp2 = load <16 x i8>* %b
- %tmp3 = call <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8> %tmp, <16 x i8> %tmp2)
+ %tmp3 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %tmp, <16 x i8> %tmp2)
; CHECK: aesd.8 q{{[0-9]+}}, q{{[0-9]+}}
- %tmp4 = call <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8> %tmp3, <16 x i8> %tmp2)
+ %tmp4 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %tmp3, <16 x i8> %tmp2)
; CHECK: aese.8 q{{[0-9]+}}, q{{[0-9]+}}
- %tmp5 = call <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8> %tmp4)
+ %tmp5 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %tmp4)
; CHECK: aesimc.8 q{{[0-9]+}}, q{{[0-9]+}}
- %tmp6 = call <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8> %tmp5)
+ %tmp6 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %tmp5)
; CHECK: aesmc.8 q{{[0-9]+}}, q{{[0-9]+}}
ret <16 x i8> %tmp6
}
@@ -18,40 +18,42 @@ define arm_aapcs_vfpcc <4 x i32> @test_sha(<4 x i32> *%a, <4 x i32> *%b, <4 x i3
%tmp = load <4 x i32>* %a
%tmp2 = load <4 x i32>* %b
%tmp3 = load <4 x i32>* %c
- %res1 = call <4 x i32> @llvm.arm.neon.sha1h.v4i32(<4 x i32> %tmp)
+ %scalar = extractelement <4 x i32> %tmp, i32 0
+ %resscalar = call i32 @llvm.arm.neon.sha1h(i32 %scalar)
+ %res1 = insertelement <4 x i32> undef, i32 %resscalar, i32 0
; CHECK: sha1h.32 q{{[0-9]+}}, q{{[0-9]+}}
- %res2 = call <4 x i32> @llvm.arm.neon.sha1c.v4i32(<4 x i32> %tmp2, <4 x i32> %tmp3, <4 x i32> %res1)
+ %res2 = call <4 x i32> @llvm.arm.neon.sha1c(<4 x i32> %tmp2, i32 %scalar, <4 x i32> %res1)
; CHECK: sha1c.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
- %res3 = call <4 x i32> @llvm.arm.neon.sha1m.v4i32(<4 x i32> %res2, <4 x i32> %tmp3, <4 x i32> %res1)
+ %res3 = call <4 x i32> @llvm.arm.neon.sha1m(<4 x i32> %res2, i32 %scalar, <4 x i32> %res1)
; CHECK: sha1m.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
- %res4 = call <4 x i32> @llvm.arm.neon.sha1p.v4i32(<4 x i32> %res3, <4 x i32> %tmp3, <4 x i32> %res1)
+ %res4 = call <4 x i32> @llvm.arm.neon.sha1p(<4 x i32> %res3, i32 %scalar, <4 x i32> %res1)
; CHECK: sha1p.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
- %res5 = call <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32> %res4, <4 x i32> %tmp3, <4 x i32> %res1)
+ %res5 = call <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32> %res4, <4 x i32> %tmp3, <4 x i32> %res1)
; CHECK: sha1su0.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
- %res6 = call <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32> %res5, <4 x i32> %res1)
+ %res6 = call <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32> %res5, <4 x i32> %res1)
; CHECK: sha1su1.32 q{{[0-9]+}}, q{{[0-9]+}}
- %res7 = call <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32> %res6, <4 x i32> %tmp3, <4 x i32> %res1)
+ %res7 = call <4 x i32> @llvm.arm.neon.sha256h(<4 x i32> %res6, <4 x i32> %tmp3, <4 x i32> %res1)
; CHECK: sha256h.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
- %res8 = call <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32> %res7, <4 x i32> %tmp3, <4 x i32> %res1)
+ %res8 = call <4 x i32> @llvm.arm.neon.sha256h2(<4 x i32> %res7, <4 x i32> %tmp3, <4 x i32> %res1)
; CHECK: sha256h2.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
- %res9 = call <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32> %res8, <4 x i32> %tmp3, <4 x i32> %res1)
+ %res9 = call <4 x i32> @llvm.arm.neon.sha256su1(<4 x i32> %res8, <4 x i32> %tmp3, <4 x i32> %res1)
; CHECK: sha256su1.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
- %res10 = call <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32> %res9, <4 x i32> %tmp3)
+ %res10 = call <4 x i32> @llvm.arm.neon.sha256su0(<4 x i32> %res9, <4 x i32> %tmp3)
; CHECK: sha256su0.32 q{{[0-9]+}}, q{{[0-9]+}}
ret <4 x i32> %res10
}
-declare <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8>, <16 x i8>)
-declare <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8>)
-declare <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8>)
-declare <4 x i32> @llvm.arm.neon.sha1h.v4i32(<4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha1c.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha1m.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha1p.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.arm.neon.aesd(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.aese(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.aesimc(<16 x i8>)
+declare <16 x i8> @llvm.arm.neon.aesmc(<16 x i8>)
+declare i32 @llvm.arm.neon.sha1h(i32)
+declare <4 x i32> @llvm.arm.neon.sha1c(<4 x i32>, i32, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1m(<4 x i32>, i32, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1p(<4 x i32>, i32, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256h(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256h2(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256su1(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256su0(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32>, <4 x i32>)
diff --git a/test/CodeGen/ARM/intrinsics-memory-barrier.ll b/test/CodeGen/ARM/intrinsics-memory-barrier.ll
new file mode 100644
index 000000000000..5ee0b3e59902
--- /dev/null
+++ b/test/CodeGen/ARM/intrinsics-memory-barrier.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -mtriple=armv7 -mattr=+db | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7 -mattr=+db | FileCheck %s
+
+; CHECK-LABEL: test
+define void @test() {
+ call void @llvm.arm.dmb(i32 3) ; CHECK: dmb osh
+ call void @llvm.arm.dsb(i32 7) ; CHECK: dsb nsh
+ call void @llvm.arm.isb(i32 15) ; CHECK: isb sy
+ ret void
+}
+
+; The important point is that the compiler should not reorder memory access
+; instructions around the DMB.
+; If it did, the two STRs below would collapse into one STRD.
+; CHECK-LABEL: test_dmb_reordering
+define void @test_dmb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}]
+
+ call void @llvm.arm.dmb(i32 15) ; CHECK: dmb sy
+
+ %d1 = getelementptr i32* %d, i32 1
+ store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
+
+ ret void
+}
+
+; Similarly for DSB.
+; CHECK-LABEL: test_dsb_reordering
+define void @test_dsb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}]
+
+ call void @llvm.arm.dsb(i32 15) ; CHECK: dsb sy
+
+ %d1 = getelementptr i32* %d, i32 1
+ store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
+
+ ret void
+}
+
+; And ISB.
+; CHECK-LABEL: test_isb_reordering
+define void @test_isb_reordering(i32 %a, i32 %b, i32* %d) {
+ store i32 %a, i32* %d ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}]
+
+ call void @llvm.arm.isb(i32 15) ; CHECK: isb sy
+
+ %d1 = getelementptr i32* %d, i32 1
+ store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
+
+ ret void
+}
+
+declare void @llvm.arm.dmb(i32)
+declare void @llvm.arm.dsb(i32)
+declare void @llvm.arm.isb(i32)
diff --git a/test/CodeGen/ARM/intrinsics-overflow.ll b/test/CodeGen/ARM/intrinsics-overflow.ll
new file mode 100644
index 000000000000..af3dd9dd4117
--- /dev/null
+++ b/test/CodeGen/ARM/intrinsics-overflow.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -mtriple=arm-linux -mcpu=generic | FileCheck %s
+
+define i32 @uadd_overflow(i32 %a, i32 %b) #0 {
+ %sadd = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+ %1 = extractvalue { i32, i1 } %sadd, 1
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+
+ ; CHECK-LABEL: uadd_overflow:
+ ; CHECK: add r[[R2:[0-9]+]], r[[R0:[0-9]+]], r[[R1:[0-9]+]]
+ ; CHECK: mov r[[R1]], #1
+ ; CHECK: cmp r[[R2]], r[[R0]]
+ ; CHECK: movhs r[[R1]], #0
+}
+
+
+define i32 @sadd_overflow(i32 %a, i32 %b) #0 {
+ %sadd = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+ %1 = extractvalue { i32, i1 } %sadd, 1
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+
+ ; CHECK-LABEL: sadd_overflow:
+ ; CHECK: add r[[R2:[0-9]+]], r[[R0:[0-9]+]], r[[R1:[0-9]+]]
+ ; CHECK: mov r[[R1]], #1
+ ; CHECK: cmp r[[R2]], r[[R0]]
+ ; CHECK: movvc r[[R1]], #0
+}
+
+define i32 @usub_overflow(i32 %a, i32 %b) #0 {
+ %sadd = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+ %1 = extractvalue { i32, i1 } %sadd, 1
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+
+ ; CHECK-LABEL: usub_overflow:
+ ; CHECK: mov r[[R2]], #1
+ ; CHECK: cmp r[[R0]], r[[R1]]
+ ; CHECK: movhs r[[R2]], #0
+}
+
+define i32 @ssub_overflow(i32 %a, i32 %b) #0 {
+ %sadd = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+ %1 = extractvalue { i32, i1 } %sadd, 1
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+
+ ; CHECK-LABEL: ssub_overflow:
+ ; CHECK: mov r[[R2]], #1
+ ; CHECK: cmp r[[R0]], r[[R1]]
+ ; CHECK: movvc r[[R2]], #0
+}
+
+declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) #1
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) #2
+declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #3
+declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) #4
diff --git a/test/CodeGen/ARM/intrinsics-v8.ll b/test/CodeGen/ARM/intrinsics-v8.ll
index 247bfc1e5884..ab1c3c00e405 100644
--- a/test/CodeGen/ARM/intrinsics-v8.ll
+++ b/test/CodeGen/ARM/intrinsics-v8.ll
@@ -10,10 +10,10 @@ define void @test() {
; CHECK: dsb ishld
call void @llvm.arm.dsb(i32 9)
; CHECK: sevl
- tail call void @llvm.arm.sevl() nounwind
+ tail call void @llvm.arm.hint(i32 5) nounwind
ret void
}
declare void @llvm.arm.dmb(i32)
declare void @llvm.arm.dsb(i32)
-declare void @llvm.arm.sevl() nounwind
+declare void @llvm.arm.hint(i32) nounwind
diff --git a/test/CodeGen/ARM/ispositive.ll b/test/CodeGen/ARM/ispositive.ll
index 2f1a2cfd7786..3086d7983e16 100644
--- a/test/CodeGen/ARM/ispositive.ll
+++ b/test/CodeGen/ARM/ispositive.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @test1(i32 %X) {
; CHECK: lsr{{.*}}#31
diff --git a/test/CodeGen/ARM/jump_tables.ll b/test/CodeGen/ARM/jump_tables.ll
new file mode 100644
index 000000000000..907a86c25387
--- /dev/null
+++ b/test/CodeGen/ARM/jump_tables.ll
@@ -0,0 +1,32 @@
+; RUN: llc <%s -mtriple=arm-unknown-linux-gnueabi -jump-table-type=single | FileCheck --check-prefix=ARM %s
+; RUN: llc <%s -mtriple=thumb-unknown-linux-gnueabi -jump-table-type=single | FileCheck --check-prefix=THUMB %s
+
+define void @indirect_fun() unnamed_addr jumptable {
+ ret void
+}
+define void ()* @get_fun() {
+ ret void ()* @indirect_fun
+
+; ARM: ldr r0, [[LABEL:.*]]
+; ARM: mov pc, lr
+; ARM: [[LABEL]]:
+; ARM: .long __llvm_jump_instr_table_0_1
+
+; THUMB: ldr r0, [[LABEL:.*]]
+; THUMB: bx lr
+; THUMB: [[LABEL]]:
+; THUMB: .long __llvm_jump_instr_table_0_1
+}
+
+; ARM: .globl __llvm_jump_instr_table_0_1
+; ARM: .align 3
+; ARM: .type __llvm_jump_instr_table_0_1,%function
+; ARM: __llvm_jump_instr_table_0_1:
+; ARM: b indirect_fun(PLT)
+
+; THUMB: .globl __llvm_jump_instr_table_0_1
+; THUMB: .align 3
+; THUMB: .thumb_func
+; THUMB: .type __llvm_jump_instr_table_0_1,%function
+; THUMB: __llvm_jump_instr_table_0_1:
+; THUMB: b indirect_fun(PLT)
diff --git a/test/CodeGen/ARM/large-stack.ll b/test/CodeGen/ARM/large-stack.ll
index ddf0f0ec7cc0..1a9a1fadeebe 100644
--- a/test/CodeGen/ARM/large-stack.ll
+++ b/test/CodeGen/ARM/large-stack.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define void @test1() {
%tmp = alloca [ 64 x i32 ] , align 4
diff --git a/test/CodeGen/ARM/ldaex-stlex.ll b/test/CodeGen/ARM/ldaex-stlex.ll
new file mode 100644
index 000000000000..bfdfea331563
--- /dev/null
+++ b/test/CodeGen/ARM/ldaex-stlex.ll
@@ -0,0 +1,92 @@
+; RUN: llc < %s -mtriple=armv8-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8-apple-darwin | FileCheck %s
+
+%0 = type { i32, i32 }
+
+; CHECK-LABEL: f0:
+; CHECK: ldaexd
+define i64 @f0(i8* %p) nounwind readonly {
+entry:
+ %ldaexd = tail call %0 @llvm.arm.ldaexd(i8* %p)
+ %0 = extractvalue %0 %ldaexd, 1
+ %1 = extractvalue %0 %ldaexd, 0
+ %2 = zext i32 %0 to i64
+ %3 = zext i32 %1 to i64
+ %shl = shl nuw i64 %2, 32
+ %4 = or i64 %shl, %3
+ ret i64 %4
+}
+
+; CHECK-LABEL: f1:
+; CHECK: stlexd
+define i32 @f1(i8* %ptr, i64 %val) nounwind {
+entry:
+ %tmp4 = trunc i64 %val to i32
+ %tmp6 = lshr i64 %val, 32
+ %tmp7 = trunc i64 %tmp6 to i32
+ %stlexd = tail call i32 @llvm.arm.stlexd(i32 %tmp4, i32 %tmp7, i8* %ptr)
+ ret i32 %stlexd
+}
+
+declare %0 @llvm.arm.ldaexd(i8*) nounwind readonly
+declare i32 @llvm.arm.stlexd(i32, i32, i8*) nounwind
+
+; CHECK-LABEL: test_load_i8:
+; CHECK: ldaexb r0, [r0]
+; CHECK-NOT: uxtb
+; CHECK-NOT: and
+define zeroext i8 @test_load_i8(i8* %addr) {
+ %val = call i32 @llvm.arm.ldaex.p0i8(i8* %addr)
+ %val8 = trunc i32 %val to i8
+ ret i8 %val8
+}
+
+; CHECK-LABEL: test_load_i16:
+; CHECK: ldaexh r0, [r0]
+; CHECK-NOT: uxth
+; CHECK-NOT: and
+define zeroext i16 @test_load_i16(i16* %addr) {
+ %val = call i32 @llvm.arm.ldaex.p0i16(i16* %addr)
+ %val16 = trunc i32 %val to i16
+ ret i16 %val16
+}
+
+; CHECK-LABEL: test_load_i32:
+; CHECK: ldaex r0, [r0]
+define i32 @test_load_i32(i32* %addr) {
+ %val = call i32 @llvm.arm.ldaex.p0i32(i32* %addr)
+ ret i32 %val
+}
+
+declare i32 @llvm.arm.ldaex.p0i8(i8*) nounwind readonly
+declare i32 @llvm.arm.ldaex.p0i16(i16*) nounwind readonly
+declare i32 @llvm.arm.ldaex.p0i32(i32*) nounwind readonly
+
+; CHECK-LABEL: test_store_i8:
+; CHECK-NOT: uxtb
+; CHECK: stlexb r0, r1, [r2]
+define i32 @test_store_i8(i32, i8 %val, i8* %addr) {
+ %extval = zext i8 %val to i32
+ %res = call i32 @llvm.arm.stlex.p0i8(i32 %extval, i8* %addr)
+ ret i32 %res
+}
+
+; CHECK-LABEL: test_store_i16:
+; CHECK-NOT: uxth
+; CHECK: stlexh r0, r1, [r2]
+define i32 @test_store_i16(i32, i16 %val, i16* %addr) {
+ %extval = zext i16 %val to i32
+ %res = call i32 @llvm.arm.stlex.p0i16(i32 %extval, i16* %addr)
+ ret i32 %res
+}
+
+; CHECK-LABEL: test_store_i32:
+; CHECK: stlex r0, r1, [r2]
+define i32 @test_store_i32(i32, i32 %val, i32* %addr) {
+ %res = call i32 @llvm.arm.stlex.p0i32(i32 %val, i32* %addr)
+ ret i32 %res
+}
+
+declare i32 @llvm.arm.stlex.p0i8(i32, i8*) nounwind
+declare i32 @llvm.arm.stlex.p0i16(i32, i16*) nounwind
+declare i32 @llvm.arm.stlex.p0i32(i32, i32*) nounwind
diff --git a/test/CodeGen/ARM/ldm.ll b/test/CodeGen/ARM/ldm.ll
index d5b805c721b7..3977da6da9cb 100644
--- a/test/CodeGen/ARM/ldm.ll
+++ b/test/CodeGen/ARM/ldm.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=armv4t-apple-darwin | FileCheck %s -check-prefix=V4T
+; RUN: llc < %s -mtriple=armv7-apple-ios3.0 | FileCheck %s
+; RUN: llc < %s -mtriple=armv4t-apple-ios3.0 | FileCheck %s -check-prefix=V4T
@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
diff --git a/test/CodeGen/ARM/ldr.ll b/test/CodeGen/ARM/ldr.ll
index e4c695b87bec..57e9977ff872 100644
--- a/test/CodeGen/ARM/ldr.ll
+++ b/test/CodeGen/ARM/ldr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1(i32* %v) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/ldr_ext.ll b/test/CodeGen/ARM/ldr_ext.ll
index d29eb022bace..31aaba5c3c21 100644
--- a/test/CodeGen/ARM/ldr_ext.ll
+++ b/test/CodeGen/ARM/ldr_ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @test1(i8* %t1) nounwind {
; CHECK: ldrb
diff --git a/test/CodeGen/ARM/ldr_frame.ll b/test/CodeGen/ARM/ldr_frame.ll
index f071b8922d6f..ed964ecd3f83 100644
--- a/test/CodeGen/ARM/ldr_frame.ll
+++ b/test/CodeGen/ARM/ldr_frame.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v4t | not grep mov
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s
define i32 @f1() {
%buf = alloca [32 x i32], align 4
@@ -29,3 +29,6 @@ define i32 @f4() {
%tmp2 = zext i8 %tmp1 to i32
ret i32 %tmp2
}
+
+; CHECK-NOT: mov
+
diff --git a/test/CodeGen/ARM/ldr_post.ll b/test/CodeGen/ARM/ldr_post.ll
index f5ff7dda5e04..2558b16f3d3c 100644
--- a/test/CodeGen/ARM/ldr_post.ll
+++ b/test/CodeGen/ARM/ldr_post.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=swift %s -o - | FileCheck %s
; CHECK-LABEL: test1:
; CHECK: ldr {{.*, \[.*]}}, -r2
diff --git a/test/CodeGen/ARM/ldr_pre.ll b/test/CodeGen/ARM/ldr_pre.ll
index 82818272cf22..a97927a20ab1 100644
--- a/test/CodeGen/ARM/ldr_pre.ll
+++ b/test/CodeGen/ARM/ldr_pre.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=swift %s -o - | FileCheck %s
; CHECK-LABEL: test1:
; CHECK: ldr {{.*!}}
diff --git a/test/CodeGen/ARM/ldrd.ll b/test/CodeGen/ARM/ldrd.ll
index 864d18a88ae6..caef2e78bbfa 100644
--- a/test/CodeGen/ARM/ldrd.ll
+++ b/test/CodeGen/ARM/ldrd.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=fast -optimize-regalloc=0 | FileCheck %s -check-prefix=A8
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-m3 -regalloc=fast -optimize-regalloc=0 | FileCheck %s -check-prefix=M3
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=fast -optimize-regalloc=0 | FileCheck %s -check-prefix=A8 -check-prefix=CHECK
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-m3 -regalloc=fast -optimize-regalloc=0 | FileCheck %s -check-prefix=M3 -check-prefix=CHECK
; rdar://6949835
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s -check-prefix=BASIC
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=greedy | FileCheck %s -check-prefix=GREEDY
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s -check-prefix=BASIC -check-prefix=CHECK
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=greedy | FileCheck %s -check-prefix=GREEDY -check-prefix=CHECK
; Magic ARM pair hints works best with linearscan / fast.
diff --git a/test/CodeGen/ARM/ldstrex-m.ll b/test/CodeGen/ARM/ldstrex-m.ll
new file mode 100644
index 000000000000..b50699f4cde6
--- /dev/null
+++ b/test/CodeGen/ARM/ldstrex-m.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -mtriple=thumbv7m-none-eabi -mcpu=cortex-m4 | FileCheck %s
+
+; CHECK-LABEL: f0:
+; CHECK-NOT: ldrexd
+define i64 @f0(i64* %p) nounwind readonly {
+entry:
+ %0 = load atomic i64* %p seq_cst, align 8
+ ret i64 %0
+}
+
+; CHECK-LABEL: f1:
+; CHECK-NOT: strexd
+define void @f1(i64* %p) nounwind readonly {
+entry:
+ store atomic i64 0, i64* %p seq_cst, align 8
+ ret void
+}
+
+; CHECK-LABEL: f2:
+; CHECK-NOT: ldrexd
+; CHECK-NOT: strexd
+define i64 @f2(i64* %p) nounwind readonly {
+entry:
+ %0 = atomicrmw add i64* %p, i64 1 seq_cst
+ ret i64 %0
+}
+
+; CHECK-LABEL: f3:
+; CHECK: ldr
+define i32 @f3(i32* %p) nounwind readonly {
+entry:
+ %0 = load atomic i32* %p seq_cst, align 4
+ ret i32 %0
+}
+
+; CHECK-LABEL: f4:
+; CHECK: ldrb
+define i8 @f4(i8* %p) nounwind readonly {
+entry:
+ %0 = load atomic i8* %p seq_cst, align 4
+ ret i8 %0
+}
+
+; CHECK-LABEL: f5:
+; CHECK: str
+define void @f5(i32* %p) nounwind readonly {
+entry:
+ store atomic i32 0, i32* %p seq_cst, align 4
+ ret void
+}
+
+; CHECK-LABEL: f6:
+; CHECK: ldrex
+; CHECK: strex
+define i32 @f6(i32* %p) nounwind readonly {
+entry:
+ %0 = atomicrmw add i32* %p, i32 1 seq_cst
+ ret i32 %0
+}
diff --git a/test/CodeGen/ARM/ldstrex.ll b/test/CodeGen/ARM/ldstrex.ll
index 5eaae53da994..a40e255e83ea 100644
--- a/test/CodeGen/ARM/ldstrex.ll
+++ b/test/CodeGen/ARM/ldstrex.ll
@@ -36,17 +36,21 @@ declare i32 @llvm.arm.strexd(i32, i32, i8*) nounwind
; CHECK-LABEL: test_load_i8:
; CHECK: ldrexb r0, [r0]
; CHECK-NOT: uxtb
-define i32 @test_load_i8(i8* %addr) {
+; CHECK-NOT: and
+define zeroext i8 @test_load_i8(i8* %addr) {
%val = call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
- ret i32 %val
+ %val8 = trunc i32 %val to i8
+ ret i8 %val8
}
; CHECK-LABEL: test_load_i16:
; CHECK: ldrexh r0, [r0]
; CHECK-NOT: uxth
-define i32 @test_load_i16(i16* %addr) {
+; CHECK-NOT: and
+define zeroext i16 @test_load_i16(i16* %addr) {
%val = call i32 @llvm.arm.ldrex.p0i16(i16* %addr)
- ret i32 %val
+ %val16 = trunc i32 %val to i16
+ ret i16 %val16
}
; CHECK-LABEL: test_load_i32:
@@ -137,3 +141,19 @@ define void @excl_addrmode() {
ret void
}
+
+; LLVM should know, even across basic blocks, that ldrex is setting the high
+; bits of its i32 to 0. There should be no zero-extend operation.
+define zeroext i8 @test_cross_block_zext_i8(i1 %tst, i8* %addr) {
+; CHECK: test_cross_block_zext_i8:
+; CHECK-NOT: uxtb
+; CHECK-NOT: and
+; CHECK: bx lr
+ %val = call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
+ br i1 %tst, label %end, label %mid
+mid:
+ ret i8 42
+end:
+ %val8 = trunc i32 %val to i8
+ ret i8 %val8
+}
diff --git a/test/CodeGen/ARM/lit.local.cfg b/test/CodeGen/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/CodeGen/ARM/lit.local.cfg
+++ b/test/CodeGen/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/ARM/load.ll b/test/CodeGen/ARM/load.ll
index 253b0e145f81..ca16adc00822 100644
--- a/test/CodeGen/ARM/load.ll
+++ b/test/CodeGen/ARM/load.ll
@@ -1,9 +1,4 @@
-; RUN: llc < %s -march=arm > %t
-; RUN: grep ldrsb %t
-; RUN: grep ldrb %t
-; RUN: grep ldrsh %t
-; RUN: grep ldrh %t
-
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1(i8* %p) {
entry:
@@ -32,3 +27,9 @@ entry:
%tmp4 = zext i16 %tmp to i32 ; <i32> [#uses=1]
ret i32 %tmp4
}
+
+; CHECK: ldrsb
+; CHECK: ldrb
+; CHECK: ldrsh
+; CHECK: ldrh
+
diff --git a/test/CodeGen/ARM/long-setcc.ll b/test/CodeGen/ARM/long-setcc.ll
index c76a5e4d4d1f..f09167ed9e78 100644
--- a/test/CodeGen/ARM/long-setcc.ll
+++ b/test/CodeGen/ARM/long-setcc.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm | grep cmp | count 1
-
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i1 @t1(i64 %x) {
%B = icmp slt i64 %x, 0
@@ -15,3 +14,7 @@ define i1 @t3(i32 %x) {
%tmp = icmp ugt i32 %x, -1
ret i1 %tmp
}
+
+; CHECK: cmp
+; CHECK-NOT: cmp
+
diff --git a/test/CodeGen/ARM/long.ll b/test/CodeGen/ARM/long.ll
index 7fffc81797cb..d0bff4a906e0 100644
--- a/test/CodeGen/ARM/long.ll
+++ b/test/CodeGen/ARM/long.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i64 @f1() {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/longMAC.ll b/test/CodeGen/ARM/longMAC.ll
index 2cf91c32bc1a..fed6ec02f32d 100644
--- a/test/CodeGen/ARM/longMAC.ll
+++ b/test/CodeGen/ARM/longMAC.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=CHECK --check-prefix=CHECK-LE
+; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s --check-prefix=CHECK-V7-LE
+; RUN: llc -mtriple=armeb-eabi %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
+; RUN: llc -mtriple=armebv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7-BE
; Check generated signed and unsigned multiply accumulate long.
define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
@@ -42,3 +45,33 @@ define i64 @MACLongTest4(i32 %a, i32 %b, i32 %c) {
%add = add nsw i64 %mul, %conv2
ret i64 %add
}
+
+; Two things to check here: the @earlyclobber constraint (on <= v5) and the "$Rd = $R" constraints.
+; + Without @earlyclobber the v7 code is natural. With it, the first two
+; registers must be distinct from the third.
+; + Without "$Rd = $R", this can be satisfied without a mov before the umlal
+; by trying to use 6 different registers in the MachineInstr. The natural
+; evolution of this attempt currently leaves only two movs in the final
+; function, both after the umlal. With it, *some* move has to happen
+; before the umlal.
+define i64 @MACLongTest5(i64 %c, i32 %a, i32 %b) {
+; CHECK-V7-LE-LABEL: MACLongTest5:
+; CHECK-V7-LE-LABEL: umlal r0, r1, r0, r0
+; CHECK-V7-BE-LABEL: MACLongTest5:
+; CHECK-V7-BE-LABEL: umlal r1, r0, r1, r1
+
+; CHECK-LABEL: MACLongTest5:
+; CHECK-LE: mov [[RDLO:r[0-9]+]], r0
+; CHECK-LE: umlal [[RDLO]], r1, r0, r0
+; CHECK-LE: mov r0, [[RDLO]]
+; CHECK-BE: mov [[RDLO:r[0-9]+]], r1
+; CHECK-BE: umlal [[RDLO]], r0, r1, r1
+; CHECK-BE: mov r1, [[RDLO]]
+
+ %conv.trunc = trunc i64 %c to i32
+ %conv = zext i32 %conv.trunc to i64
+ %conv1 = zext i32 %b to i64
+ %mul = mul i64 %conv, %conv
+ %add = add i64 %mul, %c
+ ret i64 %add
+}
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index 3e986d802d81..3ec5fa41aa6f 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -1,11 +1,16 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -mtriple=armeb-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
define i64 @f0(i64 %A, i64 %B) {
; CHECK-LABEL: f0:
-; CHECK: lsrs r3, r3, #1
-; CHECK-NEXT: rrx r2, r2
-; CHECK-NEXT: subs r0, r0, r2
-; CHECK-NEXT: sbc r1, r1, r3
+; CHECK-LE: lsrs r3, r3, #1
+; CHECK-LE-NEXT: rrx r2, r2
+; CHECK-LE-NEXT: subs r0, r0, r2
+; CHECK-LE-NEXT: sbc r1, r1, r3
+; CHECK-BE: lsrs r2, r2, #1
+; CHECK-BE-NEXT: rrx r3, r3
+; CHECK-BE-NEXT: subs r1, r1, r3
+; CHECK-BE-NEXT: sbc r0, r0, r2
%tmp = bitcast i64 %A to i64
%tmp2 = lshr i64 %B, 1
%tmp3 = sub i64 %tmp, %tmp2
@@ -14,7 +19,8 @@ define i64 @f0(i64 %A, i64 %B) {
define i32 @f1(i64 %x, i64 %y) {
; CHECK-LABEL: f1:
-; CHECK: lsl{{.*}}r2
+; CHECK-LE: lsl{{.*}}r2
+; CHECK-BE: lsl{{.*}}r3
%a = shl i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
@@ -22,12 +28,20 @@ define i32 @f1(i64 %x, i64 %y) {
define i32 @f2(i64 %x, i64 %y) {
; CHECK-LABEL: f2:
-; CHECK: lsr{{.*}}r2
-; CHECK-NEXT: rsb r3, r2, #32
-; CHECK-NEXT: sub r2, r2, #32
-; CHECK-NEXT: orr r0, r0, r1, lsl r3
-; CHECK-NEXT: cmp r2, #0
-; CHECK-NEXT: asrge r0, r1, r2
+; CHECK-LE: lsr{{.*}}r2
+; CHECK-LE-NEXT: rsb r3, r2, #32
+; CHECK-LE-NEXT: sub r2, r2, #32
+; CHECK-LE-NEXT: orr r0, r0, r1, lsl r3
+; CHECK-LE-NEXT: cmp r2, #0
+; CHECK-LE-NEXT: asrge r0, r1, r2
+
+; CHECK-BE: lsr{{.*}}r3
+; CHECK-BE-NEXT: rsb r2, r3, #32
+; CHECK-BE-NEXT: orr r1, r1, r0, lsl r2
+; CHECK-BE-NEXT: sub r2, r3, #32
+; CHECK-BE-NEXT: cmp r2, #0
+; CHECK-BE-NEXT: asrge r1, r0, r2
+
%a = ashr i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
@@ -35,12 +49,20 @@ define i32 @f2(i64 %x, i64 %y) {
define i32 @f3(i64 %x, i64 %y) {
; CHECK-LABEL: f3:
-; CHECK: lsr{{.*}}r2
-; CHECK-NEXT: rsb r3, r2, #32
-; CHECK-NEXT: sub r2, r2, #32
-; CHECK-NEXT: orr r0, r0, r1, lsl r3
-; CHECK-NEXT: cmp r2, #0
-; CHECK-NEXT: lsrge r0, r1, r2
+; CHECK-LE: lsr{{.*}}r2
+; CHECK-LE-NEXT: rsb r3, r2, #32
+; CHECK-LE-NEXT: sub r2, r2, #32
+; CHECK-LE-NEXT: orr r0, r0, r1, lsl r3
+; CHECK-LE-NEXT: cmp r2, #0
+; CHECK-LE-NEXT: lsrge r0, r1, r2
+
+; CHECK-BE: lsr{{.*}}r3
+; CHECK-BE-NEXT: rsb r2, r3, #32
+; CHECK-BE-NEXT: orr r1, r1, r0, lsl r2
+; CHECK-BE-NEXT: sub r2, r3, #32
+; CHECK-BE-NEXT: cmp r2, #0
+; CHECK-BE-NEXT: lsrge r1, r0, r2
+
%a = lshr i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
diff --git a/test/CodeGen/ARM/lsr-scale-addr-mode.ll b/test/CodeGen/ARM/lsr-scale-addr-mode.ll
index 0c8d38748909..948024163ba7 100644
--- a/test/CodeGen/ARM/lsr-scale-addr-mode.ll
+++ b/test/CodeGen/ARM/lsr-scale-addr-mode.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | grep lsl | grep -F "lsl #2]"
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
; Should use scaled addressing mode.
define void @sintzero(i32* %a) nounwind {
@@ -17,3 +17,6 @@ cond_next: ; preds = %cond_next, %entry
return: ; preds = %cond_next
ret void
}
+
+; CHECK: lsl{{.*}}#2]
+
diff --git a/test/CodeGen/ARM/lsr-unfolded-offset.ll b/test/CodeGen/ARM/lsr-unfolded-offset.ll
index 26d4be2e06ff..3ad60d47b53b 100644
--- a/test/CodeGen/ARM/lsr-unfolded-offset.ll
+++ b/test/CodeGen/ARM/lsr-unfolded-offset.ll
@@ -1,10 +1,10 @@
-; RUN: llc -regalloc=greedy < %s | FileCheck %s
+; RUN: llc -regalloc=greedy -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
; LSR shouldn't introduce more induction variables than needed, increasing
; register pressure and therefore spilling. There is more room for improvement
; here.
-; CHECK: sub sp, #{{40|32|28|24}}
+; CHECK: sub sp, #{{40|36|32|28|24}}
; CHECK: %for.inc
; CHECK-NOT: ldr
diff --git a/test/CodeGen/ARM/machine-licm.ll b/test/CodeGen/ARM/machine-licm.ll
index fc9b22614d6d..ca6550178f92 100644
--- a/test/CodeGen/ARM/machine-licm.ll
+++ b/test/CodeGen/ARM/machine-licm.ll
@@ -5,20 +5,12 @@
; rdar://7354376
; rdar://8887598
-; The generated code is no where near ideal. It's not recognizing the two
-; constantpool entries being loaded can be merged into one.
-
@GV = external global i32 ; <i32*> [#uses=2]
define void @t(i32* nocapture %vals, i32 %c) nounwind {
entry:
; ARM-LABEL: t:
; ARM: ldr [[REGISTER_1:r[0-9]+]], LCPI0_0
-; Unfortunately currently ARM codegen doesn't cse the ldr from constantpool.
-; The issue is it can be read by an "add pc" or a "ldr [pc]" so it's messy
-; to add the pseudo instructions to make sure they are CSE'ed at the same
-; time as the "ldr cp".
-; ARM: ldr r{{[0-9]+}}, LCPI0_1
; ARM: LPC0_0:
; ARM: ldr r{{[0-9]+}}, [pc, [[REGISTER_1]]]
; ARM: ldr r{{[0-9]+}}, [r{{[0-9]+}}]
@@ -36,7 +28,7 @@ entry:
bb.nph: ; preds = %entry
; ARM: LCPI0_0:
-; ARM: LCPI0_1:
+; ARM-NOT: LCPI0_1:
; ARM: .section
; THUMB: BB#1
diff --git a/test/CodeGen/ARM/mature-mc-support.ll b/test/CodeGen/ARM/mature-mc-support.ll
new file mode 100644
index 000000000000..0a7e5b91adc5
--- /dev/null
+++ b/test/CodeGen/ARM/mature-mc-support.ll
@@ -0,0 +1,12 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
+
+; RUN: not llc -mtriple=arm-pc-linux < %s > /dev/null 2> %t1
+; RUN: FileCheck %s < %t1
+
+; RUN: not llc -mtriple=arm-pc-linux -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/ARM/mem.ll b/test/CodeGen/ARM/mem.ll
index f46c7a5857ab..3c9cd913add6 100644
--- a/test/CodeGen/ARM/mem.ll
+++ b/test/CodeGen/ARM/mem.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm | grep strb
-; RUN: llc < %s -march=arm | grep strh
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define void @f1() {
entry:
@@ -7,8 +6,13 @@ entry:
ret void
}
+; CHECK: strb
+
define void @f2() {
entry:
store i16 0, i16* null
ret void
}
+
+; CHECK: strh
+
diff --git a/test/CodeGen/ARM/memcpy-inline.ll b/test/CodeGen/ARM/memcpy-inline.ll
index 946c63ed40c8..84ce4a7f0e79 100644
--- a/test/CodeGen/ARM/memcpy-inline.ll
+++ b/test/CodeGen/ARM/memcpy-inline.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -pre-RA-sched=source -disable-post-ra | FileCheck %s
-
+; RUN: llc < %s -mtriple=thumbv6m-apple-ios -mcpu=cortex-m0 -pre-RA-sched=source -disable-post-ra | FileCheck %s -check-prefix=CHECK-T1
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
@src = external global %struct.x
@@ -17,7 +17,12 @@ define i32 @t0() {
entry:
; CHECK-LABEL: t0:
; CHECK: vldr [[REG1:d[0-9]+]],
-; CHECK: vstr [[REG1]],
+; CHECK: vstr [[REG1]],
+; CHECK-T1-LABEL: t0:
+; CHECK-T1: ldrb [[TREG1:r[0-9]]],
+; CHECK-T1: strb [[TREG1]],
+; CHECK-T1: ldrh [[TREG2:r[0-9]]],
+; CHECK-T1: strh [[TREG2]]
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.x* @dst, i32 0, i32 0), i8* getelementptr inbounds (%struct.x* @src, i32 0, i32 0), i32 11, i32 8, i1 false)
ret i32 0
}
@@ -38,7 +43,8 @@ entry:
define void @t2(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t2:
-; CHECK: ldr [[REG2:r[0-9]+]], [r1, #32]
+; CHECK: movw [[REG2:r[0-9]+]], #16716
+; CHECK: movt [[REG2:r[0-9]+]], #72
; CHECK: str [[REG2]], [r0, #32]
; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
@@ -79,8 +85,14 @@ entry:
; CHECK: strb [[REG5]], [r0, #6]
; CHECK: movw [[REG6:r[0-9]+]], #21587
; CHECK: strh [[REG6]], [r0, #4]
-; CHECK: ldr [[REG7:r[0-9]+]],
+; CHECK: movw [[REG7:r[0-9]+]], #18500
+; CHECK: movt [[REG7:r[0-9]+]], #22866
; CHECK: str [[REG7]]
+; CHECK-T1-LABEL: t5:
+; CHECK-T1: movs [[TREG3:r[0-9]]],
+; CHECK-T1: strb [[TREG3]],
+; CHECK-T1: movs [[TREG4:r[0-9]]],
+; CHECK-T1: strb [[TREG4]],
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8]* @.str5, i64 0, i64 0), i64 7, i32 1, i1 false)
ret void
}
@@ -88,12 +100,17 @@ entry:
define void @t6() nounwind {
entry:
; CHECK-LABEL: t6:
-; CHECK: vld1.8 {[[REG8:d[0-9]+]]}, [r0]
-; CHECK: vstr [[REG8]], [r1]
+; CHECK: vld1.8 {[[REG9:d[0-9]+]]}, [r0]
+; CHECK: vstr [[REG9]], [r1]
; CHECK: adds r1, #6
; CHECK: adds r0, #6
; CHECK: vld1.8
; CHECK: vst1.16
+; CHECK-T1-LABEL: t6:
+; CHECK-T1: movs [[TREG5:r[0-9]]],
+; CHECK-T1: strh [[TREG5]],
+; CHECK-T1: ldr [[TREG6:r[0-9]]],
+; CHECK-T1: str [[TREG6]]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8]* @.str6, i64 0, i64 0), i64 14, i32 1, i1 false)
ret void
}
@@ -102,9 +119,12 @@ entry:
define void @t7(%struct.Foo* nocapture %a, %struct.Foo* nocapture %b) nounwind {
entry:
-; CHECK: t7
+; CHECK-LABEL: t7:
; CHECK: vld1.32
; CHECK: vst1.32
+; CHECK-T1-LABEL: t7:
+; CHECK-T1: ldr
+; CHECK-T1: str
%0 = bitcast %struct.Foo* %a to i8*
%1 = bitcast %struct.Foo* %b to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 16, i32 4, i1 false)
diff --git a/test/CodeGen/ARM/memfunc.ll b/test/CodeGen/ARM/memfunc.ll
index fe0056c42a11..8d3800b43c14 100644
--- a/test/CodeGen/ARM/memfunc.ll
+++ b/test/CodeGen/ARM/memfunc.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -mtriple=armv7-apple-ios -o - | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7m-darwin-eabi -o - | FileCheck %s --check-prefix=DARWIN
+; RUN: llc < %s -mtriple=thumbv7m-none-macho -o - | FileCheck %s --check-prefix=DARWIN
; RUN: llc < %s -mtriple=arm-none-eabi -o - | FileCheck --check-prefix=EABI %s
+; RUN: llc < %s -mtriple=arm-none-eabihf -o - | FileCheck --check-prefix=EABI %s
@from = common global [500 x i32] zeroinitializer, align 4
@to = common global [500 x i32] zeroinitializer, align 4
diff --git a/test/CodeGen/ARM/metadata-default.ll b/test/CodeGen/ARM/metadata-default.ll
new file mode 100644
index 000000000000..f6a3fe289cc1
--- /dev/null
+++ b/test/CodeGen/ARM/metadata-default.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7--none-eabi"
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = metadata !{i32 1, metadata !"wchar_size", i32 4}
+!1 = metadata !{i32 1, metadata !"min_enum_size", i32 4}
+
+; CHECK: .eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
+; CHECK: .eabi_attribute 26, 2 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/metadata-short-enums.ll b/test/CodeGen/ARM/metadata-short-enums.ll
new file mode 100644
index 000000000000..bccd3327e5b5
--- /dev/null
+++ b/test/CodeGen/ARM/metadata-short-enums.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7--none-eabi"
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = metadata !{i32 1, metadata !"wchar_size", i32 4}
+!1 = metadata !{i32 1, metadata !"min_enum_size", i32 1}
+
+; CHECK: .eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
+; CHECK: .eabi_attribute 26, 1 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/metadata-short-wchar.ll b/test/CodeGen/ARM/metadata-short-wchar.ll
new file mode 100644
index 000000000000..6de9bf174317
--- /dev/null
+++ b/test/CodeGen/ARM/metadata-short-wchar.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+target triple = "armv7--none-eabi"
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = metadata !{i32 1, metadata !"wchar_size", i32 2}
+!1 = metadata !{i32 1, metadata !"min_enum_size", i32 4}
+
+; CHECK: .eabi_attribute 18, 2 @ Tag_ABI_PCS_wchar_t
+; CHECK: .eabi_attribute 26, 2 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/minsize-imms.ll b/test/CodeGen/ARM/minsize-imms.ll
new file mode 100644
index 000000000000..4c8ff393a408
--- /dev/null
+++ b/test/CodeGen/ARM/minsize-imms.ll
@@ -0,0 +1,57 @@
+; RUN: llc -mtriple=thumbv7m-macho -o - -show-mc-encoding %s | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-macho -o - -show-mc-encoding %s | FileCheck %s --check-prefix=CHECK-V6M
+; RUN: llc -mtriple=armv6-macho -o - -show-mc-encoding %s | FileCheck %s --check-prefix=CHECK-ARM
+define i32 @test_mov() minsize {
+; CHECK-LABEL: test_mov:
+; CHECK: movs r0, #255 @ encoding: [0xff,0x20]
+
+ ret i32 255
+}
+
+define i32 @test_mov_mvn() minsize {
+; CHECK-LABEL: test_mov_mvn:
+; CHECK: mvn r0, #203 @ encoding: [0x6f,0xf0,0xcb,0x00]
+
+; CHECK-V6M-LABEL: test_mov_mvn:
+; CHECK-V6M: movs [[TMP:r[0-7]]], #203 @ encoding: [0xcb,0x20]
+; CHECK-V6M: mvns r0, [[TMP]] @ encoding: [0xc0,0x43]
+
+; CHECK-ARM-LABEL: test_mov_mvn:
+; CHECK-ARM: mvn r0, #203 @ encoding: [0xcb,0x00,0xe0,0xe3]
+ ret i32 4294967092
+}
+
+define i32 @test_mov_lsl() minsize {
+; CHECK-LABEL: test_mov_lsl:
+; CHECK: mov.w r0, #589824 @ encoding: [0x4f,0xf4,0x10,0x20]
+
+; CHECK-V6M-LABEL: test_mov_lsl:
+; CHECK-V6M: movs [[TMP:r[0-7]]], #9 @ encoding: [0x09,0x20]
+; CHECK-V6M: lsls r0, [[TMP]], #16 @ encoding: [0x00,0x04]
+
+; CHECK-ARM-LABEL: test_mov_lsl:
+; CHECK-ARM: mov r0, #589824 @ encoding: [0x09,0x08,0xa0,0xe3]
+ ret i32 589824
+}
+
+define i32 @test_movw() minsize {
+; CHECK-LABEL: test_movw:
+; CHECK: movw r0, #65535
+
+; CHECK-V6M-LABEL: test_movw:
+; CHECK-V6M: ldr r0, [[CONSTPOOL:LCPI[0-9]+_[0-9]+]] @ encoding: [A,0x48]
+; CHECK-V6M: [[CONSTPOOL]]:
+; CHECK-V6M-NEXT: .long 65535
+
+; CHECK-ARM-LABEL: test_movw:
+; CHECK-ARM: mov r0, #255 @ encoding: [0xff,0x00,0xa0,0xe3]
+; CHECK-ARM: orr r0, r0, #65280 @ encoding: [0xff,0x0c,0x80,0xe3]
+ ret i32 65535
+}
+
+define i32 @test_regress1() {
+; CHECK-ARM-LABEL: test_regress1:
+; CHECK-ARM: mov r0, #248 @ encoding: [0xf8,0x00,0xa0,0xe3]
+; CHECK-ARM: orr r0, r0, #16252928 @ encoding: [0x3e,0x07,0x80,0xe3]
+ ret i32 16253176
+}
diff --git a/test/CodeGen/ARM/minsize-litpools.ll b/test/CodeGen/ARM/minsize-litpools.ll
new file mode 100644
index 000000000000..d5cd2a9b72e1
--- /dev/null
+++ b/test/CodeGen/ARM/minsize-litpools.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=thumbv7s %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv7s %s -o - | FileCheck %s
+
+; CodeGen should be able to set and reset the MinSize subtarget-feature, and
+; make use of it in deciding whether to use MOVW/MOVT for global variables or a
+; lit-pool load (saving roughly 2 bytes of code).
+
+@var = global i32 0
+
+define i32 @small_global() minsize {
+; CHECK-LABEL: small_global:
+; CHECK: ldr r[[GLOBDEST:[0-9]+]], {{.?LCPI0_0}}
+; CHECK: ldr r0, [r[[GLOBDEST]]]
+
+ %val = load i32* @var
+ ret i32 %val
+}
+
+define i32 @big_global() {
+; CHECK-LABEL: big_global:
+; CHECK: movw [[GLOBDEST:r[0-9]+]], :lower16:var
+; CHECK: movt [[GLOBDEST]], :upper16:var
+
+ %val = load i32* @var
+ ret i32 %val
+}
diff --git a/test/CodeGen/ARM/misched-copy-arm.ll b/test/CodeGen/ARM/misched-copy-arm.ll
index 5da335fa2030..bb2d42ca9ede 100644
--- a/test/CodeGen/ARM/misched-copy-arm.ll
+++ b/test/CodeGen/ARM/misched-copy-arm.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc < %s -march=thumb -mcpu=swift -pre-RA-sched=source -join-globalcopies -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=swift -pre-RA-sched=source -join-globalcopies -enable-misched -verify-misched -debug-only=misched -arm-atomic-cfg-tidy=0 %s -o - 2>&1 | FileCheck %s
;
; Loop counter copies should be eliminated.
; There is also a MUL here, but we don't care where it is scheduled.
diff --git a/test/CodeGen/ARM/mls.ll b/test/CodeGen/ARM/mls.ll
index 8f0d3a89a30a..6776e631f1da 100644
--- a/test/CodeGen/ARM/mls.ll
+++ b/test/CodeGen/ARM/mls.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+v6t2 -arm-use-mulops=false | FileCheck %s -check-prefix=NO_MULOPS
+; RUN: llc -mtriple=arm-eabi -mattr=+v6t2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6t2 -arm-use-mulops=false %s -o - \
+; RUN: | FileCheck %s -check-prefix=NO_MULOPS
define i32 @f1(i32 %a, i32 %b, i32 %c) {
%tmp1 = mul i32 %a, %b
diff --git a/test/CodeGen/ARM/movt-movw-global.ll b/test/CodeGen/ARM/movt-movw-global.ll
index bbedea19d780..1e10af181f30 100644
--- a/test/CodeGen/ARM/movt-movw-global.ll
+++ b/test/CodeGen/ARM/movt-movw-global.ll
@@ -16,8 +16,8 @@ entry:
; IOS-PIC: movw r0, :lower16:(L_foo$non_lazy_ptr-(LPC0_0+8))
; IOS-PIC-NEXT: movt r0, :upper16:(L_foo$non_lazy_ptr-(LPC0_0+8))
-; IOS-STATIC-NOT: movw r0, :lower16:_foo
-; IOS-STATIC-NOT: movt r0, :upper16:_foo
+; IOS-STATIC: movw r0, :lower16:_foo
+; IOS-STATIC-NEXT: movt r0, :upper16:_foo
ret i32* @foo
}
@@ -32,8 +32,8 @@ entry:
; IOS-PIC: movw r1, :lower16:(L_foo$non_lazy_ptr-(LPC1_0+8))
; IOS-PIC-NEXT: movt r1, :upper16:(L_foo$non_lazy_ptr-(LPC1_0+8))
-; IOS-STATIC-NOT: movw r1, :lower16:_foo
-; IOS-STATIC-NOT: movt r1, :upper16:_foo
+; IOS-STATIC: movw r1, :lower16:_foo
+; IOS-STATIC-NEXT: movt r1, :upper16:_foo
store i32 %baz, i32* @foo, align 4
ret void
}
diff --git a/test/CodeGen/ARM/movt.ll b/test/CodeGen/ARM/movt.ll
index 25c1bfe32044..94c022ee2712 100644
--- a/test/CodeGen/ARM/movt.ll
+++ b/test/CodeGen/ARM/movt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; rdar://7317664
define i32 @t(i32 %X) nounwind {
diff --git a/test/CodeGen/ARM/mul.ll b/test/CodeGen/ARM/mul.ll
index 466a8020acce..5e150b00172c 100644
--- a/test/CodeGen/ARM/mul.ll
+++ b/test/CodeGen/ARM/mul.ll
@@ -1,11 +1,12 @@
-; RUN: llc < %s -march=arm | grep mul | count 2
-; RUN: llc < %s -march=arm | grep lsl | count 2
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1(i32 %u) {
%tmp = mul i32 %u, %u
ret i32 %tmp
}
+; CHECK: mul
+
define i32 @f2(i32 %u, i32 %v) {
%tmp = mul i32 %u, %v
ret i32 %tmp
@@ -16,7 +17,16 @@ define i32 @f3(i32 %u) {
ret i32 %tmp
}
+; CHECK: mul
+; CHECK: lsl
+
define i32 @f4(i32 %u) {
%tmp = mul i32 %u, 4
ret i32 %tmp
}
+
+; CHECK-NOT: mul
+
+; CHECK: lsl
+; CHECK-NOT: lsl
+
diff --git a/test/CodeGen/ARM/mul_const.ll b/test/CodeGen/ARM/mul_const.ll
index 482d8f2888ce..ada3d4e5b9c4 100644
--- a/test/CodeGen/ARM/mul_const.ll
+++ b/test/CodeGen/ARM/mul_const.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @t9(i32 %v) nounwind readnone {
entry:
diff --git a/test/CodeGen/ARM/mulhi.ll b/test/CodeGen/ARM/mulhi.ll
index 63705c502779..c66a804808fd 100644
--- a/test/CodeGen/ARM/mulhi.ll
+++ b/test/CodeGen/ARM/mulhi.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s -check-prefix=V6
-; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=V4
-; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s -check-prefix=M3
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s -check-prefix=V6
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=V4
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s -check-prefix=M3
define i32 @smulhi(i32 %x, i32 %y) nounwind {
; V6-LABEL: smulhi:
diff --git a/test/CodeGen/ARM/mult-alt-generic-arm.ll b/test/CodeGen/ARM/mult-alt-generic-arm.ll
index a8104db337f5..05e9b0facd6c 100644
--- a/test/CodeGen/ARM/mult-alt-generic-arm.ll
+++ b/test/CodeGen/ARM/mult-alt-generic-arm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc < %s -march=arm -no-integrated-as
; ModuleID = 'mult-alt-generic.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32"
target triple = "arm"
diff --git a/test/CodeGen/ARM/mvn.ll b/test/CodeGen/ARM/mvn.ll
index 2c5ccd7442e0..e40ab1ec656b 100644
--- a/test/CodeGen/ARM/mvn.ll
+++ b/test/CodeGen/ARM/mvn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | grep mvn | count 9
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1() {
entry:
@@ -72,3 +72,17 @@ entry:
%tmp102 = icmp eq i32 -2, %a ; <i1> [#uses=1]
ret i1 %tmp102
}
+
+; CHECK-LABEL: mvn.ll
+; CHECK-LABEL: @f1
+; CHECK: mvn
+; CHECK: mvn
+; CHECK: mvn
+; CHECK: mvn
+; CHECK: mvn
+; CHECK: mvn
+; CHECK: mvn
+; CHECK: mvn
+; CHECK: mvn
+; CHECK-NOT: mvn
+
diff --git a/test/CodeGen/ARM/named-reg-alloc.ll b/test/CodeGen/ARM/named-reg-alloc.ll
new file mode 100644
index 000000000000..3c27d2244e3c
--- /dev/null
+++ b/test/CodeGen/ARM/named-reg-alloc.ll
@@ -0,0 +1,14 @@
+; RUN: not llc < %s -mtriple=arm-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=arm-linux-gnueabi 2>&1 | FileCheck %s
+
+define i32 @get_stack() nounwind {
+entry:
+; FIXME: Include an allocatable-specific error message
+; CHECK: Invalid register name global variable
+ %sp = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %sp
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"r5\00"}
diff --git a/test/CodeGen/ARM/named-reg-notareg.ll b/test/CodeGen/ARM/named-reg-notareg.ll
new file mode 100644
index 000000000000..af38b609b404
--- /dev/null
+++ b/test/CodeGen/ARM/named-reg-notareg.ll
@@ -0,0 +1,13 @@
+; RUN: not llc < %s -mtriple=arm-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=arm-linux-gnueabi 2>&1 | FileCheck %s
+
+define i32 @get_stack() nounwind {
+entry:
+; CHECK: Invalid register name global variable
+ %sp = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %sp
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"notareg\00"}
diff --git a/test/CodeGen/ARM/neon_arith1.ll b/test/CodeGen/ARM/neon_arith1.ll
index 58927374177a..42e7d82c51d7 100644
--- a/test/CodeGen/ARM/neon_arith1.ll
+++ b/test/CodeGen/ARM/neon_arith1.ll
@@ -1,7 +1,10 @@
-; RUN: llc < %s -march=arm -mattr=+neon | grep vadd
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @t_i8x8(<8 x i8> %a, <8 x i8> %b) nounwind {
entry:
%0 = add <8 x i8> %a, %b
ret <8 x i8> %0
}
+
+; CHECK: vadd
+
diff --git a/test/CodeGen/ARM/neon_cmp.ll b/test/CodeGen/ARM/neon_cmp.ll
index 046b5da22899..e1662c43c647 100644
--- a/test/CodeGen/ARM/neon_cmp.ll
+++ b/test/CodeGen/ARM/neon_cmp.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s
+
; bug 15283
; radar://13191881
; CHECK: vfcmp
diff --git a/test/CodeGen/ARM/neon_div.ll b/test/CodeGen/ARM/neon_div.ll
index 4a82c36676f2..4f1607ed5baf 100644
--- a/test/CodeGen/ARM/neon_div.ll
+++ b/test/CodeGen/ARM/neon_div.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon -pre-RA-sched=source -disable-post-ra | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -pre-RA-sched=source -disable-post-ra %s -o - \
+; RUN: | FileCheck %s
define <8 x i8> @sdivi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vrecpe.f32
diff --git a/test/CodeGen/ARM/neon_fpconv.ll b/test/CodeGen/ARM/neon_fpconv.ll
index 149f4c777003..8e37ce778182 100644
--- a/test/CodeGen/ARM/neon_fpconv.ll
+++ b/test/CodeGen/ARM/neon_fpconv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; PR12540: ARM backend lowering of FP_ROUND v2f64 to v2f32.
define <2 x float> @vtrunc(<2 x double> %a) {
diff --git a/test/CodeGen/ARM/neon_ld1.ll b/test/CodeGen/ARM/neon_ld1.ll
index b892d2db67d6..9fd3fc5f341a 100644
--- a/test/CodeGen/ARM/neon_ld1.ll
+++ b/test/CodeGen/ARM/neon_ld1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s
; CHECK: t1
; CHECK: vldr d
diff --git a/test/CodeGen/ARM/neon_ld2.ll b/test/CodeGen/ARM/neon_ld2.ll
index 25a670b09778..571a16a061df 100644
--- a/test/CodeGen/ARM/neon_ld2.ll
+++ b/test/CodeGen/ARM/neon_ld2.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s --check-prefix=SWIFT
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mcpu=swift %s -o - | FileCheck %s --check-prefix=SWIFT
; CHECK: t1
; CHECK: vld1.64
diff --git a/test/CodeGen/ARM/neon_minmax.ll b/test/CodeGen/ARM/neon_minmax.ll
index 2e45919e7790..84e4b303c16d 100644
--- a/test/CodeGen/ARM/neon_minmax.ll
+++ b/test/CodeGen/ARM/neon_minmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=swift %s -o - | FileCheck %s
define float @fmin_ole(float %x) nounwind {
;CHECK-LABEL: fmin_ole:
diff --git a/test/CodeGen/ARM/neon_shift.ll b/test/CodeGen/ARM/neon_shift.ll
index 340f220fb362..3c09358cf138 100644
--- a/test/CodeGen/ARM/neon_shift.ll
+++ b/test/CodeGen/ARM/neon_shift.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; <rdar://problem/9055897>
define <4 x i16> @t1(<4 x i32> %a) nounwind {
diff --git a/test/CodeGen/ARM/neon_vabs.ll b/test/CodeGen/ARM/neon_vabs.ll
index 76b604423986..7a02512198be 100644
--- a/test/CodeGen/ARM/neon_vabs.ll
+++ b/test/CodeGen/ARM/neon_vabs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <4 x i32> @test1(<4 x i32> %a) nounwind {
; CHECK-LABEL: test1:
diff --git a/test/CodeGen/ARM/none-macho.ll b/test/CodeGen/ARM/none-macho.ll
new file mode 100644
index 000000000000..60c21716dc35
--- /dev/null
+++ b/test/CodeGen/ARM/none-macho.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=thumbv7m-none-macho %s -o - -relocation-model=pic -disable-fp-elim | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NON-FAST
+; RUN: llc -mtriple=thumbv7m-none-macho -O0 %s -o - -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc -mtriple=thumbv7m-none-macho -filetype=obj %s -o /dev/null
+
+ ; Bare-metal should probably "declare" segments just like normal MachO
+; CHECK: __picsymbolstub4
+; CHECK: __StaticInit
+; CHECK: __text
+
+@var = external global i32
+
+define i32 @test_litpool() minsize {
+; CHECK-LABEL: test_litpool:
+ %val = load i32* @var
+ ret i32 %val
+
+ ; Lit-pool entries need to produce a "$non_lazy_ptr" version of the symbol.
+; CHECK: LCPI0_0:
+; CHECK-NEXT: .long L_var$non_lazy_ptr-(LPC0_0+4)
+}
+
+define i32 @test_movw_movt() {
+; CHECK-LABEL: test_movw_movt:
+ %val = load i32* @var
+ ret i32 %val
+
+ ; movw/movt should also address their symbols MachO-style
+; CHECK: movw [[RTMP:r[0-9]+]], :lower16:(L_var$non_lazy_ptr-(LPC1_0+4))
+; CHECK: movt [[RTMP]], :upper16:(L_var$non_lazy_ptr-(LPC1_0+4))
+; CHECK: LPC1_0:
+; CHECK: add [[RTMP]], pc
+}
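+ ; (A reading of the CHECK lines above, not authoritative documentation: the
+ ; movw/movt pair materializes the pc-relative offset
+ ; L_var$non_lazy_ptr-(LPC1_0+4); the add of pc at LPC1_0 then yields the
+ ; address of the non-lazy pointer itself, through which @var is loaded. The
+ ; +4 presumably reflects the Thumb pc read-ahead.)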
+
+declare void @llvm.trap()
+
+define void @test_trap() {
+; CHECK-LABEL: test_trap:
+
+ ; Bare-metal MachO gets compiled on top of the normal MachO toolchain, which
+ ; understands trap natively.
+ call void @llvm.trap()
+; CHECK: trap
+
+ ret void
+}
+
+define i32 @test_frame_ptr() {
+; CHECK-LABEL: test_frame_ptr:
+ call void @test_trap()
+
+ ; Frame pointer is r11.
+; CHECK: mov r11, sp
+ ret i32 42
+}
+
+%big_arr = type [8 x i32]
+define void @test_two_areas(%big_arr* %addr) {
+; CHECK-LABEL: test_two_areas:
+ %val = load %big_arr* %addr
+ call void @test_trap()
+ store %big_arr %val, %big_arr* %addr
+
+ ; This goes with the choice of r7 as FP (largely). FP and LR have to be stored
+ ; consecutively on the stack for the frame record to be valid, which means we
+ ; need the 2 register-save areas employed by iOS.
+; CHECK-NON-FAST: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ...
+; CHECK-NON-FAST: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ ret void
+}
+
+define void @test_tail_call() {
+; CHECK-LABEL: test_tail_call:
+ tail call void @test_trap()
+
+ ; Tail calls should be available and use a Thumb2 branch.
+; CHECK: b.w _test_trap
+ ret void
+}
+
+define float @test_softfloat_calls(float %in) {
+; CHECK-LABEL: test_softfloat_calls:
+ %sum = fadd float %in, %in
+
+ ; Soft-float calls should be GNU-style rather than RTABI and should not be the
+ ; *vfp variants used for ARMv6 iOS.
+; CHECK: blx ___addsf3{{$}}
+ ret float %sum
+}
+
+ ; Even bare-metal PIC needs GOT-like behaviour, in principle. Depends a bit on
+ ; the use-case of course, but LLVM doesn't know what that is.
+; CHECK: non_lazy_symbol_pointers
+; CHECK: L_var$non_lazy_ptr:
+; CHECK-NEXT: .indirect_symbol _var
+
+ ; All MachO objects should have this to give the linker leeway in removing
+ ; dead code.
+; CHECK: .subsections_via_symbols
diff --git a/test/CodeGen/ARM/noreturn.ll b/test/CodeGen/ARM/noreturn.ll
index 4c876cec9c10..edc3333455d2 100644
--- a/test/CodeGen/ARM/noreturn.ll
+++ b/test/CodeGen/ARM/noreturn.ll
@@ -43,6 +43,23 @@ entry:
unreachable
}
+; Test case for uwtable
+define i32 @test4() uwtable {
+; CHECK-LABEL: @test4
+; CHECK: push
+entry:
+ tail call void @overflow() #0
+ unreachable
+}
+
+define i32 @test5() uwtable {
+; CHECK-LABEL: @test5
+; CHECK: push
+entry:
+ tail call void @overflow_with_unwind() #1
+ unreachable
+}
+
; Function Attrs: noreturn
declare void @overflow_with_unwind() #1
diff --git a/test/CodeGen/ARM/null-streamer.ll b/test/CodeGen/ARM/null-streamer.ll
new file mode 100644
index 000000000000..350c45e5bebe
--- /dev/null
+++ b/test/CodeGen/ARM/null-streamer.ll
@@ -0,0 +1,7 @@
+; Test the null streamer with a target streamer.
+; RUN: llc -O0 -filetype=null -mtriple=arm-linux < %s
+
+define i32 @main() {
+entry:
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/optimize-dmbs-v7.ll b/test/CodeGen/ARM/optimize-dmbs-v7.ll
new file mode 100644
index 000000000000..64f5e202d36a
--- /dev/null
+++ b/test/CodeGen/ARM/optimize-dmbs-v7.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -mtriple=armv7 -mattr=+db | FileCheck %s
+
+@x1 = global i32 0, align 4
+@x2 = global i32 0, align 4
+
+define void @test() {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.013 = phi i32 [ 1, %entry ], [ %inc6, %for.body ]
+ store atomic i32 %i.013, i32* @x1 seq_cst, align 4
+ store atomic i32 %i.013, i32* @x1 seq_cst, align 4
+ store atomic i32 %i.013, i32* @x2 seq_cst, align 4
+ %inc6 = add nsw i32 %i.013, 1
+ %exitcond = icmp eq i32 %inc6, 2
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+
+; The for.body contains 3 seq_cst stores.
+; Hence it should have 3 dmb;str;dmb sequences with the middle dmbs collapsed
+; CHECK: %for.body
+; CHECK-NOT: str
+; CHECK: dmb
+; CHECK-NOT: dmb
+; CHECK: str
+
+; CHECK-NOT: str
+; CHECK: dmb
+; CHECK-NOT: dmb
+; CHECK: str
+
+; CHECK-NOT: str
+; CHECK: dmb
+; CHECK-NOT: dmb
+; CHECK: str
+
+; CHECK-NOT: str
+; CHECK: dmb
+; CHECK-NOT: dmb
+; CHECK-NOT: str
+; CHECK: %for.end
+}
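+; (A quick count as a sanity check of the CHECK pattern above: three seq_cst
+; stores would naively expand to dmb;str;dmb three times, i.e. 6 dmbs; merging
+; each back-to-back dmb pair between consecutive stores leaves the
+; dmb,str,dmb,str,dmb,str,dmb alternation, i.e. 4 dmbs, that the CHECK and
+; CHECK-NOT lines encode.)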
+
+define void @test2() {
+ call void @llvm.arm.dmb(i32 11)
+ tail call void @test()
+ call void @llvm.arm.dmb(i32 11)
+ ret void
+; the call should prevent the two dmbs from collapsing
+; CHECK: test2:
+; CHECK: dmb
+; CHECK-NEXT: bl
+; CHECK-NEXT: dmb
+}
+
+define void @test3() {
+ call void @llvm.arm.dmb(i32 11)
+ call void @llvm.arm.dsb(i32 9)
+ call void @llvm.arm.dmb(i32 11)
+ ret void
+; the dsb should prevent the two dmbs from collapsing
+; CHECK: test3:
+; CHECK: dmb
+; CHECK-NEXT: dsb
+; CHECK-NEXT: dmb
+
+}
+
+
+declare void @llvm.arm.dmb(i32)
+declare void @llvm.arm.dsb(i32)
diff --git a/test/CodeGen/ARM/optselect-regclass.ll b/test/CodeGen/ARM/optselect-regclass.ll
index 1aa452089646..0acb2f2f0ecc 100644
--- a/test/CodeGen/ARM/optselect-regclass.ll
+++ b/test/CodeGen/ARM/optselect-regclass.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=swift -verify-machineinstrs
+; RUN: llc -mtriple=arm-eabi -mcpu=swift -verify-machineinstrs %s -o /dev/null
+
%union.opcode.0.2.5.8.15.28 = type { i32 }
@opcode = external global %union.opcode.0.2.5.8.15.28, align 4
diff --git a/test/CodeGen/ARM/out-of-registers.ll b/test/CodeGen/ARM/out-of-registers.ll
new file mode 100644
index 000000000000..790e4165d4c6
--- /dev/null
+++ b/test/CodeGen/ARM/out-of-registers.ll
@@ -0,0 +1,42 @@
+; RUN: llc -O3 %s -o - | FileCheck %s
+; ModuleID = 'fo.c'
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:32-n8:16:32-S64"
+target triple = "thumbv7-none-linux-gnueabi"
+
+; CHECK: vpush
+; CHECK: vpop
+
+define void @foo(float* nocapture %A) #0 {
+ %1= bitcast float* %A to i8*
+ %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
+ %divp_vec = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %3
+ %4 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 1
+ %div3p_vec = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %4
+ %5 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 2
+ %div8p_vec = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %5
+ %6 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 3
+ %div13p_vec = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %6
+ tail call void @llvm.arm.neon.vst4.v4f32(i8* %1, <4 x float> %divp_vec, <4 x float> %div3p_vec, <4 x float> %div8p_vec, <4 x float> %div13p_vec, i32 4)
+ ret void
+}
+
+; Function Attrs: nounwind
+declare i32 @llvm.annotation.i32(i32, i8*, i8*, i32) #1
+
+; Function Attrs: nounwind readonly
+
+; Function Attrs: nounwind
+declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) #1
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8*, i32) #2
+
+; Function Attrs: nounwind
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readonly }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"Snapdragon LLVM ARM Compiler 3.4"}
+!1 = metadata !{metadata !1}
diff --git a/test/CodeGen/ARM/pack.ll b/test/CodeGen/ARM/pack.ll
index fbc115518f88..89abe28b0f54 100644
--- a/test/CodeGen/ARM/pack.ll
+++ b/test/CodeGen/ARM/pack.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
; CHECK: test1
; CHECK: pkhbt r0, r0, r1, lsl #16
diff --git a/test/CodeGen/ARM/phi.ll b/test/CodeGen/ARM/phi.ll
index dc1a95b0b78a..5a8f623e6f50 100644
--- a/test/CodeGen/ARM/phi.ll
+++ b/test/CodeGen/ARM/phi.ll
@@ -1,4 +1,6 @@
-; RUN: llc -march=arm -mattr=+v4t < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t -addr-sink-using-gep=1 %s -o - | FileCheck %s
+
; <rdar://problem/8686347>
define i32 @test1(i1 %a, i32* %b) {
diff --git a/test/CodeGen/ARM/popcnt.ll b/test/CodeGen/ARM/popcnt.ll
index bdf793d91b0a..7ace640c6b61 100644
--- a/test/CodeGen/ARM/popcnt.ll
+++ b/test/CodeGen/ARM/popcnt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; Implement ctpop with vcnt
define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
diff --git a/test/CodeGen/ARM/prefetch-thumb.ll b/test/CodeGen/ARM/prefetch-thumb.ll
deleted file mode 100644
index e6f6ae8d18b2..000000000000
--- a/test/CodeGen/ARM/prefetch-thumb.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+v7 | FileCheck %s -check-prefix=THUMB2
-; TODO: This test case will be merged back into prefetch.ll when ARM mode issue is solved.
-
-declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
-
-define void @t6() {
-entry:
-;ARM: t6:
-;ARM: pld [sp]
-;ARM: pld [sp, #50]
-
-;THUMB2: t6:
-;THUMB2: pld [sp]
-;THUMB2: pld [sp, #50]
-
-%red = alloca [100 x i8], align 1
-%0 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 0
-%1 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 50
-call void @llvm.prefetch(i8* %0, i32 0, i32 3, i32 1)
-call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
-ret void
-}
diff --git a/test/CodeGen/ARM/prefetch.ll b/test/CodeGen/ARM/prefetch.ll
index 5badb3114814..7350e0a90d89 100644
--- a/test/CodeGen/ARM/prefetch.ll
+++ b/test/CodeGen/ARM/prefetch.ll
@@ -1,9 +1,11 @@
-; RUN: llc < %s -march=thumb -mattr=-thumb2 | not grep pld
-; RUN: llc < %s -march=thumb -mattr=+v7 | FileCheck %s -check-prefix=THUMB2
-; RUN: llc < %s -march=arm -mattr=+v7 | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -march=arm -mcpu=cortex-a9-mp | FileCheck %s -check-prefix=ARM-MP
+; RUN: llc -mtriple=thumb-eabi -mattr=-thumb2 %s -o - | FileCheck %s -check-prefix CHECK-T1
+; RUN: llc -mtriple=thumb-eabi -mattr=+v7 %s -o - | FileCheck %s -check-prefix=THUMB2
+; RUN: llc -mtriple=arm-eabi -mattr=+v7 %s -o - | FileCheck %s -check-prefix=ARM
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9-mp %s -o - | FileCheck %s -check-prefix=ARM-MP
; rdar://8601536
+; CHECK-T1-NOT: pld
+
define void @t1(i8* %ptr) nounwind {
entry:
; ARM-LABEL: t1:
@@ -75,3 +77,21 @@ entry:
tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 0 )
ret void
}
+
+define void @t6() {
+entry:
+;ARM-LABEL: t6:
+;ARM: pld [sp]
+;ARM: pld [sp, #50]
+
+;THUMB2-LABEL: t6:
+;THUMB2: pld [sp]
+;THUMB2: pld [sp, #50]
+
+%red = alloca [100 x i8], align 1
+%0 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 0
+%1 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 50
+call void @llvm.prefetch(i8* %0, i32 0, i32 3, i32 1)
+call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
+ret void
+}
diff --git a/test/CodeGen/ARM/rbit.ll b/test/CodeGen/ARM/rbit.ll
new file mode 100644
index 000000000000..41f866fc8d2f
--- /dev/null
+++ b/test/CodeGen/ARM/rbit.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=armv8-eabi %s -o - | FileCheck %s
+
+; CHECK-LABEL: rbit
+; CHECK: rbit r0, r0
+define i32 @rbit(i32 %t) {
+entry:
+ %rbit = call i32 @llvm.arm.rbit(i32 %t)
+ ret i32 %rbit
+}
+
+; CHECK-LABEL: rbit_constant
+; CHECK: mov r0, #0
+; CHECK: rbit r0, r0
+define i32 @rbit_constant() {
+entry:
+ %rbit.i = call i32 @llvm.arm.rbit(i32 0)
+ ret i32 %rbit.i
+}
+
+declare i32 @llvm.arm.rbit(i32)
diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index 25484f484853..feed5ad2830a 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 -regalloc=basic | FileCheck %s
; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
%struct.int16x8_t = type { <8 x i16> }
@@ -34,9 +34,11 @@ entry:
%12 = sext <4 x i16> %11 to <4 x i32> ; <<4 x i32>> [#uses=1]
%13 = mul <4 x i32> %1, %9 ; <<4 x i32>> [#uses=1]
%14 = mul <4 x i32> %3, %12 ; <<4 x i32>> [#uses=1]
- %15 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %13, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
- %16 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %14, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
- %17 = shufflevector <4 x i16> %15, <4 x i16> %16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
+ %15 = lshr <4 x i32> %13, <i32 12, i32 12, i32 12, i32 12>
+ %trunc_15 = trunc <4 x i32> %15 to <4 x i16>
+ %16 = lshr <4 x i32> %14, <i32 12, i32 12, i32 12, i32 12>
+ %trunc_16 = trunc <4 x i32> %16 to <4 x i16>
+ %17 = shufflevector <4 x i16> %trunc_15, <4 x i16> %trunc_16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
%18 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17, i32 1)
ret void
diff --git a/test/CodeGen/ARM/ret0.ll b/test/CodeGen/ARM/ret0.ll
index 5c312eb98a32..e51067b7ad51 100644
--- a/test/CodeGen/ARM/ret0.ll
+++ b/test/CodeGen/ARM/ret0.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define i32 @test() {
ret i32 0
diff --git a/test/CodeGen/ARM/ret_arg1.ll b/test/CodeGen/ARM/ret_arg1.ll
index 1ab947b1e20d..b7eef2059965 100644
--- a/test/CodeGen/ARM/ret_arg1.ll
+++ b/test/CodeGen/ARM/ret_arg1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define i32 @test(i32 %a1) {
ret i32 %a1
diff --git a/test/CodeGen/ARM/ret_arg2.ll b/test/CodeGen/ARM/ret_arg2.ll
index 84477d042c74..bcb379b7d6a5 100644
--- a/test/CodeGen/ARM/ret_arg2.ll
+++ b/test/CodeGen/ARM/ret_arg2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define i32 @test(i32 %a1, i32 %a2) {
ret i32 %a2
diff --git a/test/CodeGen/ARM/ret_arg3.ll b/test/CodeGen/ARM/ret_arg3.ll
index f7f9057432d1..625162f7762a 100644
--- a/test/CodeGen/ARM/ret_arg3.ll
+++ b/test/CodeGen/ARM/ret_arg3.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
+
define i32 @test(i32 %a1, i32 %a2, i32 %a3) {
ret i32 %a3
}
diff --git a/test/CodeGen/ARM/ret_arg4.ll b/test/CodeGen/ARM/ret_arg4.ll
index f7b3e4a282b2..81b55fe7b9f3 100644
--- a/test/CodeGen/ARM/ret_arg4.ll
+++ b/test/CodeGen/ARM/ret_arg4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define i32 @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
ret i32 %a4
diff --git a/test/CodeGen/ARM/ret_arg5.ll b/test/CodeGen/ARM/ret_arg5.ll
index c4f9fb5e0a9b..680e89f9aa40 100644
--- a/test/CodeGen/ARM/ret_arg5.ll
+++ b/test/CodeGen/ARM/ret_arg5.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define i32 @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5) {
ret i32 %a5
diff --git a/test/CodeGen/ARM/ret_f32_arg2.ll b/test/CodeGen/ARM/ret_f32_arg2.ll
index 2bafea675531..0caee0bbaed2 100644
--- a/test/CodeGen/ARM/ret_f32_arg2.ll
+++ b/test/CodeGen/ARM/ret_f32_arg2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define float @test_f32(float %a1, float %a2) {
ret float %a2
diff --git a/test/CodeGen/ARM/ret_f32_arg5.ll b/test/CodeGen/ARM/ret_f32_arg5.ll
index c6ce60ecb9c8..d39dc7e0526b 100644
--- a/test/CodeGen/ARM/ret_f32_arg5.ll
+++ b/test/CodeGen/ARM/ret_f32_arg5.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define float @test_f32_arg5(float %a1, float %a2, float %a3, float %a4, float %a5) {
ret float %a5
diff --git a/test/CodeGen/ARM/ret_f64_arg2.ll b/test/CodeGen/ARM/ret_f64_arg2.ll
index 386e85f4b9a5..c4519ff744c0 100644
--- a/test/CodeGen/ARM/ret_f64_arg2.ll
+++ b/test/CodeGen/ARM/ret_f64_arg2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define double @test_f64(double %a1, double %a2) {
ret double %a2
diff --git a/test/CodeGen/ARM/ret_f64_arg_reg_split.ll b/test/CodeGen/ARM/ret_f64_arg_reg_split.ll
index bdb0a606227b..ef1125097b3f 100644
--- a/test/CodeGen/ARM/ret_f64_arg_reg_split.ll
+++ b/test/CodeGen/ARM/ret_f64_arg_reg_split.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=arm8 -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mcpu=arm8 -mattr=+vfp2 %s -o /dev/null
define double @test_double_arg_reg_split(i32 %a1, double %a2) {
ret double %a2
diff --git a/test/CodeGen/ARM/ret_f64_arg_split.ll b/test/CodeGen/ARM/ret_f64_arg_split.ll
index 4f841a3cde7b..113092050ff9 100644
--- a/test/CodeGen/ARM/ret_f64_arg_split.ll
+++ b/test/CodeGen/ARM/ret_f64_arg_split.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define double @test_double_arg_split(i64 %a1, i32 %a2, double %a3) {
ret double %a3
diff --git a/test/CodeGen/ARM/ret_f64_arg_stack.ll b/test/CodeGen/ARM/ret_f64_arg_stack.ll
index 21443177d3de..f45923e7e8e5 100644
--- a/test/CodeGen/ARM/ret_f64_arg_stack.ll
+++ b/test/CodeGen/ARM/ret_f64_arg_stack.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define double @test_double_arg_stack(i64 %a1, i32 %a2, i32 %a3, double %a4) {
ret double %a4
diff --git a/test/CodeGen/ARM/ret_i128_arg2.ll b/test/CodeGen/ARM/ret_i128_arg2.ll
index 908c34f8cda6..a87f3f23b680 100644
--- a/test/CodeGen/ARM/ret_i128_arg2.ll
+++ b/test/CodeGen/ARM/ret_i128_arg2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define i128 @test_i128(i128 %a1, i128 %a2, i128 %a3) {
ret i128 %a3
diff --git a/test/CodeGen/ARM/ret_i64_arg2.ll b/test/CodeGen/ARM/ret_i64_arg2.ll
index b1a1024acaf1..531360008ba4 100644
--- a/test/CodeGen/ARM/ret_i64_arg2.ll
+++ b/test/CodeGen/ARM/ret_i64_arg2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define i64 @test_i64(i64 %a1, i64 %a2) {
ret i64 %a2
diff --git a/test/CodeGen/ARM/ret_i64_arg3.ll b/test/CodeGen/ARM/ret_i64_arg3.ll
index ffc1d2f4b52a..ce8da0ab8189 100644
--- a/test/CodeGen/ARM/ret_i64_arg3.ll
+++ b/test/CodeGen/ARM/ret_i64_arg3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define i64 @test_i64_arg3(i64 %a1, i64 %a2, i64 %a3) {
ret i64 %a3
diff --git a/test/CodeGen/ARM/ret_i64_arg_split.ll b/test/CodeGen/ARM/ret_i64_arg_split.ll
index 956bce558fc5..0583b277f56e 100644
--- a/test/CodeGen/ARM/ret_i64_arg_split.ll
+++ b/test/CodeGen/ARM/ret_i64_arg_split.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o /dev/null
define i64 @test_i64_arg_split(i64 %a1, i32 %a2, i64 %a3) {
ret i64 %a3
diff --git a/test/CodeGen/ARM/ret_void.ll b/test/CodeGen/ARM/ret_void.ll
index 2b7ae0562884..93dc5c1f654d 100644
--- a/test/CodeGen/ARM/ret_void.ll
+++ b/test/CodeGen/ARM/ret_void.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
define void @test() {
ret void
diff --git a/test/CodeGen/ARM/returned-ext.ll b/test/CodeGen/ARM/returned-ext.ll
index d2cdeb096a88..925e9e729f44 100644
--- a/test/CodeGen/ARM/returned-ext.ll
+++ b/test/CodeGen/ARM/returned-ext.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -arm-tail-calls | FileCheck %s -check-prefix=CHECKELF
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-tail-calls | FileCheck %s -check-prefix=CHECKT2D
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi | FileCheck %s -check-prefix=CHECKELF
+; RUN: llc < %s -mtriple=thumbv7-apple-ios5.0 | FileCheck %s -check-prefix=CHECKT2D
declare i16 @identity16(i16 returned %x)
declare i32 @identity32(i32 returned %x)
diff --git a/test/CodeGen/ARM/returned-trunc-tail-calls.ll b/test/CodeGen/ARM/returned-trunc-tail-calls.ll
index 59467271a7a7..6051a83dcebe 100644
--- a/test/CodeGen/ARM/returned-trunc-tail-calls.ll
+++ b/test/CodeGen/ARM/returned-trunc-tail-calls.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7 -arm-tail-calls | FileCheck %s
+; RUN: llc < %s -mtriple=armv7 | FileCheck %s
declare i16 @ret16(i16 returned)
declare i32 @ret32(i32 returned)
diff --git a/test/CodeGen/ARM/rev.ll b/test/CodeGen/ARM/rev.ll
index 6c380aee3d93..f95f97105b9f 100644
--- a/test/CodeGen/ARM/rev.ll
+++ b/test/CodeGen/ARM/rev.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
define i32 @test1(i32 %X) nounwind {
; CHECK: test1
diff --git a/test/CodeGen/ARM/saxpy10-a9.ll b/test/CodeGen/ARM/saxpy10-a9.ll
new file mode 100644
index 000000000000..f8f5e18fcf5e
--- /dev/null
+++ b/test/CodeGen/ARM/saxpy10-a9.ll
@@ -0,0 +1,135 @@
+; RUN: llc < %s -march=arm -mtriple=thumbv7-apple-ios7.0.0 -float-abi=hard -mcpu=cortex-a9 -misched-postra -misched-bench -scheditins=false | FileCheck %s
+;
+; Test that MI-Sched supports latency-based stalls on an in-order pipeline
+; using the new machine model.
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+
+; Don't be too strict with the top of the schedule, but most of it
+; should be nicely pipelined.
+;
+; CHECK: saxpy10:
+; CHECK: vldr
+; CHECK: vldr
+; CHECK: vldr
+; CHECK: vldr
+; CHECK: vldr
+; CHECK: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vmul
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vldr
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vadd
+; CHECK-NEXT: vmov
+; CHECK-NEXT: bx
+;
+; This accumulates a sum rather than storing each result.
+define float @saxpy10(float* nocapture readonly %data1, float* nocapture readonly %data2, float %a) {
+entry:
+ %0 = load float* %data1, align 4
+ %mul = fmul float %0, %a
+ %1 = load float* %data2, align 4
+ %add = fadd float %mul, %1
+ %add2 = fadd float %add, 0.000000e+00
+ %arrayidx.1 = getelementptr inbounds float* %data1, i32 1
+ %2 = load float* %arrayidx.1, align 4
+ %mul.1 = fmul float %2, %a
+ %arrayidx1.1 = getelementptr inbounds float* %data2, i32 1
+ %3 = load float* %arrayidx1.1, align 4
+ %add.1 = fadd float %mul.1, %3
+ %add2.1 = fadd float %add2, %add.1
+ %arrayidx.2 = getelementptr inbounds float* %data1, i32 2
+ %4 = load float* %arrayidx.2, align 4
+ %mul.2 = fmul float %4, %a
+ %arrayidx1.2 = getelementptr inbounds float* %data2, i32 2
+ %5 = load float* %arrayidx1.2, align 4
+ %add.2 = fadd float %mul.2, %5
+ %add2.2 = fadd float %add2.1, %add.2
+ %arrayidx.3 = getelementptr inbounds float* %data1, i32 3
+ %6 = load float* %arrayidx.3, align 4
+ %mul.3 = fmul float %6, %a
+ %arrayidx1.3 = getelementptr inbounds float* %data2, i32 3
+ %7 = load float* %arrayidx1.3, align 4
+ %add.3 = fadd float %mul.3, %7
+ %add2.3 = fadd float %add2.2, %add.3
+ %arrayidx.4 = getelementptr inbounds float* %data1, i32 4
+ %8 = load float* %arrayidx.4, align 4
+ %mul.4 = fmul float %8, %a
+ %arrayidx1.4 = getelementptr inbounds float* %data2, i32 4
+ %9 = load float* %arrayidx1.4, align 4
+ %add.4 = fadd float %mul.4, %9
+ %add2.4 = fadd float %add2.3, %add.4
+ %arrayidx.5 = getelementptr inbounds float* %data1, i32 5
+ %10 = load float* %arrayidx.5, align 4
+ %mul.5 = fmul float %10, %a
+ %arrayidx1.5 = getelementptr inbounds float* %data2, i32 5
+ %11 = load float* %arrayidx1.5, align 4
+ %add.5 = fadd float %mul.5, %11
+ %add2.5 = fadd float %add2.4, %add.5
+ %arrayidx.6 = getelementptr inbounds float* %data1, i32 6
+ %12 = load float* %arrayidx.6, align 4
+ %mul.6 = fmul float %12, %a
+ %arrayidx1.6 = getelementptr inbounds float* %data2, i32 6
+ %13 = load float* %arrayidx1.6, align 4
+ %add.6 = fadd float %mul.6, %13
+ %add2.6 = fadd float %add2.5, %add.6
+ %arrayidx.7 = getelementptr inbounds float* %data1, i32 7
+ %14 = load float* %arrayidx.7, align 4
+ %mul.7 = fmul float %14, %a
+ %arrayidx1.7 = getelementptr inbounds float* %data2, i32 7
+ %15 = load float* %arrayidx1.7, align 4
+ %add.7 = fadd float %mul.7, %15
+ %add2.7 = fadd float %add2.6, %add.7
+ %arrayidx.8 = getelementptr inbounds float* %data1, i32 8
+ %16 = load float* %arrayidx.8, align 4
+ %mul.8 = fmul float %16, %a
+ %arrayidx1.8 = getelementptr inbounds float* %data2, i32 8
+ %17 = load float* %arrayidx1.8, align 4
+ %add.8 = fadd float %mul.8, %17
+ %add2.8 = fadd float %add2.7, %add.8
+ %arrayidx.9 = getelementptr inbounds float* %data1, i32 9
+ %18 = load float* %arrayidx.9, align 4
+ %mul.9 = fmul float %18, %a
+ %arrayidx1.9 = getelementptr inbounds float* %data2, i32 9
+ %19 = load float* %arrayidx1.9, align 4
+ %add.9 = fadd float %mul.9, %19
+ %add2.9 = fadd float %add2.8, %add.9
+ ret float %add2.9
+}
diff --git a/test/CodeGen/ARM/sbfx.ll b/test/CodeGen/ARM/sbfx.ll
index 36fbd1939c55..3c25edcaa75c 100644
--- a/test/CodeGen/ARM/sbfx.ll
+++ b/test/CodeGen/ARM/sbfx.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6t2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
entry:
diff --git a/test/CodeGen/ARM/segmented-stacks-dynamic.ll b/test/CodeGen/ARM/segmented-stacks-dynamic.ll
new file mode 100644
index 000000000000..86f8ff8dd90b
--- /dev/null
+++ b/test/CodeGen/ARM/segmented-stacks-dynamic.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -mtriple=arm-linux-androideabi -verify-machineinstrs | FileCheck %s -check-prefix=ARM-android
+; RUN: llc < %s -mtriple=arm-linux-unknown-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=ARM-linux
+; RUN: llc < %s -mtriple=arm-linux-androideabi -filetype=obj
+; RUN: llc < %s -mtriple=arm-linux-unknown-gnueabi -filetype=obj
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define i32 @test_basic(i32 %l) #0 {
+ %mem = alloca i32, i32 %l
+ call void @dummy_use (i32* %mem, i32 %l)
+ %terminate = icmp eq i32 %l, 0
+ br i1 %terminate, label %true, label %false
+
+true:
+ ret i32 0
+
+false:
+ %newlen = sub i32 %l, 1
+ %retvalue = call i32 @test_basic(i32 %newlen)
+ ret i32 %retvalue
+
+; ARM-linux: test_basic:
+
+; ARM-linux: push {r4, r5}
+; ARM-linux-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-linux-NEXT: mov r5, sp
+; ARM-linux-NEXT: ldr r4, [r4, #4]
+; ARM-linux-NEXT: cmp r4, r5
+; ARM-linux-NEXT: blo .LBB0_2
+
+; ARM-linux: mov r4, #16
+; ARM-linux-NEXT: mov r5, #0
+; ARM-linux-NEXT: stmdb sp!, {lr}
+; ARM-linux-NEXT: bl __morestack
+; ARM-linux-NEXT: ldm sp!, {lr}
+; ARM-linux-NEXT: pop {r4, r5}
+; ARM-linux-NEXT: bx lr
+
+; ARM-linux: pop {r4, r5}
+
+
+; ARM-android: test_basic:
+
+; ARM-android: push {r4, r5}
+; ARM-android-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-android-NEXT: mov r5, sp
+; ARM-android-NEXT: ldr r4, [r4, #252]
+; ARM-android-NEXT: cmp r4, r5
+; ARM-android-NEXT: blo .LBB0_2
+
+; ARM-android: mov r4, #16
+; ARM-android-NEXT: mov r5, #0
+; ARM-android-NEXT: stmdb sp!, {lr}
+; ARM-android-NEXT: bl __morestack
+; ARM-android-NEXT: ldm sp!, {lr}
+; ARM-android-NEXT: pop {r4, r5}
+; ARM-android-NEXT: bx lr
+
+; ARM-android: pop {r4, r5}
+
+}
+
+attributes #0 = { "split-stack" }
diff --git a/test/CodeGen/ARM/segmented-stacks.ll b/test/CodeGen/ARM/segmented-stacks.ll
new file mode 100644
index 000000000000..9873bf332948
--- /dev/null
+++ b/test/CodeGen/ARM/segmented-stacks.ll
@@ -0,0 +1,249 @@
+; RUN: llc < %s -mtriple=arm-linux-androideabi -verify-machineinstrs | FileCheck %s -check-prefix=ARM-android
+; RUN: llc < %s -mtriple=arm-linux-unknown-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=ARM-linux
+
+; We used to crash with filetype=obj
+; RUN: llc < %s -mtriple=arm-linux-androideabi -filetype=obj
+; RUN: llc < %s -mtriple=arm-linux-unknown-gnueabi -filetype=obj
+
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define void @test_basic() #0 {
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret void
+
+; ARM-linux: test_basic:
+
+; ARM-linux: push {r4, r5}
+; ARM-linux-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-linux-NEXT: mov r5, sp
+; ARM-linux-NEXT: ldr r4, [r4, #4]
+; ARM-linux-NEXT: cmp r4, r5
+; ARM-linux-NEXT: blo .LBB0_2
+
+; ARM-linux: mov r4, #48
+; ARM-linux-NEXT: mov r5, #0
+; ARM-linux-NEXT: stmdb sp!, {lr}
+; ARM-linux-NEXT: bl __morestack
+; ARM-linux-NEXT: ldm sp!, {lr}
+; ARM-linux-NEXT: pop {r4, r5}
+; ARM-linux-NEXT: bx lr
+
+; ARM-linux: pop {r4, r5}
+
+; ARM-android: test_basic:
+
+; ARM-android: push {r4, r5}
+; ARM-android-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-android-NEXT: mov r5, sp
+; ARM-android-NEXT: ldr r4, [r4, #252]
+; ARM-android-NEXT: cmp r4, r5
+; ARM-android-NEXT: blo .LBB0_2
+
+; ARM-android: mov r4, #48
+; ARM-android-NEXT: mov r5, #0
+; ARM-android-NEXT: stmdb sp!, {lr}
+; ARM-android-NEXT: bl __morestack
+; ARM-android-NEXT: ldm sp!, {lr}
+; ARM-android-NEXT: pop {r4, r5}
+; ARM-android-NEXT: bx lr
+
+; ARM-android: pop {r4, r5}
+
+}
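+; (A hedged reading of the prologue checked above, not authoritative
+; documentation: mrc p15, #0, r4, c13, c0, #3 reads the thread pointer, the
+; ldr fetches what is presumably the cached stack limit at a fixed offset in
+; the thread control block (#4 for the gnueabi target, #252 for the Android
+; target, hence the two prefixes), and the cmp/blo skips the __morestack path
+; when sp is still above the limit. The __morestack call appears to take the
+; required frame size in r4 and the incoming-argument size in r5, here #48
+; and #0.)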
+
+define i32 @test_nested(i32 * nest %closure, i32 %other) #0 {
+ %addend = load i32 * %closure
+ %result = add i32 %other, %addend
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret i32 %result
+
+; ARM-linux: test_nested:
+
+; ARM-linux: push {r4, r5}
+; ARM-linux-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-linux-NEXT: mov r5, sp
+; ARM-linux-NEXT: ldr r4, [r4, #4]
+; ARM-linux-NEXT: cmp r4, r5
+; ARM-linux-NEXT: blo .LBB1_2
+
+; ARM-linux: mov r4, #56
+; ARM-linux-NEXT: mov r5, #0
+; ARM-linux-NEXT: stmdb sp!, {lr}
+; ARM-linux-NEXT: bl __morestack
+; ARM-linux-NEXT: ldm sp!, {lr}
+; ARM-linux-NEXT: pop {r4, r5}
+; ARM-linux-NEXT: bx lr
+
+; ARM-linux: pop {r4, r5}
+
+; ARM-android: test_nested:
+
+; ARM-android: push {r4, r5}
+; ARM-android-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-android-NEXT: mov r5, sp
+; ARM-android-NEXT: ldr r4, [r4, #252]
+; ARM-android-NEXT: cmp r4, r5
+; ARM-android-NEXT: blo .LBB1_2
+
+; ARM-android: mov r4, #56
+; ARM-android-NEXT: mov r5, #0
+; ARM-android-NEXT: stmdb sp!, {lr}
+; ARM-android-NEXT: bl __morestack
+; ARM-android-NEXT: ldm sp!, {lr}
+; ARM-android-NEXT: pop {r4, r5}
+; ARM-android-NEXT: bx lr
+
+; ARM-android: pop {r4, r5}
+
+}
+
+define void @test_large() #0 {
+ %mem = alloca i32, i32 10000
+ call void @dummy_use (i32* %mem, i32 0)
+ ret void
+
+; ARM-linux: test_large:
+
+; ARM-linux: push {r4, r5}
+; ARM-linux-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-linux-NEXT: sub r5, sp, #40192
+; ARM-linux-NEXT: ldr r4, [r4, #4]
+; ARM-linux-NEXT: cmp r4, r5
+; ARM-linux-NEXT: blo .LBB2_2
+
+; ARM-linux: mov r4, #40192
+; ARM-linux-NEXT: mov r5, #0
+; ARM-linux-NEXT: stmdb sp!, {lr}
+; ARM-linux-NEXT: bl __morestack
+; ARM-linux-NEXT: ldm sp!, {lr}
+; ARM-linux-NEXT: pop {r4, r5}
+; ARM-linux-NEXT: bx lr
+
+; ARM-linux: pop {r4, r5}
+
+; ARM-android: test_large:
+
+; ARM-android: push {r4, r5}
+; ARM-android-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-android-NEXT: sub r5, sp, #40192
+; ARM-android-NEXT: ldr r4, [r4, #252]
+; ARM-android-NEXT: cmp r4, r5
+; ARM-android-NEXT: blo .LBB2_2
+
+; ARM-android: mov r4, #40192
+; ARM-android-NEXT: mov r5, #0
+; ARM-android-NEXT: stmdb sp!, {lr}
+; ARM-android-NEXT: bl __morestack
+; ARM-android-NEXT: ldm sp!, {lr}
+; ARM-android-NEXT: pop {r4, r5}
+; ARM-android-NEXT: bx lr
+
+; ARM-android: pop {r4, r5}
+
+}
+
+define fastcc void @test_fastcc() #0 {
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret void
+
+; ARM-linux: test_fastcc:
+
+; ARM-linux: push {r4, r5}
+; ARM-linux-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-linux-NEXT: mov r5, sp
+; ARM-linux-NEXT: ldr r4, [r4, #4]
+; ARM-linux-NEXT: cmp r4, r5
+; ARM-linux-NEXT: blo .LBB3_2
+
+; ARM-linux: mov r4, #48
+; ARM-linux-NEXT: mov r5, #0
+; ARM-linux-NEXT: stmdb sp!, {lr}
+; ARM-linux-NEXT: bl __morestack
+; ARM-linux-NEXT: ldm sp!, {lr}
+; ARM-linux-NEXT: pop {r4, r5}
+; ARM-linux-NEXT: bx lr
+
+; ARM-linux: pop {r4, r5}
+
+; ARM-android: test_fastcc:
+
+; ARM-android: push {r4, r5}
+; ARM-android-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-android-NEXT: mov r5, sp
+; ARM-android-NEXT: ldr r4, [r4, #252]
+; ARM-android-NEXT: cmp r4, r5
+; ARM-android-NEXT: blo .LBB3_2
+
+; ARM-android: mov r4, #48
+; ARM-android-NEXT: mov r5, #0
+; ARM-android-NEXT: stmdb sp!, {lr}
+; ARM-android-NEXT: bl __morestack
+; ARM-android-NEXT: ldm sp!, {lr}
+; ARM-android-NEXT: pop {r4, r5}
+; ARM-android-NEXT: bx lr
+
+; ARM-android: pop {r4, r5}
+
+}
+
+define fastcc void @test_fastcc_large() #0 {
+ %mem = alloca i32, i32 10000
+ call void @dummy_use (i32* %mem, i32 0)
+ ret void
+
+; ARM-linux: test_fastcc_large:
+
+; ARM-linux: push {r4, r5}
+; ARM-linux-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-linux-NEXT: sub r5, sp, #40192
+; ARM-linux-NEXT: ldr r4, [r4, #4]
+; ARM-linux-NEXT: cmp r4, r5
+; ARM-linux-NEXT: blo .LBB4_2
+
+; ARM-linux: mov r4, #40192
+; ARM-linux-NEXT: mov r5, #0
+; ARM-linux-NEXT: stmdb sp!, {lr}
+; ARM-linux-NEXT: bl __morestack
+; ARM-linux-NEXT: ldm sp!, {lr}
+; ARM-linux-NEXT: pop {r4, r5}
+; ARM-linux-NEXT: bx lr
+
+; ARM-linux: pop {r4, r5}
+
+; ARM-android: test_fastcc_large:
+
+; ARM-android: push {r4, r5}
+; ARM-android-NEXT: mrc p15, #0, r4, c13, c0, #3
+; ARM-android-NEXT: sub r5, sp, #40192
+; ARM-android-NEXT: ldr r4, [r4, #252]
+; ARM-android-NEXT: cmp r4, r5
+; ARM-android-NEXT: blo .LBB4_2
+
+; ARM-android: mov r4, #40192
+; ARM-android-NEXT: mov r5, #0
+; ARM-android-NEXT: stmdb sp!, {lr}
+; ARM-android-NEXT: bl __morestack
+; ARM-android-NEXT: ldm sp!, {lr}
+; ARM-android-NEXT: pop {r4, r5}
+; ARM-android-NEXT: bx lr
+
+; ARM-android: pop {r4, r5}
+
+}
+
+define void @test_nostack() #0 {
+ ret void
+
+; ARM-linux-LABEL: test_nostack:
+; ARM-linux-NOT: bl __morestack
+
+; ARM-android-LABEL: test_nostack:
+; ARM-android-NOT: bl __morestack
+}
+
+attributes #0 = { "split-stack" }
diff --git a/test/CodeGen/ARM/select-imm.ll b/test/CodeGen/ARM/select-imm.ll
index 6f4bfb81d51b..e2dc5542df04 100644
--- a/test/CodeGen/ARM/select-imm.ll
+++ b/test/CodeGen/ARM/select-imm.ll
@@ -1,6 +1,10 @@
-; RUN: llc < %s -march=arm | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s --check-prefix=ARMT2
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s --check-prefix=THUMB2
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s --check-prefix=ARM
+
+; RUN: llc -mtriple=arm-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
+; RUN: | FileCheck %s --check-prefix=ARMT2
+
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
+; RUN: | FileCheck %s --check-prefix=THUMB2
define i32 @t1(i32 %c) nounwind readnone {
entry:
diff --git a/test/CodeGen/ARM/select-undef.ll b/test/CodeGen/ARM/select-undef.ll
index 23f7eb8b352f..bae4d40d3364 100644
--- a/test/CodeGen/ARM/select-undef.ll
+++ b/test/CodeGen/ARM/select-undef.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=swift -verify-machineinstrs
+; RUN: llc -mtriple=arm-eabi -mcpu=swift -verify-machineinstrs %s -o /dev/null
+
define i32 @func(i32 %arg0, i32 %arg1) {
entry:
%cmp = icmp slt i32 %arg0, 10
diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll
index ed006d643f87..e9394a720738 100644
--- a/test/CodeGen/ARM/select.ll
+++ b/test/CodeGen/ARM/select.ll
@@ -1,6 +1,10 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s --check-prefix=CHECK-VFP
-; RUN: llc < %s -mattr=+neon,+thumb2 -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=CHECK-NEON
+; RUN: llc -mtriple=arm-apple-darwin %s -o - | FileCheck %s
+
+; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
+; RUN: | FileCheck %s --check-prefix=CHECK-VFP
+
+; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=+neon,+thumb2 %s -o - \
+; RUN: | FileCheck %s --check-prefix=CHECK-NEON
define i32 @f1(i32 %a.s) {
;CHECK-LABEL: f1:
diff --git a/test/CodeGen/ARM/setcc-sentinals.ll b/test/CodeGen/ARM/setcc-sentinals.ll
index 8878f9bf22df..dc45e0e13881 100644
--- a/test/CodeGen/ARM/setcc-sentinals.ll
+++ b/test/CodeGen/ARM/setcc-sentinals.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -mcpu=cortex-a8 -march=arm -asm-verbose=false | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -asm-verbose=false %s -o - | FileCheck %s
define zeroext i1 @test0(i32 %x) nounwind {
; CHECK-LABEL: test0:
-; CHECK-NEXT: add [[REG:(r[0-9]+)|(lr)]], r0, #1
+; CHECK: add [[REG:(r[0-9]+)|(lr)]], r0, #1
; CHECK-NEXT: mov r0, #0
; CHECK-NEXT: cmp [[REG]], #1
; CHECK-NEXT: movwhi r0, #1
diff --git a/test/CodeGen/ARM/sjljehprepare-lower-empty-struct.ll b/test/CodeGen/ARM/sjljehprepare-lower-empty-struct.ll
new file mode 100644
index 000000000000..3cf2a08fe35d
--- /dev/null
+++ b/test/CodeGen/ARM/sjljehprepare-lower-empty-struct.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mtriple=armv7-apple-ios -O0 < %s | FileCheck %s
+; RUN: llc -mtriple=armv7-apple-ios -O1 < %s | FileCheck %s
+; RUN: llc -mtriple=armv7-apple-ios -O2 < %s | FileCheck %s
+; RUN: llc -mtriple=armv7-apple-ios -O3 < %s | FileCheck %s
+
+; SjLjEHPrepare shouldn't crash when lowering empty structs.
+;
+; Checks that in the case of empty structs used as arguments
+; nothing happens, i.e. there are no instructions between
+; __Unwind_SjLj_Register and the actual @bar invocation.
+
+
+define i8* @foo(i8 %a, {} %c) {
+entry:
+; CHECK: bl __Unwind_SjLj_Register
+; CHECK-NEXT: {{[A-Z][a-zA-Z0-9]*}}:
+; CHECK-NEXT: bl _bar
+ invoke void @bar ()
+ to label %unreachable unwind label %handler
+
+unreachable:
+ unreachable
+
+handler:
+ %tmp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @baz to i8*)
+ cleanup
+ resume { i8*, i32 } undef
+}
+
+declare void @bar()
+declare i32 @baz(...)
diff --git a/test/CodeGen/ARM/smml.ll b/test/CodeGen/ARM/smml.ll
index 99df0d4c96b9..fc73eb76d773 100644
--- a/test/CodeGen/ARM/smml.ll
+++ b/test/CodeGen/ARM/smml.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
+
define i32 @f(i32 %a, i32 %b, i32 %c) nounwind readnone ssp {
entry:
; CHECK-NOT: smmls
diff --git a/test/CodeGen/ARM/smul.ll b/test/CodeGen/ARM/smul.ll
index 686d791ce60d..b7ddd10a5682 100644
--- a/test/CodeGen/ARM/smul.ll
+++ b/test/CodeGen/ARM/smul.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=generic
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=generic %s -o /dev/null
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
@x = weak global i16 0 ; <i16*> [#uses=1]
@y = weak global i16 0 ; <i16*> [#uses=0]
diff --git a/test/CodeGen/ARM/spill-q.ll b/test/CodeGen/ARM/spill-q.ll
index b9246635e408..4fa97ea5b689 100644
--- a/test/CodeGen/ARM/spill-q.ll
+++ b/test/CodeGen/ARM/spill-q.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-elf -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-elf -mattr=+neon -arm-atomic-cfg-tidy=0 | FileCheck %s
; PR4789
%bar = type { float, float, float }
diff --git a/test/CodeGen/ARM/ssp-data-layout.ll b/test/CodeGen/ARM/ssp-data-layout.ll
new file mode 100644
index 000000000000..e7dafac7020d
--- /dev/null
+++ b/test/CodeGen/ARM/ssp-data-layout.ll
@@ -0,0 +1,528 @@
+; RUN: llc < %s -disable-fp-elim -march=arm -mcpu=cortex-a8 -mtriple arm-linux-gnu -o - | FileCheck %s
+; This test is fairly fragile. The goal is to ensure that "large" stack
+; objects are allocated closest to the stack protector (i.e., farthest away
+; from the Stack Pointer.) In standard SSP mode this means that large (>=
+; ssp-buffer-size) arrays and structures containing such arrays are
+; closest to the protector. With sspstrong and sspreq this means large
+; arrays/structures-with-arrays are closest, followed by small (< ssp-buffer-size)
+; arrays/structures-with-arrays, and then addr-taken variables.
+;
+; Ideally, we only want to verify that the objects appear in the correct groups
+; and that the groups have the correct relative stack offset. The ordering
+; within a group is not relevant to this test. Unfortunately, there is not
+; an elegant way to do this, so just match the offset for each object.
+
+%struct.struct_large_char = type { [8 x i8] }
+%struct.struct_large_char2 = type { [2 x i8], [8 x i8] }
+%struct.struct_small_char = type { [2 x i8] }
+%struct.struct_large_nonchar = type { [8 x i32] }
+%struct.struct_small_nonchar = type { [2 x i16] }
+
+define void @layout_ssp() ssp {
+entry:
+; Expected stack layout for ssp is
+; 180 large_char . Group 1, nested arrays, arrays >= ssp-buffer-size
+; 172 struct_large_char .
+; 168 scalar1 | Everything else
+; 164 scalar2
+; 160 scalar3
+; 156 addr-of
+; 152 small_nonchar (84+68)
+; 112 large_nonchar
+; 110 small_char
+; 108 struct_small_char
+; 72 struct_large_nonchar
+; 68 struct_small_nonchar
+
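+; (Sanity check of the figures above against the CHECK lines below: r[[SP]] is
+; materialized as sp+68, so the strh at [r[[SP]], #84] lands at sp+152,
+; matching the "152 small_nonchar (84+68)" entry, and the strh at [r[[SP]]]
+; lands at sp+68, the struct_small_nonchar slot.)
+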
+; CHECK: layout_ssp:
+; r[[SP]] is used as an offset into the stack later
+; CHECK: add r[[SP:[0-9]+]], sp, #68
+
+; CHECK: bl get_scalar1
+; CHECK: str r0, [sp, #168]
+; CHECK: bl end_scalar1
+
+; CHECK: bl get_scalar2
+; CHECK: str r0, [sp, #164]
+; CHECK: bl end_scalar2
+
+; CHECK: bl get_scalar3
+; CHECK: str r0, [sp, #160]
+; CHECK: bl end_scalar3
+
+; CHECK: bl get_addrof
+; CHECK: str r0, [sp, #156]
+; CHECK: bl end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: strh r0, [r[[SP]], #84]
+; CHECK: bl end_small_nonchar
+
+; CHECK: bl get_large_nonchar
+; CHECK: str r0, [sp, #112]
+; CHECK: bl end_large_nonchar
+
+; CHECK: bl get_small_char
+; CHECK: strb r0, [sp, #110]
+; CHECK: bl end_small_char
+
+; CHECK: bl get_large_char
+; CHECK: strb r0, [sp, #180]
+; CHECK: bl end_large_char
+
+; CHECK: bl get_struct_large_char
+; CHECK: strb r0, [sp, #172]
+; CHECK: bl end_struct_large_char
+
+; CHECK: bl get_struct_small_char
+; CHECK: strb r0, [sp, #108]
+; CHECK: bl end_struct_small_char
+
+; CHECK: bl get_struct_large_nonchar
+; CHECK: str r0, [sp, #72]
+; CHECK: bl end_struct_large_nonchar
+
+; CHECK: bl get_struct_small_nonchar
+; CHECK: strh r0, [r[[SP]]]
+; CHECK: bl end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @layout_sspstrong() sspstrong {
+entry:
+; Expected stack layout for sspstrong is
+; 144 large_nonchar . Group 1, nested arrays,
+; 136 large_char . arrays >= ssp-buffer-size
+; 128 struct_large_char .
+; 96 struct_large_nonchar .
+; 84+8 small_non_char | Group 2, nested arrays,
+; 90 small_char | arrays < ssp-buffer-size
+; 88 struct_small_char |
+; 84 struct_small_nonchar |
+; 80 addrof * Group 3, addr-of local
+; 76 scalar1 + Group 4, everything else
+; 72 scalar2 +
+; 68 scalar3 +
+;
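+; (Same arithmetic as the ssp case above: here r[[SP]] is sp+84, so the strh
+; at [r[[SP]], #8] lands at sp+92, the "84+8" small_non_char entry, and the
+; strh at [r[[SP]]] lands at sp+84, the struct_small_nonchar slot.)
+;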
+; CHECK: layout_sspstrong:
+; r[[SP]] is used as an offset into the stack later
+; CHECK: add r[[SP:[0-9]+]], sp, #84
+
+; CHECK: bl get_scalar1
+; CHECK: str r0, [sp, #76]
+; CHECK: bl end_scalar1
+
+; CHECK: bl get_scalar2
+; CHECK: str r0, [sp, #72]
+; CHECK: bl end_scalar2
+
+; CHECK: bl get_scalar3
+; CHECK: str r0, [sp, #68]
+; CHECK: bl end_scalar3
+
+; CHECK: bl get_addrof
+; CHECK: str r0, [sp, #80]
+; CHECK: bl end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: strh r0, [r[[SP]], #8]
+; CHECK: bl end_small_nonchar
+
+; CHECK: bl get_large_nonchar
+; CHECK: str r0, [sp, #144]
+; CHECK: bl end_large_nonchar
+
+; CHECK: bl get_small_char
+; CHECK: strb r0, [sp, #90]
+; CHECK: bl end_small_char
+
+; CHECK: bl get_large_char
+; CHECK: strb r0, [sp, #136]
+; CHECK: bl end_large_char
+
+; CHECK: bl get_struct_large_char
+; CHECK: strb r0, [sp, #128]
+; CHECK: bl end_struct_large_char
+
+; CHECK: bl get_struct_small_char
+; CHECK: strb r0, [sp, #88]
+; CHECK: bl end_struct_small_char
+
+; CHECK: bl get_struct_large_nonchar
+; CHECK: str r0, [sp, #96]
+; CHECK: bl end_struct_large_nonchar
+
+; CHECK: bl get_struct_small_nonchar
+; CHECK: strh r0, [r[[SP]]]
+; CHECK: bl end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @layout_sspreq() sspreq {
+entry:
+; Expected stack layout for sspreq is the same as sspstrong
+;
+; CHECK: layout_sspreq:
+; r[[SP]] is used as an offset into the stack later
+; CHECK: add r[[SP:[0-9]+]], sp, #84
+
+; CHECK: bl get_scalar1
+; CHECK: str r0, [sp, #76]
+; CHECK: bl end_scalar1
+
+; CHECK: bl get_scalar2
+; CHECK: str r0, [sp, #72]
+; CHECK: bl end_scalar2
+
+; CHECK: bl get_scalar3
+; CHECK: str r0, [sp, #68]
+; CHECK: bl end_scalar3
+
+; CHECK: bl get_addrof
+; CHECK: str r0, [sp, #80]
+; CHECK: bl end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: strh r0, [r[[SP]], #8]
+; CHECK: bl end_small_nonchar
+
+; CHECK: bl get_large_nonchar
+; CHECK: str r0, [sp, #144]
+; CHECK: bl end_large_nonchar
+
+; CHECK: bl get_small_char
+; CHECK: strb r0, [sp, #90]
+; CHECK: bl end_small_char
+
+; CHECK: bl get_large_char
+; CHECK: strb r0, [sp, #136]
+; CHECK: bl end_large_char
+
+; CHECK: bl get_struct_large_char
+; CHECK: strb r0, [sp, #128]
+; CHECK: bl end_struct_large_char
+
+; CHECK: bl get_struct_small_char
+; CHECK: strb r0, [sp, #88]
+; CHECK: bl end_struct_small_char
+
+; CHECK: bl get_struct_large_nonchar
+; CHECK: str r0, [sp, #96]
+; CHECK: bl end_struct_large_nonchar
+
+; CHECK: bl get_struct_small_nonchar
+; CHECK: strh r0, [r[[SP]]]
+; CHECK: bl end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @struct_with_protectable_arrays() sspstrong {
+entry:
+; Check to ensure that a structure which contains a small array followed by a
+; large array is assigned to the stack properly as a large object.
+; CHECK: struct_with_protectable_arrays:
+; CHECK: bl get_struct_small_char
+; CHECK: strb r0, [sp, #68]
+; CHECK: bl end_struct_small_char
+; CHECK: bl get_struct_large_char2
+; CHECK: strb r0, [sp, #106]
+; CHECK: bl end_struct_large_char2
+ %a = alloca %struct.struct_small_char, align 1
+ %b = alloca %struct.struct_large_char2, align 1
+ %d1 = alloca %struct.struct_large_nonchar, align 8
+ %d2 = alloca %struct.struct_small_nonchar, align 2
+ %call = call signext i8 @get_struct_small_char()
+ %foo = getelementptr inbounds %struct.struct_small_char* %a, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x i8]* %foo, i32 0, i64 0
+ store i8 %call, i8* %arrayidx, align 1
+ call void @end_struct_small_char()
+ %call1 = call signext i8 @get_struct_large_char2()
+ %foo2 = getelementptr inbounds %struct.struct_large_char2* %b, i32 0, i32 1
+ %arrayidx3 = getelementptr inbounds [8 x i8]* %foo2, i32 0, i64 0
+ store i8 %call1, i8* %arrayidx3, align 1
+ call void @end_struct_large_char2()
+ %0 = bitcast %struct.struct_large_char2* %b to %struct.struct_large_char*
+ %coerce.dive = getelementptr %struct.struct_large_char* %0, i32 0, i32 0
+ %1 = bitcast [8 x i8]* %coerce.dive to i64*
+ %2 = load i64* %1, align 1
+ %coerce.dive4 = getelementptr %struct.struct_small_char* %a, i32 0, i32 0
+ %3 = bitcast [2 x i8]* %coerce.dive4 to i16*
+ %4 = load i16* %3, align 1
+ %coerce.dive5 = getelementptr %struct.struct_small_nonchar* %d2, i32 0, i32 0
+ %5 = bitcast [2 x i16]* %coerce.dive5 to i32*
+ %6 = load i32* %5, align 1
+ call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval align 8 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0)
+ ret void
+}
+
+declare i32 @get_scalar1()
+declare void @end_scalar1()
+
+declare i32 @get_scalar2()
+declare void @end_scalar2()
+
+declare i32 @get_scalar3()
+declare void @end_scalar3()
+
+declare i32 @get_addrof()
+declare void @end_addrof()
+
+declare signext i16 @get_small_nonchar()
+declare void @end_small_nonchar()
+
+declare i32 @get_large_nonchar()
+declare void @end_large_nonchar()
+
+declare signext i8 @get_small_char()
+declare void @end_small_char()
+
+declare signext i8 @get_large_char()
+declare void @end_large_char()
+
+declare signext i8 @get_struct_large_char()
+declare void @end_struct_large_char()
+
+declare signext i8 @get_struct_large_char2()
+declare void @end_struct_large_char2()
+
+declare signext i8 @get_struct_small_char()
+declare void @end_struct_small_char()
+
+declare i32 @get_struct_large_nonchar()
+declare void @end_struct_large_nonchar()
+
+declare signext i16 @get_struct_small_nonchar()
+declare void @end_struct_small_nonchar()
+
+declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
diff --git a/test/CodeGen/ARM/stack-frame.ll b/test/CodeGen/ARM/stack-frame.ll
index 1dd57ddb9f2f..a3b0b66b1340 100644
--- a/test/CodeGen/ARM/stack-frame.ll
+++ b/test/CodeGen/ARM/stack-frame.ll
@@ -1,13 +1,14 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | grep add | count 1
+; RUN: llc -mtriple=arm-eabi < %s -o - | FileCheck %s
define void @f1() {
%c = alloca i8, align 1
ret void
}
+; CHECK-LABEL: f1:
+; CHECK: add
define i32 @f2() {
ret i32 1
}
-
-
+; CHECK-LABEL: f2:
+; CHECK-NOT: add
diff --git a/test/CodeGen/ARM/stackpointer.ll b/test/CodeGen/ARM/stackpointer.ll
new file mode 100644
index 000000000000..420a9166d790
--- /dev/null
+++ b/test/CodeGen/ARM/stackpointer.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
+
+define i32 @get_stack() nounwind {
+entry:
+; CHECK-LABEL: get_stack:
+; CHECK: mov r0, sp
+ %sp = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %sp
+}
+
+define void @set_stack(i32 %val) nounwind {
+entry:
+; CHECK-LABEL: set_stack:
+; CHECK: mov sp, r0
+ call void @llvm.write_register.i32(metadata !0, i32 %val)
+ ret void
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+declare void @llvm.write_register.i32(metadata, i32) nounwind
+
+; register unsigned long current_stack_pointer asm("sp");
+; CHECK-NOT: .asciz "sp"
+!0 = metadata !{metadata !"sp\00"}
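+
+; For context, a rough C-level sketch of how such a named register variable is
+; typically read and written (illustrative only; the function names below are
+; assumptions, not taken from this test):
+;   register unsigned long current_stack_pointer asm("sp");
+;   unsigned long read_sp(void)    { return current_stack_pointer; }
+;   void write_sp(unsigned long v) { current_stack_pointer = v; }
+; Clang is expected to lower such accesses to the llvm.read_register /
+; llvm.write_register intrinsics with the named-register metadata used above.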
diff --git a/test/CodeGen/ARM/str_post.ll b/test/CodeGen/ARM/str_post.ll
index 32e3b856c03c..a4f864065d5e 100644
--- a/test/CodeGen/ARM/str_post.ll
+++ b/test/CodeGen/ARM/str_post.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i16 @test1(i32* %X, i16* %A) {
; CHECK-LABEL: test1:
diff --git a/test/CodeGen/ARM/str_pre.ll b/test/CodeGen/ARM/str_pre.ll
index d8b3f0e767b5..60e6e9ecfeb3 100644
--- a/test/CodeGen/ARM/str_pre.ll
+++ b/test/CodeGen/ARM/str_pre.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm | \
-; RUN: grep "str.*\!" | count 2
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define void @test1(i32* %X, i32* %A, i32** %dest) {
%B = load i32* %A ; <i32> [#uses=1]
@@ -16,3 +15,8 @@ define i16* @test2(i16* %X, i32* %A) {
store i16 %tmp, i16* %Y
ret i16* %Y
}
+
+; CHECK: str{{.*}}!
+; CHECK: str{{.*}}!
+; CHECK-NOT: str{{.*}}!
+
diff --git a/test/CodeGen/ARM/str_trunc.ll b/test/CodeGen/ARM/str_trunc.ll
index 2f1166b64b59..6739684d53bf 100644
--- a/test/CodeGen/ARM/str_trunc.ll
+++ b/test/CodeGen/ARM/str_trunc.ll
@@ -1,7 +1,4 @@
-; RUN: llc < %s -march=arm | \
-; RUN: grep strb | count 1
-; RUN: llc < %s -march=arm | \
-; RUN: grep strh | count 1
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define void @test1(i32 %v, i16* %ptr) {
%tmp = trunc i32 %v to i16 ; <i16> [#uses=1]
@@ -14,3 +11,10 @@ define void @test2(i32 %v, i8* %ptr) {
store i8 %tmp, i8* %ptr
ret void
}
+
+; CHECK: strh
+; CHECK-NOT: strh
+
+; CHECK: strb
+; CHECK-NOT: strb
+
diff --git a/test/CodeGen/ARM/struct-byval-frame-index.ll b/test/CodeGen/ARM/struct-byval-frame-index.ll
index 465ee1218fda..0fd55ec6c943 100644
--- a/test/CodeGen/ARM/struct-byval-frame-index.ll
+++ b/test/CodeGen/ARM/struct-byval-frame-index.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=cortex-a15 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=cortex-a15 -verify-machineinstrs -arm-atomic-cfg-tidy=0 | FileCheck %s
; Check that a spill right after a function call with a large struct byval is correctly
; generated.
diff --git a/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll b/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll
index 189926941eb2..0a9bc3c87f92 100644
--- a/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll
+++ b/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll
@@ -13,7 +13,7 @@
;structs at varying alignments. Each test is run for arm, thumb2 and thumb1.
;We check for the strings in the generated object code using llvm-objdump
;because it provides better assurance that we are generating instructions
-;for the correct architecture. Otherwise we could accidently generate an
+;for the correct architecture. Otherwise we could accidentally generate an
;ARM instruction for THUMB1 and wouldn't detect it because the assembly
;code representation is the same, but the object code would be generated
;incorrectly. For each test we check for the label, a load instruction of the
diff --git a/test/CodeGen/ARM/sub.ll b/test/CodeGen/ARM/sub.ll
index 7f82ca701261..9ac314dd6081 100644
--- a/test/CodeGen/ARM/sub.ll
+++ b/test/CodeGen/ARM/sub.ll
@@ -1,10 +1,13 @@
-; RUN: llc -march=arm -mcpu=cortex-a8 < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -mtriple=armeb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
; 171 = 0x000000ab
define i64 @f1(i64 %a) {
; CHECK: f1
-; CHECK: subs r0, r0, #171
-; CHECK: sbc r1, r1, #0
+; CHECK-LE: subs r0, r0, #171
+; CHECK-LE: sbc r1, r1, #0
+; CHECK-BE: subs r1, r1, #171
+; CHECK-BE: sbc r0, r0, #0
%tmp = sub i64 %a, 171
ret i64 %tmp
}
@@ -12,8 +15,10 @@ define i64 @f1(i64 %a) {
; 66846720 = 0x03fc0000
define i64 @f2(i64 %a) {
; CHECK: f2
-; CHECK: subs r0, r0, #66846720
-; CHECK: sbc r1, r1, #0
+; CHECK-LE: subs r0, r0, #66846720
+; CHECK-LE: sbc r1, r1, #0
+; CHECK-BE: subs r1, r1, #66846720
+; CHECK-BE: sbc r0, r0, #0
%tmp = sub i64 %a, 66846720
ret i64 %tmp
}
@@ -21,8 +26,10 @@ define i64 @f2(i64 %a) {
; 734439407618 = 0x000000ab00000002
define i64 @f3(i64 %a) {
; CHECK: f3
-; CHECK: subs r0, r0, #2
-; CHECK: sbc r1, r1, #171
+; CHECK-LE: subs r0, r0, #2
+; CHECK-LE: sbc r1, r1, #171
+; CHECK-BE: subs r1, r1, #2
+; CHECK-BE: sbc r0, r0, #171
%tmp = sub i64 %a, 734439407618
ret i64 %tmp
}
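+
+; A worked decomposition for f3 (an explanatory note, not a CHECK):
+;   734439407618 = 0x000000ab00000002, i.e. high word 171 (0xab), low word 2.
+; Under the little-endian AAPCS the low word of an i64 sits in r0 and the high
+; word in r1, so the sequence is "subs r0, r0, #2" then "sbc r1, r1, #171";
+; the big-endian ABI swaps the register pair, giving the mirrored CHECK-BE
+; lines above.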
diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll
index 1bc0315354cb..d5abfc0af51b 100644
--- a/test/CodeGen/ARM/subreg-remat.ll
+++ b/test/CodeGen/ARM/subreg-remat.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -pre-RA-sched=source | FileCheck %s
+; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -pre-RA-sched=source -no-integrated-as | FileCheck %s
target triple = "thumbv7-apple-ios"
; <rdar://problem/10032939>
;
diff --git a/test/CodeGen/ARM/sxt_rot.ll b/test/CodeGen/ARM/sxt_rot.ll
index 656cd934fcf6..5ddea2ec13dc 100644
--- a/test/CodeGen/ARM/sxt_rot.ll
+++ b/test/CodeGen/ARM/sxt_rot.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
define i32 @test0(i8 %A) {
; CHECK: test0
diff --git a/test/CodeGen/ARM/t2-imm.ll b/test/CodeGen/ARM/t2-imm.ll
index 8b4145914e7c..23463b8a9e3a 100644
--- a/test/CodeGen/ARM/t2-imm.ll
+++ b/test/CodeGen/ARM/t2-imm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f6(i32 %a) {
; CHECK:f6
diff --git a/test/CodeGen/ARM/tail-call.ll b/test/CodeGen/ARM/tail-call.ll
new file mode 100644
index 000000000000..771158632ecf
--- /dev/null
+++ b/test/CodeGen/ARM/tail-call.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple armv7 -O0 -o - < %s | FileCheck %s -check-prefix CHECK-TAIL
+; RUN: llc -mtriple armv7 -O0 -disable-tail-calls -o - < %s \
+; RUN: | FileCheck %s -check-prefix CHECK-NO-TAIL
+
+declare i32 @callee(i32 %i)
+
+define i32 @caller(i32 %i) {
+entry:
+ %r = tail call i32 @callee(i32 %i)
+ ret i32 %r
+}
+
+; CHECK-TAIL-LABEL: caller
+; CHECK-TAIL: b callee
+
+; CHECK-NO-TAIL-LABEL: caller
+; CHECK-NO-TAIL: push {lr}
+; CHECK-NO-TAIL: bl callee
+; CHECK-NO-TAIL: pop {lr}
+; CHECK-NO-TAIL: bx lr
+
diff --git a/test/CodeGen/ARM/taildup-branch-weight.ll b/test/CodeGen/ARM/taildup-branch-weight.ll
new file mode 100644
index 000000000000..0a16071a6615
--- /dev/null
+++ b/test/CodeGen/ARM/taildup-branch-weight.ll
@@ -0,0 +1,54 @@
+; RUN: llc -mtriple=arm-eabi -print-machineinstrs=tailduplication -tail-dup-size=100 \
+; RUN: -enable-tail-merge=false -disable-cgp %s -o /dev/null 2>&1 \
+; RUN: | FileCheck %s
+
+; CHECK: Machine code for function test0:
+; CHECK: Successors according to CFG: BB#1(4) BB#2(124)
+
+define void @test0(i32 %a, i32 %b, i32* %c, i32* %d) {
+entry:
+ store i32 3, i32* %d
+ br label %B1
+
+B2:
+ store i32 2, i32* %c
+ br label %B4
+
+B3:
+ store i32 2, i32* %c
+ br label %B4
+
+B1:
+ store i32 1, i32* %d
+ %test0 = icmp slt i32 %a, %b
+ br i1 %test0, label %B2, label %B3, !prof !0
+
+B4:
+ ret void
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 4, i32 124}
+
+; CHECK: Machine code for function test1:
+; CHECK: Successors according to CFG: BB#1(8) BB#2(248)
+
+@g0 = common global i32 0, align 4
+
+define void @test1(i32 %a, i32 %b, i32* %c, i32* %d, i32* %e) {
+
+ %test0 = icmp slt i32 %a, %b
+ br i1 %test0, label %B1, label %B2, !prof !1
+
+B1:
+ br label %B3
+
+B2:
+ store i32 2, i32* %c
+ br label %B3
+
+B3:
+ store i32 3, i32* %e
+ ret void
+}
+
+!1 = metadata !{metadata !"branch_weights", i32 248, i32 8}
diff --git a/test/CodeGen/ARM/this-return.ll b/test/CodeGen/ARM/this-return.ll
index cb42de69f0aa..c681a1c80958 100644
--- a/test/CodeGen/ARM/this-return.ll
+++ b/test/CodeGen/ARM/this-return.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -arm-tail-calls | FileCheck %s -check-prefix=CHECKELF
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-tail-calls | FileCheck %s -check-prefix=CHECKT2D
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi | FileCheck %s -check-prefix=CHECKELF
+; RUN: llc < %s -mtriple=thumbv7-apple-ios5.0 | FileCheck %s -check-prefix=CHECKT2D
%struct.A = type { i8 }
%struct.B = type { i32 }
diff --git a/test/CodeGen/ARM/thumb-litpool.ll b/test/CodeGen/ARM/thumb-litpool.ll
new file mode 100644
index 000000000000..f68fdb6fdc0f
--- /dev/null
+++ b/test/CodeGen/ARM/thumb-litpool.ll
@@ -0,0 +1,15 @@
+; RUN: llc -mtriple=thumbv6m-apple-macho %s -relocation-model=static -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-apple-macho %s -relocation-model=pic -o - | FileCheck %s
+
+@var = global i8 zeroinitializer
+
+declare void @callee(i8*)
+
+define void @foo() minsize {
+; CHECK-LABEL: foo:
+; CHECK: ldr {{r[0-7]}}, LCPI0_0
+ call void @callee(i8* @var)
+ call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7}"()
+ call void @callee(i8* @var)
+ ret void
+} \ No newline at end of file
diff --git a/test/CodeGen/ARM/thumb2-it-block.ll b/test/CodeGen/ARM/thumb2-it-block.ll
index 47c5dccd6fee..c5e699c155a1 100644
--- a/test/CodeGen/ARM/thumb2-it-block.ll
+++ b/test/CodeGen/ARM/thumb2-it-block.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8 %s -o - | FileCheck %s
; PR11107
define i32 @test(i32 %a, i32 %b) {
diff --git a/test/CodeGen/ARM/tls-models.ll b/test/CodeGen/ARM/tls-models.ll
index ccc9032313b8..42c1ba911028 100644
--- a/test/CodeGen/ARM/tls-models.ll
+++ b/test/CodeGen/ARM/tls-models.ll
@@ -22,9 +22,9 @@ entry:
; Non-PIC code can use initial-exec, PIC code has to use general dynamic.
; CHECK-NONPIC-LABEL: f1:
- ; CHECK-NONPIC: external_gd(gottpoff)
+ ; CHECK-NONPIC: external_gd(GOTTPOFF)
; CHECK-PIC-LABEL: f1:
- ; CHECK-PIC: external_gd(tlsgd)
+ ; CHECK-PIC: external_gd(TLSGD)
}
define i32* @f2() {
@@ -34,9 +34,9 @@ entry:
; Non-PIC code can use local exec, PIC code can use local dynamic,
; but that is not implemented, so falls back to general dynamic.
; CHECK-NONPIC-LABEL: f2:
- ; CHECK-NONPIC: internal_gd(tpoff)
+ ; CHECK-NONPIC: internal_gd(TPOFF)
; CHECK-PIC-LABEL: f2:
- ; CHECK-PIC: internal_gd(tlsgd)
+ ; CHECK-PIC: internal_gd(TLSGD)
}
@@ -49,9 +49,9 @@ entry:
; Non-PIC code can use initial exec, PIC should use local dynamic,
; but that is not implemented, so falls back to general dynamic.
; CHECK-NONPIC-LABEL: f3:
- ; CHECK-NONPIC: external_ld(gottpoff)
+ ; CHECK-NONPIC: external_ld(GOTTPOFF)
; CHECK-PIC-LABEL: f3:
- ; CHECK-PIC: external_ld(tlsgd)
+ ; CHECK-PIC: external_ld(TLSGD)
}
define i32* @f4() {
@@ -61,9 +61,9 @@ entry:
; Non-PIC code can use local exec, PIC code can use local dynamic,
; but that is not implemented, so it falls back to general dynamic.
; CHECK-NONPIC-LABEL: f4:
- ; CHECK-NONPIC: internal_ld(tpoff)
+ ; CHECK-NONPIC: internal_ld(TPOFF)
; CHECK-PIC-LABEL: f4:
- ; CHECK-PIC: internal_ld(tlsgd)
+ ; CHECK-PIC: internal_ld(TLSGD)
}
@@ -75,9 +75,9 @@ entry:
; Non-PIC and PIC code will use initial exec as specified.
; CHECK-NONPIC-LABEL: f5:
- ; CHECK-NONPIC: external_ie(gottpoff)
+ ; CHECK-NONPIC: external_ie(GOTTPOFF)
; CHECK-PIC-LABEL: f5:
- ; CHECK-PIC: external_ie(gottpoff)
+ ; CHECK-PIC: external_ie(GOTTPOFF)
}
define i32* @f6() {
@@ -86,9 +86,9 @@ entry:
; Non-PIC code can use local exec, PIC code use initial exec as specified.
; CHECK-NONPIC-LABEL: f6:
- ; CHECK-NONPIC: internal_ie(tpoff)
+ ; CHECK-NONPIC: internal_ie(TPOFF)
; CHECK-PIC-LABEL: f6:
- ; CHECK-PIC: internal_ie(gottpoff)
+ ; CHECK-PIC: internal_ie(GOTTPOFF)
}
@@ -100,9 +100,9 @@ entry:
; Non-PIC and PIC code will use local exec as specified.
; CHECK-NONPIC-LABEL: f7:
- ; CHECK-NONPIC: external_le(tpoff)
+ ; CHECK-NONPIC: external_le(TPOFF)
; CHECK-PIC-LABEL: f7:
- ; CHECK-PIC: external_le(tpoff)
+ ; CHECK-PIC: external_le(TPOFF)
}
define i32* @f8() {
@@ -111,7 +111,7 @@ entry:
; Non-PIC and PIC code will use local exec as specified.
; CHECK-NONPIC-LABEL: f8:
- ; CHECK-NONPIC: internal_le(tpoff)
+ ; CHECK-NONPIC: internal_le(TPOFF)
; CHECK-PIC-LABEL: f8:
- ; CHECK-PIC: internal_le(tpoff)
+ ; CHECK-PIC: internal_le(TPOFF)
}
diff --git a/test/CodeGen/ARM/tls1.ll b/test/CodeGen/ARM/tls1.ll
index ec4278ce72f6..a1ca0b758b45 100644
--- a/test/CodeGen/ARM/tls1.ll
+++ b/test/CodeGen/ARM/tls1.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: grep "i(tpoff)"
+; RUN: grep "i(TPOFF)"
; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
; RUN: grep "__aeabi_read_tp"
; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi \
diff --git a/test/CodeGen/ARM/tls2.ll b/test/CodeGen/ARM/tls2.ll
index f04812583114..24b4794b061a 100644
--- a/test/CodeGen/ARM/tls2.ll
+++ b/test/CodeGen/ARM/tls2.ll
@@ -8,7 +8,7 @@
define i32 @f() {
; CHECK-NONPIC-LABEL: f:
; CHECK-NONPIC: ldr {{r.}}, [pc, {{r.}}]
-; CHECK-NONPIC: i(gottpoff)
+; CHECK-NONPIC: i(GOTTPOFF)
; CHECK-PIC-LABEL: f:
; CHECK-PIC: __tls_get_addr
entry:
@@ -19,7 +19,7 @@ entry:
define i32* @g() {
; CHECK-NONPIC-LABEL: g:
; CHECK-NONPIC: ldr {{r.}}, [pc, {{r.}}]
-; CHECK-NONPIC: i(gottpoff)
+; CHECK-NONPIC: i(GOTTPOFF)
; CHECK-PIC-LABEL: g:
; CHECK-PIC: __tls_get_addr
entry:
diff --git a/test/CodeGen/ARM/trap.ll b/test/CodeGen/ARM/trap.ll
index 6cb26e331ba0..0baf50b45b20 100644
--- a/test/CodeGen/ARM/trap.ll
+++ b/test/CodeGen/ARM/trap.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=INSTR
; RUN: llc < %s -mtriple=arm-apple-darwin -trap-func=_trap | FileCheck %s -check-prefix=FUNC
+; RUN: llc < %s -mtriple=arm-apple-darwin -trap-func=_trap -O0 | FileCheck %s -check-prefix=FUNC
; RUN: llc -mtriple=armv7-unknown-nacl -filetype=obj %s -o - \
; RUN: | llvm-objdump -disassemble -triple armv7-unknown-nacl - \
; RUN: | FileCheck %s -check-prefix=ENCODING-NACL
diff --git a/test/CodeGen/ARM/trunc_ldr.ll b/test/CodeGen/ARM/trunc_ldr.ll
index 3033c2ba3e25..2ce9b894d55a 100644
--- a/test/CodeGen/ARM/trunc_ldr.ll
+++ b/test/CodeGen/ARM/trunc_ldr.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm | grep ldrb.*7 | count 1
-; RUN: llc < %s -march=arm | grep ldrsb.*7 | count 1
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
%struct.A = type { i8, i8, i8, i8, i16, i8, i8, %struct.B** }
%struct.B = type { float, float, i32, i32, i32, [0 x i8] }
@@ -22,3 +21,10 @@ define i32 @f2(%struct.A* %d) {
%tmp57 = sext i8 %tmp56 to i32
ret i32 %tmp57
}
+
+; CHECK: ldrb{{.*}}7
+; CHECK-NOT: ldrb{{.*}}7
+
+; CHECK: ldrsb{{.*}}7
+; CHECK-NOT: ldrsb{{.*}}7
+
diff --git a/test/CodeGen/ARM/truncstore-dag-combine.ll b/test/CodeGen/ARM/truncstore-dag-combine.ll
index 5665440aa1d6..360e3e13f59e 100644
--- a/test/CodeGen/ARM/truncstore-dag-combine.ll
+++ b/test/CodeGen/ARM/truncstore-dag-combine.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v4t | not grep orr
-; RUN: llc < %s -march=arm -mattr=+v4t | not grep mov
+; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s
define void @bar(i8* %P, i16* %Q) {
entry:
@@ -16,3 +15,7 @@ entry:
store i32 %tmp, i32* %P1, align 1
ret void
}
+
+; CHECK-NOT: orr
+; CHECK-NOT: mov
+
diff --git a/test/CodeGen/ARM/tst_teq.ll b/test/CodeGen/ARM/tst_teq.ll
index c83111e69937..bac4fd91acf0 100644
--- a/test/CodeGen/ARM/tst_teq.ll
+++ b/test/CodeGen/ARM/tst_teq.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm | grep tst
-; RUN: llc < %s -march=arm | grep teq
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f(i32 %a) {
entry:
@@ -16,3 +15,7 @@ entry:
%retval = select i1 %0, i32 20, i32 10 ; <i32> [#uses=1]
ret i32 %retval
}
+
+; CHECK: tst
+; CHECK: teq
+
diff --git a/test/CodeGen/ARM/twoaddrinstr.ll b/test/CodeGen/ARM/twoaddrinstr.ll
index 2172f6b9a6cd..01df3b42d107 100644
--- a/test/CodeGen/ARM/twoaddrinstr.ll
+++ b/test/CodeGen/ARM/twoaddrinstr.ll
@@ -1,5 +1,5 @@
; Tests for the two-address instruction pass.
-; RUN: llc -march=arm -mcpu=cortex-a9 < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 -arm-atomic-cfg-tidy=0 %s -o - | FileCheck %s
define void @PR13378() nounwind {
; This was originally a crasher trying to schedule the instructions.
diff --git a/test/CodeGen/ARM/unaligned_load_store.ll b/test/CodeGen/ARM/unaligned_load_store.ll
index e7ff63f8dbb0..72163ae30c38 100644
--- a/test/CodeGen/ARM/unaligned_load_store.ll
+++ b/test/CodeGen/ARM/unaligned_load_store.ll
@@ -1,6 +1,11 @@
-; RUN: llc < %s -march=arm -pre-RA-sched=source | FileCheck %s -check-prefix=EXPANDED
-; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=cortex-a8 -mattr=-neon -arm-strict-align -pre-RA-sched=source | FileCheck %s -check-prefix=EXPANDED
-; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=UNALIGNED
+; RUN: llc -mtriple=arm-eabi -pre-RA-sched=source %s -o - \
+; RUN: | FileCheck %s -check-prefix=EXPANDED
+
+; RUN: llc -mtriple=armv6-apple-darwin -mcpu=cortex-a8 -mattr=-neon -arm-strict-align -pre-RA-sched=source %s -o - \
+; RUN: | FileCheck %s -check-prefix=EXPANDED
+
+; RUN: llc -mtriple=armv6-apple-darwin -mcpu=cortex-a8 %s -o - \
+; RUN: | FileCheck %s -check-prefix=UNALIGNED
; rdar://7113725
; rdar://12091029
diff --git a/test/CodeGen/ARM/unaligned_load_store_vector.ll b/test/CodeGen/ARM/unaligned_load_store_vector.ll
index 968a2c7ad0bb..000ed489c4c0 100644
--- a/test/CodeGen/ARM/unaligned_load_store_vector.ll
+++ b/test/CodeGen/ARM/unaligned_load_store_vector.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=arm -mattr=+v7 -mattr=+neon | FileCheck %s
+;RUN: llc -mtriple=arm-eabi -mattr=+v7 -mattr=+neon %s -o - | FileCheck %s
;ALIGN = 1
;SIZE = 64
diff --git a/test/CodeGen/ARM/undefined.ll b/test/CodeGen/ARM/undefined.ll
new file mode 100644
index 000000000000..86422fb54412
--- /dev/null
+++ b/test/CodeGen/ARM/undefined.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple armv7-eabi -o - %s | FileCheck %s
+; RUN: llc -mtriple thumbv6m-eabi -o - %s | FileCheck %s
+; RUN: llc -mtriple thumbv7-eabi -o - %s | FileCheck %s
+
+declare void @llvm.arm.undefined(i32) nounwind
+
+define void @undefined_trap() {
+entry:
+ tail call void @llvm.arm.undefined(i32 254)
+ ret void
+}
+
+; CHECK-LABEL: undefined_trap
+; CHECK: udf #254
diff --git a/test/CodeGen/ARM/unord.ll b/test/CodeGen/ARM/unord.ll
index bd28034b3adb..7243e9973fd2 100644
--- a/test/CodeGen/ARM/unord.ll
+++ b/test/CodeGen/ARM/unord.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm | grep movne | count 1
-; RUN: llc < %s -march=arm | grep moveq | count 1
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f1(float %X, float %Y) {
%tmp = fcmp uno float %X, %Y
@@ -12,3 +11,10 @@ define i32 @f2(float %X, float %Y) {
%retval = select i1 %tmp, i32 1, i32 -1
ret i32 %retval
}
+
+; CHECK: movne
+; CHECK-NOT: movne
+
+; CHECK: moveq
+; CHECK-NOT: moveq
+
diff --git a/test/CodeGen/ARM/uxt_rot.ll b/test/CodeGen/ARM/uxt_rot.ll
index 628c0795d81a..235416a35e60 100644
--- a/test/CodeGen/ARM/uxt_rot.ll
+++ b/test/CodeGen/ARM/uxt_rot.ll
@@ -1,6 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | grep uxtb | count 1
-; RUN: llc < %s -march=arm -mattr=+v6 | grep uxtab | count 1
-; RUN: llc < %s -march=arm -mattr=+v6 | grep uxth | count 1
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
define zeroext i8 @test1(i32 %A.u) {
%B.u = trunc i32 %A.u to i8
@@ -22,3 +20,13 @@ define zeroext i32 @test3(i32 %A.u) {
%F.u = zext i16 %E.u to i32
ret i32 %F.u
}
+
+; CHECK: uxtb
+; CHECK-NOT: uxtb
+
+; CHECK: uxtab
+; CHECK-NOT: uxtab
+
+; CHECK: uxth
+; CHECK-NOT: uxth
+
diff --git a/test/CodeGen/ARM/v1-constant-fold.ll b/test/CodeGen/ARM/v1-constant-fold.ll
index eb49a81ab763..7421d25c1780 100644
--- a/test/CodeGen/ARM/v1-constant-fold.ll
+++ b/test/CodeGen/ARM/v1-constant-fold.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+v7,+vfp3,-neon | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+v7,+vfp3,-neon | FileCheck %s
; PR15611. Check that we don't crash when constant folding v1i32 types.
@@ -11,7 +11,7 @@ bb:
%tmp3 = insertelement <4 x i32> %tmp2, i32 0, i32 3
%tmp4 = add <4 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK: bl bar
- tail call void @bar(<4 x i32> %tmp4)
+ call void @bar(<4 x i32> %tmp4)
ret void
}
diff --git a/test/CodeGen/ARM/va_arg.ll b/test/CodeGen/ARM/va_arg.ll
index f18b49822847..d901a7461fc8 100644
--- a/test/CodeGen/ARM/va_arg.ll
+++ b/test/CodeGen/ARM/va_arg.ll
@@ -24,13 +24,13 @@ entry:
; CHECK-NOT: bfc
; CHECK: bx lr
-define double @test2(i32 %a, i32 %b, ...) nounwind optsize {
+define double @test2(i32 %a, i32* %b, ...) nounwind optsize {
entry:
%ap = alloca i8*, align 4 ; <i8**> [#uses=3]
%ap1 = bitcast i8** %ap to i8* ; <i8*> [#uses=2]
call void @llvm.va_start(i8* %ap1)
%0 = va_arg i8** %ap, i32 ; <i32> [#uses=0]
- store i32 %0, i32* undef
+ store i32 %0, i32* %b
%1 = va_arg i8** %ap, double ; <double> [#uses=1]
call void @llvm.va_end(i8* %ap1)
ret double %1
diff --git a/test/CodeGen/ARM/vaba.ll b/test/CodeGen/ARM/vaba.ll
index 97139e9b6ccc..6478b1843c69 100644
--- a/test/CodeGen/ARM/vaba.ll
+++ b/test/CodeGen/ARM/vaba.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vabas8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK-LABEL: vabas8:
diff --git a/test/CodeGen/ARM/vabd.ll b/test/CodeGen/ARM/vabd.ll
index 2eb6d935de83..9ba8be28c776 100644
--- a/test/CodeGen/ARM/vabd.ll
+++ b/test/CodeGen/ARM/vabd.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vabds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vabds8:
diff --git a/test/CodeGen/ARM/vabs.ll b/test/CodeGen/ARM/vabs.ll
index 96dd38ec2e68..3a1aec86edfe 100644
--- a/test/CodeGen/ARM/vabs.ll
+++ b/test/CodeGen/ARM/vabs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vabss8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vabss8:
@@ -28,7 +28,7 @@ define <2 x float> @vabsf32(<2 x float>* %A) nounwind {
;CHECK-LABEL: vabsf32:
;CHECK: vabs.f32
%tmp1 = load <2 x float>* %A
- %tmp2 = call <2 x float> @llvm.arm.neon.vabs.v2f32(<2 x float> %tmp1)
+ %tmp2 = call <2 x float> @llvm.fabs.v2f32(<2 x float> %tmp1)
ret <2 x float> %tmp2
}
@@ -60,19 +60,19 @@ define <4 x float> @vabsQf32(<4 x float>* %A) nounwind {
;CHECK-LABEL: vabsQf32:
;CHECK: vabs.f32
%tmp1 = load <4 x float>* %A
- %tmp2 = call <4 x float> @llvm.arm.neon.vabs.v4f32(<4 x float> %tmp1)
+ %tmp2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %tmp1)
ret <4 x float> %tmp2
}
declare <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32>) nounwind readnone
-declare <2 x float> @llvm.arm.neon.vabs.v2f32(<2 x float>) nounwind readnone
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>) nounwind readnone
declare <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32>) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vabs.v4f32(<4 x float>) nounwind readnone
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>) nounwind readnone
define <8 x i8> @vqabss8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vqabss8:
diff --git a/test/CodeGen/ARM/vadd.ll b/test/CodeGen/ARM/vadd.ll
index fcb5408272f4..86b0d0297018 100644
--- a/test/CodeGen/ARM/vadd.ll
+++ b/test/CodeGen/ARM/vadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vaddi8:
diff --git a/test/CodeGen/ARM/varargs-spill-stack-align-nacl.ll b/test/CodeGen/ARM/varargs-spill-stack-align-nacl.ll
new file mode 100644
index 000000000000..19d6cbe0cd8a
--- /dev/null
+++ b/test/CodeGen/ARM/varargs-spill-stack-align-nacl.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -mtriple=arm-nacl-gnueabi | FileCheck %s
+
+declare void @llvm.va_start(i8*)
+declare void @external_func(i8*)
+
+@va_list = external global i8*
+
+; On ARM, varargs arguments are passed in r0-r3 with the rest on the
+; stack. A varargs function must therefore spill rN-r3 just below the
+; function's initial stack pointer.
+;
+; This test checks for a bug in which a gap was left between the spill
+; area and varargs arguments on the stack when using 16 byte stack
+; alignment.
+
+define void @varargs_func(i32 %arg1, ...) {
+ call void @llvm.va_start(i8* bitcast (i8** @va_list to i8*))
+ call void @external_func(i8* bitcast (i8** @va_list to i8*))
+ ret void
+}
+; CHECK-LABEL: varargs_func:
+; Reserve space for the varargs save area. This currently reserves
+; more than enough (16 bytes rather than the 12 bytes needed).
+; CHECK: sub sp, sp, #16
+; CHECK: push {lr}
+; Align the stack pointer to a multiple of 16.
+; CHECK: sub sp, sp, #12
+; Calculate the address of the varargs save area and save varargs
+; arguments into it.
+; CHECK-NEXT: add r0, sp, #20
+; CHECK-NEXT: stm r0, {r1, r2, r3}
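+
+; A rough sketch of the arithmetic above, assuming the stack pointer on entry
+; is S (so the stack-passed varargs arguments start at [S]):
+;   sub sp, sp, #16    -> sp = S-16  (16 bytes reserved for the r1-r3 save area)
+;   push {lr}          -> sp = S-20
+;   sub sp, sp, #12    -> sp = S-32  (frame size padded to a multiple of 16)
+;   add r0, sp, #20    -> r0 = S-12
+;   stm r0, {r1,r2,r3} -> stores to [S-12], [S-8], [S-4]
+; so the register save area ends exactly at S, directly below the stack-passed
+; varargs arguments, with no gap.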
diff --git a/test/CodeGen/ARM/vargs.ll b/test/CodeGen/ARM/vargs.ll
index 5f3536cbb9a3..3b810f36cc79 100644
--- a/test/CodeGen/ARM/vargs.ll
+++ b/test/CodeGen/ARM/vargs.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm
+; RUN: llc -mtriple=arm-eabi %s -o /dev/null
+
@str = internal constant [43 x i8] c"Hello World %d %d %d %d %d %d %d %d %d %d\0A\00" ; <[43 x i8]*> [#uses=1]
define i32 @main() {
diff --git a/test/CodeGen/ARM/vbits.ll b/test/CodeGen/ARM/vbits.ll
index 7b48441958f6..dfeaacf2085f 100644
--- a/test/CodeGen/ARM/vbits.ll
+++ b/test/CodeGen/ARM/vbits.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -mcpu=cortex-a8 %s -o - | FileCheck %s
define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: v_andi8:
diff --git a/test/CodeGen/ARM/vbsl.ll b/test/CodeGen/ARM/vbsl.ll
index 1e53e51f8bb0..ddc37cc82441 100644
--- a/test/CodeGen/ARM/vbsl.ll
+++ b/test/CodeGen/ARM/vbsl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; rdar://12471808
diff --git a/test/CodeGen/ARM/vceq.ll b/test/CodeGen/ARM/vceq.ll
index 0a1f2ebe4f83..e3202e402cc7 100644
--- a/test/CodeGen/ARM/vceq.ll
+++ b/test/CodeGen/ARM/vceq.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vceqi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vceqi8:
diff --git a/test/CodeGen/ARM/vcge.ll b/test/CodeGen/ARM/vcge.ll
index 81a59dbdfe90..3739f5ee8c51 100644
--- a/test/CodeGen/ARM/vcge.ll
+++ b/test/CodeGen/ARM/vcge.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vcges8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vcges8:
@@ -145,7 +145,7 @@ define <2 x i32> @vacgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK: vacge.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vacged(<2 x float> %tmp1, <2 x float> %tmp2)
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vacge.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
ret <2 x i32> %tmp3
}
@@ -154,12 +154,12 @@ define <4 x i32> @vacgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK: vacge.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vacgeq(<4 x float> %tmp1, <4 x float> %tmp2)
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vacge.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
ret <4 x i32> %tmp3
}
-declare <2 x i32> @llvm.arm.neon.vacged(<2 x float>, <2 x float>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vacgeq(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vacge.v2i32.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vacge.v4i32.v4f32(<4 x float>, <4 x float>) nounwind readnone
define <8 x i8> @vcgei8Z(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vcgei8Z:
diff --git a/test/CodeGen/ARM/vcgt.ll b/test/CodeGen/ARM/vcgt.ll
index 056866fe994b..2f736f689ab1 100644
--- a/test/CodeGen/ARM/vcgt.ll
+++ b/test/CodeGen/ARM/vcgt.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -regalloc=basic | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -regalloc=basic %s -o - | FileCheck %s
define <8 x i8> @vcgts8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vcgts8:
@@ -146,7 +146,7 @@ define <2 x i32> @vacgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK: vacgt.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vacgtd(<2 x float> %tmp1, <2 x float> %tmp2)
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vacgt.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
ret <2 x i32> %tmp3
}
@@ -155,7 +155,7 @@ define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK: vacgt.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vacgtq(<4 x float> %tmp1, <4 x float> %tmp2)
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vacgt.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
ret <4 x i32> %tmp3
}
@@ -172,8 +172,8 @@ define <4 x i32> @vcgt_zext(<4 x float>* %A, <4 x float>* %B) nounwind {
ret <4 x i32> %tmp4
}
-declare <2 x i32> @llvm.arm.neon.vacgtd(<2 x float>, <2 x float>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vacgtq(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vacgt.v2i32.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vacgt.v4i32.v4f32(<4 x float>, <4 x float>) nounwind readnone
define <8 x i8> @vcgti8Z(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vcgti8Z:
diff --git a/test/CodeGen/ARM/vcnt.ll b/test/CodeGen/ARM/vcnt.ll
index 0b539799833d..390559b82807 100644
--- a/test/CodeGen/ARM/vcnt.ll
+++ b/test/CodeGen/ARM/vcnt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; NB: this tests vcnt, vclz, and vcls
define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
diff --git a/test/CodeGen/ARM/vcombine.ll b/test/CodeGen/ARM/vcombine.ll
index 527f93b6637c..33aa71df0be3 100644
--- a/test/CodeGen/ARM/vcombine.ll
+++ b/test/CodeGen/ARM/vcombine.ll
@@ -1,9 +1,12 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -mtriple=armeb-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
define <16 x i8> @vcombine8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK: vcombine8
-; CHECK: vmov r0, r1, d16
-; CHECK: vmov r2, r3, d17
+; CHECK-LE: vmov r0, r1, d16
+; CHECK-LE: vmov r2, r3, d17
+; CHECK-BE: vmov r1, r0, d16
+; CHECK-BE: vmov r3, r2, d17
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -12,8 +15,10 @@ define <16 x i8> @vcombine8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
define <8 x i16> @vcombine16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK: vcombine16
-; CHECK: vmov r0, r1, d16
-; CHECK: vmov r2, r3, d17
+; CHECK-LE: vmov r0, r1, d16
+; CHECK-LE: vmov r2, r3, d17
+; CHECK-BE: vmov r1, r0, d16
+; CHECK-BE: vmov r3, r2, d17
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -22,8 +27,10 @@ define <8 x i16> @vcombine16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
define <4 x i32> @vcombine32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK: vcombine32
-; CHECK: vmov r0, r1, d16
-; CHECK: vmov r2, r3, d17
+; CHECK-LE: vmov r0, r1, d16
+; CHECK-LE: vmov r2, r3, d17
+; CHECK-BE: vmov r1, r0, d16
+; CHECK-BE: vmov r3, r2, d17
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -32,8 +39,10 @@ define <4 x i32> @vcombine32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
define <4 x float> @vcombinefloat(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK: vcombinefloat
-; CHECK: vmov r0, r1, d16
-; CHECK: vmov r2, r3, d17
+; CHECK-LE: vmov r0, r1, d16
+; CHECK-LE: vmov r2, r3, d17
+; CHECK-BE: vmov r1, r0, d16
+; CHECK-BE: vmov r3, r2, d17
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
%tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -42,8 +51,10 @@ define <4 x float> @vcombinefloat(<2 x float>* %A, <2 x float>* %B) nounwind {
define <2 x i64> @vcombine64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
; CHECK: vcombine64
-; CHECK: vmov r0, r1, d16
-; CHECK: vmov r2, r3, d17
+; CHECK-LE: vmov r0, r1, d16
+; CHECK-LE: vmov r2, r3, d17
+; CHECK-BE: vmov r1, r0, d16
+; CHECK-BE: vmov r3, r2, d17
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
%tmp3 = shufflevector <1 x i64> %tmp1, <1 x i64> %tmp2, <2 x i32> <i32 0, i32 1>
@@ -56,7 +67,8 @@ define <2 x i64> @vcombine64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
define <4 x i16> @vget_low16(<8 x i16>* %A) nounwind {
; CHECK: vget_low16
; CHECK-NOT: vst
-; CHECK: vmov r0, r1, d16
+; CHECK-LE: vmov r0, r1, d16
+; CHECK-BE: vmov r1, r0, d16
%tmp1 = load <8 x i16>* %A
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret <4 x i16> %tmp2
@@ -65,7 +77,8 @@ define <4 x i16> @vget_low16(<8 x i16>* %A) nounwind {
define <8 x i8> @vget_high8(<16 x i8>* %A) nounwind {
; CHECK: vget_high8
; CHECK-NOT: vst
-; CHECK: vmov r0, r1, d17
+; CHECK-LE: vmov r0, r1, d17
+; CHECK-BE: vmov r1, r0, d16
%tmp1 = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <8 x i8> %tmp2
diff --git a/test/CodeGen/ARM/vcvt.ll b/test/CodeGen/ARM/vcvt.ll
index 4f17dc559480..af4e6a3b0465 100644
--- a/test/CodeGen/ARM/vcvt.ll
+++ b/test/CodeGen/ARM/vcvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon,+fp16 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon,+fp16 %s -o - | FileCheck %s
define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
;CHECK-LABEL: vcvt_f32tos32:
diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll
index b24be2654dfc..89f355c68751 100644
--- a/test/CodeGen/ARM/vdup.ll
+++ b/test/CodeGen/ARM/vdup.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon -verify-machineinstrs %s -o - \
+; RUN: | FileCheck %s
define <8 x i8> @v_dup8(i8 %A) nounwind {
;CHECK-LABEL: v_dup8:
@@ -331,3 +332,35 @@ define <8 x i8> @check_i8(<16 x i8> %v) nounwind {
%2 = insertelement <8 x i8> %1, i8 %x, i32 1
ret <8 x i8> %2
}
+
+; Check that an SPR splat produces a vdup.
+
+define <2 x float> @check_spr_splat2(<2 x float> %p, i16 %q) {
+;CHECK-LABEL: check_spr_splat2:
+;CHECK: vdup.32 d
+ %conv = sitofp i16 %q to float
+ %splat.splatinsert = insertelement <2 x float> undef, float %conv, i32 0
+ %splat.splat = shufflevector <2 x float> %splat.splatinsert, <2 x float> undef, <2 x i32> zeroinitializer
+ %sub = fsub <2 x float> %splat.splat, %p
+ ret <2 x float> %sub
+}
+
+define <4 x float> @check_spr_splat4(<4 x float> %p, i16 %q) {
+;CHECK-LABEL: check_spr_splat4:
+;CHECK: vdup.32 q
+ %conv = sitofp i16 %q to float
+ %splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
+ %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
+ %sub = fsub <4 x float> %splat.splat, %p
+ ret <4 x float> %sub
+}
+
+define <4 x float> @check_spr_splat4_lane1(<4 x float> %p, i16 %q) {
+;CHECK-LABEL: check_spr_splat4_lane1:
+;CHECK: vdup.32 q{{.*}}, d{{.*}}[1]
+ %conv = sitofp i16 %q to float
+ %splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 1
+ %splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %sub = fsub <4 x float> %splat.splat, %p
+ ret <4 x float> %sub
+}
diff --git a/test/CodeGen/ARM/vector-spilling.ll b/test/CodeGen/ARM/vector-spilling.ll
new file mode 100644
index 000000000000..746c6dfcd114
--- /dev/null
+++ b/test/CodeGen/ARM/vector-spilling.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -march=arm -mtriple=armv7-linux-gnueabihf -arm-atomic-cfg-tidy=0 -float-abi=hard -mcpu=cortex-a9 -O3 | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32-S64"
+
+; This test will generate spills/fills using vldmia instructions that access 24 bytes of memory.
+; Check that we don't crash when we generate these instructions on Cortex-A9.
+
+; CHECK: test:
+; CHECK: vstmia
+; CHECK: vldmia
+define void @test(<8 x i64>* %src) #0 {
+entry:
+ %0 = getelementptr inbounds <8 x i64>* %src, i32 0
+ %1 = load <8 x i64>* %0, align 8
+
+ %2 = getelementptr inbounds <8 x i64>* %src, i32 1
+ %3 = load <8 x i64>* %2, align 8
+
+ %4 = getelementptr inbounds <8 x i64>* %src, i32 2
+ %5 = load <8 x i64>* %4, align 8
+
+ %6 = getelementptr inbounds <8 x i64>* %src, i32 3
+ %7 = load <8 x i64>* %6, align 8
+
+ %8 = shufflevector <8 x i64> %1, <8 x i64> %3, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %9 = shufflevector <8 x i64> %1, <8 x i64> %3, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+
+ tail call void(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>)* @foo(<8 x i64> %1, <8 x i64> %3, <8 x i64> %5, <8 x i64> %7, <8 x i64> %8, <8 x i64> %9)
+ ret void
+}
+
+declare void @foo(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>)
+
+attributes #0 = { noredzone "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index 5555a4759b00..4407451244e9 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextd:
diff --git a/test/CodeGen/ARM/vfcmp.ll b/test/CodeGen/ARM/vfcmp.ll
index a23db7be7615..4b2fea9baa09 100644
--- a/test/CodeGen/ARM/vfcmp.ll
+++ b/test/CodeGen/ARM/vfcmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; This tests fcmp operations that do not map directly to NEON instructions.
diff --git a/test/CodeGen/ARM/vfp-libcalls.ll b/test/CodeGen/ARM/vfp-libcalls.ll
new file mode 100644
index 000000000000..9d4e194e90ee
--- /dev/null
+++ b/test/CodeGen/ARM/vfp-libcalls.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple=armv6-apple-ios -mcpu=arm1136jf-s -o - %s | FileCheck %s --check-prefix=CHECK-HARD
+; RUN: llc -mtriple=thumbv6-apple-ios -mcpu=arm1136jf-s -o - %s | FileCheck %s --check-prefix=CHECK-SOFTISH
+; RUN: llc -mtriple=armv7s-apple-ios -soft-float -mcpu=arm1136jf-s -o - %s | FileCheck %s --check-prefix=CHECK-SOFT
+
+define float @test_call(float %a, float %b) {
+; CHECK-HARD: vadd.f32 {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-SOFTISH: blx ___addsf3vfp
+; CHECK-SOFT: bl ___addsf3{{$}}
+ %sum = fadd float %a, %b
+ ret float %sum
+} \ No newline at end of file
diff --git a/test/CodeGen/ARM/vfp-regs-dwarf.ll b/test/CodeGen/ARM/vfp-regs-dwarf.ll
new file mode 100644
index 000000000000..49767294ad28
--- /dev/null
+++ b/test/CodeGen/ARM/vfp-regs-dwarf.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=armv7-linux-gnueabihf %s -o - | FileCheck %s
+
+; Generated from:
+; void stack_offsets() {
+; asm("" ::: "d8", "d9", "d11", "d13");
+; }
+; Compiled with: "clang -target armv7-linux-gnueabihf -O3"
+
+; The important point we're checking here is that the .cfi directives describe
+; the layout of the VFP registers correctly. The fact that the numbers are
+; monotonic in memory is also a nice property to have.
+
+define void @stack_offsets() {
+; CHECK-LABEL: stack_offsets:
+; CHECK: vpush {d13}
+; CHECK: vpush {d11}
+; CHECK: vpush {d8, d9}
+
+; CHECK: .cfi_offset {{269|d13}}, -8
+; CHECK: .cfi_offset {{267|d11}}, -16
+; CHECK: .cfi_offset {{265|d9}}, -24
+; CHECK: .cfi_offset {{264|d8}}, -32
+
+; CHECK: vpop {d8, d9}
+; CHECK: vpop {d11}
+; CHECK: vpop {d13}
+ call void asm sideeffect "", "~{d8},~{d9},~{d11},~{d13}"() #1
+ ret void
+}
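+
+; A sketch of the save-area layout implied by the directives above (offsets are
+; relative to the CFA; this is an explanatory note, not a CHECK):
+;   vpush {d13}    stores d13 at [CFA-8]
+;   vpush {d11}    stores d11 at [CFA-16]
+;   vpush {d8, d9} stores d8 at [CFA-32] and d9 at [CFA-24]
+; so ascending addresses hold d8, d9, d11, d13, which is the monotonic ordering
+; the comment above refers to.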
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/Users/tim/llvm/build/tmp.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"tmp.c", metadata !"/Users/tim/llvm/build"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, void ()* @stack_offsets, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [bar]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/Users/tim/llvm/build/tmp.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+
diff --git a/test/CodeGen/ARM/vhadd.ll b/test/CodeGen/ARM/vhadd.ll
index 9c2ed579c98e..6183db3702b3 100644
--- a/test/CodeGen/ARM/vhadd.ll
+++ b/test/CodeGen/ARM/vhadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vhadds8:
diff --git a/test/CodeGen/ARM/vhsub.ll b/test/CodeGen/ARM/vhsub.ll
index 4bc2e87ab577..f1a0cb27f576 100644
--- a/test/CodeGen/ARM/vhsub.ll
+++ b/test/CodeGen/ARM/vhsub.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vhsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vhsubs8:
diff --git a/test/CodeGen/ARM/vicmp.ll b/test/CodeGen/ARM/vicmp.ll
index 0a8f103102b1..bebb32062f71 100644
--- a/test/CodeGen/ARM/vicmp.ll
+++ b/test/CodeGen/ARM/vicmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm -mattr=+neon %s -o - | FileCheck %s
; This tests icmp operations that do not map directly to NEON instructions.
; Not-equal (ne) operations are implemented by VCEQ/VMVN. Less-than (lt/ult)
diff --git a/test/CodeGen/ARM/vld1.ll b/test/CodeGen/ARM/vld1.ll
index 444d0d5b5edc..caeeada90ff5 100644
--- a/test/CodeGen/ARM/vld1.ll
+++ b/test/CodeGen/ARM/vld1.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -regalloc=basic | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s
+
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon -regalloc=basic %s -o - \
+; RUN: | FileCheck %s
define <8 x i8> @vld1i8(i8* %A) nounwind {
;CHECK-LABEL: vld1i8:
diff --git a/test/CodeGen/ARM/vld2.ll b/test/CodeGen/ARM/vld2.ll
index fddafeab91cc..7ac5cc709b33 100644
--- a/test/CodeGen/ARM/vld2.ll
+++ b/test/CodeGen/ARM/vld2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
diff --git a/test/CodeGen/ARM/vld3.ll b/test/CodeGen/ARM/vld3.ll
index d6eb4c2f6dd3..171a03c24da1 100644
--- a/test/CodeGen/ARM/vld3.ll
+++ b/test/CodeGen/ARM/vld3.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -regalloc=basic | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -regalloc=basic %s -o - | FileCheck %s
%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index ff162bb022e1..94ad143ae0fd 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
diff --git a/test/CodeGen/ARM/vlddup.ll b/test/CodeGen/ARM/vlddup.ll
index 5509f3e0a0da..64aac562c1eb 100644
--- a/test/CodeGen/ARM/vlddup.ll
+++ b/test/CodeGen/ARM/vlddup.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vld1dupi8(i8* %A) nounwind {
;CHECK-LABEL: vld1dupi8:
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index 7a83a4c0cac6..c7d69ff9780a 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -regalloc=basic | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s
+
+; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon -regalloc=basic %s -o - \
+; RUN: | FileCheck %s
define <8 x i8> @vld1lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vld1lanei8:
diff --git a/test/CodeGen/ARM/vldm-sched-a9.ll b/test/CodeGen/ARM/vldm-sched-a9.ll
index d0a9ac6d2b56..64f3770e3d21 100644
--- a/test/CodeGen/ARM/vldm-sched-a9.ll
+++ b/test/CodeGen/ARM/vldm-sched-a9.ll
@@ -1,13 +1,13 @@
-; RUN: llc < %s -march=arm -mtriple=armv7-linux-gnueabihf -float-abi=hard -mcpu=cortex-a9 -O3 | FileCheck %s
+; RUN: llc < %s -march=arm -mtriple=armv7-linux-gnueabihf -arm-atomic-cfg-tidy=0 -float-abi=hard -mcpu=cortex-a9 -O3 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32-S64"
-; This test will generate spills/fills using vldmia instructions that access 64 bytes of memory.
-; Check that we don't crash when we generate these instructions on Cortex-A9.
+; This test used to test vector spilling using vstmia/vldmia instructions, but
+; the changes for PR:18825 prevent that spilling.
; CHECK: test:
-; CHECK: vstmia
-; CHECK: vldmia
+; CHECK-NOT: vstmia
+; CHECK-NOT: vldmia
define void @test(i64* %src) #0 {
entry:
%arrayidx39 = getelementptr inbounds i64* %src, i32 13
diff --git a/test/CodeGen/ARM/vminmax.ll b/test/CodeGen/ARM/vminmax.ll
index 81f45782a96f..1167ebe06717 100644
--- a/test/CodeGen/ARM/vminmax.ll
+++ b/test/CodeGen/ARM/vminmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmins8:
diff --git a/test/CodeGen/ARM/vmla.ll b/test/CodeGen/ARM/vmla.ll
index caf655609c2b..6073fc5566fd 100644
--- a/test/CodeGen/ARM/vmla.ll
+++ b/test/CodeGen/ARM/vmla.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vmlai8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
;CHECK-LABEL: vmlai8:
diff --git a/test/CodeGen/ARM/vmls.ll b/test/CodeGen/ARM/vmls.ll
index 61f3424909e3..f86739cea3f1 100644
--- a/test/CodeGen/ARM/vmls.ll
+++ b/test/CodeGen/ARM/vmls.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vmlsi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
;CHECK-LABEL: vmlsi8:
diff --git a/test/CodeGen/ARM/vmov.ll b/test/CodeGen/ARM/vmov.ll
index 8b63138bda81..7900af44ef08 100644
--- a/test/CodeGen/ARM/vmov.ll
+++ b/test/CodeGen/ARM/vmov.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @v_movi8() nounwind {
;CHECK-LABEL: v_movi8:
diff --git a/test/CodeGen/ARM/vmul.ll b/test/CodeGen/ARM/vmul.ll
index de329acdf3c7..0fa43d801bbe 100644
--- a/test/CodeGen/ARM/vmul.ll
+++ b/test/CodeGen/ARM/vmul.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vmuli8:
diff --git a/test/CodeGen/ARM/vneg.ll b/test/CodeGen/ARM/vneg.ll
index 1be4f748213a..4d548ddf8141 100644
--- a/test/CodeGen/ARM/vneg.ll
+++ b/test/CodeGen/ARM/vneg.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vnegs8:
diff --git a/test/CodeGen/ARM/vpadal.ll b/test/CodeGen/ARM/vpadal.ll
index a616a8d270a7..ffeac737fa36 100644
--- a/test/CodeGen/ARM/vpadal.ll
+++ b/test/CodeGen/ARM/vpadal.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <4 x i16> @vpadals8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vpadals8:
diff --git a/test/CodeGen/ARM/vpadd.ll b/test/CodeGen/ARM/vpadd.ll
index f84721f996cd..01cb1c74e38e 100644
--- a/test/CodeGen/ARM/vpadd.ll
+++ b/test/CodeGen/ARM/vpadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vpaddi8:
@@ -152,6 +152,17 @@ define void @addCombineToVPADDL() nounwind ssp {
ret void
}
+; Legalization produces an EXTRACT_VECTOR_ELT DAG node which performs an extend from
+; i16 to i32. In this case the input for the formed VPADDL needs to be a vector of i16s.
+define <2 x i16> @fromExtendingExtractVectorElt(<4 x i16> %in) {
+;CHECK-LABEL: fromExtendingExtractVectorElt:
+;CHECK: vpaddl.s16
+ %tmp1 = shufflevector <4 x i16> %in, <4 x i16> undef, <2 x i32> <i32 0, i32 2>
+ %tmp2 = shufflevector <4 x i16> %in, <4 x i16> undef, <2 x i32> <i32 1, i32 3>
+ %x = add <2 x i16> %tmp2, %tmp1
+ ret <2 x i16> %x
+}
+
declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32>) nounwind readnone
diff --git a/test/CodeGen/ARM/vpminmax.ll b/test/CodeGen/ARM/vpminmax.ll
index c68b3193c19a..0b893e5bc892 100644
--- a/test/CodeGen/ARM/vpminmax.ll
+++ b/test/CodeGen/ARM/vpminmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vpmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vpmins8:
diff --git a/test/CodeGen/ARM/vqadd.ll b/test/CodeGen/ARM/vqadd.ll
index 784076685462..81acc8bc5abb 100644
--- a/test/CodeGen/ARM/vqadd.ll
+++ b/test/CodeGen/ARM/vqadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vqadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vqadds8:
diff --git a/test/CodeGen/ARM/vqshl.ll b/test/CodeGen/ARM/vqshl.ll
index b5cd71613d4a..4afef6dbd658 100644
--- a/test/CodeGen/ARM/vqshl.ll
+++ b/test/CodeGen/ARM/vqshl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vqshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vqshls8:
diff --git a/test/CodeGen/ARM/vqshrn.ll b/test/CodeGen/ARM/vqshrn.ll
index 4abae700f877..f02482c0f77c 100644
--- a/test/CodeGen/ARM/vqshrn.ll
+++ b/test/CodeGen/ARM/vqshrn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vqshrns8(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vqshrns8:
diff --git a/test/CodeGen/ARM/vqsub.ll b/test/CodeGen/ARM/vqsub.ll
index 90bc3492fc53..4af438019208 100644
--- a/test/CodeGen/ARM/vqsub.ll
+++ b/test/CodeGen/ARM/vqsub.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vqsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vqsubs8:
diff --git a/test/CodeGen/ARM/vrec.ll b/test/CodeGen/ARM/vrec.ll
index c0deca995764..91979e5a3343 100644
--- a/test/CodeGen/ARM/vrec.ll
+++ b/test/CodeGen/ARM/vrec.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <2 x i32> @vrecpei32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vrecpei32:
diff --git a/test/CodeGen/ARM/vrev.ll b/test/CodeGen/ARM/vrev.ll
index b6da694e1805..7215ad615e81 100644
--- a/test/CodeGen/ARM/vrev.ll
+++ b/test/CodeGen/ARM/vrev.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev64D8:
@@ -178,3 +178,11 @@ entry:
ret void
}
+define <4 x i32> @test_vrev32_bswap(<4 x i32> %source) nounwind {
+; CHECK-LABEL: test_vrev32_bswap:
+; CHECK: vrev32.8
+ %bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %source)
+ ret <4 x i32> %bswap
+}
+
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) nounwind readnone
diff --git a/test/CodeGen/ARM/vsel.ll b/test/CodeGen/ARM/vsel.ll
index 7e1f7146fd1c..746b1b000ef1 100644
--- a/test/CodeGen/ARM/vsel.ll
+++ b/test/CodeGen/ARM/vsel.ll
@@ -61,7 +61,7 @@ define void @test_vsel32slt(i32 %lhs32, i32 %rhs32, float %a, float %b) {
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
; CHECK: cmp r0, r1
-; CHECK: vselgt.f32 s0, s1, s0
+; CHECK: vselge.f32 s0, s1, s0
ret void
}
define void @test_vsel64slt(i32 %lhs32, i32 %rhs32, double %a, double %b) {
@@ -70,7 +70,7 @@ define void @test_vsel64slt(i32 %lhs32, i32 %rhs32, double %a, double %b) {
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
; CHECK: cmp r0, r1
-; CHECK: vselgt.f64 d16, d1, d0
+; CHECK: vselge.f64 d16, d1, d0
ret void
}
define void @test_vsel32sle(i32 %lhs32, i32 %rhs32, float %a, float %b) {
@@ -79,7 +79,7 @@ define void @test_vsel32sle(i32 %lhs32, i32 %rhs32, float %a, float %b) {
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
; CHECK: cmp r0, r1
-; CHECK: vselge.f32 s0, s1, s0
+; CHECK: vselgt.f32 s0, s1, s0
ret void
}
define void @test_vsel64sle(i32 %lhs32, i32 %rhs32, double %a, double %b) {
@@ -88,7 +88,7 @@ define void @test_vsel64sle(i32 %lhs32, i32 %rhs32, double %a, double %b) {
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
; CHECK: cmp r0, r1
-; CHECK: vselge.f64 d16, d1, d0
+; CHECK: vselgt.f64 d16, d1, d0
ret void
}
define void @test_vsel32ogt(float %lhs32, float %rhs32, float %a, float %b) {
diff --git a/test/CodeGen/ARM/vselect_imax.ll b/test/CodeGen/ARM/vselect_imax.ll
index 9ea56a47bd23..e999034fa47e 100644
--- a/test/CodeGen/ARM/vselect_imax.ll
+++ b/test/CodeGen/ARM/vselect_imax.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -cost-model -analyze -mtriple=thumbv7-apple-ios6.0.0 -march=arm -mcpu=cortex-a8 | FileCheck %s --check-prefix=COST
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; Make sure that ARM backend with NEON handles vselect.
define void @vmax_v4i32(<4 x i32>* %m, <4 x i32> %a, <4 x i32> %b) {
diff --git a/test/CodeGen/ARM/vshift.ll b/test/CodeGen/ARM/vshift.ll
index de380d3d12b3..618a137b5b05 100644
--- a/test/CodeGen/ARM/vshift.ll
+++ b/test/CodeGen/ARM/vshift.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vshls8:
@@ -180,7 +180,7 @@ define <8 x i8> @vlshri8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vlshri8:
;CHECK: vshr.u8
%tmp1 = load <8 x i8>* %A
- %tmp2 = lshr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+ %tmp2 = lshr <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
ret <8 x i8> %tmp2
}
@@ -188,7 +188,7 @@ define <4 x i16> @vlshri16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vlshri16:
;CHECK: vshr.u16
%tmp1 = load <4 x i16>* %A
- %tmp2 = lshr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
+ %tmp2 = lshr <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
ret <4 x i16> %tmp2
}
@@ -196,7 +196,7 @@ define <2 x i32> @vlshri32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vlshri32:
;CHECK: vshr.u32
%tmp1 = load <2 x i32>* %A
- %tmp2 = lshr <2 x i32> %tmp1, < i32 32, i32 32 >
+ %tmp2 = lshr <2 x i32> %tmp1, < i32 31, i32 31 >
ret <2 x i32> %tmp2
}
@@ -204,7 +204,7 @@ define <1 x i64> @vlshri64(<1 x i64>* %A) nounwind {
;CHECK-LABEL: vlshri64:
;CHECK: vshr.u64
%tmp1 = load <1 x i64>* %A
- %tmp2 = lshr <1 x i64> %tmp1, < i64 64 >
+ %tmp2 = lshr <1 x i64> %tmp1, < i64 63 >
ret <1 x i64> %tmp2
}
@@ -252,7 +252,7 @@ define <16 x i8> @vlshrQi8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vlshrQi8:
;CHECK: vshr.u8
%tmp1 = load <16 x i8>* %A
- %tmp2 = lshr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+ %tmp2 = lshr <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
ret <16 x i8> %tmp2
}
@@ -260,7 +260,7 @@ define <8 x i16> @vlshrQi16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vlshrQi16:
;CHECK: vshr.u16
%tmp1 = load <8 x i16>* %A
- %tmp2 = lshr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+ %tmp2 = lshr <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
ret <8 x i16> %tmp2
}
@@ -268,7 +268,7 @@ define <4 x i32> @vlshrQi32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vlshrQi32:
;CHECK: vshr.u32
%tmp1 = load <4 x i32>* %A
- %tmp2 = lshr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
+ %tmp2 = lshr <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
ret <4 x i32> %tmp2
}
@@ -276,7 +276,7 @@ define <2 x i64> @vlshrQi64(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vlshrQi64:
;CHECK: vshr.u64
%tmp1 = load <2 x i64>* %A
- %tmp2 = lshr <2 x i64> %tmp1, < i64 64, i64 64 >
+ %tmp2 = lshr <2 x i64> %tmp1, < i64 63, i64 63 >
ret <2 x i64> %tmp2
}
@@ -331,7 +331,7 @@ define <8 x i8> @vashri8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vashri8:
;CHECK: vshr.s8
%tmp1 = load <8 x i8>* %A
- %tmp2 = ashr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+ %tmp2 = ashr <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
ret <8 x i8> %tmp2
}
@@ -339,7 +339,7 @@ define <4 x i16> @vashri16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vashri16:
;CHECK: vshr.s16
%tmp1 = load <4 x i16>* %A
- %tmp2 = ashr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
+ %tmp2 = ashr <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
ret <4 x i16> %tmp2
}
@@ -347,7 +347,7 @@ define <2 x i32> @vashri32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vashri32:
;CHECK: vshr.s32
%tmp1 = load <2 x i32>* %A
- %tmp2 = ashr <2 x i32> %tmp1, < i32 32, i32 32 >
+ %tmp2 = ashr <2 x i32> %tmp1, < i32 31, i32 31 >
ret <2 x i32> %tmp2
}
@@ -355,7 +355,7 @@ define <1 x i64> @vashri64(<1 x i64>* %A) nounwind {
;CHECK-LABEL: vashri64:
;CHECK: vshr.s64
%tmp1 = load <1 x i64>* %A
- %tmp2 = ashr <1 x i64> %tmp1, < i64 64 >
+ %tmp2 = ashr <1 x i64> %tmp1, < i64 63 >
ret <1 x i64> %tmp2
}
@@ -403,7 +403,7 @@ define <16 x i8> @vashrQi8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vashrQi8:
;CHECK: vshr.s8
%tmp1 = load <16 x i8>* %A
- %tmp2 = ashr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+ %tmp2 = ashr <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
ret <16 x i8> %tmp2
}
@@ -411,7 +411,7 @@ define <8 x i16> @vashrQi16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vashrQi16:
;CHECK: vshr.s16
%tmp1 = load <8 x i16>* %A
- %tmp2 = ashr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+ %tmp2 = ashr <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
ret <8 x i16> %tmp2
}
@@ -419,7 +419,7 @@ define <4 x i32> @vashrQi32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vashrQi32:
;CHECK: vshr.s32
%tmp1 = load <4 x i32>* %A
- %tmp2 = ashr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
+ %tmp2 = ashr <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
ret <4 x i32> %tmp2
}
@@ -427,6 +427,6 @@ define <2 x i64> @vashrQi64(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vashrQi64:
;CHECK: vshr.s64
%tmp1 = load <2 x i64>* %A
- %tmp2 = ashr <2 x i64> %tmp1, < i64 64, i64 64 >
+ %tmp2 = ashr <2 x i64> %tmp1, < i64 63, i64 63 >
ret <2 x i64> %tmp2
}
diff --git a/test/CodeGen/ARM/vshiftins.ll b/test/CodeGen/ARM/vshiftins.ll
index 27610bfa677d..9526c3222017 100644
--- a/test/CodeGen/ARM/vshiftins.ll
+++ b/test/CodeGen/ARM/vshiftins.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vsli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vsli8:
diff --git a/test/CodeGen/ARM/vshl.ll b/test/CodeGen/ARM/vshl.ll
index 462f7fe7fb05..6228652fc715 100644
--- a/test/CodeGen/ARM/vshl.ll
+++ b/test/CodeGen/ARM/vshl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vshls8:
diff --git a/test/CodeGen/ARM/vshll.ll b/test/CodeGen/ARM/vshll.ll
index ae806641480b..27873eb72753 100644
--- a/test/CodeGen/ARM/vshll.ll
+++ b/test/CodeGen/ARM/vshll.ll
@@ -1,51 +1,57 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i16> @vshlls8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshlls8:
;CHECK: vshll.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i16> %tmp2
+ %tmp1 = load <8 x i8>* %A
+ %sext = sext <8 x i8> %tmp1 to <8 x i16>
+ %shift = shl <8 x i16> %sext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %shift
}
define <4 x i32> @vshlls16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshlls16:
;CHECK: vshll.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i32> %tmp2
+ %tmp1 = load <4 x i16>* %A
+ %sext = sext <4 x i16> %tmp1 to <4 x i32>
+ %shift = shl <4 x i32> %sext, <i32 15, i32 15, i32 15, i32 15>
+ ret <4 x i32> %shift
}
define <2 x i64> @vshlls32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshlls32:
;CHECK: vshll.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i64> %tmp2
+ %tmp1 = load <2 x i32>* %A
+ %sext = sext <2 x i32> %tmp1 to <2 x i64>
+ %shift = shl <2 x i64> %sext, <i64 31, i64 31>
+ ret <2 x i64> %shift
}
define <8 x i16> @vshllu8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshllu8:
;CHECK: vshll.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i16> %tmp2
+ %tmp1 = load <8 x i8>* %A
+ %zext = zext <8 x i8> %tmp1 to <8 x i16>
+ %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %shift
}
define <4 x i32> @vshllu16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshllu16:
;CHECK: vshll.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i32> %tmp2
+ %tmp1 = load <4 x i16>* %A
+ %zext = zext <4 x i16> %tmp1 to <4 x i32>
+ %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
+ ret <4 x i32> %shift
}
define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshllu32:
;CHECK: vshll.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i64> %tmp2
+ %tmp1 = load <2 x i32>* %A
+ %zext = zext <2 x i32> %tmp1 to <2 x i64>
+ %shift = shl <2 x i64> %zext, <i64 31, i64 31>
+ ret <2 x i64> %shift
}
; The following tests use the maximum shift count, so the signedness is
@@ -53,31 +59,58 @@ define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
define <8 x i16> @vshlli8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshlli8:
;CHECK: vshll.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >)
- ret <8 x i16> %tmp2
+ %tmp1 = load <8 x i8>* %A
+ %sext = sext <8 x i8> %tmp1 to <8 x i16>
+ %shift = shl <8 x i16> %sext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ret <8 x i16> %shift
}
define <4 x i32> @vshlli16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshlli16:
;CHECK: vshll.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 16, i16 16, i16 16, i16 16 >)
- ret <4 x i32> %tmp2
+ %tmp1 = load <4 x i16>* %A
+ %zext = zext <4 x i16> %tmp1 to <4 x i32>
+ %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
+ ret <4 x i32> %shift
}
define <2 x i64> @vshlli32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshlli32:
;CHECK: vshll.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 32, i32 32 >)
- ret <2 x i64> %tmp2
+ %tmp1 = load <2 x i32>* %A
+ %zext = zext <2 x i32> %tmp1 to <2 x i64>
+ %shift = shl <2 x i64> %zext, <i64 32, i64 32>
+ ret <2 x i64> %shift
}
-declare <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+; And these have a shift just out of range so separate vmovl and vshl
+; instructions are needed.
+define <8 x i16> @vshllu8_bad(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: vshllu8_bad:
+; CHECK: vmovl.u8
+; CHECK: vshl.i16
+ %tmp1 = load <8 x i8>* %A
+ %zext = zext <8 x i8> %tmp1 to <8 x i16>
+ %shift = shl <8 x i16> %zext, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
+ ret <8 x i16> %shift
+}
+
+define <4 x i32> @vshlls16_bad(<4 x i16>* %A) nounwind {
+; CHECK-LABEL: vshlls16_bad:
+; CHECK: vmovl.s16
+; CHECK: vshl.i32
+ %tmp1 = load <4 x i16>* %A
+ %sext = sext <4 x i16> %tmp1 to <4 x i32>
+ %shift = shl <4 x i32> %sext, <i32 17, i32 17, i32 17, i32 17>
+ ret <4 x i32> %shift
+}
-declare <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+define <2 x i64> @vshllu32_bad(<2 x i32>* %A) nounwind {
+; CHECK-LABEL: vshllu32_bad:
+; CHECK: vmovl.u32
+; CHECK: vshl.i64
+ %tmp1 = load <2 x i32>* %A
+ %zext = zext <2 x i32> %tmp1 to <2 x i64>
+ %shift = shl <2 x i64> %zext, <i64 33, i64 33>
+ ret <2 x i64> %shift
+}
diff --git a/test/CodeGen/ARM/vshrn.ll b/test/CodeGen/ARM/vshrn.ll
index 40a94fee0d78..8aa009ab823e 100644
--- a/test/CodeGen/ARM/vshrn.ll
+++ b/test/CodeGen/ARM/vshrn.ll
@@ -1,32 +1,61 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vshrns8(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vshrns8:
;CHECK: vshrn.i16
%tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
+ %tmp2 = lshr <8 x i16> %tmp1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
+ ret <8 x i8> %tmp3
}
define <4 x i16> @vshrns16(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vshrns16:
;CHECK: vshrn.i32
%tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
+ %tmp2 = ashr <4 x i32> %tmp1, <i32 16, i32 16, i32 16, i32 16>
+ %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
+ ret <4 x i16> %tmp3
}
define <2 x i32> @vshrns32(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vshrns32:
;CHECK: vshrn.i64
%tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
+ %tmp2 = ashr <2 x i64> %tmp1, <i64 32, i64 32>
+ %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
+ ret <2 x i32> %tmp3
+}
+
+define <8 x i8> @vshrns8_bad(<8 x i16>* %A) nounwind {
+; CHECK-LABEL: vshrns8_bad:
+; CHECK: vshr.s16
+; CHECK: vmovn.i16
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = ashr <8 x i16> %tmp1, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
+ %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @vshrns16_bad(<4 x i32>* %A) nounwind {
+; CHECK-LABEL: vshrns16_bad:
+; CHECK: vshr.u32
+; CHECK: vmovn.i32
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = lshr <4 x i32> %tmp1, <i32 17, i32 17, i32 17, i32 17>
+ %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
+ ret <4 x i16> %tmp3
}
-declare <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vshiftn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+define <2 x i32> @vshrns32_bad(<2 x i64>* %A) nounwind {
+; CHECK-LABEL: vshrns32_bad:
+; CHECK: vshr.u64
+; CHECK: vmovn.i64
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = lshr <2 x i64> %tmp1, <i64 33, i64 33>
+ %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
+ ret <2 x i32> %tmp3
+}
define <8 x i8> @vrshrns8(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vrshrns8:
diff --git a/test/CodeGen/ARM/vsra.ll b/test/CodeGen/ARM/vsra.ll
index 7a211c31ac0c..fa5985a330c4 100644
--- a/test/CodeGen/ARM/vsra.ll
+++ b/test/CodeGen/ARM/vsra.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vsras8:
;CHECK: vsra.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
- %tmp3 = ashr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- %tmp4 = add <8 x i8> %tmp1, %tmp3
+ %tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
+ %tmp4 = add <8 x i8> %tmp1, %tmp3
ret <8 x i8> %tmp4
}
@@ -15,7 +15,7 @@ define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vsra.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
- %tmp3 = ashr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
+ %tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
%tmp4 = add <4 x i16> %tmp1, %tmp3
ret <4 x i16> %tmp4
}
@@ -25,7 +25,7 @@ define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vsra.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
- %tmp3 = ashr <2 x i32> %tmp2, < i32 32, i32 32 >
+ %tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
%tmp4 = add <2 x i32> %tmp1, %tmp3
ret <2 x i32> %tmp4
}
@@ -35,7 +35,7 @@ define <1 x i64> @vsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK: vsra.s64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
- %tmp3 = ashr <1 x i64> %tmp2, < i64 64 >
+ %tmp3 = ashr <1 x i64> %tmp2, < i64 63 >
%tmp4 = add <1 x i64> %tmp1, %tmp3
ret <1 x i64> %tmp4
}
@@ -45,7 +45,7 @@ define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: vsra.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
- %tmp3 = ashr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+ %tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
%tmp4 = add <16 x i8> %tmp1, %tmp3
ret <16 x i8> %tmp4
}
@@ -55,7 +55,7 @@ define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: vsra.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
- %tmp3 = ashr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+ %tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
}
@@ -65,7 +65,7 @@ define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: vsra.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
- %tmp3 = ashr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
+ %tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
}
@@ -75,7 +75,7 @@ define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK: vsra.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
- %tmp3 = ashr <2 x i64> %tmp2, < i64 64, i64 64 >
+ %tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
}
@@ -85,7 +85,7 @@ define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vsra.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
- %tmp3 = lshr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+ %tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
%tmp4 = add <8 x i8> %tmp1, %tmp3
ret <8 x i8> %tmp4
}
@@ -95,7 +95,7 @@ define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vsra.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
- %tmp3 = lshr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
+ %tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
%tmp4 = add <4 x i16> %tmp1, %tmp3
ret <4 x i16> %tmp4
}
@@ -105,7 +105,7 @@ define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vsra.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
- %tmp3 = lshr <2 x i32> %tmp2, < i32 32, i32 32 >
+ %tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
%tmp4 = add <2 x i32> %tmp1, %tmp3
ret <2 x i32> %tmp4
}
@@ -115,7 +115,7 @@ define <1 x i64> @vsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK: vsra.u64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
- %tmp3 = lshr <1 x i64> %tmp2, < i64 64 >
+ %tmp3 = lshr <1 x i64> %tmp2, < i64 63 >
%tmp4 = add <1 x i64> %tmp1, %tmp3
ret <1 x i64> %tmp4
}
@@ -125,7 +125,7 @@ define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: vsra.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
- %tmp3 = lshr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+ %tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
%tmp4 = add <16 x i8> %tmp1, %tmp3
ret <16 x i8> %tmp4
}
@@ -135,7 +135,7 @@ define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: vsra.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
- %tmp3 = lshr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+ %tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
}
@@ -145,7 +145,7 @@ define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: vsra.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
- %tmp3 = lshr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
+ %tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
}
@@ -155,7 +155,7 @@ define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK: vsra.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
- %tmp3 = lshr <2 x i64> %tmp2, < i64 64, i64 64 >
+ %tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
}
diff --git a/test/CodeGen/ARM/vst1.ll b/test/CodeGen/ARM/vst1.ll
index 36439fd7adf2..14f3ff066301 100644
--- a/test/CodeGen/ARM/vst1.ll
+++ b/test/CodeGen/ARM/vst1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define void @vst1i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst1i8:
diff --git a/test/CodeGen/ARM/vst2.ll b/test/CodeGen/ARM/vst2.ll
index 7551a562cf0e..2180259d57f7 100644
--- a/test/CodeGen/ARM/vst2.ll
+++ b/test/CodeGen/ARM/vst2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define void @vst2i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst2i8:
diff --git a/test/CodeGen/ARM/vst3.ll b/test/CodeGen/ARM/vst3.ll
index 65625de34573..5f150edf31da 100644
--- a/test/CodeGen/ARM/vst3.ll
+++ b/test/CodeGen/ARM/vst3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon -fast-isel=0 -O0 | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -fast-isel=0 -O0 %s -o - | FileCheck %s
define void @vst3i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst3i8:
diff --git a/test/CodeGen/ARM/vst4.ll b/test/CodeGen/ARM/vst4.ll
index 83a6c7048650..44c76b5ed189 100644
--- a/test/CodeGen/ARM/vst4.ll
+++ b/test/CodeGen/ARM/vst4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define void @vst4i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst4i8:
diff --git a/test/CodeGen/ARM/vstlane.ll b/test/CodeGen/ARM/vstlane.ll
index 34c5c70fffa3..7dd6e7b439ea 100644
--- a/test/CodeGen/ARM/vstlane.ll
+++ b/test/CodeGen/ARM/vstlane.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm -mattr=+neon %s -o - | FileCheck %s
define void @vst1lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst1lanei8:
diff --git a/test/CodeGen/ARM/vsub.ll b/test/CodeGen/ARM/vsub.ll
index 6b95b97378e0..d1a094b92755 100644
--- a/test/CodeGen/ARM/vsub.ll
+++ b/test/CodeGen/ARM/vsub.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vsubi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vsubi8:
diff --git a/test/CodeGen/ARM/vtbl.ll b/test/CodeGen/ARM/vtbl.ll
index 21614b044f9a..32258a30da96 100644
--- a/test/CodeGen/ARM/vtbl.ll
+++ b/test/CodeGen/ARM/vtbl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
diff --git a/test/CodeGen/ARM/vtrn.ll b/test/CodeGen/ARM/vtrn.ll
index 7d101bc61952..cdae7f8ec370 100644
--- a/test/CodeGen/ARM/vtrn.ll
+++ b/test/CodeGen/ARM/vtrn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vtrni8:
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index 2d193c114192..832be6c3daf1 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vuzpi8:
diff --git a/test/CodeGen/ARM/vzip.ll b/test/CodeGen/ARM/vzip.ll
index f71aef7ef139..f74dc62599cf 100644
--- a/test/CodeGen/ARM/vzip.ll
+++ b/test/CodeGen/ARM/vzip.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vzipi8:
diff --git a/test/CodeGen/ARM/warn-stack.ll b/test/CodeGen/ARM/warn-stack.ll
index 9538bbf10488..90a3e1f798ed 100644
--- a/test/CodeGen/ARM/warn-stack.ll
+++ b/test/CodeGen/ARM/warn-stack.ll
@@ -12,7 +12,7 @@ entry:
ret void
}
-; CHECK: warning: Stack size limit exceeded (96) in warn.
+; CHECK: warning: stack size limit exceeded (96) in warn
define void @warn() nounwind ssp {
entry:
%buffer = alloca [80 x i8], align 1
diff --git a/test/CodeGen/ARM/weak.ll b/test/CodeGen/ARM/weak.ll
index 5ac4b8c061d8..375ce22127a4 100644
--- a/test/CodeGen/ARM/weak.ll
+++ b/test/CodeGen/ARM/weak.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm | grep .weak.*f
-; RUN: llc < %s -march=arm | grep .weak.*h
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define weak i32 @f() {
entry:
@@ -14,3 +13,6 @@ entry:
declare extern_weak void @h()
+; CHECK: {{.}}weak{{.*}}f
+; CHECK: {{.}}weak{{.*}}h
+
diff --git a/test/CodeGen/ARM/weak2.ll b/test/CodeGen/ARM/weak2.ll
index cf327bbf5c87..82ab90efb118 100644
--- a/test/CodeGen/ARM/weak2.ll
+++ b/test/CodeGen/ARM/weak2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | grep .weak
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
define i32 @f(i32 %a) {
entry:
@@ -16,3 +16,6 @@ UnifiedReturnBlock: ; preds = %entry
}
declare extern_weak i32 @test_weak(...)
+
+; CHECK: {{.}}weak
+
diff --git a/test/CodeGen/ARM/widen-vmovs.ll b/test/CodeGen/ARM/widen-vmovs.ll
index 1efbc73650d8..316cfabab48c 100644
--- a/test/CodeGen/ARM/widen-vmovs.ll
+++ b/test/CodeGen/ARM/widen-vmovs.ll
@@ -17,7 +17,7 @@ target triple = "thumbv7-apple-ios"
; - Register liveness is verified.
; - The execution domain switch to vorr works across basic blocks.
-define void @Mm() nounwind {
+define void @Mm(i32 %in, float* %addr) nounwind {
entry:
br label %for.body4
@@ -27,10 +27,10 @@ for.body4:
for.body.i:
%tmp3.i = phi float [ 1.000000e+10, %for.body4 ], [ %add.i, %for.body.i ]
%add.i = fadd float %tmp3.i, 1.000000e+10
- %exitcond.i = icmp eq i32 undef, 41
+ %exitcond.i = icmp eq i32 %in, 41
br i1 %exitcond.i, label %rInnerproduct.exit, label %for.body.i
rInnerproduct.exit:
- store float %add.i, float* undef, align 4
+ store float %add.i, float* %addr, align 4
br label %for.body4
}
diff --git a/test/CodeGen/ARM/zero-cycle-zero.ll b/test/CodeGen/ARM/zero-cycle-zero.ll
new file mode 100644
index 000000000000..121a87f5b84d
--- /dev/null
+++ b/test/CodeGen/ARM/zero-cycle-zero.ll
@@ -0,0 +1,70 @@
+; RUN: llc -mtriple=armv8 -mcpu=cyclone < %s | FileCheck %s --check-prefix=CHECK-CYCLONE
+; RUN: llc -mtriple=armv8 -mcpu=swift < %s | FileCheck %s --check-prefix=CHECK-SWIFT
+
+declare arm_aapcs_vfpcc void @take_vec64(<2 x i32>)
+
+define void @test_vec64() {
+; CHECK-CYCLONE-LABEL: test_vec64:
+; CHECK-SWIFT-LABEL: test_vec64:
+
+ call arm_aapcs_vfpcc void @take_vec64(<2 x i32> <i32 0, i32 0>)
+ call arm_aapcs_vfpcc void @take_vec64(<2 x i32> <i32 0, i32 0>)
+; CHECK-CYCLONE-NOT: vmov.f64 d0,
+; CHECK-CYCLONE: vmov.i32 d0, #0
+; CHECK-CYCLONE: bl
+; CHECK-CYCLONE: vmov.i32 d0, #0
+; CHECK-CYCLONE: bl
+
+; CHECK-SWIFT: vmov.f64 [[ZEROREG:d[0-9]+]],
+; CHECK-SWIFT: vmov.i32 [[ZEROREG]], #0
+; CHECK-SWIFT: vorr d0, [[ZEROREG]], [[ZEROREG]]
+; CHECK-SWIFT: bl
+; CHECK-SWIFT: vorr d0, [[ZEROREG]], [[ZEROREG]]
+; CHECK-SWIFT: bl
+
+ ret void
+}
+
+declare arm_aapcs_vfpcc void @take_vec128(<8 x i16>)
+
+define void @test_vec128() {
+; CHECK-CYCLONE-LABEL: test_vec128:
+; CHECK-SWIFT-LABEL: test_vec128:
+
+ call arm_aapcs_vfpcc void @take_vec128(<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
+ call arm_aapcs_vfpcc void @take_vec128(<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
+; CHECK-CYCLONE-NOT: vmov.f64 [[ZEROREG:d[0-9]+]],
+; CHECK-CYCLONE: vmov.i32 q0, #0
+; CHECK-CYCLONE: bl
+; CHECK-CYCLONE: vmov.i32 q0, #0
+; CHECK-CYCLONE: bl
+
+; CHECK-SWIFT-NOT: vmov.f64 [[ZEROREG:d[0-9]+]],
+; CHECK-SWIFT: vmov.i32 [[ZEROREG:q[0-9]+]], #0
+; CHECK-SWIFT: vorr q0, [[ZEROREG]], [[ZEROREG]]
+; CHECK-SWIFT: bl
+; CHECK-SWIFT: vorr q0, [[ZEROREG]], [[ZEROREG]]
+; CHECK-SWIFT: bl
+
+ ret void
+}
+
+declare void @take_i32(i32)
+
+define void @test_i32() {
+; CHECK-CYCLONE-LABEL: test_i32:
+; CHECK-SWIFT-LABEL: test_i32:
+
+ call arm_aapcs_vfpcc void @take_i32(i32 0)
+ call arm_aapcs_vfpcc void @take_i32(i32 0)
+; CHECK-CYCLONE-NOT: vmov.f64 [[ZEROREG:d[0-9]+]],
+; CHECK-CYCLONE: mov r0, #0
+; CHECK-CYCLONE: bl
+; CHECK-CYCLONE: mov r0, #0
+; CHECK-CYCLONE: bl
+
+; It doesn't particularly matter what Swift does here; there isn't carefully
+; crafted behaviour that we might break in Cyclone.
+
+ ret void
+}
diff --git a/test/CodeGen/ARM/zextload_demandedbits.ll b/test/CodeGen/ARM/zextload_demandedbits.ll
index 3d3269cae236..6b6ce97ed9e2 100644
--- a/test/CodeGen/ARM/zextload_demandedbits.ll
+++ b/test/CodeGen/ARM/zextload_demandedbits.ll
@@ -6,7 +6,7 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-
%struct.spam = type { [3 x i32] }
%struct.barney = type { [2 x i32], [2 x i32] }
-; Make sure that the sext op does not get lost due to ComputeMaskedBits.
+; Make sure that the sext op does not get lost due to computeKnownBits.
; CHECK: quux
; CHECK: lsl
; CHECK: asr
diff --git a/test/CodeGen/CPP/atomic.ll b/test/CodeGen/CPP/atomic.ll
new file mode 100644
index 000000000000..e79c45d166a5
--- /dev/null
+++ b/test/CodeGen/CPP/atomic.ll
@@ -0,0 +1,89 @@
+; RUN: llc -march=cpp -o - %s | FileCheck %s
+
+define void @test_atomicrmw(i32* %addr, i32 %inc) {
+ %inst0 = atomicrmw xchg i32* %addr, i32 %inc seq_cst
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Xchg, {{.*}}, SequentiallyConsistent, CrossThread
+ ; CHECK: [[INST]]->setName("inst0");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst1 = atomicrmw add i32* %addr, i32 %inc seq_cst
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Add, {{.*}}, SequentiallyConsistent, CrossThread
+ ; CHECK: [[INST]]->setName("inst1");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst2 = atomicrmw volatile sub i32* %addr, i32 %inc singlethread monotonic
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Sub, {{.*}}, Monotonic, SingleThread
+ ; CHECK: [[INST]]->setName("inst2");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst3 = atomicrmw and i32* %addr, i32 %inc acq_rel
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::And, {{.*}}, AcquireRelease, CrossThread
+ ; CHECK: [[INST]]->setName("inst3");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst4 = atomicrmw nand i32* %addr, i32 %inc release
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Nand, {{.*}}, Release, CrossThread
+ ; CHECK: [[INST]]->setName("inst4");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst5 = atomicrmw volatile or i32* %addr, i32 %inc singlethread seq_cst
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Or, {{.*}}, SequentiallyConsistent, SingleThread
+ ; CHECK: [[INST]]->setName("inst5");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst6 = atomicrmw xor i32* %addr, i32 %inc release
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Xor, {{.*}}, Release, CrossThread
+ ; CHECK: [[INST]]->setName("inst6");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst7 = atomicrmw volatile max i32* %addr, i32 %inc singlethread monotonic
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Max, {{.*}}, Monotonic, SingleThread
+ ; CHECK: [[INST]]->setName("inst7");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst8 = atomicrmw min i32* %addr, i32 %inc acquire
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::Min, {{.*}}, Acquire, CrossThread
+ ; CHECK: [[INST]]->setName("inst8");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+ %inst9 = atomicrmw volatile umax i32* %addr, i32 %inc monotonic
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::UMax, {{.*}}, Monotonic, CrossThread
+ ; CHECK: [[INST]]->setName("inst9");
+ ; CHECK: [[INST]]->setVolatile(true);
+
+ %inst10 = atomicrmw umin i32* %addr, i32 %inc singlethread release
+ ; CHECK: AtomicRMWInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicRMWInst(AtomicRMWInst::UMin, {{.*}}, Release, SingleThread
+ ; CHECK: [[INST]]->setName("inst10");
+ ; CHECK: [[INST]]->setVolatile(false);
+
+
+ ret void
+}
+
+define void @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
+ %inst0 = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, SequentiallyConsistent, Monotonic, CrossThread
+ ; CHECK: [[INST]]->setName("inst0");
+ ; CHECK: [[INST]]->setVolatile(false);
+ ; CHECK: [[INST]]->setWeak(false);
+
+ %inst1 = cmpxchg volatile i32* %addr, i32 %desired, i32 %new singlethread acq_rel acquire
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, AcquireRelease, Acquire, SingleThread
+ ; CHECK: [[INST]]->setName("inst1");
+ ; CHECK: [[INST]]->setVolatile(true);
+ ; CHECK: [[INST]]->setWeak(false);
+
+ %inst2 = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, SequentiallyConsistent, Monotonic, CrossThread
+ ; CHECK: [[INST]]->setName("inst2");
+ ; CHECK: [[INST]]->setVolatile(false);
+ ; CHECK: [[INST]]->setWeak(true);
+
+ %inst3 = cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new singlethread acq_rel acquire
+ ; CHECK: AtomicCmpXchgInst* [[INST:[a-zA-Z0-9_]+]] = new AtomicCmpXchgInst({{.*}}, AcquireRelease, Acquire, SingleThread
+ ; CHECK: [[INST]]->setName("inst3");
+ ; CHECK: [[INST]]->setVolatile(true);
+ ; CHECK: [[INST]]->setWeak(true);
+
+ ret void
+}
diff --git a/test/CodeGen/CPP/attributes.ll b/test/CodeGen/CPP/attributes.ll
new file mode 100644
index 000000000000..3dab617d80b9
--- /dev/null
+++ b/test/CodeGen/CPP/attributes.ll
@@ -0,0 +1,7 @@
+; RUN: llc < %s -march=cpp | FileCheck %s
+
+define void @f1(i8* byval, i8* inalloca) {
+; CHECK: ByVal
+; CHECK: InAlloca
+ ret void
+}
diff --git a/test/CodeGen/CPP/lit.local.cfg b/test/CodeGen/CPP/lit.local.cfg
index 4063dd1b8612..3ff5c6b69737 100644
--- a/test/CodeGen/CPP/lit.local.cfg
+++ b/test/CodeGen/CPP/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'CppBackend' in targets:
+if not 'CppBackend' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll b/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
index 339f0f71ed5a..21c05f17a7c5 100644
--- a/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
+++ b/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -no-integrated-as < %s
; XFAIL: sparc-sun-solaris2
; PR1308
; PR1557
diff --git a/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll b/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
index af522dc4c58d..0f82ba61b288 100644
--- a/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
+++ b/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -no-integrated-as < %s
; Test that we can have an "X" output constraint.
diff --git a/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll b/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
index f2c9b7f849b6..05989a0836cf 100644
--- a/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
+++ b/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -no-integrated-as < %s
%struct..0anon = type { [100 x i32] }
diff --git a/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll b/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
index 27c716222ef8..03ccbdfaf0cc 100644
--- a/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
+++ b/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -no-integrated-as < %s
define fastcc void @bc__support__high_resolution_time__initialize_clock_rate() {
entry:
diff --git a/test/CodeGen/Generic/2008-02-20-MatchingMem.ll b/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
index 7ffb734c713a..5ddb515bb75a 100644
--- a/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
+++ b/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -no-integrated-as < %s
; PR1133
; XFAIL: hexagon
define void @test(i32* %X) nounwind {
diff --git a/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll b/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll
new file mode 100644
index 000000000000..5c1cd0532511
--- /dev/null
+++ b/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll
@@ -0,0 +1,19 @@
+; Test that opaque constants are not creating an infinite DAGCombine loop
+; RUN: llc < %s
+; XFAIL: r600, xcore
+
+@a = common global i32* null, align 8
+@c = common global i32 0, align 4
+@b = common global i32* null, align 8
+
+; Function Attrs: nounwind ssp uwtable
+define void @fn() {
+ store i32* inttoptr (i64 68719476735 to i32*), i32** @a, align 8
+ %1 = load i32* @c, align 4
+ %2 = sext i32 %1 to i64
+ %3 = lshr i64 %2, 12
+ %4 = and i64 %3, 68719476735
+ %5 = getelementptr inbounds i32* null, i64 %4
+ store i32* %5, i32** @b, align 8
+ ret void
+}
diff --git a/test/CodeGen/Generic/MachineBranchProb.ll b/test/CodeGen/Generic/MachineBranchProb.ll
index 802ee2cb0558..0e98280694c4 100644
--- a/test/CodeGen/Generic/MachineBranchProb.ll
+++ b/test/CodeGen/Generic/MachineBranchProb.ll
@@ -1,5 +1,8 @@
; RUN: llc < %s -print-machineinstrs=expand-isel-pseudos -o /dev/null 2>&1 | FileCheck %s
+; ARM & AArch64 run an extra SimplifyCFG which disrupts this test.
+; XFAIL: arm,aarch64
+
; Make sure we have the correct weight attached to each successor.
define i32 @test2(i32 %x) nounwind uwtable readnone ssp {
; CHECK: Machine code for function test2:
diff --git a/test/CodeGen/Generic/asm-large-immediate.ll b/test/CodeGen/Generic/asm-large-immediate.ll
index 891bbc9cc16d..67a7a1e75a83 100644
--- a/test/CodeGen/Generic/asm-large-immediate.ll
+++ b/test/CodeGen/Generic/asm-large-immediate.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -no-integrated-as < %s | FileCheck %s
define void @test() {
entry:
diff --git a/test/CodeGen/Generic/inline-asm-mem-clobber.ll b/test/CodeGen/Generic/inline-asm-mem-clobber.ll
index e523d031dc65..5aa827a0ab88 100644
--- a/test/CodeGen/Generic/inline-asm-mem-clobber.ll
+++ b/test/CodeGen/Generic/inline-asm-mem-clobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O2 < %s | FileCheck %s
+; RUN: llc -O2 -no-integrated-as < %s | FileCheck %s
@G = common global i32 0, align 4
diff --git a/test/CodeGen/Generic/inline-asm-special-strings.ll b/test/CodeGen/Generic/inline-asm-special-strings.ll
index d18221ef934d..5ef568863bad 100644
--- a/test/CodeGen/Generic/inline-asm-special-strings.ll
+++ b/test/CodeGen/Generic/inline-asm-special-strings.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | grep "foo 0 0"
+; RUN: llc -no-integrated-as < %s | grep "foo 0 0"
define void @bar() nounwind {
tail call void asm sideeffect "foo ${:uid} ${:uid}", ""() nounwind
diff --git a/test/CodeGen/Generic/no-target.ll b/test/CodeGen/Generic/no-target.ll
new file mode 100644
index 000000000000..4a4724fdf2de
--- /dev/null
+++ b/test/CodeGen/Generic/no-target.ll
@@ -0,0 +1,3 @@
+; RUN: not llc -mtriple le32-unknown-nacl %s -o - 2>&1 | FileCheck %s
+
+; CHECK: error: unable to get target for 'le32-unknown-nacl'
diff --git a/test/CodeGen/Generic/print-after.ll b/test/CodeGen/Generic/print-after.ll
index 7505907ef773..1b7ce84a8a54 100644
--- a/test/CodeGen/Generic/print-after.ll
+++ b/test/CodeGen/Generic/print-after.ll
@@ -1,4 +1,4 @@
-; RUN: not llc --help-hidden 2>&1 | FileCheck %s
+; RUN: llc --help-hidden 2>&1 | FileCheck %s
; CHECK: -print-after
; CHECK-NOT: -print-after-all
diff --git a/test/CodeGen/Generic/select.ll b/test/CodeGen/Generic/select.ll
index 77636eb6e615..c4841b79acb6 100644
--- a/test/CodeGen/Generic/select.ll
+++ b/test/CodeGen/Generic/select.ll
@@ -192,4 +192,3 @@ define <1 x i32> @checkScalariseVSELECT(<1 x i32> %a, <1 x i32> %b) {
%s = select <1 x i1> %cond, <1 x i32> %a, <1 x i32> %b
ret <1 x i32> %s
}
-
diff --git a/test/CodeGen/Generic/stop-after.ll b/test/CodeGen/Generic/stop-after.ll
index 557e097840af..5e0e350bc7fe 100644
--- a/test/CodeGen/Generic/stop-after.ll
+++ b/test/CodeGen/Generic/stop-after.ll
@@ -5,6 +5,6 @@
; STOP: Loop Strength Reduction
; STOP-NEXT: Machine Function Analysis
-; START: -machine-branch-prob -gc-lowering
+; START: -machine-branch-prob -jump-instr-tables -gc-lowering
; START: FunctionPass Manager
; START-NEXT: Lower Garbage Collection Instructions
diff --git a/test/CodeGen/Hexagon/hwloop-dbg.ll b/test/CodeGen/Hexagon/hwloop-dbg.ll
index bfdd8130d5bf..9537489b03d3 100644
--- a/test/CodeGen/Hexagon/hwloop-dbg.ll
+++ b/test/CodeGen/Hexagon/hwloop-dbg.ll
@@ -7,7 +7,7 @@ define void @foo(i32* nocapture %a, i32* nocapture %b) nounwind {
entry:
tail call void @llvm.dbg.value(metadata !{i32* %a}, i64 0, metadata !13), !dbg !17
tail call void @llvm.dbg.value(metadata !{i32* %b}, i64 0, metadata !14), !dbg !18
- tail call void @llvm.dbg.value(metadata !2, i64 0, metadata !15), !dbg !19
+ tail call void @llvm.dbg.value(metadata !30, i64 0, metadata !15), !dbg !19
br label %for.body, !dbg !19
for.body: ; preds = %for.body, %entry
@@ -38,7 +38,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.module.flags = !{!29}
!0 = metadata !{i32 786449, metadata !28, i32 12, metadata !"QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, null, metadata !""} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] [DW_LANG_C99]
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !28, null, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*, i32*)* @foo, null, null, metadata !11, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
!6 = metadata !{i32 786473, metadata !28} ; [ DW_TAG_file_type ]
@@ -46,8 +46,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!8 = metadata !{null, metadata !9, metadata !9}
!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
!10 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!11 = metadata !{metadata !12}
-!12 = metadata !{metadata !13, metadata !14, metadata !15}
+!11 = metadata !{metadata !13, metadata !14, metadata !15}
!13 = metadata !{i32 786689, metadata !5, metadata !"a", metadata !6, i32 16777217, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [a] [line 1]
!14 = metadata !{i32 786689, metadata !5, metadata !"b", metadata !6, i32 33554433, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [b] [line 1]
!15 = metadata !{i32 786688, metadata !16, metadata !"i", metadata !6, i32 2, metadata !10, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [i] [line 2]
@@ -62,3 +61,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!27 = metadata !{i32 6, i32 1, metadata !16, null}
!28 = metadata !{metadata !"hwloop-dbg.c", metadata !"/usr2/kparzysz/s.hex/t"}
!29 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!30 = metadata !{i32 0}
diff --git a/test/CodeGen/Hexagon/lit.local.cfg b/test/CodeGen/Hexagon/lit.local.cfg
index e96bab818a3c..ba72ff632d4e 100644
--- a/test/CodeGen/Hexagon/lit.local.cfg
+++ b/test/CodeGen/Hexagon/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Hexagon' in targets:
+if not 'Hexagon' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Hexagon/packetize_cond_inst.ll b/test/CodeGen/Hexagon/packetize_cond_inst.ll
index a48a9f62ec61..1fc6e82959e3 100644
--- a/test/CodeGen/Hexagon/packetize_cond_inst.ll
+++ b/test/CodeGen/Hexagon/packetize_cond_inst.ll
@@ -12,7 +12,7 @@ target triple = "hexagon-unknown--elf"
; }
; CHECK: cmp
; CHECK-NEXT: add
-; CHECH-NEXT: add
+; CHECK-NEXT: add
define i32 @ifcnv_add(i32, i32, i32) nounwind readnone {
%4 = icmp sgt i32 %2, %1
br i1 %4, label %5, label %7
diff --git a/test/CodeGen/MSP430/fp.ll b/test/CodeGen/MSP430/fp.ll
index 018090566f18..2559e23ae1f5 100644
--- a/test/CodeGen/MSP430/fp.ll
+++ b/test/CodeGen/MSP430/fp.ll
@@ -15,3 +15,15 @@ entry:
; CHECK: pop.w r4
ret void
}
+
+; Due to FPB not being marked as reserved, the register allocator used to select
+; r4 as the register for the "r" constraint below. This test verifies that this
+; does not happen anymore. Note that the only reason an ISR is used here is that
+; the register allocator selects r4 first instead of fifth in a normal function.
+define msp430_intrcc void @fpb_alloced() #0 {
+; CHECK-LABEL: fpb_alloced:
+; CHECK-NOT: mov.b #0, r4
+; CHECK: nop
+ call void asm sideeffect "nop", "r"(i8 0)
+ ret void
+}
diff --git a/test/CodeGen/MSP430/lit.local.cfg b/test/CodeGen/MSP430/lit.local.cfg
index a18fe6f927d8..b1cf1fbd21d7 100644
--- a/test/CodeGen/MSP430/lit.local.cfg
+++ b/test/CodeGen/MSP430/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'MSP430' in targets:
+if not 'MSP430' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/MSP430/misched-msp430.ll b/test/CodeGen/MSP430/misched-msp430.ll
new file mode 100644
index 000000000000..c8541eff5836
--- /dev/null
+++ b/test/CodeGen/MSP430/misched-msp430.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -mtriple=msp430-unknown-unknown -enable-misched | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
+
+@y = common global i16 0, align 2
+@x = common global i16 0, align 2
+
+; Test that the MI Scheduler's initPolicy does not crash when i32 is
+; unsupported. The content of the asm check below is unimportant. It
+; only verifies that the code generator ran successfully.
+;
+; CHECK-LABEL: @f
+; CHECK: mov.w &y, &x
+; CHECK: ret
+define void @f() {
+entry:
+ %0 = load i16* @y, align 2
+ store i16 %0, i16* @x, align 2
+ ret void
+}
diff --git a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
index 3381143c761d..8807d750e499 100644
--- a/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
+++ b/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float < %s | FileCheck %s
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 < %s | FileCheck %s
define signext i8 @A(i8 %e.0, i8 signext %sum) nounwind {
entry:
diff --git a/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
index e274bc0e14f0..3c1bb39b4340 100644
--- a/test/CodeGen/Mips/2008-08-01-AsmInline.ll
+++ b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips < %s | FileCheck %s
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s
; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
%struct.DWstruct = type { i32, i32 }
diff --git a/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll b/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
index 2b2ee0fd7ad8..c3791dfc7ce6 100644
--- a/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
+++ b/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-O32
; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-O32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
define float @h() nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/2010-07-20-Switch.ll b/test/CodeGen/Mips/2010-07-20-Switch.ll
index 38d7b7e25592..5c840775cf9e 100644
--- a/test/CodeGen/Mips/2010-07-20-Switch.ll
+++ b/test/CodeGen/Mips/2010-07-20-Switch.ll
@@ -2,10 +2,14 @@
; RUN: FileCheck %s -check-prefix=STATIC-O32
; RUN: llc < %s -march=mips -relocation-model=pic | \
; RUN: FileCheck %s -check-prefix=PIC-O32
+; RUN: llc < %s -march=mips64 -relocation-model=pic -mcpu=mips4 | \
+; RUN: FileCheck %s -check-prefix=N64
+; RUN: llc < %s -march=mips64 -relocation-model=static -mcpu=mips4 | \
+; RUN: FileCheck %s -check-prefix=N64
; RUN: llc < %s -march=mips64 -relocation-model=pic -mcpu=mips64 | \
-; RUN: FileCheck %s -check-prefix=N64
+; RUN: FileCheck %s -check-prefix=N64
; RUN: llc < %s -march=mips64 -relocation-model=static -mcpu=mips64 | \
-; RUN: FileCheck %s -check-prefix=N64
+; RUN: FileCheck %s -check-prefix=N64
define i32 @main() nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/2013-11-18-fp64-const0.ll b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
index f8390d9a1ca7..6a210a0c76ce 100644
--- a/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
+++ b/test/CodeGen/Mips/2013-11-18-fp64-const0.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=mips -mattr=-fp64 < %s | FileCheck -check-prefix=CHECK-FP32 %s
-; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck -check-prefix=CHECK-FP64 %s
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=+fp64 < %s | FileCheck -check-prefix=CHECK-FP64 %s
; This test case is a simplified version of an llvm-stress generated test with
; seed=3718491962.
diff --git a/test/CodeGen/Mips/Fast-ISel/loadstore2.ll b/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
new file mode 100644
index 000000000000..f113a0eb1d54
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
@@ -0,0 +1,83 @@
+; ModuleID = 'loadstore2.c'
+target datalayout = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@c2 = common global i8 0, align 1
+@c1 = common global i8 0, align 1
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
+@s2 = common global i16 0, align 2
+@s1 = common global i16 0, align 2
+@i2 = common global i32 0, align 4
+@i1 = common global i32 0, align 4
+@f2 = common global float 0.000000e+00, align 4
+@f1 = common global float 0.000000e+00, align 4
+@d2 = common global double 0.000000e+00, align 8
+@d1 = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @cfoo() #0 {
+entry:
+ %0 = load i8* @c2, align 1
+ store i8 %0, i8* @c1, align 1
+; CHECK-LABEL: cfoo:
+; CHECK: lbu $[[REGc:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sb $[[REGc]], 0(${{[0-9]+}})
+
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @sfoo() #0 {
+entry:
+ %0 = load i16* @s2, align 2
+ store i16 %0, i16* @s1, align 2
+; CHECK-LABEL: sfoo:
+; CHECK: lhu $[[REGs:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sh $[[REGs]], 0(${{[0-9]+}})
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ifoo() #0 {
+entry:
+ %0 = load i32* @i2, align 4
+ store i32 %0, i32* @i1, align 4
+; CHECK-LABEL: ifoo:
+; CHECK: lw $[[REGi:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sw $[[REGi]], 0(${{[0-9]+}})
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ffoo() #0 {
+entry:
+ %0 = load float* @f2, align 4
+ store float %0, float* @f1, align 4
+; CHECK-LABEL: ffoo:
+; CHECK: lwc1 $f[[REGf:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: swc1 $f[[REGf]], 0(${{[0-9]+}})
+
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @dfoo() #0 {
+entry:
+ %0 = load double* @d2, align 8
+ store double %0, double* @d1, align 8
+; CHECK-LABEL: dfoo:
+; CHECK: ldc1 $f[[REGd:[0-9]+]], 0(${{[0-9]+}})
+; CHECK: sdc1 $f[[REGd]], 0(${{[0-9]+}})
+; CHECK: .end dfoo
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+
diff --git a/test/CodeGen/Mips/Fast-ISel/nullvoid.ll b/test/CodeGen/Mips/Fast-ISel/nullvoid.ll
new file mode 100644
index 000000000000..eeaff878bf54
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/nullvoid.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define void @foo() {
+entry:
+ ret void
+; CHECK: jr $ra
+}
diff --git a/test/CodeGen/Mips/Fast-ISel/simplestore.ll b/test/CodeGen/Mips/Fast-ISel/simplestore.ll
new file mode 100644
index 000000000000..5d52481dfdf3
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/simplestore.ll
@@ -0,0 +1,15 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
+@abcd = external global i32
+
+; Function Attrs: nounwind
+define void @foo() {
+entry:
+ store i32 12345, i32* @abcd, align 4
+; CHECK: addiu $[[REG1:[0-9]+]], $zero, 12345
+; CHECK: lw $[[REG2:[0-9]+]], %got(abcd)(${{[0-9]+}})
+; CHECK: sw $[[REG1]], 0($[[REG2]])
+ ret void
+}
+
diff --git a/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll b/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
new file mode 100644
index 000000000000..6759c01c774b
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
@@ -0,0 +1,38 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
+@f = common global float 0.000000e+00, align 4
+@de = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @f1() #0 {
+entry:
+ store float 0x3FFA76C8C0000000, float* @f, align 4
+ ret void
+; CHECK: .ent f1
+; CHECK: lui $[[REG1:[0-9]+]], 16339
+; CHECK: ori $[[REG2:[0-9]+]], $[[REG1]], 46662
+; CHECK: mtc1 $[[REG2]], $f[[REG3:[0-9]+]]
+; CHECK: lw $[[REG4:[0-9]+]], %got(f)(${{[0-9]+}})
+; CHECK: swc1 $f[[REG3]], 0($[[REG4]])
+; CHECK: .end f1
+
+}
+
+; Function Attrs: nounwind
+define void @d1() #0 {
+entry:
+ store double 1.234567e+00, double* @de, align 8
+; CHECK: .ent d1
+; CHECK: lui $[[REG1a:[0-9]+]], 16371
+; CHECK: ori $[[REG2a:[0-9]+]], $[[REG1a]], 49353
+; CHECK: lui $[[REG1b:[0-9]+]], 21403
+; CHECK: ori $[[REG2b:[0-9]+]], $[[REG1b]], 34951
+; CHECK: mtc1 $[[REG2b]], $f[[REG3:[0-9]+]]
+; CHECK: mthc1 $[[REG2a]], $f[[REG3]]
+; CHECK: sdc1 $f[[REG3]], 0(${{[0-9]+}})
+; CHECK: .end d1
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/Fast-ISel/simplestorei.ll b/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
new file mode 100644
index 000000000000..7d2c8e73c352
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
@@ -0,0 +1,65 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
+@ijk = external global i32
+
+; Function Attrs: nounwind
+define void @si2_1() #0 {
+entry:
+ store i32 32767, i32* @ijk, align 4
+; CHECK: .ent si2_1
+; CHECK: addiu $[[REG1:[0-9]+]], $zero, 32767
+; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
+; CHECK: sw $[[REG1]], 0($[[REG2]])
+
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @si2_2() #0 {
+entry:
+ store i32 -32768, i32* @ijk, align 4
+; CHECK: .ent si2_2
+; CHECK: addiu $[[REG1:[0-9]+]], $zero, -32768
+; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
+; CHECK: sw $[[REG1]], 0($[[REG2]])
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ui2_1() #0 {
+entry:
+ store i32 65535, i32* @ijk, align 4
+; CHECK: .ent ui2_1
+; CHECK: ori $[[REG1:[0-9]+]], $zero, 65535
+; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
+; CHECK: sw $[[REG1]], 0($[[REG2]])
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ui4_1() #0 {
+entry:
+ store i32 983040, i32* @ijk, align 4
+; CHECK: .ent ui4_1
+; CHECK: lui $[[REG1:[0-9]+]], 15
+; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
+; CHECK: sw $[[REG1]], 0($[[REG2]])
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @ui4_2() #0 {
+entry:
+ store i32 719566, i32* @ijk, align 4
+; CHECK: .ent ui4_2
+; CHECK: lui $[[REG1:[0-9]+]], 10
+; CHECK: ori $[[REG1]], $[[REG1]], 64206
+; CHECK: lw $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
+; CHECK: sw $[[REG1]], 0($[[REG2]])
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+
diff --git a/test/CodeGen/Mips/abicalls.ll b/test/CodeGen/Mips/abicalls.ll
new file mode 100644
index 000000000000..6fa33aa158ad
--- /dev/null
+++ b/test/CodeGen/Mips/abicalls.ll
@@ -0,0 +1,16 @@
+;
+; When the assembler is ready, a .s file for it will
+; be created.
+
+; Note that EF_MIPS_CPIC is set by -mabicalls, which is the default on Linux.
+; TODO: need to support -mno-abicalls
+
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-STATIC %s
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | FileCheck -check-prefix=CHECK-PIC %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips4 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-PIC %s
+; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips64 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-PIC %s
+
+; CHECK-STATIC: .abicalls
+; CHECK-STATIC-NEXT: pic0
+; CHECK-PIC: .abicalls
+; CHECK-PIC-NOT: pic0
diff --git a/test/CodeGen/Mips/abiflags-xx.ll b/test/CodeGen/Mips/abiflags-xx.ll
new file mode 100644
index 000000000000..c4610120fdd5
--- /dev/null
+++ b/test/CodeGen/Mips/abiflags-xx.ll
@@ -0,0 +1,5 @@
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -mattr=fpxx %s -o - | FileCheck %s
+
+; CHECK: .nan legacy
+; CHECK: .module fp=xx
+
diff --git a/test/CodeGen/Mips/abiflags32.ll b/test/CodeGen/Mips/abiflags32.ll
new file mode 100644
index 000000000000..e32d4a586ee3
--- /dev/null
+++ b/test/CodeGen/Mips/abiflags32.ll
@@ -0,0 +1,17 @@
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | FileCheck %s
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -mattr=fp64 %s -o - | FileCheck -check-prefix=CHECK-64 %s
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips64 -mattr=-n64,n32 %s -o - | FileCheck -check-prefix=CHECK-64n %s
+
+; CHECK: .nan legacy
+; We don't emit '.module fp=32' for compatibility with binutils 2.24, which
+; doesn't accept .module.
+; CHECK-NOT: .module fp=32
+
+; CHECK-64: .nan legacy
+; We do emit '.module fp=64', though, since it contradicts the default value.
+; CHECK-64: .module fp=64
+
+; CHECK-64n: .nan legacy
+; We don't emit '.module fp=64' for compatibility with binutils 2.24, which
+; doesn't accept .module.
+; CHECK-64n-NOT: .module fp=64
diff --git a/test/CodeGen/Mips/addi.ll b/test/CodeGen/Mips/addi.ll
index 8f70a469c44f..01d409e521d7 100644
--- a/test/CodeGen/Mips/addi.ll
+++ b/test/CodeGen/Mips/addi.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s -check-prefix=16
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=16
@i = global i32 6, align 4
@j = global i32 12, align 4
diff --git a/test/CodeGen/Mips/align16.ll b/test/CodeGen/Mips/align16.ll
index 267cff54291d..689ae8307f57 100644
--- a/test/CodeGen/Mips/align16.ll
+++ b/test/CodeGen/Mips/align16.ll
@@ -25,7 +25,7 @@ entry:
call void @p(i32* %arrayidx1)
ret void
}
-; 16: save $ra, $s0, $s1, $s2, 2040
-; 16: addiu $sp, -56 # 16 bit inst
-; 16: addiu $sp, 56 # 16 bit inst
-; 16: restore $ra, $s0, $s1, $s2, 2040
+; 16: save $ra, 2040
+; 16: addiu $sp, -40 # 16 bit inst
+; 16: addiu $sp, 40 # 16 bit inst
+; 16: restore $ra, 2040
diff --git a/test/CodeGen/Mips/alloca16.ll b/test/CodeGen/Mips/alloca16.ll
index 017665f00bd4..4f6059878c3b 100644
--- a/test/CodeGen/Mips/alloca16.ll
+++ b/test/CodeGen/Mips/alloca16.ll
@@ -19,8 +19,8 @@ entry:
define void @test() nounwind {
entry:
-; 16: .frame $sp,24,$ra
-; 16: save $ra, $s0, $s1, $s2, 24
+; 16: .frame $sp,8,$ra
+; 16: save 8 # 16 bit inst
; 16: move $16, $sp
; 16: move ${{[0-9]+}}, $sp
; 16: subu $[[REGISTER:[0-9]+]], ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/analyzebranch.ll b/test/CodeGen/Mips/analyzebranch.ll
index 8ec5d9313994..4b5d09778d79 100644
--- a/test/CodeGen/Mips/analyzebranch.ll
+++ b/test/CodeGen/Mips/analyzebranch.ll
@@ -1,9 +1,25 @@
-; RUN: llc -march=mips < %s | FileCheck %s
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=32-GPR
+; RUN: llc -march=mips64 -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips64 -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips64 -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=FCC
+; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=64-GPR
define double @foo(double %a, double %b) nounwind readnone {
entry:
-; CHECK: bc1f $BB
-; CHECK: nop
+; ALL-LABEL: foo:
+
+; FCC: bc1f $BB
+; FCC: nop
+
+; 32-GPR: mtc1 $zero, $[[Z:f[0-9]]]
+; 32-GPR: mthc1 $zero, $[[Z:f[0-9]]]
+; 64-GPR: dmtc1 $zero, $[[Z:f[0-9]]]
+; GPR: cmp.lt.d $[[FGRCC:f[0-9]+]], $[[Z]], $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB
%cmp = fcmp ogt double %a, 0.000000e+00
br i1 %cmp, label %if.end6, label %if.else
@@ -25,8 +41,17 @@ return: ; preds = %if.else, %if.end6
define void @f1(float %f) nounwind {
entry:
-; CHECK: bc1f $BB
-; CHECK: nop
+; ALL-LABEL: f1:
+
+; FCC: bc1f $BB
+; FCC: nop
+
+; GPR: mtc1 $zero, $[[Z:f[0-9]]]
+; GPR: cmp.eq.s $[[FGRCC:f[0-9]+]], $f12, $[[Z]]
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: beqz $[[GPRCC]], $BB
+
%cmp = fcmp une float %f, 0.000000e+00
br i1 %cmp, label %if.then, label %if.end
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index 0e60fe1fbfbc..f4118ecec79d 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -1,5 +1,14 @@
-; RUN: llc -march=mipsel --disable-machine-licm < %s | FileCheck %s -check-prefix=CHECK-EL
-; RUN: llc -march=mips --disable-machine-licm < %s | FileCheck %s -check-prefix=CHECK-EB
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+
+; Keep one big-endian check so that we don't reduce testing, but don't add more
+; since endianness doesn't affect the body of the atomic operations.
+; RUN: llc -march=mips --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EB
@x = common global i32 0, align 4
@@ -8,21 +17,16 @@ entry:
%0 = atomicrmw add i32* @x, i32 %incr monotonic
ret i32 %0
-; CHECK-EL-LABEL: AtomicLoadAdd32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EL: addu $[[R2:[0-9]+]], $[[R1]], $4
-; CHECK-EL: sc $[[R2]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-
-; CHECK-EB-LABEL: AtomicLoadAdd32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EB: addu $[[R2:[0-9]+]], $[[R1]], $4
-; CHECK-EB: sc $[[R2]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
+; ALL-LABEL: AtomicLoadAdd32:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R1:[0-9]+]], 0($[[R0]])
+; ALL: addu $[[R2:[0-9]+]], $[[R1]], $4
+; ALL: sc $[[R2]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicLoadNand32(i32 %incr) nounwind {
@@ -30,23 +34,17 @@ entry:
%0 = atomicrmw nand i32* @x, i32 %incr monotonic
ret i32 %0
-; CHECK-EL-LABEL: AtomicLoadNand32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EL: and $[[R3:[0-9]+]], $[[R1]], $4
-; CHECK-EL: nor $[[R2:[0-9]+]], $zero, $[[R3]]
-; CHECK-EL: sc $[[R2]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-
-; CHECK-EB-LABEL: AtomicLoadNand32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK-EB: and $[[R3:[0-9]+]], $[[R1]], $4
-; CHECK-EB: nor $[[R2:[0-9]+]], $zero, $[[R3]]
-; CHECK-EB: sc $[[R2]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
+; ALL-LABEL: AtomicLoadNand32:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R1:[0-9]+]], 0($[[R0]])
+; ALL: and $[[R3:[0-9]+]], $[[R1]], $4
+; ALL: nor $[[R2:[0-9]+]], $zero, $[[R3]]
+; ALL: sc $[[R2]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicSwap32(i32 %newval) nounwind {
@@ -57,19 +55,15 @@ entry:
%0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
ret i32 %0
-; CHECK-EL-LABEL: AtomicSwap32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll ${{[0-9]+}}, 0($[[R0]])
-; CHECK-EL: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-
-; CHECK-EB-LABEL: AtomicSwap32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll ${{[0-9]+}}, 0($[[R0]])
-; CHECK-EB: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
+; ALL-LABEL: AtomicSwap32:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll ${{[0-9]+}}, 0($[[R0]])
+; ALL: sc $[[R2:[0-9]+]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
}
define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
@@ -77,26 +71,21 @@ entry:
%newval.addr = alloca i32, align 4
store i32 %newval, i32* %newval.addr, align 4
%tmp = load i32* %newval.addr, align 4
- %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic
- ret i32 %0
+ %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
+ %1 = extractvalue { i32, i1 } %0, 0
+ ret i32 %1
+
+; ALL-LABEL: AtomicCmpSwap32:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
-; CHECK-EL-LABEL: AtomicCmpSwap32:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $2, 0($[[R0]])
-; CHECK-EL: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
-; CHECK-EL: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EL: beqz $[[R2]], $[[BB0]]
-; CHECK-EL: $[[BB1]]:
-
-; CHECK-EB-LABEL: AtomicCmpSwap32:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(x)
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $2, 0($[[R0]])
-; CHECK-EB: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
-; CHECK-EB: sc $[[R2:[0-9]+]], 0($[[R0]])
-; CHECK-EB: beqz $[[R2]], $[[BB0]]
-; CHECK-EB: $[[BB1]]:
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $2, 0($[[R0]])
+; ALL: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
+; ALL: sc $[[R2:[0-9]+]], 0($[[R0]])
+; ALL: beqz $[[R2]], $[[BB0]]
+; ALL: $[[BB1]]:
}
@@ -108,56 +97,38 @@ entry:
%0 = atomicrmw add i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicLoadAdd8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicLoadAdd8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL-LABEL: AtomicLoadAdd8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
}
define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
@@ -165,56 +136,38 @@ entry:
%0 = atomicrmw sub i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicLoadSub8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicLoadSub8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
+; ALL-LABEL: AtomicLoadSub8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
}
define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
@@ -222,58 +175,39 @@ entry:
%0 = atomicrmw nand i8* @y, i8 %incr monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicLoadNand8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EL: nor $[[R11:[0-9]+]], $zero, $[[R18]]
-; CHECK-EL: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicLoadNand8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK-EB: nor $[[R11:[0-9]+]], $zero, $[[R18]]
-; CHECK-EB: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL-LABEL: AtomicLoadNand8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: nor $[[R11:[0-9]+]], $zero, $[[R18]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
}
define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
@@ -281,121 +215,170 @@ entry:
%0 = atomicrmw xchg i8* @y, i8 %newval monotonic
ret i8 %0
-; CHECK-EL-LABEL: AtomicSwap8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: sllv $[[R9:[0-9]+]], $4, $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EL: and $[[R18:[0-9]+]], $[[R9]], $[[R6]]
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EL: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
-; CHECK-EL: sc $[[R14]], 0($[[R2]])
-; CHECK-EL: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EL: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicSwap8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: sllv $[[R9:[0-9]+]], $4, $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R10:[0-9]+]], 0($[[R2]])
-; CHECK-EB: and $[[R18:[0-9]+]], $[[R9]], $[[R7]]
-; CHECK-EB: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
-; CHECK-EB: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
-; CHECK-EB: sc $[[R14]], 0($[[R2]])
-; CHECK-EB: beqz $[[R14]], $[[BB0]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
-; CHECK-EB: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
-; CHECK-EB: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EB: sra $2, $[[R17]], 24
+; ALL-LABEL: AtomicSwap8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: and $[[R18:[0-9]+]], $[[R9]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 24
+; NO-SEB-SEH: sra $2, $[[R17]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R16]]
+
}
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
entry:
- %0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic
+ %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+ %0 = extractvalue { i8, i1 } %pair0, 0
ret i8 %0
-; CHECK-EL-LABEL: AtomicCmpSwap8:
-; CHECK-EL: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EL: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EL: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EL: sll $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EL: ori $[[R5:[0-9]+]], $zero, 255
-; CHECK-EL: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK-EL: nor $[[R7:[0-9]+]], $zero, $[[R6]]
-; CHECK-EL: andi $[[R8:[0-9]+]], $4, 255
-; CHECK-EL: sllv $[[R9:[0-9]+]], $[[R8]], $[[R4]]
-; CHECK-EL: andi $[[R10:[0-9]+]], $5, 255
-; CHECK-EL: sllv $[[R11:[0-9]+]], $[[R10]], $[[R4]]
-
-; CHECK-EL: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EL: ll $[[R12:[0-9]+]], 0($[[R2]])
-; CHECK-EL: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
-; CHECK-EL: bne $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]
-
-; CHECK-EL: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
-; CHECK-EL: or $[[R15:[0-9]+]], $[[R14]], $[[R11]]
-; CHECK-EL: sc $[[R15]], 0($[[R2]])
-; CHECK-EL: beqz $[[R15]], $[[BB0]]
-
-; CHECK-EL: $[[BB1]]:
-; CHECK-EL: srlv $[[R16:[0-9]+]], $[[R13]], $[[R4]]
-; CHECK-EL: sll $[[R17:[0-9]+]], $[[R16]], 24
-; CHECK-EL: sra $2, $[[R17]], 24
-
-; CHECK-EB-LABEL: AtomicCmpSwap8:
-; CHECK-EB: lw $[[R0:[0-9]+]], %got(y)
-; CHECK-EB: addiu $[[R1:[0-9]+]], $zero, -4
-; CHECK-EB: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
-; CHECK-EB: andi $[[R3:[0-9]+]], $[[R0]], 3
-; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
-; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
-; CHECK-EB: ori $[[R6:[0-9]+]], $zero, 255
-; CHECK-EB: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
-; CHECK-EB: nor $[[R8:[0-9]+]], $zero, $[[R7]]
-; CHECK-EB: andi $[[R9:[0-9]+]], $4, 255
-; CHECK-EB: sllv $[[R10:[0-9]+]], $[[R9]], $[[R5]]
-; CHECK-EB: andi $[[R11:[0-9]+]], $5, 255
-; CHECK-EB: sllv $[[R12:[0-9]+]], $[[R11]], $[[R5]]
-
-; CHECK-EB: $[[BB0:[A-Z_0-9]+]]:
-; CHECK-EB: ll $[[R13:[0-9]+]], 0($[[R2]])
-; CHECK-EB: and $[[R14:[0-9]+]], $[[R13]], $[[R7]]
-; CHECK-EB: bne $[[R14]], $[[R10]], $[[BB1:[A-Z_0-9]+]]
-
-; CHECK-EB: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
-; CHECK-EB: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
-; CHECK-EB: sc $[[R16]], 0($[[R2]])
-; CHECK-EB: beqz $[[R16]], $[[BB0]]
-
-; CHECK-EB: $[[BB1]]:
-; CHECK-EB: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
-; CHECK-EB: sll $[[R18:[0-9]+]], $[[R17]], 24
-; CHECK-EB: sra $2, $[[R18]], 24
+; ALL-LABEL: AtomicCmpSwap8:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(y)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(y)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: andi $[[R9:[0-9]+]], $4, 255
+; ALL: sllv $[[R10:[0-9]+]], $[[R9]], $[[R5]]
+; ALL: andi $[[R11:[0-9]+]], $5, 255
+; ALL: sllv $[[R12:[0-9]+]], $[[R11]], $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R13:[0-9]+]], 0($[[R2]])
+; ALL: and $[[R14:[0-9]+]], $[[R13]], $[[R7]]
+; ALL: bne $[[R14]], $[[R10]], $[[BB1:[A-Z_0-9]+]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
+; ALL: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
+; ALL: sc $[[R16]], 0($[[R2]])
+; ALL: beqz $[[R16]], $[[BB0]]
+
+; ALL: $[[BB1]]:
+; ALL: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R18:[0-9]+]], $[[R17]], 24
+; NO-SEB-SEH: sra $2, $[[R18]], 24
+
+; HAS-SEB-SEH: seb $2, $[[R17]]
}
+define i1 @AtomicCmpSwapRes8(i8* %ptr, i8 %oldval, i8 signext %newval) nounwind {
+entry:
+ %0 = cmpxchg i8* %ptr, i8 %oldval, i8 %newval monotonic monotonic
+ %1 = extractvalue { i8, i1 } %0, 1
+ ret i1 %1
+; ALL-LABEL: AtomicCmpSwapRes8
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $4, $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $4, 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 3
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 255
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: andi $[[R9:[0-9]+]], $5, 255
+; ALL: sllv $[[R10:[0-9]+]], $[[R9]], $[[R5]]
+; ALL: andi $[[R11:[0-9]+]], $6, 255
+; ALL: sllv $[[R12:[0-9]+]], $[[R11]], $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R13:[0-9]+]], 0($[[R2]])
+; ALL: and $[[R14:[0-9]+]], $[[R13]], $[[R7]]
+; ALL: bne $[[R14]], $[[R10]], $[[BB1:[A-Z_0-9]+]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
+; ALL: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
+; ALL: sc $[[R16]], 0($[[R2]])
+; ALL: beqz $[[R16]], $[[BB0]]
+
+; ALL: $[[BB1]]:
+; ALL: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R18:[0-9]+]], $[[R17]], 24
+; NO-SEB-SEH: sra $[[R19:[0-9]+]], $[[R18]], 24
+
+; HAS-SEB-SEH: seb $[[R19:[0-9]+]], $[[R17]]
+
+; ALL: xor $[[R20:[0-9]+]], $[[R19]], $5
+; ALL: sltiu $2, $[[R20]], 1
+}
+
+; Check one i16 so that we cover the seh sign extension.
+@z = common global i16 0, align 1
+
+define signext i16 @AtomicLoadAdd16(i16 signext %incr) nounwind {
+entry:
+ %0 = atomicrmw add i16* @z, i16 %incr monotonic
+ ret i16 %0
+
+; ALL-LABEL: AtomicLoadAdd16:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(z)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(z)(
+
+; ALL: addiu $[[R1:[0-9]+]], $zero, -4
+; ALL: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
+; ALL: andi $[[R3:[0-9]+]], $[[R0]], 3
+; CHECK-EB: xori $[[R4:[0-9]+]], $[[R3]], 2
+; CHECK-EB: sll $[[R5:[0-9]+]], $[[R4]], 3
+; CHECK-EL: sll $[[R5:[0-9]+]], $[[R3]], 3
+; ALL: ori $[[R6:[0-9]+]], $zero, 65535
+; ALL: sllv $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; ALL: nor $[[R8:[0-9]+]], $zero, $[[R7]]
+; ALL: sllv $[[R9:[0-9]+]], $4, $[[R5]]
+
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R10:[0-9]+]], 0($[[R2]])
+; ALL: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
+; ALL: and $[[R12:[0-9]+]], $[[R11]], $[[R7]]
+; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
+; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
+; ALL: sc $[[R14]], 0($[[R2]])
+; ALL: beqz $[[R14]], $[[BB0]]
+
+; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
+; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
+
+; NO-SEB-SEH: sll $[[R17:[0-9]+]], $[[R16]], 16
+; NO-SEB-SEH: sra $2, $[[R17]], 16
+
+; HAS-SEB-SEH: seh $2, $[[R16]]
+}
+
+
@countsint = common global i32 0, align 4
define i32 @CheckSync(i32 %v) nounwind noinline {
@@ -403,19 +386,13 @@ entry:
%0 = atomicrmw add i32* @countsint, i32 %v seq_cst
ret i32 %0
-; CHECK-EL-LABEL: CheckSync:
-; CHECK-EL: sync 0
-; CHECK-EL: ll
-; CHECK-EL: sc
-; CHECK-EL: beq
-; CHECK-EL: sync 0
-
-; CHECK-EB-LABEL: CheckSync:
-; CHECK-EB: sync 0
-; CHECK-EB: ll
-; CHECK-EB: sc
-; CHECK-EB: beq
-; CHECK-EB: sync 0
+; ALL-LABEL: CheckSync:
+
+; ALL: sync
+; ALL: ll
+; ALL: sc
+; ALL: beq
+; ALL: sync
}
; make sure that this assertion in
@@ -429,8 +406,29 @@ entry:
define i32 @zeroreg() nounwind {
entry:
- %0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst
+ %pair0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst seq_cst
+ %0 = extractvalue { i32, i1 } %pair0, 0
%1 = icmp eq i32 %0, 1
%conv = zext i1 %1 to i32
ret i32 %conv
}
+
+; Check that MIPS32R6 has the correct offset range.
+; FIXME: At the moment, we don't seem to do addr+offset for any atomic load/store.
+define i32 @AtomicLoadAdd32_OffGt9Bit(i32 %incr) nounwind {
+entry:
+ %0 = atomicrmw add i32* getelementptr(i32* @x, i32 256), i32 %incr monotonic
+ ret i32 %0
+
+; ALL-LABEL: AtomicLoadAdd32_OffGt9Bit:
+
+; MIPS32-ANY: lw $[[R0:[0-9]+]], %got(x)
+; MIPS64-ANY: ld $[[R0:[0-9]+]], %got_disp(x)(
+
+; ALL: addiu $[[PTR:[0-9]+]], $[[R0]], 1024
+; ALL: $[[BB0:[A-Z_0-9]+]]:
+; ALL: ll $[[R1:[0-9]+]], 0($[[PTR]])
+; ALL: addu $[[R2:[0-9]+]], $[[R1]], $4
+; ALL: sc $[[R2]], 0($[[PTR]])
+; ALL: beqz $[[R2]], $[[BB0]]
+}
diff --git a/test/CodeGen/Mips/atomicops.ll b/test/CodeGen/Mips/atomicops.ll
index 0f0f01afc142..c26415233d0b 100644
--- a/test/CodeGen/Mips/atomicops.ll
+++ b/test/CodeGen/Mips/atomicops.ll
@@ -20,7 +20,8 @@ entry:
%add.i = add nsw i32 %0, 2
%1 = load volatile i32* %x, align 4
%call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
- %2 = cmpxchg i32* %x, i32 1, i32 2 seq_cst
+ %pair = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
+ %2 = extractvalue { i32, i1 } %pair, 0
%3 = load volatile i32* %x, align 4
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
%4 = atomicrmw xchg i32* %x, i32 1 seq_cst
diff --git a/test/CodeGen/Mips/blez_bgez.ll b/test/CodeGen/Mips/blez_bgez.ll
index f6a5e4f47a5a..dcda047f8d09 100644
--- a/test/CodeGen/Mips/blez_bgez.ll
+++ b/test/CodeGen/Mips/blez_bgez.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=mipsel < %s | FileCheck %s
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+; RUN: llc -march=mips64el < %s | FileCheck %s
; CHECK-LABEL: test_blez:
; CHECK: blez ${{[0-9]+}}, $BB
diff --git a/test/CodeGen/Mips/blockaddr.ll b/test/CodeGen/Mips/blockaddr.ll
index beab65f47196..d6dc7e7b24b0 100644
--- a/test/CodeGen/Mips/blockaddr.ll
+++ b/test/CodeGen/Mips/blockaddr.ll
@@ -1,11 +1,11 @@
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-O32
; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-O32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-1
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-2
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-2
@reg = common global i8* null, align 4
@@ -43,8 +43,8 @@ entry:
; STATIC-MIPS16-1: li $[[R1_16:[0-9]+]], %hi($tmp[[TI_16:[0-9]+]])
; STATIC-MIPS16-1: sll ${{[0-9]+}}, $[[R1_16]], 16
; STATIC-MIPS16-2: li ${{[0-9]+}}, %lo($tmp{{[0-9]+}})
-; STATIC-MIPS16-1 jal dummy
-; STATIC-MIPS16-2 jal dummy
+; STATIC-MIPS16-1: jal dummy
+; STATIC-MIPS16-2: jal dummy
define void @f() nounwind {
entry:
diff --git a/test/CodeGen/Mips/bswap.ll b/test/CodeGen/Mips/bswap.ll
index f17b91aab802..812eef137773 100644
--- a/test/CodeGen/Mips/bswap.ll
+++ b/test/CodeGen/Mips/bswap.ll
@@ -1,28 +1,105 @@
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=MIPS32
; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=MIPS64
-; RUN: llc < %s -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float | FileCheck %s -check-prefix=mips16
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 | FileCheck %s -check-prefix=MIPS16
define i32 @bswap32(i32 %x) nounwind readnone {
entry:
; MIPS32-LABEL: bswap32:
; MIPS32: wsbh $[[R0:[0-9]+]]
; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
-; mips16: .ent bswap32
+
+; MIPS64-LABEL: bswap32:
+; MIPS64: wsbh $[[R0:[0-9]+]]
+; MIPS64: rotr ${{[0-9]+}}, $[[R0]], 16
+
+; MIPS16-LABEL: bswap32:
+; MIPS16-DAG: srl $[[R0:[0-9]+]], $4, 8
+; MIPS16-DAG: srl $[[R1:[0-9]+]], $4, 24
+; MIPS16-DAG: sll $[[R2:[0-9]+]], $4, 8
+; MIPS16-DAG: sll $[[R3:[0-9]+]], $4, 24
+; MIPS16-DAG: li $[[R4:[0-9]+]], 65280
+; MIPS16-DAG: and $[[R4]], $[[R0]]
+; MIPS16-DAG: or $[[R1]], $[[R4]]
+; MIPS16-DAG: lw $[[R7:[0-9]+]], $CPI
+; MIPS16-DAG: and $[[R7]], $[[R2]]
+; MIPS16-DAG: or $[[R3]], $[[R7]]
+; MIPS16-DAG: or $[[R3]], $[[R1]]
+
%or.3 = call i32 @llvm.bswap.i32(i32 %x)
ret i32 %or.3
}
define i64 @bswap64(i64 %x) nounwind readnone {
entry:
+; MIPS32-LABEL: bswap64:
+; MIPS32: wsbh $[[R0:[0-9]+]]
+; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+; MIPS32: wsbh $[[R0:[0-9]+]]
+; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+
; MIPS64-LABEL: bswap64:
; MIPS64: dsbh $[[R0:[0-9]+]]
; MIPS64: dshd ${{[0-9]+}}, $[[R0]]
-; mips16: .ent bswap64
+
+; MIPS16-LABEL: bswap64:
+; MIPS16-DAG: srl $[[R0:[0-9]+]], $5, 8
+; MIPS16-DAG: srl $[[R1:[0-9]+]], $5, 24
+; MIPS16-DAG: sll $[[R2:[0-9]+]], $5, 8
+; MIPS16-DAG: sll $[[R3:[0-9]+]], $5, 24
+; MIPS16-DAG: li $[[R4:[0-9]+]], 65280
+; MIPS16-DAG: and $[[R0]], $[[R4]]
+; MIPS16-DAG: or $[[R1]], $[[R0]]
+; MIPS16-DAG: lw $[[R7:[0-9]+]], 1f
+; MIPS16-DAG: and $[[R2]], $[[R7]]
+; MIPS16-DAG: or $[[R3]], $[[R2]]
+; MIPS16-DAG: or $[[R3]], $[[R1]]
+; MIPS16-DAG: srl $[[R0:[0-9]+]], $4, 8
+; MIPS16-DAG: srl $[[R1:[0-9]+]], $4, 24
+; MIPS16-DAG: sll $[[R2:[0-9]+]], $4, 8
+; MIPS16-DAG: sll $[[R3:[0-9]+]], $4, 24
+; MIPS16-DAG: li $[[R4:[0-9]+]], 65280
+; MIPS16-DAG: and $[[R0]], $[[R4]]
+; MIPS16-DAG: or $[[R1]], $[[R0]]
+; MIPS16-DAG: lw $[[R7:[0-9]+]], 1f
+; MIPS16-DAG: and $[[R2]], $[[R7]]
+; MIPS16-DAG: or $[[R3]], $[[R2]]
+; MIPS16-DAG: or $[[R3]], $[[R1]]
+
%or.7 = call i64 @llvm.bswap.i64(i64 %x)
ret i64 %or.7
}
+define <4 x i32> @bswapv4i32(<4 x i32> %x) nounwind readnone {
+entry:
+; MIPS32-LABEL: bswapv4i32:
+; MIPS32: wsbh $[[R0:[0-9]+]]
+; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+; MIPS32: wsbh $[[R0:[0-9]+]]
+; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+; MIPS32: wsbh $[[R0:[0-9]+]]
+; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+; MIPS32: wsbh $[[R0:[0-9]+]]
+; MIPS32: rotr ${{[0-9]+}}, $[[R0]], 16
+
+; MIPS64-LABEL: bswapv4i32:
+; MIPS64: wsbh $[[R0:[0-9]+]]
+; MIPS64: rotr ${{[0-9]+}}, $[[R0]], 16
+; MIPS64: wsbh $[[R0:[0-9]+]]
+; MIPS64: rotr ${{[0-9]+}}, $[[R0]], 16
+; MIPS64: wsbh $[[R0:[0-9]+]]
+; MIPS64: rotr ${{[0-9]+}}, $[[R0]], 16
+; MIPS64: wsbh $[[R0:[0-9]+]]
+; MIPS64: rotr ${{[0-9]+}}, $[[R0]], 16
+
+; Don't bother with a MIPS16 version. It's just bswap32 repeated four times and
+; would be very long.
+
+ %ret = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %x)
+ ret <4 x i32> %ret
+}
+
declare i32 @llvm.bswap.i32(i32) nounwind readnone
declare i64 @llvm.bswap.i64(i64) nounwind readnone
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) nounwind readnone
diff --git a/test/CodeGen/Mips/buildpairextractelementf64.ll b/test/CodeGen/Mips/buildpairextractelementf64.ll
index 490d4273c5b6..7682a98ace99 100644
--- a/test/CodeGen/Mips/buildpairextractelementf64.ll
+++ b/test/CodeGen/Mips/buildpairextractelementf64.ll
@@ -1,15 +1,19 @@
-; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=FP32
-; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=FP32
-; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64
-; RUN: llc -march=mips -mattr=+fp64 < %s | FileCheck %s -check-prefix=FP64
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=NO-MFHC1 -check-prefix=ALL
+; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=NO-MFHC1 -check-prefix=ALL
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=HAS-MFHC1 -check-prefix=ALL
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=HAS-MFHC1 -check-prefix=ALL
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+fp64 < %s | FileCheck %s -check-prefix=HAS-MFHC1 -check-prefix=ALL
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=+fp64 < %s | FileCheck %s -check-prefix=HAS-MFHC1 -check-prefix=ALL
@a = external global i32
-; CHECK-LABEL: f:
-; FP32: mtc1
-; FP32: mtc1
-; FP64-DAG: mtc1
-; FP64-DAG: mthc1
+; ALL-LABEL: f:
+
+; NO-MFHC1: mtc1
+; NO-MFHC1: mtc1
+
+; HAS-MFHC1-DAG: mtc1
+; HAS-MFHC1-DAG: mthc1
define double @f(i32 %a1, double %d) nounwind {
entry:
@@ -18,11 +22,13 @@ entry:
ret double %add
}
-; CHECK-LABEL: f3:
-; FP32: mfc1
-; FP32: mfc1
-; FP64-DAG: mfc1
-; FP64-DAG: mfhc1
+; ALL-LABEL: f3:
+
+; NO-MFHC1: mfc1
+; NO-MFHC1: mfc1
+
+; HAS-MFHC1-DAG: mfc1
+; HAS-MFHC1-DAG: mfhc1
define void @f3(double %d, i32 %a1) nounwind {
entry:
diff --git a/test/CodeGen/Mips/cache-intrinsic.ll b/test/CodeGen/Mips/cache-intrinsic.ll
new file mode 100644
index 000000000000..2fa411589596
--- /dev/null
+++ b/test/CodeGen/Mips/cache-intrinsic.ll
@@ -0,0 +1,26 @@
+; RUN: llc %s -o - | FileCheck %s
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@buffer = global [32 x i8] c"This is a largely unused buffer\00", align 1
+@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
+@.str1 = private unnamed_addr constant [25 x i8] c"Still, largely unused...\00", align 1
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
+ %call1 = call i8* @strcpy(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds ([25 x i8]* @.str1, i32 0, i32 0)) #3
+ call void @llvm.clear_cache(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds (i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i32 32)) #3
+ %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
+ ret i32 0
+}
+
+; CHECK: __clear_cache
+
+declare i32 @printf(i8*, ...)
+
+declare i8* @strcpy(i8*, i8*)
+
+declare void @llvm.clear_cache(i8*, i8*)
diff --git a/test/CodeGen/Mips/call-optimization.ll b/test/CodeGen/Mips/call-optimization.ll
new file mode 100644
index 000000000000..bfa09eaae3cb
--- /dev/null
+++ b/test/CodeGen/Mips/call-optimization.ll
@@ -0,0 +1,91 @@
+; RUN: llc -march=mipsel -disable-mips-delay-filler < %s | \
+; RUN: FileCheck %s -check-prefix=O32
+; RUN: llc -march=mipsel -mips-load-target-from-got=false \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=O32-LOADTGT
+
+@gd1 = common global double 0.000000e+00, align 8
+@gd2 = common global double 0.000000e+00, align 8
+
+; O32-LABEL: caller3:
+; O32-DAG: lw $25, %call16(callee3)
+; O32-DAG: move $gp
+; O32: jalr $25
+; O32-NOT: move $gp
+; O32: lw $25, %call16(callee3)
+; O32-NOT: move $gp
+; O32: jalr $25
+; O32-NOT: move $gp
+; O32: lw $25, %call16(callee3)
+; O32-NOT: move $gp
+; O32: jalr $25
+
+; O32-LOADTGT-LABEL: caller3:
+; O32-LOADTGT-DAG: lw $25, %call16(callee3)
+; O32-LOADTGT-DAG: move $gp
+; O32-LOADTGT: jalr $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: move $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: jalr $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: move $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: jalr $25
+
+define void @caller3(i32 %n) {
+entry:
+ tail call void @callee3()
+ tail call void @callee3()
+ %tobool1 = icmp eq i32 %n, 0
+ br i1 %tobool1, label %while.end, label %while.body
+
+while.body:
+ %n.addr.02 = phi i32 [ %dec, %while.body ], [ %n, %entry ]
+ %dec = add nsw i32 %n.addr.02, -1
+ tail call void @callee3()
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+declare void @callee3()
+
+; O32-LABEL: caller4:
+; O32-DAG: lw $25, %call16(ceil)
+; O32-DAG: move $gp
+; O32: jalr $25
+; O32-NOT: move $gp
+; O32: lw $25, %call16(ceil)
+; O32-NOT: move $gp
+; O32: jalr $25
+; O32-NOT: move $gp
+; O32: lw $25, %call16(ceil)
+; O32-NOT: move $gp
+; O32: jalr $25
+
+; O32-LOADTGT-LABEL: caller4:
+; O32-LOADTGT-DAG: lw $25, %call16(ceil)
+; O32-LOADTGT-DAG: move $gp
+; O32-LOADTGT: jalr $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: move $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: jalr $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: move $25
+; O32-LOADTGT-NOT: move $gp
+; O32-LOADTGT: jalr $25
+
+define void @caller4(double %d) {
+entry:
+ %call = tail call double @ceil(double %d)
+ %call1 = tail call double @ceil(double %call)
+ store double %call1, double* @gd2, align 8
+ %call2 = tail call double @ceil(double %call1)
+ store double %call2, double* @gd1, align 8
+ ret void
+}
+
+declare double @ceil(double)
diff --git a/test/CodeGen/Mips/cconv/arguments-float.ll b/test/CodeGen/Mips/cconv/arguments-float.ll
new file mode 100644
index 000000000000..e2119ec08028
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-float.ll
@@ -0,0 +1,222 @@
+; RUN: llc -march=mips -relocation-model=static -soft-float < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32BE %s
+; RUN: llc -march=mipsel -relocation-model=static -soft-float < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32LE %s
+
+; RUN-TODO: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+
+; Test the floating point arguments for all ABIs and byte orders as specified
+; by section 5 of MD00305 (MIPS ABIs Described).
+;
+; N32/N64 are identical in this area, so their checks have been combined into
+; the 'NEW' prefix (the N stands for New).
+
+@bytes = global [11 x i8] zeroinitializer
+@dwords = global [11 x i64] zeroinitializer
+@floats = global [11 x float] zeroinitializer
+@doubles = global [11 x double] zeroinitializer
+
+define void @double_args(double %a, double %b, double %c, double %d, double %e,
+ double %f, double %g, double %h, double %i) nounwind {
+entry:
+ %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ store volatile double %a, double* %0
+ %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+ store volatile double %b, double* %1
+ %2 = getelementptr [11 x double]* @doubles, i32 0, i32 3
+ store volatile double %c, double* %2
+ %3 = getelementptr [11 x double]* @doubles, i32 0, i32 4
+ store volatile double %d, double* %3
+ %4 = getelementptr [11 x double]* @doubles, i32 0, i32 5
+ store volatile double %e, double* %4
+ %5 = getelementptr [11 x double]* @doubles, i32 0, i32 6
+ store volatile double %f, double* %5
+ %6 = getelementptr [11 x double]* @doubles, i32 0, i32 7
+ store volatile double %g, double* %6
+ %7 = getelementptr [11 x double]* @doubles, i32 0, i32 8
+ store volatile double %h, double* %7
+ %8 = getelementptr [11 x double]* @doubles, i32 0, i32 9
+ store volatile double %i, double* %8
+ ret void
+}
+
+; ALL-LABEL: double_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+
+; The first four arguments are the same in O32/N32/N64.
+; The first argument is floating point but soft-float is enabled so floating
+; point registers are not used.
+; O32-DAG: sw $4, 8([[R2]])
+; O32-DAG: sw $5, 12([[R2]])
+; NEW-DAG: sd $4, 8([[R2]])
+
+; O32-DAG: sw $6, 16([[R2]])
+; O32-DAG: sw $7, 20([[R2]])
+; NEW-DAG: sd $5, 16([[R2]])
+
+; O32 has run out of argument registers and starts using the stack
+; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp)
+; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp)
+; O32-DAG: sw [[R3]], 24([[R2]])
+; O32-DAG: sw [[R4]], 28([[R2]])
+; NEW-DAG: sd $6, 24([[R2]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 32($sp)
+; O32-DAG: lw [[R4:\$[0-9]+]], 36($sp)
+; O32-DAG: sw [[R3]], 32([[R2]])
+; O32-DAG: sw [[R4]], 36([[R2]])
+; NEW-DAG: sd $7, 32([[R2]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 40($sp)
+; O32-DAG: lw [[R4:\$[0-9]+]], 44($sp)
+; O32-DAG: sw [[R3]], 40([[R2]])
+; O32-DAG: sw [[R4]], 44([[R2]])
+; NEW-DAG: sd $8, 40([[R2]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 48($sp)
+; O32-DAG: lw [[R4:\$[0-9]+]], 52($sp)
+; O32-DAG: sw [[R3]], 48([[R2]])
+; O32-DAG: sw [[R4]], 52([[R2]])
+; NEW-DAG: sd $9, 48([[R2]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 56($sp)
+; O32-DAG: lw [[R4:\$[0-9]+]], 60($sp)
+; O32-DAG: sw [[R3]], 56([[R2]])
+; O32-DAG: sw [[R4]], 60([[R2]])
+; NEW-DAG: sd $10, 56([[R2]])
+
+; N32/N64 have run out of registers and start using the stack too
+; O32-DAG: lw [[R3:\$[0-9]+]], 64($sp)
+; O32-DAG: lw [[R4:\$[0-9]+]], 68($sp)
+; O32-DAG: sw [[R3]], 64([[R2]])
+; O32-DAG: sw [[R4]], 68([[R2]])
+; NEW-DAG: ld [[R3:\$[0-9]+]], 0($sp)
+; NEW-DAG: sd $11, 64([[R2]])
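+;
+; For readability, an informal summary of the mapping the checks above encode
+; (a sketch only; it is not itself checked)
+;   O32 (soft-float)  - %a in $4/$5, %b in $6/$7, %c onwards read from the stack
+;   N32/N64           - %a..%h in $4..$11, %i read from 0($sp)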
+
+define void @float_args(float %a, float %b, float %c, float %d, float %e,
+ float %f, float %g, float %h, float %i, float %j)
+ nounwind {
+entry:
+ %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ store volatile float %a, float* %0
+ %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+ store volatile float %b, float* %1
+ %2 = getelementptr [11 x float]* @floats, i32 0, i32 3
+ store volatile float %c, float* %2
+ %3 = getelementptr [11 x float]* @floats, i32 0, i32 4
+ store volatile float %d, float* %3
+ %4 = getelementptr [11 x float]* @floats, i32 0, i32 5
+ store volatile float %e, float* %4
+ %5 = getelementptr [11 x float]* @floats, i32 0, i32 6
+ store volatile float %f, float* %5
+ %6 = getelementptr [11 x float]* @floats, i32 0, i32 7
+ store volatile float %g, float* %6
+ %7 = getelementptr [11 x float]* @floats, i32 0, i32 8
+ store volatile float %h, float* %7
+ %8 = getelementptr [11 x float]* @floats, i32 0, i32 9
+ store volatile float %i, float* %8
+ %9 = getelementptr [11 x float]* @floats, i32 0, i32 10
+ store volatile float %j, float* %9
+ ret void
+}
+
+; ALL-LABEL: float_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+
+; The first four arguments are the same in O32/N32/N64.
+; The first argument isn't floating point so floating point registers are not
+; used.
+; MD00305 and GCC disagree on this one. MD00305 says that floats are treated
+; as 8-byte aligned and occupy two slots on O32. GCC is treating them as 4-byte
+; aligned and occupying one slot. We'll use GCC's definition.
+; ALL-DAG: sw $4, 4([[R2]])
+; ALL-DAG: sw $5, 8([[R2]])
+; ALL-DAG: sw $6, 12([[R2]])
+; ALL-DAG: sw $7, 16([[R2]])
+
+; O32 has run out of argument registers and starts using the stack
+; O32-DAG: lw [[R3:\$[0-9]+]], 16($sp)
+; O32-DAG: sw [[R3]], 20([[R2]])
+; NEW-DAG: sw $8, 20([[R2]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 20($sp)
+; O32-DAG: sw [[R3]], 24([[R2]])
+; NEW-DAG: sw $9, 24([[R2]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 24($sp)
+; O32-DAG: sw [[R3]], 28([[R2]])
+; NEW-DAG: sw $10, 28([[R2]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 28($sp)
+; O32-DAG: sw [[R3]], 32([[R2]])
+; NEW-DAG: sw $11, 32([[R2]])
+
+; N32/N64 have run out of registers and start using the stack too
+; O32-DAG: lw [[R3:\$[0-9]+]], 32($sp)
+; O32-DAG: sw [[R3]], 36([[R2]])
+; NEW-DAG: lw [[R3:\$[0-9]+]], 0($sp)
+; NEW-DAG: sw [[R3]], 36([[R2]])
+
+define void @double_arg2(i8 %a, double %b) nounwind {
+entry:
+ %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ store volatile i8 %a, i8* %0
+ %1 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ store volatile double %b, double* %1
+ ret void
+}
+
+; ALL-LABEL: double_arg2:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+
+; The first four arguments are the same in O32/N32/N64.
+; The first argument isn't floating point so floating point registers are not
+; used.
+; The second slot is insufficiently aligned for double on O32 so it is skipped.
+; Also, double occupies two slots on O32 and only one for N32/N64.
+; ALL-DAG: sb $4, 1([[R1]])
+; O32-DAG: sw $6, 8([[R2]])
+; O32-DAG: sw $7, 12([[R2]])
+; NEW-DAG: sd $5, 8([[R2]])
+
+define void @float_arg2(i8 %a, float %b) nounwind {
+entry:
+ %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ store volatile i8 %a, i8* %0
+ %1 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ store volatile float %b, float* %1
+ ret void
+}
+
+; ALL-LABEL: float_arg2:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+
+; The first four arguments are the same in O32/N32/N64.
+; The first argument isn't floating point so floating point registers are not
+; used.
+; MD00305 and GCC disagree on this one. MD00305 says that floats are treated
+; as 8-byte aligned and occupy two slots on O32. GCC is treating them as 4-byte
+; aligned and occupying one slot. We'll use GCC's definition.
+; ALL-DAG: sb $4, 1([[R1]])
+; ALL-DAG: sw $5, 4([[R2]])
diff --git a/test/CodeGen/Mips/cconv/arguments-fp128.ll b/test/CodeGen/Mips/cconv/arguments-fp128.ll
new file mode 100644
index 000000000000..c8cd8fd11e50
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-fp128.ll
@@ -0,0 +1,51 @@
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+
+; Test the fp128 arguments for all ABIs and byte orders as specified
+; by section 2 of the MIPSpro N32 Handbook.
+;
+; O32 is not tested because long double is the same as double on O32.
+
+@ldoubles = global [11 x fp128] zeroinitializer
+
+define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
+entry:
+ %0 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 1
+ store volatile fp128 %a, fp128* %0
+ %1 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 2
+ store volatile fp128 %b, fp128* %1
+ %2 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 3
+ store volatile fp128 %c, fp128* %2
+ %3 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 4
+ store volatile fp128 %d, fp128* %3
+ %4 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 5
+ store volatile fp128 %e, fp128* %4
+ ret void
+}
+
+; ALL-LABEL: ldouble_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(ldoubles)(
+
+; The first four arguments are the same in N32/N64.
+; The first argument is floating point but soft-float is enabled so floating
+; point registers are not used.
+; ALL-DAG: sd $4, 16([[R2]])
+; ALL-DAG: sd $5, 24([[R2]])
+; ALL-DAG: sd $6, 32([[R2]])
+; ALL-DAG: sd $7, 40([[R2]])
+; ALL-DAG: sd $8, 48([[R2]])
+; ALL-DAG: sd $9, 56([[R2]])
+; ALL-DAG: sd $10, 64([[R2]])
+; ALL-DAG: sd $11, 72([[R2]])
+
+; N32/N64 have run out of registers and start using the stack too
+; ALL-DAG: ld [[R3:\$[0-9]+]], 0($sp)
+; ALL-DAG: ld [[R4:\$[0-9]+]], 8($sp)
+; ALL-DAG: sd [[R3]], 80([[R2]])
+; ALL-DAG: sd [[R4]], 88([[R2]])
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
new file mode 100644
index 000000000000..aadf7d18c17d
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
@@ -0,0 +1,157 @@
+; RUN: llc -march=mips -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32BE %s
+; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32LE %s
+
+; RUN-TODO: llc -march=mips64 -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=N32 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=N32 --check-prefix=NEW %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=N64 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=N64 --check-prefix=NEW %s
+
+; Test the effect of varargs on floating point types in the non-variable part
+; of the argument list as specified by section 2 of the MIPSpro N32 Handbook.
+;
+; N32/N64 are almost identical in this area so many of their checks have been
+; combined into the 'NEW' prefix (the N stands for New).
+;
+; On O32, varargs prevents all FPU argument register usage. This contradicts
+; the N32 handbook, but agrees with the SYSV ABI and GCC's behaviour.
+
+@floats = global [11 x float] zeroinitializer
+@doubles = global [11 x double] zeroinitializer
+
+define void @double_args(double %a, ...)
+ nounwind {
+entry:
+ %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ store volatile double %a, double* %0
+
+ %ap = alloca i8*
+ %ap2 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap2)
+ %b = va_arg i8** %ap, double
+ %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+ store volatile double %b, double* %1
+ ret void
+}
+
+; ALL-LABEL: double_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+
+; O32 forbids using floating point registers for the non-variable portion.
+; N32/N64 allow it.
+; O32BE-DAG: mtc1 $5, [[FTMP1:\$f[0-9]*[02468]+]]
+; O32BE-DAG: mtc1 $4, [[FTMP2:\$f[0-9]*[13579]+]]
+; O32LE-DAG: mtc1 $4, [[FTMP1:\$f[0-9]*[02468]+]]
+; O32LE-DAG: mtc1 $5, [[FTMP2:\$f[0-9]*[13579]+]]
+; O32-DAG: sdc1 [[FTMP1]], 8([[R2]])
+; NEW-DAG: sdc1 $f12, 8([[R2]])
+
+; The varargs portion is dumped to the stack
+; O32-DAG: sw $6, 16($sp)
+; O32-DAG: sw $7, 20($sp)
+; NEW-DAG: sd $5, 8($sp)
+; NEW-DAG: sd $6, 16($sp)
+; NEW-DAG: sd $7, 24($sp)
+; NEW-DAG: sd $8, 32($sp)
+; NEW-DAG: sd $9, 40($sp)
+; NEW-DAG: sd $10, 48($sp)
+; NEW-DAG: sd $11, 56($sp)
+
+; Get the varargs pointer
+; O32 has 4 bytes padding, 4 bytes for the varargs pointer, and 8 bytes reserved
+; for arguments 1 and 2.
+; N32/N64 has 8 bytes for the varargs pointer, and no reserved area.
+; O32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 16
+; O32-DAG: sw [[VAPTR]], 4($sp)
+; N32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 8
+; N32-DAG: sw [[VAPTR]], 4($sp)
+; N64-DAG: daddiu [[VAPTR:\$[0-9]+]], $sp, 8
+; N64-DAG: sd [[VAPTR]], 0($sp)
+
+; Increment the pointer then get the varargs arg
+; LLVM will rebind the load to the stack pointer instead of the varargs pointer
+; during lowering. This is fine and doesn't change the behaviour.
+; O32-DAG: addiu [[VAPTR]], [[VAPTR]], 8
+; O32-DAG: sw [[VAPTR]], 4($sp)
+; N32-DAG: addiu [[VAPTR]], [[VAPTR]], 8
+; N32-DAG: sw [[VAPTR]], 4($sp)
+; N64-DAG: daddiu [[VAPTR]], [[VAPTR]], 8
+; N64-DAG: sd [[VAPTR]], 0($sp)
+; O32-DAG: ldc1 [[FTMP1:\$f[0-9]+]], 16($sp)
+; NEW-DAG: ldc1 [[FTMP1:\$f[0-9]+]], 8($sp)
+; ALL-DAG: sdc1 [[FTMP1]], 16([[R2]])
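+;
+; Putting the offsets above together, an informal sketch of the O32 layout the
+; checks rely on (not itself checked)
+;   0..3   padding
+;   4..7   the spilled varargs pointer (the sw to 4($sp))
+;   8..15  reserved for arguments 1 and 2, i.e. the fixed double in $4/$5
+;   16..   the variable portion, so the first va_arg double is read from 16($sp)
+; and each further va_arg of a double advances the pointer by 8.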
+
+define void @float_args(float %a, ...) nounwind {
+entry:
+ %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ store volatile float %a, float* %0
+
+ %ap = alloca i8*
+ %ap2 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap2)
+ %b = va_arg i8** %ap, float
+ %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+ store volatile float %b, float* %1
+ ret void
+}
+
+; ALL-LABEL: float_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+
+; The first four arguments are the same in O32/N32/N64.
+; The non-variable portion should be unaffected.
+; O32-DAG: sw $4, 4([[R2]])
+; NEW-DAG: swc1 $f12, 4([[R2]])
+
+; The varargs portion is dumped to the stack
+; O32-DAG: sw $5, 12($sp)
+; O32-DAG: sw $6, 16($sp)
+; O32-DAG: sw $7, 20($sp)
+; NEW-DAG: sd $5, 8($sp)
+; NEW-DAG: sd $6, 16($sp)
+; NEW-DAG: sd $7, 24($sp)
+; NEW-DAG: sd $8, 32($sp)
+; NEW-DAG: sd $9, 40($sp)
+; NEW-DAG: sd $10, 48($sp)
+; NEW-DAG: sd $11, 56($sp)
+
+; Get the varargs pointer
+; O32 has 4 bytes of padding, 4 bytes for the varargs pointer, and should have
+; 8 bytes reserved for arguments 1 and 2 (the first float argument). However,
+; as discussed in arguments-float.ll, GCC disagrees with MD00305 and treats
+; floats as 4 bytes, so we only have 12 bytes in total.
+; N32/N64 has 8 bytes for the varargs pointer, and no reserved area.
+; O32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 12
+; O32-DAG: sw [[VAPTR]], 4($sp)
+; N32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 8
+; N32-DAG: sw [[VAPTR]], 4($sp)
+; N64-DAG: daddiu [[VAPTR:\$[0-9]+]], $sp, 8
+; N64-DAG: sd [[VAPTR]], 0($sp)
+
+; Increment the pointer then get the varargs arg
+; LLVM will rebind the load to the stack pointer instead of the varargs pointer
+; during lowering. This is fine and doesn't change the behaviour.
+; N32/N64 use ori instead of addiu/daddiu but (although odd) this is fine
+; since the stack is always aligned.
+; O32-DAG: addiu [[VAPTR]], [[VAPTR]], 4
+; O32-DAG: sw [[VAPTR]], 4($sp)
+; N32-DAG: ori [[VAPTR]], [[VAPTR]], 4
+; N32-DAG: sw [[VAPTR]], 4($sp)
+; N64-DAG: ori [[VAPTR]], [[VAPTR]], 4
+; N64-DAG: sd [[VAPTR]], 0($sp)
+; O32-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 12($sp)
+; NEW-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 8($sp)
+; ALL-DAG: swc1 [[FTMP1]], 8([[R2]])
+
+declare void @llvm.va_start(i8*)
+declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_end(i8*)
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float.ll b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
new file mode 100644
index 000000000000..9837f7ee5586
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
@@ -0,0 +1,211 @@
+; RUN: llc -march=mips -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32BE %s
+; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32LE %s
+
+; RUN-TODO: llc -march=mips64 -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+
+; Test the floating point arguments for all ABIs and byte orders as specified
+; by section 5 of MD00305 (MIPS ABIs Described).
+;
+; N32/N64 are identical in this area so their checks have been combined into
+; the 'NEW' prefix (the N stands for New).
+
+@bytes = global [11 x i8] zeroinitializer
+@dwords = global [11 x i64] zeroinitializer
+@floats = global [11 x float] zeroinitializer
+@doubles = global [11 x double] zeroinitializer
+
+define void @double_args(double %a, double %b, double %c, double %d, double %e,
+ double %f, double %g, double %h, double %i) nounwind {
+entry:
+ %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ store volatile double %a, double* %0
+ %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+ store volatile double %b, double* %1
+ %2 = getelementptr [11 x double]* @doubles, i32 0, i32 3
+ store volatile double %c, double* %2
+ %3 = getelementptr [11 x double]* @doubles, i32 0, i32 4
+ store volatile double %d, double* %3
+ %4 = getelementptr [11 x double]* @doubles, i32 0, i32 5
+ store volatile double %e, double* %4
+ %5 = getelementptr [11 x double]* @doubles, i32 0, i32 6
+ store volatile double %f, double* %5
+ %6 = getelementptr [11 x double]* @doubles, i32 0, i32 7
+ store volatile double %g, double* %6
+ %7 = getelementptr [11 x double]* @doubles, i32 0, i32 8
+ store volatile double %h, double* %7
+ %8 = getelementptr [11 x double]* @doubles, i32 0, i32 9
+ store volatile double %i, double* %8
+ ret void
+}
+
+; ALL-LABEL: double_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+
+; The first argument is floating point so floating point registers are used.
+; The first argument is the same for O32/N32/N64 but the second argument differs
+; by register
+; ALL-DAG: sdc1 $f12, 8([[R2]])
+; O32-DAG: sdc1 $f14, 16([[R2]])
+; NEW-DAG: sdc1 $f13, 16([[R2]])
+
+; O32 has run out of argument registers and starts using the stack
+; O32-DAG: ldc1 [[F1:\$f[0-9]+]], 16($sp)
+; O32-DAG: sdc1 [[F1]], 24([[R2]])
+; NEW-DAG: sdc1 $f14, 24([[R2]])
+; O32-DAG: ldc1 [[F1:\$f[0-9]+]], 24($sp)
+; O32-DAG: sdc1 [[F1]], 32([[R2]])
+; NEW-DAG: sdc1 $f15, 32([[R2]])
+; O32-DAG: ldc1 [[F1:\$f[0-9]+]], 32($sp)
+; O32-DAG: sdc1 [[F1]], 40([[R2]])
+; NEW-DAG: sdc1 $f16, 40([[R2]])
+; O32-DAG: ldc1 [[F1:\$f[0-9]+]], 40($sp)
+; O32-DAG: sdc1 [[F1]], 48([[R2]])
+; NEW-DAG: sdc1 $f17, 48([[R2]])
+; O32-DAG: ldc1 [[F1:\$f[0-9]+]], 48($sp)
+; O32-DAG: sdc1 [[F1]], 56([[R2]])
+; NEW-DAG: sdc1 $f18, 56([[R2]])
+; O32-DAG: ldc1 [[F1:\$f[0-9]+]], 56($sp)
+; O32-DAG: sdc1 [[F1]], 64([[R2]])
+; NEW-DAG: sdc1 $f19, 64([[R2]])
+
+; N32/N64 have run out of registers and start using the stack too
+; O32-DAG: ldc1 [[F1:\$f[0-9]+]], 64($sp)
+; O32-DAG: sdc1 [[F1]], 72([[R2]])
+; NEW-DAG: ldc1 [[F1:\$f[0-9]+]], 0($sp)
+; NEW-DAG: sdc1 [[F1]], 72([[R2]])
+
+define void @float_args(float %a, float %b, float %c, float %d, float %e,
+ float %f, float %g, float %h, float %i) nounwind {
+entry:
+ %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ store volatile float %a, float* %0
+ %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+ store volatile float %b, float* %1
+ %2 = getelementptr [11 x float]* @floats, i32 0, i32 3
+ store volatile float %c, float* %2
+ %3 = getelementptr [11 x float]* @floats, i32 0, i32 4
+ store volatile float %d, float* %3
+ %4 = getelementptr [11 x float]* @floats, i32 0, i32 5
+ store volatile float %e, float* %4
+ %5 = getelementptr [11 x float]* @floats, i32 0, i32 6
+ store volatile float %f, float* %5
+ %6 = getelementptr [11 x float]* @floats, i32 0, i32 7
+ store volatile float %g, float* %6
+ %7 = getelementptr [11 x float]* @floats, i32 0, i32 8
+ store volatile float %h, float* %7
+ %8 = getelementptr [11 x float]* @floats, i32 0, i32 9
+ store volatile float %i, float* %8
+ ret void
+}
+
+; ALL-LABEL: float_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
+; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(floats)(
+
+; The first argument is floating point so floating point registers are used.
+; The first argument is the same for O32/N32/N64 but the second argument differs
+; by register
+; ALL-DAG: swc1 $f12, 4([[R1]])
+; O32-DAG: swc1 $f14, 8([[R1]])
+; NEW-DAG: swc1 $f13, 8([[R1]])
+
+; O32 has run out of FPU argument registers and (in theory) starts using the
+; stack. I've yet to find a documentation reference for this, but GCC uses up
+; the remaining two GPR argument slots first. We'll do the same for
+; compatibility.
+; O32-DAG: sw $6, 12([[R1]])
+; NEW-DAG: swc1 $f14, 12([[R1]])
+; O32-DAG: sw $7, 16([[R1]])
+; NEW-DAG: swc1 $f15, 16([[R1]])
+
+; O32 is definitely out of registers now and switches to the stack.
+; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 16($sp)
+; O32-DAG: swc1 [[F1]], 20([[R1]])
+; NEW-DAG: swc1 $f16, 20([[R1]])
+; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 20($sp)
+; O32-DAG: swc1 [[F1]], 24([[R1]])
+; NEW-DAG: swc1 $f17, 24([[R1]])
+; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 24($sp)
+; O32-DAG: swc1 [[F1]], 28([[R1]])
+; NEW-DAG: swc1 $f18, 28([[R1]])
+; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 28($sp)
+; O32-DAG: swc1 [[F1]], 32([[R1]])
+; NEW-DAG: swc1 $f19, 32([[R1]])
+
+; N32/N64 have run out of registers and start using the stack too
+; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 32($sp)
+; O32-DAG: swc1 [[F1]], 36([[R1]])
+; NEW-DAG: lwc1 [[F1:\$f[0-9]+]], 0($sp)
+; NEW-DAG: swc1 [[F1]], 36([[R1]])
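+;
+; An informal summary of the O32 convention followed here (matching GCC, as
+; noted above; not itself checked)
+;   %a in $f12, %b in $f14, %c in $6, %d in $7, %e onwards on the stack
+; whereas N32/N64 pass %a..%h in $f12..$f19 and read %i from 0($sp).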
+
+
+define void @double_arg2(i8 %a, double %b) nounwind {
+entry:
+ %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ store volatile i8 %a, i8* %0
+ %1 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ store volatile double %b, double* %1
+ ret void
+}
+
+; ALL-LABEL: double_arg2:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(doubles)(
+
+; The first argument is the same in O32/N32/N64.
+; ALL-DAG: sb $4, 1([[R1]])
+
+; The first argument isn't floating point so floating point registers are not
+; used in O32, but N32/N64 will still use them.
+; The second slot is insufficiently aligned for double on O32 so it is skipped.
+; Also, double occupies two slots on O32 and only one for N32/N64.
+; O32LE-DAG: mtc1 $6, [[F1:\$f[0-9]*[02468]+]]
+; O32LE-DAG: mtc1 $7, [[F2:\$f[0-9]*[13579]+]]
+; O32BE-DAG: mtc1 $6, [[F2:\$f[0-9]*[13579]+]]
+; O32BE-DAG: mtc1 $7, [[F1:\$f[0-9]*[02468]+]]
+; O32-DAG: sdc1 [[F1]], 8([[R2]])
+; NEW-DAG: sdc1 $f13, 8([[R2]])
+
+define void @float_arg2(i8 %a, float %b) nounwind {
+entry:
+ %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ store volatile i8 %a, i8* %0
+ %1 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ store volatile float %b, float* %1
+ ret void
+}
+
+; ALL-LABEL: float_arg2:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(floats)(
+
+; The first argument is the same in O32/N32/N64.
+; ALL-DAG: sb $4, 1([[R1]])
+
+; The first argument isn't floating point so floating point registers are not
+; used in O32, but N32/N64 will still use them.
+; MD00305 and GCC disagree on this one. MD00305 says that floats are treated
+; as 8-byte aligned and occupy two slots on O32. GCC is treating them as 4-byte
+; aligned and occupying one slot. We'll use GCC's definition.
+; O32-DAG: sw $5, 4([[R2]])
+; NEW-DAG: swc1 $f13, 4([[R2]])
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
new file mode 100644
index 000000000000..5e3f403495f5
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+
+; Test the fp128 arguments for all ABIs and byte orders as specified
+; by section 2 of the MIPSpro N32 Handbook.
+;
+; O32 is not tested because long double is the same as double on O32.
+
+@ldoubles = global [11 x fp128] zeroinitializer
+
+define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
+entry:
+ %0 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 1
+ store volatile fp128 %a, fp128* %0
+ %1 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 2
+ store volatile fp128 %b, fp128* %1
+ %2 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 3
+ store volatile fp128 %c, fp128* %2
+ %3 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 4
+ store volatile fp128 %d, fp128* %3
+ %4 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 5
+ store volatile fp128 %e, fp128* %4
+ ret void
+}
+
+; ALL-LABEL: ldouble_args:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(ldoubles)(
+
+; The first four arguments are the same in N32/N64.
+; ALL-DAG: sdc1 $f12, 16([[R2]])
+; ALL-DAG: sdc1 $f13, 24([[R2]])
+; ALL-DAG: sdc1 $f14, 32([[R2]])
+; ALL-DAG: sdc1 $f15, 40([[R2]])
+; ALL-DAG: sdc1 $f16, 48([[R2]])
+; ALL-DAG: sdc1 $f17, 56([[R2]])
+; ALL-DAG: sdc1 $f18, 64([[R2]])
+; ALL-DAG: sdc1 $f19, 72([[R2]])
+
+; N32/N64 have run out of registers and start using the stack too
+; ALL-DAG: ld [[R3:\$[0-9]+]], 0($sp)
+; ALL-DAG: ld [[R4:\$[0-9]+]], 8($sp)
+; ALL-DAG: sd [[R3]], 80([[R2]])
+; ALL-DAG: sd [[R4]], 88([[R2]])
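+;
+; An informal summary (not itself checked) - each fp128 occupies two
+; consecutive FPU registers, so %a..%d arrive in $f12/$f13 up to $f18/$f19 and
+; %e is read from 0($sp) and 8($sp).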
diff --git a/test/CodeGen/Mips/cconv/arguments.ll b/test/CodeGen/Mips/cconv/arguments.ll
new file mode 100644
index 000000000000..8fe29f3c8ce7
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments.ll
@@ -0,0 +1,170 @@
+; RUN: llc -march=mips -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32BE %s
+; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32LE %s
+
+; RUN-TODO: llc -march=mips64 -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+
+; Test the integer arguments for all ABIs and byte orders as specified by
+; section 5 of MD00305 (MIPS ABIs Described).
+;
+; N32/N64 are identical in this area so their checks have been combined into
+; the 'NEW' prefix (the N stands for New).
+;
+; Varargs are covered in arguments-hard-float-varargs.ll.
+
+@bytes = global [11 x i8] zeroinitializer
+@dwords = global [11 x i64] zeroinitializer
+@floats = global [11 x float] zeroinitializer
+@doubles = global [11 x double] zeroinitializer
+
+define void @align_to_arg_slots(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g,
+ i8 %h, i8 %i, i8 %j) nounwind {
+entry:
+ %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ store volatile i8 %a, i8* %0
+ %1 = getelementptr [11 x i8]* @bytes, i32 0, i32 2
+ store volatile i8 %b, i8* %1
+ %2 = getelementptr [11 x i8]* @bytes, i32 0, i32 3
+ store volatile i8 %c, i8* %2
+ %3 = getelementptr [11 x i8]* @bytes, i32 0, i32 4
+ store volatile i8 %d, i8* %3
+ %4 = getelementptr [11 x i8]* @bytes, i32 0, i32 5
+ store volatile i8 %e, i8* %4
+ %5 = getelementptr [11 x i8]* @bytes, i32 0, i32 6
+ store volatile i8 %f, i8* %5
+ %6 = getelementptr [11 x i8]* @bytes, i32 0, i32 7
+ store volatile i8 %g, i8* %6
+ %7 = getelementptr [11 x i8]* @bytes, i32 0, i32 8
+ store volatile i8 %h, i8* %7
+ %8 = getelementptr [11 x i8]* @bytes, i32 0, i32 9
+ store volatile i8 %i, i8* %8
+ %9 = getelementptr [11 x i8]* @bytes, i32 0, i32 10
+ store volatile i8 %j, i8* %9
+ ret void
+}
+
+; ALL-LABEL: align_to_arg_slots:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+
+; The first four arguments are the same in O32/N32/N64
+; ALL-DAG: sb $4, 1([[R1]])
+; ALL-DAG: sb $5, 2([[R1]])
+; ALL-DAG: sb $6, 3([[R1]])
+; ALL-DAG: sb $7, 4([[R1]])
+
+; N32/N64 get an extra four arguments in registers
+; O32 starts loading from the stack. The addresses start at 16 because space is
+; always reserved for the first four arguments.
+; O32-DAG: lw [[R3:\$[0-9]+]], 16($sp)
+; O32-DAG: sb [[R3]], 5([[R1]])
+; NEW-DAG: sb $8, 5([[R1]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 20($sp)
+; O32-DAG: sb [[R3]], 6([[R1]])
+; NEW-DAG: sb $9, 6([[R1]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 24($sp)
+; O32-DAG: sb [[R3]], 7([[R1]])
+; NEW-DAG: sb $10, 7([[R1]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 28($sp)
+; O32-DAG: sb [[R3]], 8([[R1]])
+; NEW-DAG: sb $11, 8([[R1]])
+
+; O32/N32/N64 are accessing the stack at this point.
+; Unlike O32, N32/N64 do not reserve space for the arguments.
+; The stack offsets increase by 4 for O32 and by 8 for N32/N64.
+; O32-DAG: lw [[R3:\$[0-9]+]], 32($sp)
+; O32-DAG: sb [[R3]], 9([[R1]])
+; NEW-DAG: lw [[R3:\$[0-9]+]], 0($sp)
+; NEW-DAG: sb [[R3]], 9([[R1]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 36($sp)
+; O32-DAG: sb [[R3]], 10([[R1]])
+; NEW-DAG: lw [[R3:\$[0-9]+]], 8($sp)
+; NEW-DAG: sb [[R3]], 10([[R1]])
+
+define void @slot_skipping(i8 %a, i64 %b, i8 %c, i8 %d,
+ i8 %e, i8 %f, i8 %g, i64 %i, i8 %j) nounwind {
+entry:
+ %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ store volatile i8 %a, i8* %0
+ %1 = getelementptr [11 x i64]* @dwords, i32 0, i32 1
+ store volatile i64 %b, i64* %1
+ %2 = getelementptr [11 x i8]* @bytes, i32 0, i32 2
+ store volatile i8 %c, i8* %2
+ %3 = getelementptr [11 x i8]* @bytes, i32 0, i32 3
+ store volatile i8 %d, i8* %3
+ %4 = getelementptr [11 x i8]* @bytes, i32 0, i32 4
+ store volatile i8 %e, i8* %4
+ %5 = getelementptr [11 x i8]* @bytes, i32 0, i32 5
+ store volatile i8 %f, i8* %5
+ %6 = getelementptr [11 x i8]* @bytes, i32 0, i32 6
+ store volatile i8 %g, i8* %6
+ %7 = getelementptr [11 x i64]* @dwords, i32 0, i32 2
+ store volatile i64 %i, i64* %7
+ %8 = getelementptr [11 x i8]* @bytes, i32 0, i32 7
+ store volatile i8 %j, i8* %8
+ ret void
+}
+
+; ALL-LABEL: slot_skipping:
+; We won't test the way the global address is calculated in this test. This is
+; just to get the register number for the other checks.
+; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes)
+; SYM64-DAG: ld [[R1:\$[0-9]]], %got_disp(bytes)(
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(dwords)
+; SYM64-DAG: ld [[R2:\$[0-9]]], %got_disp(dwords)(
+
+; The first argument is the same in O32/N32/N64.
+; ALL-DAG: sb $4, 1([[R1]])
+
+; The second slot is insufficiently aligned for i64 on O32 so it is skipped.
+; Also, i64 occupies two slots on O32 and only one for N32/N64.
+; O32-DAG: sw $6, 8([[R2]])
+; O32-DAG: sw $7, 12([[R2]])
+; NEW-DAG: sd $5, 8([[R2]])
+
+; N32/N64 get an extra four arguments in registers and still have two left from
+; the first four.
+; O32 starts loading from the stack. The addresses start at 16 because space is
+; always reserved for the first four arguments.
+; It's not clear why O32 uses lbu for this argument, but it's not wrong so we'll
+; accept it for now. The only IR difference is that this argument has
+; anyext from i8 and align 8 on it.
+; O32LE-DAG: lbu [[R3:\$[0-9]+]], 16($sp)
+; O32BE-DAG: lbu [[R3:\$[0-9]+]], 19($sp)
+; O32-DAG: sb [[R3]], 2([[R1]])
+; NEW-DAG: sb $6, 2([[R1]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 20($sp)
+; O32-DAG: sb [[R3]], 3([[R1]])
+; NEW-DAG: sb $7, 3([[R1]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 24($sp)
+; O32-DAG: sb [[R3]], 4([[R1]])
+; NEW-DAG: sb $8, 4([[R1]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 28($sp)
+; O32-DAG: sb [[R3]], 5([[R1]])
+; NEW-DAG: sb $9, 5([[R1]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 32($sp)
+; O32-DAG: sb [[R3]], 6([[R1]])
+; NEW-DAG: sb $10, 6([[R1]])
+
+; O32-DAG: lw [[R3:\$[0-9]+]], 40($sp)
+; O32-DAG: sw [[R3]], 16([[R2]])
+; O32-DAG: lw [[R3:\$[0-9]+]], 44($sp)
+; O32-DAG: sw [[R3]], 20([[R2]])
+; NEW-DAG: sd $11, 16([[R2]])
+
+; O32/N32/N64 are accessing the stack at this point.
+; Unlike O32, N32/N64 do not reserve space for the arguments.
+; The stack offsets increase by 4 for O32 and by 8 for N32/N64.
+; O32-DAG: lw [[R3:\$[0-9]+]], 48($sp)
+; O32-DAG: sb [[R3]], 7([[R1]])
+; NEW-DAG: lw [[R3:\$[0-9]+]], 0($sp)
+; NEW-DAG: sb [[R3]], 7([[R1]])
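+;
+; An informal slot map for the O32 case above (not itself checked)
+;   %a      slot 0 ($4)
+;   (skip)  slot 1, so that the i64 is 8-byte aligned
+;   %b      slots 2-3 ($6/$7)
+;   %c..%g  16($sp) up to 32($sp)
+;   (skip)  36($sp), again for 8-byte alignment
+;   %i      40($sp)/44($sp)
+;   %j      48($sp)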
diff --git a/test/CodeGen/Mips/cconv/callee-saved-float.ll b/test/CodeGen/Mips/cconv/callee-saved-float.ll
new file mode 100644
index 000000000000..de4d9171aec4
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/callee-saved-float.ll
@@ -0,0 +1,111 @@
+; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+
+; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=O32-INV %s
+; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=O32-INV %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N32-INV %s
+; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N32-INV %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N64-INV %s
+; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N64-INV %s
+
+; Test that the callee-saved registers are preserved as specified by section
+; 2 of the MIPSpro N32 Handbook and section 3 of the SYSV ABI spec.
+
+define void @fpu_clobber() nounwind {
+entry:
+ call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f13},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ ret void
+}
+
+; ALL-LABEL: fpu_clobber:
+; ALL-INV-NOT: sdc1 $f0,
+; ALL-INV-NOT: sdc1 $f1,
+; ALL-INV-NOT: sdc1 $f2,
+; ALL-INV-NOT: sdc1 $f3,
+; ALL-INV-NOT: sdc1 $f4,
+; ALL-INV-NOT: sdc1 $f5,
+; ALL-INV-NOT: sdc1 $f6,
+; ALL-INV-NOT: sdc1 $f7,
+; ALL-INV-NOT: sdc1 $f8,
+; ALL-INV-NOT: sdc1 $f9,
+; ALL-INV-NOT: sdc1 $f10,
+; ALL-INV-NOT: sdc1 $f11,
+; ALL-INV-NOT: sdc1 $f12,
+; ALL-INV-NOT: sdc1 $f13,
+; ALL-INV-NOT: sdc1 $f14,
+; ALL-INV-NOT: sdc1 $f15,
+; ALL-INV-NOT: sdc1 $f16,
+; ALL-INV-NOT: sdc1 $f17,
+; ALL-INV-NOT: sdc1 $f18,
+; ALL-INV-NOT: sdc1 $f19,
+; ALL-INV-NOT: sdc1 $f21,
+; ALL-INV-NOT: sdc1 $f23,
+
+; O32: addiu $sp, $sp, -48
+; O32-DAG: sdc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp)
+; O32-DAG: sdc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp)
+; O32-DAG: sdc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp)
+; O32-DAG: sdc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp)
+; O32-DAG: sdc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp)
+; O32-DAG: sdc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp)
+; O32-DAG: ldc1 [[F20]], [[OFF20]]($sp)
+; O32-DAG: ldc1 [[F22]], [[OFF22]]($sp)
+; O32-DAG: ldc1 [[F24]], [[OFF24]]($sp)
+; O32-INV-NOT: sdc1 $f25,
+; O32-DAG: ldc1 [[F26]], [[OFF26]]($sp)
+; O32-INV-NOT: sdc1 $f27,
+; O32-DAG: ldc1 [[F28]], [[OFF28]]($sp)
+; O32-INV-NOT: sdc1 $f29,
+; O32-DAG: ldc1 [[F30]], [[OFF30]]($sp)
+; O32-INV-NOT: sdc1 $f31,
+; O32: addiu $sp, $sp, 48
+
+; N32: addiu $sp, $sp, -48
+; N32-DAG: sdc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp)
+; N32-DAG: sdc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp)
+; N32-DAG: sdc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp)
+; N32-DAG: sdc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp)
+; N32-DAG: sdc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp)
+; N32-DAG: sdc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp)
+; N32-DAG: ldc1 [[F20]], [[OFF20]]($sp)
+; N32-DAG: ldc1 [[F22]], [[OFF22]]($sp)
+; N32-DAG: ldc1 [[F24]], [[OFF24]]($sp)
+; N32-INV-NOT: sdc1 $f25,
+; N32-DAG: ldc1 [[F26]], [[OFF26]]($sp)
+; N32-INV-NOT: sdc1 $f27,
+; N32-DAG: ldc1 [[F28]], [[OFF28]]($sp)
+; N32-INV-NOT: sdc1 $f29,
+; N32-DAG: ldc1 [[F30]], [[OFF30]]($sp)
+; N32-INV-NOT: sdc1 $f31,
+; N32: addiu $sp, $sp, 48
+
+; N64: addiu $sp, $sp, -64
+; N64-INV-NOT: sdc1 $f20,
+; N64-INV-NOT: sdc1 $f22,
+; N64-DAG: sdc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp)
+; N64-DAG: sdc1 [[F25:\$f25]], [[OFF25:[0-9]+]]($sp)
+; N64-DAG: sdc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp)
+; N64-DAG: sdc1 [[F27:\$f27]], [[OFF27:[0-9]+]]($sp)
+; N64-DAG: sdc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp)
+; N64-DAG: sdc1 [[F29:\$f29]], [[OFF29:[0-9]+]]($sp)
+; N64-DAG: sdc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp)
+; N64-DAG: sdc1 [[F31:\$f31]], [[OFF31:[0-9]+]]($sp)
+; N64-DAG: ldc1 [[F24]], [[OFF24]]($sp)
+; N64-DAG: ldc1 [[F25]], [[OFF25]]($sp)
+; N64-DAG: ldc1 [[F26]], [[OFF26]]($sp)
+; N64-DAG: ldc1 [[F27]], [[OFF27]]($sp)
+; N64-DAG: ldc1 [[F28]], [[OFF28]]($sp)
+; N64-DAG: ldc1 [[F29]], [[OFF29]]($sp)
+; N64-DAG: ldc1 [[F30]], [[OFF30]]($sp)
+; N64-DAG: ldc1 [[F31]], [[OFF31]]($sp)
+; N64: addiu $sp, $sp, 64
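+;
+; Taken together (informally; not itself checked), the expectations above say
+; that the O32 and N32 configurations save and restore the even-numbered
+; $f20..$f30, the N64 configuration saves $f24..$f31, and the -INV prefixes
+; verify that no other FPU register is spilled.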
diff --git a/test/CodeGen/Mips/cconv/callee-saved-fpxx.ll b/test/CodeGen/Mips/cconv/callee-saved-fpxx.ll
new file mode 100644
index 000000000000..4b28b9962075
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/callee-saved-fpxx.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=mips -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN: llc -march=mipsel -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN: llc -march=mips -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+; RUN: llc -march=mipsel -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+
+; RUN-TODO: llc -march=mips64 -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN-TODO: llc -march=mips64el -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX %s
+; RUN-TODO: llc -march=mips64 -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+; RUN-TODO: llc -march=mips64el -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=ALL --check-prefix=O32-FPXX-INV %s
+
+define void @fpu_clobber() nounwind {
+entry:
+ call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f13},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ ret void
+}
+
+; O32-FPXX-LABEL: fpu_clobber:
+; O32-FPXX-INV-NOT: sdc1 $f0,
+; O32-FPXX-INV-NOT: sdc1 $f1,
+; O32-FPXX-INV-NOT: sdc1 $f2,
+; O32-FPXX-INV-NOT: sdc1 $f3,
+; O32-FPXX-INV-NOT: sdc1 $f4,
+; O32-FPXX-INV-NOT: sdc1 $f5,
+; O32-FPXX-INV-NOT: sdc1 $f6,
+; O32-FPXX-INV-NOT: sdc1 $f7,
+; O32-FPXX-INV-NOT: sdc1 $f8,
+; O32-FPXX-INV-NOT: sdc1 $f9,
+; O32-FPXX-INV-NOT: sdc1 $f10,
+; O32-FPXX-INV-NOT: sdc1 $f11,
+; O32-FPXX-INV-NOT: sdc1 $f12,
+; O32-FPXX-INV-NOT: sdc1 $f13,
+; O32-FPXX-INV-NOT: sdc1 $f14,
+; O32-FPXX-INV-NOT: sdc1 $f15,
+; O32-FPXX-INV-NOT: sdc1 $f16,
+; O32-FPXX-INV-NOT: sdc1 $f17,
+; O32-FPXX-INV-NOT: sdc1 $f18,
+; O32-FPXX-INV-NOT: sdc1 $f19,
+; O32-FPXX-INV-NOT: sdc1 $f21,
+; O32-FPXX-INV-NOT: sdc1 $f23,
+; O32-FPXX-INV-NOT: sdc1 $f25,
+; O32-FPXX-INV-NOT: sdc1 $f27,
+; O32-FPXX-INV-NOT: sdc1 $f29,
+; O32-FPXX-INV-NOT: sdc1 $f31,
+
+; O32-FPXX: addiu $sp, $sp, -48
+; O32-FPXX-DAG: sdc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp)
+; O32-FPXX-DAG: sdc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp)
+; O32-FPXX-DAG: ldc1 [[F20]], [[OFF20]]($sp)
+; O32-FPXX-DAG: ldc1 [[F22]], [[OFF22]]($sp)
+; O32-FPXX-DAG: ldc1 [[F24]], [[OFF24]]($sp)
+; O32-FPXX-DAG: ldc1 [[F26]], [[OFF26]]($sp)
+; O32-FPXX-DAG: ldc1 [[F28]], [[OFF28]]($sp)
+; O32-FPXX-DAG: ldc1 [[F30]], [[OFF30]]($sp)
+; O32-FPXX: addiu $sp, $sp, 48
diff --git a/test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll b/test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll
new file mode 100644
index 000000000000..489879e98ad3
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/callee-saved-fpxx1.ll
@@ -0,0 +1,24 @@
+; RUN: llc -march=mips -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=O32-FP64-INV %s
+; RUN: llc -march=mipsel -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=O32-FP64-INV %s
+
+; RUN: llc -march=mips -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+; RUN: llc -march=mipsel -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+
+; RUN-TODO: llc -march=mips64 -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+; RUN-TODO: llc -march=mips64el -mattr=+o32,+fpxx < %s | FileCheck --check-prefix=O32-FPXX %s
+
+define void @fpu_clobber() nounwind {
+entry:
+ call void asm "# Clobber", "~{$f21}"()
+ ret void
+}
+
+; O32-FPXX-LABEL: fpu_clobber:
+
+; O32-FPXX: addiu $sp, $sp, -8
+
+; O32-FP64-INV-NOT: sdc1 $f20,
+; O32-FPXX-DAG: sdc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp)
+; O32-FPXX-DAG: ldc1 [[F20]], [[OFF20]]($sp)
+
+; O32-FPXX: addiu $sp, $sp, 8
diff --git a/test/CodeGen/Mips/cconv/callee-saved.ll b/test/CodeGen/Mips/cconv/callee-saved.ll
new file mode 100644
index 000000000000..293e99f0c8e6
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/callee-saved.ll
@@ -0,0 +1,167 @@
+; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+
+; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32-INV %s
+; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32-INV %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
+; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
+
+; Test that the callee-saved registers are preserved as specified by section
+; 2 of the MIPSpro N32 Handbook and section 3 of the SYSV ABI spec.
+
+define void @gpr_clobber() nounwind {
+entry:
+ ; Clobbering the stack pointer is a bad idea so we'll skip that one
+ call void asm "# Clobber", "~{$0},~{$1},~{$2},~{$3},~{$4},~{$5},~{$6},~{$7},~{$8},~{$9},~{$10},~{$11},~{$12},~{$13},~{$14},~{$15},~{$16},~{$17},~{$18},~{$19},~{$20},~{$21},~{$22},~{$23},~{$24},~{$25},~{$26},~{$27},~{$28},~{$30},~{$31}"()
+ ret void
+}
+
+; ALL-LABEL: gpr_clobber:
+; O32: addiu $sp, $sp, -40
+; O32-INV-NOT: sw $0,
+; O32-INV-NOT: sw $1,
+; O32-INV-NOT: sw $2,
+; O32-INV-NOT: sw $3,
+; O32-INV-NOT: sw $4,
+; O32-INV-NOT: sw $5,
+; O32-INV-NOT: sw $6,
+; O32-INV-NOT: sw $7,
+; O32-INV-NOT: sw $8,
+; O32-INV-NOT: sw $9,
+; O32-INV-NOT: sw $10,
+; O32-INV-NOT: sw $11,
+; O32-INV-NOT: sw $12,
+; O32-INV-NOT: sw $13,
+; O32-INV-NOT: sw $14,
+; O32-INV-NOT: sw $15,
+; O32-DAG: sw [[G16:\$16]], [[OFF16:[0-9]+]]($sp)
+; O32-DAG: sw [[G17:\$17]], [[OFF17:[0-9]+]]($sp)
+; O32-DAG: sw [[G18:\$18]], [[OFF18:[0-9]+]]($sp)
+; O32-DAG: sw [[G19:\$19]], [[OFF19:[0-9]+]]($sp)
+; O32-DAG: sw [[G20:\$20]], [[OFF20:[0-9]+]]($sp)
+; O32-DAG: sw [[G21:\$21]], [[OFF21:[0-9]+]]($sp)
+; O32-DAG: sw [[G22:\$22]], [[OFF22:[0-9]+]]($sp)
+; O32-DAG: sw [[G23:\$23]], [[OFF23:[0-9]+]]($sp)
+; O32-INV-NOT: sw $24,
+; O32-INV-NOT: sw $25,
+; O32-INV-NOT: sw $26,
+; O32-INV-NOT: sw $27,
+; O32-INV-NOT: sw $28,
+; O32-INV-NOT: sw $29,
+; O32-DAG: sw [[G30:\$fp]], [[OFF30:[0-9]+]]($sp)
+; O32-DAG: sw [[G31:\$fp]], [[OFF31:[0-9]+]]($sp)
+; O32-DAG: lw [[G16]], [[OFF16]]($sp)
+; O32-DAG: lw [[G17]], [[OFF17]]($sp)
+; O32-DAG: lw [[G18]], [[OFF18]]($sp)
+; O32-DAG: lw [[G19]], [[OFF19]]($sp)
+; O32-DAG: lw [[G20]], [[OFF20]]($sp)
+; O32-DAG: lw [[G21]], [[OFF21]]($sp)
+; O32-DAG: lw [[G22]], [[OFF22]]($sp)
+; O32-DAG: lw [[G23]], [[OFF23]]($sp)
+; O32-DAG: lw [[G30]], [[OFF30]]($sp)
+; O32-DAG: lw [[G31]], [[OFF31]]($sp)
+; O32: addiu $sp, $sp, 40
+
+; N32: addiu $sp, $sp, -96
+; N32-INV-NOT: sd $0,
+; N32-INV-NOT: sd $1,
+; N32-INV-NOT: sd $2,
+; N32-INV-NOT: sd $3,
+; N32-INV-NOT: sd $4,
+; N32-INV-NOT: sd $5,
+; N32-INV-NOT: sd $6,
+; N32-INV-NOT: sd $7,
+; N32-INV-NOT: sd $8,
+; N32-INV-NOT: sd $9,
+; N32-INV-NOT: sd $10,
+; N32-INV-NOT: sd $11,
+; N32-INV-NOT: sd $12,
+; N32-INV-NOT: sd $13,
+; N32-INV-NOT: sd $14,
+; N32-INV-NOT: sd $15,
+; N32-DAG: sd [[G16:\$16]], [[OFF16:[0-9]+]]($sp)
+; N32-DAG: sd [[G17:\$17]], [[OFF17:[0-9]+]]($sp)
+; N32-DAG: sd [[G18:\$18]], [[OFF18:[0-9]+]]($sp)
+; N32-DAG: sd [[G19:\$19]], [[OFF19:[0-9]+]]($sp)
+; N32-DAG: sd [[G20:\$20]], [[OFF20:[0-9]+]]($sp)
+; N32-DAG: sd [[G21:\$21]], [[OFF21:[0-9]+]]($sp)
+; N32-DAG: sd [[G22:\$22]], [[OFF22:[0-9]+]]($sp)
+; N32-DAG: sd [[G23:\$23]], [[OFF23:[0-9]+]]($sp)
+; N32-INV-NOT: sd $24,
+; N32-INV-NOT: sd $25,
+; N32-INV-NOT: sd $26,
+; N32-INV-NOT: sd $27,
+; N32-DAG: sd [[G28:\$gp]], [[OFF28:[0-9]+]]($sp)
+; N32-INV-NOT: sd $29,
+; N32-DAG: sd [[G30:\$fp]], [[OFF30:[0-9]+]]($sp)
+; N32-DAG: sd [[G31:\$fp]], [[OFF31:[0-9]+]]($sp)
+; N32-DAG: ld [[G16]], [[OFF16]]($sp)
+; N32-DAG: ld [[G17]], [[OFF17]]($sp)
+; N32-DAG: ld [[G18]], [[OFF18]]($sp)
+; N32-DAG: ld [[G19]], [[OFF19]]($sp)
+; N32-DAG: ld [[G20]], [[OFF20]]($sp)
+; N32-DAG: ld [[G21]], [[OFF21]]($sp)
+; N32-DAG: ld [[G22]], [[OFF22]]($sp)
+; N32-DAG: ld [[G23]], [[OFF23]]($sp)
+; N32-DAG: ld [[G28]], [[OFF28]]($sp)
+; N32-DAG: ld [[G30]], [[OFF30]]($sp)
+; N32-DAG: ld [[G31]], [[OFF31]]($sp)
+; N32: addiu $sp, $sp, 96
+
+; N64: daddiu $sp, $sp, -96
+; N64-INV-NOT: sd $0,
+; N64-INV-NOT: sd $1,
+; N64-INV-NOT: sd $2,
+; N64-INV-NOT: sd $3,
+; N64-INV-NOT: sd $4,
+; N64-INV-NOT: sd $5,
+; N64-INV-NOT: sd $6,
+; N64-INV-NOT: sd $7,
+; N64-INV-NOT: sd $8,
+; N64-INV-NOT: sd $9,
+; N64-INV-NOT: sd $10,
+; N64-INV-NOT: sd $11,
+; N64-INV-NOT: sd $12,
+; N64-INV-NOT: sd $13,
+; N64-INV-NOT: sd $14,
+; N64-INV-NOT: sd $15,
+; N64-DAG: sd [[G16:\$16]], [[OFF16:[0-9]+]]($sp)
+; N64-DAG: sd [[G17:\$17]], [[OFF17:[0-9]+]]($sp)
+; N64-DAG: sd [[G18:\$18]], [[OFF18:[0-9]+]]($sp)
+; N64-DAG: sd [[G19:\$19]], [[OFF19:[0-9]+]]($sp)
+; N64-DAG: sd [[G20:\$20]], [[OFF20:[0-9]+]]($sp)
+; N64-DAG: sd [[G21:\$21]], [[OFF21:[0-9]+]]($sp)
+; N64-DAG: sd [[G22:\$22]], [[OFF22:[0-9]+]]($sp)
+; N64-DAG: sd [[G23:\$23]], [[OFF23:[0-9]+]]($sp)
+; N64-DAG: sd [[G30:\$fp]], [[OFF30:[0-9]+]]($sp)
+; N64-DAG: sd [[G31:\$fp]], [[OFF31:[0-9]+]]($sp)
+; N64-INV-NOT: sd $24,
+; N64-INV-NOT: sd $25,
+; N64-INV-NOT: sd $26,
+; N64-INV-NOT: sd $27,
+; N64-DAG: sd [[G28:\$gp]], [[OFF28:[0-9]+]]($sp)
+; N64-INV-NOT: sd $29,
+; N64-DAG: ld [[G16]], [[OFF16]]($sp)
+; N64-DAG: ld [[G17]], [[OFF17]]($sp)
+; N64-DAG: ld [[G18]], [[OFF18]]($sp)
+; N64-DAG: ld [[G19]], [[OFF19]]($sp)
+; N64-DAG: ld [[G20]], [[OFF20]]($sp)
+; N64-DAG: ld [[G21]], [[OFF21]]($sp)
+; N64-DAG: ld [[G22]], [[OFF22]]($sp)
+; N64-DAG: ld [[G23]], [[OFF23]]($sp)
+; N64-DAG: ld [[G28]], [[OFF28]]($sp)
+; N64-DAG: ld [[G30]], [[OFF30]]($sp)
+; N64-DAG: ld [[G31]], [[OFF31]]($sp)
+; N64: daddiu $sp, $sp, 96
diff --git a/test/CodeGen/Mips/cconv/memory-layout.ll b/test/CodeGen/Mips/cconv/memory-layout.ll
new file mode 100644
index 000000000000..0c3cc9ecedfe
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/memory-layout.ll
@@ -0,0 +1,140 @@
+; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+
+; Test the memory layout for all ABIs and byte orders as specified by section
+; 4 of MD00305 (MIPS ABIs Described).
+; Bitfields are not covered since they are not available as a type in LLVM IR.
+;
+; The assembly directives deal with endianness so we don't need to account for
+; that.
+
+; Deliberately request alignments that are too small for the target so we get
+; the minimum alignment instead of the preferred alignment.
+@byte = global i8 1, align 1
+@halfword = global i16 258, align 1
+@word = global i32 16909060, align 1
+@float = global float 1.0, align 1
+@dword = global i64 283686952306183, align 1
+@double = global double 1.0, align 1
+@pointer = global i8* @byte
+
+; ALL-NOT: .align
+; ALL-LABEL: byte:
+; ALL: .byte 1
+; ALL: .size byte, 1
+
+; ALL: .align 1
+; ALL-LABEL: halfword:
+; ALL: .2byte 258
+; ALL: .size halfword, 2
+
+; ALL: .align 2
+; ALL-LABEL: word:
+; ALL: .4byte 16909060
+; ALL: .size word, 4
+
+; ALL: .align 2
+; ALL-LABEL: float:
+; ALL: .4byte 1065353216
+; ALL: .size float, 4
+
+; ALL: .align 3
+; ALL-LABEL: dword:
+; ALL: .8byte 283686952306183
+; ALL: .size dword, 8
+
+; ALL: .align 3
+; ALL-LABEL: double:
+; ALL: .8byte 4607182418800017408
+; ALL: .size double, 8
+
+; O32: .align 2
+; N32: .align 2
+; N64: .align 3
+; ALL-LABEL: pointer:
+; O32: .4byte byte
+; O32: .size pointer, 4
+; N32: .4byte byte
+; N32: .size pointer, 4
+; N64: .8byte byte
+; N64: .size pointer, 8
+
+@byte_array = global [2 x i8] [i8 1, i8 2], align 1
+@halfword_array = global [2 x i16] [i16 1, i16 2], align 1
+@word_array = global [2 x i32] [i32 1, i32 2], align 1
+@float_array = global [2 x float] [float 1.0, float 2.0], align 1
+@dword_array = global [2 x i64] [i64 1, i64 2], align 1
+@double_array = global [2 x double] [double 1.0, double 2.0], align 1
+@pointer_array = global [2 x i8*] [i8* @byte, i8* @byte]
+
+; ALL-NOT: .align
+; ALL-LABEL: byte_array:
+; ALL: .ascii "\001\002"
+; ALL: .size byte_array, 2
+
+; ALL: .align 1
+; ALL-LABEL: halfword_array:
+; ALL: .2byte 1
+; ALL: .2byte 2
+; ALL: .size halfword_array, 4
+
+; ALL: .align 2
+; ALL-LABEL: word_array:
+; ALL: .4byte 1
+; ALL: .4byte 2
+; ALL: .size word_array, 8
+
+; ALL: .align 2
+; ALL-LABEL: float_array:
+; ALL: .4byte 1065353216
+; ALL: .4byte 1073741824
+; ALL: .size float_array, 8
+
+; ALL: .align 3
+; ALL-LABEL: dword_array:
+; ALL: .8byte 1
+; ALL: .8byte 2
+; ALL: .size dword_array, 16
+
+; ALL: .align 3
+; ALL-LABEL: double_array:
+; ALL: .8byte 4607182418800017408
+; ALL: .8byte 4611686018427387904
+; ALL: .size double_array, 16
+
+; O32: .align 2
+; N32: .align 2
+; N64: .align 3
+; ALL-LABEL: pointer_array:
+; O32: .4byte byte
+; O32: .4byte byte
+; O32: .size pointer_array, 8
+; N32: .4byte byte
+; N32: .4byte byte
+; N32: .size pointer_array, 8
+; N64: .8byte byte
+; N64: .8byte byte
+; N64: .size pointer_array, 16
+
+%mixed = type { i8, double, i16 }
+@mixed = global %mixed { i8 1, double 1.0, i16 515 }, align 1
+
+; ALL: .align 3
+; ALL-LABEL: mixed:
+; ALL: .byte 1
+; ALL: .space 7
+; ALL: .8byte 4607182418800017408
+; ALL: .2byte 515
+; ALL: .space 6
+; ALL: .size mixed, 24
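+;
+; In other words (informal; not itself checked), the struct is laid out as
+;   offset 0   i8
+;   offset 1   7 bytes of padding
+;   offset 8   double
+;   offset 16  i16
+;   offset 18  6 bytes of tail padding
+; giving a size of 24 with 8-byte alignment, as the directives above require.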
+
+; Bitfields are not available in LLVM IR so we can't test them here.
diff --git a/test/CodeGen/Mips/cconv/reserved-space.ll b/test/CodeGen/Mips/cconv/reserved-space.ll
new file mode 100644
index 000000000000..b36f89ecc115
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/reserved-space.ll
@@ -0,0 +1,39 @@
+; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+
+; Test that O32 correctly reserves space for the four argument registers, even
+; when there are no arguments, as per section 5 of MD00305 (MIPS ABIs Described).
+
+declare void @foo() nounwind;
+
+define void @reserved_space() nounwind {
+entry:
+ tail call void @foo()
+ ret void
+}
+
+; ALL-LABEL: reserved_space:
+; O32: addiu $sp, $sp, -24
+; O32: sw $ra, 20($sp)
+; O32: lw $ra, 20($sp)
+; O32: addiu $sp, $sp, 24
+; Despite pointers being 32-bit wide on N32, the return address is saved as a
+; 64-bit value. I've yet to find a documentation reference for this quirk, but
+; the behaviour matches GCC so I consider it correct.
+; N32: addiu $sp, $sp, -16
+; N32: sd $ra, 8($sp)
+; N32: ld $ra, 8($sp)
+; N32: addiu $sp, $sp, 16
+; N64: daddiu $sp, $sp, -16
+; N64: sd $ra, 8($sp)
+; N64: ld $ra, 8($sp)
+; N64: daddiu $sp, $sp, 16
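A short breakdown of the O32 frame size checked above (an inference from the checks, not part of the imported patch):

; O32 frame for reserved_space():
;   16 bytes  argument register save area ($4-$7), reserved even with no args
; +  4 bytes  $ra spill slot
; = 20 bytes, rounded up to the 8-byte O32 stack alignment -> 24
; hence "addiu $sp, $sp, -24" with $ra stored at 20($sp).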
diff --git a/test/CodeGen/Mips/cconv/return-float.ll b/test/CodeGen/Mips/cconv/return-float.ll
new file mode 100644
index 000000000000..28cf83d3efcf
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/return-float.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mtriple=mips-linux-gnu -soft-float -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -mtriple=mipsel-linux-gnu -soft-float -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN-TODO: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+
+; RUN: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+
+; Test the float returns for all ABI's and byte orders as specified by
+; section 5 of MD00305 (MIPS ABIs Described).
+
+; We only test Linux because other OS's use different relocations and I don't
+; know if this is correct.
+
+@float = global float zeroinitializer
+@double = global double zeroinitializer
+
+define float @retfloat() nounwind {
+entry:
+ %0 = load volatile float* @float
+ ret float %0
+}
+
+; ALL-LABEL: retfloat:
+; O32-DAG: lui [[R1:\$[0-9]+]], %hi(float)
+; O32-DAG: lw $2, %lo(float)([[R1]])
+; N32-DAG: lui [[R1:\$[0-9]+]], %hi(float)
+; N32-DAG: lw $2, %lo(float)([[R1]])
+; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(float)($1)
+; N64-DAG: lw $2, 0([[R1]])
+
+define double @retdouble() nounwind {
+entry:
+ %0 = load volatile double* @double
+ ret double %0
+}
+
+; ALL-LABEL: retdouble:
+; O32-DAG: lw $2, %lo(double)([[R1:\$[0-9]+]])
+; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], %lo(double)
+; O32-DAG: lw $3, 4([[R2]])
+; N32-DAG: ld $2, %lo(double)([[R1:\$[0-9]+]])
+; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(double)($1)
+; N64-DAG: ld $2, 0([[R1]])
diff --git a/test/CodeGen/Mips/cconv/return-hard-float.ll b/test/CodeGen/Mips/cconv/return-hard-float.ll
new file mode 100644
index 000000000000..3eb26fa9d24f
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/return-hard-float.ll
@@ -0,0 +1,59 @@
+; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+
+; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=ALL --check-prefix=032FP64 %s
+; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=ALL --check-prefix=032FP64 %s
+
+; Test the float returns for all ABI's and byte orders as specified by
+; section 5 of MD00305 (MIPS ABIs Described).
+
+; We only test Linux because other OS's use different relocations and I don't
+; know if this is correct.
+
+@float = global float zeroinitializer
+@double = global double zeroinitializer
+
+define float @retfloat() nounwind {
+entry:
+ %0 = load volatile float* @float
+ ret float %0
+}
+
+; ALL-LABEL: retfloat:
+; O32-DAG: lui [[R1:\$[0-9]+]], %hi(float)
+; O32-DAG: lwc1 $f0, %lo(float)([[R1]])
+; N32-DAG: lui [[R1:\$[0-9]+]], %hi(float)
+; N32-DAG: lwc1 $f0, %lo(float)([[R1]])
+; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(float)($1)
+; N64-DAG: lwc1 $f0, 0([[R1]])
+
+define double @retdouble() nounwind {
+entry:
+ %0 = load volatile double* @double
+ ret double %0
+}
+
+; ALL-LABEL: retdouble:
+; O32-DAG: ldc1 $f0, %lo(double)([[R1:\$[0-9]+]])
+; N32-DAG: ldc1 $f0, %lo(double)([[R1:\$[0-9]+]])
+; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(double)($1)
+; N64-DAG: ldc1 $f0, 0([[R1]])
+
+define { double, double } @retComplexDouble() #0 {
+ %retval = alloca { double, double }, align 8
+ %1 = load { double, double }* %retval
+ ret { double, double } %1
+}
+
+; ALL-LABEL: retComplexDouble:
+; 032FP64-DAG: ldc1 $f0, 0($sp)
+; 032FP64-DAG: ldc1 $f2, 8($sp)
diff --git a/test/CodeGen/Mips/cconv/return-hard-fp128.ll b/test/CodeGen/Mips/cconv/return-hard-fp128.ll
new file mode 100644
index 000000000000..0da59efddd6c
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/return-hard-fp128.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+
+; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+
+; Test the fp128 returns for N32/N64 and all byte orders as specified by
+; section 5 of MD00305 (MIPS ABIs Described).
+;
+; O32 is not tested because long double is the same as double on O32.
+;
+@fp128 = global fp128 zeroinitializer
+
+define fp128 @retldouble() nounwind {
+entry:
+ %0 = load volatile fp128* @fp128
+ ret fp128 %0
+}
+
+; ALL-LABEL: retldouble:
+; N32-DAG: ld [[R2:\$[0-9]+]], %lo(fp128)([[R1:\$[0-9]+]])
+; N32-DAG: addiu [[R3:\$[0-9]+]], [[R1]], %lo(fp128)
+; N32-DAG: ld [[R4:\$[0-9]+]], 8([[R3]])
+; N32-DAG: dmtc1 [[R2]], $f0
+; N32-DAG: dmtc1 [[R4]], $f2
+
+; N64-DAG: ld [[R2:\$[0-9]+]], %got_disp(fp128)([[R1:\$[0-9]+]])
+; N64-DAG: ld [[R3:\$[0-9]+]], 0([[R2]])
+; N64-DAG: ld [[R4:\$[0-9]+]], 8([[R2]])
+; N64-DAG: dmtc1 [[R3]], $f0
+; N64-DAG: dmtc1 [[R4]], $f2
diff --git a/test/CodeGen/Mips/cconv/return.ll b/test/CodeGen/Mips/cconv/return.ll
new file mode 100644
index 000000000000..76ce5e44c4ae
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/return.ll
@@ -0,0 +1,66 @@
+; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+
+; Test the integer returns for all ABI's and byte orders as specified by
+; section 5 of MD00305 (MIPS ABIs Described).
+
+; We only test Linux because other OS's use different relocations and I don't
+; know if this is correct.
+
+@byte = global i8 zeroinitializer
+@word = global i32 zeroinitializer
+@dword = global i64 zeroinitializer
+@float = global float zeroinitializer
+@double = global double zeroinitializer
+
+define i8 @reti8() nounwind {
+entry:
+ %0 = load volatile i8* @byte
+ ret i8 %0
+}
+
+; ALL-LABEL: reti8:
+; O32-DAG: lui [[R1:\$[0-9]+]], %hi(byte)
+; O32-DAG: lbu $2, %lo(byte)([[R1]])
+; N32-DAG: lui [[R1:\$[0-9]+]], %hi(byte)
+; N32-DAG: lbu $2, %lo(byte)([[R1]])
+; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(byte)($1)
+; N64-DAG: lbu $2, 0([[R1]])
+
+define i32 @reti32() nounwind {
+entry:
+ %0 = load volatile i32* @word
+ ret i32 %0
+}
+
+; ALL-LABEL: reti32:
+; O32-DAG: lui [[R1:\$[0-9]+]], %hi(word)
+; O32-DAG: lw $2, %lo(word)([[R1]])
+; N32-DAG: lui [[R1:\$[0-9]+]], %hi(word)
+; N32-DAG: lw $2, %lo(word)([[R1]])
+; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(word)($1)
+; N64-DAG: lw $2, 0([[R1]])
+
+define i64 @reti64() nounwind {
+entry:
+ %0 = load volatile i64* @dword
+ ret i64 %0
+}
+
+; ALL-LABEL: reti64:
+; On O32, we must use v0 and v1 for the return value
+; O32-DAG: lw $2, %lo(dword)([[R1:\$[0-9]+]])
+; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], %lo(dword)
+; O32-DAG: lw $3, 4([[R2]])
+; N32-DAG: ld $2, %lo(dword)([[R1:\$[0-9]+]])
+; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(dword)([[R1:\$[0-9]+]])
+; N64-DAG: ld $2, 0([[R1]])
diff --git a/test/CodeGen/Mips/cconv/stack-alignment.ll b/test/CodeGen/Mips/cconv/stack-alignment.ll
new file mode 100644
index 000000000000..834033bc8da5
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/stack-alignment.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+
+; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+
+; Test the stack alignment for all ABI's and byte orders as specified by
+; section 5 of MD00305 (MIPS ABIs Described).
+
+define void @local_bytes_1() nounwind {
+entry:
+ %0 = alloca i8
+ ret void
+}
+
+; ALL-LABEL: local_bytes_1:
+; O32: addiu $sp, $sp, -8
+; O32: addiu $sp, $sp, 8
+; N32: addiu $sp, $sp, -16
+; N32: addiu $sp, $sp, 16
+; N64: addiu $sp, $sp, -16
+; N64: addiu $sp, $sp, 16
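The frame sizes checked above follow from the ABI stack alignment rather than from the single byte actually allocated (an inference from the checks, not part of the imported patch):

; local_bytes_1() allocates 1 byte, but $sp must stay ABI-aligned:
;   O32     -> 8-byte aligned stack  -> addiu $sp, $sp, -8
;   N32/N64 -> 16-byte aligned stack -> addiu $sp, $sp, -16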
diff --git a/test/CodeGen/Mips/cfi_offset.ll b/test/CodeGen/Mips/cfi_offset.ll
new file mode 100644
index 000000000000..e23855bd65d2
--- /dev/null
+++ b/test/CodeGen/Mips/cfi_offset.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=mips -mattr=+o32 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-EB
+; RUN: llc -march=mipsel -mattr=+o32 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-EL
+; RUN: llc -march=mips -mattr=+o32,+fpxx < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-EB
+; RUN: llc -march=mipsel -mattr=+o32,+fpxx < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-EL
+; RUN: llc -march=mips -mattr=+o32,+fp64 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-EB
+; RUN: llc -march=mipsel -mattr=+o32,+fp64 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-EL
+
+@var = global double 0.0
+
+declare void @foo(...)
+
+define void @bar() {
+
+; CHECK-LABEL: bar:
+
+; CHECK: .cfi_def_cfa_offset 40
+; CHECK: sdc1 $f22, 32($sp)
+; CHECK: sdc1 $f20, 24($sp)
+; CHECK: sw $ra, 20($sp)
+; CHECK: sw $16, 16($sp)
+
+; CHECK-EB: .cfi_offset 55, -8
+; CHECK-EB: .cfi_offset 54, -4
+; CHECK-EB: .cfi_offset 53, -16
+; CHECK-EB: .cfi_offset 52, -12
+
+; CHECK-EL: .cfi_offset 54, -8
+; CHECK-EL: .cfi_offset 55, -4
+; CHECK-EL: .cfi_offset 52, -16
+; CHECK-EL: .cfi_offset 53, -12
+
+; CHECK: .cfi_offset 31, -20
+; CHECK: .cfi_offset 16, -24
+
+ %val1 = load volatile double* @var
+ %val2 = load volatile double* @var
+ call void (...)* @foo() nounwind
+ store volatile double %val1, double* @var
+ store volatile double %val2, double* @var
+ ret void
+}
diff --git a/test/CodeGen/Mips/ci2.ll b/test/CodeGen/Mips/ci2.ll
new file mode 100644
index 000000000000..7187f0c75888
--- /dev/null
+++ b/test/CodeGen/Mips/ci2.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips16-constant-islands < %s | FileCheck %s -check-prefix=constisle
+
+@i = common global i32 0, align 4
+@b = common global i32 0, align 4
+@l = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define void @foo() #0 {
+entry:
+ store i32 305419896, i32* @i, align 4
+ %0 = load i32* @b, align 4
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 10, i32* @b, align 4
+ br label %if.end
+
+if.else: ; preds = %entry
+ store i32 20, i32* @b, align 4
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ call void asm sideeffect ".space 100000", ""() #1, !srcloc !1
+ store i32 305419896, i32* @l, align 4
+ ret void
+; constisle: $CPI0_1:
+; constisle: .4byte 305419896 # 0x12345678
+; constisle: #APP
+; constisle: .space 100000
+; constisle: #NO_APP
+; constisle: $CPI0_0:
+; constisle: .4byte 305419896 # 0x12345678
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+!1 = metadata !{i32 103}
diff --git a/test/CodeGen/Mips/cmov.ll b/test/CodeGen/Mips/cmov.ll
index c24c5ac26ae1..0c13fb1adfbe 100755
--- a/test/CodeGen/Mips/cmov.ll
+++ b/test/CodeGen/Mips/cmov.ll
@@ -1,16 +1,43 @@
-; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips -regalloc=basic < %s | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=N64
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc -march=mips -mcpu=mips32 -regalloc=basic < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-CMP
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc -march=mips64el -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-CMP
@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
@i3 = common global i32* null, align 4
-; O32-DAG: lw $[[R0:[0-9]+]], %got(i3)
-; O32-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
-; O32: movn $[[R0]], $[[R1]], ${{[0-9]+}}
-; N64-DAG: ldr $[[R0:[0-9]+]]
-; N64-DAG: ld $[[R1:[0-9]+]], %got_disp(i1)
-; N64: movn $[[R0]], $[[R1]], ${{[0-9]+}}
+; ALL-LABEL: cmov1:
+
+; 32-CMOV-DAG: lw $[[R0:[0-9]+]], %got(i3)
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
+; 32-CMOV-DAG: movn $[[R0]], $[[R1]], $4
+; 32-CMOV-DAG: lw $2, 0($[[R0]])
+
+; 32-CMP-DAG: lw $[[R0:[0-9]+]], %got(i3)
+; 32-CMP-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(i1)
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R1]], $4
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R0]], $4
+; 32-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 32-CMP-DAG: lw $2, 0($[[T2]])
+
+; 64-CMOV-DAG: ldr $[[R0:[0-9]+]]
+; 64-CMOV-DAG: ld $[[R1:[0-9]+]], %got_disp(i1)
+; 64-CMOV-DAG: movn $[[R0]], $[[R1]], $4
+
+; 64-CMP-DAG: ld $[[R0:[0-9]+]], %got_disp(i3)(
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(i1)
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32 so bits 32-63 are undefined). It's not really
+; needed.
+; 64-CMP-DAG: sll $[[CC:[0-9]+]], $4, 0
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R1]], $[[CC]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R0]], $[[CC]]
+; 64-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 64-CMP-DAG: ld $2, 0($[[T2]])
+
define i32* @cmov1(i32 %s) nounwind readonly {
entry:
%tobool = icmp ne i32 %s, 0
@@ -22,14 +49,35 @@ entry:
@c = global i32 1, align 4
@d = global i32 0, align 4
-; O32-LABEL: cmov2:
-; O32: addiu $[[R1:[0-9]+]], ${{[a-z0-9]+}}, %got(d)
-; O32: addiu $[[R0:[0-9]+]], ${{[a-z0-9]+}}, %got(c)
-; O32: movn $[[R1]], $[[R0]], ${{[0-9]+}}
-; N64-LABEL: cmov2:
-; N64: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(d)
-; N64: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got_disp(c)
-; N64: movn $[[R1]], $[[R0]], ${{[0-9]+}}
+; ALL-LABEL: cmov2:
+
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(d)
+; 32-CMOV-DAG: addiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got(c)
+; 32-CMOV-DAG: movn $[[R1]], $[[R0]], $4
+; 32-CMOV-DAG: lw $2, 0($[[R0]])
+
+; 32-CMP-DAG: addiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got(d)
+; 32-CMP-DAG: addiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got(c)
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R0]], $4
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R1]], $4
+; 32-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 32-CMP-DAG: lw $2, 0($[[T2]])
+
+; 64-CMOV: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(d)
+; 64-CMOV: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got_disp(c)
+; 64-CMOV: movn $[[R1]], $[[R0]], $4
+
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, %got_disp(d)
+; 64-CMP-DAG: daddiu $[[R0:[0-9]+]], ${{[0-9]+}}, %got_disp(c)
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32 so bits 32-63 are undefined). It's not really
+; needed.
+; 64-CMP-DAG: sll $[[CC:[0-9]+]], $4, 0
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[R0]], $[[CC]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[R1]], $[[CC]]
+; 64-CMP-DAG: or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+; 64-CMP-DAG: lw $2, 0($[[T2]])
+
define i32 @cmov2(i32 %s) nounwind readonly {
entry:
%tobool = icmp ne i32 %s, 0
@@ -39,9 +87,28 @@ entry:
ret i32 %cond
}
-; O32-LABEL: cmov3:
-; O32: xori $[[R0:[0-9]+]], ${{[0-9]+}}, 234
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: cmov3:
+
+; We won't check the result register since we can't know if the move is first
+; or last. We do know it will be either one of two registers so we can at least
+; check that.
+
+; 32-CMOV: xori $[[R0:[0-9]+]], $4, 234
+; 32-CMOV: movz ${{[26]}}, $5, $[[R0]]
+
+; 32-CMP-DAG: xori $[[CC:[0-9]+]], $4, 234
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $5, $[[CC]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $6, $[[CC]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV: xori $[[R0:[0-9]+]], $4, 234
+; 64-CMOV: movz ${{[26]}}, $5, $[[R0]]
+
+; 64-CMP-DAG: xori $[[CC:[0-9]+]], $4, 234
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $5, $[[CC]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $6, $[[CC]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
define i32 @cmov3(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%cmp = icmp eq i32 %a, 234
@@ -49,9 +116,69 @@ entry:
ret i32 %cond
}
-; N64-LABEL: cmov4:
-; N64: xori $[[R0:[0-9]+]], ${{[0-9]+}}, 234
-; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: cmov3_ne:
+
+; We won't check the result register since we can't know if the move is first
+; or last. We do know it will be either one of two registers so we can at least
+; check that.
+
+; FIXME: Use xori instead of addiu+xor.
+; 32-CMOV: addiu $[[R0:[0-9]+]], $zero, 234
+; 32-CMOV: xor $[[R1:[0-9]+]], $4, $[[R0]]
+; 32-CMOV: movn ${{[26]}}, $5, $[[R1]]
+
+; 32-CMP-DAG: xori $[[CC:[0-9]+]], $4, 234
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $5, $[[CC]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $6, $[[CC]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; FIXME: Use xori instead of addiu+xor.
+; 64-CMOV: addiu $[[R0:[0-9]+]], $zero, 234
+; 64-CMOV: xor $[[R1:[0-9]+]], $4, $[[R0]]
+; 64-CMOV: movn ${{[26]}}, $5, $[[R1]]
+
+; 64-CMP-DAG: xori $[[CC:[0-9]+]], $4, 234
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $5, $[[CC]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $6, $[[CC]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+define i32 @cmov3_ne(i32 %a, i32 %b, i32 %c) nounwind readnone {
+entry:
+ %cmp = icmp ne i32 %a, 234
+ %cond = select i1 %cmp, i32 %b, i32 %c
+ ret i32 %cond
+}
+
+; ALL-LABEL: cmov4:
+
+; We won't check the result register since we can't know if the move is first
+; or last. We do know it will be one of two registers so we can at least check
+; that.
+
+; 32-CMOV-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 32-CMOV-DAG: lw $[[R1:2]], 16($sp)
+; 32-CMOV-DAG: lw $[[R2:3]], 20($sp)
+; 32-CMOV-DAG: movz $[[R1]], $6, $[[R0]]
+; 32-CMOV-DAG: movz $[[R2]], $7, $[[R0]]
+
+; 32-CMP-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 32-CMP-DAG: lw $[[R1:[0-9]+]], 16($sp)
+; 32-CMP-DAG: lw $[[R2:[0-9]+]], 20($sp)
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $6, $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $7, $[[R0]]
+; 32-CMP-DAG: selnez $[[T2:[0-9]+]], $[[R1]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T3:[0-9]+]], $[[R2]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T2]]
+; 32-CMP-DAG: or $3, $[[T1]], $[[T3]]
+
+; 64-CMOV: xori $[[R0:[0-9]+]], $4, 234
+; 64-CMOV: movz ${{[26]}}, $5, $[[R0]]
+
+; 64-CMP-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $5, $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $6, $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
define i64 @cmov4(i32 %a, i64 %b, i64 %c) nounwind readnone {
entry:
%cmp = icmp eq i32 %a, 234
@@ -59,6 +186,47 @@ entry:
ret i64 %cond
}
+; ALL-LABEL: cmov4_ne:
+
+; We won't check the result register since we can't know if the move is first
+; or last. We do know it will be one of two registers so we can at least check
+; that.
+
+; FIXME: Use xori instead of addiu+xor.
+; 32-CMOV-DAG: addiu $[[R0:[0-9]+]], $zero, 234
+; 32-CMOV-DAG: xor $[[R1:[0-9]+]], $4, $[[R0]]
+; 32-CMOV-DAG: lw $[[R2:2]], 16($sp)
+; 32-CMOV-DAG: lw $[[R3:3]], 20($sp)
+; 32-CMOV-DAG: movn $[[R2]], $6, $[[R1]]
+; 32-CMOV-DAG: movn $[[R3]], $7, $[[R1]]
+
+; 32-CMP-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 32-CMP-DAG: lw $[[R1:[0-9]+]], 16($sp)
+; 32-CMP-DAG: lw $[[R2:[0-9]+]], 20($sp)
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $6, $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $7, $[[R0]]
+; 32-CMP-DAG: seleqz $[[T2:[0-9]+]], $[[R1]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T3:[0-9]+]], $[[R2]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T2]]
+; 32-CMP-DAG: or $3, $[[T1]], $[[T3]]
+
+; FIXME: Use xori instead of addiu+xor.
+; 64-CMOV: addiu $[[R0:[0-9]+]], $zero, 234
+; 64-CMOV: xor $[[R1:[0-9]+]], $4, $[[R0]]
+; 64-CMOV: movn ${{[26]}}, $5, $[[R1]]
+
+; 64-CMP-DAG: xori $[[R0:[0-9]+]], $4, 234
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $5, $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $6, $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+define i64 @cmov4_ne(i32 %a, i64 %b, i64 %c) nounwind readnone {
+entry:
+ %cmp = icmp ne i32 %a, 234
+ %cond = select i1 %cmp, i64 %b, i64 %c
+ ret i64 %cond
+}
+
; slti and conditional move.
;
; Check that, pattern
@@ -67,74 +235,271 @@ entry:
; (movz t, (setlt a, N + 1), f)
; if N + 1 fits in 16-bit.
-; O32-LABEL: slti0:
-; O32: slti $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti0:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti0(i32 %a) {
entry:
%cmp = icmp sgt i32 %a, 32766
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 3, i32 5
ret i32 %cond
}
-; O32-LABEL: slti1:
-; O32: slt ${{[0-9]+}}
+; ALL-LABEL: slti1:
+
+; 32-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 32-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I32767:[0-9]+]], $zero, 32767
+; 32-CMP-DAG: slt $[[R0:[0-9]+]], $[[I32767]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti1(i32 %a) {
entry:
%cmp = icmp sgt i32 %a, 32767
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 7, i32 5
ret i32 %cond
}
-; O32-LABEL: slti2:
-; O32: slti $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti2:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti2(i32 %a) {
entry:
%cmp = icmp sgt i32 %a, -32769
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 3, i32 5
ret i32 %cond
}
-; O32-LABEL: slti3:
-; O32: slt ${{[0-9]+}}
+; ALL-LABEL: slti3:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 32-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 32-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 32-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 32-CMP-DAG: slt $[[R0:[0-9]+]], $[[I32767]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 64-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 64-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[IMM]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @slti3(i32 %a) {
entry:
%cmp = icmp sgt i32 %a, -32770
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 3, i32 5
ret i32 %cond
}
; 64-bit patterns.
-; N64-LABEL: slti64_0:
-; N64: slti $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
-; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti64_0:
+
+; 32-CMOV-DAG: slt $[[CC:[0-9]+]], $zero, $4
+; 32-CMOV-DAG: addiu $[[I32766:[0-9]+]], $zero, 32766
+; 32-CMOV-DAG: sltu $[[R1:[0-9]+]], $[[I32766]], $5
+; 32-CMOV-DAG: movz $[[CC:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMOV-DAG: addiu $[[I4:3]], $zero, 4
+; 32-CMOV-DAG: movn $[[I4]], $[[I5]], $[[CC]]
+; 32-CMOV-DAG: addiu $2, $zero, 0
+
+; 32-CMP-DAG: slt $[[CC0:[0-9]+]], $zero, $4
+; 32-CMP-DAG: addiu $[[I32766:[0-9]+]], $zero, 32766
+; 32-CMP-DAG: sltu $[[CC1:[0-9]+]], $[[I32766]], $5
+; 32-CMP-DAG: selnez $[[CC2:[0-9]+]], $[[CC0]], $4
+; 32-CMP-DAG: seleqz $[[CC3:[0-9]+]], $[[CC1]], $4
+; 32-CMP: or $[[CC:[0-9]+]], $[[CC3]], $[[CC2]]
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I4]], $[[CC]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[CC]]
+; 32-CMP-DAG: or $3, $[[T1]], $[[T0]]
+; 32-CMP-DAG: addiu $2, $zero, 0
+
+; 64-CMOV-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMOV-DAG: addiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; 64-CMOV-DAG: movz $[[I4]], $[[I5]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by adding/subtracting the result of slti
+; to/from one of the constants.
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_0(i64 %a) {
entry:
%cmp = icmp sgt i64 %a, 32766
- %conv = select i1 %cmp, i64 3, i64 4
+ %conv = select i1 %cmp, i64 5, i64 4
ret i64 %conv
}
-; N64-LABEL: slti64_1:
-; N64: slt ${{[0-9]+}}
+; ALL-LABEL: slti64_1:
+
+; 32-CMOV-DAG: slt $[[CC:[0-9]+]], $zero, $4
+; 32-CMOV-DAG: addiu $[[I32766:[0-9]+]], $zero, 32767
+; 32-CMOV-DAG: sltu $[[R1:[0-9]+]], $[[I32766]], $5
+; 32-CMOV-DAG: movz $[[CC:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMOV-DAG: addiu $[[I4:3]], $zero, 4
+; 32-CMOV-DAG: movn $[[I4]], $[[I5]], $[[CC]]
+; 32-CMOV-DAG: addiu $2, $zero, 0
+
+; 32-CMP-DAG: slt $[[CC0:[0-9]+]], $zero, $4
+; 32-CMP-DAG: addiu $[[I32766:[0-9]+]], $zero, 32767
+; 32-CMP-DAG: sltu $[[CC1:[0-9]+]], $[[I32766]], $5
+; 32-CMP-DAG: selnez $[[CC2:[0-9]+]], $[[CC0]], $4
+; 32-CMP-DAG: seleqz $[[CC3:[0-9]+]], $[[CC1]], $4
+; 32-CMP: or $[[CC:[0-9]+]], $[[CC3]], $[[CC2]]
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I4]], $[[CC]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[CC]]
+; 32-CMP-DAG: or $3, $[[T1]], $[[T0]]
+; 32-CMP-DAG: addiu $2, $zero, 0
+
+; 64-CMOV-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMOV-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: daddiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I4]], $[[I5]], $[[R0]]
+
+; 64-CMP-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_1(i64 %a) {
entry:
%cmp = icmp sgt i64 %a, 32767
- %conv = select i1 %cmp, i64 3, i64 4
+ %conv = select i1 %cmp, i64 5, i64 4
ret i64 %conv
}
-; N64-LABEL: slti64_2:
-; N64: slti $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
-; N64: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: slti64_2:
+
+; FIXME: The 32-bit versions of this test are too complicated to reasonably
+; match at the moment. They do show some missing optimizations though
+; such as:
+; (movz $a, $b, (neg $c)) -> (movn $a, $b, $c)
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; 64-CMOV-DAG: movz $[[I4]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I4:[0-9]+]], $zero, 4
+; 64-CMP-DAG: slti $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by adding/subtracting the result of slti
+; to/from one of the constants.
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_2(i64 %a) {
entry:
@@ -143,56 +508,273 @@ entry:
ret i64 %conv
}
-; N64-LABEL: slti64_3:
-; N64: slt ${{[0-9]+}}
+; ALL-LABEL: slti64_3:
+
+; FIXME: The 32-bit versions of this test are too complicated to reasonably
+; match at the moment. They do show some missing optimizations though
+; such as:
+; (movz $a, $b, (neg $c)) -> (movn $a, $b, $c)
+
+; 64-CMOV-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMOV-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMOV-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, 32766
+; 64-CMOV-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I4]], $[[I5]], $[[R0]]
+
+; 64-CMP-DAG: daddiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: daddiu $[[I4:2]], $zero, 4
+; 64-CMP-DAG: daddiu $[[R1:[0-9]+]], ${{[0-9]+}}, 32766
+; 64-CMP-DAG: slt $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I4]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i64 @slti64_3(i64 %a) {
entry:
%cmp = icmp sgt i64 %a, -32770
- %conv = select i1 %cmp, i64 3, i64 4
+ %conv = select i1 %cmp, i64 5, i64 4
ret i64 %conv
}
; sltiu instructions.
-; O32-LABEL: sltiu0:
-; O32: sltiu $[[R0:[0-9]+]], ${{[0-9]+}}, 32767
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: sltiu0:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, 32767
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu0(i32 %a) {
entry:
%cmp = icmp ugt i32 %a, 32766
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 3, i32 5
ret i32 %cond
}
-; O32-LABEL: sltiu1:
-; O32: sltu ${{[0-9]+}}
+; ALL-LABEL: sltiu1:
+
+; 32-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 32-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: addiu $[[I32767:[0-9]+]], $zero, 32767
+; 32-CMP-DAG: sltu $[[R0:[0-9]+]], $[[I32767]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I7]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I7:[0-9]+]], $zero, 7
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: addiu $[[R1:[0-9]+]], $zero, 32767
+; 64-CMP-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I7]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu1(i32 %a) {
entry:
%cmp = icmp ugt i32 %a, 32767
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 7, i32 5
ret i32 %cond
}
-; O32-LABEL: sltiu2:
-; O32: sltiu $[[R0:[0-9]+]], ${{[0-9]+}}, -32768
-; O32: movz ${{[0-9]+}}, ${{[0-9]+}}, $[[R0]]
+; ALL-LABEL: sltiu2:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; 32-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 32-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; 64-CMOV-DAG: movz $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 64-CMP-DAG: sltiu $[[R0:[0-9]+]], $4, -32768
+; FIXME: We can do better than this by using selccz to choose between +0 and +2
+; 64-CMP-DAG: seleqz $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: selnez $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu2(i32 %a) {
entry:
%cmp = icmp ugt i32 %a, -32769
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 3, i32 5
ret i32 %cond
}
-; O32-LABEL: sltiu3:
-; O32: sltu ${{[0-9]+}}
+; ALL-LABEL: sltiu3:
+
+; 32-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 32-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 32-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 32-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 32-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 32-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 32-CMP-DAG: addiu $[[I5:[0-9]+]], $zero, 5
+; 32-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 32-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 32-CMP-DAG: sltu $[[R0:[0-9]+]], $[[I32767]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 32-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 32-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 32-CMP-DAG: or $2, $[[T0]], $[[T1]]
+
+; 64-CMOV-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMOV-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMOV-DAG: lui $[[R1:[0-9]+]], 65535
+; 64-CMOV-DAG: ori $[[R1]], $[[R1]], 32766
+; 64-CMOV-DAG: sltu $[[R0:[0-9]+]], $[[R1]], $4
+; 64-CMOV-DAG: movn $[[I5]], $[[I3]], $[[R0]]
+
+; 64-CMP-DAG: addiu $[[I3:[0-9]+]], $zero, 3
+; 64-CMP-DAG: addiu $[[I5:2]], $zero, 5
+; 64-CMP-DAG: lui $[[IMM:[0-9]+]], 65535
+; 64-CMP-DAG: ori $[[IMM]], $[[IMM]], 32766
+; 64-CMP-DAG: sltu $[[R0:[0-9]+]], $[[IMM]], $4
+; FIXME: We can do better than this by using selccz to choose between -0 and -2
+; 64-CMP-DAG: selnez $[[T0:[0-9]+]], $[[I3]], $[[R0]]
+; 64-CMP-DAG: seleqz $[[T1:[0-9]+]], $[[I5]], $[[R0]]
+; 64-CMP-DAG: or $2, $[[T0]], $[[T1]]
define i32 @sltiu3(i32 %a) {
entry:
%cmp = icmp ugt i32 %a, -32770
- %cond = select i1 %cmp, i32 3, i32 4
+ %cond = select i1 %cmp, i32 3, i32 5
ret i32 %cond
}
+
+; Check that
+; (select (setxx a, N), x, x-1) or
+; (select (setxx a, N), x-1, x)
+; do not generate conditional moves
+; when the constant operands differ by |1|.
+
+define i32 @slti4(i32 %a) nounwind readnone {
+ %1 = icmp slt i32 %a, 7
+ %2 = select i1 %1, i32 4, i32 3
+ ret i32 %2
+}
+
+; ALL-LABEL: slti4:
+
+; 32-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMOV-DAG: addiu $2, [[R1]], 3
+; 32-CMOV-NOT: movn
+
+; 32-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMP-DAG: addiu $2, [[R1]], 3
+; 32-CMP-NOT: seleqz
+; 32-CMP-NOT: selnez
+
+; 64-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMOV-DAG: addiu $2, [[R1]], 3
+; 64-CMOV-NOT: movn
+
+; 64-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMP-DAG: addiu $2, [[R1]], 3
+; 64-CMP-NOT: seleqz
+; 64-CMP-NOT: selnez
+
+define i32 @slti5(i32 %a) nounwind readnone {
+ %1 = icmp slt i32 %a, 7
+ %2 = select i1 %1, i32 -3, i32 -4
+ ret i32 %2
+}
+
+; ALL-LABEL: slti5:
+
+; 32-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMOV-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 32-CMOV-NOT: movn
+
+; 32-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMP-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 32-CMP-NOT: seleqz
+; 32-CMP-NOT: selnez
+
+; 64-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMOV-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 64-CMOV-NOT: movn
+
+; 64-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMP-DAG: addiu [[R3:\$[0-9]+]], [[R2:\$[a-z0-9]+]], -4
+; 64-CMP-NOT: seleqz
+; 64-CMP-NOT: selnez
+
+define i32 @slti6(i32 %a) nounwind readnone {
+ %1 = icmp slt i32 %a, 7
+ %2 = select i1 %1, i32 3, i32 4
+ ret i32 %2
+}
+
+; ALL-LABEL: slti6:
+
+; 32-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMOV-DAG: xori [[R1]], [[R1]], 1
+; 32-CMOV-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 32-CMOV-NOT: movn
+
+; 32-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 32-CMP-DAG: xori [[R1]], [[R1]], 1
+; 32-CMP-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 32-CMP-NOT: seleqz
+; 32-CMP-NOT: selnez
+
+; 64-CMOV-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMOV-DAG: xori [[R1]], [[R1]], 1
+; 64-CMOV-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 64-CMOV-NOT: movn
+
+; 64-CMP-DAG: slti [[R1:\$[0-9]+]], $4, 7
+; 64-CMP-DAG: xori [[R1]], [[R1]], 1
+; 64-CMP-DAG: addiu [[R2:\$[0-9]+]], [[R1]], 3
+; 64-CMP-NOT: seleqz
+; 64-CMP-NOT: selnez
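A sketch of why slti4/slti5/slti6 above need no conditional move (an illustration based on the checks, not part of the imported patch): when the two select operands differ by 1, the 0/1 result of the comparison folds directly into the answer with arithmetic. Here $t stands for whichever temporary register is chosen.

; e.g. for slti4:
;   %1 = icmp slt i32 %a, 7          ->  slti  $t, $4, 7   ; $t is 0 or 1
;   %2 = select i1 %1, i32 4, i32 3  ->  addiu $2, $t, 3   ; i.e. 3 + (a < 7)
; for slti6 the sense is flipped first with "xori $t, $t, 1" before the addiu.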
diff --git a/test/CodeGen/Mips/const-mult.ll b/test/CodeGen/Mips/const-mult.ll
index 8c0cbe3396b7..186202141dcb 100644
--- a/test/CodeGen/Mips/const-mult.ll
+++ b/test/CodeGen/Mips/const-mult.ll
@@ -1,6 +1,5 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=CHECK
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=CHECK
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=CHECK64
+; RUN: llc -march=mips64el < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK64
; CHECK-LABEL: mul5_32:
; CHECK: sll $[[R0:[0-9]+]], $4, 2
diff --git a/test/CodeGen/Mips/const4a.ll b/test/CodeGen/Mips/const4a.ll
index 0332327cec69..b4c509fcd8c0 100644
--- a/test/CodeGen/Mips/const4a.ll
+++ b/test/CodeGen/Mips/const4a.ll
@@ -15,14 +15,14 @@ define void @t() #0 {
entry:
store i32 -559023410, i32* @i, align 4
%0 = load i32* @b, align 4
-; no-load-relax lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst
+; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst
%tobool = icmp ne i32 %0, 0
br i1 %tobool, label %if.then, label %if.else
; no-load-relax: beqz ${{[0-9]+}}, $BB0_3
; no-load-relax: lw ${{[0-9]+}}, %call16(foo)(${{[0-9]+}})
; no-load-relax: b $BB0_4
; no-load-relax: .align 2
-; no-load-relax: $CPI0_0:
+; no-load-relax: $CPI0_1:
; no-load-relax: .4byte 3735943886
; no-load-relax: $BB0_3:
; no-load-relax: lw ${{[0-9]+}}, %call16(goo)(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/const6.ll b/test/CodeGen/Mips/const6.ll
index 20cdc09f7be1..3f02ab907e1e 100644
--- a/test/CodeGen/Mips/const6.ll
+++ b/test/CodeGen/Mips/const6.ll
@@ -27,7 +27,7 @@ entry:
; no-load-relax: jalrc ${{[0-9]+}}
; no-load-relax: b $BB0_2
; no-load-relax: .align 2
-; no-load-relax: $CPI0_0:
+; no-load-relax: $CPI0_1:
; no-load-relax: .4byte 3735943886
; no-load-relax: $BB0_2:
diff --git a/test/CodeGen/Mips/const6a.ll b/test/CodeGen/Mips/const6a.ll
index 8b402accc7de..d34239058734 100644
--- a/test/CodeGen/Mips/const6a.ll
+++ b/test/CodeGen/Mips/const6a.ll
@@ -19,7 +19,7 @@ entry:
; load-relax: $CPI0_0:
; load-relax: .4byte 3735943886
; load-relax: .end t
- call void asm sideeffect ".space 40000", ""() #1, !srcloc !1
+ call void asm sideeffect ".space 10000", ""() #1, !srcloc !1
ret void
}
diff --git a/test/CodeGen/Mips/countleading.ll b/test/CodeGen/Mips/countleading.ll
new file mode 100644
index 000000000000..6e63cff123cf
--- /dev/null
+++ b/test/CodeGen/Mips/countleading.ll
@@ -0,0 +1,90 @@
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32-R1-R2 -check-prefix=MIPS32-GT-R1 %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32-R1-R2 -check-prefix=MIPS32-GT-R1 %s
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32-R6 -check-prefix=MIPS32-GT-R1 %s
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS4 %s
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64-GT-R1 %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64-GT-R1 %s
+; R!N: llc -march=mips64el -mcpu=mips64r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64-GT-R1 %s
+
+; Prefixes:
+; ALL - All
+; MIPS32-GT-R1 - MIPS32r1 and above (does not include MIPS64's)
+; MIPS64-GT-R1 - MIPS64r1 and above
+
+define i32 @ctlz_i32(i32 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlz_i32:
+
+; MIPS4-NOT: clz
+
+; MIPS32-GT-R1: clz $2, $4
+
+; MIPS64-GT-R1: clz $2, $4
+
+ %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
+ ret i32 %tmp1
+}
+
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
+
+define i32 @ctlo_i32(i32 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlo_i32:
+
+; MIPS4-NOT: clo
+
+; MIPS32-GT-R1: clo $2, $4
+
+; MIPS64-GT-R1: clo $2, $4
+
+ %neg = xor i32 %X, -1
+ %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
+ ret i32 %tmp1
+}
+
+define i64 @ctlz_i64(i64 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlz_i64:
+
+; MIPS4-NOT: dclz
+
+; MIPS32-GT-R1-DAG: clz $[[R0:[0-9]+]], $4
+; MIPS32-GT-R1-DAG: clz $[[R1:[0-9]+]], $5
+; MIPS32-GT-R1-DAG: addiu $[[R2:2+]], $[[R0]], 32
+; MIPS32-R1-R2-DAG: movn $[[R2]], $[[R1]], $5
+; MIPS32-R6-DAG: seleqz $[[R5:[0-9]+]], $[[R2]], $5
+; MIPS32-R6-DAG: selnez $[[R6:[0-9]+]], $[[R1]], $5
+; MIPS32-R6-DAG: or $2, $[[R6]], $[[R5]]
+; MIPS32-GT-R1-DAG: addiu $3, $zero, 0
+
+; MIPS64-GT-R1: dclz $2, $4
+
+ %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
+ ret i64 %tmp1
+}
+
+declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
+
+define i64 @ctlo_i64(i64 %X) nounwind readnone {
+entry:
+; ALL-LABEL: ctlo_i64:
+
+; MIPS4-NOT: dclo
+
+; MIPS32-GT-R1-DAG: clo $[[R0:[0-9]+]], $4
+; MIPS32-GT-R1-DAG: clo $[[R1:[0-9]+]], $5
+; MIPS32-GT-R1-DAG: addiu $[[R2:2+]], $[[R0]], 32
+; MIPS32-GT-R1-DAG: addiu $[[R3:[0-9]+]], $zero, -1
+; MIPS32-GT-R1-DAG: xor $[[R4:[0-9]+]], $5, $[[R3]]
+; MIPS32-R1-R2-DAG: movn $[[R2]], $[[R1]], $[[R4]]
+; MIPS32-R6-DAG: selnez $[[R5:[0-9]+]], $[[R1]], $[[R4]]
+; MIPS32-R6-DAG: seleqz $[[R6:[0-9]+]], $[[R2]], $[[R4]]
+; MIPS32-R6-DAG: or $2, $[[R5]], $[[R6]]
+; MIPS32-GT-R1-DAG: addiu $3, $zero, 0
+
+; MIPS64-GT-R1: dclo $2, $4
+
+ %neg = xor i64 %X, -1
+ %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
+ ret i64 %tmp1
+}
diff --git a/test/CodeGen/Mips/ctlz.ll b/test/CodeGen/Mips/ctlz.ll
index 2ddb72755ac8..1f871664a6cf 100644
--- a/test/CodeGen/Mips/ctlz.ll
+++ b/test/CodeGen/Mips/ctlz.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=static
@x = global i32 28912, align 4
@y = common global i32 0, align 4
diff --git a/test/CodeGen/Mips/divrem.ll b/test/CodeGen/Mips/divrem.ll
index b631c3b279f4..97f836044406 100644
--- a/test/CodeGen/Mips/divrem.ll
+++ b/test/CodeGen/Mips/divrem.ll
@@ -1,77 +1,223 @@
-; RUN: llc -march=mips -verify-machineinstrs < %s |\
-; RUN: FileCheck %s -check-prefix=TRAP
-; RUN: llc -march=mips -mno-check-zero-division < %s |\
-; RUN: FileCheck %s -check-prefix=NOCHECK
+; RUN: llc -march=mips -mcpu=mips32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=ACC32-TRAP
+; RUN: llc -march=mips -mcpu=mips32r2 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=ACC32-TRAP
+; RUN: llc -march=mips -mcpu=mips32r6 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=GPR32-TRAP
+; RUN: llc -march=mips64 -mcpu=mips64 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=ACC64-TRAP
+; RUN: llc -march=mips64 -mcpu=mips64r2 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=ACC64-TRAP
+; RUN: llc -march=mips64 -mcpu=mips64r6 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=GPR64-TRAP
-; TRAP-LABEL: sdiv1:
-; TRAP: div $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mflo
+; RUN: llc -march=mips -mcpu=mips32 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=NOCHECK
+; RUN: llc -march=mips -mcpu=mips32r2 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC32 -check-prefix=NOCHECK
+; RUN: llc -march=mips -mcpu=mips32r6 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=NOCHECK
+; RUN: llc -march=mips64 -mcpu=mips64 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=NOCHECK
+; RUN: llc -march=mips64 -mcpu=mips64r2 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC64 -check-prefix=NOCHECK
+; RUN: llc -march=mips64 -mcpu=mips64r6 -mno-check-zero-division < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=NOCHECK
-; NOCHECK-LABEL: sdiv1:
-; NOCHECK-NOT: teq
-; NOCHECK: .end sdiv1
+; FileCheck Prefixes:
+; ALL - All targets
+; ACC32 - Accumulator based multiply/divide on 32-bit targets
+; ACC64 - Same as ACC32 but only for 64-bit targets
+; GPR32 - GPR based multiply/divide on 32-bit targets
+; GPR64 - Same as GPR32 but only for 64-bit targets
+; ACC32-TRAP - Same as TRAP and ACC32 combined
+; ACC64-TRAP - Same as TRAP and ACC64 combined
+; GPR32-TRAP - Same as TRAP and GPR32 combined
+; GPR64-TRAP - Same as TRAP and GPR64 combined
+; NOCHECK - Division by zero will not be detected
@g0 = common global i32 0, align 4
@g1 = common global i32 0, align 4
define i32 @sdiv1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: sdiv1:
+
+; ACC32: div $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: div $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: div $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: div $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mflo $2
+; ACC64: mflo $2
+
+; ALL: .end sdiv1
+
%div = sdiv i32 %a0, %a1
ret i32 %div
}
-; TRAP-LABEL: srem1:
-; TRAP: div $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mfhi
-
define i32 @srem1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: srem1:
+
+; ACC32: div $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: div $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: mod $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: mod $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mfhi $2
+; ACC64: mfhi $2
+
+; ALL: .end srem1
+
%rem = srem i32 %a0, %a1
ret i32 %rem
}
-; TRAP-LABEL: udiv1:
-; TRAP: divu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mflo
-
define i32 @udiv1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: udiv1:
+
+; ACC32: divu $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: divu $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: divu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mflo $2
+; ACC64: mflo $2
+
+; ALL: .end udiv1
%div = udiv i32 %a0, %a1
ret i32 %div
}
-; TRAP-LABEL: urem1:
-; TRAP: divu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; TRAP: teq $[[R0]], $zero, 7
-; TRAP: mfhi
-
define i32 @urem1(i32 %a0, i32 %a1) nounwind readnone {
entry:
+; ALL-LABEL: urem1:
+
+; ACC32: divu $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR32: modu $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: modu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC32: mfhi $2
+; ACC64: mfhi $2
+
+; ALL: .end urem1
+
%rem = urem i32 %a0, %a1
ret i32 %rem
}
-; TRAP: div $zero,
define i32 @sdivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
+; ALL-LABEL: sdivrem1:
+
+; ACC32: div $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC32: mflo $2
+; ACC32: mfhi $[[R0:[0-9]+]]
+; ACC32: sw $[[R0]], 0(${{[0-9]+}})
+
+; ACC64: div $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sw $[[R0]], 0(${{[0-9]+}})
+
+; GPR32: mod $[[R0:[0-9]+]], $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR32: sw $[[R0]], 0(${{[0-9]+}})
+; GPR32-DAG: div $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+
+; GPR64: mod $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sw $[[R0]], 0(${{[0-9]+}})
+; GPR64-DAG: div $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end sdivrem1
+
%rem = srem i32 %a0, %a1
store i32 %rem, i32* %r, align 4
%div = sdiv i32 %a0, %a1
ret i32 %div
}
-; TRAP: divu $zero,
define i32 @udivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
+; ALL-LABEL: udivrem1:
+
+; ACC32: divu $zero, $4, $5
+; ACC32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC32: mflo $2
+; ACC32: mfhi $[[R0:[0-9]+]]
+; ACC32: sw $[[R0]], 0(${{[0-9]+}})
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sw $[[R0]], 0(${{[0-9]+}})
+
+; GPR32: modu $[[R0:[0-9]+]], $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR32: sw $[[R0]], 0(${{[0-9]+}})
+; GPR32-DAG: divu $2, $4, $5
+; GPR32-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; GPR64: modu $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sw $[[R0]], 0(${{[0-9]+}})
+; GPR64-DAG: divu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end udivrem1
+
%rem = urem i32 %a0, %a1
store i32 %rem, i32* %r, align 4
%div = udiv i32 %a0, %a1
ret i32 %div
}
+; FIXME: It's not clear what this is supposed to test.
define i32 @killFlags() {
entry:
%0 = load i32* @g0, align 4
@@ -79,3 +225,164 @@ entry:
%div = sdiv i32 %0, %1
ret i32 %div
}
+
+define i64 @sdiv2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: sdiv2:
+
+; ACC32: lw $25, %call16(__divdi3)(
+; ACC32: jalr $25
+
+; ACC64: ddiv $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: ddiv $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mflo $2
+
+; ALL: .end sdiv2
+
+ %div = sdiv i64 %a0, %a1
+ ret i64 %div
+}
+
+define i64 @srem2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: srem2:
+
+; ACC32: lw $25, %call16(__moddi3)(
+; ACC32: jalr $25
+
+; ACC64: div $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: dmod $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mfhi $2
+
+; ALL: .end srem2
+
+ %rem = srem i64 %a0, %a1
+ ret i64 %rem
+}
+
+define i64 @udiv2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: udiv2:
+
+; ACC32: lw $25, %call16(__udivdi3)(
+; ACC32: jalr $25
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: ddivu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mflo $2
+
+; ALL: .end udiv2
+ %div = udiv i64 %a0, %a1
+ ret i64 %div
+}
+
+define i64 @urem2(i64 %a0, i64 %a1) nounwind readnone {
+entry:
+; ALL-LABEL: urem2:
+
+; ACC32: lw $25, %call16(__umoddi3)(
+; ACC32: jalr $25
+
+; ACC64: divu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+
+; GPR64: dmodu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+
+; NOCHECK-NOT: teq
+
+; ACC64: mfhi $2
+
+; ALL: .end urem2
+
+ %rem = urem i64 %a0, %a1
+ ret i64 %rem
+}
+
+define i64 @sdivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+entry:
+; ALL-LABEL: sdivrem2:
+
+; sdivrem2 is too complex to check effectively. We can at least check for the
+; library calls, though.
+; ACC32: lw $25, %call16(__moddi3)(
+; ACC32: jalr $25
+; ACC32: lw $25, %call16(__divdi3)(
+; ACC32: jalr $25
+
+; ACC64: ddiv $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64: dmod $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64-DAG: ddiv $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end sdivrem2
+
+ %rem = srem i64 %a0, %a1
+ store i64 %rem, i64* %r, align 8
+ %div = sdiv i64 %a0, %a1
+ ret i64 %div
+}
+
+define i64 @udivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+entry:
+; ALL-LABEL: udivrem2:
+
+; udivrem2 is too complex to check effectively. We can at least check for the
+; library calls, though.
+; ACC32: lw $25, %call16(__umoddi3)(
+; ACC32: jalr $25
+; ACC32: lw $25, %call16(__udivdi3)(
+; ACC32: jalr $25
+
+; ACC64: ddivu $zero, $4, $5
+; ACC64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; ACC64: mflo $2
+; ACC64: mfhi $[[R0:[0-9]+]]
+; ACC64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64: dmodu $[[R0:[0-9]+]], $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+; GPR64: sd $[[R0]], 0(${{[0-9]+}})
+
+; GPR64-DAG: ddivu $2, $4, $5
+; GPR64-TRAP: teq $5, $zero, 7
+; NOCHECK-NOT: teq
+
+; ALL: .end udivrem2
+
+ %rem = urem i64 %a0, %a1
+ store i64 %rem, i64* %r, align 8
+ %div = udiv i64 %a0, %a1
+ ret i64 %div
+}
diff --git a/test/CodeGen/Mips/dsp-r1.ll b/test/CodeGen/Mips/dsp-r1.ll
index acdd17d1afd4..fbd970399640 100644
--- a/test/CodeGen/Mips/dsp-r1.ll
+++ b/test/CodeGen/Mips/dsp-r1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mattr=+dsp < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+dsp < %s | FileCheck %s
define i32 @test__builtin_mips_extr_w1(i32 %i0, i32, i64 %a0) nounwind {
entry:
diff --git a/test/CodeGen/Mips/eh-dwarf-cfa.ll b/test/CodeGen/Mips/eh-dwarf-cfa.ll
index 3a21332b5c5a..6554974bf849 100644
--- a/test/CodeGen/Mips/eh-dwarf-cfa.ll
+++ b/test/CodeGen/Mips/eh-dwarf-cfa.ll
@@ -1,4 +1,6 @@
; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | \
+; RUN: FileCheck %s -check-prefix=CHECK-MIPS64
; RUN: llc -march=mips64el -mcpu=mips64 < %s | \
; RUN: FileCheck %s -check-prefix=CHECK-MIPS64
diff --git a/test/CodeGen/Mips/eh-return32.ll b/test/CodeGen/Mips/eh-return32.ll
index c3003b34b162..748050c4d34b 100644
--- a/test/CodeGen/Mips/eh-return32.ll
+++ b/test/CodeGen/Mips/eh-return32.ll
@@ -1,4 +1,6 @@
-; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mipsel -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mipsel -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=R6
declare void @llvm.eh.return.i32(i32, i8*)
declare void @foo(...)
@@ -9,7 +11,7 @@ entry:
call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
unreachable
-; CHECK: f1
+; CHECK: f1:
; CHECK: addiu $sp, $sp, -[[spoffset:[0-9]+]]
; check that $a0-$a3 are saved on stack.
@@ -41,7 +43,8 @@ entry:
; CHECK: addiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: addu $sp, $sp, $3
}
@@ -50,7 +53,7 @@ entry:
call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
unreachable
-; CHECK: f2
+; CHECK: f2:
; CHECK: addiu $sp, $sp, -[[spoffset:[0-9]+]]
; check that $a0-$a3 are saved on stack.
@@ -80,6 +83,7 @@ entry:
; CHECK: addiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: addu $sp, $sp, $3
}
diff --git a/test/CodeGen/Mips/eh-return64.ll b/test/CodeGen/Mips/eh-return64.ll
index 32fc5e61899a..74a43231598c 100644
--- a/test/CodeGen/Mips/eh-return64.ll
+++ b/test/CodeGen/Mips/eh-return64.ll
@@ -1,4 +1,7 @@
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mips64el -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mips64el -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=NOT-R6
+; RUN: llc -march=mips64el -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=CHECK -check-prefix=R6
declare void @llvm.eh.return.i64(i64, i8*)
declare void @foo(...)
@@ -9,7 +12,7 @@ entry:
call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
unreachable
-; CHECK: f1
+; CHECK: f1:
; CHECK: daddiu $sp, $sp, -[[spoffset:[0-9]+]]
; check that $a0-$a3 are saved on stack.
@@ -41,9 +44,9 @@ entry:
; CHECK: daddiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: daddu $sp, $sp, $3
-
}
define void @f2(i64 %offset, i8* %handler) {
@@ -51,7 +54,7 @@ entry:
call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
unreachable
-; CHECK: f2
+; CHECK: f2:
; CHECK: .cfi_startproc
; CHECK: daddiu $sp, $sp, -[[spoffset:[0-9]+]]
; CHECK: .cfi_def_cfa_offset [[spoffset]]
@@ -83,7 +86,8 @@ entry:
; CHECK: daddiu $sp, $sp, [[spoffset]]
; CHECK: move $25, $2
; CHECK: move $ra, $2
-; CHECK: jr $ra
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
; CHECK: daddu $sp, $sp, $3
; CHECK: .cfi_endproc
}
diff --git a/test/CodeGen/Mips/ehframe-indirect.ll b/test/CodeGen/Mips/ehframe-indirect.ll
new file mode 100644
index 000000000000..e78497a9521e
--- /dev/null
+++ b/test/CodeGen/Mips/ehframe-indirect.ll
@@ -0,0 +1,34 @@
+; RUN: llc -mtriple=mipsel-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-android < %s | FileCheck %s
+
+define i32 @main() {
+; CHECK: .cfi_startproc
+; CHECK: .cfi_personality 128, DW.ref.__gxx_personality_v0
+
+entry:
+ invoke void @foo() to label %cont unwind label %lpad
+; CHECK: foo
+; CHECK: jalr
+
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8*
+ bitcast (i32 (...)* @__gxx_personality_v0 to i8*) catch i8* null
+ ret i32 0
+
+cont:
+ ret i32 0
+}
+; CHECK: .cfi_endproc
+
+declare i32 @__gxx_personality_v0(...)
+
+declare void @foo()
+
+; CHECK: .hidden DW.ref.__gxx_personality_v0
+; CHECK: .weak DW.ref.__gxx_personality_v0
+; CHECK: .section .data.DW.ref.__gxx_personality_v0,"aGw",@progbits,DW.ref.__gxx_personality_v0,comdat
+; CHECK: .align 2
+; CHECK: .type DW.ref.__gxx_personality_v0,@object
+; CHECK: .size DW.ref.__gxx_personality_v0, 4
+; CHECK: DW.ref.__gxx_personality_v0:
+; CHECK: .4byte __gxx_personality_v0
diff --git a/test/CodeGen/Mips/elf_eflags.ll b/test/CodeGen/Mips/elf_eflags.ll
new file mode 100644
index 000000000000..00d8584fdad2
--- /dev/null
+++ b/test/CodeGen/Mips/elf_eflags.ll
@@ -0,0 +1,86 @@
+; This tests ELF EFLAGS setting with direct object emission.
+; When the assembler is ready, a .s file for it will
+; be created.
+
+; Non-shared (static) is the absence of pic and/or cpic.
+
+; EF_MIPS_NOREORDER (0x00000001) is always on by default currently
+; EF_MIPS_PIC (0x00000002)
+; EF_MIPS_CPIC (0x00000004) - See note below
+; EF_MIPS_ABI2 (0x00000020) - n32 not tested yet
+; EF_MIPS_ARCH_32 (0x50000000)
+; EF_MIPS_ARCH_64 (0x60000000)
+; EF_MIPS_ARCH_32R2 (0x70000000)
+; EF_MIPS_ARCH_64R2 (0x80000000)
+
+; Note that EF_MIPS_CPIC is set by -mabicalls, which is the default on Linux.
+; TODO: need to support -mno-abicalls.
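+;
+; Illustrative arithmetic (not checked directly by this test): a 32R2 PIC
+; object from the RUN lines below would carry roughly
+;   EF_MIPS_ARCH_32R2 | EF_MIPS_CPIC | EF_MIPS_PIC | EF_MIPS_NOREORDER
+;   = 0x70000000 | 0x00000004 | 0x00000002 | 0x00000001 = 0x70000007
+; in e_flags, while the corresponding static build would at least drop
+; EF_MIPS_PIC.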
+
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE32 %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | FileCheck -check-prefix=CHECK-LE32_PIC %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE32R2 %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 %s -o - | FileCheck -check-prefix=CHECK-LE32R2_PIC %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+micromips -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE32R2-MICROMIPS %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+micromips %s -o - | FileCheck -check-prefix=CHECK-LE32R2-MICROMIPS_PIC %s
+
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips4 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE64 %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips4 %s -o - | FileCheck -check-prefix=CHECK-LE64_PIC %s
+
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE64 %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64 %s -o - | FileCheck -check-prefix=CHECK-LE64_PIC %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64r2 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-LE64R2 %s
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64r2 %s -o - | FileCheck -check-prefix=CHECK-LE64R2_PIC %s
+
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+mips16 -relocation-model=pic %s -o - | FileCheck -check-prefix=CHECK-LE32R2-MIPS16 %s
+
+; 32(R1) bit with NO_REORDER and static
+; CHECK-LE32: .abicalls
+; CHECK-LE32: .option pic0
+; CHECK-LE32: .set noreorder
+;
+; 32(R1) bit with NO_REORDER and PIC
+; CHECK-LE32_PIC: .abicalls
+; CHECK-LE32_PIC: .set noreorder
+;
+; 32R2 bit with NO_REORDER and static
+; CHECK-LE32R2: .abicalls
+; CHECK-LE32R2: .option pic0
+; CHECK-LE32R2: .set noreorder
+;
+; 32R2 bit with NO_REORDER and PIC
+; CHECK-LE32R2_PIC: .abicalls
+; CHECK-LE32R2_PIC: .set noreorder
+;
+; 32R2 bit MICROMIPS with NO_REORDER and static
+; CHECK-LE32R2-MICROMIPS: .abicalls
+; CHECK-LE32R2-MICROMIPS: .option pic0
+; CHECK-LE32R2-MICROMIPS: .set micromips
+;
+; 32R2 bit MICROMIPS with NO_REORDER and PIC
+; CHECK-LE32R2-MICROMIPS_PIC: .abicalls
+; CHECK-LE32R2-MICROMIPS_PIC: .set micromips
+;
+; 64(R1) bit with NO_REORDER and static
+; CHECK-LE64: .abicalls
+; CHECK-LE64: .set noreorder
+;
+; 64(R1) bit with NO_REORDER and PIC
+; CHECK-LE64_PIC: .abicalls
+; CHECK-LE64_PIC: .set noreorder
+;
+; 64R2 bit with NO_REORDER and static
+; CHECK-LE64R2: .abicalls
+; CHECK-LE64R2: .set noreorder
+;
+; 64R2 bit with NO_REORDER and PIC
+; CHECK-LE64R2_PIC: .abicalls
+; CHECK-LE64R2_PIC: .set noreorder
+;
+; 32R2 bit MIPS16 with PIC
+; CHECK-LE32R2-MIPS16: .abicalls
+; CHECK-LE32R2-MIPS16: .set mips16
+
+define i32 @main() nounwind {
+entry:
+ ret i32 0
+}
diff --git a/test/CodeGen/Mips/ex2.ll b/test/CodeGen/Mips/ex2.ll
index c5535e7661a7..6d024c209c26 100644
--- a/test/CodeGen/Mips/ex2.ll
+++ b/test/CodeGen/Mips/ex2.ll
@@ -6,12 +6,11 @@
define i32 @main() {
; 16-LABEL: main:
; 16: .cfi_startproc
-; 16: save $ra, $s0, $s1, $s2, 40
-; 16: .cfi_def_cfa_offset 40
-; 16: .cfi_offset 18, -8
-; 16: .cfi_offset 17, -12
-; 16: .cfi_offset 16, -16
+; 16: save $16, $17, $ra, 32 # 16 bit inst
+; 16: .cfi_def_cfa_offset 32
; 16: .cfi_offset 31, -4
+; 16: .cfi_offset 17, -8
+; 16: .cfi_offset 16, -12
; 16: .cfi_endproc
entry:
%retval = alloca i32, align 4
diff --git a/test/CodeGen/Mips/f16abs.ll b/test/CodeGen/Mips/f16abs.ll
index 928914f067dd..0fba9c4fd08a 100644
--- a/test/CodeGen/Mips/f16abs.ll
+++ b/test/CodeGen/Mips/f16abs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=static
@y = global double -1.450000e+00, align 8
@x = common global double 0.000000e+00, align 8
diff --git a/test/CodeGen/Mips/fabs.ll b/test/CodeGen/Mips/fabs.ll
index 49d8a7201e8b..ce1a9a60e7c2 100644
--- a/test/CodeGen/Mips/fabs.ll
+++ b/test/CodeGen/Mips/fabs.ll
@@ -1,21 +1,23 @@
-; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32 | FileCheck %s -check-prefix=32
-; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32r2 | FileCheck %s -check-prefix=32R2
-; RUN: llc < %s -mtriple=mips64el-linux-gnu -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=64
-; RUN: llc < %s -mtriple=mips64el-linux-gnu -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=64R2
-; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32 -enable-no-nans-fp-math | FileCheck %s -check-prefix=NO-NAN
+; Check that abs.[ds] are selected and do not depend on -enable-no-nans-fp-math.
+; They obey the Has2008 and ABS2008 configuration bits, which govern the
+; conformance to IEEE 754 (1985) and IEEE 754 (2008). When these bits are not
+; present, they conform to IEEE 754 (1985).
+; In 1985 mode, abs.[ds] are arithmetic (i.e. they raise invalid operation
+; exceptions when given NaNs). In 2008 mode, they are non-arithmetic (i.e.
+; they are copies and don't raise any exceptions).
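+; (For single precision, the 2008-mode copy amounts to clearing the sign bit,
+; i.e. |x| = x & 0x7fffffff, which is the mask the removed lui/ori/and
+; sequences below used to build.)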
-define float @foo0(float %a) nounwind readnone {
-entry:
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32 | FileCheck %s
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32r2 | FileCheck %s
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32 -enable-no-nans-fp-math | FileCheck %s
-; 32: lui $[[T0:[0-9]+]], 32767
-; 32: ori $[[MSK0:[0-9]+]], $[[T0]], 65535
-; 32: and $[[AND:[0-9]+]], ${{[0-9]+}}, $[[MSK0]]
-; 32: mtc1 $[[AND]], $f0
+; RUN: llc < %s -mtriple=mips64el-linux-gnu -mcpu=mips64 | FileCheck %s
+; RUN: llc < %s -mtriple=mips64el-linux-gnu -mcpu=mips64 -enable-no-nans-fp-math | FileCheck %s
-; 32R2: ins $[[INS:[0-9]+]], $zero, 31, 1
-; 32R2: mtc1 $[[INS]], $f0
+define float @foo0(float %a) nounwind readnone {
+entry:
-; NO-NAN: abs.s
+; CHECK-LABEL: foo0
+; CHECK: abs.s
%call = tail call float @fabsf(float %a) nounwind readnone
ret float %call
@@ -26,24 +28,8 @@ declare float @fabsf(float) nounwind readnone
define double @foo1(double %a) nounwind readnone {
entry:
-; 32: lui $[[T0:[0-9]+]], 32767
-; 32: ori $[[MSK0:[0-9]+]], $[[T0]], 65535
-; 32: and $[[AND:[0-9]+]], ${{[0-9]+}}, $[[MSK0]]
-; 32: mtc1 $[[AND]], $f1
-
-; 32R2: ins $[[INS:[0-9]+]], $zero, 31, 1
-; 32R2: mtc1 $[[INS]], $f1
-
-; 64: daddiu $[[T0:[0-9]+]], $zero, 1
-; 64: dsll $[[T1:[0-9]+]], ${{[0-9]+}}, 63
-; 64: daddiu $[[MSK0:[0-9]+]], $[[T1]], -1
-; 64: and $[[AND:[0-9]+]], ${{[0-9]+}}, $[[MSK0]]
-; 64: dmtc1 $[[AND]], $f0
-
-; 64R2: dins $[[INS:[0-9]+]], $zero, 63, 1
-; 64R2: dmtc1 $[[INS]], $f0
-
-; NO-NAN: abs.d
+; CHECK-LABEL: foo1:
+; CHECK: abs.d
%call = tail call double @fabs(double %a) nounwind readnone
ret double %call
diff --git a/test/CodeGen/Mips/fastcc.ll b/test/CodeGen/Mips/fastcc.ll
index 82919e7139bd..822902c27d2f 100644
--- a/test/CodeGen/Mips/fastcc.ll
+++ b/test/CodeGen/Mips/fastcc.ll
@@ -1,4 +1,8 @@
; RUN: llc < %s -march=mipsel | FileCheck %s
+; RUN: llc < %s -mtriple=mipsel-none-nacl-gnu \
+; RUN: | FileCheck %s -check-prefix=CHECK-NACL
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -mattr=+nooddspreg | FileCheck %s -check-prefix=NOODDSPREG
+
@gi0 = external global i32
@gi1 = external global i32
@@ -77,6 +81,8 @@
@g15 = external global i32
@g16 = external global i32
+@fa = common global [11 x float] zeroinitializer, align 4
+
define void @caller0() nounwind {
entry:
; CHECK: caller0
@@ -95,6 +101,11 @@ entry:
; CHECK: lw $5
; CHECK: lw $4
+; t6, t7 and t8 are reserved in NaCl and cannot be used for fastcc.
+; CHECK-NACL-NOT: lw $14
+; CHECK-NACL-NOT: lw $15
+; CHECK-NACL-NOT: lw $24
+
%0 = load i32* @gi0, align 4
%1 = load i32* @gi1, align 4
%2 = load i32* @gi2, align 4
@@ -134,6 +145,11 @@ entry:
; CHECK: sw $24
; CHECK: sw $3
+; t6, t7 and t8 are reserved in NaCl and cannot be used for fastcc.
+; CHECK-NACL-NOT: sw $14
+; CHECK-NACL-NOT: sw $15
+; CHECK-NACL-NOT: sw $24
+
store i32 %a0, i32* @g0, align 4
store i32 %a1, i32* @g1, align 4
store i32 %a2, i32* @g2, align 4
@@ -251,3 +267,84 @@ entry:
ret void
}
+define void @caller2() {
+entry:
+
+; NOODDSPREG-LABEL: caller2
+
+; Check that the first 10 arguments are passed in the even float registers
+; f0, f2, ..., f18, and that the 11th argument is passed on the stack.
+
+; NOODDSPREG-DAG: lw $[[R0:[0-9]+]], %got(fa)(${{[0-9]+|gp}})
+; NOODDSPREG-DAG: lwc1 $f0, 0($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f2, 4($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f4, 8($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f6, 12($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f8, 16($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f10, 20($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f12, 24($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f14, 28($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f16, 32($[[R0]])
+; NOODDSPREG-DAG: lwc1 $f18, 36($[[R0]])
+
+; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 40($[[R0]])
+; NOODDSPREG-DAG: swc1 $[[F0]], 0($sp)
+
+ %0 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4
+ %1 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4
+ %2 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 2), align 4
+ %3 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4
+ %4 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4
+ %5 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4
+ %6 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4
+ %7 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4
+ %8 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4
+ %9 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4
+ %10 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4
+ tail call fastcc void @callee2(float %0, float %1, float %2, float %3,
+ float %4, float %5, float %6, float %7,
+ float %8, float %9, float %10)
+ ret void
+}
+
+define fastcc void @callee2(float %a0, float %a1, float %a2, float %a3,
+ float %a4, float %a5, float %a6, float %a7,
+ float %a8, float %a9, float %a10) {
+entry:
+
+; NOODDSPREG-LABEL: callee2
+
+; NOODDSPREG: addiu $sp, $sp, -[[OFFSET:[0-9]+]]
+
+; Check that the first 10 arguments are received in the even float registers
+; f0, f2, ..., f18, and that the 11th argument is received on the stack.
+
+; NOODDSPREG-DAG: lw $[[R0:[0-9]+]], %got(fa)(${{[0-9]+|gp}})
+; NOODDSPREG-DAG: swc1 $f0, 0($[[R0]])
+; NOODDSPREG-DAG: swc1 $f2, 4($[[R0]])
+; NOODDSPREG-DAG: swc1 $f4, 8($[[R0]])
+; NOODDSPREG-DAG: swc1 $f6, 12($[[R0]])
+; NOODDSPREG-DAG: swc1 $f8, 16($[[R0]])
+; NOODDSPREG-DAG: swc1 $f10, 20($[[R0]])
+; NOODDSPREG-DAG: swc1 $f12, 24($[[R0]])
+; NOODDSPREG-DAG: swc1 $f14, 28($[[R0]])
+; NOODDSPREG-DAG: swc1 $f16, 32($[[R0]])
+; NOODDSPREG-DAG: swc1 $f18, 36($[[R0]])
+
+; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], [[OFFSET]]($sp)
+; NOODDSPREG-DAG: swc1 $[[F0]], 40($[[R0]])
+
+ store float %a0, float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4
+ store float %a1, float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4
+ store float %a2, float* getelementptr ([11 x float]* @fa, i32 0, i32 2), align 4
+ store float %a3, float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4
+ store float %a4, float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4
+ store float %a5, float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4
+ store float %a6, float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4
+ store float %a7, float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4
+ store float %a8, float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4
+ store float %a9, float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4
+ store float %a10, float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4
+ ret void
+}
+
diff --git a/test/CodeGen/Mips/fcmp.ll b/test/CodeGen/Mips/fcmp.ll
new file mode 100644
index 000000000000..b7759831c5a2
--- /dev/null
+++ b/test/CodeGen/Mips/fcmp.ll
@@ -0,0 +1,783 @@
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32-C
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32-C
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32-CMP
+; RUN: llc < %s -march=mips64el -mcpu=mips4 | FileCheck %s -check-prefix=ALL -check-prefix=64-C
+; RUN: llc < %s -march=mips64el -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=64-C
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=64-C
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMP
+
+define i32 @false_f32(float %a, float %b) nounwind {
+; ALL-LABEL: false_f32:
+; ALL: addiu $2, $zero, 0
+
+ %1 = fcmp false float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @oeq_f32(float %a, float %b) nounwind {
+; ALL-LABEL: oeq_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oeq float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ogt_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ogt_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ogt float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @oge_f32(float %a, float %b) nounwind {
+; ALL-LABEL: oge_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oge float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @olt_f32(float %a, float %b) nounwind {
+; ALL-LABEL: olt_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp olt float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ole_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ole_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ole float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @one_f32(float %a, float %b) nounwind {
+; ALL-LABEL: one_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp one float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ord_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ord_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp ord float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ueq_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ueq_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ueq float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ugt_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ugt_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ugt float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uge_f32(float %a, float %b) nounwind {
+; ALL-LABEL: uge_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uge float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ult_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ult_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ult float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ule_f32(float %a, float %b) nounwind {
+; ALL-LABEL: ule_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ule float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @une_f32(float %a, float %b) nounwind {
+; ALL-LABEL: une_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.s $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.s $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp une float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uno_f32(float %a, float %b) nounwind {
+; ALL-LABEL: uno_f32:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.s $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.s $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uno float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @true_f32(float %a, float %b) nounwind {
+; ALL-LABEL: true_f32:
+; ALL: addiu $2, $zero, 1
+
+ %1 = fcmp true float %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @false_f64(double %a, double %b) nounwind {
+; ALL-LABEL: false_f64:
+; ALL: addiu $2, $zero, 0
+
+ %1 = fcmp false double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @oeq_f64(double %a, double %b) nounwind {
+; ALL-LABEL: oeq_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oeq double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ogt_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ogt_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ogt double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @oge_f64(double %a, double %b) nounwind {
+; ALL-LABEL: oge_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp oge double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @olt_f64(double %a, double %b) nounwind {
+; ALL-LABEL: olt_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp olt double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ole_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ole_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ole double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @one_f64(double %a, double %b) nounwind {
+; ALL-LABEL: one_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp one double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ord_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ord_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp ord double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ueq_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ueq_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ueq.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ueq.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ueq double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ugt_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ugt_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ole.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ole.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ugt double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uge_f64(double %a, double %b) nounwind {
+; ALL-LABEL: uge_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.olt.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.olt.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uge double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ult_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ult_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ult.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ult.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ult double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @ule_f64(double %a, double %b) nounwind {
+; ALL-LABEL: ule_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.ule.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.ule.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp ule double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @une_f64(double %a, double %b) nounwind {
+; ALL-LABEL: une_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.eq.d $f12, $f14
+; 32-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.eq.d $f12, $f13
+; 64-C-DAG: movf $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 32-CMP-DAG: andi $2, $[[T2]], 1
+
+; 64-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: not $[[T2:[0-9]+]], $[[T1]]
+; 64-CMP-DAG: andi $2, $[[T2]], 1
+
+ %1 = fcmp une double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @uno_f64(double %a, double %b) nounwind {
+; ALL-LABEL: uno_f64:
+
+; 32-C-DAG: addiu $[[T0:2]], $zero, 0
+; 32-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 32-C-DAG: c.un.d $f12, $f14
+; 32-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 64-C-DAG: addiu $[[T0:2]], $zero, 0
+; 64-C-DAG: addiu $[[T1:[0-9]+]], $zero, 1
+; 64-C-DAG: c.un.d $f12, $f13
+; 64-C-DAG: movt $[[T0]], $1, $fcc0
+
+; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14
+; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 32-CMP-DAG: andi $2, $[[T1]], 1
+
+; 64-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f13
+; 64-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]]
+; 64-CMP-DAG: andi $2, $[[T1]], 1
+
+ %1 = fcmp uno double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @true_f64(double %a, double %b) nounwind {
+; ALL-LABEL: true_f64:
+; ALL: addiu $2, $zero, 1
+
+ %1 = fcmp true double %a, %b
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
diff --git a/test/CodeGen/Mips/fcopysign-f32-f64.ll b/test/CodeGen/Mips/fcopysign-f32-f64.ll
index 9f88d0c956b1..148a780fb930 100644
--- a/test/CodeGen/Mips/fcopysign-f32-f64.ll
+++ b/test/CodeGen/Mips/fcopysign-f32-f64.ll
@@ -1,3 +1,4 @@
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s -check-prefix=64
; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=64
; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=64R2
diff --git a/test/CodeGen/Mips/fcopysign.ll b/test/CodeGen/Mips/fcopysign.ll
index 1c57eca3c9ec..3a9d9c73b279 100644
--- a/test/CodeGen/Mips/fcopysign.ll
+++ b/test/CodeGen/Mips/fcopysign.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=32
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=32R2
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s -check-prefix=64
; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=64
; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=64R2
@@ -16,7 +17,7 @@ entry:
; 32R2: ext $[[EXT:[0-9]+]], ${{[0-9]+}}, 31, 1
; 32R2: ins $[[INS:[0-9]+]], $[[EXT]], 31, 1
-; 32R2: mtc1 $[[INS]], $f1
+; 32R2: mthc1 $[[INS]], $f0
; 64: daddiu $[[T0:[0-9]+]], $zero, 1
; 64: dsll $[[MSK1:[0-9]+]], $[[T0]], 63
diff --git a/test/CodeGen/Mips/fixdfsf.ll b/test/CodeGen/Mips/fixdfsf.ll
index b08eefd71235..4271ac222edb 100644
--- a/test/CodeGen/Mips/fixdfsf.ll
+++ b/test/CodeGen/Mips/fixdfsf.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic1
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=pic1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=pic2
@x = common global double 0.000000e+00, align 8
@y = common global i32 0, align 4
diff --git a/test/CodeGen/Mips/fmadd1.ll b/test/CodeGen/Mips/fmadd1.ll
index 435b419368b3..271631efb40a 100644
--- a/test/CodeGen/Mips/fmadd1.ll
+++ b/test/CodeGen/Mips/fmadd1.ll
@@ -1,11 +1,58 @@
-; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -enable-no-nans-fp-math | FileCheck %s -check-prefix=32R2
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=64R2
-; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=32R2NAN
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=64R2NAN
+; Check that madd.[ds], msub.[ds], nmadd.[ds], and nmsub.[ds] are supported
+; correctly.
+; The spec for nmadd.[ds] and nmsub.[ds] does not state that they obey the
+; Has2008 and ABS2008 configuration bits, which govern the conformance to
+; IEEE 754 (1985) and IEEE 754 (2008). These instructions are therefore only
+; available when -enable-no-nans-fp-math is given.
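+;
+; Roughly, madd.s $fd, $fr, $fs, $ft computes fd = (fs * ft) + fr, nmadd.s
+; negates that result (fd = -((fs * ft) + fr)), and msub.s/nmsub.s are the
+; corresponding subtracting forms.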
+
+; RUN: llc < %s -march=mipsel -mcpu=mips32 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32 -check-prefix=32-NONAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32R2 -check-prefix=32R2-NONAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32R6 -check-prefix=32R6-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NONAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32 -check-prefix=32-NAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32R2 -check-prefix=32R2-NAN
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32R6 -check-prefix=32R6-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NAN
define float @FOO0float(float %a, float %b, float %c) nounwind readnone {
entry:
-; CHECK: madd.s
+; ALL-LABEL: FOO0float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R2: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2: madd.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64R2: madd.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.s $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.s $[[T0:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.s $[[T1:f[0-9]+]], $[[T0]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
%mul = fmul float %a, %b
%add = fadd float %mul, %c
%add1 = fadd float %add, 0.000000e+00
@@ -14,7 +61,39 @@ entry:
define float @FOO1float(float %a, float %b, float %c) nounwind readnone {
entry:
-; CHECK: msub.s
+; ALL-LABEL: FOO1float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R2: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2: msub.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: add.s $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.s $f0, $[[T1]], $[[T2]]
+
+; 64R2: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.s $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.s $[[T0:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.s $[[T1:f[0-9]+]], $[[T0]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.s $f0, $[[T1]], $[[T2]]
+
%mul = fmul float %a, %b
%sub = fsub float %mul, %c
%add = fadd float %sub, 0.000000e+00
@@ -23,10 +102,44 @@ entry:
define float @FOO2float(float %a, float %b, float %c) nounwind readnone {
entry:
-; 32R2: nmadd.s
-; 64R2: nmadd.s
-; 32R2NAN: madd.s
-; 64R2NAN: madd.s
+; ALL-LABEL: FOO2float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NONAN: nmadd.s $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NAN: madd.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: sub.s $f0, $[[T2]], $[[T1]]
+
+; 32R6-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64R2-NONAN: nmadd.s $f0, $f14, $f12, $f13
+
+; 64R2-NAN: madd.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.s $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
%mul = fmul float %a, %b
%add = fadd float %mul, %c
%sub = fsub float 0.000000e+00, %add
@@ -35,10 +148,36 @@ entry:
define float @FOO3float(float %a, float %b, float %c) nounwind readnone {
entry:
-; 32R2: nmsub.s
-; 64R2: nmsub.s
-; 32R2NAN: msub.s
-; 64R2NAN: msub.s
+; ALL-LABEL: FOO3float:
+
+; 32-DAG: mtc1 $6, $[[T0:f[0-9]+]]
+; 32-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NONAN: nmsub.s $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: mtc1 $6, $[[T0:f[0-9]+]]
+; 32R2-NAN: msub.s $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
+; 64R2-NAN: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.s $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.s $f0, $[[T2]], $[[T1]]
+
%mul = fmul float %a, %b
%sub = fsub float %mul, %c
%sub1 = fsub float 0.000000e+00, %sub
@@ -47,7 +186,40 @@ entry:
define double @FOO10double(double %a, double %b, double %c) nounwind readnone {
entry:
-; CHECK: madd.d
+; ALL-LABEL: FOO10double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R2: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2: madd.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: mthc1 $zero, $[[T2]]
+; 32R2: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64R2: madd.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.d $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
%mul = fmul double %a, %b
%add = fadd double %mul, %c
%add1 = fadd double %add, 0.000000e+00
@@ -56,7 +228,40 @@ entry:
define double @FOO11double(double %a, double %b, double %c) nounwind readnone {
entry:
-; CHECK: msub.d
+; ALL-LABEL: FOO11double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R2: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2: msub.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2: mthc1 $zero, $[[T2]]
+; 32R2: add.d $f0, $[[T1]], $[[T2]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: add.d $f0, $[[T1]], $[[T2]]
+
+; 64R2: msub.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2: add.d $f0, $[[T0]], $[[T1]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: add.d $f0, $[[T1]], $[[T2]]
+
%mul = fmul double %a, %b
%sub = fsub double %mul, %c
%add = fadd double %sub, 0.000000e+00
@@ -65,10 +270,45 @@ entry:
define double @FOO12double(double %a, double %b, double %c) nounwind readnone {
entry:
-; 32R2: nmadd.d
-; 64R2: nmadd.d
-; 32R2NAN: madd.d
-; 64R2NAN: madd.d
+; ALL-LABEL: FOO12double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NONAN: nmadd.d $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NAN: madd.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: mthc1 $zero, $[[T2]]
+; 32R2-NAN: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64R2-NONAN: nmadd.d $f0, $f14, $f12, $f13
+
+; 64R2-NAN: madd.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.d $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
%mul = fmul double %a, %b
%add = fadd double %mul, %c
%sub = fsub double 0.000000e+00, %add
@@ -77,10 +317,45 @@ entry:
define double @FOO13double(double %a, double %b, double %c) nounwind readnone {
entry:
-; 32R2: nmsub.d
-; 64R2: nmsub.d
-; 32R2NAN: msub.d
-; 64R2NAN: msub.d
+; ALL-LABEL: FOO13double:
+
+; 32-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R2-NONAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NONAN: nmsub.d $f0, $[[T0]], $f12, $f14
+
+; 32R2-NAN: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R2-NAN: msub.d $[[T1:f[0-9]+]], $[[T0]], $f12, $f14
+; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R2-NAN: mthc1 $zero, $[[T2]]
+; 32R2-NAN: sub.d $f0, $[[T2]], $[[T1]]
+
+; 32R6-DAG: ldc1 $[[T0:f[0-9]+]], 16($sp)
+; 32R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f14
+; 32R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
+; 32R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
+; 64R2-NONAN: nmsub.d $f0, $f14, $f12, $f13
+
+; 64R2-NAN: msub.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64R2-NAN: sub.d $f0, $[[T1]], $[[T0]]
+
+; 64R6-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
+; 64R6-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
+; 64R6-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
+; 64R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
+
%mul = fmul double %a, %b
%sub = fsub double %mul, %c
%sub1 = fsub double 0.000000e+00, %sub
diff --git a/test/CodeGen/Mips/fneg.ll b/test/CodeGen/Mips/fneg.ll
index b322abdaa23c..4fb80fdb4f32 100644
--- a/test/CodeGen/Mips/fneg.ll
+++ b/test/CodeGen/Mips/fneg.ll
@@ -1,17 +1,30 @@
-; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s
+; Check that neg.[ds] is selected and does not depend on -enable-no-nans-fp-math.
+; These instructions obey the Has2008 and ABS2008 configuration bits, which govern
+; conformance to IEEE 754 (1985) and IEEE 754 (2008). When these bits are not
+; present, they conform to 1985.
+; In 1985 mode, neg.[ds] are arithmetic (i.e. they raise invalid operation
+; exceptions when given NaNs). In 2008 mode, they are non-arithmetic (i.e. they
+; are copies and don't raise any exceptions).
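+;
+; As a rough illustration of the 2008-mode behaviour described above (an
+; editorial sketch, not something the checks below verify): negation reduces to
+; flipping the sign bit, e.g.
+;   %bits   = bitcast float %d to i32
+;   %negbit = xor i32 %bits, -2147483648   ; flip bit 31
+;   %neg    = bitcast i32 %negbit to float
+; so it cannot trap, whereas a 1985-mode neg.s is arithmetic and may raise an
+; Invalid Operation exception for a signaling NaN input.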
-define float @foo0(i32 %a, float %d) nounwind readnone {
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32 | FileCheck %s
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32r2 | FileCheck %s
+; RUN: llc < %s -mtriple=mipsel-linux-gnu -mcpu=mips32 -enable-no-nans-fp-math | FileCheck %s
+
+; RUN: llc < %s -mtriple=mips64el-linux-gnu -mcpu=mips64 | FileCheck %s
+; RUN: llc < %s -mtriple=mips64el-linux-gnu -mcpu=mips64 -enable-no-nans-fp-math | FileCheck %s
+
+define float @foo0(float %d) nounwind readnone {
entry:
-; CHECK-NOT: neg.s
+; CHECK-LABEL: foo0:
+; CHECK: neg.s
%sub = fsub float -0.000000e+00, %d
ret float %sub
}
-define double @foo1(i32 %a, double %d) nounwind readnone {
+define double @foo1(double %d) nounwind readnone {
entry:
-; CHECK: foo1
-; CHECK-NOT: neg.d
-; CHECK: jr
+; CHECK-LABEL: foo1:
+; CHECK: neg.d
%sub = fsub double -0.000000e+00, %d
ret double %sub
}
diff --git a/test/CodeGen/Mips/fp-indexed-ls.ll b/test/CodeGen/Mips/fp-indexed-ls.ll
index 1c4a3fdb4a42..787e131f6ec5 100644
--- a/test/CodeGen/Mips/fp-indexed-ls.ll
+++ b/test/CodeGen/Mips/fp-indexed-ls.ll
@@ -1,4 +1,13 @@
-; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R1
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R2
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R6
+; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64R6
+
+; Check that [ls][dwu]xc1 are not emitted for nacl.
+; RUN: llc -mtriple=mipsel-none-nacl-gnu -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=CHECK-NACL
%struct.S = type <{ [4 x float] }>
%struct.S2 = type <{ [4 x double] }>
@@ -12,7 +21,30 @@
define float @foo0(float* nocapture %b, i32 %o) nounwind readonly {
entry:
-; CHECK: lwxc1
+; ALL-LABEL: foo0:
+
+; MIPS32R1: sll $[[T1:[0-9]+]], $5, 2
+; MIPS32R1: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R1: lwc1 $f0, 0($[[T3]])
+
+; MIPS32R2: sll $[[T1:[0-9]+]], $5, 2
+; MIPS32R2: lwxc1 $f0, $[[T1]]($4)
+
+; MIPS32R6: sll $[[T1:[0-9]+]], $5, 2
+; MIPS32R6: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R6: lwc1 $f0, 0($[[T3]])
+
+; MIPS4: sll $[[T0:[0-9]+]], $5, 0
+; MIPS4: dsll $[[T1:[0-9]+]], $[[T0]], 2
+; MIPS4: lwxc1 $f0, $[[T1]]($4)
+
+; MIPS64R6: sll $[[T0:[0-9]+]], $5, 0
+; MIPS64R6: dsll $[[T1:[0-9]+]], $[[T0]], 2
+; MIPS64R6: daddu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS64R6: lwc1 $f0, 0($[[T3]])
+
+; CHECK-NACL-NOT: lwxc1
+
%arrayidx = getelementptr inbounds float* %b, i32 %o
%0 = load float* %arrayidx, align 4
ret float %0
@@ -20,7 +52,30 @@ entry:
define double @foo1(double* nocapture %b, i32 %o) nounwind readonly {
entry:
-; CHECK: ldxc1
+; ALL-LABEL: foo1:
+
+; MIPS32R1: sll $[[T1:[0-9]+]], $5, 3
+; MIPS32R1: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R1: ldc1 $f0, 0($[[T3]])
+
+; MIPS32R2: sll $[[T1:[0-9]+]], $5, 3
+; MIPS32R2: ldxc1 $f0, $[[T1]]($4)
+
+; MIPS32R6: sll $[[T1:[0-9]+]], $5, 3
+; MIPS32R6: addu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS32R6: ldc1 $f0, 0($[[T3]])
+
+; MIPS4: sll $[[T0:[0-9]+]], $5, 0
+; MIPS4: dsll $[[T1:[0-9]+]], $[[T0]], 3
+; MIPS4: ldxc1 $f0, $[[T1]]($4)
+
+; MIPS64R6: sll $[[T0:[0-9]+]], $5, 0
+; MIPS64R6: dsll $[[T1:[0-9]+]], $[[T0]], 3
+; MIPS64R6: daddu $[[T3:[0-9]+]], $4, $[[T1]]
+; MIPS64R6: ldc1 $f0, 0($[[T3]])
+
+; CHECK-NACL-NOT: ldxc1
+
%arrayidx = getelementptr inbounds double* %b, i32 %o
%0 = load double* %arrayidx, align 8
ret double %0
@@ -28,7 +83,23 @@ entry:
define float @foo2(i32 %b, i32 %c) nounwind readonly {
entry:
-; CHECK-NOT: luxc1
+; ALL-LABEL: foo2:
+
+; luxc1 did not exist in MIPS32r1
+; MIPS32R1-NOT: luxc1
+
+; luxc1 is a misnomer since it aligns the given pointer downwards and performs
+; an aligned load. We mustn't use it to handle unaligned loads.
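+; A rough sketch of that alignment behaviour (illustrative only, assuming the
+; usual indexed-load addressing; not checked below): the effective address is
+;   ea = (base + index) & ~0x7
+; so a genuinely misaligned access would silently read the aligned doubleword
+; containing the address rather than the requested bytes.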
+; MIPS32R2-NOT: luxc1
+
+; luxc1 was removed in MIPS32r6
+; MIPS32R6-NOT: luxc1
+
+; MIPS4-NOT: luxc1
+
+; luxc1 was removed in MIPS64r6
+; MIPS64R6-NOT: luxc1
+
%arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
%0 = load float* %arrayidx1, align 1
ret float %0
@@ -36,7 +107,28 @@ entry:
define void @foo3(float* nocapture %b, i32 %o) nounwind {
entry:
-; CHECK: swxc1
+; ALL-LABEL: foo3:
+
+; MIPS32R1-DAG: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R1-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R1-DAG: swc1 $[[T0]], 0($[[T1]])
+
+; MIPS32R2: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R2: swxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS32R6-DAG: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R6-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R6-DAG: swc1 $[[T0]], 0($[[T1]])
+
+; MIPS4: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS4: swxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS64R6-DAG: lwc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS64R6-DAG: daddu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS64R6-DAG: swc1 $[[T0]], 0($[[T1]])
+
+; CHECK-NACL-NOT: swxc1
+
%0 = load float* @gf, align 4
%arrayidx = getelementptr inbounds float* %b, i32 %o
store float %0, float* %arrayidx, align 4
@@ -45,7 +137,28 @@ entry:
define void @foo4(double* nocapture %b, i32 %o) nounwind {
entry:
-; CHECK: sdxc1
+; ALL-LABEL: foo4:
+
+; MIPS32R1-DAG: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R1-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R1-DAG: sdc1 $[[T0]], 0($[[T1]])
+
+; MIPS32R2: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R2: sdxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS32R6-DAG: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS32R6-DAG: addu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS32R6-DAG: sdc1 $[[T0]], 0($[[T1]])
+
+; MIPS4: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS4: sdxc1 $[[T0]], ${{[0-9]+}}($4)
+
+; MIPS64R6-DAG: ldc1 $[[T0:f0]], 0(${{[0-9]+}})
+; MIPS64R6-DAG: daddu $[[T1:[0-9]+]], $4, ${{[0-9]+}}
+; MIPS64R6-DAG: sdc1 $[[T0]], 0($[[T1]])
+
+; CHECK-NACL-NOT: sdxc1
+
%0 = load double* @gd, align 8
%arrayidx = getelementptr inbounds double* %b, i32 %o
store double %0, double* %arrayidx, align 8
@@ -54,7 +167,18 @@ entry:
define void @foo5(i32 %b, i32 %c) nounwind {
entry:
-; CHECK-NOT: suxc1
+; ALL-LABEL: foo5:
+
+; MIPS32R1-NOT: suxc1
+
+; MIPS32R2-NOT: suxc1
+
+; MIPS32R6-NOT: suxc1
+
+; MIPS4-NOT: suxc1
+
+; MIPS64R6-NOT: suxc1
+
%0 = load float* @gf, align 4
%arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
store float %0, float* %arrayidx1, align 1
@@ -63,8 +187,18 @@ entry:
define double @foo6(i32 %b, i32 %c) nounwind readonly {
entry:
-; CHECK: foo6
-; CHECK-NOT: luxc1
+; ALL-LABEL: foo6:
+
+; MIPS32R1-NOT: luxc1
+
+; MIPS32R2-NOT: luxc1
+
+; MIPS32R6-NOT: luxc1
+
+; MIPS4-NOT: luxc1
+
+; MIPS64R6-NOT: luxc1
+
%arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
%0 = load double* %arrayidx1, align 1
ret double %0
@@ -72,8 +206,18 @@ entry:
define void @foo7(i32 %b, i32 %c) nounwind {
entry:
-; CHECK: foo7
-; CHECK-NOT: suxc1
+; ALL-LABEL: foo7:
+
+; MIPS32R1-NOT: suxc1
+
+; MIPS32R2-NOT: suxc1
+
+; MIPS32R6-NOT: suxc1
+
+; MIPS4-NOT: suxc1
+
+; MIPS64R6-NOT: suxc1
+
%0 = load double* @gd, align 8
%arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
store double %0, double* %arrayidx1, align 1
@@ -82,16 +226,36 @@ entry:
define float @foo8() nounwind readonly {
entry:
-; CHECK: foo8
-; CHECK-NOT: luxc1
+; ALL-LABEL: foo8:
+
+; MIPS32R1-NOT: luxc1
+
+; MIPS32R2-NOT: luxc1
+
+; MIPS32R6-NOT: luxc1
+
+; MIPS4-NOT: luxc1
+
+; MIPS64R6-NOT: luxc1
+
%0 = load float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
ret float %0
}
define void @foo9(float %f) nounwind {
entry:
-; CHECK: foo9
-; CHECK-NOT: suxc1
+; ALL-LABEL: foo9:
+
+; MIPS32R1-NOT: suxc1
+
+; MIPS32R2-NOT: suxc1
+
+; MIPS32R6-NOT: suxc1
+
+; MIPS4-NOT: suxc1
+
+; MIPS64R6-NOT: suxc1
+
store float %f, float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
ret void
}
diff --git a/test/CodeGen/Mips/fp16instrinsmc.ll b/test/CodeGen/Mips/fp16instrinsmc.ll
index bb43d2711c26..7ced36c016f7 100644
--- a/test/CodeGen/Mips/fp16instrinsmc.ll
+++ b/test/CodeGen/Mips/fp16instrinsmc.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=1010111 -mips-os16 < %s | FileCheck %s -check-prefix=fmask
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=pic
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -mips32-function-mask=1010111 -mips-os16 < %s | FileCheck %s -check-prefix=fmask
@x = global float 1.500000e+00, align 4
@xn = global float -1.900000e+01, align 4
diff --git a/test/CodeGen/Mips/fp16mix.ll b/test/CodeGen/Mips/fp16mix.ll
index 8d85099ba9f2..a94f838fb675 100644
--- a/test/CodeGen/Mips/fp16mix.ll
+++ b/test/CodeGen/Mips/fp16mix.ll
@@ -1,8 +1,8 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=10 -mips-os16 < %s | FileCheck %s -check-prefix=fmask1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -mips32-function-mask=10 -mips-os16 < %s | FileCheck %s -check-prefix=fmask1
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=01 -mips-os16 < %s | FileCheck %s -check-prefix=fmask2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -mips32-function-mask=01 -mips-os16 < %s | FileCheck %s -check-prefix=fmask2
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips32-function-mask=10. -mips-os16 < %s | FileCheck %s -check-prefix=fmask1nr
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static -mips32-function-mask=10. -mips-os16 < %s | FileCheck %s -check-prefix=fmask1nr
; Function Attrs: nounwind optsize readnone
define void @foo1() {
@@ -17,7 +17,7 @@ entry:
; fmask1: .set reorder
; fmask1: .end foo1
; fmask2: .ent foo1
-; fmask2: save {{.*}}
+; fmask2: jrc $ra
; fmask2: .end foo1
; fmask1nr: .ent foo1
; fmask1nr: .set noreorder
@@ -42,10 +42,10 @@ entry:
; fmask2: .set reorder
; fmask2: .end foo2
; fmask1: .ent foo2
-; fmask1: save {{.*}}
+; fmask1: jrc $ra
; fmask1: .end foo2
; fmask1nr: .ent foo2
-; fmask1nr: save {{.*}}
+; fmask1nr: jrc $ra
; fmask1nr: .end foo2
}
@@ -62,10 +62,10 @@ entry:
; fmask1: .set reorder
; fmask1: .end foo3
; fmask2: .ent foo3
-; fmask2: save {{.*}}
+; fmask2: jrc $ra
; fmask2: .end foo3
; fmask1r: .ent foo3
-; fmask1r: save {{.*}}
+; fmask1r: jrc $ra
; fmask1r: .end foo3
}
@@ -82,10 +82,10 @@ entry:
; fmask2: .set reorder
; fmask2: .end foo4
; fmask1: .ent foo4
-; fmask1: save {{.*}}
+; fmask1: jrc $ra
; fmask1: .end foo4
; fmask1nr: .ent foo4
-; fmask1nr: save {{.*}}
+; fmask1nr: jrc $ra
; fmask1nr: .end foo4
}
diff --git a/test/CodeGen/Mips/fp16static.ll b/test/CodeGen/Mips/fp16static.ll
index 240ec75a36b6..beb063db15ca 100644
--- a/test/CodeGen/Mips/fp16static.ll
+++ b/test/CodeGen/Mips/fp16static.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s -check-prefix=CHECK-STATIC16
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=CHECK-STATIC16
@x = common global float 0.000000e+00, align 4
diff --git a/test/CodeGen/Mips/fp64a.ll b/test/CodeGen/Mips/fp64a.ll
new file mode 100644
index 000000000000..5c2c87373a32
--- /dev/null
+++ b/test/CodeGen/Mips/fp64a.ll
@@ -0,0 +1,197 @@
+; Test that the FP64A ABI performs double precision moves via a spill/reload.
+; The requirement is really that odd-numbered double precision registers do not
+; use mfc1/mtc1 to move the bottom 32 bits (because the hardware will redirect
+; this to the top 32 bits of the even register), but we have to make the
+; decision before register allocation, so we do this for all double-precision
+; values.
+
+; We don't test MIPS32r1 since support for 64-bit coprocessors (such as a 64-bit
+; FPU) on a 32-bit architecture was added in MIPS32r2.
+; FIXME: We currently don't test that attempting to use FP64 on MIPS32r1 is an
+; error either. This is because a large number of CodeGen tests are
+; incorrectly using this case. We should fix those test cases, then add
+; this check here.
+
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=fp64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R2-NO-FP64A-BE
+; RUN: llc -march=mips -mcpu=mips32r2 -mattr=fp64,nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R2-FP64A-BE
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=fp64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R2-NO-FP64A-LE
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=fp64,nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R2-FP64A-LE
+
+; RUN: llc -march=mips64 -mcpu=mips64 -mattr=fp64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-NO-FP64A
+; RUN: not llc -march=mips64 -mcpu=mips64 -mattr=fp64,nooddspreg < %s 2>&1 | FileCheck %s -check-prefix=64-FP64A
+; RUN: llc -march=mips64el -mcpu=mips64 -mattr=fp64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-NO-FP64A
+; RUN: not llc -march=mips64el -mcpu=mips64 -mattr=fp64,nooddspreg < %s 2>&1 | FileCheck %s -check-prefix=64-FP64A
+
+; 64-FP64A: LLVM ERROR: -mattr=+nooddspreg requires the O32 ABI.
+
+declare double @dbl();
+
+define double @call1(double %d, ...) {
+ ret double %d
+
+; ALL-LABEL: call1:
+
+; 32R2-NO-FP64A-LE-NOT: addiu $sp, $sp
+; 32R2-NO-FP64A-LE: mtc1 $4, $f0
+; 32R2-NO-FP64A-LE: mthc1 $5, $f0
+
+; 32R2-NO-FP64A-BE-NOT: addiu $sp, $sp
+; 32R2-NO-FP64A-BE: mtc1 $5, $f0
+; 32R2-NO-FP64A-BE: mthc1 $4, $f0
+
+; 32R2-FP64A-LE: addiu $sp, $sp, -8
+; 32R2-FP64A-LE: sw $4, 0($sp)
+; 32R2-FP64A-LE: sw $5, 4($sp)
+; 32R2-FP64A-LE: ldc1 $f0, 0($sp)
+
+; 32R2-FP64A-BE: addiu $sp, $sp, -8
+; 32R2-FP64A-BE: sw $5, 0($sp)
+; 32R2-FP64A-BE: sw $4, 4($sp)
+; 32R2-FP64A-BE: ldc1 $f0, 0($sp)
+
+; 64-NO-FP64A: daddiu $sp, $sp, -64
+; 64-NO-FP64A: mov.d $f0, $f12
+}
+
+define double @call2(i32 %i, double %d) {
+ ret double %d
+
+; ALL-LABEL: call2:
+
+; 32R2-NO-FP64A-LE: mtc1 $6, $f0
+; 32R2-NO-FP64A-LE: mthc1 $7, $f0
+
+; 32R2-NO-FP64A-BE: mtc1 $7, $f0
+; 32R2-NO-FP64A-BE: mthc1 $6, $f0
+
+; 32R2-FP64A-LE: addiu $sp, $sp, -8
+; 32R2-FP64A-LE: sw $6, 0($sp)
+; 32R2-FP64A-LE: sw $7, 4($sp)
+; 32R2-FP64A-LE: ldc1 $f0, 0($sp)
+
+; 32R2-FP64A-BE: addiu $sp, $sp, -8
+; 32R2-FP64A-BE: sw $7, 0($sp)
+; 32R2-FP64A-BE: sw $6, 4($sp)
+; 32R2-FP64A-BE: ldc1 $f0, 0($sp)
+
+; 64-NO-FP64A-NOT: daddiu $sp, $sp
+; 64-NO-FP64A: mov.d $f0, $f13
+}
+
+define double @call3(float %f1, float %f2, double %d) {
+ ret double %d
+
+; ALL-LABEL: call3:
+
+; 32R2-NO-FP64A-LE: mtc1 $6, $f0
+; 32R2-NO-FP64A-LE: mthc1 $7, $f0
+
+; 32R2-NO-FP64A-BE: mtc1 $7, $f0
+; 32R2-NO-FP64A-BE: mthc1 $6, $f0
+
+; 32R2-FP64A-LE: addiu $sp, $sp, -8
+; 32R2-FP64A-LE: sw $6, 0($sp)
+; 32R2-FP64A-LE: sw $7, 4($sp)
+; 32R2-FP64A-LE: ldc1 $f0, 0($sp)
+
+; 32R2-FP64A-BE: addiu $sp, $sp, -8
+; 32R2-FP64A-BE: sw $7, 0($sp)
+; 32R2-FP64A-BE: sw $6, 4($sp)
+; 32R2-FP64A-BE: ldc1 $f0, 0($sp)
+
+; 64-NO-FP64A-NOT: daddiu $sp, $sp
+; 64-NO-FP64A: mov.d $f0, $f14
+}
+
+define double @call4(float %f, double %d, ...) {
+ ret double %d
+
+; ALL-LABEL: call4:
+
+; 32R2-NO-FP64A-LE: mtc1 $6, $f0
+; 32R2-NO-FP64A-LE: mthc1 $7, $f0
+
+; 32R2-NO-FP64A-BE: mtc1 $7, $f0
+; 32R2-NO-FP64A-BE: mthc1 $6, $f0
+
+; 32R2-FP64A-LE: addiu $sp, $sp, -8
+; 32R2-FP64A-LE: sw $6, 0($sp)
+; 32R2-FP64A-LE: sw $7, 4($sp)
+; 32R2-FP64A-LE: ldc1 $f0, 0($sp)
+
+; 32R2-FP64A-BE: addiu $sp, $sp, -8
+; 32R2-FP64A-BE: sw $7, 0($sp)
+; 32R2-FP64A-BE: sw $6, 4($sp)
+; 32R2-FP64A-BE: ldc1 $f0, 0($sp)
+
+; 64-NO-FP64A: daddiu $sp, $sp, -48
+; 64-NO-FP64A: mov.d $f0, $f13
+}
+
+define double @call5(double %a, double %b, ...) {
+ %1 = fsub double %a, %b
+ ret double %1
+
+; ALL-LABEL: call5:
+
+; 32R2-NO-FP64A-LE-DAG: mtc1 $4, $[[T0:f[0-9]+]]
+; 32R2-NO-FP64A-LE-DAG: mthc1 $5, $[[T0:f[0-9]+]]
+; 32R2-NO-FP64A-LE-DAG: mtc1 $6, $[[T1:f[0-9]+]]
+; 32R2-NO-FP64A-LE-DAG: mthc1 $7, $[[T1:f[0-9]+]]
+; 32R2-NO-FP64A-LE: sub.d $f0, $[[T0]], $[[T1]]
+
+; 32R2-NO-FP64A-BE-DAG: mtc1 $5, $[[T0:f[0-9]+]]
+; 32R2-NO-FP64A-BE-DAG: mthc1 $4, $[[T0:f[0-9]+]]
+; 32R2-NO-FP64A-BE-DAG: mtc1 $7, $[[T1:f[0-9]+]]
+; 32R2-NO-FP64A-BE-DAG: mthc1 $6, $[[T1:f[0-9]+]]
+; 32R2-NO-FP64A-BE: sub.d $f0, $[[T0]], $[[T1]]
+
+; 32R2-FP64A-LE: addiu $sp, $sp, -8
+; 32R2-FP64A-LE: sw $6, 0($sp)
+; 32R2-FP64A-LE: sw $7, 4($sp)
+; 32R2-FP64A-LE: ldc1 $[[T1:f[0-9]+]], 0($sp)
+; 32R2-FP64A-LE: sw $4, 0($sp)
+; 32R2-FP64A-LE: sw $5, 4($sp)
+; 32R2-FP64A-LE: ldc1 $[[T0:f[0-9]+]], 0($sp)
+; 32R2-FP64A-LE: sub.d $f0, $[[T0]], $[[T1]]
+
+; 32R2-FP64A-BE: addiu $sp, $sp, -8
+; 32R2-FP64A-BE: sw $7, 0($sp)
+; 32R2-FP64A-BE: sw $6, 4($sp)
+; 32R2-FP64A-BE: ldc1 $[[T1:f[0-9]+]], 0($sp)
+; 32R2-FP64A-BE: sw $5, 0($sp)
+; 32R2-FP64A-BE: sw $4, 4($sp)
+; 32R2-FP64A-BE: ldc1 $[[T0:f[0-9]+]], 0($sp)
+; 32R2-FP64A-BE: sub.d $f0, $[[T0]], $[[T1]]
+
+; 64-NO-FP64A: sub.d $f0, $f12, $f13
+}
+
+define double @move_from(double %d) {
+ %1 = call double @dbl()
+ %2 = call double @call2(i32 0, double %1)
+ ret double %2
+
+; ALL-LABEL: move_from:
+
+; 32R2-NO-FP64A-LE-DAG: mfc1 $6, $f0
+; 32R2-NO-FP64A-LE-DAG: mfhc1 $7, $f0
+
+; 32R2-NO-FP64A-BE-DAG: mfc1 $7, $f0
+; 32R2-NO-FP64A-BE-DAG: mfhc1 $6, $f0
+
+; 32R2-FP64A-LE: addiu $sp, $sp, -32
+; 32R2-FP64A-LE: sdc1 $f0, 16($sp)
+; 32R2-FP64A-LE: lw $6, 16($sp)
+; FIXME: This store is redundant
+; 32R2-FP64A-LE: sdc1 $f0, 16($sp)
+; 32R2-FP64A-LE: lw $7, 20($sp)
+
+; 32R2-FP64A-BE: addiu $sp, $sp, -32
+; 32R2-FP64A-BE: sdc1 $f0, 16($sp)
+; 32R2-FP64A-BE: lw $6, 20($sp)
+; FIXME: This store is redundant
+; 32R2-FP64A-BE: sdc1 $f0, 16($sp)
+; 32R2-FP64A-BE: lw $7, 16($sp)
+
+; 64-NO-FP64A: mov.d $f13, $f0
+}
diff --git a/test/CodeGen/Mips/fpbr.ll b/test/CodeGen/Mips/fpbr.ll
index a136557cc4a3..311b83015a56 100644
--- a/test/CodeGen/Mips/fpbr.ll
+++ b/test/CodeGen/Mips/fpbr.ll
@@ -1,9 +1,25 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=32-FCC
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=32-FCC
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=32-GPR
+; RUN: llc < %s -march=mips64el -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=64-FCC
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=FCC -check-prefix=64-FCC
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=GPR -check-prefix=64-GPR
define void @func0(float %f2, float %f3) nounwind {
entry:
-; CHECK: c.eq.s
-; CHECK: bc1f
+; ALL-LABEL: func0:
+
+; 32-FCC: c.eq.s $f12, $f14
+; 64-FCC: c.eq.s $f12, $f13
+; FCC: bc1f $BB0_2
+
+; 32-GPR: cmp.eq.s $[[FGRCC:f[0-9]+]], $f12, $f14
+; 64-GPR: cmp.eq.s $[[FGRCC:f[0-9]+]], $f12, $f13
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; FIXME: We ought to be able to transform not+bnez -> beqz
+; GPR: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB0_2
+
%cmp = fcmp oeq float %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -25,8 +41,18 @@ declare void @g1(...)
define void @func1(float %f2, float %f3) nounwind {
entry:
-; CHECK: c.olt.s
-; CHECK: bc1f
+; ALL-LABEL: func1:
+
+; 32-FCC: c.olt.s $f12, $f14
+; 64-FCC: c.olt.s $f12, $f13
+; FCC: bc1f $BB1_2
+
+; 32-GPR: cmp.ule.s $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ule.s $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB1_2
+
%cmp = fcmp olt float %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -44,8 +70,18 @@ if.end: ; preds = %if.else, %if.then
define void @func2(float %f2, float %f3) nounwind {
entry:
-; CHECK: c.ole.s
-; CHECK: bc1t
+; ALL-LABEL: func2:
+
+; 32-FCC: c.ole.s $f12, $f14
+; 64-FCC: c.ole.s $f12, $f13
+; FCC: bc1t $BB2_2
+
+; 32-GPR: cmp.ult.s $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ult.s $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: beqz $[[GPRCC]], $BB2_2
+
%cmp = fcmp ugt float %f2, %f3
br i1 %cmp, label %if.else, label %if.then
@@ -63,8 +99,19 @@ if.end: ; preds = %if.else, %if.then
define void @func3(double %f2, double %f3) nounwind {
entry:
-; CHECK: c.eq.d
-; CHECK: bc1f
+; ALL-LABEL: func3:
+
+; 32-FCC: c.eq.d $f12, $f14
+; 64-FCC: c.eq.d $f12, $f13
+; FCC: bc1f $BB3_2
+
+; 32-GPR: cmp.eq.d $[[FGRCC:f[0-9]+]], $f12, $f14
+; 64-GPR: cmp.eq.d $[[FGRCC:f[0-9]+]], $f12, $f13
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; FIXME: We ought to be able to transform not+bnez -> beqz
+; GPR: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB3_2
+
%cmp = fcmp oeq double %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -82,8 +129,18 @@ if.end: ; preds = %if.else, %if.then
define void @func4(double %f2, double %f3) nounwind {
entry:
-; CHECK: c.olt.d
-; CHECK: bc1f
+; ALL-LABEL: func4:
+
+; 32-FCC: c.olt.d $f12, $f14
+; 64-FCC: c.olt.d $f12, $f13
+; FCC: bc1f $BB4_2
+
+; 32-GPR: cmp.ule.d $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ule.d $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: bnez $[[GPRCC]], $BB4_2
+
%cmp = fcmp olt double %f2, %f3
br i1 %cmp, label %if.then, label %if.else
@@ -101,8 +158,18 @@ if.end: ; preds = %if.else, %if.then
define void @func5(double %f2, double %f3) nounwind {
entry:
-; CHECK: c.ole.d
-; CHECK: bc1t
+; ALL-LABEL: func5:
+
+; 32-FCC: c.ole.d $f12, $f14
+; 64-FCC: c.ole.d $f12, $f13
+; FCC: bc1t $BB5_2
+
+; 32-GPR: cmp.ult.d $[[FGRCC:f[0-9]+]], $f14, $f12
+; 64-GPR: cmp.ult.d $[[FGRCC:f[0-9]+]], $f13, $f12
+; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC:f[0-9]+]]
+; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
+; GPR: beqz $[[GPRCC]], $BB5_2
+
%cmp = fcmp ugt double %f2, %f3
br i1 %cmp, label %if.else, label %if.then
diff --git a/test/CodeGen/Mips/fpneeded.ll b/test/CodeGen/Mips/fpneeded.ll
index dcdebb92e40e..fdd8e8f707ef 100644
--- a/test/CodeGen/Mips/fpneeded.ll
+++ b/test/CodeGen/Mips/fpneeded.ll
@@ -10,7 +10,7 @@ entry:
ret float 1.000000e+00
}
-; 32: .set nomips16 # @fv
+; 32: .set nomips16
; 32: .ent fv
; 32: .set noreorder
; 32: .set nomacro
@@ -26,7 +26,7 @@ entry:
ret double 2.000000e+00
}
-; 32: .set nomips16 # @dv
+; 32: .set nomips16
; 32: .ent dv
; 32: .set noreorder
; 32: .set nomacro
@@ -44,7 +44,7 @@ entry:
ret void
}
-; 32: .set nomips16 # @vf
+; 32: .set nomips16
; 32: .ent vf
; 32: .set noreorder
; 32: .set nomacro
@@ -62,7 +62,7 @@ entry:
ret void
}
-; 32: .set nomips16 # @vd
+; 32: .set nomips16
; 32: .ent vd
; 32: .set noreorder
; 32: .set nomacro
@@ -83,7 +83,7 @@ entry:
ret void
}
-; 32: .set nomips16 # @foo1
+; 32: .set nomips16
; 32: .ent foo1
; 32: .set noreorder
; 32: .set nomacro
@@ -102,7 +102,7 @@ entry:
}
-; 32: .set nomips16 # @foo2
+; 32: .set nomips16
; 32: .ent foo2
; 32: .set noreorder
; 32: .set nomacro
@@ -120,7 +120,7 @@ entry:
ret void
}
-; 32: .set nomips16 # @foo3
+; 32: .set nomips16
; 32: .ent foo3
; 32: .set noreorder
; 32: .set nomacro
@@ -138,7 +138,7 @@ entry:
ret void
}
-; 32: .set mips16 # @vv
+; 32: .set mips16
; 32: .ent vv
; 32: save {{.+}}
diff --git a/test/CodeGen/Mips/fpnotneeded.ll b/test/CodeGen/Mips/fpnotneeded.ll
index b4fab6414223..e12d7baacdbb 100644
--- a/test/CodeGen/Mips/fpnotneeded.ll
+++ b/test/CodeGen/Mips/fpnotneeded.ll
@@ -1,4 +1,6 @@
-; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-os16 | FileCheck %s -check-prefix=32
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-os16 | FileCheck %s -check-prefix=32
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -relocation-model=static -O3 -mips16-constant-islands < %s -mips-os16 | FileCheck %s -check-prefix=cisle
@i = global i32 1, align 4
@f = global float 1.000000e+00, align 4
@@ -8,7 +10,7 @@ entry:
ret void
}
-; 32: .set mips16 # @vv
+; 32: .set mips16
; 32: .ent vv
; 32: save {{.+}}
@@ -21,7 +23,7 @@ entry:
ret i32 %0
}
-; 32: .set mips16 # @iv
+; 32: .set mips16
; 32: .ent iv
; 32: save {{.+}}
@@ -37,7 +39,7 @@ entry:
ret void
}
-; 32: .set mips16 # @vif
+; 32: .set mips16
; 32: .ent vif
; 32: save {{.+}}
@@ -50,13 +52,15 @@ entry:
ret void
}
-; 32: .set mips16 # @foo
+; 32: .set mips16
; 32: .ent foo
; 32: save {{.+}}
; 32: restore {{.+}}
; 32: .end foo
+; cisle: .end foo
+
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
@@ -65,7 +69,7 @@ entry:
ret float 1.000000e+00
}
-; 32: .set nomips16 # @fv
+; 32: .set nomips16
; 32: .ent fv
; 32: .set noreorder
; 32: .set nomacro
diff --git a/test/CodeGen/Mips/fptr2.ll b/test/CodeGen/Mips/fptr2.ll
index 77028dbde9aa..c8b5e0d1771e 100644
--- a/test/CodeGen/Mips/fptr2.ll
+++ b/test/CodeGen/Mips/fptr2.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=static16
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=static16
; Function Attrs: nounwind
define double @my_mul(double %a, double %b) #0 {
diff --git a/test/CodeGen/Mips/fpxx.ll b/test/CodeGen/Mips/fpxx.ll
new file mode 100644
index 000000000000..7e2ed22e2d80
--- /dev/null
+++ b/test/CodeGen/Mips/fpxx.ll
@@ -0,0 +1,221 @@
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-NOFPXX
+; RUN: llc -march=mipsel -mcpu=mips32 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=32-FPXX
+
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R2-NOFPXX
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R2-FPXX
+
+; RUN: llc -march=mips64 -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=4-NOFPXX
+; RUN: not llc -march=mips64 -mcpu=mips4 -mattr=fpxx < %s 2>&1 | FileCheck %s -check-prefix=4-FPXX
+
+; RUN: llc -march=mips64 -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-NOFPXX
+; RUN: not llc -march=mips64 -mcpu=mips64 -mattr=fpxx < %s 2>&1 | FileCheck %s -check-prefix=64-FPXX
+
+; RUN-TODO: llc -march=mips64 -mcpu=mips4 -mattr=-n64,+o32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=4-O32-NOFPXX
+; RUN-TODO: llc -march=mips64 -mcpu=mips4 -mattr=-n64,+o32 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=4-O32-FPXX
+
+; RUN-TODO: llc -march=mips64 -mcpu=mips64 -mattr=-n64,+o32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-O32-NOFPXX
+; RUN-TODO: llc -march=mips64 -mcpu=mips64 -mattr=-n64,+o32 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-O32-FPXX
+
+declare double @dbl();
+
+; 4-FPXX: LLVM ERROR: FPXX is not permitted for the N32/N64 ABI's.
+; 64-FPXX: LLVM ERROR: FPXX is not permitted for the N32/N64 ABI's.
+
+define double @test1(double %d, ...) {
+ ret double %d
+
+; ALL-LABEL: test1:
+
+; 32-NOFPXX: mtc1 $4, $f0
+; 32-NOFPXX: mtc1 $5, $f1
+
+; 32-FPXX: addiu $sp, $sp, -8
+; 32-FPXX: sw $4, 0($sp)
+; 32-FPXX: sw $5, 4($sp)
+; 32-FPXX: ldc1 $f0, 0($sp)
+
+; 32R2-NOFPXX: mtc1 $4, $f0
+; 32R2-NOFPXX: mthc1 $5, $f0
+
+; 32R2-FPXX: mtc1 $4, $f0
+; 32R2-FPXX: mthc1 $5, $f0
+
+; floats/doubles are not passed in integer registers for n64, so dmtc1 is not used.
+; 4-NOFPXX: mov.d $f0, $f12
+
+; 64-NOFPXX: mov.d $f0, $f12
+}
+
+define double @test2(i32 %i, double %d) {
+ ret double %d
+
+; ALL-LABEL: test2:
+
+; 32-NOFPXX: mtc1 $6, $f0
+; 32-NOFPXX: mtc1 $7, $f1
+
+; 32-FPXX: addiu $sp, $sp, -8
+; 32-FPXX: sw $6, 0($sp)
+; 32-FPXX: sw $7, 4($sp)
+; 32-FPXX: ldc1 $f0, 0($sp)
+
+; 32R2-NOFPXX: mtc1 $6, $f0
+; 32R2-NOFPXX: mthc1 $7, $f0
+
+; 32R2-FPXX: mtc1 $6, $f0
+; 32R2-FPXX: mthc1 $7, $f0
+
+; 4-NOFPXX: mov.d $f0, $f13
+
+; 64-NOFPXX: mov.d $f0, $f13
+}
+
+define double @test3(float %f1, float %f2, double %d) {
+ ret double %d
+
+; ALL-LABEL: test3:
+
+; 32-NOFPXX: mtc1 $6, $f0
+; 32-NOFPXX: mtc1 $7, $f1
+
+; 32-FPXX: addiu $sp, $sp, -8
+; 32-FPXX: sw $6, 0($sp)
+; 32-FPXX: sw $7, 4($sp)
+; 32-FPXX: ldc1 $f0, 0($sp)
+
+; 32R2-NOFPXX: mtc1 $6, $f0
+; 32R2-NOFPXX: mthc1 $7, $f0
+
+; 32R2-FPXX: mtc1 $6, $f0
+; 32R2-FPXX: mthc1 $7, $f0
+
+; 4-NOFPXX: mov.d $f0, $f14
+
+; 64-NOFPXX: mov.d $f0, $f14
+}
+
+define double @test4(float %f, double %d, ...) {
+ ret double %d
+
+; ALL-LABEL: test4:
+
+; 32-NOFPXX: mtc1 $6, $f0
+; 32-NOFPXX: mtc1 $7, $f1
+
+; 32-FPXX: addiu $sp, $sp, -8
+; 32-FPXX: sw $6, 0($sp)
+; 32-FPXX: sw $7, 4($sp)
+; 32-FPXX: ldc1 $f0, 0($sp)
+
+; 32R2-NOFPXX: mtc1 $6, $f0
+; 32R2-NOFPXX: mthc1 $7, $f0
+
+; 32R2-FPXX: mtc1 $6, $f0
+; 32R2-FPXX: mthc1 $7, $f0
+
+; 4-NOFPXX: mov.d $f0, $f13
+
+; 64-NOFPXX: mov.d $f0, $f13
+}
+
+define double @test5() {
+ ret double 0.000000e+00
+
+; ALL-LABEL: test5:
+
+; 32-NOFPXX: mtc1 $zero, $f0
+; 32-NOFPXX: mtc1 $zero, $f1
+
+; 32-FPXX: addiu $sp, $sp, -8
+; 32-FPXX: sw $zero, 0($sp)
+; 32-FPXX: sw $zero, 4($sp)
+; 32-FPXX: ldc1 $f0, 0($sp)
+
+; 32R2-NOFPXX: mtc1 $zero, $f0
+; 32R2-NOFPXX: mthc1 $zero, $f0
+
+; 32R2-FPXX: mtc1 $zero, $f0
+; 32R2-FPXX: mthc1 $zero, $f0
+
+; 4-NOFPXX: dmtc1 $zero, $f0
+
+; 64-NOFPXX: dmtc1 $zero, $f0
+}
+
+define double @test6(double %a, double %b, ...) {
+ %1 = fsub double %a, %b
+ ret double %1
+
+; ALL-LABEL: test6:
+
+; 32-NOFPXX-DAG: mtc1 $4, $[[T0:f[0-9]+]]
+; 32-NOFPXX-DAG: mtc1 $5, ${{f[0-9]*[13579]}}
+; 32-NOFPXX-DAG: mtc1 $6, $[[T1:f[0-9]+]]
+; 32-NOFPXX-DAG: mtc1 $7, ${{f[0-9]*[13579]}}
+; 32-NOFPXX: sub.d $f0, $[[T0]], $[[T1]]
+
+; 32-FPXX: addiu $sp, $sp, -8
+; 32-FPXX: sw $6, 0($sp)
+; 32-FPXX: sw $7, 4($sp)
+; 32-FPXX: ldc1 $[[T1:f[0-9]+]], 0($sp)
+; 32-FPXX: sw $4, 0($sp)
+; 32-FPXX: sw $5, 4($sp)
+; 32-FPXX: ldc1 $[[T0:f[0-9]+]], 0($sp)
+; 32-FPXX: sub.d $f0, $[[T0]], $[[T1]]
+
+; 32R2-NOFPXX-DAG: mtc1 $4, $[[T0:f[0-9]+]]
+; 32R2-NOFPXX-DAG: mthc1 $5, $[[T0]]
+; 32R2-NOFPXX-DAG: mtc1 $6, $[[T1:f[0-9]+]]
+; 32R2-NOFPXX-DAG: mthc1 $7, $[[T1]]
+; 32R2-NOFPXX: sub.d $f0, $[[T0]], $[[T1]]
+
+; 32R2-FPXX-DAG: mtc1 $4, $[[T0:f[0-9]+]]
+; 32R2-FPXX-DAG: mthc1 $5, $[[T0]]
+; 32R2-FPXX-DAG: mtc1 $6, $[[T1:f[0-9]+]]
+; 32R2-FPXX-DAG: mthc1 $7, $[[T1]]
+; 32R2-FPXX: sub.d $f0, $[[T0]], $[[T1]]
+
+; floats/doubles are not passed in integer registers for n64, so dmtc1 is not used.
+; 4-NOFPXX: sub.d $f0, $f12, $f13
+
+; floats/doubles are not passed in integer registers for n64, so dmtc1 is not used.
+; 64-NOFPXX: sub.d $f0, $f12, $f13
+}
+
+define double @move_from1(double %d) {
+ %1 = call double @dbl()
+ %2 = call double @test2(i32 0, double %1)
+ ret double %2
+
+; ALL-LABEL: move_from1:
+
+; 32-NOFPXX-DAG: mfc1 $6, $f0
+; 32-NOFPXX-DAG: mfc1 $7, $f1
+
+; 32-FPXX: addiu $sp, $sp, -32
+; 32-FPXX: sdc1 $f0, 16($sp)
+; 32-FPXX: lw $6, 16($sp)
+; FIXME: This store is redundant
+; 32-FPXX: sdc1 $f0, 16($sp)
+; 32-FPXX: lw $7, 20($sp)
+
+; 32R2-NOFPXX-DAG: mfc1 $6, $f0
+; 32R2-NOFPXX-DAG: mfhc1 $7, $f0
+
+; 32R2-FPXX-DAG: mfc1 $6, $f0
+; 32R2-FPXX-DAG: mfhc1 $7, $f0
+
+; floats/doubles are not passed in integer registers for n64, so dmfc1 is not used.
+; We can't use inline assembly to force a copy either because trying to force
+; a copy to a GPR this way fails with "couldn't allocate input reg for
+; constraint 'r'". It therefore seems impossible to test the generation of dmfc1
+; in a simple test.
+; 4-NOFPXX: mov.d $f13, $f0
+
+; floats/doubles are not passed in integer registers for n64, so dmfc1 is not used.
+; We can't use inline assembly to force a copy either because trying to force
+; a copy to a GPR this way fails with "couldn't allocate input reg for
+; constraint 'r'". It therefore seems impossible to test the generation of dmfc1
+; in a simple test.
+; 64-NOFPXX: mov.d $f13, $f0
+}
diff --git a/test/CodeGen/Mips/global-address.ll b/test/CodeGen/Mips/global-address.ll
index 0d49a7424ad6..0785cfcc0515 100644
--- a/test/CodeGen/Mips/global-address.ll
+++ b/test/CodeGen/Mips/global-address.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-O32
; RUN: llc -march=mipsel -relocation-model=static -mtriple=mipsel-linux-gnu < %s | FileCheck %s -check-prefix=STATIC-O32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n32 -relocation-model=static -mtriple=mipsel-linux-gnu < %s | FileCheck %s -check-prefix=STATIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=static -mtriple=mipsel-linux-gnu < %s | FileCheck %s -check-prefix=STATIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
@s1 = internal unnamed_addr global i32 8, align 4
@g1 = external global i32
diff --git a/test/CodeGen/Mips/helloworld.ll b/test/CodeGen/Mips/helloworld.ll
index 058a041c16a9..36f4ad6b55c0 100644
--- a/test/CodeGen/Mips/helloworld.ll
+++ b/test/CodeGen/Mips/helloworld.ll
@@ -15,7 +15,7 @@ entry:
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0))
ret i32 0
-; SR: .set mips16 # @main
+; SR: .set mips16
; SR32: .set nomips16
; SR32: .ent main
@@ -25,10 +25,9 @@ entry:
; SR32: .set noreorder
; SR32: .set nomacro
; SR32: .set noat
-; SR: save $ra, $s0, $s1, $s2, [[FS:[0-9]+]]
+; SR: save $ra, 24 # 16 bit inst
; PE: .ent main
-; PE: .align 2
-; PE-NEXT: li $[[T1:[0-9]+]], %hi(_gp_disp)
+; PE: li $[[T1:[0-9]+]], %hi(_gp_disp)
; PE-NEXT: addiu $[[T2:[0-9]+]], $pc, %lo(_gp_disp)
; PE: sll $[[T3:[0-9]+]], $[[T1]], 16
; C1: lw ${{[0-9]+}}, %got($.str)(${{[0-9]+}})
@@ -37,7 +36,7 @@ entry:
; C2: move $25, ${{[0-9]+}}
; C1: move $gp, ${{[0-9]+}}
; C1: jalrc ${{[0-9]+}}
-; SR: restore $ra, $s0, $s1, $s2, [[FS]]
+; SR: restore $ra, 24 # 16 bit inst
; PE: li $2, 0
; PE: jrc $ra
diff --git a/test/CodeGen/Mips/hf16_1.ll b/test/CodeGen/Mips/hf16_1.ll
index c7454ee0a8dd..9879cd523af3 100644
--- a/test/CodeGen/Mips/hf16_1.ll
+++ b/test/CodeGen/Mips/hf16_1.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -soft-float -mips16-hard-float -O3 < %s | FileCheck %s -check-prefix=1
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -soft-float -mips16-hard-float -O3 < %s | FileCheck %s -check-prefix=2
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=1
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=2
@x = common global float 0.000000e+00, align 4
diff --git a/test/CodeGen/Mips/hf16call32.ll b/test/CodeGen/Mips/hf16call32.ll
index 461438e8bec0..aec9c71c485b 100644
--- a/test/CodeGen/Mips/hf16call32.ll
+++ b/test/CodeGen/Mips/hf16call32.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=stel
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=stel
@x = common global float 0.000000e+00, align 4
@y = common global float 0.000000e+00, align 4
diff --git a/test/CodeGen/Mips/hf16call32_body.ll b/test/CodeGen/Mips/hf16call32_body.ll
index 34bae26f85f3..adac31460c44 100644
--- a/test/CodeGen/Mips/hf16call32_body.ll
+++ b/test/CodeGen/Mips/hf16call32_body.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=stel
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=stel
@x = external global float
@xd = external global double
diff --git a/test/CodeGen/Mips/hf1_body.ll b/test/CodeGen/Mips/hf1_body.ll
index b2cce92aa1a4..5acfe86373d9 100644
--- a/test/CodeGen/Mips/hf1_body.ll
+++ b/test/CodeGen/Mips/hf1_body.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=picfp16
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=picfp16
@x = external global float
diff --git a/test/CodeGen/Mips/hfptrcall.ll b/test/CodeGen/Mips/hfptrcall.ll
index 25639dad63a8..9df8d900693c 100644
--- a/test/CodeGen/Mips/hfptrcall.ll
+++ b/test/CodeGen/Mips/hfptrcall.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=picel
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=picel
@ptrsv = global float ()* @sv, align 4
@ptrdv = global double ()* @dv, align 4
diff --git a/test/CodeGen/Mips/i32k.ll b/test/CodeGen/Mips/i32k.ll
index f4dd1eb78a1d..73f1302beec0 100644
--- a/test/CodeGen/Mips/i32k.ll
+++ b/test/CodeGen/Mips/i32k.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic -mips16-constant-islands=false -O3 < %s | FileCheck %s -check-prefix=16
@.str = private unnamed_addr constant [4 x i8] c"%i\0A\00", align 1
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-I-1.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-I-1.ll
index f9e53cbb07a4..c09108dc0744 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-I-1.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-I-1.ll
@@ -9,7 +9,7 @@ define i32 @main() nounwind {
entry:
;CHECK-ERRORS: error: invalid operand for inline asm constraint 'I'
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,I"(i32 7, i32 1048576) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,I"(i32 7, i32 1048576) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-J.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-J.ll
index 1fdf672fe197..2b24b0f82c57 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-J.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-J.ll
@@ -10,7 +10,7 @@ entry:
;CHECK-ERRORS: error: invalid operand for inline asm constraint 'J'
- tail call i32 asm "addi $0,$1,$2", "=r,r,J"(i32 1024, i32 3) nounwind
+ tail call i32 asm "addiu $0,$1,$2", "=r,r,J"(i32 1024, i32 3) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-L.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-L.ll
index 49dcc8745857..5edb3e24674e 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-L.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-L.ll
@@ -10,7 +10,7 @@ entry:
;CHECK-ERRORS: error: invalid operand for inline asm constraint 'L'
- tail call i32 asm "addi $0,$1,$2", "=r,r,L"(i32 7, i32 1048579) nounwind
+ tail call i32 asm "addiu $0,$1,$2", "=r,r,L"(i32 7, i32 1048579) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-N.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-N.ll
index 770669d913e8..eaa540acdafa 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-N.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-N.ll
@@ -11,7 +11,7 @@ entry:
;CHECK-ERRORS: error: invalid operand for inline asm constraint 'N'
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,N"(i32 7, i32 3) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,N"(i32 7, i32 3) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-O.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-O.ll
index cd4431ac5265..56afbaaa9cd6 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-O.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-O.ll
@@ -11,6 +11,6 @@ entry:
;CHECK-ERRORS: error: invalid operand for inline asm constraint 'O'
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,O"(i32 undef, i32 16384) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,O"(i32 undef, i32 16384) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-P.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-P.ll
index 0a4739ebb96b..0a55cb55e5f2 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-P.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-P.ll
@@ -11,6 +11,6 @@ entry:
;CHECK-ERRORS: error: invalid operand for inline asm constraint 'P'
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,P"(i32 undef, i32 655536) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,P"(i32 undef, i32 655536) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
index 94ded307fda9..a67ddce222ae 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
@@ -1,33 +1,34 @@
; Positive test for inline register constraints
;
-; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s
define i32 @main() nounwind {
entry:
; r with char
;CHECK: #APP
-;CHECK: addi ${{[0-9]+}},${{[0-9]+}},23
+;CHECK: addiu ${{[0-9]+}},${{[0-9]+}},23
;CHECK: #NO_APP
- tail call i8 asm sideeffect "addi $0,$1,$2", "=r,r,n"(i8 27, i8 23) nounwind
+ tail call i8 asm sideeffect "addiu $0,$1,$2", "=r,r,n"(i8 27, i8 23) nounwind
; r with short
;CHECK: #APP
-;CHECK: addi ${{[0-9]+}},${{[0-9]+}},13
+;CHECK: addiu ${{[0-9]+}},${{[0-9]+}},13
;CHECK: #NO_APP
- tail call i16 asm sideeffect "addi $0,$1,$2", "=r,r,n"(i16 17, i16 13) nounwind
+ tail call i16 asm sideeffect "addiu $0,$1,$2", "=r,r,n"(i16 17, i16 13) nounwind
; r with int
;CHECK: #APP
-;CHECK: addi ${{[0-9]+}},${{[0-9]+}},3
+;CHECK: addiu ${{[0-9]+}},${{[0-9]+}},3
;CHECK: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,n"(i32 7, i32 3) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,n"(i32 7, i32 3) nounwind
; Now c with 1024: make sure register $25 is picked
; CHECK: #APP
-; CHECK: addi $25,${{[0-9]+}},1024
+; CHECK: addiu $25,${{[0-9]+}},1024
; CHECK: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,$2", "=c,c,I"(i32 4194304, i32 1024) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=c,c,I"(i32 4194304, i32 1024) nounwind
; Now l with 1024: make sure register lo is picked. We do this by checking the instruction
; after the inline expression for a mflo to pull the value out of lo.
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll
index 787066602575..a7ba762b1064 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll
@@ -12,9 +12,9 @@ entry:
; r with long long
;CHECK: #APP
-;CHECK: addi ${{[0-9]+}},${{[0-9]+}},3
+;CHECK: addiu ${{[0-9]+}},${{[0-9]+}},3
;CHECK: #NO_APP
- tail call i64 asm sideeffect "addi $0,$1,$2", "=r,r,i"(i64 7, i64 3) nounwind
+ tail call i64 asm sideeffect "addiu $0,$1,$2", "=r,r,i"(i64 7, i64 3) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm-operand-code.ll b/test/CodeGen/Mips/inlineasm-operand-code.ll
index 7bb4adc31bd8..6512851a11be 100644
--- a/test/CodeGen/Mips/inlineasm-operand-code.ll
+++ b/test/CodeGen/Mips/inlineasm-operand-code.ll
@@ -12,9 +12,9 @@ define i32 @constraint_X() nounwind {
entry:
;CHECK_LITTLE_32-LABEL: constraint_X:
;CHECK_LITTLE_32: #APP
-;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},0xfffffffffffffffd
+;CHECK_LITTLE_32: addiu ${{[0-9]+}},${{[0-9]+}},0xfffffffffffffffd
;CHECK_LITTLE_32: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,${2:X}", "=r,r,I"(i32 7, i32 -3) ;
+ tail call i32 asm sideeffect "addiu $0,$1,${2:X}", "=r,r,I"(i32 7, i32 -3) ;
ret i32 0
}
@@ -23,9 +23,9 @@ define i32 @constraint_x() nounwind {
entry:
;CHECK_LITTLE_32-LABEL: constraint_x:
;CHECK_LITTLE_32: #APP
-;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},0xfffd
+;CHECK_LITTLE_32: addiu ${{[0-9]+}},${{[0-9]+}},0xfffd
;CHECK_LITTLE_32: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,${2:x}", "=r,r,I"(i32 7, i32 -3) ;
+ tail call i32 asm sideeffect "addiu $0,$1,${2:x}", "=r,r,I"(i32 7, i32 -3) ;
ret i32 0
}
@@ -34,9 +34,9 @@ define i32 @constraint_d() nounwind {
entry:
;CHECK_LITTLE_32-LABEL: constraint_d:
;CHECK_LITTLE_32: #APP
-;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},-3
+;CHECK_LITTLE_32: addiu ${{[0-9]+}},${{[0-9]+}},-3
;CHECK_LITTLE_32: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,${2:d}", "=r,r,I"(i32 7, i32 -3) ;
+ tail call i32 asm sideeffect "addiu $0,$1,${2:d}", "=r,r,I"(i32 7, i32 -3) ;
ret i32 0
}
@@ -45,9 +45,9 @@ define i32 @constraint_m() nounwind {
entry:
;CHECK_LITTLE_32-LABEL: constraint_m:
;CHECK_LITTLE_32: #APP
-;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},-4
+;CHECK_LITTLE_32: addiu ${{[0-9]+}},${{[0-9]+}},-4
;CHECK_LITTLE_32: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,${2:m}", "=r,r,I"(i32 7, i32 -3) ;
+ tail call i32 asm sideeffect "addiu $0,$1,${2:m}", "=r,r,I"(i32 7, i32 -3) ;
ret i32 0
}
@@ -56,15 +56,15 @@ define i32 @constraint_z() nounwind {
entry:
;CHECK_LITTLE_32-LABEL: constraint_z:
;CHECK_LITTLE_32: #APP
-;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},-3
+;CHECK_LITTLE_32: addiu ${{[0-9]+}},${{[0-9]+}},-3
;CHECK_LITTLE_32: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,${2:z}", "=r,r,I"(i32 7, i32 -3) ;
+ tail call i32 asm sideeffect "addiu $0,$1,${2:z}", "=r,r,I"(i32 7, i32 -3) ;
; z with 0
;CHECK_LITTLE_32: #APP
-;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},$0
+;CHECK_LITTLE_32: addiu ${{[0-9]+}},${{[0-9]+}},$0
;CHECK_LITTLE_32: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,${2:z}", "=r,r,I"(i32 7, i32 0) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,${2:z}", "=r,r,I"(i32 7, i32 0) nounwind
ret i32 0
}
@@ -73,9 +73,9 @@ define i32 @constraint_longlong() nounwind {
entry:
;CHECK_LITTLE_32-LABEL: constraint_longlong:
;CHECK_LITTLE_32: #APP
-;CHECK_LITTLE_32: addi ${{[0-9]+}},${{[0-9]+}},3
+;CHECK_LITTLE_32: addiu ${{[0-9]+}},${{[0-9]+}},3
;CHECK_LITTLE_32: #NO_APP
- tail call i64 asm sideeffect "addi $0,$1,$2 \0A\09", "=r,r,X"(i64 1229801703532086340, i64 3) nounwind
+ tail call i64 asm sideeffect "addiu $0,$1,$2 \0A\09", "=r,r,X"(i64 1229801703532086340, i64 3) nounwind
ret i32 0
}
diff --git a/test/CodeGen/Mips/inlineasm_constraint.ll b/test/CodeGen/Mips/inlineasm_constraint.ll
index 8d30f45d84e3..76b73dc276ae 100644
--- a/test/CodeGen/Mips/inlineasm_constraint.ll
+++ b/test/CodeGen/Mips/inlineasm_constraint.ll
@@ -5,21 +5,21 @@ entry:
; First I with short
; CHECK: #APP
-; CHECK: addi ${{[0-9]+}},${{[0-9]+}},4096
+; CHECK: addiu ${{[0-9]+}},${{[0-9]+}},4096
; CHECK: #NO_APP
- tail call i16 asm sideeffect "addi $0,$1,$2", "=r,r,I"(i16 7, i16 4096) nounwind
+ tail call i16 asm sideeffect "addiu $0,$1,$2", "=r,r,I"(i16 7, i16 4096) nounwind
; Then I with int
; CHECK: #APP
-; CHECK: addi ${{[0-9]+}},${{[0-9]+}},-3
+; CHECK: addiu ${{[0-9]+}},${{[0-9]+}},-3
; CHECK: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,I"(i32 7, i32 -3) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,I"(i32 7, i32 -3) nounwind
; Now J with 0
; CHECK: #APP
-; CHECK: addi ${{[0-9]+}},${{[0-9]+}},0
+; CHECK: addiu ${{[0-9]+}},${{[0-9]+}},0
; CHECK: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,$2\0A\09 ", "=r,r,J"(i32 7, i16 0) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2\0A\09 ", "=r,r,J"(i32 7, i16 0) nounwind
; Now K with 64
; CHECK: #APP
@@ -35,29 +35,29 @@ entry:
; Now N with -3
; CHECK: #APP
-; CHECK: addi ${{[0-9]+}},${{[0-9]+}},-3
+; CHECK: addiu ${{[0-9]+}},${{[0-9]+}},-3
; CHECK: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,N"(i32 7, i32 -3) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,N"(i32 7, i32 -3) nounwind
; Now O with -3
; CHECK: #APP
-; CHECK: addi ${{[0-9]+}},${{[0-9]+}},-3
+; CHECK: addiu ${{[0-9]+}},${{[0-9]+}},-3
; CHECK: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,O"(i32 7, i16 -3) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,O"(i32 7, i16 -3) nounwind
; Now P with 65535
; CHECK: #APP
-; CHECK: addi ${{[0-9]+}},${{[0-9]+}},65535
+; CHECK: addiu ${{[0-9]+}},${{[0-9]+}},65535
; CHECK: #NO_APP
- tail call i32 asm sideeffect "addi $0,$1,$2", "=r,r,P"(i32 7, i32 65535) nounwind
+ tail call i32 asm sideeffect "addiu $0,$1,$2", "=r,r,P"(i32 7, i32 65535) nounwind
; Now R Which takes the address of c
%c = alloca i32, align 4
store i32 -4469539, i32* %c, align 4
- %8 = call i32 asm sideeffect "lwl $0, 1 + $1\0A\09lwr $0, 2 + $1\0A\09", "=r,*R"(i32* %c) #1
+ %8 = call i32 asm sideeffect "lw $0, 1 + $1\0A\09lw $0, 2 + $1\0A\09", "=r,*R"(i32* %c) #1
; CHECK: #APP
-; CHECK: lwl ${{[0-9]+}}, 1 + 0(${{[0-9]+}})
-; CHECK: lwr ${{[0-9]+}}, 2 + 0(${{[0-9]+}})
+; CHECK: lw ${{[0-9]+}}, 1 + 0(${{[0-9]+}})
+; CHECK: lw ${{[0-9]+}}, 2 + 0(${{[0-9]+}})
; CHECK: #NO_APP
ret i32 0
diff --git a/test/CodeGen/Mips/int-to-float-conversion.ll b/test/CodeGen/Mips/int-to-float-conversion.ll
index c2baf442f4ae..d226b48cb20f 100644
--- a/test/CodeGen/Mips/int-to-float-conversion.ll
+++ b/test/CodeGen/Mips/int-to-float-conversion.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=64
; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
@i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
diff --git a/test/CodeGen/Mips/l3mc.ll b/test/CodeGen/Mips/l3mc.ll
new file mode 100644
index 000000000000..3bfb389ba05d
--- /dev/null
+++ b/test/CodeGen/Mips/l3mc.ll
@@ -0,0 +1,114 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunsdfsi
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___floatdidf
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___floatdisf
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___floatundidf
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___fixsfdi
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunsdfdi
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___fixdfdi
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunssfsi
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunssfdi
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=__call_stub_fp___floatundisf
+
+@ll1 = global i64 0, align 8
+@ll2 = global i64 0, align 8
+@ll3 = global i64 0, align 8
+@l1 = global i32 0, align 4
+@l2 = global i32 0, align 4
+@l3 = global i32 0, align 4
+@ull1 = global i64 0, align 8
+@ull2 = global i64 0, align 8
+@ull3 = global i64 0, align 8
+@ul1 = global i32 0, align 4
+@ul2 = global i32 0, align 4
+@ul3 = global i32 0, align 4
+@d1 = global double 0.000000e+00, align 8
+@d2 = global double 0.000000e+00, align 8
+@d3 = global double 0.000000e+00, align 8
+@d4 = global double 0.000000e+00, align 8
+@f1 = global float 0.000000e+00, align 4
+@f2 = global float 0.000000e+00, align 4
+@f3 = global float 0.000000e+00, align 4
+@f4 = global float 0.000000e+00, align 4
+
+; Function Attrs: nounwind
+define void @_Z3foov() #0 {
+entry:
+ %0 = load double* @d1, align 8
+ %conv = fptosi double %0 to i64
+ store i64 %conv, i64* @ll1, align 8
+ %1 = load double* @d2, align 8
+ %conv1 = fptoui double %1 to i64
+ store i64 %conv1, i64* @ull1, align 8
+ %2 = load float* @f1, align 4
+ %conv2 = fptosi float %2 to i64
+ store i64 %conv2, i64* @ll2, align 8
+ %3 = load float* @f2, align 4
+ %conv3 = fptoui float %3 to i64
+ store i64 %conv3, i64* @ull2, align 8
+ %4 = load double* @d3, align 8
+ %conv4 = fptosi double %4 to i32
+ store i32 %conv4, i32* @l1, align 4
+ %5 = load double* @d4, align 8
+ %conv5 = fptoui double %5 to i32
+ store i32 %conv5, i32* @ul1, align 4
+ %6 = load float* @f3, align 4
+ %conv6 = fptosi float %6 to i32
+ store i32 %conv6, i32* @l2, align 4
+ %7 = load float* @f4, align 4
+ %conv7 = fptoui float %7 to i32
+ store i32 %conv7, i32* @ul2, align 4
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @_Z3goov() #0 {
+entry:
+ %0 = load i64* @ll1, align 8
+ %conv = sitofp i64 %0 to double
+ store double %conv, double* @d1, align 8
+ %1 = load i64* @ull1, align 8
+ %conv1 = uitofp i64 %1 to double
+ store double %conv1, double* @d2, align 8
+ %2 = load i64* @ll2, align 8
+ %conv2 = sitofp i64 %2 to float
+ store float %conv2, float* @f1, align 4
+ %3 = load i64* @ull2, align 8
+ %conv3 = uitofp i64 %3 to float
+ store float %conv3, float* @f2, align 4
+ %4 = load i32* @l1, align 4
+ %conv4 = sitofp i32 %4 to double
+ store double %conv4, double* @d3, align 8
+ %5 = load i32* @ul1, align 4
+ %conv5 = uitofp i32 %5 to double
+ store double %conv5, double* @d4, align 8
+ %6 = load i32* @l2, align 4
+ %conv6 = sitofp i32 %6 to float
+ store float %conv6, float* @f3, align 4
+ %7 = load i32* @ul2, align 4
+ %conv7 = uitofp i32 %7 to float
+ store float %conv7, float* @f4, align 4
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+; __call_stub_fp___fixunsdfsi: __call_stub_fp___fixunsdfsi:
+; __call_stub_fp___floatdidf: __call_stub_fp___floatdidf:
+; __call_stub_fp___floatdisf: __call_stub_fp___floatdisf:
+; __call_stub_fp___floatundidf: __call_stub_fp___floatundidf:
+; __call_stub_fp___fixsfdi: __call_stub_fp___fixsfdi:
+; __call_stub_fp___fixunsdfdi: __call_stub_fp___fixunsdfdi:
+; __call_stub_fp___fixdfdi: __call_stub_fp___fixdfdi:
+; __call_stub_fp___fixunssfsi: __call_stub_fp___fixunssfsi:
+; __call_stub_fp___fixunssfdi: __call_stub_fp___fixunssfdi:
+; __call_stub_fp___floatundisf: __call_stub_fp___floatundisf:
+
diff --git a/test/CodeGen/Mips/largefr1.ll b/test/CodeGen/Mips/largefr1.ll
deleted file mode 100644
index 9a5fd08d17ac..000000000000
--- a/test/CodeGen/Mips/largefr1.ll
+++ /dev/null
@@ -1,74 +0,0 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s -check-prefix=1
-
-
-@i = common global i32 0, align 4
-@j = common global i32 0, align 4
-@.str = private unnamed_addr constant [8 x i8] c"%i %i \0A\00", align 1
-
-define void @foo(i32* %p, i32 %i, i32 %j) nounwind {
-entry:
- %p.addr = alloca i32*, align 4
- %i.addr = alloca i32, align 4
- %j.addr = alloca i32, align 4
- store i32* %p, i32** %p.addr, align 4
- store i32 %i, i32* %i.addr, align 4
- store i32 %j, i32* %j.addr, align 4
- %0 = load i32* %j.addr, align 4
- %1 = load i32** %p.addr, align 4
- %2 = load i32* %i.addr, align 4
- %add.ptr = getelementptr inbounds i32* %1, i32 %2
- store i32 %0, i32* %add.ptr, align 4
- ret void
-}
-
-define i32 @main() nounwind {
-entry:
-; 1-LABEL: main:
-; 1: 1: .word -798000
-; 1: lw ${{[0-9]+}}, 1f
-; 1: b 2f
-; 1: .align 2
-; 1: .word 800020
-
-; 1: b 2f
-; 1: .align 2
-; 1: .word 400020
-
-; 1: move ${{[0-9]+}}, $sp
-; 1: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 1: addiu ${{[0-9]+}}, ${{[0-9]+}}, 0
-
-
-
-; 1: b 2f
-; 1: .align 2
-; 1: .word 400220
-
-; 1: move ${{[0-9]+}}, $sp
-; 1: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 1: lw ${{[0-9]+}}, 0(${{[0-9]+}})
-
-
-
-
- %retval = alloca i32, align 4
- %one = alloca [100000 x i32], align 4
- %two = alloca [100000 x i32], align 4
- store i32 0, i32* %retval
- %arrayidx = getelementptr inbounds [100000 x i32]* %one, i32 0, i32 0
- call void @foo(i32* %arrayidx, i32 50, i32 9999)
- %arrayidx1 = getelementptr inbounds [100000 x i32]* %two, i32 0, i32 0
- call void @foo(i32* %arrayidx1, i32 99999, i32 5555)
- %arrayidx2 = getelementptr inbounds [100000 x i32]* %one, i32 0, i32 50
- %0 = load i32* %arrayidx2, align 4
- store i32 %0, i32* @i, align 4
- %arrayidx3 = getelementptr inbounds [100000 x i32]* %two, i32 0, i32 99999
- %1 = load i32* %arrayidx3, align 4
- store i32 %1, i32* @j, align 4
- %2 = load i32* @i, align 4
- %3 = load i32* @j, align 4
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3)
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/test/CodeGen/Mips/largeimmprinting.ll b/test/CodeGen/Mips/largeimmprinting.ll
index 09fee3d9063f..0e9c91fb46df 100644
--- a/test/CodeGen/Mips/largeimmprinting.ll
+++ b/test/CodeGen/Mips/largeimmprinting.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | \
+; RUN: FileCheck %s -check-prefix=64
; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | \
-; RUN: FileCheck %s -check-prefix=64
+; RUN: FileCheck %s -check-prefix=64
%struct.S1 = type { [65536 x i8] }
diff --git a/test/CodeGen/Mips/lcb2.ll b/test/CodeGen/Mips/lcb2.ll
new file mode 100644
index 000000000000..715584b6797d
--- /dev/null
+++ b/test/CodeGen/Mips/lcb2.ll
@@ -0,0 +1,133 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips16-constant-islands=true < %s | FileCheck %s -check-prefix=lcb
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips16-constant-islands=true < %s | FileCheck %s -check-prefix=lcbn
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define i32 @bnez() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !5
+ store i32 0, i32* @i, align 4, !tbaa !1
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+; lcb: .ent bnez
+; lcbn: .ent bnez
+; lcb: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}}
+; lcbn-NOT: bnez ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+; lcb: .end bnez
+; lcbn: .end bnez
+
+; Function Attrs: nounwind optsize
+define i32 @beqz() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 10, i32* @j, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !6
+ br label %if.end
+
+if.else: ; preds = %entry
+ store i32 55, i32* @j, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !7
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret i32 0
+}
+
+; lcb: .ent beqz
+; lcbn: .ent beqz
+; lcb: beqz ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}}
+; lcbn-NOT: beqz ${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+; lcb: .end beqz
+; lcbn: .end beqz
+
+
+; Function Attrs: nounwind optsize
+define void @bteqz() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %1 = load i32* @j, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, %1
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 1, i32* @k, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !8
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !9
+ store i32 2, i32* @k, align 4, !tbaa !1
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void
+}
+
+; lcb: .ent bteqz
+; lcbn: .ent bteqz
+; lcb: btnez $BB{{[0-9]+}}_{{[0-9]+}}
+; lcbn-NOT: btnez $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+; lcb: .end bteqz
+; lcbn: .end bteqz
+
+
+; Function Attrs: nounwind optsize
+define void @btz() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %1 = load i32* @j, align 4, !tbaa !1
+ %cmp1 = icmp sgt i32 %0, %1
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then: ; preds = %entry, %if.then
+ tail call void asm sideeffect ".space 60000", ""() #1, !srcloc !10
+ %2 = load i32* @i, align 4, !tbaa !1
+ %3 = load i32* @j, align 4, !tbaa !1
+ %cmp = icmp sgt i32 %2, %3
+ br i1 %cmp, label %if.then, label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+; lcb: .ent btz
+; lcbn: .ent btz
+; lcb: bteqz $BB{{[0-9]+}}_{{[0-9]+}}
+; lcbn-NOT: bteqz $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+; lcb: btnez $BB{{[0-9]+}}_{{[0-9]+}}
+; lcbn-NOT: btnez $BB{{[0-9]+}}_{{[0-9]+}} # 16 bit inst
+; lcb: .end btz
+; lcbn: .end btz
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5 (gitosis@dmz-portal.mips.com:clang.git ed197d08c90d82e1119774e10920e6f7a841c8ec) (gitosis@dmz-portal.mips.com:llvm.git b9235a363fa2dddb26ac01cbaed58efbc9eff392)"}
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"int", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!5 = metadata !{i32 59}
+!6 = metadata !{i32 156}
+!7 = metadata !{i32 210}
+!8 = metadata !{i32 299}
+!9 = metadata !{i32 340}
+!10 = metadata !{i32 412}
diff --git a/test/CodeGen/Mips/lcb3c.ll b/test/CodeGen/Mips/lcb3c.ll
new file mode 100644
index 000000000000..72a0b8cf5cea
--- /dev/null
+++ b/test/CodeGen/Mips/lcb3c.ll
@@ -0,0 +1,59 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -O0 < %s | FileCheck %s -check-prefix=lcb
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @s() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 0, i32* @i, align 4
+ call void asm sideeffect ".space 1000", ""() #1, !srcloc !1
+ br label %if.end
+
+if.else: ; preds = %entry
+ store i32 1, i32* @i, align 4
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret i32 0
+; lcb: bnez $2, $BB0_2
+; lcb: b $BB0_1 # 16 bit inst
+; lcb: $BB0_1: # %if.then
+}
+
+; Function Attrs: nounwind
+define i32 @b() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 0, i32* @i, align 4
+ call void asm sideeffect ".space 1000000", ""() #1, !srcloc !2
+ br label %if.end
+
+if.else: ; preds = %entry
+ store i32 1, i32* @i, align 4
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret i32 0
+}
+
+; lcb: beqz $2, $BB1_1 # 16 bit inst
+; lcb: jal $BB1_2 # branch
+; lcb: $BB1_1: # %if.then
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+
+!1 = metadata !{i32 65}
+!2 = metadata !{i32 167}
diff --git a/test/CodeGen/Mips/lcb4a.ll b/test/CodeGen/Mips/lcb4a.ll
new file mode 100644
index 000000000000..e37feca78179
--- /dev/null
+++ b/test/CodeGen/Mips/lcb4a.ll
@@ -0,0 +1,69 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=ci
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define i32 @foo() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !5
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void asm sideeffect ".space 1004", ""() #1, !srcloc !6
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
+ store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ ret i32 0
+}
+
+; ci: beqz $3, $BB0_2
+; ci: # BB#1: # %if.else
+
+
+; Function Attrs: nounwind optsize
+define i32 @goo() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ tail call void asm sideeffect ".space 1000000", ""() #1, !srcloc !7
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void asm sideeffect ".space 1000004", ""() #1, !srcloc !8
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
+ store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ ret i32 0
+}
+
+; ci: bnez $3, $BB1_1 # 16 bit inst
+; ci: jal $BB1_2 # branch
+; ci: nop
+; ci: $BB1_1: # %if.else
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"int", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!5 = metadata !{i32 58}
+!6 = metadata !{i32 108}
+!7 = metadata !{i32 190}
+!8 = metadata !{i32 243}
diff --git a/test/CodeGen/Mips/lcb5.ll b/test/CodeGen/Mips/lcb5.ll
new file mode 100644
index 000000000000..0a89c804945f
--- /dev/null
+++ b/test/CodeGen/Mips/lcb5.ll
@@ -0,0 +1,240 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=ci
+
+@i = global i32 0, align 4
+@j = common global i32 0, align 4
+@k = common global i32 0, align 4
+
+; Function Attrs: nounwind optsize
+define i32 @x0() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !5
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void asm sideeffect ".space 1004", ""() #1, !srcloc !6
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
+ store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ ret i32 0
+}
+
+; ci: .ent x0
+; ci: beqz $3, $BB0_2
+; ci: $BB0_2:
+; ci: .end x0
+
+; Function Attrs: nounwind optsize
+define i32 @x1() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ tail call void asm sideeffect ".space 1000000", ""() #1, !srcloc !7
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void asm sideeffect ".space 1000004", ""() #1, !srcloc !8
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
+ store i32 %storemerge, i32* @i, align 4, !tbaa !1
+ ret i32 0
+}
+
+; ci: .ent x1
+; ci: bnez $3, $BB1_1 # 16 bit inst
+; ci: jal $BB1_2 # branch
+; ci: nop
+; ci: $BB1_1:
+; ci: .end x1
+
+; Function Attrs: nounwind optsize
+define i32 @y0() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 10, i32* @j, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !9
+ br label %if.end
+
+if.else: ; preds = %entry
+ store i32 55, i32* @j, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 1004", ""() #1, !srcloc !10
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret i32 0
+}
+
+; ci: .ent y0
+; ci: beqz $2, $BB2_2
+; ci: .end y0
+
+; Function Attrs: nounwind optsize
+define i32 @y1() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 10, i32* @j, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 1000000", ""() #1, !srcloc !11
+ br label %if.end
+
+if.else: ; preds = %entry
+ store i32 55, i32* @j, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 1000004", ""() #1, !srcloc !12
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret i32 0
+}
+
+; ci: .ent y1
+; ci: bnez $2, $BB3_1 # 16 bit inst
+; ci: jal $BB3_2 # branch
+; ci: nop
+; ci: $BB3_1:
+; ci: .end y1
+
+; Function Attrs: nounwind optsize
+define void @z0() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %1 = load i32* @j, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, %1
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 1, i32* @k, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !13
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void asm sideeffect ".space 10004", ""() #1, !srcloc !14
+ store i32 2, i32* @k, align 4, !tbaa !1
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void
+}
+
+; ci: .ent z0
+; ci: btnez $BB4_2
+; ci: .end z0
+
+; Function Attrs: nounwind optsize
+define void @z1() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %1 = load i32* @j, align 4, !tbaa !1
+ %cmp = icmp eq i32 %0, %1
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ store i32 1, i32* @k, align 4, !tbaa !1
+ tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !15
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void asm sideeffect ".space 10000004", ""() #1, !srcloc !16
+ store i32 2, i32* @k, align 4, !tbaa !1
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void
+}
+
+; ci: .ent z1
+; ci: bteqz $BB5_1 # 16 bit inst
+; ci: jal $BB5_2 # branch
+; ci: nop
+; ci: $BB5_1:
+; ci: .end z1
+
+; Function Attrs: nounwind optsize
+define void @z3() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %1 = load i32* @j, align 4, !tbaa !1
+ %cmp1 = icmp sgt i32 %0, %1
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then: ; preds = %entry, %if.then
+ tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !17
+ %2 = load i32* @i, align 4, !tbaa !1
+ %3 = load i32* @j, align 4, !tbaa !1
+ %cmp = icmp sgt i32 %2, %3
+ br i1 %cmp, label %if.then, label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+; ci: .ent z3
+; ci: bteqz $BB6_2
+; ci: .end z3
+
+; Function Attrs: nounwind optsize
+define void @z4() #0 {
+entry:
+ %0 = load i32* @i, align 4, !tbaa !1
+ %1 = load i32* @j, align 4, !tbaa !1
+ %cmp1 = icmp sgt i32 %0, %1
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then: ; preds = %entry, %if.then
+ tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !18
+ %2 = load i32* @i, align 4, !tbaa !1
+ %3 = load i32* @j, align 4, !tbaa !1
+ %cmp = icmp sgt i32 %2, %3
+ br i1 %cmp, label %if.then, label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+; ci: .ent z4
+; ci: btnez $BB7_1 # 16 bit inst
+; ci: jal $BB7_2 # branch
+; ci: nop
+; ci: .align 2
+; ci: $BB7_1:
+; ci: .end z4
+
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"int", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!5 = metadata !{i32 57}
+!6 = metadata !{i32 107}
+!7 = metadata !{i32 188}
+!8 = metadata !{i32 241}
+!9 = metadata !{i32 338}
+!10 = metadata !{i32 391}
+!11 = metadata !{i32 477}
+!12 = metadata !{i32 533}
+!13 = metadata !{i32 621}
+!14 = metadata !{i32 663}
+!15 = metadata !{i32 747}
+!16 = metadata !{i32 792}
+!17 = metadata !{i32 867}
+!18 = metadata !{i32 953}
diff --git a/test/CodeGen/Mips/lit.local.cfg b/test/CodeGen/Mips/lit.local.cfg
index 1fa54b428cd9..a3183a25afaa 100644
--- a/test/CodeGen/Mips/lit.local.cfg
+++ b/test/CodeGen/Mips/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Mips' in targets:
+if not 'Mips' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Mips/llvm-ir/call.ll b/test/CodeGen/Mips/llvm-ir/call.ll
new file mode 100644
index 000000000000..4cbf43cae28e
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/call.ll
@@ -0,0 +1,166 @@
+; Test the 'call' instruction and the tailcall variant.
+
+; FIXME: We should remove the need for -enable-mips-tail-calls
+; RUN: llc -march=mips -mcpu=mips32 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips -mcpu=mips32r2 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips -mcpu=mips32r6 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips64 -mcpu=mips4 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64r2 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64r6 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+
+declare void @extern_void_void()
+declare i32 @extern_i32_void()
+declare float @extern_float_void()
+
+define i32 @call_void_void() {
+; ALL-LABEL: call_void_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; ALL: jalr $[[TGT]]
+
+ call void @extern_void_void()
+ ret i32 0
+}
+
+define i32 @call_i32_void() {
+; ALL-LABEL: call_i32_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; ALL: jalr $[[TGT]]
+
+ %1 = call i32 @extern_i32_void()
+ %2 = add i32 %1, 1
+ ret i32 %2
+}
+
+define float @call_float_void() {
+; ALL-LABEL: call_float_void:
+
+; FIXME: Not sure why we don't use $gp directly on such a simple test. We should
+; look into it at some point.
+; O32: addu $[[GP:[0-9]+]], ${{[0-9]+}}, $25
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_float_void)($[[GP]])
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_float_void)($gp)
+
+; ALL: jalr $[[TGT]]
+
+; O32: move $gp, $[[GP]]
+
+ %1 = call float @extern_float_void()
+ %2 = fadd float %1, 1.0
+ ret float %2
+}
+
+define void @musttail_call_void_void() {
+; ALL-LABEL: musttail_call_void_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_void_void)($gp)
+
+; NOT-R6: jr $[[TGT]]
+; R6: r6.jr $[[TGT]]
+
+ musttail call void @extern_void_void()
+ ret void
+}
+
+define i32 @musttail_call_i32_void() {
+; ALL-LABEL: musttail_call_i32_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_i32_void)($gp)
+
+; NOT-R6: jr $[[TGT]]
+; R6: r6.jr $[[TGT]]
+
+ %1 = musttail call i32 @extern_i32_void()
+ ret i32 %1
+}
+
+define float @musttail_call_float_void() {
+; ALL-LABEL: musttail_call_float_void:
+
+; O32: lw $[[TGT:[0-9]+]], %call16(extern_float_void)($gp)
+
+; N64: ld $[[TGT:[0-9]+]], %call16(extern_float_void)($gp)
+
+; NOT-R6: jr $[[TGT]]
+; R6: r6.jr $[[TGT]]
+
+ %1 = musttail call float @extern_float_void()
+ ret float %1
+}
+
+define i32 @indirect_call_void_void(void ()* %addr) {
+; ALL-LABEL: indirect_call_void_void:
+
+; ALL: move $25, $4
+; ALL: jalr $25
+
+ call void %addr()
+ ret i32 0
+}
+
+define i32 @indirect_call_i32_void(i32 ()* %addr) {
+; ALL-LABEL: indirect_call_i32_void:
+
+; ALL: move $25, $4
+; ALL: jalr $25
+
+ %1 = call i32 %addr()
+ %2 = add i32 %1, 1
+ ret i32 %2
+}
+
+define float @indirect_call_float_void(float ()* %addr) {
+; ALL-LABEL: indirect_call_float_void:
+
+; ALL: move $25, $4
+; ALL: jalr $25
+
+ %1 = call float %addr()
+ %2 = fadd float %1, 1.0
+ ret float %2
+}
+
+; We can't use 'musttail' here because the verifier is too conservative and
+; prohibits any prototype difference.
+define void @tail_indirect_call_void_void(void ()* %addr) {
+; ALL-LABEL: tail_indirect_call_void_void:
+
+; ALL: move $25, $4
+; ALL: jr $25
+
+ tail call void %addr()
+ ret void
+}
+
+define i32 @tail_indirect_call_i32_void(i32 ()* %addr) {
+; ALL-LABEL: tail_indirect_call_i32_void:
+
+; ALL: move $25, $4
+; ALL: jr $25
+
+ %1 = tail call i32 %addr()
+ ret i32 %1
+}
+
+define float @tail_indirect_call_float_void(float ()* %addr) {
+; ALL-LABEL: tail_indirect_call_float_void:
+
+; ALL: move $25, $4
+; ALL: jr $25
+
+ %1 = tail call float %addr()
+ ret float %1
+}
diff --git a/test/CodeGen/Mips/llvm-ir/indirectbr.ll b/test/CodeGen/Mips/llvm-ir/indirectbr.ll
new file mode 100644
index 000000000000..d8fd78774553
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/indirectbr.ll
@@ -0,0 +1,34 @@
+; Test all important variants of the 'indirectbr' instruction.
+
+; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=R6
+; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=R6
+
+define i32 @br(i8 *%addr) {
+; ALL-LABEL: br:
+; NOT-R6: jr $4 # <MCInst #{{[0-9]+}} JR
+; R6: jr $4 # <MCInst #{{[0-9]+}} JALR
+
+; ALL: $BB0_1: # %L1
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
+; ALL: addiu $2, $zero, 0
+
+; ALL: $BB0_2: # %L2
+; NOT-R6: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6: jr $ra # <MCInst #{{[0-9]+}} JALR
+; ALL: addiu $2, $zero, 1
+
+entry:
+ indirectbr i8* %addr, [label %L1, label %L2]
+
+L1:
+ ret i32 0
+
+L2:
+ ret i32 1
+}
diff --git a/test/CodeGen/Mips/llvm-ir/ret.ll b/test/CodeGen/Mips/llvm-ir/ret.ll
new file mode 100644
index 000000000000..8f5b1159760c
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/ret.ll
@@ -0,0 +1,205 @@
+; Test all important variants of the 'ret' instruction.
+;
+; For non-void returns it is necessary to have something to return, so we also
+; test constant generation here.
+;
+; We'll test pointer returns in a separate file since the relocation model
+; affects them and it's undesirable to repeat the non-pointer returns for each
+; relocation model.
+
+; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=NO-MTHC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=R6
+; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=R6
+
+define void @ret_void() {
+; ALL-LABEL: ret_void:
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret void
+}
+
+define i8 @ret_i8() {
+; ALL-LABEL: ret_i8:
+; ALL-DAG: addiu $2, $zero, 3
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i8 3
+}
+
+define i16 @ret_i16_3() {
+; ALL-LABEL: ret_i16_3:
+; ALL-DAG: addiu $2, $zero, 3
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i16 3
+}
+
+define i16 @ret_i16_256() {
+; ALL-LABEL: ret_i16_256:
+; ALL-DAG: addiu $2, $zero, 256
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i16 256
+}
+
+define i16 @ret_i16_257() {
+; ALL-LABEL: ret_i16_257:
+; ALL-DAG: addiu $2, $zero, 257
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i16 257
+}
+
+define i32 @ret_i32_257() {
+; ALL-LABEL: ret_i32_257:
+; ALL-DAG: addiu $2, $zero, 257
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i32 257
+}
+
+define i32 @ret_i32_65536() {
+; ALL-LABEL: ret_i32_65536:
+; ALL-DAG: lui $2, 1
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i32 65536
+}
+
+define i32 @ret_i32_65537() {
+; ALL-LABEL: ret_i32_65537:
+; ALL: lui $[[T0:[0-9]+]], 1
+; ALL-DAG: ori $2, $[[T0]], 1
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i32 65537
+}
+
+define i64 @ret_i64_65537() {
+; ALL-LABEL: ret_i64_65537:
+; ALL: lui $[[T0:[0-9]+]], 1
+
+; GPR32-DAG: ori $3, $[[T0]], 1
+; GPR32-DAG: addiu $2, $zero, 0
+
+; GPR64-DAG: daddiu $2, $[[T0]], 1
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i64 65537
+}
+
+define i64 @ret_i64_281479271677952() {
+; ALL-LABEL: ret_i64_281479271677952:
+; ALL-DAG: lui $[[T0:[0-9]+]], 1
+
+; GPR32-DAG: ori $2, $[[T0]], 1
+; GPR32-DAG: addiu $3, $zero, 0
+
+; GPR64-DAG: daddiu $[[T1:[0-9]+]], $[[T0]], 1
+; GPR64-DAG: dsll $2, $[[T1]], 32
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i64 281479271677952
+}
+
+define i64 @ret_i64_281479271809026() {
+; ALL-LABEL: ret_i64_281479271809026:
+; GPR32-DAG: lui $[[T0:[0-9]+]], 1
+; GPR32-DAG: lui $[[T1:[0-9]+]], 2
+; GPR32-DAG: ori $2, $[[T0]], 1
+; GPR32-DAG: ori $3, $[[T1]], 2
+
+; GPR64-DAG: ori $[[T0:[0-9]+]], $zero, 32769
+; GPR64-DAG: dsll $[[T1:[0-9]+]], $[[T0]], 16
+; GPR64-DAG: daddiu $[[T0:[0-9]+]], $[[T0]], -32767
+; GPR64-DAG: dsll $[[T1:[0-9]+]], $[[T0]], 17
+; GPR64-DAG: daddiu $2, $[[T1]], 2
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret i64 281479271809026
+}
+
+define float @ret_float_0x0() {
+; ALL-LABEL: ret_float_0x0:
+
+; NO-MTHC1-DAG: mtc1 $zero, $f0
+
+; MTHC1-DAG: mtc1 $zero, $f0
+
+; DMTC-DAG: dmtc1 $zero, $f0
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret float 0x0000000000000000
+}
+
+define float @ret_float_0x3() {
+; ALL-LABEL: ret_float_0x3:
+
+; Use a constant pool
+; O32-DAG: lwc1 $f0, %lo($CPI
+; N64-DAG: lwc1 $f0, %got_ofst($CPI
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+; float constants are written as double constants
+ ret float 0x36b8000000000000
+}
+
+define double @ret_double_0x0() {
+; ALL-LABEL: ret_double_0x0:
+
+; NO-MTHC1-DAG: mtc1 $zero, $f0
+; NO-MTHC1-DAG: mtc1 $zero, $f1
+
+; MTHC1-DAG: mtc1 $zero, $f0
+; MTHC1-DAG: mthc1 $zero, $f0
+
+; DMTC-DAG: dmtc1 $zero, $f0
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret double 0x0000000000000000
+}
+
+define double @ret_double_0x3() {
+; ALL-LABEL: ret_double_0x3:
+
+; Use a constant pool
+; O32-DAG: ldc1 $f0, %lo($CPI
+; N64-DAG: ldc1 $f0, %got_ofst($CPI
+
+; NOT-R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JR
+; R6-DAG: jr $ra # <MCInst #{{[0-9]+}} JALR
+
+ ret double 0x0000000000000003
+}
diff --git a/test/CodeGen/Mips/load-store-left-right.ll b/test/CodeGen/Mips/load-store-left-right.ll
index d0928ee26613..a3f5ebfb5460 100644
--- a/test/CodeGen/Mips/load-store-left-right.ll
+++ b/test/CodeGen/Mips/load-store-left-right.ll
@@ -1,29 +1,439 @@
-; RUN: llc -march=mipsel < %s | FileCheck -check-prefix=EL %s
-; RUN: llc -march=mips < %s | FileCheck -check-prefix=EB %s
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32 -check-prefix=MIPS32-EL %s
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32 -check-prefix=MIPS32-EB %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32 -check-prefix=MIPS32-EL %s
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32 -check-prefix=MIPS32-EB %s
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32R6 -check-prefix=MIPS32R6-EL %s
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32R6 -check-prefix=MIPS32R6-EB %s
+; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
+; RUN: llc -march=mips64 -mcpu=mips4 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
+; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
+; RUN: llc -march=mips64 -mcpu=mips64 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
+; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
+; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64R6 -check-prefix=MIPS64R6-EL %s
+; RUN: llc -march=mips64 -mcpu=mips64r6 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64R6 -check-prefix=MIPS64R6-EB %s
+%struct.SLL = type { i64 }
%struct.SI = type { i32 }
+%struct.SUI = type { i32 }
+@sll = common global %struct.SLL zeroinitializer, align 1
@si = common global %struct.SI zeroinitializer, align 1
+@sui = common global %struct.SUI zeroinitializer, align 1
-define i32 @foo_load_i() nounwind readonly {
+define i32 @load_SI() nounwind readonly {
entry:
-; EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
-; EL: lwr $[[R0]], 0($[[R1]])
-; EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
-; EB: lwr $[[R0]], 3($[[R1]])
+; ALL-LABEL: load_SI:
+
+; MIPS32-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS32-EL: lwr $[[R0]], 0($[[R1]])
+
+; MIPS32-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS32-EB: lwr $[[R0]], 3($[[R1]])
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(si)(
+; MIPS32R6: lw $2, 0($[[PTR]])
+
+; MIPS64-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64-EL: lwr $[[R0]], 0($[[R1]])
+
+; MIPS64-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64-EB: lwr $[[R0]], 3($[[R1]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
+; MIPS64R6: lw $2, 0($[[PTR]])
%0 = load i32* getelementptr inbounds (%struct.SI* @si, i32 0, i32 0), align 1
ret i32 %0
}
-define void @foo_store_i(i32 %a) nounwind {
+define void @store_SI(i32 %a) nounwind {
entry:
-; EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
-; EL: swr $[[R0]], 0($[[R1]])
-; EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
-; EB: swr $[[R0]], 3($[[R1]])
+; ALL-LABEL: store_SI:
+
+; MIPS32-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS32-EL: swr $[[R0]], 0($[[R1]])
+
+; MIPS32-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS32-EB: swr $[[R0]], 3($[[R1]])
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(si)(
+; MIPS32R6: sw $4, 0($[[PTR]])
+
+; MIPS64-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64-EL: swr $[[R0]], 0($[[R1]])
+
+; MIPS64-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64-EB: swr $[[R0]], 3($[[R1]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
+; MIPS64R6: sw $4, 0($[[PTR]])
store i32 %a, i32* getelementptr inbounds (%struct.SI* @si, i32 0, i32 0), align 1
ret void
}
+define i64 @load_SLL() nounwind readonly {
+entry:
+; ALL-LABEL: load_SLL:
+
+; MIPS32-EL: lwl $2, 3($[[R1:[0-9]+]])
+; MIPS32-EL: lwr $2, 0($[[R1]])
+; MIPS32-EL: lwl $3, 7($[[R1:[0-9]+]])
+; MIPS32-EL: lwr $3, 4($[[R1]])
+
+; MIPS32-EB: lwl $2, 0($[[R1:[0-9]+]])
+; MIPS32-EB: lwr $2, 3($[[R1]])
+; MIPS32-EB: lwl $3, 4($[[R1:[0-9]+]])
+; MIPS32-EB: lwr $3, 7($[[R1]])
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(sll)(
+; MIPS32R6-DAG: lw $2, 0($[[PTR]])
+; MIPS32R6-DAG: lw $3, 4($[[PTR]])
+
+; MIPS64-EL: ldl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
+; MIPS64-EL: ldr $[[R0]], 0($[[R1]])
+
+; MIPS64-EB: ldl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64-EB: ldr $[[R0]], 7($[[R1]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sll)(
+; MIPS64R6: ld $2, 0($[[PTR]])
+
+ %0 = load i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1
+ ret i64 %0
+}
+
+define i64 @load_SI_sext_to_i64() nounwind readonly {
+entry:
+; ALL-LABEL: load_SI_sext_to_i64:
+
+; MIPS32-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS32-EL: lwr $[[R0]], 0($[[R1]])
+
+; MIPS32-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS32-EB: lwr $[[R0]], 3($[[R1]])
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(si)(
+; MIPS32R6-EL: lw $2, 0($[[PTR]])
+; MIPS32R6-EL: sra $3, $2, 31
+; MIPS32R6-EB: lw $3, 0($[[PTR]])
+; MIPS32R6-EB: sra $2, $3, 31
+
+; MIPS64-EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64-EL: lwr $[[R0]], 0($[[R1]])
+
+; MIPS64-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64-EB: lwr $[[R0]], 3($[[R1]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
+; MIPS64R6: lw $2, 0($[[PTR]])
+
+ %0 = load i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1
+ %conv = sext i32 %0 to i64
+ ret i64 %conv
+}
+
+define i64 @load_UI() nounwind readonly {
+entry:
+; ALL-LABEL: load_UI:
+
+; MIPS32-EL-DAG: lwl $[[R2:2]], 3($[[R1:[0-9]+]])
+; MIPS32-EL-DAG: lwr $[[R2]], 0($[[R1]])
+; MIPS32-EL-DAG: addiu $3, $zero, 0
+
+; MIPS32-EB-DAG: lwl $[[R2:3]], 0($[[R1:[0-9]+]])
+; MIPS32-EB-DAG: lwr $[[R2]], 3($[[R1]])
+; MIPS32-EB-DAG: addiu $2, $zero, 0
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(sui)(
+; MIPS32R6-EL-DAG: lw $2, 0($[[PTR]])
+; MIPS32R6-EL-DAG: addiu $3, $zero, 0
+; MIPS32R6-EB-DAG: lw $3, 0($[[PTR]])
+; MIPS32R6-EB-DAG: addiu $2, $zero, 0
+
+; MIPS64-EL-DAG: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64-EL-DAG: lwr $[[R0]], 0($[[R1]])
+; MIPS64-EL-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
+; MIPS64-EL-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 32
+; MIPS64-EL-DAG: daddiu $[[R4:[0-9]+]], $[[R3]], -1
+; MIPS64-EL-DAG: and ${{[0-9]+}}, $[[R0]], $[[R4]]
+
+; MIPS64-EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64-EB: lwr $[[R0]], 3($[[R1]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sui)(
+; MIPS64R6: lwu $2, 0($[[PTR]])
+
+ %0 = load i32* getelementptr inbounds (%struct.SUI* @sui, i64 0, i32 0), align 1
+ %conv = zext i32 %0 to i64
+ ret i64 %conv
+}
+
+define void @store_SLL(i64 %a) nounwind {
+entry:
+; ALL-LABEL: store_SLL:
+
+; MIPS32-EL-DAG: swl $[[A1:4]], 3($[[R1:[0-9]+]])
+; MIPS32-EL-DAG: swr $[[A1]], 0($[[R1]])
+; MIPS32-EL-DAG: swl $[[A2:5]], 7($[[R1:[0-9]+]])
+; MIPS32-EL-DAG: swr $[[A2]], 4($[[R1]])
+
+; MIPS32-EB-DAG: swl $[[A1:4]], 0($[[R1:[0-9]+]])
+; MIPS32-EB-DAG: swr $[[A1]], 3($[[R1]])
+; MIPS32-EB-DAG: swl $[[A1:5]], 4($[[R1:[0-9]+]])
+; MIPS32-EB-DAG: swr $[[A1]], 7($[[R1]])
+
+; MIPS32R6-DAG: lw $[[PTR:[0-9]+]], %got(sll)(
+; MIPS32R6-DAG: sw $4, 0($[[PTR]])
+; MIPS32R6-DAG: sw $5, 4($[[PTR]])
+
+; MIPS64-EL: sdl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
+; MIPS64-EL: sdr $[[R0]], 0($[[R1]])
+
+; MIPS64-EB: sdl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64-EB: sdr $[[R0]], 7($[[R1]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sll)(
+; MIPS64R6: sd $4, 0($[[PTR]])
+
+ store i64 %a, i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1
+ ret void
+}
+
+define void @store_SI_trunc_from_i64(i32 %a) nounwind {
+entry:
+; ALL-LABEL: store_SI_trunc_from_i64:
+
+; MIPS32-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS32-EL: swr $[[R0]], 0($[[R1]])
+
+; MIPS32-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS32-EB: swr $[[R0]], 3($[[R1]])
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(si)(
+; MIPS32R6: sw $4, 0($[[PTR]])
+
+; MIPS64-EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
+; MIPS64-EL: swr $[[R0]], 0($[[R1]])
+
+; MIPS64-EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
+; MIPS64-EB: swr $[[R0]], 3($[[R1]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
+; MIPS64R6: sw $4, 0($[[PTR]])
+
+ store i32 %a, i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1
+ ret void
+}
+
+;
+; Structures are simply concatenations of their members, so they are
+; unaffected by endianness.
+;
+
+%struct.S0 = type { i8, i8 }
+@struct_s0 = common global %struct.S0 zeroinitializer, align 1
+%struct.S1 = type { i16, i16 }
+@struct_s1 = common global %struct.S1 zeroinitializer, align 1
+%struct.S2 = type { i32, i32 }
+@struct_s2 = common global %struct.S2 zeroinitializer, align 1
+
+define void @copy_struct_S0() nounwind {
+entry:
+; ALL-LABEL: copy_struct_S0:
+
+; MIPS32-EL: lw $[[PTR:[0-9]+]], %got(struct_s0)(
+; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s0)(
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s0)(
+; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
+; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s0)(
+
+; FIXME: We should be able to do better than this on MIPS32r6/MIPS64r6 since
+; we have unaligned halfword loads/stores available.
+; ALL-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
+; ALL-DAG: sb $[[R1]], 2($[[PTR]])
+; ALL-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
+; ALL-DAG: sb $[[R1]], 3($[[PTR]])
+
+ %0 = load %struct.S0* getelementptr inbounds (%struct.S0* @struct_s0, i32 0), align 1
+ store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0* @struct_s0, i32 1), align 1
+ ret void
+}
+
+define void @copy_struct_S1() nounwind {
+entry:
+; ALL-LABEL: copy_struct_S1:
+
+; MIPS32-EL: lw $[[PTR:[0-9]+]], %got(struct_s1)(
+; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s1)(
+; MIPS32-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32-DAG: sb $[[R1]], 4($[[PTR]])
+; MIPS32-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
+; MIPS32-DAG: sb $[[R1]], 5($[[PTR]])
+; MIPS32-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]])
+; MIPS32-DAG: sb $[[R1]], 6($[[PTR]])
+; MIPS32-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS32-DAG: sb $[[R1]], 7($[[PTR]])
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s1)(
+; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32R6-DAG: sh $[[R1]], 4($[[PTR]])
+; MIPS32R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]])
+; MIPS32R6-DAG: sh $[[R1]], 6($[[PTR]])
+
+; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
+; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
+; MIPS64-DAG: lbu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-DAG: sb $[[R1]], 4($[[PTR]])
+; MIPS64-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
+; MIPS64-DAG: sb $[[R1]], 5($[[PTR]])
+; MIPS64-DAG: lbu $[[R1:[0-9]+]], 2($[[PTR]])
+; MIPS64-DAG: sb $[[R1]], 6($[[PTR]])
+; MIPS64-DAG: lbu $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS64-DAG: sb $[[R1]], 7($[[PTR]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s1)(
+; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64R6-DAG: sh $[[R1]], 4($[[PTR]])
+; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]])
+; MIPS64R6-DAG: sh $[[R1]], 6($[[PTR]])
+
+ %0 = load %struct.S1* getelementptr inbounds (%struct.S1* @struct_s1, i32 0), align 1
+ store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1* @struct_s1, i32 1), align 1
+ ret void
+}
+
+define void @copy_struct_S2() nounwind {
+entry:
+; ALL-LABEL: copy_struct_S2:
+
+; MIPS32-EL: lw $[[PTR:[0-9]+]], %got(struct_s2)(
+; MIPS32-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS32-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+; MIPS32-EL-DAG: swl $[[R1]], 11($[[PTR]])
+; MIPS32-EL-DAG: swr $[[R1]], 8($[[PTR]])
+; MIPS32-EL-DAG: lwl $[[R1:[0-9]+]], 7($[[PTR]])
+; MIPS32-EL-DAG: lwr $[[R1]], 4($[[PTR]])
+; MIPS32-EL-DAG: swl $[[R1]], 15($[[PTR]])
+; MIPS32-EL-DAG: swr $[[R1]], 12($[[PTR]])
+
+; MIPS32-EB: lw $[[PTR:[0-9]+]], %got(struct_s2)(
+; MIPS32-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS32-EB-DAG: swl $[[R1]], 8($[[PTR]])
+; MIPS32-EB-DAG: swr $[[R1]], 11($[[PTR]])
+; MIPS32-EB-DAG: lwl $[[R1:[0-9]+]], 4($[[PTR]])
+; MIPS32-EB-DAG: lwr $[[R1]], 7($[[PTR]])
+; MIPS32-EB-DAG: swl $[[R1]], 12($[[PTR]])
+; MIPS32-EB-DAG: swr $[[R1]], 15($[[PTR]])
+
+; MIPS32R6: lw $[[PTR:[0-9]+]], %got(struct_s2)(
+; MIPS32R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS32R6-DAG: sw $[[R1]], 8($[[PTR]])
+; MIPS32R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]])
+; MIPS32R6-DAG: sw $[[R1]], 12($[[PTR]])
+
+; MIPS64-EL: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
+; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+; MIPS64-EL-DAG: swl $[[R1]], 11($[[PTR]])
+; MIPS64-EL-DAG: swr $[[R1]], 8($[[PTR]])
+; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 7($[[PTR]])
+; MIPS64-EL-DAG: lwr $[[R1]], 4($[[PTR]])
+; MIPS64-EL-DAG: swl $[[R1]], 15($[[PTR]])
+; MIPS64-EL-DAG: swr $[[R1]], 12($[[PTR]])
+
+; MIPS64-EB: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
+; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS64-EB-DAG: swl $[[R1]], 8($[[PTR]])
+; MIPS64-EB-DAG: swr $[[R1]], 11($[[PTR]])
+; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 4($[[PTR]])
+; MIPS64-EB-DAG: lwr $[[R1]], 7($[[PTR]])
+; MIPS64-EB-DAG: swl $[[R1]], 12($[[PTR]])
+; MIPS64-EB-DAG: swr $[[R1]], 15($[[PTR]])
+
+; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(struct_s2)(
+; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64R6-DAG: sw $[[R1]], 8($[[PTR]])
+; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]])
+; MIPS64R6-DAG: sw $[[R1]], 12($[[PTR]])
+
+ %0 = load %struct.S2* getelementptr inbounds (%struct.S2* @struct_s2, i32 0), align 1
+ store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2* @struct_s2, i32 1), align 1
+ ret void
+}
+
+;
+; Arrays are simply concatenations of their members, so they are
+; unaffected by endianness.
+;
+
+@arr = common global [7 x i8] zeroinitializer, align 1
+
+define void @pass_array_byval() nounwind {
+entry:
+; ALL-LABEL: pass_array_byval:
+
+; MIPS32-EL: lw $[[SPTR:[0-9]+]], %got(arr)(
+; MIPS32-EL-DAG: lwl $[[R1:4]], 3($[[PTR]])
+; MIPS32-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+; MIPS32-EL-DAG: lbu $[[R2:[0-9]+]], 4($[[PTR]])
+; MIPS32-EL-DAG: lbu $[[R3:[0-9]+]], 5($[[PTR]])
+; MIPS32-EL-DAG: sll $[[T0:[0-9]+]], $[[R3]], 8
+; MIPS32-EL-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R2]]
+; MIPS32-EL-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]])
+; MIPS32-EL-DAG: sll $[[T2:[0-9]+]], $[[R4]], 16
+; MIPS32-EL-DAG: or $5, $[[T1]], $[[T2]]
+
+; MIPS32-EB: lw $[[SPTR:[0-9]+]], %got(arr)(
+; MIPS32-EB-DAG: lwl $[[R1:4]], 0($[[PTR]])
+; MIPS32-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS32-EB-DAG: lbu $[[R2:[0-9]+]], 5($[[PTR]])
+; MIPS32-EB-DAG: lbu $[[R3:[0-9]+]], 4($[[PTR]])
+; MIPS32-EB-DAG: sll $[[T0:[0-9]+]], $[[R3]], 8
+; MIPS32-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R2]]
+; MIPS32-EB-DAG: sll $[[T1]], $[[T1]], 16
+; MIPS32-EB-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]])
+; MIPS32-EB-DAG: sll $[[T2:[0-9]+]], $[[R4]], 8
+; MIPS32-EB-DAG: or $5, $[[T1]], $[[T2]]
+
+; MIPS32R6: lw $[[SPTR:[0-9]+]], %got(arr)(
+; MIPS32R6-DAG: lw $4, 0($[[PTR]])
+; MIPS32R6-EL-DAG: lhu $[[R2:[0-9]+]], 4($[[PTR]])
+; MIPS32R6-EL-DAG: lbu $[[R3:[0-9]+]], 6($[[PTR]])
+; MIPS32R6-EL-DAG: sll $[[T0:[0-9]+]], $[[R3]], 16
+; MIPS32R6-EL-DAG: or $5, $[[R2]], $[[T0]]
+
+; MIPS32R6-EB-DAG: lhu $[[R2:[0-9]+]], 4($[[PTR]])
+; MIPS32R6-EB-DAG: lbu $[[R3:[0-9]+]], 6($[[PTR]])
+; MIPS32R6-EB-DAG: sll $[[T0:[0-9]+]], $[[R2]], 16
+; MIPS32R6-EB-DAG: or $5, $[[T0]], $[[R3]]
+
+; MIPS64-EL: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
+; MIPS64-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[PTR]])
+; MIPS64-EL-DAG: lwr $[[R1]], 0($[[PTR]])
+
+; MIPS64-EB: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
+; MIPS64-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[PTR]])
+; MIPS64-EB-DAG: lwr $[[R1]], 3($[[PTR]])
+; MIPS64-EB-DAG: dsll $[[R1]], $[[R1]], 32
+; MIPS64-EB-DAG: lbu $[[R2:[0-9]+]], 5($[[PTR]])
+; MIPS64-EB-DAG: lbu $[[R3:[0-9]+]], 4($[[PTR]])
+; MIPS64-EB-DAG: dsll $[[T0:[0-9]+]], $[[R3]], 8
+; MIPS64-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[R2]]
+; MIPS64-EB-DAG: dsll $[[T1]], $[[T1]], 16
+; MIPS64-EB-DAG: or $[[T3:[0-9]+]], $[[R1]], $[[T1]]
+; MIPS64-EB-DAG: lbu $[[R4:[0-9]+]], 6($[[PTR]])
+; MIPS64-EB-DAG: dsll $[[T4:[0-9]+]], $[[R4]], 8
+; MIPS64-EB-DAG: or $4, $[[T3]], $[[T4]]
+
+; MIPS64R6: ld $[[SPTR:[0-9]+]], %got_disp(arr)(
+
+ tail call void @extern_func([7 x i8]* byval @arr) nounwind
+ ret void
+}
+
+declare void @extern_func([7 x i8]* byval)
diff --git a/test/CodeGen/Mips/longbranch.ll b/test/CodeGen/Mips/longbranch.ll
index af192d0e9217..a403744c8fd5 100644
--- a/test/CodeGen/Mips/longbranch.ll
+++ b/test/CodeGen/Mips/longbranch.ll
@@ -1,35 +1,163 @@
-; RUN: llc -march=mipsel -force-mips-long-branch -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=N64
+; RUN: llc -march=mipsel < %s | FileCheck %s
+; RUN: llc -march=mipsel -force-mips-long-branch -O3 < %s \
+; RUN: | FileCheck %s -check-prefix=O32
+; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 -force-mips-long-branch -O3 \
+; RUN: < %s | FileCheck %s -check-prefix=N64
+; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch -O3 \
+; RUN: < %s | FileCheck %s -check-prefix=N64
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=micromips \
+; RUN: -force-mips-long-branch -O3 < %s | FileCheck %s -check-prefix=MICROMIPS
+; RUN: llc -mtriple=mipsel-none-nacl -force-mips-long-branch -O3 < %s \
+; RUN: | FileCheck %s -check-prefix=NACL
-@g0 = external global i32
-define void @foo1(i32 %s) nounwind {
+@x = external global i32
+
+define void @test1(i32 %s) {
entry:
-; O32: nop
-; O32: addiu $sp, $sp, -8
-; O32: bal
-; O32: lui $1, 0
-; O32: addiu $1, $1, {{[0-9]+}}
-; N64: nop
-; N64: daddiu $sp, $sp, -16
-; N64: lui $1, 0
-; N64: daddiu $1, $1, 0
-; N64: dsll $1, $1, 16
-; N64: daddiu $1, $1, 0
-; N64: bal
-; N64: dsll $1, $1, 16
-; N64: daddiu $1, $1, {{[0-9]+}}
-
- %tobool = icmp eq i32 %s, 0
- br i1 %tobool, label %if.end, label %if.then
-
-if.then: ; preds = %entry
- %0 = load i32* @g0, align 4
- %add = add nsw i32 %0, 12
- store i32 %add, i32* @g0, align 4
- br label %if.end
-
-if.end: ; preds = %entry, %if.then
+ %cmp = icmp eq i32 %s, 0
+ br i1 %cmp, label %end, label %then
+
+then:
+ store i32 1, i32* @x, align 4
+ br label %end
+
+end:
ret void
-}
+
+; First check the normal version (without long branch). beqz jumps to the
+; return block, and the fallthrough block stores 1 to the global variable.
+
+; CHECK: lui $[[R0:[0-9]+]], %hi(_gp_disp)
+; CHECK: addiu $[[R0]], $[[R0]], %lo(_gp_disp)
+; CHECK: beqz $4, $[[BB0:BB[0-9_]+]]
+; CHECK: addu $[[GP:[0-9]+]], $[[R0]], $25
+; CHECK: lw $[[R1:[0-9]+]], %got(x)($[[GP]])
+; CHECK: addiu $[[R2:[0-9]+]], $zero, 1
+; CHECK: sw $[[R2]], 0($[[R1]])
+; CHECK: $[[BB0]]:
+; CHECK: jr $ra
+; CHECK: nop
+
+
+; Check the MIPS32 version. Check that the branch logic is inverted, so that
+; the target of the new branch (bnez) is the fallthrough block of the original
+; branch. Check that the fallthrough block of the new branch contains the long
+; branch expansion, which at the end indirectly jumps to the target of the
+; original branch.
+
+; O32: lui $[[R0:[0-9]+]], %hi(_gp_disp)
+; O32: addiu $[[R0]], $[[R0]], %lo(_gp_disp)
+; O32: bnez $4, $[[BB0:BB[0-9_]+]]
+; O32: addu $[[GP:[0-9]+]], $[[R0]], $25
+
+; Check for long branch expansion:
+; O32: addiu $sp, $sp, -8
+; O32-NEXT: sw $ra, 0($sp)
+; O32-NEXT: lui $1, %hi(($[[BB2:BB[0-9_]+]])-($[[BB1:BB[0-9_]+]]))
+; O32-NEXT: bal $[[BB1]]
+; O32-NEXT: addiu $1, $1, %lo(($[[BB2]])-($[[BB1]]))
+; O32-NEXT: $[[BB1]]:
+; O32-NEXT: addu $1, $ra, $1
+; O32-NEXT: lw $ra, 0($sp)
+; O32-NEXT: jr $1
+; O32-NEXT: addiu $sp, $sp, 8
+
+; O32: $[[BB0]]:
+; O32: lw $[[R1:[0-9]+]], %got(x)($[[GP]])
+; O32: addiu $[[R2:[0-9]+]], $zero, 1
+; O32: sw $[[R2]], 0($[[R1]])
+; O32: $[[BB2]]:
+; O32: jr $ra
+; O32: nop
+
+
+; Check the MIPS64 version.
+
+; N64: lui $[[R0:[0-9]+]], %hi(%neg(%gp_rel(test1)))
+; N64: bnez $4, $[[BB0:BB[0-9_]+]]
+; N64: daddu $[[R1:[0-9]+]], $[[R0]], $25
+
+; Check for long branch expansion:
+; N64: daddiu $sp, $sp, -16
+; N64-NEXT: sd $ra, 0($sp)
+; N64-NEXT: daddiu $1, $zero, %hi(($[[BB2:BB[0-9_]+]])-($[[BB1:BB[0-9_]+]]))
+; N64-NEXT: dsll $1, $1, 16
+; N64-NEXT: bal $[[BB1]]
+; N64-NEXT: daddiu $1, $1, %lo(($[[BB2]])-($[[BB1]]))
+; N64-NEXT: $[[BB1]]:
+; N64-NEXT: daddu $1, $ra, $1
+; N64-NEXT: ld $ra, 0($sp)
+; N64-NEXT: jr $1
+; N64-NEXT: daddiu $sp, $sp, 16
+
+; N64: $[[BB0]]:
+; N64: daddiu $[[GP:[0-9]+]], $[[R1]], %lo(%neg(%gp_rel(test1)))
+; N64: ld $[[R2:[0-9]+]], %got_disp(x)($[[GP]])
+; N64: addiu $[[R3:[0-9]+]], $zero, 1
+; N64: sw $[[R3]], 0($[[R2]])
+; N64: $[[BB2]]:
+; N64: jr $ra
+; N64: nop
+
+
+; Check the microMIPS version.
+
+; MICROMIPS: lui $[[R0:[0-9]+]], %hi(_gp_disp)
+; MICROMIPS: addiu $[[R0]], $[[R0]], %lo(_gp_disp)
+; MICROMIPS: bnez $4, $[[BB0:BB[0-9_]+]]
+; MICROMIPS: addu $[[GP:[0-9]+]], $[[R0]], $25
+
+; Check for long branch expansion:
+; MICROMIPS: addiu $sp, $sp, -8
+; MICROMIPS-NEXT: sw $ra, 0($sp)
+; MICROMIPS-NEXT: lui $1, %hi(($[[BB2:BB[0-9_]+]])-($[[BB1:BB[0-9_]+]]))
+; MICROMIPS-NEXT: bal $[[BB1]]
+; MICROMIPS-NEXT: addiu $1, $1, %lo(($[[BB2]])-($[[BB1]]))
+; MICROMIPS-NEXT: $[[BB1]]:
+; MICROMIPS-NEXT: addu $1, $ra, $1
+; MICROMIPS-NEXT: lw $ra, 0($sp)
+; MICROMIPS-NEXT: jr $1
+; MICROMIPS-NEXT: addiu $sp, $sp, 8
+
+; MICROMIPS: $[[BB0]]:
+; MICROMIPS: lw $[[R1:[0-9]+]], %got(x)($[[GP]])
+; MICROMIPS: addiu $[[R2:[0-9]+]], $zero, 1
+; MICROMIPS: sw $[[R2]], 0($[[R1]])
+; MICROMIPS: $[[BB2]]:
+; MICROMIPS: jr $ra
+; MICROMIPS: nop
+
+
+; Check the NaCl version. Check that the sp change is not in the branch delay
+; slot of the "jr $1" instruction. Check that the target of the indirect
+; branch "jr $1" is bundle aligned.
+
+; NACL: lui $[[R0:[0-9]+]], %hi(_gp_disp)
+; NACL: addiu $[[R0]], $[[R0]], %lo(_gp_disp)
+; NACL: bnez $4, $[[BB0:BB[0-9_]+]]
+; NACL: addu $[[GP:[0-9]+]], $[[R0]], $25
+
+; Check for long branch expansion:
+; NACL: addiu $sp, $sp, -8
+; NACL-NEXT: sw $ra, 0($sp)
+; NACL-NEXT: lui $1, %hi(($[[BB2:BB[0-9_]+]])-($[[BB1:BB[0-9_]+]]))
+; NACL-NEXT: bal $[[BB1]]
+; NACL-NEXT: addiu $1, $1, %lo(($[[BB2]])-($[[BB1]]))
+; NACL-NEXT: $[[BB1]]:
+; NACL-NEXT: addu $1, $ra, $1
+; NACL-NEXT: lw $ra, 0($sp)
+; NACL-NEXT: addiu $sp, $sp, 8
+; NACL-NEXT: jr $1
+; NACL-NEXT: nop
+
+; NACL: $[[BB0]]:
+; NACL: lw $[[R1:[0-9]+]], %got(x)($[[GP]])
+; NACL: addiu $[[R2:[0-9]+]], $zero, 1
+; NACL: sw $[[R2]], 0($[[R1]])
+; NACL: .align 4
+; NACL-NEXT: $[[BB2]]:
+; NACL: jr $ra
+; NACL: nop
+}
diff --git a/test/CodeGen/Mips/madd-msub.ll b/test/CodeGen/Mips/madd-msub.ll
index 0dbb2c27b8f9..82229677ff11 100644
--- a/test/CodeGen/Mips/madd-msub.ll
+++ b/test/CodeGen/Mips/madd-msub.ll
@@ -1,9 +1,49 @@
-; RUN: llc -march=mips < %s | FileCheck %s -check-prefix=32
-; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=DSP
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=32R6
+; RUN: llc -march=mips -mcpu=mips32 -mattr=dsp < %s | FileCheck %s -check-prefix=DSP
+; RUN: llc -march=mips -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64
+; RUN: llc -march=mips -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64
+; RUN: llc -march=mips -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64R6
+
+; FIXME: The MIPS16 test should check its output
; RUN: llc -march=mips -mcpu=mips16 < %s
-; 32: madd ${{[0-9]+}}
-; DSP: madd $ac
+; ALL-LABEL: madd1:
+
+; 32-DAG: sra $[[T0:[0-9]+]], $6, 31
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]add ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: sra $[[T0:[0-9]+]], $6, 31
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: madd $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
+; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64-DAG: daddu $2, $[[T2]], $[[T3]]
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64R6-DAG: daddu $2, $[[T2]], $[[T3]]
+
define i64 @madd1(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
@@ -14,8 +54,47 @@ entry:
ret i64 %add
}
-; 32: maddu ${{[0-9]+}}
-; DSP: maddu $ac
+; ALL-LABEL: madd2:
+
+; FIXME: We don't really need this instruction
+; 32-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]addu ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: maddu $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
+; FIXME: There's a redundant move here. We should remove it
+; 32R6-DAG: muhu $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T3]], $[[T2]]
+
+; 64-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64-DAG: d[[m:m]]ult $[[T3]], $[[T1]]
+; 64-DAG: [[m]]flo $[[T4:[0-9]+]]
+; 64-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64-DAG: daddu $2, $[[T4]], $[[T6]]
+
+; 64R6-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64R6-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64R6-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64R6-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64R6-DAG: dmul $[[T4:[0-9]+]], $[[T3]], $[[T1]]
+; 64R6-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64R6-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64R6-DAG: daddu $2, $[[T4]], $[[T6]]
+
define i64 @madd2(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = zext i32 %a to i64
@@ -26,8 +105,38 @@ entry:
ret i64 %add
}
-; 32: madd ${{[0-9]+}}
-; DSP: madd $ac
+; ALL-LABEL: madd3:
+
+; 32-DAG: mthi $6
+; 32-DAG: mtlo $7
+; 32-DAG: [[m:m]]add ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: mthi $[[AC:ac[0-3]+]], $6
+; DSP-DAG: mtlo $[[AC]], $7
+; DSP-DAG: madd $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $7
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $7
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $6
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: daddu $2, $[[T2]], $6
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: daddu $2, $[[T2]], $6
+
define i64 @madd3(i32 %a, i32 %b, i64 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
@@ -37,8 +146,41 @@ entry:
ret i64 %add
}
-; 32: msub ${{[0-9]+}}
-; DSP: msub $ac
+; ALL-LABEL: msub1:
+
+; 32-DAG: sra $[[T0:[0-9]+]], $6, 31
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]sub ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: sra $[[T0:[0-9]+]], $6, 31
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: msub $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T3:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $[[T0]]
+; 32R6-DAG: sra $[[T5:[0-9]+]], $6, 31
+; 32R6-DAG: subu $2, $[[T5]], $[[T4]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64-DAG: dsubu $2, $[[T3]], $[[T2]]
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: sll $[[T3:[0-9]+]], $6, 0
+; 64R6-DAG: dsubu $2, $[[T3]], $[[T2]]
+
define i64 @msub1(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = sext i32 %c to i64
@@ -49,8 +191,48 @@ entry:
ret i64 %sub
}
-; 32: msubu ${{[0-9]+}}
-; DSP: msubu $ac
+; ALL-LABEL: msub2:
+
+; FIXME: We don't really need this instruction
+; 32-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; 32-DAG: mtlo $6
+; 32-DAG: [[m:m]]subu ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: msubu $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: muhu $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: negu $2, $[[T3]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
+
+; 64-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64-DAG: d[[m:m]]ult $[[T3]], $[[T1]]
+; 64-DAG: [[m]]flo $[[T4:[0-9]+]]
+; 64-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64-DAG: dsubu $2, $[[T6]], $[[T4]]
+
+; 64R6-DAG: dsll $[[T0:[0-9]+]], $4, 32
+; 64R6-DAG: dsrl $[[T1:[0-9]+]], $[[T0]], 32
+; 64R6-DAG: dsll $[[T2:[0-9]+]], $5, 32
+; 64R6-DAG: dsrl $[[T3:[0-9]+]], $[[T2]], 32
+; 64R6-DAG: dmul $[[T4:[0-9]+]], $[[T3]], $[[T1]]
+; 64R6-DAG: dsll $[[T5:[0-9]+]], $6, 32
+; 64R6-DAG: dsrl $[[T6:[0-9]+]], $[[T5]], 32
+; 64R6-DAG: dsubu $2, $[[T6]], $[[T4]]
+
define i64 @msub2(i32 %a, i32 %b, i32 %c) nounwind readnone {
entry:
%conv = zext i32 %c to i64
@@ -61,8 +243,39 @@ entry:
ret i64 %sub
}
-; 32: msub ${{[0-9]+}}
-; DSP: msub $ac
+; ALL-LABEL: msub3:
+
+; FIXME: We don't really need this instruction
+; 32-DAG: mthi $6
+; 32-DAG: mtlo $7
+; 32-DAG: [[m:m]]sub ${{[45]}}, ${{[45]}}
+; 32-DAG: [[m]]fhi $2
+; 32-DAG: [[m]]flo $3
+
+; DSP-DAG: addiu $[[T0:[0-9]+]], $zero, 0
+; DSP-DAG: mtlo $[[AC:ac[0-3]+]], $6
+; DSP-DAG: msub $[[AC]], ${{[45]}}, ${{[45]}}
+; DSP-DAG: mfhi $2, $[[AC]]
+; DSP-DAG: mflo $3, $[[AC]]
+
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $7, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: subu $2, $6, $[[T3]]
+; 32R6-DAG: subu $3, $7, $[[T1]]
+
+; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64-DAG: d[[m:m]]ult $[[T1]], $[[T0]]
+; 64-DAG: [[m]]flo $[[T2:[0-9]+]]
+; 64-DAG: dsubu $2, $6, $[[T2]]
+
+; 64R6-DAG: sll $[[T0:[0-9]+]], $4, 0
+; 64R6-DAG: sll $[[T1:[0-9]+]], $5, 0
+; 64R6-DAG: dmul $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 64R6-DAG: dsubu $2, $6, $[[T2]]
+
define i64 @msub3(i32 %a, i32 %b, i64 %c) nounwind readnone {
entry:
%conv = sext i32 %a to i64
diff --git a/test/CodeGen/Mips/mature-mc-support.ll b/test/CodeGen/Mips/mature-mc-support.ll
new file mode 100644
index 000000000000..6e5998d8a7cb
--- /dev/null
+++ b/test/CodeGen/Mips/mature-mc-support.ll
@@ -0,0 +1,32 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
+; FIXME: Mips doesn't use the integrated assembler by default, so we only test
+; that -filetype=obj tries to parse the assembly.
+
+; SKIP: not llc -march=mips < %s > /dev/null 2> %t1
+; SKIP: FileCheck %s < %t1
+
+; RUN: not llc -march=mips -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
+; SKIP: not llc -march=mipsel < %s > /dev/null 2> %t3
+; SKIP: FileCheck %s < %t3
+
+; RUN: not llc -march=mipsel -filetype=obj < %s > /dev/null 2> %t4
+; RUN: FileCheck %s < %t4
+
+; SKIP: not llc -march=mips64 < %s > /dev/null 2> %t5
+; SKIP: FileCheck %s < %t5
+
+; RUN: not llc -march=mips64 -filetype=obj < %s > /dev/null 2> %t6
+; RUN: FileCheck %s < %t6
+
+; SKIP: not llc -march=mips64el < %s > /dev/null 2> %t7
+; SKIP: FileCheck %s < %t7
+
+; RUN: not llc -march=mips64el -filetype=obj < %s > /dev/null 2> %t8
+; RUN: FileCheck %s < %t8
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/Mips/mbrsize4a.ll b/test/CodeGen/Mips/mbrsize4a.ll
new file mode 100644
index 000000000000..c80299166ab4
--- /dev/null
+++ b/test/CodeGen/Mips/mbrsize4a.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static -mips16-constant-islands < %s | FileCheck %s -check-prefix=jal16
+
+@j = global i32 10, align 4
+@.str = private unnamed_addr constant [11 x i8] c"at bottom\0A\00", align 1
+@i = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ br label %z
+
+z: ; preds = %y, %entry
+ %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
+ call void asm sideeffect ".space 10000000", ""() #2, !srcloc !1
+ br label %y
+
+y: ; preds = %z
+ %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0))
+ br label %z
+
+return: ; No predecessors!
+ %0 = load i32* %retval
+ ret i32 %0
+; jal16: jal $BB{{[0-9]+}}_{{[0-9]+}}
+}
+
+declare i32 @foo(...) #1
+
+declare i32 @printf(i8*, ...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
+!1 = metadata !{i32 68}
diff --git a/test/CodeGen/Mips/micromips-atomic.ll b/test/CodeGen/Mips/micromips-atomic.ll
new file mode 100644
index 000000000000..a50e0b7850c3
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-atomic.ll
@@ -0,0 +1,18 @@
+; RUN: llc %s -march=mipsel -mcpu=mips32r2 -mattr=micromips -filetype=asm \
+; RUN: -relocation-model=pic -o - | FileCheck %s
+
+@x = common global i32 0, align 4
+
+define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
+entry:
+ %0 = atomicrmw add i32* @x, i32 %incr monotonic
+ ret i32 %0
+
+; CHECK-LABEL: AtomicLoadAdd32:
+; CHECK: lw $[[R0:[0-9]+]], %got(x)
+; CHECK: $[[BB0:[A-Z_0-9]+]]:
+; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
+; CHECK: addu $[[R2:[0-9]+]], $[[R1]], $4
+; CHECK: sc $[[R2]], 0($[[R0]])
+; CHECK: beqz $[[R2]], $[[BB0]]
+}
diff --git a/test/CodeGen/Mips/micromips-directives.ll b/test/CodeGen/Mips/micromips-directives.ll
new file mode 100644
index 000000000000..dd0bd5836d28
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-directives.ll
@@ -0,0 +1,16 @@
+; This test checks that the '.set [no]micromips' directives
+; are emitted before a function's entry label.
+
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+micromips %s -o - | \
+; RUN: FileCheck %s -check-prefix=CHECK-MM
+; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=-micromips %s -o - | \
+; RUN: FileCheck %s -check-prefix=CHECK-NO-MM
+
+define i32 @main() nounwind {
+entry:
+ ret i32 0
+}
+
+; CHECK-MM: .set micromips
+; CHECK-NO-MM: .set nomicromips
+; CHECK: main:
diff --git a/test/CodeGen/Mips/micromips-jal.ll b/test/CodeGen/Mips/micromips-jal.ll
new file mode 100644
index 000000000000..fccc22919728
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-jal.ll
@@ -0,0 +1,48 @@
+; RUN: llc %s -march=mipsel -mcpu=mips32r2 -mattr=micromips -filetype=asm \
+; RUN: -relocation-model=static -o - | FileCheck %s
+
+define i32 @sum(i32 %a, i32 %b) nounwind uwtable {
+entry:
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 %b, i32* %b.addr, align 4
+ %0 = load i32* %a.addr, align 4
+ %1 = load i32* %b.addr, align 4
+ %add = add nsw i32 %0, %1
+ ret i32 %add
+}
+
+define i32 @main() nounwind uwtable {
+entry:
+ %retval = alloca i32, align 4
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* %y, align 4
+ %1 = load i32* %z, align 4
+ %call = call i32 @sum(i32 %0, i32 %1)
+ store i32 %call, i32* %x, align 4
+ %2 = load i32* %x, align 4
+ ret i32 %2
+}
+
+; CHECK: .text
+
+; CHECK: .globl sum
+; CHECK: .type sum,@function
+; CHECK: .set micromips
+; CHECK: .ent sum
+; CHECK-LABEL: sum:
+; CHECK: .end sum
+
+; CHECK: .globl main
+; CHECK: .type main,@function
+; CHECK: .set micromips
+; CHECK: .ent main
+; CHECK-LABEL: main:
+
+; CHECK: jal sum
+
+; CHECK: .end main
diff --git a/test/CodeGen/Mips/micromips-load-effective-address.ll b/test/CodeGen/Mips/micromips-load-effective-address.ll
new file mode 100644
index 000000000000..afba760f0e62
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-load-effective-address.ll
@@ -0,0 +1,29 @@
+; RUN: llc %s -march=mipsel -mattr=micromips -filetype=asm \
+; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
+
+define i32 @sum(i32* %x, i32* %y) nounwind uwtable {
+entry:
+ %x.addr = alloca i32*, align 8
+ %y.addr = alloca i32*, align 8
+ store i32* %x, i32** %x.addr, align 8
+ store i32* %y, i32** %y.addr, align 8
+ %0 = load i32** %x.addr, align 8
+ %1 = load i32* %0, align 4
+ %2 = load i32** %y.addr, align 8
+ %3 = load i32* %2, align 4
+ %add = add nsw i32 %1, %3
+ ret i32 %add
+}
+
+define i32 @main() nounwind uwtable {
+entry:
+ %retval = alloca i32, align 4
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ store i32 0, i32* %retval
+ %call = call i32 @sum(i32* %x, i32* %y)
+ ret i32 %call
+}
+
+; CHECK: addiu ${{[0-9]+}}, $sp, {{[0-9]+}}
+; CHECK: addiu ${{[0-9]+}}, $sp, {{[0-9]+}}
diff --git a/test/CodeGen/Mips/mips16-hf-attr.ll b/test/CodeGen/Mips/mips16-hf-attr.ll
new file mode 100644
index 000000000000..d9ad6295bef8
--- /dev/null
+++ b/test/CodeGen/Mips/mips16-hf-attr.ll
@@ -0,0 +1,45 @@
+; Check that stub generation for mips16 hard-float mode does not depend
+; on the value of the function's 'use-soft-float' attribute.
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel \
+; RUN: -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s
+
+define void @bar_sf() #0 {
+; CHECK: bar_sf:
+entry:
+ %call1 = call float @foo(float 1.000000e+00)
+; CHECK: lw $2, %call16(foo)($3)
+; CHECK: lw $5, %got(__mips16_call_stub_sf_1)($3)
+ ret void
+}
+
+define void @bar_hf() #1 {
+; CHECK: bar_hf:
+entry:
+ %call1 = call float @foo(float 1.000000e+00)
+; CHECK: lw $2, %call16(foo)($3)
+; CHECK: lw $5, %got(__mips16_call_stub_sf_1)($3)
+ ret void
+}
+
+declare float @foo(float) #2
+
+attributes #0 = {
+ nounwind
+ "less-precise-fpmad"="false" "no-frame-pointer-elim"="true"
+ "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false"
+ "no-nans-fp-math"="false" "stack-protector-buffer-size"="8"
+ "unsafe-fp-math"="false" "use-soft-float"="false"
+}
+attributes #1 = {
+ nounwind
+ "less-precise-fpmad"="false" "no-frame-pointer-elim"="true"
+ "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false"
+ "no-nans-fp-math"="false" "stack-protector-buffer-size"="8"
+ "unsafe-fp-math"="false" "use-soft-float"="true"
+}
+attributes #2 = {
+ "less-precise-fpmad"="false" "no-frame-pointer-elim"="true"
+ "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false"
+ "no-nans-fp-math"="false" "stack-protector-buffer-size"="8"
+ "unsafe-fp-math"="false" "use-soft-float"="true"
+}
diff --git a/test/CodeGen/Mips/mips16_32_1.ll b/test/CodeGen/Mips/mips16_32_1.ll
index e156641d4e50..f6096b402f2d 100644
--- a/test/CodeGen/Mips/mips16_32_1.ll
+++ b/test/CodeGen/Mips/mips16_32_1.ll
@@ -6,9 +6,8 @@ entry:
ret void
}
-; CHECK: .set mips16 # @foo
+; CHECK: .set mips16
; CHECK: .ent foo
-; CHECK: save {{.+}}
-; CHECK: restore {{.+}}
+; CHECK: jrc $ra
; CHECK: .end foo
attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_10.ll b/test/CodeGen/Mips/mips16_32_10.ll
index 7c017b8e4b75..ff9831ed7622 100644
--- a/test/CodeGen/Mips/mips16_32_10.ll
+++ b/test/CodeGen/Mips/mips16_32_10.ll
@@ -4,7 +4,7 @@ define void @foo() #0 {
entry:
ret void
}
-; 16: .set nomips16 # @foo
+; 16: .set nomips16
; 16: .ent foo
; 16: .set noreorder
; 16: .set nomacro
@@ -21,11 +21,10 @@ entry:
ret void
}
-; 16: .set mips16 # @nofoo
+; 16: .set mips16
; 16: .ent nofoo
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end nofoo
define i32 @main() #2 {
@@ -33,7 +32,7 @@ entry:
ret i32 0
}
-; 16: .set nomips16 # @main
+; 16: .set nomips16
; 16: .ent main
; 16: .set noreorder
; 16: .set nomacro
diff --git a/test/CodeGen/Mips/mips16_32_3.ll b/test/CodeGen/Mips/mips16_32_3.ll
index dd94ec1ce80a..c5a29a0b8fdb 100644
--- a/test/CodeGen/Mips/mips16_32_3.ll
+++ b/test/CodeGen/Mips/mips16_32_3.ll
@@ -6,22 +6,20 @@ entry:
ret void
}
-; 16: .set mips16 # @foo
+; 16: .set mips16
; 16: .ent foo
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end foo
-; 32: .set mips16 # @foo
+; 32: .set mips16
; 32: .ent foo
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end foo
define void @nofoo() #1 {
entry:
ret void
}
-; 16: .set nomips16 # @nofoo
+; 16: .set nomips16
; 16: .ent nofoo
; 16: .set noreorder
; 16: .set nomacro
@@ -32,7 +30,7 @@ entry:
; 16: .set macro
; 16: .set reorder
; 16: .end nofoo
-; 32: .set nomips16 # @nofoo
+; 32: .set nomips16
; 32: .ent nofoo
; 32: .set noreorder
; 32: .set nomacro
@@ -48,12 +46,11 @@ entry:
ret i32 0
}
-; 16: .set mips16 # @main
+; 16: .set mips16
; 16: .ent main
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end main
-; 32: .set nomips16 # @main
+; 32: .set nomips16
; 32: .ent main
; 32: .set noreorder
; 32: .set nomacro
diff --git a/test/CodeGen/Mips/mips16_32_4.ll b/test/CodeGen/Mips/mips16_32_4.ll
index 5e4907139445..1238363d907e 100644
--- a/test/CodeGen/Mips/mips16_32_4.ll
+++ b/test/CodeGen/Mips/mips16_32_4.ll
@@ -6,22 +6,20 @@ entry:
ret void
}
-; 16: .set mips16 # @foo
+; 16: .set mips16
; 16: .ent foo
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end foo
-; 32: .set mips16 # @foo
+; 32: .set mips16
; 32: .ent foo
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end foo
define void @nofoo() #1 {
entry:
ret void
}
-; 16: .set nomips16 # @nofoo
+; 16: .set nomips16
; 16: .ent nofoo
; 16: .set noreorder
; 16: .set nomacro
@@ -32,7 +30,7 @@ entry:
; 16: .set macro
; 16: .set reorder
; 16: .end nofoo
-; 32: .set nomips16 # @nofoo
+; 32: .set nomips16
; 32: .ent nofoo
; 32: .set noreorder
; 32: .set nomacro
@@ -48,15 +46,13 @@ entry:
ret i32 0
}
-; 16: .set mips16 # @main
+; 16: .set mips16
; 16: .ent main
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end main
-; 32: .set mips16 # @main
+; 32: .set mips16
; 32: .ent main
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end main
diff --git a/test/CodeGen/Mips/mips16_32_5.ll b/test/CodeGen/Mips/mips16_32_5.ll
index 17900a2dc75f..5d4c8a1af563 100644
--- a/test/CodeGen/Mips/mips16_32_5.ll
+++ b/test/CodeGen/Mips/mips16_32_5.ll
@@ -6,22 +6,20 @@ entry:
ret void
}
-; 16: .set mips16 # @foo
+; 16: .set mips16
; 16: .ent foo
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end foo
-; 32: .set mips16 # @foo
+; 32: .set mips16
; 32: .ent foo
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end foo
define void @nofoo() #1 {
entry:
ret void
}
-; 16: .set nomips16 # @nofoo
+; 16: .set nomips16
; 16: .ent nofoo
; 16: .set noreorder
; 16: .set nomacro
@@ -32,7 +30,7 @@ entry:
; 16: .set macro
; 16: .set reorder
; 16: .end nofoo
-; 32: .set nomips16 # @nofoo
+; 32: .set nomips16
; 32: .ent nofoo
; 32: .set noreorder
; 32: .set nomacro
@@ -48,7 +46,7 @@ entry:
ret i32 0
}
-; 16: .set nomips16 # @main
+; 16: .set nomips16
; 16: .ent main
; 16: .set noreorder
; 16: .set nomacro
@@ -60,7 +58,7 @@ entry:
; 16: .set reorder
; 16: .end main
-; 32: .set nomips16 # @main
+; 32: .set nomips16
; 32: .ent main
; 32: .set noreorder
; 32: .set nomacro
diff --git a/test/CodeGen/Mips/mips16_32_6.ll b/test/CodeGen/Mips/mips16_32_6.ll
index a77031af8be6..63323b608bc5 100644
--- a/test/CodeGen/Mips/mips16_32_6.ll
+++ b/test/CodeGen/Mips/mips16_32_6.ll
@@ -6,12 +6,11 @@ entry:
ret void
}
-; 16: .set mips16 # @foo
+; 16: .set mips16
; 16: .ent foo
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end foo
-; 32: .set nomips16 # @foo
+; 32: .set nomips16
; 32: .ent foo
; 32: .set noreorder
; 32: .set nomacro
@@ -27,7 +26,7 @@ entry:
ret void
}
-; 16: .set nomips16 # @nofoo
+; 16: .set nomips16
; 16: .ent nofoo
; 16: .set noreorder
; 16: .set nomacro
@@ -38,7 +37,7 @@ entry:
; 16: .set macro
; 16: .set reorder
; 16: .end nofoo
-; 32: .set nomips16 # @nofoo
+; 32: .set nomips16
; 32: .ent nofoo
; 32: .set noreorder
; 32: .set nomacro
@@ -54,7 +53,7 @@ entry:
ret i32 0
}
-; 16: .set nomips16 # @main
+; 16: .set nomips16
; 16: .ent main
; 16: .set noreorder
; 16: .set nomacro
@@ -66,7 +65,7 @@ entry:
; 16: .set reorder
; 16: .end main
-; 32: .set nomips16 # @main
+; 32: .set nomips16
; 32: .ent main
; 32: .set noreorder
; 32: .set nomacro
diff --git a/test/CodeGen/Mips/mips16_32_7.ll b/test/CodeGen/Mips/mips16_32_7.ll
index 895b5d4346a8..480a23c8b25e 100644
--- a/test/CodeGen/Mips/mips16_32_7.ll
+++ b/test/CodeGen/Mips/mips16_32_7.ll
@@ -6,12 +6,11 @@ entry:
ret void
}
-; 16: .set mips16 # @foo
+; 16: .set mips16
; 16: .ent foo
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end foo
-; 32: .set nomips16 # @foo
+; 32: .set nomips16
; 32: .ent foo
; 32: .set noreorder
; 32: .set nomacro
@@ -27,7 +26,7 @@ entry:
ret void
}
-; 16: .set nomips16 # @nofoo
+; 16: .set nomips16
; 16: .ent nofoo
; 16: .set noreorder
; 16: .set nomacro
@@ -38,7 +37,7 @@ entry:
; 16: .set macro
; 16: .set reorder
; 16: .end nofoo
-; 32: .set nomips16 # @nofoo
+; 32: .set nomips16
; 32: .ent nofoo
; 32: .set noreorder
; 32: .set nomacro
@@ -54,16 +53,14 @@ entry:
ret i32 0
}
-; 16: .set mips16 # @main
+; 16: .set mips16
; 16: .ent main
-; 16: save {{.+}}
-; 16: restore {{.+}}
+; 16: jrc $ra
; 16: .end main
-; 32: .set mips16 # @main
+; 32: .set mips16
; 32: .ent main
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end main
diff --git a/test/CodeGen/Mips/mips16_32_8.ll b/test/CodeGen/Mips/mips16_32_8.ll
index 4152d687093e..2f5bc219cf35 100644
--- a/test/CodeGen/Mips/mips16_32_8.ll
+++ b/test/CodeGen/Mips/mips16_32_8.ll
@@ -14,10 +14,9 @@ entry:
ret void
}
-; 32: .set mips16 # @foo
+; 32: .set mips16
; 32: .ent foo
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end foo
define void @nofoo() #1 {
@@ -33,7 +32,7 @@ entry:
ret void
}
-; 32: .set nomips16 # @nofoo
+; 32: .set nomips16
; 32: .ent nofoo
; 32: .set noreorder
; 32: .set nomacro
@@ -57,7 +56,7 @@ entry:
ret i32 0
}
-; 32: .set nomips16 # @main
+; 32: .set nomips16
; 32: .ent main
; 32: .set noreorder
; 32: .set nomacro
diff --git a/test/CodeGen/Mips/mips16_32_9.ll b/test/CodeGen/Mips/mips16_32_9.ll
index c9b494f2a890..8543147bed03 100644
--- a/test/CodeGen/Mips/mips16_32_9.ll
+++ b/test/CodeGen/Mips/mips16_32_9.ll
@@ -5,17 +5,16 @@ entry:
ret void
}
-; 32: .set mips16 # @foo
+; 32: .set mips16
; 32: .ent foo
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end foo
define void @nofoo() #1 {
entry:
ret void
}
-; 32: .set nomips16 # @nofoo
+; 32: .set nomips16
; 32: .ent nofoo
; 32: .set noreorder
; 32: .set nomacro
@@ -31,10 +30,9 @@ entry:
ret i32 0
}
-; 32: .set mips16 # @main
+; 32: .set mips16
; 32: .ent main
-; 32: save {{.+}}
-; 32: restore {{.+}}
+; 32: jrc $ra
; 32: .end main
diff --git a/test/CodeGen/Mips/mips16_fpret.ll b/test/CodeGen/Mips/mips16_fpret.ll
index c132f63cfb01..fe87604d6107 100644
--- a/test/CodeGen/Mips/mips16_fpret.ll
+++ b/test/CodeGen/Mips/mips16_fpret.ll
@@ -1,7 +1,7 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=1
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=2
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=3
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=4
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=1
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=2
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=3
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=4
@x = global float 0x41F487E980000000, align 4
diff --git a/test/CodeGen/Mips/mips16ex.ll b/test/CodeGen/Mips/mips16ex.ll
index ecb30b5c63b8..a1a99191595d 100644
--- a/test/CodeGen/Mips/mips16ex.ll
+++ b/test/CodeGen/Mips/mips16ex.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
-;16: $eh_func_begin0=.
+;16: .cfi_personality
+;16-NEXT: [[TMP:.*]]:
+;16-NEXT: $eh_func_begin0 = ([[TMP]])
@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
@_ZTIi = external constant i8*
@.str1 = private unnamed_addr constant [15 x i8] c"exception %i \0A\00", align 1
diff --git a/test/CodeGen/Mips/mips16fpe.ll b/test/CodeGen/Mips/mips16fpe.ll
index 10c5163f7fd0..987980e080ff 100644
--- a/test/CodeGen/Mips/mips16fpe.ll
+++ b/test/CodeGen/Mips/mips16fpe.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=16hf
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16hf
@x = global float 5.000000e+00, align 4
@y = global float 1.500000e+01, align 4
diff --git a/test/CodeGen/Mips/mips32r6/compatibility.ll b/test/CodeGen/Mips/mips32r6/compatibility.ll
new file mode 100644
index 000000000000..8eac8d4683d1
--- /dev/null
+++ b/test/CodeGen/Mips/mips32r6/compatibility.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck %s
+; RUN: not llc -march=mipsel -mcpu=mips32r6 -mattr=+dsp < %s 2>&1 | FileCheck --check-prefix=DSP %s
+
+; CHECK: foo:
+; DSP: MIPS32r6 is not compatible with the DSP ASE
+
+define void @foo() nounwind {
+ ret void
+}
diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll
index dc8bbfdd5baf..7f7d515d690e 100644
--- a/test/CodeGen/Mips/mips64-f128.ll
+++ b/test/CodeGen/Mips/mips64-f128.ll
@@ -1,5 +1,11 @@
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips4 -soft-float -O1 \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64 -soft-float -O1 \
-; RUN: -disable-mips-delay-filler < %s | FileCheck %s
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r2 -soft-float -O1 \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=C_CC_FMT
+; RUN: llc -mtriple=mips64el-unknown-unknown -mcpu=mips64r6 -soft-float -O1 \
+; RUN: -disable-mips-delay-filler < %s | FileCheck %s -check-prefix=ALL -check-prefix=CMP_CC_FMT
@gld0 = external global fp128
@gld1 = external global fp128
@@ -7,8 +13,8 @@
@gf1 = external global float
@gd1 = external global double
-; CHECK-LABEL: addLD:
-; CHECK: ld $25, %call16(__addtf3)
+; ALL-LABEL: addLD:
+; ALL: ld $25, %call16(__addtf3)
define fp128 @addLD() {
entry:
@@ -18,8 +24,8 @@ entry:
ret fp128 %add
}
-; CHECK-LABEL: subLD:
-; CHECK: ld $25, %call16(__subtf3)
+; ALL-LABEL: subLD:
+; ALL: ld $25, %call16(__subtf3)
define fp128 @subLD() {
entry:
@@ -29,8 +35,8 @@ entry:
ret fp128 %sub
}
-; CHECK-LABEL: mulLD:
-; CHECK: ld $25, %call16(__multf3)
+; ALL-LABEL: mulLD:
+; ALL: ld $25, %call16(__multf3)
define fp128 @mulLD() {
entry:
@@ -40,8 +46,8 @@ entry:
ret fp128 %mul
}
-; CHECK-LABEL: divLD:
-; CHECK: ld $25, %call16(__divtf3)
+; ALL-LABEL: divLD:
+; ALL: ld $25, %call16(__divtf3)
define fp128 @divLD() {
entry:
@@ -51,8 +57,8 @@ entry:
ret fp128 %div
}
-; CHECK-LABEL: conv_LD_char:
-; CHECK: ld $25, %call16(__floatsitf)
+; ALL-LABEL: conv_LD_char:
+; ALL: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_char(i8 signext %a) {
entry:
@@ -60,8 +66,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_short:
-; CHECK: ld $25, %call16(__floatsitf)
+; ALL-LABEL: conv_LD_short:
+; ALL: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_short(i16 signext %a) {
entry:
@@ -69,8 +75,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_int:
-; CHECK: ld $25, %call16(__floatsitf)
+; ALL-LABEL: conv_LD_int:
+; ALL: ld $25, %call16(__floatsitf)
define fp128 @conv_LD_int(i32 %a) {
entry:
@@ -78,8 +84,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_LL:
-; CHECK: ld $25, %call16(__floatditf)
+; ALL-LABEL: conv_LD_LL:
+; ALL: ld $25, %call16(__floatditf)
define fp128 @conv_LD_LL(i64 %a) {
entry:
@@ -87,8 +93,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_UChar:
-; CHECK: ld $25, %call16(__floatunsitf)
+; ALL-LABEL: conv_LD_UChar:
+; ALL: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UChar(i8 zeroext %a) {
entry:
@@ -96,8 +102,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_UShort:
-; CHECK: ld $25, %call16(__floatunsitf)
+; ALL-LABEL: conv_LD_UShort:
+; ALL: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UShort(i16 zeroext %a) {
entry:
@@ -105,8 +111,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_UInt:
-; CHECK: ld $25, %call16(__floatunsitf)
+; ALL-LABEL: conv_LD_UInt:
+; ALL: ld $25, %call16(__floatunsitf)
define fp128 @conv_LD_UInt(i32 %a) {
entry:
@@ -114,8 +120,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_ULL:
-; CHECK: ld $25, %call16(__floatunditf)
+; ALL-LABEL: conv_LD_ULL:
+; ALL: ld $25, %call16(__floatunditf)
define fp128 @conv_LD_ULL(i64 %a) {
entry:
@@ -123,8 +129,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_char_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_char_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define signext i8 @conv_char_LD(fp128 %a) {
entry:
@@ -132,8 +138,8 @@ entry:
ret i8 %conv
}
-; CHECK-LABEL: conv_short_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_short_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define signext i16 @conv_short_LD(fp128 %a) {
entry:
@@ -141,8 +147,8 @@ entry:
ret i16 %conv
}
-; CHECK-LABEL: conv_int_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_int_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define i32 @conv_int_LD(fp128 %a) {
entry:
@@ -150,8 +156,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: conv_LL_LD:
-; CHECK: ld $25, %call16(__fixtfdi)
+; ALL-LABEL: conv_LL_LD:
+; ALL: ld $25, %call16(__fixtfdi)
define i64 @conv_LL_LD(fp128 %a) {
entry:
@@ -159,8 +165,8 @@ entry:
ret i64 %conv
}
-; CHECK-LABEL: conv_UChar_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_UChar_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define zeroext i8 @conv_UChar_LD(fp128 %a) {
entry:
@@ -168,8 +174,8 @@ entry:
ret i8 %conv
}
-; CHECK-LABEL: conv_UShort_LD:
-; CHECK: ld $25, %call16(__fixtfsi)
+; ALL-LABEL: conv_UShort_LD:
+; ALL: ld $25, %call16(__fixtfsi)
define zeroext i16 @conv_UShort_LD(fp128 %a) {
entry:
@@ -177,8 +183,8 @@ entry:
ret i16 %conv
}
-; CHECK-LABEL: conv_UInt_LD:
-; CHECK: ld $25, %call16(__fixunstfsi)
+; ALL-LABEL: conv_UInt_LD:
+; ALL: ld $25, %call16(__fixunstfsi)
define i32 @conv_UInt_LD(fp128 %a) {
entry:
@@ -186,8 +192,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: conv_ULL_LD:
-; CHECK: ld $25, %call16(__fixunstfdi)
+; ALL-LABEL: conv_ULL_LD:
+; ALL: ld $25, %call16(__fixunstfdi)
define i64 @conv_ULL_LD(fp128 %a) {
entry:
@@ -195,8 +201,8 @@ entry:
ret i64 %conv
}
-; CHECK-LABEL: conv_LD_float:
-; CHECK: ld $25, %call16(__extendsftf2)
+; ALL-LABEL: conv_LD_float:
+; ALL: ld $25, %call16(__extendsftf2)
define fp128 @conv_LD_float(float %a) {
entry:
@@ -204,8 +210,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_LD_double:
-; CHECK: ld $25, %call16(__extenddftf2)
+; ALL-LABEL: conv_LD_double:
+; ALL: ld $25, %call16(__extenddftf2)
define fp128 @conv_LD_double(double %a) {
entry:
@@ -213,8 +219,8 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: conv_float_LD:
-; CHECK: ld $25, %call16(__trunctfsf2)
+; ALL-LABEL: conv_float_LD:
+; ALL: ld $25, %call16(__trunctfsf2)
define float @conv_float_LD(fp128 %a) {
entry:
@@ -222,8 +228,8 @@ entry:
ret float %conv
}
-; CHECK-LABEL: conv_double_LD:
-; CHECK: ld $25, %call16(__trunctfdf2)
+; ALL-LABEL: conv_double_LD:
+; ALL: ld $25, %call16(__trunctfdf2)
define double @conv_double_LD(fp128 %a) {
entry:
@@ -231,13 +237,13 @@ entry:
ret double %conv
}
-; CHECK-LABEL: libcall1_fabsl:
-; CHECK-DAG: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
-; CHECK-DAG: daddiu $[[R1:[0-9]+]], $zero, 1
-; CHECK-DAG: dsll $[[R2:[0-9]+]], $[[R1]], 63
-; CHECK-DAG: daddiu $[[R3:[0-9]+]], $[[R2]], -1
-; CHECK-DAG: and $4, $[[R0]], $[[R3]]
-; CHECK-DAG: ld $2, 0($[[R4]])
+; ALL-LABEL: libcall1_fabsl:
+; ALL-DAG: ld $[[R0:[0-9]+]], 8($[[R4:[0-9]+]])
+; ALL-DAG: daddiu $[[R1:[0-9]+]], $zero, 1
+; ALL-DAG: dsll $[[R2:[0-9]+]], $[[R1]], 63
+; ALL-DAG: daddiu $[[R3:[0-9]+]], $[[R2]], -1
+; ALL-DAG: and $4, $[[R0]], $[[R3]]
+; ALL-DAG: ld $2, 0($[[R4]])
define fp128 @libcall1_fabsl() {
entry:
@@ -248,8 +254,8 @@ entry:
declare fp128 @fabsl(fp128) #1
-; CHECK-LABEL: libcall1_ceill:
-; CHECK: ld $25, %call16(ceill)
+; ALL-LABEL: libcall1_ceill:
+; ALL: ld $25, %call16(ceill)
define fp128 @libcall1_ceill() {
entry:
@@ -260,8 +266,8 @@ entry:
declare fp128 @ceill(fp128) #1
-; CHECK-LABEL: libcall1_sinl:
-; CHECK: ld $25, %call16(sinl)
+; ALL-LABEL: libcall1_sinl:
+; ALL: ld $25, %call16(sinl)
define fp128 @libcall1_sinl() {
entry:
@@ -272,8 +278,8 @@ entry:
declare fp128 @sinl(fp128) #2
-; CHECK-LABEL: libcall1_cosl:
-; CHECK: ld $25, %call16(cosl)
+; ALL-LABEL: libcall1_cosl:
+; ALL: ld $25, %call16(cosl)
define fp128 @libcall1_cosl() {
entry:
@@ -284,8 +290,8 @@ entry:
declare fp128 @cosl(fp128) #2
-; CHECK-LABEL: libcall1_expl:
-; CHECK: ld $25, %call16(expl)
+; ALL-LABEL: libcall1_expl:
+; ALL: ld $25, %call16(expl)
define fp128 @libcall1_expl() {
entry:
@@ -296,8 +302,8 @@ entry:
declare fp128 @expl(fp128) #2
-; CHECK-LABEL: libcall1_exp2l:
-; CHECK: ld $25, %call16(exp2l)
+; ALL-LABEL: libcall1_exp2l:
+; ALL: ld $25, %call16(exp2l)
define fp128 @libcall1_exp2l() {
entry:
@@ -308,8 +314,8 @@ entry:
declare fp128 @exp2l(fp128) #2
-; CHECK-LABEL: libcall1_logl:
-; CHECK: ld $25, %call16(logl)
+; ALL-LABEL: libcall1_logl:
+; ALL: ld $25, %call16(logl)
define fp128 @libcall1_logl() {
entry:
@@ -320,8 +326,8 @@ entry:
declare fp128 @logl(fp128) #2
-; CHECK-LABEL: libcall1_log2l:
-; CHECK: ld $25, %call16(log2l)
+; ALL-LABEL: libcall1_log2l:
+; ALL: ld $25, %call16(log2l)
define fp128 @libcall1_log2l() {
entry:
@@ -332,8 +338,8 @@ entry:
declare fp128 @log2l(fp128) #2
-; CHECK-LABEL: libcall1_log10l:
-; CHECK: ld $25, %call16(log10l)
+; ALL-LABEL: libcall1_log10l:
+; ALL: ld $25, %call16(log10l)
define fp128 @libcall1_log10l() {
entry:
@@ -344,8 +350,8 @@ entry:
declare fp128 @log10l(fp128) #2
-; CHECK-LABEL: libcall1_nearbyintl:
-; CHECK: ld $25, %call16(nearbyintl)
+; ALL-LABEL: libcall1_nearbyintl:
+; ALL: ld $25, %call16(nearbyintl)
define fp128 @libcall1_nearbyintl() {
entry:
@@ -356,8 +362,8 @@ entry:
declare fp128 @nearbyintl(fp128) #1
-; CHECK-LABEL: libcall1_floorl:
-; CHECK: ld $25, %call16(floorl)
+; ALL-LABEL: libcall1_floorl:
+; ALL: ld $25, %call16(floorl)
define fp128 @libcall1_floorl() {
entry:
@@ -368,8 +374,8 @@ entry:
declare fp128 @floorl(fp128) #1
-; CHECK-LABEL: libcall1_sqrtl:
-; CHECK: ld $25, %call16(sqrtl)
+; ALL-LABEL: libcall1_sqrtl:
+; ALL: ld $25, %call16(sqrtl)
define fp128 @libcall1_sqrtl() {
entry:
@@ -380,8 +386,8 @@ entry:
declare fp128 @sqrtl(fp128) #2
-; CHECK-LABEL: libcall1_rintl:
-; CHECK: ld $25, %call16(rintl)
+; ALL-LABEL: libcall1_rintl:
+; ALL: ld $25, %call16(rintl)
define fp128 @libcall1_rintl() {
entry:
@@ -392,8 +398,8 @@ entry:
declare fp128 @rintl(fp128) #1
-; CHECK-LABEL: libcall_powil:
-; CHECK: ld $25, %call16(__powitf2)
+; ALL-LABEL: libcall_powil:
+; ALL: ld $25, %call16(__powitf2)
define fp128 @libcall_powil(fp128 %a, i32 %b) {
entry:
@@ -403,18 +409,18 @@ entry:
declare fp128 @llvm.powi.f128(fp128, i32) #3
-; CHECK-LABEL: libcall2_copysignl:
-; CHECK-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
-; CHECK-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63
-; CHECK-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK-DAG: ld $[[R1:[0-9]+]], 8($[[R0]])
-; CHECK-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
-; CHECK-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0)
-; CHECK-DAG: ld $[[R6:[0-9]+]], 8($[[R5]])
-; CHECK-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1
-; CHECK-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
-; CHECK-DAG: or $4, $[[R8]], $[[R4]]
-; CHECK-DAG: ld $2, 0($[[R5]])
+; ALL-LABEL: libcall2_copysignl:
+; ALL-DAG: daddiu $[[R2:[0-9]+]], $zero, 1
+; ALL-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63
+; ALL-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL-DAG: ld $[[R1:[0-9]+]], 8($[[R0]])
+; ALL-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]]
+; ALL-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0)
+; ALL-DAG: ld $[[R6:[0-9]+]], 8($[[R5]])
+; ALL-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1
+; ALL-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]]
+; ALL-DAG: or $4, $[[R8]], $[[R4]]
+; ALL-DAG: ld $2, 0($[[R5]])
define fp128 @libcall2_copysignl() {
entry:
@@ -426,8 +432,8 @@ entry:
declare fp128 @copysignl(fp128, fp128) #1
-; CHECK-LABEL: libcall2_powl:
-; CHECK: ld $25, %call16(powl)
+; ALL-LABEL: libcall2_powl:
+; ALL: ld $25, %call16(powl)
define fp128 @libcall2_powl() {
entry:
@@ -439,8 +445,8 @@ entry:
declare fp128 @powl(fp128, fp128) #2
-; CHECK-LABEL: libcall2_fmodl:
-; CHECK: ld $25, %call16(fmodl)
+; ALL-LABEL: libcall2_fmodl:
+; ALL: ld $25, %call16(fmodl)
define fp128 @libcall2_fmodl() {
entry:
@@ -452,8 +458,8 @@ entry:
declare fp128 @fmodl(fp128, fp128) #2
-; CHECK-LABEL: libcall3_fmal:
-; CHECK: ld $25, %call16(fmal)
+; ALL-LABEL: libcall3_fmal:
+; ALL: ld $25, %call16(fmal)
define fp128 @libcall3_fmal() {
entry:
@@ -466,8 +472,8 @@ entry:
declare fp128 @llvm.fma.f128(fp128, fp128, fp128) #4
-; CHECK-LABEL: cmp_lt:
-; CHECK: ld $25, %call16(__lttf2)
+; ALL-LABEL: cmp_lt:
+; ALL: ld $25, %call16(__lttf2)
define i32 @cmp_lt(fp128 %a, fp128 %b) {
entry:
@@ -476,8 +482,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_le:
-; CHECK: ld $25, %call16(__letf2)
+; ALL-LABEL: cmp_le:
+; ALL: ld $25, %call16(__letf2)
define i32 @cmp_le(fp128 %a, fp128 %b) {
entry:
@@ -486,8 +492,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_gt:
-; CHECK: ld $25, %call16(__gttf2)
+; ALL-LABEL: cmp_gt:
+; ALL: ld $25, %call16(__gttf2)
define i32 @cmp_gt(fp128 %a, fp128 %b) {
entry:
@@ -496,8 +502,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_ge:
-; CHECK: ld $25, %call16(__getf2)
+; ALL-LABEL: cmp_ge:
+; ALL: ld $25, %call16(__getf2)
define i32 @cmp_ge(fp128 %a, fp128 %b) {
entry:
@@ -506,8 +512,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_eq:
-; CHECK: ld $25, %call16(__eqtf2)
+; ALL-LABEL: cmp_eq:
+; ALL: ld $25, %call16(__eqtf2)
define i32 @cmp_eq(fp128 %a, fp128 %b) {
entry:
@@ -516,8 +522,8 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: cmp_ne:
-; CHECK: ld $25, %call16(__netf2)
+; ALL-LABEL: cmp_ne:
+; ALL: ld $25, %call16(__netf2)
define i32 @cmp_ne(fp128 %a, fp128 %b) {
entry:
@@ -526,10 +532,10 @@ entry:
ret i32 %conv
}
-; CHECK-LABEL: load_LD_LD:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $2, 0($[[R0]])
-; CHECK: ld $4, 8($[[R0]])
+; ALL-LABEL: load_LD_LD:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $2, 0($[[R0]])
+; ALL: ld $4, 8($[[R0]])
define fp128 @load_LD_LD() {
entry:
@@ -537,11 +543,11 @@ entry:
ret fp128 %0
}
-; CHECK-LABEL: load_LD_float:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gf1)
-; CHECK: lw $4, 0($[[R0]])
-; CHECK: ld $25, %call16(__extendsftf2)
-; CHECK: jalr $25
+; ALL-LABEL: load_LD_float:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gf1)
+; ALL: lw $4, 0($[[R0]])
+; ALL: ld $25, %call16(__extendsftf2)
+; ALL: jalr $25
define fp128 @load_LD_float() {
entry:
@@ -550,11 +556,11 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: load_LD_double:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gd1)
-; CHECK: ld $4, 0($[[R0]])
-; CHECK: ld $25, %call16(__extenddftf2)
-; CHECK: jalr $25
+; ALL-LABEL: load_LD_double:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gd1)
+; ALL: ld $4, 0($[[R0]])
+; ALL: ld $25, %call16(__extenddftf2)
+; ALL: jalr $25
define fp128 @load_LD_double() {
entry:
@@ -563,13 +569,13 @@ entry:
ret fp128 %conv
}
-; CHECK-LABEL: store_LD_LD:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $[[R1:[0-9]+]], 0($[[R0]])
-; CHECK: ld $[[R2:[0-9]+]], 8($[[R0]])
-; CHECK: ld $[[R3:[0-9]+]], %got_disp(gld0)
-; CHECK: sd $[[R2]], 8($[[R3]])
-; CHECK: sd $[[R1]], 0($[[R3]])
+; ALL-LABEL: store_LD_LD:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $[[R1:[0-9]+]], 0($[[R0]])
+; ALL: ld $[[R2:[0-9]+]], 8($[[R0]])
+; ALL: ld $[[R3:[0-9]+]], %got_disp(gld0)
+; ALL: sd $[[R2]], 8($[[R3]])
+; ALL: sd $[[R1]], 0($[[R3]])
define void @store_LD_LD() {
entry:
@@ -578,14 +584,14 @@ entry:
ret void
}
-; CHECK-LABEL: store_LD_float:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $4, 0($[[R0]])
-; CHECK: ld $5, 8($[[R0]])
-; CHECK: ld $25, %call16(__trunctfsf2)
-; CHECK: jalr $25
-; CHECK: ld $[[R1:[0-9]+]], %got_disp(gf1)
-; CHECK: sw $2, 0($[[R1]])
+; ALL-LABEL: store_LD_float:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $4, 0($[[R0]])
+; ALL: ld $5, 8($[[R0]])
+; ALL: ld $25, %call16(__trunctfsf2)
+; ALL: jalr $25
+; ALL: ld $[[R1:[0-9]+]], %got_disp(gf1)
+; ALL: sw $2, 0($[[R1]])
define void @store_LD_float() {
entry:
@@ -595,14 +601,14 @@ entry:
ret void
}
-; CHECK-LABEL: store_LD_double:
-; CHECK: ld $[[R0:[0-9]+]], %got_disp(gld1)
-; CHECK: ld $4, 0($[[R0]])
-; CHECK: ld $5, 8($[[R0]])
-; CHECK: ld $25, %call16(__trunctfdf2)
-; CHECK: jalr $25
-; CHECK: ld $[[R1:[0-9]+]], %got_disp(gd1)
-; CHECK: sd $2, 0($[[R1]])
+; ALL-LABEL: store_LD_double:
+; ALL: ld $[[R0:[0-9]+]], %got_disp(gld1)
+; ALL: ld $4, 0($[[R0]])
+; ALL: ld $5, 8($[[R0]])
+; ALL: ld $25, %call16(__trunctfdf2)
+; ALL: jalr $25
+; ALL: ld $[[R1:[0-9]+]], %got_disp(gd1)
+; ALL: sd $2, 0($[[R1]])
define void @store_LD_double() {
entry:
@@ -612,11 +618,22 @@ entry:
ret void
}
-; CHECK-LABEL: select_LD:
-; CHECK: movn $8, $6, $4
-; CHECK: movn $9, $7, $4
-; CHECK: move $2, $8
-; CHECK: move $4, $9
+; ALL-LABEL: select_LD:
+; C_CC_FMT: movn $8, $6, $4
+; C_CC_FMT: movn $9, $7, $4
+; C_CC_FMT: move $2, $8
+; C_CC_FMT: move $4, $9
+
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32 so bits 32-63 are undefined). It's not really
+; needed.
+; CMP_CC_FMT-DAG: sll $[[CC:[0-9]+]], $4, 0
+; CMP_CC_FMT-DAG: seleqz $[[EQ1:[0-9]+]], $8, $[[CC]]
+; CMP_CC_FMT-DAG: selnez $[[NE1:[0-9]+]], $6, $[[CC]]
+; CMP_CC_FMT-DAG: or $2, $[[NE1]], $[[EQ1]]
+; CMP_CC_FMT-DAG: seleqz $[[EQ2:[0-9]+]], $9, $[[CC]]
+; CMP_CC_FMT-DAG: selnez $[[NE2:[0-9]+]], $7, $[[CC]]
+; CMP_CC_FMT-DAG: or $4, $[[NE2]], $[[EQ2]]
define fp128 @select_LD(i32 %a, i64, fp128 %b, fp128 %c) {
entry:
@@ -625,18 +642,27 @@ entry:
ret fp128 %cond
}
-; CHECK-LABEL: selectCC_LD:
-; CHECK: move $[[R0:[0-9]+]], $11
-; CHECK: move $[[R1:[0-9]+]], $10
-; CHECK: move $[[R2:[0-9]+]], $9
-; CHECK: move $[[R3:[0-9]+]], $8
-; CHECK: ld $25, %call16(__gttf2)($gp)
-; CHECK: jalr $25
-; CHECK: slti $1, $2, 1
-; CHECK: movz $[[R1]], $[[R3]], $1
-; CHECK: movz $[[R0]], $[[R2]], $1
-; CHECK: move $2, $[[R1]]
-; CHECK: move $4, $[[R0]]
+; ALL-LABEL: selectCC_LD:
+; ALL: move $[[R0:[0-9]+]], $11
+; ALL: move $[[R1:[0-9]+]], $10
+; ALL: move $[[R2:[0-9]+]], $9
+; ALL: move $[[R3:[0-9]+]], $8
+; ALL: ld $25, %call16(__gttf2)($gp)
+; ALL: jalr $25
+
+; C_CC_FMT: slti $[[CC:[0-9]+]], $2, 1
+; C_CC_FMT: movz $[[R1]], $[[R3]], $[[CC]]
+; C_CC_FMT: movz $[[R0]], $[[R2]], $[[CC]]
+; C_CC_FMT: move $2, $[[R1]]
+; C_CC_FMT: move $4, $[[R0]]
+
+; CMP_CC_FMT: slt $[[CC:[0-9]+]], $zero, $2
+; CMP_CC_FMT: seleqz $[[EQ1:[0-9]+]], $[[R1]], $[[CC]]
+; CMP_CC_FMT: selnez $[[NE1:[0-9]+]], $[[R3]], $[[CC]]
+; CMP_CC_FMT: or $2, $[[NE1]], $[[EQ1]]
+; CMP_CC_FMT: seleqz $[[EQ2:[0-9]+]], $[[R0]], $[[CC]]
+; CMP_CC_FMT: selnez $[[NE2:[0-9]+]], $[[R2]], $[[CC]]
+; CMP_CC_FMT: or $4, $[[NE2]], $[[EQ2]]
define fp128 @selectCC_LD(fp128 %a, fp128 %b, fp128 %c, fp128 %d) {
entry:
diff --git a/test/CodeGen/Mips/mips64-fp-indexed-ls.ll b/test/CodeGen/Mips/mips64-fp-indexed-ls.ll
deleted file mode 100644
index bbdc05cd2d8f..000000000000
--- a/test/CodeGen/Mips/mips64-fp-indexed-ls.ll
+++ /dev/null
@@ -1,110 +0,0 @@
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
-
-%struct.S = type <{ [4 x float] }>
-%struct.S2 = type <{ [4 x double] }>
-%struct.S3 = type <{ i8, float }>
-
-@s = external global [4 x %struct.S]
-@gf = external global float
-@gd = external global double
-@s2 = external global [4 x %struct.S2]
-@s3 = external global %struct.S3
-
-define float @foo0(float* nocapture %b, i32 %o) nounwind readonly {
-entry:
-; CHECK: lwxc1
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds float* %b, i64 %idxprom
- %0 = load float* %arrayidx, align 4
- ret float %0
-}
-
-define double @foo1(double* nocapture %b, i32 %o) nounwind readonly {
-entry:
-; CHECK: ldxc1
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds double* %b, i64 %idxprom
- %0 = load double* %arrayidx, align 8
- ret double %0
-}
-
-define float @foo2(i32 %b, i32 %c) nounwind readonly {
-entry:
-; CHECK-NOT: luxc1
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S]* @s, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- %0 = load float* %arrayidx2, align 1
- ret float %0
-}
-
-define void @foo3(float* nocapture %b, i32 %o) nounwind {
-entry:
-; CHECK: swxc1
- %0 = load float* @gf, align 4
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds float* %b, i64 %idxprom
- store float %0, float* %arrayidx, align 4
- ret void
-}
-
-define void @foo4(double* nocapture %b, i32 %o) nounwind {
-entry:
-; CHECK: sdxc1
- %0 = load double* @gd, align 8
- %idxprom = zext i32 %o to i64
- %arrayidx = getelementptr inbounds double* %b, i64 %idxprom
- store double %0, double* %arrayidx, align 8
- ret void
-}
-
-define void @foo5(i32 %b, i32 %c) nounwind {
-entry:
-; CHECK-NOT: suxc1
- %0 = load float* @gf, align 4
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S]* @s, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- store float %0, float* %arrayidx2, align 1
- ret void
-}
-
-define double @foo6(i32 %b, i32 %c) nounwind readonly {
-entry:
-; CHECK: foo6
-; CHECK-NOT: luxc1
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S2]* @s2, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- %0 = load double* %arrayidx2, align 1
- ret double %0
-}
-
-define void @foo7(i32 %b, i32 %c) nounwind {
-entry:
-; CHECK: foo7
-; CHECK-NOT: suxc1
- %0 = load double* @gd, align 8
- %idxprom = zext i32 %c to i64
- %idxprom1 = zext i32 %b to i64
- %arrayidx2 = getelementptr inbounds [4 x %struct.S2]* @s2, i64 0, i64 %idxprom1, i32 0, i64 %idxprom
- store double %0, double* %arrayidx2, align 1
- ret void
-}
-
-define float @foo8() nounwind readonly {
-entry:
-; CHECK: foo8
-; CHECK-NOT: luxc1
- %0 = load float* getelementptr inbounds (%struct.S3* @s3, i64 0, i32 1), align 1
- ret float %0
-}
-
-define void @foo9(float %f) nounwind {
-entry:
-; CHECK: foo9
-; CHECK-NOT: suxc1
- store float %f, float* getelementptr inbounds (%struct.S3* @s3, i64 0, i32 1), align 1
- ret void
-}
-
diff --git a/test/CodeGen/Mips/mips64-sret.ll b/test/CodeGen/Mips/mips64-sret.ll
index e01609f3b1e4..7a52c3d41d69 100644
--- a/test/CodeGen/Mips/mips64-sret.ll
+++ b/test/CodeGen/Mips/mips64-sret.ll
@@ -1,16 +1,23 @@
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 -O3 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
-%struct.S = type { [8 x i32] }
+define void @foo(i32* noalias sret %agg.result) nounwind {
+entry:
+; CHECK-LABEL: foo:
+; CHECK: sw {{.*}}, 0($4)
+; CHECK: jr $ra
+; CHECK-NEXT: move $2, $4
-@g = common global %struct.S zeroinitializer, align 4
+ store i32 42, i32* %agg.result
+ ret void
+}
-define void @f(%struct.S* noalias sret %agg.result) nounwind {
+define void @bar(i32 %v, i32* noalias sret %agg.result) nounwind {
entry:
-; CHECK: move $2, $4
+; CHECK-LABEL: bar:
+; CHECK: sw $4, 0($5)
+; CHECK: jr $ra
+; CHECK-NEXT: move $2, $5
- %0 = bitcast %struct.S* %agg.result to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.S* @g to i8*), i64 32, i32 4, i1 false)
+ store i32 %v, i32* %agg.result
ret void
}
-
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
diff --git a/test/CodeGen/Mips/mips64countleading.ll b/test/CodeGen/Mips/mips64countleading.ll
deleted file mode 100644
index b2b67e51ade0..000000000000
--- a/test/CodeGen/Mips/mips64countleading.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
-
-define i64 @t1(i64 %X) nounwind readnone {
-entry:
-; CHECK: dclz
- %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
- ret i64 %tmp1
-}
-
-declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
-
-define i64 @t3(i64 %X) nounwind readnone {
-entry:
-; CHECK: dclo
- %neg = xor i64 %X, -1
- %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
- ret i64 %tmp1
-}
-
diff --git a/test/CodeGen/Mips/mips64directive.ll b/test/CodeGen/Mips/mips64directive.ll
index fa81b729e9c8..3d95f519bc64 100644
--- a/test/CodeGen/Mips/mips64directive.ll
+++ b/test/CodeGen/Mips/mips64directive.ll
@@ -1,3 +1,4 @@
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s
; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s
@gl = global i64 1250999896321, align 8
diff --git a/test/CodeGen/Mips/mips64ext.ll b/test/CodeGen/Mips/mips64ext.ll
index 02a35f8e6ed7..22ea0eb7769c 100644
--- a/test/CodeGen/Mips/mips64ext.ll
+++ b/test/CodeGen/Mips/mips64ext.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s
define i64 @zext64_32(i32 %a) nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/mips64fpimm0.ll b/test/CodeGen/Mips/mips64fpimm0.ll
index 17716da0c670..19e076d1ecda 100644
--- a/test/CodeGen/Mips/mips64fpimm0.ll
+++ b/test/CodeGen/Mips/mips64fpimm0.ll
@@ -1,3 +1,4 @@
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s
; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s
define double @foo1() nounwind readnone {
diff --git a/test/CodeGen/Mips/mips64fpldst.ll b/test/CodeGen/Mips/mips64fpldst.ll
index 24647b20bf2e..2f42270b645d 100644
--- a/test/CodeGen/Mips/mips64fpldst.ll
+++ b/test/CodeGen/Mips/mips64fpldst.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=CHECK-N64
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
@f0 = common global float 0.000000e+00, align 4
@d0 = common global double 0.000000e+00, align 8
diff --git a/test/CodeGen/Mips/mips64imm.ll b/test/CodeGen/Mips/mips64imm.ll
index 1fc8636c480b..c3fc61df42ba 100644
--- a/test/CodeGen/Mips/mips64imm.ll
+++ b/test/CodeGen/Mips/mips64imm.ll
@@ -1,3 +1,4 @@
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s
; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
define i32 @foo1() nounwind readnone {
diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll
index 2894d698adcc..ed617be6532e 100644
--- a/test/CodeGen/Mips/mips64instrs.ll
+++ b/test/CodeGen/Mips/mips64instrs.ll
@@ -1,98 +1,128 @@
-; RUN: llc -march=mips64el -mcpu=mips64 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips4 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS4 -check-prefix=ACCMULDIV %s
+; RUN: llc -march=mips64el -mcpu=mips64 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-DCLO -check-prefix=ACCMULDIV %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-DCLO -check-prefix=ACCMULDIV %s
+; RUN: llc -march=mips64el -mcpu=mips64r6 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-DCLO -check-prefix=GPRMULDIV %s
@gll0 = common global i64 0, align 8
@gll1 = common global i64 0, align 8
define i64 @f0(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: daddu
+; ALL-LABEL: f0:
+; ALL: daddu $2, ${{[45]}}, ${{[45]}}
%add = add nsw i64 %a1, %a0
ret i64 %add
}
define i64 @f1(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: dsubu
+; ALL-LABEL: f1:
+; ALL: dsubu $2, $4, $5
%sub = sub nsw i64 %a0, %a1
ret i64 %sub
}
define i64 @f4(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: and
+; ALL-LABEL: f4:
+; ALL: and $2, ${{[45]}}, ${{[45]}}
%and = and i64 %a1, %a0
ret i64 %and
}
define i64 @f5(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: or
+; ALL-LABEL: f5:
+; ALL: or $2, ${{[45]}}, ${{[45]}}
%or = or i64 %a1, %a0
ret i64 %or
}
define i64 @f6(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: xor
+; ALL-LABEL: f6:
+; ALL: xor $2, ${{[45]}}, ${{[45]}}
%xor = xor i64 %a1, %a0
ret i64 %xor
}
define i64 @f7(i64 %a0) nounwind readnone {
entry:
-; CHECK: daddiu ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f7:
+; ALL: daddiu $2, $4, 20
%add = add nsw i64 %a0, 20
ret i64 %add
}
define i64 @f8(i64 %a0) nounwind readnone {
entry:
-; CHECK: daddiu ${{[0-9]+}}, ${{[0-9]+}}, -20
+; ALL-LABEL: f8:
+; ALL: daddiu $2, $4, -20
%sub = add nsw i64 %a0, -20
ret i64 %sub
}
define i64 @f9(i64 %a0) nounwind readnone {
entry:
-; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f9:
+; ALL: andi $2, $4, 20
%and = and i64 %a0, 20
ret i64 %and
}
define i64 @f10(i64 %a0) nounwind readnone {
entry:
-; CHECK: ori ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f10:
+; ALL: ori $2, $4, 20
%or = or i64 %a0, 20
ret i64 %or
}
define i64 @f11(i64 %a0) nounwind readnone {
entry:
-; CHECK: xori ${{[0-9]+}}, ${{[0-9]+}}, 20
+; ALL-LABEL: f11:
+; ALL: xori $2, $4, 20
%xor = xor i64 %a0, 20
ret i64 %xor
}
define i64 @f12(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: mult
+; ALL-LABEL: f12:
+
+; ACCMULDIV: mult ${{[45]}}, ${{[45]}}
+; GPRMULDIV: dmul $2, ${{[45]}}, ${{[45]}}
+
%mul = mul nsw i64 %b, %a
ret i64 %mul
}
define i64 @f13(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: mult
+; ALL-LABEL: f13:
+
+; ACCMULDIV: mult ${{[45]}}, ${{[45]}}
+; GPRMULDIV: dmul $2, ${{[45]}}, ${{[45]}}
+
%mul = mul i64 %b, %a
ret i64 %mul
}
define i64 @f14(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK-LABEL: f14:
-; CHECK: ddiv $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mflo
+; ALL-LABEL: f14:
+; ALL-DAG: ld $[[P0:[0-9]+]], %got_disp(gll0)(
+; ALL-DAG: ld $[[P1:[0-9]+]], %got_disp(gll1)(
+; ALL-DAG: ld $[[T0:[0-9]+]], 0($[[P0]])
+; ALL-DAG: ld $[[T1:[0-9]+]], 0($[[P1]])
+
+; ACCMULDIV: ddiv $zero, $[[T0]], $[[T1]]
+; ACCMULDIV: teq $[[T1]], $zero, 7
+; ACCMULDIV: mflo $2
+
+; GPRMULDIV: ddiv $2, $[[T0]], $[[T1]]
+; GPRMULDIV: teq $[[T1]], $zero, 7
+
%0 = load i64* @gll0, align 8
%1 = load i64* @gll1, align 8
%div = sdiv i64 %0, %1
@@ -101,10 +131,19 @@ entry:
define i64 @f15() nounwind readnone {
entry:
-; CHECK-LABEL: f15:
-; CHECK: ddivu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mflo
+; ALL-LABEL: f15:
+; ALL-DAG: ld $[[P0:[0-9]+]], %got_disp(gll0)(
+; ALL-DAG: ld $[[P1:[0-9]+]], %got_disp(gll1)(
+; ALL-DAG: ld $[[T0:[0-9]+]], 0($[[P0]])
+; ALL-DAG: ld $[[T1:[0-9]+]], 0($[[P1]])
+
+; ACCMULDIV: ddivu $zero, $[[T0]], $[[T1]]
+; ACCMULDIV: teq $[[T1]], $zero, 7
+; ACCMULDIV: mflo $2
+
+; GPRMULDIV: ddivu $2, $[[T0]], $[[T1]]
+; GPRMULDIV: teq $[[T1]], $zero, 7
+
%0 = load i64* @gll0, align 8
%1 = load i64* @gll1, align 8
%div = udiv i64 %0, %1
@@ -113,20 +152,30 @@ entry:
define i64 @f16(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK-LABEL: f16:
-; CHECK: ddiv $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mfhi
+; ALL-LABEL: f16:
+
+; ACCMULDIV: ddiv $zero, $4, $5
+; ACCMULDIV: teq $5, $zero, 7
+; ACCMULDIV: mfhi $2
+
+; GPRMULDIV: dmod $2, $4, $5
+; GPRMULDIV: teq $5, $zero, 7
+
%rem = srem i64 %a, %b
ret i64 %rem
}
define i64 @f17(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK-LABEL: f17:
-; CHECK: ddivu $zero, ${{[0-9]+}}, $[[R0:[0-9]+]]
-; CHECK: teq $[[R0]], $zero, 7
-; CHECK: mfhi
+; ALL-LABEL: f17:
+
+; ACCMULDIV: ddivu $zero, $4, $5
+; ACCMULDIV: teq $5, $zero, 7
+; ACCMULDIV: mfhi $2
+
+; GPRMULDIV: dmodu $2, $4, $5
+; GPRMULDIV: teq $5, $zero, 7
+
%rem = urem i64 %a, %b
ret i64 %rem
}
@@ -135,14 +184,26 @@ declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
define i64 @f18(i64 %X) nounwind readnone {
entry:
-; CHECK: dclz $2, $4
+; ALL-LABEL: f18:
+
+; The MIPS4 version is too long to reasonably test. At least check that we don't get dclz.
+; MIPS4-NOT: dclz
+
+; HAS-DCLO: dclz $2, $4
+
%tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
ret i64 %tmp1
}
define i64 @f19(i64 %X) nounwind readnone {
entry:
-; CHECK: dclo $2, $4
+; ALL-LABEL: f19:
+
+; The MIPS4 version is too long to reasonably test. At least check that we don't get dclo.
+; MIPS4-NOT: dclo
+
+; HAS-DCLO: dclo $2, $4
+
%neg = xor i64 %X, -1
%tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
ret i64 %tmp1
@@ -150,7 +211,8 @@ entry:
define i64 @f20(i64 %a, i64 %b) nounwind readnone {
entry:
-; CHECK: nor
+; ALL-LABEL: f20:
+; ALL: nor $2, ${{[45]}}, ${{[45]}}
%or = or i64 %b, %a
%neg = xor i64 %or, -1
ret i64 %neg
diff --git a/test/CodeGen/Mips/mips64intldst.ll b/test/CodeGen/Mips/mips64intldst.ll
index 0e310a8670f9..c3607baeefeb 100644
--- a/test/CodeGen/Mips/mips64intldst.ll
+++ b/test/CodeGen/Mips/mips64intldst.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=CHECK-N64
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
@c = common global i8 0, align 4
@s = common global i16 0, align 4
diff --git a/test/CodeGen/Mips/mips64lea.ll b/test/CodeGen/Mips/mips64lea.ll
index 54d504f92266..e866b217a59c 100644
--- a/test/CodeGen/Mips/mips64lea.ll
+++ b/test/CodeGen/Mips/mips64lea.ll
@@ -1,3 +1,4 @@
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s
; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
define void @foo3() nounwind {
diff --git a/test/CodeGen/Mips/mips64load-store-left-right.ll b/test/CodeGen/Mips/mips64load-store-left-right.ll
deleted file mode 100644
index 4561429ad8b9..000000000000
--- a/test/CodeGen/Mips/mips64load-store-left-right.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck -check-prefix=EL %s
-; RUN: llc -march=mips64 -mcpu=mips64 -mattr=n64 < %s | FileCheck -check-prefix=EB %s
-
-%struct.SLL = type { i64 }
-%struct.SI = type { i32 }
-%struct.SUI = type { i32 }
-
-@sll = common global %struct.SLL zeroinitializer, align 1
-@si = common global %struct.SI zeroinitializer, align 1
-@sui = common global %struct.SUI zeroinitializer, align 1
-
-define i64 @foo_load_ll() nounwind readonly {
-entry:
-; EL: ldl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
-; EL: ldr $[[R0]], 0($[[R1]])
-; EB: ldl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
-; EB: ldr $[[R0]], 7($[[R1]])
-
- %0 = load i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1
- ret i64 %0
-}
-
-define i64 @foo_load_i() nounwind readonly {
-entry:
-; EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
-; EL: lwr $[[R0]], 0($[[R1]])
-; EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
-; EB: lwr $[[R0]], 3($[[R1]])
-
- %0 = load i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1
- %conv = sext i32 %0 to i64
- ret i64 %conv
-}
-
-define i64 @foo_load_ui() nounwind readonly {
-entry:
-; EL: lwl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
-; EL: lwr $[[R0]], 0($[[R1]])
-; EL: daddiu $[[R2:[0-9]+]], $zero, 1
-; EL: dsll $[[R3:[0-9]+]], $[[R2]], 32
-; EL: daddiu $[[R4:[0-9]+]], $[[R3]], -1
-; EL: and ${{[0-9]+}}, $[[R0]], $[[R4]]
-; EB: lwl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
-; EB: lwr $[[R0]], 3($[[R1]])
-
-
- %0 = load i32* getelementptr inbounds (%struct.SUI* @sui, i64 0, i32 0), align 1
- %conv = zext i32 %0 to i64
- ret i64 %conv
-}
-
-define void @foo_store_ll(i64 %a) nounwind {
-entry:
-; EL: sdl $[[R0:[0-9]+]], 7($[[R1:[0-9]+]])
-; EL: sdr $[[R0]], 0($[[R1]])
-; EB: sdl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
-; EB: sdr $[[R0]], 7($[[R1]])
-
- store i64 %a, i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1
- ret void
-}
-
-define void @foo_store_i(i32 %a) nounwind {
-entry:
-; EL: swl $[[R0:[0-9]+]], 3($[[R1:[0-9]+]])
-; EL: swr $[[R0]], 0($[[R1]])
-; EB: swl $[[R0:[0-9]+]], 0($[[R1:[0-9]+]])
-; EB: swr $[[R0]], 3($[[R1]])
-
- store i32 %a, i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1
- ret void
-}
-
diff --git a/test/CodeGen/Mips/mips64muldiv.ll b/test/CodeGen/Mips/mips64muldiv.ll
index fd036a2ca9fb..32d05a9da369 100644
--- a/test/CodeGen/Mips/mips64muldiv.ll
+++ b/test/CodeGen/Mips/mips64muldiv.ll
@@ -1,49 +1,79 @@
-; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC
+; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC
+; RUN: llc -march=mips64el -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ACC
+; RUN: llc -march=mips64el -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR
+
+; FileCheck prefixes:
+; ALL - All targets
+; ACC - Targets with accumulator-based mul/div (i.e. pre-MIPS64r6)
+; GPR - Targets with register-based mul/div (i.e. MIPS64r6)
define i64 @m0(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: dmult
-; CHECK: mflo
+; ALL-LABEL: m0:
+; ACC: dmult ${{[45]}}, ${{[45]}}
+; ACC: mflo $2
+; GPR: dmul $2, ${{[45]}}, ${{[45]}}
%mul = mul i64 %a1, %a0
ret i64 %mul
}
define i64 @m1(i64 %a) nounwind readnone {
entry:
-; CHECK: dmult
-; CHECK: mfhi
+; ALL-LABEL: m1:
+; ALL: lui $[[T0:[0-9]+]], 21845
+; ALL: addiu $[[T0]], $[[T0]], 21845
+; ALL: dsll $[[T0]], $[[T0]], 16
+; ALL: addiu $[[T0]], $[[T0]], 21845
+; ALL: dsll $[[T0]], $[[T0]], 16
+; ALL: addiu $[[T0]], $[[T0]], 21846
+
+; ACC: dmult $4, $[[T0]]
+; ACC: mfhi $[[T1:[0-9]+]]
+; GPR: dmuh $[[T1:[0-9]+]], $4, $[[T0]]
+
+; ALL: dsrl $2, $[[T1]], 63
+; ALL: daddu $2, $[[T1]], $2
%div = sdiv i64 %a, 3
ret i64 %div
}
define i64 @d0(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddivu
-; CHECK: mflo
+; ALL-LABEL: d0:
+; ACC: ddivu $zero, $4, $5
+; ACC: mflo $2
+; GPR: ddivu $2, $4, $5
%div = udiv i64 %a0, %a1
ret i64 %div
}
define i64 @d1(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddiv
-; CHECK: mflo
+; ALL-LABEL: d1:
+; ACC: ddiv $zero, $4, $5
+; ACC: mflo $2
+; GPR: ddiv $2, $4, $5
%div = sdiv i64 %a0, %a1
ret i64 %div
}
define i64 @d2(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddivu
-; CHECK: mfhi
+; ALL-LABEL: d2:
+; ACC: ddivu $zero, $4, $5
+; ACC: mfhi $2
+; GPR: dmodu $2, $4, $5
%rem = urem i64 %a0, %a1
ret i64 %rem
}
define i64 @d3(i64 %a0, i64 %a1) nounwind readnone {
entry:
-; CHECK: ddiv
-; CHECK: mfhi
+; ALL-LABEL: d3:
+; ACC: ddiv $zero, $4, $5
+; ACC: mfhi $2
+; GPR: dmod $2, $4, $5
%rem = srem i64 %a0, %a1
ret i64 %rem
}
diff --git a/test/CodeGen/Mips/mips64r6/compatibility.ll b/test/CodeGen/Mips/mips64r6/compatibility.ll
new file mode 100644
index 000000000000..429f68d784bb
--- /dev/null
+++ b/test/CodeGen/Mips/mips64r6/compatibility.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=mipsel -mcpu=mips64r6 < %s | FileCheck %s
+; RUN: not llc -march=mipsel -mcpu=mips64r6 -mattr=+dsp < %s 2>&1 | FileCheck --check-prefix=DSP %s
+
+; CHECK: foo:
+; DSP: MIPS64r6 is not compatible with the DSP ASE
+
+define void @foo() nounwind {
+ ret void
+}
diff --git a/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
index f4854f880542..db653eadf2f7 100644
--- a/test/CodeGen/Mips/mno-ldc1-sdc1.ll
+++ b/test/CodeGen/Mips/mno-ldc1-sdc1.ll
@@ -1,33 +1,113 @@
-; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 -mcpu=mips32r2 \
-; RUN: < %s | FileCheck %s -check-prefix=LE-PIC
-; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 < %s | \
-; RUN: FileCheck %s -check-prefix=LE-STATIC
-; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 < %s | \
-; RUN: FileCheck %s -check-prefix=BE-PIC
+; Check that [sl]dc1 are normally emitted. MIPS32r2 should have [sl]dxc1 too.
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1-LDC1
; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | \
-; RUN: FileCheck %s -check-prefix=CHECK-LDC1-SDC1
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2-LDXC1
+; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6-LDC1
+
+; Check that -mno-ldc1-sdc1 disables [sl]dc1
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1 \
+; RUN: -check-prefix=32R1-LE -check-prefix=32R1-LE-PIC
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2 \
+; RUN: -check-prefix=32R2-LE -check-prefix=32R2-LE-PIC
+; RUN: llc -march=mipsel -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6 \
+; RUN: -check-prefix=32R6-LE -check-prefix=32R6-LE-PIC
+
+; Check again for big-endian
+; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1 \
+; RUN: -check-prefix=32R1-BE -check-prefix=32R1-BE-PIC
+; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2 \
+; RUN: -check-prefix=32R2-BE -check-prefix=32R2-BE-PIC
+; RUN: llc -march=mips -relocation-model=pic -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6 \
+; RUN: -check-prefix=32R6-BE -check-prefix=32R6-BE-PIC
+
+; Check again for the static relocation model
+; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R1 \
+; RUN: -check-prefix=32R1-LE -check-prefix=32R1-LE-STATIC
+; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r2 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R2 \
+; RUN: -check-prefix=32R2-LE -check-prefix=32R2-LE-STATIC
+; RUN: llc -march=mipsel -relocation-model=static -mno-ldc1-sdc1 \
+; RUN: -mcpu=mips32r6 < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=32R6 \
+; RUN: -check-prefix=32R6-LE -check-prefix=32R6-LE-STATIC
@g0 = common global double 0.000000e+00, align 8
-; LE-PIC-LABEL: test_ldc1:
-; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
-; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
-; LE-PIC-DAG: mtc1 $[[R0]], $f0
-; LE-PIC-DAG: mtc1 $[[R1]], $f1
-; LE-STATIC-LABEL: test_ldc1:
-; LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
-; LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
-; LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
-; LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
-; LE-STATIC-DAG: mtc1 $[[R1]], $f0
-; LE-STATIC-DAG: mtc1 $[[R3]], $f1
-; BE-PIC-LABEL: test_ldc1:
-; BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
-; BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
-; BE-PIC-DAG: mtc1 $[[R1]], $f0
-; BE-PIC-DAG: mtc1 $[[R0]], $f1
-; CHECK-LDC1-SDC1-LABEL: test_ldc1:
-; CHECK-LDC1-SDC1: ldc1 $f{{[0-9]+}}
+; ALL-LABEL: test_ldc1:
+
+; 32R1-LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-LE-PIC-DAG: mtc1 $[[R0]], $f0
+; 32R1-LE-PIC-DAG: mtc1 $[[R1]], $f1
+
+; 32R2-LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-LE-PIC-DAG: mtc1 $[[R0]], $f0
+; 32R2-LE-PIC-DAG: mthc1 $[[R1]], $f0
+
+; 32R6-LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-LE-PIC-DAG: mtc1 $[[R0]], $f0
+; 32R6-LE-PIC-DAG: mthc1 $[[R1]], $f0
+
+; 32R1-LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; 32R1-LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; 32R1-LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; 32R1-LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; 32R1-LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; 32R1-LE-STATIC-DAG: mtc1 $[[R3]], $f1
+
+; 32R2-LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; 32R2-LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; 32R2-LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; 32R2-LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; 32R2-LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; 32R2-LE-STATIC-DAG: mthc1 $[[R3]], $f0
+
+; 32R6-LE-STATIC-DAG: lui $[[R0:[0-9]+]], %hi(g0)
+; 32R6-LE-STATIC-DAG: lw $[[R1:[0-9]+]], %lo(g0)($[[R0]])
+; 32R6-LE-STATIC-DAG: addiu $[[R2:[0-9]+]], $[[R0]], %lo(g0)
+; 32R6-LE-STATIC-DAG: lw $[[R3:[0-9]+]], 4($[[R2]])
+; 32R6-LE-STATIC-DAG: mtc1 $[[R1]], $f0
+; 32R6-LE-STATIC-DAG: mthc1 $[[R3]], $f0
+
+; 32R1-BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-BE-PIC-DAG: mtc1 $[[R1]], $f0
+; 32R1-BE-PIC-DAG: mtc1 $[[R0]], $f1
+
+; 32R2-BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-BE-PIC-DAG: mtc1 $[[R1]], $f0
+; 32R2-BE-PIC-DAG: mthc1 $[[R0]], $f0
+
+; 32R6-BE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-BE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-BE-PIC-DAG: mtc1 $[[R1]], $f0
+; 32R6-BE-PIC-DAG: mthc1 $[[R0]], $f0
+
+; 32R1-LDC1: ldc1 $f0, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: ldc1 $f0, 0(${{[0-9]+}})
+
+; 32R6-LDC1: ldc1 $f0, 0(${{[0-9]+}})
define double @test_ldc1() {
entry:
@@ -35,25 +115,64 @@ entry:
ret double %0
}
-; LE-PIC-LABEL: test_sdc1:
-; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
-; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
-; LE-STATIC-LABEL: test_sdc1:
-; LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; LE-STATIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
-; LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
-; LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
-; LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
-; BE-PIC-LABEL: test_sdc1:
-; BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; BE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
-; BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
-; CHECK-LDC1-SDC1-LABEL: test_sdc1:
-; CHECK-LDC1-SDC1: sdc1 $f{{[0-9]+}}
+; ALL-LABEL: test_sdc1:
+
+; 32R1-LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R1-LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R2-LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-LE-PIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R2-LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R2-LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R6-LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-LE-PIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R6-LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R1-LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-LE-STATIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; 32R1-LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; 32R1-LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; 32R1-LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
+
+; 32R2-LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-LE-STATIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R2-LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; 32R2-LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; 32R2-LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; 32R2-LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
+
+; 32R6-LE-STATIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-LE-STATIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-LE-STATIC-DAG: lui $[[R2:[0-9]+]], %hi(g0)
+; 32R6-LE-STATIC-DAG: sw $[[R0]], %lo(g0)($[[R2]])
+; 32R6-LE-STATIC-DAG: addiu $[[R3:[0-9]+]], $[[R2]], %lo(g0)
+; 32R6-LE-STATIC-DAG: sw $[[R1]], 4($[[R3]])
+
+; 32R1-BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-BE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; 32R1-BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
+
+; 32R2-BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-BE-PIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R2-BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; 32R2-BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
+
+; 32R6-BE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-BE-PIC-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-BE-PIC-DAG: sw $[[R1]], 0(${{[0-9]+}})
+; 32R6-BE-PIC-DAG: sw $[[R0]], 4(${{[0-9]+}})
+
+; 32R1-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+
+; 32R6-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
define void @test_sdc1(double %a) {
entry:
@@ -61,14 +180,35 @@ entry:
ret void
}
+; ALL-LABEL: test_ldxc1:
+
+; 32R1-LE-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-LE-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-BE-DAG: lw $[[R0:[0-9]+]], 4(${{[0-9]+}})
+; 32R1-BE-DAG: lw $[[R1:[0-9]+]], 0(${{[0-9]+}})
+; 32R1-DAG: mtc1 $[[R0]], $f0
+; 32R1-DAG: mtc1 $[[R1]], $f1
+
+; 32R2-LE-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-LE-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-BE-DAG: lw $[[R0:[0-9]+]], 4(${{[0-9]+}})
+; 32R2-BE-DAG: lw $[[R1:[0-9]+]], 0(${{[0-9]+}})
+; 32R2-DAG: mtc1 $[[R0]], $f0
+; 32R2-DAG: mthc1 $[[R1]], $f0
-; LE-PIC-LABEL: test_ldxc1:
-; LE-PIC-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
-; LE-PIC-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
-; LE-PIC-DAG: mtc1 $[[R0]], $f0
-; LE-PIC-DAG: mtc1 $[[R1]], $f1
-; CHECK-LDC1-SDC1-LABEL: test_ldxc1:
-; CHECK-LDC1-SDC1: ldxc1 $f{{[0-9]+}}
+; 32R6-LE-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-LE-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-BE-DAG: lw $[[R0:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-BE-DAG: lw $[[R1:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-DAG: mtc1 $[[R0]], $f0
+; 32R6-DAG: mthc1 $[[R1]], $f0
+
+; 32R1-LDC1: ldc1 $f0, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: sll $[[OFFSET:[0-9]+]], $5, 3
+; 32R2-LDXC1: ldxc1 $f0, $[[OFFSET]]($4)
+
+; 32R6-LDC1: ldc1 $f0, 0(${{[0-9]+}})
define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
entry:
@@ -77,13 +217,29 @@ entry:
ret double %0
}
-; LE-PIC-LABEL: test_sdxc1:
-; LE-PIC-DAG: mfc1 $[[R0:[0-9]+]], $f12
-; LE-PIC-DAG: mfc1 $[[R1:[0-9]+]], $f13
-; LE-PIC-DAG: sw $[[R0]], 0(${{[0-9]+}})
-; LE-PIC-DAG: sw $[[R1]], 4(${{[0-9]+}})
-; CHECK-LDC1-SDC1-LABEL: test_sdxc1:
-; CHECK-LDC1-SDC1: sdxc1 $f{{[0-9]+}}
+; ALL-LABEL: test_sdxc1:
+
+; 32R1-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R1-DAG: mfc1 $[[R1:[0-9]+]], $f13
+; 32R1-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R1-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R2-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R2-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R2-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R2-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R6-DAG: mfc1 $[[R0:[0-9]+]], $f12
+; 32R6-DAG: mfhc1 $[[R1:[0-9]+]], $f12
+; 32R6-DAG: sw $[[R0]], 0(${{[0-9]+}})
+; 32R6-DAG: sw $[[R1]], 4(${{[0-9]+}})
+
+; 32R1-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+
+; 32R2-LDXC1: sll $[[OFFSET:[0-9]+]], $7, 3
+; 32R2-LDXC1: sdxc1 $f{{[0-9]+}}, $[[OFFSET]]($6)
+
+; 32R6-LDC1: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
define void @test_sdxc1(double %b, double* nocapture %a, i32 %i) {
entry:
diff --git a/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
index 6f6e1b9ce2f8..64e459e4d9a9 100644
--- a/test/CodeGen/Mips/msa/2r_vector_scalar.ll
+++ b/test/CodeGen/Mips/msa/2r_vector_scalar.ll
@@ -1,8 +1,14 @@
; Test the MSA intrinsics that are encoded with the 2R instruction format and
; convert scalars to vectors.
-; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
-; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32
+; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64
@llvm_mips_fill_b_ARG1 = global i32 23, align 16
@llvm_mips_fill_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
@@ -17,11 +23,12 @@ entry:
declare <16 x i8> @llvm.mips.fill.b(i32) nounwind
-; CHECK: llvm_mips_fill_b_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]],
-; CHECK-DAG: fill.b [[R2:\$w[0-9]+]], [[R1]]
-; CHECK-DAG: st.b [[R2]],
-; CHECK: .size llvm_mips_fill_b_test
+; MIPS-ANY: llvm_mips_fill_b_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]],
+; MIPS64-DAG: ld [[R1:\$[0-9]+]],
+; MIPS-ANY-DAG: fill.b [[R2:\$w[0-9]+]], [[R1]]
+; MIPS-ANY-DAG: st.b [[R2]],
+; MIPS-ANY: .size llvm_mips_fill_b_test
;
@llvm_mips_fill_h_ARG1 = global i32 23, align 16
@llvm_mips_fill_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
@@ -36,11 +43,12 @@ entry:
declare <8 x i16> @llvm.mips.fill.h(i32) nounwind
-; CHECK: llvm_mips_fill_h_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]],
-; CHECK-DAG: fill.h [[R2:\$w[0-9]+]], [[R1]]
-; CHECK-DAG: st.h [[R2]],
-; CHECK: .size llvm_mips_fill_h_test
+; MIPS-ANY: llvm_mips_fill_h_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]],
+; MIPS64-DAG: ld [[R1:\$[0-9]+]],
+; MIPS-ANY-DAG: fill.h [[R2:\$w[0-9]+]], [[R1]]
+; MIPS-ANY-DAG: st.h [[R2]],
+; MIPS-ANY: .size llvm_mips_fill_h_test
;
@llvm_mips_fill_w_ARG1 = global i32 23, align 16
@llvm_mips_fill_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
@@ -55,11 +63,12 @@ entry:
declare <4 x i32> @llvm.mips.fill.w(i32) nounwind
-; CHECK: llvm_mips_fill_w_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]],
-; CHECK-DAG: fill.w [[R2:\$w[0-9]+]], [[R1]]
-; CHECK-DAG: st.w [[R2]],
-; CHECK: .size llvm_mips_fill_w_test
+; MIPS-ANY: llvm_mips_fill_w_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]],
+; MIPS64-DAG: ld [[R1:\$[0-9]+]],
+; MIPS-ANY-DAG: fill.w [[R2:\$w[0-9]+]], [[R1]]
+; MIPS-ANY-DAG: st.w [[R2]],
+; MIPS-ANY: .size llvm_mips_fill_w_test
;
@llvm_mips_fill_d_ARG1 = global i64 23, align 16
@llvm_mips_fill_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
@@ -74,14 +83,18 @@ entry:
declare <2 x i64> @llvm.mips.fill.d(i64) nounwind
-; CHECK: llvm_mips_fill_d_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
-; CHECK-DAG: lw [[R2:\$[0-9]+]], 4(
-; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 0
-; CHECK-DAG: insert.w [[R3]][0], [[R1]]
-; CHECK-DAG: insert.w [[R3]][1], [[R2]]
-; CHECK-DAG: insert.w [[R3]][2], [[R1]]
-; CHECK-DAG: insert.w [[R3]][3], [[R2]]
-; CHECK-DAG: st.w [[R3]],
-; CHECK: .size llvm_mips_fill_d_test
-;
+; MIPS-ANY: llvm_mips_fill_d_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], 0(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], 4(
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_fill_d_ARG1)
+; MIPS32-DAG: ldi.b [[R3:\$w[0-9]+]], 0
+; MIPS32-DAG: insert.w [[R3]][0], [[R1]]
+; MIPS32-DAG: insert.w [[R3]][1], [[R2]]
+; MIPS32-DAG: insert.w [[R3]][2], [[R1]]
+; MIPS32-DAG: insert.w [[R3]][3], [[R2]]
+; MIPS64-DAG: fill.d [[WD:\$w[0-9]+]], [[R1]]
+; MIPS32-DAG: st.w [[R3]],
+; MIPS64-DAG: ld [[RD:\$[0-9]+]], %got_disp(llvm_mips_fill_d_RES)
+; MIPS64-DAG: st.d [[WD]], 0([[RD]])
+; MIPS-ANY: .size llvm_mips_fill_d_test
+;
\ No newline at end of file
diff --git a/test/CodeGen/Mips/msa/3r-s.ll b/test/CodeGen/Mips/msa/3r-s.ll
index 30cf265233e5..581c3bfd78af 100644
--- a/test/CodeGen/Mips/msa/3r-s.ll
+++ b/test/CodeGen/Mips/msa/3r-s.ll
@@ -5,98 +5,114 @@
; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
@llvm_mips_sld_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
-@llvm_mips_sld_b_ARG2 = global i32 10, align 16
+@llvm_mips_sld_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sld_b_ARG3 = global i32 10, align 16
@llvm_mips_sld_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
define void @llvm_mips_sld_b_test() nounwind {
entry:
%0 = load <16 x i8>* @llvm_mips_sld_b_ARG1
- %1 = load i32* @llvm_mips_sld_b_ARG2
- %2 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, i32 %1)
- store <16 x i8> %2, <16 x i8>* @llvm_mips_sld_b_RES
+ %1 = load <16 x i8>* @llvm_mips_sld_b_ARG2
+ %2 = load i32* @llvm_mips_sld_b_ARG3
+ %3 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1, i32 %2)
+ store <16 x i8> %3, <16 x i8>* @llvm_mips_sld_b_RES
ret void
}
-declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, i32) nounwind
+declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, <16 x i8>, i32) nounwind
; CHECK: llvm_mips_sld_b_test:
; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_b_ARG1)
; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_b_ARG2)
-; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
-; CHECK-DAG: sld.b [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_b_ARG3)
+; CHECK-DAG: ld.b [[WD:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]])
+; CHECK-DAG: sld.b [[WD]], [[WS]]{{\[}}[[RT]]{{\]}}
; CHECK-DAG: st.b [[WD]]
; CHECK: .size llvm_mips_sld_b_test
;
@llvm_mips_sld_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
-@llvm_mips_sld_h_ARG2 = global i32 10, align 16
+@llvm_mips_sld_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sld_h_ARG3 = global i32 10, align 16
@llvm_mips_sld_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
define void @llvm_mips_sld_h_test() nounwind {
entry:
%0 = load <8 x i16>* @llvm_mips_sld_h_ARG1
- %1 = load i32* @llvm_mips_sld_h_ARG2
- %2 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, i32 %1)
- store <8 x i16> %2, <8 x i16>* @llvm_mips_sld_h_RES
+ %1 = load <8 x i16>* @llvm_mips_sld_h_ARG2
+ %2 = load i32* @llvm_mips_sld_h_ARG3
+ %3 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1, i32 %2)
+ store <8 x i16> %3, <8 x i16>* @llvm_mips_sld_h_RES
ret void
}
-declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, i32) nounwind
+declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, <8 x i16>, i32) nounwind
; CHECK: llvm_mips_sld_h_test:
; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_h_ARG1)
-; CHECK-DAG: lw [[RT:\$[0-9]+]], %got(llvm_mips_sld_h_ARG2)
-; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
-; CHECK-DAG: sld.h [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_h_ARG2)
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_h_ARG3)
+; CHECK-DAG: ld.h [[WD:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]])
+; CHECK-DAG: sld.h [[WD]], [[WS]]{{\[}}[[RT]]{{\]}}
; CHECK-DAG: st.h [[WD]]
; CHECK: .size llvm_mips_sld_h_test
;
@llvm_mips_sld_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
-@llvm_mips_sld_w_ARG2 = global i32 10, align 16
+@llvm_mips_sld_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sld_w_ARG3 = global i32 10, align 16
@llvm_mips_sld_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
define void @llvm_mips_sld_w_test() nounwind {
entry:
%0 = load <4 x i32>* @llvm_mips_sld_w_ARG1
- %1 = load i32* @llvm_mips_sld_w_ARG2
- %2 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, i32 %1)
- store <4 x i32> %2, <4 x i32>* @llvm_mips_sld_w_RES
+ %1 = load <4 x i32>* @llvm_mips_sld_w_ARG2
+ %2 = load i32* @llvm_mips_sld_w_ARG3
+ %3 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1, i32 %2)
+ store <4 x i32> %3, <4 x i32>* @llvm_mips_sld_w_RES
ret void
}
-declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, i32) nounwind
+declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, <4 x i32>, i32) nounwind
; CHECK: llvm_mips_sld_w_test:
; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_w_ARG1)
-; CHECK-DAG: lw [[RT:\$[0-9]+]], %got(llvm_mips_sld_w_ARG2)
-; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
-; CHECK-DAG: sld.w [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_w_ARG2)
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_w_ARG3)
+; CHECK-DAG: ld.w [[WD:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]])
+; CHECK-DAG: sld.w [[WD]], [[WS]]{{\[}}[[RT]]{{\]}}
; CHECK-DAG: st.w [[WD]]
; CHECK: .size llvm_mips_sld_w_test
;
@llvm_mips_sld_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
-@llvm_mips_sld_d_ARG2 = global i32 10, align 16
+@llvm_mips_sld_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sld_d_ARG3 = global i32 10, align 16
@llvm_mips_sld_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
define void @llvm_mips_sld_d_test() nounwind {
entry:
%0 = load <2 x i64>* @llvm_mips_sld_d_ARG1
- %1 = load i32* @llvm_mips_sld_d_ARG2
- %2 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, i32 %1)
- store <2 x i64> %2, <2 x i64>* @llvm_mips_sld_d_RES
+ %1 = load <2 x i64>* @llvm_mips_sld_d_ARG2
+ %2 = load i32* @llvm_mips_sld_d_ARG3
+ %3 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1, i32 %2)
+ store <2 x i64> %3, <2 x i64>* @llvm_mips_sld_d_RES
ret void
}
-declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, i32) nounwind
+declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, <2 x i64>, i32) nounwind
; CHECK: llvm_mips_sld_d_test:
; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_sld_d_ARG1)
-; CHECK-DAG: lw [[RT:\$[0-9]+]], %got(llvm_mips_sld_d_ARG2)
-; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R2]])
-; CHECK-DAG: sld.d [[WD:\$w[0-9]+]], [[WS]]{{\[}}[[RT]]{{\]}}
+; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_sld_d_ARG2)
+; CHECK-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_sld_d_ARG3)
+; CHECK-DAG: ld.d [[WD:\$w[0-9]+]], 0([[R1]])
+; CHECK-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R2]])
+; CHECK-DAG: lw [[RT:\$[0-9]+]], 0([[R3]])
+; CHECK-DAG: sld.d [[WD]], [[WS]]{{\[}}[[RT]]{{\]}}
; CHECK-DAG: st.d [[WD]]
; CHECK: .size llvm_mips_sld_d_test
;
diff --git a/test/CodeGen/Mips/msa/arithmetic_float.ll b/test/CodeGen/Mips/msa/arithmetic_float.ll
index dc3872129205..86e57ac85a3b 100644
--- a/test/CodeGen/Mips/msa/arithmetic_float.ll
+++ b/test/CodeGen/Mips/msa/arithmetic_float.ll
@@ -295,7 +295,8 @@ define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
%3 = fmul <2 x double> <double 2.0, double 2.0>, %2
- ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo(
+ ; CHECK-DAG: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[G_PTR]])
; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
store <2 x double> %3, <2 x double>* %c
; CHECK-DAG: st.d [[R4]], 0($4)
diff --git a/test/CodeGen/Mips/msa/basic_operations.ll b/test/CodeGen/Mips/msa/basic_operations.ll
index 0169a0780d36..dbdf42be49ca 100644
--- a/test/CodeGen/Mips/msa/basic_operations.ll
+++ b/test/CodeGen/Mips/msa/basic_operations.ll
@@ -6,10 +6,11 @@
@v8i16 = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@v4i32 = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@v2i64 = global <2 x i64> <i64 0, i64 0>
+@i32 = global i32 0
@i64 = global i64 0
define void @const_v16i8() nounwind {
- ; MIPS32-AE: const_v16i8:
+ ; MIPS32-AE-LABEL: const_v16i8:
store volatile <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8>*@v16i8
; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
@@ -18,10 +19,12 @@ define void @const_v16i8() nounwind {
; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8
- ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8
- ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8
; MIPS32-BE: ldi.h [[R1:\$w[0-9]+]], 256
@@ -35,14 +38,15 @@ define void @const_v16i8() nounwind {
; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
- ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[G_PTR]])
ret void
; MIPS32-AE: .size const_v16i8
}
define void @const_v8i16() nounwind {
- ; MIPS32-AE: const_v8i16:
+ ; MIPS32-AE-LABEL: const_v8i16:
store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16>*@v8i16
; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
@@ -51,7 +55,8 @@ define void @const_v8i16() nounwind {
; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, <8 x i16>*@v8i16
- ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, <8 x i16>*@v8i16
; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 4
@@ -64,14 +69,15 @@ define void @const_v8i16() nounwind {
; MIPS32-AE-DAG: fill.w [[R1:\$w[0-9]+]], [[R2]]
store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16
- ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[G_PTR]])
ret void
; MIPS32-AE: .size const_v8i16
}
define void @const_v4i32() nounwind {
- ; MIPS32-AE: const_v4i32:
+ ; MIPS32-AE-LABEL: const_v4i32:
store volatile <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>*@v4i32
; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
@@ -80,7 +86,8 @@ define void @const_v4i32() nounwind {
; MIPS32-AE: ldi.w [[R1:\$w[0-9]+]], 1
store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, <4 x i32>*@v4i32
- ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, <4 x i32>*@v4i32
; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 1
@@ -89,17 +96,19 @@ define void @const_v4i32() nounwind {
; MIPS32-AE: ldi.h [[R1:\$w[0-9]+]], 1
store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, <4 x i32>*@v4i32
- ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, <4 x i32>*@v4i32
- ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
ret void
; MIPS32-AE: .size const_v4i32
}
define void @const_v2i64() nounwind {
- ; MIPS32-AE: const_v2i64:
+ ; MIPS32-AE-LABEL: const_v2i64:
store volatile <2 x i64> <i64 0, i64 0>, <2 x i64>*@v2i64
; MIPS32-AE: ldi.b [[R1:\$w[0-9]+]], 0
@@ -117,17 +126,19 @@ define void @const_v2i64() nounwind {
; MIPS32-AE: ldi.d [[R1:\$w[0-9]+]], 1
store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64
- ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64
- ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32-AE: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
ret void
; MIPS32-AE: .size const_v2i64
}
define void @nonconst_v16i8(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g, i8 %h) nounwind {
- ; MIPS32-AE: nonconst_v16i8:
+ ; MIPS32-AE-LABEL: nonconst_v16i8:
%1 = insertelement <16 x i8> undef, i8 %a, i32 0
%2 = insertelement <16 x i8> %1, i8 %b, i32 1
@@ -177,7 +188,7 @@ define void @nonconst_v16i8(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g, i8
}
define void @nonconst_v8i16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g, i16 %h) nounwind {
- ; MIPS32-AE: nonconst_v8i16:
+ ; MIPS32-AE-LABEL: nonconst_v8i16:
%1 = insertelement <8 x i16> undef, i16 %a, i32 0
%2 = insertelement <8 x i16> %1, i16 %b, i32 1
@@ -211,7 +222,7 @@ define void @nonconst_v8i16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16
}
define void @nonconst_v4i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
- ; MIPS32-AE: nonconst_v4i32:
+ ; MIPS32-AE-LABEL: nonconst_v4i32:
%1 = insertelement <4 x i32> undef, i32 %a, i32 0
%2 = insertelement <4 x i32> %1, i32 %b, i32 1
@@ -229,7 +240,7 @@ define void @nonconst_v4i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
}
define void @nonconst_v2i64(i64 %a, i64 %b) nounwind {
- ; MIPS32-AE: nonconst_v2i64:
+ ; MIPS32-AE-LABEL: nonconst_v2i64:
%1 = insertelement <2 x i64> undef, i64 %a, i32 0
%2 = insertelement <2 x i64> %1, i64 %b, i32 1
@@ -245,7 +256,7 @@ define void @nonconst_v2i64(i64 %a, i64 %b) nounwind {
}
define i32 @extract_sext_v16i8() nounwind {
- ; MIPS32-AE: extract_sext_v16i8:
+ ; MIPS32-AE-LABEL: extract_sext_v16i8:
%1 = load <16 x i8>* @v16i8
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
@@ -264,7 +275,7 @@ define i32 @extract_sext_v16i8() nounwind {
}
define i32 @extract_sext_v8i16() nounwind {
- ; MIPS32-AE: extract_sext_v8i16:
+ ; MIPS32-AE-LABEL: extract_sext_v8i16:
%1 = load <8 x i16>* @v8i16
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
@@ -283,7 +294,7 @@ define i32 @extract_sext_v8i16() nounwind {
}
define i32 @extract_sext_v4i32() nounwind {
- ; MIPS32-AE: extract_sext_v4i32:
+ ; MIPS32-AE-LABEL: extract_sext_v4i32:
%1 = load <4 x i32>* @v4i32
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
@@ -299,7 +310,7 @@ define i32 @extract_sext_v4i32() nounwind {
}
define i64 @extract_sext_v2i64() nounwind {
- ; MIPS32-AE: extract_sext_v2i64:
+ ; MIPS32-AE-LABEL: extract_sext_v2i64:
%1 = load <2 x i64>* @v2i64
; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
@@ -318,7 +329,7 @@ define i64 @extract_sext_v2i64() nounwind {
}
define i32 @extract_zext_v16i8() nounwind {
- ; MIPS32-AE: extract_zext_v16i8:
+ ; MIPS32-AE-LABEL: extract_zext_v16i8:
%1 = load <16 x i8>* @v16i8
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
@@ -336,7 +347,7 @@ define i32 @extract_zext_v16i8() nounwind {
}
define i32 @extract_zext_v8i16() nounwind {
- ; MIPS32-AE: extract_zext_v8i16:
+ ; MIPS32-AE-LABEL: extract_zext_v8i16:
%1 = load <8 x i16>* @v8i16
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
@@ -354,7 +365,7 @@ define i32 @extract_zext_v8i16() nounwind {
}
define i32 @extract_zext_v4i32() nounwind {
- ; MIPS32-AE: extract_zext_v4i32:
+ ; MIPS32-AE-LABEL: extract_zext_v4i32:
%1 = load <4 x i32>* @v4i32
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
@@ -370,7 +381,7 @@ define i32 @extract_zext_v4i32() nounwind {
}
define i64 @extract_zext_v2i64() nounwind {
- ; MIPS32-AE: extract_zext_v2i64:
+ ; MIPS32-AE-LABEL: extract_zext_v2i64:
%1 = load <2 x i64>* @v2i64
; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
@@ -387,8 +398,200 @@ define i64 @extract_zext_v2i64() nounwind {
; MIPS32-AE: .size extract_zext_v2i64
}
+define i32 @extract_sext_v16i8_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_sext_v16i8_vidx:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)(
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <16 x i8> %1, %1
+ ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <16 x i8> %2, i32 %3
+ %5 = sext i8 %4 to i32
+ ; MIPS32-AE-DAG: splat.b $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-DAG: sra [[R6:\$[0-9]+]], [[R5]], 24
+
+ ret i32 %5
+ ; MIPS32-AE: .size extract_sext_v16i8_vidx
+}
+
+define i32 @extract_sext_v8i16_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_sext_v8i16_vidx:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)(
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <8 x i16> %1, %1
+ ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <8 x i16> %2, i32 %3
+ %5 = sext i16 %4 to i32
+ ; MIPS32-AE-DAG: splat.h $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-DAG: sra [[R6:\$[0-9]+]], [[R5]], 16
+
+ ret i32 %5
+ ; MIPS32-AE: .size extract_sext_v8i16_vidx
+}
+
+define i32 @extract_sext_v4i32_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_sext_v4i32_vidx:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)(
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <4 x i32> %1, %1
+ ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <4 x i32> %2, i32 %3
+ ; MIPS32-AE-DAG: splat.w $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-NOT: sra
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_sext_v4i32_vidx
+}
+
+define i64 @extract_sext_v2i64_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_sext_v2i64_vidx:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)(
+ ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <2 x i64> %1, %1
+ ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <2 x i64> %2, i32 %3
+ ; MIPS32-AE-DAG: splat.w $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-DAG: splat.w $w[[R4:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R6:\$[0-9]+]], $f[[R4]]
+ ; MIPS32-AE-NOT: sra
+
+ ret i64 %4
+ ; MIPS32-AE: .size extract_sext_v2i64_vidx
+}
+
+define i32 @extract_zext_v16i8_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_zext_v16i8_vidx:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)(
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <16 x i8> %1, %1
+ ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <16 x i8> %2, i32 %3
+ %5 = zext i8 %4 to i32
+ ; MIPS32-AE-DAG: splat.b $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-DAG: srl [[R6:\$[0-9]+]], [[R5]], 24
+
+ ret i32 %5
+ ; MIPS32-AE: .size extract_zext_v16i8_vidx
+}
+
+define i32 @extract_zext_v8i16_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_zext_v8i16_vidx:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)(
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <8 x i16> %1, %1
+ ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <8 x i16> %2, i32 %3
+ %5 = zext i16 %4 to i32
+ ; MIPS32-AE-DAG: splat.h $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-DAG: srl [[R6:\$[0-9]+]], [[R5]], 16
+
+ ret i32 %5
+ ; MIPS32-AE: .size extract_zext_v8i16_vidx
+}
+
+define i32 @extract_zext_v4i32_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_zext_v4i32_vidx:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)(
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <4 x i32> %1, %1
+ ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <4 x i32> %2, i32 %3
+ ; MIPS32-AE-DAG: splat.w $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-NOT: srl
+
+ ret i32 %4
+ ; MIPS32-AE: .size extract_zext_v4i32_vidx
+}
+
+define i64 @extract_zext_v2i64_vidx() nounwind {
+ ; MIPS32-AE-LABEL: extract_zext_v2i64_vidx:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)(
+ ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = add <2 x i64> %1, %1
+ ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <2 x i64> %2, i32 %3
+ ; MIPS32-AE-DAG: splat.w $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]]
+ ; MIPS32-AE-DAG: splat.w $w[[R4:[0-9]+]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: mfc1 [[R6:\$[0-9]+]], $f[[R4]]
+ ; MIPS32-AE-NOT: srl
+
+ ret i64 %4
+ ; MIPS32-AE: .size extract_zext_v2i64_vidx
+}
+
define void @insert_v16i8(i32 %a) nounwind {
- ; MIPS32-AE: insert_v16i8:
+ ; MIPS32-AE-LABEL: insert_v16i8:
%1 = load <16 x i8>* @v16i8
; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
@@ -410,7 +613,7 @@ define void @insert_v16i8(i32 %a) nounwind {
}
define void @insert_v8i16(i32 %a) nounwind {
- ; MIPS32-AE: insert_v8i16:
+ ; MIPS32-AE-LABEL: insert_v8i16:
%1 = load <8 x i16>* @v8i16
; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
@@ -432,7 +635,7 @@ define void @insert_v8i16(i32 %a) nounwind {
}
define void @insert_v4i32(i32 %a) nounwind {
- ; MIPS32-AE: insert_v4i32:
+ ; MIPS32-AE-LABEL: insert_v4i32:
%1 = load <4 x i32>* @v4i32
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
@@ -451,7 +654,7 @@ define void @insert_v4i32(i32 %a) nounwind {
}
define void @insert_v2i64(i64 %a) nounwind {
- ; MIPS32-AE: insert_v2i64:
+ ; MIPS32-AE-LABEL: insert_v2i64:
%1 = load <2 x i64>* @v2i64
; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
@@ -470,8 +673,131 @@ define void @insert_v2i64(i64 %a) nounwind {
; MIPS32-AE: .size insert_v2i64
}
+define void @insert_v16i8_vidx(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v16i8_vidx:
+
+ %1 = load <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
+
+ %2 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %a2 = trunc i32 %a to i8
+ %a3 = sext i8 %a2 to i32
+ %a4 = trunc i32 %a3 to i8
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %3 = insertelement <16 x i8> %1, i8 %a4, i32 %2
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[IDX]]]
+ ; MIPS32-AE-DAG: insert.b [[R1]][0], $4
+ ; MIPS32-AE-DAG: neg [[NIDX:\$[0-9]+]], [[IDX]]
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[NIDX]]]
+
+ store <16 x i8> %3, <16 x i8>* @v16i8
+ ; MIPS32-AE-DAG: st.b [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v16i8_vidx
+}
+
+define void @insert_v8i16_vidx(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v8i16_vidx:
+
+ %1 = load <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
+
+ %2 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %a2 = trunc i32 %a to i16
+ %a3 = sext i16 %a2 to i32
+ %a4 = trunc i32 %a3 to i16
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %3 = insertelement <8 x i16> %1, i16 %a4, i32 %2
+ ; MIPS32-AE-DAG: sll [[BIDX:\$[0-9]+]], [[IDX]], 1
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[BIDX]]]
+ ; MIPS32-AE-DAG: insert.h [[R1]][0], $4
+ ; MIPS32-AE-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[NIDX]]]
+
+ store <8 x i16> %3, <8 x i16>* @v8i16
+ ; MIPS32-AE-DAG: st.h [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v8i16_vidx
+}
+
+define void @insert_v4i32_vidx(i32 %a) nounwind {
+ ; MIPS32-AE: insert_v4i32_vidx:
+
+ %1 = load <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %3 = insertelement <4 x i32> %1, i32 %a, i32 %2
+ ; MIPS32-AE-DAG: sll [[BIDX:\$[0-9]+]], [[IDX]], 2
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[BIDX]]]
+ ; MIPS32-AE-DAG: insert.w [[R1]][0], $4
+ ; MIPS32-AE-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[NIDX]]]
+
+ store <4 x i32> %3, <4 x i32>* @v4i32
+ ; MIPS32-AE-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v4i32_vidx
+}
+
+define void @insert_v2i64_vidx(i64 %a) nounwind {
+ ; MIPS32-AE: insert_v2i64_vidx:
+
+ %1 = load <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = load i32* @i32
+ ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ ; MIPS32-AE-NOT: andi
+ ; MIPS32-AE-NOT: sra
+
+ %3 = insertelement <2 x i64> %1, i64 %a, i32 %2
+ ; TODO: This code could be a lot better but it works. The legalizer splits
+ ; 64-bit inserts into two 32-bit inserts because there is no i64 type on
+ ; MIPS32. The obvious optimisation is to perform both insert.w's at once while
+ ; the vector is rotated.
+ ; MIPS32-AE-DAG: sll [[BIDX:\$[0-9]+]], [[IDX]], 2
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[BIDX]]]
+ ; MIPS32-AE-DAG: insert.w [[R1]][0], $4
+ ; MIPS32-AE-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[NIDX]]]
+ ; MIPS32-AE-DAG: addiu [[IDX2:\$[0-9]+]], [[IDX]], 1
+ ; MIPS32-AE-DAG: sll [[BIDX:\$[0-9]+]], [[IDX2]], 2
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[BIDX]]]
+ ; MIPS32-AE-DAG: insert.w [[R1]][0], $5
+ ; MIPS32-AE-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
+ ; MIPS32-AE-DAG: sld.b [[R1]], [[R1]]{{\[}}[[NIDX]]]
+
+ store <2 x i64> %3, <2 x i64>* @v2i64
+ ; MIPS32-AE-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32-AE: .size insert_v2i64_vidx
+}
+
define void @truncstore() nounwind {
- ; MIPS32-AE: truncstore:
+ ; MIPS32-AE-LABEL: truncstore:
store volatile <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8>*@v4i8
; TODO: What code should be emitted?
diff --git a/test/CodeGen/Mips/msa/basic_operations_float.ll b/test/CodeGen/Mips/msa/basic_operations_float.ll
index 1f538108a1fa..a0c9d29e231a 100644
--- a/test/CodeGen/Mips/msa/basic_operations_float.ll
+++ b/test/CodeGen/Mips/msa/basic_operations_float.ll
@@ -3,11 +3,12 @@
@v4f32 = global <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>
@v2f64 = global <2 x double> <double 0.0, double 0.0>
+@i32 = global i32 0
@f32 = global float 0.0
@f64 = global double 0.0
define void @const_v4f32() nounwind {
- ; MIPS32: const_v4f32:
+ ; MIPS32-LABEL: const_v4f32:
store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>*@v4f32
; MIPS32: ldi.b [[R1:\$w[0-9]+]], 0
@@ -17,7 +18,8 @@ define void @const_v4f32() nounwind {
; MIPS32: fill.w [[R2:\$w[0-9]+]], [[R1]]
store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32
- ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32
; MIPS32: lui [[R1:\$[0-9]+]], 18304
@@ -25,45 +27,53 @@ define void @const_v4f32() nounwind {
; MIPS32: fill.w [[R3:\$w[0-9]+]], [[R2]]
store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32
- ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32
- ; MIPS32: ld.w [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.w [[R1:\$w[0-9]+]], 0([[G_PTR]])
ret void
; MIPS32: .size const_v4f32
}
define void @const_v2f64() nounwind {
- ; MIPS32: const_v2f64:
+ ; MIPS32-LABEL: const_v2f64:
store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
; MIPS32: ldi.b [[R1:\$w[0-9]+]], 0
store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
- ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
- ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
- ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
- ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
- ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
- ; MIPS32: ld.d [[R1:\$w[0-9]+]], %lo(
+ ; MIPS32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
+ ; MIPS32: ld.d [[R1:\$w[0-9]+]], 0([[G_PTR]])
ret void
; MIPS32: .size const_v2f64
}
define void @nonconst_v4f32() nounwind {
- ; MIPS32: nonconst_v4f32:
+ ; MIPS32-LABEL: nonconst_v4f32:
%1 = load float *@f32
%2 = insertelement <4 x float> undef, float %1, i32 0
@@ -79,7 +89,7 @@ define void @nonconst_v4f32() nounwind {
}
define void @nonconst_v2f64() nounwind {
- ; MIPS32: nonconst_v2f64:
+ ; MIPS32-LABEL: nonconst_v2f64:
%1 = load double *@f64
%2 = insertelement <2 x double> undef, double %1, i32 0
@@ -93,7 +103,7 @@ define void @nonconst_v2f64() nounwind {
}
define float @extract_v4f32() nounwind {
- ; MIPS32: extract_v4f32:
+ ; MIPS32-LABEL: extract_v4f32:
%1 = load <4 x float>* @v4f32
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
@@ -111,7 +121,7 @@ define float @extract_v4f32() nounwind {
}
define float @extract_v4f32_elt0() nounwind {
- ; MIPS32: extract_v4f32_elt0:
+ ; MIPS32-LABEL: extract_v4f32_elt0:
%1 = load <4 x float>* @v4f32
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
@@ -128,8 +138,47 @@ define float @extract_v4f32_elt0() nounwind {
; MIPS32: .size extract_v4f32_elt0
}
+define float @extract_v4f32_elt2() nounwind {
+ ; MIPS32-LABEL: extract_v4f32_elt2:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
+
+ %2 = fadd <4 x float> %1, %1
+ ; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = extractelement <4 x float> %2, i32 2
+ ; Element 2 can be obtained by splatting it across the vector and extracting
+ ; $w0:sub_lo
+ ; MIPS32-DAG: splati.w $w0, [[R1]][2]
+
+ ret float %3
+ ; MIPS32: .size extract_v4f32_elt2
+}
+
+define float @extract_v4f32_vidx() nounwind {
+ ; MIPS32-LABEL: extract_v4f32_vidx:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = fadd <4 x float> %1, %1
+ ; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <4 x float> %2, i32 %3
+ ; MIPS32-DAG: splat.w $w0, [[R1]]{{\[}}[[IDX]]]
+
+ ret float %4
+ ; MIPS32: .size extract_v4f32_vidx
+}
+
define double @extract_v2f64() nounwind {
- ; MIPS32: extract_v2f64:
+ ; MIPS32-LABEL: extract_v2f64:
%1 = load <2 x double>* @v2f64
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
@@ -152,7 +201,7 @@ define double @extract_v2f64() nounwind {
}
define double @extract_v2f64_elt0() nounwind {
- ; MIPS32: extract_v2f64_elt0:
+ ; MIPS32-LABEL: extract_v2f64_elt0:
%1 = load <2 x double>* @v2f64
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
@@ -172,8 +221,29 @@ define double @extract_v2f64_elt0() nounwind {
; MIPS32: .size extract_v2f64_elt0
}
+define double @extract_v2f64_vidx() nounwind {
+ ; MIPS32-LABEL: extract_v2f64_vidx:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = fadd <2 x double> %1, %1
+ ; MIPS32-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
+
+ %3 = load i32* @i32
+ ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %4 = extractelement <2 x double> %2, i32 %3
+ ; MIPS32-DAG: splat.d $w0, [[R1]]{{\[}}[[IDX]]]
+
+ ret double %4
+ ; MIPS32: .size extract_v2f64_vidx
+}
+
define void @insert_v4f32(float %a) nounwind {
- ; MIPS32: insert_v4f32:
+ ; MIPS32-LABEL: insert_v4f32:
%1 = load <4 x float>* @v4f32
; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
@@ -190,7 +260,7 @@ define void @insert_v4f32(float %a) nounwind {
}
define void @insert_v2f64(double %a) nounwind {
- ; MIPS32: insert_v2f64:
+ ; MIPS32-LABEL: insert_v2f64:
%1 = load <2 x double>* @v2f64
; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
@@ -205,3 +275,55 @@ define void @insert_v2f64(double %a) nounwind {
ret void
; MIPS32: .size insert_v2f64
}
+
+define void @insert_v4f32_vidx(float %a) nounwind {
+ ; MIPS32-LABEL: insert_v4f32_vidx:
+
+ %1 = load <4 x float>* @v4f32
+ ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
+ ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = load i32* @i32
+ ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %3 = insertelement <4 x float> %1, float %a, i32 %2
+ ; float argument passed in $f12
+ ; MIPS32-DAG: sll [[BIDX:\$[0-9]+]], [[IDX]], 2
+ ; MIPS32-DAG: sld.b [[R1]], [[R1]]{{\[}}[[BIDX]]]
+ ; MIPS32-DAG: insve.w [[R1]][0], $w12[0]
+ ; MIPS32-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
+ ; MIPS32-DAG: sld.b [[R1]], [[R1]]{{\[}}[[NIDX]]]
+
+ store <4 x float> %3, <4 x float>* @v4f32
+ ; MIPS32-DAG: st.w [[R1]]
+
+ ret void
+ ; MIPS32: .size insert_v4f32_vidx
+}
+
+define void @insert_v2f64_vidx(double %a) nounwind {
+ ; MIPS32-LABEL: insert_v2f64_vidx:
+
+ %1 = load <2 x double>* @v2f64
+ ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
+ ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
+
+ %2 = load i32* @i32
+ ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
+ ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
+
+ %3 = insertelement <2 x double> %1, double %a, i32 %2
+ ; double argument passed in $f12
+ ; MIPS32-DAG: sll [[BIDX:\$[0-9]+]], [[IDX]], 3
+ ; MIPS32-DAG: sld.b [[R1]], [[R1]]{{\[}}[[BIDX]]]
+ ; MIPS32-DAG: insve.d [[R1]][0], $w12[0]
+ ; MIPS32-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
+ ; MIPS32-DAG: sld.b [[R1]], [[R1]]{{\[}}[[NIDX]]]
+
+ store <2 x double> %3, <2 x double>* @v2f64
+ ; MIPS32-DAG: st.d [[R1]]
+
+ ret void
+ ; MIPS32: .size insert_v2f64_vidx
+}
diff --git a/test/CodeGen/Mips/msa/bitwise.ll b/test/CodeGen/Mips/msa/bitwise.ll
index 9a88c47b7e1f..5d57198a9355 100644
--- a/test/CodeGen/Mips/msa/bitwise.ll
+++ b/test/CodeGen/Mips/msa/bitwise.ll
@@ -990,9 +990,10 @@ define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>*
%6 = and <16 x i8> %2, %4
%7 = or <16 x i8> %5, %6
; bmnz is the same operation
- ; CHECK-DAG: bmnz.v [[R1]], [[R2]], [[R3]]
+ ; (vselect Mask, IfSet, IfClr) -> (BMNZ IfClr, IfSet, Mask)
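+ ; (For reference, bmnz.v wd, ws, wt computes roughly wd = (ws & wt) | (wd & ~wt),
+ ; so the mask must be the wt operand and the old wd value supplies the IfClr bits.)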
+ ; CHECK-DAG: bmnz.v [[R2]], [[R1]], [[R3]]
store <16 x i8> %7, <16 x i8>* %c
- ; CHECK-DAG: st.b [[R1]], 0($4)
+ ; CHECK-DAG: st.b [[R2]], 0($4)
ret void
; CHECK: .size bsel_v16i8
diff --git a/test/CodeGen/Mips/msa/compare.ll b/test/CodeGen/Mips/msa/compare.ll
index 6408d7ba09f4..87ca1482da81 100644
--- a/test/CodeGen/Mips/msa/compare.ll
+++ b/test/CodeGen/Mips/msa/compare.ll
@@ -761,7 +761,8 @@ define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
%4 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
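+ ; (bsel.v wd, ws, wt computes roughly wd = (wd & wt) | (~wd & ws): mask bits
+ ; that are set select from wt, clear bits select from ws.)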
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <8 x i16> %5, <8 x i16>* %d
; CHECK-DAG: st.h [[R4]], 0($4)
@@ -782,7 +783,8 @@ define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
%4 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <4 x i32> %5, <4 x i32>* %d
; CHECK-DAG: st.w [[R4]], 0($4)
@@ -803,7 +805,8 @@ define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
%4 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <2 x i64> %5, <2 x i64>* %d
; CHECK-DAG: st.d [[R4]], 0($4)
@@ -846,7 +849,8 @@ define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
%4 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <8 x i16> %5, <8 x i16>* %d
; CHECK-DAG: st.h [[R4]], 0($4)
@@ -867,7 +871,8 @@ define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
%4 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <4 x i32> %5, <4 x i32>* %d
; CHECK-DAG: st.w [[R4]], 0($4)
@@ -888,7 +893,8 @@ define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
%4 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <2 x i64> %5, <2 x i64>* %d
; CHECK-DAG: st.d [[R4]], 0($4)
@@ -906,7 +912,7 @@ define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <16 x i8> %1, %2
; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1
; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
store <16 x i8> %4, <16 x i8>* %d
; CHECK-DAG: st.b [[R4]], 0($4)
@@ -925,7 +931,7 @@ define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <8 x i16> %1, %2
; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
store <8 x i16> %4, <8 x i16>* %d
@@ -945,7 +951,7 @@ define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <4 x i32> %1, %2
; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
store <4 x i32> %4, <4 x i32>* %d
@@ -965,7 +971,7 @@ define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp sgt <2 x i64> %1, %2
; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ %4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
store <2 x i64> %4, <2 x i64>* %d
@@ -985,7 +991,7 @@ define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <16 x i8> %1, %2
; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1
; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
store <16 x i8> %4, <16 x i8>* %d
; CHECK-DAG: st.b [[R4]], 0($4)
@@ -1004,7 +1010,7 @@ define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <8 x i16> %1, %2
; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
store <8 x i16> %4, <8 x i16>* %d
@@ -1024,7 +1030,7 @@ define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <4 x i32> %1, %2
; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
store <4 x i32> %4, <4 x i32>* %d
@@ -1044,7 +1050,7 @@ define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = icmp ugt <2 x i64> %1, %2
; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
- %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
+ %4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
store <2 x i64> %4, <2 x i64>* %d
diff --git a/test/CodeGen/Mips/msa/compare_float.ll b/test/CodeGen/Mips/msa/compare_float.ll
index 2fc61f89c7fa..e93221b93612 100644
--- a/test/CodeGen/Mips/msa/compare_float.ll
+++ b/test/CodeGen/Mips/msa/compare_float.ll
@@ -32,12 +32,9 @@ define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) noun
store <2 x i64> %4, <2 x i64>* %c
ret void
- ; FIXME: This code is correct, but poor. Ideally it would be similar to
- ; the code in @false_v4f32
+ ; (setcc $a, $b, SETFALSE) is always folded
; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
- ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
- ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
- ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; CHECK-DAG: st.w [[R1]], 0($4)
; CHECK: .size false_v2f64
}
@@ -509,12 +506,9 @@ define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounw
store <2 x i64> %4, <2 x i64>* %c
ret void
- ; FIXME: This code is correct, but poor. Ideally it would be similar to
- ; the code in @true_v4f32
- ; CHECK-DAG: ldi.d [[R1:\$w[0-9]+]], 1
- ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
- ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
- ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; (setcc $a, $b, SETTRUE) is always folded.
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], -1
+ ; CHECK-DAG: st.w [[R1]], 0($4)
; CHECK: .size true_v2f64
}
@@ -531,7 +525,8 @@ define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
%4 = fcmp ogt <4 x float> %1, %2
; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <4 x float> %5, <4 x float>* %d
; CHECK-DAG: st.w [[R4]], 0($4)
@@ -552,7 +547,8 @@ define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
%4 = fcmp ogt <2 x double> %1, %2
; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
store <2 x double> %5, <2 x double>* %d
; CHECK-DAG: st.d [[R4]], 0($4)
@@ -571,7 +567,8 @@ define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
%3 = fcmp ogt <4 x float> %1, %2
; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
store <4 x float> %4, <4 x float>* %d
; CHECK-DAG: st.w [[R4]], 0($4)
@@ -590,7 +587,8 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
%3 = fcmp ogt <2 x double> %1, %2
; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
%4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer
- ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3:\$w[0-9]+]]
+ ; Note that IfSet and IfClr are swapped since the condition is inverted
+ ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
store <2 x double> %4, <2 x double>* %d
; CHECK-DAG: st.d [[R4]], 0($4)
diff --git a/test/CodeGen/Mips/msa/elm_copy.ll b/test/CodeGen/Mips/msa/elm_copy.ll
index ed3e52cbffc2..0dd75fa3db12 100644
--- a/test/CodeGen/Mips/msa/elm_copy.ll
+++ b/test/CodeGen/Mips/msa/elm_copy.ll
@@ -1,8 +1,14 @@
; Test the MSA intrinsics that are encoded with the ELM instruction format and
; are element extraction operations.
-; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
-; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32
+; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64
@llvm_mips_copy_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_copy_s_b_RES = global i32 0, align 16
@@ -17,11 +23,15 @@ entry:
declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
-; CHECK: llvm_mips_copy_s_b_test:
-; CHECK: ld.b
-; CHECK: copy_s.b
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_s_b_test
+; MIPS-ANY: llvm_mips_copy_s_b_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_b_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_b_ARG1)
+; MIPS-ANY-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: copy_s.b [[RD:\$[0-9]+]], [[WS]][1]
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_b_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_b_RES)
+; MIPS-ANY-DAG: sw [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_s_b_test
;
@llvm_mips_copy_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_copy_s_h_RES = global i32 0, align 16
@@ -36,11 +46,15 @@ entry:
declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
-; CHECK: llvm_mips_copy_s_h_test:
-; CHECK: ld.h
-; CHECK: copy_s.h
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_s_h_test
+; MIPS-ANY: llvm_mips_copy_s_h_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_h_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_h_ARG1)
+; MIPS-ANY-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: copy_s.h [[RD:\$[0-9]+]], [[WS]][1]
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_h_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_h_RES)
+; MIPS-ANY-DAG: sw [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_s_h_test
;
@llvm_mips_copy_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_copy_s_w_RES = global i32 0, align 16
@@ -55,11 +69,15 @@ entry:
declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
-; CHECK: llvm_mips_copy_s_w_test:
-; CHECK: ld.w
-; CHECK: copy_s.w
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_s_w_test
+; MIPS-ANY: llvm_mips_copy_s_w_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_w_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_w_ARG1)
+; MIPS-ANY-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: copy_s.w [[RD:\$[0-9]+]], [[WS]][1]
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_w_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_w_RES)
+; MIPS-ANY-DAG: sw [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_s_w_test
;
@llvm_mips_copy_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_copy_s_d_RES = global i64 0, align 16
@@ -74,13 +92,20 @@ entry:
declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind
-; CHECK: llvm_mips_copy_s_d_test:
-; CHECK: ld.w
-; CHECK: copy_s.w
-; CHECK: copy_s.w
-; CHECK: sw
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_s_d_test
+; MIPS-ANY: llvm_mips_copy_s_d_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_s_d_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_s_d_ARG1)
+; MIPS32-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS64-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: copy_s.w [[RD1:\$[0-9]+]], [[WS]][2]
+; MIPS32-DAG: copy_s.w [[RD2:\$[0-9]+]], [[WS]][3]
+; MIPS64-DAG: copy_s.d [[RD:\$[0-9]+]], [[WS]][1]
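+; (MIPS32 has no 64-bit GPRs, so i64 element 1 is read back as its two word
+; halves, elements 2 and 3 of the same register; MIPS64 can use copy_s.d.)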
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_s_d_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_s_d_RES)
+; MIPS32-DAG: sw [[RD1]], 0([[RES]])
+; MIPS32-DAG: sw [[RD2]], 4([[RES]])
+; MIPS64-DAG: sd [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_s_d_test
;
@llvm_mips_copy_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_copy_u_b_RES = global i32 0, align 16
@@ -95,11 +120,15 @@ entry:
declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind
-; CHECK: llvm_mips_copy_u_b_test:
-; CHECK: ld.b
-; CHECK: copy_u.b
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_u_b_test
+; MIPS-ANY: llvm_mips_copy_u_b_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_b_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_b_ARG1)
+; MIPS-ANY-DAG: ld.b [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: copy_u.b [[RD:\$[0-9]+]], [[WS]][1]
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_b_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_b_RES)
+; MIPS-ANY-DAG: sw [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_u_b_test
;
@llvm_mips_copy_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_copy_u_h_RES = global i32 0, align 16
@@ -114,11 +143,15 @@ entry:
declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind
-; CHECK: llvm_mips_copy_u_h_test:
-; CHECK: ld.h
-; CHECK: copy_u.h
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_u_h_test
+; MIPS-ANY: llvm_mips_copy_u_h_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_h_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_h_ARG1)
+; MIPS-ANY-DAG: ld.h [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: copy_u.h [[RD:\$[0-9]+]], [[WS]][1]
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_h_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_h_RES)
+; MIPS-ANY-DAG: sw [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_u_h_test
;
@llvm_mips_copy_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_copy_u_w_RES = global i32 0, align 16
@@ -133,11 +166,15 @@ entry:
declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind
-; CHECK: llvm_mips_copy_u_w_test:
-; CHECK: ld.w
-; CHECK: copy_u.w
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_u_w_test
+; MIPS-ANY: llvm_mips_copy_u_w_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_w_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_w_ARG1)
+; MIPS-ANY-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: copy_u.w [[RD:\$[0-9]+]], [[WS]][1]
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_w_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_w_RES)
+; MIPS-ANY-DAG: sw [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_u_w_test
;
@llvm_mips_copy_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_copy_u_d_RES = global i64 0, align 16
@@ -152,11 +189,18 @@ entry:
declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32) nounwind
-; CHECK: llvm_mips_copy_u_d_test:
-; CHECK: ld.w
-; CHECK: copy_s.w
-; CHECK: copy_s.w
-; CHECK: sw
-; CHECK: sw
-; CHECK: .size llvm_mips_copy_u_d_test
+; MIPS-ANY: llvm_mips_copy_u_d_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_copy_u_d_ARG1)
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_copy_u_d_ARG1)
+; MIPS32-DAG: ld.w [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS64-DAG: ld.d [[WS:\$w[0-9]+]], 0([[R1]])
+; MIPS32-DAG: copy_s.w [[RD1:\$[0-9]+]], [[WS]][2]
+; MIPS32-DAG: copy_s.w [[RD2:\$[0-9]+]], [[WS]][3]
+; MIPS64-DAG: copy_u.d [[RD:\$[0-9]+]], [[WS]][1]
+; MIPS32-DAG: lw [[RES:\$[0-9]+]], %got(llvm_mips_copy_u_d_RES)
+; MIPS64-DAG: ld [[RES:\$[0-9]+]], %got_disp(llvm_mips_copy_u_d_RES)
+; MIPS32-DAG: sw [[RD1]], 0([[RES]])
+; MIPS32-DAG: sw [[RD2]], 4([[RES]])
+; MIPS64-DAG: sd [[RD]], 0([[RES]])
+; MIPS-ANY: .size llvm_mips_copy_u_d_test
;
diff --git a/test/CodeGen/Mips/msa/elm_insv.ll b/test/CodeGen/Mips/msa/elm_insv.ll
index fa7ceaf0c6bf..c746e523def6 100644
--- a/test/CodeGen/Mips/msa/elm_insv.ll
+++ b/test/CodeGen/Mips/msa/elm_insv.ll
@@ -1,8 +1,14 @@
; Test the MSA element insertion intrinsics that are encoded with the ELM
; instruction format.
-; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
-; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS32
+; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s -check-prefix=MIPS-ANY -check-prefix=MIPS64
@llvm_mips_insert_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_insert_b_ARG3 = global i32 27, align 16
@@ -19,12 +25,12 @@ entry:
declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind
-; CHECK: llvm_mips_insert_b_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
-; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0(
-; CHECK-DAG: insert.b [[R2]][1], [[R1]]
-; CHECK-DAG: st.b [[R2]], 0(
-; CHECK: .size llvm_mips_insert_b_test
+; MIPS-ANY: llvm_mips_insert_b_test:
+; MIPS-ANY-DAG: lw [[R1:\$[0-9]+]], 0(
+; MIPS-ANY-DAG: ld.b [[R2:\$w[0-9]+]], 0(
+; MIPS-ANY-DAG: insert.b [[R2]][1], [[R1]]
+; MIPS-ANY-DAG: st.b [[R2]], 0(
+; MIPS-ANY: .size llvm_mips_insert_b_test
;
@llvm_mips_insert_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_insert_h_ARG3 = global i32 27, align 16
@@ -41,12 +47,12 @@ entry:
declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind
-; CHECK: llvm_mips_insert_h_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
-; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0(
-; CHECK-DAG: insert.h [[R2]][1], [[R1]]
-; CHECK-DAG: st.h [[R2]], 0(
-; CHECK: .size llvm_mips_insert_h_test
+; MIPS-ANY: llvm_mips_insert_h_test:
+; MIPS-ANY-DAG: lw [[R1:\$[0-9]+]], 0(
+; MIPS-ANY-DAG: ld.h [[R2:\$w[0-9]+]], 0(
+; MIPS-ANY-DAG: insert.h [[R2]][1], [[R1]]
+; MIPS-ANY-DAG: st.h [[R2]], 0(
+; MIPS-ANY: .size llvm_mips_insert_h_test
;
@llvm_mips_insert_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_insert_w_ARG3 = global i32 27, align 16
@@ -63,12 +69,12 @@ entry:
declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind
-; CHECK: llvm_mips_insert_w_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
-; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0(
-; CHECK-DAG: insert.w [[R2]][1], [[R1]]
-; CHECK-DAG: st.w [[R2]], 0(
-; CHECK: .size llvm_mips_insert_w_test
+; MIPS-ANY: llvm_mips_insert_w_test:
+; MIPS-ANY-DAG: lw [[R1:\$[0-9]+]], 0(
+; MIPS-ANY-DAG: ld.w [[R2:\$w[0-9]+]], 0(
+; MIPS-ANY-DAG: insert.w [[R2]][1], [[R1]]
+; MIPS-ANY-DAG: st.w [[R2]], 0(
+; MIPS-ANY: .size llvm_mips_insert_w_test
;
@llvm_mips_insert_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_insert_d_ARG3 = global i64 27, align 16
@@ -85,14 +91,18 @@ entry:
declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind
-; CHECK: llvm_mips_insert_d_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], 0(
-; CHECK-DAG: lw [[R2:\$[0-9]+]], 4(
-; CHECK-DAG: ld.w [[R3:\$w[0-9]+]],
-; CHECK-DAG: insert.w [[R3]][2], [[R1]]
-; CHECK-DAG: insert.w [[R3]][3], [[R2]]
-; CHECK-DAG: st.w [[R3]],
-; CHECK: .size llvm_mips_insert_d_test
+; MIPS-ANY: llvm_mips_insert_d_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], 0(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], 4(
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], 0(
+; MIPS32-DAG: ld.w [[R3:\$w[0-9]+]],
+; MIPS64-DAG: ld.d [[W1:\$w[0-9]+]],
+; MIPS32-DAG: insert.w [[R3]][2], [[R1]]
+; MIPS32-DAG: insert.w [[R3]][3], [[R2]]
+; MIPS64-DAG: insert.d [[W1]][1], [[R1]]
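+; (As in the copy tests above, MIPS32 receives the i64 in two registers and
+; inserts it as word elements 2 and 3; only MIPS64 can use insert.d directly.)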
+; MIPS32-DAG: st.w [[R3]],
+; MIPS64-DAG: st.d [[W1]],
+; MIPS-ANY: .size llvm_mips_insert_d_test
;
@llvm_mips_insve_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_insve_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@@ -109,14 +119,16 @@ entry:
declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind
-; CHECK: llvm_mips_insve_b_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_b_ARG1)(
-; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_b_ARG3)(
-; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: insve.b [[R3]][1], [[R4]][0]
-; CHECK-DAG: st.b [[R3]],
-; CHECK: .size llvm_mips_insve_b_test
+; MIPS-ANY: llvm_mips_insve_b_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_b_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_b_ARG3)(
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_b_ARG1)(
+; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_b_ARG3)(
+; MIPS-ANY-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
+; MIPS-ANY-DAG: insve.b [[R3]][1], [[R4]][0]
+; MIPS-ANY-DAG: st.b [[R3]],
+; MIPS-ANY: .size llvm_mips_insve_b_test
;
@llvm_mips_insve_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_insve_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@@ -133,14 +145,16 @@ entry:
declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind
-; CHECK: llvm_mips_insve_h_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_h_ARG1)(
-; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_h_ARG3)(
-; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: insve.h [[R3]][1], [[R4]][0]
-; CHECK-DAG: st.h [[R3]],
-; CHECK: .size llvm_mips_insve_h_test
+; MIPS-ANY: llvm_mips_insve_h_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_h_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_h_ARG3)(
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_h_ARG1)(
+; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_h_ARG3)(
+; MIPS-ANY-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
+; MIPS-ANY-DAG: insve.h [[R3]][1], [[R4]][0]
+; MIPS-ANY-DAG: st.h [[R3]],
+; MIPS-ANY: .size llvm_mips_insve_h_test
;
@llvm_mips_insve_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_insve_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@@ -157,14 +171,16 @@ entry:
declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind
-; CHECK: llvm_mips_insve_w_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_w_ARG1)(
-; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_w_ARG3)(
-; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: insve.w [[R3]][1], [[R4]][0]
-; CHECK-DAG: st.w [[R3]],
-; CHECK: .size llvm_mips_insve_w_test
+; MIPS-ANY: llvm_mips_insve_w_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_w_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_w_ARG3)(
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_w_ARG1)(
+; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_w_ARG3)(
+; MIPS-ANY-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
+; MIPS-ANY-DAG: insve.w [[R3]][1], [[R4]][0]
+; MIPS-ANY-DAG: st.w [[R3]],
+; MIPS-ANY: .size llvm_mips_insve_w_test
;
@llvm_mips_insve_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_insve_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
@@ -181,12 +197,14 @@ entry:
declare <2 x i64> @llvm.mips.insve.d(<2 x i64>, i32, <2 x i64>) nounwind
-; CHECK: llvm_mips_insve_d_test:
-; CHECK-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_d_ARG1)(
-; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_d_ARG3)(
-; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
-; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: insve.d [[R3]][1], [[R4]][0]
-; CHECK-DAG: st.d [[R3]],
-; CHECK: .size llvm_mips_insve_d_test
+; MIPS-ANY: llvm_mips_insve_d_test:
+; MIPS32-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_insve_d_ARG1)(
+; MIPS32-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_insve_d_ARG3)(
+; MIPS64-DAG: ld [[R1:\$[0-9]+]], %got_disp(llvm_mips_insve_d_ARG1)(
+; MIPS64-DAG: ld [[R2:\$[0-9]+]], %got_disp(llvm_mips_insve_d_ARG3)(
+; MIPS-ANY-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
+; MIPS-ANY-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
+; MIPS-ANY-DAG: insve.d [[R3]][1], [[R4]][0]
+; MIPS-ANY-DAG: st.d [[R3]],
+; MIPS-ANY: .size llvm_mips_insve_d_test
;
diff --git a/test/CodeGen/Mips/msa/elm_shift_slide.ll b/test/CodeGen/Mips/msa/elm_shift_slide.ll
index 39d670dac841..00a6544b1207 100644
--- a/test/CodeGen/Mips/msa/elm_shift_slide.ll
+++ b/test/CodeGen/Mips/msa/elm_shift_slide.ll
@@ -5,17 +5,19 @@
; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
@llvm_mips_sldi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sldi_b_ARG2 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_sldi_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
define void @llvm_mips_sldi_b_test() nounwind {
entry:
%0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1
- %1 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, i32 1)
- store <16 x i8> %1, <16 x i8>* @llvm_mips_sldi_b_RES
+ %1 = load <16 x i8>* @llvm_mips_sldi_b_ARG2
+ %2 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, <16 x i8> %1, i32 1)
+ store <16 x i8> %2, <16 x i8>* @llvm_mips_sldi_b_RES
ret void
}
-declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, i32) nounwind
+declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32) nounwind
; CHECK: llvm_mips_sldi_b_test:
; CHECK: ld.b
@@ -24,17 +26,19 @@ declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, i32) nounwind
; CHECK: .size llvm_mips_sldi_b_test
;
@llvm_mips_sldi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sldi_h_ARG2 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_sldi_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
define void @llvm_mips_sldi_h_test() nounwind {
entry:
%0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1
- %1 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, i32 1)
- store <8 x i16> %1, <8 x i16>* @llvm_mips_sldi_h_RES
+ %1 = load <8 x i16>* @llvm_mips_sldi_h_ARG2
+ %2 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, <8 x i16> %1, i32 1)
+ store <8 x i16> %2, <8 x i16>* @llvm_mips_sldi_h_RES
ret void
}
-declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, i32) nounwind
+declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32) nounwind
; CHECK: llvm_mips_sldi_h_test:
; CHECK: ld.h
@@ -43,17 +47,19 @@ declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, i32) nounwind
; CHECK: .size llvm_mips_sldi_h_test
;
@llvm_mips_sldi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sldi_w_ARG2 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_sldi_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
define void @llvm_mips_sldi_w_test() nounwind {
entry:
%0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1
- %1 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, i32 1)
- store <4 x i32> %1, <4 x i32>* @llvm_mips_sldi_w_RES
+ %1 = load <4 x i32>* @llvm_mips_sldi_w_ARG2
+ %2 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, <4 x i32> %1, i32 1)
+ store <4 x i32> %2, <4 x i32>* @llvm_mips_sldi_w_RES
ret void
}
-declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, i32) nounwind
+declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32) nounwind
; CHECK: llvm_mips_sldi_w_test:
; CHECK: ld.w
@@ -62,17 +68,19 @@ declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, i32) nounwind
; CHECK: .size llvm_mips_sldi_w_test
;
@llvm_mips_sldi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sldi_d_ARG2 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_sldi_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
define void @llvm_mips_sldi_d_test() nounwind {
entry:
%0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1
- %1 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, i32 1)
- store <2 x i64> %1, <2 x i64>* @llvm_mips_sldi_d_RES
+ %1 = load <2 x i64>* @llvm_mips_sldi_d_ARG2
+ %2 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, <2 x i64> %1, i32 1)
+ store <2 x i64> %2, <2 x i64>* @llvm_mips_sldi_d_RES
ret void
}
-declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, i32) nounwind
+declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32) nounwind
; CHECK: llvm_mips_sldi_d_test:
; CHECK: ld.d
diff --git a/test/CodeGen/Mips/msa/frameindex.ll b/test/CodeGen/Mips/msa/frameindex.ll
index 3088e1ba9893..07e67bf04287 100644
--- a/test/CodeGen/Mips/msa/frameindex.ll
+++ b/test/CodeGen/Mips/msa/frameindex.ll
@@ -83,3 +83,312 @@ define void @loadstore_v16i8_just_over_simm16() nounwind {
ret void
; MIPS32-AE: .size loadstore_v16i8_just_over_simm16
}
+
+define void @loadstore_v8i16_near() nounwind {
+ ; MIPS32-AE: loadstore_v8i16_near:
+
+ %1 = alloca <8 x i16>
+ %2 = load volatile <8 x i16>* %1
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0($sp)
+ store volatile <8 x i16> %2, <8 x i16>* %1
+ ; MIPS32-AE: st.h [[R1]], 0($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v8i16_near
+}
+
+define void @loadstore_v8i16_unaligned() nounwind {
+ ; MIPS32-AE: loadstore_v8i16_unaligned:
+
+ %1 = alloca [2 x <8 x i16>]
+ %2 = bitcast [2 x <8 x i16>]* %1 to i8*
+ %3 = getelementptr i8* %2, i32 1
+ %4 = bitcast i8* %3 to [2 x <8 x i16>]*
+ %5 = getelementptr [2 x <8 x i16>]* %4, i32 0, i32 0
+
+ %6 = load volatile <8 x i16>* %5
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <8 x i16> %6, <8 x i16>* %5
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1
+ ; MIPS32-AE: st.h [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v8i16_unaligned
+}
+
+define void @loadstore_v8i16_just_under_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v8i16_just_under_simm10:
+
+ %1 = alloca <8 x i16>
+ %2 = alloca [1008 x i8] ; Push the frame right up to 1024 bytes
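+ ; (Sketch of the arithmetic: ld.h/st.h take a signed 10-bit offset scaled by
+ ; the element size, i.e. s10 << 1, so at most +1022 bytes is reachable
+ ; directly; 1008 still fits, while 1024 in the test below does not.)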
+
+ %3 = load volatile <8 x i16>* %1
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 1008($sp)
+ store volatile <8 x i16> %3, <8 x i16>* %1
+ ; MIPS32-AE: st.h [[R1]], 1008($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v8i16_just_under_simm10
+}
+
+define void @loadstore_v8i16_just_over_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v8i16_just_over_simm10:
+
+ %1 = alloca <8 x i16>
+ %2 = alloca [1009 x i8] ; Push the frame just over 1024 bytes
+
+ %3 = load volatile <8 x i16>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1024
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <8 x i16> %3, <8 x i16>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1024
+ ; MIPS32-AE: st.h [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v8i16_just_over_simm10
+}
+
+define void @loadstore_v8i16_just_under_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v8i16_just_under_simm16:
+
+ %1 = alloca <8 x i16>
+ %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
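+ ; (addiu can only add a signed 16-bit immediate, at most 32767, so offsets at
+ ; or beyond 32768 have to be built with the ori/addu sequence checked below.)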
+
+ %3 = load volatile <8 x i16>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <8 x i16> %3, <8 x i16>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.h [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v8i16_just_under_simm16
+}
+
+define void @loadstore_v8i16_just_over_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v8i16_just_over_simm16:
+
+ %1 = alloca <8 x i16>
+ %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
+
+ %3 = load volatile <8 x i16>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <8 x i16> %3, <8 x i16>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.h [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v8i16_just_over_simm16
+}
+
+define void @loadstore_v4i32_near() nounwind {
+ ; MIPS32-AE: loadstore_v4i32_near:
+
+ %1 = alloca <4 x i32>
+ %2 = load volatile <4 x i32>* %1
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0($sp)
+ store volatile <4 x i32> %2, <4 x i32>* %1
+ ; MIPS32-AE: st.w [[R1]], 0($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v4i32_near
+}
+
+define void @loadstore_v4i32_unaligned() nounwind {
+ ; MIPS32-AE: loadstore_v4i32_unaligned:
+
+ %1 = alloca [2 x <4 x i32>]
+ %2 = bitcast [2 x <4 x i32>]* %1 to i8*
+ %3 = getelementptr i8* %2, i32 1
+ %4 = bitcast i8* %3 to [2 x <4 x i32>]*
+ %5 = getelementptr [2 x <4 x i32>]* %4, i32 0, i32 0
+
+ %6 = load volatile <4 x i32>* %5
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <4 x i32> %6, <4 x i32>* %5
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1
+ ; MIPS32-AE: st.w [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v4i32_unaligned
+}
+
+define void @loadstore_v4i32_just_under_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v4i32_just_under_simm10:
+
+ %1 = alloca <4 x i32>
+ %2 = alloca [2032 x i8] ; Push the frame right up to 2048 bytes
+
+ %3 = load volatile <4 x i32>* %1
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 2032($sp)
+ store volatile <4 x i32> %3, <4 x i32>* %1
+ ; MIPS32-AE: st.w [[R1]], 2032($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v4i32_just_under_simm10
+}
+
+define void @loadstore_v4i32_just_over_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v4i32_just_over_simm10:
+
+ %1 = alloca <4 x i32>
+ %2 = alloca [2033 x i8] ; Push the frame just over 2048 bytes
+
+ %3 = load volatile <4 x i32>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 2048
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <4 x i32> %3, <4 x i32>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 2048
+ ; MIPS32-AE: st.w [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v4i32_just_over_simm10
+}
+
+define void @loadstore_v4i32_just_under_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v4i32_just_under_simm16:
+
+ %1 = alloca <4 x i32>
+ %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
+
+ %3 = load volatile <4 x i32>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <4 x i32> %3, <4 x i32>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.w [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v4i32_just_under_simm16
+}
+
+define void @loadstore_v4i32_just_over_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v4i32_just_over_simm16:
+
+ %1 = alloca <4 x i32>
+ %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
+
+ %3 = load volatile <4 x i32>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <4 x i32> %3, <4 x i32>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.w [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v4i32_just_over_simm16
+}
+
+define void @loadstore_v2i64_near() nounwind {
+ ; MIPS32-AE: loadstore_v2i64_near:
+
+ %1 = alloca <2 x i64>
+ %2 = load volatile <2 x i64>* %1
+ ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0($sp)
+ store volatile <2 x i64> %2, <2 x i64>* %1
+ ; MIPS32-AE: st.d [[R1]], 0($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v2i64_near
+}
+
+define void @loadstore_v2i64_unaligned() nounwind {
+ ; MIPS32-AE: loadstore_v2i64_unaligned:
+
+ %1 = alloca [2 x <2 x i64>]
+ %2 = bitcast [2 x <2 x i64>]* %1 to i8*
+ %3 = getelementptr i8* %2, i32 1
+ %4 = bitcast i8* %3 to [2 x <2 x i64>]*
+ %5 = getelementptr [2 x <2 x i64>]* %4, i32 0, i32 0
+
+ %6 = load volatile <2 x i64>* %5
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1
+ ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <2 x i64> %6, <2 x i64>* %5
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 1
+ ; MIPS32-AE: st.d [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v2i64_unaligned
+}
+
+define void @loadstore_v2i64_just_under_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v2i64_just_under_simm10:
+
+ %1 = alloca <2 x i64>
+ %2 = alloca [4080 x i8] ; Push the frame right up to 4096 bytes
+
+ %3 = load volatile <2 x i64>* %1
+ ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 4080($sp)
+ store volatile <2 x i64> %3, <2 x i64>* %1
+ ; MIPS32-AE: st.d [[R1]], 4080($sp)
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v2i64_just_under_simm10
+}
+
+define void @loadstore_v2i64_just_over_simm10() nounwind {
+ ; MIPS32-AE: loadstore_v2i64_just_over_simm10:
+
+ %1 = alloca <2 x i64>
+ %2 = alloca [4081 x i8] ; Push the frame just over 4096 bytes
+
+ %3 = load volatile <2 x i64>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 4096
+ ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <2 x i64> %3, <2 x i64>* %1
+ ; MIPS32-AE: addiu [[BASE:\$[0-9]+]], $sp, 4096
+ ; MIPS32-AE: st.d [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v2i64_just_over_simm10
+}
+
+define void @loadstore_v2i64_just_under_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v2i64_just_under_simm16:
+
+ %1 = alloca <2 x i64>
+ %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes
+
+ %3 = load volatile <2 x i64>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <2 x i64> %3, <2 x i64>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.d [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v2i64_just_under_simm16
+}
+
+define void @loadstore_v2i64_just_over_simm16() nounwind {
+ ; MIPS32-AE: loadstore_v2i64_just_over_simm16:
+
+ %1 = alloca <2 x i64>
+ %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes
+
+ %3 = load volatile <2 x i64>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
+ store volatile <2 x i64> %3, <2 x i64>* %1
+ ; MIPS32-AE: ori [[R2:\$[0-9]+]], $zero, 32768
+ ; MIPS32-AE: addu [[BASE:\$[0-9]+]], $sp, [[R2]]
+ ; MIPS32-AE: st.d [[R1]], 0([[BASE]])
+
+ ret void
+ ; MIPS32-AE: .size loadstore_v2i64_just_over_simm16
+}
diff --git a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll
index 24e27cbf14b8..f25ab2280602 100644
--- a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll
+++ b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll
@@ -10,7 +10,7 @@
; The legalizer legalized the <4 x i8>'s into <4 x i32>'s, then a call to
; isVSplat() returned the splat value for <i8 -1, i8 -1, ...> as a 32-bit APInt
; (255), but the zeroinitializer splat value as an 8-bit APInt (0). The
-; assertion occured when trying to check the values were bitwise inverses of
+; assertion occurred when trying to check the values were bitwise inverses of
; each other.
;
; It should at least successfully build.
diff --git a/test/CodeGen/Mips/msa/shift-dagcombine.ll b/test/CodeGen/Mips/msa/shift-dagcombine.ll
index 0d809fb4fbf1..322acff3ff49 100644
--- a/test/CodeGen/Mips/msa/shift-dagcombine.ll
+++ b/test/CodeGen/Mips/msa/shift-dagcombine.ll
@@ -37,7 +37,8 @@ define void @lshr_v4i32(<4 x i32>* %c) nounwind {
%2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
<i32 0, i32 1, i32 2, i32 3>
; CHECK-NOT: srl
- ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], %lo
+ ; CHECK-DAG: addiu [[CPOOL:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0([[CPOOL]])
; CHECK-NOT: srl
store volatile <4 x i32> %2, <4 x i32>* %c
; CHECK-DAG: st.w [[R1]], 0($4)
diff --git a/test/CodeGen/Mips/msa/shuffle.ll b/test/CodeGen/Mips/msa/shuffle.ll
index 316c669c3ac6..faeec5d58dd4 100644
--- a/test/CodeGen/Mips/msa/shuffle.ll
+++ b/test/CodeGen/Mips/msa/shuffle.ll
@@ -7,7 +7,8 @@ define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
%1 = load <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
store <16 x i8> %2, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
@@ -37,7 +38,8 @@ define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
%2 = load <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
- ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]]
store <16 x i8> %3, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
@@ -54,8 +56,11 @@ define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
%2 = load <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
- ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
- ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R2]]
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]])
+ ; The concatenation step of vshf is bitwise, not vectorwise, so we must
+ ; reverse the operands to get the right answer.
+ ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R1]]
store <16 x i8> %3, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
@@ -83,7 +88,8 @@ define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
%1 = load <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
store <8 x i16> %2, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
@@ -113,7 +119,8 @@ define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
%2 = load <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
- ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]]
store <8 x i16> %3, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
@@ -130,8 +137,11 @@ define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
%2 = load <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
- ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
- ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R2]]
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]])
+ ; The concatenation step of vshf is bitwise, not vectorwise, so we must
+ ; reverse the operands to get the right answer.
+ ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R1]]
store <8 x i16> %3, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
@@ -207,8 +217,11 @@ define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
%2 = load <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
- ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
- ; CHECK-DAG: vshf.w [[R3]], [[R1]], [[R2]]
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[PTR_A]])
+ ; The concatenation step of vshf is bitwise, not vectorwise, so we must
+ ; reverse the operands to get the right answer.
+ ; CHECK-DAG: vshf.w [[R3]], [[R2]], [[R1]]
store <4 x i32> %3, <4 x i32>* %c
; CHECK-DAG: st.w [[R3]], 0($4)
@@ -236,7 +249,8 @@ define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
%1 = load <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
- ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
store <2 x i64> %2, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
@@ -266,7 +280,8 @@ define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
%2 = load <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
- ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]])
; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]]
store <2 x i64> %3, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
@@ -283,8 +298,11 @@ define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
%2 = load <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
- ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
- ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R2]]
+ ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
+ ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]])
+ ; The concatenation step of vshf is bitwise, not vectorwise, so we must reverse
+ ; the operands to get the right answer.
+ ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R1]]
store <2 x i64> %3, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
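For the operand-swap comments above: in LLVM IR, shufflevector mask indices 0 through N-1 select elements from the first vector operand and N through 2N-1 from the second, so a mask such as <i32 1, i32 5, i32 6, i32 4> takes element 1 of the first input followed by elements 1, 2 and 0 of the second. A minimal sketch of that cross-operand case (hypothetical function name; it mirrors the vshf_v4i32_3 test above, which expects vshf.w to be emitted with its register operands reversed):

define void @vshf_cross_operand_sketch(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
  %1 = load <4 x i32>* %a          ; first shufflevector operand (mask indices 0-3)
  %2 = load <4 x i32>* %b          ; second shufflevector operand (mask indices 4-7)
  %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
  store <4 x i32> %3, <4 x i32>* %c
  ret void
}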
diff --git a/test/CodeGen/Mips/msa/special.ll b/test/CodeGen/Mips/msa/special.ll
index 60a4369dfb1c..b9badf5dc582 100644
--- a/test/CodeGen/Mips/msa/special.ll
+++ b/test/CodeGen/Mips/msa/special.ll
@@ -1,6 +1,13 @@
; Test the MSA intrinsics that are encoded with the SPECIAL instruction format.
-; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s --check-prefix=MIPS32
+; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64 < %s | \
+; RUN: FileCheck %s --check-prefix=MIPS64
+; RUN: llc -march=mips -mcpu=mips32r6 -mattr=+msa < %s | \
+; RUN: FileCheck %s --check-prefix=MIPS32
+; RUN: llc -march=mips64 -mcpu=mips64r6 -mattr=+msa < %s | \
+; RUN: FileCheck %s --check-prefix=MIPS64
define i32 @llvm_mips_lsa_test(i32 %a, i32 %b) nounwind {
entry:
@@ -10,9 +17,9 @@ entry:
declare i32 @llvm.mips.lsa(i32, i32, i32) nounwind
-; CHECK: llvm_mips_lsa_test:
-; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2
-; CHECK: .size llvm_mips_lsa_test
+; MIPS32: llvm_mips_lsa_test:
+; MIPS32: lsa {{\$[0-9]+}}, $5, $4, 2
+; MIPS32: .size llvm_mips_lsa_test
define i32 @lsa_test(i32 %a, i32 %b) nounwind {
entry:
@@ -21,6 +28,29 @@ entry:
ret i32 %1
}
-; CHECK: lsa_test:
-; CHECK: lsa {{\$[0-9]+}}, {{\$[0-9]+}}, {{\$[0-9]+}}, 2
-; CHECK: .size lsa_test
+; MIPS32: lsa_test:
+; MIPS32: lsa {{\$[0-9]+}}, $5, $4, 2
+; MIPS32: .size lsa_test
+
+define i64 @llvm_mips_dlsa_test(i64 %a, i64 %b) nounwind {
+entry:
+ %0 = tail call i64 @llvm.mips.dlsa(i64 %a, i64 %b, i32 2)
+ ret i64 %0
+}
+
+declare i64 @llvm.mips.dlsa(i64, i64, i32) nounwind
+
+; MIPS64: llvm_mips_dlsa_test:
+; MIPS64: dlsa {{\$[0-9]+}}, $5, $4, 2
+; MIPS64: .size llvm_mips_dlsa_test
+
+define i64 @dlsa_test(i64 %a, i64 %b) nounwind {
+entry:
+ %0 = shl i64 %b, 2
+ %1 = add i64 %a, %0
+ ret i64 %1
+}
+
+; MIPS64: dlsa_test:
+; MIPS64: dlsa {{\$[0-9]+}}, $5, $4, 2
+; MIPS64: .size dlsa_test
diff --git a/test/CodeGen/Mips/msa/vec.ll b/test/CodeGen/Mips/msa/vec.ll
index 5bddf5aea405..d5b97f52fb83 100644
--- a/test/CodeGen/Mips/msa/vec.ll
+++ b/test/CodeGen/Mips/msa/vec.ll
@@ -104,12 +104,12 @@ entry:
ret void
}
-; CHECK: and_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: and.v
-; CHECK: st.b
-; CHECK: .size and_v_b_test
+; ANYENDIAN: and_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size and_v_b_test
;
define void @and_v_h_test() nounwind {
entry:
@@ -120,12 +120,12 @@ entry:
ret void
}
-; CHECK: and_v_h_test:
-; CHECK: ld.h
-; CHECK: ld.h
-; CHECK: and.v
-; CHECK: st.h
-; CHECK: .size and_v_h_test
+; ANYENDIAN: and_v_h_test:
+; ANYENDIAN: ld.h
+; ANYENDIAN: ld.h
+; ANYENDIAN: and.v
+; ANYENDIAN: st.h
+; ANYENDIAN: .size and_v_h_test
;
define void @and_v_w_test() nounwind {
@@ -137,12 +137,12 @@ entry:
ret void
}
-; CHECK: and_v_w_test:
-; CHECK: ld.w
-; CHECK: ld.w
-; CHECK: and.v
-; CHECK: st.w
-; CHECK: .size and_v_w_test
+; ANYENDIAN: and_v_w_test:
+; ANYENDIAN: ld.w
+; ANYENDIAN: ld.w
+; ANYENDIAN: and.v
+; ANYENDIAN: st.w
+; ANYENDIAN: .size and_v_w_test
;
define void @and_v_d_test() nounwind {
@@ -154,12 +154,12 @@ entry:
ret void
}
-; CHECK: and_v_d_test:
-; CHECK: ld.d
-; CHECK: ld.d
-; CHECK: and.v
-; CHECK: st.d
-; CHECK: .size and_v_d_test
+; ANYENDIAN: and_v_d_test:
+; ANYENDIAN: ld.d
+; ANYENDIAN: ld.d
+; ANYENDIAN: and.v
+; ANYENDIAN: st.d
+; ANYENDIAN: .size and_v_d_test
;
@llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@@ -431,9 +431,9 @@ entry:
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
-; bmnz.v is the same as bsel.v with wt and wd_in swapped
-; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
-; ANYENDIAN-DAG: st.b [[R6]], 0(
+; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_b_test
@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@@ -462,9 +462,9 @@ entry:
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
-; bmnz.v is the same as bsel.v with wt and wd_in swapped
-; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
-; ANYENDIAN-DAG: st.b [[R6]], 0(
+; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_h_test
@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@@ -493,9 +493,9 @@ entry:
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
-; bmnz.v is the same as bsel.v with wt and wd_in swapped
-; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
-; ANYENDIAN-DAG: st.b [[R6]], 0(
+; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_w_test
@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@@ -524,9 +524,9 @@ entry:
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
-; bmnz.v is the same as bsel.v with wt and wd_in swapped
-; ANYENDIAN-DAG: bmnz.v [[R6]], [[R5]], [[R4]]
-; ANYENDIAN-DAG: st.b [[R6]], 0(
+; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
+; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
+; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_d_test
@llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@@ -722,12 +722,12 @@ entry:
ret void
}
-; CHECK: or_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: or.v
-; CHECK: st.b
-; CHECK: .size or_v_b_test
+; ANYENDIAN: or_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size or_v_b_test
;
define void @or_v_h_test() nounwind {
entry:
@@ -738,12 +738,12 @@ entry:
ret void
}
-; CHECK: or_v_h_test:
-; CHECK: ld.h
-; CHECK: ld.h
-; CHECK: or.v
-; CHECK: st.h
-; CHECK: .size or_v_h_test
+; ANYENDIAN: or_v_h_test:
+; ANYENDIAN: ld.h
+; ANYENDIAN: ld.h
+; ANYENDIAN: or.v
+; ANYENDIAN: st.h
+; ANYENDIAN: .size or_v_h_test
;
define void @or_v_w_test() nounwind {
@@ -755,12 +755,12 @@ entry:
ret void
}
-; CHECK: or_v_w_test:
-; CHECK: ld.w
-; CHECK: ld.w
-; CHECK: or.v
-; CHECK: st.w
-; CHECK: .size or_v_w_test
+; ANYENDIAN: or_v_w_test:
+; ANYENDIAN: ld.w
+; ANYENDIAN: ld.w
+; ANYENDIAN: or.v
+; ANYENDIAN: st.w
+; ANYENDIAN: .size or_v_w_test
;
define void @or_v_d_test() nounwind {
@@ -772,12 +772,12 @@ entry:
ret void
}
-; CHECK: or_v_d_test:
-; CHECK: ld.d
-; CHECK: ld.d
-; CHECK: or.v
-; CHECK: st.d
-; CHECK: .size or_v_d_test
+; ANYENDIAN: or_v_d_test:
+; ANYENDIAN: ld.d
+; ANYENDIAN: ld.d
+; ANYENDIAN: or.v
+; ANYENDIAN: st.d
+; ANYENDIAN: .size or_v_d_test
;
@llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@@ -880,12 +880,12 @@ entry:
ret void
}
-; CHECK: xor_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: xor.v
-; CHECK: st.b
-; CHECK: .size xor_v_b_test
+; ANYENDIAN: xor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size xor_v_b_test
;
define void @xor_v_h_test() nounwind {
entry:
@@ -896,12 +896,12 @@ entry:
ret void
}
-; CHECK: xor_v_h_test:
-; CHECK: ld.h
-; CHECK: ld.h
-; CHECK: xor.v
-; CHECK: st.h
-; CHECK: .size xor_v_h_test
+; ANYENDIAN: xor_v_h_test:
+; ANYENDIAN: ld.h
+; ANYENDIAN: ld.h
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.h
+; ANYENDIAN: .size xor_v_h_test
;
define void @xor_v_w_test() nounwind {
@@ -913,12 +913,12 @@ entry:
ret void
}
-; CHECK: xor_v_w_test:
-; CHECK: ld.w
-; CHECK: ld.w
-; CHECK: xor.v
-; CHECK: st.w
-; CHECK: .size xor_v_w_test
+; ANYENDIAN: xor_v_w_test:
+; ANYENDIAN: ld.w
+; ANYENDIAN: ld.w
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.w
+; ANYENDIAN: .size xor_v_w_test
;
define void @xor_v_d_test() nounwind {
@@ -930,12 +930,12 @@ entry:
ret void
}
-; CHECK: xor_v_d_test:
-; CHECK: ld.d
-; CHECK: ld.d
-; CHECK: xor.v
-; CHECK: st.d
-; CHECK: .size xor_v_d_test
+; ANYENDIAN: xor_v_d_test:
+; ANYENDIAN: ld.d
+; ANYENDIAN: ld.d
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.d
+; ANYENDIAN: .size xor_v_d_test
;
declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
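The (wd_in, wt, ws) -> (wt, ws, wd_in) rotation noted in the bsel.v tests above reflects that bmnz.v and bsel.v compute the same per-bit select and differ only in which operand carries the mask: per the MSA documentation (an assumption here, not something the test spells out), bsel.v takes the mask from the destination register's previous contents, while bmnz.v takes it from wt. A minimal, hypothetical sketch of that underlying bitwise select in IR:

define <16 x i8> @bitwise_select_sketch(<16 x i8> %mask, <16 x i8> %f, <16 x i8> %t) nounwind {
  ; Result takes bits from %t where %mask is 1 and bits from %f where %mask is 0.
  %notmask = xor <16 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %a = and <16 x i8> %f, %notmask
  %b = and <16 x i8> %t, %mask
  %r = or <16 x i8> %a, %b
  ret <16 x i8> %r
}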
diff --git a/test/CodeGen/Mips/nacl-align.ll b/test/CodeGen/Mips/nacl-align.ll
new file mode 100644
index 000000000000..e61b8347760e
--- /dev/null
+++ b/test/CodeGen/Mips/nacl-align.ll
@@ -0,0 +1,96 @@
+; RUN: llc -filetype=asm -mtriple=mipsel-none-nacl -relocation-model=static \
+; RUN: -O3 < %s | FileCheck %s
+
+
+; This test checks that NaCl functions are bundle-aligned.
+
+define void @test0() {
+ ret void
+
+; CHECK: .align 4
+; CHECK-NOT: .align
+; CHECK-LABEL: test0:
+
+}
+
+
+; This test checks that blocks reached through a jump table are
+; bundle-aligned.
+
+define i32 @test1(i32 %i) {
+entry:
+ switch i32 %i, label %default [
+ i32 0, label %bb1
+ i32 1, label %bb2
+ i32 2, label %bb3
+ i32 3, label %bb4
+ ]
+
+bb1:
+ ret i32 111
+bb2:
+ ret i32 222
+bb3:
+ ret i32 333
+bb4:
+ ret i32 444
+default:
+ ret i32 555
+
+
+; CHECK-LABEL: test1:
+
+; CHECK: .align 4
+; CHECK-NEXT: ${{BB[0-9]+_[0-9]+}}:
+; CHECK-NEXT: jr $ra
+; CHECK-NEXT: addiu $2, $zero, 111
+; CHECK-NEXT: .align 4
+; CHECK-NEXT: ${{BB[0-9]+_[0-9]+}}:
+; CHECK-NEXT: jr $ra
+; CHECK-NEXT: addiu $2, $zero, 222
+; CHECK-NEXT: .align 4
+; CHECK-NEXT: ${{BB[0-9]+_[0-9]+}}:
+; CHECK-NEXT: jr $ra
+; CHECK-NEXT: addiu $2, $zero, 333
+; CHECK-NEXT: .align 4
+; CHECK-NEXT: ${{BB[0-9]+_[0-9]+}}:
+; CHECK-NEXT: jr $ra
+; CHECK-NEXT: addiu $2, $zero, 444
+
+}
+
+
+; This test checks that a block whose address is taken is bundle-aligned in NaCl.
+
+@bb_array = constant [2 x i8*] [i8* blockaddress(@test2, %bb1),
+ i8* blockaddress(@test2, %bb2)], align 4
+
+define i32 @test2(i32 %i) {
+entry:
+ %elementptr = getelementptr inbounds [2 x i8*]* @bb_array, i32 0, i32 %i
+ %0 = load i8** %elementptr, align 4
+ indirectbr i8* %0, [label %bb1, label %bb2]
+
+bb1:
+ ret i32 111
+bb2:
+ ret i32 222
+
+
+; CHECK-LABEL: test2:
+
+; Note that there are two consecutive labels - one temporary and one for
+; the basic block.
+
+; CHECK: .align 4
+; CHECK-NEXT: ${{[a-zA-Z0-9]+}}:
+; CHECK-NEXT: ${{BB[0-9]+_[0-9]+}}:
+; CHECK-NEXT: jr $ra
+; CHECK-NEXT: addiu $2, $zero, 111
+; CHECK-NEXT: .align 4
+; CHECK-NEXT: ${{[a-zA-Z0-9]+}}:
+; CHECK-NEXT: ${{BB[0-9]+_[0-9]+}}:
+; CHECK-NEXT: jr $ra
+; CHECK-NEXT: addiu $2, $zero, 222
+
+}
diff --git a/test/CodeGen/Mips/nacl-branch-delay.ll b/test/CodeGen/Mips/nacl-branch-delay.ll
new file mode 100644
index 000000000000..d251eee07526
--- /dev/null
+++ b/test/CodeGen/Mips/nacl-branch-delay.ll
@@ -0,0 +1,71 @@
+; RUN: llc -filetype=asm -mtriple=mipsel-none-linux -relocation-model=static \
+; RUN: -O3 < %s | FileCheck %s
+
+; RUN: llc -filetype=asm -mtriple=mipsel-none-nacl -relocation-model=static \
+; RUN: -O3 < %s | FileCheck %s -check-prefix=CHECK-NACL
+
+@x = global i32 0, align 4
+declare void @f1(i32)
+declare void @f2()
+
+
+define void @test1() {
+ %1 = load i32* @x, align 4
+ call void @f1(i32 %1)
+ ret void
+
+
+; CHECK-LABEL: test1
+
+; We first make sure that for non-NaCl targets the branch-delay slot contains
+; dangerous instructions.
+
+; Check that the branch-delay slot is used to load the argument from x before
+; the function call.
+
+; CHECK: jal
+; CHECK-NEXT: lw $4, %lo(x)(${{[0-9]+}})
+
+; Check that the branch-delay slot is used for adjusting sp before return.
+
+; CHECK: jr $ra
+; CHECK-NEXT: addiu $sp, $sp, {{[0-9]+}}
+
+
+; For NaCl, check that the branch-delay slot doesn't contain dangerous instructions.
+
+; CHECK-NACL: jal
+; CHECK-NACL-NEXT: nop
+
+; CHECK-NACL: jr $ra
+; CHECK-NACL-NEXT: nop
+}
+
+
+define void @test2() {
+ store i32 1, i32* @x, align 4
+ tail call void @f2()
+ ret void
+
+
+; CHECK-LABEL: test2
+
+; Check that the branch-delay slot is used for storing to x before the function call.
+
+; CHECK: jal
+; CHECK-NEXT: sw ${{[0-9]+}}, %lo(x)(${{[0-9]+}})
+
+; Check that the branch-delay slot is used for adjusting sp before return.
+
+; CHECK: jr $ra
+; CHECK-NEXT: addiu $sp, $sp, {{[0-9]+}}
+
+
+; For NaCl, check that the branch-delay slot doesn't contain dangerous instructions.
+
+; CHECK-NACL: jal
+; CHECK-NACL-NEXT: nop
+
+; CHECK-NACL: jr $ra
+; CHECK-NACL-NEXT: nop
+}
diff --git a/test/CodeGen/Mips/nacl-reserved-regs.ll b/test/CodeGen/Mips/nacl-reserved-regs.ll
new file mode 100644
index 000000000000..ae21283b1fb7
--- /dev/null
+++ b/test/CodeGen/Mips/nacl-reserved-regs.ll
@@ -0,0 +1,51 @@
+; RUN: llc -march=mipsel -O3 < %s | FileCheck %s
+; RUN: llc -mtriple=mipsel-none-nacl-gnu -O3 < %s \
+; RUN: | FileCheck %s -check-prefix=CHECK-NACL
+
+@var = external global i32
+
+define void @f() {
+ %val1 = load volatile i32* @var
+ %val2 = load volatile i32* @var
+ %val3 = load volatile i32* @var
+ %val4 = load volatile i32* @var
+ %val5 = load volatile i32* @var
+ %val6 = load volatile i32* @var
+ %val7 = load volatile i32* @var
+ %val8 = load volatile i32* @var
+ %val9 = load volatile i32* @var
+ %val10 = load volatile i32* @var
+ %val11 = load volatile i32* @var
+ %val12 = load volatile i32* @var
+ %val13 = load volatile i32* @var
+ %val14 = load volatile i32* @var
+ %val15 = load volatile i32* @var
+ %val16 = load volatile i32* @var
+ store volatile i32 %val1, i32* @var
+ store volatile i32 %val2, i32* @var
+ store volatile i32 %val3, i32* @var
+ store volatile i32 %val4, i32* @var
+ store volatile i32 %val5, i32* @var
+ store volatile i32 %val6, i32* @var
+ store volatile i32 %val7, i32* @var
+ store volatile i32 %val8, i32* @var
+ store volatile i32 %val9, i32* @var
+ store volatile i32 %val10, i32* @var
+ store volatile i32 %val11, i32* @var
+ store volatile i32 %val12, i32* @var
+ store volatile i32 %val13, i32* @var
+ store volatile i32 %val14, i32* @var
+ store volatile i32 %val15, i32* @var
+ store volatile i32 %val16, i32* @var
+ ret void
+
+; Check that t6, t7 and t8 are used in non-NaCl code.
+; CHECK: lw $14
+; CHECK: lw $15
+; CHECK: lw $24
+
+; t6, t7 and t8 are reserved in NaCl.
+; CHECK-NACL-NOT: lw $14
+; CHECK-NACL-NOT: lw $15
+; CHECK-NACL-NOT: lw $24
+}
diff --git a/test/CodeGen/Mips/no-odd-spreg.ll b/test/CodeGen/Mips/no-odd-spreg.ll
new file mode 100644
index 000000000000..572e940bc467
--- /dev/null
+++ b/test/CodeGen/Mips/no-odd-spreg.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ODDSPREG -check-prefix=ODDSPREG-NO-EMIT
+; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOODDSPREG
+; RUN: llc -march=mipsel -mcpu=mips32r6 -mattr=fp64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=ODDSPREG -check-prefix=ODDSPREG-NO-EMIT
+; RUN: llc -march=mipsel -mcpu=mips32r6 -mattr=fp64,+nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOODDSPREG
+; RUN: llc -march=mipsel -mcpu=mips32r6 -mattr=fpxx,-nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=ODDSPREG -check-prefix=ODDSPREG-EMIT
+
+; We don't emit a directive unless we need to. This is to support versions of
+; GAS which do not support the directive.
+; ODDSPREG-EMIT: .module oddspreg
+; ODDSPREG-NO-EMIT-NOT: .module oddspreg
+; NOODDSPREG: .module nooddspreg
+
+define float @two_floats(float %a) {
+entry:
+ ; Clobber all except $f12 and $f13
+ ;
+ ; The intention is that if odd single precision registers are permitted, the
+ ; allocator will choose $f12 and $f13 to avoid the spill/reload.
+ ;
+ ; On the other hand, if odd single precision registers are not permitted, it
+ ; will be forced to spill/reload either %a or %0.
+
+ %0 = fadd float %a, 1.0
+ call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ %1 = fadd float %a, %0
+ ret float %1
+}
+
+; ALL-LABEL: two_floats:
+; ODDSPREG: add.s $f13, $f12, ${{f[0-9]+}}
+; ODDSPREG-NOT: swc1
+; ODDSPREG-NOT: lwc1
+; ODDSPREG: add.s $f0, $f12, $f13
+
+; NOODDSPREG: add.s $[[T0:f[0-9]*[02468]]], $f12, ${{f[0-9]+}}
+; NOODDSPREG: swc1 $[[T0]],
+; NOODDSPREG: lwc1 $[[T1:f[0-9]*[02468]]],
+; NOODDSPREG: add.s $f0, $f12, $[[T1]]
+
+define double @two_doubles(double %a) {
+entry:
+ ; Clobber all except $f12 and $f13
+ ;
+ ; -mno-odd-sp-reg doesn't need to affect double precision values so both cases
+ ; use $f12 and $f13.
+
+ %0 = fadd double %a, 1.0
+ call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ %1 = fadd double %a, %0
+ ret double %1
+}
+
+; ALL-LABEL: two_doubles:
+; ALL: add.d $[[T0:f[0-9]+]], $f12, ${{f[0-9]+}}
+; ALL: add.d $f0, $f12, $[[T0]]
+
+
+; INVALID: -mattr=+nooddspreg is not currently permitted for a 32-bit FPU register file (FR=0 mode).
diff --git a/test/CodeGen/Mips/nomips16.ll b/test/CodeGen/Mips/nomips16.ll
index bf7c667d057f..0affb16ac7c2 100644
--- a/test/CodeGen/Mips/nomips16.ll
+++ b/test/CodeGen/Mips/nomips16.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s
@x = global float 0.000000e+00, align 4
@.str = private unnamed_addr constant [20 x i8] c"in main: mips16 %f\0A\00", align 1
diff --git a/test/CodeGen/Mips/null-streamer.ll b/test/CodeGen/Mips/null-streamer.ll
new file mode 100644
index 000000000000..56cebbfafb7c
--- /dev/null
+++ b/test/CodeGen/Mips/null-streamer.ll
@@ -0,0 +1,7 @@
+; Test the null streamer with a target streamer.
+; RUN: llc -O0 -filetype=null -mtriple=mips-linux < %s
+
+define i32 @main() {
+entry:
+ ret i32 0
+}
diff --git a/test/CodeGen/Mips/null.ll b/test/CodeGen/Mips/null.ll
index 00c66a9928f6..bc78a27f199c 100644
--- a/test/CodeGen/Mips/null.ll
+++ b/test/CodeGen/Mips/null.ll
@@ -5,7 +5,7 @@ define i32 @main() nounwind {
entry:
ret i32 0
-; 16: .set mips16 # @main
+; 16: .set mips16
; 16: jrc $ra
diff --git a/test/CodeGen/Mips/o32_cc.ll b/test/CodeGen/Mips/o32_cc.ll
index 08e5aab4f7ac..c28f9abcadcd 100644
--- a/test/CodeGen/Mips/o32_cc.ll
+++ b/test/CodeGen/Mips/o32_cc.ll
@@ -1,12 +1,13 @@
-; RUN: llc -march=mipsel < %s | FileCheck %s
-; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck %s
-; RUN: llc -march=mipsel < %s | FileCheck -check-prefix=FP32EL %s
-; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck -check-prefix=FP64EL %s
+; RUN: llc -march=mipsel < %s | FileCheck -check-prefix=ALL %s
+; RUN: llc -march=mipsel -mattr=+fp64 < %s | FileCheck -check-prefix=ALL %s
+; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck -check-prefix=ALL -check-prefix=NO-MFHC1 %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-MFHC1 %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+fp64 < %s | FileCheck -check-prefix=ALL -check-prefix=HAS-MFHC1 %s
; $f12, $f14
-; CHECK-LABEL: testlowercall0:
-; CHECK-DAG: ldc1 $f12, %lo
-; CHECK-DAG: ldc1 $f14, %lo
+; ALL-LABEL: testlowercall0:
+; ALL-DAG: ldc1 $f12, %lo
+; ALL-DAG: ldc1 $f14, %lo
define void @testlowercall0() nounwind {
entry:
tail call void @f0(double 5.000000e+00, double 6.000000e+00) nounwind
@@ -16,9 +17,9 @@ entry:
declare void @f0(double, double)
; $f12, $f14
-; CHECK-LABEL: testlowercall1:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: lwc1 $f14, %lo
+; ALL-LABEL: testlowercall1:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: lwc1 $f14, %lo
define void @testlowercall1() nounwind {
entry:
tail call void @f1(float 8.000000e+00, float 9.000000e+00) nounwind
@@ -28,9 +29,9 @@ entry:
declare void @f1(float, float)
; $f12, $f14
-; CHECK-LABEL: testlowercall2:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: ldc1 $f14, %lo
+; ALL-LABEL: testlowercall2:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: ldc1 $f14, %lo
define void @testlowercall2() nounwind {
entry:
tail call void @f2(float 8.000000e+00, double 6.000000e+00) nounwind
@@ -40,9 +41,9 @@ entry:
declare void @f2(float, double)
; $f12, $f14
-; CHECK-LABEL: testlowercall3:
-; CHECK-DAG: ldc1 $f12, %lo
-; CHECK-DAG: lwc1 $f14, %lo
+; ALL-LABEL: testlowercall3:
+; ALL-DAG: ldc1 $f12, %lo
+; ALL-DAG: lwc1 $f14, %lo
define void @testlowercall3() nounwind {
entry:
tail call void @f3(double 5.000000e+00, float 9.000000e+00) nounwind
@@ -52,11 +53,11 @@ entry:
declare void @f3(double, float)
; $4, $5, $6, $7
-; CHECK-LABEL: testlowercall4:
-; CHECK-DAG: addiu $4, $zero, 12
-; CHECK-DAG: addiu $5, $zero, 13
-; CHECK-DAG: addiu $6, $zero, 14
-; CHECK-DAG: addiu $7, $zero, 15
+; ALL-LABEL: testlowercall4:
+; ALL-DAG: addiu $4, $zero, 12
+; ALL-DAG: addiu $5, $zero, 13
+; ALL-DAG: addiu $6, $zero, 14
+; ALL-DAG: addiu $7, $zero, 15
define void @testlowercall4() nounwind {
entry:
tail call void @f4(i32 12, i32 13, i32 14, i32 15) nounwind
@@ -66,11 +67,11 @@ entry:
declare void @f4(i32, i32, i32, i32)
; $f12, $6, stack
-; CHECK-LABEL: testlowercall5:
-; CHECK-DAG: ldc1 $f12, %lo
-; CHECK-DAG: addiu $6, $zero, 23
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
+; ALL-LABEL: testlowercall5:
+; ALL-DAG: ldc1 $f12, %lo
+; ALL-DAG: addiu $6, $zero, 23
+; ALL-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; ALL-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall5() nounwind {
entry:
tail call void @f5(double 1.500000e+01, i32 23, double 1.700000e+01) nounwind
@@ -80,10 +81,10 @@ entry:
declare void @f5(double, i32, double)
; $f12, $6, $7
-; CHECK-LABEL: testlowercall6:
-; CHECK-DAG: ldc1 $f12, %lo
-; CHECK-DAG: addiu $6, $zero, 33
-; CHECK-DAG: addiu $7, $zero, 24
+; ALL-LABEL: testlowercall6:
+; ALL-DAG: ldc1 $f12, %lo
+; ALL-DAG: addiu $6, $zero, 33
+; ALL-DAG: addiu $7, $zero, 24
define void @testlowercall6() nounwind {
entry:
tail call void @f6(double 2.500000e+01, i32 33, i32 24) nounwind
@@ -93,10 +94,10 @@ entry:
declare void @f6(double, i32, i32)
; $f12, $5, $6
-; CHECK-LABEL: testlowercall7:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: addiu $5, $zero, 43
-; CHECK-DAG: addiu $6, $zero, 34
+; ALL-LABEL: testlowercall7:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: addiu $5, $zero, 43
+; ALL-DAG: addiu $6, $zero, 34
define void @testlowercall7() nounwind {
entry:
tail call void @f7(float 1.800000e+01, i32 43, i32 34) nounwind
@@ -106,12 +107,12 @@ entry:
declare void @f7(float, i32, i32)
; $4, $5, $6, stack
-; CHECK-LABEL: testlowercall8:
-; CHECK-DAG: addiu $4, $zero, 22
-; CHECK-DAG: addiu $5, $zero, 53
-; CHECK-DAG: addiu $6, $zero, 44
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
+; ALL-LABEL: testlowercall8:
+; ALL-DAG: addiu $4, $zero, 22
+; ALL-DAG: addiu $5, $zero, 53
+; ALL-DAG: addiu $6, $zero, 44
+; ALL-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; ALL-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall8() nounwind {
entry:
tail call void @f8(i32 22, i32 53, i32 44, double 4.000000e+00) nounwind
@@ -121,11 +122,11 @@ entry:
declare void @f8(i32, i32, i32, double)
; $4, $5, $6, $7
-; CHECK-LABEL: testlowercall9:
-; CHECK-DAG: addiu $4, $zero, 32
-; CHECK-DAG: addiu $5, $zero, 63
-; CHECK-DAG: addiu $6, $zero, 54
-; CHECK-DAG: lui $7, 16688
+; ALL-LABEL: testlowercall9:
+; ALL-DAG: addiu $4, $zero, 32
+; ALL-DAG: addiu $5, $zero, 63
+; ALL-DAG: addiu $6, $zero, 54
+; ALL-DAG: lui $7, 16688
define void @testlowercall9() nounwind {
entry:
tail call void @f9(i32 32, i32 63, i32 54, float 1.100000e+01) nounwind
@@ -135,15 +136,16 @@ entry:
declare void @f9(i32, i32, i32, float)
; $4, $5, ($6, $7)
-; CHECK-LABEL: testlowercall10:
-; CHECK-DAG: addiu $4, $zero, 42
-; CHECK-DAG: addiu $5, $zero, 73
-; FP32EL-LABEL: testlowercall10:
-; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
-; FP64EL-LABEL: testlowercall10:
-; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
+; ALL-LABEL: testlowercall10:
+
+; ALL-DAG: addiu $4, $zero, 42
+; ALL-DAG: addiu $5, $zero, 73
+
+; NO-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; NO-MFHC1-DAG: mfc1 $7, $f{{[0-9]+}}
+
+; HAS-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; HAS-MFHC1-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall10() nounwind {
entry:
tail call void @f10(i32 42, i32 73, double 2.700000e+01) nounwind
@@ -153,14 +155,14 @@ entry:
declare void @f10(i32, i32, double)
; $4, ($6, $7)
-; CHECK-LABEL: testlowercall11:
-; CHECK-DAG: addiu $4, $zero, 52
-; FP32EL-LABEL: testlowercall11:
-; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
-; FP64EL-LABEL: testlowercall11:
-; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
+; ALL-LABEL: testlowercall11:
+; ALL-DAG: addiu $4, $zero, 52
+
+; NO-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; NO-MFHC1-DAG: mfc1 $7, $f{{[0-9]+}}
+
+; HAS-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; HAS-MFHC1-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall11() nounwind {
entry:
tail call void @f11(i32 52, double 1.600000e+01) nounwind
@@ -170,11 +172,11 @@ entry:
declare void @f11(i32, double)
; $f12, $f14, $6, $7
-; CHECK-LABEL: testlowercall12:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: lwc1 $f14, %lo
-; CHECK-DAG: lui $6, 16672
-; CHECK-DAG: lui $7, 16808
+; ALL-LABEL: testlowercall12:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: lwc1 $f14, %lo
+; ALL-DAG: lui $6, 16672
+; ALL-DAG: lui $7, 16808
define void @testlowercall12() nounwind {
entry:
tail call void @f12(float 2.800000e+01, float 1.900000e+01, float 1.000000e+01, float 2.100000e+01) nounwind
@@ -184,11 +186,11 @@ entry:
declare void @f12(float, float, float, float)
; $f12, $5, $6, $7
-; CHECK-LABEL: testlowercall13:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: addiu $5, $zero, 83
-; CHECK-DAG: lui $6, 16800
-; CHECK-DAG: addiu $7, $zero, 25
+; ALL-LABEL: testlowercall13:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: addiu $5, $zero, 83
+; ALL-DAG: lui $6, 16800
+; ALL-DAG: addiu $7, $zero, 25
define void @testlowercall13() nounwind {
entry:
tail call void @f13(float 3.800000e+01, i32 83, float 2.000000e+01, i32 25) nounwind
@@ -199,10 +201,10 @@ entry:
declare void @f13(float, i32, float, i32)
; $f12, $f14, $7
-; CHECK-LABEL: testlowercall14:
-; CHECK-DAG: ldc1 $f12, %lo
-; CHECK-DAG: lwc1 $f14, %lo
-; CHECK-DAG: lui $7, 16880
+; ALL-LABEL: testlowercall14:
+; ALL-DAG: ldc1 $f12, %lo
+; ALL-DAG: lwc1 $f14, %lo
+; ALL-DAG: lui $7, 16880
define void @testlowercall14() nounwind {
entry:
tail call void @f14(double 3.500000e+01, float 2.900000e+01, float 3.000000e+01) nounwind
@@ -212,15 +214,15 @@ entry:
declare void @f14(double, float, float)
; $f12, $f14, ($6, $7)
-; CHECK-LABEL: testlowercall15:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: lwc1 $f14, %lo
-; FP32EL-LABEL: testlowercall15:
-; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
-; FP64EL-LABEL: testlowercall15:
-; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
+; ALL-LABEL: testlowercall15:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: lwc1 $f14, %lo
+
+; NO-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; NO-MFHC1-DAG: mfc1 $7, $f{{[0-9]+}}
+
+; HAS-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; HAS-MFHC1-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall15() nounwind {
entry:
tail call void @f15(float 4.800000e+01, float 3.900000e+01, double 3.700000e+01) nounwind
@@ -230,11 +232,11 @@ entry:
declare void @f15(float, float, double)
; $4, $5, $6, $7
-; CHECK-LABEL: testlowercall16:
-; CHECK-DAG: addiu $4, $zero, 62
-; CHECK-DAG: lui $5, 16964
-; CHECK-DAG: addiu $6, $zero, 64
-; CHECK-DAG: lui $7, 16888
+; ALL-LABEL: testlowercall16:
+; ALL-DAG: addiu $4, $zero, 62
+; ALL-DAG: lui $5, 16964
+; ALL-DAG: addiu $6, $zero, 64
+; ALL-DAG: lui $7, 16888
define void @testlowercall16() nounwind {
entry:
tail call void @f16(i32 62, float 4.900000e+01, i32 64, float 3.100000e+01) nounwind
@@ -244,11 +246,11 @@ entry:
declare void @f16(i32, float, i32, float)
; $4, $5, $6, $7
-; CHECK-LABEL: testlowercall17:
-; CHECK-DAG: addiu $4, $zero, 72
-; CHECK-DAG: lui $5, 17004
-; CHECK-DAG: addiu $6, $zero, 74
-; CHECK-DAG: addiu $7, $zero, 35
+; ALL-LABEL: testlowercall17:
+; ALL-DAG: addiu $4, $zero, 72
+; ALL-DAG: lui $5, 17004
+; ALL-DAG: addiu $6, $zero, 74
+; ALL-DAG: addiu $7, $zero, 35
define void @testlowercall17() nounwind {
entry:
tail call void @f17(i32 72, float 5.900000e+01, i32 74, i32 35) nounwind
@@ -258,11 +260,11 @@ entry:
declare void @f17(i32, float, i32, i32)
; $4, $5, $6, $7
-; CHECK-LABEL: testlowercall18:
-; CHECK-DAG: addiu $4, $zero, 82
-; CHECK-DAG: addiu $5, $zero, 93
-; CHECK-DAG: lui $6, 16928
-; CHECK-DAG: addiu $7, $zero, 45
+; ALL-LABEL: testlowercall18:
+; ALL-DAG: addiu $4, $zero, 82
+; ALL-DAG: addiu $5, $zero, 93
+; ALL-DAG: lui $6, 16928
+; ALL-DAG: addiu $7, $zero, 45
define void @testlowercall18() nounwind {
entry:
tail call void @f18(i32 82, i32 93, float 4.000000e+01, i32 45) nounwind
@@ -273,16 +275,16 @@ declare void @f18(i32, i32, float, i32)
; $4, ($6, $7), stack
-; CHECK-LABEL: testlowercall20:
-; CHECK-DAG: addiu $4, $zero, 92
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
-; FP32EL-LABEL: testlowercall20:
-; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
-; FP64EL-LABEL: testlowercall20:
-; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
+; ALL-LABEL: testlowercall20:
+; ALL-DAG: addiu $4, $zero, 92
+; ALL-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; ALL-DAG: sw ${{[a-z0-9]+}}, 20($sp)
+
+; NO-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; NO-MFHC1-DAG: mfc1 $7, $f{{[0-9]+}}
+
+; HAS-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; HAS-MFHC1-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall20() nounwind {
entry:
tail call void @f20(i32 92, double 2.600000e+01, double 4.700000e+01) nounwind
@@ -292,9 +294,9 @@ entry:
declare void @f20(i32, double, double)
; $f12, $5
-; CHECK-LABEL: testlowercall21:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: addiu $5, $zero, 103
+; ALL-LABEL: testlowercall21:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: addiu $5, $zero, 103
define void @testlowercall21() nounwind {
entry:
tail call void @f21(float 5.800000e+01, i32 103) nounwind
@@ -304,15 +306,15 @@ entry:
declare void @f21(float, i32)
; $f12, $5, ($6, $7)
-; CHECK-LABEL: testlowercall22:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: addiu $5, $zero, 113
-; FP32EL-LABEL: testlowercall22:
-; FP32EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP32EL-DAG: mfc1 $7, $f{{[0-9]+}}
-; FP64EL-LABEL: testlowercall22:
-; FP64EL-DAG: mfc1 $6, $f{{[0-9]+}}
-; FP64EL-DAG: mfhc1 $7, $f{{[0-9]+}}
+; ALL-LABEL: testlowercall22:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: addiu $5, $zero, 113
+
+; NO-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; NO-MFHC1-DAG: mfc1 $7, $f{{[0-9]+}}
+
+; HAS-MFHC1-DAG: mfc1 $6, $f{{[0-9]+}}
+; HAS-MFHC1-DAG: mfhc1 $7, $f{{[0-9]+}}
define void @testlowercall22() nounwind {
entry:
tail call void @f22(float 6.800000e+01, i32 113, double 5.700000e+01) nounwind
@@ -322,9 +324,9 @@ entry:
declare void @f22(float, i32, double)
; $f12, f6
-; CHECK-LABEL: testlowercall23:
-; CHECK-DAG: ldc1 $f12, %lo
-; CHECK-DAG: addiu $6, $zero, 123
+; ALL-LABEL: testlowercall23:
+; ALL-DAG: ldc1 $f12, %lo
+; ALL-DAG: addiu $6, $zero, 123
define void @testlowercall23() nounwind {
entry:
tail call void @f23(double 4.500000e+01, i32 123) nounwind
@@ -334,11 +336,11 @@ entry:
declare void @f23(double, i32)
; $f12,$6, stack
-; CHECK-LABEL: testlowercall24:
-; CHECK-DAG: ldc1 $f12, %lo
-; CHECK-DAG: addiu $6, $zero, 133
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 16($sp)
-; CHECK-DAG: sw ${{[a-z0-9]+}}, 20($sp)
+; ALL-LABEL: testlowercall24:
+; ALL-DAG: ldc1 $f12, %lo
+; ALL-DAG: addiu $6, $zero, 133
+; ALL-DAG: sw ${{[a-z0-9]+}}, 16($sp)
+; ALL-DAG: sw ${{[a-z0-9]+}}, 20($sp)
define void @testlowercall24() nounwind {
entry:
tail call void @f24(double 5.500000e+01, i32 133, double 6.700000e+01) nounwind
@@ -347,19 +349,19 @@ entry:
declare void @f24(double, i32, double)
-; CHECK-LABEL: testlowercall25:
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: lwc1 $f14, %lo
-; CHECK-DAG: lui $6
-; CHECK-DAG: lui $7
-; CHECK-DAG: lwc1 $f12, %lo
-; CHECK-DAG: addiu $5, $zero, 83
-; CHECK-DAG: lui $6
-; CHECK-DAG: addiu $7, $zero, 25
-; CHECK-DAG: addiu $4, $zero, 82
-; CHECK-DAG: addiu $5, $zero, 93
-; CHECK-DAG: lui $6
-; CHECK-DAG: addiu $7, $zero, 45
+; ALL-LABEL: testlowercall25:
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: lwc1 $f14, %lo
+; ALL-DAG: lui $6
+; ALL-DAG: lui $7
+; ALL-DAG: lwc1 $f12, %lo
+; ALL-DAG: addiu $5, $zero, 83
+; ALL-DAG: lui $6
+; ALL-DAG: addiu $7, $zero, 25
+; ALL-DAG: addiu $4, $zero, 82
+; ALL-DAG: addiu $5, $zero, 93
+; ALL-DAG: lui $6
+; ALL-DAG: addiu $7, $zero, 45
define void @testlowercall25() nounwind {
entry:
tail call void @f12(float 2.800000e+01, float 1.900000e+01, float 1.000000e+01, float 2.100000e+01) nounwind
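The NO-MFHC1/HAS-MFHC1 prefixes above cover the cases where a double ends up in the $6/$7 GPR pair: the mips32 RUN line expects both 32-bit halves to be read with mfc1, while the mips32r2 RUN lines expect the upper half to come from mfhc1, which is only available from MIPS32r2 onwards. A minimal sketch (hypothetical callee @sink) of an O32 call that forces a double into $6/$7, matching the "; $4, ($6, $7)" pattern used by testlowercall11:

define void @pass_double_in_gpr_pair(double %d) nounwind {
entry:
  ; O32 assigns the i32 to $4; the double is then aligned to the $6/$7 pair.
  tail call void @sink(i32 1, double %d) nounwind
  ret void
}

declare void @sink(i32, double)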
diff --git a/test/CodeGen/Mips/octeon.ll b/test/CodeGen/Mips/octeon.ll
new file mode 100644
index 000000000000..d5ff9bdf3608
--- /dev/null
+++ b/test/CodeGen/Mips/octeon.ll
@@ -0,0 +1,29 @@
+; RUN: llc -O1 < %s -march=mips64 -mcpu=octeon | FileCheck %s -check-prefix=OCTEON
+; RUN: llc -O1 < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=MIPS64
+
+define i64 @addi64(i64 %a, i64 %b) nounwind {
+entry:
+; OCTEON-LABEL: addi64:
+; OCTEON: jr $ra
+; OCTEON: baddu $2, $4, $5
+; MIPS64-LABEL: addi64:
+; MIPS64: daddu
+; MIPS64: jr
+; MIPS64: andi
+ %add = add i64 %a, %b
+ %and = and i64 %add, 255
+ ret i64 %and
+}
+
+define i64 @mul(i64 %a, i64 %b) nounwind {
+entry:
+; OCTEON-LABEL: mul:
+; OCTEON: jr $ra
+; OCTEON: dmul $2, $4, $5
+; MIPS64-LABEL: mul:
+; MIPS64: dmult
+; MIPS64: jr
+; MIPS64: mflo
+ %res = mul i64 %a, %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/Mips/octeon_popcnt.ll b/test/CodeGen/Mips/octeon_popcnt.ll
new file mode 100644
index 000000000000..52c37f69d020
--- /dev/null
+++ b/test/CodeGen/Mips/octeon_popcnt.ll
@@ -0,0 +1,47 @@
+; RUN: llc -O1 -march=mips64 -mcpu=octeon < %s | FileCheck %s -check-prefix=OCTEON
+; RUN: llc -O1 -march=mips64 -mcpu=mips64 < %s | FileCheck %s -check-prefix=MIPS64
+
+define i8 @cnt8(i8 %x) nounwind readnone {
+ %cnt = tail call i8 @llvm.ctpop.i8(i8 %x)
+ ret i8 %cnt
+; OCTEON-LABEL: cnt8:
+; OCTEON: jr $ra
+; OCTEON: pop $2, $1
+; MIPS64-LABEL: cnt8:
+; MIPS64-NOT: pop
+}
+
+define i16 @cnt16(i16 %x) nounwind readnone {
+ %cnt = tail call i16 @llvm.ctpop.i16(i16 %x)
+ ret i16 %cnt
+; OCTEON-LABEL: cnt16:
+; OCTEON: jr $ra
+; OCTEON: pop $2, $1
+; MIPS64-LABEL: cnt16:
+; MIPS64-NOT: pop
+}
+
+define i32 @cnt32(i32 %x) nounwind readnone {
+ %cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
+ ret i32 %cnt
+; OCTEON-LABEL: cnt32:
+; OCTEON: jr $ra
+; OCTEON: pop $2, $4
+; MIPS64-LABEL: cnt32:
+; MIPS64-NOT: pop
+}
+
+define i64 @cnt64(i64 %x) nounwind readnone {
+ %cnt = tail call i64 @llvm.ctpop.i64(i64 %x)
+ ret i64 %cnt
+; OCTEON-LABEL: cnt64:
+; OCTEON: jr $ra
+; OCTEON: dpop $2, $4
+; MIPS64-LABEL: cnt64:
+; MIPS64-NOT: dpop
+}
+
+declare i8 @llvm.ctpop.i8(i8) nounwind readnone
+declare i16 @llvm.ctpop.i16(i16) nounwind readnone
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare i64 @llvm.ctpop.i64(i64) nounwind readnone
diff --git a/test/CodeGen/Mips/optimize-fp-math.ll b/test/CodeGen/Mips/optimize-fp-math.ll
index 8b71dc42344c..7886f29f5cef 100644
--- a/test/CodeGen/Mips/optimize-fp-math.ll
+++ b/test/CodeGen/Mips/optimize-fp-math.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=64
; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
; 32-LABEL: test_sqrtf_float_:
diff --git a/test/CodeGen/Mips/optimize-pic-o0.ll b/test/CodeGen/Mips/optimize-pic-o0.ll
new file mode 100644
index 000000000000..554d49e728c7
--- /dev/null
+++ b/test/CodeGen/Mips/optimize-pic-o0.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=mipsel -O0 < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 0, i32* %retval
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 10
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ call void bitcast (void (...)* @foo to void ()*)()
+; CHECK: jalr $25
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %1 = load i32* %i, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %2 = load i32* %retval
+ ret i32 %2
+}
+
+declare void @foo(...)
diff --git a/test/CodeGen/Mips/powif64_16.ll b/test/CodeGen/Mips/powif64_16.ll
index 35a7ca9201e2..48757276bb8c 100644
--- a/test/CodeGen/Mips/powif64_16.ll
+++ b/test/CodeGen/Mips/powif64_16.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -mips16-hard-float -soft-float -relocation-model=static < %s | FileCheck %s
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s
declare float @llvm.powi.f32(float %Val, i32 %power)
declare double @llvm.powi.f64(double %Val, i32 %power)
diff --git a/test/CodeGen/Mips/prevent-hoisting.ll b/test/CodeGen/Mips/prevent-hoisting.ll
new file mode 100644
index 000000000000..da665c210909
--- /dev/null
+++ b/test/CodeGen/Mips/prevent-hoisting.ll
@@ -0,0 +1,144 @@
+; RUN: llc -march=mipsel -O3 < %s | FileCheck %s
+
+
+; MIPS direct branches implicitly define register $at. This test makes sure that
+; the code hoisting optimization (which moves identical instructions from the
+; start of two basic blocks to their common predecessor block) takes this into
+; account and doesn't move the definition of $at into the predecessor block
+; (which would make $at live-in at the start of the successor block).
+
+
+; CHECK-LABEL: readLumaCoeff8x8_CABAC
+
+; The check for the "addiu" instruction is added so that we can match the correct "b" instruction.
+; CHECK: addiu ${{[0-9]+}}, $zero, -1
+; CHECK: b $[[BB0:BB[0-9_]+]]
+
+; Check that the sll instruction that writes to $1 starts a basic block.
+; CHECK: {{BB[0-9_#]+}}:
+; CHECK-NEXT: sll $1, $[[R0:[0-9]+]], 4
+
+; Check that an identical sll instruction starts another basic block.
+; CHECK: [[BB0]]:
+; CHECK-NEXT: sll $1, $[[R0]], 4
+
+
+%struct.img_par = type { i32, i32, i32, i32, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [16 x i16]], [6 x [32 x i32]], [16 x [16 x i32]], [4 x [12 x [4 x [4 x i32]]]], [16 x i32], i8**, i32*, i32***, i32**, i32, i32, i32, i32, %struct.Slice*, %struct.macroblock*, i32, i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32***, i32***, i32****, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [3 x [2 x i32]], i32, i32, i32, i32, %struct.timeb, %struct.timeb, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.Slice = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.datapartition*, %struct.MotionInfoContexts*, %struct.TextureInfoContexts*, i32, i32*, i32*, i32*, i32, i32*, i32*, i32*, i32 (%struct.img_par*, %struct.inp_par*)*, i32, i32, i32, i32 }
+%struct.datapartition = type { %struct.Bitstream*, %struct.DecodingEnvironment, i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)* }
+%struct.Bitstream = type { i32, i32, i32, i32, i8*, i32 }
+%struct.DecodingEnvironment = type { i32, i32, i32, i32, i32, i8*, i32* }
+%struct.syntaxelement = type { i32, i32, i32, i32, i32, i32, i32, i32, void (i32, i32, i32*, i32*)*, void (%struct.syntaxelement*, %struct.img_par*, %struct.DecodingEnvironment*)* }
+%struct.MotionInfoContexts = type { [4 x [11 x %struct.BiContextType]], [2 x [9 x %struct.BiContextType]], [2 x [10 x %struct.BiContextType]], [2 x [6 x %struct.BiContextType]], [4 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x %struct.BiContextType] }
+%struct.BiContextType = type { i16, i8 }
+%struct.TextureInfoContexts = type { [2 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x [4 x %struct.BiContextType]], [10 x [4 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]] }
+%struct.inp_par = type { [1000 x i8], [1000 x i8], [1000 x i8], i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.macroblock = type { i32, [2 x i32], i32, i32, %struct.macroblock*, %struct.macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], i32, i64, i64, i32, i32, [4 x i8], [4 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.DecRefPicMarking_s = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s* }
+%struct.timeb = type { i32, i16, i16, i16 }
+
+@assignSE2partition = external global [0 x [20 x i32]]
+@FIELD_SCAN8x8 = external constant [64 x [2 x i8]]
+
+
+define void @readLumaCoeff8x8_CABAC(%struct.img_par* %img, i32 %b8) {
+
+ %1 = load i32* undef, align 4
+ br i1 false, label %2, label %3
+
+; <label>:2 ; preds = %0
+ br label %3
+
+; <label>:3 ; preds = %2, %0
+ br i1 undef, label %switch.lookup, label %4
+
+switch.lookup: ; preds = %3
+ br label %4
+
+; <label>:4 ; preds = %switch.lookup, %3
+ br i1 undef, label %5, label %6
+
+; <label>:5 ; preds = %4
+ br label %6
+
+; <label>:6 ; preds = %5, %4
+ %7 = phi [2 x i8]* [ getelementptr inbounds ([64 x [2 x i8]]* @FIELD_SCAN8x8, i32 0, i32 0), %4 ], [ null, %5 ]
+ br i1 undef, label %switch.lookup6, label %8
+
+switch.lookup6: ; preds = %6
+ br label %8
+
+; <label>:8 ; preds = %switch.lookup6, %6
+ br i1 undef, label %.loopexit, label %9
+
+; <label>:9 ; preds = %8
+ %10 = and i32 %b8, 1
+ %11 = shl nuw nsw i32 %10, 3
+ %12 = getelementptr inbounds %struct.Slice* null, i32 0, i32 9
+ br i1 undef, label %.preheader, label %.preheader11
+
+.preheader11: ; preds = %21, %9
+ %k.014 = phi i32 [ %27, %21 ], [ 0, %9 ]
+ %coef_ctr.013 = phi i32 [ %23, %21 ], [ -1, %9 ]
+ br i1 false, label %13, label %14
+
+; <label>:13 ; preds = %.preheader11
+ br label %15
+
+; <label>:14 ; preds = %.preheader11
+ br label %15
+
+; <label>:15 ; preds = %14, %13
+ %16 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %17 = load i32* %16, align 4
+ %18 = getelementptr inbounds %struct.datapartition* null, i32 %17, i32 2
+ %19 = load i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)** %18, align 4
+ %20 = call i32 %19(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* undef)
+ br i1 false, label %.loopexit, label %21
+
+; <label>:21 ; preds = %15
+ %22 = add i32 %coef_ctr.013, 1
+ %23 = add i32 %22, 0
+ %24 = getelementptr inbounds [2 x i8]* %7, i32 %23, i32 0
+ %25 = add nsw i32 0, %11
+ %26 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
+ store i32 0, i32* %26, align 4
+ %27 = add nsw i32 %k.014, 1
+ %28 = icmp slt i32 %27, 65
+ br i1 %28, label %.preheader11, label %.loopexit
+
+.preheader: ; preds = %36, %9
+ %k.110 = phi i32 [ %45, %36 ], [ 0, %9 ]
+ %coef_ctr.29 = phi i32 [ %39, %36 ], [ -1, %9 ]
+ br i1 false, label %29, label %30
+
+; <label>:29 ; preds = %.preheader
+ br label %31
+
+; <label>:30 ; preds = %.preheader
+ br label %31
+
+; <label>:31 ; preds = %30, %29
+ %32 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %33 = load i32* %32, align 4
+ %34 = getelementptr inbounds %struct.datapartition* null, i32 %33
+ %35 = call i32 undef(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* %34)
+ br i1 false, label %.loopexit, label %36
+
+; <label>:36 ; preds = %31
+ %37 = load i32* undef, align 4
+ %38 = add i32 %coef_ctr.29, 1
+ %39 = add i32 %38, %37
+ %40 = getelementptr inbounds [2 x i8]* %7, i32 %39, i32 0
+ %41 = load i8* %40, align 1
+ %42 = zext i8 %41 to i32
+ %43 = add nsw i32 %42, %11
+ %44 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
+ store i32 0, i32* %44, align 4
+ %45 = add nsw i32 %k.110, 1
+ %46 = icmp slt i32 %45, 65
+ br i1 %46, label %.preheader, label %.loopexit
+
+.loopexit: ; preds = %36, %31, %21, %15, %8
+ ret void
+}
diff --git a/test/CodeGen/Mips/remat-immed-load.ll b/test/CodeGen/Mips/remat-immed-load.ll
index d93964bcaef6..b53b156e9eec 100644
--- a/test/CodeGen/Mips/remat-immed-load.ll
+++ b/test/CodeGen/Mips/remat-immed-load.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck %s -check-prefix=64
; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=64
define void @f0() nounwind {
diff --git a/test/CodeGen/Mips/rotate.ll b/test/CodeGen/Mips/rotate.ll
index 813bbdf18bbd..70eff6e224d0 100644
--- a/test/CodeGen/Mips/rotate.ll
+++ b/test/CodeGen/Mips/rotate.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=mips16
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32r2 -mattr=+mips16 < %s | FileCheck %s -check-prefix=mips16
; CHECK: rotrv $2, $4
; mips16: .ent rot0
diff --git a/test/CodeGen/Mips/s2rem.ll b/test/CodeGen/Mips/s2rem.ll
new file mode 100644
index 000000000000..9edb5be2771e
--- /dev/null
+++ b/test/CodeGen/Mips/s2rem.ll
@@ -0,0 +1,92 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC
+
+
+@xi = common global i32 0, align 4
+@x = common global float 0.000000e+00, align 4
+@xd = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @it() #0 {
+entry:
+ %call = call i32 @i(i32 1)
+ store i32 %call, i32* @xi, align 4
+ ret void
+; PIC: .ent it
+; STATIC: .ent it
+; PIC: save $16, $17, $ra, [[FS:[0-9]+]]
+; STATIC: save $16, $ra, [[FS:[0-9]+]]
+; PIC: restore $16, $17, $ra, [[FS]]
+; STATIC: restore $16, $ra, [[FS]]
+; PIC: .end it
+; STATIC: .end it
+}
+
+declare i32 @i(i32) #1
+
+; Function Attrs: nounwind
+define void @ft() #0 {
+entry:
+ %call = call float @f()
+ store float %call, float* @x, align 4
+ ret void
+; PIC: .ent ft
+; PIC: save $16, $17, $ra, $18, [[FS:[0-9]+]]
+; PIC: restore $16, $17, $ra, $18, [[FS]]
+; PIC: .end ft
+}
+
+declare float @f() #1
+
+; Function Attrs: nounwind
+define void @dt() #0 {
+entry:
+ %call = call double @d()
+ store double %call, double* @xd, align 8
+ ret void
+; PIC: .ent dt
+; PIC: save $16, $17, $ra, $18, [[FS:[0-9]+]]
+; PIC: restore $16, $17, $ra, $18, [[FS]]
+; PIC: .end dt
+}
+
+declare double @d() #1
+
+; Function Attrs: nounwind
+define void @fft() #0 {
+entry:
+ %0 = load float* @x, align 4
+ %call = call float @ff(float %0)
+ store float %call, float* @x, align 4
+ ret void
+; PIC: .ent fft
+; PIC: save $16, $17, $ra, $18, [[FS:[0-9]+]]
+; PIC: restore $16, $17, $ra, $18, [[FS]]
+; PIC: .end fft
+}
+
+declare float @ff(float) #1
+
+; Function Attrs: nounwind
+define void @vft() #0 {
+entry:
+ %0 = load float* @x, align 4
+ call void @vf(float %0)
+ ret void
+; PIC: .ent vft
+; STATIC: .ent vft
+; PIC: save $16, $ra, [[FS:[0-9]+]]
+; STATIC: save $16, $ra, [[FS:[0-9]+]]
+; PIC: restore $16, $ra, [[FS]]
+; STATIC: restore $16, $ra, [[FS]]
+; PIC: .end vft
+; STATIC: .end vft
+}
+
+declare void @vf(float) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+
diff --git a/test/CodeGen/Mips/sel1c.ll b/test/CodeGen/Mips/sel1c.ll
index 4c4784de6aa8..edd2e3e43b79 100644
--- a/test/CodeGen/Mips/sel1c.ll
+++ b/test/CodeGen/Mips/sel1c.ll
@@ -10,7 +10,7 @@ entry:
%0 = load i32* @i, align 4
%1 = load i32* @j, align 4
%cmp = icmp eq i32 %0, %1
- %cond = select i1 %cmp, i32 1, i32 2
+ %cond = select i1 %cmp, i32 1, i32 3
store i32 %cond, i32* @k, align 4
ret void
; cond-b-short: bteqz $BB0_{{[0-9]+}} # 16 bit inst
diff --git a/test/CodeGen/Mips/sel2c.ll b/test/CodeGen/Mips/sel2c.ll
index 25dfaa9ba87e..4b211245f46e 100644
--- a/test/CodeGen/Mips/sel2c.ll
+++ b/test/CodeGen/Mips/sel2c.ll
@@ -10,7 +10,7 @@ entry:
%0 = load i32* @i, align 4
%1 = load i32* @j, align 4
%cmp = icmp ne i32 %0, %1
- %cond = select i1 %cmp, i32 1, i32 2
+ %cond = select i1 %cmp, i32 1, i32 3
store i32 %cond, i32* @k, align 4
; cond-b-short: btnez $BB0_{{[0-9]+}} # 16 bit inst
ret void
diff --git a/test/CodeGen/Mips/select.ll b/test/CodeGen/Mips/select.ll
index 06e2a86ad176..eb2198b36dff 100644
--- a/test/CodeGen/Mips/select.ll
+++ b/test/CodeGen/Mips/select.ll
@@ -1,135 +1,705 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32R2
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32R6
+; RUN: llc < %s -march=mips64el -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=64
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=64R2
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=64R6
@d2 = external global double
@d3 = external global double
-define i32 @sel1(i32 %s, i32 %f0, i32 %f1) nounwind readnone {
+define i32 @i32_icmp_ne_i32_val(i32 %s, i32 %f0, i32 %f1) nounwind readnone {
entry:
-; CHECK: movn
+; ALL-LABEL: i32_icmp_ne_i32_val:
+
+; 32: movn $5, $6, $4
+; 32: move $2, $5
+
+; 32R2: movn $5, $6, $4
+; 32R2: move $2, $5
+
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $5, $4
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $6, $4
+; 32R6: or $2, $[[T1]], $[[T0]]
+
+; 64: movn $5, $6, $4
+; 64: move $2, $5
+
+; 64R2: movn $5, $6, $4
+; 64R2: move $2, $5
+
+; 64R6-DAG: seleqz $[[T0:[0-9]+]], $5, $4
+; 64R6-DAG: selnez $[[T1:[0-9]+]], $6, $4
+; 64R6: or $2, $[[T1]], $[[T0]]
+
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, i32 %f1, i32 %f0
ret i32 %cond
}
-define float @sel2(i32 %s, float %f0, float %f1) nounwind readnone {
+define i64 @i32_icmp_ne_i64_val(i32 %s, i64 %f0, i64 %f1) nounwind readnone {
+entry:
+; ALL-LABEL: i32_icmp_ne_i64_val:
+
+; 32-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32-DAG: movn $6, $[[F1]], $4
+; 32-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32: movn $7, $[[F1H]], $4
+; 32: move $2, $6
+; 32: move $3, $7
+
+; 32R2-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R2-DAG: movn $6, $[[F1]], $4
+; 32R2-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R2: movn $7, $[[F1H]], $4
+; 32R2: move $2, $6
+; 32R2: move $3, $7
+
+; 32R6-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $6, $4
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1]], $4
+; 32R6: or $2, $[[T1]], $[[T0]]
+; 32R6-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $7, $4
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1H]], $4
+; 32R6: or $3, $[[T1]], $[[T0]]
+
+; 64: movn $5, $6, $4
+; 64: move $2, $5
+
+; 64R2: movn $5, $6, $4
+; 64R2: move $2, $5
+
+; FIXME: This sll works around an implementation detail in the code generator
+; (setcc's result is i32 so bits 32-63 are undefined). It's not really
+; needed.
+; 64R6-DAG: sll $[[CC:[0-9]+]], $4, 0
+; 64R6-DAG: seleqz $[[T0:[0-9]+]], $5, $[[CC]]
+; 64R6-DAG: selnez $[[T1:[0-9]+]], $6, $[[CC]]
+; 64R6: or $2, $[[T1]], $[[T0]]
+
+ %tobool = icmp ne i32 %s, 0
+ %cond = select i1 %tobool, i64 %f1, i64 %f0
+ ret i64 %cond
+}
+
+define i64 @i64_icmp_ne_i64_val(i64 %s, i64 %f0, i64 %f1) nounwind readnone {
entry:
-; CHECK: movn.s
+; ALL-LABEL: i64_icmp_ne_i64_val:
+
+; 32-DAG: or $[[CC:[0-9]+]], $4
+; 32-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32-DAG: movn $6, $[[F1]], $[[CC]]
+; 32-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32: movn $7, $[[F1H]], $[[CC]]
+; 32: move $2, $6
+; 32: move $3, $7
+
+; 32R2-DAG: or $[[CC:[0-9]+]], $4
+; 32R2-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R2-DAG: movn $6, $[[F1]], $[[CC]]
+; 32R2-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R2: movn $7, $[[F1H]], $[[CC]]
+; 32R2: move $2, $6
+; 32R2: move $3, $7
+
+; 32R6-DAG: lw $[[F1:[0-9]+]], 16($sp)
+; 32R6-DAG: or $[[T2:[0-9]+]], $4, $5
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $6, $[[T2]]
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1]], $[[T2]]
+; 32R6: or $2, $[[T1]], $[[T0]]
+; 32R6-DAG: lw $[[F1H:[0-9]+]], 20($sp)
+; 32R6-DAG: seleqz $[[T0:[0-9]+]], $7, $[[T2]]
+; 32R6-DAG: selnez $[[T1:[0-9]+]], $[[F1H]], $[[T2]]
+; 32R6: or $3, $[[T1]], $[[T0]]
+
+; 64: movn $5, $6, $4
+; 64: move $2, $5
+
+; 64R2: movn $5, $6, $4
+; 64R2: move $2, $5
+
+; 64R6-DAG: seleqz $[[T0:[0-9]+]], $5, $4
+; 64R6-DAG: selnez $[[T1:[0-9]+]], $6, $4
+; 64R6: or $2, $[[T1]], $[[T0]]
+
+ %tobool = icmp ne i64 %s, 0
+ %cond = select i1 %tobool, i64 %f1, i64 %f0
+ ret i64 %cond
+}
+
+define float @i32_icmp_ne_f32_val(i32 %s, float %f0, float %f1) nounwind readnone {
+entry:
+; ALL-LABEL: i32_icmp_ne_f32_val:
+
+; 32-DAG: mtc1 $5, $[[F0:f[0-9]+]]
+; 32-DAG: mtc1 $6, $[[F1:f0]]
+; 32: movn.s $[[F1]], $[[F0]], $4
+
+; 32R2-DAG: mtc1 $5, $[[F0:f[0-9]+]]
+; 32R2-DAG: mtc1 $6, $[[F1:f0]]
+; 32R2: movn.s $[[F1]], $[[F0]], $4
+
+; 32R6-DAG: mtc1 $5, $[[F0:f[0-9]+]]
+; 32R6-DAG: mtc1 $6, $[[F1:f[0-9]+]]
+; 32R6: sltu $[[T0:[0-9]+]], $zero, $4
+; 32R6: mtc1 $[[T0]], $[[CC:f0]]
+; 32R6: sel.s $[[CC]], $[[F1]], $[[F0]]
+
+; 64: movn.s $f14, $f13, $4
+; 64: mov.s $f0, $f14
+
+; 64R2: movn.s $f14, $f13, $4
+; 64R2: mov.s $f0, $f14
+
+; 64R6: sltu $[[T0:[0-9]+]], $zero, $4
+; 64R6: mtc1 $[[T0]], $[[CC:f0]]
+; 64R6: sel.s $[[CC]], $f14, $f13
+
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, float %f0, float %f1
ret float %cond
}
-define double @sel2_1(i32 %s, double %f0, double %f1) nounwind readnone {
+define double @i32_icmp_ne_f64_val(i32 %s, double %f0, double %f1) nounwind readnone {
entry:
-; CHECK: movn.d
+; ALL-LABEL: i32_icmp_ne_f64_val:
+
+; 32-DAG: mtc1 $6, $[[F0:f[1-3]*[02468]+]]
+; 32-DAG: mtc1 $7, $[[F0H:f[1-3]*[13579]+]]
+; 32-DAG: ldc1 $[[F1:f0]], 16($sp)
+; 32: movn.d $[[F1]], $[[F0]], $4
+
+; 32R2-DAG: mtc1 $6, $[[F0:f[0-9]+]]
+; 32R2-DAG: mthc1 $7, $[[F0]]
+; 32R2-DAG: ldc1 $[[F1:f0]], 16($sp)
+; 32R2: movn.d $[[F1]], $[[F0]], $4
+
+; 32R6-DAG: mtc1 $6, $[[F0:f[0-9]+]]
+; 32R6-DAG: mthc1 $7, $[[F0]]
+; 32R6-DAG: sltu $[[T0:[0-9]+]], $zero, $4
+; 32R6-DAG: mtc1 $[[T0]], $[[CC:f0]]
+; 32R6-DAG: ldc1 $[[F1:f[0-9]+]], 16($sp)
+; 32R6: sel.d $[[CC]], $[[F1]], $[[F0]]
+
+; 64: movn.d $f14, $f13, $4
+; 64: mov.d $f0, $f14
+
+; 64R2: movn.d $f14, $f13, $4
+; 64R2: mov.d $f0, $f14
+
+; 64R6-DAG: sltu $[[T0:[0-9]+]], $zero, $4
+; 64R6-DAG: mtc1 $[[T0]], $[[CC:f0]]
+; 64R6: sel.d $[[CC]], $f14, $f13
+
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, double %f0, double %f1
ret double %cond
}
-define float @sel3(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
+define float @f32_fcmp_oeq_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.eq.s
-; CHECK: movt.s
+; ALL-LABEL: f32_fcmp_oeq_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.eq.s $[[F2]], $[[F3]]
+; 32: movt.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.eq.s $[[F2]], $[[F3]]
+; 32R2: movt.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.eq.s $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.eq.s $f14, $f15
+; 64: movt.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.eq.s $f14, $f15
+; 64R2: movt.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.eq.s $[[CC:f0]], $f14, $f15
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp oeq float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define float @sel4(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
+define float @f32_fcmp_olt_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.olt.s
-; CHECK: movt.s
+; ALL-LABEL: f32_fcmp_olt_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.olt.s $[[F2]], $[[F3]]
+; 32: movt.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.olt.s $[[F2]], $[[F3]]
+; 32R2: movt.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.olt.s $f14, $f15
+; 64: movt.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.olt.s $f14, $f15
+; 64R2: movt.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.lt.s $[[CC:f0]], $f14, $f15
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp olt float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define float @sel5(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
+define float @f32_fcmp_ogt_f32_val(float %f0, float %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.ule.s
-; CHECK: movf.s
+; ALL-LABEL: f32_fcmp_ogt_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.ule.s $[[F2]], $[[F3]]
+; 32: movf.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.ule.s $[[F2]], $[[F3]]
+; 32R2: movf.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.ule.s $f14, $f15
+; 64: movf.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.lt.s $[[CC:f0]], $f15, $f14
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define double @sel5_1(double %f0, double %f1, float %f2, float %f3) nounwind readnone {
+define double @f32_fcmp_ogt_f64_val(double %f0, double %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.ule.s
-; CHECK: movf.d
+; ALL-LABEL: f32_fcmp_ogt_f64_val:
+
+; 32-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
+; 32: c.ule.s $[[F2]], $[[F3]]
+; 32: movf.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
+; 32R2: c.ule.s $[[F2]], $[[F3]]
+; 32R2: movf.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
+; 32R6: cmp.lt.s $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.ule.s $f14, $f15
+; 64: movf.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.lt.s $[[CC:f0]], $f15, $f14
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define double @sel6(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
+define double @f64_fcmp_oeq_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.eq.d
-; CHECK: movt.d
+; ALL-LABEL: f64_fcmp_oeq_f64_val:
+
+; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32: c.eq.d $[[F2]], $[[F3]]
+; 32: movt.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R2: c.eq.d $[[F2]], $[[F3]]
+; 32R2: movt.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R6: cmp.eq.d $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.eq.d $f14, $f15
+; 64: movt.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.eq.d $f14, $f15
+; 64R2: movt.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.eq.d $[[CC:f0]], $f14, $f15
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp oeq double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define double @sel7(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
+define double @f64_fcmp_olt_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.olt.d
-; CHECK: movt.d
+; ALL-LABEL: f64_fcmp_olt_f64_val:
+
+; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32: c.olt.d $[[F2]], $[[F3]]
+; 32: movt.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R2: c.olt.d $[[F2]], $[[F3]]
+; 32R2: movt.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R6: cmp.lt.d $[[CC:f0]], $[[F2]], $[[F3]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.olt.d $f14, $f15
+; 64: movt.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.olt.d $f14, $f15
+; 64R2: movt.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.lt.d $[[CC:f0]], $f14, $f15
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp olt double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define double @sel8(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
+define double @f64_fcmp_ogt_f64_val(double %f0, double %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.ule.d
-; CHECK: movf.d
+; ALL-LABEL: f64_fcmp_ogt_f64_val:
+
+; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32: c.ule.d $[[F2]], $[[F3]]
+; 32: movf.d $f14, $f12, $fcc0
+; 32: mov.d $f0, $f14
+
+; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R2: c.ule.d $[[F2]], $[[F3]]
+; 32R2: movf.d $f14, $f12, $fcc0
+; 32R2: mov.d $f0, $f14
+
+; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
+; 32R6: cmp.lt.d $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.d $[[CC]], $f14, $f12
+
+; 64: c.ule.d $f14, $f15
+; 64: movf.d $f13, $f12, $fcc0
+; 64: mov.d $f0, $f13
+
+; 64R2: c.ule.d $f14, $f15
+; 64R2: movf.d $f13, $f12, $fcc0
+; 64R2: mov.d $f0, $f13
+
+; 64R6: cmp.lt.d $[[CC:f0]], $f15, $f14
+; 64R6: sel.d $[[CC]], $f13, $f12
+
%cmp = fcmp ogt double %f2, %f3
%cond = select i1 %cmp, double %f0, double %f1
ret double %cond
}
-define float @sel8_1(float %f0, float %f1, double %f2, double %f3) nounwind readnone {
+define float @f64_fcmp_ogt_f32_val(float %f0, float %f1, double %f2, double %f3) nounwind readnone {
entry:
-; CHECK: c.ule.d
-; CHECK: movf.s
+; ALL-LABEL: f64_fcmp_ogt_f32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[1-3]*[02468]+]]
+; 32-DAG: mtc1 $7, $[[F2H:f[1-3]*[13579]+]]
+; 32-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
+; 32: c.ule.d $[[F2]], $[[F3]]
+; 32: movf.s $f14, $f12, $fcc0
+; 32: mov.s $f0, $f14
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mthc1 $7, $[[F2]]
+; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
+; 32R2: c.ule.d $[[F2]], $[[F3]]
+; 32R2: movf.s $f14, $f12, $fcc0
+; 32R2: mov.s $f0, $f14
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mthc1 $7, $[[F2]]
+; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
+; 32R6: cmp.lt.d $[[CC:f0]], $[[F3]], $[[F2]]
+; 32R6: sel.s $[[CC]], $f14, $f12
+
+; 64: c.ule.d $f14, $f15
+; 64: movf.s $f13, $f12, $fcc0
+; 64: mov.s $f0, $f13
+
+; 64R2: c.ule.d $f14, $f15
+; 64R2: movf.s $f13, $f12, $fcc0
+; 64R2: mov.s $f0, $f13
+
+; 64R6: cmp.lt.d $[[CC:f0]], $f15, $f14
+; 64R6: sel.s $[[CC]], $f13, $f12
+
%cmp = fcmp ogt double %f2, %f3
%cond = select i1 %cmp, float %f0, float %f1
ret float %cond
}
-define i32 @sel9(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
+define i32 @f32_fcmp_oeq_i32_val(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.eq.s
-; CHECK: movt
+; ALL-LABEL: f32_fcmp_oeq_i32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.eq.s $[[F2]], $[[F3]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.eq.s $[[F2]], $[[F3]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.eq.s $[[CC:f[0-9]+]], $[[F2]], $[[F3]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64: c.eq.s $f14, $f15
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2: c.eq.s $f14, $f15
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6: cmp.eq.s $[[CC:f[0-9]+]], $f14, $f15
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%cmp = fcmp oeq float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
}
-define i32 @sel10(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
+define i32 @f32_fcmp_olt_i32_val(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.olt.s
-; CHECK: movt
+; ALL-LABEL: f32_fcmp_olt_i32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.olt.s $[[F2]], $[[F3]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.olt.s $[[F2]], $[[F3]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f[0-9]+]], $[[F2]], $[[F3]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64: c.olt.s $f14, $f15
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2: c.olt.s $f14, $f15
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6: cmp.lt.s $[[CC:f[0-9]+]], $f14, $f15
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
%cmp = fcmp olt float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
}
-define i32 @sel11(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
+define i32 @f32_fcmp_ogt_i32_val(i32 %f0, i32 %f1, float %f2, float %f3) nounwind readnone {
entry:
-; CHECK: c.ule.s
-; CHECK: movf
+; ALL-LABEL: f32_fcmp_ogt_i32_val:
+
+; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32: c.ule.s $[[F2]], $[[F3]]
+; 32: movf $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R2: c.ule.s $[[F2]], $[[F3]]
+; 32R2: movf $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
+; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
+; 32R6: cmp.lt.s $[[CC:f[0-9]+]], $[[F3]], $[[F2]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64: c.ule.s $f14, $f15
+; 64: movf $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6: cmp.lt.s $[[CC:f[0-9]+]], $f15, $f14
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%cmp = fcmp ogt float %f2, %f3
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
}
-define i32 @sel12(i32 %f0, i32 %f1) nounwind readonly {
+define i32 @f64_fcmp_oeq_i32_val(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK: c.eq.d
-; CHECK: movt
+; ALL-LABEL: f64_fcmp_oeq_i32_val:
+
+; 32-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32: c.eq.d $[[TMP]], $[[TMP1]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R2-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R2-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R2-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R2: c.eq.d $[[TMP]], $[[TMP1]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R6-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R6-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R6-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R6: cmp.eq.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_oeq_i32_val)))
+; 64-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64: c.eq.d $[[TMP]], $[[TMP1]]
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_oeq_i32_val)))
+; 64R2-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R2-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R2-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R2: c.eq.d $[[TMP]], $[[TMP1]]
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_oeq_i32_val)))
+; 64R6-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R6-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R6-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R6: cmp.eq.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%tmp = load double* @d2, align 8
%tmp1 = load double* @d3, align 8
%cmp = fcmp oeq double %tmp, %tmp1
@@ -137,10 +707,76 @@ entry:
ret i32 %cond
}
-define i32 @sel13(i32 %f0, i32 %f1) nounwind readonly {
+define i32 @f64_fcmp_olt_i32_val(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK: c.olt.d
-; CHECK: movt
+; ALL-LABEL: f64_fcmp_olt_i32_val:
+
+; 32-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32: c.olt.d $[[TMP]], $[[TMP1]]
+; 32: movt $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R2-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R2-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R2-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R2: c.olt.d $[[TMP]], $[[TMP1]]
+; 32R2: movt $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R6-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R6-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R6-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_olt_i32_val)))
+; 64-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64: c.olt.d $[[TMP]], $[[TMP1]]
+; 64: movt $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_olt_i32_val)))
+; 64R2-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R2-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R2-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R2: c.olt.d $[[TMP]], $[[TMP1]]
+; 64R2: movt $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_olt_i32_val)))
+; 64R6-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R6-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R6-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP]], $[[TMP1]]
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%tmp = load double* @d2, align 8
%tmp1 = load double* @d3, align 8
%cmp = fcmp olt double %tmp, %tmp1
@@ -148,10 +784,76 @@ entry:
ret i32 %cond
}
-define i32 @sel14(i32 %f0, i32 %f1) nounwind readonly {
+define i32 @f64_fcmp_ogt_i32_val(i32 %f0, i32 %f1) nounwind readonly {
entry:
-; CHECK: c.ule.d
-; CHECK: movf
+; ALL-LABEL: f64_fcmp_ogt_i32_val:
+
+; 32-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32: c.ule.d $[[TMP]], $[[TMP1]]
+; 32: movf $5, $4, $fcc0
+; 32: move $2, $5
+
+; 32R2-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R2-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R2-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R2-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R2: c.ule.d $[[TMP]], $[[TMP1]]
+; 32R2: movf $5, $4, $fcc0
+; 32R2: move $2, $5
+
+; 32R6-DAG: addiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(_gp_disp)
+; 32R6-DAG: addu $[[GOT:[0-9]+]], $[[T0]], $25
+; 32R6-DAG: lw $[[D2:[0-9]+]], %got(d2)($1)
+; 32R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 32R6-DAG: lw $[[D3:[0-9]+]], %got(d3)($1)
+; 32R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 32R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP1]], $[[TMP]]
+; 32R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 32R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 32R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 32R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 32R6: or $2, $[[NE]], $[[EQ]]
+
+; 64-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_ogt_i32_val)))
+; 64-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64: c.ule.d $[[TMP]], $[[TMP1]]
+; 64: movf $5, $4, $fcc0
+; 64: move $2, $5
+
+; 64R2-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_ogt_i32_val)))
+; 64R2-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R2-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R2-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R2-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R2-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R2: c.ule.d $[[TMP]], $[[TMP1]]
+; 64R2: movf $5, $4, $fcc0
+; 64R2: move $2, $5
+
+; 64R6-DAG: daddiu $[[T0:[0-9]+]], ${{[0-9]+}}, %lo(%neg(%gp_rel(f64_fcmp_ogt_i32_val)))
+; 64R6-DAG: daddu $[[GOT:[0-9]+]], $[[T0]], $25
+; 64R6-DAG: ld $[[D2:[0-9]+]], %got_disp(d2)($1)
+; 64R6-DAG: ldc1 $[[TMP:f[0-9]+]], 0($[[D2]])
+; 64R6-DAG: ld $[[D3:[0-9]+]], %got_disp(d3)($1)
+; 64R6-DAG: ldc1 $[[TMP1:f[0-9]+]], 0($[[D3]])
+; 64R6: cmp.lt.d $[[CC:f[0-9]+]], $[[TMP1]], $[[TMP]]
+; 64R6: mfc1 $[[CCGPR:[0-9]+]], $[[CC]]
+; 64R6: andi $[[CCGPR]], $[[CCGPR]], 1
+; 64R6: seleqz $[[EQ:[0-9]+]], $5, $[[CCGPR]]
+; 64R6: selnez $[[NE:[0-9]+]], $4, $[[CCGPR]]
+; 64R6: or $2, $[[NE]], $[[EQ]]
+
%tmp = load double* @d2, align 8
%tmp1 = load double* @d3, align 8
%cmp = fcmp ogt double %tmp, %tmp1
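(Illustrative aside, not part of the diff: the checks in select.ll above all reduce to two lowering idioms, summarized in the sketch below; the register names in the comments are assumptions, not copied from the tests.)

define i32 @select_sketch(i32 %cc, i32 %t, i32 %f) nounwind readnone {
entry:
; Pre-R6 (mips32, mips32r2, mips4, mips64, mips64r2): the result register starts
; out holding the false value and a conditional move overwrites it, roughly:
;   movn   $res, $t_reg, $cc_reg      # if %cc != 0, copy %t over the false value
; MIPS32R6/MIPS64R6 removed movn/movz/movt/movf, so the select is synthesized:
;   seleqz $a, $f_reg, $cc_reg        # a = (%cc == 0) ? %f : 0
;   selnez $b, $t_reg, $cc_reg        # b = (%cc != 0) ? %t : 0
;   or     $res, $b, $a
; For floating point, the c.<cond>.fmt + movt.fmt/movf.fmt pair likewise becomes
; cmp.<cond>.fmt (mask written to an FPR) + sel.fmt.
  %tobool = icmp ne i32 %cc, 0
  %cond = select i1 %tobool, i32 %t, i32 %f
  ret i32 %cond
}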
diff --git a/test/CodeGen/Mips/selectcc.ll b/test/CodeGen/Mips/selectcc.ll
index aeef60ecb806..9790a0a3e411 100644
--- a/test/CodeGen/Mips/selectcc.ll
+++ b/test/CodeGen/Mips/selectcc.ll
@@ -1,5 +1,7 @@
-; RUN: llc -march=mipsel < %s
-; RUN: llc -march=mipsel -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
+; RUN: llc -march=mipsel -mcpu=mips32 < %s
+; RUN: llc -march=mipsel -mcpu=mips32 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
+; RUN: llc -march=mipsel -mcpu=mips32r2 < %s
+; RUN: llc -march=mipsel -mcpu=mips32r2 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
@gf0 = external global float
@gf1 = external global float
@@ -16,13 +18,11 @@ entry:
; SOURCE-SCHED: lw
; SOURCE-SCHED: lui
; SOURCE-SCHED: sw
-; SOURCE-SCHED: addiu
-; SOURCE-SCHED: addiu
-; SOURCE-SCHED: c.olt.s
-; SOURCE-SCHED: movt
+; SOURCE-SCHED: lw
+; SOURCE-SCHED: lwc1
; SOURCE-SCHED: mtc1
+; SOURCE-SCHED: c.olt.s
; SOURCE-SCHED: jr
-
store float 0.000000e+00, float* @gf0, align 4
store float 1.000000e+00, float* @gf1, align 4
%cmp = fcmp olt float %a, %b
diff --git a/test/CodeGen/Mips/sint-fp-store_pattern.ll b/test/CodeGen/Mips/sint-fp-store_pattern.ll
index c44ea080a886..2735d787432d 100644
--- a/test/CodeGen/Mips/sint-fp-store_pattern.ll
+++ b/test/CodeGen/Mips/sint-fp-store_pattern.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
+; RUN: llc -march=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=64
; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
@gint_ = external global i32
diff --git a/test/CodeGen/Mips/sr1.ll b/test/CodeGen/Mips/sr1.ll
new file mode 100644
index 000000000000..610693d58b3f
--- /dev/null
+++ b/test/CodeGen/Mips/sr1.ll
@@ -0,0 +1,60 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s
+
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=NEG
+
+@f = common global float 0.000000e+00, align 4
+
+; Function Attrs: nounwind
+define void @foo1() #0 {
+entry:
+ %c = alloca [10 x i8], align 1
+ %arraydecay = getelementptr inbounds [10 x i8]* %c, i32 0, i32 0
+ call void @x(i8* %arraydecay)
+ %arraydecay1 = getelementptr inbounds [10 x i8]* %c, i32 0, i32 0
+ call void @x(i8* %arraydecay1)
+ ret void
+; CHECK: .ent foo1
+; CHECK: save $16, $17, $ra, [[FS:[0-9]+]] # 16 bit inst
+; CHECK: restore $16, $17, $ra, [[FS]] # 16 bit inst
+; CHECK: .end foo1
+}
+
+declare void @x(i8*) #1
+
+; Function Attrs: nounwind
+define void @foo2() #0 {
+entry:
+ %c = alloca [150 x i8], align 1
+ %arraydecay = getelementptr inbounds [150 x i8]* %c, i32 0, i32 0
+ call void @x(i8* %arraydecay)
+ %arraydecay1 = getelementptr inbounds [150 x i8]* %c, i32 0, i32 0
+ call void @x(i8* %arraydecay1)
+ ret void
+; CHECK: .ent foo2
+; CHECK: save $16, $17, $ra, [[FS:[0-9]+]]
+; CHECK: restore $16, $17, $ra, [[FS]]
+; CHECK: .end foo2
+}
+
+; Function Attrs: nounwind
+define void @foo3() #0 {
+entry:
+ %call = call float @xf()
+ store float %call, float* @f, align 4
+ ret void
+; CHECK: .ent foo3
+; CHECK: save $16, $17, $ra, $18, [[FS:[0-9]+]]
+; CHECK: restore $16, $17, $ra, $18, [[FS]]
+; CHECK: .end foo3
+; NEG: .ent foo3
+; NEG-NOT: save $16, $17, $ra, $18, [[FS:[0-9]+]] # 16 bit inst
+; NEG-NOT: restore $16, $17, $ra, $18, [[FS]] # 16 bit inst
+; NEG: .end foo3
+}
+
+declare float @xf() #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+
diff --git a/test/CodeGen/Mips/start-asm-file.ll b/test/CodeGen/Mips/start-asm-file.ll
new file mode 100644
index 000000000000..88724643166c
--- /dev/null
+++ b/test/CodeGen/Mips/start-asm-file.ll
@@ -0,0 +1,91 @@
+; Check the emission of directives at the start of an asm file.
+; This test is XFAILED until we fix the emission of '.option pic0' on
+; N32. At the moment we check whether the subtarget is Mips64 when we should
+; be checking the Subtarget's ABI.
+
+; ### O32 ABI ###
+; RUN: llc -filetype=asm -mtriple mips-unknown-linux -mcpu=mips32 \
+; RUN: -relocation-model=static %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-STATIC-O32 -check-prefix=CHECK-STATIC-O32-NLEGACY %s
+
+; RUN: llc -filetype=asm -mtriple mips-unknown-linux -mcpu=mips32 \
+; RUN: -relocation-model=pic %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-PIC-O32 -check-prefix=CHECK-PIC-O32-NLEGACY %s
+
+; RUN: llc -filetype=asm -mtriple mips-unknown-linux -mcpu=mips32 \
+; RUN: -relocation-model=static -mattr=+nan2008 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-STATIC-O32 -check-prefix=CHECK-STATIC-O32-N2008 %s
+
+; RUN: llc -filetype=asm -mtriple mips-unknown-linux -mcpu=mips32 \
+; RUN: -relocation-model=pic -mattr=+nan2008 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-PIC-O32 -check-prefix=CHECK-PIC-O32-N2008 %s
+
+; ### N32 ABI ###
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=static -mattr=-n64,+n32 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-STATIC-N32 -check-prefix=CHECK-STATIC-N32-NLEGACY %s
+
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=pic -mattr=-n64,+n32 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-PIC-N32 -check-prefix=CHECK-PIC-N32-NLEGACY %s
+
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=static -mattr=-n64,+n32,+nan2008 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-STATIC-N32 -check-prefix=CHECK-STATIC-N32-N2008 %s
+
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=pic -mattr=-n64,+n32,+nan2008 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-PIC-N32 -check-prefix=CHECK-PIC-N32-N2008 %s
+
+; ### N64 ABI ###
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=static -mattr=+n64 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-STATIC-N64 -check-prefix=CHECK-STATIC-N64-NLEGACY %s
+
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=pic -mattr=+n64 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-PIC-N64 -check-prefix=CHECK-PIC-N64-NLEGACY %s
+
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=static -mattr=+n64,+nan2008 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-STATIC-N64 -check-prefix=CHECK-STATIC-N64-N2008 %s
+
+; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
+; RUN: -relocation-model=pic -mattr=+n64,+nan2008 %s -o - | \
+; RUN: FileCheck -check-prefix=CHECK-PIC-N64 -check-prefix=CHECK-PIC-N64-N2008 %s
+
+; CHECK-STATIC-O32: .abicalls
+; CHECK-STATIC-O32: .option pic0
+; CHECK-STATIC-O32: .section .mdebug.abi32
+; CHECK-STATIC-O32-NLEGACY: .nan legacy
+; CHECK-STATIC-O32-N2008: .nan 2008
+
+; CHECK-PIC-O32: .abicalls
+; CHECK-PIC-O32-NOT: .option pic0
+; CHECK-PIC-O32: .section .mdebug.abi32
+; CHECK-PIC-O32-NLEGACY: .nan legacy
+; CHECK-PIC-O32-N2008: .nan 2008
+
+; CHECK-STATIC-N32: .abicalls
+; CHECK-STATIC-N32: .option pic0
+; CHECK-STATIC-N32: .section .mdebug.abiN32
+; CHECK-STATIC-N32-NLEGACY: .nan legacy
+; CHECK-STATIC-N32-N2008: .nan 2008
+
+; CHECK-PIC-N32: .abicalls
+; CHECK-PIC-N32-NOT: .option pic0
+; CHECK-PIC-N32: .section .mdebug.abiN32
+; CHECK-PIC-N32-NLEGACY: .nan legacy
+; CHECK-PIC-N32-N2008: .nan 2008
+
+; CHECK-STATIC-N64: .abicalls
+; CHECK-STATIC-N64-NOT: .option pic0
+; CHECK-STATIC-N64: .section .mdebug.abi64
+; CHECK-STATIC-N64-NLEGACY: .nan legacy
+; CHECK-STATIC-N64-N2008: .nan 2008
+
+; CHECK-PIC-N64: .abicalls
+; CHECK-PIC-N64-NOT: .option pic0
+; CHECK-PIC-N64: .section .mdebug.abi64
+; CHECK-PIC-N64-NLEGACY: .nan legacy
+; CHECK-PIC-N64-N2008: .nan 2008
diff --git a/test/CodeGen/Mips/tail16.ll b/test/CodeGen/Mips/tail16.ll
new file mode 100644
index 000000000000..4e62e557478a
--- /dev/null
+++ b/test/CodeGen/Mips/tail16.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s
+
+; Function Attrs: nounwind optsize
+define float @h() {
+entry:
+ %call = tail call float bitcast (float (...)* @g to float ()*)()
+ ret float %call
+; CHECK: .ent h
+; CHECK: save $16, $ra, $18, 32
+; CHECK: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_0)(${{[0-9]+}})
+; CHECK: restore $16, $ra, $18, 32
+; CHECK: .end h
+}
+
+; Function Attrs: optsize
+declare float @g(...)
+
+
+
+
diff --git a/test/CodeGen/Mips/tls-alias.ll b/test/CodeGen/Mips/tls-alias.ll
index 3c810542cca3..b61f84e03761 100644
--- a/test/CodeGen/Mips/tls-alias.ll
+++ b/test/CodeGen/Mips/tls-alias.ll
@@ -1,10 +1,10 @@
; RUN: llc -march=mipsel -relocation-model=pic -disable-mips-delay-filler < %s | FileCheck %s
@foo = thread_local global i32 42
-@bar = hidden alias i32* @foo
+@bar = hidden thread_local alias i32* @foo
define i32* @zed() {
; CHECK-DAG: __tls_get_addr
-; CHECK-DAG: %tlsgd(bar)
+; CHECK-DAG: %tlsldm(bar)
ret i32* @bar
}
diff --git a/test/CodeGen/Mips/tls.ll b/test/CodeGen/Mips/tls.ll
index 23a8f93a9d7c..b14ad5ba452b 100644
--- a/test/CodeGen/Mips/tls.ll
+++ b/test/CodeGen/Mips/tls.ll
@@ -1,10 +1,10 @@
; RUN: llc -march=mipsel -disable-mips-delay-filler < %s | \
-; RUN: FileCheck %s -check-prefix=PIC
+; RUN: FileCheck %s -check-prefix=PIC -check-prefix=CHECK
; RUN: llc -march=mipsel -relocation-model=static -disable-mips-delay-filler < \
-; RUN: %s | FileCheck %s -check-prefix=STATIC
+; RUN: %s | FileCheck %s -check-prefix=STATIC -check-prefix=CHECK
; RUN: llc -march=mipsel -relocation-model=static -disable-mips-delay-filler \
; RUN: -mips-fix-global-base-reg=false < %s | \
-; RUN: FileCheck %s -check-prefix=STATICGP
+; RUN: FileCheck %s -check-prefix=STATICGP -check-prefix=CHECK
@t1 = thread_local global i32 0, align 4
diff --git a/test/CodeGen/Mips/trap1.ll b/test/CodeGen/Mips/trap1.ll
index bfcd7fed30d9..90755130e7c2 100644
--- a/test/CodeGen/Mips/trap1.ll
+++ b/test/CodeGen/Mips/trap1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=pic < %s | FileCheck %s -check-prefix=pic
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=pic
declare void @llvm.trap()
diff --git a/test/CodeGen/Mips/unalignedload.ll b/test/CodeGen/Mips/unalignedload.ll
index 19f3af7f344a..2002b1c60abe 100644
--- a/test/CodeGen/Mips/unalignedload.ll
+++ b/test/CodeGen/Mips/unalignedload.ll
@@ -1,5 +1,9 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s -check-prefix=CHECK-EL
-; RUN: llc < %s -march=mips | FileCheck %s -check-prefix=CHECK-EB
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=ALL-EL -check-prefix=MIPS32-EL
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=ALL-EB -check-prefix=MIPS32-EB
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=ALL-EL -check-prefix=MIPS32-EL
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=ALL-EB -check-prefix=MIPS32-EB
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=ALL-EL -check-prefix=MIPS32R6-EL
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=ALL-EB -check-prefix=MIPS32R6-EB
%struct.S2 = type { %struct.S1, %struct.S1 }
%struct.S1 = type { i8, i8 }
%struct.S4 = type { [7 x i8] }
@@ -7,21 +11,71 @@
@s2 = common global %struct.S2 zeroinitializer, align 1
@s4 = common global %struct.S4 zeroinitializer, align 1
-define void @foo1() nounwind {
+define void @bar1() nounwind {
entry:
-; CHECK-EL-DAG: lbu ${{[0-9]+}}, 2($[[R0:[0-9]+]])
-; CHECK-EL-DAG: lbu ${{[0-9]+}}, 3($[[R0]])
-; CHECK-EL: jalr
-; CHECK-EL-DAG: lwl $[[R1:[0-9]+]], 3($[[R2:[0-9]+]])
-; CHECK-EL-DAG: lwr $[[R1]], 0($[[R2]])
-
-; CHECK-EB-DAG: lbu ${{[0-9]+}}, 3($[[R0:[0-9]+]])
-; CHECK-EB-DAG: lbu ${{[0-9]+}}, 2($[[R0]])
-; CHECK-EB: jalr
-; CHECK-EB-DAG: lwl $[[R1:[0-9]+]], 0($[[R2:[0-9]+]])
-; CHECK-EB-DAG: lwr $[[R1]], 3($[[R2]])
+; ALL-LABEL: bar1:
+
+; ALL-DAG: lw $[[R0:[0-9]+]], %got(s2)(
+
+; MIPS32-EL-DAG: lbu $[[PART1:[0-9]+]], 2($[[R0]])
+; MIPS32-EL-DAG: lbu $[[PART2:[0-9]+]], 3($[[R0]])
+; MIPS32-EL-DAG: sll $[[T0:[0-9]+]], $[[PART2]], 8
+; MIPS32-EL-DAG: or $4, $[[T0]], $[[PART1]]
+
+; MIPS32-EB-DAG: lbu $[[PART1:[0-9]+]], 2($[[R0]])
+; MIPS32-EB-DAG: lbu $[[PART2:[0-9]+]], 3($[[R0]])
+; MIPS32-EB-DAG: sll $[[T0:[0-9]+]], $[[PART1]], 8
+; MIPS32-EB-DAG: or $[[T1:[0-9]+]], $[[T0]], $[[PART2]]
+; MIPS32-EB-DAG: sll $4, $[[T1]], 16
+
+; MIPS32R6-DAG: lhu $[[PART1:[0-9]+]], 2($[[R0]])
tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2* @s2, i32 0, i32 1)) nounwind
+ ret void
+}
+
+define void @bar2() nounwind {
+entry:
+; ALL-LABEL: bar2:
+
+; ALL-DAG: lw $[[R2:[0-9]+]], %got(s4)(
+
+; MIPS32-EL-DAG: lwl $[[R1:4]], 3($[[R2]])
+; MIPS32-EL-DAG: lwr $[[R1]], 0($[[R2]])
+; MIPS32-EL-DAG: lbu $[[T0:[0-9]+]], 4($[[R2]])
+; MIPS32-EL-DAG: lbu $[[T1:[0-9]+]], 5($[[R2]])
+; MIPS32-EL-DAG: lbu $[[T2:[0-9]+]], 6($[[R2]])
+; MIPS32-EL-DAG: sll $[[T3:[0-9]+]], $[[T1]], 8
+; MIPS32-EL-DAG: or $[[T4:[0-9]+]], $[[T3]], $[[T0]]
+; MIPS32-EL-DAG: sll $[[T5:[0-9]+]], $[[T2]], 16
+; MIPS32-EL-DAG: or $5, $[[T4]], $[[T5]]
+
+; MIPS32-EB-DAG: lwl $[[R1:4]], 0($[[R2]])
+; MIPS32-EB-DAG: lwr $[[R1]], 3($[[R2]])
+; MIPS32-EB-DAG: lbu $[[T0:[0-9]+]], 4($[[R2]])
+; MIPS32-EB-DAG: lbu $[[T1:[0-9]+]], 5($[[R2]])
+; MIPS32-EB-DAG: lbu $[[T2:[0-9]+]], 6($[[R2]])
+; MIPS32-EB-DAG: sll $[[T3:[0-9]+]], $[[T0]], 8
+; MIPS32-EB-DAG: or $[[T4:[0-9]+]], $[[T3]], $[[T1]]
+; MIPS32-EB-DAG: sll $[[T5:[0-9]+]], $[[T4]], 16
+; MIPS32-EB-DAG: sll $[[T6:[0-9]+]], $[[T2]], 8
+; MIPS32-EB-DAG: or $5, $[[T5]], $[[T6]]
+
+; FIXME: We should be able to do better than this using lhu
+; MIPS32R6-EL-DAG: lw $4, 0($[[R2]])
+; MIPS32R6-EL-DAG: lhu $[[T0:[0-9]+]], 4($[[R2]])
+; MIPS32R6-EL-DAG: lbu $[[T1:[0-9]+]], 6($[[R2]])
+; MIPS32R6-EL-DAG: sll $[[T2:[0-9]+]], $[[T1]], 16
+; MIPS32R6-EL-DAG: or $5, $[[T0]], $[[T2]]
+
+; FIXME: We should be able to do better than this using lhu
+; MIPS32R6-EB-DAG: lw $4, 0($[[R2]])
+; MIPS32R6-EB-DAG: lhu $[[T0:[0-9]+]], 4($[[R2]])
+; MIPS32R6-EB-DAG: lbu $[[T1:[0-9]+]], 6($[[R2]])
+; MIPS32R6-EB-DAG: sll $[[T2:[0-9]+]], $[[T0]], 16
+; MIPS32R6-EB-DAG: sll $[[T3:[0-9]+]], $[[T1]], 8
+; MIPS32R6-EB-DAG: or $5, $[[T2]], $[[T3]]
+
tail call void @foo4(%struct.S4* byval @s4) nounwind
ret void
}
diff --git a/test/CodeGen/Mips/zeroreg.ll b/test/CodeGen/Mips/zeroreg.ll
index e0e93e2e7682..a1b6cb0322b1 100644
--- a/test/CodeGen/Mips/zeroreg.ll
+++ b/test/CodeGen/Mips/zeroreg.ll
@@ -1,21 +1,109 @@
-; RUN: llc < %s -march=mipsel | FileCheck %s
+; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32R6
+; RUN: llc < %s -march=mipsel -mcpu=mips4 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL -check-prefix=64-CMOV
+; RUN: llc < %s -march=mipsel -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL -check-prefix=64R6
@g1 = external global i32
-define i32 @foo0(i32 %s) nounwind readonly {
+define i32 @sel_icmp_nez_i32_z0(i32 %s) nounwind readonly {
entry:
-; CHECK: movn ${{[0-9]+}}, $zero
+; ALL-LABEL: sel_icmp_nez_i32_z0:
+
+; 32-CMOV: lw $2, 0(${{[0-9]+}})
+; 32-CMOV: movn $2, $zero, $4
+
+; 32R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6: seleqz $2, $[[R0]], $4
+
+; 64-CMOV: lw $2, 0(${{[0-9]+}})
+; 64-CMOV: movn $2, $zero, $4
+
+; 64R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: seleqz $2, $[[R0]], $4
+
%tobool = icmp ne i32 %s, 0
%0 = load i32* @g1, align 4
%cond = select i1 %tobool, i32 0, i32 %0
ret i32 %cond
}
-define i32 @foo1(i32 %s) nounwind readonly {
+define i32 @sel_icmp_nez_i32_z1(i32 %s) nounwind readonly {
entry:
-; CHECK: movz ${{[0-9]+}}, $zero
+; ALL-LABEL: sel_icmp_nez_i32_z1:
+
+; 32-CMOV: lw $2, 0(${{[0-9]+}})
+; 32-CMOV: movz $2, $zero, $4
+
+; 32R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6: selnez $2, $[[R0]], $4
+
+; 64-CMOV: lw $2, 0(${{[0-9]+}})
+; 64-CMOV: movz $2, $zero, $4
+
+; 64R6: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: selnez $2, $[[R0]], $4
+
%tobool = icmp ne i32 %s, 0
%0 = load i32* @g1, align 4
%cond = select i1 %tobool, i32 %0, i32 0
ret i32 %cond
}
+
+@g2 = external global i64
+
+define i64 @sel_icmp_nez_i64_z0(i64 %s) nounwind readonly {
+entry:
+; ALL-LABEL: sel_icmp_nez_i64_z0:
+
+; 32-CMOV-DAG: lw $[[R0:2]], 0(${{[0-9]+}})
+; 32-CMOV-DAG: lw $[[R1:3]], 4(${{[0-9]+}})
+; 32-CMOV-DAG: movn $[[R0]], $zero, $4
+; 32-CMOV-DAG: movn $[[R1]], $zero, $4
+
+; 32R6-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-DAG: or $[[CC:[0-9]+]], $4, $5
+; 32R6-DAG: seleqz $2, $[[R0]], $[[CC]]
+; 32R6-DAG: seleqz $3, $[[R1]], $[[CC]]
+
+; 64-CMOV: ld $2, 0(${{[0-9]+}})
+; 64-CMOV: movn $2, $zero, $4
+
+; 64R6: ld $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: seleqz $2, $[[R0]], $4
+
+ %tobool = icmp ne i64 %s, 0
+ %0 = load i64* @g2, align 4
+ %cond = select i1 %tobool, i64 0, i64 %0
+ ret i64 %cond
+}
+
+define i64 @sel_icmp_nez_i64_z1(i64 %s) nounwind readonly {
+entry:
+; ALL-LABEL: sel_icmp_nez_i64_z1:
+
+; 32-CMOV-DAG: lw $[[R0:2]], 0(${{[0-9]+}})
+; 32-CMOV-DAG: lw $[[R1:3]], 4(${{[0-9]+}})
+; 32-CMOV-DAG: movz $[[R0]], $zero, $4
+; 32-CMOV-DAG: movz $[[R1]], $zero, $4
+
+; 32R6-DAG: lw $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 32R6-DAG: lw $[[R1:[0-9]+]], 4(${{[0-9]+}})
+; 32R6-DAG: or $[[CC:[0-9]+]], $4, $5
+; 32R6-DAG: selnez $2, $[[R0]], $[[CC]]
+; 32R6-DAG: selnez $3, $[[R1]], $[[CC]]
+
+; 64-CMOV: ld $2, 0(${{[0-9]+}})
+; 64-CMOV: movz $2, $zero, $4
+
+; 64R6: ld $[[R0:[0-9]+]], 0(${{[0-9]+}})
+; 64R6: selnez $2, $[[R0]], $4
+
+ %tobool = icmp ne i64 %s, 0
+ %0 = load i64* @g2, align 4
+ %cond = select i1 %tobool, i64 %0, i64 0
+ ret i64 %cond
+}
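(Illustrative aside on zeroreg.ll, not part of the diff; register names below are assumptions.) When one arm of the select is the constant zero, pre-R6 subtargets keep the conditional move but source it from $zero, while R6 collapses the whole select into a single seleqz or selnez with no final or:

define i32 @zero_arm_sketch(i32 %cc, i32 %v) nounwind readnone {
entry:
; Pre-R6:  movn   $res, $zero, $cc_reg    # clobber %v with 0 when %cc != 0
; R6:      seleqz $res, $v_reg, $cc_reg   # res = (%cc == 0) ? %v : 0
  %tobool = icmp ne i32 %cc, 0
  %cond = select i1 %tobool, i32 0, i32 %v
  ret i32 %cond
}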
diff --git a/test/CodeGen/NVPTX/access-non-generic.ll b/test/CodeGen/NVPTX/access-non-generic.ll
new file mode 100644
index 000000000000..c225abf0fd85
--- /dev/null
+++ b/test/CodeGen/NVPTX/access-non-generic.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
+; RUN: opt < %s -S -nvptx-favor-non-generic -dce | FileCheck %s --check-prefix IR
+
+@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
+@scalar = internal addrspace(3) global float 0.000000e+00, align 4
+
+; Verifies nvptx-favor-non-generic correctly optimizes generic address space
+; usage to non-generic address space usage for the patterns we claim to handle:
+; 1. load cast
+; 2. store cast
+; 3. load gep cast
+; 4. store gep cast
+; gep and cast can be an instruction or a constant expression. This function
+; tries all possible combinations.
+define float @ld_st_shared_f32(i32 %i, float %v) {
+; IR-LABEL: @ld_st_shared_f32
+; IR-NOT: addrspacecast
+; PTX-LABEL: ld_st_shared_f32(
+ ; load cast
+ %1 = load float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
+; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
+ ; store cast
+ store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
+; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
+ ; use syncthreads to disable optimizations across components
+ call void @llvm.cuda.syncthreads()
+; PTX: bar.sync 0;
+
+ ; cast; load
+ %2 = addrspacecast float addrspace(3)* @scalar to float*
+ %3 = load float* %2, align 4
+; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
+ ; cast; store
+ store float %v, float* %2, align 4
+; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
+ call void @llvm.cuda.syncthreads()
+; PTX: bar.sync 0;
+
+ ; load gep cast
+ %4 = load float* getelementptr inbounds ([10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
+; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
+ ; store gep cast
+ store float %v, float* getelementptr inbounds ([10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
+; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
+ call void @llvm.cuda.syncthreads()
+; PTX: bar.sync 0;
+
+ ; gep cast; load
+ %5 = getelementptr inbounds [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
+ %6 = load float* %5, align 4
+; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
+ ; gep cast; store
+ store float %v, float* %5, align 4
+; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
+ call void @llvm.cuda.syncthreads()
+; PTX: bar.sync 0;
+
+ ; cast; gep; load
+ %7 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
+ %8 = getelementptr inbounds [10 x float]* %7, i32 0, i32 %i
+ %9 = load float* %8, align 4
+; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
+ ; cast; gep; store
+ store float %v, float* %8, align 4
+; PTX: st.shared.f32 [%{{(r|rl|rd)[0-9]+}}], %f{{[0-9]+}};
+ call void @llvm.cuda.syncthreads()
+; PTX: bar.sync 0;
+
+ %sum2 = fadd float %1, %3
+ %sum3 = fadd float %sum2, %4
+ %sum4 = fadd float %sum3, %6
+ %sum5 = fadd float %sum4, %9
+ ret float %sum5
+}
+
+; When hoisting an addrspacecast between different pointer types, replace the
+; addrspacecast with a bitcast.
+define i32 @ld_int_from_float() {
+; IR-LABEL: @ld_int_from_float
+; IR: load i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)
+; PTX-LABEL: ld_int_from_float(
+; PTX: ld.shared.u{{(32|64)}}
+ %1 = load i32* addrspacecast(float addrspace(3)* @scalar to i32*), align 4
+ ret i32 %1
+}
+
+declare void @llvm.cuda.syncthreads() #3
+
+attributes #3 = { noduplicate nounwind }
+
diff --git a/test/CodeGen/NVPTX/addrspacecast-gvar.ll b/test/CodeGen/NVPTX/addrspacecast-gvar.ll
new file mode 100644
index 000000000000..6afbdb8a429f
--- /dev/null
+++ b/test/CodeGen/NVPTX/addrspacecast-gvar.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: .visible .global .align 4 .u32 g = 42;
+; CHECK: .visible .global .align 4 .u32 g2 = generic(g);
+; CHECK: .visible .global .align 4 .u32 g3 = g;
+
+@g = addrspace(1) global i32 42
+@g2 = addrspace(1) global i32* addrspacecast (i32 addrspace(1)* @g to i32*)
+@g3 = addrspace(1) global i32 addrspace(1)* @g
diff --git a/test/CodeGen/NVPTX/addrspacecast.ll b/test/CodeGen/NVPTX/addrspacecast.ll
new file mode 100644
index 000000000000..03b9a9844752
--- /dev/null
+++ b/test/CodeGen/NVPTX/addrspacecast.ll
@@ -0,0 +1,99 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -disable-nvptx-favor-non-generic | FileCheck %s -check-prefix=PTX32
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -disable-nvptx-favor-non-generic | FileCheck %s -check-prefix=PTX64
+
+
+define i32 @conv1(i32 addrspace(1)* %ptr) {
+; PTX32: conv1
+; PTX32: cvta.global.u32
+; PTX32: ld.u32
+; PTX64: conv1
+; PTX64: cvta.global.u64
+; PTX64: ld.u32
+ %genptr = addrspacecast i32 addrspace(1)* %ptr to i32*
+ %val = load i32* %genptr
+ ret i32 %val
+}
+
+define i32 @conv2(i32 addrspace(3)* %ptr) {
+; PTX32: conv2
+; PTX32: cvta.shared.u32
+; PTX32: ld.u32
+; PTX64: conv2
+; PTX64: cvta.shared.u64
+; PTX64: ld.u32
+ %genptr = addrspacecast i32 addrspace(3)* %ptr to i32*
+ %val = load i32* %genptr
+ ret i32 %val
+}
+
+define i32 @conv3(i32 addrspace(4)* %ptr) {
+; PTX32: conv3
+; PTX32: cvta.const.u32
+; PTX32: ld.u32
+; PTX64: conv3
+; PTX64: cvta.const.u64
+; PTX64: ld.u32
+ %genptr = addrspacecast i32 addrspace(4)* %ptr to i32*
+ %val = load i32* %genptr
+ ret i32 %val
+}
+
+define i32 @conv4(i32 addrspace(5)* %ptr) {
+; PTX32: conv4
+; PTX32: cvta.local.u32
+; PTX32: ld.u32
+; PTX64: conv4
+; PTX64: cvta.local.u64
+; PTX64: ld.u32
+ %genptr = addrspacecast i32 addrspace(5)* %ptr to i32*
+ %val = load i32* %genptr
+ ret i32 %val
+}
+
+define i32 @conv5(i32* %ptr) {
+; PTX32: conv5
+; PTX32: cvta.to.global.u32
+; PTX32: ld.global.u32
+; PTX64: conv5
+; PTX64: cvta.to.global.u64
+; PTX64: ld.global.u32
+ %specptr = addrspacecast i32* %ptr to i32 addrspace(1)*
+ %val = load i32 addrspace(1)* %specptr
+ ret i32 %val
+}
+
+define i32 @conv6(i32* %ptr) {
+; PTX32: conv6
+; PTX32: cvta.to.shared.u32
+; PTX32: ld.shared.u32
+; PTX64: conv6
+; PTX64: cvta.to.shared.u64
+; PTX64: ld.shared.u32
+ %specptr = addrspacecast i32* %ptr to i32 addrspace(3)*
+ %val = load i32 addrspace(3)* %specptr
+ ret i32 %val
+}
+
+define i32 @conv7(i32* %ptr) {
+; PTX32: conv7
+; PTX32: cvta.to.const.u32
+; PTX32: ld.const.u32
+; PTX64: conv7
+; PTX64: cvta.to.const.u64
+; PTX64: ld.const.u32
+ %specptr = addrspacecast i32* %ptr to i32 addrspace(4)*
+ %val = load i32 addrspace(4)* %specptr
+ ret i32 %val
+}
+
+define i32 @conv8(i32* %ptr) {
+; PTX32: conv8
+; PTX32: cvta.to.local.u32
+; PTX32: ld.local.u32
+; PTX64: conv8
+; PTX64: cvta.to.local.u64
+; PTX64: ld.local.u32
+ %specptr = addrspacecast i32* %ptr to i32 addrspace(5)*
+ %val = load i32 addrspace(5)* %specptr
+ ret i32 %val
+}
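(Illustrative aside on the cvta checks above, not part of the diff.) The two directions are easy to confuse: casting from a specific address space to the generic space selects cvta.<space>, while casting from generic back to a specific space selects cvta.to.<space>. A minimal sketch, assuming the same -disable-nvptx-favor-non-generic setup as the tests above:

define float @cvta_sketch(float addrspace(1)* %g, float* %p) {
entry:
; specific -> generic: expected to produce cvta.global.u32/u64 and a generic ld.f32
  %gen = addrspacecast float addrspace(1)* %g to float*
  %a = load float* %gen
; generic -> specific: expected to produce cvta.to.global.u32/u64 and ld.global.f32
  %spec = addrspacecast float* %p to float addrspace(1)*
  %b = load float addrspace(1)* %spec
  %sum = fadd float %a, %b
  ret float %sum
}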
diff --git a/test/CodeGen/NVPTX/aggr-param.ll b/test/CodeGen/NVPTX/aggr-param.ll
new file mode 100644
index 000000000000..21deb7ebce87
--- /dev/null
+++ b/test/CodeGen/NVPTX/aggr-param.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; Make sure aggregate param types get emitted properly.
+
+%struct.float4 = type { float, float, float, float }
+
+; CHECK: .visible .func bar
+; CHECK: .param .align 4 .b8 bar_param_0[16]
+define void @bar(%struct.float4 %f) {
+entry:
+ ret void
+}
+
+; CHECK: .visible .func foo
+; CHECK: .param .align 4 .b8 foo_param_0[20]
+define void @foo([5 x i32] %f) {
+entry:
+ ret void
+}
+
diff --git a/test/CodeGen/NVPTX/arg-lowering.ll b/test/CodeGen/NVPTX/arg-lowering.ll
new file mode 100644
index 000000000000..f7b8a1491d37
--- /dev/null
+++ b/test/CodeGen/NVPTX/arg-lowering.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: .visible .func (.param .align 16 .b8 func_retval0[16]) foo0(
+; CHECK: .param .align 4 .b8 foo0_param_0[8]
+define <4 x float> @foo0({float, float} %arg0) {
+ ret <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[8]) foo1(
+; CHECK: .param .align 8 .b8 foo1_param_0[16]
+define <2 x float> @foo1({float, float, i64} %arg0) {
+ ret <2 x float> <float 1.0, float 1.0>
+}
diff --git a/test/CodeGen/NVPTX/arithmetic-fp-sm20.ll b/test/CodeGen/NVPTX/arithmetic-fp-sm20.ll
index e474fa4df5ce..c167db4b46dc 100644
--- a/test/CodeGen/NVPTX/arithmetic-fp-sm20.ll
+++ b/test/CodeGen/NVPTX/arithmetic-fp-sm20.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -fp-contract=fast | FileCheck %s
;; These tests should run for all targets
@@ -9,28 +9,28 @@
;;; f64
define double @fadd_f64(double %a, double %b) {
-; CHECK: add.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: add.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fadd double %a, %b
ret double %ret
}
define double @fsub_f64(double %a, double %b) {
-; CHECK: sub.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: sub.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fsub double %a, %b
ret double %ret
}
define double @fmul_f64(double %a, double %b) {
-; CHECK: mul.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: mul.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fmul double %a, %b
ret double %ret
}
define double @fdiv_f64(double %a, double %b) {
-; CHECK: div.rn.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: div.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
; CHECK: ret
%ret = fdiv double %a, %b
ret double %ret
diff --git a/test/CodeGen/NVPTX/arithmetic-int.ll b/test/CodeGen/NVPTX/arithmetic-int.ll
index 8d73b7e6c4c6..b5a2872299b7 100644
--- a/test/CodeGen/NVPTX/arithmetic-int.ll
+++ b/test/CodeGen/NVPTX/arithmetic-int.ll
@@ -9,70 +9,70 @@
;;; i64
define i64 @add_i64(i64 %a, i64 %b) {
-; CHECK: add.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: add.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = add i64 %a, %b
ret i64 %ret
}
define i64 @sub_i64(i64 %a, i64 %b) {
-; CHECK: sub.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: sub.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = sub i64 %a, %b
ret i64 %ret
}
define i64 @mul_i64(i64 %a, i64 %b) {
-; CHECK: mul.lo.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: mul.lo.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = mul i64 %a, %b
ret i64 %ret
}
define i64 @sdiv_i64(i64 %a, i64 %b) {
-; CHECK: div.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: div.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = sdiv i64 %a, %b
ret i64 %ret
}
define i64 @udiv_i64(i64 %a, i64 %b) {
-; CHECK: div.u64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: div.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = udiv i64 %a, %b
ret i64 %ret
}
define i64 @srem_i64(i64 %a, i64 %b) {
-; CHECK: rem.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: rem.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = srem i64 %a, %b
ret i64 %ret
}
define i64 @urem_i64(i64 %a, i64 %b) {
-; CHECK: rem.u64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: rem.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = urem i64 %a, %b
ret i64 %ret
}
define i64 @and_i64(i64 %a, i64 %b) {
-; CHECK: and.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: and.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = and i64 %a, %b
ret i64 %ret
}
define i64 @or_i64(i64 %a, i64 %b) {
-; CHECK: or.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: or.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = or i64 %a, %b
ret i64 %ret
}
define i64 @xor_i64(i64 %a, i64 %b) {
-; CHECK: xor.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: xor.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%ret = xor i64 %a, %b
ret i64 %ret
@@ -80,7 +80,7 @@ define i64 @xor_i64(i64 %a, i64 %b) {
define i64 @shl_i64(i64 %a, i64 %b) {
; PTX requires 32-bit shift amount
-; CHECK: shl.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: shl.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = shl i64 %a, %b
ret i64 %ret
@@ -88,7 +88,7 @@ define i64 @shl_i64(i64 %a, i64 %b) {
define i64 @ashr_i64(i64 %a, i64 %b) {
; PTX requires 32-bit shift amount
-; CHECK: shr.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: shr.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = ashr i64 %a, %b
ret i64 %ret
@@ -96,7 +96,7 @@ define i64 @ashr_i64(i64 %a, i64 %b) {
define i64 @lshr_i64(i64 %a, i64 %b) {
; PTX requires 32-bit shift amount
-; CHECK: shr.u64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: shr.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%ret = lshr i64 %a, %b
ret i64 %ret
diff --git a/test/CodeGen/NVPTX/atomics.ll b/test/CodeGen/NVPTX/atomics.ll
new file mode 100644
index 000000000000..daadb6e9c1a0
--- /dev/null
+++ b/test/CodeGen/NVPTX/atomics.ll
@@ -0,0 +1,182 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+; CHECK-LABEL: atom0
+define i32 @atom0(i32* %addr, i32 %val) {
+; CHECK: atom.add.u32
+ %ret = atomicrmw add i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom1
+define i64 @atom1(i64* %addr, i64 %val) {
+; CHECK: atom.add.u64
+ %ret = atomicrmw add i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom2
+define i32 @atom2(i32* %subr, i32 %val) {
+; CHECK: neg.s32
+; CHECK: atom.add.u32
+ %ret = atomicrmw sub i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom3
+define i64 @atom3(i64* %subr, i64 %val) {
+; CHECK: neg.s64
+; CHECK: atom.add.u64
+ %ret = atomicrmw sub i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom4
+define i32 @atom4(i32* %subr, i32 %val) {
+; CHECK: atom.and.b32
+ %ret = atomicrmw and i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom5
+define i64 @atom5(i64* %subr, i64 %val) {
+; CHECK: atom.and.b64
+ %ret = atomicrmw and i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+;; NAND not yet supported
+;define i32 @atom6(i32* %subr, i32 %val) {
+; %ret = atomicrmw nand i32* %subr, i32 %val seq_cst
+; ret i32 %ret
+;}
+
+;define i64 @atom7(i64* %subr, i64 %val) {
+; %ret = atomicrmw nand i64* %subr, i64 %val seq_cst
+; ret i64 %ret
+;}
+
+; CHECK-LABEL: atom8
+define i32 @atom8(i32* %subr, i32 %val) {
+; CHECK: atom.or.b32
+ %ret = atomicrmw or i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom9
+define i64 @atom9(i64* %subr, i64 %val) {
+; CHECK: atom.or.b64
+ %ret = atomicrmw or i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom10
+define i32 @atom10(i32* %subr, i32 %val) {
+; CHECK: atom.xor.b32
+ %ret = atomicrmw xor i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom11
+define i64 @atom11(i64* %subr, i64 %val) {
+; CHECK: atom.xor.b64
+ %ret = atomicrmw xor i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom12
+define i32 @atom12(i32* %subr, i32 %val) {
+; CHECK: atom.max.s32
+ %ret = atomicrmw max i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom13
+define i64 @atom13(i64* %subr, i64 %val) {
+; CHECK: atom.max.s64
+ %ret = atomicrmw max i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom14
+define i32 @atom14(i32* %subr, i32 %val) {
+; CHECK: atom.min.s32
+ %ret = atomicrmw min i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom15
+define i64 @atom15(i64* %subr, i64 %val) {
+; CHECK: atom.min.s64
+ %ret = atomicrmw min i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom16
+define i32 @atom16(i32* %subr, i32 %val) {
+; CHECK: atom.max.u32
+ %ret = atomicrmw umax i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom17
+define i64 @atom17(i64* %subr, i64 %val) {
+; CHECK: atom.max.u64
+ %ret = atomicrmw umax i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom18
+define i32 @atom18(i32* %subr, i32 %val) {
+; CHECK: atom.min.u32
+ %ret = atomicrmw umin i32* %subr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom19
+define i64 @atom19(i64* %subr, i64 %val) {
+; CHECK: atom.min.u64
+ %ret = atomicrmw umin i64* %subr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+declare float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %addr, float %val)
+
+; CHECK-LABEL: atomic_add_f32_generic
+define float @atomic_add_f32_generic(float* %addr, float %val) {
+; CHECK: atom.add.f32
+ %ret = call float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %addr, float %val)
+ ret float %ret
+}
+
+declare float @llvm.nvvm.atomic.load.add.f32.p1f32(float addrspace(1)* %addr, float %val)
+
+; CHECK-LABEL: atomic_add_f32_addrspace1
+define float @atomic_add_f32_addrspace1(float addrspace(1)* %addr, float %val) {
+; CHECK: atom.global.add.f32
+ %ret = call float @llvm.nvvm.atomic.load.add.f32.p1f32(float addrspace(1)* %addr, float %val)
+ ret float %ret
+}
+
+declare float @llvm.nvvm.atomic.load.add.f32.p3f32(float addrspace(3)* %addr, float %val)
+
+; CHECK-LABEL: atomic_add_f32_addrspace3
+define float @atomic_add_f32_addrspace3(float addrspace(3)* %addr, float %val) {
+; CHECK: atom.shared.add.f32
+ %ret = call float @llvm.nvvm.atomic.load.add.f32.p3f32(float addrspace(3)* %addr, float %val)
+ ret float %ret
+}
+
+; CHECK-LABEL: atomic_cmpxchg_i32
+define i32 @atomic_cmpxchg_i32(i32* %addr, i32 %cmp, i32 %new) {
+; CHECK: atom.cas.b32
+ %pairold = cmpxchg i32* %addr, i32 %cmp, i32 %new seq_cst seq_cst
+ ret i32 %new
+}
+
+; CHECK-LABEL: atomic_cmpxchg_i64
+define i64 @atomic_cmpxchg_i64(i64* %addr, i64 %cmp, i64 %new) {
+; CHECK: atom.cas.b64
+ %pairold = cmpxchg i64* %addr, i64 %cmp, i64 %new seq_cst seq_cst
+ ret i64 %new
+}
diff --git a/test/CodeGen/NVPTX/bfe.ll b/test/CodeGen/NVPTX/bfe.ll
new file mode 100644
index 000000000000..2e816fec2c59
--- /dev/null
+++ b/test/CodeGen/NVPTX/bfe.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+; CHECK: bfe0
+define i32 @bfe0(i32 %a) {
+; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 4, 4
+; CHECK-NOT: shr
+; CHECK-NOT: and
+ %val0 = ashr i32 %a, 4
+ %val1 = and i32 %val0, 15
+ ret i32 %val1
+}
+
+; CHECK: bfe1
+define i32 @bfe1(i32 %a) {
+; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 3, 3
+; CHECK-NOT: shr
+; CHECK-NOT: and
+ %val0 = ashr i32 %a, 3
+ %val1 = and i32 %val0, 7
+ ret i32 %val1
+}
+
+; CHECK: bfe2
+define i32 @bfe2(i32 %a) {
+; CHECK: bfe.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, 5, 3
+; CHECK-NOT: shr
+; CHECK-NOT: and
+ %val0 = ashr i32 %a, 5
+ %val1 = and i32 %val0, 7
+ ret i32 %val1
+}
diff --git a/test/CodeGen/NVPTX/bug17709.ll b/test/CodeGen/NVPTX/bug17709.ll
index 92f0fcb11e41..076c44684579 100644
--- a/test/CodeGen/NVPTX/bug17709.ll
+++ b/test/CodeGen/NVPTX/bug17709.ll
@@ -4,7 +4,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
target triple = "nvptx64-nvidia-cuda"
-define linker_private ptx_device { double, double } @__utils1_MOD_trace(%"struct.array2_complex(kind=8).43.5.57"* noalias %m) {
+define private ptx_device { double, double } @__utils1_MOD_trace(%"struct.array2_complex(kind=8).43.5.57"* noalias %m) {
entry:
;unreachable
%t0 = insertvalue {double, double} undef, double 1.0, 0
diff --git a/test/CodeGen/NVPTX/call-with-alloca-buffer.ll b/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
new file mode 100644
index 000000000000..83d491637041
--- /dev/null
+++ b/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+; Checks how NVPTX lowers alloca buffers and how they are passed to functions.
+;
+; Produced with the following CUDA code:
+; extern "C" __attribute__((device)) void callee(float* f, char* buf);
+;
+; extern "C" __attribute__((global)) void kernel_func(float* a) {
+; char buf[4 * sizeof(float)];
+; *(reinterpret_cast<float*>(&buf[0])) = a[0];
+; *(reinterpret_cast<float*>(&buf[1])) = a[1];
+; *(reinterpret_cast<float*>(&buf[2])) = a[2];
+; *(reinterpret_cast<float*>(&buf[3])) = a[3];
+; callee(a, buf);
+; }
+
+; CHECK: .visible .entry kernel_func
+define void @kernel_func(float* %a) {
+entry:
+ %buf = alloca [16 x i8], align 4
+
+; CHECK: .local .align 4 .b8 __local_depot0[16]
+; CHECK: mov.u64 %rd[[BUF_REG:[0-9]+]]
+; CHECK: cvta.local.u64 %SP, %rd[[BUF_REG]]
+
+; CHECK: ld.param.u64 %rd[[A_REG:[0-9]+]], [kernel_func_param_0]
+; CHECK: ld.f32 %f[[A0_REG:[0-9]+]], [%rd[[A_REG]]]
+; CHECK: st.f32 [%SP+0], %f[[A0_REG]]
+
+ %0 = load float* %a, align 4
+ %1 = bitcast [16 x i8]* %buf to float*
+ store float %0, float* %1, align 4
+ %arrayidx2 = getelementptr inbounds float* %a, i64 1
+ %2 = load float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 1
+ %3 = bitcast i8* %arrayidx3 to float*
+ store float %2, float* %3, align 4
+ %arrayidx4 = getelementptr inbounds float* %a, i64 2
+ %4 = load float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 2
+ %5 = bitcast i8* %arrayidx5 to float*
+ store float %4, float* %5, align 4
+ %arrayidx6 = getelementptr inbounds float* %a, i64 3
+ %6 = load float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 3
+ %7 = bitcast i8* %arrayidx7 to float*
+ store float %6, float* %7, align 4
+
+; CHECK: add.u64 %rd[[SP_REG:[0-9]+]], %SP, 0
+; CHECK: .param .b64 param0;
+; CHECK-NEXT: st.param.b64 [param0+0], %rd[[A_REG]]
+; CHECK-NEXT: .param .b64 param1;
+; CHECK-NEXT: st.param.b64 [param1+0], %rd[[SP_REG]]
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: callee,
+
+ %arraydecay = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 0
+ call void @callee(float* %a, i8* %arraydecay) #2
+ ret void
+}
+
+declare void @callee(float*, i8*)
+
+!nvvm.annotations = !{!0}
+
+!0 = metadata !{void (float*)* @kernel_func, metadata !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/compare-int.ll b/test/CodeGen/NVPTX/compare-int.ll
index c595f215f6f1..e4e0601db59f 100644
--- a/test/CodeGen/NVPTX/compare-int.ll
+++ b/test/CodeGen/NVPTX/compare-int.ll
@@ -9,8 +9,8 @@
;;; i64
define i64 @icmp_eq_i64(i64 %a, i64 %b) {
-; CHECK: setp.eq.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.eq.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp eq i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -18,8 +18,8 @@ define i64 @icmp_eq_i64(i64 %a, i64 %b) {
}
define i64 @icmp_ne_i64(i64 %a, i64 %b) {
-; CHECK: setp.ne.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.ne.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp ne i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -27,8 +27,8 @@ define i64 @icmp_ne_i64(i64 %a, i64 %b) {
}
define i64 @icmp_ugt_i64(i64 %a, i64 %b) {
-; CHECK: setp.gt.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.gt.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp ugt i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -36,8 +36,8 @@ define i64 @icmp_ugt_i64(i64 %a, i64 %b) {
}
define i64 @icmp_uge_i64(i64 %a, i64 %b) {
-; CHECK: setp.ge.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.ge.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp uge i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -45,8 +45,8 @@ define i64 @icmp_uge_i64(i64 %a, i64 %b) {
}
define i64 @icmp_ult_i64(i64 %a, i64 %b) {
-; CHECK: setp.lt.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.lt.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp ult i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -54,8 +54,8 @@ define i64 @icmp_ult_i64(i64 %a, i64 %b) {
}
define i64 @icmp_ule_i64(i64 %a, i64 %b) {
-; CHECK: setp.le.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.le.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp ule i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -63,8 +63,8 @@ define i64 @icmp_ule_i64(i64 %a, i64 %b) {
}
define i64 @icmp_sgt_i64(i64 %a, i64 %b) {
-; CHECK: setp.gt.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.gt.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp sgt i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -72,8 +72,8 @@ define i64 @icmp_sgt_i64(i64 %a, i64 %b) {
}
define i64 @icmp_sge_i64(i64 %a, i64 %b) {
-; CHECK: setp.ge.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.ge.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp sge i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -81,8 +81,8 @@ define i64 @icmp_sge_i64(i64 %a, i64 %b) {
}
define i64 @icmp_slt_i64(i64 %a, i64 %b) {
-; CHECK: setp.lt.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.lt.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp slt i64 %a, %b
%ret = zext i1 %cmp to i64
@@ -90,8 +90,8 @@ define i64 @icmp_slt_i64(i64 %a, i64 %b) {
}
define i64 @icmp_sle_i64(i64 %a, i64 %b) {
-; CHECK: setp.le.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.le.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
; CHECK: ret
%cmp = icmp sle i64 %a, %b
%ret = zext i1 %cmp to i64
diff --git a/test/CodeGen/NVPTX/convert-fp.ll b/test/CodeGen/NVPTX/convert-fp.ll
index 1882121fa724..4b5446e317f4 100644
--- a/test/CodeGen/NVPTX/convert-fp.ll
+++ b/test/CodeGen/NVPTX/convert-fp.ll
@@ -10,7 +10,7 @@ define i16 @cvt_i16_f32(float %x) {
}
define i16 @cvt_i16_f64(double %x) {
-; CHECK: cvt.rzi.u16.f64 %rs{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rzi.u16.f64 %rs{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptoui double %x to i16
ret i16 %a
@@ -24,7 +24,7 @@ define i32 @cvt_i32_f32(float %x) {
}
define i32 @cvt_i32_f64(double %x) {
-; CHECK: cvt.rzi.u32.f64 %r{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rzi.u32.f64 %r{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptoui double %x to i32
ret i32 %a
@@ -32,14 +32,14 @@ define i32 @cvt_i32_f64(double %x) {
define i64 @cvt_i64_f32(float %x) {
-; CHECK: cvt.rzi.u64.f32 %rl{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: cvt.rzi.u64.f32 %rd{{[0-9]+}}, %f{{[0-9]+}};
; CHECK: ret;
%a = fptoui float %x to i64
ret i64 %a
}
define i64 @cvt_i64_f64(double %x) {
-; CHECK: cvt.rzi.u64.f64 %rl{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rzi.u64.f64 %rd{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptoui double %x to i64
ret i64 %a
@@ -60,14 +60,14 @@ define float @cvt_f32_i32(i32 %x) {
}
define float @cvt_f32_i64(i64 %x) {
-; CHECK: cvt.rn.f32.u64 %f{{[0-9]+}}, %rl{{[0-9]+}};
+; CHECK: cvt.rn.f32.u64 %f{{[0-9]+}}, %rd{{[0-9]+}};
; CHECK: ret;
%a = uitofp i64 %x to float
ret float %a
}
define float @cvt_f32_f64(double %x) {
-; CHECK: cvt.rn.f32.f64 %f{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rn.f32.f64 %f{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fptrunc double %x to float
ret float %a
@@ -88,56 +88,56 @@ define float @cvt_f32_s32(i32 %x) {
}
define float @cvt_f32_s64(i64 %x) {
-; CHECK: cvt.rn.f32.s64 %f{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: cvt.rn.f32.s64 %f{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%a = sitofp i64 %x to float
ret float %a
}
define double @cvt_f64_i16(i16 %x) {
-; CHECK: cvt.rn.f64.u16 %fl{{[0-9]+}}, %rs{{[0-9]+}};
+; CHECK: cvt.rn.f64.u16 %fd{{[0-9]+}}, %rs{{[0-9]+}};
; CHECK: ret;
%a = uitofp i16 %x to double
ret double %a
}
define double @cvt_f64_i32(i32 %x) {
-; CHECK: cvt.rn.f64.u32 %fl{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: cvt.rn.f64.u32 %fd{{[0-9]+}}, %r{{[0-9]+}};
; CHECK: ret;
%a = uitofp i32 %x to double
ret double %a
}
define double @cvt_f64_i64(i64 %x) {
-; CHECK: cvt.rn.f64.u64 %fl{{[0-9]+}}, %rl{{[0-9]+}};
+; CHECK: cvt.rn.f64.u64 %fd{{[0-9]+}}, %rd{{[0-9]+}};
; CHECK: ret;
%a = uitofp i64 %x to double
ret double %a
}
define double @cvt_f64_f32(float %x) {
-; CHECK: cvt.f64.f32 %fl{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: cvt.f64.f32 %fd{{[0-9]+}}, %f{{[0-9]+}};
; CHECK: ret;
%a = fpext float %x to double
ret double %a
}
define double @cvt_f64_s16(i16 %x) {
-; CHECK: cvt.rn.f64.s16 %fl{{[0-9]+}}, %rs{{[0-9]+}}
+; CHECK: cvt.rn.f64.s16 %fd{{[0-9]+}}, %rs{{[0-9]+}}
; CHECK: ret
%a = sitofp i16 %x to double
ret double %a
}
define double @cvt_f64_s32(i32 %x) {
-; CHECK: cvt.rn.f64.s32 %fl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: cvt.rn.f64.s32 %fd{{[0-9]+}}, %r{{[0-9]+}}
; CHECK: ret
%a = sitofp i32 %x to double
ret double %a
}
define double @cvt_f64_s64(i64 %x) {
-; CHECK: cvt.rn.f64.s64 %fl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: cvt.rn.f64.s64 %fd{{[0-9]+}}, %rd{{[0-9]+}}
; CHECK: ret
%a = sitofp i64 %x to double
ret double %a
diff --git a/test/CodeGen/NVPTX/convert-int-sm20.ll b/test/CodeGen/NVPTX/convert-int-sm20.ll
index 227cd31e11b3..57a231629e00 100644
--- a/test/CodeGen/NVPTX/convert-int-sm20.ll
+++ b/test/CodeGen/NVPTX/convert-int-sm20.ll
@@ -48,16 +48,16 @@ define i32 @cvt_i32_i64(i64 %x) {
; i64
define i64 @cvt_i64_i16(i16 %x) {
-; CHECK: ld.param.u16 %rl[[R0:[0-9]+]], [cvt_i64_i16_param_{{[0-9]+}}]
-; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rl[[R0]]
+; CHECK: ld.param.u16 %rd[[R0:[0-9]+]], [cvt_i64_i16_param_{{[0-9]+}}]
+; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rd[[R0]]
; CHECK: ret
%a = zext i16 %x to i64
ret i64 %a
}
define i64 @cvt_i64_i32(i32 %x) {
-; CHECK: ld.param.u32 %rl[[R0:[0-9]+]], [cvt_i64_i32_param_{{[0-9]+}}]
-; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rl[[R0]]
+; CHECK: ld.param.u32 %rd[[R0:[0-9]+]], [cvt_i64_i32_param_{{[0-9]+}}]
+; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rd[[R0]]
; CHECK: ret
%a = zext i32 %x to i64
ret i64 %a
diff --git a/test/CodeGen/NVPTX/div-ri.ll b/test/CodeGen/NVPTX/div-ri.ll
new file mode 100644
index 000000000000..7f796e0239fc
--- /dev/null
+++ b/test/CodeGen/NVPTX/div-ri.ll
@@ -0,0 +1,8 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -nvptx-prec-divf32=0 | FileCheck %s
+
+define float @foo(float %a) {
+; CHECK: div.approx.f32
+ %div = fdiv float %a, 13.0
+ ret float %div
+}
+
diff --git a/test/CodeGen/NVPTX/envreg.ll b/test/CodeGen/NVPTX/envreg.ll
new file mode 100644
index 000000000000..a341b49ecdf3
--- /dev/null
+++ b/test/CodeGen/NVPTX/envreg.ll
@@ -0,0 +1,139 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg0()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg1()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg2()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg3()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg4()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg5()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg6()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg7()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg8()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg9()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg10()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg11()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg12()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg13()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg14()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg15()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg16()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg17()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg18()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg19()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg20()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg21()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg22()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg23()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg24()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg25()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg26()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg27()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg28()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg29()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg30()
+declare i32 @llvm.nvvm.read.ptx.sreg.envreg31()
+
+
+; CHECK: foo
+define i32 @foo() {
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg0
+ %val0 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg0()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg1
+ %val1 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg1()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg2
+ %val2 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg2()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg3
+ %val3 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg3()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg4
+ %val4 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg4()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg5
+ %val5 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg5()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg6
+ %val6 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg6()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg7
+ %val7 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg7()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg8
+ %val8 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg8()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg9
+ %val9 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg9()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg10
+ %val10 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg10()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg11
+ %val11 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg11()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg12
+ %val12 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg12()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg13
+ %val13 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg13()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg14
+ %val14 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg14()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg15
+ %val15 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg15()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg16
+ %val16 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg16()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg17
+ %val17 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg17()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg18
+ %val18 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg18()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg19
+ %val19 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg19()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg20
+ %val20 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg20()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg21
+ %val21 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg21()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg22
+ %val22 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg22()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg23
+ %val23 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg23()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg24
+ %val24 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg24()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg25
+ %val25 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg25()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg26
+ %val26 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg26()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg27
+ %val27 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg27()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg28
+ %val28 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg28()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg29
+ %val29 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg29()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg30
+ %val30 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg30()
+; CHECK: mov.b32 %r{{[0-9]+}}, %envreg31
+ %val31 = tail call i32 @llvm.nvvm.read.ptx.sreg.envreg31()
+
+
+ %ret0 = add i32 %val0, %val1
+ %ret1 = add i32 %ret0, %val2
+ %ret2 = add i32 %ret1, %val3
+ %ret3 = add i32 %ret2, %val4
+ %ret4 = add i32 %ret3, %val5
+ %ret5 = add i32 %ret4, %val6
+ %ret6 = add i32 %ret5, %val7
+ %ret7 = add i32 %ret6, %val8
+ %ret8 = add i32 %ret7, %val9
+ %ret9 = add i32 %ret8, %val10
+ %ret10 = add i32 %ret9, %val11
+ %ret11 = add i32 %ret10, %val12
+ %ret12 = add i32 %ret11, %val13
+ %ret13 = add i32 %ret12, %val14
+ %ret14 = add i32 %ret13, %val15
+ %ret15 = add i32 %ret14, %val16
+ %ret16 = add i32 %ret15, %val17
+ %ret17 = add i32 %ret16, %val18
+ %ret18 = add i32 %ret17, %val19
+ %ret19 = add i32 %ret18, %val20
+ %ret20 = add i32 %ret19, %val21
+ %ret21 = add i32 %ret20, %val22
+ %ret22 = add i32 %ret21, %val23
+ %ret23 = add i32 %ret22, %val24
+ %ret24 = add i32 %ret23, %val25
+ %ret25 = add i32 %ret24, %val26
+ %ret26 = add i32 %ret25, %val27
+ %ret27 = add i32 %ret26, %val28
+ %ret28 = add i32 %ret27, %val29
+ %ret29 = add i32 %ret28, %val30
+ %ret30 = add i32 %ret29, %val31
+
+ ret i32 %ret30
+}
diff --git a/test/CodeGen/NVPTX/fma.ll b/test/CodeGen/NVPTX/fma.ll
index 4ef1a9a4cefb..14b5c45b87d8 100644
--- a/test/CodeGen/NVPTX/fma.ll
+++ b/test/CodeGen/NVPTX/fma.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast | FileCheck %s
define ptx_device float @t1_f32(float %x, float %y, float %z) {
; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
@@ -9,7 +9,7 @@ define ptx_device float @t1_f32(float %x, float %y, float %z) {
}
define ptx_device double @t1_f64(double %x, double %y, double %z) {
-; CHECK: fma.rn.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%a = fmul double %x, %y
%b = fadd double %a, %z
diff --git a/test/CodeGen/NVPTX/fp-contract.ll b/test/CodeGen/NVPTX/fp-contract.ll
new file mode 100644
index 000000000000..3f68b188ba75
--- /dev/null
+++ b/test/CodeGen/NVPTX/fp-contract.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -fp-contract=fast | FileCheck %s --check-prefix=FAST
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_30 | FileCheck %s --check-prefix=DEFAULT
+
+target triple = "nvptx64-unknown-cuda"
+
+;; Make sure we are generating proper instruction sequences for fused ops.
+;; If fusion is allowed, we try to form fma.rn at the PTX level, and emit
+;; add.f32 otherwise. Without an explicit rounding mode on add.f32, ptxas
+;; is free to fuse with a multiply if it is able. If fusion is not allowed,
+;; we do not form fma.rn at the PTX level and explicitly generate add.rn
+;; for all adds to prevent ptxas from fusing the ops. (See the CUDA sketch after this file's diff.)
+
+;; FAST-LABEL: @t0
+;; DEFAULT-LABEL: @t0
+define float @t0(float %a, float %b, float %c) {
+;; FAST: fma.rn.f32
+;; DEFAULT: mul.rn.f32
+;; DEFAULT: add.rn.f32
+ %v0 = fmul float %a, %b
+ %v1 = fadd float %v0, %c
+ ret float %v1
+}
+
+;; FAST-LABEL: @t1
+;; DEFAULT-LABEL: @t1
+define float @t1(float %a, float %b) {
+;; We cannot form an fma here, but make sure we explicitly emit add.rn.f32
+;; to prevent ptxas from fusing this with anything else.
+;; FAST: add.f32
+;; DEFAULT: add.rn.f32
+ %v1 = fadd float %a, %b
+ ret float %v1
+}
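
The comment block in fp-contract.ll above explains when fma.rn is formed versus an explicit mul.rn/add.rn pair. What follows is a rough CUDA-level sketch of the source pattern those two tests model; the file name, kernel names, and host driver are hypothetical (only the -fp-contract=fast setting comes from the RUN lines above), so treat it as an illustration rather than part of the test suite.

// fma_contract_sketch.cu -- hypothetical example; the file name, kernel
// names, and host driver are illustrative and not part of the patch.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fma_candidate(const float *a, const float *b,
                              const float *c, float *out) {
  // a*b + c is the t0 pattern: when contraction is permitted (as with the
  // -fp-contract=fast RUN line above) the backend may emit fma.rn.f32;
  // otherwise it emits mul.rn.f32 followed by add.rn.f32 so ptxas cannot
  // re-fuse the pair behind the compiler's back.
  int i = threadIdx.x;
  out[i] = a[i] * b[i] + c[i];
}

__global__ void plain_add(const float *a, const float *b, float *out) {
  // The t1 pattern: a lone add has no multiply to fuse with. With
  // contraction disabled it is emitted as add.rn.f32; with
  // -fp-contract=fast a plain add.f32 is acceptable.
  int i = threadIdx.x;
  out[i] = a[i] + b[i];
}

int main() {
  float ha = 2.0f, hb = 3.0f, hc = 1.0f, hout = 0.0f;
  float *a, *b, *c, *out;
  cudaMalloc(&a, sizeof(float));
  cudaMalloc(&b, sizeof(float));
  cudaMalloc(&c, sizeof(float));
  cudaMalloc(&out, sizeof(float));
  cudaMemcpy(a, &ha, sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(b, &hb, sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(c, &hc, sizeof(float), cudaMemcpyHostToDevice);
  fma_candidate<<<1, 1>>>(a, b, c, out);
  cudaMemcpy(&hout, out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("%f\n", hout);  // expect 7.0; exact in either contraction mode
  cudaFree(a); cudaFree(b); cudaFree(c); cudaFree(out);
  return 0;
}
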
diff --git a/test/CodeGen/NVPTX/fp-literals.ll b/test/CodeGen/NVPTX/fp-literals.ll
index 0cc2413e009f..755e0f9250a1 100644
--- a/test/CodeGen/NVPTX/fp-literals.ll
+++ b/test/CodeGen/NVPTX/fp-literals.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast | FileCheck %s
+
+target triple = "nvptx64-unknown-cuda"
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
; Make sure we can properly differentiate between single-precision and
; double-precision FP literals.
@@ -11,7 +14,7 @@ define float @myaddf(float %a) {
}
; CHECK: myaddd
-; CHECK: add.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, 0d3FF0000000000000
+; CHECK: add.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, 0d3FF0000000000000
define double @myaddd(double %a) {
%ret = fadd double %a, 1.0
ret double %ret
diff --git a/test/CodeGen/NVPTX/fp16.ll b/test/CodeGen/NVPTX/fp16.ll
new file mode 100644
index 000000000000..8770399f2ec9
--- /dev/null
+++ b/test/CodeGen/NVPTX/fp16.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=nvptx -verify-machineinstrs < %s | FileCheck %s
+
+declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
+declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
+declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
+declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone
+
+; CHECK-LABEL: @test_convert_fp16_to_fp32
+; CHECK: cvt.f32.f16
+define void @test_convert_fp16_to_fp32(float addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %val = load i16 addrspace(1)* %in, align 2
+ %cvt = call float @llvm.convert.from.fp16.f32(i16 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; CHECK-LABEL: @test_convert_fp16_to_fp64
+; CHECK: cvt.f64.f16
+define void @test_convert_fp16_to_fp64(double addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %val = load i16 addrspace(1)* %in, align 2
+ %cvt = call double @llvm.convert.from.fp16.f64(i16 %val) nounwind readnone
+ store double %cvt, double addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; CHECK-LABEL: @test_convert_fp32_to_fp16
+; CHECK: cvt.rn.f16.f32
+define void @test_convert_fp32_to_fp16(i16 addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %val = load float addrspace(1)* %in, align 2
+ %cvt = call i16 @llvm.convert.to.fp16.f32(float %val) nounwind readnone
+ store i16 %cvt, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; CHECK-LABEL: @test_convert_fp64_to_fp16
+; CHECK: cvt.rn.f16.f64
+define void @test_convert_fp64_to_fp16(i16 addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+ %val = load double addrspace(1)* %in, align 2
+ %cvt = call i16 @llvm.convert.to.fp16.f64(double %val) nounwind readnone
+ store i16 %cvt, i16 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/NVPTX/gvar-init.ll b/test/CodeGen/NVPTX/gvar-init.ll
new file mode 100644
index 000000000000..8c959422e66a
--- /dev/null
+++ b/test/CodeGen/NVPTX/gvar-init.ll
@@ -0,0 +1,5 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; Error out if initializer is given for address spaces that do not support initializers
+; XFAIL: *
+@g0 = addrspace(3) global i32 42
diff --git a/test/CodeGen/NVPTX/half.ll b/test/CodeGen/NVPTX/half.ll
new file mode 100644
index 000000000000..aa08cc78e91a
--- /dev/null
+++ b/test/CodeGen/NVPTX/half.ll
@@ -0,0 +1,70 @@
+; RUN: llc < %s -march=nvptx | FileCheck %s
+
+define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
+; CHECK-LABEL: @test_load_store
+; CHECK: ld.global.u16 [[TMP:%rs[0-9]+]], [{{%r[0-9]+}}]
+; CHECK: st.global.u16 [{{%r[0-9]+}}], [[TMP]]
+ %val = load half addrspace(1)* %in
+ store half %val, half addrspace(1) * %out
+ ret void
+}
+
+define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) {
+; CHECK-LABEL: @test_bitcast_from_half
+; CHECK: ld.global.u16 [[TMP:%rs[0-9]+]], [{{%r[0-9]+}}]
+; CHECK: st.global.u16 [{{%r[0-9]+}}], [[TMP]]
+ %val = load half addrspace(1) * %in
+ %val_int = bitcast half %val to i16
+ store i16 %val_int, i16 addrspace(1)* %out
+ ret void
+}
+
+define void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) {
+; CHECK-LABEL: @test_bitcast_to_half
+; CHECK: ld.global.u16 [[TMP:%rs[0-9]+]], [{{%r[0-9]+}}]
+; CHECK: st.global.u16 [{{%r[0-9]+}}], [[TMP]]
+ %val = load i16 addrspace(1)* %in
+ %val_fp = bitcast i16 %val to half
+ store half %val_fp, half addrspace(1)* %out
+ ret void
+}
+
+define void @test_extend32(half addrspace(1)* %in, float addrspace(1)* %out) {
+; CHECK-LABEL: @test_extend32
+; CHECK: cvt.f32.f16
+
+ %val16 = load half addrspace(1)* %in
+ %val32 = fpext half %val16 to float
+ store float %val32, float addrspace(1)* %out
+ ret void
+}
+
+define void @test_extend64(half addrspace(1)* %in, double addrspace(1)* %out) {
+; CHECK-LABEL: @test_extend64
+; CHECK: cvt.f64.f16
+
+ %val16 = load half addrspace(1)* %in
+ %val64 = fpext half %val16 to double
+ store double %val64, double addrspace(1)* %out
+ ret void
+}
+
+define void @test_trunc32(float addrspace(1)* %in, half addrspace(1)* %out) {
+; CHECK-LABEL: test_trunc32
+; CHECK: cvt.rn.f16.f32
+
+ %val32 = load float addrspace(1)* %in
+ %val16 = fptrunc float %val32 to half
+ store half %val16, half addrspace(1)* %out
+ ret void
+}
+
+define void @test_trunc64(double addrspace(1)* %in, half addrspace(1)* %out) {
+; CHECK-LABEL: @test_trunc64
+; CHECK: cvt.rn.f16.f64
+
+ %val32 = load double addrspace(1)* %in
+ %val16 = fptrunc double %val32 to half
+ store half %val16, half addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/NVPTX/imad.ll b/test/CodeGen/NVPTX/imad.ll
new file mode 100644
index 000000000000..67421c7cac4b
--- /dev/null
+++ b/test/CodeGen/NVPTX/imad.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: imad
+define i32 @imad(i32 %a, i32 %b, i32 %c) {
+; CHECK: mad.lo.s32
+ %val0 = mul i32 %a, %b
+ %val1 = add i32 %val0, %c
+ ret i32 %val1
+}
diff --git a/test/CodeGen/NVPTX/implicit-def.ll b/test/CodeGen/NVPTX/implicit-def.ll
index 06d3d562046e..2d2c6e527f6d 100644
--- a/test/CodeGen/NVPTX/implicit-def.ll
+++ b/test/CodeGen/NVPTX/implicit-def.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -O0 -march=nvptx -mcpu=sm_20 -asm-verbose=1 | FileCheck %s
; CHECK: // implicit-def: %f[[F0:[0-9]+]]
-; CHECK: add.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f[[F0]];
+; CHECK: add.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f[[F0]];
define float @foo(float %a) {
%ret = fadd float %a, undef
ret float %ret
diff --git a/test/CodeGen/NVPTX/inline-asm.ll b/test/CodeGen/NVPTX/inline-asm.ll
index d76eb4239ee3..6f0578d4cff4 100644
--- a/test/CodeGen/NVPTX/inline-asm.ll
+++ b/test/CodeGen/NVPTX/inline-asm.ll
@@ -7,3 +7,10 @@ entry:
%0 = call float asm "ex2.approx.ftz.f32 $0, $1;", "=f,f"(float %x)
ret float %0
}
+
+define i32 @foo(i1 signext %cond, i32 %a, i32 %b) #0 {
+entry:
+; CHECK: selp.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %p{{[0-9]+}}
+ %0 = tail call i32 asm "selp.b32 $0, $1, $2, $3;", "=r,r,r,b"(i32 %a, i32 %b, i1 %cond)
+ ret i32 %0
+}
diff --git a/test/CodeGen/NVPTX/intrinsic-old.ll b/test/CodeGen/NVPTX/intrinsic-old.ll
index af91bb442412..3c51776c0ec9 100644
--- a/test/CodeGen/NVPTX/intrinsic-old.ll
+++ b/test/CodeGen/NVPTX/intrinsic-old.ll
@@ -198,7 +198,7 @@ define ptx_device i32 @test_clock() {
}
define ptx_device i64 @test_clock64() {
-; CHECK: mov.u64 %rl{{[0-9]+}}, %clock64;
+; CHECK: mov.u64 %rd{{[0-9]+}}, %clock64;
; CHECK: ret;
%x = call i64 @llvm.ptx.read.clock64()
ret i64 %x
diff --git a/test/CodeGen/NVPTX/intrinsics.ll b/test/CodeGen/NVPTX/intrinsics.ll
index 78e1e7789014..34b671d70e94 100644
--- a/test/CodeGen/NVPTX/intrinsics.ll
+++ b/test/CodeGen/NVPTX/intrinsics.ll
@@ -9,7 +9,7 @@ define ptx_device float @test_fabsf(float %f) {
}
define ptx_device double @test_fabs(double %d) {
-; CHECK: abs.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: abs.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
%x = call double @llvm.fabs.f64(double %d)
ret double %x
diff --git a/test/CodeGen/NVPTX/isspacep.ll b/test/CodeGen/NVPTX/isspacep.ll
new file mode 100644
index 000000000000..47fa7a6714df
--- /dev/null
+++ b/test/CodeGen/NVPTX/isspacep.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+declare i1 @llvm.nvvm.isspacep.const(i8*) readnone noinline
+declare i1 @llvm.nvvm.isspacep.global(i8*) readnone noinline
+declare i1 @llvm.nvvm.isspacep.local(i8*) readnone noinline
+declare i1 @llvm.nvvm.isspacep.shared(i8*) readnone noinline
+
+; CHECK: is_const
+define i1 @is_const(i8* %addr) {
+; CHECK: isspacep.const
+ %v = tail call i1 @llvm.nvvm.isspacep.const(i8* %addr)
+ ret i1 %v
+}
+
+; CHECK: is_global
+define i1 @is_global(i8* %addr) {
+; CHECK: isspacep.global
+ %v = tail call i1 @llvm.nvvm.isspacep.global(i8* %addr)
+ ret i1 %v
+}
+
+; CHECK: is_local
+define i1 @is_local(i8* %addr) {
+; CHECK: isspacep.local
+ %v = tail call i1 @llvm.nvvm.isspacep.local(i8* %addr)
+ ret i1 %v
+}
+
+; CHECK: is_shared
+define i1 @is_shared(i8* %addr) {
+; CHECK: isspacep.shared
+ %v = tail call i1 @llvm.nvvm.isspacep.shared(i8* %addr)
+ ret i1 %v
+}
+
diff --git a/test/CodeGen/NVPTX/ld-addrspace.ll b/test/CodeGen/NVPTX/ld-addrspace.ll
index 133ef09afdb2..f33659c92e84 100644
--- a/test/CodeGen/NVPTX/ld-addrspace.ll
+++ b/test/CodeGen/NVPTX/ld-addrspace.ll
@@ -6,7 +6,7 @@
define i8 @ld_global_i8(i8 addrspace(1)* %ptr) {
; PTX32: ld.global.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.global.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i8 addrspace(1)* %ptr
ret i8 %a
@@ -15,7 +15,7 @@ define i8 @ld_global_i8(i8 addrspace(1)* %ptr) {
define i8 @ld_shared_i8(i8 addrspace(3)* %ptr) {
; PTX32: ld.shared.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.shared.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i8 addrspace(3)* %ptr
ret i8 %a
@@ -24,7 +24,7 @@ define i8 @ld_shared_i8(i8 addrspace(3)* %ptr) {
define i8 @ld_local_i8(i8 addrspace(5)* %ptr) {
; PTX32: ld.local.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.local.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i8 addrspace(5)* %ptr
ret i8 %a
@@ -34,7 +34,7 @@ define i8 @ld_local_i8(i8 addrspace(5)* %ptr) {
define i16 @ld_global_i16(i16 addrspace(1)* %ptr) {
; PTX32: ld.global.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.global.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i16 addrspace(1)* %ptr
ret i16 %a
@@ -43,7 +43,7 @@ define i16 @ld_global_i16(i16 addrspace(1)* %ptr) {
define i16 @ld_shared_i16(i16 addrspace(3)* %ptr) {
; PTX32: ld.shared.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.shared.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i16 addrspace(3)* %ptr
ret i16 %a
@@ -52,7 +52,7 @@ define i16 @ld_shared_i16(i16 addrspace(3)* %ptr) {
define i16 @ld_local_i16(i16 addrspace(5)* %ptr) {
; PTX32: ld.local.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.local.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i16 addrspace(5)* %ptr
ret i16 %a
@@ -62,7 +62,7 @@ define i16 @ld_local_i16(i16 addrspace(5)* %ptr) {
define i32 @ld_global_i32(i32 addrspace(1)* %ptr) {
; PTX32: ld.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.global.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i32 addrspace(1)* %ptr
ret i32 %a
@@ -71,7 +71,7 @@ define i32 @ld_global_i32(i32 addrspace(1)* %ptr) {
define i32 @ld_shared_i32(i32 addrspace(3)* %ptr) {
; PTX32: ld.shared.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.shared.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i32 addrspace(3)* %ptr
ret i32 %a
@@ -80,7 +80,7 @@ define i32 @ld_shared_i32(i32 addrspace(3)* %ptr) {
define i32 @ld_local_i32(i32 addrspace(5)* %ptr) {
; PTX32: ld.local.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.local.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i32 addrspace(5)* %ptr
ret i32 %a
@@ -88,27 +88,27 @@ define i32 @ld_local_i32(i32 addrspace(5)* %ptr) {
;; i64
define i64 @ld_global_i64(i64 addrspace(1)* %ptr) {
-; PTX32: ld.global.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.global.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.global.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i64 addrspace(1)* %ptr
ret i64 %a
}
define i64 @ld_shared_i64(i64 addrspace(3)* %ptr) {
-; PTX32: ld.shared.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.shared.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.shared.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i64 addrspace(3)* %ptr
ret i64 %a
}
define i64 @ld_local_i64(i64 addrspace(5)* %ptr) {
-; PTX32: ld.local.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.local.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.local.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i64 addrspace(5)* %ptr
ret i64 %a
@@ -118,7 +118,7 @@ define i64 @ld_local_i64(i64 addrspace(5)* %ptr) {
define float @ld_global_f32(float addrspace(1)* %ptr) {
; PTX32: ld.global.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.global.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load float addrspace(1)* %ptr
ret float %a
@@ -127,7 +127,7 @@ define float @ld_global_f32(float addrspace(1)* %ptr) {
define float @ld_shared_f32(float addrspace(3)* %ptr) {
; PTX32: ld.shared.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.shared.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load float addrspace(3)* %ptr
ret float %a
@@ -136,7 +136,7 @@ define float @ld_shared_f32(float addrspace(3)* %ptr) {
define float @ld_local_f32(float addrspace(5)* %ptr) {
; PTX32: ld.local.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.local.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load float addrspace(5)* %ptr
ret float %a
@@ -144,27 +144,27 @@ define float @ld_local_f32(float addrspace(5)* %ptr) {
;; f64
define double @ld_global_f64(double addrspace(1)* %ptr) {
-; PTX32: ld.global.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.global.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.global.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load double addrspace(1)* %ptr
ret double %a
}
define double @ld_shared_f64(double addrspace(3)* %ptr) {
-; PTX32: ld.shared.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.shared.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.shared.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load double addrspace(3)* %ptr
ret double %a
}
define double @ld_local_f64(double addrspace(5)* %ptr) {
-; PTX32: ld.local.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.local.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.local.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load double addrspace(5)* %ptr
ret double %a
diff --git a/test/CodeGen/NVPTX/ld-generic.ll b/test/CodeGen/NVPTX/ld-generic.ll
index 3728268c24d5..d629e0ecc647 100644
--- a/test/CodeGen/NVPTX/ld-generic.ll
+++ b/test/CodeGen/NVPTX/ld-generic.ll
@@ -6,7 +6,7 @@
define i8 @ld_global_i8(i8 addrspace(0)* %ptr) {
; PTX32: ld.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i8 addrspace(0)* %ptr
ret i8 %a
@@ -16,7 +16,7 @@ define i8 @ld_global_i8(i8 addrspace(0)* %ptr) {
define i16 @ld_global_i16(i16 addrspace(0)* %ptr) {
; PTX32: ld.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i16 addrspace(0)* %ptr
ret i16 %a
@@ -26,7 +26,7 @@ define i16 @ld_global_i16(i16 addrspace(0)* %ptr) {
define i32 @ld_global_i32(i32 addrspace(0)* %ptr) {
; PTX32: ld.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i32 addrspace(0)* %ptr
ret i32 %a
@@ -34,9 +34,9 @@ define i32 @ld_global_i32(i32 addrspace(0)* %ptr) {
;; i64
define i64 @ld_global_i64(i64 addrspace(0)* %ptr) {
-; PTX32: ld.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i64 addrspace(0)* %ptr
ret i64 %a
@@ -46,7 +46,7 @@ define i64 @ld_global_i64(i64 addrspace(0)* %ptr) {
define float @ld_global_f32(float addrspace(0)* %ptr) {
; PTX32: ld.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load float addrspace(0)* %ptr
ret float %a
@@ -54,9 +54,9 @@ define float @ld_global_f32(float addrspace(0)* %ptr) {
;; f64
define double @ld_global_f64(double addrspace(0)* %ptr) {
-; PTX32: ld.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
-; PTX64: ld.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load double addrspace(0)* %ptr
ret double %a
diff --git a/test/CodeGen/NVPTX/ldparam-v4.ll b/test/CodeGen/NVPTX/ldparam-v4.ll
new file mode 100644
index 000000000000..ec306aafe854
--- /dev/null
+++ b/test/CodeGen/NVPTX/ldparam-v4.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+declare <4 x float> @bar()
+
+define void @foo(<4 x float>* %ptr) {
+; CHECK: ld.param.v4.f32
+ %val = tail call <4 x float> @bar()
+ store <4 x float> %val, <4 x float>* %ptr
+ ret void
+}
diff --git a/test/CodeGen/NVPTX/ldu-i8.ll b/test/CodeGen/NVPTX/ldu-i8.ll
index 81a82b2c38b5..9cc667557906 100644
--- a/test/CodeGen/NVPTX/ldu-i8.ll
+++ b/test/CodeGen/NVPTX/ldu-i8.ll
@@ -2,13 +2,15 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
-declare i8 @llvm.nvvm.ldu.global.i.i8(i8*)
+declare i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8*)
define i8 @foo(i8* %a) {
; Ensure we properly truncate off the high-order 24 bits
; CHECK: ldu.global.u8
; CHECK: cvt.u32.u16
; CHECK: and.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, 255
- %val = tail call i8 @llvm.nvvm.ldu.global.i.i8(i8* %a)
+ %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8* %a), !align !0
ret i8 %val
}
+
+!0 = metadata !{i32 4}
diff --git a/test/CodeGen/NVPTX/ldu-ldg.ll b/test/CodeGen/NVPTX/ldu-ldg.ll
new file mode 100644
index 000000000000..3b0619ff5175
--- /dev/null
+++ b/test/CodeGen/NVPTX/ldu-ldg.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+declare i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr)
+declare i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr)
+declare i8 @llvm.nvvm.ldg.global.i.i8.p1i8(i8 addrspace(1)* %ptr)
+declare i32 @llvm.nvvm.ldg.global.i.i32.p1i32(i32 addrspace(1)* %ptr)
+
+
+; CHECK: func0
+define i8 @func0(i8 addrspace(1)* %ptr) {
+; ldu.global.u8
+ %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr), !align !0
+ ret i8 %val
+}
+
+; CHECK: func1
+define i32 @func1(i32 addrspace(1)* %ptr) {
+; ldu.global.u32
+ %val = tail call i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr), !align !0
+ ret i32 %val
+}
+
+; CHECK: func2
+define i8 @func2(i8 addrspace(1)* %ptr) {
+; ld.global.nc.u8
+ %val = tail call i8 @llvm.nvvm.ldg.global.i.i8.p1i8(i8 addrspace(1)* %ptr), !align !0
+ ret i8 %val
+}
+
+; CHECK: func3
+define i32 @func3(i32 addrspace(1)* %ptr) {
+; ld.global.nc.u32
+ %val = tail call i32 @llvm.nvvm.ldg.global.i.i32.p1i32(i32 addrspace(1)* %ptr), !align !0
+ ret i32 %val
+}
+
+
+
+!0 = metadata !{i32 4}
diff --git a/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll b/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll
index 26cadc401b79..55707ea85106 100644
--- a/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll
+++ b/test/CodeGen/NVPTX/ldu-reg-plus-offset.ll
@@ -7,9 +7,9 @@ define void @reg_plus_offset(i32* %a) {
; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+32];
; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+36];
%p2 = getelementptr i32* %a, i32 8
- %t1 = call i32 @llvm.nvvm.ldu.global.i.i32(i32* %p2), !align !1
+ %t1 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p2), !align !1
%p3 = getelementptr i32* %a, i32 9
- %t2 = call i32 @llvm.nvvm.ldu.global.i.i32(i32* %p3), !align !1
+ %t2 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p3), !align !1
%t3 = mul i32 %t1, %t2
store i32 %t3, i32* %a
ret void
@@ -17,5 +17,5 @@ define void @reg_plus_offset(i32* %a) {
!1 = metadata !{ i32 4 }
-declare i32 @llvm.nvvm.ldu.global.i.i32(i32*)
+declare i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32*)
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
diff --git a/test/CodeGen/NVPTX/lit.local.cfg b/test/CodeGen/NVPTX/lit.local.cfg
index 85cf8c2c8c07..2cb98eb371b2 100644
--- a/test/CodeGen/NVPTX/lit.local.cfg
+++ b/test/CodeGen/NVPTX/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'NVPTX' in targets:
+if not 'NVPTX' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/NVPTX/local-stack-frame.ll b/test/CodeGen/NVPTX/local-stack-frame.ll
index 178dff1a5d3f..377eee9170e6 100644
--- a/test/CodeGen/NVPTX/local-stack-frame.ll
+++ b/test/CodeGen/NVPTX/local-stack-frame.ll
@@ -3,16 +3,16 @@
; Ensure we access the local stack properly
-; PTX32: mov.u32 %r{{[0-9]+}}, __local_depot{{[0-9]+}};
-; PTX32: cvta.local.u32 %SP, %r{{[0-9]+}};
-; PTX32: ld.param.u32 %r{{[0-9]+}}, [foo_param_0];
-; PTX32: st.u32 [%SP+0], %r{{[0-9]+}};
-; PTX64: mov.u64 %rl{{[0-9]+}}, __local_depot{{[0-9]+}};
-; PTX64: cvta.local.u64 %SP, %rl{{[0-9]+}};
-; PTX64: ld.param.u32 %r{{[0-9]+}}, [foo_param_0];
-; PTX64: st.u32 [%SP+0], %r{{[0-9]+}};
+; PTX32: mov.u32 %r{{[0-9]+}}, __local_depot{{[0-9]+}};
+; PTX32: cvta.local.u32 %SP, %r{{[0-9]+}};
+; PTX32: ld.param.u32 %r{{[0-9]+}}, [foo_param_0];
+; PTX32: st.volatile.u32 [%SP+0], %r{{[0-9]+}};
+; PTX64: mov.u64 %rd{{[0-9]+}}, __local_depot{{[0-9]+}};
+; PTX64: cvta.local.u64 %SP, %rd{{[0-9]+}};
+; PTX64: ld.param.u32 %r{{[0-9]+}}, [foo_param_0];
+; PTX64: st.volatile.u32 [%SP+0], %r{{[0-9]+}};
define void @foo(i32 %a) {
%local = alloca i32, align 4
- store i32 %a, i32* %local
+ store volatile i32 %a, i32* %local
ret void
}
diff --git a/test/CodeGen/NVPTX/managed.ll b/test/CodeGen/NVPTX/managed.ll
new file mode 100644
index 000000000000..4d7e7817f77b
--- /dev/null
+++ b/test/CodeGen/NVPTX/managed.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+; CHECK: .visible .global .align 4 .u32 device_g;
+@device_g = addrspace(1) global i32 zeroinitializer
+; CHECK: .visible .global .attribute(.managed) .align 4 .u32 managed_g;
+@managed_g = addrspace(1) global i32 zeroinitializer
+
+
+!nvvm.annotations = !{!0}
+!0 = metadata !{i32 addrspace(1)* @managed_g, metadata !"managed", i32 1}
diff --git a/test/CodeGen/NVPTX/misaligned-vector-ldst.ll b/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
new file mode 100644
index 000000000000..90c9c4306de7
--- /dev/null
+++ b/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
@@ -0,0 +1,77 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+; CHECK-LABEL: t1
+define <4 x float> @t1(i8* %p1) {
+; CHECK-NOT: ld.v4
+; CHECK-NOT: ld.v2
+; CHECK-NOT: ld.f32
+; CHECK: ld.u8
+ %cast = bitcast i8* %p1 to <4 x float>*
+ %r = load <4 x float>* %cast, align 1
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: t2
+define <4 x float> @t2(i8* %p1) {
+; CHECK-NOT: ld.v4
+; CHECK-NOT: ld.v2
+; CHECK: ld.f32
+ %cast = bitcast i8* %p1 to <4 x float>*
+ %r = load <4 x float>* %cast, align 4
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: t3
+define <4 x float> @t3(i8* %p1) {
+; CHECK-NOT: ld.v4
+; CHECK: ld.v2
+ %cast = bitcast i8* %p1 to <4 x float>*
+ %r = load <4 x float>* %cast, align 8
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: t4
+define <4 x float> @t4(i8* %p1) {
+; CHECK: ld.v4
+ %cast = bitcast i8* %p1 to <4 x float>*
+ %r = load <4 x float>* %cast, align 16
+ ret <4 x float> %r
+}
+
+
+; CHECK-LABEL: s1
+define void @s1(<4 x float>* %p1, <4 x float> %v) {
+; CHECK-NOT: st.v4
+; CHECK-NOT: st.v2
+; CHECK-NOT: st.f32
+; CHECK: st.u8
+ store <4 x float> %v, <4 x float>* %p1, align 1
+ ret void
+}
+
+; CHECK-LABEL: s2
+define void @s2(<4 x float>* %p1, <4 x float> %v) {
+; CHECK-NOT: st.v4
+; CHECK-NOT: st.v2
+; CHECK: st.f32
+ store <4 x float> %v, <4 x float>* %p1, align 4
+ ret void
+}
+
+; CHECK-LABEL: s3
+define void @s3(<4 x float>* %p1, <4 x float> %v) {
+; CHECK-NOT: st.v4
+ store <4 x float> %v, <4 x float>* %p1, align 8
+ ret void
+}
+
+; CHECK-LABEL: s4
+define void @s4(<4 x float>* %p1, <4 x float> %v) {
+; CHECK: st.v4
+ store <4 x float> %v, <4 x float>* %p1, align 16
+ ret void
+}
+
diff --git a/test/CodeGen/NVPTX/mulwide.ll b/test/CodeGen/NVPTX/mulwide.ll
new file mode 100644
index 000000000000..43bb63098f67
--- /dev/null
+++ b/test/CodeGen/NVPTX/mulwide.ll
@@ -0,0 +1,46 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -O3 | FileCheck %s --check-prefix=OPT
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -O0 | FileCheck %s --check-prefix=NOOPT
+
+; OPT-LABEL: @mulwide16
+; NOOPT-LABEL: @mulwide16
+define i32 @mulwide16(i16 %a, i16 %b) {
+; OPT: mul.wide.s16
+; NOOPT: mul.lo.s32
+ %val0 = sext i16 %a to i32
+ %val1 = sext i16 %b to i32
+ %val2 = mul i32 %val0, %val1
+ ret i32 %val2
+}
+
+; OPT-LABEL: @mulwideu16
+; NOOPT-LABEL: @mulwideu16
+define i32 @mulwideu16(i16 %a, i16 %b) {
+; OPT: mul.wide.u16
+; NOOPT: mul.lo.s32
+ %val0 = zext i16 %a to i32
+ %val1 = zext i16 %b to i32
+ %val2 = mul i32 %val0, %val1
+ ret i32 %val2
+}
+
+; OPT-LABEL: @mulwide32
+; NOOPT-LABEL: @mulwide32
+define i64 @mulwide32(i32 %a, i32 %b) {
+; OPT: mul.wide.s32
+; NOOPT: mul.lo.s64
+ %val0 = sext i32 %a to i64
+ %val1 = sext i32 %b to i64
+ %val2 = mul i64 %val0, %val1
+ ret i64 %val2
+}
+
+; OPT-LABEL: @mulwideu32
+; NOOPT-LABEL: @mulwideu32
+define i64 @mulwideu32(i32 %a, i32 %b) {
+; OPT: mul.wide.u32
+; NOOPT: mul.lo.s64
+ %val0 = zext i32 %a to i64
+ %val1 = zext i32 %b to i64
+ %val2 = mul i64 %val0, %val1
+ ret i64 %val2
+}
diff --git a/test/CodeGen/NVPTX/noduplicate-syncthreads.ll b/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
new file mode 100644
index 000000000000..64745fcba3ba
--- /dev/null
+++ b/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
@@ -0,0 +1,74 @@
+; RUN: opt < %s -O3 -S | FileCheck %s
+
+; Make sure the call to syncthreads is not duplicated here by the LLVM
+; optimizations, because it has the noduplicate attribute set. (See the CUDA sketch after this file's diff.)
+
+; CHECK: call void @llvm.cuda.syncthreads
+; CHECK-NOT: call void @llvm.cuda.syncthreads
+
+; Function Attrs: nounwind
+define void @foo(float* %output) #1 {
+entry:
+ %output.addr = alloca float*, align 8
+ store float* %output, float** %output.addr, align 8
+ %0 = load float** %output.addr, align 8
+ %arrayidx = getelementptr inbounds float* %0, i64 0
+ %1 = load float* %arrayidx, align 4
+ %conv = fpext float %1 to double
+ %cmp = fcmp olt double %conv, 1.000000e+01
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ %2 = load float** %output.addr, align 8
+ %3 = load float* %2, align 4
+ %conv1 = fpext float %3 to double
+ %add = fadd double %conv1, 1.000000e+00
+ %conv2 = fptrunc double %add to float
+ store float %conv2, float* %2, align 4
+ br label %if.end
+
+if.else: ; preds = %entry
+ %4 = load float** %output.addr, align 8
+ %5 = load float* %4, align 4
+ %conv3 = fpext float %5 to double
+ %add4 = fadd double %conv3, 2.000000e+00
+ %conv5 = fptrunc double %add4 to float
+ store float %conv5, float* %4, align 4
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ call void @llvm.cuda.syncthreads()
+ %6 = load float** %output.addr, align 8
+ %arrayidx6 = getelementptr inbounds float* %6, i64 0
+ %7 = load float* %arrayidx6, align 4
+ %conv7 = fpext float %7 to double
+ %cmp8 = fcmp olt double %conv7, 1.000000e+01
+ br i1 %cmp8, label %if.then9, label %if.else13
+
+if.then9: ; preds = %if.end
+ %8 = load float** %output.addr, align 8
+ %9 = load float* %8, align 4
+ %conv10 = fpext float %9 to double
+ %add11 = fadd double %conv10, 3.000000e+00
+ %conv12 = fptrunc double %add11 to float
+ store float %conv12, float* %8, align 4
+ br label %if.end17
+
+if.else13: ; preds = %if.end
+ %10 = load float** %output.addr, align 8
+ %11 = load float* %10, align 4
+ %conv14 = fpext float %11 to double
+ %add15 = fadd double %conv14, 4.000000e+00
+ %conv16 = fptrunc double %add15 to float
+ store float %conv16, float* %10, align 4
+ br label %if.end17
+
+if.end17: ; preds = %if.else13, %if.then9
+ ret void
+}
+
+; Function Attrs: noduplicate nounwind
+declare void @llvm.cuda.syncthreads() #2
+
+!0 = metadata !{void (float*)* @foo, metadata !"kernel", i32 1}
+!1 = metadata !{null, metadata !"align", i32 8}
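
The noduplicate-syncthreads.ll comment above states that the barrier call must not be duplicated by the optimizer. Below is a hypothetical CUDA sketch of the same shape, assuming a single-thread launch so the arithmetic stays easy to follow; the kernel and file names are invented for illustration and do not appear in the patch.

// noduplicate_sync_sketch.cu -- hypothetical analogue of foo() above;
// names and the host driver are illustrative, not taken from the patch.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void branch_then_sync(float *output) {
  // Divergent update before the barrier (mirrors the if.then/if.else
  // blocks in the IR above).
  if (*output < 10.0f)
    *output += 1.0f;
  else
    *output += 2.0f;

  // All threads must meet at this single barrier. Because the IR call
  // (llvm.cuda.syncthreads) carries the noduplicate attribute, optimizers
  // may not clone it into both arms of the surrounding branches, which
  // could otherwise leave threads waiting on two different barriers.
  __syncthreads();

  // Second divergent update after the barrier.
  if (*output < 10.0f)
    *output += 3.0f;
  else
    *output += 4.0f;
}

int main() {
  float h = 1.0f;
  float *d = nullptr;
  cudaMalloc(&d, sizeof(float));
  cudaMemcpy(d, &h, sizeof(float), cudaMemcpyHostToDevice);
  branch_then_sync<<<1, 1>>>(d);   // one thread keeps the access pattern trivial
  cudaMemcpy(&h, d, sizeof(float), cudaMemcpyDeviceToHost);
  printf("%f\n", h);  // 1.0 -> 2.0 -> 5.0
  cudaFree(d);
  return 0;
}
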
diff --git a/test/CodeGen/NVPTX/nvvm-reflect.ll b/test/CodeGen/NVPTX/nvvm-reflect.ll
index 0d02194651e3..21e9c69e657a 100644
--- a/test/CodeGen/NVPTX/nvvm-reflect.ll
+++ b/test/CodeGen/NVPTX/nvvm-reflect.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -S -nvvm-reflect -nvvm-reflect-list USE_MUL=0 -O2 | FileCheck %s --check-prefix=USE_MUL_0
; RUN: opt < %s -S -nvvm-reflect -nvvm-reflect-list USE_MUL=1 -O2 | FileCheck %s --check-prefix=USE_MUL_1
-@str = private addrspace(4) unnamed_addr constant [8 x i8] c"USE_MUL\00"
+@str = private unnamed_addr addrspace(4) constant [8 x i8] c"USE_MUL\00"
declare i32 @__nvvm_reflect(i8*)
declare i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)*)
@@ -32,3 +32,17 @@ exit:
%ret = phi float [%ret1, %use_mul], [%ret2, %use_add]
ret float %ret
}
+
+declare i32 @llvm.nvvm.reflect.p0i8(i8*)
+
+; USE_MUL_0: define i32 @intrinsic
+; USE_MUL_1: define i32 @intrinsic
+define i32 @intrinsic() {
+; USE_MUL_0-NOT: call i32 @llvm.nvvm.reflect
+; USE_MUL_0: ret i32 0
+; USE_MUL_1-NOT: call i32 @llvm.nvvm.reflect
+; USE_MUL_1: ret i32 1
+ %ptr = tail call i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)* getelementptr inbounds ([8 x i8] addrspace(4)* @str, i32 0, i32 0))
+ %reflect = tail call i32 @llvm.nvvm.reflect.p0i8(i8* %ptr)
+ ret i32 %reflect
+}
diff --git a/test/CodeGen/NVPTX/pr13291-i1-store.ll b/test/CodeGen/NVPTX/pr13291-i1-store.ll
index e7a81be01b14..cc67a6fff8e4 100644
--- a/test/CodeGen/NVPTX/pr13291-i1-store.ll
+++ b/test/CodeGen/NVPTX/pr13291-i1-store.ll
@@ -5,7 +5,7 @@ define ptx_kernel void @t1(i1* %a) {
; PTX32: mov.u16 %rs{{[0-9]+}}, 0;
; PTX32-NEXT: st.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}};
; PTX64: mov.u16 %rs{{[0-9]+}}, 0;
-; PTX64-NEXT: st.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}};
+; PTX64-NEXT: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}};
store i1 false, i1* %a
ret void
}
@@ -15,7 +15,7 @@ define ptx_kernel void @t2(i1* %a, i8* %b) {
; PTX32: ld.u8 %rs{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: and.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, 1;
; PTX32: setp.eq.b16 %p{{[0-9]+}}, %rs{{[0-9]+}}, 1;
-; PTX64: ld.u8 %rs{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: and.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, 1;
; PTX64: setp.eq.b16 %p{{[0-9]+}}, %rs{{[0-9]+}}, 1;
diff --git a/test/CodeGen/NVPTX/rotate.ll b/test/CodeGen/NVPTX/rotate.ll
new file mode 100644
index 000000000000..dfc8b4fd5fcb
--- /dev/null
+++ b/test/CodeGen/NVPTX/rotate.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck --check-prefix=SM20 %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_35 | FileCheck --check-prefix=SM35 %s
+
+
+declare i32 @llvm.nvvm.rotate.b32(i32, i32)
+declare i64 @llvm.nvvm.rotate.b64(i64, i32)
+declare i64 @llvm.nvvm.rotate.right.b64(i64, i32)
+
+; SM20: rotate32
+; SM35: rotate32
+define i32 @rotate32(i32 %a, i32 %b) {
+; SM20: shl.b32
+; SM20: sub.s32
+; SM20: shr.b32
+; SM20: add.u32
+; SM35: shf.l.wrap.b32
+ %val = tail call i32 @llvm.nvvm.rotate.b32(i32 %a, i32 %b)
+ ret i32 %val
+}
+
+; SM20: rotate64
+; SM35: rotate64
+define i64 @rotate64(i64 %a, i32 %b) {
+; SM20: shl.b64
+; SM20: sub.u32
+; SM20: shr.b64
+; SM20: add.u64
+; SM35: shf.l.wrap.b32
+; SM35: shf.l.wrap.b32
+ %val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 %b)
+ ret i64 %val
+}
+
+; SM20: rotateright64
+; SM35: rotateright64
+define i64 @rotateright64(i64 %a, i32 %b) {
+; SM20: shr.b64
+; SM20: sub.u32
+; SM20: shl.b64
+; SM20: add.u64
+; SM35: shf.r.wrap.b32
+; SM35: shf.r.wrap.b32
+ %val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 %b)
+ ret i64 %val
+}
+
+; SM20: rotl0
+; SM35: rotl0
+define i32 @rotl0(i32 %x) {
+; SM20: shl.b32
+; SM20: shr.b32
+; SM20: add.u32
+; SM35: shf.l.wrap.b32
+ %t0 = shl i32 %x, 8
+ %t1 = lshr i32 %x, 24
+ %t2 = or i32 %t0, %t1
+ ret i32 %t2
+}
diff --git a/test/CodeGen/NVPTX/shift-parts.ll b/test/CodeGen/NVPTX/shift-parts.ll
new file mode 100644
index 000000000000..748297caf339
--- /dev/null
+++ b/test/CodeGen/NVPTX/shift-parts.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: shift_parts_left_128
+define void @shift_parts_left_128(i128* %val, i128* %amtptr) {
+; CHECK: shl.b64
+; CHECK: mov.u32
+; CHECK: sub.s32
+; CHECK: shr.u64
+; CHECK: or.b64
+; CHECK: add.s32
+; CHECK: shl.b64
+; CHECK: setp.gt.s32
+; CHECK: selp.b64
+; CHECK: shl.b64
+ %amt = load i128* %amtptr
+ %a = load i128* %val
+ %val0 = shl i128 %a, %amt
+ store i128 %val0, i128* %val
+ ret void
+}
+
+; CHECK: shift_parts_right_128
+define void @shift_parts_right_128(i128* %val, i128* %amtptr) {
+; CHECK: shr.u64
+; CHECK: sub.s32
+; CHECK: shl.b64
+; CHECK: or.b64
+; CHECK: add.s32
+; CHECK: shr.s64
+; CHECK: setp.gt.s32
+; CHECK: selp.b64
+; CHECK: shr.s64
+ %amt = load i128* %amtptr
+ %a = load i128* %val
+ %val0 = ashr i128 %a, %amt
+ store i128 %val0, i128* %val
+ ret void
+}
diff --git a/test/CodeGen/NVPTX/st-addrspace.ll b/test/CodeGen/NVPTX/st-addrspace.ll
index 68c09fe065bc..34a83f343324 100644
--- a/test/CodeGen/NVPTX/st-addrspace.ll
+++ b/test/CodeGen/NVPTX/st-addrspace.ll
@@ -7,7 +7,7 @@
define void @st_global_i8(i8 addrspace(1)* %ptr, i8 %a) {
; PTX32: st.global.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.global.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i8 %a, i8 addrspace(1)* %ptr
ret void
@@ -16,7 +16,7 @@ define void @st_global_i8(i8 addrspace(1)* %ptr, i8 %a) {
define void @st_shared_i8(i8 addrspace(3)* %ptr, i8 %a) {
; PTX32: st.shared.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.shared.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i8 %a, i8 addrspace(3)* %ptr
ret void
@@ -25,7 +25,7 @@ define void @st_shared_i8(i8 addrspace(3)* %ptr, i8 %a) {
define void @st_local_i8(i8 addrspace(5)* %ptr, i8 %a) {
; PTX32: st.local.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.local.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i8 %a, i8 addrspace(5)* %ptr
ret void
@@ -36,7 +36,7 @@ define void @st_local_i8(i8 addrspace(5)* %ptr, i8 %a) {
define void @st_global_i16(i16 addrspace(1)* %ptr, i16 %a) {
; PTX32: st.global.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.global.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i16 %a, i16 addrspace(1)* %ptr
ret void
@@ -45,7 +45,7 @@ define void @st_global_i16(i16 addrspace(1)* %ptr, i16 %a) {
define void @st_shared_i16(i16 addrspace(3)* %ptr, i16 %a) {
; PTX32: st.shared.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.shared.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i16 %a, i16 addrspace(3)* %ptr
ret void
@@ -54,7 +54,7 @@ define void @st_shared_i16(i16 addrspace(3)* %ptr, i16 %a) {
define void @st_local_i16(i16 addrspace(5)* %ptr, i16 %a) {
; PTX32: st.local.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.local.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i16 %a, i16 addrspace(5)* %ptr
ret void
@@ -65,7 +65,7 @@ define void @st_local_i16(i16 addrspace(5)* %ptr, i16 %a) {
define void @st_global_i32(i32 addrspace(1)* %ptr, i32 %a) {
; PTX32: st.global.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
; PTX32: ret
-; PTX64: st.global.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; PTX64: ret
store i32 %a, i32 addrspace(1)* %ptr
ret void
@@ -74,7 +74,7 @@ define void @st_global_i32(i32 addrspace(1)* %ptr, i32 %a) {
define void @st_shared_i32(i32 addrspace(3)* %ptr, i32 %a) {
; PTX32: st.shared.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
; PTX32: ret
-; PTX64: st.shared.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; PTX64: ret
store i32 %a, i32 addrspace(3)* %ptr
ret void
@@ -83,7 +83,7 @@ define void @st_shared_i32(i32 addrspace(3)* %ptr, i32 %a) {
define void @st_local_i32(i32 addrspace(5)* %ptr, i32 %a) {
; PTX32: st.local.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
; PTX32: ret
-; PTX64: st.local.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; PTX64: ret
store i32 %a, i32 addrspace(5)* %ptr
ret void
@@ -92,27 +92,27 @@ define void @st_local_i32(i32 addrspace(5)* %ptr, i32 %a) {
;; i64
define void @st_global_i64(i64 addrspace(1)* %ptr, i64 %a) {
-; PTX32: st.global.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.global.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.global.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; PTX64: ret
store i64 %a, i64 addrspace(1)* %ptr
ret void
}
define void @st_shared_i64(i64 addrspace(3)* %ptr, i64 %a) {
-; PTX32: st.shared.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.shared.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.shared.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; PTX64: ret
store i64 %a, i64 addrspace(3)* %ptr
ret void
}
define void @st_local_i64(i64 addrspace(5)* %ptr, i64 %a) {
-; PTX32: st.local.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.local.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.local.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; PTX64: ret
store i64 %a, i64 addrspace(5)* %ptr
ret void
@@ -123,7 +123,7 @@ define void @st_local_i64(i64 addrspace(5)* %ptr, i64 %a) {
define void @st_global_f32(float addrspace(1)* %ptr, float %a) {
; PTX32: st.global.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
; PTX32: ret
-; PTX64: st.global.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; PTX64: ret
store float %a, float addrspace(1)* %ptr
ret void
@@ -132,7 +132,7 @@ define void @st_global_f32(float addrspace(1)* %ptr, float %a) {
define void @st_shared_f32(float addrspace(3)* %ptr, float %a) {
; PTX32: st.shared.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
; PTX32: ret
-; PTX64: st.shared.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; PTX64: ret
store float %a, float addrspace(3)* %ptr
ret void
@@ -141,7 +141,7 @@ define void @st_shared_f32(float addrspace(3)* %ptr, float %a) {
define void @st_local_f32(float addrspace(5)* %ptr, float %a) {
; PTX32: st.local.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
; PTX32: ret
-; PTX64: st.local.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; PTX64: ret
store float %a, float addrspace(5)* %ptr
ret void
@@ -150,27 +150,27 @@ define void @st_local_f32(float addrspace(5)* %ptr, float %a) {
;; f64
define void @st_global_f64(double addrspace(1)* %ptr, double %a) {
-; PTX32: st.global.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.global.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.global.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; PTX64: ret
store double %a, double addrspace(1)* %ptr
ret void
}
define void @st_shared_f64(double addrspace(3)* %ptr, double %a) {
-; PTX32: st.shared.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.shared.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.shared.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; PTX64: ret
store double %a, double addrspace(3)* %ptr
ret void
}
define void @st_local_f64(double addrspace(5)* %ptr, double %a) {
-; PTX32: st.local.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.local.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.local.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; PTX64: ret
store double %a, double addrspace(5)* %ptr
ret void
diff --git a/test/CodeGen/NVPTX/st-generic.ll b/test/CodeGen/NVPTX/st-generic.ll
index b9c616fbd19e..022f7ab214ca 100644
--- a/test/CodeGen/NVPTX/st-generic.ll
+++ b/test/CodeGen/NVPTX/st-generic.ll
@@ -7,7 +7,7 @@
define void @st_global_i8(i8 addrspace(0)* %ptr, i8 %a) {
; PTX32: st.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i8 %a, i8 addrspace(0)* %ptr
ret void
@@ -18,7 +18,7 @@ define void @st_global_i8(i8 addrspace(0)* %ptr, i8 %a) {
define void @st_global_i16(i16 addrspace(0)* %ptr, i16 %a) {
; PTX32: st.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
; PTX32: ret
-; PTX64: st.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; PTX64: ret
store i16 %a, i16 addrspace(0)* %ptr
ret void
@@ -29,7 +29,7 @@ define void @st_global_i16(i16 addrspace(0)* %ptr, i16 %a) {
define void @st_global_i32(i32 addrspace(0)* %ptr, i32 %a) {
; PTX32: st.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
; PTX32: ret
-; PTX64: st.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; PTX64: ret
store i32 %a, i32 addrspace(0)* %ptr
ret void
@@ -38,9 +38,9 @@ define void @st_global_i32(i32 addrspace(0)* %ptr, i32 %a) {
;; i64
define void @st_global_i64(i64 addrspace(0)* %ptr, i64 %a) {
-; PTX32: st.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; PTX64: ret
store i64 %a, i64 addrspace(0)* %ptr
ret void
@@ -51,7 +51,7 @@ define void @st_global_i64(i64 addrspace(0)* %ptr, i64 %a) {
define void @st_global_f32(float addrspace(0)* %ptr, float %a) {
; PTX32: st.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
; PTX32: ret
-; PTX64: st.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; PTX64: ret
store float %a, float addrspace(0)* %ptr
ret void
@@ -60,9 +60,9 @@ define void @st_global_f32(float addrspace(0)* %ptr, float %a) {
;; f64
define void @st_global_f64(double addrspace(0)* %ptr, double %a) {
-; PTX32: st.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
; PTX32: ret
-; PTX64: st.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; PTX64: ret
store double %a, double addrspace(0)* %ptr
ret void
diff --git a/test/CodeGen/NVPTX/surf-read-cuda.ll b/test/CodeGen/NVPTX/surf-read-cuda.ll
new file mode 100644
index 000000000000..10a1ecc4c473
--- /dev/null
+++ b/test/CodeGen/NVPTX/surf-read-cuda.ll
@@ -0,0 +1,53 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=SM20
+; RUN: llc < %s -march=nvptx -mcpu=sm_30 | FileCheck %s --check-prefix=SM30
+
+target triple = "nvptx-unknown-cuda"
+
+declare i32 @llvm.nvvm.suld.1d.i32.trap(i64, i32)
+declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)
+
+
+; SM20-LABEL: .entry foo
+; SM30-LABEL: .entry foo
+define void @foo(i64 %img, float* %red, i32 %idx) {
+; SM20: ld.param.u64 %rd[[SURFREG:[0-9]+]], [foo_param_0];
+; SM20: suld.b.1d.b32.trap {%r[[RED:[0-9]+]]}, [%rd[[SURFREG]], {%r{{[0-9]+}}}]
+; SM30: ld.param.u64 %rd[[SURFREG:[0-9]+]], [foo_param_0];
+; SM30: suld.b.1d.b32.trap {%r[[RED:[0-9]+]]}, [%rd[[SURFREG]], {%r{{[0-9]+}}}]
+ %val = tail call i32 @llvm.nvvm.suld.1d.i32.trap(i64 %img, i32 %idx)
+; SM20: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
+; SM30: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
+ %ret = sitofp i32 %val to float
+; SM20: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
+; SM30: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
+ store float %ret, float* %red
+ ret void
+}
+
+@surf0 = internal addrspace(1) global i64 0, align 8
+
+; SM20-LABEL: .entry bar
+; SM30-LABEL: .entry bar
+define void @bar(float* %red, i32 %idx) {
+; SM30: mov.u64 %rd[[SURFHANDLE:[0-9]+]], surf0
+ %surfHandle = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @surf0)
+; SM20: suld.b.1d.b32.trap {%r[[RED:[0-9]+]]}, [surf0, {%r{{[0-9]+}}}]
+; SM30: suld.b.1d.b32.trap {%r[[RED:[0-9]+]]}, [%rd[[SURFHANDLE]], {%r{{[0-9]+}}}]
+ %val = tail call i32 @llvm.nvvm.suld.1d.i32.trap(i64 %surfHandle, i32 %idx)
+; SM20: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
+; SM30: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
+ %ret = sitofp i32 %val to float
+; SM20: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
+; SM30: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
+ store float %ret, float* %red
+ ret void
+}
+
+
+
+
+!nvvm.annotations = !{!1, !2, !3}
+!1 = metadata !{void (i64, float*, i32)* @foo, metadata !"kernel", i32 1}
+!2 = metadata !{void (float*, i32)* @bar, metadata !"kernel", i32 1}
+!3 = metadata !{i64 addrspace(1)* @surf0, metadata !"surface", i32 1}
+
diff --git a/test/CodeGen/NVPTX/surf-read.ll b/test/CodeGen/NVPTX/surf-read.ll
new file mode 100644
index 000000000000..a69d03efe0d2
--- /dev/null
+++ b/test/CodeGen/NVPTX/surf-read.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+target triple = "nvptx-unknown-nvcl"
+
+declare i32 @llvm.nvvm.suld.1d.i32.trap(i64, i32)
+
+; CHECK: .entry foo
+define void @foo(i64 %img, float* %red, i32 %idx) {
+; CHECK: suld.b.1d.b32.trap {%r[[RED:[0-9]+]]}, [foo_param_0, {%r{{[0-9]+}}}]
+ %val = tail call i32 @llvm.nvvm.suld.1d.i32.trap(i64 %img, i32 %idx)
+; CHECK: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
+ %ret = sitofp i32 %val to float
+; CHECK: st.f32 [%r{{[0-9]+}}], %f[[REDF]]
+ store float %ret, float* %red
+ ret void
+}
+
+!nvvm.annotations = !{!1, !2}
+!1 = metadata !{void (i64, float*, i32)* @foo, metadata !"kernel", i32 1}
+!2 = metadata !{void (i64, float*, i32)* @foo, metadata !"rdwrimage", i32 0}
diff --git a/test/CodeGen/NVPTX/surf-write-cuda.ll b/test/CodeGen/NVPTX/surf-write-cuda.ll
new file mode 100644
index 000000000000..654c47f46957
--- /dev/null
+++ b/test/CodeGen/NVPTX/surf-write-cuda.ll
@@ -0,0 +1,42 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=SM20
+; RUN: llc < %s -march=nvptx -mcpu=sm_30 | FileCheck %s --check-prefix=SM30
+
+target triple = "nvptx-unknown-cuda"
+
+declare void @llvm.nvvm.sust.b.1d.i32.trap(i64, i32, i32)
+declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)
+
+
+; SM20-LABEL: .entry foo
+; SM30-LABEL: .entry foo
+define void @foo(i64 %img, i32 %val, i32 %idx) {
+; SM20: ld.param.u64 %rd[[SURFREG:[0-9]+]], [foo_param_0];
+; SM20: sust.b.1d.b32.trap [%rd[[SURFREG]], {%r{{[0-9]+}}}], {%r{{[0-9]+}}}
+; SM30: ld.param.u64 %rd[[SURFREG:[0-9]+]], [foo_param_0];
+; SM30: sust.b.1d.b32.trap [%rd[[SURFREG]], {%r{{[0-9]+}}}], {%r{{[0-9]+}}}
+ tail call void @llvm.nvvm.sust.b.1d.i32.trap(i64 %img, i32 %idx, i32 %val)
+ ret void
+}
+
+
+@surf0 = internal addrspace(1) global i64 0, align 8
+
+
+
+; SM20-LABEL: .entry bar
+; SM30-LABEL: .entry bar
+define void @bar(i32 %val, i32 %idx) {
+; SM30: mov.u64 %rd[[SURFHANDLE:[0-9]+]], surf0
+ %surfHandle = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @surf0)
+; SM20: sust.b.1d.b32.trap [surf0, {%r{{[0-9]+}}}], {%r{{[0-9]+}}}
+; SM30: sust.b.1d.b32.trap [%rd[[SURFHANDLE]], {%r{{[0-9]+}}}], {%r{{[0-9]+}}}
+ tail call void @llvm.nvvm.sust.b.1d.i32.trap(i64 %surfHandle, i32 %idx, i32 %val)
+ ret void
+}
+
+
+!nvvm.annotations = !{!1, !2, !3}
+!1 = metadata !{void (i64, i32, i32)* @foo, metadata !"kernel", i32 1}
+!2 = metadata !{void (i32, i32)* @bar, metadata !"kernel", i32 1}
+!3 = metadata !{i64 addrspace(1)* @surf0, metadata !"surface", i32 1}
+
diff --git a/test/CodeGen/NVPTX/surf-write.ll b/test/CodeGen/NVPTX/surf-write.ll
new file mode 100644
index 000000000000..880231f96599
--- /dev/null
+++ b/test/CodeGen/NVPTX/surf-write.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+target triple = "nvptx-unknown-nvcl"
+
+declare void @llvm.nvvm.sust.b.1d.i32.trap(i64, i32, i32)
+
+; CHECK: .entry foo
+define void @foo(i64 %img, i32 %val, i32 %idx) {
+; CHECK: sust.b.1d.b32.trap [foo_param_0, {%r{{[0-9]+}}}], {%r{{[0-9]+}}}
+ tail call void @llvm.nvvm.sust.b.1d.i32.trap(i64 %img, i32 %idx, i32 %val)
+ ret void
+}
+
+!nvvm.annotations = !{!1, !2}
+!1 = metadata !{void (i64, i32, i32)* @foo, metadata !"kernel", i32 1}
+!2 = metadata !{void (i64, i32, i32)* @foo, metadata !"wroimage", i32 0}
diff --git a/test/CodeGen/NVPTX/symbol-naming.ll b/test/CodeGen/NVPTX/symbol-naming.ll
new file mode 100644
index 000000000000..bd1333f1c4e6
--- /dev/null
+++ b/test/CodeGen/NVPTX/symbol-naming.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX32
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX64
+
+; Verify that the NVPTX target removes invalid symbol names prior to emitting
+; PTX.
+
+; PTX32-NOT: .str
+; PTX64-NOT: .str
+
+; PTX32-DAG: _$_str1
+; PTX32-DAG: _$_str
+
+; PTX64-DAG: _$_str1
+; PTX64-DAG: _$_str
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+
+@.str = private unnamed_addr constant [13 x i8] c"%d %f %c %d\0A\00", align 1
+@_$_str = private unnamed_addr constant [13 x i8] c"%d %f %c %d\0A\00", align 1
+
+
+; Function Attrs: nounwind
+define void @foo(i32 %a, float %b, i8 signext %c, i32 %e) {
+entry:
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0))
+ ret void
+}
+
+declare i32 @printf(i8*, ...)
diff --git a/test/CodeGen/NVPTX/tex-read-cuda.ll b/test/CodeGen/NVPTX/tex-read-cuda.ll
new file mode 100644
index 000000000000..ee0cefa919b1
--- /dev/null
+++ b/test/CodeGen/NVPTX/tex-read-cuda.ll
@@ -0,0 +1,46 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=SM20
+; RUN: llc < %s -march=nvptx -mcpu=sm_30 | FileCheck %s --check-prefix=SM30
+
+
+target triple = "nvptx-unknown-cuda"
+
+declare { float, float, float, float } @llvm.nvvm.tex.unified.1d.v4f32.s32(i64, i32)
+declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)
+
+; SM20-LABEL: .entry foo
+; SM30-LABEL: .entry foo
+define void @foo(i64 %img, float* %red, i32 %idx) {
+; SM20: ld.param.u64 %rd[[TEXREG:[0-9]+]], [foo_param_0];
+; SM20: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [%rd[[TEXREG]], {%r{{[0-9]+}}}]
+; SM30: ld.param.u64 %rd[[TEXREG:[0-9]+]], [foo_param_0];
+; SM30: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [%rd[[TEXREG]], {%r{{[0-9]+}}}]
+ %val = tail call { float, float, float, float } @llvm.nvvm.tex.unified.1d.v4f32.s32(i64 %img, i32 %idx)
+ %ret = extractvalue { float, float, float, float } %val, 0
+; SM20: st.f32 [%r{{[0-9]+}}], %f[[RED]]
+; SM30: st.f32 [%r{{[0-9]+}}], %f[[RED]]
+ store float %ret, float* %red
+ ret void
+}
+
+
+@tex0 = internal addrspace(1) global i64 0, align 8
+
+; SM20-LABEL: .entry bar
+; SM30-LABEL: .entry bar
+define void @bar(float* %red, i32 %idx) {
+; SM30: mov.u64 %rd[[TEXHANDLE:[0-9]+]], tex0
+ %texHandle = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @tex0)
+; SM20: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [tex0, {%r{{[0-9]+}}}]
+; SM30: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [%rd[[TEXHANDLE]], {%r{{[0-9]+}}}]
+ %val = tail call { float, float, float, float } @llvm.nvvm.tex.unified.1d.v4f32.s32(i64 %texHandle, i32 %idx)
+ %ret = extractvalue { float, float, float, float } %val, 0
+; SM20: st.f32 [%r{{[0-9]+}}], %f[[RED]]
+; SM30: st.f32 [%r{{[0-9]+}}], %f[[RED]]
+ store float %ret, float* %red
+ ret void
+}
+
+!nvvm.annotations = !{!1, !2, !3}
+!1 = metadata !{void (i64, float*, i32)* @foo, metadata !"kernel", i32 1}
+!2 = metadata !{void (float*, i32)* @bar, metadata !"kernel", i32 1}
+!3 = metadata !{i64 addrspace(1)* @tex0, metadata !"texture", i32 1}
diff --git a/test/CodeGen/NVPTX/tex-read.ll b/test/CodeGen/NVPTX/tex-read.ll
new file mode 100644
index 000000000000..55e4bfc9e453
--- /dev/null
+++ b/test/CodeGen/NVPTX/tex-read.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+target triple = "nvptx-unknown-nvcl"
+
+declare { float, float, float, float } @llvm.nvvm.tex.1d.v4f32.s32(i64, i64, i32)
+
+; CHECK: .entry foo
+define void @foo(i64 %img, i64 %sampler, float* %red, i32 %idx) {
+; CHECK: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [foo_param_0, foo_param_1, {%r{{[0-9]+}}}]
+ %val = tail call { float, float, float, float } @llvm.nvvm.tex.1d.v4f32.s32(i64 %img, i64 %sampler, i32 %idx)
+ %ret = extractvalue { float, float, float, float } %val, 0
+; CHECK: st.f32 [%r{{[0-9]+}}], %f[[RED]]
+ store float %ret, float* %red
+ ret void
+}
+
+!nvvm.annotations = !{!1, !2, !3}
+!1 = metadata !{void (i64, i64, float*, i32)* @foo, metadata !"kernel", i32 1}
+!2 = metadata !{void (i64, i64, float*, i32)* @foo, metadata !"rdoimage", i32 0}
+!3 = metadata !{void (i64, i64, float*, i32)* @foo, metadata !"sampler", i32 1}
diff --git a/test/CodeGen/NVPTX/texsurf-queries.ll b/test/CodeGen/NVPTX/texsurf-queries.ll
new file mode 100644
index 000000000000..c7637ccff77a
--- /dev/null
+++ b/test/CodeGen/NVPTX/texsurf-queries.ll
@@ -0,0 +1,103 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=SM20
+; RUN: llc < %s -march=nvptx -mcpu=sm_30 | FileCheck %s --check-prefix=SM30
+
+target triple = "nvptx-unknown-cuda"
+
+@tex0 = internal addrspace(1) global i64 0, align 8
+@surf0 = internal addrspace(1) global i64 0, align 8
+
+declare i32 @llvm.nvvm.txq.width(i64)
+declare i32 @llvm.nvvm.txq.height(i64)
+declare i32 @llvm.nvvm.suq.width(i64)
+declare i32 @llvm.nvvm.suq.height(i64)
+declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)
+
+
+; SM20-LABEL: @t0
+; SM30-LABEL: @t0
+define i32 @t0(i64 %texHandle) {
+; SM20: txq.width.b32
+; SM30: txq.width.b32
+ %width = tail call i32 @llvm.nvvm.txq.width(i64 %texHandle)
+ ret i32 %width
+}
+
+; SM20-LABEL: @t1
+; SM30-LABEL: @t1
+define i32 @t1() {
+; SM30: mov.u64 %rd[[HANDLE:[0-9]+]], tex0
+ %texHandle = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @tex0)
+; SM20: txq.width.b32 %r{{[0-9]+}}, [tex0]
+; SM30: txq.width.b32 %r{{[0-9]+}}, [%rd[[HANDLE:[0-9]+]]]
+ %width = tail call i32 @llvm.nvvm.txq.width(i64 %texHandle)
+ ret i32 %width
+}
+
+
+; SM20-LABEL: @t2
+; SM30-LABEL: @t2
+define i32 @t2(i64 %texHandle) {
+; SM20: txq.height.b32
+; SM30: txq.height.b32
+ %height = tail call i32 @llvm.nvvm.txq.height(i64 %texHandle)
+ ret i32 %height
+}
+
+; SM20-LABEL: @t3
+; SM30-LABEL: @t3
+define i32 @t3() {
+; SM30: mov.u64 %rd[[HANDLE:[0-9]+]], tex0
+ %texHandle = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @tex0)
+; SM20: txq.height.b32 %r{{[0-9]+}}, [tex0]
+; SM30: txq.height.b32 %r{{[0-9]+}}, [%rd[[HANDLE:[0-9]+]]]
+ %height = tail call i32 @llvm.nvvm.txq.height(i64 %texHandle)
+ ret i32 %height
+}
+
+
+; SM20-LABEL: @s0
+; SM30-LABEL: @s0
+define i32 @s0(i64 %surfHandle) {
+; SM20: suq.width.b32
+; SM30: suq.width.b32
+ %width = tail call i32 @llvm.nvvm.suq.width(i64 %surfHandle)
+ ret i32 %width
+}
+
+; SM20-LABEL: @s1
+; SM30-LABEL: @s1
+define i32 @s1() {
+; SM30: mov.u64 %rd[[HANDLE:[0-9]+]], surf0
+ %surfHandle = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @surf0)
+; SM20: suq.width.b32 %r{{[0-9]+}}, [surf0]
+; SM30: suq.width.b32 %r{{[0-9]+}}, [%rd[[HANDLE:[0-9]+]]]
+ %width = tail call i32 @llvm.nvvm.suq.width(i64 %surfHandle)
+ ret i32 %width
+}
+
+
+; SM20-LABEL: @s2
+; SM30-LABEL: @s2
+define i32 @s2(i64 %surfHandle) {
+; SM20: suq.height.b32
+; SM30: suq.height.b32
+ %height = tail call i32 @llvm.nvvm.suq.height(i64 %surfHandle)
+ ret i32 %height
+}
+
+; SM20-LABEL: @s3
+; SM30-LABEL: @s3
+define i32 @s3() {
+; SM30: mov.u64 %rd[[HANDLE:[0-9]+]], surf0
+ %surfHandle = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @surf0)
+; SM20: suq.height.b32 %r{{[0-9]+}}, [surf0]
+; SM30: suq.height.b32 %r{{[0-9]+}}, [%rd[[HANDLE:[0-9]+]]]
+ %height = tail call i32 @llvm.nvvm.suq.height(i64 %surfHandle)
+ ret i32 %height
+}
+
+
+
+!nvvm.annotations = !{!1, !2}
+!1 = metadata !{i64 addrspace(1)* @tex0, metadata !"texture", i32 1}
+!2 = metadata !{i64 addrspace(1)* @surf0, metadata !"surface", i32 1}
diff --git a/test/CodeGen/NVPTX/vec-param-load.ll b/test/CodeGen/NVPTX/vec-param-load.ll
index a384348a6590..4193ac4085cc 100644
--- a/test/CodeGen/NVPTX/vec-param-load.ll
+++ b/test/CodeGen/NVPTX/vec-param-load.ll
@@ -5,9 +5,9 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define <16 x float> @foo(<16 x float> %a) {
; Make sure we index into vectors properly
-; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0];
-; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+16];
-; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+32];
; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+48];
+; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+32];
+; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0+16];
+; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [foo_param_0];
ret <16 x float> %a
}
diff --git a/test/CodeGen/NVPTX/vector-call.ll b/test/CodeGen/NVPTX/vector-call.ll
new file mode 100644
index 000000000000..a03d7fd41914
--- /dev/null
+++ b/test/CodeGen/NVPTX/vector-call.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+target triple = "nvptx-unknown-cuda"
+
+declare void @bar(<4 x i32>)
+
+; CHECK-LABEL: @foo
+define void @foo(<4 x i32> %a) {
+; CHECK: st.param.v4.b32
+ tail call void @bar(<4 x i32> %a)
+ ret void
+}
diff --git a/test/CodeGen/NVPTX/weak-global.ll b/test/CodeGen/NVPTX/weak-global.ll
new file mode 100644
index 000000000000..2bef4c5228a9
--- /dev/null
+++ b/test/CodeGen/NVPTX/weak-global.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; CHECK: .weak .global .align 4 .u32 g
+@g = common addrspace(1) global i32 zeroinitializer
+
+define i32 @func0() {
+ %val = load i32 addrspace(1)* @g
+ ret i32 %val
+}
diff --git a/test/CodeGen/NVPTX/weak-linkage.ll b/test/CodeGen/NVPTX/weak-linkage.ll
new file mode 100644
index 000000000000..7a1335783642
--- /dev/null
+++ b/test/CodeGen/NVPTX/weak-linkage.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
+; CHECK: .weak .func foo
+define weak void @foo() {
+ ret void
+}
+
+; CHECK: .visible .func bar
+define void @bar() {
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll b/test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll
index 73736c57fea6..5eb6e3757471 100644
--- a/test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll
+++ b/test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8.8.0 | grep "foo r3, r4"
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8.8.0 | grep "bari r3, 47"
+; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8.8.0 -no-integrated-as | grep "foo r3, r4"
+; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8.8.0 -no-integrated-as | grep "bari r3, 47"
; PR1351
diff --git a/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll b/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
index 1df51406fac9..490aa0c1442c 100644
--- a/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
+++ b/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -no-integrated-as < %s
; PR1382
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
index 3d3728dcde12..df83f8b191c6 100644
--- a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
+++ b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -mcpu=g5 < %s | FileCheck %s
+; RUN: llc -mcpu=g5 -addr-sink-using-gep=1 < %s | FileCheck %s
;; Formerly crashed, see PR 1508
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-apple-darwin8"
diff --git a/test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll b/test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll
index 00a402e0e487..8802b97d2a6a 100644
--- a/test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll
+++ b/test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll
@@ -1,6 +1,5 @@
; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vadduhm
; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vsubuhm
-; XFAIL: *
define <4 x i32> @test() nounwind {
ret <4 x i32> < i32 4293066722, i32 4293066722, i32 4293066722, i32 4293066722>
diff --git a/test/CodeGen/PowerPC/2008-12-12-EH.ll b/test/CodeGen/PowerPC/2008-12-12-EH.ll
deleted file mode 100644
index a2a5e9e39641..000000000000
--- a/test/CodeGen/PowerPC/2008-12-12-EH.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -disable-cfi -march=ppc32 -mtriple=powerpc-apple-darwin9 | grep ^__Z1fv.eh
-
-define void @_Z1fv() {
-entry:
- br label %return
-
-return:
- ret void
-}
diff --git a/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll b/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
deleted file mode 100644
index ae2acd43e9c3..000000000000
--- a/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin | FileCheck %s
-
-; ModuleID = '/Volumes/MacOS9/tests/WebKit/JavaScriptCore/profiler/ProfilerServer.mm'
-
-@"\01l_objc_msgSend_fixup_alloc" = linker_private_weak hidden global i32 0, section "__DATA, __objc_msgrefs, coalesced", align 16
-
-; CHECK: .globl l_objc_msgSend_fixup_alloc
-; CHECK: .weak_definition l_objc_msgSend_fixup_alloc
diff --git a/test/CodeGen/PowerPC/Atomics-32.ll b/test/CodeGen/PowerPC/Atomics-32.ll
index 64f149541bef..b7f23b1dd83e 100644
--- a/test/CodeGen/PowerPC/Atomics-32.ll
+++ b/test/CodeGen/PowerPC/Atomics-32.ll
@@ -529,63 +529,73 @@ define void @test_compare_and_swap() nounwind {
entry:
%0 = load i8* @uc, align 1
%1 = load i8* @sc, align 1
- %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
+ %pair2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
+ %2 = extractvalue { i8, i1 } %pair2, 0
store i8 %2, i8* @sc, align 1
%3 = load i8* @uc, align 1
%4 = load i8* @sc, align 1
- %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
+ %pair5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
+ %5 = extractvalue { i8, i1 } %pair5, 0
store i8 %5, i8* @uc, align 1
%6 = load i8* @uc, align 1
%7 = zext i8 %6 to i16
%8 = load i8* @sc, align 1
%9 = sext i8 %8 to i16
%10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
+ %pair11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
+ %11 = extractvalue { i16, i1 } %pair11, 0
store i16 %11, i16* @ss, align 2
%12 = load i8* @uc, align 1
%13 = zext i8 %12 to i16
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
+ %pair17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
+ %17 = extractvalue { i16, i1 } %pair17, 0
store i16 %17, i16* @us, align 2
%18 = load i8* @uc, align 1
%19 = zext i8 %18 to i32
%20 = load i8* @sc, align 1
%21 = sext i8 %20 to i32
%22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
+ %pair23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
+ %23 = extractvalue { i32, i1 } %pair23, 0
store i32 %23, i32* @si, align 4
%24 = load i8* @uc, align 1
%25 = zext i8 %24 to i32
%26 = load i8* @sc, align 1
%27 = sext i8 %26 to i32
%28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
+ %pair29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
+ %29 = extractvalue { i32, i1 } %pair29, 0
store i32 %29, i32* @ui, align 4
%30 = load i8* @uc, align 1
%31 = zext i8 %30 to i32
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i32
%34 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
- %35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic
+ %pair35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic monotonic
+ %35 = extractvalue { i32, i1 } %pair35, 0
store i32 %35, i32* @sl, align 4
%36 = load i8* @uc, align 1
%37 = zext i8 %36 to i32
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i32
%40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
- %41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic
+ %pair41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic monotonic
+ %41 = extractvalue { i32, i1 } %pair41, 0
store i32 %41, i32* @ul, align 4
%42 = load i8* @uc, align 1
%43 = load i8* @sc, align 1
- %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
+ %pair44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
+ %44 = extractvalue { i8, i1 } %pair44, 0
%45 = icmp eq i8 %44, %42
%46 = zext i1 %45 to i32
store i32 %46, i32* @ui, align 4
%47 = load i8* @uc, align 1
%48 = load i8* @sc, align 1
- %49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic
+ %pair49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic monotonic
+ %49 = extractvalue { i8, i1 } %pair49, 0
%50 = icmp eq i8 %49, %47
%51 = zext i1 %50 to i32
store i32 %51, i32* @ui, align 4
@@ -594,7 +604,8 @@ entry:
%54 = load i8* @sc, align 1
%55 = sext i8 %54 to i16
%56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic
+ %pair57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic monotonic
+ %57 = extractvalue { i16, i1 } %pair57, 0
%58 = icmp eq i16 %57, %53
%59 = zext i1 %58 to i32
store i32 %59, i32* @ui, align 4
@@ -603,7 +614,8 @@ entry:
%62 = load i8* @sc, align 1
%63 = sext i8 %62 to i16
%64 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic
+ %pair65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic monotonic
+ %65 = extractvalue { i16, i1 } %pair65, 0
%66 = icmp eq i16 %65, %61
%67 = zext i1 %66 to i32
store i32 %67, i32* @ui, align 4
@@ -612,7 +624,8 @@ entry:
%70 = load i8* @sc, align 1
%71 = sext i8 %70 to i32
%72 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic
+ %pair73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic monotonic
+ %73 = extractvalue { i32, i1 } %pair73, 0
%74 = icmp eq i32 %73, %69
%75 = zext i1 %74 to i32
store i32 %75, i32* @ui, align 4
@@ -621,7 +634,8 @@ entry:
%78 = load i8* @sc, align 1
%79 = sext i8 %78 to i32
%80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic
+ %pair81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic monotonic
+ %81 = extractvalue { i32, i1 } %pair81, 0
%82 = icmp eq i32 %81, %77
%83 = zext i1 %82 to i32
store i32 %83, i32* @ui, align 4
@@ -630,7 +644,8 @@ entry:
%86 = load i8* @sc, align 1
%87 = sext i8 %86 to i32
%88 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
- %89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic
+ %pair89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic monotonic
+ %89 = extractvalue { i32, i1 } %pair89, 0
%90 = icmp eq i32 %89, %85
%91 = zext i1 %90 to i32
store i32 %91, i32* @ui, align 4
@@ -639,7 +654,8 @@ entry:
%94 = load i8* @sc, align 1
%95 = sext i8 %94 to i32
%96 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
- %97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic
+ %pair97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic monotonic
+ %97 = extractvalue { i32, i1 } %pair97, 0
%98 = icmp eq i32 %97, %93
%99 = zext i1 %98 to i32
store i32 %99, i32* @ui, align 4
diff --git a/test/CodeGen/PowerPC/Atomics-64.ll b/test/CodeGen/PowerPC/Atomics-64.ll
index d35b84874705..122b54e080ac 100644
--- a/test/CodeGen/PowerPC/Atomics-64.ll
+++ b/test/CodeGen/PowerPC/Atomics-64.ll
@@ -536,64 +536,64 @@ define void @test_compare_and_swap() nounwind {
entry:
%0 = load i8* @uc, align 1
%1 = load i8* @sc, align 1
- %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
+ %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
store i8 %2, i8* @sc, align 1
%3 = load i8* @uc, align 1
%4 = load i8* @sc, align 1
- %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
+ %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
store i8 %5, i8* @uc, align 1
%6 = load i8* @uc, align 1
%7 = zext i8 %6 to i16
%8 = load i8* @sc, align 1
%9 = sext i8 %8 to i16
%10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
+ %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
store i16 %11, i16* @ss, align 2
%12 = load i8* @uc, align 1
%13 = zext i8 %12 to i16
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
+ %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
store i16 %17, i16* @us, align 2
%18 = load i8* @uc, align 1
%19 = zext i8 %18 to i32
%20 = load i8* @sc, align 1
%21 = sext i8 %20 to i32
%22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
+ %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
store i32 %23, i32* @si, align 4
%24 = load i8* @uc, align 1
%25 = zext i8 %24 to i32
%26 = load i8* @sc, align 1
%27 = sext i8 %26 to i32
%28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
+ %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
store i32 %29, i32* @ui, align 4
%30 = load i8* @uc, align 1
%31 = zext i8 %30 to i64
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i64
%34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic
+ %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic monotonic
store i64 %35, i64* @sl, align 8
%36 = load i8* @uc, align 1
%37 = zext i8 %36 to i64
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i64
%40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic
+ %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic monotonic
store i64 %41, i64* @ul, align 8
%42 = load i8* @uc, align 1
%43 = load i8* @sc, align 1
- %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
+ %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
%45 = icmp eq i8 %44, %42
%46 = zext i1 %45 to i8
%47 = zext i8 %46 to i32
store i32 %47, i32* @ui, align 4
%48 = load i8* @uc, align 1
%49 = load i8* @sc, align 1
- %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic
+ %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic monotonic
%51 = icmp eq i8 %50, %48
%52 = zext i1 %51 to i8
%53 = zext i8 %52 to i32
@@ -603,7 +603,7 @@ entry:
%56 = load i8* @sc, align 1
%57 = sext i8 %56 to i16
%58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
- %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic
+ %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic monotonic
%60 = icmp eq i16 %59, %55
%61 = zext i1 %60 to i8
%62 = zext i8 %61 to i32
@@ -613,7 +613,7 @@ entry:
%65 = load i8* @sc, align 1
%66 = sext i8 %65 to i16
%67 = bitcast i8* bitcast (i16* @us to i8*) to i16*
- %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic
+ %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic monotonic
%69 = icmp eq i16 %68, %64
%70 = zext i1 %69 to i8
%71 = zext i8 %70 to i32
@@ -623,7 +623,7 @@ entry:
%74 = load i8* @sc, align 1
%75 = sext i8 %74 to i32
%76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic
+ %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic monotonic
%78 = icmp eq i32 %77, %73
%79 = zext i1 %78 to i8
%80 = zext i8 %79 to i32
@@ -633,7 +633,7 @@ entry:
%83 = load i8* @sc, align 1
%84 = sext i8 %83 to i32
%85 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic
+ %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic monotonic
%87 = icmp eq i32 %86, %82
%88 = zext i1 %87 to i8
%89 = zext i8 %88 to i32
@@ -643,7 +643,7 @@ entry:
%92 = load i8* @sc, align 1
%93 = sext i8 %92 to i64
%94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic
+ %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic monotonic
%96 = icmp eq i64 %95, %91
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
@@ -653,7 +653,7 @@ entry:
%101 = load i8* @sc, align 1
%102 = sext i8 %101 to i64
%103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic
+ %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic monotonic
%105 = icmp eq i64 %104, %100
%106 = zext i1 %105 to i8
%107 = zext i8 %106 to i32
diff --git a/test/CodeGen/PowerPC/Frames-alloca.ll b/test/CodeGen/PowerPC/Frames-alloca.ll
index 4588bc05352b..c701fef8e629 100644
--- a/test/CodeGen/PowerPC/Frames-alloca.ll
+++ b/test/CodeGen/PowerPC/Frames-alloca.ll
@@ -12,15 +12,15 @@
; CHECK-PPC32-NOFP: stw r31, -4(r1)
; CHECK-PPC32-NOFP: lwz r1, 0(r1)
; CHECK-PPC32-NOFP: lwz r31, -4(r1)
-; CHECK-PPC32-RS: stwu r1, -80(r1)
-; CHECK-PPC32-RS-NOFP: stwu r1, -80(r1)
+; CHECK-PPC32-RS: stwu r1, -48(r1)
+; CHECK-PPC32-RS-NOFP: stwu r1, -48(r1)
; CHECK-PPC64: std r31, -8(r1)
-; CHECK-PPC64: stdu r1, -128(r1)
+; CHECK-PPC64: stdu r1, -64(r1)
; CHECK-PPC64: ld r1, 0(r1)
; CHECK-PPC64: ld r31, -8(r1)
; CHECK-PPC64-NOFP: std r31, -8(r1)
-; CHECK-PPC64-NOFP: stdu r1, -128(r1)
+; CHECK-PPC64-NOFP: stdu r1, -64(r1)
; CHECK-PPC64-NOFP: ld r1, 0(r1)
; CHECK-PPC64-NOFP: ld r31, -8(r1)
diff --git a/test/CodeGen/PowerPC/Frames-large.ll b/test/CodeGen/PowerPC/Frames-large.ll
index d07fea726770..0ccea42619af 100644
--- a/test/CodeGen/PowerPC/Frames-large.ll
+++ b/test/CodeGen/PowerPC/Frames-large.ll
@@ -15,9 +15,9 @@ define i32* @f1() nounwind {
; PPC32-NOFP: _f1:
; PPC32-NOFP: lis r0, -1
-; PPC32-NOFP: ori r0, r0, 32704
+; PPC32-NOFP: ori r0, r0, 32736
; PPC32-NOFP: stwux r1, r1, r0
-; PPC32-NOFP: addi r3, r1, 68
+; PPC32-NOFP: addi r3, r1, 36
; PPC32-NOFP: lwz r1, 0(r1)
; PPC32-NOFP: blr
@@ -25,10 +25,10 @@ define i32* @f1() nounwind {
; PPC32-FP: _f1:
; PPC32-FP: lis r0, -1
; PPC32-FP: stw r31, -4(r1)
-; PPC32-FP: ori r0, r0, 32704
+; PPC32-FP: ori r0, r0, 32736
; PPC32-FP: stwux r1, r1, r0
; PPC32-FP: mr r31, r1
-; PPC32-FP: addi r3, r31, 64
+; PPC32-FP: addi r3, r31, 32
; PPC32-FP: lwz r1, 0(r1)
; PPC32-FP: lwz r31, -4(r1)
; PPC32-FP: blr
@@ -36,9 +36,9 @@ define i32* @f1() nounwind {
; PPC64-NOFP: _f1:
; PPC64-NOFP: lis r0, -1
-; PPC64-NOFP: ori r0, r0, 32656
+; PPC64-NOFP: ori r0, r0, 32720
; PPC64-NOFP: stdux r1, r1, r0
-; PPC64-NOFP: addi r3, r1, 116
+; PPC64-NOFP: addi r3, r1, 52
; PPC64-NOFP: ld r1, 0(r1)
; PPC64-NOFP: blr
@@ -46,10 +46,10 @@ define i32* @f1() nounwind {
; PPC64-FP: _f1:
; PPC64-FP: lis r0, -1
; PPC64-FP: std r31, -8(r1)
-; PPC64-FP: ori r0, r0, 32640
+; PPC64-FP: ori r0, r0, 32704
; PPC64-FP: stdux r1, r1, r0
; PPC64-FP: mr r31, r1
-; PPC64-FP: addi r3, r31, 124
+; PPC64-FP: addi r3, r31, 60
; PPC64-FP: ld r1, 0(r1)
; PPC64-FP: ld r31, -8(r1)
; PPC64-FP: blr
diff --git a/test/CodeGen/PowerPC/Frames-small.ll b/test/CodeGen/PowerPC/Frames-small.ll
index 0f6bd1021f80..28c1a5b54dd2 100644
--- a/test/CodeGen/PowerPC/Frames-small.ll
+++ b/test/CodeGen/PowerPC/Frames-small.ll
@@ -1,25 +1,25 @@
; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -o %t1
; RUN: not grep "stw r31, -4(r1)" %t1
-; RUN: grep "stwu r1, -16448(r1)" %t1
-; RUN: grep "addi r1, r1, 16448" %t1
+; RUN: grep "stwu r1, -16416(r1)" %t1
+; RUN: grep "addi r1, r1, 16416" %t1
; RUN: llc < %s -march=ppc32 | \
; RUN: not grep "lwz r31, -4(r1)"
; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -disable-fp-elim \
; RUN: -o %t2
; RUN: grep "stw r31, -4(r1)" %t2
-; RUN: grep "stwu r1, -16448(r1)" %t2
-; RUN: grep "addi r1, r1, 16448" %t2
+; RUN: grep "stwu r1, -16416(r1)" %t2
+; RUN: grep "addi r1, r1, 16416" %t2
; RUN: grep "lwz r31, -4(r1)" %t2
; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 -o %t3
; RUN: not grep "std r31, -8(r1)" %t3
-; RUN: grep "stdu r1, -16496(r1)" %t3
-; RUN: grep "addi r1, r1, 16496" %t3
+; RUN: grep "stdu r1, -16432(r1)" %t3
+; RUN: grep "addi r1, r1, 16432" %t3
; RUN: not grep "ld r31, -8(r1)" %t3
; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 -disable-fp-elim \
; RUN: -o %t4
; RUN: grep "std r31, -8(r1)" %t4
-; RUN: grep "stdu r1, -16512(r1)" %t4
-; RUN: grep "addi r1, r1, 16512" %t4
+; RUN: grep "stdu r1, -16448(r1)" %t4
+; RUN: grep "addi r1, r1, 16448" %t4
; RUN: grep "ld r31, -8(r1)" %t4
define i32* @f1() {
diff --git a/test/CodeGen/PowerPC/aa-tbaa.ll b/test/CodeGen/PowerPC/aa-tbaa.ll
new file mode 100644
index 000000000000..1939841f1f7e
--- /dev/null
+++ b/test/CodeGen/PowerPC/aa-tbaa.ll
@@ -0,0 +1,41 @@
+; RUN: llc -enable-misched -misched=shuffle -enable-aa-sched-mi -use-tbaa-in-sched-mi=0 -post-RA-scheduler=0 -mcpu=ppc64 < %s | FileCheck %s
+
+; REQUIRES: asserts
+; -misched=shuffle is NDEBUG only!
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%"class.llvm::MCOperand" = type { i8, %union.anon.110 }
+%union.anon.110 = type { i64 }
+
+define void @foo(i32 %v) {
+entry:
+ %MCOp = alloca %"class.llvm::MCOperand", align 8
+ br label %next
+
+; CHECK-LABEL: @foo
+
+next:
+ %sunkaddr18 = ptrtoint %"class.llvm::MCOperand"* %MCOp to i64
+ %sunkaddr19 = add i64 %sunkaddr18, 8
+ %sunkaddr20 = inttoptr i64 %sunkaddr19 to double*
+ store double 0.000000e+00, double* %sunkaddr20, align 8, !tbaa !1
+ %sunkaddr21 = ptrtoint %"class.llvm::MCOperand"* %MCOp to i64
+ %sunkaddr22 = add i64 %sunkaddr21, 8
+ %sunkaddr23 = inttoptr i64 %sunkaddr22 to i32*
+ store i32 %v, i32* %sunkaddr23, align 8, !tbaa !2
+ ret void
+
+; Make sure that the 64-bit store comes first, regardless of what TBAA says
+; about the two not aliasing!
+; CHECK: li [[REG:[0-9]+]], 0
+; CHECK: std [[REG]], -[[OFF:[0-9]+]](1)
+; CHECK: stw 3, -[[OFF]](1)
+; CHECK: blr
+}
+
+!0 = metadata !{ metadata !"root" }
+!1 = metadata !{ metadata !"set1", metadata !0 }
+!2 = metadata !{ metadata !"set2", metadata !0 }
+
diff --git a/test/CodeGen/PowerPC/alias.ll b/test/CodeGen/PowerPC/alias.ll
new file mode 100644
index 000000000000..86e41148a0d7
--- /dev/null
+++ b/test/CodeGen/PowerPC/alias.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -code-model=medium| FileCheck --check-prefix=CHECK --check-prefix=MEDIUM %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK --check-prefix=LARGE %s
+
+@foo = global i32 42
+@fooa = alias i32* @foo
+
+@foo2 = global i64 42
+@foo2a = alias i64* @foo2
+
+; CHECK-LABEL: bar:
+define i32 @bar() {
+; MEDIUM: addis 3, 2, fooa@toc@ha
+; LARGE: addis 3, 2, .LC1@toc@ha
+ %a = load i32* @fooa
+ ret i32 %a
+}
+
+; CHECK-LABEL: bar2:
+define i64 @bar2() {
+; MEDIUM: addis 3, 2, foo2a@toc@ha
+; MEDIUM: addi 3, 3, foo2a@toc@l
+; LARGE: addis 3, 2, .LC3@toc@ha
+ %a = load i64* @foo2a
+ ret i64 %a
+}
+
+; LARGE: .LC1:
+; LARGE-NEXT: .tc fooa[TC],fooa
+
+; LARGE: .LC3:
+; LARGE-NEXT: .tc foo2a[TC],foo2a
diff --git a/test/CodeGen/PowerPC/anon_aggr.ll b/test/CodeGen/PowerPC/anon_aggr.ll
index ce07d8845ddb..6c4f140de127 100644
--- a/test/CodeGen/PowerPC/anon_aggr.ll
+++ b/test/CodeGen/PowerPC/anon_aggr.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
+; RUN: llc -O0 -mcpu=ppc64 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
; RUN: llc -O0 -mcpu=g4 -mtriple=powerpc-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN32 %s
; RUN: llc -O0 -mcpu=ppc970 -mtriple=powerpc64-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN64 %s
@@ -62,8 +62,7 @@ unequal:
}
; CHECK-LABEL: func2:
-; CHECK: addi [[REG1:[0-9]+]], 1, 64
-; CHECK: ld [[REG2:[0-9]+]], 8([[REG1]])
+; CHECK: ld [[REG2:[0-9]+]], 72(1)
; CHECK: cmpld {{[0-9]+}}, 4, [[REG2]]
; CHECK-DAG: std [[REG2]], -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]]
@@ -82,8 +81,7 @@ unequal:
; DARWIN32: lwz r3, -[[OFFSET2]]
; DARWIN64: _func2:
-; DARWIN64: addi r[[REG1:[0-9]+]], r1, 64
-; DARWIN64: ld r[[REG2:[0-9]+]], 8(r[[REG1]])
+; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: mr
; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN64: cmpld cr{{[0-9]+}}, r[[REGA]], r[[REG2]]
@@ -108,10 +106,8 @@ unequal:
}
; CHECK-LABEL: func3:
-; CHECK: addi [[REG1:[0-9]+]], 1, 64
-; CHECK: addi [[REG2:[0-9]+]], 1, 48
-; CHECK: ld [[REG3:[0-9]+]], 8([[REG1]])
-; CHECK: ld [[REG4:[0-9]+]], 8([[REG2]])
+; CHECK: ld [[REG3:[0-9]+]], 72(1)
+; CHECK: ld [[REG4:[0-9]+]], 56(1)
; CHECK: cmpld {{[0-9]+}}, [[REG4]], [[REG3]]
; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1)
; CHECK: std [[REG4]], -[[OFFSET2:[0-9]+]](1)
@@ -130,10 +126,8 @@ unequal:
; DARWIN32: lwz r3, -[[OFFSET1]]
; DARWIN64: _func3:
-; DARWIN64: addi r[[REG1:[0-9]+]], r1, 64
-; DARWIN64: addi r[[REG2:[0-9]+]], r1, 48
-; DARWIN64: ld r[[REG3:[0-9]+]], 8(r[[REG1]])
-; DARWIN64: ld r[[REG4:[0-9]+]], 8(r[[REG2]])
+; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1)
+; DARWIN64: ld r[[REG4:[0-9]+]], 56(r1)
; DARWIN64: cmpld cr{{[0-9]+}}, r[[REG4]], r[[REG3]]
; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG4]], -[[OFFSET2:[0-9]+]]
@@ -157,12 +151,11 @@ unequal:
}
; CHECK-LABEL: func4:
-; CHECK: addi [[REG1:[0-9]+]], 1, 128
+; CHECK: ld [[REG3:[0-9]+]], 136(1)
; CHECK: ld [[REG2:[0-9]+]], 120(1)
-; CHECK: ld [[REG3:[0-9]+]], 8([[REG1]])
; CHECK: cmpld {{[0-9]+}}, [[REG2]], [[REG3]]
-; CHECK: std [[REG2]], -[[OFFSET1:[0-9]+]](1)
; CHECK: std [[REG3]], -[[OFFSET2:[0-9]+]](1)
+; CHECK: std [[REG2]], -[[OFFSET1:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
@@ -178,9 +171,8 @@ unequal:
; DARWIN32: lwz r[[REG1]], -[[OFFSET2]]
; DARWIN64: _func4:
-; DARWIN64: addi r[[REG1:[0-9]+]], r1, 128
; DARWIN64: ld r[[REG2:[0-9]+]], 120(r1)
-; DARWIN64: ld r[[REG3:[0-9]+]], 8(r[[REG1]])
+; DARWIN64: ld r[[REG3:[0-9]+]], 136(r1)
; DARWIN64: mr r[[REG4:[0-9]+]], r[[REG2]]
; DARWIN64: cmpld cr{{[0-9]+}}, r[[REG2]], r[[REG3]]
; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
diff --git a/test/CodeGen/PowerPC/atomic-1.ll b/test/CodeGen/PowerPC/atomic-1.ll
index 1737916375ca..997a016a5dcd 100644
--- a/test/CodeGen/PowerPC/atomic-1.ll
+++ b/test/CodeGen/PowerPC/atomic-1.ll
@@ -11,7 +11,8 @@ define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
define i32 @exchange_and_cmp(i32* %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: lwarx
- %tmp = cmpxchg i32* %mem, i32 0, i32 1 monotonic
+ %tmppair = cmpxchg i32* %mem, i32 0, i32 1 monotonic monotonic
+ %tmp = extractvalue { i32, i1 } %tmppair, 0
; CHECK: stwcx.
; CHECK: stwcx.
ret i32 %tmp
diff --git a/test/CodeGen/PowerPC/atomic-2.ll b/test/CodeGen/PowerPC/atomic-2.ll
index e56a77966714..843250f10b4f 100644
--- a/test/CodeGen/PowerPC/atomic-2.ll
+++ b/test/CodeGen/PowerPC/atomic-2.ll
@@ -11,7 +11,8 @@ define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
define i64 @exchange_and_cmp(i64* %mem) nounwind {
; CHECK-LABEL: exchange_and_cmp:
; CHECK: ldarx
- %tmp = cmpxchg i64* %mem, i64 0, i64 1 monotonic
+ %tmppair = cmpxchg i64* %mem, i64 0, i64 1 monotonic monotonic
+ %tmp = extractvalue { i64, i1 } %tmppair, 0
; CHECK: stdcx.
; CHECK: stdcx.
ret i64 %tmp
diff --git a/test/CodeGen/PowerPC/available-externally.ll b/test/CodeGen/PowerPC/available-externally.ll
index abed0de80b88..53c435995485 100644
--- a/test/CodeGen/PowerPC/available-externally.ll
+++ b/test/CodeGen/PowerPC/available-externally.ll
@@ -1,7 +1,8 @@
; RUN: llc < %s -relocation-model=static | FileCheck %s -check-prefix=STATIC
-; RUN: llc < %s -relocation-model=pic | FileCheck %s -check-prefix=PIC
+; RUN: llc < %s -relocation-model=pic -mtriple=powerpc-apple-darwin8 | FileCheck %s -check-prefix=PIC
+; RUN: llc < %s -relocation-model=pic -mtriple=powerpc-unknown-linux | FileCheck %s -check-prefix=PICELF
; RUN: llc < %s -relocation-model=pic -mtriple=powerpc64-apple-darwin8 | FileCheck %s -check-prefix=PIC64
-; RUN: llc < %s -relocation-model=dynamic-no-pic | FileCheck %s -check-prefix=DYNAMIC
+; RUN: llc < %s -relocation-model=dynamic-no-pic -mtriple=powerpc-apple-darwin8 | FileCheck %s -check-prefix=DYNAMIC
; RUN: llc < %s -relocation-model=dynamic-no-pic -mtriple=powerpc64-apple-darwin8 | FileCheck %s -check-prefix=DYNAMIC64
; PR4482
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -18,6 +19,10 @@ entry:
; PIC: bl L_exact_log2$stub
; PIC: blr
+; PICELF: foo:
+; PICELF: bl exact_log2@PLT
+; PICELF: blr
+
; PIC64: _foo:
; PIC64: bl L_exact_log2$stub
; PIC64: blr
diff --git a/test/CodeGen/PowerPC/bdzlr.ll b/test/CodeGen/PowerPC/bdzlr.ll
index e487558e942a..29b74c6c8c66 100644
--- a/test/CodeGen/PowerPC/bdzlr.ll
+++ b/test/CodeGen/PowerPC/bdzlr.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-crbits | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-CRB
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -54,6 +55,12 @@ for.end: ; preds = %for.body, %if.end,
; CHECK: bnelr
; CHECK: bdzlr
; CHECK-NOT: blr
+
+; CHECK-CRB: @lua_xmove
+; CHECK-CRB: bclr 12,
+; CHECK-CRB: bclr 12,
+; CHECK-CRB: bdzlr
+; CHECK-CRB-NOT: blr
}
attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/cc.ll b/test/CodeGen/PowerPC/cc.ll
index ab724f5a7e2d..f92121bd7202 100644
--- a/test/CodeGen/PowerPC/cc.ll
+++ b/test/CodeGen/PowerPC/cc.ll
@@ -19,7 +19,7 @@ end:
; CHECK-LABEL: @test1
; CHECK: mfcr [[REG1:[0-9]+]]
-; CHECK-DAG: cmpld
+; CHECK-DAG: cmpd
; CHECK-DAG: mfocrf [[REG2:[0-9]+]],
; CHECK-DAG: stw [[REG1]], 8(1)
; CHECK-DAG: stw [[REG2]], -4(1)
@@ -52,7 +52,7 @@ end:
; CHECK-LABEL: @test2
; CHECK: mfcr [[REG1:[0-9]+]]
-; CHECK-DAG: cmpld
+; CHECK-DAG: cmpd
; CHECK-DAG: mfocrf [[REG2:[0-9]+]],
; CHECK-DAG: stw [[REG1]], 8(1)
; CHECK-DAG: stw [[REG2]], -4(1)
diff --git a/test/CodeGen/PowerPC/coalesce-ext.ll b/test/CodeGen/PowerPC/coalesce-ext.ll
index f19175c9beaa..eb7cd261b564 100644
--- a/test/CodeGen/PowerPC/coalesce-ext.ll
+++ b/test/CodeGen/PowerPC/coalesce-ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=ppc64 -mtriple=powerpc64-apple-darwin < %s | FileCheck %s
+; RUN: llc -march=ppc64 -mcpu=g5 -mtriple=powerpc64-apple-darwin < %s | FileCheck %s
; Check that the peephole optimizer knows about sext and zext instructions.
; CHECK: test1sext
define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
diff --git a/test/CodeGen/PowerPC/complex-return.ll b/test/CodeGen/PowerPC/complex-return.ll
index 3eb30e93fd31..8a6adaee5556 100644
--- a/test/CodeGen/PowerPC/complex-return.ll
+++ b/test/CodeGen/PowerPC/complex-return.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=pwr7 -O0 < %s | FileCheck %s
+; RUN: llc -mcpu=ppc64 -O0 < %s | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -26,8 +26,8 @@ entry:
; CHECK-LABEL: foo:
; CHECK: lfd 3
; CHECK: lfd 4
-; CHECK: lfd 2
; CHECK: lfd 1
+; CHECK: lfd 2
define { float, float } @oof() nounwind {
entry:
diff --git a/test/CodeGen/PowerPC/crash.ll b/test/CodeGen/PowerPC/crash.ll
new file mode 100644
index 000000000000..5cecca72fdbf
--- /dev/null
+++ b/test/CodeGen/PowerPC/crash.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7
+
+define void @test1(i1 %x, i8 %x2, i8* %x3, i64 %x4) {
+entry:
+ %tmp3 = and i64 %x4, 16
+ %bf.shl = trunc i64 %tmp3 to i8
+ %bf.clear = and i8 %x2, -17
+ %bf.set = or i8 %bf.shl, %bf.clear
+ br i1 %x, label %if.then, label %if.end
+
+if.then:
+ ret void
+
+if.end:
+ store i8 %bf.set, i8* %x3, align 4
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/crbit-asm.ll b/test/CodeGen/PowerPC/crbit-asm.ll
new file mode 100644
index 000000000000..373e334f02bd
--- /dev/null
+++ b/test/CodeGen/PowerPC/crbit-asm.ll
@@ -0,0 +1,59 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define zeroext i1 @testi1(i1 zeroext %b1, i1 zeroext %b2) #0 {
+entry:
+ %0 = tail call i8 asm "crand $0, $1, $2", "=^wc,^wc,^wc"(i1 %b1, i1 %b2) #0
+ %1 = and i8 %0, 1
+ %tobool3 = icmp ne i8 %1, 0
+ ret i1 %tobool3
+
+; CHECK-LABEL: @testi1
+; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
+; CHECK-DAG: li [[REG1:[0-9]+]], 0
+; CHECK-DAG: cror [[REG2:[0-9]+]], 1, 1
+; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
+; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
+; CHECK-DAG: li [[REG4:[0-9]+]], 1
+; CHECK: isel 3, [[REG4]], [[REG1]], [[REG3]]
+; CHECK: blr
+}
+
+define signext i32 @testi32(i32 signext %b1, i32 signext %b2) #0 {
+entry:
+ %0 = tail call i32 asm "crand $0, $1, $2", "=^wc,^wc,^wc"(i32 %b1, i32 %b2) #0
+ ret i32 %0
+
+; The ABI sign_extend should combine with the any_extend from the asm result,
+; and the result will be 0 or -1. This highlights the fact that only the first
+; bit is meaningful.
+; CHECK-LABEL: @testi32
+; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
+; CHECK-DAG: li [[REG1:[0-9]+]], 0
+; CHECK-DAG: cror [[REG2:[0-9]+]], 1, 1
+; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
+; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
+; CHECK-DAG: li [[REG4:[0-9]+]], -1
+; CHECK: isel 3, [[REG4]], [[REG1]], [[REG3]]
+; CHECK: blr
+}
+
+define zeroext i8 @testi8(i8 zeroext %b1, i8 zeroext %b2) #0 {
+entry:
+ %0 = tail call i8 asm "crand $0, $1, $2", "=^wc,^wc,^wc"(i8 %b1, i8 %b2) #0
+ ret i8 %0
+
+; CHECK-LABEL: @testi8
+; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
+; CHECK-DAG: li [[REG1:[0-9]+]], 0
+; CHECK-DAG: cror [[REG2:[0-9]+]], 1, 1
+; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
+; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
+; CHECK-DAG: li [[REG4:[0-9]+]], 1
+; CHECK: isel 3, [[REG4]], [[REG1]], [[REG3]]
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/crbits.ll b/test/CodeGen/PowerPC/crbits.ll
new file mode 100644
index 000000000000..06e90019db76
--- /dev/null
+++ b/test/CodeGen/PowerPC/crbits.ll
@@ -0,0 +1,192 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define zeroext i1 @test1(float %v1, float %v2) #0 {
+entry:
+ %cmp = fcmp oge float %v1, %v2
+ %cmp2 = fcmp ole float %v2, 0.000000e+00
+ %and5 = and i1 %cmp, %cmp2
+ ret i1 %and5
+
+; CHECK-LABEL: @test1
+; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
+; CHECK-DAG: li [[REG1:[0-9]+]], 1
+; CHECK-DAG: lfs [[REG2:[0-9]+]],
+; CHECK-DAG: fcmpu {{[0-9]+}}, 2, [[REG2]]
+; CHECK: crnor
+; CHECK: crnor
+; CHECK: crnand [[REG4:[0-9]+]],
+; CHECK: isel 3, 0, [[REG1]], [[REG4]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i1 @test2(float %v1, float %v2) #0 {
+entry:
+ %cmp = fcmp oge float %v1, %v2
+ %cmp2 = fcmp ole float %v2, 0.000000e+00
+ %xor5 = xor i1 %cmp, %cmp2
+ ret i1 %xor5
+
+; CHECK-LABEL: @test2
+; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
+; CHECK-DAG: li [[REG1:[0-9]+]], 1
+; CHECK-DAG: lfs [[REG2:[0-9]+]],
+; CHECK-DAG: fcmpu {{[0-9]+}}, 2, [[REG2]]
+; CHECK: crnor
+; CHECK: crnor
+; CHECK: creqv [[REG4:[0-9]+]],
+; CHECK: isel 3, 0, [[REG1]], [[REG4]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i1 @test3(float %v1, float %v2, i32 signext %x) #0 {
+entry:
+ %cmp = fcmp oge float %v1, %v2
+ %cmp2 = fcmp ole float %v2, 0.000000e+00
+ %cmp4 = icmp ne i32 %x, -2
+ %and7 = and i1 %cmp2, %cmp4
+ %xor8 = xor i1 %cmp, %and7
+ ret i1 %xor8
+
+; CHECK-LABEL: @test3
+; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
+; CHECK-DAG: li [[REG1:[0-9]+]], 1
+; CHECK-DAG: lfs [[REG2:[0-9]+]],
+; CHECK-DAG: fcmpu {{[0-9]+}}, 2, [[REG2]]
+; CHECK: crnor
+; CHECK: crnor
+; CHECK: crandc
+; CHECK: creqv [[REG4:[0-9]+]],
+; CHECK: isel 3, 0, [[REG1]], [[REG4]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i1 @test4(i1 zeroext %v1, i1 zeroext %v2, i1 zeroext %v3) #0 {
+entry:
+ %and8 = and i1 %v1, %v2
+ %or9 = or i1 %and8, %v3
+ ret i1 %or9
+
+; CHECK-LABEL: @test4
+; CHECK: and [[REG1:[0-9]+]], 3, 4
+; CHECK: or 3, [[REG1]], 5
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i1 @test5(i1 zeroext %v1, i1 zeroext %v2, i32 signext %v3) #0 {
+entry:
+ %and6 = and i1 %v1, %v2
+ %cmp = icmp ne i32 %v3, -2
+ %or7 = or i1 %and6, %cmp
+ ret i1 %or7
+
+; CHECK-LABEL: @test5
+; CHECK-DAG: and [[REG1:[0-9]+]], 3, 4
+; CHECK-DAG: cmpwi {{[0-9]+}}, 5, -2
+; CHECK-DAG: li [[REG3:[0-9]+]], 1
+; CHECK-DAG: andi. {{[0-9]+}}, [[REG1]], 1
+; CHECK-DAG: crandc [[REG5:[0-9]+]],
+; CHECK: isel 3, 0, [[REG3]], [[REG5]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i1 @test6(i1 zeroext %v1, i1 zeroext %v2, i32 signext %v3) #0 {
+entry:
+ %cmp = icmp ne i32 %v3, -2
+ %or6 = or i1 %cmp, %v2
+ %and7 = and i1 %or6, %v1
+ ret i1 %and7
+
+; CHECK-LABEL: @test6
+; CHECK-DAG: andi. {{[0-9]+}}, 3, 1
+; CHECK-DAG: cmpwi {{[0-9]+}}, 5, -2
+; CHECK-DAG: cror [[REG1:[0-9]+]], 1, 1
+; CHECK-DAG: andi. {{[0-9]+}}, 4, 1
+; CHECK-DAG: li [[REG2:[0-9]+]], 1
+; CHECK-DAG: crorc [[REG4:[0-9]+]], 1,
+; CHECK-DAG: crnand [[REG5:[0-9]+]], [[REG4]], [[REG1]]
+; CHECK: isel 3, 0, [[REG2]], [[REG5]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define signext i32 @test7(i1 zeroext %v2, i32 signext %i1, i32 signext %i2) #0 {
+entry:
+ %cond = select i1 %v2, i32 %i1, i32 %i2
+ ret i32 %cond
+
+; CHECK-LABEL: @test7
+; CHECK: andi. {{[0-9]+}}, 3, 1
+; CHECK: isel 3, 4, 5, 1
+; CHECK: blr
+}
+
+define signext i32 @exttest7(i32 signext %a) #0 {
+entry:
+ %cmp = icmp eq i32 %a, 5
+ %cond = select i1 %cmp, i32 7, i32 8
+ ret i32 %cond
+
+; CHECK-LABEL: @exttest7
+; CHECK-DAG: cmplwi {{[0-9]+}}, 3, 5
+; CHECK-DAG: li [[REG1:[0-9]+]], 8
+; CHECK-DAG: li [[REG2:[0-9]+]], 7
+; CHECK: isel 3, [[REG2]], [[REG1]],
+; CHECK-NOT: rldicl
+; CHECK: blr
+}
+
+define zeroext i32 @exttest8() #0 {
+entry:
+ %v0 = load i64* undef, align 8
+ %sub = sub i64 80, %v0
+ %div = lshr i64 %sub, 1
+ %conv13 = trunc i64 %div to i32
+ %cmp14 = icmp ugt i32 %conv13, 80
+ %.conv13 = select i1 %cmp14, i32 0, i32 %conv13
+ ret i32 %.conv13
+; CHECK-LABEL: @exttest8
+; This is a don't-crash test: %conv13 is both one of the possible select output
+; values and also an input to the conditional feeding it.
+}
+
+; Function Attrs: nounwind readnone
+define float @test8(i1 zeroext %v2, float %v1, float %v3) #0 {
+entry:
+ %cond = select i1 %v2, float %v1, float %v3
+ ret float %cond
+
+; CHECK-LABEL: @test8
+; CHECK: andi. {{[0-9]+}}, 3, 1
+; CHECK: bclr 12, 1, 0
+; CHECK: fmr 1, 2
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define signext i32 @test10(i32 signext %v1, i32 signext %v2) #0 {
+entry:
+ %tobool = icmp ne i32 %v1, 0
+ %lnot = icmp eq i32 %v2, 0
+ %and3 = and i1 %tobool, %lnot
+ %and = zext i1 %and3 to i32
+ ret i32 %and
+
+; CHECK-LABEL: @test10
+; CHECK-DAG: cmpwi {{[0-9]+}}, 3, 0
+; CHECK-DAG: cmpwi {{[0-9]+}}, 4, 0
+; CHECK-DAG: li [[REG2:[0-9]+]], 1
+; CHECK-DAG: crorc [[REG3:[0-9]+]],
+; CHECK: isel 3, 0, [[REG2]], [[REG3]]
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/ctrloop-large-ec.ll b/test/CodeGen/PowerPC/ctrloop-large-ec.ll
index c18bdabdb03a..cce23fabf63e 100644
--- a/test/CodeGen/PowerPC/ctrloop-large-ec.ll
+++ b/test/CodeGen/PowerPC/ctrloop-large-ec.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=ppc32 < %s
+; RUN: llc -mcpu=ppc32 < %s | FileCheck %s
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
target triple = "powerpc-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/ctrloop-le.ll b/test/CodeGen/PowerPC/ctrloop-le.ll
index 7b8185ed5261..60b0536f9924 100644
--- a/test/CodeGen/PowerPC/ctrloop-le.ll
+++ b/test/CodeGen/PowerPC/ctrloop-le.ll
@@ -2,6 +2,9 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "powerpc64-unknown-linux-gnu"
; RUN: llc < %s -march=ppc64 | FileCheck %s
+; XFAIL: *
+; ScalarEvolution (SE) needs improvement
+
; CHECK: test_pos1_ir_sle
; CHECK: bdnz
; a < b
diff --git a/test/CodeGen/PowerPC/ctrloop-lt.ll b/test/CodeGen/PowerPC/ctrloop-lt.ll
index eaab61a826d9..a9dc42c1c971 100644
--- a/test/CodeGen/PowerPC/ctrloop-lt.ll
+++ b/test/CodeGen/PowerPC/ctrloop-lt.ll
@@ -2,6 +2,9 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "powerpc64-unknown-linux-gnu"
; RUN: llc < %s -march=ppc64 | FileCheck %s
+; XFAIL: *
+; ScalarEvolution (SE) needs improvement
+
; CHECK: test_pos1_ir_slt
; CHECK: bdnz
; a < b
diff --git a/test/CodeGen/PowerPC/ctrloop-sh.ll b/test/CodeGen/PowerPC/ctrloop-sh.ll
new file mode 100644
index 000000000000..d8e6fc79a665
--- /dev/null
+++ b/test/CodeGen/PowerPC/ctrloop-sh.ll
@@ -0,0 +1,72 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-p:32:32-i128:64-n32"
+target triple = "powerpc-ellcc-linux"
+
+; Function Attrs: nounwind
+define void @foo1(i128* %a, i128* readonly %b, i128* readonly %c) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %0 = load i128* %b, align 16
+ %1 = load i128* %c, align 16
+ %shl = shl i128 %0, %1
+ store i128 %shl, i128* %a, align 16
+ %inc = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %inc, 2048
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+
+; CHECK-LABEL: @foo1
+; CHECK-NOT: mtctr
+}
+
+; Function Attrs: nounwind
+define void @foo2(i128* %a, i128* readonly %b, i128* readonly %c) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %0 = load i128* %b, align 16
+ %1 = load i128* %c, align 16
+ %shl = ashr i128 %0, %1
+ store i128 %shl, i128* %a, align 16
+ %inc = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %inc, 2048
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+
+; CHECK-LABEL: @foo2
+; CHECK-NOT: mtctr
+}
+
+; Function Attrs: nounwind
+define void @foo3(i128* %a, i128* readonly %b, i128* readonly %c) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %0 = load i128* %b, align 16
+ %1 = load i128* %c, align 16
+ %shl = lshr i128 %0, %1
+ store i128 %shl, i128* %a, align 16
+ %inc = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %inc, 2048
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+
+; CHECK-LABEL: @foo3
+; CHECK-NOT: mtctr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/dbg.ll b/test/CodeGen/PowerPC/dbg.ll
index cb93decac8e9..6beea558c0db 100644
--- a/test/CodeGen/PowerPC/dbg.ll
+++ b/test/CodeGen/PowerPC/dbg.ll
@@ -18,7 +18,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.module.flags = !{!22}
!0 = metadata !{i32 720913, metadata !21, i32 12, metadata !"clang version 3.1", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !"", metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 720942, metadata !21, null, metadata !"main", metadata !"main", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !13, i32 0} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 720937, metadata !21} ; [ DW_TAG_file_type ]
@@ -28,8 +28,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!10 = metadata !{i32 720911, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !11} ; [ DW_TAG_pointer_type ]
!11 = metadata !{i32 720911, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !12} ; [ DW_TAG_pointer_type ]
!12 = metadata !{i32 720932, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 8} ; [ DW_TAG_base_type ]
-!13 = metadata !{metadata !14}
-!14 = metadata !{metadata !15, metadata !16}
+!13 = metadata !{metadata !15, metadata !16}
!15 = metadata !{i32 721153, metadata !5, metadata !"argc", metadata !6, i32 16777217, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!16 = metadata !{i32 721153, metadata !5, metadata !"argv", metadata !6, i32 33554433, metadata !10, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!17 = metadata !{i32 1, i32 14, metadata !5, null}
diff --git a/test/CodeGen/PowerPC/early-ret2.ll b/test/CodeGen/PowerPC/early-ret2.ll
index a274e2c2658f..17847770a831 100644
--- a/test/CodeGen/PowerPC/early-ret2.ll
+++ b/test/CodeGen/PowerPC/early-ret2.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-crbits | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-CRB
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -10,13 +11,16 @@ while.body.lr.ph: ; preds = %entry
br i1 undef, label %while.end, label %while.body
while.body: ; preds = %while.body, %while.body.lr.ph
- br i1 false, label %while.end, label %while.body, !llvm.vectorizer.already_vectorized !0
+ br i1 false, label %while.end, label %while.body, !llvm.loop.vectorize.already_vectorized !0
while.end: ; preds = %while.body, %while.body.lr.ph, %entry
ret void
; CHECK: @_Z8example3iPiS_
; CHECK: bnelr
+
+; CHECK-CRB: @_Z8example3iPiS_
+; CHECK-CRB: bclr 12,
}
attributes #0 = { noinline nounwind }
diff --git a/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll b/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
index db0d8ed0ffa4..ac41e8c27700 100644
--- a/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
+++ b/test/CodeGen/PowerPC/fast-isel-conversion-p5.ll
@@ -116,18 +116,6 @@ entry:
ret void
}
-define void @fptoui_float_i64(float %a) nounwind ssp {
-entry:
-; ELF64: fptoui_float_i64
- %b.addr = alloca i64, align 4
- %conv = fptoui float %a to i64
-; ELF64: fctiduz
-; ELF64: stfd
-; ELF64: ld
- store i64 %conv, i64* %b.addr, align 4
- ret void
-}
-
define void @fptoui_double_i32(double %a) nounwind ssp {
entry:
; ELF64: fptoui_double_i32
@@ -140,14 +128,3 @@ entry:
ret void
}
-define void @fptoui_double_i64(double %a) nounwind ssp {
-entry:
-; ELF64: fptoui_double_i64
- %b.addr = alloca i64, align 8
- %conv = fptoui double %a to i64
-; ELF64: fctiduz
-; ELF64: stfd
-; ELF64: ld
- store i64 %conv, i64* %b.addr, align 8
- ret void
-}
diff --git a/test/CodeGen/PowerPC/fast-isel-conversion.ll b/test/CodeGen/PowerPC/fast-isel-conversion.ll
index a31c31210c39..5e00675c0398 100644
--- a/test/CodeGen/PowerPC/fast-isel-conversion.ll
+++ b/test/CodeGen/PowerPC/fast-isel-conversion.ll
@@ -1,15 +1,24 @@
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
+; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=970 | FileCheck %s --check-prefix=PPC970
+
+;; Tests for 970 don't use -fast-isel-abort because we intentionally punt
+;; to SelectionDAG in some cases.
; Test sitofp
define void @sitofp_single_i64(i64 %a, float %b) nounwind ssp {
entry:
; ELF64: sitofp_single_i64
+; PPC970: sitofp_single_i64
%b.addr = alloca float, align 4
%conv = sitofp i64 %a to float
; ELF64: std
; ELF64: lfd
; ELF64: fcfids
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -17,11 +26,16 @@ entry:
define void @sitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ELF64: sitofp_single_i32
+; PPC970: sitofp_single_i32
%b.addr = alloca float, align 4
%conv = sitofp i32 %a to float
; ELF64: std
; ELF64: lfiwax
; ELF64: fcfids
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -29,12 +43,18 @@ entry:
define void @sitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ELF64: sitofp_single_i16
+; PPC970: sitofp_single_i16
%b.addr = alloca float, align 4
%conv = sitofp i16 %a to float
; ELF64: extsh
; ELF64: std
; ELF64: lfd
; ELF64: fcfids
+; PPC970: extsh
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -42,12 +62,18 @@ entry:
define void @sitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ELF64: sitofp_single_i8
+; PPC970: sitofp_single_i8
%b.addr = alloca float, align 4
%conv = sitofp i8 %a to float
; ELF64: extsb
; ELF64: std
; ELF64: lfd
; ELF64: fcfids
+; PPC970: extsb
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -55,11 +81,15 @@ entry:
define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i32
+; PPC970: sitofp_double_i32
%b.addr = alloca double, align 8
%conv = sitofp i32 %a to double
; ELF64: std
; ELF64: lfiwax
; ELF64: fcfid
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -67,11 +97,15 @@ entry:
define void @sitofp_double_i64(i64 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i64
+; PPC970: sitofp_double_i64
%b.addr = alloca double, align 8
%conv = sitofp i64 %a to double
; ELF64: std
; ELF64: lfd
; ELF64: fcfid
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -79,12 +113,17 @@ entry:
define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i16
+; PPC970: sitofp_double_i16
%b.addr = alloca double, align 8
%conv = sitofp i16 %a to double
; ELF64: extsh
; ELF64: std
; ELF64: lfd
; ELF64: fcfid
+; PPC970: extsh
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -92,12 +131,17 @@ entry:
define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ELF64: sitofp_double_i8
+; PPC970: sitofp_double_i8
%b.addr = alloca double, align 8
%conv = sitofp i8 %a to double
; ELF64: extsb
; ELF64: std
; ELF64: lfd
; ELF64: fcfid
+; PPC970: extsb
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -107,11 +151,13 @@ entry:
define void @uitofp_single_i64(i64 %a, float %b) nounwind ssp {
entry:
; ELF64: uitofp_single_i64
+; PPC970: uitofp_single_i64
%b.addr = alloca float, align 4
%conv = uitofp i64 %a to float
; ELF64: std
; ELF64: lfd
; ELF64: fcfidus
+; PPC970-NOT: fcfidus
store float %conv, float* %b.addr, align 4
ret void
}
@@ -119,11 +165,14 @@ entry:
define void @uitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ELF64: uitofp_single_i32
+; PPC970: uitofp_single_i32
%b.addr = alloca float, align 4
%conv = uitofp i32 %a to float
; ELF64: std
; ELF64: lfiwzx
; ELF64: fcfidus
+; PPC970-NOT: lfiwzx
+; PPC970-NOT: fcfidus
store float %conv, float* %b.addr, align 4
ret void
}
@@ -131,12 +180,18 @@ entry:
define void @uitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ELF64: uitofp_single_i16
+; PPC970: uitofp_single_i16
%b.addr = alloca float, align 4
%conv = uitofp i16 %a to float
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
; ELF64: std
; ELF64: lfd
; ELF64: fcfidus
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 16, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -144,12 +199,18 @@ entry:
define void @uitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ELF64: uitofp_single_i8
+; PPC970: uitofp_single_i8
%b.addr = alloca float, align 4
%conv = uitofp i8 %a to float
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
; ELF64: std
; ELF64: lfd
; ELF64: fcfidus
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
+; PPC970: frsp
store float %conv, float* %b.addr, align 4
ret void
}
@@ -157,11 +218,13 @@ entry:
define void @uitofp_double_i64(i64 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i64
+; PPC970: uitofp_double_i64
%b.addr = alloca double, align 8
%conv = uitofp i64 %a to double
; ELF64: std
; ELF64: lfd
; ELF64: fcfidu
+; PPC970-NOT: fcfidu
store double %conv, double* %b.addr, align 8
ret void
}
@@ -169,11 +232,14 @@ entry:
define void @uitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i32
+; PPC970: uitofp_double_i32
%b.addr = alloca double, align 8
%conv = uitofp i32 %a to double
; ELF64: std
; ELF64: lfiwzx
; ELF64: fcfidu
+; PPC970-NOT: lfiwzx
+; PPC970-NOT: fcfidu
store double %conv, double* %b.addr, align 8
ret void
}
@@ -181,12 +247,17 @@ entry:
define void @uitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i16
+; PPC970: uitofp_double_i16
%b.addr = alloca double, align 8
%conv = uitofp i16 %a to double
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 48
; ELF64: std
; ELF64: lfd
; ELF64: fcfidu
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 16, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -194,12 +265,17 @@ entry:
define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ELF64: uitofp_double_i8
+; PPC970: uitofp_double_i8
%b.addr = alloca double, align 8
%conv = uitofp i8 %a to double
; ELF64: rldicl {{[0-9]+}}, {{[0-9]+}}, 0, 56
; ELF64: std
; ELF64: lfd
; ELF64: fcfidu
+; PPC970: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; PPC970: std
+; PPC970: lfd
+; PPC970: fcfid
store double %conv, double* %b.addr, align 8
ret void
}
@@ -209,11 +285,15 @@ entry:
define void @fptosi_float_i32(float %a) nounwind ssp {
entry:
; ELF64: fptosi_float_i32
+; PPC970: fptosi_float_i32
%b.addr = alloca i32, align 4
%conv = fptosi float %a to i32
; ELF64: fctiwz
; ELF64: stfd
; ELF64: lwa
+; PPC970: fctiwz
+; PPC970: stfd
+; PPC970: lwa
store i32 %conv, i32* %b.addr, align 4
ret void
}
@@ -221,11 +301,15 @@ entry:
define void @fptosi_float_i64(float %a) nounwind ssp {
entry:
; ELF64: fptosi_float_i64
+; PPC970: fptosi_float_i64
%b.addr = alloca i64, align 4
%conv = fptosi float %a to i64
; ELF64: fctidz
; ELF64: stfd
; ELF64: ld
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: ld
store i64 %conv, i64* %b.addr, align 4
ret void
}
@@ -233,11 +317,15 @@ entry:
define void @fptosi_double_i32(double %a) nounwind ssp {
entry:
; ELF64: fptosi_double_i32
+; PPC970: fptosi_double_i32
%b.addr = alloca i32, align 8
%conv = fptosi double %a to i32
; ELF64: fctiwz
; ELF64: stfd
; ELF64: lwa
+; PPC970: fctiwz
+; PPC970: stfd
+; PPC970: lwa
store i32 %conv, i32* %b.addr, align 8
ret void
}
@@ -245,11 +333,15 @@ entry:
define void @fptosi_double_i64(double %a) nounwind ssp {
entry:
; ELF64: fptosi_double_i64
+; PPC970: fptosi_double_i64
%b.addr = alloca i64, align 8
%conv = fptosi double %a to i64
; ELF64: fctidz
; ELF64: stfd
; ELF64: ld
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: ld
store i64 %conv, i64* %b.addr, align 8
ret void
}
@@ -259,11 +351,15 @@ entry:
define void @fptoui_float_i32(float %a) nounwind ssp {
entry:
; ELF64: fptoui_float_i32
+; PPC970: fptoui_float_i32
%b.addr = alloca i32, align 4
%conv = fptoui float %a to i32
; ELF64: fctiwuz
; ELF64: stfd
; ELF64: lwz
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: lwz
store i32 %conv, i32* %b.addr, align 4
ret void
}
@@ -271,11 +367,13 @@ entry:
define void @fptoui_float_i64(float %a) nounwind ssp {
entry:
; ELF64: fptoui_float_i64
+; PPC970: fptoui_float_i64
%b.addr = alloca i64, align 4
%conv = fptoui float %a to i64
; ELF64: fctiduz
; ELF64: stfd
; ELF64: ld
+; PPC970-NOT: fctiduz
store i64 %conv, i64* %b.addr, align 4
ret void
}
@@ -283,11 +381,15 @@ entry:
define void @fptoui_double_i32(double %a) nounwind ssp {
entry:
; ELF64: fptoui_double_i32
+; PPC970: fptoui_double_i32
%b.addr = alloca i32, align 8
%conv = fptoui double %a to i32
; ELF64: fctiwuz
; ELF64: stfd
; ELF64: lwz
+; PPC970: fctidz
+; PPC970: stfd
+; PPC970: lwz
store i32 %conv, i32* %b.addr, align 8
ret void
}
@@ -295,11 +397,13 @@ entry:
define void @fptoui_double_i64(double %a) nounwind ssp {
entry:
; ELF64: fptoui_double_i64
+; PPC970: fptoui_double_i64
%b.addr = alloca i64, align 8
%conv = fptoui double %a to i64
; ELF64: fctiduz
; ELF64: stfd
; ELF64: ld
+; PPC970-NOT: fctiduz
store i64 %conv, i64* %b.addr, align 8
ret void
}
diff --git a/test/CodeGen/PowerPC/float-to-int.ll b/test/CodeGen/PowerPC/float-to-int.ll
index 39cd4f929f8d..9c897cb96e7c 100644
--- a/test/CodeGen/PowerPC/float-to-int.ll
+++ b/test/CodeGen/PowerPC/float-to-int.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -12,6 +13,12 @@ define i64 @foo(float %a) nounwind {
; CHECK: stfd [[REG]],
; CHECK: ld 3,
; CHECK: blr
+
+; CHECK-VSX: @foo
+; CHECK-VSX: xscvdpsxds [[REG:[0-9]+]], 1
+; CHECK-VSX: stxsdx [[REG]],
+; CHECK-VSX: ld 3,
+; CHECK-VSX: blr
}
define i64 @foo2(double %a) nounwind {
@@ -23,6 +30,12 @@ define i64 @foo2(double %a) nounwind {
; CHECK: stfd [[REG]],
; CHECK: ld 3,
; CHECK: blr
+
+; CHECK-VSX: @foo2
+; CHECK-VSX: xscvdpsxds [[REG:[0-9]+]], 1
+; CHECK-VSX: stxsdx [[REG]],
+; CHECK-VSX: ld 3,
+; CHECK-VSX: blr
}
define i64 @foo3(float %a) nounwind {
@@ -34,6 +47,12 @@ define i64 @foo3(float %a) nounwind {
; CHECK: stfd [[REG]],
; CHECK: ld 3,
; CHECK: blr
+
+; CHECK-VSX: @foo3
+; CHECK-VSX: xscvdpuxds [[REG:[0-9]+]], 1
+; CHECK-VSX: stxsdx [[REG]],
+; CHECK-VSX: ld 3,
+; CHECK-VSX: blr
}
define i64 @foo4(double %a) nounwind {
@@ -45,6 +64,12 @@ define i64 @foo4(double %a) nounwind {
; CHECK: stfd [[REG]],
; CHECK: ld 3,
; CHECK: blr
+
+; CHECK-VSX: @foo4
+; CHECK-VSX: xscvdpuxds [[REG:[0-9]+]], 1
+; CHECK-VSX: stxsdx [[REG]],
+; CHECK-VSX: ld 3,
+; CHECK-VSX: blr
}
define i32 @goo(float %a) nounwind {
@@ -56,6 +81,12 @@ define i32 @goo(float %a) nounwind {
; CHECK: stfiwx [[REG]],
; CHECK: lwz 3,
; CHECK: blr
+
+; CHECK-VSX: @goo
+; CHECK-VSX: xscvdpsxws [[REG:[0-9]+]], 1
+; CHECK-VSX: stfiwx [[REG]],
+; CHECK-VSX: lwz 3,
+; CHECK-VSX: blr
}
define i32 @goo2(double %a) nounwind {
@@ -67,6 +98,12 @@ define i32 @goo2(double %a) nounwind {
; CHECK: stfiwx [[REG]],
; CHECK: lwz 3,
; CHECK: blr
+
+; CHECK-VSX: @goo2
+; CHECK-VSX: xscvdpsxws [[REG:[0-9]+]], 1
+; CHECK-VSX: stfiwx [[REG]],
+; CHECK-VSX: lwz 3,
+; CHECK-VSX: blr
}
define i32 @goo3(float %a) nounwind {
@@ -78,6 +115,12 @@ define i32 @goo3(float %a) nounwind {
; CHECK: stfiwx [[REG]],
; CHECK: lwz 3,
; CHECK: blr
+
+; CHECK-VSX: @goo3
+; CHECK-VSX: xscvdpuxws [[REG:[0-9]+]], 1
+; CHECK-VSX: stfiwx [[REG]],
+; CHECK-VSX: lwz 3,
+; CHECK-VSX: blr
}
define i32 @goo4(double %a) nounwind {
@@ -89,5 +132,11 @@ define i32 @goo4(double %a) nounwind {
; CHECK: stfiwx [[REG]],
; CHECK: lwz 3,
; CHECK: blr
+
+; CHECK-VSX: @goo4
+; CHECK-VSX: xscvdpuxws [[REG:[0-9]+]], 1
+; CHECK-VSX: stfiwx [[REG]],
+; CHECK-VSX: lwz 3,
+; CHECK-VSX: blr
}
diff --git a/test/CodeGen/PowerPC/fold-zero.ll b/test/CodeGen/PowerPC/fold-zero.ll
index c7ec6fade53e..c1eea43017d9 100644
--- a/test/CodeGen/PowerPC/fold-zero.ll
+++ b/test/CodeGen/PowerPC/fold-zero.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-crbits | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck -check-prefix=CHECK-CRB %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -12,3 +13,13 @@ define i32 @test1(i1 %a, i32 %c) nounwind {
; CHECK: blr
}
+define i32 @test2(i1 %a, i32 %c) nounwind {
+ %x = select i1 %a, i32 0, i32 %c
+ ret i32 %x
+
+; CHECK-CRB: @test2
+; CHECK-CRB-NOT: li {{[0-9]+}}, 0
+; CHECK-CRB: isel 3, 0,
+; CHECK-CRB: blr
+}
+
diff --git a/test/CodeGen/PowerPC/func-addr.ll b/test/CodeGen/PowerPC/func-addr.ll
new file mode 100644
index 000000000000..4533c6258a52
--- /dev/null
+++ b/test/CodeGen/PowerPC/func-addr.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple powerpc64-linux < %s | FileCheck %s
+; RUN: llc -O0 -mtriple powerpc64-linux < %s | FileCheck %s
+
+define void @foo() {
+ ret void
+}
+declare i32 @bar(i8*)
+
+; CHECK-LABEL: {{^}}zed:
+; CHECK: addis 3, 2, foo@toc@ha
+; CHECK-NEXT: addi 3, 3, foo@toc@l
+; CHECK-NEXT: bl bar
+
+define void @zed() {
+ call i32 @bar(i8* bitcast (void ()* @foo to i8*))
+ ret void
+}
diff --git a/test/CodeGen/PowerPC/hello-reloc.s b/test/CodeGen/PowerPC/hello-reloc.s
index 9bbfb3817890..97dfbb5362fa 100644
--- a/test/CodeGen/PowerPC/hello-reloc.s
+++ b/test/CodeGen/PowerPC/hello-reloc.s
@@ -1,14 +1,10 @@
; This tests for the basic implementation of PPCMachObjectWriter.cpp,
; which is responsible for writing mach-o relocation entries for (PIC)
; PowerPC objects.
-; NOTE: Darwin PPC asm syntax is not yet supported by PPCAsmParser,
-; so this test case uses ELF PPC asm syntax to produce a mach-o object.
-; Once PPCAsmParser supports darwin asm syntax, this test case should
-; be updated accordingly.
; RUN: llvm-mc -filetype=obj -relocation-model=pic -mcpu=g4 -triple=powerpc-apple-darwin8 %s -o - | llvm-readobj -relocations | FileCheck -check-prefix=DARWIN-G4-DUMP %s
-; .machine ppc7400
+ .machine ppc7400
.section __TEXT,__textcoal_nt,coalesced,pure_instructions
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.section __TEXT,__text,regular,pure_instructions
@@ -16,40 +12,40 @@
.align 4
_main: ; @main
; BB#0: ; %entry
- mflr 0
- stw 31, -4(1)
- stw 0, 8(1)
- stwu 1, -80(1)
+ mflr r0
+ stw r31, -4(r1)
+ stw r0, 8(r1)
+ stwu r1, -80(r1)
bl L0$pb
L0$pb:
- mr 31, 1
- li 5, 0
+ mr r31, r1
+ li r5, 0
mflr 2
- stw 3, 68(31)
- stw 5, 72(31)
- stw 4, 64(31)
- addis 2, 2, (L_.str-L0$pb)@ha
- la 3, (L_.str-L0$pb)@l(2)
+ stw r3, 68(r31)
+ stw r5, 72(r31)
+ stw r4, 64(r31)
+ addis r2, r2, ha16(L_.str-L0$pb)
+ la r3, lo16(L_.str-L0$pb)(r2)
bl L_puts$stub
- li 3, 0
- addi 1, 1, 80
- lwz 0, 8(1)
- lwz 31, -4(1)
- mtlr 0
+ li r3, 0
+ addi r1, r1, 80
+ lwz r0, 8(r1)
+ lwz r31, -4(r1)
+ mtlr r0
blr
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align 4
L_puts$stub:
.indirect_symbol _puts
- mflr 0
+ mflr r0
bcl 20, 31, L_puts$stub$tmp
L_puts$stub$tmp:
- mflr 11
- addis 11, 11, (L_puts$lazy_ptr-L_puts$stub$tmp)@ha
- mtlr 0
- lwzu 12, (L_puts$lazy_ptr-L_puts$stub$tmp)@l(11)
- mtctr 12
+ mflr r11
+ addis r11, r11, ha16(L_puts$lazy_ptr-L_puts$stub$tmp)
+ mtlr r0
+ lwzu r12, lo16(L_puts$lazy_ptr-L_puts$stub$tmp)(r11)
+ mtctr r12
bctr
.section __DATA,__la_symbol_ptr,lazy_symbol_pointers
L_puts$lazy_ptr:
@@ -66,17 +62,17 @@ L_.str: ; @.str
; DARWIN-G4-DUMP:AddressSize: 32bit
; DARWIN-G4-DUMP:Relocations [
; DARWIN-G4-DUMP: Section __text {
-; DARWIN-G4-DUMP: 0x34 1 2 0 PPC_RELOC_BR24 0 -
-; DARWIN-G4-DUMP: 0x30 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 _main
-; DARWIN-G4-DUMP: 0x2C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x60 0 2 n/a PPC_RELOC_PAIR 1 _main
+; DARWIN-G4-DUMP: 0x34 1 2 0 PPC_RELOC_BR24 0 0x3
+; DARWIN-G4-DUMP: 0x30 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x74
+; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0x14
+; DARWIN-G4-DUMP: 0x2C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x74
+; DARWIN-G4-DUMP: 0x60 0 2 n/a PPC_RELOC_PAIR 1 0x14
; DARWIN-G4-DUMP: }
; DARWIN-G4-DUMP: Section __picsymbolstub1 {
-; DARWIN-G4-DUMP: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 _main
-; DARWIN-G4-DUMP: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 _main
-; DARWIN-G4-DUMP: 0x18 0 2 n/a PPC_RELOC_PAIR 1 _main
+; DARWIN-G4-DUMP: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x70
+; DARWIN-G4-DUMP: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0x58
+; DARWIN-G4-DUMP: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x70
+; DARWIN-G4-DUMP: 0x18 0 2 n/a PPC_RELOC_PAIR 1 0x58
; DARWIN-G4-DUMP: }
; DARWIN-G4-DUMP: Section __la_symbol_ptr {
; DARWIN-G4-DUMP: 0x0 0 2 1 PPC_RELOC_VANILLA 0 dyld_stub_binding_helper
diff --git a/test/CodeGen/PowerPC/i1-to-double.ll b/test/CodeGen/PowerPC/i1-to-double.ll
new file mode 100644
index 000000000000..e3d9fc2ab228
--- /dev/null
+++ b/test/CodeGen/PowerPC/i1-to-double.ll
@@ -0,0 +1,21 @@
+; RUN: llc -march=ppc32 -mcpu=ppc32 -mtriple=powerpc-unknown-linux-gnu < %s | FileCheck %s
+define double @test(i1 %X) {
+ %Y = uitofp i1 %X to double
+ ret double %Y
+}
+
+; CHECK-LABEL: @test
+
+; CHECK: andi. {{[0-9]+}}, 3, 1
+; CHECK: bc 12, 1,
+
+; CHECK: li 3, .LCP[[L1:[A-Z0-9_]+]]@l
+; CHECK: addis 3, 3, .LCP[[L1]]@ha
+; CHECK: lfs 1, 0(3)
+; CHECK: blr
+
+; CHECK: li 3, .LCP[[L2:[A-Z0-9_]+]]@l
+; CHECK: addis 3, 3, .LCP[[L2]]@ha
+; CHECK: lfs 1, 0(3)
+; CHECK: blr
+
diff --git a/test/CodeGen/PowerPC/i32-to-float.ll b/test/CodeGen/PowerPC/i32-to-float.ll
index 2707d0352de1..371f4e858dc4 100644
--- a/test/CodeGen/PowerPC/i32-to-float.ll
+++ b/test/CodeGen/PowerPC/i32-to-float.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5 | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr6 | FileCheck -check-prefix=CHECK-PWR6 %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 | FileCheck -check-prefix=CHECK-A2 %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -29,6 +30,12 @@ entry:
; CHECK-A2: lfiwax [[REG:[0-9]+]],
; CHECK-A2: fcfids 1, [[REG]]
; CHECK-A2: blr
+
+; CHECK-VSX: @foo
+; CHECK-VSX: stw 3,
+; CHECK-VSX: lfiwax [[REG:[0-9]+]],
+; CHECK-VSX: fcfids 1, [[REG]]
+; CHECK-VSX: blr
}
define double @goo(i32 %a) nounwind {
@@ -54,6 +61,12 @@ entry:
; CHECK-A2: lfiwax [[REG:[0-9]+]],
; CHECK-A2: fcfid 1, [[REG]]
; CHECK-A2: blr
+
+; CHECK-VSX: @goo
+; CHECK-VSX: stw 3,
+; CHECK-VSX: lfiwax [[REG:[0-9]+]],
+; CHECK-VSX: xscvsxddp 1, [[REG]]
+; CHECK-VSX: blr
}
define float @foou(i32 %a) nounwind {
@@ -66,6 +79,12 @@ entry:
; CHECK-A2: lfiwzx [[REG:[0-9]+]],
; CHECK-A2: fcfidus 1, [[REG]]
; CHECK-A2: blr
+
+; CHECK-VSX: @foou
+; CHECK-VSX: stw 3,
+; CHECK-VSX: lfiwzx [[REG:[0-9]+]],
+; CHECK-VSX: fcfidus 1, [[REG]]
+; CHECK-VSX: blr
}
define double @goou(i32 %a) nounwind {
@@ -78,5 +97,11 @@ entry:
; CHECK-A2: lfiwzx [[REG:[0-9]+]],
; CHECK-A2: fcfidu 1, [[REG]]
; CHECK-A2: blr
+
+; CHECK-VSX: @goou
+; CHECK-VSX: stw 3,
+; CHECK-VSX: lfiwzx [[REG:[0-9]+]],
+; CHECK-VSX: xscvuxddp 1, [[REG]]
+; CHECK-VSX: blr
}
diff --git a/test/CodeGen/PowerPC/i64-to-float.ll b/test/CodeGen/PowerPC/i64-to-float.ll
index b81d109e7f45..025a875c1907 100644
--- a/test/CodeGen/PowerPC/i64-to-float.ll
+++ b/test/CodeGen/PowerPC/i64-to-float.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -12,6 +13,12 @@ entry:
; CHECK: lfd [[REG:[0-9]+]],
; CHECK: fcfids 1, [[REG]]
; CHECK: blr
+
+; CHECK-VSX: @foo
+; CHECK-VSX: std 3,
+; CHECK-VSX: lxsdx [[REG:[0-9]+]],
+; CHECK-VSX: fcfids 1, [[REG]]
+; CHECK-VSX: blr
}
define double @goo(i64 %a) nounwind {
@@ -24,6 +31,12 @@ entry:
; CHECK: lfd [[REG:[0-9]+]],
; CHECK: fcfid 1, [[REG]]
; CHECK: blr
+
+; CHECK-VSX: @goo
+; CHECK-VSX: std 3,
+; CHECK-VSX: lxsdx [[REG:[0-9]+]],
+; CHECK-VSX: xscvsxddp 1, [[REG]]
+; CHECK-VSX: blr
}
define float @foou(i64 %a) nounwind {
@@ -36,6 +49,12 @@ entry:
; CHECK: lfd [[REG:[0-9]+]],
; CHECK: fcfidus 1, [[REG]]
; CHECK: blr
+
+; CHECK-VSX: @foou
+; CHECK-VSX: std 3,
+; CHECK-VSX: lxsdx [[REG:[0-9]+]],
+; CHECK-VSX: fcfidus 1, [[REG]]
+; CHECK-VSX: blr
}
define double @goou(i64 %a) nounwind {
@@ -48,5 +67,11 @@ entry:
; CHECK: lfd [[REG:[0-9]+]],
; CHECK: fcfidu 1, [[REG]]
; CHECK: blr
+
+; CHECK-VSX: @goou
+; CHECK-VSX: std 3,
+; CHECK-VSX: lxsdx [[REG:[0-9]+]],
+; CHECK-VSX: xscvuxddp 1, [[REG]]
+; CHECK-VSX: blr
}
diff --git a/test/CodeGen/PowerPC/indexed-load.ll b/test/CodeGen/PowerPC/indexed-load.ll
new file mode 100644
index 000000000000..59fc058c9414
--- /dev/null
+++ b/test/CodeGen/PowerPC/indexed-load.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s | FileCheck %s
+
+; The SplitIndexingFromLoad transformation exposed an isel backend bug. This
+; testcase used to generate stwx 4, 3, 64. stwx does not have an
+; immediate-offset format (note the 64) and it should not be matched.
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+%class.test = type { [64 x i8], [5 x i8] }
+
+; CHECK-LABEL: f:
+; CHECK-NOT: stwx {{[0-9]+}}, {{[0-9]+}}, 64
+define void @f(%class.test* %this) {
+entry:
+ %Subminor.i.i = getelementptr inbounds %class.test* %this, i64 0, i32 1
+ %0 = bitcast [5 x i8]* %Subminor.i.i to i40*
+ %bf.load2.i.i = load i40* %0, align 4
+ %bf.clear7.i.i = and i40 %bf.load2.i.i, -8589934592
+ store i40 %bf.clear7.i.i, i40* %0, align 4
+ ret void
+}
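; A note on the instruction forms involved (background, not part of the patch): stwx is the
; X-form store, whose second and third operands are both registers, while the displacement
; form is the D-form stw; an operand of 64 in the stwx position would have to name a
; (non-existent) register rather than an offset. Roughly:
;
;   stwx 4, 3, 5     ; X-form: store r4 to MEM[r3 + r5] (register index)
;   stw  4, 64(3)    ; D-form: store r4 to MEM[r3 + 64] (immediate displacement)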
diff --git a/test/CodeGen/PowerPC/inlineasm-copy.ll b/test/CodeGen/PowerPC/inlineasm-copy.ll
index 59c338883561..0d5f6a6aeb97 100644
--- a/test/CodeGen/PowerPC/inlineasm-copy.ll
+++ b/test/CodeGen/PowerPC/inlineasm-copy.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc32 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=ppc32 -no-integrated-as -verify-machineinstrs | FileCheck %s
; CHECK-NOT: mr
define i32 @test(i32 %Y, i32 %X) {
diff --git a/test/CodeGen/PowerPC/jaggedstructs.ll b/test/CodeGen/PowerPC/jaggedstructs.ll
index 82d4fef10cb3..9365e581529a 100644
--- a/test/CodeGen/PowerPC/jaggedstructs.ll
+++ b/test/CodeGen/PowerPC/jaggedstructs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=pwr7 -O0 -fast-isel=false < %s | FileCheck %s
+; RUN: llc -mcpu=ppc64 -O0 -fast-isel=false < %s | FileCheck %s
; This tests receiving and re-passing parameters consisting of structures
; of size 3, 5, 6, and 7. They are to be found/placed right-adjusted in
diff --git a/test/CodeGen/PowerPC/lit.local.cfg b/test/CodeGen/PowerPC/lit.local.cfg
index 2e463005586f..5d33887ff0a4 100644
--- a/test/CodeGen/PowerPC/lit.local.cfg
+++ b/test/CodeGen/PowerPC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'PowerPC' in targets:
+if not 'PowerPC' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/PowerPC/lsa.ll b/test/CodeGen/PowerPC/lsa.ll
index 8a6338ef5a02..a892a4cf4140 100644
--- a/test/CodeGen/PowerPC/lsa.ll
+++ b/test/CodeGen/PowerPC/lsa.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=ppc64 | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/mature-mc-support.ll b/test/CodeGen/PowerPC/mature-mc-support.ll
new file mode 100644
index 000000000000..7c83e184a6f8
--- /dev/null
+++ b/test/CodeGen/PowerPC/mature-mc-support.ll
@@ -0,0 +1,27 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
+; FIXME: PowerPC doesn't use the integrated assembler by default in all cases
+; so we only test that -filetype=obj tries to parse the assembly.
+; FIXME: PowerPC doesn't appear to support -filetype=obj for ppc64le
+
+; SKIP: not llc -march=ppc32 < %s > /dev/null 2> %t1
+; SKIP: FileCheck %s < %t1
+
+; RUN: not llc -march=ppc32 -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
+; SKIP: not llc -march=ppc64 < %s > /dev/null 2> %t3
+; SKIP: FileCheck %s < %t3
+
+; RUN: not llc -march=ppc64 -filetype=obj < %s > /dev/null 2> %t4
+; RUN: FileCheck %s < %t4
+
+; SKIP: not llc -march=ppc64le < %s > /dev/null 2> %t5
+; SKIP: FileCheck %s < %t5
+
+; SKIP: not llc -march=ppc64le -filetype=obj < %s > /dev/null 2> %t6
+; SKIP: FileCheck %s < %t6
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/PowerPC/mcm-10.ll b/test/CodeGen/PowerPC/mcm-10.ll
index b479559b97f5..c3ab74725ce6 100644
--- a/test/CodeGen/PowerPC/mcm-10.ll
+++ b/test/CodeGen/PowerPC/mcm-10.ll
@@ -18,7 +18,8 @@ entry:
; CHECK-LABEL: test_fn_static:
; CHECK: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
-; CHECK: lwz {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
+; CHECK: lwa {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
+; CHECK-NOT: extsw
; CHECK: stw {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
; CHECK: .type [[VAR]],@object
; CHECK: .local [[VAR]]
diff --git a/test/CodeGen/PowerPC/mcm-11.ll b/test/CodeGen/PowerPC/mcm-11.ll
index c49e8655cf5b..033045c74c8a 100644
--- a/test/CodeGen/PowerPC/mcm-11.ll
+++ b/test/CodeGen/PowerPC/mcm-11.ll
@@ -18,7 +18,8 @@ entry:
; CHECK-LABEL: test_file_static:
; CHECK: addis [[REG1:[0-9]+]], 2, [[VAR:[a-z0-9A-Z_.]+]]@toc@ha
-; CHECK: lwz {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
+; CHECK: lwa {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
+; CHECK-NOT: extsw
; CHECK: stw {{[0-9]+}}, [[VAR]]@toc@l([[REG1]])
; CHECK: .type [[VAR]],@object
; CHECK: .data
diff --git a/test/CodeGen/PowerPC/mcm-obj-2.ll b/test/CodeGen/PowerPC/mcm-obj-2.ll
index a6e985545164..c42cf0c36ea8 100644
--- a/test/CodeGen/PowerPC/mcm-obj-2.ll
+++ b/test/CodeGen/PowerPC/mcm-obj-2.ll
@@ -22,7 +22,7 @@ entry:
; CHECK: Relocations [
; CHECK: Section (2) .rela.text {
; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM2:[^ ]+]]
-; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM2]]
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM2]]
; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM2]]
@gi = global i32 5, align 4
@@ -39,7 +39,7 @@ entry:
; accessing file-scope variable gi.
;
; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM3:[^ ]+]]
-; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM3]]
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM3]]
; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM3]]
define double @test_double_const() nounwind {
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r0.ll b/test/CodeGen/PowerPC/named-reg-alloc-r0.ll
new file mode 100644
index 000000000000..e683f99bd422
--- /dev/null
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r0.ll
@@ -0,0 +1,15 @@
+; RUN: not llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+
+define i32 @get_reg() nounwind {
+entry:
+; FIXME: Include an allocatable-specific error message
+; CHECK: Invalid register name global variable
+ %reg = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %reg
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"r0\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll b/test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll
new file mode 100644
index 000000000000..b047f9f92588
--- /dev/null
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+
+define i64 @get_reg() nounwind {
+entry:
+ %reg = call i64 @llvm.read_register.i64(metadata !0)
+ ret i64 %reg
+
+; CHECK-LABEL: @get_reg
+; CHECK: mr 3, 1
+
+; CHECK-DARWIN-LABEL: @get_reg
+; CHECK-DARWIN: mr r3, r1
+}
+
+declare i64 @llvm.read_register.i64(metadata) nounwind
+
+!0 = metadata !{metadata !"r1\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r1.ll b/test/CodeGen/PowerPC/named-reg-alloc-r1.ll
new file mode 100644
index 000000000000..9d0eb34caa5b
--- /dev/null
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r1.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+
+define i32 @get_reg() nounwind {
+entry:
+ %reg = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %reg
+
+; CHECK-LABEL: @get_reg
+; CHECK: mr 3, 1
+
+; CHECK-DARWIN-LABEL: @get_reg
+; CHECK-DARWIN: mr r3, r1
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"r1\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll b/test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll
new file mode 100644
index 000000000000..df5085bbf7df
--- /dev/null
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+
+define i64 @get_reg() nounwind {
+entry:
+ %reg = call i64 @llvm.read_register.i64(metadata !0)
+ ret i64 %reg
+
+; CHECK-LABEL: @get_reg
+; CHECK: mr 3, 13
+
+; CHECK-DARWIN-LABEL: @get_reg
+; CHECK-DARWIN: mr r3, r13
+}
+
+declare i64 @llvm.read_register.i64(metadata) nounwind
+
+!0 = metadata !{metadata !"r13\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r13.ll b/test/CodeGen/PowerPC/named-reg-alloc-r13.ll
new file mode 100644
index 000000000000..900ebb2f4854
--- /dev/null
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r13.ll
@@ -0,0 +1,18 @@
+; RUN: not llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+
+define i32 @get_reg() nounwind {
+entry:
+; FIXME: Include an allocatable-specific error message
+; CHECK-DARWIN: Invalid register name global variable
+ %reg = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %reg
+
+; CHECK-LABEL: @get_reg
+; CHECK: mr 3, 13
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"r13\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll b/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
new file mode 100644
index 000000000000..0da33fa5f19a
--- /dev/null
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
@@ -0,0 +1,17 @@
+; RUN: not llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+
+define i64 @get_reg() nounwind {
+entry:
+; FIXME: Include an allocatable-specific error message
+; CHECK-DARWIN: Invalid register name global variable
+ %reg = call i64 @llvm.read_register.i64(metadata !0)
+ ret i64 %reg
+
+; CHECK-LABEL: @get_reg
+; CHECK: mr 3, 2
+}
+
+declare i64 @llvm.read_register.i64(metadata) nounwind
+
+!0 = metadata !{metadata !"r2\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r2.ll b/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
new file mode 100644
index 000000000000..51e7e3ee0339
--- /dev/null
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
@@ -0,0 +1,18 @@
+; RUN: not llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+
+define i32 @get_reg() nounwind {
+entry:
+; FIXME: Include an allocatable-specific error message
+; CHECK-DARWIN: Invalid register name global variable
+ %reg = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %reg
+
+; CHECK-LABEL: @get_reg
+; CHECK: mr 3, 2
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"r2\00"}
diff --git a/test/CodeGen/PowerPC/optcmp.ll b/test/CodeGen/PowerPC/optcmp.ll
index 35aabfa52c1d..d929eae20608 100644
--- a/test/CodeGen/PowerPC/optcmp.ll
+++ b/test/CodeGen/PowerPC/optcmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -disable-ppc-cmp-opt=0 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -mattr=-crbits -disable-ppc-cmp-opt=0 | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll b/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
new file mode 100644
index 000000000000..6e0aec27b7c1
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc32-i1-vaarg.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=ppc32 -mcpu=ppc32 | FileCheck %s
+; RUN: llc < %s -march=ppc32 -mcpu=ppc32 -mtriple=powerpc-darwin | FileCheck %s -check-prefix=CHECK-D
+target triple = "powerpc-unknown-linux-gnu"
+
+declare void @printf(i8*, ...)
+
+define void @main() {
+ call void (i8*, ...)* @printf(i8* undef, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @main
+; CHECK-DAG: li 4, 0
+; CHECK-DAG: crxor 6, 6, 6
+; CHECK: bl printf
+
+; CHECK-D-LABEL: @main
+; CHECK-D: li r4, 0
+; CHECK-D: bl L_printf$stub
+
diff --git a/test/CodeGen/PowerPC/ppc32-lshrti3.ll b/test/CodeGen/PowerPC/ppc32-lshrti3.ll
new file mode 100644
index 000000000000..6e76feaf1b34
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc32-lshrti3.ll
@@ -0,0 +1,39 @@
+; RUN: llc -O=2 < %s -mtriple=powerpc-netbsd | FileCheck %s
+
+; CHECK-NOT: bl __lshrti3
+
+; ModuleID = 'lshrti3-ppc32.c'
+target datalayout = "E-m:e-p:32:32-i64:64-n32"
+target triple = "powerpc--netbsd"
+
+; Function Attrs: nounwind uwtable
+define i32 @fn1() #0 {
+entry:
+ %.promoted = load i72* inttoptr (i32 1 to i72*), align 4
+ br label %while.cond
+
+while.cond: ; preds = %while.cond, %entry
+ %bf.set3 = phi i72 [ %bf.set, %while.cond ], [ %.promoted, %entry ]
+ %bf.lshr = lshr i72 %bf.set3, 40
+ %bf.lshr.tr = trunc i72 %bf.lshr to i32
+ %bf.cast = and i32 %bf.lshr.tr, 65535
+ %dec = add nsw i32 %bf.lshr.tr, 65535
+ %0 = zext i32 %dec to i72
+ %bf.value = shl nuw i72 %0, 40
+ %bf.shl = and i72 %bf.value, 72056494526300160
+ %bf.clear2 = and i72 %bf.set3, -72056494526300161
+ %bf.set = or i72 %bf.shl, %bf.clear2
+ %tobool = icmp eq i32 %bf.cast, 0
+ br i1 %tobool, label %while.end, label %while.cond
+
+while.end: ; preds = %while.cond
+ %bf.set.lcssa = phi i72 [ %bf.set, %while.cond ]
+ store i72 %bf.set.lcssa, i72* inttoptr (i32 1 to i72*), align 4
+ ret i32 undef
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (213754)"}
diff --git a/test/CodeGen/PowerPC/ppc32-pic.ll b/test/CodeGen/PowerPC/ppc32-pic.ll
new file mode 100644
index 000000000000..5bb78a4655ae
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc32-pic.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -relocation-model=pic | FileCheck %s
+@foobar = common global i32 0, align 4
+
+define i32 @foo() {
+entry:
+ %0 = load i32* @foobar, align 4
+ ret i32 %0
+}
+
+; CHECK: [[POFF:\.L[0-9]+\$poff]]:
+; CHECK-NEXT: .long .L.TOC.-[[PB:\.L[0-9]+\$pb]]
+; CHECK-NEXT: foo:
+; CHECK: bl [[PB]]
+; CHECK-NEXT: [[PB]]:
+; CHECK: mflr 30
+; CHECK: lwz [[REG:[0-9]+]], [[POFF]]-[[PB]](30)
+; CHECK-NEXT: add 30, [[REG]], 30
+; CHECK: lwz [[VREG:[0-9]+]], [[VREF:\.LC[0-9]+]]-.L.TOC.(30)
+; CHECK: lwz {{[0-9]+}}, 0([[VREG]])
+; CHECK: [[VREF]]:
+; CHECK-NEXT: .long foobar
diff --git a/test/CodeGen/PowerPC/ppc32-vacopy.ll b/test/CodeGen/PowerPC/ppc32-vacopy.ll
index bc394125f135..fa540452ac28 100644
--- a/test/CodeGen/PowerPC/ppc32-vacopy.ll
+++ b/test/CodeGen/PowerPC/ppc32-vacopy.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple="powerpc-unknown-linux-gnu" < %s | FileCheck %s
+; RUN: llc -mtriple="powerpc-unknown-linux-gnu" -mcpu=ppc64 < %s | FileCheck %s
; PR15286
%va_list = type {i8, i8, i16, i8*, i8*}
diff --git a/test/CodeGen/PowerPC/ppc64-altivec-abi.ll b/test/CodeGen/PowerPC/ppc64-altivec-abi.ll
new file mode 100644
index 000000000000..0bed329f0e54
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-altivec-abi.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=ppc64 -mattr=+altivec | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Verify that in the 64-bit Linux ABI, vector arguments take up space
+; in the parameter save area.
+
+define i64 @callee(i64 %a, <4 x i32> %b, i64 %c, <4 x i32> %d, i64 %e) {
+entry:
+ ret i64 %e
+}
+; CHECK-LABEL: callee:
+; CHECK: ld 3, 112(1)
+
+define void @caller(i64 %x, <4 x i32> %y) {
+entry:
+ tail call void @test(i64 %x, <4 x i32> %y, i64 %x, <4 x i32> %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: caller:
+; CHECK: std 3, 112(1)
+
+declare void @test(i64, <4 x i32>, i64, <4 x i32>, i64)
+
diff --git a/test/CodeGen/PowerPC/ppc64-byval-align.ll b/test/CodeGen/PowerPC/ppc64-byval-align.ll
new file mode 100644
index 000000000000..0e73cf2b0e05
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-byval-align.ll
@@ -0,0 +1,56 @@
+; RUN: llc -O1 < %s -march=ppc64 -mcpu=pwr7 | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.test = type { i64, [8 x i8] }
+%struct.pad = type { [8 x i64] }
+
+@gt = common global %struct.test zeroinitializer, align 16
+@gp = common global %struct.pad zeroinitializer, align 8
+
+define signext i32 @callee1(i32 signext %x, %struct.test* byval align 16 nocapture readnone %y, i32 signext %z) {
+entry:
+ ret i32 %z
+}
+; CHECK-LABEL: @callee1
+; CHECK: mr 3, 7
+; CHECK: blr
+
+declare signext i32 @test1(i32 signext, %struct.test* byval align 16, i32 signext)
+define void @caller1(i32 signext %z) {
+entry:
+ %call = tail call signext i32 @test1(i32 signext 0, %struct.test* byval align 16 @gt, i32 signext %z)
+ ret void
+}
+; CHECK-LABEL: @caller1
+; CHECK: mr [[REG:[0-9]+]], 3
+; CHECK: mr 7, [[REG]]
+; CHECK: bl test1
+
+define i64 @callee2(%struct.pad* byval nocapture readnone %x, i32 signext %y, %struct.test* byval align 16 nocapture readonly %z) {
+entry:
+ %x1 = getelementptr inbounds %struct.test* %z, i64 0, i32 0
+ %0 = load i64* %x1, align 16
+ ret i64 %0
+}
+; CHECK-LABEL: @callee2
+; CHECK: ld [[REG:[0-9]+]], 128(1)
+; CHECK: mr 3, [[REG]]
+; CHECK: blr
+
+declare i64 @test2(%struct.pad* byval, i32 signext, %struct.test* byval align 16)
+define void @caller2(i64 %z) {
+entry:
+ %tmp = alloca %struct.test, align 16
+ %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test* %tmp, i64 0, i32 0
+ store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16
+ %call = call i64 @test2(%struct.pad* byval @gp, i32 signext 0, %struct.test* byval align 16 %tmp)
+ ret void
+}
+; CHECK-LABEL: @caller2
+; CHECK: std 3, [[OFF:[0-9]+]](1)
+; CHECK: ld [[REG:[0-9]+]], [[OFF]](1)
+; CHECK: std [[REG]], 128(1)
+; CHECK: bl test2
+
diff --git a/test/CodeGen/PowerPC/ppc64-calls.ll b/test/CodeGen/PowerPC/ppc64-calls.ll
index 1f3bb7111efd..31794be25beb 100644
--- a/test/CodeGen/PowerPC/ppc64-calls.ll
+++ b/test/CodeGen/PowerPC/ppc64-calls.ll
@@ -42,12 +42,18 @@ define void @test_indirect(void ()* nocapture %fp) nounwind {
ret void
}
-; Absolute vales should be have the TOC restore 'nop'
+; Absolute values must use the regular indirect call sequence.
+; The main purpose of this test is to ensure that BLA is not
+; used on 64-bit SVR4 (as it is, e.g., on Darwin).
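+; For reference, assuming the standard ELFv1 function-descriptor layout
+; (entry point, TOC pointer, environment pointer at offsets 0, 8, 16):
+; the checks below load the three descriptor fields from 1024, 1032 and
+; 1040, branch via mtctr/bctrl, and restore the TOC from its 40(1) slot.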
define void @test_abs() nounwind {
; CHECK-LABEL: test_abs:
tail call void inttoptr (i64 1024 to void ()*)() nounwind
-; CHECK: bla 1024
-; CHECK-NEXT: nop
+; CHECK: ld [[FP:[0-9]+]], 1024(0)
+; CHECK: ld 11, 1040(0)
+; CHECK: ld 2, 1032(0)
+; CHECK-NEXT: mtctr [[FP]]
+; CHECK-NEXT: bctrl
+; CHECK-NEXT: ld 2, 40(1)
ret void
}
diff --git a/test/CodeGen/PowerPC/ppc64-smallarg.ll b/test/CodeGen/PowerPC/ppc64-smallarg.ll
new file mode 100644
index 000000000000..0d5b078e217a
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-smallarg.ll
@@ -0,0 +1,59 @@
+; Verify that small structures and float arguments are passed in the
+; least significant part of a stack slot doubleword.
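+;
+; Offset sketch, assuming the ELFv1 48-byte linkage area: in @callee1 the
+; sret pointer takes the doubleword at 48, the 64-byte %pad struct covers
+; 56-119, and the small %x struct lands in the doubleword at 120; on
+; big-endian its least significant bytes are the high-addressed ones,
+; hence the 124(1) accesses below. The fourteenth float argument of
+; @callee2 likewise ends up at 156(1).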
+
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.large_arg = type { [8 x i64] }
+%struct.small_arg = type { i16, i8 }
+
+@gl = common global %struct.large_arg zeroinitializer, align 8
+@gs = common global %struct.small_arg zeroinitializer, align 2
+@gf = common global float 0.000000e+00, align 4
+
+define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) {
+entry:
+ %0 = bitcast %struct.small_arg* %x to i32*
+ %1 = bitcast %struct.small_arg* %agg.result to i32*
+ %2 = load i32* %0, align 2
+ store i32 %2, i32* %1, align 2
+ ret void
+}
+; CHECK: @callee1
+; CHECK: lwz {{[0-9]+}}, 124(1)
+; CHECK: blr
+
+define void @caller1() {
+entry:
+ %tmp = alloca %struct.small_arg, align 2
+ call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs)
+ ret void
+}
+; CHECK: @caller1
+; CHECK: stw {{[0-9]+}}, 124(1)
+; CHECK: bl test1
+
+declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval)
+
+define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
+entry:
+ ret float %x
+}
+; CHECK: @callee2
+; CHECK: lfs {{[0-9]+}}, 156(1)
+; CHECK: blr
+
+define void @caller2() {
+entry:
+ %0 = load float* @gf, align 4
+ %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
+ ret void
+}
+; CHECK: @caller2
+; CHECK: stfs {{[0-9]+}}, 156(1)
+; CHECK: bl test2
+
+declare float @test2(float, float, float, float, float, float, float, float, float, float, float, float, float, float)
+
diff --git a/test/CodeGen/PowerPC/ppc64le-aggregates.ll b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
new file mode 100644
index 000000000000..9eed623bacaa
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
@@ -0,0 +1,329 @@
+; RUN: llc < %s -march=ppc64le -mcpu=pwr8 -mattr=+altivec | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+;
+; Verify use of registers for float/vector aggregate return.
+;
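+; (Assuming the ELFv2 rule that homogeneous aggregates of up to eight
+; like elements are returned directly in floating-point or vector
+; registers, each function below should reduce to a bare blr.)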
+
+define [8 x float] @return_float([8 x float] %x) {
+entry:
+ ret [8 x float] %x
+}
+; CHECK-LABEL: @return_float
+; CHECK: %entry
+; CHECK-NEXT: blr
+
+define [8 x double] @return_double([8 x double] %x) {
+entry:
+ ret [8 x double] %x
+}
+; CHECK-LABEL: @return_double
+; CHECK: %entry
+; CHECK-NEXT: blr
+
+define [4 x ppc_fp128] @return_ppcf128([4 x ppc_fp128] %x) {
+entry:
+ ret [4 x ppc_fp128] %x
+}
+; CHECK-LABEL: @return_ppcf128
+; CHECK: %entry
+; CHECK-NEXT: blr
+
+define [8 x <4 x i32>] @return_v4i32([8 x <4 x i32>] %x) {
+entry:
+ ret [8 x <4 x i32>] %x
+}
+; CHECK-LABEL: @return_v4i32
+; CHECK: %entry
+; CHECK-NEXT: blr
+
+
+;
+; Verify amount of space taken up by aggregates in the parameter save area.
+;
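+; Rough arithmetic, assuming the 32-byte ELFv2 header (parameter save area
+; at 32(1)): each [7 x float] is 28 bytes padded to a doubleword multiple,
+; so the trailing i64 of @callee_float lands at 32 + 32 + 32 = 96(1); the
+; i128 and vector cases pad their aggregates to 16-byte alignment, pushing
+; the trailing i64 to 112(1). This is what the ld/std offsets below check.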
+
+define i64 @callee_float([7 x float] %a, [7 x float] %b, i64 %c) {
+entry:
+ ret i64 %c
+}
+; CHECK-LABEL: @callee_float
+; CHECK: ld 3, 96(1)
+; CHECK: blr
+
+define void @caller_float(i64 %x, [7 x float] %y) {
+entry:
+ tail call void @test_float([7 x float] %y, [7 x float] %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: @caller_float
+; CHECK: std 3, 96(1)
+; CHECK: bl test_float
+
+declare void @test_float([7 x float], [7 x float], i64)
+
+define i64 @callee_double(i64 %a, [7 x double] %b, i64 %c) {
+entry:
+ ret i64 %c
+}
+; CHECK-LABEL: @callee_double
+; CHECK: ld 3, 96(1)
+; CHECK: blr
+
+define void @caller_double(i64 %x, [7 x double] %y) {
+entry:
+ tail call void @test_double(i64 %x, [7 x double] %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: @caller_double
+; CHECK: std 3, 96(1)
+; CHECK: bl test_double
+
+declare void @test_double(i64, [7 x double], i64)
+
+define i64 @callee_ppcf128(i64 %a, [4 x ppc_fp128] %b, i64 %c) {
+entry:
+ ret i64 %c
+}
+; CHECK-LABEL: @callee_ppcf128
+; CHECK: ld 3, 104(1)
+; CHECK: blr
+
+define void @caller_ppcf128(i64 %x, [4 x ppc_fp128] %y) {
+entry:
+ tail call void @test_ppcf128(i64 %x, [4 x ppc_fp128] %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: @caller_ppcf128
+; CHECK: std 3, 104(1)
+; CHECK: bl test_ppcf128
+
+declare void @test_ppcf128(i64, [4 x ppc_fp128], i64)
+
+define i64 @callee_i64(i64 %a, [7 x i64] %b, i64 %c) {
+entry:
+ ret i64 %c
+}
+; CHECK-LABEL: @callee_i64
+; CHECK: ld 3, 96(1)
+; CHECK: blr
+
+define void @caller_i64(i64 %x, [7 x i64] %y) {
+entry:
+ tail call void @test_i64(i64 %x, [7 x i64] %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: @caller_i64
+; CHECK: std 3, 96(1)
+; CHECK: bl test_i64
+
+declare void @test_i64(i64, [7 x i64], i64)
+
+define i64 @callee_i128(i64 %a, [4 x i128] %b, i64 %c) {
+entry:
+ ret i64 %c
+}
+; CHECK-LABEL: @callee_i128
+; CHECK: ld 3, 112(1)
+; CHECK: blr
+
+define void @caller_i128(i64 %x, [4 x i128] %y) {
+entry:
+ tail call void @test_i128(i64 %x, [4 x i128] %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: @caller_i128
+; CHECK: std 3, 112(1)
+; CHECK: bl test_i128
+
+declare void @test_i128(i64, [4 x i128], i64)
+
+define i64 @callee_v4i32(i64 %a, [4 x <4 x i32>] %b, i64 %c) {
+entry:
+ ret i64 %c
+}
+; CHECK-LABEL: @callee_v4i32
+; CHECK: ld 3, 112(1)
+; CHECK: blr
+
+define void @caller_v4i32(i64 %x, [4 x <4 x i32>] %y) {
+entry:
+ tail call void @test_v4i32(i64 %x, [4 x <4 x i32>] %y, i64 %x)
+ ret void
+}
+; CHECK-LABEL: @caller_v4i32
+; CHECK: std 3, 112(1)
+; CHECK: bl test_v4i32
+
+declare void @test_v4i32(i64, [4 x <4 x i32>], i64)
+
+
+;
+; Verify handling of floating point arguments in GPRs
+;
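+; (Roughly: once f1-f13 are exhausted, the remaining float members travel
+; in the GPRs covering their parameter-save-area doublewords, with two
+; floats sharing a doubleword where both are present; the stfs/lwz and
+; sldi/or sequences checked below move the values between the FPR and
+; GPR views.)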
+
+%struct.float8 = type { [8 x float] }
+%struct.float5 = type { [5 x float] }
+%struct.float2 = type { [2 x float] }
+
+@g8 = common global %struct.float8 zeroinitializer, align 4
+@g5 = common global %struct.float5 zeroinitializer, align 4
+@g2 = common global %struct.float2 zeroinitializer, align 4
+
+define float @callee0([7 x float] %a, [7 x float] %b) {
+entry:
+ %b.extract = extractvalue [7 x float] %b, 6
+ ret float %b.extract
+}
+; CHECK-LABEL: @callee0
+; CHECK: stw 10, [[OFF:.*]](1)
+; CHECK: lfs 1, [[OFF]](1)
+; CHECK: blr
+
+define void @caller0([7 x float] %a) {
+entry:
+ tail call void @test0([7 x float] %a, [7 x float] %a)
+ ret void
+}
+; CHECK-LABEL: @caller0
+; CHECK-DAG: fmr 8, 1
+; CHECK-DAG: fmr 9, 2
+; CHECK-DAG: fmr 10, 3
+; CHECK-DAG: fmr 11, 4
+; CHECK-DAG: fmr 12, 5
+; CHECK-DAG: fmr 13, 6
+; CHECK-DAG: stfs 7, [[OFF:[0-9]+]](1)
+; CHECK-DAG: lwz 10, [[OFF]](1)
+; CHECK: bl test0
+
+declare void @test0([7 x float], [7 x float])
+
+define float @callee1([8 x float] %a, [8 x float] %b) {
+entry:
+ %b.extract = extractvalue [8 x float] %b, 7
+ ret float %b.extract
+}
+; CHECK-LABEL: @callee1
+; CHECK: rldicl [[REG:[0-9]+]], 10, 32, 32
+; CHECK: stw [[REG]], [[OFF:.*]](1)
+; CHECK: lfs 1, [[OFF]](1)
+; CHECK: blr
+
+define void @caller1([8 x float] %a) {
+entry:
+ tail call void @test1([8 x float] %a, [8 x float] %a)
+ ret void
+}
+; CHECK-LABEL: @caller1
+; CHECK-DAG: fmr 9, 1
+; CHECK-DAG: fmr 10, 2
+; CHECK-DAG: fmr 11, 3
+; CHECK-DAG: fmr 12, 4
+; CHECK-DAG: fmr 13, 5
+; CHECK-DAG: stfs 5, [[OFF0:[0-9]+]](1)
+; CHECK-DAG: stfs 6, [[OFF1:[0-9]+]](1)
+; CHECK-DAG: stfs 7, [[OFF2:[0-9]+]](1)
+; CHECK-DAG: stfs 8, [[OFF3:[0-9]+]](1)
+; CHECK-DAG: lwz [[REG0:[0-9]+]], [[OFF0]](1)
+; CHECK-DAG: lwz [[REG1:[0-9]+]], [[OFF1]](1)
+; CHECK-DAG: lwz [[REG2:[0-9]+]], [[OFF2]](1)
+; CHECK-DAG: lwz [[REG3:[0-9]+]], [[OFF3]](1)
+; CHECK-DAG: sldi [[REG1]], [[REG1]], 32
+; CHECK-DAG: sldi [[REG3]], [[REG3]], 32
+; CHECK-DAG: or 9, [[REG0]], [[REG1]]
+; CHECK-DAG: or 10, [[REG2]], [[REG3]]
+; CHECK: bl test1
+
+declare void @test1([8 x float], [8 x float])
+
+define float @callee2([8 x float] %a, [5 x float] %b, [2 x float] %c) {
+entry:
+ %c.extract = extractvalue [2 x float] %c, 1
+ ret float %c.extract
+}
+; CHECK-LABEL: @callee2
+; CHECK: rldicl [[REG:[0-9]+]], 10, 32, 32
+; CHECK: stw [[REG]], [[OFF:.*]](1)
+; CHECK: lfs 1, [[OFF]](1)
+; CHECK: blr
+
+define void @caller2() {
+entry:
+ %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
+ %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+ %2 = load [2 x float]* getelementptr inbounds (%struct.float2* @g2, i64 0, i32 0), align 4
+ tail call void @test2([8 x float] %0, [5 x float] %1, [2 x float] %2)
+ ret void
+}
+; CHECK-LABEL: @caller2
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK-DAG: lfs 1, 0([[REG]])
+; CHECK-DAG: lfs 2, 4([[REG]])
+; CHECK-DAG: lfs 3, 8([[REG]])
+; CHECK-DAG: lfs 4, 12([[REG]])
+; CHECK-DAG: lfs 5, 16([[REG]])
+; CHECK-DAG: lfs 6, 20([[REG]])
+; CHECK-DAG: lfs 7, 24([[REG]])
+; CHECK-DAG: lfs 8, 28([[REG]])
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK-DAG: lfs 9, 0([[REG]])
+; CHECK-DAG: lfs 10, 4([[REG]])
+; CHECK-DAG: lfs 11, 8([[REG]])
+; CHECK-DAG: lfs 12, 12([[REG]])
+; CHECK-DAG: lfs 13, 16([[REG]])
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK-DAG: lwz [[REG0:[0-9]+]], 0([[REG]])
+; CHECK-DAG: lwz [[REG1:[0-9]+]], 4([[REG]])
+; CHECK-DAG: sldi [[REG1]], [[REG1]], 32
+; CHECK-DAG: or 10, [[REG0]], [[REG1]]
+; CHECK: bl test2
+
+declare void @test2([8 x float], [5 x float], [2 x float])
+
+define double @callee3([8 x float] %a, [5 x float] %b, double %c) {
+entry:
+ ret double %c
+}
+; CHECK-LABEL: @callee3
+; CHECK: std 10, [[OFF:.*]](1)
+; CHECK: lfd 1, [[OFF]](1)
+; CHECK: blr
+
+define void @caller3(double %d) {
+entry:
+ %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
+ %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+ tail call void @test3([8 x float] %0, [5 x float] %1, double %d)
+ ret void
+}
+; CHECK-LABEL: @caller3
+; CHECK: stfd 1, [[OFF:.*]](1)
+; CHECK: ld 10, [[OFF]](1)
+; CHECK: bl test3
+
+declare void @test3([8 x float], [5 x float], double)
+
+define float @callee4([8 x float] %a, [5 x float] %b, float %c) {
+entry:
+ ret float %c
+}
+; CHECK-LABEL: @callee4
+; CHECK: stw 10, [[OFF:.*]](1)
+; CHECK: lfs 1, [[OFF]](1)
+; CHECK: blr
+
+define void @caller4(float %f) {
+entry:
+ %0 = load [8 x float]* getelementptr inbounds (%struct.float8* @g8, i64 0, i32 0), align 4
+ %1 = load [5 x float]* getelementptr inbounds (%struct.float5* @g5, i64 0, i32 0), align 4
+ tail call void @test4([8 x float] %0, [5 x float] %1, float %f)
+ ret void
+}
+; CHECK-LABEL: @caller4
+; CHECK: stfs 1, [[OFF:.*]](1)
+; CHECK: lwz 10, [[OFF]](1)
+; CHECK: bl test4
+
+declare void @test4([8 x float], [5 x float], float)
+
diff --git a/test/CodeGen/PowerPC/ppc64le-calls.ll b/test/CodeGen/PowerPC/ppc64le-calls.ll
new file mode 100644
index 000000000000..0d667dde96b4
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64le-calls.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=ppc64le -mcpu=pwr8 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+; Indirect calls require the full indirect-call sequence.
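+; (For reference, assuming ELFv2 conventions: the callee expects its own
+; address in r12 at its global entry point, and the caller's TOC pointer
+; is saved to and restored from the 24(1) slot, which is what the checks
+; below verify.)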
+define void @test_indirect(void ()* nocapture %fp) {
+; CHECK-LABEL: @test_indirect
+ tail call void %fp()
+; CHECK-DAG: std 2, 24(1)
+; CHECK-DAG: mr 12, 3
+; CHECK-DAG: mtctr 3
+; CHECK: bctrl
+; CHECK-NEXT: ld 2, 24(1)
+ ret void
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64le-crsave.ll b/test/CodeGen/PowerPC/ppc64le-crsave.ll
new file mode 100644
index 000000000000..17174d7ad764
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64le-crsave.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+@_ZTIi = external constant i8*
+declare i8* @__cxa_allocate_exception(i64)
+declare void @__cxa_throw(i8*, i8*, i8*)
+
+define void @crsave() {
+entry:
+ call void asm sideeffect "", "~{cr2}"()
+ call void asm sideeffect "", "~{cr3}"()
+ call void asm sideeffect "", "~{cr4}"()
+
+ %exception = call i8* @__cxa_allocate_exception(i64 4)
+ %0 = bitcast i8* %exception to i32*
+ store i32 0, i32* %0
+ call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+ unreachable
+
+return: ; No predecessors!
+ ret void
+}
+; CHECK-LABEL: @crsave
+; CHECK: .cfi_offset cr2, 8
+; CHECK: .cfi_offset cr3, 8
+; CHECK: .cfi_offset cr4, 8
+
diff --git a/test/CodeGen/PowerPC/ppc64le-localentry.ll b/test/CodeGen/PowerPC/ppc64le-localentry.ll
new file mode 100644
index 000000000000..4676ce8eadc6
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64le-localentry.ll
@@ -0,0 +1,46 @@
+; RUN: llc -march=ppc64le -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -march=ppc64le -mcpu=pwr8 -O0 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+@number64 = global i64 10, align 8
+
+; CHECK: .abiversion 2
+
+define i64 @use_toc(i64 %a) nounwind {
+entry:
+; CHECK-LABEL: @use_toc
+; CHECK-NEXT: .Ltmp[[TMP1:[0-9]+]]:
+; CHECK-NEXT: addis 2, 12, .TOC.-.Ltmp[[TMP1]]@ha
+; CHECK-NEXT: addi 2, 2, .TOC.-.Ltmp[[TMP1]]@l
+; CHECK-NEXT: .Ltmp[[TMP2:[0-9]+]]:
+; CHECK-NEXT: .localentry use_toc, .Ltmp[[TMP2]]-.Ltmp[[TMP1]]
+; CHECK-NEXT: %entry
+ %0 = load i64* @number64, align 8
+ %cmp = icmp eq i64 %0, %a
+ %conv1 = zext i1 %cmp to i64
+ ret i64 %conv1
+}
+
+declare void @callee()
+define void @use_toc_implicit() nounwind {
+entry:
+; CHECK-LABEL: @use_toc_implicit
+; CHECK-NEXT: .Ltmp[[TMP1:[0-9]+]]:
+; CHECK-NEXT: addis 2, 12, .TOC.-.Ltmp[[TMP1]]@ha
+; CHECK-NEXT: addi 2, 2, .TOC.-.Ltmp[[TMP1]]@l
+; CHECK-NEXT: .Ltmp[[TMP2:[0-9]+]]:
+; CHECK-NEXT: .localentry use_toc_implicit, .Ltmp[[TMP2]]-.Ltmp[[TMP1]]
+; CHECK-NEXT: %entry
+ call void @callee()
+ ret void
+}
+
+define i64 @no_toc(i64 %a) nounwind {
+entry:
+; CHECK-LABEL: @no_toc
+; CHECK-NEXT: %entry
+ ret i64 %a
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64le-smallarg.ll b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
new file mode 100644
index 000000000000..120c14039f99
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64le-smallarg.ll
@@ -0,0 +1,59 @@
+; Verify that small structures and float arguments are passed in the
+; least significant part of a stack slot doubleword.
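+;
+; Offset sketch, assuming the 32-byte ELFv2 header: in @callee1 the sret
+; pointer takes the doubleword at 32, the 64-byte %pad struct covers
+; 40-103, and the small %x struct lands in the doubleword at 104; on
+; little-endian the least significant bytes are the low-addressed ones,
+; hence the 104(1) accesses below. The fourteenth float argument of
+; @callee2 likewise lands at 136(1).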
+
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+%struct.large_arg = type { [8 x i64] }
+%struct.small_arg = type { i16, i8 }
+
+@gl = common global %struct.large_arg zeroinitializer, align 8
+@gs = common global %struct.small_arg zeroinitializer, align 2
+@gf = common global float 0.000000e+00, align 4
+
+define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) {
+entry:
+ %0 = bitcast %struct.small_arg* %x to i32*
+ %1 = bitcast %struct.small_arg* %agg.result to i32*
+ %2 = load i32* %0, align 2
+ store i32 %2, i32* %1, align 2
+ ret void
+}
+; CHECK: @callee1
+; CHECK: lwz {{[0-9]+}}, 104(1)
+; CHECK: blr
+
+define void @caller1() {
+entry:
+ %tmp = alloca %struct.small_arg, align 2
+ call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs)
+ ret void
+}
+; CHECK: @caller1
+; CHECK: stw {{[0-9]+}}, 104(1)
+; CHECK: bl test1
+
+declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval)
+
+define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) {
+entry:
+ ret float %x
+}
+; CHECK: @callee2
+; CHECK: lfs {{[0-9]+}}, 136(1)
+; CHECK: blr
+
+define void @caller2() {
+entry:
+ %0 = load float* @gf, align 4
+ %call = tail call float @test2(float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float %0)
+ ret void
+}
+; CHECK: @caller2
+; CHECK: stfs {{[0-9]+}}, 136(1)
+; CHECK: bl test2
+
+declare float @test2(float, float, float, float, float, float, float, float, float, float, float, float, float, float)
+
diff --git a/test/CodeGen/PowerPC/ppcf128-endian.ll b/test/CodeGen/PowerPC/ppcf128-endian.ll
new file mode 100644
index 000000000000..2a5f13a5c3da
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppcf128-endian.ll
@@ -0,0 +1,154 @@
+; RUN: llc -mcpu=pwr7 -mattr=+altivec < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+@g = common global ppc_fp128 0xM00000000000000000000000000000000, align 16
+
+define void @callee(ppc_fp128 %x) {
+entry:
+ %x.addr = alloca ppc_fp128, align 16
+ store ppc_fp128 %x, ppc_fp128* %x.addr, align 16
+ %0 = load ppc_fp128* %x.addr, align 16
+ store ppc_fp128 %0, ppc_fp128* @g, align 16
+ ret void
+}
+; CHECK: @callee
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: stfd 2, 8([[REG]])
+; CHECK: stfd 1, 0([[REG]])
+; CHECK: blr
+
+define void @caller() {
+entry:
+ %0 = load ppc_fp128* @g, align 16
+ call void @test(ppc_fp128 %0)
+ ret void
+}
+; CHECK: @caller
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: lfd 2, 8([[REG]])
+; CHECK: lfd 1, 0([[REG]])
+; CHECK: bl test
+
+declare void @test(ppc_fp128)
+
+define void @caller_const() {
+entry:
+ call void @test(ppc_fp128 0xM3FF00000000000000000000000000000)
+ ret void
+}
+; CHECK: .LCPI[[LC:[0-9]+]]_0:
+; CHECK: .long 1065353216
+; CHECK: .LCPI[[LC]]_1:
+; CHECK: .long 0
+; CHECK: @caller_const
+; CHECK: addi [[REG0:[0-9]+]], {{[0-9]+}}, .LCPI[[LC]]_0
+; CHECK: addi [[REG1:[0-9]+]], {{[0-9]+}}, .LCPI[[LC]]_1
+; CHECK: lfs 1, 0([[REG0]])
+; CHECK: lfs 2, 0([[REG1]])
+; CHECK: bl test
+
+define ppc_fp128 @result() {
+entry:
+ %0 = load ppc_fp128* @g, align 16
+ ret ppc_fp128 %0
+}
+; CHECK: @result
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: lfd 1, 0([[REG]])
+; CHECK: lfd 2, 8([[REG]])
+; CHECK: blr
+
+define void @use_result() {
+entry:
+ %call = tail call ppc_fp128 @test_result() #3
+ store ppc_fp128 %call, ppc_fp128* @g, align 16
+ ret void
+}
+; CHECK: @use_result
+; CHECK: bl test_result
+; CHECK: ld [[REG:[0-9]+]], .LC
+; CHECK: stfd 2, 8([[REG]])
+; CHECK: stfd 1, 0([[REG]])
+; CHECK: blr
+
+declare ppc_fp128 @test_result()
+
+define void @caller_result() {
+entry:
+ %call = tail call ppc_fp128 @test_result()
+ tail call void @test(ppc_fp128 %call)
+ ret void
+}
+; CHECK: @caller_result
+; CHECK: bl test_result
+; CHECK-NEXT: nop
+; CHECK-NEXT: bl test
+; CHECK-NEXT: nop
+
+define i128 @convert_from(ppc_fp128 %x) {
+entry:
+ %0 = bitcast ppc_fp128 %x to i128
+ ret i128 %0
+}
+; CHECK: @convert_from
+; CHECK: stfd 1, [[OFF1:.*]](1)
+; CHECK: stfd 2, [[OFF2:.*]](1)
+; CHECK: ld 3, [[OFF1]](1)
+; CHECK: ld 4, [[OFF2]](1)
+; CHECK: blr
+
+define ppc_fp128 @convert_to(i128 %x) {
+entry:
+ %0 = bitcast i128 %x to ppc_fp128
+ ret ppc_fp128 %0
+}
+; CHECK: @convert_to
+; CHECK: std 3, [[OFF1:.*]](1)
+; CHECK: std 4, [[OFF2:.*]](1)
+; CHECK: lfd 1, [[OFF1]](1)
+; CHECK: lfd 2, [[OFF2]](1)
+; CHECK: blr
+
+define ppc_fp128 @convert_to2(i128 %x) {
+entry:
+ %shl = shl i128 %x, 1
+ %0 = bitcast i128 %shl to ppc_fp128
+ ret ppc_fp128 %0
+}
+
+; CHECK: @convert_to2
+; CHECK: std 3, [[OFF1:.*]](1)
+; CHECK: std 4, [[OFF2:.*]](1)
+; CHECK: lfd 1, [[OFF1]](1)
+; CHECK: lfd 2, [[OFF2]](1)
+; CHECK: blr
+
+define double @convert_vector(<4 x i32> %x) {
+entry:
+ %cast = bitcast <4 x i32> %x to ppc_fp128
+ %conv = fptrunc ppc_fp128 %cast to double
+ ret double %conv
+}
+; CHECK: @convert_vector
+; CHECK: addi [[REG:[0-9]+]], 1, [[OFF:.*]]
+; CHECK: stvx 2, 0, [[REG]]
+; CHECK: lfd 1, [[OFF]](1)
+; CHECK: blr
+
+declare void @llvm.va_start(i8*)
+
+define double @vararg(i32 %a, ...) {
+entry:
+ %va = alloca i8*, align 8
+ %va1 = bitcast i8** %va to i8*
+ call void @llvm.va_start(i8* %va1)
+ %arg = va_arg i8** %va, ppc_fp128
+ %conv = fptrunc ppc_fp128 %arg to double
+ ret double %conv
+}
+; CHECK: @vararg
+; CHECK: lfd 1, 0({{[0-9]+}})
+; CHECK: blr
+
diff --git a/test/CodeGen/PowerPC/pr17168.ll b/test/CodeGen/PowerPC/pr17168.ll
index 2848221e0764..24bcda02b3a5 100644
--- a/test/CodeGen/PowerPC/pr17168.ll
+++ b/test/CodeGen/PowerPC/pr17168.ll
@@ -56,7 +56,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 190311)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !298, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c] [DW_LANG_C99]
!1 = metadata !{metadata !"bt.c", metadata !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !82, metadata !102, metadata !114, metadata !132, metadata !145, metadata !154, metadata !155, metadata !162, metadata !183, metadata !200, metadata !201, metadata !207, metadata !208, metadata !215, metadata !221, metadata !230, metadata !238, metadata !246, metadata !255, metadata !260, metadata !261, metadata !268, metadata !274, metadata !279, metadata !280, metadata !287, metadata !293}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 74, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, null, metadata !12, i32 74} ; [ DW_TAG_subprogram ] [line 74] [def] [main]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
diff --git a/test/CodeGen/PowerPC/pr18663-2.ll b/test/CodeGen/PowerPC/pr18663-2.ll
new file mode 100644
index 000000000000..6b54440c4d56
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr18663-2.ll
@@ -0,0 +1,153 @@
+; RUN: llc < %s -march=ppc64 -mtriple=powerpc64-unknown-linux-gnu
+; RUN: llc < %s -march=ppc64le -mtriple=powerpc64le-unknown-linux-gnu
+
+%"class.std::__1::locale::id.1580.4307.4610.8491" = type { %"struct.std::__1::once_flag.1579.4306.4609.8490", i32 }
+%"struct.std::__1::once_flag.1579.4306.4609.8490" = type { i64 }
+%"class.Foam::IOerror.1581.4308.4611.8505" = type { %"class.Foam::error.1535.4262.4565.8504", %"class.Foam::string.1530.4257.4560.8499", i32, i32 }
+%"class.Foam::error.1535.4262.4565.8504" = type { %"class.std::exception.1523.4250.4553.8492", [36 x i8], %"class.Foam::string.1530.4257.4560.8499", %"class.Foam::string.1530.4257.4560.8499", i32, i8, i8, %"class.Foam::OStringStream.1534.4261.4564.8503"* }
+%"class.std::exception.1523.4250.4553.8492" = type { i32 (...)** }
+%"class.Foam::OStringStream.1534.4261.4564.8503" = type { %"class.Foam::OSstream.1533.4260.4563.8502" }
+%"class.Foam::OSstream.1533.4260.4563.8502" = type { [50 x i8], %"class.Foam::fileName.1531.4258.4561.8500", %"class.std::__1::basic_ostream.1532.4259.4562.8501"* }
+%"class.Foam::fileName.1531.4258.4561.8500" = type { %"class.Foam::string.1530.4257.4560.8499" }
+%"class.std::__1::basic_ostream.1532.4259.4562.8501" = type { i32 (...)**, [148 x i8] }
+%"class.Foam::string.1530.4257.4560.8499" = type { %"class.std::__1::basic_string.1529.4256.4559.8498" }
+%"class.std::__1::basic_string.1529.4256.4559.8498" = type { %"class.std::__1::__compressed_pair.1528.4255.4558.8497" }
+%"class.std::__1::__compressed_pair.1528.4255.4558.8497" = type { %"class.std::__1::__libcpp_compressed_pair_imp.1527.4254.4557.8496" }
+%"class.std::__1::__libcpp_compressed_pair_imp.1527.4254.4557.8496" = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep.1526.4253.4556.8495" }
+%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep.1526.4253.4556.8495" = type { %union.anon.1525.4252.4555.8494 }
+%union.anon.1525.4252.4555.8494 = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" }
+%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long.1524.4251.4554.8493" = type { i64, i64, i8* }
+
+@.str3 = external unnamed_addr constant [16 x i8], align 1
+@_ZNSt3__15ctypeIcE2idE = external global %"class.std::__1::locale::id.1580.4307.4610.8491"
+@_ZN4Foam12FatalIOErrorE = external global %"class.Foam::IOerror.1581.4308.4611.8505"
+@.str204 = external unnamed_addr constant [18 x i8], align 1
+@.str205 = external unnamed_addr constant [34 x i8], align 1
+
+declare void @_ZN4FoamlsERNS_7OstreamEPKc() #0
+
+declare i32 @__gxx_personality_v0(...)
+
+declare void @_ZNKSt3__18ios_base6getlocEv() #0
+
+declare void @_ZNKSt3__16locale9use_facetERNS0_2idE() #0
+
+; Function Attrs: noreturn
+declare void @_ZNKSt3__121__basic_string_commonILb1EE20__throw_length_errorEv() #1 align 2
+
+declare void @_ZN4Foam6string6expandEb() #0
+
+declare void @_ZN4Foam8IFstreamC1ERKNS_8fileNameENS_8IOstream12streamFormatENS4_13versionNumberE() #0
+
+declare void @_ZN4Foam7IOerrorclEPKcS2_iRKNS_8IOstreamE() #0
+
+declare void @_ZN4Foam7IOerror4exitEi() #0
+
+; Function Attrs: inlinehint
+declare void @_ZN4Foam8fileName12stripInvalidEv() #2 align 2
+
+define void @_ZN4Foam3CSVINS_6VectorIdEEE4readEv() #0 align 2 {
+entry:
+ invoke void @_ZN4Foam6string6expandEb()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %entry
+ br i1 undef, label %if.then.i.i.i.i176, label %_ZN4Foam6stringC2ERKS0_.exit.i
+
+if.then.i.i.i.i176: ; preds = %invoke.cont
+ invoke void @_ZNKSt3__121__basic_string_commonILb1EE20__throw_length_errorEv()
+ to label %.noexc unwind label %lpad
+
+.noexc: ; preds = %if.then.i.i.i.i176
+ unreachable
+
+_ZN4Foam6stringC2ERKS0_.exit.i: ; preds = %invoke.cont
+ invoke void @_ZN4Foam8fileName12stripInvalidEv()
+ to label %invoke.cont2 unwind label %lpad.i
+
+lpad.i: ; preds = %_ZN4Foam6stringC2ERKS0_.exit.i
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %ehcleanup142
+
+invoke.cont2: ; preds = %_ZN4Foam6stringC2ERKS0_.exit.i
+ invoke void @_ZN4Foam8IFstreamC1ERKNS_8fileNameENS_8IOstream12streamFormatENS4_13versionNumberE()
+ to label %invoke.cont4 unwind label %lpad3
+
+invoke.cont4: ; preds = %invoke.cont2
+ br i1 undef, label %for.body, label %if.then
+
+if.then: ; preds = %invoke.cont4
+ invoke void @_ZN4Foam7IOerrorclEPKcS2_iRKNS_8IOstreamE()
+ to label %invoke.cont8 unwind label %lpad5
+
+invoke.cont8: ; preds = %if.then
+ invoke void @_ZN4FoamlsERNS_7OstreamEPKc()
+ to label %memptr.end.i unwind label %lpad5
+
+memptr.end.i: ; preds = %invoke.cont8
+ invoke void @_ZN4Foam7IOerror4exitEi()
+ to label %if.end unwind label %lpad5
+
+lpad: ; preds = %if.then.i.i.i.i176, %entry
+ %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %ehcleanup142
+
+lpad3: ; preds = %invoke.cont2
+ %2 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %ehcleanup142
+
+lpad5: ; preds = %memptr.end.i, %invoke.cont8, %if.then
+ %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %ehcleanup142
+
+if.end: ; preds = %memptr.end.i
+ br i1 undef, label %for.body, label %vector.body
+
+for.body: ; preds = %if.end, %invoke.cont4
+ invoke void @_ZNKSt3__18ios_base6getlocEv()
+ to label %.noexc205 unwind label %lpad19
+
+.noexc205: ; preds = %for.body
+ invoke void @_ZNKSt3__16locale9use_facetERNS0_2idE()
+ to label %invoke.cont.i.i.i unwind label %lpad.i.i.i
+
+invoke.cont.i.i.i: ; preds = %.noexc205
+ unreachable
+
+lpad.i.i.i: ; preds = %.noexc205
+ %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %ehcleanup142
+
+lpad19: ; preds = %for.body
+ %5 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ br label %ehcleanup142
+
+vector.body: ; preds = %vector.body, %if.end
+ %vec.phi = phi <8 x i32> [ %10, %vector.body ], [ undef, %if.end ]
+ %vec.phi1302 = phi <8 x i32> [ %11, %vector.body ], [ undef, %if.end ]
+ %vec.phi1303 = phi <8 x i32> [ %12, %vector.body ], [ undef, %if.end ]
+ %vec.phi1304 = phi <8 x i32> [ %13, %vector.body ], [ undef, %if.end ]
+ %6 = icmp sgt <8 x i32> undef, %vec.phi
+ %7 = icmp sgt <8 x i32> undef, %vec.phi1302
+ %8 = icmp sgt <8 x i32> undef, %vec.phi1303
+ %9 = icmp sgt <8 x i32> undef, %vec.phi1304
+ %10 = select <8 x i1> %6, <8 x i32> undef, <8 x i32> %vec.phi
+ %11 = select <8 x i1> %7, <8 x i32> undef, <8 x i32> %vec.phi1302
+ %12 = select <8 x i1> %8, <8 x i32> undef, <8 x i32> %vec.phi1303
+ %13 = select <8 x i1> %9, <8 x i32> undef, <8 x i32> %vec.phi1304
+ br label %vector.body
+
+ehcleanup142: ; preds = %lpad19, %lpad.i.i.i, %lpad5, %lpad3, %lpad, %lpad.i
+ resume { i8*, i32 } undef
+}
+
+attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { noreturn "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { inlinehint "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/CodeGen/PowerPC/pr18663.ll b/test/CodeGen/PowerPC/pr18663.ll
new file mode 100644
index 000000000000..1b85223aa09a
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr18663.ll
@@ -0,0 +1,298 @@
+; RUN: llc < %s -march=ppc64 -mtriple=powerpc64-unknown-linux-gnu
+; RUN: llc < %s -march=ppc64le -mtriple=powerpc64le-unknown-linux-gnu
+
+%class.Point.1 = type { %class.Tensor.0 }
+%class.Tensor.0 = type { [3 x double] }
+%class.TriaObjectAccessor.57 = type { %class.TriaAccessor.56 }
+%class.TriaAccessor.56 = type { i32, i32, %class.Triangulation.55* }
+%class.Triangulation.55 = type { %class.Subscriptor, %"class.std::vector.46", %"class.std::vector", %"class.std::vector.3.8", [255 x %class.Boundary.50*], i32, %struct.TriaNumberCache.54 }
+%class.Subscriptor = type { i32 (...)**, i32, %"class.std::type_info.2"* }
+%"class.std::type_info.2" = type { i32 (...)**, i8* }
+%"class.std::vector.46" = type { %"struct.std::_Vector_base.45" }
+%"struct.std::_Vector_base.45" = type { %"struct.std::_Vector_base<TriangulationLevel<3> *, std::allocator<TriangulationLevel<3> *> >::_Vector_impl.44" }
+%"struct.std::_Vector_base<TriangulationLevel<3> *, std::allocator<TriangulationLevel<3> *> >::_Vector_impl.44" = type { %class.TriangulationLevel.43**, %class.TriangulationLevel.43**, %class.TriangulationLevel.43** }
+%class.TriangulationLevel.43 = type { %class.TriangulationLevel.0.37, %"struct.TriangulationLevel<3>::HexesData.42" }
+%class.TriangulationLevel.0.37 = type { %class.TriangulationLevel.1.31, %"struct.TriangulationLevel<2>::QuadsData.36" }
+%class.TriangulationLevel.1.31 = type { %class.TriangulationLevel, %"struct.TriangulationLevel<1>::LinesData.30" }
+%class.TriangulationLevel = type { %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.7.12", %"class.std::vector.12.15" }
+%"class.std::vector.7.12" = type { %"struct.std::_Vector_base" }
+%"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" }
+%"struct.std::_Vector_base<std::pair<int, int>, std::allocator<std::pair<int, int> > >::_Vector_impl.10" = type { %"struct.std::pair.9"*, %"struct.std::pair.9"*, %"struct.std::pair.9"* }
+%"struct.std::pair.9" = type opaque
+%"class.std::vector.12.15" = type { %"struct.std::_Vector_base.13.14" }
+%"struct.std::_Vector_base.13.14" = type { %"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" }
+%"struct.std::_Vector_base<unsigned int, std::allocator<unsigned int> >::_Vector_impl.13" = type { i32*, i32*, i32* }
+%"struct.TriangulationLevel<1>::LinesData.30" = type { %"class.std::vector.17.20", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29" }
+%"class.std::vector.17.20" = type { %"struct.std::_Vector_base.18.19" }
+%"struct.std::_Vector_base.18.19" = type { %"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" }
+%"struct.std::_Vector_base<Line, std::allocator<Line> >::_Vector_impl.18" = type { %class.Line.17*, %class.Line.17*, %class.Line.17* }
+%class.Line.17 = type { [2 x i32] }
+%"class.std::vector.22.23" = type { %"struct.std::_Vector_base.23.22" }
+%"struct.std::_Vector_base.23.22" = type { %"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" }
+%"struct.std::_Vector_base<int, std::allocator<int> >::_Vector_impl.21" = type { i32*, i32*, i32* }
+%"class.std::vector.27.26" = type { %"struct.std::_Vector_base.28.25" }
+%"struct.std::_Vector_base.28.25" = type { %"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" }
+%"struct.std::_Vector_base<unsigned char, std::allocator<unsigned char> >::_Vector_impl.24" = type { i8*, i8*, i8* }
+%"class.std::vector.32.29" = type { %"struct.std::_Vector_base.33.28" }
+%"struct.std::_Vector_base.33.28" = type { %"struct.std::_Vector_base<void *, std::allocator<void *> >::_Vector_impl.27" }
+%"struct.std::_Vector_base<void *, std::allocator<void *> >::_Vector_impl.27" = type { i8**, i8**, i8** }
+%"struct.TriangulationLevel<2>::QuadsData.36" = type { %"class.std::vector.37.35", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29" }
+%"class.std::vector.37.35" = type { %"struct.std::_Vector_base.38.34" }
+%"struct.std::_Vector_base.38.34" = type { %"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" }
+%"struct.std::_Vector_base<Quad, std::allocator<Quad> >::_Vector_impl.33" = type { %class.Quad.32*, %class.Quad.32*, %class.Quad.32* }
+%class.Quad.32 = type { [4 x i32] }
+%"struct.TriangulationLevel<3>::HexesData.42" = type { %"class.std::vector.42.41", %"class.std::vector.22.23", %"class.std::vector.3.8", %"class.std::vector.3.8", %"class.std::vector.27.26", %"class.std::vector.32.29", %"class.std::vector.3.8" }
+%"class.std::vector.42.41" = type { %"struct.std::_Vector_base.43.40" }
+%"struct.std::_Vector_base.43.40" = type { %"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" }
+%"struct.std::_Vector_base<Hexahedron, std::allocator<Hexahedron> >::_Vector_impl.39" = type { %class.Hexahedron.38*, %class.Hexahedron.38*, %class.Hexahedron.38* }
+%class.Hexahedron.38 = type { [6 x i32] }
+%"class.std::vector" = type { %"struct.std::_Vector_base.48.48" }
+%"struct.std::_Vector_base.48.48" = type { %"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" }
+%"struct.std::_Vector_base<Point<3>, std::allocator<Point<3> > >::_Vector_impl.47" = type { %class.Point.1*, %class.Point.1*, %class.Point.1* }
+%"class.std::vector.3.8" = type { %"struct.std::_Bvector_base.7" }
+%"struct.std::_Bvector_base.7" = type { %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" }
+%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl.6" = type { %"struct.std::_Bit_iterator.5", %"struct.std::_Bit_iterator.5", i64* }
+%"struct.std::_Bit_iterator.5" = type { %"struct.std::_Bit_iterator_base.base.4", [4 x i8] }
+%"struct.std::_Bit_iterator_base.base.4" = type <{ i64*, i32 }>
+%class.Boundary.50 = type opaque
+%struct.TriaNumberCache.54 = type { %struct.TriaNumberCache.52.52, i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
+%struct.TriaNumberCache.52.52 = type { %struct.TriaNumberCache.53.51, i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
+%struct.TriaNumberCache.53.51 = type { i32, %"class.std::vector.12.15", i32, %"class.std::vector.12.15" }
+
+define void @_ZNK18TriaObjectAccessorILi3ELi3EE10barycenterEv(%class.Point.1* noalias nocapture sret %agg.result, %class.TriaObjectAccessor.57* %this) #0 align 2 {
+entry:
+ %0 = load double* null, align 8
+ %1 = load double* undef, align 8
+ %call18 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
+ %2 = load double* undef, align 8
+ %call21 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
+ %3 = load double* undef, align 8
+ %call33 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 3)
+ %4 = load double* null, align 8
+ %5 = load double* undef, align 8
+ %call45 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 7)
+ %6 = load double* undef, align 8
+ %call48 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 0)
+ %7 = load double* undef, align 8
+ %call66 = tail call dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57* %this, i32 zeroext 6)
+ %8 = load double* undef, align 8
+ %mul334 = fmul double undef, 2.000000e+00
+ %mul579 = fmul double %2, %5
+ %mul597 = fmul double undef, %mul579
+ %mul679 = fmul double %2, %8
+ %mul1307 = fmul double undef, %1
+ %mul2092 = fmul double undef, %4
+ %mul2679 = fmul double undef, undef
+ %mul2931 = fmul double undef, %3
+ %mul3094 = fmul double undef, %3
+ %mul3096 = fmul double %mul3094, %8
+ %sub3097 = fsub double 0.000000e+00, %mul3096
+ %add3105 = fadd double undef, %sub3097
+ %add3113 = fadd double 0.000000e+00, %add3105
+ %sub3121 = fsub double %add3113, undef
+ %sub3129 = fsub double %sub3121, undef
+ %add3137 = fadd double undef, %sub3129
+ %add3145 = fadd double undef, %add3137
+ %sub3153 = fsub double %add3145, undef
+ %sub3162 = fsub double %sub3153, 0.000000e+00
+ %add3171 = fadd double undef, %sub3162
+ %add3180 = fadd double undef, %add3171
+ %add3189 = fadd double 0.000000e+00, %add3180
+ %mul3197 = fmul double %4, %mul2679
+ %sub3198 = fsub double %add3189, %mul3197
+ %sub3207 = fsub double %sub3198, 0.000000e+00
+ %mul3212 = fmul double %2, undef
+ %mul3214 = fmul double %mul3212, undef
+ %sub3215 = fsub double %sub3207, %mul3214
+ %mul3222 = fmul double %5, 0.000000e+00
+ %sub3223 = fsub double %sub3215, %mul3222
+ %mul3228 = fmul double %2, undef
+ %mul3230 = fmul double %3, %mul3228
+ %add3231 = fadd double %mul3230, %sub3223
+ %mul3236 = fmul double undef, undef
+ %mul3238 = fmul double %mul3236, %8
+ %add3239 = fadd double %mul3238, %add3231
+ %mul3244 = fmul double %mul1307, %3
+ %mul3246 = fmul double %mul3244, %7
+ %sub3247 = fsub double %add3239, %mul3246
+ %mul3252 = fmul double undef, undef
+ %mul3254 = fmul double %mul3252, %7
+ %add3255 = fadd double %mul3254, %sub3247
+ %sub3263 = fsub double %add3255, undef
+ %add3271 = fadd double 0.000000e+00, %sub3263
+ %sub3279 = fsub double %add3271, undef
+ %sub3287 = fsub double %sub3279, undef
+ %mul3292 = fmul double %mul1307, %5
+ %mul3294 = fmul double %mul3292, undef
+ %add3295 = fadd double %mul3294, %sub3287
+ %add3303 = fadd double undef, %add3295
+ %add3311 = fadd double 0.000000e+00, %add3303
+ %mul3318 = fmul double undef, %7
+ %sub3319 = fsub double %add3311, %mul3318
+ %mul3326 = fmul double %4, %mul3228
+ %sub3327 = fsub double %sub3319, %mul3326
+ %mul3334 = fmul double undef, %8
+ %sub3335 = fsub double %sub3327, %mul3334
+ %add3343 = fadd double undef, %sub3335
+ %mul3350 = fmul double %mul3212, %7
+ %add3351 = fadd double %mul3350, %add3343
+ %mul3358 = fmul double %mul2092, undef
+ %sub3359 = fsub double %add3351, %mul3358
+ %mul3362 = fmul double undef, %1
+ %mul3366 = fmul double 0.000000e+00, %8
+ %add3367 = fadd double %mul3366, %sub3359
+ %mul3372 = fmul double %mul3362, %5
+ %sub3375 = fsub double %add3367, undef
+ %add3383 = fadd double undef, %sub3375
+ %mul3389 = fmul double %2, 0.000000e+00
+ %mul3391 = fmul double %4, %mul3389
+ %sub3392 = fsub double %add3383, %mul3391
+ %mul3396 = fmul double undef, 0.000000e+00
+ %mul3400 = fmul double undef, %7
+ %sub3401 = fsub double %sub3392, %mul3400
+ %mul3407 = fmul double %mul3396, %4
+ %mul3409 = fmul double %mul3407, %8
+ %add3410 = fadd double %mul3409, %sub3401
+ %add3419 = fadd double undef, %add3410
+ %mul3423 = fmul double undef, %mul334
+ %add3428 = fadd double undef, %add3419
+ %add3437 = fadd double undef, %add3428
+ %mul3443 = fmul double %mul3423, %3
+ %mul3445 = fmul double %mul3443, %8
+ %sub3446 = fsub double %add3437, %mul3445
+ %mul3453 = fmul double %mul3372, undef
+ %add3454 = fadd double %mul3453, %sub3446
+ %add3462 = fadd double 0.000000e+00, %add3454
+ %mul3467 = fmul double %mul3362, %3
+ %mul3469 = fmul double %mul3467, %8
+ %sub3470 = fsub double %add3462, %mul3469
+ %add3478 = fadd double 0.000000e+00, %sub3470
+ %sub3486 = fsub double %add3478, undef
+ %mul3490 = fmul double %mul334, 0.000000e+00
+ %mul3492 = fmul double %2, %mul3490
+ %mul3494 = fmul double %mul3492, undef
+ %sub3495 = fsub double %sub3486, %mul3494
+ %sub3503 = fsub double %sub3495, undef
+ %sub3512 = fsub double %sub3503, undef
+ %add3520 = fadd double undef, %sub3512
+ %sub3528 = fsub double %add3520, undef
+ %add3537 = fadd double undef, %sub3528
+ %add3545 = fadd double 0.000000e+00, %add3537
+ %sub3553 = fsub double %add3545, undef
+ %add3561 = fadd double undef, %sub3553
+ %sub3569 = fsub double %add3561, undef
+ %mul3574 = fmul double undef, undef
+ %mul3576 = fmul double %mul3574, %7
+ %add3577 = fadd double %mul3576, %sub3569
+ %sub3585 = fsub double %add3577, undef
+ %mul3592 = fmul double %4, undef
+ %sub3593 = fsub double %sub3585, %mul3592
+ %mul3598 = fmul double %2, undef
+ %mul3600 = fmul double %mul3598, %7
+ %add3601 = fadd double %mul3600, %sub3593
+ %mul3608 = fmul double %mul3598, undef
+ %sub3609 = fsub double %add3601, %mul3608
+ %sub3618 = fsub double %sub3609, undef
+ %add3627 = fadd double undef, %sub3618
+ %add3635 = fadd double undef, %add3627
+ %mul3638 = fmul double undef, %2
+ %mul3640 = fmul double %mul3638, %5
+ %mul3642 = fmul double %mul3640, %7
+ %sub3643 = fsub double %add3635, %mul3642
+ %mul3648 = fmul double %1, undef
+ %mul3650 = fmul double %mul3648, %8
+ %sub3651 = fsub double %sub3643, %mul3650
+ %mul3656 = fmul double %mul3638, %4
+ %mul3658 = fmul double %mul3656, %8
+ %add3659 = fadd double %mul3658, %sub3651
+ %mul3666 = fmul double %5, 0.000000e+00
+ %add3667 = fadd double %mul3666, %add3659
+ %sub3675 = fsub double %add3667, undef
+ %mul3680 = fmul double %mul3638, %3
+ %mul3682 = fmul double %mul3680, %8
+ %sub3683 = fsub double %sub3675, %mul3682
+ %add3692 = fadd double 0.000000e+00, %sub3683
+ %mul3696 = fmul double undef, undef
+ %mul3698 = fmul double %mul3696, %4
+ %mul3700 = fmul double %mul3698, %8
+ %add3701 = fadd double %mul3700, %add3692
+ %sub3710 = fsub double %add3701, undef
+ %mul3716 = fmul double undef, %3
+ %mul3718 = fmul double %mul3716, %8
+ %sub3719 = fsub double %sub3710, %mul3718
+ %add3727 = fadd double undef, %sub3719
+ %mul3734 = fmul double %mul3574, %8
+ %add3735 = fadd double %mul3734, %add3727
+ %sub3743 = fsub double %add3735, 0.000000e+00
+ %add3751 = fadd double 0.000000e+00, %sub3743
+ %mul3758 = fmul double %6, 0.000000e+00
+ %sub3759 = fsub double %add3751, %mul3758
+ %mul3764 = fmul double undef, %mul2931
+ %mul3766 = fmul double %mul3764, undef
+ %sub3767 = fsub double %sub3759, %mul3766
+ %add3775 = fadd double 0.000000e+00, %sub3767
+ %add3783 = fadd double undef, %add3775
+ %sub3791 = fsub double %add3783, 0.000000e+00
+ %add3799 = fadd double undef, %sub3791
+ %sub3807 = fsub double %add3799, undef
+ %mul3814 = fmul double 0.000000e+00, undef
+ %add3815 = fadd double %mul3814, %sub3807
+ %mul3822 = fmul double %mul597, undef
+ %sub3823 = fsub double %add3815, %mul3822
+ %add3831 = fadd double undef, %sub3823
+ %mul3836 = fmul double undef, %mul679
+ %mul3838 = fmul double %6, %mul3836
+ %sub3839 = fsub double %add3831, %mul3838
+ %add3847 = fadd double undef, %sub3839
+ %add3855 = fadd double undef, %add3847
+ %mul3858 = fmul double undef, %8
+ %mul3860 = fmul double undef, %mul3858
+ %mul3862 = fmul double %6, %mul3860
+ %sub3863 = fsub double %add3855, %mul3862
+ %add3872 = fadd double undef, %sub3863
+ %sub3880 = fsub double %add3872, undef
+ %sub3889 = fsub double %sub3880, undef
+ %sub3898 = fsub double %sub3889, undef
+ %add3907 = fadd double undef, %sub3898
+ %sub3915 = fsub double %add3907, 0.000000e+00
+ %add3923 = fadd double undef, %sub3915
+ %mul3930 = fmul double %3, undef
+ %add3931 = fadd double %mul3930, %add3923
+ %add3940 = fadd double undef, %add3931
+ %sub3949 = fsub double %add3940, undef
+ %mul3952 = fmul double %2, %3
+ %sub3957 = fsub double %sub3949, undef
+ %sub3966 = fsub double %sub3957, undef
+ %add3975 = fadd double undef, %sub3966
+ %add3983 = fadd double undef, %add3975
+ %sub3992 = fsub double %add3983, undef
+ %mul3997 = fmul double undef, %mul3952
+ %mul3999 = fmul double %mul3997, %8
+ %add4000 = fadd double %mul3999, %sub3992
+ %sub4008 = fsub double %add4000, undef
+ %add4017 = fadd double undef, %sub4008
+ %add4026 = fadd double 0.000000e+00, %add4017
+ %mul4034 = fmul double %6, undef
+ %sub4035 = fsub double %add4026, %mul4034
+ %add4043 = fadd double undef, %sub4035
+ %sub4051 = fsub double %add4043, 0.000000e+00
+ %mul4916 = fmul double 0.000000e+00, %sub4051
+ %mul4917 = fmul double %mul4916, 0x3FC5555555555555
+ %mul7317 = fmul double 0.000000e+00, %3
+ %mul7670 = fmul double %0, %mul7317
+ %mul8882 = fmul double %0, 0.000000e+00
+ %mul8884 = fmul double undef, %mul8882
+ %sub8885 = fsub double 0.000000e+00, %mul8884
+ %mul8892 = fmul double %mul7670, undef
+ %add8893 = fadd double %mul8892, %sub8885
+ %mul8900 = fmul double undef, undef
+ %add8901 = fadd double %mul8900, %add8893
+ %mul9767 = fmul double 0.000000e+00, %add8901
+ %mul9768 = fmul double %mul9767, 0x3FC5555555555555
+ store double %mul4917, double* undef, align 8
+ store double %mul9768, double* undef, align 8
+ ret void
+}
+
+declare dereferenceable(24) %class.Point.1* @_ZNK18TriaObjectAccessorILi3ELi3EE6vertexEj(%class.TriaObjectAccessor.57*, i32 zeroext) #0
+
diff --git a/test/CodeGen/PowerPC/pr20442.ll b/test/CodeGen/PowerPC/pr20442.ll
new file mode 100644
index 000000000000..ad43a04e70c4
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr20442.ll
@@ -0,0 +1,79 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-p:32:32-i64:64-n32"
+target triple = "powerpc-unknown-linux-gnu"
+
+; This code would cause code generation like this after PPCCTRLoops ran:
+; %indvar = phi i32 [ 0, %for.body ], [ %indvar.next, %if.then6 ]
+; %j.1.ph13 = phi i32 [ %j.110, %if.then6 ], [ 0, %for.body ], [ 0, %for.body ]
+; %c.0.ph12 = phi i32 [ %dec, %if.then6 ], [ %2, %for.body ], [ %2, %for.body ]
+; which would fail verification because the created induction variable does not
+; have as many predecessor entries as the other PHIs.
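+; (The duplicate "[ 0, %for.body ]" entries arise because the switch in
+; %for.body has two case values branching to the same successor, so its
+; PHIs list %for.body once per incoming edge, while the induction variable
+; created for the CTR loop listed it only once.)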
+; CHECK-LABEL: @fn1
+; CHECK: mtctr
+
+%struct.anon = type { i32 }
+%struct.anon.0 = type { i32 }
+
+@b = common global %struct.anon* null, align 4
+@a = common global %struct.anon.0* null, align 4
+
+; Function Attrs: nounwind readonly uwtable
+define i32 @fn1() #0 {
+entry:
+ %0 = load %struct.anon** @b, align 4
+ %1 = ptrtoint %struct.anon* %0 to i32
+ %cmp = icmp sgt %struct.anon* %0, null
+ %2 = load %struct.anon.0** @a, align 4
+ br i1 %cmp, label %for.bodythread-pre-split, label %if.end8
+
+for.bodythread-pre-split: ; preds = %entry
+ %aclass = getelementptr inbounds %struct.anon.0* %2, i32 0, i32 0
+ %.pr = load i32* %aclass, align 4
+ br label %for.body
+
+for.body: ; preds = %for.bodythread-pre-split, %for.body
+ switch i32 %.pr, label %for.body [
+ i32 0, label %while.body.lr.ph.preheader
+ i32 2, label %while.body.lr.ph.preheader
+ ]
+
+while.body.lr.ph.preheader: ; preds = %for.body, %for.body
+ br label %while.body.lr.ph
+
+while.body.lr.ph: ; preds = %while.body.lr.ph.preheader, %if.then6
+ %j.1.ph13 = phi i32 [ %j.110.lcssa, %if.then6 ], [ 0, %while.body.lr.ph.preheader ]
+ %c.0.ph12 = phi i32 [ %dec, %if.then6 ], [ %1, %while.body.lr.ph.preheader ]
+ br label %while.body
+
+while.cond: ; preds = %while.body
+ %cmp2 = icmp slt i32 %inc7, %c.0.ph12
+ br i1 %cmp2, label %while.body, label %if.end8.loopexit
+
+while.body: ; preds = %while.body.lr.ph, %while.cond
+ %j.110 = phi i32 [ %j.1.ph13, %while.body.lr.ph ], [ %inc7, %while.cond ]
+ %aclass_index = getelementptr inbounds %struct.anon* %0, i32 %j.110, i32 0
+ %3 = load i32* %aclass_index, align 4
+ %aclass5 = getelementptr inbounds %struct.anon.0* %2, i32 %3, i32 0
+ %4 = load i32* %aclass5, align 4
+ %tobool = icmp eq i32 %4, 0
+ %inc7 = add nsw i32 %j.110, 1
+ br i1 %tobool, label %while.cond, label %if.then6
+
+if.then6: ; preds = %while.body
+ %j.110.lcssa = phi i32 [ %j.110, %while.body ]
+ %dec = add nsw i32 %c.0.ph12, -1
+ %cmp29 = icmp slt i32 %j.110.lcssa, %dec
+ br i1 %cmp29, label %while.body.lr.ph, label %if.end8.loopexit17
+
+if.end8.loopexit: ; preds = %while.cond
+ br label %if.end8
+
+if.end8.loopexit17: ; preds = %if.then6
+ br label %if.end8
+
+if.end8: ; preds = %if.end8.loopexit17, %if.end8.loopexit, %entry
+ ret i32 undef
+}
+
+attributes #0 = { nounwind readonly uwtable }
+
diff --git a/test/CodeGen/PowerPC/private.ll b/test/CodeGen/PowerPC/private.ll
index f9405f6af2ff..633fa651037f 100644
--- a/test/CodeGen/PowerPC/private.ll
+++ b/test/CodeGen/PowerPC/private.ll
@@ -1,24 +1,28 @@
; Test to make sure that the 'private' is used correctly.
;
-; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu > %t
-; RUN: grep .Lfoo: %t
-; RUN: grep bl.*\.Lfoo %t
-; RUN: grep .Lbaz: %t
-; RUN: grep lis.*\.Lbaz %t
-; RUN: llc < %s -mtriple=powerpc-apple-darwin > %t
-; RUN: grep L_foo: %t
-; RUN: grep bl.*\L_foo %t
-; RUN: grep L_baz: %t
-; RUN: grep lis.*\L_baz %t
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu | \
+; RUN: FileCheck --check-prefix=LINUX %s
+;
+; RUN: llc < %s -mtriple=powerpc-apple-darwin | \
+; RUN: FileCheck --check-prefix=OSX %s
+; LINUX: .Lfoo:
+; OSX: l_foo:
define private void @foo() nounwind {
ret void
}
-@baz = private global i32 4
-
define i32 @bar() nounwind {
+; LINUX: bl{{.*}}.Lfoo
+; OSX: bl{{.*}}l_foo
call void @foo()
+
+; LINUX: lis{{.*}}.Lbaz
+; OSX: lis{{.*}}l_baz
%1 = load i32* @baz, align 4
ret i32 %1
}
+
+; LINUX: .Lbaz:
+; OSX: l_baz:
+@baz = private global i32 4
diff --git a/test/CodeGen/PowerPC/pwr7-gt-nop.ll b/test/CodeGen/PowerPC/pwr7-gt-nop.ll
new file mode 100644
index 000000000000..8c8545d60df7
--- /dev/null
+++ b/test/CodeGen/PowerPC/pwr7-gt-nop.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -mcpu=pwr7 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c, float* nocapture %d) #0 {
+
+; CHECK-LABEL: @foo
+
+entry:
+ %0 = load float* %b, align 4
+ store float %0, float* %a, align 4
+ %1 = load float* %c, align 4
+ store float %1, float* %b, align 4
+ %2 = load float* %a, align 4
+ store float %2, float* %d, align 4
+ ret void
+
+; CHECK: lfs [[REG1:[0-9]+]], 0(4)
+; CHECK: stfs [[REG1]], 0(3)
+; CHECK: ori 2, 2, 0
+; CHECK: lfs [[REG2:[0-9]+]], 0(5)
+; CHECK: stfs [[REG2]], 0(4)
+; CHECK: ori 2, 2, 0
+; CHECK: lfs [[REG3:[0-9]+]], 0(3)
+; CHECK: stfs [[REG3]], 0(6)
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/resolvefi-basereg.ll b/test/CodeGen/PowerPC/resolvefi-basereg.ll
new file mode 100644
index 000000000000..62c2d139920a
--- /dev/null
+++ b/test/CodeGen/PowerPC/resolvefi-basereg.ll
@@ -0,0 +1,362 @@
+; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
+
+; Due to a bug in resolveFrameIndex we ended up with invalid addresses
+; containing a base register 0. Verify that this no longer happens.
+; CHECK-NOT: (0)
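+; (A base register of 0 in a D-form memory access means the literal value
+; zero rather than r0, so such an address would not be frame-relative at
+; all; the CHECK-NOT above is meant to catch any "(0)" operand.)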
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.Info = type { i32, i32, i8*, i8*, i8*, [32 x i8*], i64, [32 x i64], i64, i64, i64, [32 x i64] }
+%struct.S1998 = type { [2 x i32*], i64, i64, double, i16, i32, [29 x %struct.anon], i16, i8, i32, [8 x i8] }
+%struct.anon = type { [16 x double], i32, i16, i32, [3 x i8], [6 x i8], [4 x i32], i8 }
+
+@info = global %struct.Info zeroinitializer, align 8
+@fails = global i32 0, align 4
+@intarray = global [256 x i32] zeroinitializer, align 4
+@s1998 = global %struct.S1998 zeroinitializer, align 16
+@a1998 = external global [5 x %struct.S1998]
+
+define void @test1998() {
+entry:
+ %i = alloca i32, align 4
+ %j = alloca i32, align 4
+ %tmp = alloca i32, align 4
+ %agg.tmp = alloca %struct.S1998, align 16
+ %agg.tmp111 = alloca %struct.S1998, align 16
+ %agg.tmp112 = alloca %struct.S1998, align 16
+ %agg.tmp113 = alloca %struct.S1998, align 16
+ %agg.tmp114 = alloca %struct.S1998, align 16
+ %agg.tmp115 = alloca %struct.S1998, align 16
+ %agg.tmp116 = alloca %struct.S1998, align 16
+ %agg.tmp117 = alloca %struct.S1998, align 16
+ %agg.tmp118 = alloca %struct.S1998, align 16
+ %agg.tmp119 = alloca %struct.S1998, align 16
+ call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i32 8, i1 false)
+ store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 2), align 8
+ store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 3), align 8
+ store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info* @info, i32 0, i32 4), align 8
+ store i64 5168, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 6), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 9), align 8
+ store i64 16, i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 10), align 8
+ %0 = load i64* getelementptr inbounds (%struct.Info* @info, i32 0, i32 8), align 8
+ %sub = sub i64 %0, 1
+ %and = and i64 ptrtoint (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 3) to i64), %sub
+ %tobool = icmp ne i64 %and, 0
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ %1 = load i32* @fails, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, i32* @fails, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ store i32 0, i32* %i, align 4
+ store i32 0, i32* %j, align 4
+ %2 = load i32* %i, align 4
+ %idxprom = sext i32 %2 to i64
+ %arrayidx = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
+ store i8* bitcast (i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
+ %3 = load i32* %i, align 4
+ %idxprom1 = sext i32 %3 to i64
+ %arrayidx2 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
+ store i64 8, i64* %arrayidx2, align 8
+ %4 = load i32* %i, align 4
+ %idxprom3 = sext i32 %4 to i64
+ %arrayidx4 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
+ store i64 8, i64* %arrayidx4, align 8
+ store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
+ store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
+ %5 = load i32* %i, align 4
+ %inc5 = add nsw i32 %5, 1
+ store i32 %inc5, i32* %i, align 4
+ %6 = load i32* %i, align 4
+ %idxprom6 = sext i32 %6 to i64
+ %arrayidx7 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
+ store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
+ %7 = load i32* %i, align 4
+ %idxprom8 = sext i32 %7 to i64
+ %arrayidx9 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
+ store i64 8, i64* %arrayidx9, align 8
+ %8 = load i32* %i, align 4
+ %idxprom10 = sext i32 %8 to i64
+ %arrayidx11 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
+ store i64 8, i64* %arrayidx11, align 8
+ store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1), align 8
+ store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
+ %9 = load i32* %i, align 4
+ %inc12 = add nsw i32 %9, 1
+ store i32 %inc12, i32* %i, align 4
+ %10 = load i32* %i, align 4
+ %idxprom13 = sext i32 %10 to i64
+ %arrayidx14 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
+ store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
+ %11 = load i32* %i, align 4
+ %idxprom15 = sext i32 %11 to i64
+ %arrayidx16 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
+ store i64 8, i64* %arrayidx16, align 8
+ %12 = load i32* %i, align 4
+ %idxprom17 = sext i32 %12 to i64
+ %arrayidx18 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
+ store i64 8, i64* %arrayidx18, align 8
+ store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2), align 8
+ store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
+ %13 = load i32* %i, align 4
+ %inc19 = add nsw i32 %13, 1
+ store i32 %inc19, i32* %i, align 4
+ %14 = load i32* %i, align 4
+ %idxprom20 = sext i32 %14 to i64
+ %arrayidx21 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
+ store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
+ %15 = load i32* %i, align 4
+ %idxprom22 = sext i32 %15 to i64
+ %arrayidx23 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
+ store i64 8, i64* %arrayidx23, align 8
+ %16 = load i32* %i, align 4
+ %idxprom24 = sext i32 %16 to i64
+ %arrayidx25 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
+ store i64 16, i64* %arrayidx25, align 8
+ store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3), align 16
+ store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
+ %17 = load i32* %i, align 4
+ %inc26 = add nsw i32 %17, 1
+ store i32 %inc26, i32* %i, align 4
+ %18 = load i32* %i, align 4
+ %idxprom27 = sext i32 %18 to i64
+ %arrayidx28 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
+ store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
+ %19 = load i32* %i, align 4
+ %idxprom29 = sext i32 %19 to i64
+ %arrayidx30 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
+ store i64 2, i64* %arrayidx30, align 8
+ %20 = load i32* %i, align 4
+ %idxprom31 = sext i32 %20 to i64
+ %arrayidx32 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
+ store i64 2, i64* %arrayidx32, align 8
+ store i16 -15897, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4), align 2
+ store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
+ %21 = load i32* %i, align 4
+ %inc33 = add nsw i32 %21, 1
+ store i32 %inc33, i32* %i, align 4
+ store i32 -419541644, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 5), align 4
+ store i32 2125926812, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 5), align 4
+ %22 = load i32* %j, align 4
+ %inc34 = add nsw i32 %22, 1
+ store i32 %inc34, i32* %j, align 4
+ %23 = load i32* %i, align 4
+ %idxprom35 = sext i32 %23 to i64
+ %arrayidx36 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
+ store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
+ %24 = load i32* %i, align 4
+ %idxprom37 = sext i32 %24 to i64
+ %arrayidx38 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
+ store i64 8, i64* %arrayidx38, align 8
+ %25 = load i32* %i, align 4
+ %idxprom39 = sext i32 %25 to i64
+ %arrayidx40 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
+ store i64 8, i64* %arrayidx40, align 8
+ store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
+ store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
+ %26 = load i32* %i, align 4
+ %inc41 = add nsw i32 %26, 1
+ store i32 %inc41, i32* %i, align 4
+ %bf.load = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ %bf.clear = and i32 %bf.load, 7
+ %bf.set = or i32 %bf.clear, 16
+ store i32 %bf.set, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 1), align 8
+ %bf.load42 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ %bf.clear43 = and i32 %bf.load42, 7
+ %bf.set44 = or i32 %bf.clear43, 24
+ store i32 %bf.set44, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 1), align 8
+ %27 = load i32* %j, align 4
+ %inc45 = add nsw i32 %27, 1
+ store i32 %inc45, i32* %j, align 4
+ %bf.load46 = load i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ %bf.clear47 = and i16 %bf.load46, 127
+ store i16 %bf.clear47, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 2), align 4
+ %bf.load48 = load i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ %bf.clear49 = and i16 %bf.load48, 127
+ store i16 %bf.clear49, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 2), align 4
+ %28 = load i32* %j, align 4
+ %inc50 = add nsw i32 %28, 1
+ store i32 %inc50, i32* %j, align 4
+ %bf.load51 = load i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ %bf.clear52 = and i32 %bf.load51, 63
+ store i32 %bf.clear52, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 3), align 8
+ %bf.load53 = load i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ %bf.clear54 = and i32 %bf.load53, 63
+ %bf.set55 = or i32 %bf.clear54, 64
+ store i32 %bf.set55, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 3), align 8
+ %29 = load i32* %j, align 4
+ %inc56 = add nsw i32 %29, 1
+ store i32 %inc56, i32* %j, align 4
+ %bf.load57 = load i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.clear58 = and i24 %bf.load57, 63
+ store i24 %bf.clear58, i24* bitcast ([3 x i8]* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.load59 = load i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+ %bf.clear60 = and i24 %bf.load59, 63
+ store i24 %bf.clear60, i24* bitcast ([3 x i8]* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 4) to i24*), align 4
+ %30 = load i32* %j, align 4
+ %inc61 = add nsw i32 %30, 1
+ store i32 %inc61, i32* %j, align 4
+ %31 = load i32* %i, align 4
+ %idxprom62 = sext i32 %31 to i64
+ %arrayidx63 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
+ %32 = load i32* %i, align 4
+ %idxprom64 = sext i32 %32 to i64
+ %arrayidx65 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
+ store i64 1, i64* %arrayidx65, align 8
+ %33 = load i32* %i, align 4
+ %idxprom66 = sext i32 %33 to i64
+ %arrayidx67 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
+ store i64 1, i64* %arrayidx67, align 8
+ store i8 -83, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
+ store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
+ %34 = load i32* %i, align 4
+ %inc68 = add nsw i32 %34, 1
+ store i32 %inc68, i32* %i, align 4
+ %35 = load i32* %i, align 4
+ %idxprom69 = sext i32 %35 to i64
+ %arrayidx70 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
+ %36 = load i32* %i, align 4
+ %idxprom71 = sext i32 %36 to i64
+ %arrayidx72 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
+ store i64 1, i64* %arrayidx72, align 8
+ %37 = load i32* %i, align 4
+ %idxprom73 = sext i32 %37 to i64
+ %arrayidx74 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
+ store i64 1, i64* %arrayidx74, align 8
+ store i8 34, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
+ store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
+ %38 = load i32* %i, align 4
+ %inc75 = add nsw i32 %38, 1
+ store i32 %inc75, i32* %i, align 4
+ %39 = load i32* %i, align 4
+ %idxprom76 = sext i32 %39 to i64
+ %arrayidx77 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
+ store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
+ %40 = load i32* %i, align 4
+ %idxprom78 = sext i32 %40 to i64
+ %arrayidx79 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
+ store i64 4, i64* %arrayidx79, align 8
+ %41 = load i32* %i, align 4
+ %idxprom80 = sext i32 %41 to i64
+ %arrayidx81 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
+ store i64 4, i64* %arrayidx81, align 8
+ store i32 -3, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
+ store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
+ %42 = load i32* %i, align 4
+ %inc82 = add nsw i32 %42, 1
+ store i32 %inc82, i32* %i, align 4
+ %43 = load i32* %i, align 4
+ %idxprom83 = sext i32 %43 to i64
+ %arrayidx84 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
+ %44 = load i32* %i, align 4
+ %idxprom85 = sext i32 %44 to i64
+ %arrayidx86 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
+ store i64 1, i64* %arrayidx86, align 8
+ %45 = load i32* %i, align 4
+ %idxprom87 = sext i32 %45 to i64
+ %arrayidx88 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
+ store i64 1, i64* %arrayidx88, align 8
+ store i8 106, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
+ store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
+ %46 = load i32* %i, align 4
+ %inc89 = add nsw i32 %46, 1
+ store i32 %inc89, i32* %i, align 4
+ %47 = load i32* %i, align 4
+ %idxprom90 = sext i32 %47 to i64
+ %arrayidx91 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
+ store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
+ %48 = load i32* %i, align 4
+ %idxprom92 = sext i32 %48 to i64
+ %arrayidx93 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
+ store i64 2, i64* %arrayidx93, align 8
+ %49 = load i32* %i, align 4
+ %idxprom94 = sext i32 %49 to i64
+ %arrayidx95 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
+ store i64 2, i64* %arrayidx95, align 8
+ store i16 29665, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7), align 2
+ store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
+ %50 = load i32* %i, align 4
+ %inc96 = add nsw i32 %50, 1
+ store i32 %inc96, i32* %i, align 4
+ %51 = load i32* %i, align 4
+ %idxprom97 = sext i32 %51 to i64
+ %arrayidx98 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
+ store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
+ %52 = load i32* %i, align 4
+ %idxprom99 = sext i32 %52 to i64
+ %arrayidx100 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
+ store i64 1, i64* %arrayidx100, align 8
+ %53 = load i32* %i, align 4
+ %idxprom101 = sext i32 %53 to i64
+ %arrayidx102 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
+ store i64 1, i64* %arrayidx102, align 8
+ store i8 52, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), align 1
+ store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
+ %54 = load i32* %i, align 4
+ %inc103 = add nsw i32 %54, 1
+ store i32 %inc103, i32* %i, align 4
+ %55 = load i32* %i, align 4
+ %idxprom104 = sext i32 %55 to i64
+ %arrayidx105 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
+ store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
+ %56 = load i32* %i, align 4
+ %idxprom106 = sext i32 %56 to i64
+ %arrayidx107 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
+ store i64 4, i64* %arrayidx107, align 8
+ %57 = load i32* %i, align 4
+ %idxprom108 = sext i32 %57 to i64
+ %arrayidx109 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
+ store i64 4, i64* %arrayidx109, align 8
+ store i32 -54118453, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9), align 4
+ store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
+ %58 = load i32* %i, align 4
+ %inc110 = add nsw i32 %58, 1
+ store i32 %inc110, i32* %i, align 4
+ store i32 %inc110, i32* %tmp
+ %59 = load i32* %tmp
+ %60 = load i32* %i, align 4
+ store i32 %60, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 0), align 4
+ %61 = load i32* %j, align 4
+ store i32 %61, i32* getelementptr inbounds (%struct.Info* @info, i32 0, i32 1), align 4
+ %62 = bitcast %struct.S1998* %agg.tmp111 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %62, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %63 = bitcast %struct.S1998* %agg.tmp112 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %63, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112)
+ call void @checkx1998(%struct.S1998* byval align 16 %agg.tmp)
+ %64 = bitcast %struct.S1998* %agg.tmp113 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %64, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %65 = bitcast %struct.S1998* %agg.tmp114 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %65, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ %66 = bitcast %struct.S1998* %agg.tmp115 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %66, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ call void (i32, ...)* @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115)
+ %67 = bitcast %struct.S1998* %agg.tmp116 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %67, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %68 = bitcast %struct.S1998* %agg.tmp117 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %68, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ %69 = bitcast %struct.S1998* %agg.tmp118 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %69, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false)
+ %70 = bitcast %struct.S1998* %agg.tmp119 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %70, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false)
+ call void (i32, ...)* @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119)
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+
+declare void @check1998(%struct.S1998* sret, %struct.S1998* byval align 16, %struct.S1998*, %struct.S1998* byval align 16)
+declare void @check1998va(i32 signext, ...)
+declare void @checkx1998(%struct.S1998* byval align 16 %arg)
+
diff --git a/test/CodeGen/PowerPC/resolvefi-disp.ll b/test/CodeGen/PowerPC/resolvefi-disp.ll
new file mode 100644
index 000000000000..ca42bcd767a0
--- /dev/null
+++ b/test/CodeGen/PowerPC/resolvefi-disp.ll
@@ -0,0 +1,71 @@
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -print-after=localstackalloc <%s >%t 2>&1 && FileCheck <%t %s
+
+; Due to a bug in isFrameOffsetLegal we ended up with resolveFrameIndex creating
+; addresses with out-of-range displacements. Verify that this no longer happens.
+; CHECK-NOT: LD {{3276[8-9]}}
+; CHECK-NOT: LD {{327[7-9][0-9]}}
+; CHECK-NOT: LD {{32[8-9][0-9][0-9]}}
+; CHECK-NOT: LD {{3[3-9][0-9][0-9][0-9]}}
+; CHECK-NOT: LD {{[4-9][0-9][0-9][0-9][0-9]}}
+; CHECK-NOT: LD {{[1-9][0-9][0-9][0-9][0-9][0-9]+}}
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+%struct.S2760 = type { <2 x float>, %struct.anon, i32, [28 x i8] }
+%struct.anon = type { [11 x %struct.anon.0], i64, [6 x { i64, i64 }], [24 x i8] }
+%struct.anon.0 = type { [30 x %union.U4DI], i8, [0 x i16], [30 x i8] }
+%union.U4DI = type { <4 x i64> }
+
+@s2760 = external global %struct.S2760
+@fails = external global i32
+
+define void @check2760(%struct.S2760* noalias sret %agg.result, %struct.S2760* byval align 16, %struct.S2760* %arg1, %struct.S2760* byval align 16) {
+entry:
+ %arg0 = alloca %struct.S2760, align 32
+ %arg2 = alloca %struct.S2760, align 32
+ %arg1.addr = alloca %struct.S2760*, align 8
+ %ret = alloca %struct.S2760, align 32
+ %b1 = alloca %struct.S2760, align 32
+ %b2 = alloca %struct.S2760, align 32
+ %2 = bitcast %struct.S2760* %arg0 to i8*
+ %3 = bitcast %struct.S2760* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 11104, i32 16, i1 false)
+ %4 = bitcast %struct.S2760* %arg2 to i8*
+ %5 = bitcast %struct.S2760* %1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %5, i64 11104, i32 16, i1 false)
+ store %struct.S2760* %arg1, %struct.S2760** %arg1.addr, align 8
+ %6 = bitcast %struct.S2760* %ret to i8*
+ call void @llvm.memset.p0i8.i64(i8* %6, i8 0, i64 11104, i32 32, i1 false)
+ %7 = bitcast %struct.S2760* %b1 to i8*
+ call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 11104, i32 32, i1 false)
+ %8 = bitcast %struct.S2760* %b2 to i8*
+ call void @llvm.memset.p0i8.i64(i8* %8, i8 0, i64 11104, i32 32, i1 false)
+ %b = getelementptr inbounds %struct.S2760* %arg0, i32 0, i32 1
+ %g = getelementptr inbounds %struct.anon* %b, i32 0, i32 1
+ %9 = load i64* %g, align 8
+ %10 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
+ %cmp = icmp ne i64 %9, %10
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ %11 = load i32* @fails, align 4
+ %inc = add nsw i32 %11, 1
+ store i32 %inc, i32* @fails, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %12 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
+ %b3 = getelementptr inbounds %struct.S2760* %ret, i32 0, i32 1
+ %g4 = getelementptr inbounds %struct.anon* %b3, i32 0, i32 1
+ store i64 %12, i64* %g4, align 8
+ %13 = bitcast %struct.S2760* %agg.result to i8*
+ %14 = bitcast %struct.S2760* %ret to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %13, i8* %14, i64 11104, i32 32, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
diff --git a/test/CodeGen/PowerPC/rlwimi-and.ll b/test/CodeGen/PowerPC/rlwimi-and.ll
index 7963249ddf83..213363ee819f 100644
--- a/test/CodeGen/PowerPC/rlwimi-and.ll
+++ b/test/CodeGen/PowerPC/rlwimi-and.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=-crbits < %s | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-bgq-linux"
diff --git a/test/CodeGen/PowerPC/rlwimi-dyn-and.ll b/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
new file mode 100644
index 000000000000..e02801fafbf5
--- /dev/null
+++ b/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i32 @test1() #0 {
+entry:
+ %conv67.reload = load i32* undef
+ %const = bitcast i32 65535 to i32
+ br label %next
+
+next:
+ %shl161 = shl nuw nsw i32 %conv67.reload, 15
+ %0 = load i8* undef, align 1
+ %conv169 = zext i8 %0 to i32
+ %shl170 = shl nuw nsw i32 %conv169, 7
+ %const_mat = add i32 %const, -32767
+ %shl161.masked = and i32 %shl161, %const_mat
+ %conv174 = or i32 %shl170, %shl161.masked
+ ret i32 %conv174
+
+; CHECK-LABEL: @test1
+; CHECK-NOT: rlwimi 3, {{[0-9]+}}, 15, 0, 16
+; CHECK: blr
+}
+
+define i32 @test2() #0 {
+entry:
+ %conv67.reload = load i32* undef
+ %const = bitcast i32 65535 to i32
+ br label %next
+
+next:
+ %shl161 = shl nuw nsw i32 %conv67.reload, 15
+ %0 = load i8* undef, align 1
+ %conv169 = zext i8 %0 to i32
+ %shl170 = shl nuw nsw i32 %conv169, 7
+ %shl161.masked = and i32 %shl161, 32768
+ %conv174 = or i32 %shl170, %shl161.masked
+ ret i32 %conv174
+
+; CHECK-LABEL: @test2
+; CHECK: slwi 3, {{[0-9]+}}, 7
+; CHECK: rlwimi 3, {{[0-9]+}}, 15, 16, 16
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/sdag-ppcf128.ll b/test/CodeGen/PowerPC/sdag-ppcf128.ll
index 535ece6d3dfe..c46bc6b22dde 100644
--- a/test/CodeGen/PowerPC/sdag-ppcf128.ll
+++ b/test/CodeGen/PowerPC/sdag-ppcf128.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mattr=-crbits < %s | FileCheck %s
;
; PR14751: Unsupported type in SelectionDAG::getConstantFP()
diff --git a/test/CodeGen/PowerPC/sections.ll b/test/CodeGen/PowerPC/sections.ll
index 0ff4a89ff379..d77dfddd0f90 100644
--- a/test/CodeGen/PowerPC/sections.ll
+++ b/test/CodeGen/PowerPC/sections.ll
@@ -1,8 +1,12 @@
; Test to make sure that bss sections are printed with '.section' directive.
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -relocation-model=pic | FileCheck %s -check-prefix=PIC
@A = global i32 0
; CHECK: .section .bss,"aw",@nobits
; CHECK: .globl A
+; PIC: .section .got2,"aw",@progbits
+; PIC: .section .bss,"aw",@nobits
+; PIC: .globl A
diff --git a/test/CodeGen/PowerPC/setcc_no_zext.ll b/test/CodeGen/PowerPC/setcc_no_zext.ll
index 9b2036e1dc52..467e921f74f1 100644
--- a/test/CodeGen/PowerPC/setcc_no_zext.ll
+++ b/test/CodeGen/PowerPC/setcc_no_zext.ll
@@ -1,5 +1,9 @@
; RUN: llc < %s -march=ppc32 | not grep rlwinm
+; FIXME: This optimization has temporarily regressed with crbits enabled by
+; default at the default CodeOpt level.
+; XFAIL: *
+
define i32 @setcc_one_or_zero(i32* %a) {
entry:
%tmp.1 = icmp ne i32* %a, null ; <i1> [#uses=1]
diff --git a/test/CodeGen/PowerPC/seteq-0.ll b/test/CodeGen/PowerPC/seteq-0.ll
index 731958374ee2..b7dd78085eb1 100644
--- a/test/CodeGen/PowerPC/seteq-0.ll
+++ b/test/CodeGen/PowerPC/seteq-0.ll
@@ -1,9 +1,12 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | \
-; RUN: grep "srwi r., r., 5"
+; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | FileCheck %s
define i32 @eq0(i32 %a) {
%tmp.1 = icmp eq i32 %a, 0 ; <i1> [#uses=1]
%tmp.2 = zext i1 %tmp.1 to i32 ; <i32> [#uses=1]
ret i32 %tmp.2
+
+; CHECK: cntlzw [[REG:r[0-9]+]], r3
+; CHECK: rlwinm r3, [[REG]], 27, 31, 31
+; CHECK: blr
}
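
; Editorial note: the checks added above reflect PowerPC's branchless lowering of
; (a == 0): cntlzw returns 32 only when its input is zero, so shifting the count
; right by 5 yields exactly the zero-extended i1 result, and the rlwinm with
; rotate 27 and mask 31,31 is that shift (the old grep looked for the equivalent
; srwi). A minimal IR sketch of the same identity, hand-expanded with llvm.ctlz;
; illustrative only, not part of the test:

define i32 @eq0_expanded(i32 %a) {
entry:
  %clz = call i32 @llvm.ctlz.i32(i32 %a, i1 false)  ; 32 iff %a == 0, otherwise 0..31
  %res = lshr i32 %clz, 5                           ; 32 >> 5 == 1, anything smaller gives 0
  ret i32 %res
}

declare i32 @llvm.ctlz.i32(i32, i1)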
diff --git a/test/CodeGen/PowerPC/sjlj.ll b/test/CodeGen/PowerPC/sjlj.ll
index 414640b2b7e3..f9f887af31f3 100644
--- a/test/CodeGen/PowerPC/sjlj.ll
+++ b/test/CodeGen/PowerPC/sjlj.ll
@@ -134,8 +134,8 @@ return: ; preds = %if.end, %if.then
; CHECK: @main2
; CHECK: addis [[REG:[0-9]+]], 2, env_sigill@toc@ha
-; CHECK: std 31, env_sigill@toc@l([[REG]])
-; CHECK: addi [[REGB:[0-9]+]], [[REG]], env_sigill@toc@l
+; CHECK-DAG: std 31, env_sigill@toc@l([[REG]])
+; CHECK-DAG: addi [[REGB:[0-9]+]], [[REG]], env_sigill@toc@l
; CHECK-DAG: std [[REGB]], [[OFF:[0-9]+]](31) # 8-byte Folded Spill
; CHECK-DAG: std 1, 16([[REGB]])
; CHECK-DAG: std 2, 24([[REGB]])
diff --git a/test/CodeGen/PowerPC/splat-bug.ll b/test/CodeGen/PowerPC/splat-bug.ll
new file mode 100644
index 000000000000..4b5250b259fa
--- /dev/null
+++ b/test/CodeGen/PowerPC/splat-bug.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mcpu=ppc64 -O0 -fast-isel=false < %s | FileCheck %s
+
+; Checks for a previous bug where vspltisb/vaddubm were issued in place
+; of vspltish/vadduhm.
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@a = external global <16 x i8>
+
+define void @foo() nounwind ssp {
+; CHECK: foo:
+ store <16 x i8> <i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16, i8 0, i8 16>, <16 x i8>* @a
+; CHECK: vspltish [[REG:[0-9]+]], 8
+; CHECK: vadduhm {{[0-9]+}}, [[REG]], [[REG]]
+ ret void
+}
+
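
; Editorial note: on this big-endian target, the <16 x i8> constant above (bytes
; 0 and 16 alternating) has the same bit pattern as a splat of the halfword 16
; (0x0010) across <8 x i16>. Because 16 falls just outside vspltish's signed
; 5-bit immediate range of -16..15, the correct materialization is vspltish 8
; followed by vadduhm (8 + 8 in every halfword), which is what the CHECK lines
; require; the buggy vspltisb 8 / vaddubm pair would instead put 16 in every
; byte, i.e. 0x1010 per halfword. A hand-written IR view of the same store as
; halfwords, assuming the layout above; illustrative only, not part of the test:

@a_hw = external global <16 x i8>   ; hypothetical stand-in for @a from the test

define void @foo_halfwords() nounwind {
  store <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, <8 x i16>* bitcast (<16 x i8>* @a_hw to <8 x i16>*)
  ret void
}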
diff --git a/test/CodeGen/PowerPC/srl-mask.ll b/test/CodeGen/PowerPC/srl-mask.ll
new file mode 100644
index 000000000000..2749df99fd4f
--- /dev/null
+++ b/test/CodeGen/PowerPC/srl-mask.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i64 @foo(i64 %x) #0 {
+entry:
+; CHECK-LABEL: @foo
+ %a = lshr i64 %x, 35
+ %b = and i64 %a, 65535
+; CHECK: rldicl 3, 3, 29, 48
+ ret i64 %b
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/stack-realign.ll b/test/CodeGen/PowerPC/stack-realign.ll
index 1c7a36aeeabf..a59fceb5bdd0 100644
--- a/test/CodeGen/PowerPC/stack-realign.ll
+++ b/test/CodeGen/PowerPC/stack-realign.ll
@@ -1,5 +1,7 @@
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -disable-fp-elim < %s | FileCheck -check-prefix=CHECK-FP %s
+; RUN: llc -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim < %s | FileCheck -check-prefix=CHECK-32 %s
+; RUN: llc -mtriple=powerpc-unknown-linux-gnu -disable-fp-elim -relocation-model=pic < %s | FileCheck -check-prefix=CHECK-32-PIC %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -7,6 +9,8 @@ target triple = "powerpc64-unknown-linux-gnu"
declare void @bar(i32*)
+@barbaz = external global i32
+
define void @goo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
@@ -16,8 +20,9 @@ entry:
store i32 %0, i32* %arrayidx, align 32
%b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
+ %2 = load i32* @barbaz, align 4
%arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
+ store i32 %2, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx)
ret void
}
@@ -69,6 +74,24 @@ entry:
; CHECK-FP-DAG: mtlr 0
; CHECK-FP: blr
+; CHECK-32-LABEL: @goo
+; CHECK-32-DAG: mflr 0
+; CHECK-32-DAG: rlwinm [[REG:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-DAG: stw 30, -8(1)
+; CHECK-32-DAG: mr 30, 1
+; CHECK-32-DAG: stw 0, 4(1)
+; CHECK-32-DAG: subfic 0, [[REG]], -64
+; CHECK-32: stwux 1, 1, 0
+
+; CHECK-32-PIC-LABEL: @goo
+; CHECK-32-PIC-DAG: mflr 0
+; CHECK-32-PIC-DAG: rlwinm [[REG:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-PIC-DAG: stw 29, -12(1)
+; CHECK-32-PIC-DAG: mr 29, 1
+; CHECK-32-PIC-DAG: stw 0, 4(1)
+; CHECK-32-PIC-DAG: subfic 0, [[REG]], -64
+; CHECK-32-PIC: stwux 1, 1, 0
+
; The large-frame-size case.
define void @hoo(%struct.s* byval nocapture readonly %a) {
entry:
@@ -99,6 +122,34 @@ entry:
; CHECK: blr
+; CHECK-32-LABEL: @hoo
+
+; CHECK-32-DAG: lis [[REG1:[0-9]+]], -13
+; CHECK-32-DAG: rlwinm [[REG3:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-DAG: mflr 0
+; CHECK-32-DAG: ori [[REG2:[0-9]+]], [[REG1]], 51904
+; CHECK-32-DAG: stw 30, -8(1)
+; CHECK-32-DAG: mr 30, 1
+; CHECK-32-DAG: stw 0, 4(1)
+; CHECK-32-DAG: subfc 0, [[REG3]], [[REG2]]
+; CHECK-32: stwux 1, 1, 0
+
+; CHECK-32: blr
+
+; CHECK-32-PIC-LABEL: @hoo
+
+; CHECK-32-PIC-DAG: lis [[REG1:[0-9]+]], -13
+; CHECK-32-PIC-DAG: rlwinm [[REG3:[0-9]+]], 1, 0, 27, 31
+; CHECK-32-PIC-DAG: mflr 0
+; CHECK-32-PIC-DAG: ori [[REG2:[0-9]+]], [[REG1]], 51904
+; CHECK-32-PIC-DAG: stw 29, -12(1)
+; CHECK-32-PIC-DAG: mr 29, 1
+; CHECK-32-PIC-DAG: stw 0, 4(1)
+; CHECK-32-PIC-DAG: subfc 0, [[REG3]], [[REG2]]
+; CHECK-32: stwux 1, 1, 0
+
+; CHECK-32: blr
+
; Make sure that the FP save area is still allocated correctly relative to
; where r30 is saved.
define void @loo(%struct.s* byval nocapture readonly %a) {
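
; Editorial note: the CHECK-32 and CHECK-32-PIC sequences added above encode the
; 32-bit stack-realignment prologue: rlwinm REG, 1, 0, 27, 31 extracts the low
; five bits of the incoming stack pointer (its misalignment modulo 32),
; subfic/subfc computes minus (frame size plus that misalignment), and stwux
; stores the back chain while moving r1 to the new, 32-byte-aligned frame.
; A minimal IR sketch of that arithmetic for the small-frame @goo case, assuming
; the 64-byte frame implied by the subfic immediate; illustrative only, not part
; of the test:

define i32 @realign_sketch(i32 %sp) {
entry:
  %mis = and i32 %sp, 31      ; rlwinm REG, 1, 0, 27, 31 : SP modulo 32
  %adj = sub i32 -64, %mis    ; subfic 0, REG, -64       : -(64 + misalignment)
  %newsp = add i32 %sp, %adj  ; stwux 1, 1, 0            : r1 <- r1 + r0, back chain stored at new r1
  ret i32 %newsp              ; 32-byte aligned, at least 64 bytes below the old SP
}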
diff --git a/test/CodeGen/PowerPC/stfiwx.ll b/test/CodeGen/PowerPC/stfiwx.ll
index 1ad558c6abc9..588e44fb28d3 100644
--- a/test/CodeGen/PowerPC/stfiwx.ll
+++ b/test/CodeGen/PowerPC/stfiwx.ll
@@ -1,18 +1,27 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=stfiwx -o %t1
-; RUN: grep stfiwx %t1
-; RUN: not grep r1 %t1
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=-stfiwx \
-; RUN: -o %t2
-; RUN: not grep stfiwx %t2
-; RUN: grep r1 %t2
+; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=stfiwx | FileCheck %s
+; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=-stfiwx | FileCheck -check-prefix=CHECK-LS %s
-define void @test(float %a, i32* %b) nounwind {
+define void @test1(float %a, i32* %b) nounwind {
+; CHECK-LABEL: @test1
+; CHECK-LS-LABEL: @test1
%tmp.2 = fptosi float %a to i32 ; <i32> [#uses=1]
store i32 %tmp.2, i32* %b
ret void
+
+; CHECK-NOT: lwz
+; CHECK-NOT: stw
+; CHECK: stfiwx
+; CHECK: blr
+
+; CHECK-LS: lwz
+; CHECK-LS: stw
+; CHECK-LS-NOT: stfiwx
+; CHECK-LS: blr
}
define void @test2(float %a, i32* %b, i32 %i) nounwind {
+; CHECK-LABEL: @test2
+; CHECK-LS-LABEL: @test2
%tmp.2 = getelementptr i32* %b, i32 1 ; <i32*> [#uses=1]
%tmp.5 = getelementptr i32* %b, i32 %i ; <i32*> [#uses=1]
%tmp.7 = fptosi float %a to i32 ; <i32> [#uses=3]
@@ -20,5 +29,15 @@ define void @test2(float %a, i32* %b, i32 %i) nounwind {
store i32 %tmp.7, i32* %tmp.2
store i32 %tmp.7, i32* %b
ret void
+
+; CHECK-NOT: lwz
+; CHECK-NOT: stw
+; CHECK: stfiwx
+; CHECK: blr
+
+; CHECK-LS: lwz
+; CHECK-LS: stw
+; CHECK-LS-NOT: stfiwx
+; CHECK-LS: blr
}
diff --git a/test/CodeGen/PowerPC/structsinmem.ll b/test/CodeGen/PowerPC/structsinmem.ll
index 5b8dead16893..b5552af0eb51 100644
--- a/test/CodeGen/PowerPC/structsinmem.ll
+++ b/test/CodeGen/PowerPC/structsinmem.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=pwr7 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
+; RUN: llc -mcpu=ppc64 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/structsinregs.ll b/test/CodeGen/PowerPC/structsinregs.ll
index fb3bd7cd57e6..cfe32e9560ae 100644
--- a/test/CodeGen/PowerPC/structsinregs.ll
+++ b/test/CodeGen/PowerPC/structsinregs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=pwr7 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
+; RUN: llc -mcpu=ppc64 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/subsumes-pred-regs.ll b/test/CodeGen/PowerPC/subsumes-pred-regs.ll
index 97ac788164ab..da637cd2548b 100644
--- a/test/CodeGen/PowerPC/subsumes-pred-regs.ll
+++ b/test/CodeGen/PowerPC/subsumes-pred-regs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=ppc64 | FileCheck %s
+; RUN: llc < %s -mcpu=ppc64 -mattr=-crbits | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/svr4-redzone.ll b/test/CodeGen/PowerPC/svr4-redzone.ll
index 7c51b67aeecb..bee3ac32b648 100644
--- a/test/CodeGen/PowerPC/svr4-redzone.ll
+++ b/test/CodeGen/PowerPC/svr4-redzone.ll
@@ -36,4 +36,4 @@ entry:
; PPC32: stwu 1, -240(1)
; PPC64-LABEL: bigstack:
-; PPC64: stdu 1, -352(1)
+; PPC64: stdu 1, -288(1)
diff --git a/test/CodeGen/PowerPC/tls-2.ll b/test/CodeGen/PowerPC/tls-2.ll
deleted file mode 100644
index c2faf9062469..000000000000
--- a/test/CodeGen/PowerPC/tls-2.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-freebsd10.0"
-; RUN: llc -O1 < %s -march=ppc64 | FileCheck %s
-
-@a = thread_local global i32 0, align 4
-
-;CHECK-LABEL: localexec:
-define i32 @localexec() nounwind {
-entry:
-;CHECK: addis [[REG1:[0-9]+]], 13, a@tprel@ha
-;CHECK-NEXT: li [[REG2:[0-9]+]], 42
-;CHECK-NEXT: stw [[REG2]], a@tprel@l([[REG1]])
- store i32 42, i32* @a, align 4
- ret i32 0
-}
diff --git a/test/CodeGen/PowerPC/tls-gd.ll b/test/CodeGen/PowerPC/tls-gd.ll
deleted file mode 100644
index 5f0ef9a050da..000000000000
--- a/test/CodeGen/PowerPC/tls-gd.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc -mcpu=pwr7 -O0 -relocation-model=pic < %s | FileCheck %s
-
-; Test correct assembly code generation for thread-local storage using
-; the general dynamic model.
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-linux-gnu"
-
-@a = thread_local global i32 0, align 4
-
-define signext i32 @main() nounwind {
-entry:
- %retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32* @a, align 4
- ret i32 %0
-}
-
-; CHECK: addis [[REG:[0-9]+]], 2, a@got@tlsgd@ha
-; CHECK-NEXT: addi 3, [[REG]], a@got@tlsgd@l
-; CHECK: bl __tls_get_addr(a@tlsgd)
-; CHECK-NEXT: nop
-
diff --git a/test/CodeGen/PowerPC/tls-ie.ll b/test/CodeGen/PowerPC/tls-ie.ll
deleted file mode 100644
index c5cfba7b3f7a..000000000000
--- a/test/CodeGen/PowerPC/tls-ie.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc -mcpu=pwr7 -O0 <%s | FileCheck %s
-
-; Test correct assembly code generation for thread-local storage
-; using the initial-exec model.
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-linux-gnu"
-
-@a = external thread_local global i32
-
-define signext i32 @main() nounwind {
-entry:
- %retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32* @a, align 4
- ret i32 %0
-}
-
-; CHECK: addis [[REG1:[0-9]+]], 2, a@got@tprel@ha
-; CHECK: ld [[REG2:[0-9]+]], a@got@tprel@l([[REG1]])
-; CHECK: add {{[0-9]+}}, [[REG2]], a@tls
-
diff --git a/test/CodeGen/PowerPC/tls-ld-2.ll b/test/CodeGen/PowerPC/tls-ld-2.ll
deleted file mode 100644
index 4399b330ea47..000000000000
--- a/test/CodeGen/PowerPC/tls-ld-2.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc -mcpu=pwr7 -O1 -relocation-model=pic < %s | FileCheck %s
-
-; Test peephole optimization for thread-local storage using the
-; local dynamic model.
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-linux-gnu"
-
-@a = hidden thread_local global i32 0, align 4
-
-define signext i32 @main() nounwind {
-entry:
- %retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32* @a, align 4
- ret i32 %0
-}
-
-; CHECK: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
-; CHECK-NEXT: addi 3, [[REG]], a@got@tlsld@l
-; CHECK: bl __tls_get_addr(a@tlsld)
-; CHECK-NEXT: nop
-; CHECK: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
-; CHECK-NEXT: lwa {{[0-9]+}}, a@dtprel@l([[REG2]])
diff --git a/test/CodeGen/PowerPC/tls-ld.ll b/test/CodeGen/PowerPC/tls-ld.ll
deleted file mode 100644
index db02a56f6a22..000000000000
--- a/test/CodeGen/PowerPC/tls-ld.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc -mcpu=pwr7 -O0 -relocation-model=pic < %s | FileCheck %s
-
-; Test correct assembly code generation for thread-local storage using
-; the local dynamic model.
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-linux-gnu"
-
-@a = hidden thread_local global i32 0, align 4
-
-define signext i32 @main() nounwind {
-entry:
- %retval = alloca i32, align 4
- store i32 0, i32* %retval
- %0 = load i32* @a, align 4
- ret i32 %0
-}
-
-; CHECK: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
-; CHECK-NEXT: addi 3, [[REG]], a@got@tlsld@l
-; CHECK: bl __tls_get_addr(a@tlsld)
-; CHECK-NEXT: nop
-; CHECK: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
-; CHECK-NEXT: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
diff --git a/test/CodeGen/PowerPC/tls-pic.ll b/test/CodeGen/PowerPC/tls-pic.ll
new file mode 100644
index 000000000000..9f3ab6e3b491
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls-pic.ll
@@ -0,0 +1,55 @@
+; RUN: llc -march=ppc64 -mcpu=pwr7 -O0 -relocation-model=pic < %s | FileCheck -check-prefix=OPT0 %s
+; RUN: llc -march=ppc64 -mcpu=pwr7 -O1 -relocation-model=pic < %s | FileCheck -check-prefix=OPT1 %s
+
+target triple = "powerpc64-unknown-linux-gnu"
+; Test correct assembly code generation for thread-local storage using
+; the local dynamic model.
+
+@a = hidden thread_local global i32 0, align 4
+
+define signext i32 @main() nounwind {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* @a, align 4
+ ret i32 %0
+}
+
+; OPT0-LABEL: main:
+; OPT0: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
+; OPT0-NEXT: addi 3, [[REG]], a@got@tlsld@l
+; OPT0: bl __tls_get_addr(a@tlsld)
+; OPT0-NEXT: nop
+; OPT0: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
+; OPT0-NEXT: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
+
+; Test peephole optimization for thread-local storage using the
+; local dynamic model.
+
+; OPT1-LABEL: main:
+; OPT1: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
+; OPT1-NEXT: addi 3, [[REG]], a@got@tlsld@l
+; OPT1: bl __tls_get_addr(a@tlsld)
+; OPT1-NEXT: nop
+; OPT1: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
+; OPT1-NEXT: lwa {{[0-9]+}}, a@dtprel@l([[REG2]])
+
+; Test correct assembly code generation for thread-local storage using
+; the general dynamic model.
+
+@a2 = thread_local global i32 0, align 4
+
+define signext i32 @main2() nounwind {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* @a2, align 4
+ ret i32 %0
+}
+
+; OPT1-LABEL: main2
+; OPT1: addis [[REG:[0-9]+]], 2, a2@got@tlsgd@ha
+; OPT1-NEXT: addi 3, [[REG]], a2@got@tlsgd@l
+; OPT1: bl __tls_get_addr(a2@tlsgd)
+; OPT1-NEXT: nop
+
diff --git a/test/CodeGen/PowerPC/tls.ll b/test/CodeGen/PowerPC/tls.ll
index 4e0a822399dd..59b4de755988 100644
--- a/test/CodeGen/PowerPC/tls.ll
+++ b/test/CodeGen/PowerPC/tls.ll
@@ -1,7 +1,8 @@
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
-target triple = "powerpc64-unknown-freebsd10.0"
-; RUN: llc -O0 < %s -march=ppc64 | FileCheck -check-prefix=OPT0 %s
-; RUN: llc -O1 < %s -march=ppc64 | FileCheck -check-prefix=OPT1 %s
+; RUN: llc -O0 < %s -march=ppc64 -mcpu=ppc64 | FileCheck -check-prefix=OPT0 %s
+; RUN: llc -O1 < %s -march=ppc64 -mcpu=ppc64 | FileCheck -check-prefix=OPT1 %s
+; RUN: llc -O0 < %s -march=ppc32 -mcpu=ppc | FileCheck -check-prefix=OPT0-PPC32 %s
+
+target triple = "powerpc64-unknown-linux-gnu"
@a = thread_local global i32 0, align 4
@@ -19,3 +20,27 @@ entry:
store i32 42, i32* @a, align 4
ret i32 0
}
+
+; Test correct assembly code generation for thread-local storage
+; using the initial-exec model.
+
+@a2 = external thread_local global i32
+
+define signext i32 @main2() nounwind {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* @a2, align 4
+ ret i32 %0
+}
+
+; OPT1-LABEL: main2:
+; OPT1: addis [[REG1:[0-9]+]], 2, a2@got@tprel@ha
+; OPT1: ld [[REG2:[0-9]+]], a2@got@tprel@l([[REG1]])
+; OPT1: add {{[0-9]+}}, [[REG2]], a2@tls
+
+;OPT0-PPC32-LABEL: main2:
+;OPT0-PPC32: li [[REG1:[0-9]+]], _GLOBAL_OFFSET_TABLE_@l
+;OPT0-PPC32: addis [[REG1]], [[REG1]], _GLOBAL_OFFSET_TABLE_@ha
+;OPT0-PPC32: lwz [[REG2:[0-9]+]], a2@got@tprel@l([[REG1]])
+;OPT0-PPC32: add 3, [[REG2]], a2@tls
diff --git a/test/CodeGen/PowerPC/toc-load-sched-bug.ll b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
new file mode 100644
index 000000000000..d437915e6c3f
--- /dev/null
+++ b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
@@ -0,0 +1,534 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+; This test checks for misordering of a TOC restore instruction relative
+; to subsequent uses of the TOC register. Previously this test broke
+; because there was no TOC register dependency between the instructions,
+; and the usual stack-adjust instructions that held the TOC restore in
+; place were optimized away.
+
+%"class.llvm::Module" = type { %"class.llvm::LLVMContext"*, %"class.llvm::iplist", %"class.llvm::iplist.0", %"class.llvm::iplist.9", %"struct.llvm::ilist", %"class.std::basic_string", %"class.llvm::ValueSymbolTable"*, %"class.llvm::StringMap", %"class.std::unique_ptr", %"class.std::basic_string", %"class.std::basic_string", i8*, %"class.llvm::RandomNumberGenerator"*, %"class.std::basic_string", %"class.llvm::DataLayout" }
+%"class.llvm::iplist" = type { %"struct.llvm::ilist_traits", %"class.llvm::GlobalVariable"* }
+%"struct.llvm::ilist_traits" = type { %"class.llvm::ilist_node" }
+%"class.llvm::ilist_node" = type { %"class.llvm::ilist_half_node", %"class.llvm::GlobalVariable"* }
+%"class.llvm::ilist_half_node" = type { %"class.llvm::GlobalVariable"* }
+%"class.llvm::GlobalVariable" = type { %"class.llvm::GlobalObject", %"class.llvm::ilist_node", i8 }
+%"class.llvm::GlobalObject" = type { %"class.llvm::GlobalValue", %"class.std::basic_string", %"class.llvm::Comdat"* }
+%"class.llvm::GlobalValue" = type { %"class.llvm::Constant", i32, %"class.llvm::Module"* }
+%"class.llvm::Constant" = type { %"class.llvm::User" }
+%"class.llvm::User" = type { %"class.llvm::Value.base", i32, %"class.llvm::Use"* }
+%"class.llvm::Value.base" = type <{ i32 (...)**, %"class.llvm::Type"*, %"class.llvm::Use"*, %"class.llvm::StringMapEntry"*, i8, i8, i16 }>
+%"class.llvm::Type" = type { %"class.llvm::LLVMContext"*, i32, i32, %"class.llvm::Type"** }
+%"class.llvm::StringMapEntry" = type opaque
+%"class.llvm::Use" = type { %"class.llvm::Value"*, %"class.llvm::Use"*, %"class.llvm::PointerIntPair" }
+%"class.llvm::Value" = type { i32 (...)**, %"class.llvm::Type"*, %"class.llvm::Use"*, %"class.llvm::StringMapEntry"*, i8, i8, i16 }
+%"class.llvm::PointerIntPair" = type { i64 }
+%"class.llvm::Comdat" = type { %"class.llvm::StringMapEntry.43"*, i32 }
+%"class.llvm::StringMapEntry.43" = type opaque
+%"class.llvm::iplist.0" = type { %"struct.llvm::ilist_traits.1", %"class.llvm::Function"* }
+%"struct.llvm::ilist_traits.1" = type { %"class.llvm::ilist_node.7" }
+%"class.llvm::ilist_node.7" = type { %"class.llvm::ilist_half_node.8", %"class.llvm::Function"* }
+%"class.llvm::ilist_half_node.8" = type { %"class.llvm::Function"* }
+%"class.llvm::Function" = type { %"class.llvm::GlobalObject", %"class.llvm::ilist_node.7", %"class.llvm::iplist.44", %"class.llvm::iplist.52", %"class.llvm::ValueSymbolTable"*, %"class.llvm::AttributeSet" }
+%"class.llvm::iplist.44" = type { %"struct.llvm::ilist_traits.45", %"class.llvm::BasicBlock"* }
+%"struct.llvm::ilist_traits.45" = type { %"class.llvm::ilist_half_node.51" }
+%"class.llvm::ilist_half_node.51" = type { %"class.llvm::BasicBlock"* }
+%"class.llvm::BasicBlock" = type { %"class.llvm::Value.base", %"class.llvm::ilist_node.61", %"class.llvm::iplist.62", %"class.llvm::Function"* }
+%"class.llvm::ilist_node.61" = type { %"class.llvm::ilist_half_node.51", %"class.llvm::BasicBlock"* }
+%"class.llvm::iplist.62" = type { %"struct.llvm::ilist_traits.63", %"class.llvm::Instruction"* }
+%"struct.llvm::ilist_traits.63" = type { %"class.llvm::ilist_half_node.69" }
+%"class.llvm::ilist_half_node.69" = type { %"class.llvm::Instruction"* }
+%"class.llvm::Instruction" = type { %"class.llvm::User", %"class.llvm::ilist_node.70", %"class.llvm::BasicBlock"*, %"class.llvm::DebugLoc" }
+%"class.llvm::ilist_node.70" = type { %"class.llvm::ilist_half_node.69", %"class.llvm::Instruction"* }
+%"class.llvm::DebugLoc" = type { i32, i32 }
+%"class.llvm::iplist.52" = type { %"struct.llvm::ilist_traits.53", %"class.llvm::Argument"* }
+%"struct.llvm::ilist_traits.53" = type { %"class.llvm::ilist_half_node.59" }
+%"class.llvm::ilist_half_node.59" = type { %"class.llvm::Argument"* }
+%"class.llvm::Argument" = type { %"class.llvm::Value.base", %"class.llvm::ilist_node.60", %"class.llvm::Function"* }
+%"class.llvm::ilist_node.60" = type { %"class.llvm::ilist_half_node.59", %"class.llvm::Argument"* }
+%"class.llvm::AttributeSet" = type { %"class.llvm::AttributeSetImpl"* }
+%"class.llvm::AttributeSetImpl" = type opaque
+%"class.llvm::iplist.9" = type { %"struct.llvm::ilist_traits.10", %"class.llvm::GlobalAlias"* }
+%"struct.llvm::ilist_traits.10" = type { %"class.llvm::ilist_node.16" }
+%"class.llvm::ilist_node.16" = type { %"class.llvm::ilist_half_node.17", %"class.llvm::GlobalAlias"* }
+%"class.llvm::ilist_half_node.17" = type { %"class.llvm::GlobalAlias"* }
+%"class.llvm::GlobalAlias" = type { %"class.llvm::GlobalValue", %"class.llvm::ilist_node.16" }
+%"struct.llvm::ilist" = type { %"class.llvm::iplist.18" }
+%"class.llvm::iplist.18" = type { %"struct.llvm::ilist_traits.19", %"class.llvm::NamedMDNode"* }
+%"struct.llvm::ilist_traits.19" = type { %"class.llvm::ilist_node.24" }
+%"class.llvm::ilist_node.24" = type { %"class.llvm::ilist_half_node.25", %"class.llvm::NamedMDNode"* }
+%"class.llvm::ilist_half_node.25" = type { %"class.llvm::NamedMDNode"* }
+%"class.llvm::NamedMDNode" = type { %"class.llvm::ilist_node.24", %"class.std::basic_string", %"class.llvm::Module"*, i8* }
+%"class.llvm::ValueSymbolTable" = type opaque
+%"class.llvm::StringMap" = type { %"class.llvm::StringMapImpl", %"class.llvm::MallocAllocator" }
+%"class.llvm::StringMapImpl" = type { %"class.llvm::StringMapEntryBase"**, i32, i32, i32, i32 }
+%"class.llvm::StringMapEntryBase" = type { i32 }
+%"class.llvm::MallocAllocator" = type { i8 }
+%"class.std::unique_ptr" = type { %"class.std::tuple" }
+%"class.std::tuple" = type { %"struct.std::_Tuple_impl" }
+%"struct.std::_Tuple_impl" = type { %"struct.std::_Head_base.28" }
+%"struct.std::_Head_base.28" = type { %"class.llvm::GVMaterializer"* }
+%"class.llvm::GVMaterializer" = type opaque
+%"class.llvm::RandomNumberGenerator" = type opaque
+%"class.std::basic_string" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+%"class.llvm::DataLayout" = type { i8, i32, i32, [4 x i8], %"class.llvm::SmallVector", %"class.llvm::SmallVector.29", %"class.llvm::SmallVector.36", i8* }
+%"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl.base", %"struct.llvm::SmallVectorStorage" }
+%"class.llvm::SmallVectorImpl.base" = type { %"class.llvm::SmallVectorTemplateBase.base" }
+%"class.llvm::SmallVectorTemplateBase.base" = type { %"class.llvm::SmallVectorTemplateCommon.base" }
+%"class.llvm::SmallVectorTemplateCommon.base" = type <{ %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion" }>
+%"class.llvm::SmallVectorBase" = type { i8*, i8*, i8* }
+%"struct.llvm::AlignedCharArrayUnion" = type { %"struct.llvm::AlignedCharArray" }
+%"struct.llvm::AlignedCharArray" = type { [1 x i8] }
+%"struct.llvm::SmallVectorStorage" = type { [7 x %"struct.llvm::AlignedCharArrayUnion"] }
+%"class.llvm::SmallVector.29" = type { %"class.llvm::SmallVectorImpl.30", %"struct.llvm::SmallVectorStorage.35" }
+%"class.llvm::SmallVectorImpl.30" = type { %"class.llvm::SmallVectorTemplateBase.31" }
+%"class.llvm::SmallVectorTemplateBase.31" = type { %"class.llvm::SmallVectorTemplateCommon.32" }
+%"class.llvm::SmallVectorTemplateCommon.32" = type { %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion.33" }
+%"struct.llvm::AlignedCharArrayUnion.33" = type { %"struct.llvm::AlignedCharArray.34" }
+%"struct.llvm::AlignedCharArray.34" = type { [8 x i8] }
+%"struct.llvm::SmallVectorStorage.35" = type { [15 x %"struct.llvm::AlignedCharArrayUnion.33"] }
+%"class.llvm::SmallVector.36" = type { %"class.llvm::SmallVectorImpl.37", %"struct.llvm::SmallVectorStorage.42" }
+%"class.llvm::SmallVectorImpl.37" = type { %"class.llvm::SmallVectorTemplateBase.38" }
+%"class.llvm::SmallVectorTemplateBase.38" = type { %"class.llvm::SmallVectorTemplateCommon.39" }
+%"class.llvm::SmallVectorTemplateCommon.39" = type { %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion.40" }
+%"struct.llvm::AlignedCharArrayUnion.40" = type { %"struct.llvm::AlignedCharArray.41" }
+%"struct.llvm::AlignedCharArray.41" = type { [16 x i8] }
+%"struct.llvm::SmallVectorStorage.42" = type { [7 x %"struct.llvm::AlignedCharArrayUnion.40"] }
+%"class.llvm::SMDiagnostic" = type { %"class.llvm::SourceMgr"*, %"class.llvm::SMLoc", %"class.std::basic_string", i32, i32, i32, %"class.std::basic_string", %"class.std::basic_string", %"class.std::vector.79", %"class.llvm::SmallVector.84" }
+%"class.llvm::SourceMgr" = type { %"class.std::vector", %"class.std::vector.74", i8*, void (%"class.llvm::SMDiagnostic"*, i8*)*, i8* }
+%"class.std::vector" = type { %"struct.std::_Vector_base" }
+%"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<llvm::SourceMgr::SrcBuffer, std::allocator<llvm::SourceMgr::SrcBuffer> >::_Vector_impl" }
+%"struct.std::_Vector_base<llvm::SourceMgr::SrcBuffer, std::allocator<llvm::SourceMgr::SrcBuffer> >::_Vector_impl" = type { %"struct.llvm::SourceMgr::SrcBuffer"*, %"struct.llvm::SourceMgr::SrcBuffer"*, %"struct.llvm::SourceMgr::SrcBuffer"* }
+%"struct.llvm::SourceMgr::SrcBuffer" = type { %"class.llvm::MemoryBuffer"*, %"class.llvm::SMLoc" }
+%"class.llvm::MemoryBuffer" = type { i32 (...)**, i8*, i8* }
+%"class.std::vector.74" = type { %"struct.std::_Vector_base.75" }
+%"struct.std::_Vector_base.75" = type { %"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" }
+%"struct.std::_Vector_base<std::basic_string<char>, std::allocator<std::basic_string<char> > >::_Vector_impl" = type { %"class.std::basic_string"*, %"class.std::basic_string"*, %"class.std::basic_string"* }
+%"class.llvm::SMLoc" = type { i8* }
+%"class.std::vector.79" = type { %"struct.std::_Vector_base.80" }
+%"struct.std::_Vector_base.80" = type { %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" }
+%"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>, std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { %"struct.std::pair"*, %"struct.std::pair"*, %"struct.std::pair"* }
+%"struct.std::pair" = type { i32, i32 }
+%"class.llvm::SmallVector.84" = type { %"class.llvm::SmallVectorImpl.85", %"struct.llvm::SmallVectorStorage.90" }
+%"class.llvm::SmallVectorImpl.85" = type { %"class.llvm::SmallVectorTemplateBase.86" }
+%"class.llvm::SmallVectorTemplateBase.86" = type { %"class.llvm::SmallVectorTemplateCommon.87" }
+%"class.llvm::SmallVectorTemplateCommon.87" = type { %"class.llvm::SmallVectorBase", %"struct.llvm::AlignedCharArrayUnion.88" }
+%"struct.llvm::AlignedCharArrayUnion.88" = type { %"struct.llvm::AlignedCharArray.89" }
+%"struct.llvm::AlignedCharArray.89" = type { [24 x i8] }
+%"struct.llvm::SmallVectorStorage.90" = type { [3 x %"struct.llvm::AlignedCharArrayUnion.88"] }
+%"class.llvm::LLVMContext" = type { %"class.llvm::LLVMContextImpl"* }
+%"class.llvm::LLVMContextImpl" = type opaque
+%"class.std::allocator" = type { i8 }
+%"class.llvm::ErrorOr.109" = type { %union.anon.110, i8, [7 x i8] }
+%union.anon.110 = type { %"struct.llvm::AlignedCharArrayUnion.93" }
+%"struct.llvm::AlignedCharArrayUnion.93" = type { %"struct.llvm::AlignedCharArray.94" }
+%"struct.llvm::AlignedCharArray.94" = type { [16 x i8] }
+%"class.llvm::ErrorOr" = type { %union.anon, i8, [7 x i8] }
+%union.anon = type { %"struct.llvm::AlignedCharArrayUnion.93" }
+%"class.std::error_category" = type { i32 (...)** }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep_base" }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep_base" = type { i64, i64, i32 }
+%"class.llvm::SMFixIt" = type { %"class.llvm::SMRange", %"class.std::basic_string" }
+%"class.llvm::SMRange" = type { %"class.llvm::SMLoc", %"class.llvm::SMLoc" }
+%"struct.llvm::NamedRegionTimer" = type { %"class.llvm::TimeRegion" }
+%"class.llvm::TimeRegion" = type { %"class.llvm::Timer"* }
+%"class.llvm::Timer" = type { %"class.llvm::TimeRecord", %"class.std::basic_string", i8, %"class.llvm::TimerGroup"*, %"class.llvm::Timer"**, %"class.llvm::Timer"* }
+%"class.llvm::TimeRecord" = type { double, double, double, i64 }
+%"class.llvm::TimerGroup" = type { %"class.std::basic_string", %"class.llvm::Timer"*, %"class.std::vector.103", %"class.llvm::TimerGroup"**, %"class.llvm::TimerGroup"* }
+%"class.std::vector.103" = type { %"struct.std::_Vector_base.104" }
+%"struct.std::_Vector_base.104" = type { %"struct.std::_Vector_base<std::pair<llvm::TimeRecord, std::basic_string<char> >, std::allocator<std::pair<llvm::TimeRecord, std::basic_string<char> > > >::_Vector_impl" }
+%"struct.std::_Vector_base<std::pair<llvm::TimeRecord, std::basic_string<char> >, std::allocator<std::pair<llvm::TimeRecord, std::basic_string<char> > > >::_Vector_impl" = type { %"struct.std::pair.108"*, %"struct.std::pair.108"*, %"struct.std::pair.108"* }
+%"struct.std::pair.108" = type opaque
+%struct.LLVMOpaqueContext = type opaque
+%struct.LLVMOpaqueMemoryBuffer = type opaque
+%struct.LLVMOpaqueModule = type opaque
+%"class.llvm::raw_string_ostream" = type { %"class.llvm::raw_ostream.base", %"class.std::basic_string"* }
+%"class.llvm::raw_ostream.base" = type <{ i32 (...)**, i8*, i8*, i8*, i32 }>
+%"class.llvm::raw_ostream" = type { i32 (...)**, i8*, i8*, i8*, i32 }
+
+@.str = private unnamed_addr constant [28 x i8] c"Could not open input file: \00", align 1
+@.str1 = private unnamed_addr constant [54 x i8] c"!HasError && \22Cannot get value when an error exists!\22\00", align 1
+@.str2 = private unnamed_addr constant [61 x i8] c"/home/wschmidt/llvm/llvm-test/include/llvm/Support/ErrorOr.h\00", align 1
+@__PRETTY_FUNCTION__._ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv = private unnamed_addr constant [206 x i8] c"storage_type *llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer> > >::getStorage() [T = std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer> >]\00", align 1
+@_ZNSs4_Rep20_S_empty_rep_storageE = external global [0 x i64]
+
+declare void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(%"class.llvm::ErrorOr"* sret, [2 x i64], i64) #1
+
+declare void @_ZN4llvm16NamedRegionTimerC1ENS_9StringRefES1_b(%"struct.llvm::NamedRegionTimer"*, [2 x i64], [2 x i64], i1 zeroext) #1
+
+; Function Attrs: nounwind
+define %"class.llvm::Module"* @_ZN4llvm11ParseIRFileERKSsRNS_12SMDiagnosticERNS_11LLVMContextE(%"class.std::basic_string"* nocapture readonly dereferenceable(8) %Filename, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context) #0 {
+entry:
+; CHECK: .globl _ZN4llvm11ParseIRFileERKSsRNS_12SMDiagnosticERNS_11LLVMContextE
+; CHECK: bctrl
+; CHECK: ld 2, 24(1)
+; CHECK: addis [[REG:[0-9]+]], 2, .L.str@toc@ha
+; CHECK: addi {{[0-9]+}}, [[REG]], .L.str@toc@l
+; CHECK: bl _ZNSs6insertEmPKcm
+ %.atomicdst.i.i.i.i.i46 = alloca i32, align 4
+ %ref.tmp.i.i47 = alloca %"class.std::allocator", align 1
+ %.atomicdst.i.i.i.i.i = alloca i32, align 4
+ %ref.tmp.i.i = alloca %"class.std::allocator", align 1
+ %ref.tmp.i.i2.i = alloca %"class.std::allocator", align 1
+ %ref.tmp.i.i.i = alloca %"class.std::allocator", align 1
+ %FileOrErr = alloca %"class.llvm::ErrorOr", align 8
+ %ref.tmp = alloca %"class.llvm::SMDiagnostic", align 8
+ %ref.tmp5 = alloca %"class.std::basic_string", align 8
+ %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string"* %Filename, i64 0, i32 0, i32 0
+ %0 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
+ %1 = ptrtoint i8* %0 to i64
+ %arrayidx.i.i.i = getelementptr inbounds i8* %0, i64 -24
+ %_M_length.i.i = bitcast i8* %arrayidx.i.i.i to i64*
+ %2 = load i64* %_M_length.i.i, align 8, !tbaa !7
+ %.fca.0.insert18 = insertvalue [2 x i64] undef, i64 %1, 0
+ %.fca.1.insert21 = insertvalue [2 x i64] %.fca.0.insert18, i64 %2, 1
+ call void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(%"class.llvm::ErrorOr"* sret %FileOrErr, [2 x i64] %.fca.1.insert21, i64 -1) #3
+ %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 1
+ %bf.load.i25 = load i8* %HasError.i24, align 8
+ %3 = and i8 %bf.load.i25, 1
+ %bf.cast.i26 = icmp eq i8 %3, 0
+ br i1 %bf.cast.i26, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, label %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
+
+_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit: ; preds = %entry
+ %retval.sroa.0.0..sroa_cast.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to i64*
+ %retval.sroa.0.0.copyload.i = load i64* %retval.sroa.0.0..sroa_cast.i, align 8
+ %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
+ %retval.sroa.3.0..sroa_cast.i = bitcast i8* %retval.sroa.3.0..sroa_idx.i to i64*
+ %retval.sroa.3.0.copyload.i = load i64* %retval.sroa.3.0..sroa_cast.i, align 8
+ %phitmp = trunc i64 %retval.sroa.0.0.copyload.i to i32
+ %cmp.i = icmp eq i32 %phitmp, 0
+ br i1 %cmp.i, label %cond.false.i.i, label %if.then
+
+if.then: ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
+ %.c = inttoptr i64 %retval.sroa.3.0.copyload.i to %"class.std::error_category"*
+ %4 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
+ %arrayidx.i.i.i30 = getelementptr inbounds i8* %4, i64 -24
+ %_M_length.i.i31 = bitcast i8* %arrayidx.i.i.i30 to i64*
+ %5 = load i64* %_M_length.i.i31, align 8, !tbaa !7
+ %6 = inttoptr i64 %retval.sroa.3.0.copyload.i to void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)***
+ %vtable.i = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*** %6, align 8, !tbaa !11
+ %vfn.i = getelementptr inbounds void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vtable.i, i64 3
+ %7 = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vfn.i, align 8
+ call void %7(%"class.std::basic_string"* sret %ref.tmp5, %"class.std::error_category"* %.c, i32 signext %phitmp) #3
+ %call2.i.i = call dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"* %ref.tmp5, i64 0, i8* getelementptr inbounds ([28 x i8]* @.str, i64 0, i64 0), i64 27) #3
+ %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string"* %call2.i.i, i64 0, i32 0, i32 0
+ %8 = load i8** %_M_p2.i.i.i.i, align 8, !tbaa !13
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p2.i.i.i.i, align 8, !tbaa !1
+ %arrayidx.i.i.i36 = getelementptr inbounds i8* %8, i64 -24
+ %_M_length.i.i37 = bitcast i8* %arrayidx.i.i.i36 to i64*
+ %9 = load i64* %_M_length.i.i37, align 8, !tbaa !7
+ %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2
+ %10 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
+ %11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8*
+ call void @llvm.memset.p0i8.i64(i8* %11, i8 0, i64 16, i32 8, i1 false) #3
+ call void @llvm.lifetime.start(i64 1, i8* %10) #3
+ %tobool.i.i4.i = icmp eq i8* %4, null
+ br i1 %tobool.i.i4.i, label %if.then.i.i6.i, label %if.end.i.i8.i
+
+if.then.i.i6.i: ; preds = %if.then
+ %_M_p.i.i.i.i.i.i5.i = getelementptr inbounds %"class.std::basic_string"* %Filename.i, i64 0, i32 0, i32 0
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i5.i, align 8, !tbaa !13
+ br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
+
+if.end.i.i8.i: ; preds = %if.then
+ call void @_ZNSsC1EPKcmRKSaIcE(%"class.std::basic_string"* %Filename.i, i8* %4, i64 %5, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i2.i) #3
+ br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
+
+_ZNK4llvm9StringRefcvSsEv.exit9.i: ; preds = %if.end.i.i8.i, %if.then.i.i6.i
+ call void @llvm.lifetime.end(i64 1, i8* %10) #3
+ %LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
+ store i32 -1, i32* %LineNo.i, align 8, !tbaa !14
+ %ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
+ store i32 -1, i32* %ColumnNo.i, align 4, !tbaa !21
+ %Kind.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 5
+ store i32 0, i32* %Kind.i, align 8, !tbaa !22
+ %Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
+ %12 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
+ call void @llvm.lifetime.start(i64 1, i8* %12) #3
+ %tobool.i.i.i = icmp eq i8* %8, null
+ br i1 %tobool.i.i.i, label %if.then.i.i.i, label %if.end.i.i.i
+
+if.then.i.i.i: ; preds = %_ZNK4llvm9StringRefcvSsEv.exit9.i
+ %_M_p.i.i.i.i.i.i.i = getelementptr inbounds %"class.std::basic_string"* %Message.i, i64 0, i32 0, i32 0
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i.i, align 8, !tbaa !13
+ br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
+
+if.end.i.i.i: ; preds = %_ZNK4llvm9StringRefcvSsEv.exit9.i
+ call void @_ZNSsC1EPKcmRKSaIcE(%"class.std::basic_string"* %Message.i, i8* %8, i64 %9, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i.i) #3
+ br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
+
+_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds = %if.then.i.i.i, %if.end.i.i.i
+ call void @llvm.lifetime.end(i64 1, i8* %12) #3
+ %_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
+ store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13
+ %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
+ %13 = bitcast %"class.std::vector.79"* %Ranges.i to i8*
+ call void @llvm.memset.p0i8.i64(i8* %13, i8 0, i64 24, i32 8, i1 false) #3
+ %14 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0
+ %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0
+ store i8* %14, i8** %BeginX.i.i.i.i.i.i, align 8, !tbaa !23
+ %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 1
+ store i8* %14, i8** %EndX.i.i.i.i.i.i, align 8, !tbaa !25
+ %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 2
+ %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96
+ store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 8, !tbaa !26
+ %15 = bitcast %"class.llvm::SMDiagnostic"* %Err to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %15, i8* %11, i64 16, i32 8, i1 false) #3
+ %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 2
+ call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Filename.i38, %"class.std::basic_string"* dereferenceable(8) %Filename.i) #3
+ %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 3
+ %16 = bitcast i32* %LineNo.i39 to i8*
+ %17 = bitcast i32* %LineNo.i to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %16, i8* %17, i64 12, i32 4, i1 false) #3
+ %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 6
+ call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Message.i40, %"class.std::basic_string"* dereferenceable(8) %Message.i) #3
+ %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 7
+ %LineContents7.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7
+ call void @_ZNSs4swapERSs(%"class.std::basic_string"* %LineContents.i, %"class.std::basic_string"* dereferenceable(8) %LineContents7.i) #3
+ %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8
+ %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79"* %Ranges.i41, i64 0, i32 0, i32 0, i32 0
+ %18 = load %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
+ %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 1
+ %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2
+ %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
+ %19 = bitcast %"class.std::vector.79"* %Ranges.i41 to i8*
+ call void @llvm.memset.p0i8.i64(i8* %19, i8 0, i64 16, i32 8, i1 false) #3
+ %20 = load %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
+ store %"struct.std::pair"* %20, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
+ store %"struct.std::pair"* null, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
+ %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
+ %21 = load %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
+ store %"struct.std::pair"* %21, %"struct.std::pair"** %_M_finish.i9.i.i.i, align 8, !tbaa !27
+ store %"struct.std::pair"* null, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
+ %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
+ %22 = load %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
+ store %"struct.std::pair"* %22, %"struct.std::pair"** %_M_end_of_storage.i11.i.i.i, align 8, !tbaa !27
+ store %"struct.std::pair"* null, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
+ %tobool.i.i.i.i.i.i = icmp eq %"struct.std::pair"* %18, null
+ br i1 %tobool.i.i.i.i.i.i, label %_ZN4llvm12SMDiagnosticaSEOS0_.exit, label %if.then.i.i.i.i.i.i
+
+if.then.i.i.i.i.i.i: ; preds = %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
+ %23 = bitcast %"struct.std::pair"* %18 to i8*
+ call void @_ZdlPv(i8* %23) #3
+ br label %_ZN4llvm12SMDiagnosticaSEOS0_.exit
+
+_ZN4llvm12SMDiagnosticaSEOS0_.exit: ; preds = %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit, %if.then.i.i.i.i.i.i
+ %24 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 9, i32 0
+ %25 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0
+ %call2.i.i42 = call dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %24, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %25) #3
+ call void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* %ref.tmp) #3
+ %26 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
+ call void @llvm.lifetime.start(i64 1, i8* %26) #3
+ %27 = bitcast i8* %arrayidx.i.i.i36 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
+ %cmp.i.i.i = icmp eq i8* %arrayidx.i.i.i36, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
+ br i1 %cmp.i.i.i, label %_ZNSsD1Ev.exit, label %if.then.i.i.i45, !prof !28
+
+if.then.i.i.i45: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit
+ %_M_refcount.i.i.i = getelementptr inbounds i8* %8, i64 -8
+ %28 = bitcast i8* %_M_refcount.i.i.i to i32*
+ br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i, label %if.else.i.i.i.i
+
+if.then.i.i.i.i: ; preds = %if.then.i.i.i45
+ %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i to i8*
+ call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ %29 = atomicrmw volatile add i32* %28, i32 -1 acq_rel
+ store i32 %29, i32* %.atomicdst.i.i.i.i.i, align 4
+ %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32* %.atomicdst.i.i.i.i.i, align 4
+ call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
+
+if.else.i.i.i.i: ; preds = %if.then.i.i.i45
+ %30 = load i32* %28, align 4, !tbaa !29
+ %add.i.i.i.i.i = add nsw i32 %30, -1
+ store i32 %add.i.i.i.i.i, i32* %28, align 4, !tbaa !29
+ br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
+
+_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i: ; preds = %if.else.i.i.i.i, %if.then.i.i.i.i
+ %retval.0.i.i.i.i = phi i32 [ %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i, %if.then.i.i.i.i ], [ %30, %if.else.i.i.i.i ]
+ %cmp3.i.i.i = icmp slt i32 %retval.0.i.i.i.i, 1
+ br i1 %cmp3.i.i.i, label %if.then4.i.i.i, label %_ZNSsD1Ev.exit
+
+if.then4.i.i.i: ; preds = %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
+ call void @_ZNSs4_Rep10_M_destroyERKSaIcE(%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"* %27, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i) #3
+ br label %_ZNSsD1Ev.exit
+
+_ZNSsD1Ev.exit: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i, %if.then4.i.i.i
+ call void @llvm.lifetime.end(i64 1, i8* %26) #3
+ %31 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
+ call void @llvm.lifetime.start(i64 1, i8* %31) #3
+ %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
+ %32 = load i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
+ %arrayidx.i.i.i49 = getelementptr inbounds i8* %32, i64 -24
+ %33 = bitcast i8* %arrayidx.i.i.i49 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
+ %cmp.i.i.i50 = icmp eq i8* %arrayidx.i.i.i49, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
+ br i1 %cmp.i.i.i50, label %_ZNSsD1Ev.exit62, label %if.then.i.i.i52, !prof !28
+
+if.then.i.i.i52: ; preds = %_ZNSsD1Ev.exit
+ %_M_refcount.i.i.i51 = getelementptr inbounds i8* %32, i64 -8
+ %34 = bitcast i8* %_M_refcount.i.i.i51 to i32*
+ br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i55, label %if.else.i.i.i.i57
+
+if.then.i.i.i.i55: ; preds = %if.then.i.i.i52
+ %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i46 to i8*
+ call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ %35 = atomicrmw volatile add i32* %34, i32 -1 acq_rel
+ store i32 %35, i32* %.atomicdst.i.i.i.i.i46, align 4
+ %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32* %.atomicdst.i.i.i.i.i46, align 4
+ call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
+
+if.else.i.i.i.i57: ; preds = %if.then.i.i.i52
+ %36 = load i32* %34, align 4, !tbaa !29
+ %add.i.i.i.i.i56 = add nsw i32 %36, -1
+ store i32 %add.i.i.i.i.i56, i32* %34, align 4, !tbaa !29
+ br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
+
+_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60: ; preds = %if.else.i.i.i.i57, %if.then.i.i.i.i55
+ %retval.0.i.i.i.i58 = phi i32 [ %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54, %if.then.i.i.i.i55 ], [ %36, %if.else.i.i.i.i57 ]
+ %cmp3.i.i.i59 = icmp slt i32 %retval.0.i.i.i.i58, 1
+ br i1 %cmp3.i.i.i59, label %if.then4.i.i.i61, label %_ZNSsD1Ev.exit62
+
+if.then4.i.i.i61: ; preds = %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
+ call void @_ZNSs4_Rep10_M_destroyERKSaIcE(%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"* %33, %"class.std::allocator"* dereferenceable(1) %ref.tmp.i.i47) #3
+ br label %_ZNSsD1Ev.exit62
+
+_ZNSsD1Ev.exit62: ; preds = %_ZNSsD1Ev.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60, %if.then4.i.i.i61
+ call void @llvm.lifetime.end(i64 1, i8* %31) #3
+ br label %cleanup
+
+cond.false.i.i: ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
+ call void @__assert_fail(i8* getelementptr inbounds ([54 x i8]* @.str1, i64 0, i64 0), i8* getelementptr inbounds ([61 x i8]* @.str2, i64 0, i64 0), i32 zeroext 242, i8* getelementptr inbounds ([206 x i8]* @__PRETTY_FUNCTION__._ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv, i64 0, i64 0)) #7
+ unreachable
+
+_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit: ; preds = %entry
+ %_M_head_impl.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
+ %37 = load %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i, align 8, !tbaa !27
+ %call9 = call %"class.llvm::Module"* @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(%"class.llvm::MemoryBuffer"* %37, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context)
+ br label %cleanup
+
+cleanup: ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit, %_ZNSsD1Ev.exit62
+ %retval.0 = phi %"class.llvm::Module"* [ null, %_ZNSsD1Ev.exit62 ], [ %call9, %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE3getEv.exit ]
+ %bf.load.i = load i8* %HasError.i24, align 8
+ %38 = and i8 %bf.load.i, 1
+ %bf.cast.i = icmp eq i8 %38, 0
+ br i1 %bf.cast.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i, label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit
+
+_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i: ; preds = %cleanup
+ %_M_head_impl.i.i.i.i.i.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to %"class.llvm::MemoryBuffer"**
+ %39 = load %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
+ %cmp.i.i = icmp eq %"class.llvm::MemoryBuffer"* %39, null
+ br i1 %cmp.i.i, label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i, label %_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i
+
+_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i: ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i
+ %40 = bitcast %"class.llvm::MemoryBuffer"* %39 to void (%"class.llvm::MemoryBuffer"*)***
+ %vtable.i.i.i = load void (%"class.llvm::MemoryBuffer"*)*** %40, align 8, !tbaa !11
+ %vfn.i.i.i = getelementptr inbounds void (%"class.llvm::MemoryBuffer"*)** %vtable.i.i.i, i64 1
+ %41 = load void (%"class.llvm::MemoryBuffer"*)** %vfn.i.i.i, align 8
+ call void %41(%"class.llvm::MemoryBuffer"* %39) #3
+ br label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i
+
+_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i: ; preds = %_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i, %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i
+ store %"class.llvm::MemoryBuffer"* null, %"class.llvm::MemoryBuffer"** %_M_head_impl.i.i.i.i.i.i, align 8, !tbaa !27
+ br label %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit
+
+_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.exit: ; preds = %cleanup, %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i
+ ret %"class.llvm::Module"* %retval.0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #3
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #3
+
+; Function Attrs: noreturn nounwind
+declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*) #4
+
+declare dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"*, i64, i8*, i64) #1
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #3
+
+; Function Attrs: nounwind
+declare void @_ZNSs4_Rep10_M_destroyERKSaIcE(%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*, %"class.std::allocator"* dereferenceable(1)) #0
+
+; Function Attrs: nounwind
+declare extern_weak signext i32 @__pthread_key_create(i32*, void (i8*)*) #0
+
+; Function Attrs: nobuiltin nounwind
+declare void @_ZdlPv(i8*) #6
+
+declare void @_ZNSsC1EPKcmRKSaIcE(%"class.std::basic_string"*, i8*, i64, %"class.std::allocator"* dereferenceable(1)) #1
+
+declare hidden void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* readonly %this) unnamed_addr #2 align 2
+
+declare dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %this, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %RHS) #0 align 2
+
+declare %"class.llvm::Module"* @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiagnosticERNS_11LLVMContextE(%"class.llvm::MemoryBuffer"* %Buffer, %"class.llvm::SMDiagnostic"* dereferenceable(200) %Err, %"class.llvm::LLVMContext"* dereferenceable(8) %Context) #0
+
+declare void @_ZNSs4swapERSs(%"class.std::basic_string"*, %"class.std::basic_string"* dereferenceable(8)) #1
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { inlinehint nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind }
+attributes #4 = { noreturn nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #5 = { nounwind readonly "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #6 = { nobuiltin nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #7 = { noreturn nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.6.0 (trunk 215115) (llvm/trunk 215117)"}
+!1 = metadata !{metadata !2, metadata !4, i64 0}
+!2 = metadata !{metadata !"_ZTSSs", metadata !3, i64 0}
+!3 = metadata !{metadata !"_ZTSNSs12_Alloc_hiderE", metadata !4, i64 0}
+!4 = metadata !{metadata !"any pointer", metadata !5, i64 0}
+!5 = metadata !{metadata !"omnipotent char", metadata !6, i64 0}
+!6 = metadata !{metadata !"Simple C/C++ TBAA"}
+!7 = metadata !{metadata !8, metadata !9, i64 0}
+!8 = metadata !{metadata !"_ZTSNSs9_Rep_baseE", metadata !9, i64 0, metadata !9, i64 8, metadata !10, i64 16}
+!9 = metadata !{metadata !"long", metadata !5, i64 0}
+!10 = metadata !{metadata !"int", metadata !5, i64 0}
+!11 = metadata !{metadata !12, metadata !12, i64 0}
+!12 = metadata !{metadata !"vtable pointer", metadata !6, i64 0}
+!13 = metadata !{metadata !3, metadata !4, i64 0}
+!14 = metadata !{metadata !15, metadata !10, i64 24}
+!15 = metadata !{metadata !"_ZTSN4llvm12SMDiagnosticE", metadata !4, i64 0, metadata !16, i64 8, metadata !2, i64 16, metadata !10, i64 24, metadata !10, i64 28, metadata !17, i64 32, metadata !2, i64 40, metadata !2, i64 48, metadata !18, i64 56, metadata !19, i64 80}
+!16 = metadata !{metadata !"_ZTSN4llvm5SMLocE", metadata !4, i64 0}
+!17 = metadata !{metadata !"_ZTSN4llvm9SourceMgr8DiagKindE", metadata !5, i64 0}
+!18 = metadata !{metadata !"_ZTSSt6vectorISt4pairIjjESaIS1_EE"}
+!19 = metadata !{metadata !"_ZTSN4llvm11SmallVectorINS_7SMFixItELj4EEE", metadata !20, i64 48}
+!20 = metadata !{metadata !"_ZTSN4llvm18SmallVectorStorageINS_7SMFixItELj4EEE", metadata !5, i64 0}
+!21 = metadata !{metadata !15, metadata !10, i64 28}
+!22 = metadata !{metadata !15, metadata !17, i64 32}
+!23 = metadata !{metadata !24, metadata !4, i64 0}
+!24 = metadata !{metadata !"_ZTSN4llvm15SmallVectorBaseE", metadata !4, i64 0, metadata !4, i64 8, metadata !4, i64 16}
+!25 = metadata !{metadata !24, metadata !4, i64 8}
+!26 = metadata !{metadata !24, metadata !4, i64 16}
+!27 = metadata !{metadata !4, metadata !4, i64 0}
+!28 = metadata !{metadata !"branch_weights", i32 64, i32 4}
+!29 = metadata !{metadata !10, metadata !10, i64 0}
+!30 = metadata !{metadata !31, metadata !4, i64 8}
+!31 = metadata !{metadata !"_ZTSN4llvm12MemoryBufferE", metadata !4, i64 8, metadata !4, i64 16}
+!32 = metadata !{metadata !31, metadata !4, i64 16}
+!33 = metadata !{metadata !5, metadata !5, i64 0}
+!34 = metadata !{metadata !35, metadata !4, i64 0}
+!35 = metadata !{metadata !"_ZTSSt12_Vector_baseISt4pairIjjESaIS1_EE", metadata !36, i64 0}
+!36 = metadata !{metadata !"_ZTSNSt12_Vector_baseISt4pairIjjESaIS1_EE12_Vector_implE", metadata !4, i64 0, metadata !4, i64 8, metadata !4, i64 16}
+!37 = metadata !{metadata !38, metadata !38, i64 0}
+!38 = metadata !{metadata !"bool", metadata !5, i64 0}
+!39 = metadata !{i8 0, i8 2}
+!40 = metadata !{metadata !41, metadata !4, i64 0}
+!41 = metadata !{metadata !"_ZTSN4llvm10TimeRegionE", metadata !4, i64 0}
+!42 = metadata !{metadata !43, metadata !44, i64 32}
+!43 = metadata !{metadata !"_ZTSN4llvm11raw_ostreamE", metadata !4, i64 8, metadata !4, i64 16, metadata !4, i64 24, metadata !44, i64 32}
+!44 = metadata !{metadata !"_ZTSN4llvm11raw_ostream10BufferKindE", metadata !5, i64 0}
+!45 = metadata !{metadata !43, metadata !4, i64 24}
+!46 = metadata !{metadata !43, metadata !4, i64 8}
+!47 = metadata !{i64 0, i64 8, metadata !27, i64 8, i64 8, metadata !27}
diff --git a/test/CodeGen/PowerPC/unaligned.ll b/test/CodeGen/PowerPC/unaligned.ll
index d05080338f33..d469c62f2f05 100644
--- a/test/CodeGen/PowerPC/unaligned.ll
+++ b/test/CodeGen/PowerPC/unaligned.ll
@@ -65,9 +65,9 @@ entry:
; These loads and stores are legalized into aligned loads and stores
; using aligned stack slots.
; CHECK: @foo6
-; CHECK: ld
-; CHECK: ld
-; CHECK: std
-; CHECK: std
+; CHECK-DAG: ld
+; CHECK-DAG: ld
+; CHECK-DAG: stdx
+; CHECK: stdx
}
diff --git a/test/CodeGen/PowerPC/unwind-dw2-g.ll b/test/CodeGen/PowerPC/unwind-dw2-g.ll
index 260d03664295..24b52070f4f7 100644
--- a/test/CodeGen/PowerPC/unwind-dw2-g.ll
+++ b/test/CodeGen/PowerPC/unwind-dw2-g.ll
@@ -23,7 +23,7 @@ attributes #0 = { nounwind }
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/unwind-dw2.c] [DW_LANG_C99]
!1 = metadata !{metadata !"/tmp/unwind-dw2.c", metadata !"/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, void ()* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/unwind-dw2.c]
diff --git a/test/CodeGen/PowerPC/varargs-struct-float.ll b/test/CodeGen/PowerPC/varargs-struct-float.ll
index fb1835f580b2..0fd9fc50892e 100644
--- a/test/CodeGen/PowerPC/varargs-struct-float.ll
+++ b/test/CodeGen/PowerPC/varargs-struct-float.ll
@@ -16,8 +16,8 @@ entry:
ret void
}
-; CHECK: stfs {{[0-9]+}}, 60(1)
-; CHECK: ld 4, 56(1)
+; CHECK: stfs {{[0-9]+}}, 116(1)
+; CHECK: lwz 4, 116(1)
; CHECK: bl
declare void @testvaSf1(i32, ...)
diff --git a/test/CodeGen/PowerPC/vec_cmp.ll b/test/CodeGen/PowerPC/vec_cmp.ll
index 83e0e0263061..516b2dd58b99 100644
--- a/test/CodeGen/PowerPC/vec_cmp.ll
+++ b/test/CodeGen/PowerPC/vec_cmp.ll
@@ -1,6 +1,6 @@
; RUN: llc -mcpu=pwr6 -mattr=+altivec < %s | FileCheck %s
-; Check vector comparisons using altivec. For non native types, just basic
+; Check vector comparisons using altivec. For non-native types, just basic
; comparison instruction check is done. For altivec supported type (16i8,
; 8i16, 4i32, and 4f32) all the comparisons operators (==, !=, >, >=, <, <=)
; are checked.
@@ -36,7 +36,7 @@ define <8 x i8> @v8si8_cmp(<8 x i8> %x, <8 x i8> %y) nounwind readnone {
; CHECK: vcmpequh {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v16i8 since it is a altivec native type
+; Additional tests for v16i8 since it is an altivec native type
define <16 x i8> @v16si8_cmp_eq(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
%cmp = icmp eq <16 x i8> %x, %y
@@ -63,9 +63,8 @@ entry:
ret <16 x i8> %sext
}
; CHECK-LABEL: v16si8_cmp_le:
-; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtsb [[RCMPLE:[0-9]+]], 3, 2
-; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]]
+; CHECK: vcmpgtsb [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <16 x i8> @v16ui8_cmp_le(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -74,9 +73,8 @@ entry:
ret <16 x i8> %sext
}
; CHECK-LABEL: v16ui8_cmp_le:
-; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtub [[RCMPLE:[0-9]+]], 3, 2
-; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]]
+; CHECK: vcmpgtub [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <16 x i8> @v16si8_cmp_lt(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -121,9 +119,8 @@ entry:
ret <16 x i8> %sext
}
; CHECK-LABEL: v16si8_cmp_ge:
-; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtsb [[RCMPGT:[0-9]+]], 2, 3
-; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]]
+; CHECK: vcmpgtsb [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <16 x i8> @v16ui8_cmp_ge(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -132,9 +129,8 @@ entry:
ret <16 x i8> %sext
}
; CHECK-LABEL: v16ui8_cmp_ge:
-; CHECK: vcmpequb [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtub [[RCMPGT:[0-9]+]], 2, 3
-; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]]
+; CHECK: vcmpgtub [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <32 x i8> @v32si8_cmp(<32 x i8> %x, <32 x i8> %y) nounwind readnone {
@@ -165,7 +161,7 @@ define <4 x i16> @v4si16_cmp(<4 x i16> %x, <4 x i16> %y) nounwind readnone {
; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v8i16 since it is an altivec native type
+; Additional tests for v8i16 since it is an altivec native type
define <8 x i16> @v8si16_cmp_eq(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -193,9 +189,8 @@ entry:
ret <8 x i16> %sext
}
; CHECK-LABEL: v8si16_cmp_le:
-; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtsh [[RCMPLE:[0-9]+]], 3, 2
-; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]]
+; CHECK: vcmpgtsh [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <8 x i16> @v8ui16_cmp_le(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -204,9 +199,8 @@ entry:
ret <8 x i16> %sext
}
; CHECK-LABEL: v8ui16_cmp_le:
-; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtuh [[RCMPLE:[0-9]+]], 3, 2
-; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]]
+; CHECK: vcmpgtuh [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <8 x i16> @v8si16_cmp_lt(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -251,9 +245,8 @@ entry:
ret <8 x i16> %sext
}
; CHECK-LABEL: v8si16_cmp_ge:
-; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtsh [[RCMPGT:[0-9]+]], 2, 3
-; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]]
+; CHECK: vcmpgtsh [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <8 x i16> @v8ui16_cmp_ge(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -262,9 +255,8 @@ entry:
ret <8 x i16> %sext
}
; CHECK-LABEL: v8ui16_cmp_ge:
-; CHECK: vcmpequh [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtuh [[RCMPGT:[0-9]+]], 2, 3
-; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]]
+; CHECK: vcmpgtuh [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <16 x i16> @v16si16_cmp(<16 x i16> %x, <16 x i16> %y) nounwind readnone {
@@ -298,7 +290,7 @@ define <2 x i32> @v2si32_cmp(<2 x i32> %x, <2 x i32> %y) nounwind readnone {
; CHECK: vcmpequw {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v4si32 since it is an altivec native type
+; Additional tests for v4si32 since it is an altivec native type
define <4 x i32> @v4si32_cmp_eq(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -326,9 +318,8 @@ entry:
ret <4 x i32> %sext
}
; CHECK-LABEL: v4si32_cmp_le:
-; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtsw [[RCMPLE:[0-9]+]], 3, 2
-; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]]
+; CHECK: vcmpgtsw [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <4 x i32> @v4ui32_cmp_le(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -337,9 +328,8 @@ entry:
ret <4 x i32> %sext
}
; CHECK-LABEL: v4ui32_cmp_le:
-; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtuw [[RCMPLE:[0-9]+]], 3, 2
-; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]]
+; CHECK: vcmpgtuw [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <4 x i32> @v4si32_cmp_lt(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -384,9 +374,8 @@ entry:
ret <4 x i32> %sext
}
; CHECK-LABEL: v4si32_cmp_ge:
-; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtsw [[RCMPGT:[0-9]+]], 2, 3
-; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]]
+; CHECK: vcmpgtsw [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <4 x i32> @v4ui32_cmp_ge(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -395,9 +384,8 @@ entry:
ret <4 x i32> %sext
}
; CHECK-LABEL: v4ui32_cmp_ge:
-; CHECK: vcmpequw [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtuw [[RCMPGT:[0-9]+]], 2, 3
-; CHECK-NEXT: vor 2, [[RCMPGT]], [[RCMPEQ]]
+; CHECK: vcmpgtuw [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
define <8 x i32> @v8si32_cmp(<8 x i32> %x, <8 x i32> %y) nounwind readnone {
@@ -449,7 +437,7 @@ entry:
; CHECK: vcmpeqfp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
-; Adicional tests for v4f32 since it is a altivec native type
+; Additional tests for v4f32 since it is an altivec native type
define <4 x float> @v4f32_cmp_eq(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -480,9 +468,7 @@ entry:
ret <4 x float> %0
}
; CHECK-LABEL: v4f32_cmp_le:
-; CHECK: vcmpeqfp [[RCMPEQ:[0-9]+]], 2, 3
-; CHECK-NEXT: vcmpgtfp [[RCMPLE:[0-9]+]], 3, 2
-; CHECK-NEXT: vor 2, [[RCMPLE]], [[RCMPEQ]]
+; CHECK: vcmpgefp 2, 3, 2
define <4 x float> @v4f32_cmp_lt(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -514,6 +500,50 @@ entry:
; CHECK-LABEL: v4f32_cmp_gt:
; CHECK: vcmpgtfp 2, 2, 3
+define <4 x float> @v4f32_cmp_ule(<4 x float> %x, <4 x float> %y) nounwind readnone {
+entry:
+ %cmp = fcmp ule <4 x float> %x, %y
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %0 = bitcast <4 x i32> %sext to <4 x float>
+ ret <4 x float> %0
+}
+; CHECK-LABEL: v4f32_cmp_ule:
+; CHECK: vcmpgtfp [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+
+define <4 x float> @v4f32_cmp_ult(<4 x float> %x, <4 x float> %y) nounwind readnone {
+entry:
+ %cmp = fcmp ult <4 x float> %x, %y
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %0 = bitcast <4 x i32> %sext to <4 x float>
+ ret <4 x float> %0
+}
+; CHECK-LABEL: v4f32_cmp_ult:
+; CHECK: vcmpgefp [[RET:[0-9]+]], 2, 3
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+
+define <4 x float> @v4f32_cmp_uge(<4 x float> %x, <4 x float> %y) nounwind readnone {
+entry:
+ %cmp = fcmp uge <4 x float> %x, %y
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %0 = bitcast <4 x i32> %sext to <4 x float>
+ ret <4 x float> %0
+}
+; CHECK-LABEL: v4f32_cmp_uge:
+; CHECK: vcmpgtfp [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+
+define <4 x float> @v4f32_cmp_ugt(<4 x float> %x, <4 x float> %y) nounwind readnone {
+entry:
+ %cmp = fcmp ugt <4 x float> %x, %y
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %0 = bitcast <4 x i32> %sext to <4 x float>
+ ret <4 x float> %0
+}
+; CHECK-LABEL: v4f32_cmp_ugt:
+; CHECK: vcmpgefp [[RET:[0-9]+]], 3, 2
+; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+
define <8 x float> @v8f32_cmp(<8 x float> %x, <8 x float> %y) nounwind readnone {
entry:
diff --git a/test/CodeGen/PowerPC/vec_misaligned.ll b/test/CodeGen/PowerPC/vec_misaligned.ll
index d7ed64a5b1cf..304a84d49a9d 100644
--- a/test/CodeGen/PowerPC/vec_misaligned.ll
+++ b/test/CodeGen/PowerPC/vec_misaligned.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
+; RUN: llc < %s -march=ppc32 -mcpu=g5 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s -check-prefix=CHECK-LE
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-apple-darwin8"
@@ -8,6 +10,8 @@ target triple = "powerpc-apple-darwin8"
define void @foo(i32 %x, ...) {
entry:
+; CHECK: foo:
+; CHECK-LE: foo:
%x_addr = alloca i32 ; <i32*> [#uses=1]
%ap = alloca i8* ; <i8**> [#uses=3]
%ap.0 = alloca i8* ; <i8**> [#uses=3]
@@ -27,6 +31,10 @@ entry:
%tmp8 = getelementptr %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
%tmp9 = getelementptr %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
%tmp10 = load <16 x i8>* %tmp9, align 4 ; <<16 x i8>> [#uses=1]
+; CHECK: lvsl
+; CHECK: vperm
+; CHECK-LE: lvsr
+; CHECK-LE: vperm
store <16 x i8> %tmp10, <16 x i8>* %tmp8, align 4
br label %return
diff --git a/test/CodeGen/PowerPC/vec_mul.ll b/test/CodeGen/PowerPC/vec_mul.ll
index c376751d8060..8a448156c98e 100644
--- a/test/CodeGen/PowerPC/vec_mul.ll
+++ b/test/CodeGen/PowerPC/vec_mul.ll
@@ -1,4 +1,6 @@
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -march=ppc32 -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec | FileCheck %s -check-prefix=CHECK-LE
define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
%tmp = load <4 x i32>* %X ; <<4 x i32>> [#uses=1]
@@ -9,6 +11,9 @@ define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
; CHECK-LABEL: test_v4i32:
; CHECK: vmsumuhm
; CHECK-NOT: mullw
+; CHECK-LE-LABEL: test_v4i32:
+; CHECK-LE: vmsumuhm
+; CHECK-LE-NOT: mullw
define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
%tmp = load <8 x i16>* %X ; <<8 x i16>> [#uses=1]
@@ -19,6 +24,9 @@ define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
; CHECK-LABEL: test_v8i16:
; CHECK: vmladduhm
; CHECK-NOT: mullw
+; CHECK-LE-LABEL: test_v8i16:
+; CHECK-LE: vmladduhm
+; CHECK-LE-NOT: mullw
define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
%tmp = load <16 x i8>* %X ; <<16 x i8>> [#uses=1]
@@ -30,6 +38,11 @@ define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
; CHECK: vmuloub
; CHECK: vmuleub
; CHECK-NOT: mullw
+; CHECK-LE-LABEL: test_v16i8:
+; CHECK-LE: vmuloub [[REG1:[0-9]+]]
+; CHECK-LE: vmuleub [[REG2:[0-9]+]]
+; CHECK-LE: vperm {{[0-9]+}}, [[REG2]], [[REG1]]
+; CHECK-LE-NOT: mullw
define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
%tmp = load <4 x float>* %X
@@ -44,3 +57,7 @@ define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
; CHECK: vspltisw [[ZNEG:[0-9]+]], -1
; CHECK: vslw {{[0-9]+}}, [[ZNEG]], [[ZNEG]]
; CHECK: vmaddfp
+; CHECK-LE-LABEL: test_float:
+; CHECK-LE: vspltisw [[ZNEG:[0-9]+]], -1
+; CHECK-LE: vslw {{[0-9]+}}, [[ZNEG]], [[ZNEG]]
+; CHECK-LE: vmaddfp
diff --git a/test/CodeGen/PowerPC/vec_shuffle_le.ll b/test/CodeGen/PowerPC/vec_shuffle_le.ll
new file mode 100644
index 000000000000..a4b2119f6ebc
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_shuffle_le.ll
@@ -0,0 +1,209 @@
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s
+
+define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VPKUHUM_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vpkuhum [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VPKUHUM_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VPKUHUM_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+; CHECK: vpkuhum
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VPKUWUM_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vpkuwum [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VPKUWUM_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VPKUWUM_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
+; CHECK: vpkuwum
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGLB_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrglb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLB_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGLB_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+; CHECK: vmrglb
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGHB_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrghb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHB_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGHB_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
+; CHECK: vmrghb
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGLH_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrglh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLH_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGLH_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
+; CHECK: vmrglh
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGHH_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrghh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHH_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGHH_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
+; CHECK: vmrghh
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGLW_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrglw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGLW_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGLW_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+; CHECK: vmrglw
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VMRGHW_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vmrghw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VMRGHW_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VMRGHW_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
+; CHECK: vmrghw
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
+define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
+entry:
+; CHECK: VSLDOI_xy:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
+; CHECK: lvx [[REG1:[0-9]+]]
+; CHECK: lvx [[REG2:[0-9]+]]
+; CHECK: vsldoi [[REG3:[0-9]+]], [[REG2]], [[REG1]], 4
+ store <16 x i8> %tmp3, <16 x i8>* %A
+ ret void
+}
+
+define void @VSLDOI_xx(<16 x i8>* %A) {
+entry:
+; CHECK: VSLDOI_xx:
+ %tmp = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+; CHECK: vsldoi
+ store <16 x i8> %tmp2, <16 x i8>* %A
+ ret void
+}
+
diff --git a/test/CodeGen/PowerPC/vec_urem_const.ll b/test/CodeGen/PowerPC/vec_urem_const.ll
new file mode 100644
index 000000000000..814a826aae8a
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_urem_const.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mcpu=pwr6 -mattr=+altivec < %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+; Common code used to replace the urem by a mulhu, and compilation would
+; then crash since mulhu isn't supported on vector types.
+
+define <4 x i32> @test(<4 x i32> %x) {
+entry:
+ %0 = urem <4 x i32> %x, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ ret <4 x i32> %0
+}
diff --git a/test/CodeGen/PowerPC/vperm-instcombine.ll b/test/CodeGen/PowerPC/vperm-instcombine.ll
new file mode 100644
index 000000000000..d9084c8bb595
--- /dev/null
+++ b/test/CodeGen/PowerPC/vperm-instcombine.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define <16 x i8> @foo() nounwind ssp {
+; CHECK: @foo
+;; Arguments are {0,1,...,15},{16,17,...,31},{30,28,26,...,0}
+ %1 = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> <i32 50462976, i32 117835012, i32 185207048, i32 252579084>, <4 x i32> <i32 319951120, i32 387323156, i32 454695192, i32 522067228>, <16 x i8> <i8 30, i8 28, i8 26, i8 24, i8 22, i8 20, i8 18, i8 16, i8 14, i8 12, i8 10, i8 8, i8 6, i8 4, i8 2, i8 0>)
+ %2 = bitcast <4 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+;; Revised arguments are {16,17,...31},{0,1,...,15},{1,3,5,...,31}
+;; optimized into the following:
+; CHECK: ret <16 x i8> <i8 17, i8 19, i8 21, i8 23, i8 25, i8 27, i8 29, i8 31, i8 1, i8 3, i8 5, i8 7, i8 9, i8 11, i8 13, i8 15>
+}
+
+declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)
diff --git a/test/CodeGen/PowerPC/vperm-lowering.ll b/test/CodeGen/PowerPC/vperm-lowering.ll
new file mode 100644
index 000000000000..d55d26c959b6
--- /dev/null
+++ b/test/CodeGen/PowerPC/vperm-lowering.ll
@@ -0,0 +1,66 @@
+; RUN: llc -O0 -fast-isel=false -mcpu=ppc64 < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define <16 x i8> @foo() nounwind ssp {
+ %1 = shufflevector <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 1, i32 6, i32 11>
+ ret <16 x i8> %1
+}
+
+; CHECK: .LCPI0_0:
+; CHECK: .byte 31
+; CHECK: .byte 26
+; CHECK: .byte 21
+; CHECK: .byte 16
+; CHECK: .byte 11
+; CHECK: .byte 6
+; CHECK: .byte 1
+; CHECK: .byte 28
+; CHECK: .byte 23
+; CHECK: .byte 18
+; CHECK: .byte 13
+; CHECK: .byte 8
+; CHECK: .byte 3
+; CHECK: .byte 30
+; CHECK: .byte 25
+; CHECK: .byte 20
+; CHECK: .LCPI0_1:
+; CHECK: .byte 0
+; CHECK: .byte 1
+; CHECK: .byte 2
+; CHECK: .byte 3
+; CHECK: .byte 4
+; CHECK: .byte 5
+; CHECK: .byte 6
+; CHECK: .byte 7
+; CHECK: .byte 8
+; CHECK: .byte 9
+; CHECK: .byte 10
+; CHECK: .byte 11
+; CHECK: .byte 12
+; CHECK: .byte 13
+; CHECK: .byte 14
+; CHECK: .byte 15
+; CHECK: .LCPI0_2:
+; CHECK: .byte 16
+; CHECK: .byte 17
+; CHECK: .byte 18
+; CHECK: .byte 19
+; CHECK: .byte 20
+; CHECK: .byte 21
+; CHECK: .byte 22
+; CHECK: .byte 23
+; CHECK: .byte 24
+; CHECK: .byte 25
+; CHECK: .byte 26
+; CHECK: .byte 27
+; CHECK: .byte 28
+; CHECK: .byte 29
+; CHECK: .byte 30
+; CHECK: .byte 31
+; CHECK: foo:
+; CHECK: addis [[REG1:[0-9]+]], 2, .LCPI0_2@toc@ha
+; CHECK: addi [[REG2:[0-9]+]], [[REG1]], .LCPI0_2@toc@l
+; CHECK: lvx [[REG3:[0-9]+]], 0, [[REG2]]
+; CHECK: vperm {{[0-9]+}}, [[REG3]], {{[0-9]+}}, {{[0-9]+}}
diff --git a/test/CodeGen/PowerPC/vsx-args.ll b/test/CodeGen/PowerPC/vsx-args.ll
new file mode 100644
index 000000000000..520aeb5fa909
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-args.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mcpu=pwr7 -mattr=+vsx | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+declare <2 x double> @sv(<2 x double>, <2 x i64>, <4 x float>) #0
+
+define <2 x double> @main(<4 x float> %a, <2 x double> %b, <2 x i64> %c) #1 {
+entry:
+ %ca = tail call <2 x double> @sv(<2 x double> %b, <2 x i64> %c, <4 x float> %a)
+ %v = fadd <2 x double> %ca, <double 1.0, double 1.0>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @main
+; CHECK-DAG: vor [[V:[0-9]+]], 2, 2
+; CHECK-DAG: xxlor 34, 35, 35
+; CHECK-DAG: xxlor 35, 36, 36
+; CHECK-DAG: vor 4, [[V]], [[V]]
+; CHECK-DAG: bl sv
+; CHECK-DAG: lxvd2x [[VC:[0-9]+]],
+; CHECK: xvadddp 34, 34, [[VC]]
+; CHECK: blr
+}
+
+attributes #0 = { noinline nounwind readnone }
+attributes #1 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/vsx-fma-m.ll b/test/CodeGen/PowerPC/vsx-fma-m.ll
new file mode 100644
index 000000000000..da4a20481e62
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-fma-m.ll
@@ -0,0 +1,238 @@
+; RUN: llc < %s -mcpu=pwr7 -mattr=+vsx | FileCheck %s
+
+; Also run with -schedule-ppc-vsx-fma-mutation-early as a stress test for the
+; live-interval-updating logic.
+; RUN: llc < %s -mcpu=pwr7 -mattr=+vsx -schedule-ppc-vsx-fma-mutation-early
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define void @test1(double %a, double %b, double %c, double %e, double* nocapture %d) #0 {
+entry:
+ %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
+ store double %0, double* %d, align 8
+ %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
+ %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ store double %1, double* %arrayidx1, align 8
+ ret void
+
+; CHECK-LABEL: @test1
+; CHECK-DAG: li [[C1:[0-9]+]], 8
+; CHECK-DAG: xsmaddmdp 3, 2, 1
+; CHECK-DAG: xsmaddadp 1, 2, 4
+; CHECK-DAG: stxsdx 3, 0, 7
+; CHECK-DAG: stxsdx 1, 7, [[C1]]
+; CHECK: blr
+}
+
+define void @test2(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+entry:
+ %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
+ store double %0, double* %d, align 8
+ %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
+ %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ store double %1, double* %arrayidx1, align 8
+ %2 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
+ %arrayidx2 = getelementptr inbounds double* %d, i64 2
+ store double %2, double* %arrayidx2, align 8
+ ret void
+
+; CHECK-LABEL: @test2
+; CHECK-DAG: li [[C1:[0-9]+]], 8
+; CHECK-DAG: li [[C2:[0-9]+]], 16
+; CHECK-DAG: xsmaddmdp 3, 2, 1
+; CHECK-DAG: xsmaddmdp 4, 2, 1
+; CHECK-DAG: xsmaddadp 1, 2, 5
+; CHECK-DAG: stxsdx 3, 0, 8
+; CHECK-DAG: stxsdx 4, 8, [[C1]]
+; CHECK-DAG: stxsdx 1, 8, [[C2]]
+; CHECK: blr
+}
+
+define void @test3(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+entry:
+ %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
+ store double %0, double* %d, align 8
+ %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
+ %2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
+ %arrayidx1 = getelementptr inbounds double* %d, i64 3
+ store double %2, double* %arrayidx1, align 8
+ %3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
+ %arrayidx2 = getelementptr inbounds double* %d, i64 2
+ store double %3, double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %d, i64 1
+ store double %1, double* %arrayidx3, align 8
+ ret void
+
+; CHECK-LABEL: @test3
+; CHECK-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-DAG: li [[C1:[0-9]+]], 24
+; CHECK-DAG: li [[C2:[0-9]+]], 16
+; CHECK-DAG: li [[C3:[0-9]+]], 8
+; CHECK-DAG: xsmaddmdp 4, 2, 1
+; CHECK-DAG: xsmaddadp 1, 2, 5
+
+; Note: We could convert this next FMA to M-type as well, but it would require
+; re-ordering the instructions.
+; CHECK-DAG: xsmaddadp [[F1]], 2, 3
+
+; CHECK-DAG: xsmaddmdp 2, 3, 4
+; CHECK-DAG: stxsdx [[F1]], 0, 8
+; CHECK-DAG: stxsdx 2, 8, [[C1]]
+; CHECK-DAG: stxsdx 1, 8, [[C2]]
+; CHECK-DAG: stxsdx 4, 8, [[C3]]
+; CHECK: blr
+}
+
+define void @test4(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
+entry:
+ %0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
+ store double %0, double* %d, align 8
+ %1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
+ %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ store double %1, double* %arrayidx1, align 8
+ %2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
+ %arrayidx3 = getelementptr inbounds double* %d, i64 3
+ store double %2, double* %arrayidx3, align 8
+ %3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
+ %arrayidx4 = getelementptr inbounds double* %d, i64 2
+ store double %3, double* %arrayidx4, align 8
+ ret void
+
+; CHECK-LABEL: @test4
+; CHECK-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-DAG: li [[C1:[0-9]+]], 8
+; CHECK-DAG: li [[C2:[0-9]+]], 16
+; CHECK-DAG: xsmaddmdp 4, 2, 1
+
+; Note: We could convert this next FMA to M-type as well, but it would require
+; re-ordering the instructions.
+; CHECK-DAG: xsmaddadp 1, 2, 5
+
+; CHECK-DAG: xsmaddadp [[F1]], 2, 3
+; CHECK-DAG: stxsdx [[F1]], 0, 8
+; CHECK-DAG: stxsdx 4, 8, [[C1]]
+; CHECK-DAG: li [[C3:[0-9]+]], 24
+; CHECK-DAG: xsmaddadp 4, 2, 3
+; CHECK-DAG: stxsdx 4, 8, [[C3]]
+; CHECK-DAG: stxsdx 1, 8, [[C2]]
+; CHECK: blr
+}
+
+declare double @llvm.fma.f64(double, double, double) #0
+
+define void @testv1(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double>* nocapture %d) #0 {
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
+ store <2 x double> %0, <2 x double>* %d, align 8
+ %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
+ %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+ ret void
+
+; CHECK-LABEL: @testv1
+; CHECK-DAG: xvmaddmdp 36, 35, 34
+; CHECK-DAG: xvmaddadp 34, 35, 37
+; CHECK-DAG: li [[C1:[0-9]+]], 16
+; CHECK-DAG: stxvd2x 36, 0, 3
+; CHECK-DAG: stxvd2x 34, 3, [[C1:[0-9]+]]
+; CHECK: blr
+}
+
+define void @testv2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
+ store <2 x double> %0, <2 x double>* %d, align 8
+ %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
+ %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+ %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
+ %arrayidx2 = getelementptr inbounds <2 x double>* %d, i64 2
+ store <2 x double> %2, <2 x double>* %arrayidx2, align 8
+ ret void
+
+; CHECK-LABEL: @testv2
+; CHECK-DAG: xvmaddmdp 36, 35, 34
+; CHECK-DAG: xvmaddmdp 37, 35, 34
+; CHECK-DAG: li [[C1:[0-9]+]], 16
+; CHECK-DAG: li [[C2:[0-9]+]], 32
+; CHECK-DAG: xvmaddadp 34, 35, 38
+; CHECK-DAG: stxvd2x 36, 0, 3
+; CHECK-DAG: stxvd2x 37, 3, [[C1:[0-9]+]]
+; CHECK-DAG: stxvd2x 34, 3, [[C2:[0-9]+]]
+; CHECK: blr
+}
+
+define void @testv3(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
+ store <2 x double> %0, <2 x double>* %d, align 8
+ %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
+ %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
+ %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 3
+ store <2 x double> %2, <2 x double>* %arrayidx1, align 8
+ %3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
+ %arrayidx2 = getelementptr inbounds <2 x double>* %d, i64 2
+ store <2 x double> %3, <2 x double>* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds <2 x double>* %d, i64 1
+ store <2 x double> %1, <2 x double>* %arrayidx3, align 8
+ ret void
+
+; CHECK-LABEL: @testv3
+; CHECK-DAG: xxlor [[V1:[0-9]+]], 34, 34
+; CHECK-DAG: xvmaddmdp 37, 35, 34
+; CHECK-DAG: li [[C1:[0-9]+]], 48
+; CHECK-DAG: li [[C2:[0-9]+]], 32
+; CHECK-DAG: xvmaddadp 34, 35, 38
+; CHECK-DAG: li [[C3:[0-9]+]], 16
+
+; Note: We could convert this next FMA to M-type as well, but it would require
+; re-ordering the instructions.
+; CHECK-DAG: xvmaddadp [[V1]], 35, 36
+
+; CHECK-DAG: xvmaddmdp 35, 36, 37
+; CHECK-DAG: stxvd2x 32, 0, 3
+; CHECK-DAG: stxvd2x 35, 3, [[C1]]
+; CHECK-DAG: stxvd2x 34, 3, [[C2]]
+; CHECK-DAG: stxvd2x 37, 3, [[C3]]
+; CHECK: blr
+}
+
+define void @testv4(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
+ store <2 x double> %0, <2 x double>* %d, align 8
+ %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
+ %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ store <2 x double> %1, <2 x double>* %arrayidx1, align 8
+ %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
+ %arrayidx3 = getelementptr inbounds <2 x double>* %d, i64 3
+ store <2 x double> %2, <2 x double>* %arrayidx3, align 8
+ %3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
+ %arrayidx4 = getelementptr inbounds <2 x double>* %d, i64 2
+ store <2 x double> %3, <2 x double>* %arrayidx4, align 8
+ ret void
+
+; CHECK-LABEL: @testv4
+; CHECK-DAG: xxlor [[V1:[0-9]+]], 34, 34
+; CHECK-DAG: xvmaddmdp 37, 35, 34
+; CHECK-DAG: li [[C1:[0-9]+]], 16
+; CHECK-DAG: li [[C2:[0-9]+]], 32
+; CHECK-DAG: xvmaddadp 34, 35, 38
+
+; Note: We could convert this next FMA to M-type as well, but it would require
+; re-ordering the instructions.
+; CHECK-DAG: xvmaddadp [[V1]], 35, 36
+
+; CHECK-DAG: stxvd2x 32, 0, 3
+; CHECK-DAG: stxvd2x 37, 3, [[C1]]
+; CHECK-DAG: li [[C3:[0-9]+]], 48
+; CHECK-DAG: xvmaddadp 37, 35, 36
+; CHECK-DAG: stxvd2x 37, 3, [[C3]]
+; CHECK-DAG: stxvd2x 34, 3, [[C2]]
+; CHECK: blr
+}
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
+
+attributes #0 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/vsx-self-copy.ll b/test/CodeGen/PowerPC/vsx-self-copy.ll
new file mode 100644
index 000000000000..23615ca10c1e
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-self-copy.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define double @takFP(double %x, double %y, double %z) #0 {
+entry:
+ br i1 undef, label %if.then, label %return
+
+if.then: ; preds = %if.then, %entry
+ %x.tr16 = phi double [ %call, %if.then ], [ %x, %entry ]
+ %call = tail call double @takFP(double undef, double undef, double undef)
+ %call4 = tail call double @takFP(double undef, double %x.tr16, double undef)
+ %cmp = fcmp olt double undef, %call
+ br i1 %cmp, label %if.then, label %return
+
+return: ; preds = %if.then, %entry
+ %z.tr.lcssa = phi double [ %z, %entry ], [ %call4, %if.then ]
+ ret double %z.tr.lcssa
+
+; CHECK: @takFP
+; CHECK-NOT: xxlor 0, 0, 0
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/vsx-spill.ll b/test/CodeGen/PowerPC/vsx-spill.ll
new file mode 100644
index 000000000000..29bc6fcc7100
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-spill.ll
@@ -0,0 +1,49 @@
+; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define double @foo1(double %a) nounwind {
+entry:
+ call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"() nounwind
+ br label %return
+
+; CHECK: @foo1
+; CHECK: xxlor [[R1:[0-9]+]], 1, 1
+; CHECK: xxlor 1, [[R1]], [[R1]]
+; CHECK: blr
+
+return: ; preds = %entry
+ ret double %a
+}
+
+define double @foo2(double %a) nounwind {
+entry:
+ %b = fadd double %a, %a
+ call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"() nounwind
+ br label %return
+
+; CHECK: @foo2
+; CHECK: {{xxlor|xsadddp}} [[R1:[0-9]+]], 1, 1
+; CHECK: {{xxlor|xsadddp}} 1, [[R1]], [[R1]]
+; CHECK: blr
+
+return: ; preds = %entry
+ ret double %b
+}
+
+define double @foo3(double %a) nounwind {
+entry:
+ call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() nounwind
+ br label %return
+
+; CHECK: @foo3
+; CHECK: stxsdx 1,
+; CHECK: lxsdx [[R1:[0-9]+]],
+; CHECK: xsadddp 1, [[R1]], [[R1]]
+; CHECK: blr
+
+return: ; preds = %entry
+ %b = fadd double %a, %a
+ ret double %b
+}
+
diff --git a/test/CodeGen/PowerPC/vsx.ll b/test/CodeGen/PowerPC/vsx.ll
new file mode 100644
index 000000000000..2f226e1f614c
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx.ll
@@ -0,0 +1,651 @@
+; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define double @test1(double %a, double %b) {
+entry:
+ %v = fmul double %a, %b
+ ret double %v
+
+; CHECK-LABEL: @test1
+; CHECK: xsmuldp 1, 1, 2
+; CHECK: blr
+}
+
+define double @test2(double %a, double %b) {
+entry:
+ %v = fdiv double %a, %b
+ ret double %v
+
+; CHECK-LABEL: @test2
+; CHECK: xsdivdp 1, 1, 2
+; CHECK: blr
+}
+
+define double @test3(double %a, double %b) {
+entry:
+ %v = fadd double %a, %b
+ ret double %v
+
+; CHECK-LABEL: @test3
+; CHECK: xsadddp 1, 1, 2
+; CHECK: blr
+}
+
+define <2 x double> @test4(<2 x double> %a, <2 x double> %b) {
+entry:
+ %v = fadd <2 x double> %a, %b
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test4
+; CHECK: xvadddp 34, 34, 35
+; CHECK: blr
+}
+
+define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %v = xor <4 x i32> %a, %b
+ ret <4 x i32> %v
+
+; CHECK-LABEL: @test5
+; CHECK: xxlxor 34, 34, 35
+; CHECK: blr
+}
+
+define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
+entry:
+ %v = xor <8 x i16> %a, %b
+ ret <8 x i16> %v
+
+; CHECK-LABEL: @test6
+; CHECK: xxlxor 34, 34, 35
+; CHECK: blr
+}
+
+define <16 x i8> @test7(<16 x i8> %a, <16 x i8> %b) {
+entry:
+ %v = xor <16 x i8> %a, %b
+ ret <16 x i8> %v
+
+; CHECK-LABEL: @test7
+; CHECK: xxlxor 34, 34, 35
+; CHECK: blr
+}
+
+define <4 x i32> @test8(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %v = or <4 x i32> %a, %b
+ ret <4 x i32> %v
+
+; CHECK-LABEL: @test8
+; CHECK: xxlor 34, 34, 35
+; CHECK: blr
+}
+
+define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
+entry:
+ %v = or <8 x i16> %a, %b
+ ret <8 x i16> %v
+
+; CHECK-LABEL: @test9
+; CHECK: xxlor 34, 34, 35
+; CHECK: blr
+}
+
+define <16 x i8> @test10(<16 x i8> %a, <16 x i8> %b) {
+entry:
+ %v = or <16 x i8> %a, %b
+ ret <16 x i8> %v
+
+; CHECK-LABEL: @test10
+; CHECK: xxlor 34, 34, 35
+; CHECK: blr
+}
+
+define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %v = and <4 x i32> %a, %b
+ ret <4 x i32> %v
+
+; CHECK-LABEL: @test11
+; CHECK: xxland 34, 34, 35
+; CHECK: blr
+}
+
+define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
+entry:
+ %v = and <8 x i16> %a, %b
+ ret <8 x i16> %v
+
+; CHECK-LABEL: @test12
+; CHECK: xxland 34, 34, 35
+; CHECK: blr
+}
+
+define <16 x i8> @test13(<16 x i8> %a, <16 x i8> %b) {
+entry:
+ %v = and <16 x i8> %a, %b
+ ret <16 x i8> %v
+
+; CHECK-LABEL: @test13
+; CHECK: xxland 34, 34, 35
+; CHECK: blr
+}
+
+define <4 x i32> @test14(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %v = or <4 x i32> %a, %b
+ %w = xor <4 x i32> %v, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %w
+
+; CHECK-LABEL: @test14
+; CHECK: xxlnor 34, 34, 35
+; CHECK: blr
+}
+
+define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
+entry:
+ %v = or <8 x i16> %a, %b
+ %w = xor <8 x i16> %v, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %w
+
+; CHECK-LABEL: @test15
+; CHECK: xxlnor 34, 34, 35
+; CHECK: blr
+}
+
+define <16 x i8> @test16(<16 x i8> %a, <16 x i8> %b) {
+entry:
+ %v = or <16 x i8> %a, %b
+ %w = xor <16 x i8> %v, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ret <16 x i8> %w
+
+; CHECK-LABEL: @test16
+; CHECK: xxlnor 34, 34, 35
+; CHECK: blr
+}
+
+define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %w = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %v = and <4 x i32> %a, %w
+ ret <4 x i32> %v
+
+; CHECK-LABEL: @test17
+; CHECK: xxlandc 34, 34, 35
+; CHECK: blr
+}
+
+define <8 x i16> @test18(<8 x i16> %a, <8 x i16> %b) {
+entry:
+ %w = xor <8 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %v = and <8 x i16> %a, %w
+ ret <8 x i16> %v
+
+; CHECK-LABEL: @test18
+; CHECK: xxlandc 34, 34, 35
+; CHECK: blr
+}
+
+define <16 x i8> @test19(<16 x i8> %a, <16 x i8> %b) {
+entry:
+ %w = xor <16 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %v = and <16 x i8> %a, %w
+ ret <16 x i8> %v
+
+; CHECK-LABEL: @test19
+; CHECK: xxlandc 34, 34, 35
+; CHECK: blr
+}
+
+define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
+entry:
+ %m = icmp eq <4 x i32> %c, %d
+ %v = select <4 x i1> %m, <4 x i32> %a, <4 x i32> %b
+ ret <4 x i32> %v
+
+; CHECK-LABEL: @test20
+; CHECK: vcmpequw {{[0-9]+}}, 4, 5
+; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK: blr
+}
+
+define <4 x float> @test21(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
+entry:
+ %m = fcmp oeq <4 x float> %c, %d
+ %v = select <4 x i1> %m, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %v
+
+; CHECK-LABEL: @test21
+; CHECK: xvcmpeqsp [[V1:[0-9]+]], 36, 37
+; CHECK: xxsel 34, 35, 34, [[V1]]
+; CHECK: blr
+}
+
+define <4 x float> @test22(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
+entry:
+ %m = fcmp ueq <4 x float> %c, %d
+ %v = select <4 x i1> %m, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %v
+
+; CHECK-LABEL: @test22
+; CHECK-DAG: xvcmpeqsp {{[0-9]+}}, 37, 37
+; CHECK-DAG: xvcmpeqsp {{[0-9]+}}, 36, 36
+; CHECK-DAG: xvcmpeqsp {{[0-9]+}}, 36, 37
+; CHECK-DAG: xxlnor
+; CHECK-DAG: xxlnor
+; CHECK-DAG: xxlor
+; CHECK-DAG: xxlor
+; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK: blr
+}
+
+define <8 x i16> @test23(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
+entry:
+ %m = icmp eq <8 x i16> %c, %d
+ %v = select <8 x i1> %m, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %v
+
+; CHECK-LABEL: @test23
+; CHECK: vcmpequh {{[0-9]+}}, 4, 5
+; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK: blr
+}
+
+define <16 x i8> @test24(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+entry:
+ %m = icmp eq <16 x i8> %c, %d
+ %v = select <16 x i1> %m, <16 x i8> %a, <16 x i8> %b
+ ret <16 x i8> %v
+
+; CHECK-LABEL: @test24
+; CHECK: vcmpequb {{[0-9]+}}, 4, 5
+; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK: blr
+}
+
+define <2 x double> @test25(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
+entry:
+ %m = fcmp oeq <2 x double> %c, %d
+ %v = select <2 x i1> %m, <2 x double> %a, <2 x double> %b
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test25
+; CHECK: xvcmpeqdp [[V1:[0-9]+]], 36, 37
+; CHECK: xxsel 34, 35, 34, [[V1]]
+; CHECK: blr
+}
+
+define <2 x i64> @test26(<2 x i64> %a, <2 x i64> %b) {
+ %v = add <2 x i64> %a, %b
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test26
+
+; Make sure we use only two stores (one for each operand).
+; CHECK: stxvd2x 35,
+; CHECK: stxvd2x 34,
+; CHECK-NOT: stxvd2x
+
+; FIXME: The code quality here is not good; just make sure we do something for now.
+; CHECK: add
+; CHECK: add
+; CHECK: blr
+}
+
+define <2 x i64> @test27(<2 x i64> %a, <2 x i64> %b) {
+ %v = and <2 x i64> %a, %b
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test27
+; CHECK: xxland 34, 34, 35
+; CHECK: blr
+}
+
+define <2 x double> @test28(<2 x double>* %a) {
+ %v = load <2 x double>* %a, align 16
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test28
+; CHECK: lxvd2x 34, 0, 3
+; CHECK: blr
+}
+
+define void @test29(<2 x double>* %a, <2 x double> %b) {
+ store <2 x double> %b, <2 x double>* %a, align 16
+ ret void
+
+; CHECK-LABEL: @test29
+; CHECK: stxvd2x 34, 0, 3
+; CHECK: blr
+}
+
+define <2 x double> @test28u(<2 x double>* %a) {
+ %v = load <2 x double>* %a, align 8
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test28u
+; CHECK: lxvd2x 34, 0, 3
+; CHECK: blr
+}
+
+define void @test29u(<2 x double>* %a, <2 x double> %b) {
+ store <2 x double> %b, <2 x double>* %a, align 8
+ ret void
+
+; CHECK-LABEL: @test29u
+; CHECK: stxvd2x 34, 0, 3
+; CHECK: blr
+}
+
+define <2 x i64> @test30(<2 x i64>* %a) {
+ %v = load <2 x i64>* %a, align 16
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test30
+; CHECK: lxvd2x 34, 0, 3
+; CHECK: blr
+}
+
+define void @test31(<2 x i64>* %a, <2 x i64> %b) {
+ store <2 x i64> %b, <2 x i64>* %a, align 16
+ ret void
+
+; CHECK-LABEL: @test31
+; CHECK: stxvd2x 34, 0, 3
+; CHECK: blr
+}
+
+define <2 x double> @test40(<2 x i64> %a) {
+ %v = uitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test40
+; CHECK: xvcvuxddp 34, 34
+; CHECK: blr
+}
+
+define <2 x double> @test41(<2 x i64> %a) {
+ %v = sitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test41
+; CHECK: xvcvsxddp 34, 34
+; CHECK: blr
+}
+
+define <2 x i64> @test42(<2 x double> %a) {
+ %v = fptoui <2 x double> %a to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test42
+; CHECK: xvcvdpuxds 34, 34
+; CHECK: blr
+}
+
+define <2 x i64> @test43(<2 x double> %a) {
+ %v = fptosi <2 x double> %a to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test43
+; CHECK: xvcvdpsxds 34, 34
+; CHECK: blr
+}
+
+define <2 x float> @test44(<2 x i64> %a) {
+ %v = uitofp <2 x i64> %a to <2 x float>
+ ret <2 x float> %v
+
+; CHECK-LABEL: @test44
+; FIXME: The code quality here looks pretty bad.
+; CHECK: blr
+}
+
+define <2 x float> @test45(<2 x i64> %a) {
+ %v = sitofp <2 x i64> %a to <2 x float>
+ ret <2 x float> %v
+
+; CHECK-LABEL: @test45
+; FIXME: The code quality here looks pretty bad.
+; CHECK: blr
+}
+
+define <2 x i64> @test46(<2 x float> %a) {
+ %v = fptoui <2 x float> %a to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test46
+; FIXME: The code quality here looks pretty bad.
+; CHECK: blr
+}
+
+define <2 x i64> @test47(<2 x float> %a) {
+ %v = fptosi <2 x float> %a to <2 x i64>
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test47
+; FIXME: The code quality here looks pretty bad.
+; CHECK: blr
+}
+
+define <2 x double> @test50(double* %a) {
+ %v = load double* %a, align 8
+ %w = insertelement <2 x double> undef, double %v, i32 0
+ %x = insertelement <2 x double> %w, double %v, i32 1
+ ret <2 x double> %x
+
+; CHECK-LABEL: @test50
+; CHECK: lxvdsx 34, 0, 3
+; CHECK: blr
+}
+
+define <2 x double> @test51(<2 x double> %a, <2 x double> %b) {
+ %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test51
+; CHECK: xxpermdi 34, 34, 34, 0
+; CHECK: blr
+}
+
+define <2 x double> @test52(<2 x double> %a, <2 x double> %b) {
+ %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test52
+; CHECK: xxpermdi 34, 34, 35, 0
+; CHECK: blr
+}
+
+define <2 x double> @test53(<2 x double> %a, <2 x double> %b) {
+ %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 0>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test53
+; CHECK: xxpermdi 34, 35, 34, 0
+; CHECK: blr
+}
+
+define <2 x double> @test54(<2 x double> %a, <2 x double> %b) {
+ %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test54
+; CHECK: xxpermdi 34, 34, 35, 2
+; CHECK: blr
+}
+
+define <2 x double> @test55(<2 x double> %a, <2 x double> %b) {
+ %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x double> %v
+
+; CHECK-LABEL: @test55
+; CHECK: xxpermdi 34, 34, 35, 3
+; CHECK: blr
+}
+
+define <2 x i64> @test56(<2 x i64> %a, <2 x i64> %b) {
+ %v = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test56
+; CHECK: xxpermdi 34, 34, 35, 3
+; CHECK: blr
+}
+
+define <2 x i64> @test60(<2 x i64> %a, <2 x i64> %b) {
+ %v = shl <2 x i64> %a, %b
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test60
+; This should scalarize, and the current code quality is not good.
+; CHECK: stxvd2x
+; CHECK: stxvd2x
+; CHECK: sld
+; CHECK: sld
+; CHECK: lxvd2x
+; CHECK: blr
+}
+
+define <2 x i64> @test61(<2 x i64> %a, <2 x i64> %b) {
+ %v = lshr <2 x i64> %a, %b
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test61
+; This should scalarize, and the current code quality is not good.
+; CHECK: stxvd2x
+; CHECK: stxvd2x
+; CHECK: srd
+; CHECK: srd
+; CHECK: lxvd2x
+; CHECK: blr
+}
+
+define <2 x i64> @test62(<2 x i64> %a, <2 x i64> %b) {
+ %v = ashr <2 x i64> %a, %b
+ ret <2 x i64> %v
+
+; CHECK-LABEL: @test62
+; This should scalarize, and the current code quality is not good.
+; CHECK: stxvd2x
+; CHECK: stxvd2x
+; CHECK: srad
+; CHECK: srad
+; CHECK: lxvd2x
+; CHECK: blr
+}
+
+define double @test63(<2 x double> %a) {
+ %v = extractelement <2 x double> %a, i32 0
+ ret double %v
+
+; CHECK-LABEL: @test63
+; CHECK: xxlor 1, 34, 34
+; CHECK: blr
+}
+
+define double @test64(<2 x double> %a) {
+ %v = extractelement <2 x double> %a, i32 1
+ ret double %v
+
+; CHECK-LABEL: @test64
+; CHECK: xxpermdi 1, 34, 34, 2
+; CHECK: blr
+}
+
+define <2 x i1> @test65(<2 x i64> %a, <2 x i64> %b) {
+ %w = icmp eq <2 x i64> %a, %b
+ ret <2 x i1> %w
+
+; CHECK-LABEL: @test65
+; CHECK: vcmpequw 2, 2, 3
+; CHECK: blr
+}
+
+define <2 x i1> @test66(<2 x i64> %a, <2 x i64> %b) {
+ %w = icmp ne <2 x i64> %a, %b
+ ret <2 x i1> %w
+
+; CHECK-LABEL: @test66
+; CHECK: vcmpequw {{[0-9]+}}, 2, 3
+; CHECK: xxlnor 34, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: blr
+}
+
+define <2 x i1> @test67(<2 x i64> %a, <2 x i64> %b) {
+ %w = icmp ult <2 x i64> %a, %b
+ ret <2 x i1> %w
+
+; CHECK-LABEL: @test67
+; This should scalarize, and the current code quality is not good.
+; CHECK: stxvd2x
+; CHECK: stxvd2x
+; CHECK: cmpld
+; CHECK: cmpld
+; CHECK: lxvd2x
+; CHECK: blr
+}
+
+define <2 x double> @test68(<2 x i32> %a) {
+ %w = sitofp <2 x i32> %a to <2 x double>
+ ret <2 x double> %w
+
+; CHECK-LABEL: @test68
+; CHECK: xxsldwi [[V1:[0-9]+]], 34, 34, 1
+; CHECK: xvcvsxwdp 34, [[V1]]
+; CHECK: blr
+}
+
+define <2 x double> @test69(<2 x i16> %a) {
+ %w = sitofp <2 x i16> %a to <2 x double>
+ ret <2 x double> %w
+
+; CHECK-LABEL: @test69
+; CHECK: vspltisw [[V1:[0-9]+]], 8
+; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
+; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
+; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
+; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: xvcvsxwdp 34, [[V4]]
+; CHECK: blr
+}
+
+define <2 x double> @test70(<2 x i8> %a) {
+ %w = sitofp <2 x i8> %a to <2 x double>
+ ret <2 x double> %w
+
+; CHECK-LABEL: @test70
+; CHECK: vspltisw [[V1:[0-9]+]], 12
+; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
+; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
+; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
+; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: xvcvsxwdp 34, [[V4]]
+; CHECK: blr
+}
+
+define <2 x i32> @test80(i32 %v) {
+ %b1 = insertelement <2 x i32> undef, i32 %v, i32 0
+ %b2 = shufflevector <2 x i32> %b1, <2 x i32> undef, <2 x i32> zeroinitializer
+ %i = add <2 x i32> %b2, <i32 2, i32 3>
+ ret <2 x i32> %i
+
+; CHECK-LABEL: @test80
+; CHECK-DAG: addi [[R1:[0-9]+]], 3, 3
+; CHECK-DAG: addi [[R2:[0-9]+]], 1, -16
+; CHECK-DAG: addi [[R3:[0-9]+]], 3, 2
+; CHECK: std [[R1]], -8(1)
+; CHECK: std [[R3]], -16(1)
+; CHECK: lxvd2x 34, 0, [[R2]]
+; CHECK-NOT: stxvd2x
+; CHECK: blr
+}
+
+define <2 x double> @test81(<4 x float> %b) {
+ %w = bitcast <4 x float> %b to <2 x double>
+ ret <2 x double> %w
+
+; CHECK-LABEL: @test81
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/vtable-reloc.ll b/test/CodeGen/PowerPC/vtable-reloc.ll
new file mode 100644
index 000000000000..995a5d03ba5b
--- /dev/null
+++ b/test/CodeGen/PowerPC/vtable-reloc.ll
@@ -0,0 +1,11 @@
+; RUN: llc -O0 < %s | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@_ZTV3foo = linkonce_odr unnamed_addr constant [1 x i8*] [i8* bitcast (void ()* @__cxa_pure_virtual to i8*)]
+declare void @__cxa_pure_virtual()
+
+; CHECK: .section .data.rel.ro
+; CHECK: .quad __cxa_pure_virtual
+
diff --git a/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll b/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
index 130d8faaf8bc..e038b3f2fb25 100644
--- a/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
+++ b/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
@@ -3,7 +3,7 @@
; RUN: llc -mtriple=powerpc-apple-darwin9 -O0 < %s | FileCheck --check-prefix=CHECK-D89 %s
; RUN: llc -mtriple=powerpc-apple-darwin8 -O0 < %s | FileCheck --check-prefix=CHECK-D89 %s
-@v1 = linkonce_odr global i32 32
+@v1 = linkonce_odr constant i32 32
; CHECK: .globl _v1
; CHECK: .weak_def_can_be_hidden _v1
@@ -15,13 +15,17 @@ define i32 @f1() {
ret i32 %x
}
-@v2 = linkonce_odr global i32 32
+@v2 = linkonce_odr constant i32 32
; CHECK: .globl _v2
; CHECK: .weak_definition _v2
; CHECK-D89: .globl _v2
; CHECK-D89: .weak_definition _v2
+define i32* @f2() {
+ ret i32* @v2
+}
+
@v3 = linkonce_odr unnamed_addr global i32 32
; CHECK: .globl _v3
; CHECK: .weak_def_can_be_hidden _v3
@@ -29,10 +33,18 @@ define i32 @f1() {
; CHECK-D89: .globl _v3
; CHECK-D89: .weak_definition _v3
-define i32* @f2() {
- ret i32* @v2
-}
-
define i32* @f3() {
ret i32* @v3
}
+
+@v4 = linkonce_odr global i32 32
+; CHECK: .globl _v4
+; CHECK: .weak_definition _v4
+
+; CHECK-D89: .globl _v4
+; CHECK-D89: .weak_definition _v4
+
+define i32 @f4() {
+ %x = load i32 * @v4
+ ret i32 %x
+}
diff --git a/test/CodeGen/R600/32-bit-local-address-space.ll b/test/CodeGen/R600/32-bit-local-address-space.ll
index 7a126878bef4..7dec42637421 100644
--- a/test/CodeGen/R600/32-bit-local-address-space.ll
+++ b/test/CodeGen/R600/32-bit-local-address-space.ll
@@ -11,7 +11,7 @@
; CHECK-LABEL: @local_address_load
; CHECK: V_MOV_B32_e{{32|64}} [[PTR:v[0-9]]]
-; CHECK: DS_READ_B32 [[PTR]]
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[PTR]]
define void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = load i32 addrspace(3)* %in
@@ -32,9 +32,8 @@ entry:
}
; CHECK-LABEL: @local_address_gep_const_offset
-; CHECK: S_ADD_I32 [[SPTR:s[0-9]]]
-; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
-; CHECK: DS_READ_B32 [[VPTR]]
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[VPTR]], 0x4,
define void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = getelementptr i32 addrspace(3)* %in, i32 1
@@ -43,6 +42,19 @@ entry:
ret void
}
+; Offset too large, can't fold into 16-bit immediate offset.
+; CHECK-LABEL: @local_address_gep_large_const_offset
+; CHECK: S_ADD_I32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; CHECK: DS_READ_B32 [[VPTR]]
+define void @local_address_gep_large_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
+entry:
+ %0 = getelementptr i32 addrspace(3)* %in, i32 16385
+ %1 = load i32 addrspace(3)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
; CHECK-LABEL: @null_32bit_lds_ptr:
; CHECK: V_CMP_NE_I32
; CHECK-NOT: V_CMP_NE_I32
@@ -69,7 +81,7 @@ define void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %
; CHECK-LABEL: @infer_ptr_alignment_global_offset:
; CHECK: V_MOV_B32_e32 [[REG:v[0-9]+]], 0
-; CHECK: DS_READ_B32 v{{[0-9]+}}, 0, [[REG]]
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[REG]]
define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %tid) {
%val = load float addrspace(3)* @g_lds
store float %val, float addrspace(1)* %out
@@ -80,9 +92,47 @@ define void @infer_ptr_alignment_global_offset(float addrspace(1)* %out, i32 %ti
@ptr = addrspace(3) global i32 addrspace(3)* null
@dst = addrspace(3) global [16384 x i32] zeroinitializer
-; SI-LABEL: @global_ptr:
-; SI-CHECK: DS_WRITE_B32
+; CHECK-LABEL: @global_ptr:
+; CHECK: DS_WRITE_B32
define void @global_ptr() nounwind {
store i32 addrspace(3)* getelementptr ([16384 x i32] addrspace(3)* @dst, i32 0, i32 16), i32 addrspace(3)* addrspace(3)* @ptr
ret void
}
+
+; CHECK-LABEL: @local_address_store
+; CHECK: DS_WRITE_B32
+define void @local_address_store(i32 addrspace(3)* %out, i32 %val) {
+ store i32 %val, i32 addrspace(3)* %out
+ ret void
+}
+
+; CHECK-LABEL: @local_address_gep_store
+; CHECK: S_ADD_I32 [[SADDR:s[0-9]+]],
+; CHECK: V_MOV_B32_e32 [[ADDR:v[0-9]+]], [[SADDR]]
+; CHECK: DS_WRITE_B32 [[ADDR]], v{{[0-9]+}},
+define void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32 %offset) {
+ %gep = getelementptr i32 addrspace(3)* %out, i32 %offset
+ store i32 %val, i32 addrspace(3)* %gep, align 4
+ ret void
+}
+
+; CHECK-LABEL: @local_address_gep_const_offset_store
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], s{{[0-9]+}}
+; CHECK: V_MOV_B32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
+; CHECK: DS_WRITE_B32 [[VPTR]], [[VAL]], 0x4
+define void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
+ %gep = getelementptr i32 addrspace(3)* %out, i32 1
+ store i32 %val, i32 addrspace(3)* %gep, align 4
+ ret void
+}
+
+; Offset too large, can't fold into 16-bit immediate offset.
+; CHECK-LABEL: @local_address_gep_large_const_offset_store
+; CHECK: S_ADD_I32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
+; CHECK: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; CHECK: DS_WRITE_B32 [[VPTR]], v{{[0-9]+}}, 0
+define void @local_address_gep_large_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
+ %gep = getelementptr i32 addrspace(3)* %out, i32 16385
+ store i32 %val, i32 addrspace(3)* %gep, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/64bit-kernel-args.ll b/test/CodeGen/R600/64bit-kernel-args.ll
index 0d6bfb144d3d..2d82c1e53919 100644
--- a/test/CodeGen/R600/64bit-kernel-args.ll
+++ b/test/CodeGen/R600/64bit-kernel-args.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
; SI-CHECK: @f64_kernel_arg
-; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 9
-; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 11
+; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 0x9
+; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 0xb
; SI-CHECK: BUFFER_STORE_DWORDX2
define void @f64_kernel_arg(double addrspace(1)* %out, double %in) {
entry:
diff --git a/test/CodeGen/R600/add.ll b/test/CodeGen/R600/add.ll
index 3d5506bfa5d2..711a2bc41774 100644
--- a/test/CodeGen/R600/add.ll
+++ b/test/CodeGen/R600/add.ll
@@ -1,10 +1,9 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
-;EG-CHECK-LABEL: @test1:
+;FUNC-LABEL: @test1:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @test1:
;SI-CHECK: V_ADD_I32_e32 [[REG:v[0-9]+]], {{v[0-9]+, v[0-9]+}}
;SI-CHECK-NOT: [[REG]]
;SI-CHECK: BUFFER_STORE_DWORD [[REG]],
@@ -17,11 +16,10 @@ define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
-;EG-CHECK-LABEL: @test2:
+;FUNC-LABEL: @test2:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @test2:
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -34,13 +32,12 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK-LABEL: @test4:
+;FUNC-LABEL: @test4:
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @test4:
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -54,3 +51,117 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @test8
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+define void @test8(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
+entry:
+ %0 = add <8 x i32> %a, %b
+ store <8 x i32> %0, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test16
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; EG-CHECK: ADD_INT
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADD_I32
+define void @test16(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
+entry:
+ %0 = add <16 x i32> %a, %b
+ store <16 x i32> %0, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @add64
+; SI-CHECK: S_ADD_I32
+; SI-CHECK: S_ADDC_U32
+define void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+entry:
+ %0 = add i64 %a, %b
+ store i64 %0, i64 addrspace(1)* %out
+ ret void
+}
+
+; The V_ADDC_U32 and V_ADD_I32 instructions can't read SGPRs, because they
+; use VCC. The test is designed so that %a will be stored in an SGPR and
+; %0 will be stored in a VGPR, so the compiler will be forced to copy %a
+; to a VGPR before doing the add.
+
+; FUNC-LABEL: @add64_sgpr_vgpr
+; SI-CHECK-NOT: V_ADDC_U32_e32 s
+define void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
+entry:
+ %0 = load i64 addrspace(1)* %in
+ %1 = add i64 %a, %0
+ store i64 %1, i64 addrspace(1)* %out
+ ret void
+}
+
+; Test i64 add inside a branch. We don't allow SALU instructions inside of
+; branches.
+; FIXME: We are being conservative here. We could allow this in some cases.
+; FUNC-LABEL: @add64_in_branch
+; SI-CHECK-NOT: S_ADD_I32
+; SI-CHECK-NOT: S_ADDC_U32
+define void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
+entry:
+ %0 = icmp eq i64 %a, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = load i64 addrspace(1)* %in
+ br label %endif
+
+else:
+ %2 = add i64 %a, %b
+ br label %endif
+
+endif:
+ %3 = phi i64 [%1, %if], [%2, %else]
+ store i64 %3, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/add_i64.ll b/test/CodeGen/R600/add_i64.ll
index 303a1cb03914..f733d9040421 100644
--- a/test/CodeGen/R600/add_i64.ll
+++ b/test/CodeGen/R600/add_i64.ll
@@ -1,14 +1,13 @@
-; XFAIL: *
-; This will fail until i64 add is enabled
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI %s
-
-declare i32 @llvm.SI.tid() readnone
+declare i32 @llvm.r600.read.tidig.x() readnone
; SI-LABEL: @test_i64_vreg:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.SI.tid() readnone
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr i64 addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr i64 addrspace(1)* %inB, i32 %tid
%a = load i64 addrspace(1)* %a_ptr
@@ -20,6 +19,8 @@ define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noa
; Check that the SGPR add operand is correctly moved to a VGPR.
; SI-LABEL: @sgpr_operand:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
%foo = load i64 addrspace(1)* %in, align 8
%result = add i64 %foo, %a
@@ -31,6 +32,8 @@ define void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noal
; SGPR as other operand.
;
; SI-LABEL: @sgpr_operand_reversed:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
%foo = load i64 addrspace(1)* %in, align 8
%result = add i64 %a, %foo
@@ -40,6 +43,10 @@ define void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace
; SI-LABEL: @test_v2i64_sreg:
+; SI: S_ADD_I32
+; SI: S_ADDC_U32
+; SI: S_ADD_I32
+; SI: S_ADDC_U32
define void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a, <2 x i64> %b) {
%result = add <2 x i64> %a, %b
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
@@ -47,8 +54,12 @@ define void @test_v2i64_sreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %a,
}
; SI-LABEL: @test_v2i64_vreg:
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.SI.tid() readnone
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr <2 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <2 x i64> addrspace(1)* %inB, i32 %tid
%a = load <2 x i64> addrspace(1)* %a_ptr
@@ -57,3 +68,17 @@ define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> add
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
ret void
}
+
+; SI-LABEL: @trunc_i64_add_to_i32
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG0:[0-9]+]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG1:[0-9]+]]
+; SI: S_ADD_I32 [[SRESULT:s[0-9]+]], s[[SREG1]], s[[SREG0]]
+; SI-NOT: ADDC
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+define void @trunc_i64_add_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+ %add = add i64 %b, %a
+ %trunc = trunc i64 %add to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/address-space.ll b/test/CodeGen/R600/address-space.ll
index 1fc616a4ed42..f75a8ac5e6a5 100644
--- a/test/CodeGen/R600/address-space.ll
+++ b/test/CodeGen/R600/address-space.ll
@@ -1,14 +1,17 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
; Test that codegenprepare understands address space sizes
%struct.foo = type { [3 x float], [3 x float] }
+; FIXME: Extra V_MOV from SGPR to VGPR for second read. The address is
+; already in a VGPR after the first read.
+
; CHECK-LABEL: @do_as_ptr_calcs:
-; CHECK: S_ADD_I32 {{s[0-9]+}},
-; CHECK: S_ADD_I32 [[SREG1:s[0-9]+]],
+; CHECK: S_LOAD_DWORD [[SREG1:s[0-9]+]],
; CHECK: V_MOV_B32_e32 [[VREG1:v[0-9]+]], [[SREG1]]
-; CHECK: DS_READ_B32 [[VREG1]],
+; CHECK: DS_READ_B32 v{{[0-9]+}}, [[VREG1]], 0x14
+; CHECK: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0xc
define void @do_as_ptr_calcs(%struct.foo addrspace(3)* nocapture %ptr) nounwind {
entry:
%x = getelementptr inbounds %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 0
diff --git a/test/CodeGen/R600/and.ll b/test/CodeGen/R600/and.ll
index ee9bc836eb86..e20037e6bb67 100644
--- a/test/CodeGen/R600/and.ll
+++ b/test/CodeGen/R600/and.ll
@@ -1,13 +1,12 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-;EG-CHECK: @test2
-;EG-CHECK: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test2
+; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test2
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -18,17 +17,16 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK: @test4
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test4
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test4
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -38,3 +36,84 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @s_and_i32
+; SI: S_AND_B32
+define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %and = and i32 %a, %b
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @s_and_constant_i32
+; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
+define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
+ %and = and i32 %a, 1234567
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_and_i32
+; SI: V_AND_B32
+define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %and = and i32 %a, %b
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_and_constant_i32
+; SI: V_AND_B32
+define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %and = and i32 %a, 1234567
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @s_and_i64
+; SI: S_AND_B64
+define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+ %and = and i64 %a, %b
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FIXME: Should use SGPRs
+; FUNC-LABEL: @s_and_i1
+; SI: V_AND_B32
+define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
+ %and = and i1 %a, %b
+ store i1 %and, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @s_and_constant_i64
+; SI: S_AND_B64
+define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
+ %and = and i64 %a, 281474976710655
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_and_i64
+; SI: V_AND_B32
+; SI: V_AND_B32
+define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
+ %a = load i64 addrspace(1)* %aptr, align 8
+ %b = load i64 addrspace(1)* %bptr, align 8
+ %and = and i64 %a, %b
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_and_constant_i64
+; SI: V_AND_B32
+; SI: V_AND_B32
+define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+ %a = load i64 addrspace(1)* %aptr, align 8
+ %and = and i64 %a, 1234567
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/anyext.ll b/test/CodeGen/R600/anyext.ll
new file mode 100644
index 000000000000..bbe5d0a393e6
--- /dev/null
+++ b/test/CodeGen/R600/anyext.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: @anyext_i1_i32
+; CHECK: V_CNDMASK_B32_e64
+define void @anyext_i1_i32(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp eq i32 %cond, 0
+ %1 = zext i1 %0 to i8
+ %2 = xor i8 %1, -1
+ %3 = and i8 %2, 1
+ %4 = zext i8 %3 to i32
+ store i32 %4, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/array-ptr-calc-i32.ll b/test/CodeGen/R600/array-ptr-calc-i32.ll
new file mode 100644
index 000000000000..a2b697823519
--- /dev/null
+++ b/test/CodeGen/R600/array-ptr-calc-i32.ll
@@ -0,0 +1,44 @@
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI -mattr=+promote-alloca < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
+
+declare i32 @llvm.SI.tid() nounwind readnone
+declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
+
+; The pointer calculation required for the alloca'd value actually requires
+; an add and won't be folded into the addressing, which fails with a
+; 64-bit pointer add. This should work since private pointers should
+; be 32 bits.
+
+; SI-LABEL: @test_private_array_ptr_calc:
+
+; FIXME: We end up with a zero argument for the ADD, because
+; SIRegisterInfo::eliminateFrameIndex() blindly replaces the frame index
+; with the appropriate offset. We should fold this into the store.
+; SI-ALLOCA: V_ADD_I32_e32 [[PTRREG:v[0-9]+]], 0, v{{[0-9]+}}
+; SI-ALLOCA: BUFFER_STORE_DWORD {{v[0-9]+}}, s[{{[0-9]+:[0-9]+}}], [[PTRREG]]
+;
+; FIXME: The AMDGPUPromoteAlloca pass should be able to convert this
+; alloca to a vector. It currently fails because it does not know how
+; to interpret:
+; getelementptr [4 x i32]* %alloca, i32 1, i32 %b
+
+; SI-PROMOTE: V_ADD_I32_e32 [[PTRREG:v[0-9]+]]
+; SI-PROMOTE: DS_WRITE_B32 {{v[0-9]+}}, [[PTRREG]]
+define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
+ %alloca = alloca [4 x i32], i32 4, align 16
+ %tid = call i32 @llvm.SI.tid() readnone
+ %a_ptr = getelementptr i32 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i32 addrspace(1)* %inB, i32 %tid
+ %a = load i32 addrspace(1)* %a_ptr
+ %b = load i32 addrspace(1)* %b_ptr
+ %result = add i32 %a, %b
+ %alloca_ptr = getelementptr [4 x i32]* %alloca, i32 1, i32 %b
+ store i32 %result, i32* %alloca_ptr, align 4
+ ; Dummy call
+ call void @llvm.AMDGPU.barrier.local() nounwind noduplicate
+ %reload = load i32* %alloca_ptr, align 4
+ %out_ptr = getelementptr i32 addrspace(1)* %out, i32 %tid
+ store i32 %reload, i32 addrspace(1)* %out_ptr, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/array-ptr-calc-i64.ll b/test/CodeGen/R600/array-ptr-calc-i64.ll
index 652bbfe2a415..e254c5f64637 100644
--- a/test/CodeGen/R600/array-ptr-calc-i64.ll
+++ b/test/CodeGen/R600/array-ptr-calc-i64.ll
@@ -1,5 +1,5 @@
; XFAIL: *
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI %s
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
declare i32 @llvm.SI.tid() readnone
diff --git a/test/CodeGen/R600/atomic_cmp_swap_local.ll b/test/CodeGen/R600/atomic_cmp_swap_local.ll
new file mode 100644
index 000000000000..eb9539eec516
--- /dev/null
+++ b/test/CodeGen/R600/atomic_cmp_swap_local.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i32_offset:
+; SI: S_LOAD_DWORD [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: V_MOV_B32_e32 [[VCMP:v[0-9]+]], 7
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI-DAG: V_MOV_B32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
+; SI: DS_CMPST_RTN_B32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]], 0x10, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i64_offset:
+; SI: S_LOAD_DWORDX2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: S_MOV_B64 s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
+; SI-DAG: V_MOV_B32_e32 v[[LOVCMP:[0-9]+]], s[[LOSCMP]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVCMP:[0-9]+]], s[[HISCMP]]
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI-DAG: V_MOV_B32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
+; SI-DAG: V_MOV_B32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
+; SI: DS_CMPST_RTN_B64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}}, 0x20, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr, i64 %swap) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
+ %result = extractvalue { i64, i1 } %pair, 0
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/atomic_load_add.ll b/test/CodeGen/R600/atomic_load_add.ll
index 0bc48a3590b2..c26f9cd80eaf 100644
--- a/test/CodeGen/R600/atomic_load_add.ll
+++ b/test/CodeGen/R600/atomic_load_add.ll
@@ -1,23 +1,38 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @atomic_add_local
-; R600-CHECK: LDS_ADD *
-; SI-CHECK-LABEL: @atomic_add_local
-; SI-CHECK: DS_ADD_U32_RTN 0
+; FUNC-LABEL: @atomic_add_local
+; R600: LDS_ADD *
+; SI: DS_ADD_RTN_U32
define void @atomic_add_local(i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
+ %unused = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
ret void
}
-; R600-CHECK-LABEL: @atomic_add_ret_local
-; R600-CHECK: LDS_ADD_RET *
-; SI-CHECK-LABEL: @atomic_add_ret_local
-; SI-CHECK: DS_ADD_U32_RTN 0
+; FUNC-LABEL: @atomic_add_local_const_offset
+; R600: LDS_ADD *
+; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 4
+ %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
+ ret void
+}
+
+; FUNC-LABEL: @atomic_add_ret_local
+; R600: LDS_ADD_RET *
+; SI: DS_ADD_RTN_U32
define void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
- store i32 %0, i32 addrspace(1)* %out
+ %val = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @atomic_add_ret_local_const_offset
+; R600: LDS_ADD_RET *
+; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x14
+define void @atomic_add_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 5
+ %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/R600/atomic_load_sub.ll b/test/CodeGen/R600/atomic_load_sub.ll
index e4a682932c82..3569d91e08dc 100644
--- a/test/CodeGen/R600/atomic_load_sub.ll
+++ b/test/CodeGen/R600/atomic_load_sub.ll
@@ -1,23 +1,38 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @atomic_sub_local
-; R600-CHECK: LDS_SUB *
-; SI-CHECK-LABEL: @atomic_sub_local
-; SI-CHECK: DS_SUB_U32_RTN 0
+; FUNC-LABEL: @atomic_sub_local
+; R600: LDS_SUB *
+; SI: DS_SUB_RTN_U32
define void @atomic_sub_local(i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
+ %unused = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
ret void
}
-; R600-CHECK-LABEL: @atomic_sub_ret_local
-; R600-CHECK: LDS_SUB_RET *
-; SI-CHECK-LABEL: @atomic_sub_ret_local
-; SI-CHECK: DS_SUB_U32_RTN 0
+; FUNC-LABEL: @atomic_sub_local_const_offset
+; R600: LDS_SUB *
+; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 4
+ %val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
+ ret void
+}
+
+; FUNC-LABEL: @atomic_sub_ret_local
+; R600: LDS_SUB_RET *
+; SI: DS_SUB_RTN_U32
define void @atomic_sub_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
-entry:
- %0 = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
- store i32 %0, i32 addrspace(1)* %out
+ %val = atomicrmw volatile sub i32 addrspace(3)* %local, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @atomic_sub_ret_local_const_offset
+; R600: LDS_SUB_RET *
+; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x14
+define void @atomic_sub_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
+ %gep = getelementptr i32 addrspace(3)* %local, i32 5
+ %val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/R600/basic-branch.ll b/test/CodeGen/R600/basic-branch.ll
new file mode 100644
index 000000000000..d084132d4fcc
--- /dev/null
+++ b/test/CodeGen/R600/basic-branch.ll
@@ -0,0 +1,15 @@
+; XFAIL: *
+; RUN: llc -O0 -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK-LABEL: @test_branch(
+define void @test_branch(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
+ %cmp = icmp ne i32 %val, 0
+ br i1 %cmp, label %store, label %end
+
+store:
+ store i32 222, i32 addrspace(1)* %out
+ ret void
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/R600/basic-loop.ll b/test/CodeGen/R600/basic-loop.ll
new file mode 100644
index 000000000000..6d0ff0743b85
--- /dev/null
+++ b/test/CodeGen/R600/basic-loop.ll
@@ -0,0 +1,18 @@
+; XFAIL: *
+; RUN: llc -O0 -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s
+
+; CHECK-LABEL: @test_loop:
+define void @test_loop(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
+entry:
+ br label %loop.body
+
+loop.body:
+ %i = phi i32 [0, %entry], [%i.inc, %loop.body]
+ store i32 222, i32 addrspace(1)* %out
+ %cmp = icmp ne i32 %i, %val
+ %i.inc = add i32 %i, 1
+ br i1 %cmp, label %loop.body, label %end
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/R600/bfi_int.ll b/test/CodeGen/R600/bfi_int.ll
index bbfe856fc930..d18702a1de98 100644
--- a/test/CodeGen/R600/bfi_int.ll
+++ b/test/CodeGen/R600/bfi_int.ll
@@ -38,7 +38,7 @@ entry:
; R600-CHECK: @bfi_sha256_ma
; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
-; SI-CHECK: V_XOR_B32_e64 [[DST:v[0-9]+]], {{[sv][0-9]+, v[0-9]+}}
+; SI-CHECK: V_XOR_B32_e32 [[DST:v[0-9]+]], {{[sv][0-9]+, v[0-9]+}}
; SI-CHECK: V_BFI_B32 {{v[0-9]+}}, [[DST]], {{[sv][0-9]+, [sv][0-9]+}}
define void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
diff --git a/test/CodeGen/R600/big_alu.ll b/test/CodeGen/R600/big_alu.ll
index 6b683769fe06..511e8ef62951 100644
--- a/test/CodeGen/R600/big_alu.ll
+++ b/test/CodeGen/R600/big_alu.ll
@@ -101,7 +101,7 @@ IF137: ; preds = %main_body
%88 = insertelement <4 x float> %87, float %32, i32 2
%89 = insertelement <4 x float> %88, float 0.000000e+00, i32 3
%90 = call float @llvm.AMDGPU.dp4(<4 x float> %85, <4 x float> %89)
- %91 = call float @llvm.AMDGPU.rsq(float %90)
+ %91 = call float @llvm.AMDGPU.rsq.f32(float %90)
%92 = fmul float %30, %91
%93 = fmul float %31, %91
%94 = fmul float %32, %91
@@ -344,7 +344,7 @@ ENDIF136: ; preds = %main_body, %ENDIF15
%325 = insertelement <4 x float> %324, float %318, i32 2
%326 = insertelement <4 x float> %325, float 0.000000e+00, i32 3
%327 = call float @llvm.AMDGPU.dp4(<4 x float> %322, <4 x float> %326)
- %328 = call float @llvm.AMDGPU.rsq(float %327)
+ %328 = call float @llvm.AMDGPU.rsq.f32(float %327)
%329 = fmul float %314, %328
%330 = fmul float %316, %328
%331 = fmul float %318, %328
@@ -377,7 +377,7 @@ ENDIF136: ; preds = %main_body, %ENDIF15
%358 = insertelement <4 x float> %357, float %45, i32 2
%359 = insertelement <4 x float> %358, float 0.000000e+00, i32 3
%360 = call float @llvm.AMDGPU.dp4(<4 x float> %355, <4 x float> %359)
- %361 = call float @llvm.AMDGPU.rsq(float %360)
+ %361 = call float @llvm.AMDGPU.rsq.f32(float %360)
%362 = fmul float %45, %361
%363 = call float @fabs(float %362)
%364 = fmul float %176, 0x3FECCCCCC0000000
@@ -403,7 +403,7 @@ ENDIF136: ; preds = %main_body, %ENDIF15
%384 = insertelement <4 x float> %383, float %45, i32 2
%385 = insertelement <4 x float> %384, float 0.000000e+00, i32 3
%386 = call float @llvm.AMDGPU.dp4(<4 x float> %381, <4 x float> %385)
- %387 = call float @llvm.AMDGPU.rsq(float %386)
+ %387 = call float @llvm.AMDGPU.rsq.f32(float %386)
%388 = fmul float %45, %387
%389 = call float @fabs(float %388)
%390 = fmul float %176, 0x3FF51EB860000000
@@ -1041,7 +1041,7 @@ IF179: ; preds = %ENDIF175
%896 = insertelement <4 x float> %895, float %45, i32 2
%897 = insertelement <4 x float> %896, float 0.000000e+00, i32 3
%898 = call float @llvm.AMDGPU.dp4(<4 x float> %893, <4 x float> %897)
- %899 = call float @llvm.AMDGPU.rsq(float %898)
+ %899 = call float @llvm.AMDGPU.rsq.f32(float %898)
%900 = fmul float %45, %899
%901 = call float @fabs(float %900)
%902 = fmul float %176, 0x3FECCCCCC0000000
@@ -1150,7 +1150,7 @@ ENDIF178: ; preds = %ENDIF175, %IF179
declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) #1
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #1
+declare float @llvm.AMDGPU.rsq.f32(float) #1
; Function Attrs: readnone
declare <4 x float> @llvm.AMDGPU.tex(<4 x float>, i32, i32, i32) #1
diff --git a/test/CodeGen/R600/bitcast.ll b/test/CodeGen/R600/bitcast.ll
index bccc41638570..0be79e658f5c 100644
--- a/test/CodeGen/R600/bitcast.ll
+++ b/test/CodeGen/R600/bitcast.ll
@@ -1,9 +1,11 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; This test just checks that the compiler doesn't crash.
-; CHECK-LABEL: @v32i8_to_v8i32
-; CHECK: S_ENDPGM
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+; FUNC-LABEL: @v32i8_to_v8i32
+; SI: S_ENDPGM
define void @v32i8_to_v8i32(<32 x i8> addrspace(2)* inreg) #0 {
entry:
%1 = load <32 x i8> addrspace(2)* %0
@@ -15,7 +17,62 @@ entry:
ret void
}
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+; FUNC-LABEL: @i8ptr_v16i8ptr
+; SI: S_ENDPGM
+define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+ %0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
+ %1 = load <16 x i8> addrspace(1)* %0
+ store <16 x i8> %1, <16 x i8> addrspace(1)* %out
+ ret void
+}
-attributes #0 = { "ShaderType"="0" }
+define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %load = load float addrspace(1)* %in, align 4
+ %bc = bitcast float %load to <2 x i16>
+ store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
+ %load = load <2 x i16> addrspace(1)* %in, align 4
+ %bc = bitcast <2 x i16> %load to float
+ store float %bc, float addrspace(1)* %out, align 4
+ ret void
+}
+define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 4
+ %bc = bitcast <4 x i8> %load to i32
+ store i32 %bc, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %bc = bitcast i32 %load to <4 x i8>
+ store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bitcast_v2i32_to_f64
+; SI: S_ENDPGM
+define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %val = load <2 x i32> addrspace(1)* %in, align 8
+ %add = add <2 x i32> %val, <i32 4, i32 9>
+ %bc = bitcast <2 x i32> %add to double
+ store double %bc, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @bitcast_f64_to_v2i32
+; SI: S_ENDPGM
+define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
+ %val = load double addrspace(1)* %in, align 8
+ %add = fadd double %val, 4.0
+ %bc = bitcast double %add to <2 x i32>
+ store <2 x i32> %bc, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/bswap.ll b/test/CodeGen/R600/bswap.ll
new file mode 100644
index 000000000000..6aebe851366c
--- /dev/null
+++ b/test/CodeGen/R600/bswap.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=r600 -mcpu=SI < %s
+
+declare i32 @llvm.bswap.i32(i32) nounwind readnone
+declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) nounwind readnone
+declare i64 @llvm.bswap.i64(i64) nounwind readnone
+declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) nounwind readnone
+declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) nounwind readnone
+
+define void @test_bswap_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %bswap = call i32 @llvm.bswap.i32(i32 %val) nounwind readnone
+ store i32 %bswap, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @test_bswap_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) nounwind {
+ %val = load <2 x i32> addrspace(1)* %in, align 8
+ %bswap = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %val) nounwind readnone
+ store <2 x i32> %bswap, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+define void @test_bswap_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) nounwind {
+ %val = load <4 x i32> addrspace(1)* %in, align 16
+ %bswap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %val) nounwind readnone
+ store <4 x i32> %bswap, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+define void @test_bswap_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+ %val = load i64 addrspace(1)* %in, align 8
+ %bswap = call i64 @llvm.bswap.i64(i64 %val) nounwind readnone
+ store i64 %bswap, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+define void @test_bswap_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) nounwind {
+ %val = load <2 x i64> addrspace(1)* %in, align 16
+ %bswap = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %val) nounwind readnone
+ store <2 x i64> %bswap, <2 x i64> addrspace(1)* %out, align 16
+ ret void
+}
+
+define void @test_bswap_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) nounwind {
+ %val = load <4 x i64> addrspace(1)* %in, align 32
+ %bswap = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %val) nounwind readnone
+ store <4 x i64> %bswap, <4 x i64> addrspace(1)* %out, align 32
+ ret void
+}
diff --git a/test/CodeGen/R600/call.ll b/test/CodeGen/R600/call.ll
new file mode 100644
index 000000000000..d80347490b39
--- /dev/null
+++ b/test/CodeGen/R600/call.ll
@@ -0,0 +1,33 @@
+; RUN: not llc -march=r600 -mcpu=SI -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=r600 -mcpu=cypress < %s 2>&1 | FileCheck %s
+
+; CHECK: error: unsupported call to function defined_function in test_call
+
+
+declare i32 @external_function(i32) nounwind
+
+define i32 @defined_function(i32 %x) nounwind noinline {
+ %y = add i32 %x, 8
+ ret i32 %y
+}
+
+define void @test_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %a = load i32 addrspace(1)* %in
+ %b = load i32 addrspace(1)* %b_ptr
+ %c = call i32 @defined_function(i32 %b) nounwind
+ %result = add i32 %a, %c
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @test_call_external(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %a = load i32 addrspace(1)* %in
+ %b = load i32 addrspace(1)* %b_ptr
+ %c = call i32 @external_function(i32 %b) nounwind
+ %result = add i32 %a, %c
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/R600/cayman-loop-bug.ll b/test/CodeGen/R600/cayman-loop-bug.ll
new file mode 100644
index 000000000000..a87352895eb3
--- /dev/null
+++ b/test/CodeGen/R600/cayman-loop-bug.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
+
+; CHECK-LABEL: @main
+; CHECK: LOOP_START_DX10
+; CHECK: ALU_PUSH_BEFORE
+; CHECK: LOOP_START_DX10
+; CHECK: PUSH
+; CHECK-NOT: ALU_PUSH_BEFORE
+; CHECK: END_LOOP
+; CHECK: END_LOOP
+define void @main (<4 x float> inreg %reg0) #0 {
+entry:
+ br label %outer_loop
+outer_loop:
+ %cnt = phi i32 [0, %entry], [%cnt_incr, %inner_loop]
+ %cond = icmp eq i32 %cnt, 16
+ br i1 %cond, label %outer_loop_body, label %exit
+outer_loop_body:
+ %cnt_incr = add i32 %cnt, 1
+ br label %inner_loop
+inner_loop:
+ %cnt2 = phi i32 [0, %outer_loop_body], [%cnt2_incr, %inner_loop_body]
+ %cond2 = icmp eq i32 %cnt2, 16
+ br i1 %cond, label %inner_loop_body, label %outer_loop
+inner_loop_body:
+ %cnt2_incr = add i32 %cnt2, 1
+ br label %inner_loop
+exit:
+ ret void
+}
+
+attributes #0 = { "ShaderType"="0" } \ No newline at end of file
diff --git a/test/CodeGen/R600/cf-stack-bug.ll b/test/CodeGen/R600/cf-stack-bug.ll
new file mode 100644
index 000000000000..c3a4612e6ac9
--- /dev/null
+++ b/test/CodeGen/R600/cf-stack-bug.ll
@@ -0,0 +1,227 @@
+; RUN: llc -march=r600 -mcpu=redwood -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=sumo -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=barts -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=turks -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=caicos -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG64 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=cedar -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=BUG32 --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=juniper -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=NOBUG --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=cypress -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=NOBUG --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=cayman -debug-only=r600cf %s -o - 2>&1 | FileCheck %s --check-prefix=NOBUG --check-prefix=FUNC
+
+; REQUIRES: asserts
+
+; We are currently allocating 2 extra sub-entries on Evergreen / NI for
+; non-WQM push instructions; if we change this to 1, then we will need to
+; add one level of depth to each of these tests.
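+; With those extra sub-entries, the work-around is triggered at a nesting
+; depth of 4 on the BUG64 parts and at a depth of 8 on cedar (BUG32), while
+; a depth of 3 does not trigger it on any part, which is what the tests
+; below check.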
+
+; BUG64-NOT: Applying bug work-around
+; BUG32-NOT: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested3
+define void @nested3(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.store.1
+
+if.store.1:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
+
+; BUG64: Applying bug work-around
+; BUG32-NOT: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested4
+define void @nested4(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.1.store
+
+if.1.store:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ %3 = icmp sgt i32 %cond, 30
+ br i1 %3, label %if.4, label %if.3.store
+
+if.3.store:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+if.4:
+ store i32 4, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
+
+; BUG64: Applying bug work-around
+; BUG32-NOT: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested7
+define void @nested7(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.1.store
+
+if.1.store:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ %3 = icmp sgt i32 %cond, 30
+ br i1 %3, label %if.4, label %if.3.store
+
+if.3.store:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+if.4:
+ %4 = icmp sgt i32 %cond, 40
+ br i1 %4, label %if.5, label %if.4.store
+
+if.4.store:
+ store i32 4, i32 addrspace(1)* %out
+ br label %end
+
+if.5:
+ %5 = icmp sgt i32 %cond, 50
+ br i1 %5, label %if.6, label %if.5.store
+
+if.5.store:
+ store i32 5, i32 addrspace(1)* %out
+ br label %end
+
+if.6:
+ %6 = icmp sgt i32 %cond, 60
+ br i1 %6, label %if.7, label %if.6.store
+
+if.6.store:
+ store i32 6, i32 addrspace(1)* %out
+ br label %end
+
+if.7:
+ store i32 7, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
+
+; BUG64: Applying bug work-around
+; BUG32: Applying bug work-around
+; NOBUG-NOT: Applying bug work-around
+; FUNC-LABEL: @nested8
+define void @nested8(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %0 = icmp sgt i32 %cond, 0
+ br i1 %0, label %if.1, label %end
+
+if.1:
+ %1 = icmp sgt i32 %cond, 10
+ br i1 %1, label %if.2, label %if.1.store
+
+if.1.store:
+ store i32 1, i32 addrspace(1)* %out
+ br label %end
+
+if.2:
+ %2 = icmp sgt i32 %cond, 20
+ br i1 %2, label %if.3, label %if.2.store
+
+if.2.store:
+ store i32 2, i32 addrspace(1)* %out
+ br label %end
+
+if.3:
+ %3 = icmp sgt i32 %cond, 30
+ br i1 %3, label %if.4, label %if.3.store
+
+if.3.store:
+ store i32 3, i32 addrspace(1)* %out
+ br label %end
+
+if.4:
+ %4 = icmp sgt i32 %cond, 40
+ br i1 %4, label %if.5, label %if.4.store
+
+if.4.store:
+ store i32 4, i32 addrspace(1)* %out
+ br label %end
+
+if.5:
+ %5 = icmp sgt i32 %cond, 50
+ br i1 %5, label %if.6, label %if.5.store
+
+if.5.store:
+ store i32 5, i32 addrspace(1)* %out
+ br label %end
+
+if.6:
+ %6 = icmp sgt i32 %cond, 60
+ br i1 %6, label %if.7, label %if.6.store
+
+if.6.store:
+ store i32 6, i32 addrspace(1)* %out
+ br label %end
+
+if.7:
+ %7 = icmp sgt i32 %cond, 70
+ br i1 %7, label %if.8, label %if.7.store
+
+if.7.store:
+ store i32 7, i32 addrspace(1)* %out
+ br label %end
+
+if.8:
+ store i32 8, i32 addrspace(1)* %out
+ br label %end
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
new file mode 100644
index 000000000000..f8b4a61a7db5
--- /dev/null
+++ b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
@@ -0,0 +1,19 @@
+; RUN: opt -codegenprepare -S -o - %s | FileCheck --check-prefix=OPT --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-LLC --check-prefix=FUNC %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "r600--"
+
+; FUNC-LABEL: @test
+; OPT: mul nsw i32
+; OPT-NEXT: sext
+; SI-LLC: V_MUL_LO_I32
+; SI-LLC-NOT: V_MUL_HI
+define void @test(i8 addrspace(1)* nocapture readonly %in, i32 %a, i8 %b) {
+entry:
+ %0 = mul nsw i32 %a, 3
+ %1 = sext i32 %0 to i64
+ %2 = getelementptr i8 addrspace(1)* %in, i64 %1
+ store i8 %b, i8 addrspace(1)* %2
+ ret void
+}
diff --git a/test/CodeGen/R600/concat_vectors.ll b/test/CodeGen/R600/concat_vectors.ll
new file mode 100644
index 000000000000..9abc5a627c1c
--- /dev/null
+++ b/test/CodeGen/R600/concat_vectors.ll
@@ -0,0 +1,249 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @test_concat_v1i32
+; SI-NOT: MOVREL
+define void @test_concat_v1i32(<2 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
+ %concat = shufflevector <1 x i32> %a, <1 x i32> %b, <2 x i32> <i32 0, i32 1>
+ store <2 x i32> %concat, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v2i32
+; SI-NOT: MOVREL
+define void @test_concat_v2i32(<4 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %concat = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i32> %concat, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v4i32
+; SI-NOT: MOVREL
+define void @test_concat_v4i32(<8 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
+ %concat = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i32> %concat, <8 x i32> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v8i32
+; SI-NOT: MOVREL
+define void @test_concat_v8i32(<16 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) nounwind {
+ %concat = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <16 x i32> %concat, <16 x i32> addrspace(1)* %out, align 64
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v16i32
+; SI-NOT: MOVREL
+define void @test_concat_v16i32(<32 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) nounwind {
+ %concat = shufflevector <16 x i32> %a, <16 x i32> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ store <32 x i32> %concat, <32 x i32> addrspace(1)* %out, align 128
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v1f32
+; SI-NOT: MOVREL
+define void @test_concat_v1f32(<2 x float> addrspace(1)* %out, <1 x float> %a, <1 x float> %b) nounwind {
+ %concat = shufflevector <1 x float> %a, <1 x float> %b, <2 x i32> <i32 0, i32 1>
+ store <2 x float> %concat, <2 x float> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v2f32
+; SI-NOT: MOVREL
+define void @test_concat_v2f32(<4 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) nounwind {
+ %concat = shufflevector <2 x float> %a, <2 x float> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x float> %concat, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v4f32
+; SI-NOT: MOVREL
+define void @test_concat_v4f32(<8 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) nounwind {
+ %concat = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x float> %concat, <8 x float> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v8f32
+; SI-NOT: MOVREL
+define void @test_concat_v8f32(<16 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) nounwind {
+ %concat = shufflevector <8 x float> %a, <8 x float> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <16 x float> %concat, <16 x float> addrspace(1)* %out, align 64
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v16f32
+; SI-NOT: MOVREL
+define void @test_concat_v16f32(<32 x float> addrspace(1)* %out, <16 x float> %a, <16 x float> %b) nounwind {
+ %concat = shufflevector <16 x float> %a, <16 x float> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ store <32 x float> %concat, <32 x float> addrspace(1)* %out, align 128
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v1i64
+; SI-NOT: MOVREL
+define void @test_concat_v1i64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
+ %concat = shufflevector <1 x double> %a, <1 x double> %b, <2 x i32> <i32 0, i32 1>
+ store <2 x double> %concat, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v2i64
+; SI-NOT: MOVREL
+define void @test_concat_v2i64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
+ %concat = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x double> %concat, <4 x double> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v4i64
+; SI-NOT: MOVREL
+define void @test_concat_v4i64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
+ %concat = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x double> %concat, <8 x double> addrspace(1)* %out, align 64
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v8i64
+; SI-NOT: MOVREL
+define void @test_concat_v8i64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
+ %concat = shufflevector <8 x double> %a, <8 x double> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <16 x double> %concat, <16 x double> addrspace(1)* %out, align 128
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v16i64
+; SI-NOT: MOVREL
+define void @test_concat_v16i64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
+ %concat = shufflevector <16 x double> %a, <16 x double> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ store <32 x double> %concat, <32 x double> addrspace(1)* %out, align 256
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v1f64
+; SI-NOT: MOVREL
+define void @test_concat_v1f64(<2 x double> addrspace(1)* %out, <1 x double> %a, <1 x double> %b) nounwind {
+ %concat = shufflevector <1 x double> %a, <1 x double> %b, <2 x i32> <i32 0, i32 1>
+ store <2 x double> %concat, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v2f64
+; SI-NOT: MOVREL
+define void @test_concat_v2f64(<4 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) nounwind {
+ %concat = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x double> %concat, <4 x double> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v4f64
+; SI-NOT: MOVREL
+define void @test_concat_v4f64(<8 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) nounwind {
+ %concat = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x double> %concat, <8 x double> addrspace(1)* %out, align 64
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v8f64
+; SI-NOT: MOVREL
+define void @test_concat_v8f64(<16 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b) nounwind {
+ %concat = shufflevector <8 x double> %a, <8 x double> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <16 x double> %concat, <16 x double> addrspace(1)* %out, align 128
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v16f64
+; SI-NOT: MOVREL
+define void @test_concat_v16f64(<32 x double> addrspace(1)* %out, <16 x double> %a, <16 x double> %b) nounwind {
+ %concat = shufflevector <16 x double> %a, <16 x double> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ store <32 x double> %concat, <32 x double> addrspace(1)* %out, align 256
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v1i1
+; SI-NOT: MOVREL
+define void @test_concat_v1i1(<2 x i1> addrspace(1)* %out, <1 x i1> %a, <1 x i1> %b) nounwind {
+ %concat = shufflevector <1 x i1> %a, <1 x i1> %b, <2 x i32> <i32 0, i32 1>
+ store <2 x i1> %concat, <2 x i1> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v2i1
+; SI-NOT: MOVREL
+define void @test_concat_v2i1(<4 x i1> addrspace(1)* %out, <2 x i1> %a, <2 x i1> %b) nounwind {
+ %concat = shufflevector <2 x i1> %a, <2 x i1> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i1> %concat, <4 x i1> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v4i1
+; SI-NOT: MOVREL
+define void @test_concat_v4i1(<8 x i1> addrspace(1)* %out, <4 x i1> %a, <4 x i1> %b) nounwind {
+ %concat = shufflevector <4 x i1> %a, <4 x i1> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i1> %concat, <8 x i1> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v8i1
+; SI-NOT: MOVREL
+define void @test_concat_v8i1(<16 x i1> addrspace(1)* %out, <8 x i1> %a, <8 x i1> %b) nounwind {
+ %concat = shufflevector <8 x i1> %a, <8 x i1> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <16 x i1> %concat, <16 x i1> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v16i1
+; SI-NOT: MOVREL
+define void @test_concat_v16i1(<32 x i1> addrspace(1)* %out, <16 x i1> %a, <16 x i1> %b) nounwind {
+ %concat = shufflevector <16 x i1> %a, <16 x i1> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ store <32 x i1> %concat, <32 x i1> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v32i1
+; SI-NOT: MOVREL
+define void @test_concat_v32i1(<64 x i1> addrspace(1)* %out, <32 x i1> %a, <32 x i1> %b) nounwind {
+ %concat = shufflevector <32 x i1> %a, <32 x i1> %b, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ store <64 x i1> %concat, <64 x i1> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v1i16
+; SI-NOT: MOVREL
+define void @test_concat_v1i16(<2 x i16> addrspace(1)* %out, <1 x i16> %a, <1 x i16> %b) nounwind {
+ %concat = shufflevector <1 x i16> %a, <1 x i16> %b, <2 x i32> <i32 0, i32 1>
+ store <2 x i16> %concat, <2 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v2i16
+; SI-NOT: MOVREL
+define void @test_concat_v2i16(<4 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) nounwind {
+ %concat = shufflevector <2 x i16> %a, <2 x i16> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i16> %concat, <4 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v4i16
+; SI-NOT: MOVREL
+define void @test_concat_v4i16(<8 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b) nounwind {
+ %concat = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i16> %concat, <8 x i16> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v8i16
+; SI-NOT: MOVREL
+define void @test_concat_v8i16(<16 x i16> addrspace(1)* %out, <8 x i16> %a, <8 x i16> %b) nounwind {
+ %concat = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <16 x i16> %concat, <16 x i16> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @test_concat_v16i16
+; SI-NOT: MOVREL
+define void @test_concat_v16i16(<32 x i16> addrspace(1)* %out, <16 x i16> %a, <16 x i16> %b) nounwind {
+ %concat = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ store <32 x i16> %concat, <32 x i16> addrspace(1)* %out, align 64
+ ret void
+}
diff --git a/test/CodeGen/R600/copy-illegal-type.ll b/test/CodeGen/R600/copy-illegal-type.ll
new file mode 100644
index 000000000000..f7c2321ae8fe
--- /dev/null
+++ b/test/CodeGen/R600/copy-illegal-type.ll
@@ -0,0 +1,166 @@
+; RUN: llc -march=r600 -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @test_copy_v4i8
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x2
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x3
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x4
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out3, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_extra_use
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+
+; Expected checks once scalarizing of v4i8 loads is fixed:
+; XSI: BUFFER_LOAD_DWORD
+; XSI: V_BFE
+; XSI: V_ADD
+; XSI: V_ADD
+; XSI: V_ADD
+; XSI: BUFFER_STORE_DWORD
+; XSI: BUFFER_STORE_DWORD
+
+; SI: S_ENDPGM
+define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x2_extra_use
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+
+; XSI: BUFFER_LOAD_DWORD
+; XSI: BFE
+; XSI: BUFFER_STORE_DWORD
+; XSI: V_ADD
+; XSI: BUFFER_STORE_DWORD
+; XSI-NEXT: BUFFER_STORE_DWORD
+
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v3i8
+; SI-NOT: BFE
+; SI-NOT: BFI
+; SI: S_ENDPGM
+define void @test_copy_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
+ %val = load <3 x i8> addrspace(1)* %in, align 4
+ store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_volatile_load
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: S_ENDPGM
+define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load volatile <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_volatile_store
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: S_ENDPGM
+define void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store volatile <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/ctlz_zero_undef.ll b/test/CodeGen/R600/ctlz_zero_undef.ll
new file mode 100644
index 000000000000..1340ef98c605
--- /dev/null
+++ b/test/CodeGen/R600/ctlz_zero_undef.ll
@@ -0,0 +1,70 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
+
+; FUNC-LABEL: @s_ctlz_zero_undef_i32:
+; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
+; SI: S_FLBIT_I32_B32 [[SRESULT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+define void @s_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %ctlz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctlz_zero_undef_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_FFBH_U32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+define void @v_ctlz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %val = load i32 addrspace(1)* %valptr, align 4
+ %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %ctlz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctlz_zero_undef_v2i32:
+; SI: BUFFER_LOAD_DWORDX2
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+define void @v_ctlz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <2 x i32> addrspace(1)* %valptr, align 8
+ %ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
+ store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_ctlz_zero_undef_v4i32:
+; SI: BUFFER_LOAD_DWORDX4
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: V_FFBH_U32_e32
+; SI: BUFFER_STORE_DWORDX4
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+; EG: FFBH_UINT {{\*? *}}[[RESULT]]
+define void @v_ctlz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <4 x i32> addrspace(1)* %valptr, align 16
+ %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
+ store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/ctpop.ll b/test/CodeGen/R600/ctpop.ll
new file mode 100644
index 000000000000..22a3022145f1
--- /dev/null
+++ b/test/CodeGen/R600/ctpop.ll
@@ -0,0 +1,284 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) nounwind readnone
+declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>) nounwind readnone
+
+; FUNC-LABEL: @s_ctpop_i32:
+; SI: S_LOAD_DWORD [[SVAL:s[0-9]+]],
+; SI: S_BCNT1_I32_B32 [[SRESULT:s[0-9]+]], [[SVAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ store i32 %ctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; XXX - Why 0 in register?
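+; (V_BCNT_U32_B32 adds its second source to the population count, and the
+; VOP2 _e32 encoding only accepts a VGPR there, so the zero addend is first
+; materialized with V_MOV_B32.)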
+; FUNC-LABEL: @v_ctpop_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
+; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VZERO]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ store i32 %ctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_add_chain_i32
+; SI: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]],
+; SI: BUFFER_LOAD_DWORD [[VAL1:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
+; SI: V_BCNT_U32_B32_e32 [[MIDRESULT:v[0-9]+]], [[VAL1]], [[VZERO]]
+; SI-NOT: ADD
+; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1) nounwind {
+ %val0 = load i32 addrspace(1)* %in0, align 4
+ %val1 = load i32 addrspace(1)* %in1, align 4
+ %ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
+ %ctpop1 = call i32 @llvm.ctpop.i32(i32 %val1) nounwind readnone
+ %add = add i32 %ctpop0, %ctpop1
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v2i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <2 x i32> addrspace(1)* %in, align 8
+ %ctpop = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %val) nounwind readnone
+ store <2 x i32> %ctpop, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v4i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <4 x i32> addrspace(1)* %in, align 16
+ %ctpop = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %val) nounwind readnone
+ store <4 x i32> %ctpop, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v8i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <8 x i32> addrspace(1)* %in, align 32
+ %ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %val) nounwind readnone
+ store <8 x i32> %ctpop, <8 x i32> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v16i32:
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: V_BCNT_U32_B32_e32
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> addrspace(1)* noalias %in) nounwind {
+ %val = load <16 x i32> addrspace(1)* %in, align 32
+ %ctpop = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %val) nounwind readnone
+ store <16 x i32> %ctpop, <16 x i32> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_inline_constant:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %ctpop, 4
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_inline_constant_inv:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 4, %ctpop
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_literal:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[LIT:v[0-9]+]], 0x1869f
+; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %ctpop, 99999
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_var:
+; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI-DAG: S_LOAD_DWORD [[VAR:s[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %ctpop, %const
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_var_inv:
+; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI-DAG: S_LOAD_DWORD [[VAR:s[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %add = add i32 %const, %ctpop
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i32_add_vvar_inv
+; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]], {{.*}} + 0x0
+; SI-DAG: BUFFER_LOAD_DWORD [[VAR:v[0-9]+]], {{.*}} + 0x10
+; SI: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BCNT_INT
+define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
+ %gep = getelementptr i32 addrspace(1)* %constptr, i32 4
+ %const = load i32 addrspace(1)* %gep, align 4
+ %add = add i32 %const, %ctpop
+ store i32 %add, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FIXME: We currently disallow SALU instructions in all branches,
+; but there are some cases when they should be allowed.
+
+; FUNC-LABEL: @ctpop_i32_in_br
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BCNT_U32_B32_e64 [[RESULT:v[0-9]+]], [[VAL]], 0
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+; EG: BCNT_INT
+define void @ctpop_i32_in_br(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %cond) {
+entry:
+ %0 = icmp eq i32 %cond, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = load i32 addrspace(1)* %in
+ %2 = call i32 @llvm.ctpop.i32(i32 %1)
+ br label %endif
+
+else:
+ %3 = getelementptr i32 addrspace(1)* %in, i32 1
+ %4 = load i32 addrspace(1)* %3
+ br label %endif
+
+endif:
+ %5 = phi i32 [%2, %if], [%4, %else]
+ store i32 %5, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/ctpop64.ll b/test/CodeGen/R600/ctpop64.ll
new file mode 100644
index 000000000000..b36ecc68d895
--- /dev/null
+++ b/test/CodeGen/R600/ctpop64.ll
@@ -0,0 +1,122 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i64 @llvm.ctpop.i64(i64) nounwind readnone
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) nounwind readnone
+declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>) nounwind readnone
+declare <16 x i64> @llvm.ctpop.v16i64(<16 x i64>) nounwind readnone
+
+; FUNC-LABEL: @s_ctpop_i64:
+; SI: S_LOAD_DWORDX2 [[SVAL:s\[[0-9]+:[0-9]+\]]],
+; SI: S_BCNT1_I32_B64 [[SRESULT:s[0-9]+]], [[SVAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+define void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
+ %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
+ %truncctpop = trunc i64 %ctpop to i32
+ store i32 %truncctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_i64:
+; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
+; SI: V_MOV_B32_e32 [[VZERO:v[0-9]+]], 0
+; SI: V_BCNT_U32_B32_e32 [[MIDRESULT:v[0-9]+]], v[[LOVAL]], [[VZERO]]
+; SI-NEXT: V_BCNT_U32_B32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
+ %val = load i64 addrspace(1)* %in, align 8
+ %ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
+ %truncctpop = trunc i64 %ctpop to i32
+ store i32 %truncctpop, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @s_ctpop_v2i64:
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_ENDPGM
+define void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val) nounwind {
+ %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
+ %truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
+ store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_ctpop_v4i64:
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_BCNT1_I32_B64
+; SI: S_ENDPGM
+define void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val) nounwind {
+ %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
+ %truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
+ store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v2i64:
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: S_ENDPGM
+define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
+ %val = load <2 x i64> addrspace(1)* %in, align 16
+ %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
+ %truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
+ store <2 x i32> %truncctpop, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_ctpop_v4i64:
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: V_BCNT_U32_B32
+; SI: S_ENDPGM
+define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
+ %val = load <4 x i64> addrspace(1)* %in, align 32
+ %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
+ %truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
+ store <4 x i32> %truncctpop, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FIXME: We currently disallow SALU instructions in all branches,
+; but there are some cases when they should be allowed.
+
+; FUNC-LABEL: @ctpop_i64_in_br
+; SI: V_BCNT_U32_B32_e64 [[BCNT_LO:v[0-9]+]], v{{[0-9]+}}, 0
+; SI: V_BCNT_U32_B32_e32 v[[BCNT:[0-9]+]], v{{[0-9]+}}, [[BCNT_LO]]
+; SI: V_MOV_B32_e32 v[[ZERO:[0-9]+]], 0
+; SI: BUFFER_STORE_DWORDX2 v[
+; SI: [[BCNT]]:[[ZERO]]]
+; SI: S_ENDPGM
+define void @ctpop_i64_in_br(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i32 %cond) {
+entry:
+ %0 = icmp eq i32 %cond, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = load i64 addrspace(1)* %in
+ %2 = call i64 @llvm.ctpop.i64(i64 %1)
+ br label %endif
+
+else:
+ %3 = getelementptr i64 addrspace(1)* %in, i32 1
+ %4 = load i64 addrspace(1)* %3
+ br label %endif
+
+endif:
+ %5 = phi i64 [%2, %if], [%4, %else]
+ store i64 %5, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/cttz_zero_undef.ll b/test/CodeGen/R600/cttz_zero_undef.ll
new file mode 100644
index 000000000000..9c4a3558d094
--- /dev/null
+++ b/test/CodeGen/R600/cttz_zero_undef.ll
@@ -0,0 +1,70 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
+declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1) nounwind readnone
+declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1) nounwind readnone
+
+; FUNC-LABEL: @s_cttz_zero_undef_i32:
+; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
+; SI: S_FF1_I32_B32 [[SRESULT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+define void @s_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %cttz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_cttz_zero_undef_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_FFBL_B32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+define void @v_cttz_zero_undef_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %val = load i32 addrspace(1)* %valptr, align 4
+ %cttz = call i32 @llvm.cttz.i32(i32 %val, i1 true) nounwind readnone
+ store i32 %cttz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_cttz_zero_undef_v2i32:
+; SI: BUFFER_LOAD_DWORDX2
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+define void @v_cttz_zero_undef_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <2 x i32> addrspace(1)* %valptr, align 8
+ %cttz = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %val, i1 true) nounwind readnone
+ store <2 x i32> %cttz, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_cttz_zero_undef_v4i32:
+; SI: BUFFER_LOAD_DWORDX4
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: V_FFBL_B32_e32
+; SI: BUFFER_STORE_DWORDX4
+; SI: S_ENDPGM
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+; EG: FFBL_INT {{\*? *}}[[RESULT]]
+define void @v_cttz_zero_undef_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
+ %val = load <4 x i32> addrspace(1)* %valptr, align 16
+ %cttz = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %val, i1 true) nounwind readnone
+ store <4 x i32> %cttz, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/cvt_f32_ubyte.ll b/test/CodeGen/R600/cvt_f32_ubyte.ll
new file mode 100644
index 000000000000..06a601065c3e
--- /dev/null
+++ b/test/CodeGen/R600/cvt_f32_ubyte.ll
@@ -0,0 +1,175 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @load_i8_to_f32:
+; SI: BUFFER_LOAD_UBYTE [[LOADREG:v[0-9]+]],
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI: V_CVT_F32_UBYTE0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
+; SI: BUFFER_STORE_DWORD [[CONV]],
+define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
+ %load = load i8 addrspace(1)* %in, align 1
+ %cvt = uitofp i8 %load to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @load_v2i8_to_v2f32:
+; SI: BUFFER_LOAD_USHORT [[LOADREG:v[0-9]+]],
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI-NOT: AND
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
+; SI: BUFFER_STORE_DWORDX2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <2 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <2 x i8> %load to <2 x float>
+ store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @load_v3i8_to_v3f32:
+; SI-NOT: BFE
+; SI-NOT: V_CVT_F32_UBYTE3_e32
+; SI-DAG: V_CVT_F32_UBYTE2_e32
+; SI-DAG: V_CVT_F32_UBYTE1_e32
+; SI-DAG: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_STORE_DWORDX2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <3 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <3 x i8> %load to <3 x float>
+ store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @load_v4i8_to_v4f32:
+; We can't use BUFFER_LOAD_DWORD here, because the load is byte aligned, and
+; BUFFER_LOAD_DWORD requires dword alignment.
+; SI: BUFFER_LOAD_USHORT
+; SI: BUFFER_LOAD_USHORT
+; SI: V_OR_B32_e32 [[LOADREG:v[0-9]+]]
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI-DAG: V_CVT_F32_UBYTE3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, [[LOADREG]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
+; SI: BUFFER_STORE_DWORDX4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <4 x i8> %load to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; XXX - This should really still be able to use the V_CVT_F32_UBYTE0
+; for each component, but computeKnownBits doesn't handle vectors very
+; well.
+
+; SI-LABEL: @load_v4i8_to_v4f32_2_uses:
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_CVT_F32_UBYTE0_e32
+
+; XXX - replace with this when v4i8 loads aren't scalarized anymore.
+; XSI: BUFFER_LOAD_DWORD
+; XSI: V_CVT_F32_U32_e32
+; XSI: V_CVT_F32_U32_e32
+; XSI: V_CVT_F32_U32_e32
+; XSI: V_CVT_F32_U32_e32
+; SI: S_ENDPGM
+define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 4
+ %cvt = uitofp <4 x i8> %load to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+ %add = add <4 x i8> %load, <i8 9, i8 9, i8 9, i8 9> ; Second use of %load
+ store <4 x i8> %add, <4 x i8> addrspace(1)* %out2, align 4
+ ret void
+}
+
+; Make sure this doesn't crash.
+; SI-LABEL: @load_v7i8_to_v7f32:
+; SI: S_ENDPGM
+define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <7 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <7 x i8> %load to <7 x float>
+ store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @load_v8i8_to_v8f32:
+; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI-DAG: V_CVT_F32_UBYTE3_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v{{[0-9]+}}, v[[LOLOAD]]
+; SI-DAG: V_CVT_F32_UBYTE3_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-DAG: V_CVT_F32_UBYTE0_e32 v{{[0-9]+}}, v[[HILOAD]]
+; SI-NOT: BFE
+; SI-NOT: LSHR
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <8 x i8> addrspace(1)* %in, align 1
+ %cvt = uitofp <8 x i8> %load to <8 x float>
+ store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @i8_zext_inreg_i32_to_f32:
+; SI: BUFFER_LOAD_DWORD [[LOADREG:v[0-9]+]],
+; SI: V_ADD_I32_e32 [[ADD:v[0-9]+]], 2, [[LOADREG]]
+; SI-NEXT: V_CVT_F32_UBYTE0_e32 [[CONV:v[0-9]+]], [[ADD]]
+; SI: BUFFER_STORE_DWORD [[CONV]],
+define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 2
+ %inreg = and i32 %add, 255
+ %cvt = uitofp i32 %inreg to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @i8_zext_inreg_hi1_to_f32:
+define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %inreg = and i32 %load, 65280
+ %shr = lshr i32 %inreg, 8
+ %cvt = uitofp i32 %shr to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; We don't get these ones because of the zext, but instcombine removes
+; them so it shouldn't really matter.
+define void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
+ %load = load i8 addrspace(1)* %in, align 1
+ %ext = zext i8 %load to i32
+ %cvt = uitofp i32 %ext to float
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 1
+ %ext = zext <4 x i8> %load to <4 x i32>
+ %cvt = uitofp <4 x i32> %ext to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/default-fp-mode.ll b/test/CodeGen/R600/default-fp-mode.ll
new file mode 100644
index 000000000000..b24a7a246fda
--- /dev/null
+++ b/test/CodeGen/R600/default-fp-mode.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=FP64-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=+fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=FP32-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=+fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=BOTH-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=NO-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=+fp64-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+
+; FUNC-LABEL: @test_kernel
+
+; DEFAULT: FloatMode: 192
+; DEFAULT: IeeeMode: 0
+
+; FP64-DENORMAL: FloatMode: 192
+; FP64-DENORMAL: IeeeMode: 0
+
+; FP32-DENORMAL: FloatMode: 48
+; FP32-DENORMAL: IeeeMode: 0
+
+; BOTH-DENORMAL: FloatMode: 240
+; BOTH-DENORMAL: IeeeMode: 0
+
+; NO-DENORMAL: FloatMode: 0
+; NO-DENORMAL: IeeeMode: 0
+define void @test_kernel(float addrspace(1)* %out0, double addrspace(1)* %out1) nounwind {
+ store float 0.0, float addrspace(1)* %out0
+ store double 0.0, double addrspace(1)* %out1
+ ret void
+}
diff --git a/test/CodeGen/R600/elf.r600.ll b/test/CodeGen/R600/elf.r600.ll
index 0590efb0915f..4436c07c5a77 100644
--- a/test/CodeGen/R600/elf.r600.ll
+++ b/test/CodeGen/R600/elf.r600.ll
@@ -6,7 +6,7 @@
; CONFIG-CHECK: .section .AMDGPU.config
; CONFIG-CHECK-NEXT: .long 166100
-; CONFIG-CHECK-NEXT: .long 258
+; CONFIG-CHECK-NEXT: .long 2
; CONFIG-CHECK-NEXT: .long 165900
; CONFIG-CHECK-NEXT: .long 0
define void @test(float addrspace(1)* %out, i32 %p) {
diff --git a/test/CodeGen/R600/extload.ll b/test/CodeGen/R600/extload.ll
index aa660b38838d..dc056e0ecdd5 100644
--- a/test/CodeGen/R600/extload.ll
+++ b/test/CodeGen/R600/extload.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; EG-LABEL: @anyext_load_i8:
+; FUNC-LABEL: @anyext_load_i8:
; EG: AND_INT
-; EG-NEXT: 255
+; EG: 255
define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32 addrspace(1)* %cast, align 1
@@ -12,10 +13,11 @@ define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspac
ret void
}
-; EG-LABEL: @anyext_load_i16:
+; FUNC-LABEL: @anyext_load_i16:
; EG: AND_INT
-; EG: LSHL
-; EG: 65535
+; EG: AND_INT
+; EG-DAG: 65535
+; EG-DAG: -65536
define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32 addrspace(1)* %cast, align 1
@@ -25,9 +27,9 @@ define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrs
ret void
}
-; EG-LABEL: @anyext_load_lds_i8:
+; FUNC-LABEL: @anyext_load_lds_i8:
; EG: AND_INT
-; EG-NEXT: 255
+; EG: 255
define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32 addrspace(3)* %cast, align 1
@@ -37,10 +39,11 @@ define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addr
ret void
}
-; EG-LABEL: @anyext_load_lds_i16:
+; FUNC-LABEL: @anyext_load_lds_i16:
+; EG: AND_INT
; EG: AND_INT
-; EG: LSHL
-; EG: 65535
+; EG-DAG: 65535
+; EG-DAG: -65536
define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32 addrspace(3)* %cast, align 1
@@ -49,3 +52,72 @@ define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 a
store <2 x i16> %x, <2 x i16> addrspace(3)* %castOut, align 1
ret void
}
+
+; FUNC-LABEL: @sextload_global_i8_to_i64
+; SI: BUFFER_LOAD_SBYTE [[LOAD:v[0-9]+]],
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: BUFFER_STORE_DWORDX2
+define void @sextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in, align 8
+ %ext = sext i8 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sextload_global_i16_to_i64
+; SI: BUFFER_LOAD_SSHORT [[LOAD:v[0-9]+]],
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: BUFFER_STORE_DWORDX2
+define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in, align 8
+ %ext = sext i16 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sextload_global_i32_to_i64
+; SI: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: BUFFER_STORE_DWORDX2
+define void @sextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %a = load i32 addrspace(1)* %in, align 8
+ %ext = sext i32 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @zextload_global_i8_to_i64
+; SI: S_MOV_B32 [[ZERO:s[0-9]+]], 0
+; SI: BUFFER_LOAD_UBYTE [[LOAD:v[0-9]+]],
+; SI: V_MOV_B32_e32 {{v[0-9]+}}, [[ZERO]]
+; SI: BUFFER_STORE_DWORDX2
+define void @zextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in, align 8
+ %ext = zext i8 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @zextload_global_i16_to_i64
+; SI: S_MOV_B32 [[ZERO:s[0-9]+]], 0
+; SI: BUFFER_LOAD_USHORT [[LOAD:v[0-9]+]],
+; SI: V_MOV_B32_e32 {{v[0-9]+}}, [[ZERO]]
+; SI: BUFFER_STORE_DWORDX2
+define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in, align 8
+ %ext = zext i16 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @zextload_global_i32_to_i64
+; SI: S_MOV_B32 [[ZERO:s[0-9]+]], 0
+; SI: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
+; SI: V_MOV_B32_e32 {{v[0-9]+}}, [[ZERO]]
+; SI: BUFFER_STORE_DWORDX2
+define void @zextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %a = load i32 addrspace(1)* %in, align 8
+ %ext = zext i32 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/extract_vector_elt_i16.ll b/test/CodeGen/R600/extract_vector_elt_i16.ll
new file mode 100644
index 000000000000..5cd1b04bd1de
--- /dev/null
+++ b/test/CodeGen/R600/extract_vector_elt_i16.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @extract_vector_elt_v2i16
+; SI: BUFFER_LOAD_USHORT
+; SI: BUFFER_STORE_SHORT
+; SI: BUFFER_LOAD_USHORT
+; SI: BUFFER_STORE_SHORT
+define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) nounwind {
+ %p0 = extractelement <2 x i16> %foo, i32 0
+ %p1 = extractelement <2 x i16> %foo, i32 1
+ %out1 = getelementptr i16 addrspace(1)* %out, i32 1
+ store i16 %p1, i16 addrspace(1)* %out, align 2
+ store i16 %p0, i16 addrspace(1)* %out1, align 2
+ ret void
+}
+
+; FUNC-LABEL: @extract_vector_elt_v4i16
+; SI: BUFFER_LOAD_USHORT
+; SI: BUFFER_STORE_SHORT
+; SI: BUFFER_LOAD_USHORT
+; SI: BUFFER_STORE_SHORT
+define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) nounwind {
+ %p0 = extractelement <4 x i16> %foo, i32 0
+ %p1 = extractelement <4 x i16> %foo, i32 2
+ %out1 = getelementptr i16 addrspace(1)* %out, i32 1
+ store i16 %p1, i16 addrspace(1)* %out, align 2
+ store i16 %p0, i16 addrspace(1)* %out1, align 2
+ ret void
+}
diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
index 2cd3a4f604f2..b87ce2254095 100644
--- a/test/CodeGen/R600/fabs.ll
+++ b/test/CodeGen/R600/fabs.ll
@@ -49,6 +49,17 @@ entry:
ret void
}
+; SI-CHECK-LABEL: @fabs_fold
+; SI-CHECK-NOT: V_AND_B32_e32
+; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, s{{[0-9]+}}, |v{{[0-9]+}}|
+define void @fabs_fold(float addrspace(1)* %out, float %in0, float %in1) {
+entry:
+ %0 = call float @fabs(float %in0)
+ %1 = fmul float %0, %in1
+ store float %1, float addrspace(1)* %out
+ ret void
+}
+
declare float @fabs(float ) readnone
declare <2 x float> @llvm.fabs.v2f32(<2 x float> ) readnone
declare <4 x float> @llvm.fabs.v4f32(<4 x float> ) readnone
diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
index f467bb785779..5d2b806039a2 100644
--- a/test/CodeGen/R600/fadd.ll
+++ b/test/CodeGen/R600/fadd.ll
@@ -1,9 +1,8 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
-; R600-CHECK: @fadd_f32
+; FUNC-LABEL: @fadd_f32
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
-; SI-CHECK: @fadd_f32
; SI-CHECK: V_ADD_F32
define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
entry:
@@ -12,10 +11,9 @@ entry:
ret void
}
-; R600-CHECK: @fadd_v2f32
+; FUNC-LABEL: @fadd_v2f32
; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
-; SI-CHECK: @fadd_v2f32
; SI-CHECK: V_ADD_F32
; SI-CHECK: V_ADD_F32
define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
@@ -25,12 +23,11 @@ entry:
ret void
}
-; R600-CHECK: @fadd_v4f32
+; FUNC-LABEL: @fadd_v4f32
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-CHECK: @fadd_v4f32
; SI-CHECK: V_ADD_F32
; SI-CHECK: V_ADD_F32
; SI-CHECK: V_ADD_F32
@@ -43,3 +40,27 @@ define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)
store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @fadd_v8f32
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; R600-CHECK: ADD
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+define void @fadd_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) {
+entry:
+ %0 = fadd <8 x float> %a, %b
+ store <8 x float> %0, <8 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fceil.ll b/test/CodeGen/R600/fceil.ll
new file mode 100644
index 000000000000..458363adc1e3
--- /dev/null
+++ b/test/CodeGen/R600/fceil.ll
@@ -0,0 +1,131 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.ceil.f32(float) nounwind readnone
+declare <2 x float> @llvm.ceil.v2f32(<2 x float>) nounwind readnone
+declare <3 x float> @llvm.ceil.v3f32(<3 x float>) nounwind readnone
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
+declare <8 x float> @llvm.ceil.v8f32(<8 x float>) nounwind readnone
+declare <16 x float> @llvm.ceil.v16f32(<16 x float>) nounwind readnone
+
+; FUNC-LABEL: @fceil_f32:
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+define void @fceil_f32(float addrspace(1)* %out, float %x) {
+ %y = call float @llvm.ceil.f32(float %x) nounwind readnone
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v2f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+define void @fceil_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
+ %y = call <2 x float> @llvm.ceil.v2f32(<2 x float> %x) nounwind readnone
+ store <2 x float> %y, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v3f32:
+; FIXME-SI: V_CEIL_F32_e32
+; FIXME-SI: V_CEIL_F32_e32
+; FIXME-SI: V_CEIL_F32_e32
+; FIXME-EG: v3 is treated as v2 and v1, hence 2 stores
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT1:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT2:T[0-9]+]]{{\.[XYZW]}}
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+define void @fceil_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
+ %y = call <3 x float> @llvm.ceil.v3f32(<3 x float> %x) nounwind readnone
+ store <3 x float> %y, <3 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v4f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+]]{{\.[XYZW]}}
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+; EG: CEIL {{\*? *}}[[RESULT]]
+define void @fceil_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
+ %y = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x) nounwind readnone
+ store <4 x float> %y, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v8f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT1:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT2:T[0-9]+]]{{\.[XYZW]}}
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+define void @fceil_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
+ %y = call <8 x float> @llvm.ceil.v8f32(<8 x float> %x) nounwind readnone
+ store <8 x float> %y, <8 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v16f32:
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; SI: V_CEIL_F32_e32
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT1:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT2:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT3:T[0-9]+]]{{\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT4:T[0-9]+]]{{\.[XYZW]}}
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT1]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT2]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT3]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+; EG-DAG: CEIL {{\*? *}}[[RESULT4]]
+define void @fceil_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
+ %y = call <16 x float> @llvm.ceil.v16f32(<16 x float> %x) nounwind readnone
+ store <16 x float> %y, <16 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fceil64.ll b/test/CodeGen/R600/fceil64.ll
new file mode 100644
index 000000000000..b42aefa17328
--- /dev/null
+++ b/test/CodeGen/R600/fceil64.ll
@@ -0,0 +1,103 @@
+; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.ceil.f64(double) nounwind readnone
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>) nounwind readnone
+declare <3 x double> @llvm.ceil.v3f64(<3 x double>) nounwind readnone
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
+declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
+declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
+
+; FUNC-LABEL: @fceil_f64:
+; CI: V_CEIL_F64_e32
+; SI: S_BFE_I32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
+; SI: S_ADD_I32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
+; SI: S_LSHR_B64
+; SI: S_NOT_B64
+; SI: S_AND_B64
+; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
+; SI: CMP_LT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_GT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_GT_F64
+; SI: CNDMASK_B32
+; SI: CMP_NE_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: V_ADD_F64
+define void @fceil_f64(double addrspace(1)* %out, double %x) {
+ %y = call double @llvm.ceil.f64(double %x) nounwind readnone
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v2f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+ %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x) nounwind readnone
+ store <2 x double> %y, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-FUNC-LABEL: @fceil_v3f64:
+; FIXME-CI: V_CEIL_F64_e32
+; FIXME-CI: V_CEIL_F64_e32
+; FIXME-CI: V_CEIL_F64_e32
+; define void @fceil_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; %y = call <3 x double> @llvm.ceil.v3f64(<3 x double> %x) nounwind readnone
+; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; ret void
+; }
+
+; FUNC-LABEL: @fceil_v4f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+ %y = call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
+ store <4 x double> %y, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v8f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+ %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
+ store <8 x double> %y, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fceil_v16f64:
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+; CI: V_CEIL_F64_e32
+define void @fceil_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+ %y = call <16 x double> @llvm.ceil.v16f64(<16 x double> %x) nounwind readnone
+ store <16 x double> %y, <16 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fcmp64.ll b/test/CodeGen/R600/fcmp64.ll
index bcc7a8c8567a..8cbe9f686648 100644
--- a/test/CodeGen/R600/fcmp64.ll
+++ b/test/CodeGen/R600/fcmp64.ll
@@ -53,7 +53,7 @@ define void @fge_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
}
; CHECK: @fne_f64
-; CHECK: V_CMP_NEQ_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: V_CMP_NEQ_F64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
diff --git a/test/CodeGen/R600/fconst64.ll b/test/CodeGen/R600/fconst64.ll
index 5c5ee7e9091b..9c3a7e3d2e93 100644
--- a/test/CodeGen/R600/fconst64.ll
+++ b/test/CodeGen/R600/fconst64.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
; CHECK: @fconst_f64
-; CHECK: V_MOV_B32_e32 {{v[0-9]+}}, 0.000000e+00
-; CHECK-NEXT: V_MOV_B32_e32 {{v[0-9]+}}, 2.312500e+00
+; CHECK-DAG: S_MOV_B32 {{s[0-9]+}}, 0x40140000
+; CHECK-DAG: S_MOV_B32 {{s[0-9]+}}, 0
define void @fconst_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
%r1 = load double addrspace(1)* %in
diff --git a/test/CodeGen/R600/fcopysign.f32.ll b/test/CodeGen/R600/fcopysign.f32.ll
new file mode 100644
index 000000000000..7b4425bed724
--- /dev/null
+++ b/test/CodeGen/R600/fcopysign.f32.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+
+declare float @llvm.copysign.f32(float, float) nounwind readnone
+declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) nounwind readnone
+
+; Try to identify arg based on higher address.
+; FUNC-LABEL: @test_copysign_f32:
+; SI: S_LOAD_DWORD [[SSIGN:s[0-9]+]], {{.*}} 0xc
+; SI: V_MOV_B32_e32 [[VSIGN:v[0-9]+]], [[SSIGN]]
+; SI-DAG: S_LOAD_DWORD [[SMAG:s[0-9]+]], {{.*}} 0xb
+; SI-DAG: V_MOV_B32_e32 [[VMAG:v[0-9]+]], [[SMAG]]
+; SI-DAG: S_MOV_B32 [[SCONST:s[0-9]+]], 0x7fffffff
+; SI: V_BFI_B32 [[RESULT:v[0-9]+]], [[SCONST]], [[VMAG]], [[VSIGN]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+
+; EG: BFI_INT
+define void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign) nounwind {
+ %result = call float @llvm.copysign.f32(float %mag, float %sign)
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v2f32:
+; SI: S_ENDPGM
+
+; EG: BFI_INT
+; EG: BFI_INT
+define void @test_copysign_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %mag, <2 x float> %sign) nounwind {
+ %result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
+ store <2 x float> %result, <2 x float> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v4f32:
+; SI: S_ENDPGM
+
+; EG: BFI_INT
+; EG: BFI_INT
+; EG: BFI_INT
+; EG: BFI_INT
+define void @test_copysign_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %mag, <4 x float> %sign) nounwind {
+ %result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
+ store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
diff --git a/test/CodeGen/R600/fcopysign.f64.ll b/test/CodeGen/R600/fcopysign.f64.ll
new file mode 100644
index 000000000000..ea7a6db67f34
--- /dev/null
+++ b/test/CodeGen/R600/fcopysign.f64.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.copysign.f64(double, double) nounwind readnone
+declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) nounwind readnone
+declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) nounwind readnone
+
+; FUNC-LABEL: @test_copysign_f64:
+; SI-DAG: S_LOAD_DWORDX2 s{{\[}}[[SSIGN_LO:[0-9]+]]:[[SSIGN_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI: V_MOV_B32_e32 v[[VSIGN_HI:[0-9]+]], s[[SSIGN_HI]]
+; SI-DAG: S_LOAD_DWORDX2 s{{\[}}[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: V_MOV_B32_e32 v[[VMAG_HI:[0-9]+]], s[[SMAG_HI]]
+; SI-DAG: S_MOV_B32 [[SCONST:s[0-9]+]], 0x7fffffff
+; SI: V_BFI_B32 v[[VRESULT_HI:[0-9]+]], [[SCONST]], v[[VMAG_HI]], v[[VSIGN_HI]]
+; SI: V_MOV_B32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
+; SI: BUFFER_STORE_DWORDX2 v{{\[}}[[VMAG_LO]]:[[VRESULT_HI]]{{\]}}
+; SI: S_ENDPGM
+define void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %sign) nounwind {
+ %result = call double @llvm.copysign.f64(double %mag, double %sign)
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v2f64:
+; SI: S_ENDPGM
+define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %mag, <2 x double> %sign) nounwind {
+ %result = call <2 x double> @llvm.copysign.v2f64(<2 x double> %mag, <2 x double> %sign)
+ store <2 x double> %result, <2 x double> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_copysign_v4f64:
+; SI: S_ENDPGM
+define void @test_copysign_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %mag, <4 x double> %sign) nounwind {
+ %result = call <4 x double> @llvm.copysign.v4f64(<4 x double> %mag, <4 x double> %sign)
+ store <4 x double> %result, <4 x double> addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/fdiv.ll b/test/CodeGen/R600/fdiv.ll
index 3d21524de0f4..20db65c5eb60 100644
--- a/test/CodeGen/R600/fdiv.ll
+++ b/test/CodeGen/R600/fdiv.ll
@@ -1,20 +1,37 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; These tests check that fdiv is expanded correctly and also test that the
; scheduler is scheduling the RECIP_IEEE and MUL_IEEE instructions in separate
; instruction groups.
-; R600-CHECK: @fdiv_v2f32
-; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
-; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
-; R600-CHECK-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
-; R600-CHECK-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, PS
-; SI-CHECK: @fdiv_v2f32
-; SI-CHECK-DAG: V_RCP_F32
-; SI-CHECK-DAG: V_MUL_F32
-; SI-CHECK-DAG: V_RCP_F32
-; SI-CHECK-DAG: V_MUL_F32
+; FUNC-LABEL: @fdiv_f32
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, PS
+
+; SI-DAG: V_RCP_F32
+; SI-DAG: V_MUL_F32
+define void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) {
+entry:
+ %0 = fdiv float %a, %b
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+
+
+; FUNC-LABEL: @fdiv_v2f32
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, PS
+
+; SI-DAG: V_RCP_F32
+; SI-DAG: V_MUL_F32
+; SI-DAG: V_RCP_F32
+; SI-DAG: V_MUL_F32
define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
entry:
%0 = fdiv <2 x float> %a, %b
@@ -22,24 +39,24 @@ entry:
ret void
}
-; R600-CHECK: @fdiv_v4f32
-; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; R600-CHECK-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; R600-CHECK-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; R600-CHECK-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; SI-CHECK: @fdiv_v4f32
-; SI-CHECK-DAG: V_RCP_F32
-; SI-CHECK-DAG: V_MUL_F32
-; SI-CHECK-DAG: V_RCP_F32
-; SI-CHECK-DAG: V_MUL_F32
-; SI-CHECK-DAG: V_RCP_F32
-; SI-CHECK-DAG: V_MUL_F32
-; SI-CHECK-DAG: V_RCP_F32
-; SI-CHECK-DAG: V_MUL_F32
+; FUNC-LABEL: @fdiv_v4f32
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-DAG: MUL_IEEE {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+
+; SI-DAG: V_RCP_F32
+; SI-DAG: V_MUL_F32
+; SI-DAG: V_RCP_F32
+; SI-DAG: V_MUL_F32
+; SI-DAG: V_RCP_F32
+; SI-DAG: V_MUL_F32
+; SI-DAG: V_RCP_F32
+; SI-DAG: V_MUL_F32
define void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float> addrspace(1) * %in
diff --git a/test/CodeGen/R600/ffloor.ll b/test/CodeGen/R600/ffloor.ll
new file mode 100644
index 000000000000..31c6116988e6
--- /dev/null
+++ b/test/CodeGen/R600/ffloor.ll
@@ -0,0 +1,104 @@
+; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.floor.f64(double) nounwind readnone
+declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
+declare <3 x double> @llvm.floor.v3f64(<3 x double>) nounwind readnone
+declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
+declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
+declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
+
+; FUNC-LABEL: @ffloor_f64:
+; CI: V_FLOOR_F64_e32
+
+; SI: S_BFE_I32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
+; SI: S_ADD_I32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
+; SI: S_LSHR_B64
+; SI: S_NOT_B64
+; SI: S_AND_B64
+; SI: S_AND_B32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
+; SI: CMP_LT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_GT_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: CMP_LT_F64
+; SI: CNDMASK_B32
+; SI: CMP_NE_I32
+; SI: CNDMASK_B32
+; SI: CNDMASK_B32
+; SI: V_ADD_F64
+define void @ffloor_f64(double addrspace(1)* %out, double %x) {
+ %y = call double @llvm.floor.f64(double %x) nounwind readnone
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @ffloor_v2f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+ %y = call <2 x double> @llvm.floor.v2f64(<2 x double> %x) nounwind readnone
+ store <2 x double> %y, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-FUNC-LABEL: @ffloor_v3f64:
+; FIXME-CI: V_FLOOR_F64_e32
+; FIXME-CI: V_FLOOR_F64_e32
+; FIXME-CI: V_FLOOR_F64_e32
+; define void @ffloor_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; %y = call <3 x double> @llvm.floor.v3f64(<3 x double> %x) nounwind readnone
+; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; ret void
+; }
+
+; FUNC-LABEL: @ffloor_v4f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+ %y = call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
+ store <4 x double> %y, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @ffloor_v8f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+ %y = call <8 x double> @llvm.floor.v8f64(<8 x double> %x) nounwind readnone
+ store <8 x double> %y, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @ffloor_v16f64:
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+; CI: V_FLOOR_F64_e32
+define void @ffloor_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+ %y = call <16 x double> @llvm.floor.v16f64(<16 x double> %x) nounwind readnone
+ store <16 x double> %y, <16 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fma.ll b/test/CodeGen/R600/fma.ll
index 51e9d29a5ca2..d72ffeceb921 100644
--- a/test/CodeGen/R600/fma.ll
+++ b/test/CodeGen/R600/fma.ll
@@ -1,8 +1,15 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; CHECK: @fma_f32
-; CHECK: V_FMA_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
+declare float @llvm.fma.f32(float, float, float) nounwind readnone
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
+declare double @llvm.fma.f64(double, double, double) nounwind readnone
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
+
+; FUNC-LABEL: @fma_f32
+; SI: V_FMA_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2, float addrspace(1)* %in3) {
%r0 = load float addrspace(1)* %in1
@@ -13,11 +20,36 @@ define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
ret void
}
-declare float @llvm.fma.f32(float, float, float)
+; FUNC-LABEL: @fma_v2f32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+define void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
+ <2 x float> addrspace(1)* %in2, <2 x float> addrspace(1)* %in3) {
+ %r0 = load <2 x float> addrspace(1)* %in1
+ %r1 = load <2 x float> addrspace(1)* %in2
+ %r2 = load <2 x float> addrspace(1)* %in3
+ %r3 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %r0, <2 x float> %r1, <2 x float> %r2)
+ store <2 x float> %r3, <2 x float> addrspace(1)* %out
+ ret void
+}
-; CHECK: @fma_f64
-; CHECK: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
+; FUNC-LABEL: @fma_v4f32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+; SI: V_FMA_F32
+define void @fma_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
+ <4 x float> addrspace(1)* %in2, <4 x float> addrspace(1)* %in3) {
+ %r0 = load <4 x float> addrspace(1)* %in1
+ %r1 = load <4 x float> addrspace(1)* %in2
+ %r2 = load <4 x float> addrspace(1)* %in3
+ %r3 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %r0, <4 x float> %r1, <4 x float> %r2)
+ store <4 x float> %r3, <4 x float> addrspace(1)* %out
+ ret void
+}
+; FUNC-LABEL: @fma_f64
+; SI: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2, double addrspace(1)* %in3) {
%r0 = load double addrspace(1)* %in1
@@ -28,4 +60,30 @@ define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
ret void
}
-declare double @llvm.fma.f64(double, double, double)
+; FUNC-LABEL: @fma_v2f64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+define void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
+ <2 x double> addrspace(1)* %in2, <2 x double> addrspace(1)* %in3) {
+ %r0 = load <2 x double> addrspace(1)* %in1
+ %r1 = load <2 x double> addrspace(1)* %in2
+ %r2 = load <2 x double> addrspace(1)* %in3
+ %r3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %r0, <2 x double> %r1, <2 x double> %r2)
+ store <2 x double> %r3, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fma_v4f64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+; SI: V_FMA_F64
+define void @fma_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in1,
+ <4 x double> addrspace(1)* %in2, <4 x double> addrspace(1)* %in3) {
+ %r0 = load <4 x double> addrspace(1)* %in1
+ %r1 = load <4 x double> addrspace(1)* %in2
+ %r2 = load <4 x double> addrspace(1)* %in3
+ %r3 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %r0, <4 x double> %r1, <4 x double> %r2)
+ store <4 x double> %r3, <4 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fnearbyint.ll b/test/CodeGen/R600/fnearbyint.ll
new file mode 100644
index 000000000000..1c1d7315189f
--- /dev/null
+++ b/test/CodeGen/R600/fnearbyint.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s
+; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s
+
+; This should have exactly the same output as the test for rint,
+; so no need to check anything.
+
+declare float @llvm.nearbyint.f32(float) #0
+declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>) #0
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) #0
+declare double @llvm.nearbyint.f64(double) #0
+declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #0
+declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) #0
+
+
+define void @fnearbyint_f32(float addrspace(1)* %out, float %in) #1 {
+entry:
+ %0 = call float @llvm.nearbyint.f32(float %in)
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+define void @fnearbyint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+entry:
+ %0 = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %in)
+ store <2 x float> %0, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+define void @fnearbyint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
+entry:
+ %0 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %in)
+ store <4 x float> %0, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+define void @nearbyint_f64(double addrspace(1)* %out, double %in) {
+entry:
+ %0 = call double @llvm.nearbyint.f64(double %in)
+ store double %0, double addrspace(1)* %out
+ ret void
+}
+define void @nearbyint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+entry:
+ %0 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %in)
+ store <2 x double> %0, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+define void @nearbyint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+entry:
+ %0 = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %in)
+ store <4 x double> %0, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/R600/fneg.ll b/test/CodeGen/R600/fneg.ll
index f4e6be62467a..4cddc7378956 100644
--- a/test/CodeGen/R600/fneg.ll
+++ b/test/CodeGen/R600/fneg.ll
@@ -51,7 +51,7 @@ entry:
; R600-CHECK: -KC0[2].Z
; SI-CHECK-LABEL: @fneg_free
; XXX: We could use V_ADD_F32_e64 with the negate bit here instead.
-; SI-CHECK: V_SUB_F32_e64 v{{[0-9]}}, 0.000000e+00, s{{[0-9]}}, 0, 0, 0, 0
+; SI-CHECK: V_SUB_F32_e64 v{{[0-9]}}, 0.000000e+00, s{{[0-9]}}, 0, 0
define void @fneg_free(float addrspace(1)* %out, i32 %in) {
entry:
%0 = bitcast i32 %in to float
@@ -59,3 +59,14 @@ entry:
store float %1, float addrspace(1)* %out
ret void
}
+
+; SI-CHECK-LABEL: @fneg_fold
+; SI-CHECK-NOT: V_XOR_B32
+; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+define void @fneg_fold(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = fsub float -0.0, %in
+ %1 = fmul float %0, %in
+ store float %1, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fp16_to_fp.ll b/test/CodeGen/R600/fp16_to_fp.ll
new file mode 100644
index 000000000000..777eadc34ead
--- /dev/null
+++ b/test/CodeGen/R600/fp16_to_fp.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
+declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
+
+; SI-LABEL: @test_convert_fp16_to_fp32:
+; SI: BUFFER_LOAD_USHORT [[VAL:v[0-9]+]]
+; SI: V_CVT_F32_F16_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+define void @test_convert_fp16_to_fp32(float addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %val = load i16 addrspace(1)* %in, align 2
+ %cvt = call float @llvm.convert.from.fp16.f32(i16 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; SI-LABEL: @test_convert_fp16_to_fp64:
+; SI: BUFFER_LOAD_USHORT [[VAL:v[0-9]+]]
+; SI: V_CVT_F32_F16_e32 [[RESULT32:v[0-9]+]], [[VAL]]
+; SI: V_CVT_F64_F32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[RESULT32]]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
+define void @test_convert_fp16_to_fp64(double addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %val = load i16 addrspace(1)* %in, align 2
+ %cvt = call double @llvm.convert.from.fp16.f64(i16 %val) nounwind readnone
+ store double %cvt, double addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/fp32_to_fp16.ll b/test/CodeGen/R600/fp32_to_fp16.ll
new file mode 100644
index 000000000000..6b5ff00b5f60
--- /dev/null
+++ b/test/CodeGen/R600/fp32_to_fp16.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
+
+; SI-LABEL: @test_convert_fp32_to_fp16:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]]
+; SI: V_CVT_F16_F32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+define void @test_convert_fp32_to_fp16(i16 addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %val = load float addrspace(1)* %in, align 4
+ %cvt = call i16 @llvm.convert.to.fp16.f32(float %val) nounwind readnone
+ store i16 %cvt, i16 addrspace(1)* %out, align 2
+ ret void
+}
diff --git a/test/CodeGen/R600/fp_to_sint.ll b/test/CodeGen/R600/fp_to_sint.ll
index 8302b4f8233e..235045aaaaaa 100644
--- a/test/CodeGen/R600/fp_to_sint.ll
+++ b/test/CodeGen/R600/fp_to_sint.ll
@@ -1,31 +1,206 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-
-; R600-CHECK: @fp_to_sint_v2i32
-; R600-CHECK: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; R600-CHECK: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; SI-CHECK: @fp_to_sint_v2i32
-; SI-CHECK: V_CVT_I32_F32_e32
-; SI-CHECK: V_CVT_I32_F32_e32
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+
+; FUNC-LABEL: @fp_to_sint_v2i32
+; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; SI: V_CVT_I32_F32_e32
+; SI: V_CVT_I32_F32_e32
define void @fp_to_sint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
%result = fptosi <2 x float> %in to <2 x i32>
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
}
-; R600-CHECK: @fp_to_sint_v4i32
-; R600-CHECK: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; R600-CHECK: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW]}}
-; R600-CHECK: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; R600-CHECK: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; SI-CHECK: @fp_to_sint_v4i32
-; SI-CHECK: V_CVT_I32_F32_e32
-; SI-CHECK: V_CVT_I32_F32_e32
-; SI-CHECK: V_CVT_I32_F32_e32
-; SI-CHECK: V_CVT_I32_F32_e32
+; FUNC-LABEL: @fp_to_sint_v4i32
+; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW]}}
+; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; SI: V_CVT_I32_F32_e32
+; SI: V_CVT_I32_F32_e32
+; SI: V_CVT_I32_F32_e32
+; SI: V_CVT_I32_F32_e32
define void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%value = load <4 x float> addrspace(1) * %in
%result = fptosi <4 x float> %value to <4 x i32>
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @fp_to_sint_i64
+
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+
+; Check that the compiler doesn't crash with a "cannot select" error
+; SI: S_ENDPGM
+define void @fp_to_sint_i64 (i64 addrspace(1)* %out, float %in) {
+entry:
+ %0 = fptosi float %in to i64
+ store i64 %0, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC: @fp_to_sint_v2i64
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+
+; SI: S_ENDPGM
+define void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
+ %conv = fptosi <2 x float> %x to <2 x i64>
+ store <2 x i64> %conv, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC: @fp_to_sint_v4i64
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+
+; SI: S_ENDPGM
+define void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
+ %conv = fptosi <4 x float> %x to <4 x i64>
+ store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fp_to_uint.f64.ll b/test/CodeGen/R600/fp_to_uint.f64.ll
new file mode 100644
index 000000000000..bf607cef0884
--- /dev/null
+++ b/test/CodeGen/R600/fp_to_uint.f64.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @fp_to_uint_i32_f64
+; SI: V_CVT_U32_F64_e32
+define void @fp_to_uint_i32_f64(i32 addrspace(1)* %out, double %in) {
+ %cast = fptoui double %in to i32
+ store i32 %cast, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/fp_to_uint.ll b/test/CodeGen/R600/fp_to_uint.ll
index 77db43b39c5f..a13018bdfecf 100644
--- a/test/CodeGen/R600/fp_to_uint.ll
+++ b/test/CodeGen/R600/fp_to_uint.ll
@@ -1,12 +1,11 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; R600-CHECK: @fp_to_uint_v2i32
-; R600-CHECK: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; R600-CHECK: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-CHECK: @fp_to_uint_v2i32
-; SI-CHECK: V_CVT_U32_F32_e32
-; SI-CHECK: V_CVT_U32_F32_e32
+; FUNC-LABEL: @fp_to_uint_v2i32
+; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI: V_CVT_U32_F32_e32
+; SI: V_CVT_U32_F32_e32
define void @fp_to_uint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
%result = fptoui <2 x float> %in to <2 x i32>
@@ -14,16 +13,15 @@ define void @fp_to_uint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
ret void
}
-; R600-CHECK: @fp_to_uint_v4i32
-; R600-CHECK: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; R600-CHECK: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; R600-CHECK: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; SI-CHECK: @fp_to_uint_v4i32
-; SI-CHECK: V_CVT_U32_F32_e32
-; SI-CHECK: V_CVT_U32_F32_e32
-; SI-CHECK: V_CVT_U32_F32_e32
-; SI-CHECK: V_CVT_U32_F32_e32
+; FUNC-LABEL: @fp_to_uint_v4i32
+; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+; SI: V_CVT_U32_F32_e32
+; SI: V_CVT_U32_F32_e32
+; SI: V_CVT_U32_F32_e32
+; SI: V_CVT_U32_F32_e32
define void @fp_to_uint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%value = load <4 x float> addrspace(1) * %in
@@ -31,3 +29,179 @@ define void @fp_to_uint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspac
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+
+; FUNC: @fp_to_uint_i64
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+
+; SI: S_ENDPGM
+define void @fp_to_uint_i64(i64 addrspace(1)* %out, float %x) {
+ %conv = fptoui float %x to i64
+ store i64 %conv, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC: @fp_to_uint_v2i64
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+
+; SI: S_ENDPGM
+define void @fp_to_uint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
+ %conv = fptoui <2 x float> %x to <2 x i64>
+ store <2 x i64> %conv, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC: @fp_to_uint_v4i64
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: AND_INT
+; EG-DAG: LSHR
+; EG-DAG: SUB_INT
+; EG-DAG: AND_INT
+; EG-DAG: ASHR
+; EG-DAG: AND_INT
+; EG-DAG: OR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: LSHL
+; EG-DAG: LSHL
+; EG-DAG: SUB_INT
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT
+; EG-DAG: SETGT_INT
+; EG-DAG: XOR_INT
+; EG-DAG: XOR_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDGE_INT
+; EG-DAG: CNDGE_INT
+
+; SI: S_ENDPGM
+define void @fp_to_uint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
+ %conv = fptoui <4 x float> %x to <4 x i64>
+ store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fsub64.ll b/test/CodeGen/R600/fsub64.ll
index 1445a20839ad..f5e5708f1b41 100644
--- a/test/CodeGen/R600/fsub64.ll
+++ b/test/CodeGen/R600/fsub64.ll
@@ -1,8 +1,7 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-
-; CHECK: @fsub_f64
-; CHECK: V_ADD_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}, 0, 0, 0, 0, 2
+; RUN: llc -march=r600 -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; SI-LABEL: @fsub_f64:
+; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double addrspace(1)* %in1
diff --git a/test/CodeGen/R600/ftrunc.ll b/test/CodeGen/R600/ftrunc.ll
new file mode 100644
index 000000000000..0d7d4679fe3d
--- /dev/null
+++ b/test/CodeGen/R600/ftrunc.ll
@@ -0,0 +1,119 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG --check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI --check-prefix=FUNC %s
+
+declare float @llvm.trunc.f32(float) nounwind readnone
+declare <2 x float> @llvm.trunc.v2f32(<2 x float>) nounwind readnone
+declare <3 x float> @llvm.trunc.v3f32(<3 x float>) nounwind readnone
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
+declare <8 x float> @llvm.trunc.v8f32(<8 x float>) nounwind readnone
+declare <16 x float> @llvm.trunc.v16f32(<16 x float>) nounwind readnone
+
+; FUNC-LABEL: @ftrunc_f32:
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_f32(float addrspace(1)* %out, float %x) {
+ %y = call float @llvm.trunc.f32(float %x) nounwind readnone
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @ftrunc_v2f32:
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %x) {
+ %y = call <2 x float> @llvm.trunc.v2f32(<2 x float> %x) nounwind readnone
+ store <2 x float> %y, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-FUNC-LABEL: @ftrunc_v3f32:
+; FIXME-EG: TRUNC
+; FIXME-EG: TRUNC
+; FIXME-EG: TRUNC
+; FIXME-SI: V_TRUNC_F32_e32
+; FIXME-SI: V_TRUNC_F32_e32
+; FIXME-SI: V_TRUNC_F32_e32
+; define void @ftrunc_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %x) {
+; %y = call <3 x float> @llvm.trunc.v3f32(<3 x float> %x) nounwind readnone
+; store <3 x float> %y, <3 x float> addrspace(1)* %out
+; ret void
+; }
+
+; FUNC-LABEL: @ftrunc_v4f32:
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %x) {
+ %y = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) nounwind readnone
+ store <4 x float> %y, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @ftrunc_v8f32:
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %x) {
+ %y = call <8 x float> @llvm.trunc.v8f32(<8 x float> %x) nounwind readnone
+ store <8 x float> %y, <8 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @ftrunc_v16f32:
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; EG: TRUNC
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+; SI: V_TRUNC_F32_e32
+define void @ftrunc_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %x) {
+ %y = call <16 x float> @llvm.trunc.v16f32(<16 x float> %x) nounwind readnone
+ store <16 x float> %y, <16 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/gep-address-space.ll b/test/CodeGen/R600/gep-address-space.ll
index 4ea21dde8a05..ab2c0bf92fe3 100644
--- a/test/CodeGen/R600/gep-address-space.ll
+++ b/test/CodeGen/R600/gep-address-space.ll
@@ -1,13 +1,23 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
-; CHECK-LABEL @use_gep_address_space:
-; CHECK: S_ADD_I32
+; CHECK-LABEL: @use_gep_address_space:
+; CHECK: V_MOV_B32_e32 [[PTR:v[0-9]+]], s{{[0-9]+}}
+; CHECK: DS_WRITE_B32 [[PTR]], v{{[0-9]+}}, 0x40
%p = getelementptr [1024 x i32] addrspace(3)* %array, i16 0, i16 16
store i32 99, i32 addrspace(3)* %p
ret void
}
+define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
+; CHECK-LABEL: @use_gep_address_space_large_offset:
+; CHECK: S_ADD_I32
+; CHECK: DS_WRITE_B32
+ %p = getelementptr [1024 x i32] addrspace(3)* %array, i16 0, i16 16384
+ store i32 99, i32 addrspace(3)* %p
+ ret void
+}
+
define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
; CHECK-LABEL: @gep_as_vector_v4:
; CHECK: S_ADD_I32
diff --git a/test/CodeGen/R600/gv-const-addrspace-fail.ll b/test/CodeGen/R600/gv-const-addrspace-fail.ll
new file mode 100644
index 000000000000..ebd781107627
--- /dev/null
+++ b/test/CodeGen/R600/gv-const-addrspace-fail.ll
@@ -0,0 +1,58 @@
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+
+@a = internal addrspace(2) constant [1 x i8] [ i8 7 ], align 1
+
+; FUNC-LABEL: @test_i8
+; EG: CF_END
+; SI: BUFFER_STORE_BYTE
+; SI: S_ENDPGM
+define void @test_i8( i32 %s, i8 addrspace(1)* %out) #3 {
+ %arrayidx = getelementptr inbounds [1 x i8] addrspace(2)* @a, i32 0, i32 %s
+ %1 = load i8 addrspace(2)* %arrayidx, align 1
+ store i8 %1, i8 addrspace(1)* %out
+ ret void
+}
+
+@b = internal addrspace(2) constant [1 x i16] [ i16 7 ], align 2
+
+; FUNC-LABEL: @test_i16
+; EG: CF_END
+; SI: BUFFER_STORE_SHORT
+; SI: S_ENDPGM
+define void @test_i16( i32 %s, i16 addrspace(1)* %out) #3 {
+ %arrayidx = getelementptr inbounds [1 x i16] addrspace(2)* @b, i32 0, i32 %s
+ %1 = load i16 addrspace(2)* %arrayidx, align 2
+ store i16 %1, i16 addrspace(1)* %out
+ ret void
+}
+
+%struct.bar = type { float, [5 x i8] }
+
+; The illegal i8s aren't handled
+@struct_bar_gv = internal addrspace(2) unnamed_addr constant [1 x %struct.bar] [ %struct.bar { float 16.0, [5 x i8] [i8 0, i8 1, i8 2, i8 3, i8 4] } ]
+
+; FUNC-LABEL: @struct_bar_gv_load
+define void @struct_bar_gv_load(i8 addrspace(1)* %out, i32 %index) {
+ %gep = getelementptr inbounds [1 x %struct.bar] addrspace(2)* @struct_bar_gv, i32 0, i32 0, i32 1, i32 %index
+ %load = load i8 addrspace(2)* %gep, align 1
+ store i8 %load, i8 addrspace(1)* %out, align 1
+ ret void
+}
+
+
+; The private load isn't scalarized.
+@array_vector_gv = internal addrspace(2) constant [4 x <4 x i32>] [ <4 x i32> <i32 1, i32 2, i32 3, i32 4>,
+ <4 x i32> <i32 5, i32 6, i32 7, i32 8>,
+ <4 x i32> <i32 9, i32 10, i32 11, i32 12>,
+ <4 x i32> <i32 13, i32 14, i32 15, i32 16> ]
+
+; FUNC-LABEL: @array_vector_gv_load
+define void @array_vector_gv_load(<4 x i32> addrspace(1)* %out, i32 %index) {
+ %gep = getelementptr inbounds [4 x <4 x i32>] addrspace(2)* @array_vector_gv, i32 0, i32 %index
+ %load = load <4 x i32> addrspace(2)* %gep, align 16
+ store <4 x i32> %load, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/gv-const-addrspace.ll b/test/CodeGen/R600/gv-const-addrspace.ll
new file mode 100644
index 000000000000..e0ac317f9986
--- /dev/null
+++ b/test/CodeGen/R600/gv-const-addrspace.ll
@@ -0,0 +1,97 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+
+@b = internal addrspace(2) constant [1 x i16] [ i16 7 ], align 2
+
+@float_gv = internal unnamed_addr addrspace(2) constant [5 x float] [float 0.0, float 1.0, float 2.0, float 3.0, float 4.0], align 4
+
+; FUNC-LABEL: @float
+; FIXME: We should be using S_LOAD_DWORD here.
+; SI: BUFFER_LOAD_DWORD
+
+; EG-DAG: MOV {{\** *}}T2.X
+; EG-DAG: MOV {{\** *}}T3.X
+; EG-DAG: MOV {{\** *}}T4.X
+; EG-DAG: MOV {{\** *}}T5.X
+; EG-DAG: MOV {{\** *}}T6.X
+; EG: MOVA_INT
+
+define void @float(float addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = getelementptr inbounds [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
+ %1 = load float addrspace(2)* %0
+ store float %1, float addrspace(1)* %out
+ ret void
+}
+
+@i32_gv = internal unnamed_addr addrspace(2) constant [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4], align 4
+
+; FUNC-LABEL: @i32
+
+; FIXME: We should be using S_LOAD_DWORD here.
+; SI: BUFFER_LOAD_DWORD
+
+; EG-DAG: MOV {{\** *}}T2.X
+; EG-DAG: MOV {{\** *}}T3.X
+; EG-DAG: MOV {{\** *}}T4.X
+; EG-DAG: MOV {{\** *}}T5.X
+; EG-DAG: MOV {{\** *}}T6.X
+; EG: MOVA_INT
+
+define void @i32(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = getelementptr inbounds [5 x i32] addrspace(2)* @i32_gv, i32 0, i32 %index
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+
+%struct.foo = type { float, [5 x i32] }
+
+@struct_foo_gv = internal unnamed_addr addrspace(2) constant [1 x %struct.foo] [ %struct.foo { float 16.0, [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4] } ]
+
+; FUNC-LABEL: @struct_foo_gv_load
+; SI: S_LOAD_DWORD
+
+define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
+ %gep = getelementptr inbounds [1 x %struct.foo] addrspace(2)* @struct_foo_gv, i32 0, i32 0, i32 1, i32 %index
+ %load = load i32 addrspace(2)* %gep, align 4
+ store i32 %load, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+@array_v1_gv = internal addrspace(2) constant [4 x <1 x i32>] [ <1 x i32> <i32 1>,
+ <1 x i32> <i32 2>,
+ <1 x i32> <i32 3>,
+ <1 x i32> <i32 4> ]
+
+; FUNC-LABEL: @array_v1_gv_load
+; FIXME: We should be using S_LOAD_DWORD here.
+; SI: BUFFER_LOAD_DWORD
+define void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
+ %gep = getelementptr inbounds [4 x <1 x i32>] addrspace(2)* @array_v1_gv, i32 0, i32 %index
+ %load = load <1 x i32> addrspace(2)* %gep, align 4
+ store <1 x i32> %load, <1 x i32> addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @gv_addressing_in_branch(float addrspace(1)* %out, i32 %index, i32 %a) {
+entry:
+ %0 = icmp eq i32 0, %a
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = getelementptr inbounds [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
+ %2 = load float addrspace(2)* %1
+ store float %2, float addrspace(1)* %out
+ br label %endif
+
+else:
+ store float 1.0, float addrspace(1)* %out
+ br label %endif
+
+endif:
+ ret void
+}
diff --git a/test/CodeGen/R600/half.ll b/test/CodeGen/R600/half.ll
new file mode 100644
index 000000000000..42aa4faa99f4
--- /dev/null
+++ b/test/CodeGen/R600/half.ll
@@ -0,0 +1,61 @@
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+
+define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
+; CHECK-LABEL: @test_load_store
+; CHECK: BUFFER_LOAD_USHORT [[TMP:v[0-9]+]]
+; CHECK: BUFFER_STORE_SHORT [[TMP]]
+ %val = load half addrspace(1)* %in
+ store half %val, half addrspace(1) * %out
+ ret void
+}
+
+define void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) {
+; CHECK-LABEL: @test_bitcast_from_half
+; CHECK: BUFFER_LOAD_USHORT [[TMP:v[0-9]+]]
+; CHECK: BUFFER_STORE_SHORT [[TMP]]
+ %val = load half addrspace(1) * %in
+ %val_int = bitcast half %val to i16
+ store i16 %val_int, i16 addrspace(1)* %out
+ ret void
+}
+
+define void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) {
+; CHECK-LABEL: @test_bitcast_to_half
+; CHECK: BUFFER_LOAD_USHORT [[TMP:v[0-9]+]]
+; CHECK: BUFFER_STORE_SHORT [[TMP]]
+ %val = load i16 addrspace(1)* %in
+ %val_fp = bitcast i16 %val to half
+ store half %val_fp, half addrspace(1)* %out
+ ret void
+}
+
+define void @test_extend32(half addrspace(1)* %in, float addrspace(1)* %out) {
+; CHECK-LABEL: @test_extend32
+; CHECK: V_CVT_F32_F16_e32
+
+ %val16 = load half addrspace(1)* %in
+ %val32 = fpext half %val16 to float
+ store float %val32, float addrspace(1)* %out
+ ret void
+}
+
+define void @test_extend64(half addrspace(1)* %in, double addrspace(1)* %out) {
+; CHECK-LABEL: @test_extend64
+; CHECK: V_CVT_F32_F16_e32
+; CHECK: V_CVT_F64_F32_e32
+
+ %val16 = load half addrspace(1)* %in
+ %val64 = fpext half %val16 to double
+ store double %val64, double addrspace(1)* %out
+ ret void
+}
+
+define void @test_trunc32(float addrspace(1)* %in, half addrspace(1)* %out) {
+; CHECK-LABEL: @test_trunc32
+; CHECK: V_CVT_F16_F32_e32
+
+ %val32 = load float addrspace(1)* %in
+ %val16 = fptrunc float %val32 to half
+ store half %val16, half addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/icmp64.ll b/test/CodeGen/R600/icmp64.ll
new file mode 100644
index 000000000000..c9e62ff934ee
--- /dev/null
+++ b/test/CodeGen/R600/icmp64.ll
@@ -0,0 +1,92 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @test_i64_eq:
+; SI: V_CMP_EQ_I64
+define void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp eq i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ne:
+; SI: V_CMP_NE_I64
+define void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ne i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_slt:
+; SI: V_CMP_LT_I64
+define void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp slt i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ult:
+; SI: V_CMP_LT_U64
+define void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ult i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_sle:
+; SI: V_CMP_LE_I64
+define void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp sle i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ule:
+; SI: V_CMP_LE_U64
+define void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ule i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_sgt:
+; SI: V_CMP_GT_I64
+define void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp sgt i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_ugt:
+; SI: V_CMP_GT_U64
+define void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ugt i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_sge:
+; SI: V_CMP_GE_I64
+define void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp sge i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_i64_uge:
+; SI: V_CMP_GE_U64
+define void @test_i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %cmp = icmp uge i64 %a, %b
+ %result = sext i1 %cmp to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/indirect-private-64.ll b/test/CodeGen/R600/indirect-private-64.ll
new file mode 100644
index 000000000000..5747434935b3
--- /dev/null
+++ b/test/CodeGen/R600/indirect-private-64.ll
@@ -0,0 +1,89 @@
+; RUN: llc -march=r600 -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
+
+
+declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+
+; SI-LABEL: @private_access_f64_alloca:
+
+; SI-ALLOCA: BUFFER_STORE_DWORDX2
+; SI-ALLOCA: BUFFER_LOAD_DWORDX2
+
+; SI-PROMOTE: DS_WRITE_B64
+; SI-PROMOTE: DS_READ_B64
+define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load double addrspace(1)* %in, align 8
+ %array = alloca double, i32 16, align 8
+ %ptr = getelementptr double* %array, i32 %b
+ store double %val, double* %ptr, align 8
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load double* %ptr, align 8
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @private_access_v2f64_alloca:
+
+; SI-ALLOCA: BUFFER_STORE_DWORDX4
+; SI-ALLOCA: BUFFER_LOAD_DWORDX4
+
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load <2 x double> addrspace(1)* %in, align 16
+ %array = alloca <2 x double>, i32 16, align 16
+ %ptr = getelementptr <2 x double>* %array, i32 %b
+ store <2 x double> %val, <2 x double>* %ptr, align 16
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load <2 x double>* %ptr, align 16
+ store <2 x double> %result, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @private_access_i64_alloca:
+
+; SI-ALLOCA: BUFFER_STORE_DWORDX2
+; SI-ALLOCA: BUFFER_LOAD_DWORDX2
+
+; SI-PROMOTE: DS_WRITE_B64
+; SI-PROMOTE: DS_READ_B64
+define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load i64 addrspace(1)* %in, align 8
+ %array = alloca i64, i32 16, align 8
+ %ptr = getelementptr i64* %array, i32 %b
+ store i64 %val, i64* %ptr, align 8
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load i64* %ptr, align 8
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @private_access_v2i64_alloca:
+
+; SI-ALLOCA: BUFFER_STORE_DWORDX4
+; SI-ALLOCA: BUFFER_LOAD_DWORDX4
+
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
+ %val = load <2 x i64> addrspace(1)* %in, align 16
+ %array = alloca <2 x i64>, i32 16, align 16
+ %ptr = getelementptr <2 x i64>* %array, i32 %b
+ store <2 x i64> %val, <2 x i64>* %ptr, align 16
+ call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
+ %result = load <2 x i64>* %ptr, align 16
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/infinite-loop-evergreen.ll b/test/CodeGen/R600/infinite-loop-evergreen.ll
new file mode 100644
index 000000000000..f6e39b3d8306
--- /dev/null
+++ b/test/CodeGen/R600/infinite-loop-evergreen.ll
@@ -0,0 +1,10 @@
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s
+
+define void @inf_loop_irreducible_cfg() nounwind {
+entry:
+ br label %block
+
+block:
+ br label %block
+}
diff --git a/test/CodeGen/R600/infinite-loop.ll b/test/CodeGen/R600/infinite-loop.ll
new file mode 100644
index 000000000000..68ffaae1c428
--- /dev/null
+++ b/test/CodeGen/R600/infinite-loop.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @infinite_loop:
+; SI: V_MOV_B32_e32 [[REG:v[0-9]+]], 0x3e7
+; SI: BB0_1:
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_WAITCNT vmcnt(0) expcnt(0)
+; SI: S_BRANCH BB0_1
+define void @infinite_loop(i32 addrspace(1)* %out) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ store i32 999, i32 addrspace(1)* %out, align 4
+ br label %for.body
+}
+
diff --git a/test/CodeGen/R600/input-mods.ll b/test/CodeGen/R600/input-mods.ll
new file mode 100644
index 000000000000..13bfbab85695
--- /dev/null
+++ b/test/CodeGen/R600/input-mods.ll
@@ -0,0 +1,26 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+
+;EG-CHECK-LABEL: @test
+;EG-CHECK: EXP_IEEE *
+;CM-CHECK-LABEL: @test
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.X, -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Y (MASKED), -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Z (MASKED), -|T{{[0-9]+}}.X|
+;CM-CHECK: EXP_IEEE * T{{[0-9]+}}.W (MASKED), -|T{{[0-9]+}}.X|
+
+define void @test(<4 x float> inreg %reg0) #0 {
+ %r0 = extractelement <4 x float> %reg0, i32 0
+ %r1 = call float @llvm.fabs.f32(float %r0)
+ %r2 = fsub float -0.000000e+00, %r1
+ %r3 = call float @llvm.exp2.f32(float %r2)
+ %vec = insertelement <4 x float> undef, float %r3, i32 0
+ call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+ ret void
+}
+
+declare float @llvm.exp2.f32(float) readnone
+declare float @llvm.fabs.f32(float) readnone
+declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/insert_vector_elt.ll b/test/CodeGen/R600/insert_vector_elt.ll
index 05aeccebac00..43b4efc93377 100644
--- a/test/CodeGen/R600/insert_vector_elt.ll
+++ b/test/CodeGen/R600/insert_vector_elt.ll
@@ -1,16 +1,201 @@
-; XFAIL: *
-; RUN: llc < %s -march=r600 -mcpu=redwood -o %t
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
-define void @var_insert(<4 x i32> addrspace(1)* %out, <4 x i32> %x, i32 %val, i32 %idx) nounwind {
-entry:
- %tmp3 = insertelement <4 x i32> %x, i32 %val, i32 %idx ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %out
+; FIXME: Broken on evergreen
+; FIXME: For some reason the 8 and 16 vectors are being stored as
+; individual elements instead of 128-bit stores.
+
+
+; FIXME: Why is the constant moved into the intermediate register and
+; not just directly into the vector component?
+
+; SI-LABEL: @insertelement_v4f32_0:
+; S_LOAD_DWORDX4 s{{[}}[[LOW_REG:[0-9]+]]:
+; V_MOV_B32_e32
+; V_MOV_B32_e32 [[CONSTREG:v[0-9]+]], 5.000000e+00
+; V_MOV_B32_e32 v[[LOW_REG]], [[CONSTREG]]
+; BUFFER_STORE_DWORDX4 v{{[}}[[LOW_REG]]:
+define void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4f32_1:
+define void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4f32_2:
+define void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4f32_3:
+define void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @insertelement_v4i32_0:
+define void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind {
+ %vecins = insertelement <4 x i32> %a, i32 999, i32 0
+ store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
ret void
}
-define void @var_extract(i32 addrspace(1)* %out, <4 x i32> %x, i32 %idx) nounwind {
+; SI-LABEL: @dynamic_insertelement_v2f32:
+; SI: V_MOV_B32_e32 [[CONST:v[0-9]+]], 5.000000e+00
+; SI: V_MOVRELD_B32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; SI: BUFFER_STORE_DWORDX2 {{v\[}}[[LOW_RESULT_REG]]:
+define void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 %b
+ store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4f32:
+; SI: V_MOV_B32_e32 [[CONST:v[0-9]+]], 5.000000e+00
+; SI: V_MOVRELD_B32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; SI: BUFFER_STORE_DWORDX4 {{v\[}}[[LOW_RESULT_REG]]:
+define void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %b
+ store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8f32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b
+ store <8 x float> %vecins, <8 x float> addrspace(1)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v16f32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <16 x float> %a, float 5.000000e+00, i32 %b
+ store <16 x float> %vecins, <16 x float> addrspace(1)* %out, align 64
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v2i32:
+; SI: BUFFER_STORE_DWORDX2
+define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i32> %a, i32 5, i32 %b
+ store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4i32:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x i32> %a, i32 5, i32 %b
+ store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8i32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x i32> %a, i32 5, i32 %b
+ store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v16i32:
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <16 x i32> %a, i32 5, i32 %b
+ store <16 x i32> %vecins, <16 x i32> addrspace(1)* %out, align 64
+ ret void
+}
+
+
+; SI-LABEL: @dynamic_insertelement_v2i16:
+; FIXMESI: BUFFER_STORE_DWORDX2
+define void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i16> %a, i16 5, i32 %b
+ store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4i16:
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x i16> %a, i16 5, i32 %b
+ store <4 x i16> %vecins, <4 x i16> addrspace(1)* %out, align 16
+ ret void
+}
+
+
+; SI-LABEL: @dynamic_insertelement_v2i8:
+; FIXMESI: BUFFER_STORE_USHORT
+define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i8> %a, i8 5, i32 %b
+ store <2 x i8> %vecins, <2 x i8> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4i8:
+; FIXMESI: BUFFER_STORE_DWORD
+define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x i8> %a, i8 5, i32 %b
+ store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8i8:
+; FIXMESI: BUFFER_STORE_DWORDX2
+define void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x i8> %a, i8 5, i32 %b
+ store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v16i8:
+; FIXMESI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <16 x i8> %a, i8 5, i32 %b
+ store <16 x i8> %vecins, <16 x i8> addrspace(1)* %out, align 16
+ ret void
+}
+
+; This test requires handling INSERT_SUBREG in SIFixSGPRCopies. Check that
+; the compiler doesn't crash.
+; SI-LABEL: @insert_split_bb
+define void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b) {
entry:
- %tmp3 = extractelement <4 x i32> %x, i32 %idx ; <<i32>> [#uses=1]
- store i32 %tmp3, i32 addrspace(1)* %out
+ %0 = insertelement <2 x i32> undef, i32 %a, i32 0
+ %1 = icmp eq i32 %a, 0
+ br i1 %1, label %if, label %else
+
+if:
+ %2 = load i32 addrspace(1)* %in
+ %3 = insertelement <2 x i32> %0, i32 %2, i32 1
+ br label %endif
+
+else:
+ %4 = getelementptr i32 addrspace(1)* %in, i32 1
+ %5 = load i32 addrspace(1)* %4
+ %6 = insertelement <2 x i32> %0, i32 %5, i32 1
+ br label %endif
+
+endif:
+ %7 = phi <2 x i32> [%3, %if], [%6, %else]
+ store <2 x i32> %7, <2 x i32> addrspace(1)* %out
ret void
}
diff --git a/test/CodeGen/R600/insert_vector_elt_f64.ll b/test/CodeGen/R600/insert_vector_elt_f64.ll
new file mode 100644
index 000000000000..595bc59655ac
--- /dev/null
+++ b/test/CodeGen/R600/insert_vector_elt_f64.ll
@@ -0,0 +1,36 @@
+; REQUIRES: asserts
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+
+; SI-LABEL: @dynamic_insertelement_v2f64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x double> %a, double 8.0, i32 %b
+ store <2 x double> %vecins, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v2i64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %a, i32 %b) nounwind {
+ %vecins = insertelement <2 x i64> %a, i64 5, i32 %b
+ store <2 x i64> %vecins, <2 x i64> addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v4f64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, i32 %b) nounwind {
+ %vecins = insertelement <4 x double> %a, double 8.0, i32 %b
+ store <4 x double> %vecins, <4 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @dynamic_insertelement_v8f64:
+; SI: BUFFER_STORE_DWORDX4
+define void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) nounwind {
+ %vecins = insertelement <8 x double> %a, double 8.0, i32 %b
+ store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/jump-address.ll b/test/CodeGen/R600/jump-address.ll
index ae9c8bba4fd6..a1cd3882443a 100644
--- a/test/CodeGen/R600/jump-address.ll
+++ b/test/CodeGen/R600/jump-address.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: JUMP @3
+; CHECK: JUMP @6
; CHECK: EXPORT
; CHECK-NOT: EXPORT
diff --git a/test/CodeGen/R600/kernel-args.ll b/test/CodeGen/R600/kernel-args.ll
index 247e3163823f..6fc69792fd3d 100644
--- a/test/CodeGen/R600/kernel-args.ll
+++ b/test/CodeGen/R600/kernel-args.ll
@@ -17,7 +17,7 @@ entry:
; EG-CHECK-LABEL: @i8_zext_arg
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
; SI-CHECK-LABEL: @i8_zext_arg
-; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
+; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
entry:
@@ -29,7 +29,7 @@ entry:
; EG-CHECK-LABEL: @i8_sext_arg
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
; SI-CHECK-LABEL: @i8_sext_arg
-; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
+; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
entry:
@@ -53,7 +53,7 @@ entry:
; EG-CHECK-LABEL: @i16_zext_arg
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
; SI-CHECK-LABEL: @i16_zext_arg
-; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
+; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
entry:
@@ -65,7 +65,7 @@ entry:
; EG-CHECK-LABEL: @i16_sext_arg
; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
; SI-CHECK-LABEL: @i16_sext_arg
-; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
+; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
entry:
@@ -77,7 +77,7 @@ entry:
; EG-CHECK-LABEL: @i32_arg
; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
; SI-CHECK-LABEL: @i32_arg
-; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
+; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
entry:
store i32 %in, i32 addrspace(1)* %out, align 4
@@ -87,7 +87,7 @@ entry:
; EG-CHECK-LABEL: @f32_arg
; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
; SI-CHECK-LABEL: @f32_arg
-; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
+; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 0xb
define void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
entry:
store float %in, float addrspace(1)* %out, align 4
@@ -122,7 +122,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
; SI-CHECK-LABEL: @v2i32_arg
-; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 11
+; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
define void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
entry:
store <2 x i32> %in, <2 x i32> addrspace(1)* %out, align 4
@@ -133,7 +133,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
; SI-CHECK-LABEL: @v2f32_arg
-; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 11
+; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
define void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
entry:
store <2 x float> %in, <2 x float> addrspace(1)* %out, align 4
@@ -166,7 +166,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; SI-CHECK-LABEL: @v3i32_arg
-; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 13
+; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
define void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
entry:
store <3 x i32> %in, <3 x i32> addrspace(1)* %out, align 4
@@ -178,7 +178,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; SI-CHECK-LABEL: @v3f32_arg
-; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 13
+; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
define void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
entry:
store <3 x float> %in, <3 x float> addrspace(1)* %out, align 4
@@ -223,7 +223,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
; SI-CHECK-LABEL: @v4i32_arg
-; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 13
+; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
define void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
entry:
store <4 x i32> %in, <4 x i32> addrspace(1)* %out, align 4
@@ -236,7 +236,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
; SI-CHECK-LABEL: @v4f32_arg
-; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 13
+; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
define void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
entry:
store <4 x float> %in, <4 x float> addrspace(1)* %out, align 4
@@ -300,7 +300,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
; SI-CHECK-LABEL: @v8i32_arg
-; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 17
+; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
define void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
entry:
store <8 x i32> %in, <8 x i32> addrspace(1)* %out, align 4
@@ -317,7 +317,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
; SI-CHECK-LABEL: @v8f32_arg
-; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 17
+; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
define void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
entry:
store <8 x float> %in, <8 x float> addrspace(1)* %out, align 4
@@ -422,7 +422,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
; SI-CHECK-LABEL: @v16i32_arg
-; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 25
+; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
define void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
entry:
store <16 x i32> %in, <16 x i32> addrspace(1)* %out, align 4
@@ -447,7 +447,7 @@ entry:
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
; SI-CHECK-LABEL: @v16f32_arg
-; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 25
+; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
define void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
entry:
store <16 x float> %in, <16 x float> addrspace(1)* %out, align 4
diff --git a/test/CodeGen/R600/large-alloca.ll b/test/CodeGen/R600/large-alloca.ll
new file mode 100644
index 000000000000..d8be6d40f310
--- /dev/null
+++ b/test/CodeGen/R600/large-alloca.ll
@@ -0,0 +1,14 @@
+; XFAIL: *
+; REQUIRES: asserts
+; RUN: llc -march=r600 -mcpu=SI < %s
+
+define void @large_alloca(i32 addrspace(1)* %out, i32 %x, i32 %y) nounwind {
+ %large = alloca [8192 x i32], align 4
+ %gep = getelementptr [8192 x i32]* %large, i32 0, i32 8191
+ store i32 %x, i32* %gep
+ %gep1 = getelementptr [8192 x i32]* %large, i32 0, i32 %y
+ %0 = load i32* %gep1
+ store i32 %0, i32 addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/R600/large-constant-initializer.ll b/test/CodeGen/R600/large-constant-initializer.ll
new file mode 100644
index 000000000000..191b5c3de912
--- /dev/null
+++ b/test/CodeGen/R600/large-constant-initializer.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=r600 -mcpu=SI < %s
+; CHECK: S_ENDPGM
+
+@gv = external unnamed_addr addrspace(2) constant [239 x i32], align 4
+
+define void @opencv_cvtfloat_crash(i32 addrspace(1)* %out, i32 %x) nounwind {
+ %val = load i32 addrspace(2)* getelementptr ([239 x i32] addrspace(2)* @gv, i64 0, i64 239), align 4
+ %mul12 = mul nsw i32 %val, 7
+ br i1 undef, label %exit, label %bb
+
+bb:
+ %cmp = icmp slt i32 %x, 0
+ br label %exit
+
+exit:
+ ret void
+}
+
diff --git a/test/CodeGen/R600/lds-output-queue.ll b/test/CodeGen/R600/lds-output-queue.ll
index 63a4332d3c41..d5dc061964e9 100644
--- a/test/CodeGen/R600/lds-output-queue.ll
+++ b/test/CodeGen/R600/lds-output-queue.ll
@@ -8,7 +8,7 @@
; CHECK-NOT: ALU clause
; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
-@local_mem = internal addrspace(3) unnamed_addr global [2 x i32] [i32 1, i32 2], align 4
+@local_mem = internal unnamed_addr addrspace(3) global [2 x i32] [i32 1, i32 2], align 4
define void @lds_input_queue(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %index) {
entry:
@@ -87,7 +87,7 @@ declare void @llvm.AMDGPU.barrier.local()
; CHECK-LABEL: @local_global_alias
; CHECK: LDS_READ_RET
; CHECK-NOT: ALU clause
-; CHECK MOV * T{{[0-9]\.[XYZW]}}, OQAP
+; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
define void @local_global_alias(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = getelementptr inbounds [2 x i32] addrspace(3)* @local_mem, i32 0, i32 0
diff --git a/test/CodeGen/R600/lds-size.ll b/test/CodeGen/R600/lds-size.ll
index 2185180fd83f..9182e2561500 100644
--- a/test/CodeGen/R600/lds-size.ll
+++ b/test/CodeGen/R600/lds-size.ll
@@ -6,7 +6,7 @@
; CHECK-LABEL: @test
; CHECK: .long 166120
; CHECK-NEXT: .long 1
-@lds = internal addrspace(3) unnamed_addr global i32 zeroinitializer, align 4
+@lds = internal unnamed_addr addrspace(3) global i32 zeroinitializer, align 4
define void @test(i32 addrspace(1)* %out, i32 %cond) {
entry:
diff --git a/test/CodeGen/R600/lit.local.cfg b/test/CodeGen/R600/lit.local.cfg
index 2d8930ad0e88..ad9ce2541ef7 100644
--- a/test/CodeGen/R600/lit.local.cfg
+++ b/test/CodeGen/R600/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'R600' in targets:
+if not 'R600' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/R600/llvm.AMDGPU.abs.ll b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
new file mode 100644
index 000000000000..a0a47b7c4701
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
@@ -0,0 +1,48 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.abs(i32) nounwind readnone
+
+; Legacy name
+declare i32 @llvm.AMDIL.abs.i32(i32) nounwind readnone
+
+; FUNC-LABEL: @s_abs_i32
+; SI: S_SUB_I32
+; SI: S_MAX_I32
+; SI: S_ENDPGM
+
+; EG: SUB_INT
+; EG: MAX_INT
+define void @s_abs_i32(i32 addrspace(1)* %out, i32 %src) nounwind {
+ %abs = call i32 @llvm.AMDGPU.abs(i32 %src) nounwind readnone
+ store i32 %abs, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_abs_i32
+; SI: V_SUB_I32_e32
+; SI: V_MAX_I32_e32
+; SI: S_ENDPGM
+
+; EG: SUB_INT
+; EG: MAX_INT
+define void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
+ %val = load i32 addrspace(1)* %src, align 4
+ %abs = call i32 @llvm.AMDGPU.abs(i32 %val) nounwind readnone
+ store i32 %abs, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @abs_i32_legacy_amdil
+; SI: V_SUB_I32_e32
+; SI: V_MAX_I32_e32
+; SI: S_ENDPGM
+
+; EG: SUB_INT
+; EG: MAX_INT
+define void @abs_i32_legacy_amdil(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
+ %val = load i32 addrspace(1)* %src, align 4
+ %abs = call i32 @llvm.AMDIL.abs.i32(i32 %val) nounwind readnone
+ store i32 %abs, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll b/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
new file mode 100644
index 000000000000..47f5255e5012
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+; FUNC-LABEL: @test_barrier_global
+; EG: GROUP_BARRIER
+; SI: S_BARRIER
+
+define void @test_barrier_global(i32 addrspace(1)* %out) {
+entry:
+ %0 = call i32 @llvm.r600.read.tidig.x()
+ %1 = getelementptr i32 addrspace(1)* %out, i32 %0
+ store i32 %0, i32 addrspace(1)* %1
+ call void @llvm.AMDGPU.barrier.global()
+ %2 = call i32 @llvm.r600.read.local.size.x()
+ %3 = sub i32 %2, 1
+ %4 = sub i32 %3, %0
+ %5 = getelementptr i32 addrspace(1)* %out, i32 %4
+ %6 = load i32 addrspace(1)* %5
+ store i32 %6, i32 addrspace(1)* %1
+ ret void
+}
+
+declare void @llvm.AMDGPU.barrier.global()
+
+declare i32 @llvm.r600.read.tidig.x() #0
+declare i32 @llvm.r600.read.local.size.x() #0
+
+attributes #0 = { readnone }
diff --git a/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
index 8d3c9ca22300..7203675bb47b 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
@@ -1,8 +1,11 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; CHECK: GROUP_BARRIER
+; FUNC-LABEL: @test_barrier_local
+; EG: GROUP_BARRIER
+; SI: S_BARRIER
-define void @test(i32 addrspace(1)* %out) {
+define void @test_barrier_local(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.x()
%1 = getelementptr i32 addrspace(1)* %out, i32 %0
@@ -17,8 +20,9 @@ entry:
ret void
}
-declare i32 @llvm.r600.read.tidig.x() #0
declare void @llvm.AMDGPU.barrier.local()
+
+declare i32 @llvm.r600.read.tidig.x() #0
declare i32 @llvm.r600.read.local.size.x() #0
attributes #0 = { readnone }
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
new file mode 100644
index 000000000000..eb5094232825
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
@@ -0,0 +1,426 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
+
+; FUNC-LABEL: @bfe_i32_arg_arg_arg
+; SI: V_BFE_I32
+; EG: BFE_INT
+; EG: encoding: [{{[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+}},0xac
+define void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 %src1) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_arg_arg_imm
+; SI: V_BFE_I32
+; EG: BFE_INT
+define void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 123) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_arg_imm_arg
+; SI: V_BFE_I32
+; EG: BFE_INT
+define void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 123, i32 %src2) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_imm_arg_arg
+; SI: V_BFE_I32
+; EG: BFE_INT
+define void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 123, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_bfe_print_arg
+; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 2, 8
+define void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) nounwind {
+ %load = load i32 addrspace(1)* %src0, align 4
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 2, i32 8) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_arg_0_width_reg_offset
+; SI-NOT: BFE
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 0) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_arg_0_width_imm_offset
+; SI-NOT: BFE
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 8, i32 0) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_6
+; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; SI: S_ENDPGM
+define void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_7
+; SI-NOT: SHL
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+define void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 0, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FIXME: The shifts should be 1 BFE
+; FUNC-LABEL: @bfe_i32_test_8
+; SI: BUFFER_LOAD_DWORD
+; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
+; SI: S_ENDPGM
+define void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_9
+; SI-NOT: BFE
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_10
+; SI-NOT: BFE
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_11
+; SI-NOT: BFE
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 8, i32 24)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_12
+; SI-NOT: BFE
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 24, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_13
+; SI: V_ASHRREV_I32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = ashr i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
+}
+
+; FUNC-LABEL: @bfe_i32_test_14
+; SI-NOT: LSHR
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = lshr i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_0
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 0, i32 0, i32 0) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_1
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 12334, i32 0, i32 0) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_2
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 0, i32 0, i32 1) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_3
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 1, i32 0, i32 1) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_4
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 4294967295, i32 0, i32 1) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_5
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 128, i32 7, i32 1) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_6
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0xffffff80
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 128, i32 0, i32 8) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_7
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 127, i32 0, i32 8) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_8
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 127, i32 6, i32 8) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_9
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 65536, i32 16, i32 8) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_10
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 65535, i32 16, i32 16) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_11
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -6
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 4, i32 4) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_12
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 31, i32 1) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_13
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 131070, i32 16, i32 16) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_14
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 40
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 2, i32 30) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_15
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 10
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 4, i32 28) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_16
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 4294967295, i32 1, i32 7) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_17
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 255, i32 1, i32 31) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_i32_constant_fold_test_18
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
+ %bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 255, i32 31, i32 1) nounwind readnone
+ store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; XXX - This should really be a single BFE, but the sext_inreg of the
+; extended type i24 is never custom lowered.
+; FUNC-LABEL: @bfe_sext_in_reg_i24
+; SI: BUFFER_LOAD_DWORD [[LOAD:v[0-9]+]],
+; SI: V_LSHLREV_B32_e32 {{v[0-9]+}}, 8, {{v[0-9]+}}
+; SI: V_ASHRREV_I32_e32 {{v[0-9]+}}, 8, {{v[0-9]+}}
+; XSI: V_BFE_I32 [[BFE:v[0-9]+]], [[LOAD]], 0, 8
+; XSI-NOT: SHL
+; XSI-NOT: SHR
+; XSI: BUFFER_STORE_DWORD [[BFE]],
+define void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 0, i32 24)
+ %shl = shl i32 %bfe, 8
+ %ashr = ashr i32 %shl, 8
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
new file mode 100644
index 000000000000..1a62253eeb74
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
@@ -0,0 +1,554 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfe.u32(i32, i32, i32) nounwind readnone
+
+; FUNC-LABEL: @bfe_u32_arg_arg_arg
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 %src1) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_arg_arg_imm
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 123) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_arg_imm_arg
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 123, i32 %src2) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_imm_arg_arg
+; SI: V_BFE_U32
+; EG: BFE_UINT
+define void @bfe_u32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 123, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_arg_0_width_reg_offset
+; SI-NOT: BFE
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 %src1, i32 0) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_arg_0_width_imm_offset
+; SI-NOT: BFE
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 %src0, i32 8, i32 0) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_zextload_i8
+; SI: BUFFER_LOAD_UBYTE
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_zextload_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %load = load i8 addrspace(1)* %in
+ %ext = zext i8 %load to i32
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_zext_in_reg_i8
+; SI: BUFFER_LOAD_DWORD
+; SI: V_ADD_I32
+; SI-NEXT: V_AND_B32_e32
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_zext_in_reg_i8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_zext_in_reg_i16
+; SI: BUFFER_LOAD_DWORD
+; SI: V_ADD_I32
+; SI-NEXT: V_AND_B32_e32
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_zext_in_reg_i16(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 65535
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 0, i32 16)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_zext_in_reg_i8_offset_1
+; SI: BUFFER_LOAD_DWORD
+; SI: V_ADD_I32
+; SI: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_zext_in_reg_i8_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 1, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_zext_in_reg_i8_offset_3
+; SI: BUFFER_LOAD_DWORD
+; SI: V_ADD_I32
+; SI-NEXT: V_AND_B32_e32 {{v[0-9]+}}, 0xf8
+; SI-NEXT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_zext_in_reg_i8_offset_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 3, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_zext_in_reg_i8_offset_7
+; SI: BUFFER_LOAD_DWORD
+; SI: V_ADD_I32
+; SI-NEXT: V_AND_B32_e32 {{v[0-9]+}}, 0x80
+; SI-NEXT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_zext_in_reg_i8_offset_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 255
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 7, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_zext_in_reg_i16_offset_8
+; SI: BUFFER_LOAD_DWORD
+; SI: V_ADD_I32
+; SI-NEXT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_zext_in_reg_i16_offset_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %load = load i32 addrspace(1)* %in, align 4
+ %add = add i32 %load, 1
+ %ext = and i32 %add, 65535
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %ext, i32 8, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_1
+; SI: BUFFER_LOAD_DWORD
+; SI: V_AND_B32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
+; SI: S_ENDPGM
+; EG: AND_INT T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, 1,
+define void @bfe_u32_test_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @bfe_u32_test_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+define void @bfe_u32_test_3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_4
+; SI-NOT: LSHL
+; SI-NOT: SHR
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+define void @bfe_u32_test_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %shr = lshr i32 %shl, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shr, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_5
+; SI: BUFFER_LOAD_DWORD
+; SI-NOT: LSHL
+; SI-NOT: SHR
+; SI: V_BFE_I32 {{v[0-9]+}}, {{v[0-9]+}}, 0, 1
+; SI: S_ENDPGM
+define void @bfe_u32_test_5(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %shr = ashr i32 %shl, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shr, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_6
+; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; SI: S_ENDPGM
+define void @bfe_u32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_7
+; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 0, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_8
+; SI-NOT: BFE
+; SI: V_AND_B32_e32 {{v[0-9]+}}, 1, {{v[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_9
+; SI-NOT: BFE
+; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_10
+; SI-NOT: BFE
+; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 1, i32 31)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_11
+; SI-NOT: BFE
+; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 8, i32 24)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_12
+; SI-NOT: BFE
+; SI: V_LSHRREV_B32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %x, i32 24, i32 8)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_13
+; V_ASHRREV_U32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = ashr i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
+}
+
+; FUNC-LABEL: @bfe_u32_test_14
+; SI-NOT: LSHR
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_u32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = lshr i32 %x, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.u32(i32 %shl, i32 31, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4 ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_0
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 0, i32 0, i32 0) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_1
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 12334, i32 0, i32 0) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_2
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 0, i32 0, i32 1) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_3
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 1, i32 0, i32 1) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_4
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], -1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 4294967295, i32 0, i32 1) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_5
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 128, i32 7, i32 1) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_6
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x80
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 128, i32 0, i32 8) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_7
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 127, i32 0, i32 8) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_8
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 127, i32 6, i32 8) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_9
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 65536, i32 16, i32 8) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_10
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 65535, i32 16, i32 16) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_11
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 10
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 4, i32 4) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_12
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 31, i32 1) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_13
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 1
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 131070, i32 16, i32 16) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_14
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 40
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 2, i32 30) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_15
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 10
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 160, i32 4, i32 28) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_16
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 4294967295, i32 1, i32 7) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_17
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0x7f
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 255, i32 1, i32 31) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_u32_constant_fold_test_18
+; SI-NOT: BFE
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], 0
+; SI: BUFFER_STORE_DWORD [[VREG]],
+; SI: S_ENDPGM
+; EG-NOT: BFE
+define void @bfe_u32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
+ %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 255, i32 31, i32 1) nounwind readnone
+ store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfi.ll b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
new file mode 100644
index 000000000000..e1de45b4ba29
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfi(i32, i32, i32) nounwind readnone
+
+; FUNC-LABEL: @bfi_arg_arg_arg
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+ %bfi = call i32 @llvm.AMDGPU.bfi(i32 %src0, i32 %src1, i32 %src1) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfi_arg_arg_imm
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfi = call i32 @llvm.AMDGPU.bfi(i32 %src0, i32 %src1, i32 123) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfi_arg_imm_arg
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
+ %bfi = call i32 @llvm.AMDGPU.bfi(i32 %src0, i32 123, i32 %src2) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfi_imm_arg_arg
+; SI: V_BFI_B32
+; EG: BFI_INT
+define void @bfi_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
+ %bfi = call i32 @llvm.AMDGPU.bfi(i32 123, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %bfi, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfm.ll b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
new file mode 100644
index 000000000000..ef8721e4a978
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.bfm(i32, i32) nounwind readnone
+
+; FUNC-LABEL: @bfm_arg_arg
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 %src0, i32 %src1) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfm_arg_imm
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_arg_imm(i32 addrspace(1)* %out, i32 %src0) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 %src0, i32 123) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfm_imm_arg
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_imm_arg(i32 addrspace(1)* %out, i32 %src1) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 123, i32 %src1) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfm_imm_imm
+; SI: V_BFM
+; EG: BFM_INT
+define void @bfm_imm_imm(i32 addrspace(1)* %out) nounwind {
+ %bfm = call i32 @llvm.AMDGPU.bfm(i32 123, i32 456) nounwind readnone
+ store i32 %bfm, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.brev.ll b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
new file mode 100644
index 000000000000..68a5ad0649c2
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.brev(i32) nounwind readnone
+
+; FUNC-LABEL: @s_brev_i32:
+; SI: S_LOAD_DWORD [[VAL:s[0-9]+]],
+; SI: S_BREV_B32 [[SRESULT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+define void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
+ %ctlz = call i32 @llvm.AMDGPU.brev(i32 %val) nounwind readnone
+ store i32 %ctlz, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @v_brev_i32:
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_BFREV_B32_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
+ %val = load i32 addrspace(1)* %valptr, align 4
+ %ctlz = call i32 @llvm.AMDGPU.brev(i32 %val) nounwind readnone
+ store i32 %ctlz, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.clamp.ll b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
new file mode 100644
index 000000000000..d608953a0dd2
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.clamp.f32(float, float, float) nounwind readnone
+declare float @llvm.AMDIL.clamp.f32(float, float, float) nounwind readnone
+
+; FUNC-LABEL: @clamp_0_1_f32
+; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
+; SI: V_ADD_F32_e64 [[RESULT:v[0-9]+]], [[ARG]], 0, 1, 0
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+; SI: S_ENDPGM
+
+; EG: MOV_SAT
+define void @clamp_0_1_f32(float addrspace(1)* %out, float %src) nounwind {
+ %clamp = call float @llvm.AMDGPU.clamp.f32(float %src, float 0.0, float 1.0) nounwind readnone
+ store float %clamp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @clamp_0_1_amdil_legacy_f32
+; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
+; SI: V_ADD_F32_e64 [[RESULT:v[0-9]+]], [[ARG]], 0, 1, 0
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+define void @clamp_0_1_amdil_legacy_f32(float addrspace(1)* %out, float %src) nounwind {
+ %clamp = call float @llvm.AMDIL.clamp.f32(float %src, float 0.0, float 1.0) nounwind readnone
+ store float %clamp, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
new file mode 100644
index 000000000000..6facb4782e98
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
@@ -0,0 +1,42 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.AMDGPU.cvt.f32.ubyte0(i32) nounwind readnone
+declare float @llvm.AMDGPU.cvt.f32.ubyte1(i32) nounwind readnone
+declare float @llvm.AMDGPU.cvt.f32.ubyte2(i32) nounwind readnone
+declare float @llvm.AMDGPU.cvt.f32.ubyte3(i32) nounwind readnone
+
+; SI-LABEL: @test_unpack_byte0_to_float:
+; SI: V_CVT_F32_UBYTE0
+define void @test_unpack_byte0_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte0(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_unpack_byte1_to_float:
+; SI: V_CVT_F32_UBYTE1
+define void @test_unpack_byte1_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte1(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_unpack_byte2_to_float:
+; SI: V_CVT_F32_UBYTE2
+define void @test_unpack_byte2_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte2(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_unpack_byte3_to_float:
+; SI: V_CVT_F32_UBYTE3
+define void @test_unpack_byte3_to_float(float addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %cvt = call float @llvm.AMDGPU.cvt.f32.ubyte3(i32 %val) nounwind readnone
+ store float %cvt, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
new file mode 100644
index 000000000000..c8c73573e073
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.AMDGPU.div.fixup.f32(float, float, float) nounwind readnone
+declare double @llvm.AMDGPU.div.fixup.f64(double, double, double) nounwind readnone
+
+; SI-LABEL: @test_div_fixup_f32:
+; SI-DAG: S_LOAD_DWORD [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: S_LOAD_DWORD [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: V_MOV_B32_e32 [[VC:v[0-9]+]], [[SC]]
+; SI-DAG: S_LOAD_DWORD [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: V_MOV_B32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: V_DIV_FIXUP_F32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @test_div_fixup_f32(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+ %result = call float @llvm.AMDGPU.div.fixup.f32(float %a, float %b, float %c) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_div_fixup_f64:
+; SI: V_DIV_FIXUP_F64
+define void @test_div_fixup_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
+ %result = call double @llvm.AMDGPU.div.fixup.f64(double %a, double %b, double %c) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
new file mode 100644
index 000000000000..4f1e827c2cbd
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare float @llvm.AMDGPU.div.fmas.f32(float, float, float) nounwind readnone
+declare double @llvm.AMDGPU.div.fmas.f64(double, double, double) nounwind readnone
+
+; SI-LABEL: @test_div_fmas_f32:
+; SI-DAG: S_LOAD_DWORD [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: S_LOAD_DWORD [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: V_MOV_B32_e32 [[VC:v[0-9]+]], [[SC]]
+; SI-DAG: S_LOAD_DWORD [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: V_MOV_B32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: V_DIV_FMAS_F32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @test_div_fmas_f64:
+; SI: V_DIV_FMAS_F64
+define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
+ %result = call double @llvm.AMDGPU.div.fmas.f64(double %a, double %b, double %c) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
new file mode 100644
index 000000000000..527c8da10a3c
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
@@ -0,0 +1,48 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare { float, i1 } @llvm.AMDGPU.div.scale.f32(float, float, i1) nounwind readnone
+declare { double, i1 } @llvm.AMDGPU.div.scale.f64(double, double, i1) nounwind readnone
+
+; SI-LABEL @test_div_scale_f32_1:
+; SI: V_DIV_SCALE_F32
+define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr) nounwind {
+ %a = load float addrspace(1)* %aptr, align 4
+ %b = load float addrspace(1)* %bptr, align 4
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL @test_div_scale_f32_2:
+; SI: V_DIV_SCALE_F32
+define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr) nounwind {
+ %a = load float addrspace(1)* %aptr, align 4
+ %b = load float addrspace(1)* %bptr, align 4
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b, i1 true) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL @test_div_scale_f64_1:
+; SI: V_DIV_SCALE_F64
+define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %b = load double addrspace(1)* %bptr, align 8
+ %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 false) nounwind readnone
+ %result0 = extractvalue { double, i1 } %result, 0
+ store double %result0, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL @test_div_scale_f64_2:
+; SI: V_DIV_SCALE_F64
+define void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %b = load double addrspace(1)* %bptr, align 8
+ %result = call { double, i1 } @llvm.AMDGPU.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
+ %result0 = extractvalue { double, i1 } %result, 0
+ store double %result0, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.fract.ll b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
new file mode 100644
index 000000000000..72ec1c57571e
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.fract.f32(float) nounwind readnone
+
+; Legacy name
+declare float @llvm.AMDIL.fraction.f32(float) nounwind readnone
+
+; FUNC-LABEL: @fract_f32
+; SI: V_FRACT_F32
+; EG: FRACT
+define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
+ %val = load float addrspace(1)* %src, align 4
+ %fract = call float @llvm.AMDGPU.fract.f32(float %val) nounwind readnone
+ store float %fract, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @fract_f32_legacy_amdil
+; SI: V_FRACT_F32
+; EG: FRACT
+define void @fract_f32_legacy_amdil(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
+ %val = load float addrspace(1)* %src, align 4
+ %fract = call float @llvm.AMDIL.fraction.f32(float %val) nounwind readnone
+ store float %fract, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imad24.ll b/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
new file mode 100644
index 000000000000..95795ea63b93
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
@@ -0,0 +1,21 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=r600 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=r770 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+
+; FIXME: Store of i32 seems to be broken pre-EG somehow?
+
+declare i32 @llvm.AMDGPU.imad24(i32, i32, i32) nounwind readnone
+
+; FUNC-LABEL: @test_imad24
+; SI: V_MAD_I32_I24
+; CM: MULADD_INT24
+; R600: MULLO_INT
+; R600: ADD_INT
+define void @test_imad24(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+ %mad = call i32 @llvm.AMDGPU.imad24(i32 %src0, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %mad, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imax.ll b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
index 1336f4eeeedd..01c9f435b9fc 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
@@ -1,12 +1,23 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MAX_I32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_imax
+; SI: V_MAX_I32_e32
+define void @vector_imax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.imax(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %max = call i32 @llvm.AMDGPU.imax(i32 %p0, i32 %load)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_imax
+; SI: S_MAX_I32
+define void @scalar_imax(i32 %p0, i32 %p1) #0 {
+entry:
+ %max = call i32 @llvm.AMDGPU.imax(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
ret void
}
@@ -15,7 +26,7 @@ declare i32 @llvm.AMDGPU.imax(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imin.ll b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
index 3435ea471e47..565bf3444081 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
@@ -1,12 +1,23 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MIN_I32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_imin
+; SI: V_MIN_I32_e32
+define void @vector_imin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.imin(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %min = call i32 @llvm.AMDGPU.imin(i32 %p0, i32 %load)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_imin
+; SI: S_MIN_I32
+define void @scalar_imin(i32 %p0, i32 %p1) #0 {
+entry:
+ %min = call i32 @llvm.AMDGPU.imin(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
ret void
}
@@ -15,7 +26,7 @@ declare i32 @llvm.AMDGPU.imin(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imul24.ll b/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
new file mode 100644
index 000000000000..8ee3520daeae
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
@@ -0,0 +1,15 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.imul24(i32, i32) nounwind readnone
+
+; FUNC-LABEL: @test_imul24
+; SI: V_MUL_I32_I24
+; CM: MUL_INT24
+; R600: MULLO_INT
+define void @test_imul24(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %mul = call i32 @llvm.AMDGPU.imul24(i32 %src0, i32 %src1) nounwind readnone
+ store i32 %mul, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.kill.ll b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
index bec5cdf65f1b..1f82ffb53f1d 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.kill.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
@@ -1,13 +1,17 @@
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
+; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; SI-LABEL: @kill_gs
-; SI: V_CMPX_LE_F32
+; SI-LABEL: @kill_gs_const
+; SI-NOT: V_CMPX_LE_F32
+; SI: S_MOV_B64 exec, 0
-define void @kill_gs() #0 {
+define void @kill_gs_const() #0 {
main_body:
%0 = icmp ule i32 0, 3
%1 = select i1 %0, float 1.000000e+00, float -1.000000e+00
call void @llvm.AMDGPU.kill(float %1)
+ %2 = icmp ule i32 3, 0
+ %3 = select i1 %2, float 1.000000e+00, float -1.000000e+00
+ call void @llvm.AMDGPU.kill(float %3)
ret void
}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
new file mode 100644
index 000000000000..51964eefa64f
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.legacy.rsq(float) nounwind readnone
+
+; FUNC-LABEL: @rsq_legacy_f32
+; SI: V_RSQ_LEGACY_F32_e32
+; EG: RECIPSQRT_IEEE
+define void @rsq_legacy_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rsq = call float @llvm.AMDGPU.legacy.rsq(float %src) nounwind readnone
+ store float %rsq, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll b/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
new file mode 100644
index 000000000000..b5dda0ce81f9
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
@@ -0,0 +1,30 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.AMDGPU.rcp.f64(double) nounwind readnone
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+; FUNC-LABEL: @rcp_f64
+; SI: V_RCP_F64_e32
+define void @rcp_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rcp = call double @llvm.AMDGPU.rcp.f64(double %src) nounwind readnone
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @rcp_pat_f64
+; SI: V_RCP_F64_e32
+define void @rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rcp = fdiv double 1.0, %src
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @rsq_rcp_pat_f64
+; SI-UNSAFE: V_RSQ_F64_e32
+; SI-SAFE-NOT: V_RSQ_F64_e32
+define void @rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
+ %sqrt = call double @llvm.sqrt.f64(double %src) nounwind readnone
+ %rcp = call double @llvm.AMDGPU.rcp.f64(double %sqrt) nounwind readnone
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rcp.ll b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
new file mode 100644
index 000000000000..8d5d66e149ba
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
@@ -0,0 +1,65 @@
+; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+
+; XUN: llc -march=r600 -mcpu=SI -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE-SPDENORM -check-prefix=SI -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.rcp.f32(float) nounwind readnone
+declare double @llvm.AMDGPU.rcp.f64(double) nounwind readnone
+
+
+declare float @llvm.sqrt.f32(float) nounwind readnone
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+; FUNC-LABEL: @rcp_f32
+; SI: V_RCP_F32_e32
+define void @rcp_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rcp = call float @llvm.AMDGPU.rcp.f32(float %src) nounwind readnone
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @rcp_f64
+; SI: V_RCP_F64_e32
+define void @rcp_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rcp = call double @llvm.AMDGPU.rcp.f64(double %src) nounwind readnone
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @rcp_pat_f32
+; SI-SAFE: V_RCP_F32_e32
+; XSI-SAFE-SPDENORM-NOT: V_RCP_F32_e32
+define void @rcp_pat_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rcp = fdiv float 1.0, %src
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @rcp_pat_f64
+; SI: V_RCP_F64_e32
+define void @rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rcp = fdiv double 1.0, %src
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @rsq_rcp_pat_f32
+; SI-UNSAFE: V_RSQ_F32_e32
+; SI-SAFE: V_SQRT_F32_e32
+; SI-SAFE: V_RCP_F32_e32
+define void @rsq_rcp_pat_f32(float addrspace(1)* %out, float %src) nounwind {
+ %sqrt = call float @llvm.sqrt.f32(float %src) nounwind readnone
+ %rcp = call float @llvm.AMDGPU.rcp.f32(float %sqrt) nounwind readnone
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @rsq_rcp_pat_f64
+; SI-UNSAFE: V_RSQ_F64_e32
+; SI-SAFE-NOT: V_RSQ_F64_e32
+define void @rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
+ %sqrt = call double @llvm.sqrt.f64(double %src) nounwind readnone
+ %rcp = call double @llvm.AMDGPU.rcp.f64(double %sqrt) nounwind readnone
+ store double %rcp, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
new file mode 100644
index 000000000000..100d6ff77707
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare double @llvm.AMDGPU.rsq.clamped.f64(double) nounwind readnone
+
+; FUNC-LABEL: @rsq_clamped_f64
+; SI: V_RSQ_CLAMP_F64_e32
+define void @rsq_clamped_f64(double addrspace(1)* %out, double %src) nounwind {
+ %rsq_clamped = call double @llvm.AMDGPU.rsq.clamped.f64(double %src) nounwind readnone
+ store double %rsq_clamped, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
new file mode 100644
index 000000000000..683df7355ac6
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+
+declare float @llvm.AMDGPU.rsq.clamped.f32(float) nounwind readnone
+
+; FUNC-LABEL: @rsq_clamped_f32
+; SI: V_RSQ_CLAMP_F32_e32
+; EG: RECIPSQRT_CLAMPED
+define void @rsq_clamped_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rsq_clamped = call float @llvm.AMDGPU.rsq.clamped.f32(float %src) nounwind readnone
+ store float %rsq_clamped, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
new file mode 100644
index 000000000000..27cf6b28fd66
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
@@ -0,0 +1,13 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.rsq.f32(float) nounwind readnone
+
+; FUNC-LABEL: @rsq_f32
+; SI: V_RSQ_F32_e32
+; EG: RECIPSQRT_IEEE
+define void @rsq_f32(float addrspace(1)* %out, float %src) nounwind {
+ %rsq = call float @llvm.AMDGPU.rsq.f32(float %src) nounwind readnone
+ store float %rsq, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
new file mode 100644
index 000000000000..1c736d447ea9
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare double @llvm.AMDGPU.trig.preop.f64(double, i32) nounwind readnone
+
+; SI-LABEL: @test_trig_preop_f64:
+; SI-DAG: BUFFER_LOAD_DWORD [[SEG:v[0-9]+]]
+; SI-DAG: BUFFER_LOAD_DWORDX2 [[SRC:v\[[0-9]+:[0-9]+\]]],
+; SI: V_TRIG_PREOP_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], [[SEG]]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @test_trig_preop_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %result = call double @llvm.AMDGPU.trig.preop.f64(double %a, i32 %b) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @test_trig_preop_f64_imm_segment:
+; SI: BUFFER_LOAD_DWORDX2 [[SRC:v\[[0-9]+:[0-9]+\]]],
+; SI: V_TRIG_PREOP_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[SRC]], 7
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @test_trig_preop_f64_imm_segment(double addrspace(1)* %out, double addrspace(1)* %aptr) nounwind {
+ %a = load double addrspace(1)* %aptr, align 8
+ %result = call double @llvm.AMDGPU.trig.preop.f64(double %a, i32 7) nounwind readnone
+ store double %result, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umad24.ll b/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
new file mode 100644
index 000000000000..afdfb18a563b
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
@@ -0,0 +1,19 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=r600 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=rv770 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.umad24(i32, i32, i32) nounwind readnone
+
+; FUNC-LABEL: @test_umad24
+; SI: V_MAD_U32_U24
+; EG: MULADD_UINT24
+; R600: MULLO_UINT
+; R600: ADD_INT
+define void @test_umad24(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+ %mad = call i32 @llvm.AMDGPU.umad24(i32 %src0, i32 %src1, i32 %src2) nounwind readnone
+ store i32 %mad, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umax.ll b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
index 4cfa133208e3..1b8da2e15534 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
@@ -1,12 +1,38 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MAX_U32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_umax
+; SI: V_MAX_U32_e32
+define void @vector_umax(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.umax(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %max = call i32 @llvm.AMDGPU.umax(i32 %p0, i32 %load)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_umax
+; SI: S_MAX_U32
+define void @scalar_umax(i32 %p0, i32 %p1) #0 {
+entry:
+ %max = call i32 @llvm.AMDGPU.umax(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %max to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @trunc_zext_umax
+; SI: BUFFER_LOAD_UBYTE [[VREG:v[0-9]+]],
+; SI: V_MAX_U32_e32 [[RESULT:v[0-9]+]], 0, [[VREG]]
+; SI-NOT: AND
+; SI: BUFFER_STORE_SHORT [[RESULT]],
+define void @trunc_zext_umax(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
+ %tmp5 = load i8 addrspace(1)* %src, align 1
+ %tmp2 = zext i8 %tmp5 to i32
+ %tmp3 = tail call i32 @llvm.AMDGPU.umax(i32 %tmp2, i32 0) nounwind readnone
+ %tmp4 = trunc i32 %tmp3 to i8
+ %tmp6 = zext i8 %tmp4 to i16
+ store i16 %tmp6, i16 addrspace(1)* %out, align 2
ret void
}
@@ -15,7 +41,7 @@ declare i32 @llvm.AMDGPU.umax(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umin.ll b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
index 14af0519bc90..08397f8356c9 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
@@ -1,12 +1,38 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
-;CHECK: V_MIN_U32_e32
-
-define void @main(i32 %p0, i32 %p1) #0 {
+; SI-LABEL: @vector_umin
+; SI: V_MIN_U32_e32
+define void @vector_umin(i32 %p0, i32 %p1, i32 addrspace(1)* %in) #0 {
main_body:
- %0 = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %p1)
- %1 = bitcast i32 %0 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %1, float %1, float %1, float %1)
+ %load = load i32 addrspace(1)* %in, align 4
+ %min = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %load)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @scalar_umin
+; SI: S_MIN_U32
+define void @scalar_umin(i32 %p0, i32 %p1) #0 {
+entry:
+ %min = call i32 @llvm.AMDGPU.umin(i32 %p0, i32 %p1)
+ %bc = bitcast i32 %min to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %bc, float %bc, float %bc, float %bc)
+ ret void
+}
+
+; SI-LABEL: @trunc_zext_umin
+; SI: BUFFER_LOAD_UBYTE [[VREG:v[0-9]+]],
+; SI: V_MIN_U32_e32 [[RESULT:v[0-9]+]], 0, [[VREG]]
+; SI-NOT: AND
+; SI: BUFFER_STORE_SHORT [[RESULT]],
+define void @trunc_zext_umin(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
+ %tmp5 = load i8 addrspace(1)* %src, align 1
+ %tmp2 = zext i8 %tmp5 to i32
+ %tmp3 = tail call i32 @llvm.AMDGPU.umin(i32 %tmp2, i32 0) nounwind readnone
+ %tmp4 = trunc i32 %tmp3 to i8
+ %tmp6 = zext i8 %tmp4 to i16
+ store i16 %tmp6, i16 addrspace(1)* %out, align 2
ret void
}
@@ -15,7 +41,7 @@ declare i32 @llvm.AMDGPU.umin(i32, i32) #1
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-attributes #0 = { "ShaderType"="0" }
-attributes #1 = { readnone }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umul24.ll b/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
new file mode 100644
index 000000000000..72a36029fb31
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=r600 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=r770 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.umul24(i32, i32) nounwind readnone
+
+; FUNC-LABEL: @test_umul24
+; SI: V_MUL_U32_U24
+; R600: MUL_UINT24
+; R600: MULLO_UINT
+define void @test_umul24(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+ %mul = call i32 @llvm.AMDGPU.umul24(i32 %src0, i32 %src1) nounwind readnone
+ store i32 %mul, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.SI.gather4.ll b/test/CodeGen/R600/llvm.SI.gather4.ll
new file mode 100644
index 000000000000..8402faaa4dca
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.gather4.ll
@@ -0,0 +1,508 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: @gather4_v2
+;CHECK: IMAGE_GATHER4 {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_v2() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.v2i32(<2 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4
+;CHECK: IMAGE_GATHER4 {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_cl
+;CHECK: IMAGE_GATHER4_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.cl.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_l
+;CHECK: IMAGE_GATHER4_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.l.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b
+;CHECK: IMAGE_GATHER4_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_cl
+;CHECK: IMAGE_GATHER4_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.cl.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_cl_v8
+;CHECK: IMAGE_GATHER4_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_cl_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.cl.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_lz_v2
+;CHECK: IMAGE_GATHER4_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_lz_v2() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_lz
+;CHECK: IMAGE_GATHER4_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.lz.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+;CHECK-LABEL: @gather4_o
+;CHECK: IMAGE_GATHER4_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_cl_o
+;CHECK: IMAGE_GATHER4_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.cl.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_cl_o_v8
+;CHECK: IMAGE_GATHER4_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_cl_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_l_o
+;CHECK: IMAGE_GATHER4_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_l_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.l.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_l_o_v8
+;CHECK: IMAGE_GATHER4_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_l_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.l.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_o
+;CHECK: IMAGE_GATHER4_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_o_v8
+;CHECK: IMAGE_GATHER4_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_b_cl_o
+;CHECK: IMAGE_GATHER4_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_b_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.b.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_lz_o
+;CHECK: IMAGE_GATHER4_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_lz_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.lz.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+;CHECK-LABEL: @gather4_c
+;CHECK: IMAGE_GATHER4_C {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_cl
+;CHECK: IMAGE_GATHER4_C_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.cl.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_cl_v8
+;CHECK: IMAGE_GATHER4_C_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_cl_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.cl.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_l
+;CHECK: IMAGE_GATHER4_C_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.l.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_l_v8
+;CHECK: IMAGE_GATHER4_C_L {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_l_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.l.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b
+;CHECK: IMAGE_GATHER4_C_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_v8
+;CHECK: IMAGE_GATHER4_C_B {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_cl
+;CHECK: IMAGE_GATHER4_C_B_CL {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.cl.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_lz
+;CHECK: IMAGE_GATHER4_C_LZ {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.lz.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+;CHECK-LABEL: @gather4_c_o
+;CHECK: IMAGE_GATHER4_C_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_o_v8
+;CHECK: IMAGE_GATHER4_C_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_cl_o
+;CHECK: IMAGE_GATHER4_C_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_l_o
+;CHECK: IMAGE_GATHER4_C_L_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_l_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.l.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_o
+;CHECK: IMAGE_GATHER4_C_B_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_b_cl_o
+;CHECK: IMAGE_GATHER4_C_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_b_cl_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.b.cl.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_lz_o
+;CHECK: IMAGE_GATHER4_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_lz_o() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.lz.o.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @gather4_c_lz_o_v8
+;CHECK: IMAGE_GATHER4_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @gather4_c_lz_o_v8() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.gather4.c.lz.o.v8i32(<8 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+
+declare <4 x float> @llvm.SI.gather4.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.cl.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.l.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.cl.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.cl.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.lz.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.lz.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.gather4.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.cl.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.l.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.l.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.b.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.lz.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.gather4.c.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.cl.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.cl.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.l.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.l.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.cl.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.lz.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.gather4.c.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.l.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.b.cl.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.lz.o.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.gather4.c.lz.o.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.SI.getlod.ll b/test/CodeGen/R600/llvm.SI.getlod.ll
new file mode 100644
index 000000000000..a7a17ec3fffa
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.getlod.ll
@@ -0,0 +1,44 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: @getlod
+;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @getlod() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.getlod.i32(i32 undef, <32 x i8> undef, <16 x i8> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
+ ret void
+}
+
+;CHECK-LABEL: @getlod_v2
+;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @getlod_v2() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.getlod.v2i32(<2 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
+ ret void
+}
+
+;CHECK-LABEL: @getlod_v4
+;CHECK: IMAGE_GET_LOD {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @getlod_v4() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.getlod.v4i32(<4 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 15, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r0, float %r1)
+ ret void
+}
+
+
+declare <4 x float> @llvm.SI.getlod.i32(i32, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.getlod.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.getlod.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.SI.image.ll b/test/CodeGen/R600/llvm.SI.image.ll
new file mode 100644
index 000000000000..eac0b8eead3a
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.image.ll
@@ -0,0 +1,49 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: @image_load
+;CHECK: IMAGE_LOAD {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @image_load() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.load.v4i32(<4 x i32> undef, <8 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @image_load_mip
+;CHECK: IMAGE_LOAD_MIP {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @image_load_mip() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.load.mip.v4i32(<4 x i32> undef, <8 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @getresinfo
+;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @getresinfo() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.getresinfo.i32(i32 undef, <8 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+declare <4 x float> @llvm.SI.image.load.v4i32(<4 x i32>, <8 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.load.mip.v4i32(<4 x i32>, <8 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.getresinfo.i32(i32, <8 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.SI.image.sample.ll b/test/CodeGen/R600/llvm.SI.image.sample.ll
new file mode 100644
index 000000000000..14dff7eb5fea
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.image.sample.ll
@@ -0,0 +1,289 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: @sample
+;CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_cl
+;CHECK: IMAGE_SAMPLE_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_d
+;CHECK: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_d() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.d.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_d_cl
+;CHECK: IMAGE_SAMPLE_D_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_d_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.d.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_l
+;CHECK: IMAGE_SAMPLE_L {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_b
+;CHECK: IMAGE_SAMPLE_B {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.b.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_b_cl
+;CHECK: IMAGE_SAMPLE_B_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.b.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_lz
+;CHECK: IMAGE_SAMPLE_LZ {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_cd
+;CHECK: IMAGE_SAMPLE_CD {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_cd() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.cd.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_cd_cl
+;CHECK: IMAGE_SAMPLE_CD_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_cd_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.cd.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c
+;CHECK: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_cl
+;CHECK: IMAGE_SAMPLE_C_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_d
+;CHECK: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_d() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.d.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_d_cl
+;CHECK: IMAGE_SAMPLE_C_D_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_d_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.d.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_l
+;CHECK: IMAGE_SAMPLE_C_L {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.l.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_b
+;CHECK: IMAGE_SAMPLE_C_B {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.b.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_b_cl
+;CHECK: IMAGE_SAMPLE_C_B_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.b.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_lz
+;CHECK: IMAGE_SAMPLE_C_LZ {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.lz.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_cd
+;CHECK: IMAGE_SAMPLE_C_CD {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_cd() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.cd.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_cd_cl
+;CHECK: IMAGE_SAMPLE_C_CD_CL {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_cd_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.cd.cl.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+declare <4 x float> @llvm.SI.image.sample.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.d.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.d.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.l.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.b.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.b.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.lz.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.cd.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.cd.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.d.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.d.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.l.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.b.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.b.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.lz.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.cd.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.cd.cl.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.SI.image.sample.o.ll b/test/CodeGen/R600/llvm.SI.image.sample.o.ll
new file mode 100644
index 000000000000..ed3ef9140143
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.image.sample.o.ll
@@ -0,0 +1,289 @@
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: @sample
+;CHECK: IMAGE_SAMPLE_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_cl
+;CHECK: IMAGE_SAMPLE_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_d
+;CHECK: IMAGE_SAMPLE_D_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_d() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.d.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_d_cl
+;CHECK: IMAGE_SAMPLE_D_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_d_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.d.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_l
+;CHECK: IMAGE_SAMPLE_L_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.l.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_b
+;CHECK: IMAGE_SAMPLE_B_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.b.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_b_cl
+;CHECK: IMAGE_SAMPLE_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.b.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_lz
+;CHECK: IMAGE_SAMPLE_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.lz.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_cd
+;CHECK: IMAGE_SAMPLE_CD_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_cd() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.cd.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_cd_cl
+;CHECK: IMAGE_SAMPLE_CD_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_cd_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.cd.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c
+;CHECK: IMAGE_SAMPLE_C_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_cl
+;CHECK: IMAGE_SAMPLE_C_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_d
+;CHECK: IMAGE_SAMPLE_C_D_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_d() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.d.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_d_cl
+;CHECK: IMAGE_SAMPLE_C_D_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_d_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.d.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_l
+;CHECK: IMAGE_SAMPLE_C_L_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_l() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.l.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_b
+;CHECK: IMAGE_SAMPLE_C_B_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_b() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.b.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_b_cl
+;CHECK: IMAGE_SAMPLE_C_B_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_b_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.b.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_lz
+;CHECK: IMAGE_SAMPLE_C_LZ_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_lz() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.lz.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_cd
+;CHECK: IMAGE_SAMPLE_C_CD_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_cd() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.cd.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+;CHECK-LABEL: @sample_c_cd_cl
+;CHECK: IMAGE_SAMPLE_C_CD_CL_O {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @sample_c_cd_cl() #0 {
+main_body:
+ %r = call <4 x float> @llvm.SI.image.sample.c.cd.cl.o.v4i32(<4 x i32> undef, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+ %r0 = extractelement <4 x float> %r, i32 0
+ %r1 = extractelement <4 x float> %r, i32 1
+ %r2 = extractelement <4 x float> %r, i32 2
+ %r3 = extractelement <4 x float> %r, i32 3
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %r0, float %r1, float %r2, float %r3)
+ ret void
+}
+
+
+declare <4 x float> @llvm.SI.image.sample.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.d.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.d.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.l.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.b.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.b.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.lz.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.cd.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.cd.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare <4 x float> @llvm.SI.image.sample.c.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.d.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.d.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.l.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.b.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.b.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.lz.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.cd.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare <4 x float> @llvm.SI.image.sample.c.cd.cl.o.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.SI.sample-masked.ll b/test/CodeGen/R600/llvm.SI.sample-masked.ll
index e5e4ec4f0674..445359a4ced7 100644
--- a/test/CodeGen/R600/llvm.SI.sample-masked.ll
+++ b/test/CodeGen/R600/llvm.SI.sample-masked.ll
@@ -2,7 +2,7 @@
; CHECK-LABEL: @v1
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 13
-define void @v1(i32 %a1) {
+define void @v1(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -15,7 +15,7 @@ entry:
; CHECK-LABEL: @v2
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 11
-define void @v2(i32 %a1) {
+define void @v2(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -28,7 +28,7 @@ entry:
; CHECK-LABEL: @v3
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 14
-define void @v3(i32 %a1) {
+define void @v3(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -41,7 +41,7 @@ entry:
; CHECK-LABEL: @v4
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 7
-define void @v4(i32 %a1) {
+define void @v4(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -54,7 +54,7 @@ entry:
; CHECK-LABEL: @v5
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 10
-define void @v5(i32 %a1) {
+define void @v5(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -66,7 +66,7 @@ entry:
; CHECK-LABEL: @v6
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 6
-define void @v6(i32 %a1) {
+define void @v6(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -78,7 +78,7 @@ entry:
; CHECK-LABEL: @v7
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 9
-define void @v7(i32 %a1) {
+define void @v7(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -91,3 +91,5 @@ entry:
declare <4 x float> @llvm.SI.sample.v1i32(<1 x i32>, <32 x i8>, <16 x i8>, i32) readnone
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.SI.sample.ll b/test/CodeGen/R600/llvm.SI.sample.ll
index d41737c65927..24e8f640d90e 100644
--- a/test/CodeGen/R600/llvm.SI.sample.ll
+++ b/test/CodeGen/R600/llvm.SI.sample.ll
@@ -17,7 +17,7 @@
;CHECK-DAG: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 14
;CHECK-DAG: IMAGE_SAMPLE {{v[0-9]+}}, 8
-define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
+define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) #0 {
%v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
%v2 = insertelement <4 x i32> undef, i32 %a1, i32 1
%v3 = insertelement <4 x i32> undef, i32 %a1, i32 2
@@ -137,7 +137,7 @@ define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
; CHECK: @v1
; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 15
-define void @v1(i32 %a1) {
+define void @v1(i32 %a1) #0 {
entry:
%0 = insertelement <1 x i32> undef, i32 %a1, i32 0
%1 = call <4 x float> @llvm.SI.sample.v1i32(<1 x i32> %0, <32 x i8> undef, <16 x i8> undef, i32 0)
@@ -155,3 +155,5 @@ declare <4 x float> @llvm.SI.sample.v1i32(<1 x i32>, <32 x i8>, <16 x i8>, i32)
declare <4 x float> @llvm.SI.sample.(<4 x i32>, <32 x i8>, <16 x i8>, i32) readnone
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.SI.sampled.ll b/test/CodeGen/R600/llvm.SI.sampled.ll
index 21ac725ae039..366456f44e6c 100644
--- a/test/CodeGen/R600/llvm.SI.sampled.ll
+++ b/test/CodeGen/R600/llvm.SI.sampled.ll
@@ -17,7 +17,7 @@
;CHECK-DAG: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 14
;CHECK-DAG: IMAGE_SAMPLE_D {{v[0-9]+}}, 8
-define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
+define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) #0 {
%v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
%v2 = insertelement <4 x i32> undef, i32 %a1, i32 1
%v3 = insertelement <4 x i32> undef, i32 %a1, i32 2
@@ -138,3 +138,5 @@ define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
declare <4 x float> @llvm.SI.sampled.(<4 x i32>, <32 x i8>, <16 x i8>, i32) readnone
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.SI.sendmsg.ll b/test/CodeGen/R600/llvm.SI.sendmsg.ll
index cfcc7c4e40ee..581d422b0952 100644
--- a/test/CodeGen/R600/llvm.SI.sendmsg.ll
+++ b/test/CodeGen/R600/llvm.SI.sendmsg.ll
@@ -1,10 +1,10 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
; CHECK-LABEL: @main
-; CHECK: S_SENDMSG 34
-; CHECK: S_SENDMSG 274
-; CHECK: S_SENDMSG 562
-; CHECK: S_SENDMSG 3
+; CHECK: S_SENDMSG Gs(emit stream 0)
+; CHECK: S_SENDMSG Gs(cut stream 1)
+; CHECK: S_SENDMSG Gs(emit-cut stream 2)
+; CHECK: S_SENDMSG Gs_done(nop)
define void @main() {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
index fa7c3cabadc5..740581a69666 100644
--- a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
+++ b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
@@ -1,8 +1,8 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK_LABEL: @test1
-;CHECK: TBUFFER_STORE_FORMAT_XYZW {{v\[[0-9]+:[0-9]+\]}}, 32, -1, 0, -1, 0, 14, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test1(i32 %a1, i32 %vaddr) {
+;CHECK-LABEL: @test1
+;CHECK: TBUFFER_STORE_FORMAT_XYZW {{v\[[0-9]+:[0-9]+\]}}, 0x20, -1, 0, -1, 0, 14, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
+define void @test1(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 1,
@@ -10,9 +10,9 @@ define void @test1(i32 %a1, i32 %vaddr) {
ret void
}
-;CHECK_LABEL: @test2
-;CHECK: TBUFFER_STORE_FORMAT_XYZ {{v\[[0-9]+:[0-9]+\]}}, 24, -1, 0, -1, 0, 13, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test2(i32 %a1, i32 %vaddr) {
+;CHECK-LABEL: @test2
+;CHECK: TBUFFER_STORE_FORMAT_XYZ {{v\[[0-9]+:[0-9]+\]}}, 0x18, -1, 0, -1, 0, 13, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
+define void @test2(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
i32 3, i32 %vaddr, i32 0, i32 24, i32 13, i32 4, i32 1, i32 0, i32 1,
@@ -20,9 +20,9 @@ define void @test2(i32 %a1, i32 %vaddr) {
ret void
}
-;CHECK_LABEL: @test3
-;CHECK: TBUFFER_STORE_FORMAT_XY {{v\[[0-9]+:[0-9]+\]}}, 16, -1, 0, -1, 0, 11, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test3(i32 %a1, i32 %vaddr) {
+;CHECK-LABEL: @test3
+;CHECK: TBUFFER_STORE_FORMAT_XY {{v\[[0-9]+:[0-9]+\]}}, 0x10, -1, 0, -1, 0, 11, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
+define void @test3(i32 %a1, i32 %vaddr) #0 {
%vdata = insertelement <2 x i32> undef, i32 %a1, i32 0
call void @llvm.SI.tbuffer.store.v2i32(<16 x i8> undef, <2 x i32> %vdata,
i32 2, i32 %vaddr, i32 0, i32 16, i32 11, i32 4, i32 1, i32 0, i32 1,
@@ -30,9 +30,9 @@ define void @test3(i32 %a1, i32 %vaddr) {
ret void
}
-;CHECK_LABEL: @test4
-;CHECK: TBUFFER_STORE_FORMAT_X {{v[0-9]+}}, 8, -1, 0, -1, 0, 4, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
-define void @test4(i32 %vdata, i32 %vaddr) {
+;CHECK-LABEL: @test4
+;CHECK: TBUFFER_STORE_FORMAT_X {{v[0-9]+}}, 0x8, -1, 0, -1, 0, 4, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
+define void @test4(i32 %vdata, i32 %vaddr) #0 {
call void @llvm.SI.tbuffer.store.i32(<16 x i8> undef, i32 %vdata,
i32 1, i32 %vaddr, i32 0, i32 8, i32 4, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
@@ -42,3 +42,5 @@ define void @test4(i32 %vdata, i32 %vaddr) {
declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+
+attributes #0 = { "ShaderType"="1" }
diff --git a/test/CodeGen/R600/llvm.amdgpu.dp4.ll b/test/CodeGen/R600/llvm.amdgpu.dp4.ll
new file mode 100644
index 000000000000..812b6a40ee59
--- /dev/null
+++ b/test/CodeGen/R600/llvm.amdgpu.dp4.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s
+
+declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) nounwind readnone
+
+define void @test_dp4(float addrspace(1)* %out, <4 x float> addrspace(1)* %a, <4 x float> addrspace(1)* %b) nounwind {
+ %src0 = load <4 x float> addrspace(1)* %a, align 16
+ %src1 = load <4 x float> addrspace(1)* %b, align 16
+ %dp4 = call float @llvm.AMDGPU.dp4(<4 x float> %src0, <4 x float> %src1) nounwind readnone
+ store float %dp4, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.amdgpu.kilp.ll b/test/CodeGen/R600/llvm.amdgpu.kilp.ll
new file mode 100644
index 000000000000..1b8b1bfd2089
--- /dev/null
+++ b/test/CodeGen/R600/llvm.amdgpu.kilp.ll
@@ -0,0 +1,20 @@
+; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @kilp_gs_const
+; SI: S_MOV_B64 exec, 0
+define void @kilp_gs_const() #0 {
+main_body:
+ %0 = icmp ule i32 0, 3
+ %1 = select i1 %0, float 1.000000e+00, float -1.000000e+00
+ call void @llvm.AMDGPU.kilp(float %1)
+ %2 = icmp ule i32 3, 0
+ %3 = select i1 %2, float 1.000000e+00, float -1.000000e+00
+ call void @llvm.AMDGPU.kilp(float %3)
+ ret void
+}
+
+declare void @llvm.AMDGPU.kilp(float)
+
+attributes #0 = { "ShaderType"="2" }
+
+!0 = metadata !{metadata !"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.amdgpu.lrp.ll b/test/CodeGen/R600/llvm.amdgpu.lrp.ll
new file mode 100644
index 000000000000..c493a016e330
--- /dev/null
+++ b/test/CodeGen/R600/llvm.amdgpu.lrp.ll
@@ -0,0 +1,12 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare float @llvm.AMDGPU.lrp(float, float, float) nounwind readnone
+
+; FUNC-LABEL: @test_lrp
+; SI: V_SUB_F32
+; SI: V_MAD_F32
+define void @test_lrp(float addrspace(1)* %out, float %src0, float %src1, float %src2) nounwind {
+ %mad = call float @llvm.AMDGPU.lrp(float %src0, float %src1, float %src2) nounwind readnone
+ store float %mad, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.cos.ll b/test/CodeGen/R600/llvm.cos.ll
index aaf2305dd0ba..9e7a4deda69b 100644
--- a/test/CodeGen/R600/llvm.cos.ll
+++ b/test/CodeGen/R600/llvm.cos.ll
@@ -1,19 +1,40 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s -check-prefix=EG -check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s -check-prefix=SI -check-prefix=FUNC
-;CHECK: MULADD_IEEE *
-;CHECK: FRACT *
-;CHECK: ADD *
-;CHECK: COS * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;FUNC-LABEL: test
+;EG: MULADD_IEEE *
+;EG: FRACT *
+;EG: ADD *
+;EG: COS * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG-NOT: COS
+;SI: V_COS_F32
+;SI-NOT: V_COS_F32
-define void @test(<4 x float> inreg %reg0) #0 {
- %r0 = extractelement <4 x float> %reg0, i32 0
- %r1 = call float @llvm.cos.f32(float %r0)
- %vec = insertelement <4 x float> undef, float %r1, i32 0
- call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+define void @test(float addrspace(1)* %out, float %x) #1 {
+ %cos = call float @llvm.cos.f32(float %x)
+ store float %cos, float addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: testv
+;EG: COS * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG: COS * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG: COS * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG: COS * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG-NOT: COS
+;SI: V_COS_F32
+;SI: V_COS_F32
+;SI: V_COS_F32
+;SI: V_COS_F32
+;SI-NOT: V_COS_F32
+
+define void @testv(<4 x float> addrspace(1)* %out, <4 x float> inreg %vx) #1 {
+ %cos = call <4 x float> @llvm.cos.v4f32(<4 x float> %vx)
+ store <4 x float> %cos, <4 x float> addrspace(1)* %out
ret void
}
declare float @llvm.cos.f32(float) readnone
-declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+declare <4 x float> @llvm.cos.v4f32(<4 x float>) readnone
attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.exp2.ll b/test/CodeGen/R600/llvm.exp2.ll
new file mode 100644
index 000000000000..119d5ef49a5e
--- /dev/null
+++ b/test/CodeGen/R600/llvm.exp2.ll
@@ -0,0 +1,79 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
+
+;FUNC-LABEL: @test
+;EG-CHECK: EXP_IEEE
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_EXP_F32
+
+define void @test(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.exp2.f32(float %in)
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: @testv2
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+
+define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+entry:
+ %0 = call <2 x float> @llvm.exp2.v2f32(<2 x float> %in)
+ store <2 x float> %0, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: @testv4
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+;EG-CHECK: EXP_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+;SI-CHECK: V_EXP_F32
+define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+entry:
+ %0 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %in)
+ store <4 x float> %0, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.exp2.f32(float) readnone
+declare <2 x float> @llvm.exp2.v2f32(<2 x float>) readnone
+declare <4 x float> @llvm.exp2.v4f32(<4 x float>) readnone
diff --git a/test/CodeGen/R600/llvm.log2.ll b/test/CodeGen/R600/llvm.log2.ll
new file mode 100644
index 000000000000..4cba2d44a5c3
--- /dev/null
+++ b/test/CodeGen/R600/llvm.log2.ll
@@ -0,0 +1,79 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
+
+;FUNC-LABEL: @test
+;EG-CHECK: LOG_IEEE
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_LOG_F32
+
+define void @test(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.log2.f32(float %in)
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: @testv2
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+
+define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+entry:
+ %0 = call <2 x float> @llvm.log2.v2f32(<2 x float> %in)
+ store <2 x float> %0, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: @testv4
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+;EG-CHECK: LOG_IEEE
+; FIXME: We should be able to merge these packets together on Cayman so we
+; have a maximum of 4 instructions.
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+;SI-CHECK: V_LOG_F32
+define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+entry:
+ %0 = call <4 x float> @llvm.log2.v4f32(<4 x float> %in)
+ store <4 x float> %0, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.log2.f32(float) readnone
+declare <2 x float> @llvm.log2.v2f32(<2 x float>) readnone
+declare <4 x float> @llvm.log2.v4f32(<4 x float>) readnone
diff --git a/test/CodeGen/R600/llvm.pow.ll b/test/CodeGen/R600/llvm.pow.ll
index b587d2b2aea1..c4ae652619c2 100644
--- a/test/CodeGen/R600/llvm.pow.ll
+++ b/test/CodeGen/R600/llvm.pow.ll
@@ -1,10 +1,11 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL NON-IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}}
-;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;CHECK-LABEL: test1:
+;CHECK: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
-define void @test(<4 x float> inreg %reg0) #0 {
+define void @test1(<4 x float> inreg %reg0) #0 {
%r0 = extractelement <4 x float> %reg0, i32 0
%r1 = extractelement <4 x float> %reg0, i32 1
%r2 = call float @llvm.pow.f32( float %r0, float %r1)
@@ -13,7 +14,27 @@ define void @test(<4 x float> inreg %reg0) #0 {
ret void
}
+;CHECK-LABEL: test2:
+;CHECK: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+;CHECK-NEXT: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}},
+;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PS}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], PV\.[XYZW]}},
+define void @test2(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
+ %vec = call <4 x float> @llvm.pow.v4f32( <4 x float> %reg0, <4 x float> %reg1)
+ call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+ ret void
+}
+
declare float @llvm.pow.f32(float ,float ) readonly
+declare <4 x float> @llvm.pow.v4f32(<4 x float> ,<4 x float> ) readonly
declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.rint.f64.ll b/test/CodeGen/R600/llvm.rint.f64.ll
new file mode 100644
index 000000000000..3e2884b7ce02
--- /dev/null
+++ b/test/CodeGen/R600/llvm.rint.f64.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @rint_f64
+; CI: V_RNDNE_F64_e32
+
+; SI-DAG: V_ADD_F64
+; SI-DAG: V_ADD_F64
+; SI-DAG: V_CMP_GT_F64_e64
+; SI: V_CNDMASK_B32
+; SI: V_CNDMASK_B32
+; SI: S_ENDPGM
+define void @rint_f64(double addrspace(1)* %out, double %in) {
+entry:
+ %0 = call double @llvm.rint.f64(double %in)
+ store double %0, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @rint_v2f64
+; CI: V_RNDNE_F64_e32
+; CI: V_RNDNE_F64_e32
+define void @rint_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+entry:
+ %0 = call <2 x double> @llvm.rint.v2f64(<2 x double> %in)
+ store <2 x double> %0, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @rint_v4f64
+; CI: V_RNDNE_F64_e32
+; CI: V_RNDNE_F64_e32
+; CI: V_RNDNE_F64_e32
+; CI: V_RNDNE_F64_e32
+define void @rint_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+entry:
+ %0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %in)
+ store <4 x double> %0, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+
+declare double @llvm.rint.f64(double) #0
+declare <2 x double> @llvm.rint.v2f64(<2 x double>) #0
+declare <4 x double> @llvm.rint.v4f64(<4 x double>) #0
diff --git a/test/CodeGen/R600/llvm.rint.ll b/test/CodeGen/R600/llvm.rint.ll
index c174b335f0e8..209bb4358fd5 100644
--- a/test/CodeGen/R600/llvm.rint.ll
+++ b/test/CodeGen/R600/llvm.rint.ll
@@ -1,54 +1,61 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-
-; R600-CHECK: @f32
-; R600-CHECK: RNDNE
-; SI-CHECK: @f32
-; SI-CHECK: V_RNDNE_F32_e32
-define void @f32(float addrspace(1)* %out, float %in) {
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @rint_f32
+; R600: RNDNE
+
+; SI: V_RNDNE_F32_e32
+define void @rint_f32(float addrspace(1)* %out, float %in) {
entry:
- %0 = call float @llvm.rint.f32(float %in)
+ %0 = call float @llvm.rint.f32(float %in) #0
store float %0, float addrspace(1)* %out
ret void
}
-; R600-CHECK: @v2f32
-; R600-CHECK: RNDNE
-; R600-CHECK: RNDNE
-; SI-CHECK: @v2f32
-; SI-CHECK: V_RNDNE_F32_e32
-; SI-CHECK: V_RNDNE_F32_e32
-define void @v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+; FUNC-LABEL: @rint_v2f32
+; R600: RNDNE
+; R600: RNDNE
+
+; SI: V_RNDNE_F32_e32
+; SI: V_RNDNE_F32_e32
+define void @rint_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
- %0 = call <2 x float> @llvm.rint.v2f32(<2 x float> %in)
+ %0 = call <2 x float> @llvm.rint.v2f32(<2 x float> %in) #0
store <2 x float> %0, <2 x float> addrspace(1)* %out
ret void
}
-; R600-CHECK: @v4f32
-; R600-CHECK: RNDNE
-; R600-CHECK: RNDNE
-; R600-CHECK: RNDNE
-; R600-CHECK: RNDNE
-; SI-CHECK: @v4f32
-; SI-CHECK: V_RNDNE_F32_e32
-; SI-CHECK: V_RNDNE_F32_e32
-; SI-CHECK: V_RNDNE_F32_e32
-; SI-CHECK: V_RNDNE_F32_e32
-define void @v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+; FUNC-LABEL: @rint_v4f32
+; R600: RNDNE
+; R600: RNDNE
+; R600: RNDNE
+; R600: RNDNE
+
+; SI: V_RNDNE_F32_e32
+; SI: V_RNDNE_F32_e32
+; SI: V_RNDNE_F32_e32
+; SI: V_RNDNE_F32_e32
+define void @rint_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
- %0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %in)
+ %0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %in) #0
store <4 x float> %0, <4 x float> addrspace(1)* %out
ret void
}
-; Function Attrs: nounwind readonly
-declare float @llvm.rint.f32(float) #0
+; FUNC-LABEL: @legacy_amdil_round_nearest_f32
+; R600: RNDNE
-; Function Attrs: nounwind readonly
-declare <2 x float> @llvm.rint.v2f32(<2 x float>) #0
+; SI: V_RNDNE_F32_e32
+define void @legacy_amdil_round_nearest_f32(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.AMDIL.round.nearest.f32(float %in) #0
+ store float %0, float addrspace(1)* %out
+ ret void
+}
-; Function Attrs: nounwind readonly
+declare float @llvm.AMDIL.round.nearest.f32(float) #0
+declare float @llvm.rint.f32(float) #0
+declare <2 x float> @llvm.rint.v2f32(<2 x float>) #0
declare <4 x float> @llvm.rint.v4f32(<4 x float>) #0
-attributes #0 = { nounwind readonly }
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.sin.ll b/test/CodeGen/R600/llvm.sin.ll
index 9eb998315fef..53006bad5c4b 100644
--- a/test/CodeGen/R600/llvm.sin.ll
+++ b/test/CodeGen/R600/llvm.sin.ll
@@ -1,19 +1,59 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: MULADD_IEEE *
-;CHECK: FRACT *
-;CHECK: ADD *
-;CHECK: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-
-define void @test(<4 x float> inreg %reg0) #0 {
- %r0 = extractelement <4 x float> %reg0, i32 0
- %r1 = call float @llvm.sin.f32( float %r0)
- %vec = insertelement <4 x float> undef, float %r1, i32 0
- call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
+;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+;RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=SI-SAFE -check-prefix=FUNC %s
+;RUN: llc -march=r600 -mcpu=SI -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI -check-prefix=SI-UNSAFE -check-prefix=FUNC %s
+
+;FUNC-LABEL: test
+;EG: MULADD_IEEE *
+;EG: FRACT *
+;EG: ADD *
+;EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG-NOT: SIN
+;SI: V_MUL_F32
+;SI: V_FRACT_F32
+;SI: V_SIN_F32
+;SI-NOT: V_SIN_F32
+
+define void @test(float addrspace(1)* %out, float %x) #1 {
+ %sin = call float @llvm.sin.f32(float %x)
+ store float %sin, float addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: testf
+;SI-UNSAFE: 4.774
+;SI-UNSAFE: V_MUL_F32
+;SI-SAFE: V_MUL_F32
+;SI-SAFE: V_MUL_F32
+;SI: V_FRACT_F32
+;SI: V_SIN_F32
+;SI-NOT: V_SIN_F32
+
+define void @testf(float addrspace(1)* %out, float %x) #1 {
+ %y = fmul float 3.0, %x
+ %sin = call float @llvm.sin.f32(float %y)
+ store float %sin, float addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: testv
+;EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG: SIN * T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+;EG-NOT: SIN
+;SI: V_SIN_F32
+;SI: V_SIN_F32
+;SI: V_SIN_F32
+;SI: V_SIN_F32
+;SI-NOT: V_SIN_F32
+
+define void @testv(<4 x float> addrspace(1)* %out, <4 x float> %vx) #1 {
+ %sin = call <4 x float> @llvm.sin.v4f32( <4 x float> %vx)
+ store <4 x float> %sin, <4 x float> addrspace(1)* %out
ret void
}
declare float @llvm.sin.f32(float) readnone
-declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+declare <4 x float> @llvm.sin.v4f32(<4 x float>) readnone
attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/llvm.sqrt.ll b/test/CodeGen/R600/llvm.sqrt.ll
index 0d0d18618990..4eee37ffbe21 100644
--- a/test/CodeGen/R600/llvm.sqrt.ll
+++ b/test/CodeGen/R600/llvm.sqrt.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=r600 --mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 --mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 --mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
; R600-CHECK-LABEL: @sqrt_f32
; R600-CHECK: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].Z
diff --git a/test/CodeGen/R600/llvm.trunc.ll b/test/CodeGen/R600/llvm.trunc.ll
new file mode 100644
index 000000000000..fa6fb9906dde
--- /dev/null
+++ b/test/CodeGen/R600/llvm.trunc.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK-LABEL: @trunc_f32
+; CHECK: TRUNC
+
+define void @trunc_f32(float addrspace(1)* %out, float %in) {
+entry:
+ %0 = call float @llvm.trunc.f32(float %in)
+ store float %0, float addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.trunc.f32(float)
diff --git a/test/CodeGen/R600/load-i1.ll b/test/CodeGen/R600/load-i1.ll
new file mode 100644
index 000000000000..9ba81b85f59b
--- /dev/null
+++ b/test/CodeGen/R600/load-i1.ll
@@ -0,0 +1,107 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+
+
+; SI-LABEL: @global_copy_i1_to_i1
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_AND_B32_e32 v{{[0-9]+}}, 1
+; SI: BUFFER_STORE_BYTE
+; SI: S_ENDPGM
+define void @global_copy_i1_to_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ store i1 %load, i1 addrspace(1)* %out, align 1
+ ret void
+}
+
+; SI-LABEL: @global_sextload_i1_to_i32
+; XSI: BUFFER_LOAD_BYTE
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @global_zextload_i1_to_i32
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @global_sextload_i1_to_i64
+; XSI: BUFFER_LOAD_BYTE
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @global_zextload_i1_to_i64
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @i1_arg
+; SI: BUFFER_LOAD_UBYTE
+; SI: V_AND_B32_e32
+; SI: BUFFER_STORE_BYTE
+; SI: S_ENDPGM
+define void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
+ store i1 %x, i1 addrspace(1)* %out, align 1
+ ret void
+}
+
+; SI-LABEL: @i1_arg_zext_i32
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
+ %ext = zext i1 %x to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @i1_arg_zext_i64
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
+ %ext = zext i1 %x to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @i1_arg_sext_i32
+; XSI: BUFFER_LOAD_BYTE
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
+ %ext = sext i1 %x to i32
+  store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @i1_arg_sext_i64
+; XSI: BUFFER_LOAD_BYTE
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
+ %ext = sext i1 %x to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
index 0153524d136c..8905fbd3aeb6 100644
--- a/test/CodeGen/R600/load.ll
+++ b/test/CodeGen/R600/load.ll
@@ -1,16 +1,15 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
;===------------------------------------------------------------------------===;
; GLOBAL ADDRESS SPACE
;===------------------------------------------------------------------------===;
; Load an i8 value from the global address space.
-; R600-CHECK-LABEL: @load_i8
+; FUNC-LABEL: @load_i8
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_i8
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
%1 = load i8 addrspace(1)* %in
@@ -19,13 +18,12 @@ define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
ret void
}
-; R600-CHECK-LABEL: @load_i8_sext
+; FUNC-LABEL: @load_i8_sext
; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 24
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 24
-; SI-CHECK-LABEL: @load_i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE
define void @load_i8_sext(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
@@ -35,10 +33,9 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8
+; FUNC-LABEL: @load_v2i8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @load_v2i8
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
define void @load_v2i8(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
@@ -49,7 +46,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8_sext
+; FUNC-LABEL: @load_v2i8_sext
; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
@@ -60,7 +57,6 @@ entry:
; R600-CHECK-DAG: 24
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
; R600-CHECK-DAG: 24
-; SI-CHECK-LABEL: @load_v2i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE
; SI-CHECK: BUFFER_LOAD_SBYTE
define void @load_v2i8_sext(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
@@ -71,12 +67,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8
+; FUNC-LABEL: @load_v4i8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
; R600-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: @load_v4i8
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
; SI-CHECK: BUFFER_LOAD_UBYTE
@@ -89,7 +84,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8_sext
+; FUNC-LABEL: @load_v4i8_sext
; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: VTX_READ_8 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
@@ -110,7 +105,6 @@ entry:
; R600-CHECK-DAG: 24
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
; R600-CHECK-DAG: 24
-; SI-CHECK-LABEL: @load_v4i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE
; SI-CHECK: BUFFER_LOAD_SBYTE
; SI-CHECK: BUFFER_LOAD_SBYTE
@@ -124,9 +118,8 @@ entry:
}
; Load an i16 value from the global address space.
-; R600-CHECK-LABEL: @load_i16
+; FUNC-LABEL: @load_i16
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_i16
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
@@ -136,13 +129,12 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i16_sext
+; FUNC-LABEL: @load_i16_sext
; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 16
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 16
-; SI-CHECK-LABEL: @load_i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_i16_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
@@ -152,10 +144,9 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16
+; FUNC-LABEL: @load_v2i16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @load_v2i16
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_v2i16(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
@@ -166,7 +157,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16_sext
+; FUNC-LABEL: @load_v2i16_sext
; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
@@ -177,7 +168,6 @@ entry:
; R600-CHECK-DAG: 16
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
; R600-CHECK-DAG: 16
-; SI-CHECK-LABEL: @load_v2i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_v2i16_sext(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
@@ -188,12 +178,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16
+; FUNC-LABEL: @load_v4i16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
; R600-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: @load_v4i16
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
; SI-CHECK: BUFFER_LOAD_USHORT
@@ -206,7 +195,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16_sext
+; FUNC-LABEL: @load_v4i16_sext
; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
; R600-CHECK-DAG: VTX_READ_16 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
@@ -227,7 +216,6 @@ entry:
; R600-CHECK-DAG: 16
; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
; R600-CHECK-DAG: 16
-; SI-CHECK-LABEL: @load_v4i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
; SI-CHECK: BUFFER_LOAD_SSHORT
; SI-CHECK: BUFFER_LOAD_SSHORT
@@ -241,10 +229,9 @@ entry:
}
; load an i32 value from the global address space.
-; R600-CHECK-LABEL: @load_i32
+; FUNC-LABEL: @load_i32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_i32
; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
define void @load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -254,10 +241,9 @@ entry:
}
; load a f32 value from the global address space.
-; R600-CHECK-LABEL: @load_f32
+; FUNC-LABEL: @load_f32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_f32
; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
define void @load_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
entry:
@@ -267,10 +253,9 @@ entry:
}
; load a v2f32 value from the global address space
-; R600-CHECK-LABEL: @load_v2f32
+; FUNC-LABEL: @load_v2f32
+; R600-CHECK: MEM_RAT
; R600-CHECK: VTX_READ_64
-
-; SI-CHECK-LABEL: @load_v2f32
; SI-CHECK: BUFFER_LOAD_DWORDX2
define void @load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
entry:
@@ -279,11 +264,8 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i64
-; R600-CHECK: MEM_RAT
-; R600-CHECK: MEM_RAT
-
-; SI-CHECK-LABEL: @load_i64
+; FUNC-LABEL: @load_i64
+; R600-CHECK: VTX_READ_64
; SI-CHECK: BUFFER_LOAD_DWORDX2
define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
@@ -292,13 +274,12 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i64_sext
+; FUNC-LABEL: @load_i64_sext
; R600-CHECK: MEM_RAT
; R600-CHECK: MEM_RAT
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
; R600-CHECK: 31
-; SI-CHECK-LABEL: @load_i64_sext
-; SI-CHECK: BUFFER_LOAD_DWORDX2 [[VAL:v\[[0-9]:[0-9]\]]]
+; SI-CHECK: BUFFER_LOAD_DWORD
define void @load_i64_sext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -308,7 +289,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i64_zext
+; FUNC-LABEL: @load_i64_zext
; R600-CHECK: MEM_RAT
; R600-CHECK: MEM_RAT
define void @load_i64_zext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -319,18 +300,65 @@ entry:
ret void
}
+; FUNC-LABEL: @load_v8i32
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; XXX: We should be using DWORDX4 instructions on SI.
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+define void @load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) {
+entry:
+ %0 = load <8 x i32> addrspace(1)* %in
+ store <8 x i32> %0, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @load_v16i32
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; R600-CHECK: VTX_READ_128
+; XXX: We should be using DWORDX4 instructions on SI.
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+; SI-CHECK: BUFFER_LOAD_DWORD
+define void @load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) {
+entry:
+ %0 = load <16 x i32> addrspace(1)* %in
+ store <16 x i32> %0, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
;===------------------------------------------------------------------------===;
; CONSTANT ADDRESS SPACE
;===------------------------------------------------------------------------===;
; Load a sign-extended i8 value
-; R600-CHECK-LABEL: @load_const_i8_sext
+; FUNC-LABEL: @load_const_i8_sext
; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 24
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 24
-; SI-CHECK-LABEL: @load_const_i8_sext
; SI-CHECK: BUFFER_LOAD_SBYTE v{{[0-9]+}},
define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
@@ -341,9 +369,8 @@ entry:
}
; Load an aligned i8 value
-; R600-CHECK-LABEL: @load_const_i8_aligned
+; FUNC-LABEL: @load_const_i8_aligned
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i8_aligned
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
@@ -354,9 +381,8 @@ entry:
}
; Load an un-aligned i8 value
-; R600-CHECK-LABEL: @load_const_i8_unaligned
+; FUNC-LABEL: @load_const_i8_unaligned
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i8_unaligned
; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
@@ -368,13 +394,12 @@ entry:
}
; Load a sign-extended i16 value
-; R600-CHECK-LABEL: @load_const_i16_sext
+; FUNC-LABEL: @load_const_i16_sext
; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
; R600-CHECK: 16
; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
; R600-CHECK: 16
-; SI-CHECK-LABEL: @load_const_i16_sext
; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_const_i16_sext(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
@@ -385,9 +410,8 @@ entry:
}
; Load an aligned i16 value
-; R600-CHECK-LABEL: @load_const_i16_aligned
+; FUNC-LABEL: @load_const_i16_aligned
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i16_aligned
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
@@ -398,9 +422,8 @@ entry:
}
; Load an un-aligned i16 value
-; R600-CHECK-LABEL: @load_const_i16_unaligned
+; FUNC-LABEL: @load_const_i16_unaligned
; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: @load_const_i16_unaligned
; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
@@ -412,10 +435,9 @@ entry:
}
; Load an i32 value from the constant address space.
-; R600-CHECK-LABEL: @load_const_addrspace_i32
+; FUNC-LABEL: @load_const_addrspace_i32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_const_addrspace_i32
; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
@@ -425,10 +447,9 @@ entry:
}
; Load a f32 value from the constant address space.
-; R600-CHECK-LABEL: @load_const_addrspace_f32
+; FUNC-LABEL: @load_const_addrspace_f32
; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK-LABEL: @load_const_addrspace_f32
; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
%1 = load float addrspace(2)* %in
@@ -441,9 +462,8 @@ define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(
;===------------------------------------------------------------------------===;
; Load an i8 value from the local address space.
-; R600-CHECK-LABEL: @load_i8_local
+; FUNC-LABEL: @load_i8_local
; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-LABEL: @load_i8_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U8
@@ -454,10 +474,9 @@ define void @load_i8_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
ret void
}
-; R600-CHECK-LABEL: @load_i8_sext_local
+; FUNC-LABEL: @load_i8_sext_local
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: ASHR
-; SI-CHECK-LABEL: @load_i8_sext_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I8
@@ -469,10 +488,9 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8_local
+; FUNC-LABEL: @load_v2i8_local
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-LABEL: @load_v2i8_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U8
@@ -485,12 +503,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i8_sext_local
+; FUNC-LABEL: @load_v2i8_sext_local
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v2i8_sext_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I8
@@ -503,12 +520,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8_local
+; FUNC-LABEL: @load_v4i8_local
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-LABEL: @load_v4i8_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U8
@@ -523,7 +539,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i8_sext_local
+; FUNC-LABEL: @load_v4i8_sext_local
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
; R600-CHECK-DAG: LDS_UBYTE_READ_RET
@@ -532,7 +548,6 @@ entry:
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v4i8_sext_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I8
@@ -548,9 +563,8 @@ entry:
}
; Load an i16 value from the local address space.
-; R600-CHECK-LABEL: @load_i16_local
+; FUNC-LABEL: @load_i16_local
; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-LABEL: @load_i16_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U16
@@ -562,10 +576,9 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_i16_sext_local
+; FUNC-LABEL: @load_i16_sext_local
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: ASHR
-; SI-CHECK-LABEL: @load_i16_sext_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I16
@@ -577,10 +590,9 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16_local
+; FUNC-LABEL: @load_v2i16_local
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-LABEL: @load_v2i16_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U16
@@ -593,12 +605,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v2i16_sext_local
+; FUNC-LABEL: @load_v2i16_sext_local
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v2i16_sext_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I16
@@ -611,12 +622,11 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16_local
+; FUNC-LABEL: @load_v4i16_local
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-LABEL: @load_v4i16_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_U16
@@ -631,7 +641,7 @@ entry:
ret void
}
-; R600-CHECK-LABEL: @load_v4i16_sext_local
+; FUNC-LABEL: @load_v4i16_sext_local
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
; R600-CHECK-DAG: LDS_USHORT_READ_RET
@@ -640,7 +650,6 @@ entry:
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
; R600-CHECK-DAG: ASHR
-; SI-CHECK-LABEL: @load_v4i16_sext_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_I16
@@ -656,9 +665,8 @@ entry:
}
; load an i32 value from the local address space.
-; R600-CHECK-LABEL: @load_i32_local
+; FUNC-LABEL: @load_i32_local
; R600-CHECK: LDS_READ_RET
-; SI-CHECK-LABEL: @load_i32_local
; SI-CHECK-NOT: S_WQM_B64
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_B32
@@ -670,9 +678,8 @@ entry:
}
; load a f32 value from the local address space.
-; R600-CHECK-LABEL: @load_f32_local
+; FUNC-LABEL: @load_f32_local
; R600-CHECK: LDS_READ_RET
-; SI-CHECK-LABEL: @load_f32_local
; SI-CHECK: S_MOV_B32 m0
; SI-CHECK: DS_READ_B32
define void @load_f32_local(float addrspace(1)* %out, float addrspace(3)* %in) {
@@ -683,13 +690,11 @@ entry:
}
; load a v2f32 value from the local address space
-; R600-CHECK-LABEL: @load_v2f32_local
+; FUNC-LABEL: @load_v2f32_local
; R600-CHECK: LDS_READ_RET
; R600-CHECK: LDS_READ_RET
-; SI-CHECK-LABEL: @load_v2f32_local
; SI-CHECK: S_MOV_B32 m0
-; SI-CHECK: DS_READ_B32
-; SI-CHECK: DS_READ_B32
+; SI-CHECK: DS_READ_B64
define void @load_v2f32_local(<2 x float> addrspace(1)* %out, <2 x float> addrspace(3)* %in) {
entry:
%0 = load <2 x float> addrspace(3)* %in
diff --git a/test/CodeGen/R600/load64.ll b/test/CodeGen/R600/load64.ll
index e351e4135a7d..a117557e98e0 100644
--- a/test/CodeGen/R600/load64.ll
+++ b/test/CodeGen/R600/load64.ll
@@ -1,18 +1,28 @@
; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
; load a f64 value from the global address space.
-; CHECK: @load_f64
+; CHECK-LABEL: @load_f64:
; CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
-entry:
- %0 = load double addrspace(1)* %in
- store double %0, double addrspace(1)* %out
+ %1 = load double addrspace(1)* %in
+ store double %1, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: @load_i64:
+; CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
+define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %tmp = load i64 addrspace(1)* %in
+ store i64 %tmp, i64 addrspace(1)* %out, align 8
ret void
}
; Load a f64 value from the constant address space.
-; CHECK: @load_const_addrspace_f64
+; CHECK-LABEL: @load_const_addrspace_f64:
; CHECK: S_LOAD_DWORDX2 s[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_STORE_DWORDX2 v[{{[0-9]+:[0-9]+}}]
define void @load_const_addrspace_f64(double addrspace(1)* %out, double addrspace(2)* %in) {
%1 = load double addrspace(2)* %in
store double %1, double addrspace(1)* %out
diff --git a/test/CodeGen/R600/local-64.ll b/test/CodeGen/R600/local-64.ll
new file mode 100644
index 000000000000..c52b41bb1b5a
--- /dev/null
+++ b/test/CodeGen/R600/local-64.ll
@@ -0,0 +1,158 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @local_i32_load
+; SI: DS_READ_B32 [[REG:v[0-9]+]], v{{[0-9]+}}, 0x1c, [M0]
+; SI: BUFFER_STORE_DWORD [[REG]],
+define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %in, i32 7
+ %val = load i32 addrspace(3)* %gep, align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i32_load_0_offset
+; SI: DS_READ_B32 [[REG:v[0-9]+]], v{{[0-9]+}}, 0x0, [M0]
+; SI: BUFFER_STORE_DWORD [[REG]],
+define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
+ %val = load i32 addrspace(3)* %in, align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i8_load_i16_max_offset
+; SI-NOT: ADD
+; SI: DS_READ_U8 [[REG:v[0-9]+]], {{v[0-9]+}}, 0xffff, [M0]
+; SI: BUFFER_STORE_BYTE [[REG]],
+define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i8 addrspace(3)* %in, i32 65535
+ %val = load i8 addrspace(3)* %gep, align 4
+ store i8 %val, i8 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i8_load_over_i16_max_offset
+; SI: S_ADD_I32 [[ADDR:s[0-9]+]], s{{[0-9]+}}, 0x10000
+; SI: V_MOV_B32_e32 [[VREGADDR:v[0-9]+]], [[ADDR]]
+; SI: DS_READ_U8 [[REG:v[0-9]+]], [[VREGADDR]], 0x0, [M0]
+; SI: BUFFER_STORE_BYTE [[REG]],
+define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i8 addrspace(3)* %in, i32 65536
+ %val = load i8 addrspace(3)* %gep, align 4
+ store i8 %val, i8 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @local_i64_load
+; SI-NOT: ADD
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0x38, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %in, i32 7
+ %val = load i64 addrspace(3)* %gep, align 8
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_i64_load_0_offset
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0x0, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
+ %val = load i64 addrspace(3)* %in, align 8
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_load
+; SI-NOT: ADD
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0x38, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
+ %gep = getelementptr double addrspace(3)* %in, i32 7
+ %val = load double addrspace(3)* %gep, align 8
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_load_0_offset
+; SI: DS_READ_B64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0x0, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[REG]],
+define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
+ %val = load double addrspace(3)* %in, align 8
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_i64_store
+; SI-NOT: ADD
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x38 [M0]
+define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %out, i32 7
+ store i64 5678, i64 addrspace(3)* %gep, align 8
+ ret void
+}
+
+; SI-LABEL: @local_i64_store_0_offset
+; SI-NOT: ADD
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0 [M0]
+define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
+ store i64 1234, i64 addrspace(3)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_store
+; SI-NOT: ADD
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x38 [M0]
+define void @local_f64_store(double addrspace(3)* %out) nounwind {
+ %gep = getelementptr double addrspace(3)* %out, i32 7
+ store double 16.0, double addrspace(3)* %gep, align 8
+ ret void
+}
+
+; SI-LABEL: @local_f64_store_0_offset
+; SI: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0 [M0]
+define void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
+ store double 20.0, double addrspace(3)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @local_v2i64_store
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x78 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x70 [M0]
+define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
+ %gep = getelementptr <2 x i64> addrspace(3)* %out, i32 7
+ store <2 x i64> <i64 5678, i64 5678>, <2 x i64> addrspace(3)* %gep, align 16
+ ret void
+}
+
+; SI-LABEL: @local_v2i64_store_0_offset
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x8 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0 [M0]
+define void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
+ store <2 x i64> <i64 1234, i64 1234>, <2 x i64> addrspace(3)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: @local_v4i64_store
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0xf8 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0xf0 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0xe8 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0xe0 [M0]
+define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
+ %gep = getelementptr <4 x i64> addrspace(3)* %out, i32 7
+ store <4 x i64> <i64 5678, i64 5678, i64 5678, i64 5678>, <4 x i64> addrspace(3)* %gep, align 16
+ ret void
+}
+
+; SI-LABEL: @local_v4i64_store_0_offset
+; SI-NOT: ADD
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x18 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x10 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x8 [M0]
+; SI-DAG: DS_WRITE_B64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, 0x0 [M0]
+define void @local_v4i64_store_0_offset(<4 x i64> addrspace(3)* %out) nounwind {
+ store <4 x i64> <i64 1234, i64 1234, i64 1234, i64 1234>, <4 x i64> addrspace(3)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/local-atomics.ll b/test/CodeGen/R600/local-atomics.ll
new file mode 100644
index 000000000000..5a44951055ea
--- /dev/null
+++ b/test/CodeGen/R600/local-atomics.ll
@@ -0,0 +1,254 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; FUNC-LABEL: @lds_atomic_xchg_ret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_WRXCHG_RTN_B32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xchg_ret_i32_offset:
+; SI: DS_WRXCHG_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; XXX - Is it really necessary to load 4 into VGPR?
+; FUNC-LABEL: @lds_atomic_add_ret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_ADD_RTN_U32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_ret_i32_offset:
+; SI: DS_ADD_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i32:
+; SI: DS_SUB_RTN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i32_offset:
+; SI: DS_SUB_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i32:
+; SI: DS_AND_RTN_B32
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i32_offset:
+; SI: DS_AND_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i32:
+; SI: DS_OR_RTN_B32
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i32_offset:
+; SI: DS_OR_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i32:
+; SI: DS_XOR_RTN_B32
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i32_offset:
+; SI: DS_XOR_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
+; XFUNC-LABEL: @lds_atomic_nand_ret_i32:
+; define void @lds_atomic_nand_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+; %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
+; store i32 %result, i32 addrspace(1)* %out, align 4
+; ret void
+; }
+
+; FUNC-LABEL: @lds_atomic_min_ret_i32:
+; SI: DS_MIN_RTN_I32
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_min_ret_i32_offset:
+; SI: DS_MIN_RTN_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i32:
+; SI: DS_MAX_RTN_I32
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i32_offset:
+; SI: DS_MAX_RTN_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i32:
+; SI: DS_MIN_RTN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i32_offset:
+; SI: DS_MIN_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i32:
+; SI: DS_MAX_RTN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i32_offset:
+; SI: DS_MAX_RTN_U32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/local-atomics64.ll b/test/CodeGen/R600/local-atomics64.ll
new file mode 100644
index 000000000000..849b033d84a9
--- /dev/null
+++ b/test/CodeGen/R600/local-atomics64.ll
@@ -0,0 +1,251 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; FUNC-LABEL: @lds_atomic_xchg_ret_i64:
+; SI: DS_WRXCHG_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xchg_ret_i64_offset:
+; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_ret_i64:
+; SI: DS_ADD_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_ret_i64_offset:
+; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; SI: DS_ADD_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}, 0x20, [M0]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i64 4
+ %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i64:
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI: DS_INC_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_ret_i64_offset:
+; SI: DS_INC_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i64:
+; SI: DS_SUB_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_ret_i64_offset:
+; SI: DS_SUB_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i64:
+; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
+; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
+; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
+; SI: DS_DEC_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_ret_i64_offset:
+; SI: DS_DEC_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i64:
+; SI: DS_AND_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_ret_i64_offset:
+; SI: DS_AND_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i64:
+; SI: DS_OR_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_ret_i64_offset:
+; SI: DS_OR_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i64:
+; SI: DS_XOR_RTN_B64
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_ret_i64_offset:
+; SI: DS_XOR_RTN_B64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
+; XFUNC-LABEL: @lds_atomic_nand_ret_i64:
+; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+; %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
+
+; FUNC-LABEL: @lds_atomic_min_ret_i64:
+; SI: DS_MIN_RTN_I64
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_min_ret_i64_offset:
+; SI: DS_MIN_RTN_I64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i64:
+; SI: DS_MAX_RTN_I64
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_ret_i64_offset:
+; SI: DS_MAX_RTN_I64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i64:
+; SI: DS_MIN_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_ret_i64_offset:
+; SI: DS_MIN_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i64:
+; SI: DS_MAX_RTN_U64
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_ret_i64_offset:
+; SI: DS_MAX_RTN_U64 {{.*}} 0x20
+; SI: S_ENDPGM
+define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
+ %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/local-memory-two-objects.ll b/test/CodeGen/R600/local-memory-two-objects.ll
index e2d840645d01..e29e4cc88fd9 100644
--- a/test/CodeGen/R600/local-memory-two-objects.ll
+++ b/test/CodeGen/R600/local-memory-two-objects.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
-@local_memory_two_objects.local_mem0 = internal addrspace(3) unnamed_addr global [4 x i32] zeroinitializer, align 4
-@local_memory_two_objects.local_mem1 = internal addrspace(3) unnamed_addr global [4 x i32] zeroinitializer, align 4
+@local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] zeroinitializer, align 4
+@local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] zeroinitializer, align 4
; EG-CHECK: @local_memory_two_objects
@@ -17,18 +17,19 @@
; this consistently on evergreen GPUs.
; EG-CHECK: LDS_WRITE
; EG-CHECK: LDS_WRITE
-; SI-CHECK: DS_WRITE_B32 0, {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
-; SI-CHECK-NOT: DS_WRITE_B32 0, {{v[0-9]*}}, v[[ADDRW]]
+; SI-CHECK: DS_WRITE_B32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
+; SI-CHECK-NOT: DS_WRITE_B32 {{v[0-9]*}}, v[[ADDRW]]
; GROUP_BARRIER must be the last instruction in a clause
; EG-CHECK: GROUP_BARRIER
; EG-CHECK-NEXT: ALU clause
-; Make sure the lds reads are using different addresses.
+; Make sure the lds reads are using different addresses, at different
+; constant offsets.
; EG-CHECK: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
; EG-CHECK-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
-; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, 0, [[ADDRR:v[0-9]+]]
-; SI-CHECK-NOT: DS_READ_B32 {{v[0-9]+}}, 0, [[ADDRR]]
+; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, [[ADDRR:v[0-9]+]], 0x10
+; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, [[ADDRR]], 0x0,
define void @local_memory_two_objects(i32 addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/local-memory.ll b/test/CodeGen/R600/local-memory.ll
index 2168a3d0bd27..51af4844cb5a 100644
--- a/test/CodeGen/R600/local-memory.ll
+++ b/test/CodeGen/R600/local-memory.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
; RUN: llc < %s -march=r600 -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=CI-CHECK %s
-@local_memory.local_mem = internal addrspace(3) unnamed_addr global [128 x i32] zeroinitializer, align 4
+@local_memory.local_mem = internal unnamed_addr addrspace(3) global [128 x i32] zeroinitializer, align 4
; EG-CHECK-LABEL: @local_memory
; SI-CHECK-LABEL: @local_memory
@@ -17,8 +17,8 @@
; CI-CHECK-NEXT: .long 32768
; EG-CHECK: LDS_WRITE
-; SI-CHECK_NOT: S_WQM_B64
-; SI-CHECK: DS_WRITE_B32 0
+; SI-CHECK-NOT: S_WQM_B64
+; SI-CHECK: DS_WRITE_B32
; GROUP_BARRIER must be the last instruction in a clause
; EG-CHECK: GROUP_BARRIER
@@ -26,7 +26,7 @@
; SI-CHECK: S_BARRIER
; EG-CHECK: LDS_READ_RET
-; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, 0
+; SI-CHECK: DS_READ_B32 {{v[0-9]+}},
define void @local_memory(i32 addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/loop-idiom.ll b/test/CodeGen/R600/loop-idiom.ll
new file mode 100644
index 000000000000..128f661077ea
--- /dev/null
+++ b/test/CodeGen/R600/loop-idiom.ll
@@ -0,0 +1,54 @@
+; RUN: opt -basicaa -loop-idiom -S < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
+; RUN: opt -basicaa -loop-idiom -S < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "r600--"
+
+
+
+; Make sure loop-idiom doesn't create memcpy or memset. There are no library
+; implementations of these for R600.
+
+; FUNC: @no_memcpy
+; R600-NOT: @llvm.memcpy
+; SI-NOT: @llvm.memcpy
+define void @no_memcpy(i8 addrspace(3)* %in, i32 %size) {
+entry:
+ %dest = alloca i8, i32 32
+ br label %for.body
+
+for.body:
+ %0 = phi i32 [0, %entry], [%4, %for.body]
+ %1 = getelementptr i8 addrspace(3)* %in, i32 %0
+ %2 = getelementptr i8* %dest, i32 %0
+ %3 = load i8 addrspace(3)* %1
+ store i8 %3, i8* %2
+ %4 = add i32 %0, 1
+ %5 = icmp eq i32 %4, %size
+ br i1 %5, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
+
+; FUNC: @no_memset
+; R600-NOT: @llvm.memset
+; R600-NOT: @memset_pattern16
+; SI-NOT: @llvm.memset
+; SI-NOT: @memset_pattern16
+define void @no_memset(i32 %size) {
+entry:
+ %dest = alloca i8, i32 32
+ br label %for.body
+
+for.body:
+ %0 = phi i32 [0, %entry], [%2, %for.body]
+ %1 = getelementptr i8* %dest, i32 %0
+ store i8 0, i8* %1
+ %2 = add i32 %0, 1
+ %3 = icmp eq i32 %2, %size
+ br i1 %3, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/CodeGen/R600/mad_int24.ll b/test/CodeGen/R600/mad_int24.ll
index df063ece35ae..abb52907b9b8 100644
--- a/test/CodeGen/R600/mad_int24.ll
+++ b/test/CodeGen/R600/mad_int24.ll
@@ -1,12 +1,15 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; EG-CHECK: @i32_mad24
+; FUNC-LABEL: @i32_mad24
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
-; EG-CHECK: MULLO_INT
-; CM-CHECK: MULADD_INT24 {{[ *]*}}T{{[0-9].[XYZW]}}, KC0[2].Z, KC0[2].W, KC0[3].X
-; SI-CHECK: V_MAD_I32_I24
+; EG: MULLO_INT
+; Make sure we aren't masking the inputs.
+; CM-NOT: AND
+; CM: MULADD_INT24
+; SI-NOT: AND
+; SI: V_MAD_I32_I24
define void @i32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
%0 = shl i32 %a, 8
diff --git a/test/CodeGen/R600/mad_uint24.ll b/test/CodeGen/R600/mad_uint24.ll
index 66a070ed9d4a..0f0893bd53c4 100644
--- a/test/CodeGen/R600/mad_uint24.ll
+++ b/test/CodeGen/R600/mad_uint24.ll
@@ -1,11 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; EG-CHECK-LABEL: @u32_mad24
-; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W, KC0[3].X
-; SI-CHECK-LABEL: @u32_mad24
-; SI-CHECK: V_MAD_U32_U24
+; FUNC-LABEL: @u32_mad24
+; EG: MULADD_UINT24
+; SI: V_MAD_U32_U24
define void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
@@ -19,21 +18,14 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i16_mad24
-; EG-CHECK-DAG: VTX_READ_16 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
-; EG-CHECK-DAG: VTX_READ_16 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
-; EG-CHECK-DAG: VTX_READ_16 [[C:T[0-9]\.X]], T{{[0-9]}}.X, 48
+; FUNC-LABEL: @i16_mad24
; The order of A and B does not matter.
-; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]], [[A]], [[B]], [[C]]
+; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MAD_CHAN]], literal.x
-; EG-CHECK: 16
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
-; EG-CHECK: 16
-; SI-CHECK-LABEL: @i16_mad24
-; SI-CHECK: V_MAD_U32_U24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 16, [[MAD]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 16, [[LSHL]]
+; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
+; EG: 16
+; SI: V_MAD_U32_U24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; SI: V_BFE_I32 v{{[0-9]}}, [[MAD]], 0, 16
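+; (Reading note, assuming standard GCN semantics: V_BFE_I32 dst, src, 0, 16 is
+; a signed bit-field extract, so it takes the low 16 bits of the mad result
+; and sign-extends them in one instruction instead of a shift pair.)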
define void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
@@ -44,21 +36,13 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i8_mad24
-; EG-CHECK-DAG: VTX_READ_8 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
-; EG-CHECK-DAG: VTX_READ_8 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
-; EG-CHECK-DAG: VTX_READ_8 [[C:T[0-9]\.X]], T{{[0-9]}}.X, 48
-; The order of A and B does not matter.
-; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]], [[A]], [[B]], [[C]]
+; FUNC-LABEL: @i8_mad24
+; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MAD_CHAN]], literal.x
-; EG-CHECK: 24
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
-; EG-CHECK: 24
-; SI-CHECK-LABEL: @i8_mad24
-; SI-CHECK: V_MAD_U32_U24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 24, [[MUL]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 24, [[LSHL]]
+; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
+; EG: 8
+; SI: V_MAD_U32_U24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8
define void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
@@ -68,3 +52,24 @@ entry:
store i32 %2, i32 addrspace(1)* %out
ret void
}
+
+; This tests for a bug where the mad_u24 pattern matcher would call
+; SimplifyDemandedBits on the first operand of the mul instruction
+; assuming that the pattern would be matched to a 24-bit mad. This
+; led to some instructions being incorrectly erased when the entire
+; 24-bit mad pattern wasn't being matched.
+
+; Check that the select instruction is not deleted.
+; FUNC-LABEL: @i24_i32_i32_mad
+; EG: CNDE_INT
+; SI: V_CNDMASK
+define void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+entry:
+ %0 = ashr i32 %a, 8
+ %1 = icmp ne i32 %c, 0
+ %2 = select i1 %1, i32 %0, i32 34
+ %3 = mul i32 %2, %c
+ %4 = add i32 %3, %d
+ store i32 %4, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/mubuf.ll b/test/CodeGen/R600/mubuf.ll
new file mode 100644
index 000000000000..f465d3dad8f0
--- /dev/null
+++ b/test/CodeGen/R600/mubuf.ll
@@ -0,0 +1,98 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck %s
+
+;;;==========================================================================;;;
+;;; MUBUF LOAD TESTS
+;;;==========================================================================;;;
+
+; MUBUF load with an immediate byte offset that fits into 12 bits
+; CHECK-LABEL: @mubuf_load0
+; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0x4 ; encoding: [0x04,0x80
+define void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %in, i64 1
+ %1 = load i32 addrspace(1)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; MUBUF load with the largest possible immediate offset
+; CHECK-LABEL: @mubuf_load1
+; CHECK: BUFFER_LOAD_UBYTE v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0xfff ; encoding: [0xff,0x8f
+define void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+ %0 = getelementptr i8 addrspace(1)* %in, i64 4095
+ %1 = load i8 addrspace(1)* %0
+ store i8 %1, i8 addrspace(1)* %out
+ ret void
+}
+
+; MUBUF load with an immediate byte offset that doesn't fit into 12 bits
+; CHECK-LABEL: @mubuf_load2
+; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0x0 ; encoding: [0x00,0x80
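+; (Offset arithmetic, for reference: the i32 index 1024 corresponds to a byte
+; offset of 1024 * 4 = 4096 = 0x1000, one past the 12-bit maximum of 0xfff, so
+; it cannot be folded into the immediate field and the encoded immediate above
+; stays 0x0.)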
+define void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %in, i64 1024
+ %1 = load i32 addrspace(1)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; MUBUF load with a 12-bit immediate offset and a register offset
+; CHECK-LABEL: @mubuf_load3
+; CHECK-NOT: ADD
+; CHECK: BUFFER_LOAD_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0x4 ; encoding: [0x04,0x80
+define void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %in, i64 %offset
+ %1 = getelementptr i32 addrspace(1)* %0, i64 1
+ %2 = load i32 addrspace(1)* %1
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+;;;==========================================================================;;;
+;;; MUBUF STORE TESTS
+;;;==========================================================================;;;
+
+; MUBUF store with an immediate byte offset that fits into 12 bits
+; CHECK-LABEL: @mubuf_store0
+; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0x4 ; encoding: [0x04,0x80
+define void @mubuf_store0(i32 addrspace(1)* %out) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %out, i64 1
+ store i32 0, i32 addrspace(1)* %0
+ ret void
+}
+
+; MUBUF store with the largest possible immediate offset
+; CHECK-LABEL: @mubuf_store1
+; CHECK: BUFFER_STORE_BYTE v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0xfff ; encoding: [0xff,0x8f
+
+define void @mubuf_store1(i8 addrspace(1)* %out) {
+entry:
+ %0 = getelementptr i8 addrspace(1)* %out, i64 4095
+ store i8 0, i8 addrspace(1)* %0
+ ret void
+}
+
+; MUBUF store with an immediate byte offset that doesn't fit into 12 bits
+; CHECK-LABEL: @mubuf_store2
+; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0x0 ; encoding: [0x00,0x80
+define void @mubuf_store2(i32 addrspace(1)* %out) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %out, i64 1024
+ store i32 0, i32 addrspace(1)* %0
+ ret void
+}
+
+; MUBUF store with a 12-bit immediate offset and a register offset
+; CHECK-LABEL: @mubuf_store3
+; CHECK-NOT: ADD
+; CHECK: BUFFER_STORE_DWORD v{{[0-9]}}, s[{{[0-9]:[0-9]}}] + v[{{[0-9]:[0-9]}}] + 0x4 ; encoding: [0x04,0x80
+define void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
+entry:
+ %0 = getelementptr i32 addrspace(1)* %out, i64 %offset
+ %1 = getelementptr i32 addrspace(1)* %0, i64 1
+ store i32 0, i32 addrspace(1)* %1
+ ret void
+}
diff --git a/test/CodeGen/R600/mul.ll b/test/CodeGen/R600/mul.ll
index 8c27e28df164..d231e92e27fa 100644
--- a/test/CodeGen/R600/mul.ll
+++ b/test/CodeGen/R600/mul.ll
@@ -1,15 +1,14 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; mul24 and mad24 are affected
-;EG-CHECK: @test2
-;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test2
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test2
-;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -20,17 +19,16 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK: @test4
-;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: @test4
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test4
-;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -40,3 +38,44 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @trunc_i64_mul_to_i32
+; SI: S_LOAD_DWORD
+; SI: S_LOAD_DWORD
+; SI: V_MUL_LO_I32
+; SI: BUFFER_STORE_DWORD
+define void @trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+ %mul = mul i64 %b, %a
+ %trunc = trunc i64 %mul to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 8
+ ret void
+}
+
+; This 64-bit multiply should just use MUL_HI and MUL_LO, since the top
+; 32 bits of both arguments are sign bits.
+; FUNC-LABEL: @mul64_sext_c
+; EG-DAG: MULLO_INT
+; EG-DAG: MULHI_INT
+; SI-DAG: V_MUL_LO_I32
+; SI-DAG: V_MUL_HI_I32
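+; (Illustrative reasoning: %in is sign-extended to i64 and the constant 80
+; also fits in 32 bits, so the whole 64-bit product is a single signed
+; 32x32->64 multiply; MULLO_INT / V_MUL_LO_I32 supplies the low half and
+; MULHI_INT / V_MUL_HI_I32 the high half.)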
+define void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
+entry:
+ %0 = sext i32 %in to i64
+ %1 = mul i64 %0, 80
+ store i64 %1, i64 addrspace(1)* %out
+ ret void
+}
+
+; A standard 64-bit multiply. The expansion should be around 6 instructions.
+; It would be difficult to match the expansion correctly without writing
+; a really complicated list of FileCheck expressions. I don't want
+; to confuse people who may 'break' this test with a correct optimization,
+; so this test just uses FUNC-LABEL to make sure the compiler does not
+; crash with a 'failed to select' error.
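+; A rough sketch of one possible expansion, for orientation only (FileCheck
+; does not verify it): writing a = a_hi * 2^32 + a_lo and b = b_hi * 2^32 + b_lo,
+;   a * b (mod 2^64) = a_lo * b_lo + ((a_lo * b_hi + a_hi * b_lo) << 32)
+; i.e. one full 32x32->64 multiply, two 32x32->32 multiplies and a couple of
+; adds, which is roughly where the ~6 instruction estimate above comes from.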
+; FUNC-LABEL: @mul64
+define void @mul64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+entry:
+ %0 = mul i64 %a, %b
+ store i64 %0, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/mul_int24.ll b/test/CodeGen/R600/mul_int24.ll
index 66a1a9e5bd99..046911ba147d 100644
--- a/test/CodeGen/R600/mul_int24.ll
+++ b/test/CodeGen/R600/mul_int24.ll
@@ -1,12 +1,15 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; EG-CHECK: @i32_mul24
+; FUNC-LABEL: @i32_mul24
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
-; EG-CHECK: MULLO_INT
-; CM-CHECK: MUL_INT24 {{[ *]*}}T{{[0-9].[XYZW]}}, KC0[2].Z, KC0[2].W
-; SI-CHECK: V_MUL_I32_I24
+; EG: MULLO_INT
+; Make sure we are not masking the inputs
+; CM-NOT: AND
+; CM: MUL_INT24
+; SI-NOT: AND
+; SI: V_MUL_I32_I24
define void @i32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%0 = shl i32 %a, 8
diff --git a/test/CodeGen/R600/mul_uint24.ll b/test/CodeGen/R600/mul_uint24.ll
index 6e6d5496789f..419f2751b83e 100644
--- a/test/CodeGen/R600/mul_uint24.ll
+++ b/test/CodeGen/R600/mul_uint24.ll
@@ -1,11 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; EG-CHECK-LABEL: @u32_mul24
-; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
-; SI-CHECK-LABEL: @u32_mul24
-; SI-CHECK: V_MUL_U32_U24
+; FUNC-LABEL: @u32_mul24
+; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI: V_MUL_U32_U24
define void @u32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
@@ -18,21 +17,13 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i16_mul24
-; EG-CHECK-DAG: VTX_READ_16 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
-; EG-CHECK-DAG: VTX_READ_16 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
-; The order of A and B does not matter.
-; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
+; FUNC-LABEL: @i16_mul24
+; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
-; EG-CHECK: 16
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
-; EG-CHECK: 16
-; SI-CHECK-LABEL: @i16_mul24
-; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 16, [[MUL]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 16, [[LSHL]]
-
+; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
+; EG: 16
+; SI: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 16,
define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
%0 = mul i16 %a, %b
@@ -41,20 +32,12 @@ entry:
ret void
}
-; EG-CHECK-LABEL: @i8_mul24
-; EG-CHECK-DAG: VTX_READ_8 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
-; EG-CHECK-DAG: VTX_READ_8 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
-; The order of A and B does not matter.
-; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
+; FUNC-LABEL: @i8_mul24
+; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
-; EG-CHECK: 24
-; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
-; EG-CHECK: 24
-; SI-CHECK-LABEL: @i8_mul24
-; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 24, [[MUL]]
-; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 24, [[LSHL]]
+; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
+; SI: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
+; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8,
define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
entry:
@@ -63,3 +46,21 @@ entry:
store i32 %1, i32 addrspace(1)* %out
ret void
}
+
+; Multiply with 24-bit inputs and 64-bit output
+; FUNC-LABEL: @mul24_i64
+; EG: MUL_UINT24
+; EG: MULHI
+; SI: V_MUL_U32_U24
+; FIXME: SI supports a 24-bit mulhi; it should be used here instead.
+; SI: V_MUL_HI_U32
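+; (The shl/lshr by 40 in the IR below clears the top 40 bits, so both operands
+; are known unsigned 24-bit values, which is what makes the 24-bit multiply
+; legal here.)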
+define void @mul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+entry:
+ %0 = shl i64 %a, 40
+ %a_24 = lshr i64 %0, 40
+ %1 = shl i64 %b, 40
+ %b_24 = lshr i64 %1, 40
+ %2 = mul i64 %a_24, %b_24
+ store i64 %2, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/mulhu.ll b/test/CodeGen/R600/mulhu.ll
index d5fc01412123..864012748118 100644
--- a/test/CodeGen/R600/mulhu.ll
+++ b/test/CodeGen/R600/mulhu.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK: V_MOV_B32_e32 v{{[0-9]+}}, -1431655765
+;CHECK: V_MOV_B32_e32 v{{[0-9]+}}, 0xaaaaaaab
;CHECK: V_MUL_HI_U32 v0, {{[sv][0-9]+}}, {{v[0-9]+}}
;CHECK-NEXT: V_LSHRREV_B32_e32 v0, 1, v0
diff --git a/test/CodeGen/R600/no-initializer-constant-addrspace.ll b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
new file mode 100644
index 000000000000..ab82e7ee7993
--- /dev/null
+++ b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
@@ -0,0 +1,20 @@
+; RUN: llc -march=r600 -mcpu=SI -o /dev/null %s
+; RUN: llc -march=r600 -mcpu=cypress -o /dev/null %s
+
+@extern_const_addrspace = external unnamed_addr addrspace(2) constant [5 x i32], align 4
+
+; FUNC-LABEL: @load_extern_const_init
+define void @load_extern_const_init(i32 addrspace(1)* %out) nounwind {
+ %val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @extern_const_addrspace, i64 0, i64 3), align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+@undef_const_addrspace = unnamed_addr addrspace(2) constant [5 x i32] undef, align 4
+
+; FUNC-LABEL: @load_undef_const_init
+define void @load_undef_const_init(i32 addrspace(1)* %out) nounwind {
+ %val = load i32 addrspace(2)* getelementptr ([5 x i32] addrspace(2)* @undef_const_addrspace, i64 0, i64 3), align 4
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index 35d23b3d27ad..3c3b475d077c 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -1,13 +1,13 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
-; EG-CHECK-LABEL: @or_v2i32
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG-LABEL: @or_v2i32
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @or_v2i32
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI-LABEL: @or_v2i32
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -18,17 +18,17 @@ define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in)
ret void
}
-; EG-CHECK-LABEL: @or_v4i32
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG-LABEL: @or_v4i32
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: @or_v4i32
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI-LABEL: @or_v4i32
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -39,15 +39,107 @@ define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in)
ret void
}
-; EG-CHECK-LABEL: @or_i64
-; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
-; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
-; SI-CHECK-LABEL: @or_i64
-; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}
-; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}
-define void @or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
-entry:
- %0 = or i64 %a, %b
- store i64 %0, i64 addrspace(1)* %out
- ret void
+; SI-LABEL: @scalar_or_i32
+; SI: S_OR_B32
+define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %or = or i32 %a, %b
+ store i32 %or, i32 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @vector_or_i32
+; SI: V_OR_B32_e32 v{{[0-9]}}
+define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b) {
+ %loada = load i32 addrspace(1)* %a
+ %or = or i32 %loada, %b
+ store i32 %or, i32 addrspace(1)* %out
+ ret void
+}
+
+; EG-LABEL: @scalar_or_i64
+; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
+; SI-LABEL: @scalar_or_i64
+; SI: S_OR_B64
+define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+ %or = or i64 %a, %b
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @vector_or_i64
+; SI: V_OR_B32_e32 v{{[0-9]}}
+; SI: V_OR_B32_e32 v{{[0-9]}}
+define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+ %loada = load i64 addrspace(1)* %a, align 8
+ %loadb = load i64 addrspace(1)* %a, align 8
+ %or = or i64 %loada, %loadb
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @scalar_vector_or_i64
+; SI: V_OR_B32_e32 v{{[0-9]}}
+; SI: V_OR_B32_e32 v{{[0-9]}}
+define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 %b) {
+ %loada = load i64 addrspace(1)* %a
+ %or = or i64 %loada, %b
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @vector_or_i64_loadimm
+; SI-DAG: S_MOV_B32 [[LO_S_IMM:s[0-9]+]], 0xdf77987f
+; SI-DAG: S_MOV_B32 [[HI_S_IMM:s[0-9]+]], 0x146f
+; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
+; SI: S_ENDPGM
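+; (For reference: 22470723082367 = 0x146f_df77987f, so the low dword of the
+; immediate is 0xdf77987f and the high dword is 0x146f, matching the two
+; S_MOV_B32 values checked above.)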
+define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+ %loada = load i64 addrspace(1)* %a, align 8
+ %or = or i64 %loada, 22470723082367
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: The 'or' with 0 should really be removed.
+; SI-LABEL: @vector_or_i64_imm
+; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
+; SI: V_OR_B32_e32 {{v[0-9]+}}, 8, v[[LO_VREG]]
+; SI: V_OR_B32_e32 {{v[0-9]+}}, 0, {{.*}}
+; SI: S_ENDPGM
+define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
+ %loada = load i64 addrspace(1)* %a, align 8
+ %or = or i64 %loada, 8
+ store i64 %or, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @trunc_i64_or_to_i32
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG0:[0-9]+]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[SREG1:[0-9]+]]
+; SI: S_OR_B32 [[SRESULT:s[0-9]+]], s[[SREG1]], s[[SREG0]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
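+; (Only the low 32-bit halves need to be OR'd, since the i64 result is
+; immediately truncated to i32; hence the single S_OR_B32 above.)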
+define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+ %add = or i64 %b, %a
+ %trunc = trunc i64 %add to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 8
+ ret void
+}
+
+; EG-LABEL: @or_i1
+; EG: OR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
+
+; SI-LABEL: @or_i1
+; SI: S_OR_B64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+define void @or_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
+ %a = load float addrspace(1) * %in0
+ %b = load float addrspace(1) * %in1
+ %acmp = fcmp oge float %a, 0.000000e+00
+ %bcmp = fcmp oge float %b, 0.000000e+00
+ %or = or i1 %acmp, %bcmp
+ %result = select i1 %or, float %a, float %b
+ store float %result, float addrspace(1)* %out
+ ret void
}
diff --git a/test/CodeGen/R600/parallelandifcollapse.ll b/test/CodeGen/R600/parallelandifcollapse.ll
index 4afaf684bfce..8a269e0cb43a 100644
--- a/test/CodeGen/R600/parallelandifcollapse.ll
+++ b/test/CodeGen/R600/parallelandifcollapse.ll
@@ -7,6 +7,12 @@
; CHECK: AND_INT
; CHECK-NEXT: AND_INT
; CHECK-NEXT: OR_INT
+
+; FIXME: For some reason having the allocas here allowed the flatten cfg pass
+; to do its transformation. However, now that we are using local memory for
+; allocas, the transformation isn't happening.
+; XFAIL: *
+
define void @_Z9chk1D_512v() #0 {
entry:
%a0 = alloca i32, align 4
diff --git a/test/CodeGen/R600/parallelorifcollapse.ll b/test/CodeGen/R600/parallelorifcollapse.ll
index b0db7cdd0671..feca688c30aa 100644
--- a/test/CodeGen/R600/parallelorifcollapse.ll
+++ b/test/CodeGen/R600/parallelorifcollapse.ll
@@ -3,6 +3,11 @@
;
; CFG flattening should use parallel-or to generate branch conditions and
; then merge if-regions with the same bodies.
+
+; FIXME: For some reason having the allocas here allowed the flatten cfg pass
+; to do its transformation. However, now that we are using local memory for
+; allocas, the transformation isn't happening.
+; XFAIL: *
;
; CHECK: OR_INT
; CHECK-NEXT: OR_INT
diff --git a/test/CodeGen/R600/private-memory-atomics.ll b/test/CodeGen/R600/private-memory-atomics.ll
new file mode 100644
index 000000000000..def4f9dee521
--- /dev/null
+++ b/test/CodeGen/R600/private-memory-atomics.ll
@@ -0,0 +1,31 @@
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s
+
+; This works because the promote alloca pass replaces these with LDS atomics.
+
+; Private atomics have no real use, but the compiler at least shouldn't crash on them.
+define void @atomicrmw_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %tmp4 = atomicrmw add i32* %tmp3, i32 7 acq_rel
+ store i32 %tmp4, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @cmpxchg_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %tmp4 = cmpxchg i32* %tmp3, i32 0, i32 1 acq_rel monotonic
+ %val = extractvalue { i32, i1 } %tmp4, 0
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/private-memory-broken.ll b/test/CodeGen/R600/private-memory-broken.ll
new file mode 100644
index 000000000000..40860858eb0f
--- /dev/null
+++ b/test/CodeGen/R600/private-memory-broken.ll
@@ -0,0 +1,20 @@
+; RUN: not llc -verify-machineinstrs -march=r600 -mcpu=SI %s -o /dev/null 2>&1 | FileCheck %s
+
+; Make sure the promote alloca pass doesn't crash.
+
+; CHECK: unsupported call
+
+declare i32 @foo(i32*) nounwind
+
+define void @call_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %val = call i32 @foo(i32* %tmp3) nounwind
+ store i32 %val, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/private-memory.ll b/test/CodeGen/R600/private-memory.ll
index 48a013c8e549..124d9fa64505 100644
--- a/test/CodeGen/R600/private-memory.ll
+++ b/test/CodeGen/R600/private-memory.ll
@@ -1,24 +1,23 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
-
-; This test checks that uses and defs of the AR register happen in the same
-; instruction clause.
-
-; R600-CHECK-LABEL: @mova_same_clause
-; R600-CHECK: MOVA_INT
-; R600-CHECK-NOT: ALU clause
-; R600-CHECK: 0 + AR.x
-; R600-CHECK: MOVA_INT
-; R600-CHECK-NOT: ALU clause
-; R600-CHECK: 0 + AR.x
-
-; SI-CHECK-LABEL: @mova_same_clause
-; SI-CHECK: V_READFIRSTLANE
-; SI-CHECK: V_MOVRELD
-; SI-CHECK: S_CBRANCH
-; SI-CHECK: V_READFIRSTLANE
-; SI-CHECK: V_MOVRELD
-; SI-CHECK: S_CBRANCH
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
+; RUN: llc -mattr=+promote-alloca -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -mattr=-promote-alloca -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
+; FUNC-LABEL: @mova_same_clause
+
+; R600: LDS_WRITE
+; R600: LDS_WRITE
+; R600: LDS_READ
+; R600: LDS_READ
+
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+
+; SI-ALLOCA: BUFFER_STORE_DWORD v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s{{[0-9]+}}
+; SI-ALLOCA: BUFFER_STORE_DWORD v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s{{[0-9]+}}
define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
%stack = alloca [5 x i32], align 4
@@ -46,10 +45,10 @@ entry:
; XXX: This generated code has unnecessary MOVs, we should be able to optimize
; this.
-; R600-CHECK-LABEL: @multiple_structs
-; R600-CHECK-NOT: MOVA_INT
-; SI-CHECK-LABEL: @multiple_structs
-; SI-CHECK-NOT: V_MOVREL
+; FUNC-LABEL: @multiple_structs
+; R600-NOT: MOVA_INT
+; SI-NOT: V_MOVREL
+; SI-NOT: V_MOVREL
%struct.point = type { i32, i32 }
define void @multiple_structs(i32 addrspace(1)* %out) {
@@ -77,10 +76,9 @@ entry:
; loads and stores should be lowered to copies, so there shouldn't be any
; MOVA instructions.
-; R600-CHECK-LABLE: @direct_loop
-; R600-CHECK-NOT: MOVA_INT
-; SI-CHECK-LABEL: @direct_loop
-; SI-CHECK-NOT: V_MOVREL
+; FUNC-LABEL: @direct_loop
+; R600-NOT: MOVA_INT
+; SI-NOT: V_MOVREL
define void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -113,3 +111,183 @@ for.end:
store i32 %value, i32 addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @short_array
+
+; R600: MOVA_INT
+
+; SI-PROMOTE: BUFFER_STORE_SHORT v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s{{[0-9]+}}
+; SI-PROMOTE: BUFFER_STORE_SHORT v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s{{[0-9]+}}
+; SI-PROMOTE-NOT: MOVREL
+; SI-PROMOTE: BUFFER_LOAD_SSHORT v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}] + v{{[0-9]+}} + s{{[0-9]+}}
+define void @short_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = alloca [2 x i16]
+ %1 = getelementptr [2 x i16]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i16]* %0, i32 0, i32 1
+ store i16 0, i16* %1
+ store i16 1, i16* %2
+ %3 = getelementptr [2 x i16]* %0, i32 0, i32 %index
+ %4 = load i16* %3
+ %5 = sext i16 %4 to i32
+ store i32 %5, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @char_array
+
+; R600: MOVA_INT
+
+; SI-DAG: BUFFER_STORE_BYTE v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s{{[0-9]+}}, 0x0
+; SI-DAG: BUFFER_STORE_BYTE v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s{{[0-9]+}}, 0x1
+define void @char_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = alloca [2 x i8]
+ %1 = getelementptr [2 x i8]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i8]* %0, i32 0, i32 1
+ store i8 0, i8* %1
+ store i8 1, i8* %2
+ %3 = getelementptr [2 x i8]* %0, i32 0, i32 %index
+ %4 = load i8* %3
+ %5 = sext i8 %4 to i32
+ store i32 %5, i32 addrspace(1)* %out
+ ret void
+
+}
+
+; Make sure we don't overwrite workitem information with private memory
+
+; FUNC-LABEL: @work_item_info
+; R600-NOT: MOV T0.X
+; Additional check in case the move ends up in the last slot
+; R600-NOT: MOV * T0.X
+
+; SI-NOT: V_MOV_B32_e{{(32|64)}} v0
+define void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %0 = alloca [2 x i32]
+ %1 = getelementptr [2 x i32]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i32]* %0, i32 0, i32 1
+ store i32 0, i32* %1
+ store i32 1, i32* %2
+ %3 = getelementptr [2 x i32]* %0, i32 0, i32 %in
+ %4 = load i32* %3
+ %5 = call i32 @llvm.r600.read.tidig.x()
+ %6 = add i32 %4, %5
+ store i32 %6, i32 addrspace(1)* %out
+ ret void
+}
+
+; Test that two stack objects are not stored in the same register
+; The second stack object should be in T3.X
+; FUNC-LABEL: @no_overlap
+; R600: MOV
+; R600: [[CHAN:[XYZW]]]+
+; R600-NOT: [[CHAN]]+
+; SI: V_MOV_B32_e32 v3
+define void @no_overlap(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %0 = alloca [3 x i8], align 1
+ %1 = alloca [2 x i8], align 1
+ %2 = getelementptr [3 x i8]* %0, i32 0, i32 0
+ %3 = getelementptr [3 x i8]* %0, i32 0, i32 1
+ %4 = getelementptr [3 x i8]* %0, i32 0, i32 2
+ %5 = getelementptr [2 x i8]* %1, i32 0, i32 0
+ %6 = getelementptr [2 x i8]* %1, i32 0, i32 1
+ store i8 0, i8* %2
+ store i8 1, i8* %3
+ store i8 2, i8* %4
+ store i8 1, i8* %5
+ store i8 0, i8* %6
+ %7 = getelementptr [3 x i8]* %0, i32 0, i32 %in
+ %8 = getelementptr [2 x i8]* %1, i32 0, i32 %in
+ %9 = load i8* %7
+ %10 = load i8* %8
+ %11 = add i8 %9, %10
+ %12 = sext i8 %11 to i32
+ store i32 %12, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @char_array_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x i8]]
+ %gep0 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 1
+ store i8 0, i8* %gep0
+ store i8 1, i8* %gep1
+ %gep2 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 %index
+ %load = load i8* %gep2
+ %sext = sext i8 %load to i32
+ store i32 %sext, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @i32_array_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x i32]]
+ %gep0 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 1
+ store i32 0, i32* %gep0
+ store i32 1, i32* %gep1
+ %gep2 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
+ %load = load i32* %gep2
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @i64_array_array(i64 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x i64]]
+ %gep0 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 1
+ store i64 0, i64* %gep0
+ store i64 1, i64* %gep1
+ %gep2 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 %index
+ %load = load i64* %gep2
+ store i64 %load, i64 addrspace(1)* %out
+ ret void
+}
+
+%struct.pair32 = type { i32, i32 }
+
+define void @struct_array_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x [2 x %struct.pair32]]
+ %gep0 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
+ %gep1 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 1, i32 1
+ store i32 0, i32* %gep0
+ store i32 1, i32* %gep1
+ %gep2 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 %index, i32 0
+ %load = load i32* %gep2
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %alloca = alloca [2 x %struct.pair32]
+ %gep0 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
+ %gep1 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 1, i32 0
+ store i32 0, i32* %gep0
+ store i32 1, i32* %gep1
+ %gep2 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 %index, i32 0
+ %load = load i32* %gep2
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+entry:
+ %tmp = alloca [2 x i32]
+ %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ store i32 0, i32* %tmp1
+ store i32 1, i32* %tmp2
+ %cmp = icmp eq i32 %in, 0
+ %sel = select i1 %cmp, i32* %tmp1, i32* %tmp2
+ %load = load i32* %sel
+ store i32 %load, i32 addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/R600/pv.ll b/test/CodeGen/R600/pv.ll
index 5a930b292682..55eb56d3fb1d 100644
--- a/test/CodeGen/R600/pv.ll
+++ b/test/CodeGen/R600/pv.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 | FileCheck %s
;CHECK: DOT4 * T{{[0-9]\.W}} (MASKED)
-;CHECK: MAX T{{[0-9].[XYZW]}}, 0.0, PV.X
+;CHECK: MAX T{{[0-9].[XYZW]}}, PV.X, 0.0
define void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3, <4 x float> inreg %reg4, <4 x float> inreg %reg5, <4 x float> inreg %reg6, <4 x float> inreg %reg7) #0 {
main_body:
@@ -103,7 +103,7 @@ main_body:
%95 = insertelement <4 x float> %94, float 0.000000e+00, i32 3
%96 = call float @llvm.AMDGPU.dp4(<4 x float> %91, <4 x float> %95)
%97 = call float @fabs(float %96)
- %98 = call float @llvm.AMDGPU.rsq(float %97)
+ %98 = call float @llvm.AMDGPU.rsq.f32(float %97)
%99 = fmul float %4, %98
%100 = fmul float %5, %98
%101 = fmul float %6, %98
@@ -225,7 +225,7 @@ declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) #1
declare float @fabs(float) #2
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #1
+declare float @llvm.AMDGPU.rsq.f32(float) #1
; Function Attrs: readnone
declare float @llvm.AMDIL.clamp.(float, float, float) #1
diff --git a/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll b/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll
new file mode 100644
index 000000000000..c89398f00662
--- /dev/null
+++ b/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll
@@ -0,0 +1,59 @@
+;RUN: llc < %s -march=r600 -mcpu=cayman
+;REQUIRES: asserts
+
+define void @main(<4 x float> inreg, <4 x float> inreg) #0 {
+main_body:
+ %2 = extractelement <4 x float> %0, i32 0
+ %3 = extractelement <4 x float> %0, i32 1
+ %4 = extractelement <4 x float> %0, i32 2
+ %5 = extractelement <4 x float> %0, i32 3
+ %6 = insertelement <4 x float> undef, float %2, i32 0
+ %7 = insertelement <4 x float> %6, float %3, i32 1
+ %8 = insertelement <4 x float> %7, float %4, i32 2
+ %9 = insertelement <4 x float> %8, float %5, i32 3
+ %10 = call <4 x float> @llvm.AMDGPU.cube(<4 x float> %9)
+ %11 = extractelement <4 x float> %10, i32 0
+ %12 = extractelement <4 x float> %10, i32 1
+ %13 = extractelement <4 x float> %10, i32 2
+ %14 = extractelement <4 x float> %10, i32 3
+ %15 = call float @fabs(float %13)
+ %16 = fdiv float 1.000000e+00, %15
+ %17 = fmul float %11, %16
+ %18 = fadd float %17, 1.500000e+00
+ %19 = fmul float %12, %16
+ %20 = fadd float %19, 1.500000e+00
+ %21 = insertelement <4 x float> undef, float %20, i32 0
+ %22 = insertelement <4 x float> %21, float %18, i32 1
+ %23 = insertelement <4 x float> %22, float %14, i32 2
+ %24 = insertelement <4 x float> %23, float %5, i32 3
+ %25 = extractelement <4 x float> %24, i32 0
+ %26 = extractelement <4 x float> %24, i32 1
+ %27 = extractelement <4 x float> %24, i32 2
+ %28 = extractelement <4 x float> %24, i32 3
+ %29 = insertelement <4 x float> undef, float %25, i32 0
+ %30 = insertelement <4 x float> %29, float %26, i32 1
+ %31 = insertelement <4 x float> %30, float %27, i32 2
+ %32 = insertelement <4 x float> %31, float %28, i32 3
+ %33 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %32, i32 16, i32 0, i32 13)
+ %34 = extractelement <4 x float> %33, i32 0
+ %35 = insertelement <4 x float> undef, float %34, i32 0
+ %36 = insertelement <4 x float> %35, float %34, i32 1
+ %37 = insertelement <4 x float> %36, float %34, i32 2
+ %38 = insertelement <4 x float> %37, float 1.000000e+00, i32 3
+ call void @llvm.R600.store.swizzle(<4 x float> %38, i32 0, i32 0)
+ ret void
+}
+
+; Function Attrs: readnone
+declare <4 x float> @llvm.AMDGPU.cube(<4 x float>) #1
+
+; Function Attrs: readnone
+declare float @fabs(float) #1
+
+; Function Attrs: readnone
+declare <4 x float> @llvm.AMDGPU.tex(<4 x float>, i32, i32, i32) #1
+
+declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { readnone }
diff --git a/test/CodeGen/R600/register-count-comments.ll b/test/CodeGen/R600/register-count-comments.ll
new file mode 100644
index 000000000000..329077cde57d
--- /dev/null
+++ b/test/CodeGen/R600/register-count-comments.ll
@@ -0,0 +1,20 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare i32 @llvm.SI.tid() nounwind readnone
+
+; SI-LABEL: @foo:
+; SI: .section .AMDGPU.csdata
+; SI: ; Kernel info:
+; SI: ; NumSgprs: {{[0-9]+}}
+; SI: ; NumVgprs: {{[0-9]+}}
+define void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 addrspace(1)* %bbase) nounwind {
+ %tid = call i32 @llvm.SI.tid() nounwind readnone
+ %aptr = getelementptr i32 addrspace(1)* %abase, i32 %tid
+ %bptr = getelementptr i32 addrspace(1)* %bbase, i32 %tid
+ %outptr = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %result = add i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %outptr, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/reorder-stores.ll b/test/CodeGen/R600/reorder-stores.ll
new file mode 100644
index 000000000000..be2fcc6849fb
--- /dev/null
+++ b/test/CodeGen/R600/reorder-stores.ll
@@ -0,0 +1,104 @@
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @no_reorder_v2f64_global_load_store
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_LOAD_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
+ %tmp1 = load <2 x double> addrspace(1)* %x, align 16
+ %tmp4 = load <2 x double> addrspace(1)* %y, align 16
+ store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
+ store <2 x double> %tmp1, <2 x double> addrspace(1)* %y, align 16
+ ret void
+}
+
+; SI-LABEL: @no_reorder_scalarized_v2f64_local_load_store
+; SI: DS_READ_B64
+; SI: DS_READ_B64
+; SI: DS_WRITE_B64
+; SI: DS_WRITE_B64
+; SI: S_ENDPGM
+define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
+ %tmp1 = load <2 x double> addrspace(3)* %x, align 16
+ %tmp4 = load <2 x double> addrspace(3)* %y, align 16
+ store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
+ store <2 x double> %tmp1, <2 x double> addrspace(3)* %y, align 16
+ ret void
+}
+
+; SI-LABEL: @no_reorder_split_v8i32_global_load_store
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+; SI: BUFFER_LOAD_DWORD
+
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
+ %tmp1 = load <8 x i32> addrspace(1)* %x, align 32
+ %tmp4 = load <8 x i32> addrspace(1)* %y, align 32
+ store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
+ store <8 x i32> %tmp1, <8 x i32> addrspace(1)* %y, align 32
+ ret void
+}
+
+; SI-LABEL: @no_reorder_extload_64
+; SI: DS_READ_B64
+; SI: DS_READ_B64
+; SI: DS_WRITE_B64
+; SI-NOT: DS_READ
+; SI: DS_WRITE_B64
+; SI: S_ENDPGM
+define void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
+ %tmp1 = load <2 x i32> addrspace(3)* %x, align 8
+ %tmp4 = load <2 x i32> addrspace(3)* %y, align 8
+ %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
+ %tmp4ext = zext <2 x i32> %tmp4 to <2 x i64>
+ %tmp7 = add <2 x i64> %tmp1ext, <i64 1, i64 1>
+ %tmp9 = add <2 x i64> %tmp4ext, <i64 1, i64 1>
+ %trunctmp9 = trunc <2 x i64> %tmp9 to <2 x i32>
+ %trunctmp7 = trunc <2 x i64> %tmp7 to <2 x i32>
+ store <2 x i32> %trunctmp9, <2 x i32> addrspace(3)* %x, align 8
+ store <2 x i32> %trunctmp7, <2 x i32> addrspace(3)* %y, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/rotl.i64.ll b/test/CodeGen/R600/rotl.i64.ll
new file mode 100644
index 000000000000..bda0b6694a8d
--- /dev/null
+++ b/test/CodeGen/R600/rotl.i64.ll
@@ -0,0 +1,34 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @s_rotl_i64:
+; SI: S_LSHL_B64
+; SI: S_SUB_I32
+; SI: S_LSHR_B64
+; SI: S_OR_B64
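+; (The IR below implements rotate-left by hand as
+;   rotl(x, y) = (x << y) | (x >> (64 - y))
+; which is why the checks expect a shift-left, a subtract, a shift-right and
+; an or.)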
+define void @s_rotl_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
+entry:
+ %0 = shl i64 %x, %y
+ %1 = sub i64 64, %y
+ %2 = lshr i64 %x, %1
+ %3 = or i64 %0, %2
+ store i64 %3, i64 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @v_rotl_i64:
+; SI: V_LSHL_B64
+; SI: V_SUB_I32
+; SI: V_LSHR_B64
+; SI: V_OR_B32
+; SI: V_OR_B32
+define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
+entry:
+ %x = load i64 addrspace(1)* %xptr, align 8
+ %y = load i64 addrspace(1)* %yptr, align 8
+ %tmp0 = shl i64 %x, %y
+ %tmp1 = sub i64 64, %y
+ %tmp2 = lshr i64 %x, %tmp1
+ %tmp3 = or i64 %tmp0, %tmp2
+ store i64 %tmp3, i64 addrspace(1)* %in, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/rotl.ll b/test/CodeGen/R600/rotl.ll
new file mode 100644
index 000000000000..83f657fd4cce
--- /dev/null
+++ b/test/CodeGen/R600/rotl.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @rotl_i32:
+; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
+; R600-NEXT: 32
+; R600: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
+
+; SI: S_SUB_I32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
+; SI: V_MOV_B32_e32 [[VDST:v[0-9]+]], [[SDST]]
+; SI: V_ALIGNBIT_B32 {{v[0-9]+, [s][0-9]+, v[0-9]+}}, [[VDST]]
+define void @rotl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+entry:
+ %0 = shl i32 %x, %y
+ %1 = sub i32 32, %y
+ %2 = lshr i32 %x, %1
+ %3 = or i32 %0, %2
+ store i32 %3, i32 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotl_v2i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+entry:
+ %0 = shl <2 x i32> %x, %y
+ %1 = sub <2 x i32> <i32 32, i32 32>, %y
+ %2 = lshr <2 x i32> %x, %1
+ %3 = or <2 x i32> %0, %2
+ store <2 x i32> %3, <2 x i32> addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotl_v4i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
+entry:
+ %0 = shl <4 x i32> %x, %y
+ %1 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
+ %2 = lshr <4 x i32> %x, %1
+ %3 = or <4 x i32> %0, %2
+ store <4 x i32> %3, <4 x i32> addrspace(1)* %in
+ ret void
+}
diff --git a/test/CodeGen/R600/rotr.i64.ll b/test/CodeGen/R600/rotr.i64.ll
new file mode 100644
index 000000000000..c264751baeb1
--- /dev/null
+++ b/test/CodeGen/R600/rotr.i64.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @s_rotr_i64
+; SI: S_LSHR_B64
+; SI: S_SUB_I32
+; SI: S_LSHL_B64
+; SI: S_OR_B64
+define void @s_rotr_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
+entry:
+ %tmp0 = sub i64 64, %y
+ %tmp1 = shl i64 %x, %tmp0
+ %tmp2 = lshr i64 %x, %y
+ %tmp3 = or i64 %tmp1, %tmp2
+ store i64 %tmp3, i64 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @v_rotr_i64
+; SI: V_LSHR_B64
+; SI: V_SUB_I32
+; SI: V_LSHL_B64
+; SI: V_OR_B32
+; SI: V_OR_B32
+define void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
+entry:
+ %x = load i64 addrspace(1)* %xptr, align 8
+ %y = load i64 addrspace(1)* %yptr, align 8
+ %tmp0 = sub i64 64, %y
+ %tmp1 = shl i64 %x, %tmp0
+ %tmp2 = lshr i64 %x, %y
+ %tmp3 = or i64 %tmp1, %tmp2
+ store i64 %tmp3, i64 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @s_rotr_v2i64
+define void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
+entry:
+ %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
+ %tmp1 = shl <2 x i64> %x, %tmp0
+ %tmp2 = lshr <2 x i64> %x, %y
+ %tmp3 = or <2 x i64> %tmp1, %tmp2
+ store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @v_rotr_v2i64
+define void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
+entry:
+ %x = load <2 x i64> addrspace(1)* %xptr, align 8
+ %y = load <2 x i64> addrspace(1)* %yptr, align 8
+ %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
+ %tmp1 = shl <2 x i64> %x, %tmp0
+ %tmp2 = lshr <2 x i64> %x, %y
+ %tmp3 = or <2 x i64> %tmp1, %tmp2
+ store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
+ ret void
+}
diff --git a/test/CodeGen/R600/rotr.ll b/test/CodeGen/R600/rotr.ll
index edf7aeebea0f..a5a4da480738 100644
--- a/test/CodeGen/R600/rotr.ll
+++ b/test/CodeGen/R600/rotr.ll
@@ -1,37 +1,52 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @rotr:
-; R600-CHECK: BIT_ALIGN_INT
+; FUNC-LABEL: @rotr_i32:
+; R600: BIT_ALIGN_INT
-; SI-CHECK-LABEL: @rotr:
-; SI-CHECK: V_ALIGNBIT_B32
-define void @rotr(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+; SI: V_ALIGNBIT_B32
+define void @rotr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
- %0 = sub i32 32, %y
- %1 = shl i32 %x, %0
- %2 = lshr i32 %x, %y
- %3 = or i32 %1, %2
- store i32 %3, i32 addrspace(1)* %in
+ %tmp0 = sub i32 32, %y
+ %tmp1 = shl i32 %x, %tmp0
+ %tmp2 = lshr i32 %x, %y
+ %tmp3 = or i32 %tmp1, %tmp2
+ store i32 %tmp3, i32 addrspace(1)* %in
ret void
}
-; R600-CHECK-LABEL: @rotl:
-; R600-CHECK: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
-; R600-CHECK-NEXT: 32
-; R600-CHECK: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
+; FUNC-LABEL: @rotr_v2i32:
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+define void @rotr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+entry:
+ %tmp0 = sub <2 x i32> <i32 32, i32 32>, %y
+ %tmp1 = shl <2 x i32> %x, %tmp0
+ %tmp2 = lshr <2 x i32> %x, %y
+ %tmp3 = or <2 x i32> %tmp1, %tmp2
+ store <2 x i32> %tmp3, <2 x i32> addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotr_v4i32:
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
-; SI-CHECK-LABEL: @rotl:
-; SI-CHECK: S_SUB_I32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
-; SI-CHECK: V_MOV_B32_e32 [[VDST:v[0-9]+]], [[SDST]]
-; SI-CHECK: V_ALIGNBIT_B32 {{v[0-9]+, [s][0-9]+, v[0-9]+}}, [[VDST]]
-define void @rotl(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+define void @rotr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
entry:
- %0 = shl i32 %x, %y
- %1 = sub i32 32, %y
- %2 = lshr i32 %x, %1
- %3 = or i32 %0, %2
- store i32 %3, i32 addrspace(1)* %in
+ %tmp0 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
+ %tmp1 = shl <4 x i32> %x, %tmp0
+ %tmp2 = lshr <4 x i32> %x, %y
+ %tmp3 = or <4 x i32> %tmp1, %tmp2
+ store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %in
ret void
}
diff --git a/test/CodeGen/R600/rsq.ll b/test/CodeGen/R600/rsq.ll
new file mode 100644
index 000000000000..3069f62724b7
--- /dev/null
+++ b/test/CodeGen/R600/rsq.ll
@@ -0,0 +1,28 @@
+; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI %s
+; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI %s
+
+declare float @llvm.sqrt.f32(float) nounwind readnone
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+; SI-LABEL: @rsq_f32
+; SI: V_RSQ_F32_e32
+; SI: S_ENDPGM
+define void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %val = load float addrspace(1)* %in, align 4
+ %sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv float 1.0, %sqrt
+ store float %div, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @rsq_f64
+; SI-UNSAFE: V_RSQ_F64_e32
+; SI-SAFE: V_SQRT_F64_e32
+; SI: S_ENDPGM
+define void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+ %val = load double addrspace(1)* %in, align 4
+ %sqrt = call double @llvm.sqrt.f64(double %val) nounwind readnone
+ %div = fdiv double 1.0, %sqrt
+ store double %div, double addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/saddo.ll b/test/CodeGen/R600/saddo.ll
new file mode 100644
index 000000000000..c80480e85512
--- /dev/null
+++ b/test/CodeGen/R600/saddo.ll
@@ -0,0 +1,62 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+
+; FUNC-LABEL: @saddo_i64_zext
+define void @saddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %sadd, 0
+ %carry = extractvalue { i64, i1 } %sadd, 1
+ %ext = zext i1 %carry to i64
+ %add2 = add i64 %val, %ext
+ store i64 %add2, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_saddo_i32
+define void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %sadd, 0
+ %carry = extractvalue { i32, i1 } %sadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_saddo_i32
+define void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %sadd, 0
+ %carry = extractvalue { i32, i1 } %sadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_saddo_i64
+define void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %sadd, 0
+ %carry = extractvalue { i64, i1 } %sadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_saddo_i64
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
+define void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %sadd = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %sadd, 0
+ %carry = extractvalue { i64, i1 } %sadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
diff --git a/test/CodeGen/R600/salu-to-valu.ll b/test/CodeGen/R600/salu-to-valu.ll
new file mode 100644
index 000000000000..e7719b6feb3b
--- /dev/null
+++ b/test/CodeGen/R600/salu-to-valu.ll
@@ -0,0 +1,90 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+
+; In this test both the pointer and the offset operands to the
+; BUFFER_LOAD instructions end up being stored in vgprs. This
+; requires us to add the pointer and offset together, store the
+; result in the offset operand (vaddr), and then store 0 in an
+; sgpr register pair and use that for the pointer operand
+; (low 64-bits of srsrc).
+
+; CHECK-LABEL: @mubuf
+
+; Make sure we aren't using VGPRs for the source operand of S_MOV_B64
+; CHECK-NOT: S_MOV_B64 s[{{[0-9]+:[0-9]+}}], v
+
+; Make sure we aren't using VGPRs for the srsrc operand of BUFFER_LOAD_*
+; instructions
+; CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}]
+; CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}]
+define void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+ %0 = call i32 @llvm.r600.read.tidig.x() #1
+ %1 = call i32 @llvm.r600.read.tidig.y() #1
+ %2 = sext i32 %0 to i64
+ %3 = sext i32 %1 to i64
+ br label %loop
+
+loop:
+ %4 = phi i64 [0, %entry], [%5, %loop]
+ %5 = add i64 %2, %4
+ %6 = getelementptr i8 addrspace(1)* %in, i64 %5
+ %7 = load i8 addrspace(1)* %6, align 1
+ %8 = or i64 %5, 1
+ %9 = getelementptr i8 addrspace(1)* %in, i64 %8
+ %10 = load i8 addrspace(1)* %9, align 1
+ %11 = add i8 %7, %10
+ %12 = sext i8 %11 to i32
+ store i32 %12, i32 addrspace(1)* %out
+ %13 = icmp slt i64 %5, 10
+ br i1 %13, label %loop, label %done
+
+done:
+ ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #1
+declare i32 @llvm.r600.read.tidig.y() #1
+
+attributes #1 = { nounwind readnone }
+
+; Test moving an SMRD instruction to the VALU
+
+; CHECK-LABEL: @smrd_valu
+; CHECK: BUFFER_LOAD_DWORD [[OUT:v[0-9]+]]
+; CHECK: BUFFER_STORE_DWORD [[OUT]]
+
+define void @smrd_valu(i32 addrspace(2)* addrspace(1)* %in, i32 %a, i32 addrspace(1)* %out) {
+entry:
+ %0 = icmp ne i32 %a, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = load i32 addrspace(2)* addrspace(1)* %in
+ br label %endif
+
+else:
+ %2 = getelementptr i32 addrspace(2)* addrspace(1)* %in
+ %3 = load i32 addrspace(2)* addrspace(1)* %2
+ br label %endif
+
+endif:
+ %4 = phi i32 addrspace(2)* [%1, %if], [%3, %else]
+ %5 = getelementptr i32 addrspace(2)* %4, i32 3000
+ %6 = load i32 addrspace(2)* %5
+ store i32 %6, i32 addrspace(1)* %out
+ ret void
+}
+
+; Test moving an SMRD with an immediate offset to the VALU
+
+; CHECK-LABEL: @smrd_valu2
+; CHECK: BUFFER_LOAD_DWORD
+define void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) {
+entry:
+ %0 = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %1 = add i32 %0, 4
+ %2 = getelementptr [8 x i32] addrspace(2)* %in, i32 %0, i32 4
+ %3 = load i32 addrspace(2)* %2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/scalar_to_vector.ll b/test/CodeGen/R600/scalar_to_vector.ll
new file mode 100644
index 000000000000..bcccb065818e
--- /dev/null
+++ b/test/CodeGen/R600/scalar_to_vector.ll
@@ -0,0 +1,80 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+
+; FUNC-LABEL: @scalar_to_vector_v2i32
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_LSHRREV_B32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: S_ENDPGM
+define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %tmp1 = load i32 addrspace(1)* %in, align 4
+ %bc = bitcast i32 %tmp1 to <2 x i16>
+ %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @scalar_to_vector_v2f32
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_LSHRREV_B32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: BUFFER_STORE_SHORT [[RESULT]]
+; SI: S_ENDPGM
+define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %tmp1 = load float addrspace(1)* %in, align 4
+ %bc = bitcast float %tmp1 to <2 x i16>
+ %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; Getting a SCALAR_TO_VECTOR seems to be tricky. These cases managed
+; to produce one, but for some reason it never made it to selection.
+
+
+; define void @scalar_to_vector_test2(<8 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+; %tmp1 = load i32 addrspace(1)* %in, align 4
+; %bc = bitcast i32 %tmp1 to <4 x i8>
+
+; %tmp2 = shufflevector <4 x i8> %bc, <4 x i8> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+; store <8 x i8> %tmp2, <8 x i8> addrspace(1)* %out, align 4
+; ret void
+; }
+
+; define void @scalar_to_vector_test3(<4 x i32> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <2 x i64> undef, i64 12345, i32 0
+; %newvec1 = insertelement <2 x i64> %newvec0, i64 undef, i32 1
+; %bc = bitcast <2 x i64> %newvec1 to <4 x i32>
+; %add = add <4 x i32> %bc, <i32 1, i32 2, i32 3, i32 4>
+; store <4 x i32> %add, <4 x i32> addrspace(1)* %out, align 16
+; ret void
+; }
+
+; define void @scalar_to_vector_test4(<8 x i16> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <4 x i32> undef, i32 12345, i32 0
+; %bc = bitcast <4 x i32> %newvec0 to <8 x i16>
+; %add = add <8 x i16> %bc, <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>
+; store <8 x i16> %add, <8 x i16> addrspace(1)* %out, align 16
+; ret void
+; }
+
+; define void @scalar_to_vector_test5(<4 x i16> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <2 x i32> undef, i32 12345, i32 0
+; %bc = bitcast <2 x i32> %newvec0 to <4 x i16>
+; %add = add <4 x i16> %bc, <i16 1, i16 2, i16 3, i16 4>
+; store <4 x i16> %add, <4 x i16> addrspace(1)* %out, align 16
+; ret void
+; }
+
+; define void @scalar_to_vector_test6(<4 x i16> addrspace(1)* %out) nounwind {
+; %newvec0 = insertelement <2 x i32> undef, i32 12345, i32 0
+; %bc = bitcast <2 x i32> %newvec0 to <4 x i16>
+; %add = add <4 x i16> %bc, <i16 1, i16 2, i16 3, i16 4>
+; store <4 x i16> %add, <4 x i16> addrspace(1)* %out, align 16
+; ret void
+; }
diff --git a/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
new file mode 100644
index 000000000000..3d2142d53ecf
--- /dev/null
+++ b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
@@ -0,0 +1,162 @@
+; XFAIL: *
+; REQUIRES: asserts
+; RUN: llc -O0 -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck %s -check-prefix=SI
+
+declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
+
+
+; SI-LABEL: @main(
+define void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
+main_body:
+ %0 = extractelement <4 x float> %reg1, i32 0
+ %1 = extractelement <4 x float> %reg1, i32 2
+ %2 = fcmp ult float %0, 0.000000e+00
+ %3 = select i1 %2, float 1.000000e+00, float 0.000000e+00
+ %4 = fsub float -0.000000e+00, %3
+ %5 = fptosi float %4 to i32
+ %6 = bitcast i32 %5 to float
+ %7 = bitcast float %6 to i32
+ %8 = icmp ne i32 %7, 0
+ br i1 %8, label %LOOP, label %ENDIF
+
+Flow1: ; preds = %ENDIF19, %ENDIF16
+ %9 = phi float [ %115, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %10 = phi float [ %114, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %11 = phi float [ %113, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %12 = phi float [ %112, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %13 = phi float [ %111, %ENDIF19 ], [ undef, %ENDIF16 ]
+ %14 = phi i1 [ false, %ENDIF19 ], [ true, %ENDIF16 ]
+ br label %Flow
+
+Flow2: ; preds = %Flow
+ br label %ENDIF
+
+ENDIF: ; preds = %main_body, %Flow2
+ %temp.0 = phi float [ 0.000000e+00, %main_body ], [ %104, %Flow2 ]
+ %temp1.0 = phi float [ 1.000000e+00, %main_body ], [ %103, %Flow2 ]
+ %temp2.0 = phi float [ 0.000000e+00, %main_body ], [ %102, %Flow2 ]
+ %temp3.0 = phi float [ 0.000000e+00, %main_body ], [ %101, %Flow2 ]
+ %15 = extractelement <4 x float> %reg1, i32 1
+ %16 = extractelement <4 x float> %reg1, i32 3
+ %17 = load <4 x float> addrspace(9)* null
+ %18 = extractelement <4 x float> %17, i32 0
+ %19 = fmul float %18, %0
+ %20 = load <4 x float> addrspace(9)* null
+ %21 = extractelement <4 x float> %20, i32 1
+ %22 = fmul float %21, %0
+ %23 = load <4 x float> addrspace(9)* null
+ %24 = extractelement <4 x float> %23, i32 2
+ %25 = fmul float %24, %0
+ %26 = load <4 x float> addrspace(9)* null
+ %27 = extractelement <4 x float> %26, i32 3
+ %28 = fmul float %27, %0
+ %29 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %30 = extractelement <4 x float> %29, i32 0
+ %31 = fmul float %30, %15
+ %32 = fadd float %31, %19
+ %33 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %34 = extractelement <4 x float> %33, i32 1
+ %35 = fmul float %34, %15
+ %36 = fadd float %35, %22
+ %37 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %38 = extractelement <4 x float> %37, i32 2
+ %39 = fmul float %38, %15
+ %40 = fadd float %39, %25
+ %41 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
+ %42 = extractelement <4 x float> %41, i32 3
+ %43 = fmul float %42, %15
+ %44 = fadd float %43, %28
+ %45 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %46 = extractelement <4 x float> %45, i32 0
+ %47 = fmul float %46, %1
+ %48 = fadd float %47, %32
+ %49 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %50 = extractelement <4 x float> %49, i32 1
+ %51 = fmul float %50, %1
+ %52 = fadd float %51, %36
+ %53 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %54 = extractelement <4 x float> %53, i32 2
+ %55 = fmul float %54, %1
+ %56 = fadd float %55, %40
+ %57 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 2)
+ %58 = extractelement <4 x float> %57, i32 3
+ %59 = fmul float %58, %1
+ %60 = fadd float %59, %44
+ %61 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %62 = extractelement <4 x float> %61, i32 0
+ %63 = fmul float %62, %16
+ %64 = fadd float %63, %48
+ %65 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %66 = extractelement <4 x float> %65, i32 1
+ %67 = fmul float %66, %16
+ %68 = fadd float %67, %52
+ %69 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %70 = extractelement <4 x float> %69, i32 2
+ %71 = fmul float %70, %16
+ %72 = fadd float %71, %56
+ %73 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 3)
+ %74 = extractelement <4 x float> %73, i32 3
+ %75 = fmul float %74, %16
+ %76 = fadd float %75, %60
+ %77 = insertelement <4 x float> undef, float %64, i32 0
+ %78 = insertelement <4 x float> %77, float %68, i32 1
+ %79 = insertelement <4 x float> %78, float %72, i32 2
+ %80 = insertelement <4 x float> %79, float %76, i32 3
+ call void @llvm.AMDGPU.barrier.local()
+ %81 = insertelement <4 x float> undef, float %temp.0, i32 0
+ %82 = insertelement <4 x float> %81, float %temp1.0, i32 1
+ %83 = insertelement <4 x float> %82, float %temp2.0, i32 2
+ %84 = insertelement <4 x float> %83, float %temp3.0, i32 3
+ call void @llvm.AMDGPU.barrier.local()
+ ret void
+
+LOOP: ; preds = %main_body, %Flow
+ %temp.1 = phi float [ %109, %Flow ], [ 0.000000e+00, %main_body ]
+ %temp1.1 = phi float [ %108, %Flow ], [ 1.000000e+00, %main_body ]
+ %temp2.1 = phi float [ %107, %Flow ], [ 0.000000e+00, %main_body ]
+ %temp3.1 = phi float [ %106, %Flow ], [ 0.000000e+00, %main_body ]
+ %temp4.0 = phi float [ %105, %Flow ], [ -2.000000e+00, %main_body ]
+ %85 = fcmp uge float %temp4.0, %0
+ %86 = select i1 %85, float 1.000000e+00, float 0.000000e+00
+ %87 = fsub float -0.000000e+00, %86
+ %88 = fptosi float %87 to i32
+ %89 = bitcast i32 %88 to float
+ %90 = bitcast float %89 to i32
+ %91 = icmp ne i32 %90, 0
+ %92 = xor i1 %91, true
+ br i1 %92, label %ENDIF16, label %Flow
+
+ENDIF16: ; preds = %LOOP
+ %93 = fcmp une float %1, %temp4.0
+ %94 = select i1 %93, float 1.000000e+00, float 0.000000e+00
+ %95 = fsub float -0.000000e+00, %94
+ %96 = fptosi float %95 to i32
+ %97 = bitcast i32 %96 to float
+ %98 = bitcast float %97 to i32
+ %99 = icmp ne i32 %98, 0
+ %100 = xor i1 %99, true
+ br i1 %100, label %ENDIF19, label %Flow1
+
+Flow: ; preds = %Flow1, %LOOP
+ %101 = phi float [ %temp3.1, %Flow1 ], [ %temp3.1, %LOOP ]
+ %102 = phi float [ %temp2.1, %Flow1 ], [ %temp2.1, %LOOP ]
+ %103 = phi float [ %temp1.1, %Flow1 ], [ %temp1.1, %LOOP ]
+ %104 = phi float [ %temp.1, %Flow1 ], [ %temp.1, %LOOP ]
+ %105 = phi float [ %9, %Flow1 ], [ undef, %LOOP ]
+ %106 = phi float [ %10, %Flow1 ], [ undef, %LOOP ]
+ %107 = phi float [ %11, %Flow1 ], [ undef, %LOOP ]
+ %108 = phi float [ %12, %Flow1 ], [ undef, %LOOP ]
+ %109 = phi float [ %13, %Flow1 ], [ undef, %LOOP ]
+ %110 = phi i1 [ %14, %Flow1 ], [ true, %LOOP ]
+ br i1 %110, label %Flow2, label %LOOP
+
+ENDIF19: ; preds = %ENDIF16
+ %111 = fadd float %temp.1, 1.000000e+00
+ %112 = fadd float %temp1.1, 0.000000e+00
+ %113 = fadd float %temp2.1, 0.000000e+00
+ %114 = fadd float %temp3.1, 0.000000e+00
+ %115 = fadd float %temp4.0, 1.000000e+00
+ br label %Flow1
+}
+
+attributes #0 = { "ShaderType"="1" }
diff --git a/test/CodeGen/R600/sdiv.ll b/test/CodeGen/R600/sdiv.ll
index 3dd10c8a61c1..e922d5c18680 100644
--- a/test/CodeGen/R600/sdiv.ll
+++ b/test/CodeGen/R600/sdiv.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; The code generated by sdiv is long and complex and may frequently change.
; The goal of this test is to make sure the ISel doesn't fail.
@@ -9,9 +10,9 @@
; This was fixed by adding an additional pattern in R600Instructions.td to
; match this pattern with a CNDGE_INT.
-; CHECK: CF_END
-
-define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+; FUNC-LABEL: @sdiv_i32
+; EG: CF_END
+define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in
%den = load i32 addrspace(1) * %den_ptr
@@ -19,3 +20,84 @@ define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
store i32 %result, i32 addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @sdiv_i32_4
+define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = sdiv i32 %num, 4
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; Multiply by a weird constant to make sure setIntDivIsCheap is
+; working.
+
+; FUNC-LABEL: @slow_sdiv_i32_3435
+; SI: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]],
+; SI: V_MOV_B32_e32 [[MAGIC:v[0-9]+]], 0x98a1930b
+; SI: V_MUL_HI_I32 [[TMP:v[0-9]+]], [[VAL]], [[MAGIC]]
+; SI: V_ADD_I32
+; SI: V_LSHRREV_B32
+; SI: V_ASHRREV_I32
+; SI: V_ADD_I32
+; SI: BUFFER_STORE_DWORD
+; SI: S_ENDPGM
+define void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = sdiv i32 %num, 3435
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %num = load <2 x i32> addrspace(1) * %in
+ %den = load <2 x i32> addrspace(1) * %den_ptr
+ %result = sdiv <2 x i32> %num, %den
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %num = load <2 x i32> addrspace(1) * %in
+ %result = sdiv <2 x i32> %num, <i32 4, i32 4>
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %num = load <4 x i32> addrspace(1) * %in
+ %den = load <4 x i32> addrspace(1) * %den_ptr
+ %result = sdiv <4 x i32> %num, %den
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %num = load <4 x i32> addrspace(1) * %in
+ %result = sdiv <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; Tests for 64-bit divide bypass.
+; define void @test_get_quotient(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; %result = sdiv i64 %a, %b
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
+
+; define void @test_get_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; %result = srem i64 %a, %b
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
+
+; define void @test_get_quotient_and_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+; %resultdiv = sdiv i64 %a, %b
+; %resultrem = srem i64 %a, %b
+; %result = add i64 %resultdiv, %resultrem
+; store i64 %result, i64 addrspace(1)* %out, align 8
+; ret void
+; }
diff --git a/test/CodeGen/R600/select-i1.ll b/test/CodeGen/R600/select-i1.ll
new file mode 100644
index 000000000000..009dd7f68dea
--- /dev/null
+++ b/test/CodeGen/R600/select-i1.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FIXME: This should go in the existing select.ll test, except the current testcase there is broken on SI
+
+; FUNC-LABEL: @select_i1
+; SI: V_CNDMASK_B32
+; SI-NOT: V_CNDMASK_B32
+define void @select_i1(i1 addrspace(1)* %out, i32 %cond, i1 %a, i1 %b) nounwind {
+ %cmp = icmp ugt i32 %cond, 5
+ %sel = select i1 %cmp, i1 %a, i1 %b
+ store i1 %sel, i1 addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/CodeGen/R600/select-vectors.ll b/test/CodeGen/R600/select-vectors.ll
new file mode 100644
index 000000000000..94605fe08ad8
--- /dev/null
+++ b/test/CodeGen/R600/select-vectors.ll
@@ -0,0 +1,155 @@
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; Test expansion of scalar selects on vectors.
+; Evergreen is not enabled since it seems to have problems with doubles.
+
+
+; FUNC-LABEL: @select_v4i8
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) nounwind {
+ %cmp = icmp eq i8 %c, 0
+ %select = select i1 %cmp, <4 x i8> %a, <4 x i8> %b
+ store <4 x i8> %select, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @select_v4i16
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <4 x i16> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x i16> %a, <4 x i16> %b
+ store <4 x i16> %select, <4 x i16> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @select_v2i32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: BUFFER_STORE_DWORDX2
+define void @select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <2 x i32> %a, <2 x i32> %b
+ store <2 x i32> %select, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @select_v4i32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: BUFFER_STORE_DWORDX4
+define void @select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x i32> %a, <4 x i32> %b
+ store <4 x i32> %select, <4 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v8i32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <8 x i32> %a, <8 x i32> %b
+ store <8 x i32> %select, <8 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v2f32
+; SI: BUFFER_STORE_DWORDX2
+define void @select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <2 x float> %a, <2 x float> %b
+ store <2 x float> %select, <2 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v4f32
+; SI: BUFFER_STORE_DWORDX4
+define void @select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x float> %a, <4 x float> %b
+ store <4 x float> %select, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v8f32
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <8 x float> %a, <8 x float> %b
+ store <8 x float> %select, <8 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v2f64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <2 x double> %a, <2 x double> %b
+ store <2 x double> %select, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v4f64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <4 x double> %a, <4 x double> %b
+ store <4 x double> %select, <4 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @select_v8f64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+define void @select_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, <8 x double> %b, i32 %c) nounwind {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, <8 x double> %a, <8 x double> %b
+ store <8 x double> %select, <8 x double> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/select.ll b/test/CodeGen/R600/select.ll
index f9401424ac12..7d5156834b9d 100644
--- a/test/CodeGen/R600/select.ll
+++ b/test/CodeGen/R600/select.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
; Normally icmp + select is optimized to select_cc; when this happens the
; DAGLegalizer never sees the select and doesn't have a chance to legalize it.
@@ -6,13 +7,13 @@
; In order to avoid the select_cc optimization, this test case calculates the
; condition for the select in a separate basic block.
-; CHECK-LABEL: @select
-; CHECK-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.X
-; CHECK-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.X
-; CHECK-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
-; CHECK-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
-; CHECK-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XYZW
-; CHECK-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XYZW
+; FUNC-LABEL: @select
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.X
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.X
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XYZW
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XYZW
define void @select (i32 addrspace(1)* %i32out, float addrspace(1)* %f32out,
<2 x i32> addrspace(1)* %v2i32out, <2 x float> addrspace(1)* %v2f32out,
<4 x i32> addrspace(1)* %v4i32out, <4 x float> addrspace(1)* %v4f32out,
diff --git a/test/CodeGen/R600/select64.ll b/test/CodeGen/R600/select64.ll
new file mode 100644
index 000000000000..dba25e3bd21e
--- /dev/null
+++ b/test/CodeGen/R600/select64.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: @select0
+; i64 select should be split into two i32 selects, and we shouldn't need
+; to use a shift to extract the hi dword of the input.
+; CHECK-NOT: S_LSHR_B64
+; CHECK: V_CNDMASK
+; CHECK: V_CNDMASK
+define void @select0(i64 addrspace(1)* %out, i32 %cond, i64 %in) {
+entry:
+ %0 = icmp ugt i32 %cond, 5
+ %1 = select i1 %0, i64 0, i64 %in
+ store i64 %1, i64 addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: @select_trunc_i64
+; CHECK: V_CNDMASK_B32
+; CHECK-NOT: V_CNDMASK_B32
+define void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i64 %in) nounwind {
+ %cmp = icmp ugt i32 %cond, 5
+ %sel = select i1 %cmp, i64 0, i64 %in
+ %trunc = trunc i64 %sel to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; CHECK-LABEL: @select_trunc_i64_2
+; CHECK: V_CNDMASK_B32
+; CHECK-NOT: V_CNDMASK_B32
+define void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 %a, i64 %b) nounwind {
+ %cmp = icmp ugt i32 %cond, 5
+ %sel = select i1 %cmp, i64 %a, i64 %b
+ %trunc = trunc i64 %sel to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; CHECK-LABEL: @v_select_trunc_i64_2
+; CHECK: V_CNDMASK_B32
+; CHECK-NOT: V_CNDMASK_B32
+define void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %cmp = icmp ugt i32 %cond, 5
+ %a = load i64 addrspace(1)* %aptr, align 8
+ %b = load i64 addrspace(1)* %bptr, align 8
+ %sel = select i1 %cmp, i64 %a, i64 %b
+ %trunc = trunc i64 %sel to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/selectcc-opt.ll b/test/CodeGen/R600/selectcc-opt.ll
index 834c03069522..bdb6867850ba 100644
--- a/test/CodeGen/R600/selectcc-opt.ll
+++ b/test/CodeGen/R600/selectcc-opt.ll
@@ -1,8 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; CHECK: @test_a
-; CHECK-NOT: CND
-; CHECK: SET{{[NEQGTL]+}}_DX10
+
+; FUNC-LABEL: @test_a
+; EG-NOT: CND
+; EG: SET{{[NEQGTL]+}}_DX10
define void @test_a(i32 addrspace(1)* %out, float %in) {
entry:
@@ -28,10 +30,10 @@ ENDIF:
; Same as test_a, but the branch labels are swapped to produce the inverse cc
; for the icmp instruction
-; CHECK: @test_b
-; CHECK: SET{{[GTEQN]+}}_DX10
-; CHECK-NEXT: PRED_
-; CHECK-NEXT: ALU clause starting
+; EG-LABEL: @test_b
+; EG: SET{{[GTEQN]+}}_DX10
+; EG-NEXT: PRED_
+; EG-NEXT: ALU clause starting
define void @test_b(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp olt float %in, 0.0
@@ -54,8 +56,8 @@ ENDIF:
}
; Test a CND*_INT instruction with float true/false values
-; CHECK: @test_c
-; CHECK: CND{{[GTE]+}}_INT
+; EG-LABEL: @test_c
+; EG: CND{{[GTE]+}}_INT
define void @test_c(float addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
@@ -63,3 +65,15 @@ entry:
store float %1, float addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @selectcc_bool
+; SI: V_CMP_NE_I32
+; SI-NEXT: V_CNDMASK_B32_e64
+; SI-NOT: CMP
+; SI-NOT: CNDMASK
+define void @selectcc_bool(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = select i1 %icmp0, i32 -1, i32 0
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/selectcc.ll b/test/CodeGen/R600/selectcc.ll
new file mode 100644
index 000000000000..a8f57cf1b572
--- /dev/null
+++ b/test/CodeGen/R600/selectcc.ll
@@ -0,0 +1,19 @@
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @selectcc_i64
+; EG: XOR_INT
+; EG: XOR_INT
+; EG: OR_INT
+; EG: CNDE_INT
+; EG: CNDE_INT
+; SI: V_CMP_EQ_I64
+; SI: V_CNDMASK
+; SI: V_CNDMASK
+define void @selectcc_i64(i64 addrspace(1) * %out, i64 %lhs, i64 %rhs, i64 %true, i64 %false) {
+entry:
+ %0 = icmp eq i64 %lhs, %rhs
+ %1 = select i1 %0, i64 %true, i64 %false
+ store i64 %1, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/setcc-equivalent.ll b/test/CodeGen/R600/setcc-equivalent.ll
new file mode 100644
index 000000000000..f796748fcefe
--- /dev/null
+++ b/test/CodeGen/R600/setcc-equivalent.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+; XFAIL: *
+
+; EG-LABEL: @and_setcc_setcc_i32
+; EG: AND_INT
+; EG-NEXT: SETE_INT
+define void @and_setcc_setcc_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %cmp1 = icmp eq i32 %a, -1
+ %cmp2 = icmp eq i32 %b, -1
+ %and = and i1 %cmp1, %cmp2
+ %ext = sext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; EG-LABEL: @and_setcc_setcc_v4i32
+; EG: AND_INT
+; EG: AND_INT
+; EG: SETE_INT
+; EG: AND_INT
+; EG: SETE_INT
+; EG: AND_INT
+; EG: SETE_INT
+define void @and_setcc_setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+ %cmp1 = icmp eq <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %cmp2 = icmp eq <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and = and <4 x i1> %cmp1, %cmp2
+ %ext = sext <4 x i1> %and to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out, align 4
+ ret void
+} \ No newline at end of file
diff --git a/test/CodeGen/R600/setcc-opt.ll b/test/CodeGen/R600/setcc-opt.ll
new file mode 100644
index 000000000000..8e831e409191
--- /dev/null
+++ b/test/CodeGen/R600/setcc-opt.ll
@@ -0,0 +1,15 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; SI-LABEL: @sext_bool_icmp_ne
+; SI: V_CMP_NE_I32
+; SI-NEXT: V_CNDMASK_B32
+; SI-NOT: V_CMP_NE_I32
+; SI-NOT: V_CNDMASK_B32
+; SI: S_ENDPGM
+define void @sext_bool_icmp_ne(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 0
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/setcc.ll b/test/CodeGen/R600/setcc.ll
index 8d34c4ad4fe5..5bd95b79c0f0 100644
--- a/test/CodeGen/R600/setcc.ll
+++ b/test/CodeGen/R600/setcc.ll
@@ -1,5 +1,5 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
; FUNC-LABEL: @setcc_v2i32
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[3].X, KC0[3].Z
@@ -96,7 +96,9 @@ entry:
; R600-DAG: SETNE_INT
; SI: V_CMP_O_F32
; SI: V_CMP_NEQ_F32
-; SI: S_AND_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_AND_B32_e32
define void @f32_one(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp one float %a, %b
@@ -128,7 +130,9 @@ entry:
; R600-DAG: SETNE_INT
; SI: V_CMP_U_F32
; SI: V_CMP_EQ_F32
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f32_ueq(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ueq float %a, %b
@@ -142,7 +146,9 @@ entry:
; R600: SETE_DX10
; SI: V_CMP_U_F32
; SI: V_CMP_GT_F32
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f32_ugt(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ugt float %a, %b
@@ -156,7 +162,9 @@ entry:
; R600: SETE_DX10
; SI: V_CMP_U_F32
; SI: V_CMP_GE_F32
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f32_uge(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp uge float %a, %b
@@ -170,7 +178,9 @@ entry:
; R600: SETE_DX10
; SI: V_CMP_U_F32
; SI: V_CMP_LT_F32
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f32_ult(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ult float %a, %b
@@ -184,7 +194,9 @@ entry:
; R600: SETE_DX10
; SI: V_CMP_U_F32
; SI: V_CMP_LE_F32
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f32_ule(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ule float %a, %b
diff --git a/test/CodeGen/R600/setcc64.ll b/test/CodeGen/R600/setcc64.ll
index 9202fc01f555..54a33b30940a 100644
--- a/test/CodeGen/R600/setcc64.ll
+++ b/test/CodeGen/R600/setcc64.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
; XXX: Merge this into setcc, once R600 supports 64-bit operations
@@ -59,7 +59,9 @@ entry:
; FUNC-LABEL: @f64_one
; SI: V_CMP_O_F64
; SI: V_CMP_NEQ_F64
-; SI: S_AND_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_AND_B32_e32
define void @f64_one(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp one double %a, %b
@@ -81,7 +83,9 @@ entry:
; FUNC-LABEL: @f64_ueq
; SI: V_CMP_U_F64
; SI: V_CMP_EQ_F64
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f64_ueq(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ueq double %a, %b
@@ -93,7 +97,9 @@ entry:
; FUNC-LABEL: @f64_ugt
; SI: V_CMP_U_F64
; SI: V_CMP_GT_F64
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f64_ugt(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ugt double %a, %b
@@ -105,7 +111,9 @@ entry:
; FUNC-LABEL: @f64_uge
; SI: V_CMP_U_F64
; SI: V_CMP_GE_F64
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f64_uge(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp uge double %a, %b
@@ -117,7 +125,9 @@ entry:
; FUNC-LABEL: @f64_ult
; SI: V_CMP_U_F64
; SI: V_CMP_LT_F64
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f64_ult(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ult double %a, %b
@@ -129,7 +139,9 @@ entry:
; FUNC-LABEL: @f64_ule
; SI: V_CMP_U_F64
; SI: V_CMP_LE_F64
-; SI: S_OR_B64
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: V_OR_B32_e32
define void @f64_ule(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ule double %a, %b
diff --git a/test/CodeGen/R600/seto.ll b/test/CodeGen/R600/seto.ll
index 8633a4b804af..cc942c10a91e 100644
--- a/test/CodeGen/R600/seto.ll
+++ b/test/CodeGen/R600/seto.ll
@@ -1,6 +1,7 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK: V_CMP_O_F32_e64 s[0:1], {{[sv][0-9]+, [sv][0-9]+}}, 0, 0, 0, 0
+;CHECK-LABEL: @main
+;CHECK: V_CMP_O_F32_e32 vcc, {{[sv][0-9]+, v[0-9]+}}
define void @main(float %p) {
main_body:
diff --git a/test/CodeGen/R600/setuo.ll b/test/CodeGen/R600/setuo.ll
index c77a37e19041..33007fc754b8 100644
--- a/test/CodeGen/R600/setuo.ll
+++ b/test/CodeGen/R600/setuo.ll
@@ -1,6 +1,7 @@
;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-;CHECK: V_CMP_U_F32_e64 s[0:1], {{[sv][0-9]+, [sv][0-9]+}}, 0, 0, 0, 0
+;CHECK-LABEL: @main
+;CHECK: V_CMP_U_F32_e32 vcc, {{[sv][0-9]+, v[0-9]+}}
define void @main(float %p) {
main_body:
diff --git a/test/CodeGen/R600/sext-in-reg.ll b/test/CodeGen/R600/sext-in-reg.ll
new file mode 100644
index 000000000000..1b02e4bf8015
--- /dev/null
+++ b/test/CodeGen/R600/sext-in-reg.ll
@@ -0,0 +1,524 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare i32 @llvm.AMDGPU.imax(i32, i32) nounwind readnone
+
+
+; FUNC-LABEL: @sext_in_reg_i1_i32
+; SI: S_LOAD_DWORD [[ARG:s[0-9]+]],
+; SI: S_BFE_I32 [[SEXTRACT:s[0-9]+]], [[ARG]], 0x10000
+; SI: V_MOV_B32_e32 [[EXTRACT:v[0-9]+]], [[SEXTRACT]]
+; SI: BUFFER_STORE_DWORD [[EXTRACT]],
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
+; EG: BFE_INT [[RES]], {{.*}}, 0.0, 1
+; EG-NEXT: LSHR * [[ADDR]]
+define void @sext_in_reg_i1_i32(i32 addrspace(1)* %out, i32 %in) {
+ %shl = shl i32 %in, 31
+ %sext = ashr i32 %shl, 31
+ store i32 %sext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i8_to_i32
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: S_SEXT_I32_I8 [[EXTRACT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
+; SI: BUFFER_STORE_DWORD [[VEXTRACT]],
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
+; EG: ADD_INT
+; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
+; EG-NEXT: LSHR * [[ADDR]]
+define void @sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %shl = shl i32 %c, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i16_to_i32
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: S_SEXT_I32_I16 [[EXTRACT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
+; SI: BUFFER_STORE_DWORD [[VEXTRACT]],
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
+; EG: ADD_INT
+; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
+; EG-NEXT: LSHR * [[ADDR]]
+define void @sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %shl = shl i32 %c, 16
+ %ashr = ashr i32 %shl, 16
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i8_to_v1i32
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: S_SEXT_I32_I8 [[EXTRACT:s[0-9]+]], [[VAL]]
+; SI: V_MOV_B32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
+; SI: BUFFER_STORE_DWORD [[VEXTRACT]],
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
+; EG: ADD_INT
+; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
+; EG-NEXT: LSHR * [[ADDR]]
+define void @sext_in_reg_i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
+ %c = add <1 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <1 x i32> %c, <i32 24>
+ %ashr = ashr <1 x i32> %shl, <i32 24>
+ store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i1_to_i64
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: S_BFE_I32 s{{[0-9]+}}, s{{[0-9]+}}, 0x10000
+; SI: S_MOV_B32 {{s[0-9]+}}, -1
+; SI: BUFFER_STORE_DWORDX2
+define void @sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %c = add i64 %a, %b
+ %shl = shl i64 %c, 63
+ %ashr = ashr i64 %shl, 63
+ store i64 %ashr, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i8_to_i64
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: S_SEXT_I32_I8 [[EXTRACT:s[0-9]+]], [[VAL]]
+; SI: S_MOV_B32 {{s[0-9]+}}, -1
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES_LO:T[0-9]+\.[XYZW]]], [[ADDR_LO:T[0-9]+.[XYZW]]]
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES_HI:T[0-9]+\.[XYZW]]], [[ADDR_HI:T[0-9]+.[XYZW]]]
+; EG: ADD_INT
+; EG-NEXT: BFE_INT {{\*?}} [[RES_LO]], {{.*}}, 0.0, literal
+; EG: ASHR [[RES_HI]]
+; EG-NOT: BFE_INT
+; EG: LSHR
+; EG: LSHR
+;; TODO: Check the address computation; using | with variables in {{}} does not work,
+;; and the _LO/_HI order might also differ.
+define void @sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %c = add i64 %a, %b
+ %shl = shl i64 %c, 56
+ %ashr = ashr i64 %shl, 56
+ store i64 %ashr, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i16_to_i64
+; SI: S_ADD_I32 [[VAL:s[0-9]+]],
+; SI: S_SEXT_I32_I16 [[EXTRACT:s[0-9]+]], [[VAL]]
+; SI: S_MOV_B32 {{s[0-9]+}}, -1
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES_LO:T[0-9]+\.[XYZW]]], [[ADDR_LO:T[0-9]+.[XYZW]]]
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES_HI:T[0-9]+\.[XYZW]]], [[ADDR_HI:T[0-9]+.[XYZW]]]
+; EG: ADD_INT
+; EG-NEXT: BFE_INT {{\*?}} [[RES_LO]], {{.*}}, 0.0, literal
+; EG: ASHR [[RES_HI]]
+; EG-NOT: BFE_INT
+; EG: LSHR
+; EG: LSHR
+;; TODO: Check the address computation; using | with variables in {{}} does not work,
+;; and the _LO/_HI order might also differ.
+define void @sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %c = add i64 %a, %b
+ %shl = shl i64 %c, 48
+ %ashr = ashr i64 %shl, 48
+ store i64 %ashr, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i32_to_i64
+; SI: S_LOAD_DWORD
+; SI: S_LOAD_DWORD
+; SI: S_ADD_I32 [[ADD:s[0-9]+]],
+; SI: S_ASHR_I32 s{{[0-9]+}}, [[ADD]], 31
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES_LO:T[0-9]+\.[XYZW]]], [[ADDR_LO:T[0-9]+.[XYZW]]]
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES_HI:T[0-9]+\.[XYZW]]], [[ADDR_HI:T[0-9]+.[XYZW]]]
+; EG-NOT: BFE_INT
+; EG: ADD_INT {{\*?}} [[RES_LO]]
+; EG: ASHR [[RES_HI]]
+; EG: ADD_INT
+; EG: LSHR
+; EG: LSHR
+;; TODO: Check the address computation; using | with variables in {{}} does not work,
+;; and the _LO/_HI order might also differ.
+define void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %c = add i64 %a, %b
+ %shl = shl i64 %c, 32
+ %ashr = ashr i64 %shl, 32
+ store i64 %ashr, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; This is broken on Evergreen for some reason related to the <1 x i64> kernel arguments.
+; XFUNC-LABEL: @sext_in_reg_i8_to_v1i64
+; XSI: S_BFE_I32 [[EXTRACT:s[0-9]+]], {{s[0-9]+}}, 524288
+; XSI: S_ASHR_I32 {{v[0-9]+}}, [[EXTRACT]], 31
+; XSI: BUFFER_STORE_DWORD
+; XEG: BFE_INT
+; XEG: ASHR
+; define void @sext_in_reg_i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a, <1 x i64> %b) nounwind {
+; %c = add <1 x i64> %a, %b
+; %shl = shl <1 x i64> %c, <i64 56>
+; %ashr = ashr <1 x i64> %shl, <i64 56>
+; store <1 x i64> %ashr, <1 x i64> addrspace(1)* %out, align 8
+; ret void
+; }
+
+; FUNC-LABEL: @sext_in_reg_i1_in_i32_other_amount
+; SI-NOT: BFE
+; SI: S_LSHL_B32 [[REG:s[0-9]+]], {{s[0-9]+}}, 6
+; SI: S_ASHR_I32 {{s[0-9]+}}, [[REG]], 7
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
+; EG-NOT: BFE
+; EG: ADD_INT
+; EG: LSHL
+; EG: ASHR [[RES]]
+; EG: LSHR {{\*?}} [[ADDR]]
+define void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b
+ %x = shl i32 %c, 6
+ %y = ashr i32 %x, 7
+ store i32 %y, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v2i1_in_v2i32_other_amount
+; SI: S_LSHL_B32 [[REG0:s[0-9]+]], {{s[0-9]}}, 6
+; SI: S_ASHR_I32 {{s[0-9]+}}, [[REG0]], 7
+; SI: S_LSHL_B32 [[REG1:s[0-9]+]], {{s[0-9]}}, 6
+; SI: S_ASHR_I32 {{s[0-9]+}}, [[REG1]], 7
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
+; EG-NOT: BFE
+; EG: ADD_INT
+; EG: LSHL
+; EG: ASHR [[RES]]
+; EG: LSHL
+; EG: ASHR [[RES]]
+; EG: LSHR {{\*?}} [[ADDR]]
+define void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b
+ %x = shl <2 x i32> %c, <i32 6, i32 6>
+ %y = ashr <2 x i32> %x, <i32 7, i32 7>
+ store <2 x i32> %y, <2 x i32> addrspace(1)* %out, align 2
+ ret void
+}
+
+
+; FUNC-LABEL: @sext_in_reg_v2i1_to_v2i32
+; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
+; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: LSHR {{\*?}} [[ADDR]]
+define void @sext_in_reg_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i32> %c, <i32 31, i32 31>
+ %ashr = ashr <2 x i32> %shl, <i32 31, i32 31>
+ store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v4i1_to_v4i32
+; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
+; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
+; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
+; SI: S_BFE_I32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
+; SI: BUFFER_STORE_DWORDX4
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW][XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: LSHR {{\*?}} [[ADDR]]
+define void @sext_in_reg_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
+ %c = add <4 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
+ %ashr = ashr <4 x i32> %shl, <i32 31, i32 31, i32 31, i32 31>
+ store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v2i8_to_v2i32
+; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: LSHR {{\*?}} [[ADDR]]
+define void @sext_in_reg_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i32> %c, <i32 24, i32 24>
+ %ashr = ashr <2 x i32> %shl, <i32 24, i32 24>
+ store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v4i8_to_v4i32
+; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: S_SEXT_I32_I8 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: BUFFER_STORE_DWORDX4
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW][XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: LSHR {{\*?}} [[ADDR]]
+define void @sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) nounwind {
+ %c = add <4 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <4 x i32> %c, <i32 24, i32 24, i32 24, i32 24>
+ %ashr = ashr <4 x i32> %shl, <i32 24, i32 24, i32 24, i32 24>
+ store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_v2i16_to_v2i32
+; SI: S_SEXT_I32_I16 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: S_SEXT_I32_I16 {{s[0-9]+}}, {{s[0-9]+}}
+; SI: BUFFER_STORE_DWORDX2
+
+; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
+; EG: BFE_INT [[RES]]
+; EG: BFE_INT [[RES]]
+; EG: LSHR {{\*?}} [[ADDR]]
+define void @sext_in_reg_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) nounwind {
+ %c = add <2 x i32> %a, %b ; add to prevent folding into extload
+ %shl = shl <2 x i32> %c, <i32 16, i32 16>
+ %ashr = ashr <2 x i32> %shl, <i32 16, i32 16>
+ store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @testcase
+define void @testcase(i8 addrspace(1)* %out, i8 %a) nounwind {
+ %and_a_1 = and i8 %a, 1
+ %cmp_eq = icmp eq i8 %and_a_1, 0
+ %cmp_slt = icmp slt i8 %a, 0
+ %sel0 = select i1 %cmp_slt, i8 0, i8 %a
+ %sel1 = select i1 %cmp_eq, i8 0, i8 %a
+ %xor = xor i8 %sel0, %sel1
+ store i8 %xor, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @testcase_3
+define void @testcase_3(i8 addrspace(1)* %out, i8 %a) nounwind {
+ %and_a_1 = and i8 %a, 1
+ %cmp_eq = icmp eq i8 %and_a_1, 0
+ %cmp_slt = icmp slt i8 %a, 0
+ %sel0 = select i1 %cmp_slt, i8 0, i8 %a
+ %sel1 = select i1 %cmp_eq, i8 0, i8 %a
+ %xor = xor i8 %sel0, %sel1
+ store i8 %xor, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @vgpr_sext_in_reg_v4i8_to_v4i32
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
+define void @vgpr_sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) nounwind {
+ %loada = load <4 x i32> addrspace(1)* %a, align 16
+ %loadb = load <4 x i32> addrspace(1)* %b, align 16
+ %c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
+ %shl = shl <4 x i32> %c, <i32 24, i32 24, i32 24, i32 24>
+ %ashr = ashr <4 x i32> %shl, <i32 24, i32 24, i32 24, i32 24>
+ store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @vgpr_sext_in_reg_v4i16_to_v4i32
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
+; SI: V_BFE_I32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
+define void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) nounwind {
+ %loada = load <4 x i32> addrspace(1)* %a, align 16
+ %loadb = load <4 x i32> addrspace(1)* %b, align 16
+ %c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
+ %shl = shl <4 x i32> %c, <i32 16, i32 16, i32 16, i32 16>
+ %ashr = ashr <4 x i32> %shl, <i32 16, i32 16, i32 16, i32 16>
+ store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FIXME: The BFE should really be eliminated. I think it should happen
+; when computeKnownBitsForTargetNode is implemented for imax.
+
+; FUNC-LABEL: @sext_in_reg_to_illegal_type
+; SI: BUFFER_LOAD_SBYTE
+; SI: V_MAX_I32
+; SI: V_BFE_I32
+; SI: BUFFER_STORE_SHORT
+define void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) nounwind {
+ %tmp5 = load i8 addrspace(1)* %src, align 1
+ %tmp2 = sext i8 %tmp5 to i32
+ %tmp3 = tail call i32 @llvm.AMDGPU.imax(i32 %tmp2, i32 0) nounwind readnone
+ %tmp4 = trunc i32 %tmp3 to i8
+ %tmp6 = sext i8 %tmp4 to i16
+ store i16 %tmp6, i16 addrspace(1)* %out, align 2
+ ret void
+}
+
+declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
+
+; FUNC-LABEL: @bfe_0_width
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_0_width(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
+ %load = load i32 addrspace(1)* %ptr, align 4
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 8, i32 0) nounwind readnone
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_8_bfe_8
+; SI: V_BFE_I32
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_8_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
+ %load = load i32 addrspace(1)* %ptr, align 4
+ %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 8) nounwind readnone
+ %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 8) nounwind readnone
+ store i32 %bfe1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @bfe_8_bfe_16
+; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
+; SI: S_ENDPGM
+define void @bfe_8_bfe_16(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
+ %load = load i32 addrspace(1)* %ptr, align 4
+ %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 8) nounwind readnone
+ %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 16) nounwind readnone
+ store i32 %bfe1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; This really should be folded into 1
+; FUNC-LABEL: @bfe_16_bfe_8
+; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 8
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @bfe_16_bfe_8(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) nounwind {
+ %load = load i32 addrspace(1)* %ptr, align 4
+ %bfe0 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 0, i32 16) nounwind readnone
+ %bfe1 = call i32 @llvm.AMDGPU.bfe.i32(i32 %bfe0, i32 0, i32 8) nounwind readnone
+ store i32 %bfe1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; Make sure there isn't a redundant BFE
+; FUNC-LABEL: @sext_in_reg_i8_to_i32_bfe
+; SI: S_SEXT_I32_I8 s{{[0-9]+}}, s{{[0-9]+}}
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @sext_in_reg_i8_to_i32_bfe(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %c, i32 0, i32 8) nounwind readnone
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i8_to_i32_bfe_wrong
+define void @sext_in_reg_i8_to_i32_bfe_wrong(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %c = add i32 %a, %b ; add to prevent folding into extload
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %c, i32 8, i32 0) nounwind readnone
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sextload_i8_to_i32_bfe
+; SI: BUFFER_LOAD_SBYTE
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @sextload_i8_to_i32_bfe(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) nounwind {
+ %load = load i8 addrspace(1)* %ptr, align 1
+ %sext = sext i8 %load to i32
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %sext, i32 0, i32 8) nounwind readnone
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sextload_i8_to_i32_bfe_0:
+; SI-NOT: BFE
+; SI: S_ENDPGM
+define void @sextload_i8_to_i32_bfe_0(i32 addrspace(1)* %out, i8 addrspace(1)* %ptr) nounwind {
+ %load = load i8 addrspace(1)* %ptr, align 1
+ %sext = sext i8 %load to i32
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %sext, i32 8, i32 0) nounwind readnone
+ %shl = shl i32 %bfe, 24
+ %ashr = ashr i32 %shl, 24
+ store i32 %ashr, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i1_bfe_offset_0:
+; SI-NOT: SHR
+; SI-NOT: SHL
+; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
+; SI: S_ENDPGM
+define void @sext_in_reg_i1_bfe_offset_0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 31
+ %shr = ashr i32 %shl, 31
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 0, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i1_bfe_offset_1
+; SI: BUFFER_LOAD_DWORD
+; SI-NOT: SHL
+; SI-NOT: SHR
+; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 1
+; SI: S_ENDPGM
+define void @sext_in_reg_i1_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 30
+ %shr = ashr i32 %shl, 30
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 1, i32 1)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sext_in_reg_i2_bfe_offset_1:
+; SI: BUFFER_LOAD_DWORD
+; SI: V_LSHLREV_B32_e32 v{{[0-9]+}}, 30, v{{[0-9]+}}
+; SI: V_ASHRREV_I32_e32 v{{[0-9]+}}, 30, v{{[0-9]+}}
+; SI: V_BFE_I32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 2
+; SI: S_ENDPGM
+define void @sext_in_reg_i2_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %x = load i32 addrspace(1)* %in, align 4
+ %shl = shl i32 %x, 30
+ %shr = ashr i32 %shl, 30
+ %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shr, i32 1, i32 2)
+ store i32 %bfe, i32 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/sgpr-control-flow.ll b/test/CodeGen/R600/sgpr-control-flow.ll
new file mode 100644
index 000000000000..06ad24d959cf
--- /dev/null
+++ b/test/CodeGen/R600/sgpr-control-flow.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+;
+;
+; Most SALU instructions ignore control flow, so we need to make sure
+; they don't overwrite values from other blocks.
+
+; SI-NOT: S_ADD
+
+define void @sgpr_if_else(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+entry:
+ %0 = icmp eq i32 %a, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = add i32 %b, %c
+ br label %endif
+
+else:
+ %2 = add i32 %d, %e
+ br label %endif
+
+endif:
+ %3 = phi i32 [%1, %if], [%2, %else]
+ %4 = add i32 %3, %a
+ store i32 %4, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll b/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
index d74161bf6dc1..9d8a623125fe 100644
--- a/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
+++ b/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; Copy VGPR -> SGPR used twice as an instruction operand, which is then
; used in an REG_SEQUENCE that also needs to be handled.
diff --git a/test/CodeGen/R600/sgpr-copy.ll b/test/CodeGen/R600/sgpr-copy.ll
index 5472c1bb1ca9..c7d5bf90644e 100644
--- a/test/CodeGen/R600/sgpr-copy.ll
+++ b/test/CodeGen/R600/sgpr-copy.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
; This test checks that no VGPR to SGPR copies are created by the register
; allocator.
@@ -70,7 +70,7 @@ main_body:
%55 = fadd float %54, %53
%56 = fmul float %45, %45
%57 = fadd float %55, %56
- %58 = call float @llvm.AMDGPU.rsq(float %57)
+ %58 = call float @llvm.AMDGPU.rsq.f32(float %57)
%59 = fmul float %43, %58
%60 = fmul float %44, %58
%61 = fmul float %45, %58
@@ -212,7 +212,7 @@ declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
declare <4 x float> @llvm.SI.sample.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32) #1
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #3
+declare float @llvm.AMDGPU.rsq.f32(float) #3
; Function Attrs: readnone
declare float @llvm.AMDIL.exp.(float) #3
diff --git a/test/CodeGen/R600/shl.ll b/test/CodeGen/R600/shl.ll
index 4a6aab4a104a..43fab2a39dcc 100644
--- a/test/CodeGen/R600/shl.ll
+++ b/test/CodeGen/R600/shl.ll
@@ -39,5 +39,118 @@ define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
ret void
}
-; XXX: Add SI test for i64 shl once i64 stores and i64 function arguments are
-; supported.
+;EG-CHECK: @shl_i64
+;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG-CHECK: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG-CHECK: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: LSHL {{\*? *}}[[HISMTMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-CHECK-DAG: OR_INT {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], {{[[HISMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-CHECK-DAG: LSHL {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], [[OPLO]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
+
+;SI-CHECK: @shl_i64
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %a = load i64 addrspace(1) * %in
+ %b = load i64 addrspace(1) * %b_ptr
+ %result = shl i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK: @shl_v2i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @shl_v2i64
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %a = load <2 x i64> addrspace(1) * %in
+ %b = load <2 x i64> addrspace(1) * %b_ptr
+ %result = shl <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK: @shl_v4i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHC]]
+;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHD]]
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHL {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: LSHL
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @shl_v4i64
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHL_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %a = load <4 x i64> addrspace(1) * %in
+ %b = load <4 x i64> addrspace(1) * %b_ptr
+ %result = shl <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/si-annotate-cf-assertion.ll b/test/CodeGen/R600/si-annotate-cf-assertion.ll
index 9886fe9169bb..daa4667150bc 100644
--- a/test/CodeGen/R600/si-annotate-cf-assertion.ll
+++ b/test/CodeGen/R600/si-annotate-cf-assertion.ll
@@ -1,5 +1,6 @@
+; REQUIRES: asserts
; XFAIL: *
-; RUN: llc -march=r600 -mcpu=SI -asm-verbose=false < %s | FileCheck %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs -asm-verbose=false < %s | FileCheck %s
define void @test(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
diff --git a/test/CodeGen/R600/si-sgpr-spill.ll b/test/CodeGen/R600/si-sgpr-spill.ll
index 05c5e31f3fad..53a096513bbc 100644
--- a/test/CodeGen/R600/si-sgpr-spill.ll
+++ b/test/CodeGen/R600/si-sgpr-spill.ll
@@ -1,8 +1,5 @@
; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck %s
-; XXX: Enable when spilling is supported
-; XFAIL: *
-
; These tests check that the compiler won't crash when it needs to spill
; SGPRs.
@@ -206,7 +203,7 @@ main_body:
%198 = fadd float %197, %196
%199 = fmul float %97, %97
%200 = fadd float %198, %199
- %201 = call float @llvm.AMDGPU.rsq(float %200)
+ %201 = call float @llvm.AMDGPU.rsq.f32(float %200)
%202 = fmul float %95, %201
%203 = fmul float %96, %201
%204 = fmul float %202, %29
@@ -387,7 +384,7 @@ IF67: ; preds = %LOOP65
%355 = fadd float %354, %353
%356 = fmul float %352, %352
%357 = fadd float %355, %356
- %358 = call float @llvm.AMDGPU.rsq(float %357)
+ %358 = call float @llvm.AMDGPU.rsq.f32(float %357)
%359 = fmul float %350, %358
%360 = fmul float %351, %358
%361 = fmul float %352, %358
@@ -515,7 +512,7 @@ IF67: ; preds = %LOOP65
%483 = fadd float %482, %481
%484 = fmul float %109, %109
%485 = fadd float %483, %484
- %486 = call float @llvm.AMDGPU.rsq(float %485)
+ %486 = call float @llvm.AMDGPU.rsq.f32(float %485)
%487 = fmul float %107, %486
%488 = fmul float %108, %486
%489 = fmul float %109, %486
@@ -544,7 +541,7 @@ IF67: ; preds = %LOOP65
%512 = fadd float %511, %510
%513 = fmul float %97, %97
%514 = fadd float %512, %513
- %515 = call float @llvm.AMDGPU.rsq(float %514)
+ %515 = call float @llvm.AMDGPU.rsq.f32(float %514)
%516 = fmul float %95, %515
%517 = fmul float %96, %515
%518 = fmul float %97, %515
@@ -661,7 +658,7 @@ declare i32 @llvm.SI.tid() #2
declare float @ceil(float) #3
; Function Attrs: readnone
-declare float @llvm.AMDGPU.rsq(float) #2
+declare float @llvm.AMDGPU.rsq.f32(float) #2
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.SI.sampled.v8i32(<8 x i32>, <32 x i8>, <16 x i8>, i32) #1
@@ -690,3 +687,880 @@ attributes #3 = { readonly }
attributes #4 = { nounwind readonly }
!0 = metadata !{metadata !"const", null, i32 1}
+
+; CHECK-LABEL: @main1
+; CHECK: S_ENDPGM
+define void @main1([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %21 = getelementptr [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
+ %22 = load <16 x i8> addrspace(2)* %21, !tbaa !0
+ %23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 0)
+ %24 = call float @llvm.SI.load.const(<16 x i8> %22, i32 4)
+ %25 = call float @llvm.SI.load.const(<16 x i8> %22, i32 8)
+ %26 = call float @llvm.SI.load.const(<16 x i8> %22, i32 12)
+ %27 = call float @llvm.SI.load.const(<16 x i8> %22, i32 28)
+ %28 = call float @llvm.SI.load.const(<16 x i8> %22, i32 48)
+ %29 = call float @llvm.SI.load.const(<16 x i8> %22, i32 52)
+ %30 = call float @llvm.SI.load.const(<16 x i8> %22, i32 56)
+ %31 = call float @llvm.SI.load.const(<16 x i8> %22, i32 64)
+ %32 = call float @llvm.SI.load.const(<16 x i8> %22, i32 68)
+ %33 = call float @llvm.SI.load.const(<16 x i8> %22, i32 72)
+ %34 = call float @llvm.SI.load.const(<16 x i8> %22, i32 76)
+ %35 = call float @llvm.SI.load.const(<16 x i8> %22, i32 128)
+ %36 = call float @llvm.SI.load.const(<16 x i8> %22, i32 132)
+ %37 = call float @llvm.SI.load.const(<16 x i8> %22, i32 144)
+ %38 = call float @llvm.SI.load.const(<16 x i8> %22, i32 148)
+ %39 = call float @llvm.SI.load.const(<16 x i8> %22, i32 152)
+ %40 = call float @llvm.SI.load.const(<16 x i8> %22, i32 160)
+ %41 = call float @llvm.SI.load.const(<16 x i8> %22, i32 164)
+ %42 = call float @llvm.SI.load.const(<16 x i8> %22, i32 168)
+ %43 = call float @llvm.SI.load.const(<16 x i8> %22, i32 172)
+ %44 = call float @llvm.SI.load.const(<16 x i8> %22, i32 176)
+ %45 = call float @llvm.SI.load.const(<16 x i8> %22, i32 180)
+ %46 = call float @llvm.SI.load.const(<16 x i8> %22, i32 184)
+ %47 = call float @llvm.SI.load.const(<16 x i8> %22, i32 192)
+ %48 = call float @llvm.SI.load.const(<16 x i8> %22, i32 196)
+ %49 = call float @llvm.SI.load.const(<16 x i8> %22, i32 200)
+ %50 = call float @llvm.SI.load.const(<16 x i8> %22, i32 208)
+ %51 = call float @llvm.SI.load.const(<16 x i8> %22, i32 212)
+ %52 = call float @llvm.SI.load.const(<16 x i8> %22, i32 216)
+ %53 = call float @llvm.SI.load.const(<16 x i8> %22, i32 220)
+ %54 = call float @llvm.SI.load.const(<16 x i8> %22, i32 236)
+ %55 = call float @llvm.SI.load.const(<16 x i8> %22, i32 240)
+ %56 = call float @llvm.SI.load.const(<16 x i8> %22, i32 244)
+ %57 = call float @llvm.SI.load.const(<16 x i8> %22, i32 248)
+ %58 = call float @llvm.SI.load.const(<16 x i8> %22, i32 252)
+ %59 = call float @llvm.SI.load.const(<16 x i8> %22, i32 256)
+ %60 = call float @llvm.SI.load.const(<16 x i8> %22, i32 260)
+ %61 = call float @llvm.SI.load.const(<16 x i8> %22, i32 264)
+ %62 = call float @llvm.SI.load.const(<16 x i8> %22, i32 268)
+ %63 = call float @llvm.SI.load.const(<16 x i8> %22, i32 272)
+ %64 = call float @llvm.SI.load.const(<16 x i8> %22, i32 276)
+ %65 = call float @llvm.SI.load.const(<16 x i8> %22, i32 280)
+ %66 = call float @llvm.SI.load.const(<16 x i8> %22, i32 284)
+ %67 = call float @llvm.SI.load.const(<16 x i8> %22, i32 288)
+ %68 = call float @llvm.SI.load.const(<16 x i8> %22, i32 292)
+ %69 = call float @llvm.SI.load.const(<16 x i8> %22, i32 464)
+ %70 = call float @llvm.SI.load.const(<16 x i8> %22, i32 468)
+ %71 = call float @llvm.SI.load.const(<16 x i8> %22, i32 472)
+ %72 = call float @llvm.SI.load.const(<16 x i8> %22, i32 496)
+ %73 = call float @llvm.SI.load.const(<16 x i8> %22, i32 500)
+ %74 = call float @llvm.SI.load.const(<16 x i8> %22, i32 504)
+ %75 = call float @llvm.SI.load.const(<16 x i8> %22, i32 512)
+ %76 = call float @llvm.SI.load.const(<16 x i8> %22, i32 516)
+ %77 = call float @llvm.SI.load.const(<16 x i8> %22, i32 524)
+ %78 = call float @llvm.SI.load.const(<16 x i8> %22, i32 532)
+ %79 = call float @llvm.SI.load.const(<16 x i8> %22, i32 536)
+ %80 = call float @llvm.SI.load.const(<16 x i8> %22, i32 540)
+ %81 = call float @llvm.SI.load.const(<16 x i8> %22, i32 544)
+ %82 = call float @llvm.SI.load.const(<16 x i8> %22, i32 548)
+ %83 = call float @llvm.SI.load.const(<16 x i8> %22, i32 552)
+ %84 = call float @llvm.SI.load.const(<16 x i8> %22, i32 556)
+ %85 = call float @llvm.SI.load.const(<16 x i8> %22, i32 560)
+ %86 = call float @llvm.SI.load.const(<16 x i8> %22, i32 564)
+ %87 = call float @llvm.SI.load.const(<16 x i8> %22, i32 568)
+ %88 = call float @llvm.SI.load.const(<16 x i8> %22, i32 572)
+ %89 = call float @llvm.SI.load.const(<16 x i8> %22, i32 576)
+ %90 = call float @llvm.SI.load.const(<16 x i8> %22, i32 580)
+ %91 = call float @llvm.SI.load.const(<16 x i8> %22, i32 584)
+ %92 = call float @llvm.SI.load.const(<16 x i8> %22, i32 588)
+ %93 = call float @llvm.SI.load.const(<16 x i8> %22, i32 592)
+ %94 = call float @llvm.SI.load.const(<16 x i8> %22, i32 596)
+ %95 = call float @llvm.SI.load.const(<16 x i8> %22, i32 600)
+ %96 = call float @llvm.SI.load.const(<16 x i8> %22, i32 604)
+ %97 = call float @llvm.SI.load.const(<16 x i8> %22, i32 608)
+ %98 = call float @llvm.SI.load.const(<16 x i8> %22, i32 612)
+ %99 = call float @llvm.SI.load.const(<16 x i8> %22, i32 616)
+ %100 = call float @llvm.SI.load.const(<16 x i8> %22, i32 624)
+ %101 = call float @llvm.SI.load.const(<16 x i8> %22, i32 628)
+ %102 = call float @llvm.SI.load.const(<16 x i8> %22, i32 632)
+ %103 = call float @llvm.SI.load.const(<16 x i8> %22, i32 636)
+ %104 = call float @llvm.SI.load.const(<16 x i8> %22, i32 640)
+ %105 = call float @llvm.SI.load.const(<16 x i8> %22, i32 644)
+ %106 = call float @llvm.SI.load.const(<16 x i8> %22, i32 648)
+ %107 = call float @llvm.SI.load.const(<16 x i8> %22, i32 652)
+ %108 = call float @llvm.SI.load.const(<16 x i8> %22, i32 656)
+ %109 = call float @llvm.SI.load.const(<16 x i8> %22, i32 660)
+ %110 = call float @llvm.SI.load.const(<16 x i8> %22, i32 664)
+ %111 = call float @llvm.SI.load.const(<16 x i8> %22, i32 668)
+ %112 = call float @llvm.SI.load.const(<16 x i8> %22, i32 672)
+ %113 = call float @llvm.SI.load.const(<16 x i8> %22, i32 676)
+ %114 = call float @llvm.SI.load.const(<16 x i8> %22, i32 680)
+ %115 = call float @llvm.SI.load.const(<16 x i8> %22, i32 684)
+ %116 = call float @llvm.SI.load.const(<16 x i8> %22, i32 688)
+ %117 = call float @llvm.SI.load.const(<16 x i8> %22, i32 692)
+ %118 = call float @llvm.SI.load.const(<16 x i8> %22, i32 696)
+ %119 = call float @llvm.SI.load.const(<16 x i8> %22, i32 700)
+ %120 = call float @llvm.SI.load.const(<16 x i8> %22, i32 704)
+ %121 = call float @llvm.SI.load.const(<16 x i8> %22, i32 708)
+ %122 = call float @llvm.SI.load.const(<16 x i8> %22, i32 712)
+ %123 = call float @llvm.SI.load.const(<16 x i8> %22, i32 716)
+ %124 = call float @llvm.SI.load.const(<16 x i8> %22, i32 864)
+ %125 = call float @llvm.SI.load.const(<16 x i8> %22, i32 868)
+ %126 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
+ %127 = load <32 x i8> addrspace(2)* %126, !tbaa !0
+ %128 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
+ %129 = load <16 x i8> addrspace(2)* %128, !tbaa !0
+ %130 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
+ %131 = load <32 x i8> addrspace(2)* %130, !tbaa !0
+ %132 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
+ %133 = load <16 x i8> addrspace(2)* %132, !tbaa !0
+ %134 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
+ %135 = load <32 x i8> addrspace(2)* %134, !tbaa !0
+ %136 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
+ %137 = load <16 x i8> addrspace(2)* %136, !tbaa !0
+ %138 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
+ %139 = load <32 x i8> addrspace(2)* %138, !tbaa !0
+ %140 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
+ %141 = load <16 x i8> addrspace(2)* %140, !tbaa !0
+ %142 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
+ %143 = load <32 x i8> addrspace(2)* %142, !tbaa !0
+ %144 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
+ %145 = load <16 x i8> addrspace(2)* %144, !tbaa !0
+ %146 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
+ %147 = load <32 x i8> addrspace(2)* %146, !tbaa !0
+ %148 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
+ %149 = load <16 x i8> addrspace(2)* %148, !tbaa !0
+ %150 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
+ %151 = load <32 x i8> addrspace(2)* %150, !tbaa !0
+ %152 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
+ %153 = load <16 x i8> addrspace(2)* %152, !tbaa !0
+ %154 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
+ %155 = load <32 x i8> addrspace(2)* %154, !tbaa !0
+ %156 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
+ %157 = load <16 x i8> addrspace(2)* %156, !tbaa !0
+ %158 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 8
+ %159 = load <32 x i8> addrspace(2)* %158, !tbaa !0
+ %160 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 8
+ %161 = load <16 x i8> addrspace(2)* %160, !tbaa !0
+ %162 = fcmp ugt float %17, 0.000000e+00
+ %163 = select i1 %162, float 1.000000e+00, float 0.000000e+00
+ %164 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %4, <2 x i32> %6)
+ %165 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %4, <2 x i32> %6)
+ %166 = call float @llvm.SI.fs.interp(i32 2, i32 0, i32 %4, <2 x i32> %6)
+ %167 = call float @llvm.SI.fs.interp(i32 3, i32 0, i32 %4, <2 x i32> %6)
+ %168 = call float @llvm.SI.fs.interp(i32 0, i32 1, i32 %4, <2 x i32> %6)
+ %169 = call float @llvm.SI.fs.interp(i32 1, i32 1, i32 %4, <2 x i32> %6)
+ %170 = call float @llvm.SI.fs.interp(i32 2, i32 1, i32 %4, <2 x i32> %6)
+ %171 = call float @llvm.SI.fs.interp(i32 3, i32 1, i32 %4, <2 x i32> %6)
+ %172 = call float @llvm.SI.fs.interp(i32 0, i32 2, i32 %4, <2 x i32> %6)
+ %173 = call float @llvm.SI.fs.interp(i32 1, i32 2, i32 %4, <2 x i32> %6)
+ %174 = call float @llvm.SI.fs.interp(i32 2, i32 2, i32 %4, <2 x i32> %6)
+ %175 = call float @llvm.SI.fs.interp(i32 3, i32 2, i32 %4, <2 x i32> %6)
+ %176 = call float @llvm.SI.fs.interp(i32 0, i32 3, i32 %4, <2 x i32> %6)
+ %177 = call float @llvm.SI.fs.interp(i32 1, i32 3, i32 %4, <2 x i32> %6)
+ %178 = call float @llvm.SI.fs.interp(i32 2, i32 3, i32 %4, <2 x i32> %6)
+ %179 = call float @llvm.SI.fs.interp(i32 3, i32 3, i32 %4, <2 x i32> %6)
+ %180 = call float @llvm.SI.fs.interp(i32 0, i32 4, i32 %4, <2 x i32> %6)
+ %181 = call float @llvm.SI.fs.interp(i32 1, i32 4, i32 %4, <2 x i32> %6)
+ %182 = call float @llvm.SI.fs.interp(i32 2, i32 4, i32 %4, <2 x i32> %6)
+ %183 = call float @llvm.SI.fs.interp(i32 3, i32 4, i32 %4, <2 x i32> %6)
+ %184 = call float @llvm.SI.fs.interp(i32 0, i32 5, i32 %4, <2 x i32> %6)
+ %185 = call float @llvm.SI.fs.interp(i32 1, i32 5, i32 %4, <2 x i32> %6)
+ %186 = call float @llvm.SI.fs.interp(i32 2, i32 5, i32 %4, <2 x i32> %6)
+ %187 = call float @llvm.SI.fs.interp(i32 3, i32 5, i32 %4, <2 x i32> %6)
+ %188 = call float @llvm.SI.fs.interp(i32 0, i32 6, i32 %4, <2 x i32> %6)
+ %189 = call float @llvm.SI.fs.interp(i32 1, i32 6, i32 %4, <2 x i32> %6)
+ %190 = call float @llvm.SI.fs.interp(i32 2, i32 6, i32 %4, <2 x i32> %6)
+ %191 = call float @llvm.SI.fs.interp(i32 3, i32 6, i32 %4, <2 x i32> %6)
+ %192 = call float @llvm.SI.fs.interp(i32 0, i32 7, i32 %4, <2 x i32> %6)
+ %193 = call float @llvm.SI.fs.interp(i32 1, i32 7, i32 %4, <2 x i32> %6)
+ %194 = call float @llvm.SI.fs.interp(i32 2, i32 7, i32 %4, <2 x i32> %6)
+ %195 = call float @llvm.SI.fs.interp(i32 3, i32 7, i32 %4, <2 x i32> %6)
+ %196 = fmul float %14, %124
+ %197 = fadd float %196, %125
+ %198 = call float @llvm.AMDIL.clamp.(float %163, float 0.000000e+00, float 1.000000e+00)
+ %199 = call float @llvm.AMDIL.clamp.(float 0.000000e+00, float 0.000000e+00, float 1.000000e+00)
+ %200 = call float @llvm.AMDIL.clamp.(float 0.000000e+00, float 0.000000e+00, float 1.000000e+00)
+ %201 = call float @llvm.AMDIL.clamp.(float 1.000000e+00, float 0.000000e+00, float 1.000000e+00)
+ %202 = bitcast float %198 to i32
+ %203 = icmp ne i32 %202, 0
+ %. = select i1 %203, float -1.000000e+00, float 1.000000e+00
+ %204 = fsub float -0.000000e+00, %164
+ %205 = fadd float %44, %204
+ %206 = fsub float -0.000000e+00, %165
+ %207 = fadd float %45, %206
+ %208 = fsub float -0.000000e+00, %166
+ %209 = fadd float %46, %208
+ %210 = fmul float %205, %205
+ %211 = fmul float %207, %207
+ %212 = fadd float %211, %210
+ %213 = fmul float %209, %209
+ %214 = fadd float %212, %213
+ %215 = call float @llvm.AMDGPU.rsq.f32(float %214)
+ %216 = fmul float %205, %215
+ %217 = fmul float %207, %215
+ %218 = fmul float %209, %215
+ %219 = fmul float %., %54
+ %220 = fmul float %13, %47
+ %221 = fmul float %197, %48
+ %222 = bitcast float %174 to i32
+ %223 = bitcast float %175 to i32
+ %224 = insertelement <2 x i32> undef, i32 %222, i32 0
+ %225 = insertelement <2 x i32> %224, i32 %223, i32 1
+ %226 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %225, <32 x i8> %131, <16 x i8> %133, i32 2)
+ %227 = extractelement <4 x float> %226, i32 0
+ %228 = extractelement <4 x float> %226, i32 1
+ %229 = extractelement <4 x float> %226, i32 2
+ %230 = extractelement <4 x float> %226, i32 3
+ %231 = fmul float %227, 0x4012611180000000
+ %232 = fmul float %228, 0x4012611180000000
+ %233 = fmul float %229, 0x4012611180000000
+ %234 = call float @llvm.AMDGPU.lrp(float %27, float %231, float 1.000000e+00)
+ %235 = call float @llvm.AMDGPU.lrp(float %27, float %232, float 1.000000e+00)
+ %236 = call float @llvm.AMDGPU.lrp(float %27, float %233, float 1.000000e+00)
+ %237 = fmul float %216, %184
+ %238 = fmul float %217, %185
+ %239 = fadd float %238, %237
+ %240 = fmul float %218, %186
+ %241 = fadd float %239, %240
+ %242 = fmul float %216, %187
+ %243 = fmul float %217, %188
+ %244 = fadd float %243, %242
+ %245 = fmul float %218, %189
+ %246 = fadd float %244, %245
+ %247 = fmul float %216, %190
+ %248 = fmul float %217, %191
+ %249 = fadd float %248, %247
+ %250 = fmul float %218, %192
+ %251 = fadd float %249, %250
+ %252 = call float @llvm.AMDIL.clamp.(float %251, float 0.000000e+00, float 1.000000e+00)
+ %253 = fmul float %214, 0x3F5A36E2E0000000
+ %254 = call float @llvm.AMDIL.clamp.(float %253, float 0.000000e+00, float 1.000000e+00)
+ %255 = fsub float -0.000000e+00, %254
+ %256 = fadd float 1.000000e+00, %255
+ %257 = call float @llvm.pow.f32(float %252, float 2.500000e-01)
+ %258 = fmul float %39, %257
+ %259 = fmul float %241, %258
+ %260 = fmul float %246, %258
+ %261 = fmul float %259, %230
+ %262 = fmul float %260, %230
+ %263 = fadd float %252, 0x3EE4F8B580000000
+ %264 = fsub float -0.000000e+00, %252
+ %265 = fadd float 1.000000e+00, %264
+ %266 = fmul float 1.200000e+01, %265
+ %267 = fadd float %266, 4.000000e+00
+ %268 = fsub float -0.000000e+00, %267
+ %269 = fmul float %268, %263
+ %270 = fsub float -0.000000e+00, %267
+ %271 = fmul float %270, %263
+ %272 = fsub float -0.000000e+00, %267
+ %273 = fmul float %272, %263
+ %274 = fdiv float 1.000000e+00, %269
+ %275 = fdiv float 1.000000e+00, %271
+ %276 = fdiv float 1.000000e+00, %273
+ %277 = fmul float %261, %274
+ %278 = fmul float %262, %275
+ %279 = fmul float %263, %276
+ br label %LOOP
+
+LOOP: ; preds = %LOOP, %main_body
+ %temp144.0 = phi float [ 1.000000e+00, %main_body ], [ %292, %LOOP ]
+ %temp168.0 = phi float [ %176, %main_body ], [ %288, %LOOP ]
+ %temp169.0 = phi float [ %177, %main_body ], [ %289, %LOOP ]
+ %temp170.0 = phi float [ %256, %main_body ], [ %290, %LOOP ]
+ %280 = bitcast float %temp168.0 to i32
+ %281 = bitcast float %temp169.0 to i32
+ %282 = insertelement <4 x i32> undef, i32 %280, i32 0
+ %283 = insertelement <4 x i32> %282, i32 %281, i32 1
+ %284 = insertelement <4 x i32> %283, i32 0, i32 2
+ %285 = insertelement <4 x i32> %284, i32 undef, i32 3
+ %286 = call <4 x float> @llvm.SI.samplel.v4i32(<4 x i32> %285, <32 x i8> %147, <16 x i8> %149, i32 2)
+ %287 = extractelement <4 x float> %286, i32 3
+ %288 = fadd float %temp168.0, %277
+ %289 = fadd float %temp169.0, %278
+ %290 = fadd float %temp170.0, %279
+ %291 = fsub float -0.000000e+00, %287
+ %292 = fadd float %290, %291
+ %293 = fcmp oge float 0.000000e+00, %292
+ %294 = sext i1 %293 to i32
+ %295 = bitcast i32 %294 to float
+ %296 = bitcast float %295 to i32
+ %297 = icmp ne i32 %296, 0
+ br i1 %297, label %IF189, label %LOOP
+
+IF189: ; preds = %LOOP
+ %298 = extractelement <4 x float> %286, i32 0
+ %299 = extractelement <4 x float> %286, i32 1
+ %300 = extractelement <4 x float> %286, i32 2
+ %301 = fsub float -0.000000e+00, %292
+ %302 = fadd float %temp144.0, %301
+ %303 = fdiv float 1.000000e+00, %302
+ %304 = fmul float %292, %303
+ %305 = fadd float %304, -1.000000e+00
+ %306 = fmul float %305, %277
+ %307 = fadd float %306, %288
+ %308 = fmul float %305, %278
+ %309 = fadd float %308, %289
+ %310 = fsub float -0.000000e+00, %176
+ %311 = fadd float %307, %310
+ %312 = fsub float -0.000000e+00, %177
+ %313 = fadd float %309, %312
+ %314 = fadd float %176, %311
+ %315 = fadd float %177, %313
+ %316 = fmul float %311, %67
+ %317 = fmul float %313, %68
+ %318 = fmul float %316, %55
+ %319 = fmul float %316, %56
+ %320 = fmul float %317, %57
+ %321 = fadd float %320, %318
+ %322 = fmul float %317, %58
+ %323 = fadd float %322, %319
+ %324 = fadd float %178, %321
+ %325 = fadd float %179, %323
+ %326 = fmul float %316, %59
+ %327 = fmul float %316, %60
+ %328 = fmul float %316, %61
+ %329 = fmul float %316, %62
+ %330 = fmul float %317, %63
+ %331 = fadd float %330, %326
+ %332 = fmul float %317, %64
+ %333 = fadd float %332, %327
+ %334 = fmul float %317, %65
+ %335 = fadd float %334, %328
+ %336 = fmul float %317, %66
+ %337 = fadd float %336, %329
+ %338 = fadd float %168, %331
+ %339 = fadd float %169, %333
+ %340 = fadd float %170, %335
+ %341 = fadd float %171, %337
+ %342 = bitcast float %338 to i32
+ %343 = bitcast float %339 to i32
+ %344 = insertelement <2 x i32> undef, i32 %342, i32 0
+ %345 = insertelement <2 x i32> %344, i32 %343, i32 1
+ %346 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %345, <32 x i8> %135, <16 x i8> %137, i32 2)
+ %347 = extractelement <4 x float> %346, i32 0
+ %348 = extractelement <4 x float> %346, i32 1
+ %349 = extractelement <4 x float> %346, i32 2
+ %350 = extractelement <4 x float> %346, i32 3
+ %351 = fmul float %347, %23
+ %352 = fmul float %348, %24
+ %353 = fmul float %349, %25
+ %354 = fmul float %350, %26
+ %355 = fmul float %351, %180
+ %356 = fmul float %352, %181
+ %357 = fmul float %353, %182
+ %358 = fmul float %354, %183
+ %359 = fsub float -0.000000e+00, %350
+ %360 = fadd float 1.000000e+00, %359
+ %361 = fmul float %360, %49
+ %362 = call float @llvm.AMDGPU.lrp(float %361, float %347, float %355)
+ %363 = call float @llvm.AMDGPU.lrp(float %361, float %348, float %356)
+ %364 = call float @llvm.AMDGPU.lrp(float %361, float %349, float %357)
+ %365 = bitcast float %340 to i32
+ %366 = bitcast float %341 to i32
+ %367 = insertelement <2 x i32> undef, i32 %365, i32 0
+ %368 = insertelement <2 x i32> %367, i32 %366, i32 1
+ %369 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %368, <32 x i8> %151, <16 x i8> %153, i32 2)
+ %370 = extractelement <4 x float> %369, i32 2
+ %371 = fmul float %362, %234
+ %372 = fmul float %363, %235
+ %373 = fmul float %364, %236
+ %374 = fmul float %358, %230
+ %375 = bitcast float %314 to i32
+ %376 = bitcast float %315 to i32
+ %377 = insertelement <2 x i32> undef, i32 %375, i32 0
+ %378 = insertelement <2 x i32> %377, i32 %376, i32 1
+ %379 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %378, <32 x i8> %139, <16 x i8> %141, i32 2)
+ %380 = extractelement <4 x float> %379, i32 0
+ %381 = extractelement <4 x float> %379, i32 1
+ %382 = extractelement <4 x float> %379, i32 2
+ %383 = extractelement <4 x float> %379, i32 3
+ %384 = fcmp olt float 0.000000e+00, %382
+ %385 = sext i1 %384 to i32
+ %386 = bitcast i32 %385 to float
+ %387 = bitcast float %386 to i32
+ %388 = icmp ne i32 %387, 0
+ %.224 = select i1 %388, float %381, float %380
+ %.225 = select i1 %388, float %383, float %381
+ %389 = bitcast float %324 to i32
+ %390 = bitcast float %325 to i32
+ %391 = insertelement <2 x i32> undef, i32 %389, i32 0
+ %392 = insertelement <2 x i32> %391, i32 %390, i32 1
+ %393 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %392, <32 x i8> %143, <16 x i8> %145, i32 2)
+ %394 = extractelement <4 x float> %393, i32 0
+ %395 = extractelement <4 x float> %393, i32 1
+ %396 = extractelement <4 x float> %393, i32 2
+ %397 = extractelement <4 x float> %393, i32 3
+ %398 = fcmp olt float 0.000000e+00, %396
+ %399 = sext i1 %398 to i32
+ %400 = bitcast i32 %399 to float
+ %401 = bitcast float %400 to i32
+ %402 = icmp ne i32 %401, 0
+ %temp112.1 = select i1 %402, float %395, float %394
+ %temp113.1 = select i1 %402, float %397, float %395
+ %403 = fmul float %.224, 2.000000e+00
+ %404 = fadd float %403, -1.000000e+00
+ %405 = fmul float %.225, 2.000000e+00
+ %406 = fadd float %405, -1.000000e+00
+ %407 = fmul float %temp112.1, 2.000000e+00
+ %408 = fadd float %407, -1.000000e+00
+ %409 = fmul float %temp113.1, 2.000000e+00
+ %410 = fadd float %409, -1.000000e+00
+ %411 = fsub float -0.000000e+00, %404
+ %412 = fmul float %411, %35
+ %413 = fsub float -0.000000e+00, %406
+ %414 = fmul float %413, %35
+ %415 = fsub float -0.000000e+00, %408
+ %416 = fmul float %415, %36
+ %417 = fsub float -0.000000e+00, %410
+ %418 = fmul float %417, %36
+ %419 = fmul float %416, %370
+ %420 = fmul float %418, %370
+ %421 = call float @fabs(float %412)
+ %422 = call float @fabs(float %414)
+ %423 = fsub float -0.000000e+00, %421
+ %424 = fadd float 1.000000e+00, %423
+ %425 = fsub float -0.000000e+00, %422
+ %426 = fadd float 1.000000e+00, %425
+ %427 = fmul float %424, %419
+ %428 = fadd float %427, %412
+ %429 = fmul float %426, %420
+ %430 = fadd float %429, %414
+ %431 = fmul float %428, %428
+ %432 = fmul float %430, %430
+ %433 = fadd float %431, %432
+ %434 = fsub float -0.000000e+00, %433
+ %435 = fadd float 0x3FF00068E0000000, %434
+ %436 = call float @llvm.AMDIL.clamp.(float %435, float 0.000000e+00, float 1.000000e+00)
+ %437 = call float @llvm.AMDGPU.rsq.f32(float %436)
+ %438 = fmul float %437, %436
+ %439 = fsub float -0.000000e+00, %436
+ %440 = call float @llvm.AMDGPU.cndlt(float %439, float %438, float 0.000000e+00)
+ %441 = fmul float %184, %428
+ %442 = fmul float %185, %428
+ %443 = fmul float %186, %428
+ %444 = fmul float %187, %430
+ %445 = fadd float %444, %441
+ %446 = fmul float %188, %430
+ %447 = fadd float %446, %442
+ %448 = fmul float %189, %430
+ %449 = fadd float %448, %443
+ %450 = fmul float %190, %440
+ %451 = fadd float %450, %445
+ %452 = fmul float %191, %440
+ %453 = fadd float %452, %447
+ %454 = fmul float %192, %440
+ %455 = fadd float %454, %449
+ %456 = fmul float %451, %451
+ %457 = fmul float %453, %453
+ %458 = fadd float %457, %456
+ %459 = fmul float %455, %455
+ %460 = fadd float %458, %459
+ %461 = call float @llvm.AMDGPU.rsq.f32(float %460)
+ %462 = fmul float %451, %461
+ %463 = fmul float %453, %461
+ %464 = fmul float %455, %461
+ %465 = fcmp olt float 0.000000e+00, %219
+ %466 = sext i1 %465 to i32
+ %467 = bitcast i32 %466 to float
+ %468 = bitcast float %467 to i32
+ %469 = icmp ne i32 %468, 0
+ br i1 %469, label %IF198, label %ENDIF197
+
+IF198: ; preds = %IF189
+ %470 = fsub float -0.000000e+00, %462
+ %471 = fsub float -0.000000e+00, %463
+ %472 = fsub float -0.000000e+00, %464
+ br label %ENDIF197
+
+ENDIF197: ; preds = %IF189, %IF198
+ %temp14.0 = phi float [ %472, %IF198 ], [ %464, %IF189 ]
+ %temp13.0 = phi float [ %471, %IF198 ], [ %463, %IF189 ]
+ %temp12.0 = phi float [ %470, %IF198 ], [ %462, %IF189 ]
+ %473 = bitcast float %220 to i32
+ %474 = bitcast float %221 to i32
+ %475 = insertelement <2 x i32> undef, i32 %473, i32 0
+ %476 = insertelement <2 x i32> %475, i32 %474, i32 1
+ %477 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %476, <32 x i8> %159, <16 x i8> %161, i32 2)
+ %478 = extractelement <4 x float> %477, i32 0
+ %479 = extractelement <4 x float> %477, i32 1
+ %480 = extractelement <4 x float> %477, i32 2
+ %481 = extractelement <4 x float> %477, i32 3
+ %482 = fmul float %478, %40
+ %483 = fadd float %482, %41
+ %484 = fmul float %479, %40
+ %485 = fadd float %484, %41
+ %486 = fmul float %480, %40
+ %487 = fadd float %486, %41
+ %488 = fmul float %481, %42
+ %489 = fadd float %488, %43
+ %490 = bitcast float %172 to i32
+ %491 = bitcast float %173 to i32
+ %492 = insertelement <2 x i32> undef, i32 %490, i32 0
+ %493 = insertelement <2 x i32> %492, i32 %491, i32 1
+ %494 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> %493, <32 x i8> %155, <16 x i8> %157, i32 2)
+ %495 = extractelement <4 x float> %494, i32 0
+ %496 = extractelement <4 x float> %494, i32 1
+ %497 = extractelement <4 x float> %494, i32 2
+ %498 = extractelement <4 x float> %494, i32 3
+ %499 = fmul float %498, 3.200000e+01
+ %500 = fadd float %499, -1.600000e+01
+ %501 = call float @llvm.AMDIL.exp.(float %500)
+ %502 = fmul float %495, %501
+ %503 = fmul float %496, %501
+ %504 = fmul float %497, %501
+ %505 = fmul float %28, %502
+ %506 = fadd float %505, %193
+ %507 = fmul float %29, %503
+ %508 = fadd float %507, %194
+ %509 = fmul float %30, %504
+ %510 = fadd float %509, %195
+ %511 = fmul float %506, %489
+ %512 = fmul float %508, %489
+ %513 = fmul float %510, %489
+ %514 = fmul float %489, 5.000000e-01
+ %515 = fadd float %514, 5.000000e-01
+ %516 = fmul float %483, %515
+ %517 = fadd float %516, %511
+ %518 = fmul float %485, %515
+ %519 = fadd float %518, %512
+ %520 = fmul float %487, %515
+ %521 = fadd float %520, %513
+ %522 = fmul float %517, %371
+ %523 = fmul float %519, %372
+ %524 = fmul float %521, %373
+ %525 = fmul float %428, 0x3FDB272440000000
+ %526 = fmul float %430, 0xBFDB272440000000
+ %527 = fadd float %526, %525
+ %528 = fmul float %440, 0x3FE99999A0000000
+ %529 = fadd float %527, %528
+ %530 = fmul float %529, 5.000000e-01
+ %531 = fadd float %530, 0x3FE3333340000000
+ %532 = fmul float %531, %531
+ %533 = fmul float %522, %532
+ %534 = fmul float %523, %532
+ %535 = fmul float %524, %532
+ %536 = fsub float -0.000000e+00, %72
+ %537 = fsub float -0.000000e+00, %73
+ %538 = fsub float -0.000000e+00, %74
+ %539 = fmul float %temp12.0, %536
+ %540 = fmul float %temp13.0, %537
+ %541 = fadd float %540, %539
+ %542 = fmul float %temp14.0, %538
+ %543 = fadd float %541, %542
+ %544 = call float @llvm.AMDIL.clamp.(float %543, float 0.000000e+00, float 1.000000e+00)
+ %545 = fmul float %371, %544
+ %546 = fmul float %372, %544
+ %547 = fmul float %373, %544
+ %548 = fmul float %545, %69
+ %549 = fmul float %546, %70
+ %550 = fmul float %547, %71
+ %551 = fsub float -0.000000e+00, %164
+ %552 = fadd float %97, %551
+ %553 = fsub float -0.000000e+00, %165
+ %554 = fadd float %98, %553
+ %555 = fsub float -0.000000e+00, %166
+ %556 = fadd float %99, %555
+ %557 = fmul float %552, %552
+ %558 = fmul float %554, %554
+ %559 = fadd float %558, %557
+ %560 = fmul float %556, %556
+ %561 = fadd float %559, %560
+ %562 = call float @llvm.AMDGPU.rsq.f32(float %561)
+ %563 = fmul float %562, %561
+ %564 = fsub float -0.000000e+00, %561
+ %565 = call float @llvm.AMDGPU.cndlt(float %564, float %563, float 0.000000e+00)
+ %566 = fsub float -0.000000e+00, %84
+ %567 = fadd float %565, %566
+ %568 = fsub float -0.000000e+00, %83
+ %569 = fadd float %565, %568
+ %570 = fsub float -0.000000e+00, %82
+ %571 = fadd float %565, %570
+ %572 = fsub float -0.000000e+00, %84
+ %573 = fadd float %83, %572
+ %574 = fsub float -0.000000e+00, %83
+ %575 = fadd float %82, %574
+ %576 = fsub float -0.000000e+00, %82
+ %577 = fadd float %81, %576
+ %578 = fdiv float 1.000000e+00, %573
+ %579 = fdiv float 1.000000e+00, %575
+ %580 = fdiv float 1.000000e+00, %577
+ %581 = fmul float %567, %578
+ %582 = fmul float %569, %579
+ %583 = fmul float %571, %580
+ %584 = fcmp olt float %565, %83
+ %585 = sext i1 %584 to i32
+ %586 = bitcast i32 %585 to float
+ %587 = bitcast float %586 to i32
+ %588 = icmp ne i32 %587, 0
+ br i1 %588, label %ENDIF200, label %ELSE202
+
+ELSE202: ; preds = %ENDIF197
+ %589 = fcmp olt float %565, %82
+ %590 = sext i1 %589 to i32
+ %591 = bitcast i32 %590 to float
+ %592 = bitcast float %591 to i32
+ %593 = icmp ne i32 %592, 0
+ br i1 %593, label %ENDIF200, label %ELSE205
+
+ENDIF200: ; preds = %ELSE205, %ELSE202, %ENDIF197
+ %temp80.0 = phi float [ %581, %ENDIF197 ], [ %.226, %ELSE205 ], [ %582, %ELSE202 ]
+ %temp88.0 = phi float [ %122, %ENDIF197 ], [ %.227, %ELSE205 ], [ %120, %ELSE202 ]
+ %temp89.0 = phi float [ %123, %ENDIF197 ], [ %.228, %ELSE205 ], [ %121, %ELSE202 ]
+ %temp90.0 = phi float [ %120, %ENDIF197 ], [ %116, %ELSE205 ], [ %118, %ELSE202 ]
+ %temp91.0 = phi float [ %121, %ENDIF197 ], [ %117, %ELSE205 ], [ %119, %ELSE202 ]
+ %594 = fcmp olt float %565, %83
+ %595 = sext i1 %594 to i32
+ %596 = bitcast i32 %595 to float
+ %597 = bitcast float %596 to i32
+ %598 = icmp ne i32 %597, 0
+ br i1 %598, label %ENDIF209, label %ELSE211
+
+ELSE205: ; preds = %ELSE202
+ %599 = fcmp olt float %565, %81
+ %600 = sext i1 %599 to i32
+ %601 = bitcast i32 %600 to float
+ %602 = bitcast float %601 to i32
+ %603 = icmp ne i32 %602, 0
+ %.226 = select i1 %603, float %583, float 1.000000e+00
+ %.227 = select i1 %603, float %118, float %116
+ %.228 = select i1 %603, float %119, float %117
+ br label %ENDIF200
+
+ELSE211: ; preds = %ENDIF200
+ %604 = fcmp olt float %565, %82
+ %605 = sext i1 %604 to i32
+ %606 = bitcast i32 %605 to float
+ %607 = bitcast float %606 to i32
+ %608 = icmp ne i32 %607, 0
+ br i1 %608, label %ENDIF209, label %ELSE214
+
+ENDIF209: ; preds = %ELSE214, %ELSE211, %ENDIF200
+ %temp52.0 = phi float [ %108, %ENDIF200 ], [ %100, %ELSE214 ], [ %104, %ELSE211 ]
+ %temp53.0 = phi float [ %109, %ENDIF200 ], [ %101, %ELSE214 ], [ %105, %ELSE211 ]
+ %temp54.0 = phi float [ %110, %ENDIF200 ], [ %102, %ELSE214 ], [ %106, %ELSE211 ]
+ %temp55.0 = phi float [ %111, %ENDIF200 ], [ %103, %ELSE214 ], [ %107, %ELSE211 ]
+ %temp68.0 = phi float [ %112, %ENDIF200 ], [ %.230, %ELSE214 ], [ %108, %ELSE211 ]
+ %temp69.0 = phi float [ %113, %ENDIF200 ], [ %.231, %ELSE214 ], [ %109, %ELSE211 ]
+ %temp70.0 = phi float [ %114, %ENDIF200 ], [ %.232, %ELSE214 ], [ %110, %ELSE211 ]
+ %temp71.0 = phi float [ %115, %ENDIF200 ], [ %.233, %ELSE214 ], [ %111, %ELSE211 ]
+ %609 = fmul float %164, %85
+ %610 = fmul float %165, %86
+ %611 = fadd float %609, %610
+ %612 = fmul float %166, %87
+ %613 = fadd float %611, %612
+ %614 = fmul float %167, %88
+ %615 = fadd float %613, %614
+ %616 = fmul float %164, %89
+ %617 = fmul float %165, %90
+ %618 = fadd float %616, %617
+ %619 = fmul float %166, %91
+ %620 = fadd float %618, %619
+ %621 = fmul float %167, %92
+ %622 = fadd float %620, %621
+ %623 = fmul float %164, %93
+ %624 = fmul float %165, %94
+ %625 = fadd float %623, %624
+ %626 = fmul float %166, %95
+ %627 = fadd float %625, %626
+ %628 = fmul float %167, %96
+ %629 = fadd float %627, %628
+ %630 = fsub float -0.000000e+00, %78
+ %631 = fadd float 1.000000e+00, %630
+ %632 = call float @fabs(float %615)
+ %633 = call float @fabs(float %622)
+ %634 = fcmp oge float %631, %632
+ %635 = sext i1 %634 to i32
+ %636 = bitcast i32 %635 to float
+ %637 = bitcast float %636 to i32
+ %638 = and i32 %637, 1065353216
+ %639 = bitcast i32 %638 to float
+ %640 = fcmp oge float %631, %633
+ %641 = sext i1 %640 to i32
+ %642 = bitcast i32 %641 to float
+ %643 = bitcast float %642 to i32
+ %644 = and i32 %643, 1065353216
+ %645 = bitcast i32 %644 to float
+ %646 = fmul float %639, %645
+ %647 = fmul float %629, %646
+ %648 = fmul float %615, %temp68.0
+ %649 = fadd float %648, %temp70.0
+ %650 = fmul float %622, %temp69.0
+ %651 = fadd float %650, %temp71.0
+ %652 = fmul float %615, %temp52.0
+ %653 = fadd float %652, %temp54.0
+ %654 = fmul float %622, %temp53.0
+ %655 = fadd float %654, %temp55.0
+ %656 = fadd float %temp80.0, -1.000000e+00
+ %657 = fmul float %656, %77
+ %658 = fadd float %657, 1.000000e+00
+ %659 = call float @llvm.AMDIL.clamp.(float %658, float 0.000000e+00, float 1.000000e+00)
+ %660 = bitcast float %649 to i32
+ %661 = bitcast float %651 to i32
+ %662 = bitcast float 0.000000e+00 to i32
+ %663 = insertelement <4 x i32> undef, i32 %660, i32 0
+ %664 = insertelement <4 x i32> %663, i32 %661, i32 1
+ %665 = insertelement <4 x i32> %664, i32 %662, i32 2
+ %666 = insertelement <4 x i32> %665, i32 undef, i32 3
+ %667 = call <4 x float> @llvm.SI.samplel.v4i32(<4 x i32> %666, <32 x i8> %127, <16 x i8> %129, i32 2)
+ %668 = extractelement <4 x float> %667, i32 0
+ %669 = extractelement <4 x float> %667, i32 1
+ %670 = bitcast float %653 to i32
+ %671 = bitcast float %655 to i32
+ %672 = bitcast float 0.000000e+00 to i32
+ %673 = insertelement <4 x i32> undef, i32 %670, i32 0
+ %674 = insertelement <4 x i32> %673, i32 %671, i32 1
+ %675 = insertelement <4 x i32> %674, i32 %672, i32 2
+ %676 = insertelement <4 x i32> %675, i32 undef, i32 3
+ %677 = call <4 x float> @llvm.SI.samplel.v4i32(<4 x i32> %676, <32 x i8> %127, <16 x i8> %129, i32 2)
+ %678 = extractelement <4 x float> %677, i32 0
+ %679 = extractelement <4 x float> %677, i32 1
+ %680 = fsub float -0.000000e+00, %669
+ %681 = fadd float 1.000000e+00, %680
+ %682 = fsub float -0.000000e+00, %679
+ %683 = fadd float 1.000000e+00, %682
+ %684 = fmul float %681, 2.500000e-01
+ %685 = fmul float %683, 2.500000e-01
+ %686 = fsub float -0.000000e+00, %684
+ %687 = fadd float %668, %686
+ %688 = fsub float -0.000000e+00, %685
+ %689 = fadd float %678, %688
+ %690 = fmul float %647, %temp88.0
+ %691 = fadd float %690, %temp89.0
+ %692 = fmul float %647, %temp90.0
+ %693 = fadd float %692, %temp91.0
+ %694 = call float @llvm.AMDIL.clamp.(float %691, float 0.000000e+00, float 1.000000e+00)
+ %695 = call float @llvm.AMDIL.clamp.(float %693, float 0.000000e+00, float 1.000000e+00)
+ %696 = fsub float -0.000000e+00, %694
+ %697 = fadd float %668, %696
+ %698 = fsub float -0.000000e+00, %695
+ %699 = fadd float %678, %698
+ %700 = fmul float %668, %668
+ %701 = fmul float %678, %678
+ %702 = fsub float -0.000000e+00, %700
+ %703 = fadd float %687, %702
+ %704 = fsub float -0.000000e+00, %701
+ %705 = fadd float %689, %704
+ %706 = fcmp uge float %703, %75
+ %707 = select i1 %706, float %703, float %75
+ %708 = fcmp uge float %705, %75
+ %709 = select i1 %708, float %705, float %75
+ %710 = fmul float %697, %697
+ %711 = fadd float %710, %707
+ %712 = fmul float %699, %699
+ %713 = fadd float %712, %709
+ %714 = fdiv float 1.000000e+00, %711
+ %715 = fdiv float 1.000000e+00, %713
+ %716 = fmul float %707, %714
+ %717 = fmul float %709, %715
+ %718 = fcmp oge float %697, 0.000000e+00
+ %719 = sext i1 %718 to i32
+ %720 = bitcast i32 %719 to float
+ %721 = bitcast float %720 to i32
+ %722 = icmp ne i32 %721, 0
+ %.229 = select i1 %722, float 1.000000e+00, float %716
+ %723 = fcmp oge float %699, 0.000000e+00
+ %724 = sext i1 %723 to i32
+ %725 = bitcast i32 %724 to float
+ %726 = bitcast float %725 to i32
+ %727 = icmp ne i32 %726, 0
+ %temp28.0 = select i1 %727, float 1.000000e+00, float %717
+ %728 = call float @llvm.AMDGPU.lrp(float %659, float %temp28.0, float %.229)
+ %729 = call float @llvm.pow.f32(float %728, float %76)
+ %730 = fmul float %729, %79
+ %731 = fadd float %730, %80
+ %732 = call float @llvm.AMDIL.clamp.(float %731, float 0.000000e+00, float 1.000000e+00)
+ %733 = fmul float %732, %732
+ %734 = fmul float 2.000000e+00, %732
+ %735 = fsub float -0.000000e+00, %734
+ %736 = fadd float 3.000000e+00, %735
+ %737 = fmul float %733, %736
+ %738 = fmul float %548, %737
+ %739 = fmul float %549, %737
+ %740 = fmul float %550, %737
+ %741 = fmul float %738, %515
+ %742 = fadd float %741, %533
+ %743 = fmul float %739, %515
+ %744 = fadd float %743, %534
+ %745 = fmul float %740, %515
+ %746 = fadd float %745, %535
+ %747 = call float @llvm.AMDGPU.lrp(float %230, float %287, float 1.000000e+00)
+ %748 = call float @llvm.AMDGPU.lrp(float %37, float %298, float 1.000000e+00)
+ %749 = call float @llvm.AMDGPU.lrp(float %37, float %299, float 1.000000e+00)
+ %750 = call float @llvm.AMDGPU.lrp(float %37, float %300, float 1.000000e+00)
+ %751 = call float @llvm.AMDGPU.lrp(float %38, float %747, float 1.000000e+00)
+ %752 = fmul float %748, %751
+ %753 = fmul float %749, %751
+ %754 = fmul float %750, %751
+ %755 = fmul float %742, %752
+ %756 = fmul float %744, %753
+ %757 = fmul float %746, %754
+ %758 = fmul float %temp12.0, %216
+ %759 = fmul float %temp13.0, %217
+ %760 = fadd float %759, %758
+ %761 = fmul float %temp14.0, %218
+ %762 = fadd float %760, %761
+ %763 = call float @fabs(float %762)
+ %764 = fmul float %763, %763
+ %765 = fmul float %764, %50
+ %766 = fadd float %765, %51
+ %767 = call float @llvm.AMDIL.clamp.(float %766, float 0.000000e+00, float 1.000000e+00)
+ %768 = fsub float -0.000000e+00, %767
+ %769 = fadd float 1.000000e+00, %768
+ %770 = fmul float %33, %769
+ %771 = fmul float %33, %769
+ %772 = fmul float %33, %769
+ %773 = fmul float %34, %769
+ %774 = call float @llvm.AMDGPU.lrp(float %770, float %31, float %755)
+ %775 = call float @llvm.AMDGPU.lrp(float %771, float %31, float %756)
+ %776 = call float @llvm.AMDGPU.lrp(float %772, float %31, float %757)
+ %777 = call float @llvm.AMDGPU.lrp(float %773, float %32, float %374)
+ %778 = fcmp uge float %774, 0x3E6FFFFE60000000
+ %779 = select i1 %778, float %774, float 0x3E6FFFFE60000000
+ %780 = fcmp uge float %775, 0x3E6FFFFE60000000
+ %781 = select i1 %780, float %775, float 0x3E6FFFFE60000000
+ %782 = fcmp uge float %776, 0x3E6FFFFE60000000
+ %783 = select i1 %782, float %776, float 0x3E6FFFFE60000000
+ %784 = fcmp uge float %779, 6.550400e+04
+ %785 = select i1 %784, float 6.550400e+04, float %779
+ %786 = fcmp uge float %781, 6.550400e+04
+ %787 = select i1 %786, float 6.550400e+04, float %781
+ %788 = fcmp uge float %783, 6.550400e+04
+ %789 = select i1 %788, float 6.550400e+04, float %783
+ %790 = fmul float %777, %52
+ %791 = fadd float %790, %53
+ %792 = call float @llvm.AMDIL.clamp.(float %791, float 0.000000e+00, float 1.000000e+00)
+ %793 = call i32 @llvm.SI.packf16(float %785, float %787)
+ %794 = bitcast i32 %793 to float
+ %795 = call i32 @llvm.SI.packf16(float %789, float %792)
+ %796 = bitcast i32 %795 to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %794, float %796, float %794, float %796)
+ ret void
+
+ELSE214: ; preds = %ELSE211
+ %797 = fcmp olt float %565, %81
+ %798 = sext i1 %797 to i32
+ %799 = bitcast i32 %798 to float
+ %800 = bitcast float %799 to i32
+ %801 = icmp ne i32 %800, 0
+ %.230 = select i1 %801, float %104, float %100
+ %.231 = select i1 %801, float %105, float %101
+ %.232 = select i1 %801, float %106, float %102
+ %.233 = select i1 %801, float %107, float %103
+ br label %ENDIF209
+}
+
+; Function Attrs: readnone
+declare float @llvm.AMDIL.clamp.(float, float, float) #2
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.SI.sample.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32) #1
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.lrp(float, float, float) #2
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.SI.samplel.v4i32(<4 x i32>, <32 x i8>, <16 x i8>, i32) #1
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.cndlt(float, float, float) #2
+
+; Function Attrs: readnone
+declare float @llvm.AMDIL.exp.(float) #2
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { readnone }
+attributes #3 = { nounwind readonly }
+attributes #4 = { readonly }
diff --git a/test/CodeGen/R600/sign_extend.ll b/test/CodeGen/R600/sign_extend.ll
index 1212cee9446e..e3bee507de67 100644
--- a/test/CodeGen/R600/sign_extend.ll
+++ b/test/CodeGen/R600/sign_extend.ll
@@ -1,12 +1,61 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; SI-LABEL: @s_sext_i1_to_i32:
+; SI: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %cmp = icmp eq i32 %a, %b
+ %sext = sext i1 %cmp to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
-; CHECK: V_ASHR
-define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+; SI-LABEL: @test:
+; SI: V_ASHR
+; SI: S_ENDPGM
+define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
entry:
- %0 = mul i32 %a, %b
- %1 = add i32 %0, %c
- %2 = sext i32 %1 to i64
- store i64 %2, i64 addrspace(1)* %out
+ %mul = mul i32 %a, %b
+ %add = add i32 %mul, %c
+ %sext = sext i32 %add to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @s_sext_i1_to_i64:
+; SI: V_CNDMASK_B32_e64
+; SI: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %cmp = icmp eq i32 %a, %b
+ %sext = sext i1 %cmp to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @s_sext_i32_to_i64:
+; SI: S_ASHR_I32
+; SI: S_ENDPGM
+define void @s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a) nounwind {
+ %sext = sext i32 %a to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @v_sext_i32_to_i64:
+; SI: V_ASHR
+; SI: S_ENDPGM
+define void @v_sext_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %val = load i32 addrspace(1)* %in, align 4
+ %sext = sext i32 %val to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @s_sext_i16_to_i64:
+; SI: S_ENDPGM
+define void @s_sext_i16_to_i64(i64 addrspace(1)* %out, i16 %a) nounwind {
+ %sext = sext i16 %a to i64
+ store i64 %sext, i64 addrspace(1)* %out, align 8
ret void
}
diff --git a/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
new file mode 100644
index 000000000000..e6f8ce8ef0ee
--- /dev/null
+++ b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
@@ -0,0 +1,38 @@
+; XFAIL: *
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI %s
+
+; 64-bit select was originally lowered with a build_pair, and this
+; could be simplified to 1 cndmask instead of 2, but that broke when
+; it started being implemented with a v2i32 build_vector and
+; bitcasting.
+define void @trunc_select_i64(i32 addrspace(1)* %out, i64 %a, i64 %b, i32 %c) {
+ %cmp = icmp eq i32 %c, 0
+ %select = select i1 %cmp, i64 %a, i64 %b
+ %trunc = trunc i64 %select to i32
+ store i32 %trunc, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FIXME: Fix truncating store for local memory
+; SI-LABEL: @trunc_load_alloca_i64:
+; SI: V_MOVRELS_B32
+; SI-NOT: V_MOVRELS_B32
+; SI: S_ENDPGM
+define void @trunc_load_alloca_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) {
+ %idx = add i32 %a, %b
+ %alloca = alloca i64, i32 4
+ %gep0 = getelementptr i64* %alloca, i64 0
+ %gep1 = getelementptr i64* %alloca, i64 1
+ %gep2 = getelementptr i64* %alloca, i64 2
+ %gep3 = getelementptr i64* %alloca, i64 3
+ store i64 24, i64* %gep0, align 8
+ store i64 9334, i64* %gep1, align 8
+ store i64 3935, i64* %gep2, align 8
+ store i64 9342, i64* %gep3, align 8
+ %gep = getelementptr i64* %alloca, i32 %idx
+ %load = load i64* %gep, align 8
+ %mask = and i64 %load, 4294967296
+ %add = add i64 %mask, -1
+ store i64 %add, i64 addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/sint_to_fp.ll b/test/CodeGen/R600/sint_to_fp.ll
index 9241799091c0..b27dfda8aea6 100644
--- a/test/CodeGen/R600/sint_to_fp.ll
+++ b/test/CodeGen/R600/sint_to_fp.ll
@@ -29,3 +29,25 @@ define void @sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspac
store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @sint_to_fp_i1_f32:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; SI-NEXT: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, -1.000000e+00, [[CMP]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = uitofp i1 %cmp to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @sint_to_fp_i1_f32_load:
+; SI: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, -1.000000e+00
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) {
+ %fp = sitofp i1 %in to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/sint_to_fp64.ll b/test/CodeGen/R600/sint_to_fp64.ll
index 5abc9d15965d..12b8cf57cf5f 100644
--- a/test/CodeGen/R600/sint_to_fp64.ll
+++ b/test/CodeGen/R600/sint_to_fp64.ll
@@ -1,9 +1,35 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; CHECK: @sint_to_fp64
-; CHECK: V_CVT_F64_I32_e32
+; SI: @sint_to_fp64
+; SI: V_CVT_F64_I32_e32
define void @sint_to_fp64(double addrspace(1)* %out, i32 %in) {
%result = sitofp i32 %in to double
store double %result, double addrspace(1)* %out
ret void
}
+
+; SI-LABEL: @sint_to_fp_i1_f64:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; FIXME: The VGPR sources for V_CNDMASK are copied from SGPRs;
+; we should be able to fold the SGPRs into the V_CNDMASK instructions.
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = sitofp i1 %cmp to double
+ store double %fp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @sint_to_fp_i1_f64_load:
+; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, -1
+; SI-NEXT: V_CVT_F64_I32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
+; SI: S_ENDPGM
+define void @sint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
+ %fp = sitofp i1 %in to double
+ store double %fp, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/smrd.ll b/test/CodeGen/R600/smrd.ll
new file mode 100644
index 000000000000..dec61855b018
--- /dev/null
+++ b/test/CodeGen/R600/smrd.ll
@@ -0,0 +1,98 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck %s
+
+; SMRD load with an immediate offset.
+; CHECK-LABEL: @smrd0
+; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
+define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32 addrspace(2)* %ptr, i64 1
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with the largest possible immediate offset.
+; CHECK-LABEL: @smrd1
+; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
+define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32 addrspace(2)* %ptr, i64 255
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load with an offset greater than the largest possible immediate.
+; CHECK-LABEL: @smrd2
+; CHECK: S_MOV_B32 s[[OFFSET:[0-9]]], 0x400
+; CHECK: S_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+define void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32 addrspace(2)* %ptr, i64 256
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
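+
+; (The SMRD immediate above is a dword index: GEP index 1 encodes as 0x1 and
+; index 255 as 0xff, while index 256, i.e. byte offset 0x400, no longer fits
+; in the immediate field and has to be placed in an SGPR first.)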
+
+; SMRD load with a 64-bit offset
+; CHECK-LABEL: @smrd3
+; CHECK-DAG: S_MOV_B32 s[[SHI:[0-9]+]], 4
+; CHECK-DAG: S_MOV_B32 s[[SLO:[0-9]+]], 0
+; FIXME: We don't need to copy these values to VGPRs
+; CHECK-DAG: V_MOV_B32_e32 v[[VHI:[0-9]+]], s[[SHI]]
+; CHECK-DAG: V_MOV_B32_e32 v[[VLO:[0-9]+]], s[[SLO]]
+; FIXME: We should be able to use S_LOAD_DWORD here
+; BUFFER_LOAD_DWORD v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}] + v[[[VLO]]:[[VHI]]] + 0x0
+
+define void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
+entry:
+ %0 = getelementptr i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
+ %1 = load i32 addrspace(2)* %0
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; SMRD load using the load.const intrinsic with an immediate offset
+; CHECK-LABEL: @smrd_load_const0
+; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
+define void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ ret void
+}
+
+; SMRD load using the load.const intrinsic with the largest possible
+; immediate offset.
+; CHECK-LABEL: @smrd_load_const1
+; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
+define void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1020)
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ ret void
+}
+; SMRD load using the load.const intrinsic with an offset greater than the
+; largest possible immediate offset.
+; CHECK-LABEL: @smrd_load_const2
+; CHECK: S_BUFFER_LOAD_DWORD s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+define void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1024)
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
+ ret void
+}
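+
+; (As above, the S_BUFFER_LOAD immediate is a dword index: byte offset 16 is
+; encoded as 0x4 and 1020 as 0xff, while 1024 would need 0x100 and so no
+; longer fits in the immediate field.)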
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/sra.ll b/test/CodeGen/R600/sra.ll
index fe9df104ae11..9eb3dc544074 100644
--- a/test/CodeGen/R600/sra.ll
+++ b/test/CodeGen/R600/sra.ll
@@ -52,3 +52,133 @@ entry:
ret void
}
+;EG-CHECK-LABEL: @ashr_i64_2
+;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-CHECK-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-CHECK-DAG: ASHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: ASHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
+;EG-CHECK-DAG: ASHR {{\*? *}}[[HIBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+
+;SI-CHECK-LABEL: @ashr_i64_2
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+define void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+entry:
+ %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %a = load i64 addrspace(1) * %in
+ %b = load i64 addrspace(1) * %b_ptr
+ %result = ashr i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK-LABEL: @ashr_v2i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK-LABEL: @ashr_v2i64
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %a = load <2 x i64> addrspace(1) * %in
+ %b = load <2 x i64> addrspace(1) * %b_ptr
+ %result = ashr <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK-LABEL: @ashr_v4i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHC]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHD]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHC]]
+;EG-CHECK-DAG: ASHR {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: ASHR {{.*}}, literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK-LABEL: @ashr_v4i64
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_ASHR_I64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %a = load <4 x i64> addrspace(1) * %in
+ %b = load <4 x i64> addrspace(1) * %b_ptr
+ %result = ashr <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/R600/srem.ll b/test/CodeGen/R600/srem.ll
new file mode 100644
index 000000000000..65e33952d29e
--- /dev/null
+++ b/test/CodeGen/R600/srem.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=r600 -mcpu=SI < %s
+; RUN: llc -march=r600 -mcpu=redwood < %s
+
+define void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %num = load i32 addrspace(1) * %in
+ %den = load i32 addrspace(1) * %den_ptr
+ %result = srem i32 %num, %den
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = srem i32 %num, 4
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %num = load <2 x i32> addrspace(1) * %in
+ %den = load <2 x i32> addrspace(1) * %den_ptr
+ %result = srem <2 x i32> %num, %den
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %num = load <2 x i32> addrspace(1) * %in
+ %result = srem <2 x i32> %num, <i32 4, i32 4>
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %den_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %num = load <4 x i32> addrspace(1) * %in
+ %den = load <4 x i32> addrspace(1) * %den_ptr
+ %result = srem <4 x i32> %num, %den
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %num = load <4 x i32> addrspace(1) * %in
+ %result = srem <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/srl.ll b/test/CodeGen/R600/srl.ll
index 76373552fb16..44ad73f073ed 100644
--- a/test/CodeGen/R600/srl.ll
+++ b/test/CodeGen/R600/srl.ll
@@ -39,3 +39,129 @@ define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+;EG-CHECK: @lshr_i64
+;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-CHECK-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-CHECK-DAG: LSHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: LSHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
+
+;SI-CHECK: @lshr_i64
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %a = load i64 addrspace(1) * %in
+ %b = load i64 addrspace(1) * %b_ptr
+ %result = lshr i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;EG-CHECK: @lshr_v2i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @lshr_v2i64
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %a = load <2 x i64> addrspace(1) * %in
+ %b = load <2 x i64> addrspace(1) * %b_ptr
+ %result = lshr <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+
+;EG-CHECK: @lshr_v4i64
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHC]]
+;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHD]]
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHL {{.*}}, 1
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHD]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHC]]
+;EG-CHECK-DAG: LSHR {{.*}}, [[SHD]]
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: OR_INT
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: LSHR
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+;EG-CHECK-DAG: CNDE_INT
+
+;SI-CHECK: @lshr_v4i64
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI-CHECK: V_LSHR_B64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+define void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %a = load <4 x i64> addrspace(1) * %in
+ %b = load <4 x i64> addrspace(1) * %b_ptr
+ %result = lshr <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/ssubo.ll b/test/CodeGen/R600/ssubo.ll
new file mode 100644
index 000000000000..b330276ae9e7
--- /dev/null
+++ b/test/CodeGen/R600/ssubo.ll
@@ -0,0 +1,64 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+
+declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+
+; FUNC-LABEL: @ssubo_i64_zext
+define void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %ssub, 0
+ %carry = extractvalue { i64, i1 } %ssub, 1
+ %ext = zext i1 %carry to i64
+ %add2 = add i64 %val, %ext
+ store i64 %add2, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_ssubo_i32
+define void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %ssub, 0
+ %carry = extractvalue { i32, i1 } %ssub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_ssubo_i32
+define void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %ssub, 0
+ %carry = extractvalue { i32, i1 } %ssub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_ssubo_i64
+; SI: S_SUB_I32
+; SI: S_SUBB_U32
+define void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %ssub, 0
+ %carry = extractvalue { i64, i1 } %ssub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_ssubo_i64
+; SI: V_SUB_I32_e32
+; SI: V_SUBB_U32_e32
+define void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %ssub, 0
+ %carry = extractvalue { i64, i1 } %ssub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
diff --git a/test/CodeGen/R600/store-v3i32.ll b/test/CodeGen/R600/store-v3i32.ll
new file mode 100644
index 000000000000..33578035daa2
--- /dev/null
+++ b/test/CodeGen/R600/store-v3i32.ll
@@ -0,0 +1,12 @@
+; XFAIL: *
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; 3-element vectors have the same size and alignment as 4-element vectors, so
+; this should be done in a single store.
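+; That is, the <3 x i32> below occupies the same 16 bytes (with the 16-byte
+; alignment used in the store) as a <4 x i32>, so one BUFFER_STORE_DWORDX4
+; would cover it.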
+
+; SI-LABEL: @store_v3i32:
+; SI: BUFFER_STORE_DWORDX4
+define void @store_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a) nounwind {
+ store <3 x i32> %a, <3 x i32> addrspace(1)* %out, align 16
+ ret void
+}
diff --git a/test/CodeGen/R600/store-v3i64.ll b/test/CodeGen/R600/store-v3i64.ll
new file mode 100644
index 000000000000..58d28b567bd1
--- /dev/null
+++ b/test/CodeGen/R600/store-v3i64.ll
@@ -0,0 +1,28 @@
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @global_store_v3i64:
+; SI: BUFFER_STORE_DWORDX4
+; SI: BUFFER_STORE_DWORDX4
+define void @global_store_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
+ store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @global_store_v3i64_unaligned:
+define void @global_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
+ store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 1
+ ret void
+}
+
+; SI-LABEL: @local_store_v3i64:
+define void @local_store_v3i64(<3 x i64> addrspace(3)* %out, <3 x i64> %x) {
+ store <3 x i64> %x, <3 x i64> addrspace(3)* %out, align 32
+ ret void
+}
+
+; SI-LABEL: @local_store_v3i64_unaligned:
+define void @local_store_v3i64_unaligned(<3 x i64> addrspace(1)* %out, <3 x i64> %x) {
+ store <3 x i64> %x, <3 x i64> addrspace(1)* %out, align 1
+ ret void
+}
diff --git a/test/CodeGen/R600/store-vector-ptrs.ll b/test/CodeGen/R600/store-vector-ptrs.ll
index 01210ce1f944..41c5edc280d7 100644
--- a/test/CodeGen/R600/store-vector-ptrs.ll
+++ b/test/CodeGen/R600/store-vector-ptrs.ll
@@ -1,5 +1,6 @@
+; REQUIRES: asserts
; XFAIL: *
-; RUN: llc -march=r600 -mcpu=SI < %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s
define void @store_vector_ptrs(<4 x i32*>* %out, <4 x [1024 x i32]*> %array) nounwind {
%p = getelementptr <4 x [1024 x i32]*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
diff --git a/test/CodeGen/R600/store.ll b/test/CodeGen/R600/store.ll
index 5e51d5691747..dd275338d7c0 100644
--- a/test/CodeGen/R600/store.ll
+++ b/test/CodeGen/R600/store.ll
@@ -1,10 +1,18 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=CM-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=CM-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
;===------------------------------------------------------------------------===;
; Global Address Space
;===------------------------------------------------------------------------===;
+; FUNC-LABEL: @store_i1
+; EG-CHECK: MEM_RAT MSKOR
+; SI-CHECK: BUFFER_STORE_BYTE
+define void @store_i1(i1 addrspace(1)* %out) {
+entry:
+ store i1 true, i1 addrspace(1)* %out
+ ret void
+}
; i8 store
; EG-CHECK-LABEL: @store_i8
@@ -169,10 +177,39 @@ entry:
ret void
}
+; FUNC-LABEL: @store_i64_i8
+; EG-CHECK: MEM_RAT MSKOR
+; SI-CHECK: BUFFER_STORE_BYTE
+define void @store_i64_i8(i8 addrspace(1)* %out, i64 %in) {
+entry:
+ %0 = trunc i64 %in to i8
+ store i8 %0, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @store_i64_i16
+; EG-CHECK: MEM_RAT MSKOR
+; SI-CHECK: BUFFER_STORE_SHORT
+define void @store_i64_i16(i16 addrspace(1)* %out, i64 %in) {
+entry:
+ %0 = trunc i64 %in to i16
+ store i16 %0, i16 addrspace(1)* %out
+ ret void
+}
+
;===------------------------------------------------------------------------===;
; Local Address Space
;===------------------------------------------------------------------------===;
+; FUNC-LABEL: @store_local_i1
+; EG-CHECK: LDS_BYTE_WRITE
+; SI-CHECK: DS_WRITE_B8
+define void @store_local_i1(i1 addrspace(3)* %out) {
+entry:
+ store i1 true, i1 addrspace(3)* %out
+ ret void
+}
+
; EG-CHECK-LABEL: @store_local_i8
; EG-CHECK: LDS_BYTE_WRITE
; SI-CHECK-LABEL: @store_local_i8
@@ -226,8 +263,7 @@ entry:
; CM-CHECK: LDS_WRITE
; CM-CHECK: LDS_WRITE
; SI-CHECK-LABEL: @store_local_v2i32
-; SI-CHECK: DS_WRITE_B32
-; SI-CHECK: DS_WRITE_B32
+; SI-CHECK: DS_WRITE_B64
define void @store_local_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> %in) {
entry:
store <2 x i32> %in, <2 x i32> addrspace(3)* %out
@@ -255,6 +291,26 @@ entry:
ret void
}
+; FUNC-LABEL: @store_local_i64_i8
+; EG-CHECK: LDS_BYTE_WRITE
+; SI-CHECK: DS_WRITE_B8
+define void @store_local_i64_i8(i8 addrspace(3)* %out, i64 %in) {
+entry:
+ %0 = trunc i64 %in to i8
+ store i8 %0, i8 addrspace(3)* %out
+ ret void
+}
+
+; FUNC-LABEL: @store_local_i64_i16
+; EG-CHECK: LDS_SHORT_WRITE
+; SI-CHECK: DS_WRITE_B16
+define void @store_local_i64_i16(i16 addrspace(3)* %out, i64 %in) {
+entry:
+ %0 = trunc i64 %in to i16
+ store i16 %0, i16 addrspace(3)* %out
+ ret void
+}
+
; The stores in this function are combined by the optimizer to create a
; 64-bit store with 32-bit alignment. This is legal for SI and the legalizer
; should not try to split the 64-bit store back into 2 32-bit stores.
@@ -280,3 +336,29 @@ entry:
}
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+; When i128 was a legal type, this program generated "cannot select" errors:
+
+; FUNC-LABEL: @i128-const-store
+; FIXME: We should be able to do this with one store instruction
+; EG-CHECK: STORE_RAW
+; EG-CHECK: STORE_RAW
+; EG-CHECK: STORE_RAW
+; EG-CHECK: STORE_RAW
+; CM-CHECK: STORE_DWORD
+; CM-CHECK: STORE_DWORD
+; CM-CHECK: STORE_DWORD
+; CM-CHECK: STORE_DWORD
+; SI: BUFFER_STORE_DWORDX2
+; SI: BUFFER_STORE_DWORDX2
+define void @i128-const-store(i32 addrspace(1)* %out) {
+entry:
+ store i32 1, i32 addrspace(1)* %out, align 4
+ %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %out, i64 1
+ store i32 1, i32 addrspace(1)* %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds i32 addrspace(1)* %out, i64 2
+ store i32 2, i32 addrspace(1)* %arrayidx4, align 4
+ %arrayidx6 = getelementptr inbounds i32 addrspace(1)* %out, i64 3
+ store i32 2, i32 addrspace(1)* %arrayidx6, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/sub.ll b/test/CodeGen/R600/sub.ll
index 5fdd2b820c1a..8e64148142d2 100644
--- a/test/CodeGen/R600/sub.ll
+++ b/test/CodeGen/R600/sub.ll
@@ -1,13 +1,14 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+;RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-;EG-CHECK: @test2
-;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+declare i32 @llvm.r600.read.tidig.x() readnone
-;SI-CHECK: @test2
-;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;FUNC-LABEL: @test2
+;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+;SI: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -18,17 +19,16 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK: @test4
-;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;FUNC-LABEL: @test4
+;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: @test4
-;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -38,3 +38,38 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @s_sub_i64:
+; SI: S_SUB_I32
+; SI: S_SUBB_U32
+
+; EG-DAG: SETGE_UINT
+; EG-DAG: CNDE_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+define void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @v_sub_i64:
+; SI: V_SUB_I32_e32
+; SI: V_SUBB_U32_e32
+
+; EG-DAG: SETGE_UINT
+; EG-DAG: CNDE_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT
+define void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr i64 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i64 addrspace(1)* %inB, i32 %tid
+ %a = load i64 addrspace(1)* %a_ptr
+ %b = load i64 addrspace(1)* %b_ptr
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/trunc-store-i1.ll b/test/CodeGen/R600/trunc-store-i1.ll
new file mode 100644
index 000000000000..a3975c8b8e4c
--- /dev/null
+++ b/test/CodeGen/R600/trunc-store-i1.ll
@@ -0,0 +1,32 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+
+; SI-LABEL: @global_truncstore_i32_to_i1
+; SI: S_LOAD_DWORD [[LOAD:s[0-9]+]],
+; SI: S_AND_B32 [[SREG:s[0-9]+]], [[LOAD]], 1
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], [[SREG]]
+; SI: BUFFER_STORE_BYTE [[VREG]],
+define void @global_truncstore_i32_to_i1(i1 addrspace(1)* %out, i32 %val) nounwind {
+ %trunc = trunc i32 %val to i1
+ store i1 %trunc, i1 addrspace(1)* %out, align 1
+ ret void
+}
+
+; SI-LABEL: @global_truncstore_i64_to_i1
+; SI: BUFFER_STORE_BYTE
+define void @global_truncstore_i64_to_i1(i1 addrspace(1)* %out, i64 %val) nounwind {
+ %trunc = trunc i64 %val to i1
+ store i1 %trunc, i1 addrspace(1)* %out, align 1
+ ret void
+}
+
+; SI-LABEL: @global_truncstore_i16_to_i1
+; SI: S_LOAD_DWORD [[LOAD:s[0-9]+]],
+; SI: S_AND_B32 [[SREG:s[0-9]+]], [[LOAD]], 1
+; SI: V_MOV_B32_e32 [[VREG:v[0-9]+]], [[SREG]]
+; SI: BUFFER_STORE_BYTE [[VREG]],
+define void @global_truncstore_i16_to_i1(i1 addrspace(1)* %out, i16 %val) nounwind {
+ %trunc = trunc i16 %val to i1
+ store i1 %trunc, i1 addrspace(1)* %out, align 1
+ ret void
+}
diff --git a/test/CodeGen/R600/trunc.ll b/test/CodeGen/R600/trunc.ll
index 6bbd7f7b510e..31cdfcd1a884 100644
--- a/test/CodeGen/R600/trunc.ll
+++ b/test/CodeGen/R600/trunc.ll
@@ -3,7 +3,7 @@
define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
; SI-LABEL: @trunc_i64_to_i32_store
-; SI: S_LOAD_DWORD s0, s[0:1], 11
+; SI: S_LOAD_DWORD s0, s[0:1], 0xb
; SI: V_MOV_B32_e32 v0, s0
; SI: BUFFER_STORE_DWORD v0
@@ -16,16 +16,31 @@ define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
ret void
}
+; SI-LABEL: @trunc_load_shl_i64:
+; SI-DAG: S_LOAD_DWORDX2
+; SI-DAG: S_LOAD_DWORD [[SREG:s[0-9]+]],
+; SI: S_LSHL_B32 [[SHL:s[0-9]+]], [[SREG]], 2
+; SI: V_MOV_B32_e32 [[VSHL:v[0-9]+]], [[SHL]]
+; SI: BUFFER_STORE_DWORD [[VSHL]],
+define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
+ %b = shl i64 %a, 2
+ %result = trunc i64 %b to i32
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
; SI-LABEL: @trunc_shl_i64:
-; SI: S_LOAD_DWORDX2
-; SI: S_LOAD_DWORDX2 [[SREG:s\[[0-9]+:[0-9]+\]]]
-; SI: S_LSHL_B64 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}}, [[SREG]], 2
-; SI: MOV_B32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG]]
+; SI: S_LOAD_DWORDX2 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}},
+; SI: S_ADD_I32 s[[LO_ADD:[0-9]+]], s[[LO_SREG]],
+; SI: S_LSHL_B64 s{{\[}}[[LO_SREG2:[0-9]+]]:{{[0-9]+\]}}, s{{\[}}[[LO_ADD]]:{{[0-9]+\]}}, 2
+; SI: V_MOV_B32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG2]]
; SI: BUFFER_STORE_DWORD v[[LO_VREG]],
-define void @trunc_shl_i64(i32 addrspace(1)* %out, i64 %a) {
- %b = shl i64 %a, 2
+define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
+ %aa = add i64 %a, 234 ; Prevent shrinking store.
+ %b = shl i64 %aa, 2
%result = trunc i64 %b to i32
store i32 %result, i32 addrspace(1)* %out, align 4
+ store i64 %b, i64 addrspace(1)* %out2, align 8 ; Prevent reducing ops to 32-bits
ret void
}
diff --git a/test/CodeGen/R600/uaddo.ll b/test/CodeGen/R600/uaddo.ll
new file mode 100644
index 000000000000..a80e502eef2a
--- /dev/null
+++ b/test/CodeGen/R600/uaddo.ll
@@ -0,0 +1,69 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+
+declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+
+; FUNC-LABEL: @uaddo_i64_zext
+; SI: ADD
+; SI: ADDC
+; SI: ADDC
+define void @uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %uadd, 0
+ %carry = extractvalue { i64, i1 } %uadd, 1
+ %ext = zext i1 %carry to i64
+ %add2 = add i64 %val, %ext
+ store i64 %add2, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_uaddo_i32
+; SI: S_ADD_I32
+define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %uadd, 0
+ %carry = extractvalue { i32, i1 } %uadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_uaddo_i32
+; SI: V_ADD_I32
+define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %uadd, 0
+ %carry = extractvalue { i32, i1 } %uadd, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_uaddo_i64
+; SI: S_ADD_I32
+; SI: S_ADDC_U32
+define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %uadd, 0
+ %carry = extractvalue { i64, i1 } %uadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_uaddo_i64
+; SI: V_ADD_I32
+; SI: V_ADDC_U32
+define void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %uadd, 0
+ %carry = extractvalue { i64, i1 } %uadd, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
diff --git a/test/CodeGen/R600/udivrem.ll b/test/CodeGen/R600/udivrem.ll
new file mode 100644
index 000000000000..5f5753adca3f
--- /dev/null
+++ b/test/CodeGen/R600/udivrem.ll
@@ -0,0 +1,358 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+
+; FUNC-LABEL: @test_udivrem
+; EG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG: CNDE_INT
+; EG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG: CNDE_INT
+; EG: MULHI
+; EG: MULLO_INT
+; EG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+
+; SI: V_RCP_IFLAG_F32_e32 [[RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[RCP_HI:v[0-9]+]], [[RCP]]
+; SI-DAG: V_MUL_LO_I32 [[RCP_LO:v[0-9]+]], [[RCP]]
+; SI-DAG: V_SUB_I32_e32 [[NEG_RCP_LO:v[0-9]+]], 0, [[RCP_LO]]
+; SI: V_CNDMASK_B32_e64
+; SI: V_MUL_HI_U32 [[E:v[0-9]+]], {{v[0-9]+}}, [[RCP]]
+; SI-DAG: V_ADD_I32_e32 [[RCP_A_E:v[0-9]+]], [[E]], [[RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[RCP_S_E:v[0-9]+]], [[E]], [[RCP]]
+; SI: V_CNDMASK_B32_e64
+; SI: V_MUL_HI_U32 [[Quotient:v[0-9]+]]
+; SI: V_MUL_LO_I32 [[Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: V_AND_B32_e32 [[Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[Quotient_A_One:v[0-9]+]], 1, [[Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @test_udivrem(i32 addrspace(1)* %out, i32 %x, i32 %y) {
+ %result0 = udiv i32 %x, %y
+ store i32 %result0, i32 addrspace(1)* %out
+ %result1 = urem i32 %x, %y
+ store i32 %result1, i32 addrspace(1)* %out
+ ret void
+}
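+
+; (The SI sequence checked above is essentially the standard reciprocal-based
+; 32-bit unsigned div/rem expansion: a scaled V_RCP_IFLAG_F32 estimate is
+; refined once via V_MUL_HI/V_MUL_LO and an add/sub correction, V_MUL_HI of
+; the numerator gives a quotient estimate, and CNDMASK-selected +/-1 and
+; +/-denominator fix-ups produce the final quotient and remainder.)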
+
+; FUNC-LABEL: @test_udivrem_v2
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[FIRST_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[FIRST_RCP_HI:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_RCP_LO:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_NEG_RCP_LO:v[0-9]+]], 0, [[FIRST_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_E:v[0-9]+]], {{v[0-9]+}}, [[FIRST_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FIRST_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[FIRST_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Quotient_A_One:v[0-9]+]], {{.*}}, [[FIRST_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[SECOND_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[SECOND_RCP_HI:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_RCP_LO:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_NEG_RCP_LO:v[0-9]+]], 0, [[SECOND_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_E:v[0-9]+]], {{v[0-9]+}}, [[SECOND_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[SECOND_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[SECOND_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Quotient_A_One:v[0-9]+]], {{.*}}, [[SECOND_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
+ %result0 = udiv <2 x i32> %x, %y
+ store <2 x i32> %result0, <2 x i32> addrspace(1)* %out
+ %result1 = urem <2 x i32> %x, %y
+ store <2 x i32> %result1, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+
+; FUNC-LABEL: @test_udivrem_v4
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: RECIP_UINT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: MULHI
+; EG-DAG: MULLO_INT
+; EG-DAG: SUB_INT
+; EG-DAG: SETGE_UINT
+; EG-DAG: SETGE_UINT
+; EG-DAG: AND_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: ADD_INT
+; EG-DAG: SUB_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[FIRST_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[FIRST_RCP_HI:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_RCP_LO:v[0-9]+]], [[FIRST_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_NEG_RCP_LO:v[0-9]+]], 0, [[FIRST_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_E:v[0-9]+]], {{v[0-9]+}}, [[FIRST_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FIRST_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[FIRST_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[FIRST_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FIRST_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[FIRST_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Quotient_A_One:v[0-9]+]], {{.*}}, [[FIRST_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[FIRST_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[FIRST_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[SECOND_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[SECOND_RCP_HI:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_RCP_LO:v[0-9]+]], [[SECOND_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_NEG_RCP_LO:v[0-9]+]], 0, [[SECOND_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_E:v[0-9]+]], {{v[0-9]+}}, [[SECOND_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[SECOND_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[SECOND_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[SECOND_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[SECOND_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[SECOND_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Quotient_A_One:v[0-9]+]], {{.*}}, [[SECOND_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[SECOND_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[SECOND_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[THIRD_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[THIRD_RCP_HI:v[0-9]+]], [[THIRD_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[THIRD_RCP_LO:v[0-9]+]], [[THIRD_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[THIRD_NEG_RCP_LO:v[0-9]+]], 0, [[THIRD_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[THIRD_E:v[0-9]+]], {{v[0-9]+}}, [[THIRD_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[THIRD_RCP_A_E:v[0-9]+]], [[THIRD_E]], [[THIRD_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[THIRD_RCP_S_E:v[0-9]+]], [[THIRD_E]], [[THIRD_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[THIRD_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[THIRD_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[THIRD_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[THIRD_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[THIRD_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[THIRD_Quotient_A_One:v[0-9]+]], {{.*}}, [[THIRD_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[THIRD_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[THIRD_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[THIRD_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_RCP_IFLAG_F32_e32 [[FOURTH_RCP:v[0-9]+]]
+; SI-DAG: V_MUL_HI_U32 [[FOURTH_RCP_HI:v[0-9]+]], [[FOURTH_RCP]]
+; SI-DAG: V_MUL_LO_I32 [[FOURTH_RCP_LO:v[0-9]+]], [[FOURTH_RCP]]
+; SI-DAG: V_SUB_I32_e32 [[FOURTH_NEG_RCP_LO:v[0-9]+]], 0, [[FOURTH_RCP_LO]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FOURTH_E:v[0-9]+]], {{v[0-9]+}}, [[FOURTH_RCP]]
+; SI-DAG: V_ADD_I32_e32 [[FOURTH_RCP_A_E:v[0-9]+]], [[FOURTH_E]], [[FOURTH_RCP]]
+; SI-DAG: V_SUBREV_I32_e32 [[FOURTH_RCP_S_E:v[0-9]+]], [[FOURTH_E]], [[FOURTH_RCP]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_MUL_HI_U32 [[FOURTH_Quotient:v[0-9]+]]
+; SI-DAG: V_MUL_LO_I32 [[FOURTH_Num_S_Remainder:v[0-9]+]]
+; SI-DAG: V_SUB_I32_e32 [[FOURTH_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FOURTH_Num_S_Remainder]]
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_AND_B32_e32 [[FOURTH_Tmp1:v[0-9]+]]
+; SI-DAG: V_ADD_I32_e32 [[FOURTH_Quotient_A_One:v[0-9]+]], {{.*}}, [[FOURTH_Quotient]]
+; SI-DAG: V_SUBREV_I32_e32 [[FOURTH_Quotient_S_One:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_ADD_I32_e32 [[FOURTH_Remainder_A_Den:v[0-9]+]],
+; SI-DAG: V_SUBREV_I32_e32 [[FOURTH_Remainder_S_Den:v[0-9]+]],
+; SI-DAG: V_CNDMASK_B32_e64
+; SI-DAG: V_CNDMASK_B32_e64
+; SI: S_ENDPGM
+define void @test_udivrem_v4(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
+ %result0 = udiv <4 x i32> %x, %y
+ store <4 x i32> %result0, <4 x i32> addrspace(1)* %out
+ %result1 = urem <4 x i32> %x, %y
+ store <4 x i32> %result1, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/udivrem64.ll b/test/CodeGen/R600/udivrem64.ll
new file mode 100644
index 000000000000..a71315a12d80
--- /dev/null
+++ b/test/CodeGen/R600/udivrem64.ll
@@ -0,0 +1,82 @@
+;XUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+
+;FUNC-LABEL: @test_udiv
+;EG: RECIP_UINT
+;EG: LSHL {{.*}}, 1,
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;SI: S_ENDPGM
+define void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %result = udiv i64 %x, %y
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: @test_urem
+;EG: RECIP_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: AND_INT {{.*}}, 1,
+;SI: S_ENDPGM
+define void @test_urem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %result = urem i64 %x, %y
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/uint_to_fp.f64.ll b/test/CodeGen/R600/uint_to_fp.f64.ll
new file mode 100644
index 000000000000..9a41796a06bf
--- /dev/null
+++ b/test/CodeGen/R600/uint_to_fp.f64.ll
@@ -0,0 +1,36 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; SI-LABEL: @uint_to_fp_f64_i32
+; SI: V_CVT_F64_U32_e32
+; SI: S_ENDPGM
+define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) {
+ %cast = uitofp i32 %in to double
+ store double %cast, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: @uint_to_fp_i1_f64:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; FIXME: The VGPR sources for V_CNDMASK are copied from SGPRs;
+; we should be able to fold the SGPRs into the V_CNDMASK instructions.
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; SI: BUFFER_STORE_DWORDX2
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = uitofp i1 %cmp to double
+ store double %fp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @uint_to_fp_i1_f64_load:
+; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, 1
+; SI-NEXT: V_CVT_F64_U32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
+; SI: BUFFER_STORE_DWORDX2 [[RESULT]]
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
+ %fp = uitofp i1 %in to double
+ store double %fp, double addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/uint_to_fp.ll b/test/CodeGen/R600/uint_to_fp.ll
index a5ac3555afde..8f5d42d42c6b 100644
--- a/test/CodeGen/R600/uint_to_fp.ll
+++ b/test/CodeGen/R600/uint_to_fp.ll
@@ -1,28 +1,30 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; R600-CHECK-LABEL: @uint_to_fp_v2i32
-; R600-CHECK-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
-; R600-CHECK-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
-; SI-CHECK-LABEL: @uint_to_fp_v2i32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
+; FUNC-LABEL: @uint_to_fp_v2i32
+; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
+; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
+
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: S_ENDPGM
define void @uint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) {
%result = uitofp <2 x i32> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
-; R600-CHECK-LABEL: @uint_to_fp_v4i32
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-CHECK-LABEL: @uint_to_fp_v4i32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
+; FUNC-LABEL: @uint_to_fp_v4i32
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: S_ENDPGM
define void @uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%value = load <4 x i32> addrspace(1) * %in
%result = uitofp <4 x i32> %value to <4 x float>
@@ -30,17 +32,39 @@ define void @uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspac
ret void
}
-; R600-CHECK-LABEL: @uint_to_fp_i64_f32
-; R600-CHECK: UINT_TO_FLT
-; R600-CHECK: UINT_TO_FLT
-; R600-CHECK: MULADD_IEEE
-; SI-CHECK-LABEL: @uint_to_fp_i64_f32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_CVT_F32_U32_e32
-; SI-CHECK: V_MAD_F32
+; FUNC-LABEL: @uint_to_fp_i64_f32
+; R600: UINT_TO_FLT
+; R600: UINT_TO_FLT
+; R600: MULADD_IEEE
+; SI: V_CVT_F32_U32_e32
+; SI: V_CVT_F32_U32_e32
+; SI: V_MAD_F32
+; SI: S_ENDPGM
define void @uint_to_fp_i64_f32(float addrspace(1)* %out, i64 %in) {
entry:
%0 = uitofp i64 %in to float
store float %0, float addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: @uint_to_fp_i1_f32:
+; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; SI-NEXT: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, 1.000000e+00, [[CMP]]
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = uitofp i1 %cmp to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @uint_to_fp_i1_f32_load:
+; SI: V_CNDMASK_B32_e64 [[RESULT:v[0-9]+]], 0, 1.000000e+00
+; SI: BUFFER_STORE_DWORD [[RESULT]],
+; SI: S_ENDPGM
+define void @uint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) {
+ %fp = uitofp i1 %in to float
+ store float %fp, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/unaligned-load-store.ll b/test/CodeGen/R600/unaligned-load-store.ll
index 2824ff8a88c5..4df69d1e5f16 100644
--- a/test/CodeGen/R600/unaligned-load-store.ll
+++ b/test/CodeGen/R600/unaligned-load-store.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: @unaligned_load_store_i32:
; DS_READ_U32 {{v[0-9]+}}, 0, [[REG]]
diff --git a/test/CodeGen/R600/unhandled-loop-condition-assertion.ll b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
new file mode 100644
index 000000000000..e4129c511230
--- /dev/null
+++ b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
@@ -0,0 +1,114 @@
+; REQUIRES: asserts
+; XFAIL: *
+; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=COMMON %s
+; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=COMMON %s
+
+; SI hits an assertion at -O0; evergreen hits a "not implemented" unreachable.
+
+; COMMON-LABEL: @branch_true:
+define void @branch_true(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+entry:
+ br i1 true, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride
+ %add.ptr4.sum = shl i32 %main_stride, 2
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
+ %1 = load i32 addrspace(1)* %0, align 4
+ %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
+ %3 = load i32 addrspace(1)* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
+ %5 = load i32 addrspace(1)* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
+ %7 = load i32 addrspace(1)* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
+ %9 = load i32 addrspace(1)* %8, align 4
+ %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; COMMON-LABEL: @branch_false:
+; SI: .text
+; SI-NEXT: S_ENDPGM
+define void @branch_false(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+entry:
+ br i1 false, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride
+ %add.ptr4.sum = shl i32 %main_stride, 2
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
+ %1 = load i32 addrspace(1)* %0, align 4
+ %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
+ %3 = load i32 addrspace(1)* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
+ %5 = load i32 addrspace(1)* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
+ %7 = load i32 addrspace(1)* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
+ %9 = load i32 addrspace(1)* %8, align 4
+ %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; COMMON-LABEL: @branch_undef:
+; SI: .text
+; SI-NEXT: S_ENDPGM
+define void @branch_undef(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {
+entry:
+ br i1 undef, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %entry
+ %add.ptr.sum = shl i32 %main_stride, 1
+ %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride
+ %add.ptr4.sum = shl i32 %main_stride, 2
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
+ %0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
+ %1 = load i32 addrspace(1)* %0, align 4
+ %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
+ %3 = load i32 addrspace(1)* %2, align 4
+ %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
+ %5 = load i32 addrspace(1)* %4, align 4
+ %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
+ %7 = load i32 addrspace(1)* %6, align 4
+ %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
+ %9 = load i32 addrspace(1)* %8, align 4
+ %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/R600/unroll.ll b/test/CodeGen/R600/unroll.ll
new file mode 100644
index 000000000000..e0035eae71cf
--- /dev/null
+++ b/test/CodeGen/R600/unroll.ll
@@ -0,0 +1,37 @@
+; RUN: opt -loop-unroll -simplifycfg -sroa %s -S -o - | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
+target triple = "r600--"
+
+; This test contains a simple loop that initializes an array declared in
+; private memory. We want to make sure these kinds of loops are always
+; unrolled, because private memory is slow.
+
+; CHECK-LABEL: @test
+; CHECK-NOT: alloca
+; CHECK: store i32 5, i32 addrspace(1)* %out
+define void @test(i32 addrspace(1)* %out) {
+entry:
+ %0 = alloca [32 x i32]
+ br label %loop.header
+
+loop.header:
+ %counter = phi i32 [0, %entry], [%inc, %loop.inc]
+ br label %loop.body
+
+loop.body:
+ %ptr = getelementptr [32 x i32]* %0, i32 0, i32 %counter
+ store i32 %counter, i32* %ptr
+ br label %loop.inc
+
+loop.inc:
+ %inc = add i32 %counter, 1
+ %1 = icmp sge i32 %counter, 32
+ br i1 %1, label %exit, label %loop.header
+
+exit:
+ %2 = getelementptr [32 x i32]* %0, i32 0, i32 5
+ %3 = load i32* %2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
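
For reference, a minimal sketch (not part of the patch) of the IR shape the CHECK lines above anticipate once -loop-unroll, -simplifycfg and -sroa have run: the private alloca and the loop are gone, and only the constant store of element 5 (which the loop fills with the value 5) remains. The function name @test_unrolled_sketch is illustrative.

; Illustrative post-optimization shape; @test_unrolled_sketch is a hypothetical name.
define void @test_unrolled_sketch(i32 addrspace(1)* %out) {
entry:
  store i32 5, i32 addrspace(1)* %out
  ret void
}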
diff --git a/test/CodeGen/R600/usubo.ll b/test/CodeGen/R600/usubo.ll
new file mode 100644
index 000000000000..d57a2c7f773e
--- /dev/null
+++ b/test/CodeGen/R600/usubo.ll
@@ -0,0 +1,66 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s
+
+declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+
+; FUNC-LABEL: @usubo_i64_zext
+define void @usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %usub, 0
+ %carry = extractvalue { i64, i1 } %usub, 1
+ %ext = zext i1 %carry to i64
+ %add2 = add i64 %val, %ext
+ store i64 %add2, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @s_usubo_i32
+; SI: S_SUB_I32
+define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
+ %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %usub, 0
+ %carry = extractvalue { i32, i1 } %usub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_usubo_i32
+; SI: V_SUBREV_I32_e32
+define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %b = load i32 addrspace(1)* %bptr, align 4
+ %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+ %val = extractvalue { i32, i1 } %usub, 0
+ %carry = extractvalue { i32, i1 } %usub, 1
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @s_usubo_i64
+; SI: S_SUB_I32
+; SI: S_SUBB_U32
+define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %usub, 0
+ %carry = extractvalue { i64, i1 } %usub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
+
+; FUNC-LABEL: @v_usubo_i64
+; SI: V_SUB_I32
+; SI: V_SUBB_U32
+define void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %a = load i64 addrspace(1)* %aptr, align 4
+ %b = load i64 addrspace(1)* %bptr, align 4
+ %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+ %val = extractvalue { i64, i1 } %usub, 0
+ %carry = extractvalue { i64, i1 } %usub, 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ store i1 %carry, i1 addrspace(1)* %carryout
+ ret void
+}
diff --git a/test/CodeGen/R600/v1i64-kernel-arg.ll b/test/CodeGen/R600/v1i64-kernel-arg.ll
new file mode 100644
index 000000000000..2aa1221b366e
--- /dev/null
+++ b/test/CodeGen/R600/v1i64-kernel-arg.ll
@@ -0,0 +1,17 @@
+; REQUIRES: asserts
+; XFAIL: *
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s
+
+; CHECK-LABEL: @kernel_arg_i64
+define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
+ store i64 %a, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; i64 arg works, v1i64 arg does not.
+; CHECK-LABEL: @kernel_arg_v1i64
+define void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
+ store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
+ ret void
+}
+
diff --git a/test/CodeGen/R600/v_cndmask.ll b/test/CodeGen/R600/v_cndmask.ll
new file mode 100644
index 000000000000..84087ee78d59
--- /dev/null
+++ b/test/CodeGen/R600/v_cndmask.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
+
+; SI: @v_cnd_nan
+; SI: V_CNDMASK_B32_e64 v{{[0-9]}},
+; SI-DAG: v{{[0-9]}}
+; All nan values are converted to 0xffffffff
+; SI-DAG: -1
+define void @v_cnd_nan(float addrspace(1)* %out, i32 %c, float %f) {
+entry:
+ %0 = icmp ne i32 %c, 0
+ %1 = select i1 %0, float 0xFFFFFFFFE0000000, float %f
+ store float %1, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/valu-i1.ll b/test/CodeGen/R600/valu-i1.ll
new file mode 100644
index 000000000000..5d5e3ff63a47
--- /dev/null
+++ b/test/CodeGen/R600/valu-i1.ll
@@ -0,0 +1,39 @@
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI %s
+
+; Make sure the i1 values created by the cfg structurizer pass are
+; moved using VALU instructions
+; SI-NOT: S_MOV_B64 s[{{[0-9]:[0-9]}}], -1
+; SI: V_MOV_B32_e32 v{{[0-9]}}, -1
+define void @test_if(i32 %a, i32 %b, i32 addrspace(1)* %src, i32 addrspace(1)* %dst) {
+entry:
+ switch i32 %a, label %default [
+ i32 0, label %case0
+ i32 1, label %case1
+ ]
+
+case0:
+ %arrayidx1 = getelementptr i32 addrspace(1)* %dst, i32 %b
+ store i32 0, i32 addrspace(1)* %arrayidx1, align 4
+ br label %end
+
+case1:
+ %arrayidx5 = getelementptr i32 addrspace(1)* %dst, i32 %b
+ store i32 1, i32 addrspace(1)* %arrayidx5, align 4
+ br label %end
+
+default:
+ %cmp8 = icmp eq i32 %a, 2
+ %arrayidx10 = getelementptr i32 addrspace(1)* %dst, i32 %b
+ br i1 %cmp8, label %if, label %else
+
+if:
+ store i32 2, i32 addrspace(1)* %arrayidx10, align 4
+ br label %end
+
+else:
+ store i32 3, i32 addrspace(1)* %arrayidx10, align 4
+ br label %end
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/R600/vector-alloca.ll b/test/CodeGen/R600/vector-alloca.ll
new file mode 100644
index 000000000000..ec1995f68089
--- /dev/null
+++ b/test/CodeGen/R600/vector-alloca.ll
@@ -0,0 +1,75 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=verde -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=verde -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @vector_read
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOVA_INT
+define void @vector_read(i32 addrspace(1)* %out, i32 %index) {
+entry:
+ %0 = alloca [4 x i32]
+ %x = getelementptr [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ store i32 0, i32* %x
+ store i32 1, i32* %y
+ store i32 2, i32* %z
+ store i32 3, i32* %w
+ %1 = getelementptr [4 x i32]* %0, i32 0, i32 %index
+ %2 = load i32* %1
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @vector_write
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOV
+; EG: MOVA_INT
+; EG: MOVA_INT
+define void @vector_write(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
+entry:
+ %0 = alloca [4 x i32]
+ %x = getelementptr [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ store i32 0, i32* %x
+ store i32 0, i32* %y
+ store i32 0, i32* %z
+ store i32 0, i32* %w
+ %1 = getelementptr [4 x i32]* %0, i32 0, i32 %w_index
+ store i32 1, i32* %1
+ %2 = getelementptr [4 x i32]* %0, i32 0, i32 %r_index
+ %3 = load i32* %2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
+
+; This test should be optimized to:
+; store i32 0, i32 addrspace(1)* %out
+; FUNC-LABEL: @bitcast_gep
+; EG: STORE_RAW
+define void @bitcast_gep(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
+entry:
+ %0 = alloca [4 x i32]
+ %x = getelementptr [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ store i32 0, i32* %x
+ store i32 0, i32* %y
+ store i32 0, i32* %z
+ store i32 0, i32* %w
+ %1 = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %2 = bitcast i32* %1 to [4 x i32]*
+ %3 = getelementptr [4 x i32]* %2, i32 0, i32 0
+ %4 = load i32* %3
+ store i32 %4, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/vop-shrink.ll b/test/CodeGen/R600/vop-shrink.ll
new file mode 100644
index 000000000000..54e588d80842
--- /dev/null
+++ b/test/CodeGen/R600/vop-shrink.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; XXX: This test is for a bug in the SIShrinkInstructions pass and it will be
+; relevant once we are selecting 64-bit instructions. We are
+; currently selecting mostly 32-bit instructions, so the
+; SIShrinkInstructions pass isn't doing much.
+; XFAIL: *
+
+; Test that we correctly commute a sub instruction
+; FUNC-LABEL: @sub_rev
+; SI-NOT: V_SUB_I32_e32 v{{[0-9]+}}, s
+; SI: V_SUBREV_I32_e32 v{{[0-9]+}}, s
+
+; ModuleID = 'vop-shrink.ll'
+
+define void @sub_rev(i32 addrspace(1)* %out, <4 x i32> %sgpr, i32 %cond) {
+entry:
+ %vgpr = call i32 @llvm.r600.read.tidig.x() #1
+ %tmp = icmp eq i32 %cond, 0
+ br i1 %tmp, label %if, label %else
+
+if: ; preds = %entry
+ %tmp1 = getelementptr i32 addrspace(1)* %out, i32 1
+ %tmp2 = extractelement <4 x i32> %sgpr, i32 1
+ store i32 %tmp2, i32 addrspace(1)* %out
+ br label %endif
+
+else: ; preds = %entry
+ %tmp3 = extractelement <4 x i32> %sgpr, i32 2
+ %tmp4 = sub i32 %vgpr, %tmp3
+ store i32 %tmp4, i32 addrspace(1)* %out
+ br label %endif
+
+endif: ; preds = %else, %if
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tidig.x() #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { readnone }
diff --git a/test/CodeGen/R600/vtx-schedule.ll b/test/CodeGen/R600/vtx-schedule.ll
index 97d37ed84ce2..ce852c5efeff 100644
--- a/test/CodeGen/R600/vtx-schedule.ll
+++ b/test/CodeGen/R600/vtx-schedule.ll
@@ -6,9 +6,9 @@
; CHECK: @test
; CHECK: Fetch clause
-; CHECK_VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 0
+; CHECK: VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 0
; CHECK: Fetch clause
-; CHECK_VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 0
+; CHECK: VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 0
define void @test(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* addrspace(1)* nocapture %in0) {
entry:
%0 = load i32 addrspace(1)* addrspace(1)* %in0
diff --git a/test/CodeGen/R600/work-item-intrinsics.ll b/test/CodeGen/R600/work-item-intrinsics.ll
index 9618d7fb1970..01236590742a 100644
--- a/test/CodeGen/R600/work-item-intrinsics.ll
+++ b/test/CodeGen/R600/work-item-intrinsics.ll
@@ -19,7 +19,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[0].Y
; SI-CHECK: @ngroups_y
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 1
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x1
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @ngroups_y (i32 addrspace(1)* %out) {
@@ -33,7 +33,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[0].Z
; SI-CHECK: @ngroups_z
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 2
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x2
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @ngroups_z (i32 addrspace(1)* %out) {
@@ -47,7 +47,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[0].W
; SI-CHECK: @global_size_x
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 3
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x3
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @global_size_x (i32 addrspace(1)* %out) {
@@ -61,7 +61,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[1].X
; SI-CHECK: @global_size_y
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 4
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x4
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @global_size_y (i32 addrspace(1)* %out) {
@@ -75,7 +75,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[1].Y
; SI-CHECK: @global_size_z
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 5
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x5
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @global_size_z (i32 addrspace(1)* %out) {
@@ -89,7 +89,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[1].Z
; SI-CHECK: @local_size_x
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 6
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x6
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @local_size_x (i32 addrspace(1)* %out) {
@@ -103,7 +103,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[1].W
; SI-CHECK: @local_size_y
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 7
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x7
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @local_size_y (i32 addrspace(1)* %out) {
@@ -117,7 +117,7 @@ entry:
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; R600-CHECK: MOV [[VAL]], KC0[2].X
; SI-CHECK: @local_size_z
-; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 8
+; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0x8
; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @local_size_z (i32 addrspace(1)* %out) {
@@ -127,12 +127,12 @@ entry:
ret void
}
-; The tgid values are stored in ss offset by the number of user ss.
-; Currently we always use exactly 2 user ss for the pointer to the
+; The tgid values are stored in sgprs offset by the number of user sgprs.
+; Currently we always use exactly 2 user sgprs for the pointer to the
; kernel arguments, but this may change in the future.
; SI-CHECK: @tgid_x
-; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s2
+; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s4
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @tgid_x (i32 addrspace(1)* %out) {
entry:
@@ -142,7 +142,7 @@ entry:
}
; SI-CHECK: @tgid_y
-; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s3
+; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s5
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @tgid_y (i32 addrspace(1)* %out) {
entry:
@@ -152,7 +152,7 @@ entry:
}
; SI-CHECK: @tgid_z
-; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s4
+; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s6
; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
define void @tgid_z (i32 addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll
index c12b0c1ce2c9..e14bd7127231 100644
--- a/test/CodeGen/R600/xor.ll
+++ b/test/CodeGen/R600/xor.ll
@@ -42,7 +42,7 @@ define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
;SI-CHECK: @xor_i1
-;SI-CHECK: S_XOR_B64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+;SI-CHECK: V_XOR_B32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
%a = load float addrspace(1) * %in0
@@ -54,3 +54,105 @@ define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float ad
store float %result, float addrspace(1)* %out
ret void
}
+
+; SI-CHECK-LABEL: @vector_xor_i32
+; SI-CHECK: V_XOR_B32_e32
+define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
+ %a = load i32 addrspace(1)* %in0
+ %b = load i32 addrspace(1)* %in1
+ %result = xor i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @scalar_xor_i32
+; SI-CHECK: S_XOR_B32
+define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %result = xor i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @scalar_not_i32
+; SI-CHECK: S_NOT_B32
+define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
+ %result = xor i32 %a, -1
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @vector_not_i32
+; SI-CHECK: V_NOT_B32
+define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
+ %a = load i32 addrspace(1)* %in0
+ %b = load i32 addrspace(1)* %in1
+ %result = xor i32 %a, -1
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @vector_xor_i64
+; SI-CHECK: V_XOR_B32_e32
+; SI-CHECK: V_XOR_B32_e32
+; SI-CHECK: S_ENDPGM
+define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
+ %a = load i64 addrspace(1)* %in0
+ %b = load i64 addrspace(1)* %in1
+ %result = xor i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @scalar_xor_i64
+; SI-CHECK: S_XOR_B64
+; SI-CHECK: S_ENDPGM
+define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+ %result = xor i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @scalar_not_i64
+; SI-CHECK: S_NOT_B64
+define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
+ %result = xor i64 %a, -1
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; SI-CHECK-LABEL: @vector_not_i64
+; SI-CHECK: V_NOT_B32
+; SI-CHECK: V_NOT_B32
+define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
+ %a = load i64 addrspace(1)* %in0
+ %b = load i64 addrspace(1)* %in1
+ %result = xor i64 %a, -1
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; Test that we have a pattern to match xor inside a branch.
+; Note that in the future the backend may be smart enough to
+; use an SALU instruction for this.
+
+; SI-CHECK-LABEL: @xor_cf
+; SI-CHECK: V_XOR
+; SI-CHECK: V_XOR
+define void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
+entry:
+ %0 = icmp eq i64 %a, 0
+ br i1 %0, label %if, label %else
+
+if:
+ %1 = xor i64 %a, %b
+ br label %endif
+
+else:
+ %2 = load i64 addrspace(1)* %in
+ br label %endif
+
+endif:
+ %3 = phi i64 [%1, %if], [%2, %else]
+ store i64 %3, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/zero_extend.ll b/test/CodeGen/R600/zero_extend.ll
index a114bfc4a02b..8585d4ab191e 100644
--- a/test/CodeGen/R600/zero_extend.ll
+++ b/test/CodeGen/R600/zero_extend.ll
@@ -6,8 +6,9 @@
; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW
; SI-CHECK: @test
-; SI-CHECK: V_MOV_B32_e32 v[[ZERO:[0-9]]], 0
-; SI-CHECK: BUFFER_STORE_DWORDX2 v[0:[[ZERO]]{{\]}}
+; SI-CHECK: S_MOV_B32 [[ZERO:s[0-9]]], 0
+; SI-CHECK: V_MOV_B32_e32 v[[V_ZERO:[0-9]]], [[ZERO]]
+; SI-CHECK: BUFFER_STORE_DWORDX2 v[0:[[V_ZERO]]{{\]}}
define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
%0 = mul i32 %a, %b
@@ -26,3 +27,14 @@ entry:
store i32 %1, i32 addrspace(1)* %out
ret void
}
+
+; SI-CHECK-LABEL: @zext_i1_to_i64
+; SI-CHECK: V_CMP_EQ_I32
+; SI-CHECK: V_CNDMASK_B32
+; SI-CHECK: S_MOV_B32 s{{[0-9]+}}, 0
+define void @zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %cmp = icmp eq i32 %a, %b
+ %ext = zext i1 %cmp to i64
+ store i64 %ext, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/SPARC/2009-08-28-PIC.ll b/test/CodeGen/SPARC/2009-08-28-PIC.ll
index a2ba0d02d45c..b004b11b853b 100644
--- a/test/CodeGen/SPARC/2009-08-28-PIC.ll
+++ b/test/CodeGen/SPARC/2009-08-28-PIC.ll
@@ -1,9 +1,45 @@
-; RUN: llc -march=sparc --relocation-model=pic < %s | grep _GLOBAL_OFFSET_TABLE_
+; RUN: llc -march=sparc --relocation-model=pic < %s | FileCheck %s --check-prefix=V8
+; RUN: llc -march=sparcv9 --relocation-model=pic < %s | FileCheck %s --check-prefix=V9
+; RUN: llc -march=sparc --relocation-model=pic < %s -O0 | FileCheck %s --check-prefix=V8UNOPT
+; RUN: llc -march=sparcv9 --relocation-model=pic < %s -O0 | FileCheck %s --check-prefix=V9UNOPT
+
+
+; V8-LABEL: func
+; V8: _GLOBAL_OFFSET_TABLE_
+
+; V9-LABEL: func
+; V9: _GLOBAL_OFFSET_TABLE_
@foo = global i32 0 ; <i32*> [#uses=1]
-define i32 @func() nounwind readonly {
+define i32 @func(i32 %a) nounwind readonly {
entry:
%0 = load i32* @foo, align 4 ; <i32> [#uses=1]
ret i32 %0
}
+
+; V8UNOPT-LABEL: test_spill
+; V8UNOPT: sethi %hi(_GLOBAL_OFFSET_TABLE_+{{.+}}), [[R:%[goli][0-7]]]
+; V8UNOPT: or [[R]], %lo(_GLOBAL_OFFSET_TABLE_+{{.+}}), [[R]]
+; V8UNOPT: add [[R]], %o7, [[R]]
+; V8UNOPT: st [[R]], [%fp+{{.+}}]
+
+; V9UNOPT-LABEL: test_spill
+; V9UNOPT: sethi %hi(_GLOBAL_OFFSET_TABLE_+{{.+}}), [[R:%[goli][0-7]]]
+; V9UNOPT: or [[R]], %lo(_GLOBAL_OFFSET_TABLE_+{{.+}}), [[R]]
+; V9UNOPT: add [[R]], %o7, [[R]]
+; V9UNOPT: stx [[R]], [%fp+{{.+}}]
+
+define i32 @test_spill(i32 %a, i32 %b) {
+entry:
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %ret = load i32* @foo, align 4
+ ret i32 %ret
+
+if.end:
+ %add = add nsw i32 %b, %a
+ ret i32 %add
+}
diff --git a/test/CodeGen/SPARC/2011-01-11-Call.ll b/test/CodeGen/SPARC/2011-01-11-Call.ll
index a0f478e119a3..067bade16609 100644
--- a/test/CodeGen/SPARC/2011-01-11-Call.ll
+++ b/test/CodeGen/SPARC/2011-01-11-Call.ll
@@ -8,7 +8,7 @@
; V8-NEXT: nop
; V8: call bar
; V8-NEXT: nop
-; V8: jmp %i7+8
+; V8: ret
; V8-NEXT: restore
; V9-LABEL: test
@@ -17,7 +17,7 @@
; V9-NEXT: nop
; V9: call bar
; V9-NEXT: nop
-; V9: jmp %i7+8
+; V9: ret
; V9-NEXT: restore
define void @test() nounwind {
@@ -36,14 +36,14 @@ declare void @bar(...)
; V8: save %sp
; V8: call foo
; V8-NEXT: nop
-; V8: jmp %i7+8
+; V8: ret
; V8-NEXT: restore %g0, %o0, %o0
; V9-LABEL: test_tail_call_with_return
; V9: save %sp
; V9: call foo
; V9-NEXT: nop
-; V9: jmp %i7+8
+; V9: ret
; V9-NEXT: restore %g0, %o0, %o0
define i32 @test_tail_call_with_return() nounwind {
diff --git a/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll b/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll
index 7cc7868e44f9..1c8e7d8636ff 100644
--- a/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll
+++ b/test/CodeGen/SPARC/2011-01-11-FrameAddr.ll
@@ -2,19 +2,28 @@
;RUN: llc -march=sparc -mattr=v9 < %s | FileCheck %s -check-prefix=V9
;RUN: llc -march=sparc -regalloc=basic < %s | FileCheck %s -check-prefix=V8
;RUN: llc -march=sparc -regalloc=basic -mattr=v9 < %s | FileCheck %s -check-prefix=V9
+;RUN: llc -march=sparcv9 < %s | FileCheck %s -check-prefix=SPARC64
define i8* @frameaddr() nounwind readnone {
entry:
;V8-LABEL: frameaddr:
;V8: save %sp, -96, %sp
-;V8: jmp %i7+8
+;V8: ret
;V8: restore %g0, %fp, %o0
;V9-LABEL: frameaddr:
;V9: save %sp, -96, %sp
-;V9: jmp %i7+8
+;V9: ret
;V9: restore %g0, %fp, %o0
+
+;SPARC64-LABEL: frameaddr
+;SPARC64: save %sp, -128, %sp
+;SPARC64: add %fp, 2047, %i0
+;SPARC64: ret
+;SPARC64-NOT: restore %g0, %g0, %g0
+;SPARC64: restore
+
%0 = tail call i8* @llvm.frameaddress(i32 0)
ret i8* %0
}
@@ -32,6 +41,14 @@ entry:
;V9: ld [%fp+56], {{.+}}
;V9: ld [{{.+}}+56], {{.+}}
;V9: ld [{{.+}}+56], {{.+}}
+
+;SPARC64-LABEL: frameaddr2
+;SPARC64: flushw
+;SPARC64: ldx [%fp+2159], %[[R0:[goli][0-7]]]
+;SPARC64: ldx [%[[R0]]+2159], %[[R1:[goli][0-7]]]
+;SPARC64: ldx [%[[R1]]+2159], %[[R2:[goli][0-7]]]
+;SPARC64: add %[[R2]], 2047, {{.+}}
+
%0 = tail call i8* @llvm.frameaddress(i32 3)
ret i8* %0
}
@@ -43,10 +60,13 @@ declare i8* @llvm.frameaddress(i32) nounwind readnone
define i8* @retaddr() nounwind readnone {
entry:
;V8-LABEL: retaddr:
-;V8: or %g0, %o7, {{.+}}
+;V8: mov %o7, {{.+}}
;V9-LABEL: retaddr:
-;V9: or %g0, %o7, {{.+}}
+;V9: mov %o7, {{.+}}
+
+;SPARC64-LABEL: retaddr
+;SPARC64: mov %o7, {{.+}}
%0 = tail call i8* @llvm.returnaddress(i32 0)
ret i8* %0
@@ -66,17 +86,11 @@ entry:
;V9: ld [{{.+}}+56], {{.+}}
;V9: ld [{{.+}}+60], {{.+}}
-;V8LEAF-LABEL: retaddr2:
-;V8LEAF: ta 3
-;V8LEAF: ld [%fp+56], %[[R:[goli][0-7]]]
-;V8LEAF: ld [%[[R]]+56], %[[R1:[goli][0-7]]]
-;V8LEAF: ld [%[[R1]]+60], {{.+}}
-
-;V9LEAF-LABEL: retaddr2:
-;V9LEAF: flushw
-;V9LEAF: ld [%fp+56], %[[R:[goli][0-7]]]
-;V9LEAF: ld [%[[R]]+56], %[[R1:[goli][0-7]]]
-;V9LEAF: ld [%[[R1]]+60], {{.+}}
+;SPARC64-LABEL: retaddr2
+;SPARC64: flushw
+;SPARC64: ldx [%fp+2159], %[[R0:[goli][0-7]]]
+;SPARC64: ldx [%[[R0]]+2159], %[[R1:[goli][0-7]]]
+;SPARC64: ldx [%[[R1]]+2167], {{.+}}
%0 = tail call i8* @llvm.returnaddress(i32 3)
ret i8* %0
diff --git a/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll b/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
index c71e7c00b916..8a3edc64b2da 100644
--- a/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
+++ b/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
@@ -1,13 +1,14 @@
-;RUN: llc -march=sparc < %s | FileCheck %s
-;RUN: llc -march=sparc -O0 < %s | FileCheck %s -check-prefix=UNOPT
+;RUN: llc -march=sparc < %s -verify-machineinstrs | FileCheck %s
+;RUN: llc -march=sparc -O0 < %s -verify-machineinstrs | FileCheck %s -check-prefix=UNOPT
+target triple = "sparc-unknown-linux-gnu"
define i32 @test(i32 %a) nounwind {
entry:
; CHECK: test
; CHECK: call bar
; CHECK-NOT: nop
-; CHECK: jmp
+; CHECK: ret
; CHECK-NEXT: restore
%0 = tail call i32 @bar(i32 %a) nounwind
ret i32 %0
@@ -18,7 +19,7 @@ entry:
; CHECK: test_jmpl
; CHECK: call
; CHECK-NOT: nop
-; CHECK: jmp
+; CHECK: ret
; CHECK-NEXT: restore
%0 = tail call i32 %f(i32 %a, i32 %b) nounwind
ret i32 %0
@@ -47,7 +48,7 @@ bb: ; preds = %entry, %bb
bb5: ; preds = %bb, %entry
%a_addr.1.lcssa = phi i32 [ %a, %entry ], [ %a_addr.0, %bb ]
-;CHECK: jmp
+;CHECK: retl
;CHECK-NOT: restore
ret i32 %a_addr.1.lcssa
}
@@ -59,7 +60,7 @@ entry:
;CHECK: !NO_APP
;CHECK-NEXT: cmp
;CHECK-NEXT: bg
-;CHECK-NEXT: or
+;CHECK-NEXT: mov
tail call void asm sideeffect "sethi 0, %g0", ""() nounwind
%0 = icmp slt i32 %a, 0
br i1 %0, label %bb, label %bb1
@@ -110,7 +111,7 @@ declare i32 @func(i32*)
define i32 @restore_add(i32 %a, i32 %b) {
entry:
;CHECK-LABEL: restore_add:
-;CHECK: jmp %i7+8
+;CHECK: ret
;CHECK: restore %o0, %i1, %o0
%0 = tail call i32 @bar(i32 %a) nounwind
%1 = add nsw i32 %0, %b
@@ -120,7 +121,7 @@ entry:
define i32 @restore_add_imm(i32 %a) {
entry:
;CHECK-LABEL: restore_add_imm:
-;CHECK: jmp %i7+8
+;CHECK: ret
;CHECK: restore %o0, 20, %o0
%0 = tail call i32 @bar(i32 %a) nounwind
%1 = add nsw i32 %0, 20
@@ -130,7 +131,7 @@ entry:
define i32 @restore_or(i32 %a) {
entry:
;CHECK-LABEL: restore_or:
-;CHECK: jmp %i7+8
+;CHECK: ret
;CHECK: restore %g0, %o0, %o0
%0 = tail call i32 @bar(i32 %a) nounwind
ret i32 %0
@@ -140,8 +141,9 @@ define i32 @restore_or_imm(i32 %a) {
entry:
;CHECK-LABEL: restore_or_imm:
;CHECK: or %o0, 20, %i0
-;CHECK: jmp %i7+8
-;CHECK: restore %g0, %g0, %g0
+;CHECK: ret
+;CHECK-NOT: restore %g0, %g0, %g0
+;CHECK: restore
%0 = tail call i32 @bar(i32 %a) nounwind
%1 = or i32 %0, 20
ret i32 %1
@@ -174,7 +176,8 @@ define i32 @restore_sethi_large(i32 %a) {
entry:
;CHECK-LABEL: restore_sethi_large:
;CHECK: sethi 4000, %i0
-;CHECK: restore %g0, %g0, %g0
+;CHECK-NOT: restore %g0, %g0, %g0
+;CHECK: restore
%0 = tail call i32 @bar(i32 %a) nounwind
%1 = icmp ne i32 %0, 0
%2 = select i1 %1, i32 4096000, i32 0
diff --git a/test/CodeGen/SPARC/64abi.ll b/test/CodeGen/SPARC/64abi.ll
index 8b752a1a2c3c..a88e19a5e2d4 100644
--- a/test/CodeGen/SPARC/64abi.ll
+++ b/test/CodeGen/SPARC/64abi.ll
@@ -44,7 +44,7 @@ define void @intarg(i8 %a0, ; %i0
; CHECK: sra %i0, 0, [[R:%[gilo][0-7]]]
; CHECK: stx [[R]], [%sp+2223]
; Use %o0-%o5 for outgoing arguments
-; CHECK: or %g0, 5, %o5
+; CHECK: mov 5, %o5
; CHECK: call intarg
; CHECK-NOT: add %sp
; CHECK: restore
@@ -180,7 +180,7 @@ define void @call_inreg_fi(i32* %p, i32 %i1, float %f5) {
}
; CHECK: inreg_ff
-; CHECK: fsubs %f0, %f1, %f1
+; CHECK: fsubs %f0, %f1, %f0
define float @inreg_ff(float inreg %a0, ; %f0
float inreg %a1) { ; %f1
%rv = fsub float %a0, %a1
@@ -208,7 +208,7 @@ define i32 @inreg_if(float inreg %a0, ; %f0
; CHECK: call_inreg_if
; CHECK: fmovs %f3, %f0
-; CHECK: or %g0, %i2, %o0
+; CHECK: mov %i2, %o0
; CHECK: call inreg_if
define void @call_inreg_if(i32* %p, float %f3, i32 %i2) {
%x = call i32 @inreg_if(float %f3, i32 %i2)
@@ -262,10 +262,10 @@ define void @call_ret_i64_pair(i64* %i0) {
ret void
}
-; This is not a C struct, each member uses 8 bytes.
+; This is not a C struct, the i32 member uses 8 bytes, but the float only 4.
; CHECK: ret_i32_float_pair
; CHECK: ld [%i2], %i0
-; CHECK: ld [%i3], %f3
+; CHECK: ld [%i3], %f2
define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
i32* %p, float* %q) {
%r1 = load i32* %p
@@ -279,7 +279,7 @@ define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
; CHECK: call_ret_i32_float_pair
; CHECK: call ret_i32_float_pair
; CHECK: st %o0, [%i0]
-; CHECK: st %f3, [%i1]
+; CHECK: st %f2, [%i1]
define void @call_ret_i32_float_pair(i32* %i0, float* %i1) {
%rv = call { i32, float } @ret_i32_float_pair(i32 undef, i32 undef,
i32* undef, float* undef)
@@ -411,3 +411,54 @@ entry:
}
declare i32 @use_buf(i32, i8*)
+
+; CHECK-LABEL: test_fp128_args
+; CHECK-DAG: std %f0, [%fp+{{.+}}]
+; CHECK-DAG: std %f2, [%fp+{{.+}}]
+; CHECK-DAG: std %f6, [%fp+{{.+}}]
+; CHECK-DAG: std %f4, [%fp+{{.+}}]
+; CHECK: add %fp, [[Offset:[0-9]+]], %o0
+; CHECK: call _Qp_add
+; CHECK: ldd [%fp+[[Offset]]], %f0
+define fp128 @test_fp128_args(fp128 %a, fp128 %b) {
+entry:
+ %0 = fadd fp128 %a, %b
+ ret fp128 %0
+}
+
+declare i64 @receive_fp128(i64 %a, ...)
+
+; CHECK-LABEL: test_fp128_variable_args
+; CHECK-DAG: std %f4, [%sp+[[Offset0:[0-9]+]]]
+; CHECK-DAG: std %f6, [%sp+[[Offset1:[0-9]+]]]
+; CHECK-DAG: ldx [%sp+[[Offset0]]], %o2
+; CHECK-DAG: ldx [%sp+[[Offset1]]], %o3
+; CHECK: call receive_fp128
+define i64 @test_fp128_variable_args(i64 %a, fp128 %b) {
+entry:
+ %0 = call i64 (i64, ...)* @receive_fp128(i64 %a, fp128 %b)
+ ret i64 %0
+}
+
+; CHECK-LABEL: test_call_libfunc
+; CHECK: st %f1, [%fp+[[Offset0:[0-9]+]]]
+; CHECK: fmovs %f3, %f1
+; CHECK: call cosf
+; CHECK: st %f0, [%fp+[[Offset1:[0-9]+]]]
+; CHECK: ld [%fp+[[Offset0]]], %f1
+; CHECK: call sinf
+; CHECK: ld [%fp+[[Offset1]]], %f1
+; CHECK: fmuls %f1, %f0, %f0
+
+define inreg float @test_call_libfunc(float %arg0, float %arg1) {
+entry:
+ %0 = tail call inreg float @cosf(float %arg1)
+ %1 = tail call inreg float @sinf(float %arg0)
+ %2 = fmul float %0, %1
+ ret float %2
+}
+
+declare inreg float @cosf(float %arg) readnone nounwind
+declare inreg float @sinf(float %arg) readnone nounwind
+
+
diff --git a/test/CodeGen/SPARC/64bit.ll b/test/CodeGen/SPARC/64bit.ll
index f5ed047592e9..b18f1bc0e837 100644
--- a/test/CodeGen/SPARC/64bit.ll
+++ b/test/CodeGen/SPARC/64bit.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -march=sparcv9 -disable-sparc-delay-filler -disable-sparc-leaf-proc | FileCheck %s
-; RUN: llc < %s -march=sparcv9 | FileCheck %s -check-prefix=OPT
+; RUN: llc < %s -march=sparcv9 -mattr=+popc -disable-sparc-delay-filler -disable-sparc-leaf-proc | FileCheck %s
+; RUN: llc < %s -march=sparcv9 -mattr=+popc | FileCheck %s -check-prefix=OPT
; CHECK-LABEL: ret2:
-; CHECK: or %g0, %i1, %i0
+; CHECK: mov %i1, %i0
; OPT-LABEL: ret2:
-; OPT: jmp %o7+8
-; OPT: or %g0, %o1, %o0
+; OPT: retl
+; OPT: mov %o1, %o0
define i64 @ret2(i64 %a, i64 %b) {
ret i64 %b
}
@@ -15,7 +15,7 @@ define i64 @ret2(i64 %a, i64 %b) {
; CHECK: sllx %i0, 7, %i0
; OPT-LABEL: shl_imm:
-; OPT: jmp %o7+8
+; OPT: retl
; OPT: sllx %o0, 7, %o0
define i64 @shl_imm(i64 %a) {
%x = shl i64 %a, 7
@@ -26,7 +26,7 @@ define i64 @shl_imm(i64 %a) {
; CHECK: srax %i0, %i1, %i0
; OPT-LABEL: sra_reg:
-; OPT: jmp %o7+8
+; OPT: retl
; OPT: srax %o0, %o1, %o0
define i64 @sra_reg(i64 %a, i64 %b) {
%x = ashr i64 %a, %b
@@ -39,21 +39,21 @@ define i64 @sra_reg(i64 %a, i64 %b) {
; restore %g0, %g0, %o0
;
; CHECK: ret_imm0
-; CHECK: or %g0, 0, %i0
+; CHECK: mov 0, %i0
; OPT: ret_imm0
-; OPT: jmp %o7+8
-; OPT: or %g0, 0, %o0
+; OPT: retl
+; OPT: mov 0, %o0
define i64 @ret_imm0() {
ret i64 0
}
; CHECK: ret_simm13
-; CHECK: or %g0, -4096, %i0
+; CHECK: mov -4096, %i0
; OPT: ret_simm13
-; OPT: jmp %o7+8
-; OPT: or %g0, -4096, %o0
+; OPT: retl
+; OPT: mov -4096, %o0
define i64 @ret_simm13() {
ret i64 -4096
}
@@ -64,7 +64,7 @@ define i64 @ret_simm13() {
; CHECK: restore
; OPT: ret_sethi
-; OPT: jmp %o7+8
+; OPT: retl
; OPT: sethi 4, %o0
define i64 @ret_sethi() {
ret i64 4096
@@ -76,7 +76,7 @@ define i64 @ret_sethi() {
; OPT: ret_sethi_or
; OPT: sethi 4, [[R:%[go][0-7]]]
-; OPT: jmp %o7+8
+; OPT: retl
; OPT: or [[R]], 1, %o0
define i64 @ret_sethi_or() {
@@ -89,7 +89,7 @@ define i64 @ret_sethi_or() {
; OPT: ret_nimm33
; OPT: sethi 4, [[R:%[go][0-7]]]
-; OPT: jmp %o7+8
+; OPT: retl
; OPT: xor [[R]], -4, %o0
define i64 @ret_nimm33() {
diff --git a/test/CodeGen/SPARC/64cond.ll b/test/CodeGen/SPARC/64cond.ll
index 7451b04eadfe..e491d61aad27 100644
--- a/test/CodeGen/SPARC/64cond.ll
+++ b/test/CodeGen/SPARC/64cond.ll
@@ -80,7 +80,7 @@ entry:
; CHECK: selectf32_xcc
; CHECK: cmp %i0, %i1
; CHECK: fmovsg %xcc, %f5, %f7
-; CHECK: fmovs %f7, %f1
+; CHECK: fmovs %f7, %f0
define float @selectf32_xcc(i64 %x, i64 %y, float %a, float %b) {
entry:
%tobool = icmp sgt i64 %x, %y
@@ -111,6 +111,11 @@ entry:
}
; CHECK-LABEL: setcc_resultty
+; CHECK-DAG: srax %i0, 63, %o0
+; CHECK-DAG: mov %i0, %o1
+; CHECK-DAG: mov 0, %o2
+; CHECK-DAG: mov 32, %o3
+; CHECK-DAG: call __multi3
; CHECK: cmp
; CHECK: movne %xcc, 1, [[R:%[gilo][0-7]]]
; CHECK: or [[R]], %i1, %i0
diff --git a/test/CodeGen/SPARC/64spill.ll b/test/CodeGen/SPARC/64spill.ll
new file mode 100644
index 000000000000..ab08d6b0570b
--- /dev/null
+++ b/test/CodeGen/SPARC/64spill.ll
@@ -0,0 +1,116 @@
+; RUN: llc < %s -march=sparcv9 | FileCheck %s
+
+target datalayout = "E-i64:64-n32:64-S128"
+target triple = "sparc64-sun-sparc"
+
+; CHECK-LABEL: test_and_spill
+; CHECK: and %i0, %i1, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_and_spill(i64 %a, i64 %b) {
+entry:
+ %r0 = and i64 %a, %b
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_or_spill
+; CHECK: or %i0, %i1, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_or_spill(i64 %a, i64 %b) {
+entry:
+ %r0 = or i64 %a, %b
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_xor_spill
+; CHECK: xor %i0, %i1, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_xor_spill(i64 %a, i64 %b) {
+entry:
+ %r0 = xor i64 %a, %b
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+
+; CHECK-LABEL: test_add_spill
+; CHECK: add %i0, %i1, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_add_spill(i64 %a, i64 %b) {
+entry:
+ %r0 = add i64 %a, %b
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_sub_spill
+; CHECK: sub %i0, %i1, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_sub_spill(i64 %a, i64 %b) {
+entry:
+ %r0 = sub i64 %a, %b
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_andi_spill
+; CHECK: and %i0, 1729, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_andi_spill(i64 %a) {
+entry:
+ %r0 = and i64 %a, 1729
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_ori_spill
+; CHECK: or %i0, 1729, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_ori_spill(i64 %a) {
+entry:
+ %r0 = or i64 %a, 1729
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_xori_spill
+; CHECK: xor %i0, 1729, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_xori_spill(i64 %a) {
+entry:
+ %r0 = xor i64 %a, 1729
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_addi_spill
+; CHECK: add %i0, 1729, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_addi_spill(i64 %a) {
+entry:
+ %r0 = add i64 %a, 1729
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
+; CHECK-LABEL: test_subi_spill
+; CHECK: add %i0, -1729, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%fp+{{.+}}]
+; CHECK: ldx [%fp+{{.+}}], %i0
+define i64 @test_subi_spill(i64 %a) {
+entry:
+ %r0 = sub i64 %a, 1729
+ %0 = tail call i64 asm sideeffect "#$0 $1", "=r,r,~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{i6},~{i7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o6}"(i64 %r0)
+ ret i64 %r0
+}
+
diff --git a/test/CodeGen/SPARC/atomics.ll b/test/CodeGen/SPARC/atomics.ll
new file mode 100644
index 000000000000..ee6c1f8999b0
--- /dev/null
+++ b/test/CodeGen/SPARC/atomics.ll
@@ -0,0 +1,155 @@
+; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: test_atomic_i32
+; CHECK: ld [%o0]
+; CHECK: membar
+; CHECK: ld [%o1]
+; CHECK: membar
+; CHECK: membar
+; CHECK: st {{.+}}, [%o2]
+define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
+entry:
+ %0 = load atomic i32* %ptr1 acquire, align 8
+ %1 = load atomic i32* %ptr2 acquire, align 8
+ %2 = add i32 %0, %1
+ store atomic i32 %2, i32* %ptr3 release, align 8
+ ret i32 %2
+}
+
+; CHECK-LABEL: test_atomic_i64
+; CHECK: ldx [%o0]
+; CHECK: membar
+; CHECK: ldx [%o1]
+; CHECK: membar
+; CHECK: membar
+; CHECK: stx {{.+}}, [%o2]
+define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
+entry:
+ %0 = load atomic i64* %ptr1 acquire, align 8
+ %1 = load atomic i64* %ptr2 acquire, align 8
+ %2 = add i64 %0, %1
+ store atomic i64 %2, i64* %ptr3 release, align 8
+ ret i64 %2
+}
+
+; CHECK-LABEL: test_cmpxchg_i32
+; CHECK: mov 123, [[R:%[gilo][0-7]]]
+; CHECK: cas [%o1], %o0, [[R]]
+
+define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
+entry:
+ %pair = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
+ %b = extractvalue { i32, i1 } %pair, 0
+ ret i32 %b
+}
+
+; CHECK-LABEL: test_cmpxchg_i64
+; CHECK: mov 123, [[R:%[gilo][0-7]]]
+; CHECK: casx [%o1], %o0, [[R]]
+
+define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
+entry:
+ %pair = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
+ %b = extractvalue { i64, i1 } %pair, 0
+ ret i64 %b
+}
+
+; CHECK-LABEL: test_swap_i32
+; CHECK: mov 42, [[R:%[gilo][0-7]]]
+; CHECK: swap [%o1], [[R]]
+
+define i32 @test_swap_i32(i32 %a, i32* %ptr) {
+entry:
+ %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
+ ret i32 %b
+}
+
+; CHECK-LABEL: test_swap_i64
+; CHECK: casx [%o1],
+
+define i64 @test_swap_i64(i64 %a, i64* %ptr) {
+entry:
+ %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
+ ret i64 %b
+}
+
+; CHECK-LABEL: test_load_add_32
+; CHECK: membar
+; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
+; CHECK: cas [%o0], [[V]], [[U]]
+; CHECK: membar
+define zeroext i32 @test_load_add_32(i32* %p, i32 zeroext %v) {
+entry:
+ %0 = atomicrmw add i32* %p, i32 %v seq_cst
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_load_sub_64
+; CHECK: membar
+; CHECK: sub
+; CHECK: casx [%o0]
+; CHECK: membar
+define zeroext i64 @test_load_sub_64(i64* %p, i64 zeroext %v) {
+entry:
+ %0 = atomicrmw sub i64* %p, i64 %v seq_cst
+ ret i64 %0
+}
+
+; CHECK-LABEL: test_load_xor_32
+; CHECK: membar
+; CHECK: xor
+; CHECK: cas [%o0]
+; CHECK: membar
+define zeroext i32 @test_load_xor_32(i32* %p, i32 zeroext %v) {
+entry:
+ %0 = atomicrmw xor i32* %p, i32 %v seq_cst
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_load_and_32
+; CHECK: membar
+; CHECK: and
+; CHECK-NOT: xor
+; CHECK: cas [%o0]
+; CHECK: membar
+define zeroext i32 @test_load_and_32(i32* %p, i32 zeroext %v) {
+entry:
+ %0 = atomicrmw and i32* %p, i32 %v seq_cst
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_load_nand_32
+; CHECK: membar
+; CHECK: and
+; CHECK: xor
+; CHECK: cas [%o0]
+; CHECK: membar
+define zeroext i32 @test_load_nand_32(i32* %p, i32 zeroext %v) {
+entry:
+ %0 = atomicrmw nand i32* %p, i32 %v seq_cst
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_load_max_64
+; CHECK: membar
+; CHECK: cmp
+; CHECK: movg %xcc
+; CHECK: casx [%o0]
+; CHECK: membar
+define zeroext i64 @test_load_max_64(i64* %p, i64 zeroext %v) {
+entry:
+ %0 = atomicrmw max i64* %p, i64 %v seq_cst
+ ret i64 %0
+}
+
+; CHECK-LABEL: test_load_umin_32
+; CHECK: membar
+; CHECK: cmp
+; CHECK: movleu %icc
+; CHECK: cas [%o0]
+; CHECK: membar
+define zeroext i32 @test_load_umin_32(i32* %p, i32 zeroext %v) {
+entry:
+ %0 = atomicrmw umin i32* %p, i32 %v seq_cst
+ ret i32 %0
+}
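
As the test_cmpxchg_* functions above show, cmpxchg in this release returns a { value, i1 } pair instead of a bare value, so callers extract the old value and the success flag separately. A minimal sketch of consuming both fields (the function name and the %ptr/%expected/%new parameters are illustrative, not taken from the test):

; Illustrative two-result cmpxchg usage; names are hypothetical.
define i1 @cmpxchg_success_sketch(i32* %ptr, i32 %expected, i32 %new) {
entry:
  %pair = cmpxchg i32* %ptr, i32 %expected, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0   ; value that was in memory before
  %ok  = extractvalue { i32, i1 } %pair, 1   ; true if the exchange actually happened
  ret i1 %ok
}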
diff --git a/test/CodeGen/SPARC/constpool.ll b/test/CodeGen/SPARC/constpool.ll
index b861676ce3e1..8b0d1d9656df 100644
--- a/test/CodeGen/SPARC/constpool.ll
+++ b/test/CodeGen/SPARC/constpool.ll
@@ -12,7 +12,7 @@ entry:
; abs32: floatCP
; abs32: sethi %hi(.LCPI0_0), %[[R:[gilo][0-7]]]
-; abs32: jmp %o7+8
+; abs32: retl
; abs32: ld [%[[R]]+%lo(.LCPI0_0)], %f
@@ -20,8 +20,8 @@ entry:
; abs44: sethi %h44(.LCPI0_0), %[[R1:[gilo][0-7]]]
; abs44: add %[[R1]], %m44(.LCPI0_0), %[[R2:[gilo][0-7]]]
; abs44: sllx %[[R2]], 12, %[[R3:[gilo][0-7]]]
-; abs44: jmp %o7+8
-; abs44: ld [%[[R3]]+%l44(.LCPI0_0)], %f1
+; abs44: retl
+; abs44: ld [%[[R3]]+%l44(.LCPI0_0)], %f0
; abs64: floatCP
@@ -30,8 +30,8 @@ entry:
; abs64: sethi %hh(.LCPI0_0), %[[R3:[gilo][0-7]]]
; abs64: add %[[R3]], %hm(.LCPI0_0), %[[R4:[gilo][0-7]]]
; abs64: sllx %[[R4]], 32, %[[R5:[gilo][0-7]]]
-; abs64: jmp %o7+8
-; abs64: ld [%[[R5]]+%[[R2]]], %f1
+; abs64: retl
+; abs64: ld [%[[R5]]+%[[R2]]], %f0
; v8pic32: floatCP
@@ -40,7 +40,7 @@ entry:
; v8pic32: add %[[R1]], %lo(.LCPI0_0), %[[Goffs:[gilo][0-7]]]
; v8pic32: ld [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
; v8pic32: ld [%[[Gaddr]]], %f0
-; v8pic32: jmp %i7+8
+; v8pic32: ret
; v8pic32: restore
@@ -50,8 +50,8 @@ entry:
; v9pic32: sethi %hi(.LCPI0_0), %[[R1:[gilo][0-7]]]
; v9pic32: add %[[R1]], %lo(.LCPI0_0), %[[Goffs:[gilo][0-7]]]
; v9pic32: ldx [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
-; v9pic32: ld [%[[Gaddr]]], %f1
-; v9pic32: jmp %i7+8
+; v9pic32: ld [%[[Gaddr]]], %f0
+; v9pic32: ret
; v9pic32: restore
diff --git a/test/CodeGen/SPARC/ctpop.ll b/test/CodeGen/SPARC/ctpop.ll
index 916a41496e2a..3a373404b991 100644
--- a/test/CodeGen/SPARC/ctpop.ll
+++ b/test/CodeGen/SPARC/ctpop.ll
@@ -1,8 +1,29 @@
-; RUN: llc < %s -march=sparc -mattr=-v9 | not grep popc
-; RUN: llc < %s -march=sparc -mattr=+v9 | grep popc
+; RUN: llc < %s -march=sparc -mattr=-v9 | FileCheck %s -check-prefix=V8
+; RUN: llc < %s -march=sparc -mattr=+v9,+popc | FileCheck %s -check-prefix=V9
+; RUN: llc < %s -march=sparc -mcpu=v9 | FileCheck %s -check-prefix=V8
+; RUN: llc < %s -march=sparc -mcpu=ultrasparc | FileCheck %s -check-prefix=V8
+; RUN: llc < %s -march=sparc -mcpu=ultrasparc3 | FileCheck %s -check-prefix=V8
+; RUN: llc < %s -march=sparc -mcpu=niagara | FileCheck %s -check-prefix=V8
+; RUN: llc < %s -march=sparc -mcpu=niagara2 | FileCheck %s -check-prefix=V9
+; RUN: llc < %s -march=sparc -mcpu=niagara3 | FileCheck %s -check-prefix=V9
+; RUN: llc < %s -march=sparc -mcpu=niagara4 | FileCheck %s -check-prefix=V9
+; RUN: llc < %s -march=sparcv9 -mattr=+popc | FileCheck %s -check-prefix=SPARC64
declare i32 @llvm.ctpop.i32(i32)
+; V8-LABEL: test
+; V8-NOT: popc
+
+; V9-LABEL: test
+; V9: srl %o0, 0, %o0
+; V9-NEXT: retl
+; V9-NEXT: popc %o0, %o0
+
+; SPARC64-LABEL: test
+; SPARC64: srl %o0, 0, %o0
+; SPARC64: retl
+; SPARC64: popc %o0, %o0
+
define i32 @test(i32 %X) {
%Y = call i32 @llvm.ctpop.i32( i32 %X ) ; <i32> [#uses=1]
ret i32 %Y
diff --git a/test/CodeGen/SPARC/exception.ll b/test/CodeGen/SPARC/exception.ll
index cb5b6e5c1168..eca9c8bf739f 100644
--- a/test/CodeGen/SPARC/exception.ll
+++ b/test/CodeGen/SPARC/exception.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -march=sparc | FileCheck %s
+; RUN: llc < %s -march=sparc -relocation-model=static | FileCheck -check-prefix=V8ABS %s
+; RUN: llc < %s -march=sparc -relocation-model=pic | FileCheck -check-prefix=V8PIC %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=static | FileCheck -check-prefix=V9ABS %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=pic | FileCheck -check-prefix=V9PIC %s
%struct.__fundamental_type_info_pseudo = type { %struct.__type_info_pseudo }
@@ -6,25 +9,67 @@
@_ZTIi = external constant %struct.__fundamental_type_info_pseudo
@_ZTIf = external constant %struct.__fundamental_type_info_pseudo
-@.cst = linker_private unnamed_addr constant [12 x i8] c"catched int\00", align 64
-@.cst1 = linker_private unnamed_addr constant [14 x i8] c"catched float\00", align 64
-
-; CHECK-LABEL: main:
-; CHECK: .cfi_startproc
-; CHECK: .cfi_def_cfa_register 30
-; CHECK: .cfi_window_save
-; CHECK: .cfi_register 15, 31
-
-; CHECK: call __cxa_throw
-; CHECK: call __cxa_throw
-
-; CHECK: call __cxa_begin_catch
-; CHECK: call __cxa_end_catch
-
-; CHECK: call __cxa_begin_catch
-; CHECK: call __cxa_end_catch
-
-; CHECK: .cfi_endproc
+@.cst = private unnamed_addr constant [12 x i8] c"catched int\00", align 64
+@.cst1 = private unnamed_addr constant [14 x i8] c"catched float\00", align 64
+
+; V8ABS-LABEL: main:
+; V8ABS: .cfi_startproc
+; V8ABS: .cfi_personality 0, __gxx_personality_v0
+; V8ABS: .cfi_lsda 0,
+; V8ABS: .cfi_def_cfa_register {{30|%fp}}
+; V8ABS: .cfi_window_save
+; V8ABS: .cfi_register 15, 31
+
+; V8ABS: call __cxa_throw
+; V8ABS: call __cxa_throw
+
+; V8ABS: call __cxa_begin_catch
+; V8ABS: call __cxa_end_catch
+
+; V8ABS: call __cxa_begin_catch
+; V8ABS: call __cxa_end_catch
+
+; V8ABS: .cfi_endproc
+
+; V8PIC-LABEL: main:
+; V8PIC: .cfi_startproc
+; V8PIC: .cfi_personality 155, DW.ref.__gxx_personality_v0
+; V8PIC: .cfi_lsda 27,
+; V8PIC: .cfi_def_cfa_register {{30|%fp}}
+; V8PIC: .cfi_window_save
+; V8PIC: .cfi_register 15, 31
+; V8PIC: .section .gcc_except_table
+; V8PIC-NOT: .section
+; V8PIC: .word %r_disp32(.L_ZTIi.DW.stub)
+; V8PIC: .data
+; V8PIC: .L_ZTIi.DW.stub:
+; V8PIC-NEXT: .word _ZTIi
+
+
+; V9ABS-LABEL: main:
+; V9ABS: .cfi_startproc
+; V9ABS: .cfi_personality 0, __gxx_personality_v0
+; V9ABS: .cfi_lsda 27,
+; V9ABS: .cfi_def_cfa_register {{30|%fp}}
+; V9ABS: .cfi_window_save
+; V9ABS: .cfi_register 15, 31
+; V9ABS: .section .gcc_except_table
+; V9ABS-NOT: .section
+; V9ABS: .xword _ZTIi
+
+; V9PIC-LABEL: main:
+; V9PIC: .cfi_startproc
+; V9PIC: .cfi_personality 155, DW.ref.__gxx_personality_v0
+; V9PIC: .cfi_lsda 27,
+; V9PIC: .cfi_def_cfa_register {{30|%fp}}
+; V9PIC: .cfi_window_save
+; V9PIC: .cfi_register 15, 31
+; V9PIC: .section .gcc_except_table
+; V9PIC-NOT: .section
+; V9PIC: .word %r_disp32(.L_ZTIi.DW.stub)
+; V9PIC: .data
+; V9PIC: .L_ZTIi.DW.stub:
+; V9PIC-NEXT: .xword _ZTIi
define i32 @main(i32 %argc, i8** nocapture readnone %argv) unnamed_addr #0 {
entry:
diff --git a/test/CodeGen/SPARC/fp128.ll b/test/CodeGen/SPARC/fp128.ll
index c761361e773e..abd89bf264e1 100644
--- a/test/CodeGen/SPARC/fp128.ll
+++ b/test/CodeGen/SPARC/fp128.ll
@@ -45,14 +45,14 @@ entry:
; HARD: std %f{{.+}}, [%[[S1:.+]]]
; HARD-DAG: ldd [%[[S0]]], %f{{.+}}
; HARD-DAG: ldd [%[[S1]]], %f{{.+}}
-; HARD: jmp
+; HARD: jmp %o7+12
; SOFT-LABEL: f128_spill
; SOFT: std %f{{.+}}, [%[[S0:.+]]]
; SOFT: std %f{{.+}}, [%[[S1:.+]]]
; SOFT-DAG: ldd [%[[S0]]], %f{{.+}}
; SOFT-DAG: ldd [%[[S1]]], %f{{.+}}
-; SOFT: jmp
+; SOFT: jmp %o7+12
define void @f128_spill(fp128* noalias sret %scalar.result, fp128* byval %a) {
entry:
@@ -132,13 +132,13 @@ entry:
; HARD: ldub
; HARD: faddq
; HARD: stb
-; HARD: jmp
+; HARD: ret
; SOFT-LABEL: fp128_unaligned
; SOFT: ldub
; SOFT: call _Q_add
; SOFT: stb
-; SOFT: jmp
+; SOFT: ret
define void @fp128_unaligned(fp128* %a, fp128* %b, fp128* %c) {
entry:
@@ -232,3 +232,14 @@ entry:
store i32 %3, i32* %4, align 8
ret void
}
+
+; SOFT-LABEL: f128_neg
+; SOFT: fnegs
+
+define void @f128_neg(fp128* noalias sret %scalar.result, fp128* byval %a) {
+entry:
+ %0 = load fp128* %a, align 8
+ %1 = fsub fp128 0xL00000000000000008000000000000000, %0
+ store fp128 %1, fp128* %scalar.result, align 8
+ ret void
+}
diff --git a/test/CodeGen/SPARC/globals.ll b/test/CodeGen/SPARC/globals.ll
index 7e3effe3f4ce..3d3eba28af62 100644
--- a/test/CodeGen/SPARC/globals.ll
+++ b/test/CodeGen/SPARC/globals.ll
@@ -14,7 +14,7 @@ define zeroext i8 @loadG() {
; abs32: loadG
; abs32: sethi %hi(G), %[[R:[gilo][0-7]]]
-; abs32: jmp %o7+8
+; abs32: retl
; abs32: ldub [%[[R]]+%lo(G)], %o0
@@ -22,7 +22,7 @@ define zeroext i8 @loadG() {
; abs44: sethi %h44(G), %[[R1:[gilo][0-7]]]
; abs44: add %[[R1]], %m44(G), %[[R2:[gilo][0-7]]]
; abs44: sllx %[[R2]], 12, %[[R3:[gilo][0-7]]]
-; abs44: jmp %o7+8
+; abs44: retl
; abs44: ldub [%[[R3]]+%l44(G)], %o0
@@ -32,7 +32,7 @@ define zeroext i8 @loadG() {
; abs64: sethi %hh(G), %[[R3:[gilo][0-7]]]
; abs64: add %[[R3]], %hm(G), %[[R4:[gilo][0-7]]]
; abs64: sllx %[[R4]], 32, %[[R5:[gilo][0-7]]]
-; abs64: jmp %o7+8
+; abs64: retl
; abs64: ldub [%[[R5]]+%[[R2]]], %o0
@@ -42,7 +42,7 @@ define zeroext i8 @loadG() {
; v8pic32: add %[[R1]], %lo(G), %[[Goffs:[gilo][0-7]]]
; v8pic32: ld [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
; v8pic32: ldub [%[[Gaddr]]], %i0
-; v8pic32: jmp %i7+8
+; v8pic32: ret
; v8pic32: restore
@@ -52,6 +52,6 @@ define zeroext i8 @loadG() {
; v9pic32: add %[[R1]], %lo(G), %[[Goffs:[gilo][0-7]]]
; v9pic32: ldx [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
; v9pic32: ldub [%[[Gaddr]]], %i0
-; v9pic32: jmp %i7+8
+; v9pic32: ret
; v9pic32: restore
diff --git a/test/CodeGen/SPARC/inlineasm.ll b/test/CodeGen/SPARC/inlineasm.ll
new file mode 100644
index 000000000000..2650533b7fec
--- /dev/null
+++ b/test/CodeGen/SPARC/inlineasm.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=sparc <%s | FileCheck %s
+
+; CHECK-LABEL: test_constraint_r
+; CHECK: add %o1, %o0, %o0
+define i32 @test_constraint_r(i32 %a, i32 %b) {
+entry:
+ %0 = tail call i32 asm sideeffect "add $2, $1, $0", "=r,r,r"(i32 %a, i32 %b)
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_constraint_I
+; CHECK: add %o0, 1023, %o0
+define i32 @test_constraint_I(i32 %a) {
+entry:
+ %0 = tail call i32 asm sideeffect "add $1, $2, $0", "=r,r,rI"(i32 %a, i32 1023)
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_constraint_I_neg
+; CHECK: add %o0, -4096, %o0
+define i32 @test_constraint_I_neg(i32 %a) {
+entry:
+ %0 = tail call i32 asm sideeffect "add $1, $2, $0", "=r,r,rI"(i32 %a, i32 -4096)
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_constraint_I_largeimm
+; CHECK: sethi 9, [[R0:%[gilo][0-7]]]
+; CHECK: or [[R0]], 784, [[R1:%[gilo][0-7]]]
+; CHECK: add %o0, [[R1]], %o0
+define i32 @test_constraint_I_largeimm(i32 %a) {
+entry:
+ %0 = tail call i32 asm sideeffect "add $1, $2, $0", "=r,r,rI"(i32 %a, i32 10000)
+ ret i32 %0
+}
+
+; CHECK-LABEL: test_constraint_reg
+; CHECK: ldda [%o1] 43, %g2
+; CHECK: ldda [%o1] 43, %g3
+define void @test_constraint_reg(i32 %s, i32* %ptr) {
+entry:
+ %0 = tail call i64 asm sideeffect "ldda [$1] $2, $0", "={r2},r,n"(i32* %ptr, i32 43)
+ %1 = tail call i64 asm sideeffect "ldda [$1] $2, $0", "={g3},r,n"(i32* %ptr, i32 43)
+ ret void
+}
diff --git a/test/CodeGen/SPARC/leafproc.ll b/test/CodeGen/SPARC/leafproc.ll
index 0a7ae083d208..abb8ed9be439 100644
--- a/test/CodeGen/SPARC/leafproc.ll
+++ b/test/CodeGen/SPARC/leafproc.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=sparc -disable-sparc-leaf-proc=0 < %s | FileCheck %s
; CHECK-LABEL: func_nobody:
-; CHECK: jmp %o7+8
+; CHECK: retl
; CHECK-NEXT: nop
define void @func_nobody() {
entry:
@@ -10,8 +10,8 @@ entry:
; CHECK-LABEL: return_int_const:
-; CHECK: jmp %o7+8
-; CHECK-NEXT: or %g0, 1729, %o0
+; CHECK: retl
+; CHECK-NEXT: mov 1729, %o0
define i32 @return_int_const() {
entry:
ret i32 1729
@@ -19,7 +19,7 @@ entry:
; CHECK-LABEL: return_double_const:
; CHECK: sethi
-; CHECK: jmp %o7+8
+; CHECK: retl
; CHECK-NEXT: ldd {{.*}}, %f0
define double @return_double_const() {
@@ -29,7 +29,7 @@ entry:
; CHECK-LABEL: leaf_proc_with_args:
; CHECK: add {{%o[0-1]}}, {{%o[0-1]}}, [[R:%[go][0-7]]]
-; CHECK: jmp %o7+8
+; CHECK: retl
; CHECK-NEXT: add [[R]], %o2, %o0
define i32 @leaf_proc_with_args(i32 %a, i32 %b, i32 %c) {
@@ -42,7 +42,7 @@ entry:
; CHECK-LABEL: leaf_proc_with_args_in_stack:
; CHECK-DAG: ld [%sp+92], {{%[go][0-7]}}
; CHECK-DAG: ld [%sp+96], {{%[go][0-7]}}
-; CHECK: jmp %o7+8
+; CHECK: retl
; CHECK-NEXT: add {{.*}}, %o0
define i32 @leaf_proc_with_args_in_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
entry:
@@ -58,12 +58,12 @@ entry:
; CHECK-LABEL: leaf_proc_with_local_array:
; CHECK: add %sp, -104, %sp
-; CHECK: or %g0, 1, [[R1:%[go][0-7]]]
+; CHECK: mov 1, [[R1:%[go][0-7]]]
; CHECK: st [[R1]], [%sp+96]
-; CHECK: or %g0, 2, [[R2:%[go][0-7]]]
+; CHECK: mov 2, [[R2:%[go][0-7]]]
; CHECK: st [[R2]], [%sp+100]
; CHECK: ld {{.+}}, %o0
-; CHECK: jmp %o7+8
+; CHECK: retl
; CHECK-NEXT: add %sp, 104, %sp
define i32 @leaf_proc_with_local_array(i32 %a, i32 %b, i32 %c) {
diff --git a/test/CodeGen/SPARC/lit.local.cfg b/test/CodeGen/SPARC/lit.local.cfg
index 4d344fa91a9e..fa6a54e50132 100644
--- a/test/CodeGen/SPARC/lit.local.cfg
+++ b/test/CodeGen/SPARC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Sparc' in targets:
+if not 'Sparc' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/SPARC/mature-mc-support.ll b/test/CodeGen/SPARC/mature-mc-support.ll
new file mode 100644
index 000000000000..4ed33098051d
--- /dev/null
+++ b/test/CodeGen/SPARC/mature-mc-support.ll
@@ -0,0 +1,20 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
+; FIXME: SPARC doesn't use the integrated assembler by default in all cases
+; so we only test that -filetype=obj tries to parse the assembly.
+
+; SKIP: not llc -march=sparc < %s > /dev/null 2> %t1
+; SKIP: FileCheck %s < %t1
+
+; RUN: not llc -march=sparc -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
+; SKIP: not llc -march=sparcv9 < %s > /dev/null 2> %t3
+; SKIP: FileCheck %s < %t3
+
+; RUN: not llc -march=sparcv9 -filetype=obj < %s > /dev/null 2> %t4
+; RUN: FileCheck %s < %t4
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/SPARC/missinglabel.ll b/test/CodeGen/SPARC/missinglabel.ll
new file mode 100644
index 000000000000..bcf384b7ad29
--- /dev/null
+++ b/test/CodeGen/SPARC/missinglabel.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -verify-machineinstrs | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64-S128"
+target triple = "sparc64-unknown-linux-gnu"
+
+define void @f() align 2 {
+entry:
+; CHECK: %xcc, .LBB0_1
+ %cmp = icmp eq i64 undef, 0
+ br i1 %cmp, label %targetblock, label %cond.false
+
+cond.false:
+ unreachable
+
+; CHECK: .LBB0_1: ! %targetblock
+targetblock:
+ br i1 undef, label %cond.false.i83, label %exit.i85
+
+cond.false.i83:
+ unreachable
+
+exit.i85:
+ unreachable
+}
diff --git a/test/CodeGen/SPARC/obj-relocs.ll b/test/CodeGen/SPARC/obj-relocs.ll
new file mode 100644
index 000000000000..6d57598795d4
--- /dev/null
+++ b/test/CodeGen/SPARC/obj-relocs.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -march=sparcv9 -filetype=obj --relocation-model=static | llvm-readobj -r | FileCheck %s --check-prefix=CHECK-ABS
+; RUN: llc < %s -march=sparcv9 -filetype=obj --relocation-model=pic | llvm-readobj -r | FileCheck %s --check-prefix=CHECK-PIC
+
+;CHECK-ABS: Relocations [
+;CHECK-ABS: 0x{{[0-9,A-F]+}} R_SPARC_H44 AGlobalVar 0x0
+;CHECK-ABS: 0x{{[0-9,A-F]+}} R_SPARC_M44 AGlobalVar 0x0
+;CHECK-ABS: 0x{{[0-9,A-F]+}} R_SPARC_L44 AGlobalVar 0x0
+;CHECK-ABS: 0x{{[0-9,A-F]+}} R_SPARC_WDISP30 bar 0x0
+;CHECK-ABS:]
+
+; CHECK-PIC: Relocations [
+; CHECK-PIC: 0x{{[0-9,A-F]+}} R_SPARC_PC22 _GLOBAL_OFFSET_TABLE_ 0x4
+; CHECK-PIC: 0x{{[0-9,A-F]+}} R_SPARC_PC10 _GLOBAL_OFFSET_TABLE_ 0x8
+; CHECK-PIC: 0x{{[0-9,A-F]+}} R_SPARC_GOT22 AGlobalVar 0x0
+; CHECK-PIC: 0x{{[0-9,A-F]+}} R_SPARC_GOT10 AGlobalVar 0x0
+; CHECK-PIC: 0x{{[0-9,A-F]+}} R_SPARC_WPLT30 bar 0x0
+; CHECK-PIC: ]
+
+
+@AGlobalVar = global i64 0, align 8
+
+define i64 @foo(i64 %a) {
+entry:
+ %0 = load i64* @AGlobalVar, align 4
+ %1 = add i64 %a, %0
+ %2 = call i64 @bar(i64 %1)
+ ret i64 %2
+}
+
+
+declare i64 @bar(i64)
diff --git a/test/CodeGen/SPARC/parts.ll b/test/CodeGen/SPARC/parts.ll
new file mode 100644
index 000000000000..47feb1514275
--- /dev/null
+++ b/test/CodeGen/SPARC/parts.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=sparcv9 | FileCheck %s
+
+; CHECK-LABEL: test
+; CHECK: srl %i1, 0, %o2
+; CHECK-NEXT: mov %i2, %o0
+; CHECK-NEXT: call __ashlti3
+; CHECK-NEXT: mov %i3, %o1
+; CHECK-NEXT: mov %o0, %i0
+
+define i128 @test(i128 %a, i128 %b) {
+entry:
+ %tmp = shl i128 %b, %a
+ ret i128 %tmp
+}
diff --git a/test/CodeGen/SPARC/rem.ll b/test/CodeGen/SPARC/rem.ll
index abef1fc112b4..3b01a55735b7 100644
--- a/test/CodeGen/SPARC/rem.ll
+++ b/test/CodeGen/SPARC/rem.ll
@@ -3,7 +3,7 @@
; CHECK-LABEL: test1:
; CHECK: sdivx %o0, %o1, %o2
; CHECK-NEXT: mulx %o2, %o1, %o1
-; CHECK-NEXT: jmp %o7+8
+; CHECK-NEXT: retl
; CHECK-NEXT: sub %o0, %o1, %o0
define i64 @test1(i64 %X, i64 %Y) {
@@ -14,7 +14,7 @@ define i64 @test1(i64 %X, i64 %Y) {
; CHECK-LABEL: test2:
; CHECK: udivx %o0, %o1, %o2
; CHECK-NEXT: mulx %o2, %o1, %o1
-; CHECK-NEXT: jmp %o7+8
+; CHECK-NEXT: retl
; CHECK-NEXT: sub %o0, %o1, %o0
define i64 @test2(i64 %X, i64 %Y) {
diff --git a/test/CodeGen/SPARC/setjmp.ll b/test/CodeGen/SPARC/setjmp.ll
index 39984fb14bcb..a31cd7016731 100644
--- a/test/CodeGen/SPARC/setjmp.ll
+++ b/test/CodeGen/SPARC/setjmp.ll
@@ -7,7 +7,7 @@
%struct.__jmp_buf_tag = type { [3 x i32], i32, %0 }
@jenv = common unnamed_addr global %struct.jmpbuf_env* null
-@.cst = linker_private unnamed_addr constant [30 x i8] c"in bar with jmp_buf's id: %d\0A\00", align 64
+@.cst = private unnamed_addr constant [30 x i8] c"in bar with jmp_buf's id: %d\0A\00", align 64
; CHECK-LABEL: foo
; CHECK-DAG: st {{.+}}, [%i0]
diff --git a/test/CodeGen/SPARC/spillsize.ll b/test/CodeGen/SPARC/spillsize.ll
new file mode 100644
index 000000000000..64f63f97d509
--- /dev/null
+++ b/test/CodeGen/SPARC/spillsize.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -verify-machineinstrs | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64-S128"
+target triple = "sparcv9"
+
+; CHECK-LABEL: spill4
+; This function spills two values: %p and the materialized large constant.
+; Both must use 8-byte spill and fill instructions.
+; CHECK: stx %{{..}}, [%fp+
+; CHECK: stx %{{..}}, [%fp+
+; CHECK: ldx [%fp+
+; CHECK: ldx [%fp+
+define void @spill4(i64* nocapture %p) {
+entry:
+ %val0 = load i64* %p
+ %cmp0 = icmp ult i64 %val0, 385672958347594845
+ %cm80 = zext i1 %cmp0 to i64
+ store i64 %cm80, i64* %p, align 8
+ tail call void asm sideeffect "", "~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{g2},~{g3},~{g4},~{g5},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o7}"()
+ %arrayidx1 = getelementptr inbounds i64* %p, i64 1
+ %val = load i64* %arrayidx1
+ %cmp = icmp ult i64 %val, 385672958347594845
+ %cm8 = select i1 %cmp, i64 10, i64 20
+ store i64 %cm8, i64* %arrayidx1, align 8
+ ret void
+}
diff --git a/test/CodeGen/SPARC/sret-secondary.ll b/test/CodeGen/SPARC/sret-secondary.ll
new file mode 100644
index 000000000000..4efcabfc6fb3
--- /dev/null
+++ b/test/CodeGen/SPARC/sret-secondary.ll
@@ -0,0 +1,8 @@
+; RUN: not llc -march=sparc < %s -o /dev/null 2>&1 | FileCheck %s
+
+; CHECK: sparc only supports sret on the first parameter
+
+define void @foo(i32 %a, i32* sret %out) {
+ store i32 %a, i32* %out
+ ret void
+}
diff --git a/test/CodeGen/SPARC/tls.ll b/test/CodeGen/SPARC/tls.ll
index 660ddff0fae9..ce3e00539845 100644
--- a/test/CodeGen/SPARC/tls.ll
+++ b/test/CodeGen/SPARC/tls.ll
@@ -3,6 +3,10 @@
; RUN: llc <%s -march=sparc -relocation-model=pic | FileCheck %s --check-prefix=pic
; RUN: llc <%s -march=sparcv9 -relocation-model=pic | FileCheck %s --check-prefix=pic
+; RUN: llc <%s -march=sparc -relocation-model=static -filetype=obj | llvm-readobj -r | FileCheck %s --check-prefix=v8abs-obj
+; RUN: llc <%s -march=sparcv9 -relocation-model=static -filetype=obj | llvm-readobj -r | FileCheck %s --check-prefix=v9abs-obj
+; RUN: llc <%s -march=sparc -relocation-model=pic -filetype=obj | llvm-readobj -r | FileCheck %s --check-prefix=pic-obj
+; RUN: llc <%s -march=sparcv9 -relocation-model=pic -filetype=obj | llvm-readobj -r | FileCheck %s --check-prefix=pic-obj
@local_symbol = internal thread_local global i32 0
@extern_symbol = external thread_local global i32
@@ -38,8 +42,7 @@ entry:
; v8abs-LABEL: test_tls_extern
-; v8abs: or {{%[goli][0-7]}}, %lo(_GLOBAL_OFFSET_TABLE_+{{.+}}), [[PC:%[goli][0-7]]]
-; v8abs: add [[PC]], %o7, %[[GOTBASE:[goli][0-7]]]
+; v8abs: or {{%[goli][0-7]}}, %lo(_GLOBAL_OFFSET_TABLE_), %[[GOTBASE:[goli][0-7]]]
; v8abs: sethi %tie_hi22(extern_symbol), [[R1:%[goli][0-7]]]
; v8abs: add [[R1]], %tie_lo10(extern_symbol), %[[R2:[goli][0-7]]]
; v8abs: ld [%[[GOTBASE]]+%[[R2]]], [[R3:%[goli][0-7]]], %tie_ld(extern_symbol)
@@ -47,8 +50,7 @@ entry:
; v8abs: ld [%[[R4]]]
; v9abs-LABEL: test_tls_extern
-; v9abs: or {{%[goli][0-7]}}, %lo(_GLOBAL_OFFSET_TABLE_+{{.+}}), [[PC:%[goli][0-7]]]
-; v9abs: add [[PC]], %o7, %[[GOTBASE:[goli][0-7]]]
+; v9abs: or {{%[goli][0-7]}}, %l44(_GLOBAL_OFFSET_TABLE_), %[[GOTBASE:[goli][0-7]]]
; v9abs: sethi %tie_hi22(extern_symbol), [[R1:%[goli][0-7]]]
; v9abs: add [[R1]], %tie_lo10(extern_symbol), %[[R2:[goli][0-7]]]
; v9abs: ldx [%[[GOTBASE]]+%[[R2]]], [[R3:%[goli][0-7]]], %tie_ldx(extern_symbol)
@@ -71,3 +73,47 @@ entry:
store i32 %1, i32* @extern_symbol, align 4
ret i32 %1
}
+
+
+; v8abs-obj: Relocations [
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LE_HIX22 local_symbol 0x0
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LE_LOX10 local_symbol 0x0
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_HI22 _GLOBAL_OFFSET_TABLE_ 0x0
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_LO10 _GLOBAL_OFFSET_TABLE_ 0x0
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_HI22 extern_symbol 0x0
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_LO10 extern_symbol 0x0
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_LD extern_symbol 0x0
+; v8abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_ADD extern_symbol 0x0
+; v8abs-obj: ]
+
+; v9abs-obj: Relocations [
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LE_HIX22 local_symbol 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LE_LOX10 local_symbol 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_H44 _GLOBAL_OFFSET_TABLE_ 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_M44 _GLOBAL_OFFSET_TABLE_ 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_L44 _GLOBAL_OFFSET_TABLE_ 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_HI22 extern_symbol 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_LO10 extern_symbol 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_LDX extern_symbol 0x0
+; v9abs-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_IE_ADD extern_symbol 0x0
+; v9abs-obj: ]
+
+; pic-obj: Relocations [
+; pic-obj: Section (2) .rela.text {
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_PC22 _GLOBAL_OFFSET_TABLE_ 0x4
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_PC10 _GLOBAL_OFFSET_TABLE_ 0x8
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LDO_HIX22 local_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LDO_LOX10 local_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LDM_HI22 local_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LDM_LO10 local_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LDM_ADD local_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LDM_CALL local_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_LDO_ADD local_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_PC22 _GLOBAL_OFFSET_TABLE_ 0x4
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_PC10 _GLOBAL_OFFSET_TABLE_ 0x8
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_GD_HI22 extern_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_GD_LO10 extern_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_GD_ADD extern_symbol 0x0
+; pic-obj: 0x{{[0-9,A-F]+}} R_SPARC_TLS_GD_CALL extern_symbol 0x0
+; pic-obj: ]
+
diff --git a/test/CodeGen/SPARC/trap.ll b/test/CodeGen/SPARC/trap.ll
new file mode 100644
index 000000000000..b72a63caeebc
--- /dev/null
+++ b/test/CodeGen/SPARC/trap.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple=sparc-linux-gnu < %s -show-mc-encoding | FileCheck %s
+
+define void @test1() {
+ tail call void @llvm.trap()
+ unreachable
+
+; CHECK-LABEL: test1:
+; CHECK: ta 5 ! encoding: [0x91,0xd0,0x20,0x05]
+}
+
+declare void @llvm.trap()
diff --git a/test/CodeGen/SystemZ/Large/branch-range-01.py b/test/CodeGen/SystemZ/Large/branch-range-01.py
index 552c9ca0ea85..edb631d8c6d5 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-01.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-01.py
@@ -79,7 +79,7 @@ for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i32 *%%stop, i64 %d' % (i, i)
- print ' %%bcur%d = load volatile i32 *%%bstop%d' % (i, i)
+ print ' %%bcur%d = load i32 *%%bstop%d' % (i, i)
print ' %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
@@ -95,7 +95,7 @@ for i in xrange(0, main_size, 6):
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i32 *%%stop, i64 %d' % (i, i + 25)
- print ' %%acur%d = load volatile i32 *%%astop%d' % (i, i)
+ print ' %%acur%d = load i32 *%%astop%d' % (i, i)
print ' %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
diff --git a/test/CodeGen/SystemZ/Large/branch-range-02.py b/test/CodeGen/SystemZ/Large/branch-range-02.py
index 0b21ced99a1f..743e12de0f1f 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-02.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-02.py
@@ -72,7 +72,7 @@ for i in xrange(blocks):
print 'b%d:' % i
print ' store volatile i8 %d, i8 *%%base' % value
print ' %%astop%d = getelementptr i32 *%%stop, i64 %d' % (i, i)
- print ' %%acur%d = load volatile i32 *%%astop%d' % (i, i)
+ print ' %%acur%d = load i32 *%%astop%d' % (i, i)
print ' %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
print ' br i1 %%atest%d, label %%%s, label %%%s' % (i, other, next)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-03.py b/test/CodeGen/SystemZ/Large/branch-range-03.py
index 75cdf247c6f3..5c9a93b87f73 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-03.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-03.py
@@ -79,7 +79,7 @@ for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8 *%%stop, i64 %d' % (i, i)
- print ' %%bcur%d = load volatile i8 *%%bstop%d' % (i, i)
+ print ' %%bcur%d = load i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp eq i32 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -96,7 +96,7 @@ for i in xrange(0, main_size, 6):
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8 *%%stop, i64 %d' % (i, i + 25)
- print ' %%acur%d = load volatile i8 *%%astop%d' % (i, i)
+ print ' %%acur%d = load i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp eq i32 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-04.py b/test/CodeGen/SystemZ/Large/branch-range-04.py
index 3ae3ae9c37f7..2c9090fa2067 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-04.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-04.py
@@ -83,7 +83,7 @@ for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8 *%%stop, i64 %d' % (i, i)
- print ' %%bcur%d = load volatile i8 *%%bstop%d' % (i, i)
+ print ' %%bcur%d = load i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
print ' %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -100,7 +100,7 @@ for i in xrange(0, main_size, 6):
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8 *%%stop, i64 %d' % (i, i + 25)
- print ' %%acur%d = load volatile i8 *%%astop%d' % (i, i)
+ print ' %%acur%d = load i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
print ' %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-05.py b/test/CodeGen/SystemZ/Large/branch-range-05.py
index 6928b8fc21d6..52f4a961c88f 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-05.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-05.py
@@ -82,7 +82,7 @@ print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
- print ' %%bcur%d = load volatile i8 *%%stop' % i
+ print ' %%bcur%d = load i8 *%%stop' % i
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp slt i32 %%bext%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -98,7 +98,7 @@ for i in xrange(0, main_size, 6):
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
- print ' %%acur%d = load volatile i8 *%%stop' % i
+ print ' %%acur%d = load i8 *%%stop' % i
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp slt i32 %%aext%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-06.py b/test/CodeGen/SystemZ/Large/branch-range-06.py
index aabc72fa6ec8..c34ebac4ce36 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-06.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-06.py
@@ -82,7 +82,7 @@ print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
- print ' %%bcur%d = load volatile i8 *%%stop' % i
+ print ' %%bcur%d = load i8 *%%stop' % i
print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
print ' %%btest%d = icmp slt i64 %%bext%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -98,7 +98,7 @@ for i in xrange(0, main_size, 6):
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
- print ' %%acur%d = load volatile i8 *%%stop' % i
+ print ' %%acur%d = load i8 *%%stop' % i
print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
print ' %%atest%d = icmp slt i64 %%aext%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-09.py b/test/CodeGen/SystemZ/Large/branch-range-09.py
index b3fd81324dab..bc712cb164ea 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-09.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-09.py
@@ -79,7 +79,7 @@ for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8 *%%stop, i64 %d' % (i, i)
- print ' %%bcur%d = load volatile i8 *%%bstop%d' % (i, i)
+ print ' %%bcur%d = load i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp ult i32 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -96,7 +96,7 @@ for i in xrange(0, main_size, 6):
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8 *%%stop, i64 %d' % (i, i + 25)
- print ' %%acur%d = load volatile i8 *%%astop%d' % (i, i)
+ print ' %%acur%d = load i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp ult i32 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-10.py b/test/CodeGen/SystemZ/Large/branch-range-10.py
index 3aeea3ebccdf..8c483c33724c 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-10.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-10.py
@@ -83,7 +83,7 @@ for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8 *%%stop, i64 %d' % (i, i)
- print ' %%bcur%d = load volatile i8 *%%bstop%d' % (i, i)
+ print ' %%bcur%d = load i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
print ' %%btest%d = icmp ult i64 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -100,7 +100,7 @@ for i in xrange(0, main_size, 6):
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8 *%%stop, i64 %d' % (i, i + 25)
- print ' %%acur%d = load volatile i8 *%%astop%d' % (i, i)
+ print ' %%acur%d = load i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
print ' %%atest%d = icmp ult i64 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-11.py b/test/CodeGen/SystemZ/Large/branch-range-11.py
index 034902c4a342..054610380e31 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-11.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-11.py
@@ -98,8 +98,8 @@ print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
- print ' %%bcur%da = load volatile i32 *%%stopa' % i
- print ' %%bcur%db = load volatile i32 *%%stopb' % i
+ print ' %%bcur%da = load i32 *%%stopa' % i
+ print ' %%bcur%db = load i32 *%%stopb' % i
print ' %%bsub%d = sub i32 %%bcur%da, %%bcur%db' % (i, i, i)
print ' %%btest%d = icmp ult i32 %%bsub%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -115,8 +115,8 @@ for i in xrange(0, main_size, 6):
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
- print ' %%acur%da = load volatile i32 *%%stopa' % i
- print ' %%acur%db = load volatile i32 *%%stopb' % i
+ print ' %%acur%da = load i32 *%%stopa' % i
+ print ' %%acur%db = load i32 *%%stopb' % i
print ' %%asub%d = sub i32 %%acur%da, %%acur%db' % (i, i, i)
print ' %%atest%d = icmp ult i32 %%asub%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/branch-range-12.py b/test/CodeGen/SystemZ/Large/branch-range-12.py
index 007d477e2140..626c8998d5d4 100644
--- a/test/CodeGen/SystemZ/Large/branch-range-12.py
+++ b/test/CodeGen/SystemZ/Large/branch-range-12.py
@@ -98,8 +98,8 @@ print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
- print ' %%bcur%da = load volatile i64 *%%stopa' % i
- print ' %%bcur%db = load volatile i64 *%%stopb' % i
+ print ' %%bcur%da = load i64 *%%stopa' % i
+ print ' %%bcur%db = load i64 *%%stopb' % i
print ' %%bsub%d = sub i64 %%bcur%da, %%bcur%db' % (i, i, i)
print ' %%btest%d = icmp ult i64 %%bsub%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
@@ -115,8 +115,8 @@ for i in xrange(0, main_size, 6):
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
- print ' %%acur%da = load volatile i64 *%%stopa' % i
- print ' %%acur%db = load volatile i64 *%%stopb' % i
+ print ' %%acur%da = load i64 *%%stopa' % i
+ print ' %%acur%db = load i64 *%%stopb' % i
print ' %%asub%d = sub i64 %%acur%da, %%acur%db' % (i, i, i)
print ' %%atest%d = icmp ult i64 %%asub%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
diff --git a/test/CodeGen/SystemZ/Large/lit.local.cfg b/test/CodeGen/SystemZ/Large/lit.local.cfg
index 9a02f849c347..4f22a970c3a6 100644
--- a/test/CodeGen/SystemZ/Large/lit.local.cfg
+++ b/test/CodeGen/SystemZ/Large/lit.local.cfg
@@ -5,6 +5,5 @@ config.suffixes = ['.py']
if config.root.host_arch not in ['SystemZ']:
config.unsupported = True
-targets = set(config.root.targets_to_build.split())
-if not 'SystemZ' in targets:
+if not 'SystemZ' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/SystemZ/atomic-load-01.ll b/test/CodeGen/SystemZ/atomic-load-01.ll
index a5bc8833e78a..f3acd605b012 100644
--- a/test/CodeGen/SystemZ/atomic-load-01.ll
+++ b/test/CodeGen/SystemZ/atomic-load-01.ll
@@ -2,11 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that loads are handled.
-; The CS-based sequence is probably far too conservative.
define i8 @f1(i8 *%src) {
; CHECK-LABEL: f1:
-; CHECK: cs
+; CHECK: bcr 1{{[45]}}, %r0
+; CHECK: lb %r2, 0(%r2)
; CHECK: br %r14
%val = load atomic i8 *%src seq_cst, align 1
ret i8 %val
diff --git a/test/CodeGen/SystemZ/atomic-load-02.ll b/test/CodeGen/SystemZ/atomic-load-02.ll
index 2c9bbdb488a1..d9bec60f4c1b 100644
--- a/test/CodeGen/SystemZ/atomic-load-02.ll
+++ b/test/CodeGen/SystemZ/atomic-load-02.ll
@@ -2,11 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that loads are handled.
-; The CS-based sequence is probably far too conservative.
define i16 @f1(i16 *%src) {
; CHECK-LABEL: f1:
-; CHECK: cs
+; CHECK: bcr 1{{[45]}}, %r0
+; CHECK: lh %r2, 0(%r2)
; CHECK: br %r14
%val = load atomic i16 *%src seq_cst, align 2
ret i16 %val
diff --git a/test/CodeGen/SystemZ/atomic-load-03.ll b/test/CodeGen/SystemZ/atomic-load-03.ll
index 1fb41f5e39aa..7e5eb9249a93 100644
--- a/test/CodeGen/SystemZ/atomic-load-03.ll
+++ b/test/CodeGen/SystemZ/atomic-load-03.ll
@@ -2,12 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that loads are handled.
-; Using CS is probably too conservative.
-define i32 @f1(i32 %dummy, i32 *%src) {
+define i32 @f1(i32 *%src) {
; CHECK-LABEL: f1:
-; CHECK: lhi %r2, 0
-; CHECK: cs %r2, %r2, 0(%r3)
+; CHECK: bcr 1{{[45]}}, %r0
+; CHECK: l %r2, 0(%r2)
; CHECK: br %r14
%val = load atomic i32 *%src seq_cst, align 4
ret i32 %val
diff --git a/test/CodeGen/SystemZ/atomic-load-04.ll b/test/CodeGen/SystemZ/atomic-load-04.ll
index 92cac406e200..c7a9a98a425d 100644
--- a/test/CodeGen/SystemZ/atomic-load-04.ll
+++ b/test/CodeGen/SystemZ/atomic-load-04.ll
@@ -2,12 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that loads are handled.
-; Using CSG is probably too conservative.
-define i64 @f1(i64 %dummy, i64 *%src) {
+define i64 @f1(i64 *%src) {
; CHECK-LABEL: f1:
-; CHECK: lghi %r2, 0
-; CHECK: csg %r2, %r2, 0(%r3)
+; CHECK: bcr 1{{[45]}}, %r0
+; CHECK: lg %r2, 0(%r2)
; CHECK: br %r14
%val = load atomic i64 *%src seq_cst, align 8
ret i64 %val
diff --git a/test/CodeGen/SystemZ/atomic-store-01.ll b/test/CodeGen/SystemZ/atomic-store-01.ll
index 53ed24f623cf..952e1a912168 100644
--- a/test/CodeGen/SystemZ/atomic-store-01.ll
+++ b/test/CodeGen/SystemZ/atomic-store-01.ll
@@ -2,11 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that stores are handled.
-; The CS-based sequence is probably far too conservative.
define void @f1(i8 %val, i8 *%src) {
; CHECK-LABEL: f1:
-; CHECK: cs
+; CHECK: stc %r2, 0(%r3)
+; CHECK: bcr 1{{[45]}}, %r0
; CHECK: br %r14
store atomic i8 %val, i8 *%src seq_cst, align 1
ret void
diff --git a/test/CodeGen/SystemZ/atomic-store-02.ll b/test/CodeGen/SystemZ/atomic-store-02.ll
index 42d6695b51d9..c9576e556566 100644
--- a/test/CodeGen/SystemZ/atomic-store-02.ll
+++ b/test/CodeGen/SystemZ/atomic-store-02.ll
@@ -2,11 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that stores are handled.
-; The CS-based sequence is probably far too conservative.
define void @f1(i16 %val, i16 *%src) {
; CHECK-LABEL: f1:
-; CHECK: cs
+; CHECK: sth %r2, 0(%r3)
+; CHECK: bcr 1{{[45]}}, %r0
; CHECK: br %r14
store atomic i16 %val, i16 *%src seq_cst, align 2
ret void
diff --git a/test/CodeGen/SystemZ/atomic-store-03.ll b/test/CodeGen/SystemZ/atomic-store-03.ll
index 846c86fd3662..459cb6a94e12 100644
--- a/test/CodeGen/SystemZ/atomic-store-03.ll
+++ b/test/CodeGen/SystemZ/atomic-store-03.ll
@@ -2,14 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that stores are handled.
-; Using CS is probably too conservative.
define void @f1(i32 %val, i32 *%src) {
; CHECK-LABEL: f1:
-; CHECK: l %r0, 0(%r3)
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: cs %r0, %r2, 0(%r3)
-; CHECK: jl [[LABEL]]
+; CHECK: st %r2, 0(%r3)
+; CHECK: bcr 1{{[45]}}, %r0
; CHECK: br %r14
store atomic i32 %val, i32 *%src seq_cst, align 4
ret void
diff --git a/test/CodeGen/SystemZ/atomic-store-04.ll b/test/CodeGen/SystemZ/atomic-store-04.ll
index 24615b115658..7f2406eb5468 100644
--- a/test/CodeGen/SystemZ/atomic-store-04.ll
+++ b/test/CodeGen/SystemZ/atomic-store-04.ll
@@ -2,14 +2,10 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-; This is just a placeholder to make sure that stores are handled.
-; Using CS is probably too conservative.
define void @f1(i64 %val, i64 *%src) {
; CHECK-LABEL: f1:
-; CHECK: lg %r0, 0(%r3)
-; CHECK: [[LABEL:\.[^:]*]]:
-; CHECK: csg %r0, %r2, 0(%r3)
-; CHECK: jl [[LABEL]]
+; CHECK: stg %r2, 0(%r3)
+; CHECK: bcr 1{{[45]}}, %r0
; CHECK: br %r14
store atomic i64 %val, i64 *%src seq_cst, align 8
ret void
diff --git a/test/CodeGen/SystemZ/atomicrmw-add-05.ll b/test/CodeGen/SystemZ/atomicrmw-add-05.ll
new file mode 100644
index 000000000000..956c0d9642cd
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-add-05.ll
@@ -0,0 +1,64 @@
+; Test 32-bit atomic additions, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check addition of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: laa %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check addition of 1, which needs a temporary.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lhi [[TMP:%r[0-5]]], 1
+; CHECK: laa %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the LAA range.
+define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: laa %r2, %r4, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131071
+ %res = atomicrmw add i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which needs separate address logic.
+define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: laa %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131072
+ %res = atomicrmw add i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the LAA range.
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f5:
+; CHECK: laa %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131072
+ %res = atomicrmw add i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word down, which needs separate address logic.
+define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524292
+; CHECK: laa %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131073
+ %res = atomicrmw add i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-add-06.ll b/test/CodeGen/SystemZ/atomicrmw-add-06.ll
new file mode 100644
index 000000000000..f508858d1562
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-add-06.ll
@@ -0,0 +1,64 @@
+; Test 64-bit atomic additions, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check addition of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK: laag %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check addition of 1, which needs a temporary.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lghi [[TMP:%r[0-5]]], 1
+; CHECK: laag %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the LAAG range.
+define i64 @f3(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK: laag %r2, %r4, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %res = atomicrmw add i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+define i64 @f4(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: laag %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %res = atomicrmw add i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the LAAG range.
+define i64 @f5(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f5:
+; CHECK: laag %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %res = atomicrmw add i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword down, which needs separate address logic.
+define i64 @f6(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524296
+; CHECK: laag %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %res = atomicrmw add i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-and-05.ll b/test/CodeGen/SystemZ/atomicrmw-and-05.ll
new file mode 100644
index 000000000000..f0b999c60431
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-and-05.ll
@@ -0,0 +1,64 @@
+; Test 32-bit atomic ANDs, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check AND of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: lan %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check AND of 1, which needs a temporary.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lhi [[TMP:%r[0-5]]], 1
+; CHECK: lan %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the LAN range.
+define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: lan %r2, %r4, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131071
+ %res = atomicrmw and i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which needs separate address logic.
+define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: lan %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131072
+ %res = atomicrmw and i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the LAN range.
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f5:
+; CHECK: lan %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131072
+ %res = atomicrmw and i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word down, which needs separate address logic.
+define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524292
+; CHECK: lan %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131073
+ %res = atomicrmw and i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-and-06.ll b/test/CodeGen/SystemZ/atomicrmw-and-06.ll
new file mode 100644
index 000000000000..e5b71945d57c
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-and-06.ll
@@ -0,0 +1,64 @@
+; Test 64-bit atomic ANDs, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check AND of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK: lang %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check AND of -2, which needs a temporary.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lghi [[TMP:%r[0-5]]], -2
+; CHECK: lang %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -2 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the LANG range.
+define i64 @f3(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK: lang %r2, %r4, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %res = atomicrmw and i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+define i64 @f4(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: lang %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %res = atomicrmw and i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the LANG range.
+define i64 @f5(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f5:
+; CHECK: lang %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %res = atomicrmw and i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword down, which needs separate address logic.
+define i64 @f6(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524296
+; CHECK: lang %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %res = atomicrmw and i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-or-05.ll b/test/CodeGen/SystemZ/atomicrmw-or-05.ll
new file mode 100644
index 000000000000..b38654ca6f07
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-or-05.ll
@@ -0,0 +1,64 @@
+; Test 32-bit atomic ORs, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check OR of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: lao %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check OR of 1, which needs a temporary.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lhi [[TMP:%r[0-5]]], 1
+; CHECK: lao %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the LAO range.
+define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: lao %r2, %r4, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131071
+ %res = atomicrmw or i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which needs separate address logic.
+define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: lao %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131072
+ %res = atomicrmw or i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the LAO range.
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f5:
+; CHECK: lao %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131072
+ %res = atomicrmw or i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word down, which needs separate address logic.
+define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524292
+; CHECK: lao %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131073
+ %res = atomicrmw or i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-or-06.ll b/test/CodeGen/SystemZ/atomicrmw-or-06.ll
new file mode 100644
index 000000000000..30874abfe4a2
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-or-06.ll
@@ -0,0 +1,64 @@
+; Test 64-bit atomic ORs, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check OR of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK: laog %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check OR of 1, which needs a temporary.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lghi [[TMP:%r[0-5]]], 1
+; CHECK: laog %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the LAOG range.
+define i64 @f3(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK: laog %r2, %r4, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %res = atomicrmw or i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+define i64 @f4(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: laog %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %res = atomicrmw or i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the LAOG range.
+define i64 @f5(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f5:
+; CHECK: laog %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %res = atomicrmw or i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword down, which needs separate address logic.
+define i64 @f6(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524296
+; CHECK: laog %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %res = atomicrmw or i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-sub-05.ll b/test/CodeGen/SystemZ/atomicrmw-sub-05.ll
new file mode 100644
index 000000000000..7668f0e2a7ac
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-sub-05.ll
@@ -0,0 +1,69 @@
+; Test 32-bit atomic subtractions, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check subtraction of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: lcr [[NEG:%r[0-5]]], %r4
+; CHECK: laa %r2, [[NEG]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check subtraction of 1, which needs a temporary.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lhi [[TMP:%r[0-5]]], -1
+; CHECK: laa %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the LAA range.
+define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: lcr [[NEG:%r[0-5]]], %r4
+; CHECK: laa %r2, [[NEG]], 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131071
+ %res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which needs separate address logic.
+define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f4:
+; CHECK-DAG: lcr [[NEG:%r[0-5]]], %r4
+; CHECK-DAG: agfi %r3, 524288
+; CHECK: laa %r2, [[NEG]], 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131072
+ %res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the LAA range.
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f5:
+; CHECK: lcr [[NEG:%r[0-5]]], %r4
+; CHECK: laa %r2, [[NEG]], -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131072
+ %res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word down, which needs separate address logic.
+define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f6:
+; CHECK-DAG: lcr [[NEG:%r[0-5]]], %r4
+; CHECK-DAG: agfi %r3, -524292
+; CHECK: laa %r2, [[NEG]], 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131073
+ %res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-sub-06.ll b/test/CodeGen/SystemZ/atomicrmw-sub-06.ll
new file mode 100644
index 000000000000..5d11bdf96cde
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-sub-06.ll
@@ -0,0 +1,69 @@
+; Test 64-bit atomic subtractions, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check subtraction of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK: lcgr [[NEG:%r[0-5]]], %r4
+; CHECK: laag %r2, [[NEG]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check subtraction of 1, which needs a temporary.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lghi [[TMP:%r[0-5]]], -1
+; CHECK: laag %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the LAAG range.
+define i64 @f3(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK: lcgr [[NEG:%r[0-5]]], %r4
+; CHECK: laag %r2, [[NEG]], 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+define i64 @f4(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f4:
+; CHECK-DAG: lcgr [[NEG:%r[0-5]]], %r4
+; CHECK-DAG: agfi %r3, 524288
+; CHECK: laag %r2, [[NEG]], 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the LAAG range.
+define i64 @f5(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f5:
+; CHECK: lcgr [[NEG:%r[0-5]]], %r4
+; CHECK: laag %r2, [[NEG]], -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword down, which needs separate address logic.
+define i64 @f6(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f6:
+; CHECK-DAG: lcgr [[NEG:%r[0-5]]], %r4
+; CHECK-DAG: agfi %r3, -524296
+; CHECK: laag %r2, [[NEG]], 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xor-05.ll b/test/CodeGen/SystemZ/atomicrmw-xor-05.ll
new file mode 100644
index 000000000000..e9e7d30b3578
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xor-05.ll
@@ -0,0 +1,64 @@
+; Test 32-bit atomic XORs, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check XOR of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f1:
+; CHECK: lax %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw xor i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check XOR of 1, which needs a temporary.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lhi [[TMP:%r[0-5]]], 1
+; CHECK: lax %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw xor i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the LAX range.
+define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: lax %r2, %r4, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131071
+ %res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which needs separate address logic.
+define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: lax %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 131072
+ %res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the LAX range.
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f5:
+; CHECK: lax %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131072
+ %res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word down, which needs separate address logic.
+define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524292
+; CHECK: lax %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i32 -131073
+ %res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xor-06.ll b/test/CodeGen/SystemZ/atomicrmw-xor-06.ll
new file mode 100644
index 000000000000..0870c6476f61
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xor-06.ll
@@ -0,0 +1,64 @@
+; Test 64-bit atomic XORs, z196 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check XOR of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK: laxg %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check XOR of 1, which needs a temporary.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK-LABEL: f2:
+; CHECK: lghi [[TMP:%r[0-5]]], 1
+; CHECK: laxg %r2, [[TMP]], 0(%r3)
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the LAXG range.
+define i64 @f3(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK: laxg %r2, %r4, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+define i64 @f4(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: laxg %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the LAXG range.
+define i64 @f5(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f5:
+; CHECK: laxg %r2, %r4, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword down, which needs separate address logic.
+define i64 @f6(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r3, -524296
+; CHECK: laxg %r2, %r4, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/cmpxchg-01.ll b/test/CodeGen/SystemZ/cmpxchg-01.ll
index d5ea97786900..5118aadcf2ad 100644
--- a/test/CodeGen/SystemZ/cmpxchg-01.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-01.ll
@@ -32,7 +32,8 @@ define i8 @f1(i8 %dummy, i8 *%src, i8 %cmp, i8 %swap) {
; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
; CHECK-SHIFT: rll
; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -8([[NEGSHIFT]])
- %res = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst
+ %pair = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst seq_cst
+ %res = extractvalue { i8, i1 } %pair, 0
ret i8 %res
}
@@ -50,6 +51,7 @@ define i8 @f2(i8 *%src) {
; CHECK-SHIFT: risbg
; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 55, 0
; CHECK-SHIFT: br %r14
- %res = cmpxchg i8 *%src, i8 42, i8 88 seq_cst
+ %pair = cmpxchg i8 *%src, i8 42, i8 88 seq_cst seq_cst
+ %res = extractvalue { i8, i1 } %pair, 0
ret i8 %res
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-02.ll b/test/CodeGen/SystemZ/cmpxchg-02.ll
index 08c79d717c1e..9eb0628b5a30 100644
--- a/test/CodeGen/SystemZ/cmpxchg-02.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-02.ll
@@ -32,7 +32,8 @@ define i16 @f1(i16 %dummy, i16 *%src, i16 %cmp, i16 %swap) {
; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
; CHECK-SHIFT: rll
; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -16([[NEGSHIFT]])
- %res = cmpxchg i16 *%src, i16 %cmp, i16 %swap seq_cst
+ %pair = cmpxchg i16 *%src, i16 %cmp, i16 %swap seq_cst seq_cst
+ %res = extractvalue { i16, i1 } %pair, 0
ret i16 %res
}
@@ -50,6 +51,7 @@ define i16 @f2(i16 *%src) {
; CHECK-SHIFT: risbg
; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 47, 0
; CHECK-SHIFT: br %r14
- %res = cmpxchg i16 *%src, i16 42, i16 88 seq_cst
+ %pair = cmpxchg i16 *%src, i16 42, i16 88 seq_cst seq_cst
+ %res = extractvalue { i16, i1 } %pair, 0
ret i16 %res
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-03.ll b/test/CodeGen/SystemZ/cmpxchg-03.ll
index 3917979ac24c..c5fab4dc0439 100644
--- a/test/CodeGen/SystemZ/cmpxchg-03.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-03.ll
@@ -7,7 +7,8 @@ define i32 @f1(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK-LABEL: f1:
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -17,7 +18,8 @@ define i32 @f2(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 4092(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 1023
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -27,7 +29,8 @@ define i32 @f3(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, 4096(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 1024
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -37,7 +40,8 @@ define i32 @f4(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, 524284(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 131071
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -49,7 +53,8 @@ define i32 @f5(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 131072
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -59,7 +64,8 @@ define i32 @f6(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, -4(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -1
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -69,7 +75,8 @@ define i32 @f7(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: csy %r2, %r3, -524288(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -131072
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -81,7 +88,8 @@ define i32 @f8(i32 %cmp, i32 %swap, i32 *%src) {
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i32 *%src, i64 -131073
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -93,7 +101,8 @@ define i32 @f9(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
; CHECK: br %r14
%add1 = add i64 %src, %index
%ptr = inttoptr i64 %add1 to i32 *
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -106,7 +115,8 @@ define i32 @f10(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
%add1 = add i64 %src, %index
%add2 = add i64 %add1, 4096
%ptr = inttoptr i64 %add2 to i32 *
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -116,7 +126,8 @@ define i32 @f11(i32 %dummy, i32 %swap, i32 *%ptr) {
; CHECK: lhi %r2, 1001
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i32 *%ptr, i32 1001, i32 %swap seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 1001, i32 %swap seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
@@ -126,6 +137,7 @@ define i32 @f12(i32 %cmp, i32 *%ptr) {
; CHECK: lhi [[SWAP:%r[0-9]+]], 1002
; CHECK: cs %r2, [[SWAP]], 0(%r3)
; CHECK: br %r14
- %val = cmpxchg i32 *%ptr, i32 %cmp, i32 1002 seq_cst
+ %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 1002 seq_cst seq_cst
+ %val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
diff --git a/test/CodeGen/SystemZ/cmpxchg-04.ll b/test/CodeGen/SystemZ/cmpxchg-04.ll
index f58868f04f2d..ba1493e1853e 100644
--- a/test/CodeGen/SystemZ/cmpxchg-04.ll
+++ b/test/CodeGen/SystemZ/cmpxchg-04.ll
@@ -7,7 +7,8 @@ define i64 @f1(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -17,7 +18,8 @@ define i64 @f2(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 524280(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 65535
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -29,7 +31,8 @@ define i64 @f3(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 65536
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -39,7 +42,8 @@ define i64 @f4(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, -8(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -1
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -49,7 +53,8 @@ define i64 @f5(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, -524288(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -65536
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -61,7 +66,8 @@ define i64 @f6(i64 %cmp, i64 %swap, i64 *%src) {
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
%ptr = getelementptr i64 *%src, i64 -65537
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -73,7 +79,8 @@ define i64 @f7(i64 %cmp, i64 %swap, i64 %src, i64 %index) {
; CHECK: br %r14
%add1 = add i64 %src, %index
%ptr = inttoptr i64 %add1 to i64 *
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -83,7 +90,8 @@ define i64 @f8(i64 %dummy, i64 %swap, i64 *%ptr) {
; CHECK: lghi %r2, 1001
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %val = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
@@ -93,6 +101,7 @@ define i64 @f9(i64 %cmp, i64 *%ptr) {
; CHECK: lghi [[SWAP:%r[0-9]+]], 1002
; CHECK: csg %r2, [[SWAP]], 0(%r3)
; CHECK: br %r14
- %val = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst
+ %pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst seq_cst
+ %val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
}
diff --git a/test/CodeGen/SystemZ/cond-store-01.ll b/test/CodeGen/SystemZ/cond-store-01.ll
index d55ea2133e8f..62e9796fa21b 100644
--- a/test/CodeGen/SystemZ/cond-store-01.ll
+++ b/test/CodeGen/SystemZ/cond-store-01.ll
@@ -347,11 +347,10 @@ define void @f19(i8 *%ptr, i8 %alt, i32 %limit) {
define void @f20(i8 *%ptr, i8 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f20:
-; CHECK: cs {{%r[0-9]+}},
-; CHECK: jl
+; CHECK: lb {{%r[0-9]+}}, 0(%r2)
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: stc {{%r[0-9]+}},
+; CHECK: stc {{%r[0-9]+}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
%orig = load atomic i8 *%ptr unordered, align 1
@@ -367,7 +366,7 @@ define void @f21(i8 *%ptr, i8 %alt, i32 %limit) {
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: lb %r3, 0(%r2)
; CHECK: [[LABEL]]:
-; CHECK: cs {{%r[0-9]+}},
+; CHECK: stc %r3, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
%orig = load i8 *%ptr
diff --git a/test/CodeGen/SystemZ/cond-store-02.ll b/test/CodeGen/SystemZ/cond-store-02.ll
index 91bc4860b384..4fbcdaba5103 100644
--- a/test/CodeGen/SystemZ/cond-store-02.ll
+++ b/test/CodeGen/SystemZ/cond-store-02.ll
@@ -347,11 +347,10 @@ define void @f19(i16 *%ptr, i16 %alt, i32 %limit) {
define void @f20(i16 *%ptr, i16 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f20:
-; CHECK: cs {{%r[0-9]+}},
-; CHECK: jl
+; CHECK: lh {{%r[0-9]+}}, 0(%r2)
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
-; CHECK: sth {{%r[0-9]+}},
+; CHECK: sth {{%r[0-9]+}}, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
%orig = load atomic i16 *%ptr unordered, align 2
@@ -367,7 +366,7 @@ define void @f21(i16 *%ptr, i16 %alt, i32 %limit) {
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: lh %r3, 0(%r2)
; CHECK: [[LABEL]]:
-; CHECK: cs {{%r[0-9]+}},
+; CHECK: sth %r3, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
%orig = load i16 *%ptr
diff --git a/test/CodeGen/SystemZ/cond-store-03.ll b/test/CodeGen/SystemZ/cond-store-03.ll
index d4fd48d61324..4b22555d0d60 100644
--- a/test/CodeGen/SystemZ/cond-store-03.ll
+++ b/test/CodeGen/SystemZ/cond-store-03.ll
@@ -272,7 +272,7 @@ define void @f15(i32 *%ptr, i32 %alt, i32 %limit) {
define void @f16(i32 *%ptr, i32 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f16:
-; CHECK: cs {{%r[0-5]}}, {{%r[0-5]}}, 0(%r2)
+; CHECK: l {{%r[0-5]}}, 0(%r2)
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
; CHECK: st {{%r[0-5]}}, 0(%r2)
@@ -291,7 +291,7 @@ define void @f17(i32 *%ptr, i32 %alt, i32 %limit) {
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: l %r3, 0(%r2)
; CHECK: [[LABEL]]:
-; CHECK: cs {{%r[0-5]}}, %r3, 0(%r2)
+; CHECK: st %r3, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
%orig = load i32 *%ptr
diff --git a/test/CodeGen/SystemZ/cond-store-04.ll b/test/CodeGen/SystemZ/cond-store-04.ll
index fc565c432fff..346b51a17d78 100644
--- a/test/CodeGen/SystemZ/cond-store-04.ll
+++ b/test/CodeGen/SystemZ/cond-store-04.ll
@@ -164,7 +164,7 @@ define void @f9(i64 *%ptr, i64 %alt, i32 %limit) {
define void @f10(i64 *%ptr, i64 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CSG.
; CHECK-LABEL: f10:
-; CHECK: csg {{%r[0-5]}}, {{%r[0-5]}}, 0(%r2)
+; CHECK: lg {{%r[0-5]}}, 0(%r2)
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
; CHECK: stg {{%r[0-5]}}, 0(%r2)
@@ -183,7 +183,7 @@ define void @f11(i64 *%ptr, i64 %alt, i32 %limit) {
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: lg %r3, 0(%r2)
; CHECK: [[LABEL]]:
-; CHECK: csg {{%r[0-5]}}, %r3, 0(%r2)
+; CHECK: stg %r3, 0(%r2)
; CHECK: br %r14
%cond = icmp ult i32 %limit, 420
%orig = load i64 *%ptr
diff --git a/test/CodeGen/SystemZ/fp-cmp-04.ll b/test/CodeGen/SystemZ/fp-cmp-04.ll
index 8d842164fa4f..781a3beb4d4d 100644
--- a/test/CodeGen/SystemZ/fp-cmp-04.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-04.ll
@@ -1,4 +1,4 @@
-; Test that floating-point compares are ommitted if CC already has the
+; Test that floating-point compares are omitted if CC already has the
; right value.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
@@ -346,3 +346,62 @@ store:
exit:
ret double %val
}
+
+; Repeat f2 with a comparison against -0.
+define float @f17(float %a, float %b, float *%dest) {
+; CHECK-LABEL: f17:
+; CHECK: aebr %f0, %f2
+; CHECK-NEXT: jl .L{{.*}}
+; CHECK: br %r14
+entry:
+ %res = fadd float %a, %b
+ %cmp = fcmp olt float %res, -0.0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store float %b, float *%dest
+ br label %exit
+
+exit:
+ ret float %res
+}
+
+; Test another form of f7 in which the condition is based on the unnegated
+; result. This is what InstCombine would produce.
+define float @f18(float %dummy, float %a, float *%dest) {
+; CHECK-LABEL: f18:
+; CHECK: lnebr %f0, %f2
+; CHECK-NEXT: jl .L{{.*}}
+; CHECK: br %r14
+entry:
+ %abs = call float @llvm.fabs.f32(float %a)
+ %res = fsub float -0.0, %abs
+ %cmp = fcmp ogt float %abs, 0.0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store float %res, float *%dest
+ br label %exit
+
+exit:
+ ret float %res
+}
+
+; Similarly for f8.
+define float @f19(float %dummy, float %a, float *%dest) {
+; CHECK-LABEL: f19:
+; CHECK: lcebr %f0, %f2
+; CHECK-NEXT: jle .L{{.*}}
+; CHECK: br %r14
+entry:
+ %res = fsub float -0.0, %a
+ %cmp = fcmp oge float %a, 0.0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store float %res, float *%dest
+ br label %exit
+
+exit:
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-06.ll b/test/CodeGen/SystemZ/fp-conv-06.ll
index 466c1456a0cb..8a3971a9929c 100644
--- a/test/CodeGen/SystemZ/fp-conv-06.ll
+++ b/test/CodeGen/SystemZ/fp-conv-06.ll
@@ -1,6 +1,6 @@
-; Test conversions of unsigned i32s to floating-point values.
+; Test conversions of unsigned i32s to floating-point values (z10 only).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
; Check i32->f32. There is no native instruction, so we must promote
; to i64 first.
diff --git a/test/CodeGen/SystemZ/fp-conv-08.ll b/test/CodeGen/SystemZ/fp-conv-08.ll
index 69b2d13e29f0..295ce8bdbe2c 100644
--- a/test/CodeGen/SystemZ/fp-conv-08.ll
+++ b/test/CodeGen/SystemZ/fp-conv-08.ll
@@ -1,6 +1,6 @@
-; Test conversions of unsigned i64s to floating-point values.
+; Test conversions of unsigned i64s to floating-point values (z10 only).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
; Test i64->f32. There's no native support for unsigned i64-to-fp conversions,
; but we should be able to implement them using signed i64-to-fp conversions.
diff --git a/test/CodeGen/SystemZ/fp-conv-10.ll b/test/CodeGen/SystemZ/fp-conv-10.ll
index 723d19d2a1de..b8155ed067da 100644
--- a/test/CodeGen/SystemZ/fp-conv-10.ll
+++ b/test/CodeGen/SystemZ/fp-conv-10.ll
@@ -1,6 +1,6 @@
-; Test conversion of floating-point values to unsigned i32s.
+; Test conversion of floating-point values to unsigned i32s (z10 only).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
; z10 doesn't have native support for unsigned fp-to-i32 conversions;
; they were added in z196 as the Convert to Logical family of instructions.
diff --git a/test/CodeGen/SystemZ/fp-conv-12.ll b/test/CodeGen/SystemZ/fp-conv-12.ll
index 6cc343abdafc..770c9407a0af 100644
--- a/test/CodeGen/SystemZ/fp-conv-12.ll
+++ b/test/CodeGen/SystemZ/fp-conv-12.ll
@@ -1,6 +1,6 @@
-; Test conversion of floating-point values to unsigned i64s.
+; Test conversion of floating-point values to unsigned i64s (z10 only).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
; z10 doesn't have native support for unsigned fp-to-i64 conversions;
; they were added in z196 as the Convert to Logical family of instructions.
diff --git a/test/CodeGen/SystemZ/fp-conv-13.ll b/test/CodeGen/SystemZ/fp-conv-13.ll
new file mode 100644
index 000000000000..96293bc8d270
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-13.ll
@@ -0,0 +1,64 @@
+; Test conversions of unsigned integers to floating-point values
+; (z196 and above).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Check i32->f32.
+define float @f1(i32 %i) {
+; CHECK-LABEL: f1:
+; CHECK: celfbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = uitofp i32 %i to float
+ ret float %conv
+}
+
+; Check i32->f64.
+define double @f2(i32 %i) {
+; CHECK-LABEL: f2:
+; CHECK: cdlfbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = uitofp i32 %i to double
+ ret double %conv
+}
+
+; Check i32->f128.
+define void @f3(i32 %i, fp128 *%dst) {
+; CHECK-LABEL: f3:
+; CHECK: cxlfbr %f0, 0, %r2, 0
+; CHECK-DAG: std %f0, 0(%r3)
+; CHECK-DAG: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = uitofp i32 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Check i64->f32.
+define float @f4(i64 %i) {
+; CHECK-LABEL: f4:
+; CHECK: celgbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = uitofp i64 %i to float
+ ret float %conv
+}
+
+; Check i64->f64.
+define double @f5(i64 %i) {
+; CHECK-LABEL: f5:
+; CHECK: cdlgbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = uitofp i64 %i to double
+ ret double %conv
+}
+
+; Check i64->f128.
+define void @f6(i64 %i, fp128 *%dst) {
+; CHECK-LABEL: f6:
+; CHECK: cxlgbr %f0, 0, %r2, 0
+; CHECK-DAG: std %f0, 0(%r3)
+; CHECK-DAG: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = uitofp i64 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-14.ll b/test/CodeGen/SystemZ/fp-conv-14.ll
new file mode 100644
index 000000000000..e926e9bb31f5
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-14.ll
@@ -0,0 +1,63 @@
+; Test conversion of floating-point values to unsigned integers.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Test f32->i32.
+define i32 @f1(float %f) {
+; CHECK-LABEL: f1:
+; CHECK: clfebr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %conv = fptoui float %f to i32
+ ret i32 %conv
+}
+
+; Test f64->i32.
+define i32 @f2(double %f) {
+; CHECK-LABEL: f2:
+; CHECK: clfdbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %conv = fptoui double %f to i32
+ ret i32 %conv
+}
+
+; Test f128->i32.
+define i32 @f3(fp128 *%src) {
+; CHECK-LABEL: f3:
+; CHECK-DAG: ld %f0, 0(%r2)
+; CHECK-DAG: ld %f2, 8(%r2)
+; CHECK: clfxbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %f = load fp128 *%src
+ %conv = fptoui fp128 %f to i32
+ ret i32 %conv
+}
+
+; Test f32->i64.
+define i64 @f4(float %f) {
+; CHECK-LABEL: f4:
+; CHECK: clgebr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %conv = fptoui float %f to i64
+ ret i64 %conv
+}
+
+; Test f64->i64.
+define i64 @f5(double %f) {
+; CHECK-LABEL: f5:
+; CHECK: clgdbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %conv = fptoui double %f to i64
+ ret i64 %conv
+}
+
+; Test f128->i64.
+define i64 @f6(fp128 *%src) {
+; CHECK-LABEL: f6:
+; CHECK-DAG: ld %f0, 0(%r2)
+; CHECK-DAG: ld %f2, 8(%r2)
+; CHECK: clgxbr %r2, 5, %f0, 0
+; CHECK: br %r14
+ %f = load fp128 *%src
+ %conv = fptoui fp128 %f to i64
+ ret i64 %conv
+}
diff --git a/test/CodeGen/SystemZ/frame-08.ll b/test/CodeGen/SystemZ/frame-08.ll
index da2a6142fb47..aa4e3f481da4 100644
--- a/test/CodeGen/SystemZ/frame-08.ll
+++ b/test/CodeGen/SystemZ/frame-08.ll
@@ -208,7 +208,7 @@ define void @f4(i32 *%ptr, i64 %x) {
ret void
}
-; This is the largest frame size for which the prepatory increment for
+; This is the largest frame size for which the preparatory increment for
; "lmg %r14, %r15, ..." can be done using AGHI.
define void @f5(i32 *%ptr, i64 %x) {
; CHECK-LABEL: f5:
@@ -242,7 +242,7 @@ define void @f5(i32 *%ptr, i64 %x) {
ret void
}
-; This is the smallest frame size for which the prepatory increment for
+; This is the smallest frame size for which the preparatory increment for
; "lmg %r14, %r15, ..." needs to be done using AGFI.
define void @f6(i32 *%ptr, i64 %x) {
; CHECK-LABEL: f6:
diff --git a/test/CodeGen/SystemZ/frame-11.ll b/test/CodeGen/SystemZ/frame-11.ll
index 5145b4d1c862..575a4335d5da 100644
--- a/test/CodeGen/SystemZ/frame-11.ll
+++ b/test/CodeGen/SystemZ/frame-11.ll
@@ -2,17 +2,24 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+declare i8 *@llvm.stacksave()
declare void @llvm.stackrestore(i8 *)
; we should use a frame pointer and tear down the frame based on %r11
; rather than %r15.
-define void @f1(i8 *%src) {
+define void @f1(i32 %count1, i32 %count2) {
; CHECK-LABEL: f1:
; CHECK: stmg %r11, %r15, 88(%r15)
+; CHECK: aghi %r15, -160
; CHECK: lgr %r11, %r15
-; CHECK: lgr %r15, %r2
-; CHECK: lmg %r11, %r15, 88(%r11)
+; CHECK: lgr %r15, %r{{[0-5]}}
+; CHECK: lmg %r11, %r15, 248(%r11)
; CHECK: br %r14
+ %src = call i8 *@llvm.stacksave()
+ %array1 = alloca i8, i32 %count1
+ store volatile i8 0, i8 *%array1
call void @llvm.stackrestore(i8 *%src)
+ %array2 = alloca i8, i32 %count2
+ store volatile i8 0, i8 *%array2
ret void
}
diff --git a/test/CodeGen/SystemZ/frame-13.ll b/test/CodeGen/SystemZ/frame-13.ll
index 393850fbf617..58dee1da58b5 100644
--- a/test/CodeGen/SystemZ/frame-13.ll
+++ b/test/CodeGen/SystemZ/frame-13.ll
@@ -243,8 +243,8 @@ define void @f10(i32 *%vptr) {
; And again with maximum register pressure. The only spill slots that the
; NOFP case needs are the emergency ones, so the offsets are the same as for f2.
-; However, the FP case uses %r11 as the frame pointer and must therefore
-; spill a second register. This leads to an extra displacement of 8.
+; The FP case needs to spill an extra register and is too dependent on
+; register allocation heuristics for a stable test.
define void @f11(i32 *%vptr) {
; CHECK-NOFP-LABEL: f11:
; CHECK-NOFP: stmg %r6, %r15,
@@ -254,15 +254,6 @@ define void @f11(i32 *%vptr) {
; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15)
; CHECK-NOFP: lmg %r6, %r15,
; CHECK-NOFP: br %r14
-;
-; CHECK-FP-LABEL: f11:
-; CHECK-FP: stmg %r6, %r15,
-; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r11)
-; CHECK-FP: lay [[REGISTER]], 4096(%r11)
-; CHECK-FP: mvhi 8([[REGISTER]]), 42
-; CHECK-FP: lg [[REGISTER]], [[OFFSET]](%r11)
-; CHECK-FP: lmg %r6, %r15,
-; CHECK-FP: br %r14
%i0 = load volatile i32 *%vptr
%i1 = load volatile i32 *%vptr
%i3 = load volatile i32 *%vptr
diff --git a/test/CodeGen/SystemZ/frame-14.ll b/test/CodeGen/SystemZ/frame-14.ll
index 3b48179c40b6..24169cf61f00 100644
--- a/test/CodeGen/SystemZ/frame-14.ll
+++ b/test/CodeGen/SystemZ/frame-14.ll
@@ -266,8 +266,8 @@ define void @f10(i32 *%vptr) {
; And again with maximum register pressure. The only spill slots that the
; NOFP case needs are the emergency ones, so the offsets are the same as for f4.
-; However, the FP case uses %r11 as the frame pointer and must therefore
-; spill a second register. This leads to an extra displacement of 8.
+; The FP case needs to spill an extra register and is too dependent on
+; register allocation heuristics for a stable test.
define void @f11(i32 *%vptr) {
; CHECK-NOFP-LABEL: f11:
; CHECK-NOFP: stmg %r6, %r15,
@@ -278,16 +278,6 @@ define void @f11(i32 *%vptr) {
; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15)
; CHECK-NOFP: lmg %r6, %r15,
; CHECK-NOFP: br %r14
-;
-; CHECK-FP-LABEL: f11:
-; CHECK-FP: stmg %r6, %r15,
-; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r11)
-; CHECK-FP: llilh [[REGISTER]], 8
-; CHECK-FP: agr [[REGISTER]], %r11
-; CHECK-FP: mvi 8([[REGISTER]]), 42
-; CHECK-FP: lg [[REGISTER]], [[OFFSET]](%r11)
-; CHECK-FP: lmg %r6, %r15,
-; CHECK-FP: br %r14
%i0 = load volatile i32 *%vptr
%i1 = load volatile i32 *%vptr
%i3 = load volatile i32 *%vptr
diff --git a/test/CodeGen/SystemZ/insert-06.ll b/test/CodeGen/SystemZ/insert-06.ll
index edcd0c5dccd2..81a9c8770708 100644
--- a/test/CodeGen/SystemZ/insert-06.ll
+++ b/test/CodeGen/SystemZ/insert-06.ll
@@ -178,3 +178,17 @@ define i64 @f14(i64 %a, i64 %b) {
%ext = sext i1 %res to i64
ret i64 %ext
}
+
+; Check another representation of f8.
+define i64 @f15(i64 %a, i8 *%src) {
+; CHECK-LABEL: f15:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: lb %r2, 0(%r3)
+; CHECK: br %r14
+ %byte = load i8 *%src
+ %b = sext i8 %byte to i64
+ %low = and i64 %b, 4294967295
+ %high = and i64 %a, -4294967296
+ %res = or i64 %high, %low
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/int-abs-01.ll b/test/CodeGen/SystemZ/int-abs-01.ll
index 40fb61192c6e..053c347c0b75 100644
--- a/test/CodeGen/SystemZ/int-abs-01.ll
+++ b/test/CodeGen/SystemZ/int-abs-01.ll
@@ -81,3 +81,67 @@ define i64 @f7(i64 %val) {
%res = select i1 %cmp, i64 %neg, i64 %val
ret i64 %res
}
+
+; Test another form of f6, which is that produced by InstCombine.
+define i64 @f8(i64 %val) {
+; CHECK-LABEL: f8:
+; CHECK: lpgfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp slt i64 %shl, 0
+ %abs = select i1 %cmp, i64 %neg, i64 %ashr
+ ret i64 %abs
+}
+
+; Try again with sle rather than slt.
+define i64 @f9(i64 %val) {
+; CHECK-LABEL: f9:
+; CHECK: lpgfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sle i64 %shl, 0
+ %abs = select i1 %cmp, i64 %neg, i64 %ashr
+ ret i64 %abs
+}
+
+; Repeat f8 with the operands reversed.
+define i64 @f10(i64 %val) {
+; CHECK-LABEL: f10:
+; CHECK: lpgfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sgt i64 %shl, 0
+ %abs = select i1 %cmp, i64 %ashr, i64 %neg
+ ret i64 %abs
+}
+
+; Try again with sge rather than sgt.
+define i64 @f11(i64 %val) {
+; CHECK-LABEL: f11:
+; CHECK: lpgfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sge i64 %shl, 0
+ %abs = select i1 %cmp, i64 %ashr, i64 %neg
+ ret i64 %abs
+}
+
+; Repeat f5 with the comparison on the unextended value.
+define i64 @f12(i32 %val) {
+; CHECK-LABEL: f12:
+; CHECK: lpgfr %r2, %r2
+; CHECK: br %r14
+ %ext = sext i32 %val to i64
+ %cmp = icmp slt i32 %val, 0
+ %neg = sub i64 0, %ext
+ %abs = select i1 %cmp, i64 %neg, i64 %ext
+ ret i64 %abs
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-05.ll b/test/CodeGen/SystemZ/int-cmp-05.ll
index f15b76bb87fe..0be43a3ef1bf 100644
--- a/test/CodeGen/SystemZ/int-cmp-05.ll
+++ b/test/CodeGen/SystemZ/int-cmp-05.ll
@@ -291,9 +291,22 @@ define i64 @f15(i32 *%ptr0) {
ret i64 %sel9
}
-; Check the comparison can be reversed if that allows CGF to be used.
-define double @f16(double %a, double %b, i64 %i2, i32 *%ptr) {
+; Check the comparison can be reversed if that allows CGFR to be used.
+define double @f16(double %a, double %b, i64 %i1, i32 %unext) {
; CHECK-LABEL: f16:
+; CHECK: cgfr %r2, %r3
+; CHECK-NEXT: jh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i2, %i1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Likewise CGF.
+define double @f17(double %a, double %b, i64 %i2, i32 *%ptr) {
+; CHECK-LABEL: f17:
; CHECK: cgf %r2, 0(%r3)
; CHECK-NEXT: jh {{\.L.*}}
; CHECK: ldr %f0, %f2
diff --git a/test/CodeGen/SystemZ/int-cmp-06.ll b/test/CodeGen/SystemZ/int-cmp-06.ll
index 8ab62e89ec39..82007e221766 100644
--- a/test/CodeGen/SystemZ/int-cmp-06.ll
+++ b/test/CodeGen/SystemZ/int-cmp-06.ll
@@ -341,9 +341,35 @@ define i64 @f19(i32 *%ptr0) {
ret i64 %sel9
}
-; Check the comparison can be reversed if that allows CLGF to be used.
-define double @f20(double %a, double %b, i64 %i2, i32 *%ptr) {
+; Check the comparison can be reversed if that allows CLGFR to be used.
+define double @f20(double %a, double %b, i64 %i1, i32 %unext) {
; CHECK-LABEL: f20:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: jh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i2, %i1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and again with the AND representation.
+define double @f21(double %a, double %b, i64 %i1, i64 %unext) {
+; CHECK-LABEL: f21:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: jh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = and i64 %unext, 4294967295
+ %cond = icmp ult i64 %i2, %i1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the comparison can be reversed if that allows CLGF to be used.
+define double @f22(double %a, double %b, i64 %i2, i32 *%ptr) {
+; CHECK-LABEL: f22:
; CHECK: clgf %r2, 0(%r3)
; CHECK-NEXT: jh {{\.L.*}}
; CHECK: ldr %f0, %f2
diff --git a/test/CodeGen/SystemZ/int-cmp-44.ll b/test/CodeGen/SystemZ/int-cmp-44.ll
index ae0133f10860..f065e6421295 100644
--- a/test/CodeGen/SystemZ/int-cmp-44.ll
+++ b/test/CodeGen/SystemZ/int-cmp-44.ll
@@ -1,4 +1,4 @@
-; Test that compares are ommitted if CC already has the right value
+; Test that compares are omitted if CC already has the right value
; (z10 version).
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
@@ -797,3 +797,93 @@ store:
exit:
ret i32 %val
}
+
+; Test a form of f35 that uses in-register extensions.
+define i64 @f39(i64 %dummy, i64 %a, i64 *%dest) {
+; CHECK-LABEL: f39:
+; CHECK: ltgfr %r2, %r3
+; CHECK-NEXT: #APP
+; CHECK-NEXT: blah %r2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jh .L{{.*}}
+; CHECK: br %r14
+entry:
+ %val = trunc i64 %a to i32
+ %ext = sext i32 %val to i64
+ call void asm sideeffect "blah $0", "{r2}"(i64 %ext)
+ %cmp = icmp sgt i64 %ext, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 %ext, i64 *%dest
+ br label %exit
+
+exit:
+ ret i64 %ext
+}
+
+; ...and again with what InstCombine would produce for f39.
+define i64 @f40(i64 %dummy, i64 %a, i64 *%dest) {
+; CHECK-LABEL: f40:
+; CHECK: ltgfr %r2, %r3
+; CHECK-NEXT: #APP
+; CHECK-NEXT: blah %r2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: jh .L{{.*}}
+; CHECK: br %r14
+entry:
+ %shl = shl i64 %a, 32
+ %ext = ashr i64 %shl, 32
+ call void asm sideeffect "blah $0", "{r2}"(i64 %ext)
+ %cmp = icmp sgt i64 %shl, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 %ext, i64 *%dest
+ br label %exit
+
+exit:
+ ret i64 %ext
+}
+
+; Try a form of f7 in which the subtraction operands are compared directly.
+define i32 @f41(i32 %a, i32 %b, i32 *%dest) {
+; CHECK-LABEL: f41:
+; CHECK: s %r2, 0(%r4)
+; CHECK-NEXT: jne .L{{.*}}
+; CHECK: br %r14
+entry:
+ %cur = load i32 *%dest
+ %res = sub i32 %a, %cur
+ %cmp = icmp ne i32 %a, %cur
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 %b, i32 *%dest
+ br label %exit
+
+exit:
+ ret i32 %res
+}
+
+; A version of f32 that tests the unextended value.
+define i64 @f42(i64 %base, i64 %index, i64 *%dest) {
+; CHECK-LABEL: f42:
+; CHECK: ltgf %r2, 0({{%r2,%r3|%r3,%r2}})
+; CHECK-NEXT: jh .L{{.*}}
+; CHECK: br %r14
+entry:
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i32 *
+ %val = load i32 *%ptr
+ %res = sext i32 %val to i64
+ %cmp = icmp sgt i32 %val, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 %res, i64 *%dest
+ br label %exit
+
+exit:
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-45.ll b/test/CodeGen/SystemZ/int-cmp-45.ll
index 753a528e46c9..9c9c49c05df1 100644
--- a/test/CodeGen/SystemZ/int-cmp-45.ll
+++ b/test/CodeGen/SystemZ/int-cmp-45.ll
@@ -1,4 +1,4 @@
-; Test that compares are ommitted if CC already has the right value
+; Test that compares are omitted if CC already has the right value
; (z196 version).
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
diff --git a/test/CodeGen/SystemZ/int-cmp-47.ll b/test/CodeGen/SystemZ/int-cmp-47.ll
index 9ebcbfe525ba..038a25b2a6ed 100644
--- a/test/CodeGen/SystemZ/int-cmp-47.ll
+++ b/test/CodeGen/SystemZ/int-cmp-47.ll
@@ -232,3 +232,112 @@ store:
exit:
ret void
}
+
+; Check a case where TMHH can be used to implement a ult comparison.
+define void @f13(i64 %a) {
+; CHECK-LABEL: f13:
+; CHECK: tmhh %r2, 49152
+; CHECK: jno {{\.L.*}}
+; CHECK: br %r14
+entry:
+ %cmp = icmp ult i64 %a, 13835058055282163712
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 1, i32 *@g
+ br label %exit
+
+exit:
+ ret void
+}
+
+; And again with ule.
+define void @f14(i64 %a) {
+; CHECK-LABEL: f14:
+; CHECK: tmhh %r2, 49152
+; CHECK: jno {{\.L.*}}
+; CHECK: br %r14
+entry:
+ %cmp = icmp ule i64 %a, 13835058055282163711
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 1, i32 *@g
+ br label %exit
+
+exit:
+ ret void
+}
+
+; And again with ugt.
+define void @f15(i64 %a) {
+; CHECK-LABEL: f15:
+; CHECK: tmhh %r2, 49152
+; CHECK: jo {{\.L.*}}
+; CHECK: br %r14
+entry:
+ %cmp = icmp ugt i64 %a, 13835058055282163711
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 1, i32 *@g
+ br label %exit
+
+exit:
+ ret void
+}
+
+; And again with uge.
+define void @f16(i64 %a) {
+; CHECK-LABEL: f16:
+; CHECK: tmhh %r2, 49152
+; CHECK: jo {{\.L.*}}
+; CHECK: br %r14
+entry:
+ %cmp = icmp uge i64 %a, 13835058055282163712
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 1, i32 *@g
+ br label %exit
+
+exit:
+ ret void
+}
+
+; Decrease the constant from f13 to make TMHH invalid.
+define void @f17(i64 %a) {
+; CHECK-LABEL: f17:
+; CHECK-NOT: tmhh
+; CHECK: llihh {{%r[0-5]}}, 49151
+; CHECK-NOT: tmhh
+; CHECK: br %r14
+entry:
+ %cmp = icmp ult i64 %a, 13834776580305453056
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 1, i32 *@g
+ br label %exit
+
+exit:
+ ret void
+}
+
+; Check that we don't use TMHH just to test the top bit.
+define void @f18(i64 %a) {
+; CHECK-LABEL: f18:
+; CHECK-NOT: tmhh
+; CHECK: cgijhe %r2, 0,
+; CHECK: br %r14
+entry:
+ %cmp = icmp ult i64 %a, 9223372036854775808
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 1, i32 *@g
+ br label %exit
+
+exit:
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-neg-02.ll b/test/CodeGen/SystemZ/int-neg-02.ll
index e26194c162d4..7f3f6375129a 100644
--- a/test/CodeGen/SystemZ/int-neg-02.ll
+++ b/test/CodeGen/SystemZ/int-neg-02.ll
@@ -89,3 +89,136 @@ define i64 @f7(i64 %val) {
%res = sub i64 0, %abs
ret i64 %res
}
+
+; Test another form of f6, which is that produced by InstCombine.
+define i64 @f8(i64 %val) {
+; CHECK-LABEL: f8:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp slt i64 %shl, 0
+ %abs = select i1 %cmp, i64 %neg, i64 %ashr
+ %res = sub i64 0, %abs
+ ret i64 %res
+}
+
+; Try again with sle rather than slt.
+define i64 @f9(i64 %val) {
+; CHECK-LABEL: f9:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sle i64 %shl, 0
+ %abs = select i1 %cmp, i64 %neg, i64 %ashr
+ %res = sub i64 0, %abs
+ ret i64 %res
+}
+
+; Repeat f8 with the operands reversed.
+define i64 @f10(i64 %val) {
+; CHECK-LABEL: f10:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sgt i64 %shl, 0
+ %abs = select i1 %cmp, i64 %ashr, i64 %neg
+ %res = sub i64 0, %abs
+ ret i64 %res
+}
+
+; Try again with sge rather than sgt.
+define i64 @f11(i64 %val) {
+; CHECK-LABEL: f11:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sge i64 %shl, 0
+ %abs = select i1 %cmp, i64 %ashr, i64 %neg
+ %res = sub i64 0, %abs
+ ret i64 %res
+}
+
+; Repeat f8 with the negation coming from swapped operands.
+define i64 @f12(i64 %val) {
+; CHECK-LABEL: f12:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp slt i64 %shl, 0
+ %negabs = select i1 %cmp, i64 %ashr, i64 %neg
+ ret i64 %negabs
+}
+
+; Likewise f9.
+define i64 @f13(i64 %val) {
+; CHECK-LABEL: f13:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sle i64 %shl, 0
+ %negabs = select i1 %cmp, i64 %ashr, i64 %neg
+ ret i64 %negabs
+}
+
+; Likewise f10.
+define i64 @f14(i64 %val) {
+; CHECK-LABEL: f14:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sgt i64 %shl, 0
+ %negabs = select i1 %cmp, i64 %neg, i64 %ashr
+ ret i64 %negabs
+}
+
+; Likewise f11.
+define i64 @f15(i64 %val) {
+; CHECK-LABEL: f15:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %shl = shl i64 %val, 32
+ %ashr = ashr i64 %shl, 32
+ %neg = sub i64 0, %ashr
+ %cmp = icmp sge i64 %shl, 0
+ %negabs = select i1 %cmp, i64 %neg, i64 %ashr
+ ret i64 %negabs
+}
+
+; Repeat f5 with the comparison on the unextended value.
+define i64 @f16(i32 %val) {
+; CHECK-LABEL: f16:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %ext = sext i32 %val to i64
+ %cmp = icmp slt i32 %val, 0
+ %neg = sub i64 0, %ext
+ %abs = select i1 %cmp, i64 %neg, i64 %ext
+ %res = sub i64 0, %abs
+ ret i64 %res
+}
+
+; And again with the negation coming from swapped operands.
+define i64 @f17(i32 %val) {
+; CHECK-LABEL: f17:
+; CHECK: lngfr %r2, %r2
+; CHECK: br %r14
+ %ext = sext i32 %val to i64
+ %cmp = icmp slt i32 %val, 0
+ %neg = sub i64 0, %ext
+ %abs = select i1 %cmp, i64 %ext, i64 %neg
+ ret i64 %abs
+}
diff --git a/test/CodeGen/SystemZ/lit.local.cfg b/test/CodeGen/SystemZ/lit.local.cfg
index b12af09434be..5c02dd3614a4 100644
--- a/test/CodeGen/SystemZ/lit.local.cfg
+++ b/test/CodeGen/SystemZ/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'SystemZ' in targets:
+if not 'SystemZ' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/SystemZ/mature-mc-support.ll b/test/CodeGen/SystemZ/mature-mc-support.ll
new file mode 100644
index 000000000000..5520f55e1e29
--- /dev/null
+++ b/test/CodeGen/SystemZ/mature-mc-support.ll
@@ -0,0 +1,15 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
+; FIXME: SystemZ doesn't use the integrated assembler by default so we only test
+; that -filetype=obj tries to parse the assembly.
+
+; SKIP: not llc -march=systemz < %s > /dev/null 2> %t1
+; SKIP: FileCheck %s < %t1
+
+; RUN: not llc -march=systemz -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/SystemZ/risbg-01.ll b/test/CodeGen/SystemZ/risbg-01.ll
index a4d11fdae5b9..d75e8e4b11a6 100644
--- a/test/CodeGen/SystemZ/risbg-01.ll
+++ b/test/CodeGen/SystemZ/risbg-01.ll
@@ -269,12 +269,12 @@ define i64 @f23(i64 %foo) {
; mask and rotate.
define i32 @f24(i32 %foo) {
; CHECK-LABEL: f24:
-; CHECK: nilf %r2, 14
-; CHECK: rll %r2, %r2, 3
+; CHECK: nilf %r2, 254
+; CHECK: rll %r2, %r2, 29
; CHECK: br %r14
- %and = and i32 %foo, 14
- %parta = shl i32 %and, 3
- %partb = lshr i32 %and, 29
+ %and = and i32 %foo, 254
+ %parta = lshr i32 %and, 3
+ %partb = shl i32 %and, 29
%rotl = or i32 %parta, %partb
ret i32 %rotl
}
@@ -295,7 +295,6 @@ define i64 @f25(i64 %foo) {
; This again needs a separate mask and rotate.
define i32 @f26(i32 %foo) {
; CHECK-LABEL: f26:
-; CHECK: nill %r2, 65487
; CHECK: rll %r2, %r2, 5
; CHECK: br %r14
%and = and i32 %foo, -49
@@ -457,11 +456,22 @@ define i64 @f40(i64 %foo, i64 *%dest) {
ret i64 %and
}
+; Check a case where the result is zero-extended.
+define i64 @f41(i32 %a) {
+; CHECK-LABEL: f41:
+; CHECK: risbg %r2, %r2, 36, 191, 62
+; CHECK: br %r14
+ %shl = shl i32 %a, 2
+ %shr = lshr i32 %shl, 4
+ %ext = zext i32 %shr to i64
+ ret i64 %ext
+}
+
; In this case the sign extension is converted to a pair of 32-bit shifts,
; which is then extended to 64 bits. We previously used the wrong bit size
; when testing whether the shifted-in bits of the shift right were significant.
-define i64 @f41(i1 %x) {
-; CHECK-LABEL: f41:
+define i64 @f42(i1 %x) {
+; CHECK-LABEL: f42:
; CHECK: sll %r2, 31
; CHECK: sra %r2, 31
; CHECK: llgcr %r2, %r2
diff --git a/test/CodeGen/SystemZ/rnsbg-01.ll b/test/CodeGen/SystemZ/rnsbg-01.ll
index 666aeb21e8d8..282810a78151 100644
--- a/test/CodeGen/SystemZ/rnsbg-01.ll
+++ b/test/CodeGen/SystemZ/rnsbg-01.ll
@@ -255,3 +255,14 @@ define i64 @f22(i64 %a, i64 %b) {
%and = and i64 %a, %rotlorb
ret i64 %and
}
+
+; Check the handling of zext and AND, which isn't suitable for RNSBG.
+define i64 @f23(i64 %a, i32 %b) {
+; CHECK-LABEL: f23:
+; CHECK-NOT: rnsbg
+; CHECK: br %r14
+ %add = add i32 %b, 1
+ %ext = zext i32 %add to i64
+ %and = and i64 %a, %ext
+ ret i64 %and
+}
diff --git a/test/CodeGen/SystemZ/rosbg-01.ll b/test/CodeGen/SystemZ/rosbg-01.ll
index 0abacccba14c..96ee870d42b4 100644
--- a/test/CodeGen/SystemZ/rosbg-01.ll
+++ b/test/CodeGen/SystemZ/rosbg-01.ll
@@ -108,3 +108,14 @@ define i64 @f11(i64 %a, i64 %b) {
%or = or i64 %a, %andb
ret i64 %or
}
+
+; Check the handling of zext and OR, which can use ROSBG.
+define i64 @f12(i64 %a, i32 %b) {
+; CHECK-LABEL: f12:
+; CHECK: rosbg %r2, %r3, 32, 63, 0
+; CHECK: br %r14
+ %add = add i32 %b, 1
+ %ext = zext i32 %add to i64
+ %or = or i64 %a, %ext
+ ret i64 %or
+}
diff --git a/test/CodeGen/SystemZ/rxsbg-01.ll b/test/CodeGen/SystemZ/rxsbg-01.ll
index 5491bff2ecdc..339fe2a289f0 100644
--- a/test/CodeGen/SystemZ/rxsbg-01.ll
+++ b/test/CodeGen/SystemZ/rxsbg-01.ll
@@ -110,3 +110,14 @@ define i64 @f11(i64 %a, i64 %b) {
%xor = xor i64 %a, %andb
ret i64 %xor
}
+
+; Check the handling of zext and XOR, which can use RXSBG.
+define i64 @f12(i64 %a, i32 %b) {
+; CHECK-LABEL: f12:
+; CHECK: rxsbg %r2, %r3, 32, 63, 0
+; CHECK: br %r14
+ %add = add i32 %b, 1
+ %ext = zext i32 %add to i64
+ %xor = xor i64 %a, %ext
+ ret i64 %xor
+}
diff --git a/test/CodeGen/SystemZ/selectcc-01.ll b/test/CodeGen/SystemZ/selectcc-01.ll
new file mode 100644
index 000000000000..a57444c831a5
--- /dev/null
+++ b/test/CodeGen/SystemZ/selectcc-01.ll
@@ -0,0 +1,178 @@
+; Test an i32 0/-1 SELECTCC for every floating-point condition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test CC in { 0 }
+define i32 @f1(float %a, float %b) {
+; CHECK-LABEL: f1:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -268435456
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp oeq float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 1 }
+define i32 @f2(float %a, float %b) {
+; CHECK-LABEL: f2:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, -268435456
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp olt float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 0, 1 }
+define i32 @f3(float %a, float %b) {
+; CHECK-LABEL: f3:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -536870912
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ole float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 2 }
+define i32 @f4(float %a, float %b) {
+; CHECK-LABEL: f4:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, 1342177280
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ogt float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 0, 2 }
+define i32 @f5(float %a, float %b) {
+; CHECK-LABEL: f5:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 4294967295
+; CHECK-NEXT: sll %r2, 3
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp oge float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 1, 2 }
+define i32 @f6(float %a, float %b) {
+; CHECK-LABEL: f6:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, 268435456
+; CHECK-NEXT: sll %r2, 2
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp one float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 0, 1, 2 }
+define i32 @f7(float %a, float %b) {
+; CHECK-LABEL: f7:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -805306368
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ord float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 3 }
+define i32 @f8(float %a, float %b) {
+; CHECK-LABEL: f8:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, 1342177280
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp uno float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 0, 3 }
+define i32 @f9(float %a, float %b) {
+; CHECK-LABEL: f9:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -268435456
+; CHECK-NEXT: sll %r2, 2
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ueq float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 1, 3 }
+define i32 @f10(float %a, float %b) {
+; CHECK-LABEL: f10:
+; CHECK: ipm %r2
+; CHECK-NEXT: sll %r2, 3
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ult float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 0, 1, 3 }
+define i32 @f11(float %a, float %b) {
+; CHECK-LABEL: f11:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, -805306368
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ule float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 2, 3 }
+define i32 @f12(float %a, float %b) {
+; CHECK-LABEL: f12:
+; CHECK: ipm %r2
+; CHECK-NEXT: sll %r2, 2
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ugt float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 0, 2, 3 }
+define i32 @f13(float %a, float %b) {
+; CHECK-LABEL: f13:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, 1879048192
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp uge float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
+
+; Test CC in { 1, 2, 3 }
+define i32 @f14(float %a, float %b) {
+; CHECK-LABEL: f14:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, 1879048192
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp une float %a, %b
+ %res = select i1 %cond, i32 -1, i32 0
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/selectcc-02.ll b/test/CodeGen/SystemZ/selectcc-02.ll
new file mode 100644
index 000000000000..b1081a0621d6
--- /dev/null
+++ b/test/CodeGen/SystemZ/selectcc-02.ll
@@ -0,0 +1,178 @@
+; Test an i32 0/-1 SELECTCC (select operands reversed) for every floating-point condition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test CC in { 1, 2, 3 }
+define i32 @f1(float %a, float %b) {
+; CHECK-LABEL: f1:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, 1879048192
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp oeq float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 0, 2, 3 }
+define i32 @f2(float %a, float %b) {
+; CHECK-LABEL: f2:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, 1879048192
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp olt float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 2, 3 }
+define i32 @f3(float %a, float %b) {
+; CHECK-LABEL: f3:
+; CHECK: ipm %r2
+; CHECK-NEXT: sll %r2, 2
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ole float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 0, 1, 3 }
+define i32 @f4(float %a, float %b) {
+; CHECK-LABEL: f4:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, -805306368
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ogt float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 1, 3 }
+define i32 @f5(float %a, float %b) {
+; CHECK-LABEL: f5:
+; CHECK: ipm %r2
+; CHECK-NEXT: sll %r2, 3
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp oge float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 0, 3 }
+define i32 @f6(float %a, float %b) {
+; CHECK-LABEL: f6:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -268435456
+; CHECK-NEXT: sll %r2, 2
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp one float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 3 }
+define i32 @f7(float %a, float %b) {
+; CHECK-LABEL: f7:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, 1342177280
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ord float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 0, 1, 2 }
+define i32 @f8(float %a, float %b) {
+; CHECK-LABEL: f8:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -805306368
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp uno float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 1, 2 }
+define i32 @f9(float %a, float %b) {
+; CHECK-LABEL: f9:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, 268435456
+; CHECK-NEXT: sll %r2, 2
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ueq float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 0, 2 }
+define i32 @f10(float %a, float %b) {
+; CHECK-LABEL: f10:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 4294967295
+; CHECK-NEXT: sll %r2, 3
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ult float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 2 }
+define i32 @f11(float %a, float %b) {
+; CHECK-LABEL: f11:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, 1342177280
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ule float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 0, 1 }
+define i32 @f12(float %a, float %b) {
+; CHECK-LABEL: f12:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -536870912
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp ugt float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 1 }
+define i32 @f13(float %a, float %b) {
+; CHECK-LABEL: f13:
+; CHECK: ipm %r2
+; CHECK-NEXT: xilf %r2, 268435456
+; CHECK-NEXT: afi %r2, -268435456
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp uge float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
+
+; Test CC in { 0 }
+define i32 @f14(float %a, float %b) {
+; CHECK-LABEL: f14:
+; CHECK: ipm %r2
+; CHECK-NEXT: afi %r2, -268435456
+; CHECK-NEXT: sra %r2, 31
+; CHECK: br %r14
+ %cond = fcmp une float %a, %b
+ %res = select i1 %cond, i32 0, i32 -1
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/selectcc-03.ll b/test/CodeGen/SystemZ/selectcc-03.ll
new file mode 100644
index 000000000000..cafb4a2f1842
--- /dev/null
+++ b/test/CodeGen/SystemZ/selectcc-03.ll
@@ -0,0 +1,187 @@
+; Test an i64 0/-1 SELECTCC for every floating-point condition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test CC in { 0 }
+define i64 @f1(float %a, float %b) {
+; CHECK-LABEL: f1:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: afi [[REG]], -268435456
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp oeq float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 1 }
+define i64 @f2(float %a, float %b) {
+; CHECK-LABEL: f2:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: xilf [[REG]], 268435456
+; CHECK-NEXT: afi [[REG]], -268435456
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp olt float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 0, 1 }
+define i64 @f3(float %a, float %b) {
+; CHECK-LABEL: f3:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: afi [[REG]], -536870912
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp ole float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 2 }
+define i64 @f4(float %a, float %b) {
+; CHECK-LABEL: f4:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: xilf [[REG]], 268435456
+; CHECK-NEXT: afi [[REG]], 1342177280
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp ogt float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 0, 2 }
+define i64 @f5(float %a, float %b) {
+; CHECK-LABEL: f5:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: xilf [[REG]], 4294967295
+; CHECK-NEXT: sllg [[REG]], [[REG]], 35
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp oge float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 1, 2 }
+define i64 @f6(float %a, float %b) {
+; CHECK-LABEL: f6:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: afi [[REG]], 268435456
+; CHECK-NEXT: sllg [[REG]], [[REG]], 34
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp one float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 0, 1, 2 }
+define i64 @f7(float %a, float %b) {
+; CHECK-LABEL: f7:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: afi [[REG]], -805306368
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp ord float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 3 }
+define i64 @f8(float %a, float %b) {
+; CHECK-LABEL: f8:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: afi [[REG]], 1342177280
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp uno float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 0, 3 }
+define i64 @f9(float %a, float %b) {
+; CHECK-LABEL: f9:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: afi [[REG]], -268435456
+; CHECK-NEXT: sllg [[REG]], [[REG]], 34
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp ueq float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 1, 3 }
+define i64 @f10(float %a, float %b) {
+; CHECK-LABEL: f10:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: sllg [[REG]], [[REG]], 35
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp ult float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 0, 1, 3 }
+define i64 @f11(float %a, float %b) {
+; CHECK-LABEL: f11:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: xilf [[REG]], 268435456
+; CHECK-NEXT: afi [[REG]], -805306368
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp ule float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 2, 3 }
+define i64 @f12(float %a, float %b) {
+; CHECK-LABEL: f12:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: sllg [[REG]], [[REG]], 34
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp ugt float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 0, 2, 3 }
+define i64 @f13(float %a, float %b) {
+; CHECK-LABEL: f13:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: xilf [[REG]], 268435456
+; CHECK-NEXT: afi [[REG]], 1879048192
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp uge float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
+
+; Test CC in { 1, 2, 3 }
+define i64 @f14(float %a, float %b) {
+; CHECK-LABEL: f14:
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK-NEXT: afi [[REG]], 1879048192
+; CHECK-NEXT: sllg [[REG]], [[REG]], 32
+; CHECK-NEXT: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %cond = fcmp une float %a, %b
+ %res = select i1 %cond, i64 -1, i64 0
+ ret i64 %res
+}
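A note on the ipm/afi/sllg/srag pattern these selects compile to (my reading, with the assumption that IPM deposits the condition code CC in bits 28-29 of the register, i.e. as CC << 28): the xilf/afi constants rearrange the top nibble so that the deciding bit is set exactly for the wanted CC values, sllg moves that bit to bit 63, and srag 63 broadcasts it into 0 or -1. A hedged C sketch for the f3 case (CC in { 0, 1 }):

#include <stdint.h>

/* Sketch only: 'ipm' stands for the IPM result, assumed to hold CC << 28
   in its top nibble; two's-complement wrap-around and arithmetic right
   shifts are assumed, as on the real target. */
int64_t select_cc_0_or_1(uint32_t ipm) {
    uint32_t t = ipm + 0xE0000000u;   /* afi -536870912: bit 31 set iff CC < 2 */
    uint64_t w = (uint64_t)t << 32;   /* sllg ..., 32: move that bit to bit 63 */
    return (int64_t)w >> 63;          /* srag ..., 63: -1 for CC in {0,1}, else 0 */
}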
diff --git a/test/CodeGen/SystemZ/serialize-01.ll b/test/CodeGen/SystemZ/serialize-01.ll
new file mode 100644
index 000000000000..7801fac8d472
--- /dev/null
+++ b/test/CodeGen/SystemZ/serialize-01.ll
@@ -0,0 +1,21 @@
+; Test serialization instructions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | \
+; RUN: FileCheck %s -check-prefix=CHECK-FULL
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | \
+; RUN: FileCheck %s -check-prefix=CHECK-FAST
+
+; Check that volatile loads produce a serialization.
+define i32 @f1(i32 *%src) {
+; CHECK-FULL-LABEL: f1:
+; CHECK-FULL: bcr 15, %r0
+; CHECK-FULL: l %r2, 0(%r2)
+; CHECK-FULL: br %r14
+;
+; CHECK-FAST-LABEL: f1:
+; CHECK-FAST: bcr 14, %r0
+; CHECK-FAST: l %r2, 0(%r2)
+; CHECK-FAST: br %r14
+ %val = load volatile i32 *%src
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/shift-04.ll b/test/CodeGen/SystemZ/shift-04.ll
index 04b39d002c5d..de2d74f27fa3 100644
--- a/test/CodeGen/SystemZ/shift-04.ll
+++ b/test/CodeGen/SystemZ/shift-04.ll
@@ -187,3 +187,104 @@ define i32 @f14(i32 %a, i32 *%ptr) {
%or = or i32 %parta, %partb
ret i32 %or
}
+
+; Check another form of f5, which is the one produced by running f5 through
+; instcombine.
+define i32 @f15(i32 %a, i32 %amt) {
+; CHECK-LABEL: f15:
+; CHECK: rll %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %sub = sub i32 22, %amt
+ %parta = shl i32 %a, %add
+ %partb = lshr i32 %a, %sub
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Likewise for f7.
+define i32 @f16(i32 %a, i64 %amt) {
+; CHECK-LABEL: f16:
+; CHECK: rll %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %sub = sub i64 22, %amt
+ %addtrunc = trunc i64 %add to i32
+ %subtrunc = trunc i64 %sub to i32
+ %parta = shl i32 %a, %addtrunc
+ %partb = lshr i32 %a, %subtrunc
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check cases where (-x & 31) is used instead of 32 - x.
+define i32 @f17(i32 %x, i32 %y) {
+; CHECK-LABEL: f17:
+; CHECK: rll %r2, %r2, 0(%r3)
+; CHECK: br %r14
+entry:
+ %shl = shl i32 %x, %y
+ %sub = sub i32 0, %y
+ %and = and i32 %sub, 31
+ %shr = lshr i32 %x, %and
+ %or = or i32 %shr, %shl
+ ret i32 %or
+}
+
+; ...and again with ((32 - x) & 31).
+define i32 @f18(i32 %x, i32 %y) {
+; CHECK-LABEL: f18:
+; CHECK: rll %r2, %r2, 0(%r3)
+; CHECK: br %r14
+entry:
+ %shl = shl i32 %x, %y
+ %sub = sub i32 32, %y
+ %and = and i32 %sub, 31
+ %shr = lshr i32 %x, %and
+ %or = or i32 %shr, %shl
+ ret i32 %or
+}
+
+; This is not a rotation.
+define i32 @f19(i32 %x, i32 %y) {
+; CHECK-LABEL: f19:
+; CHECK-NOT: rll
+; CHECK: br %r14
+entry:
+ %shl = shl i32 %x, %y
+ %sub = sub i32 16, %y
+ %and = and i32 %sub, 31
+ %shr = lshr i32 %x, %and
+ %or = or i32 %shr, %shl
+ ret i32 %or
+}
+
+; Repeat f17 with an addition on the shift count.
+define i32 @f20(i32 %x, i32 %y) {
+; CHECK-LABEL: f20:
+; CHECK: rll %r2, %r2, 199(%r3)
+; CHECK: br %r14
+entry:
+ %add = add i32 %y, 199
+ %shl = shl i32 %x, %add
+ %sub = sub i32 0, %add
+ %and = and i32 %sub, 31
+ %shr = lshr i32 %x, %and
+ %or = or i32 %shr, %shl
+ ret i32 %or
+}
+
+; ...and again with the InstCombine version.
+define i32 @f21(i32 %x, i32 %y) {
+; CHECK-LABEL: f21:
+; CHECK: rll %r2, %r2, 199(%r3)
+; CHECK: br %r14
+entry:
+ %add = add i32 %y, 199
+ %shl = shl i32 %x, %add
+ %sub = sub i32 -199, %y
+ %and = and i32 %sub, 31
+ %shr = lshr i32 %x, %and
+ %or = or i32 %shr, %shl
+ ret i32 %or
+}
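For context on f15-f21: a 32-bit rotate amount is taken modulo 32, so ((32 - y) & 31) and ((-y) & 31) describe the same right-shift count, which is why f17/f18/f20/f21 can still fold to a single rll while f19's (16 - y) & 31 must not. A minimal C sketch of the source-level idiom assumed here (my illustration, not taken from the suite):

#include <stdint.h>

/* Rotate left by y modulo 32; both mask forms of the right-shift count
   are equivalent because (32 - y) and (-y) agree modulo 32. */
uint32_t rotl32(uint32_t x, uint32_t y) {
    return (x << (y & 31)) | (x >> ((32 - y) & 31));
    /* equivalently: (x << (y & 31)) | (x >> ((0u - y) & 31)) */
}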
diff --git a/test/CodeGen/SystemZ/shift-10.ll b/test/CodeGen/SystemZ/shift-10.ll
index 46ed2180dfd4..bf2f0f1776ee 100644
--- a/test/CodeGen/SystemZ/shift-10.ll
+++ b/test/CodeGen/SystemZ/shift-10.ll
@@ -14,13 +14,14 @@ define i64 @f1(i32 %a) {
ret i64 %ext
}
-; ...and again with the highest shift count.
+; ...and again with the highest shift count that doesn't reduce to an
+; ashr/sext pair.
define i64 @f2(i32 %a) {
; CHECK-LABEL: f2:
-; CHECK: sllg [[REG:%r[0-5]]], %r2, 32
+; CHECK: sllg [[REG:%r[0-5]]], %r2, 33
; CHECK: srag %r2, [[REG]], 63
; CHECK: br %r14
- %shr = lshr i32 %a, 31
+ %shr = lshr i32 %a, 30
%trunc = trunc i32 %shr to i1
%ext = sext i1 %trunc to i64
ret i64 %ext
@@ -76,3 +77,15 @@ define i64 @f6(i64 %a) {
%and = and i64 %shr, 256
ret i64 %and
}
+
+; Test another form of f1.
+define i64 @f7(i32 %a) {
+; CHECK-LABEL: f7:
+; CHECK: sllg [[REG:%r[0-5]]], %r2, 62
+; CHECK: srag %r2, [[REG]], 63
+; CHECK: br %r14
+ %1 = shl i32 %a, 30
+ %sext = ashr i32 %1, 31
+ %ext = sext i32 %sext to i64
+ ret i64 %ext
+}
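A short note on f7 (my reading, not text from the test): shl i32 %a, 30 followed by ashr i32 31 broadcasts bit 1 of %a across the i32, and the outer sext broadcasts it across 64 bits, so the backend can instead move bit 1 straight to bit 63 (sllg ..., 62) and arithmetic-shift it back down (srag ..., 63). A hedged C sketch of the value being computed:

#include <stdint.h>

/* Broadcast bit 1 of a across a 64-bit result (0 or -1).  Assumes the
   usual two's-complement conversions and arithmetic right shifts. */
int64_t broadcast_bit1(uint32_t a) {
    int32_t hi = (int32_t)(a << 30);  /* bit 1 of a becomes the sign bit */
    int32_t bc = hi >> 31;            /* 0 or -1 within 32 bits */
    return (int64_t)bc;               /* sign-extend to 64 bits */
}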
diff --git a/test/CodeGen/SystemZ/spill-01.ll b/test/CodeGen/SystemZ/spill-01.ll
index ca64a88f2a0d..c1f780c55d3c 100644
--- a/test/CodeGen/SystemZ/spill-01.ll
+++ b/test/CodeGen/SystemZ/spill-01.ll
@@ -400,6 +400,7 @@ define void @f10() {
; CHECK: stgrl [[REG]], h8
; CHECK: br %r14
entry:
+ %val8 = load volatile i64 *@h8
%val0 = load volatile i64 *@h0
%val1 = load volatile i64 *@h1
%val2 = load volatile i64 *@h2
@@ -408,7 +409,6 @@ entry:
%val5 = load volatile i64 *@h5
%val6 = load volatile i64 *@h6
%val7 = load volatile i64 *@h7
- %val8 = load volatile i64 *@h8
%val9 = load volatile i64 *@h9
call void @foo()
diff --git a/test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll b/test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll
index 5c883b3930dc..ca6df7cdab31 100644
--- a/test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll
+++ b/test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb | grep r0 | count 1
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
define i32 @a(i32 %x, i32 %y) nounwind readnone {
entry:
@@ -6,3 +6,5 @@ entry:
ret i32 %mul
}
+; CHECK: r0
+
diff --git a/test/CodeGen/Thumb/2010-06-18-SibCallCrash.ll b/test/CodeGen/Thumb/2010-06-18-SibCallCrash.ll
index ad8b064bf4bd..e1efd3b7238f 100644
--- a/test/CodeGen/Thumb/2010-06-18-SibCallCrash.ll
+++ b/test/CodeGen/Thumb/2010-06-18-SibCallCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=thumb < %s
+; RUN: llc -mtriple=thumb-eabi %s -o /dev/null
; rdar://8104457
define arm_apcscc void @t(i32* %m) nounwind {
diff --git a/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll b/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
index b87bf24993a1..ffc9584199cf 100644
--- a/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
+++ b/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
@@ -151,5 +151,5 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!98 = metadata !{i32 52, i32 0, metadata !1, null}
!101 = metadata !{metadata !"ggEdgeDiscrepancy.cc", metadata !"/Volumes/Home/grosbaj/sources/llvm-externals/speccpu2000/benchspec/CINT2000/252.eon/src"}
!102 = metadata !{i32 0}
-!103 = metadata !{metadata !3}
+!103 = metadata !{metadata !3, metadata !77}
!104 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll b/test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll
new file mode 100644
index 000000000000..ae663697ebeb
--- /dev/null
+++ b/test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mtriple=thumbv6m-eabi -o - | FileCheck %s
+; XFAIL: *
+
+define void @foo(i32* %A) #0 {
+entry:
+; CHECK-LABEL: foo:
+; CHECK: push {r7, lr}
+; CHECK: ldm [[REG0:r[0-9]]]!,
+; CHECK-NEXT: subs [[REG0]]
+; CHECK-NEXT: bl
+ %0 = load i32* %A, align 4
+ %arrayidx1 = getelementptr inbounds i32* %A, i32 1
+ %1 = load i32* %arrayidx1, align 4
+ tail call void @bar(i32* %A, i32 %0, i32 %1) #2
+ ret void
+}
+
+declare void @bar(i32*, i32, i32) #1
diff --git a/test/CodeGen/Thumb/DbgValueOtherTargets.test b/test/CodeGen/Thumb/DbgValueOtherTargets.test
index afb18a43be47..557892b06233 100644
--- a/test/CodeGen/Thumb/DbgValueOtherTargets.test
+++ b/test/CodeGen/Thumb/DbgValueOtherTargets.test
@@ -1 +1 @@
-RUN: llc -O0 -march=thumb -asm-verbose < %S/../Inputs/DbgValueOtherTargets.ll | FileCheck %S/../Inputs/DbgValueOtherTargets.ll
+RUN: llc -O0 -mtriple=thumb-eabi -asm-verbose %S/../Inputs/DbgValueOtherTargets.ll -o - | FileCheck %S/../Inputs/DbgValueOtherTargets.ll
diff --git a/test/CodeGen/Thumb/barrier.ll b/test/CodeGen/Thumb/barrier.ll
index 1c27fa09884f..92d9bb2097ff 100644
--- a/test/CodeGen/Thumb/barrier.ll
+++ b/test/CodeGen/Thumb/barrier.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin | FileCheck %s -check-prefix=V6
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=-db | FileCheck %s -check-prefix=V6
-; RUN: llc < %s -march=thumb -mcpu=cortex-m0 | FileCheck %s -check-prefix=V6M
+; RUN: llc -mtriple=thumbv6-apple-darwin %s -o - | FileCheck %s -check-prefix=V6
+; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=-db %s -o - | FileCheck %s -check-prefix=V6
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m0 %s -o - | FileCheck %s -check-prefix=V6M
define void @t1() {
; V6-LABEL: t1:
diff --git a/test/CodeGen/Thumb/cortex-m0-unaligned-access.ll b/test/CodeGen/Thumb/cortex-m0-unaligned-access.ll
new file mode 100644
index 000000000000..c4403fe7efe6
--- /dev/null
+++ b/test/CodeGen/Thumb/cortex-m0-unaligned-access.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=thumbv6m-apple-unknown-macho < %s | FileCheck --check-prefix=V6M %s
+; RUN: llc -mtriple=thumbv7m-apple-unknown-macho < %s | FileCheck --check-prefix=V7M %s
+
+define i32 @split_load(i32* %p) nounwind {
+; V6M-LABEL: split_load
+; V6M: ldrh
+; V6M: ldrh
+; V7M-LABEL: split_load
+; V7M-NOT: ldrh
+; V7M: bx lr
+ %val = load i32* %p, align 2
+ ret i32 %val
+}
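As a rough C-level counterpart of split_load (an assumption of mine, not derived from the test): reading a 32-bit value through a pointer with only 2-byte alignment, which ARMv6-M has to split into two halfword loads while ARMv7-M can handle with a single, possibly unaligned, word load.

#include <stdint.h>
#include <string.h>

/* Read a 32-bit value that is only guaranteed 2-byte alignment. */
uint32_t read_unaligned_u32(const uint16_t *p) {
    uint32_t v;
    memcpy(&v, p, sizeof v);   /* the compiler only knows p is 2-byte aligned */
    return v;
}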
diff --git a/test/CodeGen/Thumb/fastcc.ll b/test/CodeGen/Thumb/fastcc.ll
new file mode 100644
index 000000000000..98ff684d2ec6
--- /dev/null
+++ b/test/CodeGen/Thumb/fastcc.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -mcpu=arm926ej-s -mattr=+vfp2
+
+; This is a regression test to ensure that fastcc functions are handled
+; correctly when compiling for a processor whose floating-point unit is
+; not accessible from the selected instruction set.
+
+target datalayout = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv5e-none-linux-gnueabi"
+
+; Function Attrs: optsize
+define fastcc void @_foo(float %walpha) #0 {
+entry:
+ br label %for.body13
+
+for.body13: ; preds = %for.body13, %entry
+ br i1 undef, label %for.end182.critedge, label %for.body13
+
+for.end182.critedge: ; preds = %for.body13
+ %conv183 = fpext float %walpha to double
+ %mul184 = fmul double %conv183, 8.200000e-01
+ %conv185 = fptrunc double %mul184 to float
+ %conv188 = fpext float %conv185 to double
+ %mul189 = fmul double %conv188, 6.000000e-01
+ %conv190 = fptrunc double %mul189 to float
+ br label %for.body193
+
+for.body193: ; preds = %for.body193, %for.end182.critedge
+ %mul195 = fmul float %conv190, undef
+ br label %for.body193
+}
+
+attributes #0 = { optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/CodeGen/Thumb/fpconv.ll b/test/CodeGen/Thumb/fpconv.ll
index 7da36ddf58ed..0ade798d1471 100644
--- a/test/CodeGen/Thumb/fpconv.ll
+++ b/test/CodeGen/Thumb/fpconv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb
+; RUN: llc -mtriple=thumb-eabi %s -o /dev/null
define float @f1(double %x) {
entry:
diff --git a/test/CodeGen/Thumb/fpow.ll b/test/CodeGen/Thumb/fpow.ll
index be3dc0b3c1f8..18b1c91098f5 100644
--- a/test/CodeGen/Thumb/fpow.ll
+++ b/test/CodeGen/Thumb/fpow.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb
+; RUN: llc -mtriple=thumb-eabi %s -o /dev/null
define double @t(double %x, double %y) nounwind optsize {
entry:
diff --git a/test/CodeGen/Thumb/inlineasm-imm-thumb.ll b/test/CodeGen/Thumb/inlineasm-imm-thumb.ll
index 5c8a52af59e4..4e4f8fad25f8 100644
--- a/test/CodeGen/Thumb/inlineasm-imm-thumb.ll
+++ b/test/CodeGen/Thumb/inlineasm-imm-thumb.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb
+; RUN: llc -mtriple=thumb-eabi -no-integrated-as %s -o /dev/null
; Test Thumb-mode "I" constraint, for ADD immediate.
define i32 @testI(i32 %x) {
diff --git a/test/CodeGen/Thumb/inlineasm-thumb.ll b/test/CodeGen/Thumb/inlineasm-thumb.ll
index f2683c8dd8cc..2547ce8d6beb 100644
--- a/test/CodeGen/Thumb/inlineasm-thumb.ll
+++ b/test/CodeGen/Thumb/inlineasm-thumb.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=thumb | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
+
define i32 @t1(i32 %x, i32 %y) nounwind {
entry:
; CHECK: mov r0, r12
diff --git a/test/CodeGen/Thumb/ispositive.ll b/test/CodeGen/Thumb/ispositive.ll
index 7b2822707745..8d396878932b 100644
--- a/test/CodeGen/Thumb/ispositive.ll
+++ b/test/CodeGen/Thumb/ispositive.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
define i32 @test1(i32 %X) {
entry:
diff --git a/test/CodeGen/Thumb/ldr_ext.ll b/test/CodeGen/Thumb/ldr_ext.ll
index 9a28124b84ce..2d25af35b513 100644
--- a/test/CodeGen/Thumb/ldr_ext.ll
+++ b/test/CodeGen/Thumb/ldr_ext.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=thumb | FileCheck %s -check-prefix=V5
-; RUN: llc < %s -march=thumb -mattr=+v6 | FileCheck %s -check-prefix=V6
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s -check-prefix=V5
+; RUN: llc -mtriple=thumb-eabi -mattr=+v6 %s -o - | FileCheck %s -check-prefix=V6
; rdar://7176514
diff --git a/test/CodeGen/Thumb/ldr_frame.ll b/test/CodeGen/Thumb/ldr_frame.ll
index 6c586385b1bc..0e879d7379a8 100644
--- a/test/CodeGen/Thumb/ldr_frame.ll
+++ b/test/CodeGen/Thumb/ldr_frame.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
define i32 @f1() {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb/lit.local.cfg b/test/CodeGen/Thumb/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/CodeGen/Thumb/lit.local.cfg
+++ b/test/CodeGen/Thumb/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Thumb/long-setcc.ll b/test/CodeGen/Thumb/long-setcc.ll
index 8f2d98fc43c9..3460edb96f0d 100644
--- a/test/CodeGen/Thumb/long-setcc.ll
+++ b/test/CodeGen/Thumb/long-setcc.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=thumb | grep cmp | count 1
-
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
define i1 @t1(i64 %x) {
%B = icmp slt i64 %x, 0
@@ -15,3 +14,9 @@ define i1 @t3(i32 %x) {
%tmp = icmp ugt i32 %x, -1
ret i1 %tmp
}
+
+; CHECK: cmp
+; CHECK-NOT: cmp
+
+
+
diff --git a/test/CodeGen/Thumb/long.ll b/test/CodeGen/Thumb/long.ll
index 197e19e31b49..2449e5ad6777 100644
--- a/test/CodeGen/Thumb/long.ll
+++ b/test/CodeGen/Thumb/long.ll
@@ -1,10 +1,5 @@
-; RUN: llc < %s -march=thumb | \
-; RUN: grep mvn | count 1
-; RUN: llc < %s -march=thumb | \
-; RUN: grep adc | count 1
-; RUN: llc < %s -march=thumb | \
-; RUN: grep sbc | count 1
-; RUN: llc < %s -mtriple=thumb-apple-darwin | grep __muldi3
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-apple-darwin %s -o - | FileCheck %s -check-prefix CHECK-DARWIN
define i64 @f1() {
entry:
@@ -74,3 +69,14 @@ entry:
ret i64 %retval
}
+; CHECK: mvn
+; CHECK-NOT: mvn
+
+; CHECK: adc
+; CHECK-NOT: adc
+
+; CHECK: sbc
+; CHECK-NOT: sbc
+
+; CHECK-DARWIN: __muldi3
+
diff --git a/test/CodeGen/Thumb/long_shift.ll b/test/CodeGen/Thumb/long_shift.ll
index 24317141fca6..6aa1afd38988 100644
--- a/test/CodeGen/Thumb/long_shift.ll
+++ b/test/CodeGen/Thumb/long_shift.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb
+; RUN: llc -mtriple=thumb-eabi %s -o /dev/null
define i64 @f0(i64 %A, i64 %B) {
%tmp = bitcast i64 %A to i64
diff --git a/test/CodeGen/Thumb/mature-mc-support.ll b/test/CodeGen/Thumb/mature-mc-support.ll
new file mode 100644
index 000000000000..d7f8ae6c6c4d
--- /dev/null
+++ b/test/CodeGen/Thumb/mature-mc-support.ll
@@ -0,0 +1,12 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
+
+; RUN: not llc -mtriple=thumb-pc-linux < %s > /dev/null 2> %t1
+; RUN: FileCheck %s < %t1
+
+; RUN: not llc -mtriple=thumb-pc-linux -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/Thumb/mul.ll b/test/CodeGen/Thumb/mul.ll
index c1a2fb29477d..13a2cfb4c242 100644
--- a/test/CodeGen/Thumb/mul.ll
+++ b/test/CodeGen/Thumb/mul.ll
@@ -1,22 +1,32 @@
-; RUN: llc < %s -march=thumb | grep mul | count 3
-; RUN: llc < %s -march=thumb | grep lsl | count 1
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
define i32 @f1(i32 %u) {
%tmp = mul i32 %u, %u
ret i32 %tmp
}
+; CHECK: mul{{s?}}
+
define i32 @f2(i32 %u, i32 %v) {
%tmp = mul i32 %u, %v
ret i32 %tmp
}
+; CHECK: mul{{s?}}
+
define i32 @f3(i32 %u) {
%tmp = mul i32 %u, 5
ret i32 %tmp
}
+; CHECK: mul{{s?}}
+
define i32 @f4(i32 %u) {
%tmp = mul i32 %u, 4
ret i32 %tmp
}
+
+; CHECK: lsl
+; CHECK-NOT: mul{{s?}}
+; CHECK-NOT: lsl
+
diff --git a/test/CodeGen/Thumb/rev.ll b/test/CodeGen/Thumb/rev.ll
index dcba00e49663..3e947022e601 100644
--- a/test/CodeGen/Thumb/rev.ll
+++ b/test/CodeGen/Thumb/rev.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+v6 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mattr=+v6 %s -o - | FileCheck %s
define i32 @test1(i32 %X) nounwind {
; CHECK: test1
diff --git a/test/CodeGen/Thumb/segmented-stacks-dynamic.ll b/test/CodeGen/Thumb/segmented-stacks-dynamic.ll
new file mode 100644
index 000000000000..5d51f4052c1d
--- /dev/null
+++ b/test/CodeGen/Thumb/segmented-stacks-dynamic.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -mtriple=thumb-linux-unknown-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=Thumb-linux
+; RUN: llc < %s -mtriple=thumb-linux-androideabi -verify-machineinstrs | FileCheck %s -check-prefix=Thumb-android
+; RUN: llc < %s -mtriple=thumb-linux-unknown-gnueabi -filetype=obj
+; RUN: llc < %s -mtriple=thumb-linux-androideabi -filetype=obj
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define i32 @test_basic(i32 %l) #0 {
+ %mem = alloca i32, i32 %l
+ call void @dummy_use (i32* %mem, i32 %l)
+ %terminate = icmp eq i32 %l, 0
+ br i1 %terminate, label %true, label %false
+
+true:
+ ret i32 0
+
+false:
+ %newlen = sub i32 %l, 1
+ %retvalue = call i32 @test_basic(i32 %newlen)
+ ret i32 %retvalue
+
+; Thumb-linux: test_basic:
+
+; Thumb-linux: push {r4, r5}
+; Thumb-linux: mov r5, sp
+; Thumb-linux-NEXT: ldr r4, .LCPI0_0
+; Thumb-linux-NEXT: ldr r4, [r4]
+; Thumb-linux-NEXT: cmp r4, r5
+; Thumb-linux-NEXT: blo .LBB0_2
+
+; Thumb-linux: mov r4, #16
+; Thumb-linux-NEXT: mov r5, #0
+; Thumb-linux-NEXT: push {lr}
+; Thumb-linux-NEXT: bl __morestack
+; Thumb-linux-NEXT: pop {r4}
+; Thumb-linux-NEXT: mov lr, r4
+; Thumb-linux-NEXT: pop {r4, r5}
+; Thumb-linux-NEXT: bx lr
+
+; Thumb-linux: pop {r4, r5}
+
+; Thumb-android: test_basic:
+
+; Thumb-android: push {r4, r5}
+; Thumb-android: mov r5, sp
+; Thumb-android-NEXT: ldr r4, .LCPI0_0
+; Thumb-android-NEXT: ldr r4, [r4]
+; Thumb-android-NEXT: cmp r4, r5
+; Thumb-android-NEXT: blo .LBB0_2
+
+; Thumb-android: mov r4, #16
+; Thumb-android-NEXT: mov r5, #0
+; Thumb-android-NEXT: push {lr}
+; Thumb-android-NEXT: bl __morestack
+; Thumb-android-NEXT: pop {r4}
+; Thumb-android-NEXT: mov lr, r4
+; Thumb-android-NEXT: pop {r4, r5}
+; Thumb-android-NEXT: bx lr
+
+; Thumb-android: pop {r4, r5}
+
+}
+
+attributes #0 = { "split-stack" }
diff --git a/test/CodeGen/Thumb/segmented-stacks.ll b/test/CodeGen/Thumb/segmented-stacks.ll
new file mode 100644
index 000000000000..d6e25c7792e8
--- /dev/null
+++ b/test/CodeGen/Thumb/segmented-stacks.ll
@@ -0,0 +1,261 @@
+; RUN: llc < %s -mtriple=thumb-linux-androideabi -verify-machineinstrs | FileCheck %s -check-prefix=Thumb-android
+; RUN: llc < %s -mtriple=thumb-linux-unknown-gnueabi -verify-machineinstrs | FileCheck %s -check-prefix=Thumb-linux
+; RUN: llc < %s -mtriple=thumb-linux-androideabi -filetype=obj
+; RUN: llc < %s -mtriple=thumb-linux-unknown-gnueabi -filetype=obj
+
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define void @test_basic() #0 {
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret void
+
+; Thumb-android: test_basic:
+
+; Thumb-android: push {r4, r5}
+; Thumb-android-NEXT: mov r5, sp
+; Thumb-android-NEXT: ldr r4, .LCPI0_0
+; Thumb-android-NEXT: ldr r4, [r4]
+; Thumb-android-NEXT: cmp r4, r5
+; Thumb-android-NEXT: blo .LBB0_2
+
+; Thumb-android: mov r4, #48
+; Thumb-android-NEXT: mov r5, #0
+; Thumb-android-NEXT: push {lr}
+; Thumb-android-NEXT: bl __morestack
+; Thumb-android-NEXT: pop {r4}
+; Thumb-android-NEXT: mov lr, r4
+; Thumb-android-NEXT: pop {r4, r5}
+; Thumb-android-NEXT: bx lr
+
+; Thumb-android: pop {r4, r5}
+
+; Thumb-linux: test_basic:
+
+; Thumb-linux: push {r4, r5}
+; Thumb-linux-NEXT: mov r5, sp
+; Thumb-linux-NEXT: ldr r4, .LCPI0_0
+; Thumb-linux-NEXT: ldr r4, [r4]
+; Thumb-linux-NEXT: cmp r4, r5
+; Thumb-linux-NEXT: blo .LBB0_2
+
+; Thumb-linux: mov r4, #48
+; Thumb-linux-NEXT: mov r5, #0
+; Thumb-linux-NEXT: push {lr}
+; Thumb-linux-NEXT: bl __morestack
+; Thumb-linux-NEXT: pop {r4}
+; Thumb-linux-NEXT: mov lr, r4
+; Thumb-linux-NEXT: pop {r4, r5}
+; Thumb-linux-NEXT: bx lr
+
+; Thumb-linux: pop {r4, r5}
+
+}
+
+define i32 @test_nested(i32 * nest %closure, i32 %other) #0 {
+ %addend = load i32 * %closure
+ %result = add i32 %other, %addend
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret i32 %result
+
+; Thumb-android: test_nested:
+
+; Thumb-android: push {r4, r5}
+; Thumb-android-NEXT: mov r5, sp
+; Thumb-android-NEXT: ldr r4, .LCPI1_0
+; Thumb-android-NEXT: ldr r4, [r4]
+; Thumb-android-NEXT: cmp r4, r5
+; Thumb-android-NEXT: blo .LBB1_2
+
+; Thumb-android: mov r4, #56
+; Thumb-android-NEXT: mov r5, #0
+; Thumb-android-NEXT: push {lr}
+; Thumb-android-NEXT: bl __morestack
+; Thumb-android-NEXT: pop {r4}
+; Thumb-android-NEXT: mov lr, r4
+; Thumb-android-NEXT: pop {r4, r5}
+; Thumb-android-NEXT: bx lr
+
+; Thumb-android: pop {r4, r5}
+
+; Thumb-linux: test_nested:
+
+; Thumb-linux: push {r4, r5}
+; Thumb-linux-NEXT: mov r5, sp
+; Thumb-linux-NEXT: ldr r4, .LCPI1_0
+; Thumb-linux-NEXT: ldr r4, [r4]
+; Thumb-linux-NEXT: cmp r4, r5
+; Thumb-linux-NEXT: blo .LBB1_2
+
+; Thumb-linux: mov r4, #56
+; Thumb-linux-NEXT: mov r5, #0
+; Thumb-linux-NEXT: push {lr}
+; Thumb-linux-NEXT: bl __morestack
+; Thumb-linux-NEXT: pop {r4}
+; Thumb-linux-NEXT: mov lr, r4
+; Thumb-linux-NEXT: pop {r4, r5}
+; Thumb-linux-NEXT: bx lr
+
+; Thumb-linux: pop {r4, r5}
+
+}
+
+define void @test_large() #0 {
+ %mem = alloca i32, i32 10000
+ call void @dummy_use (i32* %mem, i32 0)
+ ret void
+
+; Thumb-android: test_large:
+
+; Thumb-android: push {r4, r5}
+; Thumb-android-NEXT: mov r5, sp
+; Thumb-android-NEXT: sub r5, #40192
+; Thumb-android-NEXT: ldr r4, .LCPI2_2
+; Thumb-android-NEXT: ldr r4, [r4]
+; Thumb-android-NEXT: cmp r4, r5
+; Thumb-android-NEXT: blo .LBB2_2
+
+; Thumb-android: mov r4, #40192
+; Thumb-android-NEXT: mov r5, #0
+; Thumb-android-NEXT: push {lr}
+; Thumb-android-NEXT: bl __morestack
+; Thumb-android-NEXT: pop {r4}
+; Thumb-android-NEXT: mov lr, r4
+; Thumb-android-NEXT: pop {r4, r5}
+; Thumb-android-NEXT: bx lr
+
+; Thumb-android: pop {r4, r5}
+
+; Thumb-linux: test_large:
+
+; Thumb-linux: push {r4, r5}
+; Thumb-linux-NEXT: mov r5, sp
+; Thumb-linux-NEXT: sub r5, #40192
+; Thumb-linux-NEXT: ldr r4, .LCPI2_2
+; Thumb-linux-NEXT: ldr r4, [r4]
+; Thumb-linux-NEXT: cmp r4, r5
+; Thumb-linux-NEXT: blo .LBB2_2
+
+; Thumb-linux: mov r4, #40192
+; Thumb-linux-NEXT: mov r5, #0
+; Thumb-linux-NEXT: push {lr}
+; Thumb-linux-NEXT: bl __morestack
+; Thumb-linux-NEXT: pop {r4}
+; Thumb-linux-NEXT: mov lr, r4
+; Thumb-linux-NEXT: pop {r4, r5}
+; Thumb-linux-NEXT: bx lr
+
+; Thumb-linux: pop {r4, r5}
+
+}
+
+define fastcc void @test_fastcc() #0 {
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret void
+
+; Thumb-android: test_fastcc:
+
+; Thumb-android: push {r4, r5}
+; Thumb-android-NEXT: mov r5, sp
+; Thumb-android-NEXT: ldr r4, .LCPI3_0
+; Thumb-android-NEXT: ldr r4, [r4]
+; Thumb-android-NEXT: cmp r4, r5
+; Thumb-android-NEXT: blo .LBB3_2
+
+; Thumb-android: mov r4, #48
+; Thumb-android-NEXT: mov r5, #0
+; Thumb-android-NEXT: push {lr}
+; Thumb-android-NEXT: bl __morestack
+; Thumb-android-NEXT: pop {r4}
+; Thumb-android-NEXT: mov lr, r4
+; Thumb-android-NEXT: pop {r4, r5}
+; Thumb-android-NEXT: bx lr
+
+; Thumb-android: pop {r4, r5}
+
+; Thumb-linux: test_fastcc:
+
+; Thumb-linux: push {r4, r5}
+; Thumb-linux-NEXT: mov r5, sp
+; Thumb-linux-NEXT: ldr r4, .LCPI3_0
+; Thumb-linux-NEXT: ldr r4, [r4]
+; Thumb-linux-NEXT: cmp r4, r5
+; Thumb-linux-NEXT: blo .LBB3_2
+
+; Thumb-linux: mov r4, #48
+; Thumb-linux-NEXT: mov r5, #0
+; Thumb-linux-NEXT: push {lr}
+; Thumb-linux-NEXT: bl __morestack
+; Thumb-linux-NEXT: pop {r4}
+; Thumb-linux-NEXT: mov lr, r4
+; Thumb-linux-NEXT: pop {r4, r5}
+; Thumb-linux-NEXT: bx lr
+
+; Thumb-linux: pop {r4, r5}
+
+}
+
+define fastcc void @test_fastcc_large() #0 {
+ %mem = alloca i32, i32 10000
+ call void @dummy_use (i32* %mem, i32 0)
+ ret void
+
+; Thumb-android: test_fastcc_large:
+
+; Thumb-android: push {r4, r5}
+; Thumb-android-NEXT: mov r5, sp
+; Thumb-android-NEXT: sub r5, #40192
+; Thumb-android-NEXT: ldr r4, .LCPI4_2
+; Thumb-android-NEXT: ldr r4, [r4]
+; Thumb-android-NEXT: cmp r4, r5
+; Thumb-android-NEXT: blo .LBB4_2
+
+; Thumb-android: mov r4, #40192
+; Thumb-android-NEXT: mov r5, #0
+; Thumb-android-NEXT: push {lr}
+; Thumb-android-NEXT: bl __morestack
+; Thumb-android-NEXT: pop {r4}
+; Thumb-android-NEXT: mov lr, r4
+; Thumb-android-NEXT: pop {r4, r5}
+; Thumb-android-NEXT: bx lr
+
+; Thumb-android: pop {r4, r5}
+
+; Thumb-linux: test_fastcc_large:
+
+; Thumb-linux: push {r4, r5}
+; Thumb-linux-NEXT: mov r5, sp
+; Thumb-linux-NEXT: sub r5, #40192
+; Thumb-linux-NEXT: ldr r4, .LCPI4_2
+; Thumb-linux-NEXT: ldr r4, [r4]
+; Thumb-linux-NEXT: cmp r4, r5
+; Thumb-linux-NEXT: blo .LBB4_2
+
+; Thumb-linux: mov r4, #40192
+; Thumb-linux-NEXT: mov r5, #0
+; Thumb-linux-NEXT: push {lr}
+; Thumb-linux-NEXT: bl __morestack
+; Thumb-linux-NEXT: pop {r4}
+; Thumb-linux-NEXT: mov lr, r4
+; Thumb-linux-NEXT: pop {r4, r5}
+; Thumb-linux-NEXT: bx lr
+
+; Thumb-linux: pop {r4, r5}
+
+}
+
+define void @test_nostack() #0 {
+ ret void
+
+; Thumb-android-LABEL: test_nostack:
+; Thumb-android-NOT: bl __morestack
+
+; Thumb-linux-LABEL: test_nostack:
+; Thumb-linux-NOT: bl __morestack
+}
+
+attributes #0 = { "split-stack" }
diff --git a/test/CodeGen/Thumb/sjljehprepare-lower-vector.ll b/test/CodeGen/Thumb/sjljehprepare-lower-vector.ll
new file mode 100644
index 000000000000..ab082c79ba6f
--- /dev/null
+++ b/test/CodeGen/Thumb/sjljehprepare-lower-vector.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mtriple=thumbv7-apple-ios < %s
+; SjLjEHPrepare shouldn't crash when lowering vectors.
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios"
+
+define i8* @foo(<4 x i32> %c) {
+entry:
+ invoke void @bar ()
+ to label %unreachable unwind label %handler
+
+unreachable:
+ unreachable
+
+handler:
+ %tmp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @baz to i8*)
+ cleanup
+ resume { i8*, i32 } undef
+}
+
+declare void @bar()
+declare i32 @baz(...)
+
diff --git a/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll b/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll
index 3f6407a0a3c0..97c66d9dc865 100644
--- a/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll
+++ b/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mcpu=arm1022e
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1022e %s -o /dev/null
%iterator = type { i8**, i8**, i8**, i8*** }
%insert_iterator = type { %deque*, %iterator }
diff --git a/test/CodeGen/Thumb/stack-frame.ll b/test/CodeGen/Thumb/stack-frame.ll
index b103b331b797..09d480aec290 100644
--- a/test/CodeGen/Thumb/stack-frame.ll
+++ b/test/CodeGen/Thumb/stack-frame.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=thumb
-; RUN: llc < %s -march=thumb | grep add | count 1
+; RUN: llc -mtriple=thumb-eabi < %s -o - | FileCheck %s
define void @f1() {
%c = alloca i8, align 1
@@ -10,4 +9,6 @@ define i32 @f2() {
ret i32 1
}
+; CHECK: add
+; CHECK-NOT: add
diff --git a/test/CodeGen/Thumb/thumb-imm.ll b/test/CodeGen/Thumb/thumb-imm.ll
index 74a57ff271be..592e694e0277 100644
--- a/test/CodeGen/Thumb/thumb-imm.ll
+++ b/test/CodeGen/Thumb/thumb-imm.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=thumb | not grep CPI
-
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
define i32 @test1() {
ret i32 1000
@@ -8,3 +7,6 @@ define i32 @test1() {
define i32 @test2() {
ret i32 -256
}
+
+; CHECK-NOT: CPI
+
diff --git a/test/CodeGen/Thumb/thumb-ldm.ll b/test/CodeGen/Thumb/thumb-ldm.ll
new file mode 100644
index 000000000000..95f3edc9c4c3
--- /dev/null
+++ b/test/CodeGen/Thumb/thumb-ldm.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=thumbv6m-eabi -o - | FileCheck %s
+; XFAIL: *
+
+@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
+
+define i32 @t1() {
+; CHECK-LABEL: t1:
+; CHECK: push {r7, lr}
+; CHECK: ldm
+; CHECK: pop {r7, pc}
+ %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0) ; <i32> [#uses=1]
+ %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
+ %tmp4 = call i32 @f1( i32 %tmp, i32 %tmp3 ) ; <i32> [#uses=1]
+ ret i32 %tmp4
+}
+
+define i32 @t2() {
+; CHECK-LABEL: t2:
+; CHECK: push {r7, lr}
+; CHECK: ldm
+; CHECK: pop {r7, pc}
+ %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
+ %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
+ %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 4) ; <i32> [#uses=1]
+ %tmp6 = call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1]
+ ret i32 %tmp6
+}
+
+define i32 @t3() {
+; CHECK-LABEL: t3:
+; CHECK: push {r7, lr}
+; CHECK: ldm
+; CHECK: pop {r7, pc}
+ %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
+ %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
+ %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
+ %tmp6 = call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1]
+ ret i32 %tmp6
+}
+
+declare i32 @f1(i32, i32)
+
+declare i32 @f2(i32, i32, i32)
diff --git a/test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll b/test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll
new file mode 100644
index 000000000000..dedc82bf68ce
--- /dev/null
+++ b/test/CodeGen/Thumb/thumb-memcpy-ldm-stm.ll
@@ -0,0 +1,38 @@
+; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s
+; XFAIL: *
+
+@d = external global [64 x i32]
+@s = external global [64 x i32]
+
+; Function Attrs: nounwind
+define void @t1() #0 {
+entry:
+; CHECK: ldr [[REG0:r[0-9]]],
+; CHECK: ldm [[REG0]]!,
+; CHECK: ldr [[REG1:r[0-9]]],
+; CHECK: stm [[REG1]]!,
+; CHECK: subs [[REG0]], #32
+; CHECK-NEXT: ldrb
+; CHECK: subs [[REG1]], #32
+; CHECK-NEXT: strb
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([64 x i32]* @s to i8*), i8* bitcast ([64 x i32]* @d to i8*), i32 33, i32 4, i1 false)
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @t2() #0 {
+entry:
+; CHECK: ldr [[REG0:r[0-9]]],
+; CHECK: ldm [[REG0]]!,
+; CHECK: ldr [[REG1:r[0-9]]],
+; CHECK: stm [[REG1]]!,
+; CHECK: ldrh
+; CHECK: ldrb
+; CHECK: strb
+; CHECK: strh
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([64 x i32]* @s to i8*), i8* bitcast ([64 x i32]* @d to i8*), i32 15, i32 4, i1 false)
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
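The two calls correspond to small fixed-size copies between word-aligned arrays; a hypothetical C source with equivalent intent (my sketch, not part of the suite):

#include <stdint.h>
#include <string.h>

extern int32_t d[64], s[64];

/* 33 bytes: ldm/stm for the word-sized body plus a one-byte tail (t1);
   15 bytes: a word-sized body plus halfword and byte tails (t2). */
void copy33(void) { memcpy(s, d, 33); }
void copy15(void) { memcpy(s, d, 15); }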
diff --git a/test/CodeGen/Thumb/trap.ll b/test/CodeGen/Thumb/trap.ll
index e04059c4b021..7d2f6f11a4d1 100644
--- a/test/CodeGen/Thumb/trap.ll
+++ b/test/CodeGen/Thumb/trap.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
; rdar://7961298
define void @t() nounwind {
diff --git a/test/CodeGen/Thumb/triple.ll b/test/CodeGen/Thumb/triple.ll
new file mode 100644
index 000000000000..0a1759f081ba
--- /dev/null
+++ b/test/CodeGen/Thumb/triple.ll
@@ -0,0 +1,7 @@
+; RUN: llc < %s -mtriple=thumb | FileCheck %s
+
+; CHECK: .code 16
+
+define void @f() {
+ ret void
+}
diff --git a/test/CodeGen/Thumb/tst_teq.ll b/test/CodeGen/Thumb/tst_teq.ll
index 21ada3ed83a0..2b6d9a3706a3 100644
--- a/test/CodeGen/Thumb/tst_teq.ll
+++ b/test/CodeGen/Thumb/tst_teq.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb | grep tst
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
define i32 @f(i32 %a) {
entry:
@@ -15,3 +15,6 @@ entry:
%retval = select i1 %0, i32 20, i32 10 ; <i32> [#uses=1]
ret i32 %retval
}
+
+; CHECK: tst
+
diff --git a/test/CodeGen/Thumb/unord.ll b/test/CodeGen/Thumb/unord.ll
index 39458ae7b7bc..3cf9ebfa0373 100644
--- a/test/CodeGen/Thumb/unord.ll
+++ b/test/CodeGen/Thumb/unord.ll
@@ -1,13 +1,20 @@
-; RUN: llc < %s -march=thumb | grep bne | count 1
-; RUN: llc < %s -march=thumb | grep beq | count 1
+; RUN: llc < %s -mtriple=thumb-apple-darwin | FileCheck %s
define i32 @f1(float %X, float %Y) {
+; CHECK-LABEL: _f1:
+; CHECK: bne
+; CHECK: .data_region
+; CHECK: .long ___unordsf2
%tmp = fcmp uno float %X, %Y
%retval = select i1 %tmp, i32 1, i32 -1
ret i32 %retval
}
define i32 @f2(float %X, float %Y) {
+; CHECK-LABEL: _f2:
+; CHECK: beq
+; CHECK: .data_region
+; CHECK: .long ___unordsf2
%tmp = fcmp ord float %X, %Y
%retval = select i1 %tmp, i32 1, i32 -1
ret i32 %retval
diff --git a/test/CodeGen/Thumb/vargs.ll b/test/CodeGen/Thumb/vargs.ll
index 50a1a0728846..4078b01ba30c 100644
--- a/test/CodeGen/Thumb/vargs.ll
+++ b/test/CodeGen/Thumb/vargs.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=thumb
-; RUN: llc < %s -mtriple=thumb-linux | grep pop | count 2
-; RUN: llc < %s -mtriple=thumb-darwin | grep pop | count 2
+; RUN: llc -mtriple=thumb-eabi %s -o /dev/null
+; RUN: llc -mtriple=thumb-linux %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-darwin %s -o - | FileCheck %s
@str = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
@@ -34,3 +34,8 @@ declare void @llvm.va_start(i8*)
declare i32 @printf(i8*, ...)
declare void @llvm.va_end(i8*)
+
+; CHECK: pop
+; CHECK: pop
+; CHECK-NOT: pop
+
diff --git a/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll b/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
index e0144531454a..09e0ed1ead63 100644
--- a/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
+++ b/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim -arm-atomic-cfg-tidy=0 | FileCheck %s
@csize = external global [100 x [20 x [4 x i8]]] ; <[100 x [20 x [4 x i8]]]*> [#uses=1]
@vsize = external global [100 x [20 x [4 x i8]]] ; <[100 x [20 x [4 x i8]]]*> [#uses=1]
diff --git a/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll b/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
index 940cfd15e08e..c8eac8d4d094 100644
--- a/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
+++ b/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi -arm-atomic-cfg-tidy=0 | FileCheck %s
; PR4659
; PR4682
diff --git a/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll b/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
index 52066d3f86ad..a9a2478e4034 100644
--- a/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
+++ b/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -disable-cgp-branch-opts | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -disable-cgp-branch-opts -arm-atomic-cfg-tidy=0 | FileCheck %s
%struct.pix_pos = type { i32, i32, i32, i32, i32, i32 }
diff --git a/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll b/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
index 1b8bdb1c19bb..8beb5b1c8944 100644
--- a/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
+++ b/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 -O3 | FileCheck %s
; rdar://7493908
; Make sure the result of the first dynamic_alloc isn't copied back to sp more
diff --git a/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll b/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
index 810bfb790209..f3046e1fcb82 100644
--- a/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
+++ b/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic -arm-atomic-cfg-tidy=0 | FileCheck %s
; rdar://8115404
; Tail merging must not split an IT block.
diff --git a/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll b/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
index 75f5439b98c2..3d89390d04c1 100644
--- a/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
+++ b/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
@@ -1,5 +1,5 @@
; rdar://8465407
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
%struct.buf = type opaque
diff --git a/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll b/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
index b1ce3bb935e3..240df83252cc 100644
--- a/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
+++ b/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv7-apple-darwin10 < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv7-apple-darwin10 -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
%struct.op = type { %struct.op*, %struct.op*, %struct.op* ()*, i32, i16, i16, i8, i8 }
diff --git a/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll b/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
index dadbdc5ced2f..ea8d233e79f1 100644
--- a/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
+++ b/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv8-none-linux-gnueabi
%struct.LIST_NODE.0.16 = type { %struct.LIST_NODE.0.16*, i8* }
@@ -26,3 +27,23 @@ bb3: ; preds = %bb2, %entry
bb5: ; preds = %bb3, %bb
ret %struct.LIST_NODE.0.16* null
}
+
+declare void @use(i32)
+define double @find_max_double(i32 %n, double* nocapture readonly %aa) {
+entry:
+ br i1 undef, label %for.body, label %for.end
+
+for.body: ; preds = %for.body, %entry
+ %0 = load double* null, align 8
+ %cmp2.6 = fcmp ogt double %0, 0.000000e+00
+ %idx.1.6 = select i1 %cmp2.6, i32 undef, i32 0
+ %idx.1.7 = select i1 undef, i32 undef, i32 %idx.1.6
+ %max.1.7 = select i1 undef, double 0.000000e+00, double undef
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %max.0.lcssa = phi double [ undef, %entry ], [ %max.1.7, %for.body ]
+ %idx.0.lcssa = phi i32 [ 0, %entry ], [ %idx.1.7, %for.body ]
+ tail call void @use(i32 %idx.0.lcssa)
+ ret double %max.0.lcssa
+}
diff --git a/test/CodeGen/Thumb2/bfi.ll b/test/CodeGen/Thumb2/bfi.ll
index 3612e2752834..4f056d571c4c 100644
--- a/test/CodeGen/Thumb2/bfi.ll
+++ b/test/CodeGen/Thumb2/bfi.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=thumb -mattr=+v6t2 < %s | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mattr=+v6t2 %s -o - | FileCheck %s
%struct.F = type { [3 x i8], i8 }
diff --git a/test/CodeGen/Thumb2/bfx.ll b/test/CodeGen/Thumb2/bfx.ll
index 489349d61552..9bd8d70275b9 100644
--- a/test/CodeGen/Thumb2/bfx.ll
+++ b/test/CodeGen/Thumb2/bfx.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @sbfx1(i32 %a) {
; CHECK: sbfx1
diff --git a/test/CodeGen/Thumb2/buildvector-crash.ll b/test/CodeGen/Thumb2/buildvector-crash.ll
index 8a3c895bbe57..16e2298522f5 100644
--- a/test/CodeGen/Thumb2/buildvector-crash.ll
+++ b/test/CodeGen/Thumb2/buildvector-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 -mcpu=cortex-a8 | FileCheck %s
; Formerly crashed, 3573915.
define void @RotateStarsFP_Vec() nounwind {
diff --git a/test/CodeGen/Thumb2/carry.ll b/test/CodeGen/Thumb2/carry.ll
index da1902b7e0f8..26622e23dd44 100644
--- a/test/CodeGen/Thumb2/carry.ll
+++ b/test/CodeGen/Thumb2/carry.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i64 @f1(i64 %a, i64 %b) {
entry:
diff --git a/test/CodeGen/Thumb2/cortex-fp.ll b/test/CodeGen/Thumb2/cortex-fp.ll
index f6cea72caecd..e63970a97e1f 100644
--- a/test/CodeGen/Thumb2/cortex-fp.ll
+++ b/test/CodeGen/Thumb2/cortex-fp.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-m3 | FileCheck %s -check-prefix=CORTEXM3
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-m4 | FileCheck %s -check-prefix=CORTEXM4
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=CORTEXM3
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=CORTEXM4
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -march=thumb -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=CORTEXA8
define float @foo(float %a, float %b) {
entry:
-; CHECK: foo
+; CHECK-LABEL: foo:
; CORTEXM3: blx ___mulsf3
; CORTEXM4: vmul.f32 s
; CORTEXA8: vmul.f32 d
@@ -15,7 +15,7 @@ entry:
define double @bar(double %a, double %b) {
entry:
-; CHECK: bar
+; CHECK-LABEL: bar:
%0 = fmul double %a, %b
; CORTEXM3: blx ___muldf3
; CORTEXM4: blx ___muldf3
diff --git a/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll b/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
index a9f948cf717a..88c7f0f17ab9 100644
--- a/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
+++ b/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
define void @fht(float* nocapture %fz, i16 signext %n) nounwind {
; CHECK-LABEL: fht:
diff --git a/test/CodeGen/Thumb2/div.ll b/test/CodeGen/Thumb2/div.ll
index 003d71797ab1..b273a8903265 100644
--- a/test/CodeGen/Thumb2/div.ll
+++ b/test/CodeGen/Thumb2/div.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin -mattr=+thumb2 \
+; RUN: llc -mtriple=thumb-apple-darwin -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
; RUN: | FileCheck %s -check-prefix=CHECK-THUMB
-; RUN: llc < %s -march=thumb -mcpu=cortex-m3 -mattr=+thumb2 \
+; RUN: llc -mtriple=thumb-apple-darwin -mcpu=cortex-m3 -mattr=+thumb2 %s -o - \
; RUN: | FileCheck %s -check-prefix=CHECK-THUMBV7M
-; RUN: llc < %s -march=thumb -mcpu=swift \
+; RUN: llc -mtriple=thumb-apple-darwin -mcpu=swift %s -o - \
; RUN: | FileCheck %s -check-prefix=CHECK-HWDIV
-; RUN: llc < %s -march=thumb -mcpu=cortex-r5 \
+; RUN: llc -mtriple=thumb-apple-darwin -mcpu=cortex-r5 %s -o - \
; RUN: | FileCheck %s -check-prefix=CHECK-HWDIV
define i32 @f1(i32 %a, i32 %b) {
diff --git a/test/CodeGen/Thumb2/ifcvt-neon.ll b/test/CodeGen/Thumb2/ifcvt-neon.ll
index 68320539693d..501b0b6a007c 100644
--- a/test/CodeGen/Thumb2/ifcvt-neon.ll
+++ b/test/CodeGen/Thumb2/ifcvt-neon.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
; rdar://7368193
@a = common global float 0.000000e+00 ; <float*> [#uses=2]
diff --git a/test/CodeGen/Thumb2/large-stack.ll b/test/CodeGen/Thumb2/large-stack.ll
index 36f3ce2eaa88..8d79da7982b1 100644
--- a/test/CodeGen/Thumb2/large-stack.ll
+++ b/test/CodeGen/Thumb2/large-stack.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=LINUX
+; RUN: llc < %s -march=thumb -mcpu=arm1156t2-s -mattr=+thumb2 \
+; RUN: -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
+; RUN: llc < %s -march=thumb -mcpu=arm1156t2-s -mattr=+thumb2 \
+; RUN: -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=LINUX
define void @test1() {
; DARWIN-LABEL: test1:
diff --git a/test/CodeGen/Thumb2/ldr-str-imm12.ll b/test/CodeGen/Thumb2/ldr-str-imm12.ll
index 36544d16d6f4..d20eef0c8bb7 100644
--- a/test/CodeGen/Thumb2/ldr-str-imm12.ll
+++ b/test/CodeGen/Thumb2/ldr-str-imm12.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
; rdar://7352504
; Make sure we use "str r9, [sp, #+28]" instead of "sub.w r4, r7, #256" followed by "str r9, [r4, #-32]".
diff --git a/test/CodeGen/Thumb2/lit.local.cfg b/test/CodeGen/Thumb2/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/CodeGen/Thumb2/lit.local.cfg
+++ b/test/CodeGen/Thumb2/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/Thumb2/longMACt.ll b/test/CodeGen/Thumb2/longMACt.ll
index a457333d978f..7322d0fe93d2 100644
--- a/test/CodeGen/Thumb2/longMACt.ll
+++ b/test/CodeGen/Thumb2/longMACt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; Check generated signed and unsigned multiply accumulate long.
define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
diff --git a/test/CodeGen/Thumb2/mul_const.ll b/test/CodeGen/Thumb2/mul_const.ll
index 488f4d13a0eb..7064798051e3 100644
--- a/test/CodeGen/Thumb2/mul_const.ll
+++ b/test/CodeGen/Thumb2/mul_const.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; rdar://7069502
define i32 @t1(i32 %v) nounwind readnone {
diff --git a/test/CodeGen/Thumb2/segmented-stacks.ll b/test/CodeGen/Thumb2/segmented-stacks.ll
new file mode 100644
index 000000000000..38bf91564eb7
--- /dev/null
+++ b/test/CodeGen/Thumb2/segmented-stacks.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -mtriple=thumb-linux-androideabi -march=thumb -mcpu=arm1156t2-s -mattr=+thumb2 -verify-machineinstrs | FileCheck %s -check-prefix=Thumb-android
+; RUN: llc < %s -mtriple=thumb-linux-androideabi -march=thumb -mcpu=arm1156t2-s -mattr=+thumb2 -filetype=obj
+
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define void @test_basic() #0 {
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
+ ret void
+
+; Thumb-android: test_basic:
+
+; Thumb-android: push {r4, r5}
+; Thumb-android-NEXT: mrc p15, #0, r4, c13, c0, #3
+; Thumb-android-NEXT: mov r5, sp
+; Thumb-android-NEXT: ldr r4, [r4, #252]
+; Thumb-android-NEXT: cmp r4, r5
+; Thumb-android-NEXT: blo .LBB0_2
+
+; Thumb-android: mov r4, #48
+; Thumb-android-NEXT: mov r5, #0
+; Thumb-android-NEXT: push {lr}
+; Thumb-android-NEXT: bl __morestack
+; Thumb-android-NEXT: ldr lr, [sp], #4
+; Thumb-android-NEXT: pop {r4, r5}
+; Thumb-android-NEXT: bx lr
+
+; Thumb-android: pop {r4, r5}
+
+}
+
+attributes #0 = { "split-stack" }
diff --git a/test/CodeGen/Thumb2/tail-call-r9.ll b/test/CodeGen/Thumb2/tail-call-r9.ll
index 24c76c98c03a..673aa7c12ebc 100644
--- a/test/CodeGen/Thumb2/tail-call-r9.ll
+++ b/test/CodeGen/Thumb2/tail-call-r9.ll
@@ -6,7 +6,7 @@
; the destination address. It's callee-saved in AAPCS.
define arm_aapcscc void @test(i32 %a) nounwind {
; CHECK-LABEL: test:
-; CHECK-NOT bx r9
+; CHECK-NOT: bx r9
%tmp = load void ()** @foo, align 4
tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3},~{r12}"() nounwind
tail call arm_aapcscc void %tmp() nounwind
diff --git a/test/CodeGen/Thumb2/thumb2-adc.ll b/test/CodeGen/Thumb2/thumb2-adc.ll
index 7c34cfdef3f9..a97654c0f043 100644
--- a/test/CodeGen/Thumb2/thumb2-adc.ll
+++ b/test/CodeGen/Thumb2/thumb2-adc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 734439407618 = 0x000000ab00000002
define i64 @f1(i64 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-add.ll b/test/CodeGen/Thumb2/thumb2-add.ll
index c23c74a1682e..8ff931a4490d 100644
--- a/test/CodeGen/Thumb2/thumb2-add.ll
+++ b/test/CodeGen/Thumb2/thumb2-add.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @t2ADDrc_255(i32 %lhs) {
; CHECK-LABEL: t2ADDrc_255:
diff --git a/test/CodeGen/Thumb2/thumb2-add2.ll b/test/CodeGen/Thumb2/thumb2-add2.ll
index 3bbc3bf812ad..9d64fd2e27fc 100644
--- a/test/CodeGen/Thumb2/thumb2-add2.ll
+++ b/test/CodeGen/Thumb2/thumb2-add2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 171 = 0x000000ab
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-add3.ll b/test/CodeGen/Thumb2/thumb2-add3.ll
index 6cd818c03e11..03a817003afd 100644
--- a/test/CodeGen/Thumb2/thumb2-add3.ll
+++ b/test/CodeGen/Thumb2/thumb2-add3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
%tmp = add i32 %a, 4095
diff --git a/test/CodeGen/Thumb2/thumb2-add4.ll b/test/CodeGen/Thumb2/thumb2-add4.ll
index 8b957114835d..ad9642d0706d 100644
--- a/test/CodeGen/Thumb2/thumb2-add4.ll
+++ b/test/CodeGen/Thumb2/thumb2-add4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 171 = 0x000000ab
define i64 @f1(i64 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-add5.ll b/test/CodeGen/Thumb2/thumb2-add5.ll
index beaa09e1e69e..f60e0be7876f 100644
--- a/test/CodeGen/Thumb2/thumb2-add5.ll
+++ b/test/CodeGen/Thumb2/thumb2-add5.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-add6.ll b/test/CodeGen/Thumb2/thumb2-add6.ll
index 0d2f12249956..af092937344c 100644
--- a/test/CodeGen/Thumb2/thumb2-add6.ll
+++ b/test/CodeGen/Thumb2/thumb2-add6.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i64 @f1(i64 %a, i64 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-and.ll b/test/CodeGen/Thumb2/thumb2-and.ll
index c9578d9d7d21..1984b3ffb356 100644
--- a/test/CodeGen/Thumb2/thumb2-and.ll
+++ b/test/CodeGen/Thumb2/thumb2-and.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-and2.ll b/test/CodeGen/Thumb2/thumb2-and2.ll
index c0501ab8ad37..70de9c96082f 100644
--- a/test/CodeGen/Thumb2/thumb2-and2.ll
+++ b/test/CodeGen/Thumb2/thumb2-and2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 171 = 0x000000ab
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-asr.ll b/test/CodeGen/Thumb2/thumb2-asr.ll
index ba782dde1034..a4cccd554215 100644
--- a/test/CodeGen/Thumb2/thumb2-asr.ll
+++ b/test/CodeGen/Thumb2/thumb2-asr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-asr2.ll b/test/CodeGen/Thumb2/thumb2-asr2.ll
index 3685badcafdf..da050fb670c4 100644
--- a/test/CodeGen/Thumb2/thumb2-asr2.ll
+++ b/test/CodeGen/Thumb2/thumb2-asr2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-bcc.ll b/test/CodeGen/Thumb2/thumb2-bcc.ll
index 81f7de9ae39c..e7b38221b00d 100644
--- a/test/CodeGen/Thumb2/thumb2-bcc.ll
+++ b/test/CodeGen/Thumb2/thumb2-bcc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; If-conversion defeats the purpose of this test, which is to check CBZ
; generation, so use a memory barrier instruction to make sure it doesn't
; happen and we get actual branches.
diff --git a/test/CodeGen/Thumb2/thumb2-bfc.ll b/test/CodeGen/Thumb2/thumb2-bfc.ll
index 327b6d1a503a..dbf697cd51dc 100644
--- a/test/CodeGen/Thumb2/thumb2-bfc.ll
+++ b/test/CodeGen/Thumb2/thumb2-bfc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 4278190095 = 0xff00000f
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-bic.ll b/test/CodeGen/Thumb2/thumb2-bic.ll
index 5938fa19a3c4..68d92b8771cc 100644
--- a/test/CodeGen/Thumb2/thumb2-bic.ll
+++ b/test/CodeGen/Thumb2/thumb2-bic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-branch.ll b/test/CodeGen/Thumb2/thumb2-branch.ll
index a00b22d85022..332ed50ede6f 100644
--- a/test/CodeGen/Thumb2/thumb2-branch.ll
+++ b/test/CodeGen/Thumb2/thumb2-branch.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 -arm-atomic-cfg-tidy=0 | FileCheck %s
; If-conversion defeats the purpose of this test, which is to check
; conditional branch generation, so a call is made to make sure it doesn't
; happen and we get actual branches.
diff --git a/test/CodeGen/Thumb2/thumb2-cbnz.ll b/test/CodeGen/Thumb2/thumb2-cbnz.ll
index 893bd0fdaef4..f0f79168c904 100644
--- a/test/CodeGen/Thumb2/thumb2-cbnz.ll
+++ b/test/CodeGen/Thumb2/thumb2-cbnz.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
; rdar://7354379
declare double @foo(double) nounwind readnone
diff --git a/test/CodeGen/Thumb2/thumb2-clz.ll b/test/CodeGen/Thumb2/thumb2-clz.ll
index dbdaae29eaef..52b540b2bdd0 100644
--- a/test/CodeGen/Thumb2/thumb2-clz.ll
+++ b/test/CodeGen/Thumb2/thumb2-clz.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+v7 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+v7 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-cmn.ll b/test/CodeGen/Thumb2/thumb2-cmn.ll
index 8bcaa7e8209e..efa150529ad6 100644
--- a/test/CodeGen/Thumb2/thumb2-cmn.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; These tests could be improved by 'movs r0, #0' being rematerialized below the
; test as 'mov.w r0, #0'.
diff --git a/test/CodeGen/Thumb2/thumb2-cmn2.ll b/test/CodeGen/Thumb2/thumb2-cmn2.ll
index f5db728d46a4..42473c2dcade 100644
--- a/test/CodeGen/Thumb2/thumb2-cmn2.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmn2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; -0x000000bb = 4294967109
define i1 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-cmp.ll b/test/CodeGen/Thumb2/thumb2-cmp.ll
index 87413444ca3b..8f08617125c2 100644
--- a/test/CodeGen/Thumb2/thumb2-cmp.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; These tests would be improved by 'movs r0, #0' being rematerialized below the
; test as 'mov.w r0, #0'.
diff --git a/test/CodeGen/Thumb2/thumb2-cmp2.ll b/test/CodeGen/Thumb2/thumb2-cmp2.ll
index 5b880f16deb5..4d840030f825 100644
--- a/test/CodeGen/Thumb2/thumb2-cmp2.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmp2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; These tests would be improved by 'movs r0, #0' being rematerialized below the
; test as 'mov.w r0, #0'.
diff --git a/test/CodeGen/Thumb2/thumb2-eor.ll b/test/CodeGen/Thumb2/thumb2-eor.ll
index b3e323c10d2e..20282993e817 100644
--- a/test/CodeGen/Thumb2/thumb2-eor.ll
+++ b/test/CodeGen/Thumb2/thumb2-eor.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-eor2.ll b/test/CodeGen/Thumb2/thumb2-eor2.ll
index 5daa13df655d..f26aafeff340 100644
--- a/test/CodeGen/Thumb2/thumb2-eor2.ll
+++ b/test/CodeGen/Thumb2/thumb2-eor2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 0x000000bb = 187
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
index 403cd48035b4..a861912fe113 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-default-it | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8-apple-ios -arm-no-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 -arm-default-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8-apple-ios -arm-atomic-cfg-tidy=0 -arm-no-restrict-it | FileCheck %s
define void @foo(i32 %X, i32 %Y) {
entry:
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt3.ll b/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
index a71aa3fb613a..79667d43b95e 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-default-it | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8-apple-darwin -arm-no-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-atomic-cfg-tidy=0 -arm-default-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8-apple-darwin -arm-atomic-cfg-tidy=0 -arm-no-restrict-it | FileCheck %s
; There shouldn't be an unconditional branch at the end of bb52.
; rdar://7184787
diff --git a/test/CodeGen/Thumb2/thumb2-jtb.ll b/test/CodeGen/Thumb2/thumb2-jtb.ll
index 0748b9b32d9a..ce7fb9f10fec 100644
--- a/test/CodeGen/Thumb2/thumb2-jtb.ll
+++ b/test/CodeGen/Thumb2/thumb2-jtb.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -arm-adjust-jump-tables=0 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 -arm-adjust-jump-tables=0 %s -o - | FileCheck %s
; Do not use tbb / tbh if any destination is before the jumptable.
; rdar://7102917
diff --git a/test/CodeGen/Thumb2/thumb2-ldm.ll b/test/CodeGen/Thumb2/thumb2-ldm.ll
index 8716d80a2c8c..adfcf2b6aaf1 100644
--- a/test/CodeGen/Thumb2/thumb2-ldm.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldm.ll
@@ -5,6 +5,7 @@
define i32 @t1() {
; CHECK-LABEL: t1:
; CHECK: push {r7, lr}
+; CHECK: ldrd
; CHECK: pop {r7, pc}
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
@@ -27,6 +28,7 @@ define i32 @t2() {
define i32 @t3() {
; CHECK-LABEL: t3:
; CHECK: push {r7, lr}
+; CHECK: ldm
; CHECK: pop {r7, pc}
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
diff --git a/test/CodeGen/Thumb2/thumb2-ldr.ll b/test/CodeGen/Thumb2/thumb2-ldr.ll
index 7f68f661fa9a..c25ed789de04 100644
--- a/test/CodeGen/Thumb2/thumb2-ldr.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32* %v) {
entry:
diff --git a/test/CodeGen/Thumb2/thumb2-ldr_ext.ll b/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
index 9e6aef4e0974..b50b33320597 100644
--- a/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
@@ -1,7 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrb | count 1
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrh | count 1
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrsb | count 1
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrsh | count 1
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @test1(i8* %v.pntr.s0.u1) {
%tmp.u = load i8* %v.pntr.s0.u1
@@ -26,3 +23,16 @@ define i32 @test4() {
%tmp1.s = sext i16 %tmp.s to i32
ret i32 %tmp1.s
}
+
+; CHECK: ldrb
+; CHECK-NOT: ldrb
+
+; CHECK: ldrh
+; CHECK-NOT: ldrh
+
+; CHECK: ldrsb
+; CHECK-NOT: ldrsb
+
+; CHECK: ldrsh
+; CHECK-NOT: ldrsh
+
diff --git a/test/CodeGen/Thumb2/thumb2-ldr_post.ll b/test/CodeGen/Thumb2/thumb2-ldr_post.ll
index bce847471beb..c26e6b154e55 100644
--- a/test/CodeGen/Thumb2/thumb2-ldr_post.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldr_post.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @test(i32 %a, i32 %b, i32 %c) {
%tmp1 = mul i32 %a, %b ; <i32> [#uses=2]
diff --git a/test/CodeGen/Thumb2/thumb2-ldr_pre.ll b/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
index 601c0b560800..cafb02a4984f 100644
--- a/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
@@ -1,7 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep "ldr.*\!" | count 3
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep "ldrsb.*\!" | count 1
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32* @test1(i32* %X, i32* %dest) {
%Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
@@ -10,6 +7,8 @@ define i32* @test1(i32* %X, i32* %dest) {
ret i32* %Y
}
+; CHECK: ldr{{.*}}!
+
define i32 @test2(i32 %a, i32 %b) {
%tmp1 = sub i32 %a, 64 ; <i32> [#uses=2]
%tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1]
@@ -19,6 +18,8 @@ define i32 @test2(i32 %a, i32 %b) {
ret i32 %tmp5
}
+; CHECK: ldr{{.*}}!
+
define i8* @test3(i8* %X, i32* %dest) {
%tmp1 = getelementptr i8* %X, i32 4
%tmp2 = load i8* %tmp1
@@ -26,3 +27,6 @@ define i8* @test3(i8* %X, i32* %dest) {
store i32 %tmp3, i32* %dest
ret i8* %tmp1
}
+
+; CHECK: ldrsb{{.*}}!
+
diff --git a/test/CodeGen/Thumb2/thumb2-ldrb.ll b/test/CodeGen/Thumb2/thumb2-ldrb.ll
index c135effd796b..0b3441eb1e22 100644
--- a/test/CodeGen/Thumb2/thumb2-ldrb.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldrb.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i8 @f1(i8* %v) {
entry:
diff --git a/test/CodeGen/Thumb2/thumb2-ldrh.ll b/test/CodeGen/Thumb2/thumb2-ldrh.ll
index 99f6aba65cf0..db5dcfac2ba1 100644
--- a/test/CodeGen/Thumb2/thumb2-ldrh.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldrh.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i16 @f1(i16* %v) {
entry:
diff --git a/test/CodeGen/Thumb2/thumb2-lsl.ll b/test/CodeGen/Thumb2/thumb2-lsl.ll
index 1b4853853a4e..05441c856472 100644
--- a/test/CodeGen/Thumb2/thumb2-lsl.ll
+++ b/test/CodeGen/Thumb2/thumb2-lsl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-lsl2.ll b/test/CodeGen/Thumb2/thumb2-lsl2.ll
index bc0978e68241..5a456b024dc5 100644
--- a/test/CodeGen/Thumb2/thumb2-lsl2.ll
+++ b/test/CodeGen/Thumb2/thumb2-lsl2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-lsr.ll b/test/CodeGen/Thumb2/thumb2-lsr.ll
index a3b207c1f90b..48c2ec42e6cf 100644
--- a/test/CodeGen/Thumb2/thumb2-lsr.ll
+++ b/test/CodeGen/Thumb2/thumb2-lsr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-lsr2.ll b/test/CodeGen/Thumb2/thumb2-lsr2.ll
index ae55735fabbc..5d158afc08a1 100644
--- a/test/CodeGen/Thumb2/thumb2-lsr2.ll
+++ b/test/CodeGen/Thumb2/thumb2-lsr2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-lsr3.ll b/test/CodeGen/Thumb2/thumb2-lsr3.ll
index e7ba782afe6a..c9344c89fb78 100644
--- a/test/CodeGen/Thumb2/thumb2-lsr3.ll
+++ b/test/CodeGen/Thumb2/thumb2-lsr3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i1 @test1(i64 %poscnt, i32 %work) {
entry:
diff --git a/test/CodeGen/Thumb2/thumb2-mla.ll b/test/CodeGen/Thumb2/thumb2-mla.ll
index 709fa13dd3a1..0c97d50bd284 100644
--- a/test/CodeGen/Thumb2/thumb2-mla.ll
+++ b/test/CodeGen/Thumb2/thumb2-mla.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -arm-use-mulops=false | FileCheck %s -check-prefix=NO_MULOPS
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 \
+; RUN: -arm-use-mulops=false %s -o - | FileCheck %s -check-prefix=NO_MULOPS
define i32 @f1(i32 %a, i32 %b, i32 %c) {
%tmp1 = mul i32 %a, %b
diff --git a/test/CodeGen/Thumb2/thumb2-mls.ll b/test/CodeGen/Thumb2/thumb2-mls.ll
index 86e147b24018..9b0e7ff37b69 100644
--- a/test/CodeGen/Thumb2/thumb2-mls.ll
+++ b/test/CodeGen/Thumb2/thumb2-mls.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b, i32 %c) {
%tmp1 = mul i32 %a, %b
diff --git a/test/CodeGen/Thumb2/thumb2-mov.ll b/test/CodeGen/Thumb2/thumb2-mov.ll
index 148bafec4014..e5633624dbfc 100644
--- a/test/CodeGen/Thumb2/thumb2-mov.ll
+++ b/test/CodeGen/Thumb2/thumb2-mov.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; Test #<const>
diff --git a/test/CodeGen/Thumb2/thumb2-mul.ll b/test/CodeGen/Thumb2/thumb2-mul.ll
index a989989b43f7..4815f4b5f751 100644
--- a/test/CodeGen/Thumb2/thumb2-mul.ll
+++ b/test/CodeGen/Thumb2/thumb2-mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-mulhi.ll b/test/CodeGen/Thumb2/thumb2-mulhi.ll
index 9d4840a2deb8..db9b644d4f92 100644
--- a/test/CodeGen/Thumb2/thumb2-mulhi.ll
+++ b/test/CodeGen/Thumb2/thumb2-mulhi.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2dsp | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2dsp %s -o - | FileCheck %s
define i32 @smulhi(i32 %x, i32 %y) {
; CHECK: smulhi
diff --git a/test/CodeGen/Thumb2/thumb2-mvn.ll b/test/CodeGen/Thumb2/thumb2-mvn.ll
index a5592f6b9276..adf982f4a342 100644
--- a/test/CodeGen/Thumb2/thumb2-mvn.ll
+++ b/test/CodeGen/Thumb2/thumb2-mvn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
+; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s
; 0x000000bb = 187
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-mvn2.ll b/test/CodeGen/Thumb2/thumb2-mvn2.ll
index bce54a352e80..323c2ccf32de 100644
--- a/test/CodeGen/Thumb2/thumb2-mvn2.ll
+++ b/test/CodeGen/Thumb2/thumb2-mvn2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-neg.ll b/test/CodeGen/Thumb2/thumb2-neg.ll
index 40e809862140..bec609724876 100644
--- a/test/CodeGen/Thumb2/thumb2-neg.ll
+++ b/test/CodeGen/Thumb2/thumb2-neg.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-orn.ll b/test/CodeGen/Thumb2/thumb2-orn.ll
index 5bbe653cd12e..e1f0bba7c4b8 100644
--- a/test/CodeGen/Thumb2/thumb2-orn.ll
+++ b/test/CodeGen/Thumb2/thumb2-orn.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
%tmp = xor i32 %b, 4294967295
diff --git a/test/CodeGen/Thumb2/thumb2-orn2.ll b/test/CodeGen/Thumb2/thumb2-orn2.ll
index eff3ae38a056..c8347df2d8a5 100644
--- a/test/CodeGen/Thumb2/thumb2-orn2.ll
+++ b/test/CodeGen/Thumb2/thumb2-orn2.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 0x000000bb = 187
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-orr.ll b/test/CodeGen/Thumb2/thumb2-orr.ll
index 13ed8620059b..f9628668a6b0 100644
--- a/test/CodeGen/Thumb2/thumb2-orr.ll
+++ b/test/CodeGen/Thumb2/thumb2-orr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-orr2.ll b/test/CodeGen/Thumb2/thumb2-orr2.ll
index 837bb1cb07c1..045cc1dfea1a 100644
--- a/test/CodeGen/Thumb2/thumb2-orr2.ll
+++ b/test/CodeGen/Thumb2/thumb2-orr2.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 0x000000bb = 187
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-pack.ll b/test/CodeGen/Thumb2/thumb2-pack.ll
index 1052dd2a072e..4825628f3014 100644
--- a/test/CodeGen/Thumb2/thumb2-pack.ll
+++ b/test/CodeGen/Thumb2/thumb2-pack.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk %s -o - | FileCheck %s
; CHECK: test1
; CHECK: pkhbt r0, r0, r1, lsl #16
diff --git a/test/CodeGen/Thumb2/thumb2-rev.ll b/test/CodeGen/Thumb2/thumb2-rev.ll
index 67cd62362fe9..873a2d4cf7de 100644
--- a/test/CodeGen/Thumb2/thumb2-rev.ll
+++ b/test/CodeGen/Thumb2/thumb2-rev.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+v7,+t2xtpk | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+v7,+t2xtpk %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-rev16.ll b/test/CodeGen/Thumb2/thumb2-rev16.ll
index 10cd5391a48d..3e2658741b6f 100644
--- a/test/CodeGen/Thumb2/thumb2-rev16.ll
+++ b/test/CodeGen/Thumb2/thumb2-rev16.ll
@@ -1,7 +1,7 @@
; XFAIL: *
; FIXME: the rev16 pattern is not matching
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep "rev16\W*r[0-9]*,\W*r[0-9]*" | count 1
+; RUN: llc < %s -march=thumb -mcpu=arm1156t2-s -mattr=+thumb2 | grep "rev16\W*r[0-9]*,\W*r[0-9]*" | count 1
; 0xff00ff00 = 4278255360
; 0x00ff00ff = 16711935
diff --git a/test/CodeGen/Thumb2/thumb2-ror.ll b/test/CodeGen/Thumb2/thumb2-ror.ll
index 2a218eae9752..71b00153c298 100644
--- a/test/CodeGen/Thumb2/thumb2-ror.ll
+++ b/test/CodeGen/Thumb2/thumb2-ror.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; RUN: llc < %s -march=thumb | FileCheck %s -check-prefix=THUMB1
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s -check-prefix=THUMB1
; CHECK-LABEL: f1:
; CHECK: ror.w r0, r0, #22
diff --git a/test/CodeGen/Thumb2/thumb2-rsb.ll b/test/CodeGen/Thumb2/thumb2-rsb.ll
index 150a25f51b54..1c5acadcf40e 100644
--- a/test/CodeGen/Thumb2/thumb2-rsb.ll
+++ b/test/CodeGen/Thumb2/thumb2-rsb.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
%tmp = shl i32 %b, 5
diff --git a/test/CodeGen/Thumb2/thumb2-rsb2.ll b/test/CodeGen/Thumb2/thumb2-rsb2.ll
index 15aa8af3b83b..838e55e28eb2 100644
--- a/test/CodeGen/Thumb2/thumb2-rsb2.ll
+++ b/test/CodeGen/Thumb2/thumb2-rsb2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 171 = 0x000000ab
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-sbc.ll b/test/CodeGen/Thumb2/thumb2-sbc.ll
index 0c37984ba3e7..b04dae61cef7 100644
--- a/test/CodeGen/Thumb2/thumb2-sbc.ll
+++ b/test/CodeGen/Thumb2/thumb2-sbc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=thumb -mattr=+thumb2 < %s | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i64 @f1(i64 %a, i64 %b) {
; CHECK: f1
diff --git a/test/CodeGen/Thumb2/thumb2-select.ll b/test/CodeGen/Thumb2/thumb2-select.ll
index 5f5fa1992516..105c2672ee1b 100644
--- a/test/CodeGen/Thumb2/thumb2-select.ll
+++ b/test/CodeGen/Thumb2/thumb2-select.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -show-mc-encoding | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 -show-mc-encoding %s -o - \
+; RUN: | FileCheck %s
define i32 @f1(i32 %a.s) {
entry:
diff --git a/test/CodeGen/Thumb2/thumb2-select_xform.ll b/test/CodeGen/Thumb2/thumb2-select_xform.ll
index ed4d26d746cb..20f0e5ef4a40 100644
--- a/test/CodeGen/Thumb2/thumb2-select_xform.ll
+++ b/test/CodeGen/Thumb2/thumb2-select_xform.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK: t1
diff --git a/test/CodeGen/Thumb2/thumb2-shifter.ll b/test/CodeGen/Thumb2/thumb2-shifter.ll
index 05dd90cfbfed..538fc2214d2c 100644
--- a/test/CodeGen/Thumb2/thumb2-shifter.ll
+++ b/test/CodeGen/Thumb2/thumb2-shifter.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s --check-prefix=A8
-; RUN: llc < %s -march=thumb -mcpu=swift | FileCheck %s --check-prefix=SWIFT
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s --check-prefix=A8
+; RUN: llc -mtriple=thumb-eabi -mcpu=swift %s -o - | FileCheck %s --check-prefix=SWIFT
; rdar://12892707
diff --git a/test/CodeGen/Thumb2/thumb2-smla.ll b/test/CodeGen/Thumb2/thumb2-smla.ll
index aaaedfa42e74..8573d39f09f6 100644
--- a/test/CodeGen/Thumb2/thumb2-smla.ll
+++ b/test/CodeGen/Thumb2/thumb2-smla.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk,+t2dsp | FileCheck %s
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk,+t2dsp -arm-use-mulops=false | FileCheck %s -check-prefix=NO_MULOPS
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk,+t2dsp %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk,+t2dsp -arm-use-mulops=false %s -o - | FileCheck %s -check-prefix=NO_MULOPS
define i32 @f3(i32 %a, i16 %x, i32 %y) {
; CHECK: f3
diff --git a/test/CodeGen/Thumb2/thumb2-smul.ll b/test/CodeGen/Thumb2/thumb2-smul.ll
index 7a13269615d4..67783d284e9c 100644
--- a/test/CodeGen/Thumb2/thumb2-smul.ll
+++ b/test/CodeGen/Thumb2/thumb2-smul.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk,+t2dsp | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk,+t2dsp %s -o - | FileCheck %s
@x = weak global i16 0 ; <i16*> [#uses=1]
@y = weak global i16 0 ; <i16*> [#uses=0]
diff --git a/test/CodeGen/Thumb2/thumb2-spill-q.ll b/test/CodeGen/Thumb2/thumb2-spill-q.ll
index 52c106344910..94f472593b3c 100644
--- a/test/CodeGen/Thumb2/thumb2-spill-q.ll
+++ b/test/CodeGen/Thumb2/thumb2-spill-q.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-elf -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-elf -mattr=+neon -arm-atomic-cfg-tidy=0 | FileCheck %s
; PR4789
%bar = type { float, float, float }
diff --git a/test/CodeGen/Thumb2/thumb2-str.ll b/test/CodeGen/Thumb2/thumb2-str.ll
index fb5fa168e8b8..4008145b0732 100644
--- a/test/CodeGen/Thumb2/thumb2-str.ll
+++ b/test/CodeGen/Thumb2/thumb2-str.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32* %v) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-str_post.ll b/test/CodeGen/Thumb2/thumb2-str_post.ll
index 2133d2807006..aed849e50f74 100644
--- a/test/CodeGen/Thumb2/thumb2-str_post.ll
+++ b/test/CodeGen/Thumb2/thumb2-str_post.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i16 @test1(i32* %X, i16* %A) {
; CHECK-LABEL: test1:
diff --git a/test/CodeGen/Thumb2/thumb2-str_pre.ll b/test/CodeGen/Thumb2/thumb2-str_pre.ll
index 1e6616a91cc5..e957400fe28f 100644
--- a/test/CodeGen/Thumb2/thumb2-str_pre.ll
+++ b/test/CodeGen/Thumb2/thumb2-str_pre.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define void @test1(i32* %X, i32* %A, i32** %dest) {
; CHECK: test1
diff --git a/test/CodeGen/Thumb2/thumb2-strb.ll b/test/CodeGen/Thumb2/thumb2-strb.ll
index cc39b7d585c5..a2558eccc2b1 100644
--- a/test/CodeGen/Thumb2/thumb2-strb.ll
+++ b/test/CodeGen/Thumb2/thumb2-strb.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i8 @f1(i8 %a, i8* %v) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-strh.ll b/test/CodeGen/Thumb2/thumb2-strh.ll
index d68693830518..cbe73d5cf057 100644
--- a/test/CodeGen/Thumb2/thumb2-strh.ll
+++ b/test/CodeGen/Thumb2/thumb2-strh.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i16 @f1(i16 %a, i16* %v) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-sub.ll b/test/CodeGen/Thumb2/thumb2-sub.ll
index f83dfe2e00a4..1c69aebccb60 100644
--- a/test/CodeGen/Thumb2/thumb2-sub.ll
+++ b/test/CodeGen/Thumb2/thumb2-sub.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 171 = 0x000000ab
define i32 @f1(i32 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-sub2.ll b/test/CodeGen/Thumb2/thumb2-sub2.ll
index 47eb1e1a36cf..8afc4cbf5d31 100644
--- a/test/CodeGen/Thumb2/thumb2-sub2.ll
+++ b/test/CodeGen/Thumb2/thumb2-sub2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a) {
%tmp = sub i32 %a, 4095
diff --git a/test/CodeGen/Thumb2/thumb2-sub3.ll b/test/CodeGen/Thumb2/thumb2-sub3.ll
index 1dbda57f2369..a3702f442c42 100644
--- a/test/CodeGen/Thumb2/thumb2-sub3.ll
+++ b/test/CodeGen/Thumb2/thumb2-sub3.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=thumb -mattr=+thumb2 < %s | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; 171 = 0x000000ab
define i64 @f1(i64 %a) {
diff --git a/test/CodeGen/Thumb2/thumb2-sub4.ll b/test/CodeGen/Thumb2/thumb2-sub4.ll
index ff1441ac64dd..0ff75670da03 100644
--- a/test/CodeGen/Thumb2/thumb2-sub4.ll
+++ b/test/CodeGen/Thumb2/thumb2-sub4.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-sub5.ll b/test/CodeGen/Thumb2/thumb2-sub5.ll
index 5941dd6ec89f..e12d3e1c7fdd 100644
--- a/test/CodeGen/Thumb2/thumb2-sub5.ll
+++ b/test/CodeGen/Thumb2/thumb2-sub5.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -mattr=+32bit | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+32bit %s -o - \
+; RUN: | FileCheck %s
define i64 @f1(i64 %a, i64 %b) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll b/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
index 792ebef5f9bc..47b94c5a4a83 100644
--- a/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
+++ b/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s
define i32 @test1(i16 zeroext %z) nounwind {
; CHECK-LABEL: test1:
diff --git a/test/CodeGen/Thumb2/thumb2-sxt_rot.ll b/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
index f3d0edf0c578..cef3490e2a38 100644
--- a/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
+++ b/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk %s -o - \
+; RUN: | FileCheck %s
define i32 @test0(i8 %A) {
; CHECK: test0
diff --git a/test/CodeGen/Thumb2/thumb2-teq.ll b/test/CodeGen/Thumb2/thumb2-teq.ll
index 5acda35b4948..258b7e48af85 100644
--- a/test/CodeGen/Thumb2/thumb2-teq.ll
+++ b/test/CodeGen/Thumb2/thumb2-teq.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; These tests would be improved by 'movs r0, #0' being rematerialized below the
; test as 'mov.w r0, #0'.
diff --git a/test/CodeGen/Thumb2/thumb2-teq2.ll b/test/CodeGen/Thumb2/thumb2-teq2.ll
index 27ecad839399..3b4970b87844 100644
--- a/test/CodeGen/Thumb2/thumb2-teq2.ll
+++ b/test/CodeGen/Thumb2/thumb2-teq2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; These tests would be improved by 'movs r0, #0' being rematerialized below the
; tst as 'mov.w r0, #0'.
diff --git a/test/CodeGen/Thumb2/thumb2-tst.ll b/test/CodeGen/Thumb2/thumb2-tst.ll
index 31eafea614de..8cf6f144a1f6 100644
--- a/test/CodeGen/Thumb2/thumb2-tst.ll
+++ b/test/CodeGen/Thumb2/thumb2-tst.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; These tests would be improved by 'movs r0, #0' being rematerialized below the
; tst as 'mov.w r0, #0'.
diff --git a/test/CodeGen/Thumb2/thumb2-tst2.ll b/test/CodeGen/Thumb2/thumb2-tst2.ll
index f71e91d1e9de..178a2a5f3298 100644
--- a/test/CodeGen/Thumb2/thumb2-tst2.ll
+++ b/test/CodeGen/Thumb2/thumb2-tst2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
; These tests would be improved by 'movs r0, #0' being rematerialized below the
; tst as 'mov.w r0, #0'.
diff --git a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
index 61e849ef4a43..bcd4a0fa38ff 100644
--- a/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
+++ b/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s --check-prefix=A8
-; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s --check-prefix=M3
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s --check-prefix=A8
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s --check-prefix=M3
; rdar://11318438
define zeroext i8 @test1(i32 %A.u) {
diff --git a/test/CodeGen/Thumb2/thumb2-uxtb.ll b/test/CodeGen/Thumb2/thumb2-uxtb.ll
index 2074f98cb608..b8b1bc832d96 100644
--- a/test/CodeGen/Thumb2/thumb2-uxtb.ll
+++ b/test/CodeGen/Thumb2/thumb2-uxtb.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s -check-prefix=ARMv7A
-; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s -check-prefix=ARMv7M
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=ARMv7A
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s -check-prefix=ARMv7M
define i32 @test1(i32 %x) {
; ARMv7A: test1
diff --git a/test/CodeGen/Thumb2/tls1.ll b/test/CodeGen/Thumb2/tls1.ll
index d91e3b32f9b7..40973562d2b9 100644
--- a/test/CodeGen/Thumb2/tls1.ll
+++ b/test/CodeGen/Thumb2/tls1.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | \
-; RUN: grep "i(tpoff)"
+; RUN: grep "i(TPOFF)"
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | \
; RUN: grep "__aeabi_read_tp"
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi \
diff --git a/test/CodeGen/Thumb2/tls2.ll b/test/CodeGen/Thumb2/tls2.ll
index 6cb019ff00ec..e6bed2f65a49 100644
--- a/test/CodeGen/Thumb2/tls2.ll
+++ b/test/CodeGen/Thumb2/tls2.ll
@@ -8,7 +8,7 @@ entry:
; CHECK-NOT-PIC-LABEL: f:
; CHECK-NOT-PIC: add r0, pc
; CHECK-NOT-PIC: ldr r1, [r0]
-; CHECK-NOT-PIC: i(gottpoff)
+; CHECK-NOT-PIC: i(GOTTPOFF)
; CHECK-PIC-LABEL: f:
; CHECK-PIC: bl __tls_get_addr(PLT)
@@ -21,7 +21,7 @@ entry:
; CHECK-NOT-PIC-LABEL: g:
; CHECK-NOT-PIC: add r0, pc
; CHECK-NOT-PIC: ldr r1, [r0]
-; CHECK-NOT-PIC: i(gottpoff)
+; CHECK-NOT-PIC: i(GOTTPOFF)
; CHECK-PIC-LABEL: g:
; CHECK-PIC: bl __tls_get_addr(PLT)
diff --git a/test/CodeGen/Thumb2/tpsoft.ll b/test/CodeGen/Thumb2/tpsoft.ll
new file mode 100644
index 000000000000..6ab8bf01761b
--- /dev/null
+++ b/test/CodeGen/Thumb2/tpsoft.ll
@@ -0,0 +1,54 @@
+; RUN: llc %s -mtriple=thumbv7-linux-gnueabi -o - | \
+; RUN: FileCheck -check-prefix=ELFASM %s
+; RUN: llc %s -mtriple=thumbebv7-linux-gnueabi -o - | \
+; RUN: FileCheck -check-prefix=ELFASM %s
+; RUN: llc %s -mtriple=thumbv7-linux-gnueabi -filetype=obj -o - | \
+; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=ELFOBJ -check-prefix=ELFOBJ-LE %s
+; RUN: llc %s -mtriple=thumbebv7-linux-gnueabi -filetype=obj -o - | \
+; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=ELFOBJ -check-prefix=ELFOBJ-BE %s
+
+;; Make sure that bl __aeabi_read_tp is materialized and fixed up correctly
+;; in the obj case.
+
+@i = external thread_local global i32
+@a = external global i8
+@b = external global [10 x i8]
+
+define arm_aapcs_vfpcc i32 @main() nounwind {
+entry:
+ %0 = load i32* @i, align 4
+ switch i32 %0, label %bb2 [
+ i32 12, label %bb
+ i32 13, label %bb1
+ ]
+
+bb: ; preds = %entry
+ %1 = tail call arm_aapcs_vfpcc i32 @foo(i8* @a) nounwind
+ ret i32 %1
+; ELFASM: bl __aeabi_read_tp
+
+
+; ELFOBJ: Sections [
+; ELFOBJ: Section {
+; ELFOBJ: Name: .text
+; ELFOBJ-LE: SectionData (
+;;; BL __aeabi_read_tp is ---------+
+;;; V
+; ELFOBJ-LE-NEXT: 0000: 2DE90048 0E487844 0168FFF7 FEFF4058
+; ELFOBJ-BE: SectionData (
+;;; BL __aeabi_read_tp is ---------+
+;;; V
+; ELFOBJ-BE-NEXT: 0000: E92D4800 480E4478 6801F7FF FFFE5840
+
+
+bb1: ; preds = %entry
+ %2 = tail call arm_aapcs_vfpcc i32 @bar(i32* bitcast ([10 x i8]* @b to i32*)) nounwind
+ ret i32 %2
+
+bb2: ; preds = %entry
+ ret i32 -1
+}
+
+declare arm_aapcs_vfpcc i32 @foo(i8*)
+
+declare arm_aapcs_vfpcc i32 @bar(i32*)
diff --git a/test/CodeGen/Thumb2/v8_IT_3.ll b/test/CodeGen/Thumb2/v8_IT_3.ll
index 4dca24629b01..a028deebc8e8 100644
--- a/test/CodeGen/Thumb2/v8_IT_3.ll
+++ b/test/CodeGen/Thumb2/v8_IT_3.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -mtriple=thumbv8 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8 -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
-; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
+; RUN: llc < %s -mtriple=thumbv8 -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7 -arm-atomic-cfg-tidy=0 -arm-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8 -arm-atomic-cfg-tidy=0 -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
+; RUN: llc < %s -mtriple=thumbv7 -arm-atomic-cfg-tidy=0 -arm-restrict-it -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
%struct.FF = type { i32 (i32*)*, i32 (i32*, i32*, i32, i32, i32, i32)*, i32 (i32, i32, i8*)*, void ()*, i32 (i32, i8*, i32*)*, i32 ()* }
%struct.BD = type { %struct.BD*, i32, i32, i32, i32, i64, i32 (%struct.BD*, i8*, i64, i32)*, i32 (%struct.BD*, i8*, i32, i32)*, i32 (%struct.BD*, i8*, i64, i32)*, i32 (%struct.BD*, i8*, i32, i32)*, i32 (%struct.BD*, i64, i32)*, [16 x i8], i64, i64 }
diff --git a/test/CodeGen/Thumb2/v8_IT_5.ll b/test/CodeGen/Thumb2/v8_IT_5.ll
index 30250c8d02f0..2da75ad21436 100644
--- a/test/CodeGen/Thumb2/v8_IT_5.ll
+++ b/test/CodeGen/Thumb2/v8_IT_5.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -mtriple=thumbv8 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8 -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7 -arm-atomic-cfg-tidy=0 -arm-restrict-it | FileCheck %s
; CHECK: it ne
; CHECK-NEXT: cmpne
-; CHECK-NEXT: beq
+; CHECK-NEXT: bne [[JUMPTARGET:.LBB[0-9]+_[0-9]+]]
; CHECK: cmp
; CHECK-NEXT: beq
; CHECK-NEXT: %if.else163
@@ -10,6 +10,7 @@
; CHECK-NEXT: b
; CHECK-NEXT: %if.else145
; CHECK-NEXT: mov.w
+; CHECK: [[JUMPTARGET]]:{{.*}}%if.else173
%struct.hc = type { i32, i32, i32, i32 }
diff --git a/test/CodeGen/Thumb2/v8_IT_6.ll b/test/CodeGen/Thumb2/v8_IT_6.ll
new file mode 100644
index 000000000000..b12c4797d244
--- /dev/null
+++ b/test/CodeGen/Thumb2/v8_IT_6.ll
@@ -0,0 +1,100 @@
+; RUN: llc < %s -mtriple=thumbv8 -show-mc-encoding | FileCheck %s
+; CHECK-NOT: orrsne r0, r1 @ encoding: [0x08,0x43]
+; Narrow tORR cannot be predicated and set CPSR at the same time!
+
+declare void @f(i32)
+
+define void @initCloneLookups() #1 {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc24, %entry
+ %cmp108 = phi i1 [ true, %entry ], [ %cmp, %for.inc24 ]
+ %y.0105 = phi i32 [ 1, %entry ], [ %inc25, %for.inc24 ]
+ %notlhs = icmp slt i32 %y.0105, 6
+ %notlhs69 = icmp sgt i32 %y.0105, 4
+ %sub = add nsw i32 %y.0105, -1
+ %cmp1.i = icmp sgt i32 %sub, 5
+ %cmp1.i54 = icmp sgt i32 %y.0105, 5
+ br i1 %cmp108, label %if.then.us, label %for.cond1.preheader.for.cond1.preheader.split_crit_edge
+
+for.cond1.preheader.for.cond1.preheader.split_crit_edge: ; preds = %for.cond1.preheader
+ br i1 %notlhs, label %for.inc.us101, label %for.inc
+
+if.then.us: ; preds = %for.cond1.preheader, %for.inc.us
+ %x.071.us = phi i32 [ %inc.us.pre-phi, %for.inc.us ], [ 1, %for.cond1.preheader ]
+ %notrhs.us = icmp sge i32 %x.071.us, %y.0105
+ %or.cond44.not.us = or i1 %notrhs.us, %notlhs
+ %notrhs70.us = icmp sle i32 %x.071.us, %y.0105
+ %tobool.us = or i1 %notrhs70.us, %notlhs69
+ %or.cond66.us = and i1 %or.cond44.not.us, %tobool.us
+ br i1 %or.cond66.us, label %getHexxagonIndex.exit52.us, label %if.then.us.for.inc.us_crit_edge
+
+if.then.us.for.inc.us_crit_edge: ; preds = %if.then.us
+ %inc.us.pre = add nsw i32 %x.071.us, 1
+ br label %for.inc.us
+
+getHexxagonIndex.exit52.us: ; preds = %if.then.us
+ %cmp3.i.us = icmp slt i32 %x.071.us, 5
+ %or.cond.i.us = and i1 %cmp1.i, %cmp3.i.us
+ %..i.us = sext i1 %or.cond.i.us to i32
+ tail call void @f(i32 %..i.us) #3
+ %add.us = add nsw i32 %x.071.us, 1
+ %cmp3.i55.us = icmp slt i32 %add.us, 5
+ %or.cond.i56.us = and i1 %cmp1.i54, %cmp3.i55.us
+ %..i57.us = sext i1 %or.cond.i56.us to i32
+ tail call void @f(i32 %..i57.us) #3
+ %or.cond.i48.us = and i1 %notlhs69, %cmp3.i55.us
+ %..i49.us = sext i1 %or.cond.i48.us to i32
+ tail call void @f(i32 %..i49.us) #3
+ br label %for.inc.us
+
+for.inc.us: ; preds = %if.then.us.for.inc.us_crit_edge, %getHexxagonIndex.exit52.us
+ %inc.us.pre-phi = phi i32 [ %inc.us.pre, %if.then.us.for.inc.us_crit_edge ], [ %add.us, %getHexxagonIndex.exit52.us ]
+ %exitcond109 = icmp eq i32 %inc.us.pre-phi, 10
+ br i1 %exitcond109, label %for.inc24, label %if.then.us
+
+for.inc.us101: ; preds = %for.cond1.preheader.for.cond1.preheader.split_crit_edge, %for.inc.us101
+ %x.071.us74 = phi i32 [ %add.us89, %for.inc.us101 ], [ 1, %for.cond1.preheader.for.cond1.preheader.split_crit_edge ]
+ %cmp3.i.us84 = icmp slt i32 %x.071.us74, 5
+ %or.cond.i.us85 = and i1 %cmp1.i, %cmp3.i.us84
+ %..i.us86 = sext i1 %or.cond.i.us85 to i32
+ tail call void @f(i32 %..i.us86) #3
+ %add.us89 = add nsw i32 %x.071.us74, 1
+ %cmp3.i55.us93 = icmp slt i32 %add.us89, 5
+ %or.cond.i56.us94 = and i1 %cmp1.i54, %cmp3.i55.us93
+ %..i57.us95 = sext i1 %or.cond.i56.us94 to i32
+ tail call void @f(i32 %..i57.us95) #3
+ %or.cond.i48.us97 = and i1 %notlhs69, %cmp3.i55.us93
+ %..i49.us98 = sext i1 %or.cond.i48.us97 to i32
+ tail call void @f(i32 %..i49.us98) #3
+ %exitcond110 = icmp eq i32 %add.us89, 10
+ br i1 %exitcond110, label %for.inc24, label %for.inc.us101
+
+for.inc: ; preds = %for.cond1.preheader.for.cond1.preheader.split_crit_edge, %for.inc
+ %x.071 = phi i32 [ %add, %for.inc ], [ 1, %for.cond1.preheader.for.cond1.preheader.split_crit_edge ]
+ %cmp3.i = icmp slt i32 %x.071, 5
+ %or.cond.i = and i1 %cmp1.i, %cmp3.i
+ %..i = sext i1 %or.cond.i to i32
+ tail call void @f(i32 %..i) #3
+ %add = add nsw i32 %x.071, 1
+ %cmp3.i55 = icmp slt i32 %add, 5
+ %or.cond.i56 = and i1 %cmp1.i54, %cmp3.i55
+ %..i57 = sext i1 %or.cond.i56 to i32
+ tail call void @f(i32 %..i57) #3
+ %or.cond.i48 = and i1 %notlhs69, %cmp3.i55
+ %..i49 = sext i1 %or.cond.i48 to i32
+ tail call void @f(i32 %..i49) #3
+ %exitcond = icmp eq i32 %add, 10
+ br i1 %exitcond, label %for.inc24, label %for.inc
+
+for.inc24: ; preds = %for.inc, %for.inc.us101, %for.inc.us
+ %inc25 = add nsw i32 %y.0105, 1
+ %cmp = icmp slt i32 %inc25, 10
+ %exitcond111 = icmp eq i32 %inc25, 10
+ br i1 %exitcond111, label %for.end26, label %for.cond1.preheader
+
+for.end26: ; preds = %for.inc24
+ ret void
+}
+
diff --git a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
index d906da43fe11..1b3fc382e890 100644
--- a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
+++ b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | \
+; RUN: llc < %s -march=x86 -mcpu=generic | \
; RUN: grep shld | count 1
;
; Check that the isel does not fold the shld, which already folds a load
diff --git a/test/CodeGen/X86/2006-07-20-InlineAsm.ll b/test/CodeGen/X86/2006-07-20-InlineAsm.ll
index cac47cdab6de..1facf15b9f40 100644
--- a/test/CodeGen/X86/2006-07-20-InlineAsm.ll
+++ b/test/CodeGen/X86/2006-07-20-InlineAsm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86
+; RUN: llc < %s -march=x86 -no-integrated-as
; PR833
@G = weak global i32 0 ; <i32*> [#uses=3]
diff --git a/test/CodeGen/X86/2006-07-31-SingleRegClass.ll b/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
index c4b08a3be283..2a9c8324d36a 100644
--- a/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
+++ b/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
@@ -1,5 +1,5 @@
; PR850
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att | FileCheck %s
+; RUN: llc < %s -march=x86 -x86-asm-syntax=att -no-integrated-as | FileCheck %s
; CHECK: {{movl 4[(]%eax[)],%ebp}}
; CHECK: {{movl 0[(]%eax[)], %ebx}}
diff --git a/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll b/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
index e1f890192d12..4d7c3a185a8b 100644
--- a/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
+++ b/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
@@ -1,8 +1,14 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-darwin | \
-; RUN: grep push | count 3
+; RUN: llc < %s -march=x86 -mtriple=i686-darwin | FileCheck %s
+; RUN: llc < %s -march=x86 -mtriple=i686-darwin -addr-sink-using-gep=1 | FileCheck %s
define void @foo(i8** %buf, i32 %size, i32 %col, i8* %p) nounwind {
entry:
+; CHECK-LABEL: @foo
+; CHECK: push
+; CHECK: push
+; CHECK: push
+; CHECK-NOT: push
+
icmp sgt i32 %size, 0 ; <i1>:0 [#uses=1]
br i1 %0, label %bb.preheader, label %return
diff --git a/test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll b/test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll
index 3b2e443d7d4e..93fb344cbb1d 100644
--- a/test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll
+++ b/test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | grep "mov %gs:72, %eax"
+; RUN: llc < %s -march=x86 -no-integrated-as | grep "mov %gs:72, %eax"
target datalayout = "e-p:32:32"
target triple = "i686-apple-darwin9"
diff --git a/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll b/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
index 366f5830392d..6cf8bf90611c 100644
--- a/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
+++ b/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=yonah -march=x86 | FileCheck %s
+; RUN: llc < %s -mcpu=yonah -march=x86 -no-integrated-as | FileCheck %s
target datalayout = "e-p:32:32"
target triple = "i686-apple-darwin9"
diff --git a/test/CodeGen/X86/2007-05-05-Personality.ll b/test/CodeGen/X86/2007-05-05-Personality.ll
index 7d21b71ac373..b99c58c6e4af 100644
--- a/test/CodeGen/X86/2007-05-05-Personality.ll
+++ b/test/CodeGen/X86/2007-05-05-Personality.ll
@@ -1,7 +1,14 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -o - | FileCheck %s
-
-; CHECK: .cfi_personality 0, __gnat_eh_personality
-; CHECK: .cfi_lsda 0, .Lexception0
+; RUN: llc < %s -mtriple=i686-pc-linux-gnu -o - | FileCheck %s --check-prefix=LIN
+; RUN: llc < %s -mtriple=i386-pc-mingw32 -o - | FileCheck %s --check-prefix=WIN
+; RUN: llc < %s -mtriple=i686-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN64
+
+; LIN: .cfi_personality 0, __gnat_eh_personality
+; LIN: .cfi_lsda 0, .Lexception0
+; WIN: .cfi_personality 0, ___gnat_eh_personality
+; WIN: .cfi_lsda 0, Lexception0
+; WIN64: .seh_handler __gnat_eh_personality
+; WIN64: .seh_handlerdata
@error = external global i8
@@ -10,7 +17,7 @@ entry:
invoke void @raise()
to label %eh_then unwind label %unwind
-unwind: ; preds = %entry
+unwind: ; preds = %entry
%eh_ptr = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gnat_eh_personality to i8*)
catch i8* @error
%eh_select = extractvalue { i8*, i32 } %eh_ptr, 1
diff --git a/test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll b/test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll
deleted file mode 100644
index 15466a18bd62..000000000000
--- a/test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -disable-cfi -march=x86 -mtriple=i686-apple-darwin | FileCheck %s
-
-; CHECK: "_-[NSString(local) isNullOrNil].eh":
-
- %struct.NSString = type { }
- %struct._objc__method_prototype_list = type opaque
- %struct._objc_category = type { i8*, i8*, %struct._objc_method_list*, %struct._objc_method_list*, %struct._objc_protocol**, i32, %struct._prop_list_t* }
- %struct._objc_method = type { %struct.objc_selector*, i8*, i8* }
- %struct._objc_method_list = type opaque
- %struct._objc_module = type { i32, i32, i8*, %struct._objc_symtab* }
- %struct._objc_protocol = type { %struct._objc_protocol_extension*, i8*, %struct._objc_protocol**, %struct._objc__method_prototype_list*, %struct._objc__method_prototype_list* }
- %struct._objc_protocol_extension = type opaque
- %struct._objc_symtab = type { i32, %struct.objc_selector**, i16, i16, [1 x i8*] }
- %struct._prop_list_t = type opaque
- %struct.anon = type { %struct._objc__method_prototype_list*, i32, [1 x %struct._objc_method] }
- %struct.objc_selector = type opaque
-@"\01L_OBJC_SYMBOLS" = internal global { i32, i32, i16, i16, [1 x %struct._objc_category*] } {
- i32 0,
- i32 0,
- i16 0,
- i16 1,
- [1 x %struct._objc_category*] [ %struct._objc_category* bitcast ({ i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 }* @"\01L_OBJC_CATEGORY_NSString_local" to %struct._objc_category*) ] }, section "__OBJC,__symbols,regular,no_dead_strip" ; <{ i32, i32, i16, i16, [1 x %struct._objc_category*] }*> [#uses=2]
-@"\01L_OBJC_CATEGORY_INSTANCE_METHODS_NSString_local" = internal global { i32, i32, [1 x %struct._objc_method] } {
- i32 0,
- i32 1,
- [1 x %struct._objc_method] [ %struct._objc_method {
- %struct.objc_selector* bitcast ([12 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*),
- i8* getelementptr ([7 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0),
- i8* bitcast (i8 (%struct.NSString*, %struct.objc_selector*) * @"-[NSString(local) isNullOrNil]" to i8*) } ] }, section "__OBJC,__cat_inst_meth,regular,no_dead_strip" ; <{ i32, i32, [1 x %struct._objc_method] }*> [#uses=3]
-@"\01L_OBJC_CATEGORY_NSString_local" = internal global { i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 } {
- i8* getelementptr ([6 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0),
- i8* getelementptr ([9 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0),
- %struct._objc_method_list* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_CATEGORY_INSTANCE_METHODS_NSString_local" to %struct._objc_method_list*),
- i32 0,
- i32 0,
- i32 28,
- i32 0 }, section "__OBJC,__category,regular,no_dead_strip" ; <{ i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 }*> [#uses=2]
-@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC,__image_info,regular" ; <[2 x i32]*> [#uses=1]
-@"\01L_OBJC_MODULES" = internal global %struct._objc_module {
- i32 7,
- i32 16,
- i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0),
- %struct._objc_symtab* bitcast ({ i32, i32, i16, i16, [1 x %struct._objc_category*] }* @"\01L_OBJC_SYMBOLS" to %struct._objc_symtab*) }, section "__OBJC,__module_info,regular,no_dead_strip" ; <%struct._objc_module*> [#uses=1]
-@"\01.objc_class_ref_NSString" = internal global i8* @"\01.objc_class_name_NSString" ; <i8**> [#uses=0]
-@"\01.objc_class_name_NSString" = external global i8 ; <i8*> [#uses=1]
-@"\01.objc_category_name_NSString_local" = constant i32 0 ; <i32*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals" ; <[1 x i8]*> [#uses=2]
-@"\01L_OBJC_CLASS_NAME_1" = internal global [9 x i8] c"NSString\00", section "__TEXT,__cstring,cstring_literals" ; <[9 x i8]*> [#uses=2]
-@"\01L_OBJC_CLASS_NAME_0" = internal global [6 x i8] c"local\00", section "__TEXT,__cstring,cstring_literals" ; <[6 x i8]*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_0" = internal global [12 x i8] c"isNullOrNil\00", section "__TEXT,__cstring,cstring_literals" ; <[12 x i8]*> [#uses=3]
-@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [7 x i8] c"c8@0:4\00", section "__TEXT,__cstring,cstring_literals" ; <[7 x i8]*> [#uses=2]
-@llvm.used = appending global [11 x i8*] [ i8* bitcast ({ i32, i32, i16, i16, [1 x %struct._objc_category*] }* @"\01L_OBJC_SYMBOLS" to i8*), i8* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_CATEGORY_INSTANCE_METHODS_NSString_local" to i8*), i8* bitcast ({ i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 }* @"\01L_OBJC_CATEGORY_NSString_local" to i8*), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*), i8* bitcast (%struct._objc_module* @"\01L_OBJC_MODULES" to i8*), i8* bitcast (i32* @"\01.objc_category_name_NSString_local" to i8*), i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), i8* getelementptr ([9 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0), i8* getelementptr ([6 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i8* getelementptr ([12 x i8]* @"\01L_OBJC_METH_VAR_NAME_0", i32 0, i32 0), i8* getelementptr ([7 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0) ], section "llvm.metadata" ; <[11 x i8*]*> [#uses=0]
-
-define internal signext i8 @"-[NSString(local) isNullOrNil]"(%struct.NSString* %self, %struct.objc_selector* %_cmd) {
-entry:
- %self_addr = alloca %struct.NSString* ; <%struct.NSString**> [#uses=1]
- %_cmd_addr = alloca %struct.objc_selector* ; <%struct.objc_selector**> [#uses=1]
- %retval = alloca i8, align 1 ; <i8*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %struct.NSString* %self, %struct.NSString** %self_addr
- store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i8* %retval ; <i8> [#uses=1]
- ret i8 %retval1
-}
diff --git a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll b/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
deleted file mode 100644
index 0ae1897e60e9..000000000000
--- a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep -- -86
-
-define i16 @f(<4 x float>* %tmp116117.i1061.i) nounwind {
-entry:
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:0 [#uses=167]
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:1 [#uses=170]
- alloca [4 x <4 x i32>] ; <[4 x <4 x i32>]*>:2 [#uses=12]
- %.sub6235.i = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0 ; <<4 x float>*> [#uses=76]
- %.sub.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0 ; <<4 x float>*> [#uses=59]
-
- %tmp124.i1062.i = getelementptr <4 x float>* %tmp116117.i1061.i, i32 63 ; <<4 x float>*> [#uses=1]
- %tmp125.i1063.i = load <4 x float>* %tmp124.i1062.i ; <<4 x float>> [#uses=5]
- %tmp828.i1077.i = shufflevector <4 x float> %tmp125.i1063.i, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=4]
- %tmp704.i1085.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp712.i1086.i = call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %tmp704.i1085.i, <4 x float> %tmp828.i1077.i ) ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp712.i1086.i, <4 x float>* %.sub.i
-
- %tmp2587.i1145.gep.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0, i32 2 ; <float*> [#uses=1]
- %tmp5334.i = load float* %tmp2587.i1145.gep.i ; <float> [#uses=5]
- %tmp2723.i1170.i = insertelement <4 x float> undef, float %tmp5334.i, i32 2 ; <<4 x float>> [#uses=5]
- store <4 x float> %tmp2723.i1170.i, <4 x float>* %.sub6235.i
-
- %tmp1406.i1367.i = shufflevector <4 x float> %tmp2723.i1170.i, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
- %tmp84.i1413.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp89.i1415.i = fmul <4 x float> %tmp84.i1413.i, %tmp1406.i1367.i ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp89.i1415.i, <4 x float>* %.sub.i
- ret i16 0
-}
-
-declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
diff --git a/test/CodeGen/X86/2007-10-17-IllegalAsm.ll b/test/CodeGen/X86/2007-10-17-IllegalAsm.ll
deleted file mode 100644
index c0bb55ed14ef..000000000000
--- a/test/CodeGen/X86/2007-10-17-IllegalAsm.ll
+++ /dev/null
@@ -1,87 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | grep addb | not grep x
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | grep cmpb | not grep x
-; PR1734
-
-target triple = "x86_64-unknown-linux-gnu"
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i8, i8, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, %struct.tree_node*, i8, i8, i8 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.machine_function = type { %struct.stack_local_entry*, i8*, %struct.rtx_def*, i32, i32, i32, i32, i32 }
- %struct.rtunion = type { i8* }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.stack_local_entry = type opaque
- %struct.temp_slot = type opaque
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.u = type { [1 x %struct.rtunion] }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_data = type { [1 x i64] }
- %struct.varray_head_tag = type { i64, i64, i32, i8*, %struct.varray_data }
- %union.tree_ann_d = type opaque
-
-define void @layout_type(%struct.tree_node* %type) {
-entry:
- %tmp32 = load i32* null, align 8 ; <i32> [#uses=3]
- %tmp3435 = trunc i32 %tmp32 to i8 ; <i8> [#uses=1]
- %tmp53 = icmp eq %struct.tree_node* null, null ; <i1> [#uses=1]
- br i1 %tmp53, label %cond_next57, label %UnifiedReturnBlock
-
-cond_next57: ; preds = %entry
- %tmp65 = and i32 %tmp32, 255 ; <i32> [#uses=1]
- switch i32 %tmp65, label %UnifiedReturnBlock [
- i32 6, label %bb140
- i32 7, label %bb140
- i32 8, label %bb140
- i32 13, label %bb478
- ]
-
-bb140: ; preds = %cond_next57, %cond_next57, %cond_next57
- %tmp219 = load i32* null, align 8 ; <i32> [#uses=1]
- %tmp221222 = trunc i32 %tmp219 to i8 ; <i8> [#uses=1]
- %tmp223 = icmp eq i8 %tmp221222, 24 ; <i1> [#uses=1]
- br i1 %tmp223, label %cond_true226, label %cond_next340
-
-cond_true226: ; preds = %bb140
- switch i8 %tmp3435, label %cond_true288 [
- i8 6, label %cond_next340
- i8 9, label %cond_next340
- i8 7, label %cond_next340
- i8 8, label %cond_next340
- i8 10, label %cond_next340
- ]
-
-cond_true288: ; preds = %cond_true226
- unreachable
-
-cond_next340: ; preds = %cond_true226, %cond_true226, %cond_true226, %cond_true226, %cond_true226, %bb140
- ret void
-
-bb478: ; preds = %cond_next57
- br i1 false, label %cond_next500, label %cond_true497
-
-cond_true497: ; preds = %bb478
- unreachable
-
-cond_next500: ; preds = %bb478
- %tmp513 = load i32* null, align 8 ; <i32> [#uses=1]
- %tmp545 = and i32 %tmp513, 8192 ; <i32> [#uses=1]
- %tmp547 = and i32 %tmp32, -8193 ; <i32> [#uses=1]
- %tmp548 = or i32 %tmp547, %tmp545 ; <i32> [#uses=1]
- store i32 %tmp548, i32* null, align 8
- ret void
-
-UnifiedReturnBlock: ; preds = %cond_next57, %entry
- ret void
-}
diff --git a/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll b/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
index 984094d86a27..d02346d103c1 100644
--- a/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
+++ b/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s
+; RUN: llc -no-integrated-as < %s
; PR1748
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll b/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
index 6b871aa3a4d4..ec3bce9c666a 100644
--- a/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
+++ b/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
+; RUN: llc -no-integrated-as < %s -mtriple=x86_64-unknown-linux-gnu
; PR1767
define void @xor_sse_2(i64 %bytes, i64* %p1, i64* %p2) {
diff --git a/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll b/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
index c4670242b531..d1699d557113 100644
--- a/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
+++ b/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=static | FileCheck %s
+; RUN: llc < %s -relocation-model=static -no-integrated-as | FileCheck %s
; PR1761
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-pc-linux"
diff --git a/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll b/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
index d2d5149de3aa..35857b7e01e6 100644
--- a/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
+++ b/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep xor | grep CPI
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+; CHECK: xorpd {{.*}}{{LCPI0_0|__xmm@}}
define void @casin({ double, double }* sret %agg.result, double %z.0, double %z.1) nounwind {
entry:
%memtmp = alloca { double, double }, align 8 ; <{ double, double }*> [#uses=3]
diff --git a/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll b/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
index b06b249a6326..319e884139ae 100644
--- a/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
+++ b/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -no-integrated-as < %s | FileCheck %s
; PR2078
; The clobber list says that "ax" is clobbered. Make sure that eax isn't
; allocated to the input/output register.
diff --git a/test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll b/test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll
index 0b4eb3a3b9b2..11b55a6e5ac7 100644
--- a/test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll
+++ b/test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86
+; RUN: llc < %s -march=x86 -no-integrated-as
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
diff --git a/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll b/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
index e673d315a435..a0106d7798d5 100644
--- a/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
+++ b/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic | grep TLSGD | count 2
+; RUN: llc < %s -relocation-model=pic | FileCheck %s
; PR2137
; ModuleID = '1.c'
@@ -8,9 +8,11 @@ target triple = "i386-pc-linux-gnu"
@__resp = thread_local global %struct.__res_state* @_res ; <%struct.__res_state**> [#uses=1]
@_res = global %struct.__res_state zeroinitializer, section ".bss" ; <%struct.__res_state*> [#uses=1]
-@__libc_resp = hidden alias %struct.__res_state** @__resp ; <%struct.__res_state**> [#uses=2]
+@__libc_resp = hidden thread_local alias %struct.__res_state** @__resp ; <%struct.__res_state**> [#uses=2]
define i32 @foo() {
+; CHECK-LABEL: foo:
+; CHECK: leal __libc_resp@TLSLD
entry:
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
@@ -24,6 +26,8 @@ return: ; preds = %entry
}
define i32 @bar() {
+; CHECK-LABEL: bar:
+; CHECK: leal __libc_resp@TLSLD
entry:
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
diff --git a/test/CodeGen/X86/2008-03-14-SpillerCrash.ll b/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
index 18b3714f851f..6b374a7f6f08 100644
--- a/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
+++ b/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
@@ -6,7 +6,7 @@
%struct.locale_data = type { i8*, i8*, i32, i32, { void (%struct.locale_data*)*, %struct.anon }, i32, i32, i32, [0 x %struct.locale_data_value] }
%struct.locale_data_value = type { i32* }
-@wcstoll_l = alias i64 (i32*, i32**, i32, %struct.__locale_struct*)* @__wcstoll_l ; <i64 (i32*, i32**, i32, %struct.__locale_struct*)*> [#uses=0]
+@wcstoll_l = alias i64 (i32*, i32**, i32, %struct.__locale_struct*)* @__wcstoll_l
define i64 @____wcstoll_l_internal(i32* %nptr, i32** %endptr, i32 %base, i32 %group, %struct.__locale_struct* %loc) nounwind {
entry:
diff --git a/test/CodeGen/X86/2008-04-02-unnamedEH.ll b/test/CodeGen/X86/2008-04-02-unnamedEH.ll
index ab8ec801b049..70812eaf70ff 100644
--- a/test/CodeGen/X86/2008-04-02-unnamedEH.ll
+++ b/test/CodeGen/X86/2008-04-02-unnamedEH.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-cfi | FileCheck %s
+; RUN: llc < %s | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
@@ -11,6 +11,8 @@ define internal void @""() {
call i32 @_Z3barv( ) ; <i32>:4 [#uses=1]
ret void
}
-; CHECK: unnamed_1.eh
+
+; CHECK: ___unnamed_1:
+; CHECK-NEXT: .cfi_startproc
declare i32 @_Z3barv()
diff --git a/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll b/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll
index 5089e8c5b69d..d439e827e819 100644
--- a/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll
+++ b/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx
+; RUN: llc < %s -mtriple=i686-pc-linux -mattr=+mmx
define i32 @t2() nounwind {
entry:
diff --git a/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll b/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll
index d4805b4bb63e..6d45f1f00301 100644
--- a/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll
+++ b/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -no-integrated-as < %s | FileCheck %s
; rdar://5720231
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
diff --git a/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll b/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
index 496779c468f4..51064f1d2173 100644
--- a/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
+++ b/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK: movq %rsp, %rbp
; CHECK: popq %rbp
; CHECK: movq %rcx, %rsp
-; CHECK: ret # eh_return, addr: %rcx
+; CHECK: retq # eh_return, addr: %rcx
define i8* @test(i64 %a, i8* %b) {
entry:
call void @llvm.eh.unwind.init()
diff --git a/test/CodeGen/X86/2008-09-18-inline-asm-2.ll b/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
index 5c2fbeee5c70..f4a43a1e978a 100644
--- a/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
+++ b/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=x86 -regalloc=fast -optimize-regalloc=0 | FileCheck %s
-; RUN: llc < %s -march=x86 -regalloc=basic | FileCheck %s
-; RUN: llc < %s -march=x86 -regalloc=greedy | FileCheck %s
+; RUN: llc < %s -march=x86 -regalloc=fast -optimize-regalloc=0 -no-integrated-as | FileCheck %s
+; RUN: llc < %s -march=x86 -regalloc=basic -no-integrated-as | FileCheck %s
+; RUN: llc < %s -march=x86 -regalloc=greedy -no-integrated-as | FileCheck %s
; The 1st, 2nd, 3rd and 5th registers must all be different. The registers
; referenced in the 4th and 6th operands must not be the same as the 1st or 5th
diff --git a/test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll b/test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll
index b2e6061ff91c..2b2f704349b6 100644
--- a/test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll
+++ b/test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
+; RUN: llc < %s -march=x86 -no-integrated-as
+; RUN: llc < %s -march=x86-64 -no-integrated-as
define void @test(i64 %x) nounwind {
entry:
diff --git a/test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll b/test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll
index 353d1c75216b..e23dfe5a6a1d 100644
--- a/test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll
+++ b/test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
+; RUN: llc < %s -march=x86 -no-integrated-as
+; RUN: llc < %s -march=x86-64 -no-integrated-as
; from gcc.c-torture/compile/920520-1.c
diff --git a/test/CodeGen/X86/2008-12-12-PrivateEHSymbol.ll b/test/CodeGen/X86/2008-12-12-PrivateEHSymbol.ll
deleted file mode 100644
index 2e278118b7ad..000000000000
--- a/test/CodeGen/X86/2008-12-12-PrivateEHSymbol.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -disable-cfi -march=x86-64 -mtriple=x86_64-apple-darwin9 | grep ^__Z1fv.eh
-; RUN: llc < %s -disable-cfi -march=x86 -mtriple=i386-apple-darwin9 | grep ^__Z1fv.eh
-
-define void @_Z1fv() {
-entry:
- br label %return
-
-return:
- ret void
-}
diff --git a/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll b/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll
index 75496518afa6..5004f04bf8fd 100644
--- a/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll
+++ b/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -march=x86 -no-integrated-as | FileCheck %s
; ModuleID = 'shant.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
diff --git a/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll b/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
index 3d70b58686b1..bd1b47a588ef 100644
--- a/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
+++ b/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin
+; RUN: llc < %s -mtriple=i386-apple-darwin -no-integrated-as
; rdar://6781755
; PR3934
diff --git a/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll b/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
index 7468acb95f11..fa240f64c300 100644
--- a/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
+++ b/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=static | FileCheck %s
+; RUN: llc < %s -relocation-model=static -no-integrated-as | FileCheck %s
; PR4152
; CHECK: {{1: ._pv_cpu_ops[+]8}}
diff --git a/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
index 1259cf47b2bc..dfb98bb1ab39 100644
--- a/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
+++ b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
@@ -1,7 +1,7 @@
; RUN: llc -mcpu=generic -mtriple=x86_64-mingw32 < %s | FileCheck %s
; CHECK: subq $40, %rsp
-; CHECK: movaps %xmm8, (%rsp)
-; CHECK: movaps %xmm7, 16(%rsp)
+; CHECK: movaps %xmm8, 16(%rsp)
+; CHECK: movaps %xmm7, (%rsp)
define i32 @a() nounwind {
entry:
diff --git a/test/CodeGen/X86/2009-08-23-linkerprivate.ll b/test/CodeGen/X86/2009-08-23-linkerprivate.ll
deleted file mode 100644
index 90fac15442aa..000000000000
--- a/test/CodeGen/X86/2009-08-23-linkerprivate.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin | FileCheck %s
-
-; ModuleID = '/Volumes/MacOS9/tests/WebKit/JavaScriptCore/profiler/ProfilerServer.mm'
-
-@"\01l_objc_msgSend_fixup_alloc" = linker_private_weak hidden global i32 0, section "__DATA, __objc_msgrefs, coalesced", align 16
-
-; CHECK: .globl l_objc_msgSend_fixup_alloc
-; CHECK: .weak_definition l_objc_msgSend_fixup_alloc
diff --git a/test/CodeGen/X86/2009-09-19-earlyclobber.ll b/test/CodeGen/X86/2009-09-19-earlyclobber.ll
index 66f51180509f..7df62fd8c37a 100644
--- a/test/CodeGen/X86/2009-09-19-earlyclobber.ll
+++ b/test/CodeGen/X86/2009-09-19-earlyclobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -no-integrated-as < %s | FileCheck %s
; ModuleID = '4964.c'
; PR 4964
; Registers other than RAX, RCX are OK, but they must be different.
diff --git a/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll b/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
index 08a99e3f6618..b828c27e7826 100644
--- a/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
+++ b/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck %s
; rdar://7396984
-@str = private constant [28 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 1
+@str = private unnamed_addr constant [28 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 1
define void @t(i32 %count) ssp nounwind {
entry:
diff --git a/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll b/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
index b1664470551b..5c10c55ea3ee 100644
--- a/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
+++ b/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -no-integrated-as | FileCheck %s
; pr5391
define void @t() nounwind ssp {
diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
index f9bf3109ea10..850f678c9c2c 100644
--- a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
+++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
@@ -11,9 +11,9 @@ entry:
; CHECK: movl 4([[REG]]), %edx
; CHECK: LBB0_1:
; CHECK: movl %eax, %ebx
-; CHECK: addl {{%[a-z]+}}, %ebx
+; CHECK: addl $1, %ebx
; CHECK: movl %edx, %ecx
-; CHECK: adcl {{%[a-z]+}}, %ecx
+; CHECK: adcl $0, %ecx
; CHECK: lock
; CHECK-NEXT: cmpxchg8b ([[REG]])
; CHECK-NEXT: jne
diff --git a/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll b/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
index 74a5ec28db1e..fc8c895af5b4 100644
--- a/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
+++ b/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -regalloc=fast | FileCheck %s
+; RUN: llc < %s -O0 -regalloc=fast -no-integrated-as | FileCheck %s
; PR6520
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
diff --git a/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll b/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
index c5736eb9b449..e11b5382c23f 100644
--- a/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
+++ b/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
@@ -26,7 +26,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786484, i32 0, metadata !1, metadata !"ret", metadata !"ret", metadata !"", metadata !1, i32 7, metadata !3, i1 false, i1 true, null, null} ; [ DW_TAG_variable ]
!1 = metadata !{i32 786473, metadata !36} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !36, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, metadata !37, metadata !37, metadata !32, metadata !31, metadata !31, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !36, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, metadata !37, metadata !37, metadata !32, metadata !31, metadata !37, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786468, metadata !36, metadata !1, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
!4 = metadata !{i32 786689, metadata !5, metadata !"x", metadata !1, i32 12, metadata !3, i32 0, null} ; [ DW_TAG_arg_variable ]
!5 = metadata !{i32 786478, metadata !36, metadata !1, metadata !"foo", metadata !"foo", metadata !"foo", i32 13, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true, void (i32)* @foo, null, null, metadata !33, i32 13} ; [ DW_TAG_subprogram ]
@@ -61,7 +61,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!34 = metadata !{metadata !8}
!35 = metadata !{metadata !18, metadata !25, metadata !26}
!36 = metadata !{metadata !"foo.c", metadata !"/tmp/"}
-!37 = metadata !{i32 0}
+!37 = metadata !{}
; The variable bar:myvar changes registers after the first movq.
; It is clobbered by popq %rbx
diff --git a/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll b/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
index 9b47bb75bf16..0f8855d1267e 100644
--- a/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
+++ b/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc -regalloc=fast -optimize-regalloc=0 < %s | FileCheck %s
+; RUN: llc -regalloc=fast -optimize-regalloc=0 -no-integrated-as < %s | FileCheck %s
; PR7382
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll b/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
index 68a6a134de5c..0df9dc1cb769 100644
--- a/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
+++ b/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -mtriple=i686-pc-mingw32
+; RUN: llc < %s -disable-fp-elim -mtriple=i686-pc-mingw32 -no-integrated-as
%struct.__SEH2Frame = type {}
diff --git a/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll b/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
index e1491a03d8a8..d7bc21f6393a 100644
--- a/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
+++ b/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -O0 | FileCheck %s
+; RUN: llc < %s -march=x86 -O0 -no-integrated-as | FileCheck %s
; PR7509
target triple = "i386-apple-darwin10"
%asmtype = type { i32, i8*, i32, i32 }
diff --git a/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll b/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
index 82dac9d9930e..a0798ae10d7c 100644
--- a/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
+++ b/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin11 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -no-integrated-as | FileCheck %s
; Any register is OK for %0, but it must be a register, not memory.
define i32 @foo() nounwind ssp {
diff --git a/test/CodeGen/X86/2010-07-02-asm-alignstack.ll b/test/CodeGen/X86/2010-07-02-asm-alignstack.ll
index 0bbb24f6ecdf..4302adda5151 100644
--- a/test/CodeGen/X86/2010-07-02-asm-alignstack.ll
+++ b/test/CodeGen/X86/2010-07-02-asm-alignstack.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -no-integrated-as | FileCheck %s
define void @foo() nounwind ssp {
entry:
diff --git a/test/CodeGen/X86/2010-07-06-asm-RIP.ll b/test/CodeGen/X86/2010-07-06-asm-RIP.ll
index 9526b8d4cdc7..818bbc6a5bc0 100644
--- a/test/CodeGen/X86/2010-07-06-asm-RIP.ll
+++ b/test/CodeGen/X86/2010-07-06-asm-RIP.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -no-integrated-as | FileCheck %s
; PR 4752
@n = global i32 0 ; <i32*> [#uses=2]
diff --git a/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll b/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll
index 97cbe3ea5a02..306e22ae5f15 100644
--- a/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll
+++ b/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -no-integrated-as | FileCheck %s
; PR 7528
; formerly crashed
diff --git a/test/CodeGen/X86/2010-08-04-StackVariable.ll b/test/CodeGen/X86/2010-08-04-StackVariable.ll
index 91fec3beefcb..09e34ef6b7f5 100644
--- a/test/CodeGen/X86/2010-08-04-StackVariable.ll
+++ b/test/CodeGen/X86/2010-08-04-StackVariable.ll
@@ -76,7 +76,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!49}
-!46 = metadata !{metadata !0, metadata !9, metadata !16, metadata !17, metadata !20}
+!46 = metadata !{metadata !16, metadata !17, metadata !20}
!0 = metadata !{i32 786478, metadata !47, metadata !1, metadata !"SVal", metadata !"SVal", metadata !"", i32 11, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i1 false, i1 false, null, null, null, null, i32 11} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786451, metadata !47, metadata !2, metadata !"SVal", i32 1, i64 128, i64 64, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [SVal] [line 1, size 128, align 64, offset 0] [def] [from ]
diff --git a/test/CodeGen/X86/2010-09-16-EmptyFilename.ll b/test/CodeGen/X86/2010-09-16-EmptyFilename.ll
index 9aa41c32c366..a65b632691ae 100644
--- a/test/CodeGen/X86/2010-09-16-EmptyFilename.ll
+++ b/test/CodeGen/X86/2010-09-16-EmptyFilename.ll
@@ -1,6 +1,6 @@
; RUN: llc -O0 -mtriple=x86_64-apple-darwin10 < %s - | FileCheck %s
; Radar 8286101
-; CHECK: .file 2 "<stdin>"
+; CHECK: .file {{[0-9]+}} "<stdin>"
define i32 @foo() nounwind ssp {
entry:
diff --git a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
index 0e4118a2a912..ebf51a5d660a 100644
--- a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
+++ b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
@@ -18,7 +18,8 @@ entry:
loop:
; CHECK: lock
; CHECK-NEXT: cmpxchg8b
- %r = cmpxchg i64* %ptr, i64 0, i64 1 monotonic
+ %pair = cmpxchg i64* %ptr, i64 0, i64 1 monotonic monotonic
+ %r = extractvalue { i64, i1 } %pair, 0
%stored1 = icmp eq i64 %r, 0
br i1 %stored1, label %loop, label %continue
continue:
diff --git a/test/CodeGen/X86/2010-12-02-MC-Set.ll b/test/CodeGen/X86/2010-12-02-MC-Set.ll
deleted file mode 100644
index 5a407d3f9972..000000000000
--- a/test/CodeGen/X86/2010-12-02-MC-Set.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -disable-dot-loc -mtriple=x86_64-apple-darwin -O0 | FileCheck %s
-
-
-define void @foo() nounwind ssp {
-entry:
- ret void, !dbg !5
-}
-
-!llvm.dbg.cu = !{!2}
-!llvm.module.flags = !{!10}
-!7 = metadata !{metadata !0}
-
-!0 = metadata !{i32 786478, metadata !9, metadata !1, metadata !"foo", metadata !"foo", metadata !"", i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @foo, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
-!1 = metadata !{i32 786473, metadata !9} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !9, i32 12, metadata !"clang version 2.9 (trunk 120563)", i1 false, metadata !"", i32 0, metadata !8, metadata !8, metadata !7, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 786453, metadata !9, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{null}
-!5 = metadata !{i32 5, i32 1, metadata !6, null}
-!6 = metadata !{i32 786443, metadata !9, metadata !0, i32 3, i32 16, i32 0} ; [ DW_TAG_lexical_block ]
-!8 = metadata !{i32 0}
-!9 = metadata !{metadata !"e.c", metadata !"/private/tmp"}
-
-; CHECK: .subsections_via_symbols
-; CHECK-NEXT: __debug_line
-; CHECK-NEXT: Lline_table_start0
-; CHECK-NEXT: Ltmp{{[0-9]}} = (Ltmp
-!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
index d5340300df54..625a35161c11 100644
--- a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
+++ b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
@@ -1,14 +1,20 @@
-; RUN: llc < %s | FileCheck %s
-; RUN: llc < %s -regalloc=basic | FileCheck %s
+; RUN: llc < %s -filetype=obj | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+; RUN: llc < %s -filetype=obj -regalloc=basic | llvm-dwarfdump -debug-dump=info - | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"
; Check debug info for variable z_s
-;CHECK: .long Lset14
-;CHECK-NEXT: ## DW_AT_decl_file
-;CHECK-NEXT: ## DW_AT_decl_line
-;CHECK-NEXT: ## DW_AT_type
-;CHECK-NEXT: ## DW_AT_location
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_TAG_variable
+; CHECK: DW_TAG_variable
+; CHECK-NEXT: DW_AT_location
+; CHECK-NEXT: DW_AT_name {{.*}} "z_s"
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_type{{.*}}{[[TYPE:.*]]}
+; CHECK: [[TYPE]]:
+; CHECK-NEXT: DW_AT_name {{.*}} "int"
@.str1 = private unnamed_addr constant [14 x i8] c"m=%u, z_s=%d\0A\00"
diff --git a/test/CodeGen/X86/2011-05-09-loaduse.ll b/test/CodeGen/X86/2011-05-09-loaduse.ll
index adcea5cf6159..c772e4c7f4e4 100644
--- a/test/CodeGen/X86/2011-05-09-loaduse.ll
+++ b/test/CodeGen/X86/2011-05-09-loaduse.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86 -mcpu=corei7 | FileCheck %s
;CHECK-LABEL: test:
-;CHECK-not: pshufd
+;CHECK-NOT: pshufd
;CHECK: ret
define float @test(<4 x float>* %A) nounwind {
entry:
diff --git a/test/CodeGen/X86/2011-10-11-SpillDead.ll b/test/CodeGen/X86/2011-10-11-SpillDead.ll
index 8e70d6543ac8..19c3d6ca727e 100644
--- a/test/CodeGen/X86/2011-10-11-SpillDead.ll
+++ b/test/CodeGen/X86/2011-10-11-SpillDead.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -verify-regalloc
+; RUN: llc < %s -verify-regalloc -no-integrated-as
; PR11125
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.7"
diff --git a/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index e08c5b28c5ec..222068dc579f 100644
--- a/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -1,12 +1,10 @@
-; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
-target triple = "x86_64-unknown-linux-gnu"
-
-; Make sure that we don't crash when legalizng vselect and vsetcc and that
+; Make sure that we don't crash when legalizing vselect and vsetcc and that
; we are able to generate vector blend instructions.
-; CHECK: simple_widen
-; CHECK: blend
+; CHECK-LABEL: simple_widen
+; CHECK-NOT: blend
; CHECK: ret
define void @simple_widen() {
entry:
@@ -15,7 +13,7 @@ entry:
ret void
}
-; CHECK: complex_inreg_work
+; CHECK-LABEL: complex_inreg_work
; CHECK: blend
; CHECK: ret
@@ -27,8 +25,8 @@ entry:
ret void
}
-; CHECK: zero_test
-; CHECK: blend
+; CHECK-LABEL: zero_test
+; CHECK: xorps %xmm0, %xmm0
; CHECK: ret
define void @zero_test() {
@@ -38,7 +36,7 @@ entry:
ret void
}
-; CHECK: full_test
+; CHECK-LABEL: full_test
; CHECK: blend
; CHECK: ret
diff --git a/test/CodeGen/X86/2011-12-28-vselecti8.ll b/test/CodeGen/X86/2011-12-28-vselecti8.ll
index dbc122ac6e40..c91646640b8f 100644
--- a/test/CodeGen/X86/2011-12-28-vselecti8.ll
+++ b/test/CodeGen/X86/2011-12-28-vselecti8.ll
@@ -3,10 +3,20 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-darwin11.2.0"
-; CHECK: @foo8
-; CHECK: psll
-; CHECK: psraw
-; CHECK: pblendvb
+; During legalization, the vselect mask is 'type legalized' into a
+; wider BUILD_VECTOR. This causes the introduction of a new
+; sign_extend_inreg in the DAG.
+;
+; A sign_extend_inreg of a vector of ConstantSDNode or undef can
+; always be folded into a simple build_vector.
+;
+; Make sure that the sign_extend_inreg is simplified and that we
+; don't generate psll, psraw and pblendvb from the vselect.
+
+; CHECK-LABEL: foo8
+; CHECK-NOT: psll
+; CHECK-NOT: psraw
+; CHECK-NOT: pblendvb
; CHECK: ret
define void @foo8(float* nocapture %RET) nounwind {
allocas:
@@ -17,4 +27,3 @@ allocas:
ret void
}
-
diff --git a/test/CodeGen/X86/2012-08-17-legalizer-crash.ll b/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
index 971e56d20ea2..0d18267fcde1 100644
--- a/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
+++ b/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
@@ -27,5 +27,5 @@ if.end: ; preds = %if.then, %entry
; CHECK-LABEL: fn1:
; CHECK: shrq $32, [[REG:%.*]]
-; CHECK: je
+; CHECK: sete
}
diff --git a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll b/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
index d41b43228b6c..62ee1e15fda0 100644
--- a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
@@ -38,10 +38,8 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!12}
-!0 = metadata !{i32 786449, metadata !11, i32 12, metadata !"clang version 3.3 (trunk 168918) (llvm/trunk 168920)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, null, metadata !""} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/Olden/bh/newbh.c] [DW_LANG_C99]
-!1 = metadata !{metadata !2}
-!2 = metadata !{i32 0}
-!3 = metadata !{null}
+!0 = metadata !{i32 786449, metadata !11, i32 12, metadata !"clang version 3.3 (trunk 168918) (llvm/trunk 168920)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !2, null, metadata !""} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/Olden/bh/newbh.c] [DW_LANG_C99]
+!2 = metadata !{}
!4 = metadata !{i32 786689, null, metadata !"hg", metadata !5, i32 67109589, metadata !6, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [hg] [line 725]
!5 = metadata !{i32 786473, metadata !11} ; [ DW_TAG_file_type ]
!6 = metadata !{i32 786454, metadata !11, null, metadata !"hgstruct", i32 492, i64 0, i64 0, i64 0, i32 0, metadata !7} ; [ DW_TAG_typedef ] [hgstruct] [line 492, size 0, align 0, offset 0] [from ]
diff --git a/test/CodeGen/X86/2012-11-30-misched-dbg.ll b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
index 7befa6b4757d..36667def6110 100644
--- a/test/CodeGen/X86/2012-11-30-misched-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
@@ -65,20 +65,19 @@ declare i32 @__sprintf_chk(i8*, i32, i64, i8*, ...)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!35}
-!0 = metadata !{i32 786449, metadata !19, i32 12, metadata !"clang version 3.3 (trunk 168918) (llvm/trunk 168920)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, null, metadata !""} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/MiBench/consumer-typeset/MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c] [DW_LANG_C99]
+!0 = metadata !{i32 786449, metadata !19, i32 12, metadata !"clang version 3.3 (trunk 168918) (llvm/trunk 168920)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !2, null, metadata !""} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/MiBench/consumer-typeset/MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c] [DW_LANG_C99]
!1 = metadata !{metadata !2}
-!2 = metadata !{i32 0}
-!3 = metadata !{}
+!2 = metadata !{}
!4 = metadata !{i32 786688, metadata !5, metadata !"num1", metadata !14, i32 815, metadata !15, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [num1] [line 815]
-!5 = metadata !{i32 786443, metadata !6, i32 815, i32 0, metadata !14, i32 177} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!6 = metadata !{i32 786443, metadata !7, i32 812, i32 0, metadata !14, i32 176} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!7 = metadata !{i32 786443, metadata !8, i32 807, i32 0, metadata !14, i32 175} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!8 = metadata !{i32 786443, metadata !9, i32 440, i32 0, metadata !14, i32 94} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!9 = metadata !{i32 786443, metadata !10, i32 435, i32 0, metadata !14, i32 91} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!10 = metadata !{i32 786443, metadata !11, i32 434, i32 0, metadata !14, i32 90} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!11 = metadata !{i32 786443, metadata !12, i32 250, i32 0, metadata !14, i32 24} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!12 = metadata !{i32 786443, metadata !13, i32 249, i32 0, metadata !14, i32 23} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!13 = metadata !{i32 786443, metadata !3, i32 221, i32 0, metadata !14, i32 19} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!5 = metadata !{i32 786443, metadata !14, metadata !6, i32 815, i32 0, i32 177} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!6 = metadata !{i32 786443, metadata !14, metadata !7, i32 812, i32 0, i32 176} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!7 = metadata !{i32 786443, metadata !14, metadata !8, i32 807, i32 0, i32 175} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!8 = metadata !{i32 786443, metadata !14, metadata !9, i32 440, i32 0, i32 94} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!9 = metadata !{i32 786443, metadata !14, metadata !10, i32 435, i32 0, i32 91} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!10 = metadata !{i32 786443, metadata !14, metadata !11, i32 434, i32 0, i32 90} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!11 = metadata !{i32 786443, metadata !14, metadata !12, i32 250, i32 0, i32 24} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!12 = metadata !{i32 786443, metadata !14, metadata !13, i32 249, i32 0, i32 23} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!13 = metadata !{i32 786443, metadata !14, metadata !2, i32 221, i32 0, i32 19} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
!14 = metadata !{i32 786473, metadata !19} ; [ DW_TAG_file_type ]
!15 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 160, i64 8, i32 0, i32 0, metadata !16, metadata !17, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
!16 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
diff --git a/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll b/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
index 3455b68fb0e6..bbba796eed24 100644
--- a/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
+++ b/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
@@ -3,7 +3,7 @@
; During X86 fastisel, the address of indirect call was resolved
; through bitcast, ptrtoint, and inttoptr instructions. This is valid
; only if the related instructions are in that same basic block, otherwise
-; we may reference variables that were not live accross basic blocks
+; we may reference variables that were not live across basic blocks
; resulting in undefined virtual registers.
;
; In this example, this is illustrated by a the spill/reload of the
@@ -25,7 +25,7 @@
; CHECK: movq [[ARG2_SLOT]], %rdi
; Load the second argument
; CHECK: movq [[ARG2_SLOT]], %rsi
-; Load the thrid argument
+; Load the third argument
; CHECK: movq [[ARG2_SLOT]], %rdx
; Load the function pointer.
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
@@ -64,7 +64,7 @@ label_end:
; CHECK: movq [[ARG2_SLOT]], %rdi
; Load the second argument
; CHECK: movq [[ARG2_SLOT]], %rsi
-; Load the thrid argument
+; Load the third argument
; CHECK: movq [[ARG2_SLOT]], %rdx
; Load the function pointer.
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
@@ -103,7 +103,7 @@ label_end:
; CHECK: movq [[ARG2_SLOT]], %rdi
; Load the second argument
; CHECK: movq [[ARG2_SLOT]], %rsi
-; Load the thrid argument
+; Load the third argument
; CHECK: movq [[ARG2_SLOT]], %rdx
; Load the function pointer.
; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]]
diff --git a/test/CodeGen/X86/2014-05-29-factorial.ll b/test/CodeGen/X86/2014-05-29-factorial.ll
new file mode 100644
index 000000000000..987a21d34eab
--- /dev/null
+++ b/test/CodeGen/X86/2014-05-29-factorial.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+; CHECK: decq [[X:%rdi|%rcx]]
+; CHECK-NOT: testq [[X]], [[X]]
+
+define i64 @fact2(i64 %x) {
+entry:
+ br label %while.body
+
+while.body:
+ %result.06 = phi i64 [ %mul, %while.body ], [ 1, %entry ]
+ %x.addr.05 = phi i64 [ %dec, %while.body ], [ %x, %entry ]
+ %mul = mul nsw i64 %result.06, %x.addr.05
+ %dec = add nsw i64 %x.addr.05, -1
+ %cmp = icmp sgt i64 %dec, 0
+ br i1 %cmp, label %while.body, label %while.end.loopexit
+
+while.end.loopexit:
+ %mul.lcssa = phi i64 [ %mul, %while.body ]
+ br label %while.end
+
+while.end:
+ %result.0.lcssa = phi i64 [ %mul.lcssa, %while.end.loopexit ]
+ ret i64 %result.0.lcssa
+}
diff --git a/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll b/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll
new file mode 100644
index 000000000000..4580795880ab
--- /dev/null
+++ b/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+; CHECK: addl
+
+; The two additions are the same, but have different flags.
+; In theory this code should never be generated by the frontend, but this
+; tries to test that two identical instructions with two different flags
+; actually generate two different nodes.
+;
+; Without the flags, the combiner would see that the two additions are
+; identical and optimize the result of the sub into a register clear
+; (the final result would be 0). With the different flags, however, the
+; combiner must keep both the add and the sub nodes, because the two
+; additions are distinct nodes, so it cannot assume that subtracting
+; one from the other yields 0.
+define i32 @foo(i32 %a, i32 %b) {
+ %1 = add i32 %a, %b
+ %2 = add nsw i32 %a, %b
+ %3 = sub i32 %1, %2
+ ret i32 %3
+}
diff --git a/test/CodeGen/X86/3addr-16bit.ll b/test/CodeGen/X86/3addr-16bit.ll
index fafdfdb74811..2d6a5e76657f 100644
--- a/test/CodeGen/X86/3addr-16bit.ll
+++ b/test/CodeGen/X86/3addr-16bit.ll
@@ -34,7 +34,7 @@ entry:
; 64BIT-LABEL: t2:
; 64BIT-NOT: movw %si, %ax
-; 64BIT: decl %eax
+; 64BIT: leal -1(%rsi), %eax
; 64BIT: movzwl %ax
%0 = icmp eq i16 %k, %c ; <i1> [#uses=1]
%1 = add i16 %k, -1 ; <i16> [#uses=3]
@@ -59,7 +59,7 @@ entry:
; 64BIT-LABEL: t3:
; 64BIT-NOT: movw %si, %ax
-; 64BIT: addl $2, %eax
+; 64BIT: leal 2(%rsi), %eax
%0 = add i16 %k, 2 ; <i16> [#uses=3]
%1 = icmp eq i16 %k, %c ; <i1> [#uses=1]
br i1 %1, label %bb, label %bb1
@@ -82,7 +82,7 @@ entry:
; 64BIT-LABEL: t4:
; 64BIT-NOT: movw %si, %ax
-; 64BIT: addl %edi, %eax
+; 64BIT: leal (%rsi,%rdi), %eax
%0 = add i16 %k, %c ; <i16> [#uses=3]
%1 = icmp eq i16 %k, %c ; <i1> [#uses=1]
br i1 %1, label %bb, label %bb1
diff --git a/test/CodeGen/X86/Atomics-64.ll b/test/CodeGen/X86/Atomics-64.ll
index 8b0a349a8be3..c392e947407e 100644
--- a/test/CodeGen/X86/Atomics-64.ll
+++ b/test/CodeGen/X86/Atomics-64.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 > %t.x86-64
-; RUN: llc < %s -march=x86 > %t.x86
+; RUN: llc < %s -march=x86 -mattr=cx16 > %t.x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
@@ -704,7 +704,8 @@ entry:
%3 = zext i8 %2 to i32
%4 = trunc i32 %3 to i8
%5 = trunc i32 %1 to i8
- %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic
+ %pair6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
+ %6 = extractvalue { i8, i1 } %pair6, 0
store i8 %6, i8* @sc, align 1
%7 = load i8* @sc, align 1
%8 = zext i8 %7 to i32
@@ -712,7 +713,8 @@ entry:
%10 = zext i8 %9 to i32
%11 = trunc i32 %10 to i8
%12 = trunc i32 %8 to i8
- %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic
+ %pair13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
+ %13 = extractvalue { i8, i1 } %pair13, 0
store i8 %13, i8* @uc, align 1
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
@@ -722,7 +724,8 @@ entry:
%19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%20 = trunc i32 %18 to i16
%21 = trunc i32 %16 to i16
- %22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic
+ %pair22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
+ %22 = extractvalue { i16, i1 } %pair22, 0
store i16 %22, i16* @ss, align 2
%23 = load i8* @sc, align 1
%24 = sext i8 %23 to i16
@@ -732,49 +735,56 @@ entry:
%28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%29 = trunc i32 %27 to i16
%30 = trunc i32 %25 to i16
- %31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic
+ %pair31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
+ %31 = extractvalue { i16, i1 } %pair31, 0
store i16 %31, i16* @us, align 2
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i32
%34 = load i8* @uc, align 1
%35 = zext i8 %34 to i32
%36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic
+ %pair37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
+ %37 = extractvalue { i32, i1 } %pair37, 0
store i32 %37, i32* @si, align 4
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i32
%40 = load i8* @uc, align 1
%41 = zext i8 %40 to i32
%42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic
+ %pair43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
+ %43 = extractvalue { i32, i1 } %pair43, 0
store i32 %43, i32* @ui, align 4
%44 = load i8* @sc, align 1
%45 = sext i8 %44 to i64
%46 = load i8* @uc, align 1
%47 = zext i8 %46 to i64
%48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic
+ %pair49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
+ %49 = extractvalue { i64, i1 } %pair49, 0
store i64 %49, i64* @sl, align 8
%50 = load i8* @sc, align 1
%51 = sext i8 %50 to i64
%52 = load i8* @uc, align 1
%53 = zext i8 %52 to i64
%54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic
+ %pair55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
+ %55 = extractvalue { i64, i1 } %pair55, 0
store i64 %55, i64* @ul, align 8
%56 = load i8* @sc, align 1
%57 = sext i8 %56 to i64
%58 = load i8* @uc, align 1
%59 = zext i8 %58 to i64
%60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
- %61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic
+ %pair61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
+ %61 = extractvalue { i64, i1 } %pair61, 0
store i64 %61, i64* @sll, align 8
%62 = load i8* @sc, align 1
%63 = sext i8 %62 to i64
%64 = load i8* @uc, align 1
%65 = zext i8 %64 to i64
%66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
- %67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic
+ %pair67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
+ %67 = extractvalue { i64, i1 } %pair67, 0
store i64 %67, i64* @ull, align 8
%68 = load i8* @sc, align 1
%69 = zext i8 %68 to i32
@@ -782,7 +792,8 @@ entry:
%71 = zext i8 %70 to i32
%72 = trunc i32 %71 to i8
%73 = trunc i32 %69 to i8
- %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic
+ %pair74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic monotonic
+ %74 = extractvalue { i8, i1 } %pair74, 0
%75 = icmp eq i8 %74, %72
%76 = zext i1 %75 to i8
%77 = zext i8 %76 to i32
@@ -793,7 +804,8 @@ entry:
%81 = zext i8 %80 to i32
%82 = trunc i32 %81 to i8
%83 = trunc i32 %79 to i8
- %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic
+ %pair84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic monotonic
+ %84 = extractvalue { i8, i1 } %pair84, 0
%85 = icmp eq i8 %84, %82
%86 = zext i1 %85 to i8
%87 = zext i8 %86 to i32
@@ -805,7 +817,8 @@ entry:
%92 = zext i8 %91 to i32
%93 = trunc i32 %92 to i8
%94 = trunc i32 %90 to i8
- %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic
+ %pair95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic monotonic
+ %95 = extractvalue { i8, i1 } %pair95, 0
%96 = icmp eq i8 %95, %93
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
@@ -817,7 +830,8 @@ entry:
%103 = zext i8 %102 to i32
%104 = trunc i32 %103 to i8
%105 = trunc i32 %101 to i8
- %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic
+ %pair106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic monotonic
+ %106 = extractvalue { i8, i1 } %pair106, 0
%107 = icmp eq i8 %106, %104
%108 = zext i1 %107 to i8
%109 = zext i8 %108 to i32
@@ -828,7 +842,8 @@ entry:
%113 = zext i8 %112 to i32
%114 = trunc i32 %113 to i8
%115 = trunc i32 %111 to i8
- %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic
+ %pair116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic monotonic
+ %116 = extractvalue { i8, i1 } %pair116, 0
%117 = icmp eq i8 %116, %114
%118 = zext i1 %117 to i8
%119 = zext i8 %118 to i32
@@ -839,7 +854,8 @@ entry:
%123 = zext i8 %122 to i32
%124 = trunc i32 %123 to i8
%125 = trunc i32 %121 to i8
- %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic
+ %pair126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic monotonic
+ %126 = extractvalue { i8, i1 } %pair126, 0
%127 = icmp eq i8 %126, %124
%128 = zext i1 %127 to i8
%129 = zext i8 %128 to i32
@@ -850,7 +866,8 @@ entry:
%133 = zext i8 %132 to i64
%134 = trunc i64 %133 to i8
%135 = trunc i64 %131 to i8
- %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic
+ %pair136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic monotonic
+ %136 = extractvalue { i8, i1 } %pair136, 0
%137 = icmp eq i8 %136, %134
%138 = zext i1 %137 to i8
%139 = zext i8 %138 to i32
@@ -861,7 +878,8 @@ entry:
%143 = zext i8 %142 to i64
%144 = trunc i64 %143 to i8
%145 = trunc i64 %141 to i8
- %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic
+ %pair146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic monotonic
+ %146 = extractvalue { i8, i1 } %pair146, 0
%147 = icmp eq i8 %146, %144
%148 = zext i1 %147 to i8
%149 = zext i8 %148 to i32
@@ -872,7 +890,8 @@ entry:
%153 = zext i8 %152 to i64
%154 = trunc i64 %153 to i8
%155 = trunc i64 %151 to i8
- %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic
+ %pair156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic monotonic
+ %156 = extractvalue { i8, i1 } %pair156, 0
%157 = icmp eq i8 %156, %154
%158 = zext i1 %157 to i8
%159 = zext i8 %158 to i32
@@ -883,7 +902,8 @@ entry:
%163 = zext i8 %162 to i64
%164 = trunc i64 %163 to i8
%165 = trunc i64 %161 to i8
- %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic
+ %pair166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic monotonic
+ %166 = extractvalue { i8, i1 } %pair166, 0
%167 = icmp eq i8 %166, %164
%168 = zext i1 %167 to i8
%169 = zext i8 %168 to i32
diff --git a/test/CodeGen/X86/GC/lit.local.cfg b/test/CodeGen/X86/GC/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/CodeGen/X86/GC/lit.local.cfg
+++ b/test/CodeGen/X86/GC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/X86/GC/ocaml-gc.ll b/test/CodeGen/X86/GC/ocaml-gc.ll
index 6d5f8aebe139..37ddaf90bf67 100644
--- a/test/CodeGen/X86/GC/ocaml-gc.ll
+++ b/test/CodeGen/X86/GC/ocaml-gc.ll
@@ -1,8 +1,10 @@
; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s
-define i32 @main(i32 %x) nounwind gc "ocaml" {
; CHECK: .text
-; CHECK-NEXT: .globl "caml<stdin>__code_begin"
+; CHECK-NEXT: .file "<stdin>"
+
+define i32 @main(i32 %x) nounwind gc "ocaml" {
+; CHECK: .globl "caml<stdin>__code_begin"
; CHECK-NEXT: "caml<stdin>__code_begin":
; CHECK-NEXT: .data
; CHECK-NEXT: .globl "caml<stdin>__data_begin"
diff --git a/test/CodeGen/X86/MachineBranchProb.ll b/test/CodeGen/X86/MachineBranchProb.ll
new file mode 100644
index 000000000000..a8931527ea6d
--- /dev/null
+++ b/test/CodeGen/X86/MachineBranchProb.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -print-machineinstrs=expand-isel-pseudos -o /dev/null 2>&1 | FileCheck %s
+
+;; Make sure a transformation in SelectionDAGBuilder that converts "or + br" to
+;; two branches correctly updates the branch probability.
+
+@max_regno = common global i32 0, align 4
+
+define void @test(i32* %old, i32 %final) {
+for.cond:
+ br label %for.cond2
+
+for.cond2: ; preds = %for.inc, %for.cond
+ %i.1 = phi i32 [ %inc19, %for.inc ], [ 0, %for.cond ]
+ %bit.0 = phi i32 [ %shl, %for.inc ], [ 1, %for.cond ]
+ %tobool = icmp eq i32 %bit.0, 0
+ %v3 = load i32* @max_regno, align 4
+ %cmp4 = icmp eq i32 %i.1, %v3
+ %or.cond = or i1 %tobool, %cmp4
+ br i1 %or.cond, label %for.inc20, label %for.inc, !prof !0
+; CHECK: BB#1: derived from LLVM BB %for.cond2
+; CHECK: Successors according to CFG: BB#3(56008718) BB#4(2203492365)
+; CHECK: BB#4: derived from LLVM BB %for.cond2
+; CHECK: Successors according to CFG: BB#3(112017436) BB#2(4294967294)
+
+for.inc: ; preds = %for.cond2
+ %shl = shl i32 %bit.0, 1
+ %inc19 = add nsw i32 %i.1, 1
+ br label %for.cond2
+
+for.inc20: ; preds = %for.cond2
+ ret void
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 112017436, i32 -735157296}
diff --git a/test/CodeGen/X86/MachineSink-DbgValue.ll b/test/CodeGen/X86/MachineSink-DbgValue.ll
index 584e644ed51f..4ce2fb3dcafb 100644
--- a/test/CodeGen/X86/MachineSink-DbgValue.ll
+++ b/test/CodeGen/X86/MachineSink-DbgValue.ll
@@ -13,8 +13,8 @@ define i32 @foo(i32 %i, i32* nocapture %c) nounwind uwtable readonly ssp {
bb1: ; preds = %0
;CHECK: DEBUG_VALUE: a
-;CHECK-NEXT: .loc 1 5 5
-;CHECK-NEXT: addl
+;CHECK: .loc 1 5 5
+;CHECK-NEXT: addl
%gh = add nsw i32 %ab, 2, !dbg !16
br label %bb2, !dbg !16
diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll
index 0ef3aa5b6f07..f6d68520b7b4 100644
--- a/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=x86-64 -mcpu=corei7 -mattr=+avx < %s | FileCheck %s
+; RUN: llc -march=x86-64 -mcpu=corei7 -mattr=+avx -addr-sink-using-gep=1 < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/CodeGen/X86/add-of-carry.ll b/test/CodeGen/X86/add-of-carry.ll
index 1513fcba774b..9c24be4289ff 100644
--- a/test/CodeGen/X86/add-of-carry.ll
+++ b/test/CodeGen/X86/add-of-carry.ll
@@ -4,7 +4,7 @@
define i32 @test1(i32 %sum, i32 %x) nounwind readnone ssp {
entry:
; CHECK-LABEL: test1:
-; CHECK: cmpl %ecx, %eax
+; CHECK: cmpl %ecx, %eax
; CHECK-NOT: addl
; CHECK: adcl $0, %eax
%add4 = add i32 %x, %sum
diff --git a/test/CodeGen/X86/address-type-promotion-constantexpr.ll b/test/CodeGen/X86/address-type-promotion-constantexpr.ll
new file mode 100644
index 000000000000..32f29bd3cad9
--- /dev/null
+++ b/test/CodeGen/X86/address-type-promotion-constantexpr.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=x86_64-pc-linux
+
+; PR20314 is a crashing bug. This program does nothing with the load, so just check that the return is 0.
+
+@c = common global [2 x i32] zeroinitializer, align 4
+@a = common global i32 0, align 4
+@b = internal unnamed_addr constant [2 x i8] c"\01\00", align 1
+
+; CHECK-LABEL: main
+; CHECK: xor %eax, %eax
+define i32 @main() {
+entry:
+ %foo = load i8* getelementptr ([2 x i8]* @b, i64 0, i64 sext (i8 or (i8 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i8), i8 1) to i64)), align 1
+ ret i32 0
+}
+
diff --git a/test/CodeGen/X86/alias-error.ll b/test/CodeGen/X86/alias-error.ll
deleted file mode 100644
index 8f01dcff2bf9..000000000000
--- a/test/CodeGen/X86/alias-error.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: not llc -mtriple=i686-pc-linux-gnu %s -o /dev/null 2>&1 | FileCheck %s
-
-@a = external global i32
-@b = alias i32* @a
-; CHECK: b: Target doesn't support aliases to declarations
diff --git a/test/CodeGen/X86/aliases.ll b/test/CodeGen/X86/aliases.ll
index d0a262d390da..bf55644de41e 100644
--- a/test/CodeGen/X86/aliases.ll
+++ b/test/CodeGen/X86/aliases.ll
@@ -1,4 +1,20 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=i686-pc-linux-gnu -asm-verbose=false \
+; RUN: -relocation-model=pic | FileCheck %s
+
+@thread_var = thread_local global i32 42, align 4
+@thread_alias = thread_local(localdynamic) alias i32* @thread_var
+
+; CHECK-LABEL: get_thread_var
+define i32* @get_thread_var() {
+; CHECK: leal thread_var@TLSGD
+ ret i32* @thread_var
+}
+
+; CHECK-LABEL: get_thread_alias
+define i32* @get_thread_alias() {
+; CHECK: leal thread_alias@TLSLD
+ ret i32* @thread_alias
+}
@bar = global i32 42
@@ -32,6 +48,19 @@ define i32 @foo_f() {
; CHECK-DAG: .protected bar_p
@bar_p = protected alias i32* @bar
+; CHECK-DAG: test2 = bar+4
+@test2 = alias getelementptr(i32 *@bar, i32 1)
+
+; CHECK-DAG: test3 = 42
+@test3 = alias inttoptr(i32 42 to i32*)
+
+; CHECK-DAG: test4 = bar
+@test4 = alias inttoptr(i64 ptrtoint (i32* @bar to i64) to i32*)
+
+; CHECK-DAG: test5 = test2-bar
+@test5 = alias inttoptr(i32 sub (i32 ptrtoint (i32* @test2 to i32),
+ i32 ptrtoint (i32* @bar to i32)) to i32*)
+
; CHECK-DAG: .globl test
define i32 @test() {
entry:
diff --git a/test/CodeGen/X86/anyregcc-crash.ll b/test/CodeGen/X86/anyregcc-crash.ll
index cf6f6edb31a8..3abe3d149a11 100644
--- a/test/CodeGen/X86/anyregcc-crash.ll
+++ b/test/CodeGen/X86/anyregcc-crash.ll
@@ -7,11 +7,11 @@ define i64 @anyreglimit(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6,
i64 %v7, i64 %v8, i64 %v9, i64 %v10, i64 %v11, i64 %v12,
i64 %v13, i64 %v14, i64 %v15, i64 %v16) {
entry:
- %result = tail call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 16,
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 16,
i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6,
i64 %v7, i64 %v8, i64 %v9, i64 %v10, i64 %v11, i64 %v12,
i64 %v13, i64 %v14, i64 %v15, i64 %v16)
ret i64 %result
}
-declare i64 @llvm.experimental.patchpoint.i64(i32, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/X86/anyregcc.ll b/test/CodeGen/X86/anyregcc.ll
index 8109f879f217..98ba17c74c82 100644
--- a/test/CodeGen/X86/anyregcc.ll
+++ b/test/CodeGen/X86/anyregcc.ll
@@ -1,17 +1,44 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck --check-prefix=SSE %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim | FileCheck --check-prefix=AVX %s
+
; Stackmap Header: no constants - 6 callsites
-; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
-; CHECK-NEXT: __LLVM_StackMaps:
+; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
; Header
-; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 8
; Num Constants
-; CHECK-NEXT: .long 0
+; CHECK-NEXT: .long 0
; Num Callsites
-; CHECK-NEXT: .long 8
+; CHECK-NEXT: .long 8
+
+; Functions and stack size
+; CHECK-NEXT: .quad _test
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _property_access1
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _property_access2
+; CHECK-NEXT: .quad 24
+; CHECK-NEXT: .quad _property_access3
+; CHECK-NEXT: .quad 24
+; CHECK-NEXT: .quad _anyreg_test1
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _anyreg_test2
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _patchpoint_spilldef
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _patchpoint_spillargs
+; CHECK-NEXT: .quad 88
+; No constants
+
+; Callsites
; test
-; CHECK-NEXT: .long 0
; CHECK-LABEL: .long L{{.*}}-_test
; CHECK-NEXT: .short 0
; 3 locations
@@ -33,12 +60,11 @@
; CHECK-NEXT: .long 3
define i64 @test() nounwind ssp uwtable {
entry:
- call anyregcc void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 0, i32 15, i8* null, i32 2, i32 1, i32 2, i64 3)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 0, i32 15, i8* null, i32 2, i32 1, i32 2, i64 3)
ret i64 0
}
; property access 1 - %obj is an anyreg call argument and should therefore be in a register
-; CHECK-NEXT: .long 1
; CHECK-LABEL: .long L{{.*}}-_property_access1
; CHECK-NEXT: .short 0
; 2 locations
@@ -56,12 +82,11 @@ entry:
define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
entry:
%f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 1, i32 15, i8* %f, i32 1, i8* %obj)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 1, i32 15, i8* %f, i32 1, i8* %obj)
ret i64 %ret
}
; property access 2 - %obj is an anyreg call argument and should therefore be in a register
-; CHECK-NEXT: .long 2
; CHECK-LABEL: .long L{{.*}}-_property_access2
; CHECK-NEXT: .short 0
; 2 locations
@@ -80,12 +105,11 @@ define i64 @property_access2() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
%f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 2, i32 15, i8* %f, i32 1, i64* %obj)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 15, i8* %f, i32 1, i64* %obj)
ret i64 %ret
}
; property access 3 - %obj is a frame index
-; CHECK-NEXT: .long 3
; CHECK-LABEL: .long L{{.*}}-_property_access3
; CHECK-NEXT: .short 0
; 2 locations
@@ -95,21 +119,20 @@ entry:
; CHECK-NEXT: .byte 8
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .long 0
-; Loc 1: Register <-- this will be folded once folding for FI is implemented
-; CHECK-NEXT: .byte 1
+; Loc 1: Direct RBP - ofs
+; CHECK-NEXT: .byte 2
; CHECK-NEXT: .byte 8
-; CHECK-NEXT: .short {{[0-9]+}}
-; CHECK-NEXT: .long 0
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
define i64 @property_access3() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
%f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 3, i32 15, i8* %f, i32 0, i64* %obj)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 3, i32 15, i8* %f, i32 0, i64* %obj)
ret i64 %ret
}
; anyreg_test1
-; CHECK-NEXT: .long 4
; CHECK-LABEL: .long L{{.*}}-_anyreg_test1
; CHECK-NEXT: .short 0
; 14 locations
@@ -187,12 +210,11 @@ entry:
define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
entry:
%f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 4, i32 15, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 4, i32 15, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
ret i64 %ret
}
; anyreg_test2
-; CHECK-NEXT: .long 5
; CHECK-LABEL: .long L{{.*}}-_anyreg_test2
; CHECK-NEXT: .short 0
; 14 locations
@@ -270,7 +292,7 @@ entry:
define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
entry:
%f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 5, i32 15, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 15, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
ret i64 %ret
}
@@ -278,7 +300,6 @@ entry:
;
; <rdar://problem/15432754> [JS] Assertion: "Folded a def to a non-store!"
;
-; CHECK-LABEL: .long 12
; CHECK-LABEL: .long L{{.*}}-_patchpoint_spilldef
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 3
@@ -299,7 +320,7 @@ entry:
; CHECK-NEXT: .long 0
define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
- %result = tail call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
tail call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"() nounwind
ret i64 %result
}
@@ -308,7 +329,6 @@ entry:
;
; <rdar://problem/15487687> [JS] AnyRegCC argument ends up being spilled
;
-; CHECK-LABEL: .long 13
; CHECK-LABEL: .long L{{.*}}-_patchpoint_spillargs
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 5
@@ -330,19 +350,119 @@ entry:
; Loc 3: Arg2 spilled to RBP +
; CHECK-NEXT: .byte 3
; CHECK-NEXT: .byte 8
-; CHECK-NEXT: .short 7
-; CHECK-NEXT: .long {{[0-9]+}}
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
; Loc 4: Arg3 spilled to RBP +
; CHECK-NEXT: .byte 3
; CHECK-NEXT: .byte 8
-; CHECK-NEXT: .short 7
-; CHECK-NEXT: .long {{[0-9]+}}
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
tail call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"() nounwind
- %result = tail call anyregcc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 13, i32 15, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 13, i32 15, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
ret i64 %result
}
-declare void @llvm.experimental.patchpoint.void(i32, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i32, i32, i8*, i32, ...)
+; Make sure all regs are spilled
+define anyregcc void @anyregcc1() {
+entry:
+;SSE-LABEL: anyregcc1
+;SSE: pushq %rbp
+;SSE: pushq %rax
+;SSE: pushq %r15
+;SSE: pushq %r14
+;SSE: pushq %r13
+;SSE: pushq %r12
+;SSE: pushq %r11
+;SSE: pushq %r10
+;SSE: pushq %r9
+;SSE: pushq %r8
+;SSE: pushq %rdi
+;SSE: pushq %rsi
+;SSE: pushq %rdx
+;SSE: pushq %rcx
+;SSE: pushq %rbx
+;SSE: movaps %xmm15
+;SSE-NEXT: movaps %xmm14
+;SSE-NEXT: movaps %xmm13
+;SSE-NEXT: movaps %xmm12
+;SSE-NEXT: movaps %xmm11
+;SSE-NEXT: movaps %xmm10
+;SSE-NEXT: movaps %xmm9
+;SSE-NEXT: movaps %xmm8
+;SSE-NEXT: movaps %xmm7
+;SSE-NEXT: movaps %xmm6
+;SSE-NEXT: movaps %xmm5
+;SSE-NEXT: movaps %xmm4
+;SSE-NEXT: movaps %xmm3
+;SSE-NEXT: movaps %xmm2
+;SSE-NEXT: movaps %xmm1
+;SSE-NEXT: movaps %xmm0
+;AVX-LABEL:anyregcc1
+;AVX: pushq %rbp
+;AVX: pushq %rax
+;AVX: pushq %r15
+;AVX: pushq %r14
+;AVX: pushq %r13
+;AVX: pushq %r12
+;AVX: pushq %r11
+;AVX: pushq %r10
+;AVX: pushq %r9
+;AVX: pushq %r8
+;AVX: pushq %rdi
+;AVX: pushq %rsi
+;AVX: pushq %rdx
+;AVX: pushq %rcx
+;AVX: pushq %rbx
+;AVX: vmovaps %ymm15
+;AVX-NEXT: vmovaps %ymm14
+;AVX-NEXT: vmovaps %ymm13
+;AVX-NEXT: vmovaps %ymm12
+;AVX-NEXT: vmovaps %ymm11
+;AVX-NEXT: vmovaps %ymm10
+;AVX-NEXT: vmovaps %ymm9
+;AVX-NEXT: vmovaps %ymm8
+;AVX-NEXT: vmovaps %ymm7
+;AVX-NEXT: vmovaps %ymm6
+;AVX-NEXT: vmovaps %ymm5
+;AVX-NEXT: vmovaps %ymm4
+;AVX-NEXT: vmovaps %ymm3
+;AVX-NEXT: vmovaps %ymm2
+;AVX-NEXT: vmovaps %ymm1
+;AVX-NEXT: vmovaps %ymm0
+ call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
+ ret void
+}
+
+; Make sure we don't spill any XMMs/YMMs
+declare anyregcc void @foo()
+define void @anyregcc2() {
+entry:
+;SSE-LABEL: anyregcc2
+;SSE-NOT: movaps %xmm
+;AVX-LABEL: anyregcc2
+;AVX-NOT: vmovups %ymm
+ %a0 = call <2 x double> asm sideeffect "", "={xmm0}"() nounwind
+ %a1 = call <2 x double> asm sideeffect "", "={xmm1}"() nounwind
+ %a2 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
+ %a3 = call <2 x double> asm sideeffect "", "={xmm3}"() nounwind
+ %a4 = call <2 x double> asm sideeffect "", "={xmm4}"() nounwind
+ %a5 = call <2 x double> asm sideeffect "", "={xmm5}"() nounwind
+ %a6 = call <2 x double> asm sideeffect "", "={xmm6}"() nounwind
+ %a7 = call <2 x double> asm sideeffect "", "={xmm7}"() nounwind
+ %a8 = call <2 x double> asm sideeffect "", "={xmm8}"() nounwind
+ %a9 = call <2 x double> asm sideeffect "", "={xmm9}"() nounwind
+ %a10 = call <2 x double> asm sideeffect "", "={xmm10}"() nounwind
+ %a11 = call <2 x double> asm sideeffect "", "={xmm11}"() nounwind
+ %a12 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
+ %a13 = call <2 x double> asm sideeffect "", "={xmm13}"() nounwind
+ %a14 = call <2 x double> asm sideeffect "", "={xmm14}"() nounwind
+ %a15 = call <2 x double> asm sideeffect "", "={xmm15}"() nounwind
+ call anyregcc void @foo()
+ call void asm sideeffect "", "{xmm0},{xmm1},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3, <2 x double> %a4, <2 x double> %a5, <2 x double> %a6, <2 x double> %a7, <2 x double> %a8, <2 x double> %a9, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15)
+ ret void
+}
+
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/X86/asm-block-labels.ll b/test/CodeGen/X86/asm-block-labels.ll
index a43d43023196..6dbfb16a6d50 100644
--- a/test/CodeGen/X86/asm-block-labels.ll
+++ b/test/CodeGen/X86/asm-block-labels.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -std-compile-opts | llc
+; RUN: opt < %s -std-compile-opts | llc -no-integrated-as
; ModuleID = 'block12.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin8"
diff --git a/test/CodeGen/X86/asm-global-imm.ll b/test/CodeGen/X86/asm-global-imm.ll
index ebf585a39a28..9e79f6f78222 100644
--- a/test/CodeGen/X86/asm-global-imm.ll
+++ b/test/CodeGen/X86/asm-global-imm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | FileCheck %s
+; RUN: llc < %s -march=x86 -relocation-model=static -no-integrated-as | FileCheck %s
; PR882
target datalayout = "e-p:32:32"
diff --git a/test/CodeGen/X86/atom-bypass-slow-division-64.ll b/test/CodeGen/X86/atom-bypass-slow-division-64.ll
index d1b52a4ec3bb..5980b7907c9f 100644
--- a/test/CodeGen/X86/atom-bypass-slow-division-64.ll
+++ b/test/CodeGen/X86/atom-bypass-slow-division-64.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -mcpu=atom -mtriple=i686-linux -march=x86-64 | FileCheck %s
+; RUN: llc < %s -mcpu=atom -march=x86-64 | FileCheck %s
+
+target triple = "x86_64-unknown-linux-gnu"
; Additional tests for 64-bit divide bypass
diff --git a/test/CodeGen/X86/atom-cmpb.ll b/test/CodeGen/X86/atom-cmpb.ll
new file mode 100644
index 000000000000..034bf2f27d25
--- /dev/null
+++ b/test/CodeGen/X86/atom-cmpb.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -march=x86 -mcpu=atom | FileCheck %s
+; CHECK: movl
+; CHECK: movb
+; CHECK: movb
+; CHECK: cmpb
+; CHECK: notb
+; CHECK: notb
+
+; Test that the conversion to cmp32 is canceled for the Atom case
+; in function 'X86TargetLowering::EmitCmp'.
+
+define i8 @run_test(i8* %rd_p) {
+entry:
+ %incdec.ptr = getelementptr inbounds i8* %rd_p, i64 1
+ %ld1 = load i8* %rd_p, align 1
+ %incdec.ptr1 = getelementptr inbounds i8* %rd_p, i64 2
+ %ld2 = load i8* %incdec.ptr, align 1
+ %x4 = xor i8 %ld1, -1
+ %x5 = xor i8 %ld2, -1
+ %cmp34 = icmp ult i8 %ld2, %ld1
+ br i1 %cmp34, label %if.then3, label %if.else
+
+if.then3:
+ %sub7 = sub i8 %x4, %x5
+ br label %if.end4
+
+if.else:
+ %sub8 = sub i8 %x5, %x4
+ br label %if.end4
+
+if.end4:
+ %res = phi i8 [ %sub7, %if.then3 ], [ %sub8, %if.else ]
+ ret i8 %res
+
+}
+
diff --git a/test/CodeGen/X86/atom-fixup-lea4.ll b/test/CodeGen/X86/atom-fixup-lea4.ll
new file mode 100644
index 000000000000..668574b968c8
--- /dev/null
+++ b/test/CodeGen/X86/atom-fixup-lea4.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mcpu=atom -mtriple=x86_64-linux
+
+%struct.ValueWrapper = type { double }
+%struct.ValueWrapper.6 = type { %struct.ValueWrapper.7 }
+%struct.ValueWrapper.7 = type { %struct.ValueWrapper.8 }
+%struct.ValueWrapper.8 = type { %struct.ValueWrapper }
+
+; Function Attrs: uwtable
+define linkonce_odr void @_ZN12ValueWrapperIS_IS_IS_IdEEEEC2Ev(%struct.ValueWrapper.6* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %struct.ValueWrapper.6*, align 8
+ store %struct.ValueWrapper.6* %this, %struct.ValueWrapper.6** %this.addr, align 8
+ %this1 = load %struct.ValueWrapper.6** %this.addr
+ %value = getelementptr inbounds %struct.ValueWrapper.6* %this1, i32 0, i32 0
+ call void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7* %value)
+ ret void
+}
+
+; Function Attrs: uwtable
+declare void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7*) unnamed_addr #0 align 2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/CodeGen/X86/atomic-load-store-wide.ll b/test/CodeGen/X86/atomic-load-store-wide.ll
index 17e04f059034..7352d5a58006 100644
--- a/test/CodeGen/X86/atomic-load-store-wide.ll
+++ b/test/CodeGen/X86/atomic-load-store-wide.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=corei7 -march=x86 -verify-machineinstrs | FileCheck %s
; 64-bit load/store on x86-32
; FIXME: The generated code can be substantially improved.
diff --git a/test/CodeGen/X86/atomic-minmax-i6432.ll b/test/CodeGen/X86/atomic-minmax-i6432.ll
index 1cfbc49ab1c9..ffb7a3fd6f64 100644
--- a/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -1,6 +1,5 @@
-; RUN: llc -march=x86 -mattr=+cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
-; RUN: llc -march=x86 -mattr=-cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=NOCMOV
-; RUN: llc -march=x86 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
+; RUN: llc -march=x86 -mattr=+cmov,cx16 -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
+; RUN: llc -march=x86 -mattr=cx16 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
@sc64 = external global i64
@@ -9,87 +8,39 @@ define void @atomic_maxmin_i6432() {
%1 = atomicrmw max i64* @sc64, i64 5 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setl
-; LINUX: cmpl
-; LINUX: setl
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%2 = atomicrmw min i64* @sc64, i64 6 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setg
-; LINUX: cmpl
-; LINUX: setg
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%3 = atomicrmw umax i64* @sc64, i64 7 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setb
-; LINUX: cmpl
-; LINUX: setb
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%4 = atomicrmw umin i64* @sc64, i64 8 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: seta
-; LINUX: cmpl
-; LINUX: seta
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
ret void
}
@@ -98,8 +49,8 @@ define void @atomic_maxmin_i6432() {
define void @tf_bug(i8* %ptr) nounwind {
; PIC-LABEL: tf_bug:
-; PIC: movl _id-L1$pb(
-; PIC: movl (_id-L1$pb)+4(
+; PIC-DAG: movl _id-L1$pb(
+; PIC-DAG: movl (_id-L1$pb)+4(
%tmp1 = atomicrmw add i64* @id, i64 1 seq_cst
%tmp2 = add i64 %tmp1, 1
%tmp3 = bitcast i8* %ptr to i64*
diff --git a/test/CodeGen/X86/atomic-ops-ancient-64.ll b/test/CodeGen/X86/atomic-ops-ancient-64.ll
new file mode 100644
index 000000000000..508d83b0ffe1
--- /dev/null
+++ b/test/CodeGen/X86/atomic-ops-ancient-64.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
+; XFAIL: *
+
+define i64 @test_add(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_add:
+; CHECK: calll __sync_fetch_and_add_8
+ %old = atomicrmw add i64* %addr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_sub(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_sub:
+; CHECK: calll __sync_fetch_and_sub_8
+ %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_and(i64* %andr, i64 %inc) {
+; CHECK-LABEL: test_and:
+; CHECK: calll __sync_fetch_and_and_8
+ %old = atomicrmw and i64* %andr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_or(i64* %orr, i64 %inc) {
+; CHECK-LABEL: test_or:
+; CHECK: calll __sync_fetch_and_or_8
+ %old = atomicrmw or i64* %orr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_xor(i64* %xorr, i64 %inc) {
+; CHECK-LABEL: test_xor:
+; CHECK: calll __sync_fetch_and_xor_8
+ %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_nand(i64* %nandr, i64 %inc) {
+; CHECK-LABEL: test_nand:
+; CHECK: calll __sync_fetch_and_nand_8
+ %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
+ ret i64 %old
+}
diff --git a/test/CodeGen/X86/atomic128.ll b/test/CodeGen/X86/atomic128.ll
new file mode 100644
index 000000000000..741d2904229d
--- /dev/null
+++ b/test/CodeGen/X86/atomic128.ll
@@ -0,0 +1,316 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 | FileCheck %s
+
+@var = global i128 0
+
+define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+; CHECK-LABEL: val_compare_and_swap:
+; CHECK: movq %rsi, %rax
+; CHECK: movq %rcx, %rbx
+; CHECK: movq %r8, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+ %val = extractvalue { i128, i1 } %pair, 0
+ ret i128 %val
+}
+
+define void @fetch_and_nand(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_nand:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rdx, %rcx
+; CHECK: andq [[INCHI]], %rcx
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: andq %rsi, %rbx
+; CHECK: notq %rbx
+; CHECK: notq %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+ %val = atomicrmw nand i128* %p, i128 %bits release
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_or(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_or:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: orq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: orq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw or i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_add(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_add:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: addq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: adcq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw add i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_sub(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_sub:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent arrives in %rsi, so it makes sense for it to stay there.
+; CHECK: subq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: sbbq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw sub i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_min(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_min:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setle [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw min i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_max(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_max:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setae [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setge [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw max i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umin(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umin:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setbe [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umin i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umax(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umax:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rax, %rsi
+; CHECK: setb [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: seta [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umax i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define i128 @atomic_load_seq_cst(i128* %p) {
+; CHECK-LABEL: atomic_load_seq_cst:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p seq_cst, align 16
+ ret i128 %r
+}
+
+define i128 @atomic_load_relaxed(i128* %p) {
+; CHECK-LABEL: atomic_load_relaxed:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p monotonic, align 16
+ ret i128 %r
+}
+
+define void @atomic_store_seq_cst(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_seq_cst:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+; CHECK-NOT: callq ___sync_lock_test_and_set_16
+
+ store atomic i128 %in, i128* %p seq_cst, align 16
+ ret void
+}
+
+define void @atomic_store_release(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_release:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p release, align 16
+ ret void
+}
+
+define void @atomic_store_relaxed(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_relaxed:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p unordered, align 16
+ ret void
+}
diff --git a/test/CodeGen/X86/atomic16.ll b/test/CodeGen/X86/atomic16.ll
index ec2887e29f81..faaa4c49d39b 100644
--- a/test/CodeGen/X86/atomic16.ll
+++ b/test/CodeGen/X86/atomic16.ll
@@ -4,8 +4,8 @@
@sc16 = external global i16
define void @atomic_fetch_add16() nounwind {
-; X64: atomic_fetch_add16
-; X32: atomic_fetch_add16
+; X64-LABEL: atomic_fetch_add16
+; X32-LABEL: atomic_fetch_add16
entry:
; 32-bit
%t1 = atomicrmw add i16* @sc16, i16 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub16() nounwind {
-; X64: atomic_fetch_sub16
-; X32: atomic_fetch_sub16
+; X64-LABEL: atomic_fetch_sub16
+; X32-LABEL: atomic_fetch_sub16
%t1 = atomicrmw sub i16* @sc16, i16 1 acquire
; X64: lock
; X64: decw
@@ -62,18 +62,18 @@ define void @atomic_fetch_sub16() nounwind {
}
define void @atomic_fetch_and16() nounwind {
-; X64: atomic_fetch_and16
-; X32: atomic_fetch_and16
+; X64-LABEL: atomic_fetch_and16
+; X32-LABEL: atomic_fetch_and16
%t1 = atomicrmw and i16* @sc16, i16 3 acquire
; X64: lock
; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: andw $3
%t2 = atomicrmw and i16* @sc16, i16 5 acquire
-; X64: andw
+; X64: andl
; X64: lock
; X64: cmpxchgw
-; X32: andw
+; X32: andl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
@@ -87,18 +87,18 @@ define void @atomic_fetch_and16() nounwind {
}
define void @atomic_fetch_or16() nounwind {
-; X64: atomic_fetch_or16
-; X32: atomic_fetch_or16
+; X64-LABEL: atomic_fetch_or16
+; X32-LABEL: atomic_fetch_or16
%t1 = atomicrmw or i16* @sc16, i16 3 acquire
; X64: lock
; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: orw $3
%t2 = atomicrmw or i16* @sc16, i16 5 acquire
-; X64: orw
+; X64: orl
; X64: lock
; X64: cmpxchgw
-; X32: orw
+; X32: orl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
@@ -112,18 +112,18 @@ define void @atomic_fetch_or16() nounwind {
}
define void @atomic_fetch_xor16() nounwind {
-; X64: atomic_fetch_xor16
-; X32: atomic_fetch_xor16
+; X64-LABEL: atomic_fetch_xor16
+; X32-LABEL: atomic_fetch_xor16
%t1 = atomicrmw xor i16* @sc16, i16 3 acquire
; X64: lock
; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xorw $3
%t2 = atomicrmw xor i16* @sc16, i16 5 acquire
-; X64: xorw
+; X64: xorl
; X64: lock
; X64: cmpxchgw
-; X32: xorw
+; X32: xorl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
@@ -137,15 +137,15 @@ define void @atomic_fetch_xor16() nounwind {
}
define void @atomic_fetch_nand16(i16 %x) nounwind {
-; X64: atomic_fetch_nand16
-; X32: atomic_fetch_nand16
+; X64-LABEL: atomic_fetch_nand16
+; X32-LABEL: atomic_fetch_nand16
%t1 = atomicrmw nand i16* @sc16, i16 %x acquire
-; X64: andw
-; X64: notw
+; X64: andl
+; X64: notl
; X64: lock
; X64: cmpxchgw
-; X32: andw
-; X32: notw
+; X32: andl
+; X32: notl
; X32: lock
; X32: cmpxchgw
ret void
@@ -155,12 +155,16 @@ define void @atomic_fetch_nand16(i16 %x) nounwind {
define void @atomic_fetch_max16(i16 %x) nounwind {
%t1 = atomicrmw max i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -171,12 +175,16 @@ define void @atomic_fetch_max16(i16 %x) nounwind {
define void @atomic_fetch_min16(i16 %x) nounwind {
%t1 = atomicrmw min i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -187,12 +195,16 @@ define void @atomic_fetch_min16(i16 %x) nounwind {
define void @atomic_fetch_umax16(i16 %x) nounwind {
%t1 = atomicrmw umax i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -203,11 +215,16 @@ define void @atomic_fetch_umax16(i16 %x) nounwind {
define void @atomic_fetch_umin16(i16 %x) nounwind {
%t1 = atomicrmw umin i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -217,7 +234,7 @@ define void @atomic_fetch_umin16(i16 %x) nounwind {
}
define void @atomic_fetch_cmpxchg16() nounwind {
- %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire
+ %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire acquire
; X64: lock
; X64: cmpxchgw
; X32: lock
diff --git a/test/CodeGen/X86/atomic32.ll b/test/CodeGen/X86/atomic32.ll
index 3cb9ca1c76c7..4f2cbe0ce2d3 100644
--- a/test/CodeGen/X86/atomic32.ll
+++ b/test/CodeGen/X86/atomic32.ll
@@ -5,8 +5,8 @@
@sc32 = external global i32
define void @atomic_fetch_add32() nounwind {
-; X64: atomic_fetch_add32
-; X32: atomic_fetch_add32
+; X64-LABEL: atomic_fetch_add32:
+; X32-LABEL: atomic_fetch_add32:
entry:
; 32-bit
%t1 = atomicrmw add i32* @sc32, i32 1 acquire
@@ -35,8 +35,8 @@ entry:
}
define void @atomic_fetch_sub32() nounwind {
-; X64: atomic_fetch_sub32
-; X32: atomic_fetch_sub32
+; X64-LABEL: atomic_fetch_sub32:
+; X32-LABEL: atomic_fetch_sub32:
%t1 = atomicrmw sub i32* @sc32, i32 1 acquire
; X64: lock
; X64: decl
@@ -63,8 +63,8 @@ define void @atomic_fetch_sub32() nounwind {
}
define void @atomic_fetch_and32() nounwind {
-; X64: atomic_fetch_and32
-; X32: atomic_fetch_and32
+; X64-LABEL: atomic_fetch_and32:
+; X32-LABEL: atomic_fetch_and32:
%t1 = atomicrmw and i32* @sc32, i32 3 acquire
; X64: lock
; X64: andl $3
@@ -88,8 +88,8 @@ define void @atomic_fetch_and32() nounwind {
}
define void @atomic_fetch_or32() nounwind {
-; X64: atomic_fetch_or32
-; X32: atomic_fetch_or32
+; X64-LABEL: atomic_fetch_or32:
+; X32-LABEL: atomic_fetch_or32:
%t1 = atomicrmw or i32* @sc32, i32 3 acquire
; X64: lock
; X64: orl $3
@@ -113,8 +113,8 @@ define void @atomic_fetch_or32() nounwind {
}
define void @atomic_fetch_xor32() nounwind {
-; X64: atomic_fetch_xor32
-; X32: atomic_fetch_xor32
+; X64-LABEL: atomic_fetch_xor32:
+; X32-LABEL: atomic_fetch_xor32:
%t1 = atomicrmw xor i32* @sc32, i32 3 acquire
; X64: lock
; X64: xorl $3
@@ -138,8 +138,8 @@ define void @atomic_fetch_xor32() nounwind {
}
define void @atomic_fetch_nand32(i32 %x) nounwind {
-; X64: atomic_fetch_nand32
-; X32: atomic_fetch_nand32
+; X64-LABEL: atomic_fetch_nand32:
+; X32-LABEL: atomic_fetch_nand32:
%t1 = atomicrmw nand i32* @sc32, i32 %x acquire
; X64: andl
; X64: notl
@@ -155,19 +155,22 @@ define void @atomic_fetch_nand32(i32 %x) nounwind {
}
define void @atomic_fetch_max32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_max32:
+; X32-LABEL: atomic_fetch_max32:
+
%t1 = atomicrmw max i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jl
+; NOCMOV: subl
+; NOCMOV: jge
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -177,19 +180,23 @@ define void @atomic_fetch_max32(i32 %x) nounwind {
}
define void @atomic_fetch_min32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_min32:
+; X32-LABEL: atomic_fetch_min32:
+; NOCMOV-LABEL: atomic_fetch_min32:
+
%t1 = atomicrmw min i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jg
+; NOCMOV: subl
+; NOCMOV: jle
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -199,19 +206,23 @@ define void @atomic_fetch_min32(i32 %x) nounwind {
}
define void @atomic_fetch_umax32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax32:
+; X32-LABEL: atomic_fetch_umax32:
+; NOCMOV-LABEL: atomic_fetch_umax32:
+
%t1 = atomicrmw umax i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jb
+; NOCMOV: subl
+; NOCMOV: ja
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -221,19 +232,23 @@ define void @atomic_fetch_umax32(i32 %x) nounwind {
}
define void @atomic_fetch_umin32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin32:
+; X32-LABEL: atomic_fetch_umin32:
+; NOCMOV-LABEL: atomic_fetch_umin32:
+
%t1 = atomicrmw umin i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: ja
+; NOCMOV: subl
+; NOCMOV: jb
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -243,7 +258,10 @@ define void @atomic_fetch_umin32(i32 %x) nounwind {
}
define void @atomic_fetch_cmpxchg32() nounwind {
- %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire
+; X64-LABEL: atomic_fetch_cmpxchg32:
+; X32-LABEL: atomic_fetch_cmpxchg32:
+
+ %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire acquire
; X64: lock
; X64: cmpxchgl
; X32: lock
@@ -254,6 +272,9 @@ define void @atomic_fetch_cmpxchg32() nounwind {
}
define void @atomic_fetch_store32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_store32:
+; X32-LABEL: atomic_fetch_store32:
+
store atomic i32 %x, i32* @sc32 release, align 4
; X64-NOT: lock
; X64: movl
@@ -265,6 +286,9 @@ define void @atomic_fetch_store32(i32 %x) nounwind {
}
define void @atomic_fetch_swap32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap32:
+; X32-LABEL: atomic_fetch_swap32:
+
%t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
; X64-NOT: lock
; X64: xchgl
diff --git a/test/CodeGen/X86/atomic64.ll b/test/CodeGen/X86/atomic64.ll
index aa000455753f..11b4e6864da6 100644
--- a/test/CodeGen/X86/atomic64.ll
+++ b/test/CodeGen/X86/atomic64.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X64: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X64: lock
@@ -22,7 +23,8 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X64: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
; X64: lock
; X64: decq
@@ -40,7 +42,8 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X64: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
; X64: lock
; X64: andq $3
@@ -56,7 +59,8 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X64: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
; X64: lock
; X64: orq $3
@@ -72,7 +76,8 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X64: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X64: lock
; X64: xorq $3
@@ -88,8 +93,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X64: atomic_fetch_nand64
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X64: andq
; X64: notq
@@ -107,8 +112,10 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -126,8 +133,10 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -145,8 +154,10 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -164,8 +175,10 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -183,7 +196,9 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
- %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
+ %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X64: lock
; X64: cmpxchgq
; X32: lock
@@ -194,6 +209,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X64-NOT: lock
; X64: movq
@@ -205,6 +222,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X64-NOT: lock
; X64: xchgq
diff --git a/test/CodeGen/X86/atomic6432.ll b/test/CodeGen/X86/atomic6432.ll
index 31e66c876e3d..1c4b0f43bf76 100644
--- a/test/CodeGen/X86/atomic6432.ll
+++ b/test/CodeGen/X86/atomic6432.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X32: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X32: addl
@@ -30,20 +31,21 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X32: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-1
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t2 = atomicrmw sub i64* @sc64, i64 3 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-3
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw sub i64* @sc64, i64 5 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-5
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
@@ -56,15 +58,16 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X32: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
-; X32: andl
-; X32: andl
+; X32: andl $3
+; X32-NOT: andl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw and i64* @sc64, i64 5 acquire
-; X32: andl
-; X32: andl
+ %t2 = atomicrmw and i64* @sc64, i64 4294967297 acquire
+; X32: andl $1
+; X32: andl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
@@ -77,15 +80,16 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X32: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
-; X32: orl
-; X32: orl
+; X32: orl $3
+; X32-NOT: orl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw or i64* @sc64, i64 5 acquire
-; X32: orl
-; X32: orl
+ %t2 = atomicrmw or i64* @sc64, i64 4294967297 acquire
+; X32: orl $1
+; X32: orl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
@@ -98,15 +102,16 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X32: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X32: xorl
-; X32: xorl
+; X32-NOT: xorl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
-; X32: xorl
-; X32: xorl
+ %t2 = atomicrmw xor i64* @sc64, i64 4294967297 acquire
+; X32: xorl $1
+; X32: xorl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
@@ -119,7 +124,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32: andl
; X32: andl
@@ -132,10 +138,11 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -145,10 +152,11 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -158,10 +166,11 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -171,10 +180,11 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -184,7 +194,9 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
- %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
+ %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X32: lock
; X32: cmpxchg8b
ret void
@@ -192,6 +204,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X32: lock
; X32: cmpxchg8b
@@ -200,6 +214,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32: lock
; X32: xchg8b
diff --git a/test/CodeGen/X86/atomic8.ll b/test/CodeGen/X86/atomic8.ll
index 3278ed1f504e..5eef9b295e80 100644
--- a/test/CodeGen/X86/atomic8.ll
+++ b/test/CodeGen/X86/atomic8.ll
@@ -4,8 +4,8 @@
@sc8 = external global i8
define void @atomic_fetch_add8() nounwind {
-; X64: atomic_fetch_add8
-; X32: atomic_fetch_add8
+; X64-LABEL: atomic_fetch_add8:
+; X32-LABEL: atomic_fetch_add8:
entry:
; 32-bit
%t1 = atomicrmw add i8* @sc8, i8 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub8() nounwind {
-; X64: atomic_fetch_sub8
-; X32: atomic_fetch_sub8
+; X64-LABEL: atomic_fetch_sub8:
+; X32-LABEL: atomic_fetch_sub8:
%t1 = atomicrmw sub i8* @sc8, i8 1 acquire
; X64: lock
; X64: decb
@@ -62,8 +62,8 @@ define void @atomic_fetch_sub8() nounwind {
}
define void @atomic_fetch_and8() nounwind {
-; X64: atomic_fetch_and8
-; X32: atomic_fetch_and8
+; X64-LABEL: atomic_fetch_and8:
+; X32-LABEL: atomic_fetch_and8:
%t1 = atomicrmw and i8* @sc8, i8 3 acquire
; X64: lock
; X64: andb $3
@@ -87,8 +87,8 @@ define void @atomic_fetch_and8() nounwind {
}
define void @atomic_fetch_or8() nounwind {
-; X64: atomic_fetch_or8
-; X32: atomic_fetch_or8
+; X64-LABEL: atomic_fetch_or8:
+; X32-LABEL: atomic_fetch_or8:
%t1 = atomicrmw or i8* @sc8, i8 3 acquire
; X64: lock
; X64: orb $3
@@ -112,8 +112,8 @@ define void @atomic_fetch_or8() nounwind {
}
define void @atomic_fetch_xor8() nounwind {
-; X64: atomic_fetch_xor8
-; X32: atomic_fetch_xor8
+; X64-LABEL: atomic_fetch_xor8:
+; X32-LABEL: atomic_fetch_xor8:
%t1 = atomicrmw xor i8* @sc8, i8 3 acquire
; X64: lock
; X64: xorb $3
@@ -137,8 +137,8 @@ define void @atomic_fetch_xor8() nounwind {
}
define void @atomic_fetch_nand8(i8 %x) nounwind {
-; X64: atomic_fetch_nand8
-; X32: atomic_fetch_nand8
+; X64-LABEL: atomic_fetch_nand8:
+; X32-LABEL: atomic_fetch_nand8:
%t1 = atomicrmw nand i8* @sc8, i8 %x acquire
; X64: andb
; X64: notb
@@ -154,14 +154,18 @@ define void @atomic_fetch_nand8(i8 %x) nounwind {
}
define void @atomic_fetch_max8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_max8:
+; X32-LABEL: atomic_fetch_max8:
%t1 = atomicrmw max i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -170,14 +174,18 @@ define void @atomic_fetch_max8(i8 %x) nounwind {
}
define void @atomic_fetch_min8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_min8:
+; X32-LABEL: atomic_fetch_min8:
%t1 = atomicrmw min i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -186,14 +194,18 @@ define void @atomic_fetch_min8(i8 %x) nounwind {
}
define void @atomic_fetch_umax8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax8:
+; X32-LABEL: atomic_fetch_umax8:
%t1 = atomicrmw umax i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -202,13 +214,18 @@ define void @atomic_fetch_umax8(i8 %x) nounwind {
}
define void @atomic_fetch_umin8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin8:
+; X32-LABEL: atomic_fetch_umin8:
%t1 = atomicrmw umin i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -217,7 +234,9 @@ define void @atomic_fetch_umin8(i8 %x) nounwind {
}
define void @atomic_fetch_cmpxchg8() nounwind {
- %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire
+; X64-LABEL: atomic_fetch_cmpxchg8:
+; X32-LABEL: atomic_fetch_cmpxchg8:
+ %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire acquire
; X64: lock
; X64: cmpxchgb
; X32: lock
@@ -228,6 +247,8 @@ define void @atomic_fetch_cmpxchg8() nounwind {
}
define void @atomic_fetch_store8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_store8:
+; X32-LABEL: atomic_fetch_store8:
store atomic i8 %x, i8* @sc8 release, align 4
; X64-NOT: lock
; X64: movb
@@ -239,6 +260,8 @@ define void @atomic_fetch_store8(i8 %x) nounwind {
}
define void @atomic_fetch_swap8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap8:
+; X32-LABEL: atomic_fetch_swap8:
%t1 = atomicrmw xchg i8* @sc8, i8 %x acquire
; X64-NOT: lock
; X64: xchgb
diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll
index a378d6e8d684..d0ab28aa61f9 100644
--- a/test/CodeGen/X86/atomic_op.ll
+++ b/test/CodeGen/X86/atomic_op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov,cx16 -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -101,26 +101,28 @@ entry:
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
; CHECK: lock
; CHECK: cmpxchgl
- %16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic
+ %pair16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic monotonic
+ %16 = extractvalue { i32, i1 } %pair16, 0
store i32 %16, i32* %old
; CHECK: lock
; CHECK: cmpxchgl
- %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic
+ %pair17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic monotonic
+ %17 = extractvalue { i32, i1 } %pair17, 0
store i32 %17, i32* %old
; CHECK: movl [[R17atomic:.*]], %eax
- ; CHECK: movl $1401, %[[R17mask:[a-z]*]]
- ; CHECK: andl %eax, %[[R17mask]]
- ; CHECK: notl %[[R17mask]]
+ ; CHECK: movl %eax, %[[R17mask:[a-z]*]]
+ ; CHECK: notl %[[R17mask]]
+ ; CHECK: orl $-1402, %[[R17mask]]
; CHECK: lock
; CHECK: cmpxchgl %[[R17mask]], [[R17atomic]]
; CHECK: jne
; CHECK: movl %eax,
%18 = atomicrmw nand i32* %val2, i32 1401 monotonic
store i32 %18, i32* %old
- ; CHECK: andl
- ; CHECK: andl
; CHECK: notl
; CHECK: notl
+ ; CHECK: orl $252645135
+ ; CHECK: orl $252645135
; CHECK: lock
; CHECK: cmpxchg8b
%19 = atomicrmw nand i64* %temp64, i64 17361641481138401520 monotonic
@@ -133,6 +135,7 @@ entry:
; CHECK: lock
; CHECK: cmpxchgl %{{.*}}, %gs:(%{{.*}})
- %0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic
+ %pair0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic monotonic
+ %0 = extractvalue { i32, i1 } %pair0, 0
ret void
}
diff --git a/test/CodeGen/X86/avoid_complex_am.ll b/test/CodeGen/X86/avoid_complex_am.ll
new file mode 100644
index 000000000000..7f095190ab8f
--- /dev/null
+++ b/test/CodeGen/X86/avoid_complex_am.ll
@@ -0,0 +1,40 @@
+; RUN: opt -S -loop-reduce < %s | FileCheck %s
+; Complex addressing modes are costly.
+; Make loop-reduce prefer unscaled accesses.
+; On X86, reg1 + 1*reg2 has the same cost as reg1 + 8*reg2.
+; Therefore, LSR currently prefers to fold as much computation as possible
+; in the addressing mode.
+; <rdar://problem/16730541>
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+define void @mulDouble(double* nocapture %a, double* nocapture %b, double* nocapture %c) {
+; CHECK: @mulDouble
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+; CHECK: [[IV:%[^ ]+]] = phi i64 [ [[IVNEXT:%[^,]+]], %for.body ], [ 0, %entry ]
+; Only one induction variable should have been generated.
+; CHECK-NOT: phi
+ %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = add nsw i64 %indvars.iv, -1
+ %arrayidx = getelementptr inbounds double* %b, i64 %tmp
+ %tmp1 = load double* %arrayidx, align 8
+; The induction variable should carry the scaling factor: 1.
+; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 1
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
+ %tmp2 = load double* %arrayidx2, align 8
+ %mul = fmul double %tmp1, %tmp2
+ %arrayidx4 = getelementptr inbounds double* %a, i64 %indvars.iv
+ store double %mul, double* %arrayidx4, align 8
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+; Comparison should be 19 * 1 = 19.
+; CHECK: icmp eq i32 {{%[^,]+}}, 19
+ %exitcond = icmp eq i32 %lftr.wideiv, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll
index a98e0761ce31..d2a22d709474 100644
--- a/test/CodeGen/X86/avx-blend.ll
+++ b/test/CodeGen/X86/avx-blend.ll
@@ -3,25 +3,34 @@
; AVX128 tests:
;CHECK-LABEL: vsel_float:
-;CHECK: vblendvps
+; select mask is <i1 true, i1 false, i1 true, i1 false>.
+; Big endian representation is 0101 = 5.
+; '1' means take the first argument, '0' means take the second argument.
+; This is the opposite of the Intel syntax, thus we expect
+; the inverted mask: 1010 = 10.
+; According to the ABI:
+; v1 is in xmm0 => first argument is xmm0.
+; v2 is in xmm1 => second argument is xmm1.
+; result is in xmm0 => destination argument.
+;CHECK: vblendps $10, %xmm1, %xmm0, %xmm0
;CHECK: ret
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
- %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %v1, <4 x float> %v2
ret <4 x float> %vsel
}
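; For reference, the arithmetic spelled out above, worked for this mask
; (an illustration only, not an extra check):
;   select mask, elements e3..e0:  0 1 0 1  (= 5)
;   blend immediate (bit set = take the second operand, here %xmm1):
;                                  1 0 1 0  (= 10)
; The same inversion gives 11101110 = 238 for the 8-wide masks below.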
;CHECK-LABEL: vsel_i32:
-;CHECK: vblendvps
+;CHECK: vblendps $10, %xmm1, %xmm0, %xmm0
;CHECK: ret
define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
- %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %v1, <4 x i32> %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %v1, <4 x i32> %v2
ret <4 x i32> %vsel
}
;CHECK-LABEL: vsel_double:
-;CHECK: vblendvpd
+;CHECK: vmovsd
;CHECK: ret
define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) {
%vsel = select <2 x i1> <i1 true, i1 false>, <2 x double> %v1, <2 x double> %v2
@@ -30,7 +39,7 @@ define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) {
;CHECK-LABEL: vsel_i64:
-;CHECK: vblendvpd
+;CHECK: vmovsd
;CHECK: ret
define <2 x i64> @vsel_i64(<2 x i64> %v1, <2 x i64> %v2) {
%vsel = select <2 x i1> <i1 true, i1 false>, <2 x i64> %v1, <2 x i64> %v2
@@ -51,7 +60,14 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
;CHECK-LABEL: vsel_float8:
-;CHECK: vblendvps
+;CHECK-NOT: vinsertf128
+; <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>
+; which translates into the boolean mask (big endian representation):
+; 00010001 = 17.
+; '1' means take the first argument, '0' means take the second argument.
+; This is the opposite of the Intel syntax, thus we expect
+; the inverted mask: 11101110 = 238.
+;CHECK: vblendps $238, %ymm1, %ymm0, %ymm0
;CHECK: ret
define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x float> %v1, <8 x float> %v2
@@ -59,15 +75,24 @@ define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
}
;CHECK-LABEL: vsel_i328:
-;CHECK: vblendvps
-;CHECK: ret
+;CHECK-NOT: vinsertf128
+;CHECK: vblendps $238, %ymm1, %ymm0, %ymm0
+;CHECK-NEXT: ret
define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i32> %v1, <8 x i32> %v2
ret <8 x i32> %vsel
}
;CHECK-LABEL: vsel_double8:
-;CHECK: vblendvpd
+; select mask is 2x: 0001 => Intel mask: ~0001 = 14
+; ABI:
+; v1 is in ymm0 and ymm1.
+; v2 is in ymm2 and ymm3.
+; result is in ymm0 and ymm1.
+; Compute the low part: res.low = blend v1.low, v2.low, blendmask
+;CHECK: vblendpd $14, %ymm2, %ymm0, %ymm0
+; Compute the high part.
+;CHECK: vblendpd $14, %ymm3, %ymm1, %ymm1
;CHECK: ret
define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x double> %v1, <8 x double> %v2
@@ -75,13 +100,23 @@ define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
}
;CHECK-LABEL: vsel_i648:
-;CHECK: vblendvpd
+;CHECK: vblendpd $14, %ymm2, %ymm0, %ymm0
+;CHECK: vblendpd $14, %ymm3, %ymm1, %ymm1
;CHECK: ret
define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i64> %v1, <8 x i64> %v2
ret <8 x i64> %vsel
}
+;CHECK-LABEL: vsel_double4:
+;CHECK-NOT: vinsertf128
+;CHECK: vblendpd $10
+;CHECK-NEXT: ret
+define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %v1, <4 x double> %v2
+ ret <4 x double> %vsel
+}
+
;; TEST blend + compares
; CHECK: testa
define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
@@ -101,4 +136,67 @@ define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
ret <2 x double> %min
}
+; If we can figure out that a blend has a constant mask, we should emit the
+; blend instruction with an immediate mask.
+define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) {
+; CHECK-LABEL: constant_blendvpd_avx:
+; CHECK-NOT: mov
+; CHECK: vblendpd
+; CHECK: ret
+ %1 = select <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> %xy, <4 x double> %ab
+ ret <4 x double> %1
+}
+define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) {
+; CHECK-LABEL: constant_blendvps_avx:
+; CHECK-NOT: mov
+; CHECK: vblendps
+; CHECK: ret
+ %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x float> %xyzw, <8 x float> %abcd
+ ret <8 x float> %1
+}
+
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>)
+
+;; 4 tests for shufflevectors that optimize to blend + immediate
+; CHECK-LABEL: @blend_shufflevector_4xfloat
+define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
+; Equivalent select mask is <i1 true, i1 false, i1 true, i1 false>.
+; Big endian representation is 0101 = 5.
+; '1' means take the first argument, '0' means take the second argument.
+; This is the opposite of the Intel syntax, thus we expect
+; the inverted mask: 1010 = 10.
+; According to the ABI:
+; a is in xmm0 => first argument is xmm0.
+; b is in xmm1 => second argument is xmm1.
+; Result is in xmm0 => destination argument.
+; CHECK: vblendps $10, %xmm1, %xmm0, %xmm0
+; CHECK: ret
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_8xfloat
+define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
+; CHECK: vblendps $190, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 6, i32 15>
+ ret <8 x float> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_4xdouble
+define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
+; CHECK: vblendpd $2, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ ret <4 x double> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_4xi64
+define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK: vblendpd $13, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ ret <4 x i64> %1
+}
diff --git a/test/CodeGen/X86/avx-cvt-2.ll b/test/CodeGen/X86/avx-cvt-2.ll
new file mode 100644
index 000000000000..8cc7190fcc69
--- /dev/null
+++ b/test/CodeGen/X86/avx-cvt-2.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s
+
+; Check that we generate vector conversions from float to narrower int types
+
+%f32vec_t = type <8 x float>
+%i16vec_t = type <8 x i16>
+%i8vec_t = type <8 x i8>
+
+define void @fptoui16(%f32vec_t %a, %i16vec_t *%p) {
+; CHECK-LABEL: fptoui16:
+; CHECK: vcvttps2dq %ymm
+; CHECK-NOT: vcvttss2si
+ %b = fptoui %f32vec_t %a to %i16vec_t
+ store %i16vec_t %b, %i16vec_t * %p
+ ret void
+}
+
+define void @fptosi16(%f32vec_t %a, %i16vec_t *%p) {
+; CHECK-LABEL: fptosi16:
+; CHECK: vcvttps2dq %ymm
+; CHECK-NOT: vcvttss2si
+ %b = fptosi %f32vec_t %a to %i16vec_t
+ store %i16vec_t %b, %i16vec_t * %p
+ ret void
+}
+
+define void @fptoui8(%f32vec_t %a, %i8vec_t *%p) {
+; CHECK-LABEL: fptoui8:
+; CHECK: vcvttps2dq %ymm
+; CHECK-NOT: vcvttss2si
+ %b = fptoui %f32vec_t %a to %i8vec_t
+ store %i8vec_t %b, %i8vec_t * %p
+ ret void
+}
+
+define void @fptosi8(%f32vec_t %a, %i8vec_t *%p) {
+; CHECK-LABEL: fptosi8:
+; CHECK: vcvttps2dq %ymm
+; CHECK-NOT: vcvttss2si
+ %b = fptosi %f32vec_t %a to %i8vec_t
+ store %i8vec_t %b, %i8vec_t * %p
+ ret void
+}
diff --git a/test/CodeGen/X86/avx-intel-ocl.ll b/test/CodeGen/X86/avx-intel-ocl.ll
index 7337815a39ac..3e051bff768d 100644
--- a/test/CodeGen/X86/avx-intel-ocl.ll
+++ b/test/CodeGen/X86/avx-intel-ocl.ll
@@ -7,21 +7,21 @@ declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float> *)
declare <16 x float> @func_float16(<16 x float>, <16 x float>)
declare i32 @func_int(i32, i32)
-; WIN64: testf16_inp
+; WIN64-LABEL: testf16_inp
; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
; WIN64: leaq {{.*}}(%rsp), %rcx
; WIN64: call
; WIN64: ret
-; X32: testf16_inp
+; X32-LABEL: testf16_inp
; X32: movl %eax, (%esp)
; X32: vaddps {{.*}}, {{%ymm[0-1]}}
; X32: vaddps {{.*}}, {{%ymm[0-1]}}
; X32: call
; X32: ret
-; X64: testf16_inp
+; X64-LABEL: testf16_inp
; X64: vaddps {{.*}}, {{%ymm[0-1]}}
; X64: vaddps {{.*}}, {{%ymm[0-1]}}
; X64: leaq {{.*}}(%rsp), %rdi
@@ -41,14 +41,14 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
;test calling conventions - preserved registers
; preserved ymm6-ymm15
-; WIN64: testf16_regs
+; WIN64-LABEL: testf16_regs
; WIN64: call
; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: ret
; preserved ymm8-ymm15
-; X64: testf16_regs
+; X64-LABEL: testf16_regs
; X64: call
; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
@@ -65,28 +65,30 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
}
; test calling conventions - prolog and epilog
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
+; WIN64-LABEL: test_prolog_epilog
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
; WIN64: call
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+
+; X64-LABEL: test_prolog_epilog
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
@@ -111,12 +113,14 @@ define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x fl
; test functions with integer parameters
; pass parameters on stack for 32-bit platform
+; X32-LABEL: test_int
; X32: movl {{.*}}, 4(%esp)
; X32: movl {{.*}}, (%esp)
; X32: call
; X32: addl {{.*}}, %eax
; pass parameters in registers for 64-bit platform
+; X64-LABEL: test_int
; X64: leal {{.*}}, %edi
; X64: movl {{.*}}, %esi
; X64: call
@@ -128,21 +132,21 @@ define i32 @test_int(i32 %a, i32 %b) nounwind {
ret i32 %c
}
-; WIN64: test_float4
+; WIN64-LABEL: test_float4
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64: ret
-; X64: test_float4
+; X64-LABEL: test_float4
; X64-NOT: vzeroupper
; X64: call
; X64-NOT: vzeroupper
; X64: call
; X64: ret
-; X32: test_float4
+; X32-LABEL: test_float4
; X32: vzeroupper
; X32: call
; X32: vzeroupper
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index 0be83f648d1a..ce31161dbbcd 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -2219,14 +2219,6 @@ define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) {
declare void @llvm.x86.avx.storeu.ps.256(i8*, <8 x float>) nounwind
-define <4 x double> @test_x86_avx_vbroadcast_sd_256(i8* %a0) {
- ; CHECK: vbroadcastsd
- %res = call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %a0) ; <<4 x double>> [#uses=1]
- ret <4 x double> %res
-}
-declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*) nounwind readonly
-
-
define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
; CHECK: vbroadcastf128
%res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
@@ -2243,22 +2235,6 @@ define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
-define <4 x float> @test_x86_avx_vbroadcast_ss(i8* %a0) {
- ; CHECK: vbroadcastss
- %res = call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*) nounwind readonly
-
-
-define <8 x float> @test_x86_avx_vbroadcast_ss_256(i8* %a0) {
- ; CHECK: vbroadcastss
- %res = call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %a0) ; <<8 x float>> [#uses=1]
- ret <8 x float> %res
-}
-declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*) nounwind readonly
-
-
define <2 x double> @test_x86_avx_vextractf128_pd_256(<4 x double> %a0) {
; CHECK: vextractf128
%res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 7) ; <<2 x double>> [#uses=1]
diff --git a/test/CodeGen/X86/avx-shift.ll b/test/CodeGen/X86/avx-shift.ll
index d79dfcc076b0..a70d45a7991a 100644
--- a/test/CodeGen/X86/avx-shift.ll
+++ b/test/CodeGen/X86/avx-shift.ll
@@ -115,8 +115,8 @@ define <8 x i32> @vshift08(<8 x i32> %a) nounwind {
; PR15141
; CHECK: _vshift13:
; CHECK-NOT: vpsll
-; CHECK: vcvttps2dq
-; CHECK-NEXT: vpmulld
+; CHECK-NOT: vcvttps2dq
+; CHECK: vpmulld
define <4 x i32> @vshift13(<4 x i32> %in) {
%T = shl <4 x i32> %in, <i32 0, i32 1, i32 2, i32 4>
ret <4 x i32> %T
diff --git a/test/CodeGen/X86/avx-shuffle.ll b/test/CodeGen/X86/avx-shuffle.ll
index 0956361c7e30..4a996d79815c 100644
--- a/test/CodeGen/X86/avx-shuffle.ll
+++ b/test/CodeGen/X86/avx-shuffle.ll
@@ -5,8 +5,10 @@ define <4 x float> @test1(<4 x float> %a) nounwind {
%b = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 5, i32 undef, i32 undef>
ret <4 x float> %b
; CHECK-LABEL: test1:
-; CHECK: vshufps
-; CHECK: vpshufd
+;; TODO: This test could be improved by removing the xor instruction and
+;; having vinsertps zero out the needed elements.
+; CHECK: vxorps
+; CHECK: vinsertps
}
; rdar://10538417
@@ -23,7 +25,7 @@ define <4 x i64> @test3(<4 x i64> %a, <4 x i64> %b) nounwind {
%c = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 undef>
ret <4 x i64> %c
; CHECK-LABEL: test3:
-; CHECK: vperm2f128
+; CHECK: vblendpd
; CHECK: ret
}
@@ -297,3 +299,38 @@ entry:
}
declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone
+
+; This test case should simply not fail.
+define void @test20() {
+ %a0 = insertelement <3 x double> <double 0.000000e+00, double 0.000000e+00, double undef>, double 0.000000e+00, i32 2
+ store <3 x double> %a0, <3 x double>* undef, align 1
+ %a1 = insertelement <3 x double> <double 0.000000e+00, double 0.000000e+00, double undef>, double undef, i32 2
+ store <3 x double> %a1, <3 x double>* undef, align 1
+ ret void
+}
+
+define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
+; CHECK-LABEL: test_insert_64_zext
+; CHECK-NOT: xor
+; CHECK: vmovq
+ %1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %1
+}
+
+;; Ensure we don't use insertps from non v4x32 vectors.
+;; On SSE4.1 it works because bigger vectors use more than 1 register.
+;; On AVX they get passed in a single register.
+;; FIXME: We could probably optimize this case, if we're only using the
+;; first 4 indices.
+define <4 x i32> @insert_from_diff_size(<8 x i32> %x) {
+; CHECK-LABEL: insert_from_diff_size:
+; CHECK-NOT: insertps
+; CHECK: ret
+ %vecext = extractelement <8 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
+ %a.0 = extractelement <8 x i32> %x, i32 0
+ %vecinit3 = insertelement <4 x i32> %vecinit2, i32 %a.0, i32 3
+ ret <4 x i32> %vecinit3
+}
diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll
index 5d0781531f4d..b1b2f8b97a73 100644
--- a/test/CodeGen/X86/avx-splat.ll
+++ b/test/CodeGen/X86/avx-splat.ll
@@ -43,13 +43,10 @@ entry:
ret <4 x double> %vecinit6.i
}
-; Test this simple opt:
+; Test that this turns into a broadcast:
; shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
-; To:
-; shuffle (vload ptr)), undef, <1, 1, 1, 1>
-; CHECK: vmovdqa
-; CHECK-NEXT: vpshufd $-1
-; CHECK-NEXT: vinsertf128 $1
+;
+; CHECK: vbroadcastss
define <8 x float> @funcE() nounwind {
allocas:
%udx495 = alloca [18 x [18 x float]], align 32
diff --git a/test/CodeGen/X86/avx-trunc.ll b/test/CodeGen/X86/avx-trunc.ll
index 58d0a356909b..bf8d9a7f1a40 100755
--- a/test/CodeGen/X86/avx-trunc.ll
+++ b/test/CodeGen/X86/avx-trunc.ll
@@ -1,13 +1,15 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
define <4 x i32> @trunc_64_32(<4 x i64> %A) nounwind uwtable readnone ssp{
-; CHECK: trunc_64_32
-; CHECK: pshufd
+; CHECK-LABEL: trunc_64_32
+; CHECK: shufps
+; CHECK-NOT: pshufd
+; CHECK-NOT: movlhps
%B = trunc <4 x i64> %A to <4 x i32>
ret <4 x i32>%B
}
define <8 x i16> @trunc_32_16(<8 x i32> %A) nounwind uwtable readnone ssp{
-; CHECK: trunc_32_16
+; CHECK-LABEL: trunc_32_16
; CHECK: pshufb
%B = trunc <8 x i32> %A to <8 x i16>
ret <8 x i16>%B
diff --git a/test/CodeGen/X86/avx-vbroadcast.ll b/test/CodeGen/X86/avx-vbroadcast.ll
index 0d403d4bb124..2ebe6fda37a3 100644
--- a/test/CodeGen/X86/avx-vbroadcast.ll
+++ b/test/CodeGen/X86/avx-vbroadcast.ll
@@ -141,3 +141,66 @@ entry:
ret <4 x float> %t
}
+
+; These tests check that a vbroadcast instruction is used when we have a splat
+; formed from a concat_vectors (via the shufflevector) of two BUILD_VECTORs
+; (via the insertelements).
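+; Roughly, the DAG shape being matched is (an illustration, not checked):
+;   concat_vectors (BUILD_VECTOR x, x, x, x), (BUILD_VECTOR x, x, x, x)
+;     --> broadcast x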
+
+; CHECK-LABEL: splat_concat1
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastss (%
+; CHECK-NEXT: ret
+define <8 x float> @splat_concat1(float* %p) {
+ %1 = load float* %p, align 4
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %3 = insertelement <4 x float> %2, float %1, i32 1
+ %4 = insertelement <4 x float> %3, float %1, i32 2
+ %5 = insertelement <4 x float> %4, float %1, i32 3
+ %6 = shufflevector <4 x float> %5, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x float> %6
+}
+
+; CHECK-LABEL: splat_concat2
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastss (%
+; CHECK-NEXT: ret
+define <8 x float> @splat_concat2(float* %p) {
+ %1 = load float* %p, align 4
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %3 = insertelement <4 x float> %2, float %1, i32 1
+ %4 = insertelement <4 x float> %3, float %1, i32 2
+ %5 = insertelement <4 x float> %4, float %1, i32 3
+ %6 = insertelement <4 x float> undef, float %1, i32 0
+ %7 = insertelement <4 x float> %6, float %1, i32 1
+ %8 = insertelement <4 x float> %7, float %1, i32 2
+ %9 = insertelement <4 x float> %8, float %1, i32 3
+ %10 = shufflevector <4 x float> %5, <4 x float> %9, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %10
+}
+
+; CHECK-LABEL: splat_concat3
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastsd (%
+; CHECK-NEXT: ret
+define <4 x double> @splat_concat3(double* %p) {
+ %1 = load double* %p, align 8
+ %2 = insertelement <2 x double> undef, double %1, i32 0
+ %3 = insertelement <2 x double> %2, double %1, i32 1
+ %4 = shufflevector <2 x double> %3, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ ret <4 x double> %4
+}
+
+; CHECK-LABEL: splat_concat4
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastsd (%
+; CHECK-NEXT: ret
+define <4 x double> @splat_concat4(double* %p) {
+ %1 = load double* %p, align 8
+ %2 = insertelement <2 x double> undef, double %1, i32 0
+ %3 = insertelement <2 x double> %2, double %1, i32 1
+ %4 = insertelement <2 x double> undef, double %1, i32 0
+ %5 = insertelement <2 x double> %2, double %1, i32 1
+ %6 = shufflevector <2 x double> %3, <2 x double> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %6
+}
+
diff --git a/test/CodeGen/X86/avx-vperm2f128.ll b/test/CodeGen/X86/avx-vperm2f128.ll
index caa21e5bacfe..c20775bacad2 100644
--- a/test/CodeGen/X86/avx-vperm2f128.ll
+++ b/test/CodeGen/X86/avx-vperm2f128.ll
@@ -9,7 +9,7 @@ entry:
}
; CHECK: _B
-; CHECK: vperm2f128 $48
+; CHECK: vblendps $240
define <8 x float> @B(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
diff --git a/test/CodeGen/X86/avx-vshufp.ll b/test/CodeGen/X86/avx-vshufp.ll
index 45883b717380..ad3dbc1ed893 100644
--- a/test/CodeGen/X86/avx-vshufp.ll
+++ b/test/CodeGen/X86/avx-vshufp.ll
@@ -32,14 +32,14 @@ entry:
ret <8 x i32> %shuffle
}
-; CHECK: vshufpd $10, %ymm
+; CHECK: vblendpd $10, %ymm
define <4 x double> @B(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x double> %shuffle
}
-; CHECK: vshufpd $10, (%{{.*}}), %ymm
+; CHECK: vblendpd $10, (%{{.*}}), %ymm
define <4 x double> @B2(<4 x double>* %a, <4 x double>* %b) nounwind uwtable readnone ssp {
entry:
%a2 = load <4 x double>* %a
@@ -48,14 +48,14 @@ entry:
ret <4 x double> %shuffle
}
-; CHECK: vshufpd $10, %ymm
+; CHECK: vblendpd $10, %ymm
define <4 x i64> @B3(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x i64> %shuffle
}
-; CHECK: vshufpd $10, (%{{.*}}), %ymm
+; CHECK: vblendpd $10, (%{{.*}}), %ymm
define <4 x i64> @B4(<4 x i64>* %a, <4 x i64>* %b) nounwind uwtable readnone ssp {
entry:
%a2 = load <4 x i64>* %a
@@ -71,7 +71,7 @@ entry:
ret <8 x float> %shuffle
}
-; CHECK: vshufpd $2, %ymm
+; CHECK: vblendpd $2, %ymm
define <4 x double> @D(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 undef>
diff --git a/test/CodeGen/X86/avx-vzeroupper.ll b/test/CodeGen/X86/avx-vzeroupper.ll
index bf4ab5be1512..a2163a254e14 100644
--- a/test/CodeGen/X86/avx-vzeroupper.ll
+++ b/test/CodeGen/X86/avx-vzeroupper.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+declare i32 @foo()
declare <4 x float> @do_sse(<4 x float>)
declare <8 x float> @do_avx(<8 x float>)
declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
@@ -36,20 +37,38 @@ entry:
ret <8 x float> %c
}
+;; Check that vzeroupper is emitted for tail calls.
+
+; CHECK: _test02
+define <4 x float> @test02(<8 x float> %a, <8 x float> %b) nounwind uwtable ssp {
+entry:
+ %add.i = fadd <8 x float> %a, %b
+ %add.low = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %add.i, i8 0)
+ ; CHECK: vzeroupper
+ ; CHECK: jmp _do_sse
+ %call3 = tail call <4 x float> @do_sse(<4 x float> %add.low) nounwind
+ ret <4 x float> %call3
+}
+
;; Test the pass convergence and also that vzeroupper is only issued when necessary,
;; for this function it should be only once
-; CHECK: _test02
-define <4 x float> @test02(<4 x float> %a, <4 x float> %b) nounwind uwtable ssp {
+; CHECK: _test03
+define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind uwtable ssp {
entry:
%add.i = fadd <4 x float> %a, %b
- br label %for.body
+ br label %while.cond
-for.body: ; preds = %for.body, %entry
+while.cond:
+ %call = tail call i32 @foo()
+ %tobool = icmp eq i32 %call, 0
+ br i1 %tobool, label %for.body, label %while.cond
+
+for.body:
; CHECK: LBB
; CHECK-NOT: vzeroupper
- %i.018 = phi i32 [ 0, %entry ], [ %1, %for.body ]
- %c.017 = phi <4 x float> [ %add.i, %entry ], [ %call14, %for.body ]
+ %i.018 = phi i32 [ 0, %while.cond ], [ %1, %for.body ]
+ %c.017 = phi <4 x float> [ %add.i, %while.cond ], [ %call14, %for.body ]
; CHECK: callq _do_sse
%call5 = tail call <4 x float> @do_sse(<4 x float> %c.017) nounwind
; CHECK-NEXT: callq _do_sse
@@ -63,14 +82,14 @@ for.body: ; preds = %for.body, %entry
%exitcond = icmp eq i32 %1, 4
br i1 %exitcond, label %for.end, label %for.body
-for.end: ; preds = %for.body
+for.end:
ret <4 x float> %call14
}
;; Check that we also perform vzeroupper when we return from a function.
-; CHECK: _test03
-define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind uwtable ssp {
+; CHECK: _test04
+define <4 x float> @test04(<4 x float> %a, <4 x float> %b) nounwind uwtable ssp {
entry:
%shuf = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NOT: vzeroupper
diff --git a/test/CodeGen/X86/avx.ll b/test/CodeGen/X86/avx.ll
new file mode 100644
index 000000000000..6069c14f0d80
--- /dev/null
+++ b/test/CodeGen/X86/avx.ll
@@ -0,0 +1,136 @@
+; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=corei7-avx | FileCheck %s -check-prefix=X32 --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s -check-prefix=X64 --check-prefix=CHECK
+
+define <4 x i32> @blendvb_fallback_v4i32(<4 x i1> %mask, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @blendvb_fallback_v4i32
+; CHECK: vblendvps
+; CHECK: ret
+ %ret = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %y
+ ret <4 x i32> %ret
+}
+
+define <8 x i32> @blendvb_fallback_v8i32(<8 x i1> %mask, <8 x i32> %x, <8 x i32> %y) {
+; CHECK-LABEL: @blendvb_fallback_v8i32
+; CHECK: vblendvps
+; CHECK: ret
+ %ret = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
+ ret <8 x i32> %ret
+}
+
+define <8 x float> @blendvb_fallback_v8f32(<8 x i1> %mask, <8 x float> %x, <8 x float> %y) {
+; CHECK-LABEL: @blendvb_fallback_v8f32
+; CHECK: vblendvps
+; CHECK: ret
+ %ret = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
+ ret <8 x float> %ret
+}
+
+declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
+
+define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+; CHECK-LABEL: insertps_from_vector_load:
+; On X32, account for the argument's move to registers
+; X32: movl 4(%esp), %eax
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK-NEXT: ret
+ %1 = load <4 x float>* %pb, align 16
+ %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
+ ret <4 x float> %2
+}
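+; For reference, the layout of the insertps immediate used throughout these
+; tests (SSE4.1 encoding; a sketch for readers, not something the checks
+; assert):
+;   imm8[7:6] = CountS  (source element, when the source is a register)
+;   imm8[5:4] = CountD  (destination element)
+;   imm8[3:0] = ZMask   (destination elements to zero)
+; e.g. $48 = 0x30 writes into destination element 3 with no zeroing, and
+; $96 = 0x60 takes source element 1 into destination element 2.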
+
+;; Use a non-zero CountS for insertps
+define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+; CHECK-LABEL: insertps_from_vector_load_offset:
+; On X32, account for the argument's move to registers
+; X32: movl 4(%esp), %eax
+; CHECK-NOT: mov
+;; Try to match a bit more of the instr, since we need the load's offset.
+; CHECK: insertps $96, 4(%{{...}}), %
+; CHECK-NEXT: ret
+ %1 = load <4 x float>* %pb, align 16
+ %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
+ ret <4 x float> %2
+}
+
+define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
+; CHECK-LABEL: insertps_from_vector_load_offset_2:
+; On X32, account for the argument's move to registers
+; X32: movl 4(%esp), %eax
+; X32: movl 8(%esp), %ecx
+; CHECK-NOT: mov
+;; Try to match a bit more of the instr, since we need the load's offset.
+; CHECK: vinsertps $192, 12(%{{...}},%{{...}}), %
+; CHECK-NEXT: ret
+ %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
+ %2 = load <4 x float>* %1, align 16
+ %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
+ ret <4 x float> %3
+}
+
+define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
+; CHECK-LABEL: insertps_from_broadcast_loadf32:
+; On X32, account for the arguments' move to registers
+; X32: movl 8(%esp), %eax
+; X32: movl 4(%esp), %ecx
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK-NEXT: ret
+ %1 = getelementptr inbounds float* %fb, i64 %index
+ %2 = load float* %1, align 4
+ %3 = insertelement <4 x float> undef, float %2, i32 0
+ %4 = insertelement <4 x float> %3, float %2, i32 1
+ %5 = insertelement <4 x float> %4, float %2, i32 2
+ %6 = insertelement <4 x float> %5, float %2, i32 3
+ %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
+ ret <4 x float> %7
+}
+
+define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
+; CHECK-LABEL: insertps_from_broadcast_loadv4f32:
+; On X32, account for the arguments' move to registers
+; X32: movl 4(%esp), %{{...}}
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK-NEXT: ret
+ %1 = load <4 x float>* %b, align 4
+ %2 = extractelement <4 x float> %1, i32 0
+ %3 = insertelement <4 x float> undef, float %2, i32 0
+ %4 = insertelement <4 x float> %3, float %2, i32 1
+ %5 = insertelement <4 x float> %4, float %2, i32 2
+ %6 = insertelement <4 x float> %5, float %2, i32 3
+ %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
+ ret <4 x float> %7
+}
+
+;; FIXME: We're emitting an extraneous pshufd/vbroadcast.
+define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
+; CHECK-LABEL: insertps_from_broadcast_multiple_use:
+; On X32, account for the arguments' move to registers
+; X32: movl 8(%esp), %eax
+; X32: movl 4(%esp), %ecx
+; CHECK: vbroadcastss
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK: insertps $48
+; CHECK: insertps $48
+; CHECK: insertps $48
+; CHECK: vaddps
+; CHECK: vaddps
+; CHECK: vaddps
+; CHECK-NEXT: ret
+ %1 = getelementptr inbounds float* %fb, i64 %index
+ %2 = load float* %1, align 4
+ %3 = insertelement <4 x float> undef, float %2, i32 0
+ %4 = insertelement <4 x float> %3, float %2, i32 1
+ %5 = insertelement <4 x float> %4, float %2, i32 2
+ %6 = insertelement <4 x float> %5, float %2, i32 3
+ %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
+ %8 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %b, <4 x float> %6, i32 48)
+ %9 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %c, <4 x float> %6, i32 48)
+ %10 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %d, <4 x float> %6, i32 48)
+ %11 = fadd <4 x float> %7, %8
+ %12 = fadd <4 x float> %9, %10
+ %13 = fadd <4 x float> %11, %12
+ ret <4 x float> %13
+}
diff --git a/test/CodeGen/X86/avx1-logical-load-folding.ll b/test/CodeGen/X86/avx1-logical-load-folding.ll
new file mode 100644
index 000000000000..32301b1bf9e6
--- /dev/null
+++ b/test/CodeGen/X86/avx1-logical-load-folding.ll
@@ -0,0 +1,60 @@
+; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Function Attrs: nounwind ssp uwtable
+define void @test1(float* %A, float* %C) #0 {
+ %tmp1 = bitcast float* %A to <8 x float>*
+ %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
+ %tmp4 = and <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
+ %tmp6 = extractelement <8 x float> %tmp5, i32 0
+ store float %tmp6, float* %C
+ ret void
+
+ ; CHECK: vandps LCPI0_0(%rip), %ymm0, %ymm0
+}
+
+; Function Attrs: nounwind ssp uwtable
+define void @test2(float* %A, float* %C) #0 {
+ %tmp1 = bitcast float* %A to <8 x float>*
+ %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
+ %tmp4 = or <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
+ %tmp6 = extractelement <8 x float> %tmp5, i32 0
+ store float %tmp6, float* %C
+ ret void
+
+ ; CHECK: vorps LCPI1_0(%rip), %ymm0, %ymm0
+}
+
+; Function Attrs: nounwind ssp uwtable
+define void @test3(float* %A, float* %C) #0 {
+ %tmp1 = bitcast float* %A to <8 x float>*
+ %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
+ %tmp4 = xor <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
+ %tmp6 = extractelement <8 x float> %tmp5, i32 0
+ store float %tmp6, float* %C
+ ret void
+
+ ; CHECK: vxorps LCPI2_0(%rip), %ymm0, %ymm0
+}
+
+define void @test4(float* %A, float* %C) #0 {
+ %tmp1 = bitcast float* %A to <8 x float>*
+ %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
+ %tmp4 = xor <8 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %tmp5 = and <8 x i32> %tmp4, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %tmp6 = bitcast <8 x i32> %tmp5 to <8 x float>
+ %tmp7 = extractelement <8 x float> %tmp6, i32 0
+ store float %tmp7, float * %C
+ ret void
+
+ ;CHECK: vandnps LCPI3_0(%rip), %ymm0, %ymm0
+}
diff --git a/test/CodeGen/X86/avx2-blend.ll b/test/CodeGen/X86/avx2-blend.ll
new file mode 100644
index 000000000000..b02442b6fadd
--- /dev/null
+++ b/test/CodeGen/X86/avx2-blend.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 | FileCheck %s
+
+define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
+; CHECK-LABEL: constant_pblendvb_avx2:
+; CHECK: vmovdqa
+; CHECK: vpblendvb
+ %1 = select <32 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <32 x i8> %xyzw, <32 x i8> %abcd
+ ret <32 x i8> %1
+}
+
+declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>)
diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index ee50c457fe8c..a9ac0258975a 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -15,4 +15,20 @@ define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1,
; CHECK: vgatherdps
; CHECK-NOT: [[DST]]
; CHECK: [[DST:%xmm[0-9]+]]{{$}}
+; CHECK: vmovaps
+; CHECK: ret
+
+declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
+ <4 x i32>, <2 x double>, i8) nounwind readonly
+
+define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1,
+ <4 x i32> %idx, <2 x double> %mask) {
+ %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> undef,
+ i8* %a1, <4 x i32> %idx, <2 x double> %mask, i8 2) ;
+ ret <2 x double> %res
+}
+
+; CHECK: test_x86_avx2_gather_d_pd
+; CHECK: vgatherdpd
+; CHECK: vmovapd
; CHECK: ret
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86.ll b/test/CodeGen/X86/avx2-intrinsics-x86.ll
index a6141b095617..ab3d591e1d9f 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -753,7 +753,7 @@ declare <16 x i16> @llvm.x86.avx2.pbroadcastw.256(<8 x i16>) nounwind readonly
define <4 x i32> @test_x86_avx2_pbroadcastd_128(<4 x i32> %a0) {
- ; CHECK: vpbroadcastd
+ ; CHECK: vbroadcastss
%res = call <4 x i32> @llvm.x86.avx2.pbroadcastd.128(<4 x i32> %a0) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
}
@@ -761,7 +761,7 @@ declare <4 x i32> @llvm.x86.avx2.pbroadcastd.128(<4 x i32>) nounwind readonly
define <8 x i32> @test_x86_avx2_pbroadcastd_256(<4 x i32> %a0) {
- ; CHECK: vpbroadcastd
+ ; CHECK: vbroadcastss {{[^,]+}}, %ymm{{[0-9]+}}
%res = call <8 x i32> @llvm.x86.avx2.pbroadcastd.256(<4 x i32> %a0) ; <<8 x i32>> [#uses=1]
ret <8 x i32> %res
}
@@ -777,7 +777,7 @@ declare <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64>) nounwind readonly
define <4 x i64> @test_x86_avx2_pbroadcastq_256(<2 x i64> %a0) {
- ; CHECK: vpbroadcastq
+ ; CHECK: vbroadcastsd {{[^,]+}}, %ymm{{[0-9]+}}
%res = call <4 x i64> @llvm.x86.avx2.pbroadcastq.256(<2 x i64> %a0) ; <<4 x i64>> [#uses=1]
ret <4 x i64> %res
}
@@ -1142,7 +1142,7 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, float* %a,
<8 x i32> %idx, <8 x float> %mask,
float* nocapture %out) {
; CHECK: test_gather_mask
-; CHECK: vmovdqa %ymm2, [[DEST:%.*]]
+; CHECK: vmovaps %ymm2, [[DEST:%.*]]
; CHECK: vgatherdps [[DEST]]
;; gather with mask
%a_i8 = bitcast float* %a to i8*
diff --git a/test/CodeGen/X86/avx2-shift.ll b/test/CodeGen/X86/avx2-shift.ll
index 7fdbaaa39cbe..025d52ede0f4 100644
--- a/test/CodeGen/X86/avx2-shift.ll
+++ b/test/CodeGen/X86/avx2-shift.ll
@@ -266,3 +266,36 @@ define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
%c = sext <8 x i16> %b to <8 x i32>
ret <8 x i32> %c
}
+
+define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK-LABEL: variable_shl16:
+; CHECK-DAG: vpmovzxwd %xmm1, [[AMT:%ymm[0-9]+]]
+; CHECK-DAG: vpmovzxwd %xmm0, [[LHS:%ymm[0-9]+]]
+; CHECK: vpsllvd [[AMT]], [[LHS]], {{%ymm[0-9]+}}
+; CHECK: vpshufb
+; CHECK: vpermq
+ %res = shl <8 x i16> %lhs, %rhs
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK-LABEL: variable_ashr16:
+; CHECK-DAG: vpmovzxwd %xmm1, [[AMT:%ymm[0-9]+]]
+; CHECK-DAG: vpmovsxwd %xmm0, [[LHS:%ymm[0-9]+]]
+; CHECK: vpsravd [[AMT]], [[LHS]], {{%ymm[0-9]+}}
+; CHECK: vpshufb
+; CHECK: vpermq
+ %res = ashr <8 x i16> %lhs, %rhs
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK-LABEL: variable_lshr16:
+; CHECK-DAG: vpmovzxwd %xmm1, [[AMT:%ymm[0-9]+]]
+; CHECK-DAG: vpmovzxwd %xmm0, [[LHS:%ymm[0-9]+]]
+; CHECK: vpsrlvd [[AMT]], [[LHS]], {{%ymm[0-9]+}}
+; CHECK: vpshufb
+; CHECK: vpermq
+ %res = lshr <8 x i16> %lhs, %rhs
+ ret <8 x i16> %res
+}
\ No newline at end of file
diff --git a/test/CodeGen/X86/avx2-shuffle.ll b/test/CodeGen/X86/avx2-shuffle.ll
index 0e6dd297f8df..185b989458ae 100644
--- a/test/CodeGen/X86/avx2-shuffle.ll
+++ b/test/CodeGen/X86/avx2-shuffle.ll
@@ -60,6 +60,24 @@ define <4 x i64> @blend_test4(<4 x i64> %a, <4 x i64> %b) nounwind alwaysinline
ret <4 x i64> %t
}
+;; 2 tests for shufflevectors that optimize to blend + immediate
+; CHECK-LABEL: @blend_test5
+; CHECK: vpblendd $10, %xmm1, %xmm0, %xmm0
+; CHECK: ret
+define <4 x i32> @blend_test5(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %1
+}
+
+; CHECK-LABEL: @blend_test6
+; CHECK: vpblendw $134, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+define <16 x i16> @blend_test6(<16 x i16> %a, <16 x i16> %b) {
+ %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 23,
+ i32 8, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 31>
+ ret <16 x i16> %1
+}
+
; CHECK: vpshufhw $27, %ymm
define <16 x i16> @vpshufhw(<16 x i16> %src1) nounwind uwtable readnone ssp {
entry:
diff --git a/test/CodeGen/X86/avx2-vbroadcast.ll b/test/CodeGen/X86/avx2-vbroadcast.ll
index 5610416d39a3..66f586d23d14 100644
--- a/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -98,7 +98,7 @@ entry:
%qf = insertelement <16 x i16> %qe, i16 %q, i32 15
ret <16 x i16> %qf
}
-; CHECK: vpbroadcastd (%
+; CHECK: vbroadcastss (%
define <4 x i32> @D32(i32* %ptr) nounwind uwtable readnone ssp {
entry:
%q = load i32* %ptr, align 4
@@ -108,7 +108,7 @@ entry:
%q3 = insertelement <4 x i32> %q2, i32 %q, i32 3
ret <4 x i32> %q3
}
-; CHECK: vpbroadcastd (%
+; CHECK: vbroadcastss (%
define <8 x i32> @DD32(i32* %ptr) nounwind uwtable readnone ssp {
entry:
%q = load i32* %ptr, align 4
@@ -130,7 +130,7 @@ entry:
%q1 = insertelement <2 x i64> %q0, i64 %q, i32 1
ret <2 x i64> %q1
}
-; CHECK: vpbroadcastq (%
+; CHECK: vbroadcastsd (%
define <4 x i64> @QQ64(i64* %ptr) nounwind uwtable readnone ssp {
entry:
%q = load i64* %ptr, align 4
@@ -293,7 +293,7 @@ define <8 x i16> @_inreg8xi16(<8 x i16> %a) {
;CHECK-LABEL: _inreg4xi64:
-;CHECK: vpbroadcastq
+;CHECK: vbroadcastsd
;CHECK: ret
define <4 x i64> @_inreg4xi64(<4 x i64> %a) {
%b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -325,7 +325,7 @@ define <2 x double> @_inreg2xdouble(<2 x double> %a) {
}
;CHECK-LABEL: _inreg8xi32:
-;CHECK: vpbroadcastd
+;CHECK: vbroadcastss
;CHECK: ret
define <8 x i32> @_inreg8xi32(<8 x i32> %a) {
%b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -333,7 +333,7 @@ define <8 x i32> @_inreg8xi32(<8 x i32> %a) {
}
;CHECK-LABEL: _inreg4xi32:
-;CHECK: vpbroadcastd
+;CHECK: vbroadcastss
;CHECK: ret
define <4 x i32> @_inreg4xi32(<4 x i32> %a) {
%b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -355,3 +355,219 @@ define <16 x i8> @_inreg16xi8(<16 x i8> %a) {
%b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %b
}
+
+; These tests check that a vbroadcast instruction is used when we have a splat
+; formed from a concat_vectors (via the shufflevector) of two BUILD_VECTORs
+; (via the insertelements).
+
+; CHECK-LABEL: splat_concat1
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastss
+; CHECK-NEXT: ret
+define <8 x float> @splat_concat1(float %f) {
+ %1 = insertelement <4 x float> undef, float %f, i32 0
+ %2 = insertelement <4 x float> %1, float %f, i32 1
+ %3 = insertelement <4 x float> %2, float %f, i32 2
+ %4 = insertelement <4 x float> %3, float %f, i32 3
+ %5 = shufflevector <4 x float> %4, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x float> %5
+}
+
+; CHECK-LABEL: splat_concat2
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastss
+; CHECK-NEXT: ret
+define <8 x float> @splat_concat2(float %f) {
+ %1 = insertelement <4 x float> undef, float %f, i32 0
+ %2 = insertelement <4 x float> %1, float %f, i32 1
+ %3 = insertelement <4 x float> %2, float %f, i32 2
+ %4 = insertelement <4 x float> %3, float %f, i32 3
+ %5 = insertelement <4 x float> undef, float %f, i32 0
+ %6 = insertelement <4 x float> %5, float %f, i32 1
+ %7 = insertelement <4 x float> %6, float %f, i32 2
+ %8 = insertelement <4 x float> %7, float %f, i32 3
+ %9 = shufflevector <4 x float> %4, <4 x float> %8, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %9
+}
+
+; CHECK-LABEL: splat_concat3
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastsd
+; CHECK-NEXT: ret
+define <4 x double> @splat_concat3(double %d) {
+ %1 = insertelement <2 x double> undef, double %d, i32 0
+ %2 = insertelement <2 x double> %1, double %d, i32 1
+ %3 = shufflevector <2 x double> %2, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ ret <4 x double> %3
+}
+
+; CHECK-LABEL: splat_concat4
+; CHECK-NOT: vinsertf128
+; CHECK: vbroadcastsd
+; CHECK-NEXT: ret
+define <4 x double> @splat_concat4(double %d) {
+ %1 = insertelement <2 x double> undef, double %d, i32 0
+ %2 = insertelement <2 x double> %1, double %d, i32 1
+ %3 = insertelement <2 x double> undef, double %d, i32 0
+ %4 = insertelement <2 x double> %3, double %d, i32 1
+ %5 = shufflevector <2 x double> %2, <2 x double> %4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %5
+}
+
+; Test cases for <rdar://problem/16074331>.
+; Instruction selection for the broadcast instruction fails if
+; the load cannot be folded into the broadcast.
+; This happens if the load initially has one use but other uses are
+; created later, or if the selection DAG cannot prove that folding the
+; load will not create a cycle in the DAG.
+; These test cases exercise the latter.
+
+; CHECK-LABEL: isel_crash_16b
+; CHECK: vpbroadcastb {{[^,]+}}, %xmm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_16b(i8* %cV_R.addr) {
+eintry:
+ %__a.addr.i = alloca <2 x i64>, align 16
+ %__b.addr.i = alloca <2 x i64>, align 16
+ %vCr = alloca <2 x i64>, align 16
+ store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
+ %tmp = load <2 x i64>* %vCr, align 16
+ %tmp2 = load i8* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <16 x i8> undef, i8 %tmp2, i32 0
+ %splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
+ %tmp3 = bitcast <16 x i8> %splat.splat to <2 x i64>
+ store <2 x i64> %tmp, <2 x i64>* %__a.addr.i, align 16
+ store <2 x i64> %tmp3, <2 x i64>* %__b.addr.i, align 16
+ ret void
+}
+
+; CHECK-LABEL: isel_crash_32b
+; CHECK: vpbroadcastb {{[^,]+}}, %ymm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_32b(i8* %cV_R.addr) {
+eintry:
+ %__a.addr.i = alloca <4 x i64>, align 16
+ %__b.addr.i = alloca <4 x i64>, align 16
+ %vCr = alloca <4 x i64>, align 16
+ store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
+ %tmp = load <4 x i64>* %vCr, align 16
+ %tmp2 = load i8* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <32 x i8> undef, i8 %tmp2, i32 0
+ %splat.splat = shufflevector <32 x i8> %splat.splatinsert, <32 x i8> undef, <32 x i32> zeroinitializer
+ %tmp3 = bitcast <32 x i8> %splat.splat to <4 x i64>
+ store <4 x i64> %tmp, <4 x i64>* %__a.addr.i, align 16
+ store <4 x i64> %tmp3, <4 x i64>* %__b.addr.i, align 16
+ ret void
+}
+
+; CHECK-LABEL: isel_crash_8w
+; CHECK: vpbroadcastw {{[^,]+}}, %xmm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_8w(i16* %cV_R.addr) {
+entry:
+ %__a.addr.i = alloca <2 x i64>, align 16
+ %__b.addr.i = alloca <2 x i64>, align 16
+ %vCr = alloca <2 x i64>, align 16
+ store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
+ %tmp = load <2 x i64>* %vCr, align 16
+ %tmp2 = load i16* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <8 x i16> undef, i16 %tmp2, i32 0
+ %splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
+ %tmp3 = bitcast <8 x i16> %splat.splat to <2 x i64>
+ store <2 x i64> %tmp, <2 x i64>* %__a.addr.i, align 16
+ store <2 x i64> %tmp3, <2 x i64>* %__b.addr.i, align 16
+ ret void
+}
+
+; CHECK-LABEL: isel_crash_16w
+; CHECK: vpbroadcastw {{[^,]+}}, %ymm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_16w(i16* %cV_R.addr) {
+eintry:
+ %__a.addr.i = alloca <4 x i64>, align 16
+ %__b.addr.i = alloca <4 x i64>, align 16
+ %vCr = alloca <4 x i64>, align 16
+ store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
+ %tmp = load <4 x i64>* %vCr, align 16
+ %tmp2 = load i16* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <16 x i16> undef, i16 %tmp2, i32 0
+ %splat.splat = shufflevector <16 x i16> %splat.splatinsert, <16 x i16> undef, <16 x i32> zeroinitializer
+ %tmp3 = bitcast <16 x i16> %splat.splat to <4 x i64>
+ store <4 x i64> %tmp, <4 x i64>* %__a.addr.i, align 16
+ store <4 x i64> %tmp3, <4 x i64>* %__b.addr.i, align 16
+ ret void
+}
+
+; CHECK-LABEL: isel_crash_4d
+; CHECK: vbroadcastss {{[^,]+}}, %xmm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_4d(i32* %cV_R.addr) {
+entry:
+ %__a.addr.i = alloca <2 x i64>, align 16
+ %__b.addr.i = alloca <2 x i64>, align 16
+ %vCr = alloca <2 x i64>, align 16
+ store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
+ %tmp = load <2 x i64>* %vCr, align 16
+ %tmp2 = load i32* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <4 x i32> undef, i32 %tmp2, i32 0
+ %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+ %tmp3 = bitcast <4 x i32> %splat.splat to <2 x i64>
+ store <2 x i64> %tmp, <2 x i64>* %__a.addr.i, align 16
+ store <2 x i64> %tmp3, <2 x i64>* %__b.addr.i, align 16
+ ret void
+}
+
+; CHECK-LABEL: isel_crash_8d
+; CHECK: vbroadcastss {{[^,]+}}, %ymm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_8d(i32* %cV_R.addr) {
+eintry:
+ %__a.addr.i = alloca <4 x i64>, align 16
+ %__b.addr.i = alloca <4 x i64>, align 16
+ %vCr = alloca <4 x i64>, align 16
+ store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
+ %tmp = load <4 x i64>* %vCr, align 16
+ %tmp2 = load i32* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <8 x i32> undef, i32 %tmp2, i32 0
+ %splat.splat = shufflevector <8 x i32> %splat.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
+ %tmp3 = bitcast <8 x i32> %splat.splat to <4 x i64>
+ store <4 x i64> %tmp, <4 x i64>* %__a.addr.i, align 16
+ store <4 x i64> %tmp3, <4 x i64>* %__b.addr.i, align 16
+ ret void
+}
+
+; CHECK-LABEL: isel_crash_2q
+; CHECK: vpbroadcastq {{[^,]+}}, %xmm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_2q(i64* %cV_R.addr) {
+entry:
+ %__a.addr.i = alloca <2 x i64>, align 16
+ %__b.addr.i = alloca <2 x i64>, align 16
+ %vCr = alloca <2 x i64>, align 16
+ store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
+ %tmp = load <2 x i64>* %vCr, align 16
+ %tmp2 = load i64* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <2 x i64> undef, i64 %tmp2, i32 0
+ %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
+ store <2 x i64> %tmp, <2 x i64>* %__a.addr.i, align 16
+ store <2 x i64> %splat.splat, <2 x i64>* %__b.addr.i, align 16
+ ret void
+}
+
+; CHECK-LABEL: isel_crash_4q
+; CHECK: vbroadcastsd {{[^,]+}}, %ymm{{[0-9]+}}
+; CHECK: ret
+define void @isel_crash_4q(i64* %cV_R.addr) {
+eintry:
+ %__a.addr.i = alloca <4 x i64>, align 16
+ %__b.addr.i = alloca <4 x i64>, align 16
+ %vCr = alloca <4 x i64>, align 16
+ store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
+ %tmp = load <4 x i64>* %vCr, align 16
+ %tmp2 = load i64* %cV_R.addr, align 4
+ %splat.splatinsert = insertelement <4 x i64> undef, i64 %tmp2, i32 0
+ %splat.splat = shufflevector <4 x i64> %splat.splatinsert, <4 x i64> undef, <4 x i32> zeroinitializer
+ store <4 x i64> %tmp, <4 x i64>* %__a.addr.i, align 16
+ store <4 x i64> %splat.splat, <4 x i64>* %__b.addr.i, align 16
+ ret void
+}
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index 5592e6c8a5f7..e355301dd051 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -9,7 +9,7 @@ entry:
}
; CHECK-LABEL: test_sllw_1:
-; CHECK: vpsllw $0, %ymm0, %ymm0
+; CHECK-NOT: vpsllw $0, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @test_sllw_2(<16 x i16> %InVec) {
@@ -24,12 +24,12 @@ entry:
define <16 x i16> @test_sllw_3(<16 x i16> %InVec) {
entry:
- %shl = shl <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = shl <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <16 x i16> %shl
}
; CHECK-LABEL: test_sllw_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsllw $15, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_slld_1(<8 x i32> %InVec) {
@@ -39,7 +39,7 @@ entry:
}
; CHECK-LABEL: test_slld_1:
-; CHECK: vpslld $0, %ymm0, %ymm0
+; CHECK-NOT: vpslld $0, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_slld_2(<8 x i32> %InVec) {
@@ -52,14 +52,24 @@ entry:
; CHECK: vpaddd %ymm0, %ymm0, %ymm0
; CHECK: ret
+define <8 x i32> @test_vpslld_var(i32 %shift) {
+ %amt = insertelement <8 x i32> undef, i32 %shift, i32 0
+ %tmp = shl <8 x i32> <i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199>, %amt
+ ret <8 x i32> %tmp
+}
+
+; CHECK-LABEL: test_vpslld_var:
+; CHECK: vpslld %xmm0, %ymm1, %ymm0
+; CHECK: ret
+
define <8 x i32> @test_slld_3(<8 x i32> %InVec) {
entry:
- %shl = shl <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+ %shl = shl <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
ret <8 x i32> %shl
}
; CHECK-LABEL: test_slld_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpslld $31, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_sllq_1(<4 x i64> %InVec) {
@@ -69,7 +79,7 @@ entry:
}
; CHECK-LABEL: test_sllq_1:
-; CHECK: vpsllq $0, %ymm0, %ymm0
+; CHECK-NOT: vpsllq $0, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_sllq_2(<4 x i64> %InVec) {
@@ -84,12 +94,12 @@ entry:
define <4 x i64> @test_sllq_3(<4 x i64> %InVec) {
entry:
- %shl = shl <4 x i64> %InVec, <i64 64, i64 64, i64 64, i64 64>
+ %shl = shl <4 x i64> %InVec, <i64 63, i64 63, i64 63, i64 63>
ret <4 x i64> %shl
}
; CHECK-LABEL: test_sllq_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsllq $63, %ymm0, %ymm0
; CHECK: ret
; AVX2 Arithmetic Shift
@@ -101,7 +111,7 @@ entry:
}
; CHECK-LABEL: test_sraw_1:
-; CHECK: vpsraw $0, %ymm0, %ymm0
+; CHECK-NOT: vpsraw $0, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @test_sraw_2(<16 x i16> %InVec) {
@@ -116,7 +126,7 @@ entry:
define <16 x i16> @test_sraw_3(<16 x i16> %InVec) {
entry:
- %shl = ashr <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = ashr <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <16 x i16> %shl
}
@@ -131,7 +141,7 @@ entry:
}
; CHECK-LABEL: test_srad_1:
-; CHECK: vpsrad $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrad $0, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_srad_2(<8 x i32> %InVec) {
@@ -146,7 +156,7 @@ entry:
define <8 x i32> @test_srad_3(<8 x i32> %InVec) {
entry:
- %shl = ashr <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+ %shl = ashr <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
ret <8 x i32> %shl
}
@@ -163,7 +173,7 @@ entry:
}
; CHECK-LABEL: test_srlw_1:
-; CHECK: vpsrlw $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrlw $0, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @test_srlw_2(<16 x i16> %InVec) {
@@ -178,12 +188,12 @@ entry:
define <16 x i16> @test_srlw_3(<16 x i16> %InVec) {
entry:
- %shl = lshr <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = lshr <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <16 x i16> %shl
}
; CHECK-LABEL: test_srlw_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrlw $15, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_srld_1(<8 x i32> %InVec) {
@@ -193,7 +203,7 @@ entry:
}
; CHECK-LABEL: test_srld_1:
-; CHECK: vpsrld $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrld $0, %ymm0, %ymm0
; CHECK: ret
define <8 x i32> @test_srld_2(<8 x i32> %InVec) {
@@ -208,12 +218,12 @@ entry:
define <8 x i32> @test_srld_3(<8 x i32> %InVec) {
entry:
- %shl = lshr <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+ %shl = lshr <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
ret <8 x i32> %shl
}
; CHECK-LABEL: test_srld_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrld $31, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_srlq_1(<4 x i64> %InVec) {
@@ -223,7 +233,7 @@ entry:
}
; CHECK-LABEL: test_srlq_1:
-; CHECK: vpsrlq $0, %ymm0, %ymm0
+; CHECK-NOT: vpsrlq $0, %ymm0, %ymm0
; CHECK: ret
define <4 x i64> @test_srlq_2(<4 x i64> %InVec) {
@@ -238,10 +248,21 @@ entry:
define <4 x i64> @test_srlq_3(<4 x i64> %InVec) {
entry:
- %shl = lshr <4 x i64> %InVec, <i64 64, i64 64, i64 64, i64 64>
+ %shl = lshr <4 x i64> %InVec, <i64 63, i64 63, i64 63, i64 63>
ret <4 x i64> %shl
}
; CHECK-LABEL: test_srlq_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrlq $63, %ymm0, %ymm0
; CHECK: ret
+
+; CHECK-LABEL: @srl_trunc_and_v4i64
+; CHECK: vpand
+; CHECK-NEXT: vpsrlvd
+; CHECK: ret
+define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
+ %and = and <4 x i64> %y, <i64 8, i64 8, i64 8, i64 8>
+ %trunc = trunc <4 x i64> %and to <4 x i32>
+ %sra = lshr <4 x i32> %x, %trunc
+ ret <4 x i32> %sra
+}
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index e27600ecd734..4d1c9f7cd973 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding| FileCheck %s
; CHECK-LABEL: addpd512
; CHECK: vaddpd
@@ -163,6 +163,40 @@ define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
ret <8 x i64> %x
}
+; CHECK-LABEL: vpaddq_fold_test
+; CHECK: vpaddq (%
+; CHECK: ret
+define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
+ %tmp = load <8 x i64>* %j, align 4
+ %x = add <8 x i64> %i, %tmp
+ ret <8 x i64> %x
+}
+
+; CHECK-LABEL: vpaddq_broadcast_test
+; CHECK: vpaddq LCP{{.*}}(%rip){1to8}
+; CHECK: ret
+define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
+ %x = add <8 x i64> %i, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ ret <8 x i64> %x
+}
+
+; CHECK-LABEL: vpaddq_broadcast2_test
+; CHECK: vpaddq (%rdi){1to8}
+; CHECK: ret
+define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
+ %tmp = load i64* %j
+ %j.0 = insertelement <8 x i64> undef, i64 %tmp, i32 0
+ %j.1 = insertelement <8 x i64> %j.0, i64 %tmp, i32 1
+ %j.2 = insertelement <8 x i64> %j.1, i64 %tmp, i32 2
+ %j.3 = insertelement <8 x i64> %j.2, i64 %tmp, i32 3
+ %j.4 = insertelement <8 x i64> %j.3, i64 %tmp, i32 4
+ %j.5 = insertelement <8 x i64> %j.4, i64 %tmp, i32 5
+ %j.6 = insertelement <8 x i64> %j.5, i64 %tmp, i32 6
+ %j.7 = insertelement <8 x i64> %j.6, i64 %tmp, i32 7
+ %x = add <8 x i64> %i, %j.7
+ ret <8 x i64> %x
+}
+
; CHECK-LABEL: vpaddd_test
; CHECK: vpaddd %zmm
; CHECK: ret
@@ -171,6 +205,85 @@ define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
ret <16 x i32> %x
}
+; CHECK-LABEL: vpaddd_fold_test
+; CHECK: vpaddd (%
+; CHECK: ret
+define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
+ %tmp = load <16 x i32>* %j, align 4
+ %x = add <16 x i32> %i, %tmp
+ ret <16 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to16}
+; CHECK: ret
+define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
+ %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <16 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd_mask_test
+; CHECK: vpaddd {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]} }}
+; CHECK: ret
+define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = add <16 x i32> %i, %j
+ %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
+ ret <16 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd_maskz_test
+; CHECK: vpaddd {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]} {z} }}
+; CHECK: ret
+define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = add <16 x i32> %i, %j
+ %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
+ ret <16 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd_mask_fold_test
+; CHECK: vpaddd (%rdi), {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]} }}
+; CHECK: ret
+define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %j = load <16 x i32>* %j.ptr
+ %x = add <16 x i32> %i, %j
+ %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
+ ret <16 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd_mask_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to16}, {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]} }}
+; CHECK: ret
+define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
+ ret <16 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd_maskz_fold_test
+; CHECK: vpaddd (%rdi), {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}} {z}
+; CHECK: ret
+define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %j = load <16 x i32>* %j.ptr
+ %x = add <16 x i32> %i, %j
+ %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
+ ret <16 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd_maskz_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to16}, {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}} {z}
+; CHECK: ret
+define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
+ ret <16 x i32> %r
+}
+
; CHECK-LABEL: vpsubq_test
; CHECK: vpsubq %zmm
; CHECK: ret
@@ -196,7 +309,7 @@ define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
}
; CHECK-LABEL: sqrtA
-; CHECK: vsqrtssz
+; CHECK: vsqrtss {{.*}} encoding: [0x62
; CHECK: ret
declare float @sqrtf(float) readnone
define float @sqrtA(float %a) nounwind uwtable readnone ssp {
@@ -206,7 +319,7 @@ entry:
}
; CHECK-LABEL: sqrtB
-; CHECK: vsqrtsdz
+; CHECK: vsqrtsd {{.*}}## encoding: [0x62
; CHECK: ret
declare double @sqrt(double) readnone
define double @sqrtB(double %a) nounwind uwtable readnone ssp {
@@ -216,7 +329,7 @@ entry:
}
; CHECK-LABEL: sqrtC
-; CHECK: vsqrtssz
+; CHECK: vsqrtss {{.*}}## encoding: [0x62
; CHECK: ret
declare float @llvm.sqrt.f32(float)
define float @sqrtC(float %a) nounwind {
@@ -224,6 +337,24 @@ define float @sqrtC(float %a) nounwind {
ret float %b
}
+; CHECK-LABEL: sqrtD
+; CHECK: vsqrtps {{.*}}
+; CHECK: ret
+declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
+define <16 x float> @sqrtD(<16 x float> %a) nounwind {
+ %b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
+ ret <16 x float> %b
+}
+
+; CHECK-LABEL: sqrtE
+; CHECK: vsqrtpd {{.*}}
+; CHECK: ret
+declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
+define <8 x double> @sqrtE(<8 x double> %a) nounwind {
+ %b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
+ ret <8 x double> %b
+}
+
; CHECK-LABEL: fadd_broadcast
; CHECK: LCP{{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK: ret
diff --git a/test/CodeGen/X86/avx512-build-vector.ll b/test/CodeGen/X86/avx512-build-vector.ll
index bc4560b3f3fc..b5a2aa80ce16 100644
--- a/test/CodeGen/X86/avx512-build-vector.ll
+++ b/test/CodeGen/X86/avx512-build-vector.ll
@@ -15,4 +15,16 @@ define <16 x i32> @test1(i32* %x) {
define <16 x i32> @test2(<16 x i32> %x) {
%res = add <16 x i32><i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %x
ret <16 x i32>%res
+}
+
+; CHECK-LABEL: test3
+; CHECK: vinsertf128
+; CHECK: vinsertf64x4
+; CHECK: ret
+define <16 x float> @test3(<4 x float> %a) {
+ %b = extractelement <4 x float> %a, i32 2
+ %c = insertelement <16 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %b, i32 5
+ %b1 = extractelement <4 x float> %a, i32 0
+ %c1 = insertelement <16 x float> %c, float %b1, i32 6
+ ret <16 x float>%c1
}
\ No newline at end of file
diff --git a/test/CodeGen/X86/avx512-cmp.ll b/test/CodeGen/X86/avx512-cmp.ll
index ba52745e6c19..47e50a93796a 100644
--- a/test/CodeGen/X86/avx512-cmp.ll
+++ b/test/CodeGen/X86/avx512-cmp.ll
@@ -1,6 +1,7 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding | FileCheck %s
-; CHECK: vucomisdz
+; CHECK-LABEL: test1
+; CHECK: vucomisd {{.*}}encoding: [0x62
define double @test1(double %a, double %b) nounwind {
%tobool = fcmp une double %a, %b
br i1 %tobool, label %l1, label %l2
@@ -13,7 +14,8 @@ l2:
ret double %c1
}
-; CHECK: vucomissz
+; CHECK-LABEL: test2
+; CHECK: vucomiss {{.*}}encoding: [0x62
define float @test2(float %a, float %b) nounwind {
%tobool = fcmp olt float %a, %b
br i1 %tobool, label %l1, label %l2
@@ -25,3 +27,62 @@ l2:
%c1 = fadd float %a, %b
ret float %c1
}
+
+; CHECK-LABEL: test3
+; CHECK: vcmpeqss
+; CHECK: kmov
+; CHECK: ret
+define i32 @test3(float %a, float %b) {
+
+ %cmp10.i = fcmp oeq float %a, %b
+ %conv11.i = zext i1 %cmp10.i to i32
+ ret i32 %conv11.i
+}
+
+; CHECK-LABEL: test5
+; CHECK: ret
+define float @test5(float %p) #0 {
+entry:
+ %cmp = fcmp oeq float %p, 0.000000e+00
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %cmp1 = fcmp ogt float %p, 0.000000e+00
+ %cond = select i1 %cmp1, float 1.000000e+00, float -1.000000e+00
+ br label %return
+
+return: ; preds = %if.end, %entry
+ %retval.0 = phi float [ %cond, %if.end ], [ %p, %entry ]
+ ret float %retval.0
+}
+
+; CHECK-LABEL: test6
+; CHECK: cmpl
+; CHECK-NOT: kmov
+; CHECK: ret
+define i32 @test6(i32 %a, i32 %b) {
+ %cmp = icmp eq i32 %a, %b
+ %res = zext i1 %cmp to i32
+ ret i32 %res
+}
+
+; CHECK-LABEL: test7
+; CHECK: vucomisd
+; CHECK-NOT: kmov
+; CHECK: ret
+define i32 @test7(double %x, double %y) #2 {
+entry:
+ %0 = fcmp one double %x, %y
+ %or = zext i1 %0 to i32
+ ret i32 %or
+}
+
+define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
+ %tmp1 = icmp eq i32 %a1, -1
+ %tmp2 = icmp eq i32 %a2, -2147483648
+ %tmp3 = and i1 %tmp1, %tmp2
+ %tmp4 = icmp eq i32 %a3, 0
+ %tmp5 = or i1 %tmp3, %tmp4
+ %res = select i1 %tmp5, i32 1, i32 %a3
+ ret i32 %res
+}
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index ed68ff7bcbdb..f5cda96b99fa 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding | FileCheck %s
; CHECK-LABEL: sitof32
; CHECK: vcvtdq2ps %zmm
@@ -24,6 +24,22 @@ define <16 x i32> @fptoui00(<16 x float> %a) nounwind {
ret <16 x i32> %b
}
+; CHECK-LABEL: fptoui_256
+; CHECK: vcvttps2udq
+; CHECK: ret
+define <8 x i32> @fptoui_256(<8 x float> %a) nounwind {
+ %b = fptoui <8 x float> %a to <8 x i32>
+ ret <8 x i32> %b
+}
+
+; CHECK-LABEL: fptoui_128
+; CHECK: vcvttps2udq
+; CHECK: ret
+define <4 x i32> @fptoui_128(<4 x float> %a) nounwind {
+ %b = fptoui <4 x float> %a to <4 x i32>
+ ret <4 x i32> %b
+}
+
; CHECK-LABEL: fptoui01
; CHECK: vcvttpd2udq
; CHECK: ret
@@ -67,7 +83,7 @@ define <8 x double> @fpext00(<8 x float> %b) nounwind {
}
; CHECK-LABEL: funcA
-; CHECK: vcvtsi2sdqz (%
+; CHECK: vcvtsi2sdq (%rdi){{.*}} encoding: [0x62
; CHECK: ret
define double @funcA(i64* nocapture %e) {
entry:
@@ -77,7 +93,7 @@ entry:
}
; CHECK-LABEL: funcB
-; CHECK: vcvtsi2sdlz (%
+; CHECK: vcvtsi2sdl (%{{.*}} encoding: [0x62
; CHECK: ret
define double @funcB(i32* %e) {
entry:
@@ -87,7 +103,7 @@ entry:
}
; CHECK-LABEL: funcC
-; CHECK: vcvtsi2sslz (%
+; CHECK: vcvtsi2ssl (%{{.*}} encoding: [0x62
; CHECK: ret
define float @funcC(i32* %e) {
entry:
@@ -97,7 +113,7 @@ entry:
}
; CHECK-LABEL: i64tof32
-; CHECK: vcvtsi2ssqz (%
+; CHECK: vcvtsi2ssq (%{{.*}} encoding: [0x62
; CHECK: ret
define float @i64tof32(i64* %e) {
entry:
@@ -107,7 +123,7 @@ entry:
}
; CHECK-LABEL: fpext
-; CHECK: vcvtss2sdz
+; CHECK: vcvtss2sd {{.*}} encoding: [0x62
; CHECK: ret
define void @fpext() {
entry:
@@ -120,9 +136,9 @@ entry:
}
; CHECK-LABEL: fpround_scalar
-; CHECK: vmovsdz
-; CHECK: vcvtsd2ssz
-; CHECK: vmovssz
+; CHECK: vmovsd {{.*}} encoding: [0x62
+; CHECK: vcvtsd2ss {{.*}} encoding: [0x62
+; CHECK: vmovss {{.*}} encoding: [0x62
; CHECK: ret
define void @fpround_scalar() nounwind uwtable {
entry:
@@ -135,7 +151,7 @@ entry:
}
; CHECK-LABEL: long_to_double
-; CHECK: vmovqz
+; CHECK: vmovq {{.*}} encoding: [0x62
; CHECK: ret
define double @long_to_double(i64 %x) {
%res = bitcast i64 %x to double
@@ -143,7 +159,7 @@ define double @long_to_double(i64 %x) {
}
; CHECK-LABEL: double_to_long
-; CHECK: vmovqz
+; CHECK: vmovq {{.*}} encoding: [0x62
; CHECK: ret
define i64 @double_to_long(double %x) {
%res = bitcast double %x to i64
@@ -151,7 +167,7 @@ define i64 @double_to_long(double %x) {
}
; CHECK-LABEL: int_to_float
-; CHECK: vmovdz
+; CHECK: vmovd {{.*}} encoding: [0x62
; CHECK: ret
define float @int_to_float(i32 %x) {
%res = bitcast i32 %x to float
@@ -159,7 +175,7 @@ define float @int_to_float(i32 %x) {
}
; CHECK-LABEL: float_to_int
-; CHECK: vmovdz
+; CHECK: vmovd {{.*}} encoding: [0x62
; CHECK: ret
define i32 @float_to_int(float %x) {
%res = bitcast float %x to i32
@@ -176,6 +192,14 @@ define <16 x double> @uitof64(<16 x i32> %a) nounwind {
ret <16 x double> %b
}
+; CHECK-LABEL: uitof64_256
+; CHECK: vcvtudq2pd
+; CHECK: ret
+define <4 x double> @uitof64_256(<4 x i32> %a) nounwind {
+ %b = uitofp <4 x i32> %a to <4 x double>
+ ret <4 x double> %b
+}
+
; CHECK-LABEL: uitof32
; CHECK: vcvtudq2ps
; CHECK: ret
@@ -184,8 +208,24 @@ define <16 x float> @uitof32(<16 x i32> %a) nounwind {
ret <16 x float> %b
}
+; CHECK-LABEL: uitof32_256
+; CHECK: vcvtudq2ps
+; CHECK: ret
+define <8 x float> @uitof32_256(<8 x i32> %a) nounwind {
+ %b = uitofp <8 x i32> %a to <8 x float>
+ ret <8 x float> %b
+}
+
+; CHECK-LABEL: uitof32_128
+; CHECK: vcvtudq2ps
+; CHECK: ret
+define <4 x float> @uitof32_128(<4 x i32> %a) nounwind {
+ %b = uitofp <4 x i32> %a to <4 x float>
+ ret <4 x float> %b
+}
+
; CHECK-LABEL: @fptosi02
-; CHECK vcvttss2siz
+; CHECK: vcvttss2si {{.*}} encoding: [0x62
; CHECK: ret
define i32 @fptosi02(float %a) nounwind {
%b = fptosi float %a to i32
@@ -193,7 +233,7 @@ define i32 @fptosi02(float %a) nounwind {
}
; CHECK-LABEL: @fptoui02
-; CHECK vcvttss2usiz
+; CHECK: vcvttss2usi {{.*}} encoding: [0x62
; CHECK: ret
define i32 @fptoui02(float %a) nounwind {
%b = fptoui float %a to i32
@@ -201,7 +241,7 @@ define i32 @fptoui02(float %a) nounwind {
}
; CHECK-LABEL: @uitofp02
-; CHECK vcvtusi2ss
+; CHECK: vcvtusi2ss
; CHECK: ret
define float @uitofp02(i32 %a) nounwind {
%b = uitofp i32 %a to float
@@ -209,7 +249,7 @@ define float @uitofp02(i32 %a) nounwind {
}
; CHECK-LABEL: @uitofp03
-; CHECK vcvtusi2sd
+; CHECK: vcvtusi2sd
; CHECK: ret
define double @uitofp03(i32 %a) nounwind {
%b = uitofp i32 %a to double
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 0321e950ef81..20bf7e4a16e0 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -1,14 +1,14 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
-declare <16 x float> @llvm.x86.avx512.gather.dps.mask.512 (<16 x float>, i16, <16 x i32>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.dps.mask.512 (i8*, i16, <16 x i32>, <16 x float>, i32)
-declare <8 x double> @llvm.x86.avx512.gather.dpd.mask.512 (<8 x double>, i8, <8 x i32>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.dpd.mask.512 (i8*, i8, <8 x i32>, <8 x double>, i32)
+declare <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float>, i8*, <16 x i32>, i16, i32)
+declare void @llvm.x86.avx512.scatter.dps.512 (i8*, i16, <16 x i32>, <16 x float>, i32)
+declare <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double>, i8*, <8 x i32>, i8, i32)
+declare void @llvm.x86.avx512.scatter.dpd.512 (i8*, i8, <8 x i32>, <8 x double>, i32)
-declare <8 x float> @llvm.x86.avx512.gather.qps.mask.512 (<8 x float>, i8, <8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qps.mask.512 (i8*, i8, <8 x i64>, <8 x float>, i32)
-declare <8 x double> @llvm.x86.avx512.gather.qpd.mask.512 (<8 x double>, i8, <8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qpd.mask.512 (i8*, i8, <8 x i64>, <8 x double>, i32)
+declare <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qps.512 (i8*, i8, <8 x i64>, <8 x float>, i32)
+declare <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qpd.512 (i8*, i8, <8 x i64>, <8 x double>, i32)
;CHECK-LABEL: gather_mask_dps
;CHECK: kmovw
@@ -17,9 +17,9 @@ declare void @llvm.x86.avx512.scatter.qpd.mask.512 (i8*, i8, <8 x i64>, <8 x dou
;CHECK: vscatterdps
;CHECK: ret
define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
- %x = call <16 x float> @llvm.x86.avx512.gather.dps.mask.512 (<16 x float> %src, i16 %mask, <16 x i32>%ind, i8* %base, i32 4)
+ %x = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
%ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- call void @llvm.x86.avx512.scatter.dps.mask.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x float> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x float> %x, i32 4)
ret void
}
@@ -30,9 +30,9 @@ define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8*
;CHECK: vscatterdpd
;CHECK: ret
define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = call <8 x double> @llvm.x86.avx512.gather.dpd.mask.512 (<8 x double> %src, i8 %mask, <8 x i32>%ind, i8* %base, i32 4)
+ %x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- call void @llvm.x86.avx512.scatter.dpd.mask.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x double> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x double> %x, i32 4)
ret void
}
@@ -43,9 +43,9 @@ define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %b
;CHECK: vscatterqps
;CHECK: ret
define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = call <8 x float> @llvm.x86.avx512.gather.qps.mask.512 (<8 x float> %src, i8 %mask, <8 x i64>%ind, i8* %base, i32 4)
+ %x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qps.mask.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x float> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x float> %x, i32 4)
ret void
}
@@ -56,23 +56,23 @@ define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %ba
;CHECK: vscatterqpd
;CHECK: ret
define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = call <8 x double> @llvm.x86.avx512.gather.qpd.mask.512 (<8 x double> %src, i8 %mask, <8 x i64>%ind, i8* %base, i32 4)
+ %x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qpd.mask.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x double> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x double> %x, i32 4)
ret void
}
;;
;; Integer Gather/Scatter
;;
-declare <16 x i32> @llvm.x86.avx512.gather.dpi.mask.512 (<16 x i32>, i16, <16 x i32>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.dpi.mask.512 (i8*, i16, <16 x i32>, <16 x i32>, i32)
-declare <8 x i64> @llvm.x86.avx512.gather.dpq.mask.512 (<8 x i64>, i8, <8 x i32>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.dpq.mask.512 (i8*, i8, <8 x i32>, <8 x i64>, i32)
+declare <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32>, i8*, <16 x i32>, i16, i32)
+declare void @llvm.x86.avx512.scatter.dpi.512 (i8*, i16, <16 x i32>, <16 x i32>, i32)
+declare <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i64>, i8*, <8 x i32>, i8, i32)
+declare void @llvm.x86.avx512.scatter.dpq.512 (i8*, i8, <8 x i32>, <8 x i64>, i32)
-declare <8 x i32> @llvm.x86.avx512.gather.qpi.mask.512 (<8 x i32>, i8, <8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qpi.mask.512 (i8*, i8, <8 x i64>, <8 x i32>, i32)
-declare <8 x i64> @llvm.x86.avx512.gather.qpq.mask.512 (<8 x i64>, i8, <8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qpq.mask.512 (i8*, i8, <8 x i64>, <8 x i64>, i32)
+declare <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i32>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qpi.512 (i8*, i8, <8 x i64>, <8 x i32>, i32)
+declare <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64>, i8*, <8 x i64>, i8, i32)
+declare void @llvm.x86.avx512.scatter.qpq.512 (i8*, i8, <8 x i64>, <8 x i64>, i32)
;CHECK-LABEL: gather_mask_dd
;CHECK: kmovw
@@ -81,9 +81,9 @@ declare void @llvm.x86.avx512.scatter.qpq.mask.512 (i8*, i8, <8 x i64>, <8 x i64
;CHECK: vpscatterdd
;CHECK: ret
define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
- %x = call <16 x i32> @llvm.x86.avx512.gather.dpi.mask.512 (<16 x i32> %src, i16 %mask, <16 x i32>%ind, i8* %base, i32 4)
+ %x = call <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
%ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- call void @llvm.x86.avx512.scatter.dpi.mask.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x i32> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.dpi.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind2, <16 x i32> %x, i32 4)
ret void
}
@@ -94,9 +94,9 @@ define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %ba
;CHECK: vpscatterqd
;CHECK: ret
define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = call <8 x i32> @llvm.x86.avx512.gather.qpi.mask.512 (<8 x i32> %src, i8 %mask, <8 x i64>%ind, i8* %base, i32 4)
+ %x = call <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i32> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qpi.mask.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i32> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.qpi.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i32> %x, i32 4)
ret void
}
@@ -107,9 +107,9 @@ define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base,
;CHECK: vpscatterqq
;CHECK: ret
define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = call <8 x i64> @llvm.x86.avx512.gather.qpq.mask.512 (<8 x i64> %src, i8 %mask, <8 x i64>%ind, i8* %base, i32 4)
+ %x = call <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qpq.mask.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i64> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.qpq.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind2, <8 x i64> %x, i32 4)
ret void
}
@@ -120,106 +120,116 @@ define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base,
;CHECK: vpscatterdq
;CHECK: ret
define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = call <8 x i64> @llvm.x86.avx512.gather.dpq.mask.512 (<8 x i64> %src, i8 %mask, <8 x i32>%ind, i8* %base, i32 4)
+ %x = call <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i64> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
%ind2 = add <8 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- call void @llvm.x86.avx512.scatter.dpq.mask.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x i64> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.dpq.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind2, <8 x i64> %x, i32 4)
ret void
}
-;; FP Intinsics without masks
-declare <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x i32>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.dps.512 (i8*, <16 x i32>, <16 x float>, i32)
-declare <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qps.512 (i8*, <8 x i64>, <8 x float>, i32)
-declare <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qpd.512 (i8*, <8 x i64>, <8 x double>, i32)
+;CHECK-LABEL: gather_mask_dpd_execdomain
+;CHECK: vgatherdpd
+;CHECK: vmovapd
+;CHECK: ret
+define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
+ %x = call <8 x double> @llvm.x86.avx512.gather.dpd.512 (<8 x double> %src, i8* %base, <8 x i32>%ind, i8 %mask, i32 4)
+ store <8 x double> %x, <8 x double>* %stbuf
+ ret void
+}
-;CHECK-LABEL: gather_dps
-;CHECK: kxnorw
-;CHECK: vgatherdps
-;CHECK: vscatterdps
+;CHECK-LABEL: gather_mask_qpd_execdomain
+;CHECK: vgatherqpd
+;CHECK: vmovapd
;CHECK: ret
-define void @gather_dps(<16 x i32> %ind, i8* %base, i8* %stbuf) {
- %x = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x i32>%ind, i8* %base, i32 4)
- %ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, <16 x i32>%ind2, <16 x float> %x, i32 4)
+define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
+ %x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x double> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+ store <8 x double> %x, <8 x double>* %stbuf
ret void
}
-;CHECK-LABEL: gather_qps
-;CHECK: kxnorw
+;CHECK-LABEL: gather_mask_dps_execdomain
+;CHECK: vgatherdps
+;CHECK: vmovaps
+;CHECK: ret
+define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
+ %res = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 %mask, i32 4)
+ ret <16 x float> %res;
+}
+
+;CHECK-LABEL: gather_mask_qps_execdomain
;CHECK: vgatherqps
-;CHECK: vscatterqps
+;CHECK: vmovaps
;CHECK: ret
-define void @gather_qps(<8 x i64> %ind, i8* %base, i8* %stbuf) {
- %x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x i64>%ind, i8* %base, i32 4)
- %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, <8 x i64>%ind2, <8 x float> %x, i32 4)
+define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
+ %res = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 %mask, i32 4)
+ ret <8 x float> %res;
+}
+
+;CHECK-LABEL: scatter_mask_dpd_execdomain
+;CHECK: vmovapd
+;CHECK: vscatterdpd
+;CHECK: ret
+define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
+ %x = load <8 x double>* %src, align 64
+ call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind, <8 x double> %x, i32 4)
ret void
}
-;CHECK-LABEL: gather_qpd
-;CHECK: kxnorw
-;CHECK: vgatherqpd
-;CHECK: vpadd
+;CHECK-LABEL: scatter_mask_qpd_execdomain
+;CHECK: vmovapd
;CHECK: vscatterqpd
;CHECK: ret
-define void @gather_qpd(<8 x i64> %ind, i8* %base, i8* %stbuf) {
- %x = call <8 x double> @llvm.x86.avx512.gather.qpd.512 (<8 x i64>%ind, i8* %base, i32 4)
- %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, <8 x i64>%ind2, <8 x double> %x, i32 4)
+define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
+ %x = load <8 x double>* %src, align 64
+ call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x double> %x, i32 4)
ret void
}
-;; Integer Intinsics without masks
-
-declare <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.dpi.512 (i8*, <16 x i32>, <16 x i32>, i32)
-declare <8 x i64> @llvm.x86.avx512.gather.dpq.512 (<8 x i32>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.dpq.512 (i8*, <8 x i32>, <8 x i64>, i32)
-
-declare <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qpi.512 (i8*, <8 x i64>, <8 x i32>, i32)
-declare <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64>, i8*, i32)
-declare void @llvm.x86.avx512.scatter.qpq.512 (i8*, <8 x i64>, <8 x i64>, i32)
+;CHECK-LABEL: scatter_mask_dps_execdomain
+;CHECK: vmovaps
+;CHECK: vscatterdps
+;CHECK: ret
+define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
+ %x = load <16 x float>* %src, align 64
+ call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind, <16 x float> %x, i32 4)
+ ret void
+}
-;CHECK-LABEL: gather_dpi
-;CHECK: kxnorw
-;CHECK: vpgatherdd
-;CHECK: vpscatterdd
+;CHECK-LABEL: scatter_mask_qps_execdomain
+;CHECK: vmovaps
+;CHECK: vscatterqps
;CHECK: ret
-define void @gather_dpi(<16 x i32> %ind, i8* %base, i8* %stbuf) {
- %x = call <16 x i32> @llvm.x86.avx512.gather.dpi.512 (<16 x i32>%ind, i8* %base, i32 4)
- %ind2 = add <16 x i32> %ind, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
- call void @llvm.x86.avx512.scatter.dpi.512 (i8* %stbuf, <16 x i32>%ind2, <16 x i32> %x, i32 4)
+define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
+ %x = load <8 x float>* %src, align 32
+ call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x float> %x, i32 4)
ret void
}
-;CHECK-LABEL: gather_qpq
-;CHECK: vpxord %zmm
+;CHECK-LABEL: gather_qps
;CHECK: kxnorw
-;CHECK: vpgatherqq
+;CHECK: vgatherqps
;CHECK: vpadd
-;CHECK: vpscatterqq
+;CHECK: vscatterqps
;CHECK: ret
-define void @gather_qpq(<8 x i64> %ind, i8* %base, i8* %stbuf) {
- %x = call <8 x i64> @llvm.x86.avx512.gather.qpq.512 (<8 x i64>%ind, i8* %base, i32 4)
+define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
+ %x = call <8 x float> @llvm.x86.avx512.gather.qps.512 (<8 x float> %src, i8* %base, <8 x i64>%ind, i8 -1, i32 4)
%ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qpq.512 (i8* %stbuf, <8 x i64>%ind2, <8 x i64> %x, i32 4)
+ call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 -1, <8 x i64>%ind2, <8 x float> %x, i32 4)
ret void
}
-;CHECK-LABEL: gather_qpi
-;CHECK: vpxor %ymm
-;CHECK: kxnorw
-;CHECK: vpgatherqd
-;CHECK: vpadd
-;CHECK: vpscatterqd
+;CHECK-LABEL: prefetch
+;CHECK: gatherpf0
+;CHECK: gatherpf1
+;CHECK: scatterpf0
+;CHECK: scatterpf1
;CHECK: ret
-define void @gather_qpi(<8 x i64> %ind, i8* %base, i8* %stbuf) {
- %x = call <8 x i32> @llvm.x86.avx512.gather.qpi.512 (<8 x i64>%ind, i8* %base, i32 4)
- %ind2 = add <8 x i64> %ind, <i64 0, i64 1, i64 2, i64 3, i64 0, i64 1, i64 2, i64 3>
- call void @llvm.x86.avx512.scatter.qpi.512 (i8* %stbuf, <8 x i64>%ind2, <8 x i32> %x, i32 4)
+declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
+declare void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
+define void @prefetch(<8 x i64> %ind, i8* %base) {
+ call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %ind, i8* %base, i32 4, i32 0)
+ call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %ind, i8* %base, i32 4, i32 1)
+ call void @llvm.x86.avx512.scatterpf.qps.512(i8 -1, <8 x i64> %ind, i8* %base, i32 2, i32 0)
+ call void @llvm.x86.avx512.scatterpf.qps.512(i8 -1, <8 x i64> %ind, i8* %base, i32 2, i32 1)
ret void
}
diff --git a/test/CodeGen/X86/avx512-inc-dec.ll b/test/CodeGen/X86/avx512-inc-dec.ll
new file mode 100644
index 000000000000..f04ca878f434
--- /dev/null
+++ b/test/CodeGen/X86/avx512-inc-dec.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+;CHECK-LABEL: test
+;CHECK-NOT: dec
+;CHECK-NOT: inc
+;CHECK: ret
+define i32 @test(i32 %a, i32 %b) {
+ %a1 = add i32 %a, -1
+ %b1 = add i32 %b, 1
+ %res = mul i32 %a1, %b1
+ ret i32 %res
+}
+
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 3f067401ed3f..b360c716b004 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -44,7 +44,7 @@ define <8 x i64> @test4(<8 x i64> %x) nounwind {
}
;CHECK-LABEL: test5:
-;CHECK: vextractpsz
+;CHECK: vextractps
;CHECK: ret
define i32 @test5(<4 x float> %x) nounwind {
%ef = extractelement <4 x float> %x, i32 3
@@ -53,7 +53,7 @@ define i32 @test5(<4 x float> %x) nounwind {
}
;CHECK-LABEL: test6:
-;CHECK: vextractpsz {{.*}}, (%rdi)
+;CHECK: vextractps {{.*}}, (%rdi)
;CHECK: ret
define void @test6(<4 x float> %x, float* %out) nounwind {
%ef = extractelement <4 x float> %x, i32 3
@@ -62,7 +62,7 @@ define void @test6(<4 x float> %x, float* %out) nounwind {
}
;CHECK-LABEL: test7
-;CHECK: vmovdz
+;CHECK: vmovd
;CHECK: vpermps %zmm
;CHECK: ret
define float @test7(<16 x float> %x, i32 %ind) nounwind {
@@ -71,7 +71,7 @@ define float @test7(<16 x float> %x, i32 %ind) nounwind {
}
;CHECK-LABEL: test8
-;CHECK: vmovqz
+;CHECK: vmovq
;CHECK: vpermpd %zmm
;CHECK: ret
define double @test8(<8 x double> %x, i32 %ind) nounwind {
@@ -89,9 +89,9 @@ define float @test9(<8 x float> %x, i32 %ind) nounwind {
}
;CHECK-LABEL: test10
-;CHECK: vmovdz
+;CHECK: vmovd
;CHECK: vpermd %zmm
-;CHEKK: vmovdz %xmm0, %eax
+;CHECK: vmovd %xmm0, %eax
;CHECK: ret
define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
%e = extractelement <16 x i32> %x, i32 %ind
@@ -99,27 +99,100 @@ define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
}
;CHECK-LABEL: test11
-;CHECK: movl $260
-;CHECK: bextrl
-;CHECK: movl $268
-;CHECK: bextrl
+;CHECK: vpcmpltud
+;CHECK: kshiftlw $11
+;CHECK: kshiftrw $15
+;CHECK: kortestw
+;CHECK: je
+;CHECK: ret
;CHECK: ret
define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
%cmp_res = icmp ult <16 x i32> %a, %b
%ia = extractelement <16 x i1> %cmp_res, i32 4
- %ib = extractelement <16 x i1> %cmp_res, i32 12
-
br i1 %ia, label %A, label %B
-
A:
ret <16 x i32>%b
B:
%c = add <16 x i32>%b, %a
- br i1 %ib, label %C, label %D
- C:
- %c1 = sub <16 x i32>%c, %a
- ret <16 x i32>%c1
- D:
- %c2 = mul <16 x i32>%c, %a
- ret <16 x i32>%c2
+ ret <16 x i32>%c
+}
+
+;CHECK-LABEL: test12
+;CHECK: vpcmpgtq
+;CHECK: kshiftlw $15
+;CHECK: kshiftrw $15
+;CHECK: kortestw
+;CHECK: ret
+
+define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
+
+ %cmpvector_func.i = icmp slt <16 x i64> %a, %b
+ %extract24vector_func.i = extractelement <16 x i1> %cmpvector_func.i, i32 0
+ %res = select i1 %extract24vector_func.i, i64 %a1, i64 %b1
+ ret i64 %res
+}
+
+;CHECK-LABEL: test13
+;CHECK: cmpl
+;CHECK: sbbl
+;CHECK: orl $65532
+;CHECK: ret
+define i16 @test13(i32 %a, i32 %b) {
+ %cmp_res = icmp ult i32 %a, %b
+ %maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0
+ %res = bitcast <16 x i1> %maskv to i16
+ ret i16 %res
+}
+
+;CHECK-LABEL: test14
+;CHECK: vpcmpgtq
+;CHECK: kshiftlw $11
+;CHECK: kshiftrw $15
+;CHECK: kortestw
+;CHECK: ret
+
+define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
+
+ %cmpvector_func.i = icmp slt <8 x i64> %a, %b
+ %extract24vector_func.i = extractelement <8 x i1> %cmpvector_func.i, i32 4
+ %res = select i1 %extract24vector_func.i, i64 %a1, i64 %b1
+ ret i64 %res
+}
+
+;CHECK-LABEL: test15
+;CHECK: kshiftlw
+;CHECK: kmovw
+;CHECK: ret
+define i16 @test15(i1 *%addr) {
+ %x = load i1 * %addr, align 128
+ %x1 = insertelement <16 x i1> undef, i1 %x, i32 10
+ %x2 = bitcast <16 x i1>%x1 to i16
+ ret i16 %x2
}
+
+;CHECK-LABEL: test16
+;CHECK: kshiftlw
+;CHECK: kshiftrw
+;CHECK: korw
+;CHECK: ret
+define i16 @test16(i1 *%addr, i16 %a) {
+ %x = load i1 * %addr, align 128
+ %a1 = bitcast i16 %a to <16 x i1>
+ %x1 = insertelement <16 x i1> %a1, i1 %x, i32 10
+ %x2 = bitcast <16 x i1>%x1 to i16
+ ret i16 %x2
+}
+
+;CHECK-LABEL: test17
+;CHECK: kshiftlw
+;CHECK: kshiftrw
+;CHECK: korw
+;CHECK: ret
+define i8 @test17(i1 *%addr, i8 %a) {
+ %x = load i1 * %addr, align 128
+ %a1 = bitcast i8 %a to <8 x i1>
+ %x1 = insertelement <8 x i1> %a1, i1 %x, i32 10
+ %x2 = bitcast <8 x i1>%x1 to i8
+ ret i8 %x2
+}
+
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index 5bdabf234990..18cfcfe78b05 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -1,108 +1,136 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding| FileCheck %s
-declare i32 @llvm.x86.avx512.kortestz(i16, i16) nounwind readnone
-; CHECK: test_kortestz
+declare i32 @llvm.x86.avx512.kortestz.w(i16, i16) nounwind readnone
+; CHECK-LABEL: test_kortestz
; CHECK: kortestw
; CHECK: sete
define i32 @test_kortestz(i16 %a0, i16 %a1) {
- %res = call i32 @llvm.x86.avx512.kortestz(i16 %a0, i16 %a1)
+ %res = call i32 @llvm.x86.avx512.kortestz.w(i16 %a0, i16 %a1)
ret i32 %res
}
-declare i32 @llvm.x86.avx512.kortestc(i16, i16) nounwind readnone
-; CHECK: test_kortestc
+declare i32 @llvm.x86.avx512.kortestc.w(i16, i16) nounwind readnone
+; CHECK-LABEL: test_kortestc
; CHECK: kortestw
; CHECK: sbbl
define i32 @test_kortestc(i16 %a0, i16 %a1) {
- %res = call i32 @llvm.x86.avx512.kortestc(i16 %a0, i16 %a1)
+ %res = call i32 @llvm.x86.avx512.kortestc.w(i16 %a0, i16 %a1)
ret i32 %res
}
+declare i16 @llvm.x86.avx512.kand.w(i16, i16) nounwind readnone
+; CHECK-LABEL: test_kand
+; CHECK: kandw
+; CHECK: kandw
+define i16 @test_kand(i16 %a0, i16 %a1) {
+ %t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
+ %t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
+ ret i16 %t2
+}
+
+declare i16 @llvm.x86.avx512.knot.w(i16) nounwind readnone
+; CHECK-LABEL: test_knot
+; CHECK: knotw
+define i16 @test_knot(i16 %a0) {
+ %res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
+ ret i16 %res
+}
+
+declare i16 @llvm.x86.avx512.kunpck.bw(i16, i16) nounwind readnone
+
+; CHECK-LABEL: unpckbw_test
+; CHECK: kunpckbw
+; CHECK: ret
+define i16 @unpckbw_test(i16 %a0, i16 %a1) {
+ %res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
+ ret i16 %res
+}
+
define <16 x float> @test_rcp_ps_512(<16 x float> %a0) {
- ; CHECK: vrcp14ps
- %res = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %a0) ; <<16 x float>> [#uses=1]
+ ; CHECK: vrcp14ps {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x4c,0xc0]
+ %res = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
ret <16 x float> %res
}
-declare <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float>) nounwind readnone
+declare <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float>, <16 x float>, i16) nounwind readnone
define <8 x double> @test_rcp_pd_512(<8 x double> %a0) {
- ; CHECK: vrcp14pd
- %res = call <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double> %a0) ; <<8 x double>> [#uses=1]
+ ; CHECK: vrcp14pd {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x4c,0xc0]
+ %res = call <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1) ; <<8 x double>> [#uses=1]
ret <8 x double> %res
}
-declare <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double>) nounwind readnone
+declare <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double>, <8 x double>, i8) nounwind readnone
define <16 x float> @test_rcp28_ps_512(<16 x float> %a0) {
- ; CHECK: vrcp28ps
- %res = call <16 x float> @llvm.x86.avx512.rcp28.ps.512(<16 x float> %a0) ; <<16 x float>> [#uses=1]
+ ; CHECK: vrcp28ps {sae}, {{.*}}encoding: [0x62,0xf2,0x7d,0x18,0xca,0xc0]
+ %res = call <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8) ; <<16 x float>> [#uses=1]
ret <16 x float> %res
}
-declare <16 x float> @llvm.x86.avx512.rcp28.ps.512(<16 x float>) nounwind readnone
+declare <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <8 x double> @test_rcp28_pd_512(<8 x double> %a0) {
- ; CHECK: vrcp28pd
- %res = call <8 x double> @llvm.x86.avx512.rcp28.pd.512(<8 x double> %a0) ; <<8 x double>> [#uses=1]
+ ; CHECK: vrcp28pd {sae}, {{.*}}encoding: [0x62,0xf2,0xfd,0x18,0xca,0xc0]
+ %res = call <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8) ; <<8 x double>> [#uses=1]
ret <8 x double> %res
}
-declare <8 x double> @llvm.x86.avx512.rcp28.pd.512(<8 x double>) nounwind readnone
+declare <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double>, <8 x double>, i8, i32) nounwind readnone
-define <8 x double> @test_rndscale_pd_512(<8 x double> %a0) {
- ; CHECK: vrndscale
- %res = call <8 x double> @llvm.x86.avx512.rndscale.pd.512(<8 x double> %a0, i32 7) ; <<8 x double>> [#uses=1]
- ret <8 x double> %res
+declare <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double>, i32, <8 x double>, i8, i32)
+
+define <8 x double> @test7(<8 x double> %a) {
+; CHECK: vrndscalepd {{.*}}encoding: [0x62,0xf3,0xfd,0x48,0x09,0xc0,0x0b]
+ %res = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %a, i32 11, <8 x double> %a, i8 -1, i32 4)
+ ret <8 x double>%res
}
-declare <8 x double> @llvm.x86.avx512.rndscale.pd.512(<8 x double>, i32) nounwind readnone
+declare <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float>, i32, <16 x float>, i16, i32)
-define <16 x float> @test_rndscale_ps_512(<16 x float> %a0) {
- ; CHECK: vrndscale
- %res = call <16 x float> @llvm.x86.avx512.rndscale.ps.512(<16 x float> %a0, i32 7) ; <<16 x float>> [#uses=1]
- ret <16 x float> %res
+define <16 x float> @test8(<16 x float> %a) {
+; CHECK: vrndscaleps {{.*}}encoding: [0x62,0xf3,0x7d,0x48,0x08,0xc0,0x0b]
+ %res = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %a, i32 11, <16 x float> %a, i16 -1, i32 4)
+ ret <16 x float>%res
}
-declare <16 x float> @llvm.x86.avx512.rndscale.ps.512(<16 x float>, i32) nounwind readnone
-
define <16 x float> @test_rsqrt_ps_512(<16 x float> %a0) {
- ; CHECK: vrsqrt14ps
- %res = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %a0) ; <<16 x float>> [#uses=1]
+ ; CHECK: vrsqrt14ps {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x4e,0xc0]
+ %res = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
ret <16 x float> %res
}
-declare <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float>) nounwind readnone
+declare <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float>, <16 x float>, i16) nounwind readnone
define <16 x float> @test_rsqrt28_ps_512(<16 x float> %a0) {
- ; CHECK: vrsqrt28ps
- %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps.512(<16 x float> %a0) ; <<16 x float>> [#uses=1]
+ ; CHECK: vrsqrt28ps {sae}, {{.*}}encoding: [0x62,0xf2,0x7d,0x18,0xcc,0xc0]
+ %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8) ; <<16 x float>> [#uses=1]
ret <16 x float> %res
}
-declare <16 x float> @llvm.x86.avx512.rsqrt28.ps.512(<16 x float>) nounwind readnone
+declare <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
define <4 x float> @test_rsqrt14_ss(<4 x float> %a0) {
- ; CHECK: vrsqrt14ss
- %res = call <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ ; CHECK: vrsqrt14ss {{.*}}encoding: [0x62,0xf2,0x7d,0x08,0x4f,0xc0]
+ %res = call <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
-declare <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float>) nounwind readnone
+declare <4 x float> @llvm.x86.avx512.rsqrt14.ss(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
define <4 x float> @test_rsqrt28_ss(<4 x float> %a0) {
- ; CHECK: vrsqrt28ss
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ ; CHECK: vrsqrt28ss {sae}, {{.*}}encoding: [0x62,0xf2,0x7d,0x18,0xcd,0xc0]
+ %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
-declare <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float>) nounwind readnone
+declare <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
define <4 x float> @test_rcp14_ss(<4 x float> %a0) {
- ; CHECK: vrcp14ss
- %res = call <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ ; CHECK: vrcp14ss {{.*}}encoding: [0x62,0xf2,0x7d,0x08,0x4d,0xc0]
+ %res = call <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
-declare <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float>) nounwind readnone
+declare <4 x float> @llvm.x86.avx512.rcp14.ss(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
define <4 x float> @test_rcp28_ss(<4 x float> %a0) {
- ; CHECK: vrcp28ss
- %res = call <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ ; CHECK: vrcp28ss {sae}, {{.*}}encoding: [0x62,0xf2,0x7d,0x18,0xcb,0xc0]
+ %res = call <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
-declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>) nounwind readnone
+declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) {
; CHECK: vsqrtpd
@@ -119,42 +147,42 @@ define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
declare <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float>) nounwind readnone
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1) {
- ; CHECK: vsqrtssz
+ ; CHECK: vsqrtss {{.*}}encoding: [0x62
%res = call <4 x float> @llvm.x86.avx512.sqrt.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.sqrt.ss(<4 x float>, <4 x float>) nounwind readnone
define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1) {
- ; CHECK: vsqrtsdz
+ ; CHECK: vsqrtsd {{.*}}encoding: [0x62
%res = call <2 x double> @llvm.x86.avx512.sqrt.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx512.sqrt.sd(<2 x double>, <2 x double>) nounwind readnone
define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
- ; CHECK: vcvtsd2siz
+ ; CHECK: vcvtsd2si {{.*}}encoding: [0x62
%res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
ret i64 %res
}
declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
- ; CHECK: vcvtsi2sdqz
+ ; CHECK: vcvtsi2sdq {{.*}}encoding: [0x62
%res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
define <2 x double> @test_x86_avx512_cvtusi642sd(<2 x double> %a0, i64 %a1) {
- ; CHECK: vcvtusi2sdqz
+ ; CHECK: vcvtusi2sdq {{.*}}encoding: [0x62
%res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double>, i64) nounwind readnone
define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
- ; CHECK: vcvttsd2siz
+ ; CHECK: vcvttsd2si {{.*}}encoding: [0x62
%res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
ret i64 %res
}
@@ -162,7 +190,7 @@ declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
- ; CHECK: vcvtss2siz
+ ; CHECK: vcvtss2si {{.*}}encoding: [0x62
%res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
ret i64 %res
}
@@ -170,7 +198,7 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
- ; CHECK: vcvtsi2ssqz
+ ; CHECK: vcvtsi2ssq {{.*}}encoding: [0x62
%res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
@@ -178,33 +206,34 @@ declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
- ; CHECK: vcvttss2siz
+ ; CHECK: vcvttss2si {{.*}}encoding: [0x62
%res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]
ret i64 %res
}
declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
- ; CHECK: vcvtsd2usiz
+ ; CHECK: vcvtsd2usi {{.*}}encoding: [0x62
%res = call i64 @llvm.x86.avx512.cvtsd2usi64(<2 x double> %a0) ; <i64> [#uses=1]
ret i64 %res
}
declare i64 @llvm.x86.avx512.cvtsd2usi64(<2 x double>) nounwind readnone
define <16 x float> @test_x86_vcvtph2ps_512(<16 x i16> %a0) {
- ; CHECK: vcvtph2ps
- %res = call <16 x float> @llvm.x86.avx512.vcvtph2ps.512(<16 x i16> %a0)
+ ; CHECK: vcvtph2ps %ymm0, %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x13,0xc0]
+ %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
ret <16 x float> %res
}
-declare <16 x float> @llvm.x86.avx512.vcvtph2ps.512(<16 x i16>) nounwind readonly
+declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float>, i16, i32) nounwind readonly
define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0) {
- ; CHECK: vcvtps2ph
- %res = call <16 x i16> @llvm.x86.avx512.vcvtps2ph.512(<16 x float> %a0, i32 0)
+ ; CHECK: vcvtps2ph $2, %zmm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x48,0x1d,0xc0,0x02]
+ %res = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 -1)
ret <16 x i16> %res
}
-declare <16 x i16> @llvm.x86.avx512.vcvtps2ph.512(<16 x float>, i32) nounwind readonly
+
+declare <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float>, i32, <16 x i16>, i16) nounwind readonly
define <16 x float> @test_x86_vbroadcast_ss_512(i8* %a0) {
; CHECK: vbroadcastss
@@ -262,113 +291,323 @@ define <8 x i64> @test_x86_pbroadcastq_i64_512(i64 %a0) {
}
declare <8 x i64> @llvm.x86.avx512.pbroadcastq.i64.512(i64) nounwind readonly
-define <16 x i32> @test_x86_pmaxu_d(<16 x i32> %a0, <16 x i32> %a1) {
- ; CHECK: vpmaxud
- %res = call <16 x i32> @llvm.x86.avx512.pmaxu.d(<16 x i32> %a0, <16 x i32> %a1) ; <<16 x i32>> [#uses=1]
+define <16 x i32> @test_conflict_d(<16 x i32> %a) {
+ ; CHECK: movw $-1, %ax
+ ; CHECK: vpxor
+ ; CHECK: vpconflictd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx512.pmaxu.d(<16 x i32>, <16 x i32>) nounwind readonly
-define <8 x i64> @test_x86_pmaxu_q(<8 x i64> %a0, <8 x i64> %a1) {
- ; CHECK: vpmaxuq
- %res = call <8 x i64> @llvm.x86.avx512.pmaxu.q(<8 x i64> %a0, <8 x i64> %a1) ; <<8 x i64>> [#uses=1]
+declare <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
+
+define <8 x i64> @test_conflict_q(<8 x i64> %a) {
+ ; CHECK: movb $-1, %al
+ ; CHECK: vpxor
+ ; CHECK: vpconflictq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}
-declare <8 x i64> @llvm.x86.avx512.pmaxu.q(<8 x i64>, <8 x i64>) nounwind readonly
-define <16 x i32> @test_x86_pmaxs_d(<16 x i32> %a0, <16 x i32> %a1) {
- ; CHECK: vpmaxsd
- %res = call <16 x i32> @llvm.x86.avx512.pmaxs.d(<16 x i32> %a0, <16 x i32> %a1) ; <<16 x i32>> [#uses=1]
+declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
+
+define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
+ ; CHECK: vpconflictd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx512.pmaxs.d(<16 x i32>, <16 x i32>) nounwind readonly
-define <8 x i64> @test_x86_pmaxs_q(<8 x i64> %a0, <8 x i64> %a1) {
- ; CHECK: vpmaxsq
- %res = call <8 x i64> @llvm.x86.avx512.pmaxs.q(<8 x i64> %a0, <8 x i64> %a1) ; <<8 x i64>> [#uses=1]
+define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+ ; CHECK: vpconflictq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret <8 x i64> %res
}
-declare <8 x i64> @llvm.x86.avx512.pmaxs.q(<8 x i64>, <8 x i64>) nounwind readonly
-define <16 x i32> @test_x86_pminu_d(<16 x i32> %a0, <16 x i32> %a1) {
- ; CHECK: vpminud
- %res = call <16 x i32> @llvm.x86.avx512.pminu.d(<16 x i32> %a0, <16 x i32> %a1) ; <<16 x i32>> [#uses=1]
+define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
+ ; CHECK: movw $-1, %ax
+ ; CHECK: vpxor
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx512.pminu.d(<16 x i32>, <16 x i32>) nounwind readonly
-define <8 x i64> @test_x86_pminu_q(<8 x i64> %a0, <8 x i64> %a1) {
- ; CHECK: vpminuq
- %res = call <8 x i64> @llvm.x86.avx512.pminu.q(<8 x i64> %a0, <8 x i64> %a1) ; <<8 x i64>> [#uses=1]
+declare <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
+
+define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
+ ; CHECK: movb $-1, %al
+ ; CHECK: vpxor
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}
-declare <8 x i64> @llvm.x86.avx512.pminu.q(<8 x i64>, <8 x i64>) nounwind readonly
-define <16 x i32> @test_x86_pmins_d(<16 x i32> %a0, <16 x i32> %a1) {
- ; CHECK: vpminsd
- %res = call <16 x i32> @llvm.x86.avx512.pmins.d(<16 x i32> %a0, <16 x i32> %a1) ; <<16 x i32>> [#uses=1]
+declare <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
+
+
+define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx512.pmins.d(<16 x i32>, <16 x i32>) nounwind readonly
-define <8 x i64> @test_x86_pmins_q(<8 x i64> %a0, <8 x i64> %a1) {
- ; CHECK: vpminsq
- %res = call <8 x i64> @llvm.x86.avx512.pmins.q(<8 x i64> %a0, <8 x i64> %a1) ; <<8 x i64>> [#uses=1]
+define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret <8 x i64> %res
}
-declare <8 x i64> @llvm.x86.avx512.pmins.q(<8 x i64>, <8 x i64>) nounwind readonly
-define <16 x i32> @test_conflict_d(<16 x i32> %a) {
- ; CHECK: vpconflictd
- %res = call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %a)
+define <16 x i32> @test_ctlz_d(<16 x i32> %a) {
+ ; CHECK-LABEL: test_ctlz_d
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32>) nounwind readonly
-define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
- ; CHECK: vpconflictd %zmm0, %zmm0 {%k1} {z}
- %vmask = bitcast i16 %mask to <16 x i1>
- %res = call <16 x i32> @llvm.x86.avx512.conflict.d.maskz.512(<16 x i1> %vmask, <16 x i32> %a)
- ret <16 x i32> %res
-}
-declare <16 x i32> @llvm.x86.avx512.conflict.d.maskz.512(<16 x i1>,<16 x i32>) nounwind readonly
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) nounwind readonly
-define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
- ; CHECK: vpconflictq {{.*}} {%k1}
- %vmask = bitcast i8 %mask to <8 x i1>
- %res = call <8 x i64> @llvm.x86.avx512.conflict.q.mask.512(<8 x i64> %b, <8 x i1> %vmask, <8 x i64> %a)
+define <8 x i64> @test_ctlz_q(<8 x i64> %a) {
+ ; CHECK-LABEL: test_ctlz_q
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
ret <8 x i64> %res
}
-declare <8 x i64> @llvm.x86.avx512.conflict.q.mask.512(<8 x i64>, <8 x i1>,<8 x i64>) nounwind readonly
-define <16 x float> @test_x86_mskblend_ps_512(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
+declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) nounwind readonly
+
+define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK: vblendmps
- %m0 = bitcast i16 %a0 to <16 x i1>
- %res = call <16 x float> @llvm.x86.avx512.mskblend.ps.512(<16 x i1> %m0, <16 x float> %a1, <16 x float> %a2) ; <<16 x float>> [#uses=1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float> %a1, <16 x float> %a2, i16 %a0) ; <<16 x float>> [#uses=1]
ret <16 x float> %res
}
-declare <16 x float> @llvm.x86.avx512.mskblend.ps.512(<16 x i1> %a0, <16 x float> %a1, <16 x float> %a2) nounwind readonly
-define <8 x double> @test_x86_mskblend_pd_512(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
+declare <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float>, <16 x float>, i16) nounwind readonly
+
+define <8 x double> @test_x86_mask_blend_pd_512(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK: vblendmpd
- %m0 = bitcast i8 %a0 to <8 x i1>
- %res = call <8 x double> @llvm.x86.avx512.mskblend.pd.512(<8 x i1> %m0, <8 x double> %a1, <8 x double> %a2) ; <<8 x double>> [#uses=1]
+ %res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a1, <8 x double> %a2, i8 %a0) ; <<8 x double>> [#uses=1]
ret <8 x double> %res
}
-declare <8 x double> @llvm.x86.avx512.mskblend.pd.512(<8 x i1> %a0, <8 x double> %a1, <8 x double> %a2) nounwind readonly
-define <16 x i32> @test_x86_mskblend_d_512(i16 %a0, <16 x i32> %a1, <16 x i32> %a2) {
+define <8 x double> @test_x86_mask_blend_pd_512_memop(<8 x double> %a, <8 x double>* %ptr, i8 %mask) {
+ ; CHECK-LABEL: test_x86_mask_blend_pd_512_memop
+ ; CHECK: vblendmpd (%
+ %b = load <8 x double>* %ptr
+ %res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a, <8 x double> %b, i8 %mask) ; <<8 x double>> [#uses=1]
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double>, <8 x double>, i8) nounwind readonly
+
+define <16 x i32> @test_x86_mask_blend_d_512(i16 %a0, <16 x i32> %a1, <16 x i32> %a2) {
; CHECK: vpblendmd
- %m0 = bitcast i16 %a0 to <16 x i1>
- %res = call <16 x i32> @llvm.x86.avx512.mskblend.d.512(<16 x i1> %m0, <16 x i32> %a1, <16 x i32> %a2) ; <<16 x i32>> [#uses=1]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.blend.d.512(<16 x i32> %a1, <16 x i32> %a2, i16 %a0) ; <<16 x i32>> [#uses=1]
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx512.mskblend.d.512(<16 x i1> %a0, <16 x i32> %a1, <16 x i32> %a2) nounwind readonly
+declare <16 x i32> @llvm.x86.avx512.mask.blend.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
-define <8 x i64> @test_x86_mskblend_q_512(i8 %a0, <8 x i64> %a1, <8 x i64> %a2) {
+define <8 x i64> @test_x86_mask_blend_q_512(i8 %a0, <8 x i64> %a1, <8 x i64> %a2) {
; CHECK: vpblendmq
- %m0 = bitcast i8 %a0 to <8 x i1>
- %res = call <8 x i64> @llvm.x86.avx512.mskblend.q.512(<8 x i1> %m0, <8 x i64> %a1, <8 x i64> %a2) ; <<8 x i64>> [#uses=1]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.blend.q.512(<8 x i64> %a1, <8 x i64> %a2, i8 %a0) ; <<8 x i64>> [#uses=1]
ret <8 x i64> %res
}
-declare <8 x i64> @llvm.x86.avx512.mskblend.q.512(<8 x i1> %a0, <8 x i64> %a1, <8 x i64> %a2) nounwind readonly
+declare <8 x i64> @llvm.x86.avx512.mask.blend.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
+
+ define <8 x i32> @test_cvtpd2udq(<8 x double> %a) {
+ ;CHECK: vcvtpd2udq {ru-sae}{{.*}}encoding: [0x62,0xf1,0xfc,0x58,0x79,0xc0]
+ %res = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double> %a, <8 x i32>zeroinitializer, i8 -1, i32 2)
+ ret <8 x i32>%res
+ }
+ declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i8, i32)
+
+ define <16 x i32> @test_cvtps2udq(<16 x float> %a) {
+ ;CHECK: vcvtps2udq {rd-sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x38,0x79,0xc0]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float> %a, <16 x i32>zeroinitializer, i16 -1, i32 1)
+ ret <16 x i32>%res
+ }
+ declare <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float>, <16 x i32>, i16, i32)
+
+ define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
+ ;CHECK: vcmpleps {sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x18,0xc2,0xc1,0x02]
+ %res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
+ ret i16 %res
+ }
+ declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32, i16, i32)
+
+ define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) {
+ ;CHECK: vcmpneqpd %zmm{{.*}}encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc1,0x04]
+ %res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
+ ret i8 %res
+ }
+ declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i8, i32)
+
+ ; cvt intrinsics
+ define <16 x float> @test_cvtdq2ps(<16 x i32> %a) {
+ ;CHECK: vcvtdq2ps {rd-sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x38,0x5b,0xc0]
+ %res = call <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(<16 x i32> %a, <16 x float>zeroinitializer, i16 -1, i32 1)
+ ret <16 x float>%res
+ }
+ declare <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(<16 x i32>, <16 x float>, i16, i32)
+
+ define <16 x float> @test_cvtudq2ps(<16 x i32> %a) {
+ ;CHECK: vcvtudq2ps {rd-sae}{{.*}}encoding: [0x62,0xf1,0x7f,0x38,0x7a,0xc0]
+ %res = call <16 x float> @llvm.x86.avx512.mask.cvtudq2ps.512(<16 x i32> %a, <16 x float>zeroinitializer, i16 -1, i32 1)
+ ret <16 x float>%res
+ }
+ declare <16 x float> @llvm.x86.avx512.mask.cvtudq2ps.512(<16 x i32>, <16 x float>, i16, i32)
+
+ define <8 x double> @test_cvtdq2pd(<8 x i32> %a) {
+ ;CHECK: vcvtdq2pd {{.*}}encoding: [0x62,0xf1,0x7e,0x48,0xe6,0xc0]
+ %res = call <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32> %a, <8 x double>zeroinitializer, i8 -1)
+ ret <8 x double>%res
+ }
+ declare <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32>, <8 x double>, i8)
+
+ define <8 x double> @test_cvtudq2pd(<8 x i32> %a) {
+ ;CHECK: vcvtudq2pd {{.*}}encoding: [0x62,0xf1,0x7e,0x48,0x7a,0xc0]
+ %res = call <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32> %a, <8 x double>zeroinitializer, i8 -1)
+ ret <8 x double>%res
+ }
+ declare <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32>, <8 x double>, i8)
+
+ ; fp min - max
+define <16 x float> @test_vmaxps(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK: vmaxps
+ %res = call <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float>zeroinitializer, i16 -1, i32 4)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.x86.avx512.mask.max.ps.512(<16 x float>, <16 x float>,
+ <16 x float>, i16, i32)
+
+define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) {
+ ; CHECK: vmaxpd
+ %res = call <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double> %a0, <8 x double> %a1,
+ <8 x double>zeroinitializer, i8 -1, i32 4)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.x86.avx512.mask.max.pd.512(<8 x double>, <8 x double>,
+ <8 x double>, i8, i32)
+
+define <16 x float> @test_vminps(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK: vminps
+ %res = call <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float>zeroinitializer, i16 -1, i32 4)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.x86.avx512.mask.min.ps.512(<16 x float>, <16 x float>,
+ <16 x float>, i16, i32)
+
+define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) {
+ ; CHECK: vminpd
+ %res = call <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double> %a0, <8 x double> %a1,
+ <8 x double>zeroinitializer, i8 -1, i32 4)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double>, <8 x double>,
+ <8 x double>, i8, i32)
+
+ define <8 x float> @test_cvtpd2ps(<8 x double> %a) {
+ ;CHECK: vcvtpd2ps {rd-sae}{{.*}}encoding: [0x62,0xf1,0xfd,0x38,0x5a,0xc0]
+ %res = call <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double> %a, <8 x float>zeroinitializer, i8 -1, i32 1)
+ ret <8 x float>%res
+ }
+ declare <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double>, <8 x float>, i8, i32)
+
+ define <16 x i32> @test_pabsd(<16 x i32> %a) {
+ ;CHECK: vpabsd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x1e,0xc0]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a, <16 x i32>zeroinitializer, i16 -1)
+ ret < 16 x i32> %res
+ }
+ declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16)
+
+ define <8 x i64> @test_pabsq(<8 x i64> %a) {
+ ;CHECK: vpabsq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x1f,0xc0]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %a, <8 x i64>zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+ }
+ declare <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64>, <8 x i64>, i8)
+
+define <8 x i64> @test_vpmaxq(<8 x i64> %a0, <8 x i64> %a1) {
+ ; CHECK: vpmaxsq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x3d,0xc1]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %a0, <8 x i64> %a1,
+ <8 x i64>zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+declare <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <16 x i32> @test_vpminud(<16 x i32> %a0, <16 x i32> %a1) {
+ ; CHECK: vpminud {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3b,0xc1]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %a0, <16 x i32> %a1,
+ <16 x i32>zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <16 x i32> @test_vpmaxsd(<16 x i32> %a0, <16 x i32> %a1) {
+ ; CHECK: vpmaxsd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3d,0xc1]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %a0, <16 x i32> %a1,
+ <16 x i32>zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <8 x i64> @test_vpmuludq(<16 x i32> %a0, <16 x i32> %a1) {
+ ; CHECK: vpmuludq {{.*}}encoding: [0x62,0xf1,0xfd,0x48,0xf4,0xc1]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a0, <16 x i32> %a1,
+ <8 x i64>zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
+
+define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1) {
+ ; CHECK: vptestmq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x27,0xc1]
+ %res = call i8 @llvm.x86.avx512.mask.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
+ ret i8 %res
+}
+declare i8 @llvm.x86.avx512.mask.ptestm.q.512(<8 x i64>, <8 x i64>, i8)
+
+define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1) {
+ ; CHECK: vptestmd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x27,0xc1]
+ %res = call i16 @llvm.x86.avx512.mask.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
+ ret i16 %res
+}
+declare i16 @llvm.x86.avx512.mask.ptestm.d.512(<16 x i32>, <16 x i32>, i16)
+
+define void @test_store1(<16 x float> %data, i8* %ptr, i16 %mask) {
+; CHECK: vmovups {{.*}}encoding: [0x62,0xf1,0x7c,0x49,0x11,0x07]
+ call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )
+
+define void @test_store2(<8 x double> %data, i8* %ptr, i8 %mask) {
+; CHECK: vmovupd {{.*}}encoding: [0x62,0xf1,0xfd,0x49,0x11,0x07]
+ call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8 )
+
+define <16 x float> @test_vpermt2ps(<16 x float>%x, <16 x float>%y, <16 x i32>%perm) {
+; CHECK: vpermt2ps {{.*}}encoding: [0x62,0xf2,0x6d,0x48,0x7f,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 -1)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vpermt2ps_mask(<16 x float>%x, <16 x float>%y, <16 x i32>%perm, i16 %mask) {
+; CHECK-LABEL: test_vpermt2ps_mask:
+; CHECK: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x7f,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 %mask)
+ ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>, <16 x float>, <16 x float>, i16)
+
+define <8 x i64> @test_vmovntdqa(i8 *%x) {
+; CHECK-LABEL: test_vmovntdqa:
+; CHECK: vmovntdqa (%rdi), %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x2a,0x07]
+ %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %x)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*)
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index ef5cb56d7284..dd33ffdb640d 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -33,19 +33,6 @@ define i16 @mand16(i16 %x, i16 %y) {
ret i16 %ret
}
-; CHECK: unpckbw_test
-; CHECK: kunpckbw
-; CHECK:ret
-declare <16 x i1> @llvm.x86.kunpck.v16i1(<8 x i1>, <8 x i1>) nounwind readnone
-
-define i16 @unpckbw_test(i8 %x, i8 %y) {
- %m0 = bitcast i8 %x to <8 x i1>
- %m1 = bitcast i8 %y to <8 x i1>
- %k = tail call <16 x i1> @llvm.x86.kunpck.v16i1(<8 x i1> %m0, <8 x i1> %m1)
- %r = bitcast <16 x i1> %k to i16
- ret i16 %r
-}
-
; CHECK: shuf_test1
; CHECK: kshiftrw $8
; CHECK:ret
@@ -55,3 +42,39 @@ define i8 @shuf_test1(i16 %v) nounwind {
%mask1 = bitcast <8 x i1> %mask to i8
ret i8 %mask1
}
+
+; CHECK: zext_test1
+; CHECK: kshiftlw
+; CHECK: kshiftrw
+; CHECK: kmovw
+; CHECK:ret
+define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
+ %cmp_res = icmp ugt <16 x i32> %a, %b
+ %cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
+ %res = zext i1 %cmp_res.i1 to i32
+ ret i32 %res
+}
+
+; CHECK: zext_test2
+; CHECK: kshiftlw
+; CHECK: kshiftrw
+; CHECK: kmovw
+; CHECK:ret
+define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
+ %cmp_res = icmp ugt <16 x i32> %a, %b
+ %cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
+ %res = zext i1 %cmp_res.i1 to i16
+ ret i16 %res
+}
+
+; CHECK: zext_test3
+; CHECK: kshiftlw
+; CHECK: kshiftrw
+; CHECK: kmovw
+; CHECK:ret
+define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
+ %cmp_res = icmp ugt <16 x i32> %a, %b
+ %cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
+ %res = zext i1 %cmp_res.i1 to i8
+ ret i8 %res
+}
diff --git a/test/CodeGen/X86/avx512-mov.ll b/test/CodeGen/X86/avx512-mov.ll
index 91242b1cc125..009802f1742d 100644
--- a/test/CodeGen/X86/avx512-mov.ll
+++ b/test/CodeGen/X86/avx512-mov.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding| FileCheck %s
; CHECK-LABEL: @test1
-; CHECK: vmovdz %xmm0, %eax
+; CHECK: vmovd %xmm0, %eax ## encoding: [0x62
; CHECK: ret
define i32 @test1(float %x) {
%res = bitcast float %x to i32
@@ -9,7 +9,7 @@ define i32 @test1(float %x) {
}
; CHECK-LABEL: @test2
-; CHECK: vmovdz %edi
+; CHECK: vmovd %edi, %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test2(i32 %x) {
%res = insertelement <4 x i32>undef, i32 %x, i32 0
@@ -17,7 +17,7 @@ define <4 x i32> @test2(i32 %x) {
}
; CHECK-LABEL: @test3
-; CHECK: vmovqz %rdi
+; CHECK: vmovq %rdi, %xmm0 ## encoding: [0x62
; CHECK: ret
define <2 x i64> @test3(i64 %x) {
%res = insertelement <2 x i64>undef, i64 %x, i32 0
@@ -25,7 +25,7 @@ define <2 x i64> @test3(i64 %x) {
}
; CHECK-LABEL: @test4
-; CHECK: vmovdz (%rdi)
+; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test4(i32* %x) {
%y = load i32* %x
@@ -34,7 +34,7 @@ define <4 x i32> @test4(i32* %x) {
}
; CHECK-LABEL: @test5
-; CHECK: vmovssz %xmm0, (%rdi)
+; CHECK: vmovss %xmm0, (%rdi) ## encoding: [0x62
; CHECK: ret
define void @test5(float %x, float* %y) {
store float %x, float* %y, align 4
@@ -42,7 +42,7 @@ define void @test5(float %x, float* %y) {
}
; CHECK-LABEL: @test6
-; CHECK: vmovsdz %xmm0, (%rdi)
+; CHECK: vmovsd %xmm0, (%rdi) ## encoding: [0x62
; CHECK: ret
define void @test6(double %x, double* %y) {
store double %x, double* %y, align 8
@@ -50,7 +50,7 @@ define void @test6(double %x, double* %y) {
}
; CHECK-LABEL: @test7
-; CHECK: vmovssz (%rdi), %xmm0
+; CHECK: vmovss (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define float @test7(i32* %x) {
%y = load i32* %x
@@ -59,7 +59,7 @@ define float @test7(i32* %x) {
}
; CHECK-LABEL: @test8
-; CHECK: vmovdz %xmm0, %eax
+; CHECK: vmovd %xmm0, %eax ## encoding: [0x62
; CHECK: ret
define i32 @test8(<4 x i32> %x) {
%res = extractelement <4 x i32> %x, i32 0
@@ -67,7 +67,7 @@ define i32 @test8(<4 x i32> %x) {
}
; CHECK-LABEL: @test9
-; CHECK: vmovqz %xmm0, %rax
+; CHECK: vmovq %xmm0, %rax ## encoding: [0x62
; CHECK: ret
define i64 @test9(<2 x i64> %x) {
%res = extractelement <2 x i64> %x, i32 0
@@ -75,7 +75,7 @@ define i64 @test9(<2 x i64> %x) {
}
; CHECK-LABEL: @test10
-; CHECK: vmovdz (%rdi)
+; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test10(i32* %x) {
%y = load i32* %x, align 4
@@ -84,7 +84,7 @@ define <4 x i32> @test10(i32* %x) {
}
; CHECK-LABEL: @test11
-; CHECK: vmovssz (%rdi)
+; CHECK: vmovss (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x float> @test11(float* %x) {
%y = load float* %x, align 4
@@ -93,7 +93,7 @@ define <4 x float> @test11(float* %x) {
}
; CHECK-LABEL: @test12
-; CHECK: vmovsdz (%rdi)
+; CHECK: vmovsd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <2 x double> @test12(double* %x) {
%y = load double* %x, align 8
@@ -102,7 +102,7 @@ define <2 x double> @test12(double* %x) {
}
; CHECK-LABEL: @test13
-; CHECK: vmovqz %rdi
+; CHECK: vmovq %rdi, %xmm0 ## encoding: [0x62
; CHECK: ret
define <2 x i64> @test13(i64 %x) {
%res = insertelement <2 x i64>zeroinitializer, i64 %x, i32 0
@@ -110,7 +110,7 @@ define <2 x i64> @test13(i64 %x) {
}
; CHECK-LABEL: @test14
-; CHECK: vmovdz %edi
+; CHECK: vmovd %edi, %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test14(i32 %x) {
%res = insertelement <4 x i32>zeroinitializer, i32 %x, i32 0
@@ -118,7 +118,7 @@ define <4 x i32> @test14(i32 %x) {
}
; CHECK-LABEL: @test15
-; CHECK: vmovdz (%rdi)
+; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test15(i32* %x) {
%y = load i32* %x, align 4
@@ -153,3 +153,31 @@ define void @test18(i8 * %addr, <8 x i64> %data) {
ret void
}
+; CHECK-LABEL: store_i1_1
+; CHECK: movb
+; CHECK: movb
+; CHECK: ret
+define void @store_i1_1() {
+ store i1 true, i1 addrspace(3)* undef, align 128
+ store i1 false, i1 addrspace(2)* undef, align 128
+ ret void
+}
+
+; CHECK-LABEL: store_i1_2
+; CHECK: movb
+; CHECK: ret
+define void @store_i1_2(i64 %a, i64 %b) {
+ %res = icmp eq i64 %a, %b
+ store i1 %res, i1 addrspace(3)* undef, align 128
+ ret void
+}
+
+; CHECK-LABEL: store_i1_3
+; CHECK: kmovw
+; CHECK: ret
+define void @store_i1_3(i16 %a) {
+ %a_vec = bitcast i16 %a to <16 x i1>
+ %res = extractelement <16 x i1> %a_vec, i32 4
+ store i1 %res, i1 addrspace(3)* undef, align 128
+ ret void
+}
diff --git a/test/CodeGen/X86/avx512-nontemporal.ll b/test/CodeGen/X86/avx512-nontemporal.ll
new file mode 100644
index 000000000000..ef50cdb82831
--- /dev/null
+++ b/test/CodeGen/X86/avx512-nontemporal.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=x86-64 -mattr=+avx512f | FileCheck %s
+
+define void @f(<16 x float> %A, <16 x float> %AA, i8* %B, <8 x double> %C, <8 x double> %CC, i32 %D, <8 x i64> %E, <8 x i64> %EE) {
+; CHECK: vmovntps %z
+ %cast = bitcast i8* %B to <16 x float>*
+ %A2 = fadd <16 x float> %A, %AA
+ store <16 x float> %A2, <16 x float>* %cast, align 64, !nontemporal !0
+; CHECK: vmovntdq %z
+ %cast1 = bitcast i8* %B to <8 x i64>*
+ %E2 = add <8 x i64> %E, %EE
+ store <8 x i64> %E2, <8 x i64>* %cast1, align 64, !nontemporal !0
+; CHECK: vmovntpd %z
+ %cast2 = bitcast i8* %B to <8 x double>*
+ %C2 = fadd <8 x double> %C, %CC
+ store <8 x double> %C2, <8 x double>* %cast2, align 64, !nontemporal !0
+ ret void
+}
+
+!0 = metadata !{i32 1}
diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll
index d2d6681fb422..83f46984781f 100644
--- a/test/CodeGen/X86/avx512-select.ll
+++ b/test/CodeGen/X86/avx512-select.ll
@@ -20,3 +20,22 @@ define <8 x i64> @select01(i32 %a, <8 x i64> %b) nounwind {
ret <8 x i64> %res
}
+; CHECK-LABEL: @select02
+; CHECK: cmpless %xmm0, %xmm3, %k1
+; CHECK-NEXT: vmovss %xmm2, {{.*}}%xmm1 {%k1}
+; CHECK: ret
+define float @select02(float %a, float %b, float %c, float %eps) {
+ %cmp = fcmp oge float %a, %eps
+ %cond = select i1 %cmp, float %c, float %b
+ ret float %cond
+}
+
+; CHECK-LABEL: @select03
+; CHECK: cmplesd %xmm0, %xmm3, %k1
+; CHECK-NEXT: vmovsd %xmm2, {{.*}}%xmm1 {%k1}
+; CHECK: ret
+define double @select03(double %a, double %b, double %c, double %eps) {
+ %cmp = fcmp oge double %a, %eps
+ %cond = select i1 %cmp, double %c, double %b
+ ret double %cond
+}
diff --git a/test/CodeGen/X86/avx512-shuffle.ll b/test/CodeGen/X86/avx512-shuffle.ll
index c9e0c2b992d9..b99e89a9a546 100644
--- a/test/CodeGen/X86/avx512-shuffle.ll
+++ b/test/CodeGen/X86/avx512-shuffle.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding| FileCheck %s
; CHECK: LCP
; CHECK: .long 2
; CHECK: .long 5
@@ -49,13 +49,23 @@ define <8 x double> @test4(<8 x double> %a) nounwind {
}
; CHECK-LABEL: test5:
-; CHECK: vpermi2pd
+; CHECK: vpermt2pd
; CHECK: ret
define <8 x double> @test5(<8 x double> %a, <8 x double> %b) nounwind {
%c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
ret <8 x double> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test5m:
+; CHECK: vpermt2pd {{.* {%k[1-7]} {z}}}
+define <8 x double> @test5m(<8 x double> %a, <8 x double> %b, i8 %mask) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x double> %c, <8 x double> zeroinitializer
+ ret <8 x double> %res
+}
+
; CHECK-LABEL: test6:
; CHECK: vpermq $30
; CHECK: ret
@@ -65,31 +75,83 @@ define <8 x i64> @test6(<8 x i64> %a) nounwind {
}
; CHECK-LABEL: test7:
-; CHECK: vpermi2q
+; CHECK: vpermt2q
; CHECK: ret
define <8 x i64> @test7(<8 x i64> %a, <8 x i64> %b) nounwind {
%c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
ret <8 x i64> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test7m:
+; CHECK: vpermt2q {{.* {%k[1-7]} {z}}}
+define <8 x i64> @test7m(<8 x i64> %a, <8 x i64> %b, i8 %mask) nounwind {
+ %c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x i64> %c, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
+
+; The mem variant of vpermt2 with a writemask
+; CHECK-LABEL: test7mm:
+; CHECK: vpermt2q {{\(.*\).* {%k[1-7]} {z}}}
+define <8 x i64> @test7mm(<8 x i64> %a, <8 x i64> *%pb, i8 %mask) nounwind {
+ %b = load <8 x i64>* %pb
+ %c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x i64> %c, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
+
; CHECK-LABEL: test8:
-; CHECK: vpermi2d
+; CHECK: vpermt2d
; CHECK: ret
define <16 x i32> @test8(<16 x i32> %a, <16 x i32> %b) nounwind {
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
ret <16 x i32> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test8m:
+; CHECK: vpermt2d {{.* {%k[1-7]} {z}}}
+define <16 x i32> @test8m(<16 x i32> %a, <16 x i32> %b, i16 %mask) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x i32> %c, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
+; The mem variant of vpermt2 with a writemask
+; CHECK-LABEL: test8mm:
+; CHECK: vpermt2d {{\(.*\).* {%k[1-7]} {z}}}
+define <16 x i32> @test8mm(<16 x i32> %a, <16 x i32> *%pb, i16 %mask) nounwind {
+ %b = load <16 x i32> * %pb
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x i32> %c, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
; CHECK-LABEL: test9:
-; CHECK: vpermi2ps
+; CHECK: vpermt2ps
; CHECK: ret
define <16 x float> @test9(<16 x float> %a, <16 x float> %b) nounwind {
%c = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
ret <16 x float> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test9m:
+; CHECK: vpermt2ps {{.*}} {%k{{.}}} {z}
+define <16 x float> @test9m(<16 x float> %a, <16 x float> %b, i16 %mask) nounwind {
+ %c = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x float> %c, <16 x float> zeroinitializer
+ ret <16 x float> %res
+}
+
; CHECK-LABEL: test10:
-; CHECK: vpermi2ps (
+; CHECK: vpermt2ps (
; CHECK: ret
define <16 x float> @test10(<16 x float> %a, <16 x float>* %b) nounwind {
%c = load <16 x float>* %b
@@ -98,7 +160,7 @@ define <16 x float> @test10(<16 x float> %a, <16 x float>* %b) nounwind {
}
; CHECK-LABEL: test11:
-; CHECK: vpermi2d (
+; CHECK: vpermt2d
; CHECK: ret
define <16 x i32> @test11(<16 x i32> %a, <16 x i32>* %b) nounwind {
%c = load <16 x i32>* %b
@@ -107,7 +169,7 @@ define <16 x i32> @test11(<16 x i32> %a, <16 x i32>* %b) nounwind {
}
; CHECK-LABEL: test12
-; CHECK: vmovlhpsz %xmm
+; CHECK: vmovlhps {{.*}}## encoding: [0x62
; CHECK: ret
define <4 x i32> @test12(<4 x i32> %a, <4 x i32> %b) nounwind {
%c = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -186,7 +248,7 @@ define <16 x float> @test21(<16 x float> %a, <16 x float> %c) {
}
; CHECK-LABEL: test22
-; CHECK: vmovhlpsz %xmm
+; CHECK: vmovhlps {{.*}}## encoding: [0x62
; CHECK: ret
define <4 x i32> @test22(<4 x i32> %a, <4 x i32> %b) nounwind {
%c = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
@@ -202,7 +264,7 @@ define <16 x float> @test23(<16 x float> %a, <16 x float> %c) {
}
; CHECK-LABEL: @test24
-; CHECK: vpermi2d
+; CHECK: vpermt2d
; CHECK: ret
define <16 x i32> @test24(<16 x i32> %a, <16 x i32> %b) nounwind {
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -223,4 +285,30 @@ define <16 x i32> @test25(<16 x i32> %a, <16 x i32> %b) nounwind {
define <16 x i32> @test26(<16 x i32> %a) nounwind {
%c = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 undef, i32 9, i32 9, i32 undef, i32 11, i32 13, i32 undef, i32 undef, i32 undef>
ret <16 x i32> %c
-} \ No newline at end of file
+}
+
+; CHECK-LABEL: @test27
+; CHECK: ret
+define <16 x i32> @test27(<4 x i32>%a) {
+ %res = shufflevector <4 x i32> %a, <4 x i32> undef, <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i32> %res
+}
+
+; CHECK-LABEL: @test28
+; CHECK: vinserti64x4 $1
+; CHECK: ret
+define <16 x i32> @test28(<16 x i32>%x, <16 x i32>%y) {
+ %res = shufflevector <16 x i32>%x, <16 x i32>%y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i32> %res
+}
+
+; CHECK-LABEL: @test29
+; CHECK: vinserti64x4 $0
+; CHECK: ret
+define <16 x i32> @test29(<16 x i32>%x, <16 x i32>%y) {
+ %res = shufflevector <16 x i32>%x, <16 x i32>%y, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i32> %res
+}
+
diff --git a/test/CodeGen/X86/avx512-trunc-ext.ll b/test/CodeGen/X86/avx512-trunc-ext.ll
index 31db68cc582b..5e097be04cdf 100644
--- a/test/CodeGen/X86/avx512-trunc-ext.ll
+++ b/test/CodeGen/X86/avx512-trunc-ext.ll
@@ -18,7 +18,7 @@ define <8 x i16> @trunc_8x64_to_8x16(<8 x i64> %i) nounwind readnone {
; CHECK-LABEL: zext_16x8_to_16x32
-; CHECK; vpmovzxbd {{.*}}%zmm
+; CHECK: vpmovzxbd {{.*}}%zmm
; CHECK: ret
define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
%x = zext <16 x i8> %i to <16 x i32>
@@ -26,7 +26,7 @@ define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
}
; CHECK-LABEL: sext_16x8_to_16x32
-; CHECK; vpmovsxbd {{.*}}%zmm
+; CHECK: vpmovsxbd {{.*}}%zmm
; CHECK: ret
define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
%x = sext <16 x i8> %i to <16 x i32>
@@ -35,7 +35,7 @@ define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
; CHECK-LABEL: zext_16x16_to_16x32
-; CHECK; vpmovzxwd {{.*}}%zmm
+; CHECK: vpmovzxwd {{.*}}%zmm
; CHECK: ret
define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %i) nounwind readnone {
%x = zext <16 x i16> %i to <16 x i32>
@@ -43,7 +43,7 @@ define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %i) nounwind readnone {
}
; CHECK-LABEL: zext_8x16_to_8x64
-; CHECK; vpmovzxwq
+; CHECK: vpmovzxwq
; CHECK: ret
define <8 x i64> @zext_8x16_to_8x64(<8 x i16> %i) nounwind readnone {
%x = zext <8 x i16> %i to <8 x i64>
@@ -116,7 +116,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
ret i8 %mask
}
-; CHECK: sext_8i1_8i32
+; CHECK-LABEL: sext_8i1_8i32
; CHECK: vpbroadcastq LCP{{.*}}(%rip), %zmm0 {%k1} {z}
; CHECK: ret
define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
@@ -125,3 +125,24 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
%y = sext <8 x i1> %x1 to <8 x i32>
ret <8 x i32> %y
}
+
+; CHECK-LABEL: trunc_v16i32_to_v16i16
+; CHECK: vpmovdw
+; CHECK: ret
+define <16 x i16> @trunc_v16i32_to_v16i16(<16 x i32> %x) {
+ %1 = trunc <16 x i32> %x to <16 x i16>
+ ret <16 x i16> %1
+}
+
+; CHECK-LABEL: trunc_i32_to_i1
+; CHECK: andl
+; CHECK: kmov
+; CHECK: kortest
+; CHECK: orl
+; CHECK: ret
+define i16 @trunc_i32_to_i1(i32 %a) {
+ %a_i = trunc i32 %a to i1
+ %maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
+ %res = bitcast <16 x i1> %maskv to i16
+ ret i16 %res
+}
diff --git a/test/CodeGen/X86/avx512-vbroadcast.ll b/test/CodeGen/X86/avx512-vbroadcast.ll
index 6f89d6ce2342..9c6db11d8f45 100644
--- a/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding| FileCheck %s
;CHECK-LABEL: _inreg16xi32:
;CHECK: vpbroadcastd {{.*}}, %zmm
@@ -19,7 +19,7 @@ define <8 x i64> @_inreg8xi64(i64 %a) {
}
;CHECK-LABEL: _inreg16xfloat:
-;CHECK: vbroadcastssz {{.*}}, %zmm
+;CHECK: vbroadcastss {{.*}}, %zmm
;CHECK: ret
define <16 x float> @_inreg16xfloat(float %a) {
%b = insertelement <16 x float> undef, float %a, i32 0
@@ -28,7 +28,7 @@ define <16 x float> @_inreg16xfloat(float %a) {
}
;CHECK-LABEL: _inreg8xdouble:
-;CHECK: vbroadcastsdz {{.*}}, %zmm
+;CHECK: vbroadcastsd {{.*}}, %zmm
;CHECK: ret
define <8 x double> @_inreg8xdouble(double %a) {
%b = insertelement <8 x double> undef, double %a, i32 0
@@ -45,9 +45,20 @@ define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
}
;CHECK-LABEL: _xmm16xfloat
-;CHECK: vbroadcastssz
+;CHECK: vbroadcastss {{.*}}## encoding: [0x62
;CHECK: ret
define <16 x float> @_xmm16xfloat(<16 x float> %a) {
%b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
ret <16 x float> %b
}
+
+define <16 x i32> @test_vbroadcast() {
+ ; CHECK: vpbroadcastd
+entry:
+ %0 = sext <16 x i1> zeroinitializer to <16 x i32>
+ %1 = fcmp uno <16 x float> undef, zeroinitializer
+ %2 = sext <16 x i1> %1 to <16 x i32>
+ %3 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %2
+ ret <16 x i32> %3
+}
+
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index 6ca5bcc3b862..d762f0083e35 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -5,9 +5,9 @@
; CHECK: vmovups
; CHECK: ret
define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
- %mask = fcmp ole <16 x float> %x, %y
- %max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
- ret <16 x float> %max
+ %mask = fcmp ole <16 x float> %x, %y
+ %max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
+ ret <16 x float> %max
}
; CHECK-LABEL: test2
@@ -15,9 +15,9 @@ define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
; CHECK: vmovupd
; CHECK: ret
define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
- %mask = fcmp ole <8 x double> %x, %y
- %max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
- ret <8 x double> %max
+ %mask = fcmp ole <8 x double> %x, %y
+ %max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
+ ret <8 x double> %max
}
; CHECK-LABEL: test3
@@ -26,9 +26,9 @@ define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
; CHECK: ret
define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwind {
%y = load <16 x i32>* %yp, align 4
- %mask = icmp eq <16 x i32> %x, %y
- %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
- ret <16 x i32> %max
+ %mask = icmp eq <16 x i32> %x, %y
+ %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
+ ret <16 x i32> %max
}
; CHECK-LABEL: @test4_unsigned
@@ -36,9 +36,9 @@ define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwin
; CHECK: vmovdqu32
; CHECK: ret
define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y) nounwind {
- %mask = icmp uge <16 x i32> %x, %y
- %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %y
- ret <16 x i32> %max
+ %mask = icmp uge <16 x i32> %x, %y
+ %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %y
+ ret <16 x i32> %max
}
; CHECK-LABEL: test5
@@ -46,9 +46,9 @@ define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y) nounwind {
; CHECK: vmovdqu64 {{.*}}%k1
; CHECK: ret
define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
- %mask = icmp eq <8 x i64> %x, %y
- %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
- ret <8 x i64> %max
+ %mask = icmp eq <8 x i64> %x, %y
+ %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
+ ret <8 x i64> %max
}
; CHECK-LABEL: test6_unsigned
@@ -56,9 +56,9 @@ define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK: vmovdqu64 {{.*}}%k1
; CHECK: ret
define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y) nounwind {
- %mask = icmp ugt <8 x i64> %x, %y
- %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
- ret <8 x i64> %max
+ %mask = icmp ugt <8 x i64> %x, %y
+ %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
+ ret <8 x i64> %max
}
; CHECK-LABEL: test7
@@ -111,3 +111,54 @@ define <8 x i32> @test11_unsigned(<8 x i32> %x, <8 x i32> %y) nounwind {
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
ret <8 x i32> %max
}
+
+; CHECK-LABEL: test12
+; CHECK: vpcmpeqq %zmm2, %zmm0, [[LO:%k[0-7]]]
+; CHECK: vpcmpeqq %zmm3, %zmm1, [[HI:%k[0-7]]]
+; CHECK: kunpckbw [[LO]], [[HI]], {{%k[0-7]}}
+
+define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
+ %res = icmp eq <16 x i64> %a, %b
+ %res1 = bitcast <16 x i1> %res to i16
+ ret i16 %res1
+}
+
+; CHECK-LABEL: test13
+; CHECK: vcmpeqps %zmm
+; CHECK: vpbroadcastd
+; CHECK: ret
+define <16 x i32> @test13(<16 x float>%a, <16 x float>%b)
+{
+ %cmpvector_i = fcmp oeq <16 x float> %a, %b
+ %conv = zext <16 x i1> %cmpvector_i to <16 x i32>
+ ret <16 x i32> %conv
+}
+
+; CHECK-LABEL: test14
+; CHECK: vpcmp
+; CHECK-NOT: vpcmp
+; CHECK: vmovdqu32 {{.*}}{%k1} {z}
+; CHECK: ret
+define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) {
+ %sub_r = sub <16 x i32> %a, %b
+ %cmp.i2.i = icmp sgt <16 x i32> %sub_r, %a
+ %sext.i3.i = sext <16 x i1> %cmp.i2.i to <16 x i32>
+ %mask = icmp eq <16 x i32> %sext.i3.i, zeroinitializer
+ %res = select <16 x i1> %mask, <16 x i32> zeroinitializer, <16 x i32> %sub_r
+ ret <16 x i32>%res
+}
+
+; CHECK-LABEL: test15
+; CHECK: vpcmpgtq
+; CHECK-NOT: vpcmp
+; CHECK: vmovdqu64 {{.*}}{%k1} {z}
+; CHECK: ret
+define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) {
+ %sub_r = sub <8 x i64> %a, %b
+ %cmp.i2.i = icmp sgt <8 x i64> %sub_r, %a
+ %sext.i3.i = sext <8 x i1> %cmp.i2.i to <8 x i64>
+ %mask = icmp eq <8 x i64> %sext.i3.i, zeroinitializer
+ %res = select <8 x i1> %mask, <8 x i64> zeroinitializer, <8 x i64> %sub_r
+ ret <8 x i64>%res
+}
+
diff --git a/test/CodeGen/X86/avx512-vselect-crash.ll b/test/CodeGen/X86/avx512-vselect-crash.ll
new file mode 100644
index 000000000000..9d652d36a524
--- /dev/null
+++ b/test/CodeGen/X86/avx512-vselect-crash.ll
@@ -0,0 +1,11 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+; CHECK-LABEL: test
+; CHECK: vpxord
+; CHECK: ret
+define <16 x i32> @test() {
+entry:
+ %0 = icmp slt <16 x i32> undef, undef
+ %1 = select <16 x i1> %0, <16 x i32> undef, <16 x i32> zeroinitializer
+ ret <16 x i32> %1
+}
diff --git a/test/CodeGen/X86/avx512-zext-load-crash.ll b/test/CodeGen/X86/avx512-zext-load-crash.ll
new file mode 100644
index 000000000000..07ded13a0e3c
--- /dev/null
+++ b/test/CodeGen/X86/avx512-zext-load-crash.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+define <8 x i16> @test_zext_load() {
+ ; CHECK: vmovq
+entry:
+ %0 = load <2 x i16> ** undef, align 8
+ %1 = getelementptr inbounds <2 x i16>* %0, i64 1
+ %2 = load <2 x i16>* %0, align 1
+ %3 = shufflevector <2 x i16> %2, <2 x i16> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = load <2 x i16>* %1, align 1
+ %5 = shufflevector <2 x i16> %4, <2 x i16> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %6 = shufflevector <8 x i16> %3, <8 x i16> %5, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %6
+}
diff --git a/test/CodeGen/X86/barrier-sse.ll b/test/CodeGen/X86/barrier-sse.ll
index bbfeea6419bd..80c0cc82e93e 100644
--- a/test/CodeGen/X86/barrier-sse.ll
+++ b/test/CodeGen/X86/barrier-sse.ll
@@ -1,11 +1,14 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep sfence
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep lfence
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep mfence
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep MEMBARRIER
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | FileCheck %s
define void @test() {
fence acquire
+ ; CHECK: #MEMBARRIER
+
fence release
+ ; CHECK: #MEMBARRIER
+
fence acq_rel
+ ; CHECK: #MEMBARRIER
+
ret void
}
diff --git a/test/CodeGen/X86/blend-msb.ll b/test/CodeGen/X86/blend-msb.ll
index 4f2060f7012b..34aaf2c31ace 100644
--- a/test/CodeGen/X86/blend-msb.ll
+++ b/test/CodeGen/X86/blend-msb.ll
@@ -1,13 +1,11 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -mattr=+sse4.1 | FileCheck %s
-; In this test we check that sign-extend of the mask bit is performed by
-; shifting the needed bit to the MSB, and not using shl+sra.
+; Verify that we produce movss instead of blendvps when possible.
;CHECK-LABEL: vsel_float:
-;CHECK: movl $-2147483648
-;CHECK-NEXT: movd
-;CHECK-NEXT: blendvps
+;CHECK-NOT: blend
+;CHECK: movss
;CHECK: ret
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
%vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
@@ -15,23 +13,26 @@ define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
}
;CHECK-LABEL: vsel_4xi8:
-;CHECK: movl $-2147483648
-;CHECK-NEXT: movd
-;CHECK-NEXT: blendvps
+;CHECK-NOT: blend
+;CHECK: movss
;CHECK: ret
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
%vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i8> %v1, <4 x i8> %v2
ret <4 x i8> %vsel
}
-
-; We do not have native support for v8i16 blends and we have to use the
-; blendvb instruction or a sequence of NAND/OR/AND. Make sure that we do not r
-; reduce the mask in this case.
;CHECK-LABEL: vsel_8xi16:
-;CHECK: psllw
-;CHECK: psraw
-;CHECK: pblendvb
+; The select mask is
+; <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>
+; which translates into the boolean mask (big endian representation):
+; 00010001 = 17.
+; '1' means takes the first argument, '0' means takes the second argument.
+; This is the opposite of the intel syntax, thus we expect
+; the inverted mask: 11101110 = 238.
+; According to the ABI:
+; v1 is in xmm0 => first argument is xmm0.
+; v2 is in xmm1 => second argument is xmm1.
+;CHECK: pblendw $238, %xmm1, %xmm0
;CHECK: ret
define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i16> %v1, <8 x i16> %v2
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index d3e05d6fbed2..2681c109ef5d 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -701,7 +701,7 @@ exit:
define void @unanalyzable_branch_to_best_succ(i1 %cond) {
; Ensure that we can handle unanalyzable branches where the destination block
-; gets selected as the optimal sucessor to merge.
+; gets selected as the optimal successor to merge.
;
; CHECK: unanalyzable_branch_to_best_succ
; CHECK: %entry
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index 242075a878bb..a70720926de0 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -216,6 +216,23 @@ entry:
; CHECK: bzhiq
}
+define i64 @bzhi64_constant_mask(i64 %x) #0 {
+entry:
+ %and = and i64 %x, 4611686018427387903
+ ret i64 %and
+; CHECK-LABEL: bzhi64_constant_mask:
+; CHECK: movb $62, %al
+; CHECK: bzhiq %rax, %r[[ARG1:di|cx]], %rax
+}
+
+define i64 @bzhi64_small_constant_mask(i64 %x) #0 {
+entry:
+ %and = and i64 %x, 2147483647
+ ret i64 %and
+; CHECK-LABEL: bzhi64_small_constant_mask:
+; CHECK: andq $2147483647, %r[[ARG1]]
+}
+
define i32 @blsi32(i32 %x) nounwind readnone {
%tmp = sub i32 0, %x
%tmp2 = and i32 %x, %tmp
diff --git a/test/CodeGen/X86/br-fold.ll b/test/CodeGen/X86/br-fold.ll
index 522346301162..fd1e73bde8cc 100644
--- a/test/CodeGen/X86/br-fold.ll
+++ b/test/CodeGen/X86/br-fold.ll
@@ -1,7 +1,19 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin < %s | FileCheck -check-prefix=X64_DARWIN %s
+; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck -check-prefix=X64_LINUX %s
+; RUN: llc -mtriple=x86_64-pc-windows < %s | FileCheck -check-prefix=X64_WINDOWS %s
+; RUN: llc -mtriple=x86_64-pc-windows-gnu < %s | FileCheck -check-prefix=X64_WINDOWS_GNU %s
-; CHECK: orq
-; CHECK-NEXT: %bb8.i329
+; X64_DARWIN: orq
+; X64_DARWIN-NEXT: %bb8.i329
+
+; X64_LINUX: orq %rax, %rcx
+; X64_LINUX-NEXT: %bb8.i329
+
+; X64_WINDOWS: orq %rax, %rcx
+; X64_WINDOWS-NEXT: ud2
+
+; X64_WINDOWS_GNU: orq %rax, %rcx
+; X64_WINDOWS_GNU-NEXT: ud2
@_ZN11xercesc_2_513SchemaSymbols21fgURI_SCHEMAFORSCHEMAE = external constant [33 x i16], align 32 ; <[33 x i16]*> [#uses=1]
@_ZN11xercesc_2_56XMLUni16fgNotationStringE = external constant [9 x i16], align 16 ; <[9 x i16]*> [#uses=1]
diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll
index 7a7a8a4ebb18..9dc960d7779f 100644
--- a/test/CodeGen/X86/bswap-vector.ll
+++ b/test/CodeGen/X86/bswap-vector.ll
@@ -1,19 +1,173 @@
-; RUN: llc < %s -mcpu=core | FileCheck %s
+; RUN: llc < %s -mcpu=x86-64 | FileCheck %s -check-prefix=CHECK-NOSSSE3
+; RUN: llc < %s -mcpu=core2 | FileCheck %s -check-prefix=CHECK-SSSE3
+; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK-AVX2
+; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s -check-prefix=CHECK-WIDE-AVX2
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
+declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
-define <2 x i64> @foo(<2 x i64> %v) #0 {
+define <8 x i16> @test1(<8 x i16> %v) #0 {
+entry:
+ %r = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %v)
+ ret <8 x i16> %r
+
+; CHECK-NOSSSE3-LABEL: @test1
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: rolw
+; CHECK-NOSSSE3: retq
+
+; CHECK-SSSE3-LABEL: @test1
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3-NEXT: retq
+
+; CHECK-AVX2-LABEL: @test1
+; CHECK-AVX2: vpshufb
+; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test1
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
+}
+
+define <4 x i32> @test2(<4 x i32> %v) #0 {
+entry:
+ %r = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %v)
+ ret <4 x i32> %r
+
+; CHECK-NOSSSE3-LABEL: @test2
+; CHECK-NOSSSE3: bswapl
+; CHECK-NOSSSE3: bswapl
+; CHECK-NOSSSE3: bswapl
+; CHECK-NOSSSE3: bswapl
+; CHECK-NOSSSE3: retq
+
+; CHECK-SSSE3-LABEL: @test2
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3-NEXT: retq
+
+; CHECK-AVX2-LABEL: @test2
+; CHECK-AVX2: vpshufb
+; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test2
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
+}
+
+define <2 x i64> @test3(<2 x i64> %v) #0 {
entry:
%r = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %v)
ret <2 x i64> %r
+
+; CHECK-NOSSSE3-LABEL: @test3
+; CHECK-NOSSSE3: bswapq
+; CHECK-NOSSSE3: bswapq
+; CHECK-NOSSSE3: retq
+
+; CHECK-SSSE3-LABEL: @test3
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3-NEXT: retq
+
+; CHECK-AVX2-LABEL: @test3
+; CHECK-AVX2: vpshufb
+; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test3
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
-; CHECK-LABEL: @foo
-; CHECK: bswapq
-; CHECK: bswapq
-; CHECK: ret
+declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
+declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
+declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)
+
+define <16 x i16> @test4(<16 x i16> %v) #0 {
+entry:
+ %r = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %v)
+ ret <16 x i16> %r
+
+; CHECK-SSSE3-LABEL: @test4
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3-NEXT: retq
+
+; CHECK-AVX2-LABEL: @test4
+; CHECK-AVX2: vpshufb
+; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test4
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
+}
+
+define <8 x i32> @test5(<8 x i32> %v) #0 {
+entry:
+ %r = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %v)
+ ret <8 x i32> %r
+
+; CHECK-SSSE3-LABEL: @test5
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3-NEXT: retq
+
+; CHECK-AVX2-LABEL: @test5
+; CHECK-AVX2: vpshufb
+; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test5
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
+}
+
+define <4 x i64> @test6(<4 x i64> %v) #0 {
+entry:
+ %r = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %v)
+ ret <4 x i64> %r
+
+; CHECK-SSSE3-LABEL: @test6
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3-NEXT: retq
+
+; CHECK-AVX2-LABEL: @test6
+; CHECK-AVX2: vpshufb
+; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test6
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
+}
+
+declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
+
+define <4 x i16> @test7(<4 x i16> %v) #0 {
+entry:
+ %r = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %v)
+ ret <4 x i16> %r
+
+; CHECK-SSSE3-LABEL: @test7
+; CHECK-SSSE3: pshufb
+; CHECK-SSSE3: psrld $16
+; CHECK-SSSE3-NEXT: retq
+
+; CHECK-AVX2-LABEL: @test7
+; CHECK-AVX2: vpshufb
+; CHECK-AVX2: vpsrld $16
+; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test7
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
+}
attributes #0 = { nounwind uwtable }
diff --git a/test/CodeGen/X86/bt.ll b/test/CodeGen/X86/bt.ll
index f12a3543b072..036ec0acc6e8 100644
--- a/test/CodeGen/X86/bt.ll
+++ b/test/CodeGen/X86/bt.ll
@@ -20,7 +20,7 @@
define void @test2(i32 %x, i32 %n) nounwind {
entry:
; CHECK: test2
-; CHECK: btl %eax, %ecx
+; CHECK: btl %ecx, %eax
; CHECK: jb
%tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
%tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/cache-intrinsic.ll b/test/CodeGen/X86/cache-intrinsic.ll
new file mode 100644
index 000000000000..3091b5ff3118
--- /dev/null
+++ b/test/CodeGen/X86/cache-intrinsic.ll
@@ -0,0 +1,26 @@
+; RUN: llc %s -o - | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@buffer = global [32 x i8] c"This is a largely unused buffer\00", align 16
+@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
+@.str1 = private unnamed_addr constant [25 x i8] c"Still, largely unused...\00", align 1
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
+ %call1 = call i8* @strcpy(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds ([25 x i8]* @.str1, i32 0, i32 0)) #3
+ call void @llvm.clear_cache(i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds (i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0), i32 32)) #3
+ %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8]* @buffer, i32 0, i32 0))
+ ret i32 0
+}
+
+; CHECK-NOT: __clear_cache
+
+declare i32 @printf(i8*, ...)
+
+declare i8* @strcpy(i8*, i8*)
+
+declare void @llvm.clear_cache(i8*, i8*)
diff --git a/test/CodeGen/X86/call-imm.ll b/test/CodeGen/X86/call-imm.ll
index 8753594df10a..898b4ec203ad 100644
--- a/test/CodeGen/X86/call-imm.ll
+++ b/test/CodeGen/X86/call-imm.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=static | FileCheck -check-prefix X86STA %s
; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic | FileCheck -check-prefix X86PIC %s
; RUN: llc < %s -mtriple=i386-pc-linux -relocation-model=dynamic-no-pic | FileCheck -check-prefix X86DYN %s
+; RUN: llc < %s -mtriple=i386-pc-win32 -relocation-model=static | FileCheck -check-prefix X86WINSTA %s
; Call to immediate is not safe on x86-64 unless we *know* that the
; call will be within 32-bits pcrel from the dest immediate.
@@ -20,4 +21,5 @@ entry:
; X86STA: {{call.*12345678}}
; X86PIC-NOT: {{call.*12345678}}
; X86DYN: {{call.*12345678}}
+; X86WINSTA: {{call.*[*]%eax}}
; X64: {{call.*[*]%rax}}
diff --git a/test/CodeGen/X86/cas.ll b/test/CodeGen/X86/cas.ll
index c2dd05ef7302..ec519c646f69 100644
--- a/test/CodeGen/X86/cas.ll
+++ b/test/CodeGen/X86/cas.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-pc-linux-gnu %s -o - | FileCheck %s
+; RUN: llc -mtriple=x86_64-pc-linux-gnu %s -o - -no-integrated-as | FileCheck %s
; C code this came from
;bool cas(float volatile *p, float *expected, float desired) {
diff --git a/test/CodeGen/X86/catch.ll b/test/CodeGen/X86/catch.ll
new file mode 100644
index 000000000000..6f7021360e1f
--- /dev/null
+++ b/test/CodeGen/X86/catch.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic | FileCheck %s
+
+; PR18390
+; We used to assert creating this label. The name itself is not critical. It
+; just needs to be a unique local symbol.
+; CHECK: .L.Lstr.DW.stub:
+; CHECK-NEXT: .quad .Lstr
+
+@str = private unnamed_addr constant [12 x i8] c"NSException\00"
+define void @f() {
+ invoke void @g()
+ to label %invoke.cont unwind label %lpad
+invoke.cont:
+ ret void
+lpad:
+ %tmp14 = landingpad { i8*, i32 } personality i8* bitcast (void ()* @h to i8*)
+ catch i8* getelementptr inbounds ([12 x i8]* @str, i64 0, i64 0)
+ ret void
+}
+declare void @g()
+declare void @h()
diff --git a/test/CodeGen/X86/cfi.ll b/test/CodeGen/X86/cfi.ll
new file mode 100644
index 000000000000..b57ff45f51e3
--- /dev/null
+++ b/test/CodeGen/X86/cfi.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck --check-prefix=STATIC %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic | FileCheck --check-prefix=PIC %s
+
+; STATIC: .cfi_personality 3, __gxx_personality_v0
+; STATIC: .cfi_lsda 3, .Lexception0
+
+; PIC: .cfi_personality 155, DW.ref.__gxx_personality_v0
+; PIC: .cfi_lsda 27, .Lexception0
+
+
+define void @bar() {
+entry:
+ %call = invoke i32 @foo()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont:
+ ret void
+
+lpad:
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
+ ret void
+}
+
+declare i32 @foo()
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/cfstring.ll b/test/CodeGen/X86/cfstring.ll
index 8cdd59e9ae93..cae432098907 100644
--- a/test/CodeGen/X86/cfstring.ll
+++ b/test/CodeGen/X86/cfstring.ll
@@ -7,7 +7,7 @@
; Make sure that the string ends up the correct section.
; CHECK: .section __TEXT,__cstring
-; CHECK-NEXT: l_.str3:
+; CHECK-NEXT: L_.str3:
; CHECK: .section __DATA,__cfstring
; CHECK-NEXT: .align 4
@@ -15,13 +15,13 @@
; CHECK-NEXT: .quad ___CFConstantStringClassReference
; CHECK-NEXT: .long 1992
; CHECK-NEXT: .space 4
-; CHECK-NEXT: .quad l_.str3
+; CHECK-NEXT: .quad L_.str3
; CHECK-NEXT: .long 0
; CHECK-NEXT: .space 4
@isLogVisible = global i8 0, align 1
@__CFConstantStringClassReference = external global [0 x i32]
-@.str3 = linker_private unnamed_addr constant [1 x i8] zeroinitializer, align 1
+@.str3 = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
@_unnamed_cfstring_4 = private constant %struct.NSConstantString { i32* getelementptr inbounds ([0 x i32]* @__CFConstantStringClassReference, i32 0, i32 0), i32 1992, i8* getelementptr inbounds ([1 x i8]* @.str3, i32 0, i32 0), i32 0 }, section "__DATA,__cfstring"
@null.array = weak_odr constant [1 x i8] zeroinitializer, align 1
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 215b86267a47..d38d2b430ccb 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -41,8 +41,8 @@ declare void @bar(i64) nounwind
define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
; CHECK-LABEL: test3:
-; CHECK: cmovnel %edi, %esi
-; CHECK-NEXT: movl %esi, %edi
+; CHECK: cmov{{n?}}el %[[R1:e..]], %[[R2:e..]]
+; CHECK-NEXT: movl %[[R2]], %{{e..}}
%c = trunc i64 %a to i32
%d = trunc i64 %b to i32
diff --git a/test/CodeGen/X86/cmp.ll b/test/CodeGen/X86/cmp.ll
index 551d9bc6074b..149d53759fe2 100644
--- a/test/CodeGen/X86/cmp.ll
+++ b/test/CodeGen/X86/cmp.ll
@@ -26,9 +26,22 @@ cond_true: ; preds = %0
ReturnBlock: ; preds = %0
ret i32 0
; CHECK-LABEL: test2:
-; CHECK: movl (%rsi), %eax
-; CHECK: shll $3, %eax
-; CHECK: testl %eax, %eax
+; CHECK: testl $536870911, (%rsi)
+}
+
+define i8 @test2b(i8 %X, i8* %y) nounwind {
+ %tmp = load i8* %y ; <i8> [#uses=1]
+ %tmp1 = shl i8 %tmp, 3 ; <i8> [#uses=1]
+ %tmp1.upgrd.2 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
+ br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
+
+cond_true: ; preds = %0
+ ret i8 1
+
+ReturnBlock: ; preds = %0
+ ret i8 0
+; CHECK-LABEL: test2b:
+; CHECK: testb $31, (%rsi)
}
define i64 @test3(i64 %x) nounwind {
@@ -68,8 +81,8 @@ define i32 @test5(double %A) nounwind {
bb12:; preds = %entry
ret i32 32
; CHECK-LABEL: test5:
-; CHECK: ucomisd LCPI4_0(%rip), %xmm0
-; CHECK: ucomisd LCPI4_1(%rip), %xmm0
+; CHECK: ucomisd LCPI5_0(%rip), %xmm0
+; CHECK: ucomisd LCPI5_1(%rip), %xmm0
}
declare i32 @foo(...)
@@ -163,3 +176,38 @@ define i32 @test12() uwtable ssp {
}
declare zeroext i1 @test12b()
+
+define i32 @test13(i32 %mask, i32 %base, i32 %intra) {
+ %and = and i32 %mask, 8
+ %tobool = icmp ne i32 %and, 0
+ %cond = select i1 %tobool, i32 %intra, i32 %base
+ ret i32 %cond
+
+; CHECK-LABEL: test13:
+; CHECK: testb $8, %dil
+; CHECK: cmovnel
+}
+
+define i32 @test14(i32 %mask, i32 %base, i32 %intra) #0 {
+ %s = lshr i32 %mask, 7
+ %tobool = icmp sgt i32 %s, -1
+ %cond = select i1 %tobool, i32 %intra, i32 %base
+ ret i32 %cond
+
+; CHECK-LABEL: test14:
+; CHECK: shrl $7, %edi
+; CHECK-NEXT: cmovnsl %edx, %esi
+}
+
+; PR19964
+define zeroext i1 @test15(i32 %bf.load, i32 %n) {
+ %bf.lshr = lshr i32 %bf.load, 16
+ %cmp2 = icmp eq i32 %bf.lshr, 0
+ %cmp5 = icmp uge i32 %bf.lshr, %n
+ %.cmp5 = or i1 %cmp2, %cmp5
+ ret i1 %.cmp5
+
+; CHECK-LABEL: test15:
+; CHECK: shrl $16, %edi
+; CHECK: cmpl %esi, %edi
+}
diff --git a/test/CodeGen/X86/cmpxchg-i1.ll b/test/CodeGen/X86/cmpxchg-i1.ll
new file mode 100644
index 000000000000..a21ab593b078
--- /dev/null
+++ b/test/CodeGen/X86/cmpxchg-i1.ll
@@ -0,0 +1,87 @@
+; RUN: llc -mtriple=x86_64 -o - %s | FileCheck %s
+
+define i1 @try_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: try_cmpxchg:
+; CHECK: cmpxchgl
+; CHECK-NOT: cmp
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ ret i1 %success
+}
+
+define void @cmpxchg_flow(i64* %addr, i64 %desired, i64 %new) {
+; CHECK-LABEL: cmpxchg_flow:
+; CHECK: cmpxchgq
+; CHECK-NOT: cmp
+; CHECK-NOT: set
+; CHECK: {{jne|jeq}}
+ %pair = cmpxchg i64* %addr, i64 %desired, i64 %new seq_cst seq_cst
+ %success = extractvalue { i64, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ call void @foo()
+ ret void
+
+false:
+ call void @bar()
+ ret void
+}
+
+define i64 @cmpxchg_sext(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: cmpxchg_sext:
+; CHECK-DAG: cmpxchgl
+; CHECK-NOT: cmpl
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %mask = sext i1 %success to i64
+ ret i64 %mask
+}
+
+define i32 @cmpxchg_zext(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: cmpxchg_zext:
+; CHECK: cmpxchgl
+; CHECK-NOT: cmp
+; CHECK: sete [[BYTE:%[a-z0-9]+]]
+; CHECK: movzbl [[BYTE]], %eax
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %mask = zext i1 %success to i32
+ ret i32 %mask
+}
+
+
+define i32 @cmpxchg_use_eflags_and_val(i32* %addr, i32 %offset) {
+; CHECK-LABEL: cmpxchg_use_eflags_and_val:
+; CHECK: movl (%rdi), %e[[OLDVAL:[a-z0-9]+]]
+
+; CHECK: [[LOOPBB:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: leal (%r[[OLDVAL]],%rsi), [[NEW:%[a-z0-9]+]]
+; CHECK: cmpxchgl [[NEW]], (%rdi)
+; CHECK-NOT: cmpl
+; CHECK: jne [[LOOPBB]]
+
+ ; Result already in %eax
+; CHECK: retq
+entry:
+ %init = load atomic i32* %addr seq_cst, align 4
+ br label %loop
+
+loop:
+ %old = phi i32 [%init, %entry], [%oldval, %loop]
+ %new = add i32 %old, %offset
+ %pair = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst seq_cst
+ %oldval = extractvalue { i32, i1 } %pair, 0
+ %success = extractvalue { i32, i1 } %pair, 1
+ br i1 %success, label %done, label %loop
+
+done:
+ ret i32 %oldval
+}
+
+declare void @foo()
+declare void @bar()
diff --git a/test/CodeGen/X86/cmpxchg-i128-i1.ll b/test/CodeGen/X86/cmpxchg-i128-i1.ll
new file mode 100644
index 000000000000..4dd30013ecab
--- /dev/null
+++ b/test/CodeGen/X86/cmpxchg-i128-i1.ll
@@ -0,0 +1,83 @@
+; RUN: llc -mcpu=core-avx2 -mtriple=x86_64 -o - %s | FileCheck %s
+
+define i1 @try_cmpxchg(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: try_cmpxchg:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmp
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ ret i1 %success
+}
+
+define void @cmpxchg_flow(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_flow:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmp
+; CHECK-NOT: set
+; CHECK: {{jne|jeq}}
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ call void @foo()
+ ret void
+
+false:
+ call void @bar()
+ ret void
+}
+
+; Can't use the flags here because cmpxchg16b only sets ZF.
+define i1 @cmpxchg_arithcmp(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_arithcmp:
+; CHECK: cmpxchg16b
+; CHECK: cmpq
+; CHECK: retq
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %oldval = extractvalue { i128, i1 } %pair, 0
+ %success = icmp sge i128 %oldval, %desired
+ ret i1 %success
+}
+
+define i128 @cmpxchg_zext(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_zext:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmpq
+; CHECK: sete [[BYTE:%[a-z0-9]+]]
+; CHECK: movzbl [[BYTE]], %eax
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ %mask = zext i1 %success to i128
+ ret i128 %mask
+}
+
+
+define i128 @cmpxchg_use_eflags_and_val(i128* %addr, i128 %offset) {
+; CHECK-LABEL: cmpxchg_use_eflags_and_val:
+
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmpq
+; CHECK: jne
+entry:
+ %init = load atomic i128* %addr seq_cst, align 16
+ br label %loop
+
+loop:
+ %old = phi i128 [%init, %entry], [%oldval, %loop]
+ %new = add i128 %old, %offset
+
+ %pair = cmpxchg i128* %addr, i128 %old, i128 %new seq_cst seq_cst
+ %oldval = extractvalue { i128, i1 } %pair, 0
+ %success = extractvalue { i128, i1 } %pair, 1
+
+ br i1 %success, label %done, label %loop
+
+done:
+ ret i128 %old
+}
+
+declare void @foo()
+declare void @bar()
diff --git a/test/CodeGen/X86/cmpxchg16b.ll b/test/CodeGen/X86/cmpxchg16b.ll
index edbd0bc9ded5..1d5bb85f8d20 100644
--- a/test/CodeGen/X86/cmpxchg16b.ll
+++ b/test/CodeGen/X86/cmpxchg16b.ll
@@ -6,7 +6,7 @@ entry:
; CHECK: movl $1, %ebx
; CHECK: lock
; CHECK-NEXT: cmpxchg16b
- %r = cmpxchg i128* %p, i128 0, i128 1 seq_cst
+ %r = cmpxchg i128* %p, i128 0, i128 1 seq_cst seq_cst
ret void
}
diff --git a/test/CodeGen/X86/coalescer-remat.ll b/test/CodeGen/X86/coalescer-remat.ll
index eb7b7a8738a7..bb08a0ec52cd 100644
--- a/test/CodeGen/X86/coalescer-remat.ll
+++ b/test/CodeGen/X86/coalescer-remat.ll
@@ -5,7 +5,8 @@
define i32 @main() nounwind {
entry:
- %0 = cmpxchg i64* @val, i64 0, i64 1 monotonic
+ %t0 = cmpxchg i64* @val, i64 0, i64 1 monotonic monotonic
+ %0 = extractvalue { i64, i1 } %t0, 0
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind
ret i32 0
}
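Illustrative note (not part of the patch): the cmpxchg updates in this and the nearby tests follow the new IR form, which takes both a success and a failure ordering and returns a { value, success } pair, so callers that used the scalar result now extract it. A minimal sketch of the updated pattern:

    %pair = cmpxchg i64* @val, i64 0, i64 1 monotonic monotonic
    %old  = extractvalue { i64, i1 } %pair, 0   ; the value previously returned directly
    %ok   = extractvalue { i64, i1 } %pair, 1   ; the new success flag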
diff --git a/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll b/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
new file mode 100644
index 000000000000..78e1dd287f6e
--- /dev/null
+++ b/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
@@ -0,0 +1,323 @@
+; RUN: opt -S -codegenprepare %s -o - | FileCheck %s
+; RUN: opt -S -codegenprepare -addr-sink-using-gep=1 %s -o - | FileCheck -check-prefix=CHECK-GEP %s
+; This file tests the different cases that are involved when codegen prepare
+; tries to get sign extensions out of the way of the addressing mode.
+; These tests require an actual target, as addressing mode decisions depend
+; on the target.
+
+target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+
+; Check that we correctly promote both operands of the promotable add.
+; CHECK-LABEL: @twoArgsPromotion
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i32 %arg1 to i64
+; CHECK: [[ARG2SEXT:%[a-zA-Z_0-9-]+]] = sext i32 %arg2 to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], [[ARG2SEXT]]
+; CHECK: inttoptr i64 [[PROMOTED]] to i8*
+; CHECK: ret
+define i8 @twoArgsPromotion(i32 %arg1, i32 %arg2) {
+ %add = add nsw i32 %arg1, %arg2
+ %sextadd = sext i32 %add to i64
+ %base = inttoptr i64 %sextadd to i8*
+ %res = load i8* %base
+ ret i8 %res
+}
+
+; Check that we do not promote both operands of the promotable add when
+; the instruction will not be folded into the addressing mode.
+; Otherwise, we will increase the number of instructions executed.
+; (This is a heuristic of course, because the new sext could have been
+; merged with something else.)
+; CHECK-LABEL: @twoArgsNoPromotion
+; CHECK: add nsw i32 %arg1, %arg2
+; CHECK: ret
+define i8 @twoArgsNoPromotion(i32 %arg1, i32 %arg2, i8* %base) {
+ %add = add nsw i32 %arg1, %arg2
+ %sextadd = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; Check that we do not promote when the related instruction does not have
+; the nsw flag.
+; CHECK-LABEL: @noPromotion
+; CHECK-NOT: add i64
+; CHECK: ret
+define i8 @noPromotion(i32 %arg1, i32 %arg2, i8* %base) {
+ %add = add i32 %arg1, %arg2
+ %sextadd = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; Check that we correctly promote constant arguments.
+; CHECK-LABEL: @oneArgPromotion
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i32 %arg1 to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
+; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: ret
+define i8 @oneArgPromotion(i32 %arg1, i8* %base) {
+ %add = add nsw i32 %arg1, 1
+ %sextadd = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; Check that we do not promote the truncate when we cannot determine the
+; bits that are dropped.
+; CHECK-LABEL: @oneArgPromotionBlockTrunc1
+; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 %arg1 to i8
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
+; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: ret
+define i8 @oneArgPromotionBlockTrunc1(i32 %arg1, i8* %base) {
+ %trunc = trunc i32 %arg1 to i8
+ %add = add nsw i8 %trunc, 1
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; Check that we do not promote the truncate when we cannot determine all the
+; bits that are dropped.
+; CHECK-LABEL: @oneArgPromotionBlockTrunc2
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i16 %arg1 to i32
+; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[ARG1SEXT]] to i8
+; CHECK: [[ARG1SEXT64:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT64]], 1
+; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: ret
+define i8 @oneArgPromotionBlockTrunc2(i16 %arg1, i8* %base) {
+ %sextarg1 = sext i16 %arg1 to i32
+ %trunc = trunc i32 %sextarg1 to i8
+ %add = add nsw i8 %trunc, 1
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; Check that we are able to promote the truncate when we know all the bits
+; that are dropped.
+; CHECK-LABEL: @oneArgPromotionPassTruncKeepSExt
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
+; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: ret
+define i8 @oneArgPromotionPassTruncKeepSExt(i1 %arg1, i8* %base) {
+ %sextarg1 = sext i1 %arg1 to i32
+ %trunc = trunc i32 %sextarg1 to i8
+ %add = add nsw i8 %trunc, 1
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; On X86, truncates are free. Check that we are able to promote the add
+; so it can be used as the addressing mode and that we insert a truncate for
+; the other use.
+; CHECK-LABEL: @oneArgPromotionTruncInsert
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
+; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
+; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: add i8 [[LOAD]], [[TRUNC]]
+; CHECK: ret
+define i8 @oneArgPromotionTruncInsert(i8 %arg1, i8* %base) {
+ %add = add nsw i8 %arg1, 1
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ %finalres = add i8 %res, %add
+ ret i8 %finalres
+}
+
+; Cannot sext from a larger type than the promoted type.
+; CHECK-LABEL: @oneArgPromotionLargerType
+; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i128 %arg1 to i8
+; CHECK: [[ARG1SEXT64:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT64]], 1
+; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: ret
+define i8 @oneArgPromotionLargerType(i128 %arg1, i8* %base) {
+ %trunc = trunc i128 %arg1 to i8
+ %add = add nsw i8 %trunc, 1
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ %finalres = add i8 %res, %add
+ ret i8 %finalres
+}
+
+; Use the same inserted trunc.
+; On X86, truncates are free. Check that we are able to promote the add
+; so it can be used as the addressing mode and that we insert a truncate for
+; *all* the other uses.
+; CHECK-LABEL: @oneArgPromotionTruncInsertSeveralUse
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
+; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
+; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = add i8 [[LOAD]], [[TRUNC]]
+; CHECK: add i8 [[ADDRES]], [[TRUNC]]
+; CHECK: ret
+define i8 @oneArgPromotionTruncInsertSeveralUse(i8 %arg1, i8* %base) {
+ %add = add nsw i8 %arg1, 1
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ %almostfinalres = add i8 %res, %add
+ %finalres = add i8 %almostfinalres, %add
+ ret i8 %finalres
+}
+
+; Check that the promoted instruction is used for all uses of the original
+; sign extension.
+; CHECK-LABEL: @oneArgPromotionSExtSeveralUse
+; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
+; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
+; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = zext i8 [[LOAD]] to i64
+; CHECK: add i64 [[ADDRES]], [[PROMOTED]]
+; CHECK: ret
+define i64 @oneArgPromotionSExtSeveralUse(i8 %arg1, i8* %base) {
+ %add = add nsw i8 %arg1, 1
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ %almostfinalres = zext i8 %res to i64
+ %finalres = add i64 %almostfinalres, %sextadd
+ ret i64 %finalres
+}
+
+; Check all types of rollback mechanism.
+; For this test, the sign extension stays in place.
+; However, the matching process goes as far as promoting both operands
+; of the first promotable add.
+; At this point the rollback mechanism kicks in and restores the state
+; until the addressing mode matcher is able to match something; in this
+; case it promotes nothing.
+; Along the way, the promotion mechanism involves:
+; - Mutating the type of %promotableadd1 and %promotableadd2.
+; - Creating a sext for %arg1 and %arg2.
+; - Creating a trunc for a use of %promotableadd1.
+; - Replacing a bunch of uses.
+; - Setting the operands of the promoted instruction with the promoted values.
+; - Moving instructions around (mainly the sext when promoting an instruction).
+; Each of those promotion steps has to be undone at least once during this
+; specific test.
+; CHECK-LABEL: @twoArgsPromotionNest
+; CHECK: [[ORIG:%[a-zA-Z_0-9-]+]] = add nsw i32 %arg1, %arg2
+; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ORIG]], [[ORIG]]
+; CHECK: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
+; CHECK: getelementptr inbounds i8* %base, i64 [[SEXT]]
+; CHECK: ret
+define i8 @twoArgsPromotionNest(i32 %arg1, i32 %arg2, i8* %base) {
+ %promotableadd1 = add nsw i32 %arg1, %arg2
+ %promotableadd2 = add nsw i32 %promotableadd1, %promotableadd1
+ %sextadd = sext i32 %promotableadd2 to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; Test the InstructionRemover undo, the only one that did not kick in
+; during the previous test.
+; The matcher first promotes the add, removes the trunc and promotes
+; the sext of arg1.
+; Then, the matcher cannot use an addressing mode r + r + r, thus it
+; rolls back.
+; CHECK-LABEL: @twoArgsNoPromotionRemove
+; CHECK: [[SEXTARG1:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i32
+; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[SEXTARG1]] to i8
+; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[TRUNC]], %arg2
+; CHECK: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i64
+; CHECK: getelementptr inbounds i8* %base, i64 [[SEXT]]
+; CHECK: ret
+define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, i8* %base) {
+ %sextarg1 = sext i1 %arg1 to i32
+ %trunc = trunc i32 %sextarg1 to i8
+ %add = add nsw i8 %trunc, %arg2
+ %sextadd = sext i8 %add to i64
+ %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %res = load i8* %arrayidx
+ ret i8 %res
+}
+
+; Ensure that when the profitability check kicks in, the IR is not modified
+; while IgnoreProfitability is on.
+; The profitability check happens when a candidate instruction has several uses.
+; The matcher will create a new matcher for each use and check if the
+; instruction is in the list of the matched instructions of this new matcher.
+; All changes made by the new matchers must be dropped before continuing,
+; otherwise the state of the original matcher will be wrong.
+;
+; Without the profitability check, when checking for the second use of
+; arrayidx, the matcher promotes everything all the way to %arg1, %arg2.
+; Check that we did not promote anything in the final matching.
+;
+; <rdar://problem/16020230>
+; CHECK-LABEL: @checkProfitability
+; CHECK-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg1 to i64
+; CHECK-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg2 to i64
+; CHECK: [[SHL:%[a-zA-Z_0-9-]+]] = shl nsw i32 %arg1, 1
+; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SHL]], %arg2
+; CHECK: [[SEXTADD:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
+; BB then
+; CHECK: [[BASE1:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
+; CHECK: [[ADDR1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE1]] to i32*
+; CHECK: load i32* [[ADDR1]]
+; BB else
+; CHECK: [[BASE2:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
+; CHECK: [[ADDR2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE2]] to i32*
+; CHECK: load i32* [[ADDR2]]
+; CHECK: ret
+; CHECK-GEP-LABEL: @checkProfitability
+; CHECK-GEP-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg1 to i64
+; CHECK-GEP-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg2 to i64
+; CHECK-GEP: [[SHL:%[a-zA-Z_0-9-]+]] = shl nsw i32 %arg1, 1
+; CHECK-GEP: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SHL]], %arg2
+; CHECK-GEP: [[SEXTADD:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
+; BB then
+; CHECK-GEP: [[BASE1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
+; CHECK-GEP: [[BCC1:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE1]] to i8*
+; CHECK-GEP: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8* [[BCC1]], i64 48
+; CHECK-GEP: [[ADDR1:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL1]] to i32*
+; CHECK-GEP: load i32* [[ADDR1]]
+; BB else
+; CHECK-GEP: [[BASE2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
+; CHECK-GEP: [[BCC2:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE2]] to i8*
+; CHECK-GEP: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8* [[BCC2]], i64 48
+; CHECK-GEP: [[ADDR2:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL2]] to i32*
+; CHECK-GEP: load i32* [[ADDR2]]
+; CHECK-GEP: ret
+define i32 @checkProfitability(i32 %arg1, i32 %arg2, i1 %test) {
+ %shl = shl nsw i32 %arg1, 1
+ %add1 = add nsw i32 %shl, %arg2
+ %sextidx1 = sext i32 %add1 to i64
+ %tmpptr = inttoptr i64 %sextidx1 to i32*
+ %arrayidx1 = getelementptr i32* %tmpptr, i64 12
+ br i1 %test, label %then, label %else
+then:
+ %res1 = load i32* %arrayidx1
+ br label %end
+else:
+ %res2 = load i32* %arrayidx1
+ br label %end
+end:
+ %tmp = phi i32 [%res1, %then], [%res2, %else]
+ %res = add i32 %tmp, %add1
+ %addr = inttoptr i32 %res to i32*
+ %final = load i32* %addr
+ ret i32 %final
+}
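Illustrative note (not part of the patch): the promotion these tests exercise can be summarized, per the CHECK lines of @twoArgsPromotion above, as pushing the sext onto the operands of an nsw add so the add is performed at pointer width and folds into the addressing mode:

    ; before:
    ;   %add      = add nsw i32 %arg1, %arg2
    ;   %sextadd  = sext i32 %add to i64
    ;   %base     = inttoptr i64 %sextadd to i8*
    ; after codegenprepare:
    ;   %s1       = sext i32 %arg1 to i64
    ;   %s2       = sext i32 %arg2 to i64
    ;   %promoted = add nsw i64 %s1, %s2
    ;   %base     = inttoptr i64 %promoted to i8*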
diff --git a/test/CodeGen/X86/codegen-prepare-cast.ll b/test/CodeGen/X86/codegen-prepare-cast.ll
index 2a8ead8c4909..59c513385f76 100644
--- a/test/CodeGen/X86/codegen-prepare-cast.ll
+++ b/test/CodeGen/X86/codegen-prepare-cast.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86-64
; PR4297
+; RUN: opt -S < %s -codegenprepare | FileCheck %s
target datalayout =
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
@@ -8,6 +9,9 @@ target triple = "x86_64-unknown-linux-gnu"
%"char[][]" = type { i64, %"byte[]"* }
@.str = external constant [7 x i8] ; <[7 x i8]*> [#uses=1]
+; CHECK-LABEL: @_Dmain
+; CHECK: load i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0)
+; CHECK: ret
define fastcc i32 @_Dmain(%"char[][]" %unnamed) {
entry:
%tmp = getelementptr [7 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
diff --git a/test/CodeGen/X86/codegen-prepare-crash.ll b/test/CodeGen/X86/codegen-prepare-crash.ll
new file mode 100644
index 000000000000..c3288170cc4f
--- /dev/null
+++ b/test/CodeGen/X86/codegen-prepare-crash.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s
+target triple = "x86_64-unknown-linux-gnu"
+
+@g = external global [10 x i32]
+
+define void @f(i32 %u) {
+ %1 = add i32 %u, 4
+ br label %P.Proc8.exit
+
+P.Proc8.exit:
+ %valueindex35.i = getelementptr [10 x i32]* @g, i32 0, i32 %1
+ store i32 %u, i32* %valueindex35.i
+ ret void
+}
diff --git a/test/CodeGen/X86/codegen-prepare-extload.ll b/test/CodeGen/X86/codegen-prepare-extload.ll
index 14df815663e3..9320706d9728 100644
--- a/test/CodeGen/X86/codegen-prepare-extload.ll
+++ b/test/CodeGen/X86/codegen-prepare-extload.ll
@@ -5,7 +5,7 @@
; CodeGenPrepare should move the zext into the block with the load
; so that SelectionDAG can select it with the load.
-; CHECK: movzbl ({{%rdi|%rcx}}), %eax
+; CHECK: movsbl ({{%rdi|%rcx}}), %eax
define void @foo(i8* %p, i32* %q) {
entry:
diff --git a/test/CodeGen/X86/codegen-prepare.ll b/test/CodeGen/X86/codegen-prepare.ll
index 316accfa41ac..4ff0f1c0ba24 100644
--- a/test/CodeGen/X86/codegen-prepare.ll
+++ b/test/CodeGen/X86/codegen-prepare.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -addr-sink-using-gep=1 | FileCheck %s
; Check that the CodeGenPrepare Pass
; does not wrongly rewrite the address computed by Instruction %4
diff --git a/test/CodeGen/X86/coff-comdat.ll b/test/CodeGen/X86/coff-comdat.ll
new file mode 100644
index 000000000000..bf27b2fff1fa
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat.ll
@@ -0,0 +1,92 @@
+; RUN: llc -mtriple i386-pc-win32 < %s | FileCheck %s
+
+$f1 = comdat any
+@v1 = global i32 0, comdat $f1
+define void @f1() comdat $f1 {
+ ret void
+}
+
+$f2 = comdat exactmatch
+@v2 = global i32 0, comdat $f2
+define void @f2() comdat $f2 {
+ ret void
+}
+
+$f3 = comdat largest
+@v3 = global i32 0, comdat $f3
+define void @f3() comdat $f3 {
+ ret void
+}
+
+$f4 = comdat noduplicates
+@v4 = global i32 0, comdat $f4
+define void @f4() comdat $f4 {
+ ret void
+}
+
+$f5 = comdat samesize
+@v5 = global i32 0, comdat $f5
+define void @f5() comdat $f5 {
+ ret void
+}
+
+$f6 = comdat samesize
+@v6 = global i32 0, comdat $f6
+@f6 = global i32 0, comdat $f6
+
+$"\01@f7@0" = comdat any
+define x86_fastcallcc void @"\01@v7@0"() comdat $"\01@f7@0" {
+ ret void
+}
+define x86_fastcallcc void @"\01@f7@0"() comdat $"\01@f7@0" {
+ ret void
+}
+
+$f8 = comdat any
+define x86_fastcallcc void @v8() comdat $f8 {
+ ret void
+}
+define x86_fastcallcc void @f8() comdat $f8 {
+ ret void
+}
+
+$vftable = comdat largest
+
+@some_name = private unnamed_addr constant [2 x i8*] zeroinitializer, comdat $vftable
+@vftable = alias getelementptr([2 x i8*]* @some_name, i32 0, i32 1)
+
+; CHECK: .section .text,"xr",discard,_f1
+; CHECK: .globl _f1
+; CHECK: .section .text,"xr",same_contents,_f2
+; CHECK: .globl _f2
+; CHECK: .section .text,"xr",largest,_f3
+; CHECK: .globl _f3
+; CHECK: .section .text,"xr",one_only,_f4
+; CHECK: .globl _f4
+; CHECK: .section .text,"xr",same_size,_f5
+; CHECK: .globl _f5
+; CHECK: .section .text,"xr",associative,@f7@0
+; CHECK: .globl @v7@0
+; CHECK: .section .text,"xr",discard,@f7@0
+; CHECK: .globl @f7@0
+; CHECK: .section .text,"xr",associative,@f8@0
+; CHECK: .globl @v8@0
+; CHECK: .section .text,"xr",discard,@f8@0
+; CHECK: .globl @f8@0
+; CHECK: .section .bss,"bw",associative,_f1
+; CHECK: .globl _v1
+; CHECK: .section .bss,"bw",associative,_f2
+; CHECK: .globl _v2
+; CHECK: .section .bss,"bw",associative,_f3
+; CHECK: .globl _v3
+; CHECK: .section .bss,"bw",associative,_f4
+; CHECK: .globl _v4
+; CHECK: .section .bss,"bw",associative,_f5
+; CHECK: .globl _v5
+; CHECK: .section .bss,"bw",associative,_f6
+; CHECK: .globl _v6
+; CHECK: .section .bss,"bw",same_size,_f6
+; CHECK: .globl _f6
+; CHECK: .section .rdata,"rd",largest,_vftable
+; CHECK: .globl _vftable
+; CHECK: _vftable = L_some_name+4
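Illustrative note (not part of the patch), restating what the CHECK lines above assert: the COMDAT selection kinds map to COFF section selection flags as follows, and non-key members of a COMDAT are emitted into associative sections keyed on the leader:

    comdat any           ->  .section ...,discard,<key>
    comdat exactmatch    ->  .section ...,same_contents,<key>
    comdat largest       ->  .section ...,largest,<key>
    comdat noduplicates  ->  .section ...,one_only,<key>
    comdat samesize      ->  .section ...,same_size,<key>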
diff --git a/test/CodeGen/X86/coff-comdat2.ll b/test/CodeGen/X86/coff-comdat2.ll
new file mode 100644
index 000000000000..6744b5b02ad7
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat2.ll
@@ -0,0 +1,9 @@
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@foo = global i32 0
+@bar = global i32 0, comdat $foo
+; CHECK: Associative COMDAT symbol 'foo' is not a key for it's COMDAT.
diff --git a/test/CodeGen/X86/coff-comdat3.ll b/test/CodeGen/X86/coff-comdat3.ll
new file mode 100644
index 000000000000..76e464b27547
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat3.ll
@@ -0,0 +1,8 @@
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@bar = global i32 0, comdat $foo
+; CHECK: Associative COMDAT symbol 'foo' does not exist.
diff --git a/test/CodeGen/X86/combine-64bit-vec-binop.ll b/test/CodeGen/X86/combine-64bit-vec-binop.ll
new file mode 100644
index 000000000000..8440fdab0eea
--- /dev/null
+++ b/test/CodeGen/X86/combine-64bit-vec-binop.ll
@@ -0,0 +1,273 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+
+
+define double @test1_add(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %add = add <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test1_add
+; SSE41: paddd
+; AVX: vpaddd
+; CHECK-NEXT: ret
+
+
+define double @test2_add(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %add = add <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test2_add
+; SSE41: paddw
+; AVX: vpaddw
+; CHECK-NEXT: ret
+
+define double @test3_add(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %add = add <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test3_add
+; SSE41: paddb
+; AVX: vpaddb
+; CHECK-NEXT: ret
+
+
+define double @test1_sub(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %sub = sub <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test1_sub
+; SSE41: psubd
+; AVX: vpsubd
+; CHECK-NEXT: ret
+
+
+define double @test2_sub(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %sub = sub <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test2_sub
+; SSE41: psubw
+; AVX: vpsubw
+; CHECK-NEXT: ret
+
+
+define double @test3_sub(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %sub = sub <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test3_sub
+; SSE41: psubb
+; AVX: vpsubb
+; CHECK-NEXT: ret
+
+
+define double @test1_mul(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %mul = mul <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test1_mul
+; SSE41: pmulld
+; AVX: vpmulld
+; CHECK-NEXT: ret
+
+
+define double @test2_mul(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %mul = mul <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test2_mul
+; SSE41: pmullw
+; AVX: vpmullw
+; CHECK-NEXT: ret
+
+; There is no legal ISD::MUL with type MVT::v8i8.
+define double @test3_mul(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %mul = mul <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test3_mul
+; CHECK: pmullw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test1_and(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %and = and <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test1_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test2_and(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %and = and <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test2_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test3_and(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %and = and <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test3_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test1_or(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %or = or <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test1_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test2_or(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %or = or <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test2_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test3_or(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %or = or <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test3_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test1_xor(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %xor = xor <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test1_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test2_xor(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %xor = xor <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test2_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test3_xor(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %xor = xor <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test3_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test_fadd(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %add = fadd <2 x float> %1, %2
+ %3 = bitcast <2 x float> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test_fadd
+; SSE41: addps
+; AVX: vaddps
+; CHECK-NEXT: ret
+
+define double @test_fsub(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %sub = fsub <2 x float> %1, %2
+ %3 = bitcast <2 x float> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test_fsub
+; SSE41: subps
+; AVX: vsubps
+; CHECK-NEXT: ret
+
+define double @test_fmul(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %mul = fmul <2 x float> %1, %2
+ %3 = bitcast <2 x float> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test_fmul
+; SSE41: mulps
+; AVX: vmulps
+; CHECK-NEXT: ret
+
diff --git a/test/CodeGen/X86/combine-avx-intrinsics.ll b/test/CodeGen/X86/combine-avx-intrinsics.ll
new file mode 100644
index 000000000000..f610f7fcb91e
--- /dev/null
+++ b/test/CodeGen/X86/combine-avx-intrinsics.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s
+
+
+define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0) {
+ %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a0, i32 7)
+ ret <4 x double> %1
+}
+; CHECK-LABEL: test_x86_avx_blend_pd_256
+; CHECK-NOT: vblendpd
+; CHECK: ret
+
+
+define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0) {
+ %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a0, i32 7)
+ ret <8 x float> %1
+}
+; CHECK-LABEL: test_x86_avx_blend_ps_256
+; CHECK-NOT: vblendps
+; CHECK: ret
+
+
+define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1) {
+ %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a0, <4 x double> %a1)
+ ret <4 x double> %1
+}
+; CHECK-LABEL: test_x86_avx_blendv_pd_256
+; CHECK-NOT: vblendvpd
+; CHECK: ret
+
+
+define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1) {
+ %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %1
+}
+; CHECK-LABEL: test_x86_avx_blendv_ps_256
+; CHECK-NOT: vblendvps
+; CHECK: ret
+
+
+define <4 x double> @test2_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
+ %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
+ ret <4 x double> %1
+}
+; CHECK-LABEL: test2_x86_avx_blend_pd_256
+; CHECK-NOT: vblendpd
+; CHECK: ret
+
+
+define <8 x float> @test2_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
+ %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
+ ret <8 x float> %1
+}
+; CHECK-LABEL: test2_x86_avx_blend_ps_256
+; CHECK-NOT: vblendps
+; CHECK: ret
+
+
+define <4 x double> @test2_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1) {
+ %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> zeroinitializer)
+ ret <4 x double> %1
+}
+; CHECK-LABEL: test2_x86_avx_blendv_pd_256
+; CHECK-NOT: vblendvpd
+; CHECK: ret
+
+
+define <8 x float> @test2_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1) {
+ %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> zeroinitializer)
+ ret <8 x float> %1
+}
+; CHECK-LABEL: test2_x86_avx_blendv_ps_256
+; CHECK-NOT: vblendvps
+; CHECK: ret
+
+
+define <4 x double> @test3_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
+ %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 -1)
+ ret <4 x double> %1
+}
+; CHECK-LABEL: test3_x86_avx_blend_pd_256
+; CHECK-NOT: vblendpd
+; CHECK: ret
+
+
+define <8 x float> @test3_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
+ %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 -1)
+ ret <8 x float> %1
+}
+; CHECK-LABEL: test3_x86_avx_blend_ps_256
+; CHECK-NOT: vblendps
+; CHECK: ret
+
+
+define <4 x double> @test3_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1) {
+ %Mask = bitcast <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1> to <4 x double>
+ %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %Mask)
+ ret <4 x double> %1
+}
+; CHECK-LABEL: test3_x86_avx_blendv_pd_256
+; CHECK-NOT: vblendvpd
+; CHECK: ret
+
+
+define <8 x float> @test3_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1) {
+ %Mask = bitcast <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1> to <8 x float>
+ %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %Mask)
+ ret <8 x float> %1
+}
+; CHECK-LABEL: test3_x86_avx_blendv_ps_256
+; CHECK-NOT: vblendvps
+; CHECK: ret
+
+
+
+declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32)
+declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32)
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>)
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
+
diff --git a/test/CodeGen/X86/combine-avx2-intrinsics.ll b/test/CodeGen/X86/combine-avx2-intrinsics.ll
new file mode 100644
index 000000000000..8794f8b86849
--- /dev/null
+++ b/test/CodeGen/X86/combine-avx2-intrinsics.ll
@@ -0,0 +1,164 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s
+
+; Verify that the backend correctly combines AVX2 builtin intrinsics.
+
+
+define <8 x i32> @test_psra_1(<8 x i32> %A) {
+ %1 = tail call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %A, i32 3)
+ %2 = tail call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %1, <4 x i32> <i32 3, i32 0, i32 7, i32 0>)
+ %3 = tail call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %2, i32 2)
+ ret <8 x i32> %3
+}
+; CHECK-LABEL: test_psra_1
+; CHECK: vpsrad $8, %ymm0, %ymm0
+; CHECK-NEXT: ret
+
+define <16 x i16> @test_psra_2(<16 x i16> %A) {
+ %1 = tail call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %A, i32 3)
+ %2 = tail call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %1, <8 x i16> <i16 3, i16 0, i16 0, i16 0, i16 7, i16 0, i16 0, i16 0>)
+ %3 = tail call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %2, i32 2)
+ ret <16 x i16> %3
+}
+; CHECK-LABEL: test_psra_2
+; CHECK: vpsraw $8, %ymm0, %ymm0
+; CHECK-NEXT: ret
+
+define <16 x i16> @test_psra_3(<16 x i16> %A) {
+ %1 = tail call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %A, i32 0)
+ %2 = tail call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %1, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 7, i16 0, i16 0, i16 0>)
+ %3 = tail call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %2, i32 0)
+ ret <16 x i16> %3
+}
+; CHECK-LABEL: test_psra_3
+; CHECK-NOT: vpsraw
+; CHECK: ret
+
+define <8 x i32> @test_psra_4(<8 x i32> %A) {
+ %1 = tail call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %A, i32 0)
+ %2 = tail call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %1, <4 x i32> <i32 0, i32 0, i32 7, i32 0>)
+ %3 = tail call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %2, i32 0)
+ ret <8 x i32> %3
+}
+; CHECK-LABEL: test_psra_4
+; CHECK-NOT: vpsrad
+; CHECK: ret
+
+
+define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1) {
+ %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendvb
+; CHECK-NOT: vpblendvb
+; CHECK: ret
+
+
+define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0) {
+ %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a0, i32 7)
+ ret <16 x i16> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendw
+; CHECK-NOT: vpblendw
+; CHECK: ret
+
+
+define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0) {
+ %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a0, i32 7)
+ ret <4 x i32> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendd_128
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0) {
+ %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a0, i32 7)
+ ret <8 x i32> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendd_256
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <32 x i8> @test2_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1) {
+ %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> zeroinitializer)
+ ret <32 x i8> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendvb
+; CHECK-NOT: vpblendvb
+; CHECK: ret
+
+
+define <16 x i16> @test2_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
+ %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 0)
+ ret <16 x i16> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendw
+; CHECK-NOT: vpblendw
+; CHECK: ret
+
+
+define <4 x i32> @test2_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
+ %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 0)
+ ret <4 x i32> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendd_128
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <8 x i32> @test2_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
+ %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 0)
+ ret <8 x i32> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendd_256
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <32 x i8> @test3_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1) {
+ %1 = bitcast <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1> to <32 x i8>
+ %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %1)
+ ret <32 x i8> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendvb
+; CHECK-NOT: vpblendvb
+; CHECK: ret
+
+
+define <16 x i16> @test3_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
+ %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 -1)
+ ret <16 x i16> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendw
+; CHECK-NOT: vpblendw
+; CHECK: ret
+
+
+define <4 x i32> @test3_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
+ %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 -1)
+ ret <4 x i32> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendd_128
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <8 x i32> @test3_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
+ %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 -1)
+ ret <8 x i32> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendd_256
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i32)
+declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i32)
+declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i32)
+declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>)
+declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32)
+declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32>, i32)
+
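Illustrative note (not part of the patch): the combines asserted above boil down to two simplifications, sketched here rather than added as extra tests:

    ; chained constant arithmetic right shifts merge into one shift by the sum:
    ;   psrai.d(psra.d(psrai.d(x, 3), <3, 0, 7, 0>), 2)  ==>  vpsrad $8   (3 + 3 + 2 = 8)
    ; a blend whose mask/immediate is all zeros returns its first source, and one whose
    ; mask/immediate is all ones returns its second, so no blend instruction is emitted.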
diff --git a/test/CodeGen/X86/combine-or.ll b/test/CodeGen/X86/combine-or.ll
new file mode 100644
index 000000000000..df3b9015adda
--- /dev/null
+++ b/test/CodeGen/X86/combine-or.ll
@@ -0,0 +1,281 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+
+; Verify that each of the following test cases is folded into a single
+; instruction which performs a blend operation.
+
+define <2 x i64> @test1(<2 x i64> %a, <2 x i64> %b) {
+ %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+ %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
+ %or = or <2 x i64> %shuf1, %shuf2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test1
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test2
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK: ret
+
+
+define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
+ %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
+ %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+ %or = or <2 x i64> %shuf1, %shuf2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 1, i32 2, i32 3>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test4
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 1, i32 2, i32 3>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test5
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test6
+; CHECK-NOT: xorps
+; CHECK: blendps $12
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
+ %and1 = and <4 x i32> %a, <i32 -1, i32 -1, i32 0, i32 0>
+ %and2 = and <4 x i32> %b, <i32 0, i32 0, i32 -1, i32 -1>
+ %or = or <4 x i32> %and1, %and2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test7
+; CHECK-NOT: xorps
+; CHECK: blendps $12
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test8(<2 x i64> %a, <2 x i64> %b) {
+ %and1 = and <2 x i64> %a, <i64 -1, i64 0>
+ %and2 = and <2 x i64> %b, <i64 0, i64 -1>
+ %or = or <2 x i64> %and1, %and2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test8
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
+ %and1 = and <4 x i32> %a, <i32 0, i32 0, i32 -1, i32 -1>
+ %and2 = and <4 x i32> %b, <i32 -1, i32 -1, i32 0, i32 0>
+ %or = or <4 x i32> %and1, %and2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test9
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK: ret
+
+
+define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
+ %and1 = and <2 x i64> %a, <i64 0, i64 -1>
+ %and2 = and <2 x i64> %b, <i64 -1, i64 0>
+ %or = or <2 x i64> %and1, %and2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
+ %and1 = and <4 x i32> %a, <i32 -1, i32 0, i32 0, i32 0>
+ %and2 = and <4 x i32> %b, <i32 0, i32 -1, i32 -1, i32 -1>
+ %or = or <4 x i32> %and1, %and2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test11
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test12(<4 x i32> %a, <4 x i32> %b) {
+ %and1 = and <4 x i32> %a, <i32 0, i32 -1, i32 -1, i32 -1>
+ %and2 = and <4 x i32> %b, <i32 -1, i32 0, i32 0, i32 0>
+ %or = or <4 x i32> %and1, %and2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test12
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NEXT: ret
+
+
+; Verify that the following test cases are folded into single shuffles.
+
+define <4 x i32> @test13(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 1, i32 1, i32 4, i32 4>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: xorps
+; CHECK: shufps
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test14(<2 x i64> %a, <2 x i64> %b) {
+ %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+ %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+ %or = or <2 x i64> %shuf1, %shuf2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: pslldq
+; CHECK-NOT: por
+; CHECK: punpcklqdq
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test15(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 1>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 2, i32 1, i32 4, i32 4>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test15
+; CHECK-NOT: xorps
+; CHECK: shufps
+; CHECK-NOT: shufps
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <2 x i64> @test16(<2 x i64> %a, <2 x i64> %b) {
+ %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+ %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+ %or = or <2 x i64> %shuf1, %shuf2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: pslldq
+; CHECK-NOT: por
+; CHECK: punpcklqdq
+; CHECK: ret
+
+
+; Verify that the dag-combiner does not fold an OR of two shuffles into a single
+; shuffle instruction when the shuffle indices are not compatible.
+
+define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 2>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test17
+; CHECK: por
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 4>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test18
+; CHECK: orps
+; CHECK: ret
+
+
+define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 3>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 2, i32 2>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test19
+; CHECK: por
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test20(<2 x i64> %a, <2 x i64> %b) {
+ %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+ %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+ %or = or <2 x i64> %shuf1, %shuf2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test20
+; CHECK-NOT: xorps
+; CHECK: orps
+; CHECK-NEXT: movq
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
+ %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+ %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+ %or = or <2 x i64> %shuf1, %shuf2
+ ret <2 x i64> %or
+}
+; CHECK-LABEL: test21
+; CHECK: por
+; CHECK-NEXT: pslldq
+; CHECK-NEXT: ret
+
+; Verify that the DAGCombiner doesn't crash when checking whether a shuffle
+; with an illegal type has a legal mask. Method 'isShuffleMaskLegal' only knows
+; how to handle legal vector value types.
+define <4 x i8> @test_crash(<4 x i8> %a, <4 x i8> %b) {
+ %shuf1 = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
+ %shuf2 = shufflevector <4 x i8> %b, <4 x i8> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
+ %or = or <4 x i8> %shuf1, %shuf2
+ ret <4 x i8> %or
+}
+; CHECK-LABEL: test_crash
+; CHECK: movsd
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/combine-sse2-intrinsics.ll b/test/CodeGen/X86/combine-sse2-intrinsics.ll
new file mode 100644
index 000000000000..fa500e5d8d67
--- /dev/null
+++ b/test/CodeGen/X86/combine-sse2-intrinsics.ll
@@ -0,0 +1,53 @@
+; RUN: llc < %s -march=x86 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s
+
+; Verify that the backend correctly combines SSE2 builtin intrinsics.
+
+
+define <4 x i32> @test_psra_1(<4 x i32> %A) {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %A, i32 3)
+ %2 = tail call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> <i32 3, i32 0, i32 7, i32 0>)
+ %3 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %2, i32 2)
+ ret <4 x i32> %3
+}
+; CHECK-LABEL: test_psra_1
+; CHECK: psrad $8, %xmm0
+; CHECK-NEXT: ret
+
+define <8 x i16> @test_psra_2(<8 x i16> %A) {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %A, i32 3)
+ %2 = tail call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> <i16 3, i16 0, i16 0, i16 0, i16 7, i16 0, i16 0, i16 0>)
+ %3 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %2, i32 2)
+ ret <8 x i16> %3
+}
+; CHECK-LABEL: test_psra_2
+; CHECK: psraw $8, %xmm0
+; CHECK-NEXT: ret
+
+define <4 x i32> @test_psra_3(<4 x i32> %A) {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %A, i32 0)
+ %2 = tail call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> <i32 0, i32 0, i32 7, i32 0>)
+ %3 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %2, i32 0)
+ ret <4 x i32> %3
+}
+; CHECK-LABEL: test_psra_3
+; CHECK-NOT: psrad
+; CHECK: ret
+
+
+define <8 x i16> @test_psra_4(<8 x i16> %A) {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %A, i32 0)
+ %2 = tail call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 7, i16 0, i16 0, i16 0>)
+ %3 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %2, i32 0)
+ ret <8 x i16> %3
+}
+; CHECK-LABEL: test_psra_4
+; CHECK-NOT: psraw
+; CHECK: ret
+
+
+declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32)
+declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32)
+
diff --git a/test/CodeGen/X86/combine-sse41-intrinsics.ll b/test/CodeGen/X86/combine-sse41-intrinsics.ll
new file mode 100644
index 000000000000..254991aec094
--- /dev/null
+++ b/test/CodeGen/X86/combine-sse41-intrinsics.ll
@@ -0,0 +1,182 @@
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=corei7 | FileCheck %s
+
+
+define <2 x double> @test_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
+ %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 0)
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test_x86_sse41_blend_pd
+; CHECK-NOT: blendpd
+; CHECK: ret
+
+
+define <4 x float> @test_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
+ %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 0)
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test_x86_sse41_blend_ps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+
+define <2 x double> @test_x86_sse41_blendv_pd(<2 x double> %a0, <2 x double> %a1) {
+ %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> zeroinitializer)
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test_x86_sse41_blendv_pd
+; CHECK-NOT: blendvpd
+; CHECK: ret
+
+
+define <4 x float> @test_x86_sse41_blendv_ps(<4 x float> %a0, <4 x float> %a1) {
+ %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer)
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test_x86_sse41_blendv_ps
+; CHECK-NOT: blendvps
+; CHECK: ret
+
+
+define <16 x i8> @test_x86_sse41_pblendv_b(<16 x i8> %a0, <16 x i8> %a1) {
+ %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> zeroinitializer)
+ ret <16 x i8> %1
+}
+; CHECK-LABEL: test_x86_sse41_pblendv_b
+; CHECK-NOT: pblendvb
+; CHECK: ret
+
+
+define <8 x i16> @test_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
+ %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 0)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test_x86_sse41_pblend_w
+; CHECK-NOT: pblendw
+; CHECK: ret
+
+
+define <2 x double> @test2_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
+ %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 -1)
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blend_pd
+; CHECK-NOT: blendpd
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
+ %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 -1)
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blend_ps
+; CHECK-NOT: blendps
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <2 x double> @test2_x86_sse41_blendv_pd(<2 x double> %a0, <2 x double> %a1) {
+ %Mask = bitcast <2 x i64> <i64 -1, i64 -1> to <2 x double>
+ %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %Mask )
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blendv_pd
+; CHECK-NOT: blendvpd
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2_x86_sse41_blendv_ps(<4 x float> %a0, <4 x float> %a1) {
+ %Mask = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x float>
+ %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %Mask)
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blendv_ps
+; CHECK-NOT: blendvps
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <16 x i8> @test2_x86_sse41_pblendv_b(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
+ %Mask = bitcast <2 x i64> <i64 -1, i64 -1> to <16 x i8>
+ %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %Mask)
+ ret <16 x i8> %1
+}
+; CHECK-LABEL: test2_x86_sse41_pblendv_b
+; CHECK-NOT: pblendvb
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <8 x i16> @test2_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
+ %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 -1)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test2_x86_sse41_pblend_w
+; CHECK-NOT: pblendw
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <2 x double> @test3_x86_sse41_blend_pd(<2 x double> %a0) {
+ %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a0, i32 7)
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blend_pd
+; CHECK-NOT: blendpd
+; CHECK: ret
+
+
+define <4 x float> @test3_x86_sse41_blend_ps(<4 x float> %a0) {
+ %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a0, i32 7)
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blend_ps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+
+define <2 x double> @test3_x86_sse41_blendv_pd(<2 x double> %a0, <2 x double> %a1) {
+ %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a0, <2 x double> %a1 )
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blendv_pd
+; CHECK-NOT: blendvpd
+; CHECK: ret
+
+
+define <4 x float> @test3_x86_sse41_blendv_ps(<4 x float> %a0, <4 x float> %a1) {
+ %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blendv_ps
+; CHECK-NOT: blendvps
+; CHECK: ret
+
+
+define <16 x i8> @test3_x86_sse41_pblendv_b(<16 x i8> %a0, <16 x i8> %a1) {
+ %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %1
+}
+; CHECK-LABEL: test3_x86_sse41_pblendv_b
+; CHECK-NOT: pblendvb
+; CHECK: ret
+
+
+define <8 x i16> @test3_x86_sse41_pblend_w(<8 x i16> %a0) {
+ %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a0, i32 7)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test3_x86_sse41_pblend_w
+; CHECK-NOT: pblendw
+; CHECK: ret
+
+
+declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i32)
+declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i32)
+declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>)
+
diff --git a/test/CodeGen/X86/combine-vec-shuffle-2.ll b/test/CodeGen/X86/combine-vec-shuffle-2.ll
new file mode 100644
index 000000000000..877d38260d61
--- /dev/null
+++ b/test/CodeGen/X86/combine-vec-shuffle-2.ll
@@ -0,0 +1,253 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Check that DAGCombiner correctly folds the following pairs of shuffles
+; using the following rules:
+; 1. shuffle(shuffle(x, y), undef) -> x
+; 2. shuffle(shuffle(x, y), undef) -> y
+; 3. shuffle(shuffle(x, y), undef) -> shuffle(x, undef)
+; 4. shuffle(shuffle(x, y), undef) -> shuffle(undef, y)
+;
+; Rules 3. and 4. are used only if the resulting shuffle mask is legal.
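As a rough illustration of the mask composition behind rules 3. and 4., the two masks of test1 below can be composed by hand: the outer mask indexes into the result of the inner shuffle, and a lane that refers to the outer undef operand is free for the backend to choose. A minimal C sketch of that composition, specialized to test1's masks (illustrative only, not part of the test file):

#include <stdio.h>

int main(void) {
  int inner[4] = {0, 4, 3, 1};  /* first shufflevector of test1              */
  int outer[4] = {2, 4, 0, 3};  /* second shufflevector of test1             */
  int imm = 0;
  for (int i = 0; i < 4; ++i) {
    /* outer indices >= 4 refer to the undef operand; pick 0 for those lanes */
    int m = (outer[i] < 4) ? inner[outer[i]] : 0;
    imm |= (m & 3) << (2 * i);  /* pshufd packs two bits per lane            */
    printf("%d ", m);           /* prints: 3 0 0 1                           */
  }
  printf("-> pshufd immediate %d\n", imm);  /* prints 67, matching the check */
  return 0;
}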
+
+define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test1
+; Mask: [3,0,0,1]
+; CHECK: pshufd $67
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test2
+; Mask: [2,0,0,3]
+; CHECK: pshufd $-62
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test3
+; Mask: [2,0,0,3]
+; CHECK: pshufd $-62
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test4
+; Mask: [0,0,0,1]
+; CHECK: pshufd $64
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 5, i32 5, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 4, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test5
+; Mask: [1,1]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test6(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test6
+; Mask: [2,0,0,0]
+; CHECK: pshufd $2
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test7(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test7
+; Mask: [0,2,0,2]
+; CHECK: pshufd $-120
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test8
+; Mask: [1,0,3,0]
+; CHECK: pshufd $49
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test9(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 3, i32 2, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test9
+; Mask: [1,3,0,2]
+; CHECK: pshufd $-115
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test10(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test10
+; Mask: [1,0,1,0]
+; CHECK: pshufd $17
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test11(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 2, i32 5, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test11
+; Mask: [1,0,2,1]
+; CHECK: pshufd $97
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test12(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 0, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 0, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test12
+; Mask: [0,0,0,0]
+; CHECK: pshufd $0
+; CHECK-NEXT: ret
+
+
+; The following pair of shuffles is folded into vector %A.
+define <4 x i32> @test13(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 4, i32 2, i32 6>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 0, i32 2, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: pshufd
+; CHECK: ret
+
+
+; The following pair of shuffles is folded into vector %B.
+define <4 x i32> @test14(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 4, i32 1, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: pshufd
+; CHECK: ret
+
+
+; Verify that we don't optimize the following cases. We expect more than one shuffle.
+
+define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test15
+; CHECK: shufps $114
+; CHECK-NEXT: pshufd $-58
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test16
+; CHECK: blendps $10
+; CHECK-NEXT: pshufd $-58
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test17
+; CHECK: shufps $120
+; CHECK-NEXT: pshufd $-58
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test18(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test18
+; CHECK: blendps $11
+; CHECK-NEXT: pshufd $-59
+; CHECK-NEXT: ret
+
+define <4 x i32> @test19(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 5, i32 6>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 0, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test19
+; CHECK: shufps $-104
+; CHECK-NEXT: pshufd $2
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test20(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 3, i32 2, i32 4, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test20
+; CHECK: shufps $11
+; CHECK-NEXT: pshufd $-58
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test21(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test21
+; CHECK: shufps $120
+; CHECK-NEXT: pshufd $-60
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test22(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test22
+; CHECK: blendps $11
+; CHECK-NEXT: pshufd $-43
+; CHECK-NEXT: ret
+
diff --git a/test/CodeGen/X86/combine-vec-shuffle-3.ll b/test/CodeGen/X86/combine-vec-shuffle-3.ll
new file mode 100644
index 000000000000..bd2d34ca189a
--- /dev/null
+++ b/test/CodeGen/X86/combine-vec-shuffle-3.ll
@@ -0,0 +1,380 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test1
+; Mask: [0,1,2,3]
+; CHECK: movaps
+; CHECK: ret
+
+define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test2
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test3
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test4
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test5
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
+
+define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test6
+; Mask: [4,5,6,7]
+; CHECK: movaps
+; CHECK: ret
+
+define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test7
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x i32> @test8(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test8
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test9
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test10(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test10
+; Mask: [4,1,6,7]
+; CHECK: blendps
+; CHECK: ret
+
+define <4 x float> @test11(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test11
+; Mask: [0,1,2,3]
+; CHECK-NOT: movaps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+define <4 x float> @test12(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test12
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x float> @test13(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test13
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test14
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK: ret
+
+define <4 x float> @test15(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test15
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
+define <4 x i32> @test16(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test16
+; Mask: [0,1,2,3]
+; CHECK-NOT: movaps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test17
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test18
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test19
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK: ret
+
+define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test20
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
+; Check some negative cases.
+define <4 x float> @test1b(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test1b
+; CHECK: shufps
+; CHECK: shufps
+; CHECK: ret
+
+define <4 x float> @test2b(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 0, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test2b
+; CHECK: shufps
+; CHECK: pshufd
+; CHECK: ret
+
+define <4 x float> @test3b(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test3b
+; CHECK: shufps
+; CHECK: shufps
+; CHECK: ret
+
+define <4 x float> @test4b(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 5, i32 5, i32 2, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test4b
+; CHECK: shufps
+; CHECK: shufps
+; CHECK: ret
+
+
+; Verify that we correctly fold shuffles even when we use illegal vector types.
+define <4 x i8> @test1c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test1c
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK-NEXT: ret
+
+define <4 x i8> @test2c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 1, i32 5>
+ %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test2c
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK-NEXT: ret
+
+define <4 x i8> @test3c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test3c
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x i8> @test4c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test4c
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
+; The following test cases are generated from this C++ code
+;
+;__m128 blend_01(__m128 a, __m128 b)
+;{
+; __m128 s = a;
+; s = _mm_blend_ps( s, b, 1<<0 );
+; s = _mm_blend_ps( s, b, 1<<1 );
+; return s;
+;}
+;
+;__m128 blend_02(__m128 a, __m128 b)
+;{
+; __m128 s = a;
+; s = _mm_blend_ps( s, b, 1<<0 );
+; s = _mm_blend_ps( s, b, 1<<2 );
+; return s;
+;}
+;
+;__m128 blend_123(__m128 a, __m128 b)
+;{
+; __m128 s = a;
+; s = _mm_blend_ps( s, b, 1<<1 );
+; s = _mm_blend_ps( s, b, 1<<2 );
+; s = _mm_blend_ps( s, b, 1<<3 );
+; return s;
+;}
+
+; Ideally, we should collapse the following shuffles into a single one.
+
+define <4 x float> @blend_01(<4 x float> %a, <4 x float> %b) {
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 undef, i32 2, i32 3>
+ %shuffle6 = shufflevector <4 x float> %shuffle, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ ret <4 x float> %shuffle6
+}
+; CHECK-LABEL: blend_01
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+define <4 x float> @blend_02(<4 x float> %a, <4 x float> %b) {
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 undef, i32 3>
+ %shuffle6 = shufflevector <4 x float> %shuffle, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
+ ret <4 x float> %shuffle6
+}
+; CHECK-LABEL: blend_02
+; CHECK: blendps $5
+; CHECK-NEXT: ret
+
+define <4 x float> @blend_123(<4 x float> %a, <4 x float> %b) {
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef>
+ %shuffle6 = shufflevector <4 x float> %shuffle, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 undef>
+ %shuffle12 = shufflevector <4 x float> %shuffle6, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x float> %shuffle12
+}
+; CHECK-LABEL: blend_123
+; CHECK: movss
+; CHECK: ret
+
+define <4 x i32> @test_movhl_1(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 7, i32 5, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 6, i32 1, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test_movhl_1
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test_movhl_2(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 0, i32 3, i32 6>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 3, i32 7, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test_movhl_2
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test_movhl_3(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 7, i32 6, i32 3, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 6, i32 0, i32 3, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test_movhl_3
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
diff --git a/test/CodeGen/X86/combine-vec-shuffle-4.ll b/test/CodeGen/X86/combine-vec-shuffle-4.ll
new file mode 100644
index 000000000000..0ddec2c12fb5
--- /dev/null
+++ b/test/CodeGen/X86/combine-vec-shuffle-4.ll
@@ -0,0 +1,237 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Verify that we fold shuffles according to the rule:
+; (shuffle(shuffle A, Undef, M0), B, M1) -> (shuffle A, B, M2)
+
+define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test1
+; Mask: [4,5,2,3]
+; CHECK: movsd
+; CHECK: ret
+
+define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test2
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test3
+; Mask: [0,1,4,u]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test4
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 6, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test5
+; Mask: [0,1,6,7]
+; CHECK: blendps $12
+; CHECK: ret
+
+; Verify that we fold shuffles according to the rule:
+; (shuffle(shuffle A, Undef, M0), A, M1) -> (shuffle A, Undef, M2)
+
+define <4 x float> @test6(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 5, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test6
+; Mask: [0,1,2,3]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK-NOT: movlhps
+; CHECK: ret
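For test6 above, composing the two masks by hand shows why no shuffle instruction is expected at all: every output lane ends up reading lane i of %a, i.e. the identity mask [0,1,2,3]. A minimal C sketch of that composition, specialized to test6's masks (a hand-worked illustration, not part of the test file):

#include <stdio.h>

int main(void) {
  int inner[4] = {4, 2, 3, 1};  /* first shuffle; index 4 is the undef lane    */
  int outer[4] = {4, 5, 1, 2};  /* second shuffle; indices 4..7 select from %a */
  for (int i = 0; i < 4; ++i) {
    /* a lane comes either straight from %a or through the inner shuffle */
    int m = (outer[i] >= 4) ? outer[i] - 4 : inner[outer[i]];
    printf("%d ", m);           /* prints: 0 1 2 3, the identity mask */
  }
  printf("\n");
  return 0;
}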
+
+define <4 x float> @test7(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test7
+; Mask: [0,1,0,1]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK: movlhps
+; CHECK-NEXT: ret
+
+define <4 x float> @test8(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test8
+; Mask: [0,1,0,u]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK: movlhps
+; CHECK-NEXT: ret
+
+define <4 x float> @test9(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test9
+; Mask: [2,3,2,3]
+; CHECK-NOT: movlhps
+; CHECK-NOT: palignr
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x float> @test10(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 6, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test10
+; Mask: [0,1,2,3]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK-NOT: movlhps
+; CHECK: ret
+
+define <4 x float> @test11(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 5, i32 6>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test11
+; Mask: [4,5,2,3]
+; CHECK: movsd
+; CHECK: ret
+
+define <4 x float> @test12(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 5, i32 6, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test12
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test13(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 5, i32 0, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test13
+; Mask: [0,1,4,u]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test14
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x float> @test15(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test15
+; Mask: [0,1,6,7]
+; CHECK: blendps $12
+; CHECK: ret
+
+; Verify that shuffles are canonicalized according to the rule:
+; shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
+;
+; This allows the following combine rule to trigger:
+; (shuffle(shuffle A, Undef, M0), A, M1) -> (shuffle A, Undef, M2)
+;
+; As a result, all the shuffle pairs in each function below should be
+; combined into a single legal shuffle operation.
+
+define <4 x float> @test16(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test16
+; Mask: [0,1,2,3]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK-NOT: movlhps
+; CHECK: ret
+
+define <4 x float> @test17(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 5, i32 6, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test17
+; Mask: [0,1,0,1]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK: movlhps
+; CHECK-NEXT: ret
+
+define <4 x float> @test18(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test18
+; Mask: [0,1,0,u]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK: movlhps
+; CHECK-NEXT: ret
+
+define <4 x float> @test19(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test19
+; Mask: [2,3,2,3]
+; CHECK-NOT: movlhps
+; CHECK-NOT: palignr
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x float> @test20(<4 x float> %a) {
+ %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test20
+; Mask: [0,1,2,3]
+; CHECK-NOT: pshufd
+; CHECK-NOT: shufps
+; CHECK-NOT: movlhps
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/combine-vec-shuffle-5.ll b/test/CodeGen/X86/combine-vec-shuffle-5.ll
new file mode 100644
index 000000000000..16c45efe4be6
--- /dev/null
+++ b/test/CodeGen/X86/combine-vec-shuffle-5.ll
@@ -0,0 +1,257 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Verify that the DAGCombiner correctly folds all the shufflevector pairs
+; into a single shuffle operation.
+
+define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test1
+; Mask: [0,1,2,3]
+; CHECK: movaps
+; CHECK: ret
+
+define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test2
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test3
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test4
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 5, i32 6, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test5
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
+
+define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %b, <4 x i32> %1, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test6
+; Mask: [4,5,6,7]
+; CHECK: movaps
+; CHECK: ret
+
+define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %b, <4 x i32> %1, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test7
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x i32> @test8(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
+ %2 = shufflevector <4 x i32> %b, <4 x i32> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test8
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x i32> %b, <4 x i32> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test9
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test10(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %b, <4 x i32> %1, <4 x i32> <i32 4, i32 5, i32 6, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test10
+; Mask: [4,1,6,7]
+; CHECK: blendps
+; CHECK: ret
+
+define <4 x float> @test11(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test11
+; Mask: [0,1,2,3]
+; CHECK-NOT: movaps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+define <4 x float> @test12(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test12
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x float> @test13(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test13
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test14
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK: ret
+
+define <4 x float> @test15(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: test15
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
+define <4 x i32> @test16(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %a, <4 x i32> %1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test16
+; Mask: [0,1,2,3]
+; CHECK-NOT: movaps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ %2 = shufflevector <4 x i32> %a, <4 x i32> %1, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test17
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK: ret
+
+define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ %2 = shufflevector <4 x i32> %a, <4 x i32> %1, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test18
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK: ret
+
+define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
+ %2 = shufflevector <4 x i32> %a, <4 x i32> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test19
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK: ret
+
+define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ %2 = shufflevector <4 x i32> %a, <4 x i32> %1, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test20
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
+; Verify that we correctly fold shuffles even when we use illegal vector types.
+define <4 x i8> @test1c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i8> %B, <4 x i8> %1, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test1c
+; Mask: [0,5,6,7]
+; CHECK: movss
+; CHECK-NEXT: ret
+
+define <4 x i8> @test2c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 1, i32 5>
+ %2 = shufflevector <4 x i8> %B, <4 x i8> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test2c
+; Mask: [0,1,4,5]
+; CHECK: movlhps
+; CHECK-NEXT: ret
+
+define <4 x i8> @test3c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
+ %2 = shufflevector <4 x i8> %B, <4 x i8> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test3c
+; Mask: [6,7,2,3]
+; CHECK: movhlps
+; CHECK: ret
+
+define <4 x i8> @test4c(<4 x i8>* %a, <4 x i8>* %b) {
+ %A = load <4 x i8>* %a
+ %B = load <4 x i8>* %b
+ %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i8> %B, <4 x i8> %1, <4 x i32> <i32 4, i32 5, i32 6, i32 3>
+ ret <4 x i8> %2
+}
+; CHECK-LABEL: test4c
+; Mask: [4,1,6,7]
+; CHECK: blendps $13
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/combine-vec-shuffle.ll b/test/CodeGen/X86/combine-vec-shuffle.ll
new file mode 100644
index 000000000000..9e6ab892713b
--- /dev/null
+++ b/test/CodeGen/X86/combine-vec-shuffle.ll
@@ -0,0 +1,253 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Verify that the DAGCombiner correctly folds according to the following rules:
+
+; fold (AND (shuf (A, C), shuf (B, C))) -> shuf (AND (A, B), C)
+; fold (OR (shuf (A, C), shuf (B, C))) -> shuf (OR (A, B), C)
+; fold (XOR (shuf (A, C), shuf (B, C))) -> shuf (XOR (A, B), V_0)
+
+; fold (AND (shuf (C, A), shuf (C, B))) -> shuf (C, AND (A, B))
+; fold (OR (shuf (C, A), shuf (C, B))) -> shuf (C, OR (A, B))
+; fold (XOR (shuf (C, A), shuf (C, B))) -> shuf (V_0, XOR (A, B))
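The asymmetry in the XOR rules comes from the lanes that both shuffles take from the shared operand C: under AND and OR those lanes keep the value of C (C & C = C, C | C = C), but under XOR they cancel to zero, which is why the folded shuffle reads them from the zero vector V_0. A tiny C sketch of that lane-wise reasoning (illustrative only, independent of the test file):

#include <assert.h>

int main(void) {
  unsigned c = 0xdeadbeefu;  /* a lane that both shuffles copy from C */
  assert((c & c) == c);      /* the AND rule keeps the C lane         */
  assert((c | c) == c);      /* the OR rule keeps the C lane          */
  assert((c ^ c) == 0u);     /* the XOR rule must use V_0 instead     */
  return 0;
}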
+
+
+
+define <4 x i32> @test1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
+ %and = and <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %and
+}
+; CHECK-LABEL: test1
+; CHECK-NOT: pshufd
+; CHECK: pand
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test2
+; CHECK-NOT: pshufd
+; CHECK: por
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
+ %xor = xor <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %xor
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: pshufd
+; CHECK: pxor
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 4, i32 6, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 4, i32 6, i32 5, i32 7>
+ %and = and <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %and
+}
+; CHECK-LABEL: test4
+; CHECK-NOT: pshufd
+; CHECK: pand
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 4, i32 6, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 4, i32 6, i32 5, i32 7>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test5
+; CHECK-NOT: pshufd
+; CHECK: por
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 4, i32 6, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 4, i32 6, i32 5, i32 7>
+ %xor = xor <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %xor
+}
+; CHECK-LABEL: test6
+; CHECK-NOT: pshufd
+; CHECK: pxor
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+; Verify that DAGCombiner moves the shuffle after the xor/and/or even if the
+; shuffles are not performing a swizzle operation.
+
+define <4 x i32> @test1b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %and = and <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %and
+}
+; CHECK-LABEL: test1b
+; CHECK-NOT: blendps
+; CHECK: andps
+; CHECK-NEXT: blendps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test2b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test2b
+; CHECK-NOT: blendps
+; CHECK: orps
+; CHECK-NEXT: blendps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test3b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %xor = xor <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %xor
+}
+; CHECK-LABEL: test3b
+; CHECK-NOT: blendps
+; CHECK: xorps
+; CHECK-NEXT: xorps
+; CHECK-NEXT: blendps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %and = and <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %and
+}
+; CHECK-LABEL: test4b
+; CHECK-NOT: blendps
+; CHECK: andps
+; CHECK-NEXT: blendps
+; CHECK: ret
+
+
+define <4 x i32> @test5b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test5b
+; CHECK-NOT: blendps
+; CHECK: orps
+; CHECK-NEXT: blendps
+; CHECK: ret
+
+
+define <4 x i32> @test6b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 5, i32 2, i32 7>
+ %xor = xor <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %xor
+}
+; CHECK-LABEL: test6b
+; CHECK-NOT: blendps
+; CHECK: xorps
+; CHECK-NEXT: xorps
+; CHECK-NEXT: blendps
+; CHECK: ret
+
+define <4 x i32> @test1c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %and = and <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %and
+}
+; CHECK-LABEL: test1c
+; CHECK-NOT: shufps
+; CHECK: andps
+; CHECK-NEXT: shufps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test2c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test2c
+; CHECK-NOT: shufps
+; CHECK: orps
+; CHECK-NEXT: shufps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %xor = xor <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %xor
+}
+; CHECK-LABEL: test3c
+; CHECK-NOT: shufps
+; CHECK: xorps
+; CHECK-NEXT: xorps
+; CHECK-NEXT: shufps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %and = and <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %and
+}
+; CHECK-LABEL: test4c
+; CHECK-NOT: shufps
+; CHECK: andps
+; CHECK-NEXT: shufps
+; CHECK: ret
+
+
+define <4 x i32> @test5c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %or = or <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %or
+}
+; CHECK-LABEL: test5c
+; CHECK-NOT: shufps
+; CHECK: orps
+; CHECK-NEXT: shufps
+; CHECK: ret
+
+
+define <4 x i32> @test6c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+ %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
+ %xor = xor <4 x i32> %shuf1, %shuf2
+ ret <4 x i32> %xor
+}
+; CHECK-LABEL: test6c
+; CHECK-NOT: shufps
+; CHECK: xorps
+; CHECK-NEXT: xorps
+; CHECK-NEXT: shufps
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/computeKnownBits_urem.ll b/test/CodeGen/X86/computeKnownBits_urem.ll
new file mode 100644
index 000000000000..9902e6f2597b
--- /dev/null
+++ b/test/CodeGen/X86/computeKnownBits_urem.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
+define i32 @main() #0 {
+entry:
+ %a = alloca i32, align 4
+ store i32 1, i32* %a, align 4
+ %0 = load i32* %a, align 4
+ %or = or i32 1, %0
+ %and = and i32 1, %or
+ %rem = urem i32 %and, 1
+ %add = add i32 %rem, 1
+ ret i32 %add
+}
+; CHECK: $1, %eax
+; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/const-base-addr.ll b/test/CodeGen/X86/const-base-addr.ll
new file mode 100644
index 000000000000..f859d7fafff3
--- /dev/null
+++ b/test/CodeGen/X86/const-base-addr.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%T = type { i32, i32, i32, i32 }
+
+define i32 @test1() nounwind {
+; CHECK-LABEL: test1
+; CHECK: movabsq $123456789012345678, %rcx
+; CHECK-NEXT: movl 4(%rcx), %eax
+; CHECK-NEXT: addl 8(%rcx), %eax
+; CHECK-NEXT: addl 12(%rcx), %eax
+ %addr1 = getelementptr %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 1
+ %tmp1 = load i32* %addr1
+ %addr2 = getelementptr %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 2
+ %tmp2 = load i32* %addr2
+ %addr3 = getelementptr %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 3
+ %tmp3 = load i32* %addr3
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+}
+
diff --git a/test/CodeGen/X86/constant-hoisting-shift-immediate.ll b/test/CodeGen/X86/constant-hoisting-shift-immediate.ll
new file mode 100644
index 000000000000..883be355bd36
--- /dev/null
+++ b/test/CodeGen/X86/constant-hoisting-shift-immediate.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -O3 -march=x86-64 |FileCheck %s
+define i64 @foo(i1 %z, i192* %p, i192* %q)
+{
+; If const 128 is hoisted to a variable, then in basic block L_val2 we would
+; have %lshr2 = lshr i192 %data2, %const, and the definition of %const would
+; be in another basic block. As a result, very inefficient code might be
+; produced. Here we check that this doesn't occur.
+entry:
+ %data1 = load i192* %p, align 8
+ %lshr1 = lshr i192 %data1, 128
+ %val1 = trunc i192 %lshr1 to i64
+ br i1 %z, label %End, label %L_val2
+
+; CHECK: movq 16(%rdx), %rax
+; CHECK-NEXT: retq
+L_val2:
+ %data2 = load i192* %q, align 8
+ %lshr2 = lshr i192 %data2, 128
+ %val2 = trunc i192 %lshr2 to i64
+ br label %End
+
+End:
+ %p1 = phi i64 [%val1,%entry], [%val2,%L_val2]
+ ret i64 %p1
+}
diff --git a/test/CodeGen/X86/constant-pool-remat-0.ll b/test/CodeGen/X86/constant-pool-remat-0.ll
index 4a0110896ced..e42a87c6acde 100644
--- a/test/CodeGen/X86/constant-pool-remat-0.ll
+++ b/test/CodeGen/X86/constant-pool-remat-0.ll
@@ -1,7 +1,7 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-linux -regalloc=greedy | FileCheck %s
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-linux -mattr=+sse2 | FileCheck %s
; CHECK: LCPI
; CHECK: LCPI
; CHECK: LCPI
diff --git a/test/CodeGen/X86/constant-pool-sharing.ll b/test/CodeGen/X86/constant-pool-sharing.ll
index 26318dd6c558..3682165e3a25 100644
--- a/test/CodeGen/X86/constant-pool-sharing.ll
+++ b/test/CodeGen/X86/constant-pool-sharing.ll
@@ -1,12 +1,13 @@
-; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s --check-prefix=COMMON --check-prefix=LINUX
+; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7 | FileCheck %s --check-prefix=COMMON --check-prefix=MSVC
; llc should share constant pool entries between this integer vector
; and this floating-point vector since they have the same encoding.
-; CHECK: LCPI0_0(%rip), %xmm0
-; CHECK: movaps %xmm0, ({{%rdi|%rcx}})
-; CHECK: movaps %xmm0, ({{%rsi|%rdx}})
+; LINUX: LCPI0_0(%rip), %xmm0
+; MSVC: __xmm@40000000400000004000000040000000(%rip), %xmm0
+; COMMON: movaps %xmm0, ({{%rdi|%rcx}})
+; COMMON: movaps %xmm0, ({{%rsi|%rdx}})
define void @foo(<4 x i32>* %p, <4 x float>* %q, i1 %t) nounwind {
entry:
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
index 051150e227aa..ee73377dffde 100644
--- a/test/CodeGen/X86/crash.ll
+++ b/test/CodeGen/X86/crash.ll
@@ -1,7 +1,7 @@
; REQUIRES: asserts
-; RUN: llc -march=x86 < %s -verify-machineinstrs -precompute-phys-liveness
-; RUN: llc -march=x86-64 < %s -verify-machineinstrs -precompute-phys-liveness
-
+; RUN: llc -march=x86 -no-integrated-as < %s -verify-machineinstrs -precompute-phys-liveness
+; RUN: llc -march=x86-64 -no-integrated-as < %s -verify-machineinstrs -precompute-phys-liveness
+
; PR6497
; Chain and flag folding issues.
diff --git a/test/CodeGen/X86/cse-add-with-overflow.ll b/test/CodeGen/X86/cse-add-with-overflow.ll
new file mode 100644
index 000000000000..1fcc03f117d3
--- /dev/null
+++ b/test/CodeGen/X86/cse-add-with-overflow.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=x86_64-darwin -mcpu=generic | FileCheck %s
+; XFAIL: *
+; rdar:15661073 simple example of redundant adds
+;
+; MachineCSE should coalesce trivial subregister copies.
+;
+; The extra movl+addl should be removed during MachineCSE.
+; CHECK-LABEL: redundantadd
+; CHECK: cmpq
+; CHECK: movq
+; CHECK-NOT: movl
+; CHECK: addl
+; CHECK-NOT: addl
+; CHECK: ret
+
+define i64 @redundantadd(i64* %a0, i64* %a1) {
+entry:
+ %tmp8 = load i64* %a0, align 8
+ %tmp12 = load i64* %a1, align 8
+ %tmp13 = icmp ult i64 %tmp12, -281474976710656
+ br i1 %tmp13, label %exit1, label %body
+
+exit1:
+ unreachable
+
+body:
+ %tmp14 = trunc i64 %tmp8 to i32
+ %tmp15 = trunc i64 %tmp12 to i32
+ %tmp16 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %tmp14, i32 %tmp15)
+ %tmp17 = extractvalue { i32, i1 } %tmp16, 1
+ br i1 %tmp17, label %exit2, label %return
+
+exit2:
+ unreachable
+
+return:
+ %tmp18 = add i64 %tmp12, %tmp8
+ %tmp19 = and i64 %tmp18, 4294967295
+ %tmp20 = or i64 %tmp19, -281474976710656
+ ret i64 %tmp20
+}
+
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
diff --git a/test/CodeGen/X86/ctpop-combine.ll b/test/CodeGen/X86/ctpop-combine.ll
index 786f7f9b1cc8..463505bd95d9 100644
--- a/test/CodeGen/X86/ctpop-combine.ll
+++ b/test/CodeGen/X86/ctpop-combine.ll
@@ -35,6 +35,6 @@ define i32 @test3(i64 %x) nounwind readnone {
%conv = zext i1 %cmp to i32
ret i32 %conv
; CHECK-LABEL: test3:
-; CHECK: cmpb $2
+; CHECK: cmpl $2
; CHECK: ret
}
diff --git a/test/CodeGen/X86/cvt16.ll b/test/CodeGen/X86/cvt16.ll
new file mode 100644
index 000000000000..4d920e2d23d2
--- /dev/null
+++ b/test/CodeGen/X86/cvt16.ll
@@ -0,0 +1,89 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=LIBCALL
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=F16C
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -soft-float=1 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=SOFTFLOAT
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -soft-float=1 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=SOFTFLOAT
+
+; This is a test for float to half float conversions on x86-64.
+;
+; If the -soft-float flag is set, or if there is no F16C support, then:
+; 1) half float to float conversions are
+;    translated into calls to __gnu_h2f_ieee, defined
+;    by the compiler runtime library;
+; 2) float to half float conversions are translated into calls
+;    to __gnu_f2h_ieee, which is expected to be defined by the
+;    compiler runtime library.
+;
+; Otherwise (we have F16C support):
+; 1) half float to float conversions are translated using
+;    vcvtph2ps instructions;
+; 2) float to half float conversions are translated using
+;    vcvtps2ph instructions.
+
+
+define void @test1(float %src, i16* %dest) {
+ %1 = tail call i16 @llvm.convert.to.fp16.f32(float %src)
+ store i16 %1, i16* %dest, align 2
+ ret void
+}
+; CHECK-LABEL: test1
+; LIBCALL: callq __gnu_f2h_ieee
+; SOFTFLOAT: callq __gnu_f2h_ieee
+; F16C: vcvtps2ph
+; CHECK: ret
+
+
+define float @test2(i16* nocapture %src) {
+ %1 = load i16* %src, align 2
+ %2 = tail call float @llvm.convert.from.fp16.f32(i16 %1)
+ ret float %2
+}
+; CHECK-LABEL: test2:
+; LIBCALL: jmp __gnu_h2f_ieee
+; SOFTFLOAT: callq __gnu_h2f_ieee
+; F16C: vcvtph2ps
+; F16C: ret
+
+
+define float @test3(float %src) nounwind uwtable readnone {
+ %1 = tail call i16 @llvm.convert.to.fp16.f32(float %src)
+ %2 = tail call float @llvm.convert.from.fp16.f32(i16 %1)
+ ret float %2
+}
+
+; CHECK-LABEL: test3:
+; LIBCALL: callq __gnu_f2h_ieee
+; LIBCALL: jmp __gnu_h2f_ieee
+; SOFTFLOAT: callq __gnu_f2h_ieee
+; SOFTFLOAT: callq __gnu_h2f_ieee
+; F16C: vcvtps2ph
+; F16C-NEXT: vcvtph2ps
+; F16C: ret
+
+define double @test4(i16* nocapture %src) {
+ %1 = load i16* %src, align 2
+ %2 = tail call double @llvm.convert.from.fp16.f64(i16 %1)
+ ret double %2
+}
+; CHECK-LABEL: test4:
+; LIBCALL: callq __gnu_h2f_ieee
+; LIBCALL: cvtss2sd
+; SOFTFLOAT: callq __gnu_h2f_ieee
+; SOFTFLOAT: callq __extendsfdf2
+; F16C: vcvtph2ps
+; F16C: vcvtss2sd
+; F16C: ret
+
+
+define i16 @test5(double %src) {
+ %val = tail call i16 @llvm.convert.to.fp16.f64(double %src)
+ ret i16 %val
+}
+; CHECK-LABEL: test5:
+; LIBCALL: jmp __truncdfhf2
+; SOFTFLOAT: callq __truncdfhf2
+; F16C: jmp __truncdfhf2
+
+declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
+declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
+declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
+declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone
diff --git a/test/CodeGen/X86/dagcombine-and-setcc.ll b/test/CodeGen/X86/dagcombine-and-setcc.ll
new file mode 100644
index 000000000000..e7336a90dbdd
--- /dev/null
+++ b/test/CodeGen/X86/dagcombine-and-setcc.ll
@@ -0,0 +1,47 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+; Function Attrs: nounwind
+declare i32 @printf(i8* nocapture readonly, ...)
+
+; On X86, 1 is true and 0 is false, so we can't perform the combine:
+; (and (setgt X, true), (setgt Y, true)) -> (setgt (or X, Y), true)
+; This combine only works if the true value is -1.
+
+
+;CHECK: cmpl
+;CHECK: setg
+;CHECK: cmpl
+;CHECK: setg
+;CHECK: andb
+
+@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+; Function Attrs: optsize ssp uwtable
+define i32 @foo(i32 %a, i32 %b, i32 * %c) {
+if.else429:
+ %cmp.i1144 = icmp eq i32* %c, null
+ %cmp430 = icmp slt i32 %a, 2
+ %cmp432 = icmp slt i32 %b, 2
+ %or.cond710 = or i1 %cmp430, %cmp432
+ %or.cond710.not = xor i1 %or.cond710, true
+ %brmerge1448 = or i1 %cmp.i1144, %or.cond710.not
+ br i1 %brmerge1448, label %ret1, label %ret2
+
+ret1:
+ ret i32 0
+
+ret2:
+ ret i32 1
+}
+
+define i32 @main(i32 %argc, i8** nocapture readnone %argv) {
+ %res = alloca i32, align 4
+ %t = call i32 @foo(i32 1, i32 2, i32* %res) #3
+ %v = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %t)
+ ret i32 0
+}
+
+
+
diff --git a/test/CodeGen/X86/darwin-no-dead-strip.ll b/test/CodeGen/X86/darwin-no-dead-strip.ll
index 452d1f8ce392..35196aa5f8dc 100644
--- a/test/CodeGen/X86/darwin-no-dead-strip.ll
+++ b/test/CodeGen/X86/darwin-no-dead-strip.ll
@@ -1,7 +1,13 @@
-; RUN: llc < %s | grep no_dead_strip
+; RUN: llc < %s | FileCheck %s
target datalayout = "e-p:32:32"
target triple = "i686-apple-darwin8.7.2"
-@x = weak global i32 0 ; <i32*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32* @x to i8*) ] ; <[1 x i8*]*> [#uses=0]
+@x = weak global i32 0
+; CHECK: .no_dead_strip _x
+
+@"\01Ly" = private global i8 0
+; CHECK: no_dead_strip Ly
+
+@llvm.used = appending global [2 x i8*] [ i8* bitcast (i32* @x to i8*),
+ i8* @"\01Ly" ]
diff --git a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
new file mode 100644
index 000000000000..23f83352eb2e
--- /dev/null
+++ b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -0,0 +1,109 @@
+; RUN: llc -march=x86-64 -mtriple=x86_64-linux < %s | FileCheck %s
+; RUN: opt -strip-debug < %s | llc -march=x86-64 -mtriple=x86_64-linux | FileCheck %s
+; http://llvm.org/PR19051. Minor code-motion difference with -g.
+; Presence of debug info shouldn't affect the codegen. Make sure that
+; we generate the same code sequence with and without debug info.
+;
+; CHECK: callq _Z3fooPcjPKc
+; CHECK: callq _Z3fooPcjPKc
+; CHECK: leaq (%rsp), %rdi
+; CHECK: movl $4, %esi
+; CHECK: testl {{%[a-z]+}}, {{%[a-z]+}}
+; CHECK: je .LBB0_4
+
+; Regenerate test with this command:
+; clang -emit-llvm -S -O2 -g
+; from this source:
+;
+; extern void foo(char *dst,unsigned siz,const char *src);
+; extern const char * i2str(int);
+;
+; struct AAA3 {
+; AAA3(const char *value) { foo(text,sizeof(text),value);}
+; void operator=(const char *value) { foo(text,sizeof(text),value);}
+; operator const char*() const { return text;}
+; char text[4];
+; };
+;
+; void bar (int param1,int param2) {
+; const char * temp(0);
+;
+; if (param2) {
+; temp = i2str(param2);
+; }
+; AAA3 var1("");
+; AAA3 var2("");
+;
+; if (param1)
+; var2 = "+";
+; else
+; var2 = "-";
+; var1 = "";
+; }
+
+%struct.AAA3 = type { [4 x i8] }
+
+@.str = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
+@.str1 = private unnamed_addr constant [2 x i8] c"+\00", align 1
+@.str2 = private unnamed_addr constant [2 x i8] c"-\00", align 1
+
+; Function Attrs: uwtable
+define void @_Z3barii(i32 %param1, i32 %param2) #0 {
+entry:
+ %var1 = alloca %struct.AAA3, align 1
+ %var2 = alloca %struct.AAA3, align 1
+ %tobool = icmp eq i32 %param2, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %call = call i8* @_Z5i2stri(i32 %param2)
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !60)
+ call void @llvm.dbg.value(metadata !62, i64 0, metadata !63)
+ %arraydecay.i = getelementptr inbounds %struct.AAA3* %var1, i64 0, i32 0, i64 0
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
+ call void @llvm.dbg.declare(metadata !{%struct.AAA3* %var2}, metadata !38)
+ %arraydecay.i5 = getelementptr inbounds %struct.AAA3* %var2, i64 0, i32 0, i64 0
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
+ %tobool1 = icmp eq i32 %param1, 0
+ br i1 %tobool1, label %if.else, label %if.then2
+
+if.then2: ; preds = %if.end
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0))
+ br label %if.end3
+
+if.else: ; preds = %if.end
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0))
+ br label %if.end3
+
+if.end3: ; preds = %if.else, %if.then2
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+declare i8* @_Z5i2stri(i32) #2
+
+declare void @_Z3fooPcjPKc(i8*, i32, i8*) #2
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.module.flags = !{!48, !49}
+!llvm.ident = !{!50}
+
+!38 = metadata !{i32 786688, null, metadata !"var2", null, i32 20, null, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [var2] [line 20]
+!48 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!50 = metadata !{metadata !"clang version 3.5 (202418)"}
+!60 = metadata !{i32 786689, null, metadata !"this", null, i32 16777216, null, i32 1088, null} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!62 = metadata !{i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)}
+!63 = metadata !{i32 786689, null, metadata !"value", null, i32 33554439, null, i32 0, null} ; [ DW_TAG_arg_variable ] [value] [line 7]
diff --git a/test/CodeGen/X86/dbg-changes-codegen.ll b/test/CodeGen/X86/dbg-changes-codegen.ll
new file mode 100644
index 000000000000..0b17c455408b
--- /dev/null
+++ b/test/CodeGen/X86/dbg-changes-codegen.ll
@@ -0,0 +1,83 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux | FileCheck %s
+
+; The Peephole optimizer should fold the load into the cmp even with debug info.
+; CHECK-LABEL: _ZN3Foo3batEv
+; CHECK-NOT: movq pfoo
+; CHECK: cmpq {{%[a-z]+}}, pfoo(%rip)
+;
+; CHECK-LABEL: _Z3bazv
+; CHECK-NOT: movq wibble2
+; CHECK: cmpq {{%[a-z]+}}, wibble2(%rip)
+
+; Regenerate test with this command:
+; clang -emit-llvm -S -O2 -g
+; from this source:
+; struct Foo {
+; bool bat();
+; bool operator==(Foo &arg) { return (this == &arg); }
+; };
+; Foo *pfoo;
+; bool Foo::bat() { return (*this == *pfoo); }
+;
+; struct Wibble {
+; int x;
+; } *wibble1, *wibble2;
+; struct Flibble {
+; void bar(Wibble *c) {
+; if (c < wibble2)
+; wibble2 = 0;
+; c->x = 0;
+; }
+; } flibble;
+; void baz() { flibble.bar(wibble1); }
+
+%struct.Foo = type { i8 }
+%struct.Wibble = type { i32 }
+%struct.Flibble = type { i8 }
+
+@pfoo = global %struct.Foo* null, align 8
+@wibble1 = global %struct.Wibble* null, align 8
+@wibble2 = global %struct.Wibble* null, align 8
+@flibble = global %struct.Flibble zeroinitializer, align 1
+
+; Function Attrs: nounwind readonly uwtable
+define zeroext i1 @_ZN3Foo3batEv(%struct.Foo* %this) #0 align 2 {
+entry:
+ %0 = load %struct.Foo** @pfoo, align 8
+ tail call void @llvm.dbg.value(metadata !{%struct.Foo* %0}, i64 0, metadata !62)
+ %cmp.i = icmp eq %struct.Foo* %0, %this
+ ret i1 %cmp.i
+}
+
+; Function Attrs: nounwind uwtable
+define void @_Z3bazv() #1 {
+entry:
+ %0 = load %struct.Wibble** @wibble1, align 8
+ tail call void @llvm.dbg.value(metadata !64, i64 0, metadata !65)
+ %1 = load %struct.Wibble** @wibble2, align 8
+ %cmp.i = icmp ugt %struct.Wibble* %1, %0
+ br i1 %cmp.i, label %if.then.i, label %_ZN7Flibble3barEP6Wibble.exit
+
+if.then.i: ; preds = %entry
+ store %struct.Wibble* null, %struct.Wibble** @wibble2, align 8
+ br label %_ZN7Flibble3barEP6Wibble.exit
+
+_ZN7Flibble3barEP6Wibble.exit: ; preds = %entry, %if.then.i
+ %x.i = getelementptr inbounds %struct.Wibble* %0, i64 0, i32 0
+ store i32 0, i32* %x.i, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
+
+attributes #0 = { nounwind readonly uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+
+
+!17 = metadata !{i32 786448, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, null} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from Foo]
+!45 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from Flibble]
+!62 = metadata !{i32 786689, null, metadata !"arg", null, i32 33554436, metadata !17, i32 0, null} ; [ DW_TAG_arg_variable ] [arg] [line 4]
+!64 = metadata !{%struct.Flibble* undef}
+!65 = metadata !{i32 786689, null, metadata !"this", null, i32 16777229, metadata !45, i32 1088, null} ; [ DW_TAG_arg_variable ] [this] [line 13]
diff --git a/test/CodeGen/X86/divide-by-constant.ll b/test/CodeGen/X86/divide-by-constant.ll
index 98ae1d51db21..21225e340826 100644
--- a/test/CodeGen/X86/divide-by-constant.ll
+++ b/test/CodeGen/X86/divide-by-constant.ll
@@ -7,7 +7,7 @@ entry:
%div = udiv i16 %x, 33
ret i16 %div
; CHECK-LABEL: test1:
-; CHECK: imull $63551, %eax, %eax
+; CHECK: imull $63551, %eax
; CHECK-NEXT: shrl $21, %eax
; CHECK-NEXT: ret
}
@@ -18,7 +18,7 @@ entry:
ret i16 %div
; CHECK-LABEL: test2:
-; CHECK: imull $43691, %eax, %eax
+; CHECK: imull $43691, %eax
; CHECK-NEXT: shrl $17, %eax
; CHECK-NEXT: ret
}
@@ -30,7 +30,7 @@ entry:
; CHECK-LABEL: test3:
; CHECK: movzbl 8(%esp), %eax
-; CHECK-NEXT: imull $171, %eax, %eax
+; CHECK-NEXT: imull $171, %eax
; CHECK-NEXT: shrl $9, %eax
; CHECK-NEXT: ret
}
@@ -40,7 +40,7 @@ entry:
%div = sdiv i16 %x, 33 ; <i32> [#uses=1]
ret i16 %div
; CHECK-LABEL: test4:
-; CHECK: imull $1986, %eax, %
+; CHECK: imull $1986, %eax
}
define i32 @test5(i32 %A) nounwind {
diff --git a/test/CodeGen/X86/dll-linkage.ll b/test/CodeGen/X86/dll-linkage.ll
deleted file mode 100644
index a0c2a54a99a4..000000000000
--- a/test/CodeGen/X86/dll-linkage.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-mingw32 | FileCheck %s
-
-; RUN: llc < %s -mtriple=i386-pc-mingw32 -O0 | FileCheck %s -check-prefix=FAST
-; PR6275
-
-declare dllimport void @foo()
-
-define void @bar() nounwind {
-; CHECK: calll *__imp__foo
-; FAST: movl __imp__foo, [[R:%[a-z]{3}]]
-; FAST: calll *[[R]]
- call void @foo()
- ret void
-}
diff --git a/test/CodeGen/X86/dllexport-x86_64.ll b/test/CodeGen/X86/dllexport-x86_64.ll
new file mode 100644
index 000000000000..0d5afa1b1384
--- /dev/null
+++ b/test/CodeGen/X86/dllexport-x86_64.ll
@@ -0,0 +1,108 @@
+; RUN: llc -mtriple x86_64-pc-win32 < %s | FileCheck -check-prefix=CHECK -check-prefix=WIN32 %s
+; RUN: llc -mtriple x86_64-pc-mingw32 < %s | FileCheck -check-prefix=CHECK -check-prefix=MINGW %s
+
+; CHECK: .text
+
+define void @notExported() {
+ ret void
+}
+
+; CHECK: .globl f1
+define dllexport void @f1() {
+ ret void
+}
+
+; CHECK: .globl f2
+define dllexport void @f2() unnamed_addr {
+ ret void
+}
+
+; CHECK: .section .text,"xr",discard,lnk1
+; CHECK: .globl lnk1
+define linkonce_odr dllexport void @lnk1() {
+ ret void
+}
+
+; CHECK: .section .text,"xr",discard,lnk2
+; CHECK: .globl lnk2
+define linkonce_odr dllexport void @lnk2() alwaysinline {
+ ret void
+}
+
+; CHECK: .section .text,"xr",discard,weak1
+; CHECK: .globl weak1
+define weak_odr dllexport void @weak1() {
+ ret void
+}
+
+
+; CHECK: .data
+; CHECK: .globl Var1
+@Var1 = dllexport global i32 1, align 4
+
+; CHECK: .rdata,"rd"
+; CHECK: .globl Var2
+@Var2 = dllexport unnamed_addr constant i32 1
+
+; CHECK: .comm Var3
+@Var3 = common dllexport global i32 0, align 4
+
+; CHECK: .section .data,"wd",discard,WeakVar1
+; CHECK: .globl WeakVar1
+@WeakVar1 = weak_odr dllexport global i32 1, align 4
+
+; CHECK: .section .rdata,"rd",discard,WeakVar2
+; CHECK: .globl WeakVar2
+@WeakVar2 = weak_odr dllexport unnamed_addr constant i32 1
+
+
+; CHECK: .globl alias
+; CHECK: alias = notExported
+@alias = dllexport alias void()* @notExported
+
+; CHECK: .globl alias2
+; CHECK: alias2 = f1
+@alias2 = dllexport alias void()* @f1
+
+; CHECK: .globl alias3
+; CHECK: alias3 = notExported
+@alias3 = dllexport alias void()* @notExported
+
+; CHECK: .weak weak_alias
+; CHECK: weak_alias = f1
+@weak_alias = dllexport alias weak_odr void()* @f1
+
+@blob = global [6 x i8] c"\B8*\00\00\00\C3", section ".text", align 16
+@blob_alias = dllexport alias bitcast ([6 x i8]* @blob to i32 ()*)
+
+; CHECK: .section .drectve
+; WIN32: " /EXPORT:Var1,DATA"
+; WIN32: " /EXPORT:Var2,DATA"
+; WIN32: " /EXPORT:Var3,DATA"
+; WIN32: " /EXPORT:WeakVar1,DATA"
+; WIN32: " /EXPORT:WeakVar2,DATA"
+; WIN32: " /EXPORT:f1"
+; WIN32: " /EXPORT:f2"
+; WIN32: " /EXPORT:lnk1"
+; WIN32: " /EXPORT:lnk2"
+; WIN32: " /EXPORT:weak1"
+; WIN32: " /EXPORT:alias"
+; WIN32: " /EXPORT:alias2"
+; WIN32: " /EXPORT:alias3"
+; WIN32: " /EXPORT:weak_alias"
+; WIN32: " /EXPORT:blob_alias"
+; MINGW: " -export:Var1,data"
+; MINGW: " -export:Var2,data"
+; MINGW: " -export:Var3,data"
+; MINGW: " -export:WeakVar1,data"
+; MINGW: " -export:WeakVar2,data"
+; MINGW: " -export:f1"
+; MINGW: " -export:f2"
+; MINGW: " -export:lnk1"
+; MINGW: " -export:lnk2"
+; MINGW: " -export:weak1"
+; MINGW: " -export:alias"
+; MINGW: " -export:alias2"
+; MINGW: " -export:alias3"
+; MINGW: " -export:weak_alias"
+; MINGW: " -export:blob_alias"
diff --git a/test/CodeGen/X86/dllexport.ll b/test/CodeGen/X86/dllexport.ll
index bf57e78f35d4..e2c3f131ee06 100644
--- a/test/CodeGen/X86/dllexport.ll
+++ b/test/CodeGen/X86/dllexport.ll
@@ -1,12 +1,130 @@
-; RUN: llc < %s | FileCheck %s
-; PR2936
+; RUN: llc -mtriple i386-pc-win32 < %s \
+; RUN: | FileCheck -check-prefix CHECK -check-prefix CHECK-CL %s
+; RUN: llc -mtriple i386-pc-mingw32 < %s \
+; RUN: | FileCheck -check-prefix CHECK -check-prefix CHECK-GCC %s
+; RUN: llc -mtriple i686-pc-cygwin %s -o - \
+; RUN: | FileCheck -check-prefix CHECK -check-prefix CHECK-GCC %s
-target triple = "i386-pc-mingw32"
+; CHECK: .text
-define dllexport x86_fastcallcc i32 @foo() nounwind {
-entry:
+define void @notExported() {
+ ret void
+}
+
+; CHECK: .globl _f1
+define dllexport void @f1() {
+ ret void
+}
+
+; CHECK: .globl _f2
+define dllexport void @f2() unnamed_addr {
+ ret void
+}
+
+; CHECK: .globl _stdfun@0
+define dllexport x86_stdcallcc void @stdfun() nounwind {
+ ret void
+}
+
+; CHECK: .globl @fastfun@0
+define dllexport x86_fastcallcc i32 @fastfun() nounwind {
ret i32 0
}
+; CHECK: .globl _thisfun
+define dllexport x86_thiscallcc void @thisfun() nounwind {
+ ret void
+}
+
+; CHECK: .section .text,"xr",discard,_lnk1
+; CHECK: .globl _lnk1
+define linkonce_odr dllexport void @lnk1() {
+ ret void
+}
+
+; CHECK: .section .text,"xr",discard,_lnk2
+; CHECK: .globl _lnk2
+define linkonce_odr dllexport void @lnk2() alwaysinline {
+ ret void
+}
+
+; CHECK: .section .text,"xr",discard,_weak1
+; CHECK: .globl _weak1
+define weak_odr dllexport void @weak1() {
+ ret void
+}
+
+
+; CHECK: .data
+; CHECK: .globl _Var1
+@Var1 = dllexport global i32 1, align 4
+
+; CHECK: .rdata,"rd"
+; CHECK: .globl _Var2
+@Var2 = dllexport unnamed_addr constant i32 1
+
+; CHECK: .comm _Var3
+@Var3 = common dllexport global i32 0, align 4
+
+; CHECK: .section .data,"wd",discard,_WeakVar1
+; CHECK: .globl _WeakVar1
+@WeakVar1 = weak_odr dllexport global i32 1, align 4
+
+; CHECK: .section .rdata,"rd",discard,_WeakVar2
+; CHECK: .globl _WeakVar2
+@WeakVar2 = weak_odr dllexport unnamed_addr constant i32 1
+
+
+; CHECK: .globl _alias
+; CHECK: _alias = _notExported
+@alias = dllexport alias void()* @notExported
+
+; CHECK: .globl _alias2
+; CHECK: _alias2 = _f1
+@alias2 = dllexport alias void()* @f1
+
+; CHECK: .globl _alias3
+; CHECK: _alias3 = _notExported
+@alias3 = dllexport alias void()* @notExported
+
+; CHECK: .weak _weak_alias
+; CHECK: _weak_alias = _f1
+@weak_alias = dllexport alias weak_odr void()* @f1
+
+
; CHECK: .section .drectve
-; CHECK: -export:@foo@0
+; CHECK-CL: " /EXPORT:_Var1,DATA"
+; CHECK-CL: " /EXPORT:_Var2,DATA"
+; CHECK-CL: " /EXPORT:_Var3,DATA"
+; CHECK-CL: " /EXPORT:_WeakVar1,DATA"
+; CHECK-CL: " /EXPORT:_WeakVar2,DATA"
+; CHECK-CL: " /EXPORT:_f1"
+; CHECK-CL: " /EXPORT:_f2"
+; CHECK-CL: " /EXPORT:_stdfun@0"
+; CHECK-CL: " /EXPORT:@fastfun@0"
+; CHECK-CL: " /EXPORT:_thisfun"
+; CHECK-CL: " /EXPORT:_lnk1"
+; CHECK-CL: " /EXPORT:_lnk2"
+; CHECK-CL: " /EXPORT:_weak1"
+; CHECK-CL: " /EXPORT:_alias"
+; CHECK-CL: " /EXPORT:_alias2"
+; CHECK-CL: " /EXPORT:_alias3"
+; CHECK-CL: " /EXPORT:_weak_alias"
+; CHECK-GCC: " -export:Var1,data"
+; CHECK-GCC: " -export:Var2,data"
+; CHECK-GCC: " -export:Var3,data"
+; CHECK-GCC: " -export:WeakVar1,data"
+; CHECK-GCC: " -export:WeakVar2,data"
+; CHECK-GCC: " -export:f1"
+; CHECK-GCC: " -export:f2"
+; CHECK-GCC: " -export:stdfun@0"
+; CHECK-GCC: " -export:@fastfun@0"
+; CHECK-GCC: " -export:thisfun"
+; CHECK-GCC: " -export:lnk1"
+; CHECK-GCC: " -export:lnk2"
+; CHECK-GCC: " -export:weak1"
+; CHECK-GCC: " -export:alias"
+; CHECK-GCC: " -export:alias2"
+; CHECK-GCC: " -export:alias3"
+; CHECK-GCC: " -export:weak_alias"
+
diff --git a/test/CodeGen/X86/dllimport-x86_64.ll b/test/CodeGen/X86/dllimport-x86_64.ll
new file mode 100644
index 000000000000..666409fd4c07
--- /dev/null
+++ b/test/CodeGen/X86/dllimport-x86_64.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mtriple x86_64-pc-win32 < %s | FileCheck %s
+; RUN: llc -mtriple x86_64-pc-mingw32 < %s | FileCheck %s
+;
+; RUN: llc -mtriple x86_64-pc-mingw32 -O0 < %s | FileCheck %s -check-prefix=FAST
+; PR6275
+;
+; RUN: opt -mtriple x86_64-pc-win32 -std-compile-opts -S < %s | FileCheck %s -check-prefix=OPT
+
+@Var1 = external dllimport global i32
+@Var2 = available_externally dllimport unnamed_addr constant i32 1
+
+declare dllimport void @fun()
+
+define available_externally dllimport void @inline1() {
+ ret void
+}
+
+define available_externally dllimport void @inline2() {
+ ret void
+}
+
+declare void @dummy(...)
+
+define void @use() nounwind {
+; CHECK: callq *__imp_fun(%rip)
+; FAST: movq __imp_fun(%rip), [[R:%[a-z]{3}]]
+; FAST-NEXT: callq *[[R]]
+ call void @fun()
+
+; CHECK: callq *__imp_inline1(%rip)
+; CHECK: callq *__imp_inline2(%rip)
+ call void @inline1()
+ call void @inline2()
+
+; available_externally uses go away
+; OPT-NOT: call void @inline1()
+; OPT-NOT: call void @inline2()
+; OPT-NOT: load i32* @Var2
+; OPT: call void (...)* @dummy(i32 %1, i32 1)
+
+; CHECK-DAG: movq __imp_Var1(%rip), [[R1:%[a-z]{3}]]
+; CHECK-DAG: movq __imp_Var2(%rip), [[R2:%[a-z]{3}]]
+ %1 = load i32* @Var1
+ %2 = load i32* @Var2
+ call void(...)* @dummy(i32 %1, i32 %2)
+
+ ret void
+}
diff --git a/test/CodeGen/X86/dllimport.ll b/test/CodeGen/X86/dllimport.ll
new file mode 100644
index 000000000000..695bfce821bb
--- /dev/null
+++ b/test/CodeGen/X86/dllimport.ll
@@ -0,0 +1,59 @@
+; RUN: llc -mtriple i386-pc-win32 < %s | FileCheck %s
+; RUN: llc -mtriple i386-pc-mingw32 < %s | FileCheck %s
+;
+; RUN: llc -mtriple i386-pc-mingw32 -O0 < %s | FileCheck %s -check-prefix=FAST
+; PR6275
+;
+; RUN: opt -mtriple i386-pc-win32 -std-compile-opts -S < %s | FileCheck %s -check-prefix=OPT
+
+@Var1 = external dllimport global i32
+@Var2 = available_externally dllimport unnamed_addr constant i32 1
+
+declare dllimport void @fun()
+
+define available_externally dllimport void @inline1() {
+ ret void
+}
+
+define available_externally dllimport void @inline2() alwaysinline {
+ ret void
+}
+
+declare dllimport x86_stdcallcc void @stdfun() nounwind
+declare dllimport x86_fastcallcc void @fastfun() nounwind
+declare dllimport x86_thiscallcc void @thisfun() nounwind
+
+declare void @dummy(...)
+
+define void @use() nounwind {
+; CHECK: calll *__imp__fun
+; FAST: movl __imp__fun, [[R:%[a-z]{3}]]
+; FAST-NEXT: calll *[[R]]
+ call void @fun()
+
+; CHECK: calll *__imp__inline1
+; CHECK: calll *__imp__inline2
+ call void @inline1()
+ call void @inline2()
+
+; CHECK: calll *__imp__stdfun@0
+; CHECK: calll *__imp_@fastfun@0
+; CHECK: calll *__imp__thisfun
+ call void @stdfun()
+ call void @fastfun()
+ call void @thisfun()
+
+; available_externally uses go away
+; OPT-NOT: call void @inline1()
+; OPT-NOT: call void @inline2()
+; OPT-NOT: load i32* @Var2
+; OPT: call void (...)* @dummy(i32 %1, i32 1)
+
+; CHECK-DAG: movl __imp__Var1, [[R1:%[a-z]{3}]]
+; CHECK-DAG: movl __imp__Var2, [[R2:%[a-z]{3}]]
+ %1 = load i32* @Var1
+ %2 = load i32* @Var2
+ call void(...)* @dummy(i32 %1, i32 %2)
+
+ ret void
+}
diff --git a/test/CodeGen/X86/dwarf-comp-dir.ll b/test/CodeGen/X86/dwarf-comp-dir.ll
index 3b4a8689060d..c8d752771044 100644
--- a/test/CodeGen/X86/dwarf-comp-dir.ll
+++ b/test/CodeGen/X86/dwarf-comp-dir.ll
@@ -7,10 +7,12 @@ target triple = "x86_64-unknown-linux-gnu"
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!5}
-!0 = metadata !{i32 720913, metadata !4, i32 12, metadata !"clang version 3.1 (trunk 143523)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !2, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!2 = metadata !{i32 0}
+!0 = metadata !{i32 720913, metadata !4, i32 12, metadata !"clang version 3.1 (trunk 143523)", i1 true, metadata !"", i32 0, metadata !2, metadata !7, metadata !2, metadata !2, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{}
!3 = metadata !{i32 786473, metadata !4} ; [ DW_TAG_file_type ]
!4 = metadata !{metadata !"empty.c", metadata !"/home/nlewycky"}
+!6 = metadata !{i32 786451, metadata !4, null, metadata !"foo", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, metadata !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 1, size 8, align 8, offset 0] [def] [from ]
+!7 = metadata !{metadata !6}
; The important part of the following check is that dir = #0.
; Dir Mod Time File Len File Name
diff --git a/test/CodeGen/X86/dynamic-alloca-in-entry.ll b/test/CodeGen/X86/dynamic-alloca-in-entry.ll
new file mode 100644
index 000000000000..7ed471c2f502
--- /dev/null
+++ b/test/CodeGen/X86/dynamic-alloca-in-entry.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s
+
+; Allocas with unknown size in the entry block are dynamic.
+define void @foo(i32 %n) {
+ %m = alloca i32, i32 %n
+ ret void
+}
+; CHECK-LABEL: _foo:
+; CHECK: calll __chkstk
+; CHECK: retl
+
+; Use of inalloca implies that the alloca is not static.
+define void @bar() {
+ %m = alloca inalloca i32
+ ret void
+}
+; CHECK-LABEL: _bar:
+; CHECK: calll __chkstk
+; CHECK: retl
diff --git a/test/CodeGen/X86/elf-comdat.ll b/test/CodeGen/X86/elf-comdat.ll
new file mode 100644
index 000000000000..c7e6df7d64f0
--- /dev/null
+++ b/test/CodeGen/X86/elf-comdat.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+
+$f = comdat any
+@v = global i32 0, comdat $f
+define void @f() comdat $f {
+ ret void
+}
+; CHECK: .section .text.f,"axG",@progbits,f,comdat
+; CHECK: .globl f
+; CHECK: .section .bss.v,"aGw",@nobits,f,comdat
+; CHECK: .globl v
diff --git a/test/CodeGen/X86/elf-comdat2.ll b/test/CodeGen/X86/elf-comdat2.ll
new file mode 100644
index 000000000000..209da39ed881
--- /dev/null
+++ b/test/CodeGen/X86/elf-comdat2.ll
@@ -0,0 +1,12 @@
+; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+
+$foo = comdat any
+@bar = global i32 42, comdat $foo
+@foo = global i32 42
+
+; CHECK: .type bar,@object
+; CHECK-NEXT: .section .data.bar,"aGw",@progbits,foo,comdat
+; CHECK-NEXT: .globl bar
+; CHECK: .type foo,@object
+; CHECK-NEXT: .data
+; CHECK-NEXT: .globl foo
diff --git a/test/CodeGen/X86/exedepsfix-broadcast.ll b/test/CodeGen/X86/exedepsfix-broadcast.ll
new file mode 100644
index 000000000000..a18f75195631
--- /dev/null
+++ b/test/CodeGen/X86/exedepsfix-broadcast.ll
@@ -0,0 +1,128 @@
+; RUN: llc -O3 -mtriple=x86_64-apple-macosx -o - < %s -mattr=+avx2 -enable-unsafe-fp-math -mcpu=core2 | FileCheck %s
+; Check that the ExeDepsFix pass correctly fixes the domain for broadcast instructions.
+; <rdar://problem/16354675>
+
+; CHECK-LABEL: ExeDepsFix_broadcastss
+; CHECK: broadcastss
+; CHECK: vandps
+; CHECK: vmaxps
+; CHECK: ret
+define <4 x float> @ExeDepsFix_broadcastss(<4 x float> %arg, <4 x float> %arg2) {
+ %bitcast = bitcast <4 x float> %arg to <4 x i32>
+ %and = and <4 x i32> %bitcast, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %floatcast = bitcast <4 x i32> %and to <4 x float>
+ %max_is_x = fcmp oge <4 x float> %floatcast, %arg2
+ %max = select <4 x i1> %max_is_x, <4 x float> %floatcast, <4 x float> %arg2
+ ret <4 x float> %max
+}
+
+; CHECK-LABEL: ExeDepsFix_broadcastss256
+; CHECK: broadcastss
+; CHECK: vandps
+; CHECK: vmaxps
+; CHECK: ret
+define <8 x float> @ExeDepsFix_broadcastss256(<8 x float> %arg, <8 x float> %arg2) {
+ %bitcast = bitcast <8 x float> %arg to <8 x i32>
+ %and = and <8 x i32> %bitcast, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+ %floatcast = bitcast <8 x i32> %and to <8 x float>
+ %max_is_x = fcmp oge <8 x float> %floatcast, %arg2
+ %max = select <8 x i1> %max_is_x, <8 x float> %floatcast, <8 x float> %arg2
+ ret <8 x float> %max
+}
+
+
+; CHECK-LABEL: ExeDepsFix_broadcastss_inreg
+; CHECK: broadcastss
+; CHECK: vandps
+; CHECK: vmaxps
+; CHECK: ret
+define <4 x float> @ExeDepsFix_broadcastss_inreg(<4 x float> %arg, <4 x float> %arg2, i32 %broadcastvalue) {
+ %bitcast = bitcast <4 x float> %arg to <4 x i32>
+ %in = insertelement <4 x i32> undef, i32 %broadcastvalue, i32 0
+ %mask = shufflevector <4 x i32> %in, <4 x i32> undef, <4 x i32> zeroinitializer
+ %and = and <4 x i32> %bitcast, %mask
+ %floatcast = bitcast <4 x i32> %and to <4 x float>
+ %max_is_x = fcmp oge <4 x float> %floatcast, %arg2
+ %max = select <4 x i1> %max_is_x, <4 x float> %floatcast, <4 x float> %arg2
+ ret <4 x float> %max
+}
+
+; CHECK-LABEL: ExeDepsFix_broadcastss256_inreg
+; CHECK: broadcastss
+; CHECK: vandps
+; CHECK: vmaxps
+; CHECK: ret
+define <8 x float> @ExeDepsFix_broadcastss256_inreg(<8 x float> %arg, <8 x float> %arg2, i32 %broadcastvalue) {
+ %bitcast = bitcast <8 x float> %arg to <8 x i32>
+ %in = insertelement <8 x i32> undef, i32 %broadcastvalue, i32 0
+ %mask = shufflevector <8 x i32> %in, <8 x i32> undef, <8 x i32> zeroinitializer
+ %and = and <8 x i32> %bitcast, %mask
+ %floatcast = bitcast <8 x i32> %and to <8 x float>
+ %max_is_x = fcmp oge <8 x float> %floatcast, %arg2
+ %max = select <8 x i1> %max_is_x, <8 x float> %floatcast, <8 x float> %arg2
+ ret <8 x float> %max
+}
+
+; CHECK-LABEL: ExeDepsFix_broadcastsd
+; In that case the broadcast is directly folded into vandpd.
+; CHECK: vandpd
+; CHECK: vmaxpd
+; CHECK: ret
+define <2 x double> @ExeDepsFix_broadcastsd(<2 x double> %arg, <2 x double> %arg2) {
+ %bitcast = bitcast <2 x double> %arg to <2 x i64>
+ %and = and <2 x i64> %bitcast, <i64 2147483647, i64 2147483647>
+ %floatcast = bitcast <2 x i64> %and to <2 x double>
+ %max_is_x = fcmp oge <2 x double> %floatcast, %arg2
+ %max = select <2 x i1> %max_is_x, <2 x double> %floatcast, <2 x double> %arg2
+ ret <2 x double> %max
+}
+
+; CHECK-LABEL: ExeDepsFix_broadcastsd256
+; CHECK: broadcastsd
+; CHECK: vandpd
+; CHECK: vmaxpd
+; CHECK: ret
+define <4 x double> @ExeDepsFix_broadcastsd256(<4 x double> %arg, <4 x double> %arg2) {
+ %bitcast = bitcast <4 x double> %arg to <4 x i64>
+ %and = and <4 x i64> %bitcast, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+ %floatcast = bitcast <4 x i64> %and to <4 x double>
+ %max_is_x = fcmp oge <4 x double> %floatcast, %arg2
+ %max = select <4 x i1> %max_is_x, <4 x double> %floatcast, <4 x double> %arg2
+ ret <4 x double> %max
+}
+
+
+; CHECK-LABEL: ExeDepsFix_broadcastsd_inreg
+; ExeDepsFix works top down, thus it coalesces the vmovlhps domain with
+; vandps, and there is nothing more we can do to match vmaxpd.
+; CHECK: vmovlhps
+; CHECK: vandps
+; CHECK: vmaxpd
+; CHECK: ret
+define <2 x double> @ExeDepsFix_broadcastsd_inreg(<2 x double> %arg, <2 x double> %arg2, i64 %broadcastvalue) {
+ %bitcast = bitcast <2 x double> %arg to <2 x i64>
+ %in = insertelement <2 x i64> undef, i64 %broadcastvalue, i32 0
+ %mask = shufflevector <2 x i64> %in, <2 x i64> undef, <2 x i32> zeroinitializer
+ %and = and <2 x i64> %bitcast, %mask
+ %floatcast = bitcast <2 x i64> %and to <2 x double>
+ %max_is_x = fcmp oge <2 x double> %floatcast, %arg2
+ %max = select <2 x i1> %max_is_x, <2 x double> %floatcast, <2 x double> %arg2
+ ret <2 x double> %max
+}
+
+; CHECK-LABEL: ExeDepsFix_broadcastsd256_inreg
+; CHECK: broadcastsd
+; CHECK: vandpd
+; CHECK: vmaxpd
+; CHECK: ret
+define <4 x double> @ExeDepsFix_broadcastsd256_inreg(<4 x double> %arg, <4 x double> %arg2, i64 %broadcastvalue) {
+ %bitcast = bitcast <4 x double> %arg to <4 x i64>
+ %in = insertelement <4 x i64> undef, i64 %broadcastvalue, i32 0
+ %mask = shufflevector <4 x i64> %in, <4 x i64> undef, <4 x i32> zeroinitializer
+ %and = and <4 x i64> %bitcast, %mask
+ %floatcast = bitcast <4 x i64> %and to <4 x double>
+ %max_is_x = fcmp oge <4 x double> %floatcast, %arg2
+ %max = select <4 x i1> %max_is_x, <4 x double> %floatcast, <4 x double> %arg2
+ ret <4 x double> %max
+}
+
diff --git a/test/CodeGen/X86/expand-opaque-const.ll b/test/CodeGen/X86/expand-opaque-const.ll
new file mode 100644
index 000000000000..6e461cf8c30b
--- /dev/null
+++ b/test/CodeGen/X86/expand-opaque-const.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mcpu=generic -O1 -relocation-model=pic < %s | FileCheck %s
+target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
+target triple = "i686-apple-darwin"
+
+define i64 @test_lshr() {
+entry:
+; CHECK-NOT: movl $-1, 16(%esp)
+; CHECK-NOT: movl $-1, %eax
+ %retval = alloca i64
+ %op1 = alloca i64
+ %op2 = alloca i64
+ store i64 -6687208052682386272, i64* %op1
+ store i64 7106745059734980448, i64* %op2
+ %tmp1 = load i64* %op1
+ %tmp2 = load i64* %op2
+ %tmp = xor i64 %tmp2, 7106745059734980448
+ %tmp3 = lshr i64 %tmp1, %tmp
+ store i64 %tmp3, i64* %retval
+ %tmp4 = load i64* %retval
+ ret i64 %tmp4
+}
diff --git a/test/CodeGen/X86/extract-store.ll b/test/CodeGen/X86/extract-store.ll
new file mode 100644
index 000000000000..27d93804ba60
--- /dev/null
+++ b/test/CodeGen/X86/extract-store.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+sse4.1 | FileCheck %s -check-prefix=SSE41
+; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+avx | FileCheck %s -check-prefix=AVX
+
+define void @pextrb(i8* nocapture %dst, <16 x i8> %foo) {
+; AVX: vpextrb
+; SSE41: pextrb
+; AVX-NOT: movb
+; SSE41-NOT: movb
+ %vecext = extractelement <16 x i8> %foo, i32 15
+ store i8 %vecext, i8* %dst, align 1
+ ret void
+}
+
+define void @pextrw(i16* nocapture %dst, <8 x i16> %foo) {
+; AVX: vpextrw
+; SSE41: pextrw
+; AVX-NOT: movw
+; SSE41-NOT: movw
+ %vecext = extractelement <8 x i16> %foo, i32 15
+ store i16 %vecext, i16* %dst, align 1
+ ret void
+}
diff --git a/test/CodeGen/X86/f16c-intrinsics.ll b/test/CodeGen/X86/f16c-intrinsics.ll
index 2135f9409cfe..514d929f4432 100644
--- a/test/CodeGen/X86/f16c-intrinsics.ll
+++ b/test/CodeGen/X86/f16c-intrinsics.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86 -mattr=+avx,+f16c | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=+avx,+f16c | FileCheck %s
define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
; CHECK: vcvtph2ps
@@ -30,3 +31,16 @@ define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
+
+define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
+; CHECK-LABEL: test_x86_vcvtps2ph_128_scalar
+; CHECK-NOT: vmov
+; CHECK: vcvtph2ps (%
+
+ %load = load i64* %ptr
+ %ins1 = insertelement <2 x i64> undef, i64 %load, i32 0
+ %ins2 = insertelement <2 x i64> %ins1, i64 0, i32 1
+ %bc = bitcast <2 x i64> %ins2 to <8 x i16>
+ %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc) #2
+ ret <4 x float> %res
+}
diff --git a/test/CodeGen/X86/fast-isel-args-fail.ll b/test/CodeGen/X86/fast-isel-args-fail.ll
index e748e1cad1fd..7467edd74f21 100644
--- a/test/CodeGen/X86/fast-isel-args-fail.ll
+++ b/test/CodeGen/X86/fast-isel-args-fail.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -fast-isel -verify-machineinstrs -mtriple=x86_64-apple-darwin10
; RUN: llc < %s -fast-isel -verify-machineinstrs -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN32
; RUN: llc < %s -fast-isel -verify-machineinstrs -mtriple=x86_64-pc-win64 | FileCheck %s -check-prefix=WIN64
-; Requires: Asserts
+; REQUIRES: asserts
; Previously, this would cause an assert.
define i31 @t1(i31 %a, i31 %b, i31 %c) {
diff --git a/test/CodeGen/X86/fast-isel-args-fail2.ll b/test/CodeGen/X86/fast-isel-args-fail2.ll
new file mode 100644
index 000000000000..08de472c2a54
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-args-fail2.ll
@@ -0,0 +1,10 @@
+; RUN: not --crash llc < %s -fast-isel -fast-isel-abort-args -mtriple=x86_64-apple-darwin10
+; REQUIRES: asserts
+
+%struct.s0 = type { x86_fp80, x86_fp80 }
+
+; FastISel cannot handle this case yet. Make sure that we abort.
+define i8* @args_fail(%struct.s0* byval nocapture readonly align 16 %y) {
+ %1 = bitcast %struct.s0* %y to i8*
+ ret i8* %1
+}
diff --git a/test/CodeGen/X86/fast-isel-args.ll b/test/CodeGen/X86/fast-isel-args.ll
index 0f3626565e7d..8c86a9cc01d6 100644
--- a/test/CodeGen/X86/fast-isel-args.ll
+++ b/test/CodeGen/X86/fast-isel-args.ll
@@ -23,3 +23,27 @@ entry:
%add2 = add nsw i64 %add, %conv1
ret i64 %add2
}
+
+define float @t4(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h) {
+entry:
+ %add1 = fadd float %a, %b
+ %add2 = fadd float %c, %d
+ %add3 = fadd float %e, %f
+ %add4 = fadd float %g, %h
+ %add5 = fadd float %add1, %add2
+ %add6 = fadd float %add3, %add4
+ %add7 = fadd float %add5, %add6
+ ret float %add7
+}
+
+define double @t5(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h) {
+entry:
+ %add1 = fadd double %a, %b
+ %add2 = fadd double %c, %d
+ %add3 = fadd double %e, %f
+ %add4 = fadd double %g, %h
+ %add5 = fadd double %add1, %add2
+ %add6 = fadd double %add3, %add4
+ %add7 = fadd double %add5, %add6
+ ret double %add7
+}
diff --git a/test/CodeGen/X86/fast-isel-branch_weights.ll b/test/CodeGen/X86/fast-isel-branch_weights.ll
new file mode 100644
index 000000000000..bc41395e1e83
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-branch_weights.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test whether the BBs are reordered according to their branch weights.
+define i64 @branch_weights_test(i64 %a, i64 %b) {
+; CHECK-LABEL: branch_weights_test
+; CHECK-LABEL: success
+; CHECK-LABEL: fail
+ %1 = icmp ult i64 %a, %b
+ br i1 %1, label %fail, label %success, !prof !0
+
+fail:
+ ret i64 -1
+
+success:
+ ret i64 0
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch2.ll b/test/CodeGen/X86/fast-isel-cmp-branch2.ll
new file mode 100644
index 000000000000..7e45c49f48f7
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp-branch2.ll
@@ -0,0 +1,294 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+define i32 @fcmp_oeq(float %x, float %y) {
+; CHECK-LABEL: fcmp_oeq
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_1}}
+; CHECK-NEXT: jnp {{LBB.+_2}}
+ %1 = fcmp oeq float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt(float %x, float %y) {
+; CHECK-LABEL: fcmp_ogt
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp ogt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge(float %x, float %y) {
+; CHECK-LABEL: fcmp_oge
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp oge float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt(float %x, float %y) {
+; CHECK-LABEL: fcmp_olt
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp olt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole(float %x, float %y) {
+; CHECK-LABEL: fcmp_ole
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp ole float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one(float %x, float %y) {
+; CHECK-LABEL: fcmp_one
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = fcmp one float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord(float %x, float %y) {
+; CHECK-LABEL: fcmp_ord
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno(float %x, float %y) {
+; CHECK-LABEL: fcmp_uno
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq(float %x, float %y) {
+; CHECK-LABEL: fcmp_ueq
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_2}}
+ %1 = fcmp ueq float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt(float %x, float %y) {
+; CHECK-LABEL: fcmp_ugt
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ugt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge(float %x, float %y) {
+; CHECK-LABEL: fcmp_uge
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp uge float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult(float %x, float %y) {
+; CHECK-LABEL: fcmp_ult
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ult float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule(float %x, float %y) {
+; CHECK-LABEL: fcmp_ule
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp ule float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une(float %x, float %y) {
+; CHECK-LABEL: fcmp_une
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_2}}
+; CHECK-NEXT: jp {{LBB.+_2}}
+; CHECK-NEXT: jmp {{LBB.+_1}}
+ %1 = fcmp une float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_eq
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jne {{LBB.+_1}}
+ %1 = icmp eq i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ne
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = icmp ne i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ugt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ugt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = icmp ugt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_uge(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_uge
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = icmp uge i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ult(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ult
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = icmp ult i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ule(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ule
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = icmp ule i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sgt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sgt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jle {{LBB.+_1}}
+ %1 = icmp sgt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sge(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sge
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jl {{LBB.+_1}}
+ %1 = icmp sge i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_slt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jge {{LBB.+_1}}
+ %1 = icmp slt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sle(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sle
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jg {{LBB.+_1}}
+ %1 = icmp sle i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch3.ll b/test/CodeGen/X86/fast-isel-cmp-branch3.ll
new file mode 100644
index 000000000000..a3f6851ca240
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp-branch3.ll
@@ -0,0 +1,470 @@
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+define i32 @fcmp_oeq1(float %x) {
+; CHECK-LABEL: fcmp_oeq1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp oeq float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oeq2(float %x) {
+; CHECK-LABEL: fcmp_oeq2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_1}}
+; CHECK-NEXT: jnp {{LBB.+_2}}
+ %1 = fcmp oeq float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt1(float %x) {
+; CHECK-LABEL: fcmp_ogt1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp ogt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt2(float %x) {
+; CHECK-LABEL: fcmp_ogt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp ogt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge1(float %x) {
+; CHECK-LABEL: fcmp_oge1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp oge float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge2(float %x) {
+; CHECK-LABEL: fcmp_oge2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp oge float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt1(float %x) {
+; CHECK-LABEL: fcmp_olt1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp olt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt2(float %x) {
+; CHECK-LABEL: fcmp_olt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp olt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole1(float %x) {
+; CHECK-LABEL: fcmp_ole1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ole float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole2(float %x) {
+; CHECK-LABEL: fcmp_ole2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp ole float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one1(float %x) {
+; CHECK-LABEL: fcmp_one1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp one float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one2(float %x) {
+; CHECK-LABEL: fcmp_one2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = fcmp one float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord1(float %x) {
+; CHECK-LABEL: fcmp_ord1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord2(float %x) {
+; CHECK-LABEL: fcmp_ord2
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno1(float %x) {
+; CHECK-LABEL: fcmp_uno1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno2(float %x) {
+; CHECK-LABEL: fcmp_uno2
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq1(float %x) {
+; CHECK-LABEL: fcmp_ueq1
+; CHECK-NOT: ucomiss
+ %1 = fcmp ueq float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq2(float %x) {
+; CHECK-LABEL: fcmp_ueq2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_2}}
+ %1 = fcmp ueq float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt1(float %x) {
+; CHECK-LABEL: fcmp_ugt1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp ugt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt2(float %x) {
+; CHECK-LABEL: fcmp_ugt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ugt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge1(float %x) {
+; CHECK-LABEL: fcmp_uge1
+; CHECK-NOT: ucomiss
+ %1 = fcmp uge float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge2(float %x) {
+; CHECK-LABEL: fcmp_uge2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp uge float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult1(float %x) {
+; CHECK-LABEL: fcmp_ult1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp ult float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult2(float %x) {
+; CHECK-LABEL: fcmp_ult2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ult float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule1(float %x) {
+; CHECK-LABEL: fcmp_ule1
+; CHECK-NOT: ucomiss
+ %1 = fcmp ule float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule2(float %x) {
+; CHECK-LABEL: fcmp_ule2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp ule float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une1(float %x) {
+; CHECK-LABEL: fcmp_une1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp une float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une2(float %x) {
+; CHECK-LABEL: fcmp_une2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_2}}
+; CHECK-NEXT: jp {{LBB.+_2}}
+; CHECK-NEXT: jmp {{LBB.+_1}}
+ %1 = fcmp une float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_eq(i32 %x) {
+; CHECK-LABEL: icmp_eq
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp eq i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ne(i32 %x) {
+; CHECK-LABEL: icmp_ne
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ne i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ugt(i32 %x) {
+; CHECK-LABEL: icmp_ugt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ugt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_uge(i32 %x) {
+; CHECK-LABEL: icmp_uge
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp uge i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ult(i32 %x) {
+; CHECK-LABEL: icmp_ult
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ult i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ule(i32 %x) {
+; CHECK-LABEL: icmp_ule
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp ule i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sgt(i32 %x) {
+; CHECK-LABEL: icmp_sgt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp sgt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sge(i32 %x) {
+; CHECK-LABEL: icmp_sge
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp sge i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_slt(i32 %x) {
+; CHECK-LABEL: icmp_slt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp slt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sle(i32 %x) {
+; CHECK-LABEL: icmp_sle
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp sle i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-cmp.ll b/test/CodeGen/X86/fast-isel-cmp.ll
new file mode 100644
index 000000000000..1b72cfcde657
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp.ll
@@ -0,0 +1,689 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=SDAG
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=FAST
+
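+; The SDAG prefix checks the default SelectionDAG lowering and the FAST prefix
+; checks FastISel (-fast-isel -fast-isel-abort); the *2/*3 variants further
+; down also cover compares of a value against itself and against zero.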
+define zeroext i1 @fcmp_oeq(float %x, float %y) {
+; SDAG-LABEL: fcmp_oeq
+; SDAG: cmpeqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_oeq
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+ %1 = fcmp oeq float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt(float %x, float %y) {
+; SDAG-LABEL: fcmp_ogt
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+ %1 = fcmp ogt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge(float %x, float %y) {
+; SDAG-LABEL: fcmp_oge
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_oge
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+ %1 = fcmp oge float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt(float %x, float %y) {
+; SDAG-LABEL: fcmp_olt
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_olt
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+ %1 = fcmp olt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole(float %x, float %y) {
+; SDAG-LABEL: fcmp_ole
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_ole
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+ %1 = fcmp ole float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one(float %x, float %y) {
+; SDAG-LABEL: fcmp_one
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; FAST-LABEL: fcmp_one
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+ %1 = fcmp one float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord(float %x, float %y) {
+; SDAG-LABEL: fcmp_ord
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno(float %x, float %y) {
+; SDAG-LABEL: fcmp_uno
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq(float %x, float %y) {
+; SDAG-LABEL: fcmp_ueq
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; FAST-LABEL: fcmp_ueq
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+ %1 = fcmp ueq float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt(float %x, float %y) {
+; SDAG-LABEL: fcmp_ugt
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ugt
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+ %1 = fcmp ugt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge(float %x, float %y) {
+; SDAG-LABEL: fcmp_uge
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_uge
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+ %1 = fcmp uge float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult(float %x, float %y) {
+; SDAG-LABEL: fcmp_ult
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ult
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+ %1 = fcmp ult float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule(float %x, float %y) {
+; SDAG-LABEL: fcmp_ule
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_ule
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+ %1 = fcmp ule float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une(float %x, float %y) {
+; SDAG-LABEL: fcmp_une
+; SDAG: cmpneqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_une
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+ %1 = fcmp une float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_eq
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: sete %al
+; FAST-LABEL: icmp_eq
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: sete %al
+ %1 = icmp eq i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ne
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setne %al
+; FAST-LABEL: icmp_ne
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setne %al
+ %1 = icmp ne i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ugt
+; SDAG: cmpl %edi, %esi
+; SDAG-NEXT: setb %al
+; FAST-LABEL: icmp_ugt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: seta %al
+ %1 = icmp ugt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_uge
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setae %al
+; FAST-LABEL: icmp_uge
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setae %al
+ %1 = icmp uge i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ult
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setb %al
+; FAST-LABEL: icmp_ult
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setb %al
+ %1 = icmp ult i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ule
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: icmp_ule
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setbe %al
+ %1 = icmp ule i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sgt
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setg %al
+; FAST-LABEL: icmp_sgt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setg %al
+ %1 = icmp sgt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sge
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setge %al
+; FAST-LABEL: icmp_sge
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setge %al
+ %1 = icmp sge i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_slt
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setl %al
+; FAST-LABEL: icmp_slt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setl %al
+ %1 = icmp slt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sle
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setle %al
+; FAST-LABEL: icmp_sle
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setle %al
+ %1 = icmp sle i32 %x, %y
+ ret i1 %1
+}
+
+; Test cmp folding and condition optimization.
+define zeroext i1 @fcmp_oeq2(float %x) {
+; SDAG-LABEL: fcmp_oeq2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_oeq2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp oeq float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oeq3(float %x) {
+; SDAG-LABEL: fcmp_oeq3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpeqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_oeq3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+ %1 = fcmp oeq float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt2(float %x) {
+; SDAG-LABEL: fcmp_ogt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_ogt2
+; FAST: xorl %eax, %eax
+ %1 = fcmp ogt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt3(float %x) {
+; SDAG-LABEL: fcmp_ogt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_ogt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+ %1 = fcmp ogt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge2(float %x) {
+; SDAG-LABEL: fcmp_oge2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_oge2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp oge float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge3(float %x) {
+; SDAG-LABEL: fcmp_oge3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_oge3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+ %1 = fcmp oge float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt2(float %x) {
+; SDAG-LABEL: fcmp_olt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_olt2
+; FAST: xorl %eax, %eax
+ %1 = fcmp olt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt3(float %x) {
+; SDAG-LABEL: fcmp_olt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_olt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+ %1 = fcmp olt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole2(float %x) {
+; SDAG-LABEL: fcmp_ole2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ole2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ole float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole3(float %x) {
+; SDAG-LABEL: fcmp_ole3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_ole3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+ %1 = fcmp ole float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one2(float %x) {
+; SDAG-LABEL: fcmp_one2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_one2
+; FAST: xorl %eax, %eax
+ %1 = fcmp one float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one3(float %x) {
+; SDAG-LABEL: fcmp_one3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; FAST-LABEL: fcmp_one3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+ %1 = fcmp one float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord2(float %x) {
+; SDAG-LABEL: fcmp_ord2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord3(float %x) {
+; SDAG-LABEL: fcmp_ord3
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord3
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno2(float %x) {
+; SDAG-LABEL: fcmp_uno2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno3(float %x) {
+; SDAG-LABEL: fcmp_uno3
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno3
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq2(float %x) {
+; SDAG-LABEL: fcmp_ueq2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_ueq2
+; FAST: movb $1, %al
+ %1 = fcmp ueq float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq3(float %x) {
+; SDAG-LABEL: fcmp_ueq3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; FAST-LABEL: fcmp_ueq3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+ %1 = fcmp ueq float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt2(float %x) {
+; SDAG-LABEL: fcmp_ugt2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_ugt2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp ugt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt3(float %x) {
+; SDAG-LABEL: fcmp_ugt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ugt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+ %1 = fcmp ugt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge2(float %x) {
+; SDAG-LABEL: fcmp_uge2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_uge2
+; FAST: movb $1, %al
+ %1 = fcmp uge float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge3(float %x) {
+; SDAG-LABEL: fcmp_uge3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_uge3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+ %1 = fcmp uge float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult2(float %x) {
+; SDAG-LABEL: fcmp_ult2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_ult2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp ult float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult3(float %x) {
+; SDAG-LABEL: fcmp_ult3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ult3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+ %1 = fcmp ult float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule2(float %x) {
+; SDAG-LABEL: fcmp_ule2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_ule2
+; FAST: movb $1, %al
+ %1 = fcmp ule float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule3(float %x) {
+; SDAG-LABEL: fcmp_ule3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_ule3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+ %1 = fcmp ule float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une2(float %x) {
+; SDAG-LABEL: fcmp_une2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_une2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp une float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une3(float %x) {
+; SDAG-LABEL: fcmp_une3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpneqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_une3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+ %1 = fcmp une float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_eq2(i32 %x) {
+; SDAG-LABEL: icmp_eq2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_eq2
+; FAST: movb $1, %al
+ %1 = icmp eq i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ne2(i32 %x) {
+; SDAG-LABEL: icmp_ne2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ne2
+; FAST: xorl %eax, %eax
+ %1 = icmp ne i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ugt2(i32 %x) {
+; SDAG-LABEL: icmp_ugt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ugt2
+; FAST: xorl %eax, %eax
+ %1 = icmp ugt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_uge2(i32 %x) {
+; SDAG-LABEL: icmp_uge2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_uge2
+; FAST: movb $1, %al
+ %1 = icmp uge i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ult2(i32 %x) {
+; SDAG-LABEL: icmp_ult2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ult2
+; FAST: xorl %eax, %eax
+ %1 = icmp ult i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ule2(i32 %x) {
+; SDAG-LABEL: icmp_ule2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_ule2
+; FAST: movb $1, %al
+ %1 = icmp ule i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sgt2(i32 %x) {
+; SDAG-LABEL: icmp_sgt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_sgt2
+; FAST: xorl %eax, %eax
+ %1 = icmp sgt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sge2(i32 %x) {
+; SDAG-LABEL: icmp_sge2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_sge2
+; FAST: movb $1, %al
+ %1 = icmp sge i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_slt2(i32 %x) {
+; SDAG-LABEL: icmp_slt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_slt2
+; FAST: xorl %eax, %eax
+ %1 = icmp slt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sle2(i32 %x) {
+; SDAG-LABEL: icmp_sle2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_sle2
+; FAST: movb $1, %al
+ %1 = icmp sle i32 %x, %x
+ ret i1 %1
+}
+
diff --git a/test/CodeGen/X86/fast-isel-fold-mem.ll b/test/CodeGen/X86/fast-isel-fold-mem.ll
new file mode 100644
index 000000000000..a94577962e91
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-fold-mem.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin | FileCheck %s
+
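+; The load of %a should be folded into the add as a memory operand instead of
+; being emitted as a separate movq.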
+define i64 @fold_load(i64* %a, i64 %b) {
+; CHECK-LABEL: fold_load
+; CHECK: addq (%rdi), %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = load i64* %a, align 8
+ %2 = add i64 %1, %b
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmov.ll b/test/CodeGen/X86/fast-isel-select-cmov.ll
new file mode 100644
index 000000000000..8008e283ad60
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmov.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test conditional move for the supported types (i16, i32, and i64) and
+; condition input (argument or cmp). Currently i8 is not supported.
+
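+; When the condition comes from a cmp in the same block, the flags feed the
+; cmov directly instead of being materialized with a setcc first.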
+define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: select_cmov_i16
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmovew %dx, %si
+; CHECK-NEXT: movzwl %si, %eax
+ %1 = select i1 %cond, i16 %a, i16 %b
+ ret i16 %1
+}
+
+define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: select_cmp_cmov_i16
+; CHECK: cmpw %si, %di
+; CHECK-NEXT: cmovbw %di, %si
+; CHECK-NEXT: movzwl %si, %eax
+ %1 = icmp ult i16 %a, %b
+ %2 = select i1 %1, i16 %a, i16 %b
+ ret i16 %2
+}
+
+define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmov_i32
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmovel %edx, %esi
+; CHECK-NEXT: movl %esi, %eax
+ %1 = select i1 %cond, i32 %a, i32 %b
+ ret i32 %1
+}
+
+define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmp_cmov_i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovbl %edi, %esi
+; CHECK-NEXT: movl %esi, %eax
+ %1 = icmp ult i32 %a, %b
+ %2 = select i1 %1, i32 %a, i32 %b
+ ret i32 %2
+}
+
+define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
+; CHECK-LABEL: select_cmov_i64
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmoveq %rdx, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = select i1 %cond, i64 %a, i64 %b
+ ret i64 %1
+}
+
+define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: select_cmp_cmov_i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rdi, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, i64 %a, i64 %b
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmov2.ll b/test/CodeGen/X86/fast-isel-select-cmov2.ll
new file mode 100644
index 000000000000..658098fe7c7a
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmov2.ll
@@ -0,0 +1,255 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+
+; Test all the cmp predicates that can feed an integer conditional move.
+
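+; The cmov condition is inverted: the false operand is moved over the true
+; value when the predicate does not hold.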
+define i64 @select_fcmp_false_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_false_cmov
+; CHECK: movq %rsi, %rax
+; CHECK-NEXT: retq
+ %1 = fcmp false double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_oeq_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: setnp %al
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: testb %al, %cl
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp oeq double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ogt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ogt_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovbeq %rsi, %rdi
+ %1 = fcmp ogt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_oge_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_oge_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovbq %rsi, %rdi
+ %1 = fcmp oge double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_olt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_olt_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovbeq %rsi, %rdi
+ %1 = fcmp olt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ole_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ole_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovbq %rsi, %rdi
+ %1 = fcmp ole double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_one_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_one_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp one double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ord_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ord_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovpq %rsi, %rdi
+ %1 = fcmp ord double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_uno_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_uno_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovnpq %rsi, %rdi
+ %1 = fcmp uno double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ueq_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ueq_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovneq %rsi, %rdi
+ %1 = fcmp ueq double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ugt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ugt_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovaeq %rsi, %rdi
+ %1 = fcmp ugt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_uge_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_uge_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovaq %rsi, %rdi
+ %1 = fcmp uge double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ult_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ult_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovaeq %rsi, %rdi
+ %1 = fcmp ult double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ule_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ule_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovaq %rsi, %rdi
+ %1 = fcmp ule double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_une_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: setp %al
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: orb %al, %cl
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp une double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_true_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_true_cmov
+; CHECK: movq %rdi, %rax
+ %1 = fcmp true double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_eq_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_eq_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovneq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp eq i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ne_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ne_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmoveq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ne i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ugt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ugt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ugt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+
+define i64 @select_icmp_uge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_uge_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp uge i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ult_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ult_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovaeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ule_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ule_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovaq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ule i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sgt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sgt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovleq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sgt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sge_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovlq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sge i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_slt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_slt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovgeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp slt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sle_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sle_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovgq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sle i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmp.ll b/test/CodeGen/X86/fast-isel-select-cmp.ll
new file mode 100644
index 000000000000..1af30e9f32fe
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmp.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test that the cmp is not folded into the select when the instructions are
+; in different basic blocks.
+
+define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmp_cmov_i32
+; CHECK-LABEL: continue
+; CHECK-NOT: cmp
+ %1 = icmp ult i32 %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, i32 %a, i32 %b
+ ret i32 %2
+
+exit:
+ ret i32 -1
+}
+
+define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oeq_f32
+; CHECK-LABEL: continue
+; CHECK-NOT: cmp
+ %1 = fcmp oeq float %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+
+exit:
+ ret float -1.0
+}
+
+define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_one_f32
+; CHECK-LABEL: continue
+; CHECK-NOT: ucomi
+ %1 = fcmp one float %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+
+exit:
+ ret float -1.0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
new file mode 100644
index 000000000000..1ec4d64fe209
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
@@ -0,0 +1,138 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort -mcpu=corei7-avx | FileCheck %s
+
+
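+; These selects produce floating-point results, so they are expanded from the
+; CMOV pseudo instructions into a compare-and-branch sequence rather than a
+; real cmov.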
+define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_one_f32
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: movaps %xmm2, %xmm0
+ %1 = fcmp one float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_one_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_one_f64
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: movaps %xmm2, %xmm0
+ %1 = fcmp one double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_icmp_eq_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_eq_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: je [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp eq i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ne_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ne_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ne i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ugt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ugt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: ja [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ugt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_uge_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_uge_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jae [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp uge i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ult_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ult_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jb [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ule_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ule_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jbe [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ule i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sgt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sgt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jg [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sgt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sge_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sge_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jge [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sge i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_slt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_slt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jl [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp slt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sle_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sle_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jle [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sle i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-sse.ll b/test/CodeGen/X86/fast-isel-select-sse.ll
new file mode 100644
index 000000000000..3c03a0312f5e
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-sse.ll
@@ -0,0 +1,391 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+
+; Test all cmp predicates that can be used with SSE.
+
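+; Each select is lowered to a compare that produces a mask, followed by
+; and/andn/or to blend the two operands (the v-prefixed forms with AVX).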
+define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oeq_f32
+; CHECK: cmpeqss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_oeq_f32
+; AVX: vcmpeqss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp oeq float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_oeq_f64
+; CHECK: cmpeqsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_oeq_f64
+; AVX: vcmpeqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp oeq double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ogt_f32
+; CHECK: cmpltss %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ogt_f32
+; AVX: vcmpltss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ogt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ogt_f64
+; CHECK: cmpltsd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ogt_f64
+; AVX: vcmpltsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ogt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oge_f32
+; CHECK: cmpless %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_oge_f32
+; AVX: vcmpless %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp oge float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_oge_f64
+; CHECK: cmplesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_oge_f64
+; AVX: vcmplesd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp oge double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_olt_f32
+; CHECK: cmpltss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_olt_f32
+; AVX: vcmpltss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp olt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_olt_f64
+; CHECK: cmpltsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_olt_f64
+; AVX: vcmpltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp olt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ole_f32
+; CHECK: cmpless %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ole_f32
+; AVX: vcmpless %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ole float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ole_f64
+; CHECK: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ole_f64
+; AVX: vcmplesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ole double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ord_f32
+; CHECK: cmpordss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ord_f32
+; AVX: vcmpordss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ord float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ord_f64
+; CHECK: cmpordsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ord_f64
+; AVX: vcmpordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ord double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_uno_f32
+; CHECK: cmpunordss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uno_f32
+; AVX: vcmpunordss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp uno float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_uno_f64
+; CHECK: cmpunordsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uno_f64
+; AVX: vcmpunordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp uno double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ugt_f32
+; CHECK: cmpnless %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ugt_f32
+; AVX: vcmpnless %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ugt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ugt_f64
+; CHECK: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ugt_f64
+; AVX: vcmpnlesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ugt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_uge_f32
+; CHECK: cmpnltss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uge_f32
+; AVX: vcmpnltss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp uge float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_uge_f64
+; CHECK: cmpnltsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uge_f64
+; AVX: vcmpnltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp uge double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ult_f32
+; CHECK: cmpnless %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ult_f32
+; AVX: vcmpnless %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ult float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ult_f64
+; CHECK: cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ult_f64
+; AVX: vcmpnlesd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ult double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ule_f32
+; CHECK: cmpnltss %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ule_f32
+; AVX: vcmpnltss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ule float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ule_f64
+; CHECK: cmpnltsd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ule_f64
+; AVX: vcmpnltsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ule double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_une_f32
+; CHECK: cmpneqss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_une_f32
+; AVX: vcmpneqss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp une float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_une_f64
+; CHECK: cmpneqsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_une_f64
+; AVX: vcmpneqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp une double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select.ll b/test/CodeGen/X86/fast-isel-select.ll
new file mode 100644
index 000000000000..7b3c99f13cca
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple x86_64-apple-darwin -O0 -o - < %s | FileCheck %s
+; Make sure we only use the least significant bit of the value that feeds the
+; select. Otherwise, we may treat the value as non-zero even though its
+; lsb is zero.
+; <rdar://problem/15651765>
+
+; CHECK-LABEL: fastisel_select:
+; CHECK: subb {{%[a-z0-9]+}}, [[RES:%[a-z0-9]+]]
+; CHECK: testb $1, [[RES]]
+; CHECK: cmovnel %edi, %esi
+define i32 @fastisel_select(i1 %exchSub2211_, i1 %trunc_8766) {
+ %shuffleInternal15257_8932 = sub i1 %exchSub2211_, %trunc_8766
+ %counter_diff1345 = select i1 %shuffleInternal15257_8932, i32 1204476887, i32 0
+ ret i32 %counter_diff1345
+}
+
diff --git a/test/CodeGen/X86/fast-isel-sse12-fptoint.ll b/test/CodeGen/X86/fast-isel-sse12-fptoint.ll
new file mode 100644
index 000000000000..769c987e604a
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-sse12-fptoint.ll
@@ -0,0 +1,54 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=AVX
+
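+; The cvttss2si/cvttsd2si intrinsics should select the scalar truncating
+; conversion instructions directly (vcvtt* with AVX).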
+define i32 @cvt_test1(float %a) {
+; SSE-LABEL: cvt_test1
+; SSE: cvttss2si %xmm0, %eax
+; AVX-LABEL: cvt_test1
+; AVX: vcvttss2si %xmm0, %eax
+ %1 = insertelement <4 x float> undef, float %a, i32 0
+ %2 = insertelement <4 x float> %1, float 0.000000e+00, i32 1
+ %3 = insertelement <4 x float> %2, float 0.000000e+00, i32 2
+ %4 = insertelement <4 x float> %3, float 0.000000e+00, i32 3
+ %5 = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %4)
+ ret i32 %5
+}
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
+
+define i64 @cvt_test2(float %a) {
+; SSE-LABEL: cvt_test2
+; SSE: cvttss2si %xmm0, %rax
+; AVX-LABEL: cvt_test2
+; AVX: vcvttss2si %xmm0, %rax
+ %1 = insertelement <4 x float> undef, float %a, i32 0
+ %2 = insertelement <4 x float> %1, float 0.000000e+00, i32 1
+ %3 = insertelement <4 x float> %2, float 0.000000e+00, i32 2
+ %4 = insertelement <4 x float> %3, float 0.000000e+00, i32 3
+ %5 = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %4)
+ ret i64 %5
+}
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
+
+define i32 @cvt_test3(double %a) {
+; SSE-LABEL: cvt_test3
+; SSE: cvttsd2si %xmm0, %eax
+; AVX-LABEL: cvt_test3
+; AVX: vcvttsd2si %xmm0, %eax
+ %1 = insertelement <2 x double> undef, double %a, i32 0
+ %2 = insertelement <2 x double> %1, double 0.000000e+00, i32 1
+ %3 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %2)
+ ret i32 %3
+}
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
+
+define i64 @cvt_test4(double %a) {
+; SSE-LABEL: cvt_test4
+; SSE: cvttsd2si %xmm0, %rax
+; AVX-LABEL: cvt_test4
+; AVX: vcvttsd2si %xmm0, %rax
+ %1 = insertelement <2 x double> undef, double %a, i32 0
+ %2 = insertelement <2 x double> %1, double 0.000000e+00, i32 1
+ %3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %2)
+ ret i64 %3
+}
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
diff --git a/test/CodeGen/X86/fast-isel-x86.ll b/test/CodeGen/X86/fast-isel-x86.ll
index ba86e888cdde..a212a7c6876e 100644
--- a/test/CodeGen/X86/fast-isel-x86.ll
+++ b/test/CodeGen/X86/fast-isel-x86.ll
@@ -3,7 +3,7 @@
; This should use flds to set the return value.
; CHECK-LABEL: test0:
; CHECK: flds
-; CHECK: ret
+; CHECK: retl
@G = external global float
define float @test0() nounwind {
%t = load float* @G
@@ -12,7 +12,7 @@ define float @test0() nounwind {
; This should pop 4 bytes on return.
; CHECK-LABEL: test1:
-; CHECK: ret $4
+; CHECK: retl $4
define void @test1({i32, i32, i32, i32}* sret %p) nounwind {
store {i32, i32, i32, i32} zeroinitializer, {i32, i32, i32, i32}* %p
ret void
@@ -25,7 +25,7 @@ define void @test1({i32, i32, i32, i32}* sret %p) nounwind {
; CHECK-NEXT: L2$pb:
; CHECK-NEXT: pop
; CHECK: HHH
-; CHECK: ret
+; CHECK: retl
@HHH = external global i32
define i32 @test2() nounwind {
%t = load i32* @HHH
diff --git a/test/CodeGen/X86/fast-isel.ll b/test/CodeGen/X86/fast-isel.ll
index 132df2b0ab43..bc7918421603 100644
--- a/test/CodeGen/X86/fast-isel.ll
+++ b/test/CodeGen/X86/fast-isel.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -fast-isel -fast-isel-abort -verify-machineinstrs -march=x86 -mattr=sse2
-; RUN: llc < %s -fast-isel -fast-isel-abort -verify-machineinstrs -mtriple=x86_64-apple-darwin10
+; RUN: llc < %s -fast-isel -fast-isel-abort -verify-machineinstrs -march=x86 -mattr=sse2 -no-integrated-as
+; RUN: llc < %s -fast-isel -fast-isel-abort -verify-machineinstrs -mtriple=x86_64-apple-darwin10 -no-integrated-as
; This tests very minimal fast-isel functionality.
diff --git a/test/CodeGen/X86/fastcall-correct-mangling.ll b/test/CodeGen/X86/fastcall-correct-mangling.ll
index 3569d36541f7..00dc44e75e8f 100644
--- a/test/CodeGen/X86/fastcall-correct-mangling.ll
+++ b/test/CodeGen/X86/fastcall-correct-mangling.ll
@@ -1,14 +1,33 @@
-; RUN: llc < %s -mtriple=i386-unknown-mingw32 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown-mingw32 | \
+; RUN: FileCheck --check-prefix=CHECK32 %s
+
+; RUN: llc < %s -mtriple=i386-unknown-win32 | \
+; RUN: FileCheck --check-prefix=CHECK32 %s
+
+; RUN: llc < %s -mtriple=x86_64-unknown-mingw32 | \
+; RUN: FileCheck --check-prefix=CHECK64 %s
+
+; RUN: llc < %s -mtriple=x86_64-unknown-mingw32 | \
+; RUN: FileCheck --check-prefix=CHECK64 %s
; Check that a fastcall function gets correct mangling
define x86_fastcallcc void @func(i64 %X, i8 %Y, i8 %G, i16 %Z) {
-; CHECK: @func@20:
+; CHECK32-LABEL: {{^}}@func@20:
+; CHECK64-LABEL: {{^}}func:
ret void
}
define x86_fastcallcc i32 @"\01DoNotMangle"(i32 %a) {
-; CHECK: DoNotMangle:
+; CHECK32-LABEL: {{^}}DoNotMangle:
+; CHECK64-LABEL: {{^}}DoNotMangle:
entry:
ret i32 %a
}
+
+define private x86_fastcallcc void @dontCrash() {
+; The name is fairly arbitrary since it is private. Just don't crash.
+; CHECK32-LABEL: {{^}}L@dontCrash@0:
+; CHECK64-LABEL: {{^}}.LdontCrash:
+ ret void
+}
diff --git a/test/CodeGen/X86/float-asmprint.ll b/test/CodeGen/X86/float-asmprint.ll
index 4aeae7fe0469..5de9700fc064 100644
--- a/test/CodeGen/X86/float-asmprint.ll
+++ b/test/CodeGen/X86/float-asmprint.ll
@@ -16,8 +16,9 @@
; CHECK-NEXT: .size
; CHECK: varppc128:
-; CHECK-NEXT: .quad 0 # ppc_fp128 -0
-; CHECK-NEXT: .quad -9223372036854775808
+; For ppc_fp128, the high double always comes first.
+; CHECK-NEXT: .quad -9223372036854775808 # ppc_fp128 -0
+; CHECK-NEXT: .quad 0
; CHECK-NEXT: .size
; CHECK: var80:
diff --git a/test/CodeGen/X86/fma-do-not-commute.ll b/test/CodeGen/X86/fma-do-not-commute.ll
new file mode 100644
index 000000000000..4e211721a382
--- /dev/null
+++ b/test/CodeGen/X86/fma-do-not-commute.ll
@@ -0,0 +1,30 @@
+; RUN: llc -fp-contract=fast -mattr=+fma -disable-cgp < %s -o - | FileCheck %s
+; Check that the 2nd and 3rd arguments of fmaXXX231 reg1, reg2, mem3 are not commuted.
+; <rdar://problem/16800495>
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+; CHECK-LABEL: test1:
+; %arg lives in xmm0 and it shouldn't be redefined until it is used in the FMA.
+; CHECK-NOT: {{.*}}, %xmm0
+; %addr lives in rdi.
+; %addr2 lives in rsi.
+; CHECK: vmovss (%rsi), [[ADDR2:%xmm[0-9]+]]
+; AT&T assembly syntax lists the operands in reverse order.
+; CHECK: vfmadd231ss (%rdi), [[ADDR2]], %xmm0
+define void @test1(float* %addr, float* %addr2, float %arg) {
+entry:
+ br label %loop
+
+loop:
+ %sum0 = phi float [ %fma, %loop ], [ %arg, %entry ]
+ %addrVal = load float* %addr, align 4
+ %addr2Val = load float* %addr2, align 4
+ %fmul = fmul float %addrVal, %addr2Val
+ %fma = fadd float %sum0, %fmul
+ br i1 true, label %exit, label %loop
+
+exit:
+ store float %fma, float* %addr, align 4
+ ret void
+}
diff --git a/test/CodeGen/X86/fma.ll b/test/CodeGen/X86/fma.ll
index 917eac0ca32d..2eb152b078ef 100644
--- a/test/CodeGen/X86/fma.ll
+++ b/test/CodeGen/X86/fma.ll
@@ -42,6 +42,39 @@ entry:
ret float %call
}
+; Test FMA3 variant selection
+; CHECK-FMA-INST: fma3_select231ssX:
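+; The accumulator is live across the loop in the destination register, so the
+; 231 form, which accumulates into its destination, should be selected.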
+; CHECK-FMA-INST: vfmadd231ss %xmm
+define float @fma3_select231ssX(float %x, float %y) #0 {
+entry:
+ br label %while.body
+while.body: ; preds = %while.body, %while.body
+ %acc.01 = phi float [ 0.000000e+00, %entry ], [ %acc, %while.body ]
+ %acc = tail call float @llvm.fma.f32(float %x, float %y, float %acc.01) nounwind readnone
+ %b = fcmp ueq float %acc, 0.0
+ br i1 %b, label %while.body, label %while.end
+while.end: ; preds = %while.body, %entry
+ ret float %acc
+}
+
+; Test FMA3 variant selection
+; CHECK-FMA-INST: fma3_select231pdY:
+; CHECK-FMA-INST: vfmadd231pd %ymm
+define <4 x double> @fma3_select231pdY(<4 x double> %x, <4 x double> %y) #0 {
+entry:
+ br label %while.body
+while.body: ; preds = %entry, %while.body
+ %acc.04 = phi <4 x double> [ zeroinitializer, %entry ], [ %add, %while.body ]
+ %add = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %acc.04)
+ %vecext = extractelement <4 x double> %add, i32 0
+ %cmp = fcmp oeq double %vecext, 0.000000e+00
+ br i1 %cmp, label %while.body, label %while.end
+
+while.end: ; preds = %while.body
+ ret <4 x double> %add
+}
+
declare float @llvm.fma.f32(float, float, float) nounwind readnone
declare double @llvm.fma.f64(double, double, double) nounwind readnone
declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) nounwind readnone
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
diff --git a/test/CodeGen/X86/fma3-intrinsics.ll b/test/CodeGen/X86/fma3-intrinsics.ll
index e3910a6935c4..9a25096c7a52 100755
--- a/test/CodeGen/X86/fma3-intrinsics.ll
+++ b/test/CodeGen/X86/fma3-intrinsics.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -mcpu=bdver2 -mtriple=x86_64-pc-win32 -mattr=-fma4 | FileCheck %s
define <4 x float> @test_x86_fmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
- ; CHECK: fmadd213ss %xmm
+ ; CHECK: fmadd213ss (%r8), %xmm
%res = call <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) nounwind
ret <4 x float> %res
}
@@ -24,7 +24,7 @@ define <8 x float> @test_x86_fmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x f
declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
define <4 x float> @test_x86_fnmadd_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
- ; CHECK: fnmadd213ss %xmm
+ ; CHECK: fnmadd213ss (%r8), %xmm
%res = call <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) nounwind
ret <4 x float> %res
}
diff --git a/test/CodeGen/X86/fold-call-oper.ll b/test/CodeGen/X86/fold-call-oper.ll
new file mode 100644
index 000000000000..94e2a6f70506
--- /dev/null
+++ b/test/CodeGen/X86/fold-call-oper.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+;
+; PR18396: Assertion: MO->isDead "Cannot fold physreg def".
+; InlineSpiller::foldMemoryOperand needs to handle undef call operands.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@a = external global i32**, align 8
+@b = external global i32, align 4
+
+; Check that the call targets are folded, and we don't crash!
+; CHECK-LABEL: foldCallOper:
+; CHECK: callq *{{.*}}(%rbp)
+; CHECK: callq *{{.*}}(%rbp)
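+;
+; A brief reading of the checks above (an interpretation, not asserted by the
+; test): under register pressure the spilled function-pointer argument %p1 is
+; not reloaded into a register first; its stack slot is folded directly into
+; the indirect call, giving "callq *<slot>(%rbp)", and the undef operands
+; exercise the InlineSpiller::foldMemoryOperand path from PR18396.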
+define void @foldCallOper(i32 (i32*, i32, i32**)* nocapture %p1) #0 {
+entry:
+ %0 = load i32*** @a, align 8
+ br label %for.body.i
+
+for.body.i: ; preds = %for.body.i, %entry
+ %exitcond5.i = icmp eq i32 undef, undef
+ br i1 %exitcond5.i, label %for.body3.lr.ph.i, label %for.body.i
+
+for.body3.lr.ph.i: ; preds = %for.body.i
+ %call.i = tail call i32 %p1(i32* undef, i32 0, i32** null)
+ %tobool.i = icmp eq i32 %call.i, 0
+ br label %for.body3.i
+
+for.body3.i: ; preds = %for.inc8.i, %for.body3.lr.ph.i
+ %1 = phi i32* [ undef, %for.body3.lr.ph.i ], [ %.pre.i, %for.inc8.i ]
+ %indvars.iv.i = phi i64 [ 1, %for.body3.lr.ph.i ], [ %phitmp.i, %for.inc8.i ]
+ %call5.i = tail call i32 %p1(i32* %1, i32 0, i32** %0)
+ br i1 %tobool.i, label %for.inc8.i, label %if.then.i
+
+if.then.i: ; preds = %for.body3.i
+ %2 = load i32* %1, align 4
+ store i32 %2, i32* @b, align 4
+ br label %for.inc8.i
+
+for.inc8.i: ; preds = %if.then.i, %for.body3.i
+ %lftr.wideiv.i = trunc i64 %indvars.iv.i to i32
+ %arrayidx4.phi.trans.insert.i = getelementptr inbounds [0 x i32*]* undef, i64 0, i64 %indvars.iv.i
+ %.pre.i = load i32** %arrayidx4.phi.trans.insert.i, align 8
+ %phitmp.i = add i64 %indvars.iv.i, 1
+ br label %for.body3.i
+}
+
+attributes #0 = { noreturn uwtable "no-frame-pointer-elim"="true" }
diff --git a/test/CodeGen/X86/fold-load-vec.ll b/test/CodeGen/X86/fold-load-vec.ll
index e85d8f78c052..96c5be4f752f 100644
--- a/test/CodeGen/X86/fold-load-vec.ll
+++ b/test/CodeGen/X86/fold-load-vec.ll
@@ -5,7 +5,7 @@
; loads from m32.
define void @sample_test(<4 x float>* %source, <2 x float>* %dest) nounwind {
; CHECK: sample_test
-; CHECK: movaps
+; CHECK-NOT: movaps
; CHECK: insertps
entry:
%source.addr = alloca <4 x float>*, align 8
diff --git a/test/CodeGen/X86/fold-vector-sext-crash.ll b/test/CodeGen/X86/fold-vector-sext-crash.ll
new file mode 100644
index 000000000000..52ea7a912b9f
--- /dev/null
+++ b/test/CodeGen/X86/fold-vector-sext-crash.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mcpu=core-avx-i -mtriple=i386-unknown-linux-gnu -mattr=+avx,+popcnt,+cmov
+
+; Make sure that we don't introduce illegal build_vector dag nodes
+; when trying to fold a sign_extend of a constant build_vector.
+; After r200234 the test case below was crashing the compiler with an assertion failure
+; due to an illegal build_vector of type MVT::v4i64.
+
+define <4 x i64> @foo(<4 x i64> %A) {
+ %1 = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x i64> %A, <4 x i64><i64 undef, i64 undef, i64 0, i64 0>
+ ret <4 x i64> %1
+}
+
diff --git a/test/CodeGen/X86/fold-vector-sext-zext.ll b/test/CodeGen/X86/fold-vector-sext-zext.ll
new file mode 100644
index 000000000000..aeaab4479085
--- /dev/null
+++ b/test/CodeGen/X86/fold-vector-sext-zext.ll
@@ -0,0 +1,291 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; Verify that the backend correctly folds a sign/zero extend of a vector whose
+; elements are all constant values or UNDEFs.
+; The backend should be able to optimize each of the test functions below into
+; a simple load from the constant pool, because the resulting vector is known
+; at compile time.
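+;
+; Worked example for the first test below (by hand, not checked by FileCheck):
+; test1 builds the constant <4 x i8> <0, -1, 2, -3>, and sign-extending it
+; yields the compile-time constant <4 x i16> <0, -1, 2, -3>, so a single
+; constant-pool load (the vmovaps checked below) is enough to materialize the
+; result.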
+
+
+define <4 x i16> @test1() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test1
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i16> @test2() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test2
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test3() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test3
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test4() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test4
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+
+define <4 x i64> @test5() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test5
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i64> @test6() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test6
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test7() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test7
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test8() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test8
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test9() {
+ %1 = insertelement <8 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 undef, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 undef, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test9
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test10() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 undef, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 undef, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 undef, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 undef, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+
+define <4 x i16> @test11() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test11
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test12() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test12
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i64> @test13() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i16> @test14() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test14
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test15() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 undef, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 undef, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test15
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i64> @test16() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 undef, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test17() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test17
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test18() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test18
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test19() {
+ %1 = insertelement <8 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 undef, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 undef, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test19
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test20() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 undef, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 undef, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 undef, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test20
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
diff --git a/test/CodeGen/X86/fold-xmm-zero.ll b/test/CodeGen/X86/fold-xmm-zero.ll
index b4eeb4098384..c92d45c35ae9 100644
--- a/test/CodeGen/X86/fold-xmm-zero.ll
+++ b/test/CodeGen/X86/fold-xmm-zero.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-macosx10.6.7 -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-macosx10.6.7 -mattr=+sse2 -no-integrated-as | FileCheck %s
; Simple test to make sure folding for special constants (like float zero)
; isn't completely broken.
diff --git a/test/CodeGen/X86/frameaddr.ll b/test/CodeGen/X86/frameaddr.ll
new file mode 100644
index 000000000000..6c1ca252bb97
--- /dev/null
+++ b/test/CodeGen/X86/frameaddr.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -march=x86 | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -march=x86 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -march=x86-64 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-64
+
+define i8* @test1() nounwind {
+entry:
+; CHECK-32-LABEL: test1
+; CHECK-32: push
+; CHECK-32-NEXT: movl %esp, %ebp
+; CHECK-32-NEXT: movl %ebp, %eax
+; CHECK-32-NEXT: pop
+; CHECK-32-NEXT: ret
+; CHECK-64-LABEL: test1
+; CHECK-64: push
+; CHECK-64-NEXT: movq %rsp, %rbp
+; CHECK-64-NEXT: movq %rbp, %rax
+; CHECK-64-NEXT: pop
+; CHECK-64-NEXT: ret
+ %0 = tail call i8* @llvm.frameaddress(i32 0)
+ ret i8* %0
+}
+
+define i8* @test2() nounwind {
+entry:
+; CHECK-32-LABEL: test2
+; CHECK-32: push
+; CHECK-32-NEXT: movl %esp, %ebp
+; CHECK-32-NEXT: movl (%ebp), %eax
+; CHECK-32-NEXT: movl (%eax), %eax
+; CHECK-32-NEXT: pop
+; CHECK-32-NEXT: ret
+; CHECK-64-LABEL: test2
+; CHECK-64: push
+; CHECK-64-NEXT: movq %rsp, %rbp
+; CHECK-64-NEXT: movq (%rbp), %rax
+; CHECK-64-NEXT: movq (%rax), %rax
+; CHECK-64-NEXT: pop
+; CHECK-64-NEXT: ret
+ %0 = tail call i8* @llvm.frameaddress(i32 2)
+ ret i8* %0
+}
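+;
+; A short reading of the checked sequence for test2 (illustrative):
+; llvm.frameaddress(i32 2) walks two levels up the chain of saved frame
+; pointers, so after establishing %rbp the code loads the caller's saved frame
+; pointer from (%rbp) and then the caller's caller's from that, which is
+; exactly the pair of dependent loads checked above.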
+
+declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/gcc_except_table.ll b/test/CodeGen/X86/gcc_except_table.ll
index fcc4e9f0b89b..a732eb1efbd7 100644
--- a/test/CodeGen/X86/gcc_except_table.ll
+++ b/test/CodeGen/X86/gcc_except_table.ll
@@ -1,12 +1,35 @@
-; RUN: llc -mtriple x86_64-apple-darwin %s -o - | FileCheck %s
+; RUN: llc -mtriple x86_64-apple-darwin %s -o - | FileCheck %s --check-prefix=APPLE
+; RUN: llc -mtriple x86_64-pc-windows-gnu %s -o - | FileCheck %s --check-prefix=MINGW64
+; RUN: llc -mtriple i686-pc-windows-gnu %s -o - | FileCheck %s --check-prefix=MINGW32
@_ZTIi = external constant i8*
define i32 @main() uwtable optsize ssp {
-; CHECK: .cfi_startproc
-; CHECK: .cfi_personality 155, ___gxx_personality_v0
-; CHECK: .cfi_lsda 16, Lexception0
-; CHECK: .cfi_def_cfa_offset 16
-; CHECK: .cfi_endproc
+; APPLE: .cfi_startproc
+; APPLE: .cfi_personality 155, ___gxx_personality_v0
+; APPLE: .cfi_lsda 16, Lexception0
+; APPLE: .cfi_def_cfa_offset 16
+; APPLE: callq __Unwind_Resume
+; APPLE: .cfi_endproc
+; APPLE: GCC_except_table0:
+; APPLE: Lexception0:
+
+; MINGW64: .seh_proc
+; MINGW64: .seh_handler __gxx_personality_v0
+; MINGW64: .seh_setframe 5, 0
+; MINGW64: callq _Unwind_Resume
+; MINGW64: .seh_handlerdata
+; MINGW64: GCC_except_table0:
+; MINGW64: Lexception0:
+; MINGW64: .seh_endproc
+
+; MINGW32: .cfi_startproc
+; MINGW32: .cfi_personality 0, ___gxx_personality_v0
+; MINGW32: .cfi_lsda 0, Lexception0
+; MINGW32: .cfi_def_cfa_offset 8
+; MINGW32: calll __Unwind_Resume
+; MINGW32: .cfi_endproc
+; MINGW32: GCC_except_table0:
+; MINGW32: Lexception0:
entry:
invoke void @_Z1fv() optsize
@@ -27,7 +50,3 @@ eh.resume:
declare void @_Z1fv() optsize
declare i32 @__gxx_personality_v0(...)
-
-; CHECK: Leh_func_end0:
-; CHECK: GCC_except_table0
-; CHECK: = Leh_func_end0-
diff --git a/test/CodeGen/X86/global-sections.ll b/test/CodeGen/X86/global-sections.ll
index d8743ac31814..c763f3947e59 100644
--- a/test/CodeGen/X86/global-sections.ll
+++ b/test/CodeGen/X86/global-sections.ll
@@ -1,7 +1,16 @@
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu | FileCheck %s -check-prefix=LINUX
; RUN: llc < %s -mtriple=i386-apple-darwin9.7 | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -fdata-sections | FileCheck %s -check-prefix=LINUX-SECTIONS
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -relocation-model=static | FileCheck %s -check-prefix=DARWIN-STATIC
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s -check-prefix=DARWIN64
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -data-sections | FileCheck %s -check-prefix=LINUX-SECTIONS
+; RUN: llc < %s -mtriple=i686-pc-win32 -data-sections -function-sections | FileCheck %s -check-prefix=WIN32-SECTIONS
+define void @F1() {
+ ret void
+}
+
+; WIN32-SECTIONS: .section .text,"xr",one_only,_F1
+; WIN32-SECTIONS: .globl _F1
; int G1;
@G1 = common global i32 0
@@ -9,13 +18,13 @@
; LINUX: .type G1,@object
; LINUX: .comm G1,4,4
-; DARWIN: .comm _G1,4,2
+; DARWIN: .comm _G1,4,2
; const int G2 __attribute__((weak)) = 42;
-@G2 = weak_odr unnamed_addr constant i32 42
+@G2 = weak_odr unnamed_addr constant i32 42
; TODO: linux drops this into .rodata, we drop it into ".gnu.linkonce.r.G2"
@@ -39,6 +48,9 @@
; LINUX-SECTIONS: .section .rodata.G3,"a",@progbits
; LINUX-SECTIONS: .globl G3
+; WIN32-SECTIONS: .section .rdata,"rd",one_only,_G3
+; WIN32-SECTIONS: .globl _G3
+
; _Complex long long const G4 = 34;
@G4 = unnamed_addr constant {i64,i64} { i64 34, i64 0 }
@@ -47,6 +59,14 @@
; DARWIN: _G4:
; DARWIN: .long 34
+; DARWIN-STATIC: .section __TEXT,__literal16,16byte_literals
+; DARWIN-STATIC: _G4:
+; DARWIN-STATIC: .long 34
+
+; DARWIN64: .section __TEXT,__literal16,16byte_literals
+; DARWIN64: _G4:
+; DARWIN64: .quad 34
+
; int G5 = 47;
@G5 = global i32 47
@@ -65,25 +85,25 @@
; PR4584
@"foo bar" = linkonce global i32 42
-; LINUX: .type "foo bar",@object
+; LINUX: .type "foo bar",@object
; LINUX: .section ".data.foo bar","aGw",@progbits,"foo bar",comdat
-; LINUX: .weak "foo bar"
+; LINUX: .weak "foo bar"
; LINUX: "foo bar":
-; DARWIN: .section __DATA,__datacoal_nt,coalesced
-; DARWIN: .globl "_foo bar"
-; DARWIN: .weak_definition "_foo bar"
+; DARWIN: .section __DATA,__datacoal_nt,coalesced
+; DARWIN: .globl "_foo bar"
+; DARWIN: .weak_definition "_foo bar"
; DARWIN: "_foo bar":
; PR4650
@G6 = weak_odr unnamed_addr constant [1 x i8] c"\01"
-; LINUX: .type G6,@object
-; LINUX: .section .rodata.G6,"aG",@progbits,G6,comdat
-; LINUX: .weak G6
+; LINUX: .type G6,@object
+; LINUX: .section .rodata.G6,"aG",@progbits,G6,comdat
+; LINUX: .weak G6
; LINUX: G6:
-; LINUX: .byte 1
-; LINUX: .size G6, 1
+; LINUX: .byte 1
+; LINUX: .size G6, 1
; DARWIN: .section __TEXT,__const_coal,coalesced
; DARWIN: .globl _G6
@@ -94,55 +114,58 @@
@G7 = unnamed_addr constant [10 x i8] c"abcdefghi\00"
-; DARWIN: __TEXT,__cstring,cstring_literals
-; DARWIN: .globl _G7
+; DARWIN: __TEXT,__cstring,cstring_literals
+; DARWIN: .globl _G7
; DARWIN: _G7:
-; DARWIN: .asciz "abcdefghi"
+; DARWIN: .asciz "abcdefghi"
-; LINUX: .section .rodata.str1.1,"aMS",@progbits,1
-; LINUX: .globl G7
+; LINUX: .section .rodata.str1.1,"aMS",@progbits,1
+; LINUX: .globl G7
; LINUX: G7:
-; LINUX: .asciz "abcdefghi"
+; LINUX: .asciz "abcdefghi"
; LINUX-SECTIONS: .section .rodata.G7,"aMS",@progbits,1
-; LINUX-SECTIONS: .globl G7
+; LINUX-SECTIONS: .globl G7
+
+; WIN32-SECTIONS: .section .rdata,"rd",one_only,_G7
+; WIN32-SECTIONS: .globl _G7
@G8 = unnamed_addr constant [4 x i16] [ i16 1, i16 2, i16 3, i16 0 ]
-; DARWIN: .section __TEXT,__const
-; DARWIN: .globl _G8
+; DARWIN: .section __TEXT,__const
+; DARWIN: .globl _G8
; DARWIN: _G8:
-; LINUX: .section .rodata.str2.2,"aMS",@progbits,2
-; LINUX: .globl G8
+; LINUX: .section .rodata.str2.2,"aMS",@progbits,2
+; LINUX: .globl G8
; LINUX:G8:
@G9 = unnamed_addr constant [4 x i32] [ i32 1, i32 2, i32 3, i32 0 ]
-; DARWIN: .globl _G9
+; DARWIN: .globl _G9
; DARWIN: _G9:
-; LINUX: .section .rodata.str4.4,"aMS",@progbits,4
-; LINUX: .globl G9
+; LINUX: .section .rodata.str4.4,"aMS",@progbits,4
+; LINUX: .globl G9
; LINUX:G9
@G10 = weak global [100 x i32] zeroinitializer, align 32 ; <[100 x i32]*> [#uses=0]
-; DARWIN: .section __DATA,__datacoal_nt,coalesced
+; DARWIN: .section __DATA,__datacoal_nt,coalesced
; DARWIN: .globl _G10
-; DARWIN: .weak_definition _G10
-; DARWIN: .align 5
+; DARWIN: .weak_definition _G10
+; DARWIN: .align 5
; DARWIN: _G10:
-; DARWIN: .space 400
+; DARWIN: .space 400
-; LINUX: .bss
-; LINUX: .weak G10
-; LINUX: .align 32
+; LINUX: .bss
+; LINUX: .weak G10
+; LINUX: .align 32
; LINUX: G10:
-; LINUX: .zero 400
+; LINUX: .zero 400
@@ -158,3 +181,16 @@
; DARWIN: .zerofill __DATA,__common,_G12,1,3
; DARWIN: .globl _G13
; DARWIN: .zerofill __DATA,__common,_G13,1,3
+
+@G14 = private unnamed_addr constant [4 x i8] c"foo\00", align 1
+
+; LINUX-SECTIONS: .type .LG14,@object # @G14
+; LINUX-SECTIONS: .section .rodata..LG14,"aMS",@progbits,1
+; LINUX-SECTIONS: .LG14:
+; LINUX-SECTIONS: .asciz "foo"
+; LINUX-SECTIONS: .size .LG14, 4
+
+; WIN32-SECTIONS: .section .rdata,"rd"
+; WIN32-SECTIONS: L_G14:
+; WIN32-SECTIONS: .asciz "foo"
+
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
new file mode 100644
index 000000000000..ff939a99427e
--- /dev/null
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -0,0 +1,802 @@
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE3
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3,+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSSE3
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+
+define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test1
+; CHECK: haddps
+; CHECK-NEXT: ret
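+;
+; For reference (illustrative, not checked): with xmm0 = <a0,a1,a2,a3> and
+; xmm1 = <b0,b1,b2,b3>, "haddps %xmm1, %xmm0" leaves
+; <a0+a1, a2+a3, b0+b1, b2+b3> in xmm0, which is exactly the vector the
+; extract/insert sequence in hadd_ps_test1 constructs.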
+
+
+define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 2
+ %vecext7 = extractelement <4 x float> %B, i32 3
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test2
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test1
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 2
+ %vecext7 = extractelement <4 x float> %B, i32 3
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test2
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test1
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 3
+ %vecext7 = extractelement <4 x i32> %B, i32 2
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 1
+ %vecext11 = extractelement <4 x i32> %B, i32 0
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test2
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test1
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 2
+ %vecext7 = extractelement <4 x i32> %B, i32 3
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 0
+ %vecext11 = extractelement <4 x i32> %B, i32 1
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test2
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test1
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 1
+ %vecext1 = extractelement <2 x double> %A, i32 0
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 1
+ %vecext3 = extractelement <2 x double> %B, i32 0
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test2
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test1
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %B, i32 0
+ %vecext1 = extractelement <2 x double> %B, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 1
+ %vecext2 = extractelement <2 x double> %A, i32 0
+ %vecext3 = extractelement <2 x double> %A, i32 1
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test2
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %add4 = fadd double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %add4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %add8 = fadd double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %add8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %add12 = fadd double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhadd_pd_test
+; SSE3: haddpd
+; SSE3-NEXT: haddpd
+; SSSE3: haddpd
+; SSSE3: haddpd
+; AVX: vhaddpd
+; AVX: vhaddpd
+; AVX2: vhaddpd
+; AVX2: vhaddpd
+; CHECK: ret
+
+
+define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %sub4 = fsub double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %sub4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %sub8 = fsub double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %sub8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %sub12 = fsub double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhsub_pd_test
+; SSE3: hsubpd
+; SSE3-NEXT: hsubpd
+; SSSE3: hsubpd
+; SSSE3-NEXT: hsubpd
+; AVX: vhsubpd
+; AVX: vhsubpd
+; AVX2: vhsubpd
+; AVX2: vhsubpd
+; CHECK: ret
+
+
+define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
+ %vecext = extractelement <8 x i32> %A, i32 0
+ %vecext1 = extractelement <8 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %A, i32 2
+ %vecext3 = extractelement <8 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <8 x i32> %A, i32 4
+ %vecext7 = extractelement <8 x i32> %A, i32 5
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <8 x i32> %A, i32 6
+ %vecext11 = extractelement <8 x i32> %A, i32 7
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x i32> %vecinit9, i32 %add12, i32 3
+ %vecext14 = extractelement <8 x i32> %B, i32 0
+ %vecext15 = extractelement <8 x i32> %B, i32 1
+ %add16 = add i32 %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x i32> %vecinit13, i32 %add16, i32 4
+ %vecext18 = extractelement <8 x i32> %B, i32 2
+ %vecext19 = extractelement <8 x i32> %B, i32 3
+ %add20 = add i32 %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x i32> %vecinit17, i32 %add20, i32 5
+ %vecext22 = extractelement <8 x i32> %B, i32 4
+ %vecext23 = extractelement <8 x i32> %B, i32 5
+ %add24 = add i32 %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x i32> %vecinit21, i32 %add24, i32 6
+ %vecext26 = extractelement <8 x i32> %B, i32 6
+ %vecext27 = extractelement <8 x i32> %B, i32 7
+ %add28 = add i32 %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
+ ret <8 x i32> %vecinit29
+}
+; CHECK-LABEL: avx2_vphadd_d_test
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; SSSE3-NEXT: phaddd
+; AVX: vphaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
+ %vecext = extractelement <16 x i16> %a, i32 0
+ %vecext1 = extractelement <16 x i16> %a, i32 1
+ %add = add i16 %vecext, %vecext1
+ %vecinit = insertelement <16 x i16> undef, i16 %add, i32 0
+ %vecext4 = extractelement <16 x i16> %a, i32 2
+ %vecext6 = extractelement <16 x i16> %a, i32 3
+ %add8 = add i16 %vecext4, %vecext6
+ %vecinit10 = insertelement <16 x i16> %vecinit, i16 %add8, i32 1
+ %vecext11 = extractelement <16 x i16> %a, i32 4
+ %vecext13 = extractelement <16 x i16> %a, i32 5
+ %add15 = add i16 %vecext11, %vecext13
+ %vecinit17 = insertelement <16 x i16> %vecinit10, i16 %add15, i32 2
+ %vecext18 = extractelement <16 x i16> %a, i32 6
+ %vecext20 = extractelement <16 x i16> %a, i32 7
+ %add22 = add i16 %vecext18, %vecext20
+ %vecinit24 = insertelement <16 x i16> %vecinit17, i16 %add22, i32 3
+ %vecext25 = extractelement <16 x i16> %a, i32 8
+ %vecext27 = extractelement <16 x i16> %a, i32 9
+ %add29 = add i16 %vecext25, %vecext27
+ %vecinit31 = insertelement <16 x i16> %vecinit24, i16 %add29, i32 4
+ %vecext32 = extractelement <16 x i16> %a, i32 10
+ %vecext34 = extractelement <16 x i16> %a, i32 11
+ %add36 = add i16 %vecext32, %vecext34
+ %vecinit38 = insertelement <16 x i16> %vecinit31, i16 %add36, i32 5
+ %vecext39 = extractelement <16 x i16> %a, i32 12
+ %vecext41 = extractelement <16 x i16> %a, i32 13
+ %add43 = add i16 %vecext39, %vecext41
+ %vecinit45 = insertelement <16 x i16> %vecinit38, i16 %add43, i32 6
+ %vecext46 = extractelement <16 x i16> %a, i32 14
+ %vecext48 = extractelement <16 x i16> %a, i32 15
+ %add50 = add i16 %vecext46, %vecext48
+ %vecinit52 = insertelement <16 x i16> %vecinit45, i16 %add50, i32 7
+ %vecext53 = extractelement <16 x i16> %b, i32 0
+ %vecext55 = extractelement <16 x i16> %b, i32 1
+ %add57 = add i16 %vecext53, %vecext55
+ %vecinit59 = insertelement <16 x i16> %vecinit52, i16 %add57, i32 8
+ %vecext60 = extractelement <16 x i16> %b, i32 2
+ %vecext62 = extractelement <16 x i16> %b, i32 3
+ %add64 = add i16 %vecext60, %vecext62
+ %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 9
+ %vecext67 = extractelement <16 x i16> %b, i32 4
+ %vecext69 = extractelement <16 x i16> %b, i32 5
+ %add71 = add i16 %vecext67, %vecext69
+ %vecinit73 = insertelement <16 x i16> %vecinit66, i16 %add71, i32 10
+ %vecext74 = extractelement <16 x i16> %b, i32 6
+ %vecext76 = extractelement <16 x i16> %b, i32 7
+ %add78 = add i16 %vecext74, %vecext76
+ %vecinit80 = insertelement <16 x i16> %vecinit73, i16 %add78, i32 11
+ %vecext81 = extractelement <16 x i16> %b, i32 8
+ %vecext83 = extractelement <16 x i16> %b, i32 9
+ %add85 = add i16 %vecext81, %vecext83
+ %vecinit87 = insertelement <16 x i16> %vecinit80, i16 %add85, i32 12
+ %vecext88 = extractelement <16 x i16> %b, i32 10
+ %vecext90 = extractelement <16 x i16> %b, i32 11
+ %add92 = add i16 %vecext88, %vecext90
+ %vecinit94 = insertelement <16 x i16> %vecinit87, i16 %add92, i32 13
+ %vecext95 = extractelement <16 x i16> %b, i32 12
+ %vecext97 = extractelement <16 x i16> %b, i32 13
+ %add99 = add i16 %vecext95, %vecext97
+ %vecinit101 = insertelement <16 x i16> %vecinit94, i16 %add99, i32 14
+ %vecext102 = extractelement <16 x i16> %b, i32 14
+ %vecext104 = extractelement <16 x i16> %b, i32 15
+ %add106 = add i16 %vecext102, %vecext104
+ %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
+ ret <16 x i16> %vecinit108
+}
+; CHECK-LABEL: avx2_vphadd_w_test
+; SSE3-NOT: phaddw
+; SSSE3: phaddw
+; SSSE3-NEXT: phaddw
+; AVX: vphaddw
+; AVX: vphaddw
+; AVX2: vphaddw
+; AVX2: vphaddw
+; CHECK: ret
+
+
+; Verify that we don't select horizontal subs in the following functions.
+
+define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 1
+ %vecext7 = extractelement <4 x i32> %B, i32 0
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 3
+ %vecext11 = extractelement <4 x i32> %B, i32 2
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: not_a_hsub_1
+; CHECK-NOT: phsubd
+; CHECK: ret
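+;
+; Why the pattern above must not become a phsubd (illustrative): phsubd on
+; <A, B> produces <A0-A1, A2-A3, B0-B1, B2-B3>, whereas not_a_hsub_1 builds
+; <A0-A1, A2-A3, B1-B0, B3-B2>; the operand order of the upper two lanes is
+; swapped, so matching the horizontal-sub pattern would be a miscompile.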
+
+
+define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 3
+ %vecext7 = extractelement <4 x float> %B, i32 2
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: not_a_hsub_2
+; CHECK-NOT: hsubps
+; CHECK: ret
+
+
+define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %B, i32 0
+ %vecext1 = extractelement <2 x double> %B, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 1
+ %vecext2 = extractelement <2 x double> %A, i32 1
+ %vecext3 = extractelement <2 x double> %A, i32 0
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: not_a_hsub_3
+; CHECK-NOT: hsubpd
+; CHECK: ret
+
+
+; Test AVX horizontal add/sub of packed single/double precision
+; floating point values from 256-bit vectors.
+
+define <8 x float> @avx_vhadd_ps(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <8 x float> %b, i32 0
+ %vecext7 = extractelement <8 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x float> %vecinit5, float %add8, i32 2
+ %vecext10 = extractelement <8 x float> %b, i32 2
+ %vecext11 = extractelement <8 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x float> %vecinit9, float %add12, i32 3
+ %vecext14 = extractelement <8 x float> %a, i32 4
+ %vecext15 = extractelement <8 x float> %a, i32 5
+ %add16 = fadd float %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x float> %vecinit13, float %add16, i32 4
+ %vecext18 = extractelement <8 x float> %a, i32 6
+ %vecext19 = extractelement <8 x float> %a, i32 7
+ %add20 = fadd float %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x float> %vecinit17, float %add20, i32 5
+ %vecext22 = extractelement <8 x float> %b, i32 4
+ %vecext23 = extractelement <8 x float> %b, i32 5
+ %add24 = fadd float %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x float> %vecinit21, float %add24, i32 6
+ %vecext26 = extractelement <8 x float> %b, i32 6
+ %vecext27 = extractelement <8 x float> %b, i32 7
+ %add28 = fadd float %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x float> %vecinit25, float %add28, i32 7
+ ret <8 x float> %vecinit29
+}
+; CHECK-LABEL: avx_vhadd_ps
+; SSE3: haddps
+; SSE3-NEXT: haddps
+; SSSE3: haddps
+; SSSE3-NEXT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK: ret
+
+
+define <8 x float> @avx_vhsub_ps(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %sub, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %sub4, i32 1
+ %vecext6 = extractelement <8 x float> %b, i32 0
+ %vecext7 = extractelement <8 x float> %b, i32 1
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x float> %vecinit5, float %sub8, i32 2
+ %vecext10 = extractelement <8 x float> %b, i32 2
+ %vecext11 = extractelement <8 x float> %b, i32 3
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x float> %vecinit9, float %sub12, i32 3
+ %vecext14 = extractelement <8 x float> %a, i32 4
+ %vecext15 = extractelement <8 x float> %a, i32 5
+ %sub16 = fsub float %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x float> %vecinit13, float %sub16, i32 4
+ %vecext18 = extractelement <8 x float> %a, i32 6
+ %vecext19 = extractelement <8 x float> %a, i32 7
+ %sub20 = fsub float %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x float> %vecinit17, float %sub20, i32 5
+ %vecext22 = extractelement <8 x float> %b, i32 4
+ %vecext23 = extractelement <8 x float> %b, i32 5
+ %sub24 = fsub float %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x float> %vecinit21, float %sub24, i32 6
+ %vecext26 = extractelement <8 x float> %b, i32 6
+ %vecext27 = extractelement <8 x float> %b, i32 7
+ %sub28 = fsub float %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x float> %vecinit25, float %sub28, i32 7
+ ret <8 x float> %vecinit29
+}
+; CHECK-LABEL: avx_vhsub_ps
+; SSE3: hsubps
+; SSE3-NEXT: hsubps
+; SSSE3: hsubps
+; SSSE3-NEXT: hsubps
+; AVX: vhsubps
+; AVX2: vhsubps
+; CHECK: ret
+
+
+define <4 x double> @avx_hadd_pd(<4 x double> %a, <4 x double> %b) {
+ %vecext = extractelement <4 x double> %a, i32 0
+ %vecext1 = extractelement <4 x double> %a, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <4 x double> %b, i32 0
+ %vecext3 = extractelement <4 x double> %b, i32 1
+ %add4 = fadd double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %add4, i32 1
+ %vecext6 = extractelement <4 x double> %a, i32 2
+ %vecext7 = extractelement <4 x double> %a, i32 3
+ %add8 = fadd double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %add8, i32 2
+ %vecext10 = extractelement <4 x double> %b, i32 2
+ %vecext11 = extractelement <4 x double> %b, i32 3
+ %add12 = fadd double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_hadd_pd
+; SSE3: haddpd
+; SSE3-NEXT: haddpd
+; SSSE3: haddpd
+; SSSE3-NEXT: haddpd
+; AVX: vhaddpd
+; AVX2: vhaddpd
+; CHECK: ret
+
+
+define <4 x double> @avx_hsub_pd(<4 x double> %a, <4 x double> %b) {
+ %vecext = extractelement <4 x double> %a, i32 0
+ %vecext1 = extractelement <4 x double> %a, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <4 x double> %b, i32 0
+ %vecext3 = extractelement <4 x double> %b, i32 1
+ %sub4 = fsub double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %sub4, i32 1
+ %vecext6 = extractelement <4 x double> %a, i32 2
+ %vecext7 = extractelement <4 x double> %a, i32 3
+ %sub8 = fsub double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %sub8, i32 2
+ %vecext10 = extractelement <4 x double> %b, i32 2
+ %vecext11 = extractelement <4 x double> %b, i32 3
+ %sub12 = fsub double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_hsub_pd
+; SSE3: hsubpd
+; SSE3-NEXT: hsubpd
+; SSSE3: hsubpd
+; SSSE3-NEXT: hsubpd
+; AVX: vhsubpd
+; AVX2: vhsubpd
+; CHECK: ret
+
+
+; Test AVX2 horizontal add of packed integer values from 256-bit vectors.
+
+define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <8 x i32> %b, i32 0
+ %vecext7 = extractelement <8 x i32> %b, i32 1
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <8 x i32> %b, i32 2
+ %vecext11 = extractelement <8 x i32> %b, i32 3
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x i32> %vecinit9, i32 %add12, i32 3
+ %vecext14 = extractelement <8 x i32> %a, i32 4
+ %vecext15 = extractelement <8 x i32> %a, i32 5
+ %add16 = add i32 %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x i32> %vecinit13, i32 %add16, i32 4
+ %vecext18 = extractelement <8 x i32> %a, i32 6
+ %vecext19 = extractelement <8 x i32> %a, i32 7
+ %add20 = add i32 %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x i32> %vecinit17, i32 %add20, i32 5
+ %vecext22 = extractelement <8 x i32> %b, i32 4
+ %vecext23 = extractelement <8 x i32> %b, i32 5
+ %add24 = add i32 %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x i32> %vecinit21, i32 %add24, i32 6
+ %vecext26 = extractelement <8 x i32> %b, i32 6
+ %vecext27 = extractelement <8 x i32> %b, i32 7
+ %add28 = add i32 %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
+ ret <8 x i32> %vecinit29
+}
+; CHECK-LABEL: avx2_hadd_d
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; SSSE3-NEXT: phaddd
+; AVX: vphaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; AVX2-NOT: vphaddd
+; CHECK: ret
+
+
+define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
+ %vecext = extractelement <16 x i16> %a, i32 0
+ %vecext1 = extractelement <16 x i16> %a, i32 1
+ %add = add i16 %vecext, %vecext1
+ %vecinit = insertelement <16 x i16> undef, i16 %add, i32 0
+ %vecext4 = extractelement <16 x i16> %a, i32 2
+ %vecext6 = extractelement <16 x i16> %a, i32 3
+ %add8 = add i16 %vecext4, %vecext6
+ %vecinit10 = insertelement <16 x i16> %vecinit, i16 %add8, i32 1
+ %vecext11 = extractelement <16 x i16> %a, i32 4
+ %vecext13 = extractelement <16 x i16> %a, i32 5
+ %add15 = add i16 %vecext11, %vecext13
+ %vecinit17 = insertelement <16 x i16> %vecinit10, i16 %add15, i32 2
+ %vecext18 = extractelement <16 x i16> %a, i32 6
+ %vecext20 = extractelement <16 x i16> %a, i32 7
+ %add22 = add i16 %vecext18, %vecext20
+ %vecinit24 = insertelement <16 x i16> %vecinit17, i16 %add22, i32 3
+ %vecext25 = extractelement <16 x i16> %a, i32 8
+ %vecext27 = extractelement <16 x i16> %a, i32 9
+ %add29 = add i16 %vecext25, %vecext27
+ %vecinit31 = insertelement <16 x i16> %vecinit24, i16 %add29, i32 8
+ %vecext32 = extractelement <16 x i16> %a, i32 10
+ %vecext34 = extractelement <16 x i16> %a, i32 11
+ %add36 = add i16 %vecext32, %vecext34
+ %vecinit38 = insertelement <16 x i16> %vecinit31, i16 %add36, i32 9
+ %vecext39 = extractelement <16 x i16> %a, i32 12
+ %vecext41 = extractelement <16 x i16> %a, i32 13
+ %add43 = add i16 %vecext39, %vecext41
+ %vecinit45 = insertelement <16 x i16> %vecinit38, i16 %add43, i32 10
+ %vecext46 = extractelement <16 x i16> %a, i32 14
+ %vecext48 = extractelement <16 x i16> %a, i32 15
+ %add50 = add i16 %vecext46, %vecext48
+ %vecinit52 = insertelement <16 x i16> %vecinit45, i16 %add50, i32 11
+ %vecext53 = extractelement <16 x i16> %b, i32 0
+ %vecext55 = extractelement <16 x i16> %b, i32 1
+ %add57 = add i16 %vecext53, %vecext55
+ %vecinit59 = insertelement <16 x i16> %vecinit52, i16 %add57, i32 4
+ %vecext60 = extractelement <16 x i16> %b, i32 2
+ %vecext62 = extractelement <16 x i16> %b, i32 3
+ %add64 = add i16 %vecext60, %vecext62
+ %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 5
+ %vecext67 = extractelement <16 x i16> %b, i32 4
+ %vecext69 = extractelement <16 x i16> %b, i32 5
+ %add71 = add i16 %vecext67, %vecext69
+ %vecinit73 = insertelement <16 x i16> %vecinit66, i16 %add71, i32 6
+ %vecext74 = extractelement <16 x i16> %b, i32 6
+ %vecext76 = extractelement <16 x i16> %b, i32 7
+ %add78 = add i16 %vecext74, %vecext76
+ %vecinit80 = insertelement <16 x i16> %vecinit73, i16 %add78, i32 7
+ %vecext81 = extractelement <16 x i16> %b, i32 8
+ %vecext83 = extractelement <16 x i16> %b, i32 9
+ %add85 = add i16 %vecext81, %vecext83
+ %vecinit87 = insertelement <16 x i16> %vecinit80, i16 %add85, i32 12
+ %vecext88 = extractelement <16 x i16> %b, i32 10
+ %vecext90 = extractelement <16 x i16> %b, i32 11
+ %add92 = add i16 %vecext88, %vecext90
+ %vecinit94 = insertelement <16 x i16> %vecinit87, i16 %add92, i32 13
+ %vecext95 = extractelement <16 x i16> %b, i32 12
+ %vecext97 = extractelement <16 x i16> %b, i32 13
+ %add99 = add i16 %vecext95, %vecext97
+ %vecinit101 = insertelement <16 x i16> %vecinit94, i16 %add99, i32 14
+ %vecext102 = extractelement <16 x i16> %b, i32 14
+ %vecext104 = extractelement <16 x i16> %b, i32 15
+ %add106 = add i16 %vecext102, %vecext104
+ %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
+ ret <16 x i16> %vecinit108
+}
+; CHECK-LABEL: avx2_hadd_w
+; SSE3-NOT: phaddw
+; SSSE3: phaddw
+; SSSE3-NEXT: phaddw
+; AVX: vphaddw
+; AVX: vphaddw
+; AVX2: vphaddw
+; AVX2-NOT: vphaddw
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/haddsub-undef.ll b/test/CodeGen/X86/haddsub-undef.ll
new file mode 100644
index 000000000000..954a9d994e61
--- /dev/null
+++ b/test/CodeGen/X86/haddsub-undef.ll
@@ -0,0 +1,325 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -mattr=+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+; Verify that we correctly fold horizontal binops even in the presence of UNDEFs.
+
+define <4 x float> @test1_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext10 = extractelement <4 x float> %b, i32 2
+ %vecext11 = extractelement <4 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit5, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: test1_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
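+;
+; Note on the UNDEF lane (illustrative): test1_undef never writes lane 2 of
+; the result, so that lane is undef; a single haddps is still a valid lowering
+; because the value it produces there (b0+b1) can legitimately stand in for
+; undef.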
+
+
+define <4 x float> @test2_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext6 = extractelement <4 x float> %b, i32 0
+ %vecext7 = extractelement <4 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit, float %add8, i32 2
+ %vecext10 = extractelement <4 x float> %b, i32 2
+ %vecext11 = extractelement <4 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: test2_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <4 x float> %b, i32 0
+ %vecext7 = extractelement <4 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
+ ret <4 x float> %vecinit9
+}
+; CHECK-LABEL: test3_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ ret <4 x float> %vecinit
+}
+; CHECK-LABEL: test4_undef
+; CHECK-NOT: haddps
+; CHECK: ret
+
+
+define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
+ %vecext = extractelement <2 x double> %a, i32 0
+ %vecext1 = extractelement <2 x double> %a, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ ret <2 x double> %vecinit
+}
+; CHECK-LABEL: test5_undef
+; CHECK-NOT: haddpd
+; CHECK: ret
+
+
+define <4 x float> @test6_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test6_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %b, i32 0
+ %vecext1 = extractelement <4 x float> %b, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 2
+ %vecext2 = extractelement <4 x float> %b, i32 2
+ %vecext3 = extractelement <4 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test7_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 2
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test8_undef
+; CHECK-NOT: haddps
+; CHECK: ret
+
+
+define <4 x float> @test9_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %b, i32 2
+ %vecext3 = extractelement <4 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test9_undef
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %b, i32 2
+ %vecext3 = extractelement <8 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 3
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test10_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %b, i32 4
+ %vecext3 = extractelement <8 x float> %b, i32 5
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 6
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test11_undef
+; SSE-NOT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK: ret
+
+define <8 x float> @test12_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 1
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test12_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x float> @test13_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add1 = fadd float %vecext, %vecext1
+ %vecinit1 = insertelement <8 x float> undef, float %add1, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add2 = fadd float %vecext2, %vecext3
+ %vecinit2 = insertelement <8 x float> %vecinit1, float %add2, i32 1
+ %vecext4 = extractelement <8 x float> %a, i32 4
+ %vecext5 = extractelement <8 x float> %a, i32 5
+ %add3 = fadd float %vecext4, %vecext5
+ %vecinit3 = insertelement <8 x float> %vecinit2, float %add3, i32 2
+ %vecext6 = extractelement <8 x float> %a, i32 6
+ %vecext7 = extractelement <8 x float> %a, i32 7
+ %add4 = fadd float %vecext6, %vecext7
+ %vecinit4 = insertelement <8 x float> %vecinit3, float %add4, i32 3
+ ret <8 x float> %vecinit4
+}
+; CHECK-LABEL: test13_undef
+; SSE: haddps
+; SSE-NOT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %b, i32 2
+ %vecext3 = extractelement <8 x i32> %b, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 3
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test14_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: phaddd
+; CHECK: ret
+
+; On AVX2, the following sequence can be folded into a single horizontal add.
+; If the subtarget doesn't support AVX2, we should prefer emitting two scalar
+; adds followed by vector inserts over two packed integer horizontal adds.
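+; (A single 256-bit vphaddd already produces a[0]+a[1] in element 0 and b[4]+b[5]
+; in element 6; without AVX2 this would need two 128-bit phaddd plus extra
+; shuffles, which is no better than the two scalar adds.)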
+define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %b, i32 4
+ %vecext3 = extractelement <8 x i32> %b, i32 5
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 6
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test15_undef
+; SSE-NOT: phaddd
+; AVX-NOT: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test16_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add1 = add i32 %vecext, %vecext1
+ %vecinit1 = insertelement <8 x i32> undef, i32 %add1, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add2 = add i32 %vecext2, %vecext3
+ %vecinit2 = insertelement <8 x i32> %vecinit1, i32 %add2, i32 1
+ %vecext4 = extractelement <8 x i32> %a, i32 4
+ %vecext5 = extractelement <8 x i32> %a, i32 5
+ %add3 = add i32 %vecext4, %vecext5
+ %vecinit3 = insertelement <8 x i32> %vecinit2, i32 %add3, i32 2
+ %vecext6 = extractelement <8 x i32> %a, i32 6
+ %vecext7 = extractelement <8 x i32> %a, i32 7
+ %add4 = add i32 %vecext6, %vecext7
+ %vecinit4 = insertelement <8 x i32> %vecinit3, i32 %add4, i32 3
+ ret <8 x i32> %vecinit4
+}
+; CHECK-LABEL: test17_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: haddps
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/half.ll b/test/CodeGen/X86/half.ll
new file mode 100644
index 000000000000..1dcf93939b8b
--- /dev/null
+++ b/test/CodeGen/X86/half.ll
@@ -0,0 +1,69 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LIBCALL
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-F16C
+
+define void @test_load_store(half* %in, half* %out) {
+; CHECK-LABEL: test_load_store:
+; CHECK: movw (%rdi), [[TMP:%[a-z0-9]+]]
+; CHECK: movw [[TMP]], (%rsi)
+ %val = load half* %in
+ store half %val, half* %out
+ ret void
+}
+
+define i16 @test_bitcast_from_half(half* %addr) {
+; CHECK-LABEL: test_bitcast_from_half:
+; CHECK: movzwl (%rdi), %eax
+ %val = load half* %addr
+ %val_int = bitcast half %val to i16
+ ret i16 %val_int
+}
+
+define void @test_bitcast_to_half(half* %addr, i16 %in) {
+; CHECK-LABEL: test_bitcast_to_half:
+; CHECK: movw %si, (%rdi)
+ %val_fp = bitcast i16 %in to half
+ store half %val_fp, half* %addr
+ ret void
+}
+
+define float @test_extend32(half* %addr) {
+; CHECK-LABEL: test_extend32:
+
+; CHECK-LIBCALL: jmp __gnu_h2f_ieee
+; CHECK-F16C: vcvtph2ps
+ %val16 = load half* %addr
+ %val32 = fpext half %val16 to float
+ ret float %val32
+}
+
+define double @test_extend64(half* %addr) {
+; CHECK-LABEL: test_extend64:
+
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-LIBCALL: cvtss2sd
+; CHECK-F16C: vcvtph2ps
+; CHECK-F16C: vcvtss2sd
+ %val16 = load half* %addr
+ %val32 = fpext half %val16 to double
+ ret double %val32
+}
+
+define void @test_trunc32(float %in, half* %addr) {
+; CHECK-LABEL: test_trunc32:
+
+; CHECK-LIBCALL: callq __gnu_f2h_ieee
+; CHECK-F16C: vcvtps2ph
+ %val16 = fptrunc float %in to half
+ store half %val16, half* %addr
+ ret void
+}
+
+define void @test_trunc64(double %in, half* %addr) {
+; CHECK-LABEL: test_trunc64:
+
+; CHECK-LIBCALL: callq __truncdfhf2
+; CHECK-F16C: callq __truncdfhf2
+ %val16 = fptrunc double %in to half
+ store half %val16, half* %addr
+ ret void
+}
diff --git a/test/CodeGen/X86/hidden-vis-pic.ll b/test/CodeGen/X86/hidden-vis-pic.ll
index 67be3d0ffca0..1caab7a6a00e 100644
--- a/test/CodeGen/X86/hidden-vis-pic.ll
+++ b/test/CodeGen/X86/hidden-vis-pic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-cfi -mtriple=i386-apple-darwin9 -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin9 -relocation-model=pic -disable-fp-elim | FileCheck %s
@@ -48,8 +48,3 @@ return: ; preds = %entry
%retval1 = load i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
-
-; CHECK: .private_extern _func.eh
-; CHECK: .private_extern _main.eh
-
-
diff --git a/test/CodeGen/X86/i64-mem-copy.ll b/test/CodeGen/X86/i64-mem-copy.ll
index dce12ae12485..bf778968c89a 100644
--- a/test/CodeGen/X86/i64-mem-copy.ll
+++ b/test/CodeGen/X86/i64-mem-copy.ll
@@ -3,7 +3,7 @@
; X64: movq ({{%rsi|%rdx}}), %r
; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s -check-prefix=X32
-; X32: movsd (%eax), %xmm
+; X32: movsd ({{%ecx|%eax}}), %xmm
; Uses movsd to load / store i64 values if sse2 is available.
diff --git a/test/CodeGen/X86/i8-umulo.ll b/test/CodeGen/X86/i8-umulo.ll
new file mode 100644
index 000000000000..ba846f3e9be3
--- /dev/null
+++ b/test/CodeGen/X86/i8-umulo.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=generic -march=x86 < %s | FileCheck %s
+; PR19858
+
+declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
+define i8 @testumulo(i32 %argc) {
+; CHECK: imulw
+; CHECK: testb %{{.+}}, %{{.+}}
+; CHECK: je [[NOOVERFLOWLABEL:.+]]
+; CHECK: {{.*}}[[NOOVERFLOWLABEL]]:
+; CHECK-NEXT: movb
+; CHECK-NEXT: retl
+top:
+ %RHS = trunc i32 %argc to i8
+ %umul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 25, i8 %RHS)
+ %ex = extractvalue { i8, i1 } %umul, 1
+ br i1 %ex, label %overflow, label %nooverflow
+
+overflow:
+ ret i8 %RHS
+
+nooverflow:
+ %umul.value = extractvalue { i8, i1 } %umul, 0
+ ret i8 %umul.value
+}
diff --git a/test/CodeGen/X86/inalloca-ctor.ll b/test/CodeGen/X86/inalloca-ctor.ll
new file mode 100644
index 000000000000..7cfa92913578
--- /dev/null
+++ b/test/CodeGen/X86/inalloca-ctor.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s
+
+%Foo = type { i32, i32 }
+
+%frame = type { %Foo, i32, %Foo }
+
+declare void @f(%frame* inalloca %a)
+
+declare void @Foo_ctor(%Foo* %this)
+
+define void @g() {
+entry:
+ %args = alloca inalloca %frame
+ %c = getelementptr %frame* %args, i32 0, i32 2
+; CHECK: movl $20, %eax
+; CHECK: calll __chkstk
+; CHECK: movl %esp,
+ call void @Foo_ctor(%Foo* %c)
+; CHECK: leal 12(%{{.*}}),
+; CHECK: subl $4, %esp
+; CHECK: calll _Foo_ctor
+; CHECK: addl $4, %esp
+ %b = getelementptr %frame* %args, i32 0, i32 1
+ store i32 42, i32* %b
+; CHECK: movl $42,
+ %a = getelementptr %frame* %args, i32 0, i32 0
+ call void @Foo_ctor(%Foo* %a)
+; CHECK: subl $4, %esp
+; CHECK: calll _Foo_ctor
+; CHECK: addl $4, %esp
+ call void @f(%frame* inalloca %args)
+; CHECK: calll _f
+ ret void
+}
diff --git a/test/CodeGen/X86/inalloca-invoke.ll b/test/CodeGen/X86/inalloca-invoke.ll
new file mode 100644
index 000000000000..6cff9ac0640c
--- /dev/null
+++ b/test/CodeGen/X86/inalloca-invoke.ll
@@ -0,0 +1,54 @@
+; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s
+
+%Iter = type { i32, i32, i32 }
+
+%frame.reverse = type { %Iter, %Iter }
+
+declare void @llvm.stackrestore(i8*)
+declare i8* @llvm.stacksave()
+declare void @begin(%Iter* sret)
+declare void @plus(%Iter* sret, %Iter*, i32)
+declare void @reverse(%frame.reverse* inalloca align 4)
+
+define i32 @main() {
+ %temp.lvalue = alloca %Iter
+ br label %blah
+
+blah:
+ %inalloca.save = call i8* @llvm.stacksave()
+ %rev_args = alloca inalloca %frame.reverse, align 4
+ %beg = getelementptr %frame.reverse* %rev_args, i32 0, i32 0
+ %end = getelementptr %frame.reverse* %rev_args, i32 0, i32 1
+
+; CHECK: calll __chkstk
+; CHECK: movl %[[beg:[^,]*]], %esp
+; CHECK: leal 12(%[[beg]]), %[[end:[^ ]*]]
+
+ call void @begin(%Iter* sret %temp.lvalue)
+; CHECK: calll _begin
+
+ invoke void @plus(%Iter* sret %end, %Iter* %temp.lvalue, i32 4)
+ to label %invoke.cont unwind label %lpad
+
+; Uses end as sret param.
+; CHECK: movl %[[end]], (%esp)
+; CHECK: calll _plus
+
+invoke.cont:
+ call void @begin(%Iter* sret %beg)
+
+; CHECK: movl %[[beg]],
+; CHECK: calll _begin
+
+ invoke void @reverse(%frame.reverse* inalloca align 4 %rev_args)
+ to label %invoke.cont5 unwind label %lpad
+
+invoke.cont5: ; preds = %invoke.cont
+ call void @llvm.stackrestore(i8* %inalloca.save)
+ ret i32 0
+
+lpad: ; preds = %invoke.cont, %entry
+ %lp = landingpad { i8*, i32 } personality i8* null
+ cleanup
+ unreachable
+}
diff --git a/test/CodeGen/X86/inalloca-stdcall.ll b/test/CodeGen/X86/inalloca-stdcall.ll
new file mode 100644
index 000000000000..54f97d99a9c7
--- /dev/null
+++ b/test/CodeGen/X86/inalloca-stdcall.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s
+
+%Foo = type { i32, i32 }
+
+declare x86_stdcallcc void @f(%Foo* inalloca %a)
+declare x86_stdcallcc void @i(i32 %a)
+
+define void @g() {
+ %b = alloca inalloca %Foo
+; CHECK: movl $8, %eax
+; CHECK: calll __chkstk
+; CHECK: movl %[[REG:[^,]*]], %esp
+ %f1 = getelementptr %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ store i32 13, i32* %f1
+ store i32 42, i32* %f2
+; CHECK: movl $13, (%[[REG]])
+; CHECK: movl $42, 4(%[[REG]])
+ call x86_stdcallcc void @f(%Foo* inalloca %b)
+; CHECK: calll _f@8
+; CHECK-NOT: %esp
+; CHECK: subl $4, %esp
+; CHECK: calll _i@4
+ call x86_stdcallcc void @i(i32 0)
+ ret void
+}
diff --git a/test/CodeGen/X86/inalloca.ll b/test/CodeGen/X86/inalloca.ll
new file mode 100644
index 000000000000..12643f9d0d50
--- /dev/null
+++ b/test/CodeGen/X86/inalloca.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s
+
+%Foo = type { i32, i32 }
+
+declare void @f(%Foo* inalloca %b)
+
+define void @a() {
+; CHECK-LABEL: _a:
+entry:
+ %b = alloca inalloca %Foo
+; CHECK: movl $8, %eax
+; CHECK: calll __chkstk
+; CHECK: movl %[[REG:[^,]*]], %esp
+ %f1 = getelementptr %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ store i32 13, i32* %f1
+ store i32 42, i32* %f2
+; CHECK: movl $13, (%[[REG]])
+; CHECK: movl $42, 4(%[[REG]])
+ call void @f(%Foo* inalloca %b)
+; CHECK: calll _f
+ ret void
+}
+
+declare void @inreg_with_inalloca(i32 inreg %a, %Foo* inalloca %b)
+
+define void @b() {
+; CHECK-LABEL: _b:
+entry:
+ %b = alloca inalloca %Foo
+; CHECK: movl $8, %eax
+; CHECK: calll __chkstk
+; CHECK: movl %[[REG:[^,]*]], %esp
+ %f1 = getelementptr %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ store i32 13, i32* %f1
+ store i32 42, i32* %f2
+; CHECK: movl $13, (%[[REG]])
+; CHECK: movl $42, 4(%[[REG]])
+ call void @inreg_with_inalloca(i32 inreg 1, %Foo* inalloca %b)
+; CHECK: movl $1, %eax
+; CHECK: calll _inreg_with_inalloca
+ ret void
+}
+
+declare x86_thiscallcc void @thiscall_with_inalloca(i8* %a, %Foo* inalloca %b)
+
+define void @c() {
+; CHECK-LABEL: _c:
+entry:
+ %b = alloca inalloca %Foo
+; CHECK: movl $8, %eax
+; CHECK: calll __chkstk
+; CHECK: movl %[[REG:[^,]*]], %esp
+ %f1 = getelementptr %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ store i32 13, i32* %f1
+ store i32 42, i32* %f2
+; CHECK-DAG: movl $13, (%[[REG]])
+; CHECK-DAG: movl $42, 4(%[[REG]])
+ call x86_thiscallcc void @thiscall_with_inalloca(i8* null, %Foo* inalloca %b)
+; CHECK-DAG: xorl %ecx, %ecx
+; CHECK: calll _thiscall_with_inalloca
+ ret void
+}
diff --git a/test/CodeGen/X86/indirect-hidden.ll b/test/CodeGen/X86/indirect-hidden.ll
new file mode 100644
index 000000000000..309375d93024
--- /dev/null
+++ b/test/CodeGen/X86/indirect-hidden.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=i686-apple-macosx -o - %s | FileCheck %s
+
+; x86 doesn't normally use indirect symbols, particularly hidden ones, but it
+; can be tricked into it for exception-handling typeids.
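+; On Darwin/i386 the catch typeinfo is referenced through a non-lazy symbol
+; pointer, so both the hidden and the default-visibility typeid are expected to
+; get an entry in the __IMPORT,__pointers section (see the checks below).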
+
+@hidden_typeid = external hidden constant i8*
+@normal_typeid = external constant i8*
+
+declare void @throws()
+
+define void @get_indirect_hidden() {
+ invoke void @throws() to label %end unwind label %lpad
+lpad:
+ %tmp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* bitcast (i8** @hidden_typeid to i8*)
+ br label %end
+
+end:
+ ret void
+}
+
+define void @get_indirect() {
+ invoke void @throws() to label %end unwind label %lpad
+lpad:
+ %tmp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* bitcast (i8** @normal_typeid to i8*)
+ br label %end
+
+end:
+ ret void
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+; CHECK: .section __IMPORT,__pointers,non_lazy_symbol_pointers
+
+; CHECK-NOT: __DATA,__data
+; CHECK: .indirect_symbol _normal_typeid
+; CHECK-NEXT: .long 0
+
+; CHECK-NOT: __DATA,__data
+; CHECK: .indirect_symbol _hidden_typeid
+; CHECK-NEXT: .long 0
diff --git a/test/CodeGen/X86/inline-asm-flag-clobber.ll b/test/CodeGen/X86/inline-asm-flag-clobber.ll
index 45f4d2f38a46..bb7c33e422ed 100644
--- a/test/CodeGen/X86/inline-asm-flag-clobber.ll
+++ b/test/CodeGen/X86/inline-asm-flag-clobber.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
+; RUN: llc -march=x86-64 -no-integrated-as < %s | FileCheck %s
; PR3701
define i64 @t(i64* %arg) nounwind {
diff --git a/test/CodeGen/X86/inline-asm-fpstack.ll b/test/CodeGen/X86/inline-asm-fpstack.ll
index e83c065632dc..91c477baaa51 100644
--- a/test/CodeGen/X86/inline-asm-fpstack.ll
+++ b/test/CodeGen/X86/inline-asm-fpstack.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=generic -mtriple=i386-apple-darwin | FileCheck %s
+; RUN: llc < %s -mcpu=generic -mtriple=i386-apple-darwin -no-integrated-as | FileCheck %s
; There should be no stack manipulations between the inline asm and ret.
; CHECK: test1
diff --git a/test/CodeGen/X86/inline-asm-h.ll b/test/CodeGen/X86/inline-asm-h.ll
index 53cf419bd11a..8c3e45aba903 100644
--- a/test/CodeGen/X86/inline-asm-h.ll
+++ b/test/CodeGen/X86/inline-asm-h.ll
@@ -9,4 +9,4 @@ entry:
}
; CHECK: zed
-; CHECK: movq %mm2,foobar+8(%rip)
+; CHECK: movq %mm2, foobar+8(%rip)
diff --git a/test/CodeGen/X86/inline-asm-modifier-n.ll b/test/CodeGen/X86/inline-asm-modifier-n.ll
index b069c4631899..072c7c419536 100644
--- a/test/CodeGen/X86/inline-asm-modifier-n.ll
+++ b/test/CodeGen/X86/inline-asm-modifier-n.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | grep " 37"
+; RUN: llc < %s -march=x86 -no-integrated-as | grep " 37"
; rdar://7008959
define void @bork() nounwind {
diff --git a/test/CodeGen/X86/inline-asm-modifier-q.ll b/test/CodeGen/X86/inline-asm-modifier-q.ll
index d20f06d29054..8063d48a2ca6 100644
--- a/test/CodeGen/X86/inline-asm-modifier-q.ll
+++ b/test/CodeGen/X86/inline-asm-modifier-q.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -march=x86 -no-integrated-as | FileCheck %s
; If the target does not have 64-bit integer registers, emit 32-bit register
; names.
diff --git a/test/CodeGen/X86/inline-asm-mrv.ll b/test/CodeGen/X86/inline-asm-mrv.ll
index 733205d6a915..a96e7b818072 100644
--- a/test/CodeGen/X86/inline-asm-mrv.ll
+++ b/test/CodeGen/X86/inline-asm-mrv.ll
@@ -1,8 +1,8 @@
; PR2094
-; RUN: llc < %s -march=x86-64 | grep movslq
-; RUN: llc < %s -march=x86-64 | grep addps
-; RUN: llc < %s -march=x86-64 | grep paddd
-; RUN: llc < %s -march=x86-64 | not grep movq
+; RUN: llc < %s -march=x86-64 -no-integrated-as | grep movslq
+; RUN: llc < %s -march=x86-64 -no-integrated-as | grep addps
+; RUN: llc < %s -march=x86-64 -no-integrated-as | grep paddd
+; RUN: llc < %s -march=x86-64 -no-integrated-as | not grep movq
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
diff --git a/test/CodeGen/X86/inline-asm-q-regs.ll b/test/CodeGen/X86/inline-asm-q-regs.ll
index fca68baac6ef..53a56aee2cb3 100644
--- a/test/CodeGen/X86/inline-asm-q-regs.ll
+++ b/test/CodeGen/X86/inline-asm-q-regs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -mattr=+avx
+; RUN: llc < %s -march=x86-64 -mattr=+avx -no-integrated-as
; rdar://7066579
%0 = type { i64, i64, i64, i64, i64 } ; type %0
diff --git a/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll b/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll
new file mode 100644
index 000000000000..b55571bcba09
--- /dev/null
+++ b/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -force-align-stack -mtriple i386-apple-darwin -mcpu=i486 | FileCheck %s
+
+%struct.foo = type { [88 x i8] }
+
+declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind
+
+; PR19012
+; Don't clobber %esi if we have inline asm that clobbers %esp.
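+; Since the asm clobbers %esp, the frame is addressed through %esi as a base
+; pointer, so the byval copy must not be lowered to rep;movsl, which would use
+; (and clobber) %esi as its source register.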
+define void @test1(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
+ call void @bar(i8* %z, %struct.foo* align 4 byval %x)
+ call void asm sideeffect inteldialect "xor esp, esp", "=*m,~{flags},~{esp},~{esp},~{dirflag},~{fpsr},~{flags}"(i8* %z)
+ ret void
+
+; CHECK-LABEL: test1:
+; CHECK: movl %esp, %esi
+; CHECK-NOT: rep;movsl
+}
diff --git a/test/CodeGen/X86/inline-asm-stack-realign.ll b/test/CodeGen/X86/inline-asm-stack-realign.ll
new file mode 100644
index 000000000000..f2ac0f451bb0
--- /dev/null
+++ b/test/CodeGen/X86/inline-asm-stack-realign.ll
@@ -0,0 +1,16 @@
+; RUN: not llc -mtriple=i686-pc-win32 < %s 2>&1 | FileCheck %s
+
+; FIXME: This is miscompiled due to our unconditional use of ESI as the base
+; pointer.
+; XFAIL: *
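+; The realigned frame is addressed through %esi as the base pointer, while the
+; asm pushes/pops and clobbers %esi, so the memory operand is silently
+; miscompiled; the test stays XFAILed until the backend emits the diagnostic
+; checked below.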
+
+; CHECK: Stack realignment in presence of dynamic stack adjustments is not supported with inline assembly
+
+define i32 @foo() {
+entry:
+ %r = alloca i32, align 16
+ store i32 -1, i32* %r, align 16
+ call void asm sideeffect inteldialect "push esi\0A\09xor esi, esi\0A\09mov dword ptr $0, esi\0A\09pop esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
+ %0 = load i32* %r, align 16
+ ret i32 %0
+}
diff --git a/test/CodeGen/X86/inline-asm-stack-realign2.ll b/test/CodeGen/X86/inline-asm-stack-realign2.ll
new file mode 100644
index 000000000000..0e4e7e1a6776
--- /dev/null
+++ b/test/CodeGen/X86/inline-asm-stack-realign2.ll
@@ -0,0 +1,16 @@
+; RUN: not llc -mtriple=i686-pc-win32 < %s 2>&1 | FileCheck %s
+
+; FIXME: This is miscompiled due to our unconditional use of ESI as the base
+; pointer.
+; XFAIL: *
+
+; CHECK: Stack realignment in presence of dynamic stack adjustments is not supported with inline assembly
+
+define i32 @foo() {
+entry:
+ %r = alloca i32, align 16
+ store i32 -1, i32* %r, align 16
+ call void asm sideeffect "push %esi\0A\09xor %esi, %esi\0A\09mov %esi, $0\0A\09pop %esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
+ %0 = load i32* %r, align 16
+ ret i32 %0
+}
diff --git a/test/CodeGen/X86/inline-asm-stack-realign3.ll b/test/CodeGen/X86/inline-asm-stack-realign3.ll
new file mode 100644
index 000000000000..3baaaaa7d93d
--- /dev/null
+++ b/test/CodeGen/X86/inline-asm-stack-realign3.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=x86 -no-integrated-as < %s | FileCheck %s
+
+declare void @bar(i32* %junk)
+
+define i32 @foo(i1 %cond) {
+entry:
+ %r = alloca i32, align 128
+ store i32 -1, i32* %r, align 128
+ br i1 %cond, label %doit, label %skip
+
+doit:
+ call void asm sideeffect "xor %ecx, %ecx\0A\09mov %ecx, $0", "=*m,~{ecx},~{flags}"(i32* %r)
+ %junk = alloca i32
+ call void @bar(i32* %junk)
+ br label %skip
+
+skip:
+ %0 = load i32* %r, align 128
+ ret i32 %0
+}
+
+; CHECK-LABEL: foo:
+; CHECK: pushl %ebp
+; CHECK: andl $-128, %esp
+; CHECK: xor %ecx, %ecx
+; CHECK-NEXT: mov %ecx, (%esi)
+; CHECK: movl (%esi), %eax
+; CHECK: popl %ebp
+; CHECK: ret
diff --git a/test/CodeGen/X86/inline-asm-tied.ll b/test/CodeGen/X86/inline-asm-tied.ll
index 597236e36281..fb5896b0ad6d 100644
--- a/test/CodeGen/X86/inline-asm-tied.ll
+++ b/test/CodeGen/X86/inline-asm-tied.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 -O0 -optimize-regalloc -regalloc=basic | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin9 -O0 -optimize-regalloc -regalloc=basic -no-integrated-as | FileCheck %s
; rdar://6992609
; CHECK: movl [[EDX:%e..]], 4(%esp)
diff --git a/test/CodeGen/X86/inline-asm-x-scalar.ll b/test/CodeGen/X86/inline-asm-x-scalar.ll
index 5a9628b3df74..64a7fe826472 100644
--- a/test/CodeGen/X86/inline-asm-x-scalar.ll
+++ b/test/CodeGen/X86/inline-asm-x-scalar.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah
+; RUN: llc < %s -march=x86 -mcpu=yonah -no-integrated-as
define void @test1() {
tail call void asm sideeffect "ucomiss $0", "x"( float 0x41E0000000000000)
diff --git a/test/CodeGen/X86/inline-asm.ll b/test/CodeGen/X86/inline-asm.ll
index d201ebdc85d1..5ec4f469df89 100644
--- a/test/CodeGen/X86/inline-asm.ll
+++ b/test/CodeGen/X86/inline-asm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86
+; RUN: llc < %s -march=x86 -no-integrated-as
define i32 @test1() nounwind {
; Dest is AX, dest type = i32.
@@ -59,3 +59,18 @@ entry:
%asm = tail call i32 asm sideeffect "", "={ax},i,~{eax},~{flags},~{rax}"(i64 61) nounwind
ret i32 %asm
}
+
+@test8_v = global i32 42
+
+define void @test8() {
+ call void asm sideeffect "${0:P}", "i"( i32* @test8_v )
+ ret void
+}
+
+define void @test9() {
+ call void asm sideeffect "${0:P}", "X"( i8* blockaddress(@test9, %bb) )
+ br label %bb
+
+bb:
+ ret void
+}
diff --git a/test/CodeGen/X86/ins_split_regalloc.ll b/test/CodeGen/X86/ins_split_regalloc.ll
new file mode 100644
index 000000000000..f5c5254fcec3
--- /dev/null
+++ b/test/CodeGen/X86/ins_split_regalloc.ll
@@ -0,0 +1,33 @@
+; RUN: llc -O1 -regalloc=greedy -mtriple=x86_64-apple-macosx -march x86-64 < %s -o - | FileCheck %s
+; Check that last chance split (RAGreedy::tryInstructionSplit) only splits
+; when this is beneficial; otherwise we end up with uncoalesced copies.
+; <rdar://problem/15570057>
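+; In this test the loaded address of @f is live across two calls and a tail
+; call; splitting its live range before every use would only introduce copies,
+; so it should be kept in a single (callee-saved) register, as checked below.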
+
+target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
+
+@f = external constant void (i32)*
+
+; CHECK-LABEL: test:
+; Get the address of f in the GOT.
+; CHECK: movq _f@{{[^,]+}}, [[F_ENTRY_ADDR:%[a-z0-9]+]]
+; Read the actual address of f.
+; CHECK: movq ([[F_ENTRY_ADDR]]), [[F_ADDR:%[a-z0-9]+]]
+; Check that we do not have useless split points before each call.
+; CHECK-NOT: movq
+; CHECK: callq *[[F_ADDR]]
+; Check that we do not have useless split points before each call.
+; CHECK-NOT: movq
+; CHECK: callq *[[F_ADDR]]
+; The last call is a tail call, so the address of the function cannot be kept
+; in a callee-saved register.
+; CHECK: movq [[F_ADDR]], [[F_ADDR_TC:%[a-z0-9]+]]
+; CHECK: popq [[F_ADDR]]
+; CHECK: jmpq *[[F_ADDR_TC]]
+define void @test(i32 %a, i32 %b, i32 %c) {
+entry:
+ %fct_f = load void (i32)** @f, align 8
+ tail call void %fct_f(i32 %a)
+ tail call void %fct_f(i32 %b)
+ tail call void %fct_f(i32 %c)
+ ret void
+}
diff --git a/test/CodeGen/X86/isel-sink.ll b/test/CodeGen/X86/isel-sink.ll
index 458f19dfc4f7..e4af9b67f95e 100644
--- a/test/CodeGen/X86/isel-sink.ll
+++ b/test/CodeGen/X86/isel-sink.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -march=x86 -addr-sink-using-gep=1 | FileCheck %s
define i32 @test(i32* %X, i32 %B) {
; CHECK-LABEL: test:
diff --git a/test/CodeGen/X86/jump_table_alias.ll b/test/CodeGen/X86/jump_table_alias.ll
new file mode 100644
index 000000000000..f3691fda221e
--- /dev/null
+++ b/test/CodeGen/X86/jump_table_alias.ll
@@ -0,0 +1,33 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck %s
+target triple = "x86_64-unknown-linux-gnu"
+define i32 @f() unnamed_addr jumptable {
+entry:
+ ret i32 0
+}
+
+@i = alias internal i32 ()* @f
+@j = alias i32 ()* @f
+
+define i32 @main(i32 %argc, i8** %argv) {
+ %temp = alloca i32 ()*, align 8
+ store i32 ()* @i, i32()** %temp, align 8
+; CHECK: movq $__llvm_jump_instr_table_0_1
+ %1 = load i32 ()** %temp, align 8
+; CHECK: movl $__llvm_jump_instr_table_0_1
+ %2 = call i32 ()* %1()
+ %3 = call i32 ()* @i()
+; CHECK: callq i
+ %4 = call i32 ()* @j()
+; CHECK: callq j
+ ret i32 %3
+}
+
+; There should only be one table, even though there are two GlobalAliases,
+; because they both alias the same value.
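+; Both @i and @j resolve to @f, so a single entry (__llvm_jump_instr_table_0_1)
+; is expected, and the indirect uses of @i in @main are lowered to references
+; to that one entry.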
+
+; CHECK: .globl __llvm_jump_instr_table_0_1
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_1,@function
+; CHECK: __llvm_jump_instr_table_0_1:
+; CHECK: jmp f@PLT
+
diff --git a/test/CodeGen/X86/jump_table_bitcast.ll b/test/CodeGen/X86/jump_table_bitcast.ll
new file mode 100644
index 000000000000..33a798f7a6b7
--- /dev/null
+++ b/test/CodeGen/X86/jump_table_bitcast.ll
@@ -0,0 +1,46 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck %s
+target triple = "x86_64-unknown-linux-gnu"
+define i32 @f() unnamed_addr jumptable {
+ ret i32 0
+}
+
+define i32 @g(i8* %a) unnamed_addr jumptable {
+ ret i32 0
+}
+
+define void @h(void ()* %func) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @main() {
+ %g = alloca i32 (...)*, align 8
+ store i32 (...)* bitcast (i32 ()* @f to i32 (...)*), i32 (...)** %g, align 8
+; CHECK: movq $__llvm_jump_instr_table_0_[[ENTRY:1|2|3]], (%rsp)
+; CHECK: movl $__llvm_jump_instr_table_0_[[ENTRY]], %ecx
+ %1 = load i32 (...)** %g, align 8
+ %call = call i32 (...)* %1()
+ call void (void ()*)* @h(void ()* bitcast (void (void ()*)* @h to void ()*))
+; CHECK: movl $__llvm_jump_instr_table_0_{{1|2|3}}, %edi
+; CHECK: callq h
+
+ %a = call i32 (i32*)* bitcast (i32 (i8*)* @g to i32(i32*)*)(i32* null)
+; CHECK: callq g
+ ret i32 %a
+}
+
+; CHECK: .globl __llvm_jump_instr_table_0_1
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_1,@function
+; CHECK: __llvm_jump_instr_table_0_1:
+; CHECK: jmp {{f|g|h}}@PLT
+; CHECK: .globl __llvm_jump_instr_table_0_2
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_2,@function
+; CHECK: __llvm_jump_instr_table_0_2:
+; CHECK: jmp {{f|g|h}}@PLT
+; CHECK: .globl __llvm_jump_instr_table_0_3
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_3,@function
+; CHECK: __llvm_jump_instr_table_0_3:
+; CHECK: jmp {{f|g|h}}@PLT
+
diff --git a/test/CodeGen/X86/jump_tables.ll b/test/CodeGen/X86/jump_tables.ll
new file mode 100644
index 000000000000..5a0aed0c1761
--- /dev/null
+++ b/test/CodeGen/X86/jump_tables.ll
@@ -0,0 +1,272 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck --check-prefix=SINGLE %s
+; RUN: llc <%s -jump-table-type=arity | FileCheck --check-prefix=ARITY %s
+; RUN: llc <%s -jump-table-type=simplified | FileCheck --check-prefix=SIMPL %s
+; RUN: llc <%s -jump-table-type=full | FileCheck --check-prefix=FULL %s
+
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.fun_struct = type { i32 (...)* }
+
+define void @indirect_fun() unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_match() unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @indirect_fun_i32() unnamed_addr jumptable {
+ ret i32 0
+}
+
+define i32 @indirect_fun_i32_1(i32 %a) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define i32 @indirect_fun_i32_2(i32 %a, i32 %b) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define i32* @indirect_fun_i32S_2(i32* %a, i32 %b) unnamed_addr jumptable {
+ ret i32* %a
+}
+
+define void @indirect_fun_struct(%struct.fun_struct %fs) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_fun(i32 (...)* %fun, i32 %a) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @indirect_fun_fun_ret(i32 (...)* %fun, i32 %a) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define void @indirect_fun_array([19 x i8] %a) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_vec(<3 x i32> %a) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_vec_2(<4 x float> %a) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @m(void ()* %fun) {
+ call void ()* %fun()
+ ret i32 0
+}
+
+define void ()* @get_fun() {
+ ret void ()* @indirect_fun
+; SINGLE: movl $__llvm_jump_instr_table_0_
+; ARITY: movl $__llvm_jump_instr_table_
+; SIMPL: movl $__llvm_jump_instr_table_
+; FULL: movl $__llvm_jump_instr_table_
+}
+
+define i32 @main(i32 %argc, i8** %argv) {
+ %f = call void ()* ()* @get_fun()
+ %a = call i32 @m(void ()* %f)
+ ret i32 %a
+}
+
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_1
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_1,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_1:
+; SINGLE-DAG: jmp indirect_fun_array@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_2,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_2:
+; SINGLE-DAG: jmp indirect_fun_i32_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_3
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_3,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_3:
+; SINGLE-DAG: jmp indirect_fun_vec_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_4
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_4,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_4:
+; SINGLE-DAG: jmp indirect_fun_i32S_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_5
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_5,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_5:
+; SINGLE-DAG: jmp indirect_fun_struct@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_6
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_6,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_6:
+; SINGLE-DAG: jmp indirect_fun_i32_1@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_7
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_7,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_7:
+; SINGLE-DAG: jmp indirect_fun_i32@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_8
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_8,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_8:
+; SINGLE-DAG: jmp indirect_fun_fun@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_9
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_9,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_9:
+; SINGLE-DAG: jmp indirect_fun_fun_ret@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_10
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_10,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_10:
+; SINGLE-DAG: jmp indirect_fun@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_11
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_11,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_11:
+; SINGLE-DAG: jmp indirect_fun_match@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_12
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_12,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_12:
+; SINGLE-DAG: jmp indirect_fun_vec@PLT
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+
+
+; ARITY-DAG: .globl __llvm_jump_instr_table_2_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_2_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_2_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: ud2
+; ARITY-DAG: .globl __llvm_jump_instr_table_0_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_0_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_0_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+; ARITY-DAG: .globl __llvm_jump_instr_table_1_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_1_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_1_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+
+; SIMPL-DAG: .globl __llvm_jump_instr_table_2_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_2_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_2_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: ud2
+; SIMPL-DAG: .globl __llvm_jump_instr_table_0_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_0_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_0_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_1_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_1_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_1_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_3_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_3_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_3_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_4_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_4_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_4_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+
+
+; FULL-DAG: .globl __llvm_jump_instr_table_10_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_10_1,@function
+; FULL-DAG:__llvm_jump_instr_table_10_1:
+; FULL-DAG: jmp indirect_fun_i32_1@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_9_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_9_1,@function
+; FULL-DAG:__llvm_jump_instr_table_9_1:
+; FULL-DAG: jmp indirect_fun_i32_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_7_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_7_1,@function
+; FULL-DAG:__llvm_jump_instr_table_7_1:
+; FULL-DAG: jmp indirect_fun_i32S_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_3_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_3_1,@function
+; FULL-DAG:__llvm_jump_instr_table_3_1:
+; FULL-DAG: jmp indirect_fun_vec_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_2_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_2_1,@function
+; FULL-DAG:__llvm_jump_instr_table_2_1:
+; FULL-DAG: jmp indirect_fun@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_8_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_8_1,@function
+; FULL-DAG:__llvm_jump_instr_table_8_1:
+; FULL-DAG: jmp indirect_fun_i32@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_1_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_1_1,@function
+; FULL-DAG:__llvm_jump_instr_table_1_1:
+; FULL-DAG: jmp indirect_fun_array@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_0_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_0_1,@function
+; FULL-DAG:__llvm_jump_instr_table_0_1:
+; FULL-DAG: jmp indirect_fun_vec@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_6_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_6_1,@function
+; FULL-DAG:__llvm_jump_instr_table_6_1:
+; FULL-DAG: jmp indirect_fun_struct@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_5_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_5_1,@function
+; FULL-DAG:__llvm_jump_instr_table_5_1:
+; FULL-DAG: jmp indirect_fun_fun@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_4_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_4_1,@function
+; FULL-DAG:__llvm_jump_instr_table_4_1:
+; FULL-DAG: jmp indirect_fun_fun_ret@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
diff --git a/test/CodeGen/X86/large-constants.ll b/test/CodeGen/X86/large-constants.ll
new file mode 100644
index 000000000000..157ecc4af66b
--- /dev/null
+++ b/test/CodeGen/X86/large-constants.ll
@@ -0,0 +1,67 @@
+; RUN: llc < %s -mtriple=x86_64-darwin -mcpu=corei7 | grep movabsq | count 3
+
+define i64 @constant_hoisting(i64 %o0, i64 %o1, i64 %o2, i64 %o3, i64 %o4, i64 %o5) {
+entry:
+ %l0 = and i64 %o0, -281474976710654
+ %c0 = icmp ne i64 %l0, 0
+ br i1 %c0, label %fail, label %bb1
+
+bb1:
+ %l1 = and i64 %o1, -281474976710654
+ %c1 = icmp ne i64 %l1, 0
+ br i1 %c1, label %fail, label %bb2
+
+bb2:
+ %l2 = and i64 %o2, -281474976710654
+ %c2 = icmp ne i64 %l2, 0
+ br i1 %c2, label %fail, label %bb3
+
+bb3:
+ %l3 = and i64 %o3, -281474976710654
+ %c3 = icmp ne i64 %l3, 0
+ br i1 %c3, label %fail, label %bb4
+
+bb4:
+ %l4 = and i64 %o4, -281474976710653
+ %c4 = icmp ne i64 %l4, 0
+ br i1 %c4, label %fail, label %bb5
+
+bb5:
+ %l5 = and i64 %o5, -281474976710652
+ %c5 = icmp ne i64 %l5, 0
+ br i1 %c5, label %fail, label %bb6
+
+bb6:
+ ret i64 %l5
+
+fail:
+ ret i64 -1
+}
+
+define void @constant_expressions() {
+entry:
+ %0 = load i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
+ %1 = load i64* inttoptr (i64 add (i64 51250129900, i64 8) to i64*)
+ %2 = load i64* inttoptr (i64 add (i64 51250129900, i64 16) to i64*)
+ %3 = load i64* inttoptr (i64 add (i64 51250129900, i64 24) to i64*)
+ %4 = add i64 %0, %1
+ %5 = add i64 %2, %3
+ %6 = add i64 %4, %5
+ store i64 %6, i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
+ ret void
+}
+
+
+define void @constant_expressions2() {
+entry:
+ %0 = load i64* inttoptr (i64 51250129900 to i64*)
+ %1 = load i64* inttoptr (i64 51250129908 to i64*)
+ %2 = load i64* inttoptr (i64 51250129916 to i64*)
+ %3 = load i64* inttoptr (i64 51250129924 to i64*)
+ %4 = add i64 %0, %1
+ %5 = add i64 %2, %3
+ %6 = add i64 %4, %5
+ store i64 %6, i64* inttoptr (i64 51250129900 to i64*)
+ ret void
+}
+
diff --git a/test/CodeGen/X86/libcall-sret.ll b/test/CodeGen/X86/libcall-sret.ll
new file mode 100644
index 000000000000..67b99ac239cd
--- /dev/null
+++ b/test/CodeGen/X86/libcall-sret.ll
@@ -0,0 +1,28 @@
+; RUN: llc -mtriple=i686-linux-gnu -o - %s | FileCheck %s
+
+@var = global i128 0
+
+; We were trying to convert the i128 operation into a libcall, but failing to
+; perform sret demotion when we couldn't return the result in registers. Make
+; sure we marshal the return properly:
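+; On i686 the i128 multiply becomes a call to __multi3, which returns its
+; 128-bit result through a hidden sret pointer passed as the first stack
+; argument.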
+
+define void @test_sret_libcall(i128 %l, i128 %r) {
+; CHECK-LABEL: test_sret_libcall:
+
+ ; Stack for call: 4 (sret ptr) + 16 (i128 %l) + 16 (i128 %r) = 36 bytes, so the
+ ; next logical (aligned) place for the actual sret data is %esp + 40.
+; CHECK: leal 40(%esp), [[SRET_ADDR:%[a-z]+]]
+; CHECK: movl [[SRET_ADDR]], (%esp)
+; CHECK: calll __multi3
+; CHECK-DAG: movl 40(%esp), [[RES0:%[a-z]+]]
+; CHECK-DAG: movl 44(%esp), [[RES1:%[a-z]+]]
+; CHECK-DAG: movl 48(%esp), [[RES2:%[a-z]+]]
+; CHECK-DAG: movl 52(%esp), [[RES3:%[a-z]+]]
+; CHECK-DAG: movl [[RES0]], var
+; CHECK-DAG: movl [[RES1]], var+4
+; CHECK-DAG: movl [[RES2]], var+8
+; CHECK-DAG: movl [[RES3]], var+12
+ %prod = mul i128 %l, %r
+ store i128 %prod, i128* @var
+ ret void
+}
diff --git a/test/CodeGen/X86/lit.local.cfg b/test/CodeGen/X86/lit.local.cfg
index 1637fa46536a..8ed58f119c4f 100644
--- a/test/CodeGen/X86/lit.local.cfg
+++ b/test/CodeGen/X86/lit.local.cfg
@@ -4,9 +4,8 @@
#
# It should be possible to remove this override once all the bots have cycled
# cleanly.
-config.suffixes = ['.ll', '.c', '.cpp', '.test', '.txt']
+config.suffixes = ['.ll', '.test', '.txt']
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/X86/live-out-reg-info.ll b/test/CodeGen/X86/live-out-reg-info.ll
index 8cd9774983bc..283ee3ae71a8 100644
--- a/test/CodeGen/X86/live-out-reg-info.ll
+++ b/test/CodeGen/X86/live-out-reg-info.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86-64 | grep testb
; Make sure dagcombine doesn't eliminate the comparison due
-; to an off-by-one bug with ComputeMaskedBits information.
+; to an off-by-one bug with computeKnownBits information.
declare void @qux()
diff --git a/test/CodeGen/X86/load-slice.ll b/test/CodeGen/X86/load-slice.ll
index 85fd7f03ef62..49eb13160bbc 100644
--- a/test/CodeGen/X86/load-slice.ll
+++ b/test/CodeGen/X86/load-slice.ll
@@ -6,7 +6,7 @@
%class.Complex = type { float, float }
-; Check that independant slices leads to independant loads then the slices leads to
+; Check that independent slices lead to independent loads, and that each slice can then be assigned to a
; different register file.
;
; The layout is:
diff --git a/test/CodeGen/X86/lower-bitcast.ll b/test/CodeGen/X86/lower-bitcast.ll
new file mode 100644
index 000000000000..f47161e5520c
--- /dev/null
+++ b/test/CodeGen/X86/lower-bitcast.ll
@@ -0,0 +1,188 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
+
+
+define double @test1(double %A) {
+ %1 = bitcast double %A to <2 x i32>
+ %add = add <2 x i32> %1, <i32 3, i32 5>
+ %2 = bitcast <2 x i32> %add to double
+ ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test1 into a
+; single paddd instruction. At the moment we produce the sequence
+; pshufd+paddd+pshufd. This is fixed with the widening legalization.
+;
+; CHECK-LABEL: test1
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: paddd
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test1
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE: paddd
+; CHECK-WIDE-NEXT: ret
+
+
+define double @test2(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %add = add <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test2
+; CHECK-NOT: movsd
+; CHECK: paddd
+; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test2
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE: paddd
+; CHECK-WIDE-NEXT: ret
+
+
+define i64 @test3(i64 %A) {
+ %1 = bitcast i64 %A to <2 x float>
+ %add = fadd <2 x float> %1, <float 3.0, float 5.0>
+ %2 = bitcast <2 x float> %add to i64
+ ret i64 %2
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: pshufd
+; CHECK: addps
+; CHECK-NOT: pshufd
+; CHECK: ret
+;
+; CHECK-WIDE-LABEL: test3
+; CHECK-WIDE-NOT: pshufd
+; CHECK-WIDE: addps
+; CHECK-WIDE-NOT: pshufd
+; CHECK-WIDE: ret
+
+
+define i64 @test4(i64 %A) {
+ %1 = bitcast i64 %A to <2 x i32>
+ %add = add <2 x i32> %1, <i32 3, i32 5>
+ %2 = bitcast <2 x i32> %add to i64
+ ret i64 %2
+}
+; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
+; Ideally, we should fold that sequence into a single paddd. This is fixed with
+; the widening legalization.
+;
+; CHECK-LABEL: test4
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK: ret
+;
+; CHECK-WIDE-LABEL: test4
+; CHECK-WIDE: movd %{{rdi|rcx}},
+; CHECK-WIDE-NEXT: paddd
+; CHECK-WIDE-NEXT: movd {{.*}}, %rax
+; CHECK-WIDE: ret
+
+
+define double @test5(double %A) {
+ %1 = bitcast double %A to <2 x float>
+ %add = fadd <2 x float> %1, <float 3.0, float 5.0>
+ %2 = bitcast <2 x float> %add to double
+ ret double %2
+}
+; CHECK-LABEL: test5
+; CHECK: addps
+; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test5
+; CHECK-WIDE: addps
+; CHECK-WIDE-NEXT: ret
+
+
+define double @test6(double %A) {
+ %1 = bitcast double %A to <4 x i16>
+ %add = add <4 x i16> %1, <i16 3, i16 4, i16 5, i16 6>
+ %2 = bitcast <4 x i16> %add to double
+ ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test6 into a
+; single paddw instruction. This is fixed with the widening legalization.
+;
+; CHECK-LABEL: test6
+; CHECK-NOT: movsd
+; CHECK: punpcklwd
+; CHECK-NEXT: paddw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test6
+; CHECK-WIDE-NOT: mov
+; CHECK-WIDE-NOT: punpcklwd
+; CHECK-WIDE: paddw
+; CHECK-WIDE-NEXT: ret
+
+
+define double @test7(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %add = add <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test7
+; CHECK-NOT: movsd
+; CHECK-NOT: punpcklwd
+; CHECK: paddw
+; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test7
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklwd
+; CHECK-WIDE: paddw
+; CHECK-WIDE-NEXT: ret
+
+
+define double @test8(double %A) {
+ %1 = bitcast double %A to <8 x i8>
+ %add = add <8 x i8> %1, <i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10>
+ %2 = bitcast <8 x i8> %add to double
+ ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test8 into a
+; single paddb instruction. At the moment we produce the sequence
+; punpcklbw+paddb+pshufb. This is fixed with the widening legalization.
+;
+; CHECK-LABEL: test8
+; CHECK-NOT: movsd
+; CHECK: punpcklbw
+; CHECK-NEXT: paddb
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test8
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklbw
+; CHECK-WIDE: paddb
+; CHECK-WIDE-NEXT: ret
+
+
+define double @test9(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %add = add <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test9
+; CHECK-NOT: movsd
+; CHECK-NOT: punpcklbw
+; CHECK: paddb
+; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test9
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklbw
+; CHECK-WIDE: paddb
+; CHECK-WIDE-NEXT: ret
+
diff --git a/test/CodeGen/X86/lower-vec-shift.ll b/test/CodeGen/X86/lower-vec-shift.ll
new file mode 100644
index 000000000000..c28f82a0ef2a
--- /dev/null
+++ b/test/CodeGen/X86/lower-vec-shift.ll
@@ -0,0 +1,125 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
+
+
+; Verify that the following shifts are lowered into a sequence of two shifts plus
+; a blend. On pre-AVX2 targets, instead of scalarizing a logical or arithmetic
+; packed shift right by a constant build_vector, the backend should always try to
+; emit the simpler sequence of two shifts plus a blend when possible.
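+; For example, in @test1 lanes 0-1 are shifted by 3 and lanes 2-7 by 2, so the
+; expected lowering is one uniform shift by 3, one uniform shift by 2, and a
+; movss blend that takes lanes 0-1 from the shift-by-3 result and the remaining
+; lanes from the shift-by-2 result.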
+
+define <8 x i16> @test1(<8 x i16> %a) {
+ %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+ ret <8 x i16> %lshr
+}
+; CHECK-LABEL: test1
+; SSE: psrlw
+; SSE-NEXT: psrlw
+; SSE-NEXT: movss
+; AVX: vpsrlw
+; AVX-NEXT: vpsrlw
+; AVX-NEXT: vmovss
+; AVX2: vpsrlw
+; AVX2-NEXT: vpsrlw
+; AVX2-NEXT: vmovss
+; CHECK: ret
+
+
+define <8 x i16> @test2(<8 x i16> %a) {
+ %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
+ ret <8 x i16> %lshr
+}
+; CHECK-LABEL: test2
+; SSE: psrlw
+; SSE-NEXT: psrlw
+; SSE-NEXT: movsd
+; AVX: vpsrlw
+; AVX-NEXT: vpsrlw
+; AVX-NEXT: vmovsd
+; AVX2: vpsrlw
+; AVX2-NEXT: vpsrlw
+; AVX2-NEXT: vmovsd
+; CHECK: ret
+
+
+define <4 x i32> @test3(<4 x i32> %a) {
+ %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
+ ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test3
+; SSE: psrld
+; SSE-NEXT: psrld
+; SSE-NEXT: movss
+; AVX: vpsrld
+; AVX-NEXT: vpsrld
+; AVX-NEXT: vmovss
+; AVX2: vpsrlvd
+; CHECK: ret
+
+
+define <4 x i32> @test4(<4 x i32> %a) {
+ %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
+ ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test4
+; SSE: psrld
+; SSE-NEXT: psrld
+; SSE-NEXT: movsd
+; AVX: vpsrld
+; AVX-NEXT: vpsrld
+; AVX-NEXT: vmovsd
+; AVX2: vpsrlvd
+; CHECK: ret
+
+
+define <8 x i16> @test5(<8 x i16> %a) {
+ %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+ ret <8 x i16> %lshr
+}
+
+define <8 x i16> @test6(<8 x i16> %a) {
+ %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
+ ret <8 x i16> %lshr
+}
+; CHECK-LABEL: test6
+; SSE: psraw
+; SSE-NEXT: psraw
+; SSE-NEXT: movsd
+; AVX: vpsraw
+; AVX-NEXT: vpsraw
+; AVX-NEXT: vmovsd
+; AVX2: vpsraw
+; AVX2-NEXT: vpsraw
+; AVX2-NEXT: vmovsd
+; CHECK: ret
+
+
+define <4 x i32> @test7(<4 x i32> %a) {
+ %lshr = ashr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
+ ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test7
+; SSE: psrad
+; SSE-NEXT: psrad
+; SSE-NEXT: movss
+; AVX: vpsrad
+; AVX-NEXT: vpsrad
+; AVX-NEXT: vmovss
+; AVX2: vpsravd
+; CHECK: ret
+
+
+define <4 x i32> @test8(<4 x i32> %a) {
+ %lshr = ashr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
+ ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test8
+; SSE: psrad
+; SSE-NEXT: psrad
+; SSE-NEXT: movsd
+; AVX: vpsrad
+; AVX-NEXT: vpsrad
+; AVX-NEXT: vmovsd
+; AVX2: vpsravd
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/lsr-interesting-step.ll b/test/CodeGen/X86/lsr-interesting-step.ll
index d4a7ac7da12d..8ea3c53de41e 100644
--- a/test/CodeGen/X86/lsr-interesting-step.ll
+++ b/test/CodeGen/X86/lsr-interesting-step.ll
@@ -3,26 +3,24 @@
; The inner loop should require only one add (and no leas either).
; rdar://8100380
-; CHECK: BB0_3:
-; CHECK-NEXT: movb $0, flags(%rdx)
-; CHECK-NEXT: addq %rax, %rdx
-; CHECK-NEXT: cmpq $8192, %rdx
+; CHECK: BB0_2:
+; CHECK-NEXT: movb $0, flags(%rcx)
+; CHECK-NEXT: addq %rax, %rcx
+; CHECK-NEXT: cmpq $8192, %rcx
; CHECK-NEXT: jl
@flags = external global [8192 x i8], align 16 ; <[8192 x i8]*> [#uses=1]
define void @foo() nounwind {
entry:
- %tmp = icmp slt i64 2, 8192 ; <i1> [#uses=1]
- br i1 %tmp, label %bb, label %bb21
+ br label %bb
bb: ; preds = %entry
br label %bb7
bb7: ; preds = %bb, %bb17
%tmp8 = phi i64 [ %tmp18, %bb17 ], [ 2, %bb ] ; <i64> [#uses=2]
- %tmp9 = icmp slt i64 2, 8192 ; <i1> [#uses=1]
- br i1 %tmp9, label %bb10, label %bb17
+ br label %bb10
bb10: ; preds = %bb7
br label %bb11
diff --git a/test/CodeGen/X86/lsr-normalization.ll b/test/CodeGen/X86/lsr-normalization.ll
index bbf8f010efde..2775558b0cfd 100644
--- a/test/CodeGen/X86/lsr-normalization.ll
+++ b/test/CodeGen/X86/lsr-normalization.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
+; REQUIRES: asserts
+; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=ASM
+; RUN: llc -debug -o /dev/null < %s -march=x86-64 2>&1 | FileCheck %s --check-prefix=DBG
; rdar://8168938
; This testcase involves SCEV normalization with the exit value from
@@ -6,8 +8,9 @@
; loop. The expression should be properly normalized and simplified,
; and require only a single division.
-; CHECK: div
-; CHECK-NOT: div
+; DBG-NOT: DISCARDING (NORMALIZATION ISN'T INVERTIBLE)
+; ASM: div
+; ASM-NOT: div
%0 = type { %0*, %0* }
diff --git a/test/CodeGen/X86/lzcnt-tzcnt.ll b/test/CodeGen/X86/lzcnt-tzcnt.ll
new file mode 100644
index 000000000000..07e4b9d8ce61
--- /dev/null
+++ b/test/CodeGen/X86/lzcnt-tzcnt.ll
@@ -0,0 +1,447 @@
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+bmi,+lzcnt | FileCheck %s
+
+; LZCNT and TZCNT always produce the operand size (in bits) when the input
+; operand is zero. This test verifies that we exploit that fact to select a
+; single LZCNT/TZCNT, since the 'icmp+select' sequence is redundant in every
+; function defined below.
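+;
+; Worked example (illustrative only): in test2_ctlz below, when %v is zero the
+; select picks 32, which is exactly what LZCNT returns for a zero 32-bit
+; operand; when %v is non-zero the select picks the ctlz result, which is
+; again what LZCNT computes. The whole icmp+select therefore collapses to a
+; single 'lzcnt' with no compare or conditional move, and the same reasoning
+; applies to TZCNT and the cttz tests.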
+
+
+define i16 @test1_ctlz(i16 %v) {
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 %v, 0
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test1_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test2_ctlz(i32 %v) {
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 %v, 0
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test2_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test3_ctlz(i64 %v) {
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 %v, 0
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test3_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test4_ctlz(i16 %v) {
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test4_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test5_ctlz(i32 %v) {
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test5_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test6_ctlz(i64 %v) {
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test6_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test7_ctlz(i16 %v) {
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 %cnt, i16 16
+ ret i16 %cond
+}
+; CHECK-LABEL: test7_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test8_ctlz(i32 %v) {
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 %cnt, i32 32
+ ret i32 %cond
+}
+; CHECK-LABEL: test8_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test9_ctlz(i64 %v) {
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 %cnt, i64 64
+ ret i64 %cond
+}
+; CHECK-LABEL: test9_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test10_ctlz(i16* %ptr) {
+ %v = load i16* %ptr
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 %v, 0
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test10_ctlz
+; CHECK-NOT: movw
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test11_ctlz(i32* %ptr) {
+ %v = load i32* %ptr
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 %v, 0
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test11_ctlz
+; CHECK-NOT: movd
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test12_ctlz(i64* %ptr) {
+ %v = load i64* %ptr
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 %v, 0
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test12_ctlz
+; CHECK-NOT: movq
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test13_ctlz(i16* %ptr) {
+ %v = load i16* %ptr
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test13_ctlz
+; CHECK-NOT: movw
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test14_ctlz(i32* %ptr) {
+ %v = load i32* %ptr
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test14_ctlz
+; CHECK-NOT: movd
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test15_ctlz(i64* %ptr) {
+ %v = load i64* %ptr
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test15_ctlz
+; CHECK-NOT: movq
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test16_ctlz(i16* %ptr) {
+ %v = load i16* %ptr
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 %cnt, i16 16
+ ret i16 %cond
+}
+; CHECK-LABEL: test16_ctlz
+; CHECK-NOT: movw
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test17_ctlz(i32* %ptr) {
+ %v = load i32* %ptr
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 %cnt, i32 32
+ ret i32 %cond
+}
+; CHECK-LABEL: test17_ctlz
+; CHECK-NOT: movd
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test18_ctlz(i64* %ptr) {
+ %v = load i64* %ptr
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 %cnt, i64 64
+ ret i64 %cond
+}
+; CHECK-LABEL: test18_ctlz
+; CHECK-NOT: movq
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test1_cttz(i16 %v) {
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 %v, 0
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test1_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test2_cttz(i32 %v) {
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 %v, 0
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test2_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test3_cttz(i64 %v) {
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 %v, 0
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test3_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test4_cttz(i16 %v) {
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test4_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test5_cttz(i32 %v) {
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test5_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test6_cttz(i64 %v) {
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test6_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test7_cttz(i16 %v) {
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 %cnt, i16 16
+ ret i16 %cond
+}
+; CHECK-LABEL: test7_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test8_cttz(i32 %v) {
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 %cnt, i32 32
+ ret i32 %cond
+}
+; CHECK-LABEL: test8_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test9_cttz(i64 %v) {
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 %cnt, i64 64
+ ret i64 %cond
+}
+; CHECK-LABEL: test9_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test10_cttz(i16* %ptr) {
+ %v = load i16* %ptr
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 %v, 0
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test10_cttz
+; CHECK-NOT: movw
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test11_cttz(i32* %ptr) {
+ %v = load i32* %ptr
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 %v, 0
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test11_cttz
+; CHECK-NOT: movd
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test12_cttz(i64* %ptr) {
+ %v = load i64* %ptr
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 %v, 0
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test12_cttz
+; CHECK-NOT: movq
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test13_cttz(i16* %ptr) {
+ %v = load i16* %ptr
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test13_cttz
+; CHECK-NOT: movw
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test14_cttz(i32* %ptr) {
+ %v = load i32* %ptr
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test14_cttz
+; CHECK-NOT: movd
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test15_cttz(i64* %ptr) {
+ %v = load i64* %ptr
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test15_cttz
+; CHECK-NOT: movq
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test16_cttz(i16* %ptr) {
+ %v = load i16* %ptr
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp eq i16 0, %v
+ %cond = select i1 %tobool, i16 %cnt, i16 16
+ ret i16 %cond
+}
+; CHECK-LABEL: test16_cttz
+; CHECK-NOT: movw
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test17_cttz(i32* %ptr) {
+ %v = load i32* %ptr
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp eq i32 0, %v
+ %cond = select i1 %tobool, i32 %cnt, i32 32
+ ret i32 %cond
+}
+; CHECK-LABEL: test17_cttz
+; CHECK-NOT: movd
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test18_cttz(i64* %ptr) {
+ %v = load i64* %ptr
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 0, %v
+ %cond = select i1 %tobool, i64 %cnt, i64 64
+ ret i64 %cond
+}
+; CHECK-LABEL: test18_cttz
+; CHECK-NOT: movq
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+declare i64 @llvm.cttz.i64(i64, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
+declare i16 @llvm.cttz.i16(i16, i1)
+declare i64 @llvm.ctlz.i64(i64, i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare i16 @llvm.ctlz.i16(i16, i1)
+
diff --git a/test/CodeGen/X86/machine-cp.ll b/test/CodeGen/X86/machine-cp.ll
index f04e111714ae..0006b6ea7133 100644
--- a/test/CodeGen/X86/machine-cp.ll
+++ b/test/CodeGen/X86/machine-cp.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=nocona < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=nocona -verify-machineinstrs < %s | FileCheck %s
; After tail duplication, two copies in an early exit BB can be cancelled out.
; rdar://10640363
@@ -34,3 +34,27 @@ entry:
%tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
ret <8 x i16> %tmp8
}
+
+define i32 @t3(i64 %a, i64 %b) nounwind {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: je [[LABEL:.*BB.*]]
+ %cmp1 = icmp eq i64 %b, 0
+ br i1 %cmp1, label %while.end, label %while.body
+
+; CHECK: [[LABEL]]:
+; CHECK-NOT: mov
+; CHECK: ret
+
+while.body: ; preds = %entry, %while.body
+ %a.addr.03 = phi i64 [ %b.addr.02, %while.body ], [ %a, %entry ]
+ %b.addr.02 = phi i64 [ %rem, %while.body ], [ %b, %entry ]
+ %rem = srem i64 %a.addr.03, %b.addr.02
+ %cmp = icmp eq i64 %rem, 0
+ br i1 %cmp, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ %a.addr.0.lcssa = phi i64 [ %a, %entry ], [ %b.addr.02, %while.body ]
+ %t = trunc i64 %a.addr.0.lcssa to i32
+ ret i32 %t
+}
diff --git a/test/CodeGen/X86/macho-comdat.ll b/test/CodeGen/X86/macho-comdat.ll
new file mode 100644
index 000000000000..3c2d997b4594
--- /dev/null
+++ b/test/CodeGen/X86/macho-comdat.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -mtriple x86_64-apple-darwin < %s 2> %t
+; RUN: FileCheck < %t %s
+
+$f = comdat any
+@v = global i32 0, comdat $f
+; CHECK: LLVM ERROR: MachO doesn't support COMDATs, 'f' cannot be lowered.
diff --git a/test/CodeGen/X86/masked-iv-safe.ll b/test/CodeGen/X86/masked-iv-safe.ll
index 4a4d178f6e41..9ddc84708d5b 100644
--- a/test/CodeGen/X86/masked-iv-safe.ll
+++ b/test/CodeGen/X86/masked-iv-safe.ll
@@ -5,7 +5,7 @@
; CHECK-LABEL: count_up
; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: inc
+; CHECK: incq
; CHECK-NOT: {{and|movz|sar|shl}}
; CHECK: jne
define void @count_up(double* %d, i64 %n) nounwind {
@@ -71,7 +71,7 @@ return:
; CHECK-LABEL: count_up_signed
; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: inc
+; CHECK: incq
; CHECK-NOT: {{and|movz|sar|shl}}
; CHECK: jne
define void @count_up_signed(double* %d, i64 %n) nounwind {
@@ -174,7 +174,7 @@ return:
; CHECK-LABEL: another_count_down
; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: decq
+; CHECK: addq $-8,
; CHECK-NOT: {{and|movz|sar|shl}}
; CHECK: jne
define void @another_count_down(double* %d, i64 %n) nounwind {
diff --git a/test/CodeGen/X86/mature-mc-support.ll b/test/CodeGen/X86/mature-mc-support.ll
new file mode 100644
index 000000000000..9d956f46beca
--- /dev/null
+++ b/test/CodeGen/X86/mature-mc-support.ll
@@ -0,0 +1,18 @@
+; Test that inline assembly is parsed by the MC layer when MC support is mature
+; (even when the output is assembly).
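+; The module-level asm below contains a directive that no assembler is
+; expected to implement, so llc should fail with "Error parsing inline asm"
+; regardless of whether the output is assembly or an object file.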
+
+; RUN: not llc -march=x86 < %s > /dev/null 2> %t1
+; RUN: FileCheck %s < %t1
+
+; RUN: not llc -march=x86 -filetype=obj < %s > /dev/null 2> %t2
+; RUN: FileCheck %s < %t2
+
+; RUN: not llc -march=x86-64 < %s > /dev/null 2> %t3
+; RUN: FileCheck %s < %t3
+
+; RUN: not llc -march=x86-64 -filetype=obj < %s > /dev/null 2> %t4
+; RUN: FileCheck %s < %t4
+
+module asm " .this_directive_is_very_unlikely_to_exist"
+
+; CHECK: LLVM ERROR: Error parsing inline asm
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index cb0797d3eb33..0a534926c6cd 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -22,8 +22,9 @@ bb: ; preds = %entry
return: ; preds = %entry
ret void
; CHECK-LABEL: memcmp2:
-; CHECK: movw ([[A0:%rdi|%rcx]]), %ax
-; CHECK: cmpw ([[A1:%rsi|%rdx]]), %ax
+; CHECK: movzwl
+; CHECK-NEXT: movzwl
+; CHECK-NEXT: cmpl
; NOBUILTIN-LABEL: memcmp2:
; NOBUILTIN: callq
}
@@ -41,7 +42,8 @@ bb: ; preds = %entry
return: ; preds = %entry
ret void
; CHECK-LABEL: memcmp2a:
-; CHECK: cmpw $28527, ([[A0]])
+; CHECK: movzwl
+; CHECK-NEXT: cmpl $28527,
}
@@ -58,8 +60,8 @@ bb: ; preds = %entry
return: ; preds = %entry
ret void
; CHECK-LABEL: memcmp4:
-; CHECK: movl ([[A0]]), %eax
-; CHECK: cmpl ([[A1]]), %eax
+; CHECK: movl
+; CHECK-NEXT: cmpl
}
define void @memcmp4a(i8* %X, i32* nocapture %P) nounwind {
@@ -75,7 +77,7 @@ bb: ; preds = %entry
return: ; preds = %entry
ret void
; CHECK-LABEL: memcmp4a:
-; CHECK: cmpl $1869573999, ([[A0]])
+; CHECK: cmpl $1869573999,
}
define void @memcmp8(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
@@ -91,8 +93,8 @@ bb: ; preds = %entry
return: ; preds = %entry
ret void
; CHECK-LABEL: memcmp8:
-; CHECK: movq ([[A0]]), %rax
-; CHECK: cmpq ([[A1]]), %rax
+; CHECK: movq
+; CHECK: cmpq
}
define void @memcmp8a(i8* %X, i32* nocapture %P) nounwind {
@@ -108,7 +110,7 @@ bb: ; preds = %entry
return: ; preds = %entry
ret void
; CHECK-LABEL: memcmp8a:
-; CHECK: movabsq $8029759185026510694, %rax
-; CHECK: cmpq %rax, ([[A0]])
+; CHECK: movabsq $8029759185026510694,
+; CHECK: cmpq
}
diff --git a/test/CodeGen/X86/memset-2.ll b/test/CodeGen/X86/memset-2.ll
index d0a3c7a74bce..a87ef2e15a5a 100644
--- a/test/CodeGen/X86/memset-2.ll
+++ b/test/CodeGen/X86/memset-2.ll
@@ -5,7 +5,7 @@ declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
define fastcc void @t1() nounwind {
entry:
; CHECK-LABEL: t1:
-; CHECK: calll _memset
+; CHECK: calll L_memset$stub
call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 188, i32 1, i1 false)
unreachable
}
@@ -13,7 +13,7 @@ entry:
define fastcc void @t2(i8 signext %c) nounwind {
entry:
; CHECK-LABEL: t2:
-; CHECK: calll _memset
+; CHECK: calll L_memset$stub
call void @llvm.memset.p0i8.i32(i8* undef, i8 %c, i32 76, i32 1, i1 false)
unreachable
}
diff --git a/test/CodeGen/X86/merge_store.ll b/test/CodeGen/X86/merge_store.ll
index 940688c6252f..f98963d8e90e 100644
--- a/test/CodeGen/X86/merge_store.ll
+++ b/test/CodeGen/X86/merge_store.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -addr-sink-using-gep=1 | FileCheck %s
define void @merge_store(i32* nocapture %a) {
; CHECK-LABEL: merge_store:
diff --git a/test/CodeGen/X86/misched-aa-colored.ll b/test/CodeGen/X86/misched-aa-colored.ll
new file mode 100644
index 000000000000..52a5e5d25d11
--- /dev/null
+++ b/test/CodeGen/X86/misched-aa-colored.ll
@@ -0,0 +1,189 @@
+; RUN: llc < %s -mcpu=x86-64 -enable-misched -misched-bottomup=0 -misched-topdown=0 -misched=shuffle -enable-aa-sched-mi | FileCheck %s
+; REQUIRES: asserts
+; -misched=shuffle is only available in asserts-enabled (!NDEBUG) builds!
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%"class.llvm::SDNode.10.610.970.1930.2050.2290.4090" = type { %"class.llvm::FoldingSetImpl::Node.0.600.960.1920.2040.2280.4080", %"class.llvm::ilist_node.2.602.962.1922.2042.2282.4082", i16, [2 x i8], i32, %"class.llvm::SDUse.4.604.964.1924.2044.2284.4084"*, %"struct.llvm::EVT.8.608.968.1928.2048.2288.4088"*, %"class.llvm::SDUse.4.604.964.1924.2044.2284.4084"*, i16, i16, %"class.llvm::DebugLoc.9.609.969.1929.2049.2289.4089", i32 }
+%"class.llvm::FoldingSetImpl::Node.0.600.960.1920.2040.2280.4080" = type { i8* }
+%"class.llvm::ilist_node.2.602.962.1922.2042.2282.4082" = type { %"class.llvm::ilist_half_node.1.601.961.1921.2041.2281.4081", %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"* }
+%"class.llvm::ilist_half_node.1.601.961.1921.2041.2281.4081" = type { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"* }
+%"struct.llvm::EVT.8.608.968.1928.2048.2288.4088" = type { %"class.llvm::MVT.5.605.965.1925.2045.2285.4085", %"class.llvm::Type.7.607.967.1927.2047.2287.4087"* }
+%"class.llvm::MVT.5.605.965.1925.2045.2285.4085" = type { i32 }
+%"class.llvm::SDUse.4.604.964.1924.2044.2284.4084" = type { %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, %"class.llvm::SDUse.4.604.964.1924.2044.2284.4084"**, %"class.llvm::SDUse.4.604.964.1924.2044.2284.4084"* }
+%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083" = type { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 }
+%"class.llvm::DebugLoc.9.609.969.1929.2049.2289.4089" = type { i32, i32 }
+%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184" = type { %"class.llvm::TargetMachine.17.617.977.1937.2057.2297.4097"*, %"class.llvm::TargetSelectionDAGInfo.18.618.978.1938.2058.2298.4098"*, %"class.llvm::TargetTransformInfo.19.619.979.1939.2059.2299.4099"*, %"class.llvm::TargetLowering.51.651.1011.1971.2091.2331.4131"*, %"class.llvm::MachineFunction.52.652.1012.1972.2092.2332.4132"*, %"class.llvm::LLVMContext.6.606.966.1926.2046.2286.4086"*, i32, %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090", %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", %"struct.llvm::ilist.55.655.1015.1975.2095.2335.4135", %"class.llvm::RecyclingAllocator.65.665.1025.1985.2105.2345.4145", %"class.llvm::FoldingSet.67.667.1027.1987.2107.2347.4147", %"class.llvm::BumpPtrAllocator.64.664.1024.1984.2104.2344.4144", %"class.llvm::BumpPtrAllocator.64.664.1024.1984.2104.2344.4144", %"class.llvm::SDDbgInfo.79.679.1039.1999.2119.2359.4159"*, i8, %"struct.llvm::SelectionDAG::DAGUpdateListener.80.680.1040.2000.2120.2360.4160"*, %"class.std::map.43.84.684.1044.2004.2124.2364.4164", %"class.llvm::FoldingSet.50.85.685.1045.2005.2125.2365.4165", %"class.std::vector.51.89.689.1049.2009.2129.2369.4169", %"class.std::vector.56.92.692.1052.2012.2132.2372.4172", %"class.std::map.61.96.696.1056.2016.2136.2376.4176", %"class.llvm::StringMap.99.699.1059.2019.2139.2379.4179", %"class.std::map.66.103.703.1063.2023.2143.2383.4183" }
+%"class.llvm::TargetMachine.17.617.977.1937.2057.2297.4097" = type { i32 (...)**, %"class.llvm::Target.11.611.971.1931.2051.2291.4091"*, %"class.std::basic_string.13.613.973.1933.2053.2293.4093", %"class.std::basic_string.13.613.973.1933.2053.2293.4093", %"class.std::basic_string.13.613.973.1933.2053.2293.4093", %"class.llvm::MCCodeGenInfo.14.614.974.1934.2054.2294.4094"*, %"class.llvm::MCAsmInfo.15.615.975.1935.2055.2295.4095"*, i8, %"class.llvm::TargetOptions.16.616.976.1936.2056.2296.4096" }
+%"class.llvm::Target.11.611.971.1931.2051.2291.4091" = type opaque
+%"class.std::basic_string.13.613.973.1933.2053.2293.4093" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.12.612.972.1932.2052.2292.4092" }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.12.612.972.1932.2052.2292.4092" = type { i8* }
+%"class.llvm::MCCodeGenInfo.14.614.974.1934.2054.2294.4094" = type opaque
+%"class.llvm::MCAsmInfo.15.615.975.1935.2055.2295.4095" = type opaque
+%"class.llvm::TargetOptions.16.616.976.1936.2056.2296.4096" = type { [2 x i8], i32, i8, %"class.std::basic_string.13.613.973.1933.2053.2293.4093", i32, i32 }
+%"class.llvm::TargetSelectionDAGInfo.18.618.978.1938.2058.2298.4098" = type opaque
+%"class.llvm::TargetTransformInfo.19.619.979.1939.2059.2299.4099" = type opaque
+%"class.llvm::TargetLowering.51.651.1011.1971.2091.2331.4131" = type { %"class.llvm::TargetLoweringBase.50.650.1010.1970.2090.2330.4130" }
+%"class.llvm::TargetLoweringBase.50.650.1010.1970.2090.2330.4130" = type { i32 (...)**, %"class.llvm::TargetMachine.17.617.977.1937.2057.2297.4097"*, %"class.llvm::DataLayout.35.635.995.1955.2075.2315.4115"*, %"class.llvm::TargetLoweringObjectFile.36.636.996.1956.2076.2316.4116"*, i8, i8, i8, i8, %"class.llvm::DenseMap.11.38.638.998.1958.2078.2318.4118", i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, [58 x %"class.llvm::TargetRegisterClass.39.639.999.1959.2079.2319.4119"*], [58 x i8], [58 x %"class.llvm::MVT.5.605.965.1925.2045.2285.4085"], [58 x %"class.llvm::TargetRegisterClass.39.639.999.1959.2079.2319.4119"*], [58 x i8], [58 x %"class.llvm::MVT.5.605.965.1925.2045.2285.4085"], [58 x [188 x i8]], [58 x [4 x i8]], [58 x [58 x i8]], [58 x [5 x i8]], [24 x [4 x i32]], %"class.llvm::TargetLoweringBase::ValueTypeActionImpl.40.640.1000.1960.2080.2320.4120", %"class.std::vector.15.44.644.1004.1964.2084.2324.4124", [24 x i8], %"class.std::map.49.649.1009.1969.2089.2329.4129", [341 x i8*], [341 x i32], [341 x i32], i32, i32, i32, i32, i32, i32, i8 }
+%"class.llvm::DataLayout.35.635.995.1955.2075.2315.4115" = type { [28 x i8], i8, i32, i32, %"class.llvm::SmallVector.23.623.983.1943.2063.2303.4103", %"class.llvm::SmallVector.3.31.631.991.1951.2071.2311.4111", %"class.llvm::DenseMap.34.634.994.1954.2074.2314.4114", i8* }
+%"class.llvm::SmallVector.23.623.983.1943.2063.2303.4103" = type { [25 x i8], %"struct.llvm::SmallVectorStorage.22.622.982.1942.2062.2302.4102" }
+%"struct.llvm::SmallVectorStorage.22.622.982.1942.2062.2302.4102" = type { [7 x %"struct.llvm::AlignedCharArrayUnion.21.621.981.1941.2061.2301.4101"] }
+%"struct.llvm::AlignedCharArrayUnion.21.621.981.1941.2061.2301.4101" = type { %"struct.llvm::AlignedCharArray.20.620.980.1940.2060.2300.4100" }
+%"struct.llvm::AlignedCharArray.20.620.980.1940.2060.2300.4100" = type { [1 x i8] }
+%"class.llvm::SmallVector.3.31.631.991.1951.2071.2311.4111" = type { %"class.llvm::SmallVectorImpl.4.29.629.989.1949.2069.2309.4109", %"struct.llvm::SmallVectorStorage.9.30.630.990.1950.2070.2310.4110" }
+%"class.llvm::SmallVectorImpl.4.29.629.989.1949.2069.2309.4109" = type { %"class.llvm::SmallVectorTemplateBase.5.28.628.988.1948.2068.2308.4108" }
+%"class.llvm::SmallVectorTemplateBase.5.28.628.988.1948.2068.2308.4108" = type { %"class.llvm::SmallVectorTemplateCommon.6.27.627.987.1947.2067.2307.4107" }
+%"class.llvm::SmallVectorTemplateCommon.6.27.627.987.1947.2067.2307.4107" = type { %"class.llvm::SmallVectorBase.24.624.984.1944.2064.2304.4104", %"struct.llvm::AlignedCharArrayUnion.7.26.626.986.1946.2066.2306.4106" }
+%"class.llvm::SmallVectorBase.24.624.984.1944.2064.2304.4104" = type { i8*, i8*, i8* }
+%"struct.llvm::AlignedCharArrayUnion.7.26.626.986.1946.2066.2306.4106" = type { %"struct.llvm::AlignedCharArray.8.25.625.985.1945.2065.2305.4105" }
+%"struct.llvm::AlignedCharArray.8.25.625.985.1945.2065.2305.4105" = type { [8 x i8] }
+%"struct.llvm::SmallVectorStorage.9.30.630.990.1950.2070.2310.4110" = type { [15 x %"struct.llvm::AlignedCharArrayUnion.7.26.626.986.1946.2066.2306.4106"] }
+%"class.llvm::DenseMap.34.634.994.1954.2074.2314.4114" = type { %"struct.std::pair.10.33.633.993.1953.2073.2313.4113"*, i32, i32, i32 }
+%"struct.std::pair.10.33.633.993.1953.2073.2313.4113" = type { i32, %"struct.llvm::PointerAlignElem.32.632.992.1952.2072.2312.4112" }
+%"struct.llvm::PointerAlignElem.32.632.992.1952.2072.2312.4112" = type { i32, i32, i32, i32 }
+%"class.llvm::TargetLoweringObjectFile.36.636.996.1956.2076.2316.4116" = type opaque
+%"class.llvm::DenseMap.11.38.638.998.1958.2078.2318.4118" = type { %"struct.std::pair.14.37.637.997.1957.2077.2317.4117"*, i32, i32, i32 }
+%"struct.std::pair.14.37.637.997.1957.2077.2317.4117" = type { i32, i32 }
+%"class.llvm::TargetRegisterClass.39.639.999.1959.2079.2319.4119" = type opaque
+%"class.llvm::TargetLoweringBase::ValueTypeActionImpl.40.640.1000.1960.2080.2320.4120" = type { [58 x i8] }
+%"class.std::vector.15.44.644.1004.1964.2084.2324.4124" = type { %"struct.std::_Vector_base.16.43.643.1003.1963.2083.2323.4123" }
+%"struct.std::_Vector_base.16.43.643.1003.1963.2083.2323.4123" = type { %"struct.std::_Vector_base<std::pair<llvm::MVT, const llvm::TargetRegisterClass *>, std::allocator<std::pair<llvm::MVT, const llvm::TargetRegisterClass *> > >::_Vector_impl.42.642.1002.1962.2082.2322.4122" }
+%"struct.std::_Vector_base<std::pair<llvm::MVT, const llvm::TargetRegisterClass *>, std::allocator<std::pair<llvm::MVT, const llvm::TargetRegisterClass *> > >::_Vector_impl.42.642.1002.1962.2082.2322.4122" = type { %"struct.std::pair.20.41.641.1001.1961.2081.2321.4121"*, %"struct.std::pair.20.41.641.1001.1961.2081.2321.4121"*, %"struct.std::pair.20.41.641.1001.1961.2081.2321.4121"* }
+%"struct.std::pair.20.41.641.1001.1961.2081.2321.4121" = type { %"class.llvm::MVT.5.605.965.1925.2045.2285.4085", %"class.llvm::TargetRegisterClass.39.639.999.1959.2079.2319.4119"* }
+%"class.std::map.49.649.1009.1969.2089.2329.4129" = type { %"class.std::_Rb_tree.48.648.1008.1968.2088.2328.4128" }
+%"class.std::_Rb_tree.48.648.1008.1968.2088.2328.4128" = type { %"struct.std::_Rb_tree<std::pair<unsigned int, llvm::MVT::SimpleValueType>, std::pair<const std::pair<unsigned int, llvm::MVT::SimpleValueType>, llvm::MVT::SimpleValueType>, std::_Select1st<std::pair<const std::pair<unsigned int, llvm::MVT::SimpleValueType>, llvm::MVT::SimpleValueType> >, std::less<std::pair<unsigned int, llvm::MVT::SimpleValueType> >, std::allocator<std::pair<const std::pair<unsigned int, llvm::MVT::SimpleValueType>, llvm::MVT::SimpleValueType> > >::_Rb_tree_impl.47.647.1007.1967.2087.2327.4127" }
+%"struct.std::_Rb_tree<std::pair<unsigned int, llvm::MVT::SimpleValueType>, std::pair<const std::pair<unsigned int, llvm::MVT::SimpleValueType>, llvm::MVT::SimpleValueType>, std::_Select1st<std::pair<const std::pair<unsigned int, llvm::MVT::SimpleValueType>, llvm::MVT::SimpleValueType> >, std::less<std::pair<unsigned int, llvm::MVT::SimpleValueType> >, std::allocator<std::pair<const std::pair<unsigned int, llvm::MVT::SimpleValueType>, llvm::MVT::SimpleValueType> > >::_Rb_tree_impl.47.647.1007.1967.2087.2327.4127" = type { %"struct.std::less.45.645.1005.1965.2085.2325.4125", %"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126", i64 }
+%"struct.std::less.45.645.1005.1965.2085.2325.4125" = type { i8 }
+%"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126" = type { i32, %"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126"*, %"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126"*, %"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126"* }
+%"class.llvm::MachineFunction.52.652.1012.1972.2092.2332.4132" = type opaque
+%"class.llvm::LLVMContext.6.606.966.1926.2046.2286.4086" = type opaque
+%"struct.llvm::ilist.55.655.1015.1975.2095.2335.4135" = type { %"class.llvm::iplist.54.654.1014.1974.2094.2334.4134" }
+%"class.llvm::iplist.54.654.1014.1974.2094.2334.4134" = type { %"struct.llvm::ilist_traits.53.653.1013.1973.2093.2333.4133", %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"* }
+%"struct.llvm::ilist_traits.53.653.1013.1973.2093.2333.4133" = type { %"class.llvm::ilist_half_node.1.601.961.1921.2041.2281.4081" }
+%"class.llvm::RecyclingAllocator.65.665.1025.1985.2105.2345.4145" = type { %"class.llvm::Recycler.59.659.1019.1979.2099.2339.4139", %"class.llvm::BumpPtrAllocator.64.664.1024.1984.2104.2344.4144" }
+%"class.llvm::Recycler.59.659.1019.1979.2099.2339.4139" = type { %"class.llvm::iplist.24.58.658.1018.1978.2098.2338.4138" }
+%"class.llvm::iplist.24.58.658.1018.1978.2098.2338.4138" = type { %"struct.llvm::ilist_traits.25.57.657.1017.1977.2097.2337.4137", %"struct.llvm::RecyclerStruct.56.656.1016.1976.2096.2336.4136"* }
+%"struct.llvm::ilist_traits.25.57.657.1017.1977.2097.2337.4137" = type { %"struct.llvm::RecyclerStruct.56.656.1016.1976.2096.2336.4136" }
+%"struct.llvm::RecyclerStruct.56.656.1016.1976.2096.2336.4136" = type { %"struct.llvm::RecyclerStruct.56.656.1016.1976.2096.2336.4136"*, %"struct.llvm::RecyclerStruct.56.656.1016.1976.2096.2336.4136"* }
+%"class.llvm::FoldingSet.67.667.1027.1987.2107.2347.4147" = type { %"class.llvm::FoldingSetImpl.66.666.1026.1986.2106.2346.4146" }
+%"class.llvm::FoldingSetImpl.66.666.1026.1986.2106.2346.4146" = type { i32 (...)**, i8**, i32, i32 }
+%"class.llvm::BumpPtrAllocator.64.664.1024.1984.2104.2344.4144" = type { i64, i64, %"class.llvm::MallocSlabAllocator.62.662.1022.1982.2102.2342.4142", %"class.llvm::SlabAllocator.60.660.1020.1980.2100.2340.4140"*, %"class.llvm::MemSlab.63.663.1023.1983.2103.2343.4143"*, i8*, i8*, i64 }
+%"class.llvm::MallocSlabAllocator.62.662.1022.1982.2102.2342.4142" = type { %"class.llvm::SlabAllocator.60.660.1020.1980.2100.2340.4140", %"class.llvm::MallocAllocator.61.661.1021.1981.2101.2341.4141" }
+%"class.llvm::SlabAllocator.60.660.1020.1980.2100.2340.4140" = type { i32 (...)** }
+%"class.llvm::MallocAllocator.61.661.1021.1981.2101.2341.4141" = type { i8 }
+%"class.llvm::MemSlab.63.663.1023.1983.2103.2343.4143" = type { i64, %"class.llvm::MemSlab.63.663.1023.1983.2103.2343.4143"* }
+%"class.llvm::SDDbgInfo.79.679.1039.1999.2119.2359.4159" = type { %"class.llvm::SmallVector.30.74.674.1034.1994.2114.2354.4154", %"class.llvm::SmallVector.30.74.674.1034.1994.2114.2354.4154", %"class.llvm::DenseMap.37.78.678.1038.1998.2118.2358.4158" }
+%"class.llvm::SmallVector.30.74.674.1034.1994.2114.2354.4154" = type { %"class.llvm::SmallVectorImpl.31.72.672.1032.1992.2112.2352.4152", %"struct.llvm::SmallVectorStorage.36.73.673.1033.1993.2113.2353.4153" }
+%"class.llvm::SmallVectorImpl.31.72.672.1032.1992.2112.2352.4152" = type { %"class.llvm::SmallVectorTemplateBase.32.71.671.1031.1991.2111.2351.4151" }
+%"class.llvm::SmallVectorTemplateBase.32.71.671.1031.1991.2111.2351.4151" = type { %"class.llvm::SmallVectorTemplateCommon.33.70.670.1030.1990.2110.2350.4150" }
+%"class.llvm::SmallVectorTemplateCommon.33.70.670.1030.1990.2110.2350.4150" = type { %"class.llvm::SmallVectorBase.24.624.984.1944.2064.2304.4104", %"struct.llvm::AlignedCharArrayUnion.34.69.669.1029.1989.2109.2349.4149" }
+%"struct.llvm::AlignedCharArrayUnion.34.69.669.1029.1989.2109.2349.4149" = type { %"struct.llvm::AlignedCharArray.35.68.668.1028.1988.2108.2348.4148" }
+%"struct.llvm::AlignedCharArray.35.68.668.1028.1988.2108.2348.4148" = type { [8 x i8] }
+%"struct.llvm::SmallVectorStorage.36.73.673.1033.1993.2113.2353.4153" = type { [31 x %"struct.llvm::AlignedCharArrayUnion.34.69.669.1029.1989.2109.2349.4149"] }
+%"class.llvm::DenseMap.37.78.678.1038.1998.2118.2358.4158" = type { %"struct.std::pair.40.77.677.1037.1997.2117.2357.4157"*, i32, i32, i32 }
+%"struct.std::pair.40.77.677.1037.1997.2117.2357.4157" = type { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, %"class.llvm::SmallVector.41.76.676.1036.1996.2116.2356.4156" }
+%"class.llvm::SmallVector.41.76.676.1036.1996.2116.2356.4156" = type { %"class.llvm::SmallVectorImpl.31.72.672.1032.1992.2112.2352.4152", %"struct.llvm::SmallVectorStorage.42.75.675.1035.1995.2115.2355.4155" }
+%"struct.llvm::SmallVectorStorage.42.75.675.1035.1995.2115.2355.4155" = type { [1 x %"struct.llvm::AlignedCharArrayUnion.34.69.669.1029.1989.2109.2349.4149"] }
+%"struct.llvm::SelectionDAG::DAGUpdateListener.80.680.1040.2000.2120.2360.4160" = type { i32 (...)**, %"struct.llvm::SelectionDAG::DAGUpdateListener.80.680.1040.2000.2120.2360.4160"*, %"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"* }
+%"class.std::map.43.84.684.1044.2004.2124.2364.4164" = type { %"class.std::_Rb_tree.44.83.683.1043.2003.2123.2363.4163" }
+%"class.std::_Rb_tree.44.83.683.1043.2003.2123.2363.4163" = type { %"struct.std::_Rb_tree<const llvm::SDNode *, std::pair<const llvm::SDNode *const, std::basic_string<char> >, std::_Select1st<std::pair<const llvm::SDNode *const, std::basic_string<char> > >, std::less<const llvm::SDNode *>, std::allocator<std::pair<const llvm::SDNode *const, std::basic_string<char> > > >::_Rb_tree_impl.82.682.1042.2002.2122.2362.4162" }
+%"struct.std::_Rb_tree<const llvm::SDNode *, std::pair<const llvm::SDNode *const, std::basic_string<char> >, std::_Select1st<std::pair<const llvm::SDNode *const, std::basic_string<char> > >, std::less<const llvm::SDNode *>, std::allocator<std::pair<const llvm::SDNode *const, std::basic_string<char> > > >::_Rb_tree_impl.82.682.1042.2002.2122.2362.4162" = type { %"struct.std::less.48.81.681.1041.2001.2121.2361.4161", %"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126", i64 }
+%"struct.std::less.48.81.681.1041.2001.2121.2361.4161" = type { i8 }
+%"class.llvm::FoldingSet.50.85.685.1045.2005.2125.2365.4165" = type { %"class.llvm::FoldingSetImpl.66.666.1026.1986.2106.2346.4146" }
+%"class.std::vector.51.89.689.1049.2009.2129.2369.4169" = type { %"struct.std::_Vector_base.52.88.688.1048.2008.2128.2368.4168" }
+%"struct.std::_Vector_base.52.88.688.1048.2008.2128.2368.4168" = type { %"struct.std::_Vector_base<llvm::CondCodeSDNode *, std::allocator<llvm::CondCodeSDNode *> >::_Vector_impl.87.687.1047.2007.2127.2367.4167" }
+%"struct.std::_Vector_base<llvm::CondCodeSDNode *, std::allocator<llvm::CondCodeSDNode *> >::_Vector_impl.87.687.1047.2007.2127.2367.4167" = type { %"class.llvm::CondCodeSDNode.86.686.1046.2006.2126.2366.4166"**, %"class.llvm::CondCodeSDNode.86.686.1046.2006.2126.2366.4166"**, %"class.llvm::CondCodeSDNode.86.686.1046.2006.2126.2366.4166"** }
+%"class.llvm::CondCodeSDNode.86.686.1046.2006.2126.2366.4166" = type { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090", i32 }
+%"class.std::vector.56.92.692.1052.2012.2132.2372.4172" = type { %"struct.std::_Vector_base.57.91.691.1051.2011.2131.2371.4171" }
+%"struct.std::_Vector_base.57.91.691.1051.2011.2131.2371.4171" = type { %"struct.std::_Vector_base<llvm::SDNode *, std::allocator<llvm::SDNode *> >::_Vector_impl.90.690.1050.2010.2130.2370.4170" }
+%"struct.std::_Vector_base<llvm::SDNode *, std::allocator<llvm::SDNode *> >::_Vector_impl.90.690.1050.2010.2130.2370.4170" = type { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"**, %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"**, %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"** }
+%"class.std::map.61.96.696.1056.2016.2136.2376.4176" = type { %"class.std::_Rb_tree.62.95.695.1055.2015.2135.2375.4175" }
+%"class.std::_Rb_tree.62.95.695.1055.2015.2135.2375.4175" = type { %"struct.std::_Rb_tree<llvm::EVT, std::pair<const llvm::EVT, llvm::SDNode *>, std::_Select1st<std::pair<const llvm::EVT, llvm::SDNode *> >, llvm::EVT::compareRawBits, std::allocator<std::pair<const llvm::EVT, llvm::SDNode *> > >::_Rb_tree_impl.94.694.1054.2014.2134.2374.4174" }
+%"struct.std::_Rb_tree<llvm::EVT, std::pair<const llvm::EVT, llvm::SDNode *>, std::_Select1st<std::pair<const llvm::EVT, llvm::SDNode *> >, llvm::EVT::compareRawBits, std::allocator<std::pair<const llvm::EVT, llvm::SDNode *> > >::_Rb_tree_impl.94.694.1054.2014.2134.2374.4174" = type { %"struct.llvm::EVT::compareRawBits.93.693.1053.2013.2133.2373.4173", %"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126", i64 }
+%"struct.llvm::EVT::compareRawBits.93.693.1053.2013.2133.2373.4173" = type { i8 }
+%"class.llvm::StringMap.99.699.1059.2019.2139.2379.4179" = type { %"class.llvm::StringMapImpl.98.698.1058.2018.2138.2378.4178", %"class.llvm::MallocAllocator.61.661.1021.1981.2101.2341.4141" }
+%"class.llvm::StringMapImpl.98.698.1058.2018.2138.2378.4178" = type { %"class.llvm::StringMapEntryBase.97.697.1057.2017.2137.2377.4177"**, i32, i32, i32, i32 }
+%"class.llvm::StringMapEntryBase.97.697.1057.2017.2137.2377.4177" = type { i32 }
+%"class.std::map.66.103.703.1063.2023.2143.2383.4183" = type { %"class.std::_Rb_tree.67.102.702.1062.2022.2142.2382.4182" }
+%"class.std::_Rb_tree.67.102.702.1062.2022.2142.2382.4182" = type { %"struct.std::_Rb_tree<std::pair<std::basic_string<char>, unsigned char>, std::pair<const std::pair<std::basic_string<char>, unsigned char>, llvm::SDNode *>, std::_Select1st<std::pair<const std::pair<std::basic_string<char>, unsigned char>, llvm::SDNode *> >, std::less<std::pair<std::basic_string<char>, unsigned char> >, std::allocator<std::pair<const std::pair<std::basic_string<char>, unsigned char>, llvm::SDNode *> > >::_Rb_tree_impl.101.701.1061.2021.2141.2381.4181" }
+%"struct.std::_Rb_tree<std::pair<std::basic_string<char>, unsigned char>, std::pair<const std::pair<std::basic_string<char>, unsigned char>, llvm::SDNode *>, std::_Select1st<std::pair<const std::pair<std::basic_string<char>, unsigned char>, llvm::SDNode *> >, std::less<std::pair<std::basic_string<char>, unsigned char> >, std::allocator<std::pair<const std::pair<std::basic_string<char>, unsigned char>, llvm::SDNode *> > >::_Rb_tree_impl.101.701.1061.2021.2141.2381.4181" = type { %"struct.std::less.71.100.700.1060.2020.2140.2380.4180", %"struct.std::_Rb_tree_node_base.46.646.1006.1966.2086.2326.4126", i64 }
+%"struct.std::less.71.100.700.1060.2020.2140.2380.4180" = type { i8 }
+%"class.llvm::Type.7.607.967.1927.2047.2287.4087" = type { %"class.llvm::LLVMContext.6.606.966.1926.2046.2286.4086"*, i32, i32, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"** }
+%"class.llvm::DAGTypeLegalizer.117.717.1077.2037.2157.2397.4197" = type { %"class.llvm::TargetLowering.51.651.1011.1971.2091.2331.4131"*, %"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"*, %"class.llvm::TargetLoweringBase::ValueTypeActionImpl.40.640.1000.1960.2080.2320.4120", [6 x i8], %"class.llvm::SmallDenseMap.107.707.1067.2027.2147.2387.4187", %"class.llvm::SmallDenseMap.77.110.710.1070.2030.2150.2390.4190", %"class.llvm::SmallDenseMap.107.707.1067.2027.2147.2387.4187", %"class.llvm::SmallDenseMap.77.110.710.1070.2030.2150.2390.4190", %"class.llvm::SmallDenseMap.107.707.1067.2027.2147.2387.4187", %"class.llvm::SmallDenseMap.77.110.710.1070.2030.2150.2390.4190", %"class.llvm::SmallDenseMap.107.707.1067.2027.2147.2387.4187", %"class.llvm::SmallDenseMap.107.707.1067.2027.2147.2387.4187", %"class.llvm::SmallVector.82.116.716.1076.2036.2156.2396.4196" }
+%"class.llvm::SmallDenseMap.77.110.710.1070.2030.2150.2390.4190" = type { [4 x i8], i32, %"struct.llvm::AlignedCharArrayUnion.80.109.709.1069.2029.2149.2389.4189" }
+%"struct.llvm::AlignedCharArrayUnion.80.109.709.1069.2029.2149.2389.4189" = type { %"struct.llvm::AlignedCharArray.81.108.708.1068.2028.2148.2388.4188" }
+%"struct.llvm::AlignedCharArray.81.108.708.1068.2028.2148.2388.4188" = type { [384 x i8] }
+%"class.llvm::SmallDenseMap.107.707.1067.2027.2147.2387.4187" = type { [4 x i8], i32, %"struct.llvm::AlignedCharArrayUnion.75.106.706.1066.2026.2146.2386.4186" }
+%"struct.llvm::AlignedCharArrayUnion.75.106.706.1066.2026.2146.2386.4186" = type { %"struct.llvm::AlignedCharArray.76.105.705.1065.2025.2145.2385.4185" }
+%"struct.llvm::AlignedCharArray.76.105.705.1065.2025.2145.2385.4185" = type { [256 x i8] }
+%"class.llvm::SmallVector.82.116.716.1076.2036.2156.2396.4196" = type { %"class.llvm::SmallVectorImpl.83.114.714.1074.2034.2154.2394.4194", %"struct.llvm::SmallVectorStorage.87.115.715.1075.2035.2155.2395.4195" }
+%"class.llvm::SmallVectorImpl.83.114.714.1074.2034.2154.2394.4194" = type { %"class.llvm::SmallVectorTemplateBase.84.113.713.1073.2033.2153.2393.4193" }
+%"class.llvm::SmallVectorTemplateBase.84.113.713.1073.2033.2153.2393.4193" = type { %"class.llvm::SmallVectorTemplateCommon.85.112.712.1072.2032.2152.2392.4192" }
+%"class.llvm::SmallVectorTemplateCommon.85.112.712.1072.2032.2152.2392.4192" = type { %"class.llvm::SmallVectorBase.24.624.984.1944.2064.2304.4104", %"struct.llvm::AlignedCharArrayUnion.86.111.711.1071.2031.2151.2391.4191" }
+%"struct.llvm::AlignedCharArrayUnion.86.111.711.1071.2031.2151.2391.4191" = type { %"struct.llvm::AlignedCharArray.35.68.668.1028.1988.2108.2348.4148" }
+%"struct.llvm::SmallVectorStorage.87.115.715.1075.2035.2155.2395.4195" = type { [127 x %"struct.llvm::AlignedCharArrayUnion.86.111.711.1071.2031.2151.2391.4191"] }
+%"struct.std::pair.112.119.719.1079.2039.2159.2399.4199" = type { i32, %"struct.llvm::EVT.8.608.968.1928.2048.2288.4088" }
+%"class.llvm::DenseMapBase.73.118.718.1078.2038.2158.2398.4198" = type { i8 }
+
+@.str61 = external hidden unnamed_addr constant [80 x i8], align 1
+@.str63 = external hidden unnamed_addr constant [80 x i8], align 1
+@.str74 = external hidden unnamed_addr constant [49 x i8], align 1
+@__PRETTY_FUNCTION__._ZN4llvm16DAGTypeLegalizer16GetWidenedVectorENS_7SDValueE = external hidden unnamed_addr constant [70 x i8], align 1
+@.str98 = external hidden unnamed_addr constant [46 x i8], align 1
+@__PRETTY_FUNCTION__._ZNK4llvm6SDNode12getValueTypeEj = external hidden unnamed_addr constant [57 x i8], align 1
+@.str99 = external hidden unnamed_addr constant [19 x i8], align 1
+@__PRETTY_FUNCTION__._ZN4llvm5SDLocC2EPKNS_6SDNodeE = external hidden unnamed_addr constant [41 x i8], align 1
+@.str100 = external hidden unnamed_addr constant [50 x i8], align 1
+@__PRETTY_FUNCTION__._ZNK4llvm6SDNode10getOperandEj = external hidden unnamed_addr constant [66 x i8], align 1
+
+declare { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"*, i32, i8*, i32, i32, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"*, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8)
+
+; Function Attrs: noreturn nounwind
+declare void @__assert_fail(i8*, i8*, i32, i8*) #0
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+
+; Function Attrs: nounwind uwtable
+define hidden { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm16DAGTypeLegalizer18WidenVecRes_BinaryEPNS_6SDNodeE(%"class.llvm::DAGTypeLegalizer.117.717.1077.2037.2157.2397.4197"* %this, %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"* %N) #2 align 2 {
+entry:
+ %Op.i43 = alloca %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", align 8
+ %ref.tmp.i = alloca %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199", align 8
+ %Op.i = alloca %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", align 8
+ %0 = bitcast %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i to i8*
+ %retval.sroa.0.0.idx.i36 = getelementptr inbounds %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i, i64 0, i32 1, i32 0, i32 0
+ %retval.sroa.0.0.copyload.i37 = load i32* %retval.sroa.0.0.idx.i36, align 8
+ call void @llvm.lifetime.end(i64 24, i8* %0) #1
+ %agg.tmp8.sroa.2.0.copyload = load i32* undef, align 8
+ %1 = bitcast %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i to i8*
+ call void @llvm.lifetime.start(i64 16, i8* %1) #1
+ %2 = getelementptr %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i, i64 0, i32 1
+ store i32 %agg.tmp8.sroa.2.0.copyload, i32* %2, align 8
+
+; CHECK: movl (%rax), %eax
+; CHECK-NOT: movl %eax, {{[0-9]+}}(%rsp)
+; CHECK: movl [[OFF:[0-9]+]](%rsp), %r8d
+; CHECK: movl %eax, [[OFF]](%rsp)
+; CHECK: movl $-1, %ecx
+; CHECK: callq _ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_
+
+ %call18 = call { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"* undef, i32 undef, i8* undef, i32 -1, i32 %retval.sroa.0.0.copyload.i37, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"* undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8 undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8 undef) #1
+ ret { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } %call18
+}
+
+; Function Attrs: nounwind uwtable
+declare hidden %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* @_ZN4llvm12DenseMapBaseINS_13SmallDenseMapINS_7SDValueES2_Lj8ENS_12DenseMapInfoIS2_EEEES2_S2_S4_EixERKS2_(%"class.llvm::DenseMapBase.73.118.718.1078.2038.2158.2398.4198"*, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* nocapture readonly) #2 align 2
+
+declare hidden void @_ZN4llvm16DAGTypeLegalizer10RemapValueERNS_7SDValueE(%"class.llvm::DAGTypeLegalizer.117.717.1077.2037.2157.2397.4197"*, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"*)
+
+; Function Attrs: nounwind uwtable
+declare hidden void @_ZNK4llvm18TargetLoweringBase17getTypeConversionERNS_11LLVMContextENS_3EVTE(%"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* noalias sret, %"class.llvm::TargetLoweringBase.50.650.1010.1970.2090.2330.4130"* readonly, %"class.llvm::LLVMContext.6.606.966.1926.2046.2286.4086"*, i32, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"*) #2 align 2
+
+attributes #0 = { noreturn nounwind }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind uwtable }
+
diff --git a/test/CodeGen/X86/misched-aa-mmos.ll b/test/CodeGen/X86/misched-aa-mmos.ll
new file mode 100644
index 000000000000..343e26f54725
--- /dev/null
+++ b/test/CodeGen/X86/misched-aa-mmos.ll
@@ -0,0 +1,37 @@
+; RUN: llc -enable-misched -enable-aa-sched-mi < %s
+
+; This test generates a decw instruction (which has two MMOs) and an alias SU
+; edge query involving that instruction. Make sure this does not crash.
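+; (The decw comes from the i16 load / add -1 / store of the exponent field in
+; cond.end.i below, which is expected to fold into a single memory-form
+; decrement carrying both a load and a store memory operand.)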
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%s1 = type { i16, i16, i32 }
+%c1 = type { %s1*, %u1, i16, i8 }
+%u1 = type { i64 }
+
+declare zeroext i1 @bar(i64*, i32) #5
+
+define i32 @foo() #0 align 2 {
+entry:
+ %temp_rhs = alloca %c1, align 8
+ br i1 undef, label %if.else56, label %cond.end.i
+
+cond.end.i:
+ %significand.i18.i = getelementptr inbounds %c1* %temp_rhs, i64 0, i32 1
+ %exponent.i = getelementptr inbounds %c1* %temp_rhs, i64 0, i32 2
+ %0 = load i16* %exponent.i, align 8
+ %sub.i = add i16 %0, -1
+ store i16 %sub.i, i16* %exponent.i, align 8
+ %parts.i.i = bitcast %u1* %significand.i18.i to i64**
+ %1 = load i64** %parts.i.i, align 8
+ %call5.i = call zeroext i1 @bar(i64* %1, i32 undef) #1
+ unreachable
+
+if.else56:
+ unreachable
+}
+
+attributes #0 = { nounwind uwtable }
+attributes #1 = { nounwind }
+
diff --git a/test/CodeGen/X86/misched-matmul.ll b/test/CodeGen/X86/misched-matmul.ll
index 5454b7cf780a..3ea6512258d4 100644
--- a/test/CodeGen/X86/misched-matmul.ll
+++ b/test/CodeGen/X86/misched-matmul.ll
@@ -10,7 +10,7 @@
; more complex cases.
;
; CHECK: @wrap_mul4
-; CHECK: 23 regalloc - Number of spills inserted
+; CHECK: 22 regalloc - Number of spills inserted
define void @wrap_mul4(double* nocapture %Out, [4 x double]* nocapture %A, [4 x double]* nocapture %B) #0 {
entry:
diff --git a/test/CodeGen/X86/mod128.ll b/test/CodeGen/X86/mod128.ll
new file mode 100644
index 000000000000..4fdee11ec83a
--- /dev/null
+++ b/test/CodeGen/X86/mod128.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64
+; RUN: llc < %s -mtriple=x86_64-cygwin | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=WIN64
+
+define i64 @mod128(i128 %x) {
+ ; X86-64: movl $3, %edx
+ ; X86-64: xorl %ecx, %ecx
+ ; X86-64: callq __modti3
+ ; X86-64-NOT: movd %xmm0, %rax
+
+ ; WIN64-NOT: movl $3, %r8d
+ ; WIN64-NOT: xorl %r9d, %r9d
+ ; WIN64-DAG: movq %rdx, 56(%rsp)
+ ; WIN64-DAG: movq %rcx, 48(%rsp)
+ ; WIN64-DAG: leaq 48(%rsp), %rcx
+ ; WIN64-DAG: leaq 32(%rsp), %rdx
+ ; WIN64-DAG: movq $0, 40(%rsp)
+ ; WIN64-DAG: movq $3, 32(%rsp)
+ ; WIN64: callq __modti3
+ ; WIN64: movd %xmm0, %rax
+
+ %1 = srem i128 %x, 3
+ %2 = trunc i128 %1 to i64
+ ret i64 %2
+}
diff --git a/test/CodeGen/X86/movbe.ll b/test/CodeGen/X86/movbe.ll
index 3f459be70d2a..e248410b2020 100644
--- a/test/CodeGen/X86/movbe.ll
+++ b/test/CodeGen/X86/movbe.ll
@@ -1,45 +1,66 @@
; RUN: llc -mtriple=x86_64-linux -mcpu=atom < %s | FileCheck %s
; RUN: llc -mtriple=x86_64-linux -mcpu=slm < %s | FileCheck %s -check-prefix=SLM
+declare i16 @llvm.bswap.i16(i16) nounwind readnone
declare i32 @llvm.bswap.i32(i32) nounwind readnone
declare i64 @llvm.bswap.i64(i64) nounwind readnone
-define void @test1(i32* nocapture %x, i32 %y) nounwind {
+define void @test1(i16* nocapture %x, i16 %y) nounwind {
+ %bswap = call i16 @llvm.bswap.i16(i16 %y)
+ store i16 %bswap, i16* %x, align 2
+ ret void
+; CHECK-LABEL: test1:
+; CHECK: movbew %si, (%rdi)
+; SLM-LABEL: test1:
+; SLM: movbew %si, (%rdi)
+}
+
+define i16 @test2(i16* %x) nounwind {
+ %load = load i16* %x, align 2
+ %bswap = call i16 @llvm.bswap.i16(i16 %load)
+ ret i16 %bswap
+; CHECK-LABEL: test2:
+; CHECK: movbew (%rdi), %ax
+; SLM-LABEL: test2:
+; SLM: movbew (%rdi), %ax
+}
+
+define void @test3(i32* nocapture %x, i32 %y) nounwind {
%bswap = call i32 @llvm.bswap.i32(i32 %y)
store i32 %bswap, i32* %x, align 4
ret void
-; CHECK-LABEL: test1:
+; CHECK-LABEL: test3:
; CHECK: movbel %esi, (%rdi)
-; SLM-LABEL: test1:
+; SLM-LABEL: test3:
; SLM: movbel %esi, (%rdi)
}
-define i32 @test2(i32* %x) nounwind {
+define i32 @test4(i32* %x) nounwind {
%load = load i32* %x, align 4
%bswap = call i32 @llvm.bswap.i32(i32 %load)
ret i32 %bswap
-; CHECK-LABEL: test2:
+; CHECK-LABEL: test4:
; CHECK: movbel (%rdi), %eax
-; SLM-LABEL: test2:
+; SLM-LABEL: test4:
; SLM: movbel (%rdi), %eax
}
-define void @test3(i64* %x, i64 %y) nounwind {
+define void @test5(i64* %x, i64 %y) nounwind {
%bswap = call i64 @llvm.bswap.i64(i64 %y)
store i64 %bswap, i64* %x, align 8
ret void
-; CHECK-LABEL: test3:
+; CHECK-LABEL: test5:
; CHECK: movbeq %rsi, (%rdi)
-; SLM-LABEL: test3:
+; SLM-LABEL: test5:
; SLM: movbeq %rsi, (%rdi)
}
-define i64 @test4(i64* %x) nounwind {
+define i64 @test6(i64* %x) nounwind {
%load = load i64* %x, align 8
%bswap = call i64 @llvm.bswap.i64(i64 %load)
ret i64 %bswap
-; CHECK-LABEL: test4:
+; CHECK-LABEL: test6:
; CHECK: movbeq (%rdi), %rax
-; SLM-LABEL: test4:
+; SLM-LABEL: test6:
; SLM: movbeq (%rdi), %rax
}
diff --git a/test/CodeGen/X86/ms-inline-asm.ll b/test/CodeGen/X86/ms-inline-asm.ll
index 5e7ba37b39c0..69105158906f 100644
--- a/test/CodeGen/X86/ms-inline-asm.ll
+++ b/test/CodeGen/X86/ms-inline-asm.ll
@@ -1,11 +1,10 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86 -mcpu=core2 -no-integrated-as | FileCheck %s
define i32 @t1() nounwind {
entry:
%0 = tail call i32 asm sideeffect inteldialect "mov eax, $1\0A\09mov $0, eax", "=r,r,~{eax},~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
ret i32 %0
; CHECK: t1
-; CHECK: movl %esp, %ebp
; CHECK: {{## InlineAsm Start|#APP}}
; CHECK: .intel_syntax
; CHECK: mov eax, ecx
@@ -19,7 +18,6 @@ entry:
call void asm sideeffect inteldialect "mov eax, $$1", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind
ret void
; CHECK: t2
-; CHECK: movl %esp, %ebp
; CHECK: {{## InlineAsm Start|#APP}}
; CHECK: .intel_syntax
; CHECK: mov eax, 1
@@ -34,7 +32,6 @@ entry:
call void asm sideeffect inteldialect "mov eax, DWORD PTR [$0]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %V.addr) nounwind
ret void
; CHECK: t3
-; CHECK: movl %esp, %ebp
; CHECK: {{## InlineAsm Start|#APP}}
; CHECK: .intel_syntax
; CHECK: mov eax, DWORD PTR {{[[esp]}}
@@ -56,7 +53,6 @@ entry:
%0 = load i32* %b1, align 4
ret i32 %0
; CHECK: t18
-; CHECK: movl %esp, %ebp
; CHECK: {{## InlineAsm Start|#APP}}
; CHECK: .intel_syntax
; CHECK: lea ebx, foo
@@ -76,7 +72,6 @@ entry:
call void asm sideeffect inteldialect "call $0", "r,~{dirflag},~{fpsr},~{flags}"(void ()* @t19_helper) nounwind
ret void
; CHECK-LABEL: t19:
-; CHECK: movl %esp, %ebp
; CHECK: movl ${{_?}}t19_helper, %eax
; CHECK: {{## InlineAsm Start|#APP}}
; CHECK: .intel_syntax
@@ -95,7 +90,6 @@ entry:
%0 = load i32** %res, align 4
ret i32* %0
; CHECK-LABEL: t30:
-; CHECK: movl %esp, %ebp
; CHECK: {{## InlineAsm Start|#APP}}
; CHECK: .intel_syntax
; CHECK: lea edi, dword ptr [{{_?}}results]
@@ -103,8 +97,31 @@ entry:
; CHECK: {{## InlineAsm End|#NO_APP}}
; CHECK: {{## InlineAsm Start|#APP}}
; CHECK: .intel_syntax
-; CHECK: mov dword ptr [esi], edi
+; CHECK: mov dword ptr [esp], edi
+; CHECK: .att_syntax
+; CHECK: {{## InlineAsm End|#NO_APP}}
+; CHECK: movl (%esp), %eax
+}
+
+; Stack realignment plus MS inline asm that does *not* adjust the stack is no
+; longer an error.
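+; Note: in t31 below the 64-byte-aligned alloca forces stack realignment
+; ('andl $-64, %esp'), so the asm's memory operand is lowered to an
+; esp-relative slot ('dword ptr [esp]'); since the inline asm itself never
+; adjusts the stack, this combination is now accepted.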
+
+define i32 @t31() {
+entry:
+ %val = alloca i32, align 64
+ store i32 -1, i32* %val, align 64
+ call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %val) #1
+ %sp = load i32* %val, align 64
+ ret i32 %sp
+; CHECK-LABEL: t31:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
+; CHECK: andl $-64, %esp
+; CHECK: {{## InlineAsm Start|#APP}}
+; CHECK: .intel_syntax
+; CHECK: mov dword ptr [esp], esp
; CHECK: .att_syntax
; CHECK: {{## InlineAsm End|#NO_APP}}
-; CHECK: movl (%esi), %eax
+; CHECK: movl (%esp), %eax
+; CHECK: ret
}
diff --git a/test/CodeGen/X86/mul128_sext_loop.ll b/test/CodeGen/X86/mul128_sext_loop.ll
new file mode 100644
index 000000000000..a516f03cbc3e
--- /dev/null
+++ b/test/CodeGen/X86/mul128_sext_loop.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+define void @test(i64* nocapture %arr, i64 %arrsize, i64 %factor) nounwind uwtable {
+ %1 = icmp sgt i64 %arrsize, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0
+ %2 = sext i64 %factor to i128
+ br label %3
+
+; <label>:3 ; preds = %3, %.lr.ph
+; CHECK-NOT: mul
+; CHECK: imulq
+; CHECK-NOT: mul
+ %carry.02 = phi i128 [ 0, %.lr.ph ], [ %10, %3 ]
+ %i.01 = phi i64 [ 0, %.lr.ph ], [ %11, %3 ]
+ %4 = getelementptr inbounds i64* %arr, i64 %i.01
+ %5 = load i64* %4, align 8
+ %6 = sext i64 %5 to i128
+ %7 = mul nsw i128 %6, %2
+ %8 = add nsw i128 %7, %carry.02
+ %.tr = trunc i128 %8 to i64
+ %9 = and i64 %.tr, 9223372036854775807
+ store i64 %9, i64* %4, align 8
+ %10 = ashr i128 %8, 63
+ %11 = add nsw i64 %i.01, 1
+ %exitcond = icmp eq i64 %11, %arrsize
+ br i1 %exitcond, label %._crit_edge, label %3
+
+._crit_edge: ; preds = %3, %0
+ ret void
+}
diff --git a/test/CodeGen/X86/mult-alt-generic-i686.ll b/test/CodeGen/X86/mult-alt-generic-i686.ll
index 7c3499f178a6..54bc3a42f035 100644
--- a/test/CodeGen/X86/mult-alt-generic-i686.ll
+++ b/test/CodeGen/X86/mult-alt-generic-i686.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86
+; RUN: llc < %s -march=x86 -no-integrated-as
; ModuleID = 'mult-alt-generic.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
target triple = "i686"
diff --git a/test/CodeGen/X86/mult-alt-generic-x86_64.ll b/test/CodeGen/X86/mult-alt-generic-x86_64.ll
index f35bb5e34079..84a9c8140943 100644
--- a/test/CodeGen/X86/mult-alt-generic-x86_64.ll
+++ b/test/CodeGen/X86/mult-alt-generic-x86_64.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64
+; RUN: llc < %s -march=x86-64 -no-integrated-as
; ModuleID = 'mult-alt-generic.c'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64"
diff --git a/test/CodeGen/X86/mult-alt-x86.ll b/test/CodeGen/X86/mult-alt-x86.ll
index 06175da46454..cb2219a6ed75 100644
--- a/test/CodeGen/X86/mult-alt-x86.ll
+++ b/test/CodeGen/X86/mult-alt-x86.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
+; RUN: llc < %s -march=x86 -mattr=+sse2 -no-integrated-as
; ModuleID = 'mult-alt-x86.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
target triple = "i686-pc-win32"
diff --git a/test/CodeGen/X86/multiple-loop-post-inc.ll b/test/CodeGen/X86/multiple-loop-post-inc.ll
index 29b9f34464f0..4edc1ff0b3fa 100644
--- a/test/CodeGen/X86/multiple-loop-post-inc.ll
+++ b/test/CodeGen/X86/multiple-loop-post-inc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -asm-verbose=false -disable-branch-fold -disable-block-placement -disable-tail-duplicate -march=x86-64 -mcpu=nehalem < %s | FileCheck %s
+; RUN: llc -asm-verbose=false -disable-branch-fold -disable-block-placement -disable-tail-duplicate -march=x86-64 -mcpu=nehalem -no-integrated-as < %s | FileCheck %s
; rdar://7236213
;
; The scheduler's 2-address hack has been disabled, so there is
diff --git a/test/CodeGen/X86/musttail-indirect.ll b/test/CodeGen/X86/musttail-indirect.ll
new file mode 100644
index 000000000000..9d21b5ea5d52
--- /dev/null
+++ b/test/CodeGen/X86/musttail-indirect.ll
@@ -0,0 +1,124 @@
+; RUN: llc < %s -mtriple=i686-win32 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-win32 -O0 | FileCheck %s
+
+; IR simplified from the following C++ snippet compiled for i686-windows-msvc:
+
+; struct A { A(); ~A(); int a; };
+;
+; struct B {
+; virtual int f(int);
+; virtual int g(A, int, A);
+; virtual void h(A, int, A);
+; virtual A i(A, int, A);
+; virtual A j(int);
+; };
+;
+; int (B::*mp_f)(int) = &B::f;
+; int (B::*mp_g)(A, int, A) = &B::g;
+; void (B::*mp_h)(A, int, A) = &B::h;
+; A (B::*mp_i)(A, int, A) = &B::i;
+; A (B::*mp_j)(int) = &B::j;
+
+; Each member pointer creates a thunk. The ones with inalloca are required to
+; be tail calls by the ABI, even at O0.
+
+%struct.B = type { i32 (...)** }
+%struct.A = type { i32 }
+
+; CHECK-LABEL: f_thunk:
+; CHECK: jmpl
+; CHECK-NOT: ret
+define x86_thiscallcc i32 @f_thunk(%struct.B* %this, i32) {
+entry:
+ %1 = bitcast %struct.B* %this to i32 (%struct.B*, i32)***
+ %vtable = load i32 (%struct.B*, i32)*** %1
+ %2 = load i32 (%struct.B*, i32)** %vtable
+ %3 = musttail call x86_thiscallcc i32 %2(%struct.B* %this, i32 %0)
+ ret i32 %3
+}
+
+; Inalloca thunks shouldn't require any stores to the stack.
+; CHECK-LABEL: g_thunk:
+; CHECK-NOT: mov %{{.*}}, {{.*(.*esp.*)}}
+; CHECK: jmpl
+; CHECK-NOT: ret
+define x86_thiscallcc i32 @g_thunk(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca) {
+entry:
+ %1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
+ %vtable = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
+ %vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 1
+ %2 = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
+ %3 = musttail call x86_thiscallcc i32 %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
+ ret i32 %3
+}
+
+; CHECK-LABEL: h_thunk:
+; CHECK: jmpl
+; CHECK-NOT: mov %{{.*}}, {{.*(.*esp.*)}}
+; CHECK-NOT: ret
+define x86_thiscallcc void @h_thunk(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca) {
+entry:
+ %1 = bitcast %struct.B* %this to void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
+ %vtable = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
+ %vfn = getelementptr inbounds void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 2
+ %2 = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
+ musttail call x86_thiscallcc void %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
+ ret void
+}
+
+; CHECK-LABEL: i_thunk:
+; CHECK-NOT: mov %{{.*}}, {{.*(.*esp.*)}}
+; CHECK: jmpl
+; CHECK-NOT: ret
+define x86_thiscallcc %struct.A* @i_thunk(%struct.B* %this, <{ %struct.A*, %struct.A, i32, %struct.A }>* inalloca) {
+entry:
+ %1 = bitcast %struct.B* %this to %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)***
+ %vtable = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*** %1
+ %vfn = getelementptr inbounds %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vtable, i32 3
+ %2 = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vfn
+ %3 = musttail call x86_thiscallcc %struct.A* %2(%struct.B* %this, <{ %struct.A*, %struct.A, i32, %struct.A }>* inalloca %0)
+ ret %struct.A* %3
+}
+
+; CHECK-LABEL: j_thunk:
+; CHECK: jmpl
+; CHECK-NOT: ret
+define x86_thiscallcc void @j_thunk(%struct.A* noalias sret %agg.result, %struct.B* %this, i32) {
+entry:
+ %1 = bitcast %struct.B* %this to void (%struct.A*, %struct.B*, i32)***
+ %vtable = load void (%struct.A*, %struct.B*, i32)*** %1
+ %vfn = getelementptr inbounds void (%struct.A*, %struct.B*, i32)** %vtable, i32 4
+ %2 = load void (%struct.A*, %struct.B*, i32)** %vfn
+ musttail call x86_thiscallcc void %2(%struct.A* sret %agg.result, %struct.B* %this, i32 %0)
+ ret void
+}
+
+; CHECK-LABEL: _stdcall_thunk@8:
+; CHECK-NOT: mov %{{.*}}, {{.*(.*esp.*)}}
+; CHECK: jmpl
+; CHECK-NOT: ret
+define x86_stdcallcc i32 @stdcall_thunk(<{ %struct.B*, %struct.A }>* inalloca) {
+entry:
+ %this_ptr = getelementptr inbounds <{ %struct.B*, %struct.A }>* %0, i32 0, i32 0
+ %this = load %struct.B** %this_ptr
+ %1 = bitcast %struct.B* %this to i32 (<{ %struct.B*, %struct.A }>*)***
+ %vtable = load i32 (<{ %struct.B*, %struct.A }>*)*** %1
+ %vfn = getelementptr inbounds i32 (<{ %struct.B*, %struct.A }>*)** %vtable, i32 1
+ %2 = load i32 (<{ %struct.B*, %struct.A }>*)** %vfn
+ %3 = musttail call x86_stdcallcc i32 %2(<{ %struct.B*, %struct.A }>* inalloca %0)
+ ret i32 %3
+}
+
+; CHECK-LABEL: @fastcall_thunk@8:
+; CHECK-NOT: mov %{{.*}}, {{.*(.*esp.*)}}
+; CHECK: jmpl
+; CHECK-NOT: ret
+define x86_fastcallcc i32 @fastcall_thunk(%struct.B* inreg %this, <{ %struct.A }>* inalloca) {
+entry:
+ %1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A }>*)***
+ %vtable = load i32 (%struct.B*, <{ %struct.A }>*)*** %1
+ %vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A }>*)** %vtable, i32 1
+ %2 = load i32 (%struct.B*, <{ %struct.A }>*)** %vfn
+ %3 = musttail call x86_fastcallcc i32 %2(%struct.B* inreg %this, <{ %struct.A }>* inalloca %0)
+ ret i32 %3
+}
diff --git a/test/CodeGen/X86/musttail-thiscall.ll b/test/CodeGen/X86/musttail-thiscall.ll
new file mode 100644
index 000000000000..8ea12482e504
--- /dev/null
+++ b/test/CodeGen/X86/musttail-thiscall.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=x86 < %s | FileCheck %s
+; RUN: llc -march=x86 -O0 < %s | FileCheck %s
+
+; CHECK-LABEL: t1:
+; CHECK: jmp {{_?}}t1_callee
+define x86_thiscallcc void @t1(i8* %this) {
+ %adj = getelementptr i8* %this, i32 4
+ musttail call x86_thiscallcc void @t1_callee(i8* %adj)
+ ret void
+}
+declare x86_thiscallcc void @t1_callee(i8* %this)
+
+; CHECK-LABEL: t2:
+; CHECK: jmp {{_?}}t2_callee
+define x86_thiscallcc i32 @t2(i8* %this, i32 %a) {
+ %adj = getelementptr i8* %this, i32 4
+ %rv = musttail call x86_thiscallcc i32 @t2_callee(i8* %adj, i32 %a)
+ ret i32 %rv
+}
+declare x86_thiscallcc i32 @t2_callee(i8* %this, i32 %a)
+
+; CHECK-LABEL: t3:
+; CHECK: jmp {{_?}}t3_callee
+define x86_thiscallcc i8* @t3(i8* %this, <{ i8*, i32 }>* inalloca %args) {
+ %adj = getelementptr i8* %this, i32 4
+ %a_ptr = getelementptr <{ i8*, i32 }>* %args, i32 0, i32 1
+ store i32 0, i32* %a_ptr
+ %rv = musttail call x86_thiscallcc i8* @t3_callee(i8* %adj, <{ i8*, i32 }>* inalloca %args)
+ ret i8* %rv
+}
+declare x86_thiscallcc i8* @t3_callee(i8* %this, <{ i8*, i32 }>* inalloca %args);
diff --git a/test/CodeGen/X86/musttail.ll b/test/CodeGen/X86/musttail.ll
new file mode 100644
index 000000000000..ca5d3119cf10
--- /dev/null
+++ b/test/CodeGen/X86/musttail.ll
@@ -0,0 +1,90 @@
+; RUN: llc -march=x86 < %s | FileCheck %s
+; RUN: llc -march=x86 -O0 < %s | FileCheck %s
+; RUN: llc -march=x86 -disable-tail-calls < %s | FileCheck %s
+
+declare void @t1_callee(i8*)
+define void @t1(i32* %a) {
+; CHECK-LABEL: t1:
+; CHECK: jmp {{_?}}t1_callee
+ %b = bitcast i32* %a to i8*
+ musttail call void @t1_callee(i8* %b)
+ ret void
+}
+
+declare i8* @t2_callee()
+define i32* @t2() {
+; CHECK-LABEL: t2:
+; CHECK: jmp {{_?}}t2_callee
+ %v = musttail call i8* @t2_callee()
+ %w = bitcast i8* %v to i32*
+ ret i32* %w
+}
+
+; Complex frame layout: stack realignment with dynamic alloca.
+define void @t3(i32 %n) alignstack(32) nounwind {
+entry:
+; CHECK: t3:
+; CHECK: pushl %ebp
+; CHECK: pushl %esi
+; CHECK: andl $-32, %esp
+; CHECK: movl %esp, %esi
+; CHECK: popl %esi
+; CHECK: popl %ebp
+; CHECK-NEXT: jmp {{_?}}t3_callee
+ %a = alloca i8, i32 %n
+ call void @capture(i8* %a)
+ musttail call void @t3_callee(i32 %n) nounwind
+ ret void
+}
+
+declare void @capture(i8*)
+declare void @t3_callee(i32)
+
+; Test that we actually copy in and out stack arguments that aren't forwarded
+; without modification.
+define i32 @t4({}* %fn, i32 %n, i32 %r) {
+; CHECK-LABEL: t4:
+; CHECK: incl %[[r:.*]]
+; CHECK: decl %[[n:.*]]
+; CHECK: movl %[[r]], {{[0-9]+}}(%esp)
+; CHECK: movl %[[n]], {{[0-9]+}}(%esp)
+; CHECK: jmpl *%{{.*}}
+
+entry:
+ %r1 = add i32 %r, 1
+ %n1 = sub i32 %n, 1
+ %fn_cast = bitcast {}* %fn to i32 ({}*, i32, i32)*
+ %r2 = musttail call i32 %fn_cast({}* %fn, i32 %n1, i32 %r1)
+ ret i32 %r2
+}
+
+; Combine the complex stack frame with the parameter modification.
+define i32 @t5({}* %fn, i32 %n, i32 %r) alignstack(32) {
+; CHECK-LABEL: t5:
+; CHECK: pushl %ebp
+; CHECK: movl %esp, %ebp
+; CHECK: pushl %esi
+; Align the stack.
+; CHECK: andl $-32, %esp
+; CHECK: movl %esp, %esi
+; Modify the args.
+; CHECK: incl %[[r:.*]]
+; CHECK: decl %[[n:.*]]
+; Store them through ebp, since that's the only stable arg pointer.
+; CHECK: movl %[[r]], {{[0-9]+}}(%ebp)
+; CHECK: movl %[[n]], {{[0-9]+}}(%ebp)
+; Epilogue.
+; CHECK: leal {{[-0-9]+}}(%ebp), %esp
+; CHECK: popl %esi
+; CHECK: popl %ebp
+; CHECK: jmpl *%{{.*}}
+
+entry:
+ %a = alloca i8, i32 %n
+ call void @capture(i8* %a)
+ %r1 = add i32 %r, 1
+ %n1 = sub i32 %n, 1
+ %fn_cast = bitcast {}* %fn to i32 ({}*, i32, i32)*
+ %r2 = musttail call i32 %fn_cast({}* %fn, i32 %n1, i32 %r1)
+ ret i32 %r2
+}
diff --git a/test/CodeGen/X86/named-reg-alloc.ll b/test/CodeGen/X86/named-reg-alloc.ll
new file mode 100644
index 000000000000..9463ea377a9d
--- /dev/null
+++ b/test/CodeGen/X86/named-reg-alloc.ll
@@ -0,0 +1,14 @@
+; RUN: not llc < %s -mtriple=x86_64-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=x86_64-linux-gnueabi 2>&1 | FileCheck %s
+
+define i32 @get_stack() nounwind {
+entry:
+; FIXME: Include an allocatable-specific error message
+; CHECK: Invalid register name global variable
+ %sp = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %sp
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"eax\00"}
diff --git a/test/CodeGen/X86/named-reg-notareg.ll b/test/CodeGen/X86/named-reg-notareg.ll
new file mode 100644
index 000000000000..d85ddddbea85
--- /dev/null
+++ b/test/CodeGen/X86/named-reg-notareg.ll
@@ -0,0 +1,13 @@
+; RUN: not llc < %s -mtriple=x86_64-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=x86_64-linux-gnueabi 2>&1 | FileCheck %s
+
+define i32 @get_stack() nounwind {
+entry:
+; CHECK: Invalid register name global variable
+ %sp = call i32 @llvm.read_register.i32(metadata !0)
+ ret i32 %sp
+}
+
+declare i32 @llvm.read_register.i32(metadata) nounwind
+
+!0 = metadata !{metadata !"notareg\00"}
diff --git a/test/CodeGen/X86/negate-add-zero.ll b/test/CodeGen/X86/negate-add-zero.ll
index 92850f22eaa5..c961bd091b95 100644
--- a/test/CodeGen/X86/negate-add-zero.ll
+++ b/test/CodeGen/X86/negate-add-zero.ll
@@ -827,9 +827,7 @@ declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi6ELi6ELi0ELi0EEEENT_13
declare void @_ZN21HNodeTranslateRotate311toCartesianEv(%struct.HNodeTranslateRotate3*)
define linkonce void @_ZN21HNodeTranslateRotate36setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3* %this, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv) {
-entry:
- %0 = add i32 0, -1 ; <i32> [#uses=1]
- %1 = getelementptr double* null, i32 %0 ; <double*> [#uses=1]
+ %1 = getelementptr double* null, i32 -1 ; <double*> [#uses=1]
%2 = load double* %1, align 8 ; <double> [#uses=1]
%3 = load double* null, align 8 ; <double> [#uses=2]
%4 = load double* null, align 8 ; <double> [#uses=2]
@@ -890,13 +888,12 @@ entry:
store double %52, double* %55, align 8
%56 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
store double %53, double* %56, align 8
- %57 = add i32 0, 4 ; <i32> [#uses=1]
- %58 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 0 ; <%"struct.CDSVector<double,0,CDS::DefaultAlloc>"**> [#uses=1]
- store %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"** %58, align 8
- %59 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %57, i32* %59, align 4
- %60 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 2 ; <i32*> [#uses=1]
- store i32 3, i32* %60, align 8
+ %57 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 0 ; <%"struct.CDSVector<double,0,CDS::DefaultAlloc>"**> [#uses=1]
+ store %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"** %57, align 8
+ %58 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 1 ; <i32*> [#uses=1]
+ store i32 4, i32* %58, align 4
+ %59 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 2 ; <i32*> [#uses=1]
+ store i32 3, i32* %59, align 8
unreachable
}
diff --git a/test/CodeGen/X86/no-cfi.ll b/test/CodeGen/X86/no-cfi.ll
deleted file mode 100644
index 5bb9bb2d4f67..000000000000
--- a/test/CodeGen/X86/no-cfi.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -disable-cfi | FileCheck --check-prefix=STATIC %s
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -disable-cfi -relocation-model=pic | FileCheck --check-prefix=PIC %s
-
-; STATIC: .ascii "zPLR"
-; STATIC: .byte 3
-; STATIC-NEXT: .long __gxx_personality_v0
-; STATIC-NEXT: .byte 3
-; STATIC-NEXT: .byte 3
-
-; PIC: .ascii "zPLR"
-; PIC: .byte 155
-; PIC-NEXT: .L
-; PIC-NEXT: .long DW.ref.__gxx_personality_v0-.L
-; PIC-NEXT: .byte 27
-; PIC-NEXT: .byte 27
-
-
-define void @bar() {
-entry:
- %call = invoke i32 @foo()
- to label %invoke.cont unwind label %lpad
-
-invoke.cont:
- ret void
-
-lpad:
- %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
- catch i8* null
- ret void
-}
-
-declare i32 @foo()
-
-declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/no-elf-compact-unwind.ll b/test/CodeGen/X86/no-elf-compact-unwind.ll
deleted file mode 100644
index 8a15817bcfe9..000000000000
--- a/test/CodeGen/X86/no-elf-compact-unwind.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -mtriple x86_64-apple-macosx10.8.0 -disable-cfi | FileCheck -check-prefix=MACHO %s
-; RUN: llc < %s -mtriple x86_64-unknown-linux -disable-cfi | FileCheck -check-prefix=ELF %s
-
-; Make sure we don't generate a compact unwind for ELF.
-
-; MACHO-LABEL: _Z3barv:
-; MACHO: __compact_unwind
-
-; ELF-LABEL: _Z3barv:
-; ELF-NOT: __compact_unwind
-
-@_ZTIi = external constant i8*
-
-define void @_Z3barv() uwtable {
-entry:
- invoke void @_Z3foov()
- to label %try.cont unwind label %lpad
-
-lpad: ; preds = %entry
- %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- catch i8* bitcast (i8** @_ZTIi to i8*)
- %1 = extractvalue { i8*, i32 } %0, 1
- %2 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
- %matches = icmp eq i32 %1, %2
- br i1 %matches, label %catch, label %eh.resume
-
-catch: ; preds = %lpad
- %3 = extractvalue { i8*, i32 } %0, 0
- %4 = tail call i8* @__cxa_begin_catch(i8* %3)
- tail call void @__cxa_end_catch()
- br label %try.cont
-
-try.cont: ; preds = %entry, %catch
- ret void
-
-eh.resume: ; preds = %lpad
- resume { i8*, i32 } %0
-}
-
-declare void @_Z3foov()
-
-declare i32 @__gxx_personality_v0(...)
-
-declare i32 @llvm.eh.typeid.for(i8*)
-
-declare i8* @__cxa_begin_catch(i8*)
-
-declare void @__cxa_end_catch()
diff --git a/test/CodeGen/X86/nocx16.ll b/test/CodeGen/X86/nocx16.ll
index cceaac47122d..8b995dafa75a 100644
--- a/test/CodeGen/X86/nocx16.ll
+++ b/test/CodeGen/X86/nocx16.ll
@@ -2,7 +2,7 @@
define void @test(i128* %a) nounwind {
entry:
; CHECK: __sync_val_compare_and_swap_16
- %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst
+ %0 = cmpxchg i128* %a, i128 1, i128 1 seq_cst seq_cst
; CHECK: __sync_lock_test_and_set_16
%1 = atomicrmw xchg i128* %a, i128 1 seq_cst
; CHECK: __sync_fetch_and_add_16
diff --git a/test/CodeGen/X86/null-streamer.ll b/test/CodeGen/X86/null-streamer.ll
index 7c0e82f08f93..fa77fcb1d138 100644
--- a/test/CodeGen/X86/null-streamer.ll
+++ b/test/CodeGen/X86/null-streamer.ll
@@ -1,6 +1,7 @@
; Check the MCNullStreamer operates correctly, at least on a minimal test case.
;
; RUN: llc -filetype=null -o %t -march=x86 %s
+; RUN: llc -filetype=null -o %t -mtriple=i686-cygwin %s
define void @f0() {
ret void
@@ -9,3 +10,20 @@ define void @f0() {
define void @f1() {
ret void
}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!11, !13}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !" ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !9, metadata !2, metadata !""}
+!1 = metadata !{metadata !"", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"", metadata !"", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* null, null, null, metadata !2, i32 2}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null}
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786484, i32 0, null, metadata !"i", metadata !"i", metadata !"_ZL1i", metadata !5, i32 1, metadata !8, i32 1, i32 1, null, null}
+!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
+!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/X86/opaque-constant-asm.ll b/test/CodeGen/X86/opaque-constant-asm.ll
new file mode 100644
index 000000000000..dd1cc8ec4839
--- /dev/null
+++ b/test/CodeGen/X86/opaque-constant-asm.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -no-integrated-as | FileCheck %s
+; This test makes sure that we do not mistake the bitcast inside the asm statement
+; for an opaque constant. If we do, then the compilation will simply fail.
+
+%struct2 = type <{ i32, i32, i32, i32 }>
+%union.anon = type { [2 x i64], [4 x i32] }
+%struct1 = type { i32, %union.anon }
+
+define void @test() {
+; CHECK: #ASM $16
+ call void asm sideeffect "#ASM $0", "n"(i32 ptrtoint (i32* getelementptr inbounds (%struct2* bitcast (%union.anon* getelementptr inbounds (%struct1* null, i32 0, i32 1) to %struct2*), i32 0, i32 2) to i32))
+ ret void
+}
diff --git a/test/CodeGen/X86/osx-private-labels.ll b/test/CodeGen/X86/osx-private-labels.ll
new file mode 100644
index 000000000000..349ce7d0cc5e
--- /dev/null
+++ b/test/CodeGen/X86/osx-private-labels.ll
@@ -0,0 +1,71 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; Test all the cases where an L label is safe. Removing any entry from
+; TargetLoweringObjectFileMachO::isSectionAtomizableBySymbols should cause
+; this to fail.
+; We also test some noteworthy cases that require an l label.
+
+@private1 = private unnamed_addr constant [4 x i8] c"zed\00"
+; CHECK: .section __TEXT,__cstring,cstring_literals
+; CHECK-NEXT: L_private1:
+
+@private2 = private unnamed_addr constant [5 x i16] [i16 116, i16 101,
+ i16 115, i16 116, i16 0]
+; CHECK: .section __TEXT,__ustring
+; CHECK-NEXT: .align 1
+; CHECK-NEXT: l_private2:
+
+; There is no dedicated 4 byte string section on MachO.
+
+%struct.NSConstantString = type { i32*, i32, i8*, i32 }
+@private3 = private constant %struct.NSConstantString { i32* null, i32 1992, i8* null, i32 0 }, section "__DATA,__cfstring"
+; CHECK: .section __DATA,__cfstring
+; CHECK-NEXT: .align 4
+; CHECK-NEXT: L_private3:
+
+; There is no dedicated 1 or 2 byte constant section on MachO.
+
+@private4 = private unnamed_addr constant i32 42
+; CHECK: .section __TEXT,__literal4,4byte_literals
+; CHECK-NEXT: .align 2
+; CHECK-NEXT: L_private4:
+
+@private5 = private unnamed_addr constant i64 42
+; CHECK: .section __TEXT,__literal8,8byte_literals
+; CHECK-NEXT: .align 3
+; CHECK-NEXT: L_private5:
+
+@private6 = private unnamed_addr constant i128 42
+; CHECK: .section __TEXT,__literal16,16byte_literals
+; CHECK-NEXT: .align 3
+; CHECK-NEXT: L_private6:
+
+%struct._objc_class = type { i8* }
+@private7 = private global %struct._objc_class* null, section "__OBJC,__cls_refs,literal_pointers,no_dead_strip"
+; CHECK: .section __OBJC,__cls_refs,literal_pointers,no_dead_strip
+; CHECK: .align 3
+; CHECK: L_private7:
+
+@private8 = private global i32* null, section "__DATA,__nl_symbol_ptr,non_lazy_symbol_pointers"
+; CHECK: .section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+; CHECK-NEXT: .align 3
+; CHECK-NEXT: L_private8:
+
+@private9 = private global i32* null, section "__DATA,__la_symbol_ptr,lazy_symbol_pointers"
+; CHECK: .section __DATA,__la_symbol_ptr,lazy_symbol_pointers
+; CHECK-NEXT: .align 3
+; CHECK-NEXT: L_private9:
+
+@private10 = private global i32* null, section "__DATA,__mod_init_func,mod_init_funcs"
+; CHECK: .section __DATA,__mod_init_func,mod_init_funcs
+; CHECK-NEXT: .align 3
+; CHECK-NEXT: L_private10:
+
+@private11 = private global i32* null, section "__DATA,__mod_term_func,mod_term_funcs"
+; CHECK: .section __DATA,__mod_term_func,mod_term_funcs
+; CHECK-NEXT: .align 3
+; CHECK-NEXT: L_private11:
+
+@private12 = private global i32* null, section "__DATA,__foobar,interposing"
+; CHECK: .section __DATA,__foobar,interposing
+; CHECK-NEXT: .align 3
+; CHECK-NEXT: L_private12:
diff --git a/test/CodeGen/X86/patchpoint.ll b/test/CodeGen/X86/patchpoint.ll
index d534639953b3..62b12732ded4 100644
--- a/test/CodeGen/X86/patchpoint.ll
+++ b/test/CodeGen/X86/patchpoint.ll
@@ -7,16 +7,16 @@ entry:
; CHECK-LABEL: trivial_patchpoint_codegen:
; CHECK: movabsq $-559038736, %r11
; CHECK-NEXT: callq *%r11
-; CHECK-NEXT: nop
+; CHECK-NEXT: xchgw %ax, %ax
; CHECK: movq %rax, %[[REG:r.+]]
; CHECK: callq *%r11
-; CHECK-NEXT: nop
+; CHECK-NEXT: xchgw %ax, %ax
; CHECK: movq %[[REG]], %rax
; CHECK: ret
%resolveCall2 = inttoptr i64 -559038736 to i8*
- %result = tail call i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 2, i32 15, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 15, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
%resolveCall3 = inttoptr i64 -559038737 to i8*
- tail call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 3, i32 15, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 15, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
ret i64 %result
}
@@ -34,31 +34,65 @@ entry:
store i64 11, i64* %metadata
store i64 12, i64* %metadata
store i64 13, i64* %metadata
- call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 4, i32 0, i64* %metadata)
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
ret void
}
; Test the webkit_jscc calling convention.
-; Two arguments will be pushed on the stack.
+; One argument will be passed in register, the other will be pushed on the stack.
; Return value in $rax.
define void @jscall_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: jscall_patchpoint_codegen:
; CHECK: Ltmp
-; CHECK: movq %r{{.+}}, 8(%rsp)
; CHECK: movq %r{{.+}}, (%rsp)
+; CHECK: movq %r{{.+}}, %rax
; CHECK: Ltmp
; CHECK-NEXT: movabsq $-559038736, %r11
; CHECK-NEXT: callq *%r11
-; CHECK: movq %rax, 8(%rsp)
+; CHECK: movq %rax, (%rsp)
; CHECK: callq
%resolveCall2 = inttoptr i64 -559038736 to i8*
- %result = tail call webkit_jscc i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 5, i32 15, i8* %resolveCall2, i32 2, i64 %p1, i64 %p2)
+ %result = tail call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 15, i8* %resolveCall2, i32 2, i64 %p4, i64 %p2)
%resolveCall3 = inttoptr i64 -559038737 to i8*
- tail call webkit_jscc void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 6, i32 15, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ tail call webkit_jscc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 15, i8* %resolveCall3, i32 2, i64 %p4, i64 %result)
ret void
}
+; Test that the arguments are properly aligned and that we don't store undef arguments.
+define i64 @jscall_patchpoint_codegen2(i64 %callee) {
+entry:
+; CHECK-LABEL: jscall_patchpoint_codegen2:
+; CHECK: Ltmp
+; CHECK: movq $6, 24(%rsp)
+; CHECK-NEXT: movl $4, 16(%rsp)
+; CHECK-NEXT: movq $2, (%rsp)
+; CHECK: Ltmp
+; CHECK-NEXT: movabsq $-559038736, %r11
+; CHECK-NEXT: callq *%r11
+ %call = inttoptr i64 -559038736 to i8*
+ %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 15, i8* %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6)
+ ret i64 %result
+}
+
+; Test that the arguments are properly aligned and that we don't store undef arguments.
+define i64 @jscall_patchpoint_codegen3(i64 %callee) {
+entry:
+; CHECK-LABEL: jscall_patchpoint_codegen3:
+; CHECK: Ltmp
+; CHECK: movq $10, 48(%rsp)
+; CHECK-NEXT: movl $8, 36(%rsp)
+; CHECK-NEXT: movq $6, 24(%rsp)
+; CHECK-NEXT: movl $4, 16(%rsp)
+; CHECK-NEXT: movq $2, (%rsp)
+; CHECK: Ltmp
+; CHECK-NEXT: movabsq $-559038736, %r11
+; CHECK-NEXT: callq *%r11
+ %call = inttoptr i64 -559038736 to i8*
+ %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 15, i8* %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10)
+ ret i64 %result
+}
+
; Test patchpoints reusing the same TargetConstant.
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
@@ -68,14 +102,14 @@ entry:
%tmp80 = add i64 %tmp79, -16
%tmp81 = inttoptr i64 %tmp80 to i64*
%tmp82 = load i64* %tmp81, align 8
- tail call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 14, i32 5, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
- tail call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 15, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 5, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
%tmp83 = load i64* %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
%tmp85 = inttoptr i64 %tmp84 to i64*
%tmp86 = load i64* %tmp85, align 8
- tail call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 17, i32 5, i64 %arg, i64 %tmp10, i64 %tmp86)
- tail call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 18, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 5, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
}
@@ -84,17 +118,13 @@ define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: Ltmp
-; CHECK: nop
-; CHECK-NEXT: nop
-; CHECK-NEXT: nop
-; CHECK-NEXT: nop
-; CHECK-NEXT: nop
+; CHECK: nopl 8(%rax,%rax)
; CHECK-NEXT: popq
; CHECK-NEXT: ret
- %result = tail call i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 5, i32 5, i8* null, i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 5, i8* null, i32 2, i64 %p1, i64 %p2)
ret void
}
-declare void @llvm.experimental.stackmap(i32, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i32, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i32, i32, i8*, i32, ...)
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/X86/peep-test-4.ll b/test/CodeGen/X86/peep-test-4.ll
index 884ee7c2ba28..1ae621fb1f58 100644
--- a/test/CodeGen/X86/peep-test-4.ll
+++ b/test/CodeGen/X86/peep-test-4.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+bmi,+bmi2,+popcnt | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+bmi,+bmi2,+popcnt,+lzcnt | FileCheck %s
declare void @foo(i32)
+declare void @foo32(i32)
declare void @foo64(i64)
; CHECK-LABEL: neg:
@@ -189,3 +190,76 @@ bb:
return:
ret void
}
+
+; CHECK-LABEL: testCTZ
+; CHECK: tzcntq
+; CHECK-NOT: test
+; CHECK: cmovaeq
+declare i64 @llvm.cttz.i64(i64, i1)
+define i64 @testCTZ(i64 %v) nounwind {
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp eq i64 %v, 0
+ %cond = select i1 %tobool, i64 255, i64 %cnt
+ ret i64 %cond
+}
+
+; CHECK-LABEL: testCTZ2
+; CHECK: tzcntl
+; CHECK-NEXT: jb
+; CHECK: jmp foo
+declare i32 @llvm.cttz.i32(i32, i1)
+define void @testCTZ2(i32 %v) nounwind {
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %cmp = icmp eq i32 %v, 0
+ br i1 %cmp, label %return, label %bb
+
+bb:
+ tail call void @foo(i32 %cnt)
+ br label %return
+
+return:
+ tail call void @foo32(i32 %cnt)
+ ret void
+}
+
+; CHECK-LABEL: testCTZ3
+; CHECK: tzcntl
+; CHECK-NEXT: jae
+; CHECK: jmp foo
+define void @testCTZ3(i32 %v) nounwind {
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %cmp = icmp ne i32 %v, 0
+ br i1 %cmp, label %return, label %bb
+
+bb:
+ tail call void @foo(i32 %cnt)
+ br label %return
+
+return:
+ tail call void @foo32(i32 %cnt)
+ ret void
+}
+
+; CHECK-LABEL: testCLZ
+; CHECK: lzcntq
+; CHECK-NOT: test
+; CHECK: cmovaeq
+declare i64 @llvm.ctlz.i64(i64, i1)
+define i64 @testCLZ(i64 %v) nounwind {
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 %cnt, i64 255
+ ret i64 %cond
+}
+
+; CHECK-LABEL: testPOPCNT
+; CHECK: popcntq
+; CHECK-NOT: test
+; CHECK: cmovneq
+declare i64 @llvm.ctpop.i64(i64)
+define i64 @testPOPCNT(i64 %v) nounwind {
+ %cnt = tail call i64 @llvm.ctpop.i64(i64 %v)
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 %cnt, i64 255
+ ret i64 %cond
+}
diff --git a/test/CodeGen/X86/peephole-multiple-folds.ll b/test/CodeGen/X86/peephole-multiple-folds.ll
new file mode 100644
index 000000000000..a6cec66c73c9
--- /dev/null
+++ b/test/CodeGen/X86/peephole-multiple-folds.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=x86-64 -mcpu=core-avx2 < %s | FileCheck %s
+;
+; Test multiple peephole-time folds in a single basic block.
+; <rdar://problem/16478629>
+
+define <8 x float> @test_peephole_multi_fold(<8 x float>* %p1, <8 x float>* %p2) {
+entry:
+ br label %loopbody
+
+loopbody:
+; CHECK: test_peephole_multi_fold:
+; CHECK: vfmadd231ps ({{%rdi|%rcx}}),
+; CHECK: vfmadd231ps ({{%rsi|%rdx}}),
+ %vsum1 = phi <8 x float> [ %vsum1.next, %loopbody ], [ zeroinitializer, %entry ]
+ %vsum2 = phi <8 x float> [ %vsum2.next, %loopbody ], [ zeroinitializer, %entry ]
+ %m1 = load <8 x float>* %p1, align 1
+ %m2 = load <8 x float>* %p2, align 1
+ %vsum1.next = tail call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %m1, <8 x float> zeroinitializer, <8 x float> %vsum1)
+ %vsum2.next = tail call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %m2, <8 x float> zeroinitializer, <8 x float> %vsum2)
+ %vsum1.next.1 = extractelement <8 x float> %vsum1.next, i32 0
+ %c = fcmp oeq float %vsum1.next.1, 0.0
+ br i1 %c, label %loopbody, label %loopexit
+
+loopexit:
+ %r = fadd <8 x float> %vsum1.next, %vsum2.next
+ ret <8 x float> %r
+}
+
+declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>)
diff --git a/test/CodeGen/X86/personality.ll b/test/CodeGen/X86/personality.ll
index 51be7bce2931..424a30734f00 100644
--- a/test/CodeGen/X86/personality.ll
+++ b/test/CodeGen/X86/personality.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -disable-cfi -mtriple=x86_64-apple-darwin9 -disable-cgp-branch-opts | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -disable-cfi -mtriple=i386-apple-darwin9 -disable-cgp-branch-opts | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin9 | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i386-apple-darwin9 | FileCheck %s -check-prefix=X32
; PR1632
define void @_Z1fv() {
@@ -41,15 +41,10 @@ declare void @__cxa_end_catch()
declare i32 @__gxx_personality_v0(...)
-; X64: zPLR
-; X64: .byte 155
-; X64-NEXT: .long ___gxx_personality_v0@GOTPCREL+4
+; X64: .cfi_personality 155, ___gxx_personality_v0
+
+; X32: .cfi_personality 155, L___gxx_personality_v0$non_lazy_ptr
; X32: .section __IMPORT,__pointers,non_lazy_symbol_pointers
; X32-NEXT: L___gxx_personality_v0$non_lazy_ptr:
; X32-NEXT: .indirect_symbol ___gxx_personality_v0
-
-; X32: zPLR
-; X32: .byte 155
-; X32-NEXT: :
-; X32-NEXT: .long L___gxx_personality_v0$non_lazy_ptr-
diff --git a/test/CodeGen/X86/personality_size.ll b/test/CodeGen/X86/personality_size.ll
index 30a5d39e4afc..79d131b82b2e 100644
--- a/test/CodeGen/X86/personality_size.ll
+++ b/test/CodeGen/X86/personality_size.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -relocation-model=pic -disable-cfi -mtriple=x86_64-pc-solaris2.11 -disable-cgp-branch-opts | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -relocation-model=pic -disable-cfi -mtriple=i386-pc-solaris2.11 -disable-cgp-branch-opts | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -relocation-model=pic -mtriple=x86_64-pc-solaris2.11 | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -relocation-model=pic -mtriple=i386-pc-solaris2.11 | FileCheck %s -check-prefix=X32
; PR1632
define void @_Z1fv() {
diff --git a/test/CodeGen/X86/pic.ll b/test/CodeGen/X86/pic.ll
index 7bb127eae930..da1e2248065f 100644
--- a/test/CodeGen/X86/pic.ll
+++ b/test/CodeGen/X86/pic.ll
@@ -192,7 +192,8 @@ bb12:
; LINUX: .LJTI7_0@GOTOFF(
; LINUX: jmpl *
-; LINUX: .LJTI7_0:
+; LINUX: .align 4
+; LINUX-NEXT: .LJTI7_0:
; LINUX: .long .LBB7_2@GOTOFF
; LINUX: .long .LBB7_8@GOTOFF
; LINUX: .long .LBB7_14@GOTOFF
diff --git a/test/CodeGen/X86/pr10420.ll b/test/CodeGen/X86/pr10420.ll
deleted file mode 100644
index 62951892619b..000000000000
--- a/test/CodeGen/X86/pr10420.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.7 -disable-cfi | FileCheck --check-prefix=CHECK-64-D11 %s
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.6 -disable-cfi | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.5 -disable-cfi | FileCheck --check-prefix=CHECK-64-D89 %s
-; RUN: llc < %s -mtriple=i686-apple-macosx10.6 -disable-cfi | FileCheck --check-prefix=CHECK-I686-D10 %s
-; RUN: llc < %s -mtriple=i686-apple-macosx10.5 -disable-cfi | FileCheck --check-prefix=CHECK-I686-D89 %s
-; RUN: llc < %s -mtriple=i686-apple-macosx10.4 -disable-cfi | FileCheck --check-prefix=CHECK-I686-D89 %s
-
-define private void @foo() {
- ret void
-}
-
-define void @bar() {
- call void @foo()
- ret void;
-}
-
-; CHECK: _bar: ## @bar
-; CHECK-NEXT: Ltmp2:
-
-; CHECK: Ltmp12:
-; CHECK-NEXT: Ltmp13 = L_foo-Ltmp12 ## FDE initial location
-; CHECK-NEXT: .quad Ltmp13
-
-; CHECK: Ltmp19:
-; CHECK-NEXT: Ltmp20 = Ltmp2-Ltmp19 ## FDE initial location
-; CHECK-NEXT: .quad Ltmp20
-
-
-; CHECK-64-D11: Ltmp13:
-; CHECK-64-D11-NEXT: Ltmp14 = L_foo-Ltmp13 ## FDE initial location
-; CHECK-64-D11-NEXT: .quad Ltmp14
-
-; CHECK-64-D11: Ltmp20:
-; CHECK-64-D11-NEXT: Ltmp21 = Ltmp2-Ltmp20 ## FDE initial location
-; CHECK-64-D11-NEXT: .quad Ltmp21
-
-
-; CHECK-64-D89: Ltmp12:
-; CHECK-64-D89-NEXT: .quad L_foo-Ltmp12 ## FDE initial location
-; CHECK-64-D89-NEXT: Ltmp13 = (Ltmp0-L_foo)-0 ## FDE address range
-; CHECK-64-D89-NEXT: .quad Ltmp13
-
-; CHECK-64-D89: Ltmp18:
-; CHECK-64-D89-NEXT: .quad Ltmp2-Ltmp18 ## FDE initial location
-; CHECK-64-D89-NEXT: Ltmp19 = (Ltmp4-Ltmp2)-0 ## FDE address range
-; CHECK-64-D89-NEXT: .quad Ltmp19
-
-
-; CHECK-I686-D10: Ltmp12:
-; CHECK-I686-D10-NEXT: Ltmp13 = L_foo-Ltmp12 ## FDE initial location
-; CHECK-I686-D10-NEXT: .long Ltmp13
-
-; CHECK-I686-D10: Ltmp19:
-; CHECK-I686-D10-NEXT: Ltmp20 = Ltmp2-Ltmp19 ## FDE initial location
-; CHECK-I686-D10-NEXT: .long Ltmp20
-
-
-; CHECK-I686-D89: Ltmp12:
-; CHECK-I686-D89-NEXT: .long L_foo-Ltmp12 ## FDE initial location
-; CHECK-I686-D89-NEXT: Ltmp13 = (Ltmp0-L_foo)-0 ## FDE address range
-; CHECK-I686-D89-NEXT: .long Ltmp13
-
-; CHECK-I686-D89: Ltmp18:
-; CHECK-I686-D89-NEXT: .long Ltmp2-Ltmp18 ## FDE initial location
-; CHECK-I686-D89-NEXT: Ltmp19 = (Ltmp4-Ltmp2)-0 ## FDE address range
-; CHECK-I686-D89-NEXT: .long Ltmp19
-
diff --git a/test/CodeGen/X86/pr14090.ll b/test/CodeGen/X86/pr14090.ll
deleted file mode 100644
index 2f7c720386be..000000000000
--- a/test/CodeGen/X86/pr14090.ll
+++ /dev/null
@@ -1,70 +0,0 @@
-; RUN: llc < %s -march=x86-64 -print-before=stack-coloring -print-after=stack-coloring >%t 2>&1 && FileCheck <%t %s
-
-define void @foo(i64* %retval.i, i32 %call, i32* %.ph.i80, i32 %fourteen, i32* %out.lo, i32* %out.hi) nounwind align 2 {
-entry:
- %_Tmp.i39 = alloca i64, align 8
- %retval.i33 = alloca i64, align 8
- %_Tmp.i = alloca i64, align 8
- %retval.i.i = alloca i64, align 8
- %_First.i = alloca i64, align 8
-
- %0 = load i64* %retval.i, align 8
-
- %1 = load i64* %retval.i, align 8
-
- %_Tmp.i39.0.cast73 = bitcast i64* %_Tmp.i39 to i8*
- call void @llvm.lifetime.start(i64 8, i8* %_Tmp.i39.0.cast73)
- store i64 %1, i64* %_Tmp.i39, align 8
- %cmp.i.i.i40 = icmp slt i32 %call, 0
- %2 = lshr i64 %1, 32
- %3 = trunc i64 %2 to i32
- %sub.i.i.i44 = sub i32 0, %call
- %cmp2.i.i.i45 = icmp ult i32 %3, %sub.i.i.i44
- %or.cond.i.i.i46 = and i1 %cmp.i.i.i40, %cmp2.i.i.i45
- %add.i.i.i47 = add i32 %3, %call
- %sub5.i.i.i48 = lshr i32 %add.i.i.i47, 5
- %trunc.i50 = trunc i64 %1 to i32
- %inttoptr.i51 = inttoptr i32 %trunc.i50 to i32*
- %add61617.i.i.i52 = or i32 %sub5.i.i.i48, -134217728
- %add61617.i.sub5.i.i.i53 = select i1 %or.cond.i.i.i46, i32 %add61617.i.i.i52, i32 %sub5.i.i.i48
- %storemerge2.i.i54 = getelementptr inbounds i32* %inttoptr.i51, i32 %add61617.i.sub5.i.i.i53
- %_Tmp.i39.0.cast74 = bitcast i64* %_Tmp.i39 to i32**
- store i32* %storemerge2.i.i54, i32** %_Tmp.i39.0.cast74, align 8
- %storemerge.i.i55 = and i32 %add.i.i.i47, 31
- %_Tmp.i39.4.raw_idx = getelementptr inbounds i8* %_Tmp.i39.0.cast73, i32 4
- %_Tmp.i39.4.cast = bitcast i8* %_Tmp.i39.4.raw_idx to i32*
- store i32 %storemerge.i.i55, i32* %_Tmp.i39.4.cast, align 4
- %srcval.i56 = load i64* %_Tmp.i39, align 8
- call void @llvm.lifetime.end(i64 8, i8* %_Tmp.i39.0.cast73)
-
-; CHECK: Before Merge disjoint stack slots
-; CHECK: [[PREFIX15:MOV64mr.*<fi#]]{{[0-9]}}[[SUFFIX15:.*;]] mem:ST8[%fifteen]
-; CHECK: [[PREFIX87:MOV32mr.*;]] mem:ST4[%sunkaddr87]
-
-; CHECK: After Merge disjoint stack slots
-; CHECK: [[PREFIX15]]{{[0-9]}}[[SUFFIX15]] mem:ST8[%_Tmp.i39]
-; CHECK: [[PREFIX87]] mem:ST4[<unknown>]
-
- %fifteen = bitcast i64* %retval.i.i to i32**
- %sixteen = bitcast i64* %retval.i.i to i8*
- call void @llvm.lifetime.start(i64 8, i8* %sixteen)
- store i32* %.ph.i80, i32** %fifteen, align 8
- %sunkaddr = ptrtoint i64* %retval.i.i to i32
- %sunkaddr86 = add i32 %sunkaddr, 4
- %sunkaddr87 = inttoptr i32 %sunkaddr86 to i32*
- store i32 %fourteen, i32* %sunkaddr87, align 4
- %seventeen = load i64* %retval.i.i, align 8
- call void @llvm.lifetime.end(i64 8, i8* %sixteen)
- %eighteen = lshr i64 %seventeen, 32
- %nineteen = trunc i64 %eighteen to i32
- %shl.i.i.i = shl i32 1, %nineteen
-
- store i32 %shl.i.i.i, i32* %out.lo, align 8
- store i32 %nineteen, i32* %out.hi, align 8
-
- ret void
-}
-
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/X86/pr1462.ll b/test/CodeGen/X86/pr1462.ll
index 62549a50356a..3aa18609d469 100644
--- a/test/CodeGen/X86/pr1462.ll
+++ b/test/CodeGen/X86/pr1462.ll
@@ -1,8 +1,7 @@
; RUN: llc < %s
; PR1462
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-
-v64:64:64-v128:128:128-a0:0:64"
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "x86_64-unknown-linux-gnu"
define hidden i128 @__addvti3(i128 %a1, i128 %b2) {
diff --git a/test/CodeGen/X86/pr16031.ll b/test/CodeGen/X86/pr16031.ll
index ecf6218aeb38..dc16fd9671ad 100644
--- a/test/CodeGen/X86/pr16031.ll
+++ b/test/CodeGen/X86/pr16031.ll
@@ -2,9 +2,9 @@
; CHECK-LABEL: main:
; CHECK: pushl %esi
+; CHECK-NEXT: testb $1, 8(%esp)
; CHECK-NEXT: movl $-12, %eax
; CHECK-NEXT: movl $-1, %edx
-; CHECK-NEXT: testb $1, 8(%esp)
; CHECK-NEXT: cmovel %edx, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: movl %eax, %esi
diff --git a/test/CodeGen/X86/pr19049.ll b/test/CodeGen/X86/pr19049.ll
new file mode 100644
index 000000000000..027c9815e0c7
--- /dev/null
+++ b/test/CodeGen/X86/pr19049.ll
@@ -0,0 +1,7 @@
+; RUN: llc -mtriple x86_64-pc-linux %s -o - | FileCheck %s
+
+module asm ".pushsection foo"
+module asm ".popsection"
+
+; CHECK: .section foo,"",@progbits
+; CHECK: .text
diff --git a/test/CodeGen/X86/pr20020.ll b/test/CodeGen/X86/pr20020.ll
new file mode 100644
index 000000000000..83dae369dd75
--- /dev/null
+++ b/test/CodeGen/X86/pr20020.ll
@@ -0,0 +1,73 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx -disable-lsr -post-RA-scheduler=1 -break-anti-dependencies=critical | FileCheck %s
+
+; In PR20020, the critical anti-dependency breaker algorithm mistakenly
+; changes the register operands of an 'xorl %eax, %eax' to 'xorl %ecx, %ecx'
+; and then immediately reloads %rcx with a value based on the wrong %rax.
+
+; CHECK-NOT: xorl %ecx, %ecx
+; CHECK: leaq 1(%rax), %rcx
+
+
+%struct.planet = type { double, double, double }
+
+; Function Attrs: nounwind ssp uwtable
+define void @advance(i32 %nbodies, %struct.planet* nocapture %bodies) #0 {
+entry:
+ %cmp4 = icmp sgt i32 %nbodies, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.end38
+
+for.body.preheader: ; preds = %entry
+ %gep = getelementptr %struct.planet* %bodies, i64 1, i32 1
+ %gep13 = bitcast double* %gep to %struct.planet*
+ %0 = add i32 %nbodies, -1
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.inc20
+ %iv19 = phi i32 [ %0, %for.body.preheader ], [ %iv.next, %for.inc20 ]
+ %iv = phi %struct.planet* [ %gep13, %for.body.preheader ], [ %gep14, %for.inc20 ]
+ %iv9 = phi i64 [ %iv.next10, %for.inc20 ], [ 0, %for.body.preheader ]
+ %iv.next10 = add nuw nsw i64 %iv9, 1
+ %1 = trunc i64 %iv.next10 to i32
+ %cmp22 = icmp slt i32 %1, %nbodies
+ br i1 %cmp22, label %for.body3.lr.ph, label %for.inc20
+
+for.body3.lr.ph: ; preds = %for.body
+ %x = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 0
+ %y = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 1
+ %vx = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 2
+ br label %for.body3
+
+for.body3: ; preds = %for.body3, %for.body3.lr.ph
+ %iv20 = phi i32 [ %iv.next21, %for.body3 ], [ %iv19, %for.body3.lr.ph ]
+ %iv15 = phi %struct.planet* [ %gep16, %for.body3 ], [ %iv, %for.body3.lr.ph ]
+ %iv1517 = bitcast %struct.planet* %iv15 to double*
+ %2 = load double* %x, align 8
+ %gep18 = getelementptr double* %iv1517, i64 -1
+ %3 = load double* %gep18, align 8
+ %sub = fsub double %2, %3
+ %4 = load double* %y, align 8
+ %5 = load double* %iv1517, align 8
+ %sub8 = fsub double %4, %5
+ %add10 = fadd double %sub, %sub8
+ %call = tail call double @sqrt(double %sub8) #2
+ store double %add10, double* %vx, align 8
+ %gep16 = getelementptr %struct.planet* %iv15, i64 1
+ %iv.next21 = add i32 %iv20, -1
+ %exitcond = icmp eq i32 %iv.next21, 0
+ br i1 %exitcond, label %for.inc20, label %for.body3
+
+for.inc20: ; preds = %for.body3, %for.body
+ %lftr.wideiv11 = trunc i64 %iv.next10 to i32
+ %gep14 = getelementptr %struct.planet* %iv, i64 1
+ %iv.next = add i32 %iv19, -1
+ %exitcond12 = icmp eq i32 %lftr.wideiv11, %nbodies
+ br i1 %exitcond12, label %for.end38, label %for.body
+
+for.end38: ; preds = %for.inc20, %entry
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @sqrt(double) #1
+
+attributes #0 = { "no-frame-pointer-elim-non-leaf" }
diff --git a/test/CodeGen/X86/pr20088.ll b/test/CodeGen/X86/pr20088.ll
new file mode 100644
index 000000000000..3a829622424c
--- /dev/null
+++ b/test/CodeGen/X86/pr20088.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
+
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @foo(<16 x i8> %x) {
+; CHECK: vpblendvb
+ %res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> zeroinitializer, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %x)
+ ret <16 x i8> %res;
+}
diff --git a/test/CodeGen/X86/pr5145.ll b/test/CodeGen/X86/pr5145.ll
index d048db8a850d..32a797ba138a 100644
--- a/test/CodeGen/X86/pr5145.ll
+++ b/test/CodeGen/X86/pr5145.ll
@@ -5,29 +5,29 @@ define void @atomic_maxmin_i8() {
; CHECK: atomic_maxmin_i8
%1 = atomicrmw max i8* @sc8, i8 5 acquire
; CHECK: [[LABEL1:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovl
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL1]]
%2 = atomicrmw min i8* @sc8, i8 6 acquire
; CHECK: [[LABEL3:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovg
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL3]]
%3 = atomicrmw umax i8* @sc8, i8 7 acquire
; CHECK: [[LABEL5:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovb
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL5]]
%4 = atomicrmw umin i8* @sc8, i8 8 acquire
; CHECK: [[LABEL7:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmova
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL7]]
diff --git a/test/CodeGen/X86/preserve_allcc64.ll b/test/CodeGen/X86/preserve_allcc64.ll
new file mode 100644
index 000000000000..545cd36ab957
--- /dev/null
+++ b/test/CodeGen/X86/preserve_allcc64.ll
@@ -0,0 +1,104 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck --check-prefix=SSE %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck --check-prefix=AVX %s
+
+define preserve_allcc void @preserve_allcc1() nounwind {
+entry:
+;SSE-LABEL: preserve_allcc1
+;SSE: pushq %r10
+;SSE-NEXT: pushq %r9
+;SSE-NEXT: pushq %r8
+;SSE-NEXT: pushq %rdi
+;SSE-NEXT: pushq %rsi
+;SSE-NEXT: pushq %rdx
+;SSE-NEXT: pushq %rcx
+;SSE-NEXT: pushq %rax
+;SSE-NEXT: pushq %rbp
+;SSE-NEXT: pushq %r15
+;SSE-NEXT: pushq %r14
+;SSE-NEXT: pushq %r13
+;SSE-NEXT: pushq %r12
+;SSE-NEXT: pushq %rbx
+;SSE: movaps %xmm15
+;SSE-NEXT: movaps %xmm14
+;SSE-NEXT: movaps %xmm13
+;SSE-NEXT: movaps %xmm12
+;SSE-NEXT: movaps %xmm11
+;SSE-NEXT: movaps %xmm10
+;SSE-NEXT: movaps %xmm9
+;SSE-NEXT: movaps %xmm8
+;SSE-NEXT: movaps %xmm7
+;SSE-NEXT: movaps %xmm6
+;SSE-NEXT: movaps %xmm5
+;SSE-NEXT: movaps %xmm4
+;SSE-NEXT: movaps %xmm3
+;SSE-NEXT: movaps %xmm2
+;SSE-NEXT: movaps %xmm1
+;SSE-NEXT: movaps %xmm0
+;AVX-LABEL: preserve_allcc1
+;AVX: pushq %r10
+;AVX-NEXT: pushq %r9
+;AVX-NEXT: pushq %r8
+;AVX-NEXT: pushq %rdi
+;AVX-NEXT: pushq %rsi
+;AVX-NEXT: pushq %rdx
+;AVX-NEXT: pushq %rcx
+;AVX-NEXT: pushq %rax
+;AVX-NEXT: pushq %rbp
+;AVX-NEXT: pushq %r15
+;AVX-NEXT: pushq %r14
+;AVX-NEXT: pushq %r13
+;AVX-NEXT: pushq %r12
+;AVX-NEXT: pushq %rbx
+;AVX: vmovups %ymm15
+;AVX-NEXT: vmovups %ymm14
+;AVX-NEXT: vmovups %ymm13
+;AVX-NEXT: vmovups %ymm12
+;AVX-NEXT: vmovups %ymm11
+;AVX-NEXT: vmovups %ymm10
+;AVX-NEXT: vmovups %ymm9
+;AVX-NEXT: vmovups %ymm8
+;AVX-NEXT: vmovups %ymm7
+;AVX-NEXT: vmovups %ymm6
+;AVX-NEXT: vmovups %ymm5
+;AVX-NEXT: vmovups %ymm4
+;AVX-NEXT: vmovups %ymm3
+;AVX-NEXT: vmovups %ymm2
+;AVX-NEXT: vmovups %ymm1
+;AVX-NEXT: vmovups %ymm0
+ call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
+ ret void
+}
+
+; Make sure only R11 is saved before the call
+declare preserve_allcc void @bar(i64, i64, double, double)
+define void @preserve_allcc2() nounwind {
+entry:
+;SSE-LABEL: preserve_allcc2
+;SSE: movq %r11, [[REG:%[a-z0-9]+]]
+;SSE-NOT: movaps %xmm
+;SSE: movq [[REG]], %r11
+ %a0 = call i64 asm sideeffect "", "={rax}"() nounwind
+ %a1 = call i64 asm sideeffect "", "={rcx}"() nounwind
+ %a2 = call i64 asm sideeffect "", "={rdx}"() nounwind
+ %a3 = call i64 asm sideeffect "", "={r8}"() nounwind
+ %a4 = call i64 asm sideeffect "", "={r9}"() nounwind
+ %a5 = call i64 asm sideeffect "", "={r10}"() nounwind
+ %a6 = call i64 asm sideeffect "", "={r11}"() nounwind
+ %a10 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
+ %a11 = call <2 x double> asm sideeffect "", "={xmm3}"() nounwind
+ %a12 = call <2 x double> asm sideeffect "", "={xmm4}"() nounwind
+ %a13 = call <2 x double> asm sideeffect "", "={xmm5}"() nounwind
+ %a14 = call <2 x double> asm sideeffect "", "={xmm6}"() nounwind
+ %a15 = call <2 x double> asm sideeffect "", "={xmm7}"() nounwind
+ %a16 = call <2 x double> asm sideeffect "", "={xmm8}"() nounwind
+ %a17 = call <2 x double> asm sideeffect "", "={xmm9}"() nounwind
+ %a18 = call <2 x double> asm sideeffect "", "={xmm10}"() nounwind
+ %a19 = call <2 x double> asm sideeffect "", "={xmm11}"() nounwind
+ %a20 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
+ %a21 = call <2 x double> asm sideeffect "", "={xmm13}"() nounwind
+ %a22 = call <2 x double> asm sideeffect "", "={xmm14}"() nounwind
+ %a23 = call <2 x double> asm sideeffect "", "={xmm15}"() nounwind
+ call preserve_allcc void @bar(i64 1, i64 2, double 3.0, double 4.0)
+ call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
+ ret void
+}
diff --git a/test/CodeGen/X86/preserve_mostcc64.ll b/test/CodeGen/X86/preserve_mostcc64.ll
new file mode 100644
index 000000000000..4ee293e14304
--- /dev/null
+++ b/test/CodeGen/X86/preserve_mostcc64.ll
@@ -0,0 +1,86 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck --check-prefix=SSE %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck --check-prefix=AVX %s
+
+; Every GPR should be saved - except r11
+define preserve_mostcc void @preserve_mostcc1() nounwind {
+entry:
+;SSE-LABEL: preserve_mostcc1
+;SSE: pushq %r10
+;SSE-NEXT: pushq %r9
+;SSE-NEXT: pushq %r8
+;SSE-NEXT: pushq %rdi
+;SSE-NEXT: pushq %rsi
+;SSE-NEXT: pushq %rdx
+;SSE-NEXT: pushq %rcx
+;SSE-NEXT: pushq %rax
+;SSE-NEXT: pushq %rbp
+;SSE-NEXT: pushq %r15
+;SSE-NEXT: pushq %r14
+;SSE-NEXT: pushq %r13
+;SSE-NEXT: pushq %r12
+;SSE-NEXT: pushq %rbx
+;AVX-LABEL: preserve_mostcc1
+;AVX: pushq %r10
+;AVX-NEXT: pushq %r9
+;AVX-NEXT: pushq %r8
+;AVX-NEXT: pushq %rdi
+;AVX-NEXT: pushq %rsi
+;AVX-NEXT: pushq %rdx
+;AVX-NEXT: pushq %rcx
+;AVX-NEXT: pushq %rax
+;AVX-NEXT: pushq %rbp
+;AVX-NEXT: pushq %r15
+;AVX-NEXT: pushq %r14
+;AVX-NEXT: pushq %r13
+;AVX-NEXT: pushq %r12
+;AVX-NEXT: pushq %rbx
+ call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
+ ret void
+}
+
+; Make sure R11 and XMMs are saved before the call
+declare preserve_mostcc void @foo(i64, i64, double, double)
+define void @preserve_mostcc2() nounwind {
+entry:
+;SSE-LABEL: preserve_mostcc2
+;SSE: movq %r11, [[REG:%[a-z0-9]+]]
+;SSE: movaps %xmm2
+;SSE: movaps %xmm3
+;SSE: movaps %xmm4
+;SSE: movaps %xmm5
+;SSE: movaps %xmm6
+;SSE: movaps %xmm7
+;SSE: movaps %xmm8
+;SSE: movaps %xmm9
+;SSE: movaps %xmm10
+;SSE: movaps %xmm11
+;SSE: movaps %xmm12
+;SSE: movaps %xmm13
+;SSE: movaps %xmm14
+;SSE: movaps %xmm15
+;SSE: movq [[REG]], %r11
+ %a0 = call i64 asm sideeffect "", "={rax}"() nounwind
+ %a1 = call i64 asm sideeffect "", "={rcx}"() nounwind
+ %a2 = call i64 asm sideeffect "", "={rdx}"() nounwind
+ %a3 = call i64 asm sideeffect "", "={r8}"() nounwind
+ %a4 = call i64 asm sideeffect "", "={r9}"() nounwind
+ %a5 = call i64 asm sideeffect "", "={r10}"() nounwind
+ %a6 = call i64 asm sideeffect "", "={r11}"() nounwind
+ %a10 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
+ %a11 = call <2 x double> asm sideeffect "", "={xmm3}"() nounwind
+ %a12 = call <2 x double> asm sideeffect "", "={xmm4}"() nounwind
+ %a13 = call <2 x double> asm sideeffect "", "={xmm5}"() nounwind
+ %a14 = call <2 x double> asm sideeffect "", "={xmm6}"() nounwind
+ %a15 = call <2 x double> asm sideeffect "", "={xmm7}"() nounwind
+ %a16 = call <2 x double> asm sideeffect "", "={xmm8}"() nounwind
+ %a17 = call <2 x double> asm sideeffect "", "={xmm9}"() nounwind
+ %a18 = call <2 x double> asm sideeffect "", "={xmm10}"() nounwind
+ %a19 = call <2 x double> asm sideeffect "", "={xmm11}"() nounwind
+ %a20 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
+ %a21 = call <2 x double> asm sideeffect "", "={xmm13}"() nounwind
+ %a22 = call <2 x double> asm sideeffect "", "={xmm14}"() nounwind
+ %a23 = call <2 x double> asm sideeffect "", "={xmm15}"() nounwind
+ call preserve_mostcc void @foo(i64 1, i64 2, double 3.0, double 4.0)
+ call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
+ ret void
+}
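The two tests above pin down the x86-64 contract of the preserve_most calling convention: the callee saves every general-purpose register except R11, while R11 and the XMM registers stay the caller's responsibility. For reference, here is a minimal C sketch of how this convention is usually requested at the source level; it assumes Clang's __attribute__((preserve_most)) spelling, and the function names are made up for illustration.

    /* Sketch only: assumes Clang's preserve_most attribute is available.
     * A callee marked this way clobbers only R11 (plus the vector
     * registers), so a hot caller can keep its state in GPRs across the
     * rarely taken call. */
    __attribute__((preserve_most)) void report_rare_event(int code);

    int hot_loop(int n) {
        int acc = 0;
        for (int i = 0; i < n; i++) {
            acc += i * i;
            if (acc < 0)                  /* cold, rarely taken path */
                report_rare_event(acc);   /* GPR state survives this call */
        }
        return acc;
    }

This is a declaration-only sketch (report_rare_event would be defined elsewhere); the point is simply where the attribute goes, not a complete program.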
diff --git a/test/CodeGen/X86/private-2.ll b/test/CodeGen/X86/private-2.ll
index 4413cee23b33..cf2d74119374 100644
--- a/test/CodeGen/X86/private-2.ll
+++ b/test/CodeGen/X86/private-2.ll
@@ -2,7 +2,7 @@
; Quote should be outside of private prefix.
; rdar://6855766x
-; CHECK: L__ZZ20
+; CHECK: "l__ZZ20-[Example1 whatever]E4C.91"
%struct.A = type { i32*, i32 }
@"_ZZ20-[Example1 whatever]E4C.91" = private constant %struct.A { i32* null, i32 1 } ; <%struct.A*> [#uses=1]
diff --git a/test/CodeGen/X86/pshufd-combine-crash.ll b/test/CodeGen/X86/pshufd-combine-crash.ll
new file mode 100644
index 000000000000..84c69e32bcc3
--- /dev/null
+++ b/test/CodeGen/X86/pshufd-combine-crash.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -debug
+
+; REQUIRES: asserts
+
+; Test that the DAG combiner doesn't assert if we try to replace a sequence of two
+; v4f32 X86ISD::PSHUFD nodes with a single PSHUFD.
+
+
+define <4 x float> @test(<4 x float> %V) {
+ %1 = shufflevector <4 x float> %V, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x float> %2
+}
+
diff --git a/test/CodeGen/X86/ragreedy-bug.ll b/test/CodeGen/X86/ragreedy-bug.ll
new file mode 100644
index 000000000000..df9b41d6e90b
--- /dev/null
+++ b/test/CodeGen/X86/ragreedy-bug.ll
@@ -0,0 +1,292 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx -regalloc=greedy | FileCheck %s
+
+; This test case is reduced from the 197.parser prune_match function.
+; We make sure register copies are not generated in the isupper.exit blocks.
+
+; CHECK: isupper.exit
+; CHECK-NEXT: in Loop
+; CHECK-NEXT: testl
+; CHECK-NEXT: jne
+; CHECK: isupper.exit
+; CHECK-NEXT: in Loop
+; CHECK-NEXT: testl
+; CHECK-NEXT: je
+; CHECK: maskrune
+; CHECK: maskrune
+
+%struct.List_o_links_struct = type { i32, i32, i32, %struct.List_o_links_struct* }
+%struct.Connector_struct = type { i16, i16, i8, i8, %struct.Connector_struct*, i8* }
+%struct._RuneLocale = type { [8 x i8], [32 x i8], i32 (i8*, i64, i8**)*, i32 (i32, i8*, i64, i8**)*, i32, [256 x i32], [256 x i32], [256 x i32], %struct._RuneRange, %struct._RuneRange, %struct._RuneRange, i8*, i32, i32, %struct._RuneCharClass* }
+%struct._RuneRange = type { i32, %struct._RuneEntry* }
+%struct._RuneEntry = type { i32, i32, i32, i32* }
+%struct._RuneCharClass = type { [14 x i8], i32 }
+%struct.Exp_struct = type { i8, i8, i8, i8, %union.anon }
+%union.anon = type { %struct.E_list_struct* }
+%struct.E_list_struct = type { %struct.E_list_struct*, %struct.Exp_struct* }
+%struct.domain_struct = type { i8*, i32, %struct.List_o_links_struct*, i32, i32, %struct.d_tree_leaf_struct*, %struct.domain_struct* }
+%struct.d_tree_leaf_struct = type { %struct.domain_struct*, i32, %struct.d_tree_leaf_struct* }
+@_DefaultRuneLocale = external global %struct._RuneLocale
+declare i32 @__maskrune(i32, i64) #7
+define fastcc i32 @prune_match(%struct.Connector_struct* nocapture readonly %a, %struct.Connector_struct* nocapture readonly %b) #9 {
+entry:
+ %label56 = bitcast %struct.Connector_struct* %a to i16*
+ %0 = load i16* %label56, align 2
+ %label157 = bitcast %struct.Connector_struct* %b to i16*
+ %1 = load i16* %label157, align 2
+ %cmp = icmp eq i16 %0, %1
+ br i1 %cmp, label %if.end, label %return, !prof !988
+if.end:
+ %priority = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 2
+ %2 = load i8* %priority, align 1
+ %priority5 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 2
+ %3 = load i8* %priority5, align 1
+ %string = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 5
+ %4 = load i8** %string, align 8
+ %string7 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 5
+ %5 = load i8** %string7, align 8
+ br label %while.cond
+while.cond:
+ %lsr.iv27 = phi i64 [ %lsr.iv.next28, %if.end17 ], [ 0, %if.end ]
+ %scevgep55 = getelementptr i8* %4, i64 %lsr.iv27
+ %6 = load i8* %scevgep55, align 1
+ %idxprom.i.i = sext i8 %6 to i64
+ %isascii.i.i224 = icmp sgt i8 %6, -1
+ br i1 %isascii.i.i224, label %cond.true.i.i, label %cond.false.i.i, !prof !181
+cond.true.i.i:
+ %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
+ %7 = load i32* %arrayidx.i.i, align 4
+ %and.i.i = and i32 %7, 32768
+ br label %isupper.exit
+cond.false.i.i:
+ %8 = trunc i64 %idxprom.i.i to i8
+ %conv8 = sext i8 %8 to i32
+ %call3.i.i = tail call i32 @__maskrune(i32 %conv8, i64 32768) #3
+ br label %isupper.exit
+isupper.exit:
+ %tobool1.sink.i.in.i = phi i32 [ %and.i.i, %cond.true.i.i ], [ %call3.i.i, %cond.false.i.i ]
+ %tobool1.sink.i.i = icmp eq i32 %tobool1.sink.i.in.i, 0
+ br i1 %tobool1.sink.i.i, label %lor.rhs, label %while.body, !prof !989
+lor.rhs:
+ %sunkaddr = ptrtoint i8* %5 to i64
+ %sunkaddr58 = add i64 %sunkaddr, %lsr.iv27
+ %sunkaddr59 = inttoptr i64 %sunkaddr58 to i8*
+ %9 = load i8* %sunkaddr59, align 1
+ %idxprom.i.i214 = sext i8 %9 to i64
+ %isascii.i.i213225 = icmp sgt i8 %9, -1
+ br i1 %isascii.i.i213225, label %cond.true.i.i217, label %cond.false.i.i219, !prof !181
+cond.true.i.i217:
+ %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
+ %10 = load i32* %arrayidx.i.i215, align 4
+ %and.i.i216 = and i32 %10, 32768
+ br label %isupper.exit223
+cond.false.i.i219:
+ %11 = trunc i64 %idxprom.i.i214 to i8
+ %conv9 = sext i8 %11 to i32
+ %call3.i.i218 = tail call i32 @__maskrune(i32 %conv9, i64 32768) #3
+ br label %isupper.exit223
+isupper.exit223:
+ %tobool1.sink.i.in.i220 = phi i32 [ %and.i.i216, %cond.true.i.i217 ], [ %call3.i.i218, %cond.false.i.i219 ]
+ %tobool1.sink.i.i221 = icmp eq i32 %tobool1.sink.i.in.i220, 0
+ br i1 %tobool1.sink.i.i221, label %while.end, label %while.body, !prof !990
+while.body:
+ %sunkaddr60 = ptrtoint i8* %4 to i64
+ %sunkaddr61 = add i64 %sunkaddr60, %lsr.iv27
+ %sunkaddr62 = inttoptr i64 %sunkaddr61 to i8*
+ %12 = load i8* %sunkaddr62, align 1
+ %sunkaddr63 = ptrtoint i8* %5 to i64
+ %sunkaddr64 = add i64 %sunkaddr63, %lsr.iv27
+ %sunkaddr65 = inttoptr i64 %sunkaddr64 to i8*
+ %13 = load i8* %sunkaddr65, align 1
+ %cmp14 = icmp eq i8 %12, %13
+ br i1 %cmp14, label %if.end17, label %return, !prof !991
+if.end17:
+ %lsr.iv.next28 = add i64 %lsr.iv27, 1
+ br label %while.cond
+while.end:
+ %14 = or i8 %3, %2
+ %15 = icmp eq i8 %14, 0
+ br i1 %15, label %if.then23, label %if.else88, !prof !992
+if.then23:
+ %sunkaddr66 = ptrtoint %struct.Connector_struct* %a to i64
+ %sunkaddr67 = add i64 %sunkaddr66, 16
+ %sunkaddr68 = inttoptr i64 %sunkaddr67 to i8**
+ %16 = load i8** %sunkaddr68, align 8
+ %17 = load i8* %16, align 1
+ %cmp26 = icmp eq i8 %17, 83
+ %sunkaddr69 = ptrtoint i8* %4 to i64
+ %sunkaddr70 = add i64 %sunkaddr69, %lsr.iv27
+ %sunkaddr71 = inttoptr i64 %sunkaddr70 to i8*
+ %18 = load i8* %sunkaddr71, align 1
+ br i1 %cmp26, label %land.lhs.true28, label %while.cond59.preheader, !prof !993
+land.lhs.true28:
+ switch i8 %18, label %land.rhs.preheader [
+ i8 112, label %land.lhs.true35
+ i8 0, label %return
+ ], !prof !994
+land.lhs.true35:
+ %sunkaddr72 = ptrtoint i8* %5 to i64
+ %sunkaddr73 = add i64 %sunkaddr72, %lsr.iv27
+ %sunkaddr74 = inttoptr i64 %sunkaddr73 to i8*
+ %19 = load i8* %sunkaddr74, align 1
+ switch i8 %19, label %land.rhs.preheader [
+ i8 112, label %land.lhs.true43
+ ], !prof !995
+land.lhs.true43:
+ %20 = ptrtoint i8* %16 to i64
+ %21 = sub i64 0, %20
+ %scevgep52 = getelementptr i8* %4, i64 %21
+ %scevgep53 = getelementptr i8* %scevgep52, i64 %lsr.iv27
+ %scevgep54 = getelementptr i8* %scevgep53, i64 -1
+ %cmp45 = icmp eq i8* %scevgep54, null
+ br i1 %cmp45, label %return, label %lor.lhs.false47, !prof !996
+lor.lhs.false47:
+ %22 = ptrtoint i8* %16 to i64
+ %23 = sub i64 0, %22
+ %scevgep47 = getelementptr i8* %4, i64 %23
+ %scevgep48 = getelementptr i8* %scevgep47, i64 %lsr.iv27
+ %scevgep49 = getelementptr i8* %scevgep48, i64 -2
+ %cmp50 = icmp eq i8* %scevgep49, null
+ br i1 %cmp50, label %land.lhs.true52, label %while.cond59.preheader, !prof !997
+land.lhs.true52:
+ %sunkaddr75 = ptrtoint i8* %4 to i64
+ %sunkaddr76 = add i64 %sunkaddr75, %lsr.iv27
+ %sunkaddr77 = add i64 %sunkaddr76, -1
+ %sunkaddr78 = inttoptr i64 %sunkaddr77 to i8*
+ %24 = load i8* %sunkaddr78, align 1
+ %cmp55 = icmp eq i8 %24, 73
+ %cmp61233 = icmp eq i8 %18, 0
+ %or.cond265 = or i1 %cmp55, %cmp61233
+ br i1 %or.cond265, label %return, label %land.rhs.preheader, !prof !998
+while.cond59.preheader:
+ %cmp61233.old = icmp eq i8 %18, 0
+ br i1 %cmp61233.old, label %return, label %land.rhs.preheader, !prof !999
+land.rhs.preheader:
+ %scevgep33 = getelementptr i8* %5, i64 %lsr.iv27
+ %scevgep43 = getelementptr i8* %4, i64 %lsr.iv27
+ br label %land.rhs
+land.rhs:
+ %lsr.iv = phi i64 [ 0, %land.rhs.preheader ], [ %lsr.iv.next, %if.then83 ]
+ %25 = phi i8 [ %27, %if.then83 ], [ %18, %land.rhs.preheader ]
+ %scevgep34 = getelementptr i8* %scevgep33, i64 %lsr.iv
+ %26 = load i8* %scevgep34, align 1
+ %cmp64 = icmp eq i8 %26, 0
+ br i1 %cmp64, label %return, label %while.body66, !prof !1000
+while.body66:
+ %cmp68 = icmp eq i8 %25, 42
+ %cmp72 = icmp eq i8 %26, 42
+ %or.cond = or i1 %cmp68, %cmp72
+ br i1 %or.cond, label %if.then83, label %lor.lhs.false74, !prof !1001
+lor.lhs.false74:
+ %cmp77 = icmp ne i8 %25, %26
+ %cmp81 = icmp eq i8 %25, 94
+ %or.cond208 = or i1 %cmp77, %cmp81
+ br i1 %or.cond208, label %return, label %if.then83, !prof !1002
+if.then83:
+ %scevgep44 = getelementptr i8* %scevgep43, i64 %lsr.iv
+ %scevgep45 = getelementptr i8* %scevgep44, i64 1
+ %27 = load i8* %scevgep45, align 1
+ %cmp61 = icmp eq i8 %27, 0
+ %lsr.iv.next = add i64 %lsr.iv, 1
+ br i1 %cmp61, label %return, label %land.rhs, !prof !999
+if.else88:
+ %cmp89 = icmp eq i8 %2, 1
+ %cmp92 = icmp eq i8 %3, 2
+ %or.cond159 = and i1 %cmp89, %cmp92
+ br i1 %or.cond159, label %while.cond95.preheader, label %if.else123, !prof !1003
+while.cond95.preheader:
+ %sunkaddr79 = ptrtoint i8* %4 to i64
+ %sunkaddr80 = add i64 %sunkaddr79, %lsr.iv27
+ %sunkaddr81 = inttoptr i64 %sunkaddr80 to i8*
+ %28 = load i8* %sunkaddr81, align 1
+ %cmp97238 = icmp eq i8 %28, 0
+ br i1 %cmp97238, label %return, label %land.rhs99.preheader, !prof !1004
+land.rhs99.preheader:
+ %scevgep31 = getelementptr i8* %5, i64 %lsr.iv27
+ %scevgep40 = getelementptr i8* %4, i64 %lsr.iv27
+ br label %land.rhs99
+land.rhs99:
+ %lsr.iv17 = phi i64 [ 0, %land.rhs99.preheader ], [ %lsr.iv.next18, %if.then117 ]
+ %29 = phi i8 [ %31, %if.then117 ], [ %28, %land.rhs99.preheader ]
+ %scevgep32 = getelementptr i8* %scevgep31, i64 %lsr.iv17
+ %30 = load i8* %scevgep32, align 1
+ %cmp101 = icmp eq i8 %30, 0
+ br i1 %cmp101, label %return, label %while.body104, !prof !1005
+while.body104:
+ %cmp107 = icmp eq i8 %29, %30
+ %cmp111 = icmp eq i8 %29, 42
+ %or.cond209 = or i1 %cmp107, %cmp111
+ %cmp115 = icmp eq i8 %30, 94
+ %or.cond210 = or i1 %or.cond209, %cmp115
+ br i1 %or.cond210, label %if.then117, label %return, !prof !1006
+if.then117:
+ %scevgep41 = getelementptr i8* %scevgep40, i64 %lsr.iv17
+ %scevgep42 = getelementptr i8* %scevgep41, i64 1
+ %31 = load i8* %scevgep42, align 1
+ %cmp97 = icmp eq i8 %31, 0
+ %lsr.iv.next18 = add i64 %lsr.iv17, 1
+ br i1 %cmp97, label %return, label %land.rhs99, !prof !1004
+if.else123:
+ %cmp124 = icmp eq i8 %3, 1
+ %cmp127 = icmp eq i8 %2, 2
+ %or.cond160 = and i1 %cmp124, %cmp127
+ br i1 %or.cond160, label %while.cond130.preheader, label %return, !prof !1007
+while.cond130.preheader:
+ %sunkaddr82 = ptrtoint i8* %4 to i64
+ %sunkaddr83 = add i64 %sunkaddr82, %lsr.iv27
+ %sunkaddr84 = inttoptr i64 %sunkaddr83 to i8*
+ %32 = load i8* %sunkaddr84, align 1
+ %cmp132244 = icmp eq i8 %32, 0
+ br i1 %cmp132244, label %return, label %land.rhs134.preheader, !prof !1008
+land.rhs134.preheader:
+ %scevgep29 = getelementptr i8* %5, i64 %lsr.iv27
+ %scevgep37 = getelementptr i8* %4, i64 %lsr.iv27
+ br label %land.rhs134
+land.rhs134:
+ %lsr.iv22 = phi i64 [ 0, %land.rhs134.preheader ], [ %lsr.iv.next23, %if.then152 ]
+ %33 = phi i8 [ %35, %if.then152 ], [ %32, %land.rhs134.preheader ]
+ %scevgep30 = getelementptr i8* %scevgep29, i64 %lsr.iv22
+ %34 = load i8* %scevgep30, align 1
+ %cmp136 = icmp eq i8 %34, 0
+ br i1 %cmp136, label %return, label %while.body139, !prof !1009
+while.body139:
+ %cmp142 = icmp eq i8 %33, %34
+ %cmp146 = icmp eq i8 %34, 42
+ %or.cond211 = or i1 %cmp142, %cmp146
+ %cmp150 = icmp eq i8 %33, 94
+ %or.cond212 = or i1 %or.cond211, %cmp150
+ br i1 %or.cond212, label %if.then152, label %return, !prof !1010
+if.then152:
+ %scevgep38 = getelementptr i8* %scevgep37, i64 %lsr.iv22
+ %scevgep39 = getelementptr i8* %scevgep38, i64 1
+ %35 = load i8* %scevgep39, align 1
+ %cmp132 = icmp eq i8 %35, 0
+ %lsr.iv.next23 = add i64 %lsr.iv22, 1
+ br i1 %cmp132, label %return, label %land.rhs134, !prof !1008
+return:
+ %retval.0 = phi i32 [ 0, %entry ], [ 1, %land.lhs.true52 ], [ 1, %land.lhs.true43 ], [ 0, %if.else123 ], [ 1, %while.cond59.preheader ], [ 1, %while.cond95.preheader ], [ 1, %while.cond130.preheader ], [ 1, %land.lhs.true28 ], [ 1, %if.then83 ], [ 0, %lor.lhs.false74 ], [ 1, %land.rhs ], [ 1, %if.then117 ], [ 0, %while.body104 ], [ 1, %land.rhs99 ], [ 1, %if.then152 ], [ 0, %while.body139 ], [ 1, %land.rhs134 ], [ 0, %while.body ]
+ ret i32 %retval.0
+}
+!181 = metadata !{metadata !"branch_weights", i32 662038, i32 1}
+!988 = metadata !{metadata !"branch_weights", i32 12091450, i32 1916}
+!989 = metadata !{metadata !"branch_weights", i32 7564670, i32 4526781}
+!990 = metadata !{metadata !"branch_weights", i32 7484958, i32 13283499}
+!991 = metadata !{metadata !"branch_weights", i32 8677007, i32 4606493}
+!992 = metadata !{metadata !"branch_weights", i32 -1172426948, i32 145094705}
+!993 = metadata !{metadata !"branch_weights", i32 1468914, i32 5683688}
+!994 = metadata !{metadata !"branch_weights", i32 114025221, i32 -1217548794, i32 -1199521551, i32 87712616}
+!995 = metadata !{metadata !"branch_weights", i32 1853716452, i32 -444717951, i32 932776759}
+!996 = metadata !{metadata !"branch_weights", i32 1004870, i32 20259}
+!997 = metadata !{metadata !"branch_weights", i32 20071, i32 189}
+!998 = metadata !{metadata !"branch_weights", i32 -1020255939, i32 572177766}
+!999 = metadata !{metadata !"branch_weights", i32 2666513, i32 3466431}
+!1000 = metadata !{metadata !"branch_weights", i32 5117635, i32 1859780}
+!1001 = metadata !{metadata !"branch_weights", i32 354902465, i32 -1444604407}
+!1002 = metadata !{metadata !"branch_weights", i32 -1762419279, i32 1592770684}
+!1003 = metadata !{metadata !"branch_weights", i32 1435905930, i32 -1951930624}
+!1004 = metadata !{metadata !"branch_weights", i32 1, i32 504888}
+!1005 = metadata !{metadata !"branch_weights", i32 94662, i32 504888}
+!1006 = metadata !{metadata !"branch_weights", i32 -1897793104, i32 160196332}
+!1007 = metadata !{metadata !"branch_weights", i32 2074643678, i32 -29579071}
+!1008 = metadata !{metadata !"branch_weights", i32 1, i32 226163}
+!1009 = metadata !{metadata !"branch_weights", i32 58357, i32 226163}
+!1010 = metadata !{metadata !"branch_weights", i32 -2072848646, i32 92907517}
diff --git a/test/CodeGen/X86/ragreedy-hoist-spill.ll b/test/CodeGen/X86/ragreedy-hoist-spill.ll
new file mode 100644
index 000000000000..c6b28f71af46
--- /dev/null
+++ b/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -0,0 +1,389 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx -regalloc=greedy | FileCheck %s
+
+; This test case is reduced from the 254.gap SyFgets function.
+; We make sure a spill is not hoisted to a hotter outer loop.
+
+%struct.TMP.1 = type { %struct.TMP.2*, %struct.TMP.2*, [1024 x i8] }
+%struct.TMP.2 = type { i8*, i32, i32, i16, i16, %struct.TMP.3, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.TMP.3, %struct.TMP.4*, i32, [3 x i8], [1 x i8], %struct.TMP.3, i32, i64 }
+%struct.TMP.4 = type opaque
+%struct.TMP.3 = type { i8*, i32 }
+
+@syBuf = external global [16 x %struct.TMP.1], align 16
+@syHistory = external global [8192 x i8], align 16
+@SyFgets.yank = external global [512 x i8], align 16
+@syCTRO = external global i32, align 4
+
+; CHECK-LABEL: SyFgets
+define i8* @SyFgets(i8* %line, i64 %length, i64 %fid) {
+entry:
+ %sub.ptr.rhs.cast646 = ptrtoint i8* %line to i64
+ %old = alloca [512 x i8], align 16
+ %0 = getelementptr inbounds [512 x i8]* %old, i64 0, i64 0
+ switch i64 %fid, label %if.then [
+ i64 2, label %if.end
+ i64 0, label %if.end
+ ]
+
+if.then:
+ br label %cleanup
+
+if.end:
+ switch i64 undef, label %if.end25 [
+ i64 0, label %if.then4
+ i64 1, label %land.lhs.true14
+ ]
+
+if.then4:
+ br i1 undef, label %SyTime.exit, label %if.then.i
+
+if.then.i:
+ unreachable
+
+SyTime.exit:
+ br i1 undef, label %SyTime.exit2681, label %if.then.i2673
+
+if.then.i2673:
+ unreachable
+
+SyTime.exit2681:
+ br label %cleanup
+
+land.lhs.true14:
+ unreachable
+
+if.end25:
+ br i1 undef, label %SyTime.exit2720, label %if.then.i2712
+
+if.then.i2712:
+ unreachable
+
+SyTime.exit2720:
+ %add.ptr = getelementptr [512 x i8]* %old, i64 0, i64 512
+ %cmp293427 = icmp ult i8* %0, %add.ptr
+ br i1 %cmp293427, label %for.body.lr.ph, label %while.body.preheader
+
+for.body.lr.ph:
+ call void @llvm.memset.p0i8.i64(i8* undef, i8 32, i64 512, i32 16, i1 false)
+ br label %while.body.preheader
+
+while.body.preheader:
+ %add.ptr1603 = getelementptr [512 x i8]* null, i64 0, i64 512
+ %echo.i3101 = getelementptr [16 x %struct.TMP.1]* @syBuf, i64 0, i64 %fid, i32 1
+ %1 = xor i64 %sub.ptr.rhs.cast646, -1
+ br label %do.body
+
+do.body:
+ %ch2.0 = phi i32 [ 0, %while.body.preheader ], [ %ch.12.ch2.12, %do.body ]
+ %rep.0 = phi i32 [ 1, %while.body.preheader ], [ %rep.6, %do.body ]
+ store i32 0, i32* @syCTRO, align 4, !tbaa !1
+ %ch.0.ch2.0 = select i1 undef, i32 14, i32 %ch2.0
+ %ch2.2 = select i1 undef, i32 0, i32 %ch.0.ch2.0
+ %ch.2.ch2.2 = select i1 undef, i32 0, i32 %ch2.2
+ %ch2.4 = select i1 undef, i32 278, i32 %ch.2.ch2.2
+ %ch2.5 = select i1 undef, i32 0, i32 %ch2.4
+ %rep.2 = select i1 undef, i32 undef, i32 %rep.0
+ %ch.5.ch2.5 = select i1 undef, i32 undef, i32 %ch2.5
+ %ch2.7 = select i1 undef, i32 0, i32 %ch.5.ch2.5
+ %rep.3 = select i1 undef, i32 undef, i32 %rep.2
+ %ch.7.ch2.7 = select i1 false, i32 0, i32 %ch2.7
+ %mul98.rep.3 = select i1 false, i32 0, i32 %rep.3
+ %ch2.9 = select i1 undef, i32 undef, i32 %ch.7.ch2.7
+ %rep.5 = select i1 undef, i32 undef, i32 %mul98.rep.3
+ %ch2.10 = select i1 false, i32 undef, i32 %ch2.9
+ %rep.6 = select i1 false, i32 undef, i32 %rep.5
+ %isdigittmp = add i32 %ch2.10, -48
+ %isdigit = icmp ult i32 %isdigittmp, 10
+ %cmp119 = icmp eq i32 undef, 22
+ %or.cond1875 = and i1 %isdigit, %cmp119
+ %ch.10.ch2.10 = select i1 %or.cond1875, i32 undef, i32 %ch2.10
+ %.ch.10 = select i1 %or.cond1875, i32 0, i32 undef
+ %ch2.12 = select i1 undef, i32 %.ch.10, i32 %ch.10.ch2.10
+ %ch.12 = select i1 undef, i32 0, i32 %.ch.10
+ %ch.12.ch2.12 = select i1 false, i32 %ch.12, i32 %ch2.12
+ %.ch.12 = select i1 false, i32 0, i32 %ch.12
+ %cmp147 = icmp eq i32 %.ch.12, 0
+ br i1 %cmp147, label %do.body, label %do.end
+
+do.end:
+ %cmp164 = icmp eq i32 %ch.12.ch2.12, 21
+ %mul167 = shl i32 %rep.6, 2
+ %rep.8 = select i1 %cmp164, i32 %mul167, i32 %rep.6
+ %..ch.19 = select i1 false, i32 2, i32 0
+ br i1 undef, label %while.body200, label %while.end1465
+
+while.body200:
+ %dec3386.in = phi i32 [ %dec3386, %while.cond197.backedge ], [ %rep.8, %do.end ]
+ %oldc.13384 = phi i32 [ %oldc.1.be, %while.cond197.backedge ], [ 0, %do.end ]
+ %ch.213379 = phi i32 [ %last.1.be, %while.cond197.backedge ], [ %..ch.19, %do.end ]
+ %last.13371 = phi i32 [ %last.1.be, %while.cond197.backedge ], [ 0, %do.end ]
+ %dec3386 = add i32 %dec3386.in, -1
+ switch i32 %ch.213379, label %sw.default [
+ i32 1, label %while.cond201.preheader
+ i32 322, label %sw.bb206
+ i32 354, label %sw.bb206
+ i32 2, label %sw.bb243
+ i32 364, label %sw.bb1077
+ i32 326, label %sw.bb256
+ i32 358, label %sw.bb256
+ i32 341, label %sw.bb979
+ i32 323, label %while.cond1037.preheader
+ i32 373, label %sw.bb979
+ i32 4, label %if.then1477
+ i32 332, label %sw.bb1077
+ i32 11, label %for.cond357
+ i32 355, label %while.cond1037.preheader
+ i32 324, label %sw.bb474
+ i32 356, label %sw.bb474
+ i32 20, label %sw.bb566
+ i32 -1, label %while.cond197.backedge
+ i32 268, label %sw.bb1134
+ i32 16, label %while.cond635.preheader
+ i32 18, label %sw.bb956
+ i32 316, label %while.cond864
+ ]
+
+while.cond1037.preheader:
+ %cmp10393273 = icmp eq i8 undef, 0
+ br i1 %cmp10393273, label %if.end1070, label %land.rhs1041
+
+while.cond635.preheader:
+ br i1 undef, label %for.body643.us, label %while.cond661
+
+for.body643.us:
+ br label %for.body643.us
+
+while.cond201.preheader:
+ %umax = select i1 false, i64 undef, i64 %1
+ %2 = xor i64 %umax, -1
+ %3 = inttoptr i64 %2 to i8*
+ br label %while.cond197.backedge
+
+sw.bb206:
+ br label %while.cond197.backedge
+
+sw.bb243:
+ br label %while.cond197.backedge
+
+sw.bb256:
+ br label %while.cond197.backedge
+
+while.cond197.backedge:
+ %last.1.be = phi i32 [ %ch.213379, %sw.default ], [ -1, %while.body200 ], [ %ch.213379, %sw.bb1077 ], [ %ch.213379, %sw.bb979 ], [ 18, %sw.bb956 ], [ 20, %sw.bb566 ], [ %ch.213379, %for.end552 ], [ %ch.213379, %sw.bb256 ], [ 2, %sw.bb243 ], [ 1, %while.cond201.preheader ], [ 268, %for.cond1145.preheader ], [ %ch.213379, %sw.bb206 ]
+ %oldc.1.be = phi i32 [ %oldc.13384, %sw.default ], [ %oldc.13384, %while.body200 ], [ %oldc.13384, %sw.bb1077 ], [ %oldc.13384, %sw.bb979 ], [ %oldc.13384, %sw.bb956 ], [ %oldc.13384, %sw.bb566 ], [ %oldc.13384, %for.end552 ], [ %oldc.13384, %sw.bb256 ], [ %oldc.13384, %sw.bb243 ], [ %oldc.13384, %while.cond201.preheader ], [ 0, %for.cond1145.preheader ], [ %oldc.13384, %sw.bb206 ]
+ %cmp198 = icmp sgt i32 %dec3386, 0
+ br i1 %cmp198, label %while.body200, label %while.end1465
+
+for.cond357:
+ br label %for.cond357
+
+sw.bb474:
+ %cmp476 = icmp eq i8 undef, 0
+ br i1 %cmp476, label %if.end517, label %do.body479.preheader
+
+do.body479.preheader:
+ %cmp4833314 = icmp eq i8 undef, 0
+ br i1 %cmp4833314, label %if.end517, label %land.rhs485
+
+land.rhs485:
+ %incdec.ptr4803316 = phi i8* [ %incdec.ptr480, %do.body479.backedge.land.rhs485_crit_edge ], [ undef, %do.body479.preheader ]
+ %isascii.i.i27763151 = icmp sgt i8 undef, -1
+ br i1 %isascii.i.i27763151, label %cond.true.i.i2780, label %cond.false.i.i2782
+
+cond.true.i.i2780:
+ br i1 undef, label %land.lhs.true490, label %lor.rhs500
+
+cond.false.i.i2782:
+ unreachable
+
+land.lhs.true490:
+ br i1 false, label %lor.rhs500, label %do.body479.backedge
+
+lor.rhs500:
+ ; CHECK: lor.rhs500
+ ; Make sure that we don't hoist the spill to outer loops.
+ ; CHECK: movq %r{{.*}}, {{[0-9]+}}(%rsp)
+ ; CHECK: movq %r{{.*}}, {{[0-9]+}}(%rsp)
+ ; CHECK: callq {{.*}}maskrune
+ %call3.i.i2792 = call i32 @__maskrune(i32 undef, i64 256)
+ br i1 undef, label %land.lhs.true504, label %do.body479.backedge
+
+land.lhs.true504:
+ br i1 undef, label %do.body479.backedge, label %if.end517
+
+do.body479.backedge:
+ %incdec.ptr480 = getelementptr i8* %incdec.ptr4803316, i64 1
+ %cmp483 = icmp eq i8 undef, 0
+ br i1 %cmp483, label %if.end517, label %do.body479.backedge.land.rhs485_crit_edge
+
+do.body479.backedge.land.rhs485_crit_edge:
+ br label %land.rhs485
+
+if.end517:
+ %q.4 = phi i8* [ undef, %sw.bb474 ], [ undef, %do.body479.preheader ], [ %incdec.ptr480, %do.body479.backedge ], [ %incdec.ptr4803316, %land.lhs.true504 ]
+ switch i32 %last.13371, label %if.then532 [
+ i32 383, label %for.cond534
+ i32 356, label %for.cond534
+ i32 324, label %for.cond534
+ i32 24, label %for.cond534
+ i32 11, label %for.cond534
+ ]
+
+if.then532:
+ store i8 0, i8* getelementptr inbounds ([512 x i8]* @SyFgets.yank, i64 0, i64 0), align 16, !tbaa !5
+ br label %for.cond534
+
+for.cond534:
+ %cmp536 = icmp eq i8 undef, 0
+ br i1 %cmp536, label %for.cond542.preheader, label %for.cond534
+
+for.cond542.preheader:
+ br i1 undef, label %for.body545, label %for.end552
+
+for.body545:
+ br i1 undef, label %for.end552, label %for.body545
+
+for.end552:
+ %s.2.lcssa = phi i8* [ undef, %for.cond542.preheader ], [ %q.4, %for.body545 ]
+ %sub.ptr.lhs.cast553 = ptrtoint i8* %s.2.lcssa to i64
+ %sub.ptr.sub555 = sub i64 %sub.ptr.lhs.cast553, 0
+ %arrayidx556 = getelementptr i8* null, i64 %sub.ptr.sub555
+ store i8 0, i8* %arrayidx556, align 1, !tbaa !5
+ br label %while.cond197.backedge
+
+sw.bb566:
+ br label %while.cond197.backedge
+
+while.cond661:
+ br label %while.cond661
+
+while.cond864:
+ br label %while.cond864
+
+sw.bb956:
+ br i1 undef, label %if.then959, label %while.cond197.backedge
+
+if.then959:
+ br label %while.cond962
+
+while.cond962:
+ br label %while.cond962
+
+sw.bb979:
+ br label %while.cond197.backedge
+
+land.rhs1041:
+ unreachable
+
+if.end1070:
+ br label %sw.bb1077
+
+sw.bb1077:
+ br label %while.cond197.backedge
+
+sw.bb1134:
+ br i1 false, label %for.body1139, label %for.cond1145.preheader
+
+for.cond1145.preheader:
+ br i1 %cmp293427, label %for.body1150.lr.ph, label %while.cond197.backedge
+
+for.body1150.lr.ph:
+ unreachable
+
+for.body1139:
+ unreachable
+
+sw.default:
+ br label %while.cond197.backedge
+
+while.end1465:
+ %oldc.1.lcssa = phi i32 [ 0, %do.end ], [ %oldc.1.be, %while.cond197.backedge ]
+ %ch.21.lcssa = phi i32 [ %..ch.19, %do.end ], [ %last.1.be, %while.cond197.backedge ]
+ switch i32 %ch.21.lcssa, label %for.cond1480.preheader [
+ i32 -1, label %if.then1477
+ i32 15, label %if.then1477
+ i32 13, label %if.then1477
+ i32 10, label %if.then1477
+ ]
+
+for.cond1480.preheader:
+ br i1 undef, label %for.body1606.lr.ph, label %for.end1609
+
+if.then1477:
+ %p.1.lcssa3539 = phi i8* [ null, %while.end1465 ], [ null, %while.end1465 ], [ null, %while.end1465 ], [ null, %while.end1465 ], [ %line, %while.body200 ]
+ %call1.i3057 = call i64 @"\01_write"(i32 undef, i8* undef, i64 1)
+ %sub.ptr.lhs.cast1717 = ptrtoint i8* %p.1.lcssa3539 to i64
+ %sub.ptr.sub1719 = sub i64 %sub.ptr.lhs.cast1717, %sub.ptr.rhs.cast646
+ %idx.neg1727 = sub i64 0, %sub.ptr.sub1719
+ br label %for.body1723
+
+for.body1606.lr.ph:
+ br label %for.end1609
+
+for.end1609:
+ br i1 undef, label %for.cond1659.preheader, label %land.lhs.true1614
+
+land.lhs.true1614:
+ br label %for.cond1659.preheader
+
+for.cond1659.preheader:
+ %cmp16623414 = icmp ult i8* undef, %add.ptr1603
+ br i1 %cmp16623414, label %for.body1664.lr.ph, label %while.body1703.lr.ph
+
+for.body1664.lr.ph:
+ %cmp16773405 = icmp slt i64 undef, undef
+ br i1 %cmp16773405, label %while.body1679, label %while.cond1683.preheader
+
+while.body1703.lr.ph:
+ unreachable
+
+while.cond1683.preheader:
+ br i1 undef, label %while.body1691, label %while.end1693
+
+while.body1679:
+ %oldc.43406 = phi i32 [ %inc, %syEchoch.exit3070 ], [ %oldc.1.lcssa, %for.body1664.lr.ph ]
+ %4 = load %struct.TMP.2** %echo.i3101, align 8, !tbaa !6
+ %call.i3062 = call i32 @fileno(%struct.TMP.2* %4)
+ br i1 undef, label %if.then.i3069, label %syEchoch.exit3070
+
+if.then.i3069:
+ br label %syEchoch.exit3070
+
+syEchoch.exit3070:
+ %inc = add i32 %oldc.43406, 1
+ %conv1672 = sext i32 %inc to i64
+ %cmp1677 = icmp slt i64 %conv1672, undef
+ br i1 %cmp1677, label %while.body1679, label %while.cond1683.preheader
+
+while.body1691:
+ unreachable
+
+while.end1693:
+ unreachable
+
+for.body1723:
+ %q.303203 = phi i8* [ getelementptr inbounds ([8192 x i8]* @syHistory, i64 0, i64 8189), %if.then1477 ], [ %incdec.ptr1730, %for.body1723 ]
+ %add.ptr1728 = getelementptr i8* %q.303203, i64 %idx.neg1727
+ %5 = load i8* %add.ptr1728, align 1, !tbaa !5
+ %incdec.ptr1730 = getelementptr i8* %q.303203, i64 -1
+ br label %for.body1723
+
+cleanup:
+ ret i8* undef
+}
+
+declare i32 @fileno(%struct.TMP.2* nocapture)
+declare i64 @"\01_write"(i32, i8*, i64)
+declare i32 @__maskrune(i32, i64)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (trunk 204257)"}
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"int", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!5 = metadata !{metadata !3, metadata !3, i64 0}
+!6 = metadata !{metadata !7, metadata !8, i64 8}
+!7 = metadata !{metadata !"", metadata !8, i64 0, metadata !8, i64 8, metadata !3, i64 16}
+!8 = metadata !{metadata !"any pointer", metadata !3, i64 0}
diff --git a/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll b/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll
new file mode 100644
index 000000000000..d8e45727b9d2
--- /dev/null
+++ b/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll
@@ -0,0 +1,181 @@
+; RUN: llc -regalloc=greedy -relocation-model=pic < %s 2>&1 | FileCheck %s
+; Without last chance recoloring, this test fails with:
+; "ran out of registers".
+
+; RUN: not llc -regalloc=greedy -relocation-model=pic -lcr-max-depth=0 < %s 2>&1 | FileCheck %s --check-prefix=CHECK-DEPTH
+; Test whether a failure due to the depth cutoff is reported.
+
+; RUN: not llc -regalloc=greedy -relocation-model=pic -lcr-max-interf=1 < %s 2>&1 | FileCheck %s --check-prefix=CHECK-INTERF
+; Test whether a failure due to the interference cutoff is reported.
+
+; RUN: llc -regalloc=greedy -relocation-model=pic -lcr-max-interf=1 -lcr-max-depth=0 -exhaustive-register-search < %s > %t 2>&1
+; RUN: FileCheck --input-file=%t %s --check-prefix=CHECK-EXHAUSTIVE
+; Test whether exhaustive-register-search can bypass the depth and interference cutoffs of last chance recoloring
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target triple = "i386-apple-macosx"
+
+@fp_dh_36985b17790d59a27994eaab5dcb00ee = external constant [499 x i32]
+@fp_dh_18716afa4a5354de0a302c8edb3b0ee1 = external global i32
+@fp_dh_20a33cdeefab8f4c8887e82766cb9dcb = external global i8*
+@fp_dh_9d93c897906e39883c58b034c8e786b2 = external global [5419648 x i8], align 16
+
+; Function Attrs: nounwind ssp
+; CHECK-NOT: ran out of registers during register allocation
+; CHECK-INTERF: error: register allocation failed: maximum interference for recoloring reached
+; CHECK-DEPTH: error: register allocation failed: maximum depth for recoloring reached
+; CHECK-EXHAUSTIVE-NOT: error: register allocation failed: maximum {{depth|interference}} for recoloring reached
+define void @fp_dh_f870bf31fd8ffe068450366e3f05389a(i8* %arg) #0 {
+bb:
+ indirectbr i8* undef, [label %bb85, label %bb206]
+
+bb85: ; preds = %bb222, %bb85, %bb
+ store i8* blockaddress(@fp_dh_f870bf31fd8ffe068450366e3f05389a, %bb206), i8** undef, align 4
+ indirectbr i8* undef, [label %bb439, label %bb85]
+
+bb206: ; preds = %bb
+ %tmp = getelementptr [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 undef
+ %tmp207 = load i32* %tmp
+ %tmp208 = add i32 %tmp207, 1
+ %tmp209 = inttoptr i32 %tmp208 to i8*
+ indirectbr i8* %tmp209, [label %bb213]
+
+bb213: ; preds = %bb206
+ %tmp214 = load i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
+ %tmp215 = load i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
+ %tmp216 = urem i32 -717428541, %tmp214
+ %tmp217 = getelementptr i8* %tmp215, i32 %tmp216
+ %tmp218 = bitcast i8* %tmp217 to i32*
+ %tmp219 = load i32* %tmp218, align 4
+ store i32 %tmp219, i32* undef, align 4
+ %tmp220 = select i1 false, i32 359373646, i32 1677237955
+ %tmp221 = add i32 %tmp220, 0
+ indirectbr i8* undef, [label %bb432, label %bb222]
+
+bb222: ; preds = %bb213
+ %tmp224 = load i32* undef, align 4
+ %tmp225 = load i32* undef, align 4
+ %tmp226 = xor i32 %tmp225, %tmp224
+ %tmp227 = shl i32 %tmp226, 1
+ %tmp228 = and i32 %tmp227, -2048880334
+ %tmp229 = sub i32 0, %tmp228
+ %tmp230 = add i32 0, %tmp229
+ %tmp231 = xor i32 %tmp230, 1059356227
+ %tmp232 = mul i32 %tmp231, 1603744721
+ %tmp233 = urem i32 %tmp232, 259
+ %tmp234 = getelementptr [259 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 2039075) to [259 x i8]*), i32 0, i32 %tmp233
+ %tmp235 = load i8* %tmp234, align 1
+ %tmp236 = add i32 %tmp233, 2
+ %tmp237 = getelementptr [264 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 3388166) to [264 x i8]*), i32 0, i32 %tmp236
+ %tmp238 = load i8* %tmp237, align 1
+ %tmp239 = getelementptr [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 0
+ %tmp240 = load i8* %tmp239, align 1
+ %tmp241 = add i32 %tmp233, 6
+ %tmp242 = trunc i32 %tmp241 to i8
+ %tmp243 = mul i8 %tmp242, -3
+ %tmp244 = add i8 %tmp243, 3
+ %tmp245 = mul i8 %tmp242, -6
+ %tmp246 = and i8 %tmp245, 6
+ %tmp247 = sub i8 0, %tmp246
+ %tmp248 = add i8 %tmp244, %tmp247
+ %tmp249 = load i8* undef, align 1
+ %tmp250 = xor i8 %tmp235, 17
+ %tmp251 = xor i8 %tmp250, %tmp238
+ %tmp252 = xor i8 %tmp251, %tmp240
+ %tmp253 = xor i8 %tmp252, %tmp249
+ %tmp254 = xor i8 %tmp253, %tmp248
+ %tmp255 = zext i8 %tmp254 to i16
+ %tmp256 = shl nuw i16 %tmp255, 8
+ %tmp257 = load i8* null, align 1
+ %tmp258 = load i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
+ %tmp259 = load i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
+ %tmp260 = urem i32 -717428541, %tmp258
+ %tmp261 = getelementptr i8* %tmp259, i32 %tmp260
+ %tmp262 = bitcast i8* %tmp261 to i32*
+ %tmp263 = load i32* %tmp262, align 4
+ %tmp264 = xor i32 %tmp263, 0
+ %tmp265 = shl i32 %tmp264, 1
+ %tmp266 = and i32 %tmp265, -1312119832
+ %tmp267 = sub i32 0, %tmp266
+ %tmp268 = add i32 0, %tmp267
+ %tmp269 = xor i32 %tmp268, 623994670
+ %tmp270 = mul i32 %tmp269, 1603744721
+ %tmp271 = urem i32 %tmp270, 259
+ %tmp274 = add i32 %tmp271, 3
+ %tmp275 = getelementptr [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 %tmp274
+ %tmp276 = load i8* %tmp275, align 1
+ %tmp277 = add i32 %tmp271, 6
+ %tmp278 = trunc i32 %tmp277 to i8
+ %tmp279 = mul i8 %tmp278, -3
+ %tmp280 = add i8 %tmp279, 31
+ %tmp281 = add i8 %tmp280, 0
+ %tmp282 = xor i8 %tmp257, 13
+ %tmp283 = xor i8 %tmp282, 0
+ %tmp284 = xor i8 %tmp283, 0
+ %tmp285 = xor i8 %tmp284, %tmp276
+ %tmp286 = xor i8 %tmp285, %tmp281
+ %tmp287 = zext i8 %tmp286 to i16
+ %tmp288 = or i16 %tmp287, %tmp256
+ %tmp289 = xor i16 %tmp288, 14330
+ %tmp290 = add i16 0, %tmp289
+ %tmp291 = add i16 %tmp290, -14330
+ %tmp292 = zext i16 %tmp291 to i32
+ %tmp293 = add i16 %tmp290, -14330
+ %tmp294 = lshr i16 %tmp293, 12
+ %tmp295 = zext i16 %tmp294 to i32
+ %tmp296 = sub i32 0, %tmp295
+ %tmp297 = xor i32 %tmp296, 16
+ %tmp298 = add i32 0, %tmp297
+ %tmp299 = and i32 %tmp298, 31
+ %tmp300 = and i32 %tmp292, 30864
+ %tmp301 = shl i32 %tmp300, %tmp299
+ %tmp302 = xor i32 0, %tmp301
+ %tmp303 = add i32 0, %tmp302
+ %tmp304 = and i32 %tmp298, 31
+ %tmp305 = and i32 %tmp303, 25568
+ %tmp306 = lshr i32 %tmp305, %tmp304
+ %tmp307 = xor i32 0, %tmp306
+ %tmp308 = add i32 0, %tmp307
+ %tmp309 = trunc i32 %tmp308 to i16
+ %tmp310 = shl i16 %tmp309, 1
+ %tmp311 = and i16 %tmp310, -4648
+ %tmp312 = shl i16 %tmp309, 1
+ %tmp313 = and i16 %tmp312, 4646
+ %tmp314 = xor i16 %tmp311, 17700
+ %tmp315 = xor i16 %tmp313, 17700
+ %tmp316 = add i16 %tmp314, %tmp315
+ %tmp317 = and i16 %tmp314, %tmp315
+ %tmp318 = shl nuw i16 %tmp317, 1
+ %tmp319 = sub i16 0, %tmp318
+ %tmp320 = add i16 %tmp316, %tmp319
+ %tmp321 = and i16 %tmp320, 29906
+ %tmp322 = xor i16 %tmp309, 14953
+ %tmp323 = add i16 0, %tmp322
+ %tmp324 = sub i16 0, %tmp321
+ %tmp325 = xor i16 %tmp324, %tmp323
+ %tmp326 = add i16 0, %tmp325
+ %tmp327 = add i32 %tmp221, 1161362661
+ %tmp333 = icmp eq i16 %tmp326, 14953
+ %tmp334 = add i32 %tmp327, -1456704142
+ %tmp335 = zext i1 %tmp333 to i32
+ %tmp336 = add i32 %tmp334, %tmp335
+ %tmp337 = getelementptr [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp336
+ %tmp338 = load i32* %tmp337
+ %tmp339 = add i32 %tmp338, 1
+ %tmp340 = inttoptr i32 %tmp339 to i8*
+ indirectbr i8* %tmp340, [label %bb85, label %bb439]
+
+bb432: ; preds = %bb432, %bb213
+ %tmp433 = phi i32 [ %tmp221, %bb213 ], [ %tmp433, %bb432 ]
+ %tmp434 = add i32 %tmp433, 1022523279
+ %tmp435 = getelementptr [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp434
+ %tmp436 = load i32* %tmp435
+ %tmp437 = add i32 %tmp436, 1
+ %tmp438 = inttoptr i32 %tmp437 to i8*
+ indirectbr i8* %tmp438, [label %bb432]
+
+bb439: ; preds = %bb222, %bb85
+ ret void
+}
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/X86/rdpmc.ll b/test/CodeGen/X86/rdpmc.ll
new file mode 100644
index 000000000000..7f1ca469c0b6
--- /dev/null
+++ b/test/CodeGen/X86/rdpmc.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=x86-64 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=X86-64
+; RUN: llc < %s -march=x86 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=X86
+
+; Verify that we correctly lower the "Read Performance-Monitoring Counters"
+; x86 builtin.
+
+
+define i64 @test_builtin_read_pmc(i32 %ID) {
+ %1 = tail call i64 @llvm.x86.rdpmc(i32 %ID)
+ ret i64 %1
+}
+; CHECK-LABEL: test_builtin_read_pmc
+; CHECK: rdpmc
+; X86-NOT: shlq
+; X86-NOT: or
+; X86-64: shlq
+; X86-64: or
+; CHECK-NOT: mov
+; CHECK: ret
+
+declare i64 @llvm.x86.rdpmc(i32 %ID)
+
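The rdpmc.ll test above checks the lowering of llvm.x86.rdpmc: on x86-64 the two 32-bit halves returned in EDX:EAX have to be merged with a shift and an or, while on 32-bit targets the i64 result is already in EDX:EAX and no extra moves are expected. A hedged C sketch of the source-level builtin follows; the __builtin_ia32_rdpmc spelling is the GCC/Clang one as far as I know, and counter selector 0 is an arbitrary example.

    #include <stdio.h>

    /* Sketch only: assumes __builtin_ia32_rdpmc(int) exists and that the
     * OS has enabled user-mode RDPMC; otherwise the instruction faults. */
    int main(void) {
        unsigned long long pmc0 = __builtin_ia32_rdpmc(0);
        printf("pmc0 = %llu\n", pmc0);
        return 0;
    }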
diff --git a/test/CodeGen/X86/rdtsc.ll b/test/CodeGen/X86/rdtsc.ll
index f21a44c36073..dba614ad104e 100644
--- a/test/CodeGen/X86/rdtsc.ll
+++ b/test/CodeGen/X86/rdtsc.ll
@@ -1,8 +1,49 @@
-; RUN: llc < %s -march=x86 | grep rdtsc
-; RUN: llc < %s -march=x86-64 | grep rdtsc
-declare i64 @llvm.readcyclecounter()
+; RUN: llc < %s -march=x86-64 -mcpu=generic | FileCheck %s
+; RUN: llc < %s -march=x86 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=X86
+
+; Verify that we correctly lower ISD::READCYCLECOUNTER.
+
+
+define i64 @test_builtin_readcyclecounter() {
+ %1 = tail call i64 @llvm.readcyclecounter()
+ ret i64 %1
+}
+; CHECK-LABEL: test_builtin_readcyclecounter
+; CHECK: rdtsc
+; X86-NOT: shlq
+; X86-NOT: or
+; CHECK-NOT: mov
+; CHECK: ret
+
+
+; Verify that we correctly lower the Read Cycle Counter GCC x86 builtins
+; (i.e. RDTSC and RDTSCP).
-define i64 @foo() {
- %tmp.1 = call i64 @llvm.readcyclecounter( ) ; <i64> [#uses=1]
- ret i64 %tmp.1
+define i64 @test_builtin_rdtsc() {
+ %1 = tail call i64 @llvm.x86.rdtsc()
+ ret i64 %1
}
+; CHECK-LABEL: test_builtin_rdtsc
+; CHECK: rdtsc
+; X86-NOT: shlq
+; X86-NOT: or
+; CHECK-NOT: mov
+; CHECK: ret
+
+
+define i64 @test_builtin_rdtscp(i8* %A) {
+ %1 = tail call i64 @llvm.x86.rdtscp(i8* %A)
+ ret i64 %1
+}
+; CHECK-LABEL: test_builtin_rdtscp
+; CHECK: rdtscp
+; X86-NOT: shlq
+; CHECK: movl %ecx, (%{{[a-z0-9]+}})
+; X86-NOT: shlq
+; CHECK: ret
+
+
+declare i64 @llvm.readcyclecounter()
+declare i64 @llvm.x86.rdtscp(i8*)
+declare i64 @llvm.x86.rdtsc()
+
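For the RDTSC/RDTSCP checks above, a small C reference is sketched below. It assumes the __rdtsc() and __rdtscp() intrinsics from <x86intrin.h>, which front ends lower to llvm.x86.rdtsc and llvm.x86.rdtscp; the store of %ecx that the test checks for is the TSC_AUX value RDTSCP writes through the pointer argument.

    #include <stdio.h>
    #include <x86intrin.h>

    /* Sketch only: __rdtsc/__rdtscp as provided by GCC and Clang in
     * x86intrin.h. The aux value written by RDTSCP comes from
     * IA32_TSC_AUX and typically identifies the current core. */
    int main(void) {
        unsigned int aux;
        unsigned long long t0 = __rdtsc();
        unsigned long long t1 = __rdtscp(&aux);
        printf("delta = %llu, aux = %u\n", t1 - t0, aux);
        return 0;
    }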
diff --git a/test/CodeGen/X86/remat-invalid-liveness.ll b/test/CodeGen/X86/remat-invalid-liveness.ll
new file mode 100644
index 000000000000..d285e83b7981
--- /dev/null
+++ b/test/CodeGen/X86/remat-invalid-liveness.ll
@@ -0,0 +1,85 @@
+; RUN: llc %s -mcpu=core2 -o - | FileCheck %s
+; This test was failing while tracking the liveness in the register scavenger
+; during the branch folding pass. The allocation of the subregisters was
+; incorrect.
+; I.e., the faulty pattern looked like:
+; CH = movb 64
+; ECX = movl 3 <- CH was killed here.
+; CH = subb CH, ...
+;
+; This reduced test case triggers the crash before the fix, but it does not,
+; strictly speaking, check that the resulting code is correct.
+; To check that the code is actually correct, we would need to check the
+; liveness of the produced code.
+;
+; Currently, we check that after ECX = movl 3, we do not have subb CH,
+; whereas CH could have been redefined in between and that would have been
+; totally fine.
+; <rdar://problem/16582185>
+target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.9"
+
+%struct.A = type { %struct.B, %struct.C, %struct.D*, [1 x i8*] }
+%struct.B = type { i32, [4 x i8] }
+%struct.C = type { i128 }
+%struct.D = type { {}*, [0 x i32] }
+%union.E = type { i32 }
+
+; CHECK-LABEL: __XXX1:
+; CHECK: movl $3, %ecx
+; CHECK-NOT: subb %{{[a-z]+}}, %ch
+; Function Attrs: nounwind optsize ssp
+define fastcc void @__XXX1(%struct.A* %ht) #0 {
+entry:
+ %const72 = bitcast i128 72 to i128
+ %const3 = bitcast i128 3 to i128
+ switch i32 undef, label %if.end196 [
+ i32 1, label %sw.bb.i
+ i32 3, label %sw.bb2.i
+ ]
+
+sw.bb.i: ; preds = %entry
+ %call.i.i.i = tail call i32 undef(%struct.A* %ht, i8 zeroext 22, i32 undef, i32 0, %struct.D* undef)
+ %bf.load.i.i = load i128* undef, align 4
+ %bf.lshr.i.i = lshr i128 %bf.load.i.i, %const72
+ %shl1.i.i = shl nuw nsw i128 %bf.lshr.i.i, 8
+ %shl.i.i = trunc i128 %shl1.i.i to i32
+ br i1 undef, label %cond.false10.i.i, label %__XXX2.exit.i.i
+
+__XXX2.exit.i.i: ; preds = %sw.bb.i
+ %extract11.i.i.i = lshr i128 %bf.load.i.i, %const3
+ %extract.t12.i.i.i = trunc i128 %extract11.i.i.i to i32
+ %bf.cast7.i.i.i = and i32 %extract.t12.i.i.i, 3
+ %arrayidx.i.i.i = getelementptr inbounds %struct.A* %ht, i32 0, i32 3, i32 %bf.cast7.i.i.i
+ br label %cond.end12.i.i
+
+cond.false10.i.i: ; preds = %sw.bb.i
+ %arrayidx.i6.i.i = getelementptr inbounds %struct.A* %ht, i32 0, i32 3, i32 0
+ br label %cond.end12.i.i
+
+cond.end12.i.i: ; preds = %cond.false10.i.i, %__XXX2.exit.i.i
+ %.sink.in.i.i = phi i8** [ %arrayidx.i.i.i, %__XXX2.exit.i.i ], [ %arrayidx.i6.i.i, %cond.false10.i.i ]
+ %.sink.i.i = load i8** %.sink.in.i.i, align 4
+ %tmp = bitcast i8* %.sink.i.i to %union.E*
+ br i1 undef, label %for.body.i.i, label %if.end196
+
+for.body.i.i: ; preds = %for.body.i.i, %cond.end12.i.i
+ %weak.i.i = getelementptr inbounds %union.E* %tmp, i32 undef, i32 0
+ %tmp1 = load i32* %weak.i.i, align 4
+ %cmp36.i.i = icmp ne i32 %tmp1, %shl.i.i
+ %or.cond = and i1 %cmp36.i.i, false
+ br i1 %or.cond, label %for.body.i.i, label %if.end196
+
+sw.bb2.i: ; preds = %entry
+ %bf.lshr.i85.i = lshr i128 undef, %const72
+ br i1 undef, label %if.end196, label %__XXX2.exit.i95.i
+
+__XXX2.exit.i95.i: ; preds = %sw.bb2.i
+ %extract11.i.i91.i = lshr i128 undef, %const3
+ br label %if.end196
+
+if.end196: ; preds = %__XXX2.exit.i95.i, %sw.bb2.i, %for.body.i.i, %cond.end12.i.i, %entry
+ ret void
+}
+
+attributes #0 = { nounwind optsize ssp "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" }
diff --git a/test/CodeGen/X86/ret-mmx.ll b/test/CodeGen/X86/ret-mmx.ll
index 091fd5398496..fc9c78d1bbeb 100644
--- a/test/CodeGen/X86/ret-mmx.ll
+++ b/test/CodeGen/X86/ret-mmx.ll
@@ -34,6 +34,7 @@ define double @t4() nounwind {
ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
; CHECK-LABEL: t4:
; CHECK: movl $1
+; CHECK-NOT: pshufd
; CHECK: movd {{.*}}, %xmm0
}
diff --git a/test/CodeGen/X86/rot16.ll b/test/CodeGen/X86/rot16.ll
index 0293f4e21123..6d7c702afc40 100644
--- a/test/CodeGen/X86/rot16.ll
+++ b/test/CodeGen/X86/rot16.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -march=x86 -mcpu=generic | FileCheck %s
define i16 @foo(i16 %x, i16 %y, i16 %z) nounwind readnone {
entry:
diff --git a/test/CodeGen/X86/rotate4.ll b/test/CodeGen/X86/rotate4.ll
new file mode 100644
index 000000000000..5372612aeab8
--- /dev/null
+++ b/test/CodeGen/X86/rotate4.ll
@@ -0,0 +1,134 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s
+
+; Check that we recognize this idiom for rotation too:
+; a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))
+
+define i32 @rotate_left_32(i32 %a, i32 %b) {
+; CHECK-LABEL: rotate_left_32:
+; CHECK-NOT: and
+; CHECK: roll
+entry:
+ %and = and i32 %b, 31
+ %shl = shl i32 %a, %and
+ %0 = sub i32 0, %b
+ %and3 = and i32 %0, 31
+ %shr = lshr i32 %a, %and3
+ %or = or i32 %shl, %shr
+ ret i32 %or
+}
+
+define i32 @rotate_right_32(i32 %a, i32 %b) {
+; CHECK-LABEL: rotate_right_32:
+; CHECK-NOT: and
+; CHECK: rorl
+entry:
+ %and = and i32 %b, 31
+ %shl = lshr i32 %a, %and
+ %0 = sub i32 0, %b
+ %and3 = and i32 %0, 31
+ %shr = shl i32 %a, %and3
+ %or = or i32 %shl, %shr
+ ret i32 %or
+}
+
+define i64 @rotate_left_64(i64 %a, i64 %b) {
+; CHECK-LABEL: rotate_left_64:
+; CHECK-NOT: and
+; CHECK: rolq
+entry:
+ %and = and i64 %b, 63
+ %shl = shl i64 %a, %and
+ %0 = sub i64 0, %b
+ %and3 = and i64 %0, 63
+ %shr = lshr i64 %a, %and3
+ %or = or i64 %shl, %shr
+ ret i64 %or
+}
+
+define i64 @rotate_right_64(i64 %a, i64 %b) {
+; CHECK-LABEL: rotate_right_64:
+; CHECK-NOT: and
+; CHECK: rorq
+entry:
+ %and = and i64 %b, 63
+ %shl = lshr i64 %a, %and
+ %0 = sub i64 0, %b
+ %and3 = and i64 %0, 63
+ %shr = shl i64 %a, %and3
+ %or = or i64 %shl, %shr
+ ret i64 %or
+}
+
+; Also check mem operand.
+
+define void @rotate_left_m32(i32 *%pa, i32 %b) {
+; CHECK-LABEL: rotate_left_m32:
+; CHECK-NOT: and
+; CHECK: roll
+; no store:
+; CHECK-NOT: mov
+entry:
+ %a = load i32* %pa, align 16
+ %and = and i32 %b, 31
+ %shl = shl i32 %a, %and
+ %0 = sub i32 0, %b
+ %and3 = and i32 %0, 31
+ %shr = lshr i32 %a, %and3
+ %or = or i32 %shl, %shr
+ store i32 %or, i32* %pa, align 32
+ ret void
+}
+
+define void @rotate_right_m32(i32 *%pa, i32 %b) {
+; CHECK-LABEL: rotate_right_m32:
+; CHECK-NOT: and
+; CHECK: rorl
+; no store:
+; CHECK-NOT: mov
+entry:
+ %a = load i32* %pa, align 16
+ %and = and i32 %b, 31
+ %shl = lshr i32 %a, %and
+ %0 = sub i32 0, %b
+ %and3 = and i32 %0, 31
+ %shr = shl i32 %a, %and3
+ %or = or i32 %shl, %shr
+ store i32 %or, i32* %pa, align 32
+ ret void
+}
+
+define void @rotate_left_m64(i64 *%pa, i64 %b) {
+; CHECK-LABEL: rotate_left_m64:
+; CHECK-NOT: and
+; CHECK: rolq
+; no store:
+; CHECK-NOT: mov
+entry:
+ %a = load i64* %pa, align 16
+ %and = and i64 %b, 63
+ %shl = shl i64 %a, %and
+ %0 = sub i64 0, %b
+ %and3 = and i64 %0, 63
+ %shr = lshr i64 %a, %and3
+ %or = or i64 %shl, %shr
+ store i64 %or, i64* %pa, align 64
+ ret void
+}
+
+define void @rotate_right_m64(i64 *%pa, i64 %b) {
+; CHECK-LABEL: rotate_right_m64:
+; CHECK-NOT: and
+; CHECK: rorq
+; no store:
+; CHECK-NOT: mov
+entry:
+ %a = load i64* %pa, align 16
+ %and = and i64 %b, 63
+ %shl = lshr i64 %a, %and
+ %0 = sub i64 0, %b
+ %and3 = and i64 %0, 63
+ %shr = shl i64 %a, %and3
+ %or = or i64 %shl, %shr
+ store i64 %or, i64* %pa, align 64
+ ret void
+}
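The rotate4.ll checks above expect the masked-shift idiom quoted at the top of the file to collapse into a single rol/ror with no explicit and. A C sketch of that idiom is shown below for reference; the helper names are arbitrary. Masking both shift amounts with (width - 1) is what keeps the expression well defined for every b, including b == 0.

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the rotate idiom the test wants recognized:
     *   a << (b & (W-1)) | a >> ((0 - b) & (W-1))
     * Both shift amounts are masked, so there is no undefined behaviour
     * for b == 0, and a backend that matches the whole pattern can drop
     * the masks and emit a single rotate. */
    static uint32_t rotl32(uint32_t a, uint32_t b) {
        return (a << (b & 31u)) | (a >> ((0u - b) & 31u));
    }

    static uint64_t rotr64(uint64_t a, uint64_t b) {
        return (a >> (b & 63u)) | (a << ((0u - b) & 63u));
    }

    int main(void) {
        printf("%08x\n", rotl32(0x12345678u, 8));
        printf("%016llx\n", (unsigned long long)rotr64(0x0123456789abcdefULL, 4));
        return 0;
    }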
diff --git a/test/CodeGen/X86/saddo-redundant-add.ll b/test/CodeGen/X86/saddo-redundant-add.ll
new file mode 100644
index 000000000000..c56c68674a49
--- /dev/null
+++ b/test/CodeGen/X86/saddo-redundant-add.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+
+define void @redundant_add(i64 %n) {
+; Check that we don't create two additions for the sadd.with.overflow.
+; CHECK-LABEL: redundant_add
+; CHECK-NOT: leaq
+; CHECK-NOT: addq
+; CHECK: incq
+; CHECK-NEXT: jno
+entry:
+ br label %exit_check
+
+exit_check:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
+ %c = icmp slt i64 %i, %n
+ br i1 %c, label %loop, label %exit
+
+loop:
+ %i.o = tail call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %i, i64 1)
+ %i.next = extractvalue { i64, i1 } %i.o, 0
+ %o = extractvalue { i64, i1 } %i.o, 1
+ br i1 %o, label %overflow, label %exit_check
+
+exit:
+ ret void
+
+overflow:
+ tail call void @llvm.trap()
+ unreachable
+}
+
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64)
+declare void @llvm.trap()
+
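The saddo-redundant-add.ll test above insists that the sadd.with.overflow increment of the loop counter becomes a single incq followed by jno, rather than a separate add/lea plus an extra compare. A hedged C sketch of source code that produces this shape is below; it assumes the Clang/GCC checked-arithmetic builtin __builtin_saddll_overflow, which front ends typically lower to llvm.sadd.with.overflow.

    #include <stdio.h>

    /* Sketch only: mirrors the shape of @redundant_add above, assuming
     * __builtin_saddll_overflow is available. */
    static long long count_to(long long n) {
        long long i = 0;
        while (i < n) {
            long long next;
            if (__builtin_saddll_overflow(i, 1LL, &next))
                __builtin_trap();   /* overflow path, analogous to llvm.trap */
            i = next;
        }
        return i;
    }

    int main(void) {
        printf("%lld\n", count_to(10));
        return 0;
    }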
diff --git a/test/CodeGen/X86/segmented-stacks-dynamic.ll b/test/CodeGen/X86/segmented-stacks-dynamic.ll
index e17076215d5e..b82be41b8cbf 100644
--- a/test/CodeGen/X86/segmented-stacks-dynamic.ll
+++ b/test/CodeGen/X86/segmented-stacks-dynamic.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -segmented-stacks -filetype=obj
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -segmented-stacks -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -verify-machineinstrs | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -verify-machineinstrs | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -filetype=obj
; Just to prevent the alloca from being optimized away
declare void @dummy_use(i32*, i32)
-define i32 @test_basic(i32 %l) {
+define i32 @test_basic(i32 %l) #0 {
%mem = alloca i32, i32 %l
call void @dummy_use (i32* %mem, i32 %l)
%terminate = icmp eq i32 %l, 0
@@ -62,3 +62,5 @@ false:
; X64: movq %rax, %rdi
}
+
+attributes #0 = { "split-stack" }
diff --git a/test/CodeGen/X86/segmented-stacks.ll b/test/CodeGen/X86/segmented-stacks.ll
index 08a98ef51ec4..9dab3cd8d6d5 100644
--- a/test/CodeGen/X86/segmented-stacks.ll
+++ b/test/CodeGen/X86/segmented-stacks.ll
@@ -1,33 +1,32 @@
-; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X32-Linux
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X64-Linux
-; RUN: llc < %s -mcpu=generic -mtriple=i686-darwin -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X32-Darwin
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-darwin -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X64-Darwin
-; RUN: llc < %s -mcpu=generic -mtriple=i686-mingw32 -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X32-MinGW
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-freebsd -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X64-FreeBSD
+; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -verify-machineinstrs | FileCheck %s -check-prefix=X32-Linux
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -verify-machineinstrs | FileCheck %s -check-prefix=X64-Linux
+; RUN: llc < %s -mcpu=generic -mtriple=i686-darwin -verify-machineinstrs | FileCheck %s -check-prefix=X32-Darwin
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-darwin -verify-machineinstrs | FileCheck %s -check-prefix=X64-Darwin
+; RUN: llc < %s -mcpu=generic -mtriple=i686-mingw32 -verify-machineinstrs | FileCheck %s -check-prefix=X32-MinGW
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-freebsd -verify-machineinstrs | FileCheck %s -check-prefix=X64-FreeBSD
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-mingw32 -verify-machineinstrs | FileCheck %s -check-prefix=X64-MinGW
; We used to crash with filetype=obj
-; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -segmented-stacks -filetype=obj
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -segmented-stacks -filetype=obj
-; RUN: llc < %s -mcpu=generic -mtriple=i686-darwin -segmented-stacks -filetype=obj
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-darwin -segmented-stacks -filetype=obj
-; RUN: llc < %s -mcpu=generic -mtriple=i686-mingw32 -segmented-stacks -filetype=obj
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-freebsd -segmented-stacks -filetype=obj
-
-; RUN: not llc < %s -mcpu=generic -mtriple=x86_64-solaris -segmented-stacks 2> %t.log
+; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=i686-darwin -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-darwin -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=i686-mingw32 -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-freebsd -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-mingw32 -filetype=obj
+
+; RUN: not llc < %s -mcpu=generic -mtriple=x86_64-solaris 2> %t.log
; RUN: FileCheck %s -input-file=%t.log -check-prefix=X64-Solaris
-; RUN: not llc < %s -mcpu=generic -mtriple=x86_64-mingw32 -segmented-stacks 2> %t.log
-; RUN: FileCheck %s -input-file=%t.log -check-prefix=X64-MinGW
-; RUN: not llc < %s -mcpu=generic -mtriple=i686-freebsd -segmented-stacks 2> %t.log
+; RUN: not llc < %s -mcpu=generic -mtriple=i686-freebsd 2> %t.log
; RUN: FileCheck %s -input-file=%t.log -check-prefix=X32-FreeBSD
; X64-Solaris: Segmented stacks not supported on this platform
-; X64-MinGW: Segmented stacks not supported on this platform
; X32-FreeBSD: Segmented stacks not supported on FreeBSD i386
; Just to prevent the alloca from being optimized away
declare void @dummy_use(i32*, i32)
-define void @test_basic() {
+define void @test_basic() #0 {
%mem = alloca i32, i32 10
call void @dummy_use (i32* %mem, i32 10)
ret void
@@ -83,6 +82,16 @@ define void @test_basic() {
; X32-MinGW-NEXT: calll ___morestack
; X32-MinGW-NEXT: ret
+; X64-MinGW-LABEL: test_basic:
+
+; X64-MinGW: cmpq %gs:40, %rsp
+; X64-MinGW-NEXT: ja .LBB0_2
+
+; X64-MinGW: movabsq $72, %r10
+; X64-MinGW-NEXT: movabsq $32, %r11
+; X64-MinGW-NEXT: callq __morestack
+; X64-MinGW-NEXT: retq
+
; X64-FreeBSD-LABEL: test_basic:
; X64-FreeBSD: cmpq %fs:24, %rsp
@@ -95,16 +104,18 @@ define void @test_basic() {
}
-define i32 @test_nested(i32 * nest %closure, i32 %other) {
+define i32 @test_nested(i32 * nest %closure, i32 %other) #0 {
%addend = load i32 * %closure
%result = add i32 %other, %addend
+ %mem = alloca i32, i32 10
+ call void @dummy_use (i32* %mem, i32 10)
ret i32 %result
; X32-Linux: cmpl %gs:48, %esp
; X32-Linux-NEXT: ja .LBB1_2
; X32-Linux: pushl $4
-; X32-Linux-NEXT: pushl $0
+; X32-Linux-NEXT: pushl $60
; X32-Linux-NEXT: calll __morestack
; X32-Linux-NEXT: ret
@@ -112,7 +123,7 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
; X64-Linux-NEXT: ja .LBB1_2
; X64-Linux: movq %r10, %rax
-; X64-Linux-NEXT: movabsq $0, %r10
+; X64-Linux-NEXT: movabsq $56, %r10
; X64-Linux-NEXT: movabsq $0, %r11
; X64-Linux-NEXT: callq __morestack
; X64-Linux-NEXT: ret
@@ -123,7 +134,7 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
; X32-Darwin-NEXT: ja LBB1_2
; X32-Darwin: pushl $4
-; X32-Darwin-NEXT: pushl $0
+; X32-Darwin-NEXT: pushl $60
; X32-Darwin-NEXT: calll ___morestack
; X32-Darwin-NEXT: ret
@@ -131,7 +142,7 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
; X64-Darwin-NEXT: ja LBB1_2
; X64-Darwin: movq %r10, %rax
-; X64-Darwin-NEXT: movabsq $0, %r10
+; X64-Darwin-NEXT: movabsq $56, %r10
; X64-Darwin-NEXT: movabsq $0, %r11
; X64-Darwin-NEXT: callq ___morestack
; X64-Darwin-NEXT: ret
@@ -141,15 +152,26 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
; X32-MinGW-NEXT: ja LBB1_2
; X32-MinGW: pushl $4
-; X32-MinGW-NEXT: pushl $0
+; X32-MinGW-NEXT: pushl $52
; X32-MinGW-NEXT: calll ___morestack
; X32-MinGW-NEXT: ret
+; X64-MinGW-LABEL: test_nested:
+; X64-MinGW: cmpq %gs:40, %rsp
+; X64-MinGW-NEXT: ja .LBB1_2
+
+; X64-MinGW: movq %r10, %rax
+; X64-MinGW-NEXT: movabsq $88, %r10
+; X64-MinGW-NEXT: movabsq $32, %r11
+; X64-MinGW-NEXT: callq __morestack
+; X64-MinGW-NEXT: retq
+; X64-MinGW-NEXT: movq %rax, %r10
+
; X64-FreeBSD: cmpq %fs:24, %rsp
; X64-FreeBSD-NEXT: ja .LBB1_2
; X64-FreeBSD: movq %r10, %rax
-; X64-FreeBSD-NEXT: movabsq $0, %r10
+; X64-FreeBSD-NEXT: movabsq $56, %r10
; X64-FreeBSD-NEXT: movabsq $0, %r11
; X64-FreeBSD-NEXT: callq __morestack
; X64-FreeBSD-NEXT: ret
@@ -157,7 +179,7 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
}
-define void @test_large() {
+define void @test_large() #0 {
%mem = alloca i32, i32 10000
call void @dummy_use (i32* %mem, i32 0)
ret void
@@ -208,6 +230,16 @@ define void @test_large() {
; X32-MinGW-NEXT: calll ___morestack
; X32-MinGW-NEXT: ret
+; X64-MinGW-LABEL: test_large:
+; X64-MinGW: leaq -40040(%rsp), %r11
+; X64-MinGW-NEXT: cmpq %gs:40, %r11
+; X64-MinGW-NEXT: ja .LBB2_2
+
+; X64-MinGW: movabsq $40040, %r10
+; X64-MinGW-NEXT: movabsq $32, %r11
+; X64-MinGW-NEXT: callq __morestack
+; X64-MinGW-NEXT: retq
+
; X64-FreeBSD: leaq -40008(%rsp), %r11
; X64-FreeBSD-NEXT: cmpq %fs:24, %r11
; X64-FreeBSD-NEXT: ja .LBB2_2
@@ -219,7 +251,7 @@ define void @test_large() {
}
-define fastcc void @test_fastcc() {
+define fastcc void @test_fastcc() #0 {
%mem = alloca i32, i32 10
call void @dummy_use (i32* %mem, i32 10)
ret void
@@ -275,6 +307,16 @@ define fastcc void @test_fastcc() {
; X32-MinGW-NEXT: calll ___morestack
; X32-MinGW-NEXT: ret
+; X64-MinGW-LABEL: test_fastcc:
+
+; X64-MinGW: cmpq %gs:40, %rsp
+; X64-MinGW-NEXT: ja .LBB3_2
+
+; X64-MinGW: movabsq $72, %r10
+; X64-MinGW-NEXT: movabsq $32, %r11
+; X64-MinGW-NEXT: callq __morestack
+; X64-MinGW-NEXT: retq
+
; X64-FreeBSD-LABEL: test_fastcc:
; X64-FreeBSD: cmpq %fs:24, %rsp
@@ -287,7 +329,7 @@ define fastcc void @test_fastcc() {
}
-define fastcc void @test_fastcc_large() {
+define fastcc void @test_fastcc_large() #0 {
%mem = alloca i32, i32 10000
call void @dummy_use (i32* %mem, i32 0)
ret void
@@ -348,6 +390,17 @@ define fastcc void @test_fastcc_large() {
; X32-MinGW-NEXT: calll ___morestack
; X32-MinGW-NEXT: ret
+; X64-MinGW-LABEL: test_fastcc_large:
+
+; X64-MinGW: leaq -40040(%rsp), %r11
+; X64-MinGW-NEXT: cmpq %gs:40, %r11
+; X64-MinGW-NEXT: ja .LBB4_2
+
+; X64-MinGW: movabsq $40040, %r10
+; X64-MinGW-NEXT: movabsq $32, %r11
+; X64-MinGW-NEXT: callq __morestack
+; X64-MinGW-NEXT: retq
+
; X64-FreeBSD-LABEL: test_fastcc_large:
; X64-FreeBSD: leaq -40008(%rsp), %r11
@@ -361,7 +414,7 @@ define fastcc void @test_fastcc_large() {
}
-define fastcc void @test_fastcc_large_with_ecx_arg(i32 %a) {
+define fastcc void @test_fastcc_large_with_ecx_arg(i32 %a) #0 {
%mem = alloca i32, i32 10000
call void @dummy_use (i32* %mem, i32 %a)
ret void
@@ -383,3 +436,30 @@ define fastcc void @test_fastcc_large_with_ecx_arg(i32 %a) {
; X32-Darwin-NEXT: ret
}
+
+define void @test_nostack() #0 {
+ ret void
+
+; X32-Linux-LABEL: test_nostack:
+; X32-Linux-NOT: calll __morestack
+
+; X64-Linux-LABEL: test_nostack:
+; X32-Linux-NOT: callq __morestack
+
+; X32-Darwin-LABEL: test_nostack:
+; X32-Darwin-NOT: calll __morestack
+
+; X64-Darwin-LABEL: test_nostack:
+; X64-Darwin-NOT: callq __morestack
+
+; X32-MinGW-LABEL: test_nostack:
+; X32-MinGW-NOT: calll __morestack
+
+; X64-MinGW-LABEL: test_nostack:
+; X64-MinGW-NOT: callq __morestack
+
+; X64-FreeBSD-LABEL: test_nostack:
+; X64-FreeBSD-NOT: callq __morestack
+}
+
+attributes #0 = { "split-stack" }
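A minimal sketch of how these prologue checks are driven (illustrative only, not part of the patch; the function name and the @dummy_use declaration are assumed): every function carrying the "split-stack" attribute gets the stack-limit comparison, and the first operand handed to __morestack is the frame size, which is why the 10 x i32 alloca added to test_nested above changes that immediate from 0 to a non-zero value while the incoming-argument-size operand stays the same.

declare void @dummy_use(i32*, i32)

define void @split_stack_sketch() #0 {
  %buf = alloca i32, i32 10                  ; enlarges the frame, so the first
  call void @dummy_use(i32* %buf, i32 10)    ; __morestack operand grows with it
  ret void
}

attributes #0 = { "split-stack" }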
diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll
index cdd258d92031..654e8652cfcb 100644
--- a/test/CodeGen/X86/select.ll
+++ b/test/CodeGen/X86/select.ll
@@ -357,3 +357,11 @@ define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
; ATOM: cmpl $15, %edi
; ATOM: cmovgel %edx
}
+
+; CHECK-LABEL: @trunc_select_miscompile
+; CHECK-NOT: sarb
+define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
+ %tmp1 = select i1 %cc, i32 3, i32 2
+ %tmp2 = shl i32 %a, %tmp1
+ ret i32 %tmp2
+}
\ No newline at end of file
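As a hedged reference for the CHECK-NOT above (the function name below is invented for illustration): the select can only feed 3 or 2 into the shift amount, so the lowering must behave like one of two plain left shifts, with no byte-sized arithmetic shift involved.

define i32 @trunc_select_expected(i32 %a, i1 zeroext %cc) {
  %by3 = shl i32 %a, 3                    ; %cc == true
  %by2 = shl i32 %a, 2                    ; %cc == false
  %r = select i1 %cc, i32 %by3, i32 %by2
  ret i32 %r
}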
diff --git a/test/CodeGen/X86/setjmp-spills.ll b/test/CodeGen/X86/setjmp-spills.ll
new file mode 100644
index 000000000000..c35caae97af6
--- /dev/null
+++ b/test/CodeGen/X86/setjmp-spills.ll
@@ -0,0 +1,141 @@
+; RUN: llc < %s -mtriple=i386-linux | FileCheck %s -check-prefix=X86-32
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64
+
+declare i32 @get_val()
+declare void @use_val(i32)
+declare i1 @setjmp()
+declare void @longjmp()
+declare void @personality()
+
+
+; Test that llc avoids reusing spill slots in functions that call
+; setjmp(), whether they use "call" or "invoke" for calling setjmp()
+; (PR18244).
+
+define void @setjmp_caller() {
+; X86-32-LABEL: setjmp_caller:
+; X86-64-LABEL: setjmp_caller:
+; This code keeps enough variables live across the setjmp() call that
+; they don't all fit in registers and the compiler will allocate a
+; spill slot.
+ %a1 = call i32 @get_val()
+ %a2 = call i32 @get_val()
+ %a3 = call i32 @get_val()
+ %a4 = call i32 @get_val()
+ %a5 = call i32 @get_val()
+ %a6 = call i32 @get_val()
+ %a7 = call i32 @get_val()
+ %a8 = call i32 @get_val()
+; X86-32: movl %eax, [[SPILL_SLOT:[0-9]+]](%esp)
+; X86-32: calll get_val
+; X86-64: movl %eax, [[SPILL_SLOT:[0-9]+]](%rsp)
+; X86-64: callq get_val
+
+ %setjmp_result = call i1 @setjmp() returns_twice
+ br i1 %setjmp_result, label %second, label %first
+; X86-32: calll setjmp
+; X86-64: callq setjmp
+
+; Again, keep enough variables live that they need spill slots. Since
+; this function calls a returns_twice function (setjmp()), the
+; compiler should not reuse the spill slots: longjmp() can return to a
+; point where the first set of spill slots is still live.
+first:
+ %b1 = call i32 @get_val()
+ %b2 = call i32 @get_val()
+ %b3 = call i32 @get_val()
+ %b4 = call i32 @get_val()
+ %b5 = call i32 @get_val()
+ %b6 = call i32 @get_val()
+ %b7 = call i32 @get_val()
+ %b8 = call i32 @get_val()
+ call void @use_val(i32 %b1)
+ call void @use_val(i32 %b2)
+ call void @use_val(i32 %b3)
+ call void @use_val(i32 %b4)
+ call void @use_val(i32 %b5)
+ call void @use_val(i32 %b6)
+ call void @use_val(i32 %b7)
+ call void @use_val(i32 %b8)
+ call void @longjmp()
+ unreachable
+; X86-32-NOT: movl {{.*}}, [[SPILL_SLOT]](%esp)
+; X86-64-NOT: movl {{.*}}, [[SPILL_SLOT]](%rsp)
+
+second:
+ call void @use_val(i32 %a1)
+ call void @use_val(i32 %a2)
+ call void @use_val(i32 %a3)
+ call void @use_val(i32 %a4)
+ call void @use_val(i32 %a5)
+ call void @use_val(i32 %a6)
+ call void @use_val(i32 %a7)
+ call void @use_val(i32 %a8)
+ ret void
+}
+
+
+; This is the same as above, but using "invoke" rather than "call" to
+; call setjmp().
+
+define void @setjmp_invoker() {
+; X86-32-LABEL: setjmp_invoker:
+; X86-64-LABEL: setjmp_invoker:
+ %a1 = call i32 @get_val()
+ %a2 = call i32 @get_val()
+ %a3 = call i32 @get_val()
+ %a4 = call i32 @get_val()
+ %a5 = call i32 @get_val()
+ %a6 = call i32 @get_val()
+ %a7 = call i32 @get_val()
+ %a8 = call i32 @get_val()
+; X86-32: movl %eax, [[SPILL_SLOT:[0-9]+]](%esp)
+; X86-32: calll get_val
+; X86-64: movl %eax, [[SPILL_SLOT:[0-9]+]](%rsp)
+; X86-64: callq get_val
+
+ %setjmp_result = invoke i1 @setjmp() returns_twice
+ to label %cont unwind label %lpad
+; X86-32: calll setjmp
+; X86-64: callq setjmp
+
+cont:
+ br i1 %setjmp_result, label %second, label %first
+
+lpad:
+ %lp = landingpad { i8*, i32 } personality void ()* @personality cleanup
+ unreachable
+
+first:
+ %b1 = call i32 @get_val()
+ %b2 = call i32 @get_val()
+ %b3 = call i32 @get_val()
+ %b4 = call i32 @get_val()
+ %b5 = call i32 @get_val()
+ %b6 = call i32 @get_val()
+ %b7 = call i32 @get_val()
+ %b8 = call i32 @get_val()
+ call void @use_val(i32 %b1)
+ call void @use_val(i32 %b2)
+ call void @use_val(i32 %b3)
+ call void @use_val(i32 %b4)
+ call void @use_val(i32 %b5)
+ call void @use_val(i32 %b6)
+ call void @use_val(i32 %b7)
+ call void @use_val(i32 %b8)
+ call void @longjmp()
+ unreachable
+; X86-32-NOT: movl {{.*}}, [[SPILL_SLOT]](%esp)
+; X86-64-NOT: movl {{.*}}, [[SPILL_SLOT]](%rsp)
+
+second:
+ call void @use_val(i32 %a1)
+ call void @use_val(i32 %a2)
+ call void @use_val(i32 %a3)
+ call void @use_val(i32 %a4)
+ call void @use_val(i32 %a5)
+ call void @use_val(i32 %a6)
+ call void @use_val(i32 %a7)
+ call void @use_val(i32 %a8)
+ ret void
+}
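To make the hazard described in the comments above concrete, a small sketch (all names assumed, not taken from the test): if a value computed before setjmp() shared its spill slot with one computed after it, a longjmp() from the later code would come back through the second setjmp() return with the slot already clobbered.

declare i32 @get_val()
declare void @use_val(i32)
declare i1 @setjmp() returns_twice

define void @spill_slot_hazard_sketch() {
  %live = call i32 @get_val()          ; may be spilled around the call below
  %second_return = call i1 @setjmp()
  br i1 %second_return, label %resumed, label %normal

normal:
  %later = call i32 @get_val()         ; must not reuse %live's spill slot, since
  call void @use_val(i32 %later)       ; a longjmp() out of these calls can resume
  ret void                             ; at the setjmp() above with %live needed

resumed:
  call void @use_val(i32 %live)        ; reloads the slot written before setjmp()
  ret void
}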
diff --git a/test/CodeGen/X86/shift-combine-crash.ll b/test/CodeGen/X86/shift-combine-crash.ll
new file mode 100644
index 000000000000..a69a907d41b0
--- /dev/null
+++ b/test/CodeGen/X86/shift-combine-crash.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 > /dev/null
+
+; Verify that DAGCombiner doesn't crash with an assertion failure in the
+; attempt to cast an ISD::UNDEF node to a ConstantSDNode.
+
+; During type legalization, the vector shift operation in function @test1 is
+; split into two legal shifts that work on <2 x i64> elements.
+; The first shift of the legalized sequence would be a shift by all undefs.
+; DAGCombiner will then try to simplify the vector shift and check if the
+; vector of shift counts is a splat. Make sure that llc doesn't crash
+; at that stage.
+
+
+define <4 x i64> @test1(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 1, i64 2>
+ ret <4 x i64> %shl
+}
+
+; Also, verify that DAGCombiner doesn't crash when trying to combine shifts
+; with different combinations of undef elements in the vector shift count.
+
+define <4 x i64> @test2(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 2, i64 3, i64 undef, i64 undef>
+ ret <4 x i64> %shl
+}
+
+define <4 x i64> @test3(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 3, i64 undef>
+ ret <4 x i64> %shl
+}
+
+define <4 x i64> @test4(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 undef, i64 2, i64 undef, i64 3>
+ ret <4 x i64> %shl
+}
+
+define <4 x i64> @test5(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 undef, i64 undef>
+ ret <4 x i64> %shl
+}
+
+define <4 x i64> @test6(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 3, i64 undef>
+ ret <4 x i64> %shl
+}
+
+define <4 x i64> @test7(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 undef, i64 3>
+ ret <4 x i64> %shl
+}
+
+define <4 x i64> @test8(<4 x i64> %A) {
+ %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 undef, i64 undef>
+ ret <4 x i64> %shl
+}
+
+
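For clarity, a sketch (function name assumed) of roughly what the low half of @test1 becomes once the <4 x i64> shift is split into two legal <2 x i64> operations: its shift-count vector is entirely undef, and DAGCombiner's splat check has to handle that without asserting.

define <2 x i64> @test1_low_half_sketch(<2 x i64> %lo) {
  %shl = shl <2 x i64> %lo, <i64 undef, i64 undef>   ; all-undef shift counts
  ret <2 x i64> %shl
}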
diff --git a/test/CodeGen/X86/shift-double.ll b/test/CodeGen/X86/shift-double.ll
index 8d2b2907c5a7..fd4ba81d47c1 100644
--- a/test/CodeGen/X86/shift-double.ll
+++ b/test/CodeGen/X86/shift-double.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
+; RUN: llc < %s -march=x86 -mcpu=generic -x86-asm-syntax=intel | \
; RUN: grep "sh[lr]d" | count 5
define i64 @test1(i64 %X, i8 %C) {
diff --git a/test/CodeGen/X86/shift-parts.ll b/test/CodeGen/X86/shift-parts.ll
index ce4f538f4de4..763da6397101 100644
--- a/test/CodeGen/X86/shift-parts.ll
+++ b/test/CodeGen/X86/shift-parts.ll
@@ -1,17 +1,19 @@
-; RUN: llc < %s -march=x86-64 | grep shrdq
+; RUN: llc -march=x86-64 < %s | FileCheck %s
; PR4736
%0 = type { i32, i8, [35 x i8] }
@g_144 = external global %0, align 8 ; <%0*> [#uses=1]
-define i32 @int87(i32 %uint64p_8) nounwind {
+; CHECK: shrdq
+
+define i32 @int87(i32 %uint64p_8, i1 %cond) nounwind {
entry:
%srcval4 = load i320* bitcast (%0* @g_144 to i320*), align 8 ; <i320> [#uses=1]
br label %for.cond
for.cond: ; preds = %for.cond, %entry
- %call3.in.in.in.v = select i1 undef, i320 192, i320 128 ; <i320> [#uses=1]
+ %call3.in.in.in.v = select i1 %cond, i320 192, i320 128 ; <i320> [#uses=1]
%call3.in.in.in = lshr i320 %srcval4, %call3.in.in.in.v ; <i320> [#uses=1]
%call3.in = trunc i320 %call3.in.in.in to i32 ; <i32> [#uses=1]
%tobool = icmp eq i32 %call3.in, 0 ; <i1> [#uses=1]
diff --git a/test/CodeGen/X86/shift-pcmp.ll b/test/CodeGen/X86/shift-pcmp.ll
new file mode 100644
index 000000000000..365c7310559b
--- /dev/null
+++ b/test/CodeGen/X86/shift-pcmp.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+avx | FileCheck %s
+
+define <8 x i16> @foo(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: .short 32
+; CHECK-NEXT: .short 32
+; CHECK-NEXT: .short 32
+; CHECK-NEXT: .short 32
+; CHECK-NEXT: .short 32
+; CHECK-NEXT: .short 32
+; CHECK-NEXT: .short 32
+; CHECK-NEXT: .short 32
+; CHECK-LABEL: {{^_?foo:}}
+; CHECK-NOT: psll
+entry:
+ %icmp = icmp eq <8 x i16> %a, %b
+ %zext = zext <8 x i1> %icmp to <8 x i16>
+ %shl = shl nuw nsw <8 x i16> %zext, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ret <8 x i16> %shl
+}
+
+; Don't fail with an assert due to an undef in the buildvector
+define <8 x i16> @bar(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: bar
+entry:
+ %icmp = icmp eq <8 x i16> %a, %b
+ %zext = zext <8 x i1> %icmp to <8 x i16>
+ %shl = shl nuw nsw <8 x i16> %zext, <i16 5, i16 undef, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ret <8 x i16> %shl
+}
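A hedged sketch of why the eight .short 32 constants above are the expected output (the function name is invented): each lane of the zero-extended compare is 0 or 1, so shifting left by 5 can only yield 0 or 32, and an AND of the sign-extended compare mask with a vector of 32s produces the same result without any psll.

define <8 x i16> @shift_pcmp_fold_sketch(<8 x i16> %a, <8 x i16> %b) {
  %icmp = icmp eq <8 x i16> %a, %b
  %mask = sext <8 x i1> %icmp to <8 x i16>     ; 0 or -1 per lane (pcmpeqw pattern)
  %r = and <8 x i16> %mask, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32>
  ret <8 x i16> %r                             ; 0 or 32 per lane, no shift needed
}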
diff --git a/test/CodeGen/X86/shl_undef.ll b/test/CodeGen/X86/shl_undef.ll
index 54b74cc52ece..705af5b4e332 100644
--- a/test/CodeGen/X86/shl_undef.ll
+++ b/test/CodeGen/X86/shl_undef.ll
@@ -1,15 +1,17 @@
-; RUN: llc < %s -O1 -mtriple=i386-apple-darwin | FileCheck %s
+; RUN: llc < %s -O1 -mtriple=i386-apple-darwin -x86-asm-syntax=intel | FileCheck %s
;
; Interesting test case where %tmp1220 = xor i32 %tmp862, %tmp592 and
; %tmp1676 = xor i32 %tmp1634, %tmp1530 have zero demanded bits after
; DAGCombiner optimization pass. These are changed to undef and in turn
; the successor shl(s) become shl undef, 1. This pattern then matches
-; shl x, 1 -> add x, x. add undef, undef doesn't guarentee the low
+; shl x, 1 -> add x, x. add undef, undef doesn't guarantee the low
; order bit is zero and is incorrect.
;
; See rdar://9453156 and rdar://9487392.
;
+; Use Intel syntax; otherwise the "shl" check might match "pushl".
+
; CHECK-NOT: shl
define i32 @foo(i8* %a0, i32* %a2) nounwind {
entry:
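To spell out the soundness argument from the comment above (a small sketch, function names made up): a shift left by 1 always clears the low bit, but an add of two independent undef values does not, which is why rewriting shl undef, 1 as add undef, undef is incorrect.

define i8 @shl_one_is_even(i8 %x) {
  %r = shl i8 %x, 1          ; low bit of %r is zero for every value of %x
  ret i8 %r
}

define i8 @add_undef_may_be_odd() {
  %r = add i8 undef, undef   ; the two undef uses are independent, so %r may be odd
  ret i8 %r
}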
diff --git a/test/CodeGen/X86/shrink-compare.ll b/test/CodeGen/X86/shrink-compare.ll
index bb892011e2d6..fc7ee061f35d 100644
--- a/test/CodeGen/X86/shrink-compare.ll
+++ b/test/CodeGen/X86/shrink-compare.ll
@@ -2,7 +2,7 @@
declare void @bar()
-define void @test1(i32* nocapture %X) nounwind {
+define void @test1(i32* nocapture %X) nounwind minsize {
entry:
%tmp1 = load i32* %X, align 4
%and = and i32 %tmp1, 255
@@ -19,7 +19,7 @@ if.end:
; CHECK: cmpb $47, (%{{rdi|rcx}})
}
-define void @test2(i32 %X) nounwind {
+define void @test2(i32 %X) nounwind minsize {
entry:
%and = and i32 %X, 255
%cmp = icmp eq i32 %and, 47
@@ -35,7 +35,7 @@ if.end:
; CHECK: cmpb $47, %{{dil|cl}}
}
-define void @test3(i32 %X) nounwind {
+define void @test3(i32 %X) nounwind minsize {
entry:
%and = and i32 %X, 255
%cmp = icmp eq i32 %and, 255
@@ -70,7 +70,7 @@ lor.end: ; preds = %lor.rhs, %entry
@x = global { i8, i8, i8, i8, i8, i8, i8, i8 } { i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 1 }, align 4
; PR16551
-define void @test5(i32 %X) nounwind {
+define void @test5(i32 %X) nounwind minsize {
entry:
%bf.load = load i56* bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @x to i56*), align 4
%bf.lshr = lshr i56 %bf.load, 32
diff --git a/test/CodeGen/X86/shuffle-combine-crash.ll b/test/CodeGen/X86/shuffle-combine-crash.ll
new file mode 100644
index 000000000000..6ab7b97e6a7b
--- /dev/null
+++ b/test/CodeGen/X86/shuffle-combine-crash.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7
+
+; Verify that DAGCombiner does not crash when checking if it is
+; safe to fold the shuffles in function @sample_test according to rule
+; (shuffle (shuffle A, Undef, M0), Undef, M1) -> (shuffle A, Undef, M2)
+;
+; The DAGCombiner avoids folding shuffles if
+; the resulting shuffle dag node is not legal for the target.
+; That means the shuffle must have a legal type and a legal mask.
+;
+; Before, the DAGCombiner forgot to check if the resulting shuffle
+; was legal. It instead just called
+; 'X86TargetLowering::isShuffleMaskLegal'; however, that was not enough since
+; that method always expects a valid vector type as input.
+; As a consequence, compiling the function below would have caused a crash.
+
+define void @sample_test() {
+ br i1 undef, label %5, label %1
+
+; <label>:1 ; preds = %0
+ %2 = load <4 x i8>* undef
+ %3 = shufflevector <4 x i8> %2, <4 x i8> undef, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+ %4 = shufflevector <4 x i8> %3, <4 x i8> undef, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+ store <4 x i8> %4, <4 x i8>* undef
+ br label %5
+
+; <label>:5 ; preds = %1, %0
+ ret void
+}
+
diff --git a/test/CodeGen/X86/sibcall-5.ll b/test/CodeGen/X86/sibcall-5.ll
index c479030508a9..c04af234b131 100644
--- a/test/CodeGen/X86/sibcall-5.ll
+++ b/test/CodeGen/X86/sibcall-5.ll
@@ -8,7 +8,7 @@
define double @foo(double %a) nounwind readonly ssp {
entry:
; X32-LABEL: foo:
-; X32: jmp _sin$stub
+; X32: jmp L_sin$stub
; X64-LABEL: foo:
; X64: jmp _sin
@@ -18,7 +18,7 @@ entry:
define float @bar(float %a) nounwind readonly ssp {
; X32-LABEL: bar:
-; X32: jmp _sinf$stub
+; X32: jmp L_sinf$stub
; X64-LABEL: bar:
; X64: jmp _sinf
@@ -27,6 +27,11 @@ entry:
ret float %0
}
+; X32-LABEL: L_sin$stub:
+; X32-NEXT: .indirect_symbol _sin
+; X32-LABEL: L_sinf$stub:
+; X32-NEXT: .indirect_symbol _sinf
+
declare float @sinf(float) nounwind readonly
declare double @sin(double) nounwind readonly
diff --git a/test/CodeGen/X86/sibcall.ll b/test/CodeGen/X86/sibcall.ll
index 589e9ec10524..28fc626afd9d 100644
--- a/test/CodeGen/X86/sibcall.ll
+++ b/test/CodeGen/X86/sibcall.ll
@@ -247,11 +247,11 @@ entry:
define void @t15(%struct.foo* noalias sret %agg.result) nounwind {
; 32-LABEL: t15:
; 32: calll {{_?}}f
-; 32: ret $4
+; 32: retl $4
; 64-LABEL: t15:
; 64: callq {{_?}}f
-; 64: ret
+; 64: retq
tail call fastcc void @f(%struct.foo* noalias sret %agg.result) nounwind
ret void
}
diff --git a/test/CodeGen/X86/sqrt.ll b/test/CodeGen/X86/sqrt.ll
new file mode 100644
index 000000000000..be7c6e867399
--- /dev/null
+++ b/test/CodeGen/X86/sqrt.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=AVX
+
+define float @test_sqrt_f32(float %a) {
+; SSE2-LABEL: test_sqrt_f32
+; SSE2: sqrtss %xmm0, %xmm0
+; AVX-LABEL: test_sqrt_f32
+; AVX: vsqrtss %xmm0, %xmm0
+ %res = call float @llvm.sqrt.f32(float %a)
+ ret float %res
+}
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
+define double @test_sqrt_f64(double %a) {
+; SSE2-LABEL: test_sqrt_f64
+; SSE2: sqrtsd %xmm0, %xmm0
+; AVX-LABEL: test_sqrt_f64
+; AVX: vsqrtsd %xmm0, %xmm0
+ %res = call double @llvm.sqrt.f64(double %a)
+ ret double %res
+}
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith-2.ll b/test/CodeGen/X86/sse-scalar-fp-arith-2.ll
new file mode 100644
index 000000000000..600ee1b7b1e5
--- /dev/null
+++ b/test/CodeGen/X86/sse-scalar-fp-arith-2.ll
@@ -0,0 +1,423 @@
+; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s
+; RUN: llc -mtriple=x86_64-pc-linux -mattr=-sse4.1 -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s
+; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7-avx < %s | FileCheck -check-prefix=CHECK -check-prefix=AVX %s
+
+; Ensure that the backend selects SSE/AVX scalar fp instructions
+; from a packed fp instruction plus a vector insert.
+
+
+define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fadd <4 x float> %a, %b
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test_add_ss
+; SSE2: addss %xmm1, %xmm0
+; AVX: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fsub <4 x float> %a, %b
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test_sub_ss
+; SSE2: subss %xmm1, %xmm0
+; AVX: vsubss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fmul <4 x float> %a, %b
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test_mul_ss
+; SSE2: mulss %xmm1, %xmm0
+; AVX: vmulss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fdiv <4 x float> %a, %b
+ %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test_div_ss
+; SSE2: divss %xmm1, %xmm0
+; AVX: vdivss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fadd <2 x double> %a, %b
+ %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test_add_sd
+; SSE2: addsd %xmm1, %xmm0
+; AVX: vaddsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fsub <2 x double> %a, %b
+ %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test_sub_sd
+; SSE2: subsd %xmm1, %xmm0
+; AVX: vsubsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fmul <2 x double> %a, %b
+ %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test_mul_sd
+; SSE2: mulsd %xmm1, %xmm0
+; AVX: vmulsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fdiv <2 x double> %a, %b
+ %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test_div_sd
+; SSE2: divsd %xmm1, %xmm0
+; AVX: vdivsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fadd <4 x float> %b, %a
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test2_add_ss
+; SSE2: addss %xmm0, %xmm1
+; AVX: vaddss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fsub <4 x float> %b, %a
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test2_sub_ss
+; SSE2: subss %xmm0, %xmm1
+; AVX: vsubss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fmul <4 x float> %b, %a
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test2_mul_ss
+; SSE2: mulss %xmm0, %xmm1
+; AVX: vmulss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fdiv <4 x float> %b, %a
+ %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test2_div_ss
+; SSE2: divss %xmm0, %xmm1
+; AVX: vdivss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fadd <2 x double> %b, %a
+ %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test2_add_sd
+; SSE2: addsd %xmm0, %xmm1
+; AVX: vaddsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fsub <2 x double> %b, %a
+ %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test2_sub_sd
+; SSE2: subsd %xmm0, %xmm1
+; AVX: vsubsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fmul <2 x double> %b, %a
+ %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test2_mul_sd
+; SSE2: mulsd %xmm0, %xmm1
+; AVX: vmulsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fdiv <2 x double> %b, %a
+ %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test2_div_sd
+; SSE2: divsd %xmm0, %xmm1
+; AVX: vdivsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <4 x float> @test3_add_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fadd <4 x float> %a, %b
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test3_add_ss
+; SSE2: addss %xmm1, %xmm0
+; AVX: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test3_sub_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fsub <4 x float> %a, %b
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test3_sub_ss
+; SSE2: subss %xmm1, %xmm0
+; AVX: vsubss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test3_mul_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fmul <4 x float> %a, %b
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test3_mul_ss
+; SSE2: mulss %xmm1, %xmm0
+; AVX: vmulss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test3_div_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fdiv <4 x float> %a, %b
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test3_div_ss
+; SSE2: divss %xmm1, %xmm0
+; AVX: vdivss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <2 x double> @test3_add_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fadd <2 x double> %a, %b
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test3_add_sd
+; SSE2: addsd %xmm1, %xmm0
+; AVX: vaddsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test3_sub_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fsub <2 x double> %a, %b
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test3_sub_sd
+; SSE2: subsd %xmm1, %xmm0
+; AVX: vsubsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test3_mul_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fmul <2 x double> %a, %b
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test3_mul_sd
+; SSE2: mulsd %xmm1, %xmm0
+; AVX: vmulsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test3_div_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fdiv <2 x double> %a, %b
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test3_div_sd
+; SSE2: divsd %xmm1, %xmm0
+; AVX: vdivsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <4 x float> @test4_add_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fadd <4 x float> %b, %a
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test4_add_ss
+; SSE2: addss %xmm0, %xmm1
+; AVX: vaddss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test4_sub_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fsub <4 x float> %b, %a
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test4_sub_ss
+; SSE2: subss %xmm0, %xmm1
+; AVX: vsubss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test4_mul_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fmul <4 x float> %b, %a
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test4_mul_ss
+; SSE2: mulss %xmm0, %xmm1
+; AVX: vmulss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test4_div_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = fdiv <4 x float> %b, %a
+ %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
+ ret <4 x float> %2
+}
+
+; CHECK-LABEL: test4_div_ss
+; SSE2: divss %xmm0, %xmm1
+; AVX: vdivss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <2 x double> @test4_add_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fadd <2 x double> %b, %a
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test4_add_sd
+; SSE2: addsd %xmm0, %xmm1
+; AVX: vaddsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test4_sub_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fsub <2 x double> %b, %a
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test4_sub_sd
+; SSE2: subsd %xmm0, %xmm1
+; AVX: vsubsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test4_mul_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fmul <2 x double> %b, %a
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test4_mul_sd
+; SSE2: mulsd %xmm0, %xmm1
+; AVX: vmulsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test4_div_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = fdiv <2 x double> %b, %a
+ %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
+ ret <2 x double> %2
+}
+
+; CHECK-LABEL: test4_div_sd
+; SSE2: divsd %xmm0, %xmm1
+; AVX: vdivsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll
new file mode 100644
index 000000000000..3949a835e67a
--- /dev/null
+++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll
@@ -0,0 +1,310 @@
+; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s
+; RUN: llc -mtriple=x86_64-pc-linux -mattr=-sse4.1 -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s
+; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7-avx < %s | FileCheck -check-prefix=CHECK -check-prefix=AVX %s
+
+; Ensure that the backend no longer emits unnecessary vector insert
+; instructions immediately after SSE scalar fp instructions
+; like addss or mulss.
+
+
+define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %add = fadd float %2, %1
+ %3 = insertelement <4 x float> %a, float %add, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_add_ss
+; SSE2: addss %xmm1, %xmm0
+; AVX: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %sub = fsub float %2, %1
+ %3 = insertelement <4 x float> %a, float %sub, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_sub_ss
+; SSE2: subss %xmm1, %xmm0
+; AVX: vsubss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %mul = fmul float %2, %1
+ %3 = insertelement <4 x float> %a, float %mul, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_mul_ss
+; SSE2: mulss %xmm1, %xmm0
+; AVX: vmulss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %div = fdiv float %2, %1
+ %3 = insertelement <4 x float> %a, float %div, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_div_ss
+; SSE2: divss %xmm1, %xmm0
+; AVX: vdivss %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %b, i32 0
+ %2 = extractelement <2 x double> %a, i32 0
+ %add = fadd double %2, %1
+ %3 = insertelement <2 x double> %a, double %add, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_add_sd
+; SSE2: addsd %xmm1, %xmm0
+; AVX: vaddsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %b, i32 0
+ %2 = extractelement <2 x double> %a, i32 0
+ %sub = fsub double %2, %1
+ %3 = insertelement <2 x double> %a, double %sub, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_sub_sd
+; SSE2: subsd %xmm1, %xmm0
+; AVX: vsubsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %b, i32 0
+ %2 = extractelement <2 x double> %a, i32 0
+ %mul = fmul double %2, %1
+ %3 = insertelement <2 x double> %a, double %mul, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_mul_sd
+; SSE2: mulsd %xmm1, %xmm0
+; AVX: vmulsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %b, i32 0
+ %2 = extractelement <2 x double> %a, i32 0
+ %div = fdiv double %2, %1
+ %3 = insertelement <2 x double> %a, double %div, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test_div_sd
+; SSE2: divsd %xmm1, %xmm0
+; AVX: vdivsd %xmm1, %xmm0, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = extractelement <4 x float> %b, i32 0
+ %add = fadd float %1, %2
+ %3 = insertelement <4 x float> %b, float %add, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test2_add_ss
+; SSE2: addss %xmm0, %xmm1
+; AVX: vaddss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = extractelement <4 x float> %b, i32 0
+ %sub = fsub float %2, %1
+ %3 = insertelement <4 x float> %b, float %sub, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test2_sub_ss
+; SSE2: subss %xmm0, %xmm1
+; AVX: vsubss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = extractelement <4 x float> %b, i32 0
+ %mul = fmul float %1, %2
+ %3 = insertelement <4 x float> %b, float %mul, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test2_mul_ss
+; SSE2: mulss %xmm0, %xmm1
+; AVX: vmulss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = extractelement <4 x float> %b, i32 0
+ %div = fdiv float %2, %1
+ %3 = insertelement <4 x float> %b, float %div, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test2_div_ss
+; SSE2: divss %xmm0, %xmm1
+; AVX: vdivss %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %a, i32 0
+ %2 = extractelement <2 x double> %b, i32 0
+ %add = fadd double %1, %2
+ %3 = insertelement <2 x double> %b, double %add, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test2_add_sd
+; SSE2: addsd %xmm0, %xmm1
+; AVX: vaddsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %a, i32 0
+ %2 = extractelement <2 x double> %b, i32 0
+ %sub = fsub double %2, %1
+ %3 = insertelement <2 x double> %b, double %sub, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test2_sub_sd
+; SSE2: subsd %xmm0, %xmm1
+; AVX: vsubsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %a, i32 0
+ %2 = extractelement <2 x double> %b, i32 0
+ %mul = fmul double %1, %2
+ %3 = insertelement <2 x double> %b, double %mul, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test2_mul_sd
+; SSE2: mulsd %xmm0, %xmm1
+; AVX: vmulsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) {
+ %1 = extractelement <2 x double> %a, i32 0
+ %2 = extractelement <2 x double> %b, i32 0
+ %div = fdiv double %2, %1
+ %3 = insertelement <2 x double> %b, double %div, i32 0
+ ret <2 x double> %3
+}
+
+; CHECK-LABEL: test2_div_sd
+; SSE2: divsd %xmm0, %xmm1
+; AVX: vdivsd %xmm0, %xmm1, %xmm0
+; CHECK-NOT: movsd
+; CHECK: ret
+
+
+define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %add = fadd float %2, %1
+ %add2 = fadd float %2, %add
+ %3 = insertelement <4 x float> %a, float %add2, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_multiple_add_ss
+; CHECK: addss
+; CHECK: addss
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %sub = fsub float %2, %1
+ %sub2 = fsub float %2, %sub
+ %3 = insertelement <4 x float> %a, float %sub2, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_multiple_sub_ss
+; CHECK: subss
+; CHECK: subss
+; CHECK-NOT: movss
+; CHECK: ret
+
+
+define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %mul = fmul float %2, %1
+ %mul2 = fmul float %2, %mul
+ %3 = insertelement <4 x float> %a, float %mul2, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_multiple_mul_ss
+; CHECK: mulss
+; CHECK: mulss
+; CHECK-NOT: movss
+; CHECK: ret
+
+define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) {
+ %1 = extractelement <4 x float> %b, i32 0
+ %2 = extractelement <4 x float> %a, i32 0
+ %div = fdiv float %2, %1
+ %div2 = fdiv float %2, %div
+ %3 = insertelement <4 x float> %a, float %div2, i32 0
+ ret <4 x float> %3
+}
+
+; CHECK-LABEL: test_multiple_div_ss
+; CHECK: divss
+; CHECK: divss
+; CHECK-NOT: movss
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/sse1.ll b/test/CodeGen/X86/sse1.ll
index 47c6429b1814..183297e4c306 100644
--- a/test/CodeGen/X86/sse1.ll
+++ b/test/CodeGen/X86/sse1.ll
@@ -43,3 +43,17 @@ entry:
; CHECK-NOT: shufps $16
; CHECK: ret
}
+
+; We used to get stuck in type legalization for this example when lowering the
+; vselect. With SSE1 v4f32 is a legal type but v4i1 (or any vector integer type)
+; is not. We used to ping-pong between splitting the vselect for the v4i1
+; condition operand and widening the resulting vselect for the v4f32 result.
+; PR18036
+
+; CHECK-LABEL: vselect
+define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
+entry:
+ %a1 = icmp eq <4 x i32> %q, zeroinitializer
+ %a14 = select <4 x i1> %a1, <4 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+0> , <4 x float> zeroinitializer
+ ret <4 x float> %a14
+}
diff --git a/test/CodeGen/X86/sse2-blend.ll b/test/CodeGen/X86/sse2-blend.ll
index 1ac983254eaf..c63ff72b4801 100644
--- a/test/CodeGen/X86/sse2-blend.ll
+++ b/test/CodeGen/X86/sse2-blend.ll
@@ -1,22 +1,22 @@
; RUN: llc < %s -march=x86 -mcpu=yonah -mattr=+sse2,-sse4.1 | FileCheck %s
-; CHECK: vsel_float
-; CHECK: pandn
-; CHECK: pand
-; CHECK: por
+; CHECK-LABEL: vsel_float
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NOT: orps
; CHECK: ret
define void@vsel_float(<4 x float>* %v1, <4 x float>* %v2) {
%A = load <4 x float>* %v1
%B = load <4 x float>* %v2
- %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
+ %vsel = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %A, <4 x float> %B
store <4 x float > %vsel, <4 x float>* %v1
ret void
}
-; CHECK: vsel_i32
-; CHECK: pandn
-; CHECK: pand
-; CHECK: por
+; CHECK-LABEL: vsel_i32
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NOT: orps
; CHECK: ret
define void@vsel_i32(<4 x i32>* %v1, <4 x i32>* %v2) {
%A = load <4 x i32>* %v1
@@ -27,7 +27,7 @@ define void@vsel_i32(<4 x i32>* %v1, <4 x i32>* %v2) {
}
; Without forcing instructions, fall back to the preferred PS domain.
-; CHECK: vsel_i64
+; CHECK-LABEL: vsel_i64
; CHECK: andnps
; CHECK: orps
; CHECK: ret
@@ -41,7 +41,7 @@ define void@vsel_i64(<2 x i64>* %v1, <2 x i64>* %v2) {
}
; Without forcing instructions, fall back to the preferred PS domain.
-; CHECK: vsel_double
+; CHECK-LABEL: vsel_double
; CHECK: andnps
; CHECK: orps
; CHECK: ret
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86.ll b/test/CodeGen/X86/sse2-intrinsics-x86.ll
index ff6c10bfe5a8..c906ecdd60c1 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -710,3 +710,37 @@ define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) {
ret i32 %res
}
declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define void @test_x86_sse2_pause() {
+ ; CHECK: pause
+ tail call void @llvm.x86.sse2.pause()
+ ret void
+}
+declare void @llvm.x86.sse2.pause() nounwind
+
+define <4 x i32> @test_x86_sse2_pshuf_d(<4 x i32> %a) {
+; CHECK-LABEL: test_x86_sse2_pshuf_d:
+; CHECK: pshufd $27
+entry:
+ %res = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27) nounwind readnone
+ ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8) nounwind readnone
+
+define <8 x i16> @test_x86_sse2_pshufl_w(<8 x i16> %a) {
+; CHECK-LABEL: test_x86_sse2_pshufl_w:
+; CHECK: pshuflw $27
+entry:
+ %res = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27) nounwind readnone
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8) nounwind readnone
+
+define <8 x i16> @test_x86_sse2_pshufh_w(<8 x i16> %a) {
+; CHECK-LABEL: test_x86_sse2_pshufh_w:
+; CHECK: pshufhw $27
+entry:
+ %res = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27) nounwind readnone
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8) nounwind readnone
diff --git a/test/CodeGen/X86/sse2-vector-shifts.ll b/test/CodeGen/X86/sse2-vector-shifts.ll
index 462def980a91..7c8d5e578898 100644
--- a/test/CodeGen/X86/sse2-vector-shifts.ll
+++ b/test/CodeGen/X86/sse2-vector-shifts.ll
@@ -9,8 +9,8 @@ entry:
}
; CHECK-LABEL: test_sllw_1:
-; CHECK: psllw $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: psllw $0, %xmm0
+; CHECK: ret
define <8 x i16> @test_sllw_2(<8 x i16> %InVec) {
entry:
@@ -24,12 +24,12 @@ entry:
define <8 x i16> @test_sllw_3(<8 x i16> %InVec) {
entry:
- %shl = shl <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <8 x i16> %shl
}
; CHECK-LABEL: test_sllw_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psllw $15, %xmm0
; CHECK-NEXT: ret
define <4 x i32> @test_slld_1(<4 x i32> %InVec) {
@@ -39,8 +39,8 @@ entry:
}
; CHECK-LABEL: test_slld_1:
-; CHECK: pslld $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: pslld $0, %xmm0
+; CHECK: ret
define <4 x i32> @test_slld_2(<4 x i32> %InVec) {
entry:
@@ -54,12 +54,12 @@ entry:
define <4 x i32> @test_slld_3(<4 x i32> %InVec) {
entry:
- %shl = shl <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
+ %shl = shl <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
ret <4 x i32> %shl
}
; CHECK-LABEL: test_slld_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: pslld $31, %xmm0
; CHECK-NEXT: ret
define <2 x i64> @test_sllq_1(<2 x i64> %InVec) {
@@ -69,8 +69,8 @@ entry:
}
; CHECK-LABEL: test_sllq_1:
-; CHECK: psllq $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: psllq $0, %xmm0
+; CHECK: ret
define <2 x i64> @test_sllq_2(<2 x i64> %InVec) {
entry:
@@ -84,12 +84,12 @@ entry:
define <2 x i64> @test_sllq_3(<2 x i64> %InVec) {
entry:
- %shl = shl <2 x i64> %InVec, <i64 64, i64 64>
+ %shl = shl <2 x i64> %InVec, <i64 63, i64 63>
ret <2 x i64> %shl
}
; CHECK-LABEL: test_sllq_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psllq $63, %xmm0
; CHECK-NEXT: ret
; SSE2 Arithmetic Shift
@@ -101,8 +101,8 @@ entry:
}
; CHECK-LABEL: test_sraw_1:
-; CHECK: psraw $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: psraw $0, %xmm0
+; CHECK: ret
define <8 x i16> @test_sraw_2(<8 x i16> %InVec) {
entry:
@@ -116,7 +116,7 @@ entry:
define <8 x i16> @test_sraw_3(<8 x i16> %InVec) {
entry:
- %shl = ashr <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = ashr <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <8 x i16> %shl
}
@@ -131,8 +131,8 @@ entry:
}
; CHECK-LABEL: test_srad_1:
-; CHECK: psrad $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: psrad $0, %xmm0
+; CHECK: ret
define <4 x i32> @test_srad_2(<4 x i32> %InVec) {
entry:
@@ -146,7 +146,7 @@ entry:
define <4 x i32> @test_srad_3(<4 x i32> %InVec) {
entry:
- %shl = ashr <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
+ %shl = ashr <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
ret <4 x i32> %shl
}
@@ -163,8 +163,8 @@ entry:
}
; CHECK-LABEL: test_srlw_1:
-; CHECK: psrlw $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: psrlw $0, %xmm0
+; CHECK: ret
define <8 x i16> @test_srlw_2(<8 x i16> %InVec) {
entry:
@@ -178,12 +178,12 @@ entry:
define <8 x i16> @test_srlw_3(<8 x i16> %InVec) {
entry:
- %shl = lshr <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+ %shl = lshr <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
ret <8 x i16> %shl
}
; CHECK-LABEL: test_srlw_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psrlw $15, %xmm0
; CHECK-NEXT: ret
define <4 x i32> @test_srld_1(<4 x i32> %InVec) {
@@ -193,8 +193,8 @@ entry:
}
; CHECK-LABEL: test_srld_1:
-; CHECK: psrld $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: psrld $0, %xmm0
+; CHECK: ret
define <4 x i32> @test_srld_2(<4 x i32> %InVec) {
entry:
@@ -208,12 +208,12 @@ entry:
define <4 x i32> @test_srld_3(<4 x i32> %InVec) {
entry:
- %shl = lshr <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
+ %shl = lshr <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
ret <4 x i32> %shl
}
; CHECK-LABEL: test_srld_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psrld $31, %xmm0
; CHECK-NEXT: ret
define <2 x i64> @test_srlq_1(<2 x i64> %InVec) {
@@ -223,8 +223,8 @@ entry:
}
; CHECK-LABEL: test_srlq_1:
-; CHECK: psrlq $0, %xmm0
-; CHECK-NEXT: ret
+; CHECK-NOT: psrlq $0, %xmm0
+; CHECK: ret
define <2 x i64> @test_srlq_2(<2 x i64> %InVec) {
entry:
@@ -238,10 +238,130 @@ entry:
define <2 x i64> @test_srlq_3(<2 x i64> %InVec) {
entry:
- %shl = lshr <2 x i64> %InVec, <i64 64, i64 64>
+ %shl = lshr <2 x i64> %InVec, <i64 63, i64 63>
ret <2 x i64> %shl
}
; CHECK-LABEL: test_srlq_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psrlq $63, %xmm0
+; CHECK-NEXT: ret
+
+
+; CHECK-LABEL: sra_sra_v4i32:
+; CHECK: psrad $6, %xmm0
+; CHECK-NEXT: retq
+define <4 x i32> @sra_sra_v4i32(<4 x i32> %x) nounwind {
+ %sra0 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
+ %sra1 = ashr <4 x i32> %sra0, <i32 4, i32 4, i32 4, i32 4>
+ ret <4 x i32> %sra1
+}
+
+; CHECK-LABEL: @srl_srl_v4i32
+; CHECK: psrld $6, %xmm0
+; CHECK-NEXT: ret
+define <4 x i32> @srl_srl_v4i32(<4 x i32> %x) nounwind {
+ %srl0 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
+ %srl1 = lshr <4 x i32> %srl0, <i32 4, i32 4, i32 4, i32 4>
+ ret <4 x i32> %srl1
+}
+
+; CHECK-LABEL: @srl_shl_v4i32
+; CHECK: andps
+; CHECK-NEXT: retq
+define <4 x i32> @srl_shl_v4i32(<4 x i32> %x) nounwind {
+ %srl0 = shl <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
+ %srl1 = lshr <4 x i32> %srl0, <i32 4, i32 4, i32 4, i32 4>
+ ret <4 x i32> %srl1
+}
+
+; CHECK-LABEL: @srl_sra_31_v4i32
+; CHECK: psrld $31, %xmm0
+; CHECK-NEXT: ret
+define <4 x i32> @srl_sra_31_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %sra = ashr <4 x i32> %x, %y
+ %srl1 = lshr <4 x i32> %sra, <i32 31, i32 31, i32 31, i32 31>
+ ret <4 x i32> %srl1
+}
+
+; CHECK-LABEL: @shl_shl_v4i32
+; CHECK: pslld $6, %xmm0
+; CHECK-NEXT: ret
+define <4 x i32> @shl_shl_v4i32(<4 x i32> %x) nounwind {
+ %shl0 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
+ %shl1 = shl <4 x i32> %shl0, <i32 4, i32 4, i32 4, i32 4>
+ ret <4 x i32> %shl1
+}
+
+; CHECK-LABEL: @shl_sra_v4i32
+; CHECK: andps
+; CHECK-NEXT: ret
+define <4 x i32> @shl_sra_v4i32(<4 x i32> %x) nounwind {
+ %shl0 = ashr <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
+ %shl1 = shl <4 x i32> %shl0, <i32 4, i32 4, i32 4, i32 4>
+ ret <4 x i32> %shl1
+}
+
+; CHECK-LABEL: @shl_srl_v4i32
+; CHECK: pslld $3, %xmm0
+; CHECK-NEXT: pand
+; CHECK-NEXT: ret
+define <4 x i32> @shl_srl_v4i32(<4 x i32> %x) nounwind {
+ %shl0 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
+ %shl1 = shl <4 x i32> %shl0, <i32 5, i32 5, i32 5, i32 5>
+ ret <4 x i32> %shl1
+}
+
+; CHECK-LABEL: @shl_zext_srl_v4i32
+; CHECK: andps
; CHECK-NEXT: ret
+define <4 x i32> @shl_zext_srl_v4i32(<4 x i16> %x) nounwind {
+ %srl = lshr <4 x i16> %x, <i16 2, i16 2, i16 2, i16 2>
+ %zext = zext <4 x i16> %srl to <4 x i32>
+ %shl = shl <4 x i32> %zext, <i32 2, i32 2, i32 2, i32 2>
+ ret <4 x i32> %shl
+}
+
+; CHECK: @sra_trunc_srl_v4i32
+; CHECK: psrad $19, %xmm0
+; CHECK-NEXT: retq
+define <4 x i16> @sra_trunc_srl_v4i32(<4 x i32> %x) nounwind {
+ %srl = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
+ %trunc = trunc <4 x i32> %srl to <4 x i16>
+ %sra = ashr <4 x i16> %trunc, <i16 3, i16 3, i16 3, i16 3>
+ ret <4 x i16> %sra
+}
+
+; CHECK-LABEL: @shl_zext_shl_v4i32
+; CHECK: pand
+; CHECK-NEXT: pslld $19, %xmm0
+; CHECK-NEXT: ret
+define <4 x i32> @shl_zext_shl_v4i32(<4 x i16> %x) nounwind {
+ %shl0 = shl <4 x i16> %x, <i16 2, i16 2, i16 2, i16 2>
+ %ext = zext <4 x i16> %shl0 to <4 x i32>
+ %shl1 = shl <4 x i32> %ext, <i32 17, i32 17, i32 17, i32 17>
+ ret <4 x i32> %shl1
+}
+
+; CHECK-LABEL: @sra_v4i32
+; CHECK: psrad $3, %xmm0
+; CHECK-NEXT: ret
+define <4 x i32> @sra_v4i32(<4 x i32> %x) nounwind {
+ %sra = ashr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %sra
+}
+
+; CHECK-LABEL: @srl_v4i32
+; CHECK: psrld $3, %xmm0
+; CHECK-NEXT: ret
+define <4 x i32> @srl_v4i32(<4 x i32> %x) nounwind {
+ %sra = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %sra
+}
+
+; CHECK-LABEL: @shl_v4i32
+; CHECK: pslld $3, %xmm0
+; CHECK-NEXT: ret
+define <4 x i32> @shl_v4i32(<4 x i32> %x) nounwind {
+ %sra = shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %sra
+}
diff --git a/test/CodeGen/X86/sse2.ll b/test/CodeGen/X86/sse2.ll
index 9147c22dd375..e8d3d6f19ed7 100644
--- a/test/CodeGen/X86/sse2.ll
+++ b/test/CodeGen/X86/sse2.ll
@@ -9,10 +9,10 @@ define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
ret void
; CHECK-LABEL: test1:
-; CHECK: movl 8(%esp), %eax
-; CHECK-NEXT: movapd (%eax), %xmm0
+; CHECK: movl 4(%esp), %eax
+; CHECK-NEXT: movl 8(%esp), %ecx
+; CHECK-NEXT: movapd (%ecx), %xmm0
; CHECK-NEXT: movlpd 12(%esp), %xmm0
-; CHECK-NEXT: movl 4(%esp), %eax
; CHECK-NEXT: movapd %xmm0, (%eax)
; CHECK-NEXT: ret
}
@@ -221,3 +221,21 @@ entry:
%double2float.i = fptrunc <4 x double> %0 to <4 x float>
ret <4 x float> %double2float.i
}
+
+define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
+; CHECK-LABEL: test_insert_64_zext
+; CHECK-NOT: xor
+; CHECK: movq
+ %1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %1
+}
+
+define <4 x i32> @PR19721(<4 x i32> %i) {
+ %bc = bitcast <4 x i32> %i to i128
+ %insert = and i128 %bc, -4294967296
+ %bc2 = bitcast i128 %insert to <4 x i32>
+ ret <4 x i32> %bc2
+
+; CHECK-LABEL: PR19721
+; CHECK: punpckldq
+}
diff --git a/test/CodeGen/X86/sse3-avx-addsub-2.ll b/test/CodeGen/X86/sse3-avx-addsub-2.ll
new file mode 100644
index 000000000000..b7706cc34bb6
--- /dev/null
+++ b/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -0,0 +1,318 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+
+
+; Verify that we correctly generate 'addsub' instructions from
+; a sequence of vector extracts + float add/sub + vector inserts.
+
+define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test1
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 2
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test2
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add = fadd float %4, %3
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test3
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 1
+ %4 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 1
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test4
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub2 = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 1
+ %4 = extractelement <4 x float> %B, i32 1
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 1
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test5
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test6(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test6
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
+ %1 = extractelement <4 x double> %A, i32 0
+ %2 = extractelement <4 x double> %B, i32 0
+ %sub = fsub double %1, %2
+ %3 = extractelement <4 x double> %A, i32 2
+ %4 = extractelement <4 x double> %B, i32 2
+ %sub2 = fsub double %3, %4
+ %5 = extractelement <4 x double> %A, i32 1
+ %6 = extractelement <4 x double> %B, i32 1
+ %add = fadd double %5, %6
+ %7 = extractelement <4 x double> %A, i32 3
+ %8 = extractelement <4 x double> %B, i32 3
+ %add2 = fadd double %7, %8
+ %vecinsert1 = insertelement <4 x double> undef, double %add, i32 1
+ %vecinsert2 = insertelement <4 x double> %vecinsert1, double %add2, i32 3
+ %vecinsert3 = insertelement <4 x double> %vecinsert2, double %sub, i32 0
+ %vecinsert4 = insertelement <4 x double> %vecinsert3, double %sub2, i32 2
+ ret <4 x double> %vecinsert4
+}
+; CHECK-LABEL: test7
+; SSE: addsubpd
+; SSE-NEXT: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8(<2 x double> %A, <2 x double> %B) {
+ %1 = extractelement <2 x double> %A, i32 0
+ %2 = extractelement <2 x double> %B, i32 0
+ %sub = fsub double %1, %2
+ %3 = extractelement <2 x double> %A, i32 1
+ %4 = extractelement <2 x double> %B, i32 1
+ %add = fadd double %3, %4
+ %vecinsert1 = insertelement <2 x double> undef, double %sub, i32 0
+ %vecinsert2 = insertelement <2 x double> %vecinsert1, double %add, i32 1
+ ret <2 x double> %vecinsert2
+}
+; CHECK-LABEL: test8
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK: ret
+
+
+define <8 x float> @test9(<8 x float> %A, <8 x float> %B) {
+ %1 = extractelement <8 x float> %A, i32 0
+ %2 = extractelement <8 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <8 x float> %A, i32 2
+ %4 = extractelement <8 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <8 x float> %A, i32 1
+ %6 = extractelement <8 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <8 x float> %A, i32 3
+ %8 = extractelement <8 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %9 = extractelement <8 x float> %A, i32 4
+ %10 = extractelement <8 x float> %B, i32 4
+ %sub3 = fsub float %9, %10
+ %11 = extractelement <8 x float> %A, i32 6
+ %12 = extractelement <8 x float> %B, i32 6
+ %sub4 = fsub float %11, %12
+ %13 = extractelement <8 x float> %A, i32 5
+ %14 = extractelement <8 x float> %B, i32 5
+ %add3 = fadd float %13, %14
+ %15 = extractelement <8 x float> %A, i32 7
+ %16 = extractelement <8 x float> %B, i32 7
+ %add4 = fadd float %15, %16
+ %vecinsert1 = insertelement <8 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <8 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <8 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <8 x float> %vecinsert3, float %sub2, i32 2
+ %vecinsert5 = insertelement <8 x float> %vecinsert4, float %add3, i32 5
+ %vecinsert6 = insertelement <8 x float> %vecinsert5, float %add4, i32 7
+ %vecinsert7 = insertelement <8 x float> %vecinsert6, float %sub3, i32 4
+ %vecinsert8 = insertelement <8 x float> %vecinsert7, float %sub4, i32 6
+ ret <8 x float> %vecinsert8
+}
+; CHECK-LABEL: test9
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+; Verify that we don't generate an addsub instruction for the following
+; functions.
+define <4 x float> @test10(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test11(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub = fsub float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test11
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test12(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 1
+ %2 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test12
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test13(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 3
+ %2 = extractelement <4 x float> %B, i32 3
+ %add = fadd float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 3
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test14(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %sub2, i32 2
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 1
+ %2 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test15
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, undef
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, undef
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
diff --git a/test/CodeGen/X86/sse3-avx-addsub.ll b/test/CodeGen/X86/sse3-avx-addsub.ll
new file mode 100644
index 000000000000..8b6674312b34
--- /dev/null
+++ b/test/CodeGen/X86/sse3-avx-addsub.ll
@@ -0,0 +1,296 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s -check-prefix=SSE -check-prefix=CHECK
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=AVX -check-prefix=CHECK
+
+; Test ADDSUB ISel patterns.
+
+; Functions below are obtained from the following source:
+;
+; typedef double double2 __attribute__((ext_vector_type(2)));
+; typedef double double4 __attribute__((ext_vector_type(4)));
+; typedef float float4 __attribute__((ext_vector_type(4)));
+; typedef float float8 __attribute__((ext_vector_type(8)));
+;
+; float4 test1(float4 A, float4 B) {
+; float4 X = A - B;
+; float4 Y = A + B;
+; return (float4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; float8 test2(float8 A, float8 B) {
+; float8 X = A - B;
+; float8 Y = A + B;
+; return (float8){X[0], Y[1], X[2], Y[3], X[4], Y[5], X[6], Y[7]};
+; }
+;
+; double4 test3(double4 A, double4 B) {
+; double4 X = A - B;
+; double4 Y = A + B;
+; return (double4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; double2 test4(double2 A, double2 B) {
+; double2 X = A - B;
+; double2 Y = A + B;
+; return (double2){X[0], Y[1]};
+; }
+
+define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %A, %B
+ %vecinit6 = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test1
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <8 x float> @test2(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %A, %B
+ %vecinit14 = shufflevector <8 x float> %sub, <8 x float> %add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test2
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %A, %B
+ %vecinit6 = shufflevector <4 x double> %sub, <4 x double> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test3
+; SSE: addsubpd
+; SSE: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test4(<2 x double> %A, <2 x double> %B) #0 {
+ %add = fadd <2 x double> %A, %B
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %sub, <2 x double> %add, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test4
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test1b(<4 x float> %A, <4 x float>* %B) {
+ %1 = load <4 x float>* %B
+ %add = fadd <4 x float> %A, %1
+ %sub = fsub <4 x float> %A, %1
+ %vecinit6 = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test1b
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <8 x float> @test2b(<8 x float> %A, <8 x float>* %B) {
+ %1 = load <8 x float>* %B
+ %add = fadd <8 x float> %A, %1
+ %sub = fsub <8 x float> %A, %1
+ %vecinit14 = shufflevector <8 x float> %sub, <8 x float> %add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test2b
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test3b(<4 x double> %A, <4 x double>* %B) {
+ %1 = load <4 x double>* %B
+ %add = fadd <4 x double> %A, %1
+ %sub = fsub <4 x double> %A, %1
+ %vecinit6 = shufflevector <4 x double> %sub, <4 x double> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test3b
+; SSE: addsubpd
+; SSE: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test4b(<2 x double> %A, <2 x double>* %B) {
+ %1 = load <2 x double>* %B
+ %sub = fsub <2 x double> %A, %1
+ %add = fadd <2 x double> %A, %1
+ %vecinit2 = shufflevector <2 x double> %sub, <2 x double> %add, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test4b
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK-NEXT: ret
+
+; Functions below are obtained from the following source:
+;
+; float4 test1(float4 A, float4 B) {
+; float4 X = A + B;
+; float4 Y = A - B;
+; return (float4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; float8 test2(float8 A, float8 B) {
+; float8 X = A + B;
+; float8 Y = A - B;
+; return (float8){X[0], Y[1], X[2], Y[3], X[4], Y[5], X[6], Y[7]};
+; }
+;
+; double4 test3(double4 A, double4 B) {
+; double4 X = A + B;
+; double4 Y = A - B;
+; return (double4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; double2 test4(double2 A, double2 B) {
+; double2 X = A + B;
+; double2 Y = A - B;
+; return (double2){X[0], Y[1]};
+; }
+
+define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %A, %B
+ %vecinit6 = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test5
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; CHECK: ret
+
+
+define <8 x float> @test6(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %A, %B
+ %vecinit14 = shufflevector <8 x float> %add, <8 x float> %sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test6
+; SSE: xorps
+; SSE-NEXT: addsubps
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; AVX-NOT: vxorps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %A, %B
+ %vecinit6 = shufflevector <4 x double> %add, <4 x double> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test7
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; AVX-NOT: vxorpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8(<2 x double> %A, <2 x double> %B) #0 {
+ %add = fadd <2 x double> %A, %B
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %add, <2 x double> %sub, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test8
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; CHECK: ret
+
+
+define <4 x float> @test5b(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %B, %A
+ %vecinit6 = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test5b
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; CHECK: ret
+
+
+define <8 x float> @test6b(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %B, %A
+ %vecinit14 = shufflevector <8 x float> %add, <8 x float> %sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test6b
+; SSE: xorps
+; SSE-NEXT: addsubps
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; AVX-NOT: vxorps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test7b(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %B, %A
+ %vecinit6 = shufflevector <4 x double> %add, <4 x double> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test7b
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; AVX-NOT: vxorpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8b(<2 x double> %A, <2 x double> %B) #0 {
+ %add = fadd <2 x double> %B, %A
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %add, <2 x double> %sub, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test8b
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/sse3.ll b/test/CodeGen/X86/sse3.ll
index 6d5b19243e45..18bdcb3912b1 100644
--- a/test/CodeGen/X86/sse3.ll
+++ b/test/CodeGen/X86/sse3.ll
@@ -209,7 +209,7 @@ entry:
; X64-LABEL: t13:
; X64: punpcklqdq %xmm0, %xmm1
; X64: pextrw $3, %xmm1, %eax
-; X64: pshufd $52, %xmm1, %xmm0
+; X64: pshufhw $12, %xmm1, %xmm0
; X64: pinsrw $4, %eax, %xmm0
; X64: ret
}
diff --git a/test/CodeGen/X86/sse41-blend.ll b/test/CodeGen/X86/sse41-blend.ll
index a32f5de30a6c..3a4812119f8a 100644
--- a/test/CodeGen/X86/sse41-blend.ll
+++ b/test/CodeGen/X86/sse41-blend.ll
@@ -1,42 +1,42 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -mattr=+sse4.1 | FileCheck %s
;CHECK-LABEL: vsel_float:
-;CHECK: blendvps
+;CHECK: blendps
;CHECK: ret
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
- %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x float> %v1, <4 x float> %v2
ret <4 x float> %vsel
}
;CHECK-LABEL: vsel_4xi8:
-;CHECK: blendvps
+;CHECK: blendps
;CHECK: ret
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
- %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i8> %v1, <4 x i8> %v2
+ %vsel = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i8> %v1, <4 x i8> %v2
ret <4 x i8> %vsel
}
;CHECK-LABEL: vsel_4xi16:
-;CHECK: blendvps
+;CHECK: blendps
;CHECK: ret
define <4 x i16> @vsel_4xi16(<4 x i16> %v1, <4 x i16> %v2) {
- %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i16> %v1, <4 x i16> %v2
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i16> %v1, <4 x i16> %v2
ret <4 x i16> %vsel
}
;CHECK-LABEL: vsel_i32:
-;CHECK: blendvps
+;CHECK: blendps
;CHECK: ret
define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
- %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %v1, <4 x i32> %v2
+ %vsel = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %v1, <4 x i32> %v2
ret <4 x i32> %vsel
}
;CHECK-LABEL: vsel_double:
-;CHECK: blendvpd
+;CHECK: movsd
;CHECK: ret
define <4 x double> @vsel_double(<4 x double> %v1, <4 x double> %v2) {
%vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %v1, <4 x double> %v2
@@ -45,7 +45,7 @@ define <4 x double> @vsel_double(<4 x double> %v1, <4 x double> %v2) {
;CHECK-LABEL: vsel_i64:
-;CHECK: blendvpd
+;CHECK: movsd
;CHECK: ret
define <4 x i64> @vsel_i64(<4 x i64> %v1, <4 x i64> %v2) {
%vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> %v1, <4 x i64> %v2
@@ -88,3 +88,53 @@ entry:
store double %extract214vector_func.i, double addrspace(1)* undef, align 8
ret void
}
+
+; If we can figure out that a blend has a constant mask, we should emit the
+; blend instruction with an immediate mask.
+define <2 x double> @constant_blendvpd(<2 x double> %xy, <2 x double> %ab) {
+; In this case, we emit a simple movsd
+; CHECK-LABEL: constant_blendvpd
+; CHECK: movsd
+; CHECK: ret
+ %1 = select <2 x i1> <i1 true, i1 false>, <2 x double> %xy, <2 x double> %ab
+ ret <2 x double> %1
+}
+
+define <4 x float> @constant_blendvps(<4 x float> %xyzw, <4 x float> %abcd) {
+; CHECK-LABEL: constant_blendvps
+; CHECK-NOT: mov
+; CHECK: blendps $7
+; CHECK: ret
+ %1 = select <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %xyzw, <4 x float> %abcd
+ ret <4 x float> %1
+}
+
+define <16 x i8> @constant_pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd) {
+; CHECK-LABEL: constant_pblendvb:
+; CHECK: movaps
+; CHECK: pblendvb
+; CHECK: ret
+ %1 = select <16 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x i8> %xyzw, <16 x i8> %abcd
+ ret <16 x i8> %1
+}
+
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
+
+;; 2 tests for shufflevectors that optimize to blend + immediate
+; CHECK-LABEL: @blend_shufflevector_4xfloat
+; CHECK: blendps $6, %xmm1, %xmm0
+; CHECK: ret
+define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_8xi16
+; CHECK: pblendw $134, %xmm1, %xmm0
+; CHECK: ret
+define <8 x i16> @blend_shufflevector_8xi16(<8 x i16> %a, <8 x i16> %b) {
+ %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 15>
+ ret <8 x i16> %1
+}
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index c15e24ccc96b..986488f531ec 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s -check-prefix=X32 --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s -check-prefix=X64 --check-prefix=CHECK
@g16 = external global i16
@@ -249,3 +249,486 @@ entry:
; X64: ret
}
+define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+entry:
+ %0 = load <4 x float>* %pb, align 16
+ %vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x float> %vecinit6
+; CHECK-LABEL: insertps_from_shufflevector_1:
+; CHECK-NOT: movss
+; CHECK-NOT: shufps
+; CHECK: insertps $48,
+; CHECK: ret
+}
+
+define <4 x float> @insertps_from_shufflevector_2(<4 x float> %a, <4 x float> %b) {
+entry:
+ %vecinit6 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
+ ret <4 x float> %vecinit6
+; CHECK-LABEL: insertps_from_shufflevector_2:
+; CHECK-NOT: shufps
+; CHECK: insertps $96,
+; CHECK: ret
+}
+
+; For loading an i32 from memory into an xmm register we use pinsrd
+; instead of insertps
+define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocapture readonly %pb) {
+entry:
+ %0 = load <4 x i32>* %pb, align 16
+ %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x i32> %vecinit6
+; CHECK-LABEL: pinsrd_from_shufflevector_i32:
+; CHECK-NOT: movss
+; CHECK-NOT: shufps
+; CHECK: pinsrd $3,
+; CHECK: ret
+}
+
+define <4 x i32> @insertps_from_shufflevector_i32_2(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
+ ret <4 x i32> %vecinit6
+; CHECK-LABEL: insertps_from_shufflevector_i32_2:
+; CHECK-NOT: shufps
+; CHECK-NOT: movaps
+; CHECK: insertps $208,
+; CHECK: ret
+}
+
+define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b) {
+; CHECK-LABEL: insertps_from_load_ins_elt_undef:
+; CHECK-NOT: movss
+; CHECK-NOT: shufps
+; CHECK: insertps $16,
+; CHECK: ret
+ %1 = load float* %b, align 4
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
+ ret <4 x float> %result
+}
+
+define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
+; CHECK-LABEL: insertps_from_load_ins_elt_undef_i32:
+; TODO: Like on pinsrd_from_shufflevector_i32, remove this mov instr
+;; aCHECK-NOT: movd
+; CHECK-NOT: shufps
+; CHECK: insertps $32,
+; CHECK: ret
+ %1 = load i32* %b, align 4
+ %2 = insertelement <4 x i32> undef, i32 %1, i32 0
+ %result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+ ret <4 x i32> %result
+}
+
+;;;;;; Shuffles optimizable with a single insertps instruction
+define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_XYZ0:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $8
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecext1 = extractelement <4 x float> %x, i32 1
+ %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
+ %vecext3 = extractelement <4 x float> %x, i32 2
+ %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext3, i32 2
+ %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
+ ret <4 x float> %vecinit5
+}
+
+define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_XY00:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $12
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecext1 = extractelement <4 x float> %x, i32 1
+ %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
+ %vecinit3 = insertelement <4 x float> %vecinit2, float 0.0, i32 2
+ %vecinit4 = insertelement <4 x float> %vecinit3, float 0.0, i32 3
+ ret <4 x float> %vecinit4
+}
+
+define <4 x float> @shuf_XYY0(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_XYY0:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $104
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecext1 = extractelement <4 x float> %x, i32 1
+ %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
+ %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext1, i32 2
+ %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
+ ret <4 x float> %vecinit5
+}
+
+define <4 x float> @shuf_XYW0(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_XYW0:
+; CHECK: insertps $232
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecext1 = extractelement <4 x float> %x, i32 1
+ %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
+ %vecext2 = extractelement <4 x float> %x, i32 3
+ %vecinit3 = insertelement <4 x float> %vecinit2, float %vecext2, i32 2
+ %vecinit4 = insertelement <4 x float> %vecinit3, float 0.0, i32 3
+ ret <4 x float> %vecinit4
+}
+
+define <4 x float> @shuf_W00W(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_W00W:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $198
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 3
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecinit2 = insertelement <4 x float> %vecinit, float 0.0, i32 1
+ %vecinit3 = insertelement <4 x float> %vecinit2, float 0.0, i32 2
+ %vecinit4 = insertelement <4 x float> %vecinit3, float %vecext, i32 3
+ ret <4 x float> %vecinit4
+}
+
+define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_X00A:
+; CHECK-NOT: movaps
+; CHECK-NOT: shufps
+; CHECK: insertps $48
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
+ %vecinit2 = insertelement <4 x float> %vecinit1, float 0.0, i32 2
+ %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x float> %vecinit4
+}
+
+define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_X00X:
+; CHECK-NOT: movaps
+; CHECK-NOT: shufps
+; CHECK: insertps $48
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
+ %vecinit2 = insertelement <4 x float> %vecinit1, float 0.0, i32 2
+ %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x float> %vecinit4
+}
+
+define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
+; CHECK-LABEL: shuf_X0YC:
+; CHECK: shufps
+; CHECK-NOT: movhlps
+; CHECK-NOT: shufps
+; CHECK: insertps $176
+; CHECK: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
+ %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 5, i32 undef>
+ %vecinit5 = shufflevector <4 x float> %vecinit3, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
+ ret <4 x float> %vecinit5
+}
+
+define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_XYZ0:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $8
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecext1 = extractelement <4 x i32> %x, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+ %vecext3 = extractelement <4 x i32> %x, i32 2
+ %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext3, i32 2
+ %vecinit5 = insertelement <4 x i32> %vecinit4, i32 0, i32 3
+ ret <4 x i32> %vecinit5
+}
+
+define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_XY00:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $12
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecext1 = extractelement <4 x i32> %x, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+ %vecinit3 = insertelement <4 x i32> %vecinit2, i32 0, i32 2
+ %vecinit4 = insertelement <4 x i32> %vecinit3, i32 0, i32 3
+ ret <4 x i32> %vecinit4
+}
+
+define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_XYY0:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $104
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecext1 = extractelement <4 x i32> %x, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+ %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext1, i32 2
+ %vecinit5 = insertelement <4 x i32> %vecinit4, i32 0, i32 3
+ ret <4 x i32> %vecinit5
+}
+
+define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_XYW0:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $232
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecext1 = extractelement <4 x i32> %x, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+ %vecext2 = extractelement <4 x i32> %x, i32 3
+ %vecinit3 = insertelement <4 x i32> %vecinit2, i32 %vecext2, i32 2
+ %vecinit4 = insertelement <4 x i32> %vecinit3, i32 0, i32 3
+ ret <4 x i32> %vecinit4
+}
+
+define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_W00W:
+; CHECK-NOT: pextrd
+; CHECK-NOT: punpckldq
+; CHECK: insertps $198
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 3
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecinit2 = insertelement <4 x i32> %vecinit, i32 0, i32 1
+ %vecinit3 = insertelement <4 x i32> %vecinit2, i32 0, i32 2
+ %vecinit4 = insertelement <4 x i32> %vecinit3, i32 %vecext, i32 3
+ ret <4 x i32> %vecinit4
+}
+
+define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_X00A:
+; CHECK-NOT: movaps
+; CHECK-NOT: shufps
+; CHECK: insertps $48
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
+ %vecinit4 = shufflevector <4 x i32> %vecinit2, <4 x i32> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x i32> %vecinit4
+}
+
+define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_X00X:
+; CHECK-NOT: movaps
+; CHECK-NOT: shufps
+; CHECK: insertps $48
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
+ %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
+ %vecinit4 = shufflevector <4 x i32> %vecinit2, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x i32> %vecinit4
+}
+
+define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
+; CHECK-LABEL: i32_shuf_X0YC:
+; CHECK: shufps
+; CHECK-NOT: movhlps
+; CHECK-NOT: shufps
+; CHECK: insertps $176
+; CHECK: ret
+ %vecext = extractelement <4 x i32> %x, i32 0
+ %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+ %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
+ %vecinit3 = shufflevector <4 x i32> %vecinit1, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 5, i32 undef>
+ %vecinit5 = shufflevector <4 x i32> %vecinit3, <4 x i32> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
+ ret <4 x i32> %vecinit5
+}
+
+;; Test for a bug in the first implementation of LowerBuildVectorv4x32
+define <4 x float> @test_insertps_no_undef(<4 x float> %x) {
+; CHECK-LABEL: test_insertps_no_undef:
+; CHECK: movaps %xmm0, %xmm1
+; CHECK-NEXT: insertps $8, %xmm1, %xmm1
+; CHECK-NEXT: maxps %xmm1, %xmm0
+; CHECK-NEXT: ret
+ %vecext = extractelement <4 x float> %x, i32 0
+ %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
+ %vecext1 = extractelement <4 x float> %x, i32 1
+ %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
+ %vecext3 = extractelement <4 x float> %x, i32 2
+ %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext3, i32 2
+ %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
+ %mask = fcmp olt <4 x float> %vecinit5, %x
+ %res = select <4 x i1> %mask, <4 x float> %x, <4 x float>%vecinit5
+ ret <4 x float> %res
+}
+
+define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: blendvb_fallback
+; CHECK: blendvb
+; CHECK: ret
+ %ret = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %y
+ ret <8 x i16> %ret
+}
+
+define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+; CHECK-LABEL: insertps_from_vector_load:
+; On X32, account for the argument's move to registers
+; X32: movl 4(%esp), %eax
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK-NEXT: ret
+ %1 = load <4 x float>* %pb, align 16
+ %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
+ ret <4 x float> %2
+}
+
+;; Use a non-zero CountS for insertps
+define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+; CHECK-LABEL: insertps_from_vector_load_offset:
+; On X32, account for the argument's move to registers
+; X32: movl 4(%esp), %eax
+; CHECK-NOT: mov
+;; Try to match a bit more of the instr, since we need the load's offset.
+; CHECK: insertps $96, 4(%{{...}}), %
+; CHECK-NEXT: ret
+ %1 = load <4 x float>* %pb, align 16
+ %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
+ ret <4 x float> %2
+}
+
+define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
+; CHECK-LABEL: insertps_from_vector_load_offset_2:
+; On X32, account for the argument's move to registers
+; X32: movl 4(%esp), %eax
+; X32: movl 8(%esp), %ecx
+; CHECK-NOT: mov
+;; Try to match a bit more of the instr, since we need the load's offset.
+; CHECK: insertps $192, 12(%{{...}},%{{...}}), %
+; CHECK-NEXT: ret
+ %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
+ %2 = load <4 x float>* %1, align 16
+ %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
+ ret <4 x float> %3
+}
+
+define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
+; CHECK-LABEL: insertps_from_broadcast_loadf32:
+; On X32, account for the arguments' move to registers
+; X32: movl 8(%esp), %eax
+; X32: movl 4(%esp), %ecx
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK-NEXT: ret
+ %1 = getelementptr inbounds float* %fb, i64 %index
+ %2 = load float* %1, align 4
+ %3 = insertelement <4 x float> undef, float %2, i32 0
+ %4 = insertelement <4 x float> %3, float %2, i32 1
+ %5 = insertelement <4 x float> %4, float %2, i32 2
+ %6 = insertelement <4 x float> %5, float %2, i32 3
+ %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
+ ret <4 x float> %7
+}
+
+define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
+; CHECK-LABEL: insertps_from_broadcast_loadv4f32:
+; On X32, account for the arguments' move to registers
+; X32: movl 4(%esp), %{{...}}
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK-NEXT: ret
+ %1 = load <4 x float>* %b, align 4
+ %2 = extractelement <4 x float> %1, i32 0
+ %3 = insertelement <4 x float> undef, float %2, i32 0
+ %4 = insertelement <4 x float> %3, float %2, i32 1
+ %5 = insertelement <4 x float> %4, float %2, i32 2
+ %6 = insertelement <4 x float> %5, float %2, i32 3
+ %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
+ ret <4 x float> %7
+}
+
+;; FIXME: We're emitting an extraneous pshufd/vbroadcast.
+define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
+; CHECK-LABEL: insertps_from_broadcast_multiple_use:
+; On X32, account for the arguments' move to registers
+; X32: movl 8(%esp), %eax
+; X32: movl 4(%esp), %ecx
+; CHECK: movss
+; CHECK-NOT: mov
+; CHECK: insertps $48
+; CHECK: insertps $48
+; CHECK: insertps $48
+; CHECK: insertps $48
+; CHECK: addps
+; CHECK: addps
+; CHECK: addps
+; CHECK-NEXT: ret
+ %1 = getelementptr inbounds float* %fb, i64 %index
+ %2 = load float* %1, align 4
+ %3 = insertelement <4 x float> undef, float %2, i32 0
+ %4 = insertelement <4 x float> %3, float %2, i32 1
+ %5 = insertelement <4 x float> %4, float %2, i32 2
+ %6 = insertelement <4 x float> %5, float %2, i32 3
+ %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
+ %8 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %b, <4 x float> %6, i32 48)
+ %9 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %c, <4 x float> %6, i32 48)
+ %10 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %d, <4 x float> %6, i32 48)
+ %11 = fadd <4 x float> %7, %8
+ %12 = fadd <4 x float> %9, %10
+ %13 = fadd <4 x float> %11, %12
+ ret <4 x float> %13
+}
+
+define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
+; CHECK-LABEL: insertps_with_undefs:
+; CHECK-NOT: shufps
+; CHECK: insertps $32, %xmm0
+; CHECK: ret
+ %1 = load float* %b, align 4
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
+ ret <4 x float> %result
+}
+
+; Test for a bug in X86ISelLowering.cpp:getINSERTPS where we were using
+; the destination index to change the load, instead of the source index.
+define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
+; CHECK-LABEL: pr20087:
+; CHECK: insertps $48
+; CHECK: ret
+ %load = load <4 x float> *%ptr
+ %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
+ ret <4 x float> %ret
+}
+
+; Edge case for insertps where we end up with a shuffle with mask=<0, 7, -1, -1>
+define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
+; CHECK-LABEL: insertps_pr20411:
+; CHECK: movaps {{[^,]*}}, %[[REG1:xmm.]]
+; CHECK: pshufd {{.*}} ## [[REG2:xmm.]] = mem[3,0,0,0]
+; CHECK: insertps {{.*}} ## xmm1 = [[REG2]][0],[[REG1]][3]{{.*}}
+
+ %gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle109 = shufflevector <4 x i32> <i32 4, i32 5, i32 6, i32 7>, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; 4 5 6 7
+
+ %shuffle116 = shufflevector <8 x i32> %gather_load, <8 x i32> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef> ; 3 x x x
+ %shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 4, i32 3, i32 undef, i32 undef> ; 3 7 x x
+
+ %ptrcast = bitcast i32* %RET to <4 x i32>*
+ store <4 x i32> %shuffle117, <4 x i32>* %ptrcast, align 4
+ ret void
+}
diff --git a/test/CodeGen/X86/ssp-data-layout.ll b/test/CodeGen/X86/ssp-data-layout.ll
new file mode 100644
index 000000000000..e76ad7b871ba
--- /dev/null
+++ b/test/CodeGen/X86/ssp-data-layout.ll
@@ -0,0 +1,510 @@
+; RUN: llc < %s -disable-fp-elim -mtriple=x86_64-pc-linux-gnu -mcpu=corei7 -o - | FileCheck %s
+; This test is fairly fragile. The goal is to ensure that "large" stack
+; objects are allocated closest to the stack protector (i.e., farthest away
+; from the stack pointer). In standard SSP mode this means that large (>=
+; ssp-buffer-size) arrays and structures containing such arrays are
+; closest to the protector. With sspstrong and sspreq this means large
+; arrays/structures-with-arrays are closest, followed by small (< ssp-buffer-size)
+; arrays/structures-with-arrays, and then addr-taken variables.
+;
+; Ideally, we only want to verify that the objects appear in the correct groups
+; and that the groups have the correct relative stack offsets. The ordering
+; within a group is not relevant to this test. Unfortunately, there is no
+; elegant way to do this, so we just match the offset for each object.
+; RUN: llc < %s -disable-fp-elim -mtriple=x86_64-unknown-unknown -O0 -mcpu=corei7 -o - \
+; RUN: | FileCheck --check-prefix=FAST-NON-LIN %s
+; FastISel was not setting the StackProtectorIndex when lowering
+; Intrinsic::stackprotector and as a result the stack re-arrangement code was
+; never applied. This problem only shows up on non-Linux platforms because on
+; Linux the stack protector cookie is loaded from a special address space which
+; always triggers standard ISel. Run a basic test to ensure that at -O0
+; on a non-Linux target the data layout rules are triggered.
+
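+; For reference, a rough C equivalent of the layout_* functions below. This is
+; an illustrative sketch only, not the exact source the IR was generated from;
+; the get_*/end_* helpers are the externs declared at the bottom of this file.
+; layout_sspstrong and layout_sspreq share the same body and differ only in
+; their function attributes.
+;
+;   struct struct_large_char    { char  c[8]; };
+;   struct struct_small_char    { char  c[2]; };
+;   struct struct_large_nonchar { int   i[8]; };
+;   struct struct_small_nonchar { short s[2]; };
+;
+;   void layout_ssp(void) {
+;     int x = get_scalar1();   end_scalar1();
+;     int y = get_scalar2();   end_scalar2();
+;     int z = get_scalar3();   end_scalar3();
+;     int ptr = get_addrof();  end_addrof();
+;     short small2[2]; small2[0] = get_small_nonchar(); end_small_nonchar();
+;     int   large2[8]; large2[0] = get_large_nonchar(); end_large_nonchar();
+;     char  small[2];  small[0]  = get_small_char();    end_small_char();
+;     char  large[8];  large[0]  = get_large_char();    end_large_char();
+;     struct struct_large_char    a; a.c[0] = get_struct_large_char();    end_struct_large_char();
+;     struct struct_small_char    b; b.c[0] = get_struct_small_char();    end_struct_small_char();
+;     struct struct_large_nonchar c; c.i[0] = get_struct_large_nonchar(); end_struct_large_nonchar();
+;     struct struct_small_nonchar d; d.s[0] = get_struct_small_nonchar(); end_struct_small_nonchar();
+;     takes_all(a, b, c, d, large, small, large2, small2, &ptr, x, y, z);
+;   }
+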
+%struct.struct_large_char = type { [8 x i8] }
+%struct.struct_small_char = type { [2 x i8] }
+%struct.struct_large_nonchar = type { [8 x i32] }
+%struct.struct_small_nonchar = type { [2 x i16] }
+
+define void @layout_ssp() ssp {
+entry:
+; Expected stack layout for ssp is
+; -16 large_char . Group 1, nested arrays, arrays >= ssp-buffer-size
+; -24 struct_large_char .
+; -28 scalar1 | Everything else
+; -32 scalar2
+; -36 scalar3
+; -40 addr-of
+; -44 small_nonchar
+; -80 large_nonchar
+; -82 small_char
+; -88 struct_small_char
+; -120 struct_large_nonchar
+; -128 struct_small_nonchar
+
+; CHECK: layout_ssp:
+; CHECK: call{{l|q}} get_scalar1
+; CHECK: movl %eax, -28(
+; CHECK: call{{l|q}} end_scalar1
+
+; CHECK: call{{l|q}} get_scalar2
+; CHECK: movl %eax, -32(
+; CHECK: call{{l|q}} end_scalar2
+
+; CHECK: call{{l|q}} get_scalar3
+; CHECK: movl %eax, -36(
+; CHECK: call{{l|q}} end_scalar3
+
+; CHECK: call{{l|q}} get_addrof
+; CHECK: movl %eax, -40(
+; CHECK: call{{l|q}} end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: movw %ax, -44(
+; CHECK: call{{l|q}} end_small_nonchar
+
+; CHECK: call{{l|q}} get_large_nonchar
+; CHECK: movl %eax, -80(
+; CHECK: call{{l|q}} end_large_nonchar
+
+; CHECK: call{{l|q}} get_small_char
+; CHECK: movb %al, -82(
+; CHECK: call{{l|q}} end_small_char
+
+; CHECK: call{{l|q}} get_large_char
+; CHECK: movb %al, -16(
+; CHECK: call{{l|q}} end_large_char
+
+; CHECK: call{{l|q}} get_struct_large_char
+; CHECK: movb %al, -24(
+; CHECK: call{{l|q}} end_struct_large_char
+
+; CHECK: call{{l|q}} get_struct_small_char
+; CHECK: movb %al, -88(
+; CHECK: call{{l|q}} end_struct_small_char
+
+; CHECK: call{{l|q}} get_struct_large_nonchar
+; CHECK: movl %eax, -120(
+; CHECK: call{{l|q}} end_struct_large_nonchar
+
+; CHECK: call{{l|q}} get_struct_small_nonchar
+; CHECK: movw %ax, -128(
+; CHECK: call{{l|q}} end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @layout_sspstrong() nounwind uwtable sspstrong {
+entry:
+; Expected stack layout for sspstrong is
+; -48 large_nonchar . Group 1, nested arrays,
+; -56 large_char . arrays >= ssp-buffer-size
+; -64 struct_large_char .
+; -96 struct_large_nonchar .
+; -100 small_non_char | Group 2, nested arrays,
+; -102 small_char | arrays < ssp-buffer-size
+; -104 struct_small_char |
+; -112 struct_small_nonchar |
+; -116 addrof * Group 3, addr-of local
+; -120 scalar + Group 4, everything else
+; -124 scalar +
+; -128 scalar +
+;
+; CHECK: layout_sspstrong:
+; CHECK: call{{l|q}} get_scalar1
+; CHECK: movl %eax, -120(
+; CHECK: call{{l|q}} end_scalar1
+
+; CHECK: call{{l|q}} get_scalar2
+; CHECK: movl %eax, -124(
+; CHECK: call{{l|q}} end_scalar2
+
+; CHECK: call{{l|q}} get_scalar3
+; CHECK: movl %eax, -128(
+; CHECK: call{{l|q}} end_scalar3
+
+; CHECK: call{{l|q}} get_addrof
+; CHECK: movl %eax, -116(
+; CHECK: call{{l|q}} end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: movw %ax, -100(
+; CHECK: call{{l|q}} end_small_nonchar
+
+; CHECK: call{{l|q}} get_large_nonchar
+; CHECK: movl %eax, -48(
+; CHECK: call{{l|q}} end_large_nonchar
+
+; CHECK: call{{l|q}} get_small_char
+; CHECK: movb %al, -102(
+; CHECK: call{{l|q}} end_small_char
+
+; CHECK: call{{l|q}} get_large_char
+; CHECK: movb %al, -56(
+; CHECK: call{{l|q}} end_large_char
+
+; CHECK: call{{l|q}} get_struct_large_char
+; CHECK: movb %al, -64(
+; CHECK: call{{l|q}} end_struct_large_char
+
+; CHECK: call{{l|q}} get_struct_small_char
+; CHECK: movb %al, -104(
+; CHECK: call{{l|q}} end_struct_small_char
+
+; CHECK: call{{l|q}} get_struct_large_nonchar
+; CHECK: movl %eax, -96(
+; CHECK: call{{l|q}} end_struct_large_nonchar
+
+; CHECK: call{{l|q}} get_struct_small_nonchar
+; CHECK: movw %ax, -112(
+; CHECK: call{{l|q}} end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @layout_sspreq() nounwind uwtable sspreq {
+entry:
+; Expected stack layout for sspreq is the same as sspstrong
+;
+; CHECK: layout_sspreq:
+; CHECK: call{{l|q}} get_scalar1
+; CHECK: movl %eax, -120(
+; CHECK: call{{l|q}} end_scalar1
+
+; CHECK: call{{l|q}} get_scalar2
+; CHECK: movl %eax, -124(
+; CHECK: call{{l|q}} end_scalar2
+
+; CHECK: call{{l|q}} get_scalar3
+; CHECK: movl %eax, -128(
+; CHECK: call{{l|q}} end_scalar3
+
+; CHECK: call{{l|q}} get_addrof
+; CHECK: movl %eax, -116(
+; CHECK: call{{l|q}} end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: movw %ax, -100(
+; CHECK: call{{l|q}} end_small_nonchar
+
+; CHECK: call{{l|q}} get_large_nonchar
+; CHECK: movl %eax, -48(
+; CHECK: call{{l|q}} end_large_nonchar
+
+; CHECK: call{{l|q}} get_small_char
+; CHECK: movb %al, -102(
+; CHECK: call{{l|q}} end_small_char
+
+; CHECK: call{{l|q}} get_large_char
+; CHECK: movb %al, -56(
+; CHECK: call{{l|q}} end_large_char
+
+; CHECK: call{{l|q}} get_struct_large_char
+; CHECK: movb %al, -64(
+; CHECK: call{{l|q}} end_struct_large_char
+
+; CHECK: call{{l|q}} get_struct_small_char
+; CHECK: movb %al, -104(
+; CHECK: call{{l|q}} end_struct_small_char
+
+; CHECK: call{{l|q}} get_struct_large_nonchar
+; CHECK: movl %eax, -96(
+; CHECK: call{{l|q}} end_struct_large_nonchar
+
+; CHECK: call{{l|q}} get_struct_small_nonchar
+; CHECK: movw %ax, -112(
+; CHECK: call{{l|q}} end_struct_small_nonchar
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %ptr = alloca i32, align 4
+ %small2 = alloca [2 x i16], align 2
+ %large2 = alloca [8 x i32], align 16
+ %small = alloca [2 x i8], align 1
+ %large = alloca [8 x i8], align 1
+ %a = alloca %struct.struct_large_char, align 1
+ %b = alloca %struct.struct_small_char, align 1
+ %c = alloca %struct.struct_large_nonchar, align 8
+ %d = alloca %struct.struct_small_nonchar, align 2
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call i32 @get_scalar2()
+ store i32 %call1, i32* %y, align 4
+ call void @end_scalar2()
+ %call2 = call i32 @get_scalar3()
+ store i32 %call2, i32* %z, align 4
+ call void @end_scalar3()
+ %call3 = call i32 @get_addrof()
+ store i32 %call3, i32* %ptr, align 4
+ call void @end_addrof()
+ %call4 = call signext i16 @get_small_nonchar()
+ %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ store i16 %call4, i16* %arrayidx, align 2
+ call void @end_small_nonchar()
+ %call5 = call i32 @get_large_nonchar()
+ %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ store i32 %call5, i32* %arrayidx6, align 4
+ call void @end_large_nonchar()
+ %call7 = call signext i8 @get_small_char()
+ %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ store i8 %call7, i8* %arrayidx8, align 1
+ call void @end_small_char()
+ %call9 = call signext i8 @get_large_char()
+ %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call9, i8* %arrayidx10, align 1
+ call void @end_large_char()
+ %call11 = call signext i8 @get_struct_large_char()
+ %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ store i8 %call11, i8* %arrayidx12, align 1
+ call void @end_struct_large_char()
+ %call13 = call signext i8 @get_struct_small_char()
+ %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ store i8 %call13, i8* %arrayidx15, align 1
+ call void @end_struct_small_char()
+ %call16 = call i32 @get_struct_large_nonchar()
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ store i32 %call16, i32* %arrayidx18, align 4
+ call void @end_struct_large_nonchar()
+ %call19 = call signext i16 @get_struct_small_nonchar()
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ store i16 %call19, i16* %arrayidx21, align 2
+ call void @end_struct_small_nonchar()
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %0 = load i32* %x, align 4
+ %1 = load i32* %y, align 4
+ %2 = load i32* %z, align 4
+ %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %3 = bitcast [8 x i8]* %coerce.dive to i64*
+ %4 = load i64* %3, align 1
+ %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+ %6 = load i16* %5, align 1
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+ %8 = load i32* %7, align 1
+ call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+ ret void
+}
+
+define void @fast_non_linux() ssp {
+entry:
+; FAST-NON-LIN: fast_non_linux:
+; FAST-NON-LIN: call{{l|q}} get_scalar1
+; FAST-NON-LIN: movl %eax, -20(
+; FAST-NON-LIN: call{{l|q}} end_scalar1
+
+; FAST-NON-LIN: call{{l|q}} get_large_char
+; FAST-NON-LIN: movb %al, -16(
+; FAST-NON-LIN: call{{l|q}} end_large_char
+ %x = alloca i32, align 4
+ %large = alloca [8 x i8], align 1
+ %call = call i32 @get_scalar1()
+ store i32 %call, i32* %x, align 4
+ call void @end_scalar1()
+ %call1 = call signext i8 @get_large_char()
+ %arrayidx = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ store i8 %call1, i8* %arrayidx, align 1
+ call void @end_large_char()
+ %0 = load i32* %x, align 4
+ %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ call void @takes_two(i32 %0, i8* %arraydecay)
+ ret void
+}
+
+declare i32 @get_scalar1()
+declare void @end_scalar1()
+
+declare i32 @get_scalar2()
+declare void @end_scalar2()
+
+declare i32 @get_scalar3()
+declare void @end_scalar3()
+
+declare i32 @get_addrof()
+declare void @end_addrof()
+
+declare signext i16 @get_small_nonchar()
+declare void @end_small_nonchar()
+
+declare i32 @get_large_nonchar()
+declare void @end_large_nonchar()
+
+declare signext i8 @get_small_char()
+declare void @end_small_char()
+
+declare signext i8 @get_large_char()
+declare void @end_large_char()
+
+declare signext i8 @get_struct_large_char()
+declare void @end_struct_large_char()
+
+declare signext i8 @get_struct_small_char()
+declare void @end_struct_small_char()
+
+declare i32 @get_struct_large_nonchar()
+declare void @end_struct_large_nonchar()
+
+declare signext i16 @get_struct_small_nonchar()
+declare void @end_struct_small_nonchar()
+
+declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
+declare void @takes_two(i32, i8*)
diff --git a/test/CodeGen/X86/stack-align-memcpy.ll b/test/CodeGen/X86/stack-align-memcpy.ll
index 87bb85fad83e..0cc3aa848891 100644
--- a/test/CodeGen/X86/stack-align-memcpy.ll
+++ b/test/CodeGen/X86/stack-align-memcpy.ll
@@ -2,6 +2,9 @@
%struct.foo = type { [88 x i8] }
+declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind
+declare void @baz(i8*) nounwind
+
; PR15249
; We can't use rep;movsl here because it clobbers the base pointer in %esi.
define void @test1(%struct.foo* nocapture %x, i32 %y) nounwind {
@@ -15,4 +18,26 @@ define void @test1(%struct.foo* nocapture %x, i32 %y) nounwind {
; CHECK-NOT: rep;movsl
}
-declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind
+; PR19012
+; Also don't clobber %esi if the dynamic alloca comes after the memcpy.
+define void @test2(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
+ call void @bar(i8* %z, %struct.foo* align 4 byval %x)
+ %dynalloc = alloca i8, i32 %y, align 1
+ call void @baz(i8* %dynalloc)
+ ret void
+
+; CHECK-LABEL: test2:
+; CHECK: movl %esp, %esi
+; CHECK-NOT: rep;movsl
+}
+
+; Check that we do use rep movs if we make the alloca static.
+define void @test3(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
+ call void @bar(i8* %z, %struct.foo* align 4 byval %x)
+ %statalloc = alloca i8, i32 8, align 1
+ call void @baz(i8* %statalloc)
+ ret void
+
+; CHECK-LABEL: test3:
+; CHECK: rep;movsl
+}
diff --git a/test/CodeGen/X86/stack-protector-dbginfo.ll b/test/CodeGen/X86/stack-protector-dbginfo.ll
index bd27ac347690..cf88ade9363d 100644
--- a/test/CodeGen/X86/stack-protector-dbginfo.ll
+++ b/test/CodeGen/X86/stack-protector-dbginfo.ll
@@ -30,10 +30,10 @@ attributes #0 = { sspreq }
!2 = metadata !{metadata !3}
!3 = metadata !{i32 786436, metadata !1, metadata !4, metadata !"", i32 20, i64 32, i64 32, i32 0, i32 0, null, metadata !6, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [line 20, size 32, align 32, offset 0] [def] [from ]
!4 = metadata !{i32 786451, metadata !1, null, metadata !"C", i32 19, i64 8, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [C] [line 19, size 8, align 8, offset 0] [def] [from ]
-!5 = metadata !{i32 0}
+!5 = metadata !{}
!6 = metadata !{metadata !7}
!7 = metadata !{i32 786472, metadata !"max_frame_size", i64 0} ; [ DW_TAG_enumerator ] [max_frame_size :: 0]
-!8 = metadata !{metadata !9}
+!8 = metadata !{metadata !9, metadata !24, metadata !41, metadata !65}
!9 = metadata !{i32 786478, metadata !1, metadata !10, metadata !"read_response_size", metadata !"read_response_size", metadata !"_Z18read_response_sizev", i32 27, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @_Z18read_response_sizev, null, null, metadata !14, i32 27} ; [ DW_TAG_subprogram ] [line 27] [def] [read_response_size]
!10 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/Users/matt/ryan_bug/<unknown>]
!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
diff --git a/test/CodeGen/X86/stack-protector.ll b/test/CodeGen/X86/stack-protector.ll
index 265ec80682bf..4db0f9a3426b 100644
--- a/test/CodeGen/X86/stack-protector.ll
+++ b/test/CodeGen/X86/stack-protector.ll
@@ -16,13 +16,14 @@
%struct.anon.0 = type { %union.anon.1 }
%union.anon.1 = type { [2 x i8] }
%struct.small = type { i8 }
+%struct.small_char = type { i32, [5 x i8] }
@.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
; test1a: array of [16 x i8]
; no ssp attribute
; Requires no protector.
-define void @test1a(i8* %a) nounwind uwtable {
+define void @test1a(i8* %a) {
entry:
; LINUX-I386-LABEL: test1a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -53,7 +54,8 @@ entry:
; test1b: array of [16 x i8]
; ssp attribute
; Requires protector.
-define void @test1b(i8* %a) nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test1b(i8* %a) #0 {
entry:
; LINUX-I386-LABEL: test1b:
; LINUX-I386: mov{{l|q}} %gs:
@@ -88,7 +90,8 @@ entry:
; test1c: array of [16 x i8]
; sspstrong attribute
; Requires protector.
-define void @test1c(i8* %a) nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test1c(i8* %a) #1 {
entry:
; LINUX-I386-LABEL: test1c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -119,7 +122,8 @@ entry:
; test1d: array of [16 x i8]
; sspreq attribute
; Requires protector.
-define void @test1d(i8* %a) nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test1d(i8* %a) #2 {
entry:
; LINUX-I386-LABEL: test1d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -150,7 +154,7 @@ entry:
; test2a: struct { [16 x i8] }
; no ssp attribute
; Requires no protector.
-define void @test2a(i8* %a) nounwind uwtable {
+define void @test2a(i8* %a) {
entry:
; LINUX-I386-LABEL: test2a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -183,7 +187,8 @@ entry:
; test2b: struct { [16 x i8] }
; ssp attribute
; Requires protector.
-define void @test2b(i8* %a) nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test2b(i8* %a) #0 {
entry:
; LINUX-I386-LABEL: test2b:
; LINUX-I386: mov{{l|q}} %gs:
@@ -216,7 +221,8 @@ entry:
; test2c: struct { [16 x i8] }
; sspstrong attribute
; Requires protector.
-define void @test2c(i8* %a) nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test2c(i8* %a) #1 {
entry:
; LINUX-I386-LABEL: test2c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -249,7 +255,8 @@ entry:
; test2d: struct { [16 x i8] }
; sspreq attribute
; Requires protector.
-define void @test2d(i8* %a) nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test2d(i8* %a) #2 {
entry:
; LINUX-I386-LABEL: test2d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -282,7 +289,7 @@ entry:
; test3a: array of [4 x i8]
; no ssp attribute
; Requires no protector.
-define void @test3a(i8* %a) nounwind uwtable {
+define void @test3a(i8* %a) {
entry:
; LINUX-I386-LABEL: test3a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -313,7 +320,8 @@ entry:
; test3b: array [4 x i8]
; ssp attribute
; Requires no protector.
-define void @test3b(i8* %a) nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test3b(i8* %a) #0 {
entry:
; LINUX-I386-LABEL: test3b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -344,7 +352,8 @@ entry:
; test3c: array of [4 x i8]
; sspstrong attribute
; Requires protector.
-define void @test3c(i8* %a) nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test3c(i8* %a) #1 {
entry:
; LINUX-I386-LABEL: test3c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -375,7 +384,8 @@ entry:
; test3d: array of [4 x i8]
; sspreq attribute
; Requires protector.
-define void @test3d(i8* %a) nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test3d(i8* %a) #2 {
entry:
; LINUX-I386-LABEL: test3d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -406,7 +416,7 @@ entry:
; test4a: struct { [4 x i8] }
; no ssp attribute
; Requires no protector.
-define void @test4a(i8* %a) nounwind uwtable {
+define void @test4a(i8* %a) {
entry:
; LINUX-I386-LABEL: test4a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -439,7 +449,8 @@ entry:
; test4b: struct { [4 x i8] }
; ssp attribute
; Requires no protector.
-define void @test4b(i8* %a) nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test4b(i8* %a) #0 {
entry:
; LINUX-I386-LABEL: test4b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -472,7 +483,8 @@ entry:
; test4c: struct { [4 x i8] }
; sspstrong attribute
; Requires protector.
-define void @test4c(i8* %a) nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test4c(i8* %a) #1 {
entry:
; LINUX-I386-LABEL: test4c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -505,7 +517,8 @@ entry:
; test4d: struct { [4 x i8] }
; sspreq attribute
; Requires protector.
-define void @test4d(i8* %a) nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test4d(i8* %a) #2 {
entry:
; LINUX-I386-LABEL: test4d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -538,7 +551,7 @@ entry:
; test5a: no arrays / no nested arrays
; no ssp attribute
; Requires no protector.
-define void @test5a(i8* %a) nounwind uwtable {
+define void @test5a(i8* %a) {
entry:
; LINUX-I386-LABEL: test5a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -565,7 +578,8 @@ entry:
; test5b: no arrays / no nested arrays
; ssp attribute
; Requires no protector.
-define void @test5b(i8* %a) nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test5b(i8* %a) #0 {
entry:
; LINUX-I386-LABEL: test5b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -592,7 +606,8 @@ entry:
; test5c: no arrays / no nested arrays
; sspstrong attribute
; Requires no protector.
-define void @test5c(i8* %a) nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test5c(i8* %a) #1 {
entry:
; LINUX-I386-LABEL: test5c:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -619,7 +634,8 @@ entry:
; test5d: no arrays / no nested arrays
; sspreq attribute
; Requires protector.
-define void @test5d(i8* %a) nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test5d(i8* %a) #2 {
entry:
; LINUX-I386-LABEL: test5d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -646,7 +662,7 @@ entry:
; test6a: Address-of local taken (j = &a)
; no ssp attribute
; Requires no protector.
-define void @test6a() nounwind uwtable {
+define void @test6a() {
entry:
; LINUX-I386-LABEL: test6a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -677,7 +693,8 @@ entry:
; test6b: Address-of local taken (j = &a)
; ssp attribute
; Requires no protector.
-define void @test6b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test6b() #0 {
entry:
; LINUX-I386-LABEL: test6b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -708,7 +725,8 @@ entry:
; test6c: Address-of local taken (j = &a)
; sspstrong attribute
; Requires protector.
-define void @test6c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test6c() #1 {
entry:
; LINUX-I386-LABEL: test6c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -739,7 +757,8 @@ entry:
; test6d: Address-of local taken (j = &a)
; sspreq attribute
; Requires protector.
-define void @test6d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test6d() #2 {
entry:
; LINUX-I386-LABEL: test6d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -770,7 +789,7 @@ entry:
; test7a: PtrToInt Cast
; no ssp attribute
; Requires no protector.
-define void @test7a() nounwind uwtable readnone {
+define void @test7a() {
entry:
; LINUX-I386-LABEL: test7a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -796,7 +815,8 @@ entry:
; test7b: PtrToInt Cast
; ssp attribute
; Requires no protector.
-define void @test7b() nounwind uwtable readnone ssp {
+; Function Attrs: ssp
+define void @test7b() #0 {
entry:
; LINUX-I386-LABEL: test7b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -822,7 +842,8 @@ entry:
; test7c: PtrToInt Cast
; sspstrong attribute
; Requires protector.
-define void @test7c() nounwind uwtable readnone sspstrong {
+; Function Attrs: sspstrong
+define void @test7c() #1 {
entry:
; LINUX-I386-LABEL: test7c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -848,7 +869,8 @@ entry:
; test7d: PtrToInt Cast
; sspreq attribute
; Requires protector.
-define void @test7d() nounwind uwtable readnone sspreq {
+; Function Attrs: sspreq
+define void @test7d() #2 {
entry:
; LINUX-I386-LABEL: test7d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -874,7 +896,7 @@ entry:
; test8a: Passing addr-of to function call
; no ssp attribute
; Requires no protector.
-define void @test8a() nounwind uwtable {
+define void @test8a() {
entry:
; LINUX-I386-LABEL: test8a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -892,14 +914,15 @@ entry:
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%b = alloca i32, align 4
- call void @funcall(i32* %b) nounwind
+ call void @funcall(i32* %b)
ret void
}
; test8b: Passing addr-of to function call
; ssp attribute
; Requires no protector.
-define void @test8b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test8b() #0 {
entry:
; LINUX-I386-LABEL: test8b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -917,14 +940,15 @@ entry:
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%b = alloca i32, align 4
- call void @funcall(i32* %b) nounwind
+ call void @funcall(i32* %b)
ret void
}
; test8c: Passing addr-of to function call
; sspstrong attribute
; Requires protector.
-define void @test8c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test8c() #1 {
entry:
; LINUX-I386-LABEL: test8c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -942,14 +966,15 @@ entry:
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%b = alloca i32, align 4
- call void @funcall(i32* %b) nounwind
+ call void @funcall(i32* %b)
ret void
}
; test8d: Passing addr-of to function call
; sspreq attribute
; Requires protector.
-define void @test8d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test8d() #2 {
entry:
; LINUX-I386-LABEL: test8d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -967,14 +992,14 @@ entry:
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%b = alloca i32, align 4
- call void @funcall(i32* %b) nounwind
+ call void @funcall(i32* %b)
ret void
}
; test9a: Addr-of in select instruction
; no ssp attribute
; Requires no protector.
-define void @test9a() nounwind uwtable {
+define void @test9a() {
entry:
; LINUX-I386-LABEL: test9a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -992,7 +1017,7 @@ entry:
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp2 = fcmp ogt double %call, 0.000000e+00
%y.1 = select i1 %cmp2, double* %x, double* null
@@ -1003,7 +1028,8 @@ entry:
; test9b: Addr-of in select instruction
; ssp attribute
; Requires no protector.
-define void @test9b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test9b() #0 {
entry:
; LINUX-I386-LABEL: test9b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1021,7 +1047,7 @@ entry:
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp2 = fcmp ogt double %call, 0.000000e+00
%y.1 = select i1 %cmp2, double* %x, double* null
@@ -1032,7 +1058,8 @@ entry:
; test9c: Addr-of in select instruction
; sspstrong attribute
; Requires protector.
-define void @test9c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test9c() #1 {
entry:
; LINUX-I386-LABEL: test9c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1050,7 +1077,7 @@ entry:
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp2 = fcmp ogt double %call, 0.000000e+00
%y.1 = select i1 %cmp2, double* %x, double* null
@@ -1061,7 +1088,8 @@ entry:
; test9d: Addr-of in select instruction
; sspreq attribute
; Requires protector.
-define void @test9d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test9d() #2 {
entry:
; LINUX-I386-LABEL: test9d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1079,7 +1107,7 @@ entry:
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp2 = fcmp ogt double %call, 0.000000e+00
%y.1 = select i1 %cmp2, double* %x, double* null
@@ -1090,7 +1118,7 @@ entry:
; test10a: Addr-of in phi instruction
; no ssp attribute
; Requires no protector.
-define void @test10a() nounwind uwtable {
+define void @test10a() {
entry:
; LINUX-I386-LABEL: test10a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1108,13 +1136,13 @@ entry:
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp = fcmp ogt double %call, 3.140000e+00
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- %call1 = call double @testi_aux() nounwind
+ %call1 = call double @testi_aux()
store double %call1, double* %x, align 8
br label %if.end4
@@ -1127,14 +1155,15 @@ if.then3: ; preds = %if.else
if.end4: ; preds = %if.else, %if.then3, %if.then
%y.0 = phi double* [ null, %if.then ], [ %x, %if.then3 ], [ null, %if.else ]
- %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0) nounwind
+ %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0)
ret void
}
; test10b: Addr-of in phi instruction
; ssp attribute
; Requires no protector.
-define void @test10b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test10b() #0 {
entry:
; LINUX-I386-LABEL: test10b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1152,13 +1181,13 @@ entry:
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp = fcmp ogt double %call, 3.140000e+00
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- %call1 = call double @testi_aux() nounwind
+ %call1 = call double @testi_aux()
store double %call1, double* %x, align 8
br label %if.end4
@@ -1171,14 +1200,15 @@ if.then3: ; preds = %if.else
if.end4: ; preds = %if.else, %if.then3, %if.then
%y.0 = phi double* [ null, %if.then ], [ %x, %if.then3 ], [ null, %if.else ]
- %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0) nounwind
+ %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0)
ret void
}
; test10c: Addr-of in phi instruction
; sspstrong attribute
; Requires protector.
-define void @test10c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test10c() #1 {
entry:
; LINUX-I386-LABEL: test10c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1196,13 +1226,13 @@ entry:
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp = fcmp ogt double %call, 3.140000e+00
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- %call1 = call double @testi_aux() nounwind
+ %call1 = call double @testi_aux()
store double %call1, double* %x, align 8
br label %if.end4
@@ -1215,14 +1245,15 @@ if.then3: ; preds = %if.else
if.end4: ; preds = %if.else, %if.then3, %if.then
%y.0 = phi double* [ null, %if.then ], [ %x, %if.then3 ], [ null, %if.else ]
- %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0) nounwind
+ %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0)
ret void
}
; test10d: Addr-of in phi instruction
; sspreq attribute
; Requires protector.
-define void @test10d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test10d() #2 {
entry:
; LINUX-I386-LABEL: test10d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1240,13 +1271,13 @@ entry:
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%x = alloca double, align 8
- %call = call double @testi_aux() nounwind
+ %call = call double @testi_aux()
store double %call, double* %x, align 8
%cmp = fcmp ogt double %call, 3.140000e+00
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- %call1 = call double @testi_aux() nounwind
+ %call1 = call double @testi_aux()
store double %call1, double* %x, align 8
br label %if.end4
@@ -1259,14 +1290,14 @@ if.then3: ; preds = %if.else
if.end4: ; preds = %if.else, %if.then3, %if.then
%y.0 = phi double* [ null, %if.then ], [ %x, %if.then3 ], [ null, %if.else ]
- %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0) nounwind
+ %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), double* %y.0)
ret void
}
; test11a: Addr-of struct element. (GEP followed by store).
; no ssp attribute
; Requires no protector.
-define void @test11a() nounwind uwtable {
+define void @test11a() {
entry:
; LINUX-I386-LABEL: test11a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1295,7 +1326,8 @@ entry:
; test11b: Addr-of struct element. (GEP followed by store).
; ssp attribute
; Requires no protector.
-define void @test11b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test11b() #0 {
entry:
; LINUX-I386-LABEL: test11b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1324,7 +1356,8 @@ entry:
; test11c: Addr-of struct element. (GEP followed by store).
; sspstrong attribute
; Requires protector.
-define void @test11c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test11c() #1 {
entry:
; LINUX-I386-LABEL: test11c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1353,7 +1386,8 @@ entry:
; test11d: Addr-of struct element. (GEP followed by store).
; sspreq attribute
; Requires protector.
-define void @test11d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test11d() #2 {
entry:
; LINUX-I386-LABEL: test11d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1382,7 +1416,7 @@ entry:
; test12a: Addr-of struct element, GEP followed by ptrtoint.
; no ssp attribute
; Requires no protector.
-define void @test12a() nounwind uwtable {
+define void @test12a() {
entry:
; LINUX-I386-LABEL: test12a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1410,7 +1444,8 @@ entry:
; test12b: Addr-of struct element, GEP followed by ptrtoint.
; ssp attribute
; Requires no protector.
-define void @test12b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test12b() #0 {
entry:
; LINUX-I386-LABEL: test12b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1437,8 +1472,8 @@ entry:
; test12c: Addr-of struct element, GEP followed by ptrtoint.
; sspstrong attribute
-; Requires protector.
-define void @test12c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test12c() #1 {
entry:
; LINUX-I386-LABEL: test12c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1466,7 +1501,8 @@ entry:
; test12d: Addr-of struct element, GEP followed by ptrtoint.
; sspreq attribute
; Requires protector.
-define void @test12d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test12d() #2 {
entry:
; LINUX-I386-LABEL: test12d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1494,7 +1530,7 @@ entry:
; test13a: Addr-of struct element, GEP followed by callinst.
; no ssp attribute
; Requires no protector.
-define void @test13a() nounwind uwtable {
+define void @test13a() {
entry:
; LINUX-I386-LABEL: test13a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1513,14 +1549,15 @@ entry:
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
%y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; test13b: Addr-of struct element, GEP followed by callinst.
; ssp attribute
; Requires no protector.
-define void @test13b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test13b() #0 {
entry:
; LINUX-I386-LABEL: test13b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1539,14 +1576,15 @@ entry:
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
%y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; test13c: Addr-of struct element, GEP followed by callinst.
; sspstrong attribute
; Requires protector.
-define void @test13c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test13c() #1 {
entry:
; LINUX-I386-LABEL: test13c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1565,14 +1603,15 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
%y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; test13d: Addr-of struct element, GEP followed by callinst.
; sspreq attribute
; Requires protector.
-define void @test13d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test13d() #2 {
entry:
; LINUX-I386-LABEL: test13d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1591,14 +1630,14 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
%y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; test14a: Addr-of a local, optimized into a GEP (e.g., &a - 12)
; no ssp attribute
; Requires no protector.
-define void @test14a() nounwind uwtable {
+define void @test14a() {
entry:
; LINUX-I386-LABEL: test14a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1617,14 +1656,15 @@ entry:
; DARWIN-X64: .cfi_endproc
%a = alloca i32, align 4
%add.ptr5 = getelementptr inbounds i32* %a, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
; test14b: Addr-of a local, optimized into a GEP (e.g., &a - 12)
; ssp attribute
; Requires no protector.
-define void @test14b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test14b() #0 {
entry:
; LINUX-I386-LABEL: test14b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1643,14 +1683,15 @@ entry:
; DARWIN-X64: .cfi_endproc
%a = alloca i32, align 4
%add.ptr5 = getelementptr inbounds i32* %a, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
; test14c: Addr-of a local, optimized into a GEP (e.g., &a - 12)
; sspstrong attribute
; Requires protector.
-define void @test14c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test14c() #1 {
entry:
; LINUX-I386-LABEL: test14c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1669,14 +1710,15 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca i32, align 4
%add.ptr5 = getelementptr inbounds i32* %a, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
; test14d: Addr-of a local, optimized into a GEP (e.g., &a - 12)
; sspreq attribute
; Requires protector.
-define void @test14d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test14d() #2 {
entry:
; LINUX-I386-LABEL: test14d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1695,7 +1737,7 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca i32, align 4
%add.ptr5 = getelementptr inbounds i32* %a, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
@@ -1703,7 +1745,7 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; no ssp attribute
; Requires no protector.
-define void @test15a() nounwind uwtable {
+define void @test15a() {
entry:
; LINUX-I386-LABEL: test15a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1734,7 +1776,8 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; ssp attribute
; Requires no protector.
-define void @test15b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test15b() #0 {
entry:
; LINUX-I386-LABEL: test15b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1765,7 +1808,8 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; sspstrong attribute
; Requires protector.
-define void @test15c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test15c() #1 {
entry:
; LINUX-I386-LABEL: test15c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1796,7 +1840,8 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; sspreq attribute
; Requires protector.
-define void @test15d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test15d() #2 {
entry:
; LINUX-I386-LABEL: test15d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1827,7 +1872,7 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; no ssp attribute
; Requires no protector.
-define void @test16a() nounwind uwtable {
+define void @test16a() {
entry:
; LINUX-I386-LABEL: test16a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1847,7 +1892,7 @@ entry:
%a = alloca i32, align 4
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
- call void @funfloat(float* %0) nounwind
+ call void @funfloat(float* %0)
ret void
}
@@ -1855,7 +1900,8 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; ssp attribute
; Requires no protector.
-define void @test16b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test16b() #0 {
entry:
; LINUX-I386-LABEL: test16b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1875,7 +1921,7 @@ entry:
%a = alloca i32, align 4
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
- call void @funfloat(float* %0) nounwind
+ call void @funfloat(float* %0)
ret void
}
@@ -1883,7 +1929,8 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; sspstrong attribute
; Requires protector.
-define void @test16c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test16c() #1 {
entry:
; LINUX-I386-LABEL: test16c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1903,7 +1950,7 @@ entry:
%a = alloca i32, align 4
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
- call void @funfloat(float* %0) nounwind
+ call void @funfloat(float* %0)
ret void
}
@@ -1911,7 +1958,8 @@ entry:
; (e.g., int a; ... ; float *b = &a;)
; sspreq attribute
; Requires protector.
-define void @test16d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test16d() #2 {
entry:
; LINUX-I386-LABEL: test16d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -1931,14 +1979,14 @@ entry:
%a = alloca i32, align 4
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
- call void @funfloat(float* %0) nounwind
+ call void @funfloat(float* %0)
ret void
}
; test17a: Addr-of a vector nested in a struct
; no ssp attribute
; Requires no protector.
-define void @test17a() nounwind uwtable {
+define void @test17a() {
entry:
; LINUX-I386-LABEL: test17a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1958,14 +2006,15 @@ entry:
%c = alloca %struct.vec, align 16
%y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
%add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
; test17b: Addr-of a vector nested in a struct
; ssp attribute
; Requires no protector.
-define void @test17b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test17b() #0 {
entry:
; LINUX-I386-LABEL: test17b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -1985,14 +2034,15 @@ entry:
%c = alloca %struct.vec, align 16
%y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
%add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
; test17c: Addr-of a vector nested in a struct
; sspstrong attribute
; Requires protector.
-define void @test17c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test17c() #1 {
entry:
; LINUX-I386-LABEL: test17c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2012,14 +2062,15 @@ entry:
%c = alloca %struct.vec, align 16
%y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
%add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
; test17d: Addr-of a vector nested in a struct
; sspreq attribute
; Requires protector.
-define void @test17d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test17d() #2 {
entry:
; LINUX-I386-LABEL: test17d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2039,14 +2090,14 @@ entry:
%c = alloca %struct.vec, align 16
%y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
%add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
- %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr) nounwind
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
; test18a: Addr-of a variable passed into an invoke instruction.
; no ssp attribute
; Requires no protector.
-define i32 @test18a() uwtable {
+define i32 @test18a() {
entry:
; LINUX-I386-LABEL: test18a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2082,7 +2133,8 @@ lpad:
; test18b: Addr-of a variable passed into an invoke instruction.
; ssp attribute
; Requires no protector.
-define i32 @test18b() uwtable ssp {
+; Function Attrs: ssp
+define i32 @test18b() #0 {
entry:
; LINUX-I386-LABEL: test18b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2118,7 +2170,8 @@ lpad:
; test18c: Addr-of a variable passed into an invoke instruction.
; sspstrong attribute
; Requires protector.
-define i32 @test18c() uwtable sspstrong {
+; Function Attrs: sspstrong
+define i32 @test18c() #1 {
entry:
; LINUX-I386-LABEL: test18c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2154,7 +2207,8 @@ lpad:
; test18d: Addr-of a variable passed into an invoke instruction.
; sspreq attribute
; Requires protector.
-define i32 @test18d() uwtable sspreq {
+; Function Attrs: sspreq
+define i32 @test18d() #2 {
entry:
; LINUX-I386-LABEL: test18d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2186,12 +2240,11 @@ lpad:
catch i8* null
ret i32 0
}
-
; test19a: Addr-of a struct element passed into an invoke instruction.
; (GEP followed by an invoke)
; no ssp attribute
; Requires no protector.
-define i32 @test19a() uwtable {
+define i32 @test19a() {
entry:
; LINUX-I386-LABEL: test19a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2230,7 +2283,8 @@ lpad:
; (GEP followed by an invoke)
; ssp attribute
; Requires no protector.
-define i32 @test19b() uwtable ssp {
+; Function Attrs: ssp
+define i32 @test19b() #0 {
entry:
; LINUX-I386-LABEL: test19b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2269,7 +2323,8 @@ lpad:
; (GEP followed by an invoke)
; sspstrong attribute
; Requires protector.
-define i32 @test19c() uwtable sspstrong {
+; Function Attrs: sspstrong
+define i32 @test19c() #1 {
entry:
; LINUX-I386-LABEL: test19c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2308,7 +2363,8 @@ lpad:
; (GEP followed by an invoke)
; sspreq attribute
; Requires protector.
-define i32 @test19d() uwtable sspreq {
+; Function Attrs: sspreq
+define i32 @test19d() #2 {
entry:
; LINUX-I386-LABEL: test19d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2350,7 +2406,7 @@ lpad:
; test20a: Addr-of a pointer
; no ssp attribute
; Requires no protector.
-define void @test20a() nounwind uwtable {
+define void @test20a() {
entry:
; LINUX-I386-LABEL: test20a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2380,7 +2436,8 @@ entry:
; test20b: Addr-of a pointer
; ssp attribute
; Requires no protector.
-define void @test20b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test20b() #0 {
entry:
; LINUX-I386-LABEL: test20b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2410,7 +2467,8 @@ entry:
; test20c: Addr-of a pointer
; sspstrong attribute
; Requires protector.
-define void @test20c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test20c() #1 {
entry:
; LINUX-I386-LABEL: test20c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2440,7 +2498,8 @@ entry:
; test20d: Addr-of a pointer
; sspreq attribute
; Requires protector.
-define void @test20d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test20d() #2 {
entry:
; LINUX-I386-LABEL: test20d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2470,7 +2529,7 @@ entry:
; test21a: Addr-of a casted pointer
; no ssp attribute
; Requires no protector.
-define void @test21a() nounwind uwtable {
+define void @test21a() {
entry:
; LINUX-I386-LABEL: test21a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2501,7 +2560,8 @@ entry:
; test21b: Addr-of a casted pointer
; ssp attribute
; Requires no protector.
-define void @test21b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test21b() #0 {
entry:
; LINUX-I386-LABEL: test21b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2532,7 +2592,8 @@ entry:
; test21c: Addr-of a casted pointer
; sspstrong attribute
; Requires protector.
-define void @test21c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test21c() #1 {
entry:
; LINUX-I386-LABEL: test21c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2563,7 +2624,8 @@ entry:
; test21d: Addr-of a casted pointer
; sspreq attribute
; Requires protector.
-define void @test21d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test21d() #2 {
entry:
; LINUX-I386-LABEL: test21d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2594,7 +2656,7 @@ entry:
; test22a: [2 x i8] in a class
; no ssp attribute
; Requires no protector.
-define signext i8 @test22a() nounwind uwtable {
+define signext i8 @test22a() {
entry:
; LINUX-I386-LABEL: test22a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2621,7 +2683,8 @@ entry:
; test22b: [2 x i8] in a class
; ssp attribute
; Requires no protector.
-define signext i8 @test22b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define signext i8 @test22b() #0 {
entry:
; LINUX-I386-LABEL: test22b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2648,7 +2711,8 @@ entry:
; test22c: [2 x i8] in a class
; sspstrong attribute
; Requires protector.
-define signext i8 @test22c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define signext i8 @test22c() #1 {
entry:
; LINUX-I386-LABEL: test22c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2675,7 +2739,8 @@ entry:
; test22d: [2 x i8] in a class
; sspreq attribute
; Requires protector.
-define signext i8 @test22d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define signext i8 @test22d() #2 {
entry:
; LINUX-I386-LABEL: test22d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2702,7 +2767,7 @@ entry:
; test23a: [2 x i8] nested in several layers of structs and unions
; no ssp attribute
; Requires no protector.
-define signext i8 @test23a() nounwind uwtable {
+define signext i8 @test23a() {
entry:
; LINUX-I386-LABEL: test23a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2733,7 +2798,8 @@ entry:
; test23b: [2 x i8] nested in several layers of structs and unions
; ssp attribute
; Requires no protector.
-define signext i8 @test23b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define signext i8 @test23b() #0 {
entry:
; LINUX-I386-LABEL: test23b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2764,7 +2830,8 @@ entry:
; test23c: [2 x i8] nested in several layers of structs and unions
; sspstrong attribute
; Requires protector.
-define signext i8 @test23c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define signext i8 @test23c() #1 {
entry:
; LINUX-I386-LABEL: test23c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2795,7 +2862,8 @@ entry:
; test23d: [2 x i8] nested in several layers of structs and unions
; sspreq attribute
; Requires protector.
-define signext i8 @test23d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define signext i8 @test23d() #2 {
entry:
; LINUX-I386-LABEL: test23d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2826,7 +2894,7 @@ entry:
; test24a: Variable sized alloca
; no ssp attribute
; Requires no protector.
-define void @test24a(i32 %n) nounwind uwtable {
+define void @test24a(i32 %n) {
entry:
; LINUX-I386-LABEL: test24a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2857,7 +2925,8 @@ entry:
; test24b: Variable sized alloca
; ssp attribute
; Requires protector.
-define void @test24b(i32 %n) nounwind uwtable ssp {
+; Function Attrs: ssp
+define void @test24b(i32 %n) #0 {
entry:
; LINUX-I386-LABEL: test24b:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2888,7 +2957,8 @@ entry:
; test24c: Variable sized alloca
; sspstrong attribute
; Requires protector.
-define void @test24c(i32 %n) nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test24c(i32 %n) #1 {
entry:
; LINUX-I386-LABEL: test24c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2919,7 +2989,8 @@ entry:
; test24d: Variable sized alloca
; sspreq attribute
; Requires protector.
-define void @test24d(i32 %n) nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define void @test24d(i32 %n) #2 {
entry:
; LINUX-I386-LABEL: test24d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -2950,7 +3021,7 @@ entry:
; test25a: array of [4 x i32]
; no ssp attribute
; Requires no protector.
-define i32 @test25a() nounwind uwtable {
+define i32 @test25a() {
entry:
; LINUX-I386-LABEL: test25a:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -2976,7 +3047,8 @@ entry:
; test25b: array of [4 x i32]
; ssp attribute
; Requires no protector, except for Darwin which _does_ require a protector.
-define i32 @test25b() nounwind uwtable ssp {
+; Function Attrs: ssp
+define i32 @test25b() #0 {
entry:
; LINUX-I386-LABEL: test25b:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -3002,7 +3074,8 @@ entry:
; test25c: array of [4 x i32]
; sspstrong attribute
; Requires protector.
-define i32 @test25c() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define i32 @test25c() #1 {
entry:
; LINUX-I386-LABEL: test25c:
; LINUX-I386: mov{{l|q}} %gs:
@@ -3028,7 +3101,8 @@ entry:
; test25d: array of [4 x i32]
; sspreq attribute
; Requires protector.
-define i32 @test25d() nounwind uwtable sspreq {
+; Function Attrs: sspreq
+define i32 @test25d() #2 {
entry:
; LINUX-I386-LABEL: test25d:
; LINUX-I386: mov{{l|q}} %gs:
@@ -3056,7 +3130,8 @@ entry:
; a stack protector.
; sspstrong attribute
; Requires no protector.
-define void @test26() nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define void @test26() #1 {
entry:
; LINUX-I386-LABEL: test26:
; LINUX-I386-NOT: calll __stack_chk_fail
@@ -3087,7 +3162,8 @@ entry:
; Verify that the address-of analysis does not get stuck in infinite
; recursion when chasing the alloca through the PHI nodes.
; Requires protector.
-define i32 @test27(i32 %arg) nounwind uwtable sspstrong {
+; Function Attrs: sspstrong
+define i32 @test27(i32 %arg) #1 {
bb:
; LINUX-I386-LABEL: test27:
; LINUX-I386: mov{{l|q}} %gs:
@@ -3105,7 +3181,7 @@ bb:
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%tmp = alloca %struct.small*, align 8
- %tmp1 = call i32 (...)* @dummy(%struct.small** %tmp) nounwind
+ %tmp1 = call i32 (...)* @dummy(%struct.small** %tmp)
%tmp2 = load %struct.small** %tmp, align 8
%tmp3 = ptrtoint %struct.small* %tmp2 to i64
%tmp4 = trunc i64 %tmp3 to i32
@@ -3133,10 +3209,239 @@ bb17: ; preds = %bb6
bb21: ; preds = %bb6, %bb
%tmp22 = phi i32 [ %tmp1, %bb ], [ %tmp14, %bb6 ]
- %tmp23 = call i32 (...)* @dummy(i32 %tmp22) nounwind
+ %tmp23 = call i32 (...)* @dummy(i32 %tmp22)
ret i32 undef
}
+; test28a: An array of [32 x i8] and a requested ssp-buffer-size of 33.
+; Requires no protector.
+; Function Attrs: ssp stack-protector-buffer-size=33
+define i32 @test28a() #3 {
+entry:
+; LINUX-I386-LABEL: test28a:
+; LINUX-I386-NOT: calll __stack_chk_fail
+; LINUX-I386: .cfi_endproc
+
+; LINUX-X64-LABEL: test28a:
+; LINUX-X64-NOT: callq __stack_chk_fail
+; LINUX-X64: .cfi_endproc
+
+; LINUX-KERNEL-X64-LABEL: test28a:
+; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail
+; LINUX-KERNEL-X64: .cfi_endproc
+
+; DARWIN-X64-LABEL: test28a:
+; DARWIN-X64-NOT: callq ___stack_chk_fail
+; DARWIN-X64: .cfi_endproc
+ %test = alloca [32 x i8], align 16
+ %arraydecay = getelementptr inbounds [32 x i8]* %test, i32 0, i32 0
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
+ ret i32 %call
+}
+
+; test28b: An array of [33 x i8] and a requested ssp-buffer-size of 33.
+; Requires protector.
+; Function Attrs: ssp stack-protector-buffer-size=33
+define i32 @test28b() #3 {
+entry:
+; LINUX-I386-LABEL: test28b:
+; LINUX-I386: mov{{l|q}} %gs:
+; LINUX-I386: calll __stack_chk_fail
+
+; LINUX-X64-LABEL: test28b:
+; LINUX-X64: mov{{l|q}} %fs:
+; LINUX-X64: callq __stack_chk_fail
+
+; LINUX-KERNEL-X64-LABEL: test28b:
+; LINUX-KERNEL-X64: mov{{l|q}} %gs:
+; LINUX-KERNEL-X64: callq __stack_chk_fail
+
+; DARWIN-X64-LABEL: test28b:
+; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
+; DARWIN-X64: callq ___stack_chk_fail
+ %test = alloca [33 x i8], align 16
+ %arraydecay = getelementptr inbounds [33 x i8]* %test, i32 0, i32 0
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
+ ret i32 %call
+}
+
+; test29a: An array of [4 x i8] and a requested ssp-buffer-size of 5.
+; Requires no protector.
+; Function Attrs: ssp stack-protector-buffer-size=5
+define i32 @test29a() #4 {
+entry:
+; LINUX-I386-LABEL: test29a:
+; LINUX-I386-NOT: calll __stack_chk_fail
+; LINUX-I386: .cfi_endproc
+
+; LINUX-X64-LABEL: test29a:
+; LINUX-X64-NOT: callq __stack_chk_fail
+; LINUX-X64: .cfi_endproc
+
+; LINUX-KERNEL-X64-LABEL: test29a:
+; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail
+; LINUX-KERNEL-X64: .cfi_endproc
+
+; DARWIN-X64-LABEL: test29a:
+; DARWIN-X64-NOT: callq ___stack_chk_fail
+; DARWIN-X64: .cfi_endproc
+ %test = alloca [4 x i8], align 1
+ %arraydecay = getelementptr inbounds [4 x i8]* %test, i32 0, i32 0
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
+ ret i32 %call
+}
+
+; test29b: An array of [5 x i8] and a requested ssp-buffer-size of 5.
+; Requires protector.
+; Function Attrs: ssp stack-protector-buffer-size=5
+define i32 @test29b() #4 {
+entry:
+; LINUX-I386-LABEL: test29b:
+; LINUX-I386: mov{{l|q}} %gs:
+; LINUX-I386: calll __stack_chk_fail
+
+; LINUX-X64-LABEL: test29b:
+; LINUX-X64: mov{{l|q}} %fs:
+; LINUX-X64: callq __stack_chk_fail
+
+; LINUX-KERNEL-X64-LABEL: test29b:
+; LINUX-KERNEL-X64: mov{{l|q}} %gs:
+; LINUX-KERNEL-X64: callq __stack_chk_fail
+
+; DARWIN-X64-LABEL: test29b:
+; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
+; DARWIN-X64: callq ___stack_chk_fail
+ %test = alloca [5 x i8], align 1
+ %arraydecay = getelementptr inbounds [5 x i8]* %test, i32 0, i32 0
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
+ ret i32 %call
+}
+
+; test30a: A structure containing an i32 and an array of [5 x i8].
+; Requested ssp-buffer-size of 6.
+; Requires no protector.
+; Function Attrs: ssp stack-protector-buffer-size=6
+define i32 @test30a() #5 {
+entry:
+; LINUX-I386-LABEL: test30a:
+; LINUX-I386-NOT: calll __stack_chk_fail
+; LINUX-I386: .cfi_endproc
+
+; LINUX-X64-LABEL: test30a:
+; LINUX-X64-NOT: callq __stack_chk_fail
+; LINUX-X64: .cfi_endproc
+
+; LINUX-KERNEL-X64-LABEL: test30a:
+; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail
+; LINUX-KERNEL-X64: .cfi_endproc
+
+; DARWIN-X64-LABEL: test30a:
+; DARWIN-X64-NOT: callq ___stack_chk_fail
+; DARWIN-X64: .cfi_endproc
+ %test = alloca %struct.small_char, align 4
+ %test.coerce = alloca { i64, i8 }
+ %0 = bitcast { i64, i8 }* %test.coerce to i8*
+ %1 = bitcast %struct.small_char* %test to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
+ %2 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 0
+ %3 = load i64* %2, align 1
+ %4 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 1
+ %5 = load i8* %4, align 1
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %3, i8 %5)
+ ret i32 %call
+}
+
+; test30b: A structure containing an i32 and an array of [5 x i8].
+; Requested ssp-buffer-size of 5.
+; Requires protector.
+; Function Attrs: ssp stack-protector-buffer-size=5
+define i32 @test30b() #4 {
+entry:
+; LINUX-I386-LABEL: test30b:
+; LINUX-I386: mov{{l|q}} %gs:
+; LINUX-I386: calll __stack_chk_fail
+
+; LINUX-X64-LABEL: test30b:
+; LINUX-X64: mov{{l|q}} %fs:
+; LINUX-X64: callq __stack_chk_fail
+
+; LINUX-KERNEL-X64-LABEL: test30b:
+; LINUX-KERNEL-X64: mov{{l|q}} %gs:
+; LINUX-KERNEL-X64: callq __stack_chk_fail
+
+; DARWIN-X64-LABEL: test30b:
+; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
+; DARWIN-X64: callq ___stack_chk_fail
+ %test = alloca %struct.small_char, align 4
+ %test.coerce = alloca { i64, i8 }
+ %0 = bitcast { i64, i8 }* %test.coerce to i8*
+ %1 = bitcast %struct.small_char* %test to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
+ %2 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 0
+ %3 = load i64* %2, align 1
+ %4 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 1
+ %5 = load i8* %4, align 1
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %3, i8 %5)
+ ret i32 %call
+}
+
+; test31a: An alloca of size 5.
+; Requested ssp-buffer-size of 6.
+; Requires no protector.
+; Function Attrs: ssp stack-protector-buffer-size=6
+define i32 @test31a() #5 {
+entry:
+; LINUX-I386-LABEL: test31a:
+; LINUX-I386-NOT: calll __stack_chk_fail
+; LINUX-I386: .cfi_endproc
+
+; LINUX-X64-LABEL: test31a:
+; LINUX-X64-NOT: callq __stack_chk_fail
+; LINUX-X64: .cfi_endproc
+
+; LINUX-KERNEL-X64-LABEL: test31a:
+; LINUX-KERNEL-X64-NOT: callq __stack_chk_fail
+; LINUX-KERNEL-X64: .cfi_endproc
+
+; DARWIN-X64-LABEL: test31a:
+; DARWIN-X64-NOT: callq ___stack_chk_fail
+; DARWIN-X64: .cfi_endproc
+ %test = alloca i8*, align 8
+ %0 = alloca i8, i64 4
+ store i8* %0, i8** %test, align 8
+ %1 = load i8** %test, align 8
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %1)
+ ret i32 %call
+}
+
+; test31b: An alloca of size 5.
+; Requested ssp-buffer-size of 5.
+; Requires protector.
+define i32 @test31b() #4 {
+entry:
+; LINUX-I386-LABEL: test31b:
+; LINUX-I386: mov{{l|q}} %gs:
+; LINUX-I386: calll __stack_chk_fail
+
+; LINUX-X64-LABEL: test31b:
+; LINUX-X64: mov{{l|q}} %fs:
+; LINUX-X64: callq __stack_chk_fail
+
+; LINUX-KERNEL-X64-LABEL: test31b:
+; LINUX-KERNEL-X64: mov{{l|q}} %gs:
+; LINUX-KERNEL-X64: callq __stack_chk_fail
+
+; DARWIN-X64-LABEL: test31b:
+; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
+; DARWIN-X64: callq ___stack_chk_fail
+ %test = alloca i8*, align 8
+ %0 = alloca i8, i64 5
+ store i8* %0, i8** %test, align 8
+ %1 = load i8** %test, align 8
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %1)
+ ret i32 %call
+}
+
declare double @testi_aux()
declare i8* @strcpy(i8*, i8*)
declare i32 @printf(i8*, ...)
@@ -3148,3 +3453,11 @@ declare void @_Z3exceptPi(i32*)
declare i32 @__gxx_personality_v0(...)
declare i32* @getp()
declare i32 @dummy(...)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+
+attributes #0 = { ssp }
+attributes #1 = { sspstrong }
+attributes #2 = { sspreq }
+attributes #3 = { ssp "stack-protector-buffer-size"="33" }
+attributes #4 = { ssp "stack-protector-buffer-size"="5" }
+attributes #5 = { ssp "stack-protector-buffer-size"="6" }
diff --git a/test/CodeGen/X86/stackmap-fast-isel.ll b/test/CodeGen/X86/stackmap-fast-isel.ll
new file mode 100644
index 000000000000..0b7e6dbdc7a2
--- /dev/null
+++ b/test/CodeGen/X86/stackmap-fast-isel.ll
@@ -0,0 +1,165 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim -fast-isel -fast-isel-abort | FileCheck %s
+
+; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 4
+; Num LargeConstants
+; CHECK-NEXT: .long 3
+; Num Callsites
+; CHECK-NEXT: .long 7
+
+; Functions and stack size
+; CHECK-NEXT: .quad _constantargs
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _liveConstant
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _directFrameIdx
+; CHECK-NEXT: .quad 40
+; CHECK-NEXT: .quad _longid
+; CHECK-NEXT: .quad 8
+
+; Large Constants
+; CHECK-NEXT: .quad 2147483648
+; CHECK-NEXT: .quad 4294967295
+; CHECK-NEXT: .quad 4294967296
+
+; Callsites
+; Constant arguments
+;
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .long L{{.*}}-_constantargs
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 12
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 65536
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2000000000
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2147483647
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 0
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 1
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 1
+; LargeConstant at index 2
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+
+define void @constantargs() {
+entry:
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 15, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
+ ret void
+}
+
+; Map a constant value.
+;
+; CHECK-LABEL: .long L{{.*}}-_liveConstant
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 33
+
+define void @liveConstant() {
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 5, i32 33)
+ ret void
+}
+
+; Directly map an alloca's address.
+;
+; Callsite 16
+; CHECK-LABEL: .long L{{.*}}-_directFrameIdx
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Direct RBP - ofs
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
+
+define void @directFrameIdx() {
+entry:
+ %metadata1 = alloca i64, i32 3, align 8
+ store i64 11, i64* %metadata1
+ store i64 12, i64* %metadata1
+ store i64 13, i64* %metadata1
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+ ret void
+}
+
+; Test a 64-bit ID.
+;
+; CHECK: .quad 4294967295
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 4294967296
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 9223372036854775807
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad -1
+; CHECK-LABEL: .long L{{.*}}-_longid
+define void @longid() {
+entry:
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4294967295, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4294967296, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 9223372036854775807, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 -1, i32 0)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
diff --git a/test/CodeGen/X86/stackmap-liveness.ll b/test/CodeGen/X86/stackmap-liveness.ll
new file mode 100644
index 000000000000..897595db2438
--- /dev/null
+++ b/test/CodeGen/X86/stackmap-liveness.ll
@@ -0,0 +1,176 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim -enable-patchpoint-liveness=false | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim | FileCheck -check-prefix=PATCH %s
+;
+; Note: Print verbose stackmaps using -debug-only=stackmaps.
+
+; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 2
+; Num LargeConstants
+; CHECK-NEXT: .long 0
+; Num Callsites
+; CHECK-NEXT: .long 5
+
+; Functions and stack size
+; CHECK-NEXT: .quad _stackmap_liveness
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _mixed_liveness
+; CHECK-NEXT: .quad 8
+
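+; Each LiveOut entry checked below decodes as .short dwarf-regnum,
+; .byte reserved, .byte size-in-bytes; on x86-64, DWARF 0 is RAX, 7 is RSP,
+; 8 is R8 and 17-19 are XMM/YMM0-2, which is how the register names in the
+; comments map onto the raw .short values.
+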
+define void @stackmap_liveness() {
+entry:
+ %a1 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
+; StackMap 1 (no liveness information available)
+; CHECK-LABEL: .long L{{.*}}-_stackmap_liveness
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 0
+; Padding
+; CHECK-NEXT: .short 0
+; Num LiveOut Entries: 0
+; CHECK-NEXT: .short 0
+; Align
+; CHECK-NEXT: .align 3
+
+; StackMap 1 (patchpoint liveness information enabled)
+; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
+; PATCH-NEXT: .short 0
+; PATCH-NEXT: .short 0
+; Padding
+; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 1
+; PATCH-NEXT: .short 1
+; LiveOut Entry 1: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
+; Align
+; PATCH-NEXT: .align 3
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 12, i8* null, i32 0)
+ %a2 = call i64 asm sideeffect "", "={r8}"() nounwind
+ %a3 = call i8 asm sideeffect "", "={ah}"() nounwind
+ %a4 = call <4 x double> asm sideeffect "", "={ymm0}"() nounwind
+ %a5 = call <4 x double> asm sideeffect "", "={ymm1}"() nounwind
+
+; StackMap 2 (no liveness information available)
+; CHECK-LABEL: .long L{{.*}}-_stackmap_liveness
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 0
+; Padding
+; CHECK-NEXT: .short 0
+; Num LiveOut Entries: 0
+; CHECK-NEXT: .short 0
+; Align
+; CHECK-NEXT: .align 3
+
+; StackMap 2 (patchpoint liveness information enabled)
+; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
+; PATCH-NEXT: .short 0
+; PATCH-NEXT: .short 0
+; Padding
+; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 5
+; PATCH-NEXT: .short 5
+; LiveOut Entry 1: %RAX (1 bytes) --> %AL or %AH
+; PATCH-NEXT: .short 0
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 1
+; LiveOut Entry 2: %R8 (8 bytes)
+; PATCH-NEXT: .short 8
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 8
+; LiveOut Entry 3: %YMM0 (32 bytes)
+; PATCH-NEXT: .short 17
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 32
+; LiveOut Entry 4: %YMM1 (32 bytes)
+; PATCH-NEXT: .short 18
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 32
+; LiveOut Entry 5: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
+; Align
+; PATCH-NEXT: .align 3
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 2, i32 12, i8* null, i32 0)
+ call void asm sideeffect "", "{r8},{ah},{ymm0},{ymm1}"(i64 %a2, i8 %a3, <4 x double> %a4, <4 x double> %a5) nounwind
+
+; StackMap 3 (no liveness information available)
+; CHECK-LABEL: .long L{{.*}}-_stackmap_liveness
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 0
+; Padding
+; CHECK-NEXT: .short 0
+; Num LiveOut Entries: 0
+; CHECK-NEXT: .short 0
+; Align
+; CHECK-NEXT: .align 3
+
+; StackMap 3 (patchpoint liveness information enabled)
+; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
+; PATCH-NEXT: .short 0
+; PATCH-NEXT: .short 0
+; Padding
+; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 2
+; PATCH-NEXT: .short 2
+; LiveOut Entry 1: %RSP (8 bytes)
+; PATCH-NEXT: .short 7
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 8
+; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
+; Align
+; PATCH-NEXT: .align 3
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 12, i8* null, i32 0)
+ call void asm sideeffect "", "{xmm2}"(<2 x double> %a1) nounwind
+ ret void
+}
+
+define void @mixed_liveness() {
+entry:
+ %a1 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
+; StackMap 4 (patchpoint liveness information enabled)
+; PATCH-LABEL: .long L{{.*}}-_mixed_liveness
+; PATCH-NEXT: .short 0
+; PATCH-NEXT: .short 0
+; Padding
+; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 0
+; PATCH-NEXT: .short 0
+; Align
+; PATCH-NEXT: .align 3
+
+; StackMap 5 (patchpoint liveness information enabled)
+; PATCH-LABEL: .long L{{.*}}-_mixed_liveness
+; PATCH-NEXT: .short 0
+; PATCH-NEXT: .short 0
+; Padding
+; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 2
+; PATCH-NEXT: .short 2
+; LiveOut Entry 1: %RSP (8 bytes)
+; PATCH-NEXT: .short 7
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 8
+; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
+; Align
+; PATCH-NEXT: .align 3
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 5)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 5, i32 0, i8* null, i32 0)
+ call void asm sideeffect "", "{xmm2}"(<2 x double> %a1) nounwind
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/X86/stackmap-nops.ll b/test/CodeGen/X86/stackmap-nops.ll
new file mode 100644
index 000000000000..5a78f24d7b5e
--- /dev/null
+++ b/test/CodeGen/X86/stackmap-nops.ll
@@ -0,0 +1,230 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck %s
+
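+; Each stackmap below requests <id> shadow bytes, which are padded with nops.
+; The expected sequences are built from the canonical multi-byte x86 nops (up
+; to the 10-byte "nopw %cs:512(%rax,%rax)") plus 0x66 prefixes (.byte 102);
+; requests above the 15-byte instruction-length limit are split into two nop
+; instructions, as the checks for 16 bytes and up show.
+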
+define void @nop_test() {
+entry:
+; CHECK-LABEL: nop_test:
+; CHECK: nop
+; CHECK: xchgw %ax, %ax
+; CHECK: nopl (%rax)
+; CHECK: nopl 8(%rax)
+; CHECK: nopl 8(%rax,%rax)
+; CHECK: nopw 8(%rax,%rax)
+; CHECK: nopl 512(%rax)
+; CHECK: nopl 512(%rax,%rax)
+; CHECK: nopw 512(%rax,%rax)
+; CHECK: nopw %cs:512(%rax,%rax)
+
+; 11
+; CHECK: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 12
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 13
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 14
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 15
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 16
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nop
+
+; 17
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: xchgw %ax, %ax
+
+; 18
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopl (%rax)
+
+; 19
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopl 8(%rax)
+
+; 20
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopl 8(%rax,%rax)
+
+; 21
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopw 8(%rax,%rax)
+
+; 22
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopl 512(%rax)
+
+; 23
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopl 512(%rax,%rax)
+
+; 24
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopw 512(%rax,%rax)
+
+; 25
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 26
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 27
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 28
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 29
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+
+; 30
+; CHECK: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: .byte 102
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 0, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 1)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 2, i32 2)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 3)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 4)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 5, i32 5)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 6, i32 6)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 7, i32 7)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 8, i32 8)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 9, i32 9)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 10, i32 10)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 11, i32 11)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 12)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 13, i32 13)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 14)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 15)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 16)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 17)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 18, i32 18)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 19, i32 19)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 20, i32 20)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 21, i32 21)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 22, i32 22)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 23, i32 23)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 24, i32 24)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 25, i32 25)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 26, i32 26)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 27, i32 27)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 28, i32 28)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 29, i32 29)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 30, i32 30)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
diff --git a/test/CodeGen/X86/stackmap.ll b/test/CodeGen/X86/stackmap.ll
index ed9558302848..85670370d870 100644
--- a/test/CodeGen/X86/stackmap.ll
+++ b/test/CodeGen/X86/stackmap.ll
@@ -1,27 +1,74 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck %s
;
; Note: Print verbose stackmaps using -debug-only=stackmaps.
; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
-; CHECK-NEXT: .long 0
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 15
; Num LargeConstants
-; CHECK-NEXT: .long 1
-; CHECK-NEXT: .quad 4294967296
+; CHECK-NEXT: .long 3
; Num Callsites
-; CHECK-NEXT: .long 11
+; CHECK-NEXT: .long 19
+
+; Functions and stack size
+; CHECK-NEXT: .quad _constantargs
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _osrinline
+; CHECK-NEXT: .quad 24
+; CHECK-NEXT: .quad _osrcold
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _propertyRead
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _propertyWrite
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _jsVoidCall
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _jsIntCall
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _spilledValue
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _spilledStackMapValue
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _spillSubReg
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _subRegOffset
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _liveConstant
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _directFrameIdx
+; CHECK-NEXT: .quad 56
+; CHECK-NEXT: .quad _longid
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _clobberScratch
+; CHECK-NEXT: .quad 56
+
+; Large Constants
+; CHECK-NEXT: .quad 2147483648
+; CHECK-NEXT: .quad 4294967295
+; CHECK-NEXT: .quad 4294967296
+; Callsites
; Constant arguments
;
-; CHECK-NEXT: .long 1
+; CHECK-NEXT: .quad 1
; CHECK-NEXT: .long L{{.*}}-_constantargs
; CHECK-NEXT: .short 0
-; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short 12
; SmallConstant
; CHECK-NEXT: .byte 4
; CHECK-NEXT: .byte 8
; CHECK-NEXT: .short 0
-; CHECK-NEXT: .long 65535
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
; SmallConstant
; CHECK-NEXT: .byte 4
; CHECK-NEXT: .byte 8
@@ -31,24 +78,58 @@
; CHECK-NEXT: .byte 4
; CHECK-NEXT: .byte 8
; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2000000000
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2147483647
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
; LargeConstant at index 0
; CHECK-NEXT: .byte 5
; CHECK-NEXT: .byte 8
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
+; LargeConstant at index 1
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 1
+; LargeConstant at index 2
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
define void @constantargs() {
entry:
%0 = inttoptr i64 12345 to i8*
- tail call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 1, i32 15, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 15, i8* %0, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
ret void
}
; Inline OSR Exit
;
-; CHECK-NEXT: .long 3
-; CHECK-NEXT: .long L{{.*}}-_osrinline
+; CHECK-LABEL: .long L{{.*}}-_osrinline
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -64,7 +145,7 @@ entry:
; Runtime void->void call.
call void inttoptr (i64 -559038737 to void ()*)()
; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
- call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 3, i32 12, i64 %a, i64 %b)
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
ret void
}
@@ -72,8 +153,7 @@ entry:
;
; 2 live variables in register.
;
-; CHECK-NEXT: .long 4
-; CHECK-NEXT: .long L{{.*}}-_osrcold
+; CHECK-LABEL: .long L{{.*}}-_osrcold
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -83,7 +163,7 @@ entry:
; CHECK-NEXT: .byte 1
; CHECK-NEXT: .byte 8
; CHECK-NEXT: .short {{[0-9]+}}
-; CHECK-NEXT: .long 0
+; CHECK-NEXT: .long 0
define void @osrcold(i64 %a, i64 %b) {
entry:
%test = icmp slt i64 %a, %b
@@ -91,40 +171,48 @@ entry:
cold:
; OSR patchpoint with 12-byte nop-slide and 2 live vars.
%thunk = inttoptr i64 -559038737 to i8*
- call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 4, i32 15, i8* %thunk, i32 0, i64 %a, i64 %b)
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4, i32 15, i8* %thunk, i32 0, i64 %a, i64 %b)
unreachable
ret:
ret void
}
; Property Read
-; CHECK-NEXT: .long 5
-; CHECK-NEXT: .long L{{.*}}-_propertyRead
-; CHECK-NEXT: .short 0
-; CHECK-NEXT: .short 0
-;
-; FIXME: There are currently no stackmap entries. After moving to
-; AnyRegCC, we will have entries for the object and return value.
+; CHECK-LABEL: .long L{{.*}}-_propertyRead
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
define i64 @propertyRead(i64* %obj) {
entry:
%resolveRead = inttoptr i64 -559038737 to i8*
- %result = call i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 5, i32 15, i8* %resolveRead, i32 1, i64* %obj)
+ %result = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 15, i8* %resolveRead, i32 1, i64* %obj)
%add = add i64 %result, 3
ret i64 %add
}
; Property Write
-; CHECK-NEXT: .long 6
-; CHECK-NEXT: .long L{{.*}}-_propertyWrite
-; CHECK-NEXT: .short 0
-; CHECK-NEXT: .short 0
-;
-; FIXME: There are currently no stackmap entries. After moving to
-; AnyRegCC, we will have entries for the object and return value.
+; CHECK-LABEL: .long L{{.*}}-_propertyWrite
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
entry:
%resolveWrite = inttoptr i64 -559038737 to i8*
- call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 6, i32 15, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 15, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
ret void
}
@@ -132,8 +220,7 @@ entry:
;
; 2 live variables in registers.
;
-; CHECK-NEXT: .long 7
-; CHECK-NEXT: .long L{{.*}}-_jsVoidCall
+; CHECK-LABEL: .long L{{.*}}-_jsVoidCall
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -147,7 +234,7 @@ entry:
define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
%resolveCall = inttoptr i64 -559038737 to i8*
- call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 7, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 7, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
ret void
}
@@ -155,8 +242,7 @@ entry:
;
; 2 live variables in registers.
;
-; CHECK: .long 8
-; CHECK-NEXT: .long L{{.*}}-_jsIntCall
+; CHECK-LABEL: .long L{{.*}}-_jsIntCall
; CHECK-NEXT: .short 0
; CHECK-NEXT: .short 2
; CHECK-NEXT: .byte 1
@@ -170,7 +256,7 @@ entry:
define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
%resolveCall = inttoptr i64 -559038737 to i8*
- %result = call i64 (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i32 8, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 8, i32 15, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
%add = add i64 %result, 3
ret i64 %add
}
@@ -179,19 +265,18 @@ entry:
;
; Verify 17 stack map entries.
;
-; CHECK: .long 11
-; CHECK-NEXT: .long L{{.*}}-_spilledValue
-; CHECK-NEXT: .short 0
-; CHECK-NEXT: .short 17
+; CHECK-LABEL: .long L{{.*}}-_spilledValue
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 17
;
; Check that at least one is a spilled entry from RBP.
; Location: Indirect RBP + ...
-; CHECK: .byte 3
-; CHECK-NEXT: .byte 8
-; CHECK-NEXT: .short 6
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16) {
entry:
- call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 11, i32 15, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 11, i32 15, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
ret void
}
@@ -199,35 +284,33 @@ entry:
;
; Verify 17 stack map entries.
;
-; CHECK: .long 12
-; CHECK-LABEL: .long L{{.*}}-_spilledStackMapValue
-; CHECK-NEXT: .short 0
-; CHECK-NEXT: .short 17
+; CHECK-LABEL: .long L{{.*}}-_spilledStackMapValue
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 17
;
; Check that at least one is a spilled entry from RBP.
; Location: Indirect RBP + ...
-; CHECK: .byte 3
-; CHECK-NEXT: .byte 8
-; CHECK-NEXT: .short 6
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
define webkit_jscc void @spilledStackMapValue(i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16) {
entry:
- call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 12, i32 15, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 15, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
ret void
}
; Spill a subregister stackmap operand.
;
-; CHECK: .long 13
-; CHECK-LABEL: .long L{{.*}}-_spillSubReg
-; CHECK-NEXT: .short 0
+; CHECK-LABEL: .long L{{.*}}-_spillSubReg
+; CHECK-NEXT: .short 0
; 1 location
-; CHECK-NEXT: .short 1
+; CHECK-NEXT: .short 1
;
; Check that the subregister operand is a 4-byte spill.
; Location: Indirect, 4-byte, RBP + ...
-; CHECK: .byte 3
-; CHECK-NEXT: .byte 4
-; CHECK-NEXT: .short 6
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short 6
define void @spillSubReg(i64 %arg) #0 {
bb:
br i1 undef, label %bb1, label %bb2
@@ -248,7 +331,7 @@ bb17:
bb60:
tail call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"() nounwind
- tail call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 13, i32 5, i32 %tmp32)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 13, i32 5, i32 %tmp32)
unreachable
bb61:
@@ -258,24 +341,23 @@ bb61:
; Map a single byte subregister. There is no DWARF register number, so
; we expect the register to be encoded with the proper size and spill offset. We don't know which register it will be.
;
-; CHECK: .long 14
-; CHECK-LABEL: .long L{{.*}}-_subRegOffset
-; CHECK-NEXT: .short 0
+; CHECK-LABEL: .long L{{.*}}-_subRegOffset
+; CHECK-NEXT: .short 0
; 2 locations
-; CHECK-NEXT: .short 2
+; CHECK-NEXT: .short 2
;
; Check that the subregister operands are 1-byte spills.
; Location 0: Register, 4-byte, AL
-; CHECK-NEXT: .byte 1
-; CHECK-NEXT: .byte 1
-; CHECK-NEXT: .short 0
-; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
;
; Location 1: Register, 4-byte, BL
-; CHECK-NEXT: .byte 1
-; CHECK-NEXT: .byte 1
-; CHECK-NEXT: .short 3
-; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .short 3
+; CHECK-NEXT: .long 0
define void @subRegOffset(i16 %arg) {
%v = mul i16 %arg, 5
%a0 = trunc i16 %v to i8
@@ -283,10 +365,105 @@ define void @subRegOffset(i16 %arg) {
%arghi = lshr i16 %v, 8
%a1 = trunc i16 %arghi to i8
tail call void asm sideeffect "nop", "~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"() nounwind
- tail call void (i32, i32, ...)* @llvm.experimental.stackmap(i32 14, i32 5, i8 %a0, i8 %a1)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 5, i8 %a0, i8 %a1)
+ ret void
+}
+
+; Map a constant value.
+;
+; CHECK-LABEL: .long L{{.*}}-_liveConstant
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 33
+
+define void @liveConstant() {
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 5, i32 33)
+ ret void
+}
+
+; Directly map an alloca's address.
+;
+; Callsite 16
+; CHECK-LABEL: .long L{{.*}}-_directFrameIdx
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Direct RBP - ofs
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
+
+; Callsite 17
+; CHECK-LABEL: .long L{{.*}}-_directFrameIdx
+; CHECK-NEXT: .short 0
+; 2 locations
+; CHECK-NEXT: .short 2
+; Loc 0: Direct RBP - ofs
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
+; Loc 1: Direct RBP - ofs
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
+define void @directFrameIdx() {
+entry:
+ %metadata1 = alloca i64, i32 3, align 8
+ store i64 11, i64* %metadata1
+ store i64 12, i64* %metadata1
+ store i64 13, i64* %metadata1
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+ %metadata2 = alloca i8, i32 4, align 8
+ %metadata3 = alloca i16, i32 4, align 8
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 17, i32 5, i8* null, i32 0, i8* %metadata2, i16* %metadata3)
+ ret void
+}
+
+; Test a 64-bit ID.
+;
+; CHECK: .quad 4294967295
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 4294967296
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 9223372036854775807
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad -1
+; CHECK-LABEL: .long L{{.*}}-_longid
+define void @longid() {
+entry:
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, i8* null, i32 0)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, i8* null, i32 0)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, i8* null, i32 0)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 -1, i32 0, i8* null, i32 0)
+ ret void
+}
+
+; Map a value when R11 is the only free register.
+; The scratch register should not be used for a live stackmap value.
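+; (R11 is presumably the scratch register for stackmap lowering here, which is
+; why the inline asm below clobbers every other GPR; the expected location is
+; an Indirect fp-relative spill, not a register entry.)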
+;
+; CHECK-LABEL: .long L{{.*}}-_clobberScratch
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Indirect fp - offset
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long -{{[0-9]+}}
+define void @clobberScratch(i32 %a) {
+ tail call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di},~{r8},~{r9},~{r10},~{r12},~{r13},~{r14},~{r15}"() nounwind
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
ret void
}
-declare void @llvm.experimental.stackmap(i32, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i32, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i32, i32, i8*, i32, ...)
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/X86/stackpointer.ll b/test/CodeGen/X86/stackpointer.ll
new file mode 100644
index 000000000000..80bcfbf16743
--- /dev/null
+++ b/test/CodeGen/X86/stackpointer.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux-gnueabi | FileCheck %s
+; RUN: opt < %s -O3 -S -mtriple=x86_64-linux-gnueabi | FileCheck %s --check-prefix=OPT
+
+define i64 @get_stack() nounwind {
+entry:
+; CHECK-LABEL: get_stack:
+; CHECK: movq %rsp, %rax
+ %sp = call i64 @llvm.read_register.i64(metadata !0)
+; OPT: @llvm.read_register.i64
+ ret i64 %sp
+}
+
+define void @set_stack(i64 %val) nounwind {
+entry:
+; CHECK-LABEL: set_stack:
+; CHECK: movq %rdi, %rsp
+ call void @llvm.write_register.i64(metadata !0, i64 %val)
+; OPT: @llvm.write_register.i64
+ ret void
+}
+
+declare i64 @llvm.read_register.i64(metadata) nounwind
+declare void @llvm.write_register.i64(metadata, i64) nounwind
+
+; register unsigned long current_stack_pointer asm("rsp");
+; CHECK-NOT: .asciz "rsp"
+!0 = metadata !{metadata !"rsp\00"}
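+; The OPT lines check that the read/write intrinsics survive -O3 rather than
+; being folded away, and the CHECK-NOT above ensures the register name carried
+; in the metadata is not emitted as a string in the output.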
diff --git a/test/CodeGen/X86/stdcall-notailcall.ll b/test/CodeGen/X86/stdcall-notailcall.ll
index 8f522cda284a..448db4cda17f 100644
--- a/test/CodeGen/X86/stdcall-notailcall.ll
+++ b/test/CodeGen/X86/stdcall-notailcall.ll
@@ -4,10 +4,18 @@
define x86_stdcallcc void @bar(%struct.I* nocapture %this) ssp align 2 {
; CHECK-LABEL: bar:
; CHECK-NOT: jmp
-; CHECK: ret $4
+; CHECK: retl $4
entry:
tail call void @foo()
ret void
}
+define x86_thiscallcc void @test2(%struct.I* %this, i32 %a) {
+; CHECK-LABEL: test2:
+; CHECK: calll _foo
+; CHECK: retl $4
+ tail call void @foo()
+ ret void
+}
+
declare void @foo()
diff --git a/test/CodeGen/X86/stdcall.ll b/test/CodeGen/X86/stdcall.ll
index 73826ed0b29d..3cefe14fe0d5 100644
--- a/test/CodeGen/X86/stdcall.ll
+++ b/test/CodeGen/X86/stdcall.ll
@@ -6,14 +6,14 @@
define internal x86_stdcallcc void @MyFunc() nounwind {
entry:
; CHECK: MyFunc@0:
-; CHECK: ret
+; CHECK: retl
ret void
}
; PR14410
define x86_stdcallcc i32 @"\01DoNotMangle"(i32 %a) {
; CHECK: DoNotMangle:
-; CHECK: ret $4
+; CHECK: retl $4
entry:
ret i32 %a
}
diff --git a/test/CodeGen/X86/sunkaddr-ext.ll b/test/CodeGen/X86/sunkaddr-ext.ll
new file mode 100644
index 000000000000..6d238678ce30
--- /dev/null
+++ b/test/CodeGen/X86/sunkaddr-ext.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s | FileCheck %s
+
+; Test to make sure that if math that can roll over has been used we don't
+; use the potential overflow as the basis for an address calculation later by
+; sinking it into a different basic block.
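+; In this input the two adds of -2147483648 cancel modulo 2^32, so %tmp2 is
+; equal to %arg2 as an i32; the expected code therefore sign-extends the
+; original %arg2 (movslq %esi) rather than reusing an address based on the
+; possibly-overflowing intermediate values.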
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Function Attrs: nounwind ssp uwtable
+define void @test_sink(i8* %arg1, i32 %arg2, i8 %arg3) #0 {
+ %tmp1 = add i32 -2147483648, %arg2
+ %tmp2 = add i32 -2147483648, %tmp1
+ %tmp3 = getelementptr i8* %arg1, i32 %arg2
+ br label %bb1
+
+bb1:
+ %tmp4 = getelementptr i8* %arg1, i32 %tmp2
+ store i8 %arg3, i8* %tmp4
+ ret void
+}
+
+; CHECK-LABEL: test_sink:
+; CHECK: movslq %esi, [[TEMP:%[a-z0-9]+]]
+; CHECK: movb %dl, (%rdi,[[TEMP]])
+; CHECK: retq
diff --git a/test/CodeGen/X86/swizzle-2.ll b/test/CodeGen/X86/swizzle-2.ll
new file mode 100644
index 000000000000..4b1f903c444a
--- /dev/null
+++ b/test/CodeGen/X86/swizzle-2.ll
@@ -0,0 +1,515 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Test that we correctly fold a shuffle that performs a swizzle of another
+; shuffle node according to the rule
+; shuffle (shuffle (x, undef, M0), undef, M1) -> shuffle(x, undef, M2)
+;
+; We only do this if the resulting mask is legal to avoid introducing an
+; illegal shuffle that is expanded into a sub-optimal sequence of instructions
+; during lowering stage.
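+;
+; As a concrete case, swizzle_1 below applies mask [3,2,0,1] twice; composing
+; the masks element-wise (M2[i] = M0[M1[i]]) gives [1,0,3,2], i.e. the pair
+; should fold to the single equivalent shuffle
+;   %r = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; which is what the "Mask: [...]" comments next to each function describe.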
+
+
+define <4 x i32> @swizzle_1(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_1
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
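+; (The pshufd immediate packs the mask two bits per element, low element
+; first: for [1,0,3,2] that is 1 + (0<<2) + (3<<4) + (2<<6) = 177 = 0xB1,
+; printed above as the signed byte -79.)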
+
+
+define <4 x i32> @swizzle_2(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_2
+; Mask: [2,1,3,0]
+; CHECK: pshufd $54
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_3(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_3
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_4(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_4
+; Mask: [3,1,0,2]
+; CHECK: pshufd $-121
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_5(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_5
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_6(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_6
+; Mask: [2,0,1,3]
+; CHECK: pshufd $-46
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_7(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_7
+; Mask: [0,2,3,1]
+; CHECK: pshufd $120
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_8(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_8
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_9(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_9
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_10(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_10
+; Mask: [1,2,0,3]
+; CHECK: pshufd $-55
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_11(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_11
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_12(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_12
+; Mask: [0,3,1,2]
+; CHECK: pshufd $-100
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_13(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_13
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_14(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_14
+; Mask: [3,0,2,1]
+; CHECK: pshufd $99
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_15(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_15
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_16(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_16
+; Mask: [2,1,3,0]
+; CHECK: pshufd $54
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_17(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_17
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_18(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_18
+; Mask: [3,1,0,2]
+; CHECK: pshufd $-121
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_19(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_19
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_20(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_20
+; Mask: [2,0,1,3]
+; CHECK: pshufd $-46
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_21(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_21
+; Mask: [0,2,3,1]
+; CHECK: pshufd $120
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_22(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_22
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_23(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_23
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_24(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_24
+; Mask: [1,2,0,3]
+; CHECK: pshufd $-55
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_25(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_25
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_26(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_26
+; Mask: [0,3,1,2]
+; CHECK: pshufd $-100
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_27(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_27
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_28(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_28
+; Mask: [3,0,2,1]
+; CHECK: pshufd $99
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_29(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_29
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+; Make sure that we combine the shuffles from each function below into a single
+; legal shuffle (either pshuflw or pshufb depending on the masks).
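+; pshuflw and pshufhw only permute within the low or high four words
+; respectively, so the combined mask must keep each element inside its 64-bit
+; half (and leave the other half in order) for them to apply; masks that cross
+; the halves fall back to pshufb.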
+
+define <8 x i16> @swizzle_30(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_30
+; Mask: [1,3,2,0,5,7,6,4]
+; CHECK: pshuflw $45
+; CHECK-NOT: pshufb
+; CHECK-NEXT: ret
+
+
+define <8 x i16> @swizzle_31(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 0, i32 2, i32 1, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 0, i32 2, i32 1, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_31
+; Mask: [1,3,2,0,4,5,6,7]
+; CHECK: pshuflw $45
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_32(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_32
+; Mask: [2,3,0,1,4,5,6,7] --> equivalent to pshufd mask [1,0,2,3]
+; CHECK: pshufd $-31
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+define <8 x i16> @swizzle_33(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 5, i32 7, i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 5, i32 7, i32 2, i32 3, i32 1, i32 0>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_33
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_34(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 7, i32 6, i32 5, i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 7, i32 6, i32 5, i32 1, i32 2, i32 0, i32 3>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_34
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_35(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 1, i32 3, i32 0, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_35
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_36(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 7, i32 5, i32 0, i32 1, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 7, i32 5, i32 0, i32 1, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_36
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_37(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 7, i32 4, i32 6, i32 5>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_37
+; Mask: [0,1,2,3,4,7,6,5]
+; CHECK: pshufhw $108
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_38(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 5, i32 6, i32 4, i32 7, i32 0, i32 2, i32 1, i32 3>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 5, i32 6, i32 4, i32 7, i32 0, i32 2, i32 1, i32 3>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_38
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_39(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 5, i32 4, i32 6, i32 7, i32 3, i32 2, i32 1, i32 0>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 5, i32 4, i32 6, i32 7, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_39
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_40(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 6, i32 4, i32 7, i32 5, i32 1, i32 0, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 6, i32 4, i32 7, i32 5, i32 1, i32 0, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_40
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_41(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 6, i32 7, i32 5, i32 4, i32 0, i32 1, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 6, i32 7, i32 5, i32 4, i32 0, i32 1, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_41
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_42(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 7, i32 6, i32 4, i32 5>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 7, i32 6, i32 4, i32 5>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_42
+; Mask: [0,1,2,3,5,4,7,6]
+; CHECK: pshufhw $-79
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
diff --git a/test/CodeGen/X86/swizzle-avx2.ll b/test/CodeGen/X86/swizzle-avx2.ll
new file mode 100644
index 000000000000..29dfa6c2dcc1
--- /dev/null
+++ b/test/CodeGen/X86/swizzle-avx2.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s
+
+; Test that we correctly fold a shuffle that performs a swizzle of another
+; shuffle node according to the rule
+; shuffle (shuffle (x, undef, M0), undef, M1) -> shuffle(x, undef, M2)
+;
+; We only do this if the resulting mask is legal to avoid introducing an
+; illegal shuffle that is expanded into a sub-optimal sequence of instructions
+; during lowering stage.
+
+; Check that we produce a single vector permute / shuffle in all cases.
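+;
+; For instance, swizzle_2 applies mask [6,7,4,5,0,1,2,3] twice, which composes
+; to [2,3,0,1,6,7,4,5]; that only swaps the two 64-bit halves inside each
+; 128-bit lane, so it can be matched as the lane-wise vpshufd $78 checked
+; below instead of a cross-lane vpermd.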
+
+define <8 x i32> @swizzle_1(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_1
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_2(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_2
+; CHECK: vpshufd $78
+; CHECK-NOT: vpermd
+; CHECK-NOT: vpshufd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_3(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_3
+; CHECK: vpshufd $78
+; CHECK-NOT: vpermd
+; CHECK-NOT: vpshufd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_4(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_4
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_5(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_5
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_6(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_6
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_7(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_7
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
diff --git a/test/CodeGen/X86/tbm-intrinsics-x86_64.ll b/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
index 1bc617541edb..1beee72dfd0a 100644
--- a/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
@@ -34,7 +34,7 @@ declare i64 @llvm.x86.tbm.bextri.u64(i64, i64) nounwind readnone
define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind readonly {
entry:
- ; CHECK-LABEl: test_x86_tbm_bextri_u64_m:
+ ; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
; CHECK-NOT: mov
; CHECK: bextr $
%tmp1 = load i64* %a, align 8
diff --git a/test/CodeGen/X86/testb-je-fusion.ll b/test/CodeGen/X86/testb-je-fusion.ll
new file mode 100644
index 000000000000..9e946ae4ca33
--- /dev/null
+++ b/test/CodeGen/X86/testb-je-fusion.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s
+
+; testb should be scheduled right before je to enable macro-fusion.
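+; (On current Intel cores a test/cmp fuses with an immediately following
+; conditional branch into a single macro-op, so any instruction scheduled
+; between the testb and the je would defeat the fusion this test expects.)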
+
+; CHECK: testb $2, %{{[abcd]}}h
+; CHECK-NEXT: je
+
+define i32 @check_flag(i32 %flags, ...) nounwind {
+entry:
+ %and = and i32 %flags, 512
+ %tobool = icmp eq i32 %and, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ br label %if.end
+
+if.end:
+ %hasflag = phi i32 [ 1, %if.then ], [ 0, %entry ]
+ ret i32 %hasflag
+}
diff --git a/test/CodeGen/X86/tls.ll b/test/CodeGen/X86/tls.ll
index 76a840260b9a..75e7fc4f6bb3 100644
--- a/test/CodeGen/X86/tls.ll
+++ b/test/CodeGen/X86/tls.ll
@@ -2,6 +2,8 @@
; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu | FileCheck -check-prefix=X64_LINUX %s
; RUN: llc < %s -march=x86 -mtriple=x86-pc-win32 | FileCheck -check-prefix=X32_WIN %s
; RUN: llc < %s -march=x86-64 -mtriple=x86_64-pc-win32 | FileCheck -check-prefix=X64_WIN %s
+; RUN: llc < %s -march=x86 -mtriple=x86-pc-windows-gnu | FileCheck -check-prefix=MINGW32 %s
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-pc-windows-gnu | FileCheck -check-prefix=X64_WIN %s
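+; The MINGW32 pattern checked below is the usual Windows TLS access sequence:
+; __tls_index holds this module's index into the per-thread TLS array, the
+; 32-bit TEB exposes that array at %fs:44, and _i*@SECREL32 is the variable's
+; section-relative offset within the module's TLS block.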
@i1 = thread_local global i32 15
@i2 = external thread_local global i32
@@ -30,6 +32,12 @@ define i32 @f1() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: movl i1@SECREL32(%rax), %eax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f1:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movl _i1@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
%tmp1 = load i32* @i1
@@ -57,6 +65,12 @@ define i32* @f2() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: leaq i1@SECREL32(%rax), %rax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f2:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: leal _i1@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
ret i32* @i1
@@ -83,6 +97,12 @@ define i32 @f3() nounwind {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: movl i2@SECREL32(%rax), %eax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f3:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movl _i2@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
%tmp1 = load i32* @i2
@@ -110,6 +130,12 @@ define i32* @f4() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: leaq i2@SECREL32(%rax), %rax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f4:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: leal _i2@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
ret i32* @i2
@@ -134,6 +160,12 @@ define i32 @f5() nounwind {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: movl i3@SECREL32(%rax), %eax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f5:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movl _i3@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
%tmp1 = load i32* @i3
@@ -161,6 +193,12 @@ define i32* @f6() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: leaq i3@SECREL32(%rax), %rax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f6:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: leal _i3@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
ret i32* @i3
@@ -173,6 +211,12 @@ define i32 @f7() {
; X64_LINUX-LABEL: f7:
; X64_LINUX: movl %fs:i4@TPOFF, %eax
; X64_LINUX-NEXT: ret
+; MINGW32-LABEL: _f7:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movl _i4@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
%tmp1 = load i32* @i4
@@ -188,6 +232,12 @@ define i32* @f8() {
; X64_LINUX: movq %fs:0, %rax
; X64_LINUX-NEXT: leaq i4@TPOFF(%rax), %rax
; X64_LINUX-NEXT: ret
+; MINGW32-LABEL: _f8:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: leal _i4@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
ret i32* @i4
@@ -200,6 +250,12 @@ define i32 @f9() {
; X64_LINUX-LABEL: f9:
; X64_LINUX: movl %fs:i5@TPOFF, %eax
; X64_LINUX-NEXT: ret
+; MINGW32-LABEL: _f9:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movl _i5@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
%tmp1 = load i32* @i5
@@ -215,6 +271,12 @@ define i32* @f10() {
; X64_LINUX: movq %fs:0, %rax
; X64_LINUX-NEXT: leaq i5@TPOFF(%rax), %rax
; X64_LINUX-NEXT: ret
+; MINGW32-LABEL: _f10:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: leal _i5@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
ret i32* @i5
@@ -239,6 +301,12 @@ define i16 @f11() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: movzwl s1@SECREL32(%rax), %eax
; X64_WIN: ret
+; MINGW32-LABEL: _f11:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movzwl _s1@SECREL32(%eax), %eax
+; MINGW32: retl
entry:
%tmp1 = load i16* @s1
@@ -264,6 +332,13 @@ define i32 @f12() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: movswl s1@SECREL32(%rax), %eax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f12:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movswl _s1@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
+
entry:
%tmp1 = load i16* @s1
@@ -290,6 +365,12 @@ define i8 @f13() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: movb b1@SECREL32(%rax), %al
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f13:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movb _b1@SECREL32(%eax), %al
+; MINGW32-NEXT: retl
entry:
%tmp1 = load i8* @b1
@@ -315,6 +396,12 @@ define i32 @f14() {
; X64_WIN-NEXT: movq (%rcx,%rax,8), %rax
; X64_WIN-NEXT: movsbl b1@SECREL32(%rax), %eax
; X64_WIN-NEXT: ret
+; MINGW32-LABEL: _f14:
+; MINGW32: movl __tls_index, %eax
+; MINGW32-NEXT: movl %fs:44, %ecx
+; MINGW32-NEXT: movl (%ecx,%eax,4), %eax
+; MINGW32-NEXT: movsbl _b1@SECREL32(%eax), %eax
+; MINGW32-NEXT: retl
entry:
%tmp1 = load i8* @b1
diff --git a/test/CodeGen/X86/v2f32.ll b/test/CodeGen/X86/v2f32.ll
index f2bebf57d4dc..dab5e7bc944c 100644
--- a/test/CodeGen/X86/v2f32.ll
+++ b/test/CodeGen/X86/v2f32.ll
@@ -24,9 +24,9 @@ define void @test1(<2 x float> %Q, float *%P2) nounwind {
; W64-NEXT: ret
; X32-LABEL: test1:
+; X32-NEXT: movl 4(%esp), %eax
; X32-NEXT: pshufd $1, %xmm0, %xmm1
; X32-NEXT: addss %xmm0, %xmm1
-; X32-NEXT: movl 4(%esp), %eax
; X32-NEXT: movss %xmm1, (%eax)
; X32-NEXT: ret
}
diff --git a/test/CodeGen/X86/v4i32load-crash.ll b/test/CodeGen/X86/v4i32load-crash.ll
index 052c4c3c61b8..3e7f9e63c9a8 100644
--- a/test/CodeGen/X86/v4i32load-crash.ll
+++ b/test/CodeGen/X86/v4i32load-crash.ll
@@ -1,10 +1,11 @@
-; RUN: llc --mcpu=x86-64 --mattr=ssse3 < %s
+; RUN: llc --march=x86 --mcpu=x86-64 --mattr=ssse3 < %s
+; RUN: llc --march=x86-64 --mcpu=x86-64 --mattr=ssse3 < %s
;PR18045:
;Issue of selection for 'v4i32 load'.
;This instruction is not legal for X86 CPUs with sse < 'sse4.1'.
;This node was generated by X86ISelLowering.cpp, EltsFromConsecutiveLoads
-;static function after legilize stage.
+;static function after legalize stage.
@e = external global [4 x i32], align 4
@f = external global [4 x i32], align 4
diff --git a/test/CodeGen/X86/vbinop-simplify-bug.ll b/test/CodeGen/X86/vbinop-simplify-bug.ll
new file mode 100644
index 000000000000..3a89cd7e636a
--- /dev/null
+++ b/test/CodeGen/X86/vbinop-simplify-bug.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=sse2 -mcpu=corei7 -o /dev/null
+
+; Revision 199135 introduced a wrong check in method
+; DAGCombiner::SimplifyVBinOp in an attempt to refactor some code
+; using the new method 'BuildVectorSDNode::isConstant' when possible.
+;
+; However the modified code in method SimplifyVBinOp now wrongly
+; checks that the operands of a vector bin-op are both constants.
+;
+; With that wrong change, this test started failing because of a
+; 'fatal error in the backend':
+; Cannot select: 0x2e329d0: v4i32 = BUILD_VECTOR 0x2e2ea00, 0x2e2ea00, 0x2e2ea00, 0x2e2ea00
+; 0x2e2ea00: i32 = Constant<1> [ID=4]
+; 0x2e2ea00: i32 = Constant<1> [ID=4]
+; 0x2e2ea00: i32 = Constant<1> [ID=4]
+; 0x2e2ea00: i32 = Constant<1> [ID=4]
+
+define <8 x i32> @reduced_test_case() {
+ %Shuff = shufflevector <8 x i32> zeroinitializer, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %B23 = sub <8 x i32> %Shuff, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <8 x i32> %B23
+}
+
diff --git a/test/CodeGen/X86/vec_cast2.ll b/test/CodeGen/X86/vec_cast2.ll
index 5f6e7a853a33..1a6c05dd9f41 100644
--- a/test/CodeGen/X86/vec_cast2.ll
+++ b/test/CodeGen/X86/vec_cast2.ll
@@ -1,8 +1,20 @@
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
;CHECK-LABEL: foo1_8:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo1_8:
+;CHECK-WIDE: vpmovzxbd %xmm0, %xmm1
+;CHECK-WIDE-NEXT: vpslld $24, %xmm1, %xmm1
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm1, %xmm1
+;CHECK-WIDE-NEXT: vpshufb {{.*}}, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+;CHECK-WIDE-NEXT: vcvtdq2ps %ymm0, %ymm0
+;CHECK-WIDE-NEXT: ret
define <8 x float> @foo1_8(<8 x i8> %src) {
%res = sitofp <8 x i8> %src to <8 x float>
ret <8 x float> %res
@@ -11,6 +23,13 @@ define <8 x float> @foo1_8(<8 x i8> %src) {
;CHECK-LABEL: foo1_4:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo1_4:
+;CHECK-WIDE: vpmovzxbd %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
+;CHECK-WIDE-NEXT: ret
define <4 x float> @foo1_4(<4 x i8> %src) {
%res = sitofp <4 x i8> %src to <4 x float>
ret <4 x float> %res
@@ -19,6 +38,10 @@ define <4 x float> @foo1_4(<4 x i8> %src) {
;CHECK-LABEL: foo2_8:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo2_8:
+;CHECK-WIDE: vcvtdq2ps %ymm{{.*}}, %ymm{{.*}}
+;CHECK-WIDE: ret
define <8 x float> @foo2_8(<8 x i8> %src) {
%res = uitofp <8 x i8> %src to <8 x float>
ret <8 x float> %res
@@ -27,6 +50,10 @@ define <8 x float> @foo2_8(<8 x i8> %src) {
;CHECK-LABEL: foo2_4:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo2_4:
+;CHECK-WIDE: vcvtdq2ps %xmm{{.*}}, %xmm{{.*}}
+;CHECK-WIDE: ret
define <4 x float> @foo2_4(<4 x i8> %src) {
%res = uitofp <4 x i8> %src to <4 x float>
ret <4 x float> %res
diff --git a/test/CodeGen/X86/vec_extract-sse4.ll b/test/CodeGen/X86/vec_extract-sse4.ll
index 3cb519adf4f8..747c8a8e8d02 100644
--- a/test/CodeGen/X86/vec_extract-sse4.ll
+++ b/test/CodeGen/X86/vec_extract-sse4.ll
@@ -1,10 +1,13 @@
-; RUN: llc < %s -mcpu=corei7 -march=x86 -mattr=+sse4.1 -o %t
-; RUN: not grep extractps %t
-; RUN: not grep pextrd %t
-; RUN: not grep pshufd %t
-; RUN: not grep movss %t
+; RUN: llc < %s -mcpu=corei7 -march=x86 -mattr=+sse4.1 | FileCheck %s
define void @t1(float* %R, <4 x float>* %P1) nounwind {
+; CHECK-LABEL: @t1
+; CHECK: movl 4(%esp), %[[R0:e[abcd]x]]
+; CHECK-NEXT: movl 8(%esp), %[[R1:e[abcd]x]]
+; CHECK-NEXT: movl 12(%[[R1]]), %[[R2:e[abcd]x]]
+; CHECK-NEXT: movl %[[R2]], (%[[R0]])
+; CHECK-NEXT: retl
+
%X = load <4 x float>* %P1
%tmp = extractelement <4 x float> %X, i32 3
store float %tmp, float* %R
@@ -12,12 +15,24 @@ define void @t1(float* %R, <4 x float>* %P1) nounwind {
}
define float @t2(<4 x float>* %P1) nounwind {
+; CHECK-LABEL: @t2
+; CHECK: movl 4(%esp), %[[R0:e[abcd]x]]
+; CHECK-NEXT: flds 8(%[[R0]])
+; CHECK-NEXT: retl
+
%X = load <4 x float>* %P1
%tmp = extractelement <4 x float> %X, i32 2
ret float %tmp
}
define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
+; CHECK-LABEL: @t3
+; CHECK: movl 4(%esp), %[[R0:e[abcd]x]]
+; CHECK-NEXT: movl 8(%esp), %[[R1:e[abcd]x]]
+; CHECK-NEXT: movl 12(%[[R1]]), %[[R2:e[abcd]x]]
+; CHECK-NEXT: movl %[[R2]], (%[[R0]])
+; CHECK-NEXT: retl
+
%X = load <4 x i32>* %P1
%tmp = extractelement <4 x i32> %X, i32 3
store i32 %tmp, i32* %R
@@ -25,6 +40,11 @@ define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
}
define i32 @t4(<4 x i32>* %P1) nounwind {
+; CHECK-LABEL: @t4
+; CHECK: movl 4(%esp), %[[R0:e[abcd]x]]
+; CHECK-NEXT: movl 12(%[[R0]]), %eax
+; CHECK-NEXT: retl
+
%X = load <4 x i32>* %P1
%tmp = extractelement <4 x i32> %X, i32 3
ret i32 %tmp
diff --git a/test/CodeGen/X86/vec_fabs.ll b/test/CodeGen/X86/vec_fabs.ll
index 82517cb9a5a0..4c14a9602d41 100644
--- a/test/CodeGen/X86/vec_fabs.ll
+++ b/test/CodeGen/X86/vec_fabs.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
define <2 x double> @fabs_v2f64(<2 x double> %p)
{
- ; CHECK: fabs_v2f64
+ ; CHECK-LABEL: fabs_v2f64
; CHECK: vandps
%t = call <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
ret <2 x double> %t
@@ -12,7 +12,7 @@ declare <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
define <4 x float> @fabs_v4f32(<4 x float> %p)
{
- ; CHECK: fabs_v4f32
+ ; CHECK-LABEL: fabs_v4f32
; CHECK: vandps
%t = call <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
ret <4 x float> %t
@@ -21,7 +21,7 @@ declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
define <4 x double> @fabs_v4f64(<4 x double> %p)
{
- ; CHECK: fabs_v4f64
+ ; CHECK-LABEL: fabs_v4f64
; CHECK: vandps
%t = call <4 x double> @llvm.fabs.v4f64(<4 x double> %p)
ret <4 x double> %t
@@ -30,9 +30,29 @@ declare <4 x double> @llvm.fabs.v4f64(<4 x double> %p)
define <8 x float> @fabs_v8f32(<8 x float> %p)
{
- ; CHECK: fabs_v8f32
+ ; CHECK-LABEL: fabs_v8f32
; CHECK: vandps
%t = call <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
ret <8 x float> %t
}
declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
+
+; PR20354: when generating code for a vector fabs op,
+; make sure the correct mask is used for all vector elements.
+; CHECK-LABEL: .LCPI4_0:
+; CHECK-NEXT: .long 2147483647
+; CHECK-NEXT: .long 2147483647
+define i64 @fabs_v2f32(<2 x float> %v) {
+; CHECK-LABEL: fabs_v2f32:
+; CHECK: movabsq $-9223372034707292160, %[[R:r[^ ]+]]
+; CHECK-NEXT: vmovq %[[R]], %[[X:xmm[0-9]+]]
+; CHECK-NEXT: vandps {{.*}}.LCPI4_0{{.*}}, %[[X]], %[[X]]
+; CHECK-NEXT: vmovq %[[X]], %rax
+; CHECK-NEXT: retq
+ %highbits = bitcast i64 9223372039002259456 to <2 x float> ; 0x8000_0000_8000_0000
+ %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %highbits)
+ %ret = bitcast <2 x float> %fabs to i64
+ ret i64 %ret
+}
+
+declare <2 x float> @llvm.fabs.v2f32(<2 x float> %p)
diff --git a/test/CodeGen/X86/vec_fpext.ll b/test/CodeGen/X86/vec_fpext.ll
index 7ec07ae0f959..b882a5e272b5 100644
--- a/test/CodeGen/X86/vec_fpext.ll
+++ b/test/CodeGen/X86/vec_fpext.ll
@@ -3,6 +3,8 @@
; PR11674
define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
+; CHECK-LABEL: fpext_frommem:
+; AVX-LABEL: fpext_frommem:
entry:
; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
; AVX: vcvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
@@ -13,6 +15,8 @@ entry:
}
define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
+; CHECK-LABEL: fpext_frommem4:
+; AVX-LABEL: fpext_frommem4:
entry:
; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}}
@@ -24,6 +28,8 @@ entry:
}
define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
+; CHECK-LABEL: fpext_frommem8:
+; AVX-LABEL: fpext_frommem8:
entry:
; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}}
diff --git a/test/CodeGen/X86/vec_return.ll b/test/CodeGen/X86/vec_return.ll
index 2cf5dc6caa77..f7fcd032cab3 100644
--- a/test/CodeGen/X86/vec_return.ll
+++ b/test/CodeGen/X86/vec_return.ll
@@ -10,7 +10,7 @@ define <2 x double> @test() {
; Prefer a constant pool load here.
; CHECK: test2
; CHECK-NOT: shuf
-; CHECK: movaps {{.*}}CPI
+; CHECK: movaps {{.*}}{{CPI|__xmm@}}
define <4 x i32> @test2() nounwind {
ret <4 x i32> < i32 0, i32 0, i32 1, i32 0 >
}
diff --git a/test/CodeGen/X86/vec_round.ll b/test/CodeGen/X86/vec_round.ll
index baa2f58631d4..9258f9ee522e 100644
--- a/test/CodeGen/X86/vec_round.ll
+++ b/test/CodeGen/X86/vec_round.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @use(<2 x double>)
; CHECK-LABEL: @test
-; CHECK callq round
+; CHECK: callq round
; Function Attrs: nounwind uwtable
define void @test() {
diff --git a/test/CodeGen/X86/vec_setcc-2.ll b/test/CodeGen/X86/vec_setcc-2.ll
new file mode 100644
index 000000000000..ef916dcd709e
--- /dev/null
+++ b/test/CodeGen/X86/vec_setcc-2.ll
@@ -0,0 +1,96 @@
+; RUN: llc < %s -o - -mcpu=generic -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -o - -mcpu=generic -mtriple=x86_64-apple-darwin -mattr=+sse4.2 | FileCheck %s
+
+; For a setult against a constant, turn it into a setule and lower via psubusw.
+
+define void @loop_no_const_reload(<2 x i64>* %in, <2 x i64>* %out, i32 %n) {
+; CHECK: .short 25
+; CHECK-NEXT: .short 25
+; CHECK-NEXT: .short 25
+; CHECK-NEXT: .short 25
+; CHECK-NEXT: .short 25
+; CHECK-NEXT: .short 25
+; CHECK-NEXT: .short 25
+; CHECK-NEXT: .short 25
+; CHECK-LABEL: loop_no_const_reload:
+; CHECK: psubusw
+
+; Constant is no longer clobbered so no need to reload it in the loop.
+
+; CHECK-NOT: movdqa {{%xmm[0-9]+}}, {{%xmm[0-9]+}}
+
+entry:
+ %cmp9 = icmp eq i32 %n, 0
+ br i1 %cmp9, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx1 = getelementptr inbounds <2 x i64>* %in, i64 %indvars.iv
+ %arrayidx1.val = load <2 x i64>* %arrayidx1, align 16
+ %0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
+ %cmp.i.i = icmp ult <8 x i16> %0, <i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
+ %sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
+ %1 = bitcast <8 x i16> %sext.i.i to <2 x i64>
+ %arrayidx5 = getelementptr inbounds <2 x i64>* %out, i64 %indvars.iv
+ store <2 x i64> %1, <2 x i64>* %arrayidx5, align 16
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; Be careful if decrementing the constant would underflow.
+
+define void @loop_const_folding_underflow(<2 x i64>* %in, <2 x i64>* %out, i32 %n) {
+; CHECK-NOT: .short 25
+; CHECK-LABEL: loop_const_folding_underflow:
+; CHECK-NOT: psubusw
+entry:
+ %cmp9 = icmp eq i32 %n, 0
+ br i1 %cmp9, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx1 = getelementptr inbounds <2 x i64>* %in, i64 %indvars.iv
+ %arrayidx1.val = load <2 x i64>* %arrayidx1, align 16
+ %0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
+ %cmp.i.i = icmp ult <8 x i16> %0, <i16 0, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
+ %sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
+ %1 = bitcast <8 x i16> %sext.i.i to <2 x i64>
+ %arrayidx5 = getelementptr inbounds <2 x i64>* %out, i64 %indvars.iv
+ store <2 x i64> %1, <2 x i64>* %arrayidx5, align 16
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+; Test for PSUBUSB
+
+define <16 x i8> @test_ult_byte(<16 x i8> %a) {
+; CHECK: .space 16,10
+; CHECK-LABEL: test_ult_byte:
+; CHECK: psubus
+entry:
+ %icmp = icmp ult <16 x i8> %a, <i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11>
+ %sext = sext <16 x i1> %icmp to <16 x i8>
+ ret <16 x i8> %sext
+}
+
+; Only do this when we can turn the comparison into a setule. I.e. not for
+; register operands.
+
+define <8 x i16> @test_ult_register(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_ult_register:
+; CHECK-NOT: psubus
+entry:
+ %icmp = icmp ult <8 x i16> %a, %b
+ %sext = sext <8 x i1> %icmp to <8 x i16>
+ ret <8 x i16> %sext
+}
diff --git a/test/CodeGen/X86/vec_setcc.ll b/test/CodeGen/X86/vec_setcc.ll
index fc8a56de7917..322dbae0c89f 100644
--- a/test/CodeGen/X86/vec_setcc.ll
+++ b/test/CodeGen/X86/vec_setcc.ll
@@ -42,12 +42,9 @@ define <8 x i16> @v8i16_icmp_uge(<8 x i16> %a, <8 x i16> %b) nounwind readnone s
%2 = sext <8 x i1> %1 to <8 x i16>
ret <8 x i16> %2
; SSE2-LABEL: v8i16_icmp_uge:
-; SSE2: movdqa {{.*}}(%rip), %xmm2
-; SEE2: pxor %xmm2, %xmm0
-; SSE2: pxor %xmm1, %xmm2
-; SSE2: pcmpgtw %xmm0, %xmm2
-; SSE2: pcmpeqd %xmm0, %xmm0
-; SSE2: pxor %xmm2, %xmm0
+; SSE2: psubusw %xmm0, %xmm1
+; SSE2: pxor %xmm0, %xmm0
+; SSE2: pcmpeqw %xmm1, %xmm0
; SSE41-LABEL: v8i16_icmp_uge:
; SSE41: pmaxuw %xmm0, %xmm1
@@ -63,12 +60,9 @@ define <8 x i16> @v8i16_icmp_ule(<8 x i16> %a, <8 x i16> %b) nounwind readnone s
%2 = sext <8 x i1> %1 to <8 x i16>
ret <8 x i16> %2
; SSE2-LABEL: v8i16_icmp_ule:
-; SSE2: movdqa {{.*}}(%rip), %xmm2
-; SSE2: pxor %xmm2, %xmm1
-; SSE2: pxor %xmm2, %xmm0
-; SSE2: pcmpgtw %xmm1, %xmm0
-; SSE2: pcmpeqd %xmm1, %xmm1
-; SSE2: pxor %xmm0, %xmm1
+; SSE2: psubusw %xmm1, %xmm0
+; SSE2: pxor %xmm1, %xmm1
+; SSE2: pcmpeqw %xmm0, %xmm1
; SSE2: movdqa %xmm1, %xmm0
; SSE41-LABEL: v8i16_icmp_ule:
diff --git a/test/CodeGen/X86/vec_shift5.ll b/test/CodeGen/X86/vec_shift5.ll
new file mode 100644
index 000000000000..2e98003ae1cd
--- /dev/null
+++ b/test/CodeGen/X86/vec_shift5.ll
@@ -0,0 +1,160 @@
+; RUN: llc -march=x86-64 -mcpu=corei7 -mattr=-sse4.1 < %s | FileCheck %s
+
+; Verify that we correctly fold target-specific packed vector shifts by
+; immediate count into a simple build_vector when all the elements of the
+; input vector to the packed shift are constants or undef.
+
+define <8 x i16> @test1() {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> <i16 1, i16 2, i16 4, i16 8, i16 1, i16 2, i16 4, i16 8>, i32 3)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test1
+; CHECK-NOT: psll
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test2() {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test2
+; CHECK-NOT: psrl
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test3() {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: psra
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test4() {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 1, i32 2, i32 4, i32 8>, i32 3)
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test4
+; CHECK-NOT: psll
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test5() {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test5
+; CHECK-NOT: psrl
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test6() {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test6
+; CHECK-NOT: psra
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <2 x i64> @test7() {
+ %1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 1, i64 2>, i32 3)
+ ret <2 x i64> %1
+}
+; CHECK-LABEL: test7
+; CHECK-NOT: psll
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <2 x i64> @test8() {
+ %1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 8, i64 16>, i32 3)
+ ret <2 x i64> %1
+}
+; CHECK-LABEL: test8
+; CHECK-NOT: psrl
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test9() {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test9
+; CHECK-NOT: psra
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test10() {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: psra
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <2 x i64> @test11() {
+ %1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
+ ret <2 x i64> %1
+}
+; CHECK-LABEL: test11
+; CHECK-NOT: psrl
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test12() {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test12
+; CHECK-NOT: psra
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test13() {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: psrl
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test14() {
+ %1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: psrl
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test15() {
+ %1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test15
+; CHECK-NOT: psll
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+define <2 x i64> @test16() {
+ %1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
+ ret <2 x i64> %1
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: psll
+; CHECK: movaps
+; CHECK-NEXT: ret
+
+
+declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)
+declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32)
+declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32)
+declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32)
+declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32)
+declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32)
+declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32)
+declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32)
+
diff --git a/test/CodeGen/X86/vec_shift6.ll b/test/CodeGen/X86/vec_shift6.ll
new file mode 100644
index 000000000000..df2d9cb04687
--- /dev/null
+++ b/test/CodeGen/X86/vec_shift6.ll
@@ -0,0 +1,134 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2 -check-prefix=AVX2ONLY
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2 -check-prefix=AVX512
+
+
+; Verify that we don't scalarize a packed vector shift left of 16-bit
+; signed integers if the amount is a constant build_vector.
+; Check that we produce a SSE2 packed integer multiply (pmullw) instead.
+
+define <8 x i16> @test1(<8 x i16> %a) {
+ %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
+ ret <8 x i16> %shl
+}
+; CHECK-LABEL: test1
+; CHECK: pmullw
+; CHECK-NEXT: ret
+
+
+define <8 x i16> @test2(<8 x i16> %a) {
+ %shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
+ ret <8 x i16> %shl
+}
+; CHECK-LABEL: test2
+; CHECK: pmullw
+; CHECK-NEXT: ret
+
+
+; Verify that a vector shift left of 32-bit signed integers is simply expanded
+; into a SSE4.1 pmulld (instead of cvttps2dq + pmulld) if the vector of shift
+; counts is a constant build_vector.
+
+define <4 x i32> @test3(<4 x i32> %a) {
+ %shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
+ ret <4 x i32> %shl
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: cvttps2dq
+; SSE: pmulld
+; AVX2: vpsllvd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4(<4 x i32> %a) {
+ %shl = shl <4 x i32> %a, <i32 0, i32 0, i32 1, i32 1>
+ ret <4 x i32> %shl
+}
+; CHECK-LABEL: test4
+; CHECK-NOT: cvttps2dq
+; SSE: pmulld
+; AVX2: vpsllvd
+; CHECK-NEXT: ret
+
+
+; If we have AVX/SSE2 but not AVX2, verify that the following shift is split
+; into two pmullw instructions. With AVX2, the test case below would produce
+; a single vpmullw.
+
+define <16 x i16> @test5(<16 x i16> %a) {
+ %shl = shl <16 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
+ ret <16 x i16> %shl
+}
+; CHECK-LABEL: test5
+; SSE: pmullw
+; SSE-NEXT: pmullw
+; AVX2: vpmullw
+; AVX2-NOT: vpmullw
+; CHECK: ret
+
+
+; If we have AVX/SSE4.1 but not AVX2, verify that the following shift is split
+; into two pmulld instructions. With AVX2, the test case below would produce
+; a single vpsllvd instead.
+
+define <8 x i32> @test6(<8 x i32> %a) {
+ %shl = shl <8 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
+ ret <8 x i32> %shl
+}
+; CHECK-LABEL: test6
+; SSE: pmulld
+; SSE-NEXT: pmulld
+; AVX2: vpsllvd
+; CHECK: ret
+
+
+; With AVX2 and AVX512, the test case below should produce a sequence of
+; two vpmullw instructions. With SSE2, instead, the shift is split into
+; four parts and each part is converted into a pmullw.
+
+define <32 x i16> @test7(<32 x i16> %a) {
+ %shl = shl <32 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
+ ret <32 x i16> %shl
+}
+; CHECK-LABEL: test7
+; SSE: pmullw
+; SSE-NEXT: pmullw
+; SSE-NEXT: pmullw
+; SSE-NEXT: pmullw
+; AVX2: vpmullw
+; AVX2-NEXT: vpmullw
+; CHECK: ret
+
+
+; Similar to test7; the difference is that with AVX512 support
+; we only produce a single vpsllvd/vpsllvq instead of a pair of vpsllvd/vpsllvq.
+
+define <16 x i32> @test8(<16 x i32> %a) {
+ %shl = shl <16 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
+ ret <16 x i32> %shl
+}
+; CHECK-LABEL: test8
+; SSE: pmulld
+; SSE-NEXT: pmulld
+; SSE-NEXT: pmulld
+; SSE-NEXT: pmulld
+; AVX2ONLY: vpsllvd
+; AVX2ONLY-NEXT: vpsllvd
+; AVX512: vpsllvd
+; AVX512-NOT: vpsllvd
+; CHECK: ret
+
+
+; The shift from 'test9' gets scalarized if we don't have AVX2/AVX512f support.
+
+define <8 x i64> @test9(<8 x i64> %a) {
+ %shl = shl <8 x i64> %a, <i64 1, i64 1, i64 2, i64 3, i64 1, i64 1, i64 2, i64 3>
+ ret <8 x i64> %shl
+}
+; CHECK-LABEL: test9
+; AVX2ONLY: vpsllvq
+; AVX2ONLY-NEXT: vpsllvq
+; AVX512: vpsllvq
+; AVX512-NOT: vpsllvq
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/vec_shuf-insert.ll b/test/CodeGen/X86/vec_shuf-insert.ll
new file mode 100644
index 000000000000..2e1a1d61309f
--- /dev/null
+++ b/test/CodeGen/X86/vec_shuf-insert.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux -mcpu=corei7-avx | FileCheck %s
+
+; These tests check that an insert_subvector which replaces one of the halves
+; of a concat_vectors is optimized into a single vinsertf128.
+
+
+declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8)
+
+define <8 x float> @lower_half(<4 x float> %v1, <4 x float> %v2, <4 x float> %v3) {
+ %1 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %1, <4 x float> %v3, i8 0)
+ ret <8 x float> %2
+
+; CHECK-LABEL: lower_half
+; CHECK-NOT: vinsertf128
+; CHECK: vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; CHECK-NEXT: ret
+}
+
+define <8 x float> @upper_half(<4 x float> %v1, <4 x float> %v2, <4 x float> %v3) {
+ %1 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %1, <4 x float> %v3, i8 1)
+ ret <8 x float> %2
+
+; CHECK-LABEL: upper_half
+; CHECK-NOT: vinsertf128
+; CHECK: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: ret
+}
diff --git a/test/CodeGen/X86/vec_shuffle-40.ll b/test/CodeGen/X86/vec_shuffle-40.ll
new file mode 100644
index 000000000000..75b45e3df111
--- /dev/null
+++ b/test/CodeGen/X86/vec_shuffle-40.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 | FileCheck %s
+
+define void @shuffle_v16i16(<16 x i16>* %a) {
+; CHECK-LABEL: shuffle_v16i16:
+; CHECK: vpshufb {{.*}}%ymm
+; CHECK-NOT: vpshufb {{.*}}%xmm
+entry:
+ %0 = load <16 x i16>* %a, align 32
+ %shuffle = shufflevector <16 x i16> %0, <16 x i16> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+ store <16 x i16> %shuffle, <16 x i16>* %a, align 32
+ ret void
+}
+
+define void @shuffle_v16i16_lanecrossing(<16 x i16>* %a) {
+; CHECK-LABEL: shuffle_v16i16_lanecrossing:
+; CHECK-NOT: vpshufb {{.*}}%ymm
+entry:
+ %0 = load <16 x i16>* %a, align 32
+ %shuffle = shufflevector <16 x i16> %0, <16 x i16> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 13, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+ store <16 x i16> %shuffle, <16 x i16>* %a, align 32
+ ret void
+}
diff --git a/test/CodeGen/X86/vec_shuffle-41.ll b/test/CodeGen/X86/vec_shuffle-41.ll
new file mode 100644
index 000000000000..28fdd2f5ce17
--- /dev/null
+++ b/test/CodeGen/X86/vec_shuffle-41.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s
+
+; Use buildFromShuffleMostly which allows this to be generated as two 128-bit
+; shuffles and an insert.
+
+; This is the (somewhat questionable) LLVM IR that is generated for:
+; x8.s0123456 = x8.s1234567; // x8 is a <8 x float> type
+; x8.s7 = f; // f is float
+
+
+define <8 x float> @test1(<8 x float> %a, float %b) {
+; CHECK-LABEL: test1:
+; CHECK: vinsertps
+; CHECK-NOT: vinsertps
+entry:
+ %shift = shufflevector <8 x float> %a, <8 x float> undef, <7 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %extend = shufflevector <7 x float> %shift, <7 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 undef>
+ %insert = insertelement <8 x float> %extend, float %b, i32 7
+
+ ret <8 x float> %insert
+}
diff --git a/test/CodeGen/X86/vec_splat.ll b/test/CodeGen/X86/vec_splat.ll
index 543c96ef3d45..a02e3836078c 100644
--- a/test/CodeGen/X86/vec_splat.ll
+++ b/test/CodeGen/X86/vec_splat.ll
@@ -32,3 +32,19 @@ define void @test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) nounwind {
; SSE3-LABEL: test_v2sd:
; SSE3: movddup
}
+
+; Fold extract of a load into the load's address computation. This avoids spilling to the stack.
+define <4 x float> @load_extract_splat(<4 x float>* nocapture readonly %ptr, i64 %i, i64 %j) nounwind {
+ %1 = getelementptr inbounds <4 x float>* %ptr, i64 %i
+ %2 = load <4 x float>* %1, align 16
+ %3 = extractelement <4 x float> %2, i64 %j
+ %4 = insertelement <4 x float> undef, float %3, i32 0
+ %5 = insertelement <4 x float> %4, float %3, i32 1
+ %6 = insertelement <4 x float> %5, float %3, i32 2
+ %7 = insertelement <4 x float> %6, float %3, i32 3
+ ret <4 x float> %7
+
+; AVX-LABEL: load_extract_splat
+; AVX-NOT: movs
+; AVX: vbroadcastss
+}
diff --git a/test/CodeGen/X86/vec_split.ll b/test/CodeGen/X86/vec_split.ll
index f9e7c20ba4e2..bc2c6633f20d 100644
--- a/test/CodeGen/X86/vec_split.ll
+++ b/test/CodeGen/X86/vec_split.ll
@@ -40,3 +40,36 @@ define <32 x i16> @split32(<32 x i16> %a, <32 x i16> %b, <32 x i8> %__mask) {
%2 = select <32 x i1> %1, <32 x i16> %a, <32 x i16> %b
ret <32 x i16> %2
}
+
+; PR19492
+define i128 @split128(<2 x i128> %a, <2 x i128> %b) {
+; SSE4-LABEL: split128:
+; SSE4: addq
+; SSE4: adcq
+; SSE4: addq
+; SSE4: adcq
+; SSE4: addq
+; SSE4: adcq
+; SSE4: ret
+; AVX1-LABEL: split128:
+; AVX1: addq
+; AVX1: adcq
+; AVX1: addq
+; AVX1: adcq
+; AVX1: addq
+; AVX1: adcq
+; AVX1: ret
+; AVX2-LABEL: split128:
+; AVX2: addq
+; AVX2: adcq
+; AVX2: addq
+; AVX2: adcq
+; AVX2: addq
+; AVX2: adcq
+; AVX2: ret
+ %add = add nsw <2 x i128> %a, %b
+ %rdx.shuf = shufflevector <2 x i128> %add, <2 x i128> undef, <2 x i32> <i32 undef, i32 0>
+ %bin.rdx = add <2 x i128> %add, %rdx.shuf
+ %e = extractelement <2 x i128> %bin.rdx, i32 1
+ ret i128 %e
+}
diff --git a/test/CodeGen/X86/vector-gep.ll b/test/CodeGen/X86/vector-gep.ll
index b87d8447e543..3f7ee3aa3e42 100644
--- a/test/CodeGen/X86/vector-gep.ll
+++ b/test/CodeGen/X86/vector-gep.ll
@@ -1,25 +1,29 @@
-; RUN: llc < %s -march=x86 -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux -mcpu=corei7-avx | FileCheck %s
; RUN: opt -instsimplify -disable-output < %s
;CHECK-LABEL: AGEP0:
define <4 x i32*> @AGEP0(i32* %ptr) nounwind {
entry:
+;CHECK-LABEL: AGEP0
+;CHECK: vbroadcast
+;CHECK-NEXT: vpaddd
+;CHECK-NEXT: ret
%vecinit.i = insertelement <4 x i32*> undef, i32* %ptr, i32 0
%vecinit2.i = insertelement <4 x i32*> %vecinit.i, i32* %ptr, i32 1
%vecinit4.i = insertelement <4 x i32*> %vecinit2.i, i32* %ptr, i32 2
%vecinit6.i = insertelement <4 x i32*> %vecinit4.i, i32* %ptr, i32 3
-;CHECK: padd
%A2 = getelementptr <4 x i32*> %vecinit6.i, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
-;CHECK: padd
%A3 = getelementptr <4 x i32*> %A2, <4 x i32> <i32 10, i32 14, i32 19, i32 233>
ret <4 x i32*> %A3
-;CHECK: ret
}
;CHECK-LABEL: AGEP1:
define i32 @AGEP1(<4 x i32*> %param) nounwind {
entry:
-;CHECK: padd
+;CHECK-LABEL: AGEP1
+;CHECK: vpaddd
+;CHECK-NEXT: vpextrd
+;CHECK-NEXT: movl
%A2 = getelementptr <4 x i32*> %param, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
%k = extractelement <4 x i32*> %A2, i32 3
%v = load i32* %k
@@ -30,8 +34,9 @@ entry:
;CHECK-LABEL: AGEP2:
define i32 @AGEP2(<4 x i32*> %param, <4 x i32> %off) nounwind {
entry:
-;CHECK: pslld $2
-;CHECK: padd
+;CHECK-LABEL: AGEP2
+;CHECK: vpslld $2
+;CHECK-NEXT: vpadd
%A2 = getelementptr <4 x i32*> %param, <4 x i32> %off
%k = extractelement <4 x i32*> %A2, i32 3
%v = load i32* %k
@@ -42,8 +47,9 @@ entry:
;CHECK-LABEL: AGEP3:
define <4 x i32*> @AGEP3(<4 x i32*> %param, <4 x i32> %off) nounwind {
entry:
-;CHECK: pslld $2
-;CHECK: padd
+;CHECK-LABEL: AGEP3
+;CHECK: vpslld $2
+;CHECK-NEXT: vpadd
%A2 = getelementptr <4 x i32*> %param, <4 x i32> %off
%v = alloca i32
%k = insertelement <4 x i32*> %A2, i32* %v, i32 3
@@ -54,10 +60,11 @@ entry:
;CHECK-LABEL: AGEP4:
define <4 x i16*> @AGEP4(<4 x i16*> %param, <4 x i32> %off) nounwind {
entry:
+;CHECK-LABEL: AGEP4
; Multiply offset by two (add it to itself).
-;CHECK: padd
+;CHECK: vpadd
; add the base to the offset
-;CHECK: padd
+;CHECK-NEXT: vpadd
%A = getelementptr <4 x i16*> %param, <4 x i32> %off
ret <4 x i16*> %A
;CHECK: ret
@@ -66,7 +73,8 @@ entry:
;CHECK-LABEL: AGEP5:
define <4 x i8*> @AGEP5(<4 x i8*> %param, <4 x i8> %off) nounwind {
entry:
-;CHECK: paddd
+;CHECK-LABEL: AGEP5
+;CHECK: vpaddd
%A = getelementptr <4 x i8*> %param, <4 x i8> %off
ret <4 x i8*> %A
;CHECK: ret
@@ -77,6 +85,7 @@ entry:
;CHECK-LABEL: AGEP6:
define <4 x i8*> @AGEP6(<4 x i8*> %param, <4 x i32> %off) nounwind {
entry:
+;CHECK-LABEL: AGEP6
;CHECK-NOT: pslld
%A = getelementptr <4 x i8*> %param, <4 x i32> %off
ret <4 x i8*> %A
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
new file mode 100644
index 000000000000..a3229073751b
--- /dev/null
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -0,0 +1,218 @@
+; RUN: llc -march=x86-64 -mcpu=core2 -mattr=+sse4.1 < %s | FileCheck %s -check-prefix=SSE41
+; RUN: llc -march=x86-64 -mcpu=core2 < %s | FileCheck %s -check-prefix=SSE
+; RUN: llc -march=x86-64 -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX
+
+define <4 x i32> @test1(<4 x i32> %a) {
+ %div = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %div
+
+; SSE41-LABEL: test1:
+; SSE41: pmuludq
+; SSE41: pshufd $49
+; SSE41: pmuludq
+; SSE41: shufps $-35
+; SSE41: psubd
+; SSE41: psrld $1
+; SSE41: padd
+; SSE41: psrld $2
+
+; AVX-LABEL: test1:
+; AVX: vpmuludq
+; AVX: vpshufd $49
+; AVX: vpmuludq
+; AVX: vshufps $-35
+; AVX: vpsubd
+; AVX: vpsrld $1
+; AVX: vpadd
+; AVX: vpsrld $2
+}
+
+define <8 x i32> @test2(<8 x i32> %a) {
+ %div = udiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %div
+
+; AVX-LABEL: test2:
+; AVX: vpbroadcastd
+; AVX: vpalignr $4
+; AVX: vpmuludq
+; AVX: vpmuludq
+; AVX: vpblendd $170
+; AVX: vpsubd
+; AVX: vpsrld $1
+; AVX: vpadd
+; AVX: vpsrld $2
+}
+
+define <8 x i16> @test3(<8 x i16> %a) {
+ %div = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %div
+
+; SSE41-LABEL: test3:
+; SSE41: pmulhuw
+; SSE41: psubw
+; SSE41: psrlw $1
+; SSE41: paddw
+; SSE41: psrlw $2
+
+; AVX-LABEL: test3:
+; AVX: vpmulhuw
+; AVX: vpsubw
+; AVX: vpsrlw $1
+; AVX: vpaddw
+; AVX: vpsrlw $2
+}
+
+define <16 x i16> @test4(<16 x i16> %a) {
+ %div = udiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %div
+
+; AVX-LABEL: test4:
+; AVX: vpmulhuw
+; AVX: vpsubw
+; AVX: vpsrlw $1
+; AVX: vpaddw
+; AVX: vpsrlw $2
+; AVX-NOT: vpmulhuw
+}
+
+define <8 x i16> @test5(<8 x i16> %a) {
+ %div = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %div
+
+; SSE41-LABEL: test5:
+; SSE41: pmulhw
+; SSE41: psrlw $15
+; SSE41: psraw $1
+; SSE41: paddw
+
+; AVX-LABEL: test5:
+; AVX: vpmulhw
+; AVX: vpsrlw $15
+; AVX: vpsraw $1
+; AVX: vpaddw
+}
+
+define <16 x i16> @test6(<16 x i16> %a) {
+ %div = sdiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %div
+
+; AVX-LABEL: test6:
+; AVX: vpmulhw
+; AVX: vpsrlw $15
+; AVX: vpsraw $1
+; AVX: vpaddw
+; AVX-NOT: vpmulhw
+}
+
+define <16 x i8> @test7(<16 x i8> %a) {
+ %div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %div
+
+; FIXME: scalarized
+; SSE41-LABEL: test7:
+; SSE41: pext
+; AVX-LABEL: test7:
+; AVX: pext
+}
+
+define <4 x i32> @test8(<4 x i32> %a) {
+ %div = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %div
+
+; SSE41-LABEL: test8:
+; SSE41: pmuldq
+; SSE41: pshufd $49
+; SSE41-NOT: pshufd $49
+; SSE41: pmuldq
+; SSE41: shufps $-35
+; SSE41: pshufd $-40
+; SSE41: padd
+; SSE41: psrld $31
+; SSE41: psrad $2
+; SSE41: padd
+
+; SSE-LABEL: test8:
+; SSE: pmuludq
+; SSE: pshufd $49
+; SSE-NOT: pshufd $49
+; SSE: pmuludq
+; SSE: shufps $-35
+; SSE: pshufd $-40
+; SSE: psubd
+; SSE: padd
+; SSE: psrld $31
+; SSE: psrad $2
+; SSE: padd
+
+; AVX-LABEL: test8:
+; AVX: vpmuldq
+; AVX: vpshufd $49
+; AVX-NOT: vpshufd $49
+; AVX: vpmuldq
+; AVX: vshufps $-35
+; AVX: vpshufd $-40
+; AVX: vpadd
+; AVX: vpsrld $31
+; AVX: vpsrad $2
+; AVX: vpadd
+}
+
+define <8 x i32> @test9(<8 x i32> %a) {
+ %div = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %div
+
+; AVX-LABEL: test9:
+; AVX: vpalignr $4
+; AVX: vpbroadcastd
+; AVX: vpmuldq
+; AVX: vpmuldq
+; AVX: vpblendd $170
+; AVX: vpadd
+; AVX: vpsrld $31
+; AVX: vpsrad $2
+; AVX: vpadd
+}
+
+define <8 x i32> @test10(<8 x i32> %a) {
+ %rem = urem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %rem
+
+; AVX-LABEL: test10:
+; AVX: vpbroadcastd
+; AVX: vpalignr $4
+; AVX: vpmuludq
+; AVX: vpmuludq
+; AVX: vpblendd $170
+; AVX: vpsubd
+; AVX: vpsrld $1
+; AVX: vpadd
+; AVX: vpsrld $2
+; AVX: vpmulld
+}
+
+define <8 x i32> @test11(<8 x i32> %a) {
+ %rem = srem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %rem
+
+; AVX-LABEL: test11:
+; AVX: vpalignr $4
+; AVX: vpbroadcastd
+; AVX: vpmuldq
+; AVX: vpmuldq
+; AVX: vpblendd $170
+; AVX: vpadd
+; AVX: vpsrld $31
+; AVX: vpsrad $2
+; AVX: vpadd
+; AVX: vpmulld
+}
+
+define <2 x i16> @test12() {
+ %I8 = insertelement <2 x i16> zeroinitializer, i16 -1, i32 0
+ %I9 = insertelement <2 x i16> %I8, i16 -1, i32 1
+ %B9 = urem <2 x i16> %I9, %I9
+ ret <2 x i16> %B9
+
+; AVX-LABEL: test12:
+; AVX: xorps
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
new file mode 100644
index 000000000000..4da7e42caabf
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -0,0 +1,196 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,5,5,5]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,6,6,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: punpcklwd %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 5, i32 5, i32 5, i32 5, i32 6, i32 6, i32 6, i32 6, i32 7, i32 7, i32 7, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,6,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_0101010101010101(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_0101010101010101
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23
+; CHECK-SSE2: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 16, i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT: punpckhbw %xmm1, %xmm2
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: packuswb %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20
+; CHECK-SSE2: pxor %xmm2, %xmm2
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: packuswb %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20
+; CHECK-SSE2: pxor %xmm2, %xmm2
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm3
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm3
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm4
+; CHECK-SSE2-NEXT: punpckhbw %xmm2, %xmm4
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: shufpd {{.*}} # xmm4 = xmm4[0],xmm3[1]
+; CHECK-SSE2-NEXT: punpckhbw %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; CHECK-SSE2-NEXT: packuswb %xmm4, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 31, i32 30, i32 29, i32 28, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @zext_to_v8i16_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @zext_to_v8i16_shuffle
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @zext_to_v4i32_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @zext_to_v4i32_shuffle
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @trunc_v4i32_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @trunc_v4i32_shuffle
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pand
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: packuswb %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i8> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
new file mode 100644
index 000000000000..78b4ee7e5dd0
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -0,0 +1,219 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <2 x i64> @shuffle_v2i64_00(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_00
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_10(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_10
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_11(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_11
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_22(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_22
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_32(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_32
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_33(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_33
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 3>
+ ret <2 x i64> %shuffle
+}
+
+define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_00
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_10
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 0>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_11(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_11
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 1>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
+; FIXME: Should these use movapd + shufpd to remove a domain change at the cost
+; of a mov?
+;
+; CHECK-SSE2-LABEL: @shuffle_v2f64_22
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 2>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_32(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_32
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 2>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_33(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_33
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 3>
+ ret <2 x double> %shuffle
+}
+
+
+define <2 x i64> @shuffle_v2i64_02(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_02
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_02_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_02_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_03
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_03_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_12
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_12_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_12_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm2[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_13(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_13
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_13_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_13_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm2[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_20(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_20
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_20_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_20_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[0]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_21
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_21_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_30(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_30
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_30_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_30_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[1],xmm1[0]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_31(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_31
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm0[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_31_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_31_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
+ ret <2 x i64> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v4.ll b/test/CodeGen/X86/vector-shuffle-128-v4.ll
new file mode 100644
index 000000000000..7d496fa19f15
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @shuffle_v4i32_0001(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0001
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,0,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0020(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0020
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,0,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0300(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0300
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,3,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_1000(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_1000
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[1,0,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_2200(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_2200
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,2,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_3330(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_3330
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[3,3,3,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_3210(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_3210
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0001
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,0,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0020
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,0,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0300
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,3,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_1000
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[1,0,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_2200
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[2,2,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_3330
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[3,3,3,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_3210
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x float> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_0124(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0124
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[2,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0142(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0142
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[2,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0412(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0412
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[0,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm1 = xmm1[2,0],xmm0[1,2]
+; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4012(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4012
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[0,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm1 = xmm1[0,2],xmm0[1,2]
+; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0145(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0145
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0451(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0451
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,1]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2,3,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4501(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4501
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4015
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,1]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[2,0,1,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
+ ret <4 x i32> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v8.ll b/test/CodeGen/X86/vector-shuffle-128-v8.ll
new file mode 100644
index 000000000000..5d1922a34837
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -0,0 +1,493 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <8 x i16> @shuffle_v8i16_01012323(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_01012323
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,0,1,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_67452301(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_67452301
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_456789AB(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_456789AB
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_00000000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00000000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_00004444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00004444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_31206745(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_31206745
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,3,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 6, i32 7, i32 4, i32 5>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44440000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44440000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_75643120(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_75643120
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,6,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 7, i32 5, i32 6, i32 4, i32 3, i32 1, i32 2, i32 0>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_10545410(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_10545410
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 5, i32 4, i32 5, i32 4, i32 1, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_54105410(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_54105410
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 5, i32 4, i32 1, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_54101054(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_54101054
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 1, i32 0, i32 5, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04400440(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04400440
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,4,4,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_40044004(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_40044004
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,0,0,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 0, i32 0, i32 4, i32 4, i32 0, i32 0, i32 4>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_26405173(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_26405173
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,3,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,6,4,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 5, i32 1, i32 7, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_20645173(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_20645173
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,6,4,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 0, i32 6, i32 4, i32 5, i32 1, i32 7, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_26401375(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_26401375
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,1,2]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,3,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 1, i32 3, i32 7, i32 5>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_00444444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00444444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44004444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44004444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,2,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04404444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04404444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04400000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04400000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04404567(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04404567
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0X444444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0X444444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 undef, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44X04444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44X04444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 undef, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_X4404444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_X4404444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0127XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0127XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_XXXX4563(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXX4563
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 5, i32 6, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_4563XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_4563XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,0,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_01274563(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_01274563
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,5,4,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,1,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_45630127(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_45630127
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,1,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,0,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,7,5,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 0, i32 1, i32 2, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_08192a3b(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_08192a3b
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0c1d2e3f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0c1d2e3f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 12, i32 1, i32 13, i32 2, i32 14, i32 3, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_4c5d6e7f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_4c5d6e7f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_48596a7b(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_48596a7b
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 8, i32 5, i32 9, i32 6, i32 10, i32 7, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_08196e7f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_08196e7f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[0,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0c1d6879(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0c1d6879
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,0,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 12, i32 1, i32 13, i32 6, i32 8, i32 7, i32 9>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_109832ba(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_109832ba
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm0[2,0,3,1,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,0,3,1,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 9, i32 8, i32 3, i32 2, i32 11, i32 10>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_8091a2b3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_8091a2b3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_c4d5e6f7(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_c4d5e6f7
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm2 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0213cedf
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 12, i32 14, i32 13, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_032dXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 3, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_XXXcXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXcXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_012dXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,2,0,3,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXXcde3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_cde3XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 13, i32 14, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_012dcde3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_012dcde3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm2 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm3 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpckhwd %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[0,2,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm3, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,2,0,3,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 12, i32 13, i32 14, i32 3>
+ ret <8 x i16> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
new file mode 100644
index 000000000000..e60ecb70dec6
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8)
+declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8)
+declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)
+
+define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
+ ret <4 x i32> %c
+}
+
+define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd2
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 -28)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 -28)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd4
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -31)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 27)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -31)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd5
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -76)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 27)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -76)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd6
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd $0
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 0)
+ %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 8)
+ ret <4 x i32> %c
+}
+
+define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+ ret <8 x i16> %c
+}
+
+define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw2
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
+define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 27)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
+define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufhw1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
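The combine_pshufd1 and combine_pshuflw1 tests above fold to a bare retq because immediate 27 (0b00011011) selects elements <3,2,1,0>, a full reversal, and applying the same reversal twice is the identity. As a minimal sketch (hypothetical function, not taken from the imported file), the generic-IR equivalent of a single pshufd with that immediate is:

; Sketch only: generic-IR form of pshufd with immediate 27 (lane reversal).
define <4 x i32> @pshufd_27_sketch(<4 x i32> %a) {
  %r = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i32> %r
}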
diff --git a/test/CodeGen/X86/viabs.ll b/test/CodeGen/X86/viabs.ll
index 0be00da83fdf..d9f2cb074759 100644
--- a/test/CodeGen/X86/viabs.ll
+++ b/test/CodeGen/X86/viabs.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -march=x86-64 -mcpu=x86-64 | FileCheck %s -check-prefix=SSE2
; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s -check-prefix=SSSE3
; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=AVX2
+; RUN: llc < %s -march=x86-64 -mcpu=knl | FileCheck %s -check-prefix=AVX512
define <4 x i32> @test1(<4 x i32> %a) nounwind {
; SSE2-LABEL: test1:
@@ -17,6 +18,10 @@ define <4 x i32> @test1(<4 x i32> %a) nounwind {
; AVX2-LABEL: test1:
; AVX2: vpabsd
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test1:
+; AVX512: vpabsd
+; AVX512-NEXT: ret
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
%abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
@@ -38,6 +43,10 @@ define <4 x i32> @test2(<4 x i32> %a) nounwind {
; AVX2-LABEL: test2:
; AVX2: vpabsd
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test2:
+; AVX512: vpabsd
+; AVX512-NEXT: ret
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sge <4 x i32> %a, zeroinitializer
%abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
@@ -59,6 +68,10 @@ define <8 x i16> @test3(<8 x i16> %a) nounwind {
; AVX2-LABEL: test3:
; AVX2: vpabsw
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test3:
+; AVX512: vpabsw
+; AVX512-NEXT: ret
%tmp1neg = sub <8 x i16> zeroinitializer, %a
%b = icmp sgt <8 x i16> %a, zeroinitializer
%abs = select <8 x i1> %b, <8 x i16> %a, <8 x i16> %tmp1neg
@@ -80,6 +93,10 @@ define <16 x i8> @test4(<16 x i8> %a) nounwind {
; AVX2-LABEL: test4:
; AVX2: vpabsb
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test4:
+; AVX512: vpabsb
+; AVX512-NEXT: ret
%tmp1neg = sub <16 x i8> zeroinitializer, %a
%b = icmp slt <16 x i8> %a, zeroinitializer
%abs = select <16 x i1> %b, <16 x i8> %tmp1neg, <16 x i8> %a
@@ -101,6 +118,10 @@ define <4 x i32> @test5(<4 x i32> %a) nounwind {
; AVX2-LABEL: test5:
; AVX2: vpabsd
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test5:
+; AVX512: vpabsd
+; AVX512-NEXT: ret
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sle <4 x i32> %a, zeroinitializer
%abs = select <4 x i1> %b, <4 x i32> %tmp1neg, <4 x i32> %a
@@ -116,6 +137,10 @@ define <8 x i32> @test6(<8 x i32> %a) nounwind {
; AVX2-LABEL: test6:
; AVX2: vpabsd {{.*}}%ymm
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test6:
+; AVX512: vpabsd {{.*}}%ymm
+; AVX512-NEXT: ret
%tmp1neg = sub <8 x i32> zeroinitializer, %a
%b = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%abs = select <8 x i1> %b, <8 x i32> %a, <8 x i32> %tmp1neg
@@ -131,6 +156,10 @@ define <8 x i32> @test7(<8 x i32> %a) nounwind {
; AVX2-LABEL: test7:
; AVX2: vpabsd {{.*}}%ymm
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test7:
+; AVX512: vpabsd {{.*}}%ymm
+; AVX512-NEXT: ret
%tmp1neg = sub <8 x i32> zeroinitializer, %a
%b = icmp sge <8 x i32> %a, zeroinitializer
%abs = select <8 x i1> %b, <8 x i32> %a, <8 x i32> %tmp1neg
@@ -146,6 +175,10 @@ define <16 x i16> @test8(<16 x i16> %a) nounwind {
; AVX2-LABEL: test8:
; AVX2: vpabsw {{.*}}%ymm
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test8:
+; AVX512: vpabsw {{.*}}%ymm
+; AVX512-NEXT: ret
%tmp1neg = sub <16 x i16> zeroinitializer, %a
%b = icmp sgt <16 x i16> %a, zeroinitializer
%abs = select <16 x i1> %b, <16 x i16> %a, <16 x i16> %tmp1neg
@@ -161,6 +194,10 @@ define <32 x i8> @test9(<32 x i8> %a) nounwind {
; AVX2-LABEL: test9:
; AVX2: vpabsb {{.*}}%ymm
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test9:
+; AVX512: vpabsb {{.*}}%ymm
+; AVX512-NEXT: ret
%tmp1neg = sub <32 x i8> zeroinitializer, %a
%b = icmp slt <32 x i8> %a, zeroinitializer
%abs = select <32 x i1> %b, <32 x i8> %tmp1neg, <32 x i8> %a
@@ -176,8 +213,58 @@ define <8 x i32> @test10(<8 x i32> %a) nounwind {
; AVX2-LABEL: test10:
; AVX2: vpabsd {{.*}}%ymm
; AVX2-NEXT: ret
+
+; AVX512-LABEL: test10:
+; AVX512: vpabsd {{.*}}%ymm
+; AVX512-NEXT: ret
%tmp1neg = sub <8 x i32> zeroinitializer, %a
%b = icmp sle <8 x i32> %a, zeroinitializer
%abs = select <8 x i1> %b, <8 x i32> %tmp1neg, <8 x i32> %a
ret <8 x i32> %abs
}
+
+define <16 x i32> @test11(<16 x i32> %a) nounwind {
+; AVX2-LABEL: test11:
+; AVX2: vpabsd
+; AVX2: vpabsd
+; AVX2-NEXT: ret
+
+; AVX512-LABEL: test11:
+; AVX512: vpabsd {{.*}}%zmm
+; AVX512-NEXT: ret
+ %tmp1neg = sub <16 x i32> zeroinitializer, %a
+ %b = icmp sle <16 x i32> %a, zeroinitializer
+ %abs = select <16 x i1> %b, <16 x i32> %tmp1neg, <16 x i32> %a
+ ret <16 x i32> %abs
+}
+
+define <8 x i64> @test12(<8 x i64> %a) nounwind {
+; AVX2-LABEL: test12:
+; AVX2: vpxor
+; AVX2: vpxor
+; AVX2-NEXT: ret
+
+; AVX512-LABEL: test12:
+; AVX512: vpabsq {{.*}}%zmm
+; AVX512-NEXT: ret
+ %tmp1neg = sub <8 x i64> zeroinitializer, %a
+ %b = icmp sle <8 x i64> %a, zeroinitializer
+ %abs = select <8 x i1> %b, <8 x i64> %tmp1neg, <8 x i64> %a
+ ret <8 x i64> %abs
+}
+
+define <8 x i64> @test13(<8 x i64>* %a.ptr) nounwind {
+; AVX2-LABEL: test13:
+; AVX2: vpxor
+; AVX2: vpxor
+; AVX2-NEXT: ret
+
+; AVX512-LABEL: test13:
+; AVX512: vpabsq (%
+; AVX512-NEXT: ret
+ %a = load <8 x i64>* %a.ptr, align 8
+ %tmp1neg = sub <8 x i64> zeroinitializer, %a
+ %b = icmp sle <8 x i64> %a, zeroinitializer
+ %abs = select <8 x i1> %b, <8 x i64> %tmp1neg, <8 x i64> %a
+ ret <8 x i64> %abs
+}
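All of the tests above express an absolute value with the same sub/icmp/select idiom, which is the pattern the backends recognize (hence the vpabs* checks). As a minimal sketch (hypothetical function, not taken from the imported file), the same idiom at scalar width is:

; Sketch only: scalar form of the abs pattern used by the vector tests above.
define i32 @iabs_sketch(i32 %x) {
  %neg = sub i32 0, %x
  %cmp = icmp sgt i32 %x, -1
  %abs = select i1 %cmp, i32 %x, i32 %neg
  ret i32 %abs
}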
diff --git a/test/CodeGen/X86/vselect-2.ll b/test/CodeGen/X86/vselect-2.ll
new file mode 100644
index 000000000000..50da32c67a3b
--- /dev/null
+++ b/test/CodeGen/X86/vselect-2.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=sse2 | FileCheck %s
+
+define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
+ %select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B
+ ret <4 x i32> %select
+}
+; CHECK-LABEL: test1
+; CHECK: movsd
+; CHECK: ret
+
+define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
+ %select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x i32> %A, <4 x i32> %B
+ ret <4 x i32> %select
+}
+; CHECK-LABEL: test2
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
+ %select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
+ ret <4 x float> %select
+}
+; CHECK-LABEL: test3
+; CHECK: movsd
+; CHECK: ret
+
+define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
+ %select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x float> %A, <4 x float> %B
+ ret <4 x float> %select
+}
+; CHECK-LABEL: test4
+; CHECK: movsd
+; CHECK-NEXT: ret
diff --git a/test/CodeGen/X86/vselect.ll b/test/CodeGen/X86/vselect.ll
new file mode 100644
index 000000000000..42cf06a4a049
--- /dev/null
+++ b/test/CodeGen/X86/vselect.ll
@@ -0,0 +1,278 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=corei7 -mattr=-sse4.1 < %s | FileCheck %s
+
+; Verify that we don't emit packed vector shift instructions if the
+; condition used by the vector select is a vector of constants.
+
+
+define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test1
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+
+define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test2
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+
+define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+
+define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test4
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: movaps %xmm1, %xmm0
+; CHECK: ret
+
+
+define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test5
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+
+define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> %a, <8 x i16> %a
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test6
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+
+define <8 x i16> @test7(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test7
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+
+define <8 x i16> @test8(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test8
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test9
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 false, i1 true, i1 true, i1 false, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test11
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 false, i1 false, i1 undef, i1 false, i1 false, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test12
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK: ret
+
+; Fold (vselect (build_vector AllOnes), N1, N2) -> N1
+
+define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 true, i1 undef, i1 true, i1 undef>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: pcmpeq
+; CHECK: ret
+
+define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test15
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: pcmpeq
+; CHECK: ret
+
+; Fold (vselect (build_vector AllZeros), N1, N2) -> N2
+
+define <4 x float> @test16(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 false, i1 undef, i1 false, i1 undef>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: ret
+
+define <8 x i16> @test17(<8 x i16> %a, <8 x i16> %b) {
+ %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 undef, i1 undef, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
+ ret <8 x i16> %1
+}
+; CHECK-LABEL: test17
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: ret
+
+define <4 x float> @test18(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test18
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK: ret
+
+define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
+ %1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> %a, <4 x i32> %b
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test19
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK: ret
+
+define <2 x double> @test20(<2 x double> %a, <2 x double> %b) {
+ %1 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %b
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test20
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK: ret
+
+define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
+ %1 = select <2 x i1> <i1 false, i1 true>, <2 x i64> %a, <2 x i64> %b
+ ret <2 x i64> %1
+}
+; CHECK-LABEL: test21
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK: ret
+
+define <4 x float> @test22(<4 x float> %a, <4 x float> %b) {
+ %1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %1
+}
+; CHECK-LABEL: test22
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK: ret
+
+define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
+ %1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %a, <4 x i32> %b
+ ret <4 x i32> %1
+}
+; CHECK-LABEL: test23
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK: ret
+
+define <2 x double> @test24(<2 x double> %a, <2 x double> %b) {
+ %1 = select <2 x i1> <i1 true, i1 false>, <2 x double> %a, <2 x double> %b
+ ret <2 x double> %1
+}
+; CHECK-LABEL: test24
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK: ret
+
+define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
+ %1 = select <2 x i1> <i1 true, i1 false>, <2 x i64> %a, <2 x i64> %b
+ ret <2 x i64> %1
+}
+; CHECK-LABEL: test25
+; CHECK-NOT: psllw
+; CHECK-NOT: psraw
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK: ret
+
+define <4 x float> @select_of_shuffles_0(<2 x float> %a0, <2 x float> %b0, <2 x float> %a1, <2 x float> %b1) {
+; CHECK-LABEL: select_of_shuffles_0
+; CHECK-DAG: movlhps %xmm2, [[REGA:%xmm[0-9]+]]
+; CHECK-DAG: movlhps %xmm3, [[REGB:%xmm[0-9]+]]
+; CHECK: subps [[REGB]], [[REGA]]
+ %1 = shufflevector <2 x float> %a0, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %2 = shufflevector <2 x float> %a1, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %3 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %2, <4 x float> %1
+ %4 = shufflevector <2 x float> %b0, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %5 = shufflevector <2 x float> %b1, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %6 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %5, <4 x float> %4
+ %7 = fsub <4 x float> %3, %6
+ ret <4 x float> %7
+}
diff --git a/test/CodeGen/X86/vshift-6.ll b/test/CodeGen/X86/vshift-6.ll
new file mode 100644
index 000000000000..f50d9a6bb124
--- /dev/null
+++ b/test/CodeGen/X86/vshift-6.ll
@@ -0,0 +1,36 @@
+; RUN: llc < %s -mcpu=corei7 -march=x86-64 -mattr=+sse2 | FileCheck %s
+
+; This test makes sure that the compiler does not crash with an
+; assertion failure when trying to fold a vector shift left
+; by immediate count if the type of the input vector is different
+; to the result type.
+;
+; This happens for example when lowering a shift left of a MVT::v16i8 vector.
+; This is custom lowered into the following sequence:
+; count << 5
+; A = VSHLI(MVT::v8i16, r & (char16)15, 4)
+; B = BITCAST MVT::v16i8, A
+; VSELECT(r, B, count);
+; count += count
+; C = VSHLI(MVT::v8i16, r & (char16)63, 2)
+; D = BITCAST MVT::v16i8, C
+; r = VSELECT(r, D, count);
+; count += count
+; VSELECT(r, r+r, count);
+; count = count << 5;
+;
+; Where 'r' is a vector of type MVT::v16i8, and
+; 'count' is the vector shift count.
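+;
+; For reference, a minimal scalar sketch of the per-byte algorithm described
+; above. This is an illustration only (not part of the original test), and the
+; helper name shl_byte is hypothetical:
+;
+;   #include <stdint.h>
+;   uint8_t shl_byte(uint8_t r, uint8_t count) {
+;     count <<= 5;                             // bit 2 of count reaches the sign bit
+;     if (count & 0x80) r = (uint8_t)(r << 4); // VSHLI by 4, chosen by VSELECT
+;     count += count;                          // bit 1 of count reaches the sign bit
+;     if (count & 0x80) r = (uint8_t)(r << 2); // VSHLI by 2, chosen by VSELECT
+;     count += count;                          // bit 0 of count reaches the sign bit
+;     if (count & 0x80) r = (uint8_t)(r + r);  // shift by 1 via an add
+;     return r;
+;   }
+;
+; The vector lowering performs the same three conditional steps on all sixteen
+; bytes at once, using the sign bit of each count byte as the VSELECT mask.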
+
+define <16 x i8> @do_not_crash(i8*, i32*, i64*, i32, i64, i8) {
+entry:
+ store i8 %5, i8* %0
+ %L5 = load i8* %0
+ %I8 = insertelement <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i8 %L5, i32 7
+ %B51 = shl <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, %I8
+ ret <16 x i8> %B51
+}
+
+; CHECK-LABEL: do_not_crash
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/warn-stack.ll b/test/CodeGen/X86/warn-stack.ll
index 5979f45b07d8..a76fd2801a1f 100644
--- a/test/CodeGen/X86/warn-stack.ll
+++ b/test/CodeGen/X86/warn-stack.ll
@@ -12,7 +12,7 @@ entry:
ret void
}
-; CHECK: warning: Stack size limit exceeded (104) in warn.
+; CHECK: warning: stack size limit exceeded (104) in warn
define void @warn() nounwind ssp {
entry:
%buffer = alloca [80 x i8], align 1
diff --git a/test/CodeGen/X86/weak_def_can_be_hidden.ll b/test/CodeGen/X86/weak_def_can_be_hidden.ll
index 22aa135e65e0..b17f372afed8 100644
--- a/test/CodeGen/X86/weak_def_can_be_hidden.ll
+++ b/test/CodeGen/X86/weak_def_can_be_hidden.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=i686-apple-darwin9 -O0 < %s | FileCheck --check-prefix=CHECK-D89 %s
; RUN: llc -mtriple=i686-apple-darwin8 -O0 < %s | FileCheck --check-prefix=CHECK-D89 %s
-@v1 = linkonce_odr global i32 32
+@v1 = linkonce_odr constant i32 32
; CHECK: .globl _v1
; CHECK: .weak_def_can_be_hidden _v1
@@ -16,13 +16,17 @@ define i32 @f1() {
ret i32 %x
}
-@v2 = linkonce_odr global i32 32
+@v2 = linkonce_odr constant i32 32
; CHECK: .globl _v2
; CHECK: .weak_definition _v2
; CHECK-D89: .globl _v2
; CHECK-D89: .weak_definition _v2
+define i32* @f2() {
+ ret i32* @v2
+}
+
@v3 = linkonce_odr unnamed_addr global i32 32
; CHECK: .globl _v3
; CHECK: .weak_def_can_be_hidden _v3
@@ -30,10 +34,18 @@ define i32 @f1() {
; CHECK-D89: .globl _v3
; CHECK-D89: .weak_definition _v3
-define i32* @f2() {
- ret i32* @v2
-}
-
define i32* @f3() {
ret i32* @v3
}
+
+@v4 = linkonce_odr global i32 32
+; CHECK: .globl _v4
+; CHECK: .weak_definition _v4
+
+; CHECK-D89: .globl _v4
+; CHECK-D89: .weak_definition _v4
+
+define i32 @f4() {
+ %x = load i32 * @v4
+ ret i32 %x
+}
diff --git a/test/CodeGen/X86/widen_cast-4.ll b/test/CodeGen/X86/widen_cast-4.ll
index 1bc06a77cbf7..19b84f19a4ff 100644
--- a/test/CodeGen/X86/widen_cast-4.ll
+++ b/test/CodeGen/X86/widen_cast-4.ll
@@ -1,8 +1,9 @@
; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s
-; CHECK: psraw
-; CHECK: psraw
+; RUN: llc < %s -march=x86 -mattr=+sse4.2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
+; CHECK-LABEL: update:
+; CHECK-WIDE-LABEL: update:
entry:
%dst_i.addr = alloca i64* ; <i64**> [#uses=2]
%src_i.addr = alloca i64* ; <i64**> [#uses=2]
@@ -44,6 +45,26 @@ forbody: ; preds = %forcond
%shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 > ; <<8 x i8>> [#uses=1]
store <8 x i8> %shr, <8 x i8>* %arrayidx10
br label %forinc
+; CHECK: %forbody
+; CHECK: pmovzxbw
+; CHECK-NEXT: paddw
+; CHECK-NEXT: psllw $8
+; CHECK-NEXT: psraw $8
+; CHECK-NEXT: psraw $2
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: movlpd
+;
+; FIXME: We shouldn't require both a movd and an insert.
+; CHECK-WIDE: %forbody
+; CHECK-WIDE: movd
+; CHECK-WIDE-NEXT: pinsrd
+; CHECK-WIDE-NEXT: paddb
+; CHECK-WIDE-NEXT: psrlw $2
+; CHECK-WIDE-NEXT: pand
+; CHECK-WIDE-NEXT: pxor
+; CHECK-WIDE-NEXT: psubb
+; CHECK-WIDE-NEXT: pextrd
+; CHECK-WIDE-NEXT: movd
forinc: ; preds = %forbody
%tmp15 = load i32* %i ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/widen_cast-6.ll b/test/CodeGen/X86/widen_cast-6.ll
index 7c06ad8ca664..46d8dd787a3b 100644
--- a/test/CodeGen/X86/widen_cast-6.ll
+++ b/test/CodeGen/X86/widen_cast-6.ll
@@ -1,9 +1,13 @@
; RUN: llc < %s -march=x86 -mattr=+sse4.1 | FileCheck %s
-; CHECK: movd
; Test bit convert that requires widening in the operand.
define i32 @return_v2hi() nounwind {
+; CHECK-LABEL: @return_v2hi
+; CHECK: pushl
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popl
+; CHECK-NEXT: ret
entry:
%retval12 = bitcast <2 x i16> zeroinitializer to i32 ; <i32> [#uses=1]
ret i32 %retval12
diff --git a/test/CodeGen/X86/widen_conversions.ll b/test/CodeGen/X86/widen_conversions.ll
new file mode 100644
index 000000000000..522ab475c2a0
--- /dev/null
+++ b/test/CodeGen/X86/widen_conversions.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mcpu=x86-64 -x86-experimental-vector-widening-legalization -x86-experimental-vector-shuffle-lowering | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
+; CHECK-LABEL: zext_v4i8_to_v4i32:
+;
+; CHECK: movd (%{{.*}}), %[[X:xmm[0-9]+]]
+; CHECK-NEXT: pxor %[[Z:xmm[0-9]+]], %[[Z]]
+; CHECK-NEXT: punpcklbw %[[Z]], %[[X]]
+; CHECK-NEXT: punpcklbw %[[Z]], %[[X]]
+; CHECK-NEXT: ret
+
+ %val = load <4 x i8>* %ptr
+ %ext = zext <4 x i8> %val to <4 x i32>
+ ret <4 x i32> %ext
+}
diff --git a/test/CodeGen/X86/widen_load-2.ll b/test/CodeGen/X86/widen_load-2.ll
index 26815a422ec8..41bea859f474 100644
--- a/test/CodeGen/X86/widen_load-2.ll
+++ b/test/CodeGen/X86/widen_load-2.ll
@@ -149,9 +149,9 @@ define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp
; CHECK: movdqa
; CHECK: paddb
; CHECK: paddb
-; CHECK: movq
; CHECK: pextrb
; CHECK: pextrw
+; CHECK: movq
; CHECK: ret
%a = load %i8vec31* %ap, align 16
%b = load %i8vec31* %bp, align 16
diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll
index 803402b1f1f4..a355b75fafcf 100644
--- a/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/test/CodeGen/X86/widen_shuffle-1.ll
@@ -33,7 +33,9 @@ entry:
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
entry:
; CHECK-LABEL: shuf3:
-; CHECK: shufps
+; CHECK-NOT: movlhps
+; CHECK-NOT: shufps
+; CHECK: pshufd
%shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
%tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
%tmp1.i.i = shufflevector <3 x float> %tmp25.i.i, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/test/CodeGen/X86/win32_sret.ll b/test/CodeGen/X86/win32_sret.ll
index a24963a3f34e..8728712cece4 100644
--- a/test/CodeGen/X86/win32_sret.ll
+++ b/test/CodeGen/X86/win32_sret.ll
@@ -1,11 +1,13 @@
; We specify -mcpu explicitly to keep instruction reordering that happens on
; some setups (e.g., Atom) from affecting the output.
; RUN: llc < %s -mcpu=core2 -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN32
-; RUN: llc < %s -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X86
-; RUN: llc < %s -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
+; RUN: llc < %s -mcpu=core2 -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X86
+; RUN: llc < %s -mcpu=core2 -mtriple=i686-pc-cygwin | FileCheck %s -check-prefix=CYGWIN
+; RUN: llc < %s -mcpu=core2 -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
; RUN: llc < %s -mcpu=core2 -O0 -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN32
-; RUN: llc < %s -O0 -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X86
-; RUN: llc < %s -O0 -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
+; RUN: llc < %s -mcpu=core2 -O0 -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X86
+; RUN: llc < %s -mcpu=core2 -O0 -mtriple=i686-pc-cygwin | FileCheck %s -check-prefix=CYGWIN
+; RUN: llc < %s -mcpu=core2 -O0 -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
; The SysV ABI used by most Unixes and Mingw on x86 specifies that an sret pointer
; is callee-cleanup. However, in MSVC's cdecl calling convention, sret pointer
@@ -13,16 +15,19 @@
define void @sret1(i8* sret %x) nounwind {
entry:
-; WIN32: sret1
+; WIN32-LABEL: _sret1:
; WIN32: movb $42, (%eax)
; WIN32-NOT: popl %eax
-; WIN32: {{ret$}}
+; WIN32: {{retl$}}
-; MINGW_X86: sret1
-; MINGW_X86: ret $4
+; MINGW_X86-LABEL: _sret1:
+; MINGW_X86: {{retl$}}
-; LINUX: sret1
-; LINUX: ret $4
+; CYGWIN-LABEL: _sret1:
+; CYGWIN: retl $4
+
+; LINUX-LABEL: sret1:
+; LINUX: retl $4
store i8 42, i8* %x, align 4
ret void
@@ -30,16 +35,19 @@ entry:
define void @sret2(i8* sret %x, i8 %y) nounwind {
entry:
-; WIN32: sret2
+; WIN32-LABEL: _sret2:
; WIN32: movb {{.*}}, (%eax)
; WIN32-NOT: popl %eax
-; WIN32: {{ret$}}
+; WIN32: {{retl$}}
+
+; MINGW_X86-LABEL: _sret2:
+; MINGW_X86: {{retl$}}
-; MINGW_X86: sret2
-; MINGW_X86: ret $4
+; CYGWIN-LABEL: _sret2:
+; CYGWIN: retl $4
-; LINUX: sret2
-; LINUX: ret $4
+; LINUX-LABEL: sret2:
+; LINUX: retl $4
store i8 %y, i8* %x
ret void
@@ -47,17 +55,20 @@ entry:
define void @sret3(i8* sret %x, i8* %y) nounwind {
entry:
-; WIN32: sret3
+; WIN32-LABEL: _sret3:
; WIN32: movb $42, (%eax)
; WIN32-NOT: movb $13, (%eax)
; WIN32-NOT: popl %eax
-; WIN32: {{ret$}}
+; WIN32: {{retl$}}
-; MINGW_X86: sret3
-; MINGW_X86: ret $4
+; MINGW_X86-LABEL: _sret3:
+; MINGW_X86: {{retl$}}
-; LINUX: sret3
-; LINUX: ret $4
+; CYGWIN-LABEL: _sret3:
+; CYGWIN: retl $4
+
+; LINUX-LABEL: sret3:
+; LINUX: retl $4
store i8 42, i8* %x
store i8 13, i8* %y
@@ -69,16 +80,19 @@ entry:
define void @sret4(%struct.S4* noalias sret %agg.result) {
entry:
-; WIN32: sret4
+; WIN32-LABEL: _sret4:
; WIN32: movl $42, (%eax)
; WIN32-NOT: popl %eax
-; WIN32: {{ret$}}
+; WIN32: {{retl$}}
+
+; MINGW_X86-LABEL: _sret4:
+; MINGW_X86: {{retl$}}
-; MINGW_X86: sret4
-; MINGW_X86: ret $4
+; CYGWIN-LABEL: _sret4:
+; CYGWIN: retl $4
-; LINUX: sret4
-; LINUX: ret $4
+; LINUX-LABEL: sret4:
+; LINUX: retl $4
%x = getelementptr inbounds %struct.S4* %agg.result, i32 0, i32 0
store i32 42, i32* %x, align 4
@@ -96,14 +110,17 @@ entry:
%x = getelementptr inbounds %struct.S5* %agg.result, i32 0, i32 0
store i32 42, i32* %x, align 4
ret void
-; WIN32: {{^}}"?foo@C5@@QAE?AUS5@@XZ":
+; WIN32-LABEL: {{^}}"?foo@C5@@QAE?AUS5@@XZ":
+; MINGW_X86-LABEL: {{^}}"?foo@C5@@QAE?AUS5@@XZ":
+; CYGWIN-LABEL: {{^}}"?foo@C5@@QAE?AUS5@@XZ":
+; LINUX-LABEL: {{^}}"?foo@C5@@QAE?AUS5@@XZ":
; The address of the return structure is passed as an implicit parameter.
; In the -O0 build, %eax is spilled at the beginning of the function, hence we
; should match both 4(%esp) and 8(%esp).
; WIN32: {{[48]}}(%esp), %eax
; WIN32: movl $42, (%eax)
-; WIN32: ret $4
+; WIN32: retl $4
}
define void @call_foo5() {
@@ -111,7 +128,11 @@ entry:
%c = alloca %class.C5, align 1
%s = alloca %struct.S5, align 4
call x86_thiscallcc void @"\01?foo@C5@@QAE?AUS5@@XZ"(%struct.S5* sret %s, %class.C5* %c)
-; WIN32: {{^}}_call_foo5:
+; WIN32-LABEL: {{^}}_call_foo5:
+; MINGW_X86-LABEL: {{^}}_call_foo5:
+; CYGWIN-LABEL: {{^}}_call_foo5:
+; LINUX-LABEL: {{^}}call_foo5:
+
; Load the address of the result and put it onto stack
; (through %ecx in the -O0 build).
@@ -121,6 +142,102 @@ entry:
; The this pointer goes to ECX.
; WIN32-NEXT: leal {{[0-9]+}}(%esp), %ecx
; WIN32-NEXT: calll "?foo@C5@@QAE?AUS5@@XZ"
-; WIN32: ret
+; WIN32: retl
ret void
}
+
+
+%struct.test6 = type { i32, i32, i32 }
+define void @test6_f(%struct.test6* %x) nounwind {
+; WIN32-LABEL: _test6_f:
+; MINGW_X86-LABEL: _test6_f:
+; CYGWIN-LABEL: _test6_f:
+; LINUX-LABEL: test6_f:
+
+; The %x argument is moved to %ecx. It will be the this pointer.
+; WIN32: movl 8(%ebp), %ecx
+
+; The %x argument is moved to (%esp). It will be the this pointer. With -O0
+; we copy esp to ecx and use (ecx) instead of (esp).
+; MINGW_X86: movl 8(%ebp), %eax
+; MINGW_X86: movl %eax, (%e{{([a-d]x)|(sp)}})
+
+; CYGWIN: movl 8(%ebp), %eax
+; CYGWIN: movl %eax, (%e{{([a-d]x)|(sp)}})
+
+; The sret pointer is (%esp)
+; WIN32: leal 8(%esp), %[[REG:e[a-d]x]]
+; WIN32-NEXT: movl %[[REG]], (%e{{([a-d]x)|(sp)}})
+
+; The sret pointer is %ecx
+; MINGW_X86-NEXT: leal 8(%esp), %ecx
+; MINGW_X86-NEXT: calll _test6_g
+
+; CYGWIN-NEXT: leal 8(%esp), %ecx
+; CYGWIN-NEXT: calll _test6_g
+
+ %tmp = alloca %struct.test6, align 4
+ call x86_thiscallcc void @test6_g(%struct.test6* sret %tmp, %struct.test6* %x)
+ ret void
+}
+declare x86_thiscallcc void @test6_g(%struct.test6* sret, %struct.test6*)
+
+; Flipping the parameters at the IR level generates the same code.
+%struct.test7 = type { i32, i32, i32 }
+define void @test7_f(%struct.test7* %x) nounwind {
+; WIN32-LABEL: _test7_f:
+; MINGW_X86-LABEL: _test7_f:
+; CYGWIN-LABEL: _test7_f:
+; LINUX-LABEL: test7_f:
+
+; The %x argument is moved to %ecx on all OSs. It will be the this pointer.
+; WIN32: movl 8(%ebp), %ecx
+; MINGW_X86: movl 8(%ebp), %ecx
+; CYGWIN: movl 8(%ebp), %ecx
+
+; The sret pointer is (%esp)
+; WIN32: leal 8(%esp), %[[REG:e[a-d]x]]
+; WIN32-NEXT: movl %[[REG]], (%e{{([a-d]x)|(sp)}})
+; MINGW_X86: leal 8(%esp), %[[REG:e[a-d]x]]
+; MINGW_X86-NEXT: movl %[[REG]], (%e{{([a-d]x)|(sp)}})
+; CYGWIN: leal 8(%esp), %[[REG:e[a-d]x]]
+; CYGWIN-NEXT: movl %[[REG]], (%e{{([a-d]x)|(sp)}})
+
+ %tmp = alloca %struct.test7, align 4
+ call x86_thiscallcc void @test7_g(%struct.test7* %x, %struct.test7* sret %tmp)
+ ret void
+}
+
+define x86_thiscallcc void @test7_g(%struct.test7* %in, %struct.test7* sret %out) {
+ %s = getelementptr %struct.test7* %in, i32 0, i32 0
+ %d = getelementptr %struct.test7* %out, i32 0, i32 0
+ %v = load i32* %s
+ store i32 %v, i32* %d
+ call void @clobber_eax()
+ ret void
+
+; Make sure we return the second parameter in %eax.
+; WIN32-LABEL: _test7_g:
+; WIN32: calll _clobber_eax
+; WIN32: movl {{.*}}, %eax
+; WIN32: retl
+}
+
+declare void @clobber_eax()
+
+; Test what happens if the first parameter has to be split by codegen.
+; Realistically, no frontend will generate code like this, but here it is for
+; completeness.
+define void @test8_f(i64 inreg %a, i64* sret %out) {
+ store i64 %a, i64* %out
+ call void @clobber_eax()
+ ret void
+
+; WIN32-LABEL: _test8_f:
+; WIN32: movl {{[0-9]+}}(%esp), %[[out:[a-z]+]]
+; WIN32-DAG: movl %edx, 4(%[[out]])
+; WIN32-DAG: movl %eax, (%[[out]])
+; WIN32: calll _clobber_eax
+; WIN32: movl {{.*}}, %eax
+; WIN32: retl
+}
diff --git a/test/CodeGen/X86/win64_alloca_dynalloca.ll b/test/CodeGen/X86/win64_alloca_dynalloca.ll
index aff53057a954..a6b6536f906c 100644
--- a/test/CodeGen/X86/win64_alloca_dynalloca.ll
+++ b/test/CodeGen/X86/win64_alloca_dynalloca.ll
@@ -12,11 +12,11 @@ entry:
%buf0 = alloca i8, i64 4096, align 1
-; ___chkstk must adjust %rsp.
+; ___chkstk_ms does not adjust %rsp.
; M64: movq %rsp, %rbp
; M64: $4096, %rax
-; M64: callq ___chkstk
-; M64-NOT: %rsp
+; M64: callq ___chkstk_ms
+; M64: subq %rax, %rsp
; __chkstk does not adjust %rsp.
; W64: movq %rsp, %rbp
diff --git a/test/CodeGen/X86/win64_eh.ll b/test/CodeGen/X86/win64_eh.ll
new file mode 100644
index 000000000000..f1f874eb2f5a
--- /dev/null
+++ b/test/CodeGen/X86/win64_eh.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=WIN64
+
+; Check function without prolog
+define void @foo0() uwtable {
+entry:
+ ret void
+}
+; WIN64-LABEL: foo0:
+; WIN64: .seh_proc foo0
+; WIN64: .seh_endprologue
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Checks a small stack allocation
+define void @foo1() uwtable {
+entry:
+ %baz = alloca [2000 x i16], align 2
+ ret void
+}
+; WIN64-LABEL: foo1:
+; WIN64: .seh_proc foo1
+; WIN64: subq $4000, %rsp
+; WIN64: .seh_stackalloc 4000
+; WIN64: .seh_endprologue
+; WIN64: addq $4000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Checks a stack allocation requiring a call to __chkstk/___chkstk_ms
+define void @foo2() uwtable {
+entry:
+ %baz = alloca [4000 x i16], align 2
+ ret void
+}
+; WIN64-LABEL: foo2:
+; WIN64: .seh_proc foo2
+; WIN64: movabsq $8000, %rax
+; WIN64: callq {{__chkstk|___chkstk_ms}}
+; WIN64: subq %rax, %rsp
+; WIN64: .seh_stackalloc 8000
+; WIN64: .seh_endprologue
+; WIN64: addq $8000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
+
+
+; Checks stack push
+define i32 @foo3(i32 %f_arg, i32 %e_arg, i32 %d_arg, i32 %c_arg, i32 %b_arg, i32 %a_arg) uwtable {
+entry:
+ %a = alloca i32
+ %b = alloca i32
+ %c = alloca i32
+ %d = alloca i32
+ %e = alloca i32
+ %f = alloca i32
+ store i32 %a_arg, i32* %a
+ store i32 %b_arg, i32* %b
+ store i32 %c_arg, i32* %c
+ store i32 %d_arg, i32* %d
+ store i32 %e_arg, i32* %e
+ store i32 %f_arg, i32* %f
+ %tmp = load i32* %a
+ %tmp1 = mul i32 %tmp, 2
+ %tmp2 = load i32* %b
+ %tmp3 = mul i32 %tmp2, 3
+ %tmp4 = add i32 %tmp1, %tmp3
+ %tmp5 = load i32* %c
+ %tmp6 = mul i32 %tmp5, 5
+ %tmp7 = add i32 %tmp4, %tmp6
+ %tmp8 = load i32* %d
+ %tmp9 = mul i32 %tmp8, 7
+ %tmp10 = add i32 %tmp7, %tmp9
+ %tmp11 = load i32* %e
+ %tmp12 = mul i32 %tmp11, 11
+ %tmp13 = add i32 %tmp10, %tmp12
+ %tmp14 = load i32* %f
+ %tmp15 = mul i32 %tmp14, 13
+ %tmp16 = add i32 %tmp13, %tmp15
+ ret i32 %tmp16
+}
+; WIN64-LABEL: foo3:
+; WIN64: .seh_proc foo3
+; WIN64: pushq %rsi
+; WIN64: .seh_pushreg 6
+; WIN64: subq $24, %rsp
+; WIN64: .seh_stackalloc 24
+; WIN64: .seh_endprologue
+; WIN64: addq $24, %rsp
+; WIN64: popq %rsi
+; WIN64: ret
+; WIN64: .seh_endproc
+
+
+; Check emission of eh handler and handler data
+declare i32 @_d_eh_personality(i32, i32, i64, i8*, i8*)
+declare void @_d_eh_resume_unwind(i8*)
+
+declare i32 @bar()
+
+define i32 @foo4() #0 {
+entry:
+ %step = alloca i32, align 4
+ store i32 0, i32* %step
+ %tmp = load i32* %step
+
+ %tmp1 = invoke i32 @bar()
+ to label %finally unwind label %landingpad
+
+finally:
+ store i32 1, i32* %step
+ br label %endtryfinally
+
+landingpad:
+ %landing_pad = landingpad { i8*, i32 } personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality
+ cleanup
+ %tmp3 = extractvalue { i8*, i32 } %landing_pad, 0
+ store i32 2, i32* %step
+ call void @_d_eh_resume_unwind(i8* %tmp3)
+ unreachable
+
+endtryfinally:
+ %tmp10 = load i32* %step
+ ret i32 %tmp10
+}
+; WIN64-LABEL: foo4:
+; WIN64: .seh_proc foo4
+; WIN64: .seh_handler _d_eh_personality, @unwind, @except
+; WIN64: subq $56, %rsp
+; WIN64: .seh_stackalloc 56
+; WIN64: .seh_endprologue
+; WIN64: addq $56, %rsp
+; WIN64: ret
+; WIN64: .seh_handlerdata
+; WIN64: .seh_endproc
+
+
+; Check stack re-alignment and xmm spilling
+define void @foo5() uwtable {
+entry:
+ %s = alloca i32, align 64
+ call void asm sideeffect "", "~{rbx},~{rdi},~{xmm6},~{xmm7}"()
+ ret void
+}
+; WIN64-LABEL: foo5:
+; WIN64: .seh_proc foo5
+; WIN64: pushq %rbp
+; WIN64: .seh_pushreg 5
+; WIN64: movq %rsp, %rbp
+; WIN64: pushq %rdi
+; WIN64: .seh_pushreg 7
+; WIN64: pushq %rbx
+; WIN64: .seh_pushreg 3
+; WIN64: andq $-64, %rsp
+; WIN64: subq $128, %rsp
+; WIN64: .seh_stackalloc 48
+; WIN64: .seh_setframe 5, 64
+; WIN64: movaps %xmm7, -32(%rbp) # 16-byte Spill
+; WIN64: movaps %xmm6, -48(%rbp) # 16-byte Spill
+; WIN64: .seh_savexmm 6, 16
+; WIN64: .seh_savexmm 7, 32
+; WIN64: .seh_endprologue
+; WIN64: movaps -48(%rbp), %xmm6 # 16-byte Reload
+; WIN64: movaps -32(%rbp), %xmm7 # 16-byte Reload
+; WIN64: leaq -16(%rbp), %rsp
+; WIN64: popq %rbx
+; WIN64: popq %rdi
+; WIN64: popq %rbp
+; WIN64: retq
+; WIN64: .seh_endproc
diff --git a/test/CodeGen/X86/win_chkstk.ll b/test/CodeGen/X86/win_chkstk.ll
index 3f522ea5682c..0c02c1a11d18 100644
--- a/test/CodeGen/X86/win_chkstk.ll
+++ b/test/CodeGen/X86/win_chkstk.ll
@@ -17,7 +17,7 @@ entry:
; WIN_X32: calll __chkstk
; WIN_X64: callq __chkstk
; MINGW_X32: calll __alloca
-; MINGW_X64: callq ___chkstk
+; MINGW_X64: callq ___chkstk_ms
; LINUX-NOT: call __chkstk
%array4096 = alloca [4096 x i8], align 16 ; <[4096 x i8]*> [#uses=0]
ret i32 0
@@ -36,7 +36,7 @@ entry:
; WIN_X64: ret
; MINGW_X64: # BB#0:
-; MINGW_X64-NOT: callq _alloca
+; MINGW_X64-NOT: callq ___chkstk_ms
; MINGW_X64: ret
; LINUX: # BB#0:
@@ -53,7 +53,7 @@ entry:
; WIN_X32: calll __chkstk
; WIN_X64: callq __chkstk
; MINGW_X32: calll __alloca
-; MINGW_X64: callq ___chkstk
+; MINGW_X64: callq ___chkstk_ms
; LINUX-NOT: call __chkstk
%array4096 = alloca [4096 x i8], align 16 ; <[4096 x i8]*> [#uses=0]
ret i32 0
diff --git a/test/CodeGen/X86/win_cst_pool.ll b/test/CodeGen/X86/win_cst_pool.ll
new file mode 100644
index 000000000000..e8b853a03dae
--- /dev/null
+++ b/test/CodeGen/X86/win_cst_pool.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7 | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+define double @double() {
+ ret double 0x0000000000800000
+}
+; CHECK: .globl __real@0000000000800000
+; CHECK-NEXT: .section .rdata,"rd",discard,__real@0000000000800000
+; CHECK-NEXT: .align 8
+; CHECK-NEXT: __real@0000000000800000:
+; CHECK-NEXT: .quad 8388608
+; CHECK: double:
+; CHECK: movsd __real@0000000000800000(%rip), %xmm0
+; CHECK-NEXT: ret
+
+define <4 x i32> @vec1() {
+ ret <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+}
+; CHECK: .globl __xmm@00000000000000010000000200000003
+; CHECK-NEXT: .section .rdata,"rd",discard,__xmm@00000000000000010000000200000003
+; CHECK-NEXT: .align 16
+; CHECK-NEXT: __xmm@00000000000000010000000200000003:
+; CHECK-NEXT: .long 3
+; CHECK-NEXT: .long 2
+; CHECK-NEXT: .long 1
+; CHECK-NEXT: .long 0
+; CHECK: vec1:
+; CHECK: movaps __xmm@00000000000000010000000200000003(%rip), %xmm0
+; CHECK-NEXT: ret
+
+define <8 x i16> @vec2() {
+ ret <8 x i16> <i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>
+}
+; CHECK: .globl __xmm@00000001000200030004000500060007
+; CHECK-NEXT: .section .rdata,"rd",discard,__xmm@00000001000200030004000500060007
+; CHECK-NEXT: .align 16
+; CHECK-NEXT: __xmm@00000001000200030004000500060007:
+; CHECK-NEXT: .short 7
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .short 5
+; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short 3
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .short 1
+; CHECK-NEXT: .short 0
+; CHECK: vec2:
+; CHECK: movaps __xmm@00000001000200030004000500060007(%rip), %xmm0
+; CHECK-NEXT: ret
+
+
+define <4 x float> @undef1() {
+ ret <4 x float> <float 1.0, float 1.0, float undef, float undef>
+
+; CHECK: .globl __xmm@00000000000000003f8000003f800000
+; CHECK-NEXT: .section .rdata,"rd",discard,__xmm@00000000000000003f8000003f800000
+; CHECK-NEXT: .align 16
+; CHECK-NEXT: __xmm@00000000000000003f8000003f800000:
+; CHECK-NEXT: .long 1065353216 # float 1
+; CHECK-NEXT: .long 1065353216 # float 1
+; CHECK-NEXT: .zero 4
+; CHECK-NEXT: .zero 4
+; CHECK: undef1:
+; CHECK: movaps __xmm@00000000000000003f8000003f800000(%rip), %xmm0
+; CHECK-NEXT: ret
+}
diff --git a/test/CodeGen/X86/x86-64-double-precision-shift-left.ll b/test/CodeGen/X86/x86-64-double-precision-shift-left.ll
new file mode 100644
index 000000000000..f2380f23b8ee
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-double-precision-shift-left.ll
@@ -0,0 +1,77 @@
+; RUN: llc < %s -march=x86-64 -mcpu=bdver1 | FileCheck %s
+; Verify that for architectures that are known to have poor-latency
+; double precision shift instructions we generate an alternative sequence
+; of instructions with lower latency instead of the shld instruction.
+
+;uint64_t lshift1(uint64_t a, uint64_t b)
+;{
+; return (a << 1) | (b >> 63);
+;}
+
+; CHECK: lshift1:
+; CHECK: addq {{.*}},{{.*}}
+; CHECK-NEXT: shrq $63, {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+
+
+define i64 @lshift1(i64 %a, i64 %b) nounwind readnone uwtable {
+entry:
+ %shl = shl i64 %a, 1
+ %shr = lshr i64 %b, 63
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+;uint64_t lshift2(uint64_t a, uint64_t b)
+;{
+; return (a << 2) | (b >> 62);
+;}
+
+; CHECK: lshift2:
+; CHECK: shlq $2, {{.*}}
+; CHECK-NEXT: shrq $62, {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+
+define i64 @lshift2(i64 %a, i64 %b) nounwind readnone uwtable {
+entry:
+ %shl = shl i64 %a, 2
+ %shr = lshr i64 %b, 62
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+;uint64_t lshift7(uint64_t a, uint64_t b)
+;{
+; return (a << 7) | (b >> 57);
+;}
+
+; CHECK: lshift7:
+; CHECK: shlq $7, {{.*}}
+; CHECK-NEXT: shrq $57, {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+
+define i64 @lshift7(i64 %a, i64 %b) nounwind readnone uwtable {
+entry:
+ %shl = shl i64 %a, 7
+ %shr = lshr i64 %b, 57
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+;uint64_t lshift63(uint64_t a, uint64_t b)
+;{
+; return (a << 63) | (b >> 1);
+;}
+
+; CHECK: lshift63:
+; CHECK: shlq $63, {{.*}}
+; CHECK-NEXT: shrq {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+
+define i64 @lshift63(i64 %a, i64 %b) nounwind readnone uwtable {
+entry:
+ %shl = shl i64 %a, 63
+ %shr = lshr i64 %b, 1
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
diff --git a/test/CodeGen/X86/x86-64-double-precision-shift-right.ll b/test/CodeGen/X86/x86-64-double-precision-shift-right.ll
new file mode 100644
index 000000000000..5edaad89df4c
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-double-precision-shift-right.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -march=x86-64 -mcpu=bdver1 | FileCheck %s
+; Verify that for architectures that are known to have poor-latency
+; double precision shift instructions we generate an alternative sequence
+; of instructions with lower latency instead of the shrd instruction.
+
+;uint64_t rshift1(uint64_t a, uint64_t b)
+;{
+; return (a >> 1) | (b << 63);
+;}
+
+; CHECK: rshift1:
+; CHECK: shrq {{.*}}
+; CHECK-NEXT: shlq $63, {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+
+define i64 @rshift1(i64 %a, i64 %b) nounwind readnone uwtable {
+ %1 = lshr i64 %a, 1
+ %2 = shl i64 %b, 63
+ %3 = or i64 %2, %1
+ ret i64 %3
+}
+
+;uint64_t rshift2(uint64_t a, uint64_t b)
+;{
+; return (a >> 2) | (b << 62);
+;}
+
+; CHECK: rshift2:
+; CHECK: shrq $2, {{.*}}
+; CHECK-NEXT: shlq $62, {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+
+
+define i64 @rshift2(i64 %a, i64 %b) nounwind readnone uwtable {
+ %1 = lshr i64 %a, 2
+ %2 = shl i64 %b, 62
+ %3 = or i64 %2, %1
+ ret i64 %3
+}
+
+;uint64_t rshift7(uint64_t a, uint64_t b)
+;{
+; return (a >> 7) | (b << 57);
+;}
+
+; CHECK: rshift7:
+; CHECK: shrq $7, {{.*}}
+; CHECK-NEXT: shlq $57, {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+
+
+define i64 @rshift7(i64 %a, i64 %b) nounwind readnone uwtable {
+ %1 = lshr i64 %a, 7
+ %2 = shl i64 %b, 57
+ %3 = or i64 %2, %1
+ ret i64 %3
+}
+
+;uint64_t rshift63(uint64_t a, uint64_t b)
+;{
+; return (a >> 63) | (b << 1);
+;}
+
+; CHECK: rshift63:
+; CHECK: shrq $63, {{.*}}
+; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
+; CHECK-NEXT: orq {{.*}}, {{.*}}
+
+define i64 @rshift63(i64 %a, i64 %b) nounwind readnone uwtable {
+ %1 = lshr i64 %a, 63
+ %2 = shl i64 %b, 1
+ %3 = or i64 %2, %1
+ ret i64 %3
+}
diff --git a/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll b/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
new file mode 100644
index 000000000000..08d0257a0e5c
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
@@ -0,0 +1,67 @@
+; RUN: llc < %s -march=x86-64 -mcpu=bdver1 | FileCheck %s
+
+; clang -Oz -c test1.cpp -emit-llvm -S -o
+; Verify that we generate the shld instruction when we are optimizing for size,
+; even for X86_64 processors that are known to have poor latency double
+; precision shift instructions.
+; uint64_t lshift10(uint64_t a, uint64_t b)
+; {
+; return (a << 10) | (b >> 54);
+; }
+
+; Function Attrs: minsize nounwind optsize readnone uwtable
+define i64 @_Z8lshift10mm(i64 %a, i64 %b) #0 {
+entry:
+; CHECK: shldq $10
+ %shl = shl i64 %a, 10
+ %shr = lshr i64 %b, 54
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+attributes #0 = { minsize nounwind optsize readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+
+; clang -Os -c test2.cpp -emit-llvm -S
+; Verify that we generate the shld instruction when we are optimizing for size,
+; even for X86_64 processors that are known to have poor latency double
+; precision shift instructions.
+; uint64_t lshift11(uint64_t a, uint64_t b)
+; {
+; return (a << 11) | (b >> 53);
+; }
+
+; Function Attrs: nounwind optsize readnone uwtable
+define i64 @_Z8lshift11mm(i64 %a, i64 %b) #1 {
+entry:
+; CHECK: shldq $11
+ %shl = shl i64 %a, 11
+ %shr = lshr i64 %b, 53
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+attributes #1 = { nounwind optsize readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+; clang -O2 -c test2.cpp -emit-llvm -S
+; Verify that we do not generate the shld instruction when we are not optimizing
+; for size for X86_64 processors that are known to have poor latency double
+; precision shift instructions.
+; uint64_t lshift12(uint64_t a, uint64_t b)
+; {
+; return (a << 12) | (b >> 52);
+; }
+
+; Function Attrs: nounwind readnone uwtable
+define i64 @_Z8lshift12mm(i64 %a, i64 %b) #2 {
+entry:
+; CHECK: shlq $12
+; CHECK-NEXT: shrq $52
+ %shl = shl i64 %a, 12
+ %shr = lshr i64 %b, 52
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+attributes #2= { nounwind readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/CodeGen/X86/x86-64-double-shifts-var.ll b/test/CodeGen/X86/x86-64-double-shifts-var.ll
new file mode 100644
index 000000000000..5bab434ae6a1
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-double-shifts-var.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -march=x86-64 -mcpu=athlon | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=athlon-tbird | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=athlon-4 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=athlon-xp | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=athlon-mp | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=k8 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=opteron | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=athlon64 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=athlon-fx | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=k8-sse3 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=opteron-sse3 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=athlon64-sse3 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=amdfam10 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=btver1 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=btver2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=bdver1 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=bdver2 | FileCheck %s
+
+; Verify that for the X86_64 processors that are known to have poor latency
+; double precision shift instructions we do not generate 'shld' or 'shrd'
+; instructions.
+
+;uint64_t lshift(uint64_t a, uint64_t b, int c)
+;{
+; return (a << c) | (b >> (64-c));
+;}
+
+define i64 @lshift(i64 %a, i64 %b, i32 %c) nounwind readnone {
+entry:
+; CHECK-NOT: shld
+ %sh_prom = zext i32 %c to i64
+ %shl = shl i64 %a, %sh_prom
+ %sub = sub nsw i32 64, %c
+ %sh_prom1 = zext i32 %sub to i64
+ %shr = lshr i64 %b, %sh_prom1
+ %or = or i64 %shr, %shl
+ ret i64 %or
+}
+
+;uint64_t rshift(uint64_t a, uint64_t b, int c)
+;{
+; return (a >> c) | (b << (64-c));
+;}
+
+define i64 @rshift(i64 %a, i64 %b, i32 %c) nounwind readnone {
+entry:
+; CHECK-NOT: shrd
+ %sh_prom = zext i32 %c to i64
+ %shr = lshr i64 %a, %sh_prom
+ %sub = sub nsw i32 64, %c
+ %sh_prom1 = zext i32 %sub to i64
+ %shl = shl i64 %b, %sh_prom1
+ %or = or i64 %shl, %shr
+ ret i64 %or
+}
+
+
diff --git a/test/CodeGen/X86/x86-64-frameaddr.ll b/test/CodeGen/X86/x86-64-frameaddr.ll
deleted file mode 100644
index 7d36a7af6aaa..000000000000
--- a/test/CodeGen/X86/x86-64-frameaddr.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; CHECK: stack_end_address
-; CHECK: {{movq.+rbp.*$}}
-; CHECK: {{movq.+rbp.*$}}
-; CHECK: ret
-
-define i64* @stack_end_address() nounwind {
-entry:
- tail call i8* @llvm.frameaddress( i32 0 )
- bitcast i8* %0 to i64*
- ret i64* %1
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-64-sret-return-2.ll b/test/CodeGen/X86/x86-64-sret-return-2.ll
new file mode 100644
index 000000000000..9f57ee1960e1
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-sret-return-2.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=x86_64-apple-darwin8 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
+
+; FIXME: x32 doesn't know how to select this. This isn't a regression, it never
+; worked.
+; RUNX: llc -mtriple=x86_64-pc-linux-gnux32 < %s | FileCheck -check-prefix=X32ABI %s
+
+; This used to crash due to topological sorting issues in selection DAG.
+define void @foo(i32* sret %agg.result, i32, i32, i32, i32, i32, void (i32)* %pred) {
+entry:
+ call void %pred(i32 undef)
+ ret void
+
+; CHECK-LABEL: foo:
+; CHECK: callq
+; CHECK: movq {{.*}}, %rax
+; CHECK: ret
+}
diff --git a/test/CodeGen/X86/x86-64-static-relo-movl.ll b/test/CodeGen/X86/x86-64-static-relo-movl.ll
new file mode 100644
index 000000000000..71e52bb99191
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-static-relo-movl.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=x86_64-pc-win32-macho -relocation-model=static -O0 < %s | FileCheck %s
+
+; Ensure that we generate a movabsq, rather than a 32-bit movl or a lea, for a
+; static relocation when compiling for 64 bit.
+
+%struct.MatchInfo = type [64 x i64]
+
+@NO_MATCH = internal constant %struct.MatchInfo zeroinitializer, align 8
+
+define void @setup() {
+ %pending = alloca %struct.MatchInfo, align 8
+ %t = bitcast %struct.MatchInfo* %pending to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
+ %u = getelementptr inbounds %struct.MatchInfo* %pending, i32 0, i32 2
+ %v = load i64* %u, align 8
+ br label %done
+done:
+ ret void
+
+ ; CHECK: movabsq $_NO_MATCH, {{.*}}
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
diff --git a/test/CodeGen/X86/x86-frameaddr.ll b/test/CodeGen/X86/x86-frameaddr.ll
deleted file mode 100644
index d5958745dfff..000000000000
--- a/test/CodeGen/X86/x86-frameaddr.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | grep ebp
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-frameaddr2.ll b/test/CodeGen/X86/x86-frameaddr2.ll
deleted file mode 100644
index c5091154152b..000000000000
--- a/test/CodeGen/X86/x86-frameaddr2.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 3
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 2)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
new file mode 100644
index 000000000000..f737519bd153
--- /dev/null
+++ b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+
+define <4 x float> @foo(<4 x float> %val, <4 x float> %test) nounwind {
+; CHECK-LABEL: LCPI0_0:
+; CHECK-NEXT: .long 1065353216 ## float 1.000000e+00
+; CHECK-NEXT: .long 1065353216 ## float 1.000000e+00
+; CHECK-NEXT: .long 1065353216 ## float 1.000000e+00
+; CHECK-NEXT: .long 1065353216 ## float 1.000000e+00
+; CHECK-LABEL: foo:
+; CHECK: cmpeqps %xmm1, %xmm0
+; CHECK-NEXT: andps LCPI0_0(%rip), %xmm0
+; CHECK-NEXT: retq
+
+ %cmp = fcmp oeq <4 x float> %val, %test
+ %ext = zext <4 x i1> %cmp to <4 x i32>
+ %result = sitofp <4 x i32> %ext to <4 x float>
+ ret <4 x float> %result
+}
+
+; Make sure the operation doesn't try to get folded when the sizes don't match,
+; as that ends up crashing later when trying to form a bitcast operation for
+; the folded nodes.
+define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwind {
+; CHECK-LABEL: LCPI1_0:
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-LABEL: foo1:
+; FIXME: The operation gets scalarized. If/when the compiler learns to better
+; use [V]CVTDQ2PD, this will need to be updated.
+; CHECK: cvtsi2sdq
+; CHECK: cvtsi2sdq
+; CHECK: cvtsi2sdq
+; CHECK: cvtsi2sdq
+ %cmp = fcmp oeq <4 x float> %val, %test
+ %ext = zext <4 x i1> %cmp to <4 x i32>
+ %result = sitofp <4 x i32> %ext to <4 x double>
+ store <4 x double> %result, <4 x double>* %p
+ ret void
+}
+
+; Also test the general purpose constant folding of int->fp.
+define void @foo2(<4 x float>* noalias %result) nounwind {
+; CHECK-LABEL: LCPI2_0:
+; CHECK-NEXT: .long 1082130432 ## float 4.000000e+00
+; CHECK-NEXT: .long 1084227584 ## float 5.000000e+00
+; CHECK-NEXT: .long 1086324736 ## float 6.000000e+00
+; CHECK-NEXT: .long 1088421888 ## float 7.000000e+00
+; CHECK-LABEL: foo2:
+; CHECK: movaps LCPI2_0(%rip), %xmm0
+
+ %val = uitofp <4 x i32> <i32 4, i32 5, i32 6, i32 7> to <4 x float>
+ store <4 x float> %val, <4 x float>* %result
+ ret void
+}
diff --git a/test/CodeGen/X86/x86-shifts.ll b/test/CodeGen/X86/x86-shifts.ll
index 2f3adb8db9a0..ec479330ed6b 100644
--- a/test/CodeGen/X86/x86-shifts.ll
+++ b/test/CodeGen/X86/x86-shifts.ll
@@ -100,7 +100,7 @@ entry:
ret <8 x i16> %K
}
-; non splat test
+; non-splat test
define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind {
diff --git a/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
new file mode 100644
index 000000000000..d885f1cd364f
--- /dev/null
+++ b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mattr=+avx < %s | FileCheck %s
+
+; Check that we properly upgrade the AVX vbroadcast intrinsics to IR. The
+; expectation is that we should still get the original instruction back that
+; maps to the intrinsic.
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; CHECK-LABEL: test_mm_broadcast_ss:
+define <4 x float> @test_mm_broadcast_ss(float* readonly %__a){
+entry:
+ %0 = bitcast float* %__a to i8*
+; CHECK: vbroadcastss (%{{.*}}), %xmm
+ %1 = tail call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %0)
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: test_mm256_broadcast_sd:
+define <4 x double> @test_mm256_broadcast_sd(double* readonly %__a) {
+entry:
+ %0 = bitcast double* %__a to i8*
+; CHECK: vbroadcastsd (%{{.*}}), %ymm
+ %1 = tail call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %0)
+ ret <4 x double> %1
+}
+
+; CHECK-LABEL: test_mm256_broadcast_ss:
+define <8 x float> @test_mm256_broadcast_ss(float* readonly %__a) {
+entry:
+ %0 = bitcast float* %__a to i8*
+; CHECK: vbroadcastss (%{{.*}}), %ymm
+ %1 = tail call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %0)
+ ret <8 x float> %1
+}
+
+declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
+
+declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
+
+declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*)
diff --git a/test/CodeGen/X86/xaluo.ll b/test/CodeGen/X86/xaluo.ll
new file mode 100644
index 000000000000..f078631c2b33
--- /dev/null
+++ b/test/CodeGen/X86/xaluo.ll
@@ -0,0 +1,743 @@
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=DAG
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort < %s | FileCheck %s --check-prefix=FAST
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort < %s | FileCheck %s
+
+;
+; Get the actual value of the overflow bit.
+;
+; SADDO reg, reg
+define zeroext i1 @saddo.i8(i8 signext %v1, i8 signext %v2, i8* %res) {
+entry:
+; DAG-LABEL: saddo.i8
+; DAG: addb %sil, %dil
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i8
+; FAST: addb %sil, %dil
+; FAST-NEXT: seto %al
+ %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: saddo.i16
+; DAG: addw %si, %di
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i16
+; FAST: addw %si, %di
+; FAST-NEXT: seto %al
+ %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: saddo.i32
+; DAG: addl %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i32
+; FAST: addl %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64
+; DAG: addq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64
+; FAST: addq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SADDO reg, imm | imm, reg
+; FIXME: INC isn't supported in FastISel yet
+define zeroext i1 @saddo.i64imm1(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm1
+; DAG: incq %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64imm1
+; FAST: addq $1, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; FIXME: DAG doesn't optimize immediates on the LHS.
+define zeroext i1 @saddo.i64imm2(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm2
+; DAG: mov
+; DAG-NEXT: addq
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm2
+; FAST: addq $1, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 1, i64 %v1)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; Check boundary conditions for large immediates.
+define zeroext i1 @saddo.i64imm3(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm3
+; DAG: addq $-2147483648, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64imm3
+; FAST: addq $-2147483648, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -2147483648)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64imm4(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm4
+; DAG: movabsq $-21474836489, %[[REG:[a-z]+]]
+; DAG-NEXT: addq %rdi, %[[REG]]
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm4
+; FAST: movabsq $-21474836489, %[[REG:[a-z]+]]
+; FAST-NEXT: addq %rdi, %[[REG]]
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -21474836489)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64imm5(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm5
+; DAG: addq $2147483647, %rdi
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm5
+; FAST: addq $2147483647, %rdi
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483647)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; TODO: FastISel shouldn't use movabsq.
+define zeroext i1 @saddo.i64imm6(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm6
+; DAG: movl $2147483648, %ecx
+; DAG: addq %rdi, %rcx
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm6
+; FAST: movabsq $2147483648, %[[REG:[a-z]+]]
+; FAST: addq %rdi, %[[REG]]
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483648)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; UADDO
+define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: uaddo.i32
+; DAG: addl %esi, %edi
+; DAG-NEXT: setb %al
+; FAST-LABEL: uaddo.i32
+; FAST: addl %esi, %edi
+; FAST-NEXT: setb %al
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: uaddo.i64
+; DAG: addq %rsi, %rdi
+; DAG-NEXT: setb %al
+; FAST-LABEL: uaddo.i64
+; FAST: addq %rsi, %rdi
+; FAST-NEXT: setb %al
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SSUBO
+define zeroext i1 @ssubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: ssubo.i32
+; DAG: subl %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: ssubo.i32
+; FAST: subl %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: ssubo.i64
+; DAG: subq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: ssubo.i64
+; FAST: subq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; USUBO
+define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: usubo.i32
+; DAG: subl %esi, %edi
+; DAG-NEXT: setb %al
+; FAST-LABEL: usubo.i32
+; FAST: subl %esi, %edi
+; FAST-NEXT: setb %al
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: usubo.i64
+; DAG: subq %rsi, %rdi
+; DAG-NEXT: setb %al
+; FAST-LABEL: usubo.i64
+; FAST: subq %rsi, %rdi
+; FAST-NEXT: setb %al
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SMULO
+define zeroext i1 @smulo.i8(i8 %v1, i8 %v2, i8* %res) {
+entry:
+; FAST-LABEL: smulo.i8
+; FAST: movb %dil, %al
+; FAST-NEXT: imulb %sil
+; FAST-NEXT: seto %cl
+ %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: smulo.i16
+; DAG: imulw %si, %di
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i16
+; FAST: imulw %si, %di
+; FAST-NEXT: seto %al
+ %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: smulo.i32
+; DAG: imull %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i32
+; FAST: imull %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: smulo.i64
+; DAG: imulq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i64
+; FAST: imulq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; UMULO
+define zeroext i1 @umulo.i8(i8 %v1, i8 %v2, i8* %res) {
+entry:
+; FAST-LABEL: umulo.i8
+; FAST: movb %dil, %al
+; FAST-NEXT: mulb %sil
+; FAST-NEXT: seto %cl
+ %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: umulo.i16
+; DAG: mulw %si
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i16
+; FAST: mulw %si
+; FAST-NEXT: seto
+ %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: umulo.i32
+; DAG: mull %esi
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i32
+; FAST: mull %esi
+; FAST-NEXT: seto
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: umulo.i64
+; DAG: mulq %rsi
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i64
+; FAST: mulq %rsi
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+;
+; Check the use of the overflow bit in combination with a select instruction.
+;
+define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i32
+; CHECK: addl %esi, %eax
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i64
+; CHECK: addq %rsi, %rax
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i32
+; CHECK: addl %esi, %eax
+; CHECK-NEXT: cmovbl %edi, %esi
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i64
+; CHECK: addq %rsi, %rax
+; CHECK-NEXT: cmovbq %rdi, %rsi
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovbl %edi, %esi
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rdi, %rsi
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i32
+; CHECK: imull %esi, %eax
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i64
+; CHECK: imulq %rsi, %rax
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i32
+; CHECK: mull %esi
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i64
+; CHECK: mulq %rsi
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+
+;
+; Check the use of the overflow bit in combination with a branch instruction.
+;
+define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i32
+; CHECK: addl %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i64
+; CHECK: addq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i32
+; CHECK: addl %esi, %edi
+; CHECK-NEXT: jb
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i64
+; CHECK: addq %rsi, %rdi
+; CHECK-NEXT: jb
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jb
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jb
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i32
+; CHECK: imull %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i64
+; CHECK: imulq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i32
+; CHECK: mull %esi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i64
+; CHECK: mulq %rsi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+declare {i8, i1} @llvm.sadd.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i8, i1} @llvm.smul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
+declare {i8, i1} @llvm.umul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.umul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
+
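+; !0 gives the overflow successor weight 0 and the continue successor weight
+; 2147483647, marking the overflow paths as cold.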
+!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/X86/zlib-longest-match.ll b/test/CodeGen/X86/zlib-longest-match.ll
new file mode 100644
index 000000000000..d1598dce02d7
--- /dev/null
+++ b/test/CodeGen/X86/zlib-longest-match.ll
@@ -0,0 +1,240 @@
+; RUN: llc -march=x86-64 < %s -block-placement-exit-block-bias=20 | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; This is longest_match, the hot function from zlib's deflate implementation.
+
+%struct.internal_state = type { %struct.z_stream_s*, i32, i8*, i64, i8*, i32, i32, %struct.gz_header_s*, i32, i8, i32, i32, i32, i32, i8*, i64, i16*, i16*, i32, i32, i32, i32, i32, i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [573 x %struct.ct_data_s], [61 x %struct.ct_data_s], [39 x %struct.ct_data_s], %struct.tree_desc_s, %struct.tree_desc_s, %struct.tree_desc_s, [16 x i16], [573 x i32], i32, i32, [573 x i8], i8*, i32, i32, i16*, i64, i64, i32, i32, i16, i32, i64 }
+%struct.z_stream_s = type { i8*, i32, i64, i8*, i32, i64, i8*, %struct.internal_state*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i8*, i32, i64, i64 }
+%struct.gz_header_s = type { i32, i64, i32, i32, i8*, i32, i32, i8*, i32, i8*, i32, i32, i32 }
+%struct.ct_data_s = type { %union.anon, %union.anon.0 }
+%union.anon = type { i16 }
+%union.anon.0 = type { i16 }
+%struct.tree_desc_s = type { %struct.ct_data_s*, i32, %struct.static_tree_desc_s* }
+%struct.static_tree_desc_s = type { i32 }
+
+; CHECK-LABEL: longest_match:
+
+; Verify that there are no spills or reloads in the loop exit block. This loop
+; is mostly cold; only %do.cond125 and %land.rhs131 are hot.
+; CHECK: %do.cond125
+; CHECK-NOT: {{Spill|Reload}}
+; CHECK: jbe
+
+; Verify that block placement doesn't destroy source order. It's important that
+; the two hot blocks are laid out close to each other.
+; CHECK-NEXT: %land.rhs131
+; CHECK: jne
+; CHECK: jmp
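+;
+; The -block-placement-exit-block-bias value in the RUN line biases block
+; placement's choice of loop exit block, which the layout checked above
+; depends on.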
+define i32 @longest_match(%struct.internal_state* nocapture %s, i32 %cur_match) nounwind {
+entry:
+ %max_chain_length = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 31
+ %0 = load i32* %max_chain_length, align 4
+ %window = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 14
+ %1 = load i8** %window, align 8
+ %strstart = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 27
+ %2 = load i32* %strstart, align 4
+ %idx.ext = zext i32 %2 to i64
+ %add.ptr = getelementptr inbounds i8* %1, i64 %idx.ext
+ %prev_length = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 30
+ %3 = load i32* %prev_length, align 4
+ %nice_match1 = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 36
+ %4 = load i32* %nice_match1, align 4
+ %w_size = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 11
+ %5 = load i32* %w_size, align 4
+ %sub = add i32 %5, -262
+ %cmp = icmp ugt i32 %2, %sub
+ %sub6 = sub i32 %2, %sub
+ %sub6. = select i1 %cmp, i32 %sub6, i32 0
+ %prev7 = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 16
+ %6 = load i16** %prev7, align 8
+ %w_mask = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 13
+ %7 = load i32* %w_mask, align 4
+ %add.ptr11.sum = add i64 %idx.ext, 258
+ %add.ptr12 = getelementptr inbounds i8* %1, i64 %add.ptr11.sum
+ %sub13 = add nsw i32 %3, -1
+ %idxprom = sext i32 %sub13 to i64
+ %add.ptr.sum = add i64 %idxprom, %idx.ext
+ %arrayidx = getelementptr inbounds i8* %1, i64 %add.ptr.sum
+ %8 = load i8* %arrayidx, align 1
+ %idxprom14 = sext i32 %3 to i64
+ %add.ptr.sum213 = add i64 %idxprom14, %idx.ext
+ %arrayidx15 = getelementptr inbounds i8* %1, i64 %add.ptr.sum213
+ %9 = load i8* %arrayidx15, align 1
+ %good_match = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 35
+ %10 = load i32* %good_match, align 4
+ %cmp17 = icmp ult i32 %3, %10
+ %shr = lshr i32 %0, 2
+ %chain_length.0 = select i1 %cmp17, i32 %0, i32 %shr
+ %lookahead = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 29
+ %11 = load i32* %lookahead, align 4
+ %cmp18 = icmp ugt i32 %4, %11
+ %. = select i1 %cmp18, i32 %11, i32 %4
+ %match_start = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 28
+ %add.ptr.sum217 = add i64 %idx.ext, 1
+ %arrayidx44 = getelementptr inbounds i8* %1, i64 %add.ptr.sum217
+ %add.ptr.sum218 = add i64 %idx.ext, 2
+ %add.ptr50 = getelementptr inbounds i8* %1, i64 %add.ptr.sum218
+ %sub.ptr.lhs.cast = ptrtoint i8* %add.ptr12 to i64
+ br label %do.body
+
+do.body: ; preds = %land.rhs131, %entry
+ %best_len.0 = phi i32 [ %best_len.1, %land.rhs131 ], [ %3, %entry ]
+ %chain_length.1 = phi i32 [ %dec, %land.rhs131 ], [ %chain_length.0, %entry ]
+ %cur_match.addr.0 = phi i32 [ %conv128, %land.rhs131 ], [ %cur_match, %entry ]
+ %scan_end1.0 = phi i8 [ %scan_end1.1, %land.rhs131 ], [ %8, %entry ]
+ %scan_end.0 = phi i8 [ %scan_end.1, %land.rhs131 ], [ %9, %entry ]
+ %idx.ext23 = zext i32 %cur_match.addr.0 to i64
+ %add.ptr24 = getelementptr inbounds i8* %1, i64 %idx.ext23
+ %idxprom25 = sext i32 %best_len.0 to i64
+ %add.ptr24.sum = add i64 %idx.ext23, %idxprom25
+ %arrayidx26 = getelementptr inbounds i8* %1, i64 %add.ptr24.sum
+ %12 = load i8* %arrayidx26, align 1
+ %cmp28 = icmp eq i8 %12, %scan_end.0
+ br i1 %cmp28, label %lor.lhs.false, label %do.cond125
+
+lor.lhs.false: ; preds = %do.body
+ %sub30 = add nsw i32 %best_len.0, -1
+ %idxprom31 = sext i32 %sub30 to i64
+ %add.ptr24.sum214 = add i64 %idx.ext23, %idxprom31
+ %arrayidx32 = getelementptr inbounds i8* %1, i64 %add.ptr24.sum214
+ %13 = load i8* %arrayidx32, align 1
+ %cmp35 = icmp eq i8 %13, %scan_end1.0
+ br i1 %cmp35, label %lor.lhs.false37, label %do.cond125
+
+lor.lhs.false37: ; preds = %lor.lhs.false
+ %14 = load i8* %add.ptr24, align 1
+ %15 = load i8* %add.ptr, align 1
+ %cmp40 = icmp eq i8 %14, %15
+ br i1 %cmp40, label %lor.lhs.false42, label %do.cond125
+
+lor.lhs.false42: ; preds = %lor.lhs.false37
+ %add.ptr24.sum215 = add i64 %idx.ext23, 1
+ %incdec.ptr = getelementptr inbounds i8* %1, i64 %add.ptr24.sum215
+ %16 = load i8* %incdec.ptr, align 1
+ %17 = load i8* %arrayidx44, align 1
+ %cmp46 = icmp eq i8 %16, %17
+ br i1 %cmp46, label %if.end49, label %do.cond125
+
+if.end49: ; preds = %lor.lhs.false42
+ %incdec.ptr.sum = add i64 %idx.ext23, 2
+ %incdec.ptr51 = getelementptr inbounds i8* %1, i64 %incdec.ptr.sum
+ br label %do.cond
+
+do.cond: ; preds = %land.lhs.true100, %if.end49
+ %match.0 = phi i8* [ %incdec.ptr51, %if.end49 ], [ %incdec.ptr103, %land.lhs.true100 ]
+ %scan.1 = phi i8* [ %add.ptr50, %if.end49 ], [ %incdec.ptr101, %land.lhs.true100 ]
+ %incdec.ptr53 = getelementptr inbounds i8* %scan.1, i64 1
+ %18 = load i8* %incdec.ptr53, align 1
+ %incdec.ptr55 = getelementptr inbounds i8* %match.0, i64 1
+ %19 = load i8* %incdec.ptr55, align 1
+ %cmp57 = icmp eq i8 %18, %19
+ br i1 %cmp57, label %land.lhs.true, label %do.end
+
+land.lhs.true: ; preds = %do.cond
+ %incdec.ptr59 = getelementptr inbounds i8* %scan.1, i64 2
+ %20 = load i8* %incdec.ptr59, align 1
+ %incdec.ptr61 = getelementptr inbounds i8* %match.0, i64 2
+ %21 = load i8* %incdec.ptr61, align 1
+ %cmp63 = icmp eq i8 %20, %21
+ br i1 %cmp63, label %land.lhs.true65, label %do.end
+
+land.lhs.true65: ; preds = %land.lhs.true
+ %incdec.ptr66 = getelementptr inbounds i8* %scan.1, i64 3
+ %22 = load i8* %incdec.ptr66, align 1
+ %incdec.ptr68 = getelementptr inbounds i8* %match.0, i64 3
+ %23 = load i8* %incdec.ptr68, align 1
+ %cmp70 = icmp eq i8 %22, %23
+ br i1 %cmp70, label %land.lhs.true72, label %do.end
+
+land.lhs.true72: ; preds = %land.lhs.true65
+ %incdec.ptr73 = getelementptr inbounds i8* %scan.1, i64 4
+ %24 = load i8* %incdec.ptr73, align 1
+ %incdec.ptr75 = getelementptr inbounds i8* %match.0, i64 4
+ %25 = load i8* %incdec.ptr75, align 1
+ %cmp77 = icmp eq i8 %24, %25
+ br i1 %cmp77, label %land.lhs.true79, label %do.end
+
+land.lhs.true79: ; preds = %land.lhs.true72
+ %incdec.ptr80 = getelementptr inbounds i8* %scan.1, i64 5
+ %26 = load i8* %incdec.ptr80, align 1
+ %incdec.ptr82 = getelementptr inbounds i8* %match.0, i64 5
+ %27 = load i8* %incdec.ptr82, align 1
+ %cmp84 = icmp eq i8 %26, %27
+ br i1 %cmp84, label %land.lhs.true86, label %do.end
+
+land.lhs.true86: ; preds = %land.lhs.true79
+ %incdec.ptr87 = getelementptr inbounds i8* %scan.1, i64 6
+ %28 = load i8* %incdec.ptr87, align 1
+ %incdec.ptr89 = getelementptr inbounds i8* %match.0, i64 6
+ %29 = load i8* %incdec.ptr89, align 1
+ %cmp91 = icmp eq i8 %28, %29
+ br i1 %cmp91, label %land.lhs.true93, label %do.end
+
+land.lhs.true93: ; preds = %land.lhs.true86
+ %incdec.ptr94 = getelementptr inbounds i8* %scan.1, i64 7
+ %30 = load i8* %incdec.ptr94, align 1
+ %incdec.ptr96 = getelementptr inbounds i8* %match.0, i64 7
+ %31 = load i8* %incdec.ptr96, align 1
+ %cmp98 = icmp eq i8 %30, %31
+ br i1 %cmp98, label %land.lhs.true100, label %do.end
+
+land.lhs.true100: ; preds = %land.lhs.true93
+ %incdec.ptr101 = getelementptr inbounds i8* %scan.1, i64 8
+ %32 = load i8* %incdec.ptr101, align 1
+ %incdec.ptr103 = getelementptr inbounds i8* %match.0, i64 8
+ %33 = load i8* %incdec.ptr103, align 1
+ %cmp105 = icmp eq i8 %32, %33
+ %cmp107 = icmp ult i8* %incdec.ptr101, %add.ptr12
+ %or.cond = and i1 %cmp105, %cmp107
+ br i1 %or.cond, label %do.cond, label %do.end
+
+do.end: ; preds = %land.lhs.true100, %land.lhs.true93, %land.lhs.true86, %land.lhs.true79, %land.lhs.true72, %land.lhs.true65, %land.lhs.true, %do.cond
+ %scan.2 = phi i8* [ %incdec.ptr101, %land.lhs.true100 ], [ %incdec.ptr94, %land.lhs.true93 ], [ %incdec.ptr87, %land.lhs.true86 ], [ %incdec.ptr80, %land.lhs.true79 ], [ %incdec.ptr73, %land.lhs.true72 ], [ %incdec.ptr66, %land.lhs.true65 ], [ %incdec.ptr59, %land.lhs.true ], [ %incdec.ptr53, %do.cond ]
+ %sub.ptr.rhs.cast = ptrtoint i8* %scan.2 to i64
+ %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
+ %conv109 = trunc i64 %sub.ptr.sub to i32
+ %sub110 = sub nsw i32 258, %conv109
+ %cmp112 = icmp sgt i32 %sub110, %best_len.0
+ br i1 %cmp112, label %if.then114, label %do.cond125
+
+if.then114: ; preds = %do.end
+ store i32 %cur_match.addr.0, i32* %match_start, align 4
+ %cmp115 = icmp slt i32 %sub110, %.
+ br i1 %cmp115, label %if.end118, label %do.end135
+
+if.end118: ; preds = %if.then114
+ %sub119 = add nsw i32 %sub110, -1
+ %idxprom120 = sext i32 %sub119 to i64
+ %add.ptr111.sum = add i64 %idxprom120, %idx.ext
+ %arrayidx121 = getelementptr inbounds i8* %1, i64 %add.ptr111.sum
+ %34 = load i8* %arrayidx121, align 1
+ %idxprom122 = sext i32 %sub110 to i64
+ %add.ptr111.sum216 = add i64 %idxprom122, %idx.ext
+ %arrayidx123 = getelementptr inbounds i8* %1, i64 %add.ptr111.sum216
+ %35 = load i8* %arrayidx123, align 1
+ br label %do.cond125
+
+do.cond125: ; preds = %if.end118, %do.end, %lor.lhs.false42, %lor.lhs.false37, %lor.lhs.false, %do.body
+ %best_len.1 = phi i32 [ %best_len.0, %do.body ], [ %best_len.0, %lor.lhs.false ], [ %best_len.0, %lor.lhs.false37 ], [ %best_len.0, %lor.lhs.false42 ], [ %sub110, %if.end118 ], [ %best_len.0, %do.end ]
+ %scan_end1.1 = phi i8 [ %scan_end1.0, %do.body ], [ %scan_end1.0, %lor.lhs.false ], [ %scan_end1.0, %lor.lhs.false37 ], [ %scan_end1.0, %lor.lhs.false42 ], [ %34, %if.end118 ], [ %scan_end1.0, %do.end ]
+ %scan_end.1 = phi i8 [ %scan_end.0, %do.body ], [ %scan_end.0, %lor.lhs.false ], [ %scan_end.0, %lor.lhs.false37 ], [ %scan_end.0, %lor.lhs.false42 ], [ %35, %if.end118 ], [ %scan_end.0, %do.end ]
+ %and = and i32 %cur_match.addr.0, %7
+ %idxprom126 = zext i32 %and to i64
+ %arrayidx127 = getelementptr inbounds i16* %6, i64 %idxprom126
+ %36 = load i16* %arrayidx127, align 2
+ %conv128 = zext i16 %36 to i32
+ %cmp129 = icmp ugt i32 %conv128, %sub6.
+ br i1 %cmp129, label %land.rhs131, label %do.end135
+
+land.rhs131: ; preds = %do.cond125
+ %dec = add i32 %chain_length.1, -1
+ %cmp132 = icmp eq i32 %dec, 0
+ br i1 %cmp132, label %do.end135, label %do.body
+
+do.end135: ; preds = %land.rhs131, %do.cond125, %if.then114
+ %best_len.2 = phi i32 [ %best_len.1, %land.rhs131 ], [ %best_len.1, %do.cond125 ], [ %sub110, %if.then114 ]
+ %cmp137 = icmp ugt i32 %best_len.2, %11
+ %.best_len.2 = select i1 %cmp137, i32 %11, i32 %best_len.2
+ ret i32 %.best_len.2
+}
diff --git a/test/CodeGen/XCore/align.ll b/test/CodeGen/XCore/align.ll
new file mode 100644
index 000000000000..2878a648e09b
--- /dev/null
+++ b/test/CodeGen/XCore/align.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -march=xcore | FileCheck %s
+
+; CHECK: .align 4
+; CHECK-LABEL: f:
+define void @f() nounwind {
+entry:
+ ret void
+}
+
+; CHECK: .align 2
+; CHECK-LABEL: g:
+define void @g() nounwind optsize {
+entry:
+ ret void
+}
diff --git a/test/CodeGen/XCore/atomic.ll b/test/CodeGen/XCore/atomic.ll
index 95fca9ac5b21..58ef38bd3f60 100644
--- a/test/CodeGen/XCore/atomic.ll
+++ b/test/CodeGen/XCore/atomic.ll
@@ -14,3 +14,79 @@ entry:
fence seq_cst
ret void
}
+
+@pool = external global i64
+
+define void @atomicloadstore() nounwind {
+entry:
+; CHECK-LABEL: atomicloadstore
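+; Atomic loads and stores lower to ordinary loads and stores plus #MEMBARRIER
+; pseudo instructions: acquire/seq_cst loads are followed by a barrier,
+; release/seq_cst stores are preceded by one (seq_cst stores by one on each
+; side), and monotonic accesses need no barrier at all.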
+
+; CHECK: ldw r[[R0:[0-9]+]], dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+ %0 = load atomic i32* bitcast (i64* @pool to i32*) acquire, align 4
+
+; CHECK-NEXT: ldaw r[[R1:[0-9]+]], dp[pool]
+; CHECK-NEXT: ldc r[[R2:[0-9]+]], 0
+
+; CHECK-NEXT: ld16s r3, r[[R1]][r[[R2]]]
+; CHECK-NEXT: #MEMBARRIER
+ %1 = load atomic i16* bitcast (i64* @pool to i16*) acquire, align 2
+
+; CHECK-NEXT: ld8u r11, r[[R1]][r[[R2]]]
+; CHECK-NEXT: #MEMBARRIER
+ %2 = load atomic i8* bitcast (i64* @pool to i8*) acquire, align 1
+
+; CHECK-NEXT: ldw r4, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+ %3 = load atomic i32* bitcast (i64* @pool to i32*) seq_cst, align 4
+
+; CHECK-NEXT: ld16s r5, r[[R1]][r[[R2]]]
+; CHECK-NEXT: #MEMBARRIER
+ %4 = load atomic i16* bitcast (i64* @pool to i16*) seq_cst, align 2
+
+; CHECK-NEXT: ld8u r6, r[[R1]][r[[R2]]]
+; CHECK-NEXT: #MEMBARRIER
+ %5 = load atomic i8* bitcast (i64* @pool to i8*) seq_cst, align 1
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: stw r[[R0]], dp[pool]
+ store atomic i32 %0, i32* bitcast (i64* @pool to i32*) release, align 4
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st16 r3, r[[R1]][r[[R2]]]
+ store atomic i16 %1, i16* bitcast (i64* @pool to i16*) release, align 2
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st8 r11, r[[R1]][r[[R2]]]
+ store atomic i8 %2, i8* bitcast (i64* @pool to i8*) release, align 1
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: stw r4, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+ store atomic i32 %3, i32* bitcast (i64* @pool to i32*) seq_cst, align 4
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st16 r5, r[[R1]][r[[R2]]]
+; CHECK-NEXT: #MEMBARRIER
+ store atomic i16 %4, i16* bitcast (i64* @pool to i16*) seq_cst, align 2
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st8 r6, r[[R1]][r[[R2]]]
+; CHECK-NEXT: #MEMBARRIER
+ store atomic i8 %5, i8* bitcast (i64* @pool to i8*) seq_cst, align 1
+
+; CHECK-NEXT: ldw r[[R0]], dp[pool]
+; CHECK-NEXT: stw r[[R0]], dp[pool]
+; CHECK-NEXT: ld16s r[[R0]], r[[R1]][r[[R2]]]
+; CHECK-NEXT: st16 r[[R0]], r[[R1]][r[[R2]]]
+; CHECK-NEXT: ld8u r[[R0]], r[[R1]][r[[R2]]]
+; CHECK-NEXT: st8 r[[R0]], r[[R1]][r[[R2]]]
+ %6 = load atomic i32* bitcast (i64* @pool to i32*) monotonic, align 4
+ store atomic i32 %6, i32* bitcast (i64* @pool to i32*) monotonic, align 4
+ %7 = load atomic i16* bitcast (i64* @pool to i16*) monotonic, align 2
+ store atomic i16 %7, i16* bitcast (i64* @pool to i16*) monotonic, align 2
+ %8 = load atomic i8* bitcast (i64* @pool to i8*) monotonic, align 1
+ store atomic i8 %8, i8* bitcast (i64* @pool to i8*) monotonic, align 1
+
+ ret void
+}
diff --git a/test/CodeGen/XCore/bigstructret.ll b/test/CodeGen/XCore/bigstructret.ll
index 877c57140a1d..567b37209195 100644
--- a/test/CodeGen/XCore/bigstructret.ll
+++ b/test/CodeGen/XCore/bigstructret.ll
@@ -3,8 +3,8 @@
%0 = type { i32, i32, i32, i32 }
%1 = type { i32, i32, i32, i32, i32 }
-; Structs of 4 words can be returned in registers
-define internal fastcc %0 @ReturnBigStruct() nounwind readnone {
+; Structs of 4 words are returned in registers
+define internal %0 @ReturnBigStruct() nounwind readnone {
entry:
%0 = insertvalue %0 zeroinitializer, i32 12, 0
%1 = insertvalue %0 %0, i32 24, 1
@@ -19,8 +19,39 @@ entry:
; CHECK: ldc r3, 24601
; CHECK: retsp 0
-; Structs bigger than 4 words are returned via a hidden hidden sret-parameter
-define internal fastcc %1 @ReturnBigStruct2() nounwind readnone {
+; Structs of more than 4 words are partially returned in memory so long as the
+; function is not variadic.
+define { i32, i32, i32, i32, i32} @f(i32, i32, i32, i32, i32) nounwind readnone {
+; CHECK-LABEL: f:
+; CHECK: ldc [[REGISTER:r[0-9]+]], 5
+; CHECK-NEXT: stw [[REGISTER]], sp[2]
+; CHECK-NEXT: retsp 0
+body:
+ ret { i32, i32, i32, i32, i32} { i32 undef, i32 undef, i32 undef, i32 undef, i32 5}
+}
+
+@x = external global i32
+@y = external global i32
+
+; Check that we call a function returning more than 4 words correctly.
+define i32 @g() nounwind {
+; CHECK-LABEL: g:
+; CHECK: entsp 3
+; CHECK: ldc [[REGISTER:r[0-9]+]], 0
+; CHECK: stw [[REGISTER]], sp[1]
+; CHECK: bl f
+; CHECK-NEXT: ldw r0, sp[2]
+; CHECK-NEXT: retsp 3
+;
+body:
+ %0 = call { i32, i32, i32, i32, i32 } @f(i32 0, i32 0, i32 0, i32 0, i32 0)
+ %1 = extractvalue { i32, i32, i32, i32, i32 } %0, 4
+ ret i32 %1
+}
+
+; Variadic functions return structs bigger than 4 words via a hidden
+; sret-parameter
+define internal %1 @ReturnBigStruct2(i32 %dummy, ...) nounwind readnone {
entry:
%0 = insertvalue %1 zeroinitializer, i32 12, 0
%1 = insertvalue %1 %0, i32 24, 1
diff --git a/test/CodeGen/XCore/byVal.ll b/test/CodeGen/XCore/byVal.ll
index e9612fd6021a..df6c6d351d18 100644
--- a/test/CodeGen/XCore/byVal.ll
+++ b/test/CodeGen/XCore/byVal.ll
@@ -20,7 +20,7 @@ entry:
; CHECK: ldaw r5, sp[1]
; CHECK: ldc r2, 40
; CHECK: mov r0, r5
-; CHECK: bl memcpy
+; CHECK: bl __memcpy_4
; CHECK: mov r0, r5
; CHECK: bl f1
; CHECK: mov r0, r4
diff --git a/test/CodeGen/XCore/call.ll b/test/CodeGen/XCore/call.ll
new file mode 100644
index 000000000000..06a12f144405
--- /dev/null
+++ b/test/CodeGen/XCore/call.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -march=xcore | FileCheck %s
+
+; CHECK-LABEL: bl_imm:
+; CHECK: ldw [[R0:r[0-9]+]], cp
+; CHECK: bla [[R0]]
+define void @bl_imm() nounwind {
+entry:
+ tail call void inttoptr (i64 65536 to void ()*)() nounwind
+ ret void
+}
diff --git a/test/CodeGen/XCore/codemodel.ll b/test/CodeGen/XCore/codemodel.ll
new file mode 100644
index 000000000000..0245893c478d
--- /dev/null
+++ b/test/CodeGen/XCore/codemodel.ll
@@ -0,0 +1,213 @@
+
+; RUN: not llc < %s -march=xcore -code-model=medium 2>&1 | FileCheck %s -check-prefix=BAD_CM
+; RUN: not llc < %s -march=xcore -code-model=kernel 2>&1 | FileCheck %s -check-prefix=BAD_CM
+; BAD_CM: Target only supports CodeModel Small or Large
+
+
+; RUN: llc < %s -march=xcore -code-model=default | FileCheck %s
+; RUN: llc < %s -march=xcore -code-model=small | FileCheck %s
+; RUN: llc < %s -march=xcore -code-model=large | FileCheck %s -check-prefix=LARGE
+
+
+; CHECK-LABEL: test:
+; CHECK: zext r0, 1
+; CHECK: bt r0, [[JUMP:.LBB[0-9_]*]]
+; CHECK: ldaw r0, dp[A2]
+; CHECK: retsp 0
+; CHECK: [[JUMP]]
+; CHECK: ldaw r0, dp[A1]
+; CHECK: retsp 0
+; LARGE-LABEL: test:
+; LARGE: zext r0, 1
+; LARGE: ldaw r11, cp[.LCPI{{[0-9_]*}}]
+; LARGE: mov r1, r11
+; LARGE: ldaw r11, cp[.LCPI{{[0-9_]*}}]
+; LARGE: bt r0, [[JUMP:.LBB[0-9_]*]]
+; LARGE: mov r11, r1
+; LARGE: [[JUMP]]
+; LARGE: ldw r0, r11[0]
+; LARGE: retsp 0
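+;
+; With the small code model, globals are addressed directly through dp[]; with
+; the large model their addresses are first loaded from the constant pool.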
+@A1 = external global [50000 x i32]
+@A2 = external global [50000 x i32]
+define [50000 x i32]* @test(i1 %bool) nounwind {
+entry:
+ %Addr = select i1 %bool, [50000 x i32]* @A1, [50000 x i32]* @A2
+ ret [50000 x i32]* %Addr
+}
+
+
+; CHECK: .section .cp.rodata.cst4,"aMc",@progbits,4
+; CHECK: .long 65536
+; CHECK: .text
+; CHECK-LABEL: f:
+; CHECK: ldc r1, 65532
+; CHECK: add r1, r0, r1
+; CHECK: ldw r1, r1[0]
+; CHECK: ldw r2, cp[.LCPI{{[0-9_]*}}]
+; CHECK: add r0, r0, r2
+; CHECK: ldw r0, r0[0]
+; CHECK: add r0, r1, r0
+; CHECK: ldw r1, dp[l]
+; CHECK: add r0, r0, r1
+; CHECK: ldw r1, dp[l+4]
+; CHECK: add r0, r0, r1
+; CHECK: ldw r1, dp[l+392]
+; CHECK: add r0, r0, r1
+; CHECK: ldw r1, dp[l+396]
+; CHECK: add r0, r0, r1
+; CHECK: ldw r1, dp[s]
+; CHECK: add r0, r0, r1
+; CHECK: ldw r1, dp[s+36]
+; CHECK: add r0, r0, r1
+; CHECK: retsp 0
+;
+; LARGE: .section .cp.rodata.cst4,"aMc",@progbits,4
+; LARGE: .long 65536
+; LARGE: .section .cp.rodata,"ac",@progbits
+; LARGE: .long l
+; LARGE: .long l+4
+; LARGE: .long l+392
+; LARGE: .long l+396
+; LARGE: .text
+; LARGE-LABEL: f:
+; LARGE: ldc r1, 65532
+; LARGE: add r1, r0, r1
+; LARGE: ldw r1, r1[0]
+; LARGE: ldw r2, cp[.LCPI{{[0-9_]*}}]
+; LARGE: add r0, r0, r2
+; LARGE: ldw r0, r0[0]
+; LARGE: add r0, r1, r0
+; LARGE: ldw r1, cp[.LCPI{{[0-9_]*}}]
+; LARGE: ldw r1, r1[0]
+; LARGE: add r0, r0, r1
+; LARGE: ldw r1, cp[.LCPI{{[0-9_]*}}]
+; LARGE: ldw r1, r1[0]
+; LARGE: add r0, r0, r1
+; LARGE: ldw r1, cp[.LCPI{{[0-9_]*}}]
+; LARGE: ldw r1, r1[0]
+; LARGE: add r0, r0, r1
+; LARGE: ldw r1, cp[.LCPI{{[0-9_]*}}]
+; LARGE: ldw r1, r1[0]
+; LARGE: add r0, r0, r1
+; LARGE: ldw r1, dp[s]
+; LARGE: add r0, r0, r1
+; LARGE: ldw r1, dp[s+36]
+; LARGE: add r0, r0, r1
+; LARGE: retsp 0
+define i32 @f(i32* %i) {
+entry:
+ %0 = getelementptr inbounds i32* %i, i32 16383
+ %1 = load i32* %0
+ %2 = getelementptr inbounds i32* %i, i32 16384
+ %3 = load i32* %2
+ %4 = add nsw i32 %1, %3
+ %5 = load i32* getelementptr inbounds ([100 x i32]* @l, i32 0, i32 0)
+ %6 = add nsw i32 %4, %5
+ %7 = load i32* getelementptr inbounds ([100 x i32]* @l, i32 0, i32 1)
+ %8 = add nsw i32 %6, %7
+ %9 = load i32* getelementptr inbounds ([100 x i32]* @l, i32 0, i32 98)
+ %10 = add nsw i32 %8, %9
+ %11 = load i32* getelementptr inbounds ([100 x i32]* @l, i32 0, i32 99)
+ %12 = add nsw i32 %10, %11
+ %13 = load i32* getelementptr inbounds ([10 x i32]* @s, i32 0, i32 0)
+ %14 = add nsw i32 %12, %13
+ %15 = load i32* getelementptr inbounds ([10 x i32]* @s, i32 0, i32 9)
+ %16 = add nsw i32 %14, %15
+ ret i32 %16
+}
+
+
+; CHECK-LABEL: UnknownSize:
+; CHECK: ldw r0, dp[NoSize+40]
+; CHECK-NEXT: retsp 0
+;
+; LARGE: .section .cp.rodata,"ac",@progbits
+; LARGE: .LCPI{{[0-9_]*}}
+; LARGE-NEXT: .long NoSize
+; LARGE-NEXT: .text
+; LARGE-LABEL: UnknownSize:
+; LARGE: ldw r0, cp[.LCPI{{[0-9_]*}}]
+; LARGE-NEXT: ldw r0, r0[0]
+; LARGE-NEXT: retsp 0
+@NoSize = external global [0 x i32]
+define i32 @UnknownSize() nounwind {
+entry:
+ %0 = load i32* getelementptr inbounds ([0 x i32]* @NoSize, i32 0, i32 10)
+ ret i32 %0
+}
+
+
+; CHECK-LABEL: UnknownStruct:
+; CHECK: ldaw r0, dp[Unknown]
+; CHECK-NEXT: retsp 0
+;
+; LARGE: .section .cp.rodata,"ac",@progbits
+; LARGE: .LCPI{{[0-9_]*}}
+; LARGE-NEXT: .long Unknown
+; LARGE-NEXT: .text
+; LARGE-LABEL: UnknownStruct:
+; LARGE: ldw r0, cp[.LCPI{{[0-9_]*}}]
+; LARGE-NEXT: retsp 0
+%Struct = type opaque
+@Unknown = external global %Struct
+define %Struct* @UnknownStruct() nounwind {
+entry:
+ ret %Struct* @Unknown
+}
+
+
+; CHECK: .section .dp.bss,"awd",@nobits
+; CHECK-LABEL: l:
+; CHECK: .space 400
+; LARGE: .section .dp.bss.large,"awd",@nobits
+; LARGE-LABEL: l:
+; LARGE: .space 400
+@l = global [100 x i32] zeroinitializer
+
+; CHECK-LABEL: s:
+; CHECK: .space 40
+; LARGE: .section .dp.bss,"awd",@nobits
+; LARGE-LABEL: s:
+; LARGE: .space 40
+@s = global [10 x i32] zeroinitializer
+
+; CHECK: .section .dp.rodata,"awd",@progbits
+; CHECK-LABEL: cl:
+; CHECK: .space 400
+; LARGE: .section .dp.rodata.large,"awd",@progbits
+; LARGE-LABEL: cl:
+; LARGE: .space 400
+@cl = constant [100 x i32] zeroinitializer
+
+; CHECK-LABEL: cs:
+; CHECK: .space 40
+; LARGE: .section .dp.rodata,"awd",@progbits
+; LARGE-LABEL: cs:
+; LARGE: .space 40
+@cs = constant [10 x i32] zeroinitializer
+
+; CHECK: .section .cp.rodata,"ac",@progbits
+; CHECK-LABEL: icl:
+; CHECK: .space 400
+; LARGE: .section .cp.rodata.large,"ac",@progbits
+; LARGE-LABEL: icl:
+; LARGE: .space 400
+@icl = internal constant [100 x i32] zeroinitializer
+
+; CHECK-LABEL: cs:
+; CHECK: .space 40
+; LARGE: .section .cp.rodata,"ac",@progbits
+; LARGE-LABEL: cs:
+; LARGE: .space 40
+@ics = internal constant [10 x i32] zeroinitializer
+
+; CHECK: .section .cp.namedsection,"ac",@progbits
+; CHECK-LABEL: cpsec:
+; CHECK: .long 0
+@cpsec = constant i32 0, section ".cp.namedsection"
+
+; CHECK: .section .dp.namedsection,"awd",@progbits
+; CHECK-LABEL: dpsec:
+; CHECK: .long 0
+@dpsec = global i32 0, section ".dp.namedsection"
+
diff --git a/test/CodeGen/XCore/dwarf_debug.ll b/test/CodeGen/XCore/dwarf_debug.ll
new file mode 100644
index 000000000000..2f4b23111bb2
--- /dev/null
+++ b/test/CodeGen/XCore/dwarf_debug.ll
@@ -0,0 +1,39 @@
+; RUN: llc < %s -mtriple=xcore-unknown-unknown -O0 | FileCheck %s
+
+; target datalayout = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32-f64:32-a:0:32-n32"
+; target triple = "xcore"
+
+; CHECK-LABEL: f
+; CHECK: entsp 2
+; ...the prologue...
+; CHECK: .loc 1 2 0 prologue_end # :2:0
+; CHECK: add r0, r0, 1
+; CHECK: retsp 2
+define i32 @f(i32 %a) {
+entry:
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %a.addr}, metadata !11), !dbg !12
+ %0 = load i32* %a.addr, align 4, !dbg !12
+ %add = add nsw i32 %0, 1, !dbg !12
+ ret i32 %add, !dbg !12
+}
+
+declare void @llvm.dbg.declare(metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1}
+!1 = metadata !{metadata !"", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f", metadata !"f", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @f, null, null, metadata !2, i32 2}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null}
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{i32 786689, metadata !4, metadata !"a", metadata !5, i32 16777218, metadata !8, i32 0, i32 0}
+!12 = metadata !{i32 2, i32 0, metadata !4, null}
+
diff --git a/test/CodeGen/XCore/epilogue_prologue.ll b/test/CodeGen/XCore/epilogue_prologue.ll
index 185565f4e287..99978145ed36 100644
--- a/test/CodeGen/XCore/epilogue_prologue.ll
+++ b/test/CodeGen/XCore/epilogue_prologue.ll
@@ -1,5 +1,20 @@
; RUN: llc < %s -march=xcore | FileCheck %s
+; RUN: llc < %s -march=xcore -disable-fp-elim | FileCheck %s -check-prefix=CHECKFP
+; When using SP for small frames, we don't need any scratch registers (SR).
+; When using SP for large frames, we may need two scratch registers.
+; When using FP, for large or small frames, we may need one scratch register.
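+; entsp spills the link register and retsp reloads it on return; frames larger
+; than entsp's immediate range allocate the remainder with extsp (see f4 and
+; f6 below).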
+
+; FP + small frame: spill FP+SR = entsp 2
+; CHECKFP-LABEL: f1
+; CHECKFP: entsp 2
+; CHECKFP-NEXT: stw r10, sp[1]
+; CHECKFP-NEXT: ldaw r10, sp[0]
+; CHECKFP: set sp, r10
+; CHECKFP-NEXT: ldw r10, sp[1]
+; CHECKFP-NEXT: retsp 2
+;
+; !FP + small frame: no spills = no stack adjustment needed
; CHECK-LABEL: f1
; CHECK: stw lr, sp[0]
; CHECK: ldw lr, sp[0]
@@ -10,17 +25,239 @@ entry:
ret void
}
+
+; FP + small frame: spill FP+SR+R0+LR = entsp 3 + extsp 1
+; CHECKFP-LABEL:f3
+; CHECKFP: entsp 3
+; CHECKFP-NEXT: stw r10, sp[1]
+; CHECKFP-NEXT: ldaw r10, sp[0]
+; CHECKFP-NEXT: stw [[REG:r[4-9]+]], r10[2]
+; CHECKFP-NEXT: mov [[REG]], r0
+; CHECKFP-NEXT: extsp 1
+; CHECKFP-NEXT: bl f2
+; CHECKFP-NEXT: ldaw sp, sp[1]
+; CHECKFP-NEXT: mov r0, [[REG]]
+; CHECKFP-NEXT: ldw [[REG]], r10[2]
+; CHECKFP-NEXT: set sp, r10
+; CHECKFP-NEXT: ldw r10, sp[1]
+; CHECKFP-NEXT: retsp 3
+;
+; !FP + small frame: spill R0+LR = entsp 2
; CHECK-LABEL: f3
; CHECK: entsp 2
-; CHECK: stw [[REG:r[4-9]+]], sp[1]
-; CHECK: mov [[REG]], r0
-; CHECK: bl f2
-; CHECK: mov r0, [[REG]]
-; CHECK: ldw [[REG]], sp[1]
-; CHECK: retsp 2
+; CHECK-NEXT: stw [[REG:r[4-9]+]], sp[1]
+; CHECK-NEXT: mov [[REG]], r0
+; CHECK-NEXT: bl f2
+; CHECK-NEXT: mov r0, [[REG]]
+; CHECK-NEXT: ldw [[REG]], sp[1]
+; CHECK-NEXT: retsp 2
declare void @f2()
define i32 @f3(i32 %i) nounwind {
entry:
call void @f2()
ret i32 %i
}
+
+
+; FP + large frame: spill FP+SR = entsp 2 + 100000
+; CHECKFP-LABEL: f4
+; CHECKFP: entsp 65535
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_offset 262140
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_offset 15, 0
+; CHECKFP-NEXT: extsp 34467
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_offset 400008
+; CHECKFP-NEXT: stw r10, sp[1]
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_offset 10, -400004
+; CHECKFP-NEXT: ldaw r10, sp[0]
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_register 10
+; CHECKFP-NEXT: set sp, r10
+; CHECKFP-NEXT: ldw r10, sp[1]
+; CHECKFP-NEXT: ldaw sp, sp[65535]
+; CHECKFP-NEXT: retsp 34467
+;
+; !FP + large frame: spill SR+SR = entsp 2 + 100000
+; CHECK-LABEL: f4
+; CHECK: entsp 65535
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_def_cfa_offset 262140
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_offset 15, 0
+; CHECK-NEXT: extsp 34467
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_def_cfa_offset 400008
+; CHECK-NEXT: ldaw sp, sp[65535]
+; CHECK-NEXT: retsp 34467
+define void @f4() {
+entry:
+ %0 = alloca [100000 x i32]
+ ret void
+}
+
+
+; FP + large frame: spill FP+SR+R4+LR = entsp 3 + 200000 + extsp 1
+; CHECKFP: .section .cp.rodata.cst4,"aMc",@progbits,4
+; CHECKFP-NEXT: .align 4
+; CHECKFP-NEXT: .LCPI[[CNST0:[0-9_]+]]:
+; CHECKFP-NEXT: .long 200002
+; CHECKFP-NEXT: .LCPI[[CNST1:[0-9_]+]]:
+; CHECKFP-NEXT: .long 200001
+; CHECKFP-NEXT: .text
+; CHECKFP-LABEL: f6
+; CHECKFP: entsp 65535
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_offset 262140
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_offset 15, 0
+; CHECKFP-NEXT: extsp 65535
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_offset 524280
+; CHECKFP-NEXT: extsp 65535
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_offset 786420
+; CHECKFP-NEXT: extsp 3398
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_offset 800012
+; CHECKFP-NEXT: stw r10, sp[1]
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_offset 10, -800008
+; CHECKFP-NEXT: ldaw r10, sp[0]
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_def_cfa_register 10
+; CHECKFP-NEXT: ldw r1, cp[.LCPI[[CNST0]]]
+; CHECKFP-NEXT: stw [[REG:r[4-9]+]], r10[r1]
+; CHECKFP-NEXT: .Ltmp{{[0-9]+}}
+; CHECKFP-NEXT: .cfi_offset 4, -4
+; CHECKFP-NEXT: mov [[REG]], r0
+; CHECKFP-NEXT: extsp 1
+; CHECKFP-NEXT: ldaw r0, r10[2]
+; CHECKFP-NEXT: bl f5
+; CHECKFP-NEXT: ldaw sp, sp[1]
+; CHECKFP-NEXT: ldw r1, cp[.LCPI3_1]
+; CHECKFP-NEXT: ldaw r0, r10[r1]
+; CHECKFP-NEXT: extsp 1
+; CHECKFP-NEXT: bl f5
+; CHECKFP-NEXT: ldaw sp, sp[1]
+; CHECKFP-NEXT: mov r0, [[REG]]
+; CHECKFP-NEXT: ldw r1, cp[.LCPI[[CNST0]]]
+; CHECKFP-NEXT: ldw [[REG]], r10[r1]
+; CHECKFP-NEXT: set sp, r10
+; CHECKFP-NEXT: ldw r10, sp[1]
+; CHECKFP-NEXT: ldaw sp, sp[65535]
+; CHECKFP-NEXT: ldaw sp, sp[65535]
+; CHECKFP-NEXT: ldaw sp, sp[65535]
+; CHECKFP-NEXT: retsp 3398
+;
+; !FP + large frame: spill SR+SR+R4+LR = entsp 4 + 200000
+; CHECK: .section .cp.rodata.cst4,"aMc",@progbits,4
+; CHECK-NEXT: .align 4
+; CHECK-NEXT: .LCPI[[CNST0:[0-9_]+]]:
+; CHECK-NEXT: .long 200003
+; CHECK-NEXT: .LCPI[[CNST1:[0-9_]+]]:
+; CHECK-NEXT: .long 200002
+; CHECK-NEXT: .text
+; CHECK-LABEL: f6
+; CHECK: entsp 65535
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_def_cfa_offset 262140
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_offset 15, 0
+; CHECK-NEXT: extsp 65535
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_def_cfa_offset 524280
+; CHECK-NEXT: extsp 65535
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_def_cfa_offset 786420
+; CHECK-NEXT: extsp 3399
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_def_cfa_offset 800016
+; CHECK-NEXT: ldaw r1, sp[0]
+; CHECK-NEXT: ldw r2, cp[.LCPI[[CNST0]]]
+; CHECK-NEXT: stw [[REG:r[4-9]+]], r1[r2]
+; CHECK-NEXT: .Ltmp{{[0-9]+}}
+; CHECK-NEXT: .cfi_offset 4, -4
+; CHECK-NEXT: mov [[REG]], r0
+; CHECK-NEXT: ldaw r0, sp[3]
+; CHECK-NEXT: bl f5
+; CHECK-NEXT: ldaw r0, sp[0]
+; CHECK-NEXT: ldw r1, cp[.LCPI[[CNST1]]]
+; CHECK-NEXT: ldaw r0, r0[r1]
+; CHECK-NEXT: bl f5
+; CHECK-NEXT: mov r0, [[REG]]
+; CHECK-NEXT: ldaw [[REG]], sp[0]
+; CHECK-NEXT: ldw r1, cp[.LCPI[[CNST0]]]
+; CHECK-NEXT: ldw [[REG]], [[REG]][r1]
+; CHECK-NEXT: ldaw sp, sp[65535]
+; CHECK-NEXT: ldaw sp, sp[65535]
+; CHECK-NEXT: ldaw sp, sp[65535]
+; CHECK-NEXT: retsp 3399
+declare void @f5(i32*)
+define i32 @f6(i32 %i) {
+entry:
+ %0 = alloca [200000 x i32]
+ %1 = getelementptr inbounds [200000 x i32]* %0, i32 0, i32 0
+ call void @f5(i32* %1)
+ %2 = getelementptr inbounds [200000 x i32]* %0, i32 0, i32 199999
+ call void @f5(i32* %2)
+ ret i32 %i
+}
+
+; FP + large frame: spill FP+SR+LR = entsp 2 + 256 + extsp 1
+; CHECKFP-LABEL:f8
+; CHECKFP: entsp 258
+; CHECKFP-NEXT: stw r10, sp[1]
+; CHECKFP-NEXT: ldaw r10, sp[0]
+; CHECKFP-NEXT: mkmsk [[REG:r[0-9]+]], 8
+; CHECKFP-NEXT: ldaw r0, r10{{\[}}[[REG]]{{\]}}
+; CHECKFP-NEXT: extsp 1
+; CHECKFP-NEXT: bl f5
+; CHECKFP-NEXT: ldaw sp, sp[1]
+; CHECKFP-NEXT: set sp, r10
+; CHECKFP-NEXT: ldw r10, sp[1]
+; CHECKFP-NEXT: retsp 258
+;
+; !FP + large frame: spill SR+SR+LR = entsp 3 + 256
+; CHECK-LABEL:f8
+; CHECK: entsp 257
+; CHECK-NEXT: ldaw r0, sp[254]
+; CHECK-NEXT: bl f5
+; CHECK-NEXT: retsp 257
+define void @f8() nounwind {
+entry:
+ %0 = alloca [256 x i32]
+ %1 = getelementptr inbounds [256 x i32]* %0, i32 0, i32 253
+ call void @f5(i32* %1)
+ ret void
+}
+
+; FP + large frame: spill FP+SR+LR = entsp 2 + 32768 + extsp 1
+; CHECKFP-LABEL:f9
+; CHECKFP: entsp 32770
+; CHECKFP-NEXT: stw r10, sp[1]
+; CHECKFP-NEXT: ldaw r10, sp[0]
+; CHECKFP-NEXT: ldc [[REG:r[0-9]+]], 32767
+; CHECKFP-NEXT: ldaw r0, r10{{\[}}[[REG]]{{\]}}
+; CHECKFP-NEXT: extsp 1
+; CHECKFP-NEXT: bl f5
+; CHECKFP-NEXT: ldaw sp, sp[1]
+; CHECKFP-NEXT: set sp, r10
+; CHECKFP-NEXT: ldw r10, sp[1]
+; CHECKFP-NEXT: retsp 32770
+;
+; !FP + large frame: spill SR+SR+LR = entsp 3 + 32768
+; CHECK-LABEL:f9
+; CHECK: entsp 32771
+; CHECK-NEXT: ldaw r0, sp[32768]
+; CHECK-NEXT: bl f5
+; CHECK-NEXT: retsp 32771
+define void @f9() nounwind {
+entry:
+ %0 = alloca [32768 x i32]
+ %1 = getelementptr inbounds [32768 x i32]* %0, i32 0, i32 32765
+ call void @f5(i32* %1)
+ ret void
+}
diff --git a/test/CodeGen/XCore/exception.ll b/test/CodeGen/XCore/exception.ll
index 8018cdcada7a..3179fcdfcf5d 100644
--- a/test/CodeGen/XCore/exception.ll
+++ b/test/CodeGen/XCore/exception.ll
@@ -29,9 +29,8 @@ entry:
; CHECK: .cfi_offset 15, 0
; CHECK: ldc r0, 4
; CHECK: bl __cxa_allocate_exception
-; CHECK: ldaw r11, cp[_ZTIi]
+; CHECK: ldaw r1, dp[_ZTIi]
; CHECK: ldc r2, 0
-; CHECK: mov r1, r11
; CHECK: bl __cxa_throw
define void @fn_throw() {
entry:
diff --git a/test/CodeGen/XCore/globals.ll b/test/CodeGen/XCore/globals.ll
index b3a872bb6892..04e135c25cae 100644
--- a/test/CodeGen/XCore/globals.ll
+++ b/test/CodeGen/XCore/globals.ll
@@ -17,11 +17,18 @@ entry:
define i32 *@addr_G3() {
entry:
; CHECK-LABEL: addr_G3:
-; CHECK: ldaw r11, cp[G3]
-; CHECK: mov r0, r11
+; CHECK: ldaw r0, dp[G3]
ret i32* @G3
}
+define i32 *@addr_iG3() {
+entry:
+; CHECK-LABEL: addr_iG3:
+; CHECK: ldaw r11, cp[iG3]
+; CHECK: mov r0, r11
+ ret i32* @iG3
+}
+
define i32 **@addr_G4() {
entry:
; CHECK-LABEL: addr_G4:
@@ -32,11 +39,18 @@ entry:
define i32 **@addr_G5() {
entry:
; CHECK-LABEL: addr_G5:
-; CHECK: ldaw r11, cp[G5]
-; CHECK: mov r0, r11
+; CHECK: ldaw r0, dp[G5]
ret i32** @G5
}
+define i32 **@addr_iG5() {
+entry:
+; CHECK-LABEL: addr_iG5:
+; CHECK: ldaw r11, cp[iG5]
+; CHECK: mov r0, r11
+ ret i32** @iG5
+}
+
define i32 **@addr_G6() {
entry:
; CHECK-LABEL: addr_G6:
@@ -47,11 +61,18 @@ entry:
define i32 **@addr_G7() {
entry:
; CHECK-LABEL: addr_G7:
-; CHECK: ldaw r11, cp[G7]
-; CHECK: mov r0, r11
+; CHECK: ldaw r0, dp[G7]
ret i32** @G7
}
+define i32 **@addr_iG7() {
+entry:
+; CHECK-LABEL: addr_iG7:
+; CHECK: ldaw r11, cp[iG7]
+; CHECK: mov r0, r11
+ ret i32** @iG7
+}
+
define i32 *@addr_G8() {
entry:
; CHECK-LABEL: addr_G8:
@@ -68,26 +89,38 @@ entry:
; CHECK: G2:
@G3 = unnamed_addr constant i32 9401
-; CHECK: .section .cp.rodata.cst4,"aMc",@progbits,4
+; CHECK: .section .dp.rodata,"awd",@progbits
; CHECK: G3:
+@iG3 = internal constant i32 9401
+; CHECK: .section .cp.rodata,"ac",@progbits
+; CHECK: iG3:
+
@G4 = global i32* @G1
; CHECK: .section .dp.data,"awd",@progbits
; CHECK: G4:
@G5 = unnamed_addr constant i32* @G1
-; CHECK: .section .cp.rodata,"ac",@progbits
+; CHECK: .section .dp.rodata,"awd",@progbits
; CHECK: G5:
+@iG5 = internal unnamed_addr constant i32* @G1
+; CHECK: .section .cp.rodata,"ac",@progbits
+; CHECK: iG5:
+
@G6 = global i32* @G8
; CHECK: .section .dp.data,"awd",@progbits
; CHECK: G6:
@G7 = unnamed_addr constant i32* @G8
-; CHECK: .section .cp.rodata,"ac",@progbits
+; CHECK: .section .dp.rodata,"awd",@progbits
; CHECK: G7:
-@G8 = internal global i32 9312
+@iG7 = internal unnamed_addr constant i32* @G8
+; CHECK: .section .cp.rodata,"ac",@progbits
+; CHECK: iG7:
+
+@G8 = global i32 9312
; CHECK: .section .dp.data,"awd",@progbits
; CHECK: G8:
diff --git a/test/CodeGen/XCore/inline-asm.ll b/test/CodeGen/XCore/inline-asm.ll
index af3edd1544a2..e9f5b5769997 100644
--- a/test/CodeGen/XCore/inline-asm.ll
+++ b/test/CodeGen/XCore/inline-asm.ll
@@ -30,3 +30,24 @@ entry:
tail call void asm sideeffect "foo ${0:n}", "i"(i32 99) nounwind
ret void
}
+
+@x = external global i32
+@y = external global i32, section ".cp.rodata"
+
+; CHECK-LABEL: f5:
+; CHECK: ldw r0, dp[x]
+; CHECK: retsp 0
+define i32 @f5() nounwind {
+entry:
+ %asmtmp = call i32 asm "ldw $0, $1", "=r,*m"(i32* @x) nounwind
+ ret i32 %asmtmp
+}
+
+; CHECK-LABEL: f6:
+; CHECK: ldw r0, cp[y]
+; CHECK: retsp 0
+define i32 @f6() nounwind {
+entry:
+ %asmtmp = call i32 asm "ldw $0, $1", "=r,*m"(i32* @y) nounwind
+ ret i32 %asmtmp
+}
diff --git a/test/CodeGen/XCore/linkage.ll b/test/CodeGen/XCore/linkage.ll
index 7a1179b7ab6e..7384fe7bcf09 100644
--- a/test/CodeGen/XCore/linkage.ll
+++ b/test/CodeGen/XCore/linkage.ll
@@ -25,9 +25,21 @@ define protected void @test_protected() {
; CHECK: .weak array
@array = weak global [2 x i32] zeroinitializer
+; CHECK: .globl ac.globound
+; CHECK: ac.globound = 2
+; CHECK: .weak ac.globound
+; CHECK: .globl ac
+; CHECK: .weak ac
+@ac = common global [2 x i32] zeroinitializer
+
+; CHECK: .globl gd
; CHECK: .weak gd
@gd = weak global i32 0
+; CHECK: .globl gc
+; CHECK: .weak gc
+@gc = common global i32 0
+
; CHECK-NOT: .hidden test_hidden_declaration
; CHECK: .weak gr
diff --git a/test/CodeGen/XCore/lit.local.cfg b/test/CodeGen/XCore/lit.local.cfg
index 3e84c1befeab..0b947bbbb850 100644
--- a/test/CodeGen/XCore/lit.local.cfg
+++ b/test/CodeGen/XCore/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'XCore' in targets:
+if not 'XCore' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/XCore/llvm-intrinsics.ll b/test/CodeGen/XCore/llvm-intrinsics.ll
new file mode 100644
index 000000000000..b436282615c2
--- /dev/null
+++ b/test/CodeGen/XCore/llvm-intrinsics.ll
@@ -0,0 +1,361 @@
+; RUN: llc < %s -march=xcore | FileCheck %s
+; RUN: llc < %s -march=xcore -disable-fp-elim | FileCheck %s -check-prefix=CHECKFP
+
+declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare i8* @llvm.returnaddress(i32) nounwind
+declare i8* @llvm.eh.dwarf.cfa(i32) nounwind
+declare void @llvm.eh.return.i32(i32, i8*) nounwind
+declare void @llvm.eh.unwind.init() nounwind
+
+define i8* @FA0() nounwind {
+entry:
+; CHECK-LABEL: FA0
+; CHECK: ldaw r0, sp[0]
+; CHECK-NEXT: retsp 0
+ %0 = call i8* @llvm.frameaddress(i32 0)
+ ret i8* %0
+}
+
+define i8* @FA1() nounwind {
+entry:
+; CHECK-LABEL: FA1
+; CHECK: entsp 100
+; CHECK-NEXT: ldaw r0, sp[0]
+; CHECK-NEXT: retsp 100
+ %0 = alloca [100 x i32]
+ %1 = call i8* @llvm.frameaddress(i32 0)
+ ret i8* %1
+}
+
+define i8* @RA0() nounwind {
+entry:
+; CHECK-LABEL: RA0
+; CHECK: stw lr, sp[0]
+; CHECK-NEXT: ldw r0, sp[0]
+; CHECK-NEXT: ldw lr, sp[0]
+; CHECK-NEXT: retsp 0
+ %0 = call i8* @llvm.returnaddress(i32 0)
+ ret i8* %0
+}
+
+define i8* @RA1() nounwind {
+entry:
+; CHECK-LABEL: RA1
+; CHECK: entsp 100
+; CHECK-NEXT: ldw r0, sp[100]
+; CHECK-NEXT: retsp 100
+ %0 = alloca [100 x i32]
+ %1 = call i8* @llvm.returnaddress(i32 0)
+ ret i8* %1
+}
+
+; test FRAME_TO_ARGS_OFFSET lowering
+define i8* @FTAO0() nounwind {
+entry:
+; CHECK-LABEL: FTAO0
+; CHECK: ldc r0, 0
+; CHECK-NEXT: ldaw r1, sp[0]
+; CHECK-NEXT: add r0, r1, r0
+; CHECK-NEXT: retsp 0
+ %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
+ ret i8* %0
+}
+
+define i8* @FTAO1() nounwind {
+entry:
+; CHECK-LABEL: FTAO1
+; CHECK: entsp 100
+; CHECK-NEXT: ldc r0, 400
+; CHECK-NEXT: ldaw r1, sp[0]
+; CHECK-NEXT: add r0, r1, r0
+; CHECK-NEXT: retsp 100
+ %0 = alloca [100 x i32]
+ %1 = call i8* @llvm.eh.dwarf.cfa(i32 0)
+ ret i8* %1
+}
+
+define i8* @EH0(i32 %offset, i8* %handler) {
+entry:
+; CHECK-LABEL: EH0
+; CHECK: entsp 2
+; CHECK: .cfi_def_cfa_offset 8
+; CHECK: .cfi_offset 15, 0
+; CHECK: .cfi_offset 1, -8
+; CHECK: .cfi_offset 0, -4
+; CHECK: ldc r2, 8
+; CHECK-NEXT: ldaw r3, sp[0]
+; CHECK-NEXT: add r2, r3, r2
+; CHECK-NEXT: add r2, r2, r0
+; CHECK-NEXT: mov r3, r1
+; CHECK-NEXT: ldw r1, sp[0]
+; CHECK-NEXT: ldw r0, sp[1]
+; CHECK-NEXT: set sp, r2
+; CHECK-NEXT: bau r3
+ call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
+ unreachable
+}
+
+declare void @foo(...)
+define i8* @EH1(i32 %offset, i8* %handler) {
+entry:
+; CHECK-LABEL: EH1
+; CHECK: entsp 5
+; CHECK: .cfi_def_cfa_offset 20
+; CHECK: .cfi_offset 15, 0
+; CHECK: .cfi_offset 1, -16
+; CHECK: .cfi_offset 0, -12
+; CHECK: stw r4, sp[4]
+; CHECK: .cfi_offset 4, -4
+; CHECK: stw r5, sp[3]
+; CHECK: .cfi_offset 5, -8
+; CHECK: mov r4, r1
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: ldc r0, 20
+; CHECK-NEXT: ldaw r1, sp[0]
+; CHECK-NEXT: add r0, r1, r0
+; CHECK-NEXT: add r2, r0, r5
+; CHECK-NEXT: mov r3, r4
+; CHECK-NEXT: ldw r5, sp[3]
+; CHECK-NEXT: ldw r4, sp[4]
+; CHECK-NEXT: ldw r1, sp[1]
+; CHECK-NEXT: ldw r0, sp[2]
+; CHECK-NEXT: set sp, r2
+; CHECK-NEXT: bau r3
+ call void (...)* @foo()
+ call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
+ unreachable
+}
+
+@offset = external constant i32
+@handler = external constant i8
+define i8* @EH2(i32 %r0, i32 %r1, i32 %r2, i32 %r3) {
+entry:
+; CHECK-LABEL: EH2
+; CHECK: entsp 3
+; CHECK: bl foo
+; CHECK-NEXT: ldw r0, dp[offset]
+; CHECK-NEXT: ldc r1, 12
+; CHECK-NEXT: ldaw r2, sp[0]
+; CHECK-NEXT: add r1, r2, r1
+; CHECK-NEXT: add r2, r1, r0
+; CHECK-NEXT: ldaw r3, dp[handler]
+; CHECK-NEXT: ldw r1, sp[1]
+; CHECK-NEXT: ldw r0, sp[2]
+; CHECK-NEXT: set sp, r2
+; CHECK-NEXT: bau r3
+ call void (...)* @foo()
+ %0 = load i32* @offset
+ call void @llvm.eh.return.i32(i32 %0, i8* @handler)
+ unreachable
+}
+
+
+; FP: spill FP+SR+R0:1+R4:9 = entsp 2+2+6
+; But we don't actually spill or restore R0:1
+; CHECKFP-LABEL: Unwind0:
+; CHECKFP: entsp 10
+; CHECKFP: stw r10, sp[1]
+; CHECKFP: ldaw r10, sp[0]
+; CHECKFP: stw r4, r10[9]
+; CHECKFP: stw r5, r10[8]
+; CHECKFP: stw r6, r10[7]
+; CHECKFP: stw r7, r10[6]
+; CHECKFP: stw r8, r10[5]
+; CHECKFP: stw r9, r10[4]
+; CHECKFP: ldw r9, r10[4]
+; CHECKFP: ldw r8, r10[5]
+; CHECKFP: ldw r7, r10[6]
+; CHECKFP: ldw r6, r10[7]
+; CHECKFP: ldw r5, r10[8]
+; CHECKFP: ldw r4, r10[9]
+; CHECKFP: set sp, r10
+; CHECKFP: ldw r10, sp[1]
+; CHECKFP: retsp 10
+;
+; !FP: spill R0:1+R4:10 = entsp 2+7
+; But we don't actually spill or restore R0:1
+; CHECK-LABEL: Unwind0:
+; CHECK: entsp 9
+; CHECK: stw r4, sp[8]
+; CHECK: stw r5, sp[7]
+; CHECK: stw r6, sp[6]
+; CHECK: stw r7, sp[5]
+; CHECK: stw r8, sp[4]
+; CHECK: stw r9, sp[3]
+; CHECK: stw r10, sp[2]
+; CHECK: ldw r10, sp[2]
+; CHECK: ldw r9, sp[3]
+; CHECK: ldw r8, sp[4]
+; CHECK: ldw r7, sp[5]
+; CHECK: ldw r6, sp[6]
+; CHECK: ldw r5, sp[7]
+; CHECK: ldw r4, sp[8]
+; CHECK: retsp 9
+define void @Unwind0() {
+ call void @llvm.eh.unwind.init()
+ ret void
+}
+
+
+; FP: spill FP+SR+R0:1+R4:9+LR = entsp 2+2+6 + extsp 1
+; But we don't actually spill or restore R0:1
+; CHECKFP-LABEL: Unwind1:
+; CHECKFP: entsp 10
+; CHECKFP: stw r10, sp[1]
+; CHECKFP: ldaw r10, sp[0]
+; CHECKFP: stw r4, r10[9]
+; CHECKFP: stw r5, r10[8]
+; CHECKFP: stw r6, r10[7]
+; CHECKFP: stw r7, r10[6]
+; CHECKFP: stw r8, r10[5]
+; CHECKFP: stw r9, r10[4]
+; CHECKFP: extsp 1
+; CHECKFP: bl foo
+; CHECKFP: ldaw sp, sp[1]
+; CHECKFP: ldw r9, r10[4]
+; CHECKFP: ldw r8, r10[5]
+; CHECKFP: ldw r7, r10[6]
+; CHECKFP: ldw r6, r10[7]
+; CHECKFP: ldw r5, r10[8]
+; CHECKFP: ldw r4, r10[9]
+; CHECKFP: set sp, r10
+; CHECKFP: ldw r10, sp[1]
+; CHECKFP: retsp 10
+;
+; !FP: spill R0:1+R4:10+LR = entsp 2+7+1
+; But we don't actually spill or restore R0:1
+; CHECK-LABEL: Unwind1:
+; CHECK: entsp 10
+; CHECK: stw r4, sp[9]
+; CHECK: stw r5, sp[8]
+; CHECK: stw r6, sp[7]
+; CHECK: stw r7, sp[6]
+; CHECK: stw r8, sp[5]
+; CHECK: stw r9, sp[4]
+; CHECK: stw r10, sp[3]
+; CHECK: bl foo
+; CHECK: ldw r10, sp[3]
+; CHECK: ldw r9, sp[4]
+; CHECK: ldw r8, sp[5]
+; CHECK: ldw r7, sp[6]
+; CHECK: ldw r6, sp[7]
+; CHECK: ldw r5, sp[8]
+; CHECK: ldw r4, sp[9]
+; CHECK: retsp 10
+define void @Unwind1() {
+ call void (...)* @foo()
+ call void @llvm.eh.unwind.init()
+ ret void
+}
+
+; FP: spill FP+SR+R0:1+R4:9 = entsp 2+2+6
+; We don't spill R0:1
+; We only restore R0:1 during eh.return
+; CHECKFP-LABEL: UnwindEH:
+; CHECKFP: entsp 10
+; CHECKFP: .cfi_def_cfa_offset 40
+; CHECKFP: .cfi_offset 15, 0
+; CHECKFP: stw r10, sp[1]
+; CHECKFP: .cfi_offset 10, -36
+; CHECKFP: ldaw r10, sp[0]
+; CHECKFP: .cfi_def_cfa_register 10
+; CHECKFP: .cfi_offset 1, -32
+; CHECKFP: .cfi_offset 0, -28
+; CHECKFP: stw r4, r10[9]
+; CHECKFP: .cfi_offset 4, -4
+; CHECKFP: stw r5, r10[8]
+; CHECKFP: .cfi_offset 5, -8
+; CHECKFP: stw r6, r10[7]
+; CHECKFP: .cfi_offset 6, -12
+; CHECKFP: stw r7, r10[6]
+; CHECKFP: .cfi_offset 7, -16
+; CHECKFP: stw r8, r10[5]
+; CHECKFP: .cfi_offset 8, -20
+; CHECKFP: stw r9, r10[4]
+; CHECKFP: .cfi_offset 9, -24
+; CHECKFP: bt r0, .LBB{{[0-9_]+}}
+; CHECKFP: ldw r9, r10[4]
+; CHECKFP-NEXT: ldw r8, r10[5]
+; CHECKFP-NEXT: ldw r7, r10[6]
+; CHECKFP-NEXT: ldw r6, r10[7]
+; CHECKFP-NEXT: ldw r5, r10[8]
+; CHECKFP-NEXT: ldw r4, r10[9]
+; CHECKFP-NEXT: set sp, r10
+; CHECKFP-NEXT: ldw r10, sp[1]
+; CHECKFP-NEXT: retsp 10
+; CHECKFP: .LBB{{[0-9_]+}}
+; CHECKFP-NEXT: ldc r2, 40
+; CHECKFP-NEXT: add r2, r10, r2
+; CHECKFP-NEXT: add r2, r2, r0
+; CHECKFP-NEXT: mov r3, r1
+; CHECKFP-NEXT: ldw r9, r10[4]
+; CHECKFP-NEXT: ldw r8, r10[5]
+; CHECKFP-NEXT: ldw r7, r10[6]
+; CHECKFP-NEXT: ldw r6, r10[7]
+; CHECKFP-NEXT: ldw r5, r10[8]
+; CHECKFP-NEXT: ldw r4, r10[9]
+; CHECKFP-NEXT: ldw r1, sp[2]
+; CHECKFP-NEXT: ldw r0, sp[3]
+; CHECKFP-NEXT: set sp, r2
+; CHECKFP-NEXT: bau r3
+;
+; !FP: spill R0:1+R4:10 = entsp 2+7
+; We don't spill R0:1
+; We only restore R0:1 during eh.return
+; CHECK-LABEL: UnwindEH:
+; CHECK: entsp 9
+; CHECK: .cfi_def_cfa_offset 36
+; CHECK: .cfi_offset 15, 0
+; CHECK: .cfi_offset 1, -36
+; CHECK: .cfi_offset 0, -32
+; CHECK: stw r4, sp[8]
+; CHECK: .cfi_offset 4, -4
+; CHECK: stw r5, sp[7]
+; CHECK: .cfi_offset 5, -8
+; CHECK: stw r6, sp[6]
+; CHECK: .cfi_offset 6, -12
+; CHECK: stw r7, sp[5]
+; CHECK: .cfi_offset 7, -16
+; CHECK: stw r8, sp[4]
+; CHECK: .cfi_offset 8, -20
+; CHECK: stw r9, sp[3]
+; CHECK: .cfi_offset 9, -24
+; CHECK: stw r10, sp[2]
+; CHECK: .cfi_offset 10, -28
+; CHECK: bt r0, .LBB{{[0-9_]+}}
+; CHECK: ldw r10, sp[2]
+; CHECK-NEXT: ldw r9, sp[3]
+; CHECK-NEXT: ldw r8, sp[4]
+; CHECK-NEXT: ldw r7, sp[5]
+; CHECK-NEXT: ldw r6, sp[6]
+; CHECK-NEXT: ldw r5, sp[7]
+; CHECK-NEXT: ldw r4, sp[8]
+; CHECK-NEXT: retsp 9
+; CHECK: .LBB{{[0-9_]+}}
+; CHECK-NEXT: ldc r2, 36
+; CHECK-NEXT: ldaw r3, sp[0]
+; CHECK-NEXT: add r2, r3, r2
+; CHECK-NEXT: add r2, r2, r0
+; CHECK-NEXT: mov r3, r1
+; CHECK-NEXT: ldw r10, sp[2]
+; CHECK-NEXT: ldw r9, sp[3]
+; CHECK-NEXT: ldw r8, sp[4]
+; CHECK-NEXT: ldw r7, sp[5]
+; CHECK-NEXT: ldw r6, sp[6]
+; CHECK-NEXT: ldw r5, sp[7]
+; CHECK-NEXT: ldw r4, sp[8]
+; CHECK-NEXT: ldw r1, sp[0]
+; CHECK-NEXT: ldw r0, sp[1]
+; CHECK-NEXT: set sp, r2
+; CHECK-NEXT: bau r3
+define void @UnwindEH(i32 %offset, i8* %handler) {
+ call void @llvm.eh.unwind.init()
+ %cmp = icmp eq i32 %offset, 0
+ br i1 %cmp, label %normal, label %eh
+eh:
+ call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
+ unreachable
+normal:
+ ret void
+}
diff --git a/test/CodeGen/XCore/load.ll b/test/CodeGen/XCore/load.ll
index 0622f1cd135e..c7fc2a33db1a 100644
--- a/test/CodeGen/XCore/load.ll
+++ b/test/CodeGen/XCore/load.ll
@@ -40,7 +40,7 @@ entry:
ret i32 %2
}
-@GConst = external constant i32
+@GConst = internal constant i32 42
define i32 @load_cp() nounwind {
entry:
; CHECK-LABEL: load_cp:
diff --git a/test/CodeGen/XCore/memcpy.ll b/test/CodeGen/XCore/memcpy.ll
new file mode 100644
index 000000000000..fe424c50cb28
--- /dev/null
+++ b/test/CodeGen/XCore/memcpy.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -march=xcore | FileCheck %s
+
+; Optimize memcpy to __memcpy_4 when src and dst are 4-byte aligned and the
+; size is a multiple of 4.
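+; f1 below shifts %n left by 2 so the length is a known multiple of 4; f2 and
+; f3 each break one of these preconditions and fall back to plain memcpy.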
+define void @f1(i8* %dst, i8* %src, i32 %n) nounwind {
+; CHECK-LABEL: f1:
+; CHECK: bl __memcpy_4
+entry:
+ %0 = shl i32 %n, 2
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %0, i32 4, i1 false)
+ ret void
+}
+
+; Can't optimize - size is not a multiple of 4.
+define void @f2(i8* %dst, i8* %src, i32 %n) nounwind {
+; CHECK-LABEL: f2:
+; CHECK: bl memcpy
+entry:
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %n, i32 4, i1 false)
+ ret void
+}
+
+; Can't optimize - alignment is not a multiple of 4.
+define void @f3(i8* %dst, i8* %src, i32 %n) nounwind {
+; CHECK-LABEL: f3:
+; CHECK: bl memcpy
+entry:
+ %0 = shl i32 %n, 2
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %0, i32 2, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/test/CodeGen/XCore/resources.ll b/test/CodeGen/XCore/resources.ll
index 5385010e138b..87bf3c204dc2 100644
--- a/test/CodeGen/XCore/resources.ll
+++ b/test/CodeGen/XCore/resources.ll
@@ -15,12 +15,14 @@ declare void @llvm.xcore.setd.p1i8(i8 addrspace(1)* %r, i32 %value)
declare void @llvm.xcore.setc.p1i8(i8 addrspace(1)* %r, i32 %value)
declare i32 @llvm.xcore.inshr.p1i8(i8 addrspace(1)* %r, i32 %value)
declare i32 @llvm.xcore.outshr.p1i8(i8 addrspace(1)* %r, i32 %value)
+declare void @llvm.xcore.clrpt.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.setpt.p1i8(i8 addrspace(1)* %r, i32 %value)
declare i32 @llvm.xcore.getts.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.syncr.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.settw.p1i8(i8 addrspace(1)* %r, i32 %value)
declare void @llvm.xcore.setv.p1i8(i8 addrspace(1)* %r, i8* %p)
declare void @llvm.xcore.setev.p1i8(i8 addrspace(1)* %r, i8* %p)
+declare void @llvm.xcore.edu.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.eeu.p1i8(i8 addrspace(1)* %r)
declare void @llvm.xcore.setclk.p1i8.p1i8(i8 addrspace(1)* %a, i8 addrspace(1)* %b)
declare void @llvm.xcore.setrdy.p1i8.p1i8(i8 addrspace(1)* %a, i8 addrspace(1)* %b)
@@ -140,6 +142,13 @@ define i32 @outshr(i32 %value, i8 addrspace(1)* %r) {
ret i32 %result
}
+define void @clrpt(i8 addrspace(1)* %r) {
+; CHECK-LABEL: clrpt:
+; CHECK: clrpt res[r0]
+ call void @llvm.xcore.clrpt.p1i8(i8 addrspace(1)* %r)
+ ret void
+}
+
define void @setpt(i8 addrspace(1)* %r, i32 %value) {
; CHECK-LABEL: setpt:
; CHECK: setpt res[r0], r1
@@ -184,6 +193,13 @@ define void @setev(i8 addrspace(1)* %r, i8* %p) {
ret void
}
+define void @edu(i8 addrspace(1)* %r) {
+; CHECK-LABEL: edu:
+; CHECK: edu res[r0]
+ call void @llvm.xcore.edu.p1i8(i8 addrspace(1)* %r)
+ ret void
+}
+
define void @eeu(i8 addrspace(1)* %r) {
; CHECK-LABEL: eeu:
; CHECK: eeu res[r0]
diff --git a/test/CodeGen/XCore/resources_combine.ll b/test/CodeGen/XCore/resources_combine.ll
new file mode 100644
index 000000000000..20c184a53b99
--- /dev/null
+++ b/test/CodeGen/XCore/resources_combine.ll
@@ -0,0 +1,93 @@
+; RUN: llc -march=xcore < %s | FileCheck %s
+
+declare i32 @llvm.xcore.int.p1i8(i8 addrspace(1)* %r)
+declare i32 @llvm.xcore.inct.p1i8(i8 addrspace(1)* %r)
+declare i32 @llvm.xcore.testct.p1i8(i8 addrspace(1)* %r)
+declare i32 @llvm.xcore.testwct.p1i8(i8 addrspace(1)* %r)
+declare i32 @llvm.xcore.getts.p1i8(i8 addrspace(1)* %r)
+declare void @llvm.xcore.outt.p1i8(i8 addrspace(1)* %r, i32 %value)
+declare void @llvm.xcore.outct.p1i8(i8 addrspace(1)* %r, i32 %value)
+declare void @llvm.xcore.chkct.p1i8(i8 addrspace(1)* %r, i32 %value)
+declare void @llvm.xcore.setpt.p1i8(i8 addrspace(1)* %r, i32 %value)
+
+define i32 @int(i8 addrspace(1)* %r) nounwind {
+; CHECK-LABEL: int:
+; CHECK: int r0, res[r0]
+; CHECK-NEXT: retsp 0
+ %result = call i32 @llvm.xcore.int.p1i8(i8 addrspace(1)* %r)
+ %trunc = and i32 %result, 255
+ ret i32 %trunc
+}
+
+define i32 @inct(i8 addrspace(1)* %r) nounwind {
+; CHECK-LABEL: inct:
+; CHECK: inct r0, res[r0]
+; CHECK-NEXT: retsp 0
+ %result = call i32 @llvm.xcore.inct.p1i8(i8 addrspace(1)* %r)
+ %trunc = and i32 %result, 255
+ ret i32 %trunc
+}
+
+define i32 @testct(i8 addrspace(1)* %r) nounwind {
+; CHECK-LABEL: testct:
+; CHECK: testct r0, res[r0]
+; CHECK-NEXT: retsp 0
+ %result = call i32 @llvm.xcore.testct.p1i8(i8 addrspace(1)* %r)
+ %trunc = and i32 %result, 1
+ ret i32 %trunc
+}
+
+define i32 @testwct(i8 addrspace(1)* %r) nounwind {
+; CHECK-LABEL: testwct:
+; CHECK: testwct r0, res[r0]
+; CHECK-NEXT: retsp 0
+ %result = call i32 @llvm.xcore.testwct.p1i8(i8 addrspace(1)* %r)
+ %trunc = and i32 %result, 7
+ ret i32 %trunc
+}
+
+define i32 @getts(i8 addrspace(1)* %r) nounwind {
+; CHECK-LABEL: getts:
+; CHECK: getts r0, res[r0]
+; CHECK-NEXT: retsp 0
+ %result = call i32 @llvm.xcore.getts.p1i8(i8 addrspace(1)* %r)
+ %trunc = and i32 %result, 65535
+ ret i32 %trunc
+}
+
+define void @outt(i8 addrspace(1)* %r, i32 %value) nounwind {
+; CHECK-LABEL: outt:
+; CHECK-NOT: zext
+; CHECK: outt res[r0], r1
+; CHECK-NEXT: retsp 0
+ %trunc = and i32 %value, 255
+ call void @llvm.xcore.outt.p1i8(i8 addrspace(1)* %r, i32 %trunc)
+ ret void
+}
+
+define void @outct(i8 addrspace(1)* %r, i32 %value) nounwind {
+; CHECK-LABEL: outct:
+; CHECK-NOT: zext
+; CHECK: outct res[r0], r1
+ %trunc = and i32 %value, 255
+ call void @llvm.xcore.outct.p1i8(i8 addrspace(1)* %r, i32 %trunc)
+ ret void
+}
+
+define void @chkct(i8 addrspace(1)* %r, i32 %value) nounwind {
+; CHECK-LABEL: chkct:
+; CHECK-NOT: zext
+; CHECK: chkct res[r0], r1
+ %trunc = and i32 %value, 255
+ call void @llvm.xcore.chkct.p1i8(i8 addrspace(1)* %r, i32 %trunc)
+ ret void
+}
+
+define void @setpt(i8 addrspace(1)* %r, i32 %value) nounwind {
+; CHECK-LABEL: setpt:
+; CHECK-NOT: zext
+; CHECK: setpt res[r0], r1
+ %trunc = and i32 %value, 65535
+ call void @llvm.xcore.setpt.p1i8(i8 addrspace(1)* %r, i32 %trunc)
+ ret void
+}
diff --git a/test/CodeGen/XCore/scavenging.ll b/test/CodeGen/XCore/scavenging.ll
index 5b612d0f9b59..a0c8a2e09379 100644
--- a/test/CodeGen/XCore/scavenging.ll
+++ b/test/CodeGen/XCore/scavenging.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=xcore
+; RUN: llc < %s -march=xcore | FileCheck %s
+
@size = global i32 0 ; <i32*> [#uses=1]
@g0 = external global i32 ; <i32*> [#uses=2]
@g1 = external global i32 ; <i32*> [#uses=2]
@@ -48,5 +49,69 @@ entry:
call void @g(i32* %x1, i32* %1) nounwind
ret void
}
-
declare void @g(i32*, i32*)
+
+
+; CHECK: .section .cp.rodata.cst4,"aMc",@progbits,4
+; CHECK: .align 4
+; CHECK: [[ARG5:.LCPI[0-9_]+]]:
+; CHECK: .long 100003
+; CHECK: [[INDEX0:.LCPI[0-9_]+]]:
+; CHECK: .long 80002
+; CHECK: [[INDEX1:.LCPI[0-9_]+]]:
+; CHECK: .long 81002
+; CHECK: [[INDEX2:.LCPI[0-9_]+]]:
+; CHECK: .long 82002
+; CHECK: [[INDEX3:.LCPI[0-9_]+]]:
+; CHECK: .long 83002
+; CHECK: [[INDEX4:.LCPI[0-9_]+]]:
+; CHECK: .long 84002
+; CHECK: .text
+; !FP + large frame: spill SR+SR = entsp 2 + 100000
+; CHECK-LABEL: ScavengeSlots:
+; CHECK: entsp 65535
+; CHECK: extsp 34467
+; scavenge r11
+; CHECK: ldaw r11, sp[0]
+; scavenge r4 using SR spill slot
+; CHECK: stw r4, sp[1]
+; CHECK: ldw r4, cp{{\[}}[[ARG5]]{{\]}}
+; r11 used to load 5th argument
+; CHECK: ldw r11, r11[r4]
+; CHECK: ldaw r4, sp[0]
+; scavenge r5 using SR spill slot
+; CHECK: stw r5, sp[0]
+; CHECK: ldw r5, cp{{\[}}[[INDEX0]]{{\]}}
+; r4 & r5 used by InsertSPConstInst() to emit STW_l3r instruction.
+; CHECK: stw r0, r4[r5]
+; CHECK: ldaw r0, sp[0]
+; CHECK: ldw r5, cp{{\[}}[[INDEX1]]{{\]}}
+; CHECK: stw r1, r0[r5]
+; CHECK: ldaw r0, sp[0]
+; CHECK: ldw r1, cp{{\[}}[[INDEX2]]{{\]}}
+; CHECK: stw r2, r0[r1]
+; CHECK: ldaw r0, sp[0]
+; CHECK: ldw r1, cp{{\[}}[[INDEX3]]{{\]}}
+; CHECK: stw r3, r0[r1]
+; CHECK: ldaw r0, sp[0]
+; CHECK: ldw r1, cp{{\[}}[[INDEX4]]{{\]}}
+; CHECK: stw r11, r0[r1]
+; CHECK: ldaw sp, sp[65535]
+; CHECK: ldw r4, sp[1]
+; CHECK: ldw r5, sp[0]
+; CHECK: retsp 34467
+define void @ScavengeSlots(i32 %r0, i32 %r1, i32 %r2, i32 %r3, i32 %r4) nounwind {
+entry:
+ %Data = alloca [100000 x i32]
+ %i0 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 80000
+ store volatile i32 %r0, i32* %i0
+ %i1 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 81000
+ store volatile i32 %r1, i32* %i1
+ %i2 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 82000
+ store volatile i32 %r2, i32* %i2
+ %i3 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 83000
+ store volatile i32 %r3, i32* %i3
+ %i4 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 84000
+ store volatile i32 %r4, i32* %i4
+ ret void
+}
diff --git a/test/DebugInfo/2009-11-05-DeadGlobalVariable.ll b/test/DebugInfo/2009-11-05-DeadGlobalVariable.ll
index 6fd788704bc2..65907d679780 100644
--- a/test/DebugInfo/2009-11-05-DeadGlobalVariable.ll
+++ b/test/DebugInfo/2009-11-05-DeadGlobalVariable.ll
@@ -1,5 +1,5 @@
; RUN: llc %s -o /dev/null
-; Here variable bar is optimzied away. Do not trip over while trying to generate debug info.
+; Here variable bar is optimized away. Do not trip over it while generating debug info.
define i32 @foo() nounwind uwtable readnone ssp {
@@ -13,13 +13,11 @@ entry:
!0 = metadata !{i32 720913, metadata !17, i32 12, metadata !"clang version 3.0 (trunk 139632)", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !12, null, metadata !""} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 0}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 720942, metadata !17, metadata !6, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 ()* @foo, null, null, metadata !10, i32 0} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 0] [foo]
+!5 = metadata !{i32 720942, metadata !17, metadata !6, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 ()* @foo, null, null, null, i32 0} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 0] [foo]
!6 = metadata !{i32 720937, metadata !17} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9}
!9 = metadata !{i32 720932, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !11}
-!11 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
!12 = metadata !{metadata !14}
!14 = metadata !{i32 720948, i32 0, metadata !5, metadata !"bar", metadata !"bar", metadata !"", metadata !6, i32 2, metadata !9, i32 1, i32 1, null, null} ; [ DW_TAG_variable ]
!15 = metadata !{i32 3, i32 3, metadata !16, null}
diff --git a/test/DebugInfo/2009-11-06-NamelessGlobalVariable.ll b/test/DebugInfo/2009-11-06-NamelessGlobalVariable.ll
index 5a1045905306..9beab2008b88 100644
--- a/test/DebugInfo/2009-11-06-NamelessGlobalVariable.ll
+++ b/test/DebugInfo/2009-11-06-NamelessGlobalVariable.ll
@@ -5,7 +5,7 @@
!llvm.module.flags = !{!9}
!0 = metadata !{i32 720913, metadata !8, i32 12, metadata !"clang version 3.0 (trunk 139632)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 720948, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 2, metadata !7, i32 0, i32 1, i32* @0, null} ; [ DW_TAG_variable ]
!6 = metadata !{i32 720937, metadata !8} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/2009-11-10-CurrentFn.ll b/test/DebugInfo/2009-11-10-CurrentFn.ll
index d154c4399860..151d631d6868 100644
--- a/test/DebugInfo/2009-11-10-CurrentFn.ll
+++ b/test/DebugInfo/2009-11-10-CurrentFn.ll
@@ -14,7 +14,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.module.flags = !{!18}
!0 = metadata !{i32 720913, metadata !17, i32 12, metadata !"clang version 3.0 (trunk 139632)", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 720942, metadata !17, metadata !6, metadata !"bar", metadata !"bar", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32)* @bar, null, null, metadata !9, metadata !""} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 0] [bar]
!6 = metadata !{i32 720937, metadata !17} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/2010-01-19-DbgScope.ll b/test/DebugInfo/2010-01-19-DbgScope.ll
deleted file mode 100644
index 1a7e378374cd..000000000000
--- a/test/DebugInfo/2010-01-19-DbgScope.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc -O0 < %s -o /dev/null
-; Ignore unreachable scopes.
-declare void @foo(i32) noreturn
-
-define i32 @bar() nounwind ssp {
-entry:
- br i1 undef, label %bb, label %bb11, !dbg !0
-
-bb: ; preds = %entry
- call void @foo(i32 0) noreturn nounwind, !dbg !7
- unreachable, !dbg !7
-
-bb11: ; preds = %entry
- ret i32 1, !dbg !11
-}
-
-!llvm.dbg.cu = !{!3}
-!llvm.module.flags = !{!15}
-
-!0 = metadata !{i32 8647, i32 0, metadata !1, null}
-!1 = metadata !{i32 458763, metadata !12, metadata !2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
-!2 = metadata !{i32 458798, null, metadata !3, metadata !"bar", metadata !"bar", metadata !"bar", i32 8639, metadata !4, i1 true, i1 true, i32 0, i32 0, null, i32 0, i32 0, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
-!3 = metadata !{i32 458769, metadata !12, i32 1, metadata !"LLVM build 00", i1 true, metadata !"", i32 0, metadata !13, metadata !13, metadata !14, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{i32 458773, null, metadata !3, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!5 = metadata !{metadata !6}
-!6 = metadata !{i32 458788, null, metadata !3, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
-!7 = metadata !{i32 8648, i32 0, metadata !8, null}
-!8 = metadata !{i32 458763, metadata !12, metadata !9, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
-!9 = metadata !{i32 458763, metadata !12, metadata !10, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
-!10 = metadata !{i32 458798, null, metadata !3, metadata !"bar2", metadata !"bar2", metadata !"bar2", i32 8639, metadata !4, i1 true, i1 true, i32 0, i32 0, null, i32 0, i32 0, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
-!11 = metadata !{i32 8652, i32 0, metadata !1, null}
-!12 = metadata !{metadata !"c-parser.c", metadata !"llvmgcc"}
-!13 = metadata !{i32 0}
-!14 = metadata !{metadata !2}
-!15 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/2010-03-19-DbgDeclare.ll b/test/DebugInfo/2010-03-19-DbgDeclare.ll
index d1afade4ad9f..94aa259d31b6 100644
--- a/test/DebugInfo/2010-03-19-DbgDeclare.ll
+++ b/test/DebugInfo/2010-03-19-DbgDeclare.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | opt -verify -S -asm-verbose | FileCheck %s
+; RUN: opt < %s -verify -S | FileCheck %s
; CHECK: lang 0x8001
@@ -9,7 +9,7 @@ entry:
}
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!5}
-!2 = metadata !{i32 786449, metadata !4, i32 32769, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 0, metadata !3, metadata !3, metadata !3, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/blaikie/dev/scratch/scratch.cpp] [lang 0x8001]
+!2 = metadata !{i32 786449, metadata !4, i32 32769, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 0, metadata !3, metadata !3, metadata !3, metadata !3, metadata !3, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/blaikie/dev/scratch/scratch.cpp] [lang 0x8001]
!3 = metadata !{}
!0 = metadata !{i32 662302, i32 26, metadata !1, null}
!1 = metadata !{i32 4, metadata !"foo"}
diff --git a/test/DebugInfo/2010-03-24-MemberFn.ll b/test/DebugInfo/2010-03-24-MemberFn.ll
index 1689fe6c91a8..4ea9d2cf9861 100644
--- a/test/DebugInfo/2010-03-24-MemberFn.ll
+++ b/test/DebugInfo/2010-03-24-MemberFn.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 < %s | grep AT_decl_file | grep 2
+; RUN: %llc_dwarf -O0 < %s | grep AT_decl_file | grep 2
; Here _ZN1S3fooEv is defined in header file identified as AT_decl_file no. 2 in debug info.
%struct.S = type <{ i8 }>
diff --git a/test/DebugInfo/2010-04-06-NestedFnDbgInfo.ll b/test/DebugInfo/2010-04-06-NestedFnDbgInfo.ll
index 4d4d61665c07..5f7cb696d738 100644
--- a/test/DebugInfo/2010-04-06-NestedFnDbgInfo.ll
+++ b/test/DebugInfo/2010-04-06-NestedFnDbgInfo.ll
@@ -1,6 +1,22 @@
-; RUN: llvm-as < %s | llc -asm-verbose -O0 | grep AT_specification | count 2
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj -o - < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
; Radar 7833483
-; Do not emit AT_specification for nested function foo.
+; Do not emit a separate out-of-line definition DIE for the function-local 'foo'
+; function (member of the function-local 'A' type)
+; CHECK: DW_TAG_class_type
+; CHECK: DW_TAG_class_type
+; CHECK-NEXT: DW_AT_name {{.*}} "A"
+; Check that the subprogram inside the class definition has low_pc, only
+; attached to the definition.
+; CHECK: [[FOO_INL:0x........]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_low_pc
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZZN1B2fnEvEN1A3fooEv"
+; And just double check that there's no out of line definition that references
+; this subprogram.
+; CHECK-NOT: DW_AT_specification {{.*}} {[[FOO_INL]]}
%class.A = type { i8 }
%class.B = type { i8 }
diff --git a/test/DebugInfo/2010-04-19-FramePtr.ll b/test/DebugInfo/2010-04-19-FramePtr.ll
index 4af2fdcdfa8a..6c772230d93c 100644
--- a/test/DebugInfo/2010-04-19-FramePtr.ll
+++ b/test/DebugInfo/2010-04-19-FramePtr.ll
@@ -1,6 +1,6 @@
-; RUN: llc -asm-verbose -O1 -o %t < %s
+; RUN: %llc_dwarf -asm-verbose -O1 -o %t < %s
; RUN: grep DW_AT_APPLE_omit_frame_ptr %t
-; RUN: llc -disable-fp-elim -asm-verbose -O1 -o %t < %s
+; RUN: %llc_dwarf -disable-fp-elim -asm-verbose -O1 -o %t < %s
; RUN: grep -v DW_AT_APPLE_omit_frame_ptr %t
diff --git a/test/DebugInfo/2010-05-10-MultipleCU.ll b/test/DebugInfo/2010-05-10-MultipleCU.ll
index ad7c7d1614a5..75d2e7084d21 100644
--- a/test/DebugInfo/2010-05-10-MultipleCU.ll
+++ b/test/DebugInfo/2010-05-10-MultipleCU.ll
@@ -1,20 +1,11 @@
-; RUN: llc -O0 -asm-verbose < %s | FileCheck %s
-; One for a.c, second one for b.c and third one for abbrev.
+; REQUIRES: object-emission
-; CHECK: info_begin
-; CHECK: DW_TAG_compile_unit
-; CHECK-NOT: DW_TAG_compile_unit
-; CHECK: info_end
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
-; CHECK: info_begin
-; CHECK: DW_TAG_compile_unit
-; CHECK-NOT: DW_TAG_compile_unit
-; CHECK: info_end
+; Check that two compile units are generated
-; CHECK: abbrev_begin
-; CHECK: DW_TAG_compile_unit
-; CHECK-NOT: DW_TAG_compile_unit
-; CHECK: abbrev_end
+; CHECK: Compile Unit:
+; CHECK: Compile Unit:
define i32 @foo() nounwind readnone ssp {
return:
diff --git a/test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll b/test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll
index 50a34222fe63..a461abdcdf5b 100644
--- a/test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll
+++ b/test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O2 %s -o - | FileCheck %s
+; RUN: %llc_dwarf -O2 %s -o - | FileCheck %s
; Check struct X for dead variable xyz from inlined function foo.
; CHECK: DW_TAG_structure_type
@@ -15,7 +15,7 @@ define i32 @bar() nounwind ssp {
entry:
%0 = load i32* @i, align 4, !dbg !17 ; <i32> [#uses=2]
tail call void @llvm.dbg.value(metadata !{i32 %0}, i64 0, metadata !9), !dbg !19
- tail call void @llvm.dbg.declare(metadata !20, metadata !10), !dbg !21
+ tail call void @llvm.dbg.declare(metadata !29, metadata !10), !dbg !21
%1 = mul nsw i32 %0, %0, !dbg !22 ; <i32> [#uses=2]
store i32 %1, i32* @i, align 4, !dbg !17
ret i32 %1, !dbg !23
@@ -26,7 +26,7 @@ entry:
!0 = metadata !{i32 786478, metadata !27, metadata !1, metadata !"foo", metadata !"foo", metadata !"", i32 9, metadata !3, i1 true, i1 true, i32 0, i32 0, null, i1 false, i1 true, null, null, null, metadata !24, i32 9} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !27} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !27, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, metadata !20, metadata !20, metadata !25, metadata !26, metadata !26, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !27, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, metadata !20, metadata !20, metadata !25, metadata !26, metadata !20, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !27, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5, metadata !5}
!5 = metadata !{i32 786468, metadata !27, metadata !1, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
@@ -44,7 +44,7 @@ entry:
!17 = metadata !{i32 15, i32 0, metadata !18, null}
!18 = metadata !{i32 786443, metadata !1, metadata !6, i32 14, i32 0, i32 1} ; [ DW_TAG_lexical_block ]
!19 = metadata !{i32 9, i32 0, metadata !0, metadata !17}
-!20 = metadata !{null}
+!20 = metadata !{}
!21 = metadata !{i32 9, i32 0, metadata !11, metadata !17}
!22 = metadata !{i32 11, i32 0, metadata !11, metadata !17}
!23 = metadata !{i32 16, i32 0, metadata !18, null}
@@ -53,3 +53,4 @@ entry:
!26 = metadata !{metadata !16}
!27 = metadata !{metadata !"bar.c", metadata !"/tmp/"}
!28 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!29 = metadata !{null}
diff --git a/test/DebugInfo/2010-07-19-Crash.ll b/test/DebugInfo/2010-07-19-Crash.ll
index 6b6e61ddc28a..a10b10a7d145 100644
--- a/test/DebugInfo/2010-07-19-Crash.ll
+++ b/test/DebugInfo/2010-07-19-Crash.ll
@@ -25,6 +25,6 @@ entry:
!10 = metadata !{i32 524299, metadata !12, metadata !0, i32 3, i32 11, i32 0} ; [ DW_TAG_lexical_block ]
!11 = metadata !{i32 524334, metadata !12, metadata !1, metadata !"foo", metadata !"foo", metadata !"foo", i32 7, metadata !3, i1 true, i1 false, i32 0, i32 0, null, i1 false, i1 true, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
!12 = metadata !{metadata !"one.c", metadata !"/private/tmp"}
-!13 = metadata !{metadata !0, metadata !6, metadata !11}
+!13 = metadata !{metadata !0}
!14 = metadata !{i32 0}
!15 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/AArch64/cfi-frame.ll b/test/DebugInfo/AArch64/cfi-frame.ll
deleted file mode 100644
index 7290ddf357c1..000000000000
--- a/test/DebugInfo/AArch64/cfi-frame.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-WITH-FP
-
-@bigspace = global [8 x i64] zeroinitializer
-
-declare void @use_addr(i8*)
-
-define void @test_frame([8 x i64] %val) {
-; CHECK: test_frame:
-; CHECK: .cfi_startproc
-
- %var = alloca i8, i32 1000000
-; CHECK: sub sp, sp, #[[SP_INIT_ADJ:[0-9]+]]
-; CHECK-NEXT: .Ltmp
-; CHECK-NEXT: .cfi_def_cfa sp, [[SP_INIT_ADJ]]
-
-; Make sure the prologue is reasonably efficient
-; CHECK-NEXT: stp x29, x30, [sp,
-; CHECK-NEXT: stp x25, x26, [sp,
-; CHECK-NEXT: stp x23, x24, [sp,
-; CHECK-NEXT: stp x21, x22, [sp,
-; CHECK-NEXT: stp x19, x20, [sp,
-; CHECK-NEXT: sub sp, sp, #160
-; CHECK-NEXT: sub sp, sp, #244, lsl #12
-; CHECK-NEXT: .Ltmp
-; CHECK-NEXT: .cfi_def_cfa sp, 1000080
-; CHECK-NEXT: .Ltmp
-; CHECK-NEXT: .cfi_offset x30, -8
-; CHECK-NEXT: .Ltmp
-; CHECK-NEXT: .cfi_offset x29, -16
-; [...]
-; CHECK: .cfi_offset x19, -80
-
-; CHECK: bl use_addr
- call void @use_addr(i8* %var)
-
- store [8 x i64] %val, [8 x i64]* @bigspace
- ret void
-; CHECK: ret
-; CHECK: .cfi_endproc
-}
-
-; CHECK-WITH-FP: test_frame:
-
-; CHECK-WITH-FP: sub sp, sp, #[[SP_INIT_ADJ:[0-9]+]]
-; CHECK-WITH-FP-NEXT: .Ltmp
-; CHECK-WITH-FP-NEXT: .cfi_def_cfa sp, [[SP_INIT_ADJ]]
-
-; CHECK-WITH-FP: stp x29, x30, [sp, [[OFFSET:#[0-9]+]]]
-; CHECK-WITH-FP-NEXT: add x29, sp, [[OFFSET]]
-; CHECK-WITH-FP-NEXT: .Ltmp
-; CHECK-WITH-FP-NEXT: .cfi_def_cfa x29, 16
-
- ; We shouldn't emit any kind of update for the second stack adjustment if the
- ; FP is in use.
-; CHECK-WITH-FP-NOT: .cfi_def_cfa_offset
-
-; CHECK-WITH-FP: bl use_addr
diff --git a/test/DebugInfo/AArch64/dwarfdump.ll b/test/DebugInfo/AArch64/dwarfdump.ll
index 4c205077e6d8..98e863dbb4b5 100644
--- a/test/DebugInfo/AArch64/dwarfdump.ll
+++ b/test/DebugInfo/AArch64/dwarfdump.ll
@@ -1,4 +1,7 @@
-; RUN: llc -mtriple=aarch64-non-linux-gnu < %s -filetype=obj | llvm-dwarfdump - | FileCheck %s
+; RUN: llc -mtriple=aarch64-non-linux-gnu -dwarf-version=4 < %s -filetype=obj \
+; RUN: | llvm-dwarfdump - | FileCheck -check-prefix=CHECK -check-prefix=CHECK-4 %s
+; RUN: llc -mtriple=aarch64-non-linux-gnu -dwarf-version=3 < %s -filetype=obj \
+; RUN: | llvm-dwarfdump - | FileCheck -check-prefix=CHECK -check-prefix=CHECK-3 %s
; We're mostly checking that relocations are applied correctly
; here. Currently R_AARCH64_ABS32 is used for references to debug data
@@ -12,7 +15,8 @@
; A couple of ABS64s similarly:
; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; CHECK: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000008)
+; CHECK-4: DW_AT_high_pc [DW_FORM_data4] (0x00000008)
+; CHECK-3: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000008)
define i32 @main() nounwind {
ret i32 0, !dbg !8
@@ -24,7 +28,7 @@ attributes #0 = { nounwind }
!llvm.module.flags = !{!10}
!0 = metadata !{i32 786449, metadata !9, i32 12, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !2, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/timnor01/llvm/build/tmp.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!2 = metadata !{metadata !3}
!3 = metadata !{i32 786478, metadata !9, metadata !4, metadata !"main", metadata !"main", metadata !"", i32 1, metadata !5, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @main, null, null, metadata !1, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [main]
!4 = metadata !{i32 786473, metadata !9} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/AArch64/eh_frame.s b/test/DebugInfo/AArch64/eh_frame.s
index d8d6b6d9325f..12a58961d717 100644
--- a/test/DebugInfo/AArch64/eh_frame.s
+++ b/test/DebugInfo/AArch64/eh_frame.s
@@ -17,7 +17,7 @@ foo:
// Output is:
// CHECK: Contents of section .eh_frame:
-// CHECK-NEXT: 0000 10000000 00000000 017a5200 017c1e01 .........zR..|..
+// CHECK-NEXT: 0000 10000000 00000000 037a5200 017c1e01 .........zR..|..
// CHECK-NEXT: 0010 1b0c1f00 10000000 18000000 00000000 ................
@@ -30,7 +30,7 @@ foo:
// -------------------
// 10000000: length of first CIE = 0x10
// 00000000: This is a CIE
-// 01: version = 0x1
+// 03: version = 0x3
// 7a 52 00: augmentation string "zR" -- pointer format is specified
// 01: code alignment factor 1
// 7c: data alignment factor -4
diff --git a/test/DebugInfo/AArch64/eh_frame_personality.ll b/test/DebugInfo/AArch64/eh_frame_personality.ll
index d35f2a2fcafb..51d6bf80b950 100644
--- a/test/DebugInfo/AArch64/eh_frame_personality.ll
+++ b/test/DebugInfo/AArch64/eh_frame_personality.ll
@@ -16,7 +16,7 @@ clean:
}
; CHECK: Contents of section .eh_frame:
-; CHECK: 0000 1c000000 00000000 017a504c 5200017c .........zPLR..|
+; CHECK: 0000 1c000000 00000000 037a504c 5200017c .........zPLR..|
; CHECK: 0010 1e0b0000 00000000 00000000 1b0c1f00 ................
; Don't really care about the rest:
@@ -33,7 +33,7 @@ clean:
; ----------
; 1c000000: Length = 0x1c
; 00000000: This is a CIE
-; 01: Version 1
+; 03: Version 3
; 7a 50 4c 52 00: Augmentation string "zPLR" (personality routine, language-specific data, pointer format)
; 01: Code alignment factor 1
; 78: Data alignment factor: -8
diff --git a/test/DebugInfo/AArch64/lit.local.cfg b/test/DebugInfo/AArch64/lit.local.cfg
index 9a66a00189ea..cec29af5bbe4 100644
--- a/test/DebugInfo/AArch64/lit.local.cfg
+++ b/test/DebugInfo/AArch64/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'AArch64' in targets:
+if not 'AArch64' in config.root.targets:
config.unsupported = True
diff --git a/test/DebugInfo/AArch64/struct_by_value.ll b/test/DebugInfo/AArch64/struct_by_value.ll
new file mode 100644
index 000000000000..0e336f799c54
--- /dev/null
+++ b/test/DebugInfo/AArch64/struct_by_value.ll
@@ -0,0 +1,70 @@
+; A by-value struct is a register-indirect value (breg).
+; RUN: llc %s -filetype=asm -o - | FileCheck %s
+
+; CHECK: DW_AT_location
+; CHECK-NEXT: .byte 112
+; 112 = 0x70 = DW_OP_breg0
+
+; rdar://problem/13658587
+;
+; Generated from
+;
+; struct five
+; {
+; int a;
+; int b;
+; int c;
+; int d;
+; int e;
+; };
+;
+; int
+; return_five_int (struct five f)
+; {
+; return f.a;
+; }
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+target triple = "arm64-apple-ios3.0.0"
+
+%struct.five = type { i32, i32, i32, i32, i32 }
+
+; Function Attrs: nounwind ssp
+define i32 @return_five_int(%struct.five* %f) #0 {
+entry:
+ call void @llvm.dbg.declare(metadata !{%struct.five* %f}, metadata !17), !dbg !18
+ %a = getelementptr inbounds %struct.five* %f, i32 0, i32 0, !dbg !19
+ %0 = load i32* %a, align 4, !dbg !19
+ ret i32 %0, !dbg !19
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind ssp }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!16, !20}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"LLVM version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [struct_by_value.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"struct_by_value.c", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"return_five_int", metadata !"return_five_int", metadata !"", i32 13, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (%struct.five*)* @return_five_int, null, null, metadata !2, i32 14} ; [ DW_TAG_subprogram ] [line 13] [def] [scope 14] [return_five_int]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [struct_by_value.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !9}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786451, metadata !1, null, metadata !"five", i32 1, i64 160, i64 32, i32 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [five] [line 1, size 160, align 32, offset 0] [def] [from ]
+!10 = metadata !{metadata !11, metadata !12, metadata !13, metadata !14, metadata !15}
+!11 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"a", i32 3, i64 32, i64 32, i64 0, i32 0, metadata !8} ; [ DW_TAG_member ] [a] [line 3, size 32, align 32, offset 0] [from int]
+!12 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"b", i32 4, i64 32, i64 32, i64 32, i32 0, metadata !8} ; [ DW_TAG_member ] [b] [line 4, size 32, align 32, offset 32] [from int]
+!13 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"c", i32 5, i64 32, i64 32, i64 64, i32 0, metadata !8} ; [ DW_TAG_member ] [c] [line 5, size 32, align 32, offset 64] [from int]
+!14 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"d", i32 6, i64 32, i64 32, i64 96, i32 0, metadata !8} ; [ DW_TAG_member ] [d] [line 6, size 32, align 32, offset 96] [from int]
+!15 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"e", i32 7, i64 32, i64 32, i64 128, i32 0, metadata !8} ; [ DW_TAG_member ] [e] [line 7, size 32, align 32, offset 128] [from int]
+!16 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!17 = metadata !{i32 786689, metadata !4, metadata !"f", metadata !5, i32 16777229, metadata !9, i32 8192, i32 0} ; [ DW_TAG_arg_variable ] [f] [line 13]
+!18 = metadata !{i32 13, i32 0, metadata !4, null}
+!19 = metadata !{i32 16, i32 0, metadata !4, null}
+!20 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/AArch64/variable-loc.ll b/test/DebugInfo/AArch64/variable-loc.ll
deleted file mode 100644
index f42cb746480b..000000000000
--- a/test/DebugInfo/AArch64/variable-loc.ll
+++ /dev/null
@@ -1,101 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -disable-fp-elim < %s | FileCheck %s
-
-; This is a regression test making sure the location of variables is correct in
-; debugging information, even if they're addressed via the frame pointer.
-
-; In case it needs, regenerating, the following suffices:
-; int printf(const char *, ...);
-; void populate_array(int *, int);
-; int sum_array(int *, int);
-
-; int main() {
-; int main_arr[100], val;
-; populate_array(main_arr, 100);
-; val = sum_array(main_arr, 100);
-; printf("Total is %d\n", val);
-; return 0;
-; }
-
- ; First make sure main_arr is where we expect it: sp + 4 == x29 - 412:
-; CHECK: main:
-; CHECK: sub sp, sp, #432
-; CHECK: stp x29, x30, [sp, #416]
-; CHECK: add x29, sp, #416
-; CHECK: add {{x[0-9]+}}, sp, #4
-
-; CHECK: .Linfo_string7:
-; CHECK-NEXT: main_arr
-
-; Now check the debugging information reflects this:
-; CHECK: DW_TAG_variable
-; CHECK-NEXT: .word .Linfo_string7
-
- ; Rather hard-coded, but 145 => DW_OP_fbreg and the .ascii is LEB128 encoded -412.
-; CHECK: DW_AT_location
-; CHECK-NEXT: .byte 145
-; CHECK-NEXT: .ascii "\344|"
-
-
-
-target datalayout = "e-p:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-i128:128:128-f32:32:32-f64:64:64-f128:128:128-n32:64-S128"
-target triple = "aarch64-none-linux-gnu"
-
-@.str = private unnamed_addr constant [13 x i8] c"Total is %d\0A\00", align 1
-
-declare void @populate_array(i32*, i32) nounwind
-
-declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
-
-declare i32 @sum_array(i32*, i32) nounwind
-
-define i32 @main() nounwind {
-entry:
- %retval = alloca i32, align 4
- %main_arr = alloca [100 x i32], align 4
- %val = alloca i32, align 4
- store i32 0, i32* %retval
- call void @llvm.dbg.declare(metadata !{[100 x i32]* %main_arr}, metadata !17), !dbg !22
- call void @llvm.dbg.declare(metadata !{i32* %val}, metadata !23), !dbg !24
- %arraydecay = getelementptr inbounds [100 x i32]* %main_arr, i32 0, i32 0, !dbg !25
- call void @populate_array(i32* %arraydecay, i32 100), !dbg !25
- %arraydecay1 = getelementptr inbounds [100 x i32]* %main_arr, i32 0, i32 0, !dbg !26
- %call = call i32 @sum_array(i32* %arraydecay1, i32 100), !dbg !26
- store i32 %call, i32* %val, align 4, !dbg !26
- %0 = load i32* %val, align 4, !dbg !27
- %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0), i32 %0), !dbg !27
- ret i32 0, !dbg !28
-}
-
-declare i32 @printf(i8*, ...)
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!30}
-
-!0 = metadata !{i32 786449, metadata !29, i32 12, metadata !"clang version 3.2 ", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/timnor01/a64-trunk/build/simple.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
-!3 = metadata !{metadata !5, metadata !11, metadata !14}
-!5 = metadata !{i32 786478, metadata !29, metadata !6, metadata !"populate_array", metadata !"populate_array", metadata !"", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32*, i32)* @populate_array, null, null, metadata !1, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [populate_array]
-!6 = metadata !{i32 786473, metadata !29} ; [ DW_TAG_file_type ]
-!7 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{null, metadata !9, metadata !10}
-!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
-!10 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!11 = metadata !{i32 786478, metadata !29, metadata !6, metadata !"sum_array", metadata !"sum_array", metadata !"", i32 9, metadata !12, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32*, i32)* @sum_array, null, null, metadata !1, i32 9} ; [ DW_TAG_subprogram ] [line 9] [def] [sum_array]
-!12 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !13, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!13 = metadata !{metadata !10, metadata !9, metadata !10}
-!14 = metadata !{i32 786478, metadata !29, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 18, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !1, i32 18} ; [ DW_TAG_subprogram ] [line 18] [def] [main]
-!15 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !16, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!16 = metadata !{metadata !10}
-!17 = metadata !{i32 786688, metadata !18, metadata !"main_arr", metadata !6, i32 19, metadata !19, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [main_arr] [line 19]
-!18 = metadata !{i32 786443, metadata !29, metadata !14, i32 18, i32 16, i32 4} ; [ DW_TAG_lexical_block ] [/home/timnor01/a64-trunk/build/simple.c]
-!19 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 3200, i64 32, i32 0, i32 0, metadata !10, metadata !20, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 3200, align 32, offset 0] [from int]
-!20 = metadata !{i32 786465, i64 0, i64 99} ; [ DW_TAG_subrange_type ] [0, 99]
-!22 = metadata !{i32 19, i32 7, metadata !18, null}
-!23 = metadata !{i32 786688, metadata !18, metadata !"val", metadata !6, i32 20, metadata !10, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [val] [line 20]
-!24 = metadata !{i32 20, i32 7, metadata !18, null}
-!25 = metadata !{i32 22, i32 3, metadata !18, null}
-!26 = metadata !{i32 23, i32 9, metadata !18, null}
-!27 = metadata !{i32 24, i32 3, metadata !18, null}
-!28 = metadata !{i32 26, i32 3, metadata !18, null}
-!29 = metadata !{metadata !"simple.c", metadata !"/home/timnor01/a64-trunk/build"}
-!30 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/ARM/PR16736.ll b/test/DebugInfo/ARM/PR16736.ll
index d01fa22318db..8c025ad487b4 100644
--- a/test/DebugInfo/ARM/PR16736.ll
+++ b/test/DebugInfo/ARM/PR16736.ll
@@ -43,7 +43,7 @@ attributes #3 = { nounwind }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (trunk 190804) (llvm/trunk 190797)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [//<unknown>] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"/<unknown>", metadata !""}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"h", metadata !"h", metadata !"_Z1hiiiif", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32, i32, i32, i32, float)* @_Z1hiiiif, null, null, metadata !11, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [h]
!5 = metadata !{metadata !"/arm.cpp", metadata !""}
diff --git a/test/DebugInfo/ARM/lit.local.cfg b/test/DebugInfo/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/DebugInfo/ARM/lit.local.cfg
+++ b/test/DebugInfo/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/DebugInfo/ARM/sectionorder.ll b/test/DebugInfo/ARM/sectionorder.ll
new file mode 100644
index 000000000000..a7030cd1b670
--- /dev/null
+++ b/test/DebugInfo/ARM/sectionorder.ll
@@ -0,0 +1,17 @@
+; RUN: llc -filetype=asm %s -o - | FileCheck %s
+
+; Verifies that the DWARF* sections come _after_ the __TEXT sections.
+; rdar://problem/15623193
+
+; CHECK: .section __TEXT,__text,
+; CHECK-NOT: __DWARF,__debug
+; CHECK: .section __TEXT,__cstring,cstring_literals
+target triple = "thumbv7-apple-ios"
+
+!llvm.module.flags = !{!3, !4}
+!llvm.dbg.cu = !{!0}
+
+!0 = metadata !{i32 786449, i32 0, i32 12, metadata !"test.c", metadata !"/Volumes/Data/radar/15623193", metadata !"LLVM", i1 true, i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !1} ; [ DW_TAG_compile_unit ] [/Volumes/Data/radar/15623193/test.c] [DW_LANG_C99]
+!1 = metadata !{}
+!3 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!4 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/ARM/tls.ll b/test/DebugInfo/ARM/tls.ll
new file mode 100644
index 000000000000..e54d16004762
--- /dev/null
+++ b/test/DebugInfo/ARM/tls.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O0 -filetype=asm -mtriple=armv7-linux-gnuehabi < %s | FileCheck %s
+;
+; Generated with clang with source
+; __thread int x;
+
+@x = thread_local global i32 0, align 4
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+; 6 bytes of data
+; CHECK: .byte 6 @ DW_AT_location
+; DW_OP_const4u
+; CHECK: .byte 12
+; The debug relocation of the address of the tls variable
+; CHECK: .long x(tlsldo)
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/tls.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"tls.c", metadata !"/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786484, i32 0, null, metadata !"x", metadata !"x", metadata !"", metadata !5, i32 1, metadata !6, i32 0, i32 1, i32* @x, null} ; [ DW_TAG_variable ] [x] [line 1] [def]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/tls.c]
+!6 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5 "}
diff --git a/test/DebugInfo/COFF/asan-module-ctor.ll b/test/DebugInfo/COFF/asan-module-ctor.ll
new file mode 100644
index 000000000000..c1d8e75292cd
--- /dev/null
+++ b/test/DebugInfo/COFF/asan-module-ctor.ll
@@ -0,0 +1,91 @@
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -O0 < %s | FileCheck --check-prefix=X86 %s
+
+; This LL file was generated by running clang on the following code with
+; -fsanitize=address
+; D:\asan.c:
+; 1 int foo(void) {
+; 2 return 0;
+; 3 }
+
+; The module ctor has no debug info. All we have to do is not crash.
+; X86: _asan.module_ctor:
+; X86-NEXT: # BB
+; X86-NEXT: calll ___asan_init_v3
+; X86-NEXT: retl
+
+; ModuleID = 'asan.c'
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-win32"
+
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 1, void ()* @asan.module_ctor }]
+
+; Function Attrs: nounwind sanitize_address
+define i32 @foo() #0 {
+entry:
+ ret i32 0, !dbg !10
+}
+
+define internal void @asan.module_ctor() {
+ call void @__asan_init_v3()
+ ret void
+}
+
+declare void @__asan_init_v3()
+
+declare void @__asan_report_load1(i32)
+
+declare void @__asan_report_load2(i32)
+
+declare void @__asan_report_load4(i32)
+
+declare void @__asan_report_load8(i32)
+
+declare void @__asan_report_load16(i32)
+
+declare void @__asan_report_store1(i32)
+
+declare void @__asan_report_store2(i32)
+
+declare void @__asan_report_store4(i32)
+
+declare void @__asan_report_store8(i32)
+
+declare void @__asan_report_store16(i32)
+
+declare void @__asan_report_load_n(i32, i32)
+
+declare void @__asan_report_store_n(i32, i32)
+
+declare void @__asan_handle_no_return()
+
+declare void @__sanitizer_cov()
+
+declare void @__sanitizer_ptr_cmp(i32, i32)
+
+declare void @__sanitizer_ptr_sub(i32, i32)
+
+declare void @__asan_before_dynamic_init(i32)
+
+declare void @__asan_after_dynamic_init()
+
+declare void @__asan_register_globals(i32, i32)
+
+declare void @__asan_unregister_globals(i32, i32)
+
+attributes #0 = { nounwind sanitize_address "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [D:\/asan.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"asan.c", metadata !"D:\5C"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [D:\/asan.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5.0 "}
+!10 = metadata !{i32 2, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/COFF/asan-module-without-functions.ll b/test/DebugInfo/COFF/asan-module-without-functions.ll
new file mode 100644
index 000000000000..419faa0e0f35
--- /dev/null
+++ b/test/DebugInfo/COFF/asan-module-without-functions.ll
@@ -0,0 +1,53 @@
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -O0 < %s | FileCheck --check-prefix=X86 %s
+
+; This LL file was generated by running clang on the following code with
+; -fsanitize=address
+; D:\asan.c:
+; 1 unsigned char c = 42;
+;
+; This file defines no functions, so just make sure we don't try to emit
+; the line table for functions of zero size.
+; X86-NOT: .section .debug$S,"rn"
+
+; ModuleID = 'asan.c'
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-win32"
+
+@c = global { i8, [63 x i8] } { i8 42, [63 x i8] zeroinitializer }, align 32
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 1, void ()* @asan.module_ctor }]
+@__asan_gen_ = private constant [7 x i8] c"asan.c\00", align 1
+@__asan_gen_1 = private unnamed_addr constant [2 x i8] c"c\00", align 1
+@0 = internal global [1 x { i32, i32, i32, i32, i32, i32 }] [{ i32, i32, i32, i32, i32, i32 } { i32 ptrtoint ({ i8, [63 x i8] }* @c to i32), i32 1, i32 64, i32 ptrtoint ([2 x i8]* @__asan_gen_1 to i32), i32 ptrtoint ([7 x i8]* @__asan_gen_ to i32), i32 0 }]
+@llvm.global_dtors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 1, void ()* @asan.module_dtor }]
+
+define internal void @asan.module_ctor() {
+ call void @__asan_init_v3()
+ call void @__asan_register_globals(i32 ptrtoint ([1 x { i32, i32, i32, i32, i32, i32 }]* @0 to i32), i32 1)
+ ret void
+}
+
+declare void @__asan_init_v3()
+
+declare void @__asan_before_dynamic_init(i32)
+
+declare void @__asan_after_dynamic_init()
+
+declare void @__asan_register_globals(i32, i32)
+
+declare void @__asan_unregister_globals(i32, i32)
+
+define internal void @asan.module_dtor() {
+ call void @__asan_unregister_globals(i32 ptrtoint ([1 x { i32, i32, i32, i32, i32, i32 }]* @0 to i32), i32 1)
+ ret void
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !2, metadata !2, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [D:\/asan.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"asan.c", metadata !"D:\5C"}
+!2 = metadata !{}
+!3 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!4 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!5 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/DebugInfo/COFF/asm.ll b/test/DebugInfo/COFF/asm.ll
new file mode 100644
index 000000000000..8c9dff0cdf15
--- /dev/null
+++ b/test/DebugInfo/COFF/asm.ll
@@ -0,0 +1,184 @@
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -O0 < %s | FileCheck --check-prefix=X86 %s
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -o - -O0 < %s | llvm-mc -triple=i686-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ32 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -O0 < %s | FileCheck --check-prefix=X64 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -o - -O0 < %s | llvm-mc -triple=x86_64-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ64 %s
+
+; This LL file was generated by running clang on the following code:
+; D:\asm.c:
+; 1 void g(void);
+; 2
+; 3 void f(void) {
+; 4 __asm align 4;
+; 5 g();
+; 6 }
+
+; X86-LABEL: _f:
+; X86-NEXT: # BB
+; X86-NEXT: [[ASM_LINE:^L.*]]:{{$}}
+; X86: [[CALL_LINE:^L.*]]:{{$}}
+; X86-NEXT: calll _g
+; X86-NEXT: [[RETURN_STMT:.*]]:
+; X86-NEXT: ret
+; X86-NEXT: [[END_OF_F:.*]]:
+;
+; X86-LABEL: .section .debug$S,"rnd"
+; X86-NEXT: .long 4
+; X86-NEXT: .long 242
+; X86-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X86-NEXT: [[F2_START]]:
+; X86-NEXT: .secrel32 _f
+; X86-NEXT: .secidx _f
+; X86-NEXT: .long [[END_OF_F]]-_f
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 0
+; X86-NEXT: .long 3
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[ASM_LINE]]-_f
+; X86-NEXT: .long 4
+; X86-NEXT: .long [[CALL_LINE]]-_f
+; X86-NEXT: .long 5
+; X86-NEXT: .long [[RETURN_STMT]]-_f
+; X86-NEXT: .long 6
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; X86-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X86-NEXT: .long 244
+; X86-NEXT: .long 8
+; X86-NEXT: .long 1
+; X86-NEXT: .long 0
+; String table
+; X86-NEXT: .long 243
+; X86-NEXT: .long 10
+; X86-NEXT: .byte 0
+; X86-NEXT: .ascii "D:\\asm.c"
+; X86-NEXT: .byte 0
+; Padding
+; X86-NEXT: .zero 2
+
+; OBJ32: Section {
+; OBJ32: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ32: Characteristics [ (0x42100040)
+; OBJ32: ]
+; OBJ32: Relocations [
+; OBJ32-NEXT: 0xC IMAGE_REL_I386_SECREL _f
+; OBJ32-NEXT: 0x10 IMAGE_REL_I386_SECTION _f
+; OBJ32-NEXT: ]
+; OBJ32: FunctionLineTable [
+; OBJ32-NEXT: Name: _f
+; OBJ32-NEXT: CodeSize: 0x6
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\asm.c
+; FIXME: An empty __asm stmt creates an extra entry.
+; We seem to know that these offsets are the same statically during the
+; execution of endModule(). See PR18679 for the details.
+; OBJ32-NEXT: +0x0: 4
+; OBJ32-NEXT: +0x0: 5
+; OBJ32-NEXT: +0x5: 6
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: ]
+; OBJ32: }
+
+; X64-LABEL: f:
+; X64-NEXT: [[START:.*]]:{{$}}
+; X64-NEXT: # BB
+; X64-NEXT: subq $40, %rsp
+; X64-NEXT: [[ASM_LINE:.*]]:{{$}}
+; X64: [[CALL_LINE:.*]]:{{$}}
+; X64-NEXT: callq g
+; X64-NEXT: [[EPILOG_AND_RET:.*]]:
+; X64-NEXT: addq $40, %rsp
+; X64-NEXT: ret
+; X64-NEXT: [[END_OF_F:.*]]:
+;
+; X64-LABEL: .section .debug$S,"rnd"
+; X64-NEXT: .long 4
+; X64-NEXT: .long 242
+; X64-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X64-NEXT: [[F2_START]]:
+; X64-NEXT: .secrel32 f
+; X64-NEXT: .secidx f
+; X64-NEXT: .long [[END_OF_F]]-f
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 0
+; X64-NEXT: .long 4
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[START]]-f
+; X64-NEXT: .long 3
+; X64-NEXT: .long [[ASM_LINE]]-f
+; X64-NEXT: .long 4
+; X64-NEXT: .long [[CALL_LINE]]-f
+; X64-NEXT: .long 5
+; X64-NEXT: .long [[EPILOG_AND_RET]]-f
+; X64-NEXT: .long 6
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; X64-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X64-NEXT: .long 244
+; X64-NEXT: .long 8
+; X64-NEXT: .long 1
+; X64-NEXT: .long 0
+; String table
+; X64-NEXT: .long 243
+; X64-NEXT: .long 10
+; X64-NEXT: .byte 0
+; X64-NEXT: .ascii "D:\\asm.c"
+; X64-NEXT: .byte 0
+; Padding
+; X64-NEXT: .zero 2
+
+; OBJ64: Section {
+; OBJ64: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ64: Characteristics [ (0x42100040)
+; OBJ64: ]
+; OBJ64: Relocations [
+; OBJ64-NEXT: 0xC IMAGE_REL_AMD64_SECREL f
+; OBJ64-NEXT: 0x10 IMAGE_REL_AMD64_SECTION f
+; OBJ64-NEXT: ]
+; OBJ64: FunctionLineTable [
+; OBJ64-NEXT: Name: f
+; OBJ64-NEXT: CodeSize: 0xE
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\asm.c
+; OBJ64-NEXT: +0x0: 3
+; FIXME: An empty __asm stmt creates an extra entry.
+; See PR18679 for the details.
+; OBJ64-NEXT: +0x4: 4
+; OBJ64-NEXT: +0x4: 5
+; OBJ64-NEXT: +0x9: 6
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: ]
+; OBJ64: }
+
+; Function Attrs: nounwind
+define void @f() #0 {
+entry:
+ call void asm sideeffect inteldialect ".align 4", "~{dirflag},~{fpsr},~{flags}"() #2, !dbg !12
+ call void @g(), !dbg !13
+ ret void, !dbg !14
+}
+
+declare void @g() #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [D:\/<unknown>] [DW_LANG_C99]
+!1 = metadata !{metadata !"<unknown>", metadata !"D:\5C"}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"f", metadata !"f", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @f, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [f]
+!5 = metadata !{metadata !"asm.c", metadata !"D:\5C"}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [D:\/asm.c]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{null}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5 "}
+!12 = metadata !{i32 4, i32 0, metadata !4, null}
+!13 = metadata !{i32 5, i32 0, metadata !4, null}
+!14 = metadata !{i32 6, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/COFF/lit.local.cfg b/test/DebugInfo/COFF/lit.local.cfg
new file mode 100644
index 000000000000..c8625f4d9d24
--- /dev/null
+++ b/test/DebugInfo/COFF/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'X86' in config.root.targets:
+ config.unsupported = True
diff --git a/test/DebugInfo/COFF/multifile.ll b/test/DebugInfo/COFF/multifile.ll
new file mode 100644
index 000000000000..c04bdb3b7c94
--- /dev/null
+++ b/test/DebugInfo/COFF/multifile.ll
@@ -0,0 +1,257 @@
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -O0 < %s | FileCheck --check-prefix=X86 %s
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -o - -O0 < %s | llvm-mc -triple=i686-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ32 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -O0 < %s | FileCheck --check-prefix=X64 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -o - -O0 < %s | llvm-mc -triple=x86_64-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ64 %s
+
+; This LL file was generated by running clang on the following code:
+; D:\input.c:
+; 1 void g(void);
+; 2
+; 3 void f() {
+; 4 #line 1 "one.c"
+; 5 g(void);
+; 6 #line 2 "two.c"
+; 7 g(void);
+; 8 #line 7 "one.c"
+; 9 g(void);
+; 10 }
+
+; X86-LABEL: _f:
+; X86-NEXT: # BB
+; X86-NEXT: [[CALL_LINE_1:.*]]:{{$}}
+; X86-NEXT: calll _g
+; X86-NEXT: [[CALL_LINE_2:.*]]:{{$}}
+; X86-NEXT: calll _g
+; X86-NEXT: [[CALL_LINE_3:.*]]:{{$}}
+; X86-NEXT: calll _g
+; X86-NEXT: [[RETURN_STMT:.*]]:
+; X86-NEXT: ret
+; X86-NEXT: [[END_OF_F:.*]]:
+;
+; X86-LABEL: .section .debug$S,"rnd"
+; X86-NEXT: .long 4
+; X86-NEXT: .long 242
+; X86-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X86-NEXT: [[F2_START]]:
+; X86-NEXT: .secrel32 _f
+; X86-NEXT: .secidx _f
+; X86-NEXT: .long [[END_OF_F]]-_f
+; Segment for file 'D:\\one.c' begins
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 0
+; X86-NEXT: .long 1
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[CALL_LINE_1]]-_f
+; X86-NEXT: .long 1
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; Segment for file 'D:\\two.c' begins
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 8
+; X86-NEXT: .long 1
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[CALL_LINE_2]]-_f
+; X86-NEXT: .long 2
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; A new segment for file 'D:\\one.c' begins
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 0
+; X86-NEXT: .long 2
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[CALL_LINE_3]]-_f
+; X86-NEXT: .long 7
+; X86-NEXT: .long [[RETURN_STMT]]-_f
+; X86-NEXT: .long 8
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; X86-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X86-NEXT: .long 244
+; X86-NEXT: .long 16
+; X86-NEXT: .long 1
+; X86-NEXT: .long 0
+; X86-NEXT: .long 10
+; X86-NEXT: .long 0
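+; The values 1 and 10 are offsets into the string table below, which starts with a
+; nul byte, so they select "D:\one.c" and "D:\two.c" respectively.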
+; String table
+; X86-NEXT: .long 243
+; X86-NEXT: .long 19
+; X86-NEXT: .byte 0
+; X86-NEXT: .ascii "D:\\one.c"
+; X86-NEXT: .byte 0
+; X86-NEXT: .ascii "D:\\two.c"
+; X86-NEXT: .byte 0
+; X86-NEXT: .zero 1
+
+; OBJ32: Section {
+; OBJ32: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ32: Characteristics [ (0x42100040)
+; OBJ32: ]
+; OBJ32: Relocations [
+; OBJ32-NEXT: 0xC IMAGE_REL_I386_SECREL _f
+; OBJ32-NEXT: 0x10 IMAGE_REL_I386_SECTION _f
+; OBJ32-NEXT: ]
+; OBJ32: FunctionLineTable [
+; OBJ32-NEXT: Name: _f
+; OBJ32-NEXT: CodeSize: 0x10
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\one.c
+; OBJ32-NEXT: +0x0: 1
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\two.c
+; OBJ32-NEXT: +0x5: 2
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\one.c
+; OBJ32-NEXT: +0xA: 7
+; OBJ32-NEXT: +0xF: 8
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: ]
+; OBJ32: }
+
+; X64-LABEL: f:
+; X64-NEXT: [[START:.*]]:{{$}}
+; X64-NEXT: # BB
+; X64-NEXT: subq $40, %rsp
+; X64-NEXT: [[CALL_LINE_1:.*]]:{{$}}
+; X64-NEXT: callq g
+; X64-NEXT: [[CALL_LINE_2:.*]]:{{$}}
+; X64-NEXT: callq g
+; X64-NEXT: [[CALL_LINE_3:.*]]:{{$}}
+; X64-NEXT: callq g
+; X64-NEXT: [[EPILOG_AND_RET:.*]]:
+; X64-NEXT: addq $40, %rsp
+; X64-NEXT: ret
+; X64-NEXT: [[END_OF_F:.*]]:
+;
+; X64-LABEL: .section .debug$S,"rnd"
+; X64-NEXT: .long 4
+; X64-NEXT: .long 242
+; X64-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X64-NEXT: [[F2_START]]:
+; X64-NEXT: .secrel32 f
+; X64-NEXT: .secidx f
+; X64-NEXT: .long [[END_OF_F]]-f
+; Segment for file 'D:\\input.c' begins
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 0
+; X64-NEXT: .long 1
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[START]]-f
+; X64-NEXT: .long 3
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; Segment for file 'D:\\one.c' begins
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 8
+; X64-NEXT: .long 1
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[CALL_LINE_1]]-f
+; X64-NEXT: .long 1
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; Segment for file 'D:\\two.c' begins
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 16
+; X64-NEXT: .long 1
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[CALL_LINE_2]]-f
+; X64-NEXT: .long 2
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; A new segment for file 'D:\\one.c' begins
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 8
+; X64-NEXT: .long 2
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[CALL_LINE_3]]-f
+; X64-NEXT: .long 7
+; X64-NEXT: .long [[EPILOG_AND_RET]]-f
+; X64-NEXT: .long 8
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; X64-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X64-NEXT: .long 244
+; X64-NEXT: .long 24
+; X64-NEXT: .long 1
+; X64-NEXT: .long 0
+; X64-NEXT: .long 12
+; X64-NEXT: .long 0
+; X64-NEXT: .long 21
+; X64-NEXT: .long 0
+; String table
+; X64-NEXT: .long 243
+; X64-NEXT: .long 30
+; X64-NEXT: .byte 0
+; X64-NEXT: .ascii "D:\\input.c"
+; X64-NEXT: .byte 0
+; X64-NEXT: .ascii "D:\\one.c"
+; X64-NEXT: .byte 0
+; X64-NEXT: .ascii "D:\\two.c"
+; X64-NEXT: .byte 0
+; X64-NEXT: .zero 2
+
+; OBJ64: Section {
+; OBJ64: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ64: Characteristics [ (0x42100040)
+; OBJ64: ]
+; OBJ64: Relocations [
+; OBJ64-NEXT: 0xC IMAGE_REL_AMD64_SECREL f
+; OBJ64-NEXT: 0x10 IMAGE_REL_AMD64_SECTION f
+; OBJ64-NEXT: ]
+; OBJ64: FunctionLineTable [
+; OBJ64-NEXT: Name: f
+; OBJ64-NEXT: CodeSize: 0x18
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\input.c
+; OBJ64-NEXT: +0x0: 3
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\one.c
+; OBJ64-NEXT: +0x4: 1
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\two.c
+; OBJ64-NEXT: +0x9: 2
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\one.c
+; OBJ64-NEXT: +0xE: 7
+; OBJ64-NEXT: +0x13: 8
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: ]
+; OBJ64: }
+
+; Function Attrs: nounwind
+define void @f() #0 {
+entry:
+ call void @g(), !dbg !12
+ call void @g(), !dbg !15
+ call void @g(), !dbg !18
+ ret void, !dbg !19
+}
+
+declare void @g() #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [D:\/<unknown>] [DW_LANG_C99]
+!1 = metadata !{metadata !"<unknown>", metadata !"D:\5C"}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"f", metadata !"f", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @f, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [f]
+!5 = metadata !{metadata !"input.c", metadata !"D:\5C"}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [D:\/input.c]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{null}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5 "}
+!12 = metadata !{i32 1, i32 0, metadata !13, null}
+!13 = metadata !{i32 786443, metadata !14, metadata !4} ; [ DW_TAG_lexical_block ] [D:\/one.c]
+!14 = metadata !{metadata !"one.c", metadata !"D:\5C"}
+!15 = metadata !{i32 2, i32 0, metadata !16, null}
+!16 = metadata !{i32 786443, metadata !17, metadata !4} ; [ DW_TAG_lexical_block ] [D:\/two.c]
+!17 = metadata !{metadata !"two.c", metadata !"D:\5C"}
+!18 = metadata !{i32 7, i32 0, metadata !13, null}
+!19 = metadata !{i32 8, i32 0, metadata !13, null} ; [ DW_TAG_imported_declaration ]
diff --git a/test/DebugInfo/COFF/multifunction.ll b/test/DebugInfo/COFF/multifunction.ll
new file mode 100644
index 000000000000..5a6555805fd8
--- /dev/null
+++ b/test/DebugInfo/COFF/multifunction.ll
@@ -0,0 +1,378 @@
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -O0 < %s | FileCheck --check-prefix=X86 %s
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -o - -O0 < %s | llvm-mc -triple=i686-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ32 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -O0 < %s | FileCheck --check-prefix=X64 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -o - -O0 < %s | llvm-mc -triple=x86_64-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ64 %s
+
+; This LL file was generated by running clang on the following code:
+; D:\source.c:
+; 1 void z(void);
+; 2
+; 3 void x(void) {
+; 4 z();
+; 5 }
+; 6
+; 7 void y(void) {
+; 8 z();
+; 9 }
+; 10
+; 11 void f(void) {
+; 12 x();
+; 13 y();
+; 14 z();
+; 15 }
+
+
+; X86-LABEL: _x:
+; X86-NEXT: # BB
+; X86-NEXT: [[X_CALL:.*]]:{{$}}
+; X86-NEXT: calll _z
+; X86-NEXT: [[X_RETURN:.*]]:
+; X86-NEXT: ret
+; X86-NEXT: [[END_OF_X:.*]]:
+;
+; X86-LABEL: _y:
+; X86-NEXT: # BB
+; X86-NEXT: [[Y_CALL:.*]]:{{$}}
+; X86-NEXT: calll _z
+; X86-NEXT: [[Y_RETURN:.*]]:
+; X86-NEXT: ret
+; X86-NEXT: [[END_OF_Y:.*]]:
+;
+; X86-LABEL: _f:
+; X86-NEXT: # BB
+; X86-NEXT: [[F_CALLS_X:.*]]:{{$}}
+; X86-NEXT: calll _x
+; X86-NEXT: [[F_CALLS_Y:.*]]:
+; X86-NEXT: calll _y
+; X86-NEXT: [[F_CALLS_Z:.*]]:
+; X86-NEXT: calll _z
+; X86-NEXT: [[F_RETURN:.*]]:
+; X86-NEXT: ret
+; X86-NEXT: [[END_OF_F:.*]]:
+;
+; X86-LABEL: .section .debug$S,"rnd"
+; X86-NEXT: .long 4
+; Line table subsection for x
+; X86-NEXT: .long 242
+; X86-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X86-NEXT: [[F2_START]]:
+; X86-NEXT: .secrel32 _x
+; X86-NEXT: .secidx _x
+; X86-NEXT: .long [[END_OF_X]]-_x
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 0
+; X86-NEXT: .long 2
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[X_CALL]]-_x
+; X86-NEXT: .long 4
+; X86-NEXT: .long [[X_RETURN]]-_x
+; X86-NEXT: .long 5
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; X86-NEXT: [[F2_END]]:
+; Line table subsection for y
+; X86-NEXT: .long 242
+; X86-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X86-NEXT: [[F2_START]]:
+; X86-NEXT: .secrel32 _y
+; X86-NEXT: .secidx _y
+; X86-NEXT: .long [[END_OF_Y]]-_y
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 0
+; X86-NEXT: .long 2
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[Y_CALL]]-_y
+; X86-NEXT: .long 8
+; X86-NEXT: .long [[Y_RETURN]]-_y
+; X86-NEXT: .long 9
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; X86-NEXT: [[F2_END]]:
+; Line table subsection for f
+; X86-NEXT: .long 242
+; X86-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X86-NEXT: [[F2_START]]:
+; X86-NEXT: .secrel32 _f
+; X86-NEXT: .secidx _f
+; X86-NEXT: .long [[END_OF_F]]-_f
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 0
+; X86-NEXT: .long 4
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[F_CALLS_X]]-_f
+; X86-NEXT: .long 12
+; X86-NEXT: .long [[F_CALLS_Y]]-_f
+; X86-NEXT: .long 13
+; X86-NEXT: .long [[F_CALLS_Z]]-_f
+; X86-NEXT: .long 14
+; X86-NEXT: .long [[F_RETURN]]-_f
+; X86-NEXT: .long 15
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; X86-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X86-NEXT: .long 244
+; X86-NEXT: .long 8
+; X86-NEXT: .long 1
+; X86-NEXT: .long 0
+; String table
+; X86-NEXT: .long 243
+; X86-NEXT: .long 13
+; X86-NEXT: .byte 0
+; X86-NEXT: .ascii "D:\\source.c"
+; X86-NEXT: .byte 0
+; X86-NEXT: .zero 3
+
+; OBJ32: Section {
+; OBJ32: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ32: Characteristics [ (0x42100040)
+; OBJ32: ]
+; OBJ32: Relocations [
+; OBJ32-NEXT: 0xC IMAGE_REL_I386_SECREL _x
+; OBJ32-NEXT: 0x10 IMAGE_REL_I386_SECTION _x
+; OBJ32-NEXT: 0x3C IMAGE_REL_I386_SECREL _y
+; OBJ32-NEXT: 0x40 IMAGE_REL_I386_SECTION _y
+; OBJ32-NEXT: 0x6C IMAGE_REL_I386_SECREL _f
+; OBJ32-NEXT: 0x70 IMAGE_REL_I386_SECTION _f
+; OBJ32-NEXT: ]
+; OBJ32: FunctionLineTable [
+; OBJ32-NEXT: Name: _x
+; OBJ32-NEXT: CodeSize: 0x6
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\source.c
+; OBJ32-NEXT: +0x0: 4
+; OBJ32-NEXT: +0x5: 5
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: FunctionLineTable [
+; OBJ32-NEXT: Name: _y
+; OBJ32-NEXT: CodeSize: 0x6
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\source.c
+; OBJ32-NEXT: +0x0: 8
+; OBJ32-NEXT: +0x5: 9
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: FunctionLineTable [
+; OBJ32-NEXT: Name: _f
+; OBJ32-NEXT: CodeSize: 0x10
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\source.c
+; OBJ32-NEXT: +0x0: 12
+; OBJ32-NEXT: +0x5: 13
+; OBJ32-NEXT: +0xA: 14
+; OBJ32-NEXT: +0xF: 15
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: ]
+; OBJ32: }
+
+; X64-LABEL: x:
+; X64-NEXT: [[X_START:.*]]:{{$}}
+; X64-NEXT: # BB
+; X64-NEXT: subq $40, %rsp
+; X64-NEXT: [[X_CALL_LINE:.*]]:{{$}}
+; X64-NEXT: callq z
+; X64-NEXT: [[X_EPILOG_AND_RET:.*]]:
+; X64-NEXT: addq $40, %rsp
+; X64-NEXT: ret
+; X64-NEXT: [[END_OF_X:.*]]:
+;
+; X64-LABEL: y:
+; X64-NEXT: [[Y_START:.*]]:{{$}}
+; X64-NEXT: # BB
+; X64-NEXT: subq $40, %rsp
+; X64-NEXT: [[Y_CALL_LINE:.*]]:{{$}}
+; X64-NEXT: callq z
+; X64-NEXT: [[Y_EPILOG_AND_RET:.*]]:
+; X64-NEXT: addq $40, %rsp
+; X64-NEXT: ret
+; X64-NEXT: [[END_OF_Y:.*]]:
+;
+; X64-LABEL: f:
+; X64-NEXT: [[F_START:.*]]:{{$}}
+; X64-NEXT: # BB
+; X64-NEXT: subq $40, %rsp
+; X64-NEXT: [[F_CALLS_X:.*]]:{{$}}
+; X64-NEXT: callq x
+; X64-NEXT: [[F_CALLS_Y:.*]]:
+; X64-NEXT: callq y
+; X64-NEXT: [[F_CALLS_Z:.*]]:
+; X64-NEXT: callq z
+; X64-NEXT: [[F_EPILOG_AND_RET:.*]]:
+; X64-NEXT: addq $40, %rsp
+; X64-NEXT: ret
+; X64-NEXT: [[END_OF_F:.*]]:
+;
+; X64-LABEL: .section .debug$S,"rnd"
+; X64-NEXT: .long 4
+; Line table subsection for x
+; X64-NEXT: .long 242
+; X64-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X64-NEXT: [[F2_START]]:
+; X64-NEXT: .secrel32 x
+; X64-NEXT: .secidx x
+; X64-NEXT: .long [[END_OF_X]]-x
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 0
+; X64-NEXT: .long 3
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[X_START]]-x
+; X64-NEXT: .long 3
+; X64-NEXT: .long [[X_CALL_LINE]]-x
+; X64-NEXT: .long 4
+; X64-NEXT: .long [[X_EPILOG_AND_RET]]-x
+; X64-NEXT: .long 5
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; X64-NEXT: [[F2_END]]:
+; Line table subsection for y
+; X64-NEXT: .long 242
+; X64-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X64-NEXT: [[F2_START]]:
+; X64-NEXT: .secrel32 y
+; X64-NEXT: .secidx y
+; X64-NEXT: .long [[END_OF_Y]]-y
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 0
+; X64-NEXT: .long 3
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[Y_START]]-y
+; X64-NEXT: .long 7
+; X64-NEXT: .long [[Y_CALL_LINE]]-y
+; X64-NEXT: .long 8
+; X64-NEXT: .long [[Y_EPILOG_AND_RET]]-y
+; X64-NEXT: .long 9
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; X64-NEXT: [[F2_END]]:
+; Line table subsection for f
+; X64-NEXT: .long 242
+; X64-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X64-NEXT: [[F2_START]]:
+; X64-NEXT: .secrel32 f
+; X64-NEXT: .secidx f
+; X64-NEXT: .long [[END_OF_F]]-f
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 0
+; X64-NEXT: .long 5
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[F_START]]-f
+; X64-NEXT: .long 11
+; X64-NEXT: .long [[F_CALLS_X]]-f
+; X64-NEXT: .long 12
+; X64-NEXT: .long [[F_CALLS_Y]]-f
+; X64-NEXT: .long 13
+; X64-NEXT: .long [[F_CALLS_Z]]-f
+; X64-NEXT: .long 14
+; X64-NEXT: .long [[F_EPILOG_AND_RET]]-f
+; X64-NEXT: .long 15
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; X64-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X64-NEXT: .long 244
+; X64-NEXT: .long 8
+; X64-NEXT: .long 1
+; X64-NEXT: .long 0
+; String table
+; X64-NEXT: .long 243
+; X64-NEXT: .long 13
+; X64-NEXT: .byte 0
+; X64-NEXT: .ascii "D:\\source.c"
+; X64-NEXT: .byte 0
+; X64-NEXT: .zero 3
+
+; OBJ64: Section {
+; OBJ64: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ64: Characteristics [ (0x42100040)
+; OBJ64: ]
+; OBJ64: Relocations [
+; OBJ64-NEXT: 0xC IMAGE_REL_AMD64_SECREL x
+; OBJ64-NEXT: 0x10 IMAGE_REL_AMD64_SECTION x
+; OBJ64-NEXT: 0x44 IMAGE_REL_AMD64_SECREL y
+; OBJ64-NEXT: 0x48 IMAGE_REL_AMD64_SECTION y
+; OBJ64-NEXT: 0x7C IMAGE_REL_AMD64_SECREL f
+; OBJ64-NEXT: 0x80 IMAGE_REL_AMD64_SECTION f
+; OBJ64-NEXT: ]
+; OBJ64: FunctionLineTable [
+; OBJ64-NEXT: Name: x
+; OBJ64-NEXT: CodeSize: 0xE
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\source.c
+; OBJ64-NEXT: +0x0: 3
+; OBJ64-NEXT: +0x4: 4
+; OBJ64-NEXT: +0x9: 5
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: FunctionLineTable [
+; OBJ64-NEXT: Name: y
+; OBJ64-NEXT: CodeSize: 0xE
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\source.c
+; OBJ64-NEXT: +0x0: 7
+; OBJ64-NEXT: +0x4: 8
+; OBJ64-NEXT: +0x9: 9
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: FunctionLineTable [
+; OBJ64-NEXT: Name: f
+; OBJ64-NEXT: CodeSize: 0x18
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\source.c
+; OBJ64-NEXT: +0x0: 11
+; OBJ64-NEXT: +0x4: 12
+; OBJ64-NEXT: +0x9: 13
+; OBJ64-NEXT: +0xE: 14
+; OBJ64-NEXT: +0x13: 15
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: ]
+; OBJ64: }
+
+; Function Attrs: nounwind
+define void @x() #0 {
+entry:
+ call void @z(), !dbg !14
+ ret void, !dbg !15
+}
+
+declare void @z() #1
+
+; Function Attrs: nounwind
+define void @y() #0 {
+entry:
+ call void @z(), !dbg !16
+ ret void, !dbg !17
+}
+
+; Function Attrs: nounwind
+define void @f() #0 {
+entry:
+ call void @x(), !dbg !18
+ call void @y(), !dbg !19
+ call void @z(), !dbg !20
+ ret void, !dbg !21
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!11, !12}
+!llvm.ident = !{!13}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [D:\/<unknown>] [DW_LANG_C99]
+!1 = metadata !{metadata !"<unknown>", metadata !"D:\5C"}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4, metadata !9, metadata !10}
+!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"x", metadata !"x", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @x, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [x]
+!5 = metadata !{metadata !"source.c", metadata !"D:\5C"}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [D:\/source.c]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{null}
+!9 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"y", metadata !"y", metadata !"", i32 7, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @y, null, null, metadata !2, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [y]
+!10 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"f", metadata !"f", metadata !"", i32 11, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @f, null, null, metadata !2, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [f]
+!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!12 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!13 = metadata !{metadata !"clang version 3.5 "}
+!14 = metadata !{i32 4, i32 0, metadata !4, null}
+!15 = metadata !{i32 5, i32 0, metadata !4, null}
+!16 = metadata !{i32 8, i32 0, metadata !9, null} ; [ DW_TAG_imported_declaration ]
+!17 = metadata !{i32 9, i32 0, metadata !9, null}
+!18 = metadata !{i32 12, i32 0, metadata !10, null}
+!19 = metadata !{i32 13, i32 0, metadata !10, null}
+!20 = metadata !{i32 14, i32 0, metadata !10, null}
+!21 = metadata !{i32 15, i32 0, metadata !10, null}
diff --git a/test/DebugInfo/COFF/simple.ll b/test/DebugInfo/COFF/simple.ll
new file mode 100644
index 000000000000..2613a18298d3
--- /dev/null
+++ b/test/DebugInfo/COFF/simple.ll
@@ -0,0 +1,167 @@
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -O0 < %s | FileCheck --check-prefix=X86 %s
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -o - -O0 < %s | llvm-mc -triple=i686-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ32 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -O0 < %s | FileCheck --check-prefix=X64 %s
+; RUN: llc -mcpu=core2 -mtriple=x86_64-pc-win32 -o - -O0 < %s | llvm-mc -triple=x86_64-pc-win32 -filetype=obj | llvm-readobj -s -sr -codeview-linetables | FileCheck --check-prefix=OBJ64 %s
+
+; This LL file was generated by running clang on the following code:
+; D:\test.c:
+; 1 void g(void);
+; 2
+; 3 void f(void) {
+; 4 g();
+; 5 }
+
+; X86-LABEL: _f:
+; X86-NEXT: # BB
+; X86-NEXT: [[CALL_LINE:^L.*]]:{{$}}
+; X86-NEXT: calll _g
+; X86-NEXT: [[RETURN_STMT:.*]]:
+; X86-NEXT: ret
+; X86-NEXT: [[END_OF_F:.*]]:
+;
+; X86-LABEL: .section .debug$S,"rnd"
+; X86-NEXT: .long 4
+; X86-NEXT: .long 242
+; X86-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X86-NEXT: [[F2_START]]:
+; X86-NEXT: .secrel32 _f
+; X86-NEXT: .secidx _f
+; X86-NEXT: .long [[END_OF_F]]-_f
+; X86-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X86-NEXT: .long 0
+; X86-NEXT: .long 2
+; X86-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X86-NEXT: .long [[CALL_LINE]]-_f
+; X86-NEXT: .long 4
+; X86-NEXT: .long [[RETURN_STMT]]-_f
+; X86-NEXT: .long 5
+; X86-NEXT: [[FILE_SEGMENT_END]]:
+; X86-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X86-NEXT: .long 244
+; X86-NEXT: .long 8
+; X86-NEXT: .long 1
+; X86-NEXT: .long 0
+; String table
+; X86-NEXT: .long 243
+; X86-NEXT: .long 11
+; X86-NEXT: .byte 0
+; X86-NEXT: .ascii "D:\\test.c"
+; X86-NEXT: .byte 0
+; Padding
+; X86-NEXT: .zero 1
+
+; OBJ32: Section {
+; OBJ32: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ32: Characteristics [ (0x42100040)
+; OBJ32: ]
+; OBJ32: Relocations [
+; OBJ32-NEXT: 0xC IMAGE_REL_I386_SECREL _f
+; OBJ32-NEXT: 0x10 IMAGE_REL_I386_SECTION _f
+; OBJ32-NEXT: ]
+; OBJ32: FunctionLineTable [
+; OBJ32-NEXT: Name: _f
+; OBJ32-NEXT: CodeSize: 0x6
+; OBJ32-NEXT: FilenameSegment [
+; OBJ32-NEXT: Filename: D:\test.c
+; OBJ32-NEXT: +0x0: 4
+; OBJ32-NEXT: +0x5: 5
+; OBJ32-NEXT: ]
+; OBJ32-NEXT: ]
+; OBJ32: }
+
+; X64-LABEL: f:
+; X64-NEXT: [[START:.*]]:{{$}}
+; X64-NEXT: # BB
+; X64-NEXT: subq $40, %rsp
+; X64-NEXT: [[CALL_LINE:.*]]:{{$}}
+; X64-NEXT: callq g
+; X64-NEXT: [[EPILOG_AND_RET:.*]]:
+; X64-NEXT: addq $40, %rsp
+; X64-NEXT: ret
+; X64-NEXT: [[END_OF_F:.*]]:
+;
+; X64-LABEL: .section .debug$S,"rnd"
+; X64-NEXT: .long 4
+; X64-NEXT: .long 242
+; X64-NEXT: .long [[F2_END:.*]]-[[F2_START:.*]]
+; X64-NEXT: [[F2_START]]:
+; X64-NEXT: .secrel32 f
+; X64-NEXT: .secidx f
+; X64-NEXT: .long [[END_OF_F]]-f
+; X64-NEXT: [[FILE_SEGMENT_START:[^:]*]]:
+; X64-NEXT: .long 0
+; X64-NEXT: .long 3
+; X64-NEXT: .long [[FILE_SEGMENT_END:.*]]-[[FILE_SEGMENT_START]]
+; X64-NEXT: .long [[START]]-f
+; X64-NEXT: .long 3
+; X64-NEXT: .long [[CALL_LINE]]-f
+; X64-NEXT: .long 4
+; X64-NEXT: .long [[EPILOG_AND_RET]]-f
+; X64-NEXT: .long 5
+; X64-NEXT: [[FILE_SEGMENT_END]]:
+; X64-NEXT: [[F2_END]]:
+; File index to string table offset subsection
+; X64-NEXT: .long 244
+; X64-NEXT: .long 8
+; X64-NEXT: .long 1
+; X64-NEXT: .long 0
+; String table
+; X64-NEXT: .long 243
+; X64-NEXT: .long 11
+; X64-NEXT: .byte 0
+; X64-NEXT: .ascii "D:\\test.c"
+; X64-NEXT: .byte 0
+; Padding
+; X64-NEXT: .zero 1
+
+; OBJ64: Section {
+; OBJ64: Name: .debug$S (2E 64 65 62 75 67 24 53)
+; OBJ64: Characteristics [ (0x42100040)
+; OBJ64: ]
+; OBJ64: Relocations [
+; OBJ64-NEXT: 0xC IMAGE_REL_AMD64_SECREL f
+; OBJ64-NEXT: 0x10 IMAGE_REL_AMD64_SECTION f
+; OBJ64-NEXT: ]
+; OBJ64: FunctionLineTable [
+; OBJ64-NEXT: Name: f
+; OBJ64-NEXT: CodeSize: 0xE
+; OBJ64-NEXT: FilenameSegment [
+; OBJ64-NEXT: Filename: D:\test.c
+; OBJ64-NEXT: +0x0: 3
+; OBJ64-NEXT: +0x4: 4
+; OBJ64-NEXT: +0x9: 5
+; OBJ64-NEXT: ]
+; OBJ64-NEXT: ]
+; OBJ64: }
+
+; Function Attrs: nounwind
+define void @f() #0 {
+entry:
+ call void @g(), !dbg !12
+ ret void, !dbg !13
+}
+
+declare void @g() #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [D:\/<unknown>] [DW_LANG_C99]
+!1 = metadata !{metadata !"<unknown>", metadata !"D:\5C"}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"f", metadata !"f", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @f, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [f]
+!5 = metadata !{metadata !"test.c", metadata !"D:\5C"}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [D:\/test.c]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{null}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5 "}
+!12 = metadata !{i32 4, i32 0, metadata !4, null}
+!13 = metadata !{i32 5, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/COFF/tail-call-without-lexical-scopes.ll b/test/DebugInfo/COFF/tail-call-without-lexical-scopes.ll
new file mode 100644
index 000000000000..4d2e42734475
--- /dev/null
+++ b/test/DebugInfo/COFF/tail-call-without-lexical-scopes.ll
@@ -0,0 +1,78 @@
+; RUN: llc -mcpu=core2 -mtriple=i686-pc-win32 -O0 < %s | FileCheck --check-prefix=X86 %s
+
+; This LL file was generated by running clang on the following code:
+; D:\test.cpp:
+; 1 void foo();
+; 2
+; 3 static void bar(int arg, ...) {
+; 4 foo();
+; 5 }
+; 6
+; 7 void spam(void) {
+; 8 bar(42);
+; 9 }
+;
+; The bar function happens to have no lexical scopes, yet it has one instruction
+; with debug information available. This was reported as PR19239.
+
+; X86-LABEL: {{^}}"?bar@@YAXHZZ":
+; X86-NEXT: # BB
+; X86-NEXT: [[JMP_LINE:^L.*]]:{{$}}
+; X86-NEXT: jmp "?foo@@YAXXZ"
+; X86-NEXT: [[END_OF_BAR:^L.*]]:{{$}}
+; X86-NOT: ret
+
+; X86-LABEL: .section .debug$S,"rnd"
+; X86: .secrel32 "?bar@@YAXHZZ"
+; X86-NEXT: .secidx "?bar@@YAXHZZ"
+; X86: .long 0
+; X86-NEXT: .long 1
+; X86-NEXT: .long {{.*}}
+; X86-NEXT: .long [[JMP_LINE]]-"?bar@@YAXHZZ"
+; X86-NEXT: .long 4
+
+; X86-LABEL: .long 244
+
+; ModuleID = 'test.cpp'
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-win32"
+
+; Function Attrs: nounwind
+define void @"\01?spam@@YAXXZ"() #0 {
+entry:
+ tail call void @"\01?bar@@YAXHZZ"(), !dbg !11
+ ret void, !dbg !12
+}
+
+; Function Attrs: nounwind
+define internal void @"\01?bar@@YAXHZZ"() #0 {
+entry:
+ tail call void @"\01?foo@@YAXXZ"() #2, !dbg !13
+ ret void, !dbg !14
+}
+
+declare void @"\01?foo@@YAXXZ"() #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [D:\/test.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"test.cpp", metadata !"D:\5C"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !7}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"spam", metadata !"spam", metadata !"", i32 7, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @"\01?spam@@YAXXZ", null, null, metadata !2, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [spam]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [D:\/test.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 3, metadata !6, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [local] [def] [bar]
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5.0 "}
+!11 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!12 = metadata !{i32 9, i32 0, metadata !4, null}
+!13 = metadata !{i32 4, i32 0, metadata !7, null}
+!14 = metadata !{i32 5, i32 0, metadata !7, null}
diff --git a/test/DebugInfo/Inputs/arange-overlap.cc b/test/DebugInfo/Inputs/arange-overlap.cc
new file mode 100644
index 000000000000..82e3f120efda
--- /dev/null
+++ b/test/DebugInfo/Inputs/arange-overlap.cc
@@ -0,0 +1,26 @@
+void call();
+
+struct S {
+ static void foo() { call(); call(); }
+ static void bar() { call(); call(); }
+ static void baz() {}
+};
+
+#ifdef FILE1
+# define FUNC_NAME func1
+# define FUNC_BODY \
+ S::foo(); S::bar(); S::baz();
+#else
+# define FUNC_NAME func2
+# define FUNC_BODY \
+ S::bar();
+#endif
+
+void FUNC_NAME() {
+ FUNC_BODY
+}
+
+// Build instructions:
+// $ clang -g -fPIC -c -DFILE1 arange-overlap.cc -o obj1.o
+// $ clang -g -fPIC -c arange-overlap.cc -o obj2.o
+// $ clang -shared obj1.o obj2.o -o <output>
diff --git a/test/DebugInfo/Inputs/arange-overlap.elf-x86_64 b/test/DebugInfo/Inputs/arange-overlap.elf-x86_64
new file mode 100755
index 000000000000..075e9c271231
--- /dev/null
+++ b/test/DebugInfo/Inputs/arange-overlap.elf-x86_64
Binary files differ
diff --git a/test/DebugInfo/Inputs/arm-relocs.elf-arm b/test/DebugInfo/Inputs/arm-relocs.elf-arm
new file mode 100644
index 000000000000..4864c36256f3
--- /dev/null
+++ b/test/DebugInfo/Inputs/arm-relocs.elf-arm
Binary files differ
diff --git a/test/DebugInfo/Inputs/dwarfdump-line-dwo.cc b/test/DebugInfo/Inputs/dwarfdump-line-dwo.cc
new file mode 100644
index 000000000000..2784ae24ee56
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump-line-dwo.cc
@@ -0,0 +1,10 @@
+struct foo {
+};
+
+foo f;
+
+// Built with GCC
+// $ mkdir -p /tmp/dbginfo
+// $ cp dwarfdump-line-dwo.cc /tmp/dbginfo
+// $ cd /tmp/dbginfo
+// $ g++ -c -fdebug-types-section dwarfdump-line-dwo.cc -o <output>
diff --git a/test/DebugInfo/Inputs/dwarfdump-line-dwo.elf-x86-64 b/test/DebugInfo/Inputs/dwarfdump-line-dwo.elf-x86-64
new file mode 100644
index 000000000000..9f1d267dde1e
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump-line-dwo.elf-x86-64
Binary files differ
diff --git a/test/DebugInfo/Inputs/fission-ranges.cc b/test/DebugInfo/Inputs/fission-ranges.cc
new file mode 100644
index 000000000000..a585bf9c0086
--- /dev/null
+++ b/test/DebugInfo/Inputs/fission-ranges.cc
@@ -0,0 +1,17 @@
+static inline int inlined_f() {
+ volatile int x = 2;
+ return x;
+}
+
+int main() {
+ return inlined_f();
+}
+
+// Build instructions:
+// $ mkdir /tmp/dbginfo
+// $ cp fission-ranges.cc /tmp/dbginfo/
+// $ cd /tmp/dbginfo
+// $ gcc -gsplit-dwarf -O2 -fPIC fission-ranges.cc -c -o obj2.o
+// $ clang -gsplit-dwarf -O2 -fsanitize=address -fPIC -Dmain=foo fission-ranges.cc -c -o obj1.o
+// $ gcc obj1.o obj2.o -shared -o <output>
+// $ objcopy --remove-section=.debug_aranges <output>
diff --git a/test/DebugInfo/Inputs/fission-ranges.elf-x86_64 b/test/DebugInfo/Inputs/fission-ranges.elf-x86_64
new file mode 100755
index 000000000000..3d2fd79dd747
--- /dev/null
+++ b/test/DebugInfo/Inputs/fission-ranges.elf-x86_64
Binary files differ
diff --git a/test/DebugInfo/Inputs/llvm-symbolizer-dwo-test b/test/DebugInfo/Inputs/llvm-symbolizer-dwo-test
new file mode 100755
index 000000000000..c28c3d277a7c
--- /dev/null
+++ b/test/DebugInfo/Inputs/llvm-symbolizer-dwo-test
Binary files differ
diff --git a/test/DebugInfo/Inputs/llvm-symbolizer-dwo-test.cc b/test/DebugInfo/Inputs/llvm-symbolizer-dwo-test.cc
new file mode 100644
index 000000000000..ea0967a263a8
--- /dev/null
+++ b/test/DebugInfo/Inputs/llvm-symbolizer-dwo-test.cc
@@ -0,0 +1,18 @@
+int f(int a, int b) {
+ return a + b;
+}
+
+int g(int a) {
+ return a + 1;
+}
+
+
+int main() {
+ return f(2, g(2));
+}
+
+// Built with Clang 3.5.0:
+// $ mkdir -p /tmp/dbginfo
+// $ cp llvm-symbolizer-dwo-test.cc /tmp/dbginfo
+// $ cd /tmp/dbginfo
+// $ clang -gsplit-dwarf llvm-symbolizer-dwo-test.cc
diff --git a/test/DebugInfo/Inputs/llvm-symbolizer-test.c b/test/DebugInfo/Inputs/llvm-symbolizer-test.c
new file mode 100644
index 000000000000..4c40c00c6736
--- /dev/null
+++ b/test/DebugInfo/Inputs/llvm-symbolizer-test.c
@@ -0,0 +1,18 @@
+int f(int a, int b) {
+ return a + b;
+}
+
+int g(int a) {
+ return a + 1;
+}
+
+
+int main() {
+ return f(2, g(2));
+}
+
+// Built with Clang 3.3:
+// $ mkdir -p /tmp/dbginfo
+// $ cp llvm-symbolizer-test.c /tmp/dbginfo
+// $ cd /tmp/dbginfo
+// $ clang -g llvm-symbolizer-test.c -o <output>
diff --git a/test/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64 b/test/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64
new file mode 100755
index 000000000000..99a448a77789
--- /dev/null
+++ b/test/DebugInfo/Inputs/llvm-symbolizer-test.elf-x86-64
Binary files differ
diff --git a/test/DebugInfo/Inputs/shared-object-stripped.elf-i386 b/test/DebugInfo/Inputs/shared-object-stripped.elf-i386
new file mode 100644
index 000000000000..727c6a67b0ce
--- /dev/null
+++ b/test/DebugInfo/Inputs/shared-object-stripped.elf-i386
Binary files differ
diff --git a/test/DebugInfo/Mips/delay-slot.ll b/test/DebugInfo/Mips/delay-slot.ll
new file mode 100644
index 000000000000..9bce4ba6c9d8
--- /dev/null
+++ b/test/DebugInfo/Mips/delay-slot.ll
@@ -0,0 +1,75 @@
+; RUN: llc -filetype=obj -O0 < %s -mtriple mips-unknown-linux-gnu | llvm-dwarfdump - | FileCheck %s
+; PR19815
+
+; Generated using clang -target mips-linux-gnu -g test.c -S -o - -flto | opt -sroa -S
+; test.c:
+;
+; int foo(int x) {
+; if (x)
+; return 0;
+; return 1;
+; }
+
+; CHECK: Address Line Column File ISA Discriminator Flags
+; CHECK: ------------------ ------ ------ ------ --- ------------- -------------
+; CHECK: 0x0000000000000000 1 0 1 0 0 is_stmt
+; CHECK: 0x0000000000000000 1 0 1 0 0 is_stmt prologue_end
+; CHECK: 0x0000000000000008 2 0 1 0 0 is_stmt
+; CHECK: 0x0000000000000020 3 0 1 0 0 is_stmt
+; CHECK: 0x0000000000000030 4 0 1 0 0 is_stmt
+; CHECK: 0x0000000000000040 5 0 1 0 0 is_stmt
+; CHECK: 0x0000000000000050 5 0 1 0 0 is_stmt end_sequence
+
+target datalayout = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+; Function Attrs: nounwind
+define i32 @foo(i32 %x) #0 {
+entry:
+ call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !12), !dbg !13
+ %tobool = icmp ne i32 %x, 0, !dbg !14
+ br i1 %tobool, label %if.then, label %if.end, !dbg !14
+
+if.then: ; preds = %entry
+ br label %return, !dbg !16
+
+if.end: ; preds = %entry
+ br label %return, !dbg !17
+
+return: ; preds = %if.end, %if.then
+ %retval.0 = phi i32 [ 0, %if.then ], [ 1, %if.end ]
+ ret i32 %retval.0, !dbg !18
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/test.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"test.c", metadata !"/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/test.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5.0"}
+!12 = metadata !{i32 786689, metadata !4, metadata !"x", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [x] [line 1]
+!13 = metadata !{i32 1, i32 0, metadata !4, null}
+!14 = metadata !{i32 2, i32 0, metadata !15, null}
+!15 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/test.c]
+!16 = metadata !{i32 3, i32 0, metadata !15, null}
+!17 = metadata !{i32 4, i32 0, metadata !4, null}
+!18 = metadata !{i32 5, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/Mips/lit.local.cfg b/test/DebugInfo/Mips/lit.local.cfg
new file mode 100644
index 000000000000..7d12f7a9c564
--- /dev/null
+++ b/test/DebugInfo/Mips/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'Mips' in config.root.targets:
+ config.unsupported = True
diff --git a/test/DebugInfo/PR20038.ll b/test/DebugInfo/PR20038.ll
new file mode 100644
index 000000000000..61145e5c8441
--- /dev/null
+++ b/test/DebugInfo/PR20038.ll
@@ -0,0 +1,168 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; IR generated from clang -O0 with:
+; struct C {
+; ~C();
+; };
+; extern bool b;
+; void fun4() { b && (C(), 1); }
+; __attribute__((always_inline)) C::~C() { }
+
+; CHECK: DW_TAG_structure_type
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "C"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[C_DTOR_DECL:.*]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "~C"
+
+; CHECK: [[D1_ABS:.*]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZN1CD1Ev"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[D1_THIS_ABS:.*]]: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "this"
+
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "fun4"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_lexical_block
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[D1_ABS]]}
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[D1_THIS_ABS]]}
+
+; FIXME: D2 is actually inlined into D1 but doesn't show up here, possibly due
+; to there being no work in D2 (calling another member function from the dtor
+; causes D2 to show up, calling a free function doesn't).
+
+; CHECK-NOT: DW_TAG
+; CHECK: NULL
+; CHECK-NOT: DW_TAG
+; CHECK: NULL
+; CHECK-NOT: DW_TAG
+; CHECK: NULL
+
+%struct.C = type { i8 }
+
+@b = external global i8
+
+; Function Attrs: nounwind
+define void @_Z4fun4v() #0 {
+entry:
+ %this.addr.i.i = alloca %struct.C*, align 8, !dbg !21
+ %this.addr.i = alloca %struct.C*, align 8, !dbg !22
+ %agg.tmp.ensured = alloca %struct.C, align 1
+ %cleanup.cond = alloca i1
+ %0 = load i8* @b, align 1, !dbg !24
+ %tobool = trunc i8 %0 to i1, !dbg !24
+ store i1 false, i1* %cleanup.cond
+ br i1 %tobool, label %land.rhs, label %land.end, !dbg !24
+
+land.rhs: ; preds = %entry
+ store i1 true, i1* %cleanup.cond, !dbg !25
+ br label %land.end
+
+land.end: ; preds = %land.rhs, %entry
+ %1 = phi i1 [ false, %entry ], [ true, %land.rhs ]
+ %cleanup.is_active = load i1* %cleanup.cond, !dbg !27
+ br i1 %cleanup.is_active, label %cleanup.action, label %cleanup.done, !dbg !27
+
+cleanup.action: ; preds = %land.end
+ store %struct.C* %agg.tmp.ensured, %struct.C** %this.addr.i, align 8, !dbg !22
+ call void @llvm.dbg.declare(metadata !{%struct.C** %this.addr.i}, metadata !29), !dbg !31
+ %this1.i = load %struct.C** %this.addr.i, !dbg !22
+ store %struct.C* %this1.i, %struct.C** %this.addr.i.i, align 8, !dbg !21
+ call void @llvm.dbg.declare(metadata !{%struct.C** %this.addr.i.i}, metadata !32), !dbg !33
+ %this1.i.i = load %struct.C** %this.addr.i.i, !dbg !21
+ br label %cleanup.done, !dbg !22
+
+cleanup.done: ; preds = %cleanup.action, %land.end
+ ret void, !dbg !34
+}
+
+; Function Attrs: alwaysinline nounwind
+define void @_ZN1CD1Ev(%struct.C* %this) unnamed_addr #1 align 2 {
+entry:
+ %this.addr.i = alloca %struct.C*, align 8, !dbg !37
+ %this.addr = alloca %struct.C*, align 8
+ store %struct.C* %this, %struct.C** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.C** %this.addr}, metadata !29), !dbg !38
+ %this1 = load %struct.C** %this.addr
+ store %struct.C* %this1, %struct.C** %this.addr.i, align 8, !dbg !37
+ call void @llvm.dbg.declare(metadata !{%struct.C** %this.addr.i}, metadata !32), !dbg !39
+ %this1.i = load %struct.C** %this.addr.i, !dbg !37
+ ret void, !dbg !37
+}
+
+; Function Attrs: alwaysinline nounwind
+define void @_ZN1CD2Ev(%struct.C* %this) unnamed_addr #1 align 2 {
+entry:
+ %this.addr = alloca %struct.C*, align 8
+ store %struct.C* %this, %struct.C** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.C** %this.addr}, metadata !32), !dbg !40
+ %this1 = load %struct.C** %this.addr
+ ret void, !dbg !41
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #2
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { alwaysinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!18, !19}
+!llvm.ident = !{!20}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !11, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/<stdin>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<stdin>", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !5, null, metadata !"C", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS1C"} ; [ DW_TAG_structure_type ] [C] [line 1, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !"PR20038.cpp", metadata !"/tmp/dbginfo"}
+!6 = metadata !{metadata !7}
+!7 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1C", metadata !"~C", metadata !"~C", metadata !"", i32 2, metadata !8, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 2} ; [ DW_TAG_subprogram ] [line 2] [~C]
+!8 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !9, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!9 = metadata !{null, metadata !10}
+!10 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1C]
+!11 = metadata !{metadata !12, metadata !16, metadata !17}
+!12 = metadata !{i32 786478, metadata !5, metadata !13, metadata !"fun4", metadata !"fun4", metadata !"_Z4fun4v", i32 5, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z4fun4v, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [fun4]
+!13 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [/tmp/dbginfo/PR20038.cpp]
+!14 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!15 = metadata !{null}
+!16 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1C", metadata !"~C", metadata !"~C", metadata !"_ZN1CD2Ev", i32 6, metadata !8, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.C*)* @_ZN1CD2Ev, null, metadata !7, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [~C]
+!17 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1C", metadata !"~C", metadata !"~C", metadata !"_ZN1CD1Ev", i32 6, metadata !8, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.C*)* @_ZN1CD1Ev, null, metadata !7, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [~C]
+!18 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!19 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!20 = metadata !{metadata !"clang version 3.5.0 "}
+!21 = metadata !{i32 6, i32 0, metadata !17, metadata !22}
+!22 = metadata !{i32 5, i32 0, metadata !23, null}
+!23 = metadata !{i32 786443, metadata !5, metadata !12, i32 5, i32 0, i32 3, i32 3} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/PR20038.cpp]
+!24 = metadata !{i32 5, i32 0, metadata !12, null}
+!25 = metadata !{i32 5, i32 0, metadata !26, null}
+!26 = metadata !{i32 786443, metadata !5, metadata !12, i32 5, i32 0, i32 1, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/PR20038.cpp]
+!27 = metadata !{i32 5, i32 0, metadata !28, null}
+!28 = metadata !{i32 786443, metadata !5, metadata !12, i32 5, i32 0, i32 2, i32 2} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/PR20038.cpp]
+!29 = metadata !{i32 786689, metadata !17, metadata !"this", null, i32 16777216, metadata !30, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!30 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1C]
+!31 = metadata !{i32 0, i32 0, metadata !17, metadata !22}
+!32 = metadata !{i32 786689, metadata !16, metadata !"this", null, i32 16777216, metadata !30, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!33 = metadata !{i32 0, i32 0, metadata !16, metadata !21}
+!34 = metadata !{i32 5, i32 0, metadata !35, null}
+!35 = metadata !{i32 786443, metadata !5, metadata !36, i32 5, i32 0, i32 5, i32 5} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/PR20038.cpp]
+!36 = metadata !{i32 786443, metadata !5, metadata !12, i32 5, i32 0, i32 4, i32 4} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/PR20038.cpp]
+!37 = metadata !{i32 6, i32 0, metadata !17, null}
+!38 = metadata !{i32 0, i32 0, metadata !17, null}
+!39 = metadata !{i32 0, i32 0, metadata !16, metadata !37}
+!40 = metadata !{i32 0, i32 0, metadata !16, null}
+!41 = metadata !{i32 6, i32 0, metadata !16, null}
diff --git a/test/DebugInfo/PowerPC/lit.local.cfg b/test/DebugInfo/PowerPC/lit.local.cfg
index 193ebebcd50e..091332439b18 100644
--- a/test/DebugInfo/PowerPC/lit.local.cfg
+++ b/test/DebugInfo/PowerPC/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'PowerPC' in targets:
+if not 'PowerPC' in config.root.targets:
config.unsupported = True
diff --git a/test/DebugInfo/PowerPC/tls-fission.ll b/test/DebugInfo/PowerPC/tls-fission.ll
index 4a744c722532..9cde2c79a00b 100644
--- a/test/DebugInfo/PowerPC/tls-fission.ll
+++ b/test/DebugInfo/PowerPC/tls-fission.ll
@@ -23,7 +23,7 @@
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !"tls.dwo"} ; [ DW_TAG_compile_unit ] [/tmp/tls.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"tls.cpp", metadata !"/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786484, i32 0, null, metadata !"tls", metadata !"tls", metadata !"", metadata !5, i32 1, metadata !6, i32 0, i32 1, i32* @tls, null} ; [ DW_TAG_variable ] [tls] [line 1] [def]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/tls.cpp]
diff --git a/test/DebugInfo/PowerPC/tls.ll b/test/DebugInfo/PowerPC/tls.ll
index 6557f5ea47b9..f2586eda4243 100644
--- a/test/DebugInfo/PowerPC/tls.ll
+++ b/test/DebugInfo/PowerPC/tls.ll
@@ -19,7 +19,7 @@
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/tls.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"tls.cpp", metadata !"/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786484, i32 0, null, metadata !"tls", metadata !"tls", metadata !"", metadata !5, i32 1, metadata !6, i32 0, i32 1, i32* @tls, null} ; [ DW_TAG_variable ] [tls] [line 1] [def]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/tls.cpp]
diff --git a/test/DebugInfo/Sparc/gnu-window-save.ll b/test/DebugInfo/Sparc/gnu-window-save.ll
new file mode 100644
index 000000000000..303a28777584
--- /dev/null
+++ b/test/DebugInfo/Sparc/gnu-window-save.ll
@@ -0,0 +1,71 @@
+; RUN: llc -filetype=obj -O0 < %s -mtriple sparc64-unknown-linux-gnu | llvm-dwarfdump - | FileCheck %s --check-prefix=SPARC64
+; RUN: llc -filetype=obj -O0 < %s -mtriple sparc-unknown-linux-gnu | llvm-dwarfdump - | FileCheck %s --check-prefix=SPARC32
+
+; Check for DW_CFA_GNU_window_save in debug_frame. Also, ensure that relocations
+; are applied correctly in debug_info.
+
+; SPARC64: file format ELF64-sparc
+
+; SPARC64: .debug_info
+; SPARC64: DW_TAG_compile_unit
+; SPARC64: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9,A-F,a-f]+}}] = "hello.c")
+; SPARC64: DW_TAG_subprogram
+; SPARC64: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9,A-F,a-f]+}}] = "main")
+; SPARC64: DW_TAG_base_type
+; SPARC64: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9,A-F,a-f]+}}] = "int")
+
+; SPARC64: .debug_frame
+; SPARC64: DW_CFA_def_cfa_register
+; SPARC64-NEXT: DW_CFA_GNU_window_save
+; SPARC64-NEXT: DW_CFA_register
+
+
+; SPARC32: file format ELF32-sparc
+
+; SPARC32: .debug_info
+; SPARC32: DW_TAG_compile_unit
+; SPARC32: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9,A-F,a-f]+}}] = "hello.c")
+; SPARC32: DW_TAG_subprogram
+; SPARC32: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9,A-F,a-f]+}}] = "main")
+; SPARC32: DW_TAG_base_type
+; SPARC32: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9,A-F,a-f]+}}] = "int")
+
+; SPARC32: .debug_frame
+; SPARC32: DW_CFA_def_cfa_register
+; SPARC32-NEXT: DW_CFA_GNU_window_save
+; SPARC32-NEXT: DW_CFA_register
+
+@.str = private unnamed_addr constant [14 x i8] c"hello, world\0A\00", align 1
+
+; Function Attrs: nounwind
+define signext i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %call = call signext i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([14 x i8]* @.str, i32 0, i32 0)), !dbg !12
+ ret i32 0, !dbg !13
+}
+
+declare signext i32 @printf(i8*, ...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 (http://llvm.org/git/clang.git 6a0714fee07fb7c4e32d3972b4fe2ce2f5678cf4) (llvm/ 672e88e934757f76d5c5e5258be41e7615094844)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/home/venkatra/work/benchmarks/test/hello/hello.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"hello.c", metadata !"/home/venkatra/work/benchmarks/test/hello"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 4} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 4] [main]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/home/venkatra/work/benchmarks/test/hello/hello.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5 (http://llvm.org/git/clang.git 6a0714fee07fb7c4e32d3972b4fe2ce2f5678cf4) (llvm/ 672e88e934757f76d5c5e5258be41e7615094844)"}
+!12 = metadata !{i32 5, i32 0, metadata !4, null}
+!13 = metadata !{i32 6, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/Sparc/lit.local.cfg b/test/DebugInfo/Sparc/lit.local.cfg
new file mode 100644
index 000000000000..d86c9e6d943a
--- /dev/null
+++ b/test/DebugInfo/Sparc/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'Sparc' in config.root.targets:
+ config.unsupported = True
diff --git a/test/DebugInfo/SystemZ/eh_frame.s b/test/DebugInfo/SystemZ/eh_frame.s
index 4e7afd56e94b..6189b9019673 100644
--- a/test/DebugInfo/SystemZ/eh_frame.s
+++ b/test/DebugInfo/SystemZ/eh_frame.s
@@ -11,9 +11,25 @@ check_largest_class:
.cfi_offset %r13, -56
.cfi_offset %r14, -48
.cfi_offset %r15, -40
- aghi %r15, -160
- .cfi_def_cfa_offset 320
- lmg %r13, %r15, 264(%r15)
+ aghi %r15, -224
+ .cfi_def_cfa_offset 384
+ std %f8, 160(%r15)
+ std %f9, 168(%r15)
+ std %f10, 176(%r15)
+ std %f11, 184(%r15)
+ std %f12, 192(%r15)
+ std %f13, 200(%r15)
+ std %f14, 208(%r15)
+ std %f15, 216(%r15)
+ .cfi_offset %f8, -224
+ .cfi_offset %f9, -216
+ .cfi_offset %f10, -208
+ .cfi_offset %f11, -200
+ .cfi_offset %f12, -192
+ .cfi_offset %f13, -184
+ .cfi_offset %f14, -176
+ .cfi_offset %f15, -168
+ lmg %r13, %r15, 328(%r15)
br %r14
.size check_largest_class, .-check_largest_class
.cfi_endproc
@@ -22,8 +38,8 @@ check_largest_class:
#
# Contents of the .eh_frame section:
#
-# 00000000 0000001c 00000000 CIE
-# Version: 1
+# 00000000 0000000000000014 00000000 CIE
+# Version: 3
# Augmentation: "zR"
# Code alignment factor: 1
# Data alignment factor: -8
@@ -35,20 +51,29 @@ check_largest_class:
# DW_CFA_nop
# DW_CFA_nop
#
-# 00000020 0000001c 00000024 FDE cie=00000000 pc=00000000..00000012
-# DW_CFA_advance_loc: 6 to 00000006
+# 000000.. 000000000000002c 0000001c FDE cie=00000000 pc=0000000000000000..0000000000000032
+# DW_CFA_advance_loc: 6 to 0000000000000006
# DW_CFA_offset: r13 at cfa-56
# DW_CFA_offset: r14 at cfa-48
# DW_CFA_offset: r15 at cfa-40
-# DW_CFA_advance_loc: 4 to 0000000a
-# DW_CFA_def_cfa_offset: 320
-# DW_CFA_nop
+# DW_CFA_advance_loc: 4 to 000000000000000a
+# DW_CFA_def_cfa_offset: 384
+# DW_CFA_advance_loc: 32 to 000000000000002a
+# DW_CFA_offset: r24 at cfa-224
+# DW_CFA_offset: r28 at cfa-216
+# DW_CFA_offset: r25 at cfa-208
+# DW_CFA_offset: r29 at cfa-200
+# DW_CFA_offset: r26 at cfa-192
+# DW_CFA_offset: r30 at cfa-184
+# DW_CFA_offset: r27 at cfa-176
+# DW_CFA_offset: r31 at cfa-168
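+# (Aside, not part of the upstream test: the CIE sets the initial CFA to
+#  %r15 + 160 (the s390x register save area), so after "aghi %r15, -224" the
+#  CFA offset becomes 160 + 224 = 384, and the FPR slots stored at
+#  160..216(%r15) sit at cfa-224..cfa-168. The DWARF numbers r24-r31 here are
+#  %f8-%f15 in the interleaved s390x FPR numbering, hence %f9 showing as r28.)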
# DW_CFA_nop
# DW_CFA_nop
# DW_CFA_nop
#
# CHECK: Contents of section .eh_frame:
-# CHECK-NEXT: 0000 00000014 00000000 017a5200 01780e01 .........zR..x..
-# CHECK-NEXT: 0010 1b0c0fa0 01000000 0000001c 0000001c ................
-# CHECK-NEXT: 0020 00000000 00000012 00468d07 8e068f05 .........F......
-# CHECK-NEXT: 0030 440ec002 00000000 D.......
+# CHECK-NEXT: 0000 00000014 00000000 037a5200 01780e01 {{.*}}
+# CHECK-NEXT: 0010 1b0c0fa0 01000000 0000002c 0000001c {{.*}}
+# CHECK-NEXT: 0020 00000000 00000032 00468d07 8e068f05 {{.*}}
+# CHECK-NEXT: 0030 440e8003 60981c9c 1b991a9d 199a189e {{.*}}
+# CHECK-NEXT: 0040 179b169f 15000000 {{.*}}
diff --git a/test/DebugInfo/SystemZ/eh_frame_personality.s b/test/DebugInfo/SystemZ/eh_frame_personality.s
index 46b46db1d806..456e0a6e6bdd 100644
--- a/test/DebugInfo/SystemZ/eh_frame_personality.s
+++ b/test/DebugInfo/SystemZ/eh_frame_personality.s
@@ -37,7 +37,7 @@ DW.ref.__gxx_personality_v0:
# Contents of the .eh_frame section:
#
# 00000000 0000001c 00000000 CIE
-# Version: 1
+# Version: 3
# Augmentation: "zPLR"
# Code alignment factor: 1
# Data alignment factor: -8
@@ -61,7 +61,7 @@ DW.ref.__gxx_personality_v0:
# DW_CFA_nop
#
# CHECK: Contents of section .eh_frame:
-# CHECK-NEXT: 0000 0000001c 00000000 017a504c 52000178 .........zPLR..x
+# CHECK-NEXT: 0000 0000001c 00000000 037a504c 52000178 .........zPLR..x
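+# (Aside, not in the upstream test: the byte changing from 01 to 03 is the CIE
+#  version field at offset 8, just before the augmentation string "zPLR"
+#  (7a 50 4c 52 00); it corresponds to the "Version: 3" line above.)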
# CHECK-NEXT: 0010 0e079b00 0000001b 1b0c0fa0 01000000 ................
# CHECK-NEXT: 0020 0000001c 00000024 00000000 00000012 .......$........
# CHECK-NEXT: 0030 04000000 00468e06 8f05440e c0020000 .....F....D.....
diff --git a/test/DebugInfo/SystemZ/lit.local.cfg b/test/DebugInfo/SystemZ/lit.local.cfg
index b12af09434be..5c02dd3614a4 100644
--- a/test/DebugInfo/SystemZ/lit.local.cfg
+++ b/test/DebugInfo/SystemZ/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'SystemZ' in targets:
+if not 'SystemZ' in config.root.targets:
config.unsupported = True
diff --git a/test/DebugInfo/SystemZ/variable-loc.ll b/test/DebugInfo/SystemZ/variable-loc.ll
index 560b47747dd8..23df1cb555d3 100644
--- a/test/DebugInfo/SystemZ/variable-loc.ll
+++ b/test/DebugInfo/SystemZ/variable-loc.ll
@@ -1,8 +1,10 @@
; RUN: llc -mtriple=s390x-linux-gnu -disable-fp-elim < %s | FileCheck %s
+; RUN: llc -mtriple=s390x-linux-gnu -disable-fp-elim -filetype=obj < %s \
+; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck --check-prefix=DEBUG %s
;
; This is a regression test making sure the location of variables is correct in
; debugging information, even if they're addressed via the frame pointer.
-; A copy of the AArch64 test, commandeered for SystemZ.
+; Originally a copy of the AArch64 test, commandeered for SystemZ.
;
; First make sure main_arr is where we expect it: %r11 + 164
;
@@ -10,20 +12,13 @@
; CHECK: aghi %r15, -568
; CHECK: la %r2, 164(%r11)
; CHECK: brasl %r14, populate_array@PLT
-;
-; CHECK: .Linfo_string7:
-; CHECK-NEXT: main_arr
-;
-; Now check that the debugging information reflects this:
-; CHECK: DW_TAG_variable
-; CHECK-NEXT: .long .Linfo_string7
-;
-; Rather hard-coded, but 145 => DW_OP_fbreg and the .ascii is the sleb128
-; encoding of 164:
-; CHECK: DW_AT_location
-; CHECK-NEXT: .byte 145
-; CHECK-NEXT: .ascii "\244\001"
-;
+
+; DEBUG: DW_TAG_variable
+; Rather hard-coded, but 0x91 => DW_OP_fbreg and 0xa4 0x01 is the SLEB128 encoding of 164 (decoded in the aside below).
+; DEBUG-NOT: DW_TAG
+; DEBUG: DW_AT_location {{.*}}(<0x3> 91 a4 01 )
+; DEBUG-NOT: DW_TAG
+; DEBUG: DW_AT_name {{.*}} "main_arr"
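+; (Aside, not part of the upstream test: the <0x3> 91 a4 01 block decodes as
+;  DW_OP_fbreg (0x91) followed by SLEB128(164): 164 = 0b10100100, the low
+;  seven bits 0x24 get the continuation bit -> 0xa4, and the remaining high
+;  bit is emitted as the final byte 0x01.)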
@.str = private unnamed_addr constant [13 x i8] c"Total is %d\0A\00", align 2
@@ -39,7 +34,7 @@ entry:
%retval = alloca i32, align 4
%main_arr = alloca [100 x i32], align 4
%val = alloca i32, align 4
- store i32 0, i32* %retval
+ store volatile i32 0, i32* %retval
call void @llvm.dbg.declare(metadata !{[100 x i32]* %main_arr}, metadata !17), !dbg !22
call void @llvm.dbg.declare(metadata !{i32* %val}, metadata !23), !dbg !24
%arraydecay = getelementptr inbounds [100 x i32]* %main_arr, i32 0, i32 0, !dbg !25
@@ -58,7 +53,7 @@ declare i32 @printf(i8*, ...)
!llvm.module.flags = !{!30}
!0 = metadata !{i32 786449, metadata !29, i32 12, metadata !"clang version 3.2 ", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/timnor01/a64-trunk/build/simple.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !11, metadata !14}
!5 = metadata !{i32 786478, metadata !29, metadata !6, metadata !"populate_array", metadata !"populate_array", metadata !"", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32*, i32)* @populate_array, null, null, metadata !1, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [populate_array]
!6 = metadata !{i32 786473, metadata !29} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/2010-08-10-DbgConstant.ll b/test/DebugInfo/X86/2010-08-10-DbgConstant.ll
index d0a2dfaa1f31..7f42e7b70ec6 100644
--- a/test/DebugInfo/X86/2010-08-10-DbgConstant.ll
+++ b/test/DebugInfo/X86/2010-08-10-DbgConstant.ll
@@ -16,7 +16,7 @@ declare void @bar(i32)
!0 = metadata !{i32 786478, metadata !12, metadata !1, metadata !"foo", metadata !"foo", metadata !"foo", i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, void ()* @foo, null, null, null, i32 3} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !12} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !12, i32 12, metadata !"clang 2.8", i1 false, metadata !"", i32 0, metadata !4, metadata !4, metadata !10, metadata !11, metadata !11, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !12, i32 12, metadata !"clang 2.8", i1 false, metadata !"", i32 0, metadata !4, metadata !4, metadata !10, metadata !11, metadata !14, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !12, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{null}
!5 = metadata !{i32 786471, i32 0, metadata !1, metadata !"ro", metadata !"ro", metadata !"ro", metadata !1, i32 1, metadata !6, i1 true, i1 true, i32 201, null} ; [ DW_TAG_constant ]
@@ -28,3 +28,4 @@ declare void @bar(i32)
!11 = metadata !{metadata !5}
!12 = metadata !{metadata !"/tmp/l.c", metadata !"/Volumes/Lalgate/clean/D"}
!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!14 = metadata !{}
diff --git a/test/DebugInfo/X86/2011-09-26-GlobalVarContext.ll b/test/DebugInfo/X86/2011-09-26-GlobalVarContext.ll
index cdfd9527d005..4dc747f566d2 100644
--- a/test/DebugInfo/X86/2011-09-26-GlobalVarContext.ll
+++ b/test/DebugInfo/X86/2011-09-26-GlobalVarContext.ll
@@ -19,16 +19,14 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!21}
-!0 = metadata !{i32 786449, metadata !20, i32 12, metadata !"clang version 3.0 (trunk)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !12, metadata !12, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !20, i32 12, metadata !"clang version 3.0 (trunk)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !12, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 720942, metadata !6, metadata !6, metadata !"f", metadata !"f", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @f, null, null, metadata !10, i32 0} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 0] [f]
+!5 = metadata !{i32 720942, metadata !6, metadata !6, metadata !"f", metadata !"f", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @f, null, null, null, i32 0} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 0] [f]
!6 = metadata !{i32 720937, metadata !20} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9}
!9 = metadata !{i32 720932, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !11}
-!11 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
!12 = metadata !{metadata !14}
!14 = metadata !{i32 720948, i32 0, null, metadata !"GLB", metadata !"GLB", metadata !"", metadata !6, i32 1, metadata !9, i32 0, i32 1, i32* @GLB, null} ; [ DW_TAG_variable ]
!15 = metadata !{i32 786688, metadata !16, metadata !"LOC", metadata !6, i32 4, metadata !9, i32 0, i32 0} ; [ DW_TAG_auto_variable ]
@@ -39,13 +37,19 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!20 = metadata !{metadata !"test.c", metadata !"/work/llvm/vanilla/test/DebugInfo"}
; CHECK: DW_TAG_variable
-; CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "GLB")
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "GLB")
+; CHECK-NOT: DW_TAG
; CHECK: DW_AT_decl_file [DW_FORM_data1] (0x01)
+; CHECK-NOT: DW_TAG
; CHECK: DW_AT_decl_line [DW_FORM_data1] (0x01)
; CHECK: DW_TAG_variable
-; CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "LOC")
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "LOC")
+; CHECK-NOT: DW_TAG
; CHECK: DW_AT_decl_file [DW_FORM_data1] (0x01)
+; CHECK-NOT: DW_TAG
; CHECK: DW_AT_decl_line [DW_FORM_data1] (0x04)
!21 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/2011-12-16-BadStructRef.ll b/test/DebugInfo/X86/2011-12-16-BadStructRef.ll
index 5e6a6014d696..21dccd71c4e2 100644
--- a/test/DebugInfo/X86/2011-12-16-BadStructRef.ll
+++ b/test/DebugInfo/X86/2011-12-16-BadStructRef.ll
@@ -90,7 +90,7 @@ entry:
!llvm.module.flags = !{!83}
!0 = metadata !{i32 720913, metadata !82, i32 4, metadata !"clang version 3.1 (trunk 146596)", i1 false, metadata !"", i32 0, metadata !1, metadata !3, metadata !27, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !9}
!5 = metadata !{i32 720898, metadata !82, null, metadata !"bar", i32 9, i64 128, i64 64, i32 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_class_type ] [bar] [line 9, size 128, align 64, offset 0] [def] [from ]
!6 = metadata !{i32 720937, metadata !82} ; [ DW_TAG_file_type ]
@@ -100,22 +100,18 @@ entry:
!10 = metadata !{metadata !11, metadata !13}
!11 = metadata !{i32 720909, metadata !82, metadata !9, metadata !"h", i32 5, i64 32, i64 32, i64 0, i32 0, metadata !12} ; [ DW_TAG_member ]
!12 = metadata !{i32 720932, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!13 = metadata !{i32 720942, metadata !82, metadata !9, metadata !"baz", metadata !"baz", metadata !"", i32 6, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !17, i32 0} ; [ DW_TAG_subprogram ]
+!13 = metadata !{i32 720942, metadata !82, metadata !9, metadata !"baz", metadata !"baz", metadata !"", i32 6, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 0} ; [ DW_TAG_subprogram ]
!14 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!15 = metadata !{null, metadata !16, metadata !12}
!16 = metadata !{i32 720911, i32 0, null, i32 0, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !9} ; [ DW_TAG_pointer_type ]
-!17 = metadata !{metadata !18}
-!18 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
!19 = metadata !{i32 720909, metadata !82, metadata !5, metadata !"b_ref", i32 12, i64 64, i64 64, i64 64, i32 0, metadata !20} ; [ DW_TAG_member ]
!20 = metadata !{i32 720912, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !9} ; [ DW_TAG_reference_type ]
-!21 = metadata !{i32 720942, metadata !82, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 13, metadata !22, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !25, i32 0} ; [ DW_TAG_subprogram ]
+!21 = metadata !{i32 720942, metadata !82, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 13, metadata !22, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 0} ; [ DW_TAG_subprogram ]
!22 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !23, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!23 = metadata !{null, metadata !24, metadata !12}
!24 = metadata !{i32 720911, i32 0, null, i32 0, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !5} ; [ DW_TAG_pointer_type ]
-!25 = metadata !{metadata !26}
-!26 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
!27 = metadata !{metadata !29, metadata !37, metadata !40, metadata !43, metadata !46}
-!29 = metadata !{i32 720942, metadata !82, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 17, metadata !30, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32, i8**)* @main, null, null, metadata !47, i32 0} ; [ DW_TAG_subprogram ] [line 17] [def] [scope 0] [main]
+!29 = metadata !{i32 720942, metadata !82, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 17, metadata !30, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32, i8**)* @main, null, null, null, i32 0} ; [ DW_TAG_subprogram ] [line 17] [def] [scope 0] [main]
!30 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !31, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!31 = metadata !{metadata !12, metadata !12, metadata !32}
!32 = metadata !{i32 720911, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !33} ; [ DW_TAG_pointer_type ]
@@ -123,18 +119,16 @@ entry:
!34 = metadata !{i32 720932, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
!35 = metadata !{metadata !36}
!36 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
-!37 = metadata !{i32 720942, metadata !82, null, metadata !"bar", metadata !"bar", metadata !"_ZN3barC1Ei", i32 13, metadata !22, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.bar*, i32)* @_ZN3barC1Ei, null, metadata !21, metadata !47, i32 0} ; [ DW_TAG_subprogram ] [line 13] [def] [scope 0] [bar]
+!37 = metadata !{i32 720942, metadata !82, null, metadata !"bar", metadata !"bar", metadata !"_ZN3barC1Ei", i32 13, metadata !22, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.bar*, i32)* @_ZN3barC1Ei, null, metadata !21, null, i32 0} ; [ DW_TAG_subprogram ] [line 13] [def] [scope 0] [bar]
!38 = metadata !{metadata !39}
!39 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
-!40 = metadata !{i32 720942, metadata !82, null, metadata !"bar", metadata !"bar", metadata !"_ZN3barC2Ei", i32 13, metadata !22, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.bar*, i32)* @_ZN3barC2Ei, null, metadata !21, metadata !47, i32 0} ; [ DW_TAG_subprogram ] [line 13] [def] [scope 0] [bar]
+!40 = metadata !{i32 720942, metadata !82, null, metadata !"bar", metadata !"bar", metadata !"_ZN3barC2Ei", i32 13, metadata !22, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.bar*, i32)* @_ZN3barC2Ei, null, metadata !21, null, i32 0} ; [ DW_TAG_subprogram ] [line 13] [def] [scope 0] [bar]
!41 = metadata !{metadata !42}
!42 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
-!43 = metadata !{i32 720942, metadata !82, null, metadata !"baz", metadata !"baz", metadata !"_ZN3bazC1Ei", i32 6, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.baz*, i32)* @_ZN3bazC1Ei, null, metadata !13, metadata !47, i32 0} ; [ DW_TAG_subprogram ] [line 6] [def] [scope 0] [baz]
+!43 = metadata !{i32 720942, metadata !82, null, metadata !"baz", metadata !"baz", metadata !"_ZN3bazC1Ei", i32 6, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.baz*, i32)* @_ZN3bazC1Ei, null, metadata !13, null, i32 0} ; [ DW_TAG_subprogram ] [line 6] [def] [scope 0] [baz]
!44 = metadata !{metadata !45}
!45 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
-!46 = metadata !{i32 720942, metadata !82, null, metadata !"baz", metadata !"baz", metadata !"_ZN3bazC2Ei", i32 6, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.baz*, i32)* @_ZN3bazC2Ei, null, metadata !13, metadata !47, i32 0} ; [ DW_TAG_subprogram ] [line 6] [def] [scope 0] [baz]
-!47 = metadata !{metadata !48}
-!48 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
+!46 = metadata !{i32 720942, metadata !82, null, metadata !"baz", metadata !"baz", metadata !"_ZN3bazC2Ei", i32 6, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.baz*, i32)* @_ZN3bazC2Ei, null, metadata !13, null, i32 0} ; [ DW_TAG_subprogram ] [line 6] [def] [scope 0] [baz]
!49 = metadata !{i32 721153, metadata !29, metadata !"argc", metadata !6, i32 16777232, metadata !12, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!50 = metadata !{i32 16, i32 14, metadata !29, null}
!51 = metadata !{i32 721153, metadata !29, metadata !"argv", metadata !6, i32 33554448, metadata !32, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
diff --git a/test/DebugInfo/X86/DW_AT_byte_size.ll b/test/DebugInfo/X86/DW_AT_byte_size.ll
index 87e242a0bb13..59921bd245c7 100644
--- a/test/DebugInfo/X86/DW_AT_byte_size.ll
+++ b/test/DebugInfo/X86/DW_AT_byte_size.ll
@@ -27,9 +27,9 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.module.flags = !{!21}
!0 = metadata !{i32 786449, metadata !20, i32 4, metadata !"clang version 3.1 (trunk 150996)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 786478, metadata !20, metadata !6, metadata !"foo", metadata !"foo", metadata !"_Z3fooP1A", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (%struct.A*)* @_Z3fooP1A, null, null, metadata !14, i32 3} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786478, metadata !20, metadata !6, metadata !"foo", metadata !"foo", metadata !"_Z3fooP1A", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (%struct.A*)* @_Z3fooP1A, null, null, null, i32 3} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !20} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9, metadata !10}
@@ -38,8 +38,6 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!11 = metadata !{i32 786434, metadata !20, null, metadata !"A", i32 1, i64 32, i64 32, i32 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_class_type ] [A] [line 1, size 32, align 32, offset 0] [def] [from ]
!12 = metadata !{metadata !13}
!13 = metadata !{i32 786445, metadata !20, metadata !11, metadata !"b", i32 1, i64 32, i64 32, i64 0, i32 0, metadata !9} ; [ DW_TAG_member ]
-!14 = metadata !{metadata !15}
-!15 = metadata !{i32 786468} ; [ DW_TAG_base_type ]
!16 = metadata !{i32 786689, metadata !5, metadata !"a", metadata !6, i32 16777219, metadata !10, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!17 = metadata !{i32 3, i32 13, metadata !5, null}
!18 = metadata !{i32 4, i32 3, metadata !19, null}
diff --git a/test/DebugInfo/X86/DW_AT_linkage_name.ll b/test/DebugInfo/X86/DW_AT_linkage_name.ll
new file mode 100644
index 000000000000..dce234aa9002
--- /dev/null
+++ b/test/DebugInfo/X86/DW_AT_linkage_name.ll
@@ -0,0 +1,116 @@
+; RUN: llc -mtriple=x86_64-apple-macosx %s -o %t -filetype=obj
+; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
+;
+; struct A {
+; A(int i);
+; ~A();
+; };
+;
+; A::~A() {}
+;
+; void foo() {
+; A a(1);
+; }
+;
+; rdar://problem/16362674
+;
+; Test that we do not emit a linkage name for the declaration of a destructor.
+; Test that we do emit a linkage name for a specific instance of it.
+
+; CHECK: DW_TAG_subprogram
+; CHECK: [[A_DTOR:.*]]: DW_TAG_subprogram
+; CHECK: DW_AT_name {{.*}} "~A"
+; CHECK-NOT: DW_AT_MIPS_linkage_name
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZN1AD2Ev"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_specification {{.*}}[[A_DTOR]]
+
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%struct.A = type { i8 }
+
+; Function Attrs: nounwind ssp uwtable
+define void @_ZN1AD2Ev(%struct.A* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %struct.A*, align 8
+ store %struct.A* %this, %struct.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.A** %this.addr}, metadata !26), !dbg !28
+ %this1 = load %struct.A** %this.addr
+ ret void, !dbg !29
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind ssp uwtable
+define void @_ZN1AD1Ev(%struct.A* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %struct.A*, align 8
+ store %struct.A* %this, %struct.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.A** %this.addr}, metadata !30), !dbg !31
+ %this1 = load %struct.A** %this.addr
+ call void @_ZN1AD2Ev(%struct.A* %this1), !dbg !32
+ ret void, !dbg !33
+}
+
+; Function Attrs: ssp uwtable
+define void @_Z3foov() #2 {
+entry:
+ %a = alloca %struct.A, align 1
+ call void @llvm.dbg.declare(metadata !{%struct.A* %a}, metadata !34), !dbg !35
+ call void @_ZN1AC1Ei(%struct.A* %a, i32 1), !dbg !35
+ call void @_ZN1AD1Ev(%struct.A* %a), !dbg !36
+ ret void, !dbg !36
+}
+
+declare void @_ZN1AC1Ei(%struct.A*, i32)
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { ssp uwtable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!23, !24}
+!llvm.ident = !{!25}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !16, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [linkage-name.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"linkage-name.cpp", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"A", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_structure_type ] [A] [line 1, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !6, metadata !12}
+!6 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"", i32 2, metadata !7, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !11, i32 2} ; [ DW_TAG_subprogram ] [line 2] [A]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{null, metadata !9, metadata !10}
+!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!10 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!11 = metadata !{i32 786468}
+!12 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"~A", metadata !"~A", metadata !"", i32 3, metadata !13, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !15, i32 3} ; [ DW_TAG_subprogram ] [line 3] [~A]
+!13 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !14, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!14 = metadata !{null, metadata !9}
+!15 = metadata !{i32 786468}
+!16 = metadata !{metadata !17, metadata !18, metadata !19}
+!17 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"~A", metadata !"~A", metadata !"_ZN1AD2Ev", i32 6, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.A*)* @_ZN1AD2Ev, null, metadata !12, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [~A]
+!18 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"~A", metadata !"~A", metadata !"_ZN1AD1Ev", i32 6, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.A*)* @_ZN1AD1Ev, null, metadata !12, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [~A]
+!19 = metadata !{i32 786478, metadata !1, metadata !20, metadata !"foo", metadata !"foo", metadata !"_Z3foov", i32 10, metadata !21, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z3foov, null, null, metadata !2, i32 10} ; [ DW_TAG_subprogram ] [line 10] [def] [foo]
+!20 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [linkage-name.cpp]
+!21 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !22, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!22 = metadata !{null}
+!23 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!24 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!25 = metadata !{metadata !"clang version 3.5.0 "}
+!26 = metadata !{i32 786689, metadata !17, metadata !"this", null, i32 16777216, metadata !27, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!27 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1A]
+!28 = metadata !{i32 0, i32 0, metadata !17, null}
+!29 = metadata !{i32 8, i32 0, metadata !17, null} ; [ DW_TAG_imported_declaration ]
+!30 = metadata !{i32 786689, metadata !18, metadata !"this", null, i32 16777216, metadata !27, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!31 = metadata !{i32 0, i32 0, metadata !18, null}
+!32 = metadata !{i32 6, i32 0, metadata !18, null}
+!33 = metadata !{i32 8, i32 0, metadata !18, null} ; [ DW_TAG_imported_declaration ]
+!34 = metadata !{i32 786688, metadata !19, metadata !"a", metadata !20, i32 11, metadata !"_ZTS1A", i32 0, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 11]
+!35 = metadata !{i32 11, i32 0, metadata !19, null}
+!36 = metadata !{i32 12, i32 0, metadata !19, null}
diff --git a/test/DebugInfo/X86/DW_AT_location-reference.ll b/test/DebugInfo/X86/DW_AT_location-reference.ll
index bdd0e044bf13..f31b0ad3259a 100644
--- a/test/DebugInfo/X86/DW_AT_location-reference.ll
+++ b/test/DebugInfo/X86/DW_AT_location-reference.ll
@@ -1,5 +1,10 @@
-; RUN: llc -O1 -mtriple=x86_64-apple-darwin < %s | FileCheck -check-prefix=DARWIN %s
-; RUN: llc -O1 -mtriple=x86_64-pc-linux-gnu < %s | FileCheck -check-prefix=LINUX %s
+; RUN: llc -O1 -filetype=obj -mtriple=x86_64-apple-darwin < %s > %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+; RUN: llvm-objdump -r %t | FileCheck -check-prefix=DARWIN %s
+; RUN: llc -O1 -filetype=obj -mtriple=x86_64-pc-linux-gnu < %s > %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+; RUN: llvm-objdump -r %t | FileCheck -check-prefix=LINUX %s
+
; PR9493
; Adapted from the original test case in r127757.
; We use 'llc -O1' to induce variable 'x' to live in different locations.
@@ -24,22 +29,31 @@
; }
; // The 'x' variable and its symbol reference location
-; DARWIN: DW_TAG_variable
-; DARWIN-NEXT: ## DW_AT_name
-; DARWIN-NEXT: .long Lset{{[0-9]+}}
-; DARWIN-NEXT: ## DW_AT_decl_file
-; DARWIN-NEXT: ## DW_AT_decl_line
-; DARWIN-NEXT: ## DW_AT_type
-; DARWIN-NEXT: Lset{{[0-9]+}} = Ldebug_loc{{[0-9]+}}-Lsection_debug_loc ## DW_AT_location
-; DARWIN-NEXT: .long Lset{{[0-9]+}}
-
-; LINUX: DW_TAG_variable
-; LINUX-NEXT: # DW_AT_name
-; LINUX-NEXT: # DW_AT_decl_file
-; LINUX-NEXT: # DW_AT_decl_line
-; LINUX-NEXT: # DW_AT_type
-; LINUX-NEXT: .long .Ldebug_loc{{[0-9]+}} # DW_AT_location
+; CHECK: .debug_info contents:
+; CHECK: DW_TAG_variable
+; CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x00000000)
+; CHECK-NEXT: DW_AT_name {{.*}} "x"
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+; CHECK-NEXT: DW_AT_type
+
+; Check that the location contains only 4 ranges - this verifies that the 4th
+; and 5th ranges were successfully merged into a single range.
+; CHECK: .debug_loc contents:
+; CHECK: 0x00000000:
+; CHECK: Beginning address offset:
+; CHECK: Beginning address offset:
+; CHECK: Beginning address offset:
+; CHECK: Beginning address offset:
+; CHECK-NOT: Beginning address offset:
+
+; Check that we have no relocations in Darwin's output.
+; DARWIN-NOT: X86_64_RELOC{{.*}} __debug_loc
+; Check we have a relocation for the debug_loc entry in Linux output.
+; LINUX: RELOCATION RECORDS FOR [.rela.debug_info]
+; LINUX-NOT: RELOCATION RECORDS
+; LINUX: R_X86_64{{.*}} .debug_loc+0
; ModuleID = 'simple.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32"
diff --git a/test/DebugInfo/X86/DW_AT_object_pointer.ll b/test/DebugInfo/X86/DW_AT_object_pointer.ll
index 6e6c3a177ab3..4b9fae8e5af8 100644
--- a/test/DebugInfo/X86/DW_AT_object_pointer.ll
+++ b/test/DebugInfo/X86/DW_AT_object_pointer.ll
@@ -7,7 +7,8 @@
; CHECK: DW_TAG_class_type
; CHECK: DW_AT_object_pointer [DW_FORM_ref4] (cu + 0x{{[0-9a-f]*}} => {[[PARAM:0x[0-9a-f]*]]})
; CHECK: [[PARAM]]: DW_TAG_formal_parameter
-; CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "this")
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "this")
%class.A = type { i32 }
@@ -51,7 +52,7 @@ entry:
!llvm.module.flags = !{!38}
!0 = metadata !{i32 786449, metadata !37, i32 4, metadata !"clang version 3.2 (trunk 163586) (llvm/trunk 163570)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/echristo/debug-tests/bar.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !10, metadata !20}
!5 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"foo", metadata !"foo", metadata !"_Z3fooi", i32 7, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @_Z3fooi, null, null, metadata !1, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [foo]
!6 = metadata !{i32 786473, metadata !37} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/DW_AT_specification.ll b/test/DebugInfo/X86/DW_AT_specification.ll
index 4d7ef4fd36f6..4f45f367448f 100644
--- a/test/DebugInfo/X86/DW_AT_specification.ll
+++ b/test/DebugInfo/X86/DW_AT_specification.ll
@@ -3,10 +3,11 @@
; test that the DW_AT_specification is a back edge in the file.
-; CHECK: DW_TAG_subprogram [{{[0-9]+}}] *
-; CHECK: DW_AT_specification [DW_FORM_ref4] (cu + 0x[[OFFSET:[0-9a-f]*]] => {0x0000[[OFFSET]]})
-; CHECK: 0x0000[[OFFSET]]: DW_TAG_subprogram [{{[0-9]+}}] *
-; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "bar")
+; CHECK: [[BAR_DECL:0x[0-9a-f]*]]: DW_TAG_subprogram
+; CHECK-NEXT: DW_AT_MIPS_linkage_name {{.*}} "_ZN3foo3barEv"
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_specification {{.*}} {[[BAR_DECL]]}
@_ZZN3foo3barEvE1x = constant i32 0, align 4
@@ -19,27 +20,23 @@ entry:
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!28}
-!0 = metadata !{i32 786449, metadata !27, i32 4, metadata !"clang version 3.0 ()", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !18, metadata !18, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !27, i32 4, metadata !"clang version 3.0 ()", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !18, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 720942, metadata !6, null, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEv", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN3foo3barEv, null, metadata !11, metadata !16, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [bar]
+!5 = metadata !{i32 720942, metadata !6, null, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEv", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN3foo3barEv, null, metadata !11, null, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [bar]
!6 = metadata !{i32 720937, metadata !27} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{null, metadata !9}
!9 = metadata !{i32 786447, i32 0, null, i32 0, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !10} ; [ DW_TAG_pointer_type ]
!10 = metadata !{i32 786451, metadata !27, null, metadata !"foo", i32 1, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [foo] [line 1, size 0, align 0, offset 0] [decl] [from ]
-!11 = metadata !{i32 720942, metadata !6, metadata !12, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEv", i32 2, metadata !7, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !14, i32 2} ; [ DW_TAG_subprogram ]
+!11 = metadata !{i32 720942, metadata !6, metadata !12, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEv", i32 2, metadata !7, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 2} ; [ DW_TAG_subprogram ]
!12 = metadata !{i32 720898, metadata !27, null, metadata !"foo", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !13, i32 0, null, null} ; [ DW_TAG_class_type ]
!13 = metadata !{metadata !11}
-!14 = metadata !{metadata !15}
-!15 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
-!16 = metadata !{metadata !17}
-!17 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
!18 = metadata !{metadata !20}
!20 = metadata !{i32 720948, i32 0, metadata !5, metadata !"x", metadata !"x", metadata !"", metadata !6, i32 5, metadata !21, i32 1, i32 1, i32* @_ZZN3foo3barEvE1x, null} ; [ DW_TAG_variable ]
!21 = metadata !{i32 720934, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !22} ; [ DW_TAG_const_type ]
!22 = metadata !{i32 720932, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
!25 = metadata !{i32 6, i32 1, metadata !26, null}
-!26 = metadata !{i32 786443, metadata !5, i32 4, i32 17, metadata !6, i32 0} ; [ DW_TAG_lexical_block ]
+!26 = metadata !{i32 786443, metadata !6, metadata !5, i32 4, i32 17, i32 0} ; [ DW_TAG_lexical_block ]
!27 = metadata !{metadata !"nsNativeAppSupportBase.ii", metadata !"/Users/espindola/mozilla-central/obj-x86_64-apple-darwin11.2.0/toolkit/library"}
!28 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/DW_AT_stmt_list_sec_offset.ll b/test/DebugInfo/X86/DW_AT_stmt_list_sec_offset.ll
index 0c08f23e87ad..f16cbb061bf3 100644
--- a/test/DebugInfo/X86/DW_AT_stmt_list_sec_offset.ll
+++ b/test/DebugInfo/X86/DW_AT_stmt_list_sec_offset.ll
@@ -1,13 +1,16 @@
; RUN: llc -mtriple=i686-w64-mingw32 -o %t -filetype=obj %s
; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s
+; RUN: llc -mtriple=i686-w64-mingw32 -o %t -filetype=obj -dwarf-version=3 %s
+; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s -check-prefix=DWARF3
-; CHECK: DW_AT_stmt_list [DW_FORM_sec_offset]
+; CHECK: DW_AT_stmt_list [DW_FORM_sec_offset]
+; DWARF3: DW_AT_stmt_list [DW_FORM_data4]
;
; generated from:
; clang -g -S -emit-llvm test.c -o test.ll
; int main()
; {
-; return 0;
+; return 0;
; }
; ModuleID = 'test.c'
@@ -29,13 +32,13 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [C:\Projects/test.c] [DW_LANG_C99]
!1 = metadata !{metadata !"test.c", metadata !"C:\5CProjects"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @main, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 2] [main]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [C:\Projects/test.c]
!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!7 = metadata !{metadata !8}
!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
!10 = metadata !{i32 3, i32 0, metadata !4, null}
!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/DW_TAG_friend.ll b/test/DebugInfo/X86/DW_TAG_friend.ll
index 2da962752a63..2facc409e8d1 100644
--- a/test/DebugInfo/X86/DW_TAG_friend.ll
+++ b/test/DebugInfo/X86/DW_TAG_friend.ll
@@ -18,8 +18,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!29}
-!0 = metadata !{i32 786449, metadata !28, i32 4, metadata !"clang version 3.1 (trunk 153413) (llvm/trunk 153428)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !28, i32 4, metadata !"clang version 3.1 (trunk 153413) (llvm/trunk 153428)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !17}
!5 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 10, metadata !7, i32 0, i32 1, %class.A* @a, null} ; [ DW_TAG_variable ]
!6 = metadata !{i32 786473, metadata !28} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/aligned_stack_var.ll b/test/DebugInfo/X86/aligned_stack_var.ll
index d733dfda9465..54484acc785c 100644
--- a/test/DebugInfo/X86/aligned_stack_var.ll
+++ b/test/DebugInfo/X86/aligned_stack_var.ll
@@ -28,7 +28,7 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.module.flags = !{!15}
!0 = metadata !{i32 786449, metadata !14, i32 4, metadata !"clang version 3.2 (trunk 155696:155697) (llvm/trunk 155696)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !14, metadata !6, metadata !"run", metadata !"run", metadata !"_Z3runv", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z3runv, null, null, metadata !1, i32 1} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !14} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/arange.ll b/test/DebugInfo/X86/arange.ll
new file mode 100644
index 000000000000..4eea646968b4
--- /dev/null
+++ b/test/DebugInfo/X86/arange.ll
@@ -0,0 +1,46 @@
+; REQUIRES: object-emission
+
+; RUN: llc -mtriple=x86_64-linux -O0 -filetype=obj -generate-arange-section < %s | llvm-dwarfdump -debug-dump=aranges - | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux -O0 -filetype=obj -generate-arange-section < %s | llvm-readobj --relocations - | FileCheck --check-prefix=OBJ %s
+
+; extern int i;
+; template<int *x>
+; struct foo {
+; };
+;
+; foo<&i> f;
+
+; Check that we only have one arange in this compilation unit (it will be for 'f'), and not an extra one (for 'i', since it isn't actually defined in this CU).
+
+; CHECK: Address Range Header
+; CHECK-NEXT: [0x
+; CHECK-NOT: [0x
+
+; Check that we have a relocation back to the debug_info section from the debug_aranges section
+; OBJ: debug_aranges
+; OBJ-NEXT: R_X86_64_32 .debug_info 0x0
+
+%struct.foo = type { i8 }
+
+@f = global %struct.foo zeroinitializer, align 1
+@i = external global i32
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!12, !13}
+!llvm.ident = !{!14}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !2, metadata !9, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/simple.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"simple.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"foo<&i>", i32 3, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, metadata !5, metadata !"_ZTS3fooIXadL_Z1iEEE"} ; [ DW_TAG_structure_type ] [foo<&i>] [line 3, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 786480, null, metadata !"x", metadata !7, i32* @i, null, i32 0, i32 0} ; [ DW_TAG_template_value_parameter ]
+!7 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !8} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786484, i32 0, null, metadata !"f", metadata !"f", metadata !"", metadata !11, i32 6, metadata !4, i32 0, i32 1, %struct.foo* @f, null} ; [ DW_TAG_variable ] [f] [line 6] [def]
+!11 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/simple.cpp]
+!12 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!14 = metadata !{metadata !"clang version 3.5 "}
diff --git a/test/DebugInfo/X86/arguments.ll b/test/DebugInfo/X86/arguments.ll
index 1d51049a5f6b..989e4fff484a 100644
--- a/test/DebugInfo/X86/arguments.ll
+++ b/test/DebugInfo/X86/arguments.ll
@@ -15,13 +15,16 @@
; CHECK: debug_info contents
; CHECK: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_MIPS_linkage_name{{.*}}"_Z4func3fooS_"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name{{.*}}"_Z4func3fooS_"
; CHECK-NOT: NULL
; CHECK: DW_TAG_formal_parameter
-; CHECK-NEXT: DW_AT_name{{.*}}"f"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"f"
; CHECK-NOT: NULL
; CHECK: DW_TAG_formal_parameter
-; CHECK-NEXT: DW_AT_name{{.*}}"g"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"g"
%struct.foo = type { i32 }
@@ -48,7 +51,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/blaikie/dev/scratch/scratch.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"scratch.cpp", metadata !"/usr/local/google/home/blaikie/dev/scratch"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func", metadata !"func", metadata !"_Z4func3fooS_", i32 6, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.foo*, %struct.foo*)* @_Z4func3fooS_, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [func]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/blaikie/dev/scratch/scratch.cpp]
diff --git a/test/DebugInfo/X86/array.ll b/test/DebugInfo/X86/array.ll
new file mode 100644
index 000000000000..dc6c7a406507
--- /dev/null
+++ b/test/DebugInfo/X86/array.ll
@@ -0,0 +1,101 @@
+; ModuleID = 'array.c'
+;
+; From (clang -g -c -O1):
+;
+; void f(int* p) {
+; p[0] = 42;
+; }
+;
+; int main(int argc, char** argv) {
+; int array[4] = { 0, 1, 2, 3 };
+; f(array);
+; return array[0];
+; }
+;
+; RUN: llc -filetype=asm %s -o - | FileCheck %s
+; Test that we only emit register-indirect locations for the array "array".
+; rdar://problem/14874886
+;
+; CHECK: ##DEBUG_VALUE: main:array <- [R{{.*}}+0]
+; CHECK-NOT: ##DEBUG_VALUE: main:array <- R{{.*}}
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+@main.array = private unnamed_addr constant [4 x i32] [i32 0, i32 1, i32 2, i32 3], align 16
+
+; Function Attrs: nounwind ssp uwtable
+define void @f(i32* nocapture %p) #0 {
+ tail call void @llvm.dbg.value(metadata !{i32* %p}, i64 0, metadata !11), !dbg !28
+ store i32 42, i32* %p, align 4, !dbg !29, !tbaa !30
+ ret void, !dbg !34
+}
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 {
+ %array = alloca [4 x i32], align 16
+ tail call void @llvm.dbg.value(metadata !{i32 %argc}, i64 0, metadata !19), !dbg !35
+ tail call void @llvm.dbg.value(metadata !{i8** %argv}, i64 0, metadata !20), !dbg !35
+ tail call void @llvm.dbg.value(metadata !{[4 x i32]* %array}, i64 0, metadata !21), !dbg !36
+ %1 = bitcast [4 x i32]* %array to i8*, !dbg !36
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([4 x i32]* @main.array to i8*), i64 16, i32 16, i1 false), !dbg !36
+ tail call void @llvm.dbg.value(metadata !{[4 x i32]* %array}, i64 0, metadata !21), !dbg !36
+ %2 = getelementptr inbounds [4 x i32]* %array, i64 0, i64 0, !dbg !37
+ call void @f(i32* %2), !dbg !37
+ tail call void @llvm.dbg.value(metadata !{[4 x i32]* %array}, i64 0, metadata !21), !dbg !36
+ %3 = load i32* %2, align 16, !dbg !38, !tbaa !30
+ ret i32 %3, !dbg !38
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!25, !26}
+!llvm.ident = !{!27}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/array.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"array.c", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !12}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f", metadata !"f", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*)* @f, null, null, metadata !10, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [f]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/array.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8}
+!8 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = metadata !{metadata !11}
+!11 = metadata !{i32 786689, metadata !4, metadata !"p", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [p] [line 1]
+!12 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 5, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !18, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [main]
+!13 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !14, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!14 = metadata !{metadata !9, metadata !9, metadata !15}
+!15 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!16 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !17} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from char]
+!17 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!18 = metadata !{metadata !19, metadata !20, metadata !21}
+!19 = metadata !{i32 786689, metadata !12, metadata !"argc", metadata !5, i32 16777221, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [argc] [line 5]
+!20 = metadata !{i32 786689, metadata !12, metadata !"argv", metadata !5, i32 33554437, metadata !15, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [argv] [line 5]
+!21 = metadata !{i32 786688, metadata !12, metadata !"array", metadata !5, i32 6, metadata !22, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [array] [line 6]
+!22 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 128, i64 32, i32 0, i32 0, metadata !9, metadata !23, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 32, offset 0] [from int]
+!23 = metadata !{metadata !24}
+!24 = metadata !{i32 786465, i64 0, i64 4} ; [ DW_TAG_subrange_type ] [0, 3]
+!25 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!26 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!27 = metadata !{metadata !"clang version 3.5.0 "}
+!28 = metadata !{i32 1, i32 0, metadata !4, null}
+!29 = metadata !{i32 2, i32 0, metadata !4, null}
+!30 = metadata !{metadata !31, metadata !31, i64 0}
+!31 = metadata !{metadata !"int", metadata !32, i64 0}
+!32 = metadata !{metadata !"omnipotent char", metadata !33, i64 0}
+!33 = metadata !{metadata !"Simple C/C++ TBAA"}
+!34 = metadata !{i32 3, i32 0, metadata !4, null}
+!35 = metadata !{i32 5, i32 0, metadata !12, null}
+!36 = metadata !{i32 6, i32 0, metadata !12, null}
+!37 = metadata !{i32 7, i32 0, metadata !12, null}
+!38 = metadata !{i32 8, i32 0, metadata !12, null} ; [ DW_TAG_imported_declaration ]
diff --git a/test/DebugInfo/X86/array2.ll b/test/DebugInfo/X86/array2.ll
new file mode 100644
index 000000000000..2dc2af325b51
--- /dev/null
+++ b/test/DebugInfo/X86/array2.ll
@@ -0,0 +1,107 @@
+; ModuleID = 'array.c'
+;
+; From (clang -g -c -O0):
+;
+; void f(int* p) {
+; p[0] = 42;
+; }
+;
+; int main(int argc, char** argv) {
+; int array[4] = { 0, 1, 2, 3 };
+; f(array);
+; return array[0];
+; }
+;
+; RUN: opt %s -O2 -S -o - | FileCheck %s
+; Test that we do not lower dbg.declares for arrays.
+;
+; CHECK: define i32 @main
+; CHECK: call void @llvm.dbg.value
+; CHECK: call void @llvm.dbg.value
+; CHECK: call void @llvm.dbg.declare
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+@main.array = private unnamed_addr constant [4 x i32] [i32 0, i32 1, i32 2, i32 3], align 16
+
+; Function Attrs: nounwind ssp uwtable
+define void @f(i32* %p) #0 {
+entry:
+ %p.addr = alloca i32*, align 8
+ store i32* %p, i32** %p.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i32** %p.addr}, metadata !19), !dbg !20
+ %0 = load i32** %p.addr, align 8, !dbg !21
+ %arrayidx = getelementptr inbounds i32* %0, i64 0, !dbg !21
+ store i32 42, i32* %arrayidx, align 4, !dbg !21
+ ret void, !dbg !22
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @main(i32 %argc, i8** %argv) #0 {
+entry:
+ %retval = alloca i32, align 4
+ %argc.addr = alloca i32, align 4
+ %argv.addr = alloca i8**, align 8
+ %array = alloca [4 x i32], align 16
+ store i32 0, i32* %retval
+ store i32 %argc, i32* %argc.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %argc.addr}, metadata !23), !dbg !24
+ store i8** %argv, i8*** %argv.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i8*** %argv.addr}, metadata !25), !dbg !24
+ call void @llvm.dbg.declare(metadata !{[4 x i32]* %array}, metadata !26), !dbg !30
+ %0 = bitcast [4 x i32]* %array to i8*, !dbg !30
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([4 x i32]* @main.array to i8*), i64 16, i32 16, i1 false), !dbg !30
+ %arraydecay = getelementptr inbounds [4 x i32]* %array, i32 0, i32 0, !dbg !31
+ call void @f(i32* %arraydecay), !dbg !31
+ %arrayidx = getelementptr inbounds [4 x i32]* %array, i32 0, i64 0, !dbg !32
+ %1 = load i32* %arrayidx, align 4, !dbg !32
+ ret i32 %1, !dbg !32
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #2
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!16, !17}
+!llvm.ident = !{!18}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [array.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"array.c", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !10}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f", metadata !"f", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32*)* @f, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [f]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [array.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8}
+!8 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 5, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32, i8**)* @main, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [main]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{metadata !9, metadata !9, metadata !13}
+!13 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !14} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!14 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !15} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from char]
+!15 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!16 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!17 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!18 = metadata !{metadata !"clang version 3.5.0 "}
+!19 = metadata !{i32 786689, metadata !4, metadata !"p", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [p] [line 1]
+!20 = metadata !{i32 1, i32 0, metadata !4, null}
+!21 = metadata !{i32 2, i32 0, metadata !4, null}
+!22 = metadata !{i32 3, i32 0, metadata !4, null}
+!23 = metadata !{i32 786689, metadata !10, metadata !"argc", metadata !5, i32 16777221, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [argc] [line 5]
+!24 = metadata !{i32 5, i32 0, metadata !10, null}
+!25 = metadata !{i32 786689, metadata !10, metadata !"argv", metadata !5, i32 33554437, metadata !13, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [argv] [line 5]
+!26 = metadata !{i32 786688, metadata !10, metadata !"array", metadata !5, i32 6, metadata !27, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [array] [line 6]
+!27 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 128, i64 32, i32 0, i32 0, metadata !9, metadata !28, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 32, offset 0] [from int]
+!28 = metadata !{metadata !29}
+!29 = metadata !{i32 786465, i64 0, i64 4} ; [ DW_TAG_subrange_type ] [0, 3]
+!30 = metadata !{i32 6, i32 0, metadata !10, null}
+!31 = metadata !{i32 7, i32 0, metadata !10, null}
+!32 = metadata !{i32 8, i32 0, metadata !10, null} ; [ DW_TAG_imported_declaration ]
diff --git a/test/DebugInfo/X86/block-capture.ll b/test/DebugInfo/X86/block-capture.ll
index 2f966a71d717..e842afe9446a 100644
--- a/test/DebugInfo/X86/block-capture.ll
+++ b/test/DebugInfo/X86/block-capture.ll
@@ -1,11 +1,18 @@
; RUN: llc -mtriple=x86_64-apple-darwin %s -o %t -filetype=obj
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin %s -o %t -filetype=obj -dwarf-version=3
+; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s -check-prefix=DWARF3
; Checks that we emit debug info for the block variable declare.
-; CHECK: DW_TAG_subprogram [3]
-; CHECK: DW_TAG_variable [5]
-; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[{{.*}}] = "block")
-; CHECK: DW_AT_location [DW_FORM_sec_offset] ({{.*}})
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_TAG_variable
+; CHECK: DW_AT_location [DW_FORM_sec_offset]
+; CHECK: DW_AT_name {{.*}} "block"
+
+; DWARF3: DW_TAG_subprogram
+; DWARF3: DW_TAG_variable
+; DWARF3: DW_AT_location [DW_FORM_data4]
+; DWARF3: DW_AT_name {{.*}} "block"
%struct.__block_descriptor = type { i64, i64 }
%struct.__block_literal_generic = type { i8*, i32, i32, i8*, %struct.__block_descriptor* }
@@ -63,9 +70,9 @@ declare i32 @__objc_personality_v0(...)
!llvm.module.flags = !{!35, !36, !37, !38, !64}
!0 = metadata !{i32 786449, metadata !63, i32 16, metadata !"clang version 3.1 (trunk 151227)", i1 false, metadata !"", i32 2, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !28, metadata !31, metadata !34}
-!5 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"foo", metadata !"foo", metadata !"", i32 5, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, metadata !26, i32 5} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"foo", metadata !"foo", metadata !"", i32 5, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, null, i32 5} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !63} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{null, metadata !9}
@@ -86,15 +93,13 @@ declare i32 @__objc_personality_v0(...)
!23 = metadata !{i32 786445, metadata !63, metadata !6, metadata !"reserved", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !24} ; [ DW_TAG_member ]
!24 = metadata !{i32 786468, null, null, metadata !"long unsigned int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
!25 = metadata !{i32 786445, metadata !63, metadata !6, metadata !"Size", i32 0, i64 64, i64 64, i64 64, i32 0, metadata !24} ; [ DW_TAG_member ]
-!26 = metadata !{metadata !27}
-!27 = metadata !{i32 786468} ; [ DW_TAG_base_type ]
-!28 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"__foo_block_invoke_0", metadata !"__foo_block_invoke_0", metadata !"", i32 7, metadata !29, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i8*)* @__foo_block_invoke_0, null, null, metadata !26, i32 7} ; [ DW_TAG_subprogram ]
+!28 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"__foo_block_invoke_0", metadata !"__foo_block_invoke_0", metadata !"", i32 7, metadata !29, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i8*)* @__foo_block_invoke_0, null, null, null, i32 7} ; [ DW_TAG_subprogram ]
!29 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !30, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!30 = metadata !{null, metadata !14}
-!31 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"__copy_helper_block_", metadata !"__copy_helper_block_", metadata !"", i32 10, metadata !32, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, metadata !26, i32 10} ; [ DW_TAG_subprogram ]
+!31 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"__copy_helper_block_", metadata !"__copy_helper_block_", metadata !"", i32 10, metadata !32, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, null, i32 10} ; [ DW_TAG_subprogram ]
!32 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !33, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!33 = metadata !{null, metadata !14, metadata !14}
-!34 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"__destroy_helper_block_", metadata !"__destroy_helper_block_", metadata !"", i32 10, metadata !29, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, metadata !26, i32 10} ; [ DW_TAG_subprogram ]
+!34 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"__destroy_helper_block_", metadata !"__destroy_helper_block_", metadata !"", i32 10, metadata !29, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, null, i32 10} ; [ DW_TAG_subprogram ]
!35 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
!36 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
!37 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
@@ -113,15 +118,16 @@ declare i32 @__objc_personality_v0(...)
!50 = metadata !{i32 786445, metadata !63, metadata !6, metadata !"block", i32 7, i64 64, i64 64, i64 256, i32 0, metadata !9} ; [ DW_TAG_member ]
!51 = metadata !{i32 7, i32 18, metadata !28, null}
!52 = metadata !{i32 7, i32 19, metadata !28, null}
-!53 = metadata !{i32 786688, metadata !28, metadata !"block", metadata !6, i32 5, metadata !9, i32 0, i32 0, i64 1, i64 32} ; [ DW_TAG_auto_variable ]
+!53 = metadata !{i32 786688, metadata !28, metadata !"block", metadata !6, i32 5, metadata !9, i32 0, i32 0, metadata !65} ; [ DW_TAG_auto_variable ]
!54 = metadata !{i32 5, i32 27, metadata !28, null}
!55 = metadata !{i32 8, i32 22, metadata !56, null}
-!56 = metadata !{i32 786443, metadata !57, i32 7, i32 26, metadata !6, i32 2} ; [ DW_TAG_lexical_block ]
-!57 = metadata !{i32 786443, metadata !28, i32 7, i32 19, metadata !6, i32 1} ; [ DW_TAG_lexical_block ]
+!56 = metadata !{i32 786443, metadata !6, metadata !57, i32 7, i32 26, i32 2} ; [ DW_TAG_lexical_block ]
+!57 = metadata !{i32 786443, metadata !6, metadata !28, i32 7, i32 19, i32 1} ; [ DW_TAG_lexical_block ]
!58 = metadata !{i32 10, i32 20, metadata !59, null}
-!59 = metadata !{i32 786443, metadata !60, i32 9, i32 35, metadata !6, i32 4} ; [ DW_TAG_lexical_block ]
-!60 = metadata !{i32 786443, metadata !57, i32 9, i32 35, metadata !6, i32 3} ; [ DW_TAG_lexical_block ]
+!59 = metadata !{i32 786443, metadata !6, metadata !60, i32 9, i32 35, i32 4} ; [ DW_TAG_lexical_block ]
+!60 = metadata !{i32 786443, metadata !6, metadata !57, i32 9, i32 35, i32 3} ; [ DW_TAG_lexical_block ]
!61 = metadata !{i32 10, i32 21, metadata !28, null}
!62 = metadata !{i32 9, i32 20, metadata !56, null}
!63 = metadata !{metadata !"foo.m", metadata !"/Users/echristo"}
!64 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!65 = metadata !{i64 1, i64 32}
diff --git a/test/DebugInfo/X86/byvalstruct.ll b/test/DebugInfo/X86/byvalstruct.ll
index 3dea8632a702..d787ef39c36c 100644
--- a/test/DebugInfo/X86/byvalstruct.ll
+++ b/test/DebugInfo/X86/byvalstruct.ll
@@ -6,7 +6,8 @@
; CHECK: DW_TAG_formal_parameter
; CHECK: DW_TAG_formal_parameter
; CHECK: DW_TAG_formal_parameter
-; CHECK-NEXT: DW_AT_name {{.*}} "info"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "info"
;
; generated from
;
@@ -88,7 +89,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 17, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 2, metadata !2, metadata !3, metadata !6, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/t.mm] [DW_LANG_ObjC_plus_plus]
!1 = metadata !{metadata !"t.mm", metadata !""}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !1, metadata !5, metadata !"Bitmap", i32 8, i64 8, i64 8, i32 0, i32 512, null, metadata !2, i32 17, null, null, null} ; [ DW_TAG_structure_type ] [Bitmap] [line 8, size 8, align 8, offset 0] [def] [from ]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/t.mm]
diff --git a/test/DebugInfo/X86/c-type-units.ll b/test/DebugInfo/X86/c-type-units.ll
new file mode 100644
index 000000000000..431b0295148e
--- /dev/null
+++ b/test/DebugInfo/X86/c-type-units.ll
@@ -0,0 +1,29 @@
+; REQUIRES: object-emission
+
+; RUN: llc -o - %s -filetype=obj -O0 -generate-dwarf-pub-sections=Disable -generate-type-units -mtriple=x86_64-unknown-linux-gnu | llvm-dwarfdump -debug-dump=types - | FileCheck %s
+
+; struct foo {
+; } f;
+
+; no known LLVM frontends produce appropriate unique identifiers for C types,
+; so we don't produce type units for them
+; CHECK-NOT: DW_TAG_type_unit
+
+%struct.foo = type {}
+
+@f = common global %struct.foo zeroinitializer, align 1
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/simple.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"simple.c", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786484, i32 0, null, metadata !"f", metadata !"f", metadata !"", metadata !5, i32 2, metadata !6, i32 0, i32 1, %struct.foo* @f, null} ; [ DW_TAG_variable ] [f] [line 2] [def]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/simple.c]
+!6 = metadata !{i32 786451, metadata !1, null, metadata !"foo", i32 1, i64 0, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [foo] [line 1, size 0, align 8, offset 0] [def] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5 "}
diff --git a/test/DebugInfo/X86/coff_debug_info_type.ll b/test/DebugInfo/X86/coff_debug_info_type.ll
new file mode 100644
index 000000000000..a1051c39bffd
--- /dev/null
+++ b/test/DebugInfo/X86/coff_debug_info_type.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mtriple=i686-pc-mingw32 -filetype=asm -O0 < %s | FileCheck %s
+; RUN: llc -mtriple=i686-pc-cygwin -filetype=asm -O0 < %s | FileCheck %s
+; RUN: llc -mtriple=i686-w64-mingw32 -filetype=asm -O0 < %s | FileCheck %s
+; CHECK: .section .debug_info
+
+; RUN: llc -mtriple=i686-pc-win32 -filetype=asm -O0 < %s | FileCheck -check-prefix=WIN32 %s
+; WIN32: .section .debug$S,"rnd"
+
+; RUN: llc -mtriple=i686-pc-win32 -filetype=null -O0 < %s
+
+; generated from:
+; clang -g -S -emit-llvm test.c -o test.ll
+; int main()
+; {
+; return 0;
+; }
+
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ ret i32 0, !dbg !10
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [C:\Projects/test.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"test.c", metadata !"C:\5CProjects"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @main, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 2] [main]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [C:\Projects/test.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
+!10 = metadata !{i32 3, i32 0, metadata !4, null}
+!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/coff_relative_names.ll b/test/DebugInfo/X86/coff_relative_names.ll
index 4cc38a63bebf..3b4854e733df 100644
--- a/test/DebugInfo/X86/coff_relative_names.ll
+++ b/test/DebugInfo/X86/coff_relative_names.ll
@@ -10,10 +10,6 @@
; return 0;
; }
-; ModuleID = 'test.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S32"
-target triple = "i686-pc-win32"
-
; Function Attrs: nounwind
define i32 @main() #0 {
entry:
@@ -29,7 +25,7 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [C:\Projects/test.c] [DW_LANG_C99]
!1 = metadata !{metadata !"test.c", metadata !"C:\5CProjects"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @main, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 2] [main]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [C:\Projects/test.c]
diff --git a/test/DebugInfo/X86/concrete_out_of_line.ll b/test/DebugInfo/X86/concrete_out_of_line.ll
index 4a152963a144..40300de793d5 100644
--- a/test/DebugInfo/X86/concrete_out_of_line.ll
+++ b/test/DebugInfo/X86/concrete_out_of_line.ll
@@ -1,5 +1,4 @@
-; RUN: llc -mtriple=x86_64-linux %s -o %t -filetype=obj
-; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux < %s -filetype=obj | llvm-dwarfdump -debug-dump=info - | FileCheck %s
; test that we add DW_AT_inline even when we only have concrete out of line
; instances.
@@ -7,15 +6,57 @@
; first check that we have a TAG_subprogram at a given offset and it has
; AT_inline.
-; CHECK: 0x0000011c: DW_TAG_subprogram [17]
-; CHECK-NEXT: DW_AT_specification
+; CHECK: DW_TAG_class_type
+; CHECK: DW_TAG_subprogram
+; CHECK: [[ASSIGN_DECL:0x........]]: DW_TAG_subprogram
+
+; CHECK: DW_TAG_class_type
+; CHECK: [[RELEASE_DECL:0x........]]: DW_TAG_subprogram
+; CHECK: [[DTOR_DECL:0x........]]: DW_TAG_subprogram
+
+; CHECK: [[D2_ABS:.*]]: DW_TAG_subprogram
+; CHECK-NEXT: DW_AT_{{.*}}linkage_name {{.*}}D2
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[DTOR_DECL]]}
; CHECK-NEXT: DW_AT_inline
+; CHECK-NOT: DW_AT
+; CHECK: DW_TAG
+; CHECK: [[D1_ABS:.*]]: DW_TAG_subprogram
+; CHECK-NEXT: DW_AT_{{.*}}linkage_name {{.*}}D1
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[DTOR_DECL]]}
+; CHECK-NEXT: DW_AT_inline
+; CHECK-NOT: DW_AT
+; CHECK: [[D1_THIS_ABS:.*]]: DW_TAG_formal_parameter
+; CHECK: [[RELEASE:0x........]]: DW_TAG_subprogram
+; CHECK: DW_AT_specification {{.*}} {[[RELEASE_DECL]]}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: NULL
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_lexical_block
+; CHECK-NOT: NULL
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NEXT: DW_AT_abstract_origin {{.*}} {[[ASSIGN:0x........]]}
+; CHECK-NOT: NULL
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NEXT: DW_AT_abstract_origin {{.*}} {[[D1_ABS]]}
+; CHECK-NOT: NULL
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NEXT: DW_AT_abstract_origin {{.*}} {[[D2_ABS]]}
; and then that a TAG_subprogram refers to it with AT_abstract_origin.
-; CHECK: 0x0000015d: DW_TAG_subprogram [19]
-; CHECK-NEXT: DW_AT_abstract_origin [DW_FORM_ref4] (cu + 0x011c => {0x0000011c})
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[D1_ABS]]}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[D1_THIS_ABS]]}
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NEXT: DW_AT_abstract_origin {{.*}} {[[D2_ABS]]}
+
define i32 @_ZN17nsAutoRefCnt7ReleaseEv() {
entry:
@@ -35,8 +76,8 @@ declare void @_Z8moz_freePv(i8*)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!60}
-!0 = metadata !{i32 786449, metadata !59, i32 4, metadata !"clang version 3.1 ()", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !47, metadata !47, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !59, i32 4, metadata !"clang version 3.1 ()", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !47, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !23, metadata !27, metadata !31}
!5 = metadata !{i32 720942, metadata !6, null, metadata !"Release", metadata !"Release", metadata !"_ZN17nsAutoRefCnt7ReleaseEv", i32 14, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32* null, null, metadata !12, metadata !20, i32 14} ; [ DW_TAG_subprogram ] [line 14] [def] [Release]
!6 = metadata !{i32 720937, metadata !59} ; [ DW_TAG_file_type ]
@@ -51,7 +92,7 @@ declare void @_Z8moz_freePv(i8*)
!15 = metadata !{i32 720942, metadata !6, metadata !13, metadata !"~nsAutoRefCnt", metadata !"~nsAutoRefCnt", metadata !"", i32 12, metadata !16, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, metadata !18, i32 12} ; [ DW_TAG_subprogram ]
!16 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !17, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!17 = metadata !{null, metadata !10}
-!18 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
+!18 = metadata !{}
!20 = metadata !{metadata !22}
!22 = metadata !{i32 786689, metadata !5, metadata !"this", metadata !6, i32 16777230, metadata !10, i32 64, i32 0} ; [ DW_TAG_arg_variable ]
!23 = metadata !{i32 720942, metadata !6, null, metadata !"~nsAutoRefCnt", metadata !"~nsAutoRefCnt", metadata !"_ZN17nsAutoRefCntD1Ev", i32 18, metadata !16, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32* null, null, metadata !15, metadata !24, i32 18} ; [ DW_TAG_subprogram ] [line 18] [def] [~nsAutoRefCnt]
diff --git a/test/DebugInfo/X86/cu-ranges-odr.ll b/test/DebugInfo/X86/cu-ranges-odr.ll
new file mode 100644
index 000000000000..c42a9085da56
--- /dev/null
+++ b/test/DebugInfo/X86/cu-ranges-odr.ll
@@ -0,0 +1,96 @@
+; RUN: llc -split-dwarf=Enable -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
+; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s
+; RUN: llvm-readobj --relocations %t | FileCheck --check-prefix=CHECK-RELOCS %s
+
+; From:
+; class A {
+; public:
+; A(int i = 0) : a(i) {}
+; private:
+; int a;
+; };
+;
+; A a;
+
+; With function sections enabled make sure that we have a DW_AT_ranges attribute.
+; CHECK: DW_AT_ranges
+
+; Check that we have a relocation against the .debug_ranges section.
+; CHECK-RELOCS: R_X86_64_32 .debug_ranges 0x0
+
+%class.A = type { i32 }
+
+@a = global %class.A zeroinitializer, align 4
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
+define internal void @__cxx_global_var_init() section ".text.startup" {
+entry:
+ call void @_ZN1AC2Ei(%class.A* @a, i32 0), !dbg !26
+ ret void, !dbg !26
+}
+
+; Function Attrs: nounwind uwtable
+define linkonce_odr void @_ZN1AC2Ei(%class.A* %this, i32 %i) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ %i.addr = alloca i32, align 4
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !27), !dbg !29
+ store i32 %i, i32* %i.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %i.addr}, metadata !30), !dbg !31
+ %this1 = load %class.A** %this.addr
+ %a = getelementptr inbounds %class.A* %this1, i32 0, i32 0, !dbg !31
+ %0 = load i32* %i.addr, align 4, !dbg !31
+ store i32 %0, i32* %a, align 4, !dbg !31
+ ret void, !dbg !31
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+define internal void @_GLOBAL__I_a() section ".text.startup" {
+entry:
+ call void @__cxx_global_var_init(), !dbg !32
+ ret void, !dbg !32
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!23, !24}
+!llvm.ident = !{!25}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 (trunk 199923) (llvm/trunk 199940)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !13, metadata !21, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/baz.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"baz.cpp", metadata !"/usr/local/google/home/echristo/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786434, metadata !1, null, metadata !"A", i32 1, i64 32, i64 32, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_class_type ] [A] [line 1, size 32, align 32, offset 0] [def] [from ]
+!5 = metadata !{metadata !6, metadata !8}
+!6 = metadata !{i32 786445, metadata !1, metadata !"_ZTS1A", metadata !"a", i32 5, i64 32, i64 32, i64 0, i32 1, metadata !7} ; [ DW_TAG_member ] [a] [line 5, size 32, align 32, offset 0] [private] [from int]
+!7 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!8 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"", i32 3, metadata !9, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !12, i32 3} ; [ DW_TAG_subprogram ] [line 3] [A]
+!9 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!10 = metadata !{null, metadata !11, metadata !7}
+!11 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!12 = metadata !{i32 786468}
+!13 = metadata !{metadata !14, metadata !18, metadata !19}
+!14 = metadata !{i32 786478, metadata !1, metadata !15, metadata !"__cxx_global_var_init", metadata !"__cxx_global_var_init", metadata !"", i32 8, metadata !16, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @__cxx_global_var_init, null, null, metadata !2, i32 8} ; [ DW_TAG_subprogram ] [line 8] [local] [def] [__cxx_global_var_init]
+!15 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/baz.cpp]
+!16 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !17, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!17 = metadata !{null}
+!18 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"_ZN1AC2Ei", i32 3, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*, i32)* @_ZN1AC2Ei, null, metadata !8, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [A]
+!19 = metadata !{i32 786478, metadata !1, metadata !15, metadata !"", metadata !"", metadata !"_GLOBAL__I_a", i32 3, metadata !20, i1 true, i1 true, i32 0, i32 0, null, i32 64, i1 false, void ()* @_GLOBAL__I_a, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [local] [def]
+!20 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!21 = metadata !{metadata !22}
+!22 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !15, i32 8, metadata !4, i32 0, i32 1, %class.A* @a, null} ; [ DW_TAG_variable ] [a] [line 8] [def]
+!23 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!24 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!25 = metadata !{metadata !"clang version 3.5 (trunk 199923) (llvm/trunk 199940)"}
+!26 = metadata !{i32 8, i32 0, metadata !14, null} ; [ DW_TAG_imported_declaration ]
+!27 = metadata !{i32 786689, metadata !18, metadata !"this", null, i32 16777216, metadata !28, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!28 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1A]
+!29 = metadata !{i32 0, i32 0, metadata !18, null}
+!30 = metadata !{i32 786689, metadata !18, metadata !"i", metadata !15, i32 33554435, metadata !7, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 3]
+!31 = metadata !{i32 3, i32 0, metadata !18, null}
+!32 = metadata !{i32 3, i32 0, metadata !19, null}
diff --git a/test/DebugInfo/X86/cu-ranges.ll b/test/DebugInfo/X86/cu-ranges.ll
new file mode 100644
index 000000000000..405a498155f5
--- /dev/null
+++ b/test/DebugInfo/X86/cu-ranges.ll
@@ -0,0 +1,73 @@
+; RUN: llc -split-dwarf=Enable -O0 %s -function-sections -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
+; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck --check-prefix=FUNCTION-SECTIONS %s
+; RUN: llvm-readobj --relocations %t | FileCheck --check-prefix=FUNCTION-SECTIONS-RELOCS %s
+
+; RUN: llc -split-dwarf=Enable -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
+; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck --check-prefix=NO-FUNCTION-SECTIONS %s
+
+; From:
+; int foo (int a) {
+; return a+1;
+; }
+; int bar (int b) {
+; return b+2;
+; }
+
+; With function sections enabled make sure that we have a DW_AT_ranges attribute.
+; FUNCTION-SECTIONS: DW_AT_ranges
+
+; Check that we have a relocation against the .debug_ranges section.
+; FUNCTION-SECTIONS-RELOCS: R_X86_64_32 .debug_ranges 0x0
+
+; Without function sections enabled make sure that we have no DW_AT_ranges attribute.
+; NO-FUNCTION-SECTIONS-NOT: DW_AT_ranges
+
+; Function Attrs: nounwind uwtable
+define i32 @foo(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %a.addr}, metadata !13), !dbg !14
+ %0 = load i32* %a.addr, align 4, !dbg !14
+ %add = add nsw i32 %0, 1, !dbg !14
+ ret i32 %add, !dbg !14
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @bar(i32 %b) #0 {
+entry:
+ %b.addr = alloca i32, align 4
+ store i32 %b, i32* %b.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %b.addr}, metadata !15), !dbg !16
+ %0 = load i32* %b.addr, align 4, !dbg !16
+ %add = add nsw i32 %0, 2, !dbg !16
+ ret i32 %add, !dbg !16
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/z.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"z.c", metadata !"/usr/local/google/home/echristo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !9}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/z.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @bar, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [bar]
+!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)"}
+!13 = metadata !{i32 786689, metadata !4, metadata !"a", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [a] [line 1]
+!14 = metadata !{i32 1, i32 0, metadata !4, null}
+!15 = metadata !{i32 786689, metadata !9, metadata !"b", metadata !5, i32 16777218, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [b] [line 2]
+!16 = metadata !{i32 2, i32 0, metadata !9, null}
diff --git a/test/DebugInfo/X86/data_member_location.ll b/test/DebugInfo/X86/data_member_location.ll
index 1adddb97be25..1c76258ec205 100644
--- a/test/DebugInfo/X86/data_member_location.ll
+++ b/test/DebugInfo/X86/data_member_location.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=x86_64-linux -O0 -o - -filetype=obj < %s | llvm-dwarfdump -debug-dump=info -| FileCheck %s
+; RUN: llc -mtriple=x86_64-linux -dwarf-version=2 -O0 -o - -filetype=obj < %s | llvm-dwarfdump -debug-dump=info -| FileCheck -check-prefix=DWARF2 %s
; Generated from Clang with the following source:
;
@@ -17,6 +18,14 @@
; CHECK-NOT: DW_TAG
; CHECK: DW_AT_data_member_location {{.*}} (0x04)
+; DWARF2: DW_AT_name {{.*}} "c"
+; DWARF2-NOT: DW_TAG
+; DWARF2: DW_AT_data_member_location {{.*}} (<0x02> 23 00 )
+
+; DWARF2: DW_AT_name {{.*}} "i"
+; DWARF2-NOT: DW_TAG
+; DWARF2: DW_AT_data_member_location {{.*}} (<0x02> 23 04 )
+
%struct.foo = type { i8, i32 }
@f = global %struct.foo zeroinitializer, align 4
@@ -27,7 +36,7 @@
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !2, metadata !10, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/data_member_location.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"data_member_location.cpp", metadata !"/tmp/dbginfo"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !1, null, metadata !"foo", i32 1, i64 64, i64 32, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 1, size 64, align 32, offset 0] [def] [from ]
!5 = metadata !{metadata !6, metadata !8}
diff --git a/test/DebugInfo/X86/dbg-asm.s b/test/DebugInfo/X86/dbg-asm.s
new file mode 100644
index 000000000000..66a0292461dd
--- /dev/null
+++ b/test/DebugInfo/X86/dbg-asm.s
@@ -0,0 +1,22 @@
+# RUN: llvm-mc -triple i686-windows-gnu -g %s -filetype obj -o - \
+# RUN: | llvm-readobj -r - | FileCheck -check-prefix CHECK-COFF %s
+# RUN: llvm-mc -triple i686-windows-itanium -g %s -filetype obj -o - \
+# RUN: | llvm-readobj -r - | FileCheck -check-prefix CHECK-COFF %s
+# RUN: llvm-mc -triple i686-linux-gnu -g %s -filetype obj -o - \
+# RUN: | llvm-readobj -r - | FileCheck -check-prefix CHECK-ELF %s
+
+_a:
+ movl $65, %eax
+ ret
+
+# CHECK-COFF: Relocations [
+# CHECK-COFF: Section {{.*}} .debug_info {
+# CHECK-COFF: 0x6 IMAGE_REL_I386_SECREL .debug_abbrev
+# CHECK-COFF: }
+# CHECK-COFF: ]
+
+# CHECK-ELF: Relocations [
+# CHECK-ELF: Section {{.*}} .rel.debug_info {
+# CHECK-ELF: 0x6 R_386_32 .debug_abbrev
+# CHECK-ELF: }
+# CHECK-ELF: ]
diff --git a/test/DebugInfo/X86/dbg-at-specficiation.ll b/test/DebugInfo/X86/dbg-at-specficiation.ll
index 8003a0fc15b7..c76536719f4f 100644
--- a/test/DebugInfo/X86/dbg-at-specficiation.ll
+++ b/test/DebugInfo/X86/dbg-at-specficiation.ll
@@ -9,7 +9,7 @@
!llvm.module.flags = !{!12}
!0 = metadata !{i32 720913, metadata !11, i32 12, metadata !"clang version 3.0 (trunk 140253)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, null, i32 0} ; [ DW_TAG_compile_unit ]
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 720948, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, [10 x i32]* @a, null} ; [ DW_TAG_variable ]
!6 = metadata !{i32 720937, metadata !11} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/dbg-byval-parameter.ll b/test/DebugInfo/X86/dbg-byval-parameter.ll
index d66486d14ae5..c658b5050269 100644
--- a/test/DebugInfo/X86/dbg-byval-parameter.ll
+++ b/test/DebugInfo/X86/dbg-byval-parameter.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=x86 -asm-verbose < %s | grep DW_TAG_formal_parameter
+; RUN: %llc_dwarf -march=x86 -asm-verbose < %s | grep DW_TAG_formal_parameter
%struct.Pt = type { double, double }
diff --git a/test/DebugInfo/X86/dbg-const-int.ll b/test/DebugInfo/X86/dbg-const-int.ll
index f2f51c9b0f3d..bf7ee08c665f 100644
--- a/test/DebugInfo/X86/dbg-const-int.ll
+++ b/test/DebugInfo/X86/dbg-const-int.ll
@@ -1,12 +1,14 @@
-; RUN: llc -mtriple=x86_64-apple-darwin12 -filetype=obj %s -o %t
-; RUN: llvm-dwarfdump %t | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin12 -filetype=obj < %s \
+; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.6.7"
; Radar 9511391
; CHECK: DW_TAG_variable
-; CHECK: "i"
-; CHECK: DW_AT_const_value [DW_FORM_sdata] (42)
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_const_value [DW_FORM_sdata] (42)
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "i"
define i32 @foo() nounwind uwtable readnone optsize ssp {
entry:
diff --git a/test/DebugInfo/X86/dbg-const.ll b/test/DebugInfo/X86/dbg-const.ll
index 12dc154c051b..300c1eeb977b 100644
--- a/test/DebugInfo/X86/dbg-const.ll
+++ b/test/DebugInfo/X86/dbg-const.ll
@@ -13,7 +13,7 @@
target triple = "x86_64-apple-darwin10.0.0"
-;CHECK: ## DW_OP_constu
+;CHECK: ## DW_OP_consts
;CHECK-NEXT: .byte 42
define i32 @foobar() nounwind readonly noinline ssp {
entry:
diff --git a/test/DebugInfo/X86/dbg-declare-arg.ll b/test/DebugInfo/X86/dbg-declare-arg.ll
index 7bf6f4fa5dfd..b5372658cf37 100644
--- a/test/DebugInfo/X86/dbg-declare-arg.ll
+++ b/test/DebugInfo/X86/dbg-declare-arg.ll
@@ -122,6 +122,6 @@ entry:
!47 = metadata !{i32 2, i32 47, metadata !25, null}
!48 = metadata !{i32 2, i32 54, metadata !49, null}
!49 = metadata !{i32 786443, metadata !51, metadata !25, i32 2, i32 52, i32 2} ; [ DW_TAG_lexical_block ]
-!50 = metadata !{metadata !0, metadata !10, metadata !14, metadata !19, metadata !22, metadata !25}
+!50 = metadata !{metadata !19, metadata !22, metadata !25}
!51 = metadata !{metadata !"a.cc", metadata !"/private/tmp"}
!52 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/dbg-declare.ll b/test/DebugInfo/X86/dbg-declare.ll
index 988d0bcb7713..241a5a1b5f11 100644
--- a/test/DebugInfo/X86/dbg-declare.ll
+++ b/test/DebugInfo/X86/dbg-declare.ll
@@ -31,17 +31,15 @@ declare void @llvm.stackrestore(i8*) nounwind
!llvm.module.flags = !{!27}
!0 = metadata !{i32 786449, metadata !26, i32 12, metadata !"clang version 3.1 (trunk 153698)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 786478, metadata !26, metadata !0, metadata !"foo", metadata !"foo", metadata !"", i32 6, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32*)* @foo, null, null, metadata !12, i32 0} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786478, metadata !26, metadata !0, metadata !"foo", metadata !"foo", metadata !"", i32 6, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32*)* @foo, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !26} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9, metadata !10}
!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
!10 = metadata !{i32 786447, null, null, null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !11} ; [ DW_TAG_pointer_type ]
!11 = metadata !{i32 786470, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !9} ; [ DW_TAG_const_type ]
-!12 = metadata !{metadata !13}
-!13 = metadata !{i32 786468} ; [ DW_TAG_base_type ]
!14 = metadata !{i32 786689, metadata !5, metadata !"x", metadata !6, i32 16777221, metadata !10, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!15 = metadata !{i32 5, i32 21, metadata !5, null}
!16 = metadata !{i32 7, i32 13, metadata !17, null}
diff --git a/test/DebugInfo/X86/dbg-large-unsigned-const.ll b/test/DebugInfo/X86/dbg-large-unsigned-const.ll
deleted file mode 100644
index a037f3c269ee..000000000000
--- a/test/DebugInfo/X86/dbg-large-unsigned-const.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; RUN: llc -filetype=obj %s -o /dev/null
-; Hanle large unsigned constant values.
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-target triple = "i386-apple-macosx10.7.0"
-
-define zeroext i1 @_Z3iseRKxS0_(i64* nocapture %LHS, i64* nocapture %RHS) nounwind readonly optsize ssp {
-entry:
- tail call void @llvm.dbg.value(metadata !{i64* %LHS}, i64 0, metadata !7), !dbg !13
- tail call void @llvm.dbg.value(metadata !{i64* %RHS}, i64 0, metadata !11), !dbg !14
- %tmp1 = load i64* %LHS, align 4, !dbg !15
- %tmp3 = load i64* %RHS, align 4, !dbg !15
- %cmp = icmp eq i64 %tmp1, %tmp3, !dbg !15
- ret i1 %cmp, !dbg !15
-}
-
-define zeroext i1 @_Z2fnx(i64 %a) nounwind readnone optsize ssp {
-entry:
- tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !12), !dbg !20
- tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !12), !dbg !20
- tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !21), !dbg !24
- tail call void @llvm.dbg.value(metadata !25, i64 0, metadata !26), !dbg !27
- %cmp.i = icmp eq i64 %a, 9223372036854775807, !dbg !28
- ret i1 %cmp.i, !dbg !22
-}
-
-declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!34}
-!29 = metadata !{metadata !1, metadata !6}
-!30 = metadata !{metadata !7, metadata !11}
-!31 = metadata !{metadata !12}
-
-!0 = metadata !{i32 786449, metadata !32, i32 4, metadata !"clang version 3.0 (trunk 135593)", i1 true, metadata !"", i32 0, metadata !33, metadata !33, metadata !29, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 786478, metadata !32, null, metadata !"ise", metadata !"ise", metadata !"_Z3iseRKxS0_", i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i1 (i64*, i64*)* @_Z3iseRKxS0_, null, null, metadata !30, i32 2} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 786473, metadata !32} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786453, metadata !32, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{i32 786468, null, metadata !0, metadata !"bool", i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 786478, metadata !32, null, metadata !"fn", metadata !"fn", metadata !"_Z2fnx", i32 6, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i1 (i64)* @_Z2fnx, null, null, metadata !31, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [fn]
-!7 = metadata !{i32 786689, metadata !1, metadata !"LHS", metadata !2, i32 16777218, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
-!8 = metadata !{i32 786448, metadata !0, null, null, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !9} ; [ DW_TAG_reference_type ]
-!9 = metadata !{i32 786470, metadata !0, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_const_type ]
-!10 = metadata !{i32 786468, null, metadata !0, metadata !"long long int", i32 0, i64 64, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!11 = metadata !{i32 786689, metadata !1, metadata !"RHS", metadata !2, i32 33554434, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
-!12 = metadata !{i32 786689, metadata !6, metadata !"a", metadata !2, i32 16777222, metadata !10, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
-!13 = metadata !{i32 2, i32 27, metadata !1, null}
-!14 = metadata !{i32 2, i32 49, metadata !1, null}
-!15 = metadata !{i32 3, i32 3, metadata !16, null}
-!16 = metadata !{i32 786443, metadata !32, metadata !1, i32 2, i32 54, i32 0} ; [ DW_TAG_lexical_block ]
-!20 = metadata !{i32 6, i32 19, metadata !6, null}
-!21 = metadata !{i32 786689, metadata !1, metadata !"LHS", metadata !2, i32 16777218, metadata !8, i32 0, metadata !22} ; [ DW_TAG_arg_variable ]
-!22 = metadata !{i32 7, i32 10, metadata !23, null}
-!23 = metadata !{i32 786443, metadata !32, metadata !6, i32 6, i32 22, i32 1} ; [ DW_TAG_lexical_block ]
-!24 = metadata !{i32 2, i32 27, metadata !1, metadata !22}
-!25 = metadata !{i64 9223372036854775807}
-!26 = metadata !{i32 786689, metadata !1, metadata !"RHS", metadata !2, i32 33554434, metadata !8, i32 0, metadata !22} ; [ DW_TAG_arg_variable ]
-!27 = metadata !{i32 2, i32 49, metadata !1, metadata !22}
-!28 = metadata !{i32 3, i32 3, metadata !16, metadata !22}
-!32 = metadata !{metadata !"lli.cc", metadata !"/private/tmp"}
-!33 = metadata !{i32 0}
-!34 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/dbg-merge-loc-entry.ll b/test/DebugInfo/X86/dbg-merge-loc-entry.ll
index 8b619ea8607d..016d0a1e9f74 100644
--- a/test/DebugInfo/X86/dbg-merge-loc-entry.ll
+++ b/test/DebugInfo/X86/dbg-merge-loc-entry.ll
@@ -6,7 +6,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin8"
-;CHECK: DW_AT_location{{.*}}(<0x01> 55 )
+;CHECK: DW_AT_location{{.*}}(<0x1> 55 )
%0 = type { i64, i1 }
diff --git a/test/DebugInfo/X86/dbg-subrange.ll b/test/DebugInfo/X86/dbg-subrange.ll
index 5bf330c9b9d7..f8761d012e86 100644
--- a/test/DebugInfo/X86/dbg-subrange.ll
+++ b/test/DebugInfo/X86/dbg-subrange.ll
@@ -15,15 +15,13 @@ entry:
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!22}
-!0 = metadata !{i32 786449, metadata !21, i32 12, metadata !"clang version 3.1 (trunk 144833)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !11, metadata !11, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !21, i32 12, metadata !"clang version 3.1 (trunk 144833)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !11, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 720942, metadata !21, metadata !6, metadata !"bar", metadata !"bar", metadata !"", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @bar, null, null, metadata !9, i32 0} ; [ DW_TAG_subprogram ] [line 4] [def] [scope 0] [bar]
+!5 = metadata !{i32 720942, metadata !21, metadata !6, metadata !"bar", metadata !"bar", metadata !"", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @bar, null, null, null, i32 0} ; [ DW_TAG_subprogram ] [line 4] [def] [scope 0] [bar]
!6 = metadata !{i32 720937, metadata !21} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{null}
-!9 = metadata !{metadata !10}
-!10 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
!11 = metadata !{metadata !13}
!13 = metadata !{i32 720948, i32 0, null, metadata !"s", metadata !"s", metadata !"", metadata !6, i32 2, metadata !14, i32 0, i32 1, [4294967296 x i8]* @s, null} ; [ DW_TAG_variable ]
!14 = metadata !{i32 720897, null, null, null, i32 0, i64 34359738368, i64 8, i32 0, i32 0, metadata !15, metadata !16, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 34359738368, align 8, offset 0] [from char]
diff --git a/test/DebugInfo/X86/dbg-value-const-byref.ll b/test/DebugInfo/X86/dbg-value-const-byref.ll
new file mode 100644
index 000000000000..23fa3520a7d1
--- /dev/null
+++ b/test/DebugInfo/X86/dbg-value-const-byref.ll
@@ -0,0 +1,106 @@
+; RUN: llc -O1 -filetype=obj -o - %s | llvm-dwarfdump -debug-dump=all - | FileCheck %s
+; Generated with -O1 from:
+; int f1();
+; void f2(int*);
+; int f3(int);
+;
+; int foo() {
+; int i = 3;
+; f3(i);
+; i = 7;
+; i = f1();
+; f2(&i);
+; return 0;
+; }
+;
+; Test that we generate valid debug info for optimized code,
+; particularly variables that are described as constants and passed
+; by reference.
+; rdar://problem/14874886
+;
+; CHECK: .debug_info contents:
+; CHECK: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_location [DW_FORM_data4] ([[LOC:.*]])
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"i"
+; CHECK: .debug_loc contents:
+; CHECK: [[LOC]]:
+; consts 0x00000003
+; CHECK: Beginning address offset: 0x0000000000000{{.*}}
+; CHECK: Ending address offset: [[C1:.*]]
+; CHECK: Location description: 11 03
+; consts 0x00000007
+; CHECK: Beginning address offset: [[C1]]
+; CHECK: Ending address offset: [[C2:.*]]
+; CHECK: Location description: 11 07
+; rax, piece 0x00000004
+; CHECK: Beginning address offset: [[C2]]
+; CHECK: Ending address offset: [[R1:.*]]
+; CHECK: Location description: 50 93 04
+; rdi+0
+; CHECK: Beginning address offset: [[R1]]
+; CHECK: Ending address offset: [[R2:.*]]
+; CHECK: Location description: 75 00
+;
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @foo() #0 {
+entry:
+ %i = alloca i32, align 4
+ call void @llvm.dbg.value(metadata !14, i64 0, metadata !10), !dbg !15
+ %call = call i32 @f3(i32 3) #3, !dbg !16
+ call void @llvm.dbg.value(metadata !17, i64 0, metadata !10), !dbg !18
+ %call1 = call i32 (...)* @f1() #3, !dbg !19
+ call void @llvm.dbg.value(metadata !{i32 %call1}, i64 0, metadata !10), !dbg !19
+ store i32 %call1, i32* %i, align 4, !dbg !19, !tbaa !20
+ call void @llvm.dbg.value(metadata !{i32* %i}, i64 0, metadata !10), !dbg !24
+ call void @f2(i32* %i) #3, !dbg !24
+ ret i32 0, !dbg !25
+}
+
+declare i32 @f3(i32)
+
+declare i32 @f1(...)
+
+declare void @f2(i32*)
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!11, !12}
+!llvm.ident = !{!13}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [dbg-value-const-byref.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"dbg-value-const-byref.c", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 ()* @foo, null, null, metadata !9, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [dbg-value-const-byref.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786688, metadata !4, metadata !"i", metadata !5, i32 6, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [i] [line 6]
+!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!12 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!13 = metadata !{metadata !"clang version 3.5.0 "}
+!14 = metadata !{i32 3}
+!15 = metadata !{i32 6, i32 0, metadata !4, null}
+!16 = metadata !{i32 7, i32 0, metadata !4, null}
+!17 = metadata !{i32 7}
+!18 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!19 = metadata !{i32 9, i32 0, metadata !4, null}
+!20 = metadata !{metadata !21, metadata !21, i64 0}
+!21 = metadata !{metadata !"int", metadata !22, i64 0}
+!22 = metadata !{metadata !"omnipotent char", metadata !23, i64 0}
+!23 = metadata !{metadata !"Simple C/C++ TBAA"}
+!24 = metadata !{i32 10, i32 0, metadata !4, null}
+!25 = metadata !{i32 11, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/dbg-value-inlined-parameter.ll b/test/DebugInfo/X86/dbg-value-inlined-parameter.ll
index 1a78772e2e32..4d18f7dc3063 100644
--- a/test/DebugInfo/X86/dbg-value-inlined-parameter.ll
+++ b/test/DebugInfo/X86/dbg-value-inlined-parameter.ll
@@ -1,18 +1,43 @@
-; RUN: llc -mtriple=x86_64-apple-darwin %s -filetype=obj -o %t
-; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
-; RUN: llc -mtriple=x86_64-apple-darwin -regalloc=basic %s -filetype=obj -o %t
-; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin < %s -filetype=obj \
+; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck --check-prefix=CHECK --check-prefix=DARWIN %s
+; RUN: llc -mtriple=x86_64-linux-gnu < %s -filetype=obj \
+; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck --check-prefix=CHECK --check-prefix=LINUX %s
+; RUN: llc -mtriple=x86_64-apple-darwin < %s -filetype=obj -regalloc=basic \
+; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck --check-prefix=CHECK --check-prefix=DARWIN %s
+
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_AT_abstract_origin {{.*}}{[[ABS:.*]]}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}}{[[ABS_SP:.*]]}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}}{[[ABS_NUMS:.*]]}
+
+; CHECK: [[ABS]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "foo"
+; CHECK: [[ABS_SP]]: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "sp"
+; CHECK: [[ABS_NUMS]]: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "nums"
;CHECK: DW_TAG_inlined_subroutine
-;CHECK-NEXT: DW_AT_abstract_origin
-;CHECK-NEXT: DW_AT_low_pc
-;CHECK-NEXT: DW_AT_high_pc
+;CHECK-NEXT: DW_AT_abstract_origin {{.*}}{[[ABS]]}
+;CHECK-NEXT: DW_AT_low_pc [DW_FORM_addr]
+;CHECK-NEXT: DW_AT_high_pc [DW_FORM_data4]
;CHECK-NEXT: DW_AT_call_file
;CHECK-NEXT: DW_AT_call_line
;CHECK: DW_TAG_formal_parameter
-;CHECK: DW_TAG_formal_parameter
-;CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000055] = "sp")
+;FIXME: Linux shouldn't drop this parameter either...
+;CHECK-NOT: DW_TAG
+;DARWIN: DW_AT_abstract_origin {{.*}}{[[ABS_SP]]}
+;DARWIN: DW_TAG_formal_parameter
+;CHECK: DW_AT_abstract_origin {{.*}}{[[ABS_NUMS]]}
+;CHECK-NOT: DW_TAG_formal_parameter
%struct.S1 = type { float*, i32 }
@@ -51,14 +76,14 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !1, metadata !"foo", metadata !"foo", metadata !"", i32 8, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (%struct.S1*, i32)* @foo, null, null, metadata !41, i32 8} ; [ DW_TAG_subprogram ] [line 8] [def] [foo]
!1 = metadata !{i32 786473, metadata !42} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !42, i32 12, metadata !"clang version 2.9 (trunk 125693)", i1 true, metadata !"", i32 0, metadata !8, metadata !8, metadata !39, metadata !40, metadata !40, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !42, i32 12, metadata !"clang version 2.9 (trunk 125693)", i1 true, metadata !"", i32 0, metadata !8, metadata !8, metadata !39, metadata !40, metadata !44, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !42, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, null, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
!6 = metadata !{i32 786478, metadata !1, metadata !1, metadata !"foobar", metadata !"foobar", metadata !"", i32 15, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, void ()* @foobar, null, null, null, i32 0} ; [ DW_TAG_subprogram ] [line 15] [def] [scope 0] [foobar]
!7 = metadata !{i32 786453, metadata !42, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{null}
-!9 = metadata !{i32 786689, metadata !0, metadata !"sp", metadata !1, i32 7, metadata !10, i32 0, metadata !32} ; [ DW_TAG_arg_variable ]
+!9 = metadata !{i32 786689, metadata !0, metadata !"sp", metadata !1, i32 16777223, metadata !10, i32 0, metadata !32} ; [ DW_TAG_arg_variable ]
!10 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !11} ; [ DW_TAG_pointer_type ]
!11 = metadata !{i32 786454, metadata !42, metadata !2, metadata !"S1", i32 4, i64 0, i64 0, i64 0, i32 0, metadata !12} ; [ DW_TAG_typedef ]
!12 = metadata !{i32 786451, metadata !42, metadata !2, metadata !"S1", i32 1, i64 128, i64 64, i32 0, i32 0, null, metadata !13, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [S1] [line 1, size 128, align 64, offset 0] [def] [from ]
@@ -67,7 +92,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!15 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ]
!16 = metadata !{i32 786468, null, metadata !2, metadata !"float", i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
!17 = metadata !{i32 786445, metadata !42, metadata !1, metadata !"nums", i32 3, i64 32, i64 32, i64 64, i32 0, metadata !5} ; [ DW_TAG_member ]
-!18 = metadata !{i32 786689, metadata !0, metadata !"nums", metadata !1, i32 7, metadata !5, i32 0, metadata !32} ; [ DW_TAG_arg_variable ]
+!18 = metadata !{i32 786689, metadata !0, metadata !"nums", metadata !1, i32 33554439, metadata !5, i32 0, metadata !32} ; [ DW_TAG_arg_variable ]
!19 = metadata !{i32 786484, i32 0, metadata !2, metadata !"p", metadata !"p", metadata !"", metadata !1, i32 14, metadata !11, i32 0, i32 1, %struct.S1* @p, null} ; [ DW_TAG_variable ]
!20 = metadata !{i32 7, i32 13, metadata !0, null}
!21 = metadata !{i32 7, i32 21, metadata !0, null}
@@ -89,3 +114,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!41 = metadata !{metadata !9, metadata !18}
!42 = metadata !{metadata !"nm2.c", metadata !"/private/tmp"}
!43 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!44 = metadata !{}
diff --git a/test/DebugInfo/X86/dbg-value-isel.ll b/test/DebugInfo/X86/dbg-value-isel.ll
index f899f48b1fdf..155f76f7ab5f 100644
--- a/test/DebugInfo/X86/dbg-value-isel.ll
+++ b/test/DebugInfo/X86/dbg-value-isel.ll
@@ -92,7 +92,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!8 = metadata !{i32 786689, metadata !0, metadata !"ip", metadata !1, i32 1, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!9 = metadata !{i32 1, i32 32, metadata !0, null}
!10 = metadata !{i32 786688, metadata !11, metadata !"tid", metadata !1, i32 3, metadata !6, i32 0, null} ; [ DW_TAG_auto_variable ]
-!11 = metadata !{i32 786443, metadata !0, i32 2, i32 1, metadata !1, i32 1} ; [ DW_TAG_lexical_block ]
+!11 = metadata !{i32 786443, metadata !1, metadata !0, i32 2, i32 1, i32 1} ; [ DW_TAG_lexical_block ]
!12 = metadata !{i32 5, i32 24, metadata !11, null}
!13 = metadata !{i32 786688, metadata !11, metadata !"gid", metadata !1, i32 3, metadata !6, i32 0, null} ; [ DW_TAG_auto_variable ]
!14 = metadata !{i32 6, i32 25, metadata !11, null}
diff --git a/test/DebugInfo/X86/dbg-value-location.ll b/test/DebugInfo/X86/dbg-value-location.ll
index 1e21c6a00ae4..55d1ae6a9f69 100644
--- a/test/DebugInfo/X86/dbg-value-location.ll
+++ b/test/DebugInfo/X86/dbg-value-location.ll
@@ -1,14 +1,16 @@
-; RUN: llc < %s | FileCheck %s
-; RUN: llc < %s -regalloc=basic | FileCheck %s
+; RUN: llc -filetype=obj %s -o - | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+; RUN: llc -filetype=obj %s -regalloc=basic -o - | llvm-dwarfdump -debug-dump=info - | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"
-;Radar 8950491
+; Test that the type for the formal parameter "var" makes it into the debug info.
+; rdar://8950491
-;CHECK: .long Lset5
-;CHECK-NEXT: ## DW_AT_decl_file
-;CHECK-NEXT: ## DW_AT_decl_line
-;CHECK-NEXT: ## DW_AT_type
-;CHECK-NEXT: ## DW_AT_location
+;CHECK: DW_TAG_formal_parameter
+;CHECK-NEXT: DW_AT_location
+;CHECK-NEXT: DW_AT_name {{.*}} "var"
+;CHECK-NEXT: DW_AT_decl_file
+;CHECK-NEXT: DW_AT_decl_line
+;CHECK-NEXT: DW_AT_type
@dfm = external global i32, align 4
@@ -69,7 +71,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!18 = metadata !{i32 786443, metadata !26, metadata !0, i32 19510, i32 1, i32 99} ; [ DW_TAG_lexical_block ]
!22 = metadata !{i32 18094, i32 2, metadata !15, metadata !17}
!23 = metadata !{i32 19524, i32 1, metadata !18, null}
-!24 = metadata !{metadata !0, metadata !6, metadata !7, metadata !8}
+!24 = metadata !{metadata !0, metadata !6, metadata !7, metadata !8, metadata !16}
!25 = metadata !{i32 786473, metadata !27} ; [ DW_TAG_file_type ]
!26 = metadata !{metadata !"/tmp/f.c", metadata !"/tmp"}
!27 = metadata !{metadata !"f.i", metadata !"/tmp"}
diff --git a/test/DebugInfo/X86/dbg-value-terminator.ll b/test/DebugInfo/X86/dbg-value-terminator.ll
index f08f281f4f3f..974e0ad6d837 100644
--- a/test/DebugInfo/X86/dbg-value-terminator.ll
+++ b/test/DebugInfo/X86/dbg-value-terminator.ll
@@ -11,84 +11,84 @@
define hidden fastcc %a* @test() #1 {
entry:
- %0 = icmp eq %a* undef, null, !dbg !1
- br i1 %0, label %"14", label %return, !dbg !1
+ %0 = icmp eq %a* undef, null, !dbg !12
+ br i1 %0, label %"14", label %return, !dbg !12
"14": ; preds = %"8"
- br i1 undef, label %"25", label %"21", !dbg !1
+ br i1 undef, label %"25", label %"21", !dbg !12
"21": ; preds = %"14"
- br i1 undef, label %may_unswitch_on.exit, label %"6.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"6.i", !dbg !12
"6.i": ; preds = %"21"
- br i1 undef, label %"10.i", label %may_unswitch_on.exit, !dbg !1
+ br i1 undef, label %"10.i", label %may_unswitch_on.exit, !dbg !12
"10.i": ; preds = %"6.i"
- br i1 undef, label %may_unswitch_on.exit, label %"12.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"12.i", !dbg !12
"12.i": ; preds = %"10.i"
- br i1 undef, label %"4.i.i", label %"3.i.i", !dbg !1
+ br i1 undef, label %"4.i.i", label %"3.i.i", !dbg !12
"3.i.i": ; preds = %"12.i"
- br i1 undef, label %"4.i.i", label %VEC_edge_base_index.exit.i, !dbg !1
+ br i1 undef, label %"4.i.i", label %VEC_edge_base_index.exit.i, !dbg !12
"4.i.i": ; preds = %"3.i.i", %"12.i"
- unreachable, !dbg !1
+ unreachable, !dbg !12
VEC_edge_base_index.exit.i: ; preds = %"3.i.i"
- br i1 undef, label %may_unswitch_on.exit, label %"16.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"16.i", !dbg !12
"16.i": ; preds = %VEC_edge_base_index.exit.i
- br i1 undef, label %"4.i6.i", label %"3.i5.i", !dbg !1
+ br i1 undef, label %"4.i6.i", label %"3.i5.i", !dbg !12
"3.i5.i": ; preds = %"16.i"
- br i1 undef, label %VEC_edge_base_index.exit7.i, label %"4.i6.i", !dbg !1
+ br i1 undef, label %VEC_edge_base_index.exit7.i, label %"4.i6.i", !dbg !12
"4.i6.i": ; preds = %"3.i5.i", %"16.i"
- unreachable, !dbg !1
+ unreachable, !dbg !12
VEC_edge_base_index.exit7.i: ; preds = %"3.i5.i"
- br i1 undef, label %may_unswitch_on.exit, label %"21.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"21.i", !dbg !12
"21.i": ; preds = %VEC_edge_base_index.exit7.i
- br i1 undef, label %may_unswitch_on.exit, label %"23.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"23.i", !dbg !12
"23.i": ; preds = %"21.i"
- br i1 undef, label %may_unswitch_on.exit, label %"26.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"26.i", !dbg !12
"26.i": ; preds = %"34.i", %"23.i"
- %1 = icmp eq i32 undef, 9, !dbg !1
- br i1 %1, label %"34.i", label %"28.i", !dbg !1
+ %1 = icmp eq i32 undef, 9, !dbg !12
+ br i1 %1, label %"34.i", label %"28.i", !dbg !12
"28.i": ; preds = %"26.i"
unreachable
"34.i": ; preds = %"26.i"
- br i1 undef, label %"26.i", label %"36.i", !dbg !1
+ br i1 undef, label %"26.i", label %"36.i", !dbg !12
"36.i": ; preds = %"34.i"
- br i1 undef, label %"37.i", label %"38.i", !dbg !1
+ br i1 undef, label %"37.i", label %"38.i", !dbg !12
"37.i": ; preds = %"36.i"
- br label %"38.i", !dbg !1
+ br label %"38.i", !dbg !12
"38.i": ; preds = %"37.i", %"36.i"
- br i1 undef, label %"39.i", label %"45.i", !dbg !1
+ br i1 undef, label %"39.i", label %"45.i", !dbg !12
"39.i": ; preds = %"38.i"
- br i1 undef, label %"41.i", label %may_unswitch_on.exit, !dbg !1
+ br i1 undef, label %"41.i", label %may_unswitch_on.exit, !dbg !12
"41.i": ; preds = %"39.i"
- br i1 undef, label %may_unswitch_on.exit, label %"42.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"42.i", !dbg !12
"42.i": ; preds = %"41.i"
- br i1 undef, label %may_unswitch_on.exit, label %"44.i", !dbg !1
+ br i1 undef, label %may_unswitch_on.exit, label %"44.i", !dbg !12
"44.i": ; preds = %"42.i"
- %2 = load %a** undef, align 8, !dbg !1
- %3 = bitcast %a* %2 to %a*, !dbg !1
+ %2 = load %a** undef, align 8, !dbg !12
+ %3 = bitcast %a* %2 to %a*, !dbg !12
call void @llvm.dbg.value(metadata !{%a* %3}, i64 0, metadata !6), !dbg !12
- br label %may_unswitch_on.exit, !dbg !1
+ br label %may_unswitch_on.exit, !dbg !12
"45.i": ; preds = %"38.i"
unreachable
@@ -102,7 +102,7 @@ may_unswitch_on.exit: ; preds = %"44.i", %"42.i", %"
"return":
%result = phi %a* [ null, %entry ], [ %4, %may_unswitch_on.exit ]
- ret %a* %result, !dbg !1
+ ret %a* %result, !dbg !12
}
attributes #0 = { nounwind readnone }
diff --git a/test/DebugInfo/X86/dbg_value_direct.ll b/test/DebugInfo/X86/dbg_value_direct.ll
index 8a22cd7cca03..db947ac94799 100644
--- a/test/DebugInfo/X86/dbg_value_direct.ll
+++ b/test/DebugInfo/X86/dbg_value_direct.ll
@@ -149,7 +149,7 @@ attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/crash.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"crash.cpp", metadata !"/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func", metadata !"func", metadata !"_Z4funci", i32 6, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.A*, i32)* @_Z4funci, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [func]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/crash.cpp]
@@ -170,8 +170,9 @@ attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!20 = metadata !{i32 786468}
!21 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
!22 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
-!23 = metadata !{i32 786689, metadata !4, metadata !"", metadata !5, i32 16777222, metadata !21, i32 0, i32 0, i64 2} ; [ DW_TAG_arg_variable ] [line 6]
+!23 = metadata !{i32 786689, metadata !4, metadata !"", metadata !5, i32 16777222, metadata !21, i32 0, i32 0, metadata !28} ; [ DW_TAG_arg_variable ] [line 6]
!24 = metadata !{i32 786688, metadata !4, metadata !"a", metadata !5, i32 7, metadata !8, i32 8192, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 7]
!25 = metadata !{i32 7, i32 0, metadata !4, null}
!26 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
!27 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!28 = metadata !{i64 2}
diff --git a/test/DebugInfo/X86/debug-dead-local-var.ll b/test/DebugInfo/X86/debug-dead-local-var.ll
new file mode 100644
index 000000000000..64f0b2a9e40f
--- /dev/null
+++ b/test/DebugInfo/X86/debug-dead-local-var.ll
@@ -0,0 +1,51 @@
+; RUN: llc -mtriple=x86_64-linux-gnu %s -filetype=obj -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; Reconstruct this via clang and -O2.
+; static void foo() {
+; struct X { int a; int b; } xyz;
+; }
+
+; int bar() {
+; foo();
+; return 1;
+; }
+
+; Check that we still have the structure type for X even though we're not
+; going to emit a low/high_pc for foo.
+; CHECK: DW_TAG_structure_type
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @bar() #0 {
+entry:
+ ret i32 1, !dbg !21
+}
+
+attributes #0 = { nounwind readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!18, !19}
+!llvm.ident = !{!20}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 (trunk 209255) (llvm/trunk 209253)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/debug-dead-local-var.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"debug-dead-local-var.c", metadata !"/usr/local/google/home/echristo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !9}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 11, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 ()* @bar, null, null, metadata !2, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [bar]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/debug-dead-local-var.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 6, metadata !10, i1 true, i1 true, i32 0, i32 0, null, i32 0, i1 true, null, null, null, metadata !12, i32 6} ; [ DW_TAG_subprogram ] [line 6] [local] [def] [foo]
+!10 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !11, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = metadata !{null}
+!12 = metadata !{metadata !13}
+!13 = metadata !{i32 786688, metadata !9, metadata !"xyz", metadata !5, i32 8, metadata !14, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [xyz] [line 8]
+!14 = metadata !{i32 786451, metadata !1, metadata !9, metadata !"X", i32 8, i64 64, i64 32, i32 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [X] [line 8, size 64, align 32, offset 0] [def] [from ]
+!15 = metadata !{metadata !16, metadata !17}
+!16 = metadata !{i32 786445, metadata !1, metadata !14, metadata !"a", i32 8, i64 32, i64 32, i64 0, i32 0, metadata !8} ; [ DW_TAG_member ] [a] [line 8, size 32, align 32, offset 0] [from int]
+!17 = metadata !{i32 786445, metadata !1, metadata !14, metadata !"b", i32 8, i64 32, i64 32, i64 32, i32 0, metadata !8} ; [ DW_TAG_member ] [b] [line 8, size 32, align 32, offset 32] [from int]
+!18 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!19 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!20 = metadata !{metadata !"clang version 3.5.0 (trunk 209255) (llvm/trunk 209253)"}
+!21 = metadata !{i32 13, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/debug-info-block-captured-self.ll b/test/DebugInfo/X86/debug-info-block-captured-self.ll
index 6e4d2007a976..95eda60c5cca 100644
--- a/test/DebugInfo/X86/debug-info-block-captured-self.ll
+++ b/test/DebugInfo/X86/debug-info-block-captured-self.ll
@@ -7,17 +7,19 @@
; This test is split into two parts, the frontend part can be found at
; llvm/tools/clang/test/CodeGenObjC/debug-info-block-captured-self.m
;
-; CHECK: {{.*}}DW_AT_name{{.*}}_block_invoke{{.*}}
-; CHECK: DW_TAG_variable
-; CHECK: {{.*}}DW_AT_name{{.*}}"self"{{.*}}
+; CHECK: {{.*}}DW_AT_name{{.*}}_block_invoke{{.*}}
+; CHECK: DW_TAG_variable
; CHECK-NOT: DW_TAG
-; CHECK: DW_AT_location
+; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"self"{{.*}}
;
-; CHECK: {{.*}}DW_AT_name{{.*}}_block_invoke{{.*}}
-; CHECK: DW_TAG_variable
-; CHECK: {{.*}}DW_AT_name{{.*}}"self"{{.*}}
+; CHECK: {{.*}}DW_AT_name{{.*}}_block_invoke{{.*}}
+; CHECK: DW_TAG_variable
; CHECK-NOT: DW_TAG
-; CHECK: DW_AT_location
+; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"self"{{.*}}
;
; Generated (and then reduced) from
; ----------------------------------------------------------------------
@@ -83,7 +85,7 @@ define internal void @"__24-[Main initWithContext:]_block_invoke_2"(i8* %.block_
!2 = metadata !{metadata !3}
!3 = metadata !{i32 786436, metadata !107, null, metadata !"", i32 20, i64 32, i64 32, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [line 20, size 32, align 32, offset 0] [def] [from ]
!4 = metadata !{}
-!15 = metadata !{i32 0}
+!15 = metadata !{}
!23 = metadata !{metadata !38, metadata !42}
!27 = metadata !{i32 786454, metadata !107, null, metadata !"id", i32 31, i64 0, i64 0, i64 0, i32 0, metadata !28} ; [ DW_TAG_typedef ] [id] [line 31, size 0, align 0, offset 0] [from ]
!28 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !29} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from objc_object]
@@ -99,10 +101,12 @@ define internal void @"__24-[Main initWithContext:]_block_invoke_2"(i8* %.block_
!41 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
!42 = metadata !{i32 786478, metadata !1, metadata !1, metadata !"__24-[Main initWithContext:]_block_invoke_2", metadata !"__24-[Main initWithContext:]_block_invoke_2", metadata !"", i32 35, metadata !39, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i8*, i8*)* @"__24-[Main initWithContext:]_block_invoke_2", null, null, metadata !15, i32 35} ; [ DW_TAG_subprogram ] [line 35] [local] [def] [__24-[Main initWithContext:]_block_invoke_2]
!84 = metadata !{i32 33, i32 0, metadata !38, null}
-!86 = metadata !{i32 786688, metadata !38, metadata !"self", metadata !1, i32 41, metadata !34, i32 0, i32 0, i64 1, i64 32} ; [ DW_TAG_auto_variable ] [self] [line 41]
+!86 = metadata !{i32 786688, metadata !38, metadata !"self", metadata !1, i32 41, metadata !34, i32 0, i32 0, metadata !110} ; [ DW_TAG_auto_variable ] [self] [line 41]
!87 = metadata !{i32 41, i32 0, metadata !38, null}
!103 = metadata !{i32 35, i32 0, metadata !42, null}
-!105 = metadata !{i32 786688, metadata !42, metadata !"self", metadata !1, i32 40, metadata !34, i32 0, i32 0, i64 1, i64 32} ; [ DW_TAG_auto_variable ] [self] [line 40]
+!105 = metadata !{i32 786688, metadata !42, metadata !"self", metadata !1, i32 40, metadata !34, i32 0, i32 0, metadata !109} ; [ DW_TAG_auto_variable ] [self] [line 40]
!106 = metadata !{i32 40, i32 0, metadata !42, null}
!107 = metadata !{metadata !"llvm/tools/clang/test/CodeGenObjC/debug-info-block-captured-self.m", metadata !""}
!108 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!109 = metadata !{i64 1, i64 32}
+!110 = metadata !{i64 1, i64 32}
diff --git a/test/DebugInfo/X86/debug-info-blocks.ll b/test/DebugInfo/X86/debug-info-blocks.ll
index c3bedf2b2643..8a1a125ff804 100644
--- a/test/DebugInfo/X86/debug-info-blocks.ll
+++ b/test/DebugInfo/X86/debug-info-blocks.ll
@@ -5,32 +5,40 @@
; rdar://problem/9279956
; test that the DW_AT_location of self is at ( fbreg +{{[0-9]+}}, deref, +{{[0-9]+}} )
+; CHECK: [[A:.*]]: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_APPLE_objc_complete_type
+; CHECK-NEXT: DW_AT_name{{.*}}"A"
+
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_TAG_subprogram
; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_object_pointer
+; CHECK-NOT: DW_TAG
; CHECK: DW_AT_name{{.*}}_block_invoke
-; CHECK-NOT: DW_TAG_subprogram
+; CHECK-NOT: {{DW_TAG|NULL}}
; CHECK: DW_TAG_formal_parameter
-; CHECK-NEXT: DW_AT_name{{.*}}.block_descriptor
; CHECK-NOT: DW_TAG
; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}.block_descriptor
-; CHECK-NOT: DW_TAG_subprogram
+; CHECK-NOT: {{DW_TAG|NULL}}
; CHECK: DW_TAG_variable
-; CHECK-NEXT: DW_AT_name{{.*}}"self"
-; CHECK-NOT: DW_TAG
-; CHECK: DW_AT_type{{.*}}{[[APTR:.*]]}
-; CHECK-NOT: DW_TAG
-; CHECK: DW_AT_artificial
; CHECK-NOT: DW_TAG
; 0x06 = DW_OP_deref
; 0x23 = DW_OP_uconst
; 0x91 = DW_OP_fbreg
; CHECK: DW_AT_location{{.*}}91 {{[0-9]+}} 06 23 {{[0-9]+}} )
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"self"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_type{{.*}}{[[APTR:.*]]}
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_artificial
-; CHECK: [[A:.*]]: DW_TAG_structure_type
-; CHECK-NEXT: DW_AT_APPLE_objc_complete_type
-; CHECK-NEXT: DW_AT_name{{.*}}"A"
-; CHECK: [[APTR]]: DW_TAG_pointer_type [5]
+; CHECK: [[APTR]]: DW_TAG_pointer_type
; CHECK-NEXT: {[[A]]}
@@ -264,7 +272,7 @@ attributes #3 = { nounwind }
!0 = metadata !{i32 786449, metadata !1, i32 16, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 2, metadata !2, metadata !3, metadata !12, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [llvm/tools/clang/test/CodeGenObjC/<unknown>] [DW_LANG_ObjC]
!1 = metadata !{metadata !"llvm/tools/clang/test/CodeGenObjC/<unknown>", metadata !"llvm/_build.ninja.Debug"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !5, metadata !6, metadata !"A", i32 33, i64 32, i64 32, i32 0, i32 512, null, metadata !7, i32 16, null, null, null} ; [ DW_TAG_structure_type ] [A] [line 33, size 32, align 32, offset 0] [def] [from ]
!5 = metadata !{metadata !"llvm/tools/clang/test/CodeGenObjC/debug-info-blocks.m", metadata !"llvm/_build.ninja.Debug"}
@@ -351,7 +359,7 @@ attributes #3 = { nounwind }
!86 = metadata !{i32 786451, metadata !1, null, metadata !"__block_descriptor_withcopydispose", i32 49, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [__block_descriptor_withcopydispose] [line 49, size 0, align 0, offset 0] [decl] [from ]
!87 = metadata !{i32 786445, metadata !5, metadata !6, metadata !"self", i32 49, i64 64, i64 64, i64 256, i32 0, metadata !61} ; [ DW_TAG_member ] [self] [line 49, size 64, align 64, offset 256] [from ]
!88 = metadata !{i32 49, i32 0, metadata !27, null}
-!89 = metadata !{i32 786688, metadata !27, metadata !"self", metadata !32, i32 52, metadata !23, i32 0, i32 0, i64 2, i64 1, i64 32} ; [ DW_TAG_auto_variable ] [self] [line 52]
+!89 = metadata !{i32 786688, metadata !27, metadata !"self", metadata !32, i32 52, metadata !23, i32 0, i32 0, metadata !111} ; [ DW_TAG_auto_variable ] [self] [line 52]
!90 = metadata !{i32 52, i32 0, metadata !27, null}
!91 = metadata !{i32 786688, metadata !92, metadata !"d", metadata !6, i32 50, metadata !93, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [d] [line 50]
!92 = metadata !{i32 786443, metadata !5, metadata !27, i32 49, i32 0, i32 2} ; [ DW_TAG_lexical_block ] [llvm/tools/clang/test/CodeGenObjC/debug-info-blocks.m]
@@ -373,3 +381,4 @@ attributes #3 = { nounwind }
!108 = metadata !{i32 61, i32 0, metadata !36, null}
!109 = metadata !{i32 62, i32 0, metadata !36, null}
!110 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!111 = metadata !{i64 2, i64 1, i64 32}
diff --git a/test/DebugInfo/X86/debug-info-static-member.ll b/test/DebugInfo/X86/debug-info-static-member.ll
index 1792bb4783b5..7d258f9d8377 100644
--- a/test/DebugInfo/X86/debug-info-static-member.ll
+++ b/test/DebugInfo/X86/debug-info-static-member.ll
@@ -1,7 +1,7 @@
-; RUN: llc %s -o %t -filetype=obj -O0 -mtriple=x86_64-unknown-linux-gnu
+; RUN: llc %s -o %t -filetype=obj -O0 -mtriple=x86_64-unknown-linux-gnu -dwarf-version=4
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s -check-prefix=PRESENT
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s -check-prefix=ABSENT
-; RUN: llc %s -o %t -filetype=obj -O0 -mtriple=x86_64-apple-darwin
+; RUN: llc %s -o %t -filetype=obj -O0 -mtriple=x86_64-apple-darwin -dwarf-version=4
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s -check-prefix=DARWINP
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s -check-prefix=DARWINA
; Verify that attributes we do want are PRESENT;
@@ -59,8 +59,8 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!34}
-!0 = metadata !{i32 786449, metadata !33, i32 4, metadata !"clang version 3.3 (trunk 171914)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !10, metadata !10, metadata !""} ; [ DW_TAG_compile_unit ] [/home/probinson/projects/upstream/static-member/test/debug-info-static-member.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !33, i32 4, metadata !"clang version 3.3 (trunk 171914)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !10, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/probinson/projects/upstream/static-member/test/debug-info-static-member.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !33, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 18, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !1, i32 23} ; [ DW_TAG_subprogram ] [line 18] [def] [scope 23] [main]
!6 = metadata !{i32 786473, metadata !33} ; [ DW_TAG_file_type ]
@@ -114,7 +114,7 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
; PRESENT: DW_TAG_member
; PRESENT-NEXT: DW_AT_name {{.*}} "const_b"
; PRESENT: DW_AT_accessibility [DW_FORM_data1] (0x02)
-; PRESENT: DW_AT_const_value {{.*}} (0x4048f5c3)
+; PRESENT: DW_AT_const_value [DW_FORM_udata] (1078523331)
; PRESENT: 0x[[DECL_C:[0-9a-f]+]]: DW_TAG_member
; PRESENT-NEXT: DW_AT_name {{.*}} "c"
; PRESENT: DW_AT_accessibility [DW_FORM_data1] (0x01)
@@ -133,15 +133,15 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
; PRESENT: DW_TAG_variable
; PRESENT-NEXT: DW_AT_specification {{.*}} {0x[[DECL_A]]}
; PRESENT-NEXT: DW_AT_location
-; PRESENT-NEXT: DW_AT_MIPS_linkage_name {{.*}} "_ZN1C1aE"
+; PRESENT-NEXT: DW_AT_linkage_name {{.*}} "_ZN1C1aE"
; PRESENT: DW_TAG_variable
; PRESENT-NEXT: DW_AT_specification {{.*}} {0x[[DECL_B]]}
; PRESENT-NEXT: DW_AT_location
-; PRESENT-NEXT: DW_AT_MIPS_linkage_name {{.*}} "_ZN1C1bE"
+; PRESENT-NEXT: DW_AT_linkage_name {{.*}} "_ZN1C1bE"
; PRESENT: DW_TAG_variable
; PRESENT-NEXT: DW_AT_specification {{.*}} {0x[[DECL_C]]}
; PRESENT-NEXT: DW_AT_location
-; PRESENT-NEXT: DW_AT_MIPS_linkage_name {{.*}} "_ZN1C1cE"
+; PRESENT-NEXT: DW_AT_linkage_name {{.*}} "_ZN1C1cE"
; For Darwin gdb:
; DARWINP: .debug_info contents:
@@ -164,7 +164,7 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
; DARWINP: DW_TAG_member
; DARWINP-NEXT: DW_AT_name {{.*}} "const_b"
; DARWINP: DW_AT_accessibility [DW_FORM_data1] (0x02)
-; DARWINP: DW_AT_const_value {{.*}} (0x4048f5c3)
+; DARWINP: DW_AT_const_value [DW_FORM_udata] (1078523331)
; DARWINP: 0x[[DECL_C:[0-9a-f]+]]: DW_TAG_member
; DARWINP-NEXT: DW_AT_name {{.*}} "c"
; DARWINP: DW_AT_accessibility [DW_FORM_data1] (0x01)
@@ -183,19 +183,19 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
; DARWINP: DW_TAG_variable
; DARWINP-NEXT: DW_AT_specification {{.*}} {0x[[DECL_A]]}
; DARWINP-NEXT: DW_AT_location
-; DARWINP-NEXT: DW_AT_MIPS_linkage_name {{.*}} "_ZN1C1aE"
+; DARWINP-NEXT: DW_AT_linkage_name {{.*}} "_ZN1C1aE"
; DARWINP: DW_TAG_variable
; DARWINP-NEXT: DW_AT_specification {{.*}} {0x[[DECL_B]]}
; DARWINP-NEXT: DW_AT_location
-; DARWINP-NEXT: DW_AT_MIPS_linkage_name {{.*}} "_ZN1C1bE"
+; DARWINP-NEXT: DW_AT_linkage_name {{.*}} "_ZN1C1bE"
; DARWINP: DW_TAG_variable
; DARWINP-NEXT: DW_AT_specification {{.*}} {0x[[DECL_C]]}
; DARWINP-NEXT: DW_AT_location
-; DARWINP-NEXT: DW_AT_MIPS_linkage_name {{.*}} "_ZN1C1cE"
+; DARWINP-NEXT: DW_AT_linkage_name {{.*}} "_ZN1C1cE"
; ABSENT verifies that static member declarations do not have either
; DW_AT_location or DW_AT_data_member_location; also, variables do not
-; have DW_AT_const_value and constants do not have DW_AT_MIPS_linkage_name.
+; have DW_AT_const_value and constants do not have DW_AT_linkage_name.
;
; ABSENT: .debug_info contents:
; ABSENT: DW_TAG_member
@@ -203,24 +203,24 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
; ABSENT-NOT: DW_AT_const_value
; ABSENT-NOT: location
; ABSENT: DW_AT_name {{.*}} "const_a"
-; ABSENT-NOT: DW_AT_MIPS_linkage_name
+; ABSENT-NOT: DW_AT_linkage_name
; ABSENT-NOT: location
; ABSENT: DW_AT_name {{.*}} "b"
; ABSENT-NOT: DW_AT_const_value
; ABSENT-NOT: location
; ABSENT: DW_AT_name {{.*}} "const_b"
-; ABSENT-NOT: DW_AT_MIPS_linkage_name
+; ABSENT-NOT: DW_AT_linkage_name
; ABSENT-NOT: location
; ABSENT: DW_AT_name {{.*}} "c"
; ABSENT-NOT: DW_AT_const_value
; ABSENT-NOT: location
; ABSENT: DW_AT_name {{.*}} "const_c"
-; ABSENT-NOT: DW_AT_MIPS_linkage_name
+; ABSENT-NOT: DW_AT_linkage_name
; ABSENT-NOT: location
; While we're here, a normal member does not have a linkage name, constant
; value, or DW_AT_location.
; ABSENT: DW_AT_name {{.*}} "d"
-; ABSENT-NOT: DW_AT_MIPS_linkage_name
+; ABSENT-NOT: DW_AT_linkage_name
; ABSENT-NOT: DW_AT_const_value
; ABSENT-NOT: DW_AT_location
; ABSENT: NULL
@@ -232,24 +232,24 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
; DARWINA-NOT: DW_AT_const_value
; DARWINA-NOT: location
; DARWINA: DW_AT_name {{.*}} "const_a"
-; DARWINA-NOT: DW_AT_MIPS_linkage_name
+; DARWINA-NOT: DW_AT_linkage_name
; DARWINA-NOT: location
; DARWINA: DW_AT_name {{.*}} "b"
; DARWINA-NOT: DW_AT_const_value
; DARWINA-NOT: location
; DARWINA: DW_AT_name {{.*}} "const_b"
-; DARWINA-NOT: DW_AT_MIPS_linkage_name
+; DARWINA-NOT: DW_AT_linkage_name
; DARWINA-NOT: location
; DARWINA: DW_AT_name {{.*}} "c"
; DARWINA-NOT: DW_AT_const_value
; DARWINA-NOT: location
; DARWINA: DW_AT_name {{.*}} "const_c"
-; DARWINA-NOT: DW_AT_MIPS_linkage_name
+; DARWINA-NOT: DW_AT_linkage_name
; DARWINA-NOT: location
; While we're here, a normal member does not have a linkage name, constant
; value, or DW_AT_location.
; DARWINA: DW_AT_name {{.*}} "d"
-; DARWINA-NOT: DW_AT_MIPS_linkage_name
+; DARWINA-NOT: DW_AT_linkage_name
; DARWINA-NOT: DW_AT_const_value
; DARWINA-NOT: DW_AT_location
; DARWINA: NULL
diff --git a/test/DebugInfo/X86/debug-loc-asan.ll b/test/DebugInfo/X86/debug-loc-asan.ll
new file mode 100644
index 000000000000..b1980ecda2d8
--- /dev/null
+++ b/test/DebugInfo/X86/debug-loc-asan.ll
@@ -0,0 +1,186 @@
+; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+; Verify that we have correct debug info for local variables in code
+; instrumented with AddressSanitizer.
+
+; Generated from the source file test.cc:
+; int bar(int y) {
+; return y + 2;
+; }
+; with "clang++ -S -emit-llvm -fsanitize=address -O0 -g test.cc"
+
+; First, argument variable "y" resides in %rdi:
+; CHECK: DEBUG_VALUE: bar:y <- RDI
+
+; Then its address is stored in a location on the stack:
+; CHECK: movq %rdi, [[OFFSET:[0-9]+]](%rsp)
+; CHECK-NEXT: [[START_LABEL:.Ltmp[0-9]+]]
+; CHECK-NEXT: DEBUG_VALUE: bar:y <- [RSP+[[OFFSET]]]
+; This location should be valid until the end of the function.
+
+; CHECK: .Ldebug_loc{{[0-9]+}}:
+; We expect two location ranges for the variable.
+
+; First, it is stored in %rdi:
+; CHECK: .Lset{{[0-9]+}} = .Lfunc_begin0-.Lfunc_begin0
+; CHECK-NEXT: .quad .Lset{{[0-9]+}}
+; CHECK-NEXT: .Lset{{[0-9]+}} = [[START_LABEL]]-.Lfunc_begin0
+; CHECK-NEXT: .quad .Lset{{[0-9]+}}
+; CHECK: DW_OP_reg5
+
+; Then it's addressed via %rsp:
+; CHECK: .Lset{{[0-9]+}} = [[START_LABEL]]-.Lfunc_begin0
+; CHECK-NEXT: .quad .Lset{{[0-9]+}}
+; CHECK-NEXT: .Lset{{[0-9]+}} = .Lfunc_end0-.Lfunc_begin0
+; CHECK-NEXT: .quad .Lset{{[0-9]+}}
+; CHECK: DW_OP_breg7
+; CHECK-NEXT: [[OFFSET]]
+; CHECK: DW_OP_deref
+
+; ModuleID = 'test.cc'
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 1, void ()* @asan.module_ctor }]
+@__asan_option_detect_stack_use_after_return = external global i32
+@__asan_gen_ = private unnamed_addr constant [16 x i8] c"1 32 4 6 y.addr\00", align 1
+
+; Function Attrs: nounwind sanitize_address uwtable
+define i32 @_Z3bari(i32 %y) #0 {
+entry:
+ %MyAlloca = alloca [64 x i8], align 32
+ %0 = ptrtoint [64 x i8]* %MyAlloca to i64
+ %1 = load i32* @__asan_option_detect_stack_use_after_return
+ %2 = icmp ne i32 %1, 0
+ br i1 %2, label %3, label %5
+
+; <label>:3 ; preds = %entry
+ %4 = call i64 @__asan_stack_malloc_0(i64 64, i64 %0)
+ br label %5
+
+; <label>:5 ; preds = %entry, %3
+ %6 = phi i64 [ %0, %entry ], [ %4, %3 ]
+ %7 = add i64 %6, 32
+ %8 = inttoptr i64 %7 to i32*
+ %9 = inttoptr i64 %6 to i64*
+ store i64 1102416563, i64* %9
+ %10 = add i64 %6, 8
+ %11 = inttoptr i64 %10 to i64*
+ store i64 ptrtoint ([16 x i8]* @__asan_gen_ to i64), i64* %11
+ %12 = add i64 %6, 16
+ %13 = inttoptr i64 %12 to i64*
+ store i64 ptrtoint (i32 (i32)* @_Z3bari to i64), i64* %13
+ %14 = lshr i64 %6, 3
+ %15 = add i64 %14, 2147450880
+ %16 = add i64 %15, 0
+ %17 = inttoptr i64 %16 to i64*
+ store i64 -868083100587789839, i64* %17
+ %18 = ptrtoint i32* %8 to i64
+ %19 = lshr i64 %18, 3
+ %20 = add i64 %19, 2147450880
+ %21 = inttoptr i64 %20 to i8*
+ %22 = load i8* %21
+ %23 = icmp ne i8 %22, 0
+ call void @llvm.dbg.declare(metadata !{i32* %8}, metadata !12)
+ br i1 %23, label %24, label %30
+
+; <label>:24 ; preds = %5
+ %25 = and i64 %18, 7
+ %26 = add i64 %25, 3
+ %27 = trunc i64 %26 to i8
+ %28 = icmp sge i8 %27, %22
+ br i1 %28, label %29, label %30
+
+; <label>:29 ; preds = %24
+ call void @__asan_report_store4(i64 %18)
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:30 ; preds = %24, %5
+ store i32 %y, i32* %8, align 4
+ %31 = ptrtoint i32* %8 to i64, !dbg !13
+ %32 = lshr i64 %31, 3, !dbg !13
+ %33 = add i64 %32, 2147450880, !dbg !13
+ %34 = inttoptr i64 %33 to i8*, !dbg !13
+ %35 = load i8* %34, !dbg !13
+ %36 = icmp ne i8 %35, 0, !dbg !13
+ br i1 %36, label %37, label %43, !dbg !13
+
+; <label>:37 ; preds = %30
+ %38 = and i64 %31, 7, !dbg !13
+ %39 = add i64 %38, 3, !dbg !13
+ %40 = trunc i64 %39 to i8, !dbg !13
+ %41 = icmp sge i8 %40, %35, !dbg !13
+ br i1 %41, label %42, label %43
+
+; <label>:42 ; preds = %37
+ call void @__asan_report_load4(i64 %31), !dbg !13
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:43 ; preds = %37, %30
+ %44 = load i32* %8, align 4, !dbg !13
+ %add = add nsw i32 %44, 2, !dbg !13
+ store i64 1172321806, i64* %9, !dbg !13
+ %45 = icmp ne i64 %6, %0, !dbg !13
+ br i1 %45, label %46, label %53, !dbg !13
+
+; <label>:46 ; preds = %43
+ %47 = add i64 %15, 0, !dbg !13
+ %48 = inttoptr i64 %47 to i64*, !dbg !13
+ store i64 -723401728380766731, i64* %48, !dbg !13
+ %49 = add i64 %6, 56, !dbg !13
+ %50 = inttoptr i64 %49 to i64*, !dbg !13
+ %51 = load i64* %50, !dbg !13
+ %52 = inttoptr i64 %51 to i8*, !dbg !13
+ store i8 0, i8* %52, !dbg !13
+ br label %56, !dbg !13
+
+; <label>:53 ; preds = %43
+ %54 = add i64 %15, 0, !dbg !13
+ %55 = inttoptr i64 %54 to i64*, !dbg !13
+ store i64 0, i64* %55, !dbg !13
+ br label %56, !dbg !13
+
+; <label>:56 ; preds = %53, %46
+ ret i32 %add, !dbg !13
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+define internal void @asan.module_ctor() {
+ call void @__asan_init_v3()
+ ret void
+}
+
+declare void @__asan_init_v3()
+
+declare void @__asan_report_load4(i64)
+
+declare void @__asan_report_store4(i64)
+
+declare i64 @__asan_stack_malloc_0(i64, i64)
+
+attributes #0 = { nounwind sanitize_address uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (209308)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/llvm_cmake_gcc/test.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"test.cc", metadata !"/llvm_cmake_gcc"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"_Z3bari", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @_Z3bari, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [bar]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/llvm_cmake_gcc/test.cc]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5.0 (209308)"}
+!12 = metadata !{i32 786689, metadata !4, metadata !"y", metadata !5, i32 16777217, metadata !8, i32 0, i32 0, metadata !14} ; [ DW_TAG_arg_variable ] [y] [line 1]
+!13 = metadata !{i32 2, i32 0, metadata !4, null}
+!14 = metadata !{i64 2}
diff --git a/test/DebugInfo/X86/debug-loc-offset.ll b/test/DebugInfo/X86/debug-loc-offset.ll
new file mode 100644
index 000000000000..7866d0eac5c9
--- /dev/null
+++ b/test/DebugInfo/X86/debug-loc-offset.ll
@@ -0,0 +1,153 @@
+; RUN: llc %s -filetype=obj -O0 -mtriple=i386-unknown-linux-gnu -dwarf-version=4 -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; From the code:
+
+; debug-loc-offset1.cc
+; int bar (int b) {
+; return b+4;
+; }
+
+; debug-loc-offset2.cc
+; struct A {
+; int var;
+; virtual char foo();
+; };
+
+; void baz(struct A a) {
+; int z = 2;
+; if (a.var > 2)
+; z++;
+; if (a.foo() == 'a')
+; z++;
+; }
+
+; Compiled separately for i386-pc-linux-gnu and linked together.
+; This ensures that we have multiple compile units so that we can verify that
+; debug_loc entries are relative to the low_pc of the CU. The loc entry for
+; the byval argument in debug-loc-offset2.cc is in the second CU and so should have
+; an offset relative to that CU rather than from the beginning of the text
+; section.
+
+; Checking that we have two compile units with two sets of high/low_pc.
+; CHECK: .debug_info contents
+; CHECK: DW_TAG_compile_unit
+; CHECK: DW_AT_low_pc
+; CHECK: DW_AT_high_pc
+
+; CHECK: DW_TAG_compile_unit
+; CHECK: DW_AT_low_pc
+; CHECK: DW_AT_high_pc
+
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name [DW_FORM_strp]{{.*}}"_Z3baz1A"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_location [DW_FORM_sec_offset] (0x00000000)
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name [DW_FORM_strp]{{.*}}"a"
+
+; CHECK: DW_TAG_variable
+; CHECK: DW_AT_location [DW_FORM_exprloc]
+; CHECK-NOT: DW_AT_location
+
+; CHECK: .debug_loc contents:
+; CHECK: 0x00000000: Beginning address offset: 0x0000000000000000
+; CHECK: Ending address offset: 0x000000000000001a
+
+%struct.A = type { i32 (...)**, i32 }
+
+; Function Attrs: nounwind
+define i32 @_Z3bari(i32 %b) #0 {
+entry:
+ %b.addr = alloca i32, align 4
+ store i32 %b, i32* %b.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %b.addr}, metadata !21), !dbg !22
+ %0 = load i32* %b.addr, align 4, !dbg !23
+ %add = add nsw i32 %0, 4, !dbg !23
+ ret i32 %add, !dbg !23
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+define void @_Z3baz1A(%struct.A* %a) #2 {
+entry:
+ %z = alloca i32, align 4
+ call void @llvm.dbg.declare(metadata !{%struct.A* %a}, metadata !24), !dbg !25
+ call void @llvm.dbg.declare(metadata !{i32* %z}, metadata !26), !dbg !27
+ store i32 2, i32* %z, align 4, !dbg !27
+ %var = getelementptr inbounds %struct.A* %a, i32 0, i32 1, !dbg !28
+ %0 = load i32* %var, align 4, !dbg !28
+ %cmp = icmp sgt i32 %0, 2, !dbg !28
+ br i1 %cmp, label %if.then, label %if.end, !dbg !28
+
+if.then: ; preds = %entry
+ %1 = load i32* %z, align 4, !dbg !30
+ %inc = add nsw i32 %1, 1, !dbg !30
+ store i32 %inc, i32* %z, align 4, !dbg !30
+ br label %if.end, !dbg !30
+
+if.end: ; preds = %if.then, %entry
+ %call = call signext i8 @_ZN1A3fooEv(%struct.A* %a), !dbg !31
+ %conv = sext i8 %call to i32, !dbg !31
+ %cmp1 = icmp eq i32 %conv, 97, !dbg !31
+ br i1 %cmp1, label %if.then2, label %if.end4, !dbg !31
+
+if.then2: ; preds = %if.end
+ %2 = load i32* %z, align 4, !dbg !33
+ %inc3 = add nsw i32 %2, 1, !dbg !33
+ store i32 %inc3, i32* %z, align 4, !dbg !33
+ br label %if.end4, !dbg !33
+
+if.end4: ; preds = %if.then2, %if.end
+ ret void, !dbg !34
+}
+
+declare signext i8 @_ZN1A3fooEv(%struct.A*) #2
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0, !9}
+!llvm.module.flags = !{!18, !19}
+!llvm.ident = !{!20, !20}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (210479)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/llvm_cmake_gcc/debug-loc-offset1.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"debug-loc-offset1.cc", metadata !"/llvm_cmake_gcc"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"_Z3bari", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @_Z3bari, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [bar]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/llvm_cmake_gcc/debug-loc-offset1.cc]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786449, metadata !10, i32 4, metadata !"clang version 3.5.0 (210479)", i1 false, metadata !"", i32 0, metadata !2, metadata !11, metadata !13, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/llvm_cmake_gcc/debug-loc-offset2.cc] [DW_LANG_C_plus_plus]
+!10 = metadata !{metadata !"debug-loc-offset2.cc", metadata !"/llvm_cmake_gcc"}
+!11 = metadata !{metadata !12}
+!12 = metadata !{i32 786451, metadata !10, null, metadata !"A", i32 1, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_structure_type ] [A] [line 1, size 0, align 0, offset 0] [decl] [from ]
+!13 = metadata !{metadata !14}
+!14 = metadata !{i32 786478, metadata !10, metadata !15, metadata !"baz", metadata !"baz", metadata !"_Z3baz1A", i32 6, metadata !16, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.A*)* @_Z3baz1A, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [baz]
+!15 = metadata !{i32 786473, metadata !10} ; [ DW_TAG_file_type ] [/llvm_cmake_gcc/debug-loc-offset2.cc]
+!16 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !17, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!17 = metadata !{null, metadata !12}
+!18 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!19 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!20 = metadata !{metadata !"clang version 3.5.0 (210479)"}
+!21 = metadata !{i32 786689, metadata !4, metadata !"b", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [b] [line 1]
+!22 = metadata !{i32 1, i32 0, metadata !4, null}
+!23 = metadata !{i32 2, i32 0, metadata !4, null}
+!24 = metadata !{i32 786689, metadata !14, metadata !"a", metadata !15, i32 16777222, metadata !"_ZTS1A", i32 8192, i32 0} ; [ DW_TAG_arg_variable ] [a] [line 6]
+!25 = metadata !{i32 6, i32 0, metadata !14, null}
+!26 = metadata !{i32 786688, metadata !14, metadata !"z", metadata !15, i32 7, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [z] [line 7]
+!27 = metadata !{i32 7, i32 0, metadata !14, null}
+!28 = metadata !{i32 8, i32 0, metadata !29, null} ; [ DW_TAG_imported_declaration ]
+!29 = metadata !{i32 786443, metadata !10, metadata !14, i32 8, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/llvm_cmake_gcc/debug-loc-offset2.cc]
+!30 = metadata !{i32 9, i32 0, metadata !29, null}
+!31 = metadata !{i32 10, i32 0, metadata !32, null}
+!32 = metadata !{i32 786443, metadata !10, metadata !14, i32 10, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/llvm_cmake_gcc/debug-loc-offset2.cc]
+!33 = metadata !{i32 11, i32 0, metadata !32, null}
+!34 = metadata !{i32 12, i32 0, metadata !14, null}
diff --git a/test/DebugInfo/X86/debug-ranges-offset.ll b/test/DebugInfo/X86/debug-ranges-offset.ll
new file mode 100644
index 000000000000..365ba171a0d9
--- /dev/null
+++ b/test/DebugInfo/X86/debug-ranges-offset.ll
@@ -0,0 +1,241 @@
+; RUN: llc -filetype=obj -mtriple=x86_64-pc-linux-gnu %s -o %t
+; RUN: llvm-readobj --relocations %t | FileCheck %s
+
+; Check that we don't have any relocations in the ranges section -
+; to show that we're producing this as a relative offset to the
+; low_pc for the compile unit.
+; CHECK-NOT: .rela.debug_ranges
+
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 0, void ()* @__msan_init }]
+@str = private unnamed_addr constant [4 x i8] c"zzz\00"
+@__msan_retval_tls = external thread_local(initialexec) global [8 x i64]
+@__msan_retval_origin_tls = external thread_local(initialexec) global i32
+@__msan_param_tls = external thread_local(initialexec) global [1000 x i64]
+@__msan_param_origin_tls = external thread_local(initialexec) global [1000 x i32]
+@__msan_va_arg_tls = external thread_local(initialexec) global [1000 x i64]
+@__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
+@__msan_origin_tls = external thread_local(initialexec) global i32
+@__executable_start = external hidden global i32
+@_end = external hidden global i32
+
+; Function Attrs: sanitize_memory uwtable
+define void @_Z1fv() #0 {
+entry:
+ %p = alloca i32*, align 8
+ %0 = ptrtoint i32** %p to i64, !dbg !19
+ %1 = and i64 %0, -70368744177672, !dbg !19
+ %2 = inttoptr i64 %1 to i64*, !dbg !19
+ store i64 -1, i64* %2, align 8, !dbg !19
+ store i64 0, i64* getelementptr inbounds ([1000 x i64]* @__msan_param_tls, i64 0, i64 0), align 8, !dbg !19
+ store i64 0, i64* getelementptr inbounds ([8 x i64]* @__msan_retval_tls, i64 0, i64 0), align 8, !dbg !19
+ %call = call i8* @_Znwm(i64 4) #4, !dbg !19
+ %_msret = load i64* getelementptr inbounds ([8 x i64]* @__msan_retval_tls, i64 0, i64 0), align 8, !dbg !19
+ %3 = bitcast i8* %call to i32*, !dbg !19
+ tail call void @llvm.dbg.value(metadata !{i32* %3}, i64 0, metadata !9), !dbg !19
+ %4 = inttoptr i64 %1 to i64*, !dbg !19
+ store i64 %_msret, i64* %4, align 8, !dbg !19
+ store volatile i32* %3, i32** %p, align 8, !dbg !19
+ tail call void @llvm.dbg.value(metadata !{i32** %p}, i64 0, metadata !9), !dbg !19
+ %p.0.p.0. = load volatile i32** %p, align 8, !dbg !20
+ %_msld = load i64* %4, align 8, !dbg !20
+ %_mscmp = icmp eq i64 %_msld, 0, !dbg !20
+ br i1 %_mscmp, label %6, label %5, !dbg !20, !prof !22
+
+; <label>:5 ; preds = %entry
+ call void @__msan_warning_noreturn(), !dbg !20
+ call void asm sideeffect "", ""() #3, !dbg !20
+ unreachable, !dbg !20
+
+; <label>:6 ; preds = %entry
+ %7 = load i32* %p.0.p.0., align 4, !dbg !20, !tbaa !23
+ %8 = ptrtoint i32* %p.0.p.0. to i64, !dbg !20
+ %9 = and i64 %8, -70368744177665, !dbg !20
+ %10 = inttoptr i64 %9 to i32*, !dbg !20
+ %_msld2 = load i32* %10, align 4, !dbg !20
+ %11 = icmp ne i32 %_msld2, 0, !dbg !20
+ %12 = xor i32 %_msld2, -1, !dbg !20
+ %13 = and i32 %7, %12, !dbg !20
+ %14 = icmp eq i32 %13, 0, !dbg !20
+ %_msprop_icmp = and i1 %11, %14, !dbg !20
+ br i1 %_msprop_icmp, label %15, label %16, !dbg !20, !prof !27
+
+; <label>:15 ; preds = %6
+ call void @__msan_warning_noreturn(), !dbg !20
+ call void asm sideeffect "", ""() #3, !dbg !20
+ unreachable, !dbg !20
+
+; <label>:16 ; preds = %6
+ %tobool = icmp eq i32 %7, 0, !dbg !20
+ br i1 %tobool, label %if.end, label %if.then, !dbg !20
+
+if.then: ; preds = %16
+ store i64 0, i64* getelementptr inbounds ([1000 x i64]* @__msan_param_tls, i64 0, i64 0), align 8, !dbg !28
+ store i32 0, i32* bitcast ([8 x i64]* @__msan_retval_tls to i32*), align 8, !dbg !28
+ %puts = call i32 @puts(i8* getelementptr inbounds ([4 x i8]* @str, i64 0, i64 0)), !dbg !28
+ br label %if.end, !dbg !28
+
+if.end: ; preds = %16, %if.then
+ ret void, !dbg !29
+}
+
+; Function Attrs: nobuiltin
+declare i8* @_Znwm(i64) #1
+
+; Function Attrs: sanitize_memory uwtable
+define i32 @main() #0 {
+entry:
+ %p.i = alloca i32*, align 8
+ %0 = ptrtoint i32** %p.i to i64, !dbg !30
+ %1 = and i64 %0, -70368744177672, !dbg !30
+ %2 = inttoptr i64 %1 to i64*, !dbg !30
+ store i64 -1, i64* %2, align 8, !dbg !30
+ %p.i.0..sroa_cast = bitcast i32** %p.i to i8*, !dbg !30
+ call void @llvm.lifetime.start(i64 8, i8* %p.i.0..sroa_cast), !dbg !30
+ store i64 0, i64* getelementptr inbounds ([1000 x i64]* @__msan_param_tls, i64 0, i64 0), align 8, !dbg !30
+ store i64 0, i64* getelementptr inbounds ([8 x i64]* @__msan_retval_tls, i64 0, i64 0), align 8, !dbg !30
+ %call.i = call i8* @_Znwm(i64 4) #4, !dbg !30
+ %_msret = load i64* getelementptr inbounds ([8 x i64]* @__msan_retval_tls, i64 0, i64 0), align 8, !dbg !30
+ %3 = bitcast i8* %call.i to i32*, !dbg !30
+ tail call void @llvm.dbg.value(metadata !{i32* %3}, i64 0, metadata !32), !dbg !30
+ %4 = inttoptr i64 %1 to i64*, !dbg !30
+ store i64 %_msret, i64* %4, align 8, !dbg !30
+ store volatile i32* %3, i32** %p.i, align 8, !dbg !30
+ tail call void @llvm.dbg.value(metadata !{i32** %p.i}, i64 0, metadata !32), !dbg !30
+ %p.i.0.p.0.p.0..i = load volatile i32** %p.i, align 8, !dbg !33
+ %_msld = load i64* %4, align 8, !dbg !33
+ %_mscmp = icmp eq i64 %_msld, 0, !dbg !33
+ br i1 %_mscmp, label %6, label %5, !dbg !33, !prof !22
+
+; <label>:5 ; preds = %entry
+ call void @__msan_warning_noreturn(), !dbg !33
+ call void asm sideeffect "", ""() #3, !dbg !33
+ unreachable, !dbg !33
+
+; <label>:6 ; preds = %entry
+ %7 = load i32* %p.i.0.p.0.p.0..i, align 4, !dbg !33, !tbaa !23
+ %8 = ptrtoint i32* %p.i.0.p.0.p.0..i to i64, !dbg !33
+ %9 = and i64 %8, -70368744177665, !dbg !33
+ %10 = inttoptr i64 %9 to i32*, !dbg !33
+ %_msld2 = load i32* %10, align 4, !dbg !33
+ %11 = icmp ne i32 %_msld2, 0, !dbg !33
+ %12 = xor i32 %_msld2, -1, !dbg !33
+ %13 = and i32 %7, %12, !dbg !33
+ %14 = icmp eq i32 %13, 0, !dbg !33
+ %_msprop_icmp = and i1 %11, %14, !dbg !33
+ br i1 %_msprop_icmp, label %15, label %16, !dbg !33, !prof !27
+
+; <label>:15 ; preds = %6
+ call void @__msan_warning_noreturn(), !dbg !33
+ call void asm sideeffect "", ""() #3, !dbg !33
+ unreachable, !dbg !33
+
+; <label>:16 ; preds = %6
+ %tobool.i = icmp eq i32 %7, 0, !dbg !33
+ br i1 %tobool.i, label %_Z1fv.exit, label %if.then.i, !dbg !33
+
+if.then.i: ; preds = %16
+ store i64 0, i64* getelementptr inbounds ([1000 x i64]* @__msan_param_tls, i64 0, i64 0), align 8, !dbg !34
+ store i32 0, i32* bitcast ([8 x i64]* @__msan_retval_tls to i32*), align 8, !dbg !34
+ %puts.i = call i32 @puts(i8* getelementptr inbounds ([4 x i8]* @str, i64 0, i64 0)), !dbg !34
+ br label %_Z1fv.exit, !dbg !34
+
+_Z1fv.exit: ; preds = %16, %if.then.i
+ call void @llvm.lifetime.end(i64 8, i8* %p.i.0..sroa_cast), !dbg !35
+ store i32 0, i32* bitcast ([8 x i64]* @__msan_retval_tls to i32*), align 8, !dbg !36
+ ret i32 0, !dbg !36
+}
+
+declare void @__msan_init()
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
+
+; Function Attrs: nounwind
+declare i32 @puts(i8* nocapture readonly) #3
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #3
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #3
+
+declare void @__msan_warning_noreturn()
+
+declare void @__msan_maybe_warning_1(i8, i32)
+
+declare void @__msan_maybe_store_origin_1(i8, i8*, i32)
+
+declare void @__msan_maybe_warning_2(i16, i32)
+
+declare void @__msan_maybe_store_origin_2(i16, i8*, i32)
+
+declare void @__msan_maybe_warning_4(i32, i32)
+
+declare void @__msan_maybe_store_origin_4(i32, i8*, i32)
+
+declare void @__msan_maybe_warning_8(i64, i32)
+
+declare void @__msan_maybe_store_origin_8(i64, i8*, i32)
+
+declare void @__msan_set_alloca_origin4(i8*, i64, i8*, i64)
+
+declare void @__msan_poison_stack(i8*, i64)
+
+declare i32 @__msan_chain_origin(i32)
+
+declare i8* @__msan_memmove(i8*, i8*, i64)
+
+declare i8* @__msan_memcpy(i8*, i8*, i64)
+
+declare i8* @__msan_memset(i8*, i32, i64)
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3
+
+attributes #0 = { sanitize_memory uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nobuiltin "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
+attributes #4 = { builtin }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!16, !17}
+!llvm.ident = !{!18}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (trunk 207243) (llvm/trunk 207259)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"foo.cpp", metadata !"/usr/local/google/home/echristo/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !13}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f", metadata !"f", metadata !"_Z1fv", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @_Z1fv, null, null, metadata !8, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [f]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/foo.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{metadata !9}
+!9 = metadata !{i32 786688, metadata !4, metadata !"p", metadata !5, i32 4, metadata !10, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [p] [line 4]
+!10 = metadata !{i32 786485, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !11} ; [ DW_TAG_volatile_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !12} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!12 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!13 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 9, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @main, null, null, metadata !2, i32 9} ; [ DW_TAG_subprogram ] [line 9] [def] [main]
+!14 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!15 = metadata !{metadata !12}
+!16 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!17 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!18 = metadata !{metadata !"clang version 3.5.0 (trunk 207243) (llvm/trunk 207259)"}
+!19 = metadata !{i32 4, i32 0, metadata !4, null}
+!20 = metadata !{i32 5, i32 0, metadata !21, null}
+!21 = metadata !{i32 786443, metadata !1, metadata !4, i32 5, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/foo.cpp]
+!22 = metadata !{metadata !"branch_weights", i32 1000, i32 1}
+!23 = metadata !{metadata !24, metadata !24, i64 0}
+!24 = metadata !{metadata !"int", metadata !25, i64 0}
+!25 = metadata !{metadata !"omnipotent char", metadata !26, i64 0}
+!26 = metadata !{metadata !"Simple C/C++ TBAA"}
+!27 = metadata !{metadata !"branch_weights", i32 1, i32 1000}
+!28 = metadata !{i32 6, i32 0, metadata !21, null}
+!29 = metadata !{i32 7, i32 0, metadata !4, null}
+!30 = metadata !{i32 4, i32 0, metadata !4, metadata !31}
+!31 = metadata !{i32 10, i32 0, metadata !13, null}
+!32 = metadata !{i32 786688, metadata !4, metadata !"p", metadata !5, i32 4, metadata !10, i32 0, metadata !31} ; [ DW_TAG_auto_variable ] [p] [line 4]
+!33 = metadata !{i32 5, i32 0, metadata !21, metadata !31}
+!34 = metadata !{i32 6, i32 0, metadata !21, metadata !31}
+!35 = metadata !{i32 7, i32 0, metadata !4, metadata !31}
+!36 = metadata !{i32 11, i32 0, metadata !13, null}
diff --git a/test/DebugInfo/X86/decl-derived-member.ll b/test/DebugInfo/X86/decl-derived-member.ll
new file mode 100644
index 000000000000..4035602fb25f
--- /dev/null
+++ b/test/DebugInfo/X86/decl-derived-member.ll
@@ -0,0 +1,144 @@
+; REQUIRES: object-emission
+
+; RUN: llc -mtriple x86_64-pc-linux -O0 -filetype=obj %s -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; Testcase from:
+; struct base {
+; virtual ~base();
+; };
+; struct foo {
+; base b;
+; };
+; foo f;
+
+; Where member b should be seen as a field at an offset and not a bitfield.
+
+; CHECK: DW_TAG_member
+; CHECK: DW_AT_name{{.*}}"b"
+; CHECK-NOT: DW_AT_bit_offset
+
+%struct.foo = type { %struct.base }
+%struct.base = type { i32 (...)** }
+@f = global %struct.foo zeroinitializer, align 8
+@__dso_handle = external global i8
+@_ZTV4base = external unnamed_addr constant [4 x i8*]
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
+define internal void @__cxx_global_var_init() section ".text.startup" {
+entry:
+ call void @_ZN3fooC2Ev(%struct.foo* @f) #2, !dbg !35
+ %0 = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.foo*)* @_ZN3fooD2Ev to void (i8*)*), i8* bitcast (%struct.foo* @f to i8*), i8* @__dso_handle) #2, !dbg !35
+ ret void, !dbg !35
+}
+
+; Function Attrs: inlinehint nounwind uwtable
+define linkonce_odr void @_ZN3fooC2Ev(%struct.foo* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %struct.foo*, align 8
+ store %struct.foo* %this, %struct.foo** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.foo** %this.addr}, metadata !36), !dbg !38
+ %this1 = load %struct.foo** %this.addr
+ %b = getelementptr inbounds %struct.foo* %this1, i32 0, i32 0, !dbg !39
+ call void @_ZN4baseC2Ev(%struct.base* %b) #2, !dbg !39
+ ret void, !dbg !39
+}
+
+; Function Attrs: inlinehint uwtable
+define linkonce_odr void @_ZN3fooD2Ev(%struct.foo* %this) unnamed_addr #1 align 2 {
+entry:
+ %this.addr = alloca %struct.foo*, align 8
+ store %struct.foo* %this, %struct.foo** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.foo** %this.addr}, metadata !40), !dbg !41
+ %this1 = load %struct.foo** %this.addr
+ %b = getelementptr inbounds %struct.foo* %this1, i32 0, i32 0, !dbg !42
+ call void @_ZN4baseD1Ev(%struct.base* %b), !dbg !42
+ ret void, !dbg !44
+}
+
+; Function Attrs: nounwind
+declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*) #2
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #3
+
+declare void @_ZN4baseD1Ev(%struct.base*) #4
+
+; Function Attrs: inlinehint nounwind uwtable
+define linkonce_odr void @_ZN4baseC2Ev(%struct.base* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %struct.base*, align 8
+ store %struct.base* %this, %struct.base** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.base** %this.addr}, metadata !45), !dbg !47
+ %this1 = load %struct.base** %this.addr
+ %0 = bitcast %struct.base* %this1 to i8***, !dbg !48
+ store i8** getelementptr inbounds ([4 x i8*]* @_ZTV4base, i64 0, i64 2), i8*** %0, !dbg !48
+ ret void, !dbg !48
+}
+
+define internal void @_GLOBAL__I_a() section ".text.startup" {
+entry:
+ call void @__cxx_global_var_init(), !dbg !49
+ ret void, !dbg !49
+}
+
+attributes #0 = { inlinehint nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { inlinehint uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+attributes #3 = { nounwind readnone }
+attributes #4 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!32, !33}
+!llvm.ident = !{!34}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (trunk 203673) (llvm/trunk 203681)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !8, metadata !30, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/foo.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"foo.cc", metadata !"/usr/local/google/home/echristo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !7}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"foo", i32 5, i64 64, i64 64, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 5, size 64, align 64, offset 0] [def] [from ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 786445, metadata !1, metadata !"_ZTS3foo", metadata !"b", i32 6, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS4base"} ; [ DW_TAG_member ] [b] [line 6, size 64, align 64, offset 0] [from _ZTS4base]
+!7 = metadata !{i32 786451, metadata !1, null, metadata !"base", i32 1, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, metadata !"_ZTS4base"} ; [ DW_TAG_structure_type ] [base] [line 1, size 0, align 0, offset 0] [decl] [from ]
+!8 = metadata !{metadata !9, metadata !13, metadata !19, metadata !22, metadata !28}
+!9 = metadata !{i32 786478, metadata !1, metadata !10, metadata !"__cxx_global_var_init", metadata !"__cxx_global_var_init", metadata !"", i32 9, metadata !11, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @__cxx_global_var_init, null, null, metadata !2, i32 9} ; [ DW_TAG_subprogram ] [line 9] [local] [def] [__cxx_global_var_init]
+!10 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/foo.cc]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{null}
+!13 = metadata !{i32 786478, metadata !1, metadata !"_ZTS3foo", metadata !"~foo", metadata !"~foo", metadata !"_ZN3fooD2Ev", i32 5, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 320, i1 false, void (%struct.foo*)* @_ZN3fooD2Ev, null, metadata !17, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [~foo]
+!14 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!15 = metadata !{null, metadata !16}
+!16 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS3foo"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS3foo]
+!17 = metadata !{i32 786478, null, metadata !"_ZTS3foo", metadata !"~foo", metadata !"~foo", metadata !"", i32 0, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i32 320, i1 false, null, null, i32 0, metadata !18, i32 0} ; [ DW_TAG_subprogram ] [line 0] [~foo]
+!18 = metadata !{i32 786468}
+!19 = metadata !{i32 786478, metadata !1, metadata !"_ZTS3foo", metadata !"foo", metadata !"foo", metadata !"_ZN3fooC2Ev", i32 5, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 320, i1 false, void (%struct.foo*)* @_ZN3fooC2Ev, null, metadata !20, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [foo]
+!20 = metadata !{i32 786478, null, metadata !"_ZTS3foo", metadata !"foo", metadata !"foo", metadata !"", i32 0, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i32 320, i1 false, null, null, i32 0, metadata !21, i32 0} ; [ DW_TAG_subprogram ] [line 0] [foo]
+!21 = metadata !{i32 786468}
+!22 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4base", metadata !"base", metadata !"base", metadata !"_ZN4baseC2Ev", i32 1, metadata !23, i1 false, i1 true, i32 0, i32 0, null, i32 320, i1 false, void (%struct.base*)* @_ZN4baseC2Ev, null, metadata !26, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [base]
+!23 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !24, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!24 = metadata !{null, metadata !25}
+!25 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS4base"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS4base]
+!26 = metadata !{i32 786478, null, metadata !"_ZTS4base", metadata !"base", metadata !"base", metadata !"", i32 0, metadata !23, i1 false, i1 false, i32 0, i32 0, null, i32 320, i1 false, null, null, i32 0, metadata !27, i32 0} ; [ DW_TAG_subprogram ] [line 0] [base]
+!27 = metadata !{i32 786468}
+!28 = metadata !{i32 786478, metadata !1, metadata !10, metadata !"", metadata !"", metadata !"_GLOBAL__I_a", i32 1, metadata !29, i1 true, i1 true, i32 0, i32 0, null, i32 64, i1 false, void ()* @_GLOBAL__I_a, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [local] [def]
+!29 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!30 = metadata !{metadata !31}
+!31 = metadata !{i32 786484, i32 0, null, metadata !"f", metadata !"f", metadata !"", metadata !10, i32 9, metadata !4, i32 0, i32 1, %struct.foo* @f, null} ; [ DW_TAG_variable ] [f] [line 9] [def]
+!32 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!33 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!34 = metadata !{metadata !"clang version 3.5.0 (trunk 203673) (llvm/trunk 203681)"}
+!35 = metadata !{i32 9, i32 0, metadata !9, null}
+!36 = metadata !{i32 786689, metadata !19, metadata !"this", null, i32 16777216, metadata !37, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!37 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS3foo"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS3foo]
+!38 = metadata !{i32 0, i32 0, metadata !19, null}
+!39 = metadata !{i32 5, i32 0, metadata !19, null}
+!40 = metadata !{i32 786689, metadata !13, metadata !"this", null, i32 16777216, metadata !37, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!41 = metadata !{i32 0, i32 0, metadata !13, null}
+!42 = metadata !{i32 5, i32 0, metadata !43, null}
+!43 = metadata !{i32 786443, metadata !1, metadata !13, i32 5, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/foo.cc]
+!44 = metadata !{i32 5, i32 0, metadata !13, null}
+!45 = metadata !{i32 786689, metadata !22, metadata !"this", null, i32 16777216, metadata !46, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!46 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS4base"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS4base]
+!47 = metadata !{i32 0, i32 0, metadata !22, null}
+!48 = metadata !{i32 1, i32 0, metadata !22, null}
+!49 = metadata !{i32 1, i32 0, metadata !28, null}
diff --git a/test/DebugInfo/X86/discriminator.ll b/test/DebugInfo/X86/discriminator.ll
new file mode 100644
index 000000000000..aafdae1626d5
--- /dev/null
+++ b/test/DebugInfo/X86/discriminator.ll
@@ -0,0 +1,63 @@
+; RUN: llc -mtriple=i386-unknown-unknown -mcpu=core2 %s -o %t -filetype=obj
+; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s
+;
+; Generated from:
+;
+; int foo(int i) {
+; if (i < 10) return i - 1;
+; return 0;
+; }
+;
+; Manually generated debug nodes !14 and !15 to incorporate an
+; arbitrary discriminator with value 42.
+
+define i32 @foo(i32 %i) #0 {
+entry:
+ %retval = alloca i32, align 4
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32* %i.addr, align 4, !dbg !10
+ %cmp = icmp slt i32 %0, 10, !dbg !10
+ br i1 %cmp, label %if.then, label %if.end, !dbg !10
+
+if.then: ; preds = %entry
+ %1 = load i32* %i.addr, align 4, !dbg !14
+ %sub = sub nsw i32 %1, 1, !dbg !14
+ store i32 %sub, i32* %retval, !dbg !14
+ br label %return, !dbg !14
+
+if.end: ; preds = %entry
+ store i32 0, i32* %retval, !dbg !12
+ br label %return, !dbg !12
+
+return: ; preds = %if.end, %if.then
+ %2 = load i32* %retval, !dbg !13
+ ret i32 %2, !dbg !13
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [./discriminator.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"discriminator.c", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [./discriminator.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5 "}
+!10 = metadata !{i32 2, i32 0, metadata !11, null}
+!11 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./discriminator.c]
+!12 = metadata !{i32 3, i32 0, metadata !4, null}
+!13 = metadata !{i32 4, i32 0, metadata !4, null}
+!14 = metadata !{i32 2, i32 0, metadata !15, null}
+!15 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 42, i32 1} ; [ DW_TAG_lexical_block ] [./discriminator.c]
+
+; CHECK: Address Line Column File ISA Discriminator Flags
+; CHECK: ------------------ ------ ------ ------ --- ------------- -------------
+; CHECK: 0x0000000000000011 2 0 1 0 42 is_stmt
diff --git a/test/DebugInfo/X86/dwarf-aranges-no-dwarf-labels.ll b/test/DebugInfo/X86/dwarf-aranges-no-dwarf-labels.ll
index 42a57bfed725..021b89e1e1ca 100644
--- a/test/DebugInfo/X86/dwarf-aranges-no-dwarf-labels.ll
+++ b/test/DebugInfo/X86/dwarf-aranges-no-dwarf-labels.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -generate-arange-section < %s | FileCheck %s
; CHECK: .short 2 # DWARF Arange version number
; CHECK: # Segment Size
@@ -62,7 +62,7 @@ attributes #2 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (191881)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !17, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/debug_ranges/a.cc] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"tmp/debug_ranges/a.cc", metadata !"/"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !11, metadata !14}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"_Z3fooi", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @_Z3fooi, null, null, metadata !9, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [foo]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/debug_ranges/a.cc]
diff --git a/test/DebugInfo/X86/dwarf-aranges.ll b/test/DebugInfo/X86/dwarf-aranges.ll
index 203afc71d830..9ad618507a25 100644
--- a/test/DebugInfo/X86/dwarf-aranges.ll
+++ b/test/DebugInfo/X86/dwarf-aranges.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -generate-arange-section < %s | FileCheck %s
; -- header --
@@ -7,10 +7,7 @@
; CHECK-NEXT: .byte 8 # Address Size (in bytes)
; CHECK-NEXT: .byte 0 # Segment Size (in bytes)
; -- alignment --
-; CHECK-NEXT: .byte
-; CHECK-NEXT: .byte
-; CHECK-NEXT: .byte
-; CHECK-NEXT: .byte
+; CHECK-NEXT: .zero 4,255
; <common symbols> - it should have made one span for each symbol.
; CHECK-NEXT: .quad some_bss
@@ -18,18 +15,18 @@
; <data section> - it should have made one span covering all vars in this CU.
; CHECK-NEXT: .quad some_data
-; CHECK-NEXT: .Lset0 = .Ldebug_end1-some_data
-; CHECK-NEXT: .quad .Lset0
+; CHECK-NEXT: [[R1:\.[A-Za-z0-9]*]] = .Ldebug_end1-some_data
+; CHECK-NEXT: .quad [[R1]]
; <text section> - it should have made one span covering all functions in this CU.
; CHECK-NEXT: .quad .Lfunc_begin0
-; CHECK-NEXT: .Lset1 = .Ldebug_end2-.Lfunc_begin0
-; CHECK-NEXT: .quad .Lset1
+; CHECK-NEXT: [[R2:\.[A-Za-z0-9]*]] = .Ldebug_end2-.Lfunc_begin0
+; CHECK-NEXT: .quad [[R2]]
; <other sections> - it should have made one span covering all vars in this CU.
; CHECK-NEXT: .quad some_other
-; CHECK-NEXT: .Lset2 = .Ldebug_end3-some_other
-; CHECK-NEXT: .quad .Lset2
+; CHECK-NEXT: [[R3:\.[A-Za-z0-9]*]] = .Ldebug_end3-some_other
+; CHECK-NEXT: .quad [[R3]]
; -- finish --
; CHECK-NEXT: # ARange terminator
@@ -70,7 +67,7 @@ entry:
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !8, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/home/kayamon/test.c] [DW_LANG_C99]
!1 = metadata !{metadata !"test.c", metadata !"/home/kayamon"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"some_code", metadata !"some_code", metadata !"", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, void ()* @some_code, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 5] [def] [scope 6] [some_code]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/home/kayamon/test.c]
diff --git a/test/DebugInfo/X86/dwarf-public-names.ll b/test/DebugInfo/X86/dwarf-public-names.ll
index d66e5a0c1cec..793971a5f89f 100644
--- a/test/DebugInfo/X86/dwarf-public-names.ll
+++ b/test/DebugInfo/X86/dwarf-public-names.ll
@@ -37,18 +37,20 @@
; Darwin shouldn't be generating the section by default
; DARWIN: debug_pubnames
-; DARWIN: unit_size = 0x00000000
+; DARWIN: {{^$}}
; Skip the output to the header of the pubnames section.
; LINUX: debug_pubnames
; Check for each name in the output.
-; LINUX: global_namespace_variable
-; LINUX: global_namespace_function
-; LINUX: static_member_function
-; LINUX: global_variable
-; LINUX: global_function
-; LINUX: member_function
+; LINUX-DAG: "ns"
+; LINUX-DAG: "C::static_member_function"
+; LINUX-DAG: "global_variable"
+; LINUX-DAG: "ns::global_namespace_variable"
+; LINUX-DAG: "ns::global_namespace_function"
+; LINUX-DAG: "global_function"
+; LINUX-DAG: "C::static_member_variable"
+; LINUX-DAG: "C::member_function"
%struct.C = type { i8 }
@@ -91,8 +93,8 @@ attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!38}
-!0 = metadata !{i32 786449, metadata !37, i32 4, metadata !"clang version 3.3 (http://llvm.org/git/clang.git a09cd8103a6a719cb2628cdf0c91682250a17bd2) (http://llvm.org/git/llvm.git 47d03cec0afca0c01ae42b82916d1d731716cd20)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !2, metadata !24, metadata !24, metadata !""} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !37, i32 4, metadata !"clang version 3.3 (http://llvm.org/git/clang.git a09cd8103a6a719cb2628cdf0c91682250a17bd2) (http://llvm.org/git/llvm.git 47d03cec0afca0c01ae42b82916d1d731716cd20)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !2, metadata !24, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{}
!2 = metadata !{metadata !3, metadata !18, metadata !19, metadata !20}
!3 = metadata !{i32 786478, metadata !4, null, metadata !"member_function", metadata !"member_function", metadata !"_ZN1C15member_functionEv", i32 9, metadata !5, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.C*)* @_ZN1C15member_functionEv, null, metadata !12, metadata !1, i32 9} ; [ DW_TAG_subprogram ] [line 9] [def] [member_function]
!4 = metadata !{i32 786473, metadata !37} ; [ DW_TAG_file_type ]
@@ -112,7 +114,7 @@ attributes #1 = { nounwind readnone }
!18 = metadata !{i32 786478, metadata !4, null, metadata !"static_member_function", metadata !"static_member_function", metadata !"_ZN1C22static_member_functionEv", i32 13, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_ZN1C22static_member_functionEv, null, metadata !14, metadata !1, i32 13} ; [ DW_TAG_subprogram ] [line 13] [def] [static_member_function]
!19 = metadata !{i32 786478, metadata !4, metadata !4, metadata !"global_function", metadata !"global_function", metadata !"_Z15global_functionv", i32 19, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z15global_functionv, null, null, metadata !1, i32 19} ; [ DW_TAG_subprogram ] [line 19] [def] [global_function]
!20 = metadata !{i32 786478, metadata !4, metadata !21, metadata !"global_namespace_function", metadata !"global_namespace_function", metadata !"_ZN2ns25global_namespace_functionEv", i32 24, metadata !22, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN2ns25global_namespace_functionEv, null, null, metadata !1, i32 24} ; [ DW_TAG_subprogram ] [line 24] [def] [global_namespace_function]
-!21 = metadata !{i32 786489, null, metadata !"ns", metadata !4, i32 23} ; [ DW_TAG_namespace ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp]
+!21 = metadata !{i32 786489, metadata !4, null, metadata !"ns", i32 23} ; [ DW_TAG_namespace ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp]
!22 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !23, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!23 = metadata !{null}
!24 = metadata !{metadata !25, metadata !26, metadata !27}
diff --git a/test/DebugInfo/X86/dwarf-pubnames-split.ll b/test/DebugInfo/X86/dwarf-pubnames-split.ll
index 131e5aae51fa..65c46d368d1b 100644
--- a/test/DebugInfo/X86/dwarf-pubnames-split.ll
+++ b/test/DebugInfo/X86/dwarf-pubnames-split.ll
@@ -7,7 +7,7 @@
; Check that we get a symbol off of the debug_info section when using split dwarf and pubnames.
-; CHECK: .Lpubtypes_begin0:
+; CHECK: .LpubTypes_begin0:
; CHECK-NEXT: .short 2 # DWARF Version
; CHECK-NEXT: .long .L.debug_info_begin0 # Offset of Compilation Unit Info
@@ -26,7 +26,7 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 189287) (llvm/trunk 189296)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.c] [DW_LANG_C99]
!1 = metadata !{metadata !"foo.c", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [main]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/foo.c]
diff --git a/test/DebugInfo/X86/eh_symbol.ll b/test/DebugInfo/X86/eh_symbol.ll
deleted file mode 100644
index 172ca922302a..000000000000
--- a/test/DebugInfo/X86/eh_symbol.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc -mtriple=i386-apple-macosx -disable-cfi %s -o - | FileCheck %s
-
-; test that we don't produce foo.eh symbols is a debug_frame section.
-; CHECK-NOT: .globl _f.eh
-
-define i32 @f() nounwind readnone optsize {
-entry:
- ret i32 42
-}
-
-!llvm.dbg.cu = !{!2}
-!llvm.module.flags = !{!9}
-!llvm.dbg.sp = !{!0}
-
-!0 = metadata !{i32 589870, metadata !6, metadata !1, metadata !"f", metadata !"f", metadata !"", i32 1, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @f, null, null, null, i32 0} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 0] [f]
-!1 = metadata !{i32 589865, metadata !6} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 589841, metadata !6, i32 12, metadata !"clang version 3.0 ()", i1 true, metadata !"", i32 0, metadata !7, metadata !7, metadata !8, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 589845, metadata !6, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{i32 589860, null, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"/home/espindola/llvm/test.c", metadata !"/home/espindola/tmpfs/build"}
-!7 = metadata !{i32 0}
-!8 = metadata !{metadata !0}
-!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/elf-names.ll b/test/DebugInfo/X86/elf-names.ll
index 7b38fde5d34e..36fd232a9045 100644
--- a/test/DebugInfo/X86/elf-names.ll
+++ b/test/DebugInfo/X86/elf-names.ll
@@ -63,7 +63,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.module.flags = !{!54}
!0 = metadata !{i32 786449, metadata !53, i32 4, metadata !"clang version 3.2 (trunk 167506) (llvm/trunk 167505)", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/foo.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !31}
!5 = metadata !{i32 786478, metadata !6, null, metadata !"D", metadata !"D", metadata !"_ZN1DC2Ev", i32 12, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (%class.D*)* @_ZN1DC2Ev, null, metadata !17, metadata !27, i32 12} ; [ DW_TAG_subprogram ] [line 12] [def] [D]
!6 = metadata !{i32 786473, metadata !53} ; [ DW_TAG_file_type ]
@@ -87,25 +87,23 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!24 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from D]
!25 = metadata !{metadata !26}
!26 = metadata !{i32 786468} ; [ DW_TAG_base_type ] [line 0, size 0, align 0, offset 0]
-!27 = metadata !{metadata !28}
-!28 = metadata !{metadata !29}
+!27 = metadata !{metadata !29}
!29 = metadata !{i32 786689, metadata !5, metadata !"this", metadata !6, i32 16777228, metadata !30, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 12]
!30 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from D]
!31 = metadata !{i32 786478, metadata !6, null, metadata !"D", metadata !"D", metadata !"_ZN1DC2ERKS_", i32 19, metadata !21, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (%class.D*, %class.D*)* @_ZN1DC2ERKS_, null, metadata !20, metadata !32, i32 19} ; [ DW_TAG_subprogram ] [line 19] [def] [D]
-!32 = metadata !{metadata !33}
-!33 = metadata !{metadata !34, metadata !35}
+!32 = metadata !{metadata !34, metadata !35}
!34 = metadata !{i32 786689, metadata !31, metadata !"this", metadata !6, i32 16777235, metadata !30, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 19]
!35 = metadata !{i32 786689, metadata !31, metadata !"d", metadata !6, i32 33554451, metadata !23, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [d] [line 19]
!36 = metadata !{i32 12, i32 0, metadata !5, null}
!37 = metadata !{i32 13, i32 0, metadata !38, null}
-!38 = metadata !{i32 786443, metadata !5, i32 12, i32 0, metadata !6, i32 0} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/foo.cpp]
+!38 = metadata !{i32 786443, metadata !6, metadata !5, i32 12, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/foo.cpp]
!42 = metadata !{i32 14, i32 0, metadata !38, null}
!43 = metadata !{i32 15, i32 0, metadata !38, null}
!44 = metadata !{i32 16, i32 0, metadata !38, null}
!45 = metadata !{i32 17, i32 0, metadata !38, null}
!46 = metadata !{i32 19, i32 0, metadata !31, null}
!47 = metadata !{i32 20, i32 0, metadata !48, null}
-!48 = metadata !{i32 786443, metadata !31, i32 19, i32 0, metadata !6, i32 1} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/foo.cpp]
+!48 = metadata !{i32 786443, metadata !6, metadata !31, i32 19, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/foo.cpp]
!49 = metadata !{i32 21, i32 0, metadata !48, null}
!50 = metadata !{i32 22, i32 0, metadata !48, null}
!51 = metadata !{i32 23, i32 0, metadata !48, null}
diff --git a/test/DebugInfo/X86/empty-and-one-elem-array.ll b/test/DebugInfo/X86/empty-and-one-elem-array.ll
index a3a08f0e3b91..f5c37df1e5e8 100644
--- a/test/DebugInfo/X86/empty-and-one-elem-array.ll
+++ b/test/DebugInfo/X86/empty-and-one-elem-array.ll
@@ -67,7 +67,7 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.module.flags = !{!33}
!0 = metadata !{i32 786449, metadata !32, i32 12, metadata !"clang version 3.3 (trunk 169136)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/Volumes/Sandbox/llvm/test.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"func", metadata !"func", metadata !"", i32 11, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @func, null, null, metadata !1, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [func]
!6 = metadata !{i32 786473, metadata !32} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/empty-array.ll b/test/DebugInfo/X86/empty-array.ll
index 24364676f8ad..3fab313fe0ef 100644
--- a/test/DebugInfo/X86/empty-array.ll
+++ b/test/DebugInfo/X86/empty-array.ll
@@ -21,14 +21,14 @@
; CHECK: [[BASETYPE]]: DW_TAG_base_type
; CHECK: [[BASE2]]: DW_TAG_base_type
; CHECK-NEXT: DW_AT_name
-; CHECK-NEXT: DW_AT_byte_size [DW_FORM_data1] (0x04)
-; CHECK-NEXT: DW_AT_encoding [DW_FORM_data1] (0x05)
+; CHECK-NEXT: DW_AT_byte_size [DW_FORM_data1] (0x08)
+; CHECK-NEXT: DW_AT_encoding [DW_FORM_data1] (0x07)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!21}
-!0 = metadata !{i32 786449, metadata !20, i32 4, metadata !"clang version 3.3 (trunk 169136)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/Volumes/Sandbox/llvm/t.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !20, i32 4, metadata !"clang version 3.3 (trunk 169136)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/Volumes/Sandbox/llvm/t.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, %class.A* @a, null} ; [ DW_TAG_variable ] [a] [line 1] [def]
!6 = metadata !{i32 786473, metadata !20} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/ending-run.ll b/test/DebugInfo/X86/ending-run.ll
index ae17fd0c0ae8..165074e3002c 100644
--- a/test/DebugInfo/X86/ending-run.ll
+++ b/test/DebugInfo/X86/ending-run.ll
@@ -1,12 +1,12 @@
-; RUN: llc -mtriple=x86_64-apple-darwin %s -o %t -filetype=obj
+; RUN: llc -mtriple=x86_64-apple-darwin -filetype=obj %s -o %t
; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s
; Check that the line table starts at 7, not 4, but that the first
; statement isn't until line 8.
-; CHECK-NOT: 0x0000000000000000 7 0 1 0 is_stmt
+; CHECK-NOT: 0x0000000000000000 7 0 1 0 0 is_stmt
; CHECK: 0x0000000000000000 7 0 1 0
-; CHECK: 0x0000000000000004 8 18 1 0 is_stmt prologue_end
+; CHECK: 0x0000000000000004 8 18 1 0 0 is_stmt prologue_end
define i32 @callee(i32 %x) nounwind uwtable ssp {
entry:
@@ -30,15 +30,13 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.module.flags = !{!20}
!0 = metadata !{i32 786449, metadata !19, i32 12, metadata !"clang version 3.1 (trunk 153921) (llvm/trunk 153916)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 786478, metadata !19, metadata !6, metadata !"callee", metadata !"callee", metadata !"", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 (i32)* @callee, null, null, metadata !10, i32 7} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786478, metadata !19, metadata !6, metadata !"callee", metadata !"callee", metadata !"", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 (i32)* @callee, null, null, null, i32 7} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !19} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9, metadata !9}
!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !11}
-!11 = metadata !{i32 786468} ; [ DW_TAG_base_type ]
!12 = metadata !{i32 786689, metadata !5, metadata !"x", metadata !6, i32 16777221, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!13 = metadata !{i32 5, i32 5, metadata !5, null}
!14 = metadata !{i32 786688, metadata !15, metadata !"y", metadata !6, i32 8, metadata !9, i32 0, i32 0} ; [ DW_TAG_auto_variable ]
diff --git a/test/DebugInfo/X86/enum-class.ll b/test/DebugInfo/X86/enum-class.ll
index a31e254c24c8..23ffbcc8f782 100644
--- a/test/DebugInfo/X86/enum-class.ll
+++ b/test/DebugInfo/X86/enum-class.ll
@@ -8,7 +8,7 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!23}
-!0 = metadata !{i32 786449, metadata !22, i32 4, metadata !"clang version 3.2 (trunk 157269) (llvm/trunk 157264)", i1 false, metadata !"", i32 0, metadata !1, metadata !15, metadata !15, metadata !17, metadata !17, metadata !""} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, metadata !22, i32 4, metadata !"clang version 3.2 (trunk 157269) (llvm/trunk 157264)", i1 false, metadata !"", i32 0, metadata !1, metadata !15, metadata !15, metadata !17, metadata !15, metadata !""} ; [ DW_TAG_compile_unit ]
!1 = metadata !{metadata !3, metadata !8, metadata !12}
!3 = metadata !{i32 786436, metadata !4, null, metadata !"A", i32 1, i64 32, i64 32, i32 0, i32 0, metadata !5, metadata !6, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [A] [line 1, size 32, align 32, offset 0] [def] [from int]
!4 = metadata !{i32 786473, metadata !22} ; [ DW_TAG_file_type ]
@@ -22,7 +22,7 @@
!12 = metadata !{i32 786436, metadata !4, null, metadata !"C", i32 3, i64 32, i64 32, i32 0, i32 0, null, metadata !13, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [C] [line 3, size 32, align 32, offset 0] [def] [from ]
!13 = metadata !{metadata !14}
!14 = metadata !{i32 786472, metadata !"C1", i64 1} ; [ DW_TAG_enumerator ]
-!15 = metadata !{i32 0}
+!15 = metadata !{}
!17 = metadata !{metadata !19, metadata !20, metadata !21}
!19 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !4, i32 4, metadata !3, i32 0, i32 1, i32* @a, null} ; [ DW_TAG_variable ]
!20 = metadata !{i32 786484, i32 0, null, metadata !"b", metadata !"b", metadata !"", metadata !4, i32 5, metadata !8, i32 0, i32 1, i64* @b, null} ; [ DW_TAG_variable ]
diff --git a/test/DebugInfo/X86/enum-fwd-decl.ll b/test/DebugInfo/X86/enum-fwd-decl.ll
index 6bfb930cb6e7..adb962ea527e 100644
--- a/test/DebugInfo/X86/enum-fwd-decl.ll
+++ b/test/DebugInfo/X86/enum-fwd-decl.ll
@@ -6,8 +6,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!9}
-!0 = metadata !{i32 786449, metadata !8, i32 4, metadata !"clang version 3.2 (trunk 165274) (llvm/trunk 165272)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/foo.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !8, i32 4, metadata !"clang version 3.2 (trunk 165274) (llvm/trunk 165272)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/foo.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786484, i32 0, null, metadata !"e", metadata !"e", metadata !"", metadata !6, i32 2, metadata !7, i32 0, i32 1, i16* @e, null} ; [ DW_TAG_variable ] [e] [line 2] [def]
!6 = metadata !{i32 786473, metadata !8} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/fission-cu.ll b/test/DebugInfo/X86/fission-cu.ll
index 06408d708150..7f176986394c 100644
--- a/test/DebugInfo/X86/fission-cu.ll
+++ b/test/DebugInfo/X86/fission-cu.ll
@@ -1,14 +1,15 @@
; RUN: llc -split-dwarf=Enable -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s
; RUN: llvm-readobj --relocations %t | FileCheck --check-prefix=OBJ %s
+; RUN: llvm-objdump -h %t | FileCheck --check-prefix=HDR %s
@a = common global i32 0, align 4
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!9}
-!0 = metadata !{i32 786449, metadata !8, i32 12, metadata !"clang version 3.3 (trunk 169021) (llvm/trunk 169020)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !"baz.dwo"} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/baz.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !8, i32 12, metadata !"clang version 3.3 (trunk 169021) (llvm/trunk 169020)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !"baz.dwo"} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/baz.c] [DW_LANG_C99]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, i32* @a, null} ; [ DW_TAG_variable ] [a] [line 1] [def]
!6 = metadata !{i32 786473, metadata !8} ; [ DW_TAG_file_type ]
@@ -23,26 +24,11 @@
; CHECK: .debug_abbrev contents:
; CHECK: Abbrev table for offset: 0x00000000
; CHECK: [1] DW_TAG_compile_unit DW_CHILDREN_no
-; CHECK: DW_AT_GNU_dwo_name DW_FORM_strp
-; CHECK: DW_AT_GNU_addr_base DW_FORM_sec_offset
-; CHECK: DW_AT_low_pc DW_FORM_addr
; CHECK: DW_AT_stmt_list DW_FORM_sec_offset
+; CHECK: DW_AT_GNU_dwo_name DW_FORM_strp
; CHECK: DW_AT_comp_dir DW_FORM_strp
; CHECK: DW_AT_GNU_dwo_id DW_FORM_data8
-; CHECK: .debug_info contents:
-; CHECK: DW_TAG_compile_unit
-; CHECK: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x00000000] = "baz.dwo")
-; CHECK: DW_AT_GNU_addr_base [DW_FORM_sec_offset] (0x00000000)
-; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; CHECK: DW_AT_stmt_list [DW_FORM_sec_offset] (0x00000000)
-; CHECK: DW_AT_comp_dir [DW_FORM_strp] ( .debug_str[0x00000008] = "/usr/local/google/home/echristo/tmp")
-; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x0000000000000000)
-
-; CHECK: .debug_str contents:
-; CHECK: 0x00000000: "baz.dwo"
-; CHECK: 0x00000008: "/usr/local/google/home/echristo/tmp"
-
; Check that we're using the right forms.
; CHECK: .debug_abbrev.dwo contents:
; CHECK: Abbrev table for offset: 0x00000000
@@ -61,13 +47,20 @@
; CHECK: DW_AT_external DW_FORM_flag_present
; CHECK: DW_AT_decl_file DW_FORM_data1
; CHECK: DW_AT_decl_line DW_FORM_data1
-; CHECK: DW_AT_location DW_FORM_block1
+; CHECK: DW_AT_location DW_FORM_exprloc
; CHECK: [3] DW_TAG_base_type DW_CHILDREN_no
; CHECK: DW_AT_name DW_FORM_GNU_str_index
; CHECK: DW_AT_encoding DW_FORM_data1
; CHECK: DW_AT_byte_size DW_FORM_data1
+; CHECK: .debug_info contents:
+; CHECK: DW_TAG_compile_unit
+; CHECK-NEXT: DW_AT_stmt_list [DW_FORM_sec_offset] (0x00000000)
+; CHECK-NEXT: DW_AT_GNU_dwo_name [DW_FORM_strp] ( .debug_str[0x00000000] = "baz.dwo")
+; CHECK-NEXT: DW_AT_comp_dir [DW_FORM_strp] ( .debug_str[0x00000008] = "/usr/local/google/home/echristo/tmp")
+; CHECK-NEXT: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x1f1f859683d49324)
+
; Check that the rest of the compile units have information.
; CHECK: .debug_info.dwo contents:
; CHECK: DW_TAG_compile_unit
@@ -77,17 +70,20 @@
; CHECK-NOT: DW_AT_low_pc
; CHECK-NOT: DW_AT_stmt_list
; CHECK-NOT: DW_AT_comp_dir
-; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x0000000000000000)
+; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x1f1f859683d49324)
; CHECK: DW_TAG_variable
; CHECK: DW_AT_name [DW_FORM_GNU_str_index] ( indexed (00000002) string = "a")
; CHECK: DW_AT_type [DW_FORM_ref4] (cu + 0x{{[0-9a-f]*}} => {[[TYPE:0x[0-9a-f]*]]})
; CHECK: DW_AT_external [DW_FORM_flag_present] (true)
; CHECK: DW_AT_decl_file [DW_FORM_data1] (0x01)
; CHECK: DW_AT_decl_line [DW_FORM_data1] (0x01)
-; CHECK: DW_AT_location [DW_FORM_block1] (<0x02> fb 00 )
+; CHECK: DW_AT_location [DW_FORM_exprloc] (<0x2> fb 00 )
; CHECK: [[TYPE]]: DW_TAG_base_type
; CHECK: DW_AT_name [DW_FORM_GNU_str_index] ( indexed (00000003) string = "int")
+; CHECK: .debug_str contents:
+; CHECK: 0x00000000: "baz.dwo"
+; CHECK: 0x00000008: "/usr/local/google/home/echristo/tmp"
; CHECK: .debug_str.dwo contents:
; CHECK: 0x00000000: "clang version 3.3 (trunk 169021) (llvm/trunk 169020)"
@@ -106,9 +102,13 @@
;
; OBJ: .debug_info
; OBJ-NEXT: R_X86_64_32 .debug_abbrev
-; OBJ-NEXT: R_X86_64_32 .debug_str
-; OBJ-NEXT: R_X86_64_32 .debug_addr
; OBJ-NEXT: R_X86_64_32 .debug_line
; OBJ-NEXT: R_X86_64_32 .debug_str
+; OBJ-NEXT: R_X86_64_32 .debug_str
+; OBJ-NEXT: R_X86_64_32 .debug_addr
; OBJ-NEXT: }
+
+; HDR-NOT: .debug_aranges
+; HDR-NOT: .rela.{{.*}}.dwo
+
!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/fission-hash.ll b/test/DebugInfo/X86/fission-hash.ll
index d3e46a9c4ff1..3987faaf9716 100644
--- a/test/DebugInfo/X86/fission-hash.ll
+++ b/test/DebugInfo/X86/fission-hash.ll
@@ -1,4 +1,4 @@
-; RUN: llc -split-dwarf=Enable -generate-cu-hash -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
+; RUN: llc -split-dwarf=Enable -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s
; The source is an empty file.
@@ -11,6 +11,6 @@
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 188230) (llvm/trunk 188234)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !2, metadata !2, metadata !"foo.dwo"} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.c] [DW_LANG_C99]
!1 = metadata !{metadata !"foo.c", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
!4 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/fission-ranges.ll b/test/DebugInfo/X86/fission-ranges.ll
index 0a100799bed1..135837582fcc 100644
--- a/test/DebugInfo/X86/fission-ranges.ll
+++ b/test/DebugInfo/X86/fission-ranges.ll
@@ -1,5 +1,55 @@
; RUN: llc -split-dwarf=Enable -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
-; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s
+; RUN: llvm-dwarfdump %t | FileCheck %s
+; RUN: llvm-objdump -h %t | FileCheck --check-prefix=HDR %s
+
+; CHECK: .debug_info contents:
+; CHECK: DW_TAG_compile_unit
+; CHECK-NEXT: DW_AT_stmt_list
+; CHECK-NEXT: DW_AT_GNU_dwo_name
+; CHECK-NEXT: DW_AT_comp_dir
+; CHECK-NEXT: DW_AT_GNU_dwo_id
+; CHECK-NEXT: DW_AT_GNU_addr_base [DW_FORM_sec_offset] (0x00000000)
+
+
+; CHECK: .debug_info.dwo contents:
+; CHECK: DW_AT_location [DW_FORM_sec_offset] ([[A:0x[0-9a-z]*]])
+; CHECK: DW_AT_location [DW_FORM_sec_offset] ([[E:0x[0-9a-z]*]])
+; CHECK: DW_AT_location [DW_FORM_sec_offset] ([[B:0x[0-9a-z]*]])
+; CHECK: DW_AT_location [DW_FORM_sec_offset] ([[D:0x[0-9a-z]*]])
+; CHECK: DW_AT_ranges [DW_FORM_sec_offset] (0x000000a0)
+; CHECK: .debug_loc contents:
+; CHECK-NOT: Beginning address offset
+; CHECK: .debug_loc.dwo contents:
+
+; Don't assume these locations are entirely correct - feel free to update them
+; if they've changed due to a bugfix, change in register allocation, etc.
+
+; CHECK: [[A]]: Beginning address index: 2
+; CHECK-NEXT: Length: 199
+; CHECK-NEXT: Location description: 11 00
+; CHECK-NEXT: {{^$}}
+; CHECK-NEXT: Beginning address index: 3
+; CHECK-NEXT: Length: 23
+; CHECK-NEXT: Location description: 50 93 04
+; CHECK: [[E]]: Beginning address index: 4
+; CHECK-NEXT: Length: 21
+; CHECK-NEXT: Location description: 50 93 04
+; CHECK: [[B]]: Beginning address index: 5
+; CHECK-NEXT: Length: 19
+; CHECK-NEXT: Location description: 50 93 04
+; CHECK: [[D]]: Beginning address index: 6
+; CHECK-NEXT: Length: 23
+; CHECK-NEXT: Location description: 50 93 04
+
+; Make sure we don't produce any relocations in any .dwo section (in particular, debug_info.dwo)
+; HDR-NOT: .rela.{{.*}}.dwo
+
+; Make sure we have enough stuff in the debug_addr to cover the address indexes
+; (6 is the last index in debug_loc.dwo, making 7 entries of 8 bytes each, 7 * 8
+; == 56 base 10 == 38 base 16)
+
+; HDR: .debug_addr 00000038
+; HDR-NOT: .rela.{{.*}}.dwo
; From the code:
@@ -29,8 +79,6 @@
; clang -g -S -gsplit-dwarf -O1 small.c
-; CHECK: DW_AT_GNU_ranges_base
-
@c = external global i32
; Function Attrs: nounwind uwtable
@@ -44,7 +92,7 @@ entry:
define internal fastcc void @foo() #0 {
entry:
tail call void @llvm.dbg.value(metadata !29, i64 0, metadata !13), !dbg !30
- tail call void @llvm.dbg.value(metadata !2, i64 0, metadata !14), !dbg !31
+ tail call void @llvm.dbg.value(metadata !44, i64 0, metadata !14), !dbg !31
%c.promoted9 = load i32* @c, align 4, !dbg !32, !tbaa !33
br label %for.cond1.preheader, !dbg !31
@@ -107,7 +155,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 191700) (llvm/trunk 191710)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"small.dwo"} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/small.c] [DW_LANG_C99]
!1 = metadata !{metadata !"small.c", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !8}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 18, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, void ()* @bar, null, null, metadata !2, i32 19} ; [ DW_TAG_subprogram ] [line 18] [def] [scope 19] [bar]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/small.c]
@@ -149,3 +197,4 @@ attributes #1 = { nounwind readnone }
!41 = metadata !{i32* @c}
!42 = metadata !{i32 15, i32 0, metadata !8, null}
!43 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!44 = metadata !{i32 0}
diff --git a/test/DebugInfo/X86/formal_parameter.ll b/test/DebugInfo/X86/formal_parameter.ll
new file mode 100644
index 000000000000..2fdab7a07f54
--- /dev/null
+++ b/test/DebugInfo/X86/formal_parameter.ll
@@ -0,0 +1,84 @@
+; ModuleID = 'formal_parameter.c'
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+;
+; From (clang -g -c -O1):
+;
+; int lookup(int* map);
+; int verify(int val);
+; void foo(int map)
+; {
+; lookup(&map);
+; if (!verify(map)) { }
+; }
+;
+; RUN: opt %s -O2 -S -o %t
+; RUN: cat %t | FileCheck --check-prefix=LOWERING %s
+; RUN: llc -filetype=obj %t -o - | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+; Test that we emit only one DW_AT_formal_parameter "map" for this function.
+; rdar://problem/14874886
+;
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}}map
+; CHECK-NOT: DW_AT_name {{.*}}map
+
+; Function Attrs: nounwind ssp uwtable
+define void @foo(i32 %map) #0 {
+entry:
+ %map.addr = alloca i32, align 4
+ store i32 %map, i32* %map.addr, align 4, !tbaa !15
+ call void @llvm.dbg.declare(metadata !{i32* %map.addr}, metadata !10), !dbg !14
+ %call = call i32 (i32*, ...)* bitcast (i32 (...)* @lookup to i32 (i32*, ...)*)(i32* %map.addr) #3, !dbg !19
+ ; Ensure that all dbg intrinsics have the same scope after
+ ; LowerDbgDeclare is finished with them.
+ ;
+ ; LOWERING: call void @llvm.dbg.value{{.*}}, !dbg ![[LOC:.*]]
+ ; LOWERING: call void @llvm.dbg.value{{.*}}, !dbg ![[LOC]]
+ ; LOWERING: call void @llvm.dbg.value{{.*}}, !dbg ![[LOC]]
+  %0 = load i32* %map.addr, align 4, !dbg !20, !tbaa !15
+ %call1 = call i32 (i32, ...)* bitcast (i32 (...)* @verify to i32 (i32, ...)*)(i32 %0) #3, !dbg !20
+ ret void, !dbg !22
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+declare i32 @lookup(...)
+
+declare i32 @verify(...)
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
+attributes #3 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!11, !12}
+!llvm.ident = !{!13}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [formal_parameter.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"formal_parameter.c", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32)* @foo, null, null, metadata !9, i32 2} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 2] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [formal_parameter.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786689, metadata !4, metadata !"map", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [map] [line 1]
+!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!12 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!13 = metadata !{metadata !"clang version 3.5.0 "}
+!14 = metadata !{i32 1, i32 0, metadata !4, null}
+!15 = metadata !{metadata !16, metadata !16, i64 0}
+!16 = metadata !{metadata !"int", metadata !17, i64 0}
+!17 = metadata !{metadata !"omnipotent char", metadata !18, i64 0}
+!18 = metadata !{metadata !"Simple C/C++ TBAA"}
+!19 = metadata !{i32 3, i32 0, metadata !4, null}
+!20 = metadata !{i32 4, i32 0, metadata !21, null}
+!21 = metadata !{i32 786443, metadata !1, metadata !4, i32 4, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [formal_parameter.c]
+!22 = metadata !{i32 5, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/generate-odr-hash.ll b/test/DebugInfo/X86/generate-odr-hash.ll
index 4f9cc78fec9a..2256b3e212a7 100644
--- a/test/DebugInfo/X86/generate-odr-hash.ll
+++ b/test/DebugInfo/X86/generate-odr-hash.ll
@@ -1,10 +1,16 @@
; REQUIRES: object-emission
-; RUN: llc %s -o %t -filetype=obj -O0 -generate-odr-hash -mtriple=x86_64-unknown-linux-gnu
-; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
-;
-; Generated from:
+; RUN: llc %s -o %t -filetype=obj -O0 -generate-type-units -mtriple=x86_64-unknown-linux-gnu
+; RUN: llvm-dwarfdump %t | FileCheck --check-prefix=CHECK --check-prefix=SINGLE %s
+
+; RUN: llc %s -split-dwarf=Enable -o %t -filetype=obj -O0 -generate-type-units -mtriple=x86_64-unknown-linux-gnu
+; RUN: llvm-dwarfdump %t | FileCheck --check-prefix=CHECK --check-prefix=FISSION %s
+
+; Generated from bar.cpp:
+
+; #line 1 "bar.h"
; struct bar {};
+; #line 2 "bar.cpp"
; struct bar b;
@@ -43,46 +49,117 @@
; wombat wom;
+; SINGLE-LABEL: .debug_info contents:
+; FISSION-LABEL: .debug_info.dwo contents:
+; CHECK: Compile Unit: length = [[CU_SIZE:[0-9a-f]+]]
+
+; CHECK: [[BAR:^0x........]]: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_declaration
+; CHECK-NEXT: DW_AT_signature {{.*}} (0x1d02f3be30cc5688)
+; CHECK: [[FLUFFY:^0x........]]: DW_TAG_class_type
+; CHECK-NEXT: DW_AT_declaration
+; CHECK-NEXT: DW_AT_signature {{.*}} (0xb04af47397402e77)
+
+; Ensure the CU-local type 'walrus' is not placed in a type unit.
+; CHECK: [[WALRUS:^0x........]]: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name{{.*}}"walrus"
+; CHECK-NEXT: DW_AT_byte_size
+; CHECK-NEXT: DW_AT_decl_file
+; CHECK-NEXT: DW_AT_decl_line
+
+
+; CHECK: [[WOMBAT:^0x........]]: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_declaration
+; CHECK-NEXT: DW_AT_signature {{.*}} (0xfd756cee88f8a118)
+
+; FISSION-LABEL: .debug_types contents:
+; FISSION-NOT: type_signature
+; FISSION-LABEL: type_signature = 0x1d02f3be30cc5688
+; FISSION: DW_TAG_type_unit
+; FISSION-NEXT: DW_AT_GNU_dwo_name{{.*}}"bar.dwo"
+; FISSION-NEXT: DW_AT_comp_dir{{.*}}"/tmp/dbginfo"
+; FISSION-NOT: type_signature
+; FISSION-LABEL: type_signature = 0xb04af47397402e77
+; FISSION-NOT: type_signature
+; FISSION-LABEL: type_signature = 0xfd756cee88f8a118
+; FISSION-NOT: type_signature
+; FISSION-LABEL: type_signature = 0xe94f6d3843e62d6b
+
+; SINGLE-LABEL: .debug_types contents:
+; FISSION-LABEL: .debug_types.dwo contents:
+
; Check that we generate a hash for bar and the value.
+; CHECK-NOT: type_signature
+; CHECK-LABEL: type_signature = 0x1d02f3be30cc5688
; CHECK: DW_TAG_structure_type
-; CHECK-NEXT: debug_str{{.*}}"bar"
-; CHECK: DW_AT_GNU_odr_signature [DW_FORM_data8] (0x200520c0d5b90eff)
+; CHECK-NEXT: DW_AT_name{{.*}}"bar"
+
+
+; Check that we generate a hash for fluffy and the value.
+; CHECK-NOT: type_signature
+; CHECK-LABEL: type_signature = 0xb04af47397402e77
+; CHECK-NOT: DW_AT_GNU_odr_signature [DW_FORM_data8] (0x9a0124d5a0c21c52)
; CHECK: DW_TAG_namespace
-; CHECK-NEXT: debug_str{{.*}}"echidna"
+; CHECK-NEXT: DW_AT_name{{.*}}"echidna"
; CHECK: DW_TAG_namespace
-; CHECK-NEXT: debug_str{{.*}}"capybara"
+; CHECK-NEXT: DW_AT_name{{.*}}"capybara"
; CHECK: DW_TAG_namespace
-; CHECK-NEXT: debug_str{{.*}}"mongoose"
+; CHECK-NEXT: DW_AT_name{{.*}}"mongoose"
; CHECK: DW_TAG_class_type
-; CHECK-NEXT: debug_str{{.*}}"fluffy"
-; CHECK: DW_AT_GNU_odr_signature [DW_FORM_data8] (0x9a0124d5a0c21c52)
-
-; We emit no hash for walrus since the type is contained in an anonymous
-; namespace and won't violate any ODR-ness.
-; CHECK: DW_TAG_structure_type
-; CHECK-NEXT: debug_str{{.*}}"walrus"
-; CHECK-NEXT: DW_AT_byte_size
-; CHECK-NEXT: DW_AT_decl_file
-; CHECK-NEXT: DW_AT_decl_line
-; CHECK-NOT: DW_AT_GNU_odr_signature
-; CHECK: DW_TAG_subprogram
+; CHECK-NEXT: DW_AT_name{{.*}}"fluffy"
; Check that we generate a hash for wombat and the value, but not for the
; anonymous type contained within.
+; CHECK-NOT: type_signature
+; CHECK-LABEL: type_signature = 0xfd756cee88f8a118
+; CHECK-NOT: DW_AT_GNU_odr_signature [DW_FORM_data8] (0x685bcc220141e9d7)
; CHECK: DW_TAG_structure_type
-; CHECK-NEXT: debug_str{{.*}}wombat
-; CHECK: DW_AT_GNU_odr_signature [DW_FORM_data8] (0x685bcc220141e9d7)
-; CHECK: DW_TAG_structure_type
-; CHECK-NEXT: DW_AT_byte_size
-; CHECK-NEXT: DW_AT_decl_file
-; CHECK-NEXT: DW_AT_decl_line
-; CHECK: DW_TAG_member
-; CHECK-NEXT: debug_str{{.*}}"a"
+; CHECK-NEXT: DW_AT_name{{.*}}"wombat"
-; Check that we don't generate a hash for baz.
+; CHECK-NOT: type_signature
+; CHECK-LABEL: type_signature = 0xe94f6d3843e62d6b
+; CHECK: DW_TAG_type_unit
+; CHECK: DW_AT_stmt_list [DW_FORM_sec_offset] (0x00000000)
+; CHECK-NOT: NULL
+; CHECK-NOT: DW_AT_GNU_odr_signature
+; CHECK: DW_TAG_structure_type
+; The signature for the outer 'wombat' type
+; CHECK: DW_AT_signature [DW_FORM_ref_sig8] (0xfd756cee88f8a118)
; CHECK: DW_TAG_structure_type
-; CHECK-NEXT: debug_str{{.*}}"baz"
+; CHECK-NOT: DW_AT_name
; CHECK-NOT: DW_AT_GNU_odr_signature
+; CHECK: DW_TAG_member
+; CHECK-NEXT: DW_AT_name{{.*}}"a"
+
+; CHECK-LABEL: .debug_line contents:
+; CHECK: Line table prologue
+; CHECK-NOT: file_names[
+; SINGLE: file_names{{.*}} bar.h
+; CHECK: file_names{{.*}} bar.cpp
+; CHECK-NOT: file_names[
+
+; CHECK-LABEL: .debug_line.dwo contents:
+; FISSION: Line table prologue
+; FISSION: opcode_base: 1
+; FISSION-NOT: standard_opcode_lengths
+; FISSION-NOT: include_directories
+; FISSION-NOT: file_names[
+; FISSION: file_names{{.*}} bar.h
+; FISSION: file_names{{.*}} bar.cpp
+; FISSION-NOT: file_names[
+
+; CHECK-LABEL: .debug_str contents:
+
+; Use the unit size as a rough hash/identifier for the unit we're dealing with;
+; it happens to be unambiguous at the moment, but it's hardly ideal.
+; CHECK-LABEL: .debug_pubtypes contents:
+; Don't emit pubtype entries for type DIEs in the compile unit that just indirect to a type unit.
+; CHECK-NEXT: unit_size = [[CU_SIZE]]
+; CHECK-NEXT: Offset Name
+; CHECK-DAG: [[BAR]] "bar"
+; CHECK-DAG: [[WALRUS]] "(anonymous namespace)::walrus"
+; CHECK-DAG: [[WOMBAT]] "wombat"
+; CHECK-DAG: [[FLUFFY]] "echidna::capybara::mongoose::fluffy"
%struct.bar = type { i8 }
%"class.echidna::capybara::mongoose::fluffy" = type { i32, i32 }
@@ -97,14 +174,12 @@
@wom = global %struct.wombat zeroinitializer, align 4
@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
-@_ZN12_GLOBAL__N_16walrusC1Ev = alias internal void (%"struct.<anonymous namespace>::walrus"*)* @_ZN12_GLOBAL__N_16walrusC2Ev
-
; Function Attrs: nounwind uwtable
define void @_Z3foov() #0 {
entry:
%b = alloca %struct.baz, align 1
- call void @llvm.dbg.declare(metadata !{%struct.baz* %b}, metadata !44), !dbg !46
- ret void, !dbg !47
+ call void @llvm.dbg.declare(metadata !{%struct.baz* %b}, metadata !46), !dbg !48
+ ret void, !dbg !49
}
; Function Attrs: nounwind readnone
@@ -112,8 +187,8 @@ declare void @llvm.dbg.declare(metadata, metadata) #1
define internal void @__cxx_global_var_init() section ".text.startup" {
entry:
- call void @_ZN12_GLOBAL__N_16walrusC1Ev(%"struct.<anonymous namespace>::walrus"* @w), !dbg !48
- ret void, !dbg !48
+ call void @_ZN12_GLOBAL__N_16walrusC2Ev(%"struct.<anonymous namespace>::walrus"* @w), !dbg !50
+ ret void, !dbg !50
}
; Function Attrs: nounwind uwtable
@@ -121,76 +196,77 @@ define internal void @_ZN12_GLOBAL__N_16walrusC2Ev(%"struct.<anonymous namespace
entry:
%this.addr = alloca %"struct.<anonymous namespace>::walrus"*, align 8
store %"struct.<anonymous namespace>::walrus"* %this, %"struct.<anonymous namespace>::walrus"** %this.addr, align 8
- call void @llvm.dbg.declare(metadata !{%"struct.<anonymous namespace>::walrus"** %this.addr}, metadata !49), !dbg !51
+ call void @llvm.dbg.declare(metadata !{%"struct.<anonymous namespace>::walrus"** %this.addr}, metadata !51), !dbg !53
%this1 = load %"struct.<anonymous namespace>::walrus"** %this.addr
- ret void, !dbg !52
+ ret void, !dbg !54
}
define internal void @_GLOBAL__I_a() section ".text.startup" {
entry:
- call void @__cxx_global_var_init(), !dbg !53
- ret void, !dbg !53
+ call void @__cxx_global_var_init(), !dbg !55
+ ret void, !dbg !55
}
attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!42, !54}
-!llvm.ident = !{!43}
+!llvm.module.flags = !{!43, !44}
+!llvm.ident = !{!45}
-!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !20, metadata !37, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/bar.cpp] [DW_LANG_C_plus_plus]
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !21, metadata !38, metadata !2, metadata !"bar.dwo"} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/bar.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"bar.cpp", metadata !"/tmp/dbginfo"}
-!2 = metadata !{i32 0}
-!3 = metadata !{metadata !4, metadata !5, metadata !13, metadata !16}
-!4 = metadata !{i32 786451, metadata !1, null, metadata !"bar", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, metadata !"_ZTS3bar"} ; [ DW_TAG_structure_type ] [bar] [line 1, size 8, align 8, offset 0] [def] [from ]
-!5 = metadata !{i32 786434, metadata !1, metadata !6, metadata !"fluffy", i32 13, i64 64, i64 32, i32 0, i32 0, null, metadata !9, i32 0, null, null, metadata !"_ZTSN7echidna8capybara8mongoose6fluffyE"} ; [ DW_TAG_class_type ] [fluffy] [line 13, size 64, align 32, offset 0] [def] [from ]
-!6 = metadata !{i32 786489, metadata !1, metadata !7, metadata !"mongoose", i32 12} ; [ DW_TAG_namespace ] [mongoose] [line 12]
-!7 = metadata !{i32 786489, metadata !1, metadata !8, metadata !"capybara", i32 11} ; [ DW_TAG_namespace ] [capybara] [line 11]
-!8 = metadata !{i32 786489, metadata !1, null, metadata !"echidna", i32 10} ; [ DW_TAG_namespace ] [echidna] [line 10]
-!9 = metadata !{metadata !10, metadata !12}
-!10 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN7echidna8capybara8mongoose6fluffyE", metadata !"a", i32 14, i64 32, i64 32, i64 0, i32 1, metadata !11} ; [ DW_TAG_member ] [a] [line 14, size 32, align 32, offset 0] [private] [from int]
-!11 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!12 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN7echidna8capybara8mongoose6fluffyE", metadata !"b", i32 15, i64 32, i64 32, i64 32, i32 1, metadata !11} ; [ DW_TAG_member ] [b] [line 15, size 32, align 32, offset 32] [private] [from int]
-!13 = metadata !{i32 786451, metadata !1, null, metadata !"wombat", i32 31, i64 64, i64 32, i32 0, i32 0, null, metadata !14, i32 0, null, null, metadata !"_ZTS6wombat"} ; [ DW_TAG_structure_type ] [wombat] [line 31, size 64, align 32, offset 0] [def] [from ]
-!14 = metadata !{metadata !15}
-!15 = metadata !{i32 786445, metadata !1, metadata !"_ZTS6wombat", metadata !"a_b", i32 35, i64 64, i64 32, i64 0, i32 0, metadata !"_ZTSN6wombatUt_E"} ; [ DW_TAG_member ] [a_b] [line 35, size 64, align 32, offset 0] [from _ZTSN6wombatUt_E]
-!16 = metadata !{i32 786451, metadata !1, metadata !"_ZTS6wombat", metadata !"", i32 32, i64 64, i64 32, i32 0, i32 0, null, metadata !17, i32 0, null, null, metadata !"_ZTSN6wombatUt_E"} ; [ DW_TAG_structure_type ] [line 32, size 64, align 32, offset 0] [def] [from ]
-!17 = metadata !{metadata !18, metadata !19}
-!18 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN6wombatUt_E", metadata !"a", i32 33, i64 32, i64 32, i64 0, i32 0, metadata !11} ; [ DW_TAG_member ] [a] [line 33, size 32, align 32, offset 0] [from int]
-!19 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN6wombatUt_E", metadata !"b", i32 34, i64 32, i64 32, i64 32, i32 0, metadata !11} ; [ DW_TAG_member ] [b] [line 34, size 32, align 32, offset 32] [from int]
-!20 = metadata !{metadata !21, metadata !25, metadata !26, metadata !35}
-!21 = metadata !{i32 786478, metadata !1, metadata !22, metadata !"foo", metadata !"foo", metadata !"_Z3foov", i32 5, metadata !23, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z3foov, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [foo]
-!22 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/bar.cpp]
-!23 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !24, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!24 = metadata !{null}
-!25 = metadata !{i32 786478, metadata !1, metadata !22, metadata !"__cxx_global_var_init", metadata !"__cxx_global_var_init", metadata !"", i32 29, metadata !23, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @__cxx_global_var_init, null, null, metadata !2, i32 29} ; [ DW_TAG_subprogram ] [line 29] [local] [def] [__cxx_global_var_init]
-!26 = metadata !{i32 786478, metadata !1, metadata !27, metadata !"walrus", metadata !"walrus", metadata !"_ZN12_GLOBAL__N_16walrusC2Ev", i32 25, metadata !31, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%"struct.<anonymous namespace>::walrus"*)* @_ZN12_GLOBAL__N_16walrusC2Ev, null, metadata !30, metadata !2, i32 25} ; [ DW_TAG_subprogram ] [line 25] [local] [def] [walrus]
-!27 = metadata !{i32 786451, metadata !1, metadata !28, metadata !"walrus", i32 24, i64 8, i64 8, i32 0, i32 0, null, metadata !29, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [walrus] [line 24, size 8, align 8, offset 0] [def] [from ]
-!28 = metadata !{i32 786489, metadata !1, null, metadata !"", i32 23} ; [ DW_TAG_namespace ] [line 23]
-!29 = metadata !{metadata !30}
-!30 = metadata !{i32 786478, metadata !1, metadata !27, metadata !"walrus", metadata !"walrus", metadata !"", i32 25, metadata !31, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !34, i32 25} ; [ DW_TAG_subprogram ] [line 25] [walrus]
-!31 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !32, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!32 = metadata !{null, metadata !33}
-!33 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !27} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from walrus]
-!34 = metadata !{i32 786468}
-!35 = metadata !{i32 786478, metadata !1, metadata !22, metadata !"", metadata !"", metadata !"_GLOBAL__I_a", i32 25, metadata !36, i1 true, i1 true, i32 0, i32 0, null, i32 64, i1 false, void ()* @_GLOBAL__I_a, null, null, metadata !2, i32 25} ; [ DW_TAG_subprogram ] [line 25] [local] [def]
-!36 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!37 = metadata !{metadata !38, metadata !39, metadata !40, metadata !41}
-!38 = metadata !{i32 786484, i32 0, null, metadata !"b", metadata !"b", metadata !"", metadata !22, i32 3, metadata !4, i32 0, i32 1, %struct.bar* @b, null} ; [ DW_TAG_variable ] [b] [line 3] [def]
-!39 = metadata !{i32 786484, i32 0, metadata !6, metadata !"animal", metadata !"animal", metadata !"_ZN7echidna8capybara8mongoose6animalE", metadata !22, i32 18, metadata !5, i32 0, i32 1, %"class.echidna::capybara::mongoose::fluffy"* @_ZN7echidna8capybara8mongoose6animalE, null} ; [ DW_TAG_variable ] [animal] [line 18] [def]
-!40 = metadata !{i32 786484, i32 0, null, metadata !"w", metadata !"w", metadata !"", metadata !22, i32 29, metadata !27, i32 1, i32 1, %"struct.<anonymous namespace>::walrus"* @w, null} ; [ DW_TAG_variable ] [w] [line 29] [local] [def]
-!41 = metadata !{i32 786484, i32 0, null, metadata !"wom", metadata !"wom", metadata !"", metadata !22, i32 38, metadata !13, i32 0, i32 1, %struct.wombat* @wom, null} ; [ DW_TAG_variable ] [wom] [line 38] [def]
-!42 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!43 = metadata !{metadata !"clang version 3.4 "}
-!44 = metadata !{i32 786688, metadata !21, metadata !"b", metadata !22, i32 7, metadata !45, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [b] [line 7]
-!45 = metadata !{i32 786451, metadata !1, metadata !21, metadata !"baz", i32 6, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [baz] [line 6, size 8, align 8, offset 0] [def] [from ]
-!46 = metadata !{i32 7, i32 0, metadata !21, null}
-!47 = metadata !{i32 8, i32 0, metadata !21, null} ; [ DW_TAG_imported_declaration ]
-!48 = metadata !{i32 29, i32 0, metadata !25, null}
-!49 = metadata !{i32 786689, metadata !26, metadata !"this", null, i32 16777216, metadata !50, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!50 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !27} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from walrus]
-!51 = metadata !{i32 0, i32 0, metadata !26, null}
-!52 = metadata !{i32 25, i32 0, metadata !26, null}
-!53 = metadata !{i32 25, i32 0, metadata !35, null}
-!54 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !6, metadata !14, metadata !17}
+!4 = metadata !{i32 786451, metadata !5, null, metadata !"bar", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, metadata !"_ZTS3bar"} ; [ DW_TAG_structure_type ] [bar] [line 1, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !"bar.h", metadata !"/tmp/dbginfo"}
+!6 = metadata !{i32 786434, metadata !1, metadata !7, metadata !"fluffy", i32 13, i64 64, i64 32, i32 0, i32 0, null, metadata !10, i32 0, null, null, metadata !"_ZTSN7echidna8capybara8mongoose6fluffyE"} ; [ DW_TAG_class_type ] [fluffy] [line 13, size 64, align 32, offset 0] [def] [from ]
+!7 = metadata !{i32 786489, metadata !1, metadata !8, metadata !"mongoose", i32 12} ; [ DW_TAG_namespace ] [mongoose] [line 12]
+!8 = metadata !{i32 786489, metadata !1, metadata !9, metadata !"capybara", i32 11} ; [ DW_TAG_namespace ] [capybara] [line 11]
+!9 = metadata !{i32 786489, metadata !1, null, metadata !"echidna", i32 10} ; [ DW_TAG_namespace ] [echidna] [line 10]
+!10 = metadata !{metadata !11, metadata !13}
+!11 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN7echidna8capybara8mongoose6fluffyE", metadata !"a", i32 14, i64 32, i64 32, i64 0, i32 1, metadata !12} ; [ DW_TAG_member ] [a] [line 14, size 32, align 32, offset 0] [private] [from int]
+!12 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!13 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN7echidna8capybara8mongoose6fluffyE", metadata !"b", i32 15, i64 32, i64 32, i64 32, i32 1, metadata !12} ; [ DW_TAG_member ] [b] [line 15, size 32, align 32, offset 32] [private] [from int]
+!14 = metadata !{i32 786451, metadata !1, null, metadata !"wombat", i32 31, i64 64, i64 32, i32 0, i32 0, null, metadata !15, i32 0, null, null, metadata !"_ZTS6wombat"} ; [ DW_TAG_structure_type ] [wombat] [line 31, size 64, align 32, offset 0] [def] [from ]
+!15 = metadata !{metadata !16}
+!16 = metadata !{i32 786445, metadata !1, metadata !"_ZTS6wombat", metadata !"a_b", i32 35, i64 64, i64 32, i64 0, i32 0, metadata !"_ZTSN6wombatUt_E"} ; [ DW_TAG_member ] [a_b] [line 35, size 64, align 32, offset 0] [from _ZTSN6wombatUt_E]
+!17 = metadata !{i32 786451, metadata !1, metadata !"_ZTS6wombat", metadata !"", i32 32, i64 64, i64 32, i32 0, i32 0, null, metadata !18, i32 0, null, null, metadata !"_ZTSN6wombatUt_E"} ; [ DW_TAG_structure_type ] [line 32, size 64, align 32, offset 0] [def] [from ]
+!18 = metadata !{metadata !19, metadata !20}
+!19 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN6wombatUt_E", metadata !"a", i32 33, i64 32, i64 32, i64 0, i32 0, metadata !12} ; [ DW_TAG_member ] [a] [line 33, size 32, align 32, offset 0] [from int]
+!20 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN6wombatUt_E", metadata !"b", i32 34, i64 32, i64 32, i64 32, i32 0, metadata !12} ; [ DW_TAG_member ] [b] [line 34, size 32, align 32, offset 32] [from int]
+!21 = metadata !{metadata !22, metadata !26, metadata !27, metadata !36}
+!22 = metadata !{i32 786478, metadata !1, metadata !23, metadata !"foo", metadata !"foo", metadata !"_Z3foov", i32 5, metadata !24, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z3foov, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [foo]
+!23 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/bar.cpp]
+!24 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !25, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!25 = metadata !{null}
+!26 = metadata !{i32 786478, metadata !1, metadata !23, metadata !"__cxx_global_var_init", metadata !"__cxx_global_var_init", metadata !"", i32 29, metadata !24, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @__cxx_global_var_init, null, null, metadata !2, i32 29} ; [ DW_TAG_subprogram ] [line 29] [local] [def] [__cxx_global_var_init]
+!27 = metadata !{i32 786478, metadata !1, metadata !28, metadata !"walrus", metadata !"walrus", metadata !"_ZN12_GLOBAL__N_16walrusC2Ev", i32 25, metadata !32, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%"struct.<anonymous namespace>::walrus"*)* @_ZN12_GLOBAL__N_16walrusC2Ev, null, metadata !31, metadata !2, i32 25} ; [ DW_TAG_subprogram ] [line 25] [local] [def] [walrus]
+!28 = metadata !{i32 786451, metadata !1, metadata !29, metadata !"walrus", i32 24, i64 8, i64 8, i32 0, i32 0, null, metadata !30, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [walrus] [line 24, size 8, align 8, offset 0] [def] [from ]
+!29 = metadata !{i32 786489, metadata !1, null, metadata !"", i32 23} ; [ DW_TAG_namespace ] [line 23]
+!30 = metadata !{metadata !31}
+!31 = metadata !{i32 786478, metadata !1, metadata !28, metadata !"walrus", metadata !"walrus", metadata !"", i32 25, metadata !32, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !35, i32 25} ; [ DW_TAG_subprogram ] [line 25] [walrus]
+!32 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !33, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!33 = metadata !{null, metadata !34}
+!34 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !28} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from walrus]
+!35 = metadata !{i32 786468}
+!36 = metadata !{i32 786478, metadata !1, metadata !23, metadata !"", metadata !"", metadata !"_GLOBAL__I_a", i32 25, metadata !37, i1 true, i1 true, i32 0, i32 0, null, i32 64, i1 false, void ()* @_GLOBAL__I_a, null, null, metadata !2, i32 25} ; [ DW_TAG_subprogram ] [line 25] [local] [def]
+!37 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!38 = metadata !{metadata !39, metadata !40, metadata !41, metadata !42}
+!39 = metadata !{i32 786484, i32 0, null, metadata !"b", metadata !"b", metadata !"", metadata !23, i32 3, metadata !4, i32 0, i32 1, %struct.bar* @b, null} ; [ DW_TAG_variable ] [b] [line 3] [def]
+!40 = metadata !{i32 786484, i32 0, metadata !7, metadata !"animal", metadata !"animal", metadata !"_ZN7echidna8capybara8mongoose6animalE", metadata !23, i32 18, metadata !6, i32 0, i32 1, %"class.echidna::capybara::mongoose::fluffy"* @_ZN7echidna8capybara8mongoose6animalE, null} ; [ DW_TAG_variable ] [animal] [line 18] [def]
+!41 = metadata !{i32 786484, i32 0, null, metadata !"w", metadata !"w", metadata !"", metadata !23, i32 29, metadata !28, i32 1, i32 1, %"struct.<anonymous namespace>::walrus"* @w, null} ; [ DW_TAG_variable ] [w] [line 29] [local] [def]
+!42 = metadata !{i32 786484, i32 0, null, metadata !"wom", metadata !"wom", metadata !"", metadata !23, i32 38, metadata !14, i32 0, i32 1, %struct.wombat* @wom, null} ; [ DW_TAG_variable ] [wom] [line 38] [def]
+!43 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!44 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!45 = metadata !{metadata !"clang version 3.5 "}
+!46 = metadata !{i32 786688, metadata !22, metadata !"b", metadata !23, i32 7, metadata !47, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [b] [line 7]
+!47 = metadata !{i32 786451, metadata !1, metadata !22, metadata !"baz", i32 6, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [baz] [line 6, size 8, align 8, offset 0] [def] [from ]
+!48 = metadata !{i32 7, i32 0, metadata !22, null}
+!49 = metadata !{i32 8, i32 0, metadata !22, null} ; [ DW_TAG_imported_declaration ]
+!50 = metadata !{i32 29, i32 0, metadata !26, null}
+!51 = metadata !{i32 786689, metadata !27, metadata !"this", null, i32 16777216, metadata !52, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!52 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !28} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from walrus]
+!53 = metadata !{i32 0, i32 0, metadata !27, null}
+!54 = metadata !{i32 25, i32 0, metadata !27, null}
+!55 = metadata !{i32 25, i32 0, metadata !36, null}
diff --git a/test/DebugInfo/X86/gnu-public-names-empty.ll b/test/DebugInfo/X86/gnu-public-names-empty.ll
index 8b0309cc65be..46ae65d00384 100644
--- a/test/DebugInfo/X86/gnu-public-names-empty.ll
+++ b/test/DebugInfo/X86/gnu-public-names-empty.ll
@@ -6,14 +6,14 @@
; Check that the attributes in the compile unit both point to a correct
; location, even when nothing is exported.
-; CHECK: DW_AT_GNU_pubnames [DW_FORM_sec_offset] (0x00000000)
-; CHECK: DW_AT_GNU_pubtypes [DW_FORM_sec_offset] (0x00000000)
+; CHECK: DW_AT_GNU_pubnames [DW_FORM_flag_present] (true)
+; CHECK-NOT: DW_AT_GNU_pubtypes [
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 191846) (llvm/trunk 191866)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.c] [DW_LANG_C99]
!1 = metadata !{metadata !"foo.c", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
!4 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/gnu-public-names.ll b/test/DebugInfo/X86/gnu-public-names.ll
index 7ad503253733..96fa52b92caf 100644
--- a/test/DebugInfo/X86/gnu-public-names.ll
+++ b/test/DebugInfo/X86/gnu-public-names.ll
@@ -45,66 +45,135 @@
; ASM-NEXT: .asciz "C" # External Name
; CHECK: .debug_info contents:
-; CHECK: DW_AT_GNU_pubnames [DW_FORM_sec_offset] (0x00000000)
-; CHECK: DW_AT_GNU_pubtypes [DW_FORM_sec_offset] (0x00000000)
+; CHECK: Compile Unit:
+; CHECK: DW_AT_GNU_pubnames [DW_FORM_flag_present] (true)
+; CHECK-NOT: DW_AT_GNU_pubtypes [
-; CHECK: [[C:[0-9a-f]+]]: DW_TAG_structure_type
+; CHECK: [[C:0x[0-9a-f]+]]: DW_TAG_structure_type
; CHECK-NEXT: DW_AT_name {{.*}} "C"
-; CHECK: [[STATIC_MEM_DECL:[0-9a-f]+]]: DW_TAG_member
+; CHECK: [[STATIC_MEM_DECL:0x[0-9a-f]+]]: DW_TAG_member
; CHECK-NEXT: DW_AT_name {{.*}} "static_member_variable"
-; CHECK: [[MEM_FUNC_DECL:[0-9a-f]+]]: DW_TAG_subprogram
+; CHECK: [[MEM_FUNC_DECL:0x[0-9a-f]+]]: DW_TAG_subprogram
; CHECK-NEXT: DW_AT_MIPS_linkage_name
; CHECK-NEXT: DW_AT_name {{.*}} "member_function"
-; CHECK: [[STATIC_MEM_FUNC_DECL:[0-9a-f]+]]: DW_TAG_subprogram
+; CHECK: [[STATIC_MEM_FUNC_DECL:0x[0-9a-f]+]]: DW_TAG_subprogram
; CHECK-NEXT: DW_AT_MIPS_linkage_name
; CHECK-NEXT: DW_AT_name {{.*}} "static_member_function"
-; CHECK: [[INT:[0-9a-f]+]]: DW_TAG_base_type
+; CHECK: [[INT:0x[0-9a-f]+]]: DW_TAG_base_type
; CHECK-NEXT: DW_AT_name {{.*}} "int"
-; CHECK: [[STATIC_MEM_VAR:[0-9a-f]+]]: DW_TAG_variable
-; CHECK-NEXT: DW_AT_specification {{.*}}[[STATIC_MEM_DECL]]
+; CHECK: [[STATIC_MEM_VAR:0x[0-9a-f]+]]: DW_TAG_variable
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[STATIC_MEM_DECL]]}
-; CHECK: [[GLOB_VAR:[0-9a-f]+]]: DW_TAG_variable
+; CHECK: [[GLOB_VAR:0x[0-9a-f]+]]: DW_TAG_variable
; CHECK-NEXT: DW_AT_name {{.*}} "global_variable"
-; CHECK: [[NS:[0-9a-f]+]]: DW_TAG_namespace
+; CHECK: [[NS:0x[0-9a-f]+]]: DW_TAG_namespace
; CHECK-NEXT: DW_AT_name {{.*}} "ns"
-; CHECK: [[GLOB_NS_VAR_DECL:[0-9a-f]+]]: DW_TAG_variable
+; CHECK: [[GLOB_NS_VAR_DECL:0x[0-9a-f]+]]: DW_TAG_variable
; CHECK-NEXT: DW_AT_name {{.*}} "global_namespace_variable"
-; CHECK: [[D_VAR_DECL:[0-9a-f]+]]: DW_TAG_variable
+; CHECK: [[D_VAR_DECL:0x[0-9a-f]+]]: DW_TAG_variable
; CHECK-NEXT: DW_AT_name {{.*}} "d"
-; CHECK: [[D:[0-9a-f]+]]: DW_TAG_structure_type
+; CHECK: [[D:0x[0-9a-f]+]]: DW_TAG_structure_type
; CHECK-NEXT: DW_AT_name {{.*}} "D"
-; CHECK: [[GLOB_NS_FUNC:[0-9a-f]+]]: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_MIPS_linkage_name
-; CHECK-NEXT: DW_AT_name {{.*}} "global_namespace_function"
-
-; CHECK: [[GLOB_NS_VAR:[0-9a-f]+]]: DW_TAG_variable
-; CHECK-NEXT: DW_AT_specification {{.*}}[[GLOB_NS_VAR_DECL]]
-
-; CHECK: [[D_VAR:[0-9a-f]+]]: DW_TAG_variable
-; CHECK-NEXT: DW_AT_specification {{.*}}[[D_VAR_DECL]]
-
-; CHECK: [[MEM_FUNC:[0-9a-f]+]]: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_specification {{.*}}[[MEM_FUNC_DECL]]
-
-; CHECK: [[STATIC_MEM_FUNC:[0-9a-f]+]]: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_specification {{.*}}[[STATIC_MEM_FUNC_DECL]]
-
-; CHECK: [[GLOBAL_FUNC:[0-9a-f]+]]: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_MIPS_linkage_name
-; CHECK-NEXT: DW_AT_name {{.*}} "global_function"
+; CHECK: [[GLOB_NS_FUNC:0x[0-9a-f]+]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "global_namespace_function"
+
+; CHECK: [[GLOB_NS_VAR:0x[0-9a-f]+]]: DW_TAG_variable
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[GLOB_NS_VAR_DECL]]}
+
+; CHECK: [[D_VAR:0x[0-9a-f]+]]: DW_TAG_variable
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[D_VAR_DECL]]}
+
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "f3"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[F3_Z:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "z"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_AT_location
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: NULL
+; CHECK-NOT: {{DW_TAG|NULL}}
+
+; CHECK: [[OUTER:.*]]: DW_TAG_namespace
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "outer"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[OUTER_ANON:.*]]: DW_TAG_namespace
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK-NOT: DW_AT_name
+; CHECK: [[OUTER_ANON_C_DECL:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "c"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: NULL
+; CHECK-NOT: {{DW_TAG|NULL}}
+; FIXME: We probably shouldn't bother describing the implicit
+; import of the preceding anonymous namespace. This should be fixed
+; in clang.
+; CHECK: DW_TAG_imported_module
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: NULL
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[OUTER_ANON_C:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[OUTER_ANON_C_DECL]]}
+
+; CHECK: [[ANON:.*]]: DW_TAG_namespace
+; CHECK-NOT: DW_AT_name
+; CHECK: [[ANON_INNER:.*]]: DW_TAG_namespace
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "inner"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[ANON_INNER_B_DECL:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "b"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: NULL
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[ANON_I_DECL:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "i"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: NULL
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[ANON_INNER_B:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[ANON_INNER_B_DECL]]}
+; CHECK: [[ANON_I:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK-NEXT: DW_AT_specification {{.*}} {[[ANON_I_DECL]]}
+
+; CHECK: [[MEM_FUNC:0x[0-9a-f]+]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_specification {{.*}} {[[MEM_FUNC_DECL]]}
+
+; CHECK: [[STATIC_MEM_FUNC:0x[0-9a-f]+]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_specification {{.*}} {[[STATIC_MEM_FUNC_DECL]]}
+
+; CHECK: [[GLOBAL_FUNC:0x[0-9a-f]+]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "global_function"
; CHECK-LABEL: .debug_gnu_pubnames contents:
-; CHECK-NEXT: length = 0x000000e7 version = 0x0002 unit_offset = 0x00000000 unit_size = 0x0000017b
+; CHECK-NEXT: length = {{.*}} version = 0x0002 unit_offset = 0x00000000 unit_size = {{.*}}
; CHECK-NEXT: Offset Linkage Kind Name
; CHECK-DAG: [[GLOBAL_FUNC]] EXTERNAL FUNCTION "global_function"
; CHECK-DAG: [[NS]] EXTERNAL TYPE "ns"
@@ -115,6 +184,20 @@
; CHECK-DAG: [[D_VAR]] EXTERNAL VARIABLE "ns::d"
; CHECK-DAG: [[STATIC_MEM_VAR]] EXTERNAL VARIABLE "C::static_member_variable"
; CHECK-DAG: [[STATIC_MEM_FUNC]] EXTERNAL FUNCTION "C::static_member_function"
+; CHECK-DAG: [[ANON]] EXTERNAL TYPE "(anonymous namespace)"
+; CHECK-DAG: [[ANON_INNER]] EXTERNAL TYPE "(anonymous namespace)::inner"
+; CHECK-DAG: [[OUTER]] EXTERNAL TYPE "outer"
+; CHECK-DAG: [[OUTER_ANON]] EXTERNAL TYPE "outer::(anonymous namespace)"
+; CHECK-DAG: [[ANON_I]] STATIC VARIABLE "(anonymous namespace)::i"
+; CHECK-DAG: [[ANON_INNER_B]] STATIC VARIABLE "(anonymous namespace)::inner::b"
+; CHECK-DAG: [[OUTER_ANON_C]] STATIC VARIABLE "outer::(anonymous namespace)::c"
+
+; GCC doesn't put local statics in pubnames, but it seems not unreasonable and
+; comes out naturally from LLVM's implementation, so I'm OK with it for now. If
+; it's demonstrated that this is a major size concern or degrades debug info
+; consumer behavior, feel free to change it.
+
+; CHECK-DAG: [[F3_Z]] STATIC VARIABLE "f3::z"
; CHECK-LABEL: debug_gnu_pubtypes contents:
@@ -130,16 +213,20 @@
@global_variable = global %struct.C zeroinitializer, align 1
@_ZN2ns25global_namespace_variableE = global i32 1, align 4
@_ZN2ns1dE = global %"struct.ns::D" zeroinitializer, align 4
+@_ZZ2f3vE1z = internal global i32 0, align 4
+@_ZN12_GLOBAL__N_11iE = internal global i32 0, align 4
+@_ZN12_GLOBAL__N_15inner1bE = internal global i32 0, align 4
+@_ZN5outer12_GLOBAL__N_11cE = internal global i32 0, align 4
; Function Attrs: nounwind uwtable
define void @_ZN1C15member_functionEv(%struct.C* %this) #0 align 2 {
entry:
%this.addr = alloca %struct.C*, align 8
store %struct.C* %this, %struct.C** %this.addr, align 8
- call void @llvm.dbg.declare(metadata !{%struct.C** %this.addr}, metadata !36), !dbg !38
+ call void @llvm.dbg.declare(metadata !{%struct.C** %this.addr}, metadata !50), !dbg !52
%this1 = load %struct.C** %this.addr
- store i32 0, i32* @_ZN1C22static_member_variableE, align 4, !dbg !39
- ret void, !dbg !39
+ store i32 0, i32* @_ZN1C22static_member_variableE, align 4, !dbg !53
+ ret void, !dbg !54
}
; Function Attrs: nounwind readnone
@@ -148,72 +235,108 @@ declare void @llvm.dbg.declare(metadata, metadata) #1
; Function Attrs: nounwind uwtable
define i32 @_ZN1C22static_member_functionEv() #0 align 2 {
entry:
- %0 = load i32* @_ZN1C22static_member_variableE, align 4, !dbg !40
- ret i32 %0, !dbg !40
+ %0 = load i32* @_ZN1C22static_member_variableE, align 4, !dbg !55
+ ret i32 %0, !dbg !55
}
; Function Attrs: nounwind uwtable
define i32 @_Z15global_functionv() #0 {
entry:
- ret i32 -1, !dbg !41
+ ret i32 -1, !dbg !56
}
; Function Attrs: nounwind uwtable
define void @_ZN2ns25global_namespace_functionEv() #0 {
entry:
- call void @_ZN1C15member_functionEv(%struct.C* @global_variable), !dbg !42
- ret void, !dbg !42
+ call void @_ZN1C15member_functionEv(%struct.C* @global_variable), !dbg !57
+ ret void, !dbg !58
+}
+
+; Function Attrs: nounwind uwtable
+define i32* @_Z2f3v() #0 {
+entry:
+ ret i32* @_ZZ2f3vE1z, !dbg !59
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @_Z2f7v() #0 {
+entry:
+ %0 = load i32* @_ZN12_GLOBAL__N_11iE, align 4, !dbg !60
+ %call = call i32* @_Z2f3v(), !dbg !60
+ %1 = load i32* %call, align 4, !dbg !60
+ %add = add nsw i32 %0, %1, !dbg !60
+ %2 = load i32* @_ZN12_GLOBAL__N_15inner1bE, align 4, !dbg !60
+ %add1 = add nsw i32 %add, %2, !dbg !60
+ %3 = load i32* @_ZN5outer12_GLOBAL__N_11cE, align 4, !dbg !60
+ %add2 = add nsw i32 %add1, %3, !dbg !60
+ ret i32 %add2, !dbg !60
}
attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!34, !43}
-!llvm.ident = !{!35}
+!llvm.module.flags = !{!47, !48}
+!llvm.ident = !{!49}
-!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (trunk 192862) (llvm/trunk 192861)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !21, metadata !29, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/pubnames.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !"pubnames.cpp", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
-!3 = metadata !{metadata !4, metadata !17}
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !19, metadata !32, metadata !45, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/pubnames.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"pubnames.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !15}
!4 = metadata !{i32 786451, metadata !1, null, metadata !"C", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS1C"} ; [ DW_TAG_structure_type ] [C] [line 1, size 8, align 8, offset 0] [def] [from ]
-!5 = metadata !{metadata !6, metadata !8, metadata !13}
+!5 = metadata !{metadata !6, metadata !8, metadata !12}
!6 = metadata !{i32 786445, metadata !1, metadata !"_ZTS1C", metadata !"static_member_variable", i32 4, i64 0, i64 0, i64 0, i32 4096, metadata !7, null} ; [ DW_TAG_member ] [static_member_variable] [line 4, size 0, align 0, offset 0] [static] [from int]
!7 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!8 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"member_function", metadata !"member_function", metadata !"_ZN1C15member_functionEv", i32 2, metadata !9, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !12, i32 2} ; [ DW_TAG_subprogram ] [line 2] [member_function]
+!8 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"member_function", metadata !"member_function", metadata !"_ZN1C15member_functionEv", i32 2, metadata !9, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 2} ; [ DW_TAG_subprogram ] [line 2] [member_function]
!9 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!10 = metadata !{null, metadata !11}
!11 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1C]
-!12 = metadata !{i32 786468}
-!13 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"static_member_function", metadata !"static_member_function", metadata !"_ZN1C22static_member_functionEv", i32 3, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !16, i32 3} ; [ DW_TAG_subprogram ] [line 3] [static_member_function]
-!14 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!15 = metadata !{metadata !7}
-!16 = metadata !{i32 786468}
-!17 = metadata !{i32 786451, metadata !1, metadata !18, metadata !"D", i32 21, i64 32, i64 32, i32 0, i32 0, null, metadata !19, i32 0, null, null, metadata !"_ZTSN2ns1DE"} ; [ DW_TAG_structure_type ] [D] [line 21, size 32, align 32, offset 0] [def] [from ]
-!18 = metadata !{i32 786489, metadata !1, null, metadata !"ns", i32 17} ; [ DW_TAG_namespace ] [ns] [line 17]
-!19 = metadata !{metadata !20}
-!20 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN2ns1DE", metadata !"A", i32 22, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_member ] [A] [line 22, size 32, align 32, offset 0] [from int]
-!21 = metadata !{metadata !22, metadata !23, metadata !24, metadata !26}
-!22 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"member_function", metadata !"member_function", metadata !"_ZN1C15member_functionEv", i32 9, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.C*)* @_ZN1C15member_functionEv, null, metadata !8, metadata !2, i32 9} ; [ DW_TAG_subprogram ] [line 9] [def] [member_function]
-!23 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"static_member_function", metadata !"static_member_function", metadata !"_ZN1C22static_member_functionEv", i32 11, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_ZN1C22static_member_functionEv, null, metadata !13, metadata !2, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [static_member_function]
-!24 = metadata !{i32 786478, metadata !1, metadata !25, metadata !"global_function", metadata !"global_function", metadata !"_Z15global_functionv", i32 15, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z15global_functionv, null, null, metadata !2, i32 15} ; [ DW_TAG_subprogram ] [line 15] [def] [global_function]
-!25 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/pubnames.cpp]
-!26 = metadata !{i32 786478, metadata !1, metadata !18, metadata !"global_namespace_function", metadata !"global_namespace_function", metadata !"_ZN2ns25global_namespace_functionEv", i32 18, metadata !27, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN2ns25global_namespace_functionEv, null, null, metadata !2, i32 18} ; [ DW_TAG_subprogram ] [line 18] [def] [global_namespace_function]
-!27 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !28, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!28 = metadata !{null}
-!29 = metadata !{metadata !30, metadata !31, metadata !32, metadata !33}
-!30 = metadata !{i32 786484, i32 0, metadata !4, metadata !"static_member_variable", metadata !"static_member_variable", metadata !"_ZN1C22static_member_variableE", metadata !25, i32 7, metadata !7, i32 0, i32 1, i32* @_ZN1C22static_member_variableE, metadata !6} ; [ DW_TAG_variable ] [static_member_variable] [line 7] [def]
-!31 = metadata !{i32 786484, i32 0, null, metadata !"global_variable", metadata !"global_variable", metadata !"", metadata !25, i32 13, metadata !4, i32 0, i32 1, %struct.C* @global_variable, null} ; [ DW_TAG_variable ] [global_variable] [line 13] [def]
-!32 = metadata !{i32 786484, i32 0, metadata !18, metadata !"global_namespace_variable", metadata !"global_namespace_variable", metadata !"_ZN2ns25global_namespace_variableE", metadata !25, i32 19, metadata !7, i32 0, i32 1, i32* @_ZN2ns25global_namespace_variableE, null} ; [ DW_TAG_variable ] [global_namespace_variable] [line 19] [def]
-!33 = metadata !{i32 786484, i32 0, metadata !18, metadata !"d", metadata !"d", metadata !"_ZN2ns1dE", metadata !25, i32 23, metadata !17, i32 0, i32 1, %"struct.ns::D"* @_ZN2ns1dE, null} ; [ DW_TAG_variable ] [d] [line 23] [def]
-!34 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!35 = metadata !{metadata !"clang version 3.4 (trunk 192862) (llvm/trunk 192861)"}
-!36 = metadata !{i32 786689, metadata !22, metadata !"this", null, i32 16777216, metadata !37, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!37 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1C]
-!38 = metadata !{i32 0, i32 0, metadata !22, null}
-!39 = metadata !{i32 9, i32 0, metadata !22, null}
-!40 = metadata !{i32 11, i32 0, metadata !23, null}
-!41 = metadata !{i32 15, i32 0, metadata !24, null}
-!42 = metadata !{i32 18, i32 0, metadata !26, null}
-
-!43 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"static_member_function", metadata !"static_member_function", metadata !"_ZN1C22static_member_functionEv", i32 3, metadata !13, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 3} ; [ DW_TAG_subprogram ] [line 3] [static_member_function]
+!13 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !14, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!14 = metadata !{metadata !7}
+!15 = metadata !{i32 786451, metadata !1, metadata !16, metadata !"D", i32 28, i64 32, i64 32, i32 0, i32 0, null, metadata !17, i32 0, null, null, metadata !"_ZTSN2ns1DE"} ; [ DW_TAG_structure_type ] [D] [line 28, size 32, align 32, offset 0] [def] [from ]
+!16 = metadata !{i32 786489, metadata !1, null, metadata !"ns", i32 23} ; [ DW_TAG_namespace ] [ns] [line 23]
+!17 = metadata !{metadata !18}
+!18 = metadata !{i32 786445, metadata !1, metadata !"_ZTSN2ns1DE", metadata !"A", i32 29, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_member ] [A] [line 29, size 32, align 32, offset 0] [from int]
+!19 = metadata !{metadata !20, metadata !21, metadata !22, metadata !24, metadata !27, metadata !31}
+!20 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"member_function", metadata !"member_function", metadata !"_ZN1C15member_functionEv", i32 9, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.C*)* @_ZN1C15member_functionEv, null, metadata !8, metadata !2, i32 9} ; [ DW_TAG_subprogram ] [line 9] [def] [member_function]
+!21 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1C", metadata !"static_member_function", metadata !"static_member_function", metadata !"_ZN1C22static_member_functionEv", i32 13, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_ZN1C22static_member_functionEv, null, metadata !12, metadata !2, i32 13} ; [ DW_TAG_subprogram ] [line 13] [def] [static_member_function]
+!22 = metadata !{i32 786478, metadata !1, metadata !23, metadata !"global_function", metadata !"global_function", metadata !"_Z15global_functionv", i32 19, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z15global_functionv, null, null, metadata !2, i32 19} ; [ DW_TAG_subprogram ] [line 19] [def] [global_function]
+!23 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/pubnames.cpp]
+!24 = metadata !{i32 786478, metadata !1, metadata !16, metadata !"global_namespace_function", metadata !"global_namespace_function", metadata !"_ZN2ns25global_namespace_functionEv", i32 24, metadata !25, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN2ns25global_namespace_functionEv, null, null, metadata !2, i32 24} ; [ DW_TAG_subprogram ] [line 24] [def] [global_namespace_function]
+!25 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !26, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!26 = metadata !{null}
+!27 = metadata !{i32 786478, metadata !1, metadata !23, metadata !"f3", metadata !"f3", metadata !"_Z2f3v", i32 37, metadata !28, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32* ()* @_Z2f3v, null, null, metadata !2, i32 37} ; [ DW_TAG_subprogram ] [line 37] [def] [f3]
+!28 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !29, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!29 = metadata !{metadata !30}
+!30 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !7} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!31 = metadata !{i32 786478, metadata !1, metadata !23, metadata !"f7", metadata !"f7", metadata !"_Z2f7v", i32 54, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z2f7v, null, null, metadata !2, i32 54} ; [ DW_TAG_subprogram ] [line 54] [def] [f7]
+!32 = metadata !{metadata !33, metadata !34, metadata !35, metadata !36, metadata !37, metadata !38, metadata !41, metadata !44}
+!33 = metadata !{i32 786484, i32 0, metadata !4, metadata !"static_member_variable", metadata !"static_member_variable", metadata !"_ZN1C22static_member_variableE", metadata !23, i32 7, metadata !7, i32 0, i32 1, i32* @_ZN1C22static_member_variableE, metadata !6} ; [ DW_TAG_variable ] [static_member_variable] [line 7] [def]
+!34 = metadata !{i32 786484, i32 0, null, metadata !"global_variable", metadata !"global_variable", metadata !"", metadata !23, i32 17, metadata !"_ZTS1C", i32 0, i32 1, %struct.C* @global_variable, null} ; [ DW_TAG_variable ] [global_variable] [line 17] [def]
+!35 = metadata !{i32 786484, i32 0, metadata !16, metadata !"global_namespace_variable", metadata !"global_namespace_variable", metadata !"_ZN2ns25global_namespace_variableE", metadata !23, i32 27, metadata !7, i32 0, i32 1, i32* @_ZN2ns25global_namespace_variableE, null} ; [ DW_TAG_variable ] [global_namespace_variable] [line 27] [def]
+!36 = metadata !{i32 786484, i32 0, metadata !16, metadata !"d", metadata !"d", metadata !"_ZN2ns1dE", metadata !23, i32 30, metadata !"_ZTSN2ns1DE", i32 0, i32 1, %"struct.ns::D"* @_ZN2ns1dE, null} ; [ DW_TAG_variable ] [d] [line 30] [def]
+!37 = metadata !{i32 786484, i32 0, metadata !27, metadata !"z", metadata !"z", metadata !"", metadata !23, i32 38, metadata !7, i32 1, i32 1, i32* @_ZZ2f3vE1z, null} ; [ DW_TAG_variable ] [z] [line 38] [local] [def]
+!38 = metadata !{i32 786484, i32 0, metadata !39, metadata !"c", metadata !"c", metadata !"_ZN5outer12_GLOBAL__N_11cE", metadata !23, i32 50, metadata !7, i32 1, i32 1, i32* @_ZN5outer12_GLOBAL__N_11cE, null} ; [ DW_TAG_variable ] [c] [line 50] [local] [def]
+!39 = metadata !{i32 786489, metadata !1, metadata !40, metadata !"", i32 49} ; [ DW_TAG_namespace ] [line 49]
+!40 = metadata !{i32 786489, metadata !1, null, metadata !"outer", i32 48} ; [ DW_TAG_namespace ] [outer] [line 48]
+!41 = metadata !{i32 786484, i32 0, metadata !42, metadata !"b", metadata !"b", metadata !"_ZN12_GLOBAL__N_15inner1bE", metadata !23, i32 44, metadata !7, i32 1, i32 1, i32* @_ZN12_GLOBAL__N_15inner1bE, null} ; [ DW_TAG_variable ] [b] [line 44] [local] [def]
+!42 = metadata !{i32 786489, metadata !1, metadata !43, metadata !"inner", i32 43} ; [ DW_TAG_namespace ] [inner] [line 43]
+!43 = metadata !{i32 786489, metadata !1, null, metadata !"", i32 33} ; [ DW_TAG_namespace ] [line 33]
+!44 = metadata !{i32 786484, i32 0, metadata !43, metadata !"i", metadata !"i", metadata !"_ZN12_GLOBAL__N_11iE", metadata !23, i32 34, metadata !7, i32 1, i32 1, i32* @_ZN12_GLOBAL__N_11iE, null} ; [ DW_TAG_variable ] [i] [line 34] [local] [def]
+!45 = metadata !{metadata !46}
+!46 = metadata !{i32 786490, metadata !40, metadata !39, i32 40} ; [ DW_TAG_imported_module ]
+!47 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!48 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!49 = metadata !{metadata !"clang version 3.5.0 "}
+!50 = metadata !{i32 786689, metadata !20, metadata !"this", null, i32 16777216, metadata !51, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!51 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1C]
+!52 = metadata !{i32 0, i32 0, metadata !20, null}
+!53 = metadata !{i32 10, i32 0, metadata !20, null}
+!54 = metadata !{i32 11, i32 0, metadata !20, null}
+!55 = metadata !{i32 14, i32 0, metadata !21, null}
+!56 = metadata !{i32 20, i32 0, metadata !22, null}
+!57 = metadata !{i32 25, i32 0, metadata !24, null}
+!58 = metadata !{i32 26, i32 0, metadata !24, null}
+!59 = metadata !{i32 39, i32 0, metadata !27, null}
+!60 = metadata !{i32 55, i32 0, metadata !31, null}
diff --git a/test/DebugInfo/X86/inline-member-function.ll b/test/DebugInfo/X86/inline-member-function.ll
new file mode 100644
index 000000000000..3dc6043bf36c
--- /dev/null
+++ b/test/DebugInfo/X86/inline-member-function.ll
@@ -0,0 +1,95 @@
+; REQUIRES: object-emission
+
+; RUN: llc -mtriple=x86_64-linux -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; From source:
+; struct foo {
+; int __attribute__((always_inline)) func(int x) { return x + 2; }
+; };
+
+; int i;
+
+; int main() {
+; return foo().func(i);
+; }
+
+; CHECK: DW_TAG_structure_type
+; CHECK: DW_TAG_subprogram
+
+; But make sure we emit DW_AT_object_pointer on the abstract definition.
+; CHECK: [[ABSTRACT_ORIGIN:.*]]: DW_TAG_subprogram
+; CHECK-NOT: NULL
+; CHECK-NOT: TAG
+; CHECK: DW_AT_object_pointer
+
+; Ensure we omit DW_AT_object_pointer on inlined subroutines.
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NEXT: DW_AT_abstract_origin {{.*}}{[[ABSTRACT_ORIGIN]]}
+; CHECK-NOT: NULL
+; CHECK-NOT: DW_AT_object_pointer
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_AT_artificial
+; CHECK: DW_TAG
+
+%struct.foo = type { i8 }
+
+@i = global i32 0, align 4
+
+; Function Attrs: uwtable
+define i32 @main() #0 {
+entry:
+ %this.addr.i = alloca %struct.foo*, align 8
+ %x.addr.i = alloca i32, align 4
+ %retval = alloca i32, align 4
+ %tmp = alloca %struct.foo, align 1
+ store i32 0, i32* %retval
+ %0 = load i32* @i, align 4, !dbg !23
+ store %struct.foo* %tmp, %struct.foo** %this.addr.i, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.foo** %this.addr.i}, metadata !24), !dbg !26
+ store i32 %0, i32* %x.addr.i, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %x.addr.i}, metadata !27), !dbg !28
+ %this1.i = load %struct.foo** %this.addr.i
+ %1 = load i32* %x.addr.i, align 4, !dbg !28
+ %add.i = add nsw i32 %1, 2, !dbg !28
+ ret i32 %add.i, !dbg !23
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!20, !21}
+!llvm.ident = !{!22}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !12, metadata !18, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/inline.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"inline.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"foo", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 1, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 786478, metadata !1, metadata !"_ZTS3foo", metadata !"func", metadata !"func", metadata !"_ZN3foo4funcEi", i32 2, metadata !7, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !11, i32 2} ; [ DW_TAG_subprogram ] [line 2] [func]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{metadata !9, metadata !10, metadata !9}
+!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS3foo"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS3foo]
+!11 = metadata !{i32 786468}
+!12 = metadata !{metadata !13, metadata !17}
+!13 = metadata !{i32 786478, metadata !1, metadata !14, metadata !"main", metadata !"main", metadata !"", i32 7, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [main]
+!14 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/inline.cpp]
+!15 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !16, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = metadata !{metadata !9}
+!17 = metadata !{i32 786478, metadata !1, metadata !"_ZTS3foo", metadata !"func", metadata !"func", metadata !"_ZN3foo4funcEi", i32 2, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, metadata !6, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [func]
+!18 = metadata !{metadata !19}
+!19 = metadata !{i32 786484, i32 0, null, metadata !"i", metadata !"i", metadata !"", metadata !14, i32 5, metadata !9, i32 0, i32 1, i32* @i, null} ; [ DW_TAG_variable ] [i] [line 5] [def]
+!20 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!21 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!22 = metadata !{metadata !"clang version 3.5.0 "}
+!23 = metadata !{i32 8, i32 0, metadata !13, null} ; [ DW_TAG_imported_declaration ]
+!24 = metadata !{i32 786689, metadata !17, metadata !"this", null, i32 16777216, metadata !25, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!25 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS3foo"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS3foo]
+!26 = metadata !{i32 0, i32 0, metadata !17, metadata !23}
+!27 = metadata !{i32 786689, metadata !17, metadata !"x", metadata !14, i32 33554434, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [x] [line 2]
+!28 = metadata !{i32 2, i32 0, metadata !17, metadata !23}
diff --git a/test/DebugInfo/X86/inline-seldag-test.ll b/test/DebugInfo/X86/inline-seldag-test.ll
new file mode 100644
index 000000000000..615f03a2ad28
--- /dev/null
+++ b/test/DebugInfo/X86/inline-seldag-test.ll
@@ -0,0 +1,77 @@
+; RUN: llc -mtriple=x86_64-linux-gnu -fast-isel=false -filetype=obj < %s -o - | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux-gnu -fast-isel=false -filetype=asm < %s -o - | FileCheck --check-prefix=ASM %s
+
+; Generated from:
+; clang-tot -c -S -emit-llvm -g inline-seldag-test.c
+; inline int __attribute__((always_inline)) f(int y) {
+; return y ? 4 : 7;
+; }
+; void func() {
+; volatile int x;
+; x = f(x);
+; }
+
+; CHECK: [[F:.*]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "f"
+
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NEXT: DW_AT_abstract_origin {{.*}} {[[F]]}
+
+
+; Make sure the condition test is attributed to the inline function, not the
+; location of the test's operands within the caller.
+
+; ASM: # inline-seldag-test.c:2:0
+; ASM-NOT: .loc
+; ASM: testl
+
+; Function Attrs: nounwind uwtable
+define void @func() #0 {
+entry:
+ %y.addr.i = alloca i32, align 4
+ %x = alloca i32, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %x}, metadata !15), !dbg !17
+ %0 = load volatile i32* %x, align 4, !dbg !18
+ store i32 %0, i32* %y.addr.i, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %y.addr.i}, metadata !19), !dbg !20
+ %1 = load i32* %y.addr.i, align 4, !dbg !21
+ %tobool.i = icmp ne i32 %1, 0, !dbg !21
+ %cond.i = select i1 %tobool.i, i32 4, i32 7, !dbg !21
+ store volatile i32 %cond.i, i32* %x, align 4, !dbg !18
+ ret void, !dbg !22
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!12, !13}
+!llvm.ident = !{!14}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/inline-seldag-test.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"inline-seldag-test.c", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !8}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func", metadata !"func", metadata !"", i32 4, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, void ()* @func, null, null, metadata !2, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [func]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/inline-seldag-test.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f", metadata !"f", metadata !"", i32 1, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [f]
+!9 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!10 = metadata !{metadata !11, metadata !11}
+!11 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!12 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!14 = metadata !{metadata !"clang version 3.5.0 "}
+!15 = metadata !{i32 786688, metadata !4, metadata !"x", metadata !5, i32 5, metadata !16, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [x] [line 5]
+!16 = metadata !{i32 786485, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !11} ; [ DW_TAG_volatile_type ] [line 0, size 0, align 0, offset 0] [from int]
+!17 = metadata !{i32 5, i32 0, metadata !4, null}
+!18 = metadata !{i32 6, i32 7, metadata !4, null}
+!19 = metadata !{i32 786689, metadata !8, metadata !"y", metadata !5, i32 16777217, metadata !11, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [y] [line 1]
+!20 = metadata !{i32 1, i32 0, metadata !8, metadata !18}
+!21 = metadata !{i32 2, i32 0, metadata !8, metadata !18}
+!22 = metadata !{i32 7, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/instcombine-instrinsics.ll b/test/DebugInfo/X86/instcombine-instrinsics.ll
index 41dd09f5a425..2fd7ee319c2e 100644
--- a/test/DebugInfo/X86/instcombine-instrinsics.ll
+++ b/test/DebugInfo/X86/instcombine-instrinsics.ll
@@ -1,102 +1,79 @@
-; RUN: opt < %s -O2 -S | FileCheck %s
+; RUN: opt %s -O2 -S -o - | FileCheck %s
; Verify that we emit the same intrinsic at most once.
-; CHECK: call void @llvm.dbg.value(metadata !{%struct.i14** %i14}
-; CHECK-NOT: call void @llvm.dbg.value(metadata !{%struct.i14** %i14}
+; rdar://problem/13056109
+;
+; CHECK: call void @llvm.dbg.value(metadata !{%struct.i14** %p}
+; CHECK-NOT: call void @llvm.dbg.value(metadata !{%struct.i14** %p}
+; CHECK-NEXT: call i32 @foo
; CHECK: ret
+;
+;
+; typedef struct {
+; long i;
+; } i14;
+;
+; int foo(i14**);
+;
+; void init() {
+; i14* p = 0;
+; foo(&p);
+; p->i |= 4;
+; foo(&p);
+; }
+;
+; ModuleID = 'instcombine_intrinsics.c'
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
-;*** IR Dump After Dead Argument Elimination ***
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.8.0"
+%struct.i14 = type { i64 }
-%struct.i3 = type { i32 }
-%struct.i14 = type { i32 }
-%struct.i24 = type opaque
-
-define %struct.i3* @barz(i64 %i9) nounwind {
-entry:
- br label %while.cond
-
-while.cond: ; preds = %while.body, %entry
- br label %while.body
-
-while.body: ; preds = %while.cond
- br label %while.cond
-}
-
-declare void @llvm.dbg.declare(metadata, metadata)
-
-define void @init() nounwind {
-entry:
- %i14 = alloca %struct.i14*, align 8
- call void @llvm.dbg.declare(metadata !{%struct.i14** %i14}, metadata !25)
- store %struct.i14* null, %struct.i14** %i14, align 8
- %call = call i32 @foo(i8* bitcast (void ()* @bar to i8*), %struct.i14** %i14)
- %0 = load %struct.i14** %i14, align 8
- %i16 = getelementptr inbounds %struct.i14* %0, i32 0, i32 0
- %1 = load i32* %i16, align 4
- %or = or i32 %1, 4
- store i32 %or, i32* %i16, align 4
- %call4 = call i32 @foo(i8* bitcast (void ()* @baz to i8*), %struct.i14** %i14)
- ret void
+; Function Attrs: nounwind ssp uwtable
+define void @init() #0 {
+ %p = alloca %struct.i14*, align 8
+ call void @llvm.dbg.declare(metadata !{%struct.i14** %p}, metadata !11), !dbg !18
+ store %struct.i14* null, %struct.i14** %p, align 8, !dbg !18
+ %1 = call i32 @foo(%struct.i14** %p), !dbg !19
+ %2 = load %struct.i14** %p, align 8, !dbg !20
+ %3 = getelementptr inbounds %struct.i14* %2, i32 0, i32 0, !dbg !20
+ %4 = load i64* %3, align 8, !dbg !20
+ %5 = or i64 %4, 4, !dbg !20
+ store i64 %5, i64* %3, align 8, !dbg !20
+ %6 = call i32 @foo(%struct.i14** %p), !dbg !21
+ ret void, !dbg !22
}
-declare i32 @foo(i8*, %struct.i14**) nounwind
-
-define internal void @bar() nounwind {
-entry:
- %i9 = alloca i64, align 8
- store i64 0, i64* %i9, align 8
- %call = call i32 @put(i64 0, i64* %i9, i64 0, %struct.i24* null)
- ret void
-}
-
-define internal void @baz() nounwind {
-entry:
- ret void
-}
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
-declare i32 @put(i64, i64*, i64, %struct.i24*) nounwind readnone
+declare i32 @foo(%struct.i14**)
-declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!73}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
-!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.3 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !48, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"i1", metadata !""}
-!2 = metadata !{i32 0}
-!3 = metadata !{metadata !4, metadata !21, metadata !33, metadata !47}
-!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"i2", metadata !"i2", metadata !"", i32 31, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, %struct.i3* (i64)* @barz, null, null, metadata !16, i32 32} ; [ DW_TAG_subprogram ] [line 31] [scope 32]
-!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ]
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [instcombine_intrinsics.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"instcombine_intrinsics.c", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"init", metadata !"init", metadata !"", i32 7, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, void ()* @init, null, null, metadata !2, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [init]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [instcombine_intrinsics.c]
!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{metadata !8, metadata !13}
-!8 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from i3]
-!9 = metadata !{i32 786451, metadata !1, null, metadata !"i3", i32 25, i64 32, i64 32, i32 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [i3] [line 25, size 32, align 32, offset 0] [def] [from ]
-!10 = metadata !{metadata !11}
-!11 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"i4", i32 26, i64 32, i64 32, i64 0, i32 0, metadata !12} ; [ DW_TAG_member ] [line 26, size 32, align 32, offset 0] [from i5]
-!12 = metadata !{i32 786468, null, null, metadata !"i5", i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] [line 0, size 32, align 32, offset 0, enc DW_ATE_unsigned]
-!13 = metadata !{i32 786454, metadata !1, null, metadata !"i6", i32 5, i64 0, i64 0, i64 0, i32 0, metadata !14} ; [ DW_TAG_typedef ] [line 5, size 0, align 0, offset 0] [from i7]
-!14 = metadata !{i32 786454, metadata !1, null, metadata !"i7", i32 2, i64 0, i64 0, i64 0, i32 0, metadata !15} ; [ DW_TAG_typedef ] [line 2, size 0, align 0, offset 0] [from i8]
-!15 = metadata !{i32 786468, null, null, metadata !"i8", i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
-!16 = metadata !{}
-!21 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"i13", metadata !"i13", metadata !"", i32 42, metadata !22, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @init, null, null, metadata !24, i32 43} ; [ DW_TAG_subprogram ] [line 42] [scope 43]
-!22 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !34, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!23 = metadata !{null}
-!24 = metadata !{metadata !25}
-!25 = metadata !{i32 786688, metadata !21, metadata !"i14", metadata !5, i32 45, metadata !27, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [line 45]
-!27 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !28} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from i14]
-!28 = metadata !{i32 786451, metadata !1, null, metadata !"i14", i32 16, i64 32, i64 32, i32 0, i32 0, null, metadata !29, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [i14] [line 16, size 32, align 32, offset 0] [def] [from ]
-!29 = metadata !{metadata !30}
-!30 = metadata !{i32 786445, metadata !1, metadata !28, metadata !"i16", i32 17, i64 32, i64 32, i64 0, i32 0, metadata !31} ; [ DW_TAG_member ] [line 17, size 32, align 32, offset 0] [from i17]
-!31 = metadata !{i32 786454, metadata !1, null, metadata !"i17", i32 7, i64 0, i64 0, i64 0, i32 0, metadata !32} ; [ DW_TAG_typedef ] [line 7, size 0, align 0, offset 0] [from int]
-!32 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!33 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"i18", metadata !"i18", metadata !"", i32 54, metadata !22, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @bar, null, null, metadata !34, i32 55} ; [ DW_TAG_subprogram ] [line 54] [scope 55]
-!34 = metadata !{null}
-!47 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"i29", metadata !"i29", metadata !"", i32 53, metadata !22, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @baz, null, null, metadata !2, i32 53} ; [ DW_TAG_subprogram ] [line 53]
-!48 = metadata !{metadata !49}
-!49 = metadata !{i32 786484, i32 0, metadata !21, metadata !"i30", metadata !"i30", metadata !"", metadata !5, i32 44, metadata !50, i32 1, i32 1, null, null}
-!50 = metadata !{i32 786454, metadata !1, null, metadata !"i31", i32 6, i64 0, i64 0, i64 0, i32 0, metadata !32} ; [ DW_TAG_typedef ] [line 6, size 0, align 0, offset 0] [from int]
-!52 = metadata !{i64 0}
-!55 = metadata !{%struct.i3* null}
-!72 = metadata !{%struct.i24* null}
-!73 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!7 = metadata !{null}
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5.0 "}
+!11 = metadata !{i32 786688, metadata !4, metadata !"p", metadata !5, i32 8, metadata !12, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [p] [line 8]
+!12 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !13} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from i14]
+!13 = metadata !{i32 786454, metadata !1, null, metadata !"i14", i32 3, i64 0, i64 0, i64 0, i32 0, metadata !14} ; [ DW_TAG_typedef ] [i14] [line 3, size 0, align 0, offset 0] [from ]
+!14 = metadata !{i32 786451, metadata !1, null, metadata !"", i32 1, i64 64, i64 64, i32 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [line 1, size 64, align 64, offset 0] [def] [from ]
+!15 = metadata !{metadata !16}
+!16 = metadata !{i32 786445, metadata !1, metadata !14, metadata !"i", i32 2, i64 64, i64 64, i64 0, i32 0, metadata !17} ; [ DW_TAG_member ] [i] [line 2, size 64, align 64, offset 0] [from long int]
+!17 = metadata !{i32 786468, null, null, metadata !"long int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [long int] [line 0, size 64, align 64, offset 0, enc DW_ATE_signed]
+!18 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!19 = metadata !{i32 9, i32 0, metadata !4, null}
+!20 = metadata !{i32 10, i32 0, metadata !4, null}
+!21 = metadata !{i32 11, i32 0, metadata !4, null}
+!22 = metadata !{i32 12, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/lexical_block.ll b/test/DebugInfo/X86/lexical_block.ll
new file mode 100644
index 000000000000..95b3921ab364
--- /dev/null
+++ b/test/DebugInfo/X86/lexical_block.ll
@@ -0,0 +1,59 @@
+; REQUIRES: object-emission
+
+; RUN: llc -mtriple=x86_64-linux -O0 -filetype=obj < %s \
+; RUN: | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; CHECK: DW_TAG_lexical_block
+; CHECK-NEXT: DW_AT_low_pc [DW_FORM_addr]
+; CHECK-NEXT: DW_AT_high_pc [DW_FORM_data4]
+
+; Test case produced from:
+; void b() {
+; if (int i = 3)
+; return;
+; }
+
+; Function Attrs: nounwind uwtable
+define void @_Z1bv() #0 {
+entry:
+ %i = alloca i32, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %i}, metadata !11), !dbg !14
+ store i32 3, i32* %i, align 4, !dbg !14
+ %0 = load i32* %i, align 4, !dbg !14
+ %tobool = icmp ne i32 %0, 0, !dbg !14
+ br i1 %tobool, label %if.then, label %if.end, !dbg !14
+
+if.then: ; preds = %entry
+ br label %if.end, !dbg !15
+
+if.end: ; preds = %if.then, %entry
+ ret void, !dbg !16
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/lexical_block.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"lexical_block.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"b", metadata !"b", metadata !"_Z1bv", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z1bv, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [b]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/lexical_block.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5.0 "}
+!11 = metadata !{i32 786688, metadata !12, metadata !"i", metadata !5, i32 2, metadata !13, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [i] [line 2]
+!12 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/lexical_block.cpp]
+!13 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!14 = metadata !{i32 2, i32 0, metadata !12, null}
+!15 = metadata !{i32 3, i32 0, metadata !12, null}
+!16 = metadata !{i32 4, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/line-info.ll b/test/DebugInfo/X86/line-info.ll
index 46daccfe841d..f6deee9ff165 100644
--- a/test/DebugInfo/X86/line-info.ll
+++ b/test/DebugInfo/X86/line-info.ll
@@ -2,7 +2,7 @@
; RUN: llvm-dwarfdump %t | FileCheck %s
; CHECK: [[FILEID:[0-9]+]]]{{.*}}list0.h
-; CHECK: [[FILEID]] 0 1 0 is_stmt{{$}}
+; CHECK: [[FILEID]] 0 1 0 0 is_stmt{{$}}
; IR generated from clang -g -emit-llvm with the following source:
; list0.h:
@@ -40,7 +40,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/blaikie/dev/scratch/list0.c] [DW_LANG_C99]
!1 = metadata !{metadata !"list0.c", metadata !"/usr/local/google/home/blaikie/dev/scratch"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !10}
!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
!5 = metadata !{metadata !"./list0.h", metadata !"/usr/local/google/home/blaikie/dev/scratch"}
diff --git a/test/DebugInfo/X86/linkage-name.ll b/test/DebugInfo/X86/linkage-name.ll
index 3d116675454c..2b1647b3d3d9 100644
--- a/test/DebugInfo/X86/linkage-name.ll
+++ b/test/DebugInfo/X86/linkage-name.ll
@@ -27,10 +27,10 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!29}
-!0 = metadata !{i32 786449, metadata !28, i32 4, metadata !"clang version 3.1 (trunk 152691) (llvm/trunk 152692)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !18, metadata !18, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !28, i32 4, metadata !"clang version 3.1 (trunk 152691) (llvm/trunk 152692)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !18, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
-!5 = metadata !{i32 786478, metadata !6, null, metadata !"a", metadata !"a", metadata !"_ZN1A1aEi", i32 5, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (%class.A*, i32)* @_ZN1A1aEi, null, metadata !13, metadata !16, i32 5} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786478, metadata !6, null, metadata !"a", metadata !"a", metadata !"_ZN1A1aEi", i32 5, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (%class.A*, i32)* @_ZN1A1aEi, null, metadata !13, null, i32 5} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !28} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9, metadata !10, metadata !9}
@@ -38,11 +38,7 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!10 = metadata !{i32 786447, i32 0, null, i32 0, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !11} ; [ DW_TAG_pointer_type ]
!11 = metadata !{i32 786434, metadata !28, null, metadata !"A", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_class_type ] [A] [line 1, size 8, align 8, offset 0] [def] [from ]
!12 = metadata !{metadata !13}
-!13 = metadata !{i32 786478, metadata !6, metadata !11, metadata !"a", metadata !"a", metadata !"_ZN1A1aEi", i32 2, metadata !7, i1 false, i1 false, i32 0, i32 0, null, i32 257, i1 false, null, null, i32 0, metadata !14, i32 0} ; [ DW_TAG_subprogram ]
-!14 = metadata !{metadata !15}
-!15 = metadata !{i32 786468} ; [ DW_TAG_base_type ]
-!16 = metadata !{metadata !17}
-!17 = metadata !{i32 786468} ; [ DW_TAG_base_type ]
+!13 = metadata !{i32 786478, metadata !6, metadata !11, metadata !"a", metadata !"a", metadata !"_ZN1A1aEi", i32 2, metadata !7, i1 false, i1 false, i32 0, i32 0, null, i32 257, i1 false, null, null, i32 0, null, i32 0} ; [ DW_TAG_subprogram ]
!18 = metadata !{metadata !20}
!20 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 9, metadata !11, i32 0, i32 1, %class.A* @a, null} ; [ DW_TAG_variable ]
!21 = metadata !{i32 786689, metadata !5, metadata !"this", metadata !6, i32 16777221, metadata !22, i32 64, i32 0} ; [ DW_TAG_arg_variable ]
diff --git a/test/DebugInfo/X86/lit.local.cfg b/test/DebugInfo/X86/lit.local.cfg
index 19840aa7574c..c8625f4d9d24 100644
--- a/test/DebugInfo/X86/lit.local.cfg
+++ b/test/DebugInfo/X86/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/DebugInfo/X86/low-pc-cu.ll b/test/DebugInfo/X86/low-pc-cu.ll
index 922ae8dfed2f..979d4006b920 100644
--- a/test/DebugInfo/X86/low-pc-cu.ll
+++ b/test/DebugInfo/X86/low-pc-cu.ll
@@ -5,28 +5,32 @@
; CHECK: DW_TAG_compile_unit [1]
; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+; CHECK: DW_AT_high_pc [DW_FORM_data4]
; CHECK: DW_TAG_subprogram [2]
+; CHECK: DW_AT_low_pc [DW_FORM_addr]
+; CHECK: DW_AT_high_pc [DW_FORM_data4]
-define i32 @_Z1qv() nounwind uwtable readnone ssp {
+; Function Attrs: nounwind uwtable
+define void @z() #0 {
entry:
- ret i32 undef, !dbg !13
+ ret void, !dbg !11
}
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!16}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
-!0 = metadata !{i32 786449, metadata !15, i32 4, metadata !"clang version 3.1 (trunk 153454) (llvm/trunk 153471)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
-!3 = metadata !{metadata !5, metadata !12}
-!5 = metadata !{i32 786478, metadata !6, null, metadata !"q", metadata !"q", metadata !"_Z1qv", i32 5, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z1qv, null, null, metadata !10, i32 0} ; [ DW_TAG_subprogram ] [line 5] [def] [scope 0] [q]
-!6 = metadata !{i32 786473, metadata !15} ; [ DW_TAG_file_type ]
-!7 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{metadata !9}
-!9 = metadata !{i32 786468, metadata !15, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !11}
-!11 = metadata !{i32 786468} ; [ DW_TAG_base_type ]
-!12 = metadata !{i32 786478, metadata !15, metadata !6, metadata !"t", metadata !"t", metadata !"", i32 2, metadata !7, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, metadata !10, i32 0} ; [ DW_TAG_subprogram ]
-!13 = metadata !{i32 7, i32 1, metadata !14, null}
-!14 = metadata !{i32 786443, metadata !5, i32 5, i32 1, metadata !6, i32 0} ; [ DW_TAG_lexical_block ]
-!15 = metadata !{metadata !"foo.cpp", metadata !"/Users/echristo/tmp"}
-!16 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/z.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"z.c", metadata !"/usr/local/google/home/echristo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"z", metadata !"z", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @z, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [z]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/z.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)"}
+!11 = metadata !{i32 1, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/misched-dbg-value.ll b/test/DebugInfo/X86/misched-dbg-value.ll
index cfb06672d259..c713e65110e7 100644
--- a/test/DebugInfo/X86/misched-dbg-value.ll
+++ b/test/DebugInfo/X86/misched-dbg-value.ll
@@ -6,20 +6,33 @@
; function parameters.
; CHECK: .debug_info contents:
; CHECK: DW_TAG_compile_unit
-; CHECK: DW_TAG_subprogram
-; CHECK: Proc8
-; CHECK: DW_TAG_formal_parameter
-; CHECK: Array1Par
-; CHECK: DW_AT_location
-; CHECK: DW_TAG_formal_parameter
-; CHECK: Array2Par
-; CHECK: DW_AT_location
-; CHECK: DW_TAG_formal_parameter
-; CHECK: IntParI1
-; CHECK: DW_AT_location
-; CHECK: DW_TAG_formal_parameter
-; CHECK: IntParI2
-; CHECK: DW_AT_location
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "Proc8"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "Array1Par"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "Array2Par"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "IntParI1"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "IntParI2"
%struct.Record = type { %struct.Record*, i32, i32, i32, [31 x i8] }
@@ -90,7 +103,7 @@ attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!83}
-!0 = metadata !{i32 786449, metadata !82, i32 12, metadata !"clang version 3.3 (trunk 175015)", i1 true, metadata !"", i32 0, metadata !1, metadata !10, metadata !11, metadata !29, metadata !29, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/manmanren/test-Nov/rdar_13183203/test2/dry.c] [DW_LANG_C99]
+!0 = metadata !{i32 786449, metadata !82, i32 12, metadata !"clang version 3.3 (trunk 175015)", i1 true, metadata !"", i32 0, metadata !1, metadata !10, metadata !11, metadata !29, metadata !10, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/manmanren/test-Nov/rdar_13183203/test2/dry.c] [DW_LANG_C99]
!1 = metadata !{metadata !2}
!2 = metadata !{i32 786436, metadata !82, null, metadata !"", i32 128, i64 32, i64 32, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [line 128, size 32, align 32, offset 0] [def] [from ]
!3 = metadata !{i32 786473, metadata !82} ; [ DW_TAG_file_type ]
@@ -100,7 +113,7 @@ attributes #1 = { nounwind readnone }
!7 = metadata !{i32 786472, metadata !"Ident3", i64 10001} ; [ DW_TAG_enumerator ] [Ident3 :: 10001]
!8 = metadata !{i32 786472, metadata !"Ident4", i64 10002} ; [ DW_TAG_enumerator ] [Ident4 :: 10002]
!9 = metadata !{i32 786472, metadata !"Ident5", i64 10003} ; [ DW_TAG_enumerator ] [Ident5 :: 10003]
-!10 = metadata !{i32 0}
+!10 = metadata !{}
!11 = metadata !{metadata !12}
!12 = metadata !{i32 786478, metadata !82, metadata !3, metadata !"Proc8", metadata !"Proc8", metadata !"", i32 180, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, void (i32*, [51 x i32]*, i32, i32)* @Proc8, null, null, metadata !22, i32 185} ; [ DW_TAG_subprogram ] [line 180] [def] [scope 185] [Proc8]
!13 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !14, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
diff --git a/test/DebugInfo/X86/multiple-aranges.ll b/test/DebugInfo/X86/multiple-aranges.ll
index 4c205d8e1697..2da293874245 100644
--- a/test/DebugInfo/X86/multiple-aranges.ll
+++ b/test/DebugInfo/X86/multiple-aranges.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -generate-arange-section < %s | FileCheck %s
; First CU
; CHECK: .long 44 # Length of ARange Set
@@ -6,10 +6,7 @@
; CHECK-NEXT: .long .L.debug_info_begin0 # Offset Into Debug Info Section
; CHECK-NEXT: .byte 8 # Address Size (in bytes)
; CHECK-NEXT: .byte 0 # Segment Size (in bytes)
-; CHECK-NEXT: .byte 255
-; CHECK-NEXT: .byte 255
-; CHECK-NEXT: .byte 255
-; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .zero 4,255
; CHECK-NEXT: .quad kittens
; CHECK-NEXT: .Lset0 = rainbows-kittens
; CHECK-NEXT: .quad .Lset0
@@ -22,10 +19,7 @@
; CHECK-NEXT: .long .L.debug_info_begin1 # Offset Into Debug Info Section
; CHECK-NEXT: .byte 8 # Address Size (in bytes)
; CHECK-NEXT: .byte 0 # Segment Size (in bytes)
-; CHECK-NEXT: .byte 255
-; CHECK-NEXT: .byte 255
-; CHECK-NEXT: .byte 255
-; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .zero 4,255
; CHECK-NEXT: .quad rainbows
; CHECK-NEXT: .Lset1 = .Ldebug_end0-rainbows
; CHECK-NEXT: .quad .Lset1
@@ -52,7 +46,7 @@ target triple = "x86_64-unknown-linux-gnu"
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/home/kayamon/test1.c] [DW_LANG_C99]
!1 = metadata !{metadata !"test1.c", metadata !"/home/kayamon"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786484, i32 0, null, metadata !"kittens", metadata !"kittens", metadata !"", metadata !5, i32 1, metadata !6, i32 0, i32 1, i32* @kittens, null} ; [ DW_TAG_variable ] [kittens] [line 1] [def]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/home/kayamon/test1.c]
diff --git a/test/DebugInfo/X86/multiple-at-const-val.ll b/test/DebugInfo/X86/multiple-at-const-val.ll
index 9a660614827f..27a5510f1190 100644
--- a/test/DebugInfo/X86/multiple-at-const-val.ll
+++ b/test/DebugInfo/X86/multiple-at-const-val.ll
@@ -32,7 +32,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!1803}
-!0 = metadata !{i32 786449, metadata !1802, i32 4, metadata !"clang version 3.3 (trunk 174207)", i1 true, metadata !"", i32 0, metadata !1, metadata !955, metadata !956, metadata !1786, metadata !1786, metadata !""} ; [ DW_TAG_compile_unit ] [/privite/tmp/student2.cpp] [DW_LANG_C_plus_plus]
+!0 = metadata !{i32 786449, metadata !1802, i32 4, metadata !"clang version 3.3 (trunk 174207)", i1 true, metadata !"", i32 0, metadata !1, metadata !955, metadata !956, metadata !1786, metadata !955, metadata !""} ; [ DW_TAG_compile_unit ] [/privite/tmp/student2.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !26}
!4 = metadata !{i32 786489, null, metadata !"std", metadata !5, i32 48} ; [ DW_TAG_namespace ]
!5 = metadata !{i32 786473, metadata !1801} ; [ DW_TAG_file_type ]
@@ -52,7 +52,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!77 = metadata !{i32 786445, metadata !1801, metadata !49, metadata !"badbit", i32 331, i64 0, i64 0, i64 0, i32 4096, metadata !78, i32 1} ; [ DW_TAG_member ]
!78 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !79} ; [ DW_TAG_const_type ]
!79 = metadata !{i32 786454, metadata !1801, metadata !49, metadata !"ostate", i32 327, i64 0, i64 0, i64 0, i32 0, metadata !26} ; [ DW_TAG_typedef ]
-!955 = metadata !{i32 0}
+!955 = metadata !{}
!956 = metadata !{metadata !960}
!960 = metadata !{i32 786478, metadata !1802, null, metadata !"main", metadata !"main", metadata !"", i32 73, metadata !54, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @main, null, null, metadata !955, i32 73} ; [ DW_TAG_subprogram ]
!961 = metadata !{i32 786473, metadata !1802} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/nondefault-subrange-array.ll b/test/DebugInfo/X86/nondefault-subrange-array.ll
index 91065a312b6b..4df1bd4847d0 100644
--- a/test/DebugInfo/X86/nondefault-subrange-array.ll
+++ b/test/DebugInfo/X86/nondefault-subrange-array.ll
@@ -23,15 +23,15 @@
; CHECK: [[BASE]]: DW_TAG_base_type
; CHECK: [[BASE2]]: DW_TAG_base_type
-; CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "int")
-; CHECK-NEXT: DW_AT_byte_size [DW_FORM_data1] (0x04)
-; CHECK-NEXT: DW_AT_encoding [DW_FORM_data1] (0x05)
+; CHECK-NEXT: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "sizetype")
+; CHECK-NEXT: DW_AT_byte_size [DW_FORM_data1] (0x08)
+; CHECK-NEXT: DW_AT_encoding [DW_FORM_data1] (0x07)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!21}
-!0 = metadata !{i32 786449, metadata !20, i32 4, metadata !"clang version 3.3 (trunk 169136)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/Volumes/Sandbox/llvm/t.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !20, i32 4, metadata !"clang version 3.3 (trunk 169136)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/Volumes/Sandbox/llvm/t.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, %class.A* @a, null} ; [ DW_TAG_variable ] [a] [line 1] [def]
!6 = metadata !{i32 786473, metadata !20} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/objc-fwd-decl.ll b/test/DebugInfo/X86/objc-fwd-decl.ll
index a5e9632d1178..1ec56be81a46 100644
--- a/test/DebugInfo/X86/objc-fwd-decl.ll
+++ b/test/DebugInfo/X86/objc-fwd-decl.ll
@@ -12,8 +12,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!9, !10, !11, !12, !14}
-!0 = metadata !{i32 786449, metadata !13, i32 16, metadata !"clang version 3.1 (trunk 152054 trunk 152094)", i1 false, metadata !"", i32 2, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !13, i32 16, metadata !"clang version 3.1 (trunk 152054 trunk 152094)", i1 false, metadata !"", i32 2, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 3, metadata !7, i32 0, i32 1, %0** @a, null} ; [ DW_TAG_variable ]
!6 = metadata !{i32 786473, metadata !13} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/objc-property-void.ll b/test/DebugInfo/X86/objc-property-void.ll
new file mode 100644
index 000000000000..d366a7acf4e5
--- /dev/null
+++ b/test/DebugInfo/X86/objc-property-void.ll
@@ -0,0 +1,104 @@
+; RUN: llc -filetype=obj -o %t.o < %s >/dev/null 2>&1
+; RUN: llvm-dwarfdump -debug-dump=info %t.o | FileCheck %s
+
+; CHECK: DW_TAG_structure_type
+; CHECK: DW_AT_APPLE_objc_complete_type
+; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-fA-F]+}}] = "Foo")
+; CHECK: DW_TAG_APPLE_property
+; CHECK: DW_AT_APPLE_property_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-fA-F]+}}] = "foo")
+
+; generated from:
+; @interface Foo
+; @property (nonatomic,assign,readonly) void foo;
+; @end
+; @implementation Foo
+; - (void)foo {}
+; @end
+;
+; with:
+; clang -S -emit-llvm -O0 -g
+
+; ModuleID = '-'
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%0 = type opaque
+%struct._objc_cache = type opaque
+%struct._class_t = type { %struct._class_t*, %struct._class_t*, %struct._objc_cache*, i8* (i8*, i8*)**, %struct._class_ro_t* }
+%struct._class_ro_t = type { i32, i32, i32, i8*, i8*, %struct.__method_list_t*, %struct._objc_protocol_list*, %struct._ivar_list_t*, i8*, %struct._prop_list_t* }
+%struct.__method_list_t = type { i32, i32, [0 x %struct._objc_method] }
+%struct._objc_method = type { i8*, i8*, i8* }
+%struct._objc_protocol_list = type { i64, [0 x %struct._protocol_t*] }
+%struct._protocol_t = type { i8*, i8*, %struct._objc_protocol_list*, %struct.__method_list_t*, %struct.__method_list_t*, %struct.__method_list_t*, %struct.__method_list_t*, %struct._prop_list_t*, i32, i32, i8** }
+%struct._ivar_list_t = type { i32, i32, [0 x %struct._ivar_t] }
+%struct._ivar_t = type { i64*, i8*, i8*, i32, i32 }
+%struct._prop_list_t = type { i32, i32, [0 x %struct._prop_t] }
+%struct._prop_t = type { i8*, i8* }
+
+@_objc_empty_cache = external global %struct._objc_cache
+@"OBJC_CLASS_$_Foo" = global %struct._class_t { %struct._class_t* @"OBJC_METACLASS_$_Foo", %struct._class_t* null, %struct._objc_cache* @_objc_empty_cache, i8* (i8*, i8*)** null, %struct._class_ro_t* @"\01l_OBJC_CLASS_RO_$_Foo" }, section "__DATA, __objc_data", align 8
+@"OBJC_METACLASS_$_Foo" = global %struct._class_t { %struct._class_t* @"OBJC_METACLASS_$_Foo", %struct._class_t* @"OBJC_CLASS_$_Foo", %struct._objc_cache* @_objc_empty_cache, i8* (i8*, i8*)** null, %struct._class_ro_t* @"\01l_OBJC_METACLASS_RO_$_Foo" }, section "__DATA, __objc_data", align 8
+@"\01L_OBJC_CLASS_NAME_" = internal global [4 x i8] c"Foo\00", section "__TEXT,__objc_classname,cstring_literals", align 1
+@"\01l_OBJC_METACLASS_RO_$_Foo" = internal global %struct._class_ro_t { i32 3, i32 40, i32 40, i8* null, i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_CLASS_NAME_", i32 0, i32 0), %struct.__method_list_t* null, %struct._objc_protocol_list* null, %struct._ivar_list_t* null, i8* null, %struct._prop_list_t* null }, section "__DATA, __objc_const", align 8
+@"\01L_OBJC_METH_VAR_NAME_" = internal global [4 x i8] c"foo\00", section "__TEXT,__objc_methname,cstring_literals", align 1
+@"\01L_OBJC_METH_VAR_TYPE_" = internal global [8 x i8] c"v16@0:8\00", section "__TEXT,__objc_methtype,cstring_literals", align 1
+@"\01l_OBJC_$_INSTANCE_METHODS_Foo" = internal global { i32, i32, [1 x %struct._objc_method] } { i32 24, i32 1, [1 x %struct._objc_method] [%struct._objc_method { i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i32 0, i32 0), i8* getelementptr inbounds ([8 x i8]* @"\01L_OBJC_METH_VAR_TYPE_", i32 0, i32 0), i8* bitcast (void (%0*, i8*)* @"\01-[Foo foo]" to i8*) }] }, section "__DATA, __objc_const", align 8
+@"\01L_OBJC_PROP_NAME_ATTR_" = internal global [4 x i8] c"foo\00", section "__TEXT,__cstring,cstring_literals", align 1
+@"\01L_OBJC_PROP_NAME_ATTR_1" = internal global [7 x i8] c"Tv,R,N\00", section "__TEXT,__cstring,cstring_literals", align 1
+@"\01l_OBJC_$_PROP_LIST_Foo" = internal global { i32, i32, [1 x %struct._prop_t] } { i32 16, i32 1, [1 x %struct._prop_t] [%struct._prop_t { i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_PROP_NAME_ATTR_", i32 0, i32 0), i8* getelementptr inbounds ([7 x i8]* @"\01L_OBJC_PROP_NAME_ATTR_1", i32 0, i32 0) }] }, section "__DATA, __objc_const", align 8
+@"\01l_OBJC_CLASS_RO_$_Foo" = internal global %struct._class_ro_t { i32 2, i32 0, i32 0, i8* null, i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_CLASS_NAME_", i32 0, i32 0), %struct.__method_list_t* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01l_OBJC_$_INSTANCE_METHODS_Foo" to %struct.__method_list_t*), %struct._objc_protocol_list* null, %struct._ivar_list_t* null, i8* null, %struct._prop_list_t* bitcast ({ i32, i32, [1 x %struct._prop_t] }* @"\01l_OBJC_$_PROP_LIST_Foo" to %struct._prop_list_t*) }, section "__DATA, __objc_const", align 8
+@"\01L_OBJC_LABEL_CLASS_$" = internal global [1 x i8*] [i8* bitcast (%struct._class_t* @"OBJC_CLASS_$_Foo" to i8*)], section "__DATA, __objc_classlist, regular, no_dead_strip", align 8
+@llvm.used = appending global [8 x i8*] [i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_CLASS_NAME_", i32 0, i32 0), i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i32 0, i32 0), i8* getelementptr inbounds ([8 x i8]* @"\01L_OBJC_METH_VAR_TYPE_", i32 0, i32 0), i8* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01l_OBJC_$_INSTANCE_METHODS_Foo" to i8*), i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_PROP_NAME_ATTR_", i32 0, i32 0), i8* getelementptr inbounds ([7 x i8]* @"\01L_OBJC_PROP_NAME_ATTR_1", i32 0, i32 0), i8* bitcast ({ i32, i32, [1 x %struct._prop_t] }* @"\01l_OBJC_$_PROP_LIST_Foo" to i8*), i8* bitcast ([1 x i8*]* @"\01L_OBJC_LABEL_CLASS_$" to i8*)], section "llvm.metadata"
+
+; Function Attrs: ssp uwtable
+define internal void @"\01-[Foo foo]"(%0* %self, i8* %_cmd) #0 {
+entry:
+ %self.addr = alloca %0*, align 8
+ %_cmd.addr = alloca i8*, align 8
+ store %0* %self, %0** %self.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%0** %self.addr}, metadata !24), !dbg !26
+ store i8* %_cmd, i8** %_cmd.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i8** %_cmd.addr}, metadata !27), !dbg !26
+ ret void, !dbg !29
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!17, !18, !19, !20, !21, !22}
+!llvm.ident = !{!23}
+
+!0 = metadata !{i32 786449, metadata !1, i32 16, metadata !"", i1 false, metadata !"", i32 2, metadata !2, metadata !3, metadata !9, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [] [DW_LANG_ObjC]
+!1 = metadata !{metadata !"-", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !5, metadata !6, metadata !"Foo", i32 1, i64 0, i64 8, i32 0, i32 512, null, metadata !7, i32 16, null, null, null} ; [ DW_TAG_structure_type ] [Foo] [line 1, size 0, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !"<stdin>", metadata !""}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] []
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 803328, metadata !"foo", metadata !6, i32 2, metadata !"", metadata !"", i32 2117, null} ; [ DW_TAG_APPLE_property ] [foo] [line 2, properties 2117]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"-[Foo foo]", metadata !"-[Foo foo]", metadata !"", i32 5, metadata !11, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%0*, i8*)* @"\01-[Foo foo]", null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [local] [def] [-[Foo foo]]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{null, metadata !13, metadata !14}
+!13 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !4} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from Foo]
+!14 = metadata !{i32 786454, metadata !5, null, metadata !"SEL", i32 5, i64 0, i64 0, i64 0, i32 64, metadata !15} ; [ DW_TAG_typedef ] [SEL] [line 5, size 0, align 0, offset 0] [artificial] [from ]
+!15 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from objc_selector]
+!16 = metadata !{i32 786451, metadata !1, null, metadata !"objc_selector", i32 0, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [objc_selector] [line 0, size 0, align 0, offset 0] [decl] [from ]
+!17 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
+!18 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
+!19 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!20 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
+!21 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!22 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!23 = metadata !{metadata !""}
+!24 = metadata !{i32 786689, metadata !10, metadata !"self", null, i32 16777216, metadata !25, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [self] [line 0]
+!25 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !4} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from Foo]
+!26 = metadata !{i32 0, i32 0, metadata !10, null}
+!27 = metadata !{i32 786689, metadata !10, metadata !"_cmd", null, i32 33554432, metadata !28, i32 64, i32 0} ; [ DW_TAG_arg_variable ] [_cmd] [line 0]
+!28 = metadata !{i32 786454, metadata !5, null, metadata !"SEL", i32 5, i64 0, i64 0, i64 0, i32 0, metadata !15} ; [ DW_TAG_typedef ] [SEL] [line 5, size 0, align 0, offset 0] [from ]
+!29 = metadata !{i32 5, i32 0, metadata !10, null}
diff --git a/test/DebugInfo/X86/op_deref.ll b/test/DebugInfo/X86/op_deref.ll
index 300f13dc5fb2..31003eee2a8c 100644
--- a/test/DebugInfo/X86/op_deref.ll
+++ b/test/DebugInfo/X86/op_deref.ll
@@ -1,10 +1,20 @@
-; RUN: llc -O0 -mtriple=x86_64-apple-darwin %s -o %t -filetype=obj
-; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s -check-prefix=DW-CHECK
+; RUN: llc -O0 -mtriple=x86_64-apple-darwin < %s -filetype=obj \
+; RUN: | llvm-dwarfdump -debug-dump=info - \
+; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=DWARF4
+; RUN: llc -O0 -mtriple=x86_64-apple-darwin < %s -filetype=obj -dwarf-version=3 \
+; RUN: | llvm-dwarfdump -debug-dump=info - \
+; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=DWARF3
-; DW-CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000067] = "vla")
; FIXME: The location here needs to be fixed, but llvm-dwarfdump doesn't handle
; DW_AT_location lists yet.
-; DW-CHECK: DW_AT_location [DW_FORM_sec_offset] (0x00000000)
+; DWARF4: DW_AT_location [DW_FORM_sec_offset] (0x00000000)
+
+; FIXME: The location here needs to be fixed, but llvm-dwarfdump doesn't handle
+; DW_AT_location lists yet.
+; DWARF3: DW_AT_location [DW_FORM_data4] (0x00000000)
+
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000067] = "vla")
; Unfortunately llvm-dwarfdump can't unparse a list of DW_AT_locations
; right now, so we check the asm output:
@@ -68,7 +78,7 @@ declare void @llvm.stackrestore(i8*) nounwind
!llvm.module.flags = !{!29}
!0 = metadata !{i32 786449, metadata !28, i32 12, metadata !"clang version 3.2 (trunk 156005) (llvm/trunk 156000)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !28, metadata !6, metadata !"testVLAwithSize", metadata !"testVLAwithSize", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32)* @testVLAwithSize, null, null, metadata !1, i32 2} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !28} ; [ DW_TAG_file_type ]
@@ -79,7 +89,7 @@ declare void @llvm.stackrestore(i8*) nounwind
!11 = metadata !{i32 1, i32 26, metadata !5, null}
!12 = metadata !{i32 3, i32 13, metadata !13, null}
!13 = metadata !{i32 786443, metadata !28, metadata !5, i32 2, i32 1, i32 0} ; [ DW_TAG_lexical_block ]
-!14 = metadata !{i32 786688, metadata !13, metadata !"vla", metadata !6, i32 3, metadata !15, i32 8192, i32 0, i64 2} ; [ DW_TAG_auto_variable ]
+!14 = metadata !{i32 786688, metadata !13, metadata !"vla", metadata !6, i32 3, metadata !15, i32 8192, i32 0, metadata !30} ; [ DW_TAG_auto_variable ]
!15 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 0, i64 32, i32 0, i32 0, metadata !9, metadata !16, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 0, align 32, offset 0] [from int]
!16 = metadata !{metadata !17}
!17 = metadata !{i32 786465, i64 0, i64 -1} ; [ DW_TAG_subrange_type ]
@@ -95,3 +105,4 @@ declare void @llvm.stackrestore(i8*) nounwind
!27 = metadata !{i32 8, i32 1, metadata !13, null}
!28 = metadata !{metadata !"bar.c", metadata !"/Users/echristo/tmp"}
!29 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!30 = metadata !{i64 2}
diff --git a/test/DebugInfo/X86/parameters.ll b/test/DebugInfo/X86/parameters.ll
index fa91bd27ae28..4215c21721ef 100644
--- a/test/DebugInfo/X86/parameters.ll
+++ b/test/DebugInfo/X86/parameters.ll
@@ -23,13 +23,15 @@
; }
; CHECK: debug_info contents
-; CHECK: DW_AT_name{{.*}} = "f"
; 0x74 is DW_OP_breg4, showing that the parameter is accessed indirectly
; (with a zero offset) from the register parameter
; CHECK: DW_AT_location{{.*}}(<0x0{{.}}> 74 00
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}} = "f"
-; CHECK: DW_AT_name{{.*}} = "g"
; CHECK: DW_AT_location{{.*}}([[G_LOC:0x[0-9]*]])
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}} = "g"
; CHECK: debug_loc contents
; CHECK-NEXT: [[G_LOC]]: Beginning
; CHECK-NEXT: Ending
@@ -82,7 +84,7 @@ attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/pass.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"pass.cpp", metadata !"/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !17}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func", metadata !"func", metadata !"_ZN7pr147634funcENS_3fooE", i32 6, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%"struct.pr14763::foo"*, %"struct.pr14763::foo"*)* @_ZN7pr147634funcENS_3fooE, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [func]
!5 = metadata !{i32 786489, metadata !1, null, metadata !"pr14763", i32 1} ; [ DW_TAG_namespace ] [pr14763] [line 1]
diff --git a/test/DebugInfo/X86/pointer-type-size.ll b/test/DebugInfo/X86/pointer-type-size.ll
index cf789b202343..40dc955ab52c 100644
--- a/test/DebugInfo/X86/pointer-type-size.ll
+++ b/test/DebugInfo/X86/pointer-type-size.ll
@@ -11,8 +11,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!14}
-!0 = metadata !{i32 786449, metadata !13, i32 12, metadata !"clang version 3.1 (trunk 147882)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !13, i32 12, metadata !"clang version 3.1 (trunk 147882)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 720948, i32 0, null, metadata !"crass", metadata !"crass", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, %struct.crass* @crass, null} ; [ DW_TAG_variable ]
!6 = metadata !{i32 720937, metadata !13} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/pr11300.ll b/test/DebugInfo/X86/pr11300.ll
index caa24eeb8f66..11c409c16042 100644
--- a/test/DebugInfo/X86/pr11300.ll
+++ b/test/DebugInfo/X86/pr11300.ll
@@ -3,11 +3,14 @@
; test that the DW_AT_specification is a back edge in the file.
+; Skip the definition of zed(foo*)
; CHECK: DW_TAG_subprogram
-; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x{{[0-9a-f]*}}] = "zed")
+; CHECK: DW_TAG_class_type
+; CHECK: [[BAR_DECL:0x[0-9a-f]*]]: DW_TAG_subprogram
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZN3foo3barEv"
; CHECK: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_specification [DW_FORM_ref4] (cu + {{.*}} => {[[BACK:0x[0-9a-f]*]]})
-; CHECK: [[BACK]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_specification {{.*}} {[[BAR_DECL]]}
%struct.foo = type { i8 }
@@ -36,9 +39,9 @@ entry:
!llvm.module.flags = !{!33}
!0 = metadata !{i32 786449, metadata !32, i32 4, metadata !"clang version 3.0 ()", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !20}
-!5 = metadata !{i32 720942, metadata !6, metadata !6, metadata !"zed", metadata !"zed", metadata !"_Z3zedP3foo", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.foo*)* @_Z3zedP3foo, null, null, metadata !21, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [zed]
+!5 = metadata !{i32 720942, metadata !6, metadata !6, metadata !"zed", metadata !"zed", metadata !"_Z3zedP3foo", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.foo*)* @_Z3zedP3foo, null, null, null, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [zed]
!6 = metadata !{i32 720937, metadata !32} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 720917, i32 0, null, i32 0, i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{null, metadata !9}
@@ -53,9 +56,7 @@ entry:
!17 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
!18 = metadata !{metadata !19}
!19 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
-!20 = metadata !{i32 720942, metadata !6, null, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEv", i32 2, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.foo*)* @_ZN3foo3barEv, null, metadata !12, metadata !21, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [bar]
-!21 = metadata !{metadata !22}
-!22 = metadata !{i32 720932} ; [ DW_TAG_base_type ]
+!20 = metadata !{i32 720942, metadata !6, null, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEv", i32 2, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.foo*)* @_ZN3foo3barEv, null, metadata !12, null, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [bar]
!23 = metadata !{i32 786689, metadata !5, metadata !"x", metadata !6, i32 16777220, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!24 = metadata !{i32 4, i32 15, metadata !5, null}
!25 = metadata !{i32 4, i32 20, metadata !26, null}
diff --git a/test/DebugInfo/X86/pr12831.ll b/test/DebugInfo/X86/pr12831.ll
index 6dea4a0cd982..79d00eddba28 100644
--- a/test/DebugInfo/X86/pr12831.ll
+++ b/test/DebugInfo/X86/pr12831.ll
@@ -79,7 +79,7 @@ entry:
!llvm.module.flags = !{!162}
!0 = metadata !{i32 786449, metadata !161, i32 4, metadata !"clang version 3.2 ", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !128, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !106, metadata !107, metadata !126, metadata !127}
!5 = metadata !{i32 786478, metadata !6, null, metadata !"writeExpr", metadata !"writeExpr", metadata !"_ZN17BPLFunctionWriter9writeExprEv", i32 19, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.BPLFunctionWriter*)* @_ZN17BPLFunctionWriter9writeExprEv, null, metadata !103, metadata !1, i32 19} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !160} ; [ DW_TAG_file_type ]
@@ -212,7 +212,7 @@ entry:
!134 = metadata !{i32 786447, null, null, null, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ]
!135 = metadata !{i32 19, i32 39, metadata !5, null}
!136 = metadata !{i32 20, i32 17, metadata !137, null}
-!137 = metadata !{i32 786443, metadata !5, i32 19, i32 51, metadata !6, i32 0} ; [ DW_TAG_lexical_block ]
+!137 = metadata !{i32 786443, metadata !6, metadata !5, i32 19, i32 51, i32 0} ; [ DW_TAG_lexical_block ]
!138 = metadata !{i32 23, i32 17, metadata !137, null}
!139 = metadata !{i32 26, i32 15, metadata !137, null}
!140 = metadata !{i32 786689, metadata !106, metadata !"this", metadata !6, i32 16777224, metadata !141, i32 64, i32 0} ; [ DW_TAG_arg_variable ]
@@ -221,19 +221,19 @@ entry:
!143 = metadata !{i32 786689, metadata !106, metadata !"__f", metadata !6, i32 33554440, metadata !61, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!144 = metadata !{i32 8, i32 63, metadata !106, null}
!145 = metadata !{i32 9, i32 9, metadata !146, null}
-!146 = metadata !{i32 786443, metadata !106, i32 8, i32 81, metadata !6, i32 1} ; [ DW_TAG_lexical_block ]
+!146 = metadata !{i32 786443, metadata !6, metadata !106, i32 8, i32 81, i32 1} ; [ DW_TAG_lexical_block ]
!147 = metadata !{i32 10, i32 13, metadata !146, null}
!148 = metadata !{i32 4, i32 5, metadata !149, null}
-!149 = metadata !{i32 786443, metadata !107, i32 3, i32 105, metadata !6, i32 2} ; [ DW_TAG_lexical_block ]
+!149 = metadata !{i32 786443, metadata !6, metadata !107, i32 3, i32 105, i32 2} ; [ DW_TAG_lexical_block ]
!150 = metadata !{i32 786689, metadata !126, metadata !"this", metadata !6, i32 16777224, metadata !141, i32 64, i32 0} ; [ DW_TAG_arg_variable ]
!151 = metadata !{i32 8, i32 45, metadata !126, null}
!152 = metadata !{i32 786689, metadata !126, metadata !"__f", metadata !6, i32 33554440, metadata !26, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!153 = metadata !{i32 8, i32 63, metadata !126, null}
!154 = metadata !{i32 9, i32 9, metadata !155, null}
-!155 = metadata !{i32 786443, metadata !126, i32 8, i32 81, metadata !6, i32 3} ; [ DW_TAG_lexical_block ]
+!155 = metadata !{i32 786443, metadata !6, metadata !126, i32 8, i32 81, i32 3} ; [ DW_TAG_lexical_block ]
!156 = metadata !{i32 10, i32 13, metadata !155, null}
!157 = metadata !{i32 4, i32 5, metadata !158, null}
-!158 = metadata !{i32 786443, metadata !127, i32 3, i32 105, metadata !6, i32 4} ; [ DW_TAG_lexical_block ]
+!158 = metadata !{i32 786443, metadata !6, metadata !127, i32 3, i32 105, i32 4} ; [ DW_TAG_lexical_block ]
!159 = metadata !{i32 786473, metadata !161} ; [ DW_TAG_file_type ]
!160 = metadata !{metadata !"BPLFunctionWriter2.ii", metadata !"/home/peter/crashdelta"}
!161 = metadata !{metadata !"BPLFunctionWriter.cpp", metadata !"/home/peter/crashdelta"}
diff --git a/test/DebugInfo/X86/pr13303.ll b/test/DebugInfo/X86/pr13303.ll
index 473786216e29..16e5966f84db 100644
--- a/test/DebugInfo/X86/pr13303.ll
+++ b/test/DebugInfo/X86/pr13303.ll
@@ -16,7 +16,7 @@ entry:
!llvm.module.flags = !{!13}
!0 = metadata !{i32 786449, metadata !12, i32 12, metadata !"clang version 3.2 (trunk 160143)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/probinson/PR13303.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !12, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @main, null, null, metadata !1, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [main]
!6 = metadata !{i32 786473, metadata !12} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/pr19307.ll b/test/DebugInfo/X86/pr19307.ll
new file mode 100644
index 000000000000..07e3a4255b08
--- /dev/null
+++ b/test/DebugInfo/X86/pr19307.ll
@@ -0,0 +1,147 @@
+; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+; Generated from the source file pr19307.cc:
+; #include <string>
+; void parse_range(unsigned long long &offset, unsigned long long &limit,
+;                  std::string range) {
+;   if (range.compare(0, 6, "items=") != 0 || range[6] == '-')
+;     offset = 1;
+;   range.erase(0, 6);
+;   limit = 2;
+; }
+; with "clang++ -S -emit-llvm -O0 -g pr19307.cc"
+
+; Location of "range" string is spilled from %rdx to stack and is
+; addressed via %rbp.
+; CHECK: movq %rdx, {{[-0-9]+}}(%rbp)
+; CHECK-NEXT: [[START_LABEL:.Ltmp[0-9]+]]
+; This location should be valid until the end of the function.
+
+; Verify that we have proper range in debug_loc section:
+; CHECK: .Ldebug_loc{{[0-9]+}}:
+; CHECK: DW_OP_breg1
+; CHECK: .Lset{{[0-9]+}} = [[START_LABEL]]-.Lfunc_begin0
+; CHECK-NEXT: .quad .Lset{{[0-9]+}}
+; CHECK-NEXT: .Lset{{[0-9]+}} = .Lfunc_end0-.Lfunc_begin0
+; CHECK-NEXT: .quad .Lset{{[0-9]+}}
+; CHECK: DW_OP_breg6
+; CHECK: DW_OP_deref
+
+; ModuleID = 'pr19307.cc'
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%"class.std::basic_string" = type { %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" }
+%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+
+@.str = private unnamed_addr constant [7 x i8] c"items=\00", align 1
+
+; Function Attrs: uwtable
+define void @_Z11parse_rangeRyS_Ss(i64* %offset, i64* %limit, %"class.std::basic_string"* %range) #0 {
+entry:
+ %offset.addr = alloca i64*, align 8
+ %limit.addr = alloca i64*, align 8
+ store i64* %offset, i64** %offset.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i64** %offset.addr}, metadata !45), !dbg !46
+ store i64* %limit, i64** %limit.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i64** %limit.addr}, metadata !47), !dbg !46
+ call void @llvm.dbg.declare(metadata !{%"class.std::basic_string"* %range}, metadata !48), !dbg !49
+ %call = call i32 @_ZNKSs7compareEmmPKc(%"class.std::basic_string"* %range, i64 0, i64 6, i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0)), !dbg !50
+ %cmp = icmp ne i32 %call, 0, !dbg !50
+ br i1 %cmp, label %if.then, label %lor.lhs.false, !dbg !50
+
+lor.lhs.false: ; preds = %entry
+ %call1 = call i8* @_ZNSsixEm(%"class.std::basic_string"* %range, i64 6), !dbg !52
+ %0 = load i8* %call1, !dbg !52
+ %conv = sext i8 %0 to i32, !dbg !52
+ %cmp2 = icmp eq i32 %conv, 45, !dbg !52
+ br i1 %cmp2, label %if.then, label %if.end, !dbg !52
+
+if.then: ; preds = %lor.lhs.false, %entry
+ %1 = load i64** %offset.addr, align 8, !dbg !54
+ store i64 1, i64* %1, align 8, !dbg !54
+ br label %if.end, !dbg !54
+
+if.end: ; preds = %if.then, %lor.lhs.false
+ %call3 = call %"class.std::basic_string"* @_ZNSs5eraseEmm(%"class.std::basic_string"* %range, i64 0, i64 6), !dbg !55
+ %2 = load i64** %limit.addr, align 8, !dbg !56
+ store i64 2, i64* %2, align 8, !dbg !56
+ ret void, !dbg !57
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+declare i32 @_ZNKSs7compareEmmPKc(%"class.std::basic_string"*, i64, i64, i8*) #2
+
+declare i8* @_ZNSsixEm(%"class.std::basic_string"*, i64) #2
+
+declare %"class.std::basic_string"* @_ZNSs5eraseEmm(%"class.std::basic_string"*, i64, i64) #2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!42, !43}
+!llvm.ident = !{!44}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (209308)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !12, metadata !2, metadata !21, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/llvm_cmake_gcc/pr19307.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"pr19307.cc", metadata !"/llvm_cmake_gcc"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !6, metadata !8}
+!4 = metadata !{i32 786451, metadata !5, null, metadata !"", i32 83, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, metadata !"_ZTS11__mbstate_t"} ; [ DW_TAG_structure_type ] [line 83, size 0, align 0, offset 0] [decl] [from ]
+!5 = metadata !{metadata !"/usr/include/wchar.h", metadata !"/llvm_cmake_gcc"}
+!6 = metadata !{i32 786451, metadata !7, null, metadata !"lconv", i32 54, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, metadata !"_ZTS5lconv"} ; [ DW_TAG_structure_type ] [lconv] [line 54, size 0, align 0, offset 0] [decl] [from ]
+!7 = metadata !{metadata !"/usr/include/locale.h", metadata !"/llvm_cmake_gcc"}
+!8 = metadata !{i32 786434, metadata !9, metadata !10, metadata !"basic_string<char, std::char_traits<char>, std::allocator<char> >", i32 1134, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, metadata !"_ZTSSs"} ; [ DW_TAG_class_type ] [basic_string<char, std::char_traits<char>, std::allocator<char> >] [line 1134, size 0, align 0, offset 0] [decl] [from ]
+!9 = metadata !{metadata !"/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/bits/basic_string.tcc", metadata !"/llvm_cmake_gcc"}
+!10 = metadata !{i32 786489, metadata !11, null, metadata !"std", i32 153} ; [ DW_TAG_namespace ] [std] [line 153]
+!11 = metadata !{metadata !"/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/x86_64-linux-gnu/bits/c++config.h", metadata !"/llvm_cmake_gcc"}
+!12 = metadata !{metadata !13}
+!13 = metadata !{i32 786478, metadata !1, metadata !14, metadata !"parse_range", metadata !"parse_range", metadata !"_Z11parse_rangeRyS_Ss", i32 3, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i64*, i64*, %"class.std::basic_string"*)* @_Z11parse_rangeRyS_Ss, null, null, metadata !2, i32 4} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 4] [parse_range]
+!14 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/llvm_cmake_gcc/pr19307.cc]
+!15 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !16, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = metadata !{null, metadata !17, metadata !17, metadata !19}
+!17 = metadata !{i32 786448, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !18} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from long long unsigned int]
+!18 = metadata !{i32 786468, null, null, metadata !"long long unsigned int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] [long long unsigned int] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
+!19 = metadata !{i32 786454, metadata !20, metadata !10, metadata !"string", i32 65, i64 0, i64 0, i64 0, i32 0, metadata !"_ZTSSs"} ; [ DW_TAG_typedef ] [string] [line 65, size 0, align 0, offset 0] [from _ZTSSs]
+!20 = metadata !{metadata !"/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/bits/stringfwd.h", metadata !"/llvm_cmake_gcc"}
+!21 = metadata !{metadata !22, metadata !26, metadata !29, metadata !33, metadata !38, metadata !41}
+!22 = metadata !{i32 786490, metadata !23, metadata !25, i32 57} ; [ DW_TAG_imported_module ]
+!23 = metadata !{i32 786489, metadata !24, null, metadata !"__gnu_debug", i32 55} ; [ DW_TAG_namespace ] [__gnu_debug] [line 55]
+!24 = metadata !{metadata !"/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/debug/debug.h", metadata !"/llvm_cmake_gcc"}
+!25 = metadata !{i32 786489, metadata !24, metadata !10, metadata !"__debug", i32 49} ; [ DW_TAG_namespace ] [__debug] [line 49]
+!26 = metadata !{i32 786440, metadata !10, metadata !27, i32 66} ; [ DW_TAG_imported_declaration ]
+!27 = metadata !{i32 786454, metadata !5, null, metadata !"mbstate_t", i32 106, i64 0, i64 0, i64 0, i32 0, metadata !28} ; [ DW_TAG_typedef ] [mbstate_t] [line 106, size 0, align 0, offset 0] [from __mbstate_t]
+!28 = metadata !{i32 786454, metadata !5, null, metadata !"__mbstate_t", i32 95, i64 0, i64 0, i64 0, i32 0, metadata !"_ZTS11__mbstate_t"} ; [ DW_TAG_typedef ] [__mbstate_t] [line 95, size 0, align 0, offset 0] [from _ZTS11__mbstate_t]
+!29 = metadata !{i32 786440, metadata !10, metadata !30, i32 141} ; [ DW_TAG_imported_declaration ]
+!30 = metadata !{i32 786454, metadata !31, null, metadata !"wint_t", i32 141, i64 0, i64 0, i64 0, i32 0, metadata !32} ; [ DW_TAG_typedef ] [wint_t] [line 141, size 0, align 0, offset 0] [from unsigned int]
+!31 = metadata !{metadata !"/llvm_cmake_gcc/bin/../lib/clang/3.5.0/include/stddef.h", metadata !"/llvm_cmake_gcc"}
+!32 = metadata !{i32 786468, null, null, metadata !"unsigned int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] [unsigned int] [line 0, size 32, align 32, offset 0, enc DW_ATE_unsigned]
+!33 = metadata !{i32 786440, metadata !34, metadata !36, i32 42} ; [ DW_TAG_imported_declaration ]
+!34 = metadata !{i32 786489, metadata !35, null, metadata !"__gnu_cxx", i32 69} ; [ DW_TAG_namespace ] [__gnu_cxx] [line 69]
+!35 = metadata !{metadata !"/usr/lib/gcc/x86_64-linux-gnu/4.6/../../../../include/c++/4.6/bits/cpp_type_traits.h", metadata !"/llvm_cmake_gcc"}
+!36 = metadata !{i32 786454, metadata !11, metadata !10, metadata !"size_t", i32 155, i64 0, i64 0, i64 0, i32 0, metadata !37} ; [ DW_TAG_typedef ] [size_t] [line 155, size 0, align 0, offset 0] [from long unsigned int]
+!37 = metadata !{i32 786468, null, null, metadata !"long unsigned int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] [long unsigned int] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
+!38 = metadata !{i32 786440, metadata !34, metadata !39, i32 43} ; [ DW_TAG_imported_declaration ]
+!39 = metadata !{i32 786454, metadata !11, metadata !10, metadata !"ptrdiff_t", i32 156, i64 0, i64 0, i64 0, i32 0, metadata !40} ; [ DW_TAG_typedef ] [ptrdiff_t] [line 156, size 0, align 0, offset 0] [from long int]
+!40 = metadata !{i32 786468, null, null, metadata !"long int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [long int] [line 0, size 64, align 64, offset 0, enc DW_ATE_signed]
+!41 = metadata !{i32 786440, metadata !10, metadata !"_ZTS5lconv", i32 55} ; [ DW_TAG_imported_declaration ]
+!42 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!43 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!44 = metadata !{metadata !"clang version 3.5.0 (209308)"}
+!45 = metadata !{i32 786689, metadata !13, metadata !"offset", metadata !14, i32 16777219, metadata !17, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [offset] [line 3]
+!46 = metadata !{i32 3, i32 0, metadata !13, null}
+!47 = metadata !{i32 786689, metadata !13, metadata !"limit", metadata !14, i32 33554435, metadata !17, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [limit] [line 3]
+!48 = metadata !{i32 786689, metadata !13, metadata !"range", metadata !14, i32 50331652, metadata !19, i32 8192, i32 0} ; [ DW_TAG_arg_variable ] [range] [line 4]
+!49 = metadata !{i32 4, i32 0, metadata !13, null}
+!50 = metadata !{i32 5, i32 0, metadata !51, null}
+!51 = metadata !{i32 786443, metadata !1, metadata !13, i32 5, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/llvm_cmake_gcc/pr19307.cc]
+!52 = metadata !{i32 5, i32 0, metadata !53, null}
+!53 = metadata !{i32 786443, metadata !1, metadata !51, i32 5, i32 0, i32 1, i32 1} ; [ DW_TAG_lexical_block ] [/llvm_cmake_gcc/pr19307.cc]
+!54 = metadata !{i32 6, i32 0, metadata !51, null}
+!55 = metadata !{i32 7, i32 0, metadata !13, null}
+!56 = metadata !{i32 8, i32 0, metadata !13, null} ; [ DW_TAG_imported_declaration ]
+!57 = metadata !{i32 9, i32 0, metadata !13, null}
+
diff --git a/test/DebugInfo/X86/pr9951.ll b/test/DebugInfo/X86/pr9951.ll
deleted file mode 100644
index d933beb5536f..000000000000
--- a/test/DebugInfo/X86/pr9951.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc -mtriple x86_64-apple-darwin10.0.0 -disable-cfi %s -o - | FileCheck %s
-
-define i32 @f() nounwind {
-entry:
- ret i32 42
-}
-
-!llvm.dbg.cu = !{!2}
-!llvm.module.flags = !{!9}
-!6 = metadata !{metadata !0}
-
-!0 = metadata !{i32 786478, metadata !7, metadata !1, metadata !"f", metadata !"f", metadata !"", i32 1, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @f, null, null, null, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [f]
-!1 = metadata !{i32 786473, metadata !7} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !7, i32 12, metadata !"clang version 3.0 ()", i1 true, metadata !"", i32 0, metadata !8, metadata !8, metadata !6, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 786453, metadata !7, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{i32 786468, null, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!7 = metadata !{metadata !"/home/espindola/llvm/test.c", metadata !"/home/espindola/llvm/build-rust2"}
-!8 = metadata !{i32 0}
-
-; CHECK: _f: ## @f
-; CHECK-NEXT: Ltmp0:
-
-; CHECK: Ltmp9 = (Ltmp3-Ltmp2)-0
-; CHECK-NEXT: .long Ltmp9
-; CHECK-NEXT: .quad Ltmp0
-!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/prologue-stack.ll b/test/DebugInfo/X86/prologue-stack.ll
index b37e41ac8c9f..a5bae841fa3c 100644
--- a/test/DebugInfo/X86/prologue-stack.ll
+++ b/test/DebugInfo/X86/prologue-stack.ll
@@ -22,7 +22,7 @@ declare i32 @callme(i32)
!llvm.module.flags = !{!14}
!0 = metadata !{i32 786449, metadata !13, i32 12, metadata !"clang version 3.2 (trunk 164980) (llvm/trunk 164979)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/bar.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !13, metadata !6, metadata !"isel_line_test2", metadata !"isel_line_test2", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @isel_line_test2, null, null, metadata !1, i32 4} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 4] [isel_line_test2]
!6 = metadata !{i32 786473, metadata !13} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/ref_addr_relocation.ll b/test/DebugInfo/X86/ref_addr_relocation.ll
index fc5197d78ebe..76e6aa6777ec 100644
--- a/test/DebugInfo/X86/ref_addr_relocation.ll
+++ b/test/DebugInfo/X86/ref_addr_relocation.ll
@@ -55,7 +55,7 @@
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (trunk 191799)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !2, metadata !6, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/manmanren/test-Nov/type_unique_air/ref_addr/tu1.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"tu1.cpp", metadata !"/Users/manmanren/test-Nov/type_unique_air/ref_addr"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !5, null, metadata !"foo", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, metadata !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 1, size 8, align 8, offset 0] [def] [from ]
!5 = metadata !{metadata !"./hdr.h", metadata !"/Users/manmanren/test-Nov/type_unique_air/ref_addr"}
diff --git a/test/DebugInfo/X86/reference-argument.ll b/test/DebugInfo/X86/reference-argument.ll
index be54386a4267..4a6bdca550fd 100644
--- a/test/DebugInfo/X86/reference-argument.ll
+++ b/test/DebugInfo/X86/reference-argument.ll
@@ -34,7 +34,7 @@ declare void @_ZN4SValD2Ev(%class.SVal* %this)
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [aggregate-indirect-arg.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"aggregate-indirect-arg.cpp", metadata !""}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !29, metadata !33, metadata !34, metadata !35}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"_Z3barR4SVal", i32 19, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.SVal*)* @_Z3barR4SVal, null, null, metadata !2, i32 19} ; [ DW_TAG_subprogram ] [line 19] [def] [bar]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [aggregate-indirect-arg.cpp]
diff --git a/test/DebugInfo/X86/rvalue-ref.ll b/test/DebugInfo/X86/rvalue-ref.ll
index e9ea42718c84..b8ed0218568f 100644
--- a/test/DebugInfo/X86/rvalue-ref.ll
+++ b/test/DebugInfo/X86/rvalue-ref.ll
@@ -24,7 +24,7 @@ declare i32 @printf(i8*, ...)
!llvm.module.flags = !{!17}
!0 = metadata !{i32 786449, metadata !16, i32 4, metadata !"clang version 3.2 (trunk 157054) (llvm/trunk 157060)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !16, metadata !6, metadata !"foo", metadata !"foo", metadata !"_Z3fooOi", i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32*)* @_Z3fooOi, null, null, metadata !1, i32 5} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !16} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/sret.ll b/test/DebugInfo/X86/sret.ll
new file mode 100644
index 000000000000..faf51583848f
--- /dev/null
+++ b/test/DebugInfo/X86/sret.ll
@@ -0,0 +1,393 @@
+; RUN: llc -split-dwarf=Enable -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o %t
+; RUN: llvm-dwarfdump -debug-dump=all %t | FileCheck %s
+
+; Based on the debuginfo-tests/sret.cpp code.
+
+; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x5b59949640ec1580)
+; CHECK: DW_AT_GNU_dwo_id [DW_FORM_data8] (0x5b59949640ec1580)
+
+%class.A = type { i32 (...)**, i32 }
+%class.B = type { i8 }
+
+@_ZTV1A = linkonce_odr unnamed_addr constant [4 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*), i8* bitcast (void (%class.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%class.A*)* @_ZN1AD0Ev to i8*)]
+@_ZTVN10__cxxabiv117__class_type_infoE = external global i8*
+@_ZTS1A = linkonce_odr constant [3 x i8] c"1A\00"
+@_ZTI1A = linkonce_odr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8]* @_ZTS1A, i32 0, i32 0) }
+
+@_ZN1AC1Ei = alias void (%class.A*, i32)* @_ZN1AC2Ei
+@_ZN1AC1ERKS_ = alias void (%class.A*, %class.A*)* @_ZN1AC2ERKS_
+
+; Function Attrs: nounwind uwtable
+define void @_ZN1AC2Ei(%class.A* %this, i32 %i) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ %i.addr = alloca i32, align 4
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !67), !dbg !69
+ store i32 %i, i32* %i.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %i.addr}, metadata !70), !dbg !71
+ %this1 = load %class.A** %this.addr
+ %0 = bitcast %class.A* %this1 to i8***, !dbg !72
+ store i8** getelementptr inbounds ([4 x i8*]* @_ZTV1A, i64 0, i64 2), i8*** %0, !dbg !72
+ %m_int = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !72
+ %1 = load i32* %i.addr, align 4, !dbg !72
+ store i32 %1, i32* %m_int, align 4, !dbg !72
+ ret void, !dbg !73
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind uwtable
+define void @_ZN1AC2ERKS_(%class.A* %this, %class.A* %rhs) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ %rhs.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !74), !dbg !75
+ store %class.A* %rhs, %class.A** %rhs.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %rhs.addr}, metadata !76), !dbg !77
+ %this1 = load %class.A** %this.addr
+ %0 = bitcast %class.A* %this1 to i8***, !dbg !78
+ store i8** getelementptr inbounds ([4 x i8*]* @_ZTV1A, i64 0, i64 2), i8*** %0, !dbg !78
+ %m_int = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !78
+ %1 = load %class.A** %rhs.addr, align 8, !dbg !78
+ %m_int2 = getelementptr inbounds %class.A* %1, i32 0, i32 1, !dbg !78
+ %2 = load i32* %m_int2, align 4, !dbg !78
+ store i32 %2, i32* %m_int, align 4, !dbg !78
+ ret void, !dbg !79
+}
+
+; Function Attrs: nounwind uwtable
+define %class.A* @_ZN1AaSERKS_(%class.A* %this, %class.A* %rhs) #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ %rhs.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !80), !dbg !81
+ store %class.A* %rhs, %class.A** %rhs.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %rhs.addr}, metadata !82), !dbg !83
+ %this1 = load %class.A** %this.addr
+ %0 = load %class.A** %rhs.addr, align 8, !dbg !84
+ %m_int = getelementptr inbounds %class.A* %0, i32 0, i32 1, !dbg !84
+ %1 = load i32* %m_int, align 4, !dbg !84
+ %m_int2 = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !84
+ store i32 %1, i32* %m_int2, align 4, !dbg !84
+ ret %class.A* %this1, !dbg !85
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @_ZN1A7get_intEv(%class.A* %this) #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !86), !dbg !87
+ %this1 = load %class.A** %this.addr
+ %m_int = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !88
+ %0 = load i32* %m_int, align 4, !dbg !88
+ ret i32 %0, !dbg !88
+}
+
+; Function Attrs: uwtable
+define void @_ZN1B9AInstanceEv(%class.A* noalias sret %agg.result, %class.B* %this) #2 align 2 {
+entry:
+ %this.addr = alloca %class.B*, align 8
+ %nrvo = alloca i1
+ %cleanup.dest.slot = alloca i32
+ store %class.B* %this, %class.B** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.B** %this.addr}, metadata !89), !dbg !91
+ %this1 = load %class.B** %this.addr
+ store i1 false, i1* %nrvo, !dbg !92
+ call void @llvm.dbg.declare(metadata !{%class.A* %agg.result}, metadata !93), !dbg !92
+ call void @_ZN1AC1Ei(%class.A* %agg.result, i32 12), !dbg !92
+ store i1 true, i1* %nrvo, !dbg !94
+ store i32 1, i32* %cleanup.dest.slot
+ %nrvo.val = load i1* %nrvo, !dbg !95
+ br i1 %nrvo.val, label %nrvo.skipdtor, label %nrvo.unused, !dbg !95
+
+nrvo.unused: ; preds = %entry
+ call void @_ZN1AD2Ev(%class.A* %agg.result), !dbg !96
+ br label %nrvo.skipdtor, !dbg !96
+
+nrvo.skipdtor: ; preds = %nrvo.unused, %entry
+ ret void, !dbg !98
+}
+
+; Function Attrs: nounwind uwtable
+define linkonce_odr void @_ZN1AD2Ev(%class.A* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !101), !dbg !102
+ %this1 = load %class.A** %this.addr
+ ret void, !dbg !103
+}
+
+; Function Attrs: uwtable
+define i32 @main(i32 %argc, i8** %argv) #2 {
+entry:
+ %retval = alloca i32, align 4
+ %argc.addr = alloca i32, align 4
+ %argv.addr = alloca i8**, align 8
+ %b = alloca %class.B, align 1
+ %return_val = alloca i32, align 4
+ %temp.lvalue = alloca %class.A, align 8
+ %exn.slot = alloca i8*
+ %ehselector.slot = alloca i32
+ %a = alloca %class.A, align 8
+ %cleanup.dest.slot = alloca i32
+ store i32 0, i32* %retval
+ store i32 %argc, i32* %argc.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %argc.addr}, metadata !104), !dbg !105
+ store i8** %argv, i8*** %argv.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i8*** %argv.addr}, metadata !106), !dbg !105
+ call void @llvm.dbg.declare(metadata !{%class.B* %b}, metadata !107), !dbg !108
+ call void @_ZN1BC2Ev(%class.B* %b), !dbg !108
+ call void @llvm.dbg.declare(metadata !{i32* %return_val}, metadata !109), !dbg !110
+ call void @_ZN1B9AInstanceEv(%class.A* sret %temp.lvalue, %class.B* %b), !dbg !110
+ %call = invoke i32 @_ZN1A7get_intEv(%class.A* %temp.lvalue)
+ to label %invoke.cont unwind label %lpad, !dbg !110
+
+invoke.cont: ; preds = %entry
+ call void @_ZN1AD2Ev(%class.A* %temp.lvalue), !dbg !111
+ store i32 %call, i32* %return_val, align 4, !dbg !111
+ call void @llvm.dbg.declare(metadata !{%class.A* %a}, metadata !113), !dbg !114
+ call void @_ZN1B9AInstanceEv(%class.A* sret %a, %class.B* %b), !dbg !114
+ %0 = load i32* %return_val, align 4, !dbg !115
+ store i32 %0, i32* %retval, !dbg !115
+ store i32 1, i32* %cleanup.dest.slot
+ call void @_ZN1AD2Ev(%class.A* %a), !dbg !116
+ %1 = load i32* %retval, !dbg !116
+ ret i32 %1, !dbg !116
+
+lpad: ; preds = %entry
+ %2 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup, !dbg !116
+ %3 = extractvalue { i8*, i32 } %2, 0, !dbg !116
+ store i8* %3, i8** %exn.slot, !dbg !116
+ %4 = extractvalue { i8*, i32 } %2, 1, !dbg !116
+ store i32 %4, i32* %ehselector.slot, !dbg !116
+ invoke void @_ZN1AD2Ev(%class.A* %temp.lvalue)
+ to label %invoke.cont1 unwind label %terminate.lpad, !dbg !116
+
+invoke.cont1: ; preds = %lpad
+ br label %eh.resume, !dbg !117
+
+eh.resume: ; preds = %invoke.cont1
+ %exn = load i8** %exn.slot, !dbg !119
+ %sel = load i32* %ehselector.slot, !dbg !119
+ %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn, 0, !dbg !119
+ %lpad.val2 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1, !dbg !119
+ resume { i8*, i32 } %lpad.val2, !dbg !119
+
+terminate.lpad: ; preds = %lpad
+ %5 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null, !dbg !121
+ %6 = extractvalue { i8*, i32 } %5, 0, !dbg !121
+ call void @__clang_call_terminate(i8* %6) #5, !dbg !121
+ unreachable, !dbg !121
+}
+
+; Function Attrs: nounwind uwtable
+define linkonce_odr void @_ZN1BC2Ev(%class.B* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %class.B*, align 8
+ store %class.B* %this, %class.B** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.B** %this.addr}, metadata !123), !dbg !124
+ %this1 = load %class.B** %this.addr
+ ret void, !dbg !125
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+; Function Attrs: noinline noreturn nounwind
+define linkonce_odr hidden void @__clang_call_terminate(i8*) #3 {
+ %2 = call i8* @__cxa_begin_catch(i8* %0) #6
+ call void @_ZSt9terminatev() #5
+ unreachable
+}
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @_ZSt9terminatev()
+
+; Function Attrs: uwtable
+define linkonce_odr void @_ZN1AD0Ev(%class.A* %this) unnamed_addr #2 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ %exn.slot = alloca i8*
+ %ehselector.slot = alloca i32
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !126), !dbg !127
+ %this1 = load %class.A** %this.addr
+ invoke void @_ZN1AD2Ev(%class.A* %this1)
+ to label %invoke.cont unwind label %lpad, !dbg !128
+
+invoke.cont: ; preds = %entry
+ %0 = bitcast %class.A* %this1 to i8*, !dbg !129
+ call void @_ZdlPv(i8* %0) #7, !dbg !129
+ ret void, !dbg !129
+
+lpad: ; preds = %entry
+ %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup, !dbg !131
+ %2 = extractvalue { i8*, i32 } %1, 0, !dbg !131
+ store i8* %2, i8** %exn.slot, !dbg !131
+ %3 = extractvalue { i8*, i32 } %1, 1, !dbg !131
+ store i32 %3, i32* %ehselector.slot, !dbg !131
+ %4 = bitcast %class.A* %this1 to i8*, !dbg !131
+ call void @_ZdlPv(i8* %4) #7, !dbg !131
+ br label %eh.resume, !dbg !131
+
+eh.resume: ; preds = %lpad
+ %exn = load i8** %exn.slot, !dbg !133
+ %sel = load i32* %ehselector.slot, !dbg !133
+ %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn, 0, !dbg !133
+ %lpad.val2 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1, !dbg !133
+ resume { i8*, i32 } %lpad.val2, !dbg !133
+}
+
+; Function Attrs: nobuiltin nounwind
+declare void @_ZdlPv(i8*) #4
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { noinline noreturn nounwind }
+attributes #4 = { nobuiltin nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #5 = { noreturn nounwind }
+attributes #6 = { nounwind }
+attributes #7 = { builtin nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!64, !65}
+!llvm.ident = !{!66}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (trunk 203283) (llvm/trunk 203307)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !48, metadata !2, metadata !2, metadata !"sret.dwo", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/sret.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"sret.cpp", metadata !"/usr/local/google/home/echristo/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !37}
+!4 = metadata !{i32 786434, metadata !1, null, metadata !"A", i32 1, i64 128, i64 64, i32 0, i32 0, null, metadata !5, i32 0, metadata !"_ZTS1A", null, metadata !"_ZTS1A"} ; [ DW_TAG_class_type ] [A] [line 1, size 128, align 64, offset 0] [def] [from ]
+!5 = metadata !{metadata !6, metadata !13, metadata !14, metadata !19, metadata !25, metadata !29, metadata !33}
+!6 = metadata !{i32 786445, metadata !1, metadata !7, metadata !"_vptr$A", i32 0, i64 64, i64 0, i64 0, i32 64, metadata !8} ; [ DW_TAG_member ] [_vptr$A] [line 0, size 64, align 0, offset 0] [artificial] [from ]
+!7 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!8 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 0, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 0, offset 0] [from __vtbl_ptr_type]
+!9 = metadata !{i32 786447, null, null, metadata !"__vtbl_ptr_type", i32 0, i64 64, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [__vtbl_ptr_type] [line 0, size 64, align 0, offset 0] [from ]
+!10 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !11, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = metadata !{metadata !12}
+!12 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!13 = metadata !{i32 786445, metadata !1, metadata !"_ZTS1A", metadata !"m_int", i32 13, i64 32, i64 32, i64 64, i32 2, metadata !12} ; [ DW_TAG_member ] [m_int] [line 13, size 32, align 32, offset 64] [protected] [from int]
+!14 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"", i32 4, metadata !15, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 4} ; [ DW_TAG_subprogram ] [line 4] [A]
+!15 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !16, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = metadata !{null, metadata !17, metadata !12}
+!17 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!19 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"", i32 5, metadata !20, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 5} ; [ DW_TAG_subprogram ] [line 5] [A]
+!20 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !21, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!21 = metadata !{null, metadata !17, metadata !22}
+!22 = metadata !{i32 786448, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !23} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from ]
+!23 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from _ZTS1A]
+!25 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"operator=", metadata !"operator=", metadata !"_ZN1AaSERKS_", i32 7, metadata !26, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 7} ; [ DW_TAG_subprogram ] [line 7] [operator=]
+!26 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !27, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!27 = metadata !{metadata !22, metadata !17, metadata !22}
+!29 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"~A", metadata !"~A", metadata !"", i32 8, metadata !30, i1 false, i1 false, i32 1, i32 0, metadata !"_ZTS1A", i32 256, i1 false, null, null, i32 0, null, i32 8} ; [ DW_TAG_subprogram ] [line 8] [~A]
+!30 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !31, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!31 = metadata !{null, metadata !17}
+!33 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"get_int", metadata !"get_int", metadata !"_ZN1A7get_intEv", i32 10, metadata !34, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 10} ; [ DW_TAG_subprogram ] [line 10] [get_int]
+!34 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !35, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!35 = metadata !{metadata !12, metadata !17}
+!37 = metadata !{i32 786434, metadata !1, null, metadata !"B", i32 38, i64 8, i64 8, i32 0, i32 0, null, metadata !38, i32 0, null, null, metadata !"_ZTS1B"} ; [ DW_TAG_class_type ] [B] [line 38, size 8, align 8, offset 0] [def] [from ]
+!38 = metadata !{metadata !39, metadata !44}
+!39 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1B", metadata !"B", metadata !"B", metadata !"", i32 41, metadata !40, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 41} ; [ DW_TAG_subprogram ] [line 41] [B]
+!40 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !41, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!41 = metadata !{null, metadata !42}
+!42 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1B"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1B]
+!44 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1B", metadata !"AInstance", metadata !"AInstance", metadata !"_ZN1B9AInstanceEv", i32 43, metadata !45, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 43} ; [ DW_TAG_subprogram ] [line 43] [AInstance]
+!45 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !46, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!46 = metadata !{metadata !4, metadata !42}
+!48 = metadata !{metadata !49, metadata !50, metadata !51, metadata !52, metadata !53, metadata !54, metadata !61, metadata !62, metadata !63}
+!49 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"_ZN1AC2Ei", i32 16, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*, i32)* @_ZN1AC2Ei, null, metadata !14, metadata !2, i32 18} ; [ DW_TAG_subprogram ] [line 16] [def] [scope 18] [A]
+!50 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"_ZN1AC2ERKS_", i32 21, metadata !20, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*, %class.A*)* @_ZN1AC2ERKS_, null, metadata !19, metadata !2, i32 23} ; [ DW_TAG_subprogram ] [line 21] [def] [scope 23] [A]
+!51 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"operator=", metadata !"operator=", metadata !"_ZN1AaSERKS_", i32 27, metadata !26, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, %class.A* (%class.A*, %class.A*)* @_ZN1AaSERKS_, null, metadata !25, metadata !2, i32 28} ; [ DW_TAG_subprogram ] [line 27] [def] [scope 28] [operator=]
+!52 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"get_int", metadata !"get_int", metadata !"_ZN1A7get_intEv", i32 33, metadata !34, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (%class.A*)* @_ZN1A7get_intEv, null, metadata !33, metadata !2, i32 34} ; [ DW_TAG_subprogram ] [line 33] [def] [scope 34] [get_int]
+!53 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1B", metadata !"AInstance", metadata !"AInstance", metadata !"_ZN1B9AInstanceEv", i32 47, metadata !45, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*, %class.B*)* @_ZN1B9AInstanceEv, null, metadata !44, metadata !2, i32 48} ; [ DW_TAG_subprogram ] [line 47] [def] [scope 48] [AInstance]
+!54 = metadata !{i32 786478, metadata !1, metadata !7, metadata !"main", metadata !"main", metadata !"", i32 53, metadata !55, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32, i8**)* @main, null, null, metadata !2, i32 54} ; [ DW_TAG_subprogram ] [line 53] [def] [scope 54] [main]
+!55 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !56, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!56 = metadata !{metadata !12, metadata !12, metadata !57}
+!57 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !58} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!58 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !59} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!59 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !60} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from char]
+!60 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!61 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"~A", metadata !"~A", metadata !"_ZN1AD0Ev", i32 8, metadata !30, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*)* @_ZN1AD0Ev, null, metadata !29, metadata !2, i32 8} ; [ DW_TAG_subprogram ] [line 8] [def] [~A]
+!62 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1B", metadata !"B", metadata !"B", metadata !"_ZN1BC2Ev", i32 41, metadata !40, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.B*)* @_ZN1BC2Ev, null, metadata !39, metadata !2, i32 41} ; [ DW_TAG_subprogram ] [line 41] [def] [B]
+!63 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"~A", metadata !"~A", metadata !"_ZN1AD2Ev", i32 8, metadata !30, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*)* @_ZN1AD2Ev, null, metadata !29, metadata !2, i32 8} ; [ DW_TAG_subprogram ] [line 8] [def] [~A]
+!64 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!65 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!66 = metadata !{metadata !"clang version 3.5.0 (trunk 203283) (llvm/trunk 203307)"}
+!67 = metadata !{i32 786689, metadata !49, metadata !"this", null, i32 16777216, metadata !68, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!68 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1A]
+!69 = metadata !{i32 0, i32 0, metadata !49, null}
+!70 = metadata !{i32 786689, metadata !49, metadata !"i", metadata !7, i32 33554448, metadata !12, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 16]
+!71 = metadata !{i32 16, i32 0, metadata !49, null}
+!72 = metadata !{i32 18, i32 0, metadata !49, null}
+!73 = metadata !{i32 19, i32 0, metadata !49, null}
+!74 = metadata !{i32 786689, metadata !50, metadata !"this", null, i32 16777216, metadata !68, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!75 = metadata !{i32 0, i32 0, metadata !50, null}
+!76 = metadata !{i32 786689, metadata !50, metadata !"rhs", metadata !7, i32 33554453, metadata !22, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [rhs] [line 21]
+!77 = metadata !{i32 21, i32 0, metadata !50, null}
+!78 = metadata !{i32 23, i32 0, metadata !50, null}
+!79 = metadata !{i32 24, i32 0, metadata !50, null}
+!80 = metadata !{i32 786689, metadata !51, metadata !"this", null, i32 16777216, metadata !68, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!81 = metadata !{i32 0, i32 0, metadata !51, null}
+!82 = metadata !{i32 786689, metadata !51, metadata !"rhs", metadata !7, i32 33554459, metadata !22, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [rhs] [line 27]
+!83 = metadata !{i32 27, i32 0, metadata !51, null}
+!84 = metadata !{i32 29, i32 0, metadata !51, null}
+!85 = metadata !{i32 30, i32 0, metadata !51, null}
+!86 = metadata !{i32 786689, metadata !52, metadata !"this", null, i32 16777216, metadata !68, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!87 = metadata !{i32 0, i32 0, metadata !52, null}
+!88 = metadata !{i32 35, i32 0, metadata !52, null}
+!89 = metadata !{i32 786689, metadata !53, metadata !"this", null, i32 16777216, metadata !90, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!90 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1B"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1B]
+!91 = metadata !{i32 0, i32 0, metadata !53, null}
+!92 = metadata !{i32 49, i32 0, metadata !53, null}
+!93 = metadata !{i32 786688, metadata !53, metadata !"a", metadata !7, i32 49, metadata !4, i32 8192, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 49]
+!94 = metadata !{i32 50, i32 0, metadata !53, null}
+!95 = metadata !{i32 51, i32 0, metadata !53, null}
+!96 = metadata !{i32 51, i32 0, metadata !97, null}
+!97 = metadata !{i32 786443, metadata !1, metadata !53, i32 51, i32 0, i32 2, i32 5} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!98 = metadata !{i32 51, i32 0, metadata !99, null}
+!99 = metadata !{i32 786443, metadata !1, metadata !100, i32 51, i32 0, i32 3, i32 6} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!100 = metadata !{i32 786443, metadata !1, metadata !53, i32 51, i32 0, i32 1, i32 4} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!101 = metadata !{i32 786689, metadata !63, metadata !"this", null, i32 16777216, metadata !68, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!102 = metadata !{i32 0, i32 0, metadata !63, null}
+!103 = metadata !{i32 8, i32 0, metadata !63, null} ; [ DW_TAG_imported_declaration ]
+!104 = metadata !{i32 786689, metadata !54, metadata !"argc", metadata !7, i32 16777269, metadata !12, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [argc] [line 53]
+!105 = metadata !{i32 53, i32 0, metadata !54, null}
+!106 = metadata !{i32 786689, metadata !54, metadata !"argv", metadata !7, i32 33554485, metadata !57, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [argv] [line 53]
+!107 = metadata !{i32 786688, metadata !54, metadata !"b", metadata !7, i32 55, metadata !37, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [b] [line 55]
+!108 = metadata !{i32 55, i32 0, metadata !54, null}
+!109 = metadata !{i32 786688, metadata !54, metadata !"return_val", metadata !7, i32 56, metadata !12, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [return_val] [line 56]
+!110 = metadata !{i32 56, i32 0, metadata !54, null}
+!111 = metadata !{i32 56, i32 0, metadata !112, null}
+!112 = metadata !{i32 786443, metadata !1, metadata !54, i32 56, i32 0, i32 1, i32 7} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!113 = metadata !{i32 786688, metadata !54, metadata !"a", metadata !7, i32 58, metadata !4, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 58]
+!114 = metadata !{i32 58, i32 0, metadata !54, null} ; [ DW_TAG_imported_module ]
+!115 = metadata !{i32 59, i32 0, metadata !54, null}
+!116 = metadata !{i32 60, i32 0, metadata !54, null}
+!117 = metadata !{i32 60, i32 0, metadata !118, null}
+!118 = metadata !{i32 786443, metadata !1, metadata !54, i32 60, i32 0, i32 1, i32 8} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!119 = metadata !{i32 60, i32 0, metadata !120, null}
+!120 = metadata !{i32 786443, metadata !1, metadata !54, i32 60, i32 0, i32 3, i32 10} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!121 = metadata !{i32 60, i32 0, metadata !122, null}
+!122 = metadata !{i32 786443, metadata !1, metadata !54, i32 60, i32 0, i32 2, i32 9} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!123 = metadata !{i32 786689, metadata !62, metadata !"this", null, i32 16777216, metadata !90, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!124 = metadata !{i32 0, i32 0, metadata !62, null}
+!125 = metadata !{i32 41, i32 0, metadata !62, null}
+!126 = metadata !{i32 786689, metadata !61, metadata !"this", null, i32 16777216, metadata !68, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!127 = metadata !{i32 0, i32 0, metadata !61, null}
+!128 = metadata !{i32 8, i32 0, metadata !61, null} ; [ DW_TAG_imported_declaration ]
+!129 = metadata !{i32 8, i32 0, metadata !130, null} ; [ DW_TAG_imported_declaration ]
+!130 = metadata !{i32 786443, metadata !1, metadata !61, i32 8, i32 0, i32 1, i32 11} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!131 = metadata !{i32 8, i32 0, metadata !132, null} ; [ DW_TAG_imported_declaration ]
+!132 = metadata !{i32 786443, metadata !1, metadata !61, i32 8, i32 0, i32 2, i32 12} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
+!133 = metadata !{i32 8, i32 0, metadata !134, null} ; [ DW_TAG_imported_declaration ]
+!134 = metadata !{i32 786443, metadata !1, metadata !61, i32 8, i32 0, i32 3, i32 13} ; [ DW_TAG_lexical_block ] [/usr/local/google/home/echristo/tmp/sret.cpp]
diff --git a/test/DebugInfo/X86/stmt-list-multiple-compile-units.ll b/test/DebugInfo/X86/stmt-list-multiple-compile-units.ll
index 72eb62f3b436..8816fe77cf01 100644
--- a/test/DebugInfo/X86/stmt-list-multiple-compile-units.ll
+++ b/test/DebugInfo/X86/stmt-list-multiple-compile-units.ll
@@ -1,16 +1,27 @@
; RUN: llc -O0 %s -mtriple=x86_64-apple-darwin -filetype=obj -o %t
; RUN: llvm-dwarfdump %t | FileCheck %s
+; RUN: llc -O0 %s -mtriple=x86_64-apple-darwin -filetype=obj -o %t -dwarf-version=3
+; RUN: llvm-dwarfdump %t | FileCheck %s -check-prefix=DWARF3
; RUN: llc < %s -O0 -mtriple=x86_64-apple-macosx10.7 | FileCheck %s -check-prefix=ASM
; rdar://13067005
; CHECK: .debug_info contents:
; CHECK: DW_TAG_compile_unit
-; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; CHECK: DW_AT_stmt_list [DW_FORM_data4] (0x00000000)
+; CHECK: DW_AT_stmt_list [DW_FORM_sec_offset] (0x00000000)
+; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+; CHECK: DW_AT_high_pc [DW_FORM_data4] (0x00000010)
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+; CHECK: DW_AT_high_pc [DW_FORM_data4] (0x00000010)
; CHECK: DW_TAG_compile_unit
-; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
-; CHECK: DW_AT_stmt_list [DW_FORM_data4] (0x0000003c)
+; CHECK: DW_AT_stmt_list [DW_FORM_sec_offset] (0x0000003c)
+; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000010)
+; CHECK: DW_AT_high_pc [DW_FORM_data4] (0x00000009)
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000010)
+; CHECK: DW_AT_high_pc [DW_FORM_data4] (0x00000009)
+
; CHECK: .debug_line contents:
; CHECK-NEXT: Line table prologue:
@@ -21,11 +32,30 @@
; CHECK: file_names[ 1] 0 0x00000000 0x00000000 simple2.c
; CHECK-NOT: file_names
+; DWARF3: .debug_info contents:
+; DWARF3: DW_TAG_compile_unit
+; DWARF3: DW_AT_stmt_list [DW_FORM_data4] (0x00000000)
+
+; DWARF3: DW_TAG_compile_unit
+; DWARF3: DW_AT_stmt_list [DW_FORM_data4] (0x0000003c)
+
+
+; DWARF3: .debug_line contents:
+; DWARF3-NEXT: Line table prologue:
+; DWARF3-NEXT: total_length: 0x00000038
+; DWARF3: file_names[ 1] 0 0x00000000 0x00000000 simple.c
+; DWARF3: Line table prologue:
+; DWARF3-NEXT: total_length: 0x00000039
+; DWARF3: file_names[ 1] 0 0x00000000 0x00000000 simple2.c
+; DWARF3-NOT: file_names
+
; PR15408
; ASM: L__DWARF__debug_info_begin0:
-; ASM: .long 0 ## DW_AT_stmt_list
+; ASM: Lset3 = Lline_table_start0-Lsection_line ## DW_AT_stmt_list
+; ASM-NEXT: .long Lset3
; ASM: L__DWARF__debug_info_begin1:
-; ASM: .long 0 ## DW_AT_stmt_list
+; ASM: Lset13 = Lline_table_start0-Lsection_line ## DW_AT_stmt_list
+; ASM-NEXT: .long Lset13
define i32 @test(i32 %a) nounwind uwtable ssp {
entry:
%a.addr = alloca i32, align 4
@@ -49,15 +79,15 @@ entry:
!llvm.dbg.cu = !{!0, !10}
!llvm.module.flags = !{!25}
-!0 = metadata !{i32 786449, metadata !23, i32 12, metadata !"clang version 3.3", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !23, i32 12, metadata !"clang version 3.3", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !"", i32 1} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !23, metadata !6, metadata !"test", metadata !"test", metadata !"", i32 2, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @test, null, null, metadata !1, i32 3} ; [ DW_TAG_subprogram ] [line 2] [def] [scope 3] [test]
!6 = metadata !{i32 786473, metadata !23} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9, metadata !9}
!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!10 = metadata !{i32 786449, metadata !24, i32 12, metadata !"clang version 3.3 (trunk 172862)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !11, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!10 = metadata !{i32 786449, metadata !24, i32 12, metadata !"clang version 3.3 (trunk 172862)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !11, metadata !1, metadata !1, metadata !"", i32 1} ; [ DW_TAG_compile_unit ]
!11 = metadata !{metadata !13}
!13 = metadata !{i32 786478, metadata !24, metadata !14, metadata !"fn", metadata !"fn", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @fn, null, null, metadata !1, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [fn]
!14 = metadata !{i32 786473, metadata !24} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/stmt-list.ll b/test/DebugInfo/X86/stmt-list.ll
index 6f846c1589a3..99bd0fc1b580 100644
--- a/test/DebugInfo/X86/stmt-list.ll
+++ b/test/DebugInfo/X86/stmt-list.ll
@@ -3,7 +3,7 @@
; CHECK: .section .debug_line,"",@progbits
; CHECK-NEXT: .Lsection_line:
-; CHECK: .long .Lsection_line # DW_AT_stmt_list
+; CHECK: .long .Lline_table_start0 # DW_AT_stmt_list
define void @f() {
entry:
diff --git a/test/DebugInfo/X86/stringpool.ll b/test/DebugInfo/X86/stringpool.ll
index fccac2618cfb..846d2101e590 100644
--- a/test/DebugInfo/X86/stringpool.ll
+++ b/test/DebugInfo/X86/stringpool.ll
@@ -6,8 +6,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!9}
-!0 = metadata !{i32 786449, metadata !8, i32 12, metadata !"clang version 3.1 (trunk 143009)", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !8, i32 12, metadata !"clang version 3.1 (trunk 143009)", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 720948, i32 0, null, metadata !"yyyy", metadata !"yyyy", metadata !"", metadata !6, i32 1, metadata !7, i32 0, i32 1, i32* @yyyy, null} ; [ DW_TAG_variable ]
!6 = metadata !{i32 720937, metadata !8} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/struct-loc.ll b/test/DebugInfo/X86/struct-loc.ll
index 95bdd41fb063..390d8da5d0a5 100644
--- a/test/DebugInfo/X86/struct-loc.ll
+++ b/test/DebugInfo/X86/struct-loc.ll
@@ -14,8 +14,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!12}
-!0 = metadata !{i32 786449, metadata !11, i32 12, metadata !"clang version 3.1 (trunk 152837) (llvm/trunk 152845)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !11, i32 12, metadata !"clang version 3.1 (trunk 152837) (llvm/trunk 152845)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786484, i32 0, null, metadata !"f", metadata !"f", metadata !"", metadata !6, i32 5, metadata !7, i32 0, i32 1, %struct.foo* @f, null} ; [ DW_TAG_variable ]
!6 = metadata !{i32 786473, metadata !11} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/subrange-type.ll b/test/DebugInfo/X86/subrange-type.ll
index 05b147765b15..14dca46c64f3 100644
--- a/test/DebugInfo/X86/subrange-type.ll
+++ b/test/DebugInfo/X86/subrange-type.ll
@@ -22,7 +22,7 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!llvm.module.flags = !{!18}
!0 = metadata !{i32 786449, metadata !17, i32 12, metadata !"clang version 3.3 (trunk 171472) (llvm/trunk 171487)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !6, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 2, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !1, i32 3} ; [ DW_TAG_subprogram ] [line 2] [def] [scope 3] [main]
!6 = metadata !{i32 786473, metadata !17} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/subreg.ll b/test/DebugInfo/X86/subreg.ll
index 162c2d166d72..22fd1a80e87b 100644
--- a/test/DebugInfo/X86/subreg.ll
+++ b/test/DebugInfo/X86/subreg.ll
@@ -2,9 +2,11 @@
; We are testing that a value in a 16 bit register gets reported as
; being in its superregister.
-; FIXME: There should be a DW_OP_bit_piece too.
-; CHECK: .byte 80 # DW_OP_reg0
+; CHECK: .byte 80 # super-register
+; CHECK-NEXT: # DW_OP_reg0
+; CHECK-NEXT: .byte 147 # DW_OP_piece
+; CHECK-NEXT: .byte 2 # 2
define i16 @f(i16 signext %zzz) nounwind {
entry:
diff --git a/test/DebugInfo/X86/subregisters.ll b/test/DebugInfo/X86/subregisters.ll
new file mode 100644
index 000000000000..d46a95f2c994
--- /dev/null
+++ b/test/DebugInfo/X86/subregisters.ll
@@ -0,0 +1,117 @@
+; RUN: llc -mtriple=x86_64-apple-darwin %s -o %t.o -filetype=obj -O0
+; RUN: llvm-dwarfdump %t.o | FileCheck %s
+;
+; Test that on x86_64, the 32-bit subregister esi is emitted as
+; DW_OP_piece 32 of the 64-bit rsi.
+;
+; rdar://problem/16015314
+;
+; CHECK: DW_AT_location [DW_FORM_block1] (<0x03> 54 93 04 )
+; CHECK: DW_AT_name [DW_FORM_strp]{{.*}} "a"
+;
+; struct bar {
+; int a;
+; int b;
+; };
+;
+; void doSomething() __attribute__ ((noinline));
+;
+; void doSomething(struct bar *b)
+; {
+; int a = b->a;
+; printf("%d\n", a); // set breakpoint here
+; }
+;
+; int main()
+; {
+; struct bar myBar = { 3, 4 };
+; doSomething(&myBar);
+; return 0;
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%struct.bar = type { i32, i32 }
+
+@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+@main.myBar = private unnamed_addr constant %struct.bar { i32 3, i32 4 }, align 4
+
+; Function Attrs: noinline nounwind ssp uwtable
+define void @doSomething(%struct.bar* nocapture readonly %b) #0 {
+entry:
+ tail call void @llvm.dbg.value(metadata !{%struct.bar* %b}, i64 0, metadata !15), !dbg !25
+ %a1 = getelementptr inbounds %struct.bar* %b, i64 0, i32 0, !dbg !26
+ %0 = load i32* %a1, align 4, !dbg !26, !tbaa !27
+ tail call void @llvm.dbg.value(metadata !{i32 %0}, i64 0, metadata !16), !dbg !26
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %0) #4, !dbg !32
+ ret void, !dbg !33
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind
+declare i32 @printf(i8* nocapture readonly, ...) #2
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @main() #3 {
+entry:
+ %myBar = alloca i64, align 8, !dbg !34
+ %tmpcast = bitcast i64* %myBar to %struct.bar*, !dbg !34
+ tail call void @llvm.dbg.declare(metadata !{%struct.bar* %tmpcast}, metadata !21), !dbg !34
+ store i64 17179869187, i64* %myBar, align 8, !dbg !34
+ call void @doSomething(%struct.bar* %tmpcast), !dbg !35
+ ret i32 0, !dbg !36
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { noinline nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+attributes #3 = { nounwind ssp uwtable }
+attributes #4 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!22, !23}
+!llvm.ident = !{!24}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [subregisters.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"subregisters.c", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !17}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"doSomething", metadata !"doSomething", metadata !"", i32 10, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (%struct.bar*)* @doSomething, null, null, metadata !14, i32 11} ; [ DW_TAG_subprogram ] [line 10] [def] [scope 11] [doSomething]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [subregisters.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8}
+!8 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from bar]
+!9 = metadata !{i32 786451, metadata !1, null, metadata !"bar", i32 3, i64 64, i64 32, i32 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [bar] [line 3, size 64, align 32, offset 0] [def] [from ]
+!10 = metadata !{metadata !11, metadata !13}
+!11 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"a", i32 4, i64 32, i64 32, i64 0, i32 0, metadata !12} ; [ DW_TAG_member ] [a] [line 4, size 32, align 32, offset 0] [from int]
+!12 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!13 = metadata !{i32 786445, metadata !1, metadata !9, metadata !"b", i32 5, i64 32, i64 32, i64 32, i32 0, metadata !12} ; [ DW_TAG_member ] [b] [line 5, size 32, align 32, offset 32] [from int]
+!14 = metadata !{metadata !15, metadata !16}
+!15 = metadata !{i32 786689, metadata !4, metadata !"b", metadata !5, i32 16777226, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [b] [line 10]
+!16 = metadata !{i32 786688, metadata !4, metadata !"a", metadata !5, i32 12, metadata !12, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 12]
+!17 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 16, metadata !18, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 ()* @main, null, null, metadata !20, i32 17} ; [ DW_TAG_subprogram ] [line 16] [def] [scope 17] [main]
+!18 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !19, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!19 = metadata !{metadata !12}
+!20 = metadata !{metadata !21}
+!21 = metadata !{i32 786688, metadata !17, metadata !"myBar", metadata !5, i32 18, metadata !9, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [myBar] [line 18]
+!22 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!23 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!24 = metadata !{metadata !"clang version 3.5 "}
+!25 = metadata !{i32 10, i32 0, metadata !4, null}
+!26 = metadata !{i32 12, i32 0, metadata !4, null}
+!27 = metadata !{metadata !28, metadata !29, i64 0}
+!28 = metadata !{metadata !"bar", metadata !29, i64 0, metadata !29, i64 4}
+!29 = metadata !{metadata !"int", metadata !30, i64 0}
+!30 = metadata !{metadata !"omnipotent char", metadata !31, i64 0}
+!31 = metadata !{metadata !"Simple C/C++ TBAA"}
+!32 = metadata !{i32 13, i32 0, metadata !4, null}
+!33 = metadata !{i32 14, i32 0, metadata !4, null}
+!34 = metadata !{i32 18, i32 0, metadata !17, null}
+!35 = metadata !{i32 19, i32 0, metadata !17, null}
+!36 = metadata !{i32 20, i32 0, metadata !17, null}
diff --git a/test/DebugInfo/X86/template.ll b/test/DebugInfo/X86/template.ll
index 64a8f7a87d15..54c351c7bfd3 100644
--- a/test/DebugInfo/X86/template.ll
+++ b/test/DebugInfo/X86/template.ll
@@ -35,7 +35,7 @@
; The address of the global 'glbl', followed by DW_OP_stack_value (9f), to use
; the value immediately, rather than indirecting through the address.
-; CHECK-NEXT: DW_AT_location [DW_FORM_block1]{{ *}}(<0x0a> 03 00 00 00 00 00 00 00 00 9f )
+; CHECK-NEXT: DW_AT_location [DW_FORM_exprloc]{{ *}}(<0xa> 03 00 00 00 00 00 00 00 00 9f )
; CHECK-NOT: NULL
; CHECK: DW_TAG_GNU_template_template_param
@@ -89,7 +89,7 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (trunk 192849) (llvm/trunk 192850)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !9, metadata !28, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/bar.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"bar.cpp", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !8}
!4 = metadata !{i32 786451, metadata !1, null, metadata !"y_impl<int>", i32 2, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, metadata !5, metadata !"_ZTS6y_implIiE"} ; [ DW_TAG_structure_type ] [y_impl<int>] [line 2, size 8, align 8, offset 0] [def] [from ]
!5 = metadata !{metadata !6}
diff --git a/test/DebugInfo/X86/tls-fission.ll b/test/DebugInfo/X86/tls-fission.ll
deleted file mode 100644
index 8a25aced8b7b..000000000000
--- a/test/DebugInfo/X86/tls-fission.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc -split-dwarf=Enable -mtriple=x86_64-linux -O0 -filetype=asm < %s | FileCheck %s
-
-; FIXME: add relocation and DWARF expression support to llvm-dwarfdump & use
-; that here instead of raw assembly printing
-
-; CHECK: debug_info.dwo
-; 3 bytes of data in this DW_FORM_block1 representation of the location of 'tls'
-; CHECK: .byte 3{{ *}}# DW_AT_location
-; DW_OP_const_index (0xfx == 252) to refer to the debug_addr table
-; CHECK-NEXT: .byte 252
-; an index of zero into the debug_addr table
-; CHECK-NEXT: .byte 0
-; DW_OP_lo_user based on GCC/GDB extension presumably (by experiment) to support TLS
-; CHECK-NEXT: .byte 224
-; check that the expected TLS address description is the first thing in the debug_addr section
-; CHECK: debug_addr
-; CHECK-NEXT: .quad tls@DTPOFF
-
-@tls = thread_local global i32 0, align 4
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!7, !8}
-
-!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !"tls.dwo"} ; [ DW_TAG_compile_unit ] [/tmp/tls.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !"tls.cpp", metadata !"/tmp"}
-!2 = metadata !{i32 0}
-!3 = metadata !{metadata !4}
-!4 = metadata !{i32 786484, i32 0, null, metadata !"tls", metadata !"tls", metadata !"", metadata !5, i32 1, metadata !6, i32 0, i32 1, i32* @tls, null} ; [ DW_TAG_variable ] [tls] [line 1] [def]
-!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/tls.cpp]
-!6 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
-!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/X86/tls.ll b/test/DebugInfo/X86/tls.ll
index 745c2f40f67f..e49b12fbf54d 100644
--- a/test/DebugInfo/X86/tls.ll
+++ b/test/DebugInfo/X86/tls.ll
@@ -1,35 +1,102 @@
-; RUN: llc -mtriple=x86_64-linux -O0 -filetype=asm < %s | FileCheck %s
-; RUN: llc -mtriple=i386-linux -O0 -filetype=asm < %s | FileCheck --check-prefix=CHECK-32 %s
+; RUN: llc %s -o - -filetype=asm -O0 -mtriple=x86_64-unknown-linux-gnu \
+; RUN: | FileCheck --check-prefix=CHECK --check-prefix=SINGLE --check-prefix=SINGLE-64 %s
+
+; RUN: llc %s -o - -filetype=asm -O0 -mtriple=i386-linux-gnu \
+; RUN: | FileCheck --check-prefix=CHECK --check-prefix=SINGLE --check-prefix=SINGLE-32 %s
+
+; RUN: llc %s -o - -filetype=asm -O0 -mtriple=x86_64-unknown-linux-gnu -split-dwarf=Enable \
+; RUN: | FileCheck --check-prefix=CHECK --check-prefix=FISSION %s
; FIXME: add relocation and DWARF expression support to llvm-dwarfdump & use
; that here instead of raw assembly printing
+; FISSION: .section .debug_info.dwo,
+; 3 bytes of data in this DW_FORM_block1 representation of the location of 'tls'
+; FISSION: .byte 3{{ *}}# DW_AT_location
+; DW_OP_GNU_const_index (0xfc == 252) to refer to the debug_addr table
+; FISSION-NEXT: .byte 252
+; an index of zero into the debug_addr table
+; FISSION-NEXT: .byte 0
+
+; SINGLE: .section .debug_info,
; 10 bytes of data in this DW_FORM_block1 representation of the location of 'tls'
-; CHECK: .byte 10{{ *}}# DW_AT_location
-; DW_OP_const8u (0x0e == 14) of adress
-; CHECK: .byte 14
-; The debug relocation of the address of the tls variable
-; CHECK: .quad tls@DTPOFF
-; DW_OP_lo_user based on GCC/GDB extension presumably (by experiment) to support TLS
-; CHECK: .byte 224
-
-; same again, except with a 32 bit address
-; CHECK-32: .byte 6{{ *}}# DW_AT_location
-; CHECK-32: .byte 12
-; CHECK-32: .long tls@DTPOFF
-; CHECK-32: .byte 224
-
-@tls = thread_local global i32 7, align 4
+; SINGLE-64: .byte 10 # DW_AT_location
+; DW_OP_const8u (0x0e == 14) of address
+; SINGLE-64-NEXT: .byte 14
+; SINGLE-64-NEXT: .quad tls@DTPOFF
+
+; SINGLE-32: .byte 6 # DW_AT_location
+; DW_OP_const4u (0x0c == 12) of address
+; SINGLE-32-NEXT: .byte 12
+; SINGLE-32-NEXT: .long tls@DTPOFF
+
+; DW_OP_GNU_push_tls_address
+; CHECK-NEXT: .byte 224
+
+; FISSION: DW_TAG_variable
+; FISSION: .byte 2 # DW_AT_location
+; DW_OP_GNU_addr_index
+; FISSION-NEXT: .byte 251
+; FISSION-NEXT: .byte 1
+
+; FISSION: DW_TAG_template_value_parameter
+; FISSION: .byte 3 # DW_AT_location
+; DW_OP_GNU_addr_index
+; FISSION-NEXT: .byte 251
+; FISSION-NEXT: .byte 1
+; DW_OP_stack_value
+; FISSION-NEXT: .byte 159
+
+; check that the expected TLS address description is the first thing in the debug_addr section
+; FISSION: .section .debug_addr
+; FISSION-NEXT: .quad tls@DTPOFF
+; FISSION-NEXT: .quad glbl
+; FISSION-NOT: .quad glbl
+
+; Generated from:
+
+; __thread int tls;
+; int glbl;
+;
+; template <int *I>
+; int func() {
+; return 0;
+; }
+;
+; template int func<&glbl>(); // create a second reference to 'glbl'
+
+
+@tls = thread_local global i32 0, align 4
+@glbl = global i32 0, align 4
+
+; Function Attrs: nounwind uwtable
+define weak_odr i32 @_Z4funcIXadL_Z4glblEEEiv() #0 {
+entry:
+ ret i32 0, !dbg !18
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!7, !8}
+!llvm.module.flags = !{!15, !16}
+!llvm.ident = !{!17}
-!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/tls.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !"tls.cpp", metadata !"/tmp"}
-!2 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !12, metadata !2, metadata !"-.dwo"} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/tls.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"tls.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
-!4 = metadata !{i32 786484, i32 0, null, metadata !"tls", metadata !"tls", metadata !"", metadata !5, i32 1, metadata !6, i32 0, i32 1, i32* @tls, null} ; [ DW_TAG_variable ] [tls] [line 1] [def]
-!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/tls.cpp]
-!6 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
-!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func<&glbl>", metadata !"func<&glbl>", metadata !"_Z4funcIXadL_Z4glblEEEiv", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z4funcIXadL_Z4glblEEEiv, metadata !9, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [func<&glbl>]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/tls.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786480, null, metadata !"I", metadata !11, i32* @glbl, null, i32 0, i32 0} ; [ DW_TAG_template_value_parameter ]
+!11 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !8} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!12 = metadata !{metadata !13, metadata !14}
+!13 = metadata !{i32 786484, i32 0, null, metadata !"tls", metadata !"tls", metadata !"", metadata !5, i32 1, metadata !8, i32 0, i32 1, i32* @tls, null} ; [ DW_TAG_variable ] [tls] [line 1] [def]
+!14 = metadata !{i32 786484, i32 0, null, metadata !"glbl", metadata !"glbl", metadata !"", metadata !5, i32 2, metadata !8, i32 0, i32 1, i32* @glbl, null} ; [ DW_TAG_variable ] [glbl] [line 2] [def]
+!15 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!16 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!17 = metadata !{metadata !"clang version 3.5 "}
+!18 = metadata !{i32 6, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/X86/type_units_with_addresses.ll b/test/DebugInfo/X86/type_units_with_addresses.ll
new file mode 100644
index 000000000000..ff278f664465
--- /dev/null
+++ b/test/DebugInfo/X86/type_units_with_addresses.ll
@@ -0,0 +1,151 @@
+; REQUIRES: object-emission
+
+; RUN: llc -split-dwarf=Enable -filetype=obj -O0 -generate-type-units -mtriple=x86_64-unknown-linux-gnu < %s \
+; RUN: | llvm-dwarfdump - | FileCheck %s
+
+; RUN: llc -split-dwarf=Disable -filetype=obj -O0 -generate-type-units -mtriple=x86_64-unknown-linux-gnu < %s \
+; RUN: | llvm-dwarfdump - | FileCheck --check-prefix=SINGLE %s
+
+; Test case built from:
+;int i;
+;
+;template <int *I>
+;struct S1 {};
+;
+;S1<&i> s1;
+;
+;template <int *I>
+;struct S2_1 {};
+;
+;struct S2 {
+; S2_1<&i> s2_1;
+;};
+;
+;S2 s2;
+;
+;template <int *I>
+;struct S3_1 {};
+;
+;struct S3_2 {};
+;
+;struct S3 {
+; S3_1<&i> s3_1;
+; S3_2 s3_2;
+;};
+;
+;S3 s3;
+;
+;struct S4_1 {};
+;
+;template <int *T>
+;struct S4_2 {};
+;
+;struct S4 {
+; S4_1 s4_1;
+; S4_2<&::i> s4_2;
+;};
+;
+;S4 s4;
+
+
+; CHECK: .debug_info.dwo contents:
+
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}}"S1<&i>"
+
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}}"S2"
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}}"S2_1<&i>"
+
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}}"S3"
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}}"S3_1<&i>"
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_declaration
+; CHECK-NEXT: DW_AT_signature
+
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}}"S4"
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_declaration
+; CHECK-NEXT: DW_AT_signature
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}}"S4_2<&i>"
+
+; SINGLE: .debug_info contents:
+
+; SINGLE: DW_TAG_structure_type
+; SINGLE-NEXT: DW_AT_declaration
+; SINGLE-NEXT: DW_AT_signature
+
+; SINGLE: DW_TAG_structure_type
+; SINGLE-NEXT: DW_AT_declaration
+; SINGLE-NEXT: DW_AT_signature
+
+; SINGLE: DW_TAG_structure_type
+; SINGLE-NEXT: DW_AT_declaration
+; SINGLE-NEXT: DW_AT_signature
+
+; SINGLE: DW_TAG_structure_type
+; SINGLE-NEXT: DW_AT_declaration
+; SINGLE-NEXT: DW_AT_signature
+
+%struct.S1 = type { i8 }
+%struct.S2 = type { %struct.S2_1 }
+%struct.S2_1 = type { i8 }
+%struct.S3 = type { %struct.S3_1, %struct.S3_2 }
+%struct.S3_1 = type { i8 }
+%struct.S3_2 = type { i8 }
+%struct.S4 = type { %struct.S4_1, %struct.S4_2 }
+%struct.S4_1 = type { i8 }
+%struct.S4_2 = type { i8 }
+
+@i = global i32 0, align 4
+@a = global %struct.S1 zeroinitializer, align 1
+@s2 = global %struct.S2 zeroinitializer, align 1
+@s3 = global %struct.S3 zeroinitializer, align 1
+@s4 = global %struct.S4 zeroinitializer, align 1
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!34, !35}
+!llvm.ident = !{!36}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !2, metadata !27, metadata !2, metadata !"tu.dwo", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/tu.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"tu.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !9, metadata !12, metadata !13, metadata !17, metadata !18, metadata !19, metadata !23, metadata !24}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"S1<&i>", i32 4, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, metadata !5, metadata !"_ZTS2S1IXadL_Z1iEEE"} ; [ DW_TAG_structure_type ] [S1<&i>] [line 4, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 786480, null, metadata !"I", metadata !7, i32* @i, null, i32 0, i32 0} ; [ DW_TAG_template_value_parameter ]
+!7 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !8} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786451, metadata !1, null, metadata !"S2", i32 11, i64 8, i64 8, i32 0, i32 0, null, metadata !10, i32 0, null, null, metadata !"_ZTS2S2"} ; [ DW_TAG_structure_type ] [S2] [line 11, size 8, align 8, offset 0] [def] [from ]
+!10 = metadata !{metadata !11}
+!11 = metadata !{i32 786445, metadata !1, metadata !"_ZTS2S2", metadata !"s2_1", i32 12, i64 8, i64 8, i64 0, i32 0, metadata !"_ZTS4S2_1IXadL_Z1iEEE"} ; [ DW_TAG_member ] [s2_1] [line 12, size 8, align 8, offset 0] [from _ZTS4S2_1IXadL_Z1iEEE]
+!12 = metadata !{i32 786451, metadata !1, null, metadata !"S2_1<&i>", i32 9, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, metadata !5, metadata !"_ZTS4S2_1IXadL_Z1iEEE"} ; [ DW_TAG_structure_type ] [S2_1<&i>] [line 9, size 8, align 8, offset 0] [def] [from ]
+!13 = metadata !{i32 786451, metadata !1, null, metadata !"S3", i32 22, i64 16, i64 8, i32 0, i32 0, null, metadata !14, i32 0, null, null, metadata !"_ZTS2S3"} ; [ DW_TAG_structure_type ] [S3] [line 22, size 16, align 8, offset 0] [def] [from ]
+!14 = metadata !{metadata !15, metadata !16}
+!15 = metadata !{i32 786445, metadata !1, metadata !"_ZTS2S3", metadata !"s3_1", i32 23, i64 8, i64 8, i64 0, i32 0, metadata !"_ZTS4S3_1IXadL_Z1iEEE"} ; [ DW_TAG_member ] [s3_1] [line 23, size 8, align 8, offset 0] [from _ZTS4S3_1IXadL_Z1iEEE]
+!16 = metadata !{i32 786445, metadata !1, metadata !"_ZTS2S3", metadata !"s3_2", i32 24, i64 8, i64 8, i64 8, i32 0, metadata !"_ZTS4S3_2"} ; [ DW_TAG_member ] [s3_2] [line 24, size 8, align 8, offset 8] [from _ZTS4S3_2]
+!17 = metadata !{i32 786451, metadata !1, null, metadata !"S3_1<&i>", i32 18, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, metadata !5, metadata !"_ZTS4S3_1IXadL_Z1iEEE"} ; [ DW_TAG_structure_type ] [S3_1<&i>] [line 18, size 8, align 8, offset 0] [def] [from ]
+!18 = metadata !{i32 786451, metadata !1, null, metadata !"S3_2", i32 20, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, metadata !"_ZTS4S3_2"} ; [ DW_TAG_structure_type ] [S3_2] [line 20, size 8, align 8, offset 0] [def] [from ]
+!19 = metadata !{i32 786451, metadata !1, null, metadata !"S4", i32 34, i64 16, i64 8, i32 0, i32 0, null, metadata !20, i32 0, null, null, metadata !"_ZTS2S4"} ; [ DW_TAG_structure_type ] [S4] [line 34, size 16, align 8, offset 0] [def] [from ]
+!20 = metadata !{metadata !21, metadata !22}
+!21 = metadata !{i32 786445, metadata !1, metadata !"_ZTS2S4", metadata !"s4_1", i32 35, i64 8, i64 8, i64 0, i32 0, metadata !"_ZTS4S4_1"} ; [ DW_TAG_member ] [s4_1] [line 35, size 8, align 8, offset 0] [from _ZTS4S4_1]
+!22 = metadata !{i32 786445, metadata !1, metadata !"_ZTS2S4", metadata !"s4_2", i32 36, i64 8, i64 8, i64 8, i32 0, metadata !"_ZTS4S4_2IXadL_Z1iEEE"} ; [ DW_TAG_member ] [s4_2] [line 36, size 8, align 8, offset 8] [from _ZTS4S4_2IXadL_Z1iEEE]
+!23 = metadata !{i32 786451, metadata !1, null, metadata !"S4_1", i32 29, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, null, metadata !"_ZTS4S4_1"} ; [ DW_TAG_structure_type ] [S4_1] [line 29, size 8, align 8, offset 0] [def] [from ]
+!24 = metadata !{i32 786451, metadata !1, null, metadata !"S4_2<&i>", i32 32, i64 8, i64 8, i32 0, i32 0, null, metadata !2, i32 0, null, metadata !25, metadata !"_ZTS4S4_2IXadL_Z1iEEE"} ; [ DW_TAG_structure_type ] [S4_2<&i>] [line 32, size 8, align 8, offset 0] [def] [from ]
+!25 = metadata !{metadata !26}
+!26 = metadata !{i32 786480, null, metadata !"T", metadata !7, i32* @i, null, i32 0, i32 0} ; [ DW_TAG_template_value_parameter ]
+!27 = metadata !{metadata !28, metadata !30, metadata !31, metadata !32, metadata !33}
+!28 = metadata !{i32 786484, i32 0, null, metadata !"i", metadata !"i", metadata !"", metadata !29, i32 1, metadata !8, i32 0, i32 1, i32* @i, null} ; [ DW_TAG_variable ] [i] [line 1] [def]
+!29 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/tu.cpp]
+!30 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !29, i32 6, metadata !"_ZTS2S1IXadL_Z1iEEE", i32 0, i32 1, %struct.S1* @a, null} ; [ DW_TAG_variable ] [a] [line 6] [def]
+!31 = metadata !{i32 786484, i32 0, null, metadata !"s2", metadata !"s2", metadata !"", metadata !29, i32 15, metadata !"_ZTS2S2", i32 0, i32 1, %struct.S2* @s2, null} ; [ DW_TAG_variable ] [s2] [line 15] [def]
+!32 = metadata !{i32 786484, i32 0, null, metadata !"s3", metadata !"s3", metadata !"", metadata !29, i32 27, metadata !"_ZTS2S3", i32 0, i32 1, %struct.S3* @s3, null} ; [ DW_TAG_variable ] [s3] [line 27] [def]
+!33 = metadata !{i32 786484, i32 0, null, metadata !"s4", metadata !"s4", metadata !"", metadata !29, i32 39, metadata !"_ZTS2S4", i32 0, i32 1, %struct.S4* @s4, null} ; [ DW_TAG_variable ] [s4] [line 39] [def]
+!34 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!35 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!36 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/DebugInfo/X86/union-template.ll b/test/DebugInfo/X86/union-template.ll
index c70ae0798c71..5fdb3494cf9d 100644
--- a/test/DebugInfo/X86/union-template.ll
+++ b/test/DebugInfo/X86/union-template.ll
@@ -29,9 +29,9 @@ attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!28}
-!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.3 (trunk 178499) (llvm/trunk 178472)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !9, metadata !9, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.cc] [DW_LANG_C_plus_plus]
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.3 (trunk 178499) (llvm/trunk 178472)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !9, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.cc] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"foo.cc", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"g", metadata !"g", metadata !"_ZN7PR156371gEf", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (float)* @_ZN7PR156371gEf, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [g]
!5 = metadata !{i32 786489, metadata !1, null, metadata !"PR15637", i32 1} ; [ DW_TAG_namespace ] [PR15637] [line 1]
diff --git a/test/DebugInfo/X86/vector.ll b/test/DebugInfo/X86/vector.ll
index 6e14ed67fc30..a7a158539844 100644
--- a/test/DebugInfo/X86/vector.ll
+++ b/test/DebugInfo/X86/vector.ll
@@ -12,8 +12,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!13}
-!0 = metadata !{i32 786449, metadata !12, i32 12, metadata !"clang version 3.3 (trunk 171825) (llvm/trunk 171822)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/echristo/foo.c] [DW_LANG_C99]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !12, i32 12, metadata !"clang version 3.3 (trunk 171825) (llvm/trunk 171822)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/echristo/foo.c] [DW_LANG_C99]
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !6, i32 3, metadata !7, i32 0, i32 1, <4 x i32>* @a, null} ; [ DW_TAG_variable ] [a] [line 3] [def]
!6 = metadata !{i32 786473, metadata !12} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/X86/vla.ll b/test/DebugInfo/X86/vla.ll
index 512b22323c89..a1a2e66685c6 100644
--- a/test/DebugInfo/X86/vla.ll
+++ b/test/DebugInfo/X86/vla.ll
@@ -77,7 +77,7 @@ entry:
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/vla.c] [DW_LANG_C99]
!1 = metadata !{metadata !"vla.c", metadata !""}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !9}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"vla", metadata !"vla", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @vla, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [vla]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/vla.c]
diff --git a/test/DebugInfo/arm-relocs.test b/test/DebugInfo/arm-relocs.test
new file mode 100644
index 000000000000..13e11f7043f3
--- /dev/null
+++ b/test/DebugInfo/arm-relocs.test
@@ -0,0 +1,5 @@
+RUN: llvm-dwarfdump %p/Inputs/arm-relocs.elf-arm | FileCheck %s
+
+; CHECK: debug_info contents
+; CHECK: DW_TAG_enumeration_type
+; CHECK-NEXT: DW_AT_name{{.*}} = "e1"
diff --git a/test/DebugInfo/array.ll b/test/DebugInfo/array.ll
index e5e07ffe9942..72b0b994c4de 100644
--- a/test/DebugInfo/array.ll
+++ b/test/DebugInfo/array.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 < %s | FileCheck %s
+; RUN: %llc_dwarf -O0 < %s | FileCheck %s
; Do not emit AT_upper_bound for an unbounded array.
; radar 9241695
define i32 @main() nounwind ssp {
diff --git a/test/DebugInfo/constant-pointers.ll b/test/DebugInfo/constant-pointers.ll
new file mode 100644
index 000000000000..fdde06d4a2b2
--- /dev/null
+++ b/test/DebugInfo/constant-pointers.ll
@@ -0,0 +1,51 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj %s -o - | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; Ensure that pointer constants are emitted as unsigned data. Alternatively,
+; these could be signless data (dataN).
+
+; Built with Clang from:
+; template <void *V, void (*F)(), int i>
+; void func() {}
+; template void func<nullptr, nullptr, 42>();
+
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_TAG_template_value_parameter
+; CHECK: DW_AT_name {{.*}} "V"
+; CHECK: DW_AT_const_value [DW_FORM_udata] (0)
+; CHECK: DW_TAG_template_value_parameter
+; CHECK: DW_AT_name {{.*}} "F"
+; CHECK: DW_AT_const_value [DW_FORM_udata] (0)
+
+; Function Attrs: nounwind uwtable
+define weak_odr void @_Z4funcILPv0ELPFvvE0ELi42EEvv() #0 {
+entry:
+ ret void, !dbg !18
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!15, !16}
+!llvm.ident = !{!17}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/constant-pointers.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"constant-pointers.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func<nullptr, nullptr, 42>", metadata !"func<nullptr, nullptr, 42>", metadata !"_Z4funcILPv0ELPFvvE0ELi42EEvv", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z4funcILPv0ELPFvvE0ELi42EEvv, metadata !8, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [func<nullptr, nullptr, 42>]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/constant-pointers.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{metadata !9, metadata !11, metadata !13}
+!9 = metadata !{i32 786480, null, metadata !"V", metadata !10, i8 0, null, i32 0, i32 0} ; [ DW_TAG_template_value_parameter ]
+!10 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!11 = metadata !{i32 786480, null, metadata !"F", metadata !12, i8 0, null, i32 0, i32 0} ; [ DW_TAG_template_value_parameter ]
+!12 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !6} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!13 = metadata !{i32 786480, null, metadata !"i", metadata !14, i32 42, null, i32 0, i32 0} ; [ DW_TAG_template_value_parameter ]
+!14 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!15 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!16 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!17 = metadata !{metadata !"clang version 3.5.0 "}
+!18 = metadata !{i32 3, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/cross-cu-inlining.ll b/test/DebugInfo/cross-cu-inlining.ll
new file mode 100644
index 000000000000..8a0e3c568f1c
--- /dev/null
+++ b/test/DebugInfo/cross-cu-inlining.ll
@@ -0,0 +1,130 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck -implicit-check-not=DW_TAG %s
+
+; Build from source:
+; $ clang++ a.cpp b.cpp -g -c -emit-llvm
+; $ llvm-link a.bc b.bc -o ab.bc
+; $ opt -inline ab.bc -o ab-opt.bc
+; $ cat a.cpp
+; extern int i;
+; int func(int);
+; int main() {
+; return func(i);
+; }
+; $ cat b.cpp
+; int __attribute__((always_inline)) func(int x) {
+; return x * 2;
+; }
+
+; Ensure that func inlined into main is described and references the abstract
+; definition in b.cpp's CU.
+
+; CHECK: DW_TAG_compile_unit
+; CHECK: DW_AT_name {{.*}} "a.cpp"
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_AT_type [DW_FORM_ref_addr] (0x00000000[[INT:.*]])
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK: DW_AT_abstract_origin {{.*}}[[ABS_FUNC:........]])
+; CHECK: DW_TAG_formal_parameter
+; CHECK: DW_AT_abstract_origin {{.*}}[[ABS_VAR:........]])
+
+; Check the abstract definition is in the 'b.cpp' CU and doesn't contain any
+; concrete information (address range or variable location)
+; CHECK: DW_TAG_compile_unit
+; CHECK: DW_AT_name {{.*}} "b.cpp"
+; CHECK: 0x[[ABS_FUNC]]: DW_TAG_subprogram
+; CHECK-NOT: DW_AT_low_pc
+; CHECK: 0x[[ABS_VAR]]: DW_TAG_formal_parameter
+; CHECK-NOT: DW_AT_location
+; CHECK: DW_AT_type [DW_FORM_ref4] {{.*}} {0x[[INT]]}
+; CHECK-NOT: DW_AT_location
+
+; CHECK: 0x[[INT]]: DW_TAG_base_type
+; CHECK: DW_AT_name {{.*}} "int"
+
+; Check the concrete out of line definition references the abstract and
+; provides the address range and variable location
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_AT_low_pc
+; CHECK: DW_AT_abstract_origin {{.*}} {0x[[ABS_FUNC]]}
+; CHECK: DW_TAG_formal_parameter
+; CHECK: DW_AT_location
+; CHECK: DW_AT_abstract_origin {{.*}} {0x[[ABS_VAR]]}
+
+
+@i = external global i32
+
+; Function Attrs: uwtable
+define i32 @main() #0 {
+entry:
+ %x.addr.i = alloca i32, align 4
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* @i, align 4, !dbg !19
+ %1 = bitcast i32* %x.addr.i to i8*
+ call void @llvm.lifetime.start(i64 4, i8* %1)
+ store i32 %0, i32* %x.addr.i, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %x.addr.i}, metadata !20), !dbg !21
+ %2 = load i32* %x.addr.i, align 4, !dbg !22
+ %mul.i = mul nsw i32 %2, 2, !dbg !22
+ %3 = bitcast i32* %x.addr.i to i8*, !dbg !22
+ call void @llvm.lifetime.end(i64 4, i8* %3), !dbg !22
+ ret i32 %mul.i, !dbg !19
+}
+
+; Function Attrs: alwaysinline nounwind uwtable
+define i32 @_Z4funci(i32 %x) #1 {
+entry:
+ %x.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %x.addr}, metadata !20), !dbg !23
+ %0 = load i32* %x.addr, align 4, !dbg !24
+ %mul = mul nsw i32 %0, 2, !dbg !24
+ ret i32 %mul, !dbg !24
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #2
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #3
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #3
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { alwaysinline nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
+
+!llvm.dbg.cu = !{!0, !9}
+!llvm.module.flags = !{!16, !17}
+!llvm.ident = !{!18, !18}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/a.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"a.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [main]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/a.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786449, metadata !10, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !11, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/b.cpp] [DW_LANG_C_plus_plus]
+!10 = metadata !{metadata !"b.cpp", metadata !"/tmp/dbginfo"}
+!11 = metadata !{metadata !12}
+!12 = metadata !{i32 786478, metadata !10, metadata !13, metadata !"func", metadata !"func", metadata !"_Z4funci", i32 1, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @_Z4funci, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [func]
+!13 = metadata !{i32 786473, metadata !10} ; [ DW_TAG_file_type ] [/tmp/dbginfo/b.cpp]
+!14 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!15 = metadata !{metadata !8, metadata !8}
+!16 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!17 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!18 = metadata !{metadata !"clang version 3.5.0 "}
+!19 = metadata !{i32 4, i32 0, metadata !4, null}
+!20 = metadata !{i32 786689, metadata !12, metadata !"x", metadata !13, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [x] [line 1]
+!21 = metadata !{i32 1, i32 0, metadata !12, metadata !19}
+!22 = metadata !{i32 2, i32 0, metadata !12, metadata !19}
+!23 = metadata !{i32 1, i32 0, metadata !12, null}
+!24 = metadata !{i32 2, i32 0, metadata !12, null}
+
diff --git a/test/DebugInfo/cross-cu-linkonce.ll b/test/DebugInfo/cross-cu-linkonce.ll
new file mode 100644
index 000000000000..660d5709c7a8
--- /dev/null
+++ b/test/DebugInfo/cross-cu-linkonce.ll
@@ -0,0 +1,73 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; Built from source:
+; $ clang++ a.cpp b.cpp -g -c -emit-llvm
+; $ llvm-link a.bc b.bc -o ab.bc
+; $ cat a.cpp
+; # 1 "func.h"
+; inline int func(int i) {
+; return i * 2;
+; }
+; int (*x)(int) = &func;
+; $ cat b.cpp
+; # 1 "func.h"
+; inline int func(int i) {
+; return i * 2;
+; }
+; int (*y)(int) = &func;
+
+; CHECK: DW_TAG_compile_unit
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "func"
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_TAG_subprogram
+
+@x = global i32 (i32)* @_Z4funci, align 8
+@y = global i32 (i32)* @_Z4funci, align 8
+
+; Function Attrs: inlinehint nounwind uwtable
+define linkonce_odr i32 @_Z4funci(i32 %i) #0 {
+ %1 = alloca i32, align 4
+ store i32 %i, i32* %1, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %1}, metadata !20), !dbg !21
+ %2 = load i32* %1, align 4, !dbg !22
+ %3 = mul nsw i32 %2, 2, !dbg !22
+ ret i32 %3, !dbg !22
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { inlinehint nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0, !13}
+!llvm.module.flags = !{!17, !18}
+!llvm.ident = !{!19, !19}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !10, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/a.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"a.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"func", metadata !"func", metadata !"_Z4funci", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @_Z4funci, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [func]
+!5 = metadata !{metadata !"func.h", metadata !"/tmp/dbginfo"}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [/tmp/dbginfo/func.h]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{metadata !9, metadata !9}
+!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = metadata !{metadata !11}
+!11 = metadata !{i32 786484, i32 0, null, metadata !"x", metadata !"x", metadata !"", metadata !6, i32 4, metadata !12, i32 0, i32 1, i32 (i32)** @x, null} ; [ DW_TAG_variable ] [x] [line 4] [def]
+!12 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !7} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!13 = metadata !{i32 786449, metadata !14, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !15, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/b.cpp] [DW_LANG_C_plus_plus]
+!14 = metadata !{metadata !"b.cpp", metadata !"/tmp/dbginfo"}
+!15 = metadata !{metadata !16}
+!16 = metadata !{i32 786484, i32 0, null, metadata !"y", metadata !"y", metadata !"", metadata !6, i32 4, metadata !12, i32 0, i32 1, i32 (i32)** @y, null} ; [ DW_TAG_variable ] [y] [line 4] [def]
+!17 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!18 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!19 = metadata !{metadata !"clang version 3.5.0 "}
+!20 = metadata !{i32 786689, metadata !4, metadata !"i", metadata !6, i32 16777217, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 1]
+!21 = metadata !{i32 1, i32 0, metadata !4, null}
+!22 = metadata !{i32 2, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/cu-line-tables.ll b/test/DebugInfo/cu-line-tables.ll
new file mode 100644
index 000000000000..2496f3f3e87b
--- /dev/null
+++ b/test/DebugInfo/cu-line-tables.ll
@@ -0,0 +1,51 @@
+; REQUIRES: object-emission
+; RUN: %llc_dwarf -O0 -filetype=obj %s -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; Check that we don't emit ranges if we're emitting line tables only.
+
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_AT_ranges
+; CHECK: DW_TAG_subprogram
+
+; FIXME: We probably want to avoid printing out anything if the section isn't there.
+; CHECK: .debug_ranges contents:
+; CHECK-NOT: 00000000 <End of list>
+
+; CHECK: .debug_pubnames contents:
+; CHECK-NOT: Offset
+
+; CHECK: .debug_pubtypes contents:
+; CHECK-NOT: Offset
+
+; Function Attrs: nounwind uwtable
+define i32 @f(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ %0 = load i32* %a.addr, align 4, !dbg !14
+ %add = add nsw i32 %0, 4, !dbg !14
+ ret i32 %add, !dbg !14
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 (trunk 197756) (llvm/trunk 197768)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/foo.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"foo.c", metadata !"/usr/local/google/home/echristo/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f", metadata !"f", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @f, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [f]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/foo.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5 (trunk 197756) (llvm/trunk 197768)"}
+!14 = metadata !{i32 2, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/cu-range-hole.ll b/test/DebugInfo/cu-range-hole.ll
new file mode 100644
index 000000000000..65a4956a6fc2
--- /dev/null
+++ b/test/DebugInfo/cu-range-hole.ll
@@ -0,0 +1,74 @@
+; REQUIRES: object-emission
+; RUN: %llc_dwarf -O0 -filetype=obj %s -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; Check that we emit ranges for this CU since we have a function with and
+; without debug info.
+; Note: This depends upon the order of output in the .o file. Currently it's
+; in order of the output to make sure that the CU has multiple ranges since
+; there's a function in the middle. If they were together then it would have
+; a single range and no DW_AT_ranges.
+; CHECK: DW_TAG_compile_unit
+; CHECK: DW_AT_ranges
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_TAG_subprogram
+
+; Function Attrs: nounwind uwtable
+define i32 @b(i32 %c) #0 {
+entry:
+ %c.addr = alloca i32, align 4
+ store i32 %c, i32* %c.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %c.addr}, metadata !13), !dbg !14
+ %0 = load i32* %c.addr, align 4, !dbg !14
+ %add = add nsw i32 %0, 1, !dbg !14
+ ret i32 %add, !dbg !14
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @a(i32 %b) #0 {
+entry:
+ %b.addr = alloca i32, align 4
+ store i32 %b, i32* %b.addr, align 4
+ %0 = load i32* %b.addr, align 4
+ %add = add nsw i32 %0, 1
+ ret i32 %add
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @d(i32 %e) #0 {
+entry:
+ %e.addr = alloca i32, align 4
+ store i32 %e, i32* %e.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %e.addr}, metadata !15), !dbg !16
+ %0 = load i32* %e.addr, align 4, !dbg !16
+ %add = add nsw i32 %0, 1, !dbg !16
+ ret i32 %add, !dbg !16
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.ident = !{!0, !0}
+!llvm.dbg.cu = !{!1}
+!llvm.module.flags = !{!11, !12}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)"}
+!1 = metadata !{i32 786449, metadata !2, i32 12, metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)", i1 false, metadata !"", i32 0, metadata !3, metadata !3, metadata !4, metadata !3, metadata !3, metadata !"", i32 1}
+!2 = metadata !{metadata !"b.c", metadata !"/usr/local/google/home/echristo"}
+!3 = metadata !{}
+!4 = metadata !{metadata !5, metadata !10}
+!5 = metadata !{i32 786478, metadata !2, metadata !6, metadata !"b", metadata !"b", metadata !"", i32 1, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @b, null, null, metadata !3, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [b]
+!6 = metadata !{i32 786473, metadata !2} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/b.c]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{metadata !9, metadata !9}
+!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = metadata !{i32 786478, metadata !2, metadata !6, metadata !"d", metadata !"d", metadata !"", i32 3, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @d, null, null, metadata !3, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [d]
+!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!12 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!13 = metadata !{i32 786689, metadata !5, metadata !"c", metadata !6, i32 16777217, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [c] [line 1]
+!14 = metadata !{i32 1, i32 0, metadata !5, null}
+!15 = metadata !{i32 786689, metadata !10, metadata !"e", metadata !6, i32 16777219, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [e] [line 3]
+!16 = metadata !{i32 3, i32 0, metadata !10, null}
diff --git a/test/DebugInfo/cu-ranges.ll b/test/DebugInfo/cu-ranges.ll
new file mode 100644
index 000000000000..9262a2239c7f
--- /dev/null
+++ b/test/DebugInfo/cu-ranges.ll
@@ -0,0 +1,71 @@
+; REQUIRES: object-emission
+; RUN: %llc_dwarf -O0 -filetype=obj %s -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; Check that we emit ranges for this CU, which has a function in a non-traditional section and one in a normal section.
+
+; CHECK: DW_TAG_compile_unit
+; CHECK: DW_AT_ranges
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_AT_low_pc
+; CHECK: DW_AT_high_pc
+; CHECK: DW_TAG_subprogram
+; CHECK: DW_AT_low_pc
+; CHECK: DW_AT_high_pc
+
+; CHECK: .debug_ranges contents:
+; FIXME: When we get better dumping facilities we'll want to elaborate here.
+; CHECK: 00000000 <End of list>
+
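+; A hypothetical C sketch of what this IR corresponds to (the original source
+; is not included in the test; the attribute spelling is an assumption): 'foo'
+; is placed in a non-default section, splitting the CU's code across two
+; sections and forcing DW_AT_ranges, while 'bar' stays in the normal text
+; section.
+;
+;   __attribute__((section("__TEXT,__foo"))) int foo(int a) { return a + 5; }
+;   int bar(int a) { return a + 5; }
+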
+; Function Attrs: nounwind uwtable
+define i32 @foo(i32 %a) #0 section "__TEXT,__foo" {
+entry:
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %a.addr}, metadata !13), !dbg !14
+ %0 = load i32* %a.addr, align 4, !dbg !15
+ %add = add nsw i32 %0, 5, !dbg !15
+ ret i32 %add, !dbg !15
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @bar(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %a.addr}, metadata !16), !dbg !17
+ %0 = load i32* %a.addr, align 4, !dbg !18
+ %add = add nsw i32 %0, 5, !dbg !18
+ ret i32 %add, !dbg !18
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/foo.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"foo.c", metadata !"/usr/local/google/home/echristo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !9}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/foo.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"bar", metadata !"bar", metadata !"", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @bar, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [bar]
+!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{metadata !"clang version 3.5.0 (trunk 204164) (llvm/trunk 204183)"}
+!13 = metadata !{i32 786689, metadata !4, metadata !"a", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [a] [line 1]
+!14 = metadata !{i32 1, i32 0, metadata !4, null}
+!15 = metadata !{i32 2, i32 0, metadata !4, null}
+!16 = metadata !{i32 786689, metadata !9, metadata !"a", metadata !5, i32 16777221, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [a] [line 5]
+!17 = metadata !{i32 5, i32 0, metadata !9, null}
+!18 = metadata !{i32 6, i32 0, metadata !9, null}
+
diff --git a/test/DebugInfo/dead-argument-order.ll b/test/DebugInfo/dead-argument-order.ll
new file mode 100644
index 000000000000..ea805a4872fa
--- /dev/null
+++ b/test/DebugInfo/dead-argument-order.ll
@@ -0,0 +1,81 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; Built from the following source with clang -O1
+; struct S { int i; };
+; int function(struct S s, int i) { return s.i + i; }
+
+; Due to the X86_64 ABI, 's' is passed in registers and once optimized, the
+; entirety of 's' is never reconstituted, since only the int is required, and
+; thus the variable's location is unknown/dead to debug info.
+
+; Future/current work should enable us to describe partially available
+; variables; in this case, the available part happens to be the entire
+; variable.
+
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "function"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "s"
+; CHECK-NOT: DW_TAG
+; FIXME: Even though 's' is never reconstituted into a struct, the one member
+; variable is still live and used, and so we should be able to describe 's's
+; location as the location of that int.
+; CHECK-NOT: DW_AT_location
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_location
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "i"
+
+
+%struct.S = type { i32 }
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @_Z8function1Si(i32 %s.coerce, i32 %i) #0 {
+entry:
+ tail call void @llvm.dbg.declare(metadata !19, metadata !14), !dbg !20
+ tail call void @llvm.dbg.value(metadata !{i32 %i}, i64 0, metadata !15), !dbg !20
+ %add = add nsw i32 %i, %s.coerce, !dbg !20
+ ret i32 %add, !dbg !20
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { nounwind readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!16, !17}
+!llvm.ident = !{!18}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !3, metadata !8, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/dead-argument-order.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"dead-argument-order.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"S", i32 1, i64 32, i64 32, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS1S"} ; [ DW_TAG_structure_type ] [S] [line 1, size 32, align 32, offset 0] [def] [from ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 786445, metadata !1, metadata !"_ZTS1S", metadata !"i", i32 1, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_member ] [i] [line 1, size 32, align 32, offset 0] [from int]
+!7 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!8 = metadata !{metadata !9}
+!9 = metadata !{i32 786478, metadata !1, metadata !10, metadata !"function", metadata !"function", metadata !"_Z8function1Si", i32 2, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i32)* @_Z8function1Si, null, null, metadata !13, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [function]
+!10 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/dead-argument-order.cpp]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{metadata !7, metadata !4, metadata !7}
+!13 = metadata !{metadata !14, metadata !15}
+!14 = metadata !{i32 786689, metadata !9, metadata !"s", metadata !10, i32 16777218, metadata !"_ZTS1S", i32 0, i32 0} ; [ DW_TAG_arg_variable ] [s] [line 2]
+!15 = metadata !{i32 786689, metadata !9, metadata !"i", metadata !10, i32 33554434, metadata !7, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 2]
+!16 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!17 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!18 = metadata !{metadata !"clang version 3.5.0 "}
+!19 = metadata !{%struct.S* undef}
+!20 = metadata !{i32 2, i32 0, metadata !9, null}
+
diff --git a/test/DebugInfo/debug-info-qualifiers.ll b/test/DebugInfo/debug-info-qualifiers.ll
new file mode 100644
index 000000000000..b624d3874cb3
--- /dev/null
+++ b/test/DebugInfo/debug-info-qualifiers.ll
@@ -0,0 +1,100 @@
+; REQUIRES: object-emission
+; Test (r)value qualifiers on C++11 non-static member functions.
+; Generated from tools/clang/test/CodeGenCXX/debug-info-qualifiers.cpp
+;
+; class A {
+; public:
+; void l() const &;
+; void r() const &&;
+; };
+;
+; void g() {
+; A a;
+; auto pl = &A::l;
+; auto pr = &A::r;
+; }
+;
+; RUN: %llc_dwarf -filetype=obj -O0 < %s | llvm-dwarfdump - | FileCheck %s
+; CHECK: DW_TAG_subroutine_type DW_CHILDREN_yes
+; CHECK-NEXT: DW_AT_reference DW_FORM_flag_present
+; CHECK: DW_TAG_subroutine_type DW_CHILDREN_yes
+; CHECK-NEXT: DW_AT_rvalue_reference DW_FORM_flag_present
+;
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG_subprogram
+; CHECK: DW_AT_name {{.*}}"l"
+; CHECK-NOT: DW_TAG_subprogram
+; CHECK: DW_AT_reference [DW_FORM_flag_present] (true)
+
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG_subprogram
+; CHECK: DW_AT_name {{.*}}"r"
+; CHECK-NOT: DW_TAG_subprogram
+; CHECK: DW_AT_rvalue_reference [DW_FORM_flag_present] (true)
+
+%class.A = type { i8 }
+
+; Function Attrs: nounwind
+define void @_Z1gv() #0 {
+ %a = alloca %class.A, align 1
+ %pl = alloca { i64, i64 }, align 8
+ %pr = alloca { i64, i64 }, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A* %a}, metadata !24), !dbg !25
+ call void @llvm.dbg.declare(metadata !{{ i64, i64 }* %pl}, metadata !26), !dbg !31
+ store { i64, i64 } { i64 ptrtoint (void (%class.A*)* @_ZNKR1A1lEv to i64), i64 0 }, { i64, i64 }* %pl, align 8, !dbg !31
+ call void @llvm.dbg.declare(metadata !{{ i64, i64 }* %pr}, metadata !32), !dbg !35
+ store { i64, i64 } { i64 ptrtoint (void (%class.A*)* @_ZNKO1A1rEv to i64), i64 0 }, { i64, i64 }* %pr, align 8, !dbg !35
+ ret void, !dbg !36
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+declare void @_ZNKR1A1lEv(%class.A*)
+
+declare void @_ZNKO1A1rEv(%class.A*)
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!21, !22}
+!llvm.ident = !{!23}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !16, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786434, metadata !5, null, metadata !"A", i32 2, i64 8, i64 8, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_class_type ] [A] [line 2, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !"debug-info-qualifiers.cpp", metadata !""}
+!6 = metadata !{metadata !7, metadata !13}
+!7 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"l", metadata !"l", metadata !"_ZNKR1A1lEv", i32 5, metadata !8, i1 false, i1 false, i32 0, i32 0, null, i32 16640, i1 false, null, null, i32 0, metadata !12, i32 5} ; [ DW_TAG_subprogram ] [line 5] [reference] [l]
+!8 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 16384, null, metadata !9, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [reference] [from ]
+!9 = metadata !{null, metadata !10}
+!10 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !11} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from ]
+!11 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from _ZTS1A]
+!12 = metadata !{i32 786468}
+!13 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"r", metadata !"r", metadata !"_ZNKO1A1rEv", i32 7, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i32 33024, i1 false, null, null, i32 0, metadata !15, i32 7} ; [ DW_TAG_subprogram ] [line 7] [rvalue reference] [r]
+!14 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 32768, null, metadata !9, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [rvalue reference] [from ]
+!15 = metadata !{i32 786468}
+!16 = metadata !{metadata !17}
+!17 = metadata !{i32 786478, metadata !5, metadata !18, metadata !"g", metadata !"g", metadata !"_Z1gv", i32 10, metadata !19, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z1gv, null, null, metadata !2, i32 10} ; [ DW_TAG_subprogram ] [line 10] [def] [g]
+!18 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ]
+!19 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !20, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!20 = metadata !{null}
+!21 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!22 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!23 = metadata !{metadata !"clang version 3.5 "}
+!24 = metadata !{i32 786688, metadata !17, metadata !"a", metadata !18, i32 11, metadata !4, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 11]
+!25 = metadata !{i32 11, i32 0, metadata !17, null}
+!26 = metadata !{i32 786688, metadata !17, metadata !"pl", metadata !18, i32 16, metadata !27, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [pl] [line 16]
+!27 = metadata !{i32 786463, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !28, metadata !"_ZTS1A"} ; [ DW_TAG_ptr_to_member_type ] [line 0, size 0, align 0, offset 0] [from ]
+!28 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 16384, null, metadata !29, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [reference] [from ]
+!29 = metadata !{null, metadata !30}
+!30 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!31 = metadata !{i32 16, i32 0, metadata !17, null}
+!32 = metadata !{i32 786688, metadata !17, metadata !"pr", metadata !18, i32 21, metadata !33, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [pr] [line 21]
+!33 = metadata !{i32 786463, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !34, metadata !"_ZTS1A"} ; [ DW_TAG_ptr_to_member_type ] [line 0, size 0, align 0, offset 0] [from ]
+!34 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 32768, null, metadata !29, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [rvalue reference] [from ]
+!35 = metadata !{i32 21, i32 0, metadata !17, null}
+!36 = metadata !{i32 22, i32 0, metadata !17, null}
diff --git a/test/DebugInfo/dwarf-public-names.ll b/test/DebugInfo/dwarf-public-names.ll
index fc3363133e68..72189641e3aa 100644
--- a/test/DebugInfo/dwarf-public-names.ll
+++ b/test/DebugInfo/dwarf-public-names.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -generate-dwarf-pub-sections=Enable -filetype=obj -o %t.o < %s
+; RUN: %llc_dwarf -generate-dwarf-pub-sections=Enable -filetype=obj -o %t.o < %s
; RUN: llvm-dwarfdump -debug-dump=pubnames %t.o | FileCheck %s
; ModuleID = 'dwarf-public-names.cpp'
;
@@ -40,12 +40,14 @@
; CHECK: version = 0x0002
; Check for each name in the output.
-; CHECK: global_namespace_variable
-; CHECK: global_namespace_function
-; CHECK: static_member_function
-; CHECK: global_variable
-; CHECK: global_function
-; CHECK: member_function
+; CHECK-DAG: "ns"
+; CHECK-DAG: "C::static_member_function"
+; CHECK-DAG: "global_variable"
+; CHECK-DAG: "ns::global_namespace_variable"
+; CHECK-DAG: "ns::global_namespace_function"
+; CHECK-DAG: "global_function"
+; CHECK-DAG: "C::static_member_variable"
+; CHECK-DAG: "C::member_function"
%struct.C = type { i8 }
@@ -88,8 +90,8 @@ attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!38}
-!0 = metadata !{i32 786449, metadata !37, i32 4, metadata !"clang version 3.3 (http://llvm.org/git/clang.git a09cd8103a6a719cb2628cdf0c91682250a17bd2) (http://llvm.org/git/llvm.git 47d03cec0afca0c01ae42b82916d1d731716cd20)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !2, metadata !24, metadata !24, metadata !""} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !37, i32 4, metadata !"clang version 3.3 (http://llvm.org/git/clang.git a09cd8103a6a719cb2628cdf0c91682250a17bd2) (http://llvm.org/git/llvm.git 47d03cec0afca0c01ae42b82916d1d731716cd20)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !2, metadata !24, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{}
!2 = metadata !{metadata !3, metadata !18, metadata !19, metadata !20}
!3 = metadata !{i32 786478, metadata !4, null, metadata !"member_function", metadata !"member_function", metadata !"_ZN1C15member_functionEv", i32 9, metadata !5, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%struct.C*)* @_ZN1C15member_functionEv, null, metadata !12, metadata !1, i32 9} ; [ DW_TAG_subprogram ] [line 9] [def] [member_function]
!4 = metadata !{i32 786473, metadata !37} ; [ DW_TAG_file_type ]
@@ -109,7 +111,7 @@ attributes #1 = { nounwind readnone }
!18 = metadata !{i32 786478, metadata !4, null, metadata !"static_member_function", metadata !"static_member_function", metadata !"_ZN1C22static_member_functionEv", i32 13, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_ZN1C22static_member_functionEv, null, metadata !14, metadata !1, i32 13} ; [ DW_TAG_subprogram ] [line 13] [def] [static_member_function]
!19 = metadata !{i32 786478, metadata !4, metadata !4, metadata !"global_function", metadata !"global_function", metadata !"_Z15global_functionv", i32 19, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z15global_functionv, null, null, metadata !1, i32 19} ; [ DW_TAG_subprogram ] [line 19] [def] [global_function]
!20 = metadata !{i32 786478, metadata !4, metadata !21, metadata !"global_namespace_function", metadata !"global_namespace_function", metadata !"_ZN2ns25global_namespace_functionEv", i32 24, metadata !22, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN2ns25global_namespace_functionEv, null, null, metadata !1, i32 24} ; [ DW_TAG_subprogram ] [line 24] [def] [global_namespace_function]
-!21 = metadata !{i32 786489, null, metadata !"ns", metadata !4, i32 23} ; [ DW_TAG_namespace ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp]
+!21 = metadata !{i32 786489, metadata !4, null, metadata !"ns", i32 23} ; [ DW_TAG_namespace ] [/usr2/kparzysz/s.hex/t/dwarf-public-names.cpp]
!22 = metadata !{i32 786453, i32 0, null, i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !23, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!23 = metadata !{null}
!24 = metadata !{metadata !25, metadata !26, metadata !27}
diff --git a/test/DebugInfo/dwarfdump-inlining.test b/test/DebugInfo/dwarfdump-inlining.test
deleted file mode 100644
index e926634d52f6..000000000000
--- a/test/DebugInfo/dwarfdump-inlining.test
+++ /dev/null
@@ -1,28 +0,0 @@
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-inl-test.elf-x86-64 --address=0x710 \
-RUN: --inlining --functions | FileCheck %s -check-prefix DEEP_STACK
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-inl-test.elf-x86-64 --address=0x7d1 \
-RUN: --inlining | FileCheck %s -check-prefix SHORTER_STACK
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-inl-test.elf-x86-64 --address=0x785 \
-RUN: --inlining | FileCheck %s -check-prefix SHORT_STACK
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-inl-test.elf-x86-64 --address=0x737 \
-RUN: --functions | FileCheck %s -check-prefix INL_FUNC_NAME
-
-DEEP_STACK: inlined_h
-DEEP_STACK-NEXT: dwarfdump-inl-test.h:2
-DEEP_STACK-NEXT: inlined_g
-DEEP_STACK-NEXT: dwarfdump-inl-test.h:7
-DEEP_STACK-NEXT: inlined_f
-DEEP_STACK-NEXT: dwarfdump-inl-test.cc:3
-DEEP_STACK-NEXT: main
-DEEP_STACK-NEXT: dwarfdump-inl-test.cc:8
-
-SHORTER_STACK: dwarfdump-inl-test.h:7
-SHORTER_STACK-NEXT: dwarfdump-inl-test.cc:3
-SHORTER_STACK-NEXT: dwarfdump-inl-test.cc:8
-
-SHORT_STACK: dwarfdump-inl-test.cc:3
-SHORT_STACK-NEXT: dwarfdump-inl-test.cc:8
-
-INL_FUNC_NAME: inlined_g
-INL_FUNC_NAME-NEXT: dwarfdump-inl-test.h:7
-
diff --git a/test/DebugInfo/dwarfdump-line-dwo.test b/test/DebugInfo/dwarfdump-line-dwo.test
new file mode 100644
index 000000000000..3178a5d29922
--- /dev/null
+++ b/test/DebugInfo/dwarfdump-line-dwo.test
@@ -0,0 +1,6 @@
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-line-dwo.elf-x86-64 | FileCheck %s
+
+CHECK: .debug_line.dwo contents:
+CHECK: version: 4
+CHECK: max_ops_per_inst: 1
+CHECK: file_names[ 1]{{.*}}dwarfdump-line-dwo.cc
diff --git a/test/DebugInfo/dwarfdump-ranges.test b/test/DebugInfo/dwarfdump-ranges.test
new file mode 100644
index 000000000000..c9e33dcdc975
--- /dev/null
+++ b/test/DebugInfo/dwarfdump-ranges.test
@@ -0,0 +1,10 @@
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test4.elf-x86-64 | FileCheck %s
+
+CHECK: .debug_ranges contents:
+CHECK-NEXT: 00000000 000000000000062c 0000000000000637
+CHECK-NEXT: 00000000 0000000000000637 000000000000063d
+CHECK-NEXT: 00000000 <End of list>
+CHECK-NEXT: 00000030 0000000000000640 000000000000064b
+CHECK-NEXT: 00000030 0000000000000637 000000000000063d
+CHECK-NEXT: 00000030 <End of list>
+
diff --git a/test/DebugInfo/dwarfdump-test.test b/test/DebugInfo/dwarfdump-test.test
deleted file mode 100644
index 058d6a36981a..000000000000
--- a/test/DebugInfo/dwarfdump-test.test
+++ /dev/null
@@ -1,56 +0,0 @@
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test.elf-x86-64 \
-RUN: --address=0x400559 --functions | FileCheck %s -check-prefix MAIN
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test.elf-x86-64 \
-RUN: --address=0x400528 --functions | FileCheck %s -check-prefix FUNCTION
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test.elf-x86-64 \
-RUN: --address=0x400586 --functions | FileCheck %s -check-prefix CTOR_WITH_SPEC
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test2.elf-x86-64 \
-RUN: --address=0x4004e8 --functions | FileCheck %s -check-prefix MANY_CU_1
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test2.elf-x86-64 \
-RUN: --address=0x4004f4 --functions | FileCheck %s -check-prefix MANY_CU_2
-RUN: llvm-dwarfdump "%p/Inputs/dwarfdump-test3.elf-x86-64 space" \
-RUN: --address=0x640 --functions | FileCheck %s -check-prefix ABS_ORIGIN_1
-RUN: llvm-dwarfdump "%p/Inputs/dwarfdump-test3.elf-x86-64 space" \
-RUN: --address=0x633 --functions | FileCheck %s -check-prefix INCLUDE_TEST_1
-RUN: llvm-dwarfdump "%p/Inputs/dwarfdump-test3.elf-x86-64 space" \
-RUN: --address=0x62d --functions | FileCheck %s -check-prefix INCLUDE_TEST_2
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test4.elf-x86-64 \
-RUN: --address=0x62c --functions \
-RUN: | FileCheck %s -check-prefix MANY_SEQ_IN_LINE_TABLE
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test4.elf-x86-64 \
-RUN: | FileCheck %s -check-prefix DEBUG_RANGES
-
-MAIN: main
-MAIN-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:16
-
-FUNCTION: _Z1fii
-FUNCTION-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:11
-
-CTOR_WITH_SPEC: DummyClass
-CTOR_WITH_SPEC-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:4
-
-MANY_CU_1: a
-MANY_CU_1-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test2-helper.cc:2
-
-MANY_CU_2: main
-MANY_CU_2-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test2-main.cc:4
-
-ABS_ORIGIN_1: C
-ABS_ORIGIN_1-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test3.cc:3
-
-INCLUDE_TEST_1: _Z3do1v
-INCLUDE_TEST_1-NEXT: /tmp/include{{[/\\]}}dwarfdump-test3-decl.h:7
-
-INCLUDE_TEST_2: _Z3do2v
-INCLUDE_TEST_2-NEXT: /tmp/dbginfo{{[/\\]}}include{{[/\\]}}dwarfdump-test3-decl2.h:1
-
-MANY_SEQ_IN_LINE_TABLE: _Z1cv
-MANY_SEQ_IN_LINE_TABLE-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test4-part1.cc:2
-
-DEBUG_RANGES: .debug_ranges contents:
-DEBUG_RANGES-NEXT: 00000000 000000000000062c 0000000000000637
-DEBUG_RANGES-NEXT: 00000000 0000000000000637 000000000000063d
-DEBUG_RANGES-NEXT: 00000000 <End of list>
-DEBUG_RANGES-NEXT: 00000030 0000000000000640 000000000000064b
-DEBUG_RANGES-NEXT: 00000030 0000000000000637 000000000000063d
-DEBUG_RANGES-NEXT: 00000030 <End of list>
diff --git a/test/DebugInfo/dwarfdump-zlib.test b/test/DebugInfo/dwarfdump-zlib.test
index 8ce2cf7690d0..cbd85ca093c1 100644
--- a/test/DebugInfo/dwarfdump-zlib.test
+++ b/test/DebugInfo/dwarfdump-zlib.test
@@ -1,12 +1,6 @@
REQUIRES: zlib
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test-zlib.elf-x86-64 \
-RUN: | FileCheck %s -check-prefix FULLDUMP
-RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test-zlib.elf-x86-64 \
-RUN: --address=0x400559 --functions | FileCheck %s -check-prefix MAIN
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-test-zlib.elf-x86-64 | FileCheck %s
-FULLDUMP: .debug_abbrev contents
-FULLDUMP: .debug_info contents
-
-MAIN: main
-MAIN-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test-zlib.cc:16
+CHECK: .debug_abbrev contents
+CHECK: .debug_info contents
diff --git a/test/DebugInfo/empty.ll b/test/DebugInfo/empty.ll
new file mode 100644
index 000000000000..cf40523e7e4a
--- /dev/null
+++ b/test/DebugInfo/empty.ll
@@ -0,0 +1,31 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf < %s -filetype=obj | llvm-dwarfdump - | FileCheck %s
+; RUN: %llc_dwarf -split-dwarf=Enable < %s -filetype=obj | llvm-dwarfdump - | FileCheck --check-prefix=FISSION %s
+
+; darwin has a workaround for a linker bug so it always emits one line table entry
+; XFAIL: darwin
+
+; Expect no line table entry since there are no functions or file references in this compile unit
+; CHECK: .debug_line contents:
+; CHECK: Line table prologue:
+; CHECK: total_length: 0x00000019
+; CHECK-NOT: file_names[
+
+; CHECK: .debug_pubnames contents:
+; CHECK-NOT: Offset
+
+; CHECK: .debug_pubtypes contents:
+; CHECK-NOT: Offset
+
+; Don't emit DW_AT_addr_base when there are no addresses.
+; FISSION-NOT: DW_AT_GNU_addr_base [DW_FORM_sec_offset]
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!5}
+
+!0 = metadata !{i32 720913, metadata !4, i32 12, metadata !"clang version 3.1 (trunk 143523)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !2, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{}
+!3 = metadata !{i32 786473, metadata !4} ; [ DW_TAG_file_type ]
+!4 = metadata !{metadata !"empty.c", metadata !"/home/nlewycky"}
+!5 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/DebugInfo/enum.ll b/test/DebugInfo/enum.ll
index bc09846bb8e1..df097a6ea92b 100644
--- a/test/DebugInfo/enum.ll
+++ b/test/DebugInfo/enum.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -O0 -filetype=obj < %s > %t
+; RUN: %llc_dwarf -O0 -filetype=obj < %s > %t
; RUN: llvm-dwarfdump %t | FileCheck %s
; IR generated from the following code compiled with clang -g:
@@ -64,7 +64,7 @@ attributes #1 = { nounwind readnone }
!8 = metadata !{i32 786436, metadata !1, null, metadata !"e2", i32 2, i64 32, i64 32, i32 0, i32 0, null, metadata !9, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [e2] [line 2, size 32, align 32, offset 0] [def] [from ]
!9 = metadata !{metadata !10}
!10 = metadata !{i32 786472, metadata !"X", i64 0} ; [ DW_TAG_enumerator ] [X :: 0]
-!11 = metadata !{i32 0}
+!11 = metadata !{}
!12 = metadata !{metadata !13}
!13 = metadata !{i32 786478, metadata !1, metadata !14, metadata !"func", metadata !"func", metadata !"_Z4funcv", i32 3, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z4funcv, null, null, metadata !11, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [func]
!14 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/enum.cpp]
diff --git a/test/DebugInfo/global.ll b/test/DebugInfo/global.ll
index 9a0c32ad91a3..3c97f0cb2279 100644
--- a/test/DebugInfo/global.ll
+++ b/test/DebugInfo/global.ll
@@ -1,8 +1,11 @@
; REQUIRES: object-emission
-; RUN: llc -O0 -filetype=obj < %s > %t
+; RUN: %llc_dwarf -O0 -filetype=obj < %s > %t
; RUN: llvm-dwarfdump %t | FileCheck %s
+; Also test that the null streamer doesn't crash with debug info.
+; RUN: %llc_dwarf -O0 -filetype=null < %s
+
; generated from the following source compiled to bitcode with clang -g -O1
; static int i;
; int main() {
@@ -25,7 +28,7 @@ attributes #0 = { nounwind readnone uwtable "less-precise-fpmad"="false" "no-fra
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !9, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/global.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"global.cpp", metadata !"/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @main, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [main]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/global.cpp]
diff --git a/test/DebugInfo/incorrect-variable-debugloc.ll b/test/DebugInfo/incorrect-variable-debugloc.ll
new file mode 100644
index 000000000000..284704c54a91
--- /dev/null
+++ b/test/DebugInfo/incorrect-variable-debugloc.ll
@@ -0,0 +1,391 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O2 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; This is a test case that's as reduced as I can get it, though I haven't fully
+; understood the mechanisms by which this bug occurs, so perhaps there's further
+; simplification to be had (it's certainly a bit non-obvious what's going on). I
+; hesitate to hand-craft or otherwise simplify the IR compared to what Clang
+; generates as this is a particular tickling of optimizations and debug location
+; propagation I want a realistic example of.
+
+; Generated with clang-tot -cc1 -g -O2 -w -std=c++11 -fsanitize=address,use-after-return -fcxx-exceptions -fexceptions -x c++ incorrect-variable-debug-loc.cpp -emit-llvm
+
+; struct A {
+; int m_fn1();
+; };
+;
+; struct B {
+; void __attribute__((always_inline)) m_fn2() { i = 0; }
+; int i;
+; };
+;
+; struct C {
+; void m_fn3();
+; int j;
+; B b;
+; };
+;
+; int fn1() {
+; C A;
+; A.b.m_fn2();
+; A.m_fn3();
+; }
+; void C::m_fn3() {
+; A().m_fn1();
+; b.m_fn2();
+; }
+
+; CHECK: DW_TAG_structure_type
+; CHECK-NEXT: DW_AT_name {{.*}} "C"
+; CHECK: [[FN3_DECL:.*]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "m_fn3"
+
+; CHECK: DW_AT_specification {{.*}} {[[FN3_DECL]]}
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "this"
+
+%struct.C = type { i32, %struct.B }
+%struct.B = type { i32 }
+%struct.A = type { i8 }
+
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 1, void ()* @asan.module_ctor }]
+@__asan_option_detect_stack_use_after_return = external global i32
+@__asan_gen_ = private unnamed_addr constant [11 x i8] c"1 32 8 1 A\00", align 1
+@__asan_gen_1 = private unnamed_addr constant [13 x i8] c"1 32 1 3 tmp\00", align 1
+
+; Function Attrs: noreturn sanitize_address
+define i32 @_Z3fn1v() #0 {
+entry:
+ %MyAlloca = alloca [64 x i8], align 32, !dbg !39
+ %0 = ptrtoint [64 x i8]* %MyAlloca to i64, !dbg !39
+ %1 = load i32* @__asan_option_detect_stack_use_after_return, !dbg !39
+ %2 = icmp ne i32 %1, 0, !dbg !39
+ br i1 %2, label %3, label %5
+
+; <label>:3 ; preds = %entry
+ %4 = call i64 @__asan_stack_malloc_0(i64 64, i64 %0), !dbg !39
+ br label %5
+
+; <label>:5 ; preds = %entry, %3
+ %6 = phi i64 [ %0, %entry ], [ %4, %3 ], !dbg !39
+ %7 = add i64 %6, 32, !dbg !39
+ %8 = inttoptr i64 %7 to %struct.C*, !dbg !39
+ %9 = inttoptr i64 %6 to i64*, !dbg !39
+ store i64 1102416563, i64* %9, !dbg !39
+ %10 = add i64 %6, 8, !dbg !39
+ %11 = inttoptr i64 %10 to i64*, !dbg !39
+ store i64 ptrtoint ([11 x i8]* @__asan_gen_ to i64), i64* %11, !dbg !39
+ %12 = add i64 %6, 16, !dbg !39
+ %13 = inttoptr i64 %12 to i64*, !dbg !39
+ store i64 ptrtoint (i32 ()* @_Z3fn1v to i64), i64* %13, !dbg !39
+ %14 = lshr i64 %6, 3, !dbg !39
+ %15 = add i64 %14, 2147450880, !dbg !39
+ %16 = add i64 %15, 0, !dbg !39
+ %17 = inttoptr i64 %16 to i64*, !dbg !39
+ store i64 -868083117767659023, i64* %17, !dbg !39
+ %i.i = getelementptr inbounds %struct.C* %8, i64 0, i32 1, i32 0, !dbg !39
+ %18 = ptrtoint i32* %i.i to i64, !dbg !39
+ %19 = lshr i64 %18, 3, !dbg !39
+ %20 = add i64 %19, 2147450880, !dbg !39
+ %21 = inttoptr i64 %20 to i8*, !dbg !39
+ %22 = load i8* %21, !dbg !39
+ %23 = icmp ne i8 %22, 0, !dbg !39
+ br i1 %23, label %24, label %30, !dbg !39
+
+; <label>:24 ; preds = %5
+ %25 = and i64 %18, 7, !dbg !39
+ %26 = add i64 %25, 3, !dbg !39
+ %27 = trunc i64 %26 to i8, !dbg !39
+ %28 = icmp sge i8 %27, %22, !dbg !39
+ br i1 %28, label %29, label %30
+
+; <label>:29 ; preds = %24
+ call void @__asan_report_store4(i64 %18), !dbg !39
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:30 ; preds = %24, %5
+ store i32 0, i32* %i.i, align 4, !dbg !39, !tbaa !41
+ tail call void @llvm.dbg.value(metadata !{%struct.C* %8}, i64 0, metadata !27), !dbg !46
+ call void @_ZN1C5m_fn3Ev(%struct.C* %8), !dbg !47
+ unreachable, !dbg !47
+}
+
+; Function Attrs: sanitize_address
+define void @_ZN1C5m_fn3Ev(%struct.C* nocapture %this) #1 align 2 {
+entry:
+ %MyAlloca = alloca [64 x i8], align 32, !dbg !48
+ %0 = ptrtoint [64 x i8]* %MyAlloca to i64, !dbg !48
+ %1 = load i32* @__asan_option_detect_stack_use_after_return, !dbg !48
+ %2 = icmp ne i32 %1, 0, !dbg !48
+ br i1 %2, label %3, label %5
+
+; <label>:3 ; preds = %entry
+ %4 = call i64 @__asan_stack_malloc_0(i64 64, i64 %0), !dbg !48
+ br label %5
+
+; <label>:5 ; preds = %entry, %3
+ %6 = phi i64 [ %0, %entry ], [ %4, %3 ], !dbg !48
+ %7 = add i64 %6, 32, !dbg !48
+ %8 = inttoptr i64 %7 to %struct.A*, !dbg !48
+ %9 = inttoptr i64 %6 to i64*, !dbg !48
+ store i64 1102416563, i64* %9, !dbg !48
+ %10 = add i64 %6, 8, !dbg !48
+ %11 = inttoptr i64 %10 to i64*, !dbg !48
+ store i64 ptrtoint ([13 x i8]* @__asan_gen_1 to i64), i64* %11, !dbg !48
+ %12 = add i64 %6, 16, !dbg !48
+ %13 = inttoptr i64 %12 to i64*, !dbg !48
+ store i64 ptrtoint (void (%struct.C*)* @_ZN1C5m_fn3Ev to i64), i64* %13, !dbg !48
+ %14 = lshr i64 %6, 3, !dbg !48
+ %15 = add i64 %14, 2147450880, !dbg !48
+ %16 = add i64 %15, 0, !dbg !48
+ %17 = inttoptr i64 %16 to i64*, !dbg !48
+ store i64 -868083113472691727, i64* %17, !dbg !48
+ tail call void @llvm.dbg.value(metadata !{%struct.C* %this}, i64 0, metadata !30), !dbg !48
+ %call = call i32 @_ZN1A5m_fn1Ev(%struct.A* %8), !dbg !49
+ %i.i = getelementptr inbounds %struct.C* %this, i64 0, i32 1, i32 0, !dbg !50
+ %18 = ptrtoint i32* %i.i to i64, !dbg !50
+ %19 = lshr i64 %18, 3, !dbg !50
+ %20 = add i64 %19, 2147450880, !dbg !50
+ %21 = inttoptr i64 %20 to i8*, !dbg !50
+ %22 = load i8* %21, !dbg !50
+ %23 = icmp ne i8 %22, 0, !dbg !50
+ br i1 %23, label %24, label %30, !dbg !50
+
+; <label>:24 ; preds = %5
+ %25 = and i64 %18, 7, !dbg !50
+ %26 = add i64 %25, 3, !dbg !50
+ %27 = trunc i64 %26 to i8, !dbg !50
+ %28 = icmp sge i8 %27, %22, !dbg !50
+ br i1 %28, label %29, label %30
+
+; <label>:29 ; preds = %24
+ call void @__asan_report_store4(i64 %18), !dbg !50
+ call void asm sideeffect "", ""()
+ unreachable
+
+; <label>:30 ; preds = %24, %5
+ store i32 0, i32* %i.i, align 4, !dbg !50, !tbaa !41
+ store i64 1172321806, i64* %9, !dbg !52
+ %31 = icmp ne i64 %6, %0, !dbg !52
+ br i1 %31, label %32, label %39, !dbg !52
+
+; <label>:32 ; preds = %30
+ %33 = add i64 %15, 0, !dbg !52
+ %34 = inttoptr i64 %33 to i64*, !dbg !52
+ store i64 -723401728380766731, i64* %34, !dbg !52
+ %35 = add i64 %6, 56, !dbg !52
+ %36 = inttoptr i64 %35 to i64*, !dbg !52
+ %37 = load i64* %36, !dbg !52
+ %38 = inttoptr i64 %37 to i8*, !dbg !52
+ store i8 0, i8* %38, !dbg !52
+ br label %42, !dbg !52
+
+; <label>:39 ; preds = %30
+ %40 = add i64 %15, 0, !dbg !52
+ %41 = inttoptr i64 %40 to i64*, !dbg !52
+ store i64 0, i64* %41, !dbg !52
+ br label %42, !dbg !52
+
+; <label>:42 ; preds = %39, %32
+ ret void, !dbg !52
+}
+
+declare i32 @_ZN1A5m_fn1Ev(%struct.A*) #2
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #3
+
+define internal void @asan.module_ctor() {
+ tail call void @__asan_init_v3()
+ ret void
+}
+
+declare void @__asan_init_v3()
+
+declare void @__asan_report_load1(i64)
+
+declare void @__asan_load1(i64)
+
+declare void @__asan_report_load2(i64)
+
+declare void @__asan_load2(i64)
+
+declare void @__asan_report_load4(i64)
+
+declare void @__asan_load4(i64)
+
+declare void @__asan_report_load8(i64)
+
+declare void @__asan_load8(i64)
+
+declare void @__asan_report_load16(i64)
+
+declare void @__asan_load16(i64)
+
+declare void @__asan_report_store1(i64)
+
+declare void @__asan_store1(i64)
+
+declare void @__asan_report_store2(i64)
+
+declare void @__asan_store2(i64)
+
+declare void @__asan_report_store4(i64)
+
+declare void @__asan_store4(i64)
+
+declare void @__asan_report_store8(i64)
+
+declare void @__asan_store8(i64)
+
+declare void @__asan_report_store16(i64)
+
+declare void @__asan_store16(i64)
+
+declare void @__asan_report_load_n(i64, i64)
+
+declare void @__asan_report_store_n(i64, i64)
+
+declare void @__asan_loadN(i64, i64)
+
+declare void @__asan_storeN(i64, i64)
+
+declare i8* @__asan_memmove(i8*, i8*, i64)
+
+declare i8* @__asan_memcpy(i8*, i8*, i64)
+
+declare i8* @__asan_memset(i8*, i32, i64)
+
+declare void @__asan_handle_no_return()
+
+declare void @__sanitizer_cov()
+
+declare void @__sanitizer_ptr_cmp(i64, i64)
+
+declare void @__sanitizer_ptr_sub(i64, i64)
+
+declare i64 @__asan_stack_malloc_0(i64, i64)
+
+declare void @__asan_stack_free_0(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_1(i64, i64)
+
+declare void @__asan_stack_free_1(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_2(i64, i64)
+
+declare void @__asan_stack_free_2(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_3(i64, i64)
+
+declare void @__asan_stack_free_3(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_4(i64, i64)
+
+declare void @__asan_stack_free_4(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_5(i64, i64)
+
+declare void @__asan_stack_free_5(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_6(i64, i64)
+
+declare void @__asan_stack_free_6(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_7(i64, i64)
+
+declare void @__asan_stack_free_7(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_8(i64, i64)
+
+declare void @__asan_stack_free_8(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_9(i64, i64)
+
+declare void @__asan_stack_free_9(i64, i64, i64)
+
+declare i64 @__asan_stack_malloc_10(i64, i64)
+
+declare void @__asan_stack_free_10(i64, i64, i64)
+
+declare void @__asan_poison_stack_memory(i64, i64)
+
+declare void @__asan_unpoison_stack_memory(i64, i64)
+
+declare void @__asan_before_dynamic_init(i64)
+
+declare void @__asan_after_dynamic_init()
+
+declare void @__asan_register_globals(i64, i64)
+
+declare void @__asan_unregister_globals(i64, i64)
+
+declare void @__sanitizer_cov_module_init(i64)
+
+attributes #0 = { noreturn sanitize_address "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { sanitize_address "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!36, !37}
+!llvm.ident = !{!38}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !3, metadata !21, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/<stdin>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<stdin>", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !14}
+!4 = metadata !{i32 786451, metadata !5, null, metadata !"C", i32 10, i64 64, i64 32, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS1C"} ; [ DW_TAG_structure_type ] [C] [line 10, size 64, align 32, offset 0] [def] [from ]
+!5 = metadata !{metadata !"incorrect-variable-debug-loc.cpp", metadata !"/tmp/dbginfo"}
+!6 = metadata !{metadata !7, metadata !9, metadata !10}
+!7 = metadata !{i32 786445, metadata !5, metadata !"_ZTS1C", metadata !"j", i32 12, i64 32, i64 32, i64 0, i32 0, metadata !8} ; [ DW_TAG_member ] [j] [line 12, size 32, align 32, offset 0] [from int]
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786445, metadata !5, metadata !"_ZTS1C", metadata !"b", i32 13, i64 32, i64 32, i64 32, i32 0, metadata !"_ZTS1B"} ; [ DW_TAG_member ] [b] [line 13, size 32, align 32, offset 32] [from _ZTS1B]
+!10 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1C", metadata !"m_fn3", metadata !"m_fn3", metadata !"_ZN1C5m_fn3Ev", i32 11, metadata !11, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 11} ; [ DW_TAG_subprogram ] [line 11] [m_fn3]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{null, metadata !13}
+!13 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1C]
+!14 = metadata !{i32 786451, metadata !5, null, metadata !"B", i32 5, i64 32, i64 32, i32 0, i32 0, null, metadata !15, i32 0, null, null, metadata !"_ZTS1B"} ; [ DW_TAG_structure_type ] [B] [line 5, size 32, align 32, offset 0] [def] [from ]
+!15 = metadata !{metadata !16, metadata !17}
+!16 = metadata !{i32 786445, metadata !5, metadata !"_ZTS1B", metadata !"i", i32 7, i64 32, i64 32, i64 0, i32 0, metadata !8} ; [ DW_TAG_member ] [i] [line 7, size 32, align 32, offset 0] [from int]
+!17 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1B", metadata !"m_fn2", metadata !"m_fn2", metadata !"_ZN1B5m_fn2Ev", i32 6, metadata !18, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 6} ; [ DW_TAG_subprogram ] [line 6] [m_fn2]
+!18 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !19, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!19 = metadata !{null, metadata !20}
+!20 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1B"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1B]
+!21 = metadata !{metadata !22, metadata !28, metadata !32}
+!22 = metadata !{i32 786478, metadata !5, metadata !23, metadata !"fn1", metadata !"fn1", metadata !"_Z3fn1v", i32 16, metadata !24, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @_Z3fn1v, null, null, metadata !26, i32 16} ; [ DW_TAG_subprogram ] [line 16] [def] [fn1]
+!23 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [/tmp/dbginfo/incorrect-variable-debug-loc.cpp]
+!24 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !25, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!25 = metadata !{metadata !8}
+!26 = metadata !{metadata !27}
+!27 = metadata !{i32 786688, metadata !22, metadata !"A", metadata !23, i32 17, metadata !"_ZTS1C", i32 0, i32 0} ; [ DW_TAG_auto_variable ] [A] [line 17]
+!28 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1C", metadata !"m_fn3", metadata !"m_fn3", metadata !"_ZN1C5m_fn3Ev", i32 21, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (%struct.C*)* @_ZN1C5m_fn3Ev, null, metadata !10, metadata !29, i32 21} ; [ DW_TAG_subprogram ] [line 21] [def] [m_fn3]
+!29 = metadata !{metadata !30}
+!30 = metadata !{i32 786689, metadata !28, metadata !"this", null, i32 16777216, metadata !31, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!31 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1C]
+!32 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1B", metadata !"m_fn2", metadata !"m_fn2", metadata !"_ZN1B5m_fn2Ev", i32 6, metadata !18, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, metadata !17, metadata !33, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [m_fn2]
+!33 = metadata !{metadata !34}
+!34 = metadata !{i32 786689, metadata !32, metadata !"this", null, i32 16777216, metadata !35, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!35 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1B"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1B]
+!36 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!37 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!38 = metadata !{metadata !"clang version 3.5.0 "}
+!39 = metadata !{i32 6, i32 0, metadata !32, metadata !40}
+!40 = metadata !{i32 18, i32 0, metadata !22, null}
+!41 = metadata !{metadata !42, metadata !43, i64 0}
+!42 = metadata !{metadata !"_ZTS1B", metadata !43, i64 0}
+!43 = metadata !{metadata !"int", metadata !44, i64 0}
+!44 = metadata !{metadata !"omnipotent char", metadata !45, i64 0}
+!45 = metadata !{metadata !"Simple C/C++ TBAA"}
+!46 = metadata !{i32 17, i32 0, metadata !22, null}
+!47 = metadata !{i32 19, i32 0, metadata !22, null}
+!48 = metadata !{i32 0, i32 0, metadata !28, null}
+!49 = metadata !{i32 22, i32 0, metadata !28, null}
+!50 = metadata !{i32 6, i32 0, metadata !32, metadata !51}
+!51 = metadata !{i32 23, i32 0, metadata !28, null}
+!52 = metadata !{i32 24, i32 0, metadata !28, null}
diff --git a/test/DebugInfo/inline-no-debug-info.ll b/test/DebugInfo/inline-no-debug-info.ll
new file mode 100644
index 000000000000..2257b8961fb5
--- /dev/null
+++ b/test/DebugInfo/inline-no-debug-info.ll
@@ -0,0 +1,69 @@
+; RUN: opt < %s -inline -S | FileCheck %s
+
+; This was generated from the following source:
+; int a, b;
+; __attribute__((__always_inline__)) static void callee2() { b = 2; }
+; __attribute__((__nodebug__)) void callee() { a = 1; callee2(); }
+; void caller() { callee(); }
+; by running
+; clang -S test.c -emit-llvm -O1 -gline-tables-only -fno-strict-aliasing
+
+; CHECK-LABEL: @caller(
+
+; This instruction did not have a !dbg metadata in the callee.
+; CHECK: store i32 1, {{.*}}, !dbg [[A:!.*]]
+
+; This instruction came from callee with a !dbg metadata.
+; CHECK: store i32 2, {{.*}}, !dbg [[B:!.*]]
+
+; The remaining instruction from the caller.
+; CHECK: ret void, !dbg [[A]]
+
+; Debug location of the code in caller() and of the inlined code that did not
+; have any debug location before.
+; CHECK-DAG: [[A]] = metadata !{i32 4, i32 0, metadata !{{[01-9]+}}, null}
+
+; Debug location of the inlined code.
+; CHECK-DAG: [[B]] = metadata !{i32 2, i32 0, metadata !{{[01-9]+}}, metadata [[A]]}
+
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@a = common global i32 0, align 4
+@b = common global i32 0, align 4
+
+; Function Attrs: nounwind uwtable
+define void @callee() #0 {
+entry:
+ store i32 1, i32* @a, align 4
+ store i32 2, i32* @b, align 4, !dbg !11
+ ret void
+}
+
+; Function Attrs: nounwind uwtable
+define void @caller() #0 {
+entry:
+ tail call void @callee(), !dbg !12
+ ret void, !dbg !12
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 (210174)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [/code/llvm/build0/test.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"test.c", metadata !"/code/llvm/build0"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !7}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"caller", metadata !"caller", metadata !"", i32 4, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, void ()* @caller, null, null, metadata !2, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [caller]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/code/llvm/build0/test.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"callee2", metadata !"callee2", metadata !"", i32 2, metadata !6, i1 true, i1 true, i32 0, i32 0, null, i32 0, i1 true, null, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [local] [def] [callee2]
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5.0 (210174)"}
+!11 = metadata !{i32 2, i32 0, metadata !7, null}
+!12 = metadata !{i32 4, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/inline-scopes.ll b/test/DebugInfo/inline-scopes.ll
new file mode 100644
index 000000000000..36c073516c56
--- /dev/null
+++ b/test/DebugInfo/inline-scopes.ll
@@ -0,0 +1,130 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; bool f();
+; inline __attribute__((always_inline)) int f1() {
+; if (bool b = f())
+; return 1;
+; return 2;
+; }
+;
+; inline __attribute__((always_inline)) int f2() {
+; # 2 "y.cc"
+; if (bool b = f())
+; return 3;
+; return 4;
+; }
+;
+; int main() {
+; f1();
+; f2();
+; }
+
+; Ensure that lexical_blocks within inlined_subroutines are preserved/emitted.
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NOT: DW_TAG
+; CHECK-NOT: NULL
+; CHECK: DW_TAG_lexical_block
+; CHECK-NOT: DW_TAG
+; CHECK-NOT: NULL
+; CHECK: DW_TAG_variable
+; Ensure that file changes don't interfere with creating inlined subroutines.
+; (see the line directive inside 'f2' in the source)
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin
+
+; Function Attrs: uwtable
+define i32 @main() #0 {
+entry:
+ %retval.i2 = alloca i32, align 4
+ %b.i3 = alloca i8, align 1
+ %retval.i = alloca i32, align 4
+ %b.i = alloca i8, align 1
+ call void @llvm.dbg.declare(metadata !{i8* %b.i}, metadata !16), !dbg !19
+ %call.i = call zeroext i1 @_Z1fv(), !dbg !19
+ %frombool.i = zext i1 %call.i to i8, !dbg !19
+ store i8 %frombool.i, i8* %b.i, align 1, !dbg !19
+ %0 = load i8* %b.i, align 1, !dbg !19
+ %tobool.i = trunc i8 %0 to i1, !dbg !19
+ br i1 %tobool.i, label %if.then.i, label %if.end.i, !dbg !19
+
+if.then.i: ; preds = %entry
+ store i32 1, i32* %retval.i, !dbg !21
+ br label %_Z2f1v.exit, !dbg !21
+
+if.end.i: ; preds = %entry
+ store i32 2, i32* %retval.i, !dbg !22
+ br label %_Z2f1v.exit, !dbg !22
+
+_Z2f1v.exit: ; preds = %if.then.i, %if.end.i
+ %1 = load i32* %retval.i, !dbg !23
+ call void @llvm.dbg.declare(metadata !{i8* %b.i3}, metadata !24), !dbg !27
+ %call.i4 = call zeroext i1 @_Z1fv(), !dbg !27
+ %frombool.i5 = zext i1 %call.i4 to i8, !dbg !27
+ store i8 %frombool.i5, i8* %b.i3, align 1, !dbg !27
+ %2 = load i8* %b.i3, align 1, !dbg !27
+ %tobool.i6 = trunc i8 %2 to i1, !dbg !27
+ br i1 %tobool.i6, label %if.then.i7, label %if.end.i8, !dbg !27
+
+if.then.i7: ; preds = %_Z2f1v.exit
+ store i32 3, i32* %retval.i2, !dbg !29
+ br label %_Z2f2v.exit, !dbg !29
+
+if.end.i8: ; preds = %_Z2f1v.exit
+ store i32 4, i32* %retval.i2, !dbg !30
+ br label %_Z2f2v.exit, !dbg !30
+
+_Z2f2v.exit: ; preds = %if.then.i7, %if.end.i8
+ %3 = load i32* %retval.i2, !dbg !31
+ ret i32 0, !dbg !32
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+declare zeroext i1 @_Z1fv() #2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!13, !14}
+!llvm.ident = !{!15}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/inline-scopes.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"inline-scopes.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !10, metadata !12}
+!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 7, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [main]
+!5 = metadata !{metadata !"y.cc", metadata !"/tmp/dbginfo"}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [/tmp/dbginfo/y.cc]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{metadata !9}
+!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = metadata !{i32 786478, metadata !1, metadata !11, metadata !"f2", metadata !"f2", metadata !"_Z2f2v", i32 8, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, metadata !2, i32 8} ; [ DW_TAG_subprogram ] [line 8] [def] [f2]
+!11 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/inline-scopes.cpp]
+!12 = metadata !{i32 786478, metadata !1, metadata !11, metadata !"f1", metadata !"f1", metadata !"_Z2f1v", i32 2, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, null, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [f1]
+!13 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!14 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!15 = metadata !{metadata !"clang version 3.5.0 "}
+!16 = metadata !{i32 786688, metadata !17, metadata !"b", metadata !11, i32 3, metadata !18, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [b] [line 3]
+!17 = metadata !{i32 786443, metadata !1, metadata !12, i32 3, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/inline-scopes.cpp]
+!18 = metadata !{i32 786468, null, null, metadata !"bool", i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ] [bool] [line 0, size 8, align 8, offset 0, enc DW_ATE_boolean]
+!19 = metadata !{i32 3, i32 0, metadata !17, metadata !20}
+!20 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!21 = metadata !{i32 4, i32 0, metadata !17, metadata !20}
+!22 = metadata !{i32 5, i32 0, metadata !12, metadata !20}
+!23 = metadata !{i32 6, i32 0, metadata !12, metadata !20}
+!24 = metadata !{i32 786688, metadata !25, metadata !"b", metadata !6, i32 2, metadata !18, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [b] [line 2]
+!25 = metadata !{i32 786443, metadata !5, metadata !26, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/y.cc]
+!26 = metadata !{i32 786443, metadata !5, metadata !10} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/y.cc]
+!27 = metadata !{i32 2, i32 0, metadata !25, metadata !28}
+!28 = metadata !{i32 9, i32 0, metadata !4, null}
+!29 = metadata !{i32 3, i32 0, metadata !25, metadata !28}
+!30 = metadata !{i32 4, i32 0, metadata !26, metadata !28}
+!31 = metadata !{i32 5, i32 0, metadata !26, metadata !28}
+!32 = metadata !{i32 10, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/inlined-arguments.ll b/test/DebugInfo/inlined-arguments.ll
index 1dd5b2c07f94..6979862a536f 100644
--- a/test/DebugInfo/inlined-arguments.ll
+++ b/test/DebugInfo/inlined-arguments.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -filetype=obj < %s > %t
+; RUN: %llc_dwarf -filetype=obj < %s > %t
; RUN: llvm-dwarfdump %t | FileCheck %s
; IR generated from clang -O -g with the following source
@@ -16,9 +16,11 @@
; CHECK: DW_AT_name{{.*}}"f1"
; CHECK: DW_TAG_formal_parameter
-; CHECK-NEXT: DW_AT_name{{.*}}"x"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"x"
; CHECK: DW_TAG_formal_parameter
-; CHECK-NEXT: DW_AT_name{{.*}}"y"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}"y"
; Function Attrs: uwtable
define void @_Z2f2v() #0 {
@@ -50,7 +52,7 @@ attributes #2 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/blaikie/dev/scratch/exp.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"exp.cpp", metadata !"/usr/local/google/home/blaikie/dev/scratch"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !8}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f2", metadata !"f2", metadata !"_Z2f2v", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @_Z2f2v, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [f2]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/blaikie/dev/scratch/exp.cpp]
diff --git a/test/DebugInfo/inlined-vars.ll b/test/DebugInfo/inlined-vars.ll
index 34c5101a1426..9cfde1f26039 100644
--- a/test/DebugInfo/inlined-vars.ll
+++ b/test/DebugInfo/inlined-vars.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 < %s | FileCheck %s -check-prefix ARGUMENT
-; RUN: llc -O0 < %s | FileCheck %s -check-prefix VARIABLE
+; RUN: %llc_dwarf -O0 < %s | FileCheck %s -check-prefix ARGUMENT
+; RUN: %llc_dwarf -O0 < %s | FileCheck %s -check-prefix VARIABLE
; PR 13202
define i32 @main() uwtable {
@@ -18,10 +18,11 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!27}
-!0 = metadata !{i32 786449, metadata !26, i32 4, metadata !"clang version 3.2 (trunk 159419)", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, metadata !26, i32 4, metadata !"clang version 3.2 (trunk 159419)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !5, metadata !10}
-!5 = metadata !{i32 786478, metadata !26, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 10, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @main, null, null, metadata !1, i32 10} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786478, metadata !26, metadata !6, metadata !"main", metadata !"main", metadata !"", i32 10, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @main, null, null, metadata !2, i32 10} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !26} ; [ DW_TAG_file_type ]
!7 = metadata !{i32 786453, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!8 = metadata !{metadata !9}
@@ -29,8 +30,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!10 = metadata !{i32 786478, metadata !26, metadata !6, metadata !"f", metadata !"f", metadata !"_ZL1fi", i32 3, metadata !11, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, null, metadata !13, i32 3} ; [ DW_TAG_subprogram ]
!11 = metadata !{i32 786453, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!12 = metadata !{metadata !9, metadata !9}
-!13 = metadata !{metadata !14}
-!14 = metadata !{metadata !15, metadata !16}
+!13 = metadata !{metadata !15, metadata !16}
!15 = metadata !{i32 786689, metadata !10, metadata !"argument", metadata !6, i32 16777219, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
; Two DW_TAG_formal_parameter: one abstract and one inlined.
diff --git a/test/DebugInfo/llvm-symbolizer-zlib.test b/test/DebugInfo/llvm-symbolizer-zlib.test
new file mode 100644
index 000000000000..0aae7e6c6923
--- /dev/null
+++ b/test/DebugInfo/llvm-symbolizer-zlib.test
@@ -0,0 +1,7 @@
+REQUIRES: zlib
+
+RUN: echo "%p/Inputs/dwarfdump-test-zlib.elf-x86-64 0x400559" > %t.input
+RUN: llvm-symbolizer < %t.input | FileCheck %s
+
+CHECK: main
+CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test-zlib.cc:16
diff --git a/test/DebugInfo/llvm-symbolizer.test b/test/DebugInfo/llvm-symbolizer.test
index a8799cfa5460..20d3dda21ab0 100644
--- a/test/DebugInfo/llvm-symbolizer.test
+++ b/test/DebugInfo/llvm-symbolizer.test
@@ -1,15 +1,26 @@
RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400559" > %t.input
RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64.debuglink 0x400559" >> %t.input
RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400436" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400528" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test.elf-x86-64 0x400586" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test2.elf-x86-64 0x4004e8" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-test2.elf-x86-64 0x4004f4" >> %t.input
RUN: echo "%p/Inputs/dwarfdump-test4.elf-x86-64 0x62c" >> %t.input
RUN: echo "%p/Inputs/dwarfdump-inl-test.elf-x86-64 0x710" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-inl-test.elf-x86-64 0x7d1" >> %t.input
+RUN: echo "%p/Inputs/dwarfdump-inl-test.elf-x86-64 0x785" >> %t.input
RUN: echo "%p/Inputs/dwarfdump-inl-test.high_pc.elf-x86-64 0x568" >> %t.input
+RUN: echo "\"%p/Inputs/dwarfdump-test3.elf-x86-64 space\" 0x640" >> %t.input
RUN: echo "\"%p/Inputs/dwarfdump-test3.elf-x86-64 space\" 0x633" >> %t.input
+RUN: echo "\"%p/Inputs/dwarfdump-test3.elf-x86-64 space\" 0x62d" >> %t.input
RUN: echo "%p/Inputs/macho-universal 0x1f84" >> %t.input
RUN: echo "%p/Inputs/macho-universal:i386 0x1f67" >> %t.input
RUN: echo "%p/Inputs/macho-universal:x86_64 0x100000f05" >> %t.input
+RUN: echo "%p/Inputs/llvm-symbolizer-dwo-test 0x400514" >> %t.input
+RUN: echo "%p/Inputs/fission-ranges.elf-x86_64 0x720" >> %t.input
+RUN: echo "%p/Inputs/arange-overlap.elf-x86_64 0x714" >> %t.input
-RUN: llvm-symbolizer --functions --inlining --demangle=false \
+RUN: llvm-symbolizer --functions=linkage --inlining --demangle=false \
RUN: --default-arch=i386 < %t.input | FileCheck %s
CHECK: main
@@ -20,6 +31,18 @@ CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:16
CHECK: _start
+CHECK: _Z1fii
+CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:11
+
+CHECK: DummyClass
+CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:4
+
+CHECK: a
+CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test2-helper.cc:2
+
+CHECK: main
+CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test2-main.cc:4
+
CHECK: _Z1cv
CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test4-part1.cc:2
@@ -30,7 +53,19 @@ CHECK-NEXT: dwarfdump-inl-test.h:7
CHECK-NEXT: inlined_f
CHECK-NEXT: dwarfdump-inl-test.cc:3
CHECK-NEXT: main
-CHECK-NEXT: dwarfdump-inl-test.cc:
+CHECK-NEXT: dwarfdump-inl-test.cc:8
+
+CHECK: inlined_g
+CHECK-NEXT: dwarfdump-inl-test.h:7
+CHECK-NEXT: inlined_f
+CHECK-NEXT: dwarfdump-inl-test.cc:3
+CHECK-NEXT: main
+CHECK-NEXT: dwarfdump-inl-test.cc:8
+
+CHECK: inlined_f
+CHECK-NEXT: dwarfdump-inl-test.cc:3
+CHECK-NEXT: main
+CHECK-NEXT: dwarfdump-inl-test.cc:8
CHECK: inlined_h
CHECK-NEXT: dwarfdump-inl-test.h:3
@@ -39,15 +74,30 @@ CHECK-NEXT: dwarfdump-inl-test.h:7
CHECK-NEXT: inlined_f
CHECK-NEXT: dwarfdump-inl-test.cc:3
CHECK-NEXT: main
-CHECK-NEXT: dwarfdump-inl-test.cc:
+CHECK-NEXT: dwarfdump-inl-test.cc:8
+
+CHECK: C
+CHECK-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test3.cc:3
-CHECK: _Z3do1v
-CHECK-NEXT: dwarfdump-test3-decl.h:7
+CHECK: _Z3do1v
+CHECK-NEXT: /tmp/include{{[/\\]}}dwarfdump-test3-decl.h:7
+
+CHECK: _Z3do2v
+CHECK-NEXT: /tmp/dbginfo{{[/\\]}}include{{[/\\]}}dwarfdump-test3-decl2.h:1
CHECK: main
CHECK: _Z3inci
CHECK: _Z3inci
+CHECK: main
+CHECK-NEXT: llvm-symbolizer-dwo-test.cc:11
+
+CHECK: main
+CHECK-NEXT: {{.*}}fission-ranges.cc:6
+
+CHECK: _ZN1S3bazEv
+CHECK-NEXT: {{.*}}arange-overlap.cc:6
+
RUN: echo "unexisting-file 0x1234" > %t.input2
RUN: llvm-symbolizer < %t.input2
@@ -57,3 +107,35 @@ RUN: llvm-symbolizer < %t.input3 | FileCheck %s --check-prefix=UNKNOWN-ARCH
UNKNOWN-ARCH-NOT: main
UNKNOWN-ARCH: ??
UNKNOWN-ARCH-NOT: main
+
+RUN: echo "0x400559" > %t.input4
+RUN: echo "0x400436" >> %t.input4
+RUN: llvm-symbolizer --obj %p/Inputs/dwarfdump-test.elf-x86-64 < %t.input4 \
+RUN: | FileCheck %s --check-prefix=BINARY
+
+BINARY: main
+BINARY-NEXT: /tmp/dbginfo{{[/\\]}}dwarfdump-test.cc:16
+BINARY: _start
+
+RUN: echo "0x400720" > %t.input5
+RUN: echo "0x4004a0" >> %t.input5
+RUN: echo "0x4006f0" >> %t.input5
+RUN: llvm-symbolizer --obj %p/Inputs/llvm-symbolizer-test.elf-x86-64 < %t.input5 \
+RUN: | FileCheck %s --check-prefix=BINARY_C
+
+BINARY_C: main
+BINARY_C-NEXT: /tmp/dbginfo{{[/\\]}}llvm-symbolizer-test.c:10
+BINARY_C: _start
+BINARY_C: {{g$}}
+
+RUN: echo "0x1f1" > %t.input6
+RUN: llvm-symbolizer --obj %p/Inputs/shared-object-stripped.elf-i386 < %t.input6 \
+RUN: | FileCheck %s --check-prefix=STRIPPED
+
+STRIPPED: global_func
+
+RUN: echo "%p/Inputs/dwarfdump-test4.elf-x86-64 0x62c" > %t.input7
+RUN: llvm-symbolizer --functions=short --use-symbol-table=false --demangle=false < %t.input7 \
+RUN: | FileCheck %s --check-prefix=SHORT_FUNCTION_NAME
+
+SHORT_FUNCTION_NAME-NOT: _Z1cv
diff --git a/test/DebugInfo/lto-comp-dir.ll b/test/DebugInfo/lto-comp-dir.ll
new file mode 100644
index 000000000000..d272dff6ea82
--- /dev/null
+++ b/test/DebugInfo/lto-comp-dir.ll
@@ -0,0 +1,84 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf < %s -filetype=obj | llvm-dwarfdump -debug-dump=line - | FileCheck %s
+; RUN: %llc_dwarf < %s -filetype=asm | FileCheck --check-prefix=ASM %s
+
+; If multiple line tables are emitted, one per CU, those line tables can
+; unambiguously rely on the comp_dir of their owning CU and use directory '0'
+; to refer to it.
+
+; CHECK: .debug_line contents:
+; CHECK-NEXT: Line table prologue:
+; CHECK-NOT: include_directories
+; CHECK: file_names[ 1] 0 {{.*}} a.cpp
+; CHECK-NOT: file_names
+
+; CHECK: Line table prologue:
+; CHECK-NOT: include_directories
+; CHECK: file_names[ 1] 0 {{.*}} b.cpp
+; CHECK-NOT: file_names
+
+; However, if a single line table is emitted and shared between CUs, the
+; comp_dir is ambiguous and relying on it would lead to different path
+; interpretations depending on which CU led to the table - so ensure that
+; full paths are always emitted in this case, never comp_dir relative.
+
+; ASM: .file 1 "/tmp/dbginfo/a{{[/\\]+}}a.cpp"
+; ASM: .file 2 "/tmp/dbginfo/b{{[/\\]+}}b.cpp"
+
+; Generated from the following source compiled to bitcode from within their
+; respective directories (with debug info) and linked together with llvm-link
+
+; a/a.cpp
+; void func() {
+; }
+
+; b/b.cpp
+; void func();
+; int main() {
+; func();
+; }
+
+; Function Attrs: nounwind uwtable
+define void @_Z4funcv() #0 {
+entry:
+ ret void, !dbg !19
+}
+
+; Function Attrs: uwtable
+define i32 @main() #1 {
+entry:
+ call void @_Z4funcv(), !dbg !20
+ ret i32 0, !dbg !21
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0, !8}
+!llvm.module.flags = !{!16, !17}
+!llvm.ident = !{!18, !18}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1}
+!1 = metadata !{metadata !"a.cpp", metadata !"/tmp/dbginfo/a"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func", metadata !"func", metadata !"_Z4funcv", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z4funcv, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [func]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/a/a.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 786449, metadata !9, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !10, metadata !2, metadata !2, metadata !"", i32 1}
+!9 = metadata !{metadata !"b.cpp", metadata !"/tmp/dbginfo/b"}
+!10 = metadata !{metadata !11}
+!11 = metadata !{i32 786478, metadata !9, metadata !12, metadata !"main", metadata !"main", metadata !"", i32 2, metadata !13, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [main]
+!12 = metadata !{i32 786473, metadata !9} ; [ DW_TAG_file_type ] [/tmp/dbginfo/b/b.cpp]
+!13 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !14, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!14 = metadata !{metadata !15}
+!15 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!16 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!17 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!18 = metadata !{metadata !"clang version 3.5.0 "}
+!19 = metadata !{i32 2, i32 0, metadata !4, null}
+!20 = metadata !{i32 3, i32 0, metadata !11, null}
+!21 = metadata !{i32 4, i32 0, metadata !11, null}
+
diff --git a/test/DebugInfo/member-order.ll b/test/DebugInfo/member-order.ll
index a0c283db5a16..652a6cd6c314 100644
--- a/test/DebugInfo/member-order.ll
+++ b/test/DebugInfo/member-order.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -filetype=obj -O0 < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+; RUN: %llc_dwarf -filetype=obj -O0 < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
; generated by clang from:
; struct foo {
@@ -45,7 +45,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !13, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/member-order.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"member-order.cpp", metadata !"/tmp/dbginfo"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !1, null, metadata !"foo", i32 1, i64 8, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 1, size 8, align 8, offset 0] [def] [from ]
!5 = metadata !{metadata !6, metadata !11}
diff --git a/test/DebugInfo/member-pointers.ll b/test/DebugInfo/member-pointers.ll
index 0bc4ee67265b..4ca69426efbe 100644
--- a/test/DebugInfo/member-pointers.ll
+++ b/test/DebugInfo/member-pointers.ll
@@ -1,7 +1,7 @@
; REQUIRES: object-emission
; XFAIL: hexagon
-; RUN: llc -filetype=obj -O0 < %s > %t
+; RUN: %llc_dwarf -filetype=obj -O0 < %s > %t
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
; CHECK: DW_TAG_ptr_to_member_type
; CHECK: DW_TAG_ptr_to_member_type
@@ -23,8 +23,8 @@
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!16}
-!0 = metadata !{i32 786449, metadata !15, i32 4, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/home/blaikie/Development/scratch/simple.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{i32 0}
+!0 = metadata !{i32 786449, metadata !15, i32 4, metadata !"clang version 3.3 ", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/blaikie/Development/scratch/simple.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{}
!3 = metadata !{metadata !5, metadata !10}
!5 = metadata !{i32 786484, i32 0, null, metadata !"x", metadata !"x", metadata !"", metadata !6, i32 4, metadata !7, i32 0, i32 1, i64* @x, null} ; [ DW_TAG_variable ] [x] [line 4] [def]
!6 = metadata !{i32 786473, metadata !15} ; [ DW_TAG_file_type ]
diff --git a/test/DebugInfo/missing-abstract-variable.ll b/test/DebugInfo/missing-abstract-variable.ll
new file mode 100644
index 000000000000..59a38cf39d5f
--- /dev/null
+++ b/test/DebugInfo/missing-abstract-variable.ll
@@ -0,0 +1,191 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; The formal parameter 'b' for function 'x' when inlined within 'a' is lost on
+; mips and powerpc64 (and on x86_64 at at least -O2). Presumably this is a
+; SelectionDAG issue (do mips/powerpc64 use FastISel?).
+; XFAIL: mips, powerpc64, s390x
+
+; Build from the following source with clang -O2.
+
+; The important details are that 'x's abstract definition is first built during
+; the definition of 'b', where the parameter to 'x' is constant and so 'x's 's'
+; variable is optimized away. No abstract definition DIE for 's' is constructed.
+; Then, during 'a' emission, the abstract DbgVariable for 's' is created, but
+; the abstract DIE isn't (since the abstract definition for 'b' is already
+; built). This results in 's' inlined in 'a' being emitted with its name, line, and
+; file there, rather than referencing an abstract definition.
+
+; extern int t;
+;
+; void f(int);
+;
+; inline void x(bool b) {
+; if (b) {
+; int s = t;
+; f(s);
+; }
+; f(0);
+; }
+;
+; void b() {
+; x(false);
+; }
+;
+; void a(bool u) {
+; x(u);
+; }
+
+; CHECK: [[ABS_X:.*]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "x"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[ABS_B:.*]]: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "b"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_lexical_block
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_lexical_block
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: [[ABS_S:.*]]: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "s"
+
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "b"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[ABS_X]]}
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[ABS_B]]}
+; Notice 'x's local variable 's' is missing. Not necessarily a bug here,
+; since it's been optimized entirely away and it should be described in
+; the abstract subprogram.
+; CHECK-NOT: DW_TAG
+; CHECK: NULL
+; CHECK-NOT: DW_TAG
+; CHECK: NULL
+
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "a"
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_inlined_subroutine
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[ABS_X]]}
+; CHECK-NOT: {{DW_TAG|NULL}}
+; FIXME: This formal parameter goes missing at least at -O2 (& on
+; mips/powerpc), maybe before that. Perhaps SelectionDAG is to blame (and
+; fastisel succeeds).
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[ABS_B]]}
+
+; The two lexical blocks here are caused by the scope of the if that includes
+; the condition variable, and the scope within the if's composite statement. I'm
+; not sure we really need both of them since there's no variable declared in the
+; outer of the two.
+
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_lexical_block
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_lexical_block
+; CHECK-NOT: {{DW_TAG|NULL}}
+; CHECK: DW_TAG_variable
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[ABS_S]]}
+
+@t = external global i32
+
+; Function Attrs: uwtable
+define void @_Z1bv() #0 {
+entry:
+ tail call void @llvm.dbg.value(metadata !24, i64 0, metadata !25), !dbg !27
+ tail call void @_Z1fi(i32 0), !dbg !28
+ ret void, !dbg !29
+}
+
+; Function Attrs: uwtable
+define void @_Z1ab(i1 zeroext %u) #0 {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i1 %u}, i64 0, metadata !13), !dbg !30
+ tail call void @llvm.dbg.value(metadata !{i1 %u}, i64 0, metadata !31), !dbg !33
+ br i1 %u, label %if.then.i, label %_Z1xb.exit, !dbg !34
+
+if.then.i: ; preds = %entry
+ %0 = load i32* @t, align 4, !dbg !35, !tbaa !36
+ tail call void @llvm.dbg.value(metadata !{i32 %0}, i64 0, metadata !40), !dbg !35
+ tail call void @_Z1fi(i32 %0), !dbg !41
+ br label %_Z1xb.exit, !dbg !42
+
+_Z1xb.exit: ; preds = %entry, %if.then.i
+ tail call void @_Z1fi(i32 0), !dbg !43
+ ret void, !dbg !44
+}
+
+declare void @_Z1fi(i32) #1
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!21, !22}
+!llvm.ident = !{!23}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/missing-abstract-variables.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"missing-abstract-variables.cc", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !8, metadata !14}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"b", metadata !"b", metadata !"_Z1bv", i32 13, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @_Z1bv, null, null, metadata !2, i32 13} ; [ DW_TAG_subprogram ] [line 13] [def] [b]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/missing-abstract-variables.cc]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"a", metadata !"a", metadata !"_Z1ab", i32 17, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i1)* @_Z1ab, null, null, metadata !12, i32 17} ; [ DW_TAG_subprogram ] [line 17] [def] [a]
+!9 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!10 = metadata !{null, metadata !11}
+!11 = metadata !{i32 786468, null, null, metadata !"bool", i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ] [bool] [line 0, size 8, align 8, offset 0, enc DW_ATE_boolean]
+!12 = metadata !{metadata !13}
+!13 = metadata !{i32 786689, metadata !8, metadata !"u", metadata !5, i32 16777233, metadata !11, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [u] [line 17]
+!14 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"x", metadata !"x", metadata !"_Z1xb", i32 5, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, null, metadata !15, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [x]
+!15 = metadata !{metadata !16, metadata !17}
+!16 = metadata !{i32 786689, metadata !14, metadata !"b", metadata !5, i32 16777221, metadata !11, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [b] [line 5]
+!17 = metadata !{i32 786688, metadata !18, metadata !"s", metadata !5, i32 7, metadata !20, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [s] [line 7]
+!18 = metadata !{i32 786443, metadata !1, metadata !19, i32 6, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/missing-abstract-variables.cc]
+!19 = metadata !{i32 786443, metadata !1, metadata !14, i32 6, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/missing-abstract-variables.cc]
+!20 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!21 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!22 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!23 = metadata !{metadata !"clang version 3.5.0 "}
+!24 = metadata !{i1 false}
+!25 = metadata !{i32 786689, metadata !14, metadata !"b", metadata !5, i32 16777221, metadata !11, i32 0, metadata !26} ; [ DW_TAG_arg_variable ] [b] [line 5]
+!26 = metadata !{i32 14, i32 0, metadata !4, null}
+!27 = metadata !{i32 5, i32 0, metadata !14, metadata !26}
+!28 = metadata !{i32 10, i32 0, metadata !14, metadata !26}
+!29 = metadata !{i32 15, i32 0, metadata !4, null}
+!30 = metadata !{i32 17, i32 0, metadata !8, null}
+!31 = metadata !{i32 786689, metadata !14, metadata !"b", metadata !5, i32 16777221, metadata !11, i32 0, metadata !32} ; [ DW_TAG_arg_variable ] [b] [line 5]
+!32 = metadata !{i32 18, i32 0, metadata !8, null}
+!33 = metadata !{i32 5, i32 0, metadata !14, metadata !32}
+!34 = metadata !{i32 6, i32 0, metadata !19, metadata !32}
+!35 = metadata !{i32 7, i32 0, metadata !18, metadata !32}
+!36 = metadata !{metadata !37, metadata !37, i64 0}
+!37 = metadata !{metadata !"int", metadata !38, i64 0}
+!38 = metadata !{metadata !"omnipotent char", metadata !39, i64 0}
+!39 = metadata !{metadata !"Simple C/C++ TBAA"}
+!40 = metadata !{i32 786688, metadata !18, metadata !"s", metadata !5, i32 7, metadata !20, i32 0, metadata !32} ; [ DW_TAG_auto_variable ] [s] [line 7]
+!41 = metadata !{i32 8, i32 0, metadata !18, metadata !32} ; [ DW_TAG_imported_declaration ]
+!42 = metadata !{i32 9, i32 0, metadata !18, metadata !32}
+!43 = metadata !{i32 10, i32 0, metadata !14, metadata !32}
+!44 = metadata !{i32 19, i32 0, metadata !8, null}
diff --git a/test/DebugInfo/namespace.ll b/test/DebugInfo/namespace.ll
index 9d0b25c74bae..a9de62c39062 100644
--- a/test/DebugInfo/namespace.ll
+++ b/test/DebugInfo/namespace.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -O0 -filetype=obj < %s > %t
+; RUN: %llc_dwarf -O0 -filetype=obj < %s > %t
; RUN: llvm-dwarfdump %t | FileCheck %s
; CHECK: debug_info contents
; CHECK: [[NS1:0x[0-9a-f]*]]:{{ *}}DW_TAG_namespace
@@ -16,14 +16,6 @@
; CHECK: [[I:0x[0-9a-f]*]]:{{ *}}DW_TAG_variable
; CHECK-NEXT: DW_AT_name{{.*}}= "i"
; CHECK-NOT: NULL
-; CHECK: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_MIPS_linkage_name
-; CHECK-NEXT: DW_AT_name{{.*}}= "f1"
-; CHECK: [[FUNC1:0x[0-9a-f]*]]:{{ *}}DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_MIPS_linkage_name
-; CHECK-NEXT: DW_AT_name{{.*}}= "f1"
-; CHECK: NULL
-; CHECK-NOT: NULL
; CHECK: [[FOO:0x[0-9a-f]*]]:{{ *}}DW_TAG_structure_type
; CHECK-NEXT: DW_AT_name{{.*}}= "foo"
; CHECK-NEXT: DW_AT_declaration
@@ -31,7 +23,16 @@
; CHECK: [[BAR:0x[0-9a-f]*]]:{{ *}}DW_TAG_structure_type
; CHECK-NEXT: DW_AT_name{{.*}}= "bar"
; CHECK: NULL
-; CHECK: NULL
+; CHECK: [[FUNC1:.*]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}= "f1"
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}= "f1"
; CHECK: NULL
; CHECK-NOT: NULL
@@ -44,9 +45,18 @@
; CHECK: NULL
; CHECK-NOT: NULL
+; CHECK: DW_TAG_imported_module
+; Same bug as above, this should be F2, not F1
+; CHECK-NEXT: DW_AT_decl_file{{.*}}(0x0[[F1]])
+; CHECK-NEXT: DW_AT_decl_line{{.*}}(0x0b)
+; CHECK-NEXT: DW_AT_import{{.*}}=> {[[NS1]]})
+; CHECK-NOT: NULL
+
; CHECK: DW_TAG_subprogram
-; CHECK-NEXT: DW_AT_MIPS_linkage_name
-; CHECK-NEXT: DW_AT_name{{.*}}= "func"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name{{.*}}= "func"
; CHECK-NOT: NULL
; CHECK: DW_TAG_imported_module
; CHECK-NEXT: DW_AT_decl_file{{.*}}(0x0[[F2]])
@@ -73,13 +83,13 @@
; CHECK-NEXT: DW_AT_decl_line{{.*}}(0x16)
; CHECK-NEXT: DW_AT_import{{.*}}=> {[[I]]})
; CHECK-NOT: NULL
-; CHECK: [[X:0x[0-9a-f]*]]:{{ *}}DW_TAG_imported_module
+; CHECK: [[X:0x[0-9a-f]*]]:{{ *}}DW_TAG_imported_declaration
; CHECK-NEXT: DW_AT_decl_file{{.*}}(0x0[[F2]])
; CHECK-NEXT: DW_AT_decl_line{{.*}}(0x18)
; CHECK-NEXT: DW_AT_import{{.*}}=> {[[NS1]]})
; CHECK-NEXT: DW_AT_name{{.*}}"X"
; CHECK-NOT: NULL
-; CHECK: DW_TAG_imported_module
+; CHECK: DW_TAG_imported_declaration
; CHECK-NEXT: DW_AT_decl_file{{.*}}(0x0[[F2]])
; CHECK-NEXT: DW_AT_decl_line{{.*}}(0x19)
; CHECK-NEXT: DW_AT_import{{.*}}=> {[[X]]})
@@ -93,13 +103,7 @@
; CHECK-NEXT: DW_AT_import{{.*}}=> {[[NS2]]})
; CHECK: NULL
; CHECK: NULL
-; CHECK-NOT: NULL
-
-; CHECK: DW_TAG_imported_module
-; Same bug as above, this should be F2, not F1
-; CHECK-NEXT: DW_AT_decl_file{{.*}}(0x0[[F1]])
-; CHECK-NEXT: DW_AT_decl_line{{.*}}(0x0b)
-; CHECK-NEXT: DW_AT_import{{.*}}=> {[[NS1]]})
+; CHECK: NULL
; CHECK: file_names[ [[F1]]]{{.*}}debug-info-namespace.cpp
; CHECK: file_names[ [[F2]]]{{.*}}foo.cpp
@@ -199,7 +203,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !19, metadata !21, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/blaikie/dev/llvm/build/clang/debug//usr/local/google/home/blaikie/dev/llvm/src/tools/clang/test/CodeGenCXX/debug-info-namespace.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"/usr/local/google/home/blaikie/dev/llvm/src/tools/clang/test/CodeGenCXX/debug-info-namespace.cpp", metadata !"/usr/local/google/home/blaikie/dev/llvm/build/clang/debug"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !10, metadata !14}
!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"f1", metadata !"f1", metadata !"_ZN1A1B2f1Ev", i32 3, metadata !8, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN1A1B2f1Ev, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [f1]
!5 = metadata !{metadata !"foo.cpp", metadata !"/usr/local/google/home/blaikie/dev/llvm/build/clang/debug"}
@@ -236,8 +240,8 @@ attributes #1 = { nounwind readnone }
!36 = metadata !{i32 786468}
!37 = metadata !{i32 786440, metadata !14, metadata !10, i32 21} ; [ DW_TAG_imported_declaration ]
!38 = metadata !{i32 786440, metadata !14, metadata !20, i32 22} ; [ DW_TAG_imported_declaration ]
-!39 = metadata !{i32 786490, metadata !14, metadata !7, i32 24, metadata !"X"} ; [ DW_TAG_imported_module ]
-!40 = metadata !{i32 786490, metadata !14, metadata !39, i32 25, metadata !"Y"} ; [ DW_TAG_imported_module ]
+!39 = metadata !{i32 786440, metadata !14, metadata !7, i32 24, metadata !"X"} ; [ DW_TAG_imported_declaration ]
+!40 = metadata !{i32 786440, metadata !14, metadata !39, i32 25, metadata !"Y"} ; [ DW_TAG_imported_declaration ]
!41 = metadata !{i32 3, i32 0, metadata !4, null}
!42 = metadata !{i32 786689, metadata !10, metadata !"", metadata !15, i32 16777220, metadata !13, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [line 4]
!43 = metadata !{i32 4, i32 0, metadata !10, null}
diff --git a/test/DebugInfo/namespace_function_definition.ll b/test/DebugInfo/namespace_function_definition.ll
new file mode 100644
index 000000000000..590f2b301ffe
--- /dev/null
+++ b/test/DebugInfo/namespace_function_definition.ll
@@ -0,0 +1,44 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; Generated from clang with the following source:
+; namespace ns {
+; void func() {
+; }
+; }
+
+; CHECK: DW_TAG_namespace
+; CHECK-NEXT: DW_AT_name {{.*}} "ns"
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_low_pc
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZN2ns4funcEv"
+; CHECK: NULL
+; CHECK: NULL
+
+; Function Attrs: nounwind uwtable
+define void @_ZN2ns4funcEv() #0 {
+entry:
+ ret void, !dbg !11
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/namespace_function_definition.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"namespace_function_definition.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"func", metadata !"func", metadata !"_ZN2ns4funcEv", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZN2ns4funcEv, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [func]
+!5 = metadata !{i32 786489, metadata !1, null, metadata !"ns", i32 1} ; [ DW_TAG_namespace ] [ns] [line 1]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null}
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5.0 "}
+!11 = metadata !{i32 3, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/namespace_inline_function_definition.ll b/test/DebugInfo/namespace_inline_function_definition.ll
new file mode 100644
index 000000000000..65fa4a442dc6
--- /dev/null
+++ b/test/DebugInfo/namespace_inline_function_definition.ll
@@ -0,0 +1,92 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; Generated from clang with the following source. Note that the definition of
+; the inline function follows its use to work around another bug that should be
+; fixed soon.
+; namespace ns {
+; int func(int i);
+; }
+; extern int x;
+; int main() { return ns::func(x); }
+; int __attribute__((always_inline)) ns::func(int i) { return i * 2; }
+
+; CHECK: DW_TAG_namespace
+; CHECK-NEXT: DW_AT_name {{.*}} "ns"
+; CHECK-NOT: DW_TAG
+; CHECK: [[ABS_DEF:0x.*]]: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZN2ns4funcEi"
+; CHECK-NOT: DW_TAG
+; CHECK: [[ABS_PRM:0x.*]]: DW_TAG_formal_parameter
+; CHECK: NULL
+; CHECK-NOT: NULL
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_abstract_origin {{.*}} {[[ABS_DEF]]}
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_formal_parameter
+; CHECK: DW_AT_abstract_origin {{.*}} {[[ABS_PRM]]}
+; CHECK: NULL
+; CHECK: NULL
+; CHECK: NULL
+
+@x = external global i32
+
+; Function Attrs: uwtable
+define i32 @main() #0 {
+entry:
+ %i.addr.i = alloca i32, align 4
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* @x, align 4, !dbg !16
+ store i32 %0, i32* %i.addr.i, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %i.addr.i}, metadata !17), !dbg !18
+ %1 = load i32* %i.addr.i, align 4, !dbg !18
+ %mul.i = mul nsw i32 %1, 2, !dbg !18
+ ret i32 %mul.i, !dbg !16
+}
+
+; Function Attrs: alwaysinline nounwind uwtable
+define i32 @_ZN2ns4funcEi(i32 %i) #1 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %i.addr}, metadata !17), !dbg !19
+ %0 = load i32* %i.addr, align 4, !dbg !19
+ %mul = mul nsw i32 %0, 2, !dbg !19
+ ret i32 %mul, !dbg !19
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { alwaysinline nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!13, !14}
+!llvm.ident = !{!15}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/namespace_inline_function_definition.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"namespace_inline_function_definition.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !9}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [main]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/namespace_inline_function_definition.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786478, metadata !1, metadata !10, metadata !"func", metadata !"func", metadata !"_ZN2ns4funcEi", i32 6, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @_ZN2ns4funcEi, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [func]
+!10 = metadata !{i32 786489, metadata !1, null, metadata !"ns", i32 1} ; [ DW_TAG_namespace ] [ns] [line 1]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{metadata !8, metadata !8}
+!13 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!14 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!15 = metadata !{metadata !"clang version 3.5.0 "}
+!16 = metadata !{i32 5, i32 0, metadata !4, null}
+!17 = metadata !{i32 786689, metadata !9, metadata !"i", metadata !5, i32 16777222, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 6]
+!18 = metadata !{i32 6, i32 0, metadata !9, metadata !16}
+!19 = metadata !{i32 6, i32 0, metadata !9, null}
diff --git a/test/DebugInfo/restrict.ll b/test/DebugInfo/restrict.ll
new file mode 100644
index 000000000000..ceb844f16efe
--- /dev/null
+++ b/test/DebugInfo/restrict.ll
@@ -0,0 +1,53 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -dwarf-version=2 -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck --check-prefix=CHECK --check-prefix=V2 %s
+; RUN: %llc_dwarf -dwarf-version=3 -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck --check-prefix=CHECK --check-prefix=V3 %s
+
+; CHECK: DW_AT_name {{.*}} "dst"
+; V2: DW_AT_type {{.*}} {[[PTR:0x.*]]}
+; V3: DW_AT_type {{.*}} {[[RESTRICT:0x.*]]}
+; V3: [[RESTRICT]]: {{.*}}DW_TAG_restrict_type
+; V3-NEXT: DW_AT_type {{.*}} {[[PTR:0x.*]]}
+; CHECK: [[PTR]]: {{.*}}DW_TAG_pointer_type
+; CHECK-NOT: DW_AT_type
+
+; Generated with clang from:
+; void foo(void* __restrict__ dst) {
+; }
+
+
+; Function Attrs: nounwind uwtable
+define void @_Z3fooPv(i8* noalias %dst) #0 {
+entry:
+ %dst.addr = alloca i8*, align 8
+ store i8* %dst, i8** %dst.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i8** %dst.addr}, metadata !13), !dbg !14
+ ret void, !dbg !15
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/restrict.c] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"restrict.c", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"_Z3fooPv", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i8*)* @_Z3fooPv, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/restrict.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8}
+!8 = metadata !{i32 786487, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !9} ; [ DW_TAG_restrict_type ] [line 0, size 0, align 0, offset 0] [from ]
+!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{metadata !"clang version 3.5.0 "}
+!13 = metadata !{i32 786689, metadata !4, metadata !"dst", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [dst] [line 1]
+!14 = metadata !{i32 1, i32 0, metadata !4, null}
+!15 = metadata !{i32 2, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/sugared-constants.ll b/test/DebugInfo/sugared-constants.ll
new file mode 100644
index 000000000000..0d2ebe663c00
--- /dev/null
+++ b/test/DebugInfo/sugared-constants.ll
@@ -0,0 +1,82 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj %s -o - | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+; Use correct signedness when emitting constants of derived (sugared) types.
+
+; Test compiled to IR from clang with -O1 and the following source:
+
+; void func(int);
+; void func(unsigned);
+; void func(char16_t);
+; int main() {
+; const int i = 42;
+; func(i);
+; const unsigned j = 117;
+; func(j);
+; char16_t c = 7;
+; func(c);
+; }
+
+; CHECK: DW_AT_const_value [DW_FORM_sdata] (42)
+; CHECK: DW_AT_const_value [DW_FORM_udata] (117)
+; CHECK: DW_AT_const_value [DW_FORM_udata] (7)
+
+; Function Attrs: uwtable
+define i32 @main() #0 {
+entry:
+ tail call void @llvm.dbg.value(metadata !20, i64 0, metadata !10), !dbg !21
+ tail call void @_Z4funci(i32 42), !dbg !22
+ tail call void @llvm.dbg.value(metadata !23, i64 0, metadata !12), !dbg !24
+ tail call void @_Z4funcj(i32 117), !dbg !25
+ tail call void @llvm.dbg.value(metadata !26, i64 0, metadata !15), !dbg !27
+ tail call void @_Z4funcDs(i16 zeroext 7), !dbg !28
+ ret i32 0, !dbg !29
+}
+
+declare void @_Z4funci(i32) #1
+
+declare void @_Z4funcj(i32) #1
+
+declare void @_Z4funcDs(i16 zeroext) #1
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!17, !18}
+!llvm.ident = !{!19}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/const.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"const.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 4, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* @main, null, null, metadata !9, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [main]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/const.cpp]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{metadata !10, metadata !12, metadata !15}
+!10 = metadata !{i32 786688, metadata !4, metadata !"i", metadata !5, i32 5, metadata !11, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [i] [line 5]
+!11 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !8} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from int]
+!12 = metadata !{i32 786688, metadata !4, metadata !"j", metadata !5, i32 7, metadata !13, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [j] [line 7]
+!13 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !14} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from unsigned int]
+!14 = metadata !{i32 786468, null, null, metadata !"unsigned int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ] [unsigned int] [line 0, size 32, align 32, offset 0, enc DW_ATE_unsigned]
+!15 = metadata !{i32 786688, metadata !4, metadata !"c", metadata !5, i32 9, metadata !16, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [c] [line 9]
+!16 = metadata !{i32 786468, null, null, metadata !"char16_t", i32 0, i64 16, i64 16, i64 0, i32 0, i32 16} ; [ DW_TAG_base_type ] [char16_t] [line 0, size 16, align 16, offset 0, enc DW_ATE_UTF]
+!17 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!18 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!19 = metadata !{metadata !"clang version 3.5.0 "}
+!20 = metadata !{i32 42}
+!21 = metadata !{i32 5, i32 0, metadata !4, null}
+!22 = metadata !{i32 6, i32 0, metadata !4, null}
+!23 = metadata !{i32 117}
+!24 = metadata !{i32 7, i32 0, metadata !4, null}
+!25 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!26 = metadata !{i16 7}
+!27 = metadata !{i32 9, i32 0, metadata !4, null}
+!28 = metadata !{i32 10, i32 0, metadata !4, null}
+!29 = metadata !{i32 11, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/template-recursive-void.ll b/test/DebugInfo/template-recursive-void.ll
index 2ed57a6576f3..ffbc30e7504c 100644
--- a/test/DebugInfo/template-recursive-void.ll
+++ b/test/DebugInfo/template-recursive-void.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -O0 -filetype=obj < %s > %t
+; RUN: %llc_dwarf -O0 -filetype=obj < %s > %t
; RUN: llvm-dwarfdump %t | FileCheck %s
; This was pulled from clang's debug-info-template-recursive.cpp test.
@@ -27,7 +27,7 @@
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (trunk 187958) (llvm/trunk 187964)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/echristo/tmp/debug-info-template-recursive.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"debug-info-template-recursive.cpp", metadata !"/usr/local/google/home/echristo/tmp"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786484, i32 0, null, metadata !"filters", metadata !"filters", metadata !"", metadata !5, i32 10, metadata !6, i32 0, i32 1, %class.bar* @filters, null} ; [ DW_TAG_variable ] [filters] [line 10] [def]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/usr/local/google/home/echristo/tmp/debug-info-template-recursive.cpp]
diff --git a/test/DebugInfo/tu-composite.ll b/test/DebugInfo/tu-composite.ll
index f838eca72a67..7a8ff5712659 100644
--- a/test/DebugInfo/tu-composite.ll
+++ b/test/DebugInfo/tu-composite.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -filetype=obj -O0 < %s > %t
+; RUN: %llc_dwarf -filetype=obj -O0 < %s > %t
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
; CHECK: [[TYPE:.*]]: DW_TAG_structure_type
; Make sure we correctly handle containing type of a struct being a type identifier.
@@ -125,7 +125,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !30, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [tmp.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"tmp.cpp", metadata !"."}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4, metadata !18, metadata !19, metadata !22, metadata !23, metadata !24}
!4 = metadata !{i32 786451, metadata !1, null, metadata !"C", i32 1, i64 64, i64 64, i32 0, i32 0, null, metadata !5, i32 0, metadata !"_ZTS1C", null, metadata !"_ZTS1C"} ; [ DW_TAG_structure_type ] [C] [line 1, size 64, align 64, offset 0] [def] [from ]
!5 = metadata !{metadata !6, metadata !13}
diff --git a/test/DebugInfo/tu-member-pointer.ll b/test/DebugInfo/tu-member-pointer.ll
index b746d3b6d4f1..cd37a9892f42 100644
--- a/test/DebugInfo/tu-member-pointer.ll
+++ b/test/DebugInfo/tu-member-pointer.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -filetype=obj -O0 < %s > %t
+; RUN: %llc_dwarf -filetype=obj -O0 < %s > %t
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
; CHECK: DW_TAG_ptr_to_member_type
; CHECK-NEXT: DW_AT_type [DW_FORM_ref4] (cu + {{.*}} => {[[TYPE:0x[0-9a-f]+]]})
@@ -18,7 +18,7 @@
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !2, metadata !5, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [foo.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"foo.cpp", metadata !"."}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !1, null, metadata !"Foo", i32 1, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, metadata !"_ZTS3Foo"} ; [ DW_TAG_structure_type ] [Foo] [line 1, size 0, align 0, offset 0] [decl] [from ]
!5 = metadata !{metadata !6}
diff --git a/test/DebugInfo/two-cus-from-same-file.ll b/test/DebugInfo/two-cus-from-same-file.ll
index 8589840a6955..2ab82a9a244a 100644
--- a/test/DebugInfo/two-cus-from-same-file.ll
+++ b/test/DebugInfo/two-cus-from-same-file.ll
@@ -5,7 +5,7 @@
; REQUIRES: object-emission
-; RUN: llc %s -o %t -filetype=obj -O0
+; RUN: %llc_dwarf %s -o %t -filetype=obj -O0
; RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck %s
; ModuleID = 'test.bc'
@@ -36,7 +36,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.module.flags = !{!33}
!0 = metadata !{i32 786449, metadata !32, i32 12, metadata !"clang version 3.2 (trunk 156513)", i1 true, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 0}
+!1 = metadata !{}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 786478, metadata !32, metadata !6, metadata !"foo", metadata !"foo", metadata !"", i32 5, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @foo, null, null, metadata !1, i32 5} ; [ DW_TAG_subprogram ]
!6 = metadata !{i32 786473, metadata !32} ; [ DW_TAG_file_type ]
@@ -51,8 +51,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!16 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !17} ; [ DW_TAG_pointer_type ]
!17 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !18} ; [ DW_TAG_pointer_type ]
!18 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
-!19 = metadata !{metadata !20}
-!20 = metadata !{metadata !21, metadata !22}
+!19 = metadata !{metadata !21, metadata !22}
!21 = metadata !{i32 786689, metadata !12, metadata !"argc", metadata !6, i32 16777227, metadata !15, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!22 = metadata !{i32 786689, metadata !12, metadata !"argv", metadata !6, i32 33554443, metadata !16, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!23 = metadata !{i32 6, i32 3, metadata !24, null}
diff --git a/test/DebugInfo/typedef.ll b/test/DebugInfo/typedef.ll
new file mode 100644
index 000000000000..40cecdfc3930
--- /dev/null
+++ b/test/DebugInfo/typedef.ll
@@ -0,0 +1,32 @@
+; REQUIRES: object-emission
+
+; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+
+; From source:
+; typedef void x;
+; x *y;
+
+; Check that a typedef with no DW_AT_type is produced. The absence of a type is used to imply the 'void' type.
+
+; CHECK: DW_TAG_typedef
+; CHECK-NOT: DW_AT_type
+; CHECK: {{DW_TAG|NULL}}
+
+@y = global i8* null, align 8
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !2, metadata !3, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/typedef.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"typedef.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786484, i32 0, null, metadata !"y", metadata !"y", metadata !"", metadata !5, i32 2, metadata !6, i32 0, i32 1, i8** @y, null} ; [ DW_TAG_variable ] [y] [line 2] [def]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/typedef.cpp]
+!6 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !7} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from x]
+!7 = metadata !{i32 786454, metadata !1, null, metadata !"x", i32 1, i64 0, i64 0, i64 0, i32 0, null} ; [ DW_TAG_typedef ] [x] [line 1, size 0, align 0, offset 0] [from ]
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5.0 "}
+
diff --git a/test/DebugInfo/unconditional-branch.ll b/test/DebugInfo/unconditional-branch.ll
new file mode 100644
index 000000000000..6c31375f4644
--- /dev/null
+++ b/test/DebugInfo/unconditional-branch.ll
@@ -0,0 +1,64 @@
+; REQUIRES: object-emission
+; PR 19261
+
+; RUN: %llc_dwarf -fast-isel=false -O0 -filetype=obj %s -o %t
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; CHECK: {{0x[0-9a-f]+}} 1 0 1 0 0 is_stmt
+; CHECK: {{0x[0-9a-f]+}} 2 0 1 0 0 is_stmt
+; CHECK: {{0x[0-9a-f]+}} 4 0 1 0 0 is_stmt
+
+; IR generated from clang -O0 -g with the following source:
+;void foo(int i){
+; switch(i){
+; default:
+; break;
+; }
+; return;
+;}
+
+; Function Attrs: nounwind
+define void @foo(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %i.addr}, metadata !12), !dbg !13
+ %0 = load i32* %i.addr, align 4, !dbg !14
+ switch i32 %0, label %sw.default [
+ ], !dbg !14
+
+sw.default: ; preds = %entry
+ br label %sw.epilog, !dbg !15
+
+sw.epilog: ; preds = %sw.default
+ ret void, !dbg !17
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 (204712)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [D:\work\EPRs\396363/test.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"test.c", metadata !"D:\5Cwork\5CEPRs\5C396363"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [D:\work\EPRs\396363/test.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5.0 (204712)"}
+!12 = metadata !{i32 786689, metadata !4, metadata !"i", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 1]
+!13 = metadata !{i32 1, i32 0, metadata !4, null}
+!14 = metadata !{i32 2, i32 0, metadata !4, null}
+!15 = metadata !{i32 4, i32 0, metadata !16, null}
+!16 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [D:\work\EPRs\396363/test.c]
+!17 = metadata !{i32 6, i32 0, metadata !4, null}
diff --git a/test/DebugInfo/varargs.ll b/test/DebugInfo/varargs.ll
new file mode 100644
index 000000000000..ddfcd858f539
--- /dev/null
+++ b/test/DebugInfo/varargs.ll
@@ -0,0 +1,99 @@
+; RUN: %llc_dwarf -O0 -filetype=obj -o %t.o %s
+; RUN: llvm-dwarfdump -debug-dump=info %t.o | FileCheck %s
+; REQUIRES: object-emission
+;
+; Test debug info for variadic function arguments.
+; Created from tools/clang/tests/CodeGenCXX/debug-info-varargs.cpp
+;
+; The ... parameter of a variadic function should be emitted as
+; DW_TAG_unspecified_parameters.
+;
+; Normal variadic function.
+; void b(int c, ...);
+;
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "a"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_unspecified_parameters
+;
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "b"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_unspecified_parameters
+;
+; Variadic C++ member function.
+; struct A { void a(int c, ...); }
+;
+; Variadic function pointer.
+; void (*fptr)(int, ...);
+;
+; CHECK: DW_TAG_subroutine_type
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_formal_parameter
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_unspecified_parameters
+;
+; ModuleID = 'llvm/tools/clang/test/CodeGenCXX/debug-info-varargs.cpp'
+
+%struct.A = type { i8 }
+
+; Function Attrs: nounwind ssp uwtable
+define void @_Z1biz(i32 %c, ...) #0 {
+ %1 = alloca i32, align 4
+ %a = alloca %struct.A, align 1
+ %fptr = alloca void (i32, ...)*, align 8
+ store i32 %c, i32* %1, align 4
+ call void @llvm.dbg.declare(metadata !{i32* %1}, metadata !21), !dbg !22
+ call void @llvm.dbg.declare(metadata !{%struct.A* %a}, metadata !23), !dbg !24
+ call void @llvm.dbg.declare(metadata !{void (i32, ...)** %fptr}, metadata !25), !dbg !27
+ store void (i32, ...)* @_Z1biz, void (i32, ...)** %fptr, align 8, !dbg !27
+ ret void, !dbg !28
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind ssp uwtable }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!18, !19}
+!llvm.ident = !{!20}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !13, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [llvm/tools/clang/test/CodeGenCXX/debug-info-varargs.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"llvm/tools/clang/test/CodeGenCXX/debug-info-varargs.cpp", metadata !"radar/13690847"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"A", i32 3, i64 8, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_structure_type ] [A] [line 3, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"a", metadata !"a", metadata !"_ZN1A1aEiz", i32 6, metadata !7, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !12, i32 6} ; [ DW_TAG_subprogram ] [line 6] [a]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{null, metadata !9, metadata !10, metadata !11}
+!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!10 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!11 = metadata !{i32 786456}
+!12 = metadata !{i32 786468}
+!13 = metadata !{metadata !14}
+!14 = metadata !{i32 786478, metadata !1, metadata !15, metadata !"b", metadata !"b", metadata !"_Z1biz", i32 13, metadata !16, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32, ...)* @_Z1biz, null, null, metadata !2, i32 13} ; [ DW_TAG_subprogram ] [line 13] [def] [b]
+!15 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [llvm/tools/clang/test/CodeGenCXX/debug-info-varargs.cpp]
+!16 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !17, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!17 = metadata !{null, metadata !10, metadata !11}
+!18 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!19 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!20 = metadata !{metadata !"clang version 3.5 "}
+!21 = metadata !{i32 786689, metadata !14, metadata !"c", metadata !15, i32 16777229, metadata !10, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [c] [line 13]
+!22 = metadata !{i32 13, i32 0, metadata !14, null}
+!23 = metadata !{i32 786688, metadata !14, metadata !"a", metadata !15, i32 16, metadata !4, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 16]
+!24 = metadata !{i32 16, i32 0, metadata !14, null}
+!25 = metadata !{i32 786688, metadata !14, metadata !"fptr", metadata !15, i32 18, metadata !26, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [fptr] [line 18]
+!26 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!27 = metadata !{i32 18, i32 0, metadata !14, null}
+!28 = metadata !{i32 22, i32 0, metadata !14, null}
diff --git a/test/DebugInfo/version.ll b/test/DebugInfo/version.ll
index f4dde0a24988..9a201ebf3091 100644
--- a/test/DebugInfo/version.ll
+++ b/test/DebugInfo/version.ll
@@ -1,6 +1,6 @@
; REQUIRES: object-emission
-; RUN: llc -O0 -filetype=obj < %s > %t
+; RUN: %llc_dwarf -O0 -filetype=obj < %s > %t
; RUN: llvm-dwarfdump %t | FileCheck %s
; Make sure we are generating DWARF version 3 when module flag says so.
@@ -20,7 +20,7 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 185475)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ]
!1 = metadata !{metadata !"CodeGen/dwarf-version.c", metadata !"test"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 6, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [main]
!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ]
diff --git a/test/ExecutionEngine/MCJIT/eh-lg-pic.ll b/test/ExecutionEngine/MCJIT/eh-lg-pic.ll
index 7c0227d74ed6..539c890f9186 100644
--- a/test/ExecutionEngine/MCJIT/eh-lg-pic.ll
+++ b/test/ExecutionEngine/MCJIT/eh-lg-pic.ll
@@ -1,5 +1,5 @@
; RUN: %lli_mcjit -relocation-model=pic -code-model=large %s
-; XFAIL: cygwin, win32, mingw, mips, powerpc64, i686, i386, aarch64, arm
+; XFAIL: cygwin, win32, mingw, mips, i686, i386, aarch64, arm
declare i8* @__cxa_allocate_exception(i64)
declare void @__cxa_throw(i8*, i8*, i8*)
declare i32 @__gxx_personality_v0(...)
diff --git a/test/ExecutionEngine/MCJIT/lit.local.cfg b/test/ExecutionEngine/MCJIT/lit.local.cfg
index fdb36ee1d71c..f98140357736 100644
--- a/test/ExecutionEngine/MCJIT/lit.local.cfg
+++ b/test/ExecutionEngine/MCJIT/lit.local.cfg
@@ -1,5 +1,5 @@
root = config.root
-targets = set(root.targets_to_build.split())
+targets = root.targets
if ('X86' in targets) | ('AArch64' in targets) | ('ARM' in targets) | \
('Mips' in targets) | ('PowerPC' in targets) | ('SystemZ' in targets):
config.unsupported = False
@@ -8,7 +8,7 @@ else:
# FIXME: autoconf and cmake produce different arch names. We should normalize
# them before getting here.
-if root.host_arch not in ['i386', 'x86', 'x86_64',
+if root.host_arch not in ['i386', 'x86', 'x86_64', 'AMD64',
'AArch64', 'ARM', 'Mips', 'PowerPC', 'ppc64', 'SystemZ']:
config.unsupported = True
diff --git a/test/ExecutionEngine/MCJIT/load-object-a.ll b/test/ExecutionEngine/MCJIT/load-object-a.ll
new file mode 100644
index 000000000000..9d27e41c6a50
--- /dev/null
+++ b/test/ExecutionEngine/MCJIT/load-object-a.ll
@@ -0,0 +1,24 @@
+; This first line will generate the .o files for the next run line
+; RUN: rm -rf %t.cachedir %t.cachedir2 %t.cachedir3
+; RUN: mkdir -p %t.cachedir %t.cachedir2 %t.cachedir3
+; RUN: %lli_mcjit -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -enable-cache-manager -object-cache-dir=%t.cachedir %s
+
+; Collect generated objects.
+; RUN: find %t.cachedir -type f -name 'multi-module-?.o' -exec mv -v '{}' %t.cachedir2 ';'
+
+; This line tests MCJIT object loading
+; RUN: %lli_mcjit -extra-object=%t.cachedir2/multi-module-b.o -extra-object=%t.cachedir2/multi-module-c.o %s
+
+; These lines put the object files into an archive
+; RUN: llvm-ar r %t.cachedir3/load-object.a %t.cachedir2/multi-module-b.o
+; RUN: llvm-ar r %t.cachedir3/load-object.a %t.cachedir2/multi-module-c.o
+
+; This line tests MCJIT archive loading
+; RUN: %lli_mcjit -extra-archive=%t.cachedir3/load-object.a %s
+
+declare i32 @FB()
+
+define i32 @main() {
+ %r = call i32 @FB( ) ; <i32> [#uses=1]
+ ret i32 %r
+}
diff --git a/test/ExecutionEngine/MCJIT/non-extern-addend-smallcodemodel.ll b/test/ExecutionEngine/MCJIT/non-extern-addend-smallcodemodel.ll
new file mode 100644
index 000000000000..21db67dad669
--- /dev/null
+++ b/test/ExecutionEngine/MCJIT/non-extern-addend-smallcodemodel.ll
@@ -0,0 +1,25 @@
+; RUN: %lli_mcjit -code-model=small %s > /dev/null
+; XFAIL: mips
+;
+; FIXME: Merge this file with non-extern-addend.ll once AArch64 supports PC-rel
+; relocations in ELF. (The code is identical, only the run line differs).
+;
+define i32 @foo(i32 %x, i32 %y, double %d) {
+entry:
+ %d.int64 = bitcast double %d to i64
+ %d.top64 = lshr i64 %d.int64, 32
+ %d.top = trunc i64 %d.top64 to i32
+ %d.bottom = trunc i64 %d.int64 to i32
+ %topCorrect = icmp eq i32 %d.top, 3735928559
+ %bottomCorrect = icmp eq i32 %d.bottom, 4277009102
+ %right = and i1 %topCorrect, %bottomCorrect
+ %nRight = xor i1 %right, true
+ %retVal = zext i1 %nRight to i32
+ ret i32 %retVal
+}
+
+define i32 @main() {
+entry:
+ %call = call i32 @foo(i32 0, i32 1, double 0xDEADBEEFFEEDFACE)
+ ret i32 %call
+}
diff --git a/test/ExecutionEngine/MCJIT/non-extern-addend.ll b/test/ExecutionEngine/MCJIT/non-extern-addend.ll
index 3a6e63441200..e0827f6add93 100644
--- a/test/ExecutionEngine/MCJIT/non-extern-addend.ll
+++ b/test/ExecutionEngine/MCJIT/non-extern-addend.ll
@@ -1,12 +1,21 @@
; RUN: %lli_mcjit %s > /dev/null
-define i32 @foo(i32 %X, i32 %Y, double %A) {
- %cond212 = fcmp ueq double %A, 2.000000e+00 ; <i1> [#uses=1]
- %cast110 = zext i1 %cond212 to i32 ; <i32> [#uses=1]
- ret i32 %cast110
+define i32 @foo(i32 %x, i32 %y, double %d) {
+entry:
+ %d.int64 = bitcast double %d to i64
+ %d.top64 = lshr i64 %d.int64, 32
+ %d.top = trunc i64 %d.top64 to i32
+ %d.bottom = trunc i64 %d.int64 to i32
+ %topCorrect = icmp eq i32 %d.top, 3735928559
+ %bottomCorrect = icmp eq i32 %d.bottom, 4277009102
+ %right = and i1 %topCorrect, %bottomCorrect
+ %nRight = xor i1 %right, true
+ %retVal = zext i1 %nRight to i32
+ ret i32 %retVal
}
define i32 @main() {
- %reg212 = call i32 @foo( i32 0, i32 1, double 1.000000e+00 ) ; <i32> [#uses=1]
- ret i32 %reg212
+entry:
+ %call = call i32 @foo(i32 0, i32 1, double 0xDEADBEEFFEEDFACE)
+ ret i32 %call
}
diff --git a/test/ExecutionEngine/MCJIT/remote/Inputs/cross-module-b.ll b/test/ExecutionEngine/MCJIT/remote/Inputs/cross-module-b.ll
index 687011741103..bc13b1de8ef6 100644
--- a/test/ExecutionEngine/MCJIT/remote/Inputs/cross-module-b.ll
+++ b/test/ExecutionEngine/MCJIT/remote/Inputs/cross-module-b.ll
@@ -1,6 +1,6 @@
declare i32 @FA()
-define i32 @FB() {
+define i32 @FB() nounwind {
%r = call i32 @FA( ) ; <i32> [#uses=1]
ret i32 %r
}
diff --git a/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-b.ll b/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-b.ll
index 103b601e7f08..0b8d5eb37adb 100644
--- a/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-b.ll
+++ b/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-b.ll
@@ -1,6 +1,6 @@
declare i32 @FC()
-define i32 @FB() {
+define i32 @FB() nounwind {
%r = call i32 @FC( ) ; <i32> [#uses=1]
ret i32 %r
}
diff --git a/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-c.ll b/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-c.ll
index b39306be9e3b..98350a8c106e 100644
--- a/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-c.ll
+++ b/test/ExecutionEngine/MCJIT/remote/Inputs/multi-module-c.ll
@@ -1,4 +1,4 @@
-define i32 @FC() {
+define i32 @FC() nounwind {
ret i32 0
}
diff --git a/test/ExecutionEngine/MCJIT/remote/cross-module-a.ll b/test/ExecutionEngine/MCJIT/remote/cross-module-a.ll
index 094d362262c5..b540bfa3bd6e 100644
--- a/test/ExecutionEngine/MCJIT/remote/cross-module-a.ll
+++ b/test/ExecutionEngine/MCJIT/remote/cross-module-a.ll
@@ -1,13 +1,12 @@
-; RUN: %lli_mcjit -extra-module=%p/Inputs/cross-module-b.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target %s > /dev/null
+; RUN: %lli_mcjit -extra-module=%p/Inputs/cross-module-b.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext %s > /dev/null
declare i32 @FB()
-define i32 @FA() {
+define i32 @FA() nounwind {
ret i32 0
}
-define i32 @main() {
+define i32 @main() nounwind {
%r = call i32 @FB( ) ; <i32> [#uses=1]
ret i32 %r
}
-
diff --git a/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll b/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll
index bdaa9a045c8f..589ba2f6d382 100644
--- a/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/MCJIT/remote/cross-module-sm-pic-a.ll
@@ -1,4 +1,4 @@
-; RUN: %lli_mcjit -extra-module=%p/Inputs/cross-module-b.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target -relocation-model=pic -code-model=small %s > /dev/null
+; RUN: %lli_mcjit -extra-module=%p/Inputs/cross-module-b.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext -relocation-model=pic -code-model=small %s > /dev/null
; XFAIL: mips, i686, i386, arm
declare i32 @FB()
diff --git a/test/ExecutionEngine/MCJIT/remote/lit.local.cfg b/test/ExecutionEngine/MCJIT/remote/lit.local.cfg
index 6b192ae44be7..625d82dc6189 100644
--- a/test/ExecutionEngine/MCJIT/remote/lit.local.cfg
+++ b/test/ExecutionEngine/MCJIT/remote/lit.local.cfg
@@ -1,3 +1,8 @@
if 'armv4' in config.root.target_triple or \
- 'armv5' in config.root.target_triple:
+ 'armv5' in config.root.target_triple:
config.unsupported = True
+
+# This is temporary, until Remote MCJIT works on ARM
+# See http://llvm.org/bugs/show_bug.cgi?id=18057
+#if 'armv7' in config.root.target_triple:
+# config.unsupported = True
diff --git a/test/ExecutionEngine/MCJIT/remote/multi-module-a.ll b/test/ExecutionEngine/MCJIT/remote/multi-module-a.ll
index 91d0387376ca..fbbb8bdf4bc2 100644
--- a/test/ExecutionEngine/MCJIT/remote/multi-module-a.ll
+++ b/test/ExecutionEngine/MCJIT/remote/multi-module-a.ll
@@ -1,8 +1,8 @@
-; RUN: %lli_mcjit -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target %s > /dev/null
+; RUN: %lli_mcjit -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext %s > /dev/null
declare i32 @FB()
-define i32 @main() {
+define i32 @main() nounwind {
%r = call i32 @FB( ) ; <i32> [#uses=1]
ret i32 %r
}
diff --git a/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll b/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll
index 73228e458f04..9c2316959910 100644
--- a/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll
+++ b/test/ExecutionEngine/MCJIT/remote/multi-module-sm-pic-a.ll
@@ -1,4 +1,4 @@
-; RUN: %lli_mcjit -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target -relocation-model=pic -code-model=small %s > /dev/null
+; RUN: %lli_mcjit -extra-module=%p/Inputs/multi-module-b.ll -extra-module=%p/Inputs/multi-module-c.ll -disable-lazy-compilation=true -remote-mcjit -mcjit-remote-process=lli-child-target%exeext -relocation-model=pic -code-model=small %s > /dev/null
; XFAIL: mips, i686, i386, arm
declare i32 @FB()
diff --git a/test/ExecutionEngine/MCJIT/remote/simpletest-remote.ll b/test/ExecutionEngine/MCJIT/remote/simpletest-remote.ll
index d10a4117a0dd..6c8ab3d5369b 100644
--- a/test/ExecutionEngine/MCJIT/remote/simpletest-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/simpletest-remote.ll
@@ -1,10 +1,10 @@
-; RUN: %lli_mcjit -remote-mcjit -mcjit-remote-process=lli-child-target %s > /dev/null
+; RUN: %lli_mcjit -remote-mcjit -mcjit-remote-process=lli-child-target%exeext %s > /dev/null
-define i32 @bar() {
+define i32 @bar() nounwind {
ret i32 0
}
-define i32 @main() {
+define i32 @main() nounwind {
%r = call i32 @bar( ) ; <i32> [#uses=1]
ret i32 %r
}
diff --git a/test/ExecutionEngine/MCJIT/remote/stubs-remote.ll b/test/ExecutionEngine/MCJIT/remote/stubs-remote.ll
index 97932bc389ac..48b939bbe35b 100644
--- a/test/ExecutionEngine/MCJIT/remote/stubs-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/stubs-remote.ll
@@ -1,4 +1,4 @@
-; RUN: %lli_mcjit -remote-mcjit -disable-lazy-compilation=false -mcjit-remote-process=lli-child-target %s
+; RUN: %lli_mcjit -remote-mcjit -disable-lazy-compilation=false -mcjit-remote-process=lli-child-target%exeext %s
; XFAIL: *
; This test should fail until remote symbol resolution is supported.
diff --git a/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll b/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
index 63280895a9a3..e07178ed283e 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
@@ -1,4 +1,4 @@
-; RUN: %lli_mcjit -remote-mcjit -O0 -disable-lazy-compilation=false -mcjit-remote-process=lli-child-target %s
+; RUN: %lli_mcjit -remote-mcjit -O0 -disable-lazy-compilation=false -mcjit-remote-process=lli-child-target%exeext %s
; The intention of this test is to verify that symbols mapped to COMMON in ELF
; work as expected.
diff --git a/test/ExecutionEngine/MCJIT/remote/test-data-align-remote.ll b/test/ExecutionEngine/MCJIT/remote/test-data-align-remote.ll
index 6b2b97bc2d7e..129350b63eb0 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-data-align-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-data-align-remote.ll
@@ -1,9 +1,9 @@
-; RUN: %lli_mcjit -remote-mcjit -O0 -mcjit-remote-process=lli-child-target %s
+; RUN: %lli_mcjit -remote-mcjit -O0 -mcjit-remote-process=lli-child-target%exeext %s
; Check that a variable is always aligned as specified.
@var = global i32 0, align 32
-define i32 @main() {
+define i32 @main() nounwind {
%addr = ptrtoint i32* @var to i64
%mask = and i64 %addr, 31
%tst = icmp eq i64 %mask, 0
diff --git a/test/ExecutionEngine/MCJIT/remote/test-fp-no-external-funcs-remote.ll b/test/ExecutionEngine/MCJIT/remote/test-fp-no-external-funcs-remote.ll
index a8a93a8dc3bf..8eec0f229885 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-fp-no-external-funcs-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-fp-no-external-funcs-remote.ll
@@ -1,6 +1,6 @@
-; RUN: %lli_mcjit -remote-mcjit -mcjit-remote-process=lli-child-target %s > /dev/null
+; RUN: %lli_mcjit -remote-mcjit -mcjit-remote-process=lli-child-target%exeext %s > /dev/null
-define double @test(double* %DP, double %Arg) {
+define double @test(double* %DP, double %Arg) nounwind {
%D = load double* %DP ; <double> [#uses=1]
%V = fadd double %D, 1.000000e+00 ; <double> [#uses=2]
%W = fsub double %V, %V ; <double> [#uses=3]
@@ -12,7 +12,7 @@ define double @test(double* %DP, double %Arg) {
ret double %Y
}
-define i32 @main() {
+define i32 @main() nounwind {
%X = alloca double ; <double*> [#uses=2]
store double 0.000000e+00, double* %X
call double @test( double* %X, double 2.000000e+00 ) ; <double>:1 [#uses=0]
diff --git a/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-remote.ll b/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-remote.ll
index 4181fb08842c..9fbaeb794496 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-remote.ll
@@ -1,8 +1,8 @@
-; RUN: %lli_mcjit -remote-mcjit -mcjit-remote-process=lli-child-target %s > /dev/null
+; RUN: %lli_mcjit -remote-mcjit -mcjit-remote-process=lli-child-target%exeext %s > /dev/null
@count = global i32 1, align 4
-define i32 @main() nounwind uwtable {
+define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%i = alloca i32, align 4
diff --git a/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-remote.ll b/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-remote.ll
index 8b562972b5d1..d62631ff50c6 100644
--- a/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-remote.ll
+++ b/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-remote.ll
@@ -1,4 +1,4 @@
-; RUN: %lli_mcjit -remote-mcjit -O0 -mcjit-remote-process=lli-child-target %s
+; RUN: %lli_mcjit -remote-mcjit -O0 -mcjit-remote-process=lli-child-target%exeext %s
@.str = private unnamed_addr constant [6 x i8] c"data1\00", align 1
@ptr = global i8* getelementptr inbounds ([6 x i8]* @.str, i32 0, i32 0), align 4
diff --git a/test/ExecutionEngine/RuntimeDyld/ARM/MachO_ARM_PIC_relocations.s b/test/ExecutionEngine/RuntimeDyld/ARM/MachO_ARM_PIC_relocations.s
new file mode 100644
index 000000000000..86041835591f
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/ARM/MachO_ARM_PIC_relocations.s
@@ -0,0 +1,27 @@
+# RUN: llvm-mc -triple=armv7s-apple-ios7.0.0 -relocation-model=pic -filetype=obj -o %t.o %s
+# RUN: llvm-rtdyld -triple=armv7s-apple-ios7.0.0 -verify -check=%s %t.o
+# RUN: rm %t.o
+
+ .syntax unified
+ .section __TEXT,__text,regular,pure_instructions
+ .globl bar
+ .align 2
+bar:
+# Check lower 16-bits of section difference relocation
+# rtdyld-check: decode_operand(insn1, 1) = (foo-(nextPC+8))[15:0]
+insn1:
+ movw r0, :lower16:(foo-(nextPC+8))
+# Check upper 16-bits of section difference relocation
+# rtdyld-check: decode_operand(insn2, 2) = (foo-(nextPC+8))[31:16]
+insn2:
+ movt r0, :upper16:(foo-(nextPC+8))
+nextPC:
+ add r0, pc, r0
+ bx lr
+
+ .globl foo
+ .align 2
+foo:
+ bx lr
+
+.subsections_via_symbols
diff --git a/test/ExecutionEngine/RuntimeDyld/ARM/lit.local.cfg b/test/ExecutionEngine/RuntimeDyld/ARM/lit.local.cfg
new file mode 100644
index 000000000000..98c6700c209d
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/ARM/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'ARM' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/ExecutionEngine/RuntimeDyld/Inputs/arm_secdiff_reloc.o b/test/ExecutionEngine/RuntimeDyld/Inputs/arm_secdiff_reloc.o
deleted file mode 100644
index 5392266cf560..000000000000
--- a/test/ExecutionEngine/RuntimeDyld/Inputs/arm_secdiff_reloc.o
+++ /dev/null
Binary files differ
diff --git a/test/ExecutionEngine/RuntimeDyld/X86/MachO_x86-64_PIC_relocations.s b/test/ExecutionEngine/RuntimeDyld/X86/MachO_x86-64_PIC_relocations.s
new file mode 100644
index 000000000000..e87b449bb237
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/X86/MachO_x86-64_PIC_relocations.s
@@ -0,0 +1,32 @@
+# RUN: llvm-mc -triple=x86_64-apple-macosx10.9 -relocation-model=pic -filetype=obj -o %t.o %s
+# RUN: llvm-rtdyld -triple=x86_64-apple-macosx10.9 -verify -check=%s %t.o
+# RUN: rm %t.o
+
+ .section __TEXT,__text,regular,pure_instructions
+ .globl foo
+ .align 4, 0x90
+foo:
+ retq
+
+ .globl main
+ .align 4, 0x90
+main:
+# Test PC-rel branch.
+# rtdyld-check: decode_operand(insn1, 0) = foo - next_pc(insn1)
+insn1:
+ callq foo
+
+# Test PC-rel signed.
+# rtdyld-check: decode_operand(insn2, 4) = x - next_pc(insn2)
+insn2:
+ movl x(%rip), %eax
+ movl $0, %eax
+ retq
+
+ .section __DATA,__data
+ .globl x
+ .align 2
+x:
+ .long 5
+
+.subsections_via_symbols
diff --git a/test/ExecutionEngine/RuntimeDyld/X86/lit.local.cfg b/test/ExecutionEngine/RuntimeDyld/X86/lit.local.cfg
new file mode 100644
index 000000000000..e71f3cc4c41e
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/X86/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'X86' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/ExecutionEngine/RuntimeDyld/arm_secdiff_reloc.test b/test/ExecutionEngine/RuntimeDyld/arm_secdiff_reloc.test
deleted file mode 100644
index 92e4dd793ccb..000000000000
--- a/test/ExecutionEngine/RuntimeDyld/arm_secdiff_reloc.test
+++ /dev/null
@@ -1 +0,0 @@
-RUN: llvm-rtdyld -printline %p/Inputs/arm_secdiff_reloc.o
diff --git a/test/ExecutionEngine/lit.local.cfg b/test/ExecutionEngine/lit.local.cfg
index 28c56ad9c5c2..f6673df3c358 100644
--- a/test/ExecutionEngine/lit.local.cfg
+++ b/test/ExecutionEngine/lit.local.cfg
@@ -1,6 +1,14 @@
if config.root.host_arch in ['PowerPC', 'AArch64', 'SystemZ']:
config.unsupported = True
+# CMake and autoconf diverge in naming of host_arch
+if 'powerpc64' in config.root.target_triple:
+ config.unsupported = True
+
+if 'aarch64' in config.root.target_triple \
+ or 'arm64' in config.root.target_triple:
+ config.unsupported = True
+
if 'hexagon' in config.root.target_triple:
config.unsupported = True
diff --git a/test/Feature/alias2.ll b/test/Feature/alias2.ll
new file mode 100644
index 000000000000..73c874f2b9ee
--- /dev/null
+++ b/test/Feature/alias2.ll
@@ -0,0 +1,28 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+@v1 = global i32 0
+; CHECK: @v1 = global i32 0
+
+@v2 = global [1 x i32] zeroinitializer
+; CHECK: @v2 = global [1 x i32] zeroinitializer
+
+@v3 = global [2 x i16] zeroinitializer
+; CHECK: @v3 = global [2 x i16] zeroinitializer
+
+@a1 = alias bitcast (i32* @v1 to i16*)
+; CHECK: @a1 = alias bitcast (i32* @v1 to i16*)
+
+@a2 = alias bitcast([1 x i32]* @v2 to i32*)
+; CHECK: @a2 = alias getelementptr inbounds ([1 x i32]* @v2, i32 0, i32 0)
+
+@a3 = alias addrspacecast (i32* @v1 to i32 addrspace(2)*)
+; CHECK: @a3 = alias addrspacecast (i32* @v1 to i32 addrspace(2)*)
+
+@a4 = alias bitcast (i32* @v1 to i16*)
+; CHECK: @a4 = alias bitcast (i32* @v1 to i16*)
+
+@a5 = thread_local(localdynamic) alias i32* @v1
+; CHECK: @a5 = thread_local(localdynamic) alias i32* @v1
+
+@a6 = alias getelementptr ([2 x i16]* @v3, i32 1, i32 1)
+; CHECK: @a6 = alias getelementptr ([2 x i16]* @v3, i32 1, i32 1)
diff --git a/test/Feature/aliases.ll b/test/Feature/aliases.ll
index 139381215ee8..ad1d1b08901c 100644
--- a/test/Feature/aliases.ll
+++ b/test/Feature/aliases.ll
@@ -4,15 +4,24 @@
@llvm.used = appending global [1 x i8*] [i8* bitcast (i32* @foo1 to i8*)], section "llvm.metadata"
-@bar = external global i32
+@bar = global i32 0
@foo1 = alias i32* @bar
@foo2 = alias i32* @bar
@foo3 = alias i32* @foo2
+@foo4 = unnamed_addr alias i32* @foo2
+
+; Make sure the verifier does not complain about references to a global
+; declaration from an initializer.
+@decl = external global i32
+@ptr = global i32* @decl
+@ptr_a = alias i32** @ptr
%FunTy = type i32()
-declare i32 @foo_f()
-@bar_f = alias weak %FunTy* @foo_f
+define i32 @foo_f() {
+ ret i32 0
+}
+@bar_f = alias weak_odr %FunTy* @foo_f
@bar_ff = alias i32()* @bar_f
@bar_i = alias internal i32* @bar
diff --git a/test/Feature/comdat.ll b/test/Feature/comdat.ll
new file mode 100644
index 000000000000..1e878bb71cd1
--- /dev/null
+++ b/test/Feature/comdat.ll
@@ -0,0 +1,21 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+$f = comdat any
+; CHECK: $f = comdat any
+
+$f2 = comdat any
+; CHECK-NOT: f2
+
+@v = global i32 0, comdat $f
+; CHECK: @v = global i32 0, comdat $f
+
+@a = alias i32* @v
+; CHECK: @a = alias i32* @v{{$}}
+
+define void @f() comdat $f {
+ ret void
+}
+; CHECK: define void @f() comdat $f
+
+$i = comdat largest
+@i = internal global i32 0, comdat $i
diff --git a/test/Feature/globalvars.ll b/test/Feature/globalvars.ll
index dad1cf31d5e6..84b4bdfd3750 100644
--- a/test/Feature/globalvars.ll
+++ b/test/Feature/globalvars.ll
@@ -16,3 +16,5 @@ define i32 @foo(i32 %blah) {
ret i32 %blah
}
+hidden dllexport global i32 42
+dllexport global i32 42
diff --git a/test/Feature/instructions.ll b/test/Feature/instructions.ll
index d0c303d71914..aa962948a168 100644
--- a/test/Feature/instructions.ll
+++ b/test/Feature/instructions.ll
@@ -4,11 +4,13 @@
define i32 @test_extractelement(<4 x i32> %V) {
%R = extractelement <4 x i32> %V, i32 1 ; <i32> [#uses=1]
+ %S = extractelement <4 x i32> %V, i64 1 ; <i32> [#uses=0]
ret i32 %R
}
define <4 x i32> @test_insertelement(<4 x i32> %V) {
%R = insertelement <4 x i32> %V, i32 0, i32 0 ; <<4 x i32>> [#uses=1]
+ %S = insertelement <4 x i32> %V, i32 0, i64 0 ; <<4 x i32>> [#uses=0]
ret <4 x i32> %R
}
diff --git a/test/Feature/intrinsic-noduplicate.ll b/test/Feature/intrinsic-noduplicate.ll
new file mode 100644
index 000000000000..9a2b0aba5bd4
--- /dev/null
+++ b/test/Feature/intrinsic-noduplicate.ll
@@ -0,0 +1,9 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+; Make sure LLVM knows about the noduplicate attribute on the
+; llvm.cuda.syncthreads intrinsic.
+
+declare void @llvm.cuda.syncthreads()
+
+; CHECK: declare void @llvm.cuda.syncthreads() #[[ATTRNUM:[0-9]+]]
+; CHECK: attributes #[[ATTRNUM]] = { noduplicate nounwind }
diff --git a/test/Feature/intrinsics.ll b/test/Feature/intrinsics.ll
index 28be053714d1..278cb9564e62 100644
--- a/test/Feature/intrinsics.ll
+++ b/test/Feature/intrinsics.ll
@@ -61,7 +61,7 @@ define void @libm() {
; FIXME: test ALL the intrinsics in this file.
; rdar://11542750
-; CHECK: declare void @llvm.trap() #2
+; CHECK: declare void @llvm.trap() #1
declare void @llvm.trap()
define void @trap() {
@@ -70,5 +70,4 @@ define void @trap() {
}
; CHECK: attributes #0 = { nounwind readnone }
-; CHECK: attributes #1 = { nounwind readonly }
-; CHECK: attributes #2 = { noreturn nounwind }
+; CHECK: attributes #1 = { noreturn nounwind }
diff --git a/test/Feature/linker_private_linkages.ll b/test/Feature/linker_private_linkages.ll
deleted file mode 100644
index 19bcbb40aa01..000000000000
--- a/test/Feature/linker_private_linkages.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@foo = linker_private hidden global i32 0
-@bar = linker_private_weak hidden global i32 0
diff --git a/test/Feature/optnone-llc.ll b/test/Feature/optnone-llc.ll
new file mode 100644
index 000000000000..6cb27d0b7d5c
--- /dev/null
+++ b/test/Feature/optnone-llc.ll
@@ -0,0 +1,54 @@
+; RUN: llc -O0 -debug %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=LLC-O0
+; RUN: llc -O1 -debug %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=LLC-Ox
+; RUN: llc -O2 -debug %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=LLC-Ox
+; RUN: llc -O3 -debug %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=LLC-Ox
+; RUN: llc -misched-postra -debug %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=LLC-MORE
+
+; REQUIRES: asserts
+
+; This test verifies that we don't run Machine Function optimizations
+; on optnone functions.
+
+; Function Attrs: noinline optnone
+define i32 @_Z3fooi(i32 %x) #0 {
+entry:
+ %x.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ br label %while.cond
+
+while.cond: ; preds = %while.body, %entry
+ %0 = load i32* %x.addr, align 4
+ %dec = add nsw i32 %0, -1
+ store i32 %dec, i32* %x.addr, align 4
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %while.body, label %while.end
+
+while.body: ; preds = %while.cond
+ br label %while.cond
+
+while.end: ; preds = %while.cond
+ ret i32 0
+}
+
+attributes #0 = { optnone noinline }
+
+; Nothing that runs at -O0 gets skipped.
+; LLC-O0-NOT: Skipping pass
+
+; Machine Function passes run at -O1 and higher.
+; LLC-Ox-DAG: Skipping pass 'Branch Probability Basic Block Placement'
+; LLC-Ox-DAG: Skipping pass 'CodeGen Prepare'
+; LLC-Ox-DAG: Skipping pass 'Control Flow Optimizer'
+; LLC-Ox-DAG: Skipping pass 'Machine code sinking'
+; LLC-Ox-DAG: Skipping pass 'Machine Common Subexpression Elimination'
+; LLC-Ox-DAG: Skipping pass 'Machine Copy Propagation Pass'
+; LLC-Ox-DAG: Skipping pass 'Machine Loop Invariant Code Motion'
+; LLC-Ox-DAG: Skipping pass 'Merge disjoint stack slots'
+; LLC-Ox-DAG: Skipping pass 'Optimize machine instruction PHIs'
+; LLC-Ox-DAG: Skipping pass 'Peephole Optimizations'
+; LLC-Ox-DAG: Skipping pass 'Post RA top-down list latency scheduler'
+; LLC-Ox-DAG: Skipping pass 'Remove dead machine instructions'
+; LLC-Ox-DAG: Skipping pass 'Tail Duplication'
+
+; Alternate post-RA scheduler.
+; LLC-MORE: Skipping pass 'PostRA Machine Instruction Scheduler'
diff --git a/test/Feature/optnone-opt.ll b/test/Feature/optnone-opt.ll
new file mode 100644
index 000000000000..f83e68ccfef8
--- /dev/null
+++ b/test/Feature/optnone-opt.ll
@@ -0,0 +1,74 @@
+; RUN: opt -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-O0
+; RUN: opt -O1 -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-O1
+; RUN: opt -O2 -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-O1 --check-prefix=OPT-O2O3
+; RUN: opt -O3 -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-O1 --check-prefix=OPT-O2O3
+; RUN: opt -bb-vectorize -dce -die -loweratomic -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-MORE
+; RUN: opt -indvars -licm -loop-deletion -loop-extract -loop-idiom -loop-instsimplify -loop-reduce -loop-reroll -loop-rotate -loop-unroll -loop-unswitch -S -debug %s 2>&1 | FileCheck %s --check-prefix=OPT-LOOP
+
+; REQUIRES: asserts
+
+; This test verifies that we don't run target-independent IR-level
+; optimizations on optnone functions.
+
+; Function Attrs: noinline optnone
+define i32 @_Z3fooi(i32 %x) #0 {
+entry:
+ %x.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ br label %while.cond
+
+while.cond: ; preds = %while.body, %entry
+ %0 = load i32* %x.addr, align 4
+ %dec = add nsw i32 %0, -1
+ store i32 %dec, i32* %x.addr, align 4
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %while.body, label %while.end
+
+while.body: ; preds = %while.cond
+ br label %while.cond
+
+while.end: ; preds = %while.cond
+ ret i32 0
+}
+
+attributes #0 = { optnone noinline }
+
+; Nothing that runs at -O0 gets skipped.
+; OPT-O0-NOT: Skipping pass
+
+; IR passes run at -O1 and higher.
+; OPT-O1-DAG: Skipping pass 'Aggressive Dead Code Elimination'
+; OPT-O1-DAG: Skipping pass 'Combine redundant instructions'
+; OPT-O1-DAG: Skipping pass 'Dead Store Elimination'
+; OPT-O1-DAG: Skipping pass 'Early CSE'
+; OPT-O1-DAG: Skipping pass 'Jump Threading'
+; OPT-O1-DAG: Skipping pass 'MemCpy Optimization'
+; OPT-O1-DAG: Skipping pass 'Reassociate expressions'
+; OPT-O1-DAG: Skipping pass 'Simplify the CFG'
+; OPT-O1-DAG: Skipping pass 'Sparse Conditional Constant Propagation'
+; OPT-O1-DAG: Skipping pass 'SROA'
+; OPT-O1-DAG: Skipping pass 'Tail Call Elimination'
+; OPT-O1-DAG: Skipping pass 'Value Propagation'
+
+; Additional IR passes run at -O2 and higher.
+; OPT-O2O3-DAG: Skipping pass 'Global Value Numbering'
+; OPT-O2O3-DAG: Skipping pass 'SLP Vectorizer'
+
+; Additional IR passes that opt doesn't turn on by default.
+; OPT-MORE-DAG: Skipping pass 'Basic-Block Vectorization'
+; OPT-MORE-DAG: Skipping pass 'Dead Code Elimination'
+; OPT-MORE-DAG: Skipping pass 'Dead Instruction Elimination'
+; OPT-MORE-DAG: Skipping pass 'Lower atomic intrinsics
+
+; Loop IR passes that opt doesn't turn on by default.
+; OPT-LOOP-DAG: Skipping pass 'Delete dead loops'
+; OPT-LOOP-DAG: Skipping pass 'Extract loops into new functions'
+; OPT-LOOP-DAG: Skipping pass 'Induction Variable Simplification'
+; OPT-LOOP-DAG: Skipping pass 'Loop Invariant Code Motion'
+; OPT-LOOP-DAG: Skipping pass 'Loop Strength Reduction'
+; OPT-LOOP-DAG: Skipping pass 'Recognize loop idioms'
+; OPT-LOOP-DAG: Skipping pass 'Reroll loops'
+; OPT-LOOP-DAG: Skipping pass 'Rotate Loops'
+; OPT-LOOP-DAG: Skipping pass 'Simplify instructions in loops'
+; OPT-LOOP-DAG: Skipping pass 'Unroll loops'
+; OPT-LOOP-DAG: Skipping pass 'Unswitch loops'
diff --git a/test/FileCheck/check-multiple-prefixes-nomatch-2.txt b/test/FileCheck/check-multiple-prefixes-nomatch-2.txt
new file mode 100644
index 000000000000..a1dc3d87b017
--- /dev/null
+++ b/test/FileCheck/check-multiple-prefixes-nomatch-2.txt
@@ -0,0 +1,10 @@
+; RUN: not FileCheck -input-file %s %s -check-prefix=FOO -check-prefix=BAR 2>&1 | FileCheck %s
+
+fog
+bar
+; _FOO not a valid check-line
+; FOO: fo{{o}}
+; BAR: ba{{r}}
+
+; CHECK: {{error: expected string not found in input}}
+; CHECK-NEXT: {{F}}OO: fo{{[{][{]o[}][}]}}
diff --git a/test/FileCheck/implicit-check-not.txt b/test/FileCheck/implicit-check-not.txt
new file mode 100644
index 000000000000..42677362158b
--- /dev/null
+++ b/test/FileCheck/implicit-check-not.txt
@@ -0,0 +1,44 @@
+; RUN: sed 's#^;.*##' %s | FileCheck -check-prefix=CHECK-PASS -implicit-check-not=warning: %s
+; RUN: sed 's#^;.*##' %s | not FileCheck -check-prefix=CHECK-FAIL1 -implicit-check-not=warning: %s 2>&1 | FileCheck %s -check-prefix CHECK-ERROR1
+; RUN: sed 's#^;.*##' %s | not FileCheck -check-prefix=CHECK-FAIL2 -implicit-check-not=warning: %s 2>&1 | FileCheck %s -check-prefix CHECK-ERROR2
+; RUN: sed 's#^;.*##' %s | not FileCheck -check-prefix=CHECK-FAIL3 -implicit-check-not=warning: %s 2>&1 | FileCheck %s -check-prefix CHECK-ERROR3
+; RUN: sed 's#^;.*##' %s | not FileCheck -check-prefix=CHECK-FAIL1 -implicit-check-not='{{aaa|bbb|ccc}}' %s 2>&1 | FileCheck %s -check-prefix CHECK-ERROR4
+; RUN: sed 's#^;.*##' %s | not FileCheck -check-prefix=CHECK-FAIL1 -implicit-check-not=aaa -implicit-check-not=bbb -implicit-check-not=ccc %s 2>&1 | FileCheck %s -check-prefix CHECK-ERROR5
+; RUN: sed 's#^;.*##' %s | not FileCheck -check-prefix=CHECK-FAIL2 -implicit-check-not=aaa -implicit-check-not=bbb -implicit-check-not=ccc %s 2>&1 | FileCheck %s -check-prefix CHECK-ERROR6
+; RUN: sed 's#^;.*##' %s | not FileCheck -check-prefix=CHECK-FAIL3 -implicit-check-not=aaa -implicit-check-not=bbb -implicit-check-not=ccc %s 2>&1 | FileCheck %s -check-prefix CHECK-ERROR7
+
+warning: aaa
+; CHECK-PASS: warning: aaa
+; CHECK-ERROR1: error: CHECK-FAIL1-NOT: string occurred!
+; CHECK-ERROR1: command line:1:22: note: CHECK-FAIL1-NOT: pattern specified here
+; CHECK-ERROR1-NEXT: -implicit-check-not='warning:'
+; CHECK-FAIL2: warning: aaa
+; CHECK-FAIL3: warning: aaa
+; CHECK-ERROR4: error: CHECK-FAIL1-NOT: string occurred!
+; CHECK-ERROR4: command line:1:22: note: CHECK-FAIL1-NOT: pattern specified here
+; CHECK-ERROR4-NEXT: {{-implicit-check-not='\{\{aaa\|bbb\|ccc\}\}'}}
+; CHECK-ERROR5: error: CHECK-FAIL1-NOT: string occurred!
+; CHECK-ERROR5: command line:1:22: note: CHECK-FAIL1-NOT: pattern specified here
+; CHECK-ERROR5-NEXT: -implicit-check-not='aaa'
+
+warning: bbb
+; CHECK-PASS: warning: bbb
+; CHECK-FAIL1: warning: bbb
+; CHECK-ERROR2: error: CHECK-FAIL2-NOT: string occurred!
+; CHECK-ERROR2: command line:1:22: note: CHECK-FAIL2-NOT: pattern specified here
+; CHECK-ERROR2-NEXT: -implicit-check-not='warning:'
+; CHECK-FAIL3: warning: bbb
+; CHECK-ERROR6: error: CHECK-FAIL2-NOT: string occurred!
+; CHECK-ERROR6: command line:1:22: note: CHECK-FAIL2-NOT: pattern specified here
+; CHECK-ERROR6-NEXT: -implicit-check-not='bbb'
+
+warning: ccc
+; CHECK-PASS: warning: ccc
+; CHECK-FAIL1: warning: ccc
+; CHECK-FAIL2: warning: ccc
+; CHECK-ERROR3: error: CHECK-FAIL3-NOT: string occurred!
+; CHECK-ERROR3: command line:1:22: note: CHECK-FAIL3-NOT: pattern specified here
+; CHECK-ERROR3-NEXT: -implicit-check-not='warning:'
+; CHECK-ERROR7: error: CHECK-FAIL3-NOT: string occurred!
+; CHECK-ERROR7: command line:1:22: note: CHECK-FAIL3-NOT: pattern specified here
+; CHECK-ERROR7-NEXT: -implicit-check-not='ccc'
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_attr.ll b/test/Instrumentation/AddressSanitizer/X86/asm_attr.ll
new file mode 100644
index 000000000000..0667a1474873
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_attr.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+sse2 -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: mov_no_attr
+; CHECK-NOT: callq __asan_report_load@PLT
+; CHECK-NOT: callq __asan_report_store@PLT
+define void @mov_no_attr(i64* %dst, i64* %src) {
+ tail call void asm sideeffect "movq ($1), %rax \0A\09movq %rax, ($0) \0A\09", "r,r,~{memory},~{rax},~{dirflag},~{fpsr},~{flags}"(i64* %dst, i64* %src)
+ ret void
+}
+
+; CHECK-LABEL: mov_sanitize
+; CHECK: callq __asan_report_load8@PLT
+; CHECK: callq __asan_report_store8@PLT
+define void @mov_sanitize(i64* %dst, i64* %src) sanitize_address {
+ tail call void asm sideeffect "movq ($1), %rax \0A\09movq %rax, ($0) \0A\09", "r,r,~{memory},~{rax},~{dirflag},~{fpsr},~{flags}"(i64* %dst, i64* %src)
+ ret void
+}
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_mov.ll b/test/Instrumentation/AddressSanitizer/X86/asm_mov.ll
new file mode 100644
index 000000000000..ad5e02e0d065
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_mov.ll
@@ -0,0 +1,146 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+sse2 -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: mov1b
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: pushq %rdi
+; CHECK-NEXT: pushfq
+; CHECK-NEXT: leaq {{.*}}, %rdi
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: shrq $3, %rax
+; CHECK-NEXT: movb 2147450880(%rax), %al
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: je [[A:.*]]
+; CHECK-NEXT: movl %edi, %ecx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: movsbl %al, %eax
+; CHECK-NEXT: cmpl %eax, %ecx
+; CHECK-NEXT: jl {{.*}}
+; CHECK-NEXT: cld
+; CHECK-NEXT: emms
+; CHECK-NEXT: andq $-16, %rsp
+; CHECK-NEXT: callq __asan_report_load1@PLT
+; CHECK-NEXT: [[A]]:
+; CHECK-NEXT: popfq
+; CHECK-NEXT: popq %rdi
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: leaq 128(%rsp), %rsp
+
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK: callq __asan_report_store1@PLT
+; CHECK: leaq 128(%rsp), %rsp
+
+; CHECK: movb {{.*}}, {{.*}}
+define void @mov1b(i8* %dst, i8* %src) #0 {
+entry:
+ tail call void asm sideeffect "movb ($1), %al \0A\09movb %al, ($0) \0A\09", "r,r,~{memory},~{rax},~{dirflag},~{fpsr},~{flags}"(i8* %dst, i8* %src) #1, !srcloc !0
+ ret void
+}
+
+; CHECK-LABEL: mov2b
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK: leal 1(%ecx), %ecx
+; CHECK: callq __asan_report_load2@PLT
+; CHECK: leaq 128(%rsp), %rsp
+
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK: leal 1(%ecx), %ecx
+; CHECK: callq __asan_report_store2@PLT
+; CHECK: leaq 128(%rsp), %rsp
+
+; CHECK: movw {{.*}}, {{.*}}
+define void @mov2b(i16* %dst, i16* %src) #0 {
+entry:
+ tail call void asm sideeffect "movw ($1), %ax \0A\09movw %ax, ($0) \0A\09", "r,r,~{memory},~{rax},~{dirflag},~{fpsr},~{flags}"(i16* %dst, i16* %src) #1, !srcloc !1
+ ret void
+}
+
+; CHECK-LABEL: mov4b
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK: addl $3, %ecx
+; CHECK: callq __asan_report_load4@PLT
+; CHECK: leaq 128(%rsp), %rsp
+
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK: addl $3, %ecx
+; CHECK: callq __asan_report_store4@PLT
+; CHECK: leaq 128(%rsp), %rsp
+
+; CHECK: movl {{.*}}, {{.*}}
+define void @mov4b(i32* %dst, i32* %src) #0 {
+entry:
+ tail call void asm sideeffect "movl ($1), %eax \0A\09movl %eax, ($0) \0A\09", "r,r,~{memory},~{rax},~{dirflag},~{fpsr},~{flags}"(i32* %dst, i32* %src) #1, !srcloc !2
+ ret void
+}
+
+; CHECK-LABEL: mov8b
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: pushfq
+; CHECK-NEXT: leaq {{.*}}, %rax
+; CHECK-NEXT: shrq $3, %rax
+; CHECK-NEXT: cmpb $0, 2147450880(%rax)
+; CHECK-NEXT: je [[A:.*]]
+; CHECK-NEXT: cld
+; CHECK-NEXT: emms
+; CHECK-NEXT: andq $-16, %rsp
+; CHECK-NEXT: callq __asan_report_load8@PLT
+; CHECK-NEXT: [[A]]:
+; CHECK-NEXT: popfq
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: leaq 128(%rsp), %rsp
+
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: pushfq
+; CHECK-NEXT: leaq {{.*}}, %rax
+; CHECK-NEXT: shrq $3, %rax
+; CHECK-NEXT: cmpb $0, 2147450880(%rax)
+; CHECK-NEXT: je [[A:.*]]
+; CHECK-NEXT: cld
+; CHECK-NEXT: emms
+; CHECK-NEXT: andq $-16, %rsp
+; CHECK-NEXT: callq __asan_report_store8@PLT
+; CHECK-NEXT: [[A]]:
+; CHECK-NEXT: popfq
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: leaq 128(%rsp), %rsp
+
+; CHECK: movq {{.*}}, {{.*}}
+define void @mov8b(i64* %dst, i64* %src) #0 {
+entry:
+ tail call void asm sideeffect "movq ($1), %rax \0A\09movq %rax, ($0) \0A\09", "r,r,~{memory},~{rax},~{dirflag},~{fpsr},~{flags}"(i64* %dst, i64* %src) #1, !srcloc !3
+ ret void
+}
+
+; CHECK-LABEL: mov16b
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK: cmpw $0, 2147450880(%rax)
+; CHECK: callq __asan_report_load16@PLT
+; CHECK: leaq 128(%rsp), %rsp
+
+; CHECK: leaq -128(%rsp), %rsp
+; CHECK: cmpw $0, 2147450880(%rax)
+; CHECK: callq __asan_report_store16@PLT
+; CHECK: leaq 128(%rsp), %rsp
+
+; CHECK: movaps {{.*}}, {{.*}}
+define void @mov16b(<2 x i64>* %dst, <2 x i64>* %src) #0 {
+entry:
+ tail call void asm sideeffect "movaps ($1), %xmm0 \0A\09movaps %xmm0, ($0) \0A\09", "r,r,~{memory},~{xmm0},~{dirflag},~{fpsr},~{flags}"(<2 x i64>* %dst, <2 x i64>* %src) #1, !srcloc !4
+ ret void
+}
+
+attributes #0 = { nounwind uwtable sanitize_address "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+
+!0 = metadata !{i32 98, i32 122, i32 160}
+!1 = metadata !{i32 305, i32 329, i32 367}
+!2 = metadata !{i32 512, i32 537, i32 576}
+!3 = metadata !{i32 721, i32 746, i32 785}
+!4 = metadata !{i32 929, i32 957, i32 999}
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_mov.s b/test/Instrumentation/AddressSanitizer/X86/asm_mov.s
new file mode 100644
index 000000000000..74a788cf4d51
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_mov.s
@@ -0,0 +1,64 @@
+# RUN: llvm-mc %s -triple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+sse2 -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s
+
+ .text
+ .globl mov1b
+ .align 16, 0x90
+ .type mov1b,@function
+# CHECK-LABEL: mov1b:
+#
+# CHECK: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_load1@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movb (%rsi), %al
+#
+# CHECK-NEXT: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_store1@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movb %al, (%rdi)
+mov1b: # @mov1b
+ .cfi_startproc
+# BB#0:
+ #APP
+ movb (%rsi), %al
+ movb %al, (%rdi)
+
+ #NO_APP
+ retq
+.Ltmp0:
+ .size mov1b, .Ltmp0-mov1b
+ .cfi_endproc
+
+ .globl mov16b
+ .align 16, 0x90
+ .type mov16b,@function
+# CHECK-LABEL: mov16b:
+#
+# CHECK: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_load16@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movaps (%rsi), %xmm0
+#
+# CHECK-NEXT: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_store16@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movaps %xmm0, (%rdi)
+mov16b: # @mov16b
+ .cfi_startproc
+# BB#0:
+ #APP
+ movaps (%rsi), %xmm0
+ movaps %xmm0, (%rdi)
+
+ #NO_APP
+ retq
+.Ltmp1:
+ .size mov16b, .Ltmp1-mov16b
+ .cfi_endproc
+
+
+ .ident "clang version 3.5 "
+ .section ".note.GNU-stack","",@progbits
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s b/test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s
new file mode 100644
index 000000000000..e3a1541e1951
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_mov_no_instrumentation.s
@@ -0,0 +1,24 @@
+# RUN: llvm-mc %s -triple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+sse2 | FileCheck %s
+
+ .text
+ .globl mov1b
+ .align 16, 0x90
+ .type mov1b,@function
+# CHECK-LABEL: mov1b
+# CHECK-NOT: callq __asan_report_load1@PLT
+# CHECK-NOT: callq __asan_report_store1@PLT
+mov1b: # @mov1b
+ .cfi_startproc
+# BB#0:
+ #APP
+ movb (%rsi), %al
+ movb %al, (%rdi)
+
+ #NO_APP
+ retq
+.Ltmp0:
+ .size mov1b, .Ltmp0-mov1b
+ .cfi_endproc
+
+ .ident "clang version 3.5 "
+ .section ".note.GNU-stack","",@progbits
diff --git a/test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s b/test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s
new file mode 100644
index 000000000000..ca3c54c1455a
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/X86/asm_swap_intel.s
@@ -0,0 +1,59 @@
+# RUN: llvm-mc %s -x86-asm-syntax=intel -triple=x86_64-unknown-linux-gnu -asm-instrumentation=address -asan-instrument-assembly | FileCheck %s
+
+ .text
+ .globl swap
+ .align 16, 0x90
+ .type swap,@function
+# CHECK-LABEL: swap:
+#
+# CHECK: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_load8@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movq (%rcx), %rax
+#
+# CHECK-NEXT: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_load8@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movq (%rdx), %rbx
+#
+# CHECK-NEXT: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_store8@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movq %rbx, (%rcx)
+#
+# CHECK-NEXT: leaq -128(%rsp), %rsp
+# CHECK: callq __asan_report_store8@PLT
+# CHECK: leaq 128(%rsp), %rsp
+#
+# CHECK-NEXT: movq %rax, (%rdx)
+swap: # @swap
+ .cfi_startproc
+# BB#0:
+ push rbx
+.Ltmp0:
+ .cfi_def_cfa_offset 16
+.Ltmp1:
+ .cfi_offset rbx, -16
+ mov rcx, rdi
+ mov rdx, rsi
+ #APP
+
+
+ mov rax, qword ptr [rcx]
+ mov rbx, qword ptr [rdx]
+ mov qword ptr [rcx], rbx
+ mov qword ptr [rdx], rax
+
+ #NO_APP
+ pop rbx
+ ret
+.Ltmp2:
+ .size swap, .Ltmp2-swap
+ .cfi_endproc
+
+
+ .ident "clang version 3.5.0 "
+ .section ".note.GNU-stack","",@progbits
diff --git a/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll b/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
index 2c4d82eb1ae7..63477aacd8f8 100644
--- a/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
+++ b/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -asan -S | llc -o /dev/null
+; RUN: opt < %s -asan -asan-module -S | llc -o /dev/null
; The bug manifests as a reg alloc failure:
; error: ran out of registers during register allocation
; ModuleID = 'z.o'
diff --git a/test/Instrumentation/AddressSanitizer/X86/lit.local.cfg b/test/Instrumentation/AddressSanitizer/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Instrumentation/AddressSanitizer/X86/lit.local.cfg
+++ b/test/Instrumentation/AddressSanitizer/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Instrumentation/AddressSanitizer/asan-vs-gvn.ll b/test/Instrumentation/AddressSanitizer/asan-vs-gvn.ll
index 1087c9a58ff3..75adf4061c0d 100644
--- a/test/Instrumentation/AddressSanitizer/asan-vs-gvn.ll
+++ b/test/Instrumentation/AddressSanitizer/asan-vs-gvn.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -basicaa -gvn -asan -S | FileCheck %s
+; RUN: opt < %s -basicaa -gvn -asan -asan-module -S | FileCheck %s
; ASAN conflicts with load widening iff the widened load accesses data out of bounds
; (while the original unwidened loads do not).
; http://code.google.com/p/address-sanitizer/issues/detail?id=20#c1
diff --git a/test/Instrumentation/AddressSanitizer/basic.ll b/test/Instrumentation/AddressSanitizer/basic.ll
index 6002b9e897d7..5436c60cb6d0 100644
--- a/test/Instrumentation/AddressSanitizer/basic.ll
+++ b/test/Instrumentation/AddressSanitizer/basic.ll
@@ -1,12 +1,12 @@
; Test basic address sanitizer instrumentation.
;
-; RUN: opt < %s -asan -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
define i32 @test_load(i32* %a) sanitize_address {
-; CHECK: @test_load
+; CHECK-LABEL: @test_load
; CHECK-NOT: load
; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint i32* %a to i64
; CHECK: lshr i64 %[[LOAD_ADDR]], 3
@@ -34,12 +34,12 @@ define i32 @test_load(i32* %a) sanitize_address {
entry:
- %tmp1 = load i32* %a
+ %tmp1 = load i32* %a, align 4
ret i32 %tmp1
}
define void @test_store(i32* %a) sanitize_address {
-; CHECK: @test_store
+; CHECK-LABEL: @test_store
; CHECK-NOT: store
; CHECK: %[[STORE_ADDR:[^ ]*]] = ptrtoint i32* %a to i64
; CHECK: lshr i64 %[[STORE_ADDR]], 3
@@ -66,7 +66,7 @@ define void @test_store(i32* %a) sanitize_address {
;
entry:
- store i32 42, i32* %a
+ store i32 42, i32* %a, align 4
ret void
}
@@ -84,37 +84,18 @@ entry:
ret void
}
-; CHECK: define void @alloca_test()
+; CHECK-LABEL: define void @alloca_test()
; CHECK: = alloca
; CHECK-NOT: = alloca
; CHECK: ret void
-; Check that asan does not touch allocas with alignment > 32.
-define void @alloca_alignment_test() sanitize_address {
-entry:
- %x = alloca [10 x i8], align 64
- %y = alloca [10 x i8], align 128
- %z = alloca [10 x i8], align 256
- call void @alloca_test_use([10 x i8]* %x)
- call void @alloca_test_use([10 x i8]* %y)
- call void @alloca_test_use([10 x i8]* %z)
- ret void
-}
-
-; CHECK: define void @alloca_alignment_test()
-; CHECK: = alloca{{.*}} align 64
-; CHECK: = alloca{{.*}} align 128
-; CHECK: = alloca{{.*}} align 256
-; CHECK: ret void
-
-
define void @LongDoubleTest(x86_fp80* nocapture %a) nounwind uwtable sanitize_address {
entry:
store x86_fp80 0xK3FFF8000000000000000, x86_fp80* %a, align 16
ret void
}
-; CHECK: LongDoubleTest
+; CHECK-LABEL: LongDoubleTest
; CHECK: __asan_report_store_n
; CHECK: __asan_report_store_n
; CHECK: ret void
@@ -127,13 +108,25 @@ define void @i40test(i40* %a, i40* %b) nounwind uwtable sanitize_address {
ret void
}
-; CHECK: i40test
+; CHECK-LABEL: i40test
; CHECK: __asan_report_load_n{{.*}}, i64 5)
; CHECK: __asan_report_load_n{{.*}}, i64 5)
; CHECK: __asan_report_store_n{{.*}}, i64 5)
; CHECK: __asan_report_store_n{{.*}}, i64 5)
; CHECK: ret void
+define void @i64test_align1(i64* %b) nounwind uwtable sanitize_address {
+ entry:
+ store i64 0, i64* %b, align 1
+ ret void
+}
+
+; CHECK-LABEL: i64test_align1
+; CHECK: __asan_report_store_n{{.*}}, i64 8)
+; CHECK: __asan_report_store_n{{.*}}, i64 8)
+; CHECK: ret void
+
+
define void @i80test(i80* %a, i80* %b) nounwind uwtable sanitize_address {
entry:
%t = load i80* %a
@@ -141,7 +134,7 @@ define void @i80test(i80* %a, i80* %b) nounwind uwtable sanitize_address {
ret void
}
-; CHECK: i80test
+; CHECK-LABEL: i80test
; CHECK: __asan_report_load_n{{.*}}, i64 10)
; CHECK: __asan_report_load_n{{.*}}, i64 10)
; CHECK: __asan_report_store_n{{.*}}, i64 10)
@@ -154,8 +147,25 @@ entry:
%tmp1 = load i32* %a
ret i32 %tmp1
}
-; CHECK: @f_available_externally
+; CHECK-LABEL: @f_available_externally
; CHECK-NOT: __asan_report
; CHECK: ret i32
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) nounwind
+
+define void @memintr_test(i8* %a, i8* %b) nounwind uwtable sanitize_address {
+ entry:
+ tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 100, i32 1, i1 false)
+ tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i32 1, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: memintr_test
+; CHECK: __asan_memset
+; CHECK: __asan_memmove
+; CHECK: __asan_memcpy
+; CHECK: ret void
diff --git a/test/Instrumentation/AddressSanitizer/coverage-dbg.ll b/test/Instrumentation/AddressSanitizer/coverage-dbg.ll
new file mode 100644
index 000000000000..3f7998d1a738
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/coverage-dbg.ll
@@ -0,0 +1,67 @@
+; Test that coverage instrumentation does not lose debug location.
+
+; RUN: opt < %s -asan -asan-module -asan-coverage=1 -S | FileCheck %s
+
+; C++ source:
+; 1: struct A {
+; 2: int f();
+; 3: int x;
+; 4: };
+; 5:
+; 6: int A::f() {
+; 7: return x;
+; 8: }
+; clang++ ../1.cc -O3 -g -S -emit-llvm -fno-strict-aliasing
+; and add sanitize_address to @_ZN1A1fEv
+
+; Test that __sanitizer_cov call has !dbg pointing to the opening { of A::f().
+; CHECK: call void @__sanitizer_cov(), !dbg [[A:!.*]]
+; CHECK: [[A]] = metadata !{i32 6, i32 0, metadata !{{.*}}, null}
+
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.A = type { i32 }
+
+; Function Attrs: nounwind readonly uwtable
+define i32 @_ZN1A1fEv(%struct.A* nocapture readonly %this) #0 align 2 {
+entry:
+ tail call void @llvm.dbg.value(metadata !{%struct.A* %this}, i64 0, metadata !15), !dbg !20
+ %x = getelementptr inbounds %struct.A* %this, i64 0, i32 0, !dbg !21
+ %0 = load i32* %x, align 4, !dbg !21
+ ret i32 %0, !dbg !21
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { sanitize_address nounwind readonly uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!17, !18}
+!llvm.ident = !{!19}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (210251)", i1 true, metadata !"", i32 0, metadata !2, metadata !3, metadata !12, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/code/llvm/build0/../1.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"../1.cc", metadata !"/code/llvm/build0"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"A", i32 1, i64 32, i64 32, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_structure_type ] [A] [line 1, size 32, align 32, offset 0] [def] [from ]
+!5 = metadata !{metadata !6, metadata !8}
+!6 = metadata !{i32 786445, metadata !1, metadata !"_ZTS1A", metadata !"x", i32 3, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_member ] [x] [line 3, size 32, align 32, offset 0] [from int]
+!7 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!8 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"f", metadata !"f", metadata !"_ZN1A1fEv", i32 2, metadata !9, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 2} ; [ DW_TAG_subprogram ] [line 2] [f]
+!9 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !10, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!10 = metadata !{metadata !7, metadata !11}
+!11 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!12 = metadata !{metadata !13}
+!13 = metadata !{i32 786478, metadata !1, metadata !"_ZTS1A", metadata !"f", metadata !"f", metadata !"_ZN1A1fEv", i32 6, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (%struct.A*)* @_ZN1A1fEv, null, metadata !8, metadata !14, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [f]
+!14 = metadata !{metadata !15}
+!15 = metadata !{i32 786689, metadata !13, metadata !"this", null, i32 16777216, metadata !16, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!16 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1A]
+!17 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!18 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!19 = metadata !{metadata !"clang version 3.5.0 (210251)"}
+!20 = metadata !{i32 0, i32 0, metadata !13, null}
+!21 = metadata !{i32 7, i32 0, metadata !13, null}
diff --git a/test/Instrumentation/AddressSanitizer/coverage.ll b/test/Instrumentation/AddressSanitizer/coverage.ll
index 47a54c0ef85e..79bb5c135330 100644
--- a/test/Instrumentation/AddressSanitizer/coverage.ll
+++ b/test/Instrumentation/AddressSanitizer/coverage.ll
@@ -1,13 +1,60 @@
-; RUN: opt < %s -asan -asan-coverage=1 -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -asan-coverage=0 -S | FileCheck %s --check-prefix=CHECK0
+; RUN: opt < %s -asan -asan-module -asan-coverage=1 -S | FileCheck %s --check-prefix=CHECK1
+; RUN: opt < %s -asan -asan-module -asan-coverage=2 -S | FileCheck %s --check-prefix=CHECK2
+; RUN: opt < %s -asan -asan-module -asan-coverage=2 -asan-coverage-block-threshold=10 -S | FileCheck %s --check-prefix=CHECK2
+; RUN: opt < %s -asan -asan-module -asan-coverage=2 -asan-coverage-block-threshold=1 -S | FileCheck %s --check-prefix=CHECK1
+
+; RUN: opt < %s -asan -asan-module -asan-coverage=0 -asan-globals=0 -S | \
+; RUN: FileCheck %s --check-prefix=CHECK0
+; RUN: opt < %s -asan -asan-module -asan-coverage=1 -asan-globals=0 -S | \
+; RUN: FileCheck %s --check-prefix=CHECK1
+; RUN: opt < %s -asan -asan-module -asan-coverage=2 -asan-globals=0 -S | \
+; RUN: FileCheck %s --check-prefix=CHECK2
+; RUN: opt < %s -asan -asan-module -asan-coverage=2 -asan-coverage-block-threshold=10 \
+; RUN: -asan-globals=0 -S | FileCheck %s --check-prefix=CHECK2
+; RUN: opt < %s -asan -asan-module -asan-coverage=2 -asan-coverage-block-threshold=1 \
+; RUN: -asan-globals=0 -S | FileCheck %s --check-prefix=CHECK1
+
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
-define i32 @foo(i32* %a) sanitize_address {
+define void @foo(i32* %a) sanitize_address {
entry:
- ret i32 0
+ %tobool = icmp eq i32* %a, null
+ br i1 %tobool, label %if.end, label %if.then
+
+ if.then: ; preds = %entry
+ store i32 0, i32* %a, align 4
+ br label %if.end
+
+ if.end: ; preds = %entry, %if.then
+ ret void
}
-; CHECK: define i32 @foo(i32* %a) #0 {
-; CHECK: %0 = load atomic i8* @__asan_gen_cov_foo monotonic, align 1
-; CHECK: %1 = icmp eq i8 0, %0
-; CHECK: br i1 %1, label %2, label %3
-; CHECK: call void @__sanitizer_cov(i64 ptrtoint (i32 (i32*)* @foo to i64))
-; CHECK: store atomic i8 1, i8* @__asan_gen_cov_foo monotonic, align 1
+
+; CHECK0-NOT: call void @__sanitizer_cov(
+; CHECK0-NOT: call void @__sanitizer_cov_module_init(
+
+; CHECK1-LABEL: define void @foo
+; CHECK1: %0 = load atomic i8* @__asan_gen_cov_foo monotonic, align 1
+; CHECK1: %1 = icmp eq i8 0, %0
+; CHECK1: br i1 %1, label %2, label %3
+; CHECK1: call void @__sanitizer_cov
+; CHECK1-NOT: call void @__sanitizer_cov
+; CHECK1: store atomic i8 1, i8* @__asan_gen_cov_foo monotonic, align 1
+
+; CHECK1-LABEL: define internal void @asan.module_ctor
+; CHECK1-NOT: ret
+; CHECK1: call void @__sanitizer_cov_module_init(i64 1)
+; CHECK1: ret
+
+
+; CHECK2-LABEL: define void @foo
+; CHECK2: call void @__sanitizer_cov
+; CHECK2: call void @__sanitizer_cov
+; CHECK2: call void @__sanitizer_cov
+; CHECK2-NOT: call void @__sanitizer_cov
+; CHECK2: ret void
+
+; CHECK2-LABEL: define internal void @asan.module_ctor
+; CHECK2-NOT: ret
+; CHECK2: call void @__sanitizer_cov_module_init(i64 3)
+; CHECK2: ret
diff --git a/test/Instrumentation/AddressSanitizer/debug_info.ll b/test/Instrumentation/AddressSanitizer/debug_info.ll
index daf29571c4a6..336b98b289cd 100644
--- a/test/Instrumentation/AddressSanitizer/debug_info.ll
+++ b/test/Instrumentation/AddressSanitizer/debug_info.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -asan -asan-module -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -asan-use-after-return=0 -S | FileCheck %s
; Checks that llvm.dbg.declare instructions are updated
; accordingly as we merge allocas.
@@ -47,8 +47,9 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
; Verify that debug descriptors for argument and local variable will be replaced
; with descriptors that end with OpDeref (encoded as 2).
-; CHECK: ![[ARG_ID]] = metadata {{.*}} i64 2} ; [ DW_TAG_arg_variable ] [p] [line 1]
-; CHECK: ![[VAR_ID]] = metadata {{.*}} i64 2} ; [ DW_TAG_auto_variable ] [r] [line 2]
+; CHECK: ![[ARG_ID]] = {{.*}}metadata ![[OPDEREF:[0-9]+]]} ; [ DW_TAG_arg_variable ] [p] [line 1]
+; CHECK: ![[OPDEREF]] = metadata !{i64 2}
+; CHECK: ![[VAR_ID]] = {{.*}}metadata ![[OPDEREF]]} ; [ DW_TAG_auto_variable ] [r] [line 2]
; Verify that there are no more variable descriptors.
; CHECK-NOT: DW_TAG_arg_variable
; CHECK-NOT: DW_TAG_auto_variable
diff --git a/test/Instrumentation/AddressSanitizer/different_scale_and_offset.ll b/test/Instrumentation/AddressSanitizer/different_scale_and_offset.ll
deleted file mode 100644
index b0371769be05..000000000000
--- a/test/Instrumentation/AddressSanitizer/different_scale_and_offset.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; Test non-default shadow mapping scale and offset.
-;
-; RUN: opt < %s -asan -asan-mapping-scale=2 -asan-mapping-offset-log=0 -S | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-unknown-linux-gnu"
-
-; Test that ASan tells scale and offset to runtime.
-; CHECK: @__asan_mapping_offset = linkonce_odr constant i64 0
-; CHECK: @__asan_mapping_scale = linkonce_odr constant i64 2
-
-define i32 @test_load(i32* %a) sanitize_address {
-; CHECK: @test_load
-; CHECK-NOT: load
-; CHECK: %[[LOAD_ADDR:[^ ]*]] = ptrtoint i32* %a to i64
-; CHECK: lshr i64 %[[LOAD_ADDR]], 2
-
-; No need in shift for zero offset.
-; CHECK-NOT: or i64
-
-; CHECK: %[[LOAD_SHADOW_PTR:[^ ]*]] = inttoptr
-; CHECK: %[[LOAD_SHADOW:[^ ]*]] = load i8* %[[LOAD_SHADOW_PTR]]
-; CHECK: icmp ne i8
-; CHECK: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
-
-; No need in slow path for i32 and mapping scale equal to 2.
-; CHECK-NOT: and i64 %[[LOAD_ADDR]]
-;
-; The crash block reports the error.
-; CHECK: call void @__asan_report_load4(i64 %[[LOAD_ADDR]])
-; CHECK: unreachable
-;
-; The actual load.
-; CHECK: %tmp1 = load i32* %a
-; CHECK: ret i32 %tmp1
-
-entry:
- %tmp1 = load i32* %a
- ret i32 %tmp1
-}
-
diff --git a/test/Instrumentation/AddressSanitizer/do-not-instrument-internal-globals.ll b/test/Instrumentation/AddressSanitizer/do-not-instrument-internal-globals.ll
index d4fd93c16477..cff83ab718bb 100644
--- a/test/Instrumentation/AddressSanitizer/do-not-instrument-internal-globals.ll
+++ b/test/Instrumentation/AddressSanitizer/do-not-instrument-internal-globals.ll
@@ -1,6 +1,6 @@
; This test checks that we are not instrumenting globals
; that we created ourselves.
-; RUN: opt < %s -asan -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -16,5 +16,5 @@ declare void @_Z3fooPi(i32*)
; We create one global string constant for the stack frame above.
; It should have unnamed_addr and align 1.
; Make sure we don't create any other global constants.
-; CHECK: = internal unnamed_addr constant{{.*}}align 1
-; CHECK-NOT: = internal unnamed_addr constant
+; CHECK: = private unnamed_addr constant{{.*}}align 1
+; CHECK-NOT: = private unnamed_addr constant
diff --git a/test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata.ll b/test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata.ll
new file mode 100644
index 000000000000..d02f12aec98d
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/do-not-instrument-llvm-metadata.ll
@@ -0,0 +1,12 @@
+; This test checks that we are not instrumenting globals in llvm.metadata.
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@.str_noinst = private unnamed_addr constant [4 x i8] c"aaa\00", section "llvm.metadata"
+@.str_inst = private unnamed_addr constant [4 x i8] c"aaa\00"
+
+; CHECK-NOT: {{asan_gen.*str_noinst}}
+; CHECK: {{asan_gen.*str_inst}}
+; CHECK: @asan.module_ctor
diff --git a/test/Instrumentation/AddressSanitizer/do-not-touch-comdat-global.ll b/test/Instrumentation/AddressSanitizer/do-not-touch-comdat-global.ll
new file mode 100644
index 000000000000..8d14e839962e
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/do-not-touch-comdat-global.ll
@@ -0,0 +1,14 @@
+; This test checks that we instrument regular globals, but do not touch
+; the COMDAT ones.
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+; no action should be taken for these globals
+$global_noinst = comdat largest
+@aliasee = private unnamed_addr constant [2 x i8] [i8 1, i8 2], comdat $global_noinst
+@global_noinst = unnamed_addr alias [2 x i8]* @aliasee
+; CHECK-NOT: {{asan_gen.*global_noinst}}
+; CHECK-DAG: @global_noinst = unnamed_addr alias [2 x i8]* @aliasee
+@global_inst = private constant [2 x i8] [i8 1, i8 2]
+; CHECK-DAG: {{asan_gen.*global_inst}}
+; CHECK: @asan.module_ctor
diff --git a/test/Instrumentation/AddressSanitizer/do-not-touch-odr-global.ll b/test/Instrumentation/AddressSanitizer/do-not-touch-odr-global.ll
index 1687877849c4..97752612297d 100644
--- a/test/Instrumentation/AddressSanitizer/do-not-touch-odr-global.ll
+++ b/test/Instrumentation/AddressSanitizer/do-not-touch-odr-global.ll
@@ -1,6 +1,11 @@
-; RUN: opt < %s -asan -S | FileCheck %s
+; This test checks that we instrument regular globals, but do not touch
+; the linkonce_odr ones.
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
; no action should be taken for these globals
-@v1 = linkonce_odr constant i8 1
-; CHECK-NOT: __asan_register_globals
+@global_noinst = linkonce_odr constant [2 x i8] [i8 1, i8 2]
+@global_inst = private constant [2 x i8] [i8 1, i8 2]
+; CHECK-NOT: {{asan_gen.*global_noinst}}
+; CHECK: {{asan_gen.*global_inst}}
+; CHECK: @asan.module_ctor
diff --git a/test/Instrumentation/AddressSanitizer/do-not-touch-threadlocal.ll b/test/Instrumentation/AddressSanitizer/do-not-touch-threadlocal.ll
index 89644d4a943f..f863f44d5125 100644
--- a/test/Instrumentation/AddressSanitizer/do-not-touch-threadlocal.ll
+++ b/test/Instrumentation/AddressSanitizer/do-not-touch-threadlocal.ll
@@ -1,6 +1,6 @@
-; RUN: opt < %s -asan -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
; no action should be taken for thread locals
@xxx = thread_local global i32 0, align 4
-; CHECK-NOT: __asan_register_globals
+; CHECK-NOT: {{call.*__asan_register_globals}}
diff --git a/test/Instrumentation/AddressSanitizer/freebsd.ll b/test/Instrumentation/AddressSanitizer/freebsd.ll
new file mode 100644
index 000000000000..359529fc8fcd
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/freebsd.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -asan -asan-module -S \
+; RUN: -mtriple=i386-unknown-freebsd \
+; RUN: -default-data-layout="e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" | \
+; RUN: FileCheck --check-prefix=CHECK-32 %s
+
+; RUN: opt < %s -asan -asan-module -S \
+; RUN: -mtriple=x86_64-unknown-freebsd \
+; RUN: -default-data-layout="e-m:e-i64:64-f80:128-n8:16:32:64-S128" | \
+; RUN: FileCheck --check-prefix=CHECK-64 %s
+
+define i32 @read_4_bytes(i32* %a) sanitize_address {
+entry:
+ %tmp1 = load i32* %a, align 4
+ ret i32 %tmp1
+}
+
+; CHECK-32: @read_4_bytes
+; CHECK-32-NOT: ret
+; Check for ASAN's Offset for 32-bit (2^30 or 0x40000000)
+; CHECK-32: lshr {{.*}} 3
+; CHECK-32-NEXT: {{1073741824}}
+; CHECK-32: ret
+
+; CHECK-64: @read_4_bytes
+; CHECK-64-NOT: ret
+; Check for ASAN's Offset for 64-bit (2^46 or 0x400000000000)
+; CHECK-64: lshr {{.*}} 3
+; CHECK-64-NEXT: {{70368744177664}}
+; CHECK-64: ret
diff --git a/test/Instrumentation/AddressSanitizer/global_metadata.ll b/test/Instrumentation/AddressSanitizer/global_metadata.ll
new file mode 100644
index 000000000000..4dcd53b3ad83
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/global_metadata.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Globals:
+@global = global i32 0, align 4
+@dyn_init_global = global i32 0, align 4
+@blacklisted_global = global i32 0, align 4
+@_ZZ4funcvE10static_var = internal global i32 0, align 4
+@.str = private unnamed_addr constant [14 x i8] c"Hello, world!\00", align 1
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__sub_I_asan_globals.cpp, i8* null }]
+
+; Sanitizer location descriptors:
+@.str1 = private unnamed_addr constant [22 x i8] c"/tmp/asan-globals.cpp\00", align 1
+@.asan_loc_descr = private unnamed_addr constant { [22 x i8]*, i32, i32 } { [22 x i8]* @.str1, i32 5, i32 5 }
+@.asan_loc_descr1 = private unnamed_addr constant { [22 x i8]*, i32, i32 } { [22 x i8]* @.str1, i32 7, i32 5 }
+@.asan_loc_descr2 = private unnamed_addr constant { [22 x i8]*, i32, i32 } { [22 x i8]* @.str1, i32 12, i32 14 }
+@.asan_loc_descr4 = private unnamed_addr constant { [22 x i8]*, i32, i32 } { [22 x i8]* @.str1, i32 14, i32 25 }
+
+; Global names:
+@.str2 = private unnamed_addr constant [7 x i8] c"global\00", align 1
+@.str3 = private unnamed_addr constant [16 x i8] c"dyn_init_global\00", align 1
+@.str4 = private unnamed_addr constant [11 x i8] c"static_var\00", align 1
+@.str5 = private unnamed_addr constant [17 x i8] c"<string literal>\00", align 1
+
+; Check that globals were instrumented, but sanitizer location descriptors weren't:
+; CHECK: @global = global { i32, [60 x i8] } zeroinitializer, align 32
+; CHECK: @.str = internal unnamed_addr constant { [14 x i8], [50 x i8] } { [14 x i8] c"Hello, world!\00", [50 x i8] zeroinitializer }, align 32
+; CHECK: @.asan_loc_descr = private unnamed_addr constant { [22 x i8]*, i32, i32 } { [22 x i8]* @.str1, i32 5, i32 5 }
+; CHECK: @.str2 = private unnamed_addr constant [7 x i8] c"global\00", align 1
+
+; Check that location descriptors and global names were passed into __asan_register_globals:
+; CHECK: i64 ptrtoint ([7 x i8]* @.str2 to i64)
+; CHECK: i64 ptrtoint ({ [22 x i8]*, i32, i32 }* @.asan_loc_descr to i64)
+
+; Function Attrs: nounwind sanitize_address
+define internal void @__cxx_global_var_init() #0 section ".text.startup" {
+entry:
+ %0 = load i32* @global, align 4
+ store i32 %0, i32* @dyn_init_global, align 4
+ ret void
+}
+
+; Function Attrs: nounwind sanitize_address
+define void @_Z4funcv() #1 {
+entry:
+ %literal = alloca i8*, align 8
+ store i8* getelementptr inbounds ([14 x i8]* @.str, i32 0, i32 0), i8** %literal, align 8
+ ret void
+}
+
+; Function Attrs: nounwind sanitize_address
+define internal void @_GLOBAL__sub_I_asan_globals.cpp() #0 section ".text.startup" {
+entry:
+ call void @__cxx_global_var_init()
+ ret void
+}
+
+attributes #0 = { nounwind sanitize_address }
+attributes #1 = { nounwind sanitize_address "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.asan.globals = !{!0, !1, !2, !3, !4}
+!llvm.ident = !{!5}
+
+!0 = metadata !{i32* @global, { [22 x i8]*, i32, i32 }* @.asan_loc_descr, [7 x i8]* @.str2, i1 false, i1 false}
+!1 = metadata !{i32* @dyn_init_global, { [22 x i8]*, i32, i32 }* @.asan_loc_descr1, [16 x i8]* @.str3, i1 true, i1 false}
+!2 = metadata !{i32* @blacklisted_global, null, null, i1 false, i1 true}
+!3 = metadata !{i32* @_ZZ4funcvE10static_var, { [22 x i8]*, i32, i32 }* @.asan_loc_descr2, [11 x i8]* @.str4, i1 false, i1 false}
+!4 = metadata !{[14 x i8]* @.str, { [22 x i8]*, i32, i32 }* @.asan_loc_descr4, [17 x i8]* @.str5, i1 false, i1 false}
+!5 = metadata !{metadata !"clang version 3.5.0 (211282)"}
diff --git a/test/Instrumentation/AddressSanitizer/instrument-no-return.ll b/test/Instrumentation/AddressSanitizer/instrument-no-return.ll
index 2d835a34080a..5d5c592c3f4d 100644
--- a/test/Instrumentation/AddressSanitizer/instrument-no-return.ll
+++ b/test/Instrumentation/AddressSanitizer/instrument-no-return.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -asan -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
; AddressSanitizer must insert __asan_handle_no_return
; before every noreturn call or invoke.
diff --git a/test/Instrumentation/AddressSanitizer/instrument_global.ll b/test/Instrumentation/AddressSanitizer/instrument_global.ll
index 4717277b9afd..80791d9a905f 100644
--- a/test/Instrumentation/AddressSanitizer/instrument_global.ll
+++ b/test/Instrumentation/AddressSanitizer/instrument_global.ll
@@ -7,6 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
; module ctor/dtor
; CHECK: llvm.global_ctors
+; CHECK: @__asan_gen_ = private constant [8 x i8] c"<stdin>\00", align 1
; CHECK: llvm.global_dtors
; Test that we don't instrument global arrays with static initializer
@@ -67,8 +68,8 @@ entry:
}
-!llvm.asan.dynamically_initialized_globals = !{!0}
-!0 = metadata !{[10 x i32]* @GlobDy}
+!llvm.asan.globals = !{!0}
+!0 = metadata !{[10 x i32]* @GlobDy, null, null, i1 true, i1 false}
; CHECK-LABEL: define internal void @asan.module_ctor
; CHECK-NOT: ret
diff --git a/test/Instrumentation/AddressSanitizer/instrument_initializer_metadata.ll b/test/Instrumentation/AddressSanitizer/instrument_initializer_metadata.ll
index 1d00cfacafe4..c119879351a9 100644
--- a/test/Instrumentation/AddressSanitizer/instrument_initializer_metadata.ll
+++ b/test/Instrumentation/AddressSanitizer/instrument_initializer_metadata.ll
@@ -7,9 +7,11 @@ target triple = "x86_64-unknown-linux-gnu"
@YYY = global i32 0, align 4 ; W/o dynamic initializer.
; Clang will emit the following metadata identifying @xxx as dynamically
; initialized.
-!0 = metadata !{i32* @xxx}
-!1 = metadata !{i32* @XXX}
-!llvm.asan.dynamically_initialized_globals = !{!0, !1}
+!0 = metadata !{i32* @xxx, null, null, i1 true, i1 false}
+!1 = metadata !{i32* @XXX, null, null, i1 true, i1 false}
+!2 = metadata !{i32* @yyy, null, null, i1 false, i1 false}
+!3 = metadata !{i32* @YYY, null, null, i1 false, i1 false}
+!llvm.asan.globals = !{!0, !1, !2, !3}
define i32 @initializer() uwtable {
entry:
@@ -23,6 +25,8 @@ entry:
ret void
}
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
define internal void @_GLOBAL__I_a() sanitize_address section ".text.startup" {
entry:
call void @__cxx_global_var_init()
diff --git a/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll b/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll
index 23cf6d28ec6c..195785ff5e3b 100644
--- a/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll
+++ b/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll
@@ -1,6 +1,6 @@
; Test that AddressSanitizer instruments "(*a)++" only once.
-; RUN: opt < %s -asan -S -asan-opt=1 | FileCheck %s -check-prefix=OPT1
-; RUN: opt < %s -asan -S -asan-opt=0 | FileCheck %s -check-prefix=OPT0
+; RUN: opt < %s -asan -asan-module -S -asan-opt=1 | FileCheck %s -check-prefix=OPT1
+; RUN: opt < %s -asan -asan-module -S -asan-opt=0 | FileCheck %s -check-prefix=OPT0
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/AddressSanitizer/instrumentation-with-call-threshold.ll b/test/Instrumentation/AddressSanitizer/instrumentation-with-call-threshold.ll
new file mode 100644
index 000000000000..adb434112cd2
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/instrumentation-with-call-threshold.ll
@@ -0,0 +1,30 @@
+; Test asan internal compiler flags:
+; -asan-instrumentation-with-call-threshold
+; -asan-memory-access-callback-prefix
+
+; RUN: opt < %s -asan -asan-module -asan-instrumentation-with-call-threshold=1 -S | FileCheck %s --check-prefix=CHECK-CALL
+; RUN: opt < %s -asan -asan-module -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s --check-prefix=CHECK-CALL
+; RUN: opt < %s -asan -asan-module -asan-instrumentation-with-call-threshold=0 -asan-memory-access-callback-prefix=__foo_ -S | FileCheck %s --check-prefix=CHECK-CUSTOM-PREFIX
+; RUN: opt < %s -asan -asan-module -asan-instrumentation-with-call-threshold=5 -S | FileCheck %s --check-prefix=CHECK-INLINE
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s --check-prefix=CHECK-INLINE
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @test_load(i32* %a, i64* %b, i512* %c, i80* %d) sanitize_address {
+entry:
+; CHECK-CALL: call void @__asan_load4
+; CHECK-CALL: call void @__asan_load8
+; CHECK-CALL: call void @__asan_loadN{{.*}}i64 64)
+; CHECK-CALL: call void @__asan_loadN{{.*}}i64 10)
+; CHECK-CUSTOM-PREFIX: call void @__foo_load4
+; CHECK-CUSTOM-PREFIX: call void @__foo_load8
+; CHECK-CUSTOM-PREFIX: call void @__foo_loadN
+; CHECK-INLINE-NOT: call void @__asan_load
+ %tmp1 = load i32* %a, align 4
+ %tmp2 = load i64* %b, align 8
+ %tmp3 = load i512* %c, align 32
+ %tmp4 = load i80* %d, align 8
+ ret void
+}
+
+
diff --git a/test/Instrumentation/AddressSanitizer/keep-instrumented_functions.ll b/test/Instrumentation/AddressSanitizer/keep-instrumented_functions.ll
index ff3bbb047fff..8726b8e5f9c9 100644
--- a/test/Instrumentation/AddressSanitizer/keep-instrumented_functions.ll
+++ b/test/Instrumentation/AddressSanitizer/keep-instrumented_functions.ll
@@ -1,5 +1,5 @@
; Test the -asan-keep-uninstrumented-functions flag: FOO should get cloned
-; RUN: opt < %s -asan -asan-keep-uninstrumented-functions -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -asan-keep-uninstrumented-functions -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/AddressSanitizer/lifetime-uar.ll b/test/Instrumentation/AddressSanitizer/lifetime-uar.ll
index 21eaf7f15412..25577de445be 100644
--- a/test/Instrumentation/AddressSanitizer/lifetime-uar.ll
+++ b/test/Instrumentation/AddressSanitizer/lifetime-uar.ll
@@ -1,5 +1,5 @@
; Test handling of llvm.lifetime intrinsics in UAR mode.
-; RUN: opt < %s -asan -asan-use-after-return -asan-check-lifetime -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -asan-use-after-return -asan-check-lifetime -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Instrumentation/AddressSanitizer/lifetime.ll b/test/Instrumentation/AddressSanitizer/lifetime.ll
index d80331e38723..175a07d51e69 100644
--- a/test/Instrumentation/AddressSanitizer/lifetime.ll
+++ b/test/Instrumentation/AddressSanitizer/lifetime.ll
@@ -1,5 +1,5 @@
; Test handling of llvm.lifetime intrinsics.
-; RUN: opt < %s -asan -asan-check-lifetime -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -asan-check-lifetime -asan-use-after-return=0 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Instrumentation/AddressSanitizer/stack-poisoning.ll b/test/Instrumentation/AddressSanitizer/stack-poisoning.ll
new file mode 100644
index 000000000000..ace12d03b70e
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/stack-poisoning.ll
@@ -0,0 +1,43 @@
+; RUN: opt < %s -asan -asan-module -asan-use-after-return -S | FileCheck --check-prefix=CHECK-UAR %s
+; RUN: opt < %s -asan -asan-module -asan-use-after-return=0 -S | FileCheck --check-prefix=CHECK-PLAIN %s
+target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @Foo(i8*)
+
+define void @Bar() uwtable sanitize_address {
+entry:
+; CHECK-PLAIN-LABEL: Bar
+; CHECK-PLAIN-NOT: label
+; CHECK-PLAIN: ret void
+
+; CHECK-UAR-LABEL: Bar
+; CHECK-UAR: load i32* @__asan_option_detect_stack_use_after_return
+; CHECK-UAR: label
+; CHECK-UAR: call i64 @__asan_stack_malloc_1
+; CHECK-UAR: label
+; CHECK-UAR: call void @Foo
+; If LocalStackBase != OrigStackBase
+; CHECK-UAR: label
+; Then Block: poison the entire frame.
+ ; CHECK-UAR: store i64 -723401728380766731
+ ; CHECK-UAR: store i64 -723401728380766731
+ ; CHECK-UAR: store i8 0
+ ; CHECK-UAR-NOT: store
+ ; CHECK-UAR: label
+; Else Block: no UAR frame. Only unpoison the redzones.
+ ; CHECK-UAR: store i64 0
+ ; CHECK-UAR: store i32 0
+ ; CHECK-UAR-NOT: store
+ ; CHECK-UAR: label
+; Done, no more stores.
+; CHECK-UAR-NOT: store
+; CHECK-UAR: ret void
+
+ %x = alloca [20 x i8], align 16
+ %arraydecay = getelementptr inbounds [20 x i8]* %x, i64 0, i64 0
+ call void @Foo(i8* %arraydecay)
+ ret void
+}
+
+
diff --git a/test/Instrumentation/AddressSanitizer/stack_layout.ll b/test/Instrumentation/AddressSanitizer/stack_layout.ll
new file mode 100644
index 000000000000..c027acf3e4fd
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/stack_layout.ll
@@ -0,0 +1,49 @@
+; Test ASan's stack layout.
+; More tests in tests/Transforms/Utils/ASanStackFrameLayoutTest.cpp
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @Use(i8*)
+
+; CHECK: private unnamed_addr constant{{.*}}3 32 10 3 XXX 64 20 3 YYY 128 30 3 ZZZ
+; CHECK: private unnamed_addr constant{{.*}}3 32 5 3 AAA 64 55 3 BBB 160 555 3 CCC
+; CHECK: private unnamed_addr constant{{.*}}3 256 128 3 CCC 448 128 3 BBB 608 128 3 AAA
+
+define void @Func1() sanitize_address {
+entry:
+; CHECK-LABEL: Func1
+; CHECK: alloca [192 x i8]
+; CHECK-NOT: alloca
+; CHECK: ret void
+ %XXX = alloca [10 x i8], align 1
+ %YYY = alloca [20 x i8], align 1
+ %ZZZ = alloca [30 x i8], align 1
+ ret void
+}
+
+define void @Func2() sanitize_address {
+entry:
+; CHECK-LABEL: Func2
+; CHECK: alloca [864 x i8]
+; CHECK-NOT: alloca
+; CHECK: ret void
+ %AAA = alloca [5 x i8], align 1
+ %BBB = alloca [55 x i8], align 1
+ %CCC = alloca [555 x i8], align 1
+ ret void
+}
+
+; Check that we reorder vars according to alignment and handle large alignments.
+define void @Func3() sanitize_address {
+entry:
+; CHECK-LABEL: Func3
+; CHECK: alloca [768 x i8]
+; CHECK-NOT: alloca
+; CHECK: ret void
+ %AAA = alloca [128 x i8], align 16
+ %BBB = alloca [128 x i8], align 64
+ %CCC = alloca [128 x i8], align 256
+ ret void
+}
diff --git a/test/Instrumentation/AddressSanitizer/test64.ll b/test/Instrumentation/AddressSanitizer/test64.ll
index 6aa5c2885099..fd93f4576ae4 100644
--- a/test/Instrumentation/AddressSanitizer/test64.ll
+++ b/test/Instrumentation/AddressSanitizer/test64.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -asan -S | FileCheck %s
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
define i32 @read_4_bytes(i32* %a) sanitize_address {
@@ -6,11 +6,11 @@ entry:
%tmp1 = load i32* %a, align 4
ret i32 %tmp1
}
-; CHECK: @read_4_bytes
+; CHECK-LABEL: @read_4_bytes
; CHECK-NOT: ret
; CHECK: lshr {{.*}} 3
-; Check for ASAN's Offset for 64-bit (2^44 or 7fff8000)
-; CHECK-NEXT: {{17592186044416|2147450880}}
+; Check for ASAN's Offset for 64-bit (7fff8000)
+; CHECK-NEXT: add{{.*}}2147450880
; CHECK: ret
define void @example_atomicrmw(i64* %ptr) nounwind uwtable sanitize_address {
@@ -19,18 +19,22 @@ entry:
ret void
}
-; CHECK: @example_atomicrmw
+; CHECK-LABEL: @example_atomicrmw
; CHECK: lshr {{.*}} 3
+; CHECK: __asan_report_store8
+; CHECK-NOT: __asan_report
; CHECK: atomicrmw
; CHECK: ret
define void @example_cmpxchg(i64* %ptr, i64 %compare_to, i64 %new_value) nounwind uwtable sanitize_address {
entry:
- %0 = cmpxchg i64* %ptr, i64 %compare_to, i64 %new_value seq_cst
+ %0 = cmpxchg i64* %ptr, i64 %compare_to, i64 %new_value seq_cst seq_cst
ret void
}
-; CHECK: @example_cmpxchg
+; CHECK-LABEL: @example_cmpxchg
; CHECK: lshr {{.*}} 3
+; CHECK: __asan_report_store8
+; CHECK-NOT: __asan_report
; CHECK: cmpxchg
; CHECK: ret
diff --git a/test/Instrumentation/AddressSanitizer/ubsan.ll b/test/Instrumentation/AddressSanitizer/ubsan.ll
new file mode 100644
index 000000000000..22e4172069cf
--- /dev/null
+++ b/test/Instrumentation/AddressSanitizer/ubsan.ll
@@ -0,0 +1,52 @@
+; ASan shouldn't instrument code added by UBSan.
+
+; RUN: opt < %s -asan -asan-module -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.A = type { i32 (...)** }
+declare void @__ubsan_handle_dynamic_type_cache_miss(i8*, i64, i64) uwtable
+@__ubsan_vptr_type_cache = external global [128 x i64]
+@.src = private unnamed_addr constant [19 x i8] c"tmp/ubsan/vptr.cpp\00", align 1
+@0 = private unnamed_addr constant { i16, i16, [4 x i8] } { i16 -1, i16 0, [4 x i8] c"'A'\00" }
+@_ZTI1A = external constant i8*
+@1 = private unnamed_addr global { { [19 x i8]*, i32, i32 }, { i16, i16, [4 x i8] }*, i8*, i8 } { { [19 x i8]*, i32, i32 } { [19 x i8]* @.src, i32 2, i32 18 }, { i16, i16, [4 x i8] }* @0, i8* bitcast (i8** @_ZTI1A to i8*), i8 4 }
+
+define void @_Z3BarP1A(%struct.A* %a) uwtable sanitize_address {
+; CHECK-LABEL: define void @_Z3BarP1A
+entry:
+ %0 = bitcast %struct.A* %a to void (%struct.A*)***
+ %vtable = load void (%struct.A*)*** %0, align 8
+; CHECK: __asan_report_load8
+ %1 = load void (%struct.A*)** %vtable, align 8
+; CHECK: __asan_report_load8
+ %2 = ptrtoint void (%struct.A*)** %vtable to i64
+ %3 = xor i64 %2, -303164226014115343, !nosanitize !0
+ %4 = mul i64 %3, -7070675565921424023, !nosanitize !0
+ %5 = lshr i64 %4, 47, !nosanitize !0
+ %6 = xor i64 %4, %2, !nosanitize !0
+ %7 = xor i64 %6, %5, !nosanitize !0
+ %8 = mul i64 %7, -7070675565921424023, !nosanitize !0
+ %9 = lshr i64 %8, 47, !nosanitize !0
+ %10 = xor i64 %9, %8, !nosanitize !0
+ %11 = mul i64 %10, -7070675565921424023, !nosanitize !0
+ %12 = and i64 %11, 127, !nosanitize !0
+ %13 = getelementptr inbounds [128 x i64]* @__ubsan_vptr_type_cache, i64 0, i64 %12, !nosanitize !0
+; CHECK-NOT: __asan_report_load8
+ %14 = load i64* %13, align 8, !nosanitize !0
+ %15 = icmp eq i64 %14, %11, !nosanitize !0
+ br i1 %15, label %cont, label %handler.dynamic_type_cache_miss, !nosanitize !0
+
+handler.dynamic_type_cache_miss: ; preds = %entry
+ %16 = ptrtoint %struct.A* %a to i64, !nosanitize !0
+ tail call void @__ubsan_handle_dynamic_type_cache_miss(i8* bitcast ({ { [19 x i8]*, i32, i32 }, { i16, i16, [4 x i8] }*, i8*, i8 }* @1 to i8*), i64 %16, i64 %11) #2, !nosanitize !0
+ br label %cont, !nosanitize !0
+
+cont: ; preds = %handler.dynamic_type_cache_miss, %entry
+ tail call void %1(%struct.A* %a)
+; CHECK: ret void
+ ret void
+}
+
+!0 = metadata !{}
diff --git a/test/Instrumentation/BoundsChecking/phi.ll b/test/Instrumentation/BoundsChecking/phi.ll
index 86b59222707b..25a5ed12c62d 100644
--- a/test/Instrumentation/BoundsChecking/phi.ll
+++ b/test/Instrumentation/BoundsChecking/phi.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -bounds-checking -S | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@global = private unnamed_addr constant [10 x i8] c"ola\00mundo\00", align 1
@@ -50,3 +50,56 @@ while.body.i:
fn.exit:
ret void
}
+
+
+@global_as1 = private unnamed_addr addrspace(1) constant [10 x i8] c"ola\00mundo\00", align 1
+
+define void @f1_as1(i8 addrspace(1)* nocapture %c) {
+; CHECK: @f1_as1
+; no checks are possible here
+; CHECK-NOT: trap
+; CHECK: add i16 undef, -1
+; CHECK-NOT: trap
+entry:
+ %0 = load i8 addrspace(1)* %c, align 1
+ %tobool1 = icmp eq i8 %0, 0
+ br i1 %tobool1, label %while.end, label %while.body
+
+while.body:
+ %c.addr.02 = phi i8 addrspace(1)* [ %incdec.ptr, %while.body ], [ %c, %entry ]
+ %incdec.ptr = getelementptr inbounds i8 addrspace(1)* %c.addr.02, i64 -1
+ store i8 100, i8 addrspace(1)* %c.addr.02, align 1
+ %1 = load i8 addrspace(1)* %incdec.ptr, align 1
+ %tobool = icmp eq i8 %1, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+
+define void @f2_as1() {
+; CHECK: @f2_as1
+while.body.i.preheader:
+ %addr = getelementptr inbounds [10 x i8] addrspace(1)* @global_as1, i16 0, i16 9
+ br label %while.body.i
+
+while.body.i:
+; CHECK: phi
+; CHECK-NEXT: phi
+; CHECK-NOT: phi
+ %c.addr.02.i = phi i8 addrspace(1)* [ %incdec.ptr.i, %while.body.i ], [ %addr, %while.body.i.preheader ]
+ %incdec.ptr.i = getelementptr inbounds i8 addrspace(1)* %c.addr.02.i, i16 -1
+; CHECK: sub i16 10, %0
+; CHECK-NEXT: icmp ult i16 10, %0
+; CHECK-NEXT: icmp ult i16 {{.*}}, 1
+; CHECK-NEXT: or i1
+; CHECK-NEXT: br {{.*}}, label %trap
+ store i8 100, i8 addrspace(1)* %c.addr.02.i, align 1
+ %0 = load i8 addrspace(1)* %incdec.ptr.i, align 1
+ %tobool.i = icmp eq i8 %0, 0
+ br i1 %tobool.i, label %fn.exit, label %while.body.i
+
+fn.exit:
+ ret void
+}
diff --git a/test/Instrumentation/BoundsChecking/simple.ll b/test/Instrumentation/BoundsChecking/simple.ll
index 72b58f4b0a32..ddacf6d412bd 100644
--- a/test/Instrumentation/BoundsChecking/simple.ll
+++ b/test/Instrumentation/BoundsChecking/simple.ll
@@ -1,8 +1,11 @@
; RUN: opt < %s -bounds-checking -S | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@.str = private constant [8 x i8] c"abcdefg\00" ; <[8 x i8]*>
+@.str_as1 = private addrspace(1) constant [8 x i8] c"abcdefg\00" ; <[8 x i8] addrspace(1)*>
+
+
declare noalias i8* @malloc(i64) nounwind
declare noalias i8* @calloc(i64, i64) nounwind
declare noalias i8* @realloc(i8* nocapture, i64) nounwind
@@ -60,6 +63,16 @@ define void @f5(i64 %x) nounwind {
ret void
}
+define void @f5_as1(i64 %x) nounwind {
+; CHECK: @f5_as1
+ %idx = getelementptr inbounds [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x
+ ; CHECK: sub i16
+  ; CHECK: icmp ult i16
+; CHECK: trap
+ %1 = load i8 addrspace(1)* %idx, align 4
+ ret void
+}
+
; CHECK: @f6
define void @f6(i64 %x) nounwind {
%1 = alloca i128
@@ -117,6 +130,15 @@ define void @f11(i128* byval %x) nounwind {
ret void
}
+; CHECK: @f11_as1
+define void @f11_as1(i128 addrspace(1)* byval %x) nounwind {
+ %1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)*
+ %2 = getelementptr inbounds i8 addrspace(1)* %1, i16 16
+; CHECK: br label
+ %3 = load i8 addrspace(1)* %2, align 4
+ ret void
+}
+
; CHECK: @f12
define i64 @f12(i64 %x, i64 %y) nounwind {
%1 = tail call i8* @calloc(i64 1, i64 %x)
diff --git a/test/Instrumentation/DataFlowSanitizer/load.ll b/test/Instrumentation/DataFlowSanitizer/load.ll
index 6431213f8be5..6cd5151b1260 100644
--- a/test/Instrumentation/DataFlowSanitizer/load.ll
+++ b/test/Instrumentation/DataFlowSanitizer/load.ll
@@ -1,81 +1,155 @@
-; RUN: opt < %s -dfsan -S | FileCheck %s
+; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-load=1 -S | FileCheck %s --check-prefix=COMBINE_PTR_LABEL
+; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-load=0 -S | FileCheck %s --check-prefix=NO_COMBINE_PTR_LABEL
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
define i8 @load8(i8* %p) {
- ; CHECK: @"dfs$load8"
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: load
- ; CHECK: store{{.*}}__dfsan_retval_tls
- ; CHECK: ret i8
+ ; COMBINE_PTR_LABEL: @"dfs$load8"
+ ; COMBINE_PTR_LABEL: load i16*
+ ; COMBINE_PTR_LABEL: ptrtoint i8* {{.*}} to i64
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64
+ ; COMBINE_PTR_LABEL: load i16*
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call zeroext i16 @__dfsan_union
+ ; COMBINE_PTR_LABEL: load i8*
+ ; COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
+ ; COMBINE_PTR_LABEL: ret i8
+
+ ; NO_COMBINE_PTR_LABEL: @"dfs$load8"
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i8*
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} to i16*
+ ; NO_COMBINE_PTR_LABEL: load i16*
+ ; NO_COMBINE_PTR_LABEL: load i8*
+ ; NO_COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
+ ; NO_COMBINE_PTR_LABEL: ret i8
+
%a = load i8* %p
ret i8 %a
}
define i16 @load16(i16* %p) {
- ; CHECK: @"dfs$load16"
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: load
- ; CHECK: load
- ; CHECK: icmp ne
- ; CHECK: call{{.*}}__dfsan_union
- ; CHECK: store{{.*}}__dfsan_retval_tls
- ; CHECK: ret i16
+ ; COMBINE_PTR_LABEL: @"dfs$load16"
+ ; COMBINE_PTR_LABEL: ptrtoint i16*
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16
+ ; COMBINE_PTR_LABEL: load i16*
+ ; COMBINE_PTR_LABEL: load i16*
+ ; COMBINE_PTR_LABEL: icmp ne
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: load i16*
+ ; COMBINE_PTR_LABEL: store {{.*}} @__dfsan_retval_tls
+ ; COMBINE_PTR_LABEL: ret i16
+
+ ; NO_COMBINE_PTR_LABEL: @"dfs$load16"
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i16*
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: load i16*
+ ; NO_COMBINE_PTR_LABEL: load i16*
+ ; NO_COMBINE_PTR_LABEL: icmp ne i16
+ ; NO_COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; NO_COMBINE_PTR_LABEL: load i16*
+ ; NO_COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
+ ; NO_COMBINE_PTR_LABEL: ret i16
+
%a = load i16* %p
ret i16 %a
}
define i32 @load32(i32* %p) {
- ; CHECK: @"dfs$load32"
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: bitcast
- ; CHECK: load
- ; CHECK: trunc
- ; CHECK: shl
- ; CHECK: lshr
- ; CHECK: or
- ; CHECK: icmp eq
+ ; COMBINE_PTR_LABEL: @"dfs$load32"
+ ; COMBINE_PTR_LABEL: ptrtoint i32*
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; COMBINE_PTR_LABEL: bitcast i16* {{.*}} i64*
+ ; COMBINE_PTR_LABEL: load i64*
+ ; COMBINE_PTR_LABEL: trunc i64 {{.*}} i16
+ ; COMBINE_PTR_LABEL: shl i64
+ ; COMBINE_PTR_LABEL: lshr i64
+ ; COMBINE_PTR_LABEL: or i64
+ ; COMBINE_PTR_LABEL: icmp eq i64
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: load i32*
+ ; COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
+ ; COMBINE_PTR_LABEL: ret i32
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union_load
- ; CHECK: store{{.*}}__dfsan_retval_tls
- ; CHECK: ret i32
-
- ; CHECK: call{{.*}}__dfsan_union_load
+ ; NO_COMBINE_PTR_LABEL: @"dfs$load32"
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i32*
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; NO_COMBINE_PTR_LABEL: bitcast i16* {{.*}} i64*
+ ; NO_COMBINE_PTR_LABEL: load i64*
+ ; NO_COMBINE_PTR_LABEL: trunc i64 {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: shl i64
+ ; NO_COMBINE_PTR_LABEL: lshr i64
+ ; NO_COMBINE_PTR_LABEL: or i64
+ ; NO_COMBINE_PTR_LABEL: icmp eq i64
+ ; NO_COMBINE_PTR_LABEL: load i32*
+ ; NO_COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
+ ; NO_COMBINE_PTR_LABEL: ret i32
+ ; NO_COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union_load
+
%a = load i32* %p
ret i32 %a
}
define i64 @load64(i64* %p) {
- ; CHECK: @"dfs$load64"
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: bitcast
- ; CHECK: load
- ; CHECK: trunc
- ; CHECK: shl
- ; CHECK: lshr
- ; CHECK: or
- ; CHECK: icmp eq
-
- ; CHECK: store{{.*}}__dfsan_retval_tls
- ; CHECK: ret i64
+ ; COMBINE_PTR_LABEL: @"dfs$load64"
+ ; COMBINE_PTR_LABEL: ptrtoint i64*
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; COMBINE_PTR_LABEL: bitcast i16* {{.*}} i64*
+ ; COMBINE_PTR_LABEL: load i64*
+ ; COMBINE_PTR_LABEL: trunc i64 {{.*}} i16
+ ; COMBINE_PTR_LABEL: shl i64
+ ; COMBINE_PTR_LABEL: lshr i64
+ ; COMBINE_PTR_LABEL: or i64
+ ; COMBINE_PTR_LABEL: icmp eq i64
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: load i64*
+ ; COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
+ ; COMBINE_PTR_LABEL: ret i64
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union_load
+ ; COMBINE_PTR_LABEL: getelementptr i64* {{.*}} i64
+ ; COMBINE_PTR_LABEL: load i64*
+ ; COMBINE_PTR_LABEL: icmp eq i64
- ; CHECK: call{{.*}}__dfsan_union_load
-
- ; CHECK: getelementptr
- ; CHECK: load
- ; CHECK: icmp eq
+ ; NO_COMBINE_PTR_LABEL: @"dfs$load64"
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i64*
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; NO_COMBINE_PTR_LABEL: bitcast i16* {{.*}} i64*
+ ; NO_COMBINE_PTR_LABEL: load i64*
+ ; NO_COMBINE_PTR_LABEL: trunc i64 {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: shl i64
+ ; NO_COMBINE_PTR_LABEL: lshr i64
+ ; NO_COMBINE_PTR_LABEL: or i64
+ ; NO_COMBINE_PTR_LABEL: icmp eq i64
+ ; NO_COMBINE_PTR_LABEL: load i64*
+ ; NO_COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
+ ; NO_COMBINE_PTR_LABEL: ret i64
+ ; NO_COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union_load
+ ; NO_COMBINE_PTR_LABEL: getelementptr i64* {{.*}} i64
+ ; NO_COMBINE_PTR_LABEL: load i64*
+ ; NO_COMBINE_PTR_LABEL: icmp eq i64
%a = load i64* %p
ret i64 %a
-}
+} \ No newline at end of file
diff --git a/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll b/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll
index 1a5646074d21..f3c36b17b388 100644
--- a/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll
+++ b/test/Instrumentation/DataFlowSanitizer/prefix-rename.ll
@@ -8,7 +8,15 @@ module asm ".symver f1,f@@version1"
; CHECK: @"dfs$f2" = alias {{.*}} @"dfs$f1"
@f2 = alias void ()* @f1
+; CHECK: @"dfs$g2" = alias {{.*}} @"dfs$g1"
+@g2 = alias bitcast (void (i8*)* @g1 to void (i16*)*)
+
; CHECK: define void @"dfs$f1"
define void @f1() {
ret void
}
+
+; CHECK: define void @"dfs$g1"
+define void @g1(i8*) {
+ ret void
+}
diff --git a/test/Instrumentation/DataFlowSanitizer/store.ll b/test/Instrumentation/DataFlowSanitizer/store.ll
index 95091777a326..8060537f3152 100644
--- a/test/Instrumentation/DataFlowSanitizer/store.ll
+++ b/test/Instrumentation/DataFlowSanitizer/store.ll
@@ -1,75 +1,146 @@
-; RUN: opt < %s -dfsan -S | FileCheck %s
+; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-store=1 -S | FileCheck %s --check-prefix=COMBINE_PTR_LABEL
+; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-store=0 -S | FileCheck %s --check-prefix=NO_COMBINE_PTR_LABEL
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
define void @store8(i8 %v, i8* %p) {
- ; CHECK: @"dfs$store8"
- ; CHECK: load{{.*}}__dfsan_arg_tls
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: store
+ ; NO_COMBINE_PTR_LABEL: @"dfs$store8"
+ ; NO_COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i8* {{.*}} i64
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: store i16
+ ; NO_COMBINE_PTR_LABEL: store i8
+
+ ; COMBINE_PTR_LABEL: @"dfs$store8"
+ ; COMBINE_PTR_LABEL: load i16*
+ ; COMBINE_PTR_LABEL: load i16*
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: ptrtoint i8* {{.*}} i64
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: store i16
+ ; COMBINE_PTR_LABEL: store i8
+
store i8 %v, i8* %p
ret void
}
define void @store16(i16 %v, i16* %p) {
- ; CHECK: @"dfs$store16"
- ; CHECK: load{{.*}}__dfsan_arg_tls
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: store
+ ; NO_COMBINE_PTR_LABEL: @"dfs$store16"
+ ; NO_COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i16* {{.*}} i64
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: store i16
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: store i16
+ ; NO_COMBINE_PTR_LABEL: store i16
+
+ ; COMBINE_PTR_LABEL: @"dfs$store16"
+ ; COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: ptrtoint i16* {{.*}} i64
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: store i16
+ ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: store i16
+ ; COMBINE_PTR_LABEL: store i16
+
store i16 %v, i16* %p
ret void
}
define void @store32(i32 %v, i32* %p) {
- ; CHECK: @"dfs$store32"
- ; CHECK: load{{.*}}__dfsan_arg_tls
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: store
+ ; NO_COMBINE_PTR_LABEL: @"dfs$store32"
+ ; NO_COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i32* {{.*}} i64
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: store i16
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: store i16
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: store i16
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: store i16
+ ; NO_COMBINE_PTR_LABEL: store i32
+
+ ; COMBINE_PTR_LABEL: @"dfs$store32"
+ ; COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: ptrtoint i32* {{.*}} i64
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: store i16
+ ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: store i16
+ ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: store i16
+ ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: store i16
+ ; COMBINE_PTR_LABEL: store i32
+
store i32 %v, i32* %p
ret void
}
define void @store64(i64 %v, i64* %p) {
- ; CHECK: @"dfs$store64"
- ; CHECK: load{{.*}}__dfsan_arg_tls
- ; CHECK: ptrtoint
- ; CHECK: and
- ; CHECK: mul
- ; CHECK: inttoptr
- ; CHECK: insertelement
- ; CHECK: insertelement
- ; CHECK: insertelement
- ; CHECK: insertelement
- ; CHECK: insertelement
- ; CHECK: insertelement
- ; CHECK: insertelement
- ; CHECK: insertelement
- ; CHECK: bitcast
- ; CHECK: getelementptr
- ; CHECK: store
- ; CHECK: store
+ ; NO_COMBINE_PTR_LABEL: @"dfs$store64"
+ ; NO_COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; NO_COMBINE_PTR_LABEL: ptrtoint i64* {{.*}} i64
+ ; NO_COMBINE_PTR_LABEL: and i64
+ ; NO_COMBINE_PTR_LABEL: mul i64
+ ; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; NO_COMBINE_PTR_LABEL: bitcast i16* {{.*}} <8 x i16>*
+ ; NO_COMBINE_PTR_LABEL: store i64
+
+ ; COMBINE_PTR_LABEL: @"dfs$store64"
+ ; COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; COMBINE_PTR_LABEL: load i16* {{.*}} @__dfsan_arg_tls
+ ; COMBINE_PTR_LABEL: icmp ne i16
+ ; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union
+ ; COMBINE_PTR_LABEL: ptrtoint i64* {{.*}} i64
+ ; COMBINE_PTR_LABEL: and i64
+ ; COMBINE_PTR_LABEL: mul i64
+ ; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: insertelement {{.*}} i16
+ ; COMBINE_PTR_LABEL: bitcast i16* {{.*}} <8 x i16>*
+ ; COMBINE_PTR_LABEL: store <8 x i16>
+ ; COMBINE_PTR_LABEL: store i64
+
store i64 %v, i64* %p
ret void
}
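
The two RUN configurations above differ only in whether the pointer's shadow label is folded into the stored value's label. A minimal C++ sketch of that decision, assuming the semantics the CHECK lines imply (the equal-label shortcut appears to correspond to the "icmp ne i16" guard); the helper names and the stubbed union below are illustrative stand-ins, not the pass or the real __dfsan_union runtime:

    #include <cstdint>

    using dfsan_label = uint16_t;

    // Stand-in for the __dfsan_union runtime callback named in the checks.
    static dfsan_label union_labels_stub(dfsan_label A, dfsan_label B) {
      return A | B; // placeholder; the real runtime allocates or looks up a union label
    }

    static dfsan_label shadow_for_store(dfsan_label ValueLabel, dfsan_label PtrLabel,
                                        bool CombinePointerLabelsOnStore) {
      if (!CombinePointerLabelsOnStore)
        return ValueLabel;                              // NO_COMBINE_PTR_LABEL path
      if (ValueLabel == PtrLabel)
        return ValueLabel;                              // equal labels: no union call needed
      return union_labels_stub(ValueLabel, PtrLabel);   // COMBINE_PTR_LABEL path
    }

    int main() {
      return shadow_for_store(1, 2, true) != 0 ? 0 : 1; // tiny smoke use
    }
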
diff --git a/test/Instrumentation/DataFlowSanitizer/union.ll b/test/Instrumentation/DataFlowSanitizer/union.ll
new file mode 100644
index 000000000000..2b31081776b7
--- /dev/null
+++ b/test/Instrumentation/DataFlowSanitizer/union.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -dfsan -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+@a = common global i32 0
+@b = common global i32 0
+
+; Check that we reuse unions where possible.
+
+; CHECK-LABEL: @"dfs$f"
+define void @f(i32 %x, i32 %y) {
+ ; CHECK: call{{.*}}__dfsan_union
+ %xay = add i32 %x, %y
+ store i32 %xay, i32* @a
+ ; CHECK-NOT: call{{.*}}__dfsan_union
+ %xmy = mul i32 %x, %y
+ store i32 %xmy, i32* @b
+ ret void
+}
+
+; In this case, we compute the unions on both sides because neither block
+; dominates the other.
+
+; CHECK-LABEL: @"dfs$g"
+define void @g(i1 %p, i32 %x, i32 %y) {
+ br i1 %p, label %l1, label %l2
+
+l1:
+ ; CHECK: call{{.*}}__dfsan_union
+ %xay = add i32 %x, %y
+ store i32 %xay, i32* @a
+ br label %l3
+
+l2:
+ ; CHECK: call{{.*}}__dfsan_union
+ %xmy = mul i32 %x, %y
+ store i32 %xmy, i32* @b
+ br label %l3
+
+l3:
+ ret void
+}
+
+; In this case, we know that the label for %xayax subsumes the label for %xay.
+
+; CHECK-LABEL: @"dfs$h"
+define i32 @h(i32 %x, i32 %y) {
+ ; CHECK: call{{.*}}__dfsan_union
+ %xay = add i32 %x, %y
+ ; CHECK-NOT: call{{.*}}__dfsan_union
+ %xayax = add i32 %xay, %x
+ ret i32 %xayax
+}
diff --git a/test/Instrumentation/MemorySanitizer/atomics.ll b/test/Instrumentation/MemorySanitizer/atomics.ll
index ff0245262cb3..c8f3b88815bb 100644
--- a/test/Instrumentation/MemorySanitizer/atomics.ll
+++ b/test/Instrumentation/MemorySanitizer/atomics.ll
@@ -37,34 +37,36 @@ entry:
define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
- %0 = cmpxchg i32* %p, i32 %a, i32 %b seq_cst
+ %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
+ %0 = extractvalue { i32, i1 } %pair, 0
ret i32 %0
}
; CHECK: @Cmpxchg
-; CHECK: store i32 0,
+; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
-; CHECK: cmpxchg {{.*}} seq_cst
+; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
-; relaxed cmpxchg: bump up to "release"
+; relaxed cmpxchg: bump up to "release monotonic"
define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
- %0 = cmpxchg i32* %p, i32 %a, i32 %b monotonic
+ %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
+ %0 = extractvalue { i32, i1 } %pair, 0
ret i32 %0
}
; CHECK: @CmpxchgMonotonic
-; CHECK: store i32 0,
+; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
-; CHECK: cmpxchg {{.*}} release
+; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
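
The cmpxchg updates above track the IR change that gives compare-and-swap separate success and failure orderings; the "bump up to release monotonic" comment matches the standard C++11 pattern of a strengthened success order paired with a relaxed failure order. A small illustrative sketch using the std::atomic API (not the sanitizer itself):

    #include <atomic>

    // Success ordering strengthened to release, failure left relaxed (monotonic),
    // mirroring the "cmpxchg {{.*}} release monotonic" pattern checked above.
    bool cas_release(std::atomic<int> &A, int &Expected, int Desired) {
      return A.compare_exchange_strong(Expected, Desired,
                                       std::memory_order_release,
                                       std::memory_order_relaxed);
    }

    int main() {
      std::atomic<int> A{0};
      int Expected = 0;
      return cas_release(A, Expected, 1) ? 0 : 1;
    }
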
diff --git a/test/Instrumentation/MemorySanitizer/check_access_address.ll b/test/Instrumentation/MemorySanitizer/check_access_address.ll
new file mode 100644
index 000000000000..566022600ea4
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/check_access_address.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+
+; Test byval argument shadow alignment
+
+define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory {
+entry:
+ %x = load <2 x i64>* %p
+ ret <2 x i64> %x
+}
+
+; CHECK: @ByValArgumentShadowLargeAlignment
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 16, i32 8, i1 false)
+; CHECK: ret <2 x i64>
+
+
+define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory {
+entry:
+ %x = load i16* %p
+ ret i16 %x
+}
+
+; CHECK: @ByValArgumentShadowSmallAlignment
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 2, i32 2, i1 false)
+; CHECK: ret i16
diff --git a/test/Instrumentation/MemorySanitizer/do-not-emit-module-limits.ll b/test/Instrumentation/MemorySanitizer/do-not-emit-module-limits.ll
new file mode 100644
index 000000000000..7d0a62a256c4
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/do-not-emit-module-limits.ll
@@ -0,0 +1,21 @@
+; Test that MSan does not emit undefined symbol __executable_start when it is
+; not needed (i.e. without -msan-wrap-indirect-calls).
+
+; RUN: opt < %s -msan -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define void @_Z1fv() #0 {
+entry:
+ ret void
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (208165)"}
+
+; CHECK-NOT: __executable_start
diff --git a/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll b/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
new file mode 100644
index 000000000000..beb3c5fad735
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
@@ -0,0 +1,53 @@
+; Test -msan-instrumentation-with-call-threshold
+; Test that in with-calls mode there are no calls to __msan_chain_origin - they
+; are done from __msan_maybe_store_origin_*.
+
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -S | FileCheck %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-instrumentation-with-call-threshold=0 -msan-track-origins=2 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
+entry:
+ %0 = load i32* %a, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ tail call void (...)* @foo() nounwind
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+declare void @foo(...)
+
+; CHECK-LABEL: @LoadAndCmp
+; CHECK: = load
+; CHECK: = load
+; CHECK: = zext i1 {{.*}} to i8
+; CHECK: call void @__msan_maybe_warning_1(
+; CHECK-NOT: unreachable
+; CHECK: ret void
+
+
+define void @Store(i64* nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
+entry:
+ store i64 %x, i64* %p, align 4
+ ret void
+}
+
+; CHECK-LABEL: @Store
+; CHECK: load {{.*}} @__msan_param_tls
+; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
+; CHECK: store
+; CHECK-ORIGINS-NOT: __msan_chain_origin
+; CHECK-ORIGINS: bitcast i64* {{.*}} to i8*
+; CHECK-ORIGINS-NOT: __msan_chain_origin
+; CHECK-ORIGINS: call void @__msan_maybe_store_origin_8(
+; CHECK-ORIGINS-NOT: __msan_chain_origin
+; CHECK: store i64
+; CHECK: ret void
diff --git a/test/Instrumentation/MemorySanitizer/missing_origin.ll b/test/Instrumentation/MemorySanitizer/missing_origin.ll
new file mode 100644
index 000000000000..673e85369a3a
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/missing_origin.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Test that result origin is directly propagated from the argument,
+; and is not affected by all the literal undef operands.
+; https://code.google.com/p/memory-sanitizer/issues/detail?id=56
+
+define <4 x i32> @Shuffle(<4 x i32> %x) nounwind uwtable sanitize_memory {
+entry:
+ %y = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ ret <4 x i32> %y
+}
+
+; CHECK-LABEL: @Shuffle(
+; CHECK: [[A:%.*]] = load i32* {{.*}}@__msan_param_origin_tls,
+; CHECK: store i32 [[A]], i32* @__msan_retval_origin_tls
+; CHECK: ret <4 x i32>
diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 72a992dd5901..0faf45d70c59 100644
--- a/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -1,6 +1,5 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
-; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK-ORIGINS %s
-; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s -check-prefix=CHECK-AA
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -32,20 +31,16 @@ entry:
; CHECK: @Store
; CHECK: load {{.*}} @__msan_param_tls
+; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store
-; CHECK: store
-; CHECK: ret void
-; CHECK-ORIGINS: @Store
-; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
-; CHECK-ORIGINS: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
-; CHECK-ORIGINS: store
-; CHECK-ORIGINS: ret void
+; CHECK: store
+; CHECK: ret void
; Check instrumentation of aligned stores
@@ -60,20 +55,16 @@ entry:
; CHECK: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
+; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store {{.*}} align 32
-; CHECK: store {{.*}} align 32
-; CHECK: ret void
-; CHECK-ORIGINS: @AlignedStore
-; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
-; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
-; CHECK-ORIGINS: store {{.*}} align 32
-; CHECK-ORIGINS: ret void
+; CHECK: store {{.*}} align 32
+; CHECK: ret void
; load followed by cmp: check that we load the shadow and call __msan_warning.
@@ -251,18 +242,23 @@ declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
; Check that we propagate shadow for "select"
-define i32 @Select(i32 %a, i32 %b, i32 %c) nounwind uwtable readnone sanitize_memory {
+define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
- %tobool = icmp ne i32 %c, 0
- %cond = select i1 %tobool, i32 %a, i32 %b
+ %cond = select i1 %c, i32 %a, i32 %b
ret i32 %cond
}
; CHECK: @Select
-; CHECK: select
-; CHECK-NEXT: sext i1 {{.*}} to i32
-; CHECK-NEXT: or i32
-; CHECK-NEXT: select
+; CHECK: select i1
+; CHECK-DAG: or i32
+; CHECK-DAG: xor i32
+; CHECK: or i32
+; CHECK-DAG: select i1
+; CHECK-ORIGINS-DAG: select
+; CHECK-ORIGINS-DAG: select
+; CHECK-DAG: select i1
+; CHECK: store i32{{.*}}@__msan_retval_tls
+; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret i32
@@ -278,17 +274,17 @@ entry:
; CHECK: @SelectVector
; CHECK: select <8 x i1>
-; CHECK-NEXT: sext <8 x i1> {{.*}} to <8 x i16>
-; CHECK-NEXT: or <8 x i16>
-; CHECK-NEXT: select <8 x i1>
+; CHECK-DAG: or <8 x i16>
+; CHECK-DAG: xor <8 x i16>
+; CHECK: or <8 x i16>
+; CHECK-DAG: select <8 x i1>
+; CHECK-ORIGINS-DAG: select
+; CHECK-ORIGINS-DAG: select
+; CHECK-DAG: select <8 x i1>
+; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
+; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret <8 x i16>
-; CHECK-ORIGINS: @SelectVector
-; CHECK-ORIGINS: bitcast <8 x i1> {{.*}} to i8
-; CHECK-ORIGINS: icmp ne i8
-; CHECK-ORIGINS: select i1
-; CHECK-ORIGINS: ret <8 x i16>
-
; Check that we propagate origin for "select" with scalar condition and vector
; arguments. Select condition shadow is sign-extended to the vector type and
@@ -302,10 +298,13 @@ entry:
; CHECK: @SelectVector2
; CHECK: select i1
-; CHECK: sext i1 {{.*}} to i128
-; CHECK: bitcast i128 {{.*}} to <8 x i16>
+; CHECK-DAG: or <8 x i16>
+; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
-; CHECK: select i1
+; CHECK-DAG: select i1
+; CHECK-ORIGINS-DAG: select i1
+; CHECK-ORIGINS-DAG: select i1
+; CHECK-DAG: select i1
; CHECK: ret <8 x i16>
@@ -318,10 +317,27 @@ entry:
; CHECK: @SelectStruct
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
+; CHECK-ORIGINS: select i1
+; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
; CHECK: ret { i64, i64 }
+define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
+entry:
+ %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
+ ret { i64*, double } %c
+}
+
+; CHECK: @SelectStruct2
+; CHECK: select i1 {{.*}}, { i64, i64 }
+; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
+; CHECK-ORIGINS: select i1
+; CHECK-ORIGINS: select i1
+; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
+; CHECK: ret { i64*, double }
+
+
define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
%0 = inttoptr i64 %x to i8*
@@ -330,9 +346,10 @@ entry:
; CHECK: @IntToPtr
; CHECK: load i64*{{.*}}__msan_param_tls
+; CHECK-ORIGINS-NEXT: load i32*{{.*}}__msan_param_origin_tls
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
-; CHECK: ret i8
+; CHECK: ret i8*
define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
@@ -342,9 +359,11 @@ entry:
}
; CHECK: @IntToPtr_ZExt
+; CHECK: load i16*{{.*}}__msan_param_tls
; CHECK: zext
; CHECK-NEXT: inttoptr
-; CHECK: ret i8
+; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
+; CHECK: ret i8*
; Check that we insert exactly one check on udiv
@@ -474,13 +493,8 @@ define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
; CHECK: @ShadowLoadAlignmentSmall
; CHECK: load volatile i32* {{.*}} align 2
; CHECK: load i32* {{.*}} align 2
-; CHECK: ret i32
-
-; CHECK-ORIGINS: @ShadowLoadAlignmentSmall
-; CHECK-ORIGINS: load volatile i32* {{.*}} align 2
-; CHECK-ORIGINS: load i32* {{.*}} align 2
; CHECK-ORIGINS: load i32* {{.*}} align 4
-; CHECK-ORIGINS: ret i32
+; CHECK: ret i32
; Test vector manipulation instructions.
@@ -567,17 +581,13 @@ declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
; CHECK: @LoadIntrinsic
; CHECK: load <16 x i8>* {{.*}} align 1
+; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32* {{.*}}
; CHECK-NOT: br
; CHECK-NOT: = or
; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
-; CHECK: ret <16 x i8>
-
-; CHECK-ORIGINS: @LoadIntrinsic
-; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32* {{.*}}
-; CHECK-ORIGINS: call <16 x i8> @llvm.x86.sse3.ldu.dq
; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
-; CHECK-ORIGINS: ret <16 x i8>
+; CHECK: ret <16 x i8>
; Simple NoMem intrinsic
@@ -593,21 +603,17 @@ declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
; CHECK: @Paddsw128
; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
-; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
-; CHECK-NEXT: = or <8 x i16>
-; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
-; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
-; CHECK-NEXT: ret <8 x i16>
-
-; CHECK-ORIGINS: @Paddsw128
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
+; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
+; CHECK-NEXT: = or <8 x i16>
; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
-; CHECK-ORIGINS: call <8 x i16> @llvm.x86.sse2.padds.w
+; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
+; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
-; CHECK-ORIGINS: ret <8 x i16>
+; CHECK-NEXT: ret <8 x i16>
; Test handling of vectors of pointers.
@@ -645,7 +651,7 @@ define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
declare void @llvm.va_start(i8*) nounwind
; Function Attrs: nounwind uwtable
-define void @VAStart(i32 %x, ...) {
+define void @VAStart(i32 %x, ...) sanitize_memory {
entry:
%x.addr = alloca i32, align 4
%va = alloca [1 x %struct.__va_list_tag], align 16
@@ -677,7 +683,7 @@ entry:
; CHECK: ret void
-; Test that checks are omitted but shadow propagation is kept if
+; Test that checks are omitted and returned value is always initialized if
; sanitize_memory attribute is missing.
define i32 @NoSanitizeMemory(i32 %x) uwtable {
@@ -697,9 +703,7 @@ declare void @bar()
; CHECK: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning
-; CHECK: load i32* {{.*}} @__msan_param_tls
-; CHECK-NOT: @__msan_warning
-; CHECK: store {{.*}} @__msan_retval_tls
+; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning
; CHECK: ret i32
@@ -739,41 +743,58 @@ declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
; CHECK: ret i32
-; Test argument shadow alignment
+; Test PHINode instrumentation in blacklisted functions
-define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
+define i32 @NoSanitizeMemoryPHI(i32 %x) {
entry:
- ret <2 x i64> %b
+ %tobool = icmp ne i32 %x, 0
+ br i1 %tobool, label %cond.true, label %cond.false
+
+cond.true: ; preds = %entry
+ br label %cond.end
+
+cond.false: ; preds = %entry
+ br label %cond.end
+
+cond.end: ; preds = %cond.false, %cond.true
+ %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
+ ret i32 %cond
}
-; CHECK: @ArgumentShadowAlignment
-; CHECK: load <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
-; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
-; CHECK: ret <2 x i64>
+; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
+; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
+; CHECK: ret i32 [[A]]
-; Test byval argument shadow alignment
+; Test that there are no __msan_param_origin_tls stores when
+; argument shadow is a compile-time zero constant (which is always the case
+; in functions missing sanitize_memory attribute).
-define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory {
+define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
entry:
- %x = load <2 x i64>* %p
- ret <2 x i64> %x
+ %0 = load i32* %x, align 4
+ %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
+ ret i32 %call
}
-; CHECK-AA: @ByValArgumentShadowLargeAlignment
-; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 16, i32 8, i1 false)
-; CHECK-AA: ret <2 x i64>
+declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)
+
+; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
+; CHECK-NOT: __msan_param_origin_tls
+; CHECK: ret i32
-define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory {
+; Test argument shadow alignment
+
+define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
entry:
- %x = load i16* %p
- ret i16 %x
+ ret <2 x i64> %b
}
-; CHECK-AA: @ByValArgumentShadowSmallAlignment
-; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 2, i32 2, i1 false)
-; CHECK-AA: ret i16
+; CHECK: @ArgumentShadowAlignment
+; CHECK: load <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
+; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
+; CHECK: ret <2 x i64>
; Test origin propagation for insertvalue
@@ -801,3 +822,59 @@ entry:
; Second element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; CHECK-ORIGINS: ret { i64, i32 }
+
+
+; Test shadow propagation for aggregates passed through ellipsis.
+
+%struct.StructByVal = type { i32, i32, i32, i32 }
+
+declare void @VAArgStructFn(i32 %guard, ...)
+
+define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
+entry:
+ %agg.tmp2 = alloca %struct.StructByVal, align 8
+ %0 = bitcast %struct.StructByVal* %s to i8*
+ %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
+ %agg.tmp.sroa.0.0.copyload = load i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
+ %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal* %s, i64 0, i32 2
+ %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
+ %agg.tmp.sroa.2.0.copyload = load i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
+ %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %0, i64 16, i32 4, i1 false)
+ call void (i32, ...)* @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
+ ret void
+}
+
+; "undef" and the first 2 structs go to general purpose registers;
+; the third struct goes to the overflow area byval
+
+; CHECK: @VAArgStruct
+; undef
+; CHECK: store i32 -1, i32* {{.*}}@__msan_va_arg_tls {{.*}}, align 8
+; first struct through general purpose registers
+; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
+; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
+; second struct through general purpose registers
+; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
+; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
+; third struct through the overflow area byval
+; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
+; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 176
+; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
+; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
+; CHECK: call void (i32, ...)* @VAArgStructFn
+; CHECK: ret void
+
+declare i32 @InnerTailCall(i32 %a)
+
+define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
+ %b = tail call i32 @InnerTailCall(i32 %a)
+ ret void
+}
+
+; We used to strip off the 'tail' modifier, but now that we unpoison return slot
+; shadow before the call, we don't need to anymore.
+
+; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
+; CHECK: tail call i32 @InnerTailCall
+; CHECK: ret void
diff --git a/test/Instrumentation/MemorySanitizer/mul_by_constant.ll b/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
new file mode 100644
index 000000000000..e068f69ae4ba
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
@@ -0,0 +1,94 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Check instrumentation of mul when one of the operands is a constant.
+
+define i64 @MulConst(i64 %x) sanitize_memory {
+entry:
+ %y = mul i64 %x, 42949672960000
+ ret i64 %y
+}
+
+; 42949672960000 = 2**32 * 10000
+; 36 trailing zero bits
+; 68719476736 = 2**36
+
+; CHECK-LABEL: @MulConst(
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
+; CHECK: [[B:%.*]] = mul i64 [[A]], 68719476736
+; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+
+
+define i64 @MulZero(i64 %x) sanitize_memory {
+entry:
+ %y = mul i64 %x, 0
+ ret i64 %y
+}
+
+; CHECK-LABEL: @MulZero(
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
+; CHECK: [[B:%.*]] = mul i64 [[A]], 0{{$}}
+; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+
+
+define i64 @MulNeg(i64 %x) sanitize_memory {
+entry:
+ %y = mul i64 %x, -16
+ ret i64 %y
+}
+
+; CHECK-LABEL: @MulNeg(
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
+; CHECK: [[B:%.*]] = mul i64 [[A]], 16
+; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+
+
+define i64 @MulNeg2(i64 %x) sanitize_memory {
+entry:
+ %y = mul i64 %x, -48
+ ret i64 %y
+}
+
+; CHECK-LABEL: @MulNeg2(
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
+; CHECK: [[B:%.*]] = mul i64 [[A]], 16
+; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+
+
+define i64 @MulOdd(i64 %x) sanitize_memory {
+entry:
+ %y = mul i64 %x, 12345
+ ret i64 %y
+}
+
+; CHECK-LABEL: @MulOdd(
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
+; CHECK: [[B:%.*]] = mul i64 [[A]], 1
+; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+
+
+define i64 @MulLarge(i64 %x) sanitize_memory {
+entry:
+ %y = mul i64 %x, -9223372036854775808
+ ret i64 %y
+}
+
+; -9223372036854775808 = 0x8000000000000000
+
+; CHECK-LABEL: @MulLarge(
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
+; CHECK: [[B:%.*]] = mul i64 [[A]], -9223372036854775808
+; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+
+define <4 x i32> @MulVectorConst(<4 x i32> %x) sanitize_memory {
+entry:
+ %y = mul <4 x i32> %x, <i32 3072, i32 0, i32 -16, i32 -48>
+ ret <4 x i32> %y
+}
+
+; CHECK-LABEL: @MulVectorConst(
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
+; CHECK: [[B:%.*]] = mul <4 x i32> [[A]], <i32 1024, i32 0, i32 16, i32 16>
+; CHECK: store <4 x i32> [[B]], <4 x i32>* {{.*}} @__msan_retval_tls
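
The constants these CHECK lines expect are consistent with multiplying the shadow by the lowest set bit of the constant operand (a power of two, or 0 when the constant is 0). A short C++ sketch verifying that reading of the test against the values used above; illustrative only, not the instrumentation code:

    #include <cassert>
    #include <cstdint>

    // Lowest set bit of C (0 when C == 0), computed without signed overflow.
    uint64_t shadow_mul_factor(uint64_t C) {
      return C & (~C + 1); // equivalent to C & -C
    }

    int main() {
      assert(shadow_mul_factor(42949672960000ULL) == 68719476736ULL);      // MulConst: 2**36
      assert(shadow_mul_factor(0) == 0);                                   // MulZero
      assert(shadow_mul_factor(static_cast<uint64_t>(-16)) == 16);         // MulNeg
      assert(shadow_mul_factor(static_cast<uint64_t>(-48)) == 16);         // MulNeg2
      assert(shadow_mul_factor(12345) == 1);                               // MulOdd
      assert(shadow_mul_factor(1ULL << 63) == (1ULL << 63));               // MulLarge
      return 0;
    }
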
diff --git a/test/Instrumentation/MemorySanitizer/store-origin.ll b/test/Instrumentation/MemorySanitizer/store-origin.ll
new file mode 100644
index 000000000000..0bd977700e83
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/store-origin.ll
@@ -0,0 +1,73 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS1 %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS2 %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+
+; Check origin instrumentation of stores.
+; Check that debug info for origin propagation code is set correctly.
+
+; Function Attrs: nounwind
+define void @Store(i32* nocapture %p, i32 %x) #0 {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i32* %p}, i64 0, metadata !11), !dbg !16
+ tail call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !12), !dbg !16
+ store i32 %x, i32* %p, align 4, !dbg !17, !tbaa !18
+ ret void, !dbg !22
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { nounwind sanitize_memory "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!13, !14}
+!llvm.ident = !{!15}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 (204220)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/build0/../2.cc] [DW_LANG_C99]
+!1 = metadata !{metadata !"../2.cc", metadata !"/tmp/build0"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"Store", metadata !"Store", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*, i32)* @Store, null, null, metadata !10, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [Store]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/build0/../2.cc]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8, metadata !9}
+!8 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from int]
+!9 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = metadata !{metadata !11, metadata !12}
+!11 = metadata !{i32 786689, metadata !4, metadata !"p", metadata !5, i32 16777217, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [p] [line 1]
+!12 = metadata !{i32 786689, metadata !4, metadata !"x", metadata !5, i32 33554433, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [x] [line 1]
+!13 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!14 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!15 = metadata !{metadata !"clang version 3.5.0 (204220)"}
+!16 = metadata !{i32 1, i32 0, metadata !4, null}
+!17 = metadata !{i32 2, i32 0, metadata !4, null}
+!18 = metadata !{metadata !19, metadata !19, i64 0}
+!19 = metadata !{metadata !"int", metadata !20, i64 0}
+!20 = metadata !{metadata !"omnipotent char", metadata !21, i64 0}
+!21 = metadata !{metadata !"Simple C/C++ TBAA"}
+!22 = metadata !{i32 3, i32 0, metadata !4, null}
+
+
+; CHECK: @Store
+; CHECK: load {{.*}} @__msan_param_tls
+; CHECK: [[ORIGIN:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK: store {{.*}}!dbg ![[DBG:[01-9]+]]
+; CHECK: icmp
+; CHECK: br i1
+; CHECK: <label>
+
+; Origin tracking level 1: simply store the origin value
+; CHECK-ORIGINS1: store i32 {{.*}}[[ORIGIN]],{{.*}}!dbg !{{.*}}[[DBG]]
+
+; Origin tracking level 2: pass origin value through __msan_chain_origin and store the result.
+; CHECK-ORIGINS2: [[ORIGIN2:%[01-9a-z]+]] = call i32 @__msan_chain_origin(i32 {{.*}}[[ORIGIN]])
+; CHECK-ORIGINS2: store i32 {{.*}}[[ORIGIN2]],{{.*}}!dbg !{{.*}}[[DBG]]
+
+; CHECK: br label{{.*}}!dbg !{{.*}}[[DBG]]
+; CHECK: <label>
+; CHECK: store{{.*}}!dbg !{{.*}}[[DBG]]
+; CHECK: ret void
diff --git a/test/Instrumentation/MemorySanitizer/vector_arith.ll b/test/Instrumentation/MemorySanitizer/vector_arith.ll
new file mode 100644
index 000000000000..6541a1c3a394
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/vector_arith.ll
@@ -0,0 +1,65 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
+declare x86_mmx @llvm.x86.ssse3.pmadd.ub.sw(x86_mmx, x86_mmx) nounwind readnone
+declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
+declare x86_mmx @llvm.x86.mmx.psad.bw(x86_mmx, x86_mmx) nounwind readnone
+
+define <4 x i32> @Test_sse2_pmadd_wd(<8 x i16> %a, <8 x i16> %b) sanitize_memory {
+entry:
+ %c = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a, <8 x i16> %b) nounwind
+ ret <4 x i32> %c
+}
+
+; CHECK-LABEL: @Test_sse2_pmadd_wd(
+; CHECK: or <8 x i16>
+; CHECK: bitcast <8 x i16> {{.*}} to <4 x i32>
+; CHECK: icmp ne <4 x i32> {{.*}}, zeroinitializer
+; CHECK: sext <4 x i1> {{.*}} to <4 x i32>
+; CHECK: ret <4 x i32>
+
+
+define x86_mmx @Test_ssse3_pmadd_ub_sw(x86_mmx %a, x86_mmx %b) sanitize_memory {
+entry:
+ %c = tail call x86_mmx @llvm.x86.ssse3.pmadd.ub.sw(x86_mmx %a, x86_mmx %b) nounwind
+ ret x86_mmx %c
+}
+
+; CHECK-LABEL: @Test_ssse3_pmadd_ub_sw(
+; CHECK: or i64
+; CHECK: bitcast i64 {{.*}} to <4 x i16>
+; CHECK: icmp ne <4 x i16> {{.*}}, zeroinitializer
+; CHECK: sext <4 x i1> {{.*}} to <4 x i16>
+; CHECK: bitcast <4 x i16> {{.*}} to i64
+; CHECK: ret x86_mmx
+
+
+define <2 x i64> @Test_x86_sse2_psad_bw(<16 x i8> %a, <16 x i8> %b) sanitize_memory {
+ %c = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a, <16 x i8> %b)
+ ret <2 x i64> %c
+}
+
+; CHECK-LABEL: @Test_x86_sse2_psad_bw(
+; CHECK: or <16 x i8> {{.*}}, {{.*}}
+; CHECK: bitcast <16 x i8> {{.*}} to <2 x i64>
+; CHECK: icmp ne <2 x i64> {{.*}}, zeroinitializer
+; CHECK: sext <2 x i1> {{.*}} to <2 x i64>
+; CHECK: lshr <2 x i64> {{.*}}, <i64 48, i64 48>
+; CHECK: ret <2 x i64>
+
+
+define x86_mmx @Test_x86_mmx_psad_bw(x86_mmx %a, x86_mmx %b) sanitize_memory {
+entry:
+ %c = tail call x86_mmx @llvm.x86.mmx.psad.bw(x86_mmx %a, x86_mmx %b) nounwind
+ ret x86_mmx %c
+}
+
+; CHECK-LABEL: @Test_x86_mmx_psad_bw(
+; CHECK: or i64
+; CHECK: icmp ne i64
+; CHECK: sext i1 {{.*}} to i64
+; CHECK: lshr i64 {{.*}}, 48
+; CHECK: ret x86_mmx
diff --git a/test/Instrumentation/MemorySanitizer/vector_pack.ll b/test/Instrumentation/MemorySanitizer/vector_pack.ll
new file mode 100644
index 000000000000..31c0c62980ec
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/vector_pack.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
+declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b) nounwind readnone
+declare x86_mmx @llvm.x86.mmx.packuswb(x86_mmx, x86_mmx) nounwind readnone
+
+define <8 x i16> @Test_packssdw_128(<4 x i32> %a, <4 x i32> %b) sanitize_memory {
+entry:
+ %c = tail call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a, <4 x i32> %b) nounwind
+ ret <8 x i16> %c
+}
+
+; CHECK-LABEL: @Test_packssdw_128(
+; CHECK-DAG: icmp ne <4 x i32> {{.*}}, zeroinitializer
+; CHECK-DAG: sext <4 x i1> {{.*}} to <4 x i32>
+; CHECK-DAG: icmp ne <4 x i32> {{.*}}, zeroinitializer
+; CHECK-DAG: sext <4 x i1> {{.*}} to <4 x i32>
+; CHECK-DAG: call <8 x i16> @llvm.x86.sse2.packssdw.128(
+; CHECK-DAG: call <8 x i16> @llvm.x86.sse2.packssdw.128(
+; CHECK: ret <8 x i16>
+
+
+define <32 x i8> @Test_avx_packuswb(<16 x i16> %a, <16 x i16> %b) sanitize_memory {
+entry:
+ %c = tail call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a, <16 x i16> %b) nounwind
+ ret <32 x i8> %c
+}
+
+; CHECK-LABEL: @Test_avx_packuswb(
+; CHECK-DAG: icmp ne <16 x i16> {{.*}}, zeroinitializer
+; CHECK-DAG: sext <16 x i1> {{.*}} to <16 x i16>
+; CHECK-DAG: icmp ne <16 x i16> {{.*}}, zeroinitializer
+; CHECK-DAG: sext <16 x i1> {{.*}} to <16 x i16>
+; CHECK-DAG: call <32 x i8> @llvm.x86.avx2.packsswb(
+; CHECK-DAG: call <32 x i8> @llvm.x86.avx2.packuswb(
+; CHECK: ret <32 x i8>
+
+
+define x86_mmx @Test_mmx_packuswb(x86_mmx %a, x86_mmx %b) sanitize_memory {
+entry:
+ %c = tail call x86_mmx @llvm.x86.mmx.packuswb(x86_mmx %a, x86_mmx %b) nounwind
+ ret x86_mmx %c
+}
+
+; CHECK-LABEL: @Test_mmx_packuswb(
+; CHECK-DAG: bitcast i64 {{.*}} to <4 x i16>
+; CHECK-DAG: bitcast i64 {{.*}} to <4 x i16>
+; CHECK-DAG: icmp ne <4 x i16> {{.*}}, zeroinitializer
+; CHECK-DAG: sext <4 x i1> {{.*}} to <4 x i16>
+; CHECK-DAG: icmp ne <4 x i16> {{.*}}, zeroinitializer
+; CHECK-DAG: sext <4 x i1> {{.*}} to <4 x i16>
+; CHECK-DAG: bitcast <4 x i16> {{.*}} to x86_mmx
+; CHECK-DAG: bitcast <4 x i16> {{.*}} to x86_mmx
+; CHECK-DAG: call x86_mmx @llvm.x86.mmx.packsswb({{.*}}
+; CHECK-DAG: bitcast x86_mmx {{.*}} to i64
+; CHECK-DAG: call x86_mmx @llvm.x86.mmx.packuswb({{.*}}
+; CHECK: ret x86_mmx
diff --git a/test/Instrumentation/MemorySanitizer/vector_shift.ll b/test/Instrumentation/MemorySanitizer/vector_shift.ll
new file mode 100644
index 000000000000..91e4bd53c6a9
--- /dev/null
+++ b/test/Instrumentation/MemorySanitizer/vector_shift.ll
@@ -0,0 +1,100 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
+
+; Test instrumentation of vector shift instructions.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare x86_mmx @llvm.x86.mmx.psll.d(x86_mmx, x86_mmx)
+declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>)
+declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)
+declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32)
+declare <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64>, i32)
+
+define i64 @test_mmx(i64 %x.coerce, i64 %y.coerce) sanitize_memory {
+entry:
+ %0 = bitcast i64 %x.coerce to <2 x i32>
+ %1 = bitcast <2 x i32> %0 to x86_mmx
+ %2 = bitcast i64 %y.coerce to x86_mmx
+ %3 = tail call x86_mmx @llvm.x86.mmx.psll.d(x86_mmx %1, x86_mmx %2)
+ %4 = bitcast x86_mmx %3 to <2 x i32>
+ %5 = bitcast <2 x i32> %4 to <1 x i64>
+ %6 = extractelement <1 x i64> %5, i32 0
+ ret i64 %6
+}
+
+; CHECK: @test_mmx
+; CHECK: = icmp ne i64 {{.*}}, 0
+; CHECK: [[C:%.*]] = sext i1 {{.*}} to i64
+; CHECK: [[A:%.*]] = call x86_mmx @llvm.x86.mmx.psll.d(
+; CHECK: [[B:%.*]] = bitcast x86_mmx {{.*}}[[A]] to i64
+; CHECK: = or i64 {{.*}}[[B]], {{.*}}[[C]]
+; CHECK: call x86_mmx @llvm.x86.mmx.psll.d(
+; CHECK: ret i64
+
+
+define <8 x i16> @test_sse2_scalar(<8 x i16> %x, i32 %y) sanitize_memory {
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %x, i32 %y)
+ ret <8 x i16> %0
+}
+
+; CHECK: @test_sse2_scalar
+; CHECK: = icmp ne i32 {{.*}}, 0
+; CHECK: = sext i1 {{.*}} to i128
+; CHECK: = bitcast i128 {{.*}} to <8 x i16>
+; CHECK: = call <8 x i16> @llvm.x86.sse2.pslli.w(
+; CHECK: = or <8 x i16>
+; CHECK: call <8 x i16> @llvm.x86.sse2.pslli.w(
+; CHECK: ret <8 x i16>
+
+
+define <8 x i16> @test_sse2(<8 x i16> %x, <8 x i16> %y) sanitize_memory {
+entry:
+ %0 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %x, <8 x i16> %y)
+ ret <8 x i16> %0
+}
+
+; CHECK: @test_sse2
+; CHECK: = bitcast <8 x i16> {{.*}} to i128
+; CHECK: = trunc i128 {{.*}} to i64
+; CHECK: = icmp ne i64 {{.*}}, 0
+; CHECK: = sext i1 {{.*}} to i128
+; CHECK: = bitcast i128 {{.*}} to <8 x i16>
+; CHECK: = call <8 x i16> @llvm.x86.sse2.psrl.w(
+; CHECK: = or <8 x i16>
+; CHECK: call <8 x i16> @llvm.x86.sse2.psrl.w(
+; CHECK: ret <8 x i16>
+
+
+; Test variable shift (i.e. vector by vector).
+
+define <4 x i32> @test_avx2(<4 x i32> %x, <4 x i32> %y) sanitize_memory {
+entry:
+ %0 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %x, <4 x i32> %y)
+ ret <4 x i32> %0
+}
+
+; CHECK: @test_avx2
+; CHECK: = icmp ne <4 x i32> {{.*}}, zeroinitializer
+; CHECK: = sext <4 x i1> {{.*}} to <4 x i32>
+; CHECK: = call <4 x i32> @llvm.x86.avx2.psllv.d(
+; CHECK: = or <4 x i32>
+; CHECK: = tail call <4 x i32> @llvm.x86.avx2.psllv.d(
+; CHECK: ret <4 x i32>
+
+define <8 x i32> @test_avx2_256(<8 x i32> %x, <8 x i32> %y) sanitize_memory {
+entry:
+ %0 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %x, <8 x i32> %y)
+ ret <8 x i32> %0
+}
+
+; CHECK: @test_avx2_256
+; CHECK: = icmp ne <8 x i32> {{.*}}, zeroinitializer
+; CHECK: = sext <8 x i1> {{.*}} to <8 x i32>
+; CHECK: = call <8 x i32> @llvm.x86.avx2.psllv.d.256(
+; CHECK: = or <8 x i32>
+; CHECK: = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(
+; CHECK: ret <8 x i32>
diff --git a/test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll b/test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll
index 555695d25845..65037cb4790b 100644
--- a/test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll
+++ b/test/Instrumentation/MemorySanitizer/wrap_indirect_calls.ll
@@ -8,20 +8,20 @@ target triple = "x86_64-unknown-linux-gnu"
; wrapper function.
; This does not depend on the sanitize_memory attribute.
-define i32 @func(i32 (i32, i32)* nocapture %f, i32 %x, i32 %y) {
+define i32 @func1(i32 (i32, i32)* nocapture %f, i32 %x, i32 %y) {
entry:
%call = tail call i32 %f(i32 %x, i32 %y)
ret i32 %call
}
-; CHECK: @func
+; CHECK: @func1
; CHECK: bitcast i32 (i32, i32)* %f to void ()*
; CHECK: call void ()* (void ()*)* @zzz(void ()*
; CHECK: [[A:%[01-9a-z_.]+]] = bitcast void ()* {{.*}} to i32 (i32, i32)*
; CHECK: call i32 {{.*}}[[A]](i32 {{.*}}, i32 {{.*}})
; CHECK: ret i32
-; CHECK-FAST: @func
+; CHECK-FAST: @func1
; CHECK-FAST: bitcast i32 (i32, i32)* %f to void ()*
; CHECK-FAST-DAG: icmp ult void ()* {{.*}}, bitcast (i32* @__executable_start to void ()*)
; CHECK-FAST-DAG: icmp uge void ()* {{.*}}, bitcast (i32* @_end to void ()*)
@@ -32,3 +32,29 @@ entry:
; CHECK-FAST: [[A:%[01-9a-z_.]+]] = phi i32 (i32, i32)* [ %f, %entry ], [ {{.*}} ]
; CHECK-FAST: call i32 {{.*}}[[A]](i32 {{.*}}, i32 {{.*}})
; CHECK-FAST: ret i32
+
+
+; The same test, but with a complex expression as the call target.
+
+declare i8* @callee(i32)
+
+define i8* @func2(i64 %x) #1 {
+entry:
+ %call = tail call i8* bitcast (i8* (i32)* @callee to i8* (i64)*)(i64 %x)
+ ret i8* %call
+}
+
+; CHECK: @func2
+; CHECK: call {{.*}} @zzz
+; CHECK: [[A:%[01-9a-z_.]+]] = bitcast void ()* {{.*}} to i8* (i64)*
+; CHECK: call i8* {{.*}}[[A]](i64 {{.*}})
+; CHECK: ret i8*
+
+; CHECK-FAST: @func2
+; CHECK-FAST: {{br i1 or .* icmp ult .* bitcast .* @callee .* @__executable_start.* icmp uge .* bitcast .* @callee .* @_end}}
+; CHECK-FAST: {{call .* @zzz.* bitcast .*@callee}}
+; CHECK-FAST: bitcast void ()* {{.*}} to i8* (i64)*
+; CHECK-FAST: br label
+; CHECK-FAST: [[A:%[01-9a-z_.]+]] = phi i8* (i64)* [{{.*bitcast .* @callee.*, %entry.*}}], [ {{.*}} ]
+; CHECK-FAST: call i8* {{.*}}[[A]](i64 {{.*}})
+; CHECK-FAST: ret i8*
diff --git a/test/Instrumentation/ThreadSanitizer/atomic.ll b/test/Instrumentation/ThreadSanitizer/atomic.ll
index 70b6cbbf3105..e40268f97b99 100644
--- a/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -348,7 +348,7 @@ entry:
define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 monotonic
+ cmpxchg i8* %a, i8 0, i8 1 monotonic monotonic
ret void
}
; CHECK: atomic8_cas_monotonic
@@ -356,7 +356,7 @@ entry:
define void @atomic8_cas_acquire(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 acquire
+ cmpxchg i8* %a, i8 0, i8 1 acquire acquire
ret void
}
; CHECK: atomic8_cas_acquire
@@ -364,7 +364,7 @@ entry:
define void @atomic8_cas_release(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 release
+ cmpxchg i8* %a, i8 0, i8 1 release monotonic
ret void
}
; CHECK: atomic8_cas_release
@@ -372,7 +372,7 @@ entry:
define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 acq_rel
+ cmpxchg i8* %a, i8 0, i8 1 acq_rel acquire
ret void
}
; CHECK: atomic8_cas_acq_rel
@@ -380,7 +380,7 @@ entry:
define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable {
entry:
- cmpxchg i8* %a, i8 0, i8 1 seq_cst
+ cmpxchg i8* %a, i8 0, i8 1 seq_cst seq_cst
ret void
}
; CHECK: atomic8_cas_seq_cst
@@ -732,7 +732,7 @@ entry:
define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 monotonic
+ cmpxchg i16* %a, i16 0, i16 1 monotonic monotonic
ret void
}
; CHECK: atomic16_cas_monotonic
@@ -740,7 +740,7 @@ entry:
define void @atomic16_cas_acquire(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 acquire
+ cmpxchg i16* %a, i16 0, i16 1 acquire acquire
ret void
}
; CHECK: atomic16_cas_acquire
@@ -748,7 +748,7 @@ entry:
define void @atomic16_cas_release(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 release
+ cmpxchg i16* %a, i16 0, i16 1 release monotonic
ret void
}
; CHECK: atomic16_cas_release
@@ -756,7 +756,7 @@ entry:
define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 acq_rel
+ cmpxchg i16* %a, i16 0, i16 1 acq_rel acquire
ret void
}
; CHECK: atomic16_cas_acq_rel
@@ -764,7 +764,7 @@ entry:
define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable {
entry:
- cmpxchg i16* %a, i16 0, i16 1 seq_cst
+ cmpxchg i16* %a, i16 0, i16 1 seq_cst seq_cst
ret void
}
; CHECK: atomic16_cas_seq_cst
@@ -1116,7 +1116,7 @@ entry:
define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 monotonic
+ cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
ret void
}
; CHECK: atomic32_cas_monotonic
@@ -1124,7 +1124,7 @@ entry:
define void @atomic32_cas_acquire(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 acquire
+ cmpxchg i32* %a, i32 0, i32 1 acquire acquire
ret void
}
; CHECK: atomic32_cas_acquire
@@ -1132,7 +1132,7 @@ entry:
define void @atomic32_cas_release(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 release
+ cmpxchg i32* %a, i32 0, i32 1 release monotonic
ret void
}
; CHECK: atomic32_cas_release
@@ -1140,7 +1140,7 @@ entry:
define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 acq_rel
+ cmpxchg i32* %a, i32 0, i32 1 acq_rel acquire
ret void
}
; CHECK: atomic32_cas_acq_rel
@@ -1148,7 +1148,7 @@ entry:
define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable {
entry:
- cmpxchg i32* %a, i32 0, i32 1 seq_cst
+ cmpxchg i32* %a, i32 0, i32 1 seq_cst seq_cst
ret void
}
; CHECK: atomic32_cas_seq_cst
@@ -1500,7 +1500,7 @@ entry:
define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 monotonic
+ cmpxchg i64* %a, i64 0, i64 1 monotonic monotonic
ret void
}
; CHECK: atomic64_cas_monotonic
@@ -1508,7 +1508,7 @@ entry:
define void @atomic64_cas_acquire(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 acquire
+ cmpxchg i64* %a, i64 0, i64 1 acquire acquire
ret void
}
; CHECK: atomic64_cas_acquire
@@ -1516,7 +1516,7 @@ entry:
define void @atomic64_cas_release(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 release
+ cmpxchg i64* %a, i64 0, i64 1 release monotonic
ret void
}
; CHECK: atomic64_cas_release
@@ -1524,7 +1524,7 @@ entry:
define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 acq_rel
+ cmpxchg i64* %a, i64 0, i64 1 acq_rel acquire
ret void
}
; CHECK: atomic64_cas_acq_rel
@@ -1532,7 +1532,7 @@ entry:
define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable {
entry:
- cmpxchg i64* %a, i64 0, i64 1 seq_cst
+ cmpxchg i64* %a, i64 0, i64 1 seq_cst seq_cst
ret void
}
; CHECK: atomic64_cas_seq_cst
@@ -1884,7 +1884,7 @@ entry:
define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 monotonic
+ cmpxchg i128* %a, i128 0, i128 1 monotonic monotonic
ret void
}
; CHECK: atomic128_cas_monotonic
@@ -1892,7 +1892,7 @@ entry:
define void @atomic128_cas_acquire(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 acquire
+ cmpxchg i128* %a, i128 0, i128 1 acquire acquire
ret void
}
; CHECK: atomic128_cas_acquire
@@ -1900,7 +1900,7 @@ entry:
define void @atomic128_cas_release(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 release
+ cmpxchg i128* %a, i128 0, i128 1 release monotonic
ret void
}
; CHECK: atomic128_cas_release
@@ -1908,7 +1908,7 @@ entry:
define void @atomic128_cas_acq_rel(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 acq_rel
+ cmpxchg i128* %a, i128 0, i128 1 acq_rel acquire
ret void
}
; CHECK: atomic128_cas_acq_rel
@@ -1916,7 +1916,7 @@ entry:
define void @atomic128_cas_seq_cst(i128* %a) nounwind uwtable {
entry:
- cmpxchg i128* %a, i128 0, i128 1 seq_cst
+ cmpxchg i128* %a, i128 0, i128 1 seq_cst seq_cst
ret void
}
; CHECK: atomic128_cas_seq_cst
diff --git a/test/Instrumentation/ThreadSanitizer/tsan_basic.ll b/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
index d449a97a62a1..dc6e43e3ecb9 100644
--- a/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
+++ b/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
@@ -27,7 +27,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
; Check that tsan converts mem intrinsics back to function calls.
-define void @MemCpyTest(i8* nocapture %x, i8* nocapture %y) {
+define void @MemCpyTest(i8* nocapture %x, i8* nocapture %y) sanitize_thread {
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
ret void
@@ -36,7 +36,7 @@ entry:
; CHECK: ret void
}
-define void @MemMoveTest(i8* nocapture %x, i8* nocapture %y) {
+define void @MemMoveTest(i8* nocapture %x, i8* nocapture %y) sanitize_thread {
entry:
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
ret void
@@ -45,7 +45,7 @@ entry:
; CHECK: ret void
}
-define void @MemSetTest(i8* nocapture %x) {
+define void @MemSetTest(i8* nocapture %x) sanitize_thread {
entry:
tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
ret void
diff --git a/test/Instrumentation/ThreadSanitizer/vptr_update.ll b/test/Instrumentation/ThreadSanitizer/vptr_update.ll
index 95c7bb0e5915..83d28b6ee217 100644
--- a/test/Instrumentation/ThreadSanitizer/vptr_update.ll
+++ b/test/Instrumentation/ThreadSanitizer/vptr_update.ll
@@ -4,10 +4,37 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @Foo(i8** nocapture %a, i8* %b) nounwind uwtable sanitize_thread {
entry:
+; CHECK-LABEL: @Foo
; CHECK: call void @__tsan_vptr_update
+; CHECK: ret void
store i8* %b, i8** %a, align 8, !tbaa !0
ret void
}
+
+define void @FooInt(i64* nocapture %a, i64 %b) nounwind uwtable sanitize_thread {
+entry:
+; CHECK-LABEL: @FooInt
+; CHECK: call void @__tsan_vptr_update
+; CHECK: ret void
+ store i64 %b, i64* %a, align 8, !tbaa !0
+ ret void
+}
+
+
+declare i32 @Func1()
+declare i32 @Func2()
+
+; Test that we properly handle vector stores marked as vtable updates.
+define void @VectorVptrUpdate(<2 x i8*>* nocapture %a, i8* %b) nounwind uwtable sanitize_thread {
+entry:
+; CHECK-LABEL: @VectorVptrUpdate
+; CHECK: call void @__tsan_vptr_update{{.*}}Func1
+; CHECK-NOT: call void @__tsan_vptr_update
+; CHECK: ret void
+ store <2 x i8 *> <i8* bitcast(i32 ()* @Func1 to i8 *), i8* bitcast(i32 ()* @Func2 to i8 *)>, <2 x i8 *>* %a, align 8, !tbaa !0
+ ret void
+}
+
!0 = metadata !{metadata !2, metadata !2, i64 0}
!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
!2 = metadata !{metadata !"vtable pointer", metadata !1}
diff --git a/test/LTO/attrs.ll b/test/LTO/attrs.ll
new file mode 100644
index 000000000000..d1967470cdd3
--- /dev/null
+++ b/test/LTO/attrs.ll
@@ -0,0 +1,15 @@
+; RUN: llvm-as < %s >%t1
+; RUN: llvm-lto -exported-symbol=test_x86_aesni_aeskeygenassist -mattr=+aes -o %t2 %t1
+; RUN: llvm-objdump -d %t2 | FileCheck -check-prefix=WITH_AES %s
+; RUN: not llvm-lto -exported-symbol=test_x86_aesni_aeskeygenassist -mattr=-aes -o %t3 %t1 2>&1 | FileCheck -check-prefix=WITHOUT_AES %s
+
+target triple = "x86_64-unknown-linux-gnu"
+declare <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64>, i8)
+define <2 x i64> @test_x86_aesni_aeskeygenassist(<2 x i64> %a0) {
+ ; WITH_AES: test_x86_aesni_aeskeygenassist
+ ; WITH_AES: aeskeygenassist
+ %res = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7)
+ ret <2 x i64> %res
+}
+
+; WITHOUT_AES: LLVM ERROR: Cannot select: intrinsic %llvm.x86.aesni.aeskeygenassist
diff --git a/test/LTO/current-section.ll b/test/LTO/current-section.ll
new file mode 100644
index 000000000000..f79b378318df
--- /dev/null
+++ b/test/LTO/current-section.ll
@@ -0,0 +1,4 @@
+; RUN: llvm-as < %s >%t1
+; RUN: llvm-lto -o %t2 %t1
+
+module asm ".align 4"
diff --git a/test/LTO/jump-table-type.ll b/test/LTO/jump-table-type.ll
new file mode 100644
index 000000000000..a39d3e959830
--- /dev/null
+++ b/test/LTO/jump-table-type.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-as <%s >%t1
+; RUN: llvm-lto -o %t2 %t1 -jump-table-type=arity
+; RUN: llvm-nm %t2 | FileCheck %s
+
+; CHECK: T __llvm_jump_instr_table_0_1
+; CHECK: T __llvm_jump_instr_table_1_1
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @g(i32 %a) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define i32 @f() unnamed_addr jumptable {
+ ret i32 0
+}
+
+define i32 @main() {
+ ret i32 0
+}
+
+@llvm.used = appending global [2 x i8*] [i8* bitcast (i32(i32)* @g to i8*),
+ i8* bitcast (i32()* @f to i8*)]
diff --git a/test/LTO/keep-used-puts-during-instcombine.ll b/test/LTO/keep-used-puts-during-instcombine.ll
new file mode 100644
index 000000000000..69ce3ee3fae5
--- /dev/null
+++ b/test/LTO/keep-used-puts-during-instcombine.ll
@@ -0,0 +1,36 @@
+; RUN: opt -S -instcombine <%s | FileCheck %s
+; rdar://problem/16165191
+; llvm.compiler.used functions should not be renamed
+
+target triple = "x86_64-apple-darwin11"
+
+@llvm.compiler.used = appending global [1 x i8*] [
+ i8* bitcast (i32(i8*)* @puts to i8*)
+ ], section "llvm.metadata"
+@llvm.used = appending global [1 x i8*] [
+ i8* bitcast (i32(i32)* @uses_printf to i8*)
+ ], section "llvm.metadata"
+
+@str = private unnamed_addr constant [13 x i8] c"hello world\0A\00"
+
+define i32 @uses_printf(i32 %i) {
+entry:
+ %s = getelementptr [13 x i8]* @str, i64 0, i64 0
+ call i32 (i8*, ...)* @printf(i8* %s)
+ ret i32 0
+}
+
+define internal i32 @printf(i8* readonly nocapture %fmt, ...) {
+entry:
+ %ret = call i32 @bar(i8* %fmt)
+ ret i32 %ret
+}
+
+; CHECK: define {{.*}} @puts(
+define internal i32 @puts(i8* %s) {
+entry:
+ %ret = call i32 @bar(i8* %s)
+ ret i32 %ret
+}
+
+declare i32 @bar(i8*)
diff --git a/test/LTO/linkonce_odr_func.ll b/test/LTO/linkonce_odr_func.ll
index 8a4932672f6d..a67ffc0dd48e 100644
--- a/test/LTO/linkonce_odr_func.ll
+++ b/test/LTO/linkonce_odr_func.ll
@@ -1,6 +1,6 @@
; RUN: llvm-as < %s >%t1
; RUN: llvm-lto -o %t2 -dso-symbol=foo1 -dso-symbol=foo2 -dso-symbol=foo3 \
-; RUN: -dso-symbol=foo4 %t1 -disable-opt
+; RUN: -dso-symbol=foo4 -dso-symbol=v1 -dso-symbol=v2 %t1 -disable-opt
; RUN: llvm-nm %t2 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -26,6 +26,12 @@ define linkonce_odr void @foo4() noinline {
ret void
}
+; CHECK: r v1
+@v1 = linkonce_odr constant i32 32
+
+; CHECK: V v2
+@v2 = linkonce_odr global i32 32
+
declare void @f(void()*)
declare void @p()
diff --git a/test/LTO/lit.local.cfg b/test/LTO/lit.local.cfg
index 6df0e03ee648..afde89be896d 100644
--- a/test/LTO/lit.local.cfg
+++ b/test/LTO/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/LTO/no-undefined-puts-when-implemented.ll b/test/LTO/no-undefined-puts-when-implemented.ll
new file mode 100644
index 000000000000..29db8a63d1dd
--- /dev/null
+++ b/test/LTO/no-undefined-puts-when-implemented.ll
@@ -0,0 +1,40 @@
+; RUN: llvm-as <%s >%t1
+; RUN: llvm-lto -exported-symbol=_uses_puts -exported-symbol=_uses_printf -o - %t1 | \
+; RUN: llvm-nm - | \
+; RUN: FileCheck %s
+; rdar://problem/16165191
+; runtime library implementations should not be renamed
+
+target triple = "x86_64-apple-darwin11"
+
+@str = private unnamed_addr constant [13 x i8] c"hello world\0A\00"
+
+; CHECK-NOT: U _puts
+; CHECK: T _uses_printf
+; CHECK: T _uses_puts
+define i32 @uses_puts(i32 %i) {
+entry:
+ %s = call i8* @foo(i32 %i)
+ %ret = call i32 @puts(i8* %s)
+ ret i32 %ret
+}
+define i32 @uses_printf(i32 %i) {
+entry:
+ %s = getelementptr [13 x i8]* @str, i64 0, i64 0
+ call i32 (i8*, ...)* @printf(i8* %s)
+ ret i32 0
+}
+
+define hidden i32 @printf(i8* readonly nocapture %fmt, ...) {
+entry:
+ %ret = call i32 @bar(i8* %fmt)
+ ret i32 %ret
+}
+define hidden i32 @puts(i8* %s) {
+entry:
+ %ret = call i32 @bar(i8* %s)
+ ret i32 %ret
+}
+
+declare i8* @foo(i32)
+declare i32 @bar(i8*)
diff --git a/test/LTO/private-symbol.ll b/test/LTO/private-symbol.ll
new file mode 100644
index 000000000000..e13a393442d3
--- /dev/null
+++ b/test/LTO/private-symbol.ll
@@ -0,0 +1,7 @@
+; RUN: llvm-as < %s >%t1
+; RUN: llvm-lto -o %t2 %t1
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
diff --git a/test/LTO/symver-asm.ll b/test/LTO/symver-asm.ll
new file mode 100644
index 000000000000..03dda2bedd96
--- /dev/null
+++ b/test/LTO/symver-asm.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s >%t1
+; RUN: llvm-lto -o %t2 %t1
+; RUN: llvm-nm %t2 | FileCheck %s
+
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm ".symver io_cancel_0_4,io_cancel@@LIBAIO_0.4"
+
+; Even without -exported-symbol, io_cancel_0_4 should be noticed by LTOModule's
+; RecordStreamer, so it shouldn't get eliminated. However, the object file will
+; contain the aliased symver as well as the original.
+define i32 @io_cancel_0_4() {
+; CHECK: io_cancel@@LIBAIO_0.4
+; CHECK: io_cancel_0_4
+ ret i32 0
+}
diff --git a/test/LTO/triple-init.ll b/test/LTO/triple-init.ll
new file mode 100644
index 000000000000..e0ad87967ba2
--- /dev/null
+++ b/test/LTO/triple-init.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s >%t1
+; RUN: llvm-lto -exported-symbol=_main -o %t2 %t1
+; RUN: llvm-nm %t2 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+target triple = "x86_64-apple-macosx10.9"
+
+declare double @pow(double, double)
+
+define double @main(double %x) {
+; We check that LTO will be aware of target triple and apply pow to __exp10 transformation.
+; CHECK: U ___exp10
+ %retval = call double @pow(double 10.0, double %x)
+ ret double %retval
+}
diff --git a/test/Linker/2011-08-22-ResolveAlias2.ll b/test/Linker/2011-08-22-ResolveAlias2.ll
index 254904059733..eee60d49d026 100644
--- a/test/Linker/2011-08-22-ResolveAlias2.ll
+++ b/test/Linker/2011-08-22-ResolveAlias2.ll
@@ -37,56 +37,110 @@
@_ZL33__gthrw_pthread_mutexattr_settypeP19pthread_mutexattr_ti = alias weak i32 (%union.pthread_mutexattr_t*, i32)* @pthread_mutexattr_settype
@_ZL33__gthrw_pthread_mutexattr_destroyP19pthread_mutexattr_t = alias weak i32 (%union.pthread_mutexattr_t*)* @pthread_mutexattr_destroy
-declare void @_ZN13HexxagonBoardC2ERKS_(%struct.HexxagonBoard*, %struct.HexxagonBoard*) uwtable align 2
+define void @_ZN13HexxagonBoardC2ERKS_(%struct.HexxagonBoard*, %struct.HexxagonBoard*) uwtable align 2 {
+ ret void
+}
-declare extern_weak i32 @pthread_once(i32*, void ()*)
+define weak i32 @pthread_once(i32*, void ()*) {
+ ret i32 0
+}
-declare extern_weak i8* @pthread_getspecific(i32)
+define weak i8* @pthread_getspecific(i32) {
+ ret i8* null
+}
-declare extern_weak i32 @pthread_setspecific(i32, i8*)
+define weak i32 @pthread_setspecific(i32, i8*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_create(i64*, %union.pthread_attr_t*, i8* (i8*)*, i8*)
+define weak i32 @pthread_create(i64*, %union.pthread_attr_t*, i8* (i8*)*, i8*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_join(i64, i8**)
+define weak i32 @pthread_join(i64, i8**) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_equal(i64, i64)
+define weak i32 @pthread_equal(i64, i64) {
+ ret i32 0
+}
-declare extern_weak i64 @pthread_self()
+define weak i64 @pthread_self() {
+ ret i64 0
+}
-declare extern_weak i32 @pthread_detach(i64)
+define weak i32 @pthread_detach(i64) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_cancel(i64)
+define weak i32 @pthread_cancel(i64) {
+ ret i32 0
+}
-declare extern_weak i32 @sched_yield()
+define weak i32 @sched_yield() {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutex_lock(%union.pthread_mutex_t*)
+define weak i32 @pthread_mutex_lock(%union.pthread_mutex_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutex_trylock(%union.pthread_mutex_t*)
+define weak i32 @pthread_mutex_trylock(%union.pthread_mutex_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutex_timedlock(%union.pthread_mutex_t*, %struct.timespec*)
+define weak i32 @pthread_mutex_timedlock(%union.pthread_mutex_t*, %struct.timespec*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutex_unlock(%union.pthread_mutex_t*)
+define weak i32 @pthread_mutex_unlock(%union.pthread_mutex_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutex_init(%union.pthread_mutex_t*, %union.pthread_mutexattr_t*)
+define weak i32 @pthread_mutex_init(%union.pthread_mutex_t*, %union.pthread_mutexattr_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutex_destroy(%union.pthread_mutex_t*)
+define weak i32 @pthread_mutex_destroy(%union.pthread_mutex_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_cond_broadcast(%union.pthread_cond_t*)
+define weak i32 @pthread_cond_broadcast(%union.pthread_cond_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_cond_signal(%union.pthread_cond_t*)
+define weak i32 @pthread_cond_signal(%union.pthread_cond_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_cond_wait(%union.pthread_cond_t*, %union.pthread_mutex_t*)
+define weak i32 @pthread_cond_wait(%union.pthread_cond_t*, %union.pthread_mutex_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_cond_timedwait(%union.pthread_cond_t*, %union.pthread_mutex_t*, %struct.timespec*)
+define weak i32 @pthread_cond_timedwait(%union.pthread_cond_t*, %union.pthread_mutex_t*, %struct.timespec*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_cond_destroy(%union.pthread_cond_t*)
+define weak i32 @pthread_cond_destroy(%union.pthread_cond_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_key_create(i32*, void (i8*)*)
+define weak i32 @pthread_key_create(i32*, void (i8*)*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_key_delete(i32)
+define weak i32 @pthread_key_delete(i32) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutexattr_init(%union.pthread_mutexattr_t*)
+define weak i32 @pthread_mutexattr_init(%union.pthread_mutexattr_t*) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutexattr_settype(%union.pthread_mutexattr_t*, i32)
+define weak i32 @pthread_mutexattr_settype(%union.pthread_mutexattr_t*, i32) {
+ ret i32 0
+}
-declare extern_weak i32 @pthread_mutexattr_destroy(%union.pthread_mutexattr_t*)
+define weak i32 @pthread_mutexattr_destroy(%union.pthread_mutexattr_t*) {
+ ret i32 0
+}
diff --git a/test/Linker/Inputs/alias.ll b/test/Linker/Inputs/alias.ll
new file mode 100644
index 000000000000..f379476e7654
--- /dev/null
+++ b/test/Linker/Inputs/alias.ll
@@ -0,0 +1,3 @@
+@zed = global i32 42
+@foo = alias i32* @zed
+@foo2 = alias bitcast (i32* @zed to i16*)
diff --git a/test/Linker/Inputs/comdat.ll b/test/Linker/Inputs/comdat.ll
new file mode 100644
index 000000000000..fdcca49c3c37
--- /dev/null
+++ b/test/Linker/Inputs/comdat.ll
@@ -0,0 +1,20 @@
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@foo = global i64 43, comdat $foo
+
+define i32 @bar() comdat $foo {
+ ret i32 43
+}
+
+$qux = comdat largest
+@qux = global i32 13, comdat $qux
+@in_unselected_group = global i32 13, comdat $qux
+
+define i32 @baz() comdat $qux {
+ ret i32 13
+}
+
+$any = comdat any
+@any = global i64 7, comdat $any
diff --git a/test/Linker/Inputs/comdat2.ll b/test/Linker/Inputs/comdat2.ll
new file mode 100644
index 000000000000..9e18304744b5
--- /dev/null
+++ b/test/Linker/Inputs/comdat2.ll
@@ -0,0 +1,2 @@
+$foo = comdat largest
+@foo = global i64 43, comdat $foo
diff --git a/test/Linker/Inputs/comdat3.ll b/test/Linker/Inputs/comdat3.ll
new file mode 100644
index 000000000000..06f08b947af1
--- /dev/null
+++ b/test/Linker/Inputs/comdat3.ll
@@ -0,0 +1,2 @@
+$foo = comdat noduplicates
+@foo = global i64 43, comdat $foo
diff --git a/test/Linker/Inputs/comdat4.ll b/test/Linker/Inputs/comdat4.ll
new file mode 100644
index 000000000000..bbfe3f794abf
--- /dev/null
+++ b/test/Linker/Inputs/comdat4.ll
@@ -0,0 +1,5 @@
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat samesize
+@foo = global i64 42, comdat $foo
diff --git a/test/Linker/Inputs/comdat5.ll b/test/Linker/Inputs/comdat5.ll
new file mode 100644
index 000000000000..800af18534b1
--- /dev/null
+++ b/test/Linker/Inputs/comdat5.ll
@@ -0,0 +1,15 @@
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+%MSRTTICompleteObjectLocator = type { i32, i32, i32, i8*, %MSRTTIClassHierarchyDescriptor* }
+%MSRTTIClassHierarchyDescriptor = type { i32, i32, i32, %MSRTTIBaseClassDescriptor** }
+%MSRTTIBaseClassDescriptor = type { i8*, i32, i32, i32, i32, i32, %MSRTTIClassHierarchyDescriptor* }
+%struct.S = type { i32 (...)** }
+
+$"\01??_7S@@6B@" = comdat largest
+
+@"\01??_R4S@@6B@" = external constant %MSRTTICompleteObjectLocator
+@some_name = private unnamed_addr constant [2 x i8*] [i8* bitcast (%MSRTTICompleteObjectLocator* @"\01??_R4S@@6B@" to i8*), i8* bitcast (void (%struct.S*, i32)* @"\01??_GS@@UAEPAXI@Z" to i8*)], comdat $"\01??_7S@@6B@"
+@"\01??_7S@@6B@" = alias getelementptr([2 x i8*]* @some_name, i32 0, i32 1)
+
+declare x86_thiscallcc void @"\01??_GS@@UAEPAXI@Z"(%struct.S*, i32) unnamed_addr
diff --git a/test/Linker/Inputs/datalayout-a.ll b/test/Linker/Inputs/datalayout-a.ll
new file mode 100644
index 000000000000..e78478e6dfa4
--- /dev/null
+++ b/test/Linker/Inputs/datalayout-a.ll
@@ -0,0 +1 @@
+target datalayout = "e"
diff --git a/test/Linker/Inputs/datalayout-b.ll b/test/Linker/Inputs/datalayout-b.ll
new file mode 100644
index 000000000000..d76c1aa2e311
--- /dev/null
+++ b/test/Linker/Inputs/datalayout-b.ll
@@ -0,0 +1 @@
+target datalayout = "e-p:16:16"
diff --git a/test/Linker/Inputs/old_global_ctors.3.4.bc b/test/Linker/Inputs/old_global_ctors.3.4.bc
new file mode 100644
index 000000000000..a24b1b488800
--- /dev/null
+++ b/test/Linker/Inputs/old_global_ctors.3.4.bc
Binary files differ
diff --git a/test/Linker/Inputs/targettriple-a.ll b/test/Linker/Inputs/targettriple-a.ll
new file mode 100644
index 000000000000..296d2df3ab6a
--- /dev/null
+++ b/test/Linker/Inputs/targettriple-a.ll
@@ -0,0 +1 @@
+target triple = "e"
diff --git a/test/Linker/Inputs/targettriple-b.ll b/test/Linker/Inputs/targettriple-b.ll
new file mode 100644
index 000000000000..cca872ec9662
--- /dev/null
+++ b/test/Linker/Inputs/targettriple-b.ll
@@ -0,0 +1 @@
+target triple = "E"
diff --git a/test/Linker/Inputs/type-unique-simple2-a.ll b/test/Linker/Inputs/type-unique-simple2-a.ll
index 63470f3f5e8b..676b4109c0d9 100644
--- a/test/Linker/Inputs/type-unique-simple2-a.ll
+++ b/test/Linker/Inputs/type-unique-simple2-a.ll
@@ -65,7 +65,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (http://llvm.org/git/clang.git 8a3f9e46cb988d2c664395b21910091e3730ae82) (http://llvm.org/git/llvm.git 4699e9549358bc77824a59114548eecc3f7c523c)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !11, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [foo.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"foo.cpp", metadata !"."}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !5, null, metadata !"Base", i32 1, i64 128, i64 64, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS4Base"} ; [ DW_TAG_structure_type ] [Base] [line 1, size 128, align 64, offset 0] [def] [from ]
!5 = metadata !{metadata !"./a.hpp", metadata !"."}
diff --git a/test/Linker/Inputs/type-unique-simple2-b.ll b/test/Linker/Inputs/type-unique-simple2-b.ll
index f564d81f1bb1..3ec79e5d9cb6 100644
--- a/test/Linker/Inputs/type-unique-simple2-b.ll
+++ b/test/Linker/Inputs/type-unique-simple2-b.ll
@@ -38,7 +38,7 @@ attributes #3 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (http://llvm.org/git/clang.git 8a3f9e46cb988d2c664395b21910091e3730ae82) (http://llvm.org/git/llvm.git 4699e9549358bc77824a59114548eecc3f7c523c)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !11, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [bar.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"bar.cpp", metadata !"."}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !5, null, metadata !"Base", i32 1, i64 128, i64 64, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS4Base"} ; [ DW_TAG_structure_type ] [Base] [line 1, size 128, align 64, offset 0] [def] [from ]
!5 = metadata !{metadata !"./a.hpp", metadata !"."}
diff --git a/test/Linker/alias.ll b/test/Linker/alias.ll
new file mode 100644
index 000000000000..bce51ad9836f
--- /dev/null
+++ b/test/Linker/alias.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-link %s %S/Inputs/alias.ll -S -o - | FileCheck %s
+; RUN: llvm-link %S/Inputs/alias.ll %s -S -o - | FileCheck %s
+
+@foo = weak global i32 0
+; CHECK-DAG: @foo = alias i32* @zed
+
+@bar = alias i32* @foo
+; CHECK-DAG: @bar = alias i32* @foo
+
+@foo2 = weak global i32 0
+; CHECK-DAG: @foo2 = alias bitcast (i32* @zed to i16*)
+
+@bar2 = alias i32* @foo2
+; CHECK-DAG: @bar2 = alias bitcast (i16* @foo2 to i32*)
+
+; CHECK-DAG: @zed = global i32 42
diff --git a/test/Linker/comdat.ll b/test/Linker/comdat.ll
new file mode 100644
index 000000000000..4d2aef7b8a09
--- /dev/null
+++ b/test/Linker/comdat.ll
@@ -0,0 +1,32 @@
+; RUN: llvm-link %s %p/Inputs/comdat.ll -S -o - | FileCheck %s
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@foo = global i32 42, comdat $foo
+
+define i32 @bar() comdat $foo {
+ ret i32 42
+}
+
+$qux = comdat largest
+@qux = global i64 12, comdat $qux
+
+define i32 @baz() comdat $qux {
+ ret i32 12
+}
+
+$any = comdat any
+@any = global i64 6, comdat $any
+
+; CHECK: $qux = comdat largest
+; CHECK: $foo = comdat largest
+; CHECK: $any = comdat any
+
+; CHECK: @qux = global i64 12, comdat $qux
+; CHECK: @any = global i64 6, comdat $any
+; CHECK: @foo = global i64 43, comdat $foo
+; CHECK-NOT: @in_unselected_group = global i32 13, comdat $qux
+
+; CHECK: define i32 @baz() comdat $qux
+; CHECK: define i32 @bar() comdat $foo
diff --git a/test/Linker/comdat2.ll b/test/Linker/comdat2.ll
new file mode 100644
index 000000000000..60c3d7cf5502
--- /dev/null
+++ b/test/Linker/comdat2.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-link %s %p/Inputs/comdat.ll -S -o - 2>&1 | FileCheck %s
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat samesize
+@foo = global i32 42, comdat $foo
+; CHECK: Linking COMDATs named 'foo': invalid selection kinds!
diff --git a/test/Linker/comdat3.ll b/test/Linker/comdat3.ll
new file mode 100644
index 000000000000..f0d9a48bb9dd
--- /dev/null
+++ b/test/Linker/comdat3.ll
@@ -0,0 +1,5 @@
+; RUN: not llvm-link %s %p/Inputs/comdat2.ll -S -o - 2>&1 | FileCheck %s
+
+$foo = comdat largest
+@foo = global i32 43, comdat $foo
+; CHECK: Linking COMDATs named 'foo': can't do size dependent selection without DataLayout!
diff --git a/test/Linker/comdat4.ll b/test/Linker/comdat4.ll
new file mode 100644
index 000000000000..50c1778e894d
--- /dev/null
+++ b/test/Linker/comdat4.ll
@@ -0,0 +1,5 @@
+; RUN: not llvm-link %s %p/Inputs/comdat3.ll -S -o - 2>&1 | FileCheck %s
+
+$foo = comdat noduplicates
+@foo = global i64 43, comdat $foo
+; CHECK: Linking COMDATs named 'foo': noduplicates has been violated!
diff --git a/test/Linker/comdat5.ll b/test/Linker/comdat5.ll
new file mode 100644
index 000000000000..011fb8c0f462
--- /dev/null
+++ b/test/Linker/comdat5.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-link %s %p/Inputs/comdat4.ll -S -o - 2>&1 | FileCheck %s
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat samesize
+@foo = global i32 42, comdat $foo
+; CHECK: Linking COMDATs named 'foo': SameSize violated!
diff --git a/test/Linker/comdat6.ll b/test/Linker/comdat6.ll
new file mode 100644
index 000000000000..efa5dfb4d677
--- /dev/null
+++ b/test/Linker/comdat6.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-link %s %p/Inputs/comdat5.ll -S -o - 2>&1 | FileCheck %s
+; RUN: llvm-link %p/Inputs/comdat5.ll %s -S -o - 2>&1 | FileCheck %s
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+%struct.S = type { i32 (...)** }
+
+$"\01??_7S@@6B@" = comdat largest
+@"\01??_7S@@6B@" = linkonce_odr unnamed_addr constant [1 x i8*] [i8* bitcast (void (%struct.S*, i32)* @"\01??_GS@@UAEPAXI@Z" to i8*)], comdat $"\01??_7S@@6B@"
+
+; CHECK: @"\01??_7S@@6B@" = alias getelementptr inbounds ([2 x i8*]* @some_name, i32 0, i32 1)
+
+declare x86_thiscallcc void @"\01??_GS@@UAEPAXI@Z"(%struct.S*, i32) unnamed_addr
diff --git a/test/Linker/comdat7.ll b/test/Linker/comdat7.ll
new file mode 100644
index 000000000000..c3ff3f6cd7fb
--- /dev/null
+++ b/test/Linker/comdat7.ll
@@ -0,0 +1,9 @@
+; RUN: not llvm-link %s %p/Inputs/comdat5.ll -S -o - 2>&1 | FileCheck %s
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$"\01??_7S@@6B@" = comdat largest
+define void @"\01??_7S@@6B@"() {
+ ret void
+}
+; CHECK: GlobalVariable required for data dependent selection!
diff --git a/test/Linker/comdat8.ll b/test/Linker/comdat8.ll
new file mode 100644
index 000000000000..21669f69bd50
--- /dev/null
+++ b/test/Linker/comdat8.ll
@@ -0,0 +1,10 @@
+; RUN: not llvm-link %s %p/Inputs/comdat5.ll -S -o - 2>&1 | FileCheck %s
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$"\01??_7S@@6B@" = comdat largest
+define void @some_name() {
+ ret void
+}
+@"\01??_7S@@6B@" = alias i8* inttoptr (i32 ptrtoint (void ()* @some_name to i32) to i8*)
+; CHECK: COMDAT key involves incomputable alias size.
diff --git a/test/Linker/datalayout.ll b/test/Linker/datalayout.ll
new file mode 100644
index 000000000000..8cbfc198a684
--- /dev/null
+++ b/test/Linker/datalayout.ll
@@ -0,0 +1,14 @@
+; REQUIRES: shell
+; RUN: llvm-link %s %S/Inputs/datalayout-a.ll -S -o - 2>%t.a.err | FileCheck %s
+; RUN: (echo foo ;cat %t.a.err) | FileCheck --check-prefix=WARN-A %s
+
+; RUN: llvm-link %s %S/Inputs/datalayout-b.ll -S -o - 2>%t.b.err | FileCheck %s
+; RUN: cat %t.b.err | FileCheck --check-prefix=WARN-B %s
+
+target datalayout = "e"
+
+; CHECK: target datalayout = "e"
+
+; WARN-A-NOT: WARNING
+
+; WARN-B: WARNING: Linking two modules of different data layouts:
diff --git a/test/Linker/debug-info-version-a.ll b/test/Linker/debug-info-version-a.ll
new file mode 100644
index 000000000000..c3d9c87a2e90
--- /dev/null
+++ b/test/Linker/debug-info-version-a.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-link %s %p/debug-info-version-b.ll -S -o - | FileCheck %s
+
+; Test linking of incompatible debug info versions. The debug info
+; from the other file should be dropped.
+
+; CHECK-NOT: metadata !{metadata !"b.c", metadata !""}
+; CHECK: metadata !{metadata !"a.c", metadata !""}
+; CHECK-NOT: metadata !{metadata !"b.c", metadata !""}
+
+!llvm.module.flags = !{ !0 }
+!llvm.dbg.cu = !{!1}
+
+!0 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!1 = metadata !{i32 589841, metadata !2, i32 12, metadata !"clang", i1 true, metadata !"", i32 0, metadata !3, metadata !3, metadata !3, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{metadata !"a.c", metadata !""}
+!3 = metadata !{}
diff --git a/test/Linker/debug-info-version-b.ll b/test/Linker/debug-info-version-b.ll
new file mode 100644
index 000000000000..2b4f1844d169
--- /dev/null
+++ b/test/Linker/debug-info-version-b.ll
@@ -0,0 +1,10 @@
+; RUN: true
+; Companion for debug-info-version-a.ll.
+
+!llvm.module.flags = !{ !0 }
+!llvm.dbg.cu = !{!1}
+
+!0 = metadata !{i32 2, metadata !"Debug Info Version", i32 42}
+!1 = metadata !{i32 589841, metadata !2, i32 12, metadata !"clang", metadata !"I AM UNEXPECTED!"} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{metadata !"b.c", metadata !""}
+!3 = metadata !{}
diff --git a/test/Linker/dllstorage-a.ll b/test/Linker/dllstorage-a.ll
new file mode 100644
index 000000000000..91b98184f8b0
--- /dev/null
+++ b/test/Linker/dllstorage-a.ll
@@ -0,0 +1,4 @@
+; RUN: llvm-link %s %p/dllstorage-b.ll -S -o - | FileCheck %s
+@foo = external global i32
+
+; CHECK: @foo = dllexport global i32 42
diff --git a/test/Linker/dllstorage-b.ll b/test/Linker/dllstorage-b.ll
new file mode 100644
index 000000000000..4c7dbcd19bd7
--- /dev/null
+++ b/test/Linker/dllstorage-b.ll
@@ -0,0 +1,3 @@
+; RUN: true
+
+@foo = dllexport global i32 42
diff --git a/test/Linker/func-attrs-a.ll b/test/Linker/func-attrs-a.ll
new file mode 100644
index 000000000000..d5495e1e3fdc
--- /dev/null
+++ b/test/Linker/func-attrs-a.ll
@@ -0,0 +1,14 @@
+; RUN: llvm-link %s %p/func-attrs-b.ll -S -o - | FileCheck %s
+; PR2382
+
+; CHECK: call void @check0(%struct.S0* sret null, %struct.S0* byval align 4 null, %struct.S0* align 4 null, %struct.S0* byval align 4 null)
+; CHECK: define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval %arg0, %struct.S0* %arg1, %struct.S0* byval %arg2)
+
+%struct.S0 = type <{ i8, i8, i8, i8 }>
+
+define void @a() {
+ call void @check0(%struct.S0* sret null, %struct.S0* byval align 4 null, %struct.S0* align 4 null, %struct.S0* byval align 4 null)
+ ret void
+}
+
+declare void @check0(%struct.S0*, %struct.S0*, %struct.S0*, %struct.S0*)
diff --git a/test/Linker/func-attrs-b.ll b/test/Linker/func-attrs-b.ll
new file mode 100644
index 000000000000..df78e5f54ab7
--- /dev/null
+++ b/test/Linker/func-attrs-b.ll
@@ -0,0 +1,8 @@
+; This file is used with func-attrs-a.ll
+; RUN: true
+
+%struct.S0 = type <{ i8, i8, i8, i8 }>
+
+define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval %arg0, %struct.S0* %arg1, %struct.S0* byval %arg2) {
+ ret void
+}
diff --git a/test/Linker/global_ctors.ll b/test/Linker/global_ctors.ll
new file mode 100644
index 000000000000..541f0d4f91bb
--- /dev/null
+++ b/test/Linker/global_ctors.ll
@@ -0,0 +1,29 @@
+; RUN: llvm-as %s -o %t.new.bc
+; RUN: llvm-link %t.new.bc %S/Inputs/old_global_ctors.3.4.bc | llvm-dis | FileCheck %s
+
+; old_global_ctors.3.4.bc contains the following LLVM IL, assembled into
+; bitcode by llvm-as from 3.4. It uses a two element @llvm.global_ctors array.
+; ---
+; declare void @a_global_ctor()
+; declare void @b_global_ctor()
+;
+; @llvm.global_ctors = appending global [2 x { i32, void ()* } ] [
+; { i32, void ()* } { i32 65535, void ()* @a_global_ctor },
+; { i32, void ()* } { i32 65535, void ()* @b_global_ctor }
+; ]
+; ---
+
+declare void @c_global_ctor()
+declare void @d_global_ctor()
+
+@llvm.global_ctors = appending global [2 x { i32, void ()*, i8* } ] [
+ { i32, void ()*, i8* } { i32 65535, void ()* @c_global_ctor, i8* null },
+ { i32, void ()*, i8* } { i32 65535, void ()* @d_global_ctor, i8* null }
+]
+
+; CHECK: @llvm.global_ctors = appending global [4 x { i32, void ()*, i8* }] [
+; CHECK-DAG: { i32, void ()*, i8* } { i32 65535, void ()* @a_global_ctor, i8* null }
+; CHECK-DAG: { i32, void ()*, i8* } { i32 65535, void ()* @b_global_ctor, i8* null }
+; CHECK-DAG: { i32, void ()*, i8* } { i32 65535, void ()* @c_global_ctor, i8* null }
+; CHECK-DAG: { i32, void ()*, i8* } { i32 65535, void ()* @d_global_ctor, i8* null }
+; CHECK: ]
diff --git a/test/Linker/targettriple.ll b/test/Linker/targettriple.ll
new file mode 100644
index 000000000000..71830477bddc
--- /dev/null
+++ b/test/Linker/targettriple.ll
@@ -0,0 +1,14 @@
+; REQUIRES: shell
+; RUN: llvm-link %s %S/Inputs/targettriple-a.ll -S -o - 2>%t.a.err | FileCheck %s
+; RUN: (echo foo ;cat %t.a.err) | FileCheck --check-prefix=WARN-A %s
+
+; RUN: llvm-link %s %S/Inputs/targettriple-b.ll -S -o - 2>%t.b.err | FileCheck %s
+; RUN: cat %t.b.err | FileCheck --check-prefix=WARN-B %s
+
+target triple = "e"
+
+; CHECK: target triple = "e"
+
+; WARN-A-NOT: WARNING
+
+; WARN-B: WARNING: Linking two modules of different target triples:
diff --git a/test/Linker/type-unique-odr-a.ll b/test/Linker/type-unique-odr-a.ll
new file mode 100644
index 000000000000..91c80339ec03
--- /dev/null
+++ b/test/Linker/type-unique-odr-a.ll
@@ -0,0 +1,102 @@
+; REQUIRES: object-emission
+;
+; RUN: llvm-link %s %p/type-unique-odr-b.ll -S -o - | %llc_dwarf -filetype=obj -O0 | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+;
+; Test ODR-based type uniquing for C++ class members.
+; rdar://problem/15851313.
+;
+; $ cat -n type-unique-odr-a.cpp
+; 1 class A {
+; 2 int data;
+; 3 protected:
+; 4 void getFoo();
+; 5 };
+; 6
+; 7 static void bar() {
+; 8 A a;
+; 9 }
+; 10
+; 11 void baz() { bar(); }
+;; #include "ab.h"
+; foo_t bar() {
+; return A().getFoo();
+; }
+;
+; CHECK: DW_TAG_class_type
+; CHECK-NEXT: DW_AT_name {{.*}} "A"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_member
+; CHECK-NEXT: DW_AT_name {{.*}} "data"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZN1A6getFooEv"
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_name {{.*}} "getFoo"
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_Z3bazv"
+; CHECK: DW_TAG_subprogram
+; CHECK-NOT: DW_TAG
+; CHECK: DW_AT_MIPS_linkage_name {{.*}} "_ZL3barv"
+
+; getFoo and A may only appear once.
+; CHECK-NOT: {{(getFoo)|("A")}}
+
+
+; ModuleID = 'type-unique-odr-a.cpp'
+
+%class.A = type { i32 }
+
+; Function Attrs: nounwind
+define void @_Z3bazv() #0 {
+entry:
+ call void @_ZL3barv(), !dbg !23
+ ret void, !dbg !23
+}
+
+; Function Attrs: nounwind
+define internal void @_ZL3barv() #0 {
+entry:
+ %a = alloca %class.A, align 4
+ call void @llvm.dbg.declare(metadata !{%class.A* %a}, metadata !24), !dbg !25
+ ret void, !dbg !26
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!20, !21}
+!llvm.ident = !{!22}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !14, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [<unknown>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<unknown>", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786434, metadata !5, null, metadata !"A", i32 1, i64 32, i64 32, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_class_type ] [A] [line 1, size 32, align 32, offset 0] [def] [from ]
+!5 = metadata !{metadata !"type-unique-odr-a.cpp", metadata !""}
+!6 = metadata !{metadata !7, metadata !9}
+!7 = metadata !{i32 786445, metadata !5, metadata !"_ZTS1A", metadata !"data", i32 2, i64 32, i64 32, i64 0, i32 1, metadata !8} ; [ DW_TAG_member ] [data] [line 2, size 32, align 32, offset 0] [private] [from int]
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"getFoo", metadata !"getFoo", metadata !"_ZN1A6getFooEv", i32 4, metadata !10, i1 false, i1 false, i32 0, i32 0, null, i32 258, i1 false, null, null, i32 0, metadata !13, i32 4} ; [ DW_TAG_subprogram ] [line 4] [protected] [getFoo]
+!10 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !11, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = metadata !{null, metadata !12}
+!12 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!13 = metadata !{i32 786468}
+!14 = metadata !{metadata !15, metadata !19}
+!15 = metadata !{i32 786478, metadata !5, metadata !16, metadata !"baz", metadata !"baz", metadata !"_Z3bazv", i32 11, metadata !17, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z3bazv, null, null, metadata !2, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [baz]
+!16 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [type-unique-odr-a.cpp]
+!17 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !18, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!18 = metadata !{null}
+!19 = metadata !{i32 786478, metadata !5, metadata !16, metadata !"bar", metadata !"bar", metadata !"_ZL3barv", i32 7, metadata !17, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZL3barv, null, null, metadata !2, i32 7} ; [ DW_TAG_subprogram ] [line 7] [local] [def] [bar]
+!20 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!21 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!22 = metadata !{metadata !"clang version 3.5.0 "}
+!23 = metadata !{i32 11, i32 0, metadata !15, null}
+!24 = metadata !{i32 786688, metadata !19, metadata !"a", metadata !16, i32 8, metadata !"_ZTS1A", i32 0, i32 0} ; [ DW_TAG_auto_variable ] [a] [line 8]
+!25 = metadata !{i32 8, i32 0, metadata !19, null} ; [ DW_TAG_imported_declaration ]
+!26 = metadata !{i32 9, i32 0, metadata !19, null}
diff --git a/test/Linker/type-unique-odr-b.ll b/test/Linker/type-unique-odr-b.ll
new file mode 100644
index 000000000000..3c8b7a1e428f
--- /dev/null
+++ b/test/Linker/type-unique-odr-b.ll
@@ -0,0 +1,86 @@
+; RUN: true
+; This file belongs to type-unique-odr-a.ll.
+;
+; Test ODR-based type uniquing for C++ class members.
+; rdar://problem/15851313.
+;
+; $ cat -n type-unique-odr-b.cpp
+; 1 // Make this declaration start on a different line.
+; 2 class A {
+; 3 int data;
+; 4 protected:
+; 5 void getFoo();
+; 6 };
+; 7
+; 8 void A::getFoo() {}
+; 9
+; 10 static void bar() {}
+; 11 void f() { bar(); };
+
+; ModuleID = 'type-unique-odr-b.cpp'
+
+%class.A = type { i32 }
+
+; Function Attrs: nounwind
+define void @_ZN1A6getFooEv(%class.A* %this) #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !24), !dbg !26
+ %this1 = load %class.A** %this.addr
+ ret void, !dbg !27
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind
+define void @_Z1fv() #0 {
+entry:
+ call void @_ZL3barv(), !dbg !28
+ ret void, !dbg !28
+}
+
+; Function Attrs: nounwind
+define internal void @_ZL3barv() #0 {
+entry:
+ ret void, !dbg !29
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!21, !22}
+!llvm.ident = !{!23}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !14, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [<unknown>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<unknown>", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786434, metadata !5, null, metadata !"A", i32 2, i64 32, i64 32, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS1A"} ; [ DW_TAG_class_type ] [A] [line 2, size 32, align 32, offset 0] [def] [from ]
+!5 = metadata !{metadata !"type-unique-odr-b.cpp", metadata !""}
+!6 = metadata !{metadata !7, metadata !9}
+!7 = metadata !{i32 786445, metadata !5, metadata !"_ZTS1A", metadata !"data", i32 3, i64 32, i64 32, i64 0, i32 1, metadata !8} ; [ DW_TAG_member ] [data] [line 3, size 32, align 32, offset 0] [private] [from int]
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"getFoo", metadata !"getFoo", metadata !"_ZN1A6getFooEv", i32 5, metadata !10, i1 false, i1 false, i32 0, i32 0, null, i32 258, i1 false, null, null, i32 0, metadata !13, i32 5} ; [ DW_TAG_subprogram ] [line 5] [protected] [getFoo]
+!10 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !11, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = metadata !{null, metadata !12}
+!12 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!13 = metadata !{i32 786468}
+!14 = metadata !{metadata !15, metadata !16, metadata !20}
+!15 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"getFoo", metadata !"getFoo", metadata !"_ZN1A6getFooEv", i32 8, metadata !10, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*)* @_ZN1A6getFooEv, null, metadata !9, metadata !2, i32 8} ; [ DW_TAG_subprogram ] [line 8] [def] [getFoo]
+!16 = metadata !{i32 786478, metadata !5, metadata !17, metadata !"f", metadata !"f", metadata !"_Z1fv", i32 11, metadata !18, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z1fv, null, null, metadata !2, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [f]
+!17 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [type-unique-odr-b.cpp]
+!18 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !19, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!19 = metadata !{null}
+!20 = metadata !{i32 786478, metadata !5, metadata !17, metadata !"bar", metadata !"bar", metadata !"_ZL3barv", i32 10, metadata !18, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_ZL3barv, null, null, metadata !2, i32 10} ; [ DW_TAG_subprogram ] [line 10] [local] [def] [bar]
+!21 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!22 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!23 = metadata !{metadata !"clang version 3.5.0 "}
+!24 = metadata !{i32 786689, metadata !15, metadata !"this", null, i32 16777216, metadata !25, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!25 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1A]
+!26 = metadata !{i32 0, i32 0, metadata !15, null}
+!27 = metadata !{i32 8, i32 0, metadata !15, null} ; [ DW_TAG_imported_declaration ]
+!28 = metadata !{i32 11, i32 0, metadata !16, null}
+!29 = metadata !{i32 10, i32 0, metadata !20, null}
diff --git a/test/Linker/type-unique-simple-a.ll b/test/Linker/type-unique-simple-a.ll
index 4bfdff977d10..350cd1fd4595 100644
--- a/test/Linker/type-unique-simple-a.ll
+++ b/test/Linker/type-unique-simple-a.ll
@@ -2,7 +2,7 @@
; RUN: llvm-link %s %p/type-unique-simple-b.ll -S -o %t
; RUN: cat %t | FileCheck %s -check-prefix=LINK
-; RUN: llc -filetype=obj -O0 < %t > %t2
+; RUN: %llc_dwarf -filetype=obj -O0 < %t > %t2
; RUN: llvm-dwarfdump -debug-dump=info %t2 | FileCheck %s
; Make sure the backend generates a single DIE and uses ref_addr.
@@ -70,7 +70,7 @@ attributes #1 = { nounwind readnone }
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (http://llvm.org/git/clang.git c23b1db6268c8e7ce64026d57d1510c1aac200a0) (http://llvm.org/git/llvm.git 09b98fe3978eddefc2145adc1056cf21580ce945)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !9, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/mren/c_testing/type_unique_air/simple/foo.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"foo.cpp", metadata !"/Users/mren/c_testing/type_unique_air/simple"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !5, null, metadata !"Base", i32 1, i64 32, i64 32, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS4Base"} ; [ DW_TAG_structure_type ] [Base] [line 1, size 32, align 32, offset 0] [def] [from ]
!5 = metadata !{metadata !"./a.hpp", metadata !"/Users/mren/c_testing/type_unique_air/simple"}
diff --git a/test/Linker/type-unique-simple-b.ll b/test/Linker/type-unique-simple-b.ll
index c46e67f4ff1a..854ec158794a 100644
--- a/test/Linker/type-unique-simple-b.ll
+++ b/test/Linker/type-unique-simple-b.ll
@@ -40,7 +40,7 @@ attributes #3 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.4 (http://llvm.org/git/clang.git c23b1db6268c8e7ce64026d57d1510c1aac200a0) (http://llvm.org/git/llvm.git 09b98fe3978eddefc2145adc1056cf21580ce945)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !9, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/mren/c_testing/type_unique_air/simple/bar.cpp] [DW_LANG_C_plus_plus]
!1 = metadata !{metadata !"bar.cpp", metadata !"/Users/mren/c_testing/type_unique_air/simple"}
-!2 = metadata !{i32 0}
+!2 = metadata !{}
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786451, metadata !5, null, metadata !"Base", i32 1, i64 32, i64 32, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS4Base"} ; [ DW_TAG_structure_type ] [Base] [line 1, size 32, align 32, offset 0] [def] [from ]
!5 = metadata !{metadata !"./a.hpp", metadata !"/Users/mren/c_testing/type_unique_air/simple"}
diff --git a/test/Linker/type-unique-simple2-a.ll b/test/Linker/type-unique-simple2-a.ll
new file mode 100644
index 000000000000..d0f1155fe654
--- /dev/null
+++ b/test/Linker/type-unique-simple2-a.ll
@@ -0,0 +1,129 @@
+; REQUIRES: object-emission
+;
+; RUN: llvm-link %s %p/type-unique-simple2-b.ll -S -o - | %llc_dwarf -filetype=obj -O0 | llvm-dwarfdump -debug-dump=info - | FileCheck %s
+;
+; Tests for a merge error where attributes are inserted twice into the same DIE.
+;
+; $ cat ab.h
+; typedef int foo_t;
+; class A {
+; public:
+; virtual void setFoo();
+; virtual const foo_t getFoo();
+; };
+;
+; $ cat a.cpp
+; #include "ab.h"
+; foo_t bar() {
+; return A().getFoo();
+; }
+;
+; CHECK: _ZN1A6setFooEv
+; CHECK: DW_AT_accessibility [DW_FORM_data1] (0x01)
+; CHECK-NOT: DW_AT_accessibility
+; CHECK: DW_TAG
+
+; ModuleID = 'a.cpp'
+
+%class.A = type { i32 (...)** }
+
+@_ZTV1A = external unnamed_addr constant [4 x i8*]
+
+; Function Attrs: nounwind
+define i32 @_Z3barv() #0 {
+entry:
+ %tmp = alloca %class.A, align 8
+ %0 = bitcast %class.A* %tmp to i8*, !dbg !38
+ call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 8, i1 false), !dbg !38
+ call void @_ZN1AC1Ev(%class.A* %tmp) #1, !dbg !38
+ %call = call i32 @_ZN1A6getFooEv(%class.A* %tmp), !dbg !38
+ ret i32 %call, !dbg !38
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1
+
+; Function Attrs: inlinehint nounwind
+define linkonce_odr void @_ZN1AC1Ev(%class.A* %this) unnamed_addr #2 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !39), !dbg !41
+ %this1 = load %class.A** %this.addr
+ call void @_ZN1AC2Ev(%class.A* %this1) #1, !dbg !42
+ ret void, !dbg !42
+}
+
+declare i32 @_ZN1A6getFooEv(%class.A*)
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #4
+
+; Function Attrs: inlinehint nounwind
+define linkonce_odr void @_ZN1AC2Ev(%class.A* %this) unnamed_addr #2 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !44), !dbg !45
+ %this1 = load %class.A** %this.addr
+ %0 = bitcast %class.A* %this1 to i8***, !dbg !46
+ store i8** getelementptr inbounds ([4 x i8*]* @_ZTV1A, i64 0, i64 2), i8*** %0, !dbg !46
+ ret void, !dbg !46
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind }
+attributes #2 = { inlinehint nounwind }
+attributes #4 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!35, !36}
+!llvm.ident = !{!37}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !26, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/<unknown>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<unknown>", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786434, metadata !5, null, metadata !"A", i32 2, i64 64, i64 64, i32 0, i32 0, null, metadata !6, i32 0, metadata !"_ZTS1A", null, metadata !"_ZTS1A"} ; [ DW_TAG_class_type ] [A] [line 2, size 64, align 64, offset 0] [def] [from ]
+!5 = metadata !{metadata !"./ab.h", metadata !""}
+!6 = metadata !{metadata !7, metadata !14, metadata !19}
+!7 = metadata !{i32 786445, metadata !5, metadata !8, metadata !"_vptr$A", i32 0, i64 64, i64 0, i64 0, i32 64, metadata !9} ; [ DW_TAG_member ] [_vptr$A] [line 0, size 64, align 0, offset 0] [artificial] [from ]
+!8 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [/./ab.h]
+!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 0, offset 0] [from __vtbl_ptr_type]
+!10 = metadata !{i32 786447, null, null, metadata !"__vtbl_ptr_type", i32 0, i64 64, i64 0, i64 0, i32 0, metadata !11} ; [ DW_TAG_pointer_type ] [__vtbl_ptr_type] [line 0, size 64, align 0, offset 0] [from ]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{metadata !13}
+!13 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!14 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"setFoo", metadata !"setFoo", metadata !"_ZN1A6setFooEv", i32 4, metadata !15, i1 false, i1 false, i32 1, i32 0, metadata !"_ZTS1A", i32 256, i1 false, null, null, i32 0, metadata !18, i32 4} ; [ DW_TAG_subprogram ] [line 4] [setFoo]
+!15 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !16, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = metadata !{null, metadata !17}
+!17 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!18 = metadata !{i32 786468}
+!19 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"getFoo", metadata !"getFoo", metadata !"_ZN1A6getFooEv", i32 5, metadata !20, i1 false, i1 false, i32 1, i32 1, metadata !"_ZTS1A", i32 256, i1 false, null, null, i32 0, metadata !25, i32 5} ; [ DW_TAG_subprogram ] [line 5] [getFoo]
+!20 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !21, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!21 = metadata !{metadata !22, metadata !17}
+!22 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !23} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from foo_t]
+!23 = metadata !{i32 786454, metadata !24, null, metadata !"foo_t", i32 1, i64 0, i64 0, i64 0, i32 0, metadata !13} ; [ DW_TAG_typedef ] [foo_t] [line 1, size 0, align 0, offset 0] [from int]
+!24 = metadata !{metadata !"a.cpp", metadata !""}
+!25 = metadata !{i32 786468}
+!26 = metadata !{metadata !27, metadata !31, metadata !34}
+!27 = metadata !{i32 786478, metadata !24, metadata !28, metadata !"bar", metadata !"bar", metadata !"_Z3barv", i32 2, metadata !29, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z3barv, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [bar]
+!28 = metadata !{i32 786473, metadata !24} ; [ DW_TAG_file_type ] [/a.cpp]
+!29 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !30, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!30 = metadata !{metadata !23}
+!31 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"_ZN1AC1Ev", i32 2, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 320, i1 false, void (%class.A*)* @_ZN1AC1Ev, null, metadata !32, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [A]
+!32 = metadata !{i32 786478, null, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"", i32 0, metadata !15, i1 false, i1 false, i32 0, i32 0, null, i32 320, i1 false, null, null, i32 0, metadata !33, i32 0} ; [ DW_TAG_subprogram ] [line 0] [A]
+!33 = metadata !{i32 786468}
+!34 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"A", metadata !"A", metadata !"_ZN1AC2Ev", i32 2, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 320, i1 false, void (%class.A*)* @_ZN1AC2Ev, null, metadata !32, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [A]
+!35 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!36 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!37 = metadata !{metadata !"clang version 3.5 "}
+!38 = metadata !{i32 3, i32 0, metadata !27, null}
+!39 = metadata !{i32 786689, metadata !31, metadata !"this", null, i32 16777216, metadata !40, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!40 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1A]
+!41 = metadata !{i32 0, i32 0, metadata !31, null}
+!42 = metadata !{i32 2, i32 0, metadata !43, null}
+!43 = metadata !{i32 786443, metadata !5, metadata !31} ; [ DW_TAG_lexical_block ] [/./ab.h]
+!44 = metadata !{i32 786689, metadata !34, metadata !"this", null, i32 16777216, metadata !40, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!45 = metadata !{i32 0, i32 0, metadata !34, null}
+!46 = metadata !{i32 2, i32 0, metadata !34, null}
diff --git a/test/Linker/type-unique-simple2-b.ll b/test/Linker/type-unique-simple2-b.ll
new file mode 100644
index 000000000000..9155f69fb916
--- /dev/null
+++ b/test/Linker/type-unique-simple2-b.ll
@@ -0,0 +1,88 @@
+; RUN: true
+; This file belongs to type-unique-simple2-a.ll.
+;
+; $ cat b.cpp
+; #include "ab.h"
+; void A::setFoo() {}
+; const
+; foo_t A::getFoo() { return 1; }
+; ModuleID = 'b.cpp'
+; target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+; target triple = "x86_64-apple-macosx10.9.0"
+
+%class.A = type { i32 (...)** }
+
+@_ZTV1A = unnamed_addr constant [4 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*), i8* bitcast (void (%class.A*)* @_ZN1A6setFooEv to i8*), i8* bitcast (i32 (%class.A*)* @_ZN1A6getFooEv to i8*)]
+@_ZTVN10__cxxabiv117__class_type_infoE = external global i8*
+@_ZTS1A = constant [3 x i8] c"1A\00"
+@_ZTI1A = unnamed_addr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8]* @_ZTS1A, i32 0, i32 0) }
+
+; Function Attrs: nounwind
+define void @_ZN1A6setFooEv(%class.A* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !32), !dbg !34
+ %this1 = load %class.A** %this.addr
+ ret void, !dbg !35
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+; Function Attrs: nounwind
+define i32 @_ZN1A6getFooEv(%class.A* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %class.A*, align 8
+ store %class.A* %this, %class.A** %this.addr, align 8
+ call void @llvm.dbg.declare(metadata !{%class.A** %this.addr}, metadata !36), !dbg !37
+ %this1 = load %class.A** %this.addr
+ ret i32 1, !dbg !38
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!29, !30}
+!llvm.ident = !{!31}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !25, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/<unknown>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<unknown>", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786434, metadata !5, null, metadata !"A", i32 2, i64 64, i64 64, i32 0, i32 0, null, metadata !6, i32 0, metadata !"_ZTS1A", null, metadata !"_ZTS1A"} ; [ DW_TAG_class_type ] [A] [line 2, size 64, align 64, offset 0] [def] [from ]
+!5 = metadata !{metadata !"./ab.h", metadata !""}
+!6 = metadata !{metadata !7, metadata !14, metadata !19}
+!7 = metadata !{i32 786445, metadata !5, metadata !8, metadata !"_vptr$A", i32 0, i64 64, i64 0, i64 0, i32 64, metadata !9} ; [ DW_TAG_member ] [_vptr$A] [line 0, size 64, align 0, offset 0] [artificial] [from ]
+!8 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [/./ab.h]
+!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 0, offset 0] [from __vtbl_ptr_type]
+!10 = metadata !{i32 786447, null, null, metadata !"__vtbl_ptr_type", i32 0, i64 64, i64 0, i64 0, i32 0, metadata !11} ; [ DW_TAG_pointer_type ] [__vtbl_ptr_type] [line 0, size 64, align 0, offset 0] [from ]
+!11 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = metadata !{metadata !13}
+!13 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!14 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"setFoo", metadata !"setFoo", metadata !"_ZN1A6setFooEv", i32 4, metadata !15, i1 false, i1 false, i32 1, i32 0, metadata !"_ZTS1A", i32 256, i1 false, null, null, i32 0, metadata !18, i32 4} ; [ DW_TAG_subprogram ] [line 4] [setFoo]
+!15 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !16, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = metadata !{null, metadata !17}
+!17 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1A]
+!18 = metadata !{i32 786468}
+!19 = metadata !{i32 786478, metadata !5, metadata !"_ZTS1A", metadata !"getFoo", metadata !"getFoo", metadata !"_ZN1A6getFooEv", i32 5, metadata !20, i1 false, i1 false, i32 1, i32 1, metadata !"_ZTS1A", i32 256, i1 false, null, null, i32 0, metadata !24, i32 5} ; [ DW_TAG_subprogram ] [line 5] [getFoo]
+!20 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !21, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!21 = metadata !{metadata !22, metadata !17}
+!22 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !23} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from foo_t]
+!23 = metadata !{i32 786454, metadata !5, null, metadata !"foo_t", i32 1, i64 0, i64 0, i64 0, i32 0, metadata !13} ; [ DW_TAG_typedef ] [foo_t] [line 1, size 0, align 0, offset 0] [from int]
+!24 = metadata !{i32 786468}
+!25 = metadata !{metadata !26, metadata !28}
+!26 = metadata !{i32 786478, metadata !27, metadata !"_ZTS1A", metadata !"setFoo", metadata !"setFoo", metadata !"_ZN1A6setFooEv", i32 2, metadata !15, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (%class.A*)* @_ZN1A6setFooEv, null, metadata !14, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [setFoo]
+!27 = metadata !{metadata !"b.cpp", metadata !""}
+!28 = metadata !{i32 786478, metadata !27, metadata !"_ZTS1A", metadata !"getFoo", metadata !"getFoo", metadata !"_ZN1A6getFooEv", i32 4, metadata !20, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (%class.A*)* @_ZN1A6getFooEv, null, metadata !19, metadata !2, i32 4} ; [ DW_TAG_subprogram ] [line 4] [def] [getFoo]
+!29 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!30 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!31 = metadata !{metadata !"clang version 3.5 "}
+!32 = metadata !{i32 786689, metadata !26, metadata !"this", null, i32 16777216, metadata !33, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!33 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS1A"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS1A]
+!34 = metadata !{i32 0, i32 0, metadata !26, null}
+!35 = metadata !{i32 2, i32 0, metadata !26, null}
+!36 = metadata !{i32 786689, metadata !28, metadata !"this", null, i32 16777216, metadata !33, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!37 = metadata !{i32 0, i32 0, metadata !28, null}
+!38 = metadata !{i32 4, i32 0, metadata !28, null}
diff --git a/test/Linker/type-unique-simple2.ll b/test/Linker/type-unique-simple2.ll
index ead91df6da15..8a56e2e80c67 100644
--- a/test/Linker/type-unique-simple2.ll
+++ b/test/Linker/type-unique-simple2.ll
@@ -2,5 +2,5 @@
; RUN: llvm-link %S/Inputs/type-unique-simple2-a.ll %S/Inputs/type-unique-simple2-b.ll -S -o %t
; RUN: cat %t | FileCheck %S/Inputs/type-unique-simple2-a.ll -check-prefix=LINK
-; RUN: llc -filetype=obj -O0 < %t > %t2
+; RUN: %llc_dwarf -filetype=obj -O0 < %t > %t2
; RUN: llvm-dwarfdump -debug-dump=info %t2 | FileCheck %S/Inputs/type-unique-simple2-a.ll
diff --git a/test/Linker/unnamed-addr1-a.ll b/test/Linker/unnamed-addr1-a.ll
index adaa40024cfb..794ae987797c 100644
--- a/test/Linker/unnamed-addr1-a.ll
+++ b/test/Linker/unnamed-addr1-a.ll
@@ -21,6 +21,11 @@ define weak void @func-b() unnamed_addr { ret void }
@global-f = weak global i32 42
; CHECK-DAG: @global-f = global i32 42
+@alias-a = weak global i32 42
+; CHECK-DAG: @alias-a = alias i32* @global-f
+@alias-b = weak unnamed_addr global i32 42
+; CHECK-DAG: @alias-b = unnamed_addr alias i32* @global-f
+
declare void @func-c()
; CHECK-DAG: define weak void @func-c() {
define weak void @func-d() { ret void }
@@ -38,6 +43,12 @@ define weak void @func-e() unnamed_addr { ret void }
@global-j = weak global i32 42
; CHECK-DAG: @global-j = global i32 42
+@alias-c = weak global i32 42
+; CHECK-DAG: @alias-c = alias i32* @global-f
+@alias-d = weak unnamed_addr global i32 42
+; CHECK-DAG: @alias-d = alias i32* @global-f
+
+
declare void @func-g()
; CHECK-DAG: define weak void @func-g() {
define weak void @func-h() { ret void }
diff --git a/test/Linker/unnamed-addr1-b.ll b/test/Linker/unnamed-addr1-b.ll
index aa1507b9c6b3..39a0c8bd7e5c 100644
--- a/test/Linker/unnamed-addr1-b.ll
+++ b/test/Linker/unnamed-addr1-b.ll
@@ -6,6 +6,9 @@
@global-e = unnamed_addr global i32 42
@global-f = unnamed_addr global i32 42
+@alias-a = unnamed_addr alias i32* @global-f
+@alias-b = unnamed_addr alias i32* @global-f
+
define weak void @func-c() unnamed_addr { ret void }
define weak void @func-d() unnamed_addr { ret void }
define weak void @func-e() unnamed_addr { ret void }
@@ -15,6 +18,9 @@ define weak void @func-e() unnamed_addr { ret void }
@global-i = global i32 42
@global-j = global i32 42
+@alias-c = alias i32* @global-f
+@alias-d = alias i32* @global-f
+
define weak void @func-g() { ret void }
define weak void @func-h() { ret void }
define weak void @func-i() { ret void }
diff --git a/test/MC/AArch64/alias-logicalimm.s b/test/MC/AArch64/alias-logicalimm.s
new file mode 100644
index 000000000000..28ec40beac4d
--- /dev/null
+++ b/test/MC/AArch64/alias-logicalimm.s
@@ -0,0 +1,41 @@
+// RUN: llvm-mc -triple=aarch64-none-linux-gnu < %s | FileCheck %s
+
+// CHECK: and x0, x1, #0xfffffffffffffffd
+// CHECK: and x0, x1, #0xfffffffffffffffd
+ and x0, x1, #~2
+ bic x0, x1, #2
+
+// CHECK: and w0, w1, #0xfffffffd
+// CHECK: and w0, w1, #0xfffffffd
+ and w0, w1, #~2
+ bic w0, w1, #2
+
+// CHECK: ands x0, x1, #0xfffffffffffffffd
+// CHECK: ands x0, x1, #0xfffffffffffffffd
+ ands x0, x1, #~2
+ bics x0, x1, #2
+
+// CHECK: ands w0, w1, #0xfffffffd
+// CHECK: ands w0, w1, #0xfffffffd
+ ands w0, w1, #~2
+ bics w0, w1, #2
+
+// CHECK: orr x0, x1, #0xfffffffffffffffd
+// CHECK: orr x0, x1, #0xfffffffffffffffd
+ orr x0, x1, #~2
+ orn x0, x1, #2
+
+// CHECK: orr w2, w1, #0xfffffffc
+// CHECK: orr w2, w1, #0xfffffffc
+ orr w2, w1, #~3
+ orn w2, w1, #3
+
+// CHECK: eor x0, x1, #0xfffffffffffffffd
+// CHECK: eor x0, x1, #0xfffffffffffffffd
+ eor x0, x1, #~2
+ eon x0, x1, #2
+
+// CHECK: eor w2, w1, #0xfffffffc
+// CHECK: eor w2, w1, #0xfffffffc
+ eor w2, w1, #~3
+ eon w2, w1, #3
diff --git a/test/MC/AArch64/arm64-adr.s b/test/MC/AArch64/arm64-adr.s
new file mode 100644
index 000000000000..131e545d3bb5
--- /dev/null
+++ b/test/MC/AArch64/arm64-adr.s
@@ -0,0 +1,31 @@
+// RUN: not llvm-mc -triple arm64 -show-encoding < %s 2>%t | FileCheck %s
+// RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+adr x0, #0
+adr x0, #1
+adr x0, 1f
+adr x0, foo
+// CHECK: adr x0, #0 // encoding: [0x00,0x00,0x00,0x10]
+// CHECK: adr x0, #1 // encoding: [0x00,0x00,0x00,0x30]
+// CHECK: adr x0, .Ltmp0 // encoding: [A,A,A,0x10'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: .Ltmp0, kind: fixup_aarch64_pcrel_adr_imm21
+// CHECK: adr x0, foo // encoding: [A,A,A,0x10'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: foo, kind: fixup_aarch64_pcrel_adr_imm21
+
+adrp x0, #0
+adrp x0, #4096
+adrp x0, 1f
+adrp x0, foo
+// CHECK: adrp x0, #0 // encoding: [0x00,0x00,0x00,0x90]
+// CHECK: adrp x0, #4096 // encoding: [0x00,0x00,0x00,0xb0]
+// CHECK: adrp x0, .Ltmp0 // encoding: [A,A,A,0x90'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: .Ltmp0, kind: fixup_aarch64_pcrel_adrp_imm21
+// CHECK: adrp x0, foo // encoding: [A,A,A,0x90'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: foo, kind: fixup_aarch64_pcrel_adrp_imm21
+
+adr x0, #0xffffffff
+adrp x0, #0xffffffff
+adrp x0, #1
+// CHECK-ERRORS: error: expected label or encodable integer pc offset
+// CHECK-ERRORS: error: expected label or encodable integer pc offset
+// CHECK-ERRORS: error: expected label or encodable integer pc offset
diff --git a/test/MC/AArch64/arm64-advsimd.s b/test/MC/AArch64/arm64-advsimd.s
new file mode 100644
index 000000000000..c627de708d31
--- /dev/null
+++ b/test/MC/AArch64/arm64-advsimd.s
@@ -0,0 +1,1997 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -output-asm-variant=1 -show-encoding < %s | FileCheck %s
+
+foo:
+
+ abs.8b v0, v0
+ abs.16b v0, v0
+ abs.4h v0, v0
+ abs.8h v0, v0
+ abs.2s v0, v0
+ abs.4s v0, v0
+
+; CHECK: abs.8b v0, v0 ; encoding: [0x00,0xb8,0x20,0x0e]
+; CHECK: abs.16b v0, v0 ; encoding: [0x00,0xb8,0x20,0x4e]
+; CHECK: abs.4h v0, v0 ; encoding: [0x00,0xb8,0x60,0x0e]
+; CHECK: abs.8h v0, v0 ; encoding: [0x00,0xb8,0x60,0x4e]
+; CHECK: abs.2s v0, v0 ; encoding: [0x00,0xb8,0xa0,0x0e]
+; CHECK: abs.4s v0, v0 ; encoding: [0x00,0xb8,0xa0,0x4e]
+
+ add.8b v0, v0, v0
+ add.16b v0, v0, v0
+ add.4h v0, v0, v0
+ add.8h v0, v0, v0
+ add.2s v0, v0, v0
+ add.4s v0, v0, v0
+ add.2d v0, v0, v0
+
+; CHECK: add.8b v0, v0, v0 ; encoding: [0x00,0x84,0x20,0x0e]
+; CHECK: add.16b v0, v0, v0 ; encoding: [0x00,0x84,0x20,0x4e]
+; CHECK: add.4h v0, v0, v0 ; encoding: [0x00,0x84,0x60,0x0e]
+; CHECK: add.8h v0, v0, v0 ; encoding: [0x00,0x84,0x60,0x4e]
+; CHECK: add.2s v0, v0, v0 ; encoding: [0x00,0x84,0xa0,0x0e]
+; CHECK: add.4s v0, v0, v0 ; encoding: [0x00,0x84,0xa0,0x4e]
+; CHECK: add.2d v0, v0, v0 ; encoding: [0x00,0x84,0xe0,0x4e]
+
+ add d1, d2, d3
+
+; CHECK: add d1, d2, d3 ; encoding: [0x41,0x84,0xe3,0x5e]
+
+ addhn.8b v0, v0, v0
+ addhn2.16b v0, v0, v0
+ addhn.4h v0, v0, v0
+ addhn2.8h v0, v0, v0
+ addhn.2s v0, v0, v0
+ addhn2.4s v0, v0, v0
+
+; CHECK: addhn.8b v0, v0, v0 ; encoding: [0x00,0x40,0x20,0x0e]
+; CHECK: addhn2.16b v0, v0, v0 ; encoding: [0x00,0x40,0x20,0x4e]
+; CHECK: addhn.4h v0, v0, v0 ; encoding: [0x00,0x40,0x60,0x0e]
+; CHECK: addhn2.8h v0, v0, v0 ; encoding: [0x00,0x40,0x60,0x4e]
+; CHECK: addhn.2s v0, v0, v0 ; encoding: [0x00,0x40,0xa0,0x0e]
+; CHECK: addhn2.4s v0, v0, v0 ; encoding: [0x00,0x40,0xa0,0x4e]
+
+ addp.8b v0, v0, v0
+ addp.16b v0, v0, v0
+ addp.4h v0, v0, v0
+ addp.8h v0, v0, v0
+ addp.2s v0, v0, v0
+ addp.4s v0, v0, v0
+ addp.2d v0, v0, v0
+
+; CHECK: addp.8b v0, v0, v0 ; encoding: [0x00,0xbc,0x20,0x0e]
+; CHECK: addp.16b v0, v0, v0 ; encoding: [0x00,0xbc,0x20,0x4e]
+; CHECK: addp.4h v0, v0, v0 ; encoding: [0x00,0xbc,0x60,0x0e]
+; CHECK: addp.8h v0, v0, v0 ; encoding: [0x00,0xbc,0x60,0x4e]
+; CHECK: addp.2s v0, v0, v0 ; encoding: [0x00,0xbc,0xa0,0x0e]
+; CHECK: addp.4s v0, v0, v0 ; encoding: [0x00,0xbc,0xa0,0x4e]
+; CHECK: addp.2d v0, v0, v0 ; encoding: [0x00,0xbc,0xe0,0x4e]
+
+ addp.2d d0, v0
+
+; CHECK: addp.2d d0, v0 ; encoding: [0x00,0xb8,0xf1,0x5e]
+
+ addv.8b b0, v0
+ addv.16b b0, v0
+ addv.4h h0, v0
+ addv.8h h0, v0
+ addv.4s s0, v0
+
+; CHECK: addv.8b b0, v0 ; encoding: [0x00,0xb8,0x31,0x0e]
+; CHECK: addv.16b b0, v0 ; encoding: [0x00,0xb8,0x31,0x4e]
+; CHECK: addv.4h h0, v0 ; encoding: [0x00,0xb8,0x71,0x0e]
+; CHECK: addv.8h h0, v0 ; encoding: [0x00,0xb8,0x71,0x4e]
+; CHECK: addv.4s s0, v0 ; encoding: [0x00,0xb8,0xb1,0x4e]
+
+
+; INS/DUP
+ dup.2d v0, x3
+ dup.4s v0, w3
+ dup.2s v0, w3
+ dup.8h v0, w3
+ dup.4h v0, w3
+ dup.16b v0, w3
+ dup.8b v0, w3
+
+ dup v1.2d, x3
+ dup v2.4s, w4
+ dup v3.2s, w5
+ dup v4.8h, w6
+ dup v5.4h, w7
+ dup v6.16b, w8
+ dup v7.8b, w9
+
+; CHECK: dup.2d v0, x3 ; encoding: [0x60,0x0c,0x08,0x4e]
+; CHECK: dup.4s v0, w3 ; encoding: [0x60,0x0c,0x04,0x4e]
+; CHECK: dup.2s v0, w3 ; encoding: [0x60,0x0c,0x04,0x0e]
+; CHECK: dup.8h v0, w3 ; encoding: [0x60,0x0c,0x02,0x4e]
+; CHECK: dup.4h v0, w3 ; encoding: [0x60,0x0c,0x02,0x0e]
+; CHECK: dup.16b v0, w3 ; encoding: [0x60,0x0c,0x01,0x4e]
+; CHECK: dup.8b v0, w3 ; encoding: [0x60,0x0c,0x01,0x0e]
+
+; CHECK: dup.2d v1, x3 ; encoding: [0x61,0x0c,0x08,0x4e]
+; CHECK: dup.4s v2, w4 ; encoding: [0x82,0x0c,0x04,0x4e]
+; CHECK: dup.2s v3, w5 ; encoding: [0xa3,0x0c,0x04,0x0e]
+; CHECK: dup.8h v4, w6 ; encoding: [0xc4,0x0c,0x02,0x4e]
+; CHECK: dup.4h v5, w7 ; encoding: [0xe5,0x0c,0x02,0x0e]
+; CHECK: dup.16b v6, w8 ; encoding: [0x06,0x0d,0x01,0x4e]
+; CHECK: dup.8b v7, w9 ; encoding: [0x27,0x0d,0x01,0x0e]
+
+ dup.2d v0, v3[1]
+ dup.2s v0, v3[1]
+ dup.4s v0, v3[1]
+ dup.4h v0, v3[1]
+ dup.8h v0, v3[1]
+ dup.8b v0, v3[1]
+ dup.16b v0, v3[1]
+
+ dup v7.2d, v9.d[1]
+ dup v6.2s, v8.s[1]
+ dup v5.4s, v7.s[2]
+ dup v4.4h, v6.h[3]
+ dup v3.8h, v5.h[4]
+ dup v2.8b, v4.b[5]
+ dup v1.16b, v3.b[6]
+
+; CHECK: dup.2d v0, v3[1] ; encoding: [0x60,0x04,0x18,0x4e]
+; CHECK: dup.2s v0, v3[1] ; encoding: [0x60,0x04,0x0c,0x0e]
+; CHECK: dup.4s v0, v3[1] ; encoding: [0x60,0x04,0x0c,0x4e]
+; CHECK: dup.4h v0, v3[1] ; encoding: [0x60,0x04,0x06,0x0e]
+; CHECK: dup.8h v0, v3[1] ; encoding: [0x60,0x04,0x06,0x4e]
+; CHECK: dup.8b v0, v3[1] ; encoding: [0x60,0x04,0x03,0x0e]
+; CHECK: dup.16b v0, v3[1] ; encoding: [0x60,0x04,0x03,0x4e]
+
+; CHECK: dup.2d v7, v9[1] ; encoding: [0x27,0x05,0x18,0x4e]
+; CHECK: dup.2s v6, v8[1] ; encoding: [0x06,0x05,0x0c,0x0e]
+; CHECK: dup.4s v5, v7[2] ; encoding: [0xe5,0x04,0x14,0x4e]
+; CHECK: dup.4h v4, v6[3] ; encoding: [0xc4,0x04,0x0e,0x0e]
+; CHECK: dup.8h v3, v5[4] ; encoding: [0xa3,0x04,0x12,0x4e]
+; CHECK: dup.8b v2, v4[5] ; encoding: [0x82,0x04,0x0b,0x0e]
+; CHECK: dup.16b v1, v3[6] ; encoding: [0x61,0x04,0x0d,0x4e]
+
+ dup b3, v4[1]
+ dup h3, v4[1]
+ dup s3, v4[1]
+ dup d3, v4[1]
+ dup b3, v4.b[1]
+ dup h3, v4.h[1]
+ dup s3, v4.s[1]
+ dup d3, v4.d[1]
+
+ mov b3, v4[1]
+ mov h3, v4[1]
+ mov s3, v4[1]
+ mov d3, v4[1]
+ mov b3, v4.b[1]
+ mov h3, v4.h[1]
+ mov s3, v4.s[1]
+ mov d3, v4.d[1]
+
+; CHECK: mov b3, v4[1] ; encoding: [0x83,0x04,0x03,0x5e]
+; CHECK: mov h3, v4[1] ; encoding: [0x83,0x04,0x06,0x5e]
+; CHECK: mov s3, v4[1] ; encoding: [0x83,0x04,0x0c,0x5e]
+; CHECK: mov d3, v4[1] ; encoding: [0x83,0x04,0x18,0x5e]
+; CHECK: mov b3, v4[1] ; encoding: [0x83,0x04,0x03,0x5e]
+; CHECK: mov h3, v4[1] ; encoding: [0x83,0x04,0x06,0x5e]
+; CHECK: mov s3, v4[1] ; encoding: [0x83,0x04,0x0c,0x5e]
+; CHECK: mov d3, v4[1] ; encoding: [0x83,0x04,0x18,0x5e]
+
+; CHECK: mov b3, v4[1] ; encoding: [0x83,0x04,0x03,0x5e]
+; CHECK: mov h3, v4[1] ; encoding: [0x83,0x04,0x06,0x5e]
+; CHECK: mov s3, v4[1] ; encoding: [0x83,0x04,0x0c,0x5e]
+; CHECK: mov d3, v4[1] ; encoding: [0x83,0x04,0x18,0x5e]
+; CHECK: mov b3, v4[1] ; encoding: [0x83,0x04,0x03,0x5e]
+; CHECK: mov h3, v4[1] ; encoding: [0x83,0x04,0x06,0x5e]
+; CHECK: mov s3, v4[1] ; encoding: [0x83,0x04,0x0c,0x5e]
+; CHECK: mov d3, v4[1] ; encoding: [0x83,0x04,0x18,0x5e]
+
+ smov.s x3, v2[2]
+ smov x3, v2.s[2]
+ umov.s w3, v2[2]
+ umov w3, v2.s[2]
+ umov.d x3, v2[1]
+ umov x3, v2.d[1]
+
+; CHECK: smov.s x3, v2[2] ; encoding: [0x43,0x2c,0x14,0x4e]
+; CHECK: smov.s x3, v2[2] ; encoding: [0x43,0x2c,0x14,0x4e]
+; CHECK: mov.s w3, v2[2] ; encoding: [0x43,0x3c,0x14,0x0e]
+; CHECK: mov.s w3, v2[2] ; encoding: [0x43,0x3c,0x14,0x0e]
+; CHECK: mov.d x3, v2[1] ; encoding: [0x43,0x3c,0x18,0x4e]
+; CHECK: mov.d x3, v2[1] ; encoding: [0x43,0x3c,0x18,0x4e]
+
+ ; MOV aliases for UMOV instructions above
+
+ mov.s w2, v3[3]
+ mov w5, v7.s[2]
+ mov.d x11, v13[1]
+ mov x17, v19.d[0]
+
+; CHECK: mov.s w2, v3[3] ; encoding: [0x62,0x3c,0x1c,0x0e]
+; CHECK: mov.s w5, v7[2] ; encoding: [0xe5,0x3c,0x14,0x0e]
+; CHECK: mov.d x11, v13[1] ; encoding: [0xab,0x3d,0x18,0x4e]
+; CHECK: mov.d x17, v19[0] ; encoding: [0x71,0x3e,0x08,0x4e]
+
+ ins.d v2[1], x5
+ ins.s v2[1], w5
+ ins.h v2[1], w5
+ ins.b v2[1], w5
+
+ ins v2.d[1], x5
+ ins v2.s[1], w5
+ ins v2.h[1], w5
+ ins v2.b[1], w5
+
+; CHECK: ins.d v2[1], x5 ; encoding: [0xa2,0x1c,0x18,0x4e]
+; CHECK: ins.s v2[1], w5 ; encoding: [0xa2,0x1c,0x0c,0x4e]
+; CHECK: ins.h v2[1], w5 ; encoding: [0xa2,0x1c,0x06,0x4e]
+; CHECK: ins.b v2[1], w5 ; encoding: [0xa2,0x1c,0x03,0x4e]
+
+; CHECK: ins.d v2[1], x5 ; encoding: [0xa2,0x1c,0x18,0x4e]
+; CHECK: ins.s v2[1], w5 ; encoding: [0xa2,0x1c,0x0c,0x4e]
+; CHECK: ins.h v2[1], w5 ; encoding: [0xa2,0x1c,0x06,0x4e]
+; CHECK: ins.b v2[1], w5 ; encoding: [0xa2,0x1c,0x03,0x4e]
+
+ ins.d v2[1], v15[1]
+ ins.s v2[1], v15[1]
+ ins.h v2[1], v15[1]
+ ins.b v2[1], v15[1]
+
+ ins v2.d[1], v15.d[0]
+ ins v2.s[3], v15.s[2]
+ ins v2.h[7], v15.h[3]
+ ins v2.b[10], v15.b[5]
+
+; CHECK: ins.d v2[1], v15[1] ; encoding: [0xe2,0x45,0x18,0x6e]
+; CHECK: ins.s v2[1], v15[1] ; encoding: [0xe2,0x25,0x0c,0x6e]
+; CHECK: ins.h v2[1], v15[1] ; encoding: [0xe2,0x15,0x06,0x6e]
+; CHECK: ins.b v2[1], v15[1] ; encoding: [0xe2,0x0d,0x03,0x6e]
+
+; CHECK: ins.d v2[1], v15[0] ; encoding: [0xe2,0x05,0x18,0x6e]
+; CHECK: ins.s v2[3], v15[2] ; encoding: [0xe2,0x45,0x1c,0x6e]
+; CHECK: ins.h v2[7], v15[3] ; encoding: [0xe2,0x35,0x1e,0x6e]
+; CHECK: ins.b v2[10], v15[5] ; encoding: [0xe2,0x2d,0x15,0x6e]
+
+; MOV aliases for the above INS instructions.
+ mov.d v2[1], x5
+ mov.s v3[1], w6
+ mov.h v4[1], w7
+ mov.b v5[1], w8
+
+ mov v9.d[1], x2
+ mov v8.s[1], w3
+ mov v7.h[1], w4
+ mov v6.b[1], w5
+
+ mov.d v1[1], v10[1]
+ mov.s v2[1], v11[1]
+ mov.h v7[1], v12[1]
+ mov.b v8[1], v15[1]
+
+ mov v2.d[1], v15.d[0]
+ mov v7.s[3], v16.s[2]
+ mov v8.h[7], v17.h[3]
+ mov v9.b[10], v18.b[5]
+
+; CHECK: ins.d v2[1], x5 ; encoding: [0xa2,0x1c,0x18,0x4e]
+; CHECK: ins.s v3[1], w6 ; encoding: [0xc3,0x1c,0x0c,0x4e]
+; CHECK: ins.h v4[1], w7 ; encoding: [0xe4,0x1c,0x06,0x4e]
+; CHECK: ins.b v5[1], w8 ; encoding: [0x05,0x1d,0x03,0x4e]
+; CHECK: ins.d v9[1], x2 ; encoding: [0x49,0x1c,0x18,0x4e]
+; CHECK: ins.s v8[1], w3 ; encoding: [0x68,0x1c,0x0c,0x4e]
+; CHECK: ins.h v7[1], w4 ; encoding: [0x87,0x1c,0x06,0x4e]
+; CHECK: ins.b v6[1], w5 ; encoding: [0xa6,0x1c,0x03,0x4e]
+; CHECK: ins.d v1[1], v10[1] ; encoding: [0x41,0x45,0x18,0x6e]
+; CHECK: ins.s v2[1], v11[1] ; encoding: [0x62,0x25,0x0c,0x6e]
+; CHECK: ins.h v7[1], v12[1] ; encoding: [0x87,0x15,0x06,0x6e]
+; CHECK: ins.b v8[1], v15[1] ; encoding: [0xe8,0x0d,0x03,0x6e]
+; CHECK: ins.d v2[1], v15[0] ; encoding: [0xe2,0x05,0x18,0x6e]
+; CHECK: ins.s v7[3], v16[2] ; encoding: [0x07,0x46,0x1c,0x6e]
+; CHECK: ins.h v8[7], v17[3] ; encoding: [0x28,0x36,0x1e,0x6e]
+; CHECK: ins.b v9[10], v18[5] ; encoding: [0x49,0x2e,0x15,0x6e]
+
+
+ and.8b v0, v0, v0
+ and.16b v0, v0, v0
+
+; CHECK: and.8b v0, v0, v0 ; encoding: [0x00,0x1c,0x20,0x0e]
+; CHECK: and.16b v0, v0, v0 ; encoding: [0x00,0x1c,0x20,0x4e]
+
+ bic.8b v0, v0, v0
+
+; CHECK: bic.8b v0, v0, v0 ; encoding: [0x00,0x1c,0x60,0x0e]
+
+ cmeq.8b v0, v0, v0
+ cmge.8b v0, v0, v0
+ cmgt.8b v0, v0, v0
+ cmhi.8b v0, v0, v0
+ cmhs.8b v0, v0, v0
+ cmtst.8b v0, v0, v0
+ fabd.2s v0, v0, v0
+ facge.2s v0, v0, v0
+ facgt.2s v0, v0, v0
+ faddp.2s v0, v0, v0
+ fadd.2s v0, v0, v0
+ fcmeq.2s v0, v0, v0
+ fcmge.2s v0, v0, v0
+ fcmgt.2s v0, v0, v0
+ fdiv.2s v0, v0, v0
+ fmaxnmp.2s v0, v0, v0
+ fmaxnm.2s v0, v0, v0
+ fmaxp.2s v0, v0, v0
+ fmax.2s v0, v0, v0
+ fminnmp.2s v0, v0, v0
+ fminnm.2s v0, v0, v0
+ fminp.2s v0, v0, v0
+ fmin.2s v0, v0, v0
+ fmla.2s v0, v0, v0
+ fmls.2s v0, v0, v0
+ fmulx.2s v0, v0, v0
+ fmul.2s v0, v0, v0
+ fmulx d2, d3, d1
+ fmulx s2, s3, s1
+ frecps.2s v0, v0, v0
+ frsqrts.2s v0, v0, v0
+ fsub.2s v0, v0, v0
+ mla.8b v0, v0, v0
+ mls.8b v0, v0, v0
+ mul.8b v0, v0, v0
+ pmul.8b v0, v0, v0
+ saba.8b v0, v0, v0
+ sabd.8b v0, v0, v0
+ shadd.8b v0, v0, v0
+ shsub.8b v0, v0, v0
+ smaxp.8b v0, v0, v0
+ smax.8b v0, v0, v0
+ sminp.8b v0, v0, v0
+ smin.8b v0, v0, v0
+ sqadd.8b v0, v0, v0
+ sqdmulh.4h v0, v0, v0
+ sqrdmulh.4h v0, v0, v0
+ sqrshl.8b v0, v0, v0
+ sqshl.8b v0, v0, v0
+ sqsub.8b v0, v0, v0
+ srhadd.8b v0, v0, v0
+ srshl.8b v0, v0, v0
+ sshl.8b v0, v0, v0
+ sub.8b v0, v0, v0
+ uaba.8b v0, v0, v0
+ uabd.8b v0, v0, v0
+ uhadd.8b v0, v0, v0
+ uhsub.8b v0, v0, v0
+ umaxp.8b v0, v0, v0
+ umax.8b v0, v0, v0
+ uminp.8b v0, v0, v0
+ umin.8b v0, v0, v0
+ uqadd.8b v0, v0, v0
+ uqrshl.8b v0, v0, v0
+ uqshl.8b v0, v0, v0
+ uqsub.8b v0, v0, v0
+ urhadd.8b v0, v0, v0
+ urshl.8b v0, v0, v0
+ ushl.8b v0, v0, v0
+
+; CHECK: cmeq.8b v0, v0, v0 ; encoding: [0x00,0x8c,0x20,0x2e]
+; CHECK: cmge.8b v0, v0, v0 ; encoding: [0x00,0x3c,0x20,0x0e]
+; CHECK: cmgt.8b v0, v0, v0 ; encoding: [0x00,0x34,0x20,0x0e]
+; CHECK: cmhi.8b v0, v0, v0 ; encoding: [0x00,0x34,0x20,0x2e]
+; CHECK: cmhs.8b v0, v0, v0 ; encoding: [0x00,0x3c,0x20,0x2e]
+; CHECK: cmtst.8b v0, v0, v0 ; encoding: [0x00,0x8c,0x20,0x0e]
+; CHECK: fabd.2s v0, v0, v0 ; encoding: [0x00,0xd4,0xa0,0x2e]
+; CHECK: facge.2s v0, v0, v0 ; encoding: [0x00,0xec,0x20,0x2e]
+; CHECK: facgt.2s v0, v0, v0 ; encoding: [0x00,0xec,0xa0,0x2e]
+; CHECK: faddp.2s v0, v0, v0 ; encoding: [0x00,0xd4,0x20,0x2e]
+; CHECK: fadd.2s v0, v0, v0 ; encoding: [0x00,0xd4,0x20,0x0e]
+; CHECK: fcmeq.2s v0, v0, v0 ; encoding: [0x00,0xe4,0x20,0x0e]
+; CHECK: fcmge.2s v0, v0, v0 ; encoding: [0x00,0xe4,0x20,0x2e]
+; CHECK: fcmgt.2s v0, v0, v0 ; encoding: [0x00,0xe4,0xa0,0x2e]
+; CHECK: fdiv.2s v0, v0, v0 ; encoding: [0x00,0xfc,0x20,0x2e]
+; CHECK: fmaxnmp.2s v0, v0, v0 ; encoding: [0x00,0xc4,0x20,0x2e]
+; CHECK: fmaxnm.2s v0, v0, v0 ; encoding: [0x00,0xc4,0x20,0x0e]
+; CHECK: fmaxp.2s v0, v0, v0 ; encoding: [0x00,0xf4,0x20,0x2e]
+; CHECK: fmax.2s v0, v0, v0 ; encoding: [0x00,0xf4,0x20,0x0e]
+; CHECK: fminnmp.2s v0, v0, v0 ; encoding: [0x00,0xc4,0xa0,0x2e]
+; CHECK: fminnm.2s v0, v0, v0 ; encoding: [0x00,0xc4,0xa0,0x0e]
+; CHECK: fminp.2s v0, v0, v0 ; encoding: [0x00,0xf4,0xa0,0x2e]
+; CHECK: fmin.2s v0, v0, v0 ; encoding: [0x00,0xf4,0xa0,0x0e]
+; CHECK: fmla.2s v0, v0, v0 ; encoding: [0x00,0xcc,0x20,0x0e]
+; CHECK: fmls.2s v0, v0, v0 ; encoding: [0x00,0xcc,0xa0,0x0e]
+; CHECK: fmulx.2s v0, v0, v0 ; encoding: [0x00,0xdc,0x20,0x0e]
+
+; CHECK: fmul.2s v0, v0, v0 ; encoding: [0x00,0xdc,0x20,0x2e]
+; CHECK: fmulx d2, d3, d1 ; encoding: [0x62,0xdc,0x61,0x5e]
+; CHECK: fmulx s2, s3, s1 ; encoding: [0x62,0xdc,0x21,0x5e]
+; CHECK: frecps.2s v0, v0, v0 ; encoding: [0x00,0xfc,0x20,0x0e]
+; CHECK: frsqrts.2s v0, v0, v0 ; encoding: [0x00,0xfc,0xa0,0x0e]
+; CHECK: fsub.2s v0, v0, v0 ; encoding: [0x00,0xd4,0xa0,0x0e]
+; CHECK: mla.8b v0, v0, v0 ; encoding: [0x00,0x94,0x20,0x0e]
+; CHECK: mls.8b v0, v0, v0 ; encoding: [0x00,0x94,0x20,0x2e]
+; CHECK: mul.8b v0, v0, v0 ; encoding: [0x00,0x9c,0x20,0x0e]
+; CHECK: pmul.8b v0, v0, v0 ; encoding: [0x00,0x9c,0x20,0x2e]
+; CHECK: saba.8b v0, v0, v0 ; encoding: [0x00,0x7c,0x20,0x0e]
+; CHECK: sabd.8b v0, v0, v0 ; encoding: [0x00,0x74,0x20,0x0e]
+; CHECK: shadd.8b v0, v0, v0 ; encoding: [0x00,0x04,0x20,0x0e]
+; CHECK: shsub.8b v0, v0, v0 ; encoding: [0x00,0x24,0x20,0x0e]
+; CHECK: smaxp.8b v0, v0, v0 ; encoding: [0x00,0xa4,0x20,0x0e]
+; CHECK: smax.8b v0, v0, v0 ; encoding: [0x00,0x64,0x20,0x0e]
+; CHECK: sminp.8b v0, v0, v0 ; encoding: [0x00,0xac,0x20,0x0e]
+; CHECK: smin.8b v0, v0, v0 ; encoding: [0x00,0x6c,0x20,0x0e]
+; CHECK: sqadd.8b v0, v0, v0 ; encoding: [0x00,0x0c,0x20,0x0e]
+; CHECK: sqdmulh.4h v0, v0, v0 ; encoding: [0x00,0xb4,0x60,0x0e]
+; CHECK: sqrdmulh.4h v0, v0, v0 ; encoding: [0x00,0xb4,0x60,0x2e]
+; CHECK: sqrshl.8b v0, v0, v0 ; encoding: [0x00,0x5c,0x20,0x0e]
+; CHECK: sqshl.8b v0, v0, v0 ; encoding: [0x00,0x4c,0x20,0x0e]
+; CHECK: sqsub.8b v0, v0, v0 ; encoding: [0x00,0x2c,0x20,0x0e]
+; CHECK: srhadd.8b v0, v0, v0 ; encoding: [0x00,0x14,0x20,0x0e]
+; CHECK: srshl.8b v0, v0, v0 ; encoding: [0x00,0x54,0x20,0x0e]
+; CHECK: sshl.8b v0, v0, v0 ; encoding: [0x00,0x44,0x20,0x0e]
+; CHECK: sub.8b v0, v0, v0 ; encoding: [0x00,0x84,0x20,0x2e]
+; CHECK: uaba.8b v0, v0, v0 ; encoding: [0x00,0x7c,0x20,0x2e]
+; CHECK: uabd.8b v0, v0, v0 ; encoding: [0x00,0x74,0x20,0x2e]
+; CHECK: uhadd.8b v0, v0, v0 ; encoding: [0x00,0x04,0x20,0x2e]
+; CHECK: uhsub.8b v0, v0, v0 ; encoding: [0x00,0x24,0x20,0x2e]
+; CHECK: umaxp.8b v0, v0, v0 ; encoding: [0x00,0xa4,0x20,0x2e]
+; CHECK: umax.8b v0, v0, v0 ; encoding: [0x00,0x64,0x20,0x2e]
+; CHECK: uminp.8b v0, v0, v0 ; encoding: [0x00,0xac,0x20,0x2e]
+; CHECK: umin.8b v0, v0, v0 ; encoding: [0x00,0x6c,0x20,0x2e]
+; CHECK: uqadd.8b v0, v0, v0 ; encoding: [0x00,0x0c,0x20,0x2e]
+; CHECK: uqrshl.8b v0, v0, v0 ; encoding: [0x00,0x5c,0x20,0x2e]
+; CHECK: uqshl.8b v0, v0, v0 ; encoding: [0x00,0x4c,0x20,0x2e]
+; CHECK: uqsub.8b v0, v0, v0 ; encoding: [0x00,0x2c,0x20,0x2e]
+; CHECK: urhadd.8b v0, v0, v0 ; encoding: [0x00,0x14,0x20,0x2e]
+; CHECK: urshl.8b v0, v0, v0 ; encoding: [0x00,0x54,0x20,0x2e]
+; CHECK: ushl.8b v0, v0, v0 ; encoding: [0x00,0x44,0x20,0x2e]
+
+ bif.8b v0, v0, v0
+ bit.8b v0, v0, v0
+ bsl.8b v0, v0, v0
+ eor.8b v0, v0, v0
+ orn.8b v0, v0, v0
+ orr.8b v0, v0, v1
+
+; CHECK: bif.8b v0, v0, v0 ; encoding: [0x00,0x1c,0xe0,0x2e]
+; CHECK: bit.8b v0, v0, v0 ; encoding: [0x00,0x1c,0xa0,0x2e]
+; CHECK: bsl.8b v0, v0, v0 ; encoding: [0x00,0x1c,0x60,0x2e]
+; CHECK: eor.8b v0, v0, v0 ; encoding: [0x00,0x1c,0x20,0x2e]
+; CHECK: orn.8b v0, v0, v0 ; encoding: [0x00,0x1c,0xe0,0x0e]
+; CHECK: orr.8b v0, v0, v1 ; encoding: [0x00,0x1c,0xa1,0x0e]
+
+ sadalp.4h v0, v0
+ sadalp.8h v0, v0
+ sadalp.2s v0, v0
+ sadalp.4s v0, v0
+ sadalp.1d v0, v0
+ sadalp.2d v0, v0
+
+; CHECK: sadalp.4h v0, v0 ; encoding: [0x00,0x68,0x20,0x0e]
+; CHECK: sadalp.8h v0, v0 ; encoding: [0x00,0x68,0x20,0x4e]
+; CHECK: sadalp.2s v0, v0 ; encoding: [0x00,0x68,0x60,0x0e]
+; CHECK: sadalp.4s v0, v0 ; encoding: [0x00,0x68,0x60,0x4e]
+; CHECK: sadalp.1d v0, v0 ; encoding: [0x00,0x68,0xa0,0x0e]
+; CHECK: sadalp.2d v0, v0 ; encoding: [0x00,0x68,0xa0,0x4e]
+
+ cls.8b v0, v0
+ clz.8b v0, v0
+ cnt.8b v0, v0
+ fabs.2s v0, v0
+ fneg.2s v0, v0
+ frecpe.2s v0, v0
+ frinta.2s v0, v0
+ frintx.2s v0, v0
+ frinti.2s v0, v0
+ frintm.2s v0, v0
+ frintn.2s v0, v0
+ frintp.2s v0, v0
+ frintz.2s v0, v0
+ frsqrte.2s v0, v0
+ fsqrt.2s v0, v0
+ neg.8b v0, v0
+ not.8b v0, v0
+ rbit.8b v0, v0
+ rev16.8b v0, v0
+ rev32.8b v0, v0
+ rev64.8b v0, v0
+ sadalp.4h v0, v0
+ saddlp.4h v0, v0
+ scvtf.2s v0, v0
+ sqabs.8b v0, v0
+ sqneg.8b v0, v0
+ sqxtn.8b v0, v0
+ sqxtun.8b v0, v0
+ suqadd.8b v0, v0
+ uadalp.4h v0, v0
+ uaddlp.4h v0, v0
+ ucvtf.2s v0, v0
+ uqxtn.8b v0, v0
+ urecpe.2s v0, v0
+ ursqrte.2s v0, v0
+ usqadd.8b v0, v0
+ xtn.8b v0, v0
+ shll.8h v1, v2, #8
+ shll.4s v3, v4, #16
+ shll.2d v5, v6, #32
+ shll2.8h v7, v8, #8
+ shll2.4s v9, v10, #16
+ shll2.2d v11, v12, #32
+ shll v1.8h, v2.8b, #8
+ shll v1.4s, v2.4h, #16
+ shll v1.2d, v2.2s, #32
+ shll2 v1.8h, v2.16b, #8
+ shll2 v1.4s, v2.8h, #16
+ shll2 v1.2d, v2.4s, #32
+
+; CHECK: cls.8b v0, v0 ; encoding: [0x00,0x48,0x20,0x0e]
+; CHECK: clz.8b v0, v0 ; encoding: [0x00,0x48,0x20,0x2e]
+; CHECK: cnt.8b v0, v0 ; encoding: [0x00,0x58,0x20,0x0e]
+; CHECK: fabs.2s v0, v0 ; encoding: [0x00,0xf8,0xa0,0x0e]
+; CHECK: fneg.2s v0, v0 ; encoding: [0x00,0xf8,0xa0,0x2e]
+; CHECK: frecpe.2s v0, v0 ; encoding: [0x00,0xd8,0xa1,0x0e]
+; CHECK: frinta.2s v0, v0 ; encoding: [0x00,0x88,0x21,0x2e]
+; CHECK: frintx.2s v0, v0 ; encoding: [0x00,0x98,0x21,0x2e]
+; CHECK: frinti.2s v0, v0 ; encoding: [0x00,0x98,0xa1,0x2e]
+; CHECK: frintm.2s v0, v0 ; encoding: [0x00,0x98,0x21,0x0e]
+; CHECK: frintn.2s v0, v0 ; encoding: [0x00,0x88,0x21,0x0e]
+; CHECK: frintp.2s v0, v0 ; encoding: [0x00,0x88,0xa1,0x0e]
+; CHECK: frintz.2s v0, v0 ; encoding: [0x00,0x98,0xa1,0x0e]
+; CHECK: frsqrte.2s v0, v0 ; encoding: [0x00,0xd8,0xa1,0x2e]
+; CHECK: fsqrt.2s v0, v0 ; encoding: [0x00,0xf8,0xa1,0x2e]
+; CHECK: neg.8b v0, v0 ; encoding: [0x00,0xb8,0x20,0x2e]
+; CHECK: mvn.8b v0, v0 ; encoding: [0x00,0x58,0x20,0x2e]
+; CHECK: rbit.8b v0, v0 ; encoding: [0x00,0x58,0x60,0x2e]
+; CHECK: rev16.8b v0, v0 ; encoding: [0x00,0x18,0x20,0x0e]
+; CHECK: rev32.8b v0, v0 ; encoding: [0x00,0x08,0x20,0x2e]
+; CHECK: rev64.8b v0, v0 ; encoding: [0x00,0x08,0x20,0x0e]
+; CHECK: sadalp.4h v0, v0 ; encoding: [0x00,0x68,0x20,0x0e]
+; CHECK: saddlp.4h v0, v0 ; encoding: [0x00,0x28,0x20,0x0e]
+; CHECK: scvtf.2s v0, v0 ; encoding: [0x00,0xd8,0x21,0x0e]
+; CHECK: sqabs.8b v0, v0 ; encoding: [0x00,0x78,0x20,0x0e]
+; CHECK: sqneg.8b v0, v0 ; encoding: [0x00,0x78,0x20,0x2e]
+; CHECK: sqxtn.8b v0, v0 ; encoding: [0x00,0x48,0x21,0x0e]
+; CHECK: sqxtun.8b v0, v0 ; encoding: [0x00,0x28,0x21,0x2e]
+; CHECK: suqadd.8b v0, v0 ; encoding: [0x00,0x38,0x20,0x0e]
+; CHECK: uadalp.4h v0, v0 ; encoding: [0x00,0x68,0x20,0x2e]
+; CHECK: uaddlp.4h v0, v0 ; encoding: [0x00,0x28,0x20,0x2e]
+; CHECK: ucvtf.2s v0, v0 ; encoding: [0x00,0xd8,0x21,0x2e]
+; CHECK: uqxtn.8b v0, v0 ; encoding: [0x00,0x48,0x21,0x2e]
+; CHECK: urecpe.2s v0, v0 ; encoding: [0x00,0xc8,0xa1,0x0e]
+; CHECK: ursqrte.2s v0, v0 ; encoding: [0x00,0xc8,0xa1,0x2e]
+; CHECK: usqadd.8b v0, v0 ; encoding: [0x00,0x38,0x20,0x2e]
+; CHECK: xtn.8b v0, v0 ; encoding: [0x00,0x28,0x21,0x0e]
+; CHECK: shll.8h v1, v2, #8 ; encoding: [0x41,0x38,0x21,0x2e]
+; CHECK: shll.4s v3, v4, #16 ; encoding: [0x83,0x38,0x61,0x2e]
+; CHECK: shll.2d v5, v6, #32 ; encoding: [0xc5,0x38,0xa1,0x2e]
+; CHECK: shll2.8h v7, v8, #8 ; encoding: [0x07,0x39,0x21,0x6e]
+; CHECK: shll2.4s v9, v10, #16 ; encoding: [0x49,0x39,0x61,0x6e]
+; CHECK: shll2.2d v11, v12, #32 ; encoding: [0x8b,0x39,0xa1,0x6e]
+; CHECK: shll.8h v1, v2, #8 ; encoding: [0x41,0x38,0x21,0x2e]
+; CHECK: shll.4s v1, v2, #16 ; encoding: [0x41,0x38,0x61,0x2e]
+; CHECK: shll.2d v1, v2, #32 ; encoding: [0x41,0x38,0xa1,0x2e]
+; CHECK: shll2.8h v1, v2, #8 ; encoding: [0x41,0x38,0x21,0x6e]
+; CHECK: shll2.4s v1, v2, #16 ; encoding: [0x41,0x38,0x61,0x6e]
+; CHECK: shll2.2d v1, v2, #32 ; encoding: [0x41,0x38,0xa1,0x6e]
+
+
+ cmeq.8b v0, v0, #0
+ cmeq.16b v0, v0, #0
+ cmeq.4h v0, v0, #0
+ cmeq.8h v0, v0, #0
+ cmeq.2s v0, v0, #0
+ cmeq.4s v0, v0, #0
+ cmeq.2d v0, v0, #0
+
+; CHECK: cmeq.8b v0, v0, #0 ; encoding: [0x00,0x98,0x20,0x0e]
+; CHECK: cmeq.16b v0, v0, #0 ; encoding: [0x00,0x98,0x20,0x4e]
+; CHECK: cmeq.4h v0, v0, #0 ; encoding: [0x00,0x98,0x60,0x0e]
+; CHECK: cmeq.8h v0, v0, #0 ; encoding: [0x00,0x98,0x60,0x4e]
+; CHECK: cmeq.2s v0, v0, #0 ; encoding: [0x00,0x98,0xa0,0x0e]
+; CHECK: cmeq.4s v0, v0, #0 ; encoding: [0x00,0x98,0xa0,0x4e]
+; CHECK: cmeq.2d v0, v0, #0 ; encoding: [0x00,0x98,0xe0,0x4e]
+
+ cmge.8b v0, v0, #0
+ cmgt.8b v0, v0, #0
+ cmle.8b v0, v0, #0
+ cmlt.8b v0, v0, #0
+ fcmeq.2s v0, v0, #0
+ fcmge.2s v0, v0, #0
+ fcmgt.2s v0, v0, #0
+ fcmle.2s v0, v0, #0
+ fcmlt.2s v0, v0, #0
+
+; ARM verbose mode aliases
+ cmlt v8.8b, v14.8b, #0
+ cmlt v8.16b, v14.16b, #0
+ cmlt v8.4h, v14.4h, #0
+ cmlt v8.8h, v14.8h, #0
+ cmlt v8.2s, v14.2s, #0
+ cmlt v8.4s, v14.4s, #0
+ cmlt v8.2d, v14.2d, #0
+
+; CHECK: cmge.8b v0, v0, #0 ; encoding: [0x00,0x88,0x20,0x2e]
+; CHECK: cmgt.8b v0, v0, #0 ; encoding: [0x00,0x88,0x20,0x0e]
+; CHECK: cmle.8b v0, v0, #0 ; encoding: [0x00,0x98,0x20,0x2e]
+; CHECK: cmlt.8b v0, v0, #0 ; encoding: [0x00,0xa8,0x20,0x0e]
+; CHECK: fcmeq.2s v0, v0, #0.0 ; encoding: [0x00,0xd8,0xa0,0x0e]
+; CHECK: fcmge.2s v0, v0, #0.0 ; encoding: [0x00,0xc8,0xa0,0x2e]
+; CHECK: fcmgt.2s v0, v0, #0.0 ; encoding: [0x00,0xc8,0xa0,0x0e]
+; CHECK: fcmle.2s v0, v0, #0.0 ; encoding: [0x00,0xd8,0xa0,0x2e]
+; CHECK: fcmlt.2s v0, v0, #0.0 ; encoding: [0x00,0xe8,0xa0,0x0e]
+; CHECK: cmlt.8b v8, v14, #0 ; encoding: [0xc8,0xa9,0x20,0x0e]
+; CHECK: cmlt.16b v8, v14, #0 ; encoding: [0xc8,0xa9,0x20,0x4e]
+; CHECK: cmlt.4h v8, v14, #0 ; encoding: [0xc8,0xa9,0x60,0x0e]
+; CHECK: cmlt.8h v8, v14, #0 ; encoding: [0xc8,0xa9,0x60,0x4e]
+; CHECK: cmlt.2s v8, v14, #0 ; encoding: [0xc8,0xa9,0xa0,0x0e]
+; CHECK: cmlt.4s v8, v14, #0 ; encoding: [0xc8,0xa9,0xa0,0x4e]
+; CHECK: cmlt.2d v8, v14, #0 ; encoding: [0xc8,0xa9,0xe0,0x4e]
+
+
+;===-------------------------------------------------------------------------===
+; AdvSIMD Floating-point <-> Integer Conversions
+;===-------------------------------------------------------------------------===
+
+ fcvtas.2s v0, v0
+ fcvtas.4s v0, v0
+ fcvtas.2d v0, v0
+ fcvtas s0, s0
+ fcvtas d0, d0
+
+; CHECK: fcvtas.2s v0, v0 ; encoding: [0x00,0xc8,0x21,0x0e]
+; CHECK: fcvtas.4s v0, v0 ; encoding: [0x00,0xc8,0x21,0x4e]
+; CHECK: fcvtas.2d v0, v0 ; encoding: [0x00,0xc8,0x61,0x4e]
+; CHECK: fcvtas s0, s0 ; encoding: [0x00,0xc8,0x21,0x5e]
+; CHECK: fcvtas d0, d0 ; encoding: [0x00,0xc8,0x61,0x5e]
+
+ fcvtau.2s v0, v0
+ fcvtau.4s v0, v0
+ fcvtau.2d v0, v0
+ fcvtau s0, s0
+ fcvtau d0, d0
+
+; CHECK: fcvtau.2s v0, v0 ; encoding: [0x00,0xc8,0x21,0x2e]
+; CHECK: fcvtau.4s v0, v0 ; encoding: [0x00,0xc8,0x21,0x6e]
+; CHECK: fcvtau.2d v0, v0 ; encoding: [0x00,0xc8,0x61,0x6e]
+; CHECK: fcvtau s0, s0 ; encoding: [0x00,0xc8,0x21,0x7e]
+; CHECK: fcvtau d0, d0 ; encoding: [0x00,0xc8,0x61,0x7e]
+
+ fcvtl v1.4s, v5.4h
+ fcvtl v2.2d, v6.2s
+ fcvtl2 v3.4s, v7.8h
+ fcvtl2 v4.2d, v8.4s
+
+; CHECK: fcvtl v1.4s, v5.4h ; encoding: [0xa1,0x78,0x21,0x0e]
+; CHECK: fcvtl v2.2d, v6.2s ; encoding: [0xc2,0x78,0x61,0x0e]
+; CHECK: fcvtl2 v3.4s, v7.8h ; encoding: [0xe3,0x78,0x21,0x4e]
+; CHECK: fcvtl2 v4.2d, v8.4s ; encoding: [0x04,0x79,0x61,0x4e]
+
+ fcvtms.2s v0, v0
+ fcvtms.4s v0, v0
+ fcvtms.2d v0, v0
+ fcvtms s0, s0
+ fcvtms d0, d0
+
+; CHECK: fcvtms.2s v0, v0 ; encoding: [0x00,0xb8,0x21,0x0e]
+; CHECK: fcvtms.4s v0, v0 ; encoding: [0x00,0xb8,0x21,0x4e]
+; CHECK: fcvtms.2d v0, v0 ; encoding: [0x00,0xb8,0x61,0x4e]
+; CHECK: fcvtms s0, s0 ; encoding: [0x00,0xb8,0x21,0x5e]
+; CHECK: fcvtms d0, d0 ; encoding: [0x00,0xb8,0x61,0x5e]
+
+ fcvtmu.2s v0, v0
+ fcvtmu.4s v0, v0
+ fcvtmu.2d v0, v0
+ fcvtmu s0, s0
+ fcvtmu d0, d0
+
+; CHECK: fcvtmu.2s v0, v0 ; encoding: [0x00,0xb8,0x21,0x2e]
+; CHECK: fcvtmu.4s v0, v0 ; encoding: [0x00,0xb8,0x21,0x6e]
+; CHECK: fcvtmu.2d v0, v0 ; encoding: [0x00,0xb8,0x61,0x6e]
+; CHECK: fcvtmu s0, s0 ; encoding: [0x00,0xb8,0x21,0x7e]
+; CHECK: fcvtmu d0, d0 ; encoding: [0x00,0xb8,0x61,0x7e]
+
+ fcvtns.2s v0, v0
+ fcvtns.4s v0, v0
+ fcvtns.2d v0, v0
+ fcvtns s0, s0
+ fcvtns d0, d0
+
+; CHECK: fcvtns.2s v0, v0 ; encoding: [0x00,0xa8,0x21,0x0e]
+; CHECK: fcvtns.4s v0, v0 ; encoding: [0x00,0xa8,0x21,0x4e]
+; CHECK: fcvtns.2d v0, v0 ; encoding: [0x00,0xa8,0x61,0x4e]
+; CHECK: fcvtns s0, s0 ; encoding: [0x00,0xa8,0x21,0x5e]
+; CHECK: fcvtns d0, d0 ; encoding: [0x00,0xa8,0x61,0x5e]
+
+ fcvtnu.2s v0, v0
+ fcvtnu.4s v0, v0
+ fcvtnu.2d v0, v0
+ fcvtnu s0, s0
+ fcvtnu d0, d0
+
+; CHECK: fcvtnu.2s v0, v0 ; encoding: [0x00,0xa8,0x21,0x2e]
+; CHECK: fcvtnu.4s v0, v0 ; encoding: [0x00,0xa8,0x21,0x6e]
+; CHECK: fcvtnu.2d v0, v0 ; encoding: [0x00,0xa8,0x61,0x6e]
+; CHECK: fcvtnu s0, s0 ; encoding: [0x00,0xa8,0x21,0x7e]
+; CHECK: fcvtnu d0, d0 ; encoding: [0x00,0xa8,0x61,0x7e]
+
+ fcvtn v2.4h, v4.4s
+ fcvtn v3.2s, v5.2d
+ fcvtn2 v4.8h, v6.4s
+ fcvtn2 v5.4s, v7.2d
+ fcvtxn v6.2s, v9.2d
+ fcvtxn2 v7.4s, v8.2d
+
+; CHECK: fcvtn v2.4h, v4.4s ; encoding: [0x82,0x68,0x21,0x0e]
+; CHECK: fcvtn v3.2s, v5.2d ; encoding: [0xa3,0x68,0x61,0x0e]
+; CHECK: fcvtn2 v4.8h, v6.4s ; encoding: [0xc4,0x68,0x21,0x4e]
+; CHECK: fcvtn2 v5.4s, v7.2d ; encoding: [0xe5,0x68,0x61,0x4e]
+; CHECK: fcvtxn v6.2s, v9.2d ; encoding: [0x26,0x69,0x61,0x2e]
+; CHECK: fcvtxn2 v7.4s, v8.2d ; encoding: [0x07,0x69,0x61,0x6e]
+
+ fcvtps.2s v0, v0
+ fcvtps.4s v0, v0
+ fcvtps.2d v0, v0
+ fcvtps s0, s0
+ fcvtps d0, d0
+
+; CHECK: fcvtps.2s v0, v0 ; encoding: [0x00,0xa8,0xa1,0x0e]
+; CHECK: fcvtps.4s v0, v0 ; encoding: [0x00,0xa8,0xa1,0x4e]
+; CHECK: fcvtps.2d v0, v0 ; encoding: [0x00,0xa8,0xe1,0x4e]
+; CHECK: fcvtps s0, s0 ; encoding: [0x00,0xa8,0xa1,0x5e]
+; CHECK: fcvtps d0, d0 ; encoding: [0x00,0xa8,0xe1,0x5e]
+
+ fcvtpu.2s v0, v0
+ fcvtpu.4s v0, v0
+ fcvtpu.2d v0, v0
+ fcvtpu s0, s0
+ fcvtpu d0, d0
+
+; CHECK: fcvtpu.2s v0, v0 ; encoding: [0x00,0xa8,0xa1,0x2e]
+; CHECK: fcvtpu.4s v0, v0 ; encoding: [0x00,0xa8,0xa1,0x6e]
+; CHECK: fcvtpu.2d v0, v0 ; encoding: [0x00,0xa8,0xe1,0x6e]
+; CHECK: fcvtpu s0, s0 ; encoding: [0x00,0xa8,0xa1,0x7e]
+; CHECK: fcvtpu d0, d0 ; encoding: [0x00,0xa8,0xe1,0x7e]
+
+ fcvtzs.2s v0, v0
+ fcvtzs.4s v0, v0
+ fcvtzs.2d v0, v0
+ fcvtzs s0, s0
+ fcvtzs d0, d0
+
+; CHECK: fcvtzs.2s v0, v0 ; encoding: [0x00,0xb8,0xa1,0x0e]
+; CHECK: fcvtzs.4s v0, v0 ; encoding: [0x00,0xb8,0xa1,0x4e]
+; CHECK: fcvtzs.2d v0, v0 ; encoding: [0x00,0xb8,0xe1,0x4e]
+; CHECK: fcvtzs s0, s0 ; encoding: [0x00,0xb8,0xa1,0x5e]
+; CHECK: fcvtzs d0, d0 ; encoding: [0x00,0xb8,0xe1,0x5e]
+
+ fcvtzu.2s v0, v0
+ fcvtzu.4s v0, v0
+ fcvtzu.2d v0, v0
+ fcvtzu s0, s0
+ fcvtzu d0, d0
+
+; CHECK: fcvtzu.2s v0, v0 ; encoding: [0x00,0xb8,0xa1,0x2e]
+; CHECK: fcvtzu.4s v0, v0 ; encoding: [0x00,0xb8,0xa1,0x6e]
+; CHECK: fcvtzu.2d v0, v0 ; encoding: [0x00,0xb8,0xe1,0x6e]
+; CHECK: fcvtzu s0, s0 ; encoding: [0x00,0xb8,0xa1,0x7e]
+; CHECK: fcvtzu d0, d0 ; encoding: [0x00,0xb8,0xe1,0x7e]
+
+;===-------------------------------------------------------------------------===
+; AdvSIMD modified immediate instructions
+;===-------------------------------------------------------------------------===
+
+ bic.2s v0, #1
+ bic.2s v0, #1, lsl #0
+ bic.2s v0, #1, lsl #8
+ bic.2s v0, #1, lsl #16
+ bic.2s v0, #1, lsl #24
+
+; CHECK: bic.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x2f]
+; CHECK: bic.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x2f]
+; CHECK: bic.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x2f]
+; CHECK: bic.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x2f]
+; CHECK: bic.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x2f]
+
+ bic.4h v0, #1
+ bic.4h v0, #1, lsl #0
+ bic.4h v0, #1, lsl #8
+
+; CHECK: bic.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x2f]
+; CHECK: bic.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x2f]
+; CHECK: bic.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x2f]
+
+ bic.4s v0, #1
+ bic.4s v0, #1, lsl #0
+ bic.4s v0, #1, lsl #8
+ bic.4s v0, #1, lsl #16
+ bic.4s v0, #1, lsl #24
+
+; CHECK: bic.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x6f]
+; CHECK: bic.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x6f]
+; CHECK: bic.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x6f]
+; CHECK: bic.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x6f]
+; CHECK: bic.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x6f]
+
+ bic.8h v0, #1
+ bic.8h v0, #1, lsl #0
+ bic.8h v0, #1, lsl #8
+
+; CHECK: bic.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x6f]
+; CHECK: bic.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x6f]
+; CHECK: bic.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x6f]
+
+ fmov.2d v0, #1.250000e-01
+
+; CHECK: fmov.2d v0, #0.12500000 ; encoding: [0x00,0xf4,0x02,0x6f]
+
+ fmov.2s v0, #1.250000e-01
+ fmov.4s v0, #1.250000e-01
+
+; CHECK: fmov.2s v0, #0.12500000 ; encoding: [0x00,0xf4,0x02,0x0f]
+; CHECK: fmov.4s v0, #0.12500000 ; encoding: [0x00,0xf4,0x02,0x4f]
+
+ orr.2s v0, #1
+ orr.2s v0, #1, lsl #0
+ orr.2s v0, #1, lsl #8
+ orr.2s v0, #1, lsl #16
+ orr.2s v0, #1, lsl #24
+
+; CHECK: orr.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x0f]
+; CHECK: orr.2s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x0f]
+; CHECK: orr.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x0f]
+; CHECK: orr.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x0f]
+; CHECK: orr.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x0f]
+
+ orr.4h v0, #1
+ orr.4h v0, #1, lsl #0
+ orr.4h v0, #1, lsl #8
+
+; CHECK: orr.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x0f]
+; CHECK: orr.4h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x0f]
+; CHECK: orr.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x0f]
+
+ orr.4s v0, #1
+ orr.4s v0, #1, lsl #0
+ orr.4s v0, #1, lsl #8
+ orr.4s v0, #1, lsl #16
+ orr.4s v0, #1, lsl #24
+
+; CHECK: orr.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x4f]
+; CHECK: orr.4s v0, #0x1 ; encoding: [0x20,0x14,0x00,0x4f]
+; CHECK: orr.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x34,0x00,0x4f]
+; CHECK: orr.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x54,0x00,0x4f]
+; CHECK: orr.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x74,0x00,0x4f]
+
+ orr.8h v0, #1
+ orr.8h v0, #1, lsl #0
+ orr.8h v0, #1, lsl #8
+
+; CHECK: orr.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x4f]
+; CHECK: orr.8h v0, #0x1 ; encoding: [0x20,0x94,0x00,0x4f]
+; CHECK: orr.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xb4,0x00,0x4f]
+
+ movi d0, #0x000000000000ff
+ movi.2d v0, #0x000000000000ff
+
+; CHECK: movi d0, #0x000000000000ff ; encoding: [0x20,0xe4,0x00,0x2f]
+; CHECK: movi.2d v0, #0x000000000000ff ; encoding: [0x20,0xe4,0x00,0x6f]
+
+ movi.2s v0, #1
+ movi.2s v0, #1, lsl #0
+ movi.2s v0, #1, lsl #8
+ movi.2s v0, #1, lsl #16
+ movi.2s v0, #1, lsl #24
+
+; CHECK: movi.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x0f]
+; CHECK: movi.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x0f]
+; CHECK: movi.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x0f]
+; CHECK: movi.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x0f]
+; CHECK: movi.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x0f]
+
+ movi.4s v0, #1
+ movi.4s v0, #1, lsl #0
+ movi.4s v0, #1, lsl #8
+ movi.4s v0, #1, lsl #16
+ movi.4s v0, #1, lsl #24
+
+; CHECK: movi.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x4f]
+; CHECK: movi.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x4f]
+; CHECK: movi.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x4f]
+; CHECK: movi.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x4f]
+; CHECK: movi.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x4f]
+
+ movi.4h v0, #1
+ movi.4h v0, #1, lsl #0
+ movi.4h v0, #1, lsl #8
+
+; CHECK: movi.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x0f]
+; CHECK: movi.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x0f]
+; CHECK: movi.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x0f]
+
+ movi.8h v0, #1
+ movi.8h v0, #1, lsl #0
+ movi.8h v0, #1, lsl #8
+
+; CHECK: movi.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x4f]
+; CHECK: movi.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x4f]
+; CHECK: movi.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x4f]
+
+ movi.2s v0, #1, msl #8
+ movi.2s v0, #1, msl #16
+ movi.4s v0, #1, msl #8
+ movi.4s v0, #1, msl #16
+
+; CHECK: movi.2s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x0f]
+; CHECK: movi.2s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x0f]
+; CHECK: movi.4s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x4f]
+; CHECK: movi.4s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x4f]
+
+ movi.8b v0, #1
+ movi.16b v0, #1
+
+; CHECK: movi.8b v0, #0x1 ; encoding: [0x20,0xe4,0x00,0x0f]
+; CHECK: movi.16b v0, #0x1 ; encoding: [0x20,0xe4,0x00,0x4f]
+
+ mvni.2s v0, #1
+ mvni.2s v0, #1, lsl #0
+ mvni.2s v0, #1, lsl #8
+ mvni.2s v0, #1, lsl #16
+ mvni.2s v0, #1, lsl #24
+
+; CHECK: mvni.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x2f]
+; CHECK: mvni.2s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x2f]
+; CHECK: mvni.2s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x2f]
+; CHECK: mvni.2s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x2f]
+; CHECK: mvni.2s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x2f]
+
+ mvni.4s v0, #1
+ mvni.4s v0, #1, lsl #0
+ mvni.4s v0, #1, lsl #8
+ mvni.4s v0, #1, lsl #16
+ mvni.4s v0, #1, lsl #24
+
+; CHECK: mvni.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x6f]
+; CHECK: mvni.4s v0, #0x1 ; encoding: [0x20,0x04,0x00,0x6f]
+; CHECK: mvni.4s v0, #0x1, lsl #8 ; encoding: [0x20,0x24,0x00,0x6f]
+; CHECK: mvni.4s v0, #0x1, lsl #16 ; encoding: [0x20,0x44,0x00,0x6f]
+; CHECK: mvni.4s v0, #0x1, lsl #24 ; encoding: [0x20,0x64,0x00,0x6f]
+
+ mvni.4h v0, #1
+ mvni.4h v0, #1, lsl #0
+ mvni.4h v0, #1, lsl #8
+
+; CHECK: mvni.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x2f]
+; CHECK: mvni.4h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x2f]
+; CHECK: mvni.4h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x2f]
+
+ mvni.8h v0, #1
+ mvni.8h v0, #1, lsl #0
+ mvni.8h v0, #1, lsl #8
+
+; CHECK: mvni.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x6f]
+; CHECK: mvni.8h v0, #0x1 ; encoding: [0x20,0x84,0x00,0x6f]
+; CHECK: mvni.8h v0, #0x1, lsl #8 ; encoding: [0x20,0xa4,0x00,0x6f]
+
+ mvni.2s v0, #1, msl #8
+ mvni.2s v0, #1, msl #16
+ mvni.4s v0, #1, msl #8
+ mvni.4s v0, #1, msl #16
+
+; CHECK: mvni.2s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x2f]
+; CHECK: mvni.2s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x2f]
+; CHECK: mvni.4s v0, #0x1, msl #8 ; encoding: [0x20,0xc4,0x00,0x6f]
+; CHECK: mvni.4s v0, #0x1, msl #16 ; encoding: [0x20,0xd4,0x00,0x6f]
+
+;===-------------------------------------------------------------------------===
+; AdvSIMD scalar x index
+;===-------------------------------------------------------------------------===
+
+ fmla.s s0, s0, v0[3]
+ fmla.d d0, d0, v0[1]
+ fmls.s s0, s0, v0[3]
+ fmls.d d0, d0, v0[1]
+ fmulx.s s0, s0, v0[3]
+ fmulx.d d0, d0, v0[1]
+ fmul.s s0, s0, v0[3]
+ fmul.d d0, d0, v0[1]
+ sqdmlal.h s0, h0, v0[7]
+ sqdmlal.s d0, s0, v0[3]
+ sqdmlsl.h s0, h0, v0[7]
+ sqdmulh.h h0, h0, v0[7]
+ sqdmulh.s s0, s0, v0[3]
+ sqdmull.h s0, h0, v0[7]
+ sqdmull.s d0, s0, v0[3]
+ sqrdmulh.h h0, h0, v0[7]
+ sqrdmulh.s s0, s0, v0[3]
+
+; CHECK: fmla.s s0, s0, v0[3] ; encoding: [0x00,0x18,0xa0,0x5f]
+; CHECK: fmla.d d0, d0, v0[1] ; encoding: [0x00,0x18,0xc0,0x5f]
+; CHECK: fmls.s s0, s0, v0[3] ; encoding: [0x00,0x58,0xa0,0x5f]
+; CHECK: fmls.d d0, d0, v0[1] ; encoding: [0x00,0x58,0xc0,0x5f]
+; CHECK: fmulx.s s0, s0, v0[3] ; encoding: [0x00,0x98,0xa0,0x7f]
+; CHECK: fmulx.d d0, d0, v0[1] ; encoding: [0x00,0x98,0xc0,0x7f]
+; CHECK: fmul.s s0, s0, v0[3] ; encoding: [0x00,0x98,0xa0,0x5f]
+; CHECK: fmul.d d0, d0, v0[1] ; encoding: [0x00,0x98,0xc0,0x5f]
+; CHECK: sqdmlal.h s0, h0, v0[7] ; encoding: [0x00,0x38,0x70,0x5f]
+; CHECK: sqdmlal.s d0, s0, v0[3] ; encoding: [0x00,0x38,0xa0,0x5f]
+; CHECK: sqdmlsl.h s0, h0, v0[7] ; encoding: [0x00,0x78,0x70,0x5f]
+; CHECK: sqdmulh.h h0, h0, v0[7] ; encoding: [0x00,0xc8,0x70,0x5f]
+; CHECK: sqdmulh.s s0, s0, v0[3] ; encoding: [0x00,0xc8,0xa0,0x5f]
+; CHECK: sqdmull.h s0, h0, v0[7] ; encoding: [0x00,0xb8,0x70,0x5f]
+; CHECK: sqdmull.s d0, s0, v0[3] ; encoding: [0x00,0xb8,0xa0,0x5f]
+; CHECK: sqrdmulh.h h0, h0, v0[7] ; encoding: [0x00,0xd8,0x70,0x5f]
+; CHECK: sqrdmulh.s s0, s0, v0[3] ; encoding: [0x00,0xd8,0xa0,0x5f]
+
+;===-------------------------------------------------------------------------===
+; AdvSIMD SMLAL
+;===-------------------------------------------------------------------------===
+ smlal.8h v1, v2, v3
+ smlal.4s v1, v2, v3
+ smlal.2d v1, v2, v3
+ smlal2.8h v1, v2, v3
+ smlal2.4s v1, v2, v3
+ smlal2.2d v1, v2, v3
+
+ smlal v13.8h, v8.8b, v0.8b
+ smlal v13.4s, v8.4h, v0.4h
+ smlal v13.2d, v8.2s, v0.2s
+ smlal2 v13.8h, v8.16b, v0.16b
+ smlal2 v13.4s, v8.8h, v0.8h
+ smlal2 v13.2d, v8.4s, v0.4s
+
+; CHECK: smlal.8h v1, v2, v3 ; encoding: [0x41,0x80,0x23,0x0e]
+; CHECK: smlal.4s v1, v2, v3 ; encoding: [0x41,0x80,0x63,0x0e]
+; CHECK: smlal.2d v1, v2, v3 ; encoding: [0x41,0x80,0xa3,0x0e]
+; CHECK: smlal2.8h v1, v2, v3 ; encoding: [0x41,0x80,0x23,0x4e]
+; CHECK: smlal2.4s v1, v2, v3 ; encoding: [0x41,0x80,0x63,0x4e]
+; CHECK: smlal2.2d v1, v2, v3 ; encoding: [0x41,0x80,0xa3,0x4e]
+; CHECK: smlal.8h v13, v8, v0 ; encoding: [0x0d,0x81,0x20,0x0e]
+; CHECK: smlal.4s v13, v8, v0 ; encoding: [0x0d,0x81,0x60,0x0e]
+; CHECK: smlal.2d v13, v8, v0 ; encoding: [0x0d,0x81,0xa0,0x0e]
+; CHECK: smlal2.8h v13, v8, v0 ; encoding: [0x0d,0x81,0x20,0x4e]
+; CHECK: smlal2.4s v13, v8, v0 ; encoding: [0x0d,0x81,0x60,0x4e]
+; CHECK: smlal2.2d v13, v8, v0 ; encoding: [0x0d,0x81,0xa0,0x4e]
+
+
+;===-------------------------------------------------------------------------===
+; AdvSIMD vector x index
+;===-------------------------------------------------------------------------===
+
+ fmla.2s v0, v0, v0[0]
+ fmla.4s v0, v0, v0[1]
+ fmla.2d v0, v0, v0[1]
+ fmls.2s v0, v0, v0[0]
+ fmls.4s v0, v0, v0[1]
+ fmls.2d v0, v0, v0[1]
+ fmulx.2s v0, v0, v0[0]
+ fmulx.4s v0, v0, v0[1]
+ fmulx.2d v0, v0, v0[1]
+ fmul.2s v0, v0, v0[0]
+ fmul.4s v0, v0, v0[1]
+ fmul.2d v0, v0, v0[1]
+ mla.4h v0, v0, v0[0]
+ mla.8h v0, v0, v0[1]
+ mla.2s v0, v0, v0[2]
+ mla.4s v0, v0, v0[3]
+ mls.4h v0, v0, v0[0]
+ mls.8h v0, v0, v0[1]
+ mls.2s v0, v0, v0[2]
+ mls.4s v0, v0, v0[3]
+ mul.4h v0, v0, v0[0]
+ mul.8h v0, v0, v0[1]
+ mul.2s v0, v0, v0[2]
+ mul.4s v0, v0, v0[3]
+ smlal.4s v0, v0, v0[0]
+ smlal2.4s v0, v0, v0[1]
+ smlal.2d v0, v0, v0[2]
+ smlal2.2d v0, v0, v0[3]
+ smlsl.4s v0, v0, v0[0]
+ smlsl2.4s v0, v0, v0[1]
+ smlsl.2d v0, v0, v0[2]
+ smlsl2.2d v0, v0, v0[3]
+ smull.4s v0, v0, v0[0]
+ smull2.4s v0, v0, v0[1]
+ smull.2d v0, v0, v0[2]
+ smull2.2d v0, v0, v0[3]
+ sqdmlal.4s v0, v0, v0[0]
+ sqdmlal2.4s v0, v0, v0[1]
+ sqdmlal.2d v0, v0, v0[2]
+ sqdmlal2.2d v0, v0, v0[3]
+ sqdmlsl.4s v0, v0, v0[0]
+ sqdmlsl2.4s v0, v0, v0[1]
+ sqdmlsl.2d v0, v0, v0[2]
+ sqdmlsl2.2d v0, v0, v0[3]
+ sqdmulh.4h v0, v0, v0[0]
+ sqdmulh.8h v0, v0, v0[1]
+ sqdmulh.2s v0, v0, v0[2]
+ sqdmulh.4s v0, v0, v0[3]
+ sqdmull.4s v0, v0, v0[0]
+ sqdmull2.4s v0, v0, v0[1]
+ sqdmull.2d v0, v0, v0[2]
+ sqdmull2.2d v0, v0, v0[3]
+ sqrdmulh.4h v0, v0, v0[0]
+ sqrdmulh.8h v0, v0, v0[1]
+ sqrdmulh.2s v0, v0, v0[2]
+ sqrdmulh.4s v0, v0, v0[3]
+ umlal.4s v0, v0, v0[0]
+ umlal2.4s v0, v0, v0[1]
+ umlal.2d v0, v0, v0[2]
+ umlal2.2d v0, v0, v0[3]
+ umlsl.4s v0, v0, v0[0]
+ umlsl2.4s v0, v0, v0[1]
+ umlsl.2d v0, v0, v0[2]
+ umlsl2.2d v0, v0, v0[3]
+ umull.4s v0, v0, v0[0]
+ umull2.4s v0, v0, v0[1]
+ umull.2d v0, v0, v0[2]
+ umull2.2d v0, v0, v0[3]
+
+; CHECK: fmla.2s v0, v0, v0[0] ; encoding: [0x00,0x10,0x80,0x0f]
+; CHECK: fmla.4s v0, v0, v0[1] ; encoding: [0x00,0x10,0xa0,0x4f]
+; CHECK: fmla.2d v0, v0, v0[1] ; encoding: [0x00,0x18,0xc0,0x4f]
+; CHECK: fmls.2s v0, v0, v0[0] ; encoding: [0x00,0x50,0x80,0x0f]
+; CHECK: fmls.4s v0, v0, v0[1] ; encoding: [0x00,0x50,0xa0,0x4f]
+; CHECK: fmls.2d v0, v0, v0[1] ; encoding: [0x00,0x58,0xc0,0x4f]
+; CHECK: fmulx.2s v0, v0, v0[0] ; encoding: [0x00,0x90,0x80,0x2f]
+; CHECK: fmulx.4s v0, v0, v0[1] ; encoding: [0x00,0x90,0xa0,0x6f]
+; CHECK: fmulx.2d v0, v0, v0[1] ; encoding: [0x00,0x98,0xc0,0x6f]
+; CHECK: fmul.2s v0, v0, v0[0] ; encoding: [0x00,0x90,0x80,0x0f]
+; CHECK: fmul.4s v0, v0, v0[1] ; encoding: [0x00,0x90,0xa0,0x4f]
+; CHECK: fmul.2d v0, v0, v0[1] ; encoding: [0x00,0x98,0xc0,0x4f]
+; CHECK: mla.4h v0, v0, v0[0] ; encoding: [0x00,0x00,0x40,0x2f]
+; CHECK: mla.8h v0, v0, v0[1] ; encoding: [0x00,0x00,0x50,0x6f]
+; CHECK: mla.2s v0, v0, v0[2] ; encoding: [0x00,0x08,0x80,0x2f]
+; CHECK: mla.4s v0, v0, v0[3] ; encoding: [0x00,0x08,0xa0,0x6f]
+; CHECK: mls.4h v0, v0, v0[0] ; encoding: [0x00,0x40,0x40,0x2f]
+; CHECK: mls.8h v0, v0, v0[1] ; encoding: [0x00,0x40,0x50,0x6f]
+; CHECK: mls.2s v0, v0, v0[2] ; encoding: [0x00,0x48,0x80,0x2f]
+; CHECK: mls.4s v0, v0, v0[3] ; encoding: [0x00,0x48,0xa0,0x6f]
+; CHECK: mul.4h v0, v0, v0[0] ; encoding: [0x00,0x80,0x40,0x0f]
+; CHECK: mul.8h v0, v0, v0[1] ; encoding: [0x00,0x80,0x50,0x4f]
+; CHECK: mul.2s v0, v0, v0[2] ; encoding: [0x00,0x88,0x80,0x0f]
+; CHECK: mul.4s v0, v0, v0[3] ; encoding: [0x00,0x88,0xa0,0x4f]
+; CHECK: smlal.4s v0, v0, v0[0] ; encoding: [0x00,0x20,0x40,0x0f]
+; CHECK: smlal2.4s v0, v0, v0[1] ; encoding: [0x00,0x20,0x50,0x4f]
+; CHECK: smlal.2d v0, v0, v0[2] ; encoding: [0x00,0x28,0x80,0x0f]
+; CHECK: smlal2.2d v0, v0, v0[3] ; encoding: [0x00,0x28,0xa0,0x4f]
+; CHECK: smlsl.4s v0, v0, v0[0] ; encoding: [0x00,0x60,0x40,0x0f]
+; CHECK: smlsl2.4s v0, v0, v0[1] ; encoding: [0x00,0x60,0x50,0x4f]
+; CHECK: smlsl.2d v0, v0, v0[2] ; encoding: [0x00,0x68,0x80,0x0f]
+; CHECK: smlsl2.2d v0, v0, v0[3] ; encoding: [0x00,0x68,0xa0,0x4f]
+; CHECK: smull.4s v0, v0, v0[0] ; encoding: [0x00,0xa0,0x40,0x0f]
+; CHECK: smull2.4s v0, v0, v0[1] ; encoding: [0x00,0xa0,0x50,0x4f]
+; CHECK: smull.2d v0, v0, v0[2] ; encoding: [0x00,0xa8,0x80,0x0f]
+; CHECK: smull2.2d v0, v0, v0[3] ; encoding: [0x00,0xa8,0xa0,0x4f]
+; CHECK: sqdmlal.4s v0, v0, v0[0] ; encoding: [0x00,0x30,0x40,0x0f]
+; CHECK: sqdmlal2.4s v0, v0, v0[1] ; encoding: [0x00,0x30,0x50,0x4f]
+; CHECK: sqdmlal.2d v0, v0, v0[2] ; encoding: [0x00,0x38,0x80,0x0f]
+; CHECK: sqdmlal2.2d v0, v0, v0[3] ; encoding: [0x00,0x38,0xa0,0x4f]
+; CHECK: sqdmlsl.4s v0, v0, v0[0] ; encoding: [0x00,0x70,0x40,0x0f]
+; CHECK: sqdmlsl2.4s v0, v0, v0[1] ; encoding: [0x00,0x70,0x50,0x4f]
+; CHECK: sqdmlsl.2d v0, v0, v0[2] ; encoding: [0x00,0x78,0x80,0x0f]
+; CHECK: sqdmlsl2.2d v0, v0, v0[3] ; encoding: [0x00,0x78,0xa0,0x4f]
+; CHECK: sqdmulh.4h v0, v0, v0[0] ; encoding: [0x00,0xc0,0x40,0x0f]
+; CHECK: sqdmulh.8h v0, v0, v0[1] ; encoding: [0x00,0xc0,0x50,0x4f]
+; CHECK: sqdmulh.2s v0, v0, v0[2] ; encoding: [0x00,0xc8,0x80,0x0f]
+; CHECK: sqdmulh.4s v0, v0, v0[3] ; encoding: [0x00,0xc8,0xa0,0x4f]
+; CHECK: sqdmull.4s v0, v0, v0[0] ; encoding: [0x00,0xb0,0x40,0x0f]
+; CHECK: sqdmull2.4s v0, v0, v0[1] ; encoding: [0x00,0xb0,0x50,0x4f]
+; CHECK: sqdmull.2d v0, v0, v0[2] ; encoding: [0x00,0xb8,0x80,0x0f]
+; CHECK: sqdmull2.2d v0, v0, v0[3] ; encoding: [0x00,0xb8,0xa0,0x4f]
+; CHECK: sqrdmulh.4h v0, v0, v0[0] ; encoding: [0x00,0xd0,0x40,0x0f]
+; CHECK: sqrdmulh.8h v0, v0, v0[1] ; encoding: [0x00,0xd0,0x50,0x4f]
+; CHECK: sqrdmulh.2s v0, v0, v0[2] ; encoding: [0x00,0xd8,0x80,0x0f]
+; CHECK: sqrdmulh.4s v0, v0, v0[3] ; encoding: [0x00,0xd8,0xa0,0x4f]
+; CHECK: umlal.4s v0, v0, v0[0] ; encoding: [0x00,0x20,0x40,0x2f]
+; CHECK: umlal2.4s v0, v0, v0[1] ; encoding: [0x00,0x20,0x50,0x6f]
+; CHECK: umlal.2d v0, v0, v0[2] ; encoding: [0x00,0x28,0x80,0x2f]
+; CHECK: umlal2.2d v0, v0, v0[3] ; encoding: [0x00,0x28,0xa0,0x6f]
+; CHECK: umlsl.4s v0, v0, v0[0] ; encoding: [0x00,0x60,0x40,0x2f]
+; CHECK: umlsl2.4s v0, v0, v0[1] ; encoding: [0x00,0x60,0x50,0x6f]
+; CHECK: umlsl.2d v0, v0, v0[2] ; encoding: [0x00,0x68,0x80,0x2f]
+; CHECK: umlsl2.2d v0, v0, v0[3] ; encoding: [0x00,0x68,0xa0,0x6f]
+; CHECK: umull.4s v0, v0, v0[0] ; encoding: [0x00,0xa0,0x40,0x2f]
+; CHECK: umull2.4s v0, v0, v0[1] ; encoding: [0x00,0xa0,0x50,0x6f]
+; CHECK: umull.2d v0, v0, v0[2] ; encoding: [0x00,0xa8,0x80,0x2f]
+; CHECK: umull2.2d v0, v0, v0[3] ; encoding: [0x00,0xa8,0xa0,0x6f]
+
+
+;===-------------------------------------------------------------------------===
+; AdvSIMD scalar with shift
+;===-------------------------------------------------------------------------===
+
+ fcvtzs s0, s0, #1
+ fcvtzs d0, d0, #2
+ fcvtzu s0, s0, #1
+ fcvtzu d0, d0, #2
+ shl d0, d0, #1
+ sli d0, d0, #1
+ sqrshrn b0, h0, #1
+ sqrshrn h0, s0, #2
+ sqrshrn s0, d0, #3
+ sqrshrun b0, h0, #1
+ sqrshrun h0, s0, #2
+ sqrshrun s0, d0, #3
+ sqshlu b0, b0, #1
+ sqshlu h0, h0, #2
+ sqshlu s0, s0, #3
+ sqshlu d0, d0, #4
+ sqshl b0, b0, #1
+ sqshl h0, h0, #2
+ sqshl s0, s0, #3
+ sqshl d0, d0, #4
+ sqshrn b0, h0, #1
+ sqshrn h0, s0, #2
+ sqshrn s0, d0, #3
+ sqshrun b0, h0, #1
+ sqshrun h0, s0, #2
+ sqshrun s0, d0, #3
+ sri d0, d0, #1
+ srshr d0, d0, #1
+ srsra d0, d0, #1
+ sshr d0, d0, #1
+ ucvtf s0, s0, #1
+ ucvtf d0, d0, #2
+ scvtf s0, s0, #1
+ scvtf d0, d0, #2
+ uqrshrn b0, h0, #1
+ uqrshrn h0, s0, #2
+ uqrshrn s0, d0, #3
+ uqshl b0, b0, #1
+ uqshl h0, h0, #2
+ uqshl s0, s0, #3
+ uqshl d0, d0, #4
+ uqshrn b0, h0, #1
+ uqshrn h0, s0, #2
+ uqshrn s0, d0, #3
+ urshr d0, d0, #1
+ ursra d0, d0, #1
+ ushr d0, d0, #1
+ usra d0, d0, #1
+
+; CHECK: fcvtzs s0, s0, #1 ; encoding: [0x00,0xfc,0x3f,0x5f]
+; CHECK: fcvtzs d0, d0, #2 ; encoding: [0x00,0xfc,0x7e,0x5f]
+; CHECK: fcvtzu s0, s0, #1 ; encoding: [0x00,0xfc,0x3f,0x7f]
+; CHECK: fcvtzu d0, d0, #2 ; encoding: [0x00,0xfc,0x7e,0x7f]
+; CHECK: shl d0, d0, #1 ; encoding: [0x00,0x54,0x41,0x5f]
+; CHECK: sli d0, d0, #1 ; encoding: [0x00,0x54,0x41,0x7f]
+; CHECK: sqrshrn b0, h0, #1 ; encoding: [0x00,0x9c,0x0f,0x5f]
+; CHECK: sqrshrn h0, s0, #2 ; encoding: [0x00,0x9c,0x1e,0x5f]
+; CHECK: sqrshrn s0, d0, #3 ; encoding: [0x00,0x9c,0x3d,0x5f]
+; CHECK: sqrshrun b0, h0, #1 ; encoding: [0x00,0x8c,0x0f,0x7f]
+; CHECK: sqrshrun h0, s0, #2 ; encoding: [0x00,0x8c,0x1e,0x7f]
+; CHECK: sqrshrun s0, d0, #3 ; encoding: [0x00,0x8c,0x3d,0x7f]
+; CHECK: sqshlu b0, b0, #1 ; encoding: [0x00,0x64,0x09,0x7f]
+; CHECK: sqshlu h0, h0, #2 ; encoding: [0x00,0x64,0x12,0x7f]
+; CHECK: sqshlu s0, s0, #3 ; encoding: [0x00,0x64,0x23,0x7f]
+; CHECK: sqshlu d0, d0, #4 ; encoding: [0x00,0x64,0x44,0x7f]
+; CHECK: sqshl b0, b0, #1 ; encoding: [0x00,0x74,0x09,0x5f]
+; CHECK: sqshl h0, h0, #2 ; encoding: [0x00,0x74,0x12,0x5f]
+; CHECK: sqshl s0, s0, #3 ; encoding: [0x00,0x74,0x23,0x5f]
+; CHECK: sqshl d0, d0, #4 ; encoding: [0x00,0x74,0x44,0x5f]
+; CHECK: sqshrn b0, h0, #1 ; encoding: [0x00,0x94,0x0f,0x5f]
+; CHECK: sqshrn h0, s0, #2 ; encoding: [0x00,0x94,0x1e,0x5f]
+; CHECK: sqshrn s0, d0, #3 ; encoding: [0x00,0x94,0x3d,0x5f]
+; CHECK: sqshrun b0, h0, #1 ; encoding: [0x00,0x84,0x0f,0x7f]
+; CHECK: sqshrun h0, s0, #2 ; encoding: [0x00,0x84,0x1e,0x7f]
+; CHECK: sqshrun s0, d0, #3 ; encoding: [0x00,0x84,0x3d,0x7f]
+; CHECK: sri d0, d0, #1 ; encoding: [0x00,0x44,0x7f,0x7f]
+; CHECK: srshr d0, d0, #1 ; encoding: [0x00,0x24,0x7f,0x5f]
+; CHECK: srsra d0, d0, #1 ; encoding: [0x00,0x34,0x7f,0x5f]
+; CHECK: sshr d0, d0, #1 ; encoding: [0x00,0x04,0x7f,0x5f]
+; CHECK: ucvtf s0, s0, #1 ; encoding: [0x00,0xe4,0x3f,0x7f]
+; CHECK: ucvtf d0, d0, #2 ; encoding: [0x00,0xe4,0x7e,0x7f]
+; CHECK: scvtf s0, s0, #1 ; encoding: [0x00,0xe4,0x3f,0x5f]
+; CHECK: scvtf d0, d0, #2 ; encoding: [0x00,0xe4,0x7e,0x5f]
+; CHECK: uqrshrn b0, h0, #1 ; encoding: [0x00,0x9c,0x0f,0x7f]
+; CHECK: uqrshrn h0, s0, #2 ; encoding: [0x00,0x9c,0x1e,0x7f]
+; CHECK: uqrshrn s0, d0, #3 ; encoding: [0x00,0x9c,0x3d,0x7f]
+; CHECK: uqshl b0, b0, #1 ; encoding: [0x00,0x74,0x09,0x7f]
+; CHECK: uqshl h0, h0, #2 ; encoding: [0x00,0x74,0x12,0x7f]
+; CHECK: uqshl s0, s0, #3 ; encoding: [0x00,0x74,0x23,0x7f]
+; CHECK: uqshl d0, d0, #4 ; encoding: [0x00,0x74,0x44,0x7f]
+; CHECK: uqshrn b0, h0, #1 ; encoding: [0x00,0x94,0x0f,0x7f]
+; CHECK: uqshrn h0, s0, #2 ; encoding: [0x00,0x94,0x1e,0x7f]
+; CHECK: uqshrn s0, d0, #3 ; encoding: [0x00,0x94,0x3d,0x7f]
+; CHECK: urshr d0, d0, #1 ; encoding: [0x00,0x24,0x7f,0x7f]
+; CHECK: ursra d0, d0, #1 ; encoding: [0x00,0x34,0x7f,0x7f]
+; CHECK: ushr d0, d0, #1 ; encoding: [0x00,0x04,0x7f,0x7f]
+; CHECK: usra d0, d0, #1 ; encoding: [0x00,0x14,0x7f,0x7f]
+
+
+;===-------------------------------------------------------------------------===
+; AdvSIMD vector with shift
+;===-------------------------------------------------------------------------===
+
+ fcvtzs.2s v0, v0, #1
+ fcvtzs.4s v0, v0, #2
+ fcvtzs.2d v0, v0, #3
+ fcvtzu.2s v0, v0, #1
+ fcvtzu.4s v0, v0, #2
+ fcvtzu.2d v0, v0, #3
+ rshrn.8b v0, v0, #1
+ rshrn2.16b v0, v0, #2
+ rshrn.4h v0, v0, #3
+ rshrn2.8h v0, v0, #4
+ rshrn.2s v0, v0, #5
+ rshrn2.4s v0, v0, #6
+ scvtf.2s v0, v0, #1
+ scvtf.4s v0, v0, #2
+ scvtf.2d v0, v0, #3
+ shl.8b v0, v0, #1
+ shl.16b v0, v0, #2
+ shl.4h v0, v0, #3
+ shl.8h v0, v0, #4
+ shl.2s v0, v0, #5
+ shl.4s v0, v0, #6
+ shl.2d v0, v0, #7
+ shrn.8b v0, v0, #1
+ shrn2.16b v0, v0, #2
+ shrn.4h v0, v0, #3
+ shrn2.8h v0, v0, #4
+ shrn.2s v0, v0, #5
+ shrn2.4s v0, v0, #6
+ sli.8b v0, v0, #1
+ sli.16b v0, v0, #2
+ sli.4h v0, v0, #3
+ sli.8h v0, v0, #4
+ sli.2s v0, v0, #5
+ sli.4s v0, v0, #6
+ sli.2d v0, v0, #7
+ sqrshrn.8b v0, v0, #1
+ sqrshrn2.16b v0, v0, #2
+ sqrshrn.4h v0, v0, #3
+ sqrshrn2.8h v0, v0, #4
+ sqrshrn.2s v0, v0, #5
+ sqrshrn2.4s v0, v0, #6
+ sqrshrun.8b v0, v0, #1
+ sqrshrun2.16b v0, v0, #2
+ sqrshrun.4h v0, v0, #3
+ sqrshrun2.8h v0, v0, #4
+ sqrshrun.2s v0, v0, #5
+ sqrshrun2.4s v0, v0, #6
+ sqshlu.8b v0, v0, #1
+ sqshlu.16b v0, v0, #2
+ sqshlu.4h v0, v0, #3
+ sqshlu.8h v0, v0, #4
+ sqshlu.2s v0, v0, #5
+ sqshlu.4s v0, v0, #6
+ sqshlu.2d v0, v0, #7
+ sqshl.8b v0, v0, #1
+ sqshl.16b v0, v0, #2
+ sqshl.4h v0, v0, #3
+ sqshl.8h v0, v0, #4
+ sqshl.2s v0, v0, #5
+ sqshl.4s v0, v0, #6
+ sqshl.2d v0, v0, #7
+ sqshrn.8b v0, v0, #1
+ sqshrn2.16b v0, v0, #2
+ sqshrn.4h v0, v0, #3
+ sqshrn2.8h v0, v0, #4
+ sqshrn.2s v0, v0, #5
+ sqshrn2.4s v0, v0, #6
+ sqshrun.8b v0, v0, #1
+ sqshrun2.16b v0, v0, #2
+ sqshrun.4h v0, v0, #3
+ sqshrun2.8h v0, v0, #4
+ sqshrun.2s v0, v0, #5
+ sqshrun2.4s v0, v0, #6
+ sri.8b v0, v0, #1
+ sri.16b v0, v0, #2
+ sri.4h v0, v0, #3
+ sri.8h v0, v0, #4
+ sri.2s v0, v0, #5
+ sri.4s v0, v0, #6
+ sri.2d v0, v0, #7
+ srshr.8b v0, v0, #1
+ srshr.16b v0, v0, #2
+ srshr.4h v0, v0, #3
+ srshr.8h v0, v0, #4
+ srshr.2s v0, v0, #5
+ srshr.4s v0, v0, #6
+ srshr.2d v0, v0, #7
+ srsra.8b v0, v0, #1
+ srsra.16b v0, v0, #2
+ srsra.4h v0, v0, #3
+ srsra.8h v0, v0, #4
+ srsra.2s v0, v0, #5
+ srsra.4s v0, v0, #6
+ srsra.2d v0, v0, #7
+ sshll.8h v0, v0, #1
+ sshll2.8h v0, v0, #2
+ sshll.4s v0, v0, #3
+ sshll2.4s v0, v0, #4
+ sshll.2d v0, v0, #5
+ sshll2.2d v0, v0, #6
+ sshr.8b v0, v0, #1
+ sshr.16b v0, v0, #2
+ sshr.4h v0, v0, #3
+ sshr.8h v0, v0, #4
+ sshr.2s v0, v0, #5
+ sshr.4s v0, v0, #6
+ sshr.2d v0, v0, #7
+  ssra.8b v0, v0, #1
+ ssra.16b v0, v0, #2
+ ssra.4h v0, v0, #3
+ ssra.8h v0, v0, #4
+ ssra.2s v0, v0, #5
+ ssra.4s v0, v0, #6
+ ssra.2d v0, v0, #7
+ ssra d0, d0, #64
+ ucvtf.2s v0, v0, #1
+ ucvtf.4s v0, v0, #2
+ ucvtf.2d v0, v0, #3
+ uqrshrn.8b v0, v0, #1
+ uqrshrn2.16b v0, v0, #2
+ uqrshrn.4h v0, v0, #3
+ uqrshrn2.8h v0, v0, #4
+ uqrshrn.2s v0, v0, #5
+ uqrshrn2.4s v0, v0, #6
+ uqshl.8b v0, v0, #1
+ uqshl.16b v0, v0, #2
+ uqshl.4h v0, v0, #3
+ uqshl.8h v0, v0, #4
+ uqshl.2s v0, v0, #5
+ uqshl.4s v0, v0, #6
+ uqshl.2d v0, v0, #7
+ uqshrn.8b v0, v0, #1
+ uqshrn2.16b v0, v0, #2
+ uqshrn.4h v0, v0, #3
+ uqshrn2.8h v0, v0, #4
+ uqshrn.2s v0, v0, #5
+ uqshrn2.4s v0, v0, #6
+ urshr.8b v0, v0, #1
+ urshr.16b v0, v0, #2
+ urshr.4h v0, v0, #3
+ urshr.8h v0, v0, #4
+ urshr.2s v0, v0, #5
+ urshr.4s v0, v0, #6
+ urshr.2d v0, v0, #7
+ ursra.8b v0, v0, #1
+ ursra.16b v0, v0, #2
+ ursra.4h v0, v0, #3
+ ursra.8h v0, v0, #4
+ ursra.2s v0, v0, #5
+ ursra.4s v0, v0, #6
+ ursra.2d v0, v0, #7
+ ushll.8h v0, v0, #1
+ ushll2.8h v0, v0, #2
+ ushll.4s v0, v0, #3
+ ushll2.4s v0, v0, #4
+ ushll.2d v0, v0, #5
+ ushll2.2d v0, v0, #6
+ ushr.8b v0, v0, #1
+ ushr.16b v0, v0, #2
+ ushr.4h v0, v0, #3
+ ushr.8h v0, v0, #4
+ ushr.2s v0, v0, #5
+ ushr.4s v0, v0, #6
+ ushr.2d v0, v0, #7
+ usra.8b v0, v0, #1
+ usra.16b v0, v0, #2
+ usra.4h v0, v0, #3
+ usra.8h v0, v0, #4
+ usra.2s v0, v0, #5
+ usra.4s v0, v0, #6
+ usra.2d v0, v0, #7
+
+; CHECK: fcvtzs.2s v0, v0, #1 ; encoding: [0x00,0xfc,0x3f,0x0f]
+; CHECK: fcvtzs.4s v0, v0, #2 ; encoding: [0x00,0xfc,0x3e,0x4f]
+; CHECK: fcvtzs.2d v0, v0, #3 ; encoding: [0x00,0xfc,0x7d,0x4f]
+; CHECK: fcvtzu.2s v0, v0, #1 ; encoding: [0x00,0xfc,0x3f,0x2f]
+; CHECK: fcvtzu.4s v0, v0, #2 ; encoding: [0x00,0xfc,0x3e,0x6f]
+; CHECK: fcvtzu.2d v0, v0, #3 ; encoding: [0x00,0xfc,0x7d,0x6f]
+; CHECK: rshrn.8b v0, v0, #1 ; encoding: [0x00,0x8c,0x0f,0x0f]
+; CHECK: rshrn2.16b v0, v0, #2 ; encoding: [0x00,0x8c,0x0e,0x4f]
+; CHECK: rshrn.4h v0, v0, #3 ; encoding: [0x00,0x8c,0x1d,0x0f]
+; CHECK: rshrn2.8h v0, v0, #4 ; encoding: [0x00,0x8c,0x1c,0x4f]
+; CHECK: rshrn.2s v0, v0, #5 ; encoding: [0x00,0x8c,0x3b,0x0f]
+; CHECK: rshrn2.4s v0, v0, #6 ; encoding: [0x00,0x8c,0x3a,0x4f]
+; CHECK: scvtf.2s v0, v0, #1 ; encoding: [0x00,0xe4,0x3f,0x0f]
+; CHECK: scvtf.4s v0, v0, #2 ; encoding: [0x00,0xe4,0x3e,0x4f]
+; CHECK: scvtf.2d v0, v0, #3 ; encoding: [0x00,0xe4,0x7d,0x4f]
+; CHECK: shl.8b v0, v0, #1 ; encoding: [0x00,0x54,0x09,0x0f]
+; CHECK: shl.16b v0, v0, #2 ; encoding: [0x00,0x54,0x0a,0x4f]
+; CHECK: shl.4h v0, v0, #3 ; encoding: [0x00,0x54,0x13,0x0f]
+; CHECK: shl.8h v0, v0, #4 ; encoding: [0x00,0x54,0x14,0x4f]
+; CHECK: shl.2s v0, v0, #5 ; encoding: [0x00,0x54,0x25,0x0f]
+; CHECK: shl.4s v0, v0, #6 ; encoding: [0x00,0x54,0x26,0x4f]
+; CHECK: shl.2d v0, v0, #7 ; encoding: [0x00,0x54,0x47,0x4f]
+; CHECK: shrn.8b v0, v0, #1 ; encoding: [0x00,0x84,0x0f,0x0f]
+; CHECK: shrn2.16b v0, v0, #2 ; encoding: [0x00,0x84,0x0e,0x4f]
+; CHECK: shrn.4h v0, v0, #3 ; encoding: [0x00,0x84,0x1d,0x0f]
+; CHECK: shrn2.8h v0, v0, #4 ; encoding: [0x00,0x84,0x1c,0x4f]
+; CHECK: shrn.2s v0, v0, #5 ; encoding: [0x00,0x84,0x3b,0x0f]
+; CHECK: shrn2.4s v0, v0, #6 ; encoding: [0x00,0x84,0x3a,0x4f]
+; CHECK: sli.8b v0, v0, #1 ; encoding: [0x00,0x54,0x09,0x2f]
+; CHECK: sli.16b v0, v0, #2 ; encoding: [0x00,0x54,0x0a,0x6f]
+; CHECK: sli.4h v0, v0, #3 ; encoding: [0x00,0x54,0x13,0x2f]
+; CHECK: sli.8h v0, v0, #4 ; encoding: [0x00,0x54,0x14,0x6f]
+; CHECK: sli.2s v0, v0, #5 ; encoding: [0x00,0x54,0x25,0x2f]
+; CHECK: sli.4s v0, v0, #6 ; encoding: [0x00,0x54,0x26,0x6f]
+; CHECK: sli.2d v0, v0, #7 ; encoding: [0x00,0x54,0x47,0x6f]
+; CHECK: sqrshrn.8b v0, v0, #1 ; encoding: [0x00,0x9c,0x0f,0x0f]
+; CHECK: sqrshrn2.16b v0, v0, #2 ; encoding: [0x00,0x9c,0x0e,0x4f]
+; CHECK: sqrshrn.4h v0, v0, #3 ; encoding: [0x00,0x9c,0x1d,0x0f]
+; CHECK: sqrshrn2.8h v0, v0, #4 ; encoding: [0x00,0x9c,0x1c,0x4f]
+; CHECK: sqrshrn.2s v0, v0, #5 ; encoding: [0x00,0x9c,0x3b,0x0f]
+; CHECK: sqrshrn2.4s v0, v0, #6 ; encoding: [0x00,0x9c,0x3a,0x4f]
+; CHECK: sqrshrun.8b v0, v0, #1 ; encoding: [0x00,0x8c,0x0f,0x2f]
+; CHECK: sqrshrun2.16b v0, v0, #2 ; encoding: [0x00,0x8c,0x0e,0x6f]
+; CHECK: sqrshrun.4h v0, v0, #3 ; encoding: [0x00,0x8c,0x1d,0x2f]
+; CHECK: sqrshrun2.8h v0, v0, #4 ; encoding: [0x00,0x8c,0x1c,0x6f]
+; CHECK: sqrshrun.2s v0, v0, #5 ; encoding: [0x00,0x8c,0x3b,0x2f]
+; CHECK: sqrshrun2.4s v0, v0, #6 ; encoding: [0x00,0x8c,0x3a,0x6f]
+; CHECK: sqshlu.8b v0, v0, #1 ; encoding: [0x00,0x64,0x09,0x2f]
+; CHECK: sqshlu.16b v0, v0, #2 ; encoding: [0x00,0x64,0x0a,0x6f]
+; CHECK: sqshlu.4h v0, v0, #3 ; encoding: [0x00,0x64,0x13,0x2f]
+; CHECK: sqshlu.8h v0, v0, #4 ; encoding: [0x00,0x64,0x14,0x6f]
+; CHECK: sqshlu.2s v0, v0, #5 ; encoding: [0x00,0x64,0x25,0x2f]
+; CHECK: sqshlu.4s v0, v0, #6 ; encoding: [0x00,0x64,0x26,0x6f]
+; CHECK: sqshlu.2d v0, v0, #7 ; encoding: [0x00,0x64,0x47,0x6f]
+; CHECK: sqshl.8b v0, v0, #1 ; encoding: [0x00,0x74,0x09,0x0f]
+; CHECK: sqshl.16b v0, v0, #2 ; encoding: [0x00,0x74,0x0a,0x4f]
+; CHECK: sqshl.4h v0, v0, #3 ; encoding: [0x00,0x74,0x13,0x0f]
+; CHECK: sqshl.8h v0, v0, #4 ; encoding: [0x00,0x74,0x14,0x4f]
+; CHECK: sqshl.2s v0, v0, #5 ; encoding: [0x00,0x74,0x25,0x0f]
+; CHECK: sqshl.4s v0, v0, #6 ; encoding: [0x00,0x74,0x26,0x4f]
+; CHECK: sqshl.2d v0, v0, #7 ; encoding: [0x00,0x74,0x47,0x4f]
+; CHECK: sqshrn.8b v0, v0, #1 ; encoding: [0x00,0x94,0x0f,0x0f]
+; CHECK: sqshrn2.16b v0, v0, #2 ; encoding: [0x00,0x94,0x0e,0x4f]
+; CHECK: sqshrn.4h v0, v0, #3 ; encoding: [0x00,0x94,0x1d,0x0f]
+; CHECK: sqshrn2.8h v0, v0, #4 ; encoding: [0x00,0x94,0x1c,0x4f]
+; CHECK: sqshrn.2s v0, v0, #5 ; encoding: [0x00,0x94,0x3b,0x0f]
+; CHECK: sqshrn2.4s v0, v0, #6 ; encoding: [0x00,0x94,0x3a,0x4f]
+; CHECK: sqshrun.8b v0, v0, #1 ; encoding: [0x00,0x84,0x0f,0x2f]
+; CHECK: sqshrun2.16b v0, v0, #2 ; encoding: [0x00,0x84,0x0e,0x6f]
+; CHECK: sqshrun.4h v0, v0, #3 ; encoding: [0x00,0x84,0x1d,0x2f]
+; CHECK: sqshrun2.8h v0, v0, #4 ; encoding: [0x00,0x84,0x1c,0x6f]
+; CHECK: sqshrun.2s v0, v0, #5 ; encoding: [0x00,0x84,0x3b,0x2f]
+; CHECK: sqshrun2.4s v0, v0, #6 ; encoding: [0x00,0x84,0x3a,0x6f]
+; CHECK: sri.8b v0, v0, #1 ; encoding: [0x00,0x44,0x0f,0x2f]
+; CHECK: sri.16b v0, v0, #2 ; encoding: [0x00,0x44,0x0e,0x6f]
+; CHECK: sri.4h v0, v0, #3 ; encoding: [0x00,0x44,0x1d,0x2f]
+; CHECK: sri.8h v0, v0, #4 ; encoding: [0x00,0x44,0x1c,0x6f]
+; CHECK: sri.2s v0, v0, #5 ; encoding: [0x00,0x44,0x3b,0x2f]
+; CHECK: sri.4s v0, v0, #6 ; encoding: [0x00,0x44,0x3a,0x6f]
+; CHECK: sri.2d v0, v0, #7 ; encoding: [0x00,0x44,0x79,0x6f]
+; CHECK: srshr.8b v0, v0, #1 ; encoding: [0x00,0x24,0x0f,0x0f]
+; CHECK: srshr.16b v0, v0, #2 ; encoding: [0x00,0x24,0x0e,0x4f]
+; CHECK: srshr.4h v0, v0, #3 ; encoding: [0x00,0x24,0x1d,0x0f]
+; CHECK: srshr.8h v0, v0, #4 ; encoding: [0x00,0x24,0x1c,0x4f]
+; CHECK: srshr.2s v0, v0, #5 ; encoding: [0x00,0x24,0x3b,0x0f]
+; CHECK: srshr.4s v0, v0, #6 ; encoding: [0x00,0x24,0x3a,0x4f]
+; CHECK: srshr.2d v0, v0, #7 ; encoding: [0x00,0x24,0x79,0x4f]
+; CHECK: srsra.8b v0, v0, #1 ; encoding: [0x00,0x34,0x0f,0x0f]
+; CHECK: srsra.16b v0, v0, #2 ; encoding: [0x00,0x34,0x0e,0x4f]
+; CHECK: srsra.4h v0, v0, #3 ; encoding: [0x00,0x34,0x1d,0x0f]
+; CHECK: srsra.8h v0, v0, #4 ; encoding: [0x00,0x34,0x1c,0x4f]
+; CHECK: srsra.2s v0, v0, #5 ; encoding: [0x00,0x34,0x3b,0x0f]
+; CHECK: srsra.4s v0, v0, #6 ; encoding: [0x00,0x34,0x3a,0x4f]
+; CHECK: srsra.2d v0, v0, #7 ; encoding: [0x00,0x34,0x79,0x4f]
+; CHECK: sshll.8h v0, v0, #1 ; encoding: [0x00,0xa4,0x09,0x0f]
+; CHECK: sshll2.8h v0, v0, #2 ; encoding: [0x00,0xa4,0x0a,0x4f]
+; CHECK: sshll.4s v0, v0, #3 ; encoding: [0x00,0xa4,0x13,0x0f]
+; CHECK: sshll2.4s v0, v0, #4 ; encoding: [0x00,0xa4,0x14,0x4f]
+; CHECK: sshll.2d v0, v0, #5 ; encoding: [0x00,0xa4,0x25,0x0f]
+; CHECK: sshll2.2d v0, v0, #6 ; encoding: [0x00,0xa4,0x26,0x4f]
+; CHECK: sshr.8b v0, v0, #1 ; encoding: [0x00,0x04,0x0f,0x0f]
+; CHECK: sshr.16b v0, v0, #2 ; encoding: [0x00,0x04,0x0e,0x4f]
+; CHECK: sshr.4h v0, v0, #3 ; encoding: [0x00,0x04,0x1d,0x0f]
+; CHECK: sshr.8h v0, v0, #4 ; encoding: [0x00,0x04,0x1c,0x4f]
+; CHECK: sshr.2s v0, v0, #5 ; encoding: [0x00,0x04,0x3b,0x0f]
+; CHECK: sshr.4s v0, v0, #6 ; encoding: [0x00,0x04,0x3a,0x4f]
+; CHECK: sshr.2d v0, v0, #7 ; encoding: [0x00,0x04,0x79,0x4f]
+; CHECK: ssra.8b v0, v0, #1      ; encoding: [0x00,0x14,0x0f,0x0f]
+; CHECK: ssra.16b v0, v0, #2 ; encoding: [0x00,0x14,0x0e,0x4f]
+; CHECK: ssra.4h v0, v0, #3 ; encoding: [0x00,0x14,0x1d,0x0f]
+; CHECK: ssra.8h v0, v0, #4 ; encoding: [0x00,0x14,0x1c,0x4f]
+; CHECK: ssra.2s v0, v0, #5 ; encoding: [0x00,0x14,0x3b,0x0f]
+; CHECK: ssra.4s v0, v0, #6 ; encoding: [0x00,0x14,0x3a,0x4f]
+; CHECK: ssra.2d v0, v0, #7 ; encoding: [0x00,0x14,0x79,0x4f]
+; CHECK: ssra d0, d0, #64 ; encoding: [0x00,0x14,0x40,0x5f]
+; CHECK: ucvtf.2s v0, v0, #1 ; encoding: [0x00,0xe4,0x3f,0x2f]
+; CHECK: ucvtf.4s v0, v0, #2 ; encoding: [0x00,0xe4,0x3e,0x6f]
+; CHECK: ucvtf.2d v0, v0, #3 ; encoding: [0x00,0xe4,0x7d,0x6f]
+; CHECK: uqrshrn.8b v0, v0, #1 ; encoding: [0x00,0x9c,0x0f,0x2f]
+; CHECK: uqrshrn2.16b v0, v0, #2 ; encoding: [0x00,0x9c,0x0e,0x6f]
+; CHECK: uqrshrn.4h v0, v0, #3 ; encoding: [0x00,0x9c,0x1d,0x2f]
+; CHECK: uqrshrn2.8h v0, v0, #4 ; encoding: [0x00,0x9c,0x1c,0x6f]
+; CHECK: uqrshrn.2s v0, v0, #5 ; encoding: [0x00,0x9c,0x3b,0x2f]
+; CHECK: uqrshrn2.4s v0, v0, #6 ; encoding: [0x00,0x9c,0x3a,0x6f]
+; CHECK: uqshl.8b v0, v0, #1 ; encoding: [0x00,0x74,0x09,0x2f]
+; CHECK: uqshl.16b v0, v0, #2 ; encoding: [0x00,0x74,0x0a,0x6f]
+; CHECK: uqshl.4h v0, v0, #3 ; encoding: [0x00,0x74,0x13,0x2f]
+; CHECK: uqshl.8h v0, v0, #4 ; encoding: [0x00,0x74,0x14,0x6f]
+; CHECK: uqshl.2s v0, v0, #5 ; encoding: [0x00,0x74,0x25,0x2f]
+; CHECK: uqshl.4s v0, v0, #6 ; encoding: [0x00,0x74,0x26,0x6f]
+; CHECK: uqshl.2d v0, v0, #7 ; encoding: [0x00,0x74,0x47,0x6f]
+; CHECK: uqshrn.8b v0, v0, #1 ; encoding: [0x00,0x94,0x0f,0x2f]
+; CHECK: uqshrn2.16b v0, v0, #2 ; encoding: [0x00,0x94,0x0e,0x6f]
+; CHECK: uqshrn.4h v0, v0, #3 ; encoding: [0x00,0x94,0x1d,0x2f]
+; CHECK: uqshrn2.8h v0, v0, #4 ; encoding: [0x00,0x94,0x1c,0x6f]
+; CHECK: uqshrn.2s v0, v0, #5 ; encoding: [0x00,0x94,0x3b,0x2f]
+; CHECK: uqshrn2.4s v0, v0, #6 ; encoding: [0x00,0x94,0x3a,0x6f]
+; CHECK: urshr.8b v0, v0, #1 ; encoding: [0x00,0x24,0x0f,0x2f]
+; CHECK: urshr.16b v0, v0, #2 ; encoding: [0x00,0x24,0x0e,0x6f]
+; CHECK: urshr.4h v0, v0, #3 ; encoding: [0x00,0x24,0x1d,0x2f]
+; CHECK: urshr.8h v0, v0, #4 ; encoding: [0x00,0x24,0x1c,0x6f]
+; CHECK: urshr.2s v0, v0, #5 ; encoding: [0x00,0x24,0x3b,0x2f]
+; CHECK: urshr.4s v0, v0, #6 ; encoding: [0x00,0x24,0x3a,0x6f]
+; CHECK: urshr.2d v0, v0, #7 ; encoding: [0x00,0x24,0x79,0x6f]
+; CHECK: ursra.8b v0, v0, #1 ; encoding: [0x00,0x34,0x0f,0x2f]
+; CHECK: ursra.16b v0, v0, #2 ; encoding: [0x00,0x34,0x0e,0x6f]
+; CHECK: ursra.4h v0, v0, #3 ; encoding: [0x00,0x34,0x1d,0x2f]
+; CHECK: ursra.8h v0, v0, #4 ; encoding: [0x00,0x34,0x1c,0x6f]
+; CHECK: ursra.2s v0, v0, #5 ; encoding: [0x00,0x34,0x3b,0x2f]
+; CHECK: ursra.4s v0, v0, #6 ; encoding: [0x00,0x34,0x3a,0x6f]
+; CHECK: ursra.2d v0, v0, #7 ; encoding: [0x00,0x34,0x79,0x6f]
+; CHECK: ushll.8h v0, v0, #1 ; encoding: [0x00,0xa4,0x09,0x2f]
+; CHECK: ushll2.8h v0, v0, #2 ; encoding: [0x00,0xa4,0x0a,0x6f]
+; CHECK: ushll.4s v0, v0, #3 ; encoding: [0x00,0xa4,0x13,0x2f]
+; CHECK: ushll2.4s v0, v0, #4 ; encoding: [0x00,0xa4,0x14,0x6f]
+; CHECK: ushll.2d v0, v0, #5 ; encoding: [0x00,0xa4,0x25,0x2f]
+; CHECK: ushll2.2d v0, v0, #6 ; encoding: [0x00,0xa4,0x26,0x6f]
+; CHECK: ushr.8b v0, v0, #1 ; encoding: [0x00,0x04,0x0f,0x2f]
+; CHECK: ushr.16b v0, v0, #2 ; encoding: [0x00,0x04,0x0e,0x6f]
+; CHECK: ushr.4h v0, v0, #3 ; encoding: [0x00,0x04,0x1d,0x2f]
+; CHECK: ushr.8h v0, v0, #4 ; encoding: [0x00,0x04,0x1c,0x6f]
+; CHECK: ushr.2s v0, v0, #5 ; encoding: [0x00,0x04,0x3b,0x2f]
+; CHECK: ushr.4s v0, v0, #6 ; encoding: [0x00,0x04,0x3a,0x6f]
+; CHECK: ushr.2d v0, v0, #7 ; encoding: [0x00,0x04,0x79,0x6f]
+; CHECK: usra.8b v0, v0, #1 ; encoding: [0x00,0x14,0x0f,0x2f]
+; CHECK: usra.16b v0, v0, #2 ; encoding: [0x00,0x14,0x0e,0x6f]
+; CHECK: usra.4h v0, v0, #3 ; encoding: [0x00,0x14,0x1d,0x2f]
+; CHECK: usra.8h v0, v0, #4 ; encoding: [0x00,0x14,0x1c,0x6f]
+; CHECK: usra.2s v0, v0, #5 ; encoding: [0x00,0x14,0x3b,0x2f]
+; CHECK: usra.4s v0, v0, #6 ; encoding: [0x00,0x14,0x3a,0x6f]
+; CHECK: usra.2d v0, v0, #7 ; encoding: [0x00,0x14,0x79,0x6f]
+
+
+; ARM Verbose syntax variants.
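+; Note: these fully-qualified spellings assemble to the same encodings as the
+; dot-suffix forms above; the CHECK lines below show they print back in the
+; short dot-suffix syntax.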
+
+ rshrn v9.8b, v11.8h, #1
+ rshrn2 v8.16b, v9.8h, #2
+ rshrn v7.4h, v8.4s, #3
+ rshrn2 v6.8h, v7.4s, #4
+ rshrn v5.2s, v6.2d, #5
+ rshrn2 v4.4s, v5.2d, #6
+
+ shrn v9.8b, v11.8h, #1
+ shrn2 v8.16b, v9.8h, #2
+ shrn v7.4h, v8.4s, #3
+ shrn2 v6.8h, v7.4s, #4
+ shrn v5.2s, v6.2d, #5
+ shrn2 v4.4s, v5.2d, #6
+
+ sqrshrn v9.8b, v11.8h, #1
+ sqrshrn2 v8.16b, v9.8h, #2
+ sqrshrn v7.4h, v8.4s, #3
+ sqrshrn2 v6.8h, v7.4s, #4
+ sqrshrn v5.2s, v6.2d, #5
+ sqrshrn2 v4.4s, v5.2d, #6
+
+ sqshrn v9.8b, v11.8h, #1
+ sqshrn2 v8.16b, v9.8h, #2
+ sqshrn v7.4h, v8.4s, #3
+ sqshrn2 v6.8h, v7.4s, #4
+ sqshrn v5.2s, v6.2d, #5
+ sqshrn2 v4.4s, v5.2d, #6
+
+ sqrshrun v9.8b, v11.8h, #1
+ sqrshrun2 v8.16b, v9.8h, #2
+ sqrshrun v7.4h, v8.4s, #3
+ sqrshrun2 v6.8h, v7.4s, #4
+ sqrshrun v5.2s, v6.2d, #5
+ sqrshrun2 v4.4s, v5.2d, #6
+
+ sqshrun v9.8b, v11.8h, #1
+ sqshrun2 v8.16b, v9.8h, #2
+ sqshrun v7.4h, v8.4s, #3
+ sqshrun2 v6.8h, v7.4s, #4
+ sqshrun v5.2s, v6.2d, #5
+ sqshrun2 v4.4s, v5.2d, #6
+
+ uqrshrn v9.8b, v11.8h, #1
+ uqrshrn2 v8.16b, v9.8h, #2
+ uqrshrn v7.4h, v8.4s, #3
+ uqrshrn2 v6.8h, v7.4s, #4
+ uqrshrn v5.2s, v6.2d, #5
+ uqrshrn2 v4.4s, v5.2d, #6
+
+ uqshrn v9.8b, v11.8h, #1
+ uqshrn2 v8.16b, v9.8h, #2
+ uqshrn v7.4h, v8.4s, #3
+ uqshrn2 v6.8h, v7.4s, #4
+ uqshrn v5.2s, v6.2d, #5
+ uqshrn2 v4.4s, v5.2d, #6
+
+ sshll2 v10.8h, v3.16b, #6
+ sshll2 v11.4s, v4.8h, #5
+ sshll2 v12.2d, v5.4s, #4
+ sshll v13.8h, v6.8b, #3
+ sshll v14.4s, v7.4h, #2
+ sshll v15.2d, v8.2s, #7
+
+ ushll2 v10.8h, v3.16b, #6
+ ushll2 v11.4s, v4.8h, #5
+ ushll2 v12.2d, v5.4s, #4
+ ushll v13.8h, v6.8b, #3
+ ushll v14.4s, v7.4h, #2
+ ushll v15.2d, v8.2s, #7
+
+
+; CHECK: rshrn.8b v9, v11, #1 ; encoding: [0x69,0x8d,0x0f,0x0f]
+; CHECK: rshrn2.16b v8, v9, #2 ; encoding: [0x28,0x8d,0x0e,0x4f]
+; CHECK: rshrn.4h v7, v8, #3 ; encoding: [0x07,0x8d,0x1d,0x0f]
+; CHECK: rshrn2.8h v6, v7, #4 ; encoding: [0xe6,0x8c,0x1c,0x4f]
+; CHECK: rshrn.2s v5, v6, #5 ; encoding: [0xc5,0x8c,0x3b,0x0f]
+; CHECK: rshrn2.4s v4, v5, #6 ; encoding: [0xa4,0x8c,0x3a,0x4f]
+; CHECK: shrn.8b v9, v11, #1 ; encoding: [0x69,0x85,0x0f,0x0f]
+; CHECK: shrn2.16b v8, v9, #2 ; encoding: [0x28,0x85,0x0e,0x4f]
+; CHECK: shrn.4h v7, v8, #3 ; encoding: [0x07,0x85,0x1d,0x0f]
+; CHECK: shrn2.8h v6, v7, #4 ; encoding: [0xe6,0x84,0x1c,0x4f]
+; CHECK: shrn.2s v5, v6, #5 ; encoding: [0xc5,0x84,0x3b,0x0f]
+; CHECK: shrn2.4s v4, v5, #6 ; encoding: [0xa4,0x84,0x3a,0x4f]
+; CHECK: sqrshrn.8b v9, v11, #1 ; encoding: [0x69,0x9d,0x0f,0x0f]
+; CHECK: sqrshrn2.16b v8, v9, #2 ; encoding: [0x28,0x9d,0x0e,0x4f]
+; CHECK: sqrshrn.4h v7, v8, #3 ; encoding: [0x07,0x9d,0x1d,0x0f]
+; CHECK: sqrshrn2.8h v6, v7, #4 ; encoding: [0xe6,0x9c,0x1c,0x4f]
+; CHECK: sqrshrn.2s v5, v6, #5 ; encoding: [0xc5,0x9c,0x3b,0x0f]
+; CHECK: sqrshrn2.4s v4, v5, #6 ; encoding: [0xa4,0x9c,0x3a,0x4f]
+; CHECK: sqshrn.8b v9, v11, #1 ; encoding: [0x69,0x95,0x0f,0x0f]
+; CHECK: sqshrn2.16b v8, v9, #2 ; encoding: [0x28,0x95,0x0e,0x4f]
+; CHECK: sqshrn.4h v7, v8, #3 ; encoding: [0x07,0x95,0x1d,0x0f]
+; CHECK: sqshrn2.8h v6, v7, #4 ; encoding: [0xe6,0x94,0x1c,0x4f]
+; CHECK: sqshrn.2s v5, v6, #5 ; encoding: [0xc5,0x94,0x3b,0x0f]
+; CHECK: sqshrn2.4s v4, v5, #6 ; encoding: [0xa4,0x94,0x3a,0x4f]
+; CHECK: sqrshrun.8b v9, v11, #1 ; encoding: [0x69,0x8d,0x0f,0x2f]
+; CHECK: sqrshrun2.16b v8, v9, #2 ; encoding: [0x28,0x8d,0x0e,0x6f]
+; CHECK: sqrshrun.4h v7, v8, #3 ; encoding: [0x07,0x8d,0x1d,0x2f]
+; CHECK: sqrshrun2.8h v6, v7, #4 ; encoding: [0xe6,0x8c,0x1c,0x6f]
+; CHECK: sqrshrun.2s v5, v6, #5 ; encoding: [0xc5,0x8c,0x3b,0x2f]
+; CHECK: sqrshrun2.4s v4, v5, #6 ; encoding: [0xa4,0x8c,0x3a,0x6f]
+; CHECK: sqshrun.8b v9, v11, #1 ; encoding: [0x69,0x85,0x0f,0x2f]
+; CHECK: sqshrun2.16b v8, v9, #2 ; encoding: [0x28,0x85,0x0e,0x6f]
+; CHECK: sqshrun.4h v7, v8, #3 ; encoding: [0x07,0x85,0x1d,0x2f]
+; CHECK: sqshrun2.8h v6, v7, #4 ; encoding: [0xe6,0x84,0x1c,0x6f]
+; CHECK: sqshrun.2s v5, v6, #5 ; encoding: [0xc5,0x84,0x3b,0x2f]
+; CHECK: sqshrun2.4s v4, v5, #6 ; encoding: [0xa4,0x84,0x3a,0x6f]
+; CHECK: uqrshrn.8b v9, v11, #1 ; encoding: [0x69,0x9d,0x0f,0x2f]
+; CHECK: uqrshrn2.16b v8, v9, #2 ; encoding: [0x28,0x9d,0x0e,0x6f]
+; CHECK: uqrshrn.4h v7, v8, #3 ; encoding: [0x07,0x9d,0x1d,0x2f]
+; CHECK: uqrshrn2.8h v6, v7, #4 ; encoding: [0xe6,0x9c,0x1c,0x6f]
+; CHECK: uqrshrn.2s v5, v6, #5 ; encoding: [0xc5,0x9c,0x3b,0x2f]
+; CHECK: uqrshrn2.4s v4, v5, #6 ; encoding: [0xa4,0x9c,0x3a,0x6f]
+; CHECK: uqshrn.8b v9, v11, #1 ; encoding: [0x69,0x95,0x0f,0x2f]
+; CHECK: uqshrn2.16b v8, v9, #2 ; encoding: [0x28,0x95,0x0e,0x6f]
+; CHECK: uqshrn.4h v7, v8, #3 ; encoding: [0x07,0x95,0x1d,0x2f]
+; CHECK: uqshrn2.8h v6, v7, #4 ; encoding: [0xe6,0x94,0x1c,0x6f]
+; CHECK: uqshrn.2s v5, v6, #5 ; encoding: [0xc5,0x94,0x3b,0x2f]
+; CHECK: uqshrn2.4s v4, v5, #6 ; encoding: [0xa4,0x94,0x3a,0x6f]
+; CHECK: sshll2.8h v10, v3, #6 ; encoding: [0x6a,0xa4,0x0e,0x4f]
+; CHECK: sshll2.4s v11, v4, #5 ; encoding: [0x8b,0xa4,0x15,0x4f]
+; CHECK: sshll2.2d v12, v5, #4 ; encoding: [0xac,0xa4,0x24,0x4f]
+; CHECK: sshll.8h v13, v6, #3 ; encoding: [0xcd,0xa4,0x0b,0x0f]
+; CHECK: sshll.4s v14, v7, #2 ; encoding: [0xee,0xa4,0x12,0x0f]
+; CHECK: sshll.2d v15, v8, #7 ; encoding: [0x0f,0xa5,0x27,0x0f]
+; CHECK: ushll2.8h v10, v3, #6 ; encoding: [0x6a,0xa4,0x0e,0x6f]
+; CHECK: ushll2.4s v11, v4, #5 ; encoding: [0x8b,0xa4,0x15,0x6f]
+; CHECK: ushll2.2d v12, v5, #4 ; encoding: [0xac,0xa4,0x24,0x6f]
+; CHECK: ushll.8h v13, v6, #3 ; encoding: [0xcd,0xa4,0x0b,0x2f]
+; CHECK: ushll.4s v14, v7, #2 ; encoding: [0xee,0xa4,0x12,0x2f]
+; CHECK: ushll.2d v15, v8, #7 ; encoding: [0x0f,0xa5,0x27,0x2f]
+
+
+ pmull.8h v0, v0, v0
+ pmull2.8h v0, v0, v0
+ pmull.1q v2, v3, v4
+ pmull2.1q v2, v3, v4
+ pmull v2.1q, v3.1d, v4.1d
+ pmull2 v2.1q, v3.2d, v4.2d
+
+; CHECK: pmull.8h v0, v0, v0 ; encoding: [0x00,0xe0,0x20,0x0e]
+; CHECK: pmull2.8h v0, v0, v0 ; encoding: [0x00,0xe0,0x20,0x4e]
+; CHECK: pmull.1q v2, v3, v4 ; encoding: [0x62,0xe0,0xe4,0x0e]
+; CHECK: pmull2.1q v2, v3, v4 ; encoding: [0x62,0xe0,0xe4,0x4e]
+; CHECK: pmull.1q v2, v3, v4 ; encoding: [0x62,0xe0,0xe4,0x0e]
+; CHECK: pmull2.1q v2, v3, v4 ; encoding: [0x62,0xe0,0xe4,0x4e]
+
+
+ faddp.2d d1, v2
+ faddp.2s s3, v4
+; CHECK: faddp.2d d1, v2 ; encoding: [0x41,0xd8,0x70,0x7e]
+; CHECK: faddp.2s s3, v4 ; encoding: [0x83,0xd8,0x30,0x7e]
+
+ tbl.16b v2, {v4,v5,v6,v7}, v1
+ tbl.8b v0, {v4,v5,v6,v7}, v1
+ tbl.16b v2, {v5}, v1
+ tbl.8b v0, {v5}, v1
+ tbl.16b v2, {v5,v6,v7}, v1
+ tbl.8b v0, {v5,v6,v7}, v1
+ tbl.16b v2, {v6,v7}, v1
+ tbl.8b v0, {v6,v7}, v1
+; CHECK: tbl.16b v2, { v4, v5, v6, v7 }, v1 ; encoding: [0x82,0x60,0x01,0x4e]
+; CHECK: tbl.8b v0, { v4, v5, v6, v7 }, v1 ; encoding: [0x80,0x60,0x01,0x0e]
+; CHECK: tbl.16b v2, { v5 }, v1 ; encoding: [0xa2,0x00,0x01,0x4e]
+; CHECK: tbl.8b v0, { v5 }, v1 ; encoding: [0xa0,0x00,0x01,0x0e]
+; CHECK: tbl.16b v2, { v5, v6, v7 }, v1 ; encoding: [0xa2,0x40,0x01,0x4e]
+; CHECK: tbl.8b v0, { v5, v6, v7 }, v1 ; encoding: [0xa0,0x40,0x01,0x0e]
+; CHECK: tbl.16b v2, { v6, v7 }, v1 ; encoding: [0xc2,0x20,0x01,0x4e]
+; CHECK: tbl.8b v0, { v6, v7 }, v1 ; encoding: [0xc0,0x20,0x01,0x0e]
+
+ tbl v2.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v1.16b
+ tbl v0.8b, {v4.16b,v5.16b,v6.16b,v7.16b}, v1.8b
+ tbl v2.16b, {v5.16b}, v1.16b
+ tbl v0.8b, {v5.16b}, v1.8b
+ tbl v2.16b, {v5.16b,v6.16b,v7.16b}, v1.16b
+ tbl v0.8b, {v5.16b,v6.16b,v7.16b}, v1.8b
+ tbl v2.16b, {v6.16b,v7.16b}, v1.16b
+ tbl v0.8b, {v6.16b,v7.16b}, v1.8b
+; CHECK: tbl.16b v2, { v4, v5, v6, v7 }, v1 ; encoding: [0x82,0x60,0x01,0x4e]
+; CHECK: tbl.8b v0, { v4, v5, v6, v7 }, v1 ; encoding: [0x80,0x60,0x01,0x0e]
+; CHECK: tbl.16b v2, { v5 }, v1 ; encoding: [0xa2,0x00,0x01,0x4e]
+; CHECK: tbl.8b v0, { v5 }, v1 ; encoding: [0xa0,0x00,0x01,0x0e]
+; CHECK: tbl.16b v2, { v5, v6, v7 }, v1 ; encoding: [0xa2,0x40,0x01,0x4e]
+; CHECK: tbl.8b v0, { v5, v6, v7 }, v1 ; encoding: [0xa0,0x40,0x01,0x0e]
+; CHECK: tbl.16b v2, { v6, v7 }, v1 ; encoding: [0xc2,0x20,0x01,0x4e]
+; CHECK: tbl.8b v0, { v6, v7 }, v1 ; encoding: [0xc0,0x20,0x01,0x0e]
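+; Note: the dot-suffix and fully-qualified register-list spellings of tbl
+; above produce byte-for-byte identical encodings, which is why the two CHECK
+; blocks are the same.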
+
+ sqdmull s0, h0, h0
+ sqdmull d0, s0, s0
+; CHECK: sqdmull s0, h0, h0 ; encoding: [0x00,0xd0,0x60,0x5e]
+; CHECK: sqdmull d0, s0, s0 ; encoding: [0x00,0xd0,0xa0,0x5e]
+
+ frsqrte s0, s0
+ frsqrte d0, d0
+; CHECK: frsqrte s0, s0 ; encoding: [0x00,0xd8,0xa1,0x7e]
+; CHECK: frsqrte d0, d0 ; encoding: [0x00,0xd8,0xe1,0x7e]
+
+ mov.16b v0, v0
+ mov.2s v0, v0
+; CHECK: mov.16b v0, v0 ; encoding: [0x00,0x1c,0xa0,0x4e]
+; CHECK: mov.8b v0, v0 ; encoding: [0x00,0x1c,0xa0,0x0e]
+
+
+; uadalp/sadalp verbose mode aliases.
+ uadalp v14.4h, v25.8b
+ uadalp v15.8h, v24.16b
+ uadalp v16.2s, v23.4h
+ uadalp v17.4s, v22.8h
+ uadalp v18.1d, v21.2s
+ uadalp v19.2d, v20.4s
+
+ sadalp v1.4h, v11.8b
+ sadalp v2.8h, v12.16b
+ sadalp v3.2s, v13.4h
+ sadalp v4.4s, v14.8h
+ sadalp v5.1d, v15.2s
+ sadalp v6.2d, v16.4s
+
+; CHECK: uadalp.4h v14, v25 ; encoding: [0x2e,0x6b,0x20,0x2e]
+; CHECK: uadalp.8h v15, v24 ; encoding: [0x0f,0x6b,0x20,0x6e]
+; CHECK: uadalp.2s v16, v23 ; encoding: [0xf0,0x6a,0x60,0x2e]
+; CHECK: uadalp.4s v17, v22 ; encoding: [0xd1,0x6a,0x60,0x6e]
+; CHECK: uadalp.1d v18, v21 ; encoding: [0xb2,0x6a,0xa0,0x2e]
+; CHECK: uadalp.2d v19, v20 ; encoding: [0x93,0x6a,0xa0,0x6e]
+; CHECK: sadalp.4h v1, v11 ; encoding: [0x61,0x69,0x20,0x0e]
+; CHECK: sadalp.8h v2, v12 ; encoding: [0x82,0x69,0x20,0x4e]
+; CHECK: sadalp.2s v3, v13 ; encoding: [0xa3,0x69,0x60,0x0e]
+; CHECK: sadalp.4s v4, v14 ; encoding: [0xc4,0x69,0x60,0x4e]
+; CHECK: sadalp.1d v5, v15 ; encoding: [0xe5,0x69,0xa0,0x0e]
+; CHECK: sadalp.2d v6, v16 ; encoding: [0x06,0x6a,0xa0,0x4e]
+
+; MVN is an alias for 'not'.
+ mvn v1.8b, v4.8b
+ mvn v19.16b, v17.16b
+ mvn.8b v10, v6
+ mvn.16b v11, v7
+
+; CHECK: mvn.8b v1, v4 ; encoding: [0x81,0x58,0x20,0x2e]
+; CHECK: mvn.16b v19, v17 ; encoding: [0x33,0x5a,0x20,0x6e]
+; CHECK: mvn.8b v10, v6 ; encoding: [0xca,0x58,0x20,0x2e]
+; CHECK: mvn.16b v11, v7 ; encoding: [0xeb,0x58,0x20,0x6e]
+
+; sqdmull verbose mode aliases
+ sqdmull v10.4s, v12.4h, v12.4h
+ sqdmull2 v10.4s, v13.8h, v13.8h
+ sqdmull v10.2d, v13.2s, v13.2s
+ sqdmull2 v10.2d, v13.4s, v13.4s
+; CHECK: sqdmull.4s v10, v12, v12 ; encoding: [0x8a,0xd1,0x6c,0x0e]
+; CHECK: sqdmull2.4s v10, v13, v13 ; encoding: [0xaa,0xd1,0x6d,0x4e]
+; CHECK: sqdmull.2d v10, v13, v13 ; encoding: [0xaa,0xd1,0xad,0x0e]
+; CHECK: sqdmull2.2d v10, v13, v13 ; encoding: [0xaa,0xd1,0xad,0x4e]
+
+; xtn verbose mode aliases
+ xtn v14.8b, v14.8h
+ xtn2 v14.16b, v14.8h
+ xtn v14.4h, v14.4s
+ xtn2 v14.8h, v14.4s
+ xtn v14.2s, v14.2d
+ xtn2 v14.4s, v14.2d
+; CHECK: xtn.8b v14, v14 ; encoding: [0xce,0x29,0x21,0x0e]
+; CHECK: xtn2.16b v14, v14 ; encoding: [0xce,0x29,0x21,0x4e]
+; CHECK: xtn.4h v14, v14 ; encoding: [0xce,0x29,0x61,0x0e]
+; CHECK: xtn2.8h v14, v14 ; encoding: [0xce,0x29,0x61,0x4e]
+; CHECK: xtn.2s v14, v14 ; encoding: [0xce,0x29,0xa1,0x0e]
+; CHECK: xtn2.4s v14, v14 ; encoding: [0xce,0x29,0xa1,0x4e]
+
+; uaddl verbose mode aliases
+ uaddl v9.8h, v13.8b, v14.8b
+ uaddl2 v9.8h, v13.16b, v14.16b
+ uaddl v9.4s, v13.4h, v14.4h
+ uaddl2 v9.4s, v13.8h, v14.8h
+ uaddl v9.2d, v13.2s, v14.2s
+ uaddl2 v9.2d, v13.4s, v14.4s
+; CHECK: uaddl.8h v9, v13, v14 ; encoding: [0xa9,0x01,0x2e,0x2e]
+; CHECK: uaddl2.8h v9, v13, v14 ; encoding: [0xa9,0x01,0x2e,0x6e]
+; CHECK: uaddl.4s v9, v13, v14 ; encoding: [0xa9,0x01,0x6e,0x2e]
+; CHECK: uaddl2.4s v9, v13, v14 ; encoding: [0xa9,0x01,0x6e,0x6e]
+; CHECK: uaddl.2d v9, v13, v14 ; encoding: [0xa9,0x01,0xae,0x2e]
+; CHECK: uaddl2.2d v9, v13, v14 ; encoding: [0xa9,0x01,0xae,0x6e]
+
+; bit verbose mode aliases
+ bit v9.16b, v10.16b, v10.16b
+ bit v9.8b, v10.8b, v10.8b
+; CHECK: bit.16b v9, v10, v10 ; encoding: [0x49,0x1d,0xaa,0x6e]
+; CHECK: bit.8b v9, v10, v10 ; encoding: [0x49,0x1d,0xaa,0x2e]
+
+; pmull verbose mode aliases
+ pmull v8.8h, v8.8b, v8.8b
+ pmull2 v8.8h, v8.16b, v8.16b
+ pmull v8.1q, v8.1d, v8.1d
+ pmull2 v8.1q, v8.2d, v8.2d
+; CHECK: pmull.8h v8, v8, v8 ; encoding: [0x08,0xe1,0x28,0x0e]
+; CHECK: pmull2.8h v8, v8, v8 ; encoding: [0x08,0xe1,0x28,0x4e]
+; CHECK: pmull.1q v8, v8, v8 ; encoding: [0x08,0xe1,0xe8,0x0e]
+; CHECK: pmull2.1q v8, v8, v8 ; encoding: [0x08,0xe1,0xe8,0x4e]
+
+; usubl verbose mode aliases
+ usubl v9.8h, v13.8b, v14.8b
+ usubl2 v9.8h, v13.16b, v14.16b
+ usubl v9.4s, v13.4h, v14.4h
+ usubl2 v9.4s, v13.8h, v14.8h
+ usubl v9.2d, v13.2s, v14.2s
+ usubl2 v9.2d, v13.4s, v14.4s
+; CHECK: usubl.8h v9, v13, v14 ; encoding: [0xa9,0x21,0x2e,0x2e]
+; CHECK: usubl2.8h v9, v13, v14 ; encoding: [0xa9,0x21,0x2e,0x6e]
+; CHECK: usubl.4s v9, v13, v14 ; encoding: [0xa9,0x21,0x6e,0x2e]
+; CHECK: usubl2.4s v9, v13, v14 ; encoding: [0xa9,0x21,0x6e,0x6e]
+; CHECK: usubl.2d v9, v13, v14 ; encoding: [0xa9,0x21,0xae,0x2e]
+; CHECK: usubl2.2d v9, v13, v14 ; encoding: [0xa9,0x21,0xae,0x6e]
+
+; uabdl verbose mode aliases
+ uabdl v9.8h, v13.8b, v14.8b
+ uabdl2 v9.8h, v13.16b, v14.16b
+ uabdl v9.4s, v13.4h, v14.4h
+ uabdl2 v9.4s, v13.8h, v14.8h
+ uabdl v9.2d, v13.2s, v14.2s
+ uabdl2 v9.2d, v13.4s, v14.4s
+; CHECK: uabdl.8h v9, v13, v14 ; encoding: [0xa9,0x71,0x2e,0x2e]
+; CHECK: uabdl2.8h v9, v13, v14 ; encoding: [0xa9,0x71,0x2e,0x6e]
+; CHECK: uabdl.4s v9, v13, v14 ; encoding: [0xa9,0x71,0x6e,0x2e]
+; CHECK: uabdl2.4s v9, v13, v14 ; encoding: [0xa9,0x71,0x6e,0x6e]
+; CHECK: uabdl.2d v9, v13, v14 ; encoding: [0xa9,0x71,0xae,0x2e]
+; CHECK: uabdl2.2d v9, v13, v14 ; encoding: [0xa9,0x71,0xae,0x6e]
+
+; umull verbose mode aliases
+ umull v9.8h, v13.8b, v14.8b
+ umull2 v9.8h, v13.16b, v14.16b
+ umull v9.4s, v13.4h, v14.4h
+ umull2 v9.4s, v13.8h, v14.8h
+ umull v9.2d, v13.2s, v14.2s
+ umull2 v9.2d, v13.4s, v14.4s
+; CHECK: umull.8h v9, v13, v14 ; encoding: [0xa9,0xc1,0x2e,0x2e]
+; CHECK: umull2.8h v9, v13, v14 ; encoding: [0xa9,0xc1,0x2e,0x6e]
+; CHECK: umull.4s v9, v13, v14 ; encoding: [0xa9,0xc1,0x6e,0x2e]
+; CHECK: umull2.4s v9, v13, v14 ; encoding: [0xa9,0xc1,0x6e,0x6e]
+; CHECK: umull.2d v9, v13, v14 ; encoding: [0xa9,0xc1,0xae,0x2e]
+; CHECK: umull2.2d v9, v13, v14 ; encoding: [0xa9,0xc1,0xae,0x6e]
+
+; smull verbose mode aliases
+ smull v9.8h, v13.8b, v14.8b
+ smull2 v9.8h, v13.16b, v14.16b
+ smull v9.4s, v13.4h, v14.4h
+ smull2 v9.4s, v13.8h, v14.8h
+ smull v9.2d, v13.2s, v14.2s
+ smull2 v9.2d, v13.4s, v14.4s
+; CHECK: smull.8h v9, v13, v14 ; encoding: [0xa9,0xc1,0x2e,0x0e]
+; CHECK: smull2.8h v9, v13, v14 ; encoding: [0xa9,0xc1,0x2e,0x4e]
+; CHECK: smull.4s v9, v13, v14 ; encoding: [0xa9,0xc1,0x6e,0x0e]
+; CHECK: smull2.4s v9, v13, v14 ; encoding: [0xa9,0xc1,0x6e,0x4e]
+; CHECK: smull.2d v9, v13, v14 ; encoding: [0xa9,0xc1,0xae,0x0e]
+; CHECK: smull2.2d v9, v13, v14 ; encoding: [0xa9,0xc1,0xae,0x4e]
diff --git a/test/MC/AArch64/arm64-aliases.s b/test/MC/AArch64/arm64-aliases.s
new file mode 100644
index 000000000000..c3affe37aa9c
--- /dev/null
+++ b/test/MC/AArch64/arm64-aliases.s
@@ -0,0 +1,753 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon -output-asm-variant=1 -show-encoding < %s | FileCheck %s
+
+foo:
+;-----------------------------------------------------------------------------
+; ADD #0 to/from SP/WSP is a MOV
+;-----------------------------------------------------------------------------
+ add x1, sp, #0
+; CHECK: mov x1, sp
+ add sp, x2, #0
+; CHECK: mov sp, x2
+ add w3, wsp, #0
+; CHECK: mov w3, wsp
+ add wsp, w4, #0
+; CHECK: mov wsp, w4
+ mov x5, sp
+; CHECK: mov x5, sp
+ mov sp, x6
+; CHECK: mov sp, x6
+ mov w7, wsp
+; CHECK: mov w7, wsp
+ mov wsp, w8
+; CHECK: mov wsp, w8
+
+;-----------------------------------------------------------------------------
+; ORR Rd, Rn, Rn is a MOV
+;-----------------------------------------------------------------------------
+ orr x2, xzr, x9
+; CHECK: mov x2, x9
+ orr w2, wzr, w9
+; CHECK: mov w2, w9
+ mov x3, x4
+; CHECK: mov x3, x4
+ mov w5, w6
+; CHECK: mov w5, w6
+
+;-----------------------------------------------------------------------------
+; TST Xn, #<imm>
+;-----------------------------------------------------------------------------
+ tst w1, #3
+ tst x1, #3
+ tst w1, w2
+ tst x1, x2
+ ands wzr, w1, w2, lsl #2
+ ands xzr, x1, x2, lsl #3
+ tst w3, w7, lsl #31
+ tst x2, x20, asr #0
+
+; CHECK: tst w1, #0x3 ; encoding: [0x3f,0x04,0x00,0x72]
+; CHECK: tst x1, #0x3 ; encoding: [0x3f,0x04,0x40,0xf2]
+; CHECK: tst w1, w2 ; encoding: [0x3f,0x00,0x02,0x6a]
+; CHECK: tst x1, x2 ; encoding: [0x3f,0x00,0x02,0xea]
+; CHECK: tst w1, w2, lsl #2 ; encoding: [0x3f,0x08,0x02,0x6a]
+; CHECK: tst x1, x2, lsl #3 ; encoding: [0x3f,0x0c,0x02,0xea]
+; CHECK: tst w3, w7, lsl #31 ; encoding: [0x7f,0x7c,0x07,0x6a]
+; CHECK: tst x2, x20, asr #0 ; encoding: [0x5f,0x00,0x94,0xea]
+
+;-----------------------------------------------------------------------------
+; ADDS to WZR/XZR is a CMN
+;-----------------------------------------------------------------------------
+ cmn w1, #3, lsl #0
+ cmn x2, #4194304
+ cmn w4, w5
+ cmn x6, x7
+ cmn w8, w9, asr #3
+ cmn x2, x3, lsr #4
+ cmn x2, w3, uxtb #1
+ cmn x4, x5, uxtx #1
+
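+; Note: 4194304 == 1024 << 12, so the second cmn is accepted and printed in
+; its canonical shifted-immediate form, "cmn x2, #1024, lsl #12".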
+; CHECK: cmn w1, #3 ; encoding: [0x3f,0x0c,0x00,0x31]
+; CHECK: cmn x2, #1024, lsl #12 ; encoding: [0x5f,0x00,0x50,0xb1]
+; CHECK: cmn w4, w5 ; encoding: [0x9f,0x00,0x05,0x2b]
+; CHECK: cmn x6, x7 ; encoding: [0xdf,0x00,0x07,0xab]
+; CHECK: cmn w8, w9, asr #3 ; encoding: [0x1f,0x0d,0x89,0x2b]
+; CHECK: cmn x2, x3, lsr #4 ; encoding: [0x5f,0x10,0x43,0xab]
+; CHECK: cmn x2, w3, uxtb #1 ; encoding: [0x5f,0x04,0x23,0xab]
+; CHECK: cmn x4, x5, uxtx #1 ; encoding: [0x9f,0x64,0x25,0xab]
+
+
+;-----------------------------------------------------------------------------
+; SUBS to WZR/XZR is a CMP
+;-----------------------------------------------------------------------------
+ cmp w1, #1024, lsl #12
+ cmp x2, #1024
+ cmp w4, w5
+ cmp x6, x7
+ cmp w8, w9, asr #3
+ cmp x2, x3, lsr #4
+ cmp x2, w3, uxth #2
+ cmp x4, x5, uxtx
+ cmp wzr, w1
+ cmp x8, w8, uxtw
+ cmp w9, w8, uxtw
+ cmp wsp, w9, lsl #0
+
+; CHECK: cmp w1, #1024, lsl #12 ; encoding: [0x3f,0x00,0x50,0x71]
+; CHECK: cmp x2, #1024 ; encoding: [0x5f,0x00,0x10,0xf1]
+; CHECK: cmp w4, w5 ; encoding: [0x9f,0x00,0x05,0x6b]
+; CHECK: cmp x6, x7 ; encoding: [0xdf,0x00,0x07,0xeb]
+; CHECK: cmp w8, w9, asr #3 ; encoding: [0x1f,0x0d,0x89,0x6b]
+; CHECK: cmp x2, x3, lsr #4 ; encoding: [0x5f,0x10,0x43,0xeb]
+; CHECK: cmp x2, w3, uxth #2 ; encoding: [0x5f,0x28,0x23,0xeb]
+; CHECK: cmp x4, x5, uxtx ; encoding: [0x9f,0x60,0x25,0xeb]
+; CHECK: cmp wzr, w1 ; encoding: [0xff,0x03,0x01,0x6b]
+; CHECK: cmp x8, w8, uxtw ; encoding: [0x1f,0x41,0x28,0xeb]
+; CHECK: cmp w9, w8, uxtw ; encoding: [0x3f,0x41,0x28,0x6b]
+; CHECK: cmp wsp, w9 ; encoding: [0xff,0x43,0x29,0x6b]
+
+
+;-----------------------------------------------------------------------------
+; SUB/SUBS from WZR/XZR is a NEG
+;-----------------------------------------------------------------------------
+
+ neg w0, w1
+; CHECK: neg w0, w1
+ neg w0, w1, lsl #1
+; CHECK: neg w0, w1, lsl #1
+ neg x0, x1
+; CHECK: neg x0, x1
+ neg x0, x1, asr #1
+; CHECK: neg x0, x1, asr #1
+ negs w0, w1
+; CHECK: negs w0, w1
+ negs w0, w1, lsl #1
+; CHECK: negs w0, w1, lsl #1
+ negs x0, x1
+; CHECK: negs x0, x1
+ negs x0, x1, asr #1
+; CHECK: negs x0, x1, asr #1
+
+;-----------------------------------------------------------------------------
+; MOV aliases
+;-----------------------------------------------------------------------------
+
+ mov x0, #281470681743360
+ mov x0, #18446744073709486080
+
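+; Note: 281470681743360 == 0xffff << 32 and 18446744073709486080 ==
+; 0xffffffffffff0000 == ~0xffff, hence the movz/movn forms checked below.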
+; CHECK: movz x0, #0xffff, lsl #32
+; CHECK: movn x0, #0xffff
+
+ mov w0, #0xffffffff
+ mov w0, #0xffffff00
+ mov wzr, #0xffffffff
+ mov wzr, #0xffffff00
+
+; CHECK: movn w0, #0
+; CHECK: movn w0, #0xff
+; CHECK: movn wzr, #0
+; CHECK: movn wzr, #0xff
+
+;-----------------------------------------------------------------------------
+; MVN aliases
+;-----------------------------------------------------------------------------
+
+ mvn w4, w9
+ mvn x2, x3
+ orn w4, wzr, w9
+
+; CHECK: mvn w4, w9 ; encoding: [0xe4,0x03,0x29,0x2a]
+; CHECK: mvn x2, x3 ; encoding: [0xe2,0x03,0x23,0xaa]
+; CHECK: mvn w4, w9 ; encoding: [0xe4,0x03,0x29,0x2a]
+
+ mvn w4, w9, lsl #1
+ mvn x2, x3, lsl #1
+ orn w4, wzr, w9, lsl #1
+
+; CHECK: mvn w4, w9, lsl #1 ; encoding: [0xe4,0x07,0x29,0x2a]
+; CHECK: mvn x2, x3, lsl #1 ; encoding: [0xe2,0x07,0x23,0xaa]
+; CHECK: mvn w4, w9, lsl #1 ; encoding: [0xe4,0x07,0x29,0x2a]
+
+;-----------------------------------------------------------------------------
+; Bitfield aliases
+;-----------------------------------------------------------------------------
+
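+; Note: bfi/bfxil/sbfiz/sbfx/ubfiz/ubfx are spellings of the underlying
+; BFM/SBFM/UBFM instructions; a bfi with an lsb of #0 prints as bfxil, as the
+; CHECK lines for the third and fourth inputs show.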
+ bfi w0, w0, #1, #4
+ bfi x0, x0, #1, #4
+ bfi w0, w0, #0, #2
+ bfi x0, x0, #0, #2
+ bfxil w0, w0, #2, #3
+ bfxil x0, x0, #2, #3
+ sbfiz w0, w0, #1, #4
+ sbfiz x0, x0, #1, #4
+ sbfx w0, w0, #2, #3
+ sbfx x0, x0, #2, #3
+ ubfiz w0, w0, #1, #4
+ ubfiz x0, x0, #1, #4
+ ubfx w0, w0, #2, #3
+ ubfx x0, x0, #2, #3
+
+; CHECK: bfi w0, w0, #1, #4
+; CHECK: bfi x0, x0, #1, #4
+; CHECK: bfxil w0, w0, #0, #2
+; CHECK: bfxil x0, x0, #0, #2
+; CHECK: bfxil w0, w0, #2, #3
+; CHECK: bfxil x0, x0, #2, #3
+; CHECK: sbfiz w0, w0, #1, #4
+; CHECK: sbfiz x0, x0, #1, #4
+; CHECK: sbfx w0, w0, #2, #3
+; CHECK: sbfx x0, x0, #2, #3
+; CHECK: ubfiz w0, w0, #1, #4
+; CHECK: ubfiz x0, x0, #1, #4
+; CHECK: ubfx w0, w0, #2, #3
+; CHECK: ubfx x0, x0, #2, #3
+
+;-----------------------------------------------------------------------------
+; Shift (immediate) aliases
+;-----------------------------------------------------------------------------
+
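+; Note: asr #s corresponds to sbfm #s, #31 (#63 for X registers), lsr #s to
+; ubfm #s, #31/#63, and lsl #s to ubfm #(regsize-s), #(regsize-1-s); e.g.
+; ubfm w0, w0, #31, #30 below is lsl w0, w0, #1.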
+; CHECK: asr w1, w3, #13
+; CHECK: asr x1, x3, #13
+; CHECK: lsl w0, w0, #1
+; CHECK: lsl x0, x0, #1
+; CHECK: lsr w0, w0, #4
+; CHECK: lsr x0, x0, #4
+
+ sbfm w1, w3, #13, #31
+ sbfm x1, x3, #13, #63
+ ubfm w0, w0, #31, #30
+ ubfm x0, x0, #63, #62
+ ubfm w0, w0, #4, #31
+ ubfm x0, x0, #4, #63
+; CHECK: ror w1, w3, #5
+; CHECK: ror x1, x3, #5
+ ror w1, w3, #5
+ ror x1, x3, #5
+; CHECK: lsl w1, wzr, #3
+ lsl w1, wzr, #3
+
+;-----------------------------------------------------------------------------
+; Sign/Zero extend aliases
+;-----------------------------------------------------------------------------
+
+ sxtb w1, w2
+ sxth w1, w2
+ uxtb w1, w2
+ uxth w1, w2
+
+; CHECK: sxtb w1, w2
+; CHECK: sxth w1, w2
+; CHECK: uxtb w1, w2
+; CHECK: uxth w1, w2
+
+ sxtb x1, w2
+ sxth x1, w2
+ sxtw x1, w2
+ uxtb x1, w2
+ uxth x1, w2
+ uxtw x1, w2
+
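+; Note: uxtb/uxth with a 64-bit destination print with a 32-bit destination
+; (a 32-bit write already zeroes the upper bits), and uxtw x1, w2 is
+; canonicalized to ubfx x1, x2, #0, #32, as the CHECK lines show.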
+; CHECK: sxtb x1, w2
+; CHECK: sxth x1, w2
+; CHECK: sxtw x1, w2
+; CHECK: uxtb w1, w2
+; CHECK: uxth w1, w2
+; CHECK: ubfx x1, x2, #0, #32
+
+;-----------------------------------------------------------------------------
+; Negate with carry
+;-----------------------------------------------------------------------------
+
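+; ngc/ngcs Rd, Rm are the standard aliases of sbc/sbcs Rd, zr, Rm (stated here
+; for reference; the test only checks that the spelling round-trips).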
+ ngc w1, w2
+ ngc x1, x2
+ ngcs w1, w2
+ ngcs x1, x2
+
+; CHECK: ngc w1, w2
+; CHECK: ngc x1, x2
+; CHECK: ngcs w1, w2
+; CHECK: ngcs x1, x2
+
+;-----------------------------------------------------------------------------
+; 6.6.1 Multiply aliases
+;-----------------------------------------------------------------------------
+
+ mneg w1, w2, w3
+ mneg x1, x2, x3
+ mul w1, w2, w3
+ mul x1, x2, x3
+ smnegl x1, w2, w3
+ umnegl x1, w2, w3
+ smull x1, w2, w3
+ umull x1, w2, w3
+
+; CHECK: mneg w1, w2, w3
+; CHECK: mneg x1, x2, x3
+; CHECK: mul w1, w2, w3
+; CHECK: mul x1, x2, x3
+; CHECK: smnegl x1, w2, w3
+; CHECK: umnegl x1, w2, w3
+; CHECK: smull x1, w2, w3
+; CHECK: umull x1, w2, w3
+
+;-----------------------------------------------------------------------------
+; Conditional select aliases
+;-----------------------------------------------------------------------------
+
+ cset w1, eq
+ cset x1, eq
+ csetm w1, ne
+ csetm x1, ne
+ cinc w1, w2, lt
+ cinc x1, x2, lt
+ cinv w1, w2, mi
+ cinv x1, x2, mi
+
+; CHECK: cset w1, eq
+; CHECK: cset x1, eq
+; CHECK: csetm w1, ne
+; CHECK: csetm x1, ne
+; CHECK: cinc w1, w2, lt
+; CHECK: cinc x1, x2, lt
+; CHECK: cinv w1, w2, mi
+; CHECK: cinv x1, x2, mi
+
+;-----------------------------------------------------------------------------
+; SYS aliases
+;-----------------------------------------------------------------------------
+
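+; Note: each generic "sys #<op1>, Cn, Cm, #<op2>" form below encodes one of
+; the ic/dc/at/tlbi maintenance operations, so the disassembly prints the
+; named alias that the following CHECK line expects.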
+ sys #0, c7, c1, #0
+; CHECK: ic ialluis
+ sys #0, c7, c5, #0
+; CHECK: ic iallu
+ sys #3, c7, c5, #1
+; CHECK: ic ivau
+
+ sys #3, c7, c4, #1
+; CHECK: dc zva
+ sys #0, c7, c6, #1
+; CHECK: dc ivac
+ sys #0, c7, c6, #2
+; CHECK: dc isw
+ sys #3, c7, c10, #1
+; CHECK: dc cvac
+ sys #0, c7, c10, #2
+; CHECK: dc csw
+ sys #3, c7, c11, #1
+; CHECK: dc cvau
+ sys #3, c7, c14, #1
+; CHECK: dc civac
+ sys #0, c7, c14, #2
+; CHECK: dc cisw
+
+ sys #0, c7, c8, #0
+; CHECK: at s1e1r
+ sys #4, c7, c8, #0
+; CHECK: at s1e2r
+ sys #6, c7, c8, #0
+; CHECK: at s1e3r
+ sys #0, c7, c8, #1
+; CHECK: at s1e1w
+ sys #4, c7, c8, #1
+; CHECK: at s1e2w
+ sys #6, c7, c8, #1
+; CHECK: at s1e3w
+ sys #0, c7, c8, #2
+; CHECK: at s1e0r
+ sys #0, c7, c8, #3
+; CHECK: at s1e0w
+ sys #4, c7, c8, #4
+; CHECK: at s12e1r
+ sys #4, c7, c8, #5
+; CHECK: at s12e1w
+ sys #4, c7, c8, #6
+; CHECK: at s12e0r
+ sys #4, c7, c8, #7
+; CHECK: at s12e0w
+
+ sys #0, c8, c3, #0
+; CHECK: tlbi vmalle1is
+ sys #4, c8, c3, #0
+; CHECK: tlbi alle2is
+ sys #6, c8, c3, #0
+; CHECK: tlbi alle3is
+ sys #0, c8, c3, #1
+; CHECK: tlbi vae1is
+ sys #4, c8, c3, #1
+; CHECK: tlbi vae2is
+ sys #6, c8, c3, #1
+; CHECK: tlbi vae3is
+ sys #0, c8, c3, #2
+; CHECK: tlbi aside1is
+ sys #0, c8, c3, #3
+; CHECK: tlbi vaae1is
+ sys #4, c8, c3, #4
+; CHECK: tlbi alle1is
+ sys #0, c8, c3, #5
+; CHECK: tlbi vale1is
+ sys #0, c8, c3, #7
+; CHECK: tlbi vaale1is
+ sys #0, c8, c7, #0
+; CHECK: tlbi vmalle1
+ sys #4, c8, c7, #0
+; CHECK: tlbi alle2
+ sys #4, c8, c3, #5
+; CHECK: tlbi vale2is
+ sys #6, c8, c3, #5
+; CHECK: tlbi vale3is
+ sys #6, c8, c7, #0
+; CHECK: tlbi alle3
+ sys #0, c8, c7, #1
+; CHECK: tlbi vae1
+ sys #4, c8, c7, #1
+; CHECK: tlbi vae2
+ sys #6, c8, c7, #1
+; CHECK: tlbi vae3
+ sys #0, c8, c7, #2
+; CHECK: tlbi aside1
+ sys #0, c8, c7, #3
+; CHECK: tlbi vaae1
+ sys #4, c8, c7, #4
+; CHECK: tlbi alle1
+ sys #0, c8, c7, #5
+; CHECK: tlbi vale1
+ sys #4, c8, c7, #5
+; CHECK: tlbi vale2
+ sys #6, c8, c7, #5
+; CHECK: tlbi vale3
+ sys #0, c8, c7, #7
+; CHECK: tlbi vaale1
+ sys #4, c8, c4, #1
+; CHECK: tlbi ipas2e1
+ sys #4, c8, c4, #5
+; CHECK: tlbi ipas2le1
+ sys #4, c8, c0, #1
+; CHECK: tlbi ipas2e1is
+ sys #4, c8, c0, #5
+; CHECK: tlbi ipas2le1is
+ sys #4, c8, c7, #6
+; CHECK: tlbi vmalls12e1
+ sys #4, c8, c3, #6
+; CHECK: tlbi vmalls12e1is
+
+ ic ialluis
+; CHECK: ic ialluis ; encoding: [0x1f,0x71,0x08,0xd5]
+ ic iallu
+; CHECK: ic iallu ; encoding: [0x1f,0x75,0x08,0xd5]
+ ic ivau, x0
+; CHECK: ic ivau, x0 ; encoding: [0x20,0x75,0x0b,0xd5]
+
+ dc zva, x0
+; CHECK: dc zva, x0 ; encoding: [0x20,0x74,0x0b,0xd5]
+ dc ivac, x0
+; CHECK: dc ivac, x0 ; encoding: [0x20,0x76,0x08,0xd5]
+ dc isw, x0
+; CHECK: dc isw, x0 ; encoding: [0x40,0x76,0x08,0xd5]
+ dc cvac, x0
+; CHECK: dc cvac, x0 ; encoding: [0x20,0x7a,0x0b,0xd5]
+ dc csw, x0
+; CHECK: dc csw, x0 ; encoding: [0x40,0x7a,0x08,0xd5]
+ dc cvau, x0
+; CHECK: dc cvau, x0 ; encoding: [0x20,0x7b,0x0b,0xd5]
+ dc civac, x0
+; CHECK: dc civac, x0 ; encoding: [0x20,0x7e,0x0b,0xd5]
+ dc cisw, x0
+; CHECK: dc cisw, x0 ; encoding: [0x40,0x7e,0x08,0xd5]
+
+ at s1e1r, x0
+; CHECK: at s1e1r, x0 ; encoding: [0x00,0x78,0x08,0xd5]
+ at s1e2r, x0
+; CHECK: at s1e2r, x0 ; encoding: [0x00,0x78,0x0c,0xd5]
+ at s1e3r, x0
+; CHECK: at s1e3r, x0 ; encoding: [0x00,0x78,0x0e,0xd5]
+ at s1e1w, x0
+; CHECK: at s1e1w, x0 ; encoding: [0x20,0x78,0x08,0xd5]
+ at s1e2w, x0
+; CHECK: at s1e2w, x0 ; encoding: [0x20,0x78,0x0c,0xd5]
+ at s1e3w, x0
+; CHECK: at s1e3w, x0 ; encoding: [0x20,0x78,0x0e,0xd5]
+ at s1e0r, x0
+; CHECK: at s1e0r, x0 ; encoding: [0x40,0x78,0x08,0xd5]
+ at s1e0w, x0
+; CHECK: at s1e0w, x0 ; encoding: [0x60,0x78,0x08,0xd5]
+ at s12e1r, x0
+; CHECK: at s12e1r, x0 ; encoding: [0x80,0x78,0x0c,0xd5]
+ at s12e1w, x0
+; CHECK: at s12e1w, x0 ; encoding: [0xa0,0x78,0x0c,0xd5]
+ at s12e0r, x0
+; CHECK: at s12e0r, x0 ; encoding: [0xc0,0x78,0x0c,0xd5]
+ at s12e0w, x0
+; CHECK: at s12e0w, x0 ; encoding: [0xe0,0x78,0x0c,0xd5]
+
+ tlbi vmalle1is
+; CHECK: tlbi vmalle1is ; encoding: [0x1f,0x83,0x08,0xd5]
+ tlbi alle2is
+; CHECK: tlbi alle2is ; encoding: [0x1f,0x83,0x0c,0xd5]
+ tlbi alle3is
+; CHECK: tlbi alle3is ; encoding: [0x1f,0x83,0x0e,0xd5]
+ tlbi vae1is, x0
+; CHECK: tlbi vae1is, x0 ; encoding: [0x20,0x83,0x08,0xd5]
+ tlbi vae2is, x0
+; CHECK: tlbi vae2is, x0 ; encoding: [0x20,0x83,0x0c,0xd5]
+ tlbi vae3is, x0
+; CHECK: tlbi vae3is, x0 ; encoding: [0x20,0x83,0x0e,0xd5]
+ tlbi aside1is, x0
+; CHECK: tlbi aside1is, x0 ; encoding: [0x40,0x83,0x08,0xd5]
+ tlbi vaae1is, x0
+; CHECK: tlbi vaae1is, x0 ; encoding: [0x60,0x83,0x08,0xd5]
+ tlbi alle1is
+; CHECK: tlbi alle1is ; encoding: [0x9f,0x83,0x0c,0xd5]
+ tlbi vale1is, x0
+; CHECK: tlbi vale1is, x0 ; encoding: [0xa0,0x83,0x08,0xd5]
+ tlbi vaale1is, x0
+; CHECK: tlbi vaale1is, x0 ; encoding: [0xe0,0x83,0x08,0xd5]
+ tlbi vmalle1
+; CHECK: tlbi vmalle1 ; encoding: [0x1f,0x87,0x08,0xd5]
+ tlbi alle2
+; CHECK: tlbi alle2 ; encoding: [0x1f,0x87,0x0c,0xd5]
+ tlbi vale2is, x0
+; CHECK: tlbi vale2is, x0 ; encoding: [0xa0,0x83,0x0c,0xd5]
+ tlbi vale3is, x0
+; CHECK: tlbi vale3is, x0 ; encoding: [0xa0,0x83,0x0e,0xd5]
+ tlbi alle3
+; CHECK: tlbi alle3 ; encoding: [0x1f,0x87,0x0e,0xd5]
+ tlbi vae1, x0
+; CHECK: tlbi vae1, x0 ; encoding: [0x20,0x87,0x08,0xd5]
+ tlbi vae2, x0
+; CHECK: tlbi vae2, x0 ; encoding: [0x20,0x87,0x0c,0xd5]
+ tlbi vae3, x0
+; CHECK: tlbi vae3, x0 ; encoding: [0x20,0x87,0x0e,0xd5]
+ tlbi aside1, x0
+; CHECK: tlbi aside1, x0 ; encoding: [0x40,0x87,0x08,0xd5]
+ tlbi vaae1, x0
+; CHECK: tlbi vaae1, x0 ; encoding: [0x60,0x87,0x08,0xd5]
+ tlbi alle1
+; CHECK: tlbi alle1 ; encoding: [0x9f,0x87,0x0c,0xd5]
+ tlbi vale1, x0
+; CHECK: tlbi vale1, x0 ; encoding: [0xa0,0x87,0x08,0xd5]
+ tlbi vale2, x0
+; CHECK: tlbi vale2, x0 ; encoding: [0xa0,0x87,0x0c,0xd5]
+ tlbi vale3, x0
+; CHECK: tlbi vale3, x0 ; encoding: [0xa0,0x87,0x0e,0xd5]
+ tlbi vaale1, x0
+; CHECK: tlbi vaale1, x0 ; encoding: [0xe0,0x87,0x08,0xd5]
+ tlbi ipas2e1, x0
+; CHECK: tlbi ipas2e1, x0 ; encoding: [0x20,0x84,0x0c,0xd5]
+ tlbi ipas2le1, x0
+; CHECK: tlbi ipas2le1, x0 ; encoding: [0xa0,0x84,0x0c,0xd5]
+ tlbi ipas2e1is, x0
+; CHECK: tlbi ipas2e1is, x0 ; encoding: [0x20,0x80,0x0c,0xd5]
+ tlbi ipas2le1is, x0
+; CHECK: tlbi ipas2le1is, x0 ; encoding: [0xa0,0x80,0x0c,0xd5]
+ tlbi vmalls12e1
+; CHECK: tlbi vmalls12e1 ; encoding: [0xdf,0x87,0x0c,0xd5]
+ tlbi vmalls12e1is
+; CHECK: tlbi vmalls12e1is ; encoding: [0xdf,0x83,0x0c,0xd5]
+
+;-----------------------------------------------------------------------------
+; 5.8.5 Vector Arithmetic aliases
+;-----------------------------------------------------------------------------
+
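+; Note: the "less-than" comparisons (cmls/cmlo/cmle/cmlt, fcmle/fcmlt,
+; facle/faclt) have no two-register encodings of their own; they assemble as
+; the corresponding "greater-than" instruction with the source operands
+; swapped, which is why every CHECK line shows v1 and v2 reversed.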
+ cmls.8b v0, v2, v1
+ cmls.16b v0, v2, v1
+ cmls.4h v0, v2, v1
+ cmls.8h v0, v2, v1
+ cmls.2s v0, v2, v1
+ cmls.4s v0, v2, v1
+ cmls.2d v0, v2, v1
+; CHECK: cmhs.8b v0, v1, v2
+; CHECK: cmhs.16b v0, v1, v2
+; CHECK: cmhs.4h v0, v1, v2
+; CHECK: cmhs.8h v0, v1, v2
+; CHECK: cmhs.2s v0, v1, v2
+; CHECK: cmhs.4s v0, v1, v2
+; CHECK: cmhs.2d v0, v1, v2
+
+ cmlo.8b v0, v2, v1
+ cmlo.16b v0, v2, v1
+ cmlo.4h v0, v2, v1
+ cmlo.8h v0, v2, v1
+ cmlo.2s v0, v2, v1
+ cmlo.4s v0, v2, v1
+ cmlo.2d v0, v2, v1
+; CHECK: cmhi.8b v0, v1, v2
+; CHECK: cmhi.16b v0, v1, v2
+; CHECK: cmhi.4h v0, v1, v2
+; CHECK: cmhi.8h v0, v1, v2
+; CHECK: cmhi.2s v0, v1, v2
+; CHECK: cmhi.4s v0, v1, v2
+; CHECK: cmhi.2d v0, v1, v2
+
+ cmle.8b v0, v2, v1
+ cmle.16b v0, v2, v1
+ cmle.4h v0, v2, v1
+ cmle.8h v0, v2, v1
+ cmle.2s v0, v2, v1
+ cmle.4s v0, v2, v1
+ cmle.2d v0, v2, v1
+; CHECK: cmge.8b v0, v1, v2
+; CHECK: cmge.16b v0, v1, v2
+; CHECK: cmge.4h v0, v1, v2
+; CHECK: cmge.8h v0, v1, v2
+; CHECK: cmge.2s v0, v1, v2
+; CHECK: cmge.4s v0, v1, v2
+; CHECK: cmge.2d v0, v1, v2
+
+ cmlt.8b v0, v2, v1
+ cmlt.16b v0, v2, v1
+ cmlt.4h v0, v2, v1
+ cmlt.8h v0, v2, v1
+ cmlt.2s v0, v2, v1
+ cmlt.4s v0, v2, v1
+ cmlt.2d v0, v2, v1
+; CHECK: cmgt.8b v0, v1, v2
+; CHECK: cmgt.16b v0, v1, v2
+; CHECK: cmgt.4h v0, v1, v2
+; CHECK: cmgt.8h v0, v1, v2
+; CHECK: cmgt.2s v0, v1, v2
+; CHECK: cmgt.4s v0, v1, v2
+; CHECK: cmgt.2d v0, v1, v2
+
+ fcmle.2s v0, v2, v1
+ fcmle.4s v0, v2, v1
+ fcmle.2d v0, v2, v1
+; CHECK: fcmge.2s v0, v1, v2
+; CHECK: fcmge.4s v0, v1, v2
+; CHECK: fcmge.2d v0, v1, v2
+
+ fcmlt.2s v0, v2, v1
+ fcmlt.4s v0, v2, v1
+ fcmlt.2d v0, v2, v1
+; CHECK: fcmgt.2s v0, v1, v2
+; CHECK: fcmgt.4s v0, v1, v2
+; CHECK: fcmgt.2d v0, v1, v2
+
+ facle.2s v0, v2, v1
+ facle.4s v0, v2, v1
+ facle.2d v0, v2, v1
+; CHECK: facge.2s v0, v1, v2
+; CHECK: facge.4s v0, v1, v2
+; CHECK: facge.2d v0, v1, v2
+
+ faclt.2s v0, v2, v1
+ faclt.4s v0, v2, v1
+ faclt.2d v0, v2, v1
+; CHECK: facgt.2s v0, v1, v2
+; CHECK: facgt.4s v0, v1, v2
+; CHECK: facgt.2d v0, v1, v2
+
+;-----------------------------------------------------------------------------
+; 5.8.6 Scalar Arithmetic aliases
+;-----------------------------------------------------------------------------
+
+ cmls d0, d2, d1
+; CHECK: cmhs d0, d1, d2
+
+ cmle d0, d2, d1
+; CHECK: cmge d0, d1, d2
+
+ cmlo d0, d2, d1
+; CHECK: cmhi d0, d1, d2
+
+ cmlt d0, d2, d1
+; CHECK: cmgt d0, d1, d2
+
+ fcmle s0, s2, s1
+ fcmle d0, d2, d1
+; CHECK: fcmge s0, s1, s2
+; CHECK: fcmge d0, d1, d2
+
+ fcmlt s0, s2, s1
+ fcmlt d0, d2, d1
+; CHECK: fcmgt s0, s1, s2
+; CHECK: fcmgt d0, d1, d2
+
+ facle s0, s2, s1
+ facle d0, d2, d1
+; CHECK: facge s0, s1, s2
+; CHECK: facge d0, d1, d2
+
+ faclt s0, s2, s1
+ faclt d0, d2, d1
+; CHECK: facgt s0, s1, s2
+; CHECK: facgt d0, d1, d2
+
+;-----------------------------------------------------------------------------
+; 5.8.14 Vector Shift (immediate)
+;-----------------------------------------------------------------------------
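+; sxtl/uxtl (and sxtl2/uxtl2) are aliases for sshll/ushll (sshll2/ushll2)
+; with a shift amount of #0, as every CHECK line below shows.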
+ sxtl v1.8h, v2.8b
+; CHECK: sshll.8h v1, v2, #0
+ sxtl.8h v1, v2
+; CHECK: sshll.8h v1, v2, #0
+
+ sxtl v1.4s, v2.4h
+; CHECK: sshll.4s v1, v2, #0
+ sxtl.4s v1, v2
+; CHECK: sshll.4s v1, v2, #0
+
+ sxtl v1.2d, v2.2s
+; CHECK: sshll.2d v1, v2, #0
+ sxtl.2d v1, v2
+; CHECK: sshll.2d v1, v2, #0
+
+ sxtl2 v1.8h, v2.16b
+; CHECK: sshll2.8h v1, v2, #0
+ sxtl2.8h v1, v2
+; CHECK: sshll2.8h v1, v2, #0
+
+ sxtl2 v1.4s, v2.8h
+; CHECK: sshll2.4s v1, v2, #0
+ sxtl2.4s v1, v2
+; CHECK: sshll2.4s v1, v2, #0
+
+ sxtl2 v1.2d, v2.4s
+; CHECK: sshll2.2d v1, v2, #0
+ sxtl2.2d v1, v2
+; CHECK: sshll2.2d v1, v2, #0
+
+ uxtl v1.8h, v2.8b
+; CHECK: ushll.8h v1, v2, #0
+ uxtl.8h v1, v2
+; CHECK: ushll.8h v1, v2, #0
+
+ uxtl v1.4s, v2.4h
+; CHECK: ushll.4s v1, v2, #0
+ uxtl.4s v1, v2
+; CHECK: ushll.4s v1, v2, #0
+
+ uxtl v1.2d, v2.2s
+; CHECK: ushll.2d v1, v2, #0
+ uxtl.2d v1, v2
+; CHECK: ushll.2d v1, v2, #0
+
+ uxtl2 v1.8h, v2.16b
+; CHECK: ushll2.8h v1, v2, #0
+ uxtl2.8h v1, v2
+; CHECK: ushll2.8h v1, v2, #0
+
+ uxtl2 v1.4s, v2.8h
+; CHECK: ushll2.4s v1, v2, #0
+ uxtl2.4s v1, v2
+; CHECK: ushll2.4s v1, v2, #0
+
+ uxtl2 v1.2d, v2.4s
+; CHECK: ushll2.2d v1, v2, #0
+ uxtl2.2d v1, v2
+; CHECK: ushll2.2d v1, v2, #0
+
+
+;-----------------------------------------------------------------------------
+; MOVI verbose syntax with shift operand omitted.
+;-----------------------------------------------------------------------------
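+; Note: with the shift operand omitted, "lsl #0" is assumed; upper- and
+; lower-case arrangement specifiers (16B/16b, 8B/8b, 2D/2d) are both accepted.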
+ movi v4.16b, #0x00
+ movi v4.16B, #0x01
+ movi v4.8b, #0x02
+ movi v4.8B, #0x03
+ movi v1.2d, #0x000000000000ff
+ movi v2.2D, #0x000000000000ff
+
+; CHECK: movi.16b v4, #0 ; encoding: [0x04,0xe4,0x00,0x4f]
+; CHECK: movi.16b v4, #0x1 ; encoding: [0x24,0xe4,0x00,0x4f]
+; CHECK: movi.8b v4, #0x2 ; encoding: [0x44,0xe4,0x00,0x0f]
+; CHECK: movi.8b v4, #0x3 ; encoding: [0x64,0xe4,0x00,0x0f]
+; CHECK: movi.2d v1, #0x000000000000ff ; encoding: [0x21,0xe4,0x00,0x6f]
+; CHECK: movi.2d v2, #0x000000000000ff ; encoding: [0x22,0xe4,0x00,0x6f]
diff --git a/test/MC/AArch64/arm64-arithmetic-encoding.s b/test/MC/AArch64/arm64-arithmetic-encoding.s
new file mode 100644
index 000000000000..5fd591240e25
--- /dev/null
+++ b/test/MC/AArch64/arm64-arithmetic-encoding.s
@@ -0,0 +1,615 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon -show-encoding < %s | FileCheck %s
+
+foo:
+;==---------------------------------------------------------------------------==
+; Add/Subtract with carry/borrow
+;==---------------------------------------------------------------------------==
+
+ adc w1, w2, w3
+ adc x1, x2, x3
+ adcs w5, w4, w3
+ adcs x5, x4, x3
+
+; CHECK: adc w1, w2, w3 ; encoding: [0x41,0x00,0x03,0x1a]
+; CHECK: adc x1, x2, x3 ; encoding: [0x41,0x00,0x03,0x9a]
+; CHECK: adcs w5, w4, w3 ; encoding: [0x85,0x00,0x03,0x3a]
+; CHECK: adcs x5, x4, x3 ; encoding: [0x85,0x00,0x03,0xba]
+
+ sbc w1, w2, w3
+ sbc x1, x2, x3
+ sbcs w1, w2, w3
+ sbcs x1, x2, x3
+
+; CHECK: sbc w1, w2, w3 ; encoding: [0x41,0x00,0x03,0x5a]
+; CHECK: sbc x1, x2, x3 ; encoding: [0x41,0x00,0x03,0xda]
+; CHECK: sbcs w1, w2, w3 ; encoding: [0x41,0x00,0x03,0x7a]
+; CHECK: sbcs x1, x2, x3 ; encoding: [0x41,0x00,0x03,0xfa]
+
+;==---------------------------------------------------------------------------==
+; Add/Subtract with (optionally shifted) immediate
+;==---------------------------------------------------------------------------==
+
+ add w3, w4, #1024
+ add w3, w4, #1024, lsl #0
+ add x3, x4, #1024
+ add x3, x4, #1024, lsl #0
+
+; CHECK: add w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x11]
+; CHECK: add w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x11]
+; CHECK: add x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0x91]
+; CHECK: add x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0x91]
+
+ add w3, w4, #1024, lsl #12
+ add w3, w4, #4194304
+ add w3, w4, #0, lsl #12
+ add x3, x4, #1024, lsl #12
+ add x3, x4, #4194304
+ add x3, x4, #0, lsl #12
+ add sp, sp, #32
+
+; CHECK: add w3, w4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0x11]
+; CHECK: add w3, w4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0x11]
+; CHECK: add w3, w4, #0, lsl #12 ; encoding: [0x83,0x00,0x40,0x11]
+; CHECK: add x3, x4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0x91]
+; CHECK: add x3, x4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0x91]
+; CHECK: add x3, x4, #0, lsl #12 ; encoding: [0x83,0x00,0x40,0x91]
+; CHECK: add sp, sp, #32 ; encoding: [0xff,0x83,0x00,0x91]
+
+ adds w3, w4, #1024
+ adds w3, w4, #1024, lsl #0
+ adds w3, w4, #1024, lsl #12
+ adds x3, x4, #1024
+ adds x3, x4, #1024, lsl #0
+ adds x3, x4, #1024, lsl #12
+
+; CHECK: adds w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x31]
+; CHECK: adds w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x31]
+; CHECK: adds w3, w4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0x31]
+; CHECK: adds x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0xb1]
+; CHECK: adds x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0xb1]
+; CHECK: adds x3, x4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0xb1]
+
+ sub w3, w4, #1024
+ sub w3, w4, #1024, lsl #0
+ sub w3, w4, #1024, lsl #12
+ sub x3, x4, #1024
+ sub x3, x4, #1024, lsl #0
+ sub x3, x4, #1024, lsl #12
+ sub sp, sp, #32
+
+; CHECK: sub w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x51]
+; CHECK: sub w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x51]
+; CHECK: sub w3, w4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0x51]
+; CHECK: sub x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0xd1]
+; CHECK: sub x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0xd1]
+; CHECK: sub x3, x4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0xd1]
+; CHECK: sub sp, sp, #32 ; encoding: [0xff,0x83,0x00,0xd1]
+
+ subs w3, w4, #1024
+ subs w3, w4, #1024, lsl #0
+ subs w3, w4, #1024, lsl #12
+ subs x3, x4, #1024
+ subs x3, x4, #1024, lsl #0
+ subs x3, x4, #1024, lsl #12
+
+; CHECK: subs w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x71]
+; CHECK: subs w3, w4, #1024 ; encoding: [0x83,0x00,0x10,0x71]
+; CHECK: subs w3, w4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0x71]
+; CHECK: subs x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0xf1]
+; CHECK: subs x3, x4, #1024 ; encoding: [0x83,0x00,0x10,0xf1]
+; CHECK: subs x3, x4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0xf1]
+
+;==---------------------------------------------------------------------------==
+; Add/Subtract register with (optional) shift
+;==---------------------------------------------------------------------------==
+
+ add w12, w13, w14
+ add x12, x13, x14
+ add w12, w13, w14, lsl #12
+ add x12, x13, x14, lsl #12
+ add x12, x13, x14, lsr #42
+ add x12, x13, x14, asr #39
+
+; CHECK: add w12, w13, w14 ; encoding: [0xac,0x01,0x0e,0x0b]
+; CHECK: add x12, x13, x14 ; encoding: [0xac,0x01,0x0e,0x8b]
+; CHECK: add w12, w13, w14, lsl #12 ; encoding: [0xac,0x31,0x0e,0x0b]
+; CHECK: add x12, x13, x14, lsl #12 ; encoding: [0xac,0x31,0x0e,0x8b]
+; CHECK: add x12, x13, x14, lsr #42 ; encoding: [0xac,0xa9,0x4e,0x8b]
+; CHECK: add x12, x13, x14, asr #39 ; encoding: [0xac,0x9d,0x8e,0x8b]
+
+ sub w12, w13, w14
+ sub x12, x13, x14
+ sub w12, w13, w14, lsl #12
+ sub x12, x13, x14, lsl #12
+ sub x12, x13, x14, lsr #42
+ sub x12, x13, x14, asr #39
+
+; CHECK: sub w12, w13, w14 ; encoding: [0xac,0x01,0x0e,0x4b]
+; CHECK: sub x12, x13, x14 ; encoding: [0xac,0x01,0x0e,0xcb]
+; CHECK: sub w12, w13, w14, lsl #12 ; encoding: [0xac,0x31,0x0e,0x4b]
+; CHECK: sub x12, x13, x14, lsl #12 ; encoding: [0xac,0x31,0x0e,0xcb]
+; CHECK: sub x12, x13, x14, lsr #42 ; encoding: [0xac,0xa9,0x4e,0xcb]
+; CHECK: sub x12, x13, x14, asr #39 ; encoding: [0xac,0x9d,0x8e,0xcb]
+
+ adds w12, w13, w14
+ adds x12, x13, x14
+ adds w12, w13, w14, lsl #12
+ adds x12, x13, x14, lsl #12
+ adds x12, x13, x14, lsr #42
+ adds x12, x13, x14, asr #39
+
+; CHECK: adds w12, w13, w14 ; encoding: [0xac,0x01,0x0e,0x2b]
+; CHECK: adds x12, x13, x14 ; encoding: [0xac,0x01,0x0e,0xab]
+; CHECK: adds w12, w13, w14, lsl #12 ; encoding: [0xac,0x31,0x0e,0x2b]
+; CHECK: adds x12, x13, x14, lsl #12 ; encoding: [0xac,0x31,0x0e,0xab]
+; CHECK: adds x12, x13, x14, lsr #42 ; encoding: [0xac,0xa9,0x4e,0xab]
+; CHECK: adds x12, x13, x14, asr #39 ; encoding: [0xac,0x9d,0x8e,0xab]
+
+ subs w12, w13, w14
+ subs x12, x13, x14
+ subs w12, w13, w14, lsl #12
+ subs x12, x13, x14, lsl #12
+ subs x12, x13, x14, lsr #42
+ subs x12, x13, x14, asr #39
+
+; CHECK: subs w12, w13, w14 ; encoding: [0xac,0x01,0x0e,0x6b]
+; CHECK: subs x12, x13, x14 ; encoding: [0xac,0x01,0x0e,0xeb]
+; CHECK: subs w12, w13, w14, lsl #12 ; encoding: [0xac,0x31,0x0e,0x6b]
+; CHECK: subs x12, x13, x14, lsl #12 ; encoding: [0xac,0x31,0x0e,0xeb]
+; CHECK: subs x12, x13, x14, lsr #42 ; encoding: [0xac,0xa9,0x4e,0xeb]
+; CHECK: subs x12, x13, x14, asr #39 ; encoding: [0xac,0x9d,0x8e,0xeb]
+
+; Check use of upper case register names rdar://14354073
+ add X2, X2, X2
+; CHECK: add x2, x2, x2 ; encoding: [0x42,0x00,0x02,0x8b]
+
+;==---------------------------------------------------------------------------==
+; Add/Subtract with (optional) extend
+;==---------------------------------------------------------------------------==
+
+ add w1, w2, w3, uxtb
+ add w1, w2, w3, uxth
+ add w1, w2, w3, uxtw
+ add w1, w2, w3, uxtx
+ add w1, w2, w3, sxtb
+ add w1, w2, w3, sxth
+ add w1, w2, w3, sxtw
+ add w1, w2, w3, sxtx
+
+; CHECK: add w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x0b]
+; CHECK: add w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x0b]
+; CHECK: add w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x0b]
+; CHECK: add w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x0b]
+; CHECK: add w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x0b]
+; CHECK: add w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x0b]
+; CHECK: add w1, w2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0x0b]
+; CHECK: add w1, w2, w3, sxtx ; encoding: [0x41,0xe0,0x23,0x0b]
+
+ add x1, x2, w3, uxtb
+ add x1, x2, w3, uxth
+ add x1, x2, w3, uxtw
+ add x1, x2, w3, sxtb
+ add x1, x2, w3, sxth
+ add x1, x2, w3, sxtw
+
+; CHECK: add x1, x2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x8b]
+; CHECK: add x1, x2, w3, uxth ; encoding: [0x41,0x20,0x23,0x8b]
+; CHECK: add x1, x2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x8b]
+; CHECK: add x1, x2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x8b]
+; CHECK: add x1, x2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x8b]
+; CHECK: add x1, x2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0x8b]
+
+ add w1, wsp, w3
+ add w1, wsp, w3, uxtw #0
+ add w2, wsp, w3, lsl #1
+ add sp, x2, x3
+ add sp, x2, x3, uxtx #0
+
+; CHECK: add w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x0b]
+; CHECK: add w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x0b]
+; CHECK: add w2, wsp, w3, lsl #1 ; encoding: [0xe2,0x47,0x23,0x0b]
+; CHECK: add sp, x2, x3 ; encoding: [0x5f,0x60,0x23,0x8b]
+; CHECK: add sp, x2, x3 ; encoding: [0x5f,0x60,0x23,0x8b]
+
+ sub w1, w2, w3, uxtb
+ sub w1, w2, w3, uxth
+ sub w1, w2, w3, uxtw
+ sub w1, w2, w3, uxtx
+ sub w1, w2, w3, sxtb
+ sub w1, w2, w3, sxth
+ sub w1, w2, w3, sxtw
+ sub w1, w2, w3, sxtx
+
+; CHECK: sub w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x4b]
+; CHECK: sub w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x4b]
+; CHECK: sub w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x4b]
+; CHECK: sub w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x4b]
+; CHECK: sub w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x4b]
+; CHECK: sub w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x4b]
+; CHECK: sub w1, w2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0x4b]
+; CHECK: sub w1, w2, w3, sxtx ; encoding: [0x41,0xe0,0x23,0x4b]
+
+ sub x1, x2, w3, uxtb
+ sub x1, x2, w3, uxth
+ sub x1, x2, w3, uxtw
+ sub x1, x2, w3, sxtb
+ sub x1, x2, w3, sxth
+ sub x1, x2, w3, sxtw
+
+; CHECK: sub x1, x2, w3, uxtb ; encoding: [0x41,0x00,0x23,0xcb]
+; CHECK: sub x1, x2, w3, uxth ; encoding: [0x41,0x20,0x23,0xcb]
+; CHECK: sub x1, x2, w3, uxtw ; encoding: [0x41,0x40,0x23,0xcb]
+; CHECK: sub x1, x2, w3, sxtb ; encoding: [0x41,0x80,0x23,0xcb]
+; CHECK: sub x1, x2, w3, sxth ; encoding: [0x41,0xa0,0x23,0xcb]
+; CHECK: sub x1, x2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0xcb]
+
+ sub w1, wsp, w3
+ sub w1, wsp, w3, uxtw #0
+ sub sp, x2, x3
+ sub sp, x2, x3, uxtx #0
+ sub sp, x3, x7, lsl #4
+
+; CHECK: sub w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x4b]
+; CHECK: sub w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x4b]
+; CHECK: sub sp, x2, x3 ; encoding: [0x5f,0x60,0x23,0xcb]
+; CHECK: sub sp, x2, x3 ; encoding: [0x5f,0x60,0x23,0xcb]
+; CHECK: sub sp, x3, x7, lsl #4    ; encoding: [0x7f,0x70,0x27,0xcb]
+
+ adds w1, w2, w3, uxtb
+ adds w1, w2, w3, uxth
+ adds w1, w2, w3, uxtw
+ adds w1, w2, w3, uxtx
+ adds w1, w2, w3, sxtb
+ adds w1, w2, w3, sxth
+ adds w1, w2, w3, sxtw
+ adds w1, w2, w3, sxtx
+
+; CHECK: adds w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x2b]
+; CHECK: adds w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x2b]
+; CHECK: adds w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x2b]
+; CHECK: adds w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x2b]
+; CHECK: adds w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x2b]
+; CHECK: adds w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x2b]
+; CHECK: adds w1, w2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0x2b]
+; CHECK: adds w1, w2, w3, sxtx ; encoding: [0x41,0xe0,0x23,0x2b]
+
+ adds x1, x2, w3, uxtb
+ adds x1, x2, w3, uxth
+ adds x1, x2, w3, uxtw
+ adds x1, x2, w3, uxtx
+ adds x1, x2, w3, sxtb
+ adds x1, x2, w3, sxth
+ adds x1, x2, w3, sxtw
+ adds x1, x2, w3, sxtx
+
+; CHECK: adds x1, x2, w3, uxtb ; encoding: [0x41,0x00,0x23,0xab]
+; CHECK: adds x1, x2, w3, uxth ; encoding: [0x41,0x20,0x23,0xab]
+; CHECK: adds x1, x2, w3, uxtw ; encoding: [0x41,0x40,0x23,0xab]
+; CHECK: adds x1, x2, w3, uxtx ; encoding: [0x41,0x60,0x23,0xab]
+; CHECK: adds x1, x2, w3, sxtb ; encoding: [0x41,0x80,0x23,0xab]
+; CHECK: adds x1, x2, w3, sxth ; encoding: [0x41,0xa0,0x23,0xab]
+; CHECK: adds x1, x2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0xab]
+; CHECK: adds x1, x2, w3, sxtx ; encoding: [0x41,0xe0,0x23,0xab]
+
+ adds w1, wsp, w3
+ adds w1, wsp, w3, uxtw #0
+ adds wzr, wsp, w3, lsl #4
+
+; CHECK: adds w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x2b]
+; CHECK: adds w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x2b]
+; CHECK: cmn wsp, w3, lsl #4 ; encoding: [0xff,0x53,0x23,0x2b]
+
+ subs w1, w2, w3, uxtb
+ subs w1, w2, w3, uxth
+ subs w1, w2, w3, uxtw
+ subs w1, w2, w3, uxtx
+ subs w1, w2, w3, sxtb
+ subs w1, w2, w3, sxth
+ subs w1, w2, w3, sxtw
+ subs w1, w2, w3, sxtx
+
+; CHECK: subs w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x6b]
+; CHECK: subs w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x6b]
+; CHECK: subs w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x6b]
+; CHECK: subs w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x6b]
+; CHECK: subs w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x6b]
+; CHECK: subs w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x6b]
+; CHECK: subs w1, w2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0x6b]
+; CHECK: subs w1, w2, w3, sxtx ; encoding: [0x41,0xe0,0x23,0x6b]
+
+ subs x1, x2, w3, uxtb
+ subs x1, x2, w3, uxth
+ subs x1, x2, w3, uxtw
+ subs x1, x2, w3, uxtx
+ subs x1, x2, w3, sxtb
+ subs x1, x2, w3, sxth
+ subs x1, x2, w3, sxtw
+ subs x1, x2, w3, sxtx
+
+; CHECK: subs x1, x2, w3, uxtb ; encoding: [0x41,0x00,0x23,0xeb]
+; CHECK: subs x1, x2, w3, uxth ; encoding: [0x41,0x20,0x23,0xeb]
+; CHECK: subs x1, x2, w3, uxtw ; encoding: [0x41,0x40,0x23,0xeb]
+; CHECK: subs x1, x2, w3, uxtx ; encoding: [0x41,0x60,0x23,0xeb]
+; CHECK: subs x1, x2, w3, sxtb ; encoding: [0x41,0x80,0x23,0xeb]
+; CHECK: subs x1, x2, w3, sxth ; encoding: [0x41,0xa0,0x23,0xeb]
+; CHECK: subs x1, x2, w3, sxtw ; encoding: [0x41,0xc0,0x23,0xeb]
+; CHECK: subs x1, x2, w3, sxtx ; encoding: [0x41,0xe0,0x23,0xeb]
+
+ subs w1, wsp, w3
+ subs w1, wsp, w3, uxtw #0
+
+; CHECK: subs w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x6b]
+; CHECK: subs w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x6b]
+
+ cmp wsp, w9, lsl #0
+ subs x3, sp, x9, lsl #2
+ cmp wsp, w8, uxtw
+ subs wzr, wsp, w8, uxtw
+ cmp sp, w8, uxtw
+ subs xzr, sp, w8, uxtw
+
+; CHECK: cmp wsp, w9 ; encoding: [0xff,0x43,0x29,0x6b]
+; CHECK: subs x3, sp, x9, lsl #2 ; encoding: [0xe3,0x6b,0x29,0xeb]
+; CHECK: cmp wsp, w8 ; encoding: [0xff,0x43,0x28,0x6b]
+; CHECK: cmp wsp, w8 ; encoding: [0xff,0x43,0x28,0x6b]
+; CHECK: cmp sp, w8, uxtw ; encoding: [0xff,0x43,0x28,0xeb]
+; CHECK: cmp sp, w8, uxtw ; encoding: [0xff,0x43,0x28,0xeb]
+
+ sub wsp, w9, w8, uxtw
+ sub w1, wsp, w8, uxtw
+ sub wsp, wsp, w8, uxtw
+ sub sp, x9, w8, uxtw
+ sub x1, sp, w8, uxtw
+ sub sp, sp, w8, uxtw
+ subs w1, wsp, w8, uxtw
+ subs x1, sp, w8, uxtw
+
+; CHECK: sub wsp, w9, w8 ; encoding: [0x3f,0x41,0x28,0x4b]
+; CHECK: sub w1, wsp, w8 ; encoding: [0xe1,0x43,0x28,0x4b]
+; CHECK: sub wsp, wsp, w8 ; encoding: [0xff,0x43,0x28,0x4b]
+; CHECK: sub sp, x9, w8, uxtw ; encoding: [0x3f,0x41,0x28,0xcb]
+; CHECK: sub x1, sp, w8, uxtw ; encoding: [0xe1,0x43,0x28,0xcb]
+; CHECK: sub sp, sp, w8, uxtw ; encoding: [0xff,0x43,0x28,0xcb]
+; CHECK: subs w1, wsp, w8 ; encoding: [0xe1,0x43,0x28,0x6b]
+; CHECK: subs x1, sp, w8, uxtw ; encoding: [0xe1,0x43,0x28,0xeb]
+
+;==---------------------------------------------------------------------------==
+; Signed/Unsigned divide
+;==---------------------------------------------------------------------------==
+
+ sdiv w1, w2, w3
+ sdiv x1, x2, x3
+ udiv w1, w2, w3
+ udiv x1, x2, x3
+
+; CHECK: sdiv w1, w2, w3 ; encoding: [0x41,0x0c,0xc3,0x1a]
+; CHECK: sdiv x1, x2, x3 ; encoding: [0x41,0x0c,0xc3,0x9a]
+; CHECK: udiv w1, w2, w3 ; encoding: [0x41,0x08,0xc3,0x1a]
+; CHECK: udiv x1, x2, x3 ; encoding: [0x41,0x08,0xc3,0x9a]
+
+;==---------------------------------------------------------------------------==
+; Variable shifts
+;==---------------------------------------------------------------------------==
+
+ asrv w1, w2, w3
+ asrv x1, x2, x3
+ asr w1, w2, w3
+ asr x1, x2, x3
+ lslv w1, w2, w3
+ lslv x1, x2, x3
+ lsl w1, w2, w3
+ lsl x1, x2, x3
+ lsrv w1, w2, w3
+ lsrv x1, x2, x3
+ lsr w1, w2, w3
+ lsr x1, x2, x3
+ rorv w1, w2, w3
+ rorv x1, x2, x3
+ ror w1, w2, w3
+ ror x1, x2, x3
+
+; CHECK: encoding: [0x41,0x28,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x28,0xc3,0x9a]
+; CHECK: encoding: [0x41,0x28,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x28,0xc3,0x9a]
+; CHECK: encoding: [0x41,0x20,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x20,0xc3,0x9a]
+; CHECK: encoding: [0x41,0x20,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x20,0xc3,0x9a]
+; CHECK: encoding: [0x41,0x24,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x24,0xc3,0x9a]
+; CHECK: encoding: [0x41,0x24,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x24,0xc3,0x9a]
+; CHECK: encoding: [0x41,0x2c,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x2c,0xc3,0x9a]
+; CHECK: encoding: [0x41,0x2c,0xc3,0x1a]
+; CHECK: encoding: [0x41,0x2c,0xc3,0x9a]
+
+;==---------------------------------------------------------------------------==
+; One operand instructions
+;==---------------------------------------------------------------------------==
+
+ cls w1, w2
+ cls x1, x2
+ clz w1, w2
+ clz x1, x2
+ rbit w1, w2
+ rbit x1, x2
+ rev w1, w2
+ rev x1, x2
+ rev16 w1, w2
+ rev16 x1, x2
+ rev32 x1, x2
+
+; CHECK: encoding: [0x41,0x14,0xc0,0x5a]
+; CHECK: encoding: [0x41,0x14,0xc0,0xda]
+; CHECK: encoding: [0x41,0x10,0xc0,0x5a]
+; CHECK: encoding: [0x41,0x10,0xc0,0xda]
+; CHECK: encoding: [0x41,0x00,0xc0,0x5a]
+; CHECK: encoding: [0x41,0x00,0xc0,0xda]
+; CHECK: encoding: [0x41,0x08,0xc0,0x5a]
+; CHECK: encoding: [0x41,0x0c,0xc0,0xda]
+; CHECK: encoding: [0x41,0x04,0xc0,0x5a]
+; CHECK: encoding: [0x41,0x04,0xc0,0xda]
+; CHECK: encoding: [0x41,0x08,0xc0,0xda]
+
+;==---------------------------------------------------------------------------==
+; 6.6.1 Multiply-add instructions
+;==---------------------------------------------------------------------------==
+
+ madd w1, w2, w3, w4
+ madd x1, x2, x3, x4
+ msub w1, w2, w3, w4
+ msub x1, x2, x3, x4
+ smaddl x1, w2, w3, x4
+ smsubl x1, w2, w3, x4
+ umaddl x1, w2, w3, x4
+ umsubl x1, w2, w3, x4
+
+; CHECK: madd w1, w2, w3, w4 ; encoding: [0x41,0x10,0x03,0x1b]
+; CHECK: madd x1, x2, x3, x4 ; encoding: [0x41,0x10,0x03,0x9b]
+; CHECK: msub w1, w2, w3, w4 ; encoding: [0x41,0x90,0x03,0x1b]
+; CHECK: msub x1, x2, x3, x4 ; encoding: [0x41,0x90,0x03,0x9b]
+; CHECK: smaddl x1, w2, w3, x4 ; encoding: [0x41,0x10,0x23,0x9b]
+; CHECK: smsubl x1, w2, w3, x4 ; encoding: [0x41,0x90,0x23,0x9b]
+; CHECK: umaddl x1, w2, w3, x4 ; encoding: [0x41,0x10,0xa3,0x9b]
+; CHECK: umsubl x1, w2, w3, x4 ; encoding: [0x41,0x90,0xa3,0x9b]
+
+;==---------------------------------------------------------------------------==
+; Multiply-high instructions
+;==---------------------------------------------------------------------------==
+
+ smulh x1, x2, x3
+ umulh x1, x2, x3
+
+; CHECK: smulh x1, x2, x3 ; encoding: [0x41,0x7c,0x43,0x9b]
+; CHECK: umulh x1, x2, x3 ; encoding: [0x41,0x7c,0xc3,0x9b]
+
+;==---------------------------------------------------------------------------==
+; Move immediate instructions
+;==---------------------------------------------------------------------------==
+
+ movz w0, #1
+ movz x0, #1
+ movz w0, #1, lsl #16
+ movz x0, #1, lsl #16
+
+; CHECK: movz w0, #0x1 ; encoding: [0x20,0x00,0x80,0x52]
+; CHECK: movz x0, #0x1 ; encoding: [0x20,0x00,0x80,0xd2]
+; CHECK: movz w0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0x52]
+; CHECK: movz x0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0xd2]
+
+ movn w0, #2
+ movn x0, #2
+ movn w0, #2, lsl #16
+ movn x0, #2, lsl #16
+
+; CHECK: movn w0, #0x2 ; encoding: [0x40,0x00,0x80,0x12]
+; CHECK: movn x0, #0x2 ; encoding: [0x40,0x00,0x80,0x92]
+; CHECK: movn w0, #0x2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x12]
+; CHECK: movn x0, #0x2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x92]
+
+ movk w0, #1
+ movk x0, #1
+ movk w0, #1, lsl #16
+ movk x0, #1, lsl #16
+
+; CHECK: movk w0, #0x1 ; encoding: [0x20,0x00,0x80,0x72]
+; CHECK: movk x0, #0x1 ; encoding: [0x20,0x00,0x80,0xf2]
+; CHECK: movk w0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0x72]
+; CHECK: movk x0, #0x1, lsl #16 ; encoding: [0x20,0x00,0xa0,0xf2]
+
+;==---------------------------------------------------------------------------==
+; Conditionally set flags instructions
+;==---------------------------------------------------------------------------==
+
+ ccmn w1, #2, #3, eq
+ ccmn x1, #2, #3, eq
+ ccmp w1, #2, #3, eq
+ ccmp x1, #2, #3, eq
+
+; CHECK: encoding: [0x23,0x08,0x42,0x3a]
+; CHECK: encoding: [0x23,0x08,0x42,0xba]
+; CHECK: encoding: [0x23,0x08,0x42,0x7a]
+; CHECK: encoding: [0x23,0x08,0x42,0xfa]
+
+ ccmn w1, w2, #3, eq
+ ccmn x1, x2, #3, eq
+ ccmp w1, w2, #3, eq
+ ccmp x1, x2, #3, eq
+
+; CHECK: encoding: [0x23,0x00,0x42,0x3a]
+; CHECK: encoding: [0x23,0x00,0x42,0xba]
+; CHECK: encoding: [0x23,0x00,0x42,0x7a]
+; CHECK: encoding: [0x23,0x00,0x42,0xfa]
+
+;==---------------------------------------------------------------------------==
+; Conditional select instructions
+;==---------------------------------------------------------------------------==
+
+ csel w1, w2, w3, eq
+ csel x1, x2, x3, eq
+ csinc w1, w2, w3, eq
+ csinc x1, x2, x3, eq
+ csinv w1, w2, w3, eq
+ csinv x1, x2, x3, eq
+ csneg w1, w2, w3, eq
+ csneg x1, x2, x3, eq
+
+; CHECK: encoding: [0x41,0x00,0x83,0x1a]
+; CHECK: encoding: [0x41,0x00,0x83,0x9a]
+; CHECK: encoding: [0x41,0x04,0x83,0x1a]
+; CHECK: encoding: [0x41,0x04,0x83,0x9a]
+; CHECK: encoding: [0x41,0x00,0x83,0x5a]
+; CHECK: encoding: [0x41,0x00,0x83,0xda]
+; CHECK: encoding: [0x41,0x04,0x83,0x5a]
+; CHECK: encoding: [0x41,0x04,0x83,0xda]
+
+; Make sure we handle upper case, too. In particular, condition codes.
+ CSEL W16, W7, W27, EQ
+ CSEL W15, W6, W26, NE
+ CSEL W14, W5, W25, CS
+ CSEL W13, W4, W24, HS
+ csel w12, w3, w23, CC
+ csel w11, w2, w22, LO
+ csel w10, w1, w21, MI
+ csel x9, x9, x1, PL
+ csel x8, x8, x2, VS
+ CSEL X7, X7, X3, VC
+ CSEL X6, X7, X4, HI
+ CSEL X5, X6, X5, LS
+ CSEL X4, X5, X6, GE
+ csel x3, x4, x7, LT
+ csel x2, x3, x8, GT
+ csel x1, x2, x9, LE
+ csel x10, x1, x20, AL
+
+; CHECK: csel w16, w7, w27, eq ; encoding: [0xf0,0x00,0x9b,0x1a]
+; CHECK: csel w15, w6, w26, ne ; encoding: [0xcf,0x10,0x9a,0x1a]
+; CHECK: csel w14, w5, w25, hs ; encoding: [0xae,0x20,0x99,0x1a]
+; CHECK: csel w13, w4, w24, hs ; encoding: [0x8d,0x20,0x98,0x1a]
+; CHECK: csel w12, w3, w23, lo ; encoding: [0x6c,0x30,0x97,0x1a]
+; CHECK: csel w11, w2, w22, lo ; encoding: [0x4b,0x30,0x96,0x1a]
+; CHECK: csel w10, w1, w21, mi ; encoding: [0x2a,0x40,0x95,0x1a]
+; CHECK: csel x9, x9, x1, pl ; encoding: [0x29,0x51,0x81,0x9a]
+; CHECK: csel x8, x8, x2, vs ; encoding: [0x08,0x61,0x82,0x9a]
+; CHECK: csel x7, x7, x3, vc ; encoding: [0xe7,0x70,0x83,0x9a]
+; CHECK: csel x6, x7, x4, hi ; encoding: [0xe6,0x80,0x84,0x9a]
+; CHECK: csel x5, x6, x5, ls ; encoding: [0xc5,0x90,0x85,0x9a]
+; CHECK: csel x4, x5, x6, ge ; encoding: [0xa4,0xa0,0x86,0x9a]
+; CHECK: csel x3, x4, x7, lt ; encoding: [0x83,0xb0,0x87,0x9a]
+; CHECK: csel x2, x3, x8, gt ; encoding: [0x62,0xc0,0x88,0x9a]
+; CHECK: csel x1, x2, x9, le ; encoding: [0x41,0xd0,0x89,0x9a]
+; CHECK: csel x10, x1, x20, al ; encoding: [0x2a,0xe0,0x94,0x9a]
+
+
+;==---------------------------------------------------------------------------==
+; Scalar saturating arithmetic
+;==---------------------------------------------------------------------------==
+ uqxtn b4, h2
+ uqxtn h2, s3
+ uqxtn s9, d2
+
+; CHECK: uqxtn b4, h2 ; encoding: [0x44,0x48,0x21,0x7e]
+; CHECK: uqxtn h2, s3 ; encoding: [0x62,0x48,0x61,0x7e]
+; CHECK: uqxtn s9, d2 ; encoding: [0x49,0x48,0xa1,0x7e]
diff --git a/test/MC/AArch64/arm64-arm64-fixup.s b/test/MC/AArch64/arm64-arm64-fixup.s
new file mode 100644
index 000000000000..81306fb5ac06
--- /dev/null
+++ b/test/MC/AArch64/arm64-arm64-fixup.s
@@ -0,0 +1,10 @@
+; RUN: llvm-mc < %s -triple arm64-apple-darwin --show-encoding | FileCheck %s
+
+foo:
+ adr x3, Lbar
+; CHECK: adr x3, Lbar ; encoding: [0x03'A',A,A,0x10'A']
+; CHECK: fixup A - offset: 0, value: Lbar, kind: fixup_aarch64_pcrel_adr_imm21
+Lbar:
+ adrp x3, _printf@page
+; CHECK: adrp x3, _printf@PAGE ; encoding: [0x03'A',A,A,0x90'A']
+; CHECK: fixup A - offset: 0, value: _printf@PAGE, kind: fixup_aarch64_pcrel_adrp_imm21
diff --git a/test/MC/AArch64/arm64-basic-a64-instructions.s b/test/MC/AArch64/arm64-basic-a64-instructions.s
new file mode 100644
index 000000000000..2f58eadfc846
--- /dev/null
+++ b/test/MC/AArch64/arm64-basic-a64-instructions.s
@@ -0,0 +1,18 @@
+// RUN: llvm-mc -triple arm64 -mattr=+crc -show-encoding < %s | FileCheck %s
+
+ crc32b w5, w7, w20
+ crc32h w28, wzr, w30
+ crc32w w0, w1, w2
+ crc32x w7, w9, x20
+ crc32cb w9, w5, w4
+ crc32ch w13, w17, w25
+ crc32cw wzr, w3, w5
+ crc32cx w18, w16, xzr
+// CHECK: crc32b w5, w7, w20 // encoding: [0xe5,0x40,0xd4,0x1a]
+// CHECK: crc32h w28, wzr, w30 // encoding: [0xfc,0x47,0xde,0x1a]
+// CHECK: crc32w w0, w1, w2 // encoding: [0x20,0x48,0xc2,0x1a]
+// CHECK: crc32x w7, w9, x20 // encoding: [0x27,0x4d,0xd4,0x9a]
+// CHECK: crc32cb w9, w5, w4 // encoding: [0xa9,0x50,0xc4,0x1a]
+// CHECK: crc32ch w13, w17, w25 // encoding: [0x2d,0x56,0xd9,0x1a]
+// CHECK: crc32cw wzr, w3, w5 // encoding: [0x7f,0x58,0xc5,0x1a]
+// CHECK: crc32cx w18, w16, xzr // encoding: [0x12,0x5e,0xdf,0x9a]
diff --git a/test/MC/AArch64/arm64-be-datalayout.s b/test/MC/AArch64/arm64-be-datalayout.s
new file mode 100644
index 000000000000..f448a4b86e15
--- /dev/null
+++ b/test/MC/AArch64/arm64-be-datalayout.s
@@ -0,0 +1,4 @@
+// RUN: llvm-mc -filetype=obj -triple arm64_be %s | llvm-readobj -section-data -sections | FileCheck %s
+
+// CHECK: 0000: 00123456 789ABCDE
+foo: .xword 0x123456789abcde
diff --git a/test/MC/AArch64/arm64-bitfield-encoding.s b/test/MC/AArch64/arm64-bitfield-encoding.s
new file mode 100644
index 000000000000..1589aa7139f4
--- /dev/null
+++ b/test/MC/AArch64/arm64-bitfield-encoding.s
@@ -0,0 +1,38 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -show-encoding < %s | FileCheck %s
+
+foo:
+;==---------------------------------------------------------------------------==
+; 5.4.4 Bitfield Operations
+;==---------------------------------------------------------------------------==
+
+ bfm w1, w2, #1, #15
+ bfm x1, x2, #1, #15
+ sbfm w1, w2, #1, #15
+ sbfm x1, x2, #1, #15
+ ubfm w1, w2, #1, #15
+ ubfm x1, x2, #1, #15
+ sbfiz wzr, w0, #31, #1
+ sbfiz xzr, x0, #31, #1
+ ubfiz wzr, w0, #31, #1
+ ubfiz xzr, x0, #31, #1
+
+; CHECK: bfxil w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x33]
+; CHECK: bfxil x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0xb3]
+; CHECK: sbfx w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x13]
+; CHECK: sbfx x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0x93]
+; CHECK: ubfx w1, w2, #1, #15 ; encoding: [0x41,0x3c,0x01,0x53]
+; CHECK: ubfx x1, x2, #1, #15 ; encoding: [0x41,0x3c,0x41,0xd3]
+; CHECK: sbfiz wzr, w0, #31, #1 ; encoding: [0x1f,0x00,0x01,0x13]
+; CHECK: sbfiz xzr, x0, #31, #1 ; encoding: [0x1f,0x00,0x61,0x93]
+; CHECK: lsl wzr, w0, #31 ; encoding: [0x1f,0x00,0x01,0x53]
+; CHECK: ubfiz xzr, x0, #31, #1 ; encoding: [0x1f,0x00,0x61,0xd3]
+
+;==---------------------------------------------------------------------------==
+; 5.4.5 Extract (immediate)
+;==---------------------------------------------------------------------------==
+
+ extr w1, w2, w3, #15
+ extr x2, x3, x4, #1
+
+; CHECK: extr w1, w2, w3, #15 ; encoding: [0x41,0x3c,0x83,0x13]
+; CHECK: extr x2, x3, x4, #1 ; encoding: [0x62,0x04,0xc4,0x93]
diff --git a/test/MC/AArch64/arm64-branch-encoding.s b/test/MC/AArch64/arm64-branch-encoding.s
new file mode 100644
index 000000000000..48c2099012f6
--- /dev/null
+++ b/test/MC/AArch64/arm64-branch-encoding.s
@@ -0,0 +1,159 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -show-encoding < %s | FileCheck %s
+
+foo:
+
+;-----------------------------------------------------------------------------
+; Unconditional branch (register) instructions.
+;-----------------------------------------------------------------------------
+
+ ret
+; CHECK: encoding: [0xc0,0x03,0x5f,0xd6]
+ ret x1
+; CHECK: encoding: [0x20,0x00,0x5f,0xd6]
+ drps
+; CHECK: encoding: [0xe0,0x03,0xbf,0xd6]
+ eret
+; CHECK: encoding: [0xe0,0x03,0x9f,0xd6]
+ br x5
+; CHECK: encoding: [0xa0,0x00,0x1f,0xd6]
+ blr x9
+; CHECK: encoding: [0x20,0x01,0x3f,0xd6]
+ bl L1
+; CHECK: bl L1 ; encoding: [A,A,A,0b100101AA]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_call26
+
+;-----------------------------------------------------------------------------
+; Conditional branch instructions.
+;-----------------------------------------------------------------------------
+
+ b L1
+; CHECK: b L1 ; encoding: [A,A,A,0b000101AA]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch26
+ b.eq L1
+; CHECK: b.eq L1 ; encoding: [0bAAA00000,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.ne L1
+; CHECK: b.ne L1 ; encoding: [0bAAA00001,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.cs L1
+; CHECK: b.hs L1 ; encoding: [0bAAA00010,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.cc L1
+; CHECK: b.lo L1 ; encoding: [0bAAA00011,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.mi L1
+; CHECK: b.mi L1 ; encoding: [0bAAA00100,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.pl L1
+; CHECK: b.pl L1 ; encoding: [0bAAA00101,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.vs L1
+; CHECK: b.vs L1 ; encoding: [0bAAA00110,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.vc L1
+; CHECK: b.vc L1 ; encoding: [0bAAA00111,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.hi L1
+; CHECK: b.hi L1 ; encoding: [0bAAA01000,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.ls L1
+; CHECK: b.ls L1 ; encoding: [0bAAA01001,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.ge L1
+; CHECK: b.ge L1 ; encoding: [0bAAA01010,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.lt L1
+; CHECK: b.lt L1 ; encoding: [0bAAA01011,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.gt L1
+; CHECK: b.gt L1 ; encoding: [0bAAA01100,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.le L1
+; CHECK: b.le L1 ; encoding: [0bAAA01101,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+ b.al L1
+; CHECK: b.al L1 ; encoding: [0bAAA01110,A,A,0x54]
+; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_branch19
+L1:
+ b #28
+; CHECK: b #28
+ b.lt #28
+; CHECK: b.lt #28
+ b.cc #1048572
+; CHECK: b.lo #1048572 ; encoding: [0xe3,0xff,0x7f,0x54]
+ b #134217724
+; CHECK: b #134217724 ; encoding: [0xff,0xff,0xff,0x15]
+ b #-134217728
+; CHECK: b #-134217728 ; encoding: [0x00,0x00,0x00,0x16]
+
+;-----------------------------------------------------------------------------
+; Compare-and-branch instructions.
+;-----------------------------------------------------------------------------
+
+ cbz w1, foo
+; CHECK: encoding: [0bAAA00001,A,A,0x34]
+ cbz x1, foo
+; CHECK: encoding: [0bAAA00001,A,A,0xb4]
+ cbnz w2, foo
+; CHECK: encoding: [0bAAA00010,A,A,0x35]
+ cbnz x2, foo
+; CHECK: encoding: [0bAAA00010,A,A,0xb5]
+ cbz w1, #28
+; CHECK: cbz w1, #28
+ cbz w20, #1048572
+; CHECK: cbz w20, #1048572 ; encoding: [0xf4,0xff,0x7f,0x34]
+ cbnz x2, #-1048576
+; CHECK: cbnz x2, #-1048576 ; encoding: [0x02,0x00,0x80,0xb5]
+
+
+;-----------------------------------------------------------------------------
+; Bit-test-and-branch instructions.
+;-----------------------------------------------------------------------------
+
+ tbz x1, #3, foo
+; CHECK: encoding: [0bAAA00001,A,0b00011AAA,0x36]
+ tbnz x1, #63, foo
+; CHECK: encoding: [0bAAA00001,A,0b11111AAA,0xb7]
+
+ tbz w1, #3, foo
+; CHECK: encoding: [0bAAA00001,A,0b00011AAA,0x36]
+ tbnz w1, #31, foo
+; CHECK: encoding: [0bAAA00001,A,0b11111AAA,0x37]
+
+ tbz w1, #3, #28
+; CHECK: tbz w1, #3, #28
+ tbz w3, #5, #32764
+; CHECK: tbz w3, #5, #32764 ; encoding: [0xe3,0xff,0x2b,0x36]
+ tbnz x3, #8, #-32768
+; CHECK: tbnz w3, #8, #-32768 ; encoding: [0x03,0x00,0x44,0x37]
+
+;-----------------------------------------------------------------------------
+; Exception generation instructions.
+;-----------------------------------------------------------------------------
+
+ brk #1
+; CHECK: encoding: [0x20,0x00,0x20,0xd4]
+ dcps1 #2
+; CHECK: encoding: [0x41,0x00,0xa0,0xd4]
+ dcps2 #3
+; CHECK: encoding: [0x62,0x00,0xa0,0xd4]
+ dcps3 #4
+; CHECK: encoding: [0x83,0x00,0xa0,0xd4]
+ hlt #5
+; CHECK: encoding: [0xa0,0x00,0x40,0xd4]
+ hvc #6
+; CHECK: encoding: [0xc2,0x00,0x00,0xd4]
+ smc #7
+; CHECK: encoding: [0xe3,0x00,0x00,0xd4]
+ svc #8
+; CHECK: encoding: [0x01,0x01,0x00,0xd4]
+
+; The immediate defaults to zero for DCPSn
+ dcps1
+ dcps2
+ dcps3
+
+; CHECK: dcps1 ; encoding: [0x01,0x00,0xa0,0xd4]
+; CHECK: dcps2 ; encoding: [0x02,0x00,0xa0,0xd4]
+; CHECK: dcps3 ; encoding: [0x03,0x00,0xa0,0xd4]
+
diff --git a/test/MC/AArch64/arm64-condbr-without-dots.s b/test/MC/AArch64/arm64-condbr-without-dots.s
new file mode 100644
index 000000000000..2a9f7a7cf740
--- /dev/null
+++ b/test/MC/AArch64/arm64-condbr-without-dots.s
@@ -0,0 +1,37 @@
+// RUN: llvm-mc -triple arm64-apple-ios -o - %s | FileCheck %s
+
+ beq lbl
+ bne lbl
+ bcs lbl
+ bhs lbl
+ blo lbl
+ bcc lbl
+ bmi lbl
+ bpl lbl
+ bvs lbl
+ bvc lbl
+ bhi lbl
+ bls lbl
+ bge lbl
+ blt lbl
+ bgt lbl
+ ble lbl
+ bal lbl
+
+// CHECK: b.eq lbl
+// CHECK: b.ne lbl
+// CHECK: b.hs lbl
+// CHECK: b.hs lbl
+// CHECK: b.lo lbl
+// CHECK: b.lo lbl
+// CHECK: b.mi lbl
+// CHECK: b.pl lbl
+// CHECK: b.vs lbl
+// CHECK: b.vc lbl
+// CHECK: b.hi lbl
+// CHECK: b.ls lbl
+// CHECK: b.ge lbl
+// CHECK: b.lt lbl
+// CHECK: b.gt lbl
+// CHECK: b.le lbl
+// CHECK: b.al lbl
diff --git a/test/MC/AArch64/arm64-crypto.s b/test/MC/AArch64/arm64-crypto.s
new file mode 100644
index 000000000000..51efd2132a78
--- /dev/null
+++ b/test/MC/AArch64/arm64-crypto.s
@@ -0,0 +1,66 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -show-encoding -output-asm-variant=1 < %s | FileCheck %s
+
+foo:
+ aese.16b v0, v1
+ aesd.16b v0, v1
+ aesmc.16b v0, v1
+ aesimc.16b v0, v1
+
+ sha1c.4s q0, s1, v2
+ sha1p.4s q0, s1, v2
+ sha1m.4s q0, s1, v2
+ sha1su0.4s v0, v1, v2
+ sha256h.4s q0, q1, v2
+ sha256h2.4s q0, q1, v2
+ sha256su1.4s v0, v1, v2
+ sha1h s0, s1
+ sha1su1.4s v0, v1
+ sha256su0.4s v0, v1
+
+; CHECK: aese.16b v0, v1 ; encoding: [0x20,0x48,0x28,0x4e]
+; CHECK: aesd.16b v0, v1 ; encoding: [0x20,0x58,0x28,0x4e]
+; CHECK: aesmc.16b v0, v1 ; encoding: [0x20,0x68,0x28,0x4e]
+; CHECK: aesimc.16b v0, v1 ; encoding: [0x20,0x78,0x28,0x4e]
+
+; CHECK: sha1c.4s q0, s1, v2 ; encoding: [0x20,0x00,0x02,0x5e]
+; CHECK: sha1p.4s q0, s1, v2 ; encoding: [0x20,0x10,0x02,0x5e]
+; CHECK: sha1m.4s q0, s1, v2 ; encoding: [0x20,0x20,0x02,0x5e]
+; CHECK: sha1su0.4s v0, v1, v2 ; encoding: [0x20,0x30,0x02,0x5e]
+; CHECK: sha256h.4s q0, q1, v2 ; encoding: [0x20,0x40,0x02,0x5e]
+; CHECK: sha256h2.4s q0, q1, v2 ; encoding: [0x20,0x50,0x02,0x5e]
+; CHECK: sha256su1.4s v0, v1, v2 ; encoding: [0x20,0x60,0x02,0x5e]
+; CHECK: sha1h s0, s1 ; encoding: [0x20,0x08,0x28,0x5e]
+; CHECK: sha1su1.4s v0, v1 ; encoding: [0x20,0x18,0x28,0x5e]
+; CHECK: sha256su0.4s v0, v1 ; encoding: [0x20,0x28,0x28,0x5e]
+
+ aese v2.16b, v3.16b
+ aesd v5.16b, v7.16b
+ aesmc v11.16b, v13.16b
+ aesimc v17.16b, v19.16b
+
+; CHECK: aese.16b v2, v3 ; encoding: [0x62,0x48,0x28,0x4e]
+; CHECK: aesd.16b v5, v7 ; encoding: [0xe5,0x58,0x28,0x4e]
+; CHECK: aesmc.16b v11, v13 ; encoding: [0xab,0x69,0x28,0x4e]
+; CHECK: aesimc.16b v17, v19 ; encoding: [0x71,0x7a,0x28,0x4e]
+
+ sha1c q23, s29, v3.4s
+ sha1p q14, s15, v9.4s
+ sha1m q2, s6, v5.4s
+ sha1su0 v3.4s, v5.4s, v9.4s
+ sha256h q2, q7, v18.4s
+ sha256h2 q28, q18, v28.4s
+ sha256su1 v4.4s, v5.4s, v9.4s
+ sha1h s30, s0
+ sha1su1 v10.4s, v21.4s
+ sha256su0 v2.4s, v31.4s
+
+; CHECK: sha1c.4s q23, s29, v3 ; encoding: [0xb7,0x03,0x03,0x5e]
+; CHECK: sha1p.4s q14, s15, v9 ; encoding: [0xee,0x11,0x09,0x5e]
+; CHECK: sha1m.4s q2, s6, v5 ; encoding: [0xc2,0x20,0x05,0x5e]
+; CHECK: sha1su0.4s v3, v5, v9 ; encoding: [0xa3,0x30,0x09,0x5e]
+; CHECK: sha256h.4s q2, q7, v18 ; encoding: [0xe2,0x40,0x12,0x5e]
+; CHECK: sha256h2.4s q28, q18, v28 ; encoding: [0x5c,0x52,0x1c,0x5e]
+; CHECK: sha256su1.4s v4, v5, v9 ; encoding: [0xa4,0x60,0x09,0x5e]
+; CHECK: sha1h s30, s0 ; encoding: [0x1e,0x08,0x28,0x5e]
+; CHECK: sha1su1.4s v10, v21 ; encoding: [0xaa,0x1a,0x28,0x5e]
+; CHECK: sha256su0.4s v2, v31 ; encoding: [0xe2,0x2b,0x28,0x5e]
diff --git a/test/MC/AArch64/arm64-diagno-predicate.s b/test/MC/AArch64/arm64-diagno-predicate.s
new file mode 100644
index 000000000000..3b757e836d39
--- /dev/null
+++ b/test/MC/AArch64/arm64-diagno-predicate.s
@@ -0,0 +1,24 @@
+// RUN: not llvm-mc -triple arm64-linux-gnu -mattr=-fp-armv8,-crc < %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ERROR < %t %s
+
+
+ fcvt d0, s0
+// CHECK-ERROR: error: instruction requires: fp-armv8
+// CHECK-ERROR-NEXT: fcvt d0, s0
+// CHECK-ERROR-NEXT: ^
+
+ fmla v9.2s, v9.2s, v0.2s
+// CHECK-ERROR: error: instruction requires: neon
+// CHECK-ERROR-NEXT: fmla v9.2s, v9.2s, v0.2s
+// CHECK-ERROR-NEXT: ^
+
+ pmull v0.1q, v1.1d, v2.1d
+// CHECK-ERROR: error: instruction requires: crypto
+// CHECK-ERROR-NEXT: pmull v0.1q, v1.1d, v2.1d
+// CHECK-ERROR-NEXT: ^
+
+ crc32b w5, w7, w20
+// CHECK-ERROR: error: instruction requires: crc
+// CHECK-ERROR-NEXT: crc32b w5, w7, w20
+// CHECK-ERROR-NEXT: ^
+
diff --git a/test/MC/AArch64/arm64-diags.s b/test/MC/AArch64/arm64-diags.s
new file mode 100644
index 000000000000..f8138bde3a4f
--- /dev/null
+++ b/test/MC/AArch64/arm64-diags.s
@@ -0,0 +1,428 @@
+; RUN: not llvm-mc -triple arm64-apple-darwin -show-encoding < %s 2> %t | FileCheck %s
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+foo:
+
+; The first should encode as an expression. The second should produce an
+; error because a register is expected.
+ ldr x3, (foo + 4)
+ ldr x3, [foo + 4]
+; CHECK: ldr x3, foo+4 ; encoding: [0bAAA00011,A,A,0x58]
+; CHECK: ; fixup A - offset: 0, value: foo+4, kind: fixup_aarch64_ldr_pcrel_imm19
+; CHECK-ERRORS: error: invalid operand for instruction
+
+; The last argument should be flagged as an error. rdar://9576009
+ ld4.8b {v0, v1, v2, v3}, [x0], #33
+; CHECK-ERRORS: error: invalid operand for instruction
+; CHECK-ERRORS: ld4.8b {v0, v1, v2, v3}, [x0], #33
+
+
+ ldr x0, [x0, #804]
+ ldr w0, [x0, #802]
+ ldr x0, [x0, #804]!
+ ldr w0, [w0, #301]!
+ ldr x0, [x0], #804
+ ldr w0, [w0], #301
+
+ ldp w3, w4, [x5, #11]!
+ ldp x3, x4, [x5, #12]!
+ ldp q3, q4, [x5, #12]!
+ ldp w3, w4, [x5], #11
+ ldp x3, x4, [x5], #12
+ ldp q3, q4, [x5], #12
+
+ ldur x0, [x1, #-257]
+
+; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
+; CHECK-ERRORS: ldr x0, [x0, #804]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
+; CHECK-ERRORS: ldr w0, [x0, #802]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
+; CHECK-ERRORS: ldr x0, [x0, #804]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid operand for instruction
+; CHECK-ERRORS: ldr w0, [w0, #301]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
+; CHECK-ERRORS: ldr x0, [x0], #804
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid operand for instruction
+; CHECK-ERRORS: ldr w0, [w0], #301
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be a multiple of 4 in range [-256, 252].
+; CHECK-ERRORS: ldp w3, w4, [x5, #11]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be a multiple of 8 in range [-512, 504].
+; CHECK-ERRORS: ldp x3, x4, [x5, #12]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be a multiple of 16 in range [-1024, 1008].
+; CHECK-ERRORS: ldp q3, q4, [x5, #12]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be a multiple of 4 in range [-256, 252].
+; CHECK-ERRORS: ldp w3, w4, [x5], #11
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be a multiple of 8 in range [-512, 504].
+; CHECK-ERRORS: ldp x3, x4, [x5], #12
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be a multiple of 16 in range [-1024, 1008].
+; CHECK-ERRORS: ldp q3, q4, [x5], #12
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
+; CHECK-ERRORS: ldur x0, [x1, #-257]
+; CHECK-ERRORS: ^
+
+
+ldrb w1, [x3, w3, sxtw #4]
+ldrh w1, [x3, w3, sxtw #4]
+ldr w1, [x3, w3, sxtw #4]
+ldr x1, [x3, w3, sxtw #4]
+ldr b1, [x3, w3, sxtw #4]
+ldr h1, [x3, w3, sxtw #4]
+ldr s1, [x3, w3, sxtw #4]
+ldr d1, [x3, w3, sxtw #4]
+ldr q1, [x3, w3, sxtw #1]
+
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0
+; CHECK-ERRORS:ldrb w1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #1
+; CHECK-ERRORS:ldrh w1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
+; CHECK-ERRORS:ldr w1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
+; CHECK-ERRORS:ldr x1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0
+; CHECK-ERRORS:ldr b1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #1
+; CHECK-ERRORS:ldr h1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
+; CHECK-ERRORS:ldr s1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
+; CHECK-ERRORS:ldr d1, [x3, w3, sxtw #4]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #4
+; CHECK-ERRORS:ldr q1, [x3, w3, sxtw #1]
+; CHECK-ERRORS: ^
+
+; Check that register offset addressing modes only accept 32-bit offset
+; registers when using uxtw/sxtw extends. Everything else requires a 64-bit
+; register.
+ str d1, [x3, w3, sxtx #3]
+ ldr s1, [x3, d3, sxtx #2]
+
+; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
+; CHECK-ERRORS: str d1, [x3, w3, sxtx #3]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
+; CHECK-ERRORS: ldr s1, [x3, d3, sxtx #2]
+; CHECK-ERRORS: ^
+
+; Shift immediates range checking.
+ sqrshrn b4, h9, #10
+ rshrn v9.8b, v11.8h, #17
+ sqrshrn v7.4h, v8.4s, #39
+ uqshrn2 v4.4s, v5.2d, #67
+
+; CHECK-ERRORS: error: immediate must be an integer in range [1, 8].
+; CHECK-ERRORS: sqrshrn b4, h9, #10
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: immediate must be an integer in range [1, 8].
+; CHECK-ERRORS: rshrn v9.8b, v11.8h, #17
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: immediate must be an integer in range [1, 16].
+; CHECK-ERRORS: sqrshrn v7.4h, v8.4s, #39
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: immediate must be an integer in range [1, 32].
+; CHECK-ERRORS: uqshrn2 v4.4s, v5.2d, #67
+; CHECK-ERRORS: ^
+
+
+ st1.s4 {v14, v15}, [x2], #32
+; CHECK-ERRORS: error: invalid type suffix for instruction
+; CHECK-ERRORS: st1.s4 {v14, v15}, [x2], #32
+; CHECK-ERRORS: ^
+
+
+
+; Load pair instructions where Rt==Rt2 and writeback load/store instructions
+; where Rt==Rn or Rt2==Rn are unpredictable.
+ ldp x1, x2, [x2], #16
+ ldp x2, x2, [x2], #16
+ ldp w1, w2, [x2], #16
+ ldp w2, w2, [x2], #16
+ ldp x1, x1, [x2]
+ ldp s1, s1, [x1], #8
+ ldp s1, s1, [x1, #8]!
+ ldp s1, s1, [x1, #8]
+ ldp d1, d1, [x1], #16
+ ldp d1, d1, [x1, #16]!
+ ldp d1, d1, [x1, #16]
+ ldp q1, q1, [x1], #32
+ ldp q1, q1, [x1, #32]!
+ ldp q1, q1, [x1, #32]
+
+ ldr x2, [x2], #8
+ ldr x2, [x2, #8]!
+ ldr w2, [x2], #8
+ ldr w2, [x2, #8]!
+
+ str x2, [x2], #8
+ str x2, [x2, #8]!
+ str w2, [x2], #8
+ str w2, [x2, #8]!
+
+; CHECK-ERRORS: error: unpredictable LDP instruction, writeback base is also a destination
+; CHECK-ERRORS: ldp x1, x2, [x2], #16
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, writeback base is also a destination
+; CHECK-ERRORS: ldp x2, x2, [x2], #16
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, writeback base is also a destination
+; CHECK-ERRORS: ldp w1, w2, [x2], #16
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, writeback base is also a destination
+; CHECK-ERRORS: ldp w2, w2, [x2], #16
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp x1, x1, [x2]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp s1, s1, [x1], #8
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp s1, s1, [x1, #8]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp s1, s1, [x1, #8]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp d1, d1, [x1], #16
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp d1, d1, [x1, #16]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp d1, d1, [x1, #16]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp q1, q1, [x1], #32
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp q1, q1, [x1, #32]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDP instruction, Rt2==Rt
+; CHECK-ERRORS: ldp q1, q1, [x1, #32]
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDR instruction, writeback base is also a source
+; CHECK-ERRORS: ldr x2, [x2], #8
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDR instruction, writeback base is also a source
+; CHECK-ERRORS: ldr x2, [x2, #8]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDR instruction, writeback base is also a source
+; CHECK-ERRORS: ldr w2, [x2], #8
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable LDR instruction, writeback base is also a source
+; CHECK-ERRORS: ldr w2, [x2, #8]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable STR instruction, writeback base is also a source
+; CHECK-ERRORS: str x2, [x2], #8
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable STR instruction, writeback base is also a source
+; CHECK-ERRORS: str x2, [x2, #8]!
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable STR instruction, writeback base is also a source
+; CHECK-ERRORS: str w2, [x2], #8
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: unpredictable STR instruction, writeback base is also a source
+; CHECK-ERRORS: str w2, [x2, #8]!
+; CHECK-ERRORS: ^
+
+; The validity checking for shifted-immediate operands. rdar://13174476
+; Where the immediate is out of range.
+ add w1, w2, w3, lsr #75
+
+; CHECK-ERRORS: error: expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]
+; CHECK-ERRORS: add w1, w2, w3, lsr #75
+; CHECK-ERRORS: ^
+
+; Logical instructions on 32-bit regs with a shift > 31 are not legal.
+orr w0, w0, w0, lsl #32
+; CHECK-ERRORS: error: expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]
+; CHECK-ERRORS: orr w0, w0, w0, lsl #32
+; CHECK-ERRORS: ^
+eor w0, w0, w0, lsl #32
+; CHECK-ERRORS: error: expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]
+; CHECK-ERRORS: eor w0, w0, w0, lsl #32
+; CHECK-ERRORS: ^
+and w0, w0, w0, lsl #32
+; CHECK-ERRORS: error: expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]
+; CHECK-ERRORS: and w0, w0, w0, lsl #32
+; CHECK-ERRORS: ^
+ands w0, w0, w0, lsl #32
+; CHECK-ERRORS: error: expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]
+; CHECK-ERRORS: ands w0, w0, w0, lsl #32
+; CHECK-ERRORS: ^
+
+; Relocated expressions should not be accepted for 32-bit adds or sub (imm)
+add w3, w5, sym@PAGEOFF
+; CHECK-ERRORS: error: invalid immediate expression
+; CHECK-ERRORS: add w3, w5, sym@PAGEOFF
+; CHECK-ERRORS: ^
+
+adds w3, w5, sym@PAGEOFF
+adds x9, x12, sym@PAGEOFF
+; CHECK-ERRORS: error: invalid immediate expression
+; CHECK-ERRORS: adds w3, w5, sym@PAGEOFF
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid immediate expression
+; CHECK-ERRORS: adds x9, x12, sym@PAGEOFF
+; CHECK-ERRORS: ^
+
+sub x3, x5, sym@PAGEOFF
+sub w20, w30, sym@PAGEOFF
+; CHECK-ERRORS: error: invalid immediate expression
+; CHECK-ERRORS: sub x3, x5, sym@PAGEOFF
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid immediate expression
+; CHECK-ERRORS: sub w20, w30, sym@PAGEOFF
+; CHECK-ERRORS: ^
+
+subs w9, w10, sym@PAGEOFF
+subs x20, x30, sym@PAGEOFF
+; CHECK-ERRORS: error: invalid immediate expression
+; CHECK-ERRORS: subs w9, w10, sym@PAGEOFF
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid immediate expression
+; CHECK-ERRORS: subs x20, x30, sym@PAGEOFF
+; CHECK-ERRORS: ^
+
+tbl v0.8b, { v1 }, v0.8b
+tbl v0.16b, { v1.8b, v2.8b, v3.8b }, v0.16b
+tbx v3.16b, { v12.8b, v13.8b, v14.8b }, v6.8b
+tbx v2.8b, { v0 }, v6.8b
+; CHECK-ERRORS: error: invalid operand for instruction
+; CHECK-ERRORS: tbl v0.8b, { v1 }, v0.8b
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid operand for instruction
+; CHECK-ERRORS: tbl v0.16b, { v1.8b, v2.8b, v3.8b }, v0.16b
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid operand for instruction
+; CHECK-ERRORS: tbx v3.16b, { v12.8b, v13.8b, v14.8b }, v6.8b
+; CHECK-ERRORS: ^
+; CHECK-ERRORS: error: invalid operand for instruction
+; CHECK-ERRORS: tbx v2.8b, { v0 }, v6.8b
+; CHECK-ERRORS: ^
+
+b.c #0x4
+; CHECK-ERRORS: error: invalid condition code
+; CHECK-ERRORS: b.c #0x4
+; CHECK-ERRORS: ^
+
+ic ialluis, x0
+; CHECK-ERRORS: error: specified ic op does not use a register
+ic iallu, x0
+; CHECK-ERRORS: error: specified ic op does not use a register
+ic ivau
+; CHECK-ERRORS: error: specified ic op requires a register
+
+dc zva
+; CHECK-ERRORS: error: specified dc op requires a register
+dc ivac
+; CHECK-ERRORS: error: specified dc op requires a register
+dc isw
+; CHECK-ERRORS: error: specified dc op requires a register
+dc cvac
+; CHECK-ERRORS: error: specified dc op requires a register
+dc csw
+; CHECK-ERRORS: error: specified dc op requires a register
+dc cvau
+; CHECK-ERRORS: error: specified dc op requires a register
+dc civac
+; CHECK-ERRORS: error: specified dc op requires a register
+dc cisw
+; CHECK-ERRORS: error: specified dc op requires a register
+
+at s1e1r
+; CHECK-ERRORS: error: specified at op requires a register
+at s1e2r
+; CHECK-ERRORS: error: specified at op requires a register
+at s1e3r
+; CHECK-ERRORS: error: specified at op requires a register
+at s1e1w
+; CHECK-ERRORS: error: specified at op requires a register
+at s1e2w
+; CHECK-ERRORS: error: specified at op requires a register
+at s1e3w
+; CHECK-ERRORS: error: specified at op requires a register
+at s1e0r
+; CHECK-ERRORS: error: specified at op requires a register
+at s1e0w
+; CHECK-ERRORS: error: specified at op requires a register
+at s12e1r
+; CHECK-ERRORS: error: specified at op requires a register
+at s12e1w
+; CHECK-ERRORS: error: specified at op requires a register
+at s12e0r
+; CHECK-ERRORS: error: specified at op requires a register
+at s12e0w
+; CHECK-ERRORS: error: specified at op requires a register
+
+tlbi vmalle1is, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi vmalle1, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi alle1is, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi alle2is, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi alle3is, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi alle1, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi alle2, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi alle3, x0
+; CHECK-ERRORS: error: specified tlbi op does not use a register
+tlbi vae1is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vae2is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vae3is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi aside1is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vaae1is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vale1is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vaale1is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vale2is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vale3is
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vae1
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vae2
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vae3
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi aside1
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vaae1
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vale1
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vale2
+; CHECK-ERRORS: error: specified tlbi op requires a register
+tlbi vale3
+; CHECK-ERRORS: error: specified tlbi op requires a register
diff --git a/test/MC/AArch64/arm64-directive_loh.s b/test/MC/AArch64/arm64-directive_loh.s
new file mode 100644
index 000000000000..76d2d7f21861
--- /dev/null
+++ b/test/MC/AArch64/arm64-directive_loh.s
@@ -0,0 +1,93 @@
+# RUN: not llvm-mc -triple arm64-apple-darwin < %s 2> %t | FileCheck %s
+# RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+.globl _fct1
+_fct1:
+ L1:
+ L2:
+ L3:
+ L4:
+ ret lr;
+
+# Known LOHs with:
+# - Regular syntax.
+# - Alternative syntax.
+
+# CHECK: .loh AdrpAdrp L1, L2
+# CHECK: .loh AdrpAdrp L1, L2
+.loh AdrpAdrp L1, L2
+.loh 1 L1, L2
+
+# CHECK: .loh AdrpLdr L1, L2
+# CHECK: .loh AdrpLdr L1, L2
+.loh AdrpLdr L1, L2
+.loh 2 L1, L2
+
+# CHECK: .loh AdrpAddLdr L1, L2, L3
+# CHECK: .loh AdrpAddLdr L1, L2, L3
+.loh AdrpAddLdr L1, L2, L3
+.loh 3 L1, L2, L3
+
+# CHECK: .loh AdrpLdrGotLdr L1, L2, L3
+# CHECK: .loh AdrpLdrGotLdr L1, L2, L3
+.loh AdrpLdrGotLdr L1, L2, L3
+.loh 4 L1, L2, L3
+
+# CHECK: .loh AdrpAddStr L1, L2, L3
+# CHECK: .loh AdrpAddStr L1, L2, L3
+.loh AdrpAddStr L1, L2, L3
+.loh 5 L1, L2, L3
+
+# CHECK: .loh AdrpLdrGotStr L1, L2, L3
+# CHECK: .loh AdrpLdrGotStr L1, L2, L3
+.loh AdrpLdrGotStr L1, L2, L3
+.loh 6 L1, L2, L3
+
+# CHECK: .loh AdrpAdd L1, L2
+# CHECK: .loh AdrpAdd L1, L2
+.loh AdrpAdd L1, L2
+.loh 7 L1, L2
+
+# CHECK: .loh AdrpLdrGot L1, L2
+# CHECK: .loh AdrpLdrGot L1, L2
+.loh AdrpLdrGot L1, L2
+.loh 8 L1, L2
+
+# End Known LOHs.
+
+### Errors Check ###
+
+# Unknown textual identifier.
+# CHECK-ERRORS: error: invalid identifier in directive
+# CHECK-ERRORS-NEXT: .loh Unknown
+# CHECK-ERRORS-NEXT: ^
+.loh Unknown
+# Unknown numeric identifier.
+# CHECK-ERRORS: error: invalid numeric identifier in directive
+# CHECK-ERRORS-NEXT: .loh 153, L1
+# CHECK-ERRORS-NEXT: ^
+.loh 153, L1
+
+# Too many arguments.
+# CHECK-ERRORS: error: unexpected token in '.loh' directive
+# CHECK-ERRORS-NEXT: .loh AdrpAdrp L1, L2, L3
+# CHECK-ERRORS-NEXT: ^
+.loh AdrpAdrp L1, L2, L3
+
+# Too many arguments with alternative syntax.
+# CHECK-ERRORS: error: unexpected token in '.loh' directive
+# CHECK-ERRORS-NEXT: .loh 1 L1, L2, L3
+# CHECK-ERRORS-NEXT: ^
+.loh 1 L1, L2, L3
+
+# Too few arguments.
+# CHECK-ERRORS: error: unexpected token in '.loh' directive
+# CHECK-ERRORS-NEXT: .loh AdrpAdrp L1
+# CHECK-ERRORS-NEXT: ^
+.loh AdrpAdrp L1
+
+# Too few arguments with alternative syntax.
+# CHECK-ERRORS: error: unexpected token in '.loh' directive
+# CHECK-ERRORS-NEXT: .loh 1 L1
+# CHECK-ERRORS-NEXT: ^
+.loh 1 L1
diff --git a/test/MC/AArch64/arm64-elf-reloc-condbr.s b/test/MC/AArch64/arm64-elf-reloc-condbr.s
new file mode 100644
index 000000000000..9b70a20e1bc2
--- /dev/null
+++ b/test/MC/AArch64/arm64-elf-reloc-condbr.s
@@ -0,0 +1,10 @@
+// RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj %s -o - | \
+// RUN: llvm-readobj -r | FileCheck -check-prefix=OBJ %s
+
+ b.eq somewhere
+
+// OBJ: Relocations [
+// OBJ-NEXT: Section (2) .rela.text {
+// OBJ-NEXT: 0x0 R_AARCH64_CONDBR19 somewhere 0x0
+// OBJ-NEXT: }
+// OBJ-NEXT: ]
diff --git a/test/MC/AArch64/arm64-elf-relocs.s b/test/MC/AArch64/arm64-elf-relocs.s
new file mode 100644
index 000000000000..eb22cc2f2365
--- /dev/null
+++ b/test/MC/AArch64/arm64-elf-relocs.s
@@ -0,0 +1,249 @@
+// RUN: llvm-mc -triple=arm64-linux-gnu -o - < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64-linux-gnu -filetype=obj < %s | llvm-objdump -triple=arm64-linux-gnu - -r | FileCheck %s --check-prefix=CHECK-OBJ
+
+ add x0, x2, #:lo12:sym
+// CHECK: add x0, x2, :lo12:sym
+// CHECK-OBJ: 0 R_AARCH64_ADD_ABS_LO12_NC sym
+
+ add x5, x7, #:dtprel_lo12:sym
+// CHECK: add x5, x7, :dtprel_lo12:sym
+// CHECK-OBJ: 4 R_AARCH64_TLSLD_ADD_DTPREL_LO12 sym
+
+ add x9, x12, #:dtprel_lo12_nc:sym
+// CHECK: add x9, x12, :dtprel_lo12_nc:sym
+// CHECK-OBJ: 8 R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC sym
+
+ add x20, x30, #:tprel_lo12:sym
+// CHECK: add x20, x30, :tprel_lo12:sym
+// CHECK-OBJ: c R_AARCH64_TLSLE_ADD_TPREL_LO12 sym
+
+ add x9, x12, #:tprel_lo12_nc:sym
+// CHECK: add x9, x12, :tprel_lo12_nc:sym
+// CHECK-OBJ: 10 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC sym
+
+ add x5, x0, #:tlsdesc_lo12:sym
+// CHECK: add x5, x0, :tlsdesc_lo12:sym
+// CHECK-OBJ: 14 R_AARCH64_TLSDESC_ADD_LO12_NC sym
+
+ add x0, x2, #:lo12:sym+8
+// CHECK: add x0, x2, :lo12:sym
+// CHECK-OBJ: 18 R_AARCH64_ADD_ABS_LO12_NC sym+8
+
+ add x5, x7, #:dtprel_lo12:sym+1
+// CHECK: add x5, x7, :dtprel_lo12:sym+1
+// CHECK-OBJ: 1c R_AARCH64_TLSLD_ADD_DTPREL_LO12 sym+1
+
+ add x9, x12, #:dtprel_lo12_nc:sym+2
+// CHECK: add x9, x12, :dtprel_lo12_nc:sym+2
+// CHECK-OBJ:20 R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC sym+2
+
+ add x20, x30, #:tprel_lo12:sym+12
+// CHECK: add x20, x30, :tprel_lo12:sym+12
+// CHECK-OBJ: 24 R_AARCH64_TLSLE_ADD_TPREL_LO12 sym+12
+
+ add x9, x12, #:tprel_lo12_nc:sym+54
+// CHECK: add x9, x12, :tprel_lo12_nc:sym+54
+// CHECK-OBJ: 28 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC sym+54
+
+ add x5, x0, #:tlsdesc_lo12:sym+70
+// CHECK: add x5, x0, :tlsdesc_lo12:sym+70
+// CHECK-OBJ: 2c R_AARCH64_TLSDESC_ADD_LO12_NC sym+70
+
+ .hword sym + 4 - .
+// CHECK-OBJ: 30 R_AARCH64_PREL16 sym+4
+ .word sym - . + 8
+// CHECK-OBJ: 32 R_AARCH64_PREL32 sym+8
+ .xword sym-.
+// CHECK-OBJ: 36 R_AARCH64_PREL64 sym{{$}}
+
+ .hword sym
+// CHECK-OBJ: 3e R_AARCH64_ABS16 sym
+ .word sym+1
+// CHECK-OBJ: 40 R_AARCH64_ABS32 sym+1
+ .xword sym+16
+// CHECK-OBJ: 44 R_AARCH64_ABS64 sym+16
+
+ adrp x0, sym
+// CHECK: adrp x0, sym
+// CHECK-OBJ: 4c R_AARCH64_ADR_PREL_PG_HI21 sym
+
+ adrp x15, :got:sym
+// CHECK: adrp x15, :got:sym
+// CHECK-OBJ: 50 R_AARCH64_ADR_GOT_PAGE sym
+
+ adrp x29, :gottprel:sym
+// CHECK: adrp x29, :gottprel:sym
+// CHECK-OBJ: 54 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 sym
+
+ adrp x2, :tlsdesc:sym
+// CHECK: adrp x2, :tlsdesc:sym
+// CHECK-OBJ: 58 R_AARCH64_TLSDESC_ADR_PAGE sym
+
+  // LLVM cannot resolve this at assembly time because the page
+  // boundary could fall anywhere after linking, so a relocation is needed.
+ adrp x3, trickQuestion
+ .global trickQuestion
+trickQuestion:
+// CHECK: adrp x3, trickQuestion
+// CHECK-OBJ: 5c R_AARCH64_ADR_PREL_PG_HI21 trickQuestion
+
+ ldrb w2, [x3, :lo12:sym]
+ ldrsb w5, [x7, #:lo12:sym]
+ ldrsb x11, [x13, :lo12:sym]
+ ldr b17, [x19, #:lo12:sym]
+// CHECK: ldrb w2, [x3, :lo12:sym]
+// CHECK: ldrsb w5, [x7, :lo12:sym]
+// CHECK: ldrsb x11, [x13, :lo12:sym]
+// CHECK: ldr b17, [x19, :lo12:sym]
+// CHECK-OBJ: R_AARCH64_LDST8_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST8_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST8_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST8_ABS_LO12_NC sym
+
+ ldrb w23, [x29, #:dtprel_lo12_nc:sym]
+ ldrsb w23, [x19, #:dtprel_lo12:sym]
+ ldrsb x17, [x13, :dtprel_lo12_nc:sym]
+ ldr b11, [x7, #:dtprel_lo12:sym]
+// CHECK: ldrb w23, [x29, :dtprel_lo12_nc:sym]
+// CHECK: ldrsb w23, [x19, :dtprel_lo12:sym]
+// CHECK: ldrsb x17, [x13, :dtprel_lo12_nc:sym]
+// CHECK: ldr b11, [x7, :dtprel_lo12:sym]
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST8_DTPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST8_DTPREL_LO12 sym
+
+ ldrb w1, [x2, :tprel_lo12:sym]
+ ldrsb w3, [x4, #:tprel_lo12_nc:sym]
+ ldrsb x5, [x6, :tprel_lo12:sym]
+ ldr b7, [x8, #:tprel_lo12_nc:sym]
+// CHECK: ldrb w1, [x2, :tprel_lo12:sym]
+// CHECK: ldrsb w3, [x4, :tprel_lo12_nc:sym]
+// CHECK: ldrsb x5, [x6, :tprel_lo12:sym]
+// CHECK: ldr b7, [x8, :tprel_lo12_nc:sym]
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST8_TPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST8_TPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC sym
+
+ ldrh w2, [x3, #:lo12:sym]
+ ldrsh w5, [x7, :lo12:sym]
+ ldrsh x11, [x13, #:lo12:sym]
+ ldr h17, [x19, :lo12:sym]
+// CHECK: ldrh w2, [x3, :lo12:sym]
+// CHECK: ldrsh w5, [x7, :lo12:sym]
+// CHECK: ldrsh x11, [x13, :lo12:sym]
+// CHECK: ldr h17, [x19, :lo12:sym]
+// CHECK-OBJ: R_AARCH64_LDST16_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST16_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST16_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST16_ABS_LO12_NC sym
+
+ ldrh w23, [x29, #:dtprel_lo12_nc:sym]
+ ldrsh w23, [x19, :dtprel_lo12:sym]
+ ldrsh x17, [x13, :dtprel_lo12_nc:sym]
+ ldr h11, [x7, #:dtprel_lo12:sym]
+// CHECK: ldrh w23, [x29, :dtprel_lo12_nc:sym]
+// CHECK: ldrsh w23, [x19, :dtprel_lo12:sym]
+// CHECK: ldrsh x17, [x13, :dtprel_lo12_nc:sym]
+// CHECK: ldr h11, [x7, :dtprel_lo12:sym]
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST16_DTPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST16_DTPREL_LO12 sym
+
+ ldrh w1, [x2, :tprel_lo12:sym]
+ ldrsh w3, [x4, #:tprel_lo12_nc:sym]
+ ldrsh x5, [x6, :tprel_lo12:sym]
+ ldr h7, [x8, #:tprel_lo12_nc:sym]
+// CHECK: ldrh w1, [x2, :tprel_lo12:sym]
+// CHECK: ldrsh w3, [x4, :tprel_lo12_nc:sym]
+// CHECK: ldrsh x5, [x6, :tprel_lo12:sym]
+// CHECK: ldr h7, [x8, :tprel_lo12_nc:sym]
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST16_TPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST16_TPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC sym
+
+ ldr w1, [x2, #:lo12:sym]
+ ldrsw x3, [x4, #:lo12:sym]
+ ldr s4, [x5, :lo12:sym]
+// CHECK: ldr w1, [x2, :lo12:sym]
+// CHECK: ldrsw x3, [x4, :lo12:sym]
+// CHECK: ldr s4, [x5, :lo12:sym]
+// CHECK-OBJ: R_AARCH64_LDST32_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST32_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST32_ABS_LO12_NC sym
+
+ ldr w1, [x2, :dtprel_lo12:sym]
+ ldrsw x3, [x4, #:dtprel_lo12_nc:sym]
+ ldr s4, [x5, #:dtprel_lo12_nc:sym]
+// CHECK: ldr w1, [x2, :dtprel_lo12:sym]
+// CHECK: ldrsw x3, [x4, :dtprel_lo12_nc:sym]
+// CHECK: ldr s4, [x5, :dtprel_lo12_nc:sym]
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST32_DTPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC sym
+
+
+ ldr w1, [x2, #:tprel_lo12:sym]
+ ldrsw x3, [x4, :tprel_lo12_nc:sym]
+ ldr s4, [x5, :tprel_lo12_nc:sym]
+// CHECK: ldr w1, [x2, :tprel_lo12:sym]
+// CHECK: ldrsw x3, [x4, :tprel_lo12_nc:sym]
+// CHECK: ldr s4, [x5, :tprel_lo12_nc:sym]
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST32_TPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC sym
+
+ ldr x28, [x27, :lo12:sym]
+ ldr d26, [x25, #:lo12:sym]
+// CHECK: ldr x28, [x27, :lo12:sym]
+// CHECK: ldr d26, [x25, :lo12:sym]
+// CHECK-OBJ: R_AARCH64_LDST64_ABS_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LDST64_ABS_LO12_NC sym
+
+ ldr x24, [x23, #:got_lo12:sym]
+ ldr d22, [x21, :got_lo12:sym]
+// CHECK: ldr x24, [x23, :got_lo12:sym]
+// CHECK: ldr d22, [x21, :got_lo12:sym]
+// CHECK-OBJ: R_AARCH64_LD64_GOT_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_LD64_GOT_LO12_NC sym
+
+ ldr x24, [x23, :dtprel_lo12_nc:sym]
+ ldr d22, [x21, #:dtprel_lo12:sym]
+// CHECK: ldr x24, [x23, :dtprel_lo12_nc:sym]
+// CHECK: ldr d22, [x21, :dtprel_lo12:sym]
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSLD_LDST64_DTPREL_LO12 sym
+
+ ldr x24, [x23, #:tprel_lo12:sym]
+ ldr d22, [x21, :tprel_lo12_nc:sym]
+// CHECK: ldr x24, [x23, :tprel_lo12:sym]
+// CHECK: ldr d22, [x21, :tprel_lo12_nc:sym]
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST64_TPREL_LO12 sym
+// CHECK-OBJ: R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC sym
+
+ ldr x24, [x23, :gottprel_lo12:sym]
+ ldr d22, [x21, #:gottprel_lo12:sym]
+// CHECK: ldr x24, [x23, :gottprel_lo12:sym]
+// CHECK: ldr d22, [x21, :gottprel_lo12:sym]
+// CHECK-OBJ: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC sym
+
+ ldr x24, [x23, #:tlsdesc_lo12:sym]
+ ldr d22, [x21, :tlsdesc_lo12:sym]
+// CHECK: ldr x24, [x23, :tlsdesc_lo12:sym]
+// CHECK: ldr d22, [x21, :tlsdesc_lo12:sym]
+// CHECK-OBJ: R_AARCH64_TLSDESC_LD64_LO12_NC sym
+// CHECK-OBJ: R_AARCH64_TLSDESC_LD64_LO12_NC sym
+
+ ldr q20, [x19, #:lo12:sym]
+// CHECK: ldr q20, [x19, :lo12:sym]
+// CHECK-OBJ: R_AARCH64_LDST128_ABS_LO12_NC sym
+
+// Since relocated instructions are printed without a '#', the '#'-less
+// syntax should certainly be accepted when assembling.
+ add x3, x5, :lo12:imm
+// CHECK: add x3, x5, :lo12:imm
diff --git a/test/MC/AArch64/arm64-fp-encoding.s b/test/MC/AArch64/arm64-fp-encoding.s
new file mode 100644
index 000000000000..684d9883e37f
--- /dev/null
+++ b/test/MC/AArch64/arm64-fp-encoding.s
@@ -0,0 +1,443 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon -show-encoding -output-asm-variant=1 < %s | FileCheck %s
+
+foo:
+;-----------------------------------------------------------------------------
+; Floating-point arithmetic
+;-----------------------------------------------------------------------------
+
+ fabs s1, s2
+ fabs d1, d2
+
+; CHECK: fabs s1, s2 ; encoding: [0x41,0xc0,0x20,0x1e]
+; CHECK: fabs d1, d2 ; encoding: [0x41,0xc0,0x60,0x1e]
+
+ fadd s1, s2, s3
+ fadd d1, d2, d3
+
+; CHECK: fadd s1, s2, s3 ; encoding: [0x41,0x28,0x23,0x1e]
+; CHECK: fadd d1, d2, d3 ; encoding: [0x41,0x28,0x63,0x1e]
+
+ fdiv s1, s2, s3
+ fdiv d1, d2, d3
+
+; CHECK: fdiv s1, s2, s3 ; encoding: [0x41,0x18,0x23,0x1e]
+; CHECK: fdiv d1, d2, d3 ; encoding: [0x41,0x18,0x63,0x1e]
+
+ fmadd s1, s2, s3, s4
+ fmadd d1, d2, d3, d4
+
+; CHECK: fmadd s1, s2, s3, s4 ; encoding: [0x41,0x10,0x03,0x1f]
+; CHECK: fmadd d1, d2, d3, d4 ; encoding: [0x41,0x10,0x43,0x1f]
+
+ fmax s1, s2, s3
+ fmax d1, d2, d3
+ fmaxnm s1, s2, s3
+ fmaxnm d1, d2, d3
+
+; CHECK: fmax s1, s2, s3 ; encoding: [0x41,0x48,0x23,0x1e]
+; CHECK: fmax d1, d2, d3 ; encoding: [0x41,0x48,0x63,0x1e]
+; CHECK: fmaxnm s1, s2, s3 ; encoding: [0x41,0x68,0x23,0x1e]
+; CHECK: fmaxnm d1, d2, d3 ; encoding: [0x41,0x68,0x63,0x1e]
+
+ fmin s1, s2, s3
+ fmin d1, d2, d3
+ fminnm s1, s2, s3
+ fminnm d1, d2, d3
+
+; CHECK: fmin s1, s2, s3 ; encoding: [0x41,0x58,0x23,0x1e]
+; CHECK: fmin d1, d2, d3 ; encoding: [0x41,0x58,0x63,0x1e]
+; CHECK: fminnm s1, s2, s3 ; encoding: [0x41,0x78,0x23,0x1e]
+; CHECK: fminnm d1, d2, d3 ; encoding: [0x41,0x78,0x63,0x1e]
+
+ fmsub s1, s2, s3, s4
+ fmsub d1, d2, d3, d4
+
+; CHECK: fmsub s1, s2, s3, s4 ; encoding: [0x41,0x90,0x03,0x1f]
+; CHECK: fmsub d1, d2, d3, d4 ; encoding: [0x41,0x90,0x43,0x1f]
+
+ fmul s1, s2, s3
+ fmul d1, d2, d3
+
+; CHECK: fmul s1, s2, s3 ; encoding: [0x41,0x08,0x23,0x1e]
+; CHECK: fmul d1, d2, d3 ; encoding: [0x41,0x08,0x63,0x1e]
+
+ fneg s1, s2
+ fneg d1, d2
+
+; CHECK: fneg s1, s2 ; encoding: [0x41,0x40,0x21,0x1e]
+; CHECK: fneg d1, d2 ; encoding: [0x41,0x40,0x61,0x1e]
+
+ fnmadd s1, s2, s3, s4
+ fnmadd d1, d2, d3, d4
+
+; CHECK: fnmadd s1, s2, s3, s4 ; encoding: [0x41,0x10,0x23,0x1f]
+; CHECK: fnmadd d1, d2, d3, d4 ; encoding: [0x41,0x10,0x63,0x1f]
+
+ fnmsub s1, s2, s3, s4
+ fnmsub d1, d2, d3, d4
+
+; CHECK: fnmsub s1, s2, s3, s4 ; encoding: [0x41,0x90,0x23,0x1f]
+; CHECK: fnmsub d1, d2, d3, d4 ; encoding: [0x41,0x90,0x63,0x1f]
+
+ fnmul s1, s2, s3
+ fnmul d1, d2, d3
+
+; CHECK: fnmul s1, s2, s3 ; encoding: [0x41,0x88,0x23,0x1e]
+; CHECK: fnmul d1, d2, d3 ; encoding: [0x41,0x88,0x63,0x1e]
+
+ fsqrt s1, s2
+ fsqrt d1, d2
+
+; CHECK: fsqrt s1, s2 ; encoding: [0x41,0xc0,0x21,0x1e]
+; CHECK: fsqrt d1, d2 ; encoding: [0x41,0xc0,0x61,0x1e]
+
+ fsub s1, s2, s3
+ fsub d1, d2, d3
+
+; CHECK: fsub s1, s2, s3 ; encoding: [0x41,0x38,0x23,0x1e]
+; CHECK: fsub d1, d2, d3 ; encoding: [0x41,0x38,0x63,0x1e]
+
+;-----------------------------------------------------------------------------
+; Floating-point comparison
+;-----------------------------------------------------------------------------
+
+ fccmp s1, s2, #0, eq
+ fccmp d1, d2, #0, eq
+ fccmpe s1, s2, #0, eq
+ fccmpe d1, d2, #0, eq
+
+; CHECK: fccmp s1, s2, #0, eq ; encoding: [0x20,0x04,0x22,0x1e]
+; CHECK: fccmp d1, d2, #0, eq ; encoding: [0x20,0x04,0x62,0x1e]
+; CHECK: fccmpe s1, s2, #0, eq ; encoding: [0x30,0x04,0x22,0x1e]
+; CHECK: fccmpe d1, d2, #0, eq ; encoding: [0x30,0x04,0x62,0x1e]
+
+ fcmp s1, s2
+ fcmp d1, d2
+ fcmp s1, #0.0
+ fcmp d1, #0.0
+ fcmpe s1, s2
+ fcmpe d1, d2
+ fcmpe s1, #0.0
+ fcmpe d1, #0.0
+
+; CHECK: fcmp s1, s2 ; encoding: [0x20,0x20,0x22,0x1e]
+; CHECK: fcmp d1, d2 ; encoding: [0x20,0x20,0x62,0x1e]
+; CHECK: fcmp s1, #0.0 ; encoding: [0x28,0x20,0x20,0x1e]
+; CHECK: fcmp d1, #0.0 ; encoding: [0x28,0x20,0x60,0x1e]
+; CHECK: fcmpe s1, s2 ; encoding: [0x30,0x20,0x22,0x1e]
+; CHECK: fcmpe d1, d2 ; encoding: [0x30,0x20,0x62,0x1e]
+; CHECK: fcmpe s1, #0.0 ; encoding: [0x38,0x20,0x20,0x1e]
+; CHECK: fcmpe d1, #0.0 ; encoding: [0x38,0x20,0x60,0x1e]
+
+;-----------------------------------------------------------------------------
+; Floating-point conditional select
+;-----------------------------------------------------------------------------
+
+ fcsel s1, s2, s3, eq
+ fcsel d1, d2, d3, eq
+
+; CHECK: fcsel s1, s2, s3, eq ; encoding: [0x41,0x0c,0x23,0x1e]
+; CHECK: fcsel d1, d2, d3, eq ; encoding: [0x41,0x0c,0x63,0x1e]
+
+;-----------------------------------------------------------------------------
+; Floating-point convert
+;-----------------------------------------------------------------------------
+
+ fcvt h1, d2
+ fcvt s1, d2
+ fcvt d1, h2
+ fcvt s1, h2
+ fcvt d1, s2
+ fcvt h1, s2
+
+; CHECK: fcvt h1, d2 ; encoding: [0x41,0xc0,0x63,0x1e]
+; CHECK: fcvt s1, d2 ; encoding: [0x41,0x40,0x62,0x1e]
+; CHECK: fcvt d1, h2 ; encoding: [0x41,0xc0,0xe2,0x1e]
+; CHECK: fcvt s1, h2 ; encoding: [0x41,0x40,0xe2,0x1e]
+; CHECK: fcvt d1, s2 ; encoding: [0x41,0xc0,0x22,0x1e]
+; CHECK: fcvt h1, s2 ; encoding: [0x41,0xc0,0x23,0x1e]
+
+ fcvtas w1, d2
+ fcvtas x1, d2
+ fcvtas w1, s2
+ fcvtas x1, s2
+
+; CHECK: fcvtas w1, d2 ; encoding: [0x41,0x00,0x64,0x1e]
+; CHECK: fcvtas x1, d2 ; encoding: [0x41,0x00,0x64,0x9e]
+; CHECK: fcvtas w1, s2 ; encoding: [0x41,0x00,0x24,0x1e]
+; CHECK: fcvtas x1, s2 ; encoding: [0x41,0x00,0x24,0x9e]
+
+ fcvtau w1, s2
+ fcvtau w1, d2
+ fcvtau x1, s2
+ fcvtau x1, d2
+
+; CHECK: fcvtau w1, s2 ; encoding: [0x41,0x00,0x25,0x1e]
+; CHECK: fcvtau w1, d2 ; encoding: [0x41,0x00,0x65,0x1e]
+; CHECK: fcvtau x1, s2 ; encoding: [0x41,0x00,0x25,0x9e]
+; CHECK: fcvtau x1, d2 ; encoding: [0x41,0x00,0x65,0x9e]
+
+ fcvtms w1, s2
+ fcvtms w1, d2
+ fcvtms x1, s2
+ fcvtms x1, d2
+
+; CHECK: fcvtms w1, s2 ; encoding: [0x41,0x00,0x30,0x1e]
+; CHECK: fcvtms w1, d2 ; encoding: [0x41,0x00,0x70,0x1e]
+; CHECK: fcvtms x1, s2 ; encoding: [0x41,0x00,0x30,0x9e]
+; CHECK: fcvtms x1, d2 ; encoding: [0x41,0x00,0x70,0x9e]
+
+ fcvtmu w1, s2
+ fcvtmu w1, d2
+ fcvtmu x1, s2
+ fcvtmu x1, d2
+
+; CHECK: fcvtmu w1, s2 ; encoding: [0x41,0x00,0x31,0x1e]
+; CHECK: fcvtmu w1, d2 ; encoding: [0x41,0x00,0x71,0x1e]
+; CHECK: fcvtmu x1, s2 ; encoding: [0x41,0x00,0x31,0x9e]
+; CHECK: fcvtmu x1, d2 ; encoding: [0x41,0x00,0x71,0x9e]
+
+ fcvtns w1, s2
+ fcvtns w1, d2
+ fcvtns x1, s2
+ fcvtns x1, d2
+
+; CHECK: fcvtns w1, s2 ; encoding: [0x41,0x00,0x20,0x1e]
+; CHECK: fcvtns w1, d2 ; encoding: [0x41,0x00,0x60,0x1e]
+; CHECK: fcvtns x1, s2 ; encoding: [0x41,0x00,0x20,0x9e]
+; CHECK: fcvtns x1, d2 ; encoding: [0x41,0x00,0x60,0x9e]
+
+ fcvtnu w1, s2
+ fcvtnu w1, d2
+ fcvtnu x1, s2
+ fcvtnu x1, d2
+
+; CHECK: fcvtnu w1, s2 ; encoding: [0x41,0x00,0x21,0x1e]
+; CHECK: fcvtnu w1, d2 ; encoding: [0x41,0x00,0x61,0x1e]
+; CHECK: fcvtnu x1, s2 ; encoding: [0x41,0x00,0x21,0x9e]
+; CHECK: fcvtnu x1, d2 ; encoding: [0x41,0x00,0x61,0x9e]
+
+ fcvtps w1, s2
+ fcvtps w1, d2
+ fcvtps x1, s2
+ fcvtps x1, d2
+
+; CHECK: fcvtps w1, s2 ; encoding: [0x41,0x00,0x28,0x1e]
+; CHECK: fcvtps w1, d2 ; encoding: [0x41,0x00,0x68,0x1e]
+; CHECK: fcvtps x1, s2 ; encoding: [0x41,0x00,0x28,0x9e]
+; CHECK: fcvtps x1, d2 ; encoding: [0x41,0x00,0x68,0x9e]
+
+ fcvtpu w1, s2
+ fcvtpu w1, d2
+ fcvtpu x1, s2
+ fcvtpu x1, d2
+
+; CHECK: fcvtpu w1, s2 ; encoding: [0x41,0x00,0x29,0x1e]
+; CHECK: fcvtpu w1, d2 ; encoding: [0x41,0x00,0x69,0x1e]
+; CHECK: fcvtpu x1, s2 ; encoding: [0x41,0x00,0x29,0x9e]
+; CHECK: fcvtpu x1, d2 ; encoding: [0x41,0x00,0x69,0x9e]
+
+ fcvtzs w1, s2
+ fcvtzs w1, s2, #1
+ fcvtzs w1, d2
+ fcvtzs w1, d2, #1
+ fcvtzs x1, s2
+ fcvtzs x1, s2, #1
+ fcvtzs x1, d2
+ fcvtzs x1, d2, #1
+
+; CHECK: fcvtzs w1, s2 ; encoding: [0x41,0x00,0x38,0x1e]
+; CHECK: fcvtzs w1, s2, #1 ; encoding: [0x41,0xfc,0x18,0x1e]
+; CHECK: fcvtzs w1, d2 ; encoding: [0x41,0x00,0x78,0x1e]
+; CHECK: fcvtzs w1, d2, #1 ; encoding: [0x41,0xfc,0x58,0x1e]
+; CHECK: fcvtzs x1, s2 ; encoding: [0x41,0x00,0x38,0x9e]
+; CHECK: fcvtzs x1, s2, #1 ; encoding: [0x41,0xfc,0x18,0x9e]
+; CHECK: fcvtzs x1, d2 ; encoding: [0x41,0x00,0x78,0x9e]
+; CHECK: fcvtzs x1, d2, #1 ; encoding: [0x41,0xfc,0x58,0x9e]
+
+ fcvtzu w1, s2
+ fcvtzu w1, s2, #1
+ fcvtzu w1, d2
+ fcvtzu w1, d2, #1
+ fcvtzu x1, s2
+ fcvtzu x1, s2, #1
+ fcvtzu x1, d2
+ fcvtzu x1, d2, #1
+
+; CHECK: fcvtzu w1, s2 ; encoding: [0x41,0x00,0x39,0x1e]
+; CHECK: fcvtzu w1, s2, #1 ; encoding: [0x41,0xfc,0x19,0x1e]
+; CHECK: fcvtzu w1, d2 ; encoding: [0x41,0x00,0x79,0x1e]
+; CHECK: fcvtzu w1, d2, #1 ; encoding: [0x41,0xfc,0x59,0x1e]
+; CHECK: fcvtzu x1, s2 ; encoding: [0x41,0x00,0x39,0x9e]
+; CHECK: fcvtzu x1, s2, #1 ; encoding: [0x41,0xfc,0x19,0x9e]
+; CHECK: fcvtzu x1, d2 ; encoding: [0x41,0x00,0x79,0x9e]
+; CHECK: fcvtzu x1, d2, #1 ; encoding: [0x41,0xfc,0x59,0x9e]
+
+ scvtf s1, w2
+ scvtf s1, w2, #1
+ scvtf d1, w2
+ scvtf d1, w2, #1
+ scvtf s1, x2
+ scvtf s1, x2, #1
+ scvtf d1, x2
+ scvtf d1, x2, #1
+
+; CHECK: scvtf s1, w2 ; encoding: [0x41,0x00,0x22,0x1e]
+; CHECK: scvtf s1, w2, #1 ; encoding: [0x41,0xfc,0x02,0x1e]
+; CHECK: scvtf d1, w2 ; encoding: [0x41,0x00,0x62,0x1e]
+; CHECK: scvtf d1, w2, #1 ; encoding: [0x41,0xfc,0x42,0x1e]
+; CHECK: scvtf s1, x2 ; encoding: [0x41,0x00,0x22,0x9e]
+; CHECK: scvtf s1, x2, #1 ; encoding: [0x41,0xfc,0x02,0x9e]
+; CHECK: scvtf d1, x2 ; encoding: [0x41,0x00,0x62,0x9e]
+; CHECK: scvtf d1, x2, #1 ; encoding: [0x41,0xfc,0x42,0x9e]
+
+ ucvtf s1, w2
+ ucvtf s1, w2, #1
+ ucvtf d1, w2
+ ucvtf d1, w2, #1
+ ucvtf s1, x2
+ ucvtf s1, x2, #1
+ ucvtf d1, x2
+ ucvtf d1, x2, #1
+
+; CHECK: ucvtf s1, w2 ; encoding: [0x41,0x00,0x23,0x1e]
+; CHECK: ucvtf s1, w2, #1 ; encoding: [0x41,0xfc,0x03,0x1e]
+; CHECK: ucvtf d1, w2 ; encoding: [0x41,0x00,0x63,0x1e]
+; CHECK: ucvtf d1, w2, #1 ; encoding: [0x41,0xfc,0x43,0x1e]
+; CHECK: ucvtf s1, x2 ; encoding: [0x41,0x00,0x23,0x9e]
+; CHECK: ucvtf s1, x2, #1 ; encoding: [0x41,0xfc,0x03,0x9e]
+; CHECK: ucvtf d1, x2 ; encoding: [0x41,0x00,0x63,0x9e]
+; CHECK: ucvtf d1, x2, #1 ; encoding: [0x41,0xfc,0x43,0x9e]
+
+;-----------------------------------------------------------------------------
+; Floating-point move
+;-----------------------------------------------------------------------------
+
+ fmov s1, w2
+ fmov w1, s2
+ fmov d1, x2
+ fmov x1, d2
+
+; CHECK: fmov s1, w2 ; encoding: [0x41,0x00,0x27,0x1e]
+; CHECK: fmov w1, s2 ; encoding: [0x41,0x00,0x26,0x1e]
+; CHECK: fmov d1, x2 ; encoding: [0x41,0x00,0x67,0x9e]
+; CHECK: fmov x1, d2 ; encoding: [0x41,0x00,0x66,0x9e]
+
+ fmov s1, #0.125
+ fmov s1, #0x40
+ fmov d1, #0.125
+ fmov d1, #0x40
+ fmov d1, #-4.843750e-01
+ fmov d1, #4.843750e-01
+ fmov d3, #3
+ fmov s2, #0.0
+ fmov d2, #0.0
+
+; CHECK: fmov s1, #0.12500000 ; encoding: [0x01,0x10,0x28,0x1e]
+; CHECK: fmov s1, #0.12500000 ; encoding: [0x01,0x10,0x28,0x1e]
+; CHECK: fmov d1, #0.12500000 ; encoding: [0x01,0x10,0x68,0x1e]
+; CHECK: fmov d1, #0.12500000 ; encoding: [0x01,0x10,0x68,0x1e]
+; CHECK: fmov d1, #-0.48437500 ; encoding: [0x01,0xf0,0x7b,0x1e]
+; CHECK: fmov d1, #0.48437500 ; encoding: [0x01,0xf0,0x6b,0x1e]
+; CHECK: fmov d3, #3.00000000 ; encoding: [0x03,0x10,0x61,0x1e]
+; CHECK: fmov s2, wzr ; encoding: [0xe2,0x03,0x27,0x1e]
+; CHECK: fmov d2, xzr ; encoding: [0xe2,0x03,0x67,0x9e]
+
+ fmov s1, s2
+ fmov d1, d2
+
+; CHECK: fmov s1, s2 ; encoding: [0x41,0x40,0x20,0x1e]
+; CHECK: fmov d1, d2 ; encoding: [0x41,0x40,0x60,0x1e]
+
+
+ fmov x2, v5.d[1]
+ fmov.d x9, v7[1]
+ fmov v1.d[1], x1
+ fmov.d v8[1], x6
+
+; CHECK: fmov.d x2, v5[1] ; encoding: [0xa2,0x00,0xae,0x9e]
+; CHECK: fmov.d x9, v7[1] ; encoding: [0xe9,0x00,0xae,0x9e]
+; CHECK: fmov.d v1[1], x1 ; encoding: [0x21,0x00,0xaf,0x9e]
+; CHECK: fmov.d v8[1], x6 ; encoding: [0xc8,0x00,0xaf,0x9e]
+
+
+;-----------------------------------------------------------------------------
+; Floating-point round to integral
+;-----------------------------------------------------------------------------
+
+ frinta s1, s2
+ frinta d1, d2
+
+; CHECK: frinta s1, s2 ; encoding: [0x41,0x40,0x26,0x1e]
+; CHECK: frinta d1, d2 ; encoding: [0x41,0x40,0x66,0x1e]
+
+ frinti s1, s2
+ frinti d1, d2
+
+; CHECK: frinti s1, s2 ; encoding: [0x41,0xc0,0x27,0x1e]
+; CHECK: frinti d1, d2 ; encoding: [0x41,0xc0,0x67,0x1e]
+
+ frintm s1, s2
+ frintm d1, d2
+
+; CHECK: frintm s1, s2 ; encoding: [0x41,0x40,0x25,0x1e]
+; CHECK: frintm d1, d2 ; encoding: [0x41,0x40,0x65,0x1e]
+
+ frintn s1, s2
+ frintn d1, d2
+
+; CHECK: frintn s1, s2 ; encoding: [0x41,0x40,0x24,0x1e]
+; CHECK: frintn d1, d2 ; encoding: [0x41,0x40,0x64,0x1e]
+
+ frintp s1, s2
+ frintp d1, d2
+
+; CHECK: frintp s1, s2 ; encoding: [0x41,0xc0,0x24,0x1e]
+; CHECK: frintp d1, d2 ; encoding: [0x41,0xc0,0x64,0x1e]
+
+ frintx s1, s2
+ frintx d1, d2
+
+; CHECK: frintx s1, s2 ; encoding: [0x41,0x40,0x27,0x1e]
+; CHECK: frintx d1, d2 ; encoding: [0x41,0x40,0x67,0x1e]
+
+ frintz s1, s2
+ frintz d1, d2
+
+; CHECK: frintz s1, s2 ; encoding: [0x41,0xc0,0x25,0x1e]
+; CHECK: frintz d1, d2 ; encoding: [0x41,0xc0,0x65,0x1e]
+
+ cmhs d0, d0, d0
+ cmtst d0, d0, d0
+
+; CHECK: cmhs d0, d0, d0 ; encoding: [0x00,0x3c,0xe0,0x7e]
+; CHECK: cmtst d0, d0, d0 ; encoding: [0x00,0x8c,0xe0,0x5e]
+
+
+
+;-----------------------------------------------------------------------------
+; Floating-point extract and narrow
+;-----------------------------------------------------------------------------
+ sqxtn b4, h2
+ sqxtn h2, s3
+ sqxtn s9, d2
+
+; CHECK: sqxtn b4, h2 ; encoding: [0x44,0x48,0x21,0x5e]
+; CHECK: sqxtn h2, s3 ; encoding: [0x62,0x48,0x61,0x5e]
+; CHECK: sqxtn s9, d2 ; encoding: [0x49,0x48,0xa1,0x5e]
+
+ sqxtun b4, h2
+ sqxtun h2, s3
+ sqxtun s9, d2
+
+; CHECK: sqxtun b4, h2 ; encoding: [0x44,0x28,0x21,0x7e]
+; CHECK: sqxtun h2, s3 ; encoding: [0x62,0x28,0x61,0x7e]
+; CHECK: sqxtun s9, d2 ; encoding: [0x49,0x28,0xa1,0x7e]
+
+ uqxtn b4, h2
+ uqxtn h2, s3
+ uqxtn s9, d2
+
+; CHECK: uqxtn b4, h2 ; encoding: [0x44,0x48,0x21,0x7e]
+; CHECK: uqxtn h2, s3 ; encoding: [0x62,0x48,0x61,0x7e]
+; CHECK: uqxtn s9, d2 ; encoding: [0x49,0x48,0xa1,0x7e]
diff --git a/test/MC/AArch64/arm64-large-relocs.s b/test/MC/AArch64/arm64-large-relocs.s
new file mode 100644
index 000000000000..2a0cfa222862
--- /dev/null
+++ b/test/MC/AArch64/arm64-large-relocs.s
@@ -0,0 +1,38 @@
+// RUN: llvm-mc -triple=arm64-linux-gnu -show-encoding -o - %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64-linux-gnu -show-encoding -filetype=obj -o - %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-OBJ %s
+
+ movz x2, #:abs_g0:sym
+ movk w3, #:abs_g0_nc:sym
+// CHECK: movz x2, #:abs_g0:sym // encoding: [0bAAA00010,A,0b100AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0:sym, kind: fixup_aarch64_movw
+// CHECK: movk w3, #:abs_g0_nc:sym // encoding: [0bAAA00011,A,0b100AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_nc:sym, kind: fixup_aarch64_movw
+
+// CHECK-OBJ: 0 R_AARCH64_MOVW_UABS_G0 sym
+// CHECK-OBJ: 4 R_AARCH64_MOVW_UABS_G0_NC sym
+
+ movz x4, #:abs_g1:sym
+ movk w5, #:abs_g1_nc:sym
+// CHECK: movz x4, #:abs_g1:sym // encoding: [0bAAA00100,A,0b101AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1:sym, kind: fixup_aarch64_movw
+// CHECK: movk w5, #:abs_g1_nc:sym // encoding: [0bAAA00101,A,0b101AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_nc:sym, kind: fixup_aarch64_movw
+
+// CHECK-OBJ: 8 R_AARCH64_MOVW_UABS_G1 sym
+// CHECK-OBJ: c R_AARCH64_MOVW_UABS_G1_NC sym
+
+ movz x6, #:abs_g2:sym
+ movk x7, #:abs_g2_nc:sym
+// CHECK: movz x6, #:abs_g2:sym // encoding: [0bAAA00110,A,0b110AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2:sym, kind: fixup_aarch64_movw
+// CHECK: movk x7, #:abs_g2_nc:sym // encoding: [0bAAA00111,A,0b110AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_nc:sym, kind: fixup_aarch64_movw
+
+// CHECK-OBJ: 10 R_AARCH64_MOVW_UABS_G2 sym
+// CHECK-OBJ: 14 R_AARCH64_MOVW_UABS_G2_NC sym
+
+ movz x8, #:abs_g3:sym
+// CHECK: movz x8, #:abs_g3:sym // encoding: [0bAAA01000,A,0b111AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g3:sym, kind: fixup_aarch64_movw
+
+// CHECK-OBJ: 18 R_AARCH64_MOVW_UABS_G3 sym
diff --git a/test/MC/AArch64/arm64-leaf-compact-unwind.s b/test/MC/AArch64/arm64-leaf-compact-unwind.s
new file mode 100644
index 000000000000..27d3d51c2935
--- /dev/null
+++ b/test/MC/AArch64/arm64-leaf-compact-unwind.s
@@ -0,0 +1,208 @@
+// RUN: llvm-mc -triple=arm64-apple-ios -filetype=obj < %s | \
+// RUN: llvm-readobj -sections -section-relocations -section-data | \
+// RUN: FileCheck %s
+//
+// rdar://13070556
+
+// FIXME: we should add compact unwind support to llvm-objdump -unwind-info
+
+// CHECK: Section {
+// CHECK: Index: 1
+// CHECK-NEXT: Name: __compact_unwind
+// CHECK-NEXT: Segment: __LD
+// CHECK-NEXT: Address:
+// CHECK-NEXT: Size:
+// CHECK-NEXT: Offset:
+// CHECK-NEXT: Alignment:
+// CHECK-NEXT: RelocationOffset:
+// CHECK-NEXT: RelocationCount:
+// CHECK-NEXT: Type:
+// CHECK-NEXT: Attributes [
+// CHECK-NEXT: Debug
+// CHECK-NEXT: ]
+// CHECK-NEXT: Reserved1:
+// CHECK-NEXT: Reserved2:
+// CHECK-NEXT: Relocations [
+// CHECK-NEXT: 0x60 0 3 0 ARM64_RELOC_UNSIGNED 0 0x1
+// CHECK-NEXT: 0x40 0 3 0 ARM64_RELOC_UNSIGNED 0 0x1
+// CHECK-NEXT: 0x20 0 3 0 ARM64_RELOC_UNSIGNED 0 0x1
+// CHECK-NEXT: 0x0 0 3 0 ARM64_RELOC_UNSIGNED 0 0x1
+// CHECK-NEXT: ]
+// CHECK-NEXT: SectionData (
+// CHECK-NEXT: 0000: 00000000 00000000 08000000 00000002
+// CHECK-NEXT: 0010: 00000000 00000000 00000000 00000000
+// CHECK-NEXT: 0020: 08000000 00000000 40000000 00900002
+// CHECK-NEXT: 0030: 00000000 00000000 00000000 00000000
+// CHECK-NEXT: 0040: 48000000 00000000 D4000000 0F400002
+// CHECK-NEXT: 0050: 00000000 00000000 00000000 00000000
+// CHECK-NEXT: 0060: 1C010000 00000000 54000000 10100202
+// CHECK-NEXT: 0070: 00000000 00000000 00000000 00000000
+// CHECK-NEXT: )
+// CHECK-NEXT: }
+
+ .section __TEXT,__text,regular,pure_instructions
+ .globl _foo1
+ .align 2
+_foo1: ; @foo1
+ .cfi_startproc
+; BB#0: ; %entry
+ add w0, w0, #42 ; =#42
+ ret
+ .cfi_endproc
+
+ .globl _foo2
+ .align 2
+_foo2: ; @foo2
+ .cfi_startproc
+; BB#0: ; %entry
+ sub sp, sp, #144 ; =#144
+Ltmp2:
+ .cfi_def_cfa_offset 144
+ mov x9, xzr
+ mov x8, sp
+LBB1_1: ; %for.body
+ ; =>This Inner Loop Header: Depth=1
+ str w9, [x8, x9, lsl #2]
+ add x9, x9, #1 ; =#1
+ cmp w9, #36 ; =#36
+ b.ne LBB1_1
+; BB#2:
+ mov x9, xzr
+ mov w0, wzr
+LBB1_3: ; %for.body4
+ ; =>This Inner Loop Header: Depth=1
+ ldr w10, [x8, x9]
+ add x9, x9, #4 ; =#4
+ cmp w9, #144 ; =#144
+ add w0, w10, w0
+ b.ne LBB1_3
+; BB#4: ; %for.end9
+ add sp, sp, #144 ; =#144
+ ret
+ .cfi_endproc
+
+ .globl _foo3
+ .align 2
+_foo3: ; @foo3
+ .cfi_startproc
+; BB#0: ; %entry
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+Ltmp3:
+ .cfi_def_cfa_offset 64
+Ltmp4:
+ .cfi_offset w19, -16
+Ltmp5:
+ .cfi_offset w20, -24
+Ltmp6:
+ .cfi_offset w21, -32
+Ltmp7:
+ .cfi_offset w22, -40
+Ltmp8:
+ .cfi_offset w23, -48
+Ltmp9:
+ .cfi_offset w24, -56
+Ltmp10:
+ .cfi_offset w25, -64
+Ltmp11:
+ .cfi_offset w26, -72
+Lloh0:
+ adrp x8, _bar@GOTPAGE
+Lloh1:
+ ldr x8, [x8, _bar@GOTPAGEOFF]
+ ldr w9, [x8]
+ ldr w10, [x8]
+ ldr w11, [x8]
+ ldr w12, [x8]
+ ldr w13, [x8]
+ ldr w14, [x8]
+ ldr w15, [x8]
+ ldr w16, [x8]
+ ldr w17, [x8]
+ ldr w0, [x8]
+ ldr w19, [x8]
+ ldr w20, [x8]
+ ldr w21, [x8]
+ ldr w22, [x8]
+ ldr w23, [x8]
+ ldr w24, [x8]
+ ldr w25, [x8]
+ ldr w8, [x8]
+ add w9, w10, w9
+ add w9, w9, w11
+ add w9, w9, w12
+ add w9, w9, w13
+ add w9, w9, w14
+ add w9, w9, w15
+ add w9, w9, w16
+ add w9, w9, w17
+ add w9, w9, w0
+ add w9, w9, w19
+ add w9, w9, w20
+ add w9, w9, w21
+ add w9, w9, w22
+ add w9, w9, w23
+ add w9, w9, w24
+ add w9, w9, w25
+ sub w8, w8, w9
+ sub w8, w8, w7, lsl #1
+ sub w8, w8, w6, lsl #1
+ sub w8, w8, w5, lsl #1
+ sub w8, w8, w4, lsl #1
+ sub w8, w8, w3, lsl #1
+ sub w8, w8, w2, lsl #1
+ sub w0, w8, w1, lsl #1
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+ .loh AdrpLdrGot Lloh0, Lloh1
+ .cfi_endproc
+
+ .globl _foo4
+ .align 2
+_foo4: ; @foo4
+ .cfi_startproc
+; BB#0: ; %entry
+ stp x28, x27, [sp, #-16]!
+ sub sp, sp, #512 ; =#512
+Ltmp12:
+ .cfi_def_cfa_offset 528
+Ltmp13:
+ .cfi_offset w27, -16
+Ltmp14:
+ .cfi_offset w28, -24
+ ; kill: W0<def> W0<kill> X0<def>
+ mov x9, xzr
+ ubfx x10, x0, #0, #32
+ mov x8, sp
+LBB3_1: ; %for.body
+ ; =>This Inner Loop Header: Depth=1
+ add w11, w10, w9
+ str w11, [x8, x9, lsl #2]
+ add x9, x9, #1 ; =#1
+ cmp w9, #128 ; =#128
+ b.ne LBB3_1
+; BB#2: ; %for.cond2.preheader
+ mov x9, xzr
+ mov w0, wzr
+ add x8, x8, w5, sxtw #2
+LBB3_3: ; %for.body4
+ ; =>This Inner Loop Header: Depth=1
+ ldr w10, [x8, x9]
+ add x9, x9, #4 ; =#4
+ cmp w9, #512 ; =#512
+ add w0, w10, w0
+ b.ne LBB3_3
+; BB#4: ; %for.end11
+ add sp, sp, #512 ; =#512
+ ldp x28, x27, [sp], #16
+ ret
+ .cfi_endproc
+
+ .comm _bar,4,2 ; @bar
+
+.subsections_via_symbols
diff --git a/test/MC/AArch64/arm64-logical-encoding.s b/test/MC/AArch64/arm64-logical-encoding.s
new file mode 100644
index 000000000000..e5f1436d1ab7
--- /dev/null
+++ b/test/MC/AArch64/arm64-logical-encoding.s
@@ -0,0 +1,224 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -show-encoding < %s | FileCheck %s
+
+foo:
+;==---------------------------------------------------------------------------==
+; 5.4.2 Logical (immediate)
+;==---------------------------------------------------------------------------==
+
+ and w0, w0, #1
+ and x0, x0, #1
+ and w1, w2, #15
+ and x1, x2, #15
+ and sp, x5, #~15
+ ands w0, w0, #1
+ ands x0, x0, #1
+ ands w1, w2, #15
+ ands x1, x2, #15
+
+; CHECK: and w0, w0, #0x1 ; encoding: [0x00,0x00,0x00,0x12]
+; CHECK: and x0, x0, #0x1 ; encoding: [0x00,0x00,0x40,0x92]
+; CHECK: and w1, w2, #0xf ; encoding: [0x41,0x0c,0x00,0x12]
+; CHECK: and x1, x2, #0xf ; encoding: [0x41,0x0c,0x40,0x92]
+; CHECK: and sp, x5, #0xfffffffffffffff0 ; encoding: [0xbf,0xec,0x7c,0x92]
+; CHECK: ands w0, w0, #0x1 ; encoding: [0x00,0x00,0x00,0x72]
+; CHECK: ands x0, x0, #0x1 ; encoding: [0x00,0x00,0x40,0xf2]
+; CHECK: ands w1, w2, #0xf ; encoding: [0x41,0x0c,0x00,0x72]
+; CHECK: ands x1, x2, #0xf ; encoding: [0x41,0x0c,0x40,0xf2]
+
+ eor w1, w2, #0x4000
+ eor x1, x2, #0x8000
+
+; CHECK: eor w1, w2, #0x4000 ; encoding: [0x41,0x00,0x12,0x52]
+; CHECK: eor x1, x2, #0x8000 ; encoding: [0x41,0x00,0x71,0xd2]
+
+ orr w1, w2, #0x4000
+ orr x1, x2, #0x8000
+
+; CHECK: orr w1, w2, #0x4000 ; encoding: [0x41,0x00,0x12,0x32]
+; CHECK: orr x1, x2, #0x8000 ; encoding: [0x41,0x00,0x71,0xb2]
+
+ orr w8, wzr, #0x1
+ orr x8, xzr, #0x1
+
+; CHECK: orr w8, wzr, #0x1 ; encoding: [0xe8,0x03,0x00,0x32]
+; CHECK: orr x8, xzr, #0x1 ; encoding: [0xe8,0x03,0x40,0xb2]
+
+;==---------------------------------------------------------------------------==
+; 5.5.3 Logical (shifted register)
+;==---------------------------------------------------------------------------==
+
+ and w1, w2, w3
+ and x1, x2, x3
+ and w1, w2, w3, lsl #2
+ and x1, x2, x3, lsl #2
+ and w1, w2, w3, lsr #2
+ and x1, x2, x3, lsr #2
+ and w1, w2, w3, asr #2
+ and x1, x2, x3, asr #2
+ and w1, w2, w3, ror #2
+ and x1, x2, x3, ror #2
+
+; CHECK: and w1, w2, w3 ; encoding: [0x41,0x00,0x03,0x0a]
+; CHECK: and x1, x2, x3 ; encoding: [0x41,0x00,0x03,0x8a]
+; CHECK: and w1, w2, w3, lsl #2 ; encoding: [0x41,0x08,0x03,0x0a]
+; CHECK: and x1, x2, x3, lsl #2 ; encoding: [0x41,0x08,0x03,0x8a]
+; CHECK: and w1, w2, w3, lsr #2 ; encoding: [0x41,0x08,0x43,0x0a]
+; CHECK: and x1, x2, x3, lsr #2 ; encoding: [0x41,0x08,0x43,0x8a]
+; CHECK: and w1, w2, w3, asr #2 ; encoding: [0x41,0x08,0x83,0x0a]
+; CHECK: and x1, x2, x3, asr #2 ; encoding: [0x41,0x08,0x83,0x8a]
+; CHECK: and w1, w2, w3, ror #2 ; encoding: [0x41,0x08,0xc3,0x0a]
+; CHECK: and x1, x2, x3, ror #2 ; encoding: [0x41,0x08,0xc3,0x8a]
+
+ ands w1, w2, w3
+ ands x1, x2, x3
+ ands w1, w2, w3, lsl #2
+ ands x1, x2, x3, lsl #2
+ ands w1, w2, w3, lsr #2
+ ands x1, x2, x3, lsr #2
+ ands w1, w2, w3, asr #2
+ ands x1, x2, x3, asr #2
+ ands w1, w2, w3, ror #2
+ ands x1, x2, x3, ror #2
+
+; CHECK: ands w1, w2, w3 ; encoding: [0x41,0x00,0x03,0x6a]
+; CHECK: ands x1, x2, x3 ; encoding: [0x41,0x00,0x03,0xea]
+; CHECK: ands w1, w2, w3, lsl #2 ; encoding: [0x41,0x08,0x03,0x6a]
+; CHECK: ands x1, x2, x3, lsl #2 ; encoding: [0x41,0x08,0x03,0xea]
+; CHECK: ands w1, w2, w3, lsr #2 ; encoding: [0x41,0x08,0x43,0x6a]
+; CHECK: ands x1, x2, x3, lsr #2 ; encoding: [0x41,0x08,0x43,0xea]
+; CHECK: ands w1, w2, w3, asr #2 ; encoding: [0x41,0x08,0x83,0x6a]
+; CHECK: ands x1, x2, x3, asr #2 ; encoding: [0x41,0x08,0x83,0xea]
+; CHECK: ands w1, w2, w3, ror #2 ; encoding: [0x41,0x08,0xc3,0x6a]
+; CHECK: ands x1, x2, x3, ror #2 ; encoding: [0x41,0x08,0xc3,0xea]
+
+ bic w1, w2, w3
+ bic x1, x2, x3
+ bic w1, w2, w3, lsl #3
+ bic x1, x2, x3, lsl #3
+ bic w1, w2, w3, lsr #3
+ bic x1, x2, x3, lsr #3
+ bic w1, w2, w3, asr #3
+ bic x1, x2, x3, asr #3
+ bic w1, w2, w3, ror #3
+ bic x1, x2, x3, ror #3
+
+; CHECK: bic w1, w2, w3 ; encoding: [0x41,0x00,0x23,0x0a]
+; CHECK: bic x1, x2, x3 ; encoding: [0x41,0x00,0x23,0x8a]
+; CHECK: bic w1, w2, w3, lsl #3 ; encoding: [0x41,0x0c,0x23,0x0a]
+; CHECK: bic x1, x2, x3, lsl #3 ; encoding: [0x41,0x0c,0x23,0x8a]
+; CHECK: bic w1, w2, w3, lsr #3 ; encoding: [0x41,0x0c,0x63,0x0a]
+; CHECK: bic x1, x2, x3, lsr #3 ; encoding: [0x41,0x0c,0x63,0x8a]
+; CHECK: bic w1, w2, w3, asr #3 ; encoding: [0x41,0x0c,0xa3,0x0a]
+; CHECK: bic x1, x2, x3, asr #3 ; encoding: [0x41,0x0c,0xa3,0x8a]
+; CHECK: bic w1, w2, w3, ror #3 ; encoding: [0x41,0x0c,0xe3,0x0a]
+; CHECK: bic x1, x2, x3, ror #3 ; encoding: [0x41,0x0c,0xe3,0x8a]
+
+ bics w1, w2, w3
+ bics x1, x2, x3
+ bics w1, w2, w3, lsl #3
+ bics x1, x2, x3, lsl #3
+ bics w1, w2, w3, lsr #3
+ bics x1, x2, x3, lsr #3
+ bics w1, w2, w3, asr #3
+ bics x1, x2, x3, asr #3
+ bics w1, w2, w3, ror #3
+ bics x1, x2, x3, ror #3
+
+; CHECK: bics w1, w2, w3 ; encoding: [0x41,0x00,0x23,0x6a]
+; CHECK: bics x1, x2, x3 ; encoding: [0x41,0x00,0x23,0xea]
+; CHECK: bics w1, w2, w3, lsl #3 ; encoding: [0x41,0x0c,0x23,0x6a]
+; CHECK: bics x1, x2, x3, lsl #3 ; encoding: [0x41,0x0c,0x23,0xea]
+; CHECK: bics w1, w2, w3, lsr #3 ; encoding: [0x41,0x0c,0x63,0x6a]
+; CHECK: bics x1, x2, x3, lsr #3 ; encoding: [0x41,0x0c,0x63,0xea]
+; CHECK: bics w1, w2, w3, asr #3 ; encoding: [0x41,0x0c,0xa3,0x6a]
+; CHECK: bics x1, x2, x3, asr #3 ; encoding: [0x41,0x0c,0xa3,0xea]
+; CHECK: bics w1, w2, w3, ror #3 ; encoding: [0x41,0x0c,0xe3,0x6a]
+; CHECK: bics x1, x2, x3, ror #3 ; encoding: [0x41,0x0c,0xe3,0xea]
+
+ eon w1, w2, w3
+ eon x1, x2, x3
+ eon w1, w2, w3, lsl #4
+ eon x1, x2, x3, lsl #4
+ eon w1, w2, w3, lsr #4
+ eon x1, x2, x3, lsr #4
+ eon w1, w2, w3, asr #4
+ eon x1, x2, x3, asr #4
+ eon w1, w2, w3, ror #4
+ eon x1, x2, x3, ror #4
+
+; CHECK: eon w1, w2, w3 ; encoding: [0x41,0x00,0x23,0x4a]
+; CHECK: eon x1, x2, x3 ; encoding: [0x41,0x00,0x23,0xca]
+; CHECK: eon w1, w2, w3, lsl #4 ; encoding: [0x41,0x10,0x23,0x4a]
+; CHECK: eon x1, x2, x3, lsl #4 ; encoding: [0x41,0x10,0x23,0xca]
+; CHECK: eon w1, w2, w3, lsr #4 ; encoding: [0x41,0x10,0x63,0x4a]
+; CHECK: eon x1, x2, x3, lsr #4 ; encoding: [0x41,0x10,0x63,0xca]
+; CHECK: eon w1, w2, w3, asr #4 ; encoding: [0x41,0x10,0xa3,0x4a]
+; CHECK: eon x1, x2, x3, asr #4 ; encoding: [0x41,0x10,0xa3,0xca]
+; CHECK: eon w1, w2, w3, ror #4 ; encoding: [0x41,0x10,0xe3,0x4a]
+; CHECK: eon x1, x2, x3, ror #4 ; encoding: [0x41,0x10,0xe3,0xca]
+
+ eor w1, w2, w3
+ eor x1, x2, x3
+ eor w1, w2, w3, lsl #5
+ eor x1, x2, x3, lsl #5
+ eor w1, w2, w3, lsr #5
+ eor x1, x2, x3, lsr #5
+ eor w1, w2, w3, asr #5
+ eor x1, x2, x3, asr #5
+ eor w1, w2, w3, ror #5
+ eor x1, x2, x3, ror #5
+
+; CHECK: eor w1, w2, w3 ; encoding: [0x41,0x00,0x03,0x4a]
+; CHECK: eor x1, x2, x3 ; encoding: [0x41,0x00,0x03,0xca]
+; CHECK: eor w1, w2, w3, lsl #5 ; encoding: [0x41,0x14,0x03,0x4a]
+; CHECK: eor x1, x2, x3, lsl #5 ; encoding: [0x41,0x14,0x03,0xca]
+; CHECK: eor w1, w2, w3, lsr #5 ; encoding: [0x41,0x14,0x43,0x4a]
+; CHECK: eor x1, x2, x3, lsr #5 ; encoding: [0x41,0x14,0x43,0xca]
+; CHECK: eor w1, w2, w3, asr #5 ; encoding: [0x41,0x14,0x83,0x4a]
+; CHECK: eor x1, x2, x3, asr #5 ; encoding: [0x41,0x14,0x83,0xca]
+; CHECK: eor w1, w2, w3, ror #5 ; encoding: [0x41,0x14,0xc3,0x4a]
+; CHECK: eor x1, x2, x3, ror #5 ; encoding: [0x41,0x14,0xc3,0xca]
+
+ orr w1, w2, w3
+ orr x1, x2, x3
+ orr w1, w2, w3, lsl #6
+ orr x1, x2, x3, lsl #6
+ orr w1, w2, w3, lsr #6
+ orr x1, x2, x3, lsr #6
+ orr w1, w2, w3, asr #6
+ orr x1, x2, x3, asr #6
+ orr w1, w2, w3, ror #6
+ orr x1, x2, x3, ror #6
+
+; CHECK: orr w1, w2, w3 ; encoding: [0x41,0x00,0x03,0x2a]
+; CHECK: orr x1, x2, x3 ; encoding: [0x41,0x00,0x03,0xaa]
+; CHECK: orr w1, w2, w3, lsl #6 ; encoding: [0x41,0x18,0x03,0x2a]
+; CHECK: orr x1, x2, x3, lsl #6 ; encoding: [0x41,0x18,0x03,0xaa]
+; CHECK: orr w1, w2, w3, lsr #6 ; encoding: [0x41,0x18,0x43,0x2a]
+; CHECK: orr x1, x2, x3, lsr #6 ; encoding: [0x41,0x18,0x43,0xaa]
+; CHECK: orr w1, w2, w3, asr #6 ; encoding: [0x41,0x18,0x83,0x2a]
+; CHECK: orr x1, x2, x3, asr #6 ; encoding: [0x41,0x18,0x83,0xaa]
+; CHECK: orr w1, w2, w3, ror #6 ; encoding: [0x41,0x18,0xc3,0x2a]
+; CHECK: orr x1, x2, x3, ror #6 ; encoding: [0x41,0x18,0xc3,0xaa]
+
+ orn w1, w2, w3
+ orn x1, x2, x3
+ orn w1, w2, w3, lsl #7
+ orn x1, x2, x3, lsl #7
+ orn w1, w2, w3, lsr #7
+ orn x1, x2, x3, lsr #7
+ orn w1, w2, w3, asr #7
+ orn x1, x2, x3, asr #7
+ orn w1, w2, w3, ror #7
+ orn x1, x2, x3, ror #7
+
+; CHECK: orn w1, w2, w3 ; encoding: [0x41,0x00,0x23,0x2a]
+; CHECK: orn x1, x2, x3 ; encoding: [0x41,0x00,0x23,0xaa]
+; CHECK: orn w1, w2, w3, lsl #7 ; encoding: [0x41,0x1c,0x23,0x2a]
+; CHECK: orn x1, x2, x3, lsl #7 ; encoding: [0x41,0x1c,0x23,0xaa]
+; CHECK: orn w1, w2, w3, lsr #7 ; encoding: [0x41,0x1c,0x63,0x2a]
+; CHECK: orn x1, x2, x3, lsr #7 ; encoding: [0x41,0x1c,0x63,0xaa]
+; CHECK: orn w1, w2, w3, asr #7 ; encoding: [0x41,0x1c,0xa3,0x2a]
+; CHECK: orn x1, x2, x3, asr #7 ; encoding: [0x41,0x1c,0xa3,0xaa]
+; CHECK: orn w1, w2, w3, ror #7 ; encoding: [0x41,0x1c,0xe3,0x2a]
+; CHECK: orn x1, x2, x3, ror #7 ; encoding: [0x41,0x1c,0xe3,0xaa]
diff --git a/test/MC/AArch64/arm64-mapping-across-sections.s b/test/MC/AArch64/arm64-mapping-across-sections.s
new file mode 100644
index 000000000000..00b324cb8264
--- /dev/null
+++ b/test/MC/AArch64/arm64-mapping-across-sections.s
@@ -0,0 +1,28 @@
+// RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj < %s | llvm-objdump -t - | FileCheck %s
+
+ .text
+ add w0, w0, w0
+
+// .wibble should *not* inherit .text's mapping symbol. It's a completely different section.
+ .section .wibble
+ add w0, w0, w0
+
+// A section should be able to start with a $d
+ .section .starts_data
+ .word 42
+
+// Changing back to .text should not emit a redundant $x
+ .text
+ add w0, w0, w0
+
+// With all those constraints, we want:
+// + .text to have $x at 0 and no others
+// + .wibble to have $x at 0
+// + .starts_data to have $d at 0
+
+
+// CHECK: 00000000 .starts_data 00000000 $d
+// CHECK-NEXT: 00000000 .text 00000000 $x
+// CHECK-NEXT: 00000000 .wibble 00000000 $x
+// CHECK-NOT: ${{[adtx]}}
+
diff --git a/test/MC/AArch64/arm64-mapping-within-section.s b/test/MC/AArch64/arm64-mapping-within-section.s
new file mode 100644
index 000000000000..f515cb9a5c0b
--- /dev/null
+++ b/test/MC/AArch64/arm64-mapping-within-section.s
@@ -0,0 +1,23 @@
+// RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj < %s | llvm-objdump -t - | FileCheck %s
+
+ .text
+// $x at 0x0000
+ add w0, w0, w0
+// $d at 0x0004
+ .ascii "012"
+ .byte 1
+ .hword 2
+ .word 4
+ .xword 8
+ .single 4.0
+ .double 8.0
+ .space 10
+ .zero 3
+ .fill 10, 2, 42
+ .org 100, 12
+// $x at 0x0064
+ add x0, x0, x0
+
+// CHECK: 00000004 .text 00000000 $d
+// CHECK-NEXT: 00000000 .text 00000000 $x
+// CHECK-NEXT: 00000064 .text 00000000 $x
diff --git a/test/MC/AArch64/arm64-memory.s b/test/MC/AArch64/arm64-memory.s
new file mode 100644
index 000000000000..579859660f9b
--- /dev/null
+++ b/test/MC/AArch64/arm64-memory.s
@@ -0,0 +1,634 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -show-encoding < %s | FileCheck %s
+
+foo:
+;-----------------------------------------------------------------------------
+; Indexed loads
+;-----------------------------------------------------------------------------
+
+ ldr w5, [x4, #20]
+ ldr x4, [x3]
+ ldr x2, [sp, #32]
+ ldr b5, [sp, #1]
+ ldr h6, [sp, #2]
+ ldr s7, [sp, #4]
+ ldr d8, [sp, #8]
+ ldr q9, [sp, #16]
+ ldrb w4, [x3]
+ ldrb w5, [x4, #20]
+ ldrb w2, [x3, _foo@pageoff]
+ ldrb w3, [x2, "+[Test method].var"@PAGEOFF]
+ ldrsb w9, [x3]
+ ldrsb x2, [sp, #128]
+ ldrh w2, [sp, #32]
+ ldrsh w3, [sp, #32]
+ ldrsh x5, [x9, #24]
+ ldrsw x9, [sp, #512]
+
+ prfm #5, [sp, #32]
+ prfm #31, [sp, #32]
+ prfm pldl1keep, [x2]
+ prfm pldl1strm, [x2]
+ prfm pldl2keep, [x2]
+ prfm pldl2strm, [x2]
+ prfm pldl3keep, [x2]
+ prfm pldl3strm, [x2]
+ prfm pstl1keep, [x2]
+ prfm pstl1strm, [x2]
+ prfm pstl2keep, [x2]
+ prfm pstl2strm, [x2]
+ prfm pstl3keep, [x2]
+ prfm pstl3strm, [x2]
+ prfm pstl3strm, [x4, x5, lsl #3]
+
+; CHECK: ldr w5, [x4, #20] ; encoding: [0x85,0x14,0x40,0xb9]
+; CHECK: ldr x4, [x3] ; encoding: [0x64,0x00,0x40,0xf9]
+; CHECK: ldr x2, [sp, #32] ; encoding: [0xe2,0x13,0x40,0xf9]
+; CHECK: ldr b5, [sp, #1] ; encoding: [0xe5,0x07,0x40,0x3d]
+; CHECK: ldr h6, [sp, #2] ; encoding: [0xe6,0x07,0x40,0x7d]
+; CHECK: ldr s7, [sp, #4] ; encoding: [0xe7,0x07,0x40,0xbd]
+; CHECK: ldr d8, [sp, #8] ; encoding: [0xe8,0x07,0x40,0xfd]
+; CHECK: ldr q9, [sp, #16] ; encoding: [0xe9,0x07,0xc0,0x3d]
+; CHECK: ldrb w4, [x3] ; encoding: [0x64,0x00,0x40,0x39]
+; CHECK: ldrb w5, [x4, #20] ; encoding: [0x85,0x50,0x40,0x39]
+; CHECK: ldrb w2, [x3, _foo@PAGEOFF] ; encoding: [0x62,0bAAAAAA00,0b01AAAAAA,0x39]
+; CHECK: ldrb w3, [x2, "+[Test method].var"@PAGEOFF] ; encoding: [0x43,0bAAAAAA00,0b01AAAAAA,0x39]
+; CHECK: ldrsb w9, [x3] ; encoding: [0x69,0x00,0xc0,0x39]
+; CHECK: ldrsb x2, [sp, #128] ; encoding: [0xe2,0x03,0x82,0x39]
+; CHECK: ldrh w2, [sp, #32] ; encoding: [0xe2,0x43,0x40,0x79]
+; CHECK: ldrsh w3, [sp, #32] ; encoding: [0xe3,0x43,0xc0,0x79]
+; CHECK: ldrsh x5, [x9, #24] ; encoding: [0x25,0x31,0x80,0x79]
+; CHECK: ldrsw x9, [sp, #512] ; encoding: [0xe9,0x03,0x82,0xb9]
+; CHECK: prfm pldl3strm, [sp, #32] ; encoding: [0xe5,0x13,0x80,0xf9]
+; CHECK: prfm #31, [sp, #32] ; encoding: [0xff,0x13,0x80,0xf9]
+; CHECK: prfm pldl1keep, [x2] ; encoding: [0x40,0x00,0x80,0xf9]
+; CHECK: prfm pldl1strm, [x2] ; encoding: [0x41,0x00,0x80,0xf9]
+; CHECK: prfm pldl2keep, [x2] ; encoding: [0x42,0x00,0x80,0xf9]
+; CHECK: prfm pldl2strm, [x2] ; encoding: [0x43,0x00,0x80,0xf9]
+; CHECK: prfm pldl3keep, [x2] ; encoding: [0x44,0x00,0x80,0xf9]
+; CHECK: prfm pldl3strm, [x2] ; encoding: [0x45,0x00,0x80,0xf9]
+; CHECK: prfm pstl1keep, [x2] ; encoding: [0x50,0x00,0x80,0xf9]
+; CHECK: prfm pstl1strm, [x2] ; encoding: [0x51,0x00,0x80,0xf9]
+; CHECK: prfm pstl2keep, [x2] ; encoding: [0x52,0x00,0x80,0xf9]
+; CHECK: prfm pstl2strm, [x2] ; encoding: [0x53,0x00,0x80,0xf9]
+; CHECK: prfm pstl3keep, [x2] ; encoding: [0x54,0x00,0x80,0xf9]
+; CHECK: prfm pstl3strm, [x2] ; encoding: [0x55,0x00,0x80,0xf9]
+; CHECK: prfm pstl3strm, [x4, x5, lsl #3] ; encoding: [0x95,0x78,0xa5,0xf8]
+
+;-----------------------------------------------------------------------------
+; Indexed stores
+;-----------------------------------------------------------------------------
+
+ str x4, [x3]
+ str x2, [sp, #32]
+ str w5, [x4, #20]
+ str b5, [sp, #1]
+ str h6, [sp, #2]
+ str s7, [sp, #4]
+ str d8, [sp, #8]
+ str q9, [sp, #16]
+ strb w4, [x3]
+ strb w5, [x4, #20]
+ strh w2, [sp, #32]
+
+; CHECK: str x4, [x3] ; encoding: [0x64,0x00,0x00,0xf9]
+; CHECK: str x2, [sp, #32] ; encoding: [0xe2,0x13,0x00,0xf9]
+; CHECK: str w5, [x4, #20] ; encoding: [0x85,0x14,0x00,0xb9]
+; CHECK: str b5, [sp, #1] ; encoding: [0xe5,0x07,0x00,0x3d]
+; CHECK: str h6, [sp, #2] ; encoding: [0xe6,0x07,0x00,0x7d]
+; CHECK: str s7, [sp, #4] ; encoding: [0xe7,0x07,0x00,0xbd]
+; CHECK: str d8, [sp, #8] ; encoding: [0xe8,0x07,0x00,0xfd]
+; CHECK: str q9, [sp, #16] ; encoding: [0xe9,0x07,0x80,0x3d]
+; CHECK: strb w4, [x3] ; encoding: [0x64,0x00,0x00,0x39]
+; CHECK: strb w5, [x4, #20] ; encoding: [0x85,0x50,0x00,0x39]
+; CHECK: strh w2, [sp, #32] ; encoding: [0xe2,0x43,0x00,0x79]
+
+;-----------------------------------------------------------------------------
+; Unscaled immediate loads and stores
+;-----------------------------------------------------------------------------
+
+ ldur w2, [x3]
+ ldur w2, [sp, #24]
+ ldur x2, [x3]
+ ldur x2, [sp, #24]
+ ldur b5, [sp, #1]
+ ldur h6, [sp, #2]
+ ldur s7, [sp, #4]
+ ldur d8, [sp, #8]
+ ldur q9, [sp, #16]
+ ldursb w9, [x3]
+ ldursb x2, [sp, #128]
+ ldursh w3, [sp, #32]
+ ldursh x5, [x9, #24]
+ ldursw x9, [sp, #-128]
+
+; CHECK: ldur w2, [x3] ; encoding: [0x62,0x00,0x40,0xb8]
+; CHECK: ldur w2, [sp, #24] ; encoding: [0xe2,0x83,0x41,0xb8]
+; CHECK: ldur x2, [x3] ; encoding: [0x62,0x00,0x40,0xf8]
+; CHECK: ldur x2, [sp, #24] ; encoding: [0xe2,0x83,0x41,0xf8]
+; CHECK: ldur b5, [sp, #1] ; encoding: [0xe5,0x13,0x40,0x3c]
+; CHECK: ldur h6, [sp, #2] ; encoding: [0xe6,0x23,0x40,0x7c]
+; CHECK: ldur s7, [sp, #4] ; encoding: [0xe7,0x43,0x40,0xbc]
+; CHECK: ldur d8, [sp, #8] ; encoding: [0xe8,0x83,0x40,0xfc]
+; CHECK: ldur q9, [sp, #16] ; encoding: [0xe9,0x03,0xc1,0x3c]
+; CHECK: ldursb w9, [x3] ; encoding: [0x69,0x00,0xc0,0x38]
+; CHECK: ldursb x2, [sp, #128] ; encoding: [0xe2,0x03,0x88,0x38]
+; CHECK: ldursh w3, [sp, #32] ; encoding: [0xe3,0x03,0xc2,0x78]
+; CHECK: ldursh x5, [x9, #24] ; encoding: [0x25,0x81,0x81,0x78]
+; CHECK: ldursw x9, [sp, #-128] ; encoding: [0xe9,0x03,0x98,0xb8]
+
+ stur w4, [x3]
+ stur w2, [sp, #32]
+ stur x4, [x3]
+ stur x2, [sp, #32]
+ stur w5, [x4, #20]
+ stur b5, [sp, #1]
+ stur h6, [sp, #2]
+ stur s7, [sp, #4]
+ stur d8, [sp, #8]
+ stur q9, [sp, #16]
+ sturb w4, [x3]
+ sturb w5, [x4, #20]
+ sturh w2, [sp, #32]
+ prfum #5, [sp, #32]
+
+; CHECK: stur w4, [x3] ; encoding: [0x64,0x00,0x00,0xb8]
+; CHECK: stur w2, [sp, #32] ; encoding: [0xe2,0x03,0x02,0xb8]
+; CHECK: stur x4, [x3] ; encoding: [0x64,0x00,0x00,0xf8]
+; CHECK: stur x2, [sp, #32] ; encoding: [0xe2,0x03,0x02,0xf8]
+; CHECK: stur w5, [x4, #20] ; encoding: [0x85,0x40,0x01,0xb8]
+; CHECK: stur b5, [sp, #1] ; encoding: [0xe5,0x13,0x00,0x3c]
+; CHECK: stur h6, [sp, #2] ; encoding: [0xe6,0x23,0x00,0x7c]
+; CHECK: stur s7, [sp, #4] ; encoding: [0xe7,0x43,0x00,0xbc]
+; CHECK: stur d8, [sp, #8] ; encoding: [0xe8,0x83,0x00,0xfc]
+; CHECK: stur q9, [sp, #16] ; encoding: [0xe9,0x03,0x81,0x3c]
+; CHECK: sturb w4, [x3] ; encoding: [0x64,0x00,0x00,0x38]
+; CHECK: sturb w5, [x4, #20] ; encoding: [0x85,0x40,0x01,0x38]
+; CHECK: sturh w2, [sp, #32] ; encoding: [0xe2,0x03,0x02,0x78]
+; CHECK: prfum pldl3strm, [sp, #32] ; encoding: [0xe5,0x03,0x82,0xf8]
+
+;-----------------------------------------------------------------------------
+; Unprivileged loads and stores
+;-----------------------------------------------------------------------------
+
+ ldtr w3, [x4, #16]
+ ldtr x3, [x4, #16]
+ ldtrb w3, [x4, #16]
+ ldtrsb w9, [x3]
+ ldtrsb x2, [sp, #128]
+ ldtrh w3, [x4, #16]
+ ldtrsh w3, [sp, #32]
+ ldtrsh x5, [x9, #24]
+ ldtrsw x9, [sp, #-128]
+
+; CHECK: ldtr w3, [x4, #16] ; encoding: [0x83,0x08,0x41,0xb8]
+; CHECK: ldtr x3, [x4, #16] ; encoding: [0x83,0x08,0x41,0xf8]
+; CHECK: ldtrb w3, [x4, #16] ; encoding: [0x83,0x08,0x41,0x38]
+; CHECK: ldtrsb w9, [x3] ; encoding: [0x69,0x08,0xc0,0x38]
+; CHECK: ldtrsb x2, [sp, #128] ; encoding: [0xe2,0x0b,0x88,0x38]
+; CHECK: ldtrh w3, [x4, #16] ; encoding: [0x83,0x08,0x41,0x78]
+; CHECK: ldtrsh w3, [sp, #32] ; encoding: [0xe3,0x0b,0xc2,0x78]
+; CHECK: ldtrsh x5, [x9, #24] ; encoding: [0x25,0x89,0x81,0x78]
+; CHECK: ldtrsw x9, [sp, #-128] ; encoding: [0xe9,0x0b,0x98,0xb8]
+
+ sttr w5, [x4, #20]
+ sttr x4, [x3]
+ sttr x2, [sp, #32]
+ sttrb w4, [x3]
+ sttrb w5, [x4, #20]
+ sttrh w2, [sp, #32]
+
+; CHECK: sttr w5, [x4, #20] ; encoding: [0x85,0x48,0x01,0xb8]
+; CHECK: sttr x4, [x3] ; encoding: [0x64,0x08,0x00,0xf8]
+; CHECK: sttr x2, [sp, #32] ; encoding: [0xe2,0x0b,0x02,0xf8]
+; CHECK: sttrb w4, [x3] ; encoding: [0x64,0x08,0x00,0x38]
+; CHECK: sttrb w5, [x4, #20] ; encoding: [0x85,0x48,0x01,0x38]
+; CHECK: sttrh w2, [sp, #32] ; encoding: [0xe2,0x0b,0x02,0x78]
+
+;-----------------------------------------------------------------------------
+; Pre-indexed loads and stores
+;-----------------------------------------------------------------------------
+
+ ldr x29, [x7, #8]!
+ ldr x30, [x7, #8]!
+ ldr b5, [x0, #1]!
+ ldr h6, [x0, #2]!
+ ldr s7, [x0, #4]!
+ ldr d8, [x0, #8]!
+ ldr q9, [x0, #16]!
+
+ str x30, [x7, #-8]!
+ str x29, [x7, #-8]!
+ str b5, [x0, #-1]!
+ str h6, [x0, #-2]!
+ str s7, [x0, #-4]!
+ str d8, [x0, #-8]!
+ str q9, [x0, #-16]!
+
+; CHECK: ldr x29, [x7, #8]! ; encoding: [0xfd,0x8c,0x40,0xf8]
+; CHECK: ldr x30, [x7, #8]! ; encoding: [0xfe,0x8c,0x40,0xf8]
+; CHECK: ldr b5, [x0, #1]! ; encoding: [0x05,0x1c,0x40,0x3c]
+; CHECK: ldr h6, [x0, #2]! ; encoding: [0x06,0x2c,0x40,0x7c]
+; CHECK: ldr s7, [x0, #4]! ; encoding: [0x07,0x4c,0x40,0xbc]
+; CHECK: ldr d8, [x0, #8]! ; encoding: [0x08,0x8c,0x40,0xfc]
+; CHECK: ldr q9, [x0, #16]! ; encoding: [0x09,0x0c,0xc1,0x3c]
+
+; CHECK: str x30, [x7, #-8]! ; encoding: [0xfe,0x8c,0x1f,0xf8]
+; CHECK: str x29, [x7, #-8]! ; encoding: [0xfd,0x8c,0x1f,0xf8]
+; CHECK: str b5, [x0, #-1]! ; encoding: [0x05,0xfc,0x1f,0x3c]
+; CHECK: str h6, [x0, #-2]! ; encoding: [0x06,0xec,0x1f,0x7c]
+; CHECK: str s7, [x0, #-4]! ; encoding: [0x07,0xcc,0x1f,0xbc]
+; CHECK: str d8, [x0, #-8]! ; encoding: [0x08,0x8c,0x1f,0xfc]
+; CHECK: str q9, [x0, #-16]! ; encoding: [0x09,0x0c,0x9f,0x3c]
+
+;-----------------------------------------------------------------------------
+; post-indexed loads and stores
+;-----------------------------------------------------------------------------
+ str x30, [x7], #-8
+ str x29, [x7], #-8
+ str b5, [x0], #-1
+ str h6, [x0], #-2
+ str s7, [x0], #-4
+ str d8, [x0], #-8
+ str q9, [x0], #-16
+
+ ldr x29, [x7], #8
+ ldr x30, [x7], #8
+ ldr b5, [x0], #1
+ ldr h6, [x0], #2
+ ldr s7, [x0], #4
+ ldr d8, [x0], #8
+ ldr q9, [x0], #16
+
+; CHECK: str x30, [x7], #-8 ; encoding: [0xfe,0x84,0x1f,0xf8]
+; CHECK: str x29, [x7], #-8 ; encoding: [0xfd,0x84,0x1f,0xf8]
+; CHECK: str b5, [x0], #-1 ; encoding: [0x05,0xf4,0x1f,0x3c]
+; CHECK: str h6, [x0], #-2 ; encoding: [0x06,0xe4,0x1f,0x7c]
+; CHECK: str s7, [x0], #-4 ; encoding: [0x07,0xc4,0x1f,0xbc]
+; CHECK: str d8, [x0], #-8 ; encoding: [0x08,0x84,0x1f,0xfc]
+; CHECK: str q9, [x0], #-16 ; encoding: [0x09,0x04,0x9f,0x3c]
+
+; CHECK: ldr x29, [x7], #8 ; encoding: [0xfd,0x84,0x40,0xf8]
+; CHECK: ldr x30, [x7], #8 ; encoding: [0xfe,0x84,0x40,0xf8]
+; CHECK: ldr b5, [x0], #1 ; encoding: [0x05,0x14,0x40,0x3c]
+; CHECK: ldr h6, [x0], #2 ; encoding: [0x06,0x24,0x40,0x7c]
+; CHECK: ldr s7, [x0], #4 ; encoding: [0x07,0x44,0x40,0xbc]
+; CHECK: ldr d8, [x0], #8 ; encoding: [0x08,0x84,0x40,0xfc]
+; CHECK: ldr q9, [x0], #16 ; encoding: [0x09,0x04,0xc1,0x3c]
+
+;-----------------------------------------------------------------------------
+; Load/Store pair (indexed, offset)
+;-----------------------------------------------------------------------------
+
+ ldp w3, w2, [x15, #16]
+ ldp x4, x9, [sp, #-16]
+ ldpsw x2, x3, [x14, #16]
+ ldpsw x2, x3, [sp, #-16]
+ ldp s10, s1, [x2, #64]
+ ldp d10, d1, [x2]
+ ldp q2, q3, [x0, #32]
+
+; CHECK: ldp w3, w2, [x15, #16] ; encoding: [0xe3,0x09,0x42,0x29]
+; CHECK: ldp x4, x9, [sp, #-16] ; encoding: [0xe4,0x27,0x7f,0xa9]
+; CHECK: ldpsw x2, x3, [x14, #16] ; encoding: [0xc2,0x0d,0x42,0x69]
+; CHECK: ldpsw x2, x3, [sp, #-16] ; encoding: [0xe2,0x0f,0x7e,0x69]
+; CHECK: ldp s10, s1, [x2, #64] ; encoding: [0x4a,0x04,0x48,0x2d]
+; CHECK: ldp d10, d1, [x2] ; encoding: [0x4a,0x04,0x40,0x6d]
+; CHECK: ldp q2, q3, [x0, #32] ; encoding: [0x02,0x0c,0x41,0xad]
+
+ stp w3, w2, [x15, #16]
+ stp x4, x9, [sp, #-16]
+ stp s10, s1, [x2, #64]
+ stp d10, d1, [x2]
+ stp q2, q3, [x0, #32]
+
+; CHECK: stp w3, w2, [x15, #16] ; encoding: [0xe3,0x09,0x02,0x29]
+; CHECK: stp x4, x9, [sp, #-16] ; encoding: [0xe4,0x27,0x3f,0xa9]
+; CHECK: stp s10, s1, [x2, #64] ; encoding: [0x4a,0x04,0x08,0x2d]
+; CHECK: stp d10, d1, [x2] ; encoding: [0x4a,0x04,0x00,0x6d]
+; CHECK: stp q2, q3, [x0, #32] ; encoding: [0x02,0x0c,0x01,0xad]
+
+;-----------------------------------------------------------------------------
+; Load/Store pair (pre-indexed)
+;-----------------------------------------------------------------------------
+
+ ldp w3, w2, [x15, #16]!
+ ldp x4, x9, [sp, #-16]!
+ ldpsw x2, x3, [x14, #16]!
+ ldpsw x2, x3, [sp, #-16]!
+ ldp s10, s1, [x2, #64]!
+ ldp d10, d1, [x2, #16]!
+
+; CHECK: ldp w3, w2, [x15, #16]! ; encoding: [0xe3,0x09,0xc2,0x29]
+; CHECK: ldp x4, x9, [sp, #-16]! ; encoding: [0xe4,0x27,0xff,0xa9]
+; CHECK: ldpsw x2, x3, [x14, #16]! ; encoding: [0xc2,0x0d,0xc2,0x69]
+; CHECK: ldpsw x2, x3, [sp, #-16]! ; encoding: [0xe2,0x0f,0xfe,0x69]
+; CHECK: ldp s10, s1, [x2, #64]! ; encoding: [0x4a,0x04,0xc8,0x2d]
+; CHECK: ldp d10, d1, [x2, #16]! ; encoding: [0x4a,0x04,0xc1,0x6d]
+
+ stp w3, w2, [x15, #16]!
+ stp x4, x9, [sp, #-16]!
+ stp s10, s1, [x2, #64]!
+ stp d10, d1, [x2, #16]!
+
+; CHECK: stp w3, w2, [x15, #16]! ; encoding: [0xe3,0x09,0x82,0x29]
+; CHECK: stp x4, x9, [sp, #-16]! ; encoding: [0xe4,0x27,0xbf,0xa9]
+; CHECK: stp s10, s1, [x2, #64]! ; encoding: [0x4a,0x04,0x88,0x2d]
+; CHECK: stp d10, d1, [x2, #16]! ; encoding: [0x4a,0x04,0x81,0x6d]
+
+;-----------------------------------------------------------------------------
+; Load/Store pair (post-indexed)
+;-----------------------------------------------------------------------------
+
+ ldp w3, w2, [x15], #16
+ ldp x4, x9, [sp], #-16
+ ldpsw x2, x3, [x14], #16
+ ldpsw x2, x3, [sp], #-16
+ ldp s10, s1, [x2], #64
+ ldp d10, d1, [x2], #16
+
+; CHECK: ldp w3, w2, [x15], #16 ; encoding: [0xe3,0x09,0xc2,0x28]
+; CHECK: ldp x4, x9, [sp], #-16 ; encoding: [0xe4,0x27,0xff,0xa8]
+; CHECK: ldpsw x2, x3, [x14], #16 ; encoding: [0xc2,0x0d,0xc2,0x68]
+; CHECK: ldpsw x2, x3, [sp], #-16 ; encoding: [0xe2,0x0f,0xfe,0x68]
+; CHECK: ldp s10, s1, [x2], #64 ; encoding: [0x4a,0x04,0xc8,0x2c]
+; CHECK: ldp d10, d1, [x2], #16 ; encoding: [0x4a,0x04,0xc1,0x6c]
+
+ stp w3, w2, [x15], #16
+ stp x4, x9, [sp], #-16
+ stp s10, s1, [x2], #64
+ stp d10, d1, [x2], #16
+
+; CHECK: stp w3, w2, [x15], #16 ; encoding: [0xe3,0x09,0x82,0x28]
+; CHECK: stp x4, x9, [sp], #-16 ; encoding: [0xe4,0x27,0xbf,0xa8]
+; CHECK: stp s10, s1, [x2], #64 ; encoding: [0x4a,0x04,0x88,0x2c]
+; CHECK: stp d10, d1, [x2], #16 ; encoding: [0x4a,0x04,0x81,0x6c]
+
+;-----------------------------------------------------------------------------
+; Load/Store pair (no-allocate)
+;-----------------------------------------------------------------------------
+
+ ldnp w3, w2, [x15, #16]
+ ldnp x4, x9, [sp, #-16]
+ ldnp s10, s1, [x2, #64]
+ ldnp d10, d1, [x2]
+
+; CHECK: ldnp w3, w2, [x15, #16] ; encoding: [0xe3,0x09,0x42,0x28]
+; CHECK: ldnp x4, x9, [sp, #-16] ; encoding: [0xe4,0x27,0x7f,0xa8]
+; CHECK: ldnp s10, s1, [x2, #64] ; encoding: [0x4a,0x04,0x48,0x2c]
+; CHECK: ldnp d10, d1, [x2] ; encoding: [0x4a,0x04,0x40,0x6c]
+
+ stnp w3, w2, [x15, #16]
+ stnp x4, x9, [sp, #-16]
+ stnp s10, s1, [x2, #64]
+ stnp d10, d1, [x2]
+
+; CHECK: stnp w3, w2, [x15, #16] ; encoding: [0xe3,0x09,0x02,0x28]
+; CHECK: stnp x4, x9, [sp, #-16] ; encoding: [0xe4,0x27,0x3f,0xa8]
+; CHECK: stnp s10, s1, [x2, #64] ; encoding: [0x4a,0x04,0x08,0x2c]
+; CHECK: stnp d10, d1, [x2] ; encoding: [0x4a,0x04,0x00,0x6c]
+
+;-----------------------------------------------------------------------------
+; Load/Store register offset
+;-----------------------------------------------------------------------------
+
+ ldr w0, [x0, x0]
+ ldr w0, [x0, x0, lsl #2]
+ ldr x0, [x0, x0]
+ ldr x0, [x0, x0, lsl #3]
+ ldr x0, [x0, x0, sxtx]
+
+; CHECK: ldr w0, [x0, x0] ; encoding: [0x00,0x68,0x60,0xb8]
+; CHECK: ldr w0, [x0, x0, lsl #2] ; encoding: [0x00,0x78,0x60,0xb8]
+; CHECK: ldr x0, [x0, x0] ; encoding: [0x00,0x68,0x60,0xf8]
+; CHECK: ldr x0, [x0, x0, lsl #3] ; encoding: [0x00,0x78,0x60,0xf8]
+; CHECK: ldr x0, [x0, x0, sxtx] ; encoding: [0x00,0xe8,0x60,0xf8]
+
+ ldr b1, [x1, x2]
+ ldr b1, [x1, x2, lsl #0]
+ ldr h1, [x1, x2]
+ ldr h1, [x1, x2, lsl #1]
+ ldr s1, [x1, x2]
+ ldr s1, [x1, x2, lsl #2]
+ ldr d1, [x1, x2]
+ ldr d1, [x1, x2, lsl #3]
+ ldr q1, [x1, x2]
+ ldr q1, [x1, x2, lsl #4]
+
+; CHECK: ldr b1, [x1, x2] ; encoding: [0x21,0x68,0x62,0x3c]
+; CHECK: ldr b1, [x1, x2, lsl #0] ; encoding: [0x21,0x78,0x62,0x3c]
+; CHECK: ldr h1, [x1, x2] ; encoding: [0x21,0x68,0x62,0x7c]
+; CHECK: ldr h1, [x1, x2, lsl #1] ; encoding: [0x21,0x78,0x62,0x7c]
+; CHECK: ldr s1, [x1, x2] ; encoding: [0x21,0x68,0x62,0xbc]
+; CHECK: ldr s1, [x1, x2, lsl #2] ; encoding: [0x21,0x78,0x62,0xbc]
+; CHECK: ldr d1, [x1, x2] ; encoding: [0x21,0x68,0x62,0xfc]
+; CHECK: ldr d1, [x1, x2, lsl #3] ; encoding: [0x21,0x78,0x62,0xfc]
+; CHECK: ldr q1, [x1, x2] ; encoding: [0x21,0x68,0xe2,0x3c]
+; CHECK: ldr q1, [x1, x2, lsl #4] ; encoding: [0x21,0x78,0xe2,0x3c]
+
+ str d1, [sp, x3]
+ str d1, [sp, w3, uxtw #3]
+ str q1, [sp, x3]
+ str q1, [sp, w3, uxtw #4]
+
+; CHECK: str d1, [sp, x3] ; encoding: [0xe1,0x6b,0x23,0xfc]
+; CHECK: str d1, [sp, w3, uxtw #3] ; encoding: [0xe1,0x5b,0x23,0xfc]
+; CHECK: str q1, [sp, x3] ; encoding: [0xe1,0x6b,0xa3,0x3c]
+; CHECK: str q1, [sp, w3, uxtw #4] ; encoding: [0xe1,0x5b,0xa3,0x3c]
+
+;-----------------------------------------------------------------------------
+; Load literal
+;-----------------------------------------------------------------------------
+
+ ldr w5, foo
+ ldr x4, foo
+ ldrsw x9, foo
+ prfm #5, foo
+
+; CHECK: ldr w5, foo ; encoding: [0bAAA00101,A,A,0x18]
+; CHECK: ldr x4, foo ; encoding: [0bAAA00100,A,A,0x58]
+; CHECK: ldrsw x9, foo ; encoding: [0bAAA01001,A,A,0x98]
+; CHECK: prfm pldl3strm, foo ; encoding: [0bAAA00101,A,A,0xd8]
+
+;-----------------------------------------------------------------------------
+; Load/Store exclusive
+;-----------------------------------------------------------------------------
+
+ ldxr w6, [x1]
+ ldxr x6, [x1]
+ ldxrb w6, [x1]
+ ldxrh w6, [x1]
+ ldxp w7, w3, [x9]
+ ldxp x7, x3, [x9]
+
+; CHECK: ldxrb w6, [x1] ; encoding: [0x26,0x7c,0x5f,0x08]
+; CHECK: ldxrh w6, [x1] ; encoding: [0x26,0x7c,0x5f,0x48]
+; CHECK: ldxp w7, w3, [x9] ; encoding: [0x27,0x0d,0x7f,0x88]
+; CHECK: ldxp x7, x3, [x9] ; encoding: [0x27,0x0d,0x7f,0xc8]
+
+ stxr w1, x4, [x3]
+ stxr w1, w4, [x3]
+ stxrb w1, w4, [x3]
+ stxrh w1, w4, [x3]
+ stxp w1, x2, x6, [x1]
+ stxp w1, w2, w6, [x1]
+
+; CHECK: stxr w1, x4, [x3] ; encoding: [0x64,0x7c,0x01,0xc8]
+; CHECK: stxr w1, w4, [x3] ; encoding: [0x64,0x7c,0x01,0x88]
+; CHECK: stxrb w1, w4, [x3] ; encoding: [0x64,0x7c,0x01,0x08]
+; CHECK: stxrh w1, w4, [x3] ; encoding: [0x64,0x7c,0x01,0x48]
+; CHECK: stxp w1, x2, x6, [x1] ; encoding: [0x22,0x18,0x21,0xc8]
+; CHECK: stxp w1, w2, w6, [x1] ; encoding: [0x22,0x18,0x21,0x88]
+
+;-----------------------------------------------------------------------------
+; Load-acquire/Store-release non-exclusive
+;-----------------------------------------------------------------------------
+
+ ldar w4, [sp]
+ ldar x4, [sp, #0]
+ ldarb w4, [sp]
+ ldarh w4, [sp]
+
+; CHECK: ldar w4, [sp] ; encoding: [0xe4,0xff,0xdf,0x88]
+; CHECK: ldar x4, [sp] ; encoding: [0xe4,0xff,0xdf,0xc8]
+; CHECK: ldarb w4, [sp] ; encoding: [0xe4,0xff,0xdf,0x08]
+; CHECK: ldarh w4, [sp] ; encoding: [0xe4,0xff,0xdf,0x48]
+
+ stlr w3, [x6]
+ stlr x3, [x6]
+ stlrb w3, [x6]
+ stlrh w3, [x6]
+
+; CHECK: stlr w3, [x6] ; encoding: [0xc3,0xfc,0x9f,0x88]
+; CHECK: stlr x3, [x6] ; encoding: [0xc3,0xfc,0x9f,0xc8]
+; CHECK: stlrb w3, [x6] ; encoding: [0xc3,0xfc,0x9f,0x08]
+; CHECK: stlrh w3, [x6] ; encoding: [0xc3,0xfc,0x9f,0x48]
+
+;-----------------------------------------------------------------------------
+; Load-acquire/Store-release exclusive
+;-----------------------------------------------------------------------------
+
+ ldaxr w2, [x4]
+ ldaxr x2, [x4]
+ ldaxrb w2, [x4, #0]
+ ldaxrh w2, [x4]
+ ldaxp w2, w6, [x1]
+ ldaxp x2, x6, [x1]
+
+; CHECK: ldaxr w2, [x4] ; encoding: [0x82,0xfc,0x5f,0x88]
+; CHECK: ldaxr x2, [x4] ; encoding: [0x82,0xfc,0x5f,0xc8]
+; CHECK: ldaxrb w2, [x4] ; encoding: [0x82,0xfc,0x5f,0x08]
+; CHECK: ldaxrh w2, [x4] ; encoding: [0x82,0xfc,0x5f,0x48]
+; CHECK: ldaxp w2, w6, [x1] ; encoding: [0x22,0x98,0x7f,0x88]
+; CHECK: ldaxp x2, x6, [x1] ; encoding: [0x22,0x98,0x7f,0xc8]
+
+ stlxr w8, x7, [x1]
+ stlxr w8, w7, [x1]
+ stlxrb w8, w7, [x1]
+ stlxrh w8, w7, [x1]
+ stlxp w1, x2, x6, [x1]
+ stlxp w1, w2, w6, [x1]
+
+; CHECK: stlxr w8, x7, [x1] ; encoding: [0x27,0xfc,0x08,0xc8]
+; CHECK: stlxr w8, w7, [x1] ; encoding: [0x27,0xfc,0x08,0x88]
+; CHECK: stlxrb w8, w7, [x1] ; encoding: [0x27,0xfc,0x08,0x08]
+; CHECK: stlxrh w8, w7, [x1] ; encoding: [0x27,0xfc,0x08,0x48]
+; CHECK: stlxp w1, x2, x6, [x1] ; encoding: [0x22,0x98,0x21,0xc8]
+; CHECK: stlxp w1, w2, w6, [x1] ; encoding: [0x22,0x98,0x21,0x88]
+
+
+;-----------------------------------------------------------------------------
+; LDUR/STUR aliases for negative and unaligned LDR/STR instructions.
+;
+; According to the ARM ISA documentation:
+; "A programmer-friendly assembler should also generate these instructions
+; in response to the standard LDR/STR mnemonics when the immediate offset is
+; unambiguous, i.e. negative or unaligned."
+;-----------------------------------------------------------------------------
+
+ ldr x11, [x29, #-8]
+ ldr x11, [x29, #7]
+ ldr w0, [x0, #2]
+ ldr w0, [x0, #-256]
+ ldr b2, [x1, #-2]
+ ldr h3, [x2, #3]
+ ldr h3, [x3, #-4]
+ ldr s3, [x4, #3]
+ ldr s3, [x5, #-4]
+ ldr d4, [x6, #4]
+ ldr d4, [x7, #-8]
+ ldr q5, [x8, #8]
+ ldr q5, [x9, #-16]
+
+; CHECK: ldur x11, [x29, #-8] ; encoding: [0xab,0x83,0x5f,0xf8]
+; CHECK: ldur x11, [x29, #7] ; encoding: [0xab,0x73,0x40,0xf8]
+; CHECK: ldur w0, [x0, #2] ; encoding: [0x00,0x20,0x40,0xb8]
+; CHECK: ldur w0, [x0, #-256] ; encoding: [0x00,0x00,0x50,0xb8]
+; CHECK: ldur b2, [x1, #-2] ; encoding: [0x22,0xe0,0x5f,0x3c]
+; CHECK: ldur h3, [x2, #3] ; encoding: [0x43,0x30,0x40,0x7c]
+; CHECK: ldur h3, [x3, #-4] ; encoding: [0x63,0xc0,0x5f,0x7c]
+; CHECK: ldur s3, [x4, #3] ; encoding: [0x83,0x30,0x40,0xbc]
+; CHECK: ldur s3, [x5, #-4] ; encoding: [0xa3,0xc0,0x5f,0xbc]
+; CHECK: ldur d4, [x6, #4] ; encoding: [0xc4,0x40,0x40,0xfc]
+; CHECK: ldur d4, [x7, #-8] ; encoding: [0xe4,0x80,0x5f,0xfc]
+; CHECK: ldur q5, [x8, #8] ; encoding: [0x05,0x81,0xc0,0x3c]
+; CHECK: ldur q5, [x9, #-16] ; encoding: [0x25,0x01,0xdf,0x3c]
+
+ str x11, [x29, #-8]
+ str x11, [x29, #7]
+ str w0, [x0, #2]
+ str w0, [x0, #-256]
+ str b2, [x1, #-2]
+ str h3, [x2, #3]
+ str h3, [x3, #-4]
+ str s3, [x4, #3]
+ str s3, [x5, #-4]
+ str d4, [x6, #4]
+ str d4, [x7, #-8]
+ str q5, [x8, #8]
+ str q5, [x9, #-16]
+
+; CHECK: stur x11, [x29, #-8] ; encoding: [0xab,0x83,0x1f,0xf8]
+; CHECK: stur x11, [x29, #7] ; encoding: [0xab,0x73,0x00,0xf8]
+; CHECK: stur w0, [x0, #2] ; encoding: [0x00,0x20,0x00,0xb8]
+; CHECK: stur w0, [x0, #-256] ; encoding: [0x00,0x00,0x10,0xb8]
+; CHECK: stur b2, [x1, #-2] ; encoding: [0x22,0xe0,0x1f,0x3c]
+; CHECK: stur h3, [x2, #3] ; encoding: [0x43,0x30,0x00,0x7c]
+; CHECK: stur h3, [x3, #-4] ; encoding: [0x63,0xc0,0x1f,0x7c]
+; CHECK: stur s3, [x4, #3] ; encoding: [0x83,0x30,0x00,0xbc]
+; CHECK: stur s3, [x5, #-4] ; encoding: [0xa3,0xc0,0x1f,0xbc]
+; CHECK: stur d4, [x6, #4] ; encoding: [0xc4,0x40,0x00,0xfc]
+; CHECK: stur d4, [x7, #-8] ; encoding: [0xe4,0x80,0x1f,0xfc]
+; CHECK: stur q5, [x8, #8] ; encoding: [0x05,0x81,0x80,0x3c]
+; CHECK: stur q5, [x9, #-16] ; encoding: [0x25,0x01,0x9f,0x3c]
+
+ ldrb w3, [x1, #-1]
+ ldrh w4, [x2, #1]
+ ldrh w5, [x3, #-1]
+ ldrsb w6, [x4, #-1]
+ ldrsb x7, [x5, #-1]
+ ldrsh w8, [x6, #1]
+ ldrsh w9, [x7, #-1]
+ ldrsh x1, [x8, #1]
+ ldrsh x2, [x9, #-1]
+ ldrsw x3, [x10, #10]
+ ldrsw x4, [x11, #-1]
+
+; CHECK: ldurb w3, [x1, #-1] ; encoding: [0x23,0xf0,0x5f,0x38]
+; CHECK: ldurh w4, [x2, #1] ; encoding: [0x44,0x10,0x40,0x78]
+; CHECK: ldurh w5, [x3, #-1] ; encoding: [0x65,0xf0,0x5f,0x78]
+; CHECK: ldursb w6, [x4, #-1] ; encoding: [0x86,0xf0,0xdf,0x38]
+; CHECK: ldursb x7, [x5, #-1] ; encoding: [0xa7,0xf0,0x9f,0x38]
+; CHECK: ldursh w8, [x6, #1] ; encoding: [0xc8,0x10,0xc0,0x78]
+; CHECK: ldursh w9, [x7, #-1] ; encoding: [0xe9,0xf0,0xdf,0x78]
+; CHECK: ldursh x1, [x8, #1] ; encoding: [0x01,0x11,0x80,0x78]
+; CHECK: ldursh x2, [x9, #-1] ; encoding: [0x22,0xf1,0x9f,0x78]
+; CHECK: ldursw x3, [x10, #10] ; encoding: [0x43,0xa1,0x80,0xb8]
+; CHECK: ldursw x4, [x11, #-1] ; encoding: [0x64,0xf1,0x9f,0xb8]
+
+ strb w3, [x1, #-1]
+ strh w4, [x2, #1]
+ strh w5, [x3, #-1]
+
+; CHECK: sturb w3, [x1, #-1] ; encoding: [0x23,0xf0,0x1f,0x38]
+; CHECK: sturh w4, [x2, #1] ; encoding: [0x44,0x10,0x00,0x78]
+; CHECK: sturh w5, [x3, #-1] ; encoding: [0x65,0xf0,0x1f,0x78]
diff --git a/test/MC/AArch64/arm64-nv-cond.s b/test/MC/AArch64/arm64-nv-cond.s
new file mode 100644
index 000000000000..1b4d054d2487
--- /dev/null
+++ b/test/MC/AArch64/arm64-nv-cond.s
@@ -0,0 +1,11 @@
+// RUN: llvm-mc < %s -triple arm64 -mattr=neon -show-encoding | FileCheck %s
+
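+// The 'nv' condition code (condition field 0b1111) is architecturally an
+// alias for 'al' (always), not 'never'; it exists only for encoding
+// completeness. This test checks that the assembler accepts the mnemonic and
+// emits the 0b1111 condition field.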
+fcsel d28,d31,d31,nv
+csel x0,x0,x0,nv
+ccmp x0,x0,#0,nv
+b.nv #0
+
+// CHECK: fcsel d28, d31, d31, nv // encoding: [0xfc,0xff,0x7f,0x1e]
+// CHECK: csel x0, x0, x0, nv // encoding: [0x00,0xf0,0x80,0x9a]
+// CHECK: ccmp x0, x0, #0, nv // encoding: [0x00,0xf0,0x40,0xfa]
+// CHECK: b.nv #0 // encoding: [0x0f,0x00,0x00,0x54]
diff --git a/test/MC/AArch64/arm64-optional-hash.s b/test/MC/AArch64/arm64-optional-hash.s
new file mode 100644
index 000000000000..71e2fda217d5
--- /dev/null
+++ b/test/MC/AArch64/arm64-optional-hash.s
@@ -0,0 +1,31 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -show-encoding < %s | FileCheck %s
+.text
+; parseOperand check: the leading '#' on immediate operands is optional
+; CHECK: add sp, sp, #32 ; encoding: [0xff,0x83,0x00,0x91]
+ add sp, sp, 32
+
+; Optional shift
+; CHECK: adds x3, x4, #1024, lsl #12 ; encoding: [0x83,0x00,0x50,0xb1]
+adds x3, x4, 1024, lsl 12
+
+; Optional extend
+; CHECK: add sp, x2, x3 ; encoding: [0x5f,0x60,0x23,0x8b]
+add sp, x2, x3, uxtx 0
+
+; FP immediates
+; CHECK: fmov s1, #0.12500000 ; encoding: [0x01,0x10,0x28,0x1e]
+fmov s1, 0.125
+
+; Barrier operand
+; CHECK: dmb osh ; encoding: [0xbf,0x33,0x03,0xd5]
+dmb 3
+
+; Prefetch and memory
+
+; Single register inside []
+; CHECK: ldnp w3, w2, [x15, #16] ; encoding: [0xe3,0x09,0x42,0x28]
+ldnp w3, w2, [x15, 16]
+
+; Memory, two registers inside []
+; CHECK: prfm pstl3strm, [x4, x5, lsl #3] ; encoding: [0x95,0x78,0xa5,0xf8]
+prfm pstl3strm, [x4, x5, lsl 3]
diff --git a/test/MC/AArch64/arm64-separator.s b/test/MC/AArch64/arm64-separator.s
new file mode 100644
index 000000000000..e67deba825d9
--- /dev/null
+++ b/test/MC/AArch64/arm64-separator.s
@@ -0,0 +1,20 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -show-encoding < %s | FileCheck %s
+
+; ARM64 uses a multi-character statement separator, "%%". Check that we lex
+; it properly and recognize the multiple assembly statements on the line.
+
+; To make sure the instructions were handled correctly, tell the assembler to
+; show encodings. That puts the two 'mov' instructions on separate lines in
+; the output, and we look for the "; encoding" string to verify it. For this
+; test we don't care what the encodings are, just that there is one for each
+; 'mov' instruction.
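+; In other words, the single source line "mov x0, x1 %% mov x1, x0" below is
+; expected to assemble exactly as if the two mov instructions had been written
+; on separate lines.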
+
+
+_foo:
+; CHECK: foo
+; CHECK: mov x0, x1 ; encoding
+; CHECK: mov x1, x0 ; encoding
+ mov x0, x1 %% mov x1, x0
+ ret lr
+
+
diff --git a/test/MC/AArch64/arm64-simd-ldst.s b/test/MC/AArch64/arm64-simd-ldst.s
new file mode 100644
index 000000000000..30854852c285
--- /dev/null
+++ b/test/MC/AArch64/arm64-simd-ldst.s
@@ -0,0 +1,2404 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon -output-asm-variant=1 -show-encoding < %s | FileCheck %s
+
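+; Assembly and encodings for the NEON structure load/store instructions
+; (ld1-ld4 / st1-st4, including the post-increment, replicate "ldNr", and
+; single-lane forms), written in the short arm64 ".8b"-style suffix syntax.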
+_ld1st1_multiple:
+ ld1.8b {v0}, [x1]
+ ld1.8b {v0, v1}, [x1]
+ ld1.8b {v0, v1, v2}, [x1]
+ ld1.8b {v0, v1, v2, v3}, [x1]
+
+ ld1.8b {v3}, [x1]
+ ld1.8b {v3, v4}, [x2]
+ ld1.8b {v4, v5, v6}, [x3]
+ ld1.8b {v7, v8, v9, v10}, [x4]
+
+ ld1.16b {v0}, [x1]
+ ld1.16b {v0, v1}, [x1]
+ ld1.16b {v0, v1, v2}, [x1]
+ ld1.16b {v0, v1, v2, v3}, [x1]
+
+ ld1.4h {v0}, [x1]
+ ld1.4h {v0, v1}, [x1]
+ ld1.4h {v0, v1, v2}, [x1]
+ ld1.4h {v0, v1, v2, v3}, [x1]
+
+ ld1.8h {v0}, [x1]
+ ld1.8h {v0, v1}, [x1]
+ ld1.8h {v0, v1, v2}, [x1]
+ ld1.8h {v0, v1, v2, v3}, [x1]
+
+ ld1.2s {v0}, [x1]
+ ld1.2s {v0, v1}, [x1]
+ ld1.2s {v0, v1, v2}, [x1]
+ ld1.2s {v0, v1, v2, v3}, [x1]
+
+ ld1.4s {v0}, [x1]
+ ld1.4s {v0, v1}, [x1]
+ ld1.4s {v0, v1, v2}, [x1]
+ ld1.4s {v0, v1, v2, v3}, [x1]
+
+ ld1.1d {v0}, [x1]
+ ld1.1d {v0, v1}, [x1]
+ ld1.1d {v0, v1, v2}, [x1]
+ ld1.1d {v0, v1, v2, v3}, [x1]
+
+ ld1.2d {v0}, [x1]
+ ld1.2d {v0, v1}, [x1]
+ ld1.2d {v0, v1, v2}, [x1]
+ ld1.2d {v0, v1, v2, v3}, [x1]
+
+ st1.8b {v0}, [x1]
+ st1.8b {v0, v1}, [x1]
+ st1.8b {v0, v1, v2}, [x1]
+ st1.8b {v0, v1, v2, v3}, [x1]
+
+ st1.16b {v0}, [x1]
+ st1.16b {v0, v1}, [x1]
+ st1.16b {v0, v1, v2}, [x1]
+ st1.16b {v0, v1, v2, v3}, [x1]
+
+ st1.4h {v0}, [x1]
+ st1.4h {v0, v1}, [x1]
+ st1.4h {v0, v1, v2}, [x1]
+ st1.4h {v0, v1, v2, v3}, [x1]
+
+ st1.8h {v0}, [x1]
+ st1.8h {v0, v1}, [x1]
+ st1.8h {v0, v1, v2}, [x1]
+ st1.8h {v0, v1, v2, v3}, [x1]
+
+ st1.2s {v0}, [x1]
+ st1.2s {v0, v1}, [x1]
+ st1.2s {v0, v1, v2}, [x1]
+ st1.2s {v0, v1, v2, v3}, [x1]
+
+ st1.4s {v0}, [x1]
+ st1.4s {v0, v1}, [x1]
+ st1.4s {v0, v1, v2}, [x1]
+ st1.4s {v0, v1, v2, v3}, [x1]
+
+ st1.1d {v0}, [x1]
+ st1.1d {v0, v1}, [x1]
+ st1.1d {v0, v1, v2}, [x1]
+ st1.1d {v0, v1, v2, v3}, [x1]
+
+ st1.2d {v0}, [x1]
+ st1.2d {v0, v1}, [x1]
+ st1.2d {v0, v1, v2}, [x1]
+ st1.2d {v0, v1, v2, v3}, [x1]
+
+ st1.2d {v5}, [x1]
+ st1.2d {v7, v8}, [x10]
+ st1.2d {v11, v12, v13}, [x1]
+ st1.2d {v28, v29, v30, v31}, [x13]
+
+; CHECK: _ld1st1_multiple:
+; CHECK: ld1.8b { v0 }, [x1] ; encoding: [0x20,0x70,0x40,0x0c]
+; CHECK: ld1.8b { v0, v1 }, [x1] ; encoding: [0x20,0xa0,0x40,0x0c]
+; CHECK: ld1.8b { v0, v1, v2 }, [x1] ; encoding: [0x20,0x60,0x40,0x0c]
+; CHECK: ld1.8b { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x20,0x40,0x0c]
+
+; CHECK: ld1.8b { v3 }, [x1] ; encoding: [0x23,0x70,0x40,0x0c]
+; CHECK: ld1.8b { v3, v4 }, [x2] ; encoding: [0x43,0xa0,0x40,0x0c]
+; CHECK: ld1.8b { v4, v5, v6 }, [x3] ; encoding: [0x64,0x60,0x40,0x0c]
+; CHECK: ld1.8b { v7, v8, v9, v10 }, [x4] ; encoding: [0x87,0x20,0x40,0x0c]
+
+; CHECK: ld1.16b { v0 }, [x1] ; encoding: [0x20,0x70,0x40,0x4c]
+; CHECK: ld1.16b { v0, v1 }, [x1] ; encoding: [0x20,0xa0,0x40,0x4c]
+; CHECK: ld1.16b { v0, v1, v2 }, [x1] ; encoding: [0x20,0x60,0x40,0x4c]
+; CHECK: ld1.16b { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x20,0x40,0x4c]
+
+; CHECK: ld1.4h { v0 }, [x1] ; encoding: [0x20,0x74,0x40,0x0c]
+; CHECK: ld1.4h { v0, v1 }, [x1] ; encoding: [0x20,0xa4,0x40,0x0c]
+; CHECK: ld1.4h { v0, v1, v2 }, [x1] ; encoding: [0x20,0x64,0x40,0x0c]
+; CHECK: ld1.4h { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x24,0x40,0x0c]
+
+; CHECK: ld1.8h { v0 }, [x1] ; encoding: [0x20,0x74,0x40,0x4c]
+; CHECK: ld1.8h { v0, v1 }, [x1] ; encoding: [0x20,0xa4,0x40,0x4c]
+; CHECK: ld1.8h { v0, v1, v2 }, [x1] ; encoding: [0x20,0x64,0x40,0x4c]
+; CHECK: ld1.8h { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x24,0x40,0x4c]
+
+; CHECK: ld1.2s { v0 }, [x1] ; encoding: [0x20,0x78,0x40,0x0c]
+; CHECK: ld1.2s { v0, v1 }, [x1] ; encoding: [0x20,0xa8,0x40,0x0c]
+; CHECK: ld1.2s { v0, v1, v2 }, [x1] ; encoding: [0x20,0x68,0x40,0x0c]
+; CHECK: ld1.2s { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x28,0x40,0x0c]
+
+; CHECK: ld1.4s { v0 }, [x1] ; encoding: [0x20,0x78,0x40,0x4c]
+; CHECK: ld1.4s { v0, v1 }, [x1] ; encoding: [0x20,0xa8,0x40,0x4c]
+; CHECK: ld1.4s { v0, v1, v2 }, [x1] ; encoding: [0x20,0x68,0x40,0x4c]
+; CHECK: ld1.4s { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x28,0x40,0x4c]
+
+; CHECK: ld1.1d { v0 }, [x1] ; encoding: [0x20,0x7c,0x40,0x0c]
+; CHECK: ld1.1d { v0, v1 }, [x1] ; encoding: [0x20,0xac,0x40,0x0c]
+; CHECK: ld1.1d { v0, v1, v2 }, [x1] ; encoding: [0x20,0x6c,0x40,0x0c]
+; CHECK: ld1.1d { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x2c,0x40,0x0c]
+
+; CHECK: ld1.2d { v0 }, [x1] ; encoding: [0x20,0x7c,0x40,0x4c]
+; CHECK: ld1.2d { v0, v1 }, [x1] ; encoding: [0x20,0xac,0x40,0x4c]
+; CHECK: ld1.2d { v0, v1, v2 }, [x1] ; encoding: [0x20,0x6c,0x40,0x4c]
+; CHECK: ld1.2d { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x2c,0x40,0x4c]
+
+
+; CHECK: st1.8b { v0 }, [x1] ; encoding: [0x20,0x70,0x00,0x0c]
+; CHECK: st1.8b { v0, v1 }, [x1] ; encoding: [0x20,0xa0,0x00,0x0c]
+; CHECK: st1.8b { v0, v1, v2 }, [x1] ; encoding: [0x20,0x60,0x00,0x0c]
+; CHECK: st1.8b { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x20,0x00,0x0c]
+
+; CHECK: st1.16b { v0 }, [x1] ; encoding: [0x20,0x70,0x00,0x4c]
+; CHECK: st1.16b { v0, v1 }, [x1] ; encoding: [0x20,0xa0,0x00,0x4c]
+; CHECK: st1.16b { v0, v1, v2 }, [x1] ; encoding: [0x20,0x60,0x00,0x4c]
+; CHECK: st1.16b { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x20,0x00,0x4c]
+
+; CHECK: st1.4h { v0 }, [x1] ; encoding: [0x20,0x74,0x00,0x0c]
+; CHECK: st1.4h { v0, v1 }, [x1] ; encoding: [0x20,0xa4,0x00,0x0c]
+; CHECK: st1.4h { v0, v1, v2 }, [x1] ; encoding: [0x20,0x64,0x00,0x0c]
+; CHECK: st1.4h { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x24,0x00,0x0c]
+
+; CHECK: st1.8h { v0 }, [x1] ; encoding: [0x20,0x74,0x00,0x4c]
+; CHECK: st1.8h { v0, v1 }, [x1] ; encoding: [0x20,0xa4,0x00,0x4c]
+; CHECK: st1.8h { v0, v1, v2 }, [x1] ; encoding: [0x20,0x64,0x00,0x4c]
+; CHECK: st1.8h { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x24,0x00,0x4c]
+
+; CHECK: st1.2s { v0 }, [x1] ; encoding: [0x20,0x78,0x00,0x0c]
+; CHECK: st1.2s { v0, v1 }, [x1] ; encoding: [0x20,0xa8,0x00,0x0c]
+; CHECK: st1.2s { v0, v1, v2 }, [x1] ; encoding: [0x20,0x68,0x00,0x0c]
+; CHECK: st1.2s { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x28,0x00,0x0c]
+
+; CHECK: st1.4s { v0 }, [x1] ; encoding: [0x20,0x78,0x00,0x4c]
+; CHECK: st1.4s { v0, v1 }, [x1] ; encoding: [0x20,0xa8,0x00,0x4c]
+; CHECK: st1.4s { v0, v1, v2 }, [x1] ; encoding: [0x20,0x68,0x00,0x4c]
+; CHECK: st1.4s { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x28,0x00,0x4c]
+
+; CHECK: st1.1d { v0 }, [x1] ; encoding: [0x20,0x7c,0x00,0x0c]
+; CHECK: st1.1d { v0, v1 }, [x1] ; encoding: [0x20,0xac,0x00,0x0c]
+; CHECK: st1.1d { v0, v1, v2 }, [x1] ; encoding: [0x20,0x6c,0x00,0x0c]
+; CHECK: st1.1d { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x2c,0x00,0x0c]
+
+; CHECK: st1.2d { v0 }, [x1] ; encoding: [0x20,0x7c,0x00,0x4c]
+; CHECK: st1.2d { v0, v1 }, [x1] ; encoding: [0x20,0xac,0x00,0x4c]
+; CHECK: st1.2d { v0, v1, v2 }, [x1] ; encoding: [0x20,0x6c,0x00,0x4c]
+; CHECK: st1.2d { v0, v1, v2, v3 }, [x1] ; encoding: [0x20,0x2c,0x00,0x4c]
+
+; CHECK: st1.2d { v5 }, [x1] ; encoding: [0x25,0x7c,0x00,0x4c]
+; CHECK: st1.2d { v7, v8 }, [x10] ; encoding: [0x47,0xad,0x00,0x4c]
+; CHECK: st1.2d { v11, v12, v13 }, [x1] ; encoding: [0x2b,0x6c,0x00,0x4c]
+; CHECK: st1.2d { v28, v29, v30, v31 }, [x13] ; encoding: [0xbc,0x2d,0x00,0x4c]
+
+_ld2st2_multiple:
+ ld2.8b {v4, v5}, [x19]
+ ld2.16b {v4, v5}, [x19]
+ ld2.4h {v4, v5}, [x19]
+ ld2.8h {v4, v5}, [x19]
+ ld2.2s {v4, v5}, [x19]
+ ld2.4s {v4, v5}, [x19]
+ ld2.2d {v4, v5}, [x19]
+
+ st2.8b {v4, v5}, [x19]
+ st2.16b {v4, v5}, [x19]
+ st2.4h {v4, v5}, [x19]
+ st2.8h {v4, v5}, [x19]
+ st2.2s {v4, v5}, [x19]
+ st2.4s {v4, v5}, [x19]
+ st2.2d {v4, v5}, [x19]
+
+
+; CHECK: _ld2st2_multiple
+; CHECK: ld2.8b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x0c]
+; CHECK: ld2.16b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x40,0x4c]
+; CHECK: ld2.4h { v4, v5 }, [x19] ; encoding: [0x64,0x86,0x40,0x0c]
+; CHECK: ld2.8h { v4, v5 }, [x19] ; encoding: [0x64,0x86,0x40,0x4c]
+; CHECK: ld2.2s { v4, v5 }, [x19] ; encoding: [0x64,0x8a,0x40,0x0c]
+; CHECK: ld2.4s { v4, v5 }, [x19] ; encoding: [0x64,0x8a,0x40,0x4c]
+; CHECK: ld2.2d { v4, v5 }, [x19] ; encoding: [0x64,0x8e,0x40,0x4c]
+
+; CHECK: st2.8b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x00,0x0c]
+; CHECK: st2.16b { v4, v5 }, [x19] ; encoding: [0x64,0x82,0x00,0x4c]
+; CHECK: st2.4h { v4, v5 }, [x19] ; encoding: [0x64,0x86,0x00,0x0c]
+; CHECK: st2.8h { v4, v5 }, [x19] ; encoding: [0x64,0x86,0x00,0x4c]
+; CHECK: st2.2s { v4, v5 }, [x19] ; encoding: [0x64,0x8a,0x00,0x0c]
+; CHECK: st2.4s { v4, v5 }, [x19] ; encoding: [0x64,0x8a,0x00,0x4c]
+; CHECK: st2.2d { v4, v5 }, [x19] ; encoding: [0x64,0x8e,0x00,0x4c]
+
+
+ld3st3_multiple:
+ ld3.8b {v4, v5, v6}, [x19]
+ ld3.16b {v4, v5, v6}, [x19]
+ ld3.4h {v4, v5, v6}, [x19]
+ ld3.8h {v4, v5, v6}, [x19]
+ ld3.2s {v4, v5, v6}, [x19]
+ ld3.4s {v4, v5, v6}, [x19]
+ ld3.2d {v4, v5, v6}, [x19]
+
+ ld3.8b {v9, v10, v11}, [x9]
+ ld3.16b {v14, v15, v16}, [x19]
+ ld3.4h {v24, v25, v26}, [x29]
+ ld3.8h {v30, v31, v0}, [x9]
+ ld3.2s {v2, v3, v4}, [x19]
+ ld3.4s {v4, v5, v6}, [x29]
+ ld3.2d {v7, v8, v9}, [x9]
+
+ st3.8b {v4, v5, v6}, [x19]
+ st3.16b {v4, v5, v6}, [x19]
+ st3.4h {v4, v5, v6}, [x19]
+ st3.8h {v4, v5, v6}, [x19]
+ st3.2s {v4, v5, v6}, [x19]
+ st3.4s {v4, v5, v6}, [x19]
+ st3.2d {v4, v5, v6}, [x19]
+
+ st3.8b {v10, v11, v12}, [x9]
+ st3.16b {v14, v15, v16}, [x19]
+ st3.4h {v24, v25, v26}, [x29]
+ st3.8h {v30, v31, v0}, [x9]
+ st3.2s {v2, v3, v4}, [x19]
+ st3.4s {v7, v8, v9}, [x29]
+ st3.2d {v4, v5, v6}, [x9]
+
+; CHECK: ld3st3_multiple:
+; CHECK: ld3.8b { v4, v5, v6 }, [x19] ; encoding: [0x64,0x42,0x40,0x0c]
+; CHECK: ld3.16b { v4, v5, v6 }, [x19] ; encoding: [0x64,0x42,0x40,0x4c]
+; CHECK: ld3.4h { v4, v5, v6 }, [x19] ; encoding: [0x64,0x46,0x40,0x0c]
+; CHECK: ld3.8h { v4, v5, v6 }, [x19] ; encoding: [0x64,0x46,0x40,0x4c]
+; CHECK: ld3.2s { v4, v5, v6 }, [x19] ; encoding: [0x64,0x4a,0x40,0x0c]
+; CHECK: ld3.4s { v4, v5, v6 }, [x19] ; encoding: [0x64,0x4a,0x40,0x4c]
+; CHECK: ld3.2d { v4, v5, v6 }, [x19] ; encoding: [0x64,0x4e,0x40,0x4c]
+
+; CHECK: ld3.8b { v9, v10, v11 }, [x9] ; encoding: [0x29,0x41,0x40,0x0c]
+; CHECK: ld3.16b { v14, v15, v16 }, [x19] ; encoding: [0x6e,0x42,0x40,0x4c]
+; CHECK: ld3.4h { v24, v25, v26 }, [x29] ; encoding: [0xb8,0x47,0x40,0x0c]
+; CHECK: ld3.8h { v30, v31, v0 }, [x9] ; encoding: [0x3e,0x45,0x40,0x4c]
+; CHECK: ld3.2s { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4a,0x40,0x0c]
+; CHECK: ld3.4s { v4, v5, v6 }, [x29] ; encoding: [0xa4,0x4b,0x40,0x4c]
+; CHECK: ld3.2d { v7, v8, v9 }, [x9] ; encoding: [0x27,0x4d,0x40,0x4c]
+
+; CHECK: st3.8b { v4, v5, v6 }, [x19] ; encoding: [0x64,0x42,0x00,0x0c]
+; CHECK: st3.16b { v4, v5, v6 }, [x19] ; encoding: [0x64,0x42,0x00,0x4c]
+; CHECK: st3.4h { v4, v5, v6 }, [x19] ; encoding: [0x64,0x46,0x00,0x0c]
+; CHECK: st3.8h { v4, v5, v6 }, [x19] ; encoding: [0x64,0x46,0x00,0x4c]
+; CHECK: st3.2s { v4, v5, v6 }, [x19] ; encoding: [0x64,0x4a,0x00,0x0c]
+; CHECK: st3.4s { v4, v5, v6 }, [x19] ; encoding: [0x64,0x4a,0x00,0x4c]
+; CHECK: st3.2d { v4, v5, v6 }, [x19] ; encoding: [0x64,0x4e,0x00,0x4c]
+
+; CHECK: st3.8b { v10, v11, v12 }, [x9] ; encoding: [0x2a,0x41,0x00,0x0c]
+; CHECK: st3.16b { v14, v15, v16 }, [x19] ; encoding: [0x6e,0x42,0x00,0x4c]
+; CHECK: st3.4h { v24, v25, v26 }, [x29] ; encoding: [0xb8,0x47,0x00,0x0c]
+; CHECK: st3.8h { v30, v31, v0 }, [x9] ; encoding: [0x3e,0x45,0x00,0x4c]
+; CHECK: st3.2s { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4a,0x00,0x0c]
+; CHECK: st3.4s { v7, v8, v9 }, [x29] ; encoding: [0xa7,0x4b,0x00,0x4c]
+; CHECK: st3.2d { v4, v5, v6 }, [x9] ; encoding: [0x24,0x4d,0x00,0x4c]
+
+ld4st4_multiple:
+ ld4.8b {v4, v5, v6, v7}, [x19]
+ ld4.16b {v4, v5, v6, v7}, [x19]
+ ld4.4h {v4, v5, v6, v7}, [x19]
+ ld4.8h {v4, v5, v6, v7}, [x19]
+ ld4.2s {v4, v5, v6, v7}, [x19]
+ ld4.4s {v4, v5, v6, v7}, [x19]
+ ld4.2d {v4, v5, v6, v7}, [x19]
+
+ st4.8b {v4, v5, v6, v7}, [x19]
+ st4.16b {v4, v5, v6, v7}, [x19]
+ st4.4h {v4, v5, v6, v7}, [x19]
+ st4.8h {v4, v5, v6, v7}, [x19]
+ st4.2s {v4, v5, v6, v7}, [x19]
+ st4.4s {v4, v5, v6, v7}, [x19]
+ st4.2d {v4, v5, v6, v7}, [x19]
+
+; CHECK: ld4st4_multiple:
+; CHECK: ld4.8b { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x02,0x40,0x0c]
+; CHECK: ld4.16b { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x02,0x40,0x4c]
+; CHECK: ld4.4h { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x06,0x40,0x0c]
+; CHECK: ld4.8h { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x06,0x40,0x4c]
+; CHECK: ld4.2s { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x0a,0x40,0x0c]
+; CHECK: ld4.4s { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x0a,0x40,0x4c]
+; CHECK: ld4.2d { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x0e,0x40,0x4c]
+
+; CHECK: st4.8b { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x02,0x00,0x0c]
+; CHECK: st4.16b { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x02,0x00,0x4c]
+; CHECK: st4.4h { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x06,0x00,0x0c]
+; CHECK: st4.8h { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x06,0x00,0x4c]
+; CHECK: st4.2s { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x0a,0x00,0x0c]
+; CHECK: st4.4s { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x0a,0x00,0x4c]
+; CHECK: st4.2d { v4, v5, v6, v7 }, [x19] ; encoding: [0x64,0x0e,0x00,0x4c]
+
+;-----------------------------------------------------------------------------
+; Post-increment versions.
+;-----------------------------------------------------------------------------
+
+_ld1st1_multiple_post:
+ ld1.8b {v0}, [x1], x15
+ ld1.8b {v0, v1}, [x1], x15
+ ld1.8b {v0, v1, v2}, [x1], x15
+ ld1.8b {v0, v1, v2, v3}, [x1], x15
+
+ ld1.16b {v0}, [x1], x15
+ ld1.16b {v0, v1}, [x1], x15
+ ld1.16b {v0, v1, v2}, [x1], x15
+ ld1.16b {v0, v1, v2, v3}, [x1], x15
+
+ ld1.4h {v0}, [x1], x15
+ ld1.4h {v0, v1}, [x1], x15
+ ld1.4h {v0, v1, v2}, [x1], x15
+ ld1.4h {v0, v1, v2, v3}, [x1], x15
+
+ ld1.8h {v0}, [x1], x15
+ ld1.8h {v0, v1}, [x1], x15
+ ld1.8h {v0, v1, v2}, [x1], x15
+ ld1.8h {v0, v1, v2, v3}, [x1], x15
+
+ ld1.2s {v0}, [x1], x15
+ ld1.2s {v0, v1}, [x1], x15
+ ld1.2s {v0, v1, v2}, [x1], x15
+ ld1.2s {v0, v1, v2, v3}, [x1], x15
+
+ ld1.4s {v0}, [x1], x15
+ ld1.4s {v0, v1}, [x1], x15
+ ld1.4s {v0, v1, v2}, [x1], x15
+ ld1.4s {v0, v1, v2, v3}, [x1], x15
+
+ ld1.1d {v0}, [x1], x15
+ ld1.1d {v0, v1}, [x1], x15
+ ld1.1d {v0, v1, v2}, [x1], x15
+ ld1.1d {v0, v1, v2, v3}, [x1], x15
+
+ ld1.2d {v0}, [x1], x15
+ ld1.2d {v0, v1}, [x1], x15
+ ld1.2d {v0, v1, v2}, [x1], x15
+ ld1.2d {v0, v1, v2, v3}, [x1], x15
+
+ st1.8b {v0}, [x1], x15
+ st1.8b {v0, v1}, [x1], x15
+ st1.8b {v0, v1, v2}, [x1], x15
+ st1.8b {v0, v1, v2, v3}, [x1], x15
+
+ st1.16b {v0}, [x1], x15
+ st1.16b {v0, v1}, [x1], x15
+ st1.16b {v0, v1, v2}, [x1], x15
+ st1.16b {v0, v1, v2, v3}, [x1], x15
+
+ st1.4h {v0}, [x1], x15
+ st1.4h {v0, v1}, [x1], x15
+ st1.4h {v0, v1, v2}, [x1], x15
+ st1.4h {v0, v1, v2, v3}, [x1], x15
+
+ st1.8h {v0}, [x1], x15
+ st1.8h {v0, v1}, [x1], x15
+ st1.8h {v0, v1, v2}, [x1], x15
+ st1.8h {v0, v1, v2, v3}, [x1], x15
+
+ st1.2s {v0}, [x1], x15
+ st1.2s {v0, v1}, [x1], x15
+ st1.2s {v0, v1, v2}, [x1], x15
+ st1.2s {v0, v1, v2, v3}, [x1], x15
+
+ st1.4s {v0}, [x1], x15
+ st1.4s {v0, v1}, [x1], x15
+ st1.4s {v0, v1, v2}, [x1], x15
+ st1.4s {v0, v1, v2, v3}, [x1], x15
+
+ st1.1d {v0}, [x1], x15
+ st1.1d {v0, v1}, [x1], x15
+ st1.1d {v0, v1, v2}, [x1], x15
+ st1.1d {v0, v1, v2, v3}, [x1], x15
+
+ st1.2d {v0}, [x1], x15
+ st1.2d {v0, v1}, [x1], x15
+ st1.2d {v0, v1, v2}, [x1], x15
+ st1.2d {v0, v1, v2, v3}, [x1], x15
+
+ ld1.8b {v0}, [x1], #8
+ ld1.8b {v0, v1}, [x1], #16
+ ld1.8b {v0, v1, v2}, [x1], #24
+ ld1.8b {v0, v1, v2, v3}, [x1], #32
+
+ ld1.16b {v0}, [x1], #16
+ ld1.16b {v0, v1}, [x1], #32
+ ld1.16b {v0, v1, v2}, [x1], #48
+ ld1.16b {v0, v1, v2, v3}, [x1], #64
+
+ ld1.4h {v0}, [x1], #8
+ ld1.4h {v0, v1}, [x1], #16
+ ld1.4h {v0, v1, v2}, [x1], #24
+ ld1.4h {v0, v1, v2, v3}, [x1], #32
+
+ ld1.8h {v0}, [x1], #16
+ ld1.8h {v0, v1}, [x1], #32
+ ld1.8h {v0, v1, v2}, [x1], #48
+ ld1.8h {v0, v1, v2, v3}, [x1], #64
+
+ ld1.2s {v0}, [x1], #8
+ ld1.2s {v0, v1}, [x1], #16
+ ld1.2s {v0, v1, v2}, [x1], #24
+ ld1.2s {v0, v1, v2, v3}, [x1], #32
+
+ ld1.4s {v0}, [x1], #16
+ ld1.4s {v0, v1}, [x1], #32
+ ld1.4s {v0, v1, v2}, [x1], #48
+ ld1.4s {v0, v1, v2, v3}, [x1], #64
+
+ ld1.1d {v0}, [x1], #8
+ ld1.1d {v0, v1}, [x1], #16
+ ld1.1d {v0, v1, v2}, [x1], #24
+ ld1.1d {v0, v1, v2, v3}, [x1], #32
+
+ ld1.2d {v0}, [x1], #16
+ ld1.2d {v0, v1}, [x1], #32
+ ld1.2d {v0, v1, v2}, [x1], #48
+ ld1.2d {v0, v1, v2, v3}, [x1], #64
+
+ st1.8b {v0}, [x1], #8
+ st1.8b {v0, v1}, [x1], #16
+ st1.8b {v0, v1, v2}, [x1], #24
+ st1.8b {v0, v1, v2, v3}, [x1], #32
+
+ st1.16b {v0}, [x1], #16
+ st1.16b {v0, v1}, [x1], #32
+ st1.16b {v0, v1, v2}, [x1], #48
+ st1.16b {v0, v1, v2, v3}, [x1], #64
+
+ st1.4h {v0}, [x1], #8
+ st1.4h {v0, v1}, [x1], #16
+ st1.4h {v0, v1, v2}, [x1], #24
+ st1.4h {v0, v1, v2, v3}, [x1], #32
+
+ st1.8h {v0}, [x1], #16
+ st1.8h {v0, v1}, [x1], #32
+ st1.8h {v0, v1, v2}, [x1], #48
+ st1.8h {v0, v1, v2, v3}, [x1], #64
+
+ st1.2s {v0}, [x1], #8
+ st1.2s {v0, v1}, [x1], #16
+ st1.2s {v0, v1, v2}, [x1], #24
+ st1.2s {v0, v1, v2, v3}, [x1], #32
+
+ st1.4s {v0}, [x1], #16
+ st1.4s {v0, v1}, [x1], #32
+ st1.4s {v0, v1, v2}, [x1], #48
+ st1.4s {v0, v1, v2, v3}, [x1], #64
+
+ st1.1d {v0}, [x1], #8
+ st1.1d {v0, v1}, [x1], #16
+ st1.1d {v0, v1, v2}, [x1], #24
+ st1.1d {v0, v1, v2, v3}, [x1], #32
+
+ st1.2d {v0}, [x1], #16
+ st1.2d {v0, v1}, [x1], #32
+ st1.2d {v0, v1, v2}, [x1], #48
+ st1.2d {v0, v1, v2, v3}, [x1], #64
+
+; CHECK: ld1st1_multiple_post:
+; CHECK: ld1.8b { v0 }, [x1], x15 ; encoding: [0x20,0x70,0xcf,0x0c]
+; CHECK: ld1.8b { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa0,0xcf,0x0c]
+; CHECK: ld1.8b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x60,0xcf,0x0c]
+; CHECK: ld1.8b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x20,0xcf,0x0c]
+
+; CHECK: ld1.16b { v0 }, [x1], x15 ; encoding: [0x20,0x70,0xcf,0x4c]
+; CHECK: ld1.16b { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa0,0xcf,0x4c]
+; CHECK: ld1.16b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x60,0xcf,0x4c]
+; CHECK: ld1.16b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x20,0xcf,0x4c]
+
+; CHECK: ld1.4h { v0 }, [x1], x15 ; encoding: [0x20,0x74,0xcf,0x0c]
+; CHECK: ld1.4h { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa4,0xcf,0x0c]
+; CHECK: ld1.4h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x64,0xcf,0x0c]
+; CHECK: ld1.4h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x24,0xcf,0x0c]
+
+; CHECK: ld1.8h { v0 }, [x1], x15 ; encoding: [0x20,0x74,0xcf,0x4c]
+; CHECK: ld1.8h { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa4,0xcf,0x4c]
+; CHECK: ld1.8h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x64,0xcf,0x4c]
+; CHECK: ld1.8h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x24,0xcf,0x4c]
+
+; CHECK: ld1.2s { v0 }, [x1], x15 ; encoding: [0x20,0x78,0xcf,0x0c]
+; CHECK: ld1.2s { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa8,0xcf,0x0c]
+; CHECK: ld1.2s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x68,0xcf,0x0c]
+; CHECK: ld1.2s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x28,0xcf,0x0c]
+
+; CHECK: ld1.4s { v0 }, [x1], x15 ; encoding: [0x20,0x78,0xcf,0x4c]
+; CHECK: ld1.4s { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa8,0xcf,0x4c]
+; CHECK: ld1.4s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x68,0xcf,0x4c]
+; CHECK: ld1.4s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x28,0xcf,0x4c]
+
+; CHECK: ld1.1d { v0 }, [x1], x15 ; encoding: [0x20,0x7c,0xcf,0x0c]
+; CHECK: ld1.1d { v0, v1 }, [x1], x15 ; encoding: [0x20,0xac,0xcf,0x0c]
+; CHECK: ld1.1d { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x6c,0xcf,0x0c]
+; CHECK: ld1.1d { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x2c,0xcf,0x0c]
+
+; CHECK: ld1.2d { v0 }, [x1], x15 ; encoding: [0x20,0x7c,0xcf,0x4c]
+; CHECK: ld1.2d { v0, v1 }, [x1], x15 ; encoding: [0x20,0xac,0xcf,0x4c]
+; CHECK: ld1.2d { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x6c,0xcf,0x4c]
+; CHECK: ld1.2d { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x2c,0xcf,0x4c]
+
+; CHECK: st1.8b { v0 }, [x1], x15 ; encoding: [0x20,0x70,0x8f,0x0c]
+; CHECK: st1.8b { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa0,0x8f,0x0c]
+; CHECK: st1.8b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x60,0x8f,0x0c]
+; CHECK: st1.8b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x20,0x8f,0x0c]
+
+; CHECK: st1.16b { v0 }, [x1], x15 ; encoding: [0x20,0x70,0x8f,0x4c]
+; CHECK: st1.16b { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa0,0x8f,0x4c]
+; CHECK: st1.16b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x60,0x8f,0x4c]
+; CHECK: st1.16b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x20,0x8f,0x4c]
+
+; CHECK: st1.4h { v0 }, [x1], x15 ; encoding: [0x20,0x74,0x8f,0x0c]
+; CHECK: st1.4h { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa4,0x8f,0x0c]
+; CHECK: st1.4h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x64,0x8f,0x0c]
+; CHECK: st1.4h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x24,0x8f,0x0c]
+
+; CHECK: st1.8h { v0 }, [x1], x15 ; encoding: [0x20,0x74,0x8f,0x4c]
+; CHECK: st1.8h { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa4,0x8f,0x4c]
+; CHECK: st1.8h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x64,0x8f,0x4c]
+; CHECK: st1.8h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x24,0x8f,0x4c]
+
+; CHECK: st1.2s { v0 }, [x1], x15 ; encoding: [0x20,0x78,0x8f,0x0c]
+; CHECK: st1.2s { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa8,0x8f,0x0c]
+; CHECK: st1.2s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x68,0x8f,0x0c]
+; CHECK: st1.2s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x28,0x8f,0x0c]
+
+; CHECK: st1.4s { v0 }, [x1], x15 ; encoding: [0x20,0x78,0x8f,0x4c]
+; CHECK: st1.4s { v0, v1 }, [x1], x15 ; encoding: [0x20,0xa8,0x8f,0x4c]
+; CHECK: st1.4s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x68,0x8f,0x4c]
+; CHECK: st1.4s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x28,0x8f,0x4c]
+
+; CHECK: st1.1d { v0 }, [x1], x15 ; encoding: [0x20,0x7c,0x8f,0x0c]
+; CHECK: st1.1d { v0, v1 }, [x1], x15 ; encoding: [0x20,0xac,0x8f,0x0c]
+; CHECK: st1.1d { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x6c,0x8f,0x0c]
+; CHECK: st1.1d { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x2c,0x8f,0x0c]
+
+; CHECK: st1.2d { v0 }, [x1], x15 ; encoding: [0x20,0x7c,0x8f,0x4c]
+; CHECK: st1.2d { v0, v1 }, [x1], x15 ; encoding: [0x20,0xac,0x8f,0x4c]
+; CHECK: st1.2d { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x6c,0x8f,0x4c]
+; CHECK: st1.2d { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x2c,0x8f,0x4c]
+
+; CHECK: ld1.8b { v0 }, [x1], #8 ; encoding: [0x20,0x70,0xdf,0x0c]
+; CHECK: ld1.8b { v0, v1 }, [x1], #16 ; encoding: [0x20,0xa0,0xdf,0x0c]
+; CHECK: ld1.8b { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x60,0xdf,0x0c]
+; CHECK: ld1.8b { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x20,0xdf,0x0c]
+
+; CHECK: ld1.16b { v0 }, [x1], #16 ; encoding: [0x20,0x70,0xdf,0x4c]
+; CHECK: ld1.16b { v0, v1 }, [x1], #32 ; encoding: [0x20,0xa0,0xdf,0x4c]
+; CHECK: ld1.16b { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x60,0xdf,0x4c]
+; CHECK: ld1.16b { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x20,0xdf,0x4c]
+
+; CHECK: ld1.4h { v0 }, [x1], #8 ; encoding: [0x20,0x74,0xdf,0x0c]
+; CHECK: ld1.4h { v0, v1 }, [x1], #16 ; encoding: [0x20,0xa4,0xdf,0x0c]
+; CHECK: ld1.4h { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x64,0xdf,0x0c]
+; CHECK: ld1.4h { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x24,0xdf,0x0c]
+
+; CHECK: ld1.8h { v0 }, [x1], #16 ; encoding: [0x20,0x74,0xdf,0x4c]
+; CHECK: ld1.8h { v0, v1 }, [x1], #32 ; encoding: [0x20,0xa4,0xdf,0x4c]
+; CHECK: ld1.8h { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x64,0xdf,0x4c]
+; CHECK: ld1.8h { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x24,0xdf,0x4c]
+
+; CHECK: ld1.2s { v0 }, [x1], #8 ; encoding: [0x20,0x78,0xdf,0x0c]
+; CHECK: ld1.2s { v0, v1 }, [x1], #16 ; encoding: [0x20,0xa8,0xdf,0x0c]
+; CHECK: ld1.2s { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x68,0xdf,0x0c]
+; CHECK: ld1.2s { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x28,0xdf,0x0c]
+
+; CHECK: ld1.4s { v0 }, [x1], #16 ; encoding: [0x20,0x78,0xdf,0x4c]
+; CHECK: ld1.4s { v0, v1 }, [x1], #32 ; encoding: [0x20,0xa8,0xdf,0x4c]
+; CHECK: ld1.4s { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x68,0xdf,0x4c]
+; CHECK: ld1.4s { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x28,0xdf,0x4c]
+
+; CHECK: ld1.1d { v0 }, [x1], #8 ; encoding: [0x20,0x7c,0xdf,0x0c]
+; CHECK: ld1.1d { v0, v1 }, [x1], #16 ; encoding: [0x20,0xac,0xdf,0x0c]
+; CHECK: ld1.1d { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x6c,0xdf,0x0c]
+; CHECK: ld1.1d { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x2c,0xdf,0x0c]
+
+; CHECK: ld1.2d { v0 }, [x1], #16 ; encoding: [0x20,0x7c,0xdf,0x4c]
+; CHECK: ld1.2d { v0, v1 }, [x1], #32 ; encoding: [0x20,0xac,0xdf,0x4c]
+; CHECK: ld1.2d { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x6c,0xdf,0x4c]
+; CHECK: ld1.2d { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x2c,0xdf,0x4c]
+
+; CHECK: st1.8b { v0 }, [x1], #8 ; encoding: [0x20,0x70,0x9f,0x0c]
+; CHECK: st1.8b { v0, v1 }, [x1], #16 ; encoding: [0x20,0xa0,0x9f,0x0c]
+; CHECK: st1.8b { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x60,0x9f,0x0c]
+; CHECK: st1.8b { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x20,0x9f,0x0c]
+
+; CHECK: st1.16b { v0 }, [x1], #16 ; encoding: [0x20,0x70,0x9f,0x4c]
+; CHECK: st1.16b { v0, v1 }, [x1], #32 ; encoding: [0x20,0xa0,0x9f,0x4c]
+; CHECK: st1.16b { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x60,0x9f,0x4c]
+; CHECK: st1.16b { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x20,0x9f,0x4c]
+
+; CHECK: st1.4h { v0 }, [x1], #8 ; encoding: [0x20,0x74,0x9f,0x0c]
+; CHECK: st1.4h { v0, v1 }, [x1], #16 ; encoding: [0x20,0xa4,0x9f,0x0c]
+; CHECK: st1.4h { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x64,0x9f,0x0c]
+; CHECK: st1.4h { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x24,0x9f,0x0c]
+
+; CHECK: st1.8h { v0 }, [x1], #16 ; encoding: [0x20,0x74,0x9f,0x4c]
+; CHECK: st1.8h { v0, v1 }, [x1], #32 ; encoding: [0x20,0xa4,0x9f,0x4c]
+; CHECK: st1.8h { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x64,0x9f,0x4c]
+; CHECK: st1.8h { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x24,0x9f,0x4c]
+
+; CHECK: st1.2s { v0 }, [x1], #8 ; encoding: [0x20,0x78,0x9f,0x0c]
+; CHECK: st1.2s { v0, v1 }, [x1], #16 ; encoding: [0x20,0xa8,0x9f,0x0c]
+; CHECK: st1.2s { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x68,0x9f,0x0c]
+; CHECK: st1.2s { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x28,0x9f,0x0c]
+
+; CHECK: st1.4s { v0 }, [x1], #16 ; encoding: [0x20,0x78,0x9f,0x4c]
+; CHECK: st1.4s { v0, v1 }, [x1], #32 ; encoding: [0x20,0xa8,0x9f,0x4c]
+; CHECK: st1.4s { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x68,0x9f,0x4c]
+; CHECK: st1.4s { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x28,0x9f,0x4c]
+
+; CHECK: st1.1d { v0 }, [x1], #8 ; encoding: [0x20,0x7c,0x9f,0x0c]
+; CHECK: st1.1d { v0, v1 }, [x1], #16 ; encoding: [0x20,0xac,0x9f,0x0c]
+; CHECK: st1.1d { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x6c,0x9f,0x0c]
+; CHECK: st1.1d { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x2c,0x9f,0x0c]
+
+; CHECK: st1.2d { v0 }, [x1], #16 ; encoding: [0x20,0x7c,0x9f,0x4c]
+; CHECK: st1.2d { v0, v1 }, [x1], #32 ; encoding: [0x20,0xac,0x9f,0x4c]
+; CHECK: st1.2d { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x6c,0x9f,0x4c]
+; CHECK: st1.2d { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x2c,0x9f,0x4c]
+
+
+_ld2st2_multiple_post:
+ ld2.8b {v0, v1}, [x1], x15
+ ld2.16b {v0, v1}, [x1], x15
+ ld2.4h {v0, v1}, [x1], x15
+ ld2.8h {v0, v1}, [x1], x15
+ ld2.2s {v0, v1}, [x1], x15
+ ld2.4s {v0, v1}, [x1], x15
+ ld2.2d {v0, v1}, [x1], x15
+
+ st2.8b {v0, v1}, [x1], x15
+ st2.16b {v0, v1}, [x1], x15
+ st2.4h {v0, v1}, [x1], x15
+ st2.8h {v0, v1}, [x1], x15
+ st2.2s {v0, v1}, [x1], x15
+ st2.4s {v0, v1}, [x1], x15
+ st2.2d {v0, v1}, [x1], x15
+
+ ld2.8b {v0, v1}, [x1], #16
+ ld2.16b {v0, v1}, [x1], #32
+ ld2.4h {v0, v1}, [x1], #16
+ ld2.8h {v0, v1}, [x1], #32
+ ld2.2s {v0, v1}, [x1], #16
+ ld2.4s {v0, v1}, [x1], #32
+ ld2.2d {v0, v1}, [x1], #32
+
+ st2.8b {v0, v1}, [x1], #16
+ st2.16b {v0, v1}, [x1], #32
+ st2.4h {v0, v1}, [x1], #16
+ st2.8h {v0, v1}, [x1], #32
+ st2.2s {v0, v1}, [x1], #16
+ st2.4s {v0, v1}, [x1], #32
+ st2.2d {v0, v1}, [x1], #32
+
+
+; CHECK: ld2st2_multiple_post:
+; CHECK: ld2.8b { v0, v1 }, [x1], x15 ; encoding: [0x20,0x80,0xcf,0x0c]
+; CHECK: ld2.16b { v0, v1 }, [x1], x15 ; encoding: [0x20,0x80,0xcf,0x4c]
+; CHECK: ld2.4h { v0, v1 }, [x1], x15 ; encoding: [0x20,0x84,0xcf,0x0c]
+; CHECK: ld2.8h { v0, v1 }, [x1], x15 ; encoding: [0x20,0x84,0xcf,0x4c]
+; CHECK: ld2.2s { v0, v1 }, [x1], x15 ; encoding: [0x20,0x88,0xcf,0x0c]
+; CHECK: ld2.4s { v0, v1 }, [x1], x15 ; encoding: [0x20,0x88,0xcf,0x4c]
+; CHECK: ld2.2d { v0, v1 }, [x1], x15 ; encoding: [0x20,0x8c,0xcf,0x4c]
+
+; CHECK: st2.8b { v0, v1 }, [x1], x15 ; encoding: [0x20,0x80,0x8f,0x0c]
+; CHECK: st2.16b { v0, v1 }, [x1], x15 ; encoding: [0x20,0x80,0x8f,0x4c]
+; CHECK: st2.4h { v0, v1 }, [x1], x15 ; encoding: [0x20,0x84,0x8f,0x0c]
+; CHECK: st2.8h { v0, v1 }, [x1], x15 ; encoding: [0x20,0x84,0x8f,0x4c]
+; CHECK: st2.2s { v0, v1 }, [x1], x15 ; encoding: [0x20,0x88,0x8f,0x0c]
+; CHECK: st2.4s { v0, v1 }, [x1], x15 ; encoding: [0x20,0x88,0x8f,0x4c]
+; CHECK: st2.2d { v0, v1 }, [x1], x15 ; encoding: [0x20,0x8c,0x8f,0x4c]
+
+; CHECK: ld2.8b { v0, v1 }, [x1], #16 ; encoding: [0x20,0x80,0xdf,0x0c]
+; CHECK: ld2.16b { v0, v1 }, [x1], #32 ; encoding: [0x20,0x80,0xdf,0x4c]
+; CHECK: ld2.4h { v0, v1 }, [x1], #16 ; encoding: [0x20,0x84,0xdf,0x0c]
+; CHECK: ld2.8h { v0, v1 }, [x1], #32 ; encoding: [0x20,0x84,0xdf,0x4c]
+; CHECK: ld2.2s { v0, v1 }, [x1], #16 ; encoding: [0x20,0x88,0xdf,0x0c]
+; CHECK: ld2.4s { v0, v1 }, [x1], #32 ; encoding: [0x20,0x88,0xdf,0x4c]
+; CHECK: ld2.2d { v0, v1 }, [x1], #32 ; encoding: [0x20,0x8c,0xdf,0x4c]
+
+; CHECK: st2.8b { v0, v1 }, [x1], #16 ; encoding: [0x20,0x80,0x9f,0x0c]
+; CHECK: st2.16b { v0, v1 }, [x1], #32 ; encoding: [0x20,0x80,0x9f,0x4c]
+; CHECK: st2.4h { v0, v1 }, [x1], #16 ; encoding: [0x20,0x84,0x9f,0x0c]
+; CHECK: st2.8h { v0, v1 }, [x1], #32 ; encoding: [0x20,0x84,0x9f,0x4c]
+; CHECK: st2.2s { v0, v1 }, [x1], #16 ; encoding: [0x20,0x88,0x9f,0x0c]
+; CHECK: st2.4s { v0, v1 }, [x1], #32 ; encoding: [0x20,0x88,0x9f,0x4c]
+; CHECK: st2.2d { v0, v1 }, [x1], #32 ; encoding: [0x20,0x8c,0x9f,0x4c]
+
+
+_ld3st3_multiple_post:
+ ld3.8b {v0, v1, v2}, [x1], x15
+ ld3.16b {v0, v1, v2}, [x1], x15
+ ld3.4h {v0, v1, v2}, [x1], x15
+ ld3.8h {v0, v1, v2}, [x1], x15
+ ld3.2s {v0, v1, v2}, [x1], x15
+ ld3.4s {v0, v1, v2}, [x1], x15
+ ld3.2d {v0, v1, v2}, [x1], x15
+
+ st3.8b {v0, v1, v2}, [x1], x15
+ st3.16b {v0, v1, v2}, [x1], x15
+ st3.4h {v0, v1, v2}, [x1], x15
+ st3.8h {v0, v1, v2}, [x1], x15
+ st3.2s {v0, v1, v2}, [x1], x15
+ st3.4s {v0, v1, v2}, [x1], x15
+ st3.2d {v0, v1, v2}, [x1], x15
+
+ ld3.8b {v0, v1, v2}, [x1], #24
+ ld3.16b {v0, v1, v2}, [x1], #48
+ ld3.4h {v0, v1, v2}, [x1], #24
+ ld3.8h {v0, v1, v2}, [x1], #48
+ ld3.2s {v0, v1, v2}, [x1], #24
+ ld3.4s {v0, v1, v2}, [x1], #48
+ ld3.2d {v0, v1, v2}, [x1], #48
+
+ st3.8b {v0, v1, v2}, [x1], #24
+ st3.16b {v0, v1, v2}, [x1], #48
+ st3.4h {v0, v1, v2}, [x1], #24
+ st3.8h {v0, v1, v2}, [x1], #48
+ st3.2s {v0, v1, v2}, [x1], #24
+ st3.4s {v0, v1, v2}, [x1], #48
+ st3.2d {v0, v1, v2}, [x1], #48
+
+; CHECK: ld3st3_multiple_post:
+; CHECK: ld3.8b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x40,0xcf,0x0c]
+; CHECK: ld3.16b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x40,0xcf,0x4c]
+; CHECK: ld3.4h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x44,0xcf,0x0c]
+; CHECK: ld3.8h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x44,0xcf,0x4c]
+; CHECK: ld3.2s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x48,0xcf,0x0c]
+; CHECK: ld3.4s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x48,0xcf,0x4c]
+; CHECK: ld3.2d { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x4c,0xcf,0x4c]
+
+; CHECK: st3.8b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x40,0x8f,0x0c]
+; CHECK: st3.16b { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x40,0x8f,0x4c]
+; CHECK: st3.4h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x44,0x8f,0x0c]
+; CHECK: st3.8h { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x44,0x8f,0x4c]
+; CHECK: st3.2s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x48,0x8f,0x0c]
+; CHECK: st3.4s { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x48,0x8f,0x4c]
+; CHECK: st3.2d { v0, v1, v2 }, [x1], x15 ; encoding: [0x20,0x4c,0x8f,0x4c]
+
+; CHECK: ld3.8b { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x40,0xdf,0x0c]
+; CHECK: ld3.16b { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x40,0xdf,0x4c]
+; CHECK: ld3.4h { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x44,0xdf,0x0c]
+; CHECK: ld3.8h { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x44,0xdf,0x4c]
+; CHECK: ld3.2s { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x48,0xdf,0x0c]
+; CHECK: ld3.4s { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x48,0xdf,0x4c]
+; CHECK: ld3.2d { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x4c,0xdf,0x4c]
+
+; CHECK: st3.8b { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x40,0x9f,0x0c]
+; CHECK: st3.16b { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x40,0x9f,0x4c]
+; CHECK: st3.4h { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x44,0x9f,0x0c]
+; CHECK: st3.8h { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x44,0x9f,0x4c]
+; CHECK: st3.2s { v0, v1, v2 }, [x1], #24 ; encoding: [0x20,0x48,0x9f,0x0c]
+; CHECK: st3.4s { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x48,0x9f,0x4c]
+; CHECK: st3.2d { v0, v1, v2 }, [x1], #48 ; encoding: [0x20,0x4c,0x9f,0x4c]
+
+_ld4st4_multiple_post:
+ ld4.8b {v0, v1, v2, v3}, [x1], x15
+ ld4.16b {v0, v1, v2, v3}, [x1], x15
+ ld4.4h {v0, v1, v2, v3}, [x1], x15
+ ld4.8h {v0, v1, v2, v3}, [x1], x15
+ ld4.2s {v0, v1, v2, v3}, [x1], x15
+ ld4.4s {v0, v1, v2, v3}, [x1], x15
+ ld4.2d {v0, v1, v2, v3}, [x1], x15
+
+ st4.8b {v0, v1, v2, v3}, [x1], x15
+ st4.16b {v0, v1, v2, v3}, [x1], x15
+ st4.4h {v0, v1, v2, v3}, [x1], x15
+ st4.8h {v0, v1, v2, v3}, [x1], x15
+ st4.2s {v0, v1, v2, v3}, [x1], x15
+ st4.4s {v0, v1, v2, v3}, [x1], x15
+ st4.2d {v0, v1, v2, v3}, [x1], x15
+
+ ld4.8b {v0, v1, v2, v3}, [x1], #32
+ ld4.16b {v0, v1, v2, v3}, [x1], #64
+ ld4.4h {v0, v1, v2, v3}, [x1], #32
+ ld4.8h {v0, v1, v2, v3}, [x1], #64
+ ld4.2s {v0, v1, v2, v3}, [x1], #32
+ ld4.4s {v0, v1, v2, v3}, [x1], #64
+ ld4.2d {v0, v1, v2, v3}, [x1], #64
+
+ st4.8b {v0, v1, v2, v3}, [x1], #32
+ st4.16b {v0, v1, v2, v3}, [x1], #64
+ st4.4h {v0, v1, v2, v3}, [x1], #32
+ st4.8h {v0, v1, v2, v3}, [x1], #64
+ st4.2s {v0, v1, v2, v3}, [x1], #32
+ st4.4s {v0, v1, v2, v3}, [x1], #64
+ st4.2d {v0, v1, v2, v3}, [x1], #64
+
+
+; CHECK: ld4st4_multiple_post:
+; CHECK: ld4.8b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x00,0xcf,0x0c]
+; CHECK: ld4.16b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x00,0xcf,0x4c]
+; CHECK: ld4.4h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x04,0xcf,0x0c]
+; CHECK: ld4.8h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x04,0xcf,0x4c]
+; CHECK: ld4.2s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x08,0xcf,0x0c]
+; CHECK: ld4.4s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x08,0xcf,0x4c]
+; CHECK: ld4.2d { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x0c,0xcf,0x4c]
+
+; CHECK: st4.8b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x00,0x8f,0x0c]
+; CHECK: st4.16b { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x00,0x8f,0x4c]
+; CHECK: st4.4h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x04,0x8f,0x0c]
+; CHECK: st4.8h { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x04,0x8f,0x4c]
+; CHECK: st4.2s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x08,0x8f,0x0c]
+; CHECK: st4.4s { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x08,0x8f,0x4c]
+; CHECK: st4.2d { v0, v1, v2, v3 }, [x1], x15 ; encoding: [0x20,0x0c,0x8f,0x4c]
+
+; CHECK: ld4.8b { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x00,0xdf,0x0c]
+; CHECK: ld4.16b { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x00,0xdf,0x4c]
+; CHECK: ld4.4h { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x04,0xdf,0x0c]
+; CHECK: ld4.8h { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x04,0xdf,0x4c]
+; CHECK: ld4.2s { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x08,0xdf,0x0c]
+; CHECK: ld4.4s { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x08,0xdf,0x4c]
+; CHECK: ld4.2d { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x0c,0xdf,0x4c]
+
+; CHECK: st4.8b { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x00,0x9f,0x0c]
+; CHECK: st4.16b { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x00,0x9f,0x4c]
+; CHECK: st4.4h { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x04,0x9f,0x0c]
+; CHECK: st4.8h { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x04,0x9f,0x4c]
+; CHECK: st4.2s { v0, v1, v2, v3 }, [x1], #32 ; encoding: [0x20,0x08,0x9f,0x0c]
+; CHECK: st4.4s { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x08,0x9f,0x4c]
+; CHECK: st4.2d { v0, v1, v2, v3 }, [x1], #64 ; encoding: [0x20,0x0c,0x9f,0x4c]
+
+ld1r:
+ ld1r.8b {v4}, [x2]
+ ld1r.8b {v4}, [x2], x3
+ ld1r.16b {v4}, [x2]
+ ld1r.16b {v4}, [x2], x3
+ ld1r.4h {v4}, [x2]
+ ld1r.4h {v4}, [x2], x3
+ ld1r.8h {v4}, [x2]
+ ld1r.8h {v4}, [x2], x3
+ ld1r.2s {v4}, [x2]
+ ld1r.2s {v4}, [x2], x3
+ ld1r.4s {v4}, [x2]
+ ld1r.4s {v4}, [x2], x3
+ ld1r.1d {v4}, [x2]
+ ld1r.1d {v4}, [x2], x3
+ ld1r.2d {v4}, [x2]
+ ld1r.2d {v4}, [x2], x3
+
+ ld1r.8b {v4}, [x2], #1
+ ld1r.16b {v4}, [x2], #1
+ ld1r.4h {v4}, [x2], #2
+ ld1r.8h {v4}, [x2], #2
+ ld1r.2s {v4}, [x2], #4
+ ld1r.4s {v4}, [x2], #4
+ ld1r.1d {v4}, [x2], #8
+ ld1r.2d {v4}, [x2], #8
+
+; CHECK: ld1r:
+; CHECK: ld1r.8b { v4 }, [x2] ; encoding: [0x44,0xc0,0x40,0x0d]
+; CHECK: ld1r.8b { v4 }, [x2], x3 ; encoding: [0x44,0xc0,0xc3,0x0d]
+; CHECK: ld1r.16b { v4 }, [x2] ; encoding: [0x44,0xc0,0x40,0x4d]
+; CHECK: ld1r.16b { v4 }, [x2], x3 ; encoding: [0x44,0xc0,0xc3,0x4d]
+; CHECK: ld1r.4h { v4 }, [x2] ; encoding: [0x44,0xc4,0x40,0x0d]
+; CHECK: ld1r.4h { v4 }, [x2], x3 ; encoding: [0x44,0xc4,0xc3,0x0d]
+; CHECK: ld1r.8h { v4 }, [x2] ; encoding: [0x44,0xc4,0x40,0x4d]
+; CHECK: ld1r.8h { v4 }, [x2], x3 ; encoding: [0x44,0xc4,0xc3,0x4d]
+; CHECK: ld1r.2s { v4 }, [x2] ; encoding: [0x44,0xc8,0x40,0x0d]
+; CHECK: ld1r.2s { v4 }, [x2], x3 ; encoding: [0x44,0xc8,0xc3,0x0d]
+; CHECK: ld1r.4s { v4 }, [x2] ; encoding: [0x44,0xc8,0x40,0x4d]
+; CHECK: ld1r.4s { v4 }, [x2], x3 ; encoding: [0x44,0xc8,0xc3,0x4d]
+; CHECK: ld1r.1d { v4 }, [x2] ; encoding: [0x44,0xcc,0x40,0x0d]
+; CHECK: ld1r.1d { v4 }, [x2], x3 ; encoding: [0x44,0xcc,0xc3,0x0d]
+; CHECK: ld1r.2d { v4 }, [x2] ; encoding: [0x44,0xcc,0x40,0x4d]
+; CHECK: ld1r.2d { v4 }, [x2], x3 ; encoding: [0x44,0xcc,0xc3,0x4d]
+
+; CHECK: ld1r.8b { v4 }, [x2], #1 ; encoding: [0x44,0xc0,0xdf,0x0d]
+; CHECK: ld1r.16b { v4 }, [x2], #1 ; encoding: [0x44,0xc0,0xdf,0x4d]
+; CHECK: ld1r.4h { v4 }, [x2], #2 ; encoding: [0x44,0xc4,0xdf,0x0d]
+; CHECK: ld1r.8h { v4 }, [x2], #2 ; encoding: [0x44,0xc4,0xdf,0x4d]
+; CHECK: ld1r.2s { v4 }, [x2], #4 ; encoding: [0x44,0xc8,0xdf,0x0d]
+; CHECK: ld1r.4s { v4 }, [x2], #4 ; encoding: [0x44,0xc8,0xdf,0x4d]
+; CHECK: ld1r.1d { v4 }, [x2], #8 ; encoding: [0x44,0xcc,0xdf,0x0d]
+; CHECK: ld1r.2d { v4 }, [x2], #8 ; encoding: [0x44,0xcc,0xdf,0x4d]
+
+ld2r:
+ ld2r.8b {v4, v5}, [x2]
+ ld2r.8b {v4, v5}, [x2], x3
+ ld2r.16b {v4, v5}, [x2]
+ ld2r.16b {v4, v5}, [x2], x3
+ ld2r.4h {v4, v5}, [x2]
+ ld2r.4h {v4, v5}, [x2], x3
+ ld2r.8h {v4, v5}, [x2]
+ ld2r.8h {v4, v5}, [x2], x3
+ ld2r.2s {v4, v5}, [x2]
+ ld2r.2s {v4, v5}, [x2], x3
+ ld2r.4s {v4, v5}, [x2]
+ ld2r.4s {v4, v5}, [x2], x3
+ ld2r.1d {v4, v5}, [x2]
+ ld2r.1d {v4, v5}, [x2], x3
+ ld2r.2d {v4, v5}, [x2]
+ ld2r.2d {v4, v5}, [x2], x3
+
+ ld2r.8b {v4, v5}, [x2], #2
+ ld2r.16b {v4, v5}, [x2], #2
+ ld2r.4h {v4, v5}, [x2], #4
+ ld2r.8h {v4, v5}, [x2], #4
+ ld2r.2s {v4, v5}, [x2], #8
+ ld2r.4s {v4, v5}, [x2], #8
+ ld2r.1d {v4, v5}, [x2], #16
+ ld2r.2d {v4, v5}, [x2], #16
+
+; CHECK: ld2r:
+; CHECK: ld2r.8b { v4, v5 }, [x2] ; encoding: [0x44,0xc0,0x60,0x0d]
+; CHECK: ld2r.8b { v4, v5 }, [x2], x3 ; encoding: [0x44,0xc0,0xe3,0x0d]
+; CHECK: ld2r.16b { v4, v5 }, [x2] ; encoding: [0x44,0xc0,0x60,0x4d]
+; CHECK: ld2r.16b { v4, v5 }, [x2], x3 ; encoding: [0x44,0xc0,0xe3,0x4d]
+; CHECK: ld2r.4h { v4, v5 }, [x2] ; encoding: [0x44,0xc4,0x60,0x0d]
+; CHECK: ld2r.4h { v4, v5 }, [x2], x3 ; encoding: [0x44,0xc4,0xe3,0x0d]
+; CHECK: ld2r.8h { v4, v5 }, [x2] ; encoding: [0x44,0xc4,0x60,0x4d]
+; CHECK: ld2r.8h { v4, v5 }, [x2], x3 ; encoding: [0x44,0xc4,0xe3,0x4d]
+; CHECK: ld2r.2s { v4, v5 }, [x2] ; encoding: [0x44,0xc8,0x60,0x0d]
+; CHECK: ld2r.2s { v4, v5 }, [x2], x3 ; encoding: [0x44,0xc8,0xe3,0x0d]
+; CHECK: ld2r.4s { v4, v5 }, [x2] ; encoding: [0x44,0xc8,0x60,0x4d]
+; CHECK: ld2r.4s { v4, v5 }, [x2], x3 ; encoding: [0x44,0xc8,0xe3,0x4d]
+; CHECK: ld2r.1d { v4, v5 }, [x2] ; encoding: [0x44,0xcc,0x60,0x0d]
+; CHECK: ld2r.1d { v4, v5 }, [x2], x3 ; encoding: [0x44,0xcc,0xe3,0x0d]
+; CHECK: ld2r.2d { v4, v5 }, [x2] ; encoding: [0x44,0xcc,0x60,0x4d]
+; CHECK: ld2r.2d { v4, v5 }, [x2], x3 ; encoding: [0x44,0xcc,0xe3,0x4d]
+
+; CHECK: ld2r.8b { v4, v5 }, [x2], #2 ; encoding: [0x44,0xc0,0xff,0x0d]
+; CHECK: ld2r.16b { v4, v5 }, [x2], #2 ; encoding: [0x44,0xc0,0xff,0x4d]
+; CHECK: ld2r.4h { v4, v5 }, [x2], #4 ; encoding: [0x44,0xc4,0xff,0x0d]
+; CHECK: ld2r.8h { v4, v5 }, [x2], #4 ; encoding: [0x44,0xc4,0xff,0x4d]
+; CHECK: ld2r.2s { v4, v5 }, [x2], #8 ; encoding: [0x44,0xc8,0xff,0x0d]
+; CHECK: ld2r.4s { v4, v5 }, [x2], #8 ; encoding: [0x44,0xc8,0xff,0x4d]
+; CHECK: ld2r.1d { v4, v5 }, [x2], #16 ; encoding: [0x44,0xcc,0xff,0x0d]
+; CHECK: ld2r.2d { v4, v5 }, [x2], #16 ; encoding: [0x44,0xcc,0xff,0x4d]
+
+ld3r:
+ ld3r.8b {v4, v5, v6}, [x2]
+ ld3r.8b {v4, v5, v6}, [x2], x3
+ ld3r.16b {v4, v5, v6}, [x2]
+ ld3r.16b {v4, v5, v6}, [x2], x3
+ ld3r.4h {v4, v5, v6}, [x2]
+ ld3r.4h {v4, v5, v6}, [x2], x3
+ ld3r.8h {v4, v5, v6}, [x2]
+ ld3r.8h {v4, v5, v6}, [x2], x3
+ ld3r.2s {v4, v5, v6}, [x2]
+ ld3r.2s {v4, v5, v6}, [x2], x3
+ ld3r.4s {v4, v5, v6}, [x2]
+ ld3r.4s {v4, v5, v6}, [x2], x3
+ ld3r.1d {v4, v5, v6}, [x2]
+ ld3r.1d {v4, v5, v6}, [x2], x3
+ ld3r.2d {v4, v5, v6}, [x2]
+ ld3r.2d {v4, v5, v6}, [x2], x3
+
+ ld3r.8b {v4, v5, v6}, [x2], #3
+ ld3r.16b {v4, v5, v6}, [x2], #3
+ ld3r.4h {v4, v5, v6}, [x2], #6
+ ld3r.8h {v4, v5, v6}, [x2], #6
+ ld3r.2s {v4, v5, v6}, [x2], #12
+ ld3r.4s {v4, v5, v6}, [x2], #12
+ ld3r.1d {v4, v5, v6}, [x2], #24
+ ld3r.2d {v4, v5, v6}, [x2], #24
+
+; CHECK: ld3r:
+; CHECK: ld3r.8b { v4, v5, v6 }, [x2] ; encoding: [0x44,0xe0,0x40,0x0d]
+; CHECK: ld3r.8b { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xe0,0xc3,0x0d]
+; CHECK: ld3r.16b { v4, v5, v6 }, [x2] ; encoding: [0x44,0xe0,0x40,0x4d]
+; CHECK: ld3r.16b { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xe0,0xc3,0x4d]
+; CHECK: ld3r.4h { v4, v5, v6 }, [x2] ; encoding: [0x44,0xe4,0x40,0x0d]
+; CHECK: ld3r.4h { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xe4,0xc3,0x0d]
+; CHECK: ld3r.8h { v4, v5, v6 }, [x2] ; encoding: [0x44,0xe4,0x40,0x4d]
+; CHECK: ld3r.8h { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xe4,0xc3,0x4d]
+; CHECK: ld3r.2s { v4, v5, v6 }, [x2] ; encoding: [0x44,0xe8,0x40,0x0d]
+; CHECK: ld3r.2s { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xe8,0xc3,0x0d]
+; CHECK: ld3r.4s { v4, v5, v6 }, [x2] ; encoding: [0x44,0xe8,0x40,0x4d]
+; CHECK: ld3r.4s { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xe8,0xc3,0x4d]
+; CHECK: ld3r.1d { v4, v5, v6 }, [x2] ; encoding: [0x44,0xec,0x40,0x0d]
+; CHECK: ld3r.1d { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xec,0xc3,0x0d]
+; CHECK: ld3r.2d { v4, v5, v6 }, [x2] ; encoding: [0x44,0xec,0x40,0x4d]
+; CHECK: ld3r.2d { v4, v5, v6 }, [x2], x3 ; encoding: [0x44,0xec,0xc3,0x4d]
+
+; CHECK: ld3r.8b { v4, v5, v6 }, [x2], #3 ; encoding: [0x44,0xe0,0xdf,0x0d]
+; CHECK: ld3r.16b { v4, v5, v6 }, [x2], #3 ; encoding: [0x44,0xe0,0xdf,0x4d]
+; CHECK: ld3r.4h { v4, v5, v6 }, [x2], #6 ; encoding: [0x44,0xe4,0xdf,0x0d]
+; CHECK: ld3r.8h { v4, v5, v6 }, [x2], #6 ; encoding: [0x44,0xe4,0xdf,0x4d]
+; CHECK: ld3r.2s { v4, v5, v6 }, [x2], #12 ; encoding: [0x44,0xe8,0xdf,0x0d]
+; CHECK: ld3r.4s { v4, v5, v6 }, [x2], #12 ; encoding: [0x44,0xe8,0xdf,0x4d]
+; CHECK: ld3r.1d { v4, v5, v6 }, [x2], #24 ; encoding: [0x44,0xec,0xdf,0x0d]
+; CHECK: ld3r.2d { v4, v5, v6 }, [x2], #24 ; encoding: [0x44,0xec,0xdf,0x4d]
+
+ld4r:
+ ld4r.8b {v4, v5, v6, v7}, [x2]
+ ld4r.8b {v4, v5, v6, v7}, [x2], x3
+ ld4r.16b {v4, v5, v6, v7}, [x2]
+ ld4r.16b {v4, v5, v6, v7}, [x2], x3
+ ld4r.4h {v4, v5, v6, v7}, [x2]
+ ld4r.4h {v4, v5, v6, v7}, [x2], x3
+ ld4r.8h {v4, v5, v6, v7}, [x2]
+ ld4r.8h {v4, v5, v6, v7}, [x2], x3
+ ld4r.2s {v4, v5, v6, v7}, [x2]
+ ld4r.2s {v4, v5, v6, v7}, [x2], x3
+ ld4r.4s {v4, v5, v6, v7}, [x2]
+ ld4r.4s {v4, v5, v6, v7}, [x2], x3
+ ld4r.1d {v4, v5, v6, v7}, [x2]
+ ld4r.1d {v4, v5, v6, v7}, [x2], x3
+ ld4r.2d {v4, v5, v6, v7}, [x2]
+ ld4r.2d {v4, v5, v6, v7}, [x2], x3
+
+ ld4r.8b {v4, v5, v6, v7}, [x2], #4
+ ld4r.16b {v5, v6, v7, v8}, [x2], #4
+ ld4r.4h {v6, v7, v8, v9}, [x2], #8
+ ld4r.8h {v1, v2, v3, v4}, [x2], #8
+ ld4r.2s {v2, v3, v4, v5}, [x2], #16
+ ld4r.4s {v3, v4, v5, v6}, [x2], #16
+ ld4r.1d {v0, v1, v2, v3}, [x2], #32
+ ld4r.2d {v4, v5, v6, v7}, [x2], #32
+
+; CHECK: ld4r:
+; CHECK: ld4r.8b { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xe0,0x60,0x0d]
+; CHECK: ld4r.8b { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xe0,0xe3,0x0d]
+; CHECK: ld4r.16b { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xe0,0x60,0x4d]
+; CHECK: ld4r.16b { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xe0,0xe3,0x4d]
+; CHECK: ld4r.4h { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xe4,0x60,0x0d]
+; CHECK: ld4r.4h { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xe4,0xe3,0x0d]
+; CHECK: ld4r.8h { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xe4,0x60,0x4d]
+; CHECK: ld4r.8h { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xe4,0xe3,0x4d]
+; CHECK: ld4r.2s { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xe8,0x60,0x0d]
+; CHECK: ld4r.2s { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xe8,0xe3,0x0d]
+; CHECK: ld4r.4s { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xe8,0x60,0x4d]
+; CHECK: ld4r.4s { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xe8,0xe3,0x4d]
+; CHECK: ld4r.1d { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xec,0x60,0x0d]
+; CHECK: ld4r.1d { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xec,0xe3,0x0d]
+; CHECK: ld4r.2d { v4, v5, v6, v7 }, [x2] ; encoding: [0x44,0xec,0x60,0x4d]
+; CHECK: ld4r.2d { v4, v5, v6, v7 }, [x2], x3 ; encoding: [0x44,0xec,0xe3,0x4d]
+
+; CHECK: ld4r.8b { v4, v5, v6, v7 }, [x2], #4 ; encoding: [0x44,0xe0,0xff,0x0d]
+; CHECK: ld4r.16b { v5, v6, v7, v8 }, [x2], #4 ; encoding: [0x45,0xe0,0xff,0x4d]
+; CHECK: ld4r.4h { v6, v7, v8, v9 }, [x2], #8 ; encoding: [0x46,0xe4,0xff,0x0d]
+; CHECK: ld4r.8h { v1, v2, v3, v4 }, [x2], #8 ; encoding: [0x41,0xe4,0xff,0x4d]
+; CHECK: ld4r.2s { v2, v3, v4, v5 }, [x2], #16 ; encoding: [0x42,0xe8,0xff,0x0d]
+; CHECK: ld4r.4s { v3, v4, v5, v6 }, [x2], #16 ; encoding: [0x43,0xe8,0xff,0x4d]
+; CHECK: ld4r.1d { v0, v1, v2, v3 }, [x2], #32 ; encoding: [0x40,0xec,0xff,0x0d]
+; CHECK: ld4r.2d { v4, v5, v6, v7 }, [x2], #32 ; encoding: [0x44,0xec,0xff,0x4d]
+
+
+_ld1:
+ ld1.b {v4}[13], [x3]
+ ld1.h {v4}[2], [x3]
+ ld1.s {v4}[2], [x3]
+ ld1.d {v4}[1], [x3]
+ ld1.b {v4}[13], [x3], x5
+ ld1.h {v4}[2], [x3], x5
+ ld1.s {v4}[2], [x3], x5
+ ld1.d {v4}[1], [x3], x5
+ ld1.b {v4}[13], [x3], #1
+ ld1.h {v4}[2], [x3], #2
+ ld1.s {v4}[2], [x3], #4
+ ld1.d {v4}[1], [x3], #8
+
+; CHECK: _ld1:
+; CHECK: ld1.b { v4 }[13], [x3] ; encoding: [0x64,0x14,0x40,0x4d]
+; CHECK: ld1.h { v4 }[2], [x3] ; encoding: [0x64,0x50,0x40,0x0d]
+; CHECK: ld1.s { v4 }[2], [x3] ; encoding: [0x64,0x80,0x40,0x4d]
+; CHECK: ld1.d { v4 }[1], [x3] ; encoding: [0x64,0x84,0x40,0x4d]
+; CHECK: ld1.b { v4 }[13], [x3], x5 ; encoding: [0x64,0x14,0xc5,0x4d]
+; CHECK: ld1.h { v4 }[2], [x3], x5 ; encoding: [0x64,0x50,0xc5,0x0d]
+; CHECK: ld1.s { v4 }[2], [x3], x5 ; encoding: [0x64,0x80,0xc5,0x4d]
+; CHECK: ld1.d { v4 }[1], [x3], x5 ; encoding: [0x64,0x84,0xc5,0x4d]
+; CHECK: ld1.b { v4 }[13], [x3], #1 ; encoding: [0x64,0x14,0xdf,0x4d]
+; CHECK: ld1.h { v4 }[2], [x3], #2 ; encoding: [0x64,0x50,0xdf,0x0d]
+; CHECK: ld1.s { v4 }[2], [x3], #4 ; encoding: [0x64,0x80,0xdf,0x4d]
+; CHECK: ld1.d { v4 }[1], [x3], #8 ; encoding: [0x64,0x84,0xdf,0x4d]
+
+_ld2:
+ ld2.b {v4, v5}[13], [x3]
+ ld2.h {v4, v5}[2], [x3]
+ ld2.s {v4, v5}[2], [x3]
+ ld2.d {v4, v5}[1], [x3]
+ ld2.b {v4, v5}[13], [x3], x5
+ ld2.h {v4, v5}[2], [x3], x5
+ ld2.s {v4, v5}[2], [x3], x5
+ ld2.d {v4, v5}[1], [x3], x5
+ ld2.b {v4, v5}[13], [x3], #2
+ ld2.h {v4, v5}[2], [x3], #4
+ ld2.s {v4, v5}[2], [x3], #8
+ ld2.d {v4, v5}[1], [x3], #16
+
+
+; CHECK: _ld2:
+; CHECK: ld2.b { v4, v5 }[13], [x3] ; encoding: [0x64,0x14,0x60,0x4d]
+; CHECK: ld2.h { v4, v5 }[2], [x3] ; encoding: [0x64,0x50,0x60,0x0d]
+; CHECK: ld2.s { v4, v5 }[2], [x3] ; encoding: [0x64,0x80,0x60,0x4d]
+; CHECK: ld2.d { v4, v5 }[1], [x3] ; encoding: [0x64,0x84,0x60,0x4d]
+; CHECK: ld2.b { v4, v5 }[13], [x3], x5 ; encoding: [0x64,0x14,0xe5,0x4d]
+; CHECK: ld2.h { v4, v5 }[2], [x3], x5 ; encoding: [0x64,0x50,0xe5,0x0d]
+; CHECK: ld2.s { v4, v5 }[2], [x3], x5 ; encoding: [0x64,0x80,0xe5,0x4d]
+; CHECK: ld2.d { v4, v5 }[1], [x3], x5 ; encoding: [0x64,0x84,0xe5,0x4d]
+; CHECK: ld2.b { v4, v5 }[13], [x3], #2 ; encoding: [0x64,0x14,0xff,0x4d]
+; CHECK: ld2.h { v4, v5 }[2], [x3], #4 ; encoding: [0x64,0x50,0xff,0x0d]
+; CHECK: ld2.s { v4, v5 }[2], [x3], #8 ; encoding: [0x64,0x80,0xff,0x4d]
+; CHECK: ld2.d { v4, v5 }[1], [x3], #16 ; encoding: [0x64,0x84,0xff,0x4d]
+
+
+_ld3:
+ ld3.b {v4, v5, v6}[13], [x3]
+ ld3.h {v4, v5, v6}[2], [x3]
+ ld3.s {v4, v5, v6}[2], [x3]
+ ld3.d {v4, v5, v6}[1], [x3]
+ ld3.b {v4, v5, v6}[13], [x3], x5
+ ld3.h {v4, v5, v6}[2], [x3], x5
+ ld3.s {v4, v5, v6}[2], [x3], x5
+ ld3.d {v4, v5, v6}[1], [x3], x5
+ ld3.b {v4, v5, v6}[13], [x3], #3
+ ld3.h {v4, v5, v6}[2], [x3], #6
+ ld3.s {v4, v5, v6}[2], [x3], #12
+ ld3.d {v4, v5, v6}[1], [x3], #24
+
+
+; CHECK: _ld3:
+; CHECK: ld3.b { v4, v5, v6 }[13], [x3] ; encoding: [0x64,0x34,0x40,0x4d]
+; CHECK: ld3.h { v4, v5, v6 }[2], [x3] ; encoding: [0x64,0x70,0x40,0x0d]
+; CHECK: ld3.s { v4, v5, v6 }[2], [x3] ; encoding: [0x64,0xa0,0x40,0x4d]
+; CHECK: ld3.d { v4, v5, v6 }[1], [x3] ; encoding: [0x64,0xa4,0x40,0x4d]
+; CHECK: ld3.b { v4, v5, v6 }[13], [x3], x5 ; encoding: [0x64,0x34,0xc5,0x4d]
+; CHECK: ld3.h { v4, v5, v6 }[2], [x3], x5 ; encoding: [0x64,0x70,0xc5,0x0d]
+; CHECK: ld3.s { v4, v5, v6 }[2], [x3], x5 ; encoding: [0x64,0xa0,0xc5,0x4d]
+; CHECK: ld3.d { v4, v5, v6 }[1], [x3], x5 ; encoding: [0x64,0xa4,0xc5,0x4d]
+; CHECK: ld3.b { v4, v5, v6 }[13], [x3], #3 ; encoding: [0x64,0x34,0xdf,0x4d]
+; CHECK: ld3.h { v4, v5, v6 }[2], [x3], #6 ; encoding: [0x64,0x70,0xdf,0x0d]
+; CHECK: ld3.s { v4, v5, v6 }[2], [x3], #12 ; encoding: [0x64,0xa0,0xdf,0x4d]
+; CHECK: ld3.d { v4, v5, v6 }[1], [x3], #24 ; encoding: [0x64,0xa4,0xdf,0x4d]
+
+
+_ld4:
+ ld4.b {v4, v5, v6, v7}[13], [x3]
+ ld4.h {v4, v5, v6, v7}[2], [x3]
+ ld4.s {v4, v5, v6, v7}[2], [x3]
+ ld4.d {v4, v5, v6, v7}[1], [x3]
+ ld4.b {v4, v5, v6, v7}[13], [x3], x5
+ ld4.h {v4, v5, v6, v7}[2], [x3], x5
+ ld4.s {v4, v5, v6, v7}[2], [x3], x5
+ ld4.d {v4, v5, v6, v7}[1], [x3], x5
+ ld4.b {v4, v5, v6, v7}[13], [x3], #4
+ ld4.h {v4, v5, v6, v7}[2], [x3], #8
+ ld4.s {v4, v5, v6, v7}[2], [x3], #16
+ ld4.d {v4, v5, v6, v7}[1], [x3], #32
+
+; CHECK: _ld4:
+; CHECK: ld4.b { v4, v5, v6, v7 }[13], [x3] ; encoding: [0x64,0x34,0x60,0x4d]
+; CHECK: ld4.h { v4, v5, v6, v7 }[2], [x3] ; encoding: [0x64,0x70,0x60,0x0d]
+; CHECK: ld4.s { v4, v5, v6, v7 }[2], [x3] ; encoding: [0x64,0xa0,0x60,0x4d]
+; CHECK: ld4.d { v4, v5, v6, v7 }[1], [x3] ; encoding: [0x64,0xa4,0x60,0x4d]
+; CHECK: ld4.b { v4, v5, v6, v7 }[13], [x3], x5 ; encoding: [0x64,0x34,0xe5,0x4d]
+; CHECK: ld4.h { v4, v5, v6, v7 }[2], [x3], x5 ; encoding: [0x64,0x70,0xe5,0x0d]
+; CHECK: ld4.s { v4, v5, v6, v7 }[2], [x3], x5 ; encoding: [0x64,0xa0,0xe5,0x4d]
+; CHECK: ld4.d { v4, v5, v6, v7 }[1], [x3], x5 ; encoding: [0x64,0xa4,0xe5,0x4d]
+; CHECK: ld4.b { v4, v5, v6, v7 }[13], [x3], #4 ; encoding: [0x64,0x34,0xff,0x4d]
+; CHECK: ld4.h { v4, v5, v6, v7 }[2], [x3], #8 ; encoding: [0x64,0x70,0xff,0x0d]
+; CHECK: ld4.s { v4, v5, v6, v7 }[2], [x3], #16 ; encoding: [0x64,0xa0,0xff,0x4d]
+; CHECK: ld4.d { v4, v5, v6, v7 }[1], [x3], #32 ; encoding: [0x64,0xa4,0xff,0x4d]
+
+_st1:
+ st1.b {v4}[13], [x3]
+ st1.h {v4}[2], [x3]
+ st1.s {v4}[2], [x3]
+ st1.d {v4}[1], [x3]
+ st1.b {v4}[13], [x3], x5
+ st1.h {v4}[2], [x3], x5
+ st1.s {v4}[2], [x3], x5
+ st1.d {v4}[1], [x3], x5
+ st1.b {v4}[13], [x3], #1
+ st1.h {v4}[2], [x3], #2
+ st1.s {v4}[2], [x3], #4
+ st1.d {v4}[1], [x3], #8
+
+; CHECK: _st1:
+; CHECK: st1.b { v4 }[13], [x3] ; encoding: [0x64,0x14,0x00,0x4d]
+; CHECK: st1.h { v4 }[2], [x3] ; encoding: [0x64,0x50,0x00,0x0d]
+; CHECK: st1.s { v4 }[2], [x3] ; encoding: [0x64,0x80,0x00,0x4d]
+; CHECK: st1.d { v4 }[1], [x3] ; encoding: [0x64,0x84,0x00,0x4d]
+; CHECK: st1.b { v4 }[13], [x3], x5 ; encoding: [0x64,0x14,0x85,0x4d]
+; CHECK: st1.h { v4 }[2], [x3], x5 ; encoding: [0x64,0x50,0x85,0x0d]
+; CHECK: st1.s { v4 }[2], [x3], x5 ; encoding: [0x64,0x80,0x85,0x4d]
+; CHECK: st1.d { v4 }[1], [x3], x5 ; encoding: [0x64,0x84,0x85,0x4d]
+; CHECK: st1.b { v4 }[13], [x3], #1 ; encoding: [0x64,0x14,0x9f,0x4d]
+; CHECK: st1.h { v4 }[2], [x3], #2 ; encoding: [0x64,0x50,0x9f,0x0d]
+; CHECK: st1.s { v4 }[2], [x3], #4 ; encoding: [0x64,0x80,0x9f,0x4d]
+; CHECK: st1.d { v4 }[1], [x3], #8 ; encoding: [0x64,0x84,0x9f,0x4d]
+
+_st2:
+ st2.b {v4, v5}[13], [x3]
+ st2.h {v4, v5}[2], [x3]
+ st2.s {v4, v5}[2], [x3]
+ st2.d {v4, v5}[1], [x3]
+ st2.b {v4, v5}[13], [x3], x5
+ st2.h {v4, v5}[2], [x3], x5
+ st2.s {v4, v5}[2], [x3], x5
+ st2.d {v4, v5}[1], [x3], x5
+ st2.b {v4, v5}[13], [x3], #2
+ st2.h {v4, v5}[2], [x3], #4
+ st2.s {v4, v5}[2], [x3], #8
+ st2.d {v4, v5}[1], [x3], #16
+
+; CHECK: _st2:
+; CHECK: st2.b { v4, v5 }[13], [x3] ; encoding: [0x64,0x14,0x20,0x4d]
+; CHECK: st2.h { v4, v5 }[2], [x3] ; encoding: [0x64,0x50,0x20,0x0d]
+; CHECK: st2.s { v4, v5 }[2], [x3] ; encoding: [0x64,0x80,0x20,0x4d]
+; CHECK: st2.d { v4, v5 }[1], [x3] ; encoding: [0x64,0x84,0x20,0x4d]
+; CHECK: st2.b { v4, v5 }[13], [x3], x5 ; encoding: [0x64,0x14,0xa5,0x4d]
+; CHECK: st2.h { v4, v5 }[2], [x3], x5 ; encoding: [0x64,0x50,0xa5,0x0d]
+; CHECK: st2.s { v4, v5 }[2], [x3], x5 ; encoding: [0x64,0x80,0xa5,0x4d]
+; CHECK: st2.d { v4, v5 }[1], [x3], x5 ; encoding: [0x64,0x84,0xa5,0x4d]
+; CHECK: st2.b { v4, v5 }[13], [x3], #2 ; encoding: [0x64,0x14,0xbf,0x4d]
+; CHECK: st2.h { v4, v5 }[2], [x3], #4 ; encoding: [0x64,0x50,0xbf,0x0d]
+; CHECK: st2.s { v4, v5 }[2], [x3], #8 ; encoding: [0x64,0x80,0xbf,0x4d]
+; CHECK: st2.d { v4, v5 }[1], [x3], #16 ; encoding: [0x64,0x84,0xbf,0x4d]
+
+
+_st3:
+ st3.b {v4, v5, v6}[13], [x3]
+ st3.h {v4, v5, v6}[2], [x3]
+ st3.s {v4, v5, v6}[2], [x3]
+ st3.d {v4, v5, v6}[1], [x3]
+ st3.b {v4, v5, v6}[13], [x3], x5
+ st3.h {v4, v5, v6}[2], [x3], x5
+ st3.s {v4, v5, v6}[2], [x3], x5
+ st3.d {v4, v5, v6}[1], [x3], x5
+ st3.b {v4, v5, v6}[13], [x3], #3
+ st3.h {v4, v5, v6}[2], [x3], #6
+ st3.s {v4, v5, v6}[2], [x3], #12
+ st3.d {v4, v5, v6}[1], [x3], #24
+
+; CHECK: _st3:
+; CHECK: st3.b { v4, v5, v6 }[13], [x3] ; encoding: [0x64,0x34,0x00,0x4d]
+; CHECK: st3.h { v4, v5, v6 }[2], [x3] ; encoding: [0x64,0x70,0x00,0x0d]
+; CHECK: st3.s { v4, v5, v6 }[2], [x3] ; encoding: [0x64,0xa0,0x00,0x4d]
+; CHECK: st3.d { v4, v5, v6 }[1], [x3] ; encoding: [0x64,0xa4,0x00,0x4d]
+; CHECK: st3.b { v4, v5, v6 }[13], [x3], x5 ; encoding: [0x64,0x34,0x85,0x4d]
+; CHECK: st3.h { v4, v5, v6 }[2], [x3], x5 ; encoding: [0x64,0x70,0x85,0x0d]
+; CHECK: st3.s { v4, v5, v6 }[2], [x3], x5 ; encoding: [0x64,0xa0,0x85,0x4d]
+; CHECK: st3.d { v4, v5, v6 }[1], [x3], x5 ; encoding: [0x64,0xa4,0x85,0x4d]
+; CHECK: st3.b { v4, v5, v6 }[13], [x3], #3 ; encoding: [0x64,0x34,0x9f,0x4d]
+; CHECK: st3.h { v4, v5, v6 }[2], [x3], #6 ; encoding: [0x64,0x70,0x9f,0x0d]
+; CHECK: st3.s { v4, v5, v6 }[2], [x3], #12 ; encoding: [0x64,0xa0,0x9f,0x4d]
+; CHECK: st3.d { v4, v5, v6 }[1], [x3], #24 ; encoding: [0x64,0xa4,0x9f,0x4d]
+
+_st4:
+ st4.b {v4, v5, v6, v7}[13], [x3]
+ st4.h {v4, v5, v6, v7}[2], [x3]
+ st4.s {v4, v5, v6, v7}[2], [x3]
+ st4.d {v4, v5, v6, v7}[1], [x3]
+ st4.b {v4, v5, v6, v7}[13], [x3], x5
+ st4.h {v4, v5, v6, v7}[2], [x3], x5
+ st4.s {v4, v5, v6, v7}[2], [x3], x5
+ st4.d {v4, v5, v6, v7}[1], [x3], x5
+ st4.b {v4, v5, v6, v7}[13], [x3], #4
+ st4.h {v4, v5, v6, v7}[2], [x3], #8
+ st4.s {v4, v5, v6, v7}[2], [x3], #16
+ st4.d {v4, v5, v6, v7}[1], [x3], #32
+
+; CHECK: _st4:
+; CHECK: st4.b { v4, v5, v6, v7 }[13], [x3] ; encoding: [0x64,0x34,0x20,0x4d]
+; CHECK: st4.h { v4, v5, v6, v7 }[2], [x3] ; encoding: [0x64,0x70,0x20,0x0d]
+; CHECK: st4.s { v4, v5, v6, v7 }[2], [x3] ; encoding: [0x64,0xa0,0x20,0x4d]
+; CHECK: st4.d { v4, v5, v6, v7 }[1], [x3] ; encoding: [0x64,0xa4,0x20,0x4d]
+; CHECK: st4.b { v4, v5, v6, v7 }[13], [x3], x5 ; encoding: [0x64,0x34,0xa5,0x4d]
+; CHECK: st4.h { v4, v5, v6, v7 }[2], [x3], x5 ; encoding: [0x64,0x70,0xa5,0x0d]
+; CHECK: st4.s { v4, v5, v6, v7 }[2], [x3], x5 ; encoding: [0x64,0xa0,0xa5,0x4d]
+; CHECK: st4.d { v4, v5, v6, v7 }[1], [x3], x5 ; encoding: [0x64,0xa4,0xa5,0x4d]
+; CHECK: st4.b { v4, v5, v6, v7 }[13], [x3], #4 ; encoding: [0x64,0x34,0xbf,0x4d]
+; CHECK: st4.h { v4, v5, v6, v7 }[2], [x3], #8 ; encoding: [0x64,0x70,0xbf,0x0d]
+; CHECK: st4.s { v4, v5, v6, v7 }[2], [x3], #16 ; encoding: [0x64,0xa0,0xbf,0x4d]
+; CHECK: st4.d { v4, v5, v6, v7 }[1], [x3], #32 ; encoding: [0x64,0xa4,0xbf,0x4d]
+
+
+;---------
+; ARM verbose syntax equivalents to the above.
+;---------
+verbose_syntax:
+
+ ld1 { v1.8b }, [x1]
+ ld1 { v2.8b, v3.8b }, [x1]
+ ld1 { v3.8b, v4.8b, v5.8b }, [x1]
+ ld1 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1]
+
+ ld1 { v1.16b }, [x1]
+ ld1 { v2.16b, v3.16b }, [x1]
+ ld1 { v3.16b, v4.16b, v5.16b }, [x1]
+ ld1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1]
+
+ ld1 { v1.4h }, [x1]
+ ld1 { v2.4h, v3.4h }, [x1]
+ ld1 { v3.4h, v4.4h, v5.4h }, [x1]
+ ld1 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1]
+
+ ld1 { v1.8h }, [x1]
+ ld1 { v2.8h, v3.8h }, [x1]
+ ld1 { v3.8h, v4.8h, v5.8h }, [x1]
+ ld1 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1]
+
+ ld1 { v1.2s }, [x1]
+ ld1 { v2.2s, v3.2s }, [x1]
+ ld1 { v3.2s, v4.2s, v5.2s }, [x1]
+ ld1 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1]
+
+ ld1 { v1.4s }, [x1]
+ ld1 { v2.4s, v3.4s }, [x1]
+ ld1 { v3.4s, v4.4s, v5.4s }, [x1]
+ ld1 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1]
+
+ ld1 { v1.1d }, [x1]
+ ld1 { v2.1d, v3.1d }, [x1]
+ ld1 { v3.1d, v4.1d, v5.1d }, [x1]
+ ld1 { v7.1d, v8.1d, v9.1d, v10.1d }, [x1]
+
+ ld1 { v1.2d }, [x1]
+ ld1 { v2.2d, v3.2d }, [x1]
+ ld1 { v3.2d, v4.2d, v5.2d }, [x1]
+ ld1 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1]
+
+ st1 { v1.8b }, [x1]
+ st1 { v2.8b, v3.8b }, [x1]
+ st1 { v3.8b, v4.8b, v5.8b }, [x1]
+ st1 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1]
+
+ st1 { v1.16b }, [x1]
+ st1 { v2.16b, v3.16b }, [x1]
+ st1 { v3.16b, v4.16b, v5.16b }, [x1]
+ st1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1]
+
+ st1 { v1.4h }, [x1]
+ st1 { v2.4h, v3.4h }, [x1]
+ st1 { v3.4h, v4.4h, v5.4h }, [x1]
+ st1 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1]
+
+ st1 { v1.8h }, [x1]
+ st1 { v2.8h, v3.8h }, [x1]
+ st1 { v3.8h, v4.8h, v5.8h }, [x1]
+ st1 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1]
+
+ st1 { v1.2s }, [x1]
+ st1 { v2.2s, v3.2s }, [x1]
+ st1 { v3.2s, v4.2s, v5.2s }, [x1]
+ st1 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1]
+
+ st1 { v1.4s }, [x1]
+ st1 { v2.4s, v3.4s }, [x1]
+ st1 { v3.4s, v4.4s, v5.4s }, [x1]
+ st1 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1]
+
+ st1 { v1.1d }, [x1]
+ st1 { v2.1d, v3.1d }, [x1]
+ st1 { v3.1d, v4.1d, v5.1d }, [x1]
+ st1 { v7.1d, v8.1d, v9.1d, v10.1d }, [x1]
+
+ st1 { v1.2d }, [x1]
+ st1 { v2.2d, v3.2d }, [x1]
+ st1 { v3.2d, v4.2d, v5.2d }, [x1]
+ st1 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1]
+
+ ld2 { v3.8b, v4.8b }, [x19]
+ ld2 { v3.16b, v4.16b }, [x19]
+ ld2 { v3.4h, v4.4h }, [x19]
+ ld2 { v3.8h, v4.8h }, [x19]
+ ld2 { v3.2s, v4.2s }, [x19]
+ ld2 { v3.4s, v4.4s }, [x19]
+ ld2 { v3.2d, v4.2d }, [x19]
+
+ st2 { v3.8b, v4.8b }, [x19]
+ st2 { v3.16b, v4.16b }, [x19]
+ st2 { v3.4h, v4.4h }, [x19]
+ st2 { v3.8h, v4.8h }, [x19]
+ st2 { v3.2s, v4.2s }, [x19]
+ st2 { v3.4s, v4.4s }, [x19]
+ st2 { v3.2d, v4.2d }, [x19]
+
+ ld3 { v2.8b, v3.8b, v4.8b }, [x19]
+ ld3 { v2.16b, v3.16b, v4.16b }, [x19]
+ ld3 { v2.4h, v3.4h, v4.4h }, [x19]
+ ld3 { v2.8h, v3.8h, v4.8h }, [x19]
+ ld3 { v2.2s, v3.2s, v4.2s }, [x19]
+ ld3 { v2.4s, v3.4s, v4.4s }, [x19]
+ ld3 { v2.2d, v3.2d, v4.2d }, [x19]
+
+ st3 { v2.8b, v3.8b, v4.8b }, [x19]
+ st3 { v2.16b, v3.16b, v4.16b }, [x19]
+ st3 { v2.4h, v3.4h, v4.4h }, [x19]
+ st3 { v2.8h, v3.8h, v4.8h }, [x19]
+ st3 { v2.2s, v3.2s, v4.2s }, [x19]
+ st3 { v2.4s, v3.4s, v4.4s }, [x19]
+ st3 { v2.2d, v3.2d, v4.2d }, [x19]
+
+ ld4 { v2.8b, v3.8b, v4.8b, v5.8b }, [x19]
+ ld4 { v2.16b, v3.16b, v4.16b, v5.16b }, [x19]
+ ld4 { v2.4h, v3.4h, v4.4h, v5.4h }, [x19]
+ ld4 { v2.8h, v3.8h, v4.8h, v5.8h }, [x19]
+ ld4 { v2.2s, v3.2s, v4.2s, v5.2s }, [x19]
+ ld4 { v2.4s, v3.4s, v4.4s, v5.4s }, [x19]
+ ld4 { v2.2d, v3.2d, v4.2d, v5.2d }, [x19]
+
+ st4 { v2.8b, v3.8b, v4.8b, v5.8b }, [x19]
+ st4 { v2.16b, v3.16b, v4.16b, v5.16b }, [x19]
+ st4 { v2.4h, v3.4h, v4.4h, v5.4h }, [x19]
+ st4 { v2.8h, v3.8h, v4.8h, v5.8h }, [x19]
+ st4 { v2.2s, v3.2s, v4.2s, v5.2s }, [x19]
+ st4 { v2.4s, v3.4s, v4.4s, v5.4s }, [x19]
+ st4 { v2.2d, v3.2d, v4.2d, v5.2d }, [x19]
+
+ ld1 { v1.8b }, [x1], x15
+ ld1 { v2.8b, v3.8b }, [x1], x15
+ ld1 { v3.8b, v4.8b, v5.8b }, [x1], x15
+ ld1 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], x15
+
+ ld1 { v1.16b }, [x1], x15
+ ld1 { v2.16b, v3.16b }, [x1], x15
+ ld1 { v3.16b, v4.16b, v5.16b }, [x1], x15
+ ld1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], x15
+
+ ld1 { v1.4h }, [x1], x15
+ ld1 { v2.4h, v3.4h }, [x1], x15
+ ld1 { v3.4h, v4.4h, v5.4h }, [x1], x15
+ ld1 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], x15
+
+ ld1 { v1.8h }, [x1], x15
+ ld1 { v2.8h, v3.8h }, [x1], x15
+ ld1 { v3.8h, v4.8h, v5.8h }, [x1], x15
+ ld1 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], x15
+
+ ld1 { v1.2s }, [x1], x15
+ ld1 { v2.2s, v3.2s }, [x1], x15
+ ld1 { v3.2s, v4.2s, v5.2s }, [x1], x15
+ ld1 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], x15
+
+ ld1 { v1.4s }, [x1], x15
+ ld1 { v2.4s, v3.4s }, [x1], x15
+ ld1 { v3.4s, v4.4s, v5.4s }, [x1], x15
+ ld1 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], x15
+
+ ld1 { v1.1d }, [x1], x15
+ ld1 { v2.1d, v3.1d }, [x1], x15
+ ld1 { v3.1d, v4.1d, v5.1d }, [x1], x15
+ ld1 { v7.1d, v8.1d, v9.1d, v10.1d }, [x1], x15
+
+ ld1 { v1.2d }, [x1], x15
+ ld1 { v2.2d, v3.2d }, [x1], x15
+ ld1 { v3.2d, v4.2d, v5.2d }, [x1], x15
+ ld1 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], x15
+
+ st1 { v1.8b }, [x1], x15
+ st1 { v2.8b, v3.8b }, [x1], x15
+ st1 { v3.8b, v4.8b, v5.8b }, [x1], x15
+ st1 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], x15
+
+ st1 { v1.16b }, [x1], x15
+ st1 { v2.16b, v3.16b }, [x1], x15
+ st1 { v3.16b, v4.16b, v5.16b }, [x1], x15
+ st1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], x15
+
+ st1 { v1.4h }, [x1], x15
+ st1 { v2.4h, v3.4h }, [x1], x15
+ st1 { v3.4h, v4.4h, v5.4h }, [x1], x15
+ st1 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], x15
+
+ st1 { v1.8h }, [x1], x15
+ st1 { v2.8h, v3.8h }, [x1], x15
+ st1 { v3.8h, v4.8h, v5.8h }, [x1], x15
+ st1 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], x15
+
+ st1 { v1.2s }, [x1], x15
+ st1 { v2.2s, v3.2s }, [x1], x15
+ st1 { v3.2s, v4.2s, v5.2s }, [x1], x15
+ st1 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], x15
+
+ st1 { v1.4s }, [x1], x15
+ st1 { v2.4s, v3.4s }, [x1], x15
+ st1 { v3.4s, v4.4s, v5.4s }, [x1], x15
+ st1 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], x15
+
+ st1 { v1.1d }, [x1], x15
+ st1 { v2.1d, v3.1d }, [x1], x15
+ st1 { v3.1d, v4.1d, v5.1d }, [x1], x15
+ st1 { v7.1d, v8.1d, v9.1d, v10.1d }, [x1], x15
+
+ st1 { v1.2d }, [x1], x15
+ st1 { v2.2d, v3.2d }, [x1], x15
+ st1 { v3.2d, v4.2d, v5.2d }, [x1], x15
+ st1 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], x15
+
+ ld1 { v1.8b }, [x1], #8
+ ld1 { v2.8b, v3.8b }, [x1], #16
+ ld1 { v3.8b, v4.8b, v5.8b }, [x1], #24
+ ld1 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], #32
+
+ ld1 { v1.16b }, [x1], #16
+ ld1 { v2.16b, v3.16b }, [x1], #32
+ ld1 { v3.16b, v4.16b, v5.16b }, [x1], #48
+ ld1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], #64
+
+ ld1 { v1.4h }, [x1], #8
+ ld1 { v2.4h, v3.4h }, [x1], #16
+ ld1 { v3.4h, v4.4h, v5.4h }, [x1], #24
+ ld1 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], #32
+
+ ld1 { v1.8h }, [x1], #16
+ ld1 { v2.8h, v3.8h }, [x1], #32
+ ld1 { v3.8h, v4.8h, v5.8h }, [x1], #48
+ ld1 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], #64
+
+ ld1 { v1.2s }, [x1], #8
+ ld1 { v2.2s, v3.2s }, [x1], #16
+ ld1 { v3.2s, v4.2s, v5.2s }, [x1], #24
+ ld1 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], #32
+
+ ld1 { v1.4s }, [x1], #16
+ ld1 { v2.4s, v3.4s }, [x1], #32
+ ld1 { v3.4s, v4.4s, v5.4s }, [x1], #48
+ ld1 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], #64
+
+ ld1 { v1.1d }, [x1], #8
+ ld1 { v2.1d, v3.1d }, [x1], #16
+ ld1 { v3.1d, v4.1d, v5.1d }, [x1], #24
+ ld1 { v7.1d, v8.1d, v9.1d, v10.1d }, [x1], #32
+
+ ld1 { v1.2d }, [x1], #16
+ ld1 { v2.2d, v3.2d }, [x1], #32
+ ld1 { v3.2d, v4.2d, v5.2d }, [x1], #48
+ ld1 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], #64
+
+ st1 { v1.8b }, [x1], #8
+ st1 { v2.8b, v3.8b }, [x1], #16
+ st1 { v3.8b, v4.8b, v5.8b }, [x1], #24
+ st1 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], #32
+
+ st1 { v1.16b }, [x1], #16
+ st1 { v2.16b, v3.16b }, [x1], #32
+ st1 { v3.16b, v4.16b, v5.16b }, [x1], #48
+ st1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], #64
+
+ st1 { v1.4h }, [x1], #8
+ st1 { v2.4h, v3.4h }, [x1], #16
+ st1 { v3.4h, v4.4h, v5.4h }, [x1], #24
+ st1 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], #32
+
+ st1 { v1.8h }, [x1], #16
+ st1 { v2.8h, v3.8h }, [x1], #32
+ st1 { v3.8h, v4.8h, v5.8h }, [x1], #48
+ st1 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], #64
+
+ st1 { v1.2s }, [x1], #8
+ st1 { v2.2s, v3.2s }, [x1], #16
+ st1 { v3.2s, v4.2s, v5.2s }, [x1], #24
+ st1 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], #32
+
+ st1 { v1.4s }, [x1], #16
+ st1 { v2.4s, v3.4s }, [x1], #32
+ st1 { v3.4s, v4.4s, v5.4s }, [x1], #48
+ st1 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], #64
+
+ st1 { v1.1d }, [x1], #8
+ st1 { v2.1d, v3.1d }, [x1], #16
+ st1 { v3.1d, v4.1d, v5.1d }, [x1], #24
+ st1 { v7.1d, v8.1d, v9.1d, v10.1d }, [x1], #32
+
+ st1 { v1.2d }, [x1], #16
+ st1 { v2.2d, v3.2d }, [x1], #32
+ st1 { v3.2d, v4.2d, v5.2d }, [x1], #48
+ st1 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], #64
+
+ ld2 { v2.8b, v3.8b }, [x1], x15
+ ld2 { v2.16b, v3.16b }, [x1], x15
+ ld2 { v2.4h, v3.4h }, [x1], x15
+ ld2 { v2.8h, v3.8h }, [x1], x15
+ ld2 { v2.2s, v3.2s }, [x1], x15
+ ld2 { v2.4s, v3.4s }, [x1], x15
+ ld2 { v2.2d, v3.2d }, [x1], x15
+
+ st2 { v2.8b, v3.8b }, [x1], x15
+ st2 { v2.16b, v3.16b }, [x1], x15
+ st2 { v2.4h, v3.4h }, [x1], x15
+ st2 { v2.8h, v3.8h }, [x1], x15
+ st2 { v2.2s, v3.2s }, [x1], x15
+ st2 { v2.4s, v3.4s }, [x1], x15
+ st2 { v2.2d, v3.2d }, [x1], x15
+
+ ld2 { v2.8b, v3.8b }, [x1], #16
+ ld2 { v2.16b, v3.16b }, [x1], #32
+ ld2 { v2.4h, v3.4h }, [x1], #16
+ ld2 { v2.8h, v3.8h }, [x1], #32
+ ld2 { v2.2s, v3.2s }, [x1], #16
+ ld2 { v2.4s, v3.4s }, [x1], #32
+ ld2 { v2.2d, v3.2d }, [x1], #32
+
+ st2 { v2.8b, v3.8b }, [x1], #16
+ st2 { v2.16b, v3.16b }, [x1], #32
+ st2 { v2.4h, v3.4h }, [x1], #16
+ st2 { v2.8h, v3.8h }, [x1], #32
+ st2 { v2.2s, v3.2s }, [x1], #16
+ st2 { v2.4s, v3.4s }, [x1], #32
+ st2 { v2.2d, v3.2d }, [x1], #32
+
+ ld3 { v3.8b, v4.8b, v5.8b }, [x1], x15
+ ld3 { v3.16b, v4.16b, v5.16b }, [x1], x15
+ ld3 { v3.4h, v4.4h, v5.4h }, [x1], x15
+ ld3 { v3.8h, v4.8h, v5.8h }, [x1], x15
+ ld3 { v3.2s, v4.2s, v5.2s }, [x1], x15
+ ld3 { v3.4s, v4.4s, v5.4s }, [x1], x15
+ ld3 { v3.2d, v4.2d, v5.2d }, [x1], x15
+
+ st3 { v3.8b, v4.8b, v5.8b }, [x1], x15
+ st3 { v3.16b, v4.16b, v5.16b }, [x1], x15
+ st3 { v3.4h, v4.4h, v5.4h }, [x1], x15
+ st3 { v3.8h, v4.8h, v5.8h }, [x1], x15
+ st3 { v3.2s, v4.2s, v5.2s }, [x1], x15
+ st3 { v3.4s, v4.4s, v5.4s }, [x1], x15
+ st3 { v3.2d, v4.2d, v5.2d }, [x1], x15
+ ld3 { v3.8b, v4.8b, v5.8b }, [x1], #24
+
+ ld3 { v3.16b, v4.16b, v5.16b }, [x1], #48
+ ld3 { v3.4h, v4.4h, v5.4h }, [x1], #24
+ ld3 { v3.8h, v4.8h, v5.8h }, [x1], #48
+ ld3 { v3.2s, v4.2s, v5.2s }, [x1], #24
+ ld3 { v3.4s, v4.4s, v5.4s }, [x1], #48
+ ld3 { v3.2d, v4.2d, v5.2d }, [x1], #48
+
+ st3 { v3.8b, v4.8b, v5.8b }, [x1], #24
+ st3 { v3.16b, v4.16b, v5.16b }, [x1], #48
+ st3 { v3.4h, v4.4h, v5.4h }, [x1], #24
+ st3 { v3.8h, v4.8h, v5.8h }, [x1], #48
+ st3 { v3.2s, v4.2s, v5.2s }, [x1], #24
+ st3 { v3.4s, v4.4s, v5.4s }, [x1], #48
+ st3 { v3.2d, v4.2d, v5.2d }, [x1], #48
+
+ ld4 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], x15
+ ld4 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], x15
+ ld4 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], x15
+ ld4 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], x15
+ ld4 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], x15
+ ld4 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], x15
+ ld4 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], x15
+
+ st4 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], x15
+ st4 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], x15
+ st4 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], x15
+ st4 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], x15
+ st4 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], x15
+ st4 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], x15
+ st4 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], x15
+
+ ld4 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], #32
+ ld4 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], #64
+ ld4 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], #32
+ ld4 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], #64
+ ld4 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], #32
+ ld4 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], #64
+ ld4 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], #64
+
+ st4 { v4.8b, v5.8b, v6.8b, v7.8b }, [x1], #32
+ st4 { v4.16b, v5.16b, v6.16b, v7.16b }, [x1], #64
+ st4 { v7.4h, v8.4h, v9.4h, v10.4h }, [x1], #32
+ st4 { v7.8h, v8.8h, v9.8h, v10.8h }, [x1], #64
+ st4 { v7.2s, v8.2s, v9.2s, v10.2s }, [x1], #32
+ st4 { v7.4s, v8.4s, v9.4s, v10.4s }, [x1], #64
+ st4 { v7.2d, v8.2d, v9.2d, v10.2d }, [x1], #64
+
+
+ ld1r { v12.8b }, [x2]
+ ld1r { v12.8b }, [x2], x3
+ ld1r { v12.16b }, [x2]
+ ld1r { v12.16b }, [x2], x3
+ ld1r { v12.4h }, [x2]
+ ld1r { v12.4h }, [x2], x3
+ ld1r { v12.8h }, [x2]
+ ld1r { v12.8h }, [x2], x3
+ ld1r { v12.2s }, [x2]
+ ld1r { v12.2s }, [x2], x3
+ ld1r { v12.4s }, [x2]
+ ld1r { v12.4s }, [x2], x3
+ ld1r { v12.1d }, [x2]
+ ld1r { v12.1d }, [x2], x3
+ ld1r { v12.2d }, [x2]
+ ld1r { v12.2d }, [x2], x3
+
+ ld1r { v12.8b }, [x2], #1
+ ld1r { v12.16b }, [x2], #1
+ ld1r { v12.4h }, [x2], #2
+ ld1r { v12.8h }, [x2], #2
+ ld1r { v12.2s }, [x2], #4
+ ld1r { v12.4s }, [x2], #4
+ ld1r { v12.1d }, [x2], #8
+ ld1r { v12.2d }, [x2], #8
+ ld2r { v3.8b, v4.8b }, [x2]
+ ld2r { v3.8b, v4.8b }, [x2], x3
+ ld2r { v3.16b, v4.16b }, [x2]
+ ld2r { v3.16b, v4.16b }, [x2], x3
+ ld2r { v3.4h, v4.4h }, [x2]
+ ld2r { v3.4h, v4.4h }, [x2], x3
+ ld2r { v3.8h, v4.8h }, [x2]
+ ld2r { v3.8h, v4.8h }, [x2], x3
+ ld2r { v3.2s, v4.2s }, [x2]
+ ld2r { v3.2s, v4.2s }, [x2], x3
+ ld2r { v3.4s, v4.4s }, [x2]
+ ld2r { v3.4s, v4.4s }, [x2], x3
+ ld2r { v3.1d, v4.1d }, [x2]
+ ld2r { v3.1d, v4.1d }, [x2], x3
+ ld2r { v3.2d, v4.2d }, [x2]
+ ld2r { v3.2d, v4.2d }, [x2], x3
+
+ ld2r { v3.8b, v4.8b }, [x2], #2
+ ld2r { v3.16b, v4.16b }, [x2], #2
+ ld2r { v3.4h, v4.4h }, [x2], #4
+ ld2r { v3.8h, v4.8h }, [x2], #4
+ ld2r { v3.2s, v4.2s }, [x2], #8
+ ld2r { v3.4s, v4.4s }, [x2], #8
+ ld2r { v3.1d, v4.1d }, [x2], #16
+ ld2r { v3.2d, v4.2d }, [x2], #16
+
+ ld3r { v2.8b, v3.8b, v4.8b }, [x2]
+ ld3r { v2.8b, v3.8b, v4.8b }, [x2], x3
+ ld3r { v2.16b, v3.16b, v4.16b }, [x2]
+ ld3r { v2.16b, v3.16b, v4.16b }, [x2], x3
+ ld3r { v2.4h, v3.4h, v4.4h }, [x2]
+ ld3r { v2.4h, v3.4h, v4.4h }, [x2], x3
+ ld3r { v2.8h, v3.8h, v4.8h }, [x2]
+ ld3r { v2.8h, v3.8h, v4.8h }, [x2], x3
+ ld3r { v2.2s, v3.2s, v4.2s }, [x2]
+ ld3r { v2.2s, v3.2s, v4.2s }, [x2], x3
+ ld3r { v2.4s, v3.4s, v4.4s }, [x2]
+ ld3r { v2.4s, v3.4s, v4.4s }, [x2], x3
+ ld3r { v2.1d, v3.1d, v4.1d }, [x2]
+ ld3r { v2.1d, v3.1d, v4.1d }, [x2], x3
+ ld3r { v2.2d, v3.2d, v4.2d }, [x2]
+ ld3r { v2.2d, v3.2d, v4.2d }, [x2], x3
+
+ ld3r { v2.8b, v3.8b, v4.8b }, [x2], #3
+ ld3r { v2.16b, v3.16b, v4.16b }, [x2], #3
+ ld3r { v2.4h, v3.4h, v4.4h }, [x2], #6
+ ld3r { v2.8h, v3.8h, v4.8h }, [x2], #6
+ ld3r { v2.2s, v3.2s, v4.2s }, [x2], #12
+ ld3r { v2.4s, v3.4s, v4.4s }, [x2], #12
+ ld3r { v2.1d, v3.1d, v4.1d }, [x2], #24
+ ld3r { v2.2d, v3.2d, v4.2d }, [x2], #24
+
+ ld4r { v2.8b, v3.8b, v4.8b, v5.8b }, [x2]
+ ld4r { v2.8b, v3.8b, v4.8b, v5.8b }, [x2], x3
+ ld4r { v2.16b, v3.16b, v4.16b, v5.16b }, [x2]
+ ld4r { v2.16b, v3.16b, v4.16b, v5.16b }, [x2], x3
+ ld4r { v2.4h, v3.4h, v4.4h, v5.4h }, [x2]
+ ld4r { v2.4h, v3.4h, v4.4h, v5.4h }, [x2], x3
+ ld4r { v2.8h, v3.8h, v4.8h, v5.8h }, [x2]
+ ld4r { v2.8h, v3.8h, v4.8h, v5.8h }, [x2], x3
+ ld4r { v2.2s, v3.2s, v4.2s, v5.2s }, [x2]
+ ld4r { v2.2s, v3.2s, v4.2s, v5.2s }, [x2], x3
+ ld4r { v2.4s, v3.4s, v4.4s, v5.4s }, [x2]
+ ld4r { v2.4s, v3.4s, v4.4s, v5.4s }, [x2], x3
+ ld4r { v2.1d, v3.1d, v4.1d, v5.1d }, [x2]
+ ld4r { v2.1d, v3.1d, v4.1d, v5.1d }, [x2], x3
+ ld4r { v2.2d, v3.2d, v4.2d, v5.2d }, [x2]
+ ld4r { v2.2d, v3.2d, v4.2d, v5.2d }, [x2], x3
+
+ ld4r { v2.8b, v3.8b, v4.8b, v5.8b }, [x2], #4
+ ld4r { v2.16b, v3.16b, v4.16b, v5.16b }, [x2], #4
+ ld4r { v2.4h, v3.4h, v4.4h, v5.4h }, [x2], #8
+ ld4r { v2.8h, v3.8h, v4.8h, v5.8h }, [x2], #8
+ ld4r { v2.2s, v3.2s, v4.2s, v5.2s }, [x2], #16
+ ld4r { v2.4s, v3.4s, v4.4s, v5.4s }, [x2], #16
+ ld4r { v2.1d, v3.1d, v4.1d, v5.1d }, [x2], #32
+ ld4r { v2.2d, v3.2d, v4.2d, v5.2d }, [x2], #32
+
+ ld1 { v6.b }[13], [x3]
+ ld1 { v6.h }[2], [x3]
+ ld1 { v6.s }[2], [x3]
+ ld1 { v6.d }[1], [x3]
+ ld1 { v6.b }[13], [x3], x5
+ ld1 { v6.h }[2], [x3], x5
+ ld1 { v6.s }[2], [x3], x5
+ ld1 { v6.d }[1], [x3], x5
+ ld1 { v6.b }[13], [x3], #1
+ ld1 { v6.h }[2], [x3], #2
+ ld1 { v6.s }[2], [x3], #4
+ ld1 { v6.d }[1], [x3], #8
+
+ ld2 { v5.b, v6.b }[13], [x3]
+ ld2 { v5.h, v6.h }[2], [x3]
+ ld2 { v5.s, v6.s }[2], [x3]
+ ld2 { v5.d, v6.d }[1], [x3]
+ ld2 { v5.b, v6.b }[13], [x3], x5
+ ld2 { v5.h, v6.h }[2], [x3], x5
+ ld2 { v5.s, v6.s }[2], [x3], x5
+ ld2 { v5.d, v6.d }[1], [x3], x5
+ ld2 { v5.b, v6.b }[13], [x3], #2
+ ld2 { v5.h, v6.h }[2], [x3], #4
+ ld2 { v5.s, v6.s }[2], [x3], #8
+ ld2 { v5.d, v6.d }[1], [x3], #16
+
+ ld3 { v7.b, v8.b, v9.b }[13], [x3]
+ ld3 { v7.h, v8.h, v9.h }[2], [x3]
+ ld3 { v7.s, v8.s, v9.s }[2], [x3]
+ ld3 { v7.d, v8.d, v9.d }[1], [x3]
+ ld3 { v7.b, v8.b, v9.b }[13], [x3], x5
+ ld3 { v7.h, v8.h, v9.h }[2], [x3], x5
+ ld3 { v7.s, v8.s, v9.s }[2], [x3], x5
+ ld3 { v7.d, v8.d, v9.d }[1], [x3], x5
+ ld3 { v7.b, v8.b, v9.b }[13], [x3], #3
+ ld3 { v7.h, v8.h, v9.h }[2], [x3], #6
+ ld3 { v7.s, v8.s, v9.s }[2], [x3], #12
+ ld3 { v7.d, v8.d, v9.d }[1], [x3], #24
+
+ ld4 { v7.b, v8.b, v9.b, v10.b }[13], [x3]
+ ld4 { v7.h, v8.h, v9.h, v10.h }[2], [x3]
+ ld4 { v7.s, v8.s, v9.s, v10.s }[2], [x3]
+ ld4 { v7.d, v8.d, v9.d, v10.d }[1], [x3]
+ ld4 { v7.b, v8.b, v9.b, v10.b }[13], [x3], x5
+ ld4 { v7.h, v8.h, v9.h, v10.h }[2], [x3], x5
+ ld4 { v7.s, v8.s, v9.s, v10.s }[2], [x3], x5
+ ld4 { v7.d, v8.d, v9.d, v10.d }[1], [x3], x5
+ ld4 { v7.b, v8.b, v9.b, v10.b }[13], [x3], #4
+ ld4 { v7.h, v8.h, v9.h, v10.h }[2], [x3], #8
+ ld4 { v7.s, v8.s, v9.s, v10.s }[2], [x3], #16
+ ld4 { v7.d, v8.d, v9.d, v10.d }[1], [x3], #32
+
+ st1 { v6.b }[13], [x3]
+ st1 { v6.h }[2], [x3]
+ st1 { v6.s }[2], [x3]
+ st1 { v6.d }[1], [x3]
+ st1 { v6.b }[13], [x3], x5
+ st1 { v6.h }[2], [x3], x5
+ st1 { v6.s }[2], [x3], x5
+ st1 { v6.d }[1], [x3], x5
+ st1 { v6.b }[13], [x3], #1
+ st1 { v6.h }[2], [x3], #2
+ st1 { v6.s }[2], [x3], #4
+ st1 { v6.d }[1], [x3], #8
+
+
+ st2 { v5.b, v6.b }[13], [x3]
+ st2 { v5.h, v6.h }[2], [x3]
+ st2 { v5.s, v6.s }[2], [x3]
+ st2 { v5.d, v6.d }[1], [x3]
+ st2 { v5.b, v6.b }[13], [x3], x5
+ st2 { v5.h, v6.h }[2], [x3], x5
+ st2 { v5.s, v6.s }[2], [x3], x5
+ st2 { v5.d, v6.d }[1], [x3], x5
+ st2 { v5.b, v6.b }[13], [x3], #2
+ st2 { v5.h, v6.h }[2], [x3], #4
+ st2 { v5.s, v6.s }[2], [x3], #8
+ st2 { v5.d, v6.d }[1], [x3], #16
+
+ st3 { v7.b, v8.b, v9.b }[13], [x3]
+ st3 { v7.h, v8.h, v9.h }[2], [x3]
+ st3 { v7.s, v8.s, v9.s }[2], [x3]
+ st3 { v7.d, v8.d, v9.d }[1], [x3]
+ st3 { v7.b, v8.b, v9.b }[13], [x3], x5
+ st3 { v7.h, v8.h, v9.h }[2], [x3], x5
+ st3 { v7.s, v8.s, v9.s }[2], [x3], x5
+ st3 { v7.d, v8.d, v9.d }[1], [x3], x5
+ st3 { v7.b, v8.b, v9.b }[13], [x3], #3
+ st3 { v7.h, v8.h, v9.h }[2], [x3], #6
+ st3 { v7.s, v8.s, v9.s }[2], [x3], #12
+ st3 { v7.d, v8.d, v9.d }[1], [x3], #24
+
+ st4 { v7.b, v8.b, v9.b, v10.b }[13], [x3]
+ st4 { v7.h, v8.h, v9.h, v10.h }[2], [x3]
+ st4 { v7.s, v8.s, v9.s, v10.s }[2], [x3]
+ st4 { v7.d, v8.d, v9.d, v10.d }[1], [x3]
+ st4 { v7.b, v8.b, v9.b, v10.b }[13], [x3], x5
+ st4 { v7.h, v8.h, v9.h, v10.h }[2], [x3], x5
+ st4 { v7.s, v8.s, v9.s, v10.s }[2], [x3], x5
+ st4 { v7.d, v8.d, v9.d, v10.d }[1], [x3], x5
+ st4 { v7.b, v8.b, v9.b, v10.b }[13], [x3], #4
+ st4 { v7.h, v8.h, v9.h, v10.h }[2], [x3], #8
+ st4 { v7.s, v8.s, v9.s, v10.s }[2], [x3], #16
+ st4 { v7.d, v8.d, v9.d, v10.d }[1], [x3], #32
+
+; CHECK: ld1.8b { v1 }, [x1] ; encoding: [0x21,0x70,0x40,0x0c]
+; CHECK: ld1.8b { v2, v3 }, [x1] ; encoding: [0x22,0xa0,0x40,0x0c]
+; CHECK: ld1.8b { v3, v4, v5 }, [x1] ; encoding: [0x23,0x60,0x40,0x0c]
+; CHECK: ld1.8b { v4, v5, v6, v7 }, [x1] ; encoding: [0x24,0x20,0x40,0x0c]
+; CHECK: ld1.16b { v1 }, [x1] ; encoding: [0x21,0x70,0x40,0x4c]
+; CHECK: ld1.16b { v2, v3 }, [x1] ; encoding: [0x22,0xa0,0x40,0x4c]
+; CHECK: ld1.16b { v3, v4, v5 }, [x1] ; encoding: [0x23,0x60,0x40,0x4c]
+; CHECK: ld1.16b { v4, v5, v6, v7 }, [x1] ; encoding: [0x24,0x20,0x40,0x4c]
+; CHECK: ld1.4h { v1 }, [x1] ; encoding: [0x21,0x74,0x40,0x0c]
+; CHECK: ld1.4h { v2, v3 }, [x1] ; encoding: [0x22,0xa4,0x40,0x0c]
+; CHECK: ld1.4h { v3, v4, v5 }, [x1] ; encoding: [0x23,0x64,0x40,0x0c]
+; CHECK: ld1.4h { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x24,0x40,0x0c]
+; CHECK: ld1.8h { v1 }, [x1] ; encoding: [0x21,0x74,0x40,0x4c]
+; CHECK: ld1.8h { v2, v3 }, [x1] ; encoding: [0x22,0xa4,0x40,0x4c]
+; CHECK: ld1.8h { v3, v4, v5 }, [x1] ; encoding: [0x23,0x64,0x40,0x4c]
+; CHECK: ld1.8h { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x24,0x40,0x4c]
+; CHECK: ld1.2s { v1 }, [x1] ; encoding: [0x21,0x78,0x40,0x0c]
+; CHECK: ld1.2s { v2, v3 }, [x1] ; encoding: [0x22,0xa8,0x40,0x0c]
+; CHECK: ld1.2s { v3, v4, v5 }, [x1] ; encoding: [0x23,0x68,0x40,0x0c]
+; CHECK: ld1.2s { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x28,0x40,0x0c]
+; CHECK: ld1.4s { v1 }, [x1] ; encoding: [0x21,0x78,0x40,0x4c]
+; CHECK: ld1.4s { v2, v3 }, [x1] ; encoding: [0x22,0xa8,0x40,0x4c]
+; CHECK: ld1.4s { v3, v4, v5 }, [x1] ; encoding: [0x23,0x68,0x40,0x4c]
+; CHECK: ld1.4s { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x28,0x40,0x4c]
+; CHECK: ld1.1d { v1 }, [x1] ; encoding: [0x21,0x7c,0x40,0x0c]
+; CHECK: ld1.1d { v2, v3 }, [x1] ; encoding: [0x22,0xac,0x40,0x0c]
+; CHECK: ld1.1d { v3, v4, v5 }, [x1] ; encoding: [0x23,0x6c,0x40,0x0c]
+; CHECK: ld1.1d { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x2c,0x40,0x0c]
+; CHECK: ld1.2d { v1 }, [x1] ; encoding: [0x21,0x7c,0x40,0x4c]
+; CHECK: ld1.2d { v2, v3 }, [x1] ; encoding: [0x22,0xac,0x40,0x4c]
+; CHECK: ld1.2d { v3, v4, v5 }, [x1] ; encoding: [0x23,0x6c,0x40,0x4c]
+; CHECK: ld1.2d { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x2c,0x40,0x4c]
+; CHECK: st1.8b { v1 }, [x1] ; encoding: [0x21,0x70,0x00,0x0c]
+; CHECK: st1.8b { v2, v3 }, [x1] ; encoding: [0x22,0xa0,0x00,0x0c]
+; CHECK: st1.8b { v3, v4, v5 }, [x1] ; encoding: [0x23,0x60,0x00,0x0c]
+; CHECK: st1.8b { v4, v5, v6, v7 }, [x1] ; encoding: [0x24,0x20,0x00,0x0c]
+; CHECK: st1.16b { v1 }, [x1] ; encoding: [0x21,0x70,0x00,0x4c]
+; CHECK: st1.16b { v2, v3 }, [x1] ; encoding: [0x22,0xa0,0x00,0x4c]
+; CHECK: st1.16b { v3, v4, v5 }, [x1] ; encoding: [0x23,0x60,0x00,0x4c]
+; CHECK: st1.16b { v4, v5, v6, v7 }, [x1] ; encoding: [0x24,0x20,0x00,0x4c]
+; CHECK: st1.4h { v1 }, [x1] ; encoding: [0x21,0x74,0x00,0x0c]
+; CHECK: st1.4h { v2, v3 }, [x1] ; encoding: [0x22,0xa4,0x00,0x0c]
+; CHECK: st1.4h { v3, v4, v5 }, [x1] ; encoding: [0x23,0x64,0x00,0x0c]
+; CHECK: st1.4h { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x24,0x00,0x0c]
+; CHECK: st1.8h { v1 }, [x1] ; encoding: [0x21,0x74,0x00,0x4c]
+; CHECK: st1.8h { v2, v3 }, [x1] ; encoding: [0x22,0xa4,0x00,0x4c]
+; CHECK: st1.8h { v3, v4, v5 }, [x1] ; encoding: [0x23,0x64,0x00,0x4c]
+; CHECK: st1.8h { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x24,0x00,0x4c]
+; CHECK: st1.2s { v1 }, [x1] ; encoding: [0x21,0x78,0x00,0x0c]
+; CHECK: st1.2s { v2, v3 }, [x1] ; encoding: [0x22,0xa8,0x00,0x0c]
+; CHECK: st1.2s { v3, v4, v5 }, [x1] ; encoding: [0x23,0x68,0x00,0x0c]
+; CHECK: st1.2s { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x28,0x00,0x0c]
+; CHECK: st1.4s { v1 }, [x1] ; encoding: [0x21,0x78,0x00,0x4c]
+; CHECK: st1.4s { v2, v3 }, [x1] ; encoding: [0x22,0xa8,0x00,0x4c]
+; CHECK: st1.4s { v3, v4, v5 }, [x1] ; encoding: [0x23,0x68,0x00,0x4c]
+; CHECK: st1.4s { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x28,0x00,0x4c]
+; CHECK: st1.1d { v1 }, [x1] ; encoding: [0x21,0x7c,0x00,0x0c]
+; CHECK: st1.1d { v2, v3 }, [x1] ; encoding: [0x22,0xac,0x00,0x0c]
+; CHECK: st1.1d { v3, v4, v5 }, [x1] ; encoding: [0x23,0x6c,0x00,0x0c]
+; CHECK: st1.1d { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x2c,0x00,0x0c]
+; CHECK: st1.2d { v1 }, [x1] ; encoding: [0x21,0x7c,0x00,0x4c]
+; CHECK: st1.2d { v2, v3 }, [x1] ; encoding: [0x22,0xac,0x00,0x4c]
+; CHECK: st1.2d { v3, v4, v5 }, [x1] ; encoding: [0x23,0x6c,0x00,0x4c]
+; CHECK: st1.2d { v7, v8, v9, v10 }, [x1] ; encoding: [0x27,0x2c,0x00,0x4c]
+; CHECK: ld2.8b { v3, v4 }, [x19] ; encoding: [0x63,0x82,0x40,0x0c]
+; CHECK: ld2.16b { v3, v4 }, [x19] ; encoding: [0x63,0x82,0x40,0x4c]
+; CHECK: ld2.4h { v3, v4 }, [x19] ; encoding: [0x63,0x86,0x40,0x0c]
+; CHECK: ld2.8h { v3, v4 }, [x19] ; encoding: [0x63,0x86,0x40,0x4c]
+; CHECK: ld2.2s { v3, v4 }, [x19] ; encoding: [0x63,0x8a,0x40,0x0c]
+; CHECK: ld2.4s { v3, v4 }, [x19] ; encoding: [0x63,0x8a,0x40,0x4c]
+; CHECK: ld2.2d { v3, v4 }, [x19] ; encoding: [0x63,0x8e,0x40,0x4c]
+; CHECK: st2.8b { v3, v4 }, [x19] ; encoding: [0x63,0x82,0x00,0x0c]
+; CHECK: st2.16b { v3, v4 }, [x19] ; encoding: [0x63,0x82,0x00,0x4c]
+; CHECK: st2.4h { v3, v4 }, [x19] ; encoding: [0x63,0x86,0x00,0x0c]
+; CHECK: st2.8h { v3, v4 }, [x19] ; encoding: [0x63,0x86,0x00,0x4c]
+; CHECK: st2.2s { v3, v4 }, [x19] ; encoding: [0x63,0x8a,0x00,0x0c]
+; CHECK: st2.4s { v3, v4 }, [x19] ; encoding: [0x63,0x8a,0x00,0x4c]
+; CHECK: st2.2d { v3, v4 }, [x19] ; encoding: [0x63,0x8e,0x00,0x4c]
+; CHECK: ld3.8b { v2, v3, v4 }, [x19] ; encoding: [0x62,0x42,0x40,0x0c]
+; CHECK: ld3.16b { v2, v3, v4 }, [x19] ; encoding: [0x62,0x42,0x40,0x4c]
+; CHECK: ld3.4h { v2, v3, v4 }, [x19] ; encoding: [0x62,0x46,0x40,0x0c]
+; CHECK: ld3.8h { v2, v3, v4 }, [x19] ; encoding: [0x62,0x46,0x40,0x4c]
+; CHECK: ld3.2s { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4a,0x40,0x0c]
+; CHECK: ld3.4s { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4a,0x40,0x4c]
+; CHECK: ld3.2d { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4e,0x40,0x4c]
+; CHECK: st3.8b { v2, v3, v4 }, [x19] ; encoding: [0x62,0x42,0x00,0x0c]
+; CHECK: st3.16b { v2, v3, v4 }, [x19] ; encoding: [0x62,0x42,0x00,0x4c]
+; CHECK: st3.4h { v2, v3, v4 }, [x19] ; encoding: [0x62,0x46,0x00,0x0c]
+; CHECK: st3.8h { v2, v3, v4 }, [x19] ; encoding: [0x62,0x46,0x00,0x4c]
+; CHECK: st3.2s { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4a,0x00,0x0c]
+; CHECK: st3.4s { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4a,0x00,0x4c]
+; CHECK: st3.2d { v2, v3, v4 }, [x19] ; encoding: [0x62,0x4e,0x00,0x4c]
+; CHECK: ld4.8b { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x02,0x40,0x0c]
+; CHECK: ld4.16b { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x02,0x40,0x4c]
+; CHECK: ld4.4h { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x06,0x40,0x0c]
+; CHECK: ld4.8h { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x06,0x40,0x4c]
+; CHECK: ld4.2s { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x0a,0x40,0x0c]
+; CHECK: ld4.4s { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x0a,0x40,0x4c]
+; CHECK: ld4.2d { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x0e,0x40,0x4c]
+; CHECK: st4.8b { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x02,0x00,0x0c]
+; CHECK: st4.16b { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x02,0x00,0x4c]
+; CHECK: st4.4h { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x06,0x00,0x0c]
+; CHECK: st4.8h { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x06,0x00,0x4c]
+; CHECK: st4.2s { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x0a,0x00,0x0c]
+; CHECK: st4.4s { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x0a,0x00,0x4c]
+; CHECK: st4.2d { v2, v3, v4, v5 }, [x19] ; encoding: [0x62,0x0e,0x00,0x4c]
+; CHECK: ld1.8b { v1 }, [x1], x15 ; encoding: [0x21,0x70,0xcf,0x0c]
+; CHECK: ld1.8b { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa0,0xcf,0x0c]
+; CHECK: ld1.8b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x60,0xcf,0x0c]
+; CHECK: ld1.8b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x20,0xcf,0x0c]
+; CHECK: ld1.16b { v1 }, [x1], x15 ; encoding: [0x21,0x70,0xcf,0x4c]
+; CHECK: ld1.16b { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa0,0xcf,0x4c]
+; CHECK: ld1.16b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x60,0xcf,0x4c]
+; CHECK: ld1.16b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x20,0xcf,0x4c]
+; CHECK: ld1.4h { v1 }, [x1], x15 ; encoding: [0x21,0x74,0xcf,0x0c]
+; CHECK: ld1.4h { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa4,0xcf,0x0c]
+; CHECK: ld1.4h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x64,0xcf,0x0c]
+; CHECK: ld1.4h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x24,0xcf,0x0c]
+; CHECK: ld1.8h { v1 }, [x1], x15 ; encoding: [0x21,0x74,0xcf,0x4c]
+; CHECK: ld1.8h { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa4,0xcf,0x4c]
+; CHECK: ld1.8h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x64,0xcf,0x4c]
+; CHECK: ld1.8h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x24,0xcf,0x4c]
+; CHECK: ld1.2s { v1 }, [x1], x15 ; encoding: [0x21,0x78,0xcf,0x0c]
+; CHECK: ld1.2s { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa8,0xcf,0x0c]
+; CHECK: ld1.2s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x68,0xcf,0x0c]
+; CHECK: ld1.2s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x28,0xcf,0x0c]
+; CHECK: ld1.4s { v1 }, [x1], x15 ; encoding: [0x21,0x78,0xcf,0x4c]
+; CHECK: ld1.4s { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa8,0xcf,0x4c]
+; CHECK: ld1.4s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x68,0xcf,0x4c]
+; CHECK: ld1.4s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x28,0xcf,0x4c]
+; CHECK: ld1.1d { v1 }, [x1], x15 ; encoding: [0x21,0x7c,0xcf,0x0c]
+; CHECK: ld1.1d { v2, v3 }, [x1], x15 ; encoding: [0x22,0xac,0xcf,0x0c]
+; CHECK: ld1.1d { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x6c,0xcf,0x0c]
+; CHECK: ld1.1d { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x2c,0xcf,0x0c]
+; CHECK: ld1.2d { v1 }, [x1], x15 ; encoding: [0x21,0x7c,0xcf,0x4c]
+; CHECK: ld1.2d { v2, v3 }, [x1], x15 ; encoding: [0x22,0xac,0xcf,0x4c]
+; CHECK: ld1.2d { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x6c,0xcf,0x4c]
+; CHECK: ld1.2d { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x2c,0xcf,0x4c]
+; CHECK: st1.8b { v1 }, [x1], x15 ; encoding: [0x21,0x70,0x8f,0x0c]
+; CHECK: st1.8b { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa0,0x8f,0x0c]
+; CHECK: st1.8b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x60,0x8f,0x0c]
+; CHECK: st1.8b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x20,0x8f,0x0c]
+; CHECK: st1.16b { v1 }, [x1], x15 ; encoding: [0x21,0x70,0x8f,0x4c]
+; CHECK: st1.16b { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa0,0x8f,0x4c]
+; CHECK: st1.16b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x60,0x8f,0x4c]
+; CHECK: st1.16b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x20,0x8f,0x4c]
+; CHECK: st1.4h { v1 }, [x1], x15 ; encoding: [0x21,0x74,0x8f,0x0c]
+; CHECK: st1.4h { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa4,0x8f,0x0c]
+; CHECK: st1.4h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x64,0x8f,0x0c]
+; CHECK: st1.4h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x24,0x8f,0x0c]
+; CHECK: st1.8h { v1 }, [x1], x15 ; encoding: [0x21,0x74,0x8f,0x4c]
+; CHECK: st1.8h { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa4,0x8f,0x4c]
+; CHECK: st1.8h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x64,0x8f,0x4c]
+; CHECK: st1.8h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x24,0x8f,0x4c]
+; CHECK: st1.2s { v1 }, [x1], x15 ; encoding: [0x21,0x78,0x8f,0x0c]
+; CHECK: st1.2s { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa8,0x8f,0x0c]
+; CHECK: st1.2s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x68,0x8f,0x0c]
+; CHECK: st1.2s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x28,0x8f,0x0c]
+; CHECK: st1.4s { v1 }, [x1], x15 ; encoding: [0x21,0x78,0x8f,0x4c]
+; CHECK: st1.4s { v2, v3 }, [x1], x15 ; encoding: [0x22,0xa8,0x8f,0x4c]
+; CHECK: st1.4s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x68,0x8f,0x4c]
+; CHECK: st1.4s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x28,0x8f,0x4c]
+; CHECK: st1.1d { v1 }, [x1], x15 ; encoding: [0x21,0x7c,0x8f,0x0c]
+; CHECK: st1.1d { v2, v3 }, [x1], x15 ; encoding: [0x22,0xac,0x8f,0x0c]
+; CHECK: st1.1d { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x6c,0x8f,0x0c]
+; CHECK: st1.1d { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x2c,0x8f,0x0c]
+; CHECK: st1.2d { v1 }, [x1], x15 ; encoding: [0x21,0x7c,0x8f,0x4c]
+; CHECK: st1.2d { v2, v3 }, [x1], x15 ; encoding: [0x22,0xac,0x8f,0x4c]
+; CHECK: st1.2d { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x6c,0x8f,0x4c]
+; CHECK: st1.2d { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x2c,0x8f,0x4c]
+; CHECK: ld1.8b { v1 }, [x1], #8 ; encoding: [0x21,0x70,0xdf,0x0c]
+; CHECK: ld1.8b { v2, v3 }, [x1], #16 ; encoding: [0x22,0xa0,0xdf,0x0c]
+; CHECK: ld1.8b { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x60,0xdf,0x0c]
+; CHECK: ld1.8b { v4, v5, v6, v7 }, [x1], #32 ; encoding: [0x24,0x20,0xdf,0x0c]
+; CHECK: ld1.16b { v1 }, [x1], #16 ; encoding: [0x21,0x70,0xdf,0x4c]
+; CHECK: ld1.16b { v2, v3 }, [x1], #32 ; encoding: [0x22,0xa0,0xdf,0x4c]
+; CHECK: ld1.16b { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x60,0xdf,0x4c]
+; CHECK: ld1.16b { v4, v5, v6, v7 }, [x1], #64 ; encoding: [0x24,0x20,0xdf,0x4c]
+; CHECK: ld1.4h { v1 }, [x1], #8 ; encoding: [0x21,0x74,0xdf,0x0c]
+; CHECK: ld1.4h { v2, v3 }, [x1], #16 ; encoding: [0x22,0xa4,0xdf,0x0c]
+; CHECK: ld1.4h { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x64,0xdf,0x0c]
+; CHECK: ld1.4h { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x24,0xdf,0x0c]
+; CHECK: ld1.8h { v1 }, [x1], #16 ; encoding: [0x21,0x74,0xdf,0x4c]
+; CHECK: ld1.8h { v2, v3 }, [x1], #32 ; encoding: [0x22,0xa4,0xdf,0x4c]
+; CHECK: ld1.8h { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x64,0xdf,0x4c]
+; CHECK: ld1.8h { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x24,0xdf,0x4c]
+; CHECK: ld1.2s { v1 }, [x1], #8 ; encoding: [0x21,0x78,0xdf,0x0c]
+; CHECK: ld1.2s { v2, v3 }, [x1], #16 ; encoding: [0x22,0xa8,0xdf,0x0c]
+; CHECK: ld1.2s { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x68,0xdf,0x0c]
+; CHECK: ld1.2s { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x28,0xdf,0x0c]
+; CHECK: ld1.4s { v1 }, [x1], #16 ; encoding: [0x21,0x78,0xdf,0x4c]
+; CHECK: ld1.4s { v2, v3 }, [x1], #32 ; encoding: [0x22,0xa8,0xdf,0x4c]
+; CHECK: ld1.4s { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x68,0xdf,0x4c]
+; CHECK: ld1.4s { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x28,0xdf,0x4c]
+; CHECK: ld1.1d { v1 }, [x1], #8 ; encoding: [0x21,0x7c,0xdf,0x0c]
+; CHECK: ld1.1d { v2, v3 }, [x1], #16 ; encoding: [0x22,0xac,0xdf,0x0c]
+; CHECK: ld1.1d { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x6c,0xdf,0x0c]
+; CHECK: ld1.1d { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x2c,0xdf,0x0c]
+; CHECK: ld1.2d { v1 }, [x1], #16 ; encoding: [0x21,0x7c,0xdf,0x4c]
+; CHECK: ld1.2d { v2, v3 }, [x1], #32 ; encoding: [0x22,0xac,0xdf,0x4c]
+; CHECK: ld1.2d { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x6c,0xdf,0x4c]
+; CHECK: ld1.2d { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x2c,0xdf,0x4c]
+; CHECK: st1.8b { v1 }, [x1], #8 ; encoding: [0x21,0x70,0x9f,0x0c]
+; CHECK: st1.8b { v2, v3 }, [x1], #16 ; encoding: [0x22,0xa0,0x9f,0x0c]
+; CHECK: st1.8b { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x60,0x9f,0x0c]
+; CHECK: st1.8b { v4, v5, v6, v7 }, [x1], #32 ; encoding: [0x24,0x20,0x9f,0x0c]
+; CHECK: st1.16b { v1 }, [x1], #16 ; encoding: [0x21,0x70,0x9f,0x4c]
+; CHECK: st1.16b { v2, v3 }, [x1], #32 ; encoding: [0x22,0xa0,0x9f,0x4c]
+; CHECK: st1.16b { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x60,0x9f,0x4c]
+; CHECK: st1.16b { v4, v5, v6, v7 }, [x1], #64 ; encoding: [0x24,0x20,0x9f,0x4c]
+; CHECK: st1.4h { v1 }, [x1], #8 ; encoding: [0x21,0x74,0x9f,0x0c]
+; CHECK: st1.4h { v2, v3 }, [x1], #16 ; encoding: [0x22,0xa4,0x9f,0x0c]
+; CHECK: st1.4h { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x64,0x9f,0x0c]
+; CHECK: st1.4h { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x24,0x9f,0x0c]
+; CHECK: st1.8h { v1 }, [x1], #16 ; encoding: [0x21,0x74,0x9f,0x4c]
+; CHECK: st1.8h { v2, v3 }, [x1], #32 ; encoding: [0x22,0xa4,0x9f,0x4c]
+; CHECK: st1.8h { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x64,0x9f,0x4c]
+; CHECK: st1.8h { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x24,0x9f,0x4c]
+; CHECK: st1.2s { v1 }, [x1], #8 ; encoding: [0x21,0x78,0x9f,0x0c]
+; CHECK: st1.2s { v2, v3 }, [x1], #16 ; encoding: [0x22,0xa8,0x9f,0x0c]
+; CHECK: st1.2s { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x68,0x9f,0x0c]
+; CHECK: st1.2s { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x28,0x9f,0x0c]
+; CHECK: st1.4s { v1 }, [x1], #16 ; encoding: [0x21,0x78,0x9f,0x4c]
+; CHECK: st1.4s { v2, v3 }, [x1], #32 ; encoding: [0x22,0xa8,0x9f,0x4c]
+; CHECK: st1.4s { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x68,0x9f,0x4c]
+; CHECK: st1.4s { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x28,0x9f,0x4c]
+; CHECK: st1.1d { v1 }, [x1], #8 ; encoding: [0x21,0x7c,0x9f,0x0c]
+; CHECK: st1.1d { v2, v3 }, [x1], #16 ; encoding: [0x22,0xac,0x9f,0x0c]
+; CHECK: st1.1d { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x6c,0x9f,0x0c]
+; CHECK: st1.1d { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x2c,0x9f,0x0c]
+; CHECK: st1.2d { v1 }, [x1], #16 ; encoding: [0x21,0x7c,0x9f,0x4c]
+; CHECK: st1.2d { v2, v3 }, [x1], #32 ; encoding: [0x22,0xac,0x9f,0x4c]
+; CHECK: st1.2d { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x6c,0x9f,0x4c]
+; CHECK: st1.2d { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x2c,0x9f,0x4c]
+; CHECK: ld2.8b { v2, v3 }, [x1], x15 ; encoding: [0x22,0x80,0xcf,0x0c]
+; CHECK: ld2.16b { v2, v3 }, [x1], x15 ; encoding: [0x22,0x80,0xcf,0x4c]
+; CHECK: ld2.4h { v2, v3 }, [x1], x15 ; encoding: [0x22,0x84,0xcf,0x0c]
+; CHECK: ld2.8h { v2, v3 }, [x1], x15 ; encoding: [0x22,0x84,0xcf,0x4c]
+; CHECK: ld2.2s { v2, v3 }, [x1], x15 ; encoding: [0x22,0x88,0xcf,0x0c]
+; CHECK: ld2.4s { v2, v3 }, [x1], x15 ; encoding: [0x22,0x88,0xcf,0x4c]
+; CHECK: ld2.2d { v2, v3 }, [x1], x15 ; encoding: [0x22,0x8c,0xcf,0x4c]
+; CHECK: st2.8b { v2, v3 }, [x1], x15 ; encoding: [0x22,0x80,0x8f,0x0c]
+; CHECK: st2.16b { v2, v3 }, [x1], x15 ; encoding: [0x22,0x80,0x8f,0x4c]
+; CHECK: st2.4h { v2, v3 }, [x1], x15 ; encoding: [0x22,0x84,0x8f,0x0c]
+; CHECK: st2.8h { v2, v3 }, [x1], x15 ; encoding: [0x22,0x84,0x8f,0x4c]
+; CHECK: st2.2s { v2, v3 }, [x1], x15 ; encoding: [0x22,0x88,0x8f,0x0c]
+; CHECK: st2.4s { v2, v3 }, [x1], x15 ; encoding: [0x22,0x88,0x8f,0x4c]
+; CHECK: st2.2d { v2, v3 }, [x1], x15 ; encoding: [0x22,0x8c,0x8f,0x4c]
+; CHECK: ld2.8b { v2, v3 }, [x1], #16 ; encoding: [0x22,0x80,0xdf,0x0c]
+; CHECK: ld2.16b { v2, v3 }, [x1], #32 ; encoding: [0x22,0x80,0xdf,0x4c]
+; CHECK: ld2.4h { v2, v3 }, [x1], #16 ; encoding: [0x22,0x84,0xdf,0x0c]
+; CHECK: ld2.8h { v2, v3 }, [x1], #32 ; encoding: [0x22,0x84,0xdf,0x4c]
+; CHECK: ld2.2s { v2, v3 }, [x1], #16 ; encoding: [0x22,0x88,0xdf,0x0c]
+; CHECK: ld2.4s { v2, v3 }, [x1], #32 ; encoding: [0x22,0x88,0xdf,0x4c]
+; CHECK: ld2.2d { v2, v3 }, [x1], #32 ; encoding: [0x22,0x8c,0xdf,0x4c]
+; CHECK: st2.8b { v2, v3 }, [x1], #16 ; encoding: [0x22,0x80,0x9f,0x0c]
+; CHECK: st2.16b { v2, v3 }, [x1], #32 ; encoding: [0x22,0x80,0x9f,0x4c]
+; CHECK: st2.4h { v2, v3 }, [x1], #16 ; encoding: [0x22,0x84,0x9f,0x0c]
+; CHECK: st2.8h { v2, v3 }, [x1], #32 ; encoding: [0x22,0x84,0x9f,0x4c]
+; CHECK: st2.2s { v2, v3 }, [x1], #16 ; encoding: [0x22,0x88,0x9f,0x0c]
+; CHECK: st2.4s { v2, v3 }, [x1], #32 ; encoding: [0x22,0x88,0x9f,0x4c]
+; CHECK: st2.2d { v2, v3 }, [x1], #32 ; encoding: [0x22,0x8c,0x9f,0x4c]
+; CHECK: ld3.8b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x40,0xcf,0x0c]
+; CHECK: ld3.16b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x40,0xcf,0x4c]
+; CHECK: ld3.4h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x44,0xcf,0x0c]
+; CHECK: ld3.8h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x44,0xcf,0x4c]
+; CHECK: ld3.2s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x48,0xcf,0x0c]
+; CHECK: ld3.4s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x48,0xcf,0x4c]
+; CHECK: ld3.2d { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x4c,0xcf,0x4c]
+; CHECK: st3.8b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x40,0x8f,0x0c]
+; CHECK: st3.16b { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x40,0x8f,0x4c]
+; CHECK: st3.4h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x44,0x8f,0x0c]
+; CHECK: st3.8h { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x44,0x8f,0x4c]
+; CHECK: st3.2s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x48,0x8f,0x0c]
+; CHECK: st3.4s { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x48,0x8f,0x4c]
+; CHECK: st3.2d { v3, v4, v5 }, [x1], x15 ; encoding: [0x23,0x4c,0x8f,0x4c]
+; CHECK: ld3.8b { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x40,0xdf,0x0c]
+; CHECK: ld3.16b { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x40,0xdf,0x4c]
+; CHECK: ld3.4h { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x44,0xdf,0x0c]
+; CHECK: ld3.8h { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x44,0xdf,0x4c]
+; CHECK: ld3.2s { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x48,0xdf,0x0c]
+; CHECK: ld3.4s { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x48,0xdf,0x4c]
+; CHECK: ld3.2d { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x4c,0xdf,0x4c]
+; CHECK: st3.8b { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x40,0x9f,0x0c]
+; CHECK: st3.16b { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x40,0x9f,0x4c]
+; CHECK: st3.4h { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x44,0x9f,0x0c]
+; CHECK: st3.8h { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x44,0x9f,0x4c]
+; CHECK: st3.2s { v3, v4, v5 }, [x1], #24 ; encoding: [0x23,0x48,0x9f,0x0c]
+; CHECK: st3.4s { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x48,0x9f,0x4c]
+; CHECK: st3.2d { v3, v4, v5 }, [x1], #48 ; encoding: [0x23,0x4c,0x9f,0x4c]
+; CHECK: ld4.8b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x00,0xcf,0x0c]
+; CHECK: ld4.16b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x00,0xcf,0x4c]
+; CHECK: ld4.4h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x04,0xcf,0x0c]
+; CHECK: ld4.8h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x04,0xcf,0x4c]
+; CHECK: ld4.2s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x08,0xcf,0x0c]
+; CHECK: ld4.4s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x08,0xcf,0x4c]
+; CHECK: ld4.2d { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x0c,0xcf,0x4c]
+; CHECK: st4.8b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x00,0x8f,0x0c]
+; CHECK: st4.16b { v4, v5, v6, v7 }, [x1], x15 ; encoding: [0x24,0x00,0x8f,0x4c]
+; CHECK: st4.4h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x04,0x8f,0x0c]
+; CHECK: st4.8h { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x04,0x8f,0x4c]
+; CHECK: st4.2s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x08,0x8f,0x0c]
+; CHECK: st4.4s { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x08,0x8f,0x4c]
+; CHECK: st4.2d { v7, v8, v9, v10 }, [x1], x15 ; encoding: [0x27,0x0c,0x8f,0x4c]
+; CHECK: ld4.8b { v4, v5, v6, v7 }, [x1], #32 ; encoding: [0x24,0x00,0xdf,0x0c]
+; CHECK: ld4.16b { v4, v5, v6, v7 }, [x1], #64 ; encoding: [0x24,0x00,0xdf,0x4c]
+; CHECK: ld4.4h { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x04,0xdf,0x0c]
+; CHECK: ld4.8h { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x04,0xdf,0x4c]
+; CHECK: ld4.2s { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x08,0xdf,0x0c]
+; CHECK: ld4.4s { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x08,0xdf,0x4c]
+; CHECK: ld4.2d { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x0c,0xdf,0x4c]
+; CHECK: st4.8b { v4, v5, v6, v7 }, [x1], #32 ; encoding: [0x24,0x00,0x9f,0x0c]
+; CHECK: st4.16b { v4, v5, v6, v7 }, [x1], #64 ; encoding: [0x24,0x00,0x9f,0x4c]
+; CHECK: st4.4h { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x04,0x9f,0x0c]
+; CHECK: st4.8h { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x04,0x9f,0x4c]
+; CHECK: st4.2s { v7, v8, v9, v10 }, [x1], #32 ; encoding: [0x27,0x08,0x9f,0x0c]
+; CHECK: st4.4s { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x08,0x9f,0x4c]
+; CHECK: st4.2d { v7, v8, v9, v10 }, [x1], #64 ; encoding: [0x27,0x0c,0x9f,0x4c]
+; CHECK: ld1r.8b { v12 }, [x2] ; encoding: [0x4c,0xc0,0x40,0x0d]
+; CHECK: ld1r.8b { v12 }, [x2], x3 ; encoding: [0x4c,0xc0,0xc3,0x0d]
+; CHECK: ld1r.16b { v12 }, [x2] ; encoding: [0x4c,0xc0,0x40,0x4d]
+; CHECK: ld1r.16b { v12 }, [x2], x3 ; encoding: [0x4c,0xc0,0xc3,0x4d]
+; CHECK: ld1r.4h { v12 }, [x2] ; encoding: [0x4c,0xc4,0x40,0x0d]
+; CHECK: ld1r.4h { v12 }, [x2], x3 ; encoding: [0x4c,0xc4,0xc3,0x0d]
+; CHECK: ld1r.8h { v12 }, [x2] ; encoding: [0x4c,0xc4,0x40,0x4d]
+; CHECK: ld1r.8h { v12 }, [x2], x3 ; encoding: [0x4c,0xc4,0xc3,0x4d]
+; CHECK: ld1r.2s { v12 }, [x2] ; encoding: [0x4c,0xc8,0x40,0x0d]
+; CHECK: ld1r.2s { v12 }, [x2], x3 ; encoding: [0x4c,0xc8,0xc3,0x0d]
+; CHECK: ld1r.4s { v12 }, [x2] ; encoding: [0x4c,0xc8,0x40,0x4d]
+; CHECK: ld1r.4s { v12 }, [x2], x3 ; encoding: [0x4c,0xc8,0xc3,0x4d]
+; CHECK: ld1r.1d { v12 }, [x2] ; encoding: [0x4c,0xcc,0x40,0x0d]
+; CHECK: ld1r.1d { v12 }, [x2], x3 ; encoding: [0x4c,0xcc,0xc3,0x0d]
+; CHECK: ld1r.2d { v12 }, [x2] ; encoding: [0x4c,0xcc,0x40,0x4d]
+; CHECK: ld1r.2d { v12 }, [x2], x3 ; encoding: [0x4c,0xcc,0xc3,0x4d]
+; CHECK: ld1r.8b { v12 }, [x2], #1 ; encoding: [0x4c,0xc0,0xdf,0x0d]
+; CHECK: ld1r.16b { v12 }, [x2], #1 ; encoding: [0x4c,0xc0,0xdf,0x4d]
+; CHECK: ld1r.4h { v12 }, [x2], #2 ; encoding: [0x4c,0xc4,0xdf,0x0d]
+; CHECK: ld1r.8h { v12 }, [x2], #2 ; encoding: [0x4c,0xc4,0xdf,0x4d]
+; CHECK: ld1r.2s { v12 }, [x2], #4 ; encoding: [0x4c,0xc8,0xdf,0x0d]
+; CHECK: ld1r.4s { v12 }, [x2], #4 ; encoding: [0x4c,0xc8,0xdf,0x4d]
+; CHECK: ld1r.1d { v12 }, [x2], #8 ; encoding: [0x4c,0xcc,0xdf,0x0d]
+; CHECK: ld1r.2d { v12 }, [x2], #8 ; encoding: [0x4c,0xcc,0xdf,0x4d]
+; CHECK: ld2r.8b { v3, v4 }, [x2] ; encoding: [0x43,0xc0,0x60,0x0d]
+; CHECK: ld2r.8b { v3, v4 }, [x2], x3 ; encoding: [0x43,0xc0,0xe3,0x0d]
+; CHECK: ld2r.16b { v3, v4 }, [x2] ; encoding: [0x43,0xc0,0x60,0x4d]
+; CHECK: ld2r.16b { v3, v4 }, [x2], x3 ; encoding: [0x43,0xc0,0xe3,0x4d]
+; CHECK: ld2r.4h { v3, v4 }, [x2] ; encoding: [0x43,0xc4,0x60,0x0d]
+; CHECK: ld2r.4h { v3, v4 }, [x2], x3 ; encoding: [0x43,0xc4,0xe3,0x0d]
+; CHECK: ld2r.8h { v3, v4 }, [x2] ; encoding: [0x43,0xc4,0x60,0x4d]
+; CHECK: ld2r.8h { v3, v4 }, [x2], x3 ; encoding: [0x43,0xc4,0xe3,0x4d]
+; CHECK: ld2r.2s { v3, v4 }, [x2] ; encoding: [0x43,0xc8,0x60,0x0d]
+; CHECK: ld2r.2s { v3, v4 }, [x2], x3 ; encoding: [0x43,0xc8,0xe3,0x0d]
+; CHECK: ld2r.4s { v3, v4 }, [x2] ; encoding: [0x43,0xc8,0x60,0x4d]
+; CHECK: ld2r.4s { v3, v4 }, [x2], x3 ; encoding: [0x43,0xc8,0xe3,0x4d]
+; CHECK: ld2r.1d { v3, v4 }, [x2] ; encoding: [0x43,0xcc,0x60,0x0d]
+; CHECK: ld2r.1d { v3, v4 }, [x2], x3 ; encoding: [0x43,0xcc,0xe3,0x0d]
+; CHECK: ld2r.2d { v3, v4 }, [x2] ; encoding: [0x43,0xcc,0x60,0x4d]
+; CHECK: ld2r.2d { v3, v4 }, [x2], x3 ; encoding: [0x43,0xcc,0xe3,0x4d]
+; CHECK: ld2r.8b { v3, v4 }, [x2], #2 ; encoding: [0x43,0xc0,0xff,0x0d]
+; CHECK: ld2r.16b { v3, v4 }, [x2], #2 ; encoding: [0x43,0xc0,0xff,0x4d]
+; CHECK: ld2r.4h { v3, v4 }, [x2], #4 ; encoding: [0x43,0xc4,0xff,0x0d]
+; CHECK: ld2r.8h { v3, v4 }, [x2], #4 ; encoding: [0x43,0xc4,0xff,0x4d]
+; CHECK: ld2r.2s { v3, v4 }, [x2], #8 ; encoding: [0x43,0xc8,0xff,0x0d]
+; CHECK: ld2r.4s { v3, v4 }, [x2], #8 ; encoding: [0x43,0xc8,0xff,0x4d]
+; CHECK: ld2r.1d { v3, v4 }, [x2], #16 ; encoding: [0x43,0xcc,0xff,0x0d]
+; CHECK: ld2r.2d { v3, v4 }, [x2], #16 ; encoding: [0x43,0xcc,0xff,0x4d]
+; CHECK: ld3r.8b { v2, v3, v4 }, [x2] ; encoding: [0x42,0xe0,0x40,0x0d]
+; CHECK: ld3r.8b { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xe0,0xc3,0x0d]
+; CHECK: ld3r.16b { v2, v3, v4 }, [x2] ; encoding: [0x42,0xe0,0x40,0x4d]
+; CHECK: ld3r.16b { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xe0,0xc3,0x4d]
+; CHECK: ld3r.4h { v2, v3, v4 }, [x2] ; encoding: [0x42,0xe4,0x40,0x0d]
+; CHECK: ld3r.4h { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xe4,0xc3,0x0d]
+; CHECK: ld3r.8h { v2, v3, v4 }, [x2] ; encoding: [0x42,0xe4,0x40,0x4d]
+; CHECK: ld3r.8h { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xe4,0xc3,0x4d]
+; CHECK: ld3r.2s { v2, v3, v4 }, [x2] ; encoding: [0x42,0xe8,0x40,0x0d]
+; CHECK: ld3r.2s { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xe8,0xc3,0x0d]
+; CHECK: ld3r.4s { v2, v3, v4 }, [x2] ; encoding: [0x42,0xe8,0x40,0x4d]
+; CHECK: ld3r.4s { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xe8,0xc3,0x4d]
+; CHECK: ld3r.1d { v2, v3, v4 }, [x2] ; encoding: [0x42,0xec,0x40,0x0d]
+; CHECK: ld3r.1d { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xec,0xc3,0x0d]
+; CHECK: ld3r.2d { v2, v3, v4 }, [x2] ; encoding: [0x42,0xec,0x40,0x4d]
+; CHECK: ld3r.2d { v2, v3, v4 }, [x2], x3 ; encoding: [0x42,0xec,0xc3,0x4d]
+; CHECK: ld3r.8b { v2, v3, v4 }, [x2], #3 ; encoding: [0x42,0xe0,0xdf,0x0d]
+; CHECK: ld3r.16b { v2, v3, v4 }, [x2], #3 ; encoding: [0x42,0xe0,0xdf,0x4d]
+; CHECK: ld3r.4h { v2, v3, v4 }, [x2], #6 ; encoding: [0x42,0xe4,0xdf,0x0d]
+; CHECK: ld3r.8h { v2, v3, v4 }, [x2], #6 ; encoding: [0x42,0xe4,0xdf,0x4d]
+; CHECK: ld3r.2s { v2, v3, v4 }, [x2], #12 ; encoding: [0x42,0xe8,0xdf,0x0d]
+; CHECK: ld3r.4s { v2, v3, v4 }, [x2], #12 ; encoding: [0x42,0xe8,0xdf,0x4d]
+; CHECK: ld3r.1d { v2, v3, v4 }, [x2], #24 ; encoding: [0x42,0xec,0xdf,0x0d]
+; CHECK: ld3r.2d { v2, v3, v4 }, [x2], #24 ; encoding: [0x42,0xec,0xdf,0x4d]
+; CHECK: ld4r.8b { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xe0,0x60,0x0d]
+; CHECK: ld4r.8b { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xe0,0xe3,0x0d]
+; CHECK: ld4r.16b { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xe0,0x60,0x4d]
+; CHECK: ld4r.16b { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xe0,0xe3,0x4d]
+; CHECK: ld4r.4h { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xe4,0x60,0x0d]
+; CHECK: ld4r.4h { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xe4,0xe3,0x0d]
+; CHECK: ld4r.8h { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xe4,0x60,0x4d]
+; CHECK: ld4r.8h { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xe4,0xe3,0x4d]
+; CHECK: ld4r.2s { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xe8,0x60,0x0d]
+; CHECK: ld4r.2s { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xe8,0xe3,0x0d]
+; CHECK: ld4r.4s { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xe8,0x60,0x4d]
+; CHECK: ld4r.4s { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xe8,0xe3,0x4d]
+; CHECK: ld4r.1d { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xec,0x60,0x0d]
+; CHECK: ld4r.1d { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xec,0xe3,0x0d]
+; CHECK: ld4r.2d { v2, v3, v4, v5 }, [x2] ; encoding: [0x42,0xec,0x60,0x4d]
+; CHECK: ld4r.2d { v2, v3, v4, v5 }, [x2], x3 ; encoding: [0x42,0xec,0xe3,0x4d]
+; CHECK: ld4r.8b { v2, v3, v4, v5 }, [x2], #4 ; encoding: [0x42,0xe0,0xff,0x0d]
+; CHECK: ld4r.16b { v2, v3, v4, v5 }, [x2], #4 ; encoding: [0x42,0xe0,0xff,0x4d]
+; CHECK: ld4r.4h { v2, v3, v4, v5 }, [x2], #8 ; encoding: [0x42,0xe4,0xff,0x0d]
+; CHECK: ld4r.8h { v2, v3, v4, v5 }, [x2], #8 ; encoding: [0x42,0xe4,0xff,0x4d]
+; CHECK: ld4r.2s { v2, v3, v4, v5 }, [x2], #16 ; encoding: [0x42,0xe8,0xff,0x0d]
+; CHECK: ld4r.4s { v2, v3, v4, v5 }, [x2], #16 ; encoding: [0x42,0xe8,0xff,0x4d]
+; CHECK: ld4r.1d { v2, v3, v4, v5 }, [x2], #32 ; encoding: [0x42,0xec,0xff,0x0d]
+; CHECK: ld4r.2d { v2, v3, v4, v5 }, [x2], #32 ; encoding: [0x42,0xec,0xff,0x4d]
+; CHECK: ld1.b { v6 }[13], [x3] ; encoding: [0x66,0x14,0x40,0x4d]
+; CHECK: ld1.h { v6 }[2], [x3] ; encoding: [0x66,0x50,0x40,0x0d]
+; CHECK: ld1.s { v6 }[2], [x3] ; encoding: [0x66,0x80,0x40,0x4d]
+; CHECK: ld1.d { v6 }[1], [x3] ; encoding: [0x66,0x84,0x40,0x4d]
+; CHECK: ld1.b { v6 }[13], [x3], x5 ; encoding: [0x66,0x14,0xc5,0x4d]
+; CHECK: ld1.h { v6 }[2], [x3], x5 ; encoding: [0x66,0x50,0xc5,0x0d]
+; CHECK: ld1.s { v6 }[2], [x3], x5 ; encoding: [0x66,0x80,0xc5,0x4d]
+; CHECK: ld1.d { v6 }[1], [x3], x5 ; encoding: [0x66,0x84,0xc5,0x4d]
+; CHECK: ld1.b { v6 }[13], [x3], #1 ; encoding: [0x66,0x14,0xdf,0x4d]
+; CHECK: ld1.h { v6 }[2], [x3], #2 ; encoding: [0x66,0x50,0xdf,0x0d]
+; CHECK: ld1.s { v6 }[2], [x3], #4 ; encoding: [0x66,0x80,0xdf,0x4d]
+; CHECK: ld1.d { v6 }[1], [x3], #8 ; encoding: [0x66,0x84,0xdf,0x4d]
+; CHECK: ld2.b { v5, v6 }[13], [x3] ; encoding: [0x65,0x14,0x60,0x4d]
+; CHECK: ld2.h { v5, v6 }[2], [x3] ; encoding: [0x65,0x50,0x60,0x0d]
+; CHECK: ld2.s { v5, v6 }[2], [x3] ; encoding: [0x65,0x80,0x60,0x4d]
+; CHECK: ld2.d { v5, v6 }[1], [x3] ; encoding: [0x65,0x84,0x60,0x4d]
+; CHECK: ld2.b { v5, v6 }[13], [x3], x5 ; encoding: [0x65,0x14,0xe5,0x4d]
+; CHECK: ld2.h { v5, v6 }[2], [x3], x5 ; encoding: [0x65,0x50,0xe5,0x0d]
+; CHECK: ld2.s { v5, v6 }[2], [x3], x5 ; encoding: [0x65,0x80,0xe5,0x4d]
+; CHECK: ld2.d { v5, v6 }[1], [x3], x5 ; encoding: [0x65,0x84,0xe5,0x4d]
+; CHECK: ld2.b { v5, v6 }[13], [x3], #2 ; encoding: [0x65,0x14,0xff,0x4d]
+; CHECK: ld2.h { v5, v6 }[2], [x3], #4 ; encoding: [0x65,0x50,0xff,0x0d]
+; CHECK: ld2.s { v5, v6 }[2], [x3], #8 ; encoding: [0x65,0x80,0xff,0x4d]
+; CHECK: ld2.d { v5, v6 }[1], [x3], #16 ; encoding: [0x65,0x84,0xff,0x4d]
+; CHECK: ld3.b { v7, v8, v9 }[13], [x3] ; encoding: [0x67,0x34,0x40,0x4d]
+; CHECK: ld3.h { v7, v8, v9 }[2], [x3] ; encoding: [0x67,0x70,0x40,0x0d]
+; CHECK: ld3.s { v7, v8, v9 }[2], [x3] ; encoding: [0x67,0xa0,0x40,0x4d]
+; CHECK: ld3.d { v7, v8, v9 }[1], [x3] ; encoding: [0x67,0xa4,0x40,0x4d]
+; CHECK: ld3.b { v7, v8, v9 }[13], [x3], x5 ; encoding: [0x67,0x34,0xc5,0x4d]
+; CHECK: ld3.h { v7, v8, v9 }[2], [x3], x5 ; encoding: [0x67,0x70,0xc5,0x0d]
+; CHECK: ld3.s { v7, v8, v9 }[2], [x3], x5 ; encoding: [0x67,0xa0,0xc5,0x4d]
+; CHECK: ld3.d { v7, v8, v9 }[1], [x3], x5 ; encoding: [0x67,0xa4,0xc5,0x4d]
+; CHECK: ld3.b { v7, v8, v9 }[13], [x3], #3 ; encoding: [0x67,0x34,0xdf,0x4d]
+; CHECK: ld3.h { v7, v8, v9 }[2], [x3], #6 ; encoding: [0x67,0x70,0xdf,0x0d]
+; CHECK: ld3.s { v7, v8, v9 }[2], [x3], #12 ; encoding: [0x67,0xa0,0xdf,0x4d]
+; CHECK: ld3.d { v7, v8, v9 }[1], [x3], #24 ; encoding: [0x67,0xa4,0xdf,0x4d]
+; CHECK: ld4.b { v7, v8, v9, v10 }[13], [x3] ; encoding: [0x67,0x34,0x60,0x4d]
+; CHECK: ld4.h { v7, v8, v9, v10 }[2], [x3] ; encoding: [0x67,0x70,0x60,0x0d]
+; CHECK: ld4.s { v7, v8, v9, v10 }[2], [x3] ; encoding: [0x67,0xa0,0x60,0x4d]
+; CHECK: ld4.d { v7, v8, v9, v10 }[1], [x3] ; encoding: [0x67,0xa4,0x60,0x4d]
+; CHECK: ld4.b { v7, v8, v9, v10 }[13], [x3], x5 ; encoding: [0x67,0x34,0xe5,0x4d]
+; CHECK: ld4.h { v7, v8, v9, v10 }[2], [x3], x5 ; encoding: [0x67,0x70,0xe5,0x0d]
+; CHECK: ld4.s { v7, v8, v9, v10 }[2], [x3], x5 ; encoding: [0x67,0xa0,0xe5,0x4d]
+; CHECK: ld4.d { v7, v8, v9, v10 }[1], [x3], x5 ; encoding: [0x67,0xa4,0xe5,0x4d]
+; CHECK: ld4.b { v7, v8, v9, v10 }[13], [x3], #4 ; encoding: [0x67,0x34,0xff,0x4d]
+; CHECK: ld4.h { v7, v8, v9, v10 }[2], [x3], #8 ; encoding: [0x67,0x70,0xff,0x0d]
+; CHECK: ld4.s { v7, v8, v9, v10 }[2], [x3], #16 ; encoding: [0x67,0xa0,0xff,0x4d]
+; CHECK: ld4.d { v7, v8, v9, v10 }[1], [x3], #32 ; encoding: [0x67,0xa4,0xff,0x4d]
+; CHECK: st1.b { v6 }[13], [x3] ; encoding: [0x66,0x14,0x00,0x4d]
+; CHECK: st1.h { v6 }[2], [x3] ; encoding: [0x66,0x50,0x00,0x0d]
+; CHECK: st1.s { v6 }[2], [x3] ; encoding: [0x66,0x80,0x00,0x4d]
+; CHECK: st1.d { v6 }[1], [x3] ; encoding: [0x66,0x84,0x00,0x4d]
+; CHECK: st1.b { v6 }[13], [x3], x5 ; encoding: [0x66,0x14,0x85,0x4d]
+; CHECK: st1.h { v6 }[2], [x3], x5 ; encoding: [0x66,0x50,0x85,0x0d]
+; CHECK: st1.s { v6 }[2], [x3], x5 ; encoding: [0x66,0x80,0x85,0x4d]
+; CHECK: st1.d { v6 }[1], [x3], x5 ; encoding: [0x66,0x84,0x85,0x4d]
+; CHECK: st1.b { v6 }[13], [x3], #1 ; encoding: [0x66,0x14,0x9f,0x4d]
+; CHECK: st1.h { v6 }[2], [x3], #2 ; encoding: [0x66,0x50,0x9f,0x0d]
+; CHECK: st1.s { v6 }[2], [x3], #4 ; encoding: [0x66,0x80,0x9f,0x4d]
+; CHECK: st1.d { v6 }[1], [x3], #8 ; encoding: [0x66,0x84,0x9f,0x4d]
+; CHECK: st2.b { v5, v6 }[13], [x3] ; encoding: [0x65,0x14,0x20,0x4d]
+; CHECK: st2.h { v5, v6 }[2], [x3] ; encoding: [0x65,0x50,0x20,0x0d]
+; CHECK: st2.s { v5, v6 }[2], [x3] ; encoding: [0x65,0x80,0x20,0x4d]
+; CHECK: st2.d { v5, v6 }[1], [x3] ; encoding: [0x65,0x84,0x20,0x4d]
+; CHECK: st2.b { v5, v6 }[13], [x3], x5 ; encoding: [0x65,0x14,0xa5,0x4d]
+; CHECK: st2.h { v5, v6 }[2], [x3], x5 ; encoding: [0x65,0x50,0xa5,0x0d]
+; CHECK: st2.s { v5, v6 }[2], [x3], x5 ; encoding: [0x65,0x80,0xa5,0x4d]
+; CHECK: st2.d { v5, v6 }[1], [x3], x5 ; encoding: [0x65,0x84,0xa5,0x4d]
+; CHECK: st2.b { v5, v6 }[13], [x3], #2 ; encoding: [0x65,0x14,0xbf,0x4d]
+; CHECK: st2.h { v5, v6 }[2], [x3], #4 ; encoding: [0x65,0x50,0xbf,0x0d]
+; CHECK: st2.s { v5, v6 }[2], [x3], #8 ; encoding: [0x65,0x80,0xbf,0x4d]
+; CHECK: st2.d { v5, v6 }[1], [x3], #16 ; encoding: [0x65,0x84,0xbf,0x4d]
+; CHECK: st3.b { v7, v8, v9 }[13], [x3] ; encoding: [0x67,0x34,0x00,0x4d]
+; CHECK: st3.h { v7, v8, v9 }[2], [x3] ; encoding: [0x67,0x70,0x00,0x0d]
+; CHECK: st3.s { v7, v8, v9 }[2], [x3] ; encoding: [0x67,0xa0,0x00,0x4d]
+; CHECK: st3.d { v7, v8, v9 }[1], [x3] ; encoding: [0x67,0xa4,0x00,0x4d]
+; CHECK: st3.b { v7, v8, v9 }[13], [x3], x5 ; encoding: [0x67,0x34,0x85,0x4d]
+; CHECK: st3.h { v7, v8, v9 }[2], [x3], x5 ; encoding: [0x67,0x70,0x85,0x0d]
+; CHECK: st3.s { v7, v8, v9 }[2], [x3], x5 ; encoding: [0x67,0xa0,0x85,0x4d]
+; CHECK: st3.d { v7, v8, v9 }[1], [x3], x5 ; encoding: [0x67,0xa4,0x85,0x4d]
+; CHECK: st3.b { v7, v8, v9 }[13], [x3], #3 ; encoding: [0x67,0x34,0x9f,0x4d]
+; CHECK: st3.h { v7, v8, v9 }[2], [x3], #6 ; encoding: [0x67,0x70,0x9f,0x0d]
+; CHECK: st3.s { v7, v8, v9 }[2], [x3], #12 ; encoding: [0x67,0xa0,0x9f,0x4d]
+; CHECK: st3.d { v7, v8, v9 }[1], [x3], #24 ; encoding: [0x67,0xa4,0x9f,0x4d]
+; CHECK: st4.b { v7, v8, v9, v10 }[13], [x3] ; encoding: [0x67,0x34,0x20,0x4d]
+; CHECK: st4.h { v7, v8, v9, v10 }[2], [x3] ; encoding: [0x67,0x70,0x20,0x0d]
+; CHECK: st4.s { v7, v8, v9, v10 }[2], [x3] ; encoding: [0x67,0xa0,0x20,0x4d]
+; CHECK: st4.d { v7, v8, v9, v10 }[1], [x3] ; encoding: [0x67,0xa4,0x20,0x4d]
+; CHECK: st4.b { v7, v8, v9, v10 }[13], [x3], x5 ; encoding: [0x67,0x34,0xa5,0x4d]
+; CHECK: st4.h { v7, v8, v9, v10 }[2], [x3], x5 ; encoding: [0x67,0x70,0xa5,0x0d]
+; CHECK: st4.s { v7, v8, v9, v10 }[2], [x3], x5 ; encoding: [0x67,0xa0,0xa5,0x4d]
+; CHECK: st4.d { v7, v8, v9, v10 }[1], [x3], x5 ; encoding: [0x67,0xa4,0xa5,0x4d]
+; CHECK: st4.b { v7, v8, v9, v10 }[13], [x3], #4 ; encoding: [0x67,0x34,0xbf,0x4d]
+; CHECK: st4.h { v7, v8, v9, v10 }[2], [x3], #8 ; encoding: [0x67,0x70,0xbf,0x0d]
+; CHECK: st4.s { v7, v8, v9, v10 }[2], [x3], #16 ; encoding: [0x67,0xa0,0xbf,0x4d]
+; CHECK: st4.d { v7, v8, v9, v10 }[1], [x3], #32 ; encoding: [0x67,0xa4,0xbf,0x4d]
diff --git a/test/MC/AArch64/arm64-small-data-fixups.s b/test/MC/AArch64/arm64-small-data-fixups.s
new file mode 100644
index 000000000000..3fe7c75c011d
--- /dev/null
+++ b/test/MC/AArch64/arm64-small-data-fixups.s
@@ -0,0 +1,24 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -filetype=obj -o - %s | macho-dump | FileCheck %s
+
+foo:
+ .long 0
+bar:
+ .long 1
+
+baz:
+ .byte foo - bar
+ .short foo - bar
+
+; CHECK: # Relocation 0
+; CHECK: (('word-0', 0x9),
+; CHECK: ('word-1', 0x1a000002)),
+; CHECK: # Relocation 1
+; CHECK: (('word-0', 0x9),
+; CHECK: ('word-1', 0xa000001)),
+; CHECK: # Relocation 2
+; CHECK: (('word-0', 0x8),
+; CHECK: ('word-1', 0x18000002)),
+; CHECK: # Relocation 3
+; CHECK: (('word-0', 0x8),
+; CHECK: ('word-1', 0x8000001)),
+
diff --git a/test/MC/AArch64/arm64-spsel-sysreg.s b/test/MC/AArch64/arm64-spsel-sysreg.s
new file mode 100644
index 000000000000..f1d94d8c2d8b
--- /dev/null
+++ b/test/MC/AArch64/arm64-spsel-sysreg.s
@@ -0,0 +1,24 @@
+// RUN: not llvm-mc -triple arm64 -show-encoding < %s 2>%t | FileCheck %s
+// RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+msr SPSel, #0
+msr SPSel, x0
+msr DAIFSet, #0
+msr ESR_EL1, x0
+mrs x0, SPSel
+mrs x0, ESR_EL1
+
+// CHECK: msr SPSEL, #0 // encoding: [0xbf,0x40,0x00,0xd5]
+// CHECK: msr SPSEL, x0 // encoding: [0x00,0x42,0x18,0xd5]
+// CHECK: msr DAIFSET, #0 // encoding: [0xdf,0x40,0x03,0xd5]
+// CHECK: msr ESR_EL1, x0 // encoding: [0x00,0x52,0x18,0xd5]
+// CHECK: mrs x0, SPSEL // encoding: [0x00,0x42,0x38,0xd5]
+// CHECK: mrs x0, ESR_EL1 // encoding: [0x00,0x52,0x38,0xd5]
+
+
+msr DAIFSet, x0
+msr ESR_EL1, #0
+mrs x0, DAIFSet
+// CHECK-ERRORS: error: immediate must be an integer in range [0, 15]
+// CHECK-ERRORS: error: invalid operand for instruction
+// CHECK-ERRORS: error: expected readable system register
diff --git a/test/MC/AArch64/arm64-system-encoding.s b/test/MC/AArch64/arm64-system-encoding.s
new file mode 100644
index 000000000000..87f8f8a4e98c
--- /dev/null
+++ b/test/MC/AArch64/arm64-system-encoding.s
@@ -0,0 +1,623 @@
+; RUN: not llvm-mc -triple arm64-apple-darwin -show-encoding < %s 2> %t | FileCheck %s
+; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+foo:
+
+;-----------------------------------------------------------------------------
+; Simple encodings (instructions w/ no operands)
+;-----------------------------------------------------------------------------
+
+ nop
+ sev
+ sevl
+ wfe
+ wfi
+ yield
+
+; CHECK: nop ; encoding: [0x1f,0x20,0x03,0xd5]
+; CHECK: sev ; encoding: [0x9f,0x20,0x03,0xd5]
+; CHECK: sevl ; encoding: [0xbf,0x20,0x03,0xd5]
+; CHECK: wfe ; encoding: [0x5f,0x20,0x03,0xd5]
+; CHECK: wfi ; encoding: [0x7f,0x20,0x03,0xd5]
+; CHECK: yield ; encoding: [0x3f,0x20,0x03,0xd5]
+
+;-----------------------------------------------------------------------------
+; Single-immediate operand instructions
+;-----------------------------------------------------------------------------
+
+ clrex #10
+; CHECK: clrex #10 ; encoding: [0x5f,0x3a,0x03,0xd5]
+ isb #15
+ isb sy
+; CHECK: isb ; encoding: [0xdf,0x3f,0x03,0xd5]
+; CHECK: isb ; encoding: [0xdf,0x3f,0x03,0xd5]
+ dmb #3
+ dmb osh
+; CHECK: dmb osh ; encoding: [0xbf,0x33,0x03,0xd5]
+; CHECK: dmb osh ; encoding: [0xbf,0x33,0x03,0xd5]
+ dsb #7
+ dsb nsh
+; CHECK: dsb nsh ; encoding: [0x9f,0x37,0x03,0xd5]
+; CHECK: dsb nsh ; encoding: [0x9f,0x37,0x03,0xd5]
+
+;-----------------------------------------------------------------------------
+; Generic system instructions
+;-----------------------------------------------------------------------------
+ sys #2, c0, c5, #7
+; CHECK: encoding: [0xff,0x05,0x0a,0xd5]
+ sys #7, C6, c10, #7, x7
+; CHECK: encoding: [0xe7,0x6a,0x0f,0xd5]
+ sysl x20, #6, c3, C15, #7
+; CHECK: encoding: [0xf4,0x3f,0x2e,0xd5]
+
+; Check for error on invalid 'C' operand value.
+ sys #2, c16, c5, #7
+; CHECK-ERRORS: error: Expected cN operand where 0 <= N <= 15
+
+;-----------------------------------------------------------------------------
+; MSR/MRS instructions
+;-----------------------------------------------------------------------------
+ msr ACTLR_EL1, x3
+ msr ACTLR_EL2, x3
+ msr ACTLR_EL3, x3
+ msr AFSR0_EL1, x3
+ msr AFSR0_EL2, x3
+ msr AFSR0_EL3, x3
+ msr AFSR1_EL1, x3
+ msr AFSR1_EL2, x3
+ msr AFSR1_EL3, x3
+ msr AMAIR_EL1, x3
+ msr AMAIR_EL2, x3
+ msr AMAIR_EL3, x3
+ msr CNTFRQ_EL0, x3
+ msr CNTHCTL_EL2, x3
+ msr CNTHP_CTL_EL2, x3
+ msr CNTHP_CVAL_EL2, x3
+ msr CNTHP_TVAL_EL2, x3
+ msr CNTKCTL_EL1, x3
+ msr CNTP_CTL_EL0, x3
+ msr CNTP_CVAL_EL0, x3
+ msr CNTP_TVAL_EL0, x3
+ msr CNTVOFF_EL2, x3
+ msr CNTV_CTL_EL0, x3
+ msr CNTV_CVAL_EL0, x3
+ msr CNTV_TVAL_EL0, x3
+ msr CONTEXTIDR_EL1, x3
+ msr CPACR_EL1, x3
+ msr CPTR_EL2, x3
+ msr CPTR_EL3, x3
+ msr CSSELR_EL1, x3
+ msr CURRENTEL, x3
+ msr DACR32_EL2, x3
+ msr ESR_EL1, x3
+ msr ESR_EL2, x3
+ msr ESR_EL3, x3
+ msr FAR_EL1, x3
+ msr FAR_EL2, x3
+ msr FAR_EL3, x3
+ msr FPEXC32_EL2, x3
+ msr HACR_EL2, x3
+ msr HCR_EL2, x3
+ msr HPFAR_EL2, x3
+ msr HSTR_EL2, x3
+ msr IFSR32_EL2, x3
+ msr MAIR_EL1, x3
+ msr MAIR_EL2, x3
+ msr MAIR_EL3, x3
+ msr MDCR_EL2, x3
+ msr MDCR_EL3, x3
+ msr PAR_EL1, x3
+ msr SCR_EL3, x3
+ msr SCTLR_EL1, x3
+ msr SCTLR_EL2, x3
+ msr SCTLR_EL3, x3
+ msr SDER32_EL3, x3
+ msr TCR_EL1, x3
+ msr TCR_EL2, x3
+ msr TCR_EL3, x3
+ msr TEECR32_EL1, x3
+ msr TEEHBR32_EL1, x3
+ msr TPIDRRO_EL0, x3
+ msr TPIDR_EL0, x3
+ msr TPIDR_EL1, x3
+ msr TPIDR_EL2, x3
+ msr TPIDR_EL3, x3
+ msr TTBR0_EL1, x3
+ msr TTBR0_EL2, x3
+ msr TTBR0_EL3, x3
+ msr TTBR1_EL1, x3
+ msr VBAR_EL1, x3
+ msr VBAR_EL2, x3
+ msr VBAR_EL3, x3
+ msr VMPIDR_EL2, x3
+ msr VPIDR_EL2, x3
+ msr VTCR_EL2, x3
+ msr VTTBR_EL2, x3
+ msr SPSel, x3
+ msr S3_2_C11_C6_4, x1
+; CHECK: msr ACTLR_EL1, x3 ; encoding: [0x23,0x10,0x18,0xd5]
+; CHECK: msr ACTLR_EL2, x3 ; encoding: [0x23,0x10,0x1c,0xd5]
+; CHECK: msr ACTLR_EL3, x3 ; encoding: [0x23,0x10,0x1e,0xd5]
+; CHECK: msr AFSR0_EL1, x3 ; encoding: [0x03,0x51,0x18,0xd5]
+; CHECK: msr AFSR0_EL2, x3 ; encoding: [0x03,0x51,0x1c,0xd5]
+; CHECK: msr AFSR0_EL3, x3 ; encoding: [0x03,0x51,0x1e,0xd5]
+; CHECK: msr AFSR1_EL1, x3 ; encoding: [0x23,0x51,0x18,0xd5]
+; CHECK: msr AFSR1_EL2, x3 ; encoding: [0x23,0x51,0x1c,0xd5]
+; CHECK: msr AFSR1_EL3, x3 ; encoding: [0x23,0x51,0x1e,0xd5]
+; CHECK: msr AMAIR_EL1, x3 ; encoding: [0x03,0xa3,0x18,0xd5]
+; CHECK: msr AMAIR_EL2, x3 ; encoding: [0x03,0xa3,0x1c,0xd5]
+; CHECK: msr AMAIR_EL3, x3 ; encoding: [0x03,0xa3,0x1e,0xd5]
+; CHECK: msr CNTFRQ_EL0, x3 ; encoding: [0x03,0xe0,0x1b,0xd5]
+; CHECK: msr CNTHCTL_EL2, x3 ; encoding: [0x03,0xe1,0x1c,0xd5]
+; CHECK: msr CNTHP_CTL_EL2, x3 ; encoding: [0x23,0xe2,0x1c,0xd5]
+; CHECK: msr CNTHP_CVAL_EL2, x3 ; encoding: [0x43,0xe2,0x1c,0xd5]
+; CHECK: msr CNTHP_TVAL_EL2, x3 ; encoding: [0x03,0xe2,0x1c,0xd5]
+; CHECK: msr CNTKCTL_EL1, x3 ; encoding: [0x03,0xe1,0x18,0xd5]
+; CHECK: msr CNTP_CTL_EL0, x3 ; encoding: [0x23,0xe2,0x1b,0xd5]
+; CHECK: msr CNTP_CVAL_EL0, x3 ; encoding: [0x43,0xe2,0x1b,0xd5]
+; CHECK: msr CNTP_TVAL_EL0, x3 ; encoding: [0x03,0xe2,0x1b,0xd5]
+; CHECK: msr CNTVOFF_EL2, x3 ; encoding: [0x63,0xe0,0x1c,0xd5]
+; CHECK: msr CNTV_CTL_EL0, x3 ; encoding: [0x23,0xe3,0x1b,0xd5]
+; CHECK: msr CNTV_CVAL_EL0, x3 ; encoding: [0x43,0xe3,0x1b,0xd5]
+; CHECK: msr CNTV_TVAL_EL0, x3 ; encoding: [0x03,0xe3,0x1b,0xd5]
+; CHECK: msr CONTEXTIDR_EL1, x3 ; encoding: [0x23,0xd0,0x18,0xd5]
+; CHECK: msr CPACR_EL1, x3 ; encoding: [0x43,0x10,0x18,0xd5]
+; CHECK: msr CPTR_EL2, x3 ; encoding: [0x43,0x11,0x1c,0xd5]
+; CHECK: msr CPTR_EL3, x3 ; encoding: [0x43,0x11,0x1e,0xd5]
+; CHECK: msr CSSELR_EL1, x3 ; encoding: [0x03,0x00,0x1a,0xd5]
+; CHECK: msr CURRENTEL, x3 ; encoding: [0x43,0x42,0x18,0xd5]
+; CHECK: msr DACR32_EL2, x3 ; encoding: [0x03,0x30,0x1c,0xd5]
+; CHECK: msr ESR_EL1, x3 ; encoding: [0x03,0x52,0x18,0xd5]
+; CHECK: msr ESR_EL2, x3 ; encoding: [0x03,0x52,0x1c,0xd5]
+; CHECK: msr ESR_EL3, x3 ; encoding: [0x03,0x52,0x1e,0xd5]
+; CHECK: msr FAR_EL1, x3 ; encoding: [0x03,0x60,0x18,0xd5]
+; CHECK: msr FAR_EL2, x3 ; encoding: [0x03,0x60,0x1c,0xd5]
+; CHECK: msr FAR_EL3, x3 ; encoding: [0x03,0x60,0x1e,0xd5]
+; CHECK: msr FPEXC32_EL2, x3 ; encoding: [0x03,0x53,0x1c,0xd5]
+; CHECK: msr HACR_EL2, x3 ; encoding: [0xe3,0x11,0x1c,0xd5]
+; CHECK: msr HCR_EL2, x3 ; encoding: [0x03,0x11,0x1c,0xd5]
+; CHECK: msr HPFAR_EL2, x3 ; encoding: [0x83,0x60,0x1c,0xd5]
+; CHECK: msr HSTR_EL2, x3 ; encoding: [0x63,0x11,0x1c,0xd5]
+; CHECK: msr IFSR32_EL2, x3 ; encoding: [0x23,0x50,0x1c,0xd5]
+; CHECK: msr MAIR_EL1, x3 ; encoding: [0x03,0xa2,0x18,0xd5]
+; CHECK: msr MAIR_EL2, x3 ; encoding: [0x03,0xa2,0x1c,0xd5]
+; CHECK: msr MAIR_EL3, x3 ; encoding: [0x03,0xa2,0x1e,0xd5]
+; CHECK: msr MDCR_EL2, x3 ; encoding: [0x23,0x11,0x1c,0xd5]
+; CHECK: msr MDCR_EL3, x3 ; encoding: [0x23,0x13,0x1e,0xd5]
+; CHECK: msr PAR_EL1, x3 ; encoding: [0x03,0x74,0x18,0xd5]
+; CHECK: msr SCR_EL3, x3 ; encoding: [0x03,0x11,0x1e,0xd5]
+; CHECK: msr SCTLR_EL1, x3 ; encoding: [0x03,0x10,0x18,0xd5]
+; CHECK: msr SCTLR_EL2, x3 ; encoding: [0x03,0x10,0x1c,0xd5]
+; CHECK: msr SCTLR_EL3, x3 ; encoding: [0x03,0x10,0x1e,0xd5]
+; CHECK: msr SDER32_EL3, x3 ; encoding: [0x23,0x11,0x1e,0xd5]
+; CHECK: msr TCR_EL1, x3 ; encoding: [0x43,0x20,0x18,0xd5]
+; CHECK: msr TCR_EL2, x3 ; encoding: [0x43,0x20,0x1c,0xd5]
+; CHECK: msr TCR_EL3, x3 ; encoding: [0x43,0x20,0x1e,0xd5]
+; CHECK: msr TEECR32_EL1, x3 ; encoding: [0x03,0x00,0x12,0xd5]
+; CHECK: msr TEEHBR32_EL1, x3 ; encoding: [0x03,0x10,0x12,0xd5]
+; CHECK: msr TPIDRRO_EL0, x3 ; encoding: [0x63,0xd0,0x1b,0xd5]
+; CHECK: msr TPIDR_EL0, x3 ; encoding: [0x43,0xd0,0x1b,0xd5]
+; CHECK: msr TPIDR_EL1, x3 ; encoding: [0x83,0xd0,0x18,0xd5]
+; CHECK: msr TPIDR_EL2, x3 ; encoding: [0x43,0xd0,0x1c,0xd5]
+; CHECK: msr TPIDR_EL3, x3 ; encoding: [0x43,0xd0,0x1e,0xd5]
+; CHECK: msr TTBR0_EL1, x3 ; encoding: [0x03,0x20,0x18,0xd5]
+; CHECK: msr TTBR0_EL2, x3 ; encoding: [0x03,0x20,0x1c,0xd5]
+; CHECK: msr TTBR0_EL3, x3 ; encoding: [0x03,0x20,0x1e,0xd5]
+; CHECK: msr TTBR1_EL1, x3 ; encoding: [0x23,0x20,0x18,0xd5]
+; CHECK: msr VBAR_EL1, x3 ; encoding: [0x03,0xc0,0x18,0xd5]
+; CHECK: msr VBAR_EL2, x3 ; encoding: [0x03,0xc0,0x1c,0xd5]
+; CHECK: msr VBAR_EL3, x3 ; encoding: [0x03,0xc0,0x1e,0xd5]
+; CHECK: msr VMPIDR_EL2, x3 ; encoding: [0xa3,0x00,0x1c,0xd5]
+; CHECK: msr VPIDR_EL2, x3 ; encoding: [0x03,0x00,0x1c,0xd5]
+; CHECK: msr VTCR_EL2, x3 ; encoding: [0x43,0x21,0x1c,0xd5]
+; CHECK: msr VTTBR_EL2, x3 ; encoding: [0x03,0x21,0x1c,0xd5]
+; CHECK: msr SPSEL, x3 ; encoding: [0x03,0x42,0x18,0xd5]
+; CHECK: msr S3_2_C11_C6_4, x1 ; encoding: [0x81,0xb6,0x1a,0xd5]
+
+ mrs x3, ACTLR_EL1
+ mrs x3, ACTLR_EL2
+ mrs x3, ACTLR_EL3
+ mrs x3, AFSR0_EL1
+ mrs x3, AFSR0_EL2
+ mrs x3, AFSR0_EL3
+ mrs x3, AIDR_EL1
+ mrs x3, AFSR1_EL1
+ mrs x3, AFSR1_EL2
+ mrs x3, AFSR1_EL3
+ mrs x3, AMAIR_EL1
+ mrs x3, AMAIR_EL2
+ mrs x3, AMAIR_EL3
+ mrs x3, CCSIDR_EL1
+ mrs x3, CLIDR_EL1
+ mrs x3, CNTFRQ_EL0
+ mrs x3, CNTHCTL_EL2
+ mrs x3, CNTHP_CTL_EL2
+ mrs x3, CNTHP_CVAL_EL2
+ mrs x3, CNTHP_TVAL_EL2
+ mrs x3, CNTKCTL_EL1
+ mrs x3, CNTPCT_EL0
+ mrs x3, CNTP_CTL_EL0
+ mrs x3, CNTP_CVAL_EL0
+ mrs x3, CNTP_TVAL_EL0
+ mrs x3, CNTVCT_EL0
+ mrs x3, CNTVOFF_EL2
+ mrs x3, CNTV_CTL_EL0
+ mrs x3, CNTV_CVAL_EL0
+ mrs x3, CNTV_TVAL_EL0
+ mrs x3, CONTEXTIDR_EL1
+ mrs x3, CPACR_EL1
+ mrs x3, CPTR_EL2
+ mrs x3, CPTR_EL3
+ mrs x3, CSSELR_EL1
+ mrs x3, CTR_EL0
+ mrs x3, CURRENTEL
+ mrs x3, DACR32_EL2
+ mrs x3, DCZID_EL0
+ mrs x3, REVIDR_EL1
+ mrs x3, ESR_EL1
+ mrs x3, ESR_EL2
+ mrs x3, ESR_EL3
+ mrs x3, FAR_EL1
+ mrs x3, FAR_EL2
+ mrs x3, FAR_EL3
+ mrs x3, FPEXC32_EL2
+ mrs x3, HACR_EL2
+ mrs x3, HCR_EL2
+ mrs x3, HPFAR_EL2
+ mrs x3, HSTR_EL2
+ mrs x3, ID_AA64DFR0_EL1
+ mrs x3, ID_AA64DFR1_EL1
+ mrs x3, ID_AA64ISAR0_EL1
+ mrs x3, ID_AA64ISAR1_EL1
+ mrs x3, ID_AA64MMFR0_EL1
+ mrs x3, ID_AA64MMFR1_EL1
+ mrs x3, ID_AA64PFR0_EL1
+ mrs x3, ID_AA64PFR1_EL1
+ mrs x3, IFSR32_EL2
+ mrs x3, ISR_EL1
+ mrs x3, MAIR_EL1
+ mrs x3, MAIR_EL2
+ mrs x3, MAIR_EL3
+ mrs x3, MDCR_EL2
+ mrs x3, MDCR_EL3
+ mrs x3, MIDR_EL1
+ mrs x3, MPIDR_EL1
+ mrs x3, MVFR0_EL1
+ mrs x3, MVFR1_EL1
+ mrs x3, PAR_EL1
+ mrs x3, RVBAR_EL1
+ mrs x3, RVBAR_EL2
+ mrs x3, RVBAR_EL3
+ mrs x3, SCR_EL3
+ mrs x3, SCTLR_EL1
+ mrs x3, SCTLR_EL2
+ mrs x3, SCTLR_EL3
+ mrs x3, SDER32_EL3
+ mrs x3, TCR_EL1
+ mrs x3, TCR_EL2
+ mrs x3, TCR_EL3
+ mrs x3, TEECR32_EL1
+ mrs x3, TEEHBR32_EL1
+ mrs x3, TPIDRRO_EL0
+ mrs x3, TPIDR_EL0
+ mrs x3, TPIDR_EL1
+ mrs x3, TPIDR_EL2
+ mrs x3, TPIDR_EL3
+ mrs x3, TTBR0_EL1
+ mrs x3, TTBR0_EL2
+ mrs x3, TTBR0_EL3
+ mrs x3, TTBR1_EL1
+ mrs x3, VBAR_EL1
+ mrs x3, VBAR_EL2
+ mrs x3, VBAR_EL3
+ mrs x3, VMPIDR_EL2
+ mrs x3, VPIDR_EL2
+ mrs x3, VTCR_EL2
+ mrs x3, VTTBR_EL2
+
+ mrs x3, MDCCSR_EL0
+ mrs x3, MDCCINT_EL1
+ mrs x3, DBGDTR_EL0
+ mrs x3, DBGDTRRX_EL0
+ mrs x3, DBGVCR32_EL2
+ mrs x3, OSDTRRX_EL1
+ mrs x3, MDSCR_EL1
+ mrs x3, OSDTRTX_EL1
+ mrs x3, OSECCR_EL1
+ mrs x3, DBGBVR0_EL1
+ mrs x3, DBGBVR1_EL1
+ mrs x3, DBGBVR2_EL1
+ mrs x3, DBGBVR3_EL1
+ mrs x3, DBGBVR4_EL1
+ mrs x3, DBGBVR5_EL1
+ mrs x3, DBGBVR6_EL1
+ mrs x3, DBGBVR7_EL1
+ mrs x3, DBGBVR8_EL1
+ mrs x3, DBGBVR9_EL1
+ mrs x3, DBGBVR10_EL1
+ mrs x3, DBGBVR11_EL1
+ mrs x3, DBGBVR12_EL1
+ mrs x3, DBGBVR13_EL1
+ mrs x3, DBGBVR14_EL1
+ mrs x3, DBGBVR15_EL1
+ mrs x3, DBGBCR0_EL1
+ mrs x3, DBGBCR1_EL1
+ mrs x3, DBGBCR2_EL1
+ mrs x3, DBGBCR3_EL1
+ mrs x3, DBGBCR4_EL1
+ mrs x3, DBGBCR5_EL1
+ mrs x3, DBGBCR6_EL1
+ mrs x3, DBGBCR7_EL1
+ mrs x3, DBGBCR8_EL1
+ mrs x3, DBGBCR9_EL1
+ mrs x3, DBGBCR10_EL1
+ mrs x3, DBGBCR11_EL1
+ mrs x3, DBGBCR12_EL1
+ mrs x3, DBGBCR13_EL1
+ mrs x3, DBGBCR14_EL1
+ mrs x3, DBGBCR15_EL1
+ mrs x3, DBGWVR0_EL1
+ mrs x3, DBGWVR1_EL1
+ mrs x3, DBGWVR2_EL1
+ mrs x3, DBGWVR3_EL1
+ mrs x3, DBGWVR4_EL1
+ mrs x3, DBGWVR5_EL1
+ mrs x3, DBGWVR6_EL1
+ mrs x3, DBGWVR7_EL1
+ mrs x3, DBGWVR8_EL1
+ mrs x3, DBGWVR9_EL1
+ mrs x3, DBGWVR10_EL1
+ mrs x3, DBGWVR11_EL1
+ mrs x3, DBGWVR12_EL1
+ mrs x3, DBGWVR13_EL1
+ mrs x3, DBGWVR14_EL1
+ mrs x3, DBGWVR15_EL1
+ mrs x3, DBGWCR0_EL1
+ mrs x3, DBGWCR1_EL1
+ mrs x3, DBGWCR2_EL1
+ mrs x3, DBGWCR3_EL1
+ mrs x3, DBGWCR4_EL1
+ mrs x3, DBGWCR5_EL1
+ mrs x3, DBGWCR6_EL1
+ mrs x3, DBGWCR7_EL1
+ mrs x3, DBGWCR8_EL1
+ mrs x3, DBGWCR9_EL1
+ mrs x3, DBGWCR10_EL1
+ mrs x3, DBGWCR11_EL1
+ mrs x3, DBGWCR12_EL1
+ mrs x3, DBGWCR13_EL1
+ mrs x3, DBGWCR14_EL1
+ mrs x3, DBGWCR15_EL1
+ mrs x3, MDRAR_EL1
+ mrs x3, OSLSR_EL1
+ mrs x3, OSDLR_EL1
+ mrs x3, DBGPRCR_EL1
+ mrs x3, DBGCLAIMSET_EL1
+ mrs x3, DBGCLAIMCLR_EL1
+ mrs x3, DBGAUTHSTATUS_EL1
+ mrs x1, S3_2_C15_C6_4
+ mrs x3, s3_3_c11_c1_4
+ mrs x3, S3_3_c11_c1_4
+
+; CHECK: mrs x3, ACTLR_EL1 ; encoding: [0x23,0x10,0x38,0xd5]
+; CHECK: mrs x3, ACTLR_EL2 ; encoding: [0x23,0x10,0x3c,0xd5]
+; CHECK: mrs x3, ACTLR_EL3 ; encoding: [0x23,0x10,0x3e,0xd5]
+; CHECK: mrs x3, AFSR0_EL1 ; encoding: [0x03,0x51,0x38,0xd5]
+; CHECK: mrs x3, AFSR0_EL2 ; encoding: [0x03,0x51,0x3c,0xd5]
+; CHECK: mrs x3, AFSR0_EL3 ; encoding: [0x03,0x51,0x3e,0xd5]
+; CHECK: mrs x3, AIDR_EL1 ; encoding: [0xe3,0x00,0x39,0xd5]
+; CHECK: mrs x3, AFSR1_EL1 ; encoding: [0x23,0x51,0x38,0xd5]
+; CHECK: mrs x3, AFSR1_EL2 ; encoding: [0x23,0x51,0x3c,0xd5]
+; CHECK: mrs x3, AFSR1_EL3 ; encoding: [0x23,0x51,0x3e,0xd5]
+; CHECK: mrs x3, AMAIR_EL1 ; encoding: [0x03,0xa3,0x38,0xd5]
+; CHECK: mrs x3, AMAIR_EL2 ; encoding: [0x03,0xa3,0x3c,0xd5]
+; CHECK: mrs x3, AMAIR_EL3 ; encoding: [0x03,0xa3,0x3e,0xd5]
+; CHECK: mrs x3, CCSIDR_EL1 ; encoding: [0x03,0x00,0x39,0xd5]
+; CHECK: mrs x3, CLIDR_EL1 ; encoding: [0x23,0x00,0x39,0xd5]
+; CHECK: mrs x3, CNTFRQ_EL0 ; encoding: [0x03,0xe0,0x3b,0xd5]
+; CHECK: mrs x3, CNTHCTL_EL2 ; encoding: [0x03,0xe1,0x3c,0xd5]
+; CHECK: mrs x3, CNTHP_CTL_EL2 ; encoding: [0x23,0xe2,0x3c,0xd5]
+; CHECK: mrs x3, CNTHP_CVAL_EL2 ; encoding: [0x43,0xe2,0x3c,0xd5]
+; CHECK: mrs x3, CNTHP_TVAL_EL2 ; encoding: [0x03,0xe2,0x3c,0xd5]
+; CHECK: mrs x3, CNTKCTL_EL1 ; encoding: [0x03,0xe1,0x38,0xd5]
+; CHECK: mrs x3, CNTPCT_EL0 ; encoding: [0x23,0xe0,0x3b,0xd5]
+; CHECK: mrs x3, CNTP_CTL_EL0 ; encoding: [0x23,0xe2,0x3b,0xd5]
+; CHECK: mrs x3, CNTP_CVAL_EL0 ; encoding: [0x43,0xe2,0x3b,0xd5]
+; CHECK: mrs x3, CNTP_TVAL_EL0 ; encoding: [0x03,0xe2,0x3b,0xd5]
+; CHECK: mrs x3, CNTVCT_EL0 ; encoding: [0x43,0xe0,0x3b,0xd5]
+; CHECK: mrs x3, CNTVOFF_EL2 ; encoding: [0x63,0xe0,0x3c,0xd5]
+; CHECK: mrs x3, CNTV_CTL_EL0 ; encoding: [0x23,0xe3,0x3b,0xd5]
+; CHECK: mrs x3, CNTV_CVAL_EL0 ; encoding: [0x43,0xe3,0x3b,0xd5]
+; CHECK: mrs x3, CNTV_TVAL_EL0 ; encoding: [0x03,0xe3,0x3b,0xd5]
+; CHECK: mrs x3, CONTEXTIDR_EL1 ; encoding: [0x23,0xd0,0x38,0xd5]
+; CHECK: mrs x3, CPACR_EL1 ; encoding: [0x43,0x10,0x38,0xd5]
+; CHECK: mrs x3, CPTR_EL2 ; encoding: [0x43,0x11,0x3c,0xd5]
+; CHECK: mrs x3, CPTR_EL3 ; encoding: [0x43,0x11,0x3e,0xd5]
+; CHECK: mrs x3, CSSELR_EL1 ; encoding: [0x03,0x00,0x3a,0xd5]
+; CHECK: mrs x3, CTR_EL0 ; encoding: [0x23,0x00,0x3b,0xd5]
+; CHECK: mrs x3, CURRENTEL ; encoding: [0x43,0x42,0x38,0xd5]
+; CHECK: mrs x3, DACR32_EL2 ; encoding: [0x03,0x30,0x3c,0xd5]
+; CHECK: mrs x3, DCZID_EL0 ; encoding: [0xe3,0x00,0x3b,0xd5]
+; CHECK: mrs x3, REVIDR_EL1 ; encoding: [0xc3,0x00,0x38,0xd5]
+; CHECK: mrs x3, ESR_EL1 ; encoding: [0x03,0x52,0x38,0xd5]
+; CHECK: mrs x3, ESR_EL2 ; encoding: [0x03,0x52,0x3c,0xd5]
+; CHECK: mrs x3, ESR_EL3 ; encoding: [0x03,0x52,0x3e,0xd5]
+; CHECK: mrs x3, FAR_EL1 ; encoding: [0x03,0x60,0x38,0xd5]
+; CHECK: mrs x3, FAR_EL2 ; encoding: [0x03,0x60,0x3c,0xd5]
+; CHECK: mrs x3, FAR_EL3 ; encoding: [0x03,0x60,0x3e,0xd5]
+; CHECK: mrs x3, FPEXC32_EL2 ; encoding: [0x03,0x53,0x3c,0xd5]
+; CHECK: mrs x3, HACR_EL2 ; encoding: [0xe3,0x11,0x3c,0xd5]
+; CHECK: mrs x3, HCR_EL2 ; encoding: [0x03,0x11,0x3c,0xd5]
+; CHECK: mrs x3, HPFAR_EL2 ; encoding: [0x83,0x60,0x3c,0xd5]
+; CHECK: mrs x3, HSTR_EL2 ; encoding: [0x63,0x11,0x3c,0xd5]
+; CHECK: mrs x3, ID_AA64DFR0_EL1 ; encoding: [0x03,0x05,0x38,0xd5]
+; CHECK: mrs x3, ID_AA64DFR1_EL1 ; encoding: [0x23,0x05,0x38,0xd5]
+; CHECK: mrs x3, ID_AA64ISAR0_EL1 ; encoding: [0x03,0x06,0x38,0xd5]
+; CHECK: mrs x3, ID_AA64ISAR1_EL1 ; encoding: [0x23,0x06,0x38,0xd5]
+; CHECK: mrs x3, ID_AA64MMFR0_EL1 ; encoding: [0x03,0x07,0x38,0xd5]
+; CHECK: mrs x3, ID_AA64MMFR1_EL1 ; encoding: [0x23,0x07,0x38,0xd5]
+; CHECK: mrs x3, ID_AA64PFR0_EL1 ; encoding: [0x03,0x04,0x38,0xd5]
+; CHECK: mrs x3, ID_AA64PFR1_EL1 ; encoding: [0x23,0x04,0x38,0xd5]
+; CHECK: mrs x3, IFSR32_EL2 ; encoding: [0x23,0x50,0x3c,0xd5]
+; CHECK: mrs x3, ISR_EL1 ; encoding: [0x03,0xc1,0x38,0xd5]
+; CHECK: mrs x3, MAIR_EL1 ; encoding: [0x03,0xa2,0x38,0xd5]
+; CHECK: mrs x3, MAIR_EL2 ; encoding: [0x03,0xa2,0x3c,0xd5]
+; CHECK: mrs x3, MAIR_EL3 ; encoding: [0x03,0xa2,0x3e,0xd5]
+; CHECK: mrs x3, MDCR_EL2 ; encoding: [0x23,0x11,0x3c,0xd5]
+; CHECK: mrs x3, MDCR_EL3 ; encoding: [0x23,0x13,0x3e,0xd5]
+; CHECK: mrs x3, MIDR_EL1 ; encoding: [0x03,0x00,0x38,0xd5]
+; CHECK: mrs x3, MPIDR_EL1 ; encoding: [0xa3,0x00,0x38,0xd5]
+; CHECK: mrs x3, MVFR0_EL1 ; encoding: [0x03,0x03,0x38,0xd5]
+; CHECK: mrs x3, MVFR1_EL1 ; encoding: [0x23,0x03,0x38,0xd5]
+; CHECK: mrs x3, PAR_EL1 ; encoding: [0x03,0x74,0x38,0xd5]
+; CHECK: mrs x3, RVBAR_EL1 ; encoding: [0x23,0xc0,0x38,0xd5]
+; CHECK: mrs x3, RVBAR_EL2 ; encoding: [0x23,0xc0,0x3c,0xd5]
+; CHECK: mrs x3, RVBAR_EL3 ; encoding: [0x23,0xc0,0x3e,0xd5]
+; CHECK: mrs x3, SCR_EL3 ; encoding: [0x03,0x11,0x3e,0xd5]
+; CHECK: mrs x3, SCTLR_EL1 ; encoding: [0x03,0x10,0x38,0xd5]
+; CHECK: mrs x3, SCTLR_EL2 ; encoding: [0x03,0x10,0x3c,0xd5]
+; CHECK: mrs x3, SCTLR_EL3 ; encoding: [0x03,0x10,0x3e,0xd5]
+; CHECK: mrs x3, SDER32_EL3 ; encoding: [0x23,0x11,0x3e,0xd5]
+; CHECK: mrs x3, TCR_EL1 ; encoding: [0x43,0x20,0x38,0xd5]
+; CHECK: mrs x3, TCR_EL2 ; encoding: [0x43,0x20,0x3c,0xd5]
+; CHECK: mrs x3, TCR_EL3 ; encoding: [0x43,0x20,0x3e,0xd5]
+; CHECK: mrs x3, TEECR32_EL1 ; encoding: [0x03,0x00,0x32,0xd5]
+; CHECK: mrs x3, TEEHBR32_EL1 ; encoding: [0x03,0x10,0x32,0xd5]
+; CHECK: mrs x3, TPIDRRO_EL0 ; encoding: [0x63,0xd0,0x3b,0xd5]
+; CHECK: mrs x3, TPIDR_EL0 ; encoding: [0x43,0xd0,0x3b,0xd5]
+; CHECK: mrs x3, TPIDR_EL1 ; encoding: [0x83,0xd0,0x38,0xd5]
+; CHECK: mrs x3, TPIDR_EL2 ; encoding: [0x43,0xd0,0x3c,0xd5]
+; CHECK: mrs x3, TPIDR_EL3 ; encoding: [0x43,0xd0,0x3e,0xd5]
+; CHECK: mrs x3, TTBR0_EL1 ; encoding: [0x03,0x20,0x38,0xd5]
+; CHECK: mrs x3, TTBR0_EL2 ; encoding: [0x03,0x20,0x3c,0xd5]
+; CHECK: mrs x3, TTBR0_EL3 ; encoding: [0x03,0x20,0x3e,0xd5]
+; CHECK: mrs x3, TTBR1_EL1 ; encoding: [0x23,0x20,0x38,0xd5]
+; CHECK: mrs x3, VBAR_EL1 ; encoding: [0x03,0xc0,0x38,0xd5]
+; CHECK: mrs x3, VBAR_EL2 ; encoding: [0x03,0xc0,0x3c,0xd5]
+; CHECK: mrs x3, VBAR_EL3 ; encoding: [0x03,0xc0,0x3e,0xd5]
+; CHECK: mrs x3, VMPIDR_EL2 ; encoding: [0xa3,0x00,0x3c,0xd5]
+; CHECK: mrs x3, VPIDR_EL2 ; encoding: [0x03,0x00,0x3c,0xd5]
+; CHECK: mrs x3, VTCR_EL2 ; encoding: [0x43,0x21,0x3c,0xd5]
+; CHECK: mrs x3, VTTBR_EL2 ; encoding: [0x03,0x21,0x3c,0xd5]
+; CHECK: mrs x3, MDCCSR_EL0 ; encoding: [0x03,0x01,0x33,0xd5]
+; CHECK: mrs x3, MDCCINT_EL1 ; encoding: [0x03,0x02,0x30,0xd5]
+; CHECK: mrs x3, DBGDTR_EL0 ; encoding: [0x03,0x04,0x33,0xd5]
+; CHECK: mrs x3, DBGDTRRX_EL0 ; encoding: [0x03,0x05,0x33,0xd5]
+; CHECK: mrs x3, DBGVCR32_EL2 ; encoding: [0x03,0x07,0x34,0xd5]
+; CHECK: mrs x3, OSDTRRX_EL1 ; encoding: [0x43,0x00,0x30,0xd5]
+; CHECK: mrs x3, MDSCR_EL1 ; encoding: [0x43,0x02,0x30,0xd5]
+; CHECK: mrs x3, OSDTRTX_EL1 ; encoding: [0x43,0x03,0x30,0xd5]
+; CHECK: mrs x3, OSECCR_EL1 ; encoding: [0x43,0x06,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR0_EL1 ; encoding: [0x83,0x00,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR1_EL1 ; encoding: [0x83,0x01,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR2_EL1 ; encoding: [0x83,0x02,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR3_EL1 ; encoding: [0x83,0x03,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR4_EL1 ; encoding: [0x83,0x04,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR5_EL1 ; encoding: [0x83,0x05,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR6_EL1 ; encoding: [0x83,0x06,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR7_EL1 ; encoding: [0x83,0x07,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR8_EL1 ; encoding: [0x83,0x08,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR9_EL1 ; encoding: [0x83,0x09,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR10_EL1 ; encoding: [0x83,0x0a,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR11_EL1 ; encoding: [0x83,0x0b,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR12_EL1 ; encoding: [0x83,0x0c,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR13_EL1 ; encoding: [0x83,0x0d,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR14_EL1 ; encoding: [0x83,0x0e,0x30,0xd5]
+; CHECK: mrs x3, DBGBVR15_EL1 ; encoding: [0x83,0x0f,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR0_EL1 ; encoding: [0xa3,0x00,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR1_EL1 ; encoding: [0xa3,0x01,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR2_EL1 ; encoding: [0xa3,0x02,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR3_EL1 ; encoding: [0xa3,0x03,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR4_EL1 ; encoding: [0xa3,0x04,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR5_EL1 ; encoding: [0xa3,0x05,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR6_EL1 ; encoding: [0xa3,0x06,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR7_EL1 ; encoding: [0xa3,0x07,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR8_EL1 ; encoding: [0xa3,0x08,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR9_EL1 ; encoding: [0xa3,0x09,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR10_EL1 ; encoding: [0xa3,0x0a,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR11_EL1 ; encoding: [0xa3,0x0b,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR12_EL1 ; encoding: [0xa3,0x0c,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR13_EL1 ; encoding: [0xa3,0x0d,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR14_EL1 ; encoding: [0xa3,0x0e,0x30,0xd5]
+; CHECK: mrs x3, DBGBCR15_EL1 ; encoding: [0xa3,0x0f,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR0_EL1 ; encoding: [0xc3,0x00,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR1_EL1 ; encoding: [0xc3,0x01,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR2_EL1 ; encoding: [0xc3,0x02,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR3_EL1 ; encoding: [0xc3,0x03,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR4_EL1 ; encoding: [0xc3,0x04,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR5_EL1 ; encoding: [0xc3,0x05,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR6_EL1 ; encoding: [0xc3,0x06,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR7_EL1 ; encoding: [0xc3,0x07,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR8_EL1 ; encoding: [0xc3,0x08,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR9_EL1 ; encoding: [0xc3,0x09,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR10_EL1 ; encoding: [0xc3,0x0a,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR11_EL1 ; encoding: [0xc3,0x0b,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR12_EL1 ; encoding: [0xc3,0x0c,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR13_EL1 ; encoding: [0xc3,0x0d,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR14_EL1 ; encoding: [0xc3,0x0e,0x30,0xd5]
+; CHECK: mrs x3, DBGWVR15_EL1 ; encoding: [0xc3,0x0f,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR0_EL1 ; encoding: [0xe3,0x00,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR1_EL1 ; encoding: [0xe3,0x01,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR2_EL1 ; encoding: [0xe3,0x02,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR3_EL1 ; encoding: [0xe3,0x03,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR4_EL1 ; encoding: [0xe3,0x04,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR5_EL1 ; encoding: [0xe3,0x05,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR6_EL1 ; encoding: [0xe3,0x06,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR7_EL1 ; encoding: [0xe3,0x07,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR8_EL1 ; encoding: [0xe3,0x08,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR9_EL1 ; encoding: [0xe3,0x09,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR10_EL1 ; encoding: [0xe3,0x0a,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR11_EL1 ; encoding: [0xe3,0x0b,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR12_EL1 ; encoding: [0xe3,0x0c,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR13_EL1 ; encoding: [0xe3,0x0d,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR14_EL1 ; encoding: [0xe3,0x0e,0x30,0xd5]
+; CHECK: mrs x3, DBGWCR15_EL1 ; encoding: [0xe3,0x0f,0x30,0xd5]
+; CHECK: mrs x3, MDRAR_EL1 ; encoding: [0x03,0x10,0x30,0xd5]
+; CHECK: mrs x3, OSLSR_EL1 ; encoding: [0x83,0x11,0x30,0xd5]
+; CHECK: mrs x3, OSDLR_EL1 ; encoding: [0x83,0x13,0x30,0xd5]
+; CHECK: mrs x3, DBGPRCR_EL1 ; encoding: [0x83,0x14,0x30,0xd5]
+; CHECK: mrs x3, DBGCLAIMSET_EL1 ; encoding: [0xc3,0x78,0x30,0xd5]
+; CHECK: mrs x3, DBGCLAIMCLR_EL1 ; encoding: [0xc3,0x79,0x30,0xd5]
+; CHECK: mrs x3, DBGAUTHSTATUS_EL1 ; encoding: [0xc3,0x7e,0x30,0xd5]
+; CHECK: mrs x1, S3_2_C15_C6_4 ; encoding: [0x81,0xf6,0x3a,0xd5]
+; CHECK: mrs x3, S3_3_C11_C1_4 ; encoding: [0x83,0xb1,0x3b,0xd5]
+; CHECK: mrs x3, S3_3_C11_C1_4 ; encoding: [0x83,0xb1,0x3b,0xd5]
+
+ msr RMR_EL3, x0
+ msr RMR_EL2, x0
+ msr RMR_EL1, x0
+ msr OSLAR_EL1, x3
+ msr DBGDTRTX_EL0, x3
+
+; CHECK: msr RMR_EL3, x0 ; encoding: [0x40,0xc0,0x1e,0xd5]
+; CHECK: msr RMR_EL2, x0 ; encoding: [0x40,0xc0,0x1c,0xd5]
+; CHECK: msr RMR_EL1, x0 ; encoding: [0x40,0xc0,0x18,0xd5]
+; CHECK: msr OSLAR_EL1, x3 ; encoding: [0x83,0x10,0x10,0xd5]
+; CHECK: msr DBGDTRTX_EL0, x3 ; encoding: [0x03,0x05,0x13,0xd5]
+
+ mrs x0, ID_PFR0_EL1
+ mrs x0, ID_PFR1_EL1
+ mrs x0, ID_DFR0_EL1
+ mrs x0, ID_AFR0_EL1
+ mrs x0, ID_ISAR0_EL1
+ mrs x0, ID_ISAR1_EL1
+ mrs x0, ID_ISAR2_EL1
+ mrs x0, ID_ISAR3_EL1
+ mrs x0, ID_ISAR4_EL1
+ mrs x0, ID_ISAR5_EL1
+ mrs x0, AFSR1_EL1
+ mrs x0, AFSR0_EL1
+ mrs x0, REVIDR_EL1
+; CHECK: mrs x0, ID_PFR0_EL1 ; encoding: [0x00,0x01,0x38,0xd5]
+; CHECK: mrs x0, ID_PFR1_EL1 ; encoding: [0x20,0x01,0x38,0xd5]
+; CHECK: mrs x0, ID_DFR0_EL1 ; encoding: [0x40,0x01,0x38,0xd5]
+; CHECK: mrs x0, ID_AFR0_EL1 ; encoding: [0x60,0x01,0x38,0xd5]
+; CHECK: mrs x0, ID_ISAR0_EL1 ; encoding: [0x00,0x02,0x38,0xd5]
+; CHECK: mrs x0, ID_ISAR1_EL1 ; encoding: [0x20,0x02,0x38,0xd5]
+; CHECK: mrs x0, ID_ISAR2_EL1 ; encoding: [0x40,0x02,0x38,0xd5]
+; CHECK: mrs x0, ID_ISAR3_EL1 ; encoding: [0x60,0x02,0x38,0xd5]
+; CHECK: mrs x0, ID_ISAR4_EL1 ; encoding: [0x80,0x02,0x38,0xd5]
+; CHECK: mrs x0, ID_ISAR5_EL1 ; encoding: [0xa0,0x02,0x38,0xd5]
+; CHECK: mrs x0, AFSR1_EL1 ; encoding: [0x20,0x51,0x38,0xd5]
+; CHECK: mrs x0, AFSR0_EL1 ; encoding: [0x00,0x51,0x38,0xd5]
+; CHECK: mrs x0, REVIDR_EL1 ; encoding: [0xc0,0x00,0x38,0xd5]
diff --git a/test/MC/AArch64/arm64-target-specific-sysreg.s b/test/MC/AArch64/arm64-target-specific-sysreg.s
new file mode 100644
index 000000000000..05cea3ac2da5
--- /dev/null
+++ b/test/MC/AArch64/arm64-target-specific-sysreg.s
@@ -0,0 +1,10 @@
+// RUN: not llvm-mc -triple arm64 -mcpu=generic -show-encoding < %s 2>&1 | \
+// RUN: FileCheck %s --check-prefix=CHECK-GENERIC
+//
+// RUN: llvm-mc -triple arm64 -mcpu=cyclone -show-encoding < %s 2>&1 | \
+// RUN: FileCheck %s --check-prefix=CHECK-CYCLONE
+
+msr CPM_IOACC_CTL_EL3, x0
+
+// CHECK-GENERIC: error: expected writable system register or pstate
+// CHECK-CYCLONE: msr CPM_IOACC_CTL_EL3, x0 // encoding: [0x00,0xf2,0x1f,0xd5]
diff --git a/test/MC/AArch64/arm64-tls-modifiers-darwin.s b/test/MC/AArch64/arm64-tls-modifiers-darwin.s
new file mode 100644
index 000000000000..8ff07cd86b2b
--- /dev/null
+++ b/test/MC/AArch64/arm64-tls-modifiers-darwin.s
@@ -0,0 +1,13 @@
+; RUN: llvm-mc -triple=arm64-apple-ios7.0 %s -o - | FileCheck %s
+; RUN: llvm-mc -triple=arm64-apple-ios7.0 -filetype=obj %s -o - | llvm-objdump -r - | FileCheck %s --check-prefix=CHECK-OBJ
+
+ adrp x2, _var@TLVPPAGE
+ ldr x0, [x15, _var@TLVPPAGEOFF]
+ add x30, x0, _var@TLVPPAGEOFF
+; CHECK: adrp x2, _var@TLVPPAG
+; CHECK: ldr x0, [x15, _var@TLVPPAGEOFF]
+; CHECK: add x30, x0, _var@TLVPPAGEOFF
+
+; CHECK-OBJ: 8 ARM64_RELOC_TLVP_LOAD_PAGEOFF12 _var
+; CHECK-OBJ: 4 ARM64_RELOC_TLVP_LOAD_PAGEOFF12 _var
+; CHECK-OBJ: 0 ARM64_RELOC_TLVP_LOAD_PAGE21 _var
diff --git a/test/MC/AArch64/arm64-tls-relocs.s b/test/MC/AArch64/arm64-tls-relocs.s
new file mode 100644
index 000000000000..96c2b55c36d8
--- /dev/null
+++ b/test/MC/AArch64/arm64-tls-relocs.s
@@ -0,0 +1,320 @@
+// RUN: llvm-mc -triple=arm64-none-linux-gnu -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj < %s -o - | \
+// RUN: llvm-readobj -r -t | FileCheck --check-prefix=CHECK-ELF %s
+
+
+////////////////////////////////////////////////////////////////////////////////
+// TLS initial-exec forms
+////////////////////////////////////////////////////////////////////////////////
+
+ movz x15, #:gottprel_g1:var
+// CHECK: movz x15, #:gottprel_g1:var // encoding: [0bAAA01111,A,0b101AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g1:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF: {{0x[0-9A-F]+}} R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 [[VARSYM:[^ ]+]]
+
+
+ movk x13, #:gottprel_g0_nc:var
+// CHECK: movk x13, #:gottprel_g0_nc:var // encoding: [0bAAA01101,A,0b100AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g0_nc:var, kind: fixup_aarch64_movw
+
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC [[VARSYM]]
+
+ adrp x11, :gottprel:var
+ ldr x10, [x0, #:gottprel_lo12:var]
+ ldr x9, :gottprel:var
+// CHECK: adrp x11, :gottprel:var // encoding: [0x0b'A',A,A,0x90'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_aarch64_pcrel_adrp_imm21
+// CHECK: ldr x10, [x0, :gottprel_lo12:var] // encoding: [0x0a,0bAAAAAA00,0b01AAAAAA,0xf9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: ldr x9, :gottprel:var // encoding: [0bAAA01001,A,A,0x58]
+// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_aarch64_ldr_pcrel_imm19
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 [[VARSYM]]
+
+
+////////////////////////////////////////////////////////////////////////////////
+// TLS local-exec forms
+////////////////////////////////////////////////////////////////////////////////
+
+ movz x3, #:tprel_g2:var
+ movn x4, #:tprel_g2:var
+// CHECK: movz x3, #:tprel_g2:var // encoding: [0bAAA00011,A,0b110AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_aarch64_movw
+// CHECK: movn x4, #:tprel_g2:var // encoding: [0bAAA00100,A,0b110AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G2 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G2 [[VARSYM]]
+
+
+ movz x5, #:tprel_g1:var
+ movn x6, #:tprel_g1:var
+ movz w7, #:tprel_g1:var
+// CHECK: movz x5, #:tprel_g1:var // encoding: [0bAAA00101,A,0b101AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movn x6, #:tprel_g1:var // encoding: [0bAAA00110,A,0b101AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movz w7, #:tprel_g1:var // encoding: [0bAAA00111,A,0b101AAAAA,0x12]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G1 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G1 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G1 [[VARSYM]]
+
+
+ movk x9, #:tprel_g1_nc:var
+ movk w10, #:tprel_g1_nc:var
+// CHECK: movk x9, #:tprel_g1_nc:var // encoding: [0bAAA01001,A,0b101AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w10, #:tprel_g1_nc:var // encoding: [0bAAA01010,A,0b101AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G1_NC [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G1_NC [[VARSYM]]
+
+
+ movz x11, #:tprel_g0:var
+ movn x12, #:tprel_g0:var
+ movz w13, #:tprel_g0:var
+// CHECK: movz x11, #:tprel_g0:var // encoding: [0bAAA01011,A,0b100AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movn x12, #:tprel_g0:var // encoding: [0bAAA01100,A,0b100AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movz w13, #:tprel_g0:var // encoding: [0bAAA01101,A,0b100AAAAA,0x12]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G0 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G0 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G0 [[VARSYM]]
+
+
+ movk x15, #:tprel_g0_nc:var
+ movk w16, #:tprel_g0_nc:var
+// CHECK: movk x15, #:tprel_g0_nc:var // encoding: [0bAAA01111,A,0b100AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w16, #:tprel_g0_nc:var // encoding: [0bAAA10000,A,0b100AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G0_NC [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_MOVW_TPREL_G0_NC [[VARSYM]]
+
+
+ add x21, x22, #:tprel_lo12:var
+// CHECK: add x21, x22, :tprel_lo12:var // encoding: [0xd5,0bAAAAAA10,0b00AAAAAA,0x91]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_add_imm12
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_ADD_TPREL_LO12 [[VARSYM]]
+
+
+ add x25, x26, #:tprel_lo12_nc:var
+// CHECK: add x25, x26, :tprel_lo12_nc:var // encoding: [0x59,0bAAAAAA11,0b00AAAAAA,0x91]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_add_imm12
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_ADD_TPREL_LO12_NC [[VARSYM]]
+
+
+ ldrb w29, [x30, #:tprel_lo12:var]
+ ldrsb x29, [x28, #:tprel_lo12_nc:var]
+// CHECK: ldrb w29, [x30, :tprel_lo12:var] // encoding: [0xdd,0bAAAAAA11,0b01AAAAAA,0x39]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale1
+// CHECK: ldrsb x29, [x28, :tprel_lo12_nc:var] // encoding: [0x9d,0bAAAAAA11,0b10AAAAAA,0x39]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale1
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST8_TPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC [[VARSYM]]
+
+
+ strh w27, [x26, #:tprel_lo12:var]
+ ldrsh x25, [x24, #:tprel_lo12_nc:var]
+// CHECK: strh w27, [x26, :tprel_lo12:var] // encoding: [0x5b,0bAAAAAA11,0b00AAAAAA,0x79]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale2
+// CHECK: ldrsh x25, [x24, :tprel_lo12_nc:var] // encoding: [0x19,0bAAAAAA11,0b10AAAAAA,0x79]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale2
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST16_TPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC [[VARSYM]]
+
+
+ ldr w23, [x22, #:tprel_lo12:var]
+ ldrsw x21, [x20, #:tprel_lo12_nc:var]
+// CHECK: ldr w23, [x22, :tprel_lo12:var] // encoding: [0xd7,0bAAAAAA10,0b01AAAAAA,0xb9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale4
+// CHECK: ldrsw x21, [x20, :tprel_lo12_nc:var] // encoding: [0x95,0bAAAAAA10,0b10AAAAAA,0xb9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale4
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST32_TPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC [[VARSYM]]
+
+ ldr x19, [x18, #:tprel_lo12:var]
+ str x17, [x16, #:tprel_lo12_nc:var]
+// CHECK: ldr x19, [x18, :tprel_lo12:var] // encoding: [0x53,0bAAAAAA10,0b01AAAAAA,0xf9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: str x17, [x16, :tprel_lo12_nc:var] // encoding: [0x11,0bAAAAAA10,0b00AAAAAA,0xf9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale8
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST64_TPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC [[VARSYM]]
+
+
+////////////////////////////////////////////////////////////////////////////////
+// TLS local-dynamic forms
+////////////////////////////////////////////////////////////////////////////////
+
+ movz x3, #:dtprel_g2:var
+ movn x4, #:dtprel_g2:var
+// CHECK: movz x3, #:dtprel_g2:var // encoding: [0bAAA00011,A,0b110AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_aarch64_movw
+// CHECK: movn x4, #:dtprel_g2:var // encoding: [0bAAA00100,A,0b110AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G2 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G2 [[VARSYM]]
+
+
+ movz x5, #:dtprel_g1:var
+ movn x6, #:dtprel_g1:var
+ movz w7, #:dtprel_g1:var
+// CHECK: movz x5, #:dtprel_g1:var // encoding: [0bAAA00101,A,0b101AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movn x6, #:dtprel_g1:var // encoding: [0bAAA00110,A,0b101AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movz w7, #:dtprel_g1:var // encoding: [0bAAA00111,A,0b101AAAAA,0x12]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G1 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G1 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G1 [[VARSYM]]
+
+
+ movk x9, #:dtprel_g1_nc:var
+ movk w10, #:dtprel_g1_nc:var
+// CHECK: movk x9, #:dtprel_g1_nc:var // encoding: [0bAAA01001,A,0b101AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w10, #:dtprel_g1_nc:var // encoding: [0bAAA01010,A,0b101AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC [[VARSYM]]
+
+
+ movz x11, #:dtprel_g0:var
+ movn x12, #:dtprel_g0:var
+ movz w13, #:dtprel_g0:var
+// CHECK: movz x11, #:dtprel_g0:var // encoding: [0bAAA01011,A,0b100AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movn x12, #:dtprel_g0:var // encoding: [0bAAA01100,A,0b100AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movz w13, #:dtprel_g0:var // encoding: [0bAAA01101,A,0b100AAAAA,0x12]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G0 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G0 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G0 [[VARSYM]]
+
+
+ movk x15, #:dtprel_g0_nc:var
+ movk w16, #:dtprel_g0_nc:var
+// CHECK: movk x15, #:dtprel_g0_nc:var // encoding: [0bAAA01111,A,0b100AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w16, #:dtprel_g0_nc:var // encoding: [0bAAA10000,A,0b100AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_aarch64_movw
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC [[VARSYM]]
+
+
+ add x21, x22, #:dtprel_lo12:var
+// CHECK: add x21, x22, :dtprel_lo12:var // encoding: [0xd5,0bAAAAAA10,0b00AAAAAA,0x91]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_add_imm12
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_ADD_DTPREL_LO12 [[VARSYM]]
+
+
+ add x25, x26, #:dtprel_lo12_nc:var
+// CHECK: add x25, x26, :dtprel_lo12_nc:var // encoding: [0x59,0bAAAAAA11,0b00AAAAAA,0x91]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_add_imm12
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC [[VARSYM]]
+
+
+ ldrb w29, [x30, #:dtprel_lo12:var]
+ ldrsb x29, [x28, #:dtprel_lo12_nc:var]
+// CHECK: ldrb w29, [x30, :dtprel_lo12:var] // encoding: [0xdd,0bAAAAAA11,0b01AAAAAA,0x39]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale1
+// CHECK: ldrsb x29, [x28, :dtprel_lo12_nc:var] // encoding: [0x9d,0bAAAAAA11,0b10AAAAAA,0x39]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale1
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST8_DTPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC [[VARSYM]]
+
+
+ strh w27, [x26, #:dtprel_lo12:var]
+ ldrsh x25, [x24, #:dtprel_lo12_nc:var]
+// CHECK: strh w27, [x26, :dtprel_lo12:var] // encoding: [0x5b,0bAAAAAA11,0b00AAAAAA,0x79]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale2
+// CHECK: ldrsh x25, [x24, :dtprel_lo12_nc:var] // encoding: [0x19,0bAAAAAA11,0b10AAAAAA,0x79]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale2
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST16_DTPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC [[VARSYM]]
+
+
+ ldr w23, [x22, #:dtprel_lo12:var]
+ ldrsw x21, [x20, #:dtprel_lo12_nc:var]
+// CHECK: ldr w23, [x22, :dtprel_lo12:var] // encoding: [0xd7,0bAAAAAA10,0b01AAAAAA,0xb9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale4
+// CHECK: ldrsw x21, [x20, :dtprel_lo12_nc:var] // encoding: [0x95,0bAAAAAA10,0b10AAAAAA,0xb9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale4
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST32_DTPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC [[VARSYM]]
+
+ ldr x19, [x18, #:dtprel_lo12:var]
+ str x17, [x16, #:dtprel_lo12_nc:var]
+// CHECK: ldr x19, [x18, :dtprel_lo12:var] // encoding: [0x53,0bAAAAAA10,0b01AAAAAA,0xf9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: str x17, [x16, :dtprel_lo12_nc:var] // encoding: [0x11,0bAAAAAA10,0b00AAAAAA,0xf9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale8
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST64_DTPREL_LO12 [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC [[VARSYM]]
+
+////////////////////////////////////////////////////////////////////////////////
+// TLS descriptor forms
+////////////////////////////////////////////////////////////////////////////////
+
+ adrp x8, :tlsdesc:var
+ ldr x7, [x6, #:tlsdesc_lo12:var]
+ add x5, x4, #:tlsdesc_lo12:var
+ .tlsdesccall var
+ blr x3
+
+// CHECK: adrp x8, :tlsdesc:var // encoding: [0x08'A',A,A,0x90'A']
+// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc:var, kind: fixup_aarch64_pcrel_adrp_imm21
+// CHECK: ldr x7, [x6, :tlsdesc_lo12:var] // encoding: [0xc7,0bAAAAAA00,0b01AAAAAA,0xf9]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: add x5, x4, :tlsdesc_lo12:var // encoding: [0x85,0bAAAAAA00,0b00AAAAAA,0x91]
+// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_aarch64_add_imm12
+// CHECK: .tlsdesccall var // encoding: []
+// CHECK-NEXT: // fixup A - offset: 0, value: var, kind: fixup_aarch64_tlsdesc_call
+// CHECK: blr x3 // encoding: [0x60,0x00,0x3f,0xd6]
+
+
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSDESC_ADR_PAGE [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSDESC_LD64_LO12_NC [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSDESC_ADD_LO12_NC [[VARSYM]]
+// CHECK-ELF-NEXT: {{0x[0-9A-F]+}} R_AARCH64_TLSDESC_CALL [[VARSYM]]
+
+ // Make sure symbol 5 has type STT_TLS:
+
+// CHECK-ELF: Symbols [
+// CHECK-ELF: Symbol {
+// CHECK-ELF: Name: var
+// CHECK-ELF-NEXT: Value:
+// CHECK-ELF-NEXT: Size:
+// CHECK-ELF-NEXT: Binding: Global
+// CHECK-ELF-NEXT: Type: TLS
diff --git a/test/MC/AArch64/arm64-v128_lo-diagnostics.s b/test/MC/AArch64/arm64-v128_lo-diagnostics.s
new file mode 100644
index 000000000000..ffe29cfbed3a
--- /dev/null
+++ b/test/MC/AArch64/arm64-v128_lo-diagnostics.s
@@ -0,0 +1,11 @@
+// RUN: not llvm-mc -triple arm64 -mattr=neon %s 2> %t > /dev/null
+// RUN: FileCheck %s < %t
+
+ sqrdmulh v0.8h, v1.8h, v16.h[0]
+// CHECK: error: invalid operand for instruction
+
+ sqrdmulh h0, h1, v16.h[0]
+// CHECK: error: invalid operand for instruction
+
+ sqdmull2 v0.4h, v1.8h, v16.h[0]
+// CHECK: error: invalid operand for instruction
diff --git a/test/MC/AArch64/arm64-variable-exprs.s b/test/MC/AArch64/arm64-variable-exprs.s
new file mode 100644
index 000000000000..01204425c794
--- /dev/null
+++ b/test/MC/AArch64/arm64-variable-exprs.s
@@ -0,0 +1,40 @@
+// RUN: llvm-mc -triple arm64-apple-darwin10 %s -filetype=obj -o %t.o
+
+.data
+
+ .long 0
+a:
+ .long 0
+b = a
+
+c: .long b
+
+d2 = d
+.globl d2
+d3 = d + 4
+.globl d3
+
+e = a + 4
+
+g:
+f = g
+ .long 0
+
+ .long b
+ .long e
+ .long a + 4
+ .long d
+ .long d2
+ .long d3
+ .long f
+ .long g
+
+///
+ .text
+t0:
+Lt0_a:
+ .long 0
+
+ .section __DWARF,__debug_frame,regular,debug
+Lt1 = Lt0_a
+ .long Lt1
diff --git a/test/MC/AArch64/arm64-vector-lists.s b/test/MC/AArch64/arm64-vector-lists.s
new file mode 100644
index 000000000000..a9b2d198e868
--- /dev/null
+++ b/test/MC/AArch64/arm64-vector-lists.s
@@ -0,0 +1,20 @@
+// RUN: not llvm-mc -triple arm64 -mattr=neon -show-encoding < %s 2>%t | FileCheck %s
+// RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
+
+ ST4 {v0.8B-v3.8B}, [x0]
+ ST4 {v0.4H-v3.4H}, [x0]
+
+// CHECK: st4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x00,0x00,0x0c]
+// CHECK: st4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0] // encoding: [0x00,0x04,0x00,0x0c]
+
+ ST4 {v0.8B-v4.8B}, [x0]
+ ST4 {v0.8B-v3.8B,v4.8B}, [x0]
+ ST4 {v0.8B-v3.8H}, [x0]
+ ST4 {v0.8B-v3.16B}, [x0]
+ ST4 {v0.8B-},[x0]
+
+// CHECK-ERRORS: error: invalid number of vectors
+// CHECK-ERRORS: error: '}' expected
+// CHECK-ERRORS: error: mismatched register size suffix
+// CHECK-ERRORS: error: mismatched register size suffix
+// CHECK-ERRORS: error: vector register expected
diff --git a/test/MC/AArch64/arm64-verbose-vector-case.s b/test/MC/AArch64/arm64-verbose-vector-case.s
new file mode 100644
index 000000000000..6f0a3812dd74
--- /dev/null
+++ b/test/MC/AArch64/arm64-verbose-vector-case.s
@@ -0,0 +1,19 @@
+// RUN: llvm-mc -triple arm64 -mattr=crypto -show-encoding < %s | FileCheck %s
+
+pmull v8.8h, v8.8b, v8.8b
+pmull2 v8.8h, v8.16b, v8.16b
+pmull v8.1q, v8.1d, v8.1d
+pmull2 v8.1q, v8.2d, v8.2d
+// CHECK: pmull v8.8h, v8.8b, v8.8b // encoding: [0x08,0xe1,0x28,0x0e]
+// CHECK: pmull2 v8.8h, v8.16b, v8.16b // encoding: [0x08,0xe1,0x28,0x4e]
+// CHECK: pmull v8.1q, v8.1d, v8.1d // encoding: [0x08,0xe1,0xe8,0x0e]
+// CHECK: pmull2 v8.1q, v8.2d, v8.2d // encoding: [0x08,0xe1,0xe8,0x4e]
+
+pmull v8.8H, v8.8B, v8.8B
+pmull2 v8.8H, v8.16B, v8.16B
+pmull v8.1Q, v8.1D, v8.1D
+pmull2 v8.1Q, v8.2D, v8.2D
+// CHECK: pmull v8.8h, v8.8b, v8.8b // encoding: [0x08,0xe1,0x28,0x0e]
+// CHECK: pmull2 v8.8h, v8.16b, v8.16b // encoding: [0x08,0xe1,0x28,0x4e]
+// CHECK: pmull v8.1q, v8.1d, v8.1d // encoding: [0x08,0xe1,0xe8,0x0e]
+// CHECK: pmull2 v8.1q, v8.2d, v8.2d // encoding: [0x08,0xe1,0xe8,0x4e]
diff --git a/test/MC/AArch64/basic-a64-diagnostics.s b/test/MC/AArch64/basic-a64-diagnostics.s
index 2e6e0bbd387c..5293131711b6 100644
--- a/test/MC/AArch64/basic-a64-diagnostics.s
+++ b/test/MC/AArch64/basic-a64-diagnostics.s
@@ -1,5 +1,5 @@
// RUN: not llvm-mc -triple aarch64-none-linux-gnu < %s 2> %t
-// RUN: FileCheck --check-prefix=CHECK-ERROR < %t %s
+// RUN: FileCheck --check-prefix=CHECK-ERROR --check-prefix=CHECK-ERROR-ARM64 < %t %s
//------------------------------------------------------------------------------
// Add/sub (extended register)
@@ -83,9 +83,9 @@
// CHECK-ERROR: error: expected compatible register, symbol or integer in range [0, 4095]
// CHECK-ERROR-NEXT: add w4, w5, #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected compatible register, symbol or integer in range [0, 4095]
-// CHECK-ERROR-NEXT: add w5, w6, #0x1000
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: expected compatible register, symbol or integer in range [0, 4095]
+// CHECK-ERROR-AARCH64-NEXT: add w5, w6, #0x1000
+// CHECK-ERROR-AARCH64-NEXT: ^
// CHECK-ERROR-NEXT: error: expected compatible register, symbol or integer in range [0, 4095]
// CHECK-ERROR-NEXT: add w4, w5, #-1, lsl #12
// CHECK-ERROR-NEXT: ^
@@ -141,9 +141,9 @@
// Out of range immediate
adds w0, w5, #0x10000
-// CHECK-ERROR: error: expected compatible register, symbol or integer in range [0, 4095]
-// CHECK-ERROR-NEXT: adds w0, w5, #0x10000
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-AARCH64: error: expected compatible register, symbol or integer in range [0, 4095]
+// CHECK-ERROR-AARCH64-NEXT: adds w0, w5, #0x10000
+// CHECK-ERROR-AARCH64-NEXT: ^
// Wn|WSP should be in second place
adds w4, wzr, #0x123
@@ -729,6 +729,27 @@
// CHECK-ERROR-NEXT: ^
//------------------------------------------------------------------------------
+// Logical (immediates)
+//------------------------------------------------------------------------------
+
+ and w2, w3, #4294967296
+ eor w2, w3, #4294967296
+ orr w2, w3, #4294967296
+ ands w2, w3, #4294967296
+// CHECK-ERROR: error: expected compatible register or logical immediate
+// CHECK-ERROR-NEXT: and w2, w3, #4294967296
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected compatible register or logical immediate
+// CHECK-ERROR-NEXT: eor w2, w3, #4294967296
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected compatible register or logical immediate
+// CHECK-ERROR-NEXT: orr w2, w3, #4294967296
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: expected compatible register or logical immediate
+// CHECK-ERROR-NEXT: ands w2, w3, #4294967296
+// CHECK-ERROR-NEXT: ^
+
+//------------------------------------------------------------------------------
// Bitfield
//------------------------------------------------------------------------------
@@ -750,10 +771,10 @@
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: sbfm w3, wsp, #1, #9
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: sbfm x9, x5, #-1, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: sbfm x9, x5, #0, #-1
// CHECK-ERROR-NEXT: ^
@@ -761,16 +782,16 @@
sbfm w7, w11, #19, #32
sbfm x29, x30, #64, #0
sbfm x10, x20, #63, #64
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: sbfm w3, w5, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: sbfm w7, w11, #19, #32
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: sbfm x29, x30, #64, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: sbfm x10, x20, #63, #64
// CHECK-ERROR-NEXT: ^
@@ -778,16 +799,16 @@
ubfm w7, w11, #19, #32
ubfm x29, x30, #64, #0
ubfm x10, x20, #63, #64
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ubfm w3, w5, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ubfm w7, w11, #19, #32
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: ubfm x29, x30, #64, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: ubfm x10, x20, #63, #64
// CHECK-ERROR-NEXT: ^
@@ -795,31 +816,31 @@
bfm w7, w11, #19, #32
bfm x29, x30, #64, #0
bfm x10, x20, #63, #64
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: bfm w3, w5, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: bfm w7, w11, #19, #32
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: bfm x29, x30, #64, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: bfm x10, x20, #63, #64
// CHECK-ERROR-NEXT: ^
sxtb x3, x2
sxth xzr, xzr
sxtw x3, x5
-// CHECK-ERROR: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: sxtb x3, x2
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: sxth xzr, xzr
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: sxtw x3, x5
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-AARCH64: error: invalid operand for instruction
+// CHECK-ERROR-AARCH64-NEXT: sxtb x3, x2
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-AARCH64-NEXT: sxth xzr, xzr
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-AARCH64-NEXT: sxtw x3, x5
+// CHECK-ERROR-AARCH64-NEXT: ^
uxtb x3, x12
uxth x5, x9
@@ -832,9 +853,9 @@
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: uxth x5, x9
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: invalid instruction
-// CHECK-ERROR-NEXT: uxtw x3, x5
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: invalid instruction
+// CHECK-ERROR-AARCH64-NEXT: uxtw x3, x5
+// CHECK-ERROR-AARCH64-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: uxtb x2, sp
// CHECK-ERROR-NEXT: ^
@@ -853,13 +874,13 @@
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: asr sp, x2, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: asr x25, x26, #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: asr x25, x26, #64
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: asr w9, w8, #32
// CHECK-ERROR-NEXT: ^
@@ -869,18 +890,19 @@
sbfiz w11, w12, #32, #0
sbfiz w9, w10, #10, #23
sbfiz x3, x5, #12, #53
- sbfiz sp, x3, #5, #6
- sbfiz w3, wsp, #7, #8
-// CHECK-ERROR: error: expected integer in range [<lsb>, 31]
+ sbfiz sp, x3, #7, #6
+ sbfiz w3, wsp, #10, #8
+// CHECK-ERROR-AARCH64: error: expected integer in range [<lsb>, 31]
+// CHECK-ERROR-ARM64: error: expected integer in range [1, 32]
// CHECK-ERROR-NEXT: sbfiz w1, w2, #0, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: sbfiz wsp, w9, #0, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: sbfiz w9, w10, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: sbfiz w11, w12, #32, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: requested insert overflows register
@@ -890,10 +912,10 @@
// CHECK-ERROR-NEXT: sbfiz x3, x5, #12, #53
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: sbfiz sp, x3, #5, #6
+// CHECK-ERROR-NEXT: sbfiz sp, x3, #7, #6
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: sbfiz w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: sbfiz w3, wsp, #10, #8
// CHECK-ERROR-NEXT: ^
sbfx w1, w2, #0, #0
@@ -902,18 +924,19 @@
sbfx w11, w12, #32, #0
sbfx w9, w10, #10, #23
sbfx x3, x5, #12, #53
- sbfx sp, x3, #5, #6
- sbfx w3, wsp, #7, #8
-// CHECK-ERROR: error: expected integer in range [<lsb>, 31]
+ sbfx sp, x3, #7, #6
+ sbfx w3, wsp, #10, #8
+// CHECK-ERROR-AARCH64: error: expected integer in range [<lsb>, 31]
+// CHECK-ERROR-ARM64: error: expected integer in range [1, 32]
// CHECK-ERROR-NEXT: sbfx w1, w2, #0, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: sbfx wsp, w9, #0, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: sbfx w9, w10, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: sbfx w11, w12, #32, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: requested extract overflows register
@@ -923,10 +946,10 @@
// CHECK-ERROR-NEXT: sbfx x3, x5, #12, #53
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: sbfx sp, x3, #5, #6
+// CHECK-ERROR-NEXT: sbfx sp, x3, #7, #6
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: sbfx w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: sbfx w3, wsp, #10, #8
// CHECK-ERROR-NEXT: ^
bfi w1, w2, #0, #0
@@ -935,18 +958,19 @@
bfi w11, w12, #32, #0
bfi w9, w10, #10, #23
bfi x3, x5, #12, #53
- bfi sp, x3, #5, #6
- bfi w3, wsp, #7, #8
-// CHECK-ERROR: error: expected integer in range [<lsb>, 31]
+ bfi sp, x3, #7, #6
+ bfi w3, wsp, #10, #8
+// CHECK-ERROR-AARCH64: error: expected integer in range [<lsb>, 31]
+// CHECK-ERROR-ARM64: error: expected integer in range [1, 32]
// CHECK-ERROR-NEXT: bfi w1, w2, #0, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: bfi wsp, w9, #0, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: bfi w9, w10, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: bfi w11, w12, #32, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: requested insert overflows register
@@ -956,10 +980,10 @@
// CHECK-ERROR-NEXT: bfi x3, x5, #12, #53
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: bfi sp, x3, #5, #6
+// CHECK-ERROR-NEXT: bfi sp, x3, #7, #6
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: bfi w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: bfi w3, wsp, #10, #8
// CHECK-ERROR-NEXT: ^
bfxil w1, w2, #0, #0
@@ -968,18 +992,19 @@
bfxil w11, w12, #32, #0
bfxil w9, w10, #10, #23
bfxil x3, x5, #12, #53
- bfxil sp, x3, #5, #6
- bfxil w3, wsp, #7, #8
-// CHECK-ERROR: error: expected integer in range [<lsb>, 31]
+ bfxil sp, x3, #7, #6
+ bfxil w3, wsp, #10, #8
+// CHECK-ERROR-AARCH64: error: expected integer in range [<lsb>, 31]
+// CHECK-ERROR-ARM64: error: expected integer in range [1, 32]
// CHECK-ERROR-NEXT: bfxil w1, w2, #0, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: bfxil wsp, w9, #0, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: bfxil w9, w10, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: bfxil w11, w12, #32, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: requested extract overflows register
@@ -989,10 +1014,10 @@
// CHECK-ERROR-NEXT: bfxil x3, x5, #12, #53
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: bfxil sp, x3, #5, #6
+// CHECK-ERROR-NEXT: bfxil sp, x3, #7, #6
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: bfxil w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: bfxil w3, wsp, #10, #8
// CHECK-ERROR-NEXT: ^
ubfiz w1, w2, #0, #0
@@ -1001,18 +1026,19 @@
ubfiz w11, w12, #32, #0
ubfiz w9, w10, #10, #23
ubfiz x3, x5, #12, #53
- ubfiz sp, x3, #5, #6
- ubfiz w3, wsp, #7, #8
-// CHECK-ERROR: error: expected integer in range [<lsb>, 31]
+ ubfiz sp, x3, #7, #6
+ ubfiz w3, wsp, #10, #8
+// CHECK-ERROR-AARCH64: error: expected integer in range [<lsb>, 31]
+// CHECK-ERROR-ARM64: error: expected integer in range [1, 32]
// CHECK-ERROR-NEXT: ubfiz w1, w2, #0, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ubfiz wsp, w9, #0, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ubfiz w9, w10, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ubfiz w11, w12, #32, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: requested insert overflows register
@@ -1022,10 +1048,10 @@
// CHECK-ERROR-NEXT: ubfiz x3, x5, #12, #53
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: ubfiz sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ubfiz sp, x3, #7, #6
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: ubfiz w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ubfiz w3, wsp, #10, #8
// CHECK-ERROR-NEXT: ^
ubfx w1, w2, #0, #0
@@ -1034,18 +1060,19 @@
ubfx w11, w12, #32, #0
ubfx w9, w10, #10, #23
ubfx x3, x5, #12, #53
- ubfx sp, x3, #5, #6
- ubfx w3, wsp, #7, #8
-// CHECK-ERROR: error: expected integer in range [<lsb>, 31]
+ ubfx sp, x3, #7, #6
+ ubfx w3, wsp, #10, #8
+// CHECK-ERROR-AARCH64: error: expected integer in range [<lsb>, 31]
+// CHECK-ERROR-ARM64: error: expected integer in range [1, 32]
// CHECK-ERROR-NEXT: ubfx w1, w2, #0, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ubfx wsp, w9, #0, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ubfx w9, w10, #32, #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ubfx w11, w12, #32, #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: requested extract overflows register
@@ -1055,10 +1082,10 @@
// CHECK-ERROR-NEXT: ubfx x3, x5, #12, #53
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: ubfx sp, x3, #5, #6
+// CHECK-ERROR-NEXT: ubfx sp, x3, #7, #6
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: ubfx w3, wsp, #7, #8
+// CHECK-ERROR-NEXT: ubfx w3, wsp, #10, #8
// CHECK-ERROR-NEXT: ^
//------------------------------------------------------------------------------
@@ -1125,16 +1152,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmp wsp, #4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmp w25, #-1, #15, hs
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmp w3, #32, #0, ge
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp w19, #5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp w20, #7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1146,16 +1173,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmp sp, #4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmp x25, #-1, #15, hs
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmp x3, #32, #0, ge
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp x19, #5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp x20, #7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1167,16 +1194,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmn wsp, #4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmn w25, #-1, #15, hs
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmn w3, #32, #0, ge
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn w19, #5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn w20, #7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1188,16 +1215,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmn sp, #4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmn x25, #-1, #15, hs
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmn x3, #32, #0, ge
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn x19, #5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn x20, #7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1212,13 +1239,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmp wsp, w4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmp w3, wsp, #0, ge
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp w19, w5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp w20, w7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1229,13 +1256,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmp sp, x4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmp x25, sp, #15, hs
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp x19, x5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmp x20, x7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1246,13 +1273,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmn wsp, w4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmn w25, wsp, #15, hs
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn w19, w5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn w20, w7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1263,13 +1290,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ccmn sp, x4, #2, ne
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ccmn x25, sp, #15, hs
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn x19, x5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: ccmn x20, x7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1339,39 +1366,59 @@
cset wsp, lt
csetm sp, ge
+ cset w1, al
+ csetm x6, nv
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: cset wsp, lt
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: csetm sp, ge
// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: condition codes AL and NV are invalid for this instruction
+// CHECK-ERROR-NEXT: cset w1, al
+// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: condition codes AL and NV are invalid for this instruction
+// CHECK-ERROR-NEXT: csetm x6, nv
+// CHECK-ERROR-NEXT: ^
cinc w3, wsp, ne
cinc sp, x9, eq
+ cinc x2, x0, nv
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: cinc w3, wsp, ne
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: cinc sp, x9, eq
// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: condition codes AL and NV are invalid for this instruction
+// CHECK-ERROR-NEXT: cinc x2, x0, nv
+// CHECK-ERROR-NEXT: ^
cinv w3, wsp, ne
cinv sp, x9, eq
+ cinv w8, x7, nv
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: cinv w3, wsp, ne
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: cinv sp, x9, eq
// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: condition codes AL and NV are invalid for this instruction
+// CHECK-ERROR-NEXT: cinv w8, x7, nv
+// CHECK-ERROR-NEXT: ^
cneg w3, wsp, ne
cneg sp, x9, eq
+ cneg x4, x5, al
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: cneg w3, wsp, ne
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: cneg sp, x9, eq
// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: error: condition codes AL and NV are invalid for this instruction
+// CHECK-ERROR-NEXT: cneg x4, x5, al
+// CHECK-ERROR-NEXT: ^
//------------------------------------------------------------------------------
// Data Processing (1 source)
@@ -1418,16 +1465,16 @@
hlt #65536
dcps4 #43
dcps4
-// CHECK-ERROR: error: expected integer in range [0, 65535]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: svc #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: hlt #65536
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: invalid instruction
+// CHECK-ERROR-NEXT: error: {{invalid instruction|unrecognized instruction mnemonic}}
// CHECK-ERROR-NEXT: dcps4 #43
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: invalid instruction
+// CHECK-ERROR-NEXT: error: {{invalid instruction|unrecognized instruction mnemonic}}
// CHECK-ERROR-NEXT: dcps4
// CHECK-ERROR-NEXT: ^
@@ -1437,28 +1484,28 @@
extr w2, w20, w30, #-1
extr w9, w19, w20, #32
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: extr w2, w20, w30, #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: extr w9, w19, w20, #32
// CHECK-ERROR-NEXT: ^
extr x10, x15, x20, #-1
extr x20, x25, x30, #64
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: extr x10, x15, x20, #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: extr x20, x25, x30, #64
// CHECK-ERROR-NEXT: ^
ror w9, w10, #32
ror x10, x11, #64
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: ror w9, w10, #32
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: ror x10, x11, #64
// CHECK-ERROR-NEXT: ^
@@ -1467,7 +1514,8 @@
//------------------------------------------------------------------------------
fcmp s3, d2
-// CHECK-ERROR: error: expected floating-point constant #0.0
+// CHECK-ERROR-AARCH64: error: expected floating-point constant #0.0
+// CHECK-ERROR-ARM64: error: invalid operand for instruction
// CHECK-ERROR-NEXT: fcmp s3, d2
// CHECK-ERROR-NEXT: ^
@@ -1494,37 +1542,37 @@
fccmp s19, s5, #-1, lt
fccmp s20, s7, #16, hs
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmp s19, s5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmp s20, s7, #16, hs
// CHECK-ERROR-NEXT: ^
fccmp d19, d5, #-1, lt
fccmp d20, d7, #16, hs
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmp d19, d5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmp d20, d7, #16, hs
// CHECK-ERROR-NEXT: ^
fccmpe s19, s5, #-1, lt
fccmpe s20, s7, #16, hs
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmpe s19, s5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmpe s20, s7, #16, hs
// CHECK-ERROR-NEXT: ^
fccmpe d19, d5, #-1, lt
fccmpe d20, d7, #16, hs
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmpe d19, d5, #-1, lt
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: fccmpe d20, d7, #16, hs
// CHECK-ERROR-NEXT: ^
@@ -1604,10 +1652,10 @@
fcvtzs w13, s31, #0
fcvtzs w19, s20, #33
fcvtzs wsp, s19, #14
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 32]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR-NEXT: fcvtzs w13, s31, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 32]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR-NEXT: fcvtzs w19, s20, #33
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -1617,10 +1665,10 @@
fcvtzs x13, s31, #0
fcvtzs x19, s20, #65
fcvtzs sp, s19, #14
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 64]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR-NEXT: fcvtzs x13, s31, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 64]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR-NEXT: fcvtzs x19, s20, #65
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -1630,10 +1678,10 @@
fcvtzu w13, s31, #0
fcvtzu w19, s20, #33
fcvtzu wsp, s19, #14
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 32]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR-NEXT: fcvtzu w13, s31, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 32]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR-NEXT: fcvtzu w19, s20, #33
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -1643,10 +1691,10 @@
fcvtzu x13, s31, #0
fcvtzu x19, s20, #65
fcvtzu sp, s19, #14
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 64]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR-NEXT: fcvtzu x13, s31, #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [1, 64]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR-NEXT: fcvtzu x19, s20, #65
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -1730,9 +1778,9 @@
;; No particular reason, but a striking omission
fmov d0, #0.0
-// CHECK-ERROR: error: expected compatible register or floating-point constant
-// CHECK-ERROR-NEXT: fmov d0, #0.0
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-AARCH64: error: expected compatible register or floating-point constant
+// CHECK-ERROR-AARCH64-NEXT: fmov d0, #0.0
+// CHECK-ERROR-AARCH64-NEXT: ^
//------------------------------------------------------------------------------
// Floating-point <-> integer conversion
@@ -1746,10 +1794,12 @@
// CHECK-ERROR: error: expected lane specifier '[1]'
// CHECK-ERROR-NEXT: fmov x3, v0.d[0]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: lane number incompatible with layout
+// CHECK-ERROR-AARCH64-NEXT: error: lane number incompatible with layout
+// CHECK-ERROR-ARM64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: fmov v29.1d[1], x2
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: lane number incompatible with layout
+// CHECK-ERROR-AARCH64-NEXT: error: lane number incompatible with layout
+// CHECK-ERROR-ARM64-NEXT: error: expected lane specifier '[1]'
// CHECK-ERROR-NEXT: fmov x7, v0.d[2]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -1789,10 +1839,11 @@
// Load/store exclusive
//------------------------------------------------------------------------------
- stxrb w2, x3, [x4, #20]
+ stxrb w2, w3, [x4, #20]
stlxrh w10, w11, [w2]
-// CHECK-ERROR: error: expected '#0'
-// CHECK-ERROR-NEXT: stxrb w2, x3, [x4, #20]
+// CHECK-ERROR-AARCH64: error: expected '#0'
+// CHECK-ERROR-ARM64: error: index must be absent or #0
+// CHECK-ERROR-NEXT: stxrb w2, w3, [x4, #20]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: stlxrh w10, w11, [w2]
@@ -1831,16 +1882,16 @@
sturh w17, [x1, #256]
ldursw x20, [x1, #256]
ldur x12, [sp, #256]
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldurb w2, [sp, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: sturh w17, [x1, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldursw x20, [x1, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldur x12, [sp, #256]
// CHECK-ERROR-NEXT: ^
@@ -1849,19 +1900,19 @@
ldursb x9, [sp, #-257]
ldur w2, [x30, #-257]
stur q9, [x20, #-257]
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: stur h2, [x2, #-257]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: stur b2, [x2, #-257]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldursb x9, [sp, #-257]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldur w2, [x30, #-257]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: stur q9, [x20, #-257]
// CHECK-ERROR-NEXT: ^
@@ -1875,12 +1926,13 @@
//------------------------------------------------------------------------------
ldr x3, [x4, #25], #0
ldr x4, [x9, #0], #4
-// CHECK-ERROR: error: expected symbolic reference or integer in range [0, 32760]
+// CHECK-ERROR-AARCH64: error: {{expected symbolic reference or integer|index must be a multiple of 8}} in range [0, 32760]
+// CHECK-ERROR-ARM64: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr x3, [x4, #25], #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: ldr x4, [x9, #0], #4
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-AARCH64-NEXT: ldr x4, [x9, #0], #4
+// CHECK-ERROR-AARCH64-NEXT: ^
strb w1, [x19], #256
strb w9, [sp], #-257
@@ -1888,22 +1940,22 @@
strh w9, [sp], #-257
str w1, [x19], #256
str w9, [sp], #-257
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strb w1, [x19], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strb w9, [sp], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strh w1, [x19], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strh w9, [sp], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str w1, [x19], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str w9, [sp], #-257
// CHECK-ERROR-NEXT: ^
@@ -1913,22 +1965,22 @@
ldrh w9, [sp], #-257
ldr w1, [x19], #256
ldr w9, [sp], #-257
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrb w1, [x19], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrb w9, [sp], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrh w1, [x19], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrh w9, [sp], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr w1, [x19], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr w9, [sp], #-257
// CHECK-ERROR-NEXT: ^
@@ -1938,22 +1990,22 @@
ldrsh x22, [x13], #-257
ldrsw x2, [x3], #256
ldrsw x22, [x13], #-257
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb x2, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb x22, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsh x2, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsh x22, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsw x2, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsw x22, [x13], #-257
// CHECK-ERROR-NEXT: ^
@@ -1961,16 +2013,16 @@
ldrsb w22, [x13], #-257
ldrsh w2, [x3], #256
ldrsh w22, [x13], #-257
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb w2, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb w22, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsh w2, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsh w22, [x13], #-257
// CHECK-ERROR-NEXT: ^
@@ -1984,34 +2036,34 @@
str d3, [x13], #-257
str q3, [x3], #256
str q3, [x13], #-257
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str b3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str b3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str h3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str h3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str s3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str s3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str d3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str d3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str q3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str q3, [x13], #-257
// CHECK-ERROR-NEXT: ^
@@ -2025,34 +2077,34 @@
ldr d3, [x13], #-257
ldr q3, [x3], #256
ldr q3, [x13], #-257
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr b3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr b3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr h3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr h3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr s3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr s3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr d3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr d3, [x13], #-257
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr q3, [x3], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr q3, [x13], #-257
// CHECK-ERROR-NEXT: ^
@@ -2074,19 +2126,19 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: strb w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strb w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: strh w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strh w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
@@ -2099,19 +2151,19 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrb w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrb w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrh w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrh w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
@@ -2124,19 +2176,19 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsb x2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb x22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsh x2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsh x22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsw x2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsw x22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
@@ -2147,13 +2199,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsb w2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb w22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsh w2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsh w22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
@@ -2168,25 +2220,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str b3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str b3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str h3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str h3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str s3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str s3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str d3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str d3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
@@ -2201,25 +2253,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr b3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr b3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr h3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr h3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr s3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr s3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr d3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr d3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
@@ -2231,16 +2283,16 @@
sttrh w17, [x1, #256]
ldtrsw x20, [x1, #256]
ldtr x12, [sp, #256]
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtrb w2, [sp, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: sttrh w17, [x1, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtrsw x20, [x1, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtr x12, [sp, #256]
// CHECK-ERROR-NEXT: ^
@@ -2255,10 +2307,10 @@
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: sttr b2, [x2, #-257]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtrsb x9, [sp, #-257]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtr w2, [x30, #-257]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -2276,19 +2328,19 @@
ldr w0, [x4, #16384]
ldrh w2, [x21, #8192]
ldrb w3, [x12, #4096]
-// CHECK-ERROR: error: expected integer in range [-256, 255]
+// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr q0, [x11, #65536]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr x0, [sp, #32768]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr w0, [x4, #16384]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrh w2, [x21, #8192]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrb w3, [x12, #4096]
// CHECK-ERROR-NEXT: ^
@@ -2296,15 +2348,15 @@
ldr w0, [x0, #2]
ldrsh w2, [x0, #123]
str q0, [x0, #8]
-// CHECK-ERROR: error: too few operands for instruction
-// CHECK-ERROR-NEXT: ldr w0, [x0, #2]
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: too few operands for instruction
-// CHECK-ERROR-NEXT: ldrsh w2, [x0, #123]
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: too few operands for instruction
-// CHECK-ERROR-NEXT: str q0, [x0, #8]
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-AARCH64: error: too few operands for instruction
+// CHECK-ERROR-AARCH64-NEXT: ldr w0, [x0, #2]
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: too few operands for instruction
+// CHECK-ERROR-AARCH64-NEXT: ldrsh w2, [x0, #123]
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: too few operands for instruction
+// CHECK-ERROR-AARCH64-NEXT: str q0, [x0, #8]
+// CHECK-ERROR-AARCH64-NEXT: ^
//// 32-bit addresses
ldr w0, [w20]
@@ -2324,13 +2376,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: strb w0, [wsp]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR: error: invalid operand for instruction
-// CHECK-ERROR-NEXT: strh w31, [x23, #1]
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: too few operands for instruction
-// CHECK-ERROR-NEXT: str x5, [x22, #12]
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [-256, 255]
+// CHECK-ERROR-AARCH64: error: invalid operand for instruction
+// CHECK-ERROR-AARCH64-NEXT: strh w31, [x23, #1]
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-AARCH64-NEXT: error: too few operands for instruction
+// CHECK-ERROR-AARCH64-NEXT: str x5, [x22, #12]
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str w7, [x12, #16384]
// CHECK-ERROR-NEXT: ^
@@ -2339,16 +2391,19 @@
prfm #32, [sp, #8]
prfm pldl1strm, [w3, #8]
prfm wibble, [sp]
-// CHECK-ERROR: error: Invalid immediate for instruction
+// CHECK-ERROR-AARCH64: error: Invalid immediate for instruction
+// CHECK-ERROR-ARM64: error: prefetch operand out of range, [0,31] expected
// CHECK-ERROR-NEXT: prfm #-1, [sp]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-AARCH64-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-ARM64-NEXT: error: prefetch operand out of range, [0,31] expected
// CHECK-ERROR-NEXT: prfm #32, [sp, #8]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: prfm pldl1strm, [w3, #8]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: operand specifier not recognised
+// CHECK-ERROR-AARCH64-NEXT: error: operand specifier not recognised
+// CHECK-ERROR-ARM64-NEXT: error: pre-fetch hint expected
// CHECK-ERROR-NEXT: prfm wibble, [sp]
// CHECK-ERROR-NEXT: ^
@@ -2431,10 +2486,12 @@
// CHECK-ERROR-NEXT: error: expected integer shift amount
// CHECK-ERROR-NEXT: ldr q5, [sp, x2, lsl #-1]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected 'lsl' or 'sxtw' with optional shift of #0 or #4
+// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtw' with optional shift of #0 or #4
+// CHECK-ERROR-ARM64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #4
// CHECK-ERROR-NEXT: ldr q10, [x20, w4, uxtw #2]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected 'lsl' or 'sxtw' with optional shift of #0 or #4
+// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtw' with optional shift of #0 or #4
+// CHECK-ERROR-ARM64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #4
// CHECK-ERROR-NEXT: str q21, [x20, w4, uxtw #5]
// CHECK-ERROR-NEXT: ^
@@ -2446,16 +2503,16 @@
stp w9, w10, [x5, #256]
ldp w11, w12, [x9, #-260]
stp wsp, w9, [sp]
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp w3, w2, [x4, #1]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp w1, w2, [x3, #253]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp w9, w10, [x5, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp w11, w12, [x9, #-260]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -2465,26 +2522,26 @@
ldpsw x9, x2, [sp, #2]
ldpsw x1, x2, [x10, #256]
ldpsw x3, x4, [x11, #-260]
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x9, x2, [sp, #2]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x1, x2, [x10, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x3, x4, [x11, #-260]
// CHECK-ERROR-NEXT: ^
ldp x2, x5, [sp, #4]
ldp x5, x6, [x9, #512]
stp x7, x8, [x10, #-520]
-// CHECK-ERROR: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp x2, x5, [sp, #4]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp x5, x6, [x9, #512]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stp x7, x8, [x10, #-520]
// CHECK-ERROR-NEXT: ^
@@ -2500,13 +2557,13 @@
stp s3, s5, [sp, #-2]
ldp s6, s26, [x4, #-260]
stp s13, s19, [x5, #256]
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp s3, s5, [sp, #-2]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp s6, s26, [x4, #-260]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp s13, s19, [x5, #256]
// CHECK-ERROR-NEXT: ^
@@ -2516,10 +2573,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldp d3, d4, [xzr]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp d5, d6, [x0, #512]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stp d7, d8, [x0, #-520]
// CHECK-ERROR-NEXT: ^
@@ -2530,13 +2587,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldp d3, q2, [sp]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q3, q5, [sp, #8]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: stp q20, q25, [x5, #1024]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q30, q15, [x23, #-1040]
// CHECK-ERROR-NEXT: ^
@@ -2549,16 +2606,16 @@
stp w9, w10, [x5], #256
ldp w11, w12, [x9], #-260
stp wsp, w9, [sp], #0
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp w3, w2, [x4], #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp w1, w2, [x3], #253
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp w9, w10, [x5], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp w11, w12, [x9], #-260
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -2568,26 +2625,26 @@
ldpsw x9, x2, [sp], #2
ldpsw x1, x2, [x10], #256
ldpsw x3, x4, [x11], #-260
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x9, x2, [sp], #2
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x1, x2, [x10], #256
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x3, x4, [x11], #-260
// CHECK-ERROR-NEXT: ^
ldp x2, x5, [sp], #4
ldp x5, x6, [x9], #512
stp x7, x8, [x10], #-520
-// CHECK-ERROR: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp x2, x5, [sp], #4
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp x5, x6, [x9], #512
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stp x7, x8, [x10], #-520
// CHECK-ERROR-NEXT: ^
@@ -2603,13 +2660,13 @@
stp s3, s5, [sp], #-2
ldp s6, s26, [x4], #-260
stp s13, s19, [x5], #256
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp s3, s5, [sp], #-2
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp s6, s26, [x4], #-260
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp s13, s19, [x5], #256
// CHECK-ERROR-NEXT: ^
@@ -2619,10 +2676,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldp d3, d4, [xzr], #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp d5, d6, [x0], #512
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stp d7, d8, [x0], #-520
// CHECK-ERROR-NEXT: ^
@@ -2633,13 +2690,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldp d3, q2, [sp], #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q3, q5, [sp], #8
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: stp q20, q25, [x5], #1024
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q30, q15, [x23], #-1040
// CHECK-ERROR-NEXT: ^
@@ -2652,16 +2709,16 @@
stp w9, w10, [x5, #256]!
ldp w11, w12, [x9, #-260]!
stp wsp, w9, [sp, #0]!
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp w3, w2, [x4, #1]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp w1, w2, [x3, #253]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp w9, w10, [x5, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp w11, w12, [x9, #-260]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -2671,26 +2728,26 @@
ldpsw x9, x2, [sp, #2]!
ldpsw x1, x2, [x10, #256]!
ldpsw x3, x4, [x11, #-260]!
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x9, x2, [sp, #2]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x1, x2, [x10, #256]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldpsw x3, x4, [x11, #-260]!
// CHECK-ERROR-NEXT: ^
ldp x2, x5, [sp, #4]!
ldp x5, x6, [x9, #512]!
stp x7, x8, [x10, #-520]!
-// CHECK-ERROR: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp x2, x5, [sp, #4]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp x5, x6, [x9, #512]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stp x7, x8, [x10, #-520]!
// CHECK-ERROR-NEXT: ^
@@ -2706,13 +2763,13 @@
stp s3, s5, [sp, #-2]!
ldp s6, s26, [x4, #-260]!
stp s13, s19, [x5, #256]!
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp s3, s5, [sp, #-2]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldp s6, s26, [x4, #-260]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stp s13, s19, [x5, #256]!
// CHECK-ERROR-NEXT: ^
@@ -2722,10 +2779,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldp d3, d4, [xzr, #0]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldp d5, d6, [x0, #512]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stp d7, d8, [x0, #-520]!
// CHECK-ERROR-NEXT: ^
@@ -2736,13 +2793,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldp d3, q2, [sp, #0]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q3, q5, [sp, #8]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: stp q20, q25, [x5, #1024]!
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q30, q15, [x23, #-1040]!
// CHECK-ERROR-NEXT: ^
@@ -2754,16 +2811,16 @@
stnp w9, w10, [x5, #256]
ldnp w11, w12, [x9, #-260]
stnp wsp, w9, [sp]
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldnp w3, w2, [x4, #1]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stnp w1, w2, [x3, #253]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stnp w9, w10, [x5, #256]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldnp w11, w12, [x9, #-260]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@@ -2773,13 +2830,13 @@
ldnp x2, x5, [sp, #4]
ldnp x5, x6, [x9, #512]
stnp x7, x8, [x10, #-520]
-// CHECK-ERROR: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldnp x2, x5, [sp, #4]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldnp x5, x6, [x9, #512]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stnp x7, x8, [x10, #-520]
// CHECK-ERROR-NEXT: ^
@@ -2795,13 +2852,13 @@
stnp s3, s5, [sp, #-2]
ldnp s6, s26, [x4, #-260]
stnp s13, s19, [x5, #256]
-// CHECK-ERROR: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stnp s3, s5, [sp, #-2]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: ldnp s6, s26, [x4, #-260]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 4 in range [-256, 252]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 4 in range [-256, 252]
// CHECK-ERROR-NEXT: stnp s13, s19, [x5, #256]
// CHECK-ERROR-NEXT: ^
@@ -2811,10 +2868,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldnp d3, d4, [xzr]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: ldnp d5, d6, [x0, #512]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 8 in range [-512, 508]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: stnp d7, d8, [x0, #-520]
// CHECK-ERROR-NEXT: ^
@@ -2825,13 +2882,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldnp d3, q2, [sp]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldnp q3, q5, [sp, #8]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: stnp q20, q25, [x5, #1024]
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer multiple of 16 in range [-1024, 1016]
+// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldnp q30, q15, [x23, #-1040]
// CHECK-ERROR-NEXT: ^
@@ -2928,13 +2985,17 @@
orn wsp, w3, w5
bics x20, sp, x9, lsr #0
orn x2, x6, sp, lsl #3
-// CHECK-ERROR: error: invalid operand for instruction
+// FIXME: the diagnostic we get for 'orn wsp, w3, w5' comes from the orn alias,
+// which is a closer match than the genuine ORNWri; it would be better to get
+// the ORNWri diagnostic once the alias has failed to match, i.e. the alias's
+// diagnostics should have a lower priority.
+// CHECK-ERROR: error: expected compatible register or logical immediate
// CHECK-ERROR-NEXT: orn wsp, w3, w5
-// CHECK-ERROR-NEXT: ^
+// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: bics x20, sp, x9, lsr #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: invalid operand for instruction
+// CHECK-ERROR-NEXT: error: expected compatible register or logical immediate
// CHECK-ERROR-NEXT: orn x2, x6, sp, lsl #3
// CHECK-ERROR-NEXT: ^
@@ -2974,28 +3035,32 @@
movz x3, #-1
movk w3, #1, lsl #32
movn x2, #12, lsl #64
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz w3, #65536, lsl #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz w4, #65536
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-ARM64-NEXT: error: expected 'lsl' with optional integer 0 or 16
// CHECK-ERROR-NEXT: movn w1, #2, lsl #1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: only 'lsl #+N' valid after immediate
+// CHECK-ERROR-AARCH64-NEXT: error: only 'lsl #+N' valid after immediate
+// CHECK-ERROR-ARM64-NEXT: error: expected integer shift amount
// CHECK-ERROR-NEXT: movk w3, #0, lsl #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movn w2, #-1, lsl #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz x3, #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-ARM64-NEXT: error: expected 'lsl' with optional integer 0 or 16
// CHECK-ERROR-NEXT: movk w3, #1, lsl #32
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-ARM64-NEXT: error: expected 'lsl' with optional integer 0, 16, 32 or 48
// CHECK-ERROR-NEXT: movn x2, #12, lsl #64
// CHECK-ERROR-NEXT: ^
@@ -3005,22 +3070,22 @@
movk w3, #:abs_g0:sym
movz x3, #:abs_g0_nc:sym
movn x4, #:abs_g0_nc:sym
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz x12, #:abs_g0:sym, lsl #16
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz x12, #:abs_g0:sym, lsl #0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
-// CHECK-ERROR-NEXT: movn x2, #:abs_g0:sym
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: movn x2, #:abs_g0:sym
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk w3, #:abs_g0:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz x3, #:abs_g0_nc:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movn x4, #:abs_g0_nc:sym
// CHECK-ERROR-NEXT: ^
@@ -3028,16 +3093,16 @@
movk w3, #:abs_g1:sym
movz x3, #:abs_g1_nc:sym
movn x4, #:abs_g1_nc:sym
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
-// CHECK-ERROR-NEXT: movn x2, #:abs_g1:sym
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-AARCH64: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: movn x2, #:abs_g1:sym
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk w3, #:abs_g1:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz x3, #:abs_g1_nc:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movn x4, #:abs_g1_nc:sym
// CHECK-ERROR-NEXT: ^
@@ -3047,53 +3112,53 @@
movk w3, #:abs_g2_nc:sym
movz x13, #:abs_g2_nc:sym
movn x24, #:abs_g2_nc:sym
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz w12, #:abs_g2:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
-// CHECK-ERROR-NEXT: movn x12, #:abs_g2:sym
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: movn x12, #:abs_g2:sym
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk x13, #:abs_g2:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk w3, #:abs_g2_nc:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz x13, #:abs_g2_nc:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movn x24, #:abs_g2_nc:sym
// CHECK-ERROR-NEXT: ^
movn x19, #:abs_g3:sym
movz w20, #:abs_g3:sym
movk w21, #:abs_g3:sym
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
-// CHECK-ERROR-NEXT: movn x19, #:abs_g3:sym
-// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-AARCH64: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
+// CHECK-ERROR-AARCH64-NEXT: movn x19, #:abs_g3:sym
+// CHECK-ERROR-AARCH64-NEXT: ^
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz w20, #:abs_g3:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk w21, #:abs_g3:sym
// CHECK-ERROR-NEXT: ^
movk x19, #:abs_g0_s:sym
movk w23, #:abs_g0_s:sym
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk x19, #:abs_g0_s:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk w23, #:abs_g0_s:sym
// CHECK-ERROR-NEXT: ^
movk x19, #:abs_g1_s:sym
movk w23, #:abs_g1_s:sym
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk x19, #:abs_g1_s:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk w23, #:abs_g1_s:sym
// CHECK-ERROR-NEXT: ^
@@ -3101,16 +3166,16 @@
movn w29, #:abs_g2_s:sym
movk x19, #:abs_g2_s:sym
movk w23, #:abs_g2_s:sym
-// CHECK-ERROR: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movz w2, #:abs_g2_s:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movn w29, #:abs_g2_s:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk x19, #:abs_g2_s:sym
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected relocated symbol or integer in range [0, 65535]
+// CHECK-ERROR-NEXT: error: {{expected relocated symbol or|immediate must be an}} integer in range [0, 65535]
// CHECK-ERROR-NEXT: movk w23, #:abs_g2_s:sym
// CHECK-ERROR-NEXT: ^
@@ -3154,19 +3219,19 @@
hint #-1
hint #128
-// CHECK-ERROR: error: expected integer in range [0, 127]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 127]
// CHECK-ERROR-NEXT: hint #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 127]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 127]
// CHECK-ERROR-NEXT: hint #128
// CHECK-ERROR-NEXT: ^
clrex #-1
clrex #16
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: clrex #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: clrex #16
// CHECK-ERROR-NEXT: ^
@@ -3174,25 +3239,25 @@
dsb #16
dmb #-1
dmb #16
-// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: error: {{Invalid immediate for instruction|barrier operand out of range}}
// CHECK-ERROR-NEXT: dsb #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: error: {{Invalid immediate for instruction|barrier operand out of range}}
// CHECK-ERROR-NEXT: dsb #16
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: error: {{Invalid immediate for instruction|barrier operand out of range}}
// CHECK-ERROR-NEXT: dmb #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: error: {{Invalid immediate for instruction|barrier operand out of range}}
// CHECK-ERROR-NEXT: dmb #16
// CHECK-ERROR-NEXT: ^
isb #-1
isb #16
-// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: error: {{Invalid immediate for instruction|barrier operand out of range}}
// CHECK-ERROR-NEXT: isb #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: Invalid immediate for instruction
+// CHECK-ERROR-NEXT: error: {{Invalid immediate for instruction|barrier operand out of range}}
// CHECK-ERROR-NEXT: isb #16
// CHECK-ERROR-NEXT: ^
@@ -3200,16 +3265,16 @@
msr spsel, #-1
msr spsel #-1
msr daifclr, #16
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: msr daifset, x4
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: msr spsel, #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected comma before next operand
+// CHECK-ERROR-NEXT: error: {{expected comma before next operand|unexpected token in argument list}}
// CHECK-ERROR-NEXT: msr spsel #-1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 15]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR-NEXT: msr daifclr, #16
// CHECK-ERROR-NEXT: ^
@@ -3221,7 +3286,7 @@
sysl x13, #3, c16, c2, #3
sysl x9, #2, c11, c16, #5
sysl x4, #4, c9, c8, #8
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 7]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR-NEXT: sys #8, c1, c2, #7, x9
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
@@ -3230,10 +3295,10 @@
// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
// CHECK-ERROR-NEXT: sys #2, c11, c16, #5
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 7]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR-NEXT: sys #4, c9, c8, #8, xzr
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 7]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR-NEXT: sysl x11, #8, c1, c2, #7
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
@@ -3242,20 +3307,21 @@
// CHECK-ERROR-NEXT: error: Expected cN operand where 0 <= N <= 15
// CHECK-ERROR-NEXT: sysl x9, #2, c11, c16, #5
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 7]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR-NEXT: sysl x4, #4, c9, c8, #8
// CHECK-ERROR-NEXT: ^
ic ialluis, x2
ic allu, x7
ic ivau
-// CHECK-ERROR-NEXT: error: specified IC op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{IC|ic}} op does not use a register
// CHECK-ERROR-NEXT: ic ialluis, x2
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: operand specifier not recognised
+// CHECK-ERROR-AARCH64-NEXT: error: operand specifier not recognised
+// CHECK-ERROR-ARM64-NEXT: error: invalid operand for IC instruction
// CHECK-ERROR-NEXT: ic allu, x7
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified IC op requires a register
+// CHECK-ERROR-NEXT: error: specified {{IC|ic}} op requires a register
// CHECK-ERROR-NEXT: ic ivau
// CHECK-ERROR-NEXT: ^
@@ -3291,100 +3357,100 @@
tlbi VALE3
tlbi VMALLS12E1, x15
tlbi VAALE1
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi IPAS2E1IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi IPAS2LE1IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi VMALLE1IS, x12
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi ALLE2IS, x11
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi ALLE3IS, x20
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAE1IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAE2IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAE3IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi ASIDE1IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAAE1IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi ALLE1IS, x0
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VALE1IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VALE2IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VALE3IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi VMALLS12E1IS, xzr
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAALE1IS
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi IPAS2E1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi IPAS2LE1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi VMALLE1, x9
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi ALLE2, x10
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi ALLE3, x11
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAE1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAE2
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAE3
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi ASIDE1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAAE1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi ALLE1, x25
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VALE1
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VALE2
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VALE3
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op does not use a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op does not use a register
// CHECK-ERROR-NEXT: tlbi VMALLS12E1, x15
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: specified TLBI op requires a register
+// CHECK-ERROR-NEXT: error: specified {{TLBI|tlbi}} op requires a register
// CHECK-ERROR-NEXT: tlbi VAALE1
// CHECK-ERROR-NEXT: ^
@@ -3642,16 +3708,16 @@
tbz w3, #32, nowhere
tbz x9, #-1, there
tbz x20, #64, dont
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: tbz w3, #-1, addr
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: tbz w3, #32, nowhere
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: tbz x9, #-1, there
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: tbz x20, #64, dont
// CHECK-ERROR-NEXT: ^
@@ -3659,16 +3725,16 @@
tbnz w3, #32, nowhere
tbnz x9, #-1, there
tbnz x20, #64, dont
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: tbnz w3, #-1, addr
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 31]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR-NEXT: tbnz w3, #32, nowhere
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: tbnz x9, #-1, there
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: expected integer in range [0, 63]
+// CHECK-ERROR-NEXT: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR-NEXT: tbnz x20, #64, dont
//------------------------------------------------------------------------------
diff --git a/test/MC/AArch64/basic-a64-instructions.s b/test/MC/AArch64/basic-a64-instructions.s
index a50efb33109d..140ea336984c 100644
--- a/test/MC/AArch64/basic-a64-instructions.s
+++ b/test/MC/AArch64/basic-a64-instructions.s
@@ -108,9 +108,9 @@ _func:
// CHECK: adds x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xab]
// CHECK: adds x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xab]
// CHECK: adds x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xab]
-// CHECK: adds xzr, x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xab]
+// CHECK: {{adds xzr,|cmn}} x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xab]
// CHECK: adds x18, sp, w19, sxth // encoding: [0xf2,0xa3,0x33,0xab]
-// CHECK: adds xzr, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xab]
+// CHECK: {{adds xzr,|cmn}} x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xab]
// CHECK: adds x3, x5, x9, sxtx #2 // encoding: [0xa3,0xe8,0x29,0xab]
adds w2, w5, w7, uxtb
@@ -127,7 +127,7 @@ _func:
// CHECK: adds w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x2b]
// CHECK: adds w2, w5, w1, sxtb #1 // encoding: [0xa2,0x84,0x21,0x2b]
// CHECK: adds w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x2b]
-// CHECK: adds wzr, w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
+// CHECK: cmn w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
// CHECK: adds w2, w3, w5, sxtx // encoding: [0x62,0xe0,0x25,0x2b]
// subs
@@ -143,9 +143,9 @@ _func:
// CHECK: subs x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xeb]
// CHECK: subs x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xeb]
// CHECK: subs x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xeb]
-// CHECK: subs xzr, x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xeb]
+// CHECK: {{subs xzr,|cmp}} x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xeb]
// CHECK: subs x18, sp, w19, sxth // encoding: [0xf2,0xa3,0x33,0xeb]
-// CHECK: subs xzr, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xeb]
+// CHECK: {{subs xzr,|cmp}} x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xeb]
// CHECK: subs x3, x5, x9, sxtx #2 // encoding: [0xa3,0xe8,0x29,0xeb]
subs w2, w5, w7, uxtb
@@ -162,7 +162,7 @@ _func:
// CHECK: subs w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x6b]
// CHECK: subs w2, w5, w1, sxtb #1 // encoding: [0xa2,0x84,0x21,0x6b]
// CHECK: subs w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x6b]
-// CHECK: subs wzr, w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x6b]
+// CHECK: {{subs wzr,|cmp}} w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x6b]
// CHECK: subs w2, w3, w5, sxtx // encoding: [0x62,0xe0,0x25,0x6b]
// cmp
@@ -227,14 +227,14 @@ _func:
cmn wsp, w19, sxth
cmn w2, w3, sxtw
cmn w3, w5, sxtx
-// CHECK: cmn w5, w7, uxtb // encoding: [0xbf,0x00,0x27,0x2b]
-// CHECK: cmn w15, w17, uxth // encoding: [0xff,0x21,0x31,0x2b]
-// CHECK: cmn w29, wzr, uxtw // encoding: [0xbf,0x43,0x3f,0x2b]
-// CHECK: cmn w17, w1, uxtx // encoding: [0x3f,0x62,0x21,0x2b]
-// CHECK: cmn w5, w1, sxtb #1 // encoding: [0xbf,0x84,0x21,0x2b]
-// CHECK: cmn wsp, w19, sxth // encoding: [0xff,0xa3,0x33,0x2b]
-// CHECK: cmn w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
-// CHECK: cmn w3, w5, sxtx // encoding: [0x7f,0xe0,0x25,0x2b]
+// CHECK: {{cmn|adds wzr,}} w5, w7, uxtb // encoding: [0xbf,0x00,0x27,0x2b]
+// CHECK: {{cmn|adds wzr,}} w15, w17, uxth // encoding: [0xff,0x21,0x31,0x2b]
+// CHECK: {{cmn|adds wzr,}} w29, wzr, uxtw // encoding: [0xbf,0x43,0x3f,0x2b]
+// CHECK: {{cmn|adds wzr,}} w17, w1, uxtx // encoding: [0x3f,0x62,0x21,0x2b]
+// CHECK: {{cmn|adds wzr,}} w5, w1, sxtb #1 // encoding: [0xbf,0x84,0x21,0x2b]
+// CHECK: {{cmn|adds wzr,}} wsp, w19, sxth // encoding: [0xff,0xa3,0x33,0x2b]
+// CHECK: {{cmn|adds wzr,}} w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
+// CHECK: {{cmn|adds wzr,}} w3, w5, sxtx // encoding: [0x7f,0xe0,0x25,0x2b]
// operands for cmp
cmp x20, w29, uxtb #3
@@ -244,7 +244,7 @@ _func:
// CHECK: cmp x20, w29, uxtb #3 // encoding: [0x9f,0x0e,0x3d,0xeb]
// CHECK: cmp x12, x13, uxtx #4 // encoding: [0x9f,0x71,0x2d,0xeb]
// CHECK: cmp wsp, w1, uxtb // encoding: [0xff,0x03,0x21,0x6b]
-// CHECK: cmn wsp, wzr, sxtw // encoding: [0xff,0xc3,0x3f,0x2b]
+// CHECK: {{cmn|adds wzr,}} wsp, wzr, sxtw // encoding: [0xff,0xc3,0x3f,0x2b]
// LSL variant if sp involved
sub sp, x3, x7, lsl #4
@@ -255,7 +255,7 @@ _func:
// CHECK: sub sp, x3, x7, lsl #4 // encoding: [0x7f,0x70,0x27,0xcb]
// CHECK: add w2, wsp, w3, lsl #1 // encoding: [0xe2,0x47,0x23,0x0b]
// CHECK: cmp wsp, w9 // encoding: [0xff,0x43,0x29,0x6b]
-// CHECK: adds wzr, wsp, w3, lsl #4 // encoding: [0xff,0x53,0x23,0x2b]
+// CHECK: cmn wsp, w3, lsl #4 // encoding: [0xff,0x53,0x23,0x2b]
// CHECK: subs x3, sp, x9, lsl #2 // encoding: [0xe3,0x6b,0x29,0xeb]
//------------------------------------------------------------------------------
@@ -309,16 +309,16 @@ _func:
adds w20, wsp, #0x0
adds xzr, x3, #0x1, lsl #12 // FIXME: canonically should be cmn
// CHECK: adds w13, w23, #291, lsl #12 // encoding: [0xed,0x8e,0x44,0x31]
-// CHECK: adds wzr, w2, #4095 // encoding: [0x5f,0xfc,0x3f,0x31]
+// CHECK: {{adds wzr,|cmn}} w2, #4095 // encoding: [0x5f,0xfc,0x3f,0x31]
// CHECK: adds w20, wsp, #0 // encoding: [0xf4,0x03,0x00,0x31]
-// CHECK: adds xzr, x3, #1, lsl #12 // encoding: [0x7f,0x04,0x40,0xb1]
+// CHECK: {{adds xzr,|cmn}} x3, #1, lsl #12 // encoding: [0x7f,0x04,0x40,0xb1]
// Checks for subs
subs xzr, sp, #20, lsl #12 // FIXME: canonically should be cmp
subs xzr, x30, #4095, lsl #0 // FIXME: canonically should be cmp
subs x4, sp, #3822
-// CHECK: subs xzr, sp, #20, lsl #12 // encoding: [0xff,0x53,0x40,0xf1]
-// CHECK: subs xzr, x30, #4095 // encoding: [0xdf,0xff,0x3f,0xf1]
+// CHECK: {{subs xzr,|cmp}} sp, #20, lsl #12 // encoding: [0xff,0x53,0x40,0xf1]
+// CHECK: {{subs xzr,|cmp}} x30, #4095 // encoding: [0xdf,0xff,0x3f,0xf1]
// CHECK: subs x4, sp, #3822 // encoding: [0xe4,0xbb,0x3b,0xf1]
// cmn is an alias for adds zr, ...
@@ -349,8 +349,8 @@ _func:
// A relocation check (default to lo12, which is the only sane relocation anyway really)
add x0, x4, #:lo12:var
-// CHECK: add x0, x4, #:lo12:var // encoding: [0x80'A',A,A,0x91'A']
-// CHECK: // fixup A - offset: 0, value: :lo12:var, kind: fixup_a64_add_lo12
+// CHECK: add x0, x4, :lo12:var // encoding: [0x80,0bAAAAAA00,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :lo12:var, kind: fixup_aarch64_add_imm12
//------------------------------------------------------------------------------
// Add-sub (shifted register)
@@ -423,7 +423,7 @@ _func:
adds w20, wzr, w4
adds w4, w6, wzr
// CHECK: adds w3, w5, w7 // encoding: [0xa3,0x00,0x07,0x2b]
-// CHECK: adds wzr, w3, w5 // encoding: [0x7f,0x00,0x05,0x2b]
+// CHECK: {{adds wzr,|cmn}} w3, w5 // encoding: [0x7f,0x00,0x05,0x2b]
// CHECK: adds w20, wzr, w4 // encoding: [0xf4,0x03,0x04,0x2b]
// CHECK: adds w4, w6, wzr // encoding: [0xc4,0x00,0x1f,0x2b]
@@ -453,7 +453,7 @@ _func:
adds x20, xzr, x4
adds x4, x6, xzr
// CHECK: adds x3, x5, x7 // encoding: [0xa3,0x00,0x07,0xab]
-// CHECK: adds xzr, x3, x5 // encoding: [0x7f,0x00,0x05,0xab]
+// CHECK: {{adds xzr,|cmn}} x3, x5 // encoding: [0x7f,0x00,0x05,0xab]
// CHECK: adds x20, xzr, x4 // encoding: [0xf4,0x03,0x04,0xab]
// CHECK: adds x4, x6, xzr // encoding: [0xc4,0x00,0x1f,0xab]
@@ -484,7 +484,7 @@ _func:
sub w4, w6, wzr
// CHECK: sub w3, w5, w7 // encoding: [0xa3,0x00,0x07,0x4b]
// CHECK: sub wzr, w3, w5 // encoding: [0x7f,0x00,0x05,0x4b]
-// CHECK: sub w20, wzr, w4 // encoding: [0xf4,0x03,0x04,0x4b]
+// CHECK: neg w20, w4 // encoding: [0xf4,0x03,0x04,0x4b]
// CHECK: sub w4, w6, wzr // encoding: [0xc4,0x00,0x1f,0x4b]
sub w11, w13, w15, lsl #0
@@ -514,7 +514,7 @@ _func:
sub x4, x6, xzr
// CHECK: sub x3, x5, x7 // encoding: [0xa3,0x00,0x07,0xcb]
// CHECK: sub xzr, x3, x5 // encoding: [0x7f,0x00,0x05,0xcb]
-// CHECK: sub x20, xzr, x4 // encoding: [0xf4,0x03,0x04,0xcb]
+// CHECK: neg x20, x4 // encoding: [0xf4,0x03,0x04,0xcb]
// CHECK: sub x4, x6, xzr // encoding: [0xc4,0x00,0x1f,0xcb]
sub x11, x13, x15, lsl #0
@@ -543,8 +543,8 @@ _func:
subs w20, wzr, w4
subs w4, w6, wzr
// CHECK: subs w3, w5, w7 // encoding: [0xa3,0x00,0x07,0x6b]
-// CHECK: subs wzr, w3, w5 // encoding: [0x7f,0x00,0x05,0x6b]
-// CHECK: subs w20, wzr, w4 // encoding: [0xf4,0x03,0x04,0x6b]
+// CHECK: {{subs wzr,|cmp}} w3, w5 // encoding: [0x7f,0x00,0x05,0x6b]
+// CHECK: negs w20, w4 // encoding: [0xf4,0x03,0x04,0x6b]
// CHECK: subs w4, w6, wzr // encoding: [0xc4,0x00,0x1f,0x6b]
subs w11, w13, w15, lsl #0
@@ -573,8 +573,8 @@ _func:
subs x20, xzr, x4
subs x4, x6, xzr
// CHECK: subs x3, x5, x7 // encoding: [0xa3,0x00,0x07,0xeb]
-// CHECK: subs xzr, x3, x5 // encoding: [0x7f,0x00,0x05,0xeb]
-// CHECK: subs x20, xzr, x4 // encoding: [0xf4,0x03,0x04,0xeb]
+// CHECK: {{subs xzr,|cmp}} x3, x5 // encoding: [0x7f,0x00,0x05,0xeb]
+// CHECK: negs x20, x4 // encoding: [0xf4,0x03,0x04,0xeb]
// CHECK: subs x4, x6, xzr // encoding: [0xc4,0x00,0x1f,0xeb]
subs x11, x13, x15, lsl #0
@@ -601,9 +601,11 @@ _func:
cmn w0, w3
cmn wzr, w4
cmn w5, wzr
+ cmn wsp, w6
// CHECK: cmn w0, w3 // encoding: [0x1f,0x00,0x03,0x2b]
// CHECK: cmn wzr, w4 // encoding: [0xff,0x03,0x04,0x2b]
// CHECK: cmn w5, wzr // encoding: [0xbf,0x00,0x1f,0x2b]
+// CHECK: cmn wsp, w6 // encoding: [0xff,0x43,0x26,0x2b]
cmn w6, w7, lsl #0
cmn w8, w9, lsl #15
@@ -629,9 +631,11 @@ _func:
cmn x0, x3
cmn xzr, x4
cmn x5, xzr
+ cmn sp, x6
// CHECK: cmn x0, x3 // encoding: [0x1f,0x00,0x03,0xab]
// CHECK: cmn xzr, x4 // encoding: [0xff,0x03,0x04,0xab]
// CHECK: cmn x5, xzr // encoding: [0xbf,0x00,0x1f,0xab]
+// CHECK: cmn sp, x6 // encoding: [0xff,0x63,0x26,0xab]
cmn x6, x7, lsl #0
cmn x8, x9, lsl #15
@@ -657,9 +661,11 @@ _func:
cmp w0, w3
cmp wzr, w4
cmp w5, wzr
+ cmp wsp, w6
// CHECK: cmp w0, w3 // encoding: [0x1f,0x00,0x03,0x6b]
// CHECK: cmp wzr, w4 // encoding: [0xff,0x03,0x04,0x6b]
// CHECK: cmp w5, wzr // encoding: [0xbf,0x00,0x1f,0x6b]
+// CHECK: cmp wsp, w6 // encoding: [0xff,0x43,0x26,0x6b]
cmp w6, w7, lsl #0
cmp w8, w9, lsl #15
@@ -685,9 +691,11 @@ _func:
cmp x0, x3
cmp xzr, x4
cmp x5, xzr
+ cmp sp, x6
// CHECK: cmp x0, x3 // encoding: [0x1f,0x00,0x03,0xeb]
// CHECK: cmp xzr, x4 // encoding: [0xff,0x03,0x04,0xeb]
// CHECK: cmp x5, xzr // encoding: [0xbf,0x00,0x1f,0xeb]
+// CHECK: cmp sp, x6 // encoding: [0xff,0x63,0x26,0xeb]
cmp x6, x7, lsl #0
cmp x8, x9, lsl #15
@@ -713,114 +721,118 @@ _func:
neg w29, w30
neg w30, wzr
neg wzr, w0
-// CHECK: sub w29, wzr, w30 // encoding: [0xfd,0x03,0x1e,0x4b]
-// CHECK: sub w30, wzr, wzr // encoding: [0xfe,0x03,0x1f,0x4b]
-// CHECK: sub wzr, wzr, w0 // encoding: [0xff,0x03,0x00,0x4b]
+// CHECK: neg w29, w30 // encoding: [0xfd,0x03,0x1e,0x4b]
+// CHECK: neg w30, wzr // encoding: [0xfe,0x03,0x1f,0x4b]
+// CHECK: neg wzr, w0 // encoding: [0xff,0x03,0x00,0x4b]
neg w28, w27, lsl #0
neg w26, w25, lsl #29
neg w24, w23, lsl #31
-// CHECK: sub w28, wzr, w27 // encoding: [0xfc,0x03,0x1b,0x4b]
-// CHECK: sub w26, wzr, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x4b]
-// CHECK: sub w24, wzr, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x4b]
+
+// CHECK: neg w28, w27 // encoding: [0xfc,0x03,0x1b,0x4b]
+// CHECK: neg w26, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x4b]
+// CHECK: neg w24, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x4b]
neg w22, w21, lsr #0
neg w20, w19, lsr #1
neg w18, w17, lsr #31
-// CHECK: sub w22, wzr, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x4b]
-// CHECK: sub w20, wzr, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x4b]
-// CHECK: sub w18, wzr, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x4b]
+// CHECK: neg w22, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x4b]
+// CHECK: neg w20, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x4b]
+// CHECK: neg w18, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x4b]
neg w16, w15, asr #0
neg w14, w13, asr #12
neg w12, w11, asr #31
-// CHECK: sub w16, wzr, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x4b]
-// CHECK: sub w14, wzr, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x4b]
-// CHECK: sub w12, wzr, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x4b]
+// CHECK: neg w16, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x4b]
+// CHECK: neg w14, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x4b]
+// CHECK: neg w12, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x4b]
neg x29, x30
neg x30, xzr
neg xzr, x0
-// CHECK: sub x29, xzr, x30 // encoding: [0xfd,0x03,0x1e,0xcb]
-// CHECK: sub x30, xzr, xzr // encoding: [0xfe,0x03,0x1f,0xcb]
-// CHECK: sub xzr, xzr, x0 // encoding: [0xff,0x03,0x00,0xcb]
+// CHECK: neg x29, x30 // encoding: [0xfd,0x03,0x1e,0xcb]
+// CHECK: neg x30, xzr // encoding: [0xfe,0x03,0x1f,0xcb]
+// CHECK: neg xzr, x0 // encoding: [0xff,0x03,0x00,0xcb]
neg x28, x27, lsl #0
neg x26, x25, lsl #29
neg x24, x23, lsl #31
-// CHECK: sub x28, xzr, x27 // encoding: [0xfc,0x03,0x1b,0xcb]
-// CHECK: sub x26, xzr, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xcb]
-// CHECK: sub x24, xzr, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xcb]
+
+// CHECK: neg x28, x27 // encoding: [0xfc,0x03,0x1b,0xcb]
+// CHECK: neg x26, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xcb]
+// CHECK: neg x24, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xcb]
neg x22, x21, lsr #0
neg x20, x19, lsr #1
neg x18, x17, lsr #31
-// CHECK: sub x22, xzr, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xcb]
-// CHECK: sub x20, xzr, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xcb]
-// CHECK: sub x18, xzr, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xcb]
+// CHECK: neg x22, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xcb]
+// CHECK: neg x20, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xcb]
+// CHECK: neg x18, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xcb]
neg x16, x15, asr #0
neg x14, x13, asr #12
neg x12, x11, asr #31
-// CHECK: sub x16, xzr, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xcb]
-// CHECK: sub x14, xzr, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xcb]
-// CHECK: sub x12, xzr, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xcb]
+// CHECK: neg x16, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xcb]
+// CHECK: neg x14, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xcb]
+// CHECK: neg x12, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xcb]
negs w29, w30
negs w30, wzr
negs wzr, w0
-// CHECK: subs w29, wzr, w30 // encoding: [0xfd,0x03,0x1e,0x6b]
-// CHECK: subs w30, wzr, wzr // encoding: [0xfe,0x03,0x1f,0x6b]
-// CHECK: subs wzr, wzr, w0 // encoding: [0xff,0x03,0x00,0x6b]
+// CHECK: negs w29, w30 // encoding: [0xfd,0x03,0x1e,0x6b]
+// CHECK: negs w30, wzr // encoding: [0xfe,0x03,0x1f,0x6b]
+// CHECK: cmp wzr, w0 // encoding: [0xff,0x03,0x00,0x6b]
negs w28, w27, lsl #0
negs w26, w25, lsl #29
negs w24, w23, lsl #31
-// CHECK: subs w28, wzr, w27 // encoding: [0xfc,0x03,0x1b,0x6b]
-// CHECK: subs w26, wzr, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x6b]
-// CHECK: subs w24, wzr, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x6b]
+
+// CHECK: negs w28, w27 // encoding: [0xfc,0x03,0x1b,0x6b]
+// CHECK: negs w26, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x6b]
+// CHECK: negs w24, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x6b]
negs w22, w21, lsr #0
negs w20, w19, lsr #1
negs w18, w17, lsr #31
-// CHECK: subs w22, wzr, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x6b]
-// CHECK: subs w20, wzr, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x6b]
-// CHECK: subs w18, wzr, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x6b]
+// CHECK: negs w22, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x6b]
+// CHECK: negs w20, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x6b]
+// CHECK: negs w18, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x6b]
negs w16, w15, asr #0
negs w14, w13, asr #12
negs w12, w11, asr #31
-// CHECK: subs w16, wzr, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x6b]
-// CHECK: subs w14, wzr, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x6b]
-// CHECK: subs w12, wzr, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x6b]
+// CHECK: negs w16, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x6b]
+// CHECK: negs w14, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x6b]
+// CHECK: negs w12, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x6b]
negs x29, x30
negs x30, xzr
negs xzr, x0
-// CHECK: subs x29, xzr, x30 // encoding: [0xfd,0x03,0x1e,0xeb]
-// CHECK: subs x30, xzr, xzr // encoding: [0xfe,0x03,0x1f,0xeb]
-// CHECK: subs xzr, xzr, x0 // encoding: [0xff,0x03,0x00,0xeb]
+// CHECK: negs x29, x30 // encoding: [0xfd,0x03,0x1e,0xeb]
+// CHECK: negs x30, xzr // encoding: [0xfe,0x03,0x1f,0xeb]
+// CHECK: cmp xzr, x0 // encoding: [0xff,0x03,0x00,0xeb]
negs x28, x27, lsl #0
negs x26, x25, lsl #29
negs x24, x23, lsl #31
-// CHECK: subs x28, xzr, x27 // encoding: [0xfc,0x03,0x1b,0xeb]
-// CHECK: subs x26, xzr, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xeb]
-// CHECK: subs x24, xzr, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xeb]
+
+// CHECK: negs x28, x27 // encoding: [0xfc,0x03,0x1b,0xeb]
+// CHECK: negs x26, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xeb]
+// CHECK: negs x24, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xeb]
negs x22, x21, lsr #0
negs x20, x19, lsr #1
negs x18, x17, lsr #31
-// CHECK: subs x22, xzr, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xeb]
-// CHECK: subs x20, xzr, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xeb]
-// CHECK: subs x18, xzr, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xeb]
+// CHECK: negs x22, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xeb]
+// CHECK: negs x20, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xeb]
+// CHECK: negs x18, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xeb]
negs x16, x15, asr #0
negs x14, x13, asr #12
negs x12, x11, asr #31
-// CHECK: subs x16, xzr, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xeb]
-// CHECK: subs x14, xzr, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xeb]
-// CHECK: subs x12, xzr, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xeb]
+// CHECK: negs x16, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xeb]
+// CHECK: negs x14, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xeb]
+// CHECK: negs x12, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xeb]
//------------------------------------------------------------------------------
// Add-sub (shifted register)
@@ -933,28 +945,29 @@ _func:
sbfm x3, x4, #63, #63
sbfm wzr, wzr, #31, #31
sbfm w12, w9, #0, #0
-// CHECK: sbfm x1, x2, #3, #4 // encoding: [0x41,0x10,0x43,0x93]
-// CHECK: sbfm x3, x4, #63, #63 // encoding: [0x83,0xfc,0x7f,0x93]
-// CHECK: sbfm wzr, wzr, #31, #31 // encoding: [0xff,0x7f,0x1f,0x13]
-// CHECK: sbfm w12, w9, #0, #0 // encoding: [0x2c,0x01,0x00,0x13]
+
+// CHECK: sbfx x1, x2, #3, #2 // encoding: [0x41,0x10,0x43,0x93]
+// CHECK: asr x3, x4, #63 // encoding: [0x83,0xfc,0x7f,0x93]
+// CHECK: asr wzr, wzr, #31 // encoding: [0xff,0x7f,0x1f,0x13]
+// CHECK: sbfx w12, w9, #0, #1 // encoding: [0x2c,0x01,0x00,0x13]
ubfm x4, x5, #12, #10
ubfm xzr, x4, #0, #0
ubfm x4, xzr, #63, #5
ubfm x5, x6, #12, #63
-// CHECK: ubfm x4, x5, #12, #10 // encoding: [0xa4,0x28,0x4c,0xd3]
-// CHECK: ubfm xzr, x4, #0, #0 // encoding: [0x9f,0x00,0x40,0xd3]
-// CHECK: ubfm x4, xzr, #63, #5 // encoding: [0xe4,0x17,0x7f,0xd3]
-// CHECK: ubfm x5, x6, #12, #63 // encoding: [0xc5,0xfc,0x4c,0xd3]
+// CHECK: ubfiz x4, x5, #52, #11 // encoding: [0xa4,0x28,0x4c,0xd3]
+// CHECK: ubfx xzr, x4, #0, #1 // encoding: [0x9f,0x00,0x40,0xd3]
+// CHECK: ubfiz x4, xzr, #1, #6 // encoding: [0xe4,0x17,0x7f,0xd3]
+// CHECK: lsr x5, x6, #12 // encoding: [0xc5,0xfc,0x4c,0xd3]
bfm x4, x5, #12, #10
bfm xzr, x4, #0, #0
bfm x4, xzr, #63, #5
bfm x5, x6, #12, #63
-// CHECK: bfm x4, x5, #12, #10 // encoding: [0xa4,0x28,0x4c,0xb3]
-// CHECK: bfm xzr, x4, #0, #0 // encoding: [0x9f,0x00,0x40,0xb3]
-// CHECK: bfm x4, xzr, #63, #5 // encoding: [0xe4,0x17,0x7f,0xb3]
-// CHECK: bfm x5, x6, #12, #63 // encoding: [0xc5,0xfc,0x4c,0xb3]
+// CHECK: bfi x4, x5, #52, #11 // encoding: [0xa4,0x28,0x4c,0xb3]
+// CHECK: bfxil xzr, x4, #0, #1 // encoding: [0x9f,0x00,0x40,0xb3]
+// CHECK: bfi x4, xzr, #1, #6 // encoding: [0xe4,0x17,0x7f,0xb3]
+// CHECK: bfxil x5, x6, #12, #52 // encoding: [0xc5,0xfc,0x4c,0xb3]
sxtb w1, w2
sxtb xzr, w3
@@ -972,9 +985,9 @@ _func:
uxth w9, w10
uxth x0, w1
// CHECK: uxtb w1, w2 // encoding: [0x41,0x1c,0x00,0x53]
-// CHECK: uxtb xzr, w3 // encoding: [0x7f,0x1c,0x00,0x53]
+// CHECK: uxtb {{[wx]}}zr, w3 // encoding: [0x7f,0x1c,0x00,0x53]
// CHECK: uxth w9, w10 // encoding: [0x49,0x3d,0x00,0x53]
-// CHECK: uxth x0, w1 // encoding: [0x20,0x3c,0x00,0x53]
+// CHECK: uxth {{[wx]}}0, w1 // encoding: [0x20,0x3c,0x00,0x53]
asr w3, w2, #0
asr w9, w10, #31
@@ -998,7 +1011,7 @@ _func:
lsl w9, w10, #31
lsl x20, x21, #63
lsl w1, wzr, #3
-// CHECK: lsl w3, w2, #0 // encoding: [0x43,0x7c,0x00,0x53]
+// CHECK: {{lsl|lsr}} w3, w2, #0 // encoding: [0x43,0x7c,0x00,0x53]
// CHECK: lsl w9, w10, #31 // encoding: [0x49,0x01,0x01,0x53]
// CHECK: lsl x20, x21, #63 // encoding: [0xb4,0x02,0x41,0xd3]
// CHECK: lsl w1, wzr, #3 // encoding: [0xe1,0x73,0x1d,0x53]
@@ -1011,11 +1024,11 @@ _func:
sbfiz w11, w12, #31, #1
sbfiz w13, w14, #29, #3
sbfiz xzr, xzr, #10, #11
-// CHECK: sbfiz w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x13]
+// CHECK: {{sbfiz|sbfx}} w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x13]
// CHECK: sbfiz x2, x3, #63, #1 // encoding: [0x62,0x00,0x41,0x93]
-// CHECK: sbfiz x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0x93]
+// CHECK: asr x19, x20, #0 // encoding: [0x93,0xfe,0x40,0x93]
// CHECK: sbfiz x9, x10, #5, #59 // encoding: [0x49,0xe9,0x7b,0x93]
-// CHECK: sbfiz w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x13]
+// CHECK: asr w9, w10, #0 // encoding: [0x49,0x7d,0x00,0x13]
// CHECK: sbfiz w11, w12, #31, #1 // encoding: [0x8b,0x01,0x01,0x13]
// CHECK: sbfiz w13, w14, #29, #3 // encoding: [0xcd,0x09,0x03,0x13]
// CHECK: sbfiz xzr, xzr, #10, #11 // encoding: [0xff,0x2b,0x76,0x93]
@@ -1029,12 +1042,12 @@ _func:
sbfx w13, w14, #29, #3
sbfx xzr, xzr, #10, #11
// CHECK: sbfx w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x13]
-// CHECK: sbfx x2, x3, #63, #1 // encoding: [0x62,0xfc,0x7f,0x93]
-// CHECK: sbfx x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0x93]
-// CHECK: sbfx x9, x10, #5, #59 // encoding: [0x49,0xfd,0x45,0x93]
-// CHECK: sbfx w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x13]
-// CHECK: sbfx w11, w12, #31, #1 // encoding: [0x8b,0x7d,0x1f,0x13]
-// CHECK: sbfx w13, w14, #29, #3 // encoding: [0xcd,0x7d,0x1d,0x13]
+// CHECK: asr x2, x3, #63 // encoding: [0x62,0xfc,0x7f,0x93]
+// CHECK: asr x19, x20, #0 // encoding: [0x93,0xfe,0x40,0x93]
+// CHECK: asr x9, x10, #5 // encoding: [0x49,0xfd,0x45,0x93]
+// CHECK: asr w9, w10, #0 // encoding: [0x49,0x7d,0x00,0x13]
+// CHECK: asr w11, w12, #31 // encoding: [0x8b,0x7d,0x1f,0x13]
+// CHECK: asr w13, w14, #29 // encoding: [0xcd,0x7d,0x1d,0x13]
// CHECK: sbfx xzr, xzr, #10, #11 // encoding: [0xff,0x53,0x4a,0x93]
bfi w9, w10, #0, #1
@@ -1045,11 +1058,12 @@ _func:
bfi w11, w12, #31, #1
bfi w13, w14, #29, #3
bfi xzr, xzr, #10, #11
-// CHECK: bfi w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x33]
+
+// CHECK: bfxil w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x33]
// CHECK: bfi x2, x3, #63, #1 // encoding: [0x62,0x00,0x41,0xb3]
-// CHECK: bfi x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xb3]
+// CHECK: bfxil x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xb3]
// CHECK: bfi x9, x10, #5, #59 // encoding: [0x49,0xe9,0x7b,0xb3]
-// CHECK: bfi w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x33]
+// CHECK: bfxil w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x33]
// CHECK: bfi w11, w12, #31, #1 // encoding: [0x8b,0x01,0x01,0x33]
// CHECK: bfi w13, w14, #29, #3 // encoding: [0xcd,0x09,0x03,0x33]
// CHECK: bfi xzr, xzr, #10, #11 // encoding: [0xff,0x2b,0x76,0xb3]
@@ -1079,14 +1093,15 @@ _func:
ubfiz w11, w12, #31, #1
ubfiz w13, w14, #29, #3
ubfiz xzr, xzr, #10, #11
-// CHECK: ubfiz w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x53]
-// CHECK: ubfiz x2, x3, #63, #1 // encoding: [0x62,0x00,0x41,0xd3]
-// CHECK: ubfiz x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xd3]
-// CHECK: ubfiz x9, x10, #5, #59 // encoding: [0x49,0xe9,0x7b,0xd3]
-// CHECK: ubfiz w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x53]
-// CHECK: ubfiz w11, w12, #31, #1 // encoding: [0x8b,0x01,0x01,0x53]
-// CHECK: ubfiz w13, w14, #29, #3 // encoding: [0xcd,0x09,0x03,0x53]
-// CHECK: ubfiz xzr, xzr, #10, #11 // encoding: [0xff,0x2b,0x76,0xd3]
+
+// CHECK: ubfx w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x53]
+// CHECK: lsl x2, x3, #63 // encoding: [0x62,0x00,0x41,0xd3]
+// CHECK: lsr x19, x20, #0 // encoding: [0x93,0xfe,0x40,0xd3]
+// CHECK: lsl x9, x10, #5 // encoding: [0x49,0xe9,0x7b,0xd3]
+// CHECK: lsr w9, w10, #0 // encoding: [0x49,0x7d,0x00,0x53]
+// CHECK: lsl w11, w12, #31 // encoding: [0x8b,0x01,0x01,0x53]
+// CHECK: lsl w13, w14, #29 // encoding: [0xcd,0x09,0x03,0x53]
+// CHECK: ubfiz xzr, xzr, #10, #11 // encoding: [0xff,0x2b,0x76,0xd3]
ubfx w9, w10, #0, #1
ubfx x2, x3, #63, #1
@@ -1096,15 +1111,15 @@ _func:
ubfx w11, w12, #31, #1
ubfx w13, w14, #29, #3
ubfx xzr, xzr, #10, #11
-// CHECK: ubfx w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x53]
-// CHECK: ubfx x2, x3, #63, #1 // encoding: [0x62,0xfc,0x7f,0xd3]
-// CHECK: ubfx x19, x20, #0, #64 // encoding: [0x93,0xfe,0x40,0xd3]
-// CHECK: ubfx x9, x10, #5, #59 // encoding: [0x49,0xfd,0x45,0xd3]
-// CHECK: ubfx w9, w10, #0, #32 // encoding: [0x49,0x7d,0x00,0x53]
-// CHECK: ubfx w11, w12, #31, #1 // encoding: [0x8b,0x7d,0x1f,0x53]
-// CHECK: ubfx w13, w14, #29, #3 // encoding: [0xcd,0x7d,0x1d,0x53]
-// CHECK: ubfx xzr, xzr, #10, #11 // encoding: [0xff,0x53,0x4a,0xd3]
+// CHECK: ubfx w9, w10, #0, #1 // encoding: [0x49,0x01,0x00,0x53]
+// CHECK: lsr x2, x3, #63 // encoding: [0x62,0xfc,0x7f,0xd3]
+// CHECK: lsr x19, x20, #0 // encoding: [0x93,0xfe,0x40,0xd3]
+// CHECK: lsr x9, x10, #5 // encoding: [0x49,0xfd,0x45,0xd3]
+// CHECK: lsr w9, w10, #0 // encoding: [0x49,0x7d,0x00,0x53]
+// CHECK: lsr w11, w12, #31 // encoding: [0x8b,0x7d,0x1f,0x53]
+// CHECK: lsr w13, w14, #29 // encoding: [0xcd,0x7d,0x1d,0x53]
+// CHECK: ubfx xzr, xzr, #10, #11 // encoding: [0xff,0x53,0x4a,0xd3]
//------------------------------------------------------------------------------
// Compare & branch (immediate)
//------------------------------------------------------------------------------
@@ -1113,21 +1128,22 @@ _func:
cbz x5, lbl
cbnz x2, lbl
cbnz x26, lbl
-// CHECK: cbz w5, lbl // encoding: [0x05'A',A,A,0x34'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: cbz x5, lbl // encoding: [0x05'A',A,A,0xb4'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: cbnz x2, lbl // encoding: [0x02'A',A,A,0xb5'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: cbnz x26, lbl // encoding: [0x1a'A',A,A,0xb5'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+// CHECK: cbz w5, lbl // encoding: [0bAAA00101,A,A,0x34]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: cbz x5, lbl // encoding: [0bAAA00101,A,A,0xb4]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: cbnz x2, lbl // encoding: [0bAAA00010,A,A,0xb5]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: cbnz x26, lbl // encoding: [0bAAA11010,A,A,0xb5]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
cbz wzr, lbl
cbnz xzr, lbl
-// CHECK: cbz wzr, lbl // encoding: [0x1f'A',A,A,0x34'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: cbnz xzr, lbl // encoding: [0x1f'A',A,A,0xb5'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+
+// CHECK: cbz wzr, lbl // encoding: [0bAAA11111,A,A,0x34]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: cbnz xzr, lbl // encoding: [0bAAA11111,A,A,0xb5]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
cbz w5, #0
cbnz x3, #-4
@@ -1159,40 +1175,60 @@ _func:
b.gt lbl
b.le lbl
b.al lbl
-// CHECK: b.eq lbl // encoding: [A,A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.ne lbl // encoding: [0x01'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.hs lbl // encoding: [0x02'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.hs lbl // encoding: [0x02'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.lo lbl // encoding: [0x03'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.lo lbl // encoding: [0x03'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.mi lbl // encoding: [0x04'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.pl lbl // encoding: [0x05'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.vs lbl // encoding: [0x06'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.vc lbl // encoding: [0x07'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.hi lbl // encoding: [0x08'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.ls lbl // encoding: [0x09'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.ge lbl // encoding: [0x0a'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.lt lbl // encoding: [0x0b'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.gt lbl // encoding: [0x0c'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.le lbl // encoding: [0x0d'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
-// CHECK: b.al lbl // encoding: [0x0e'A',A,A,0x54'A']
-// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_a64_condbr
+
+// CHECK: b.eq lbl // encoding: [0bAAA00000,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.ne lbl // encoding: [0bAAA00001,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.hs lbl // encoding: [0bAAA00010,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.hs lbl // encoding: [0bAAA00010,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.lo lbl // encoding: [0bAAA00011,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.lo lbl // encoding: [0bAAA00011,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.mi lbl // encoding: [0bAAA00100,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.pl lbl // encoding: [0bAAA00101,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.vs lbl // encoding: [0bAAA00110,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.vc lbl // encoding: [0bAAA00111,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.hi lbl // encoding: [0bAAA01000,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.ls lbl // encoding: [0bAAA01001,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.ge lbl // encoding: [0bAAA01010,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.lt lbl // encoding: [0bAAA01011,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.gt lbl // encoding: [0bAAA01100,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.le lbl // encoding: [0bAAA01101,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+// CHECK: b.al lbl // encoding: [0bAAA01110,A,A,0x54]
+// CHECK: // fixup A - offset: 0, value: lbl, kind: fixup_aarch64_pcrel_branch19
+
+ // ARM64 has these in a separate file
+ beq lbl
+ bne lbl
+ bcs lbl
+ bhs lbl
+ blo lbl
+ bcc lbl
+ bmi lbl
+ bpl lbl
+ bvs lbl
+ bvc lbl
+ bhi lbl
+ bls lbl
+ bge lbl
+ blt lbl
+ bgt lbl
+ ble lbl
+ bal lbl
b.eq #0
b.lt #-4
@@ -1342,55 +1378,55 @@ _func:
cset w3, eq
cset x9, pl
-// CHECK: csinc w3, wzr, wzr, ne // encoding: [0xe3,0x17,0x9f,0x1a]
-// CHECK: csinc x9, xzr, xzr, mi // encoding: [0xe9,0x47,0x9f,0x9a]
+// CHECK: cset w3, eq // encoding: [0xe3,0x17,0x9f,0x1a]
+// CHECK: cset x9, pl // encoding: [0xe9,0x47,0x9f,0x9a]
csetm w20, ne
csetm x30, ge
-// CHECK: csinv w20, wzr, wzr, eq // encoding: [0xf4,0x03,0x9f,0x5a]
-// CHECK: csinv x30, xzr, xzr, lt // encoding: [0xfe,0xb3,0x9f,0xda]
+// CHECK: csetm w20, ne // encoding: [0xf4,0x03,0x9f,0x5a]
+// CHECK: csetm x30, ge // encoding: [0xfe,0xb3,0x9f,0xda]
cinc w3, w5, gt
cinc wzr, w4, le
cinc w9, wzr, lt
-// CHECK: csinc w3, w5, w5, le // encoding: [0xa3,0xd4,0x85,0x1a]
-// CHECK: csinc wzr, w4, w4, gt // encoding: [0x9f,0xc4,0x84,0x1a]
-// CHECK: csinc w9, wzr, wzr, ge // encoding: [0xe9,0xa7,0x9f,0x1a]
+// CHECK: cinc w3, w5, gt // encoding: [0xa3,0xd4,0x85,0x1a]
+// CHECK: cinc wzr, w4, le // encoding: [0x9f,0xc4,0x84,0x1a]
+// CHECK: cset w9, lt // encoding: [0xe9,0xa7,0x9f,0x1a]
cinc x3, x5, gt
cinc xzr, x4, le
cinc x9, xzr, lt
-// CHECK: csinc x3, x5, x5, le // encoding: [0xa3,0xd4,0x85,0x9a]
-// CHECK: csinc xzr, x4, x4, gt // encoding: [0x9f,0xc4,0x84,0x9a]
-// CHECK: csinc x9, xzr, xzr, ge // encoding: [0xe9,0xa7,0x9f,0x9a]
+// CHECK: cinc x3, x5, gt // encoding: [0xa3,0xd4,0x85,0x9a]
+// CHECK: cinc xzr, x4, le // encoding: [0x9f,0xc4,0x84,0x9a]
+// CHECK: cset x9, lt // encoding: [0xe9,0xa7,0x9f,0x9a]
cinv w3, w5, gt
cinv wzr, w4, le
cinv w9, wzr, lt
-// CHECK: csinv w3, w5, w5, le // encoding: [0xa3,0xd0,0x85,0x5a]
-// CHECK: csinv wzr, w4, w4, gt // encoding: [0x9f,0xc0,0x84,0x5a]
-// CHECK: csinv w9, wzr, wzr, ge // encoding: [0xe9,0xa3,0x9f,0x5a]
+// CHECK: cinv w3, w5, gt // encoding: [0xa3,0xd0,0x85,0x5a]
+// CHECK: cinv wzr, w4, le // encoding: [0x9f,0xc0,0x84,0x5a]
+// CHECK: csetm w9, lt // encoding: [0xe9,0xa3,0x9f,0x5a]
cinv x3, x5, gt
cinv xzr, x4, le
cinv x9, xzr, lt
-// CHECK: csinv x3, x5, x5, le // encoding: [0xa3,0xd0,0x85,0xda]
-// CHECK: csinv xzr, x4, x4, gt // encoding: [0x9f,0xc0,0x84,0xda]
-// CHECK: csinv x9, xzr, xzr, ge // encoding: [0xe9,0xa3,0x9f,0xda]
+// CHECK: cinv x3, x5, gt // encoding: [0xa3,0xd0,0x85,0xda]
+// CHECK: cinv xzr, x4, le // encoding: [0x9f,0xc0,0x84,0xda]
+// CHECK: csetm x9, lt // encoding: [0xe9,0xa3,0x9f,0xda]
cneg w3, w5, gt
cneg wzr, w4, le
cneg w9, wzr, lt
-// CHECK: csneg w3, w5, w5, le // encoding: [0xa3,0xd4,0x85,0x5a]
-// CHECK: csneg wzr, w4, w4, gt // encoding: [0x9f,0xc4,0x84,0x5a]
-// CHECK: csneg w9, wzr, wzr, ge // encoding: [0xe9,0xa7,0x9f,0x5a]
+// CHECK: cneg w3, w5, gt // encoding: [0xa3,0xd4,0x85,0x5a]
+// CHECK: cneg wzr, w4, le // encoding: [0x9f,0xc4,0x84,0x5a]
+// CHECK: cneg w9, wzr, lt // encoding: [0xe9,0xa7,0x9f,0x5a]
cneg x3, x5, gt
cneg xzr, x4, le
cneg x9, xzr, lt
-// CHECK: csneg x3, x5, x5, le // encoding: [0xa3,0xd4,0x85,0xda]
-// CHECK: csneg xzr, x4, x4, gt // encoding: [0x9f,0xc4,0x84,0xda]
-// CHECK: csneg x9, xzr, xzr, ge // encoding: [0xe9,0xa7,0x9f,0xda]
+// CHECK: cneg x3, x5, gt // encoding: [0xa3,0xd4,0x85,0xda]
+// CHECK: cneg xzr, x4, le // encoding: [0x9f,0xc4,0x84,0xda]
+// CHECK: cneg x9, xzr, lt // encoding: [0xe9,0xa7,0x9f,0xda]
//------------------------------------------------------------------------------
// Data-processing (1 source)
@@ -1647,23 +1683,23 @@ _func:
svc #0
svc #65535
// CHECK: svc #0 // encoding: [0x01,0x00,0x00,0xd4]
-// CHECK: svc #65535 // encoding: [0xe1,0xff,0x1f,0xd4]
+// CHECK: svc #{{65535|0xffff}} // encoding: [0xe1,0xff,0x1f,0xd4]
hvc #1
smc #12000
brk #12
hlt #123
-// CHECK: hvc #1 // encoding: [0x22,0x00,0x00,0xd4]
-// CHECK: smc #12000 // encoding: [0x03,0xdc,0x05,0xd4]
-// CHECK: brk #12 // encoding: [0x80,0x01,0x20,0xd4]
-// CHECK: hlt #123 // encoding: [0x60,0x0f,0x40,0xd4]
+// CHECK: hvc #{{1|0x1}} // encoding: [0x22,0x00,0x00,0xd4]
+// CHECK: smc #{{12000|0x2ee0}} // encoding: [0x03,0xdc,0x05,0xd4]
+// CHECK: brk #{{12|0xc}} // encoding: [0x80,0x01,0x20,0xd4]
+// CHECK: hlt #{{123|0x7b}} // encoding: [0x60,0x0f,0x40,0xd4]
dcps1 #42
dcps2 #9
dcps3 #1000
-// CHECK: dcps1 #42 // encoding: [0x41,0x05,0xa0,0xd4]
-// CHECK: dcps2 #9 // encoding: [0x22,0x01,0xa0,0xd4]
-// CHECK: dcps3 #1000 // encoding: [0x03,0x7d,0xa0,0xd4]
+// CHECK: dcps1 #{{42|0x2a}} // encoding: [0x41,0x05,0xa0,0xd4]
+// CHECK: dcps2 #{{9|0x9}} // encoding: [0x22,0x01,0xa0,0xd4]
+// CHECK: dcps3 #{{1000|0x3e8}} // encoding: [0x03,0x7d,0xa0,0xd4]
dcps1
dcps2
@@ -1688,11 +1724,11 @@ _func:
ror x19, x23, #24
ror x29, xzr, #63
-// CHECK: extr x19, x23, x23, #24 // encoding: [0xf3,0x62,0xd7,0x93]
-// CHECK: extr x29, xzr, xzr, #63 // encoding: [0xfd,0xff,0xdf,0x93]
+// CHECK: ror x19, x23, #24 // encoding: [0xf3,0x62,0xd7,0x93]
+// CHECK: ror x29, xzr, #63 // encoding: [0xfd,0xff,0xdf,0x93]
ror w9, w13, #31
-// CHECK: extr w9, w13, w13, #31 // encoding: [0xa9,0x7d,0x8d,0x13]
+// CHECK: ror w9, w13, #31 // encoding: [0xa9,0x7d,0x8d,0x13]
//------------------------------------------------------------------------------
// Floating-point compare
@@ -2124,7 +2160,7 @@ _func:
fmov x3, v12.d[1]
fmov v1.d[1], x19
- fmov v3.2d[1], xzr
+ fmov v3.d[1], xzr
// CHECK: fmov x3, v12.d[1] // encoding: [0x83,0x01,0xae,0x9e]
// CHECK: fmov v1.d[1], x19 // encoding: [0x61,0x02,0xaf,0x9e]
// CHECK: fmov v3.d[1], xzr // encoding: [0xe3,0x03,0xaf,0x9e]
@@ -2136,20 +2172,20 @@ _func:
fmov s2, #0.125
fmov s3, #1.0
fmov d30, #16.0
-// CHECK: fmov s2, #0.12500000 // encoding: [0x02,0x10,0x28,0x1e]
-// CHECK: fmov s3, #1.00000000 // encoding: [0x03,0x10,0x2e,0x1e]
-// CHECK: fmov d30, #16.00000000 // encoding: [0x1e,0x10,0x66,0x1e]
+// CHECK: fmov s2, #{{0.12500000|1.250*e-01}} // encoding: [0x02,0x10,0x28,0x1e]
+// CHECK: fmov s3, #{{1.00000000|1.0*e\+00}} // encoding: [0x03,0x10,0x2e,0x1e]
+// CHECK: fmov d30, #{{16.00000000|1.60*e\+01}} // encoding: [0x1e,0x10,0x66,0x1e]
fmov s4, #1.0625
fmov d10, #1.9375
-// CHECK: fmov s4, #1.06250000 // encoding: [0x04,0x30,0x2e,0x1e]
-// CHECK: fmov d10, #1.93750000 // encoding: [0x0a,0xf0,0x6f,0x1e]
+// CHECK: fmov s4, #{{1.06250*(e\+00)?}} // encoding: [0x04,0x30,0x2e,0x1e]
+// CHECK: fmov d10, #{{1.93750*(e\+00)?}} // encoding: [0x0a,0xf0,0x6f,0x1e]
fmov s12, #-1.0
-// CHECK: fmov s12, #-1.00000000 // encoding: [0x0c,0x10,0x3e,0x1e]
+// CHECK: fmov s12, #{{-1.0*(e\+00)?}} // encoding: [0x0c,0x10,0x3e,0x1e]
fmov d16, #8.5
-// CHECK: fmov d16, #8.50000000 // encoding: [0x10,0x30,0x64,0x1e]
+// CHECK: fmov d16, #{{8.50*(e\+00)?}} // encoding: [0x10,0x30,0x64,0x1e]
//------------------------------------------------------------------------------
// Load-register (literal)
@@ -2157,22 +2193,24 @@ _func:
ldr w3, here
ldr x29, there
ldrsw xzr, everywhere
-// CHECK: ldr w3, here // encoding: [0x03'A',A,A,0x18'A']
-// CHECK: // fixup A - offset: 0, value: here, kind: fixup_a64_ld_prel
-// CHECK: ldr x29, there // encoding: [0x1d'A',A,A,0x58'A']
-// CHECK: // fixup A - offset: 0, value: there, kind: fixup_a64_ld_prel
-// CHECK: ldrsw xzr, everywhere // encoding: [0x1f'A',A,A,0x98'A']
-// CHECK: // fixup A - offset: 0, value: everywhere, kind: fixup_a64_ld_prel
+
+// CHECK: ldr w3, here // encoding: [0bAAA00011,A,A,0x18]
+// CHECK: // fixup A - offset: 0, value: here, kind: fixup_aarch64_ldr_pcrel_imm19
+// CHECK: ldr x29, there // encoding: [0bAAA11101,A,A,0x58]
+// CHECK: // fixup A - offset: 0, value: there, kind: fixup_aarch64_ldr_pcrel_imm19
+// CHECK: ldrsw xzr, everywhere // encoding: [0bAAA11111,A,A,0x98]
+// CHECK: // fixup A - offset: 0, value: everywhere, kind: fixup_aarch64_ldr_pcrel_imm19
ldr s0, who_knows
ldr d0, i_dont
ldr q0, there_must_be_a_better_way
-// CHECK: ldr s0, who_knows // encoding: [A,A,A,0x1c'A']
-// CHECK: // fixup A - offset: 0, value: who_knows, kind: fixup_a64_ld_prel
-// CHECK: ldr d0, i_dont // encoding: [A,A,A,0x5c'A']
-// CHECK: // fixup A - offset: 0, value: i_dont, kind: fixup_a64_ld_prel
-// CHECK: ldr q0, there_must_be_a_better_way // encoding: [A,A,A,0x9c'A']
-// CHECK: // fixup A - offset: 0, value: there_must_be_a_better_way, kind: fixup_a64_ld_prel
+
+// CHECK: ldr s0, who_knows // encoding: [0bAAA00000,A,A,0x1c]
+// CHECK: // fixup A - offset: 0, value: who_knows, kind: fixup_aarch64_ldr_pcrel_imm19
+// CHECK: ldr d0, i_dont // encoding: [0bAAA00000,A,A,0x5c]
+// CHECK: // fixup A - offset: 0, value: i_dont, kind: fixup_aarch64_ldr_pcrel_imm19
+// CHECK: ldr q0, there_must_be_a_better_way // encoding: [0bAAA00000,A,A,0x9c]
+// CHECK: // fixup A - offset: 0, value: there_must_be_a_better_way, kind: fixup_aarch64_ldr_pcrel_imm19
ldr w0, #1048572
ldr x10, #-1048576
@@ -2181,32 +2219,11 @@ _func:
prfm pldl1strm, nowhere
prfm #22, somewhere
-// CHECK: prfm pldl1strm, nowhere // encoding: [0x01'A',A,A,0xd8'A']
-// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_a64_ld_prel
-// CHECK: prfm #22, somewhere // encoding: [0x16'A',A,A,0xd8'A']
-// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_a64_ld_prel
-
-//------------------------------------------------------------------------------
-// Floating-point immediate
-//------------------------------------------------------------------------------
-
- fmov s2, #0.125
- fmov s3, #1.0
- fmov d30, #16.0
-// CHECK: fmov s2, #0.12500000 // encoding: [0x02,0x10,0x28,0x1e]
-// CHECK: fmov s3, #1.00000000 // encoding: [0x03,0x10,0x2e,0x1e]
-// CHECK: fmov d30, #16.00000000 // encoding: [0x1e,0x10,0x66,0x1e]
-
- fmov s4, #1.0625
- fmov d10, #1.9375
-// CHECK: fmov s4, #1.06250000 // encoding: [0x04,0x30,0x2e,0x1e]
-// CHECK: fmov d10, #1.93750000 // encoding: [0x0a,0xf0,0x6f,0x1e]
- fmov s12, #-1.0
-// CHECK: fmov s12, #-1.00000000 // encoding: [0x0c,0x10,0x3e,0x1e]
-
- fmov d16, #8.5
-// CHECK: fmov d16, #8.50000000 // encoding: [0x10,0x30,0x64,0x1e]
+// CHECK: prfm pldl1strm, nowhere // encoding: [0bAAA00001,A,A,0xd8]
+// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_aarch64_ldr_pcrel_imm19
+// CHECK: prfm #22, somewhere // encoding: [0bAAA10110,A,A,0xd8]
+// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_aarch64_ldr_pcrel_imm19
//------------------------------------------------------------------------------
// Load/store exclusive
@@ -2421,18 +2438,19 @@ _func:
ldrsw x15, [x5, #:lo12:sym]
ldr x15, [x5, #:lo12:sym]
ldr q3, [x2, #:lo12:sym]
-// CHECK: str x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,A,0xf9'A']
-// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst64_lo12
-// CHECK: ldrb w15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x40'A',0x39'A']
-// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst8_lo12
-// CHECK: ldrsh x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x80'A',0x79'A']
-// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst16_lo12
-// CHECK: ldrsw x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x80'A',0xb9'A']
-// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst32_lo12
-// CHECK: ldr x15, [x5, #:lo12:sym] // encoding: [0xaf'A',A,0x40'A',0xf9'A']
-// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst64_lo12
-// CHECK: ldr q3, [x2, #:lo12:sym] // encoding: [0x43'A',A,0xc0'A',0x3d'A']
-// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_a64_ldst128_lo12
+
+// CHECK: str x15, [x5, :lo12:sym] // encoding: [0xaf,0bAAAAAA00,0b00AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: ldrb w15, [x5, :lo12:sym] // encoding: [0xaf,0bAAAAAA00,0b01AAAAAA,0x39]
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_aarch64_ldst_imm12_scale1
+// CHECK: ldrsh x15, [x5, :lo12:sym] // encoding: [0xaf,0bAAAAAA00,0b10AAAAAA,0x79]
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_aarch64_ldst_imm12_scale2
+// CHECK: ldrsw x15, [x5, :lo12:sym] // encoding: [0xaf,0bAAAAAA00,0b10AAAAAA,0xb9]
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_aarch64_ldst_imm12_scale4
+// CHECK: ldr x15, [x5, :lo12:sym] // encoding: [0xaf,0bAAAAAA00,0b01AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: ldr q3, [x2, :lo12:sym] // encoding: [0x43,0bAAAAAA00,0b11AAAAAA,0x3d]
+// CHECK: // fixup A - offset: 0, value: :lo12:sym, kind: fixup_aarch64_ldst_imm12_scale16
prfm pldl1keep, [sp, #8]
prfm pldl1strm, [x3]
@@ -2454,24 +2472,24 @@ _func:
prfm pstl3strm, [x6]
prfm #15, [sp]
// CHECK: prfm pldl1keep, [sp, #8] // encoding: [0xe0,0x07,0x80,0xf9]
-// CHECK: prfm pldl1strm, [x3, #0] // encoding: [0x61,0x00,0x80,0xf9]
+// CHECK: prfm pldl1strm, [x3{{(, #0)?}}] // encoding: [0x61,0x00,0x80,0xf9]
// CHECK: prfm pldl2keep, [x5, #16] // encoding: [0xa2,0x08,0x80,0xf9]
-// CHECK: prfm pldl2strm, [x2, #0] // encoding: [0x43,0x00,0x80,0xf9]
-// CHECK: prfm pldl3keep, [x5, #0] // encoding: [0xa4,0x00,0x80,0xf9]
-// CHECK: prfm pldl3strm, [x6, #0] // encoding: [0xc5,0x00,0x80,0xf9]
+// CHECK: prfm pldl2strm, [x2{{(, #0)?}}] // encoding: [0x43,0x00,0x80,0xf9]
+// CHECK: prfm pldl3keep, [x5{{(, #0)?}}] // encoding: [0xa4,0x00,0x80,0xf9]
+// CHECK: prfm pldl3strm, [x6{{(, #0)?}}] // encoding: [0xc5,0x00,0x80,0xf9]
// CHECK: prfm plil1keep, [sp, #8] // encoding: [0xe8,0x07,0x80,0xf9]
-// CHECK: prfm plil1strm, [x3, #0] // encoding: [0x69,0x00,0x80,0xf9]
+// CHECK: prfm plil1strm, [x3{{(, #0)?}}] // encoding: [0x69,0x00,0x80,0xf9]
// CHECK: prfm plil2keep, [x5, #16] // encoding: [0xaa,0x08,0x80,0xf9]
-// CHECK: prfm plil2strm, [x2, #0] // encoding: [0x4b,0x00,0x80,0xf9]
-// CHECK: prfm plil3keep, [x5, #0] // encoding: [0xac,0x00,0x80,0xf9]
-// CHECK: prfm plil3strm, [x6, #0] // encoding: [0xcd,0x00,0x80,0xf9]
+// CHECK: prfm plil2strm, [x2{{(, #0)?}}] // encoding: [0x4b,0x00,0x80,0xf9]
+// CHECK: prfm plil3keep, [x5{{(, #0)?}}] // encoding: [0xac,0x00,0x80,0xf9]
+// CHECK: prfm plil3strm, [x6{{(, #0)?}}] // encoding: [0xcd,0x00,0x80,0xf9]
// CHECK: prfm pstl1keep, [sp, #8] // encoding: [0xf0,0x07,0x80,0xf9]
-// CHECK: prfm pstl1strm, [x3, #0] // encoding: [0x71,0x00,0x80,0xf9]
+// CHECK: prfm pstl1strm, [x3{{(, #0)?}}] // encoding: [0x71,0x00,0x80,0xf9]
// CHECK: prfm pstl2keep, [x5, #16] // encoding: [0xb2,0x08,0x80,0xf9]
-// CHECK: prfm pstl2strm, [x2, #0] // encoding: [0x53,0x00,0x80,0xf9]
-// CHECK: prfm pstl3keep, [x5, #0] // encoding: [0xb4,0x00,0x80,0xf9]
-// CHECK: prfm pstl3strm, [x6, #0] // encoding: [0xd5,0x00,0x80,0xf9]
-// CHECK: prfm #15, [sp, #0] // encoding: [0xef,0x03,0x80,0xf9]
+// CHECK: prfm pstl2strm, [x2{{(, #0)?}}] // encoding: [0x53,0x00,0x80,0xf9]
+// CHECK: prfm pstl3keep, [x5{{(, #0)?}}] // encoding: [0xb4,0x00,0x80,0xf9]
+// CHECK: prfm pstl3strm, [x6{{(, #0)?}}] // encoding: [0xd5,0x00,0x80,0xf9]
+// CHECK: prfm #15, [sp{{(, #0)?}}] // encoding: [0xef,0x03,0x80,0xf9]
//// Floating-point versions
@@ -2584,7 +2602,7 @@ _func:
// CHECK: ldr x17, [x23, w9, sxtw] // encoding: [0xf1,0xca,0x69,0xf8]
// CHECK: ldr x18, [x22, w10, sxtw] // encoding: [0xd2,0xca,0x6a,0xf8]
// CHECK: str d19, [x21, wzr, sxtw #3] // encoding: [0xb3,0xda,0x3f,0xfc]
-// CHECK: prfm #6, [x0, x5, lsl #0] // encoding: [0x06,0x68,0xa5,0xf8]
+// CHECK: prfm #6, [x0, x5{{(, lsl #0)?}}] // encoding: [0x06,0x68,0xa5,0xf8]
ldr q3, [sp, x5]
ldr q9, [x27, x6, lsl #0]
@@ -3166,15 +3184,15 @@ _func:
ands wzr, w18, #0xcccccccc
ands w19, w20, #0x33333333
ands w21, w22, #0x99999999
-// CHECK: ands wzr, w18, #0xcccccccc // encoding: [0x5f,0xe6,0x02,0x72]
+// CHECK: {{ands wzr,|tst}} w18, #0xcccccccc // encoding: [0x5f,0xe6,0x02,0x72]
// CHECK: ands w19, w20, #0x33333333 // encoding: [0x93,0xe6,0x00,0x72]
// CHECK: ands w21, w22, #0x99999999 // encoding: [0xd5,0xe6,0x01,0x72]
// 2 bit replication width
tst w3, #0xaaaaaaaa
tst wzr, #0x55555555
-// CHECK: ands wzr, w3, #0xaaaaaaaa // encoding: [0x7f,0xf0,0x01,0x72]
-// CHECK: ands wzr, wzr, #0x55555555 // encoding: [0xff,0xf3,0x00,0x72]
+// CHECK: {{ands wzr,|tst}} w3, #0xaaaaaaaa // encoding: [0x7f,0xf0,0x01,0x72]
+// CHECK: {{ands wzr,|tst}} wzr, #0x55555555 // encoding: [0xff,0xf3,0x00,0x72]
// 64 bit replication-width
eor x3, x5, #0xffffffffc000000
@@ -3212,20 +3230,31 @@ _func:
ands xzr, x18, #0xcccccccccccccccc
ands x19, x20, #0x3333333333333333
ands x21, x22, #0x9999999999999999
-// CHECK: ands xzr, x18, #0xcccccccccccccccc // encoding: [0x5f,0xe6,0x02,0xf2]
+// CHECK: {{ands xzr,|tst}} x18, #0xcccccccccccccccc // encoding: [0x5f,0xe6,0x02,0xf2]
// CHECK: ands x19, x20, #0x3333333333333333 // encoding: [0x93,0xe6,0x00,0xf2]
// CHECK: ands x21, x22, #0x9999999999999999 // encoding: [0xd5,0xe6,0x01,0xf2]
// 2 bit replication-width
tst x3, #0xaaaaaaaaaaaaaaaa
tst xzr, #0x5555555555555555
-// CHECK: ands xzr, x3, #0xaaaaaaaaaaaaaaaa // encoding: [0x7f,0xf0,0x01,0xf2]
-// CHECK: ands xzr, xzr, #0x5555555555555555 // encoding: [0xff,0xf3,0x00,0xf2]
+// CHECK: {{ands xzr,|tst}} x3, #0xaaaaaaaaaaaaaaaa // encoding: [0x7f,0xf0,0x01,0xf2]
+// CHECK: {{ands xzr,|tst}} xzr, #0x5555555555555555 // encoding: [0xff,0xf3,0x00,0xf2]
mov w3, #0xf000f
mov x10, #0xaaaaaaaaaaaaaaaa
// CHECK: orr w3, wzr, #0xf000f // encoding: [0xe3,0x8f,0x00,0x32]
-// CHECK: orr x10, xzr, #0xaaaaaaaaaaaaaaaa // encoding: [0xea,0xf3,0x01,0xb2]
+// CHECK: orr x10, xzr, #0xaaaaaaaaaaaaaaaa // encoding: [0xea,0xf3,0x01,0xb2]
+
+ // The Imm field of logicalImm operations has to be truncated to the
+ // register width, i.e. 32 bits
+ and w2, w3, #-3
+ orr w0, w1, #~2
+ eor w16, w17, #-7
+ ands w19, w20, #~15
+// CHECK: and w2, w3, #0xfffffffd // encoding: [0x62,0x78,0x1e,0x12]
+// CHECK: orr w0, w1, #0xfffffffd // encoding: [0x20,0x78,0x1e,0x32]
+// CHECK: eor w16, w17, #0xfffffff9 // encoding: [0x30,0x76,0x1d,0x52]
+// CHECK: ands w19, w20, #0xfffffff0 // encoding: [0x93,0x6e,0x1c,0x72]
//------------------------------------------------------------------------------
// Logical (shifted register)
@@ -3301,75 +3330,83 @@ _func:
movz w1, #65535, lsl #0
movz w2, #0, lsl #16
movn w2, #1234, lsl #0
-// CHECK: movz w1, #65535 // encoding: [0xe1,0xff,0x9f,0x52]
+// CHECK: movz w1, #{{65535|0xffff}} // encoding: [0xe1,0xff,0x9f,0x52]
// CHECK: movz w2, #0, lsl #16 // encoding: [0x02,0x00,0xa0,0x52]
-// CHECK: movn w2, #1234 // encoding: [0x42,0x9a,0x80,0x12]
+// CHECK: movn w2, #{{1234|0x4d2}} // encoding: [0x42,0x9a,0x80,0x12]
movz x2, #1234, lsl #32
movk xzr, #4321, lsl #48
-// CHECK: movz x2, #1234, lsl #32 // encoding: [0x42,0x9a,0xc0,0xd2]
-// CHECK: movk xzr, #4321, lsl #48 // encoding: [0x3f,0x1c,0xe2,0xf2]
+// CHECK: movz x2, #{{1234|0x4d2}}, lsl #32 // encoding: [0x42,0x9a,0xc0,0xd2]
+// CHECK: movk xzr, #{{4321|0x10e1}}, lsl #48 // encoding: [0x3f,0x1c,0xe2,0xf2]
movz x2, #:abs_g0:sym
movk w3, #:abs_g0_nc:sym
-// CHECK: movz x2, #:abs_g0:sym // encoding: [0x02'A',A,0x80'A',0xd2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0:sym, kind: fixup_a64_movw_uabs_g0
-// CHECK: movk w3, #:abs_g0_nc:sym // encoding: [0x03'A',A,0x80'A',0x72'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_nc:sym, kind: fixup_a64_movw_uabs_g0_nc
+
+// CHECK: movz x2, #:abs_g0:sym // encoding: [0bAAA00010,A,0b100AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0:sym, kind: fixup_aarch64_movw
+// CHECK: movk w3, #:abs_g0_nc:sym // encoding: [0bAAA00011,A,0b100AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_nc:sym, kind: fixup_aarch64_movw
movz x4, #:abs_g1:sym
movk w5, #:abs_g1_nc:sym
-// CHECK: movz x4, #:abs_g1:sym // encoding: [0x04'A',A,0xa0'A',0xd2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1:sym, kind: fixup_a64_movw_uabs_g1
-// CHECK: movk w5, #:abs_g1_nc:sym // encoding: [0x05'A',A,0xa0'A',0x72'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_nc:sym, kind: fixup_a64_movw_uabs_g1_nc
+
+// CHECK: movz x4, #:abs_g1:sym // encoding: [0bAAA00100,A,0b101AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1:sym, kind: fixup_aarch64_movw
+// CHECK: movk w5, #:abs_g1_nc:sym // encoding: [0bAAA00101,A,0b101AAAAA,0x72]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_nc:sym, kind: fixup_aarch64_movw
movz x6, #:abs_g2:sym
movk x7, #:abs_g2_nc:sym
-// CHECK: movz x6, #:abs_g2:sym // encoding: [0x06'A',A,0xc0'A',0xd2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2:sym, kind: fixup_a64_movw_uabs_g2
-// CHECK: movk x7, #:abs_g2_nc:sym // encoding: [0x07'A',A,0xc0'A',0xf2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_nc:sym, kind: fixup_a64_movw_uabs_g2_nc
+
+// CHECK: movz x6, #:abs_g2:sym // encoding: [0bAAA00110,A,0b110AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2:sym, kind: fixup_aarch64_movw
+// CHECK: movk x7, #:abs_g2_nc:sym // encoding: [0bAAA00111,A,0b110AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_nc:sym, kind: fixup_aarch64_movw
movz x8, #:abs_g3:sym
movk x9, #:abs_g3:sym
-// CHECK: movz x8, #:abs_g3:sym // encoding: [0x08'A',A,0xe0'A',0xd2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g3:sym, kind: fixup_a64_movw_uabs_g3
-// CHECK: movk x9, #:abs_g3:sym // encoding: [0x09'A',A,0xe0'A',0xf2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g3:sym, kind: fixup_a64_movw_uabs_g3
+
+// CHECK: movz x8, #:abs_g3:sym // encoding: [0bAAA01000,A,0b111AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g3:sym, kind: fixup_aarch64_movw
+// CHECK: movk x9, #:abs_g3:sym // encoding: [0bAAA01001,A,0b111AAAAA,0xf2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g3:sym, kind: fixup_aarch64_movw
+
movn x30, #:abs_g0_s:sym
movz x19, #:abs_g0_s:sym
movn w10, #:abs_g0_s:sym
movz w25, #:abs_g0_s:sym
-// CHECK: movn x30, #:abs_g0_s:sym // encoding: [0x1e'A',A,0x80'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
-// CHECK: movz x19, #:abs_g0_s:sym // encoding: [0x13'A',A,0x80'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
-// CHECK: movn w10, #:abs_g0_s:sym // encoding: [0x0a'A',A,0x80'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
-// CHECK: movz w25, #:abs_g0_s:sym // encoding: [0x19'A',A,0x80'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_a64_movw_sabs_g0
+
+// CHECK: movn x30, #:abs_g0_s:sym // encoding: [0bAAA11110,A,0b100AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_aarch64_movw
+// CHECK: movz x19, #:abs_g0_s:sym // encoding: [0bAAA10011,A,0b100AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_aarch64_movw
+// CHECK: movn w10, #:abs_g0_s:sym // encoding: [0bAAA01010,A,0b100AAAAA,0x12]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_aarch64_movw
+// CHECK: movz w25, #:abs_g0_s:sym // encoding: [0bAAA11001,A,0b100AAAAA,0x52]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g0_s:sym, kind: fixup_aarch64_movw
movn x30, #:abs_g1_s:sym
movz x19, #:abs_g1_s:sym
movn w10, #:abs_g1_s:sym
movz w25, #:abs_g1_s:sym
-// CHECK: movn x30, #:abs_g1_s:sym // encoding: [0x1e'A',A,0xa0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
-// CHECK: movz x19, #:abs_g1_s:sym // encoding: [0x13'A',A,0xa0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
-// CHECK: movn w10, #:abs_g1_s:sym // encoding: [0x0a'A',A,0xa0'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
-// CHECK: movz w25, #:abs_g1_s:sym // encoding: [0x19'A',A,0xa0'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_a64_movw_sabs_g1
+
+// CHECK: movn x30, #:abs_g1_s:sym // encoding: [0bAAA11110,A,0b101AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_aarch64_movw
+// CHECK: movz x19, #:abs_g1_s:sym // encoding: [0bAAA10011,A,0b101AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_aarch64_movw
+// CHECK: movn w10, #:abs_g1_s:sym // encoding: [0bAAA01010,A,0b101AAAAA,0x12]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_aarch64_movw
+// CHECK: movz w25, #:abs_g1_s:sym // encoding: [0bAAA11001,A,0b101AAAAA,0x52]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g1_s:sym, kind: fixup_aarch64_movw
movn x30, #:abs_g2_s:sym
movz x19, #:abs_g2_s:sym
-// CHECK: movn x30, #:abs_g2_s:sym // encoding: [0x1e'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_s:sym, kind: fixup_a64_movw_sabs_g2
-// CHECK: movz x19, #:abs_g2_s:sym // encoding: [0x13'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_s:sym, kind: fixup_a64_movw_sabs_g2
+
+// CHECK: movn x30, #:abs_g2_s:sym // encoding: [0bAAA11110,A,0b110AAAAA,0x92]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_s:sym, kind: fixup_aarch64_movw
+// CHECK: movz x19, #:abs_g2_s:sym // encoding: [0bAAA10011,A,0b110AAAAA,0xd2]
+// CHECK-NEXT: // fixup A - offset: 0, value: :abs_g2_s:sym, kind: fixup_aarch64_movw
//------------------------------------------------------------------------------
// PC-relative addressing
@@ -3377,15 +3414,16 @@ _func:
adr x2, loc
adr xzr, loc
- // CHECK: adr x2, loc // encoding: [0x02'A',A,A,0x10'A']
- // CHECK: // fixup A - offset: 0, value: loc, kind: fixup_a64_adr_prel
- // CHECK: adr xzr, loc // encoding: [0x1f'A',A,A,0x10'A']
- // CHECK: // fixup A - offset: 0, value: loc, kind: fixup_a64_adr_prel
+
+// CHECK: adr x2, loc // encoding: [0x02'A',A,A,0x10'A']
+// CHECK: // fixup A - offset: 0, value: loc, kind: fixup_aarch64_pcrel_adr_imm21
+// CHECK: adr xzr, loc // encoding: [0x1f'A',A,A,0x10'A']
+// CHECK: // fixup A - offset: 0, value: loc, kind: fixup_aarch64_pcrel_adr_imm21
adrp x29, loc
- // CHECK: adrp x29, loc // encoding: [0x1d'A',A,A,0x90'A']
- // CHECK: // fixup A - offset: 0, value: loc, kind: fixup_a64_adr_prel_page
+// CHECK: adrp x29, loc // encoding: [0x1d'A',A,A,0x90'A']
+// CHECK: // fixup A - offset: 0, value: loc, kind: fixup_aarch64_pcrel_adrp_imm21
adrp x30, #4096
adr x20, #0
adr x9, #-1
@@ -3411,7 +3449,7 @@ _func:
hint #0
hint #127
// CHECK: nop // encoding: [0x1f,0x20,0x03,0xd5]
-// CHECK: hint #127 // encoding: [0xff,0x2f,0x03,0xd5]
+// CHECK: hint #{{127|0x7f}} // encoding: [0xff,0x2f,0x03,0xd5]
nop
yield
@@ -3508,14 +3546,14 @@ _func:
msr spsel, #0
msr daifset, #15
msr daifclr, #12
-// CHECK: msr spsel, #0 // encoding: [0xbf,0x40,0x00,0xd5]
-// CHECK: msr daifset, #15 // encoding: [0xdf,0x4f,0x03,0xd5]
-// CHECK: msr daifclr, #12 // encoding: [0xff,0x4c,0x03,0xd5]
+// CHECK: msr {{spsel|SPSEL}}, #0 // encoding: [0xbf,0x40,0x00,0xd5]
+// CHECK: msr {{daifset|DAIFSET}}, #15 // encoding: [0xdf,0x4f,0x03,0xd5]
+// CHECK: msr {{daifclr|DAIFCLR}}, #12 // encoding: [0xff,0x4c,0x03,0xd5]
sys #7, c5, c9, #7, x5
sys #0, c15, c15, #2
// CHECK: sys #7, c5, c9, #7, x5 // encoding: [0xe5,0x59,0x0f,0xd5]
-// CHECK: sys #0, c15, c15, #2, xzr // encoding: [0x5f,0xff,0x08,0xd5]
+// CHECK: sys #0, c15, c15, #2 // encoding: [0x5f,0xff,0x08,0xd5]
sysl x9, #7, c5, c9, #7
sysl x1, #0, c15, c15, #2
@@ -3890,260 +3928,260 @@ _func:
msr PMEVTYPER28_EL0, x12
msr PMEVTYPER29_EL0, x12
msr PMEVTYPER30_EL0, x12
-// CHECK: msr teecr32_el1, x12 // encoding: [0x0c,0x00,0x12,0xd5]
-// CHECK: msr osdtrrx_el1, x12 // encoding: [0x4c,0x00,0x10,0xd5]
-// CHECK: msr mdccint_el1, x12 // encoding: [0x0c,0x02,0x10,0xd5]
-// CHECK: msr mdscr_el1, x12 // encoding: [0x4c,0x02,0x10,0xd5]
-// CHECK: msr osdtrtx_el1, x12 // encoding: [0x4c,0x03,0x10,0xd5]
-// CHECK: msr dbgdtr_el0, x12 // encoding: [0x0c,0x04,0x13,0xd5]
-// CHECK: msr dbgdtrtx_el0, x12 // encoding: [0x0c,0x05,0x13,0xd5]
-// CHECK: msr oseccr_el1, x12 // encoding: [0x4c,0x06,0x10,0xd5]
-// CHECK: msr dbgvcr32_el2, x12 // encoding: [0x0c,0x07,0x14,0xd5]
-// CHECK: msr dbgbvr0_el1, x12 // encoding: [0x8c,0x00,0x10,0xd5]
-// CHECK: msr dbgbvr1_el1, x12 // encoding: [0x8c,0x01,0x10,0xd5]
-// CHECK: msr dbgbvr2_el1, x12 // encoding: [0x8c,0x02,0x10,0xd5]
-// CHECK: msr dbgbvr3_el1, x12 // encoding: [0x8c,0x03,0x10,0xd5]
-// CHECK: msr dbgbvr4_el1, x12 // encoding: [0x8c,0x04,0x10,0xd5]
-// CHECK: msr dbgbvr5_el1, x12 // encoding: [0x8c,0x05,0x10,0xd5]
-// CHECK: msr dbgbvr6_el1, x12 // encoding: [0x8c,0x06,0x10,0xd5]
-// CHECK: msr dbgbvr7_el1, x12 // encoding: [0x8c,0x07,0x10,0xd5]
-// CHECK: msr dbgbvr8_el1, x12 // encoding: [0x8c,0x08,0x10,0xd5]
-// CHECK: msr dbgbvr9_el1, x12 // encoding: [0x8c,0x09,0x10,0xd5]
-// CHECK: msr dbgbvr10_el1, x12 // encoding: [0x8c,0x0a,0x10,0xd5]
-// CHECK: msr dbgbvr11_el1, x12 // encoding: [0x8c,0x0b,0x10,0xd5]
-// CHECK: msr dbgbvr12_el1, x12 // encoding: [0x8c,0x0c,0x10,0xd5]
-// CHECK: msr dbgbvr13_el1, x12 // encoding: [0x8c,0x0d,0x10,0xd5]
-// CHECK: msr dbgbvr14_el1, x12 // encoding: [0x8c,0x0e,0x10,0xd5]
-// CHECK: msr dbgbvr15_el1, x12 // encoding: [0x8c,0x0f,0x10,0xd5]
-// CHECK: msr dbgbcr0_el1, x12 // encoding: [0xac,0x00,0x10,0xd5]
-// CHECK: msr dbgbcr1_el1, x12 // encoding: [0xac,0x01,0x10,0xd5]
-// CHECK: msr dbgbcr2_el1, x12 // encoding: [0xac,0x02,0x10,0xd5]
-// CHECK: msr dbgbcr3_el1, x12 // encoding: [0xac,0x03,0x10,0xd5]
-// CHECK: msr dbgbcr4_el1, x12 // encoding: [0xac,0x04,0x10,0xd5]
-// CHECK: msr dbgbcr5_el1, x12 // encoding: [0xac,0x05,0x10,0xd5]
-// CHECK: msr dbgbcr6_el1, x12 // encoding: [0xac,0x06,0x10,0xd5]
-// CHECK: msr dbgbcr7_el1, x12 // encoding: [0xac,0x07,0x10,0xd5]
-// CHECK: msr dbgbcr8_el1, x12 // encoding: [0xac,0x08,0x10,0xd5]
-// CHECK: msr dbgbcr9_el1, x12 // encoding: [0xac,0x09,0x10,0xd5]
-// CHECK: msr dbgbcr10_el1, x12 // encoding: [0xac,0x0a,0x10,0xd5]
-// CHECK: msr dbgbcr11_el1, x12 // encoding: [0xac,0x0b,0x10,0xd5]
-// CHECK: msr dbgbcr12_el1, x12 // encoding: [0xac,0x0c,0x10,0xd5]
-// CHECK: msr dbgbcr13_el1, x12 // encoding: [0xac,0x0d,0x10,0xd5]
-// CHECK: msr dbgbcr14_el1, x12 // encoding: [0xac,0x0e,0x10,0xd5]
-// CHECK: msr dbgbcr15_el1, x12 // encoding: [0xac,0x0f,0x10,0xd5]
-// CHECK: msr dbgwvr0_el1, x12 // encoding: [0xcc,0x00,0x10,0xd5]
-// CHECK: msr dbgwvr1_el1, x12 // encoding: [0xcc,0x01,0x10,0xd5]
-// CHECK: msr dbgwvr2_el1, x12 // encoding: [0xcc,0x02,0x10,0xd5]
-// CHECK: msr dbgwvr3_el1, x12 // encoding: [0xcc,0x03,0x10,0xd5]
-// CHECK: msr dbgwvr4_el1, x12 // encoding: [0xcc,0x04,0x10,0xd5]
-// CHECK: msr dbgwvr5_el1, x12 // encoding: [0xcc,0x05,0x10,0xd5]
-// CHECK: msr dbgwvr6_el1, x12 // encoding: [0xcc,0x06,0x10,0xd5]
-// CHECK: msr dbgwvr7_el1, x12 // encoding: [0xcc,0x07,0x10,0xd5]
-// CHECK: msr dbgwvr8_el1, x12 // encoding: [0xcc,0x08,0x10,0xd5]
-// CHECK: msr dbgwvr9_el1, x12 // encoding: [0xcc,0x09,0x10,0xd5]
-// CHECK: msr dbgwvr10_el1, x12 // encoding: [0xcc,0x0a,0x10,0xd5]
-// CHECK: msr dbgwvr11_el1, x12 // encoding: [0xcc,0x0b,0x10,0xd5]
-// CHECK: msr dbgwvr12_el1, x12 // encoding: [0xcc,0x0c,0x10,0xd5]
-// CHECK: msr dbgwvr13_el1, x12 // encoding: [0xcc,0x0d,0x10,0xd5]
-// CHECK: msr dbgwvr14_el1, x12 // encoding: [0xcc,0x0e,0x10,0xd5]
-// CHECK: msr dbgwvr15_el1, x12 // encoding: [0xcc,0x0f,0x10,0xd5]
-// CHECK: msr dbgwcr0_el1, x12 // encoding: [0xec,0x00,0x10,0xd5]
-// CHECK: msr dbgwcr1_el1, x12 // encoding: [0xec,0x01,0x10,0xd5]
-// CHECK: msr dbgwcr2_el1, x12 // encoding: [0xec,0x02,0x10,0xd5]
-// CHECK: msr dbgwcr3_el1, x12 // encoding: [0xec,0x03,0x10,0xd5]
-// CHECK: msr dbgwcr4_el1, x12 // encoding: [0xec,0x04,0x10,0xd5]
-// CHECK: msr dbgwcr5_el1, x12 // encoding: [0xec,0x05,0x10,0xd5]
-// CHECK: msr dbgwcr6_el1, x12 // encoding: [0xec,0x06,0x10,0xd5]
-// CHECK: msr dbgwcr7_el1, x12 // encoding: [0xec,0x07,0x10,0xd5]
-// CHECK: msr dbgwcr8_el1, x12 // encoding: [0xec,0x08,0x10,0xd5]
-// CHECK: msr dbgwcr9_el1, x12 // encoding: [0xec,0x09,0x10,0xd5]
-// CHECK: msr dbgwcr10_el1, x12 // encoding: [0xec,0x0a,0x10,0xd5]
-// CHECK: msr dbgwcr11_el1, x12 // encoding: [0xec,0x0b,0x10,0xd5]
-// CHECK: msr dbgwcr12_el1, x12 // encoding: [0xec,0x0c,0x10,0xd5]
-// CHECK: msr dbgwcr13_el1, x12 // encoding: [0xec,0x0d,0x10,0xd5]
-// CHECK: msr dbgwcr14_el1, x12 // encoding: [0xec,0x0e,0x10,0xd5]
-// CHECK: msr dbgwcr15_el1, x12 // encoding: [0xec,0x0f,0x10,0xd5]
-// CHECK: msr teehbr32_el1, x12 // encoding: [0x0c,0x10,0x12,0xd5]
-// CHECK: msr oslar_el1, x12 // encoding: [0x8c,0x10,0x10,0xd5]
-// CHECK: msr osdlr_el1, x12 // encoding: [0x8c,0x13,0x10,0xd5]
-// CHECK: msr dbgprcr_el1, x12 // encoding: [0x8c,0x14,0x10,0xd5]
-// CHECK: msr dbgclaimset_el1, x12 // encoding: [0xcc,0x78,0x10,0xd5]
-// CHECK: msr dbgclaimclr_el1, x12 // encoding: [0xcc,0x79,0x10,0xd5]
-// CHECK: msr csselr_el1, x12 // encoding: [0x0c,0x00,0x1a,0xd5]
-// CHECK: msr vpidr_el2, x12 // encoding: [0x0c,0x00,0x1c,0xd5]
-// CHECK: msr vmpidr_el2, x12 // encoding: [0xac,0x00,0x1c,0xd5]
-// CHECK: msr sctlr_el1, x12 // encoding: [0x0c,0x10,0x18,0xd5]
-// CHECK: msr sctlr_el2, x12 // encoding: [0x0c,0x10,0x1c,0xd5]
-// CHECK: msr sctlr_el3, x12 // encoding: [0x0c,0x10,0x1e,0xd5]
-// CHECK: msr actlr_el1, x12 // encoding: [0x2c,0x10,0x18,0xd5]
-// CHECK: msr actlr_el2, x12 // encoding: [0x2c,0x10,0x1c,0xd5]
-// CHECK: msr actlr_el3, x12 // encoding: [0x2c,0x10,0x1e,0xd5]
-// CHECK: msr cpacr_el1, x12 // encoding: [0x4c,0x10,0x18,0xd5]
-// CHECK: msr hcr_el2, x12 // encoding: [0x0c,0x11,0x1c,0xd5]
-// CHECK: msr scr_el3, x12 // encoding: [0x0c,0x11,0x1e,0xd5]
-// CHECK: msr mdcr_el2, x12 // encoding: [0x2c,0x11,0x1c,0xd5]
-// CHECK: msr sder32_el3, x12 // encoding: [0x2c,0x11,0x1e,0xd5]
-// CHECK: msr cptr_el2, x12 // encoding: [0x4c,0x11,0x1c,0xd5]
-// CHECK: msr cptr_el3, x12 // encoding: [0x4c,0x11,0x1e,0xd5]
-// CHECK: msr hstr_el2, x12 // encoding: [0x6c,0x11,0x1c,0xd5]
-// CHECK: msr hacr_el2, x12 // encoding: [0xec,0x11,0x1c,0xd5]
-// CHECK: msr mdcr_el3, x12 // encoding: [0x2c,0x13,0x1e,0xd5]
-// CHECK: msr ttbr0_el1, x12 // encoding: [0x0c,0x20,0x18,0xd5]
-// CHECK: msr ttbr0_el2, x12 // encoding: [0x0c,0x20,0x1c,0xd5]
-// CHECK: msr ttbr0_el3, x12 // encoding: [0x0c,0x20,0x1e,0xd5]
-// CHECK: msr ttbr1_el1, x12 // encoding: [0x2c,0x20,0x18,0xd5]
-// CHECK: msr tcr_el1, x12 // encoding: [0x4c,0x20,0x18,0xd5]
-// CHECK: msr tcr_el2, x12 // encoding: [0x4c,0x20,0x1c,0xd5]
-// CHECK: msr tcr_el3, x12 // encoding: [0x4c,0x20,0x1e,0xd5]
-// CHECK: msr vttbr_el2, x12 // encoding: [0x0c,0x21,0x1c,0xd5]
-// CHECK: msr vtcr_el2, x12 // encoding: [0x4c,0x21,0x1c,0xd5]
-// CHECK: msr dacr32_el2, x12 // encoding: [0x0c,0x30,0x1c,0xd5]
-// CHECK: msr spsr_el1, x12 // encoding: [0x0c,0x40,0x18,0xd5]
-// CHECK: msr spsr_el2, x12 // encoding: [0x0c,0x40,0x1c,0xd5]
-// CHECK: msr spsr_el3, x12 // encoding: [0x0c,0x40,0x1e,0xd5]
-// CHECK: msr elr_el1, x12 // encoding: [0x2c,0x40,0x18,0xd5]
-// CHECK: msr elr_el2, x12 // encoding: [0x2c,0x40,0x1c,0xd5]
-// CHECK: msr elr_el3, x12 // encoding: [0x2c,0x40,0x1e,0xd5]
-// CHECK: msr sp_el0, x12 // encoding: [0x0c,0x41,0x18,0xd5]
-// CHECK: msr sp_el1, x12 // encoding: [0x0c,0x41,0x1c,0xd5]
-// CHECK: msr sp_el2, x12 // encoding: [0x0c,0x41,0x1e,0xd5]
-// CHECK: msr spsel, x12 // encoding: [0x0c,0x42,0x18,0xd5]
-// CHECK: msr nzcv, x12 // encoding: [0x0c,0x42,0x1b,0xd5]
-// CHECK: msr daif, x12 // encoding: [0x2c,0x42,0x1b,0xd5]
-// CHECK: msr currentel, x12 // encoding: [0x4c,0x42,0x18,0xd5]
-// CHECK: msr spsr_irq, x12 // encoding: [0x0c,0x43,0x1c,0xd5]
-// CHECK: msr spsr_abt, x12 // encoding: [0x2c,0x43,0x1c,0xd5]
-// CHECK: msr spsr_und, x12 // encoding: [0x4c,0x43,0x1c,0xd5]
-// CHECK: msr spsr_fiq, x12 // encoding: [0x6c,0x43,0x1c,0xd5]
-// CHECK: msr fpcr, x12 // encoding: [0x0c,0x44,0x1b,0xd5]
-// CHECK: msr fpsr, x12 // encoding: [0x2c,0x44,0x1b,0xd5]
-// CHECK: msr dspsr_el0, x12 // encoding: [0x0c,0x45,0x1b,0xd5]
-// CHECK: msr dlr_el0, x12 // encoding: [0x2c,0x45,0x1b,0xd5]
-// CHECK: msr ifsr32_el2, x12 // encoding: [0x2c,0x50,0x1c,0xd5]
-// CHECK: msr afsr0_el1, x12 // encoding: [0x0c,0x51,0x18,0xd5]
-// CHECK: msr afsr0_el2, x12 // encoding: [0x0c,0x51,0x1c,0xd5]
-// CHECK: msr afsr0_el3, x12 // encoding: [0x0c,0x51,0x1e,0xd5]
-// CHECK: msr afsr1_el1, x12 // encoding: [0x2c,0x51,0x18,0xd5]
-// CHECK: msr afsr1_el2, x12 // encoding: [0x2c,0x51,0x1c,0xd5]
-// CHECK: msr afsr1_el3, x12 // encoding: [0x2c,0x51,0x1e,0xd5]
-// CHECK: msr esr_el1, x12 // encoding: [0x0c,0x52,0x18,0xd5]
-// CHECK: msr esr_el2, x12 // encoding: [0x0c,0x52,0x1c,0xd5]
-// CHECK: msr esr_el3, x12 // encoding: [0x0c,0x52,0x1e,0xd5]
-// CHECK: msr fpexc32_el2, x12 // encoding: [0x0c,0x53,0x1c,0xd5]
-// CHECK: msr far_el1, x12 // encoding: [0x0c,0x60,0x18,0xd5]
-// CHECK: msr far_el2, x12 // encoding: [0x0c,0x60,0x1c,0xd5]
-// CHECK: msr far_el3, x12 // encoding: [0x0c,0x60,0x1e,0xd5]
-// CHECK: msr hpfar_el2, x12 // encoding: [0x8c,0x60,0x1c,0xd5]
-// CHECK: msr par_el1, x12 // encoding: [0x0c,0x74,0x18,0xd5]
-// CHECK: msr pmcr_el0, x12 // encoding: [0x0c,0x9c,0x1b,0xd5]
-// CHECK: msr pmcntenset_el0, x12 // encoding: [0x2c,0x9c,0x1b,0xd5]
-// CHECK: msr pmcntenclr_el0, x12 // encoding: [0x4c,0x9c,0x1b,0xd5]
-// CHECK: msr pmovsclr_el0, x12 // encoding: [0x6c,0x9c,0x1b,0xd5]
-// CHECK: msr pmselr_el0, x12 // encoding: [0xac,0x9c,0x1b,0xd5]
-// CHECK: msr pmccntr_el0, x12 // encoding: [0x0c,0x9d,0x1b,0xd5]
-// CHECK: msr pmxevtyper_el0, x12 // encoding: [0x2c,0x9d,0x1b,0xd5]
-// CHECK: msr pmxevcntr_el0, x12 // encoding: [0x4c,0x9d,0x1b,0xd5]
-// CHECK: msr pmuserenr_el0, x12 // encoding: [0x0c,0x9e,0x1b,0xd5]
-// CHECK: msr pmintenset_el1, x12 // encoding: [0x2c,0x9e,0x18,0xd5]
-// CHECK: msr pmintenclr_el1, x12 // encoding: [0x4c,0x9e,0x18,0xd5]
-// CHECK: msr pmovsset_el0, x12 // encoding: [0x6c,0x9e,0x1b,0xd5]
-// CHECK: msr mair_el1, x12 // encoding: [0x0c,0xa2,0x18,0xd5]
-// CHECK: msr mair_el2, x12 // encoding: [0x0c,0xa2,0x1c,0xd5]
-// CHECK: msr mair_el3, x12 // encoding: [0x0c,0xa2,0x1e,0xd5]
-// CHECK: msr amair_el1, x12 // encoding: [0x0c,0xa3,0x18,0xd5]
-// CHECK: msr amair_el2, x12 // encoding: [0x0c,0xa3,0x1c,0xd5]
-// CHECK: msr amair_el3, x12 // encoding: [0x0c,0xa3,0x1e,0xd5]
-// CHECK: msr vbar_el1, x12 // encoding: [0x0c,0xc0,0x18,0xd5]
-// CHECK: msr vbar_el2, x12 // encoding: [0x0c,0xc0,0x1c,0xd5]
-// CHECK: msr vbar_el3, x12 // encoding: [0x0c,0xc0,0x1e,0xd5]
-// CHECK: msr rmr_el1, x12 // encoding: [0x4c,0xc0,0x18,0xd5]
-// CHECK: msr rmr_el2, x12 // encoding: [0x4c,0xc0,0x1c,0xd5]
-// CHECK: msr rmr_el3, x12 // encoding: [0x4c,0xc0,0x1e,0xd5]
-// CHECK: msr contextidr_el1, x12 // encoding: [0x2c,0xd0,0x18,0xd5]
-// CHECK: msr tpidr_el0, x12 // encoding: [0x4c,0xd0,0x1b,0xd5]
-// CHECK: msr tpidr_el2, x12 // encoding: [0x4c,0xd0,0x1c,0xd5]
-// CHECK: msr tpidr_el3, x12 // encoding: [0x4c,0xd0,0x1e,0xd5]
-// CHECK: msr tpidrro_el0, x12 // encoding: [0x6c,0xd0,0x1b,0xd5]
-// CHECK: msr tpidr_el1, x12 // encoding: [0x8c,0xd0,0x18,0xd5]
-// CHECK: msr cntfrq_el0, x12 // encoding: [0x0c,0xe0,0x1b,0xd5]
-// CHECK: msr cntvoff_el2, x12 // encoding: [0x6c,0xe0,0x1c,0xd5]
-// CHECK: msr cntkctl_el1, x12 // encoding: [0x0c,0xe1,0x18,0xd5]
-// CHECK: msr cnthctl_el2, x12 // encoding: [0x0c,0xe1,0x1c,0xd5]
-// CHECK: msr cntp_tval_el0, x12 // encoding: [0x0c,0xe2,0x1b,0xd5]
-// CHECK: msr cnthp_tval_el2, x12 // encoding: [0x0c,0xe2,0x1c,0xd5]
-// CHECK: msr cntps_tval_el1, x12 // encoding: [0x0c,0xe2,0x1f,0xd5]
-// CHECK: msr cntp_ctl_el0, x12 // encoding: [0x2c,0xe2,0x1b,0xd5]
-// CHECK: msr cnthp_ctl_el2, x12 // encoding: [0x2c,0xe2,0x1c,0xd5]
-// CHECK: msr cntps_ctl_el1, x12 // encoding: [0x2c,0xe2,0x1f,0xd5]
-// CHECK: msr cntp_cval_el0, x12 // encoding: [0x4c,0xe2,0x1b,0xd5]
-// CHECK: msr cnthp_cval_el2, x12 // encoding: [0x4c,0xe2,0x1c,0xd5]
-// CHECK: msr cntps_cval_el1, x12 // encoding: [0x4c,0xe2,0x1f,0xd5]
-// CHECK: msr cntv_tval_el0, x12 // encoding: [0x0c,0xe3,0x1b,0xd5]
-// CHECK: msr cntv_ctl_el0, x12 // encoding: [0x2c,0xe3,0x1b,0xd5]
-// CHECK: msr cntv_cval_el0, x12 // encoding: [0x4c,0xe3,0x1b,0xd5]
-// CHECK: msr pmevcntr0_el0, x12 // encoding: [0x0c,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr1_el0, x12 // encoding: [0x2c,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr2_el0, x12 // encoding: [0x4c,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr3_el0, x12 // encoding: [0x6c,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr4_el0, x12 // encoding: [0x8c,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr5_el0, x12 // encoding: [0xac,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr6_el0, x12 // encoding: [0xcc,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr7_el0, x12 // encoding: [0xec,0xe8,0x1b,0xd5]
-// CHECK: msr pmevcntr8_el0, x12 // encoding: [0x0c,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr9_el0, x12 // encoding: [0x2c,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr10_el0, x12 // encoding: [0x4c,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr11_el0, x12 // encoding: [0x6c,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr12_el0, x12 // encoding: [0x8c,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr13_el0, x12 // encoding: [0xac,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr14_el0, x12 // encoding: [0xcc,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr15_el0, x12 // encoding: [0xec,0xe9,0x1b,0xd5]
-// CHECK: msr pmevcntr16_el0, x12 // encoding: [0x0c,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr17_el0, x12 // encoding: [0x2c,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr18_el0, x12 // encoding: [0x4c,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr19_el0, x12 // encoding: [0x6c,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr20_el0, x12 // encoding: [0x8c,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr21_el0, x12 // encoding: [0xac,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr22_el0, x12 // encoding: [0xcc,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr23_el0, x12 // encoding: [0xec,0xea,0x1b,0xd5]
-// CHECK: msr pmevcntr24_el0, x12 // encoding: [0x0c,0xeb,0x1b,0xd5]
-// CHECK: msr pmevcntr25_el0, x12 // encoding: [0x2c,0xeb,0x1b,0xd5]
-// CHECK: msr pmevcntr26_el0, x12 // encoding: [0x4c,0xeb,0x1b,0xd5]
-// CHECK: msr pmevcntr27_el0, x12 // encoding: [0x6c,0xeb,0x1b,0xd5]
-// CHECK: msr pmevcntr28_el0, x12 // encoding: [0x8c,0xeb,0x1b,0xd5]
-// CHECK: msr pmevcntr29_el0, x12 // encoding: [0xac,0xeb,0x1b,0xd5]
-// CHECK: msr pmevcntr30_el0, x12 // encoding: [0xcc,0xeb,0x1b,0xd5]
-// CHECK: msr pmccfiltr_el0, x12 // encoding: [0xec,0xef,0x1b,0xd5]
-// CHECK: msr pmevtyper0_el0, x12 // encoding: [0x0c,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper1_el0, x12 // encoding: [0x2c,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper2_el0, x12 // encoding: [0x4c,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper3_el0, x12 // encoding: [0x6c,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper4_el0, x12 // encoding: [0x8c,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper5_el0, x12 // encoding: [0xac,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper6_el0, x12 // encoding: [0xcc,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper7_el0, x12 // encoding: [0xec,0xec,0x1b,0xd5]
-// CHECK: msr pmevtyper8_el0, x12 // encoding: [0x0c,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper9_el0, x12 // encoding: [0x2c,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper10_el0, x12 // encoding: [0x4c,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper11_el0, x12 // encoding: [0x6c,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper12_el0, x12 // encoding: [0x8c,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper13_el0, x12 // encoding: [0xac,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper14_el0, x12 // encoding: [0xcc,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper15_el0, x12 // encoding: [0xec,0xed,0x1b,0xd5]
-// CHECK: msr pmevtyper16_el0, x12 // encoding: [0x0c,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper17_el0, x12 // encoding: [0x2c,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper18_el0, x12 // encoding: [0x4c,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper19_el0, x12 // encoding: [0x6c,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper20_el0, x12 // encoding: [0x8c,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper21_el0, x12 // encoding: [0xac,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper22_el0, x12 // encoding: [0xcc,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper23_el0, x12 // encoding: [0xec,0xee,0x1b,0xd5]
-// CHECK: msr pmevtyper24_el0, x12 // encoding: [0x0c,0xef,0x1b,0xd5]
-// CHECK: msr pmevtyper25_el0, x12 // encoding: [0x2c,0xef,0x1b,0xd5]
-// CHECK: msr pmevtyper26_el0, x12 // encoding: [0x4c,0xef,0x1b,0xd5]
-// CHECK: msr pmevtyper27_el0, x12 // encoding: [0x6c,0xef,0x1b,0xd5]
-// CHECK: msr pmevtyper28_el0, x12 // encoding: [0x8c,0xef,0x1b,0xd5]
-// CHECK: msr pmevtyper29_el0, x12 // encoding: [0xac,0xef,0x1b,0xd5]
-// CHECK: msr pmevtyper30_el0, x12 // encoding: [0xcc,0xef,0x1b,0xd5]
+// CHECK: msr {{teecr32_el1|TEECR32_EL1}}, x12 // encoding: [0x0c,0x00,0x12,0xd5]
+// CHECK: msr {{osdtrrx_el1|OSDTRRX_EL1}}, x12 // encoding: [0x4c,0x00,0x10,0xd5]
+// CHECK: msr {{mdccint_el1|MDCCINT_EL1}}, x12 // encoding: [0x0c,0x02,0x10,0xd5]
+// CHECK: msr {{mdscr_el1|MDSCR_EL1}}, x12 // encoding: [0x4c,0x02,0x10,0xd5]
+// CHECK: msr {{osdtrtx_el1|OSDTRTX_EL1}}, x12 // encoding: [0x4c,0x03,0x10,0xd5]
+// CHECK: msr {{dbgdtr_el0|DBGDTR_EL0}}, x12 // encoding: [0x0c,0x04,0x13,0xd5]
+// CHECK: msr {{dbgdtrtx_el0|DBGDTRTX_EL0}}, x12 // encoding: [0x0c,0x05,0x13,0xd5]
+// CHECK: msr {{oseccr_el1|OSECCR_EL1}}, x12 // encoding: [0x4c,0x06,0x10,0xd5]
+// CHECK: msr {{dbgvcr32_el2|DBGVCR32_EL2}}, x12 // encoding: [0x0c,0x07,0x14,0xd5]
+// CHECK: msr {{dbgbvr0_el1|DBGBVR0_EL1}}, x12 // encoding: [0x8c,0x00,0x10,0xd5]
+// CHECK: msr {{dbgbvr1_el1|DBGBVR1_EL1}}, x12 // encoding: [0x8c,0x01,0x10,0xd5]
+// CHECK: msr {{dbgbvr2_el1|DBGBVR2_EL1}}, x12 // encoding: [0x8c,0x02,0x10,0xd5]
+// CHECK: msr {{dbgbvr3_el1|DBGBVR3_EL1}}, x12 // encoding: [0x8c,0x03,0x10,0xd5]
+// CHECK: msr {{dbgbvr4_el1|DBGBVR4_EL1}}, x12 // encoding: [0x8c,0x04,0x10,0xd5]
+// CHECK: msr {{dbgbvr5_el1|DBGBVR5_EL1}}, x12 // encoding: [0x8c,0x05,0x10,0xd5]
+// CHECK: msr {{dbgbvr6_el1|DBGBVR6_EL1}}, x12 // encoding: [0x8c,0x06,0x10,0xd5]
+// CHECK: msr {{dbgbvr7_el1|DBGBVR7_EL1}}, x12 // encoding: [0x8c,0x07,0x10,0xd5]
+// CHECK: msr {{dbgbvr8_el1|DBGBVR8_EL1}}, x12 // encoding: [0x8c,0x08,0x10,0xd5]
+// CHECK: msr {{dbgbvr9_el1|DBGBVR9_EL1}}, x12 // encoding: [0x8c,0x09,0x10,0xd5]
+// CHECK: msr {{dbgbvr10_el1|DBGBVR10_EL1}}, x12 // encoding: [0x8c,0x0a,0x10,0xd5]
+// CHECK: msr {{dbgbvr11_el1|DBGBVR11_EL1}}, x12 // encoding: [0x8c,0x0b,0x10,0xd5]
+// CHECK: msr {{dbgbvr12_el1|DBGBVR12_EL1}}, x12 // encoding: [0x8c,0x0c,0x10,0xd5]
+// CHECK: msr {{dbgbvr13_el1|DBGBVR13_EL1}}, x12 // encoding: [0x8c,0x0d,0x10,0xd5]
+// CHECK: msr {{dbgbvr14_el1|DBGBVR14_EL1}}, x12 // encoding: [0x8c,0x0e,0x10,0xd5]
+// CHECK: msr {{dbgbvr15_el1|DBGBVR15_EL1}}, x12 // encoding: [0x8c,0x0f,0x10,0xd5]
+// CHECK: msr {{dbgbcr0_el1|DBGBCR0_EL1}}, x12 // encoding: [0xac,0x00,0x10,0xd5]
+// CHECK: msr {{dbgbcr1_el1|DBGBCR1_EL1}}, x12 // encoding: [0xac,0x01,0x10,0xd5]
+// CHECK: msr {{dbgbcr2_el1|DBGBCR2_EL1}}, x12 // encoding: [0xac,0x02,0x10,0xd5]
+// CHECK: msr {{dbgbcr3_el1|DBGBCR3_EL1}}, x12 // encoding: [0xac,0x03,0x10,0xd5]
+// CHECK: msr {{dbgbcr4_el1|DBGBCR4_EL1}}, x12 // encoding: [0xac,0x04,0x10,0xd5]
+// CHECK: msr {{dbgbcr5_el1|DBGBCR5_EL1}}, x12 // encoding: [0xac,0x05,0x10,0xd5]
+// CHECK: msr {{dbgbcr6_el1|DBGBCR6_EL1}}, x12 // encoding: [0xac,0x06,0x10,0xd5]
+// CHECK: msr {{dbgbcr7_el1|DBGBCR7_EL1}}, x12 // encoding: [0xac,0x07,0x10,0xd5]
+// CHECK: msr {{dbgbcr8_el1|DBGBCR8_EL1}}, x12 // encoding: [0xac,0x08,0x10,0xd5]
+// CHECK: msr {{dbgbcr9_el1|DBGBCR9_EL1}}, x12 // encoding: [0xac,0x09,0x10,0xd5]
+// CHECK: msr {{dbgbcr10_el1|DBGBCR10_EL1}}, x12 // encoding: [0xac,0x0a,0x10,0xd5]
+// CHECK: msr {{dbgbcr11_el1|DBGBCR11_EL1}}, x12 // encoding: [0xac,0x0b,0x10,0xd5]
+// CHECK: msr {{dbgbcr12_el1|DBGBCR12_EL1}}, x12 // encoding: [0xac,0x0c,0x10,0xd5]
+// CHECK: msr {{dbgbcr13_el1|DBGBCR13_EL1}}, x12 // encoding: [0xac,0x0d,0x10,0xd5]
+// CHECK: msr {{dbgbcr14_el1|DBGBCR14_EL1}}, x12 // encoding: [0xac,0x0e,0x10,0xd5]
+// CHECK: msr {{dbgbcr15_el1|DBGBCR15_EL1}}, x12 // encoding: [0xac,0x0f,0x10,0xd5]
+// CHECK: msr {{dbgwvr0_el1|DBGWVR0_EL1}}, x12 // encoding: [0xcc,0x00,0x10,0xd5]
+// CHECK: msr {{dbgwvr1_el1|DBGWVR1_EL1}}, x12 // encoding: [0xcc,0x01,0x10,0xd5]
+// CHECK: msr {{dbgwvr2_el1|DBGWVR2_EL1}}, x12 // encoding: [0xcc,0x02,0x10,0xd5]
+// CHECK: msr {{dbgwvr3_el1|DBGWVR3_EL1}}, x12 // encoding: [0xcc,0x03,0x10,0xd5]
+// CHECK: msr {{dbgwvr4_el1|DBGWVR4_EL1}}, x12 // encoding: [0xcc,0x04,0x10,0xd5]
+// CHECK: msr {{dbgwvr5_el1|DBGWVR5_EL1}}, x12 // encoding: [0xcc,0x05,0x10,0xd5]
+// CHECK: msr {{dbgwvr6_el1|DBGWVR6_EL1}}, x12 // encoding: [0xcc,0x06,0x10,0xd5]
+// CHECK: msr {{dbgwvr7_el1|DBGWVR7_EL1}}, x12 // encoding: [0xcc,0x07,0x10,0xd5]
+// CHECK: msr {{dbgwvr8_el1|DBGWVR8_EL1}}, x12 // encoding: [0xcc,0x08,0x10,0xd5]
+// CHECK: msr {{dbgwvr9_el1|DBGWVR9_EL1}}, x12 // encoding: [0xcc,0x09,0x10,0xd5]
+// CHECK: msr {{dbgwvr10_el1|DBGWVR10_EL1}}, x12 // encoding: [0xcc,0x0a,0x10,0xd5]
+// CHECK: msr {{dbgwvr11_el1|DBGWVR11_EL1}}, x12 // encoding: [0xcc,0x0b,0x10,0xd5]
+// CHECK: msr {{dbgwvr12_el1|DBGWVR12_EL1}}, x12 // encoding: [0xcc,0x0c,0x10,0xd5]
+// CHECK: msr {{dbgwvr13_el1|DBGWVR13_EL1}}, x12 // encoding: [0xcc,0x0d,0x10,0xd5]
+// CHECK: msr {{dbgwvr14_el1|DBGWVR14_EL1}}, x12 // encoding: [0xcc,0x0e,0x10,0xd5]
+// CHECK: msr {{dbgwvr15_el1|DBGWVR15_EL1}}, x12 // encoding: [0xcc,0x0f,0x10,0xd5]
+// CHECK: msr {{dbgwcr0_el1|DBGWCR0_EL1}}, x12 // encoding: [0xec,0x00,0x10,0xd5]
+// CHECK: msr {{dbgwcr1_el1|DBGWCR1_EL1}}, x12 // encoding: [0xec,0x01,0x10,0xd5]
+// CHECK: msr {{dbgwcr2_el1|DBGWCR2_EL1}}, x12 // encoding: [0xec,0x02,0x10,0xd5]
+// CHECK: msr {{dbgwcr3_el1|DBGWCR3_EL1}}, x12 // encoding: [0xec,0x03,0x10,0xd5]
+// CHECK: msr {{dbgwcr4_el1|DBGWCR4_EL1}}, x12 // encoding: [0xec,0x04,0x10,0xd5]
+// CHECK: msr {{dbgwcr5_el1|DBGWCR5_EL1}}, x12 // encoding: [0xec,0x05,0x10,0xd5]
+// CHECK: msr {{dbgwcr6_el1|DBGWCR6_EL1}}, x12 // encoding: [0xec,0x06,0x10,0xd5]
+// CHECK: msr {{dbgwcr7_el1|DBGWCR7_EL1}}, x12 // encoding: [0xec,0x07,0x10,0xd5]
+// CHECK: msr {{dbgwcr8_el1|DBGWCR8_EL1}}, x12 // encoding: [0xec,0x08,0x10,0xd5]
+// CHECK: msr {{dbgwcr9_el1|DBGWCR9_EL1}}, x12 // encoding: [0xec,0x09,0x10,0xd5]
+// CHECK: msr {{dbgwcr10_el1|DBGWCR10_EL1}}, x12 // encoding: [0xec,0x0a,0x10,0xd5]
+// CHECK: msr {{dbgwcr11_el1|DBGWCR11_EL1}}, x12 // encoding: [0xec,0x0b,0x10,0xd5]
+// CHECK: msr {{dbgwcr12_el1|DBGWCR12_EL1}}, x12 // encoding: [0xec,0x0c,0x10,0xd5]
+// CHECK: msr {{dbgwcr13_el1|DBGWCR13_EL1}}, x12 // encoding: [0xec,0x0d,0x10,0xd5]
+// CHECK: msr {{dbgwcr14_el1|DBGWCR14_EL1}}, x12 // encoding: [0xec,0x0e,0x10,0xd5]
+// CHECK: msr {{dbgwcr15_el1|DBGWCR15_EL1}}, x12 // encoding: [0xec,0x0f,0x10,0xd5]
+// CHECK: msr {{teehbr32_el1|TEEHBR32_EL1}}, x12 // encoding: [0x0c,0x10,0x12,0xd5]
+// CHECK: msr {{oslar_el1|OSLAR_EL1}}, x12 // encoding: [0x8c,0x10,0x10,0xd5]
+// CHECK: msr {{osdlr_el1|OSDLR_EL1}}, x12 // encoding: [0x8c,0x13,0x10,0xd5]
+// CHECK: msr {{dbgprcr_el1|DBGPRCR_EL1}}, x12 // encoding: [0x8c,0x14,0x10,0xd5]
+// CHECK: msr {{dbgclaimset_el1|DBGCLAIMSET_EL1}}, x12 // encoding: [0xcc,0x78,0x10,0xd5]
+// CHECK: msr {{dbgclaimclr_el1|DBGCLAIMCLR_EL1}}, x12 // encoding: [0xcc,0x79,0x10,0xd5]
+// CHECK: msr {{csselr_el1|CSSELR_EL1}}, x12 // encoding: [0x0c,0x00,0x1a,0xd5]
+// CHECK: msr {{vpidr_el2|VPIDR_EL2}}, x12 // encoding: [0x0c,0x00,0x1c,0xd5]
+// CHECK: msr {{vmpidr_el2|VMPIDR_EL2}}, x12 // encoding: [0xac,0x00,0x1c,0xd5]
+// CHECK: msr {{sctlr_el1|SCTLR_EL1}}, x12 // encoding: [0x0c,0x10,0x18,0xd5]
+// CHECK: msr {{sctlr_el2|SCTLR_EL2}}, x12 // encoding: [0x0c,0x10,0x1c,0xd5]
+// CHECK: msr {{sctlr_el3|SCTLR_EL3}}, x12 // encoding: [0x0c,0x10,0x1e,0xd5]
+// CHECK: msr {{actlr_el1|ACTLR_EL1}}, x12 // encoding: [0x2c,0x10,0x18,0xd5]
+// CHECK: msr {{actlr_el2|ACTLR_EL2}}, x12 // encoding: [0x2c,0x10,0x1c,0xd5]
+// CHECK: msr {{actlr_el3|ACTLR_EL3}}, x12 // encoding: [0x2c,0x10,0x1e,0xd5]
+// CHECK: msr {{cpacr_el1|CPACR_EL1}}, x12 // encoding: [0x4c,0x10,0x18,0xd5]
+// CHECK: msr {{hcr_el2|HCR_EL2}}, x12 // encoding: [0x0c,0x11,0x1c,0xd5]
+// CHECK: msr {{scr_el3|SCR_EL3}}, x12 // encoding: [0x0c,0x11,0x1e,0xd5]
+// CHECK: msr {{mdcr_el2|MDCR_EL2}}, x12 // encoding: [0x2c,0x11,0x1c,0xd5]
+// CHECK: msr {{sder32_el3|SDER32_EL3}}, x12 // encoding: [0x2c,0x11,0x1e,0xd5]
+// CHECK: msr {{cptr_el2|CPTR_EL2}}, x12 // encoding: [0x4c,0x11,0x1c,0xd5]
+// CHECK: msr {{cptr_el3|CPTR_EL3}}, x12 // encoding: [0x4c,0x11,0x1e,0xd5]
+// CHECK: msr {{hstr_el2|HSTR_EL2}}, x12 // encoding: [0x6c,0x11,0x1c,0xd5]
+// CHECK: msr {{hacr_el2|HACR_EL2}}, x12 // encoding: [0xec,0x11,0x1c,0xd5]
+// CHECK: msr {{mdcr_el3|MDCR_EL3}}, x12 // encoding: [0x2c,0x13,0x1e,0xd5]
+// CHECK: msr {{ttbr0_el1|TTBR0_EL1}}, x12 // encoding: [0x0c,0x20,0x18,0xd5]
+// CHECK: msr {{ttbr0_el2|TTBR0_EL2}}, x12 // encoding: [0x0c,0x20,0x1c,0xd5]
+// CHECK: msr {{ttbr0_el3|TTBR0_EL3}}, x12 // encoding: [0x0c,0x20,0x1e,0xd5]
+// CHECK: msr {{ttbr1_el1|TTBR1_EL1}}, x12 // encoding: [0x2c,0x20,0x18,0xd5]
+// CHECK: msr {{tcr_el1|TCR_EL1}}, x12 // encoding: [0x4c,0x20,0x18,0xd5]
+// CHECK: msr {{tcr_el2|TCR_EL2}}, x12 // encoding: [0x4c,0x20,0x1c,0xd5]
+// CHECK: msr {{tcr_el3|TCR_EL3}}, x12 // encoding: [0x4c,0x20,0x1e,0xd5]
+// CHECK: msr {{vttbr_el2|VTTBR_EL2}}, x12 // encoding: [0x0c,0x21,0x1c,0xd5]
+// CHECK: msr {{vtcr_el2|VTCR_EL2}}, x12 // encoding: [0x4c,0x21,0x1c,0xd5]
+// CHECK: msr {{dacr32_el2|DACR32_EL2}}, x12 // encoding: [0x0c,0x30,0x1c,0xd5]
+// CHECK: msr {{spsr_el1|SPSR_EL1}}, x12 // encoding: [0x0c,0x40,0x18,0xd5]
+// CHECK: msr {{spsr_el2|SPSR_EL2}}, x12 // encoding: [0x0c,0x40,0x1c,0xd5]
+// CHECK: msr {{spsr_el3|SPSR_EL3}}, x12 // encoding: [0x0c,0x40,0x1e,0xd5]
+// CHECK: msr {{elr_el1|ELR_EL1}}, x12 // encoding: [0x2c,0x40,0x18,0xd5]
+// CHECK: msr {{elr_el2|ELR_EL2}}, x12 // encoding: [0x2c,0x40,0x1c,0xd5]
+// CHECK: msr {{elr_el3|ELR_EL3}}, x12 // encoding: [0x2c,0x40,0x1e,0xd5]
+// CHECK: msr {{sp_el0|SP_EL0}}, x12 // encoding: [0x0c,0x41,0x18,0xd5]
+// CHECK: msr {{sp_el1|SP_EL1}}, x12 // encoding: [0x0c,0x41,0x1c,0xd5]
+// CHECK: msr {{sp_el2|SP_EL2}}, x12 // encoding: [0x0c,0x41,0x1e,0xd5]
+// CHECK: msr {{spsel|SPSEL}}, x12 // encoding: [0x0c,0x42,0x18,0xd5]
+// CHECK: msr {{nzcv|NZCV}}, x12 // encoding: [0x0c,0x42,0x1b,0xd5]
+// CHECK: msr {{daif|DAIF}}, x12 // encoding: [0x2c,0x42,0x1b,0xd5]
+// CHECK: msr {{currentel|CURRENTEL}}, x12 // encoding: [0x4c,0x42,0x18,0xd5]
+// CHECK: msr {{spsr_irq|SPSR_IRQ}}, x12 // encoding: [0x0c,0x43,0x1c,0xd5]
+// CHECK: msr {{spsr_abt|SPSR_ABT}}, x12 // encoding: [0x2c,0x43,0x1c,0xd5]
+// CHECK: msr {{spsr_und|SPSR_UND}}, x12 // encoding: [0x4c,0x43,0x1c,0xd5]
+// CHECK: msr {{spsr_fiq|SPSR_FIQ}}, x12 // encoding: [0x6c,0x43,0x1c,0xd5]
+// CHECK: msr {{fpcr|FPCR}}, x12 // encoding: [0x0c,0x44,0x1b,0xd5]
+// CHECK: msr {{fpsr|FPSR}}, x12 // encoding: [0x2c,0x44,0x1b,0xd5]
+// CHECK: msr {{dspsr_el0|DSPSR_EL0}}, x12 // encoding: [0x0c,0x45,0x1b,0xd5]
+// CHECK: msr {{dlr_el0|DLR_EL0}}, x12 // encoding: [0x2c,0x45,0x1b,0xd5]
+// CHECK: msr {{ifsr32_el2|IFSR32_EL2}}, x12 // encoding: [0x2c,0x50,0x1c,0xd5]
+// CHECK: msr {{afsr0_el1|AFSR0_EL1}}, x12 // encoding: [0x0c,0x51,0x18,0xd5]
+// CHECK: msr {{afsr0_el2|AFSR0_EL2}}, x12 // encoding: [0x0c,0x51,0x1c,0xd5]
+// CHECK: msr {{afsr0_el3|AFSR0_EL3}}, x12 // encoding: [0x0c,0x51,0x1e,0xd5]
+// CHECK: msr {{afsr1_el1|AFSR1_EL1}}, x12 // encoding: [0x2c,0x51,0x18,0xd5]
+// CHECK: msr {{afsr1_el2|AFSR1_EL2}}, x12 // encoding: [0x2c,0x51,0x1c,0xd5]
+// CHECK: msr {{afsr1_el3|AFSR1_EL3}}, x12 // encoding: [0x2c,0x51,0x1e,0xd5]
+// CHECK: msr {{esr_el1|ESR_EL1}}, x12 // encoding: [0x0c,0x52,0x18,0xd5]
+// CHECK: msr {{esr_el2|ESR_EL2}}, x12 // encoding: [0x0c,0x52,0x1c,0xd5]
+// CHECK: msr {{esr_el3|ESR_EL3}}, x12 // encoding: [0x0c,0x52,0x1e,0xd5]
+// CHECK: msr {{fpexc32_el2|FPEXC32_EL2}}, x12 // encoding: [0x0c,0x53,0x1c,0xd5]
+// CHECK: msr {{far_el1|FAR_EL1}}, x12 // encoding: [0x0c,0x60,0x18,0xd5]
+// CHECK: msr {{far_el2|FAR_EL2}}, x12 // encoding: [0x0c,0x60,0x1c,0xd5]
+// CHECK: msr {{far_el3|FAR_EL3}}, x12 // encoding: [0x0c,0x60,0x1e,0xd5]
+// CHECK: msr {{hpfar_el2|HPFAR_EL2}}, x12 // encoding: [0x8c,0x60,0x1c,0xd5]
+// CHECK: msr {{par_el1|PAR_EL1}}, x12 // encoding: [0x0c,0x74,0x18,0xd5]
+// CHECK: msr {{pmcr_el0|PMCR_EL0}}, x12 // encoding: [0x0c,0x9c,0x1b,0xd5]
+// CHECK: msr {{pmcntenset_el0|PMCNTENSET_EL0}}, x12 // encoding: [0x2c,0x9c,0x1b,0xd5]
+// CHECK: msr {{pmcntenclr_el0|PMCNTENCLR_EL0}}, x12 // encoding: [0x4c,0x9c,0x1b,0xd5]
+// CHECK: msr {{pmovsclr_el0|PMOVSCLR_EL0}}, x12 // encoding: [0x6c,0x9c,0x1b,0xd5]
+// CHECK: msr {{pmselr_el0|PMSELR_EL0}}, x12 // encoding: [0xac,0x9c,0x1b,0xd5]
+// CHECK: msr {{pmccntr_el0|PMCCNTR_EL0}}, x12 // encoding: [0x0c,0x9d,0x1b,0xd5]
+// CHECK: msr {{pmxevtyper_el0|PMXEVTYPER_EL0}}, x12 // encoding: [0x2c,0x9d,0x1b,0xd5]
+// CHECK: msr {{pmxevcntr_el0|PMXEVCNTR_EL0}}, x12 // encoding: [0x4c,0x9d,0x1b,0xd5]
+// CHECK: msr {{pmuserenr_el0|PMUSERENR_EL0}}, x12 // encoding: [0x0c,0x9e,0x1b,0xd5]
+// CHECK: msr {{pmintenset_el1|PMINTENSET_EL1}}, x12 // encoding: [0x2c,0x9e,0x18,0xd5]
+// CHECK: msr {{pmintenclr_el1|PMINTENCLR_EL1}}, x12 // encoding: [0x4c,0x9e,0x18,0xd5]
+// CHECK: msr {{pmovsset_el0|PMOVSSET_EL0}}, x12 // encoding: [0x6c,0x9e,0x1b,0xd5]
+// CHECK: msr {{mair_el1|MAIR_EL1}}, x12 // encoding: [0x0c,0xa2,0x18,0xd5]
+// CHECK: msr {{mair_el2|MAIR_EL2}}, x12 // encoding: [0x0c,0xa2,0x1c,0xd5]
+// CHECK: msr {{mair_el3|MAIR_EL3}}, x12 // encoding: [0x0c,0xa2,0x1e,0xd5]
+// CHECK: msr {{amair_el1|AMAIR_EL1}}, x12 // encoding: [0x0c,0xa3,0x18,0xd5]
+// CHECK: msr {{amair_el2|AMAIR_EL2}}, x12 // encoding: [0x0c,0xa3,0x1c,0xd5]
+// CHECK: msr {{amair_el3|AMAIR_EL3}}, x12 // encoding: [0x0c,0xa3,0x1e,0xd5]
+// CHECK: msr {{vbar_el1|VBAR_EL1}}, x12 // encoding: [0x0c,0xc0,0x18,0xd5]
+// CHECK: msr {{vbar_el2|VBAR_EL2}}, x12 // encoding: [0x0c,0xc0,0x1c,0xd5]
+// CHECK: msr {{vbar_el3|VBAR_EL3}}, x12 // encoding: [0x0c,0xc0,0x1e,0xd5]
+// CHECK: msr {{rmr_el1|RMR_EL1}}, x12 // encoding: [0x4c,0xc0,0x18,0xd5]
+// CHECK: msr {{rmr_el2|RMR_EL2}}, x12 // encoding: [0x4c,0xc0,0x1c,0xd5]
+// CHECK: msr {{rmr_el3|RMR_EL3}}, x12 // encoding: [0x4c,0xc0,0x1e,0xd5]
+// CHECK: msr {{contextidr_el1|CONTEXTIDR_EL1}}, x12 // encoding: [0x2c,0xd0,0x18,0xd5]
+// CHECK: msr {{tpidr_el0|TPIDR_EL0}}, x12 // encoding: [0x4c,0xd0,0x1b,0xd5]
+// CHECK: msr {{tpidr_el2|TPIDR_EL2}}, x12 // encoding: [0x4c,0xd0,0x1c,0xd5]
+// CHECK: msr {{tpidr_el3|TPIDR_EL3}}, x12 // encoding: [0x4c,0xd0,0x1e,0xd5]
+// CHECK: msr {{tpidrro_el0|TPIDRRO_EL0}}, x12 // encoding: [0x6c,0xd0,0x1b,0xd5]
+// CHECK: msr {{tpidr_el1|TPIDR_EL1}}, x12 // encoding: [0x8c,0xd0,0x18,0xd5]
+// CHECK: msr {{cntfrq_el0|CNTFRQ_EL0}}, x12 // encoding: [0x0c,0xe0,0x1b,0xd5]
+// CHECK: msr {{cntvoff_el2|CNTVOFF_EL2}}, x12 // encoding: [0x6c,0xe0,0x1c,0xd5]
+// CHECK: msr {{cntkctl_el1|CNTKCTL_EL1}}, x12 // encoding: [0x0c,0xe1,0x18,0xd5]
+// CHECK: msr {{cnthctl_el2|CNTHCTL_EL2}}, x12 // encoding: [0x0c,0xe1,0x1c,0xd5]
+// CHECK: msr {{cntp_tval_el0|CNTP_TVAL_EL0}}, x12 // encoding: [0x0c,0xe2,0x1b,0xd5]
+// CHECK: msr {{cnthp_tval_el2|CNTHP_TVAL_EL2}}, x12 // encoding: [0x0c,0xe2,0x1c,0xd5]
+// CHECK: msr {{cntps_tval_el1|CNTPS_TVAL_EL1}}, x12 // encoding: [0x0c,0xe2,0x1f,0xd5]
+// CHECK: msr {{cntp_ctl_el0|CNTP_CTL_EL0}}, x12 // encoding: [0x2c,0xe2,0x1b,0xd5]
+// CHECK: msr {{cnthp_ctl_el2|CNTHP_CTL_EL2}}, x12 // encoding: [0x2c,0xe2,0x1c,0xd5]
+// CHECK: msr {{cntps_ctl_el1|CNTPS_CTL_EL1}}, x12 // encoding: [0x2c,0xe2,0x1f,0xd5]
+// CHECK: msr {{cntp_cval_el0|CNTP_CVAL_EL0}}, x12 // encoding: [0x4c,0xe2,0x1b,0xd5]
+// CHECK: msr {{cnthp_cval_el2|CNTHP_CVAL_EL2}}, x12 // encoding: [0x4c,0xe2,0x1c,0xd5]
+// CHECK: msr {{cntps_cval_el1|CNTPS_CVAL_EL1}}, x12 // encoding: [0x4c,0xe2,0x1f,0xd5]
+// CHECK: msr {{cntv_tval_el0|CNTV_TVAL_EL0}}, x12 // encoding: [0x0c,0xe3,0x1b,0xd5]
+// CHECK: msr {{cntv_ctl_el0|CNTV_CTL_EL0}}, x12 // encoding: [0x2c,0xe3,0x1b,0xd5]
+// CHECK: msr {{cntv_cval_el0|CNTV_CVAL_EL0}}, x12 // encoding: [0x4c,0xe3,0x1b,0xd5]
+// CHECK: msr {{pmevcntr0_el0|PMEVCNTR0_EL0}}, x12 // encoding: [0x0c,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr1_el0|PMEVCNTR1_EL0}}, x12 // encoding: [0x2c,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr2_el0|PMEVCNTR2_EL0}}, x12 // encoding: [0x4c,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr3_el0|PMEVCNTR3_EL0}}, x12 // encoding: [0x6c,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr4_el0|PMEVCNTR4_EL0}}, x12 // encoding: [0x8c,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr5_el0|PMEVCNTR5_EL0}}, x12 // encoding: [0xac,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr6_el0|PMEVCNTR6_EL0}}, x12 // encoding: [0xcc,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr7_el0|PMEVCNTR7_EL0}}, x12 // encoding: [0xec,0xe8,0x1b,0xd5]
+// CHECK: msr {{pmevcntr8_el0|PMEVCNTR8_EL0}}, x12 // encoding: [0x0c,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr9_el0|PMEVCNTR9_EL0}}, x12 // encoding: [0x2c,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr10_el0|PMEVCNTR10_EL0}}, x12 // encoding: [0x4c,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr11_el0|PMEVCNTR11_EL0}}, x12 // encoding: [0x6c,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr12_el0|PMEVCNTR12_EL0}}, x12 // encoding: [0x8c,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr13_el0|PMEVCNTR13_EL0}}, x12 // encoding: [0xac,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr14_el0|PMEVCNTR14_EL0}}, x12 // encoding: [0xcc,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr15_el0|PMEVCNTR15_EL0}}, x12 // encoding: [0xec,0xe9,0x1b,0xd5]
+// CHECK: msr {{pmevcntr16_el0|PMEVCNTR16_EL0}}, x12 // encoding: [0x0c,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr17_el0|PMEVCNTR17_EL0}}, x12 // encoding: [0x2c,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr18_el0|PMEVCNTR18_EL0}}, x12 // encoding: [0x4c,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr19_el0|PMEVCNTR19_EL0}}, x12 // encoding: [0x6c,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr20_el0|PMEVCNTR20_EL0}}, x12 // encoding: [0x8c,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr21_el0|PMEVCNTR21_EL0}}, x12 // encoding: [0xac,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr22_el0|PMEVCNTR22_EL0}}, x12 // encoding: [0xcc,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr23_el0|PMEVCNTR23_EL0}}, x12 // encoding: [0xec,0xea,0x1b,0xd5]
+// CHECK: msr {{pmevcntr24_el0|PMEVCNTR24_EL0}}, x12 // encoding: [0x0c,0xeb,0x1b,0xd5]
+// CHECK: msr {{pmevcntr25_el0|PMEVCNTR25_EL0}}, x12 // encoding: [0x2c,0xeb,0x1b,0xd5]
+// CHECK: msr {{pmevcntr26_el0|PMEVCNTR26_EL0}}, x12 // encoding: [0x4c,0xeb,0x1b,0xd5]
+// CHECK: msr {{pmevcntr27_el0|PMEVCNTR27_EL0}}, x12 // encoding: [0x6c,0xeb,0x1b,0xd5]
+// CHECK: msr {{pmevcntr28_el0|PMEVCNTR28_EL0}}, x12 // encoding: [0x8c,0xeb,0x1b,0xd5]
+// CHECK: msr {{pmevcntr29_el0|PMEVCNTR29_EL0}}, x12 // encoding: [0xac,0xeb,0x1b,0xd5]
+// CHECK: msr {{pmevcntr30_el0|PMEVCNTR30_EL0}}, x12 // encoding: [0xcc,0xeb,0x1b,0xd5]
+// CHECK: msr {{pmccfiltr_el0|PMCCFILTR_EL0}}, x12 // encoding: [0xec,0xef,0x1b,0xd5]
+// CHECK: msr {{pmevtyper0_el0|PMEVTYPER0_EL0}}, x12 // encoding: [0x0c,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper1_el0|PMEVTYPER1_EL0}}, x12 // encoding: [0x2c,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper2_el0|PMEVTYPER2_EL0}}, x12 // encoding: [0x4c,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper3_el0|PMEVTYPER3_EL0}}, x12 // encoding: [0x6c,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper4_el0|PMEVTYPER4_EL0}}, x12 // encoding: [0x8c,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper5_el0|PMEVTYPER5_EL0}}, x12 // encoding: [0xac,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper6_el0|PMEVTYPER6_EL0}}, x12 // encoding: [0xcc,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper7_el0|PMEVTYPER7_EL0}}, x12 // encoding: [0xec,0xec,0x1b,0xd5]
+// CHECK: msr {{pmevtyper8_el0|PMEVTYPER8_EL0}}, x12 // encoding: [0x0c,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper9_el0|PMEVTYPER9_EL0}}, x12 // encoding: [0x2c,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper10_el0|PMEVTYPER10_EL0}}, x12 // encoding: [0x4c,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper11_el0|PMEVTYPER11_EL0}}, x12 // encoding: [0x6c,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper12_el0|PMEVTYPER12_EL0}}, x12 // encoding: [0x8c,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper13_el0|PMEVTYPER13_EL0}}, x12 // encoding: [0xac,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper14_el0|PMEVTYPER14_EL0}}, x12 // encoding: [0xcc,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper15_el0|PMEVTYPER15_EL0}}, x12 // encoding: [0xec,0xed,0x1b,0xd5]
+// CHECK: msr {{pmevtyper16_el0|PMEVTYPER16_EL0}}, x12 // encoding: [0x0c,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper17_el0|PMEVTYPER17_EL0}}, x12 // encoding: [0x2c,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper18_el0|PMEVTYPER18_EL0}}, x12 // encoding: [0x4c,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper19_el0|PMEVTYPER19_EL0}}, x12 // encoding: [0x6c,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper20_el0|PMEVTYPER20_EL0}}, x12 // encoding: [0x8c,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper21_el0|PMEVTYPER21_EL0}}, x12 // encoding: [0xac,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper22_el0|PMEVTYPER22_EL0}}, x12 // encoding: [0xcc,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper23_el0|PMEVTYPER23_EL0}}, x12 // encoding: [0xec,0xee,0x1b,0xd5]
+// CHECK: msr {{pmevtyper24_el0|PMEVTYPER24_EL0}}, x12 // encoding: [0x0c,0xef,0x1b,0xd5]
+// CHECK: msr {{pmevtyper25_el0|PMEVTYPER25_EL0}}, x12 // encoding: [0x2c,0xef,0x1b,0xd5]
+// CHECK: msr {{pmevtyper26_el0|PMEVTYPER26_EL0}}, x12 // encoding: [0x4c,0xef,0x1b,0xd5]
+// CHECK: msr {{pmevtyper27_el0|PMEVTYPER27_EL0}}, x12 // encoding: [0x6c,0xef,0x1b,0xd5]
+// CHECK: msr {{pmevtyper28_el0|PMEVTYPER28_EL0}}, x12 // encoding: [0x8c,0xef,0x1b,0xd5]
+// CHECK: msr {{pmevtyper29_el0|PMEVTYPER29_EL0}}, x12 // encoding: [0xac,0xef,0x1b,0xd5]
+// CHECK: msr {{pmevtyper30_el0|PMEVTYPER30_EL0}}, x12 // encoding: [0xcc,0xef,0x1b,0xd5]
mrs x9, TEECR32_EL1
mrs x9, OSDTRRX_EL1
@@ -4445,315 +4483,315 @@ _func:
mrs x9, PMEVTYPER28_EL0
mrs x9, PMEVTYPER29_EL0
mrs x9, PMEVTYPER30_EL0
-// CHECK: mrs x9, teecr32_el1 // encoding: [0x09,0x00,0x32,0xd5]
-// CHECK: mrs x9, osdtrrx_el1 // encoding: [0x49,0x00,0x30,0xd5]
-// CHECK: mrs x9, mdccsr_el0 // encoding: [0x09,0x01,0x33,0xd5]
-// CHECK: mrs x9, mdccint_el1 // encoding: [0x09,0x02,0x30,0xd5]
-// CHECK: mrs x9, mdscr_el1 // encoding: [0x49,0x02,0x30,0xd5]
-// CHECK: mrs x9, osdtrtx_el1 // encoding: [0x49,0x03,0x30,0xd5]
-// CHECK: mrs x9, dbgdtr_el0 // encoding: [0x09,0x04,0x33,0xd5]
-// CHECK: mrs x9, dbgdtrrx_el0 // encoding: [0x09,0x05,0x33,0xd5]
-// CHECK: mrs x9, oseccr_el1 // encoding: [0x49,0x06,0x30,0xd5]
-// CHECK: mrs x9, dbgvcr32_el2 // encoding: [0x09,0x07,0x34,0xd5]
-// CHECK: mrs x9, dbgbvr0_el1 // encoding: [0x89,0x00,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr1_el1 // encoding: [0x89,0x01,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr2_el1 // encoding: [0x89,0x02,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr3_el1 // encoding: [0x89,0x03,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr4_el1 // encoding: [0x89,0x04,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr5_el1 // encoding: [0x89,0x05,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr6_el1 // encoding: [0x89,0x06,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr7_el1 // encoding: [0x89,0x07,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr8_el1 // encoding: [0x89,0x08,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr9_el1 // encoding: [0x89,0x09,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr10_el1 // encoding: [0x89,0x0a,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr11_el1 // encoding: [0x89,0x0b,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr12_el1 // encoding: [0x89,0x0c,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr13_el1 // encoding: [0x89,0x0d,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr14_el1 // encoding: [0x89,0x0e,0x30,0xd5]
-// CHECK: mrs x9, dbgbvr15_el1 // encoding: [0x89,0x0f,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr0_el1 // encoding: [0xa9,0x00,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr1_el1 // encoding: [0xa9,0x01,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr2_el1 // encoding: [0xa9,0x02,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr3_el1 // encoding: [0xa9,0x03,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr4_el1 // encoding: [0xa9,0x04,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr5_el1 // encoding: [0xa9,0x05,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr6_el1 // encoding: [0xa9,0x06,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr7_el1 // encoding: [0xa9,0x07,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr8_el1 // encoding: [0xa9,0x08,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr9_el1 // encoding: [0xa9,0x09,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr10_el1 // encoding: [0xa9,0x0a,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr11_el1 // encoding: [0xa9,0x0b,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr12_el1 // encoding: [0xa9,0x0c,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr13_el1 // encoding: [0xa9,0x0d,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr14_el1 // encoding: [0xa9,0x0e,0x30,0xd5]
-// CHECK: mrs x9, dbgbcr15_el1 // encoding: [0xa9,0x0f,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr0_el1 // encoding: [0xc9,0x00,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr1_el1 // encoding: [0xc9,0x01,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr2_el1 // encoding: [0xc9,0x02,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr3_el1 // encoding: [0xc9,0x03,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr4_el1 // encoding: [0xc9,0x04,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr5_el1 // encoding: [0xc9,0x05,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr6_el1 // encoding: [0xc9,0x06,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr7_el1 // encoding: [0xc9,0x07,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr8_el1 // encoding: [0xc9,0x08,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr9_el1 // encoding: [0xc9,0x09,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr10_el1 // encoding: [0xc9,0x0a,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr11_el1 // encoding: [0xc9,0x0b,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr12_el1 // encoding: [0xc9,0x0c,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr13_el1 // encoding: [0xc9,0x0d,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr14_el1 // encoding: [0xc9,0x0e,0x30,0xd5]
-// CHECK: mrs x9, dbgwvr15_el1 // encoding: [0xc9,0x0f,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr0_el1 // encoding: [0xe9,0x00,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr1_el1 // encoding: [0xe9,0x01,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr2_el1 // encoding: [0xe9,0x02,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr3_el1 // encoding: [0xe9,0x03,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr4_el1 // encoding: [0xe9,0x04,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr5_el1 // encoding: [0xe9,0x05,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr6_el1 // encoding: [0xe9,0x06,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr7_el1 // encoding: [0xe9,0x07,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr8_el1 // encoding: [0xe9,0x08,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr9_el1 // encoding: [0xe9,0x09,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr10_el1 // encoding: [0xe9,0x0a,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr11_el1 // encoding: [0xe9,0x0b,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr12_el1 // encoding: [0xe9,0x0c,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr13_el1 // encoding: [0xe9,0x0d,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr14_el1 // encoding: [0xe9,0x0e,0x30,0xd5]
-// CHECK: mrs x9, dbgwcr15_el1 // encoding: [0xe9,0x0f,0x30,0xd5]
-// CHECK: mrs x9, mdrar_el1 // encoding: [0x09,0x10,0x30,0xd5]
-// CHECK: mrs x9, teehbr32_el1 // encoding: [0x09,0x10,0x32,0xd5]
-// CHECK: mrs x9, oslsr_el1 // encoding: [0x89,0x11,0x30,0xd5]
-// CHECK: mrs x9, osdlr_el1 // encoding: [0x89,0x13,0x30,0xd5]
-// CHECK: mrs x9, dbgprcr_el1 // encoding: [0x89,0x14,0x30,0xd5]
-// CHECK: mrs x9, dbgclaimset_el1 // encoding: [0xc9,0x78,0x30,0xd5]
-// CHECK: mrs x9, dbgclaimclr_el1 // encoding: [0xc9,0x79,0x30,0xd5]
-// CHECK: mrs x9, dbgauthstatus_el1 // encoding: [0xc9,0x7e,0x30,0xd5]
-// CHECK: mrs x9, midr_el1 // encoding: [0x09,0x00,0x38,0xd5]
-// CHECK: mrs x9, ccsidr_el1 // encoding: [0x09,0x00,0x39,0xd5]
-// CHECK: mrs x9, csselr_el1 // encoding: [0x09,0x00,0x3a,0xd5]
-// CHECK: mrs x9, vpidr_el2 // encoding: [0x09,0x00,0x3c,0xd5]
-// CHECK: mrs x9, clidr_el1 // encoding: [0x29,0x00,0x39,0xd5]
-// CHECK: mrs x9, ctr_el0 // encoding: [0x29,0x00,0x3b,0xd5]
-// CHECK: mrs x9, mpidr_el1 // encoding: [0xa9,0x00,0x38,0xd5]
-// CHECK: mrs x9, vmpidr_el2 // encoding: [0xa9,0x00,0x3c,0xd5]
-// CHECK: mrs x9, revidr_el1 // encoding: [0xc9,0x00,0x38,0xd5]
-// CHECK: mrs x9, aidr_el1 // encoding: [0xe9,0x00,0x39,0xd5]
-// CHECK: mrs x9, dczid_el0 // encoding: [0xe9,0x00,0x3b,0xd5]
-// CHECK: mrs x9, id_pfr0_el1 // encoding: [0x09,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_pfr1_el1 // encoding: [0x29,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_dfr0_el1 // encoding: [0x49,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_afr0_el1 // encoding: [0x69,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_mmfr0_el1 // encoding: [0x89,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_mmfr1_el1 // encoding: [0xa9,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_mmfr2_el1 // encoding: [0xc9,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_mmfr3_el1 // encoding: [0xe9,0x01,0x38,0xd5]
-// CHECK: mrs x9, id_isar0_el1 // encoding: [0x09,0x02,0x38,0xd5]
-// CHECK: mrs x9, id_isar1_el1 // encoding: [0x29,0x02,0x38,0xd5]
-// CHECK: mrs x9, id_isar2_el1 // encoding: [0x49,0x02,0x38,0xd5]
-// CHECK: mrs x9, id_isar3_el1 // encoding: [0x69,0x02,0x38,0xd5]
-// CHECK: mrs x9, id_isar4_el1 // encoding: [0x89,0x02,0x38,0xd5]
-// CHECK: mrs x9, id_isar5_el1 // encoding: [0xa9,0x02,0x38,0xd5]
-// CHECK: mrs x9, mvfr0_el1 // encoding: [0x09,0x03,0x38,0xd5]
-// CHECK: mrs x9, mvfr1_el1 // encoding: [0x29,0x03,0x38,0xd5]
-// CHECK: mrs x9, mvfr2_el1 // encoding: [0x49,0x03,0x38,0xd5]
-// CHECK: mrs x9, id_aa64pfr0_el1 // encoding: [0x09,0x04,0x38,0xd5]
-// CHECK: mrs x9, id_aa64pfr1_el1 // encoding: [0x29,0x04,0x38,0xd5]
-// CHECK: mrs x9, id_aa64dfr0_el1 // encoding: [0x09,0x05,0x38,0xd5]
-// CHECK: mrs x9, id_aa64dfr1_el1 // encoding: [0x29,0x05,0x38,0xd5]
-// CHECK: mrs x9, id_aa64afr0_el1 // encoding: [0x89,0x05,0x38,0xd5]
-// CHECK: mrs x9, id_aa64afr1_el1 // encoding: [0xa9,0x05,0x38,0xd5]
-// CHECK: mrs x9, id_aa64isar0_el1 // encoding: [0x09,0x06,0x38,0xd5]
-// CHECK: mrs x9, id_aa64isar1_el1 // encoding: [0x29,0x06,0x38,0xd5]
-// CHECK: mrs x9, id_aa64mmfr0_el1 // encoding: [0x09,0x07,0x38,0xd5]
-// CHECK: mrs x9, id_aa64mmfr1_el1 // encoding: [0x29,0x07,0x38,0xd5]
-// CHECK: mrs x9, sctlr_el1 // encoding: [0x09,0x10,0x38,0xd5]
-// CHECK: mrs x9, sctlr_el2 // encoding: [0x09,0x10,0x3c,0xd5]
-// CHECK: mrs x9, sctlr_el3 // encoding: [0x09,0x10,0x3e,0xd5]
-// CHECK: mrs x9, actlr_el1 // encoding: [0x29,0x10,0x38,0xd5]
-// CHECK: mrs x9, actlr_el2 // encoding: [0x29,0x10,0x3c,0xd5]
-// CHECK: mrs x9, actlr_el3 // encoding: [0x29,0x10,0x3e,0xd5]
-// CHECK: mrs x9, cpacr_el1 // encoding: [0x49,0x10,0x38,0xd5]
-// CHECK: mrs x9, hcr_el2 // encoding: [0x09,0x11,0x3c,0xd5]
-// CHECK: mrs x9, scr_el3 // encoding: [0x09,0x11,0x3e,0xd5]
-// CHECK: mrs x9, mdcr_el2 // encoding: [0x29,0x11,0x3c,0xd5]
-// CHECK: mrs x9, sder32_el3 // encoding: [0x29,0x11,0x3e,0xd5]
-// CHECK: mrs x9, cptr_el2 // encoding: [0x49,0x11,0x3c,0xd5]
-// CHECK: mrs x9, cptr_el3 // encoding: [0x49,0x11,0x3e,0xd5]
-// CHECK: mrs x9, hstr_el2 // encoding: [0x69,0x11,0x3c,0xd5]
-// CHECK: mrs x9, hacr_el2 // encoding: [0xe9,0x11,0x3c,0xd5]
-// CHECK: mrs x9, mdcr_el3 // encoding: [0x29,0x13,0x3e,0xd5]
-// CHECK: mrs x9, ttbr0_el1 // encoding: [0x09,0x20,0x38,0xd5]
-// CHECK: mrs x9, ttbr0_el2 // encoding: [0x09,0x20,0x3c,0xd5]
-// CHECK: mrs x9, ttbr0_el3 // encoding: [0x09,0x20,0x3e,0xd5]
-// CHECK: mrs x9, ttbr1_el1 // encoding: [0x29,0x20,0x38,0xd5]
-// CHECK: mrs x9, tcr_el1 // encoding: [0x49,0x20,0x38,0xd5]
-// CHECK: mrs x9, tcr_el2 // encoding: [0x49,0x20,0x3c,0xd5]
-// CHECK: mrs x9, tcr_el3 // encoding: [0x49,0x20,0x3e,0xd5]
-// CHECK: mrs x9, vttbr_el2 // encoding: [0x09,0x21,0x3c,0xd5]
-// CHECK: mrs x9, vtcr_el2 // encoding: [0x49,0x21,0x3c,0xd5]
-// CHECK: mrs x9, dacr32_el2 // encoding: [0x09,0x30,0x3c,0xd5]
-// CHECK: mrs x9, spsr_el1 // encoding: [0x09,0x40,0x38,0xd5]
-// CHECK: mrs x9, spsr_el2 // encoding: [0x09,0x40,0x3c,0xd5]
-// CHECK: mrs x9, spsr_el3 // encoding: [0x09,0x40,0x3e,0xd5]
-// CHECK: mrs x9, elr_el1 // encoding: [0x29,0x40,0x38,0xd5]
-// CHECK: mrs x9, elr_el2 // encoding: [0x29,0x40,0x3c,0xd5]
-// CHECK: mrs x9, elr_el3 // encoding: [0x29,0x40,0x3e,0xd5]
-// CHECK: mrs x9, sp_el0 // encoding: [0x09,0x41,0x38,0xd5]
-// CHECK: mrs x9, sp_el1 // encoding: [0x09,0x41,0x3c,0xd5]
-// CHECK: mrs x9, sp_el2 // encoding: [0x09,0x41,0x3e,0xd5]
-// CHECK: mrs x9, spsel // encoding: [0x09,0x42,0x38,0xd5]
-// CHECK: mrs x9, nzcv // encoding: [0x09,0x42,0x3b,0xd5]
-// CHECK: mrs x9, daif // encoding: [0x29,0x42,0x3b,0xd5]
-// CHECK: mrs x9, currentel // encoding: [0x49,0x42,0x38,0xd5]
-// CHECK: mrs x9, spsr_irq // encoding: [0x09,0x43,0x3c,0xd5]
-// CHECK: mrs x9, spsr_abt // encoding: [0x29,0x43,0x3c,0xd5]
-// CHECK: mrs x9, spsr_und // encoding: [0x49,0x43,0x3c,0xd5]
-// CHECK: mrs x9, spsr_fiq // encoding: [0x69,0x43,0x3c,0xd5]
-// CHECK: mrs x9, fpcr // encoding: [0x09,0x44,0x3b,0xd5]
-// CHECK: mrs x9, fpsr // encoding: [0x29,0x44,0x3b,0xd5]
-// CHECK: mrs x9, dspsr_el0 // encoding: [0x09,0x45,0x3b,0xd5]
-// CHECK: mrs x9, dlr_el0 // encoding: [0x29,0x45,0x3b,0xd5]
-// CHECK: mrs x9, ifsr32_el2 // encoding: [0x29,0x50,0x3c,0xd5]
-// CHECK: mrs x9, afsr0_el1 // encoding: [0x09,0x51,0x38,0xd5]
-// CHECK: mrs x9, afsr0_el2 // encoding: [0x09,0x51,0x3c,0xd5]
-// CHECK: mrs x9, afsr0_el3 // encoding: [0x09,0x51,0x3e,0xd5]
-// CHECK: mrs x9, afsr1_el1 // encoding: [0x29,0x51,0x38,0xd5]
-// CHECK: mrs x9, afsr1_el2 // encoding: [0x29,0x51,0x3c,0xd5]
-// CHECK: mrs x9, afsr1_el3 // encoding: [0x29,0x51,0x3e,0xd5]
-// CHECK: mrs x9, esr_el1 // encoding: [0x09,0x52,0x38,0xd5]
-// CHECK: mrs x9, esr_el2 // encoding: [0x09,0x52,0x3c,0xd5]
-// CHECK: mrs x9, esr_el3 // encoding: [0x09,0x52,0x3e,0xd5]
-// CHECK: mrs x9, fpexc32_el2 // encoding: [0x09,0x53,0x3c,0xd5]
-// CHECK: mrs x9, far_el1 // encoding: [0x09,0x60,0x38,0xd5]
-// CHECK: mrs x9, far_el2 // encoding: [0x09,0x60,0x3c,0xd5]
-// CHECK: mrs x9, far_el3 // encoding: [0x09,0x60,0x3e,0xd5]
-// CHECK: mrs x9, hpfar_el2 // encoding: [0x89,0x60,0x3c,0xd5]
-// CHECK: mrs x9, par_el1 // encoding: [0x09,0x74,0x38,0xd5]
-// CHECK: mrs x9, pmcr_el0 // encoding: [0x09,0x9c,0x3b,0xd5]
-// CHECK: mrs x9, pmcntenset_el0 // encoding: [0x29,0x9c,0x3b,0xd5]
-// CHECK: mrs x9, pmcntenclr_el0 // encoding: [0x49,0x9c,0x3b,0xd5]
-// CHECK: mrs x9, pmovsclr_el0 // encoding: [0x69,0x9c,0x3b,0xd5]
-// CHECK: mrs x9, pmselr_el0 // encoding: [0xa9,0x9c,0x3b,0xd5]
-// CHECK: mrs x9, pmceid0_el0 // encoding: [0xc9,0x9c,0x3b,0xd5]
-// CHECK: mrs x9, pmceid1_el0 // encoding: [0xe9,0x9c,0x3b,0xd5]
-// CHECK: mrs x9, pmccntr_el0 // encoding: [0x09,0x9d,0x3b,0xd5]
-// CHECK: mrs x9, pmxevtyper_el0 // encoding: [0x29,0x9d,0x3b,0xd5]
-// CHECK: mrs x9, pmxevcntr_el0 // encoding: [0x49,0x9d,0x3b,0xd5]
-// CHECK: mrs x9, pmuserenr_el0 // encoding: [0x09,0x9e,0x3b,0xd5]
-// CHECK: mrs x9, pmintenset_el1 // encoding: [0x29,0x9e,0x38,0xd5]
-// CHECK: mrs x9, pmintenclr_el1 // encoding: [0x49,0x9e,0x38,0xd5]
-// CHECK: mrs x9, pmovsset_el0 // encoding: [0x69,0x9e,0x3b,0xd5]
-// CHECK: mrs x9, mair_el1 // encoding: [0x09,0xa2,0x38,0xd5]
-// CHECK: mrs x9, mair_el2 // encoding: [0x09,0xa2,0x3c,0xd5]
-// CHECK: mrs x9, mair_el3 // encoding: [0x09,0xa2,0x3e,0xd5]
-// CHECK: mrs x9, amair_el1 // encoding: [0x09,0xa3,0x38,0xd5]
-// CHECK: mrs x9, amair_el2 // encoding: [0x09,0xa3,0x3c,0xd5]
-// CHECK: mrs x9, amair_el3 // encoding: [0x09,0xa3,0x3e,0xd5]
-// CHECK: mrs x9, vbar_el1 // encoding: [0x09,0xc0,0x38,0xd5]
-// CHECK: mrs x9, vbar_el2 // encoding: [0x09,0xc0,0x3c,0xd5]
-// CHECK: mrs x9, vbar_el3 // encoding: [0x09,0xc0,0x3e,0xd5]
-// CHECK: mrs x9, rvbar_el1 // encoding: [0x29,0xc0,0x38,0xd5]
-// CHECK: mrs x9, rvbar_el2 // encoding: [0x29,0xc0,0x3c,0xd5]
-// CHECK: mrs x9, rvbar_el3 // encoding: [0x29,0xc0,0x3e,0xd5]
-// CHECK: mrs x9, rmr_el1 // encoding: [0x49,0xc0,0x38,0xd5]
-// CHECK: mrs x9, rmr_el2 // encoding: [0x49,0xc0,0x3c,0xd5]
-// CHECK: mrs x9, rmr_el3 // encoding: [0x49,0xc0,0x3e,0xd5]
-// CHECK: mrs x9, isr_el1 // encoding: [0x09,0xc1,0x38,0xd5]
-// CHECK: mrs x9, contextidr_el1 // encoding: [0x29,0xd0,0x38,0xd5]
-// CHECK: mrs x9, tpidr_el0 // encoding: [0x49,0xd0,0x3b,0xd5]
-// CHECK: mrs x9, tpidr_el2 // encoding: [0x49,0xd0,0x3c,0xd5]
-// CHECK: mrs x9, tpidr_el3 // encoding: [0x49,0xd0,0x3e,0xd5]
-// CHECK: mrs x9, tpidrro_el0 // encoding: [0x69,0xd0,0x3b,0xd5]
-// CHECK: mrs x9, tpidr_el1 // encoding: [0x89,0xd0,0x38,0xd5]
-// CHECK: mrs x9, cntfrq_el0 // encoding: [0x09,0xe0,0x3b,0xd5]
-// CHECK: mrs x9, cntpct_el0 // encoding: [0x29,0xe0,0x3b,0xd5]
-// CHECK: mrs x9, cntvct_el0 // encoding: [0x49,0xe0,0x3b,0xd5]
-// CHECK: mrs x9, cntvoff_el2 // encoding: [0x69,0xe0,0x3c,0xd5]
-// CHECK: mrs x9, cntkctl_el1 // encoding: [0x09,0xe1,0x38,0xd5]
-// CHECK: mrs x9, cnthctl_el2 // encoding: [0x09,0xe1,0x3c,0xd5]
-// CHECK: mrs x9, cntp_tval_el0 // encoding: [0x09,0xe2,0x3b,0xd5]
-// CHECK: mrs x9, cnthp_tval_el2 // encoding: [0x09,0xe2,0x3c,0xd5]
-// CHECK: mrs x9, cntps_tval_el1 // encoding: [0x09,0xe2,0x3f,0xd5]
-// CHECK: mrs x9, cntp_ctl_el0 // encoding: [0x29,0xe2,0x3b,0xd5]
-// CHECK: mrs x9, cnthp_ctl_el2 // encoding: [0x29,0xe2,0x3c,0xd5]
-// CHECK: mrs x9, cntps_ctl_el1 // encoding: [0x29,0xe2,0x3f,0xd5]
-// CHECK: mrs x9, cntp_cval_el0 // encoding: [0x49,0xe2,0x3b,0xd5]
-// CHECK: mrs x9, cnthp_cval_el2 // encoding: [0x49,0xe2,0x3c,0xd5]
-// CHECK: mrs x9, cntps_cval_el1 // encoding: [0x49,0xe2,0x3f,0xd5]
-// CHECK: mrs x9, cntv_tval_el0 // encoding: [0x09,0xe3,0x3b,0xd5]
-// CHECK: mrs x9, cntv_ctl_el0 // encoding: [0x29,0xe3,0x3b,0xd5]
-// CHECK: mrs x9, cntv_cval_el0 // encoding: [0x49,0xe3,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr0_el0 // encoding: [0x09,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr1_el0 // encoding: [0x29,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr2_el0 // encoding: [0x49,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr3_el0 // encoding: [0x69,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr4_el0 // encoding: [0x89,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr5_el0 // encoding: [0xa9,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr6_el0 // encoding: [0xc9,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr7_el0 // encoding: [0xe9,0xe8,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr8_el0 // encoding: [0x09,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr9_el0 // encoding: [0x29,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr10_el0 // encoding: [0x49,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr11_el0 // encoding: [0x69,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr12_el0 // encoding: [0x89,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr13_el0 // encoding: [0xa9,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr14_el0 // encoding: [0xc9,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr15_el0 // encoding: [0xe9,0xe9,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr16_el0 // encoding: [0x09,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr17_el0 // encoding: [0x29,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr18_el0 // encoding: [0x49,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr19_el0 // encoding: [0x69,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr20_el0 // encoding: [0x89,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr21_el0 // encoding: [0xa9,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr22_el0 // encoding: [0xc9,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr23_el0 // encoding: [0xe9,0xea,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr24_el0 // encoding: [0x09,0xeb,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr25_el0 // encoding: [0x29,0xeb,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr26_el0 // encoding: [0x49,0xeb,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr27_el0 // encoding: [0x69,0xeb,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr28_el0 // encoding: [0x89,0xeb,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr29_el0 // encoding: [0xa9,0xeb,0x3b,0xd5]
-// CHECK: mrs x9, pmevcntr30_el0 // encoding: [0xc9,0xeb,0x3b,0xd5]
-// CHECK: mrs x9, pmccfiltr_el0 // encoding: [0xe9,0xef,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper0_el0 // encoding: [0x09,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper1_el0 // encoding: [0x29,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper2_el0 // encoding: [0x49,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper3_el0 // encoding: [0x69,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper4_el0 // encoding: [0x89,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper5_el0 // encoding: [0xa9,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper6_el0 // encoding: [0xc9,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper7_el0 // encoding: [0xe9,0xec,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper8_el0 // encoding: [0x09,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper9_el0 // encoding: [0x29,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper10_el0 // encoding: [0x49,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper11_el0 // encoding: [0x69,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper12_el0 // encoding: [0x89,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper13_el0 // encoding: [0xa9,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper14_el0 // encoding: [0xc9,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper15_el0 // encoding: [0xe9,0xed,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper16_el0 // encoding: [0x09,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper17_el0 // encoding: [0x29,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper18_el0 // encoding: [0x49,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper19_el0 // encoding: [0x69,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper20_el0 // encoding: [0x89,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper21_el0 // encoding: [0xa9,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper22_el0 // encoding: [0xc9,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper23_el0 // encoding: [0xe9,0xee,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper24_el0 // encoding: [0x09,0xef,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper25_el0 // encoding: [0x29,0xef,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper26_el0 // encoding: [0x49,0xef,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper27_el0 // encoding: [0x69,0xef,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper28_el0 // encoding: [0x89,0xef,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper29_el0 // encoding: [0xa9,0xef,0x3b,0xd5]
-// CHECK: mrs x9, pmevtyper30_el0 // encoding: [0xc9,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{teecr32_el1|TEECR32_EL1}} // encoding: [0x09,0x00,0x32,0xd5]
+// CHECK: mrs x9, {{osdtrrx_el1|OSDTRRX_EL1}} // encoding: [0x49,0x00,0x30,0xd5]
+// CHECK: mrs x9, {{mdccsr_el0|MDCCSR_EL0}} // encoding: [0x09,0x01,0x33,0xd5]
+// CHECK: mrs x9, {{mdccint_el1|MDCCINT_EL1}} // encoding: [0x09,0x02,0x30,0xd5]
+// CHECK: mrs x9, {{mdscr_el1|MDSCR_EL1}} // encoding: [0x49,0x02,0x30,0xd5]
+// CHECK: mrs x9, {{osdtrtx_el1|OSDTRTX_EL1}} // encoding: [0x49,0x03,0x30,0xd5]
+// CHECK: mrs x9, {{dbgdtr_el0|DBGDTR_EL0}} // encoding: [0x09,0x04,0x33,0xd5]
+// CHECK: mrs x9, {{dbgdtrrx_el0|DBGDTRRX_EL0}} // encoding: [0x09,0x05,0x33,0xd5]
+// CHECK: mrs x9, {{oseccr_el1|OSECCR_EL1}} // encoding: [0x49,0x06,0x30,0xd5]
+// CHECK: mrs x9, {{dbgvcr32_el2|DBGVCR32_EL2}} // encoding: [0x09,0x07,0x34,0xd5]
+// CHECK: mrs x9, {{dbgbvr0_el1|DBGBVR0_EL1}} // encoding: [0x89,0x00,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr1_el1|DBGBVR1_EL1}} // encoding: [0x89,0x01,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr2_el1|DBGBVR2_EL1}} // encoding: [0x89,0x02,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr3_el1|DBGBVR3_EL1}} // encoding: [0x89,0x03,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr4_el1|DBGBVR4_EL1}} // encoding: [0x89,0x04,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr5_el1|DBGBVR5_EL1}} // encoding: [0x89,0x05,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr6_el1|DBGBVR6_EL1}} // encoding: [0x89,0x06,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr7_el1|DBGBVR7_EL1}} // encoding: [0x89,0x07,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr8_el1|DBGBVR8_EL1}} // encoding: [0x89,0x08,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr9_el1|DBGBVR9_EL1}} // encoding: [0x89,0x09,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr10_el1|DBGBVR10_EL1}} // encoding: [0x89,0x0a,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr11_el1|DBGBVR11_EL1}} // encoding: [0x89,0x0b,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr12_el1|DBGBVR12_EL1}} // encoding: [0x89,0x0c,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr13_el1|DBGBVR13_EL1}} // encoding: [0x89,0x0d,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr14_el1|DBGBVR14_EL1}} // encoding: [0x89,0x0e,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbvr15_el1|DBGBVR15_EL1}} // encoding: [0x89,0x0f,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr0_el1|DBGBCR0_EL1}} // encoding: [0xa9,0x00,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr1_el1|DBGBCR1_EL1}} // encoding: [0xa9,0x01,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr2_el1|DBGBCR2_EL1}} // encoding: [0xa9,0x02,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr3_el1|DBGBCR3_EL1}} // encoding: [0xa9,0x03,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr4_el1|DBGBCR4_EL1}} // encoding: [0xa9,0x04,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr5_el1|DBGBCR5_EL1}} // encoding: [0xa9,0x05,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr6_el1|DBGBCR6_EL1}} // encoding: [0xa9,0x06,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr7_el1|DBGBCR7_EL1}} // encoding: [0xa9,0x07,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr8_el1|DBGBCR8_EL1}} // encoding: [0xa9,0x08,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr9_el1|DBGBCR9_EL1}} // encoding: [0xa9,0x09,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr10_el1|DBGBCR10_EL1}} // encoding: [0xa9,0x0a,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr11_el1|DBGBCR11_EL1}} // encoding: [0xa9,0x0b,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr12_el1|DBGBCR12_EL1}} // encoding: [0xa9,0x0c,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr13_el1|DBGBCR13_EL1}} // encoding: [0xa9,0x0d,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr14_el1|DBGBCR14_EL1}} // encoding: [0xa9,0x0e,0x30,0xd5]
+// CHECK: mrs x9, {{dbgbcr15_el1|DBGBCR15_EL1}} // encoding: [0xa9,0x0f,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr0_el1|DBGWVR0_EL1}} // encoding: [0xc9,0x00,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr1_el1|DBGWVR1_EL1}} // encoding: [0xc9,0x01,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr2_el1|DBGWVR2_EL1}} // encoding: [0xc9,0x02,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr3_el1|DBGWVR3_EL1}} // encoding: [0xc9,0x03,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr4_el1|DBGWVR4_EL1}} // encoding: [0xc9,0x04,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr5_el1|DBGWVR5_EL1}} // encoding: [0xc9,0x05,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr6_el1|DBGWVR6_EL1}} // encoding: [0xc9,0x06,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr7_el1|DBGWVR7_EL1}} // encoding: [0xc9,0x07,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr8_el1|DBGWVR8_EL1}} // encoding: [0xc9,0x08,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr9_el1|DBGWVR9_EL1}} // encoding: [0xc9,0x09,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr10_el1|DBGWVR10_EL1}} // encoding: [0xc9,0x0a,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr11_el1|DBGWVR11_EL1}} // encoding: [0xc9,0x0b,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr12_el1|DBGWVR12_EL1}} // encoding: [0xc9,0x0c,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr13_el1|DBGWVR13_EL1}} // encoding: [0xc9,0x0d,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr14_el1|DBGWVR14_EL1}} // encoding: [0xc9,0x0e,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwvr15_el1|DBGWVR15_EL1}} // encoding: [0xc9,0x0f,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr0_el1|DBGWCR0_EL1}} // encoding: [0xe9,0x00,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr1_el1|DBGWCR1_EL1}} // encoding: [0xe9,0x01,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr2_el1|DBGWCR2_EL1}} // encoding: [0xe9,0x02,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr3_el1|DBGWCR3_EL1}} // encoding: [0xe9,0x03,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr4_el1|DBGWCR4_EL1}} // encoding: [0xe9,0x04,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr5_el1|DBGWCR5_EL1}} // encoding: [0xe9,0x05,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr6_el1|DBGWCR6_EL1}} // encoding: [0xe9,0x06,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr7_el1|DBGWCR7_EL1}} // encoding: [0xe9,0x07,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr8_el1|DBGWCR8_EL1}} // encoding: [0xe9,0x08,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr9_el1|DBGWCR9_EL1}} // encoding: [0xe9,0x09,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr10_el1|DBGWCR10_EL1}} // encoding: [0xe9,0x0a,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr11_el1|DBGWCR11_EL1}} // encoding: [0xe9,0x0b,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr12_el1|DBGWCR12_EL1}} // encoding: [0xe9,0x0c,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr13_el1|DBGWCR13_EL1}} // encoding: [0xe9,0x0d,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr14_el1|DBGWCR14_EL1}} // encoding: [0xe9,0x0e,0x30,0xd5]
+// CHECK: mrs x9, {{dbgwcr15_el1|DBGWCR15_EL1}} // encoding: [0xe9,0x0f,0x30,0xd5]
+// CHECK: mrs x9, {{mdrar_el1|MDRAR_EL1}} // encoding: [0x09,0x10,0x30,0xd5]
+// CHECK: mrs x9, {{teehbr32_el1|TEEHBR32_EL1}} // encoding: [0x09,0x10,0x32,0xd5]
+// CHECK: mrs x9, {{oslsr_el1|OSLSR_EL1}} // encoding: [0x89,0x11,0x30,0xd5]
+// CHECK: mrs x9, {{osdlr_el1|OSDLR_EL1}} // encoding: [0x89,0x13,0x30,0xd5]
+// CHECK: mrs x9, {{dbgprcr_el1|DBGPRCR_EL1}} // encoding: [0x89,0x14,0x30,0xd5]
+// CHECK: mrs x9, {{dbgclaimset_el1|DBGCLAIMSET_EL1}} // encoding: [0xc9,0x78,0x30,0xd5]
+// CHECK: mrs x9, {{dbgclaimclr_el1|DBGCLAIMCLR_EL1}} // encoding: [0xc9,0x79,0x30,0xd5]
+// CHECK: mrs x9, {{dbgauthstatus_el1|DBGAUTHSTATUS_EL1}} // encoding: [0xc9,0x7e,0x30,0xd5]
+// CHECK: mrs x9, {{midr_el1|MIDR_EL1}} // encoding: [0x09,0x00,0x38,0xd5]
+// CHECK: mrs x9, {{ccsidr_el1|CCSIDR_EL1}} // encoding: [0x09,0x00,0x39,0xd5]
+// CHECK: mrs x9, {{csselr_el1|CSSELR_EL1}} // encoding: [0x09,0x00,0x3a,0xd5]
+// CHECK: mrs x9, {{vpidr_el2|VPIDR_EL2}} // encoding: [0x09,0x00,0x3c,0xd5]
+// CHECK: mrs x9, {{clidr_el1|CLIDR_EL1}} // encoding: [0x29,0x00,0x39,0xd5]
+// CHECK: mrs x9, {{ctr_el0|CTR_EL0}} // encoding: [0x29,0x00,0x3b,0xd5]
+// CHECK: mrs x9, {{mpidr_el1|MPIDR_EL1}} // encoding: [0xa9,0x00,0x38,0xd5]
+// CHECK: mrs x9, {{vmpidr_el2|VMPIDR_EL2}} // encoding: [0xa9,0x00,0x3c,0xd5]
+// CHECK: mrs x9, {{revidr_el1|REVIDR_EL1}} // encoding: [0xc9,0x00,0x38,0xd5]
+// CHECK: mrs x9, {{aidr_el1|AIDR_EL1}} // encoding: [0xe9,0x00,0x39,0xd5]
+// CHECK: mrs x9, {{dczid_el0|DCZID_EL0}} // encoding: [0xe9,0x00,0x3b,0xd5]
+// CHECK: mrs x9, {{id_pfr0_el1|ID_PFR0_EL1}} // encoding: [0x09,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_pfr1_el1|ID_PFR1_EL1}} // encoding: [0x29,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_dfr0_el1|ID_DFR0_EL1}} // encoding: [0x49,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_afr0_el1|ID_AFR0_EL1}} // encoding: [0x69,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_mmfr0_el1|ID_MMFR0_EL1}} // encoding: [0x89,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_mmfr1_el1|ID_MMFR1_EL1}} // encoding: [0xa9,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_mmfr2_el1|ID_MMFR2_EL1}} // encoding: [0xc9,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_mmfr3_el1|ID_MMFR3_EL1}} // encoding: [0xe9,0x01,0x38,0xd5]
+// CHECK: mrs x9, {{id_isar0_el1|ID_ISAR0_EL1}} // encoding: [0x09,0x02,0x38,0xd5]
+// CHECK: mrs x9, {{id_isar1_el1|ID_ISAR1_EL1}} // encoding: [0x29,0x02,0x38,0xd5]
+// CHECK: mrs x9, {{id_isar2_el1|ID_ISAR2_EL1}} // encoding: [0x49,0x02,0x38,0xd5]
+// CHECK: mrs x9, {{id_isar3_el1|ID_ISAR3_EL1}} // encoding: [0x69,0x02,0x38,0xd5]
+// CHECK: mrs x9, {{id_isar4_el1|ID_ISAR4_EL1}} // encoding: [0x89,0x02,0x38,0xd5]
+// CHECK: mrs x9, {{id_isar5_el1|ID_ISAR5_EL1}} // encoding: [0xa9,0x02,0x38,0xd5]
+// CHECK: mrs x9, {{mvfr0_el1|MVFR0_EL1}} // encoding: [0x09,0x03,0x38,0xd5]
+// CHECK: mrs x9, {{mvfr1_el1|MVFR1_EL1}} // encoding: [0x29,0x03,0x38,0xd5]
+// CHECK: mrs x9, {{mvfr2_el1|MVFR2_EL1}} // encoding: [0x49,0x03,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64pfr0_el1|ID_AA64PFR0_EL1}} // encoding: [0x09,0x04,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64pfr1_el1|ID_AA64PFR1_EL1}} // encoding: [0x29,0x04,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64dfr0_el1|ID_AA64DFR0_EL1}} // encoding: [0x09,0x05,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64dfr1_el1|ID_AA64DFR1_EL1}} // encoding: [0x29,0x05,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64afr0_el1|ID_AA64AFR0_EL1}} // encoding: [0x89,0x05,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64afr1_el1|ID_AA64AFR1_EL1}} // encoding: [0xa9,0x05,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64isar0_el1|ID_AA64ISAR0_EL1}} // encoding: [0x09,0x06,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64isar1_el1|ID_AA64ISAR1_EL1}} // encoding: [0x29,0x06,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64mmfr0_el1|ID_AA64MMFR0_EL1}} // encoding: [0x09,0x07,0x38,0xd5]
+// CHECK: mrs x9, {{id_aa64mmfr1_el1|ID_AA64MMFR1_EL1}} // encoding: [0x29,0x07,0x38,0xd5]
+// CHECK: mrs x9, {{sctlr_el1|SCTLR_EL1}} // encoding: [0x09,0x10,0x38,0xd5]
+// CHECK: mrs x9, {{sctlr_el2|SCTLR_EL2}} // encoding: [0x09,0x10,0x3c,0xd5]
+// CHECK: mrs x9, {{sctlr_el3|SCTLR_EL3}} // encoding: [0x09,0x10,0x3e,0xd5]
+// CHECK: mrs x9, {{actlr_el1|ACTLR_EL1}} // encoding: [0x29,0x10,0x38,0xd5]
+// CHECK: mrs x9, {{actlr_el2|ACTLR_EL2}} // encoding: [0x29,0x10,0x3c,0xd5]
+// CHECK: mrs x9, {{actlr_el3|ACTLR_EL3}} // encoding: [0x29,0x10,0x3e,0xd5]
+// CHECK: mrs x9, {{cpacr_el1|CPACR_EL1}} // encoding: [0x49,0x10,0x38,0xd5]
+// CHECK: mrs x9, {{hcr_el2|HCR_EL2}} // encoding: [0x09,0x11,0x3c,0xd5]
+// CHECK: mrs x9, {{scr_el3|SCR_EL3}} // encoding: [0x09,0x11,0x3e,0xd5]
+// CHECK: mrs x9, {{mdcr_el2|MDCR_EL2}} // encoding: [0x29,0x11,0x3c,0xd5]
+// CHECK: mrs x9, {{sder32_el3|SDER32_EL3}} // encoding: [0x29,0x11,0x3e,0xd5]
+// CHECK: mrs x9, {{cptr_el2|CPTR_EL2}} // encoding: [0x49,0x11,0x3c,0xd5]
+// CHECK: mrs x9, {{cptr_el3|CPTR_EL3}} // encoding: [0x49,0x11,0x3e,0xd5]
+// CHECK: mrs x9, {{hstr_el2|HSTR_EL2}} // encoding: [0x69,0x11,0x3c,0xd5]
+// CHECK: mrs x9, {{hacr_el2|HACR_EL2}} // encoding: [0xe9,0x11,0x3c,0xd5]
+// CHECK: mrs x9, {{mdcr_el3|MDCR_EL3}} // encoding: [0x29,0x13,0x3e,0xd5]
+// CHECK: mrs x9, {{ttbr0_el1|TTBR0_EL1}} // encoding: [0x09,0x20,0x38,0xd5]
+// CHECK: mrs x9, {{ttbr0_el2|TTBR0_EL2}} // encoding: [0x09,0x20,0x3c,0xd5]
+// CHECK: mrs x9, {{ttbr0_el3|TTBR0_EL3}} // encoding: [0x09,0x20,0x3e,0xd5]
+// CHECK: mrs x9, {{ttbr1_el1|TTBR1_EL1}} // encoding: [0x29,0x20,0x38,0xd5]
+// CHECK: mrs x9, {{tcr_el1|TCR_EL1}} // encoding: [0x49,0x20,0x38,0xd5]
+// CHECK: mrs x9, {{tcr_el2|TCR_EL2}} // encoding: [0x49,0x20,0x3c,0xd5]
+// CHECK: mrs x9, {{tcr_el3|TCR_EL3}} // encoding: [0x49,0x20,0x3e,0xd5]
+// CHECK: mrs x9, {{vttbr_el2|VTTBR_EL2}} // encoding: [0x09,0x21,0x3c,0xd5]
+// CHECK: mrs x9, {{vtcr_el2|VTCR_EL2}} // encoding: [0x49,0x21,0x3c,0xd5]
+// CHECK: mrs x9, {{dacr32_el2|DACR32_EL2}} // encoding: [0x09,0x30,0x3c,0xd5]
+// CHECK: mrs x9, {{spsr_el1|SPSR_EL1}} // encoding: [0x09,0x40,0x38,0xd5]
+// CHECK: mrs x9, {{spsr_el2|SPSR_EL2}} // encoding: [0x09,0x40,0x3c,0xd5]
+// CHECK: mrs x9, {{spsr_el3|SPSR_EL3}} // encoding: [0x09,0x40,0x3e,0xd5]
+// CHECK: mrs x9, {{elr_el1|ELR_EL1}} // encoding: [0x29,0x40,0x38,0xd5]
+// CHECK: mrs x9, {{elr_el2|ELR_EL2}} // encoding: [0x29,0x40,0x3c,0xd5]
+// CHECK: mrs x9, {{elr_el3|ELR_EL3}} // encoding: [0x29,0x40,0x3e,0xd5]
+// CHECK: mrs x9, {{sp_el0|SP_EL0}} // encoding: [0x09,0x41,0x38,0xd5]
+// CHECK: mrs x9, {{sp_el1|SP_EL1}} // encoding: [0x09,0x41,0x3c,0xd5]
+// CHECK: mrs x9, {{sp_el2|SP_EL2}} // encoding: [0x09,0x41,0x3e,0xd5]
+// CHECK: mrs x9, {{spsel|SPSEL}} // encoding: [0x09,0x42,0x38,0xd5]
+// CHECK: mrs x9, {{nzcv|NZCV}} // encoding: [0x09,0x42,0x3b,0xd5]
+// CHECK: mrs x9, {{daif|DAIF}} // encoding: [0x29,0x42,0x3b,0xd5]
+// CHECK: mrs x9, {{currentel|CURRENTEL}} // encoding: [0x49,0x42,0x38,0xd5]
+// CHECK: mrs x9, {{spsr_irq|SPSR_IRQ}} // encoding: [0x09,0x43,0x3c,0xd5]
+// CHECK: mrs x9, {{spsr_abt|SPSR_ABT}} // encoding: [0x29,0x43,0x3c,0xd5]
+// CHECK: mrs x9, {{spsr_und|SPSR_UND}} // encoding: [0x49,0x43,0x3c,0xd5]
+// CHECK: mrs x9, {{spsr_fiq|SPSR_FIQ}} // encoding: [0x69,0x43,0x3c,0xd5]
+// CHECK: mrs x9, {{fpcr|FPCR}} // encoding: [0x09,0x44,0x3b,0xd5]
+// CHECK: mrs x9, {{fpsr|FPSR}} // encoding: [0x29,0x44,0x3b,0xd5]
+// CHECK: mrs x9, {{dspsr_el0|DSPSR_EL0}} // encoding: [0x09,0x45,0x3b,0xd5]
+// CHECK: mrs x9, {{dlr_el0|DLR_EL0}} // encoding: [0x29,0x45,0x3b,0xd5]
+// CHECK: mrs x9, {{ifsr32_el2|IFSR32_EL2}} // encoding: [0x29,0x50,0x3c,0xd5]
+// CHECK: mrs x9, {{afsr0_el1|AFSR0_EL1}} // encoding: [0x09,0x51,0x38,0xd5]
+// CHECK: mrs x9, {{afsr0_el2|AFSR0_EL2}} // encoding: [0x09,0x51,0x3c,0xd5]
+// CHECK: mrs x9, {{afsr0_el3|AFSR0_EL3}} // encoding: [0x09,0x51,0x3e,0xd5]
+// CHECK: mrs x9, {{afsr1_el1|AFSR1_EL1}} // encoding: [0x29,0x51,0x38,0xd5]
+// CHECK: mrs x9, {{afsr1_el2|AFSR1_EL2}} // encoding: [0x29,0x51,0x3c,0xd5]
+// CHECK: mrs x9, {{afsr1_el3|AFSR1_EL3}} // encoding: [0x29,0x51,0x3e,0xd5]
+// CHECK: mrs x9, {{esr_el1|ESR_EL1}} // encoding: [0x09,0x52,0x38,0xd5]
+// CHECK: mrs x9, {{esr_el2|ESR_EL2}} // encoding: [0x09,0x52,0x3c,0xd5]
+// CHECK: mrs x9, {{esr_el3|ESR_EL3}} // encoding: [0x09,0x52,0x3e,0xd5]
+// CHECK: mrs x9, {{fpexc32_el2|FPEXC32_EL2}} // encoding: [0x09,0x53,0x3c,0xd5]
+// CHECK: mrs x9, {{far_el1|FAR_EL1}} // encoding: [0x09,0x60,0x38,0xd5]
+// CHECK: mrs x9, {{far_el2|FAR_EL2}} // encoding: [0x09,0x60,0x3c,0xd5]
+// CHECK: mrs x9, {{far_el3|FAR_EL3}} // encoding: [0x09,0x60,0x3e,0xd5]
+// CHECK: mrs x9, {{hpfar_el2|HPFAR_EL2}} // encoding: [0x89,0x60,0x3c,0xd5]
+// CHECK: mrs x9, {{par_el1|PAR_EL1}} // encoding: [0x09,0x74,0x38,0xd5]
+// CHECK: mrs x9, {{pmcr_el0|PMCR_EL0}} // encoding: [0x09,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, {{pmcntenset_el0|PMCNTENSET_EL0}} // encoding: [0x29,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, {{pmcntenclr_el0|PMCNTENCLR_EL0}} // encoding: [0x49,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, {{pmovsclr_el0|PMOVSCLR_EL0}} // encoding: [0x69,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, {{pmselr_el0|PMSELR_EL0}} // encoding: [0xa9,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, {{pmceid0_el0|PMCEID0_EL0}} // encoding: [0xc9,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, {{pmceid1_el0|PMCEID1_EL0}} // encoding: [0xe9,0x9c,0x3b,0xd5]
+// CHECK: mrs x9, {{pmccntr_el0|PMCCNTR_EL0}} // encoding: [0x09,0x9d,0x3b,0xd5]
+// CHECK: mrs x9, {{pmxevtyper_el0|PMXEVTYPER_EL0}} // encoding: [0x29,0x9d,0x3b,0xd5]
+// CHECK: mrs x9, {{pmxevcntr_el0|PMXEVCNTR_EL0}} // encoding: [0x49,0x9d,0x3b,0xd5]
+// CHECK: mrs x9, {{pmuserenr_el0|PMUSERENR_EL0}} // encoding: [0x09,0x9e,0x3b,0xd5]
+// CHECK: mrs x9, {{pmintenset_el1|PMINTENSET_EL1}} // encoding: [0x29,0x9e,0x38,0xd5]
+// CHECK: mrs x9, {{pmintenclr_el1|PMINTENCLR_EL1}} // encoding: [0x49,0x9e,0x38,0xd5]
+// CHECK: mrs x9, {{pmovsset_el0|PMOVSSET_EL0}} // encoding: [0x69,0x9e,0x3b,0xd5]
+// CHECK: mrs x9, {{mair_el1|MAIR_EL1}} // encoding: [0x09,0xa2,0x38,0xd5]
+// CHECK: mrs x9, {{mair_el2|MAIR_EL2}} // encoding: [0x09,0xa2,0x3c,0xd5]
+// CHECK: mrs x9, {{mair_el3|MAIR_EL3}} // encoding: [0x09,0xa2,0x3e,0xd5]
+// CHECK: mrs x9, {{amair_el1|AMAIR_EL1}} // encoding: [0x09,0xa3,0x38,0xd5]
+// CHECK: mrs x9, {{amair_el2|AMAIR_EL2}} // encoding: [0x09,0xa3,0x3c,0xd5]
+// CHECK: mrs x9, {{amair_el3|AMAIR_EL3}} // encoding: [0x09,0xa3,0x3e,0xd5]
+// CHECK: mrs x9, {{vbar_el1|VBAR_EL1}} // encoding: [0x09,0xc0,0x38,0xd5]
+// CHECK: mrs x9, {{vbar_el2|VBAR_EL2}} // encoding: [0x09,0xc0,0x3c,0xd5]
+// CHECK: mrs x9, {{vbar_el3|VBAR_EL3}} // encoding: [0x09,0xc0,0x3e,0xd5]
+// CHECK: mrs x9, {{rvbar_el1|RVBAR_EL1}} // encoding: [0x29,0xc0,0x38,0xd5]
+// CHECK: mrs x9, {{rvbar_el2|RVBAR_EL2}} // encoding: [0x29,0xc0,0x3c,0xd5]
+// CHECK: mrs x9, {{rvbar_el3|RVBAR_EL3}} // encoding: [0x29,0xc0,0x3e,0xd5]
+// CHECK: mrs x9, {{rmr_el1|RMR_EL1}} // encoding: [0x49,0xc0,0x38,0xd5]
+// CHECK: mrs x9, {{rmr_el2|RMR_EL2}} // encoding: [0x49,0xc0,0x3c,0xd5]
+// CHECK: mrs x9, {{rmr_el3|RMR_EL3}} // encoding: [0x49,0xc0,0x3e,0xd5]
+// CHECK: mrs x9, {{isr_el1|ISR_EL1}} // encoding: [0x09,0xc1,0x38,0xd5]
+// CHECK: mrs x9, {{contextidr_el1|CONTEXTIDR_EL1}} // encoding: [0x29,0xd0,0x38,0xd5]
+// CHECK: mrs x9, {{tpidr_el0|TPIDR_EL0}} // encoding: [0x49,0xd0,0x3b,0xd5]
+// CHECK: mrs x9, {{tpidr_el2|TPIDR_EL2}} // encoding: [0x49,0xd0,0x3c,0xd5]
+// CHECK: mrs x9, {{tpidr_el3|TPIDR_EL3}} // encoding: [0x49,0xd0,0x3e,0xd5]
+// CHECK: mrs x9, {{tpidrro_el0|TPIDRRO_EL0}} // encoding: [0x69,0xd0,0x3b,0xd5]
+// CHECK: mrs x9, {{tpidr_el1|TPIDR_EL1}} // encoding: [0x89,0xd0,0x38,0xd5]
+// CHECK: mrs x9, {{cntfrq_el0|CNTFRQ_EL0}} // encoding: [0x09,0xe0,0x3b,0xd5]
+// CHECK: mrs x9, {{cntpct_el0|CNTPCT_EL0}} // encoding: [0x29,0xe0,0x3b,0xd5]
+// CHECK: mrs x9, {{cntvct_el0|CNTVCT_EL0}} // encoding: [0x49,0xe0,0x3b,0xd5]
+// CHECK: mrs x9, {{cntvoff_el2|CNTVOFF_EL2}} // encoding: [0x69,0xe0,0x3c,0xd5]
+// CHECK: mrs x9, {{cntkctl_el1|CNTKCTL_EL1}} // encoding: [0x09,0xe1,0x38,0xd5]
+// CHECK: mrs x9, {{cnthctl_el2|CNTHCTL_EL2}} // encoding: [0x09,0xe1,0x3c,0xd5]
+// CHECK: mrs x9, {{cntp_tval_el0|CNTP_TVAL_EL0}} // encoding: [0x09,0xe2,0x3b,0xd5]
+// CHECK: mrs x9, {{cnthp_tval_el2|CNTHP_TVAL_EL2}} // encoding: [0x09,0xe2,0x3c,0xd5]
+// CHECK: mrs x9, {{cntps_tval_el1|CNTPS_TVAL_EL1}} // encoding: [0x09,0xe2,0x3f,0xd5]
+// CHECK: mrs x9, {{cntp_ctl_el0|CNTP_CTL_EL0}} // encoding: [0x29,0xe2,0x3b,0xd5]
+// CHECK: mrs x9, {{cnthp_ctl_el2|CNTHP_CTL_EL2}} // encoding: [0x29,0xe2,0x3c,0xd5]
+// CHECK: mrs x9, {{cntps_ctl_el1|CNTPS_CTL_EL1}} // encoding: [0x29,0xe2,0x3f,0xd5]
+// CHECK: mrs x9, {{cntp_cval_el0|CNTP_CVAL_EL0}} // encoding: [0x49,0xe2,0x3b,0xd5]
+// CHECK: mrs x9, {{cnthp_cval_el2|CNTHP_CVAL_EL2}} // encoding: [0x49,0xe2,0x3c,0xd5]
+// CHECK: mrs x9, {{cntps_cval_el1|CNTPS_CVAL_EL1}} // encoding: [0x49,0xe2,0x3f,0xd5]
+// CHECK: mrs x9, {{cntv_tval_el0|CNTV_TVAL_EL0}} // encoding: [0x09,0xe3,0x3b,0xd5]
+// CHECK: mrs x9, {{cntv_ctl_el0|CNTV_CTL_EL0}} // encoding: [0x29,0xe3,0x3b,0xd5]
+// CHECK: mrs x9, {{cntv_cval_el0|CNTV_CVAL_EL0}} // encoding: [0x49,0xe3,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr0_el0|PMEVCNTR0_EL0}} // encoding: [0x09,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr1_el0|PMEVCNTR1_EL0}} // encoding: [0x29,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr2_el0|PMEVCNTR2_EL0}} // encoding: [0x49,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr3_el0|PMEVCNTR3_EL0}} // encoding: [0x69,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr4_el0|PMEVCNTR4_EL0}} // encoding: [0x89,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr5_el0|PMEVCNTR5_EL0}} // encoding: [0xa9,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr6_el0|PMEVCNTR6_EL0}} // encoding: [0xc9,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr7_el0|PMEVCNTR7_EL0}} // encoding: [0xe9,0xe8,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr8_el0|PMEVCNTR8_EL0}} // encoding: [0x09,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr9_el0|PMEVCNTR9_EL0}} // encoding: [0x29,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr10_el0|PMEVCNTR10_EL0}} // encoding: [0x49,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr11_el0|PMEVCNTR11_EL0}} // encoding: [0x69,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr12_el0|PMEVCNTR12_EL0}} // encoding: [0x89,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr13_el0|PMEVCNTR13_EL0}} // encoding: [0xa9,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr14_el0|PMEVCNTR14_EL0}} // encoding: [0xc9,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr15_el0|PMEVCNTR15_EL0}} // encoding: [0xe9,0xe9,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr16_el0|PMEVCNTR16_EL0}} // encoding: [0x09,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr17_el0|PMEVCNTR17_EL0}} // encoding: [0x29,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr18_el0|PMEVCNTR18_EL0}} // encoding: [0x49,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr19_el0|PMEVCNTR19_EL0}} // encoding: [0x69,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr20_el0|PMEVCNTR20_EL0}} // encoding: [0x89,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr21_el0|PMEVCNTR21_EL0}} // encoding: [0xa9,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr22_el0|PMEVCNTR22_EL0}} // encoding: [0xc9,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr23_el0|PMEVCNTR23_EL0}} // encoding: [0xe9,0xea,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr24_el0|PMEVCNTR24_EL0}} // encoding: [0x09,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr25_el0|PMEVCNTR25_EL0}} // encoding: [0x29,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr26_el0|PMEVCNTR26_EL0}} // encoding: [0x49,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr27_el0|PMEVCNTR27_EL0}} // encoding: [0x69,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr28_el0|PMEVCNTR28_EL0}} // encoding: [0x89,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr29_el0|PMEVCNTR29_EL0}} // encoding: [0xa9,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevcntr30_el0|PMEVCNTR30_EL0}} // encoding: [0xc9,0xeb,0x3b,0xd5]
+// CHECK: mrs x9, {{pmccfiltr_el0|PMCCFILTR_EL0}} // encoding: [0xe9,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper0_el0|PMEVTYPER0_EL0}} // encoding: [0x09,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper1_el0|PMEVTYPER1_EL0}} // encoding: [0x29,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper2_el0|PMEVTYPER2_EL0}} // encoding: [0x49,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper3_el0|PMEVTYPER3_EL0}} // encoding: [0x69,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper4_el0|PMEVTYPER4_EL0}} // encoding: [0x89,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper5_el0|PMEVTYPER5_EL0}} // encoding: [0xa9,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper6_el0|PMEVTYPER6_EL0}} // encoding: [0xc9,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper7_el0|PMEVTYPER7_EL0}} // encoding: [0xe9,0xec,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper8_el0|PMEVTYPER8_EL0}} // encoding: [0x09,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper9_el0|PMEVTYPER9_EL0}} // encoding: [0x29,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper10_el0|PMEVTYPER10_EL0}} // encoding: [0x49,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper11_el0|PMEVTYPER11_EL0}} // encoding: [0x69,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper12_el0|PMEVTYPER12_EL0}} // encoding: [0x89,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper13_el0|PMEVTYPER13_EL0}} // encoding: [0xa9,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper14_el0|PMEVTYPER14_EL0}} // encoding: [0xc9,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper15_el0|PMEVTYPER15_EL0}} // encoding: [0xe9,0xed,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper16_el0|PMEVTYPER16_EL0}} // encoding: [0x09,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper17_el0|PMEVTYPER17_EL0}} // encoding: [0x29,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper18_el0|PMEVTYPER18_EL0}} // encoding: [0x49,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper19_el0|PMEVTYPER19_EL0}} // encoding: [0x69,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper20_el0|PMEVTYPER20_EL0}} // encoding: [0x89,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper21_el0|PMEVTYPER21_EL0}} // encoding: [0xa9,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper22_el0|PMEVTYPER22_EL0}} // encoding: [0xc9,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper23_el0|PMEVTYPER23_EL0}} // encoding: [0xe9,0xee,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper24_el0|PMEVTYPER24_EL0}} // encoding: [0x09,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper25_el0|PMEVTYPER25_EL0}} // encoding: [0x29,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper26_el0|PMEVTYPER26_EL0}} // encoding: [0x49,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper27_el0|PMEVTYPER27_EL0}} // encoding: [0x69,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper28_el0|PMEVTYPER28_EL0}} // encoding: [0x89,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper29_el0|PMEVTYPER29_EL0}} // encoding: [0xa9,0xef,0x3b,0xd5]
+// CHECK: mrs x9, {{pmevtyper30_el0|PMEVTYPER30_EL0}} // encoding: [0xc9,0xef,0x3b,0xd5]
mrs x12, s3_7_c15_c1_5
mrs x13, s3_2_c11_c15_7
msr s3_0_c15_c0_0, x12
msr s3_7_c11_c13_7, x5
-// CHECK: mrs x12, s3_7_c15_c1_5 // encoding: [0xac,0xf1,0x3f,0xd5]
-// CHECK: mrs x13, s3_2_c11_c15_7 // encoding: [0xed,0xbf,0x3a,0xd5]
-// CHECK: msr s3_0_c15_c0_0, x12 // encoding: [0x0c,0xf0,0x18,0xd5]
-// CHECK: msr s3_7_c11_c13_7, x5 // encoding: [0xe5,0xbd,0x1f,0xd5]
+// CHECK: mrs x12, {{s3_7_c15_c1_5|S3_7_C15_C1_5}} // encoding: [0xac,0xf1,0x3f,0xd5]
+// CHECK: mrs x13, {{s3_2_c11_c15_7|S3_2_C11_C15_7}} // encoding: [0xed,0xbf,0x3a,0xd5]
+// CHECK: msr {{s3_0_c15_c0_0|S3_0_C15_C0_0}}, x12 // encoding: [0x0c,0xf0,0x18,0xd5]
+// CHECK: msr {{s3_7_c11_c13_7|S3_7_C11_C13_7}}, x5 // encoding: [0xe5,0xbd,0x1f,0xd5]
//------------------------------------------------------------------------------
// Unconditional branch (immediate)
@@ -4762,22 +4800,25 @@ _func:
tbz x5, #0, somewhere
tbz xzr, #63, elsewhere
tbnz x5, #45, nowhere
-// CHECK: tbz x5, #0, somewhere // encoding: [0x05'A',A,A,0x36'A']
-// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_a64_tstbr
-// CHECK: tbz xzr, #63, elsewhere // encoding: [0x1f'A',A,0xf8'A',0xb6'A']
-// CHECK: // fixup A - offset: 0, value: elsewhere, kind: fixup_a64_tstbr
-// CHECK: tbnz x5, #45, nowhere // encoding: [0x05'A',A,0x68'A',0xb7'A']
-// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_a64_tstbr
+
+// CHECK: tbz w5, #0, somewhere // encoding: [0bAAA00101,A,0b00000AAA,0x36]
+// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_aarch64_pcrel_branch14
+// CHECK: tbz xzr, #63, elsewhere // encoding: [0bAAA11111,A,0b11111AAA,0xb6]
+// CHECK: // fixup A - offset: 0, value: elsewhere, kind: fixup_aarch64_pcrel_branch14
+// CHECK: tbnz x5, #45, nowhere // encoding: [0bAAA00101,A,0b01101AAA,0xb7]
+// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_aarch64_pcrel_branch14
+
tbnz w3, #2, there
tbnz wzr, #31, nowhere
tbz w5, #12, anywhere
-// CHECK: tbnz w3, #2, there // encoding: [0x03'A',A,0x10'A',0x37'A']
-// CHECK: // fixup A - offset: 0, value: there, kind: fixup_a64_tstbr
-// CHECK: tbnz wzr, #31, nowhere // encoding: [0x1f'A',A,0xf8'A',0x37'A']
-// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_a64_tstbr
-// CHECK: tbz w5, #12, anywhere // encoding: [0x05'A',A,0x60'A',0x36'A']
-// CHECK: // fixup A - offset: 0, value: anywhere, kind: fixup_a64_tstbr
+
+// CHECK: tbnz w3, #2, there // encoding: [0bAAA00011,A,0b00010AAA,0x37]
+// CHECK: // fixup A - offset: 0, value: there, kind: fixup_aarch64_pcrel_branch14
+// CHECK: tbnz wzr, #31, nowhere // encoding: [0bAAA11111,A,0b11111AAA,0x37]
+// CHECK: // fixup A - offset: 0, value: nowhere, kind: fixup_aarch64_pcrel_branch14
+// CHECK: tbz w5, #12, anywhere // encoding: [0bAAA00101,A,0b01100AAA,0x36]
+// CHECK: // fixup A - offset: 0, value: anywhere, kind: fixup_aarch64_pcrel_branch14
//------------------------------------------------------------------------------
// Unconditional branch (immediate)
@@ -4785,10 +4826,11 @@ _func:
b somewhere
bl elsewhere
-// CHECK: b somewhere // encoding: [A,A,A,0x14'A']
-// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_a64_uncondbr
-// CHECK: bl elsewhere // encoding: [A,A,A,0x94'A']
-// CHECK: // fixup A - offset: 0, value: elsewhere, kind: fixup_a64_call
+
+// CHECK: b somewhere // encoding: [A,A,A,0b000101AA]
+// CHECK: // fixup A - offset: 0, value: somewhere, kind: fixup_aarch64_pcrel_branch26
+// CHECK: bl elsewhere // encoding: [A,A,A,0b100101AA]
+// CHECK: // fixup A - offset: 0, value: elsewhere, kind: fixup_aarch64_pcrel_call26
b #4
bl #0
diff --git a/test/MC/AArch64/dot-req-case-insensitive.s b/test/MC/AArch64/dot-req-case-insensitive.s
new file mode 100644
index 000000000000..e68b1012f752
--- /dev/null
+++ b/test/MC/AArch64/dot-req-case-insensitive.s
@@ -0,0 +1,18 @@
+// RUN: llvm-mc -triple=arm64-eabi < %s | FileCheck %s
+_foo:
+ OBJECT .req x2
+ mov x4, OBJECT
+ mov x4, oBjEcT
+ .unreq oBJECT
+
+_foo2:
+ OBJECT .req w5
+ mov w4, OBJECT
+ .unreq OBJECT
+
+// CHECK-LABEL: _foo:
+// CHECK: mov x4, x2
+// CHECK: mov x4, x2
+
+// CHECK-LABEL: _foo2:
+// CHECK: mov w4, w5
diff --git a/test/MC/AArch64/dot-req-diagnostics.s b/test/MC/AArch64/dot-req-diagnostics.s
new file mode 100644
index 000000000000..44065f8d1946
--- /dev/null
+++ b/test/MC/AArch64/dot-req-diagnostics.s
@@ -0,0 +1,37 @@
+// RUN: not llvm-mc -triple aarch64-none-linux-gnu < %s 2>&1 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ERROR %s
+
+bar:
+ fred .req x5
+ fred .req x6
+// CHECK-ERROR: warning: ignoring redefinition of register alias 'fred'
+// CHECK-ERROR: fred .req x6
+// CHECK-ERROR: ^
+
+ ada .req v2.8b
+// CHECK-ERROR: error: vector register without type specifier expected
+// CHECK-ERROR: ada .req v2.8b
+// CHECK-ERROR: ^
+
+ bob .req lisa
+// CHECK-ERROR: error: register name or alias expected
+// CHECK-ERROR: bob .req lisa
+// CHECK-ERROR: ^
+
+ lisa .req x1, 23
+// CHECK-ERROR: error: unexpected input in .req directive
+// CHECK-ERROR: lisa .req x1, 23
+// CHECK-ERROR: ^
+
+ mov bob, fred
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: mov bob, fred
+// CHECK-ERROR: ^
+
+ .unreq 1
+// CHECK-ERROR: error: unexpected input in .unreq directive.
+// CHECK-ERROR: .unreq 1
+// CHECK-ERROR: ^
+
+ mov x1, fred
+// CHECK: mov x1, x5
+// CHECK-NOT: mov x1, x6
diff --git a/test/MC/AArch64/dot-req.s b/test/MC/AArch64/dot-req.s
new file mode 100644
index 000000000000..947f945bded8
--- /dev/null
+++ b/test/MC/AArch64/dot-req.s
@@ -0,0 +1,37 @@
+// RUN: llvm-mc -triple=aarch64-none-linux-gnu -show-encoding < %s | FileCheck %s
+
+bar:
+ fred .req x5
+ mov fred, x11
+ .unreq fred
+ fred .req w6
+ mov w1, fred
+
+ bob .req fred
+ ada .req w1
+ mov ada, bob
+ .unreq bob
+ .unreq fred
+ .unreq ada
+// CHECK: mov x5, x11 // encoding: [0xe5,0x03,0x0b,0xaa]
+// CHECK: mov w1, w6 // encoding: [0xe1,0x03,0x06,0x2a]
+// CHECK: mov w1, w6 // encoding: [0xe1,0x03,0x06,0x2a]
+
+ bob .req b6
+ hanah .req h5
+ sam .req s4
+ dora .req d3
+ quentin .req q2
+ vesna .req v1
+ addv bob, v0.8b
+ mov hanah, v4.h[3]
+ fadd s0, sam, sam
+ fmov d2, dora
+ ldr quentin, [sp]
+ mov v0.8b, vesna.8b
+// CHECK: addv b6, v0.8b // encoding: [0x06,0xb8,0x31,0x0e]
+// CHECK: mov h5, v4.h[3] // encoding: [0x85,0x04,0x0e,0x5e]
+// CHECK: fadd s0, s4, s4 // encoding: [0x80,0x28,0x24,0x1e]
+// CHECK: fmov d2, d3 // encoding: [0x62,0x40,0x60,0x1e]
+// CHECK: ldr q2, [sp] // encoding: [0xe2,0x03,0xc0,0x3d]
+// CHECK: mov v0.8b, v1.8b // encoding: [0x20,0x1c,0xa1,0x0e]
diff --git a/test/MC/AArch64/elf-globaladdress.ll b/test/MC/AArch64/elf-globaladdress.ll
index bc43113fee03..7d031e6a3160 100644
--- a/test/MC/AArch64/elf-globaladdress.ll
+++ b/test/MC/AArch64/elf-globaladdress.ll
@@ -3,7 +3,7 @@
; Also take it on a round-trip through llvm-mc to stretch assembly-parsing's legs:
;; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | \
-;; RUN: llvm-mc -triple=aarch64-none-linux-gnu -filetype=obj -o - | \
+;; RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o - | \
;; RUN: llvm-readobj -h -r | FileCheck -check-prefix=OBJ %s
@var8 = global i8 0
diff --git a/test/MC/AArch64/elf-reloc-addend.s b/test/MC/AArch64/elf-reloc-addend.s
deleted file mode 100644
index 0e7e2cafb721..000000000000
--- a/test/MC/AArch64/elf-reloc-addend.s
+++ /dev/null
@@ -1,8 +0,0 @@
-// RUN: llvm-mc -triple=aarch64-linux-gnu -filetype=obj -o - %s | llvm-objdump -triple=aarch64-linux-gnu -r - | FileCheck %s
-
- add x0, x4, #:lo12:sym
-// CHECK: 0 R_AARCH64_ADD_ABS_LO12_NC sym
- add x3, x5, #:lo12:sym+1
-// CHECK: 4 R_AARCH64_ADD_ABS_LO12_NC sym+1
- add x3, x5, #:lo12:sym-1
-// CHECK: 8 R_AARCH64_ADD_ABS_LO12_NC sym-1
diff --git a/test/MC/AArch64/elf-reloc-condbr.s b/test/MC/AArch64/elf-reloc-condbr.s
deleted file mode 100644
index b70dfa70fb8e..000000000000
--- a/test/MC/AArch64/elf-reloc-condbr.s
+++ /dev/null
@@ -1,10 +0,0 @@
-// RUN: llvm-mc -triple=aarch64-none-linux-gnu -filetype=obj %s -o - | \
-// RUN: llvm-readobj -r | FileCheck -check-prefix=OBJ %s
-
- b.eq somewhere
-
-// OBJ: Relocations [
-// OBJ-NEXT: Section (2) .rela.text {
-// OBJ-NEXT: 0x0 R_AARCH64_CONDBR19 somewhere 0x0
-// OBJ-NEXT: }
-// OBJ-NEXT: ]
diff --git a/test/MC/AArch64/gicv3-regs.s b/test/MC/AArch64/gicv3-regs.s
index f7776514da09..0f5742ee5435 100644
--- a/test/MC/AArch64/gicv3-regs.s
+++ b/test/MC/AArch64/gicv3-regs.s
@@ -56,62 +56,62 @@
mrs x8, ich_lr13_el2
mrs x2, ich_lr14_el2
mrs x8, ich_lr15_el2
-// CHECK: mrs x8, icc_iar1_el1 // encoding: [0x08,0xcc,0x38,0xd5]
-// CHECK: mrs x26, icc_iar0_el1 // encoding: [0x1a,0xc8,0x38,0xd5]
-// CHECK: mrs x2, icc_hppir1_el1 // encoding: [0x42,0xcc,0x38,0xd5]
-// CHECK: mrs x17, icc_hppir0_el1 // encoding: [0x51,0xc8,0x38,0xd5]
-// CHECK: mrs x29, icc_rpr_el1 // encoding: [0x7d,0xcb,0x38,0xd5]
-// CHECK: mrs x4, ich_vtr_el2 // encoding: [0x24,0xcb,0x3c,0xd5]
-// CHECK: mrs x24, ich_eisr_el2 // encoding: [0x78,0xcb,0x3c,0xd5]
-// CHECK: mrs x9, ich_elsr_el2 // encoding: [0xa9,0xcb,0x3c,0xd5]
-// CHECK: mrs x24, icc_bpr1_el1 // encoding: [0x78,0xcc,0x38,0xd5]
-// CHECK: mrs x14, icc_bpr0_el1 // encoding: [0x6e,0xc8,0x38,0xd5]
-// CHECK: mrs x19, icc_pmr_el1 // encoding: [0x13,0x46,0x38,0xd5]
-// CHECK: mrs x23, icc_ctlr_el1 // encoding: [0x97,0xcc,0x38,0xd5]
-// CHECK: mrs x20, icc_ctlr_el3 // encoding: [0x94,0xcc,0x3e,0xd5]
-// CHECK: mrs x28, icc_sre_el1 // encoding: [0xbc,0xcc,0x38,0xd5]
-// CHECK: mrs x25, icc_sre_el2 // encoding: [0xb9,0xc9,0x3c,0xd5]
-// CHECK: mrs x8, icc_sre_el3 // encoding: [0xa8,0xcc,0x3e,0xd5]
-// CHECK: mrs x22, icc_igrpen0_el1 // encoding: [0xd6,0xcc,0x38,0xd5]
-// CHECK: mrs x5, icc_igrpen1_el1 // encoding: [0xe5,0xcc,0x38,0xd5]
-// CHECK: mrs x7, icc_igrpen1_el3 // encoding: [0xe7,0xcc,0x3e,0xd5]
-// CHECK: mrs x22, icc_seien_el1 // encoding: [0x16,0xcd,0x38,0xd5]
-// CHECK: mrs x4, icc_ap0r0_el1 // encoding: [0x84,0xc8,0x38,0xd5]
-// CHECK: mrs x11, icc_ap0r1_el1 // encoding: [0xab,0xc8,0x38,0xd5]
-// CHECK: mrs x27, icc_ap0r2_el1 // encoding: [0xdb,0xc8,0x38,0xd5]
-// CHECK: mrs x21, icc_ap0r3_el1 // encoding: [0xf5,0xc8,0x38,0xd5]
-// CHECK: mrs x2, icc_ap1r0_el1 // encoding: [0x02,0xc9,0x38,0xd5]
-// CHECK: mrs x21, icc_ap1r1_el1 // encoding: [0x35,0xc9,0x38,0xd5]
-// CHECK: mrs x10, icc_ap1r2_el1 // encoding: [0x4a,0xc9,0x38,0xd5]
-// CHECK: mrs x27, icc_ap1r3_el1 // encoding: [0x7b,0xc9,0x38,0xd5]
-// CHECK: mrs x20, ich_ap0r0_el2 // encoding: [0x14,0xc8,0x3c,0xd5]
-// CHECK: mrs x21, ich_ap0r1_el2 // encoding: [0x35,0xc8,0x3c,0xd5]
-// CHECK: mrs x5, ich_ap0r2_el2 // encoding: [0x45,0xc8,0x3c,0xd5]
-// CHECK: mrs x4, ich_ap0r3_el2 // encoding: [0x64,0xc8,0x3c,0xd5]
-// CHECK: mrs x15, ich_ap1r0_el2 // encoding: [0x0f,0xc9,0x3c,0xd5]
-// CHECK: mrs x12, ich_ap1r1_el2 // encoding: [0x2c,0xc9,0x3c,0xd5]
-// CHECK: mrs x27, ich_ap1r2_el2 // encoding: [0x5b,0xc9,0x3c,0xd5]
-// CHECK: mrs x20, ich_ap1r3_el2 // encoding: [0x74,0xc9,0x3c,0xd5]
-// CHECK: mrs x10, ich_hcr_el2 // encoding: [0x0a,0xcb,0x3c,0xd5]
-// CHECK: mrs x27, ich_misr_el2 // encoding: [0x5b,0xcb,0x3c,0xd5]
-// CHECK: mrs x6, ich_vmcr_el2 // encoding: [0xe6,0xcb,0x3c,0xd5]
-// CHECK: mrs x19, ich_vseir_el2 // encoding: [0x93,0xc9,0x3c,0xd5]
-// CHECK: mrs x3, ich_lr0_el2 // encoding: [0x03,0xcc,0x3c,0xd5]
-// CHECK: mrs x1, ich_lr1_el2 // encoding: [0x21,0xcc,0x3c,0xd5]
-// CHECK: mrs x22, ich_lr2_el2 // encoding: [0x56,0xcc,0x3c,0xd5]
-// CHECK: mrs x21, ich_lr3_el2 // encoding: [0x75,0xcc,0x3c,0xd5]
-// CHECK: mrs x6, ich_lr4_el2 // encoding: [0x86,0xcc,0x3c,0xd5]
-// CHECK: mrs x10, ich_lr5_el2 // encoding: [0xaa,0xcc,0x3c,0xd5]
-// CHECK: mrs x11, ich_lr6_el2 // encoding: [0xcb,0xcc,0x3c,0xd5]
-// CHECK: mrs x12, ich_lr7_el2 // encoding: [0xec,0xcc,0x3c,0xd5]
-// CHECK: mrs x0, ich_lr8_el2 // encoding: [0x00,0xcd,0x3c,0xd5]
-// CHECK: mrs x21, ich_lr9_el2 // encoding: [0x35,0xcd,0x3c,0xd5]
-// CHECK: mrs x13, ich_lr10_el2 // encoding: [0x4d,0xcd,0x3c,0xd5]
-// CHECK: mrs x26, ich_lr11_el2 // encoding: [0x7a,0xcd,0x3c,0xd5]
-// CHECK: mrs x1, ich_lr12_el2 // encoding: [0x81,0xcd,0x3c,0xd5]
-// CHECK: mrs x8, ich_lr13_el2 // encoding: [0xa8,0xcd,0x3c,0xd5]
-// CHECK: mrs x2, ich_lr14_el2 // encoding: [0xc2,0xcd,0x3c,0xd5]
-// CHECK: mrs x8, ich_lr15_el2 // encoding: [0xe8,0xcd,0x3c,0xd5]
+// CHECK: mrs x8, {{icc_iar1_el1|ICC_IAR1_EL1}} // encoding: [0x08,0xcc,0x38,0xd5]
+// CHECK: mrs x26, {{icc_iar0_el1|ICC_IAR0_EL1}} // encoding: [0x1a,0xc8,0x38,0xd5]
+// CHECK: mrs x2, {{icc_hppir1_el1|ICC_HPPIR1_EL1}} // encoding: [0x42,0xcc,0x38,0xd5]
+// CHECK: mrs x17, {{icc_hppir0_el1|ICC_HPPIR0_EL1}} // encoding: [0x51,0xc8,0x38,0xd5]
+// CHECK: mrs x29, {{icc_rpr_el1|ICC_RPR_EL1}} // encoding: [0x7d,0xcb,0x38,0xd5]
+// CHECK: mrs x4, {{ich_vtr_el2|ICH_VTR_EL2}} // encoding: [0x24,0xcb,0x3c,0xd5]
+// CHECK: mrs x24, {{ich_eisr_el2|ICH_EISR_EL2}} // encoding: [0x78,0xcb,0x3c,0xd5]
+// CHECK: mrs x9, {{ich_elsr_el2|ICH_ELSR_EL2}} // encoding: [0xa9,0xcb,0x3c,0xd5]
+// CHECK: mrs x24, {{icc_bpr1_el1|ICC_BPR1_EL1}} // encoding: [0x78,0xcc,0x38,0xd5]
+// CHECK: mrs x14, {{icc_bpr0_el1|ICC_BPR0_EL1}} // encoding: [0x6e,0xc8,0x38,0xd5]
+// CHECK: mrs x19, {{icc_pmr_el1|ICC_PMR_EL1}} // encoding: [0x13,0x46,0x38,0xd5]
+// CHECK: mrs x23, {{icc_ctlr_el1|ICC_CTLR_EL1}} // encoding: [0x97,0xcc,0x38,0xd5]
+// CHECK: mrs x20, {{icc_ctlr_el3|ICC_CTLR_EL3}} // encoding: [0x94,0xcc,0x3e,0xd5]
+// CHECK: mrs x28, {{icc_sre_el1|ICC_SRE_EL1}} // encoding: [0xbc,0xcc,0x38,0xd5]
+// CHECK: mrs x25, {{icc_sre_el2|ICC_SRE_EL2}} // encoding: [0xb9,0xc9,0x3c,0xd5]
+// CHECK: mrs x8, {{icc_sre_el3|ICC_SRE_EL3}} // encoding: [0xa8,0xcc,0x3e,0xd5]
+// CHECK: mrs x22, {{icc_igrpen0_el1|ICC_IGRPEN0_EL1}} // encoding: [0xd6,0xcc,0x38,0xd5]
+// CHECK: mrs x5, {{icc_igrpen1_el1|ICC_IGRPEN1_EL1}} // encoding: [0xe5,0xcc,0x38,0xd5]
+// CHECK: mrs x7, {{icc_igrpen1_el3|ICC_IGRPEN1_EL3}} // encoding: [0xe7,0xcc,0x3e,0xd5]
+// CHECK: mrs x22, {{icc_seien_el1|ICC_SEIEN_EL1}} // encoding: [0x16,0xcd,0x38,0xd5]
+// CHECK: mrs x4, {{icc_ap0r0_el1|ICC_AP0R0_EL1}} // encoding: [0x84,0xc8,0x38,0xd5]
+// CHECK: mrs x11, {{icc_ap0r1_el1|ICC_AP0R1_EL1}} // encoding: [0xab,0xc8,0x38,0xd5]
+// CHECK: mrs x27, {{icc_ap0r2_el1|ICC_AP0R2_EL1}} // encoding: [0xdb,0xc8,0x38,0xd5]
+// CHECK: mrs x21, {{icc_ap0r3_el1|ICC_AP0R3_EL1}} // encoding: [0xf5,0xc8,0x38,0xd5]
+// CHECK: mrs x2, {{icc_ap1r0_el1|ICC_AP1R0_EL1}} // encoding: [0x02,0xc9,0x38,0xd5]
+// CHECK: mrs x21, {{icc_ap1r1_el1|ICC_AP1R1_EL1}} // encoding: [0x35,0xc9,0x38,0xd5]
+// CHECK: mrs x10, {{icc_ap1r2_el1|ICC_AP1R2_EL1}} // encoding: [0x4a,0xc9,0x38,0xd5]
+// CHECK: mrs x27, {{icc_ap1r3_el1|ICC_AP1R3_EL1}} // encoding: [0x7b,0xc9,0x38,0xd5]
+// CHECK: mrs x20, {{ich_ap0r0_el2|ICH_AP0R0_EL2}} // encoding: [0x14,0xc8,0x3c,0xd5]
+// CHECK: mrs x21, {{ich_ap0r1_el2|ICH_AP0R1_EL2}} // encoding: [0x35,0xc8,0x3c,0xd5]
+// CHECK: mrs x5, {{ich_ap0r2_el2|ICH_AP0R2_EL2}} // encoding: [0x45,0xc8,0x3c,0xd5]
+// CHECK: mrs x4, {{ich_ap0r3_el2|ICH_AP0R3_EL2}} // encoding: [0x64,0xc8,0x3c,0xd5]
+// CHECK: mrs x15, {{ich_ap1r0_el2|ICH_AP1R0_EL2}} // encoding: [0x0f,0xc9,0x3c,0xd5]
+// CHECK: mrs x12, {{ich_ap1r1_el2|ICH_AP1R1_EL2}} // encoding: [0x2c,0xc9,0x3c,0xd5]
+// CHECK: mrs x27, {{ich_ap1r2_el2|ICH_AP1R2_EL2}} // encoding: [0x5b,0xc9,0x3c,0xd5]
+// CHECK: mrs x20, {{ich_ap1r3_el2|ICH_AP1R3_EL2}} // encoding: [0x74,0xc9,0x3c,0xd5]
+// CHECK: mrs x10, {{ich_hcr_el2|ICH_HCR_EL2}} // encoding: [0x0a,0xcb,0x3c,0xd5]
+// CHECK: mrs x27, {{ich_misr_el2|ICH_MISR_EL2}} // encoding: [0x5b,0xcb,0x3c,0xd5]
+// CHECK: mrs x6, {{ich_vmcr_el2|ICH_VMCR_EL2}} // encoding: [0xe6,0xcb,0x3c,0xd5]
+// CHECK: mrs x19, {{ich_vseir_el2|ICH_VSEIR_EL2}} // encoding: [0x93,0xc9,0x3c,0xd5]
+// CHECK: mrs x3, {{ich_lr0_el2|ICH_LR0_EL2}} // encoding: [0x03,0xcc,0x3c,0xd5]
+// CHECK: mrs x1, {{ich_lr1_el2|ICH_LR1_EL2}} // encoding: [0x21,0xcc,0x3c,0xd5]
+// CHECK: mrs x22, {{ich_lr2_el2|ICH_LR2_EL2}} // encoding: [0x56,0xcc,0x3c,0xd5]
+// CHECK: mrs x21, {{ich_lr3_el2|ICH_LR3_EL2}} // encoding: [0x75,0xcc,0x3c,0xd5]
+// CHECK: mrs x6, {{ich_lr4_el2|ICH_LR4_EL2}} // encoding: [0x86,0xcc,0x3c,0xd5]
+// CHECK: mrs x10, {{ich_lr5_el2|ICH_LR5_EL2}} // encoding: [0xaa,0xcc,0x3c,0xd5]
+// CHECK: mrs x11, {{ich_lr6_el2|ICH_LR6_EL2}} // encoding: [0xcb,0xcc,0x3c,0xd5]
+// CHECK: mrs x12, {{ich_lr7_el2|ICH_LR7_EL2}} // encoding: [0xec,0xcc,0x3c,0xd5]
+// CHECK: mrs x0, {{ich_lr8_el2|ICH_LR8_EL2}} // encoding: [0x00,0xcd,0x3c,0xd5]
+// CHECK: mrs x21, {{ich_lr9_el2|ICH_LR9_EL2}} // encoding: [0x35,0xcd,0x3c,0xd5]
+// CHECK: mrs x13, {{ich_lr10_el2|ICH_LR10_EL2}} // encoding: [0x4d,0xcd,0x3c,0xd5]
+// CHECK: mrs x26, {{ich_lr11_el2|ICH_LR11_EL2}} // encoding: [0x7a,0xcd,0x3c,0xd5]
+// CHECK: mrs x1, {{ich_lr12_el2|ICH_LR12_EL2}} // encoding: [0x81,0xcd,0x3c,0xd5]
+// CHECK: mrs x8, {{ich_lr13_el2|ICH_LR13_EL2}} // encoding: [0xa8,0xcd,0x3c,0xd5]
+// CHECK: mrs x2, {{ich_lr14_el2|ICH_LR14_EL2}} // encoding: [0xc2,0xcd,0x3c,0xd5]
+// CHECK: mrs x8, {{ich_lr15_el2|ICH_LR15_EL2}} // encoding: [0xe8,0xcd,0x3c,0xd5]
msr icc_eoir1_el1, x27
msr icc_eoir0_el1, x5
@@ -167,57 +167,57 @@
msr ich_lr13_el2, x2
msr ich_lr14_el2, x13
msr ich_lr15_el2, x27
-// CHECK: msr icc_eoir1_el1, x27 // encoding: [0x3b,0xcc,0x18,0xd5]
-// CHECK: msr icc_eoir0_el1, x5 // encoding: [0x25,0xc8,0x18,0xd5]
-// CHECK: msr icc_dir_el1, x13 // encoding: [0x2d,0xcb,0x18,0xd5]
-// CHECK: msr icc_sgi1r_el1, x21 // encoding: [0xb5,0xcb,0x18,0xd5]
-// CHECK: msr icc_asgi1r_el1, x25 // encoding: [0xd9,0xcb,0x18,0xd5]
-// CHECK: msr icc_sgi0r_el1, x28 // encoding: [0xfc,0xcb,0x18,0xd5]
-// CHECK: msr icc_bpr1_el1, x7 // encoding: [0x67,0xcc,0x18,0xd5]
-// CHECK: msr icc_bpr0_el1, x9 // encoding: [0x69,0xc8,0x18,0xd5]
-// CHECK: msr icc_pmr_el1, x29 // encoding: [0x1d,0x46,0x18,0xd5]
-// CHECK: msr icc_ctlr_el1, x24 // encoding: [0x98,0xcc,0x18,0xd5]
-// CHECK: msr icc_ctlr_el3, x0 // encoding: [0x80,0xcc,0x1e,0xd5]
-// CHECK: msr icc_sre_el1, x2 // encoding: [0xa2,0xcc,0x18,0xd5]
-// CHECK: msr icc_sre_el2, x5 // encoding: [0xa5,0xc9,0x1c,0xd5]
-// CHECK: msr icc_sre_el3, x10 // encoding: [0xaa,0xcc,0x1e,0xd5]
-// CHECK: msr icc_igrpen0_el1, x22 // encoding: [0xd6,0xcc,0x18,0xd5]
-// CHECK: msr icc_igrpen1_el1, x11 // encoding: [0xeb,0xcc,0x18,0xd5]
-// CHECK: msr icc_igrpen1_el3, x8 // encoding: [0xe8,0xcc,0x1e,0xd5]
-// CHECK: msr icc_seien_el1, x4 // encoding: [0x04,0xcd,0x18,0xd5]
-// CHECK: msr icc_ap0r0_el1, x27 // encoding: [0x9b,0xc8,0x18,0xd5]
-// CHECK: msr icc_ap0r1_el1, x5 // encoding: [0xa5,0xc8,0x18,0xd5]
-// CHECK: msr icc_ap0r2_el1, x20 // encoding: [0xd4,0xc8,0x18,0xd5]
-// CHECK: msr icc_ap0r3_el1, x0 // encoding: [0xe0,0xc8,0x18,0xd5]
-// CHECK: msr icc_ap1r0_el1, x2 // encoding: [0x02,0xc9,0x18,0xd5]
-// CHECK: msr icc_ap1r1_el1, x29 // encoding: [0x3d,0xc9,0x18,0xd5]
-// CHECK: msr icc_ap1r2_el1, x23 // encoding: [0x57,0xc9,0x18,0xd5]
-// CHECK: msr icc_ap1r3_el1, x11 // encoding: [0x6b,0xc9,0x18,0xd5]
-// CHECK: msr ich_ap0r0_el2, x2 // encoding: [0x02,0xc8,0x1c,0xd5]
-// CHECK: msr ich_ap0r1_el2, x27 // encoding: [0x3b,0xc8,0x1c,0xd5]
-// CHECK: msr ich_ap0r2_el2, x7 // encoding: [0x47,0xc8,0x1c,0xd5]
-// CHECK: msr ich_ap0r3_el2, x1 // encoding: [0x61,0xc8,0x1c,0xd5]
-// CHECK: msr ich_ap1r0_el2, x7 // encoding: [0x07,0xc9,0x1c,0xd5]
-// CHECK: msr ich_ap1r1_el2, x12 // encoding: [0x2c,0xc9,0x1c,0xd5]
-// CHECK: msr ich_ap1r2_el2, x14 // encoding: [0x4e,0xc9,0x1c,0xd5]
-// CHECK: msr ich_ap1r3_el2, x13 // encoding: [0x6d,0xc9,0x1c,0xd5]
-// CHECK: msr ich_hcr_el2, x1 // encoding: [0x01,0xcb,0x1c,0xd5]
-// CHECK: msr ich_misr_el2, x10 // encoding: [0x4a,0xcb,0x1c,0xd5]
-// CHECK: msr ich_vmcr_el2, x24 // encoding: [0xf8,0xcb,0x1c,0xd5]
-// CHECK: msr ich_vseir_el2, x29 // encoding: [0x9d,0xc9,0x1c,0xd5]
-// CHECK: msr ich_lr0_el2, x26 // encoding: [0x1a,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr1_el2, x9 // encoding: [0x29,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr2_el2, x18 // encoding: [0x52,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr3_el2, x26 // encoding: [0x7a,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr4_el2, x22 // encoding: [0x96,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr5_el2, x26 // encoding: [0xba,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr6_el2, x27 // encoding: [0xdb,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr7_el2, x8 // encoding: [0xe8,0xcc,0x1c,0xd5]
-// CHECK: msr ich_lr8_el2, x17 // encoding: [0x11,0xcd,0x1c,0xd5]
-// CHECK: msr ich_lr9_el2, x19 // encoding: [0x33,0xcd,0x1c,0xd5]
-// CHECK: msr ich_lr10_el2, x17 // encoding: [0x51,0xcd,0x1c,0xd5]
-// CHECK: msr ich_lr11_el2, x5 // encoding: [0x65,0xcd,0x1c,0xd5]
-// CHECK: msr ich_lr12_el2, x29 // encoding: [0x9d,0xcd,0x1c,0xd5]
-// CHECK: msr ich_lr13_el2, x2 // encoding: [0xa2,0xcd,0x1c,0xd5]
-// CHECK: msr ich_lr14_el2, x13 // encoding: [0xcd,0xcd,0x1c,0xd5]
-// CHECK: msr ich_lr15_el2, x27 // encoding: [0xfb,0xcd,0x1c,0xd5]
+// CHECK: msr {{icc_eoir1_el1|ICC_EOIR1_EL1}}, x27 // encoding: [0x3b,0xcc,0x18,0xd5]
+// CHECK: msr {{icc_eoir0_el1|ICC_EOIR0_EL1}}, x5 // encoding: [0x25,0xc8,0x18,0xd5]
+// CHECK: msr {{icc_dir_el1|ICC_DIR_EL1}}, x13 // encoding: [0x2d,0xcb,0x18,0xd5]
+// CHECK: msr {{icc_sgi1r_el1|ICC_SGI1R_EL1}}, x21 // encoding: [0xb5,0xcb,0x18,0xd5]
+// CHECK: msr {{icc_asgi1r_el1|ICC_ASGI1R_EL1}}, x25 // encoding: [0xd9,0xcb,0x18,0xd5]
+// CHECK: msr {{icc_sgi0r_el1|ICC_SGI0R_EL1}}, x28 // encoding: [0xfc,0xcb,0x18,0xd5]
+// CHECK: msr {{icc_bpr1_el1|ICC_BPR1_EL1}}, x7 // encoding: [0x67,0xcc,0x18,0xd5]
+// CHECK: msr {{icc_bpr0_el1|ICC_BPR0_EL1}}, x9 // encoding: [0x69,0xc8,0x18,0xd5]
+// CHECK: msr {{icc_pmr_el1|ICC_PMR_EL1}}, x29 // encoding: [0x1d,0x46,0x18,0xd5]
+// CHECK: msr {{icc_ctlr_el1|ICC_CTLR_EL1}}, x24 // encoding: [0x98,0xcc,0x18,0xd5]
+// CHECK: msr {{icc_ctlr_el3|ICC_CTLR_EL3}}, x0 // encoding: [0x80,0xcc,0x1e,0xd5]
+// CHECK: msr {{icc_sre_el1|ICC_SRE_EL1}}, x2 // encoding: [0xa2,0xcc,0x18,0xd5]
+// CHECK: msr {{icc_sre_el2|ICC_SRE_EL2}}, x5 // encoding: [0xa5,0xc9,0x1c,0xd5]
+// CHECK: msr {{icc_sre_el3|ICC_SRE_EL3}}, x10 // encoding: [0xaa,0xcc,0x1e,0xd5]
+// CHECK: msr {{icc_igrpen0_el1|ICC_IGRPEN0_EL1}}, x22 // encoding: [0xd6,0xcc,0x18,0xd5]
+// CHECK: msr {{icc_igrpen1_el1|ICC_IGRPEN1_EL1}}, x11 // encoding: [0xeb,0xcc,0x18,0xd5]
+// CHECK: msr {{icc_igrpen1_el3|ICC_IGRPEN1_EL3}}, x8 // encoding: [0xe8,0xcc,0x1e,0xd5]
+// CHECK: msr {{icc_seien_el1|ICC_SEIEN_EL1}}, x4 // encoding: [0x04,0xcd,0x18,0xd5]
+// CHECK: msr {{icc_ap0r0_el1|ICC_AP0R0_EL1}}, x27 // encoding: [0x9b,0xc8,0x18,0xd5]
+// CHECK: msr {{icc_ap0r1_el1|ICC_AP0R1_EL1}}, x5 // encoding: [0xa5,0xc8,0x18,0xd5]
+// CHECK: msr {{icc_ap0r2_el1|ICC_AP0R2_EL1}}, x20 // encoding: [0xd4,0xc8,0x18,0xd5]
+// CHECK: msr {{icc_ap0r3_el1|ICC_AP0R3_EL1}}, x0 // encoding: [0xe0,0xc8,0x18,0xd5]
+// CHECK: msr {{icc_ap1r0_el1|ICC_AP1R0_EL1}}, x2 // encoding: [0x02,0xc9,0x18,0xd5]
+// CHECK: msr {{icc_ap1r1_el1|ICC_AP1R1_EL1}}, x29 // encoding: [0x3d,0xc9,0x18,0xd5]
+// CHECK: msr {{icc_ap1r2_el1|ICC_AP1R2_EL1}}, x23 // encoding: [0x57,0xc9,0x18,0xd5]
+// CHECK: msr {{icc_ap1r3_el1|ICC_AP1R3_EL1}}, x11 // encoding: [0x6b,0xc9,0x18,0xd5]
+// CHECK: msr {{ich_ap0r0_el2|ICH_AP0R0_EL2}}, x2 // encoding: [0x02,0xc8,0x1c,0xd5]
+// CHECK: msr {{ich_ap0r1_el2|ICH_AP0R1_EL2}}, x27 // encoding: [0x3b,0xc8,0x1c,0xd5]
+// CHECK: msr {{ich_ap0r2_el2|ICH_AP0R2_EL2}}, x7 // encoding: [0x47,0xc8,0x1c,0xd5]
+// CHECK: msr {{ich_ap0r3_el2|ICH_AP0R3_EL2}}, x1 // encoding: [0x61,0xc8,0x1c,0xd5]
+// CHECK: msr {{ich_ap1r0_el2|ICH_AP1R0_EL2}}, x7 // encoding: [0x07,0xc9,0x1c,0xd5]
+// CHECK: msr {{ich_ap1r1_el2|ICH_AP1R1_EL2}}, x12 // encoding: [0x2c,0xc9,0x1c,0xd5]
+// CHECK: msr {{ich_ap1r2_el2|ICH_AP1R2_EL2}}, x14 // encoding: [0x4e,0xc9,0x1c,0xd5]
+// CHECK: msr {{ich_ap1r3_el2|ICH_AP1R3_EL2}}, x13 // encoding: [0x6d,0xc9,0x1c,0xd5]
+// CHECK: msr {{ich_hcr_el2|ICH_HCR_EL2}}, x1 // encoding: [0x01,0xcb,0x1c,0xd5]
+// CHECK: msr {{ich_misr_el2|ICH_MISR_EL2}}, x10 // encoding: [0x4a,0xcb,0x1c,0xd5]
+// CHECK: msr {{ich_vmcr_el2|ICH_VMCR_EL2}}, x24 // encoding: [0xf8,0xcb,0x1c,0xd5]
+// CHECK: msr {{ich_vseir_el2|ICH_VSEIR_EL2}}, x29 // encoding: [0x9d,0xc9,0x1c,0xd5]
+// CHECK: msr {{ich_lr0_el2|ICH_LR0_EL2}}, x26 // encoding: [0x1a,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr1_el2|ICH_LR1_EL2}}, x9 // encoding: [0x29,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr2_el2|ICH_LR2_EL2}}, x18 // encoding: [0x52,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr3_el2|ICH_LR3_EL2}}, x26 // encoding: [0x7a,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr4_el2|ICH_LR4_EL2}}, x22 // encoding: [0x96,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr5_el2|ICH_LR5_EL2}}, x26 // encoding: [0xba,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr6_el2|ICH_LR6_EL2}}, x27 // encoding: [0xdb,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr7_el2|ICH_LR7_EL2}}, x8 // encoding: [0xe8,0xcc,0x1c,0xd5]
+// CHECK: msr {{ich_lr8_el2|ICH_LR8_EL2}}, x17 // encoding: [0x11,0xcd,0x1c,0xd5]
+// CHECK: msr {{ich_lr9_el2|ICH_LR9_EL2}}, x19 // encoding: [0x33,0xcd,0x1c,0xd5]
+// CHECK: msr {{ich_lr10_el2|ICH_LR10_EL2}}, x17 // encoding: [0x51,0xcd,0x1c,0xd5]
+// CHECK: msr {{ich_lr11_el2|ICH_LR11_EL2}}, x5 // encoding: [0x65,0xcd,0x1c,0xd5]
+// CHECK: msr {{ich_lr12_el2|ICH_LR12_EL2}}, x29 // encoding: [0x9d,0xcd,0x1c,0xd5]
+// CHECK: msr {{ich_lr13_el2|ICH_LR13_EL2}}, x2 // encoding: [0xa2,0xcd,0x1c,0xd5]
+// CHECK: msr {{ich_lr14_el2|ICH_LR14_EL2}}, x13 // encoding: [0xcd,0xcd,0x1c,0xd5]
+// CHECK: msr {{ich_lr15_el2|ICH_LR15_EL2}}, x27 // encoding: [0xfb,0xcd,0x1c,0xd5]
diff --git a/test/MC/AArch64/ldr-pseudo-diagnostics.s b/test/MC/AArch64/ldr-pseudo-diagnostics.s
new file mode 100644
index 000000000000..e32c51679528
--- /dev/null
+++ b/test/MC/AArch64/ldr-pseudo-diagnostics.s
@@ -0,0 +1,14 @@
+//RUN: not llvm-mc -triple=aarch64-linux-gnu - < %s 2>&1 | FileCheck --check-prefix=CHECK-ERROR %s
+
+// simple test
+.section a, "ax", @progbits
+f1:
+ ldr w0, =0x100000001
+// CHECK-ERROR: error: Immediate too large for register
+// CHECK-ERROR: ldr w0, =0x100000001
+// CHECK-ERROR: ^
+f2:
+ ldr w0, =-0x80000001
+// CHECK-ERROR: error: Immediate too large for register
+// CHECK-ERROR: ldr w0, =-0x80000001
+// CHECK-ERROR: ^
diff --git a/test/MC/AArch64/ldr-pseudo-obj-errors.s b/test/MC/AArch64/ldr-pseudo-obj-errors.s
new file mode 100644
index 000000000000..7f1b64262c4f
--- /dev/null
+++ b/test/MC/AArch64/ldr-pseudo-obj-errors.s
@@ -0,0 +1,13 @@
+//RUN: not llvm-mc -triple=aarch64-linux -filetype=obj %s -o %t1 2> %t2
+//RUN: cat %t2 | FileCheck %s
+
+//These tests look for errors that should be reported for an invalid object
+//layout when the ldr pseudo-instruction is used. They are tested separately
+//from parse errors because they only trigger once the file has been parsed
+//successfully and the object file is about to be written out.
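+//
+//Background note: "ldr x0, =<imm>" expands to an LDR (literal) that loads its
+//value from a constant pool emitted at the end of the section. The literal
+//form only has a PC-relative range of roughly +/-1MiB, so the large .space
+//directive below moves the pool out of range and the fixup fails once the
+//object file is emitted.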
+
+.text
+foo:
+ ldr x0, =0x10111
+ .space 0xdeadb0
+// CHECK: LLVM ERROR: fixup value out of range
diff --git a/test/MC/AArch64/ldr-pseudo.s b/test/MC/AArch64/ldr-pseudo.s
new file mode 100644
index 000000000000..1bdb5d69f6ff
--- /dev/null
+++ b/test/MC/AArch64/ldr-pseudo.s
@@ -0,0 +1,319 @@
+//RUN: llvm-mc -triple=aarch64-linux-gnu %s | FileCheck %s
+
+//
+// Check that large constants are converted to ldr from constant pool
+//
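+// As the checks below show, a constant that fits in a single movz (possibly
+// with a shift) is emitted directly as a mov; other values are loaded from a
+// literal pool entry placed at the end of their section.
+//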
+// simple test
+.section a, "ax", @progbits
+// CHECK-LABEL: f1:
+f1:
+ ldr x0, =0x1234
+// CHECK: movz x0, #0x1234
+ ldr w1, =0x4567
+// CHECK: movz w1, #0x4567
+ ldr x0, =0x12340000
+// CHECK: movz x0, #0x1234, lsl #16
+ ldr w1, =0x45670000
+// CHECK: movz w1, #0x4567, lsl #16
+ ldr x0, =0xabc00000000
+// CHECK: movz x0, #0xabc, lsl #32
+ ldr x0, =0xbeef000000000000
+// CHECK: movz x0, #0xbeef, lsl #48
+
+.section b,"ax",@progbits
+// CHECK-LABEL: f3:
+f3:
+ ldr w0, =0x10001
+// CHECK: ldr w0, .Ltmp[[TMP0:[0-9]+]]
+
+// loading multiple constants
+.section c,"ax",@progbits
+// CHECK-LABEL: f4:
+f4:
+ ldr w0, =0x10002
+// CHECK: ldr w0, .Ltmp[[TMP1:[0-9]+]]
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ ldr w0, =0x10003
+// CHECK: ldr w0, .Ltmp[[TMP2:[0-9]+]]
+ adds x0, x0, #1
+ adds x0, x0, #1
+
+// TODO: the same constants should have the same constant pool location
+.section d,"ax",@progbits
+// CHECK-LABEL: f5:
+f5:
+ ldr w0, =0x10004
+// CHECK: ldr w0, .Ltmp[[TMP3:[0-9]+]]
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ ldr w0, =0x10004
+// CHECK: ldr w0, .Ltmp[[TMP4:[0-9]+]]
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+
+// a section defined in multiple pieces should be merged and use a single constant pool
+.section e,"ax",@progbits
+// CHECK-LABEL: f6:
+f6:
+ ldr w0, =0x10006
+// CHECK: ldr w0, .Ltmp[[TMP5:[0-9]+]]
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+
+.section f, "ax", @progbits
+// CHECK-LABEL: f7:
+f7:
+ adds x0, x0, #1
+ adds x0, x0, #1
+ adds x0, x0, #1
+
+.section e, "ax", @progbits
+// CHECK-LABEL: f8:
+f8:
+ adds x0, x0, #1
+ ldr w0, =0x10007
+// CHECK: ldr w0, .Ltmp[[TMP6:[0-9]+]]
+ adds x0, x0, #1
+ adds x0, x0, #1
+
+//
+// Check that symbols can be loaded using ldr pseudo
+//
+
+// load an undefined symbol
+.section g,"ax",@progbits
+// CHECK-LABEL: f9:
+f9:
+ ldr w0, =foo
+// CHECK: ldr w0, .Ltmp[[TMP7:[0-9]+]]
+
+// load a symbol from another section
+.section h,"ax",@progbits
+// CHECK-LABEL: f10:
+f10:
+ ldr w0, =f5
+// CHECK: ldr w0, .Ltmp[[TMP8:[0-9]+]]
+
+// load a symbol from the same section
+.section i,"ax",@progbits
+// CHECK-LABEL: f11:
+f11:
+ ldr w0, =f12
+// CHECK: ldr w0, .Ltmp[[TMP9:[0-9]+]]
+ ldr w0,=0x3C000
+// CHECK: ldr w0, .Ltmp[[TMP10:[0-9]+]]
+
+// CHECK-LABEL: f12:
+f12:
+ adds x0, x0, #1
+ adds x0, x0, #1
+
+.section j,"ax",@progbits
+// mix of symbols and constants
+// CHECK-LABEL: f13:
+f13:
+ adds x0, x0, #1
+ adds x0, x0, #1
+ ldr w0, =0x101
+// CHECK: movz w0, #0x101
+ adds x0, x0, #1
+ adds x0, x0, #1
+ ldr w0, =bar
+// CHECK: ldr w0, .Ltmp[[TMP11:[0-9]+]]
+ adds x0, x0, #1
+ adds x0, x0, #1
+//
+// Check for correct usage in other contexts
+//
+
+// usage in macro
+.macro useit_in_a_macro
+ ldr w0, =0x10008
+ ldr w0, =baz
+.endm
+.section k,"ax",@progbits
+// CHECK-LABEL: f14:
+f14:
+ useit_in_a_macro
+// CHECK: ldr w0, .Ltmp[[TMP12:[0-9]+]]
+// CHECK: ldr w0, .Ltmp[[TMP13:[0-9]+]]
+
+// usage with expressions
+.section l, "ax", @progbits
+// CHECK-LABEL: f15:
+f15:
+ ldr w0, =0x10001+8
+// CHECK: ldr w0, .Ltmp[[TMP14:[0-9]+]]
+ adds x0, x0, #1
+ ldr w0, =bar+4
+// CHECK: ldr w0, .Ltmp[[TMP15:[0-9]+]]
+ adds x0, x0, #1
+
+// usage with 64-bit regs
+.section m, "ax", @progbits
+// CHECK-LABEL: f16:
+f16:
+ ldr x0, =0x0102030405060708
+// CHECK: ldr x0, .Ltmp[[TMP16:[0-9]+]]
+ add x0, x0, #1
+ ldr w0, =bar
+// CHECK: ldr w0, .Ltmp[[TMP17:[0-9]+]]
+ ldr x0, =bar+16
+// CHECK: ldr x0, .Ltmp[[TMP18:[0-9]+]]
+ add x0, x0, #1
+ ldr x0, =0x100000001
+// CHECK: ldr x0, .Ltmp[[TMP19:[0-9]+]]
+ ldr x1, =-0x80000001
+// CHECK: ldr x1, .Ltmp[[TMP20:[0-9]+]]
+ ldr x2, =0x10001
+// CHECK: ldr x2, .Ltmp[[TMP21:[0-9]+]]
+
+// check range for 32-bit regs
+.section n, "ax", @progbits
+// CHECK-LABEL: f17:
+f17:
+ ldr w0, =0xFFFFFFFF
+// CHECK: ldr w0, .Ltmp[[TMP22:[0-9]+]]
+ add w0, w0, #1
+ ldr w1, =-0x7FFFFFFF
+// CHECK: ldr w1, .Ltmp[[TMP23:[0-9]+]]
+ add w0, w0, #1
+ ldr w0, =-1
+// CHECK: ldr w0, .Ltmp[[TMP24:[0-9]+]]
+ add w0, w0, #1
+
+// make sure the same constant uses different pools for 32- and 64-bit registers
+.section o, "ax", @progbits
+// CHECK-LABEL: f18:
+f18:
+ ldr w0, =0x320064
+// CHECK: ldr w0, .Ltmp[[TMP25:[0-9]+]]
+ add w0, w0, #1
+ ldr x1, =0x320064
+// CHECK: ldr x1, .Ltmp[[TMP26:[0-9]+]]
+
+//
+// Constant Pools
+//
+// CHECK: .section b,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP0]]
+// CHECK: .word 65537
+
+// CHECK: .section c,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP1]]
+// CHECK: .word 65538
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP2]]
+// CHECK: .word 65539
+
+// CHECK: .section d,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP3]]
+// CHECK: .word 65540
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP4]]
+// CHECK: .word 65540
+
+// CHECK: .section e,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP5]]
+// CHECK: .word 65542
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP6]]
+// CHECK: .word 65543
+
+// Should not switch to section f because it has no constant pool
+// CHECK-NOT: .section f,"ax",@progbits
+
+// CHECK: .section g,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP7]]
+// CHECK: .word foo
+
+// CHECK: .section h,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP8]]
+// CHECK: .word f5
+
+// CHECK: .section i,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP9]]
+// CHECK: .word f12
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP10]]
+// CHECK: .word 245760
+
+// CHECK: .section j,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP11]]
+// CHECK: .word bar
+
+// CHECK: .section k,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP12]]
+// CHECK: .word 65544
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP13]]
+// CHECK: .word baz
+
+// CHECK: .section l,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP14]]
+// CHECK: .word 65545
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP15]]
+// CHECK: .word bar+4
+
+// CHECK: .section m,"ax",@progbits
+// CHECK: .align 3
+// CHECK: .Ltmp[[TMP16]]
+// CHECK: .xword 72623859790382856
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP17]]
+// CHECK: .word bar
+// CHECK: .align 3
+// CHECK: .Ltmp[[TMP18]]
+// CHECK: .xword bar+16
+// CHECK: .align 3
+// CHECK: .Ltmp[[TMP19]]
+// CHECK: .xword 4294967297
+// CHECK: .align 3
+// CHECK: .Ltmp[[TMP20]]
+// CHECK: .xword -2147483649
+// CHECK: .align 3
+// CHECK: .Ltmp[[TMP21]]
+// CHECK: .xword 65537
+
+// CHECK: .section n,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP22]]
+// CHECK: .word 4294967295
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP23]]
+// CHECK: .word -2147483647
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP24]]
+// CHECK: .word -1
+
+// CHECK: .section o,"ax",@progbits
+// CHECK: .align 2
+// CHECK: .Ltmp[[TMP25]]
+// CHECK: .word 3276900
+// CHECK: .align 3
+// CHECK: .Ltmp[[TMP26]]
+// CHECK: .xword 3276900
diff --git a/test/MC/AArch64/lit.local.cfg b/test/MC/AArch64/lit.local.cfg
index 75dba81bc0b5..5822b7226687 100644
--- a/test/MC/AArch64/lit.local.cfg
+++ b/test/MC/AArch64/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'AArch64' in targets:
- config.unsupported = True \ No newline at end of file
+if 'AArch64' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/MC/AArch64/neon-2velem.s b/test/MC/AArch64/neon-2velem.s
index cde792a2fb65..04841d0164f2 100644
--- a/test/MC/AArch64/neon-2velem.s
+++ b/test/MC/AArch64/neon-2velem.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
diff --git a/test/MC/AArch64/neon-3vdiff.s b/test/MC/AArch64/neon-3vdiff.s
index 3ff86bfd6a40..fc3215b4b671 100644
--- a/test/MC/AArch64/neon-3vdiff.s
+++ b/test/MC/AArch64/neon-3vdiff.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+crypto -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
diff --git a/test/MC/AArch64/neon-across.s b/test/MC/AArch64/neon-across.s
index 8b1c2d421ba6..60b766d8c881 100644
--- a/test/MC/AArch64/neon-across.s
+++ b/test/MC/AArch64/neon-across.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
diff --git a/test/MC/AArch64/neon-compare-instructions.s b/test/MC/AArch64/neon-compare-instructions.s
index e4bc20258357..19cfaf1f4d36 100644
--- a/test/MC/AArch64/neon-compare-instructions.s
+++ b/test/MC/AArch64/neon-compare-instructions.s
@@ -255,13 +255,13 @@
cmeq v9.4s, v7.4s, #0
cmeq v3.2d, v31.2d, #0
-// CHECK: cmeq v0.8b, v15.8b, #0x0 // encoding: [0xe0,0x99,0x20,0x0e]
-// CHECK: cmeq v1.16b, v31.16b, #0x0 // encoding: [0xe1,0x9b,0x20,0x4e]
-// CHECK: cmeq v15.4h, v16.4h, #0x0 // encoding: [0x0f,0x9a,0x60,0x0e]
-// CHECK: cmeq v5.8h, v6.8h, #0x0 // encoding: [0xc5,0x98,0x60,0x4e]
-// CHECK: cmeq v29.2s, v27.2s, #0x0 // encoding: [0x7d,0x9b,0xa0,0x0e]
-// CHECK: cmeq v9.4s, v7.4s, #0x0 // encoding: [0xe9,0x98,0xa0,0x4e]
-// CHECK: cmeq v3.2d, v31.2d, #0x0 // encoding: [0xe3,0x9b,0xe0,0x4e]
+// CHECK: cmeq v0.8b, v15.8b, #{{0x0|0}} // encoding: [0xe0,0x99,0x20,0x0e]
+// CHECK: cmeq v1.16b, v31.16b, #{{0x0|0}} // encoding: [0xe1,0x9b,0x20,0x4e]
+// CHECK: cmeq v15.4h, v16.4h, #{{0x0|0}} // encoding: [0x0f,0x9a,0x60,0x0e]
+// CHECK: cmeq v5.8h, v6.8h, #{{0x0|0}} // encoding: [0xc5,0x98,0x60,0x4e]
+// CHECK: cmeq v29.2s, v27.2s, #{{0x0|0}} // encoding: [0x7d,0x9b,0xa0,0x0e]
+// CHECK: cmeq v9.4s, v7.4s, #{{0x0|0}} // encoding: [0xe9,0x98,0xa0,0x4e]
+// CHECK: cmeq v3.2d, v31.2d, #{{0x0|0}} // encoding: [0xe3,0x9b,0xe0,0x4e]
//----------------------------------------------------------------------
// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
@@ -274,13 +274,13 @@
cmge v17.4s, v20.4s, #0
cmge v3.2d, v31.2d, #0
-// CHECK: cmge v0.8b, v15.8b, #0x0 // encoding: [0xe0,0x89,0x20,0x2e]
-// CHECK: cmge v1.16b, v31.16b, #0x0 // encoding: [0xe1,0x8b,0x20,0x6e]
-// CHECK: cmge v15.4h, v16.4h, #0x0 // encoding: [0x0f,0x8a,0x60,0x2e]
-// CHECK: cmge v5.8h, v6.8h, #0x0 // encoding: [0xc5,0x88,0x60,0x6e]
-// CHECK: cmge v29.2s, v27.2s, #0x0 // encoding: [0x7d,0x8b,0xa0,0x2e]
-// CHECK: cmge v17.4s, v20.4s, #0x0 // encoding: [0x91,0x8a,0xa0,0x6e]
-// CHECK: cmge v3.2d, v31.2d, #0x0 // encoding: [0xe3,0x8b,0xe0,0x6e]
+// CHECK: cmge v0.8b, v15.8b, #{{0x0|0}} // encoding: [0xe0,0x89,0x20,0x2e]
+// CHECK: cmge v1.16b, v31.16b, #{{0x0|0}} // encoding: [0xe1,0x8b,0x20,0x6e]
+// CHECK: cmge v15.4h, v16.4h, #{{0x0|0}} // encoding: [0x0f,0x8a,0x60,0x2e]
+// CHECK: cmge v5.8h, v6.8h, #{{0x0|0}} // encoding: [0xc5,0x88,0x60,0x6e]
+// CHECK: cmge v29.2s, v27.2s, #{{0x0|0}} // encoding: [0x7d,0x8b,0xa0,0x2e]
+// CHECK: cmge v17.4s, v20.4s, #{{0x0|0}} // encoding: [0x91,0x8a,0xa0,0x6e]
+// CHECK: cmge v3.2d, v31.2d, #{{0x0|0}} // encoding: [0xe3,0x8b,0xe0,0x6e]
//----------------------------------------------------------------------
// Vector Compare Mask Greater Than Zero (Signed Integer)
@@ -294,13 +294,13 @@
cmgt v9.4s, v7.4s, #0
cmgt v3.2d, v31.2d, #0
-// CHECK: cmgt v0.8b, v15.8b, #0x0 // encoding: [0xe0,0x89,0x20,0x0e]
-// CHECK: cmgt v1.16b, v31.16b, #0x0 // encoding: [0xe1,0x8b,0x20,0x4e]
-// CHECK: cmgt v15.4h, v16.4h, #0x0 // encoding: [0x0f,0x8a,0x60,0x0e]
-// CHECK: cmgt v5.8h, v6.8h, #0x0 // encoding: [0xc5,0x88,0x60,0x4e]
-// CHECK: cmgt v29.2s, v27.2s, #0x0 // encoding: [0x7d,0x8b,0xa0,0x0e]
-// CHECK: cmgt v9.4s, v7.4s, #0x0 // encoding: [0xe9,0x88,0xa0,0x4e]
-// CHECK: cmgt v3.2d, v31.2d, #0x0 // encoding: [0xe3,0x8b,0xe0,0x4e]
+// CHECK: cmgt v0.8b, v15.8b, #{{0x0|0}} // encoding: [0xe0,0x89,0x20,0x0e]
+// CHECK: cmgt v1.16b, v31.16b, #{{0x0|0}} // encoding: [0xe1,0x8b,0x20,0x4e]
+// CHECK: cmgt v15.4h, v16.4h, #{{0x0|0}} // encoding: [0x0f,0x8a,0x60,0x0e]
+// CHECK: cmgt v5.8h, v6.8h, #{{0x0|0}} // encoding: [0xc5,0x88,0x60,0x4e]
+// CHECK: cmgt v29.2s, v27.2s, #{{0x0|0}} // encoding: [0x7d,0x8b,0xa0,0x0e]
+// CHECK: cmgt v9.4s, v7.4s, #{{0x0|0}} // encoding: [0xe9,0x88,0xa0,0x4e]
+// CHECK: cmgt v3.2d, v31.2d, #{{0x0|0}} // encoding: [0xe3,0x8b,0xe0,0x4e]
//----------------------------------------------------------------------
// Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
@@ -313,13 +313,13 @@
cmle v9.4s, v7.4s, #0
cmle v3.2d, v31.2d, #0
-// CHECK: cmle v0.8b, v15.8b, #0x0 // encoding: [0xe0,0x99,0x20,0x2e]
-// CHECK: cmle v1.16b, v31.16b, #0x0 // encoding: [0xe1,0x9b,0x20,0x6e]
-// CHECK: cmle v15.4h, v16.4h, #0x0 // encoding: [0x0f,0x9a,0x60,0x2e]
-// CHECK: cmle v5.8h, v6.8h, #0x0 // encoding: [0xc5,0x98,0x60,0x6e]
-// CHECK: cmle v29.2s, v27.2s, #0x0 // encoding: [0x7d,0x9b,0xa0,0x2e]
-// CHECK: cmle v9.4s, v7.4s, #0x0 // encoding: [0xe9,0x98,0xa0,0x6e]
-// CHECK: cmle v3.2d, v31.2d, #0x0 // encoding: [0xe3,0x9b,0xe0,0x6e]
+// CHECK: cmle v0.8b, v15.8b, #{{0x0|0}} // encoding: [0xe0,0x99,0x20,0x2e]
+// CHECK: cmle v1.16b, v31.16b, #{{0x0|0}} // encoding: [0xe1,0x9b,0x20,0x6e]
+// CHECK: cmle v15.4h, v16.4h, #{{0x0|0}} // encoding: [0x0f,0x9a,0x60,0x2e]
+// CHECK: cmle v5.8h, v6.8h, #{{0x0|0}} // encoding: [0xc5,0x98,0x60,0x6e]
+// CHECK: cmle v29.2s, v27.2s, #{{0x0|0}} // encoding: [0x7d,0x9b,0xa0,0x2e]
+// CHECK: cmle v9.4s, v7.4s, #{{0x0|0}} // encoding: [0xe9,0x98,0xa0,0x6e]
+// CHECK: cmle v3.2d, v31.2d, #{{0x0|0}} // encoding: [0xe3,0x9b,0xe0,0x6e]
//----------------------------------------------------------------------
// Vector Compare Mask Less Than Zero (Signed Integer)
@@ -332,13 +332,13 @@
cmlt v9.4s, v7.4s, #0
cmlt v3.2d, v31.2d, #0
-// CHECK: cmlt v0.8b, v15.8b, #0x0 // encoding: [0xe0,0xa9,0x20,0x0e]
-// CHECK: cmlt v1.16b, v31.16b, #0x0 // encoding: [0xe1,0xab,0x20,0x4e]
-// CHECK: cmlt v15.4h, v16.4h, #0x0 // encoding: [0x0f,0xaa,0x60,0x0e]
-// CHECK: cmlt v5.8h, v6.8h, #0x0 // encoding: [0xc5,0xa8,0x60,0x4e]
-// CHECK: cmlt v29.2s, v27.2s, #0x0 // encoding: [0x7d,0xab,0xa0,0x0e]
-// CHECK: cmlt v9.4s, v7.4s, #0x0 // encoding: [0xe9,0xa8,0xa0,0x4e]
-// CHECK: cmlt v3.2d, v31.2d, #0x0 // encoding: [0xe3,0xab,0xe0,0x4e]
+// CHECK: cmlt v0.8b, v15.8b, #{{0x0|0}} // encoding: [0xe0,0xa9,0x20,0x0e]
+// CHECK: cmlt v1.16b, v31.16b, #{{0x0|0}} // encoding: [0xe1,0xab,0x20,0x4e]
+// CHECK: cmlt v15.4h, v16.4h, #{{0x0|0}} // encoding: [0x0f,0xaa,0x60,0x0e]
+// CHECK: cmlt v5.8h, v6.8h, #{{0x0|0}} // encoding: [0xc5,0xa8,0x60,0x4e]
+// CHECK: cmlt v29.2s, v27.2s, #{{0x0|0}} // encoding: [0x7d,0xab,0xa0,0x0e]
+// CHECK: cmlt v9.4s, v7.4s, #{{0x0|0}} // encoding: [0xe9,0xa8,0xa0,0x4e]
+// CHECK: cmlt v3.2d, v31.2d, #{{0x0|0}} // encoding: [0xe3,0xab,0xe0,0x4e]
//----------------------------------------------------------------------
// Vector Compare Mask Equal to Zero (Floating Point)
@@ -346,10 +346,16 @@
fcmeq v0.2s, v31.2s, #0.0
fcmeq v4.4s, v7.4s, #0.0
fcmeq v29.2d, v2.2d, #0.0
+ fcmeq v0.2s, v31.2s, #0
+ fcmeq v4.4s, v7.4s, #0
+ fcmeq v29.2d, v2.2d, #0
// CHECK: fcmeq v0.2s, v31.2s, #0.0 // encoding: [0xe0,0xdb,0xa0,0x0e]
// CHECK: fcmeq v4.4s, v7.4s, #0.0 // encoding: [0xe4,0xd8,0xa0,0x4e]
// CHECK: fcmeq v29.2d, v2.2d, #0.0 // encoding: [0x5d,0xd8,0xe0,0x4e]
+// CHECK: fcmeq v0.2s, v31.2s, #0.0 // encoding: [0xe0,0xdb,0xa0,0x0e]
+// CHECK: fcmeq v4.4s, v7.4s, #0.0 // encoding: [0xe4,0xd8,0xa0,0x4e]
+// CHECK: fcmeq v29.2d, v2.2d, #0.0 // encoding: [0x5d,0xd8,0xe0,0x4e]
//----------------------------------------------------------------------
// Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
@@ -357,10 +363,16 @@
fcmge v31.4s, v29.4s, #0.0
fcmge v3.2s, v8.2s, #0.0
fcmge v17.2d, v15.2d, #0.0
+ fcmge v31.4s, v29.4s, #0
+ fcmge v3.2s, v8.2s, #0
+ fcmge v17.2d, v15.2d, #0
// CHECK: fcmge v31.4s, v29.4s, #0.0 // encoding: [0xbf,0xcb,0xa0,0x6e]
// CHECK: fcmge v3.2s, v8.2s, #0.0 // encoding: [0x03,0xc9,0xa0,0x2e]
// CHECK: fcmge v17.2d, v15.2d, #0.0 // encoding: [0xf1,0xc9,0xe0,0x6e]
+// CHECK: fcmge v31.4s, v29.4s, #0.0 // encoding: [0xbf,0xcb,0xa0,0x6e]
+// CHECK: fcmge v3.2s, v8.2s, #0.0 // encoding: [0x03,0xc9,0xa0,0x2e]
+// CHECK: fcmge v17.2d, v15.2d, #0.0 // encoding: [0xf1,0xc9,0xe0,0x6e]
//----------------------------------------------------------------------
// Vector Compare Mask Greater Than Zero (Floating Point)
@@ -368,10 +380,16 @@
fcmgt v0.2s, v31.2s, #0.0
fcmgt v4.4s, v7.4s, #0.0
fcmgt v29.2d, v2.2d, #0.0
+ fcmgt v0.2s, v31.2s, #0
+ fcmgt v4.4s, v7.4s, #0
+ fcmgt v29.2d, v2.2d, #0
// CHECK: fcmgt v0.2s, v31.2s, #0.0 // encoding: [0xe0,0xcb,0xa0,0x0e]
// CHECK: fcmgt v4.4s, v7.4s, #0.0 // encoding: [0xe4,0xc8,0xa0,0x4e]
// CHECK: fcmgt v29.2d, v2.2d, #0.0 // encoding: [0x5d,0xc8,0xe0,0x4e]
+// CHECK: fcmgt v0.2s, v31.2s, #0.0 // encoding: [0xe0,0xcb,0xa0,0x0e]
+// CHECK: fcmgt v4.4s, v7.4s, #0.0 // encoding: [0xe4,0xc8,0xa0,0x4e]
+// CHECK: fcmgt v29.2d, v2.2d, #0.0 // encoding: [0x5d,0xc8,0xe0,0x4e]
//----------------------------------------------------------------------
// Vector Compare Mask Less Than or Equal To Zero (Floating Point)
@@ -379,10 +397,16 @@
fcmle v1.4s, v8.4s, #0.0
fcmle v3.2s, v20.2s, #0.0
fcmle v7.2d, v13.2d, #0.0
+ fcmle v1.4s, v8.4s, #0
+ fcmle v3.2s, v20.2s, #0
+ fcmle v7.2d, v13.2d, #0
// CHECK: fcmle v1.4s, v8.4s, #0.0 // encoding: [0x01,0xd9,0xa0,0x6e]
// CHECK: fcmle v3.2s, v20.2s, #0.0 // encoding: [0x83,0xda,0xa0,0x2e]
// CHECK: fcmle v7.2d, v13.2d, #0.0 // encoding: [0xa7,0xd9,0xe0,0x6e]
+// CHECK: fcmle v1.4s, v8.4s, #0.0 // encoding: [0x01,0xd9,0xa0,0x6e]
+// CHECK: fcmle v3.2s, v20.2s, #0.0 // encoding: [0x83,0xda,0xa0,0x2e]
+// CHECK: fcmle v7.2d, v13.2d, #0.0 // encoding: [0xa7,0xd9,0xe0,0x6e]
//----------------------------------------------------------------------
// Vector Compare Mask Less Than Zero (Floating Point)
@@ -390,10 +414,16 @@
fcmlt v16.2s, v2.2s, #0.0
fcmlt v15.4s, v4.4s, #0.0
fcmlt v5.2d, v29.2d, #0.0
+ fcmlt v16.2s, v2.2s, #0
+ fcmlt v15.4s, v4.4s, #0
+ fcmlt v5.2d, v29.2d, #0
// CHECK: fcmlt v16.2s, v2.2s, #0.0 // encoding: [0x50,0xe8,0xa0,0x0e]
// CHECK: fcmlt v15.4s, v4.4s, #0.0 // encoding: [0x8f,0xe8,0xa0,0x4e]
// CHECK: fcmlt v5.2d, v29.2d, #0.0 // encoding: [0xa5,0xeb,0xe0,0x4e]
+// CHECK: fcmlt v16.2s, v2.2s, #0.0 // encoding: [0x50,0xe8,0xa0,0x0e]
+// CHECK: fcmlt v15.4s, v4.4s, #0.0 // encoding: [0x8f,0xe8,0xa0,0x4e]
+// CHECK: fcmlt v5.2d, v29.2d, #0.0 // encoding: [0xa5,0xeb,0xe0,0x4e]
diff --git a/test/MC/AArch64/neon-crypto.s b/test/MC/AArch64/neon-crypto.s
index 2952dd5aac29..ed1bf8882648 100644
--- a/test/MC/AArch64/neon-crypto.s
+++ b/test/MC/AArch64/neon-crypto.s
@@ -1,5 +1,5 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -mattr=+crypto -show-encoding < %s | FileCheck %s
-// RUN: not llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s 2>&1 | FileCheck -check-prefix=CHECK-NO-CRYPTO %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -mattr=+crypto -show-encoding < %s | FileCheck %s
+// RUN: not llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s 2>&1 | FileCheck -check-prefix=CHECK-NO-CRYPTO-ARM64 %s
// Check that the assembler can handle the documented syntax for AArch64
@@ -13,6 +13,7 @@
aesimc v0.16b, v1.16b
// CHECK-NO-CRYPTO: error: instruction requires a CPU feature not currently enabled
+// CHECK-NO-CRYPTO-ARM64: error: instruction requires: crypto
// CHECK: aese v0.16b, v1.16b // encoding: [0x20,0x48,0x28,0x4e]
// CHECK: aesd v0.16b, v1.16b // encoding: [0x20,0x58,0x28,0x4e]
// CHECK: aesmc v0.16b, v1.16b // encoding: [0x20,0x68,0x28,0x4e]
diff --git a/test/MC/AArch64/neon-diagnostics.s b/test/MC/AArch64/neon-diagnostics.s
index be6c163741f9..fa1f3caf5ad3 100644
--- a/test/MC/AArch64/neon-diagnostics.s
+++ b/test/MC/AArch64/neon-diagnostics.s
@@ -587,10 +587,11 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmgt v0.2d, v31.2s, v16.2s
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected floating-point constant #0.0 or invalid register type
+
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmgt v4.4s, v7.4s, v15.4h
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected floating-point constant #0.0 or invalid register type
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmlt v29.2d, v5.2d, v2.16b
// CHECK-ERROR: ^
@@ -680,12 +681,15 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmeq v0.16b, v1.16b, #0.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid operand for instruction
+
+
+// CHECK-ERROR: error: expected floating-point constant #0.0
// CHECK-ERROR: fcmeq v0.8b, v1.4h, #1.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: Expected floating-point immediate
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmeq v0.8b, v1.4h, #1
// CHECK-ERROR: ^
+
//----------------------------------------------------------------------
// Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
//----------------------------------------------------------------------
@@ -694,7 +698,7 @@
fcmge v31.4s, v29.2s, #0.0
fcmge v3.8b, v8.2s, #0.0
fcmle v17.8h, v15.2d, #-1.0
- fcmle v17.8h, v15.2d, #0
+ fcmle v17.8h, v15.2d, #2
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmge v31.4s, v29.2s, #0.0
@@ -702,12 +706,15 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmge v3.8b, v8.2s, #0.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid operand for instruction
+
+
+// CHECK-ERROR: error: expected floating-point constant #0.0
// CHECK-ERROR: fcmle v17.8h, v15.2d, #-1.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: Expected floating-point immediate
-// CHECK-ERROR: fcmle v17.8h, v15.2d, #0
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fcmle v17.8h, v15.2d, #2
// CHECK-ERROR: ^
+
//----------------------------------------------------------------------
// Vector Compare Mask Greater Than Zero (Floating Point)
//----------------------------------------------------------------------
@@ -723,10 +730,12 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmgt v4.4s, v7.4h, #0.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected floating-point constant #0.0 or invalid register type
+
+
+// CHECK-ERROR: error: expected floating-point constant #0.0
// CHECK-ERROR: fcmlt v29.2d, v5.2d, #255.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: Expected floating-point immediate
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmlt v29.2d, v5.2d, #255
// CHECK-ERROR: ^
@@ -745,10 +754,12 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmge v3.8b, v8.2s, #0.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected floating-point constant #0.0 or invalid register type
+
+
+// CHECK-ERROR: error: expected floating-point constant #0.0
// CHECK-ERROR: fcmle v17.2d, v15.2d, #15.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: Expected floating-point immediate
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmle v17.2d, v15.2d, #15
// CHECK-ERROR: ^
@@ -767,10 +778,12 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmgt v4.4s, v7.4h, #0.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected floating-point constant #0.0 or invalid register type
+
+
+// CHECK-ERROR: error: expected floating-point constant #0.0
// CHECK-ERROR: fcmlt v29.2d, v5.2d, #16.0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: Expected floating-point immediate
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcmlt v29.2d, v5.2d, #2
// CHECK-ERROR: ^
@@ -1285,22 +1298,24 @@
shl v0.4s, v21.4s, #32
shl v0.2d, v1.2d, #64
-// CHECK-ERROR: error: expected comma before next operand
+
+// CHECK-ERROR: error: unexpected token in argument list
// CHECK-ERROR: shl v0.4s, v15,2s, #3
// CHECK-ERROR: ^
+
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: shl v0.2d, v17.4s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: shl v0.8b, v31.8b, #-1
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: shl v0.8b, v31.8b, #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: shl v0.4s, v21.4s, #32
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: shl v0.2d, v1.2d, #64
// CHECK-ERROR: ^
@@ -1334,25 +1349,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ushll2 v1.4s, v25.4s, #7
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sshll v0.8h, v1.8b, #-1
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sshll v0.8h, v1.8b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: ushll v0.4s, v1.4h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: ushll v0.2d, v1.2s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sshll2 v0.8h, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: sshll2 v0.4s, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: ushll2 v0.2d, v1.4s, #33
// CHECK-ERROR: ^
@@ -1377,16 +1392,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sshr v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sshr v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sshr v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sshr v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: sshr v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1410,16 +1425,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ushr v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: ushr v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: ushr v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: ushr v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ushr v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1443,16 +1458,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ssra v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: ssra v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: ssra v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: ssra v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ssra v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1476,16 +1491,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: usra v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: usra v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: usra v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: usra v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: usra v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1509,16 +1524,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: srshr v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: srshr v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: srshr v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: srshr v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: srshr v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1542,16 +1557,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: urshr v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: urshr v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: urshr v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: urshr v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: urshr v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1575,16 +1590,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: srsra v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: srsra v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: srsra v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: srsra v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: srsra v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1608,16 +1623,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ursra v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: ursra v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: ursra v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: ursra v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ursra v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1641,16 +1656,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sri v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sri v0.16b, v1.16b, #9
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sri v0.8h, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sri v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: sri v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -1674,16 +1689,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sli v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sli v0.16b, v1.16b, #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: sli v0.8h, v1.8h, #16
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: sli v0.4s, v1.4s, #32
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: sli v0.2d, v1.2d, #64
// CHECK-ERROR: ^
@@ -1707,16 +1722,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqshlu v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sqshlu v0.16b, v1.16b, #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: sqshlu v0.8h, v1.8h, #16
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: sqshlu v0.4s, v1.4s, #32
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: sqshlu v0.2d, v1.2d, #64
// CHECK-ERROR: ^
@@ -1740,16 +1755,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqshl v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sqshl v0.16b, v1.16b, #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: sqshl v0.8h, v1.8h, #16
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: sqshl v0.4s, v1.4s, #32
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: sqshl v0.2d, v1.2d, #64
// CHECK-ERROR: ^
@@ -1773,16 +1788,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: uqshl v0.2s, v1.2d, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: uqshl v0.16b, v1.16b, #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: uqshl v0.8h, v1.8h, #16
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: uqshl v0.4s, v1.4s, #32
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: uqshl v0.2d, v1.2d, #64
// CHECK-ERROR: ^
@@ -1805,13 +1820,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: shrn v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: shrn2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: shrn2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: shrn2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -1834,13 +1849,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqshrun v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqshrun2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqshrun2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqshrun2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -1863,13 +1878,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: rshrn v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: rshrn2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: rshrn2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: rshrn2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -1892,13 +1907,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqrshrun v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqrshrun2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqrshrun2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqrshrun2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -1921,13 +1936,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqshrn v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqshrn2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqshrn2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqshrn2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -1950,13 +1965,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: uqshrn v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: uqshrn2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: uqshrn2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: uqshrn2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -1979,13 +1994,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqrshrn v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqrshrn2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqrshrn2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqrshrn2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -2008,13 +2023,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: uqrshrn v0.2s, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: uqrshrn2 v0.16b, v1.8h, #17
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: uqrshrn2 v0.8h, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: uqrshrn2 v0.4s, v1.2d, #65
// CHECK-ERROR: ^
@@ -2037,13 +2052,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: scvtf v0.2d, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: ucvtf v0.2s, v1.2s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: ucvtf v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ucvtf v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -2066,13 +2081,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcvtzs v0.2d, v1.2s, #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: fcvtzu v0.2s, v1.2s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: fcvtzu v0.4s, v1.4s, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: fcvtzu v0.2d, v1.2d, #65
// CHECK-ERROR: ^
@@ -2616,9 +2631,11 @@
pmull2 v0.4s, v1.8h v2.8h
pmull2 v0.2d, v1.4s, v2.4s
-// CHECK-ERROR: error: expected comma before next operand
+
+// CHECK-ERROR: error: unexpected token in argument list
// CHECK-ERROR: pmull2 v0.4s, v1.8h v2.8h
// CHECK-ERROR: ^
+
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: pmull2 v0.2d, v1.4s, v2.4s
// CHECK-ERROR: ^
@@ -2941,19 +2958,19 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: mla v0.2d, v1.2d, v16.d[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mla v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mla v0.4s, v1.4s, v2.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: mla v0.2h, v1.2h, v2.h[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mla v0.4h, v1.4h, v2.h[8]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mla v0.8h, v1.8h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -2975,19 +2992,19 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: mls v0.2d, v1.2d, v16.d[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mls v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mls v0.4s, v1.4s, v2.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: mls v0.2h, v1.2h, v2.h[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mls v0.4h, v1.4h, v2.h[8]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mls v0.8h, v1.8h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3012,22 +3029,22 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmla v0.8h, v1.8h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmla v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmla v0.2s, v1.2s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmla v3.4s, v8.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmla v3.4s, v8.4s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmla v0.2d, v1.2d, v2.d[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmla v0.2d, v1.2d, v22.d[2]
// CHECK-ERROR: ^
@@ -3046,29 +3063,29 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmls v0.8h, v1.8h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmls v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmls v0.2s, v1.2s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmls v3.4s, v8.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmls v3.4s, v8.4s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmls v0.2d, v1.2d, v2.d[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmls v0.2d, v1.2d, v22.d[2]
// CHECK-ERROR: ^
smlal v0.4h, v1.4h, v2.h[2]
smlal v0.4s, v1.4h, v2.h[8]
smlal v0.4s, v1.4h, v16.h[2]
- smlal v0.2s, v1.2s, v2.s[4]
+ smlal v0.2s, v1.2s, v2.s[1]
smlal v0.2d, v1.2s, v2.s[4]
smlal v0.2d, v1.2s, v22.s[4]
smlal2 v0.4h, v1.8h, v1.h[2]
@@ -3081,25 +3098,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlal v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlal v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlal v0.4s, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
-// CHECK-ERROR: smlal v0.2s, v1.2s, v2.s[4]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: smlal v0.2s, v1.2s, v2.s[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlal v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlal v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlal2 v0.4h, v1.8h, v1.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlal2 v0.4s, v1.8h, v1.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3108,17 +3125,17 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlal2 v0.2s, v1.4s, v1.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlal2 v0.2d, v1.4s, v1.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlal2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
smlsl v0.4h, v1.4h, v2.h[2]
smlsl v0.4s, v1.4h, v2.h[8]
smlsl v0.4s, v1.4h, v16.h[2]
- smlsl v0.2s, v1.2s, v2.s[4]
+ smlsl v0.2s, v1.2s, v2.s[1]
smlsl v0.2d, v1.2s, v2.s[4]
smlsl v0.2d, v1.2s, v22.s[4]
smlsl2 v0.4h, v1.8h, v1.h[2]
@@ -3131,25 +3148,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlsl v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlsl v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlsl v0.4s, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
-// CHECK-ERROR: smlsl v0.2s, v1.2s, v2.s[4]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: smlsl v0.2s, v1.2s, v2.s[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlsl v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlsl v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlsl2 v0.4h, v1.8h, v1.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlsl2 v0.4s, v1.8h, v1.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3158,17 +3175,17 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smlsl2 v0.2s, v1.4s, v1.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlsl2 v0.2d, v1.4s, v1.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smlsl2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
umlal v0.4h, v1.4h, v2.h[2]
umlal v0.4s, v1.4h, v2.h[8]
umlal v0.4s, v1.4h, v16.h[2]
- umlal v0.2s, v1.2s, v2.s[4]
+ umlal v0.2s, v1.2s, v2.s[1]
umlal v0.2d, v1.2s, v2.s[4]
umlal v0.2d, v1.2s, v22.s[4]
umlal2 v0.4h, v1.8h, v1.h[2]
@@ -3181,25 +3198,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlal v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlal v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlal v0.4s, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
-// CHECK-ERROR: umlal v0.2s, v1.2s, v2.s[4]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: umlal v0.2s, v1.2s, v2.s[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlal v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlal v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlal2 v0.4h, v1.8h, v1.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlal2 v0.4s, v1.8h, v1.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3208,17 +3225,17 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlal2 v0.2s, v1.4s, v1.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlal2 v0.2d, v1.4s, v1.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlal2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
umlsl v0.4h, v1.4h, v2.h[2]
umlsl v0.4s, v1.4h, v2.h[8]
umlsl v0.4s, v1.4h, v16.h[2]
- umlsl v0.2s, v1.2s, v2.s[4]
+ umlsl v0.2s, v1.2s, v2.s[3]
umlsl v0.2d, v1.2s, v2.s[4]
umlsl v0.2d, v1.2s, v22.s[4]
umlsl2 v0.4h, v1.8h, v1.h[2]
@@ -3231,25 +3248,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlsl v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlsl v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlsl v0.4s, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
-// CHECK-ERROR: umlsl v0.2s, v1.2s, v2.s[4]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: umlsl v0.2s, v1.2s, v2.s[3]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlsl v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlsl v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlsl2 v0.4h, v1.8h, v1.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlsl2 v0.4s, v1.8h, v1.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3258,17 +3275,17 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umlsl2 v0.2s, v1.4s, v1.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlsl2 v0.2d, v1.4s, v1.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umlsl2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
sqdmlal v0.4h, v1.4h, v2.h[2]
sqdmlal v0.4s, v1.4h, v2.h[8]
sqdmlal v0.4s, v1.4h, v16.h[2]
- sqdmlal v0.2s, v1.2s, v2.s[4]
+ sqdmlal v0.2s, v1.2s, v2.s[3]
sqdmlal v0.2d, v1.2s, v2.s[4]
sqdmlal v0.2d, v1.2s, v22.s[4]
sqdmlal2 v0.4h, v1.8h, v1.h[2]
@@ -3281,25 +3298,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlal v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal v0.4s, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
-// CHECK-ERROR: sqdmlal v0.2s, v1.2s, v2.s[4]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sqdmlal v0.2s, v1.2s, v2.s[3]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlal v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlal v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal2 v0.4h, v1.8h, v1.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlal2 v0.4s, v1.8h, v1.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3308,17 +3325,17 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal2 v0.2s, v1.4s, v1.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlal2 v0.2d, v1.4s, v1.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlal2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
sqdmlsl v0.4h, v1.4h, v2.h[2]
sqdmlsl v0.4s, v1.4h, v2.h[8]
sqdmlsl v0.4s, v1.4h, v16.h[2]
- sqdmlsl v0.2s, v1.2s, v2.s[4]
+ sqdmlsl v0.2s, v1.2s, v2.s[3]
sqdmlsl v0.2d, v1.2s, v2.s[4]
sqdmlsl v0.2d, v1.2s, v22.s[4]
sqdmlsl2 v0.4h, v1.8h, v1.h[2]
@@ -3331,25 +3348,25 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlsl v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlsl v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlsl v0.4s, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
-// CHECK-ERROR: sqdmlsl v0.2s, v1.2s, v2.s[4]
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sqdmlsl v0.2s, v1.2s, v2.s[3]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlsl v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlsl v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlsl2 v0.4h, v1.8h, v1.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlsl2 v0.4s, v1.8h, v1.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3358,10 +3375,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlsl2 v0.2s, v1.4s, v1.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlsl2 v0.2d, v1.4s, v1.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlsl2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
@@ -3375,28 +3392,28 @@
mul v0.4s, v1.4s, v22.s[4]
mul v0.2d, v1.2d, v2.d[1]
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mul v0.4h, v1.4h, v2.h[8]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: mul v0.4h, v1.4h, v16.h[8]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mul v0.8h, v1.8h, v2.h[8]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: invalid operand for instruction
// CHECK-ERROR: mul v0.8h, v1.8h, v16.h[8]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mul v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mul v0.2s, v1.2s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mul v0.4s, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: mul v0.4s, v1.4s, v22.s[4]
// CHECK-ERROR: ^
@@ -3414,22 +3431,22 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmul v0.4h, v1.4h, v2.h[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmul v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmul v0.2s, v1.2s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmul v0.4s, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmul v0.4s, v1.4s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmul v0.2d, v1.2d, v2.d[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmul v0.2d, v1.2d, v22.d[2]
// CHECK-ERROR: ^
@@ -3444,22 +3461,22 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmulx v0.4h, v1.4h, v2.h[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmulx v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmulx v0.2s, v1.2s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmulx v0.4s, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmulx v0.4s, v1.4s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmulx v0.2d, v1.2d, v2.d[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmulx v0.2d, v1.2d, v22.d[2]
// CHECK-ERROR: ^
@@ -3479,7 +3496,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smull v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smull v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3488,16 +3505,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smull v0.2s, v1.2s, v2.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smull v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smull v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smull2 v0.4h, v1.8h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smull2 v0.4s, v1.8h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3506,10 +3523,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: smull2 v0.2s, v1.4s, v2.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smull2 v0.2d, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: smull2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
@@ -3529,7 +3546,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umull v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umull v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3538,16 +3555,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umull v0.2s, v1.2s, v2.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umull v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umull v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umull2 v0.4h, v1.8h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umull2 v0.4s, v1.8h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3556,10 +3573,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: umull2 v0.2s, v1.4s, v2.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umull2 v0.2d, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: umull2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
@@ -3579,7 +3596,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmull v0.4h, v1.4h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmull v0.4s, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3588,16 +3605,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmull v0.2s, v1.2s, v2.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmull v0.2d, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmull v0.2d, v1.2s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmull2 v0.4h, v1.8h, v2.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmull2 v0.4s, v1.8h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3606,10 +3623,10 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmull2 v0.2s, v1.4s, v2.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmull2 v0.2d, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmull2 v0.2d, v1.4s, v22.s[4]
// CHECK-ERROR: ^
@@ -3623,28 +3640,28 @@
sqdmulh v0.4s, v1.4s, v22.s[4]
sqdmulh v0.2d, v1.2d, v22.d[1]
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmulh v0.4h, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmulh v0.4h, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmulh v0.8h, v1.8h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmulh v0.8h, v1.8h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmulh v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmulh v0.2s, v1.2s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmulh v0.4s, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmulh v0.4s, v1.4s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3661,28 +3678,28 @@
sqrdmulh v0.4s, v1.4s, v22.s[4]
sqrdmulh v0.2d, v1.2d, v22.d[1]
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqrdmulh v0.4h, v1.4h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqrdmulh v0.4h, v1.4h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqrdmulh v0.8h, v1.8h, v2.h[8]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqrdmulh v0.8h, v1.8h, v16.h[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqrdmulh v0.2s, v1.2s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqrdmulh v0.2s, v1.2s, v22.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqrdmulh v0.4s, v1.4s, v2.s[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqrdmulh v0.4s, v1.4s, v22.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3900,13 +3917,13 @@
ld1 {v4}, [x0]
ld1 {v32.16b}, [x0]
ld1 {v15.8h}, [x32]
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: vector register expected
// CHECK-ERROR: ld1 {x3}, [x2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld1 {v4}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: vector register expected
// CHECK-ERROR: ld1 {v32.16b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3920,13 +3937,13 @@
ld1 {v1.8h-v1.8h}, [x0]
ld1 {v15.8h-v17.4h}, [x15]
ld1 {v0.8b-v2.8b, [x0]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: ld1 {v0.16b, v2.16b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid number of vectors
// CHECK-ERROR: ld1 {v0.8h, v1.8h, v2.8h, v3.8h, v4.8h}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: '{' expected
+// CHECK-ERROR: error: unexpected token in argument list
// CHECK-ERROR: ld1 v0.8b, v1.8b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid number of vectors
@@ -3935,7 +3952,7 @@
// CHECK-ERROR: error: invalid number of vectors
// CHECK-ERROR: ld1 {v1.8h-v1.8h}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld1 {v15.8h-v17.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: '}' expected
@@ -3947,16 +3964,15 @@
ld2 {v15.4h, v16.4h, v17.4h}, [x32]
ld2 {v15.8h-v16.4h}, [x15]
ld2 {v0.2d-v2.2d}, [x0]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld2 {v15.8h, v16.4h}, [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: ld2 {v0.8b, v2.8b}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld2 {v15.4h, v16.4h, v17.4h}, [x32]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld2 {v15.8h-v16.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3968,16 +3984,16 @@
ld3 {v0.8b, v2.8b, v3.8b}, [x0]
ld3 {v15.8h-v17.4h}, [x15]
ld3 {v31.4s-v2.4s}, [sp]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld3 {v15.8h, v16.8h, v17.4h}, [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: ld3 {v0.8b, v2.8b, v3.8b}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld3 {v15.8h-v17.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -3989,16 +4005,16 @@
ld4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31]
ld4 {v15.8h-v18.4h}, [x15]
ld4 {v31.2s-v1.2s}, [x31]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: ld4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid number of vectors
// CHECK-ERROR: ld4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld4 {v15.8h-v18.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4009,13 +4025,13 @@
st1 {v4}, [x0]
st1 {v32.16b}, [x0]
st1 {v15.8h}, [x32]
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: vector register expected
// CHECK-ERROR: st1 {x3}, [x2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st1 {v4}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: vector register expected
// CHECK-ERROR: st1 {v32.16b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4029,13 +4045,13 @@
st1 {v1.8h-v1.8h}, [x0]
st1 {v15.8h-v17.4h}, [x15]
st1 {v0.8b-v2.8b, [x0]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: st1 {v0.16b, v2.16b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid number of vectors
// CHECK-ERROR: st1 {v0.8h, v1.8h, v2.8h, v3.8h, v4.8h}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: '{' expected
+// CHECK-ERROR: error: unexpected token in argument list
// CHECK-ERROR: st1 v0.8b, v1.8b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid number of vectors
@@ -4044,7 +4060,7 @@
// CHECK-ERROR: error: invalid number of vectors
// CHECK-ERROR: st1 {v1.8h-v1.8h}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st1 {v15.8h-v17.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: '}' expected
@@ -4056,16 +4072,16 @@
st2 {v15.4h, v16.4h, v17.4h}, [x30]
st2 {v15.8h-v16.4h}, [x15]
st2 {v0.2d-v2.2d}, [x0]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st2 {v15.8h, v16.4h}, [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: st2 {v0.8b, v2.8b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st2 {v15.4h, v16.4h, v17.4h}, [x30]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st2 {v15.8h-v16.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4077,16 +4093,16 @@
st3 {v0.8b, v2.8b, v3.8b}, [x0]
st3 {v15.8h-v17.4h}, [x15]
st3 {v31.4s-v2.4s}, [sp]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st3 {v15.8h, v16.8h, v17.4h}, [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: st3 {v0.8b, v2.8b, v3.8b}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st3 {v15.8h-v17.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4098,16 +4114,16 @@
st4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31]
st4 {v15.8h-v18.4h}, [x15]
st4 {v31.2s-v1.2s}, [x31]
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: registers must be sequential
// CHECK-ERROR: st4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid number of vectors
// CHECK-ERROR: st4 {v15.4h, v16.4h, v17.4h, v18.4h, v19.4h}, [x31]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st4 {v15.8h-v18.4h}, [x15]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4124,7 +4140,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld1 {v0.16b}, [x0], #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: invalid vector kind qualifier
// CHECK-ERROR: ld1 {v0.8h, v1.16h}, [x0], x1
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4140,7 +4156,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld3 {v5.2s, v6.2s, v7.2s}, [x1], #48
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld4 {v31.2d, v0.2d, v1.2d, v2.1d}, [x3], x1
// CHECK-ERROR: ^
@@ -4150,7 +4166,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st1 {v0.16b}, [x0], #8
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: invalid vector kind qualifier
// CHECK-ERROR: st1 {v0.8h, v1.16h}, [x0], x1
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4166,7 +4182,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st3 {v5.2s, v6.2s, v7.2s}, [x1], #48
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: st4 {v31.2d, v0.2d, v1.2d, v2.1d}, [x3], x1
// CHECK-ERROR: ^
@@ -4178,16 +4194,16 @@
ld2r {v31.4s, v0.2s}, [sp]
ld3r {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
ld4r {v31.2s, v0.2s, v1.2d, v2.2s}, [sp]
-// CHECK-ERROR: error: expected vector type register
+// CHECK-ERROR: error: vector register expected
// CHECK-ERROR: ld1r {x1}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld2r {v31.4s, v0.2s}, [sp]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld3r {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: invalid space between two vectors
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld4r {v31.2s, v0.2s, v1.2d, v2.2s}, [sp]
// CHECK-ERROR: ^
@@ -4199,16 +4215,16 @@
ld2 {v15.h, v16.h}[8], [x15]
ld3 {v31.s, v0.s, v1.s}[-1], [sp]
ld4 {v0.d, v1.d, v2.d, v3.d}[2], [x0]
-// CHECK-ERROR:: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: ld1 {v0.b}[16], [x0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: ld2 {v15.h, v16.h}[8], [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected lane number
+// CHECK-ERROR: error: vector lane must be an integer in range
// CHECK-ERROR: ld3 {v31.s, v0.s, v1.s}[-1], [sp]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: ld4 {v0.d, v1.d, v2.d, v3.d}[2], [x0]
// CHECK-ERROR: ^
@@ -4216,16 +4232,16 @@
st2 {v31.s, v0.s}[3], [8]
st3 {v15.h, v16.h, v17.h}[-1], [x15]
st4 {v0.d, v1.d, v2.d, v3.d}[2], [x0]
-// CHECK-ERROR:: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: st1 {v0.d}[16], [x0]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st2 {v31.s, v0.s}[3], [8]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected lane number
+// CHECK-ERROR: error: vector lane must be an integer in range
// CHECK-ERROR: st3 {v15.h, v16.h, v17.h}[-1], [x15]
// CHECK-ERROR: ^
-// CHECK-ERROR: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: st4 {v0.d, v1.d, v2.d, v3.d}[2], [x0]
// CHECK-ERROR: ^
@@ -4264,7 +4280,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld2 {v15.h, v16.h}[0], [x15], #3
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected the same vector layout
+// CHECK-ERROR: error: mismatched register size suffix
// CHECK-ERROR: ld3 {v31.s, v0.s, v1.d}[0], [sp], x9
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4298,16 +4314,16 @@
ins v20.s[1], s30
ins v1.d[0], d7
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: ins v2.b[16], w1
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: ins v7.h[8], w14
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: ins v20.s[5], w30
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: ins v1.d[2], x7
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -4334,19 +4350,19 @@
smov x14, v6.d[1]
smov x20, v9.d[0]
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR smov w1, v0.b[16]
// CHECK-ERROR ^
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR smov w14, v6.h[8]
// CHECK-ERROR ^
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR smov x1, v0.b[16]
// CHECK-ERROR ^
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR smov x14, v6.h[8]
// CHECK-ERROR ^
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR smov x20, v9.s[5]
// CHECK-ERROR ^
// CHECK-ERROR error: invalid operand for instruction
@@ -4373,16 +4389,16 @@
umov s20, v9.s[2]
umov d7, v18.d[1]
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR umov w1, v0.b[16]
// CHECK-ERROR ^
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR umov w14, v6.h[8]
// CHECK-ERROR ^
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR umov w20, v9.s[5]
// CHECK-ERROR ^
-// CHECK-ERROR error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR umov x7, v18.d[3]
// CHECK-ERROR ^
// CHECK-ERROR error: invalid operand for instruction
@@ -4798,7 +4814,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal s17, h27, s12
// CHECK-ERROR: ^
-// CHECK-ERROR: error: too few operands for instruction
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal d19, s24, d12
// CHECK-ERROR: ^
@@ -4812,7 +4828,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlsl s14, h12, s25
// CHECK-ERROR: ^
-// CHECK-ERROR: error: too few operands for instruction
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlsl d12, s23, d13
// CHECK-ERROR: ^
@@ -4826,7 +4842,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmull s12, h22, s12
// CHECK-ERROR: ^
-// CHECK-ERROR: error: too few operands for instruction
+// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmull d15, s22, d12
// CHECK-ERROR: ^
@@ -4890,7 +4906,7 @@
//----------------------------------------------------------------------
sshr d15, d16, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: sshr d15, d16, #99
// CHECK-ERROR: ^
@@ -4906,7 +4922,7 @@
ushr d10, d17, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ushr d10, d17, #99
// CHECK-ERROR: ^
@@ -4916,7 +4932,7 @@
srshr d19, d18, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: srshr d19, d18, #99
// CHECK-ERROR: ^
@@ -4926,7 +4942,7 @@
urshr d20, d23, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: urshr d20, d23, #99
// CHECK-ERROR: ^
@@ -4936,7 +4952,7 @@
ssra d18, d12, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ssra d18, d12, #99
// CHECK-ERROR: ^
@@ -4946,7 +4962,7 @@
usra d20, d13, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: usra d20, d13, #99
// CHECK-ERROR: ^
@@ -4956,7 +4972,7 @@
srsra d15, d11, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: srsra d15, d11, #99
// CHECK-ERROR: ^
@@ -4966,7 +4982,7 @@
ursra d18, d10, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ursra d18, d10, #99
// CHECK-ERROR: ^
@@ -4976,7 +4992,7 @@
shl d7, d10, #99
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: shl d7, d10, #99
// CHECK-ERROR: ^
@@ -4995,16 +5011,16 @@
sqshl s14, s17, #99
sqshl d15, d16, #99
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sqshl b11, b19, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: sqshl h13, h18, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: sqshl s14, s17, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: sqshl d15, d16, #99
// CHECK-ERROR: ^
@@ -5017,16 +5033,16 @@
uqshl s14, s19, #99
uqshl d15, d12, #99
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: uqshl b18, b15, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: uqshl h11, h18, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: uqshl s14, s19, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: uqshl d15, d12, #99
// CHECK-ERROR: ^
@@ -5039,16 +5055,16 @@
sqshlu s16, s14, #99
sqshlu d11, d13, #99
-// CHECK-ERROR: error: expected integer in range [0, 7]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 7]
// CHECK-ERROR: sqshlu b15, b18, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 15]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 15]
// CHECK-ERROR: sqshlu h19, h17, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 31]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 31]
// CHECK-ERROR: sqshlu s16, s14, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: sqshlu d11, d13, #99
// CHECK-ERROR: ^
@@ -5058,7 +5074,7 @@
sri d10, d12, #99
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: sri d10, d12, #99
// CHECK-ERROR: ^
@@ -5068,7 +5084,7 @@
sli d10, d14, #99
-// CHECK-ERROR: error: expected integer in range [0, 63]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [0, 63]
// CHECK-ERROR: sli d10, d14, #99
// CHECK-ERROR: ^
@@ -5080,13 +5096,13 @@
sqshrn h17, s10, #99
sqshrn s18, d10, #99
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqshrn b10, h15, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqshrn h17, s10, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqshrn s18, d10, #99
// CHECK-ERROR: ^
@@ -5098,13 +5114,13 @@
uqshrn h10, s14, #99
uqshrn s10, d12, #99
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: uqshrn b12, h10, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: uqshrn h10, s14, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: uqshrn s10, d12, #99
// CHECK-ERROR: ^
@@ -5116,13 +5132,13 @@
sqrshrn h15, s10, #99
sqrshrn s15, d12, #99
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqrshrn b10, h13, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqrshrn h15, s10, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqrshrn s15, d12, #99
// CHECK-ERROR: ^
@@ -5134,13 +5150,13 @@
uqrshrn h12, s10, #99
uqrshrn s10, d10, #99
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: uqrshrn b10, h12, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: uqrshrn h12, s10, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: uqrshrn s10, d10, #99
// CHECK-ERROR: ^
@@ -5152,13 +5168,13 @@
sqshrun h20, s14, #99
sqshrun s10, d15, #99
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqshrun b15, h10, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqshrun h20, s14, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqshrun s10, d15, #99
// CHECK-ERROR: ^
@@ -5170,13 +5186,13 @@
sqrshrun h10, s13, #99
sqrshrun s22, d16, #99
-// CHECK-ERROR: error: expected integer in range [1, 8]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 8]
// CHECK-ERROR: sqrshrun b17, h10, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 16]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 16]
// CHECK-ERROR: sqrshrun h10, s13, #99
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: sqrshrun s22, d16, #99
// CHECK-ERROR: ^
@@ -5189,13 +5205,13 @@
scvtf d21, d12, #65
scvtf d21, s12, #31
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: scvtf s22, s13, #0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: scvtf s22, s13, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: scvtf d21, d12, #65
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -5210,10 +5226,10 @@
ucvtf d21, d14, #65
ucvtf d21, s14, #64
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: ucvtf s22, s13, #34
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: ucvtf d21, d14, #65
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -6262,10 +6278,10 @@
fcvtzs d21, d12, #65
fcvtzs s21, d12, #1
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: fcvtzs s21, s12, #0
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: fcvtzs d21, d12, #65
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -6280,10 +6296,10 @@
fcvtzu d21, d12, #0
fcvtzu s21, d12, #1
-// CHECK-ERROR: error: expected integer in range [1, 32]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 32]
// CHECK-ERROR: fcvtzu s21, s12, #33
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected integer in range [1, 64]
+// CHECK-ERROR: error: {{expected|immediate must be an}} integer in range [1, 64]
// CHECK-ERROR: fcvtzu d21, d12, #0
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -6868,7 +6884,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmul h0, h1, v1.s[0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmul s2, s29, v10.s[4]
// CHECK-ERROR: ^
@@ -6887,7 +6903,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmulx h0, h1, v1.d[0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmulx d2, d29, v10.d[3]
// CHECK-ERROR: ^
@@ -6906,7 +6922,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmla d30, s11, v1.d[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: fmla s16, s22, v16.s[5]
// CHECK-ERROR: ^
@@ -6925,7 +6941,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fmls h7, h17, v26.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: expected lane number
+// CHECK-ERROR: error: vector lane must be an integer in range [0, 1]
// CHECK-ERROR: fmls d16, d22, v16.d[-1]
// CHECK-ERROR: ^
@@ -6937,7 +6953,7 @@
sqdmlal s0, h0, v0.s[0]
sqdmlal s8, s9, v14.s[1]
// invalid lane
- sqdmlal s4, s5, v1.s[5]
+ sqdmlal d4, s5, v1.s[5]
// invalid vector index
sqdmlal s0, h0, v17.h[0]
@@ -6947,8 +6963,8 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal s8, s9, v14.s[1]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
-// CHECK-ERROR: sqdmlal s4, s5, v1.s[5]
+// CHECK-ERROR: vector lane must be an integer in range
+// CHECK-ERROR: sqdmlal d4, s5, v1.s[5]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlal s0, h0, v17.h[0]
@@ -6972,7 +6988,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmlsl d1, h1, v13.s[0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmlsl d1, s1, v13.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -6999,7 +7015,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmull s1, s1, v4.s[0]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmull s12, h17, v9.h[9]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -7024,7 +7040,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqdmulh s25, s26, v27.h[3]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqdmulh s25, s26, v27.s[4]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -7049,7 +7065,7 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqrdmulh s5, h6, v7.s[2]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: sqrdmulh h31, h30, v14.h[9]
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
@@ -7081,16 +7097,16 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: dup d0, v17.s[3]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: dup d0, v17.d[4]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: dup s0, v1.s[7]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: dup h0, v31.h[16]
// CHECK-ERROR: ^
-// CHECK-ERROR: error: lane number incompatible with layout
+// CHECK-ERROR: vector lane must be an integer in range
// CHECK-ERROR: dup b1, v3.b[16]
// CHECK-ERROR: ^
diff --git a/test/MC/AArch64/neon-extract.s b/test/MC/AArch64/neon-extract.s
index 2d58a75a4907..1daa46d096ee 100644
--- a/test/MC/AArch64/neon-extract.s
+++ b/test/MC/AArch64/neon-extract.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
@@ -9,5 +9,5 @@
ext v0.8b, v1.8b, v2.8b, #0x3
ext v0.16b, v1.16b, v2.16b, #0x3
-// CHECK: ext v0.8b, v1.8b, v2.8b, #0x3 // encoding: [0x20,0x18,0x02,0x2e]
-// CHECK: ext v0.16b, v1.16b, v2.16b, #0x3 // encoding: [0x20,0x18,0x02,0x6e]
+// CHECK: ext v0.8b, v1.8b, v2.8b, #{{0x3|3}} // encoding: [0x20,0x18,0x02,0x2e]
+// CHECK: ext v0.16b, v1.16b, v2.16b, #{{0x3|3}} // encoding: [0x20,0x18,0x02,0x6e]
diff --git a/test/MC/AArch64/neon-mov.s b/test/MC/AArch64/neon-mov.s
index c2ca80322001..567a5ecc5412 100644
--- a/test/MC/AArch64/neon-mov.s
+++ b/test/MC/AArch64/neon-mov.s
@@ -20,19 +20,19 @@
movi v0.8h, #1
movi v0.8h, #1, lsl #8
-// CHECK: movi v0.2s, #0x1 // encoding: [0x20,0x04,0x00,0x0f]
-// CHECK: movi v1.2s, #0x0 // encoding: [0x01,0x04,0x00,0x0f]
-// CHECK: movi v15.2s, #0x1, lsl #8 // encoding: [0x2f,0x24,0x00,0x0f]
-// CHECK: movi v16.2s, #0x1, lsl #16 // encoding: [0x30,0x44,0x00,0x0f]
-// CHECK: movi v31.2s, #0x1, lsl #24 // encoding: [0x3f,0x64,0x00,0x0f]
-// CHECK: movi v0.4s, #0x1 // encoding: [0x20,0x04,0x00,0x4f]
-// CHECK: movi v0.4s, #0x1, lsl #8 // encoding: [0x20,0x24,0x00,0x4f]
-// CHECK: movi v0.4s, #0x1, lsl #16 // encoding: [0x20,0x44,0x00,0x4f]
-// CHECK: movi v0.4s, #0x1, lsl #24 // encoding: [0x20,0x64,0x00,0x4f]
-// CHECK: movi v0.4h, #0x1 // encoding: [0x20,0x84,0x00,0x0f]
-// CHECK: movi v0.4h, #0x1, lsl #8 // encoding: [0x20,0xa4,0x00,0x0f]
-// CHECK: movi v0.8h, #0x1 // encoding: [0x20,0x84,0x00,0x4f]
-// CHECK: movi v0.8h, #0x1, lsl #8 // encoding: [0x20,0xa4,0x00,0x4f]
+// CHECK: movi v0.2s, #{{0x1|1}} // encoding: [0x20,0x04,0x00,0x0f]
+// CHECK: movi v1.2s, #{{0x0|0}} // encoding: [0x01,0x04,0x00,0x0f]
+// CHECK: movi v15.2s, #{{0x1|1}}, lsl #8 // encoding: [0x2f,0x24,0x00,0x0f]
+// CHECK: movi v16.2s, #{{0x1|1}}, lsl #16 // encoding: [0x30,0x44,0x00,0x0f]
+// CHECK: movi v31.2s, #{{0x1|1}}, lsl #24 // encoding: [0x3f,0x64,0x00,0x0f]
+// CHECK: movi v0.4s, #{{0x1|1}} // encoding: [0x20,0x04,0x00,0x4f]
+// CHECK: movi v0.4s, #{{0x1|1}}, lsl #8 // encoding: [0x20,0x24,0x00,0x4f]
+// CHECK: movi v0.4s, #{{0x1|1}}, lsl #16 // encoding: [0x20,0x44,0x00,0x4f]
+// CHECK: movi v0.4s, #{{0x1|1}}, lsl #24 // encoding: [0x20,0x64,0x00,0x4f]
+// CHECK: movi v0.4h, #{{0x1|1}} // encoding: [0x20,0x84,0x00,0x0f]
+// CHECK: movi v0.4h, #{{0x1|1}}, lsl #8 // encoding: [0x20,0xa4,0x00,0x0f]
+// CHECK: movi v0.8h, #{{0x1|1}} // encoding: [0x20,0x84,0x00,0x4f]
+// CHECK: movi v0.8h, #{{0x1|1}}, lsl #8 // encoding: [0x20,0xa4,0x00,0x4f]
//----------------------------------------------------------------------
// Vector Move Inverted Immediate Shifted
@@ -51,19 +51,19 @@
mvni v0.8h, #1
mvni v0.8h, #1, lsl #8
-// CHECK: mvni v0.2s, #0x1 // encoding: [0x20,0x04,0x00,0x2f]
-// CHECK: mvni v1.2s, #0x0 // encoding: [0x01,0x04,0x00,0x2f]
-// CHECK: mvni v0.2s, #0x1, lsl #8 // encoding: [0x20,0x24,0x00,0x2f]
-// CHECK: mvni v0.2s, #0x1, lsl #16 // encoding: [0x20,0x44,0x00,0x2f]
-// CHECK: mvni v0.2s, #0x1, lsl #24 // encoding: [0x20,0x64,0x00,0x2f]
-// CHECK: mvni v0.4s, #0x1 // encoding: [0x20,0x04,0x00,0x6f]
-// CHECK: mvni v15.4s, #0x1, lsl #8 // encoding: [0x2f,0x24,0x00,0x6f]
-// CHECK: mvni v16.4s, #0x1, lsl #16 // encoding: [0x30,0x44,0x00,0x6f]
-// CHECK: mvni v31.4s, #0x1, lsl #24 // encoding: [0x3f,0x64,0x00,0x6f]
-// CHECK: mvni v0.4h, #0x1 // encoding: [0x20,0x84,0x00,0x2f]
-// CHECK: mvni v0.4h, #0x1, lsl #8 // encoding: [0x20,0xa4,0x00,0x2f]
-// CHECK: mvni v0.8h, #0x1 // encoding: [0x20,0x84,0x00,0x6f]
-// CHECK: mvni v0.8h, #0x1, lsl #8 // encoding: [0x20,0xa4,0x00,0x6f]
+// CHECK: mvni v0.2s, #{{0x1|1}} // encoding: [0x20,0x04,0x00,0x2f]
+// CHECK: mvni v1.2s, #{{0x0|0}} // encoding: [0x01,0x04,0x00,0x2f]
+// CHECK: mvni v0.2s, #{{0x1|1}}, lsl #8 // encoding: [0x20,0x24,0x00,0x2f]
+// CHECK: mvni v0.2s, #{{0x1|1}}, lsl #16 // encoding: [0x20,0x44,0x00,0x2f]
+// CHECK: mvni v0.2s, #{{0x1|1}}, lsl #24 // encoding: [0x20,0x64,0x00,0x2f]
+// CHECK: mvni v0.4s, #{{0x1|1}} // encoding: [0x20,0x04,0x00,0x6f]
+// CHECK: mvni v15.4s, #{{0x1|1}}, lsl #8 // encoding: [0x2f,0x24,0x00,0x6f]
+// CHECK: mvni v16.4s, #{{0x1|1}}, lsl #16 // encoding: [0x30,0x44,0x00,0x6f]
+// CHECK: mvni v31.4s, #{{0x1|1}}, lsl #24 // encoding: [0x3f,0x64,0x00,0x6f]
+// CHECK: mvni v0.4h, #{{0x1|1}} // encoding: [0x20,0x84,0x00,0x2f]
+// CHECK: mvni v0.4h, #{{0x1|1}}, lsl #8 // encoding: [0x20,0xa4,0x00,0x2f]
+// CHECK: mvni v0.8h, #{{0x1|1}} // encoding: [0x20,0x84,0x00,0x6f]
+// CHECK: mvni v0.8h, #{{0x1|1}}, lsl #8 // encoding: [0x20,0xa4,0x00,0x6f]
//----------------------------------------------------------------------
// Vector Bitwise Bit Clear (AND NOT) - immediate
@@ -82,19 +82,19 @@
bic v0.8h, #1
bic v31.8h, #1, lsl #8
-// CHECK: bic v0.2s, #0x1 // encoding: [0x20,0x14,0x00,0x2f]
-// CHECK: bic v1.2s, #0x0 // encoding: [0x01,0x14,0x00,0x2f]
-// CHECK: bic v0.2s, #0x1, lsl #8 // encoding: [0x20,0x34,0x00,0x2f]
-// CHECK: bic v0.2s, #0x1, lsl #16 // encoding: [0x20,0x54,0x00,0x2f]
-// CHECK: bic v0.2s, #0x1, lsl #24 // encoding: [0x20,0x74,0x00,0x2f]
-// CHECK: bic v0.4s, #0x1 // encoding: [0x20,0x14,0x00,0x6f]
-// CHECK: bic v0.4s, #0x1, lsl #8 // encoding: [0x20,0x34,0x00,0x6f]
-// CHECK: bic v0.4s, #0x1, lsl #16 // encoding: [0x20,0x54,0x00,0x6f]
-// CHECK: bic v0.4s, #0x1, lsl #24 // encoding: [0x20,0x74,0x00,0x6f]
-// CHECK: bic v15.4h, #0x1 // encoding: [0x2f,0x94,0x00,0x2f]
-// CHECK: bic v16.4h, #0x1, lsl #8 // encoding: [0x30,0xb4,0x00,0x2f]
-// CHECK: bic v0.8h, #0x1 // encoding: [0x20,0x94,0x00,0x6f]
-// CHECK: bic v31.8h, #0x1, lsl #8 // encoding: [0x3f,0xb4,0x00,0x6f]
+// CHECK: bic v0.2s, #{{0x1|1}} // encoding: [0x20,0x14,0x00,0x2f]
+// CHECK: bic v1.2s, #{{0x0|0}} // encoding: [0x01,0x14,0x00,0x2f]
+// CHECK: bic v0.2s, #{{0x1|1}}, lsl #8 // encoding: [0x20,0x34,0x00,0x2f]
+// CHECK: bic v0.2s, #{{0x1|1}}, lsl #16 // encoding: [0x20,0x54,0x00,0x2f]
+// CHECK: bic v0.2s, #{{0x1|1}}, lsl #24 // encoding: [0x20,0x74,0x00,0x2f]
+// CHECK: bic v0.4s, #{{0x1|1}} // encoding: [0x20,0x14,0x00,0x6f]
+// CHECK: bic v0.4s, #{{0x1|1}}, lsl #8 // encoding: [0x20,0x34,0x00,0x6f]
+// CHECK: bic v0.4s, #{{0x1|1}}, lsl #16 // encoding: [0x20,0x54,0x00,0x6f]
+// CHECK: bic v0.4s, #{{0x1|1}}, lsl #24 // encoding: [0x20,0x74,0x00,0x6f]
+// CHECK: bic v15.4h, #{{0x1|1}} // encoding: [0x2f,0x94,0x00,0x2f]
+// CHECK: bic v16.4h, #{{0x1|1}}, lsl #8 // encoding: [0x30,0xb4,0x00,0x2f]
+// CHECK: bic v0.8h, #{{0x1|1}} // encoding: [0x20,0x94,0x00,0x6f]
+// CHECK: bic v31.8h, #{{0x1|1}}, lsl #8 // encoding: [0x3f,0xb4,0x00,0x6f]
//----------------------------------------------------------------------
// Vector Bitwise OR - immediate
@@ -113,19 +113,19 @@
orr v0.8h, #1
orr v16.8h, #1, lsl #8
-// CHECK: orr v0.2s, #0x1 // encoding: [0x20,0x14,0x00,0x0f]
-// CHECK: orr v1.2s, #0x0 // encoding: [0x01,0x14,0x00,0x0f]
-// CHECK: orr v0.2s, #0x1, lsl #8 // encoding: [0x20,0x34,0x00,0x0f]
-// CHECK: orr v0.2s, #0x1, lsl #16 // encoding: [0x20,0x54,0x00,0x0f]
-// CHECK: orr v0.2s, #0x1, lsl #24 // encoding: [0x20,0x74,0x00,0x0f]
-// CHECK: orr v0.4s, #0x1 // encoding: [0x20,0x14,0x00,0x4f]
-// CHECK: orr v0.4s, #0x1, lsl #8 // encoding: [0x20,0x34,0x00,0x4f]
-// CHECK: orr v0.4s, #0x1, lsl #16 // encoding: [0x20,0x54,0x00,0x4f]
-// CHECK: orr v0.4s, #0x1, lsl #24 // encoding: [0x20,0x74,0x00,0x4f]
-// CHECK: orr v31.4h, #0x1 // encoding: [0x3f,0x94,0x00,0x0f]
-// CHECK: orr v15.4h, #0x1, lsl #8 // encoding: [0x2f,0xb4,0x00,0x0f]
-// CHECK: orr v0.8h, #0x1 // encoding: [0x20,0x94,0x00,0x4f]
-// CHECK: orr v16.8h, #0x1, lsl #8 // encoding: [0x30,0xb4,0x00,0x4f]
+// CHECK: orr v0.2s, #{{0x1|1}} // encoding: [0x20,0x14,0x00,0x0f]
+// CHECK: orr v1.2s, #{{0x0|0}} // encoding: [0x01,0x14,0x00,0x0f]
+// CHECK: orr v0.2s, #{{0x1|1}}, lsl #8 // encoding: [0x20,0x34,0x00,0x0f]
+// CHECK: orr v0.2s, #{{0x1|1}}, lsl #16 // encoding: [0x20,0x54,0x00,0x0f]
+// CHECK: orr v0.2s, #{{0x1|1}}, lsl #24 // encoding: [0x20,0x74,0x00,0x0f]
+// CHECK: orr v0.4s, #{{0x1|1}} // encoding: [0x20,0x14,0x00,0x4f]
+// CHECK: orr v0.4s, #{{0x1|1}}, lsl #8 // encoding: [0x20,0x34,0x00,0x4f]
+// CHECK: orr v0.4s, #{{0x1|1}}, lsl #16 // encoding: [0x20,0x54,0x00,0x4f]
+// CHECK: orr v0.4s, #{{0x1|1}}, lsl #24 // encoding: [0x20,0x74,0x00,0x4f]
+// CHECK: orr v31.4h, #{{0x1|1}} // encoding: [0x3f,0x94,0x00,0x0f]
+// CHECK: orr v15.4h, #{{0x1|1}}, lsl #8 // encoding: [0x2f,0xb4,0x00,0x0f]
+// CHECK: orr v0.8h, #{{0x1|1}} // encoding: [0x20,0x94,0x00,0x4f]
+// CHECK: orr v16.8h, #{{0x1|1}}, lsl #8 // encoding: [0x30,0xb4,0x00,0x4f]
//----------------------------------------------------------------------
// Vector Move Immediate Masked
@@ -135,10 +135,10 @@
movi v0.4s, #1, msl #8
movi v31.4s, #1, msl #16
-// CHECK: movi v0.2s, #0x1, msl #8 // encoding: [0x20,0xc4,0x00,0x0f]
-// CHECK: movi v1.2s, #0x1, msl #16 // encoding: [0x21,0xd4,0x00,0x0f]
-// CHECK: movi v0.4s, #0x1, msl #8 // encoding: [0x20,0xc4,0x00,0x4f]
-// CHECK: movi v31.4s, #0x1, msl #16 // encoding: [0x3f,0xd4,0x00,0x4f]
+// CHECK: movi v0.2s, #{{0x1|1}}, msl #8 // encoding: [0x20,0xc4,0x00,0x0f]
+// CHECK: movi v1.2s, #{{0x1|1}}, msl #16 // encoding: [0x21,0xd4,0x00,0x0f]
+// CHECK: movi v0.4s, #{{0x1|1}}, msl #8 // encoding: [0x20,0xc4,0x00,0x4f]
+// CHECK: movi v31.4s, #{{0x1|1}}, msl #16 // encoding: [0x3f,0xd4,0x00,0x4f]
//----------------------------------------------------------------------
// Vector Move Inverted Immediate Masked
@@ -148,10 +148,10 @@
mvni v31.4s, #0x1, msl #8
mvni v0.4s, #0x1, msl #16
-// CHECK: mvni v1.2s, #0x1, msl #8 // encoding: [0x21,0xc4,0x00,0x2f]
-// CHECK: mvni v0.2s, #0x1, msl #16 // encoding: [0x20,0xd4,0x00,0x2f]
-// CHECK: mvni v31.4s, #0x1, msl #8 // encoding: [0x3f,0xc4,0x00,0x6f]
-// CHECK: mvni v0.4s, #0x1, msl #16 // encoding: [0x20,0xd4,0x00,0x6f]
+// CHECK: mvni v1.2s, #{{0x1|1}}, msl #8 // encoding: [0x21,0xc4,0x00,0x2f]
+// CHECK: mvni v0.2s, #{{0x1|1}}, msl #16 // encoding: [0x20,0xd4,0x00,0x2f]
+// CHECK: mvni v31.4s, #{{0x1|1}}, msl #8 // encoding: [0x3f,0xc4,0x00,0x6f]
+// CHECK: mvni v0.4s, #{{0x1|1}}, msl #16 // encoding: [0x20,0xd4,0x00,0x6f]
//----------------------------------------------------------------------
// Vector Immediate - per byte
@@ -161,10 +161,10 @@
movi v15.16b, #0xf
movi v31.16b, #0x1f
-// CHECK: movi v0.8b, #0x0 // encoding: [0x00,0xe4,0x00,0x0f]
-// CHECK: movi v31.8b, #0xff // encoding: [0xff,0xe7,0x07,0x0f]
-// CHECK: movi v15.16b, #0xf // encoding: [0xef,0xe5,0x00,0x4f]
-// CHECK: movi v31.16b, #0x1f // encoding: [0xff,0xe7,0x00,0x4f]
+// CHECK: movi v0.8b, #{{0x0|0}} // encoding: [0x00,0xe4,0x00,0x0f]
+// CHECK: movi v31.8b, #{{0xff|255}} // encoding: [0xff,0xe7,0x07,0x0f]
+// CHECK: movi v15.16b, #{{0xf|15}} // encoding: [0xef,0xe5,0x00,0x4f]
+// CHECK: movi v31.16b, #{{0x1f|31}} // encoding: [0xff,0xe7,0x00,0x4f]
//----------------------------------------------------------------------
// Vector Move Immediate - bytemask, per doubleword
@@ -187,23 +187,22 @@
fmov v15.4s, #1.0
fmov v31.2d, #1.0
-// CHECK: fmov v1.2s, #1.00000000 // encoding: [0x01,0xf6,0x03,0x0f]
-// CHECK: fmov v15.4s, #1.00000000 // encoding: [0x0f,0xf6,0x03,0x4f]
-// CHECK: fmov v31.2d, #1.00000000 // encoding: [0x1f,0xf6,0x03,0x6f]
+// CHECK: fmov v1.2s, #{{1.00000000|1.000000e\+00}} // encoding: [0x01,0xf6,0x03,0x0f]
+// CHECK: fmov v15.4s, #{{1.00000000|1.000000e\+00}} // encoding: [0x0f,0xf6,0x03,0x4f]
+// CHECK: fmov v31.2d, #{{1.00000000|1.000000e\+00}} // encoding: [0x1f,0xf6,0x03,0x6f]
//----------------------------------------------------------------------
// Vector Move - register
//----------------------------------------------------------------------
- // FIXME: these should all print with the "mov" syntax.
mov v0.8b, v31.8b
mov v15.16b, v16.16b
orr v0.8b, v31.8b, v31.8b
orr v15.16b, v16.16b, v16.16b
-// CHECK: orr v0.8b, v31.8b, v31.8b // encoding: [0xe0,0x1f,0xbf,0x0e]
-// CHECK: orr v15.16b, v16.16b, v16.16b // encoding: [0x0f,0x1e,0xb0,0x4e]
-// CHECK: orr v0.8b, v31.8b, v31.8b // encoding: [0xe0,0x1f,0xbf,0x0e]
-// CHECK: orr v15.16b, v16.16b, v16.16b // encoding: [0x0f,0x1e,0xb0,0x4e]
+// CHECK: mov v0.8b, v31.8b // encoding: [0xe0,0x1f,0xbf,0x0e]
+// CHECK: mov v15.16b, v16.16b // encoding: [0x0f,0x1e,0xb0,0x4e]
+// CHECK: mov v0.8b, v31.8b // encoding: [0xe0,0x1f,0xbf,0x0e]
+// CHECK: mov v15.16b, v16.16b // encoding: [0x0f,0x1e,0xb0,0x4e]
diff --git a/test/MC/AArch64/neon-perm.s b/test/MC/AArch64/neon-perm.s
index 20a4acde37fa..4b28dd01db39 100644
--- a/test/MC/AArch64/neon-perm.s
+++ b/test/MC/AArch64/neon-perm.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
diff --git a/test/MC/AArch64/neon-scalar-compare.s b/test/MC/AArch64/neon-scalar-compare.s
index 55ade0efc258..28de46a7733a 100644
--- a/test/MC/AArch64/neon-scalar-compare.s
+++ b/test/MC/AArch64/neon-scalar-compare.s
@@ -16,7 +16,7 @@
cmeq d20, d21, #0x0
-// CHECK: cmeq d20, d21, #0x0 // encoding: [0xb4,0x9a,0xe0,0x5e]
+// CHECK: cmeq d20, d21, #{{0x0|0}} // encoding: [0xb4,0x9a,0xe0,0x5e]
//----------------------------------------------------------------------
// Scalar Compare Unsigned Higher Or Same
@@ -40,7 +40,7 @@
cmge d20, d21, #0x0
-// CHECK: cmge d20, d21, #0x0 // encoding: [0xb4,0x8a,0xe0,0x7e]
+// CHECK: cmge d20, d21, #{{0x0|0}} // encoding: [0xb4,0x8a,0xe0,0x7e]
//----------------------------------------------------------------------
// Scalar Compare Unsigned Higher
@@ -63,7 +63,7 @@
cmgt d20, d21, #0x0
-// CHECK: cmgt d20, d21, #0x0 // encoding: [0xb4,0x8a,0xe0,0x5e]
+// CHECK: cmgt d20, d21, #{{0x0|0}} // encoding: [0xb4,0x8a,0xe0,0x5e]
//----------------------------------------------------------------------
// Scalar Compare Signed Less Than Or Equal To Zero
@@ -71,7 +71,7 @@
cmle d20, d21, #0x0
-// CHECK: cmle d20, d21, #0x0 // encoding: [0xb4,0x9a,0xe0,0x7e]
+// CHECK: cmle d20, d21, #{{0x0|0}} // encoding: [0xb4,0x9a,0xe0,0x7e]
//----------------------------------------------------------------------
// Scalar Compare Less Than Zero
@@ -79,7 +79,7 @@
cmlt d20, d21, #0x0
-// CHECK: cmlt d20, d21, #0x0 // encoding: [0xb4,0xaa,0xe0,0x5e]
+// CHECK: cmlt d20, d21, #{{0x0|0}} // encoding: [0xb4,0xaa,0xe0,0x5e]
//----------------------------------------------------------------------
// Scalar Compare Bitwise Test Bits
diff --git a/test/MC/AArch64/neon-scalar-dup.s b/test/MC/AArch64/neon-scalar-dup.s
index 77c638df0952..db11ea2aa086 100644
--- a/test/MC/AArch64/neon-scalar-dup.s
+++ b/test/MC/AArch64/neon-scalar-dup.s
@@ -15,17 +15,17 @@
dup d3, v5.d[0]
dup d6, v5.d[1]
-// CHECK: dup b0, v0.b[15] // encoding: [0x00,0x04,0x1f,0x5e]
-// CHECK: dup b1, v0.b[7] // encoding: [0x01,0x04,0x0f,0x5e]
-// CHECK: dup b17, v0.b[0] // encoding: [0x11,0x04,0x01,0x5e]
-// CHECK: dup h5, v31.h[7] // encoding: [0xe5,0x07,0x1e,0x5e]
-// CHECK: dup h9, v1.h[4] // encoding: [0x29,0x04,0x12,0x5e]
-// CHECK: dup h11, v17.h[0] // encoding: [0x2b,0x06,0x02,0x5e]
-// CHECK: dup s2, v2.s[3] // encoding: [0x42,0x04,0x1c,0x5e]
-// CHECK: dup s4, v21.s[0] // encoding: [0xa4,0x06,0x04,0x5e]
-// CHECK: dup s31, v21.s[2] // encoding: [0xbf,0x06,0x14,0x5e]
-// CHECK: dup d3, v5.d[0] // encoding: [0xa3,0x04,0x08,0x5e]
-// CHECK: dup d6, v5.d[1] // encoding: [0xa6,0x04,0x18,0x5e]
+// CHECK: {{dup|mov}} b0, v0.b[15] // encoding: [0x00,0x04,0x1f,0x5e]
+// CHECK: {{dup|mov}} b1, v0.b[7] // encoding: [0x01,0x04,0x0f,0x5e]
+// CHECK: {{dup|mov}} b17, v0.b[0] // encoding: [0x11,0x04,0x01,0x5e]
+// CHECK: {{dup|mov}} h5, v31.h[7] // encoding: [0xe5,0x07,0x1e,0x5e]
+// CHECK: {{dup|mov}} h9, v1.h[4] // encoding: [0x29,0x04,0x12,0x5e]
+// CHECK: {{dup|mov}} h11, v17.h[0] // encoding: [0x2b,0x06,0x02,0x5e]
+// CHECK: {{dup|mov}} s2, v2.s[3] // encoding: [0x42,0x04,0x1c,0x5e]
+// CHECK: {{dup|mov}} s4, v21.s[0] // encoding: [0xa4,0x06,0x04,0x5e]
+// CHECK: {{dup|mov}} s31, v21.s[2] // encoding: [0xbf,0x06,0x14,0x5e]
+// CHECK: {{dup|mov}} d3, v5.d[0] // encoding: [0xa3,0x04,0x08,0x5e]
+// CHECK: {{dup|mov}} d6, v5.d[1] // encoding: [0xa6,0x04,0x18,0x5e]
//------------------------------------------------------------------------------
// Aliases for Duplicate element (scalar)
@@ -42,14 +42,14 @@
mov d3, v5.d[0]
mov d6, v5.d[1]
-// CHECK: dup b0, v0.b[15] // encoding: [0x00,0x04,0x1f,0x5e]
-// CHECK: dup b1, v0.b[7] // encoding: [0x01,0x04,0x0f,0x5e]
-// CHECK: dup b17, v0.b[0] // encoding: [0x11,0x04,0x01,0x5e]
-// CHECK: dup h5, v31.h[7] // encoding: [0xe5,0x07,0x1e,0x5e]
-// CHECK: dup h9, v1.h[4] // encoding: [0x29,0x04,0x12,0x5e]
-// CHECK: dup h11, v17.h[0] // encoding: [0x2b,0x06,0x02,0x5e]
-// CHECK: dup s2, v2.s[3] // encoding: [0x42,0x04,0x1c,0x5e]
-// CHECK: dup s4, v21.s[0] // encoding: [0xa4,0x06,0x04,0x5e]
-// CHECK: dup s31, v21.s[2] // encoding: [0xbf,0x06,0x14,0x5e]
-// CHECK: dup d3, v5.d[0] // encoding: [0xa3,0x04,0x08,0x5e]
-// CHECK: dup d6, v5.d[1] // encoding: [0xa6,0x04,0x18,0x5e]
+// CHECK: {{dup|mov}} b0, v0.b[15] // encoding: [0x00,0x04,0x1f,0x5e]
+// CHECK: {{dup|mov}} b1, v0.b[7] // encoding: [0x01,0x04,0x0f,0x5e]
+// CHECK: {{dup|mov}} b17, v0.b[0] // encoding: [0x11,0x04,0x01,0x5e]
+// CHECK: {{dup|mov}} h5, v31.h[7] // encoding: [0xe5,0x07,0x1e,0x5e]
+// CHECK: {{dup|mov}} h9, v1.h[4] // encoding: [0x29,0x04,0x12,0x5e]
+// CHECK: {{dup|mov}} h11, v17.h[0] // encoding: [0x2b,0x06,0x02,0x5e]
+// CHECK: {{dup|mov}} s2, v2.s[3] // encoding: [0x42,0x04,0x1c,0x5e]
+// CHECK: {{dup|mov}} s4, v21.s[0] // encoding: [0xa4,0x06,0x04,0x5e]
+// CHECK: {{dup|mov}} s31, v21.s[2] // encoding: [0xbf,0x06,0x14,0x5e]
+// CHECK: {{dup|mov}} d3, v5.d[0] // encoding: [0xa3,0x04,0x08,0x5e]
+// CHECK: {{dup|mov}} d6, v5.d[1] // encoding: [0xa6,0x04,0x18,0x5e]
diff --git a/test/MC/AArch64/neon-scalar-fp-compare.s b/test/MC/AArch64/neon-scalar-fp-compare.s
index a59ec0d1d6ed..b798b3410670 100644
--- a/test/MC/AArch64/neon-scalar-fp-compare.s
+++ b/test/MC/AArch64/neon-scalar-fp-compare.s
@@ -18,9 +18,13 @@
fcmeq s10, s11, #0.0
fcmeq d20, d21, #0.0
+ fcmeq s10, s11, #0
+ fcmeq d20, d21, #0x0
// CHECK: fcmeq s10, s11, #0.0 // encoding: [0x6a,0xd9,0xa0,0x5e]
// CHECK: fcmeq d20, d21, #0.0 // encoding: [0xb4,0xda,0xe0,0x5e]
+// CHECK: fcmeq s10, s11, #0.0 // encoding: [0x6a,0xd9,0xa0,0x5e]
+// CHECK: fcmeq d20, d21, #0.0 // encoding: [0xb4,0xda,0xe0,0x5e]
//----------------------------------------------------------------------
// Scalar Floating-point Compare Mask Greater Than Or Equal
@@ -38,9 +42,13 @@
fcmge s10, s11, #0.0
fcmge d20, d21, #0.0
+ fcmge s10, s11, #0
+ fcmge d20, d21, #0x0
// CHECK: fcmge s10, s11, #0.0 // encoding: [0x6a,0xc9,0xa0,0x7e]
// CHECK: fcmge d20, d21, #0.0 // encoding: [0xb4,0xca,0xe0,0x7e]
+// CHECK: fcmge s10, s11, #0.0 // encoding: [0x6a,0xc9,0xa0,0x7e]
+// CHECK: fcmge d20, d21, #0.0 // encoding: [0xb4,0xca,0xe0,0x7e]
//----------------------------------------------------------------------
// Scalar Floating-point Compare Mask Greater Than
@@ -58,9 +66,13 @@
fcmgt s10, s11, #0.0
fcmgt d20, d21, #0.0
+ fcmgt s10, s11, #0
+ fcmgt d20, d21, #0x0
// CHECK: fcmgt s10, s11, #0.0 // encoding: [0x6a,0xc9,0xa0,0x5e]
// CHECK: fcmgt d20, d21, #0.0 // encoding: [0xb4,0xca,0xe0,0x5e]
+// CHECK: fcmgt s10, s11, #0.0 // encoding: [0x6a,0xc9,0xa0,0x5e]
+// CHECK: fcmgt d20, d21, #0.0 // encoding: [0xb4,0xca,0xe0,0x5e]
//----------------------------------------------------------------------
// Scalar Floating-point Compare Mask Less Than Or Equal To Zero
@@ -68,9 +80,13 @@
fcmle s10, s11, #0.0
fcmle d20, d21, #0.0
+ fcmle s10, s11, #0
+ fcmle d20, d21, #0x0
// CHECK: fcmle s10, s11, #0.0 // encoding: [0x6a,0xd9,0xa0,0x7e]
// CHECK: fcmle d20, d21, #0.0 // encoding: [0xb4,0xda,0xe0,0x7e]
+// CHECK: fcmle s10, s11, #0.0 // encoding: [0x6a,0xd9,0xa0,0x7e]
+// CHECK: fcmle d20, d21, #0.0 // encoding: [0xb4,0xda,0xe0,0x7e]
//----------------------------------------------------------------------
// Scalar Floating-point Compare Mask Less Than
@@ -78,9 +94,13 @@
fcmlt s10, s11, #0.0
fcmlt d20, d21, #0.0
+ fcmlt s10, s11, #0
+ fcmlt d20, d21, #0x0
// CHECK: fcmlt s10, s11, #0.0 // encoding: [0x6a,0xe9,0xa0,0x5e]
// CHECK: fcmlt d20, d21, #0.0 // encoding: [0xb4,0xea,0xe0,0x5e]
+// CHECK: fcmlt s10, s11, #0.0 // encoding: [0x6a,0xe9,0xa0,0x5e]
+// CHECK: fcmlt d20, d21, #0.0 // encoding: [0xb4,0xea,0xe0,0x5e]
//----------------------------------------------------------------------
// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
diff --git a/test/MC/AArch64/neon-simd-copy.s b/test/MC/AArch64/neon-simd-copy.s
index f254d65b3b0c..4837a4cb9ee8 100644
--- a/test/MC/AArch64/neon-simd-copy.s
+++ b/test/MC/AArch64/neon-simd-copy.s
@@ -16,15 +16,15 @@
mov v20.s[0], w30
mov v1.d[1], x7
-// CHECK: ins v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
-// CHECK: ins v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
-// CHECK: ins v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
-// CHECK: ins v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
+// CHECK: {{mov|ins}} v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
+// CHECK: {{mov|ins}} v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
+// CHECK: {{mov|ins}} v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
+// CHECK: {{mov|ins}} v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
-// CHECK: ins v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
-// CHECK: ins v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
-// CHECK: ins v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
-// CHECK: ins v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
+// CHECK: {{mov|ins}} v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
+// CHECK: {{mov|ins}} v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
+// CHECK: {{mov|ins}} v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
+// CHECK: {{mov|ins}} v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
//------------------------------------------------------------------------------
@@ -54,13 +54,13 @@
mov w20, v9.s[2]
mov x7, v18.d[1]
-// CHECK: umov w1, v0.b[15] // encoding: [0x01,0x3c,0x1f,0x0e]
-// CHECK: umov w14, v6.h[4] // encoding: [0xce,0x3c,0x12,0x0e]
-// CHECK: umov w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
-// CHECK: umov x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
+// CHECK: {{mov|umov}} w1, v0.b[15] // encoding: [0x01,0x3c,0x1f,0x0e]
+// CHECK: {{mov|umov}} w14, v6.h[4] // encoding: [0xce,0x3c,0x12,0x0e]
+// CHECK: {{mov|umov}} w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
+// CHECK: {{mov|umov}} x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
-// CHECK: umov w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
-// CHECK: umov x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
+// CHECK: {{mov|umov}} w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
+// CHECK: {{mov|umov}} x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
//------------------------------------------------------------------------------
// Insert element (vector, from element)
@@ -76,15 +76,15 @@
mov v15.s[3], v22.s[2]
mov v0.d[0], v4.d[1]
-// CHECK: ins v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
-// CHECK: ins v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
-// CHECK: ins v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
-// CHECK: ins v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
+// CHECK: {{mov|ins}} v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
+// CHECK: {{mov|ins}} v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
+// CHECK: {{mov|ins}} v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
+// CHECK: {{mov|ins}} v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
-// CHECK: ins v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
-// CHECK: ins v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
-// CHECK: ins v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
-// CHECK: ins v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
+// CHECK: {{mov|ins}} v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
+// CHECK: {{mov|ins}} v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
+// CHECK: {{mov|ins}} v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
+// CHECK: {{mov|ins}} v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
//------------------------------------------------------------------------------
// Duplicate to all lanes (vector, from element)
@@ -97,13 +97,13 @@
dup v17.4s, v20.s[0]
dup v5.2d, v1.d[1]
-// CHECK: dup v1.8b, v2.b[2] // encoding: [0x41,0x04,0x05,0x0e]
-// CHECK: dup v11.4h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x0e]
-// CHECK: dup v17.2s, v20.s[0] // encoding: [0x91,0x06,0x04,0x0e]
-// CHECK: dup v1.16b, v2.b[2] // encoding: [0x41,0x04,0x05,0x4e]
-// CHECK: dup v11.8h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x4e]
-// CHECK: dup v17.4s, v20.s[0] // encoding: [0x91,0x06,0x04,0x4e]
-// CHECK: dup v5.2d, v1.d[1] // encoding: [0x25,0x04,0x18,0x4e]
+// CHECK: {{mov|dup}} v1.8b, v2.b[2] // encoding: [0x41,0x04,0x05,0x0e]
+// CHECK: {{mov|dup}} v11.4h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x0e]
+// CHECK: {{mov|dup}} v17.2s, v20.s[0] // encoding: [0x91,0x06,0x04,0x0e]
+// CHECK: {{mov|dup}} v1.16b, v2.b[2] // encoding: [0x41,0x04,0x05,0x4e]
+// CHECK: {{mov|dup}} v11.8h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x4e]
+// CHECK: {{mov|dup}} v17.4s, v20.s[0] // encoding: [0x91,0x06,0x04,0x4e]
+// CHECK: {{mov|dup}} v5.2d, v1.d[1] // encoding: [0x25,0x04,0x18,0x4e]
//------------------------------------------------------------------------------
// Duplicate to all lanes (vector, from main)
@@ -116,13 +116,13 @@
dup v17.4s, w28
dup v5.2d, x0
-// CHECK: dup v1.8b, w1 // encoding: [0x21,0x0c,0x01,0x0e]
-// CHECK: dup v11.4h, w14 // encoding: [0xcb,0x0d,0x02,0x0e]
-// CHECK: dup v17.2s, w30 // encoding: [0xd1,0x0f,0x04,0x0e]
-// CHECK: dup v1.16b, w2 // encoding: [0x41,0x0c,0x01,0x4e]
-// CHECK: dup v11.8h, w16 // encoding: [0x0b,0x0e,0x02,0x4e]
-// CHECK: dup v17.4s, w28 // encoding: [0x91,0x0f,0x04,0x4e]
-// CHECK: dup v5.2d, x0 // encoding: [0x05,0x0c,0x08,0x4e]
+// CHECK: {{mov|dup}} v1.8b, w1 // encoding: [0x21,0x0c,0x01,0x0e]
+// CHECK: {{mov|dup}} v11.4h, w14 // encoding: [0xcb,0x0d,0x02,0x0e]
+// CHECK: {{mov|dup}} v17.2s, w30 // encoding: [0xd1,0x0f,0x04,0x0e]
+// CHECK: {{mov|dup}} v1.16b, w2 // encoding: [0x41,0x0c,0x01,0x4e]
+// CHECK: {{mov|dup}} v11.8h, w16 // encoding: [0x0b,0x0e,0x02,0x4e]
+// CHECK: {{mov|dup}} v17.4s, w28 // encoding: [0x91,0x0f,0x04,0x4e]
+// CHECK: {{mov|dup}} v5.2d, x0 // encoding: [0x05,0x0c,0x08,0x4e]
diff --git a/test/MC/AArch64/neon-simd-ldst-multi-elem.s b/test/MC/AArch64/neon-simd-ldst-multi-elem.s
index 05fe4dac9138..b8b3e72ff777 100644
--- a/test/MC/AArch64/neon-simd-ldst-multi-elem.s
+++ b/test/MC/AArch64/neon-simd-ldst-multi-elem.s
@@ -1,463 +1,463 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
//------------------------------------------------------------------------------
// Store multiple 1-element structures from one register
//------------------------------------------------------------------------------
- st1 {v0.16b}, [x0]
- st1 {v15.8h}, [x15]
- st1 {v31.4s}, [sp]
- st1 {v0.2d}, [x0]
- st1 {v0.8b}, [x0]
- st1 {v15.4h}, [x15]
- st1 {v31.2s}, [sp]
- st1 {v0.1d}, [x0]
-// CHECK: st1 {v0.16b}, [x0] // encoding: [0x00,0x70,0x00,0x4c]
-// CHECK: st1 {v15.8h}, [x15] // encoding: [0xef,0x75,0x00,0x4c]
-// CHECK: st1 {v31.4s}, [sp] // encoding: [0xff,0x7b,0x00,0x4c]
-// CHECK: st1 {v0.2d}, [x0] // encoding: [0x00,0x7c,0x00,0x4c]
-// CHECK: st1 {v0.8b}, [x0] // encoding: [0x00,0x70,0x00,0x0c]
-// CHECK: st1 {v15.4h}, [x15] // encoding: [0xef,0x75,0x00,0x0c]
-// CHECK: st1 {v31.2s}, [sp] // encoding: [0xff,0x7b,0x00,0x0c]
-// CHECK: st1 {v0.1d}, [x0] // encoding: [0x00,0x7c,0x00,0x0c]
+ st1 { v0.16b }, [x0]
+ st1 { v15.8h }, [x15]
+ st1 { v31.4s }, [sp]
+ st1 { v0.2d }, [x0]
+ st1 { v0.8b }, [x0]
+ st1 { v15.4h }, [x15]
+ st1 { v31.2s }, [sp]
+ st1 { v0.1d }, [x0]
+// CHECK: st1 { v0.16b }, [x0] // encoding: [0x00,0x70,0x00,0x4c]
+// CHECK: st1 { v15.8h }, [x15] // encoding: [0xef,0x75,0x00,0x4c]
+// CHECK: st1 { v31.4s }, [sp] // encoding: [0xff,0x7b,0x00,0x4c]
+// CHECK: st1 { v0.2d }, [x0] // encoding: [0x00,0x7c,0x00,0x4c]
+// CHECK: st1 { v0.8b }, [x0] // encoding: [0x00,0x70,0x00,0x0c]
+// CHECK: st1 { v15.4h }, [x15] // encoding: [0xef,0x75,0x00,0x0c]
+// CHECK: st1 { v31.2s }, [sp] // encoding: [0xff,0x7b,0x00,0x0c]
+// CHECK: st1 { v0.1d }, [x0] // encoding: [0x00,0x7c,0x00,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from two consecutive registers
//------------------------------------------------------------------------------
- st1 {v0.16b, v1.16b}, [x0]
- st1 {v15.8h, v16.8h}, [x15]
- st1 {v31.4s, v0.4s}, [sp]
- st1 {v0.2d, v1.2d}, [x0]
- st1 {v0.8b, v1.8b}, [x0]
- st1 {v15.4h, v16.4h}, [x15]
- st1 {v31.2s, v0.2s}, [sp]
- st1 {v0.1d, v1.1d}, [x0]
-// CHECK: st1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x00,0x4c]
-// CHECK: st1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x00,0x4c]
-// CHECK: st1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x00,0x4c]
-// CHECK: st1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x00,0x4c]
-// CHECK: st1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x00,0x0c]
-// CHECK: st1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x00,0x0c]
-// CHECK: st1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x00,0x0c]
-// CHECK: st1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x00,0x0c]
+ st1 { v0.16b, v1.16b }, [x0]
+ st1 { v15.8h, v16.8h }, [x15]
+ st1 { v31.4s, v0.4s }, [sp]
+ st1 { v0.2d, v1.2d }, [x0]
+ st1 { v0.8b, v1.8b }, [x0]
+ st1 { v15.4h, v16.4h }, [x15]
+ st1 { v31.2s, v0.2s }, [sp]
+ st1 { v0.1d, v1.1d }, [x0]
+// CHECK: st1 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0xa0,0x00,0x4c]
+// CHECK: st1 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0xa5,0x00,0x4c]
+// CHECK: st1 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0xab,0x00,0x4c]
+// CHECK: st1 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0xac,0x00,0x4c]
+// CHECK: st1 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0xa0,0x00,0x0c]
+// CHECK: st1 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0xa5,0x00,0x0c]
+// CHECK: st1 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0xab,0x00,0x0c]
+// CHECK: st1 { v0.1d, v1.1d }, [x0] // encoding: [0x00,0xac,0x00,0x0c]
- st1 {v0.16b-v1.16b}, [x0]
- st1 {v15.8h-v16.8h}, [x15]
- st1 {v31.4s-v0.4s}, [sp]
- st1 {v0.2d-v1.2d}, [x0]
- st1 {v0.8b-v1.8b}, [x0]
- st1 {v15.4h-v16.4h}, [x15]
- st1 {v31.2s-v0.2s}, [sp]
- st1 {v0.1d-v1.1d}, [x0]
-// CHECK: st1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x00,0x4c]
-// CHECK: st1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x00,0x4c]
-// CHECK: st1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x00,0x4c]
-// CHECK: st1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x00,0x4c]
-// CHECK: st1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x00,0x0c]
-// CHECK: st1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x00,0x0c]
-// CHECK: st1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x00,0x0c]
-// CHECK: st1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x00,0x0c]
+ st1 { v0.16b-v1.16b }, [x0]
+ st1 { v15.8h-v16.8h }, [x15]
+ st1 { v31.4s-v0.4s }, [sp]
+ st1 { v0.2d-v1.2d }, [x0]
+ st1 { v0.8b-v1.8b }, [x0]
+ st1 { v15.4h-v16.4h }, [x15]
+ st1 { v31.2s-v0.2s }, [sp]
+ st1 { v0.1d-v1.1d }, [x0]
+// CHECK: st1 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0xa0,0x00,0x4c]
+// CHECK: st1 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0xa5,0x00,0x4c]
+// CHECK: st1 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0xab,0x00,0x4c]
+// CHECK: st1 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0xac,0x00,0x4c]
+// CHECK: st1 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0xa0,0x00,0x0c]
+// CHECK: st1 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0xa5,0x00,0x0c]
+// CHECK: st1 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0xab,0x00,0x0c]
+// CHECK: st1 { v0.1d, v1.1d }, [x0] // encoding: [0x00,0xac,0x00,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from three consecutive registers
//------------------------------------------------------------------------------
- st1 {v0.16b, v1.16b, v2.16b}, [x0]
- st1 {v15.8h, v16.8h, v17.8h}, [x15]
- st1 {v31.4s, v0.4s, v1.4s}, [sp]
- st1 {v0.2d, v1.2d, v2.2d}, [x0]
- st1 {v0.8b, v1.8b, v2.8b}, [x0]
- st1 {v15.4h, v16.4h, v17.4h}, [x15]
- st1 {v31.2s, v0.2s, v1.2s}, [sp]
- st1 {v0.1d, v1.1d, v2.1d}, [x0]
-// CHECK: st1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x00,0x4c]
-// CHECK: st1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x00,0x4c]
-// CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x00,0x4c]
-// CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x00,0x4c]
-// CHECK: st1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x00,0x0c]
-// CHECK: st1 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x65,0x00,0x0c]
-// CHECK: st1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x00,0x0c]
-// CHECK: st1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x00,0x0c]
+ st1 { v0.16b, v1.16b, v2.16b }, [x0]
+ st1 { v15.8h, v16.8h, v17.8h }, [x15]
+ st1 { v31.4s, v0.4s, v1.4s }, [sp]
+ st1 { v0.2d, v1.2d, v2.2d }, [x0]
+ st1 { v0.8b, v1.8b, v2.8b }, [x0]
+ st1 { v15.4h, v16.4h, v17.4h }, [x15]
+ st1 { v31.2s, v0.2s, v1.2s }, [sp]
+ st1 { v0.1d, v1.1d, v2.1d }, [x0]
+// CHECK: st1 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x60,0x00,0x4c]
+// CHECK: st1 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x65,0x00,0x4c]
+// CHECK: st1 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x6b,0x00,0x4c]
+// CHECK: st1 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x6c,0x00,0x4c]
+// CHECK: st1 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x60,0x00,0x0c]
+// CHECK: st1 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x65,0x00,0x0c]
+// CHECK: st1 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x6b,0x00,0x0c]
+// CHECK: st1 { v0.1d, v1.1d, v2.1d }, [x0] // encoding: [0x00,0x6c,0x00,0x0c]
- st1 {v0.16b-v2.16b}, [x0]
- st1 {v15.8h-v17.8h}, [x15]
- st1 {v31.4s-v1.4s}, [sp]
- st1 {v0.2d-v2.2d}, [x0]
- st1 {v0.8b-v2.8b}, [x0]
- st1 {v15.4h-v17.4h}, [x15]
- st1 {v31.2s-v1.2s}, [sp]
- st1 {v0.1d-v2.1d}, [x0]
-// CHECK: st1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x00,0x4c]
-// CHECK: st1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x00,0x4c]
-// CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x00,0x4c]
-// CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x00,0x4c]
-// CHECK: st1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x00,0x0c]
-// CHECK: st1 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x65,0x00,0x0c]
-// CHECK: st1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x00,0x0c]
-// CHECK: st1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x00,0x0c]
+ st1 { v0.16b-v2.16b }, [x0]
+ st1 { v15.8h-v17.8h }, [x15]
+ st1 { v31.4s-v1.4s }, [sp]
+ st1 { v0.2d-v2.2d }, [x0]
+ st1 { v0.8b-v2.8b }, [x0]
+ st1 { v15.4h-v17.4h }, [x15]
+ st1 { v31.2s-v1.2s }, [sp]
+ st1 { v0.1d-v2.1d }, [x0]
+// CHECK: st1 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x60,0x00,0x4c]
+// CHECK: st1 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x65,0x00,0x4c]
+// CHECK: st1 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x6b,0x00,0x4c]
+// CHECK: st1 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x6c,0x00,0x4c]
+// CHECK: st1 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x60,0x00,0x0c]
+// CHECK: st1 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x65,0x00,0x0c]
+// CHECK: st1 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x6b,0x00,0x0c]
+// CHECK: st1 { v0.1d, v1.1d, v2.1d }, [x0] // encoding: [0x00,0x6c,0x00,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from four consecutive registers
//------------------------------------------------------------------------------
- st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
- st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15]
- st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp]
- st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
- st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
- st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15]
- st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
- st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0]
-// CHECK: st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x00,0x4c]
-// CHECK: st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x00,0x4c]
-// CHECK: st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x00,0x4c]
-// CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x00,0x4c]
-// CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x00,0x0c]
-// CHECK: st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x00,0x0c]
-// CHECK: st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x00,0x0c]
-// CHECK: st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x00,0x0c]
+ st1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0]
+ st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15]
+ st1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp]
+ st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+ st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0]
+ st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15]
+ st1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
+ st1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0]
+// CHECK: st1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x20,0x00,0x4c]
+// CHECK: st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x25,0x00,0x4c]
+// CHECK: st1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x2b,0x00,0x4c]
+// CHECK: st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x2c,0x00,0x4c]
+// CHECK: st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x20,0x00,0x0c]
+// CHECK: st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x25,0x00,0x0c]
+// CHECK: st1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x2b,0x00,0x0c]
+// CHECK: st1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0] // encoding: [0x00,0x2c,0x00,0x0c]
- st1 {v0.16b-v3.16b}, [x0]
- st1 {v15.8h-v18.8h}, [x15]
- st1 {v31.4s-v2.4s}, [sp]
- st1 {v0.2d-v3.2d}, [x0]
- st1 {v0.8b-v3.8b}, [x0]
- st1 {v15.4h-v18.4h}, [x15]
- st1 {v31.2s-v2.2s}, [sp]
- st1 {v0.1d-v3.1d}, [x0]
-// CHECK: st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x00,0x4c]
-// CHECK: st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x00,0x4c]
-// CHECK: st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x00,0x4c]
-// CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x00,0x4c]
-// CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x00,0x0c]
-// CHECK: st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x00,0x0c]
-// CHECK: st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x00,0x0c]
-// CHECK: st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x00,0x0c]
+ st1 { v0.16b-v3.16b }, [x0]
+ st1 { v15.8h-v18.8h }, [x15]
+ st1 { v31.4s-v2.4s }, [sp]
+ st1 { v0.2d-v3.2d }, [x0]
+ st1 { v0.8b-v3.8b }, [x0]
+ st1 { v15.4h-v18.4h }, [x15]
+ st1 { v31.2s-v2.2s }, [sp]
+ st1 { v0.1d-v3.1d }, [x0]
+// CHECK: st1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x20,0x00,0x4c]
+// CHECK: st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x25,0x00,0x4c]
+// CHECK: st1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x2b,0x00,0x4c]
+// CHECK: st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x2c,0x00,0x4c]
+// CHECK: st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x20,0x00,0x0c]
+// CHECK: st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x25,0x00,0x0c]
+// CHECK: st1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x2b,0x00,0x0c]
+// CHECK: st1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0] // encoding: [0x00,0x2c,0x00,0x0c]
//------------------------------------------------------------------------------
// Store multiple 2-element structures from two consecutive registers
//------------------------------------------------------------------------------
- st2 {v0.16b, v1.16b}, [x0]
- st2 {v15.8h, v16.8h}, [x15]
- st2 {v31.4s, v0.4s}, [sp]
- st2 {v0.2d, v1.2d}, [x0]
- st2 {v0.8b, v1.8b}, [x0]
- st2 {v15.4h, v16.4h}, [x15]
- st2 {v31.2s, v0.2s}, [sp]
-// CHECK: st2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x00,0x4c]
-// CHECK: st2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x00,0x4c]
-// CHECK: st2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x00,0x4c]
-// CHECK: st2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x00,0x4c]
-// CHECK: st2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x00,0x0c]
-// CHECK: st2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x00,0x0c]
-// CHECK: st2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x00,0x0c]
+ st2 { v0.16b, v1.16b }, [x0]
+ st2 { v15.8h, v16.8h }, [x15]
+ st2 { v31.4s, v0.4s }, [sp]
+ st2 { v0.2d, v1.2d }, [x0]
+ st2 { v0.8b, v1.8b }, [x0]
+ st2 { v15.4h, v16.4h }, [x15]
+ st2 { v31.2s, v0.2s }, [sp]
+// CHECK: st2 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0x80,0x00,0x4c]
+// CHECK: st2 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0x85,0x00,0x4c]
+// CHECK: st2 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0x8b,0x00,0x4c]
+// CHECK: st2 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0x8c,0x00,0x4c]
+// CHECK: st2 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0x80,0x00,0x0c]
+// CHECK: st2 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0x85,0x00,0x0c]
+// CHECK: st2 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0x8b,0x00,0x0c]
- st2 {v0.16b-v1.16b}, [x0]
- st2 {v15.8h-v16.8h}, [x15]
- st2 {v31.4s-v0.4s}, [sp]
- st2 {v0.2d-v1.2d}, [x0]
- st2 {v0.8b-v1.8b}, [x0]
- st2 {v15.4h-v16.4h}, [x15]
- st2 {v31.2s-v0.2s}, [sp]
-// CHECK: st2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x00,0x4c]
-// CHECK: st2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x00,0x4c]
-// CHECK: st2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x00,0x4c]
-// CHECK: st2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x00,0x4c]
-// CHECK: st2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x00,0x0c]
-// CHECK: st2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x00,0x0c]
-// CHECK: st2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x00,0x0c]
+ st2 { v0.16b-v1.16b }, [x0]
+ st2 { v15.8h-v16.8h }, [x15]
+ st2 { v31.4s-v0.4s }, [sp]
+ st2 { v0.2d-v1.2d }, [x0]
+ st2 { v0.8b-v1.8b }, [x0]
+ st2 { v15.4h-v16.4h }, [x15]
+ st2 { v31.2s-v0.2s }, [sp]
+// CHECK: st2 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0x80,0x00,0x4c]
+// CHECK: st2 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0x85,0x00,0x4c]
+// CHECK: st2 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0x8b,0x00,0x4c]
+// CHECK: st2 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0x8c,0x00,0x4c]
+// CHECK: st2 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0x80,0x00,0x0c]
+// CHECK: st2 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0x85,0x00,0x0c]
+// CHECK: st2 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0x8b,0x00,0x0c]
//------------------------------------------------------------------------------
// Store multiple 3-element structures from three consecutive registers
//------------------------------------------------------------------------------
- st3 {v0.16b, v1.16b, v2.16b}, [x0]
- st3 {v15.8h, v16.8h, v17.8h}, [x15]
- st3 {v31.4s, v0.4s, v1.4s}, [sp]
- st3 {v0.2d, v1.2d, v2.2d}, [x0]
- st3 {v0.8b, v1.8b, v2.8b}, [x0]
- st3 {v15.4h, v16.4h, v17.4h}, [x15]
- st3 {v31.2s, v0.2s, v1.2s}, [sp]
-// CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x00,0x4c]
-// CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x00,0x4c]
-// CHECK: st3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x00,0x4c]
-// CHECK: st3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x00,0x4c]
-// CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x00,0x0c]
-// CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x00,0x0c]
-// CHECK: st3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x00,0x0c]
+ st3 { v0.16b, v1.16b, v2.16b }, [x0]
+ st3 { v15.8h, v16.8h, v17.8h }, [x15]
+ st3 { v31.4s, v0.4s, v1.4s }, [sp]
+ st3 { v0.2d, v1.2d, v2.2d }, [x0]
+ st3 { v0.8b, v1.8b, v2.8b }, [x0]
+ st3 { v15.4h, v16.4h, v17.4h }, [x15]
+ st3 { v31.2s, v0.2s, v1.2s }, [sp]
+// CHECK: st3 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x40,0x00,0x4c]
+// CHECK: st3 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x45,0x00,0x4c]
+// CHECK: st3 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x4b,0x00,0x4c]
+// CHECK: st3 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x4c,0x00,0x4c]
+// CHECK: st3 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x40,0x00,0x0c]
+// CHECK: st3 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x45,0x00,0x0c]
+// CHECK: st3 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x4b,0x00,0x0c]
- st3 {v0.16b-v2.16b}, [x0]
- st3 {v15.8h-v17.8h}, [x15]
- st3 {v31.4s-v1.4s}, [sp]
- st3 {v0.2d-v2.2d}, [x0]
- st3 {v0.8b-v2.8b}, [x0]
- st3 {v15.4h-v17.4h}, [x15]
- st3 {v31.2s-v1.2s}, [sp]
-// CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x00,0x4c]
-// CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x00,0x4c]
-// CHECK: st3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x00,0x4c]
-// CHECK: st3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x00,0x4c]
-// CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x00,0x0c]
-// CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x00,0x0c]
-// CHECK: st3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x00,0x0c]
+ st3 { v0.16b-v2.16b }, [x0]
+ st3 { v15.8h-v17.8h }, [x15]
+ st3 { v31.4s-v1.4s }, [sp]
+ st3 { v0.2d-v2.2d }, [x0]
+ st3 { v0.8b-v2.8b }, [x0]
+ st3 { v15.4h-v17.4h }, [x15]
+ st3 { v31.2s-v1.2s }, [sp]
+// CHECK: st3 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x40,0x00,0x4c]
+// CHECK: st3 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x45,0x00,0x4c]
+// CHECK: st3 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x4b,0x00,0x4c]
+// CHECK: st3 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x4c,0x00,0x4c]
+// CHECK: st3 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x40,0x00,0x0c]
+// CHECK: st3 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x45,0x00,0x0c]
+// CHECK: st3 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x4b,0x00,0x0c]
//------------------------------------------------------------------------------
// Store multiple 4-element structures from four consecutive registers
//------------------------------------------------------------------------------
- st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
- st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15]
- st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp]
- st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
- st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
- st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15]
- st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
-// CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x00,0x4c]
-// CHECK: st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x00,0x4c]
-// CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x00,0x4c]
-// CHECK: st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x00,0x4c]
-// CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x00,0x0c]
-// CHECK: st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x00,0x0c]
-// CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x00,0x0c]
+ st4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0]
+ st4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15]
+ st4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp]
+ st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+ st4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0]
+ st4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15]
+ st4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
+// CHECK: st4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x00,0x00,0x4c]
+// CHECK: st4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x05,0x00,0x4c]
+// CHECK: st4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x0b,0x00,0x4c]
+// CHECK: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x0c,0x00,0x4c]
+// CHECK: st4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x00,0x00,0x0c]
+// CHECK: st4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x05,0x00,0x0c]
+// CHECK: st4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x0b,0x00,0x0c]
- st4 {v0.16b-v3.16b}, [x0]
- st4 {v15.8h-v18.8h}, [x15]
- st4 {v31.4s-v2.4s}, [sp]
- st4 {v0.2d-v3.2d}, [x0]
- st4 {v0.8b-v3.8b}, [x0]
- st4 {v15.4h-v18.4h}, [x15]
- st4 {v31.2s-v2.2s}, [sp]
-// CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x00,0x4c]
-// CHECK: st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x00,0x4c]
-// CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x00,0x4c]
-// CHECK: st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x00,0x4c]
-// CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x00,0x0c]
-// CHECK: st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x00,0x0c]
-// CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x00,0x0c]
+ st4 { v0.16b-v3.16b }, [x0]
+ st4 { v15.8h-v18.8h }, [x15]
+ st4 { v31.4s-v2.4s }, [sp]
+ st4 { v0.2d-v3.2d }, [x0]
+ st4 { v0.8b-v3.8b }, [x0]
+ st4 { v15.4h-v18.4h }, [x15]
+ st4 { v31.2s-v2.2s }, [sp]
+// CHECK: st4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x00,0x00,0x4c]
+// CHECK: st4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x05,0x00,0x4c]
+// CHECK: st4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x0b,0x00,0x4c]
+// CHECK: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x0c,0x00,0x4c]
+// CHECK: st4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x00,0x00,0x0c]
+// CHECK: st4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x05,0x00,0x0c]
+// CHECK: st4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x0b,0x00,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures to one register
//------------------------------------------------------------------------------
- ld1 {v0.16b}, [x0]
- ld1 {v15.8h}, [x15]
- ld1 {v31.4s}, [sp]
- ld1 {v0.2d}, [x0]
- ld1 {v0.8b}, [x0]
- ld1 {v15.4h}, [x15]
- ld1 {v31.2s}, [sp]
- ld1 {v0.1d}, [x0]
-// CHECK: ld1 {v0.16b}, [x0] // encoding: [0x00,0x70,0x40,0x4c]
-// CHECK: ld1 {v15.8h}, [x15] // encoding: [0xef,0x75,0x40,0x4c]
-// CHECK: ld1 {v31.4s}, [sp] // encoding: [0xff,0x7b,0x40,0x4c]
-// CHECK: ld1 {v0.2d}, [x0] // encoding: [0x00,0x7c,0x40,0x4c]
-// CHECK: ld1 {v0.8b}, [x0] // encoding: [0x00,0x70,0x40,0x0c]
-// CHECK: ld1 {v15.4h}, [x15] // encoding: [0xef,0x75,0x40,0x0c]
-// CHECK: ld1 {v31.2s}, [sp] // encoding: [0xff,0x7b,0x40,0x0c]
-// CHECK: ld1 {v0.1d}, [x0] // encoding: [0x00,0x7c,0x40,0x0c]
+ ld1 { v0.16b }, [x0]
+ ld1 { v15.8h }, [x15]
+ ld1 { v31.4s }, [sp]
+ ld1 { v0.2d }, [x0]
+ ld1 { v0.8b }, [x0]
+ ld1 { v15.4h }, [x15]
+ ld1 { v31.2s }, [sp]
+ ld1 { v0.1d }, [x0]
+// CHECK: ld1 { v0.16b }, [x0] // encoding: [0x00,0x70,0x40,0x4c]
+// CHECK: ld1 { v15.8h }, [x15] // encoding: [0xef,0x75,0x40,0x4c]
+// CHECK: ld1 { v31.4s }, [sp] // encoding: [0xff,0x7b,0x40,0x4c]
+// CHECK: ld1 { v0.2d }, [x0] // encoding: [0x00,0x7c,0x40,0x4c]
+// CHECK: ld1 { v0.8b }, [x0] // encoding: [0x00,0x70,0x40,0x0c]
+// CHECK: ld1 { v15.4h }, [x15] // encoding: [0xef,0x75,0x40,0x0c]
+// CHECK: ld1 { v31.2s }, [sp] // encoding: [0xff,0x7b,0x40,0x0c]
+// CHECK: ld1 { v0.1d }, [x0] // encoding: [0x00,0x7c,0x40,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures to two consecutive registers
//------------------------------------------------------------------------------
- ld1 {v0.16b, v1.16b}, [x0]
- ld1 {v15.8h, v16.8h}, [x15]
- ld1 {v31.4s, v0.4s}, [sp]
- ld1 {v0.2d, v1.2d}, [x0]
- ld1 {v0.8b, v1.8b}, [x0]
- ld1 {v15.4h, v16.4h}, [x15]
- ld1 {v31.2s, v0.2s}, [sp]
- ld1 {v0.1d, v1.1d}, [x0]
-// CHECK: ld1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x40,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x40,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x40,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x40,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x40,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x40,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x40,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x40,0x0c]
+ ld1 { v0.16b, v1.16b }, [x0]
+ ld1 { v15.8h, v16.8h }, [x15]
+ ld1 { v31.4s, v0.4s }, [sp]
+ ld1 { v0.2d, v1.2d }, [x0]
+ ld1 { v0.8b, v1.8b }, [x0]
+ ld1 { v15.4h, v16.4h }, [x15]
+ ld1 { v31.2s, v0.2s }, [sp]
+ ld1 { v0.1d, v1.1d }, [x0]
+// CHECK: ld1 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0xa0,0x40,0x4c]
+// CHECK: ld1 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0xa5,0x40,0x4c]
+// CHECK: ld1 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0xab,0x40,0x4c]
+// CHECK: ld1 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0xac,0x40,0x4c]
+// CHECK: ld1 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0xa0,0x40,0x0c]
+// CHECK: ld1 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0xa5,0x40,0x0c]
+// CHECK: ld1 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0xab,0x40,0x0c]
+// CHECK: ld1 { v0.1d, v1.1d }, [x0] // encoding: [0x00,0xac,0x40,0x0c]
- ld1 {v0.16b-v1.16b}, [x0]
- ld1 {v15.8h-v16.8h}, [x15]
- ld1 {v31.4s-v0.4s}, [sp]
- ld1 {v0.2d-v1.2d}, [x0]
- ld1 {v0.8b-v1.8b}, [x0]
- ld1 {v15.4h-v16.4h}, [x15]
- ld1 {v31.2s-v0.2s}, [sp]
- ld1 {v0.1d-v1.1d}, [x0]
-// CHECK: ld1 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xa0,0x40,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xa5,0x40,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xab,0x40,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xac,0x40,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xa0,0x40,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xa5,0x40,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xab,0x40,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d}, [x0] // encoding: [0x00,0xac,0x40,0x0c]
+ ld1 { v0.16b-v1.16b }, [x0]
+ ld1 { v15.8h-v16.8h }, [x15]
+ ld1 { v31.4s-v0.4s }, [sp]
+ ld1 { v0.2d-v1.2d }, [x0]
+ ld1 { v0.8b-v1.8b }, [x0]
+ ld1 { v15.4h-v16.4h }, [x15]
+ ld1 { v31.2s-v0.2s }, [sp]
+ ld1 { v0.1d-v1.1d }, [x0]
+// CHECK: ld1 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0xa0,0x40,0x4c]
+// CHECK: ld1 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0xa5,0x40,0x4c]
+// CHECK: ld1 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0xab,0x40,0x4c]
+// CHECK: ld1 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0xac,0x40,0x4c]
+// CHECK: ld1 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0xa0,0x40,0x0c]
+// CHECK: ld1 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0xa5,0x40,0x0c]
+// CHECK: ld1 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0xab,0x40,0x0c]
+// CHECK: ld1 { v0.1d, v1.1d }, [x0] // encoding: [0x00,0xac,0x40,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures to three consecutive registers
//------------------------------------------------------------------------------
- ld1 {v0.16b, v1.16b, v2.16b}, [x0]
- ld1 {v15.8h, v16.8h, v17.8h}, [x15]
- ld1 {v31.4s, v0.4s, v1.4s}, [sp]
- ld1 {v0.2d, v1.2d, v2.2d}, [x0]
- ld1 {v0.8b, v1.8b, v2.8b}, [x0]
- ld1 {v15.4h, v16.4h, v17.4h}, [x15]
- ld1 {v31.2s, v0.2s, v1.2s}, [sp]
- ld1 {v0.1d, v1.1d, v2.1d}, [x0]
-// CHECK: ld1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x40,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x40,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x40,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x40,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x40,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x65,0x40,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x40,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x40,0x0c]
+ ld1 { v0.16b, v1.16b, v2.16b }, [x0]
+ ld1 { v15.8h, v16.8h, v17.8h }, [x15]
+ ld1 { v31.4s, v0.4s, v1.4s }, [sp]
+ ld1 { v0.2d, v1.2d, v2.2d }, [x0]
+ ld1 { v0.8b, v1.8b, v2.8b }, [x0]
+ ld1 { v15.4h, v16.4h, v17.4h }, [x15]
+ ld1 { v31.2s, v0.2s, v1.2s }, [sp]
+ ld1 { v0.1d, v1.1d, v2.1d }, [x0]
+// CHECK: ld1 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x60,0x40,0x4c]
+// CHECK: ld1 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x65,0x40,0x4c]
+// CHECK: ld1 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x6b,0x40,0x4c]
+// CHECK: ld1 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x6c,0x40,0x4c]
+// CHECK: ld1 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x60,0x40,0x0c]
+// CHECK: ld1 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x65,0x40,0x0c]
+// CHECK: ld1 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x6b,0x40,0x0c]
+// CHECK: ld1 { v0.1d, v1.1d, v2.1d }, [x0] // encoding: [0x00,0x6c,0x40,0x0c]
- ld1 {v0.16b-v2.16b}, [x0]
- ld1 {v15.8h-v17.8h}, [x15]
- ld1 {v31.4s-v1.4s}, [sp]
- ld1 {v0.2d-v2.2d}, [x0]
- ld1 {v0.8b-v2.8b}, [x0]
- ld1 {v15.4h-v17.4h}, [x15]
- ld1 {v31.2s-v1.2s}, [sp]
- ld1 {v0.1d-v2.1d}, [x0]
-// CHECK: ld1 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x60,0x40,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x65,0x40,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x6b,0x40,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x6c,0x40,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x60,0x40,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x65,0x40,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x6b,0x40,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d, v2.1d}, [x0] // encoding: [0x00,0x6c,0x40,0x0c]
+ ld1 { v0.16b-v2.16b }, [x0]
+ ld1 { v15.8h-v17.8h }, [x15]
+ ld1 { v31.4s-v1.4s }, [sp]
+ ld1 { v0.2d-v2.2d }, [x0]
+ ld1 { v0.8b-v2.8b }, [x0]
+ ld1 { v15.4h-v17.4h }, [x15]
+ ld1 { v31.2s-v1.2s }, [sp]
+ ld1 { v0.1d-v2.1d }, [x0]
+// CHECK: ld1 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x60,0x40,0x4c]
+// CHECK: ld1 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x65,0x40,0x4c]
+// CHECK: ld1 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x6b,0x40,0x4c]
+// CHECK: ld1 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x6c,0x40,0x4c]
+// CHECK: ld1 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x60,0x40,0x0c]
+// CHECK: ld1 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x65,0x40,0x0c]
+// CHECK: ld1 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x6b,0x40,0x0c]
+// CHECK: ld1 { v0.1d, v1.1d, v2.1d }, [x0] // encoding: [0x00,0x6c,0x40,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures to four consecutive registers
//------------------------------------------------------------------------------
- ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
- ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15]
- ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp]
- ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
- ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
- ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15]
- ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
- ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0]
-// CHECK: ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x40,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x40,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x40,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x40,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x40,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x40,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x40,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x40,0x0c]
+ ld1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0]
+ ld1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15]
+ ld1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp]
+ ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+ ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0]
+ ld1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15]
+ ld1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
+ ld1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0]
+// CHECK: ld1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x20,0x40,0x4c]
+// CHECK: ld1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x25,0x40,0x4c]
+// CHECK: ld1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x2b,0x40,0x4c]
+// CHECK: ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x2c,0x40,0x4c]
+// CHECK: ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x20,0x40,0x0c]
+// CHECK: ld1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x25,0x40,0x0c]
+// CHECK: ld1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x2b,0x40,0x0c]
+// CHECK: ld1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0] // encoding: [0x00,0x2c,0x40,0x0c]
- ld1 {v0.16b-v3.16b}, [x0]
- ld1 {v15.8h-v18.8h}, [x15]
- ld1 {v31.4s-v2.4s}, [sp]
- ld1 {v0.2d-v3.2d}, [x0]
- ld1 {v0.8b-v3.8b}, [x0]
- ld1 {v15.4h-v18.4h}, [x15]
- ld1 {v31.2s-v2.2s}, [sp]
- ld1 {v0.1d-v3.1d}, [x0]
-// CHECK: ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x20,0x40,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x25,0x40,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x2b,0x40,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x2c,0x40,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x20,0x40,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x25,0x40,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x2b,0x40,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0] // encoding: [0x00,0x2c,0x40,0x0c]
+ ld1 { v0.16b-v3.16b }, [x0]
+ ld1 { v15.8h-v18.8h }, [x15]
+ ld1 { v31.4s-v2.4s }, [sp]
+ ld1 { v0.2d-v3.2d }, [x0]
+ ld1 { v0.8b-v3.8b }, [x0]
+ ld1 { v15.4h-v18.4h }, [x15]
+ ld1 { v31.2s-v2.2s }, [sp]
+ ld1 { v0.1d-v3.1d }, [x0]
+// CHECK: ld1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x20,0x40,0x4c]
+// CHECK: ld1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x25,0x40,0x4c]
+// CHECK: ld1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x2b,0x40,0x4c]
+// CHECK: ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x2c,0x40,0x4c]
+// CHECK: ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x20,0x40,0x0c]
+// CHECK: ld1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x25,0x40,0x0c]
+// CHECK: ld1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x2b,0x40,0x0c]
+// CHECK: ld1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0] // encoding: [0x00,0x2c,0x40,0x0c]
//------------------------------------------------------------------------------
// Load multiple 2-element structures to two consecutive registers
//------------------------------------------------------------------------------
- ld2 {v0.16b, v1.16b}, [x0]
- ld2 {v15.8h, v16.8h}, [x15]
- ld2 {v31.4s, v0.4s}, [sp]
- ld2 {v0.2d, v1.2d}, [x0]
- ld2 {v0.8b, v1.8b}, [x0]
- ld2 {v15.4h, v16.4h}, [x15]
- ld2 {v31.2s, v0.2s}, [sp]
-// CHECK: ld2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x40,0x4c]
-// CHECK: ld2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x40,0x4c]
-// CHECK: ld2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x40,0x4c]
-// CHECK: ld2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x40,0x4c]
-// CHECK: ld2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x40,0x0c]
-// CHECK: ld2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x40,0x0c]
-// CHECK: ld2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x40,0x0c]
+ ld2 { v0.16b, v1.16b }, [x0]
+ ld2 { v15.8h, v16.8h }, [x15]
+ ld2 { v31.4s, v0.4s }, [sp]
+ ld2 { v0.2d, v1.2d }, [x0]
+ ld2 { v0.8b, v1.8b }, [x0]
+ ld2 { v15.4h, v16.4h }, [x15]
+ ld2 { v31.2s, v0.2s }, [sp]
+// CHECK: ld2 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0x80,0x40,0x4c]
+// CHECK: ld2 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0x85,0x40,0x4c]
+// CHECK: ld2 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0x8b,0x40,0x4c]
+// CHECK: ld2 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0x8c,0x40,0x4c]
+// CHECK: ld2 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0x80,0x40,0x0c]
+// CHECK: ld2 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0x85,0x40,0x0c]
+// CHECK: ld2 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0x8b,0x40,0x0c]
- ld2 {v0.16b-v1.16b}, [x0]
- ld2 {v15.8h-v16.8h}, [x15]
- ld2 {v31.4s-v0.4s}, [sp]
- ld2 {v0.2d-v1.2d}, [x0]
- ld2 {v0.8b-v1.8b}, [x0]
- ld2 {v15.4h-v16.4h}, [x15]
- ld2 {v31.2s-v0.2s}, [sp]
-// CHECK: ld2 {v0.16b, v1.16b}, [x0] // encoding: [0x00,0x80,0x40,0x4c]
-// CHECK: ld2 {v15.8h, v16.8h}, [x15] // encoding: [0xef,0x85,0x40,0x4c]
-// CHECK: ld2 {v31.4s, v0.4s}, [sp] // encoding: [0xff,0x8b,0x40,0x4c]
-// CHECK: ld2 {v0.2d, v1.2d}, [x0] // encoding: [0x00,0x8c,0x40,0x4c]
-// CHECK: ld2 {v0.8b, v1.8b}, [x0] // encoding: [0x00,0x80,0x40,0x0c]
-// CHECK: ld2 {v15.4h, v16.4h}, [x15] // encoding: [0xef,0x85,0x40,0x0c]
-// CHECK: ld2 {v31.2s, v0.2s}, [sp] // encoding: [0xff,0x8b,0x40,0x0c]
+ ld2 { v0.16b-v1.16b }, [x0]
+ ld2 { v15.8h-v16.8h }, [x15]
+ ld2 { v31.4s-v0.4s }, [sp]
+ ld2 { v0.2d-v1.2d }, [x0]
+ ld2 { v0.8b-v1.8b }, [x0]
+ ld2 { v15.4h-v16.4h }, [x15]
+ ld2 { v31.2s-v0.2s }, [sp]
+// CHECK: ld2 { v0.16b, v1.16b }, [x0] // encoding: [0x00,0x80,0x40,0x4c]
+// CHECK: ld2 { v15.8h, v16.8h }, [x15] // encoding: [0xef,0x85,0x40,0x4c]
+// CHECK: ld2 { v31.4s, v0.4s }, [sp] // encoding: [0xff,0x8b,0x40,0x4c]
+// CHECK: ld2 { v0.2d, v1.2d }, [x0] // encoding: [0x00,0x8c,0x40,0x4c]
+// CHECK: ld2 { v0.8b, v1.8b }, [x0] // encoding: [0x00,0x80,0x40,0x0c]
+// CHECK: ld2 { v15.4h, v16.4h }, [x15] // encoding: [0xef,0x85,0x40,0x0c]
+// CHECK: ld2 { v31.2s, v0.2s }, [sp] // encoding: [0xff,0x8b,0x40,0x0c]
//------------------------------------------------------------------------------
// Load multiple 3-element structures to three consecutive registers
//------------------------------------------------------------------------------
- ld3 {v0.16b, v1.16b, v2.16b}, [x0]
- ld3 {v15.8h, v16.8h, v17.8h}, [x15]
- ld3 {v31.4s, v0.4s, v1.4s}, [sp]
- ld3 {v0.2d, v1.2d, v2.2d}, [x0]
- ld3 {v0.8b, v1.8b, v2.8b}, [x0]
- ld3 {v15.4h, v16.4h, v17.4h}, [x15]
- ld3 {v31.2s, v0.2s, v1.2s}, [sp]
-// CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x40,0x4c]
-// CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x40,0x4c]
-// CHECK: ld3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x40,0x4c]
-// CHECK: ld3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x40,0x4c]
-// CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x40,0x0c]
-// CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x40,0x0c]
-// CHECK: ld3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x40,0x0c]
+ ld3 { v0.16b, v1.16b, v2.16b }, [x0]
+ ld3 { v15.8h, v16.8h, v17.8h }, [x15]
+ ld3 { v31.4s, v0.4s, v1.4s }, [sp]
+ ld3 { v0.2d, v1.2d, v2.2d }, [x0]
+ ld3 { v0.8b, v1.8b, v2.8b }, [x0]
+ ld3 { v15.4h, v16.4h, v17.4h }, [x15]
+ ld3 { v31.2s, v0.2s, v1.2s }, [sp]
+// CHECK: ld3 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x40,0x40,0x4c]
+// CHECK: ld3 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x45,0x40,0x4c]
+// CHECK: ld3 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x4b,0x40,0x4c]
+// CHECK: ld3 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x4c,0x40,0x4c]
+// CHECK: ld3 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x40,0x40,0x0c]
+// CHECK: ld3 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x45,0x40,0x0c]
+// CHECK: ld3 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x4b,0x40,0x0c]
- ld3 {v0.16b-v2.16b}, [x0]
- ld3 {v15.8h-v17.8h}, [x15]
- ld3 {v31.4s-v1.4s}, [sp]
- ld3 {v0.2d-v2.2d}, [x0]
- ld3 {v0.8b-v2.8b}, [x0]
- ld3 {v15.4h-v17.4h}, [x15]
- ld3 {v31.2s-v1.2s}, [sp]
-// CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0x40,0x40,0x4c]
-// CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0x45,0x40,0x4c]
-// CHECK: ld3 {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0x4b,0x40,0x4c]
-// CHECK: ld3 {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0x4c,0x40,0x4c]
-// CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0x40,0x40,0x0c]
-// CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0x45,0x40,0x0c]
-// CHECK: ld3 {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0x4b,0x40,0x0c]
+ ld3 { v0.16b-v2.16b }, [x0]
+ ld3 { v15.8h-v17.8h }, [x15]
+ ld3 { v31.4s-v1.4s }, [sp]
+ ld3 { v0.2d-v2.2d }, [x0]
+ ld3 { v0.8b-v2.8b }, [x0]
+ ld3 { v15.4h-v17.4h }, [x15]
+ ld3 { v31.2s-v1.2s }, [sp]
+// CHECK: ld3 { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0x40,0x40,0x4c]
+// CHECK: ld3 { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0x45,0x40,0x4c]
+// CHECK: ld3 { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0x4b,0x40,0x4c]
+// CHECK: ld3 { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0x4c,0x40,0x4c]
+// CHECK: ld3 { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0x40,0x40,0x0c]
+// CHECK: ld3 { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0x45,0x40,0x0c]
+// CHECK: ld3 { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0x4b,0x40,0x0c]
//------------------------------------------------------------------------------
// Load multiple 4-element structures to four consecutive registers
//------------------------------------------------------------------------------
- ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
- ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15]
- ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp]
- ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
- ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
- ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15]
- ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
-// CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x40,0x4c]
-// CHECK: ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x40,0x4c]
-// CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x40,0x4c]
-// CHECK: ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x40,0x4c]
-// CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x40,0x0c]
-// CHECK: ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x40,0x0c]
-// CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x40,0x0c]
+ ld4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0]
+ ld4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15]
+ ld4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp]
+ ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+ ld4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0]
+ ld4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15]
+ ld4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
+// CHECK: ld4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x00,0x40,0x4c]
+// CHECK: ld4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x05,0x40,0x4c]
+// CHECK: ld4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x0b,0x40,0x4c]
+// CHECK: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x0c,0x40,0x4c]
+// CHECK: ld4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x00,0x40,0x0c]
+// CHECK: ld4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x05,0x40,0x0c]
+// CHECK: ld4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x0b,0x40,0x0c]
- ld4 {v0.16b-v3.16b}, [x0]
- ld4 {v15.8h-v18.8h}, [x15]
- ld4 {v31.4s-v2.4s}, [sp]
- ld4 {v0.2d-v3.2d}, [x0]
- ld4 {v0.8b-v3.8b}, [x0]
- ld4 {v15.4h-v18.4h}, [x15]
- ld4 {v31.2s-v2.2s}, [sp]
-// CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0x00,0x40,0x4c]
-// CHECK: ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0x05,0x40,0x4c]
-// CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0x0b,0x40,0x4c]
-// CHECK: ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0x0c,0x40,0x4c]
-// CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0x00,0x40,0x0c]
-// CHECK: ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0x05,0x40,0x0c]
-// CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0x0b,0x40,0x0c]
+ ld4 { v0.16b-v3.16b }, [x0]
+ ld4 { v15.8h-v18.8h }, [x15]
+ ld4 { v31.4s-v2.4s }, [sp]
+ ld4 { v0.2d-v3.2d }, [x0]
+ ld4 { v0.8b-v3.8b }, [x0]
+ ld4 { v15.4h-v18.4h }, [x15]
+ ld4 { v31.2s-v2.2s }, [sp]
+// CHECK: ld4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0x00,0x40,0x4c]
+// CHECK: ld4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0x05,0x40,0x4c]
+// CHECK: ld4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0x0b,0x40,0x4c]
+// CHECK: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0x0c,0x40,0x4c]
+// CHECK: ld4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0x00,0x40,0x0c]
+// CHECK: ld4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0x05,0x40,0x0c]
+// CHECK: ld4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0x0b,0x40,0x0c]
diff --git a/test/MC/AArch64/neon-simd-ldst-one-elem.s b/test/MC/AArch64/neon-simd-ldst-one-elem.s
index 140d7525fee6..4febf6d8fe0b 100644
--- a/test/MC/AArch64/neon-simd-ldst-one-elem.s
+++ b/test/MC/AArch64/neon-simd-ldst-one-elem.s
@@ -1,325 +1,325 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
//------------------------------------------------------------------------------
// Load single 1-element structure to all lanes of 1 register
//------------------------------------------------------------------------------
- ld1r {v0.16b}, [x0]
- ld1r {v15.8h}, [x15]
- ld1r {v31.4s}, [sp]
- ld1r {v0.2d}, [x0]
- ld1r {v0.8b}, [x0]
- ld1r {v15.4h}, [x15]
- ld1r {v31.2s}, [sp]
- ld1r {v0.1d}, [x0]
-// CHECK: ld1r {v0.16b}, [x0] // encoding: [0x00,0xc0,0x40,0x4d]
-// CHECK: ld1r {v15.8h}, [x15] // encoding: [0xef,0xc5,0x40,0x4d]
-// CHECK: ld1r {v31.4s}, [sp] // encoding: [0xff,0xcb,0x40,0x4d]
-// CHECK: ld1r {v0.2d}, [x0] // encoding: [0x00,0xcc,0x40,0x4d]
-// CHECK: ld1r {v0.8b}, [x0] // encoding: [0x00,0xc0,0x40,0x0d]
-// CHECK: ld1r {v15.4h}, [x15] // encoding: [0xef,0xc5,0x40,0x0d]
-// CHECK: ld1r {v31.2s}, [sp] // encoding: [0xff,0xcb,0x40,0x0d]
-// CHECK: ld1r {v0.1d}, [x0] // encoding: [0x00,0xcc,0x40,0x0d]
+ ld1r { v0.16b }, [x0]
+ ld1r { v15.8h }, [x15]
+ ld1r { v31.4s }, [sp]
+ ld1r { v0.2d }, [x0]
+ ld1r { v0.8b }, [x0]
+ ld1r { v15.4h }, [x15]
+ ld1r { v31.2s }, [sp]
+ ld1r { v0.1d }, [x0]
+// CHECK: ld1r { v0.16b }, [x0] // encoding: [0x00,0xc0,0x40,0x4d]
+// CHECK: ld1r { v15.8h }, [x15] // encoding: [0xef,0xc5,0x40,0x4d]
+// CHECK: ld1r { v31.4s }, [sp] // encoding: [0xff,0xcb,0x40,0x4d]
+// CHECK: ld1r { v0.2d }, [x0] // encoding: [0x00,0xcc,0x40,0x4d]
+// CHECK: ld1r { v0.8b }, [x0] // encoding: [0x00,0xc0,0x40,0x0d]
+// CHECK: ld1r { v15.4h }, [x15] // encoding: [0xef,0xc5,0x40,0x0d]
+// CHECK: ld1r { v31.2s }, [sp] // encoding: [0xff,0xcb,0x40,0x0d]
+// CHECK: ld1r { v0.1d }, [x0] // encoding: [0x00,0xcc,0x40,0x0d]
//------------------------------------------------------------------------------
// Load single N-element structure to all lanes of N consecutive
// registers (N = 2,3,4)
//------------------------------------------------------------------------------
- ld2r {v0.16b, v1.16b}, [x0]
- ld2r {v15.8h, v16.8h}, [x15]
- ld2r {v31.4s, v0.4s}, [sp]
- ld2r {v0.2d, v1.2d}, [x0]
- ld2r {v0.8b, v1.8b}, [x0]
- ld2r {v15.4h, v16.4h}, [x15]
- ld2r {v31.2s, v0.2s}, [sp]
- ld2r {v31.1d, v0.1d}, [sp]
-// CHECK: ld2r {v0.16b, v1.16b}, [x0] // encoding: [0x00,0xc0,0x60,0x4d]
-// CHECK: ld2r {v15.8h, v16.8h}, [x15] // encoding: [0xef,0xc5,0x60,0x4d]
-// CHECK: ld2r {v31.4s, v0.4s}, [sp] // encoding: [0xff,0xcb,0x60,0x4d]
-// CHECK: ld2r {v0.2d, v1.2d}, [x0] // encoding: [0x00,0xcc,0x60,0x4d]
-// CHECK: ld2r {v0.8b, v1.8b}, [x0] // encoding: [0x00,0xc0,0x60,0x0d]
-// CHECK: ld2r {v15.4h, v16.4h}, [x15] // encoding: [0xef,0xc5,0x60,0x0d]
-// CHECK: ld2r {v31.2s, v0.2s}, [sp] // encoding: [0xff,0xcb,0x60,0x0d]
-// CHECK: ld2r {v31.1d, v0.1d}, [sp] // encoding: [0xff,0xcf,0x60,0x0d]
+ ld2r { v0.16b, v1.16b }, [x0]
+ ld2r { v15.8h, v16.8h }, [x15]
+ ld2r { v31.4s, v0.4s }, [sp]
+ ld2r { v0.2d, v1.2d }, [x0]
+ ld2r { v0.8b, v1.8b }, [x0]
+ ld2r { v15.4h, v16.4h }, [x15]
+ ld2r { v31.2s, v0.2s }, [sp]
+ ld2r { v31.1d, v0.1d }, [sp]
+// CHECK: ld2r { v0.16b, v1.16b }, [x0] // encoding: [0x00,0xc0,0x60,0x4d]
+// CHECK: ld2r { v15.8h, v16.8h }, [x15] // encoding: [0xef,0xc5,0x60,0x4d]
+// CHECK: ld2r { v31.4s, v0.4s }, [sp] // encoding: [0xff,0xcb,0x60,0x4d]
+// CHECK: ld2r { v0.2d, v1.2d }, [x0] // encoding: [0x00,0xcc,0x60,0x4d]
+// CHECK: ld2r { v0.8b, v1.8b }, [x0] // encoding: [0x00,0xc0,0x60,0x0d]
+// CHECK: ld2r { v15.4h, v16.4h }, [x15] // encoding: [0xef,0xc5,0x60,0x0d]
+// CHECK: ld2r { v31.2s, v0.2s }, [sp] // encoding: [0xff,0xcb,0x60,0x0d]
+// CHECK: ld2r { v31.1d, v0.1d }, [sp] // encoding: [0xff,0xcf,0x60,0x0d]
- ld3r {v0.16b, v1.16b, v2.16b}, [x0]
- ld3r {v15.8h, v16.8h, v17.8h}, [x15]
- ld3r {v31.4s, v0.4s, v1.4s}, [sp]
- ld3r {v0.2d, v1.2d, v2.2d}, [x0]
- ld3r {v0.8b, v1.8b, v2.8b}, [x0]
- ld3r {v15.4h, v16.4h, v17.4h}, [x15]
- ld3r {v31.2s, v0.2s, v1.2s}, [sp]
- ld3r {v31.1d, v0.1d, v1.1d}, [sp]
-// CHECK: ld3r {v0.16b, v1.16b, v2.16b}, [x0] // encoding: [0x00,0xe0,0x40,0x4d]
-// CHECK: ld3r {v15.8h, v16.8h, v17.8h}, [x15] // encoding: [0xef,0xe5,0x40,0x4d]
-// CHECK: ld3r {v31.4s, v0.4s, v1.4s}, [sp] // encoding: [0xff,0xeb,0x40,0x4d]
-// CHECK: ld3r {v0.2d, v1.2d, v2.2d}, [x0] // encoding: [0x00,0xec,0x40,0x4d]
-// CHECK: ld3r {v0.8b, v1.8b, v2.8b}, [x0] // encoding: [0x00,0xe0,0x40,0x0d]
-// CHECK: ld3r {v15.4h, v16.4h, v17.4h}, [x15] // encoding: [0xef,0xe5,0x40,0x0d]
-// CHECK: ld3r {v31.2s, v0.2s, v1.2s}, [sp] // encoding: [0xff,0xeb,0x40,0x0d]
-// CHECK: ld3r {v31.1d, v0.1d, v1.1d}, [sp] // encoding: [0xff,0xef,0x40,0x0d]
+ ld3r { v0.16b, v1.16b, v2.16b }, [x0]
+ ld3r { v15.8h, v16.8h, v17.8h }, [x15]
+ ld3r { v31.4s, v0.4s, v1.4s }, [sp]
+ ld3r { v0.2d, v1.2d, v2.2d }, [x0]
+ ld3r { v0.8b, v1.8b, v2.8b }, [x0]
+ ld3r { v15.4h, v16.4h, v17.4h }, [x15]
+ ld3r { v31.2s, v0.2s, v1.2s }, [sp]
+ ld3r { v31.1d, v0.1d, v1.1d }, [sp]
+// CHECK: ld3r { v0.16b, v1.16b, v2.16b }, [x0] // encoding: [0x00,0xe0,0x40,0x4d]
+// CHECK: ld3r { v15.8h, v16.8h, v17.8h }, [x15] // encoding: [0xef,0xe5,0x40,0x4d]
+// CHECK: ld3r { v31.4s, v0.4s, v1.4s }, [sp] // encoding: [0xff,0xeb,0x40,0x4d]
+// CHECK: ld3r { v0.2d, v1.2d, v2.2d }, [x0] // encoding: [0x00,0xec,0x40,0x4d]
+// CHECK: ld3r { v0.8b, v1.8b, v2.8b }, [x0] // encoding: [0x00,0xe0,0x40,0x0d]
+// CHECK: ld3r { v15.4h, v16.4h, v17.4h }, [x15] // encoding: [0xef,0xe5,0x40,0x0d]
+// CHECK: ld3r { v31.2s, v0.2s, v1.2s }, [sp] // encoding: [0xff,0xeb,0x40,0x0d]
+// CHECK: ld3r { v31.1d, v0.1d, v1.1d }, [sp] // encoding: [0xff,0xef,0x40,0x0d]
- ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
- ld4r {v15.8h, v16.8h, v17.8h, v18.8h}, [x15]
- ld4r {v31.4s, v0.4s, v1.4s, v2.4s}, [sp]
- ld4r {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
- ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
- ld4r {v15.4h, v16.4h, v17.4h, v18.4h}, [x15]
- ld4r {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
- ld4r {v31.1d, v0.1d, v1.1d, v2.1d}, [sp]
-// CHECK: ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x0] // encoding: [0x00,0xe0,0x60,0x4d]
-// CHECK: ld4r {v15.8h, v16.8h, v17.8h, v18.8h}, [x15] // encoding: [0xef,0xe5,0x60,0x4d]
-// CHECK: ld4r {v31.4s, v0.4s, v1.4s, v2.4s}, [sp] // encoding: [0xff,0xeb,0x60,0x4d]
-// CHECK: ld4r {v0.2d, v1.2d, v2.2d, v3.2d}, [x0] // encoding: [0x00,0xec,0x60,0x4d]
-// CHECK: ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x0] // encoding: [0x00,0xe0,0x60,0x0d]
-// CHECK: ld4r {v15.4h, v16.4h, v17.4h, v18.4h}, [x15] // encoding: [0xef,0xe5,0x60,0x0d]
-// CHECK: ld4r {v31.2s, v0.2s, v1.2s, v2.2s}, [sp] // encoding: [0xff,0xeb,0x60,0x0d]
-// CHECK: ld4r {v31.1d, v0.1d, v1.1d, v2.1d}, [sp] // encoding: [0xff,0xef,0x60,0x0d]
+ ld4r { v0.16b, v1.16b, v2.16b, v3.16b }, [x0]
+ ld4r { v15.8h, v16.8h, v17.8h, v18.8h }, [x15]
+ ld4r { v31.4s, v0.4s, v1.4s, v2.4s }, [sp]
+ ld4r { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+ ld4r { v0.8b, v1.8b, v2.8b, v3.8b }, [x0]
+ ld4r { v15.4h, v16.4h, v17.4h, v18.4h }, [x15]
+ ld4r { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
+ ld4r { v31.1d, v0.1d, v1.1d, v2.1d }, [sp]
+// CHECK: ld4r { v0.16b, v1.16b, v2.16b, v3.16b }, [x0] // encoding: [0x00,0xe0,0x60,0x4d]
+// CHECK: ld4r { v15.8h, v16.8h, v17.8h, v18.8h }, [x15] // encoding: [0xef,0xe5,0x60,0x4d]
+// CHECK: ld4r { v31.4s, v0.4s, v1.4s, v2.4s }, [sp] // encoding: [0xff,0xeb,0x60,0x4d]
+// CHECK: ld4r { v0.2d, v1.2d, v2.2d, v3.2d }, [x0] // encoding: [0x00,0xec,0x60,0x4d]
+// CHECK: ld4r { v0.8b, v1.8b, v2.8b, v3.8b }, [x0] // encoding: [0x00,0xe0,0x60,0x0d]
+// CHECK: ld4r { v15.4h, v16.4h, v17.4h, v18.4h }, [x15] // encoding: [0xef,0xe5,0x60,0x0d]
+// CHECK: ld4r { v31.2s, v0.2s, v1.2s, v2.2s }, [sp] // encoding: [0xff,0xeb,0x60,0x0d]
+// CHECK: ld4r { v31.1d, v0.1d, v1.1d, v2.1d }, [sp] // encoding: [0xff,0xef,0x60,0x0d]
//------------------------------------------------------------------------------
// Load single 1-element structure to one lane of 1 register.
//------------------------------------------------------------------------------
- ld1 {v0.b}[9], [x0]
- ld1 {v15.h}[7], [x15]
- ld1 {v31.s}[3], [sp]
- ld1 {v0.d}[1], [x0]
-// CHECK: ld1 {v0.b}[9], [x0] // encoding: [0x00,0x04,0x40,0x4d]
-// CHECK: ld1 {v15.h}[7], [x15] // encoding: [0xef,0x59,0x40,0x4d]
-// CHECK: ld1 {v31.s}[3], [sp] // encoding: [0xff,0x93,0x40,0x4d]
-// CHECK: ld1 {v0.d}[1], [x0] // encoding: [0x00,0x84,0x40,0x4d]
+ ld1 { v0.b }[9], [x0]
+ ld1 { v15.h }[7], [x15]
+ ld1 { v31.s }[3], [sp]
+ ld1 { v0.d }[1], [x0]
+// CHECK: ld1 { v0.b }[9], [x0] // encoding: [0x00,0x04,0x40,0x4d]
+// CHECK: ld1 { v15.h }[7], [x15] // encoding: [0xef,0x59,0x40,0x4d]
+// CHECK: ld1 { v31.s }[3], [sp] // encoding: [0xff,0x93,0x40,0x4d]
+// CHECK: ld1 { v0.d }[1], [x0] // encoding: [0x00,0x84,0x40,0x4d]
//------------------------------------------------------------------------------
// Load single N-element structure to one lane of N consecutive registers
// (N = 2,3,4)
//------------------------------------------------------------------------------
- ld2 {v0.b, v1.b}[9], [x0]
- ld2 {v15.h, v16.h}[7], [x15]
- ld2 {v31.s, v0.s}[3], [sp]
- ld2 {v0.d, v1.d}[1], [x0]
-// CHECK: ld2 {v0.b, v1.b}[9], [x0] // encoding: [0x00,0x04,0x60,0x4d]
-// CHECK: ld2 {v15.h, v16.h}[7], [x15] // encoding: [0xef,0x59,0x60,0x4d]
-// CHECK: ld2 {v31.s, v0.s}[3], [sp] // encoding: [0xff,0x93,0x60,0x4d]
-// CHECK: ld2 {v0.d, v1.d}[1], [x0] // encoding: [0x00,0x84,0x60,0x4d]
+ ld2 { v0.b, v1.b }[9], [x0]
+ ld2 { v15.h, v16.h }[7], [x15]
+ ld2 { v31.s, v0.s }[3], [sp]
+ ld2 { v0.d, v1.d }[1], [x0]
+// CHECK: ld2 { v0.b, v1.b }[9], [x0] // encoding: [0x00,0x04,0x60,0x4d]
+// CHECK: ld2 { v15.h, v16.h }[7], [x15] // encoding: [0xef,0x59,0x60,0x4d]
+// CHECK: ld2 { v31.s, v0.s }[3], [sp] // encoding: [0xff,0x93,0x60,0x4d]
+// CHECK: ld2 { v0.d, v1.d }[1], [x0] // encoding: [0x00,0x84,0x60,0x4d]
- ld3 {v0.b, v1.b, v2.b}[9], [x0]
- ld3 {v15.h, v16.h, v17.h}[7], [x15]
- ld3 {v31.s, v0.s, v1.s}[3], [sp]
- ld3 {v0.d, v1.d, v2.d}[1], [x0]
-// CHECK: ld3 {v0.b, v1.b, v2.b}[9], [x0] // encoding: [0x00,0x24,0x40,0x4d]
-// CHECK: ld3 {v15.h, v16.h, v17.h}[7], [x15] // encoding: [0xef,0x79,0x40,0x4d]
-// CHECK: ld3 {v31.s, v0.s, v1.s}[3], [sp] // encoding: [0xff,0xb3,0x40,0x4d]
-// CHECK: ld3 {v0.d, v1.d, v2.d}[1], [x0] // encoding: [0x00,0xa4,0x40,0x4d]
+ ld3 { v0.b, v1.b, v2.b }[9], [x0]
+ ld3 { v15.h, v16.h, v17.h }[7], [x15]
+ ld3 { v31.s, v0.s, v1.s }[3], [sp]
+ ld3 { v0.d, v1.d, v2.d }[1], [x0]
+// CHECK: ld3 { v0.b, v1.b, v2.b }[9], [x0] // encoding: [0x00,0x24,0x40,0x4d]
+// CHECK: ld3 { v15.h, v16.h, v17.h }[7], [x15] // encoding: [0xef,0x79,0x40,0x4d]
+// CHECK: ld3 { v31.s, v0.s, v1.s }[3], [sp] // encoding: [0xff,0xb3,0x40,0x4d]
+// CHECK: ld3 { v0.d, v1.d, v2.d }[1], [x0] // encoding: [0x00,0xa4,0x40,0x4d]
- ld4 {v0.b, v1.b, v2.b, v3.b}[9], [x0]
- ld4 {v15.h, v16.h, v17.h, v18.h}[7], [x15]
- ld4 {v31.s, v0.s, v1.s, v2.s}[3], [sp]
- ld4 {v0.d, v1.d, v2.d, v3.d}[1], [x0]
-// CHECK: ld4 {v0.b, v1.b, v2.b, v3.b}[9], [x0] // encoding: [0x00,0x24,0x60,0x4d]
-// CHECK: ld4 {v15.h, v16.h, v17.h, v18.h}[7], [x15] // encoding: [0xef,0x79,0x60,0x4d]
-// CHECK: ld4 {v31.s, v0.s, v1.s, v2.s}[3], [sp] // encoding: [0xff,0xb3,0x60,0x4d]
-// CHECK: ld4 {v0.d, v1.d, v2.d, v3.d}[1], [x0] // encoding: [0x00,0xa4,0x60,0x4d]
+ ld4 { v0.b, v1.b, v2.b, v3.b }[9], [x0]
+ ld4 { v15.h, v16.h, v17.h, v18.h }[7], [x15]
+ ld4 { v31.s, v0.s, v1.s, v2.s }[3], [sp]
+ ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0]
+// CHECK: ld4 { v0.b, v1.b, v2.b, v3.b }[9], [x0] // encoding: [0x00,0x24,0x60,0x4d]
+// CHECK: ld4 { v15.h, v16.h, v17.h, v18.h }[7], [x15] // encoding: [0xef,0x79,0x60,0x4d]
+// CHECK: ld4 { v31.s, v0.s, v1.s, v2.s }[3], [sp] // encoding: [0xff,0xb3,0x60,0x4d]
+// CHECK: ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0] // encoding: [0x00,0xa4,0x60,0x4d]
//------------------------------------------------------------------------------
// Store single 1-element structure from one lane of 1 register.
//------------------------------------------------------------------------------
- st1 {v0.b}[9], [x0]
- st1 {v15.h}[7], [x15]
- st1 {v31.s}[3], [sp]
- st1 {v0.d}[1], [x0]
-// CHECK: st1 {v0.b}[9], [x0] // encoding: [0x00,0x04,0x00,0x4d]
-// CHECK: st1 {v15.h}[7], [x15] // encoding: [0xef,0x59,0x00,0x4d]
-// CHECK: st1 {v31.s}[3], [sp] // encoding: [0xff,0x93,0x00,0x4d]
-// CHECK: st1 {v0.d}[1], [x0] // encoding: [0x00,0x84,0x00,0x4d]
+ st1 { v0.b }[9], [x0]
+ st1 { v15.h }[7], [x15]
+ st1 { v31.s }[3], [sp]
+ st1 { v0.d }[1], [x0]
+// CHECK: st1 { v0.b }[9], [x0] // encoding: [0x00,0x04,0x00,0x4d]
+// CHECK: st1 { v15.h }[7], [x15] // encoding: [0xef,0x59,0x00,0x4d]
+// CHECK: st1 { v31.s }[3], [sp] // encoding: [0xff,0x93,0x00,0x4d]
+// CHECK: st1 { v0.d }[1], [x0] // encoding: [0x00,0x84,0x00,0x4d]
//------------------------------------------------------------------------------
// Store single N-element structure from one lane of N consecutive registers
// (N = 2,3,4)
//------------------------------------------------------------------------------
- st2 {v0.b, v1.b}[9], [x0]
- st2 {v15.h, v16.h}[7], [x15]
- st2 {v31.s, v0.s}[3], [sp]
- st2 {v0.d, v1.d}[1], [x0]
-// CHECK: st2 {v0.b, v1.b}[9], [x0] // encoding: [0x00,0x04,0x20,0x4d]
-// CHECK: st2 {v15.h, v16.h}[7], [x15] // encoding: [0xef,0x59,0x20,0x4d]
-// CHECK: st2 {v31.s, v0.s}[3], [sp] // encoding: [0xff,0x93,0x20,0x4d]
-// CHECK: st2 {v0.d, v1.d}[1], [x0] // encoding: [0x00,0x84,0x20,0x4d]
+ st2 { v0.b, v1.b }[9], [x0]
+ st2 { v15.h, v16.h }[7], [x15]
+ st2 { v31.s, v0.s }[3], [sp]
+ st2 { v0.d, v1.d }[1], [x0]
+// CHECK: st2 { v0.b, v1.b }[9], [x0] // encoding: [0x00,0x04,0x20,0x4d]
+// CHECK: st2 { v15.h, v16.h }[7], [x15] // encoding: [0xef,0x59,0x20,0x4d]
+// CHECK: st2 { v31.s, v0.s }[3], [sp] // encoding: [0xff,0x93,0x20,0x4d]
+// CHECK: st2 { v0.d, v1.d }[1], [x0] // encoding: [0x00,0x84,0x20,0x4d]
- st3 {v0.b, v1.b, v2.b}[9], [x0]
- st3 {v15.h, v16.h, v17.h}[7], [x15]
- st3 {v31.s, v0.s, v1.s}[3], [sp]
- st3 {v0.d, v1.d, v2.d}[1], [x0]
-// CHECK: st3 {v0.b, v1.b, v2.b}[9], [x0] // encoding: [0x00,0x24,0x00,0x4d]
-// CHECK: st3 {v15.h, v16.h, v17.h}[7], [x15] // encoding: [0xef,0x79,0x00,0x4d]
-// CHECK: st3 {v31.s, v0.s, v1.s}[3], [sp] // encoding: [0xff,0xb3,0x00,0x4d]
-// CHECK: st3 {v0.d, v1.d, v2.d}[1], [x0] // encoding: [0x00,0xa4,0x00,0x4d]
+ st3 { v0.b, v1.b, v2.b }[9], [x0]
+ st3 { v15.h, v16.h, v17.h }[7], [x15]
+ st3 { v31.s, v0.s, v1.s }[3], [sp]
+ st3 { v0.d, v1.d, v2.d }[1], [x0]
+// CHECK: st3 { v0.b, v1.b, v2.b }[9], [x0] // encoding: [0x00,0x24,0x00,0x4d]
+// CHECK: st3 { v15.h, v16.h, v17.h }[7], [x15] // encoding: [0xef,0x79,0x00,0x4d]
+// CHECK: st3 { v31.s, v0.s, v1.s }[3], [sp] // encoding: [0xff,0xb3,0x00,0x4d]
+// CHECK: st3 { v0.d, v1.d, v2.d }[1], [x0] // encoding: [0x00,0xa4,0x00,0x4d]
- st4 {v0.b, v1.b, v2.b, v3.b}[9], [x0]
- st4 {v15.h, v16.h, v17.h, v18.h}[7], [x15]
- st4 {v31.s, v0.s, v1.s, v2.s}[3], [sp]
- st4 {v0.d, v1.d, v2.d, v3.d}[1], [x0]
-// CHECK: st4 {v0.b, v1.b, v2.b, v3.b}[9], [x0] // encoding: [0x00,0x24,0x20,0x4d]
-// CHECK: st4 {v15.h, v16.h, v17.h, v18.h}[7], [x15] // encoding: [0xef,0x79,0x20,0x4d]
-// CHECK: st4 {v31.s, v0.s, v1.s, v2.s}[3], [sp] // encoding: [0xff,0xb3,0x20,0x4d]
-// CHECK: st4 {v0.d, v1.d, v2.d, v3.d}[1], [x0] // encoding: [0x00,0xa4,0x20,0x4d]
+ st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0]
+ st4 { v15.h, v16.h, v17.h, v18.h }[7], [x15]
+ st4 { v31.s, v0.s, v1.s, v2.s }[3], [sp]
+ st4 { v0.d, v1.d, v2.d, v3.d }[1], [x0]
+// CHECK: st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0] // encoding: [0x00,0x24,0x20,0x4d]
+// CHECK: st4 { v15.h, v16.h, v17.h, v18.h }[7], [x15] // encoding: [0xef,0x79,0x20,0x4d]
+// CHECK: st4 { v31.s, v0.s, v1.s, v2.s }[3], [sp] // encoding: [0xff,0xb3,0x20,0x4d]
+// CHECK: st4 { v0.d, v1.d, v2.d, v3.d }[1], [x0] // encoding: [0x00,0xa4,0x20,0x4d]
//------------------------------------------------------------------------------
// Post-index load single 1-element structure to all lanes of 1 register
//------------------------------------------------------------------------------
- ld1r {v0.16b}, [x0], #1
- ld1r {v15.8h}, [x15], #2
- ld1r {v31.4s}, [sp], #4
- ld1r {v0.2d}, [x0], #8
- ld1r {v0.8b}, [x0], x0
- ld1r {v15.4h}, [x15], x1
- ld1r {v31.2s}, [sp], x2
- ld1r {v0.1d}, [x0], x3
-// CHECK: ld1r {v0.16b}, [x0], #1 // encoding: [0x00,0xc0,0xdf,0x4d]
-// CHECK: ld1r {v15.8h}, [x15], #2 // encoding: [0xef,0xc5,0xdf,0x4d]
-// CHECK: ld1r {v31.4s}, [sp], #4 // encoding: [0xff,0xcb,0xdf,0x4d]
-// CHECK: ld1r {v0.2d}, [x0], #8 // encoding: [0x00,0xcc,0xdf,0x4d]
-// CHECK: ld1r {v0.8b}, [x0], x0 // encoding: [0x00,0xc0,0xc0,0x0d]
-// CHECK: ld1r {v15.4h}, [x15], x1 // encoding: [0xef,0xc5,0xc1,0x0d]
-// CHECK: ld1r {v31.2s}, [sp], x2 // encoding: [0xff,0xcb,0xc2,0x0d]
-// CHECK: ld1r {v0.1d}, [x0], x3 // encoding: [0x00,0xcc,0xc3,0x0d]
+ ld1r { v0.16b }, [x0], #1
+ ld1r { v15.8h }, [x15], #2
+ ld1r { v31.4s }, [sp], #4
+ ld1r { v0.2d }, [x0], #8
+ ld1r { v0.8b }, [x0], x0
+ ld1r { v15.4h }, [x15], x1
+ ld1r { v31.2s }, [sp], x2
+ ld1r { v0.1d }, [x0], x3
+// CHECK: ld1r { v0.16b }, [x0], #1 // encoding: [0x00,0xc0,0xdf,0x4d]
+// CHECK: ld1r { v15.8h }, [x15], #2 // encoding: [0xef,0xc5,0xdf,0x4d]
+// CHECK: ld1r { v31.4s }, [sp], #4 // encoding: [0xff,0xcb,0xdf,0x4d]
+// CHECK: ld1r { v0.2d }, [x0], #8 // encoding: [0x00,0xcc,0xdf,0x4d]
+// CHECK: ld1r { v0.8b }, [x0], x0 // encoding: [0x00,0xc0,0xc0,0x0d]
+// CHECK: ld1r { v15.4h }, [x15], x1 // encoding: [0xef,0xc5,0xc1,0x0d]
+// CHECK: ld1r { v31.2s }, [sp], x2 // encoding: [0xff,0xcb,0xc2,0x0d]
+// CHECK: ld1r { v0.1d }, [x0], x3 // encoding: [0x00,0xcc,0xc3,0x0d]
//------------------------------------------------------------------------------
// Post-index load single N-element structure to all lanes of N consecutive
// registers (N = 2,3,4)
//------------------------------------------------------------------------------
- ld2r {v0.16b, v1.16b}, [x0], #2
- ld2r {v15.8h, v16.8h}, [x15], #4
- ld2r {v31.4s, v0.4s}, [sp], #8
- ld2r {v0.2d, v1.2d}, [x0], #16
- ld2r {v0.8b, v1.8b}, [x0], x6
- ld2r {v15.4h, v16.4h}, [x15], x7
- ld2r {v31.2s, v0.2s}, [sp], x9
- ld2r {v31.1d, v0.1d}, [x0], x5
-// CHECK: ld2r {v0.16b, v1.16b}, [x0], #2 // encoding: [0x00,0xc0,0xff,0x4d]
-// CHECK: ld2r {v15.8h, v16.8h}, [x15], #4 // encoding: [0xef,0xc5,0xff,0x4d]
-// CHECK: ld2r {v31.4s, v0.4s}, [sp], #8 // encoding: [0xff,0xcb,0xff,0x4d]
-// CHECK: ld2r {v0.2d, v1.2d}, [x0], #16 // encoding: [0x00,0xcc,0xff,0x4d]
-// CHECK: ld2r {v0.8b, v1.8b}, [x0], x6 // encoding: [0x00,0xc0,0xe6,0x0d]
-// CHECK: ld2r {v15.4h, v16.4h}, [x15], x7 // encoding: [0xef,0xc5,0xe7,0x0d]
-// CHECK: ld2r {v31.2s, v0.2s}, [sp], x9 // encoding: [0xff,0xcb,0xe9,0x0d]
-// CHECK: ld2r {v31.1d, v0.1d}, [x0], x5 // encoding: [0x1f,0xcc,0xe5,0x0d]
+ ld2r { v0.16b, v1.16b }, [x0], #2
+ ld2r { v15.8h, v16.8h }, [x15], #4
+ ld2r { v31.4s, v0.4s }, [sp], #8
+ ld2r { v0.2d, v1.2d }, [x0], #16
+ ld2r { v0.8b, v1.8b }, [x0], x6
+ ld2r { v15.4h, v16.4h }, [x15], x7
+ ld2r { v31.2s, v0.2s }, [sp], x9
+ ld2r { v31.1d, v0.1d }, [x0], x5
+// CHECK: ld2r { v0.16b, v1.16b }, [x0], #2 // encoding: [0x00,0xc0,0xff,0x4d]
+// CHECK: ld2r { v15.8h, v16.8h }, [x15], #4 // encoding: [0xef,0xc5,0xff,0x4d]
+// CHECK: ld2r { v31.4s, v0.4s }, [sp], #8 // encoding: [0xff,0xcb,0xff,0x4d]
+// CHECK: ld2r { v0.2d, v1.2d }, [x0], #16 // encoding: [0x00,0xcc,0xff,0x4d]
+// CHECK: ld2r { v0.8b, v1.8b }, [x0], x6 // encoding: [0x00,0xc0,0xe6,0x0d]
+// CHECK: ld2r { v15.4h, v16.4h }, [x15], x7 // encoding: [0xef,0xc5,0xe7,0x0d]
+// CHECK: ld2r { v31.2s, v0.2s }, [sp], x9 // encoding: [0xff,0xcb,0xe9,0x0d]
+// CHECK: ld2r { v31.1d, v0.1d }, [x0], x5 // encoding: [0x1f,0xcc,0xe5,0x0d]
- ld3r {v0.16b, v1.16b, v2.16b}, [x0], x9
- ld3r {v15.8h, v16.8h, v17.8h}, [x15], x6
- ld3r {v31.4s, v0.4s, v1.4s}, [sp], x7
- ld3r {v0.2d, v1.2d, v2.2d}, [x0], x5
- ld3r {v0.8b, v1.8b, v2.8b}, [x0], #3
- ld3r {v15.4h, v16.4h, v17.4h}, [x15], #6
- ld3r {v31.2s, v0.2s, v1.2s}, [sp], #12
- ld3r {v31.1d, v0.1d, v1.1d}, [sp], #24
-// CHECK: ld3r {v0.16b, v1.16b, v2.16b}, [x0], x9 // encoding: [0x00,0xe0,0xc9,0x4d]
-// CHECK: ld3r {v15.8h, v16.8h, v17.8h}, [x15], x6 // encoding: [0xef,0xe5,0xc6,0x4d]
-// CHECK: ld3r {v31.4s, v0.4s, v1.4s}, [sp], x7 // encoding: [0xff,0xeb,0xc7,0x4d]
-// CHECK: ld3r {v0.2d, v1.2d, v2.2d}, [x0], x5 // encoding: [0x00,0xec,0xc5,0x4d]
-// CHECK: ld3r {v0.8b, v1.8b, v2.8b}, [x0], #3 // encoding: [0x00,0xe0,0xdf,0x0d]
-// CHECK: ld3r {v15.4h, v16.4h, v17.4h}, [x15], #6 // encoding: [0xef,0xe5,0xdf,0x0d]
-// CHECK: ld3r {v31.2s, v0.2s, v1.2s}, [sp], #12 // encoding: [0xff,0xeb,0xdf,0x0d]
-// CHECK: ld3r {v31.1d, v0.1d, v1.1d}, [sp], #24 // encoding: [0xff,0xef,0xdf,0x0d]
+ ld3r { v0.16b, v1.16b, v2.16b }, [x0], x9
+ ld3r { v15.8h, v16.8h, v17.8h }, [x15], x6
+ ld3r { v31.4s, v0.4s, v1.4s }, [sp], x7
+ ld3r { v0.2d, v1.2d, v2.2d }, [x0], x5
+ ld3r { v0.8b, v1.8b, v2.8b }, [x0], #3
+ ld3r { v15.4h, v16.4h, v17.4h }, [x15], #6
+ ld3r { v31.2s, v0.2s, v1.2s }, [sp], #12
+ ld3r { v31.1d, v0.1d, v1.1d }, [sp], #24
+// CHECK: ld3r { v0.16b, v1.16b, v2.16b }, [x0], x9 // encoding: [0x00,0xe0,0xc9,0x4d]
+// CHECK: ld3r { v15.8h, v16.8h, v17.8h }, [x15], x6 // encoding: [0xef,0xe5,0xc6,0x4d]
+// CHECK: ld3r { v31.4s, v0.4s, v1.4s }, [sp], x7 // encoding: [0xff,0xeb,0xc7,0x4d]
+// CHECK: ld3r { v0.2d, v1.2d, v2.2d }, [x0], x5 // encoding: [0x00,0xec,0xc5,0x4d]
+// CHECK: ld3r { v0.8b, v1.8b, v2.8b }, [x0], #3 // encoding: [0x00,0xe0,0xdf,0x0d]
+// CHECK: ld3r { v15.4h, v16.4h, v17.4h }, [x15], #6 // encoding: [0xef,0xe5,0xdf,0x0d]
+// CHECK: ld3r { v31.2s, v0.2s, v1.2s }, [sp], #12 // encoding: [0xff,0xeb,0xdf,0x0d]
+// CHECK: ld3r { v31.1d, v0.1d, v1.1d }, [sp], #24 // encoding: [0xff,0xef,0xdf,0x0d]
- ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #4
- ld4r {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], #8
- ld4r {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #16
- ld4r {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #32
- ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x5
- ld4r {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x9
- ld4r {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], x30
- ld4r {v31.1d, v0.1d, v1.1d, v2.1d}, [sp], x7
-// CHECK: ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], #4 // encoding: [0x00,0xe0,0xff,0x4d]
-// CHECK: ld4r {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], #8 // encoding: [0xef,0xe5,0xff,0x4d]
-// CHECK: ld4r {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #16 // encoding: [0xff,0xeb,0xff,0x4d]
-// CHECK: ld4r {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #32 // encoding: [0x00,0xec,0xff,0x4d]
-// CHECK: ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x5 // encoding: [0x00,0xe0,0xe5,0x0d]
-// CHECK: ld4r {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x9 // encoding: [0xef,0xe5,0xe9,0x0d]
-// CHECK: ld4r {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], x30 // encoding: [0xff,0xeb,0xfe,0x0d]
-// CHECK: ld4r {v31.1d, v0.1d, v1.1d, v2.1d}, [sp], x7 // encoding: [0xff,0xef,0xe7,0x0d]
+ ld4r { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], #4
+ ld4r { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], #8
+ ld4r { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #16
+ ld4r { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #32
+ ld4r { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x5
+ ld4r { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x9
+ ld4r { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], x30
+ ld4r { v31.1d, v0.1d, v1.1d, v2.1d }, [sp], x7
+// CHECK: ld4r { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], #4 // encoding: [0x00,0xe0,0xff,0x4d]
+// CHECK: ld4r { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], #8 // encoding: [0xef,0xe5,0xff,0x4d]
+// CHECK: ld4r { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #16 // encoding: [0xff,0xeb,0xff,0x4d]
+// CHECK: ld4r { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #32 // encoding: [0x00,0xec,0xff,0x4d]
+// CHECK: ld4r { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x5 // encoding: [0x00,0xe0,0xe5,0x0d]
+// CHECK: ld4r { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x9 // encoding: [0xef,0xe5,0xe9,0x0d]
+// CHECK: ld4r { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], x30 // encoding: [0xff,0xeb,0xfe,0x0d]
+// CHECK: ld4r { v31.1d, v0.1d, v1.1d, v2.1d }, [sp], x7 // encoding: [0xff,0xef,0xe7,0x0d]
//------------------------------------------------------------------------------
// Post-index load single 1-element structure to one lane of 1 register.
//------------------------------------------------------------------------------
- ld1 {v0.b}[9], [x0], #1
- ld1 {v15.h}[7], [x15], x9
- ld1 {v31.s}[3], [sp], x6
- ld1 {v0.d}[1], [x0], #8
-// CHECK: ld1 {v0.b}[9], [x0], #1 // encoding: [0x00,0x04,0xdf,0x4d]
-// CHECK: ld1 {v15.h}[7], [x15], x9 // encoding: [0xef,0x59,0xc9,0x4d]
-// CHECK: ld1 {v31.s}[3], [sp], x6 // encoding: [0xff,0x93,0xc6,0x4d]
-// CHECK: ld1 {v0.d}[1], [x0], #8 // encoding: [0x00,0x84,0xdf,0x4d]
+ ld1 { v0.b }[9], [x0], #1
+ ld1 { v15.h }[7], [x15], x9
+ ld1 { v31.s }[3], [sp], x6
+ ld1 { v0.d }[1], [x0], #8
+// CHECK: ld1 { v0.b }[9], [x0], #1 // encoding: [0x00,0x04,0xdf,0x4d]
+// CHECK: ld1 { v15.h }[7], [x15], x9 // encoding: [0xef,0x59,0xc9,0x4d]
+// CHECK: ld1 { v31.s }[3], [sp], x6 // encoding: [0xff,0x93,0xc6,0x4d]
+// CHECK: ld1 { v0.d }[1], [x0], #8 // encoding: [0x00,0x84,0xdf,0x4d]
//------------------------------------------------------------------------------
// Post-index load single N-element structure to one lane of N consecutive
// registers (N = 2,3,4)
//------------------------------------------------------------------------------
- ld2 {v0.b, v1.b}[9], [x0], x3
- ld2 {v15.h, v16.h}[7], [x15], #4
- ld2 {v31.s, v0.s}[3], [sp], #8
- ld2 {v0.d, v1.d}[1], [x0], x0
-// CHECK: ld2 {v0.b, v1.b}[9], [x0], x3 // encoding: [0x00,0x04,0xe3,0x4d]
-// CHECK: ld2 {v15.h, v16.h}[7], [x15], #4 // encoding: [0xef,0x59,0xff,0x4d]
-// CHECK: ld2 {v31.s, v0.s}[3], [sp], #8 // encoding: [0xff,0x93,0xff,0x4d]
-// CHECK: ld2 {v0.d, v1.d}[1], [x0], x0 // encoding: [0x00,0x84,0xe0,0x4d]
+ ld2 { v0.b, v1.b }[9], [x0], x3
+ ld2 { v15.h, v16.h }[7], [x15], #4
+ ld2 { v31.s, v0.s }[3], [sp], #8
+ ld2 { v0.d, v1.d }[1], [x0], x0
+// CHECK: ld2 { v0.b, v1.b }[9], [x0], x3 // encoding: [0x00,0x04,0xe3,0x4d]
+// CHECK: ld2 { v15.h, v16.h }[7], [x15], #4 // encoding: [0xef,0x59,0xff,0x4d]
+// CHECK: ld2 { v31.s, v0.s }[3], [sp], #8 // encoding: [0xff,0x93,0xff,0x4d]
+// CHECK: ld2 { v0.d, v1.d }[1], [x0], x0 // encoding: [0x00,0x84,0xe0,0x4d]
- ld3 {v0.b, v1.b, v2.b}[9], [x0], #3
- ld3 {v15.h, v16.h, v17.h}[7], [x15], #6
- ld3 {v31.s, v0.s, v1.s}[3], [sp], x3
- ld3 {v0.d, v1.d, v2.d}[1], [x0], x6
-// CHECK: ld3 {v0.b, v1.b, v2.b}[9], [x0], #3 // encoding: [0x00,0x24,0xdf,0x4d]
-// CHECK: ld3 {v15.h, v16.h, v17.h}[7], [x15], #6 // encoding: [0xef,0x79,0xdf,0x4d]
-// CHECK: ld3 {v31.s, v0.s, v1.s}[3], [sp], x3 // encoding: [0xff,0xb3,0xc3,0x4d]
-// CHECK: ld3 {v0.d, v1.d, v2.d}[1], [x0], x6 // encoding: [0x00,0xa4,0xc6,0x4d]
+ ld3 { v0.b, v1.b, v2.b }[9], [x0], #3
+ ld3 { v15.h, v16.h, v17.h }[7], [x15], #6
+ ld3 { v31.s, v0.s, v1.s }[3], [sp], x3
+ ld3 { v0.d, v1.d, v2.d }[1], [x0], x6
+// CHECK: ld3 { v0.b, v1.b, v2.b }[9], [x0], #3 // encoding: [0x00,0x24,0xdf,0x4d]
+// CHECK: ld3 { v15.h, v16.h, v17.h }[7], [x15], #6 // encoding: [0xef,0x79,0xdf,0x4d]
+// CHECK: ld3 { v31.s, v0.s, v1.s }[3], [sp], x3 // encoding: [0xff,0xb3,0xc3,0x4d]
+// CHECK: ld3 { v0.d, v1.d, v2.d }[1], [x0], x6 // encoding: [0x00,0xa4,0xc6,0x4d]
- ld4 {v0.b, v1.b, v2.b, v3.b}[9], [x0], x5
- ld4 {v15.h, v16.h, v17.h, v18.h}[7], [x15], x7
- ld4 {v31.s, v0.s, v1.s, v2.s}[3], [sp], #16
- ld4 {v0.d, v1.d, v2.d, v3.d}[1], [x0], #32
-// CHECK: ld4 {v0.b, v1.b, v2.b, v3.b}[9], [x0], x5 // encoding: [0x00,0x24,0xe5,0x4d]
-// CHECK: ld4 {v15.h, v16.h, v17.h, v18.h}[7], [x15], x7 // encoding: [0xef,0x79,0xe7,0x4d]
-// CHECK: ld4 {v31.s, v0.s, v1.s, v2.s}[3], [sp], #16 // encoding: [0xff,0xb3,0xff,0x4d]
-// CHECK: ld4 {v0.d, v1.d, v2.d, v3.d}[1], [x0], #32 // encoding: [0x00,0xa4,0xff,0x4d]
+ ld4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5
+ ld4 { v15.h, v16.h, v17.h, v18.h }[7], [x15], x7
+ ld4 { v31.s, v0.s, v1.s, v2.s }[3], [sp], #16
+ ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32
+// CHECK: ld4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5 // encoding: [0x00,0x24,0xe5,0x4d]
+// CHECK: ld4 { v15.h, v16.h, v17.h, v18.h }[7], [x15], x7 // encoding: [0xef,0x79,0xe7,0x4d]
+// CHECK: ld4 { v31.s, v0.s, v1.s, v2.s }[3], [sp], #16 // encoding: [0xff,0xb3,0xff,0x4d]
+// CHECK: ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32 // encoding: [0x00,0xa4,0xff,0x4d]
//------------------------------------------------------------------------------
// Post-index store single 1-element structure from one lane of 1 register.
//------------------------------------------------------------------------------
- st1 {v0.b}[9], [x0], #1
- st1 {v15.h}[7], [x15], x9
- st1 {v31.s}[3], [sp], x6
- st1 {v0.d}[1], [x0], #8
-// CHECK: st1 {v0.b}[9], [x0], #1 // encoding: [0x00,0x04,0x9f,0x4d]
-// CHECK: st1 {v15.h}[7], [x15], x9 // encoding: [0xef,0x59,0x89,0x4d]
-// CHECK: st1 {v31.s}[3], [sp], x6 // encoding: [0xff,0x93,0x86,0x4d]
-// CHECK: st1 {v0.d}[1], [x0], #8 // encoding: [0x00,0x84,0x9f,0x4d]
+ st1 { v0.b }[9], [x0], #1
+ st1 { v15.h }[7], [x15], x9
+ st1 { v31.s }[3], [sp], x6
+ st1 { v0.d }[1], [x0], #8
+// CHECK: st1 { v0.b }[9], [x0], #1 // encoding: [0x00,0x04,0x9f,0x4d]
+// CHECK: st1 { v15.h }[7], [x15], x9 // encoding: [0xef,0x59,0x89,0x4d]
+// CHECK: st1 { v31.s }[3], [sp], x6 // encoding: [0xff,0x93,0x86,0x4d]
+// CHECK: st1 { v0.d }[1], [x0], #8 // encoding: [0x00,0x84,0x9f,0x4d]
//------------------------------------------------------------------------------
// Post-index store single N-element structure from one lane of N consecutive
// registers (N = 2,3,4)
//------------------------------------------------------------------------------
- st2 {v0.b, v1.b}[9], [x0], x3
- st2 {v15.h, v16.h}[7], [x15], #4
- st2 {v31.s, v0.s}[3], [sp], #8
- st2 {v0.d, v1.d}[1], [x0], x0
-// CHECK: st2 {v0.b, v1.b}[9], [x0], x3 // encoding: [0x00,0x04,0xa3,0x4d]
-// CHECK: st2 {v15.h, v16.h}[7], [x15], #4 // encoding: [0xef,0x59,0xbf,0x4d]
-// CHECK: st2 {v31.s, v0.s}[3], [sp], #8 // encoding: [0xff,0x93,0xbf,0x4d]
-// CHECK: st2 {v0.d, v1.d}[1], [x0], x0 // encoding: [0x00,0x84,0xa0,0x4d]
+ st2 { v0.b, v1.b }[9], [x0], x3
+ st2 { v15.h, v16.h }[7], [x15], #4
+ st2 { v31.s, v0.s }[3], [sp], #8
+ st2 { v0.d, v1.d }[1], [x0], x0
+// CHECK: st2 { v0.b, v1.b }[9], [x0], x3 // encoding: [0x00,0x04,0xa3,0x4d]
+// CHECK: st2 { v15.h, v16.h }[7], [x15], #4 // encoding: [0xef,0x59,0xbf,0x4d]
+// CHECK: st2 { v31.s, v0.s }[3], [sp], #8 // encoding: [0xff,0x93,0xbf,0x4d]
+// CHECK: st2 { v0.d, v1.d }[1], [x0], x0 // encoding: [0x00,0x84,0xa0,0x4d]
- st3 {v0.b, v1.b, v2.b}[9], [x0], #3
- st3 {v15.h, v16.h, v17.h}[7], [x15], #6
- st3 {v31.s, v0.s, v1.s}[3], [sp], x3
- st3 {v0.d, v1.d, v2.d}[1], [x0], x6
-// CHECK: st3 {v0.b, v1.b, v2.b}[9], [x0], #3 // encoding: [0x00,0x24,0x9f,0x4d]
-// CHECK: st3 {v15.h, v16.h, v17.h}[7], [x15], #6 // encoding: [0xef,0x79,0x9f,0x4d]
-// CHECK: st3 {v31.s, v0.s, v1.s}[3], [sp], x3 // encoding: [0xff,0xb3,0x83,0x4d]
-// CHECK: st3 {v0.d, v1.d, v2.d}[1], [x0], x6 // encoding: [0x00,0xa4,0x86,0x4d]
+ st3 { v0.b, v1.b, v2.b }[9], [x0], #3
+ st3 { v15.h, v16.h, v17.h }[7], [x15], #6
+ st3 { v31.s, v0.s, v1.s }[3], [sp], x3
+ st3 { v0.d, v1.d, v2.d }[1], [x0], x6
+// CHECK: st3 { v0.b, v1.b, v2.b }[9], [x0], #3 // encoding: [0x00,0x24,0x9f,0x4d]
+// CHECK: st3 { v15.h, v16.h, v17.h }[7], [x15], #6 // encoding: [0xef,0x79,0x9f,0x4d]
+// CHECK: st3 { v31.s, v0.s, v1.s }[3], [sp], x3 // encoding: [0xff,0xb3,0x83,0x4d]
+// CHECK: st3 { v0.d, v1.d, v2.d }[1], [x0], x6 // encoding: [0x00,0xa4,0x86,0x4d]
- st4 {v0.b, v1.b, v2.b, v3.b}[9], [x0], x5
- st4 {v15.h, v16.h, v17.h, v18.h}[7], [x15], x7
- st4 {v31.s, v0.s, v1.s, v2.s}[3], [sp], #16
- st4 {v0.d, v1.d, v2.d, v3.d}[1], [x0], #32
-// CHECK: st4 {v0.b, v1.b, v2.b, v3.b}[9], [x0], x5 // encoding: [0x00,0x24,0xa5,0x4d]
-// CHECK: st4 {v15.h, v16.h, v17.h, v18.h}[7], [x15], x7 // encoding: [0xef,0x79,0xa7,0x4d]
-// CHECK: st4 {v31.s, v0.s, v1.s, v2.s}[3], [sp], #16 // encoding: [0xff,0xb3,0xbf,0x4d]
-// CHECK: st4 {v0.d, v1.d, v2.d, v3.d}[1], [x0], #32 // encoding: [0x00,0xa4,0xbf,0x4d]
+ st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5
+ st4 { v15.h, v16.h, v17.h, v18.h }[7], [x15], x7
+ st4 { v31.s, v0.s, v1.s, v2.s }[3], [sp], #16
+ st4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32
+// CHECK: st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5 // encoding: [0x00,0x24,0xa5,0x4d]
+// CHECK: st4 { v15.h, v16.h, v17.h, v18.h }[7], [x15], x7 // encoding: [0xef,0x79,0xa7,0x4d]
+// CHECK: st4 { v31.s, v0.s, v1.s, v2.s }[3], [sp], #16 // encoding: [0xff,0xb3,0xbf,0x4d]
+// CHECK: st4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32 // encoding: [0x00,0xa4,0xbf,0x4d]
diff --git a/test/MC/AArch64/neon-simd-misc.s b/test/MC/AArch64/neon-simd-misc.s
index 9e0f9c5b4d95..6d1aafdd7725 100644
--- a/test/MC/AArch64/neon-simd-misc.s
+++ b/test/MC/AArch64/neon-simd-misc.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
@@ -281,8 +281,8 @@
not v0.16b, v31.16b
not v1.8b, v9.8b
-// CHECK: not v0.16b, v31.16b // encoding: [0xe0,0x5b,0x20,0x6e]
-// CHECK: not v1.8b, v9.8b // encoding: [0x21,0x59,0x20,0x2e]
+// CHECK: {{mvn|not}} v0.16b, v31.16b // encoding: [0xe0,0x5b,0x20,0x6e]
+// CHECK: {{mvn|not}} v1.8b, v9.8b // encoding: [0x21,0x59,0x20,0x2e]
//------------------------------------------------------------------------------
// Bitwise reverse
diff --git a/test/MC/AArch64/neon-simd-post-ldst-multi-elem.s b/test/MC/AArch64/neon-simd-post-ldst-multi-elem.s
index 8dc271e38d2c..c57a122f35c8 100644
--- a/test/MC/AArch64/neon-simd-post-ldst-multi-elem.s
+++ b/test/MC/AArch64/neon-simd-post-ldst-multi-elem.s
@@ -1,389 +1,389 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
//------------------------------------------------------------------------------
// Load multiple 1-element structures from one register (post-index)
//------------------------------------------------------------------------------
- ld1 {v0.16b}, [x0], x1
- ld1 {v15.8h}, [x15], x2
- ld1 {v31.4s}, [sp], #16
- ld1 {v0.2d}, [x0], #16
- ld1 {v0.8b}, [x0], x2
- ld1 {v15.4h}, [x15], x3
- ld1 {v31.2s}, [sp], #8
- ld1 {v0.1d}, [x0], #8
-// CHECK: ld1 {v0.16b}, [x0], x1
+ ld1 { v0.16b }, [x0], x1
+ ld1 { v15.8h }, [x15], x2
+ ld1 { v31.4s }, [sp], #16
+ ld1 { v0.2d }, [x0], #16
+ ld1 { v0.8b }, [x0], x2
+ ld1 { v15.4h }, [x15], x3
+ ld1 { v31.2s }, [sp], #8
+ ld1 { v0.1d }, [x0], #8
+// CHECK: ld1 { v0.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x70,0xc1,0x4c]
-// CHECK: ld1 {v15.8h}, [x15], x2
+// CHECK: ld1 { v15.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x75,0xc2,0x4c]
-// CHECK: ld1 {v31.4s}, [sp], #16
+// CHECK: ld1 { v31.4s }, [sp], #16
// CHECK: // encoding: [0xff,0x7b,0xdf,0x4c]
-// CHECK: ld1 {v0.2d}, [x0], #16
+// CHECK: ld1 { v0.2d }, [x0], #16
// CHECK: // encoding: [0x00,0x7c,0xdf,0x4c]
-// CHECK: ld1 {v0.8b}, [x0], x2
+// CHECK: ld1 { v0.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x70,0xc2,0x0c]
-// CHECK: ld1 {v15.4h}, [x15], x3
+// CHECK: ld1 { v15.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x75,0xc3,0x0c]
-// CHECK: ld1 {v31.2s}, [sp], #8
+// CHECK: ld1 { v31.2s }, [sp], #8
// CHECK: // encoding: [0xff,0x7b,0xdf,0x0c]
-// CHECK: ld1 {v0.1d}, [x0], #8
+// CHECK: ld1 { v0.1d }, [x0], #8
// CHECK: // encoding: [0x00,0x7c,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- ld1 {v0.16b, v1.16b}, [x0], x1
- ld1 {v15.8h, v16.8h}, [x15], x2
- ld1 {v31.4s, v0.4s}, [sp], #32
- ld1 {v0.2d, v1.2d}, [x0], #32
- ld1 {v0.8b, v1.8b}, [x0], x2
- ld1 {v15.4h, v16.4h}, [x15], x3
- ld1 {v31.2s, v0.2s}, [sp], #16
- ld1 {v0.1d, v1.1d}, [x0], #16
-// CHECK: ld1 {v0.16b, v1.16b}, [x0], x1
+ ld1 { v0.16b, v1.16b }, [x0], x1
+ ld1 { v15.8h, v16.8h }, [x15], x2
+ ld1 { v31.4s, v0.4s }, [sp], #32
+ ld1 { v0.2d, v1.2d }, [x0], #32
+ ld1 { v0.8b, v1.8b }, [x0], x2
+ ld1 { v15.4h, v16.4h }, [x15], x3
+ ld1 { v31.2s, v0.2s }, [sp], #16
+ ld1 { v0.1d, v1.1d }, [x0], #16
+// CHECK: ld1 { v0.16b, v1.16b }, [x0], x1
// CHECK: // encoding: [0x00,0xa0,0xc1,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h}, [x15], x2
+// CHECK: ld1 { v15.8h, v16.8h }, [x15], x2
// CHECK: // encoding: [0xef,0xa5,0xc2,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s}, [sp], #32
+// CHECK: ld1 { v31.4s, v0.4s }, [sp], #32
// CHECK: // encoding: [0xff,0xab,0xdf,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d}, [x0], #32
+// CHECK: ld1 { v0.2d, v1.2d }, [x0], #32
// CHECK: // encoding: [0x00,0xac,0xdf,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b}, [x0], x2
+// CHECK: ld1 { v0.8b, v1.8b }, [x0], x2
// CHECK: // encoding: [0x00,0xa0,0xc2,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h}, [x15], x3
+// CHECK: ld1 { v15.4h, v16.4h }, [x15], x3
// CHECK: // encoding: [0xef,0xa5,0xc3,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s}, [sp], #16
+// CHECK: ld1 { v31.2s, v0.2s }, [sp], #16
// CHECK: // encoding: [0xff,0xab,0xdf,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d}, [x0], #16
+// CHECK: ld1 { v0.1d, v1.1d }, [x0], #16
// CHECK: // encoding: [0x00,0xac,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- ld1 {v0.16b, v1.16b, v2.16b}, [x0], x1
- ld1 {v15.8h, v16.8h, v17.8h}, [x15], x2
- ld1 {v31.4s, v0.4s, v1.4s}, [sp], #48
- ld1 {v0.2d, v1.2d, v2.2d}, [x0], #48
- ld1 {v0.8b, v1.8b, v2.8b}, [x0], x2
- ld1 {v15.4h, v16.4h, v17.4h}, [x15], x3
- ld1 {v31.2s, v0.2s, v1.2s}, [sp], #24
- ld1 {v0.1d, v1.1d, v2.1d}, [x0], #24
-// CHECK: ld1 {v0.16b, v1.16b, v2.16b}, [x0], x1
+ ld1 { v0.16b, v1.16b, v2.16b }, [x0], x1
+ ld1 { v15.8h, v16.8h, v17.8h }, [x15], x2
+ ld1 { v31.4s, v0.4s, v1.4s }, [sp], #48
+ ld1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+ ld1 { v0.8b, v1.8b, v2.8b }, [x0], x2
+ ld1 { v15.4h, v16.4h, v17.4h }, [x15], x3
+ ld1 { v31.2s, v0.2s, v1.2s }, [sp], #24
+ ld1 { v0.1d, v1.1d, v2.1d }, [x0], #24
+// CHECK: ld1 { v0.16b, v1.16b, v2.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x60,0xc1,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h, v17.8h}, [x15], x2
+// CHECK: ld1 { v15.8h, v16.8h, v17.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x65,0xc2,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp], #48
+// CHECK: ld1 { v31.4s, v0.4s, v1.4s }, [sp], #48
// CHECK: // encoding: [0xff,0x6b,0xdf,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0], #48
+// CHECK: ld1 { v0.2d, v1.2d, v2.2d }, [x0], #48
// CHECK: // encoding: [0x00,0x6c,0xdf,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b, v2.8b}, [x0], x2
+// CHECK: ld1 { v0.8b, v1.8b, v2.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x60,0xc2,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h, v17.4h}, [x15], x3
+// CHECK: ld1 { v15.4h, v16.4h, v17.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x65,0xc3,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s, v1.2s}, [sp], #24
+// CHECK: ld1 { v31.2s, v0.2s, v1.2s }, [sp], #24
// CHECK: // encoding: [0xff,0x6b,0xdf,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d, v2.1d}, [x0], #24
+// CHECK: ld1 { v0.1d, v1.1d, v2.1d }, [x0], #24
// CHECK: // encoding: [0x00,0x6c,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
- ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
- ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
- ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
- ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
- ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
- ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
- ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
-// CHECK: ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
+ ld1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
+ ld1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
+ ld1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
+ ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
+ ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+ ld1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
+ ld1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
+ ld1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0], #32
+// CHECK: ld1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x20,0xc1,0x4c]
-// CHECK: ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
+// CHECK: ld1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x25,0xc2,0x4c]
-// CHECK: ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
+// CHECK: ld1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
// CHECK: // encoding: [0xff,0x2b,0xdf,0x4c]
-// CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
+// CHECK: ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
// CHECK: // encoding: [0x00,0x2c,0xdf,0x4c]
-// CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
+// CHECK: ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
// CHECK: // encoding: [0x00,0x20,0xc3,0x0c]
-// CHECK: ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
+// CHECK: ld1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
// CHECK: // encoding: [0xef,0x25,0xc4,0x0c]
-// CHECK: ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
+// CHECK: ld1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
// CHECK: // encoding: [0xff,0x2b,0xdf,0x0c]
-// CHECK: ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
+// CHECK: ld1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0], #32
// CHECK: // encoding: [0x00,0x2c,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 2-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- ld2 {v0.16b, v1.16b}, [x0], x1
- ld2 {v15.8h, v16.8h}, [x15], x2
- ld2 {v31.4s, v0.4s}, [sp], #32
- ld2 {v0.2d, v1.2d}, [x0], #32
- ld2 {v0.8b, v1.8b}, [x0], x2
- ld2 {v15.4h, v16.4h}, [x15], x3
- ld2 {v31.2s, v0.2s}, [sp], #16
-// CHECK: ld2 {v0.16b, v1.16b}, [x0], x1
+ ld2 { v0.16b, v1.16b }, [x0], x1
+ ld2 { v15.8h, v16.8h }, [x15], x2
+ ld2 { v31.4s, v0.4s }, [sp], #32
+ ld2 { v0.2d, v1.2d }, [x0], #32
+ ld2 { v0.8b, v1.8b }, [x0], x2
+ ld2 { v15.4h, v16.4h }, [x15], x3
+ ld2 { v31.2s, v0.2s }, [sp], #16
+// CHECK: ld2 { v0.16b, v1.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x80,0xc1,0x4c]
-// CHECK: ld2 {v15.8h, v16.8h}, [x15], x2
+// CHECK: ld2 { v15.8h, v16.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x85,0xc2,0x4c]
-// CHECK: ld2 {v31.4s, v0.4s}, [sp], #32
+// CHECK: ld2 { v31.4s, v0.4s }, [sp], #32
// CHECK: // encoding: [0xff,0x8b,0xdf,0x4c]
-// CHECK: ld2 {v0.2d, v1.2d}, [x0], #32
+// CHECK: ld2 { v0.2d, v1.2d }, [x0], #32
// CHECK: // encoding: [0x00,0x8c,0xdf,0x4c]
-// CHECK: ld2 {v0.8b, v1.8b}, [x0], x2
+// CHECK: ld2 { v0.8b, v1.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x80,0xc2,0x0c]
-// CHECK: ld2 {v15.4h, v16.4h}, [x15], x3
+// CHECK: ld2 { v15.4h, v16.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x85,0xc3,0x0c]
-// CHECK: ld2 {v31.2s, v0.2s}, [sp], #16
+// CHECK: ld2 { v31.2s, v0.2s }, [sp], #16
// CHECK: // encoding: [0xff,0x8b,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 3-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- ld3 {v0.16b, v1.16b, v2.16b}, [x0], x1
- ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
- ld3 {v31.4s, v0.4s, v1.4s}, [sp], #48
- ld3 {v0.2d, v1.2d, v2.2d}, [x0], #48
- ld3 {v0.8b, v1.8b, v2.8b}, [x0], x2
- ld3 {v15.4h, v16.4h, v17.4h}, [x15], x3
- ld3 {v31.2s, v0.2s, v1.2s}, [sp], #24
-// CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0], x1
+ ld3 { v0.16b, v1.16b, v2.16b }, [x0], x1
+ ld3 { v15.8h, v16.8h, v17.8h }, [x15], x2
+ ld3 { v31.4s, v0.4s, v1.4s }, [sp], #48
+ ld3 { v0.2d, v1.2d, v2.2d }, [x0], #48
+ ld3 { v0.8b, v1.8b, v2.8b }, [x0], x2
+ ld3 { v15.4h, v16.4h, v17.4h }, [x15], x3
+ ld3 { v31.2s, v0.2s, v1.2s }, [sp], #24
+// CHECK: ld3 { v0.16b, v1.16b, v2.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x40,0xc1,0x4c]
-// CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
+// CHECK: ld3 { v15.8h, v16.8h, v17.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x45,0xc2,0x4c]
-// CHECK: ld3 {v31.4s, v0.4s, v1.4s}, [sp], #48
+// CHECK: ld3 { v31.4s, v0.4s, v1.4s }, [sp], #48
// CHECK: // encoding: [0xff,0x4b,0xdf,0x4c]
-// CHECK: ld3 {v0.2d, v1.2d, v2.2d}, [x0], #48
+// CHECK: ld3 { v0.2d, v1.2d, v2.2d }, [x0], #48
// CHECK: // encoding: [0x00,0x4c,0xdf,0x4c]
-// CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0], x2
+// CHECK: ld3 { v0.8b, v1.8b, v2.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x40,0xc2,0x0c]
-// CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15], x3
+// CHECK: ld3 { v15.4h, v16.4h, v17.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x45,0xc3,0x0c]
-// CHECK: ld3 {v31.2s, v0.2s, v1.2s}, [sp], #24
+// CHECK: ld3 { v31.2s, v0.2s, v1.2s }, [sp], #24
// CHECK: // encoding: [0xff,0x4b,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 4-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
- ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
- ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
- ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
- ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
- ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
- ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
-// CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
+ ld4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
+ ld4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
+ ld4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
+ ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
+ ld4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+ ld4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
+ ld4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
+// CHECK: ld4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x00,0xc1,0x4c]
-// CHECK: ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
+// CHECK: ld4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x05,0xc2,0x4c]
-// CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
+// CHECK: ld4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
// CHECK: // encoding: [0xff,0x0b,0xdf,0x4c]
-// CHECK: ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
+// CHECK: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
// CHECK: // encoding: [0x00,0x0c,0xdf,0x4c]
-// CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
+// CHECK: ld4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
// CHECK: // encoding: [0x00,0x00,0xc3,0x0c]
-// CHECK: ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
+// CHECK: ld4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
// CHECK: // encoding: [0xef,0x05,0xc4,0x0c]
-// CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
+// CHECK: ld4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
// CHECK: // encoding: [0xff,0x0b,0xdf,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from one register (post-index)
//------------------------------------------------------------------------------
- st1 {v0.16b}, [x0], x1
- st1 {v15.8h}, [x15], x2
- st1 {v31.4s}, [sp], #16
- st1 {v0.2d}, [x0], #16
- st1 {v0.8b}, [x0], x2
- st1 {v15.4h}, [x15], x3
- st1 {v31.2s}, [sp], #8
- st1 {v0.1d}, [x0], #8
-// CHECK: st1 {v0.16b}, [x0], x1
+ st1 { v0.16b }, [x0], x1
+ st1 { v15.8h }, [x15], x2
+ st1 { v31.4s }, [sp], #16
+ st1 { v0.2d }, [x0], #16
+ st1 { v0.8b }, [x0], x2
+ st1 { v15.4h }, [x15], x3
+ st1 { v31.2s }, [sp], #8
+ st1 { v0.1d }, [x0], #8
+// CHECK: st1 { v0.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x70,0x81,0x4c]
-// CHECK: st1 {v15.8h}, [x15], x2
+// CHECK: st1 { v15.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x75,0x82,0x4c]
-// CHECK: st1 {v31.4s}, [sp], #16
+// CHECK: st1 { v31.4s }, [sp], #16
// CHECK: // encoding: [0xff,0x7b,0x9f,0x4c]
-// CHECK: st1 {v0.2d}, [x0], #16
+// CHECK: st1 { v0.2d }, [x0], #16
// CHECK: // encoding: [0x00,0x7c,0x9f,0x4c]
-// CHECK: st1 {v0.8b}, [x0], x2
+// CHECK: st1 { v0.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x70,0x82,0x0c]
-// CHECK: st1 {v15.4h}, [x15], x3
+// CHECK: st1 { v15.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x75,0x83,0x0c]
-// CHECK: st1 {v31.2s}, [sp], #8
+// CHECK: st1 { v31.2s }, [sp], #8
// CHECK: // encoding: [0xff,0x7b,0x9f,0x0c]
-// CHECK: st1 {v0.1d}, [x0], #8
+// CHECK: st1 { v0.1d }, [x0], #8
// CHECK: // encoding: [0x00,0x7c,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- st1 {v0.16b, v1.16b}, [x0], x1
- st1 {v15.8h, v16.8h}, [x15], x2
- st1 {v31.4s, v0.4s}, [sp], #32
- st1 {v0.2d, v1.2d}, [x0], #32
- st1 {v0.8b, v1.8b}, [x0], x2
- st1 {v15.4h, v16.4h}, [x15], x3
- st1 {v31.2s, v0.2s}, [sp], #16
- st1 {v0.1d, v1.1d}, [x0], #16
-// CHECK: st1 {v0.16b, v1.16b}, [x0], x1
+ st1 { v0.16b, v1.16b }, [x0], x1
+ st1 { v15.8h, v16.8h }, [x15], x2
+ st1 { v31.4s, v0.4s }, [sp], #32
+ st1 { v0.2d, v1.2d }, [x0], #32
+ st1 { v0.8b, v1.8b }, [x0], x2
+ st1 { v15.4h, v16.4h }, [x15], x3
+ st1 { v31.2s, v0.2s }, [sp], #16
+ st1 { v0.1d, v1.1d }, [x0], #16
+// CHECK: st1 { v0.16b, v1.16b }, [x0], x1
// CHECK: // encoding: [0x00,0xa0,0x81,0x4c]
-// CHECK: st1 {v15.8h, v16.8h}, [x15], x2
+// CHECK: st1 { v15.8h, v16.8h }, [x15], x2
// CHECK: // encoding: [0xef,0xa5,0x82,0x4c]
-// CHECK: st1 {v31.4s, v0.4s}, [sp], #32
+// CHECK: st1 { v31.4s, v0.4s }, [sp], #32
// CHECK: // encoding: [0xff,0xab,0x9f,0x4c]
-// CHECK: st1 {v0.2d, v1.2d}, [x0], #32
+// CHECK: st1 { v0.2d, v1.2d }, [x0], #32
// CHECK: // encoding: [0x00,0xac,0x9f,0x4c]
-// CHECK: st1 {v0.8b, v1.8b}, [x0], x2
+// CHECK: st1 { v0.8b, v1.8b }, [x0], x2
// CHECK: // encoding: [0x00,0xa0,0x82,0x0c]
-// CHECK: st1 {v15.4h, v16.4h}, [x15], x3
+// CHECK: st1 { v15.4h, v16.4h }, [x15], x3
// CHECK: // encoding: [0xef,0xa5,0x83,0x0c]
-// CHECK: st1 {v31.2s, v0.2s}, [sp], #16
+// CHECK: st1 { v31.2s, v0.2s }, [sp], #16
// CHECK: // encoding: [0xff,0xab,0x9f,0x0c]
-// CHECK: st1 {v0.1d, v1.1d}, [x0], #16
+// CHECK: st1 { v0.1d, v1.1d }, [x0], #16
// CHECK: // encoding: [0x00,0xac,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- st1 {v0.16b, v1.16b, v2.16b}, [x0], x1
- st1 {v15.8h, v16.8h, v17.8h}, [x15], x2
- st1 {v31.4s, v0.4s, v1.4s}, [sp], #48
- st1 {v0.2d, v1.2d, v2.2d}, [x0], #48
- st1 {v0.8b, v1.8b, v2.8b}, [x0], x2
- st1 {v15.4h, v16.4h, v17.4h}, [x15], x3
- st1 {v31.2s, v0.2s, v1.2s}, [sp], #24
- st1 {v0.1d, v1.1d, v2.1d}, [x0], #24
-// CHECK: st1 {v0.16b, v1.16b, v2.16b}, [x0], x1
+ st1 { v0.16b, v1.16b, v2.16b }, [x0], x1
+ st1 { v15.8h, v16.8h, v17.8h }, [x15], x2
+ st1 { v31.4s, v0.4s, v1.4s }, [sp], #48
+ st1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+ st1 { v0.8b, v1.8b, v2.8b }, [x0], x2
+ st1 { v15.4h, v16.4h, v17.4h }, [x15], x3
+ st1 { v31.2s, v0.2s, v1.2s }, [sp], #24
+ st1 { v0.1d, v1.1d, v2.1d }, [x0], #24
+// CHECK: st1 { v0.16b, v1.16b, v2.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x60,0x81,0x4c]
-// CHECK: st1 {v15.8h, v16.8h, v17.8h}, [x15], x2
+// CHECK: st1 { v15.8h, v16.8h, v17.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x65,0x82,0x4c]
-// CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp], #48
+// CHECK: st1 { v31.4s, v0.4s, v1.4s }, [sp], #48
// CHECK: // encoding: [0xff,0x6b,0x9f,0x4c]
-// CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0], #48
+// CHECK: st1 { v0.2d, v1.2d, v2.2d }, [x0], #48
// CHECK: // encoding: [0x00,0x6c,0x9f,0x4c]
-// CHECK: st1 {v0.8b, v1.8b, v2.8b}, [x0], x2
+// CHECK: st1 { v0.8b, v1.8b, v2.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x60,0x82,0x0c]
-// CHECK: st1 {v15.4h, v16.4h, v17.4h}, [x15], x3
+// CHECK: st1 { v15.4h, v16.4h, v17.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x65,0x83,0x0c]
-// CHECK: st1 {v31.2s, v0.2s, v1.2s}, [sp], #24
+// CHECK: st1 { v31.2s, v0.2s, v1.2s }, [sp], #24
// CHECK: // encoding: [0xff,0x6b,0x9f,0x0c]
-// CHECK: st1 {v0.1d, v1.1d, v2.1d}, [x0], #24
+// CHECK: st1 { v0.1d, v1.1d, v2.1d }, [x0], #24
// CHECK: // encoding: [0x00,0x6c,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
- st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
- st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
- st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
- st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
- st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
- st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
- st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
-// CHECK: st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
+ st1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
+ st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
+ st1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
+ st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
+ st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+ st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
+ st1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
+ st1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0], #32
+// CHECK: st1 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x20,0x81,0x4c]
-// CHECK: st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
+// CHECK: st1 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x25,0x82,0x4c]
-// CHECK: st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
+// CHECK: st1 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
// CHECK: // encoding: [0xff,0x2b,0x9f,0x4c]
-// CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
+// CHECK: st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
// CHECK: // encoding: [0x00,0x2c,0x9f,0x4c]
-// CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
+// CHECK: st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
// CHECK: // encoding: [0x00,0x20,0x83,0x0c]
-// CHECK: st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
+// CHECK: st1 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
// CHECK: // encoding: [0xef,0x25,0x84,0x0c]
-// CHECK: st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
+// CHECK: st1 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
// CHECK: // encoding: [0xff,0x2b,0x9f,0x0c]
-// CHECK: st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
+// CHECK: st1 { v0.1d, v1.1d, v2.1d, v3.1d }, [x0], #32
// CHECK: // encoding: [0x00,0x2c,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 2-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- st2 {v0.16b, v1.16b}, [x0], x1
- st2 {v15.8h, v16.8h}, [x15], x2
- st2 {v31.4s, v0.4s}, [sp], #32
- st2 {v0.2d, v1.2d}, [x0], #32
- st2 {v0.8b, v1.8b}, [x0], x2
- st2 {v15.4h, v16.4h}, [x15], x3
- st2 {v31.2s, v0.2s}, [sp], #16
-// CHECK: st2 {v0.16b, v1.16b}, [x0], x1
+ st2 { v0.16b, v1.16b }, [x0], x1
+ st2 { v15.8h, v16.8h }, [x15], x2
+ st2 { v31.4s, v0.4s }, [sp], #32
+ st2 { v0.2d, v1.2d }, [x0], #32
+ st2 { v0.8b, v1.8b }, [x0], x2
+ st2 { v15.4h, v16.4h }, [x15], x3
+ st2 { v31.2s, v0.2s }, [sp], #16
+// CHECK: st2 { v0.16b, v1.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x80,0x81,0x4c]
-// CHECK: st2 {v15.8h, v16.8h}, [x15], x2
+// CHECK: st2 { v15.8h, v16.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x85,0x82,0x4c]
-// CHECK: st2 {v31.4s, v0.4s}, [sp], #32
+// CHECK: st2 { v31.4s, v0.4s }, [sp], #32
// CHECK: // encoding: [0xff,0x8b,0x9f,0x4c]
-// CHECK: st2 {v0.2d, v1.2d}, [x0], #32
+// CHECK: st2 { v0.2d, v1.2d }, [x0], #32
// CHECK: // encoding: [0x00,0x8c,0x9f,0x4c]
-// CHECK: st2 {v0.8b, v1.8b}, [x0], x2
+// CHECK: st2 { v0.8b, v1.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x80,0x82,0x0c]
-// CHECK: st2 {v15.4h, v16.4h}, [x15], x3
+// CHECK: st2 { v15.4h, v16.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x85,0x83,0x0c]
-// CHECK: st2 {v31.2s, v0.2s}, [sp], #16
+// CHECK: st2 { v31.2s, v0.2s }, [sp], #16
// CHECK: // encoding: [0xff,0x8b,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 3-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- st3 {v0.16b, v1.16b, v2.16b}, [x0], x1
- st3 {v15.8h, v16.8h, v17.8h}, [x15], x2
- st3 {v31.4s, v0.4s, v1.4s}, [sp], #48
- st3 {v0.2d, v1.2d, v2.2d}, [x0], #48
- st3 {v0.8b, v1.8b, v2.8b}, [x0], x2
- st3 {v15.4h, v16.4h, v17.4h}, [x15], x3
- st3 {v31.2s, v0.2s, v1.2s}, [sp], #24
-// CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0], x1
+ st3 { v0.16b, v1.16b, v2.16b }, [x0], x1
+ st3 { v15.8h, v16.8h, v17.8h }, [x15], x2
+ st3 { v31.4s, v0.4s, v1.4s }, [sp], #48
+ st3 { v0.2d, v1.2d, v2.2d }, [x0], #48
+ st3 { v0.8b, v1.8b, v2.8b }, [x0], x2
+ st3 { v15.4h, v16.4h, v17.4h }, [x15], x3
+ st3 { v31.2s, v0.2s, v1.2s }, [sp], #24
+// CHECK: st3 { v0.16b, v1.16b, v2.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x40,0x81,0x4c]
-// CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15], x2
+// CHECK: st3 { v15.8h, v16.8h, v17.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x45,0x82,0x4c]
-// CHECK: st3 {v31.4s, v0.4s, v1.4s}, [sp], #48
+// CHECK: st3 { v31.4s, v0.4s, v1.4s }, [sp], #48
// CHECK: // encoding: [0xff,0x4b,0x9f,0x4c]
-// CHECK: st3 {v0.2d, v1.2d, v2.2d}, [x0], #48
+// CHECK: st3 { v0.2d, v1.2d, v2.2d }, [x0], #48
// CHECK: // encoding: [0x00,0x4c,0x9f,0x4c]
-// CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0], x2
+// CHECK: st3 { v0.8b, v1.8b, v2.8b }, [x0], x2
// CHECK: // encoding: [0x00,0x40,0x82,0x0c]
-// CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15], x3
+// CHECK: st3 { v15.4h, v16.4h, v17.4h }, [x15], x3
// CHECK: // encoding: [0xef,0x45,0x83,0x0c]
-// CHECK: st3 {v31.2s, v0.2s, v1.2s}, [sp], #24
+// CHECK: st3 { v31.2s, v0.2s, v1.2s }, [sp], #24
// CHECK: // encoding: [0xff,0x4b,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 4-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
- st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
- st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
- st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
- st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
- st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
- st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
- st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
-// CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
+ st4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
+ st4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
+ st4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
+ st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
+ st4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
+ st4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
+ st4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
+// CHECK: st4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0], x1
// CHECK: // encoding: [0x00,0x00,0x81,0x4c]
-// CHECK: st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
+// CHECK: st4 { v15.8h, v16.8h, v17.8h, v18.8h }, [x15], x2
// CHECK: // encoding: [0xef,0x05,0x82,0x4c]
-// CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
+// CHECK: st4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
// CHECK: // encoding: [0xff,0x0b,0x9f,0x4c]
-// CHECK: st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
+// CHECK: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0], #64
// CHECK: // encoding: [0x00,0x0c,0x9f,0x4c]
-// CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
+// CHECK: st4 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
// CHECK: // encoding: [0x00,0x00,0x83,0x0c]
-// CHECK: st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
+// CHECK: st4 { v15.4h, v16.4h, v17.4h, v18.4h }, [x15], x4
// CHECK: // encoding: [0xef,0x05,0x84,0x0c]
-// CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
+// CHECK: st4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], #32
// CHECK: // encoding: [0xff,0x0b,0x9f,0x0c]
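
The st1-st4 hunks above (and the tbl/tbx hunk further down) track a purely syntactic change: in the 3.5 merge the canonical printed form of a NEON vector register list gained a space after the opening brace and before the closing one, so the assembled inputs and the CHECK lines were rewritten in lockstep while every encoding stays the same. A minimal standalone sketch of the behaviour, assuming the parser still accepts the unspaced spelling; the encoding is copied from the st2 case above:

// RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
        st2 {v0.16b, v1.16b}, [x0], x1     // unspaced input spelling
        st2 { v0.16b, v1.16b }, [x0], x1   // spaced input spelling
// Both lines should print in the spaced canonical form with the same encoding.
// CHECK: st2 { v0.16b, v1.16b }, [x0], x1 // encoding: [0x00,0x80,0x81,0x4c]
// CHECK: st2 { v0.16b, v1.16b }, [x0], x1 // encoding: [0x00,0x80,0x81,0x4c]
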
diff --git a/test/MC/AArch64/neon-sxtl.s b/test/MC/AArch64/neon-sxtl.s
new file mode 100644
index 000000000000..0fe26cb5e8e5
--- /dev/null
+++ b/test/MC/AArch64/neon-sxtl.s
@@ -0,0 +1,26 @@
+// RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
+
+// Check that the assembler can handle the documented syntax for AArch64
+
+//------------------------------------------------------------------------------
+// Signed integer lengthen (vector)
+//------------------------------------------------------------------------------
+ sxtl v0.8h, v1.8b
+ sxtl v0.4s, v1.4h
+ sxtl v0.2d, v1.2s
+
+// CHECK: sshll v0.8h, v1.8b, #0 // encoding: [0x20,0xa4,0x08,0x0f]
+// CHECK: sshll v0.4s, v1.4h, #0 // encoding: [0x20,0xa4,0x10,0x0f]
+// CHECK: sshll v0.2d, v1.2s, #0 // encoding: [0x20,0xa4,0x20,0x0f]
+
+//------------------------------------------------------------------------------
+// Signed integer lengthen (vector, second part)
+//------------------------------------------------------------------------------
+
+ sxtl2 v0.8h, v1.16b
+ sxtl2 v0.4s, v1.8h
+ sxtl2 v0.2d, v1.4s
+
+// CHECK: sshll2 v0.8h, v1.16b, #0 // encoding: [0x20,0xa4,0x08,0x4f]
+// CHECK: sshll2 v0.4s, v1.8h, #0 // encoding: [0x20,0xa4,0x10,0x4f]
+// CHECK: sshll2 v0.2d, v1.4s, #0 // encoding: [0x20,0xa4,0x20,0x4f]
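
As the CHECK lines of this new test show, sxtl and sxtl2 are pure aliases that the assembler canonicalises to sshll/sshll2 with a zero shift amount; the unsigned uxtl test further down behaves the same way with ushll. A small illustrative sketch (not part of the imported test) making the equivalence explicit, with the encoding copied from the first CHECK line above:

// RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
        sxtl  v0.8h, v1.8b         // alias form
        sshll v0.8h, v1.8b, #0     // the instruction it expands to
// Both spellings should produce the same canonical text and the same encoding.
// CHECK: sshll v0.8h, v1.8b, #0 // encoding: [0x20,0xa4,0x08,0x0f]
// CHECK: sshll v0.8h, v1.8b, #0 // encoding: [0x20,0xa4,0x08,0x0f]
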
diff --git a/test/MC/AArch64/neon-tbl.s b/test/MC/AArch64/neon-tbl.s
index ff3e86b1c9b9..bb39fa9f22ae 100644
--- a/test/MC/AArch64/neon-tbl.s
+++ b/test/MC/AArch64/neon-tbl.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+// RUN: llvm-mc -triple=arm64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
@@ -6,51 +6,50 @@
// Instructions across vector registers
//------------------------------------------------------------------------------
- tbl v0.8b, {v1.16b}, v2.8b
- tbl v0.8b, {v1.16b, v2.16b}, v2.8b
- tbl v0.8b, {v1.16b, v2.16b, v3.16b}, v2.8b
- tbl v0.8b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.8b
- tbl v0.8b, {v31.16b, v0.16b, v1.16b, v2.16b}, v2.8b
-
-// CHECK: tbl v0.8b, {v1.16b}, v2.8b // encoding: [0x20,0x00,0x02,0x0e]
-// CHECK: tbl v0.8b, {v1.16b, v2.16b}, v2.8b // encoding: [0x20,0x20,0x02,0x0e]
-// CHECK: tbl v0.8b, {v1.16b, v2.16b, v3.16b}, v2.8b // encoding: [0x20,0x40,0x02,0x0e]
-// CHECK: tbl v0.8b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.8b // encoding: [0x20,0x60,0x02,0x0e]
-// CHECK: tbl v0.8b, {v31.16b, v0.16b, v1.16b, v2.16b}, v2.8b // encoding: [0xe0,0x63,0x02,0x0e]
-
- tbl v0.16b, {v1.16b}, v2.16b
- tbl v0.16b, {v1.16b, v2.16b}, v2.16b
- tbl v0.16b, {v1.16b, v2.16b, v3.16b}, v2.16b
- tbl v0.16b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.16b
- tbl v0.16b, {v30.16b, v31.16b, v0.16b, v1.16b}, v2.16b
-
-// CHECK: tbl v0.16b, {v1.16b}, v2.16b // encoding: [0x20,0x00,0x02,0x4e]
-// CHECK: tbl v0.16b, {v1.16b, v2.16b}, v2.16b // encoding: [0x20,0x20,0x02,0x4e]
-// CHECK: tbl v0.16b, {v1.16b, v2.16b, v3.16b}, v2.16b // encoding: [0x20,0x40,0x02,0x4e]
-// CHECK: tbl v0.16b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.16b // encoding: [0x20,0x60,0x02,0x4e]
-// CHECK: tbl v0.16b, {v30.16b, v31.16b, v0.16b, v1.16b}, v2.16b // encoding: [0xc0,0x63,0x02,0x4e]
-
- tbx v0.8b, {v1.16b}, v2.8b
- tbx v0.8b, {v1.16b, v2.16b}, v2.8b
- tbx v0.8b, {v1.16b, v2.16b, v3.16b}, v2.8b
- tbx v0.8b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.8b
- tbx v0.8b, {v31.16b, v0.16b, v1.16b, v2.16b}, v2.8b
-
-// CHECK: tbx v0.8b, {v1.16b}, v2.8b // encoding: [0x20,0x10,0x02,0x0e]
-// CHECK: tbx v0.8b, {v1.16b, v2.16b}, v2.8b // encoding: [0x20,0x30,0x02,0x0e]
-// CHECK: tbx v0.8b, {v1.16b, v2.16b, v3.16b}, v2.8b // encoding: [0x20,0x50,0x02,0x0e]
-// CHECK: tbx v0.8b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.8b // encoding: [0x20,0x70,0x02,0x0e]
-// CHECK: tbx v0.8b, {v31.16b, v0.16b, v1.16b, v2.16b}, v2.8b // encoding: [0xe0,0x73,0x02,0x0e]
-
- tbx v0.16b, {v1.16b}, v2.16b
- tbx v0.16b, {v1.16b, v2.16b}, v2.16b
- tbx v0.16b, {v1.16b, v2.16b, v3.16b}, v2.16b
- tbx v0.16b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.16b
- tbx v0.16b, {v30.16b, v31.16b, v0.16b, v1.16b}, v2.16b
-
-// CHECK: tbx v0.16b, {v1.16b}, v2.16b // encoding: [0x20,0x10,0x02,0x4e]
-// CHECK: tbx v0.16b, {v1.16b, v2.16b}, v2.16b // encoding: [0x20,0x30,0x02,0x4e]
-// CHECK: tbx v0.16b, {v1.16b, v2.16b, v3.16b}, v2.16b // encoding: [0x20,0x50,0x02,0x4e]
-// CHECK: tbx v0.16b, {v1.16b, v2.16b, v3.16b, v4.16b}, v2.16b // encoding: [0x20,0x70,0x02,0x4e]
-// CHECK: tbx v0.16b, {v30.16b, v31.16b, v0.16b, v1.16b}, v2.16b // encoding: [0xc0,0x73,0x02,0x4e]
-
+ tbl v0.8b, { v1.16b }, v2.8b
+ tbl v0.8b, { v1.16b, v2.16b }, v2.8b
+ tbl v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b
+ tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b
+ tbl v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b
+
+// CHECK: tbl v0.8b, { v1.16b }, v2.8b // encoding: [0x20,0x00,0x02,0x0e]
+// CHECK: tbl v0.8b, { v1.16b, v2.16b }, v2.8b // encoding: [0x20,0x20,0x02,0x0e]
+// CHECK: tbl v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b // encoding: [0x20,0x40,0x02,0x0e]
+// CHECK: tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b // encoding: [0x20,0x60,0x02,0x0e]
+// CHECK: tbl v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b // encoding: [0xe0,0x63,0x02,0x0e]
+
+ tbl v0.16b, { v1.16b }, v2.16b
+ tbl v0.16b, { v1.16b, v2.16b }, v2.16b
+ tbl v0.16b, { v1.16b, v2.16b, v3.16b }, v2.16b
+ tbl v0.16b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.16b
+ tbl v0.16b, { v30.16b, v31.16b, v0.16b, v1.16b }, v2.16b
+
+// CHECK: tbl v0.16b, { v1.16b }, v2.16b // encoding: [0x20,0x00,0x02,0x4e]
+// CHECK: tbl v0.16b, { v1.16b, v2.16b }, v2.16b // encoding: [0x20,0x20,0x02,0x4e]
+// CHECK: tbl v0.16b, { v1.16b, v2.16b, v3.16b }, v2.16b // encoding: [0x20,0x40,0x02,0x4e]
+// CHECK: tbl v0.16b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.16b // encoding: [0x20,0x60,0x02,0x4e]
+// CHECK: tbl v0.16b, { v30.16b, v31.16b, v0.16b, v1.16b }, v2.16b // encoding: [0xc0,0x63,0x02,0x4e]
+
+ tbx v0.8b, { v1.16b }, v2.8b
+ tbx v0.8b, { v1.16b, v2.16b }, v2.8b
+ tbx v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b
+ tbx v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b
+ tbx v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b
+
+// CHECK: tbx v0.8b, { v1.16b }, v2.8b // encoding: [0x20,0x10,0x02,0x0e]
+// CHECK: tbx v0.8b, { v1.16b, v2.16b }, v2.8b // encoding: [0x20,0x30,0x02,0x0e]
+// CHECK: tbx v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b // encoding: [0x20,0x50,0x02,0x0e]
+// CHECK: tbx v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b // encoding: [0x20,0x70,0x02,0x0e]
+// CHECK: tbx v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b // encoding: [0xe0,0x73,0x02,0x0e]
+
+ tbx v0.16b, { v1.16b }, v2.16b
+ tbx v0.16b, { v1.16b, v2.16b }, v2.16b
+ tbx v0.16b, { v1.16b, v2.16b, v3.16b }, v2.16b
+ tbx v0.16b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.16b
+ tbx v0.16b, { v30.16b, v31.16b, v0.16b, v1.16b }, v2.16b
+
+// CHECK: tbx v0.16b, { v1.16b }, v2.16b // encoding: [0x20,0x10,0x02,0x4e]
+// CHECK: tbx v0.16b, { v1.16b, v2.16b }, v2.16b // encoding: [0x20,0x30,0x02,0x4e]
+// CHECK: tbx v0.16b, { v1.16b, v2.16b, v3.16b }, v2.16b // encoding: [0x20,0x50,0x02,0x4e]
+// CHECK: tbx v0.16b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.16b // encoding: [0x20,0x70,0x02,0x4e]
+// CHECK: tbx v0.16b, { v30.16b, v31.16b, v0.16b, v1.16b }, v2.16b // encoding: [0xc0,0x73,0x02,0x4e]
diff --git a/test/MC/AArch64/neon-uxtl.s b/test/MC/AArch64/neon-uxtl.s
new file mode 100644
index 000000000000..685b6362bcb1
--- /dev/null
+++ b/test/MC/AArch64/neon-uxtl.s
@@ -0,0 +1,26 @@
+// RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
+
+// Check that the assembler can handle the documented syntax for AArch64
+
+//------------------------------------------------------------------------------
+// Unsigned integer lengthen (vector)
+//------------------------------------------------------------------------------
+ uxtl v0.8h, v1.8b
+ uxtl v0.4s, v1.4h
+ uxtl v0.2d, v1.2s
+
+// CHECK: ushll v0.8h, v1.8b, #0 // encoding: [0x20,0xa4,0x08,0x2f]
+// CHECK: ushll v0.4s, v1.4h, #0 // encoding: [0x20,0xa4,0x10,0x2f]
+// CHECK: ushll v0.2d, v1.2s, #0 // encoding: [0x20,0xa4,0x20,0x2f]
+
+//------------------------------------------------------------------------------
+// Unsigned integer lengthen (vector, second part)
+//------------------------------------------------------------------------------
+
+ uxtl2 v0.8h, v1.16b
+ uxtl2 v0.4s, v1.8h
+ uxtl2 v0.2d, v1.4s
+
+// CHECK: ushll2 v0.8h, v1.16b, #0 // encoding: [0x20,0xa4,0x08,0x6f]
+// CHECK: ushll2 v0.4s, v1.8h, #0 // encoding: [0x20,0xa4,0x10,0x6f]
+// CHECK: ushll2 v0.2d, v1.4s, #0 // encoding: [0x20,0xa4,0x20,0x6f]
diff --git a/test/MC/AArch64/noneon-diagnostics.s b/test/MC/AArch64/noneon-diagnostics.s
index ea786c0ba678..60a5fd208af9 100644
--- a/test/MC/AArch64/noneon-diagnostics.s
+++ b/test/MC/AArch64/noneon-diagnostics.s
@@ -4,25 +4,26 @@
fmla v3.4s, v12.4s, v17.4s
fmla v1.2d, v30.2d, v20.2d
fmla v9.2s, v9.2s, v0.2s
-// CHECK-ERROR: error: instruction requires a CPU feature not currently enabled
+// CHECK-ERROR: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmla v3.4s, v12.4s, v17.4s
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: instruction requires a CPU feature not currently enabled
+// CHECK-ERROR-NEXT: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmla v1.2d, v30.2d, v20.2d
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: instruction requires a CPU feature not currently enabled
+// CHECK-ERROR-NEXT: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmla v9.2s, v9.2s, v0.2s
// CHECK-ERROR-NEXT: ^
fmls v3.4s, v12.4s, v17.4s
fmls v1.2d, v30.2d, v20.2d
fmls v9.2s, v9.2s, v0.2s
-// CHECK-ERROR: error: instruction requires a CPU feature not currently enabled
+
+// CHECK-ERROR: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmls v3.4s, v12.4s, v17.4s
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: instruction requires a CPU feature not currently enabled
+// CHECK-ERROR-NEXT: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmls v1.2d, v30.2d, v20.2d
// CHECK-ERROR-NEXT: ^
-// CHECK-ERROR-NEXT: error: instruction requires a CPU feature not currently enabled
+// CHECK-ERROR-NEXT: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmls v9.2s, v9.2s, v0.2s
// CHECK-ERROR-NEXT: ^
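
The only substantive change in noneon-diagnostics.s is the diagnostic text: the generic "instruction requires a CPU feature not currently enabled" becomes the more specific "instruction requires: neon". A rough way to reproduce the message outside the test suite is sketched below; the RUN line is an assumption (the test's real RUN lines sit above this hunk and are not part of the diff):

// RUN: not llvm-mc -triple=aarch64-none-linux-gnu -mattr=-neon < %s 2>&1 \
// RUN:   | FileCheck --check-prefix=CHECK-ERROR %s
        fmla v3.4s, v12.4s, v17.4s
// CHECK-ERROR: error: instruction requires: neon
// CHECK-ERROR-NEXT: fmla v3.4s, v12.4s, v17.4s
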
diff --git a/test/MC/AArch64/optional-hash.s b/test/MC/AArch64/optional-hash.s
new file mode 100644
index 000000000000..3922b5be34a1
--- /dev/null
+++ b/test/MC/AArch64/optional-hash.s
@@ -0,0 +1,17 @@
+// PR18929
+// RUN: llvm-mc < %s -triple=aarch64-linux-gnueabi -mattr=+fp-armv8,+neon -filetype=obj -o - \
+// RUN: | llvm-objdump --disassemble -arch=arm64 -mattr=+fp-armv8,+neon - | FileCheck %s
+
+ .text
+// CHECK: cmp w0, #123
+ cmp w0, 123
+// CHECK: fmov s0, #1.06250000
+ fmov s0, 1.0625
+// CHECK: fcmp s0, #0.0
+ fcmp s0, 0.0
+// CHECK: cmgt v0.8b, v15.8b, #0
+ cmgt v0.8b, v15.8b, 0
+// CHECK: fcmeq v0.2s, v31.2s, #0.0
+ fcmeq v0.2s, v31.2s, 0.0
+l1:
+l2:
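
This new file is the regression test for PR18929: the '#' prefix on immediate and floating-point-immediate operands is optional on input, while the canonical (disassembled) form always prints it. The same point can be made without the object-file round trip; here is a sketch using -show-encoding, assuming the integrated printer emits the aliased cmp form just as the objdump check above suggests:

// RUN: llvm-mc -triple=aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
        cmp w0, 123                // '#' omitted in the source
        cmp w0, #123               // '#' written explicitly
// Both are expected to print with the '#', as in the objdump-based check above.
// CHECK: cmp w0, #123
// CHECK: cmp w0, #123
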
diff --git a/test/MC/AArch64/tls-relocs.s b/test/MC/AArch64/tls-relocs.s
index f99cb41fe5e9..ebf02167a8f3 100644
--- a/test/MC/AArch64/tls-relocs.s
+++ b/test/MC/AArch64/tls-relocs.s
@@ -7,14 +7,15 @@
movn x2, #:dtprel_g2:var
movz x3, #:dtprel_g2:var
movn x4, #:dtprel_g2:var
-// CHECK: movz x1, #:dtprel_g2:var // encoding: [0x01'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
-// CHECK: movn x2, #:dtprel_g2:var // encoding: [0x02'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
-// CHECK: movz x3, #:dtprel_g2:var // encoding: [0x03'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
-// CHECK: movn x4, #:dtprel_g2:var // encoding: [0x04'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_a64_movw_dtprel_g2
+
+// CHECK: movz x1, #:dtprel_g2:var // encoding: [0bAAA00001,A,0b110AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_aarch64_movw
+// CHECK: movn x2, #:dtprel_g2:var // encoding: [0bAAA00010,A,0b110AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_aarch64_movw
+// CHECK: movz x3, #:dtprel_g2:var // encoding: [0bAAA00011,A,0b110AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_aarch64_movw
+// CHECK: movn x4, #:dtprel_g2:var // encoding: [0bAAA00100,A,0b110AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g2:var, kind: fixup_aarch64_movw
// CHECK-ELF: Relocations [
// CHECK-ELF-NEXT: Section (2) .rela.text {
@@ -28,14 +29,15 @@
movn x6, #:dtprel_g1:var
movz w7, #:dtprel_g1:var
movn w8, #:dtprel_g1:var
-// CHECK: movz x5, #:dtprel_g1:var // encoding: [0x05'A',A,0xa0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
-// CHECK: movn x6, #:dtprel_g1:var // encoding: [0x06'A',A,0xa0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
-// CHECK: movz w7, #:dtprel_g1:var // encoding: [0x07'A',A,0xa0'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
-// CHECK: movn w8, #:dtprel_g1:var // encoding: [0x08'A',A,0xa0'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_a64_movw_dtprel_g1
+
+// CHECK: movz x5, #:dtprel_g1:var // encoding: [0bAAA00101,A,0b101AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movn x6, #:dtprel_g1:var // encoding: [0bAAA00110,A,0b101AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movz w7, #:dtprel_g1:var // encoding: [0bAAA00111,A,0b101AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movn w8, #:dtprel_g1:var // encoding: [0bAAA01000,A,0b101AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g1:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x10 R_AARCH64_TLSLD_MOVW_DTPREL_G1 [[VARSYM]]
// CHECK-ELF-NEXT: 0x14 R_AARCH64_TLSLD_MOVW_DTPREL_G1 [[VARSYM]]
@@ -45,10 +47,11 @@
movk x9, #:dtprel_g1_nc:var
movk w10, #:dtprel_g1_nc:var
-// CHECK: movk x9, #:dtprel_g1_nc:var // encoding: [0x09'A',A,0xa0'A',0xf2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_a64_movw_dtprel_g1_nc
-// CHECK: movk w10, #:dtprel_g1_nc:var // encoding: [0x0a'A',A,0xa0'A',0x72'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_a64_movw_dtprel_g1_nc
+
+// CHECK: movk x9, #:dtprel_g1_nc:var // encoding: [0bAAA01001,A,0b101AAAAA,0xf2]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w10, #:dtprel_g1_nc:var // encoding: [0bAAA01010,A,0b101AAAAA,0x72]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g1_nc:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x20 R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC [[VARSYM]]
// CHECK-ELF-NEXT: 0x24 R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC [[VARSYM]]
@@ -58,13 +61,15 @@
movn x12, #:dtprel_g0:var
movz w13, #:dtprel_g0:var
movn w14, #:dtprel_g0:var
-// CHECK: movz x11, #:dtprel_g0:var // encoding: [0x0b'A',A,0x80'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_a64_movw_dtprel_g0
-// CHECK: movn x12, #:dtprel_g0:var // encoding: [0x0c'A',A,0x80'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_a64_movw_dtprel_g0
-// CHECK: movz w13, #:dtprel_g0:var // encoding: [0x0d'A',A,0x80'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_a64_movw_dtprel_g0
-// CHECK: movn w14, #:dtprel_g0:var // encoding: [0x0e'A',A,0x80'A',0x12'A']
+
+// CHECK: movz x11, #:dtprel_g0:var // encoding: [0bAAA01011,A,0b100AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movn x12, #:dtprel_g0:var // encoding: [0bAAA01100,A,0b100AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movz w13, #:dtprel_g0:var // encoding: [0bAAA01101,A,0b100AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movn w14, #:dtprel_g0:var // encoding: [0bAAA01110,A,0b100AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g0:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x28 R_AARCH64_TLSLD_MOVW_DTPREL_G0 [[VARSYM]]
// CHECK-ELF-NEXT: 0x2C R_AARCH64_TLSLD_MOVW_DTPREL_G0 [[VARSYM]]
@@ -74,10 +79,11 @@
movk x15, #:dtprel_g0_nc:var
movk w16, #:dtprel_g0_nc:var
-// CHECK: movk x15, #:dtprel_g0_nc:var // encoding: [0x0f'A',A,0x80'A',0xf2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_a64_movw_dtprel_g0_nc
-// CHECK: movk w16, #:dtprel_g0_nc:var // encoding: [0x10'A',A,0x80'A',0x72'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_a64_movw_dtprel_g0_nc
+
+// CHECK: movk x15, #:dtprel_g0_nc:var // encoding: [0bAAA01111,A,0b100AAAAA,0xf2]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w16, #:dtprel_g0_nc:var // encoding: [0bAAA10000,A,0b100AAAAA,0x72]
+// CHECK: // fixup A - offset: 0, value: :dtprel_g0_nc:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x38 R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC [[VARSYM]]
// CHECK-ELF-NEXT: 0x3C R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC [[VARSYM]]
@@ -85,10 +91,11 @@
add x17, x18, #:dtprel_hi12:var, lsl #12
add w19, w20, #:dtprel_hi12:var, lsl #12
-// CHECK: add x17, x18, #:dtprel_hi12:var, lsl #12 // encoding: [0x51'A',0x02'A',0x40'A',0x91'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_hi12:var, kind: fixup_a64_add_dtprel_hi12
-// CHECK: add w19, w20, #:dtprel_hi12:var, lsl #12 // encoding: [0x93'A',0x02'A',0x40'A',0x11'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_hi12:var, kind: fixup_a64_add_dtprel_hi12
+
+// CHECK: add x17, x18, :dtprel_hi12:var, lsl #12 // encoding: [0x51,0bAAAAAA10,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :dtprel_hi12:var, kind: fixup_aarch64_add_imm12
+// CHECK: add w19, w20, :dtprel_hi12:var, lsl #12 // encoding: [0x93,0bAAAAAA10,0b00AAAAAA,0x11]
+// CHECK: // fixup A - offset: 0, value: :dtprel_hi12:var, kind: fixup_aarch64_add_imm12
// CHECK-ELF-NEXT: 0x40 R_AARCH64_TLSLD_ADD_DTPREL_HI12 [[VARSYM]]
// CHECK-ELF-NEXT: 0x44 R_AARCH64_TLSLD_ADD_DTPREL_HI12 [[VARSYM]]
@@ -96,10 +103,11 @@
add x21, x22, #:dtprel_lo12:var
add w23, w24, #:dtprel_lo12:var
-// CHECK: add x21, x22, #:dtprel_lo12:var // encoding: [0xd5'A',0x02'A',A,0x91'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_add_dtprel_lo12
-// CHECK: add w23, w24, #:dtprel_lo12:var // encoding: [0x17'A',0x03'A',A,0x11'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_add_dtprel_lo12
+
+// CHECK: add x21, x22, :dtprel_lo12:var // encoding: [0xd5,0bAAAAAA10,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_add_imm12
+// CHECK: add w23, w24, :dtprel_lo12:var // encoding: [0x17,0bAAAAAA11,0b00AAAAAA,0x11]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_add_imm12
// CHECK-ELF-NEXT: 0x48 R_AARCH64_TLSLD_ADD_DTPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0x4C R_AARCH64_TLSLD_ADD_DTPREL_LO12 [[VARSYM]]
@@ -107,10 +115,11 @@
add x25, x26, #:dtprel_lo12_nc:var
add w27, w28, #:dtprel_lo12_nc:var
-// CHECK: add x25, x26, #:dtprel_lo12_nc:var // encoding: [0x59'A',0x03'A',A,0x91'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_add_dtprel_lo12_nc
-// CHECK: add w27, w28, #:dtprel_lo12_nc:var // encoding: [0x9b'A',0x03'A',A,0x11'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_add_dtprel_lo12_nc
+
+// CHECK: add x25, x26, :dtprel_lo12_nc:var // encoding: [0x59,0bAAAAAA11,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_add_imm12
+// CHECK: add w27, w28, :dtprel_lo12_nc:var // encoding: [0x9b,0bAAAAAA11,0b00AAAAAA,0x11]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_add_imm12
// CHECK-ELF-NEXT: 0x50 R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC [[VARSYM]]
// CHECK-ELF-NEXT: 0x54 R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC [[VARSYM]]
@@ -118,10 +127,11 @@
ldrb w29, [x30, #:dtprel_lo12:var]
ldrsb x29, [x28, #:dtprel_lo12_nc:var]
-// CHECK: ldrb w29, [x30, #:dtprel_lo12:var] // encoding: [0xdd'A',0x03'A',0x40'A',0x39'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst8_dtprel_lo12
-// CHECK: ldrsb x29, [x28, #:dtprel_lo12_nc:var] // encoding: [0x9d'A',0x03'A',0x80'A',0x39'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst8_dtprel_lo12_nc
+
+// CHECK: ldrb w29, [x30, :dtprel_lo12:var] // encoding: [0xdd,0bAAAAAA11,0b01AAAAAA,0x39]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale1
+// CHECK: ldrsb x29, [x28, :dtprel_lo12_nc:var] // encoding: [0x9d,0bAAAAAA11,0b10AAAAAA,0x39]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale1
// CHECK-ELF-NEXT: 0x58 R_AARCH64_TLSLD_LDST8_DTPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0x5C R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC [[VARSYM]]
@@ -129,10 +139,11 @@
strh w27, [x26, #:dtprel_lo12:var]
ldrsh x25, [x24, #:dtprel_lo12_nc:var]
-// CHECK: strh w27, [x26, #:dtprel_lo12:var] // encoding: [0x5b'A',0x03'A',A,0x79'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst16_dtprel_lo12
-// CHECK: ldrsh x25, [x24, #:dtprel_lo12_nc:var] // encoding: [0x19'A',0x03'A',0x80'A',0x79'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst16_dtprel_lo12_n
+
+// CHECK: strh w27, [x26, :dtprel_lo12:var] // encoding: [0x5b,0bAAAAAA11,0b00AAAAAA,0x79]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale2
+// CHECK: ldrsh x25, [x24, :dtprel_lo12_nc:var] // encoding: [0x19,0bAAAAAA11,0b10AAAAAA,0x79]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale2
// CHECK-ELF-NEXT: 0x60 R_AARCH64_TLSLD_LDST16_DTPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0x64 R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC [[VARSYM]]
@@ -140,10 +151,11 @@
ldr w23, [x22, #:dtprel_lo12:var]
ldrsw x21, [x20, #:dtprel_lo12_nc:var]
-// CHECK: ldr w23, [x22, #:dtprel_lo12:var] // encoding: [0xd7'A',0x02'A',0x40'A',0xb9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst32_dtprel_lo12
-// CHECK: ldrsw x21, [x20, #:dtprel_lo12_nc:var] // encoding: [0x95'A',0x02'A',0x80'A',0xb9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst32_dtprel_lo12_n
+
+// CHECK: ldr w23, [x22, :dtprel_lo12:var] // encoding: [0xd7,0bAAAAAA10,0b01AAAAAA,0xb9]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale4
+// CHECK: ldrsw x21, [x20, :dtprel_lo12_nc:var] // encoding: [0x95,0bAAAAAA10,0b10AAAAAA,0xb9]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale4
// CHECK-ELF-NEXT: 0x68 R_AARCH64_TLSLD_LDST32_DTPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0x6C R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC [[VARSYM]]
@@ -151,11 +163,11 @@
ldr x19, [x18, #:dtprel_lo12:var]
str x17, [x16, #:dtprel_lo12_nc:var]
-// CHECK: ldr x19, [x18, #:dtprel_lo12:var] // encoding: [0x53'A',0x02'A',0x40'A',0xf9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_a64_ldst64_dtprel_lo12
-// CHECK: str x17, [x16, #:dtprel_lo12_nc:var] // encoding: [0x11'A',0x02'A',A,0xf9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_a64_ldst64_dtprel_lo12_nc
+// CHECK: ldr x19, [x18, :dtprel_lo12:var] // encoding: [0x53,0bAAAAAA10,0b01AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: str x17, [x16, :dtprel_lo12_nc:var] // encoding: [0x11,0bAAAAAA10,0b00AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :dtprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale8
// CHECK-ELF-NEXT: 0x70 R_AARCH64_TLSLD_LDST64_DTPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0x74 R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC [[VARSYM]]
@@ -164,10 +176,11 @@
// TLS initial-exec forms
movz x15, #:gottprel_g1:var
movz w14, #:gottprel_g1:var
-// CHECK: movz x15, #:gottprel_g1:var // encoding: [0x0f'A',A,0xa0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g1:var, kind: fixup_a64_movw_gottprel_g1
-// CHECK: movz w14, #:gottprel_g1:var // encoding: [0x0e'A',A,0xa0'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g1:var, kind: fixup_a64_movw_gottprel_g1
+
+// CHECK: movz x15, #:gottprel_g1:var // encoding: [0bAAA01111,A,0b101AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :gottprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movz w14, #:gottprel_g1:var // encoding: [0bAAA01110,A,0b101AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :gottprel_g1:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x78 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 [[VARSYM]]
// CHECK-ELF-NEXT: 0x7C R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 [[VARSYM]]
@@ -175,10 +188,11 @@
movk x13, #:gottprel_g0_nc:var
movk w12, #:gottprel_g0_nc:var
-// CHECK: movk x13, #:gottprel_g0_nc:var // encoding: [0x0d'A',A,0x80'A',0xf2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g0_nc:var, kind: fixup_a64_movw_gottprel_g0_nc
-// CHECK: movk w12, #:gottprel_g0_nc:var // encoding: [0x0c'A',A,0x80'A',0x72'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_g0_nc:var, kind: fixup_a64_movw_gottprel_g0_nc
+
+// CHECK: movk x13, #:gottprel_g0_nc:var // encoding: [0bAAA01101,A,0b100AAAAA,0xf2]
+// CHECK: // fixup A - offset: 0, value: :gottprel_g0_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w12, #:gottprel_g0_nc:var // encoding: [0bAAA01100,A,0b100AAAAA,0x72]
+// CHECK: // fixup A - offset: 0, value: :gottprel_g0_nc:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x80 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC [[VARSYM]]
// CHECK-ELF-NEXT: 0x84 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC [[VARSYM]]
@@ -187,12 +201,13 @@
adrp x11, :gottprel:var
ldr x10, [x0, #:gottprel_lo12:var]
ldr x9, :gottprel:var
+
// CHECK: adrp x11, :gottprel:var // encoding: [0x0b'A',A,A,0x90'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_a64_adr_gottprel_page
-// CHECK: ldr x10, [x0, #:gottprel_lo12:var] // encoding: [0x0a'A',A,0x40'A',0xf9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel_lo12:var, kind: fixup_a64_ld64_gottprel_lo12_nc
-// CHECK: ldr x9, :gottprel:var // encoding: [0x09'A',A,A,0x58'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_a64_ld_gottprel_prel19
+// CHECK: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_aarch64_pcrel_adrp_imm21
+// CHECK: ldr x10, [x0, :gottprel_lo12:var] // encoding: [0x0a,0bAAAAAA00,0b01AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :gottprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: ldr x9, :gottprel:var // encoding: [0bAAA01001,A,A,0x58]
+// CHECK: // fixup A - offset: 0, value: :gottprel:var, kind: fixup_aarch64_ldr_pcrel_imm19
// CHECK-ELF-NEXT: 0x88 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 [[VARSYM]]
// CHECK-ELF-NEXT: 0x8C R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC [[VARSYM]]
@@ -202,10 +217,11 @@
// TLS local-exec forms
movz x3, #:tprel_g2:var
movn x4, #:tprel_g2:var
-// CHECK: movz x3, #:tprel_g2:var // encoding: [0x03'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_a64_movw_tprel_g2
-// CHECK: movn x4, #:tprel_g2:var // encoding: [0x04'A',A,0xc0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_a64_movw_tprel_g2
+
+// CHECK: movz x3, #:tprel_g2:var // encoding: [0bAAA00011,A,0b110AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_aarch64_movw
+// CHECK: movn x4, #:tprel_g2:var // encoding: [0bAAA00100,A,0b110AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :tprel_g2:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x94 R_AARCH64_TLSLE_MOVW_TPREL_G2 [[VARSYM]]
// CHECK-ELF-NEXT: 0x98 R_AARCH64_TLSLE_MOVW_TPREL_G2 [[VARSYM]]
@@ -215,14 +231,15 @@
movn x6, #:tprel_g1:var
movz w7, #:tprel_g1:var
movn w8, #:tprel_g1:var
-// CHECK: movz x5, #:tprel_g1:var // encoding: [0x05'A',A,0xa0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
-// CHECK: movn x6, #:tprel_g1:var // encoding: [0x06'A',A,0xa0'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
-// CHECK: movz w7, #:tprel_g1:var // encoding: [0x07'A',A,0xa0'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
-// CHECK: movn w8, #:tprel_g1:var // encoding: [0x08'A',A,0xa0'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_a64_movw_tprel_g1
+
+// CHECK: movz x5, #:tprel_g1:var // encoding: [0bAAA00101,A,0b101AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movn x6, #:tprel_g1:var // encoding: [0bAAA00110,A,0b101AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movz w7, #:tprel_g1:var // encoding: [0bAAA00111,A,0b101AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_aarch64_movw
+// CHECK: movn w8, #:tprel_g1:var // encoding: [0bAAA01000,A,0b101AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :tprel_g1:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0x9C R_AARCH64_TLSLE_MOVW_TPREL_G1 [[VARSYM]]
// CHECK-ELF-NEXT: 0xA0 R_AARCH64_TLSLE_MOVW_TPREL_G1 [[VARSYM]]
@@ -232,10 +249,11 @@
movk x9, #:tprel_g1_nc:var
movk w10, #:tprel_g1_nc:var
-// CHECK: movk x9, #:tprel_g1_nc:var // encoding: [0x09'A',A,0xa0'A',0xf2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_a64_movw_tprel_g1_nc
-// CHECK: movk w10, #:tprel_g1_nc:var // encoding: [0x0a'A',A,0xa0'A',0x72'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_a64_movw_tprel_g1_nc
+
+// CHECK: movk x9, #:tprel_g1_nc:var // encoding: [0bAAA01001,A,0b101AAAAA,0xf2]
+// CHECK: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w10, #:tprel_g1_nc:var // encoding: [0bAAA01010,A,0b101AAAAA,0x72]
+// CHECK: // fixup A - offset: 0, value: :tprel_g1_nc:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0xAC R_AARCH64_TLSLE_MOVW_TPREL_G1_NC [[VARSYM]]
// CHECK-ELF-NEXT: 0xB0 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC [[VARSYM]]
@@ -245,14 +263,15 @@
movn x12, #:tprel_g0:var
movz w13, #:tprel_g0:var
movn w14, #:tprel_g0:var
-// CHECK: movz x11, #:tprel_g0:var // encoding: [0x0b'A',A,0x80'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
-// CHECK: movn x12, #:tprel_g0:var // encoding: [0x0c'A',A,0x80'A',0x92'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
-// CHECK: movz w13, #:tprel_g0:var // encoding: [0x0d'A',A,0x80'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
-// CHECK: movn w14, #:tprel_g0:var // encoding: [0x0e'A',A,0x80'A',0x12'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_a64_movw_tprel_g0
+
+// CHECK: movz x11, #:tprel_g0:var // encoding: [0bAAA01011,A,0b100AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movn x12, #:tprel_g0:var // encoding: [0bAAA01100,A,0b100AAAAA,0x92]
+// CHECK: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movz w13, #:tprel_g0:var // encoding: [0bAAA01101,A,0b100AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_aarch64_movw
+// CHECK: movn w14, #:tprel_g0:var // encoding: [0bAAA01110,A,0b100AAAAA,0x12]
+// CHECK: // fixup A - offset: 0, value: :tprel_g0:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0xB4 R_AARCH64_TLSLE_MOVW_TPREL_G0 [[VARSYM]]
// CHECK-ELF-NEXT: 0xB8 R_AARCH64_TLSLE_MOVW_TPREL_G0 [[VARSYM]]
@@ -262,10 +281,11 @@
movk x15, #:tprel_g0_nc:var
movk w16, #:tprel_g0_nc:var
-// CHECK: movk x15, #:tprel_g0_nc:var // encoding: [0x0f'A',A,0x80'A',0xf2'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_a64_movw_tprel_g0_nc
-// CHECK: movk w16, #:tprel_g0_nc:var // encoding: [0x10'A',A,0x80'A',0x72'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_a64_movw_tprel_g0_nc
+
+// CHECK: movk x15, #:tprel_g0_nc:var // encoding: [0bAAA01111,A,0b100AAAAA,0xf2]
+// CHECK: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_aarch64_movw
+// CHECK: movk w16, #:tprel_g0_nc:var // encoding: [0bAAA10000,A,0b100AAAAA,0x72]
+// CHECK: // fixup A - offset: 0, value: :tprel_g0_nc:var, kind: fixup_aarch64_movw
// CHECK-ELF-NEXT: 0xC4 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC [[VARSYM]]
// CHECK-ELF-NEXT: 0xC8 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC [[VARSYM]]
@@ -273,10 +293,11 @@
add x17, x18, #:tprel_hi12:var, lsl #12
add w19, w20, #:tprel_hi12:var, lsl #12
-// CHECK: add x17, x18, #:tprel_hi12:var, lsl #12 // encoding: [0x51'A',0x02'A',0x40'A',0x91'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_hi12:var, kind: fixup_a64_add_tprel_hi12
-// CHECK: add w19, w20, #:tprel_hi12:var, lsl #12 // encoding: [0x93'A',0x02'A',0x40'A',0x11'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_hi12:var, kind: fixup_a64_add_tprel_hi12
+
+// CHECK: add x17, x18, :tprel_hi12:var, lsl #12 // encoding: [0x51,0bAAAAAA10,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :tprel_hi12:var, kind: fixup_aarch64_add_imm12
+// CHECK: add w19, w20, :tprel_hi12:var, lsl #12 // encoding: [0x93,0bAAAAAA10,0b00AAAAAA,0x11]
+// CHECK: // fixup A - offset: 0, value: :tprel_hi12:var, kind: fixup_aarch64_add_imm12
// CHECK-ELF-NEXT: 0xCC R_AARCH64_TLSLE_ADD_TPREL_HI12 [[VARSYM]]
// CHECK-ELF-NEXT: 0xD0 R_AARCH64_TLSLE_ADD_TPREL_HI12 [[VARSYM]]
@@ -284,10 +305,11 @@
add x21, x22, #:tprel_lo12:var
add w23, w24, #:tprel_lo12:var
-// CHECK: add x21, x22, #:tprel_lo12:var // encoding: [0xd5'A',0x02'A',A,0x91'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_add_tprel_lo12
-// CHECK: add w23, w24, #:tprel_lo12:var // encoding: [0x17'A',0x03'A',A,0x11'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_add_tprel_lo12
+
+// CHECK: add x21, x22, :tprel_lo12:var // encoding: [0xd5,0bAAAAAA10,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_add_imm12
+// CHECK: add w23, w24, :tprel_lo12:var // encoding: [0x17,0bAAAAAA11,0b00AAAAAA,0x11]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_add_imm12
// CHECK-ELF-NEXT: 0xD4 R_AARCH64_TLSLE_ADD_TPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0xD8 R_AARCH64_TLSLE_ADD_TPREL_LO12 [[VARSYM]]
@@ -295,10 +317,11 @@
add x25, x26, #:tprel_lo12_nc:var
add w27, w28, #:tprel_lo12_nc:var
-// CHECK: add x25, x26, #:tprel_lo12_nc:var // encoding: [0x59'A',0x03'A',A,0x91'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_add_tprel_lo12_nc
-// CHECK: add w27, w28, #:tprel_lo12_nc:var // encoding: [0x9b'A',0x03'A',A,0x11'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_add_tprel_lo12_nc
+
+// CHECK: add x25, x26, :tprel_lo12_nc:var // encoding: [0x59,0bAAAAAA11,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_add_imm12
+// CHECK: add w27, w28, :tprel_lo12_nc:var // encoding: [0x9b,0bAAAAAA11,0b00AAAAAA,0x11]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_add_imm12
// CHECK-ELF-NEXT: 0xDC R_AARCH64_TLSLE_ADD_TPREL_LO12_NC [[VARSYM]]
// CHECK-ELF-NEXT: 0xE0 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC [[VARSYM]]
@@ -306,10 +329,11 @@
ldrb w29, [x30, #:tprel_lo12:var]
ldrsb x29, [x28, #:tprel_lo12_nc:var]
-// CHECK: ldrb w29, [x30, #:tprel_lo12:var] // encoding: [0xdd'A',0x03'A',0x40'A',0x39'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst8_tprel_lo12
-// CHECK: ldrsb x29, [x28, #:tprel_lo12_nc:var] // encoding: [0x9d'A',0x03'A',0x80'A',0x39'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst8_tprel_lo12_nc
+
+// CHECK: ldrb w29, [x30, :tprel_lo12:var] // encoding: [0xdd,0bAAAAAA11,0b01AAAAAA,0x39]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale1
+// CHECK: ldrsb x29, [x28, :tprel_lo12_nc:var] // encoding: [0x9d,0bAAAAAA11,0b10AAAAAA,0x39]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale1
// CHECK-ELF-NEXT: 0xE4 R_AARCH64_TLSLE_LDST8_TPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0xE8 R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC [[VARSYM]]
@@ -317,10 +341,11 @@
strh w27, [x26, #:tprel_lo12:var]
ldrsh x25, [x24, #:tprel_lo12_nc:var]
-// CHECK: strh w27, [x26, #:tprel_lo12:var] // encoding: [0x5b'A',0x03'A',A,0x79'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst16_tprel_lo12
-// CHECK: ldrsh x25, [x24, #:tprel_lo12_nc:var] // encoding: [0x19'A',0x03'A',0x80'A',0x79'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst16_tprel_lo12_n
+
+// CHECK: strh w27, [x26, :tprel_lo12:var] // encoding: [0x5b,0bAAAAAA11,0b00AAAAAA,0x79]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale2
+// CHECK: ldrsh x25, [x24, :tprel_lo12_nc:var] // encoding: [0x19,0bAAAAAA11,0b10AAAAAA,0x79]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale2
// CHECK-ELF-NEXT: 0xEC R_AARCH64_TLSLE_LDST16_TPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0xF0 R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC [[VARSYM]]
@@ -328,20 +353,22 @@
ldr w23, [x22, #:tprel_lo12:var]
ldrsw x21, [x20, #:tprel_lo12_nc:var]
-// CHECK: ldr w23, [x22, #:tprel_lo12:var] // encoding: [0xd7'A',0x02'A',0x40'A',0xb9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst32_tprel_lo12
-// CHECK: ldrsw x21, [x20, #:tprel_lo12_nc:var] // encoding: [0x95'A',0x02'A',0x80'A',0xb9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst32_tprel_lo12_n
+
+// CHECK: ldr w23, [x22, :tprel_lo12:var] // encoding: [0xd7,0bAAAAAA10,0b01AAAAAA,0xb9]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale4
+// CHECK: ldrsw x21, [x20, :tprel_lo12_nc:var] // encoding: [0x95,0bAAAAAA10,0b10AAAAAA,0xb9]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale4
// CHECK-ELF-NEXT: 0xF4 R_AARCH64_TLSLE_LDST32_TPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0xF8 R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC [[VARSYM]]
ldr x19, [x18, #:tprel_lo12:var]
str x17, [x16, #:tprel_lo12_nc:var]
-// CHECK: ldr x19, [x18, #:tprel_lo12:var] // encoding: [0x53'A',0x02'A',0x40'A',0xf9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_a64_ldst64_tprel_lo12
-// CHECK: str x17, [x16, #:tprel_lo12_nc:var] // encoding: [0x11'A',0x02'A',A,0xf9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_a64_ldst64_tprel_lo12_nc
+
+// CHECK: ldr x19, [x18, :tprel_lo12:var] // encoding: [0x53,0bAAAAAA10,0b01AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: str x17, [x16, :tprel_lo12_nc:var] // encoding: [0x11,0bAAAAAA10,0b00AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :tprel_lo12_nc:var, kind: fixup_aarch64_ldst_imm12_scale8
// CHECK-ELF-NEXT: 0xFC R_AARCH64_TLSLE_LDST64_TPREL_LO12 [[VARSYM]]
// CHECK-ELF-NEXT: 0x100 R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC [[VARSYM]]
@@ -353,16 +380,16 @@
.tlsdesccall var
blr x3
+
// CHECK: adrp x8, :tlsdesc:var // encoding: [0x08'A',A,A,0x90'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc:var, kind: fixup_a64_tlsdesc_adr_page
-// CHECK: ldr x7, [x6, #:tlsdesc_lo12:var] // encoding: [0xc7'A',A,0x40'A',0xf9'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_a64_tlsdesc_ld64_lo12_nc
-// CHECK: add x5, x4, #:tlsdesc_lo12:var // encoding: [0x85'A',A,A,0x91'A']
-// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_a64_tlsdesc_add_lo12_nc
+// CHECK: // fixup A - offset: 0, value: :tlsdesc:var, kind: fixup_aarch64_pcrel_adrp_imm21
+// CHECK: ldr x7, [x6, :tlsdesc_lo12:var] // encoding: [0xc7,0bAAAAAA00,0b01AAAAAA,0xf9]
+// CHECK: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_aarch64_ldst_imm12_scale8
+// CHECK: add x5, x4, :tlsdesc_lo12:var // encoding: [0x85,0bAAAAAA00,0b00AAAAAA,0x91]
+// CHECK: // fixup A - offset: 0, value: :tlsdesc_lo12:var, kind: fixup_aarch64_add_imm12
// CHECK: .tlsdesccall var // encoding: []
-// CHECK-NEXT: // fixup A - offset: 0, value: :tlsdesc:var, kind: fixup_a64_tlsdesc_call
-// CHECK: blr x3 // encoding: [0x60,0x00,0x3f,0xd6]
-
+// CHECK: // fixup A - offset: 0, value: var, kind: fixup_aarch64_tlsdesc_call
+// CHECK: blr x3 // encoding: [0x60,0x00,0x3f,0xd6]
// CHECK-ELF-NEXT: 0x104 R_AARCH64_TLSDESC_ADR_PAGE [[VARSYM]]
// CHECK-ELF-NEXT: 0x108 R_AARCH64_TLSDESC_LD64_LO12_NC [[VARSYM]]
@@ -374,7 +401,7 @@
// CHECK-ELF: Symbols [
// CHECK-ELF: Symbol {
-// CHECK-ELF: Name: var (6)
+// CHECK-ELF: Name: var
// CHECK-ELF-NEXT: Value:
// CHECK-ELF-NEXT: Size:
// CHECK-ELF-NEXT: Binding: Global
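
Two things change throughout the tls-relocs.s hunks above. First, the many relocation-specific fixup kinds (fixup_a64_movw_dtprel_g2 and friends) collapse into a few generic ones (fixup_aarch64_movw, fixup_aarch64_add_imm12, fixup_aarch64_ldst_imm12_scaleN, ...); since the CHECK-ELF relocation checks are unchanged, the distinct relocation types are evidently now derived from the operand's :...: modifier rather than from the fixup name. Second, the encoding strings switch to a bit-level notation in which every bit written as 'A' belongs to fixup A and is filled only when the fixup is applied. In [0bAAA00001,A,0b110AAAAA,0x92], for instance, the sixteen 'A' bits (3 + 8 + 5 across the first three bytes) are exactly the MOVZ/MOVN imm16 field, the fixed 00001 is the destination register x1, and the remaining fixed bits hold the opcode and shift fields. With a plain immediate the same field is concrete; the sketch below asserts only the encoding, which was computed by hand from the MOVZ layout and should be treated as an assumption:

// RUN: llvm-mc -triple=aarch64-none-linux-gnu -show-encoding < %s | FileCheck %s
        movz x1, #0x1234, lsl #32
// imm16 = 0x1234 lands in the bit positions that the 'A' markers cover in the
// relocated forms above; Rd = 1 occupies the low five bits.
// CHECK: encoding: [0x81,0x46,0xc2,0xd2]
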
diff --git a/test/MC/AArch64/trace-regs.s b/test/MC/AArch64/trace-regs.s
index f9ab4c9ad975..92f16cd54f31 100644
--- a/test/MC/AArch64/trace-regs.s
+++ b/test/MC/AArch64/trace-regs.s
@@ -1,4 +1,5 @@
// RUN: llvm-mc -triple=aarch64-none-linux-gnu -show-encoding < %s | FileCheck %s
+
mrs x8, trcstatr
mrs x9, trcidr8
mrs x11, trcidr9
@@ -207,214 +208,214 @@
mrs x22, trcitctrl
mrs x23, trcclaimset
mrs x14, trcclaimclr
-// CHECK: mrs x8, trcstatr // encoding: [0x08,0x03,0x31,0xd5]
-// CHECK: mrs x9, trcidr8 // encoding: [0xc9,0x00,0x31,0xd5]
-// CHECK: mrs x11, trcidr9 // encoding: [0xcb,0x01,0x31,0xd5]
-// CHECK: mrs x25, trcidr10 // encoding: [0xd9,0x02,0x31,0xd5]
-// CHECK: mrs x7, trcidr11 // encoding: [0xc7,0x03,0x31,0xd5]
-// CHECK: mrs x7, trcidr12 // encoding: [0xc7,0x04,0x31,0xd5]
-// CHECK: mrs x6, trcidr13 // encoding: [0xc6,0x05,0x31,0xd5]
-// CHECK: mrs x27, trcidr0 // encoding: [0xfb,0x08,0x31,0xd5]
-// CHECK: mrs x29, trcidr1 // encoding: [0xfd,0x09,0x31,0xd5]
-// CHECK: mrs x4, trcidr2 // encoding: [0xe4,0x0a,0x31,0xd5]
-// CHECK: mrs x8, trcidr3 // encoding: [0xe8,0x0b,0x31,0xd5]
-// CHECK: mrs x15, trcidr4 // encoding: [0xef,0x0c,0x31,0xd5]
-// CHECK: mrs x20, trcidr5 // encoding: [0xf4,0x0d,0x31,0xd5]
-// CHECK: mrs x6, trcidr6 // encoding: [0xe6,0x0e,0x31,0xd5]
-// CHECK: mrs x6, trcidr7 // encoding: [0xe6,0x0f,0x31,0xd5]
-// CHECK: mrs x24, trcoslsr // encoding: [0x98,0x11,0x31,0xd5]
-// CHECK: mrs x18, trcpdsr // encoding: [0x92,0x15,0x31,0xd5]
-// CHECK: mrs x28, trcdevaff0 // encoding: [0xdc,0x7a,0x31,0xd5]
-// CHECK: mrs x5, trcdevaff1 // encoding: [0xc5,0x7b,0x31,0xd5]
-// CHECK: mrs x5, trclsr // encoding: [0xc5,0x7d,0x31,0xd5]
-// CHECK: mrs x11, trcauthstatus // encoding: [0xcb,0x7e,0x31,0xd5]
-// CHECK: mrs x13, trcdevarch // encoding: [0xcd,0x7f,0x31,0xd5]
-// CHECK: mrs x18, trcdevid // encoding: [0xf2,0x72,0x31,0xd5]
-// CHECK: mrs x22, trcdevtype // encoding: [0xf6,0x73,0x31,0xd5]
-// CHECK: mrs x14, trcpidr4 // encoding: [0xee,0x74,0x31,0xd5]
-// CHECK: mrs x5, trcpidr5 // encoding: [0xe5,0x75,0x31,0xd5]
-// CHECK: mrs x5, trcpidr6 // encoding: [0xe5,0x76,0x31,0xd5]
-// CHECK: mrs x9, trcpidr7 // encoding: [0xe9,0x77,0x31,0xd5]
-// CHECK: mrs x15, trcpidr0 // encoding: [0xef,0x78,0x31,0xd5]
-// CHECK: mrs x6, trcpidr1 // encoding: [0xe6,0x79,0x31,0xd5]
-// CHECK: mrs x11, trcpidr2 // encoding: [0xeb,0x7a,0x31,0xd5]
-// CHECK: mrs x20, trcpidr3 // encoding: [0xf4,0x7b,0x31,0xd5]
-// CHECK: mrs x17, trccidr0 // encoding: [0xf1,0x7c,0x31,0xd5]
-// CHECK: mrs x2, trccidr1 // encoding: [0xe2,0x7d,0x31,0xd5]
-// CHECK: mrs x20, trccidr2 // encoding: [0xf4,0x7e,0x31,0xd5]
-// CHECK: mrs x4, trccidr3 // encoding: [0xe4,0x7f,0x31,0xd5]
-// CHECK: mrs x11, trcprgctlr // encoding: [0x0b,0x01,0x31,0xd5]
-// CHECK: mrs x23, trcprocselr // encoding: [0x17,0x02,0x31,0xd5]
-// CHECK: mrs x13, trcconfigr // encoding: [0x0d,0x04,0x31,0xd5]
-// CHECK: mrs x23, trcauxctlr // encoding: [0x17,0x06,0x31,0xd5]
-// CHECK: mrs x9, trceventctl0r // encoding: [0x09,0x08,0x31,0xd5]
-// CHECK: mrs x16, trceventctl1r // encoding: [0x10,0x09,0x31,0xd5]
-// CHECK: mrs x4, trcstallctlr // encoding: [0x04,0x0b,0x31,0xd5]
-// CHECK: mrs x14, trctsctlr // encoding: [0x0e,0x0c,0x31,0xd5]
-// CHECK: mrs x24, trcsyncpr // encoding: [0x18,0x0d,0x31,0xd5]
-// CHECK: mrs x28, trcccctlr // encoding: [0x1c,0x0e,0x31,0xd5]
-// CHECK: mrs x15, trcbbctlr // encoding: [0x0f,0x0f,0x31,0xd5]
-// CHECK: mrs x1, trctraceidr // encoding: [0x21,0x00,0x31,0xd5]
-// CHECK: mrs x20, trcqctlr // encoding: [0x34,0x01,0x31,0xd5]
-// CHECK: mrs x2, trcvictlr // encoding: [0x42,0x00,0x31,0xd5]
-// CHECK: mrs x12, trcviiectlr // encoding: [0x4c,0x01,0x31,0xd5]
-// CHECK: mrs x16, trcvissctlr // encoding: [0x50,0x02,0x31,0xd5]
-// CHECK: mrs x8, trcvipcssctlr // encoding: [0x48,0x03,0x31,0xd5]
-// CHECK: mrs x27, trcvdctlr // encoding: [0x5b,0x08,0x31,0xd5]
-// CHECK: mrs x9, trcvdsacctlr // encoding: [0x49,0x09,0x31,0xd5]
-// CHECK: mrs x0, trcvdarcctlr // encoding: [0x40,0x0a,0x31,0xd5]
-// CHECK: mrs x13, trcseqevr0 // encoding: [0x8d,0x00,0x31,0xd5]
-// CHECK: mrs x11, trcseqevr1 // encoding: [0x8b,0x01,0x31,0xd5]
-// CHECK: mrs x26, trcseqevr2 // encoding: [0x9a,0x02,0x31,0xd5]
-// CHECK: mrs x14, trcseqrstevr // encoding: [0x8e,0x06,0x31,0xd5]
-// CHECK: mrs x4, trcseqstr // encoding: [0x84,0x07,0x31,0xd5]
-// CHECK: mrs x17, trcextinselr // encoding: [0x91,0x08,0x31,0xd5]
-// CHECK: mrs x21, trccntrldvr0 // encoding: [0xb5,0x00,0x31,0xd5]
-// CHECK: mrs x10, trccntrldvr1 // encoding: [0xaa,0x01,0x31,0xd5]
-// CHECK: mrs x20, trccntrldvr2 // encoding: [0xb4,0x02,0x31,0xd5]
-// CHECK: mrs x5, trccntrldvr3 // encoding: [0xa5,0x03,0x31,0xd5]
-// CHECK: mrs x17, trccntctlr0 // encoding: [0xb1,0x04,0x31,0xd5]
-// CHECK: mrs x1, trccntctlr1 // encoding: [0xa1,0x05,0x31,0xd5]
-// CHECK: mrs x17, trccntctlr2 // encoding: [0xb1,0x06,0x31,0xd5]
-// CHECK: mrs x6, trccntctlr3 // encoding: [0xa6,0x07,0x31,0xd5]
-// CHECK: mrs x28, trccntvr0 // encoding: [0xbc,0x08,0x31,0xd5]
-// CHECK: mrs x23, trccntvr1 // encoding: [0xb7,0x09,0x31,0xd5]
-// CHECK: mrs x9, trccntvr2 // encoding: [0xa9,0x0a,0x31,0xd5]
-// CHECK: mrs x6, trccntvr3 // encoding: [0xa6,0x0b,0x31,0xd5]
-// CHECK: mrs x24, trcimspec0 // encoding: [0xf8,0x00,0x31,0xd5]
-// CHECK: mrs x24, trcimspec1 // encoding: [0xf8,0x01,0x31,0xd5]
-// CHECK: mrs x15, trcimspec2 // encoding: [0xef,0x02,0x31,0xd5]
-// CHECK: mrs x10, trcimspec3 // encoding: [0xea,0x03,0x31,0xd5]
-// CHECK: mrs x29, trcimspec4 // encoding: [0xfd,0x04,0x31,0xd5]
-// CHECK: mrs x18, trcimspec5 // encoding: [0xf2,0x05,0x31,0xd5]
-// CHECK: mrs x29, trcimspec6 // encoding: [0xfd,0x06,0x31,0xd5]
-// CHECK: mrs x2, trcimspec7 // encoding: [0xe2,0x07,0x31,0xd5]
-// CHECK: mrs x8, trcrsctlr2 // encoding: [0x08,0x12,0x31,0xd5]
-// CHECK: mrs x0, trcrsctlr3 // encoding: [0x00,0x13,0x31,0xd5]
-// CHECK: mrs x12, trcrsctlr4 // encoding: [0x0c,0x14,0x31,0xd5]
-// CHECK: mrs x26, trcrsctlr5 // encoding: [0x1a,0x15,0x31,0xd5]
-// CHECK: mrs x29, trcrsctlr6 // encoding: [0x1d,0x16,0x31,0xd5]
-// CHECK: mrs x17, trcrsctlr7 // encoding: [0x11,0x17,0x31,0xd5]
-// CHECK: mrs x0, trcrsctlr8 // encoding: [0x00,0x18,0x31,0xd5]
-// CHECK: mrs x1, trcrsctlr9 // encoding: [0x01,0x19,0x31,0xd5]
-// CHECK: mrs x17, trcrsctlr10 // encoding: [0x11,0x1a,0x31,0xd5]
-// CHECK: mrs x21, trcrsctlr11 // encoding: [0x15,0x1b,0x31,0xd5]
-// CHECK: mrs x1, trcrsctlr12 // encoding: [0x01,0x1c,0x31,0xd5]
-// CHECK: mrs x8, trcrsctlr13 // encoding: [0x08,0x1d,0x31,0xd5]
-// CHECK: mrs x24, trcrsctlr14 // encoding: [0x18,0x1e,0x31,0xd5]
-// CHECK: mrs x0, trcrsctlr15 // encoding: [0x00,0x1f,0x31,0xd5]
-// CHECK: mrs x2, trcrsctlr16 // encoding: [0x22,0x10,0x31,0xd5]
-// CHECK: mrs x29, trcrsctlr17 // encoding: [0x3d,0x11,0x31,0xd5]
-// CHECK: mrs x22, trcrsctlr18 // encoding: [0x36,0x12,0x31,0xd5]
-// CHECK: mrs x6, trcrsctlr19 // encoding: [0x26,0x13,0x31,0xd5]
-// CHECK: mrs x26, trcrsctlr20 // encoding: [0x3a,0x14,0x31,0xd5]
-// CHECK: mrs x26, trcrsctlr21 // encoding: [0x3a,0x15,0x31,0xd5]
-// CHECK: mrs x4, trcrsctlr22 // encoding: [0x24,0x16,0x31,0xd5]
-// CHECK: mrs x12, trcrsctlr23 // encoding: [0x2c,0x17,0x31,0xd5]
-// CHECK: mrs x1, trcrsctlr24 // encoding: [0x21,0x18,0x31,0xd5]
-// CHECK: mrs x0, trcrsctlr25 // encoding: [0x20,0x19,0x31,0xd5]
-// CHECK: mrs x17, trcrsctlr26 // encoding: [0x31,0x1a,0x31,0xd5]
-// CHECK: mrs x8, trcrsctlr27 // encoding: [0x28,0x1b,0x31,0xd5]
-// CHECK: mrs x10, trcrsctlr28 // encoding: [0x2a,0x1c,0x31,0xd5]
-// CHECK: mrs x25, trcrsctlr29 // encoding: [0x39,0x1d,0x31,0xd5]
-// CHECK: mrs x12, trcrsctlr30 // encoding: [0x2c,0x1e,0x31,0xd5]
-// CHECK: mrs x11, trcrsctlr31 // encoding: [0x2b,0x1f,0x31,0xd5]
-// CHECK: mrs x18, trcssccr0 // encoding: [0x52,0x10,0x31,0xd5]
-// CHECK: mrs x12, trcssccr1 // encoding: [0x4c,0x11,0x31,0xd5]
-// CHECK: mrs x3, trcssccr2 // encoding: [0x43,0x12,0x31,0xd5]
-// CHECK: mrs x2, trcssccr3 // encoding: [0x42,0x13,0x31,0xd5]
-// CHECK: mrs x21, trcssccr4 // encoding: [0x55,0x14,0x31,0xd5]
-// CHECK: mrs x10, trcssccr5 // encoding: [0x4a,0x15,0x31,0xd5]
-// CHECK: mrs x22, trcssccr6 // encoding: [0x56,0x16,0x31,0xd5]
-// CHECK: mrs x23, trcssccr7 // encoding: [0x57,0x17,0x31,0xd5]
-// CHECK: mrs x23, trcsscsr0 // encoding: [0x57,0x18,0x31,0xd5]
-// CHECK: mrs x19, trcsscsr1 // encoding: [0x53,0x19,0x31,0xd5]
-// CHECK: mrs x25, trcsscsr2 // encoding: [0x59,0x1a,0x31,0xd5]
-// CHECK: mrs x17, trcsscsr3 // encoding: [0x51,0x1b,0x31,0xd5]
-// CHECK: mrs x19, trcsscsr4 // encoding: [0x53,0x1c,0x31,0xd5]
-// CHECK: mrs x11, trcsscsr5 // encoding: [0x4b,0x1d,0x31,0xd5]
-// CHECK: mrs x5, trcsscsr6 // encoding: [0x45,0x1e,0x31,0xd5]
-// CHECK: mrs x9, trcsscsr7 // encoding: [0x49,0x1f,0x31,0xd5]
-// CHECK: mrs x1, trcsspcicr0 // encoding: [0x61,0x10,0x31,0xd5]
-// CHECK: mrs x12, trcsspcicr1 // encoding: [0x6c,0x11,0x31,0xd5]
-// CHECK: mrs x21, trcsspcicr2 // encoding: [0x75,0x12,0x31,0xd5]
-// CHECK: mrs x11, trcsspcicr3 // encoding: [0x6b,0x13,0x31,0xd5]
-// CHECK: mrs x3, trcsspcicr4 // encoding: [0x63,0x14,0x31,0xd5]
-// CHECK: mrs x9, trcsspcicr5 // encoding: [0x69,0x15,0x31,0xd5]
-// CHECK: mrs x5, trcsspcicr6 // encoding: [0x65,0x16,0x31,0xd5]
-// CHECK: mrs x2, trcsspcicr7 // encoding: [0x62,0x17,0x31,0xd5]
-// CHECK: mrs x26, trcpdcr // encoding: [0x9a,0x14,0x31,0xd5]
-// CHECK: mrs x8, trcacvr0 // encoding: [0x08,0x20,0x31,0xd5]
-// CHECK: mrs x15, trcacvr1 // encoding: [0x0f,0x22,0x31,0xd5]
-// CHECK: mrs x19, trcacvr2 // encoding: [0x13,0x24,0x31,0xd5]
-// CHECK: mrs x8, trcacvr3 // encoding: [0x08,0x26,0x31,0xd5]
-// CHECK: mrs x28, trcacvr4 // encoding: [0x1c,0x28,0x31,0xd5]
-// CHECK: mrs x3, trcacvr5 // encoding: [0x03,0x2a,0x31,0xd5]
-// CHECK: mrs x25, trcacvr6 // encoding: [0x19,0x2c,0x31,0xd5]
-// CHECK: mrs x24, trcacvr7 // encoding: [0x18,0x2e,0x31,0xd5]
-// CHECK: mrs x6, trcacvr8 // encoding: [0x26,0x20,0x31,0xd5]
-// CHECK: mrs x3, trcacvr9 // encoding: [0x23,0x22,0x31,0xd5]
-// CHECK: mrs x24, trcacvr10 // encoding: [0x38,0x24,0x31,0xd5]
-// CHECK: mrs x3, trcacvr11 // encoding: [0x23,0x26,0x31,0xd5]
-// CHECK: mrs x12, trcacvr12 // encoding: [0x2c,0x28,0x31,0xd5]
-// CHECK: mrs x9, trcacvr13 // encoding: [0x29,0x2a,0x31,0xd5]
-// CHECK: mrs x14, trcacvr14 // encoding: [0x2e,0x2c,0x31,0xd5]
-// CHECK: mrs x3, trcacvr15 // encoding: [0x23,0x2e,0x31,0xd5]
-// CHECK: mrs x21, trcacatr0 // encoding: [0x55,0x20,0x31,0xd5]
-// CHECK: mrs x26, trcacatr1 // encoding: [0x5a,0x22,0x31,0xd5]
-// CHECK: mrs x8, trcacatr2 // encoding: [0x48,0x24,0x31,0xd5]
-// CHECK: mrs x22, trcacatr3 // encoding: [0x56,0x26,0x31,0xd5]
-// CHECK: mrs x6, trcacatr4 // encoding: [0x46,0x28,0x31,0xd5]
-// CHECK: mrs x29, trcacatr5 // encoding: [0x5d,0x2a,0x31,0xd5]
-// CHECK: mrs x5, trcacatr6 // encoding: [0x45,0x2c,0x31,0xd5]
-// CHECK: mrs x18, trcacatr7 // encoding: [0x52,0x2e,0x31,0xd5]
-// CHECK: mrs x2, trcacatr8 // encoding: [0x62,0x20,0x31,0xd5]
-// CHECK: mrs x19, trcacatr9 // encoding: [0x73,0x22,0x31,0xd5]
-// CHECK: mrs x13, trcacatr10 // encoding: [0x6d,0x24,0x31,0xd5]
-// CHECK: mrs x25, trcacatr11 // encoding: [0x79,0x26,0x31,0xd5]
-// CHECK: mrs x18, trcacatr12 // encoding: [0x72,0x28,0x31,0xd5]
-// CHECK: mrs x29, trcacatr13 // encoding: [0x7d,0x2a,0x31,0xd5]
-// CHECK: mrs x9, trcacatr14 // encoding: [0x69,0x2c,0x31,0xd5]
-// CHECK: mrs x18, trcacatr15 // encoding: [0x72,0x2e,0x31,0xd5]
-// CHECK: mrs x29, trcdvcvr0 // encoding: [0x9d,0x20,0x31,0xd5]
-// CHECK: mrs x15, trcdvcvr1 // encoding: [0x8f,0x24,0x31,0xd5]
-// CHECK: mrs x15, trcdvcvr2 // encoding: [0x8f,0x28,0x31,0xd5]
-// CHECK: mrs x15, trcdvcvr3 // encoding: [0x8f,0x2c,0x31,0xd5]
-// CHECK: mrs x19, trcdvcvr4 // encoding: [0xb3,0x20,0x31,0xd5]
-// CHECK: mrs x22, trcdvcvr5 // encoding: [0xb6,0x24,0x31,0xd5]
-// CHECK: mrs x27, trcdvcvr6 // encoding: [0xbb,0x28,0x31,0xd5]
-// CHECK: mrs x1, trcdvcvr7 // encoding: [0xa1,0x2c,0x31,0xd5]
-// CHECK: mrs x29, trcdvcmr0 // encoding: [0xdd,0x20,0x31,0xd5]
-// CHECK: mrs x9, trcdvcmr1 // encoding: [0xc9,0x24,0x31,0xd5]
-// CHECK: mrs x1, trcdvcmr2 // encoding: [0xc1,0x28,0x31,0xd5]
-// CHECK: mrs x2, trcdvcmr3 // encoding: [0xc2,0x2c,0x31,0xd5]
-// CHECK: mrs x5, trcdvcmr4 // encoding: [0xe5,0x20,0x31,0xd5]
-// CHECK: mrs x21, trcdvcmr5 // encoding: [0xf5,0x24,0x31,0xd5]
-// CHECK: mrs x5, trcdvcmr6 // encoding: [0xe5,0x28,0x31,0xd5]
-// CHECK: mrs x1, trcdvcmr7 // encoding: [0xe1,0x2c,0x31,0xd5]
-// CHECK: mrs x21, trccidcvr0 // encoding: [0x15,0x30,0x31,0xd5]
-// CHECK: mrs x24, trccidcvr1 // encoding: [0x18,0x32,0x31,0xd5]
-// CHECK: mrs x24, trccidcvr2 // encoding: [0x18,0x34,0x31,0xd5]
-// CHECK: mrs x12, trccidcvr3 // encoding: [0x0c,0x36,0x31,0xd5]
-// CHECK: mrs x10, trccidcvr4 // encoding: [0x0a,0x38,0x31,0xd5]
-// CHECK: mrs x9, trccidcvr5 // encoding: [0x09,0x3a,0x31,0xd5]
-// CHECK: mrs x6, trccidcvr6 // encoding: [0x06,0x3c,0x31,0xd5]
-// CHECK: mrs x20, trccidcvr7 // encoding: [0x14,0x3e,0x31,0xd5]
-// CHECK: mrs x20, trcvmidcvr0 // encoding: [0x34,0x30,0x31,0xd5]
-// CHECK: mrs x20, trcvmidcvr1 // encoding: [0x34,0x32,0x31,0xd5]
-// CHECK: mrs x26, trcvmidcvr2 // encoding: [0x3a,0x34,0x31,0xd5]
-// CHECK: mrs x1, trcvmidcvr3 // encoding: [0x21,0x36,0x31,0xd5]
-// CHECK: mrs x14, trcvmidcvr4 // encoding: [0x2e,0x38,0x31,0xd5]
-// CHECK: mrs x27, trcvmidcvr5 // encoding: [0x3b,0x3a,0x31,0xd5]
-// CHECK: mrs x29, trcvmidcvr6 // encoding: [0x3d,0x3c,0x31,0xd5]
-// CHECK: mrs x17, trcvmidcvr7 // encoding: [0x31,0x3e,0x31,0xd5]
-// CHECK: mrs x10, trccidcctlr0 // encoding: [0x4a,0x30,0x31,0xd5]
-// CHECK: mrs x4, trccidcctlr1 // encoding: [0x44,0x31,0x31,0xd5]
-// CHECK: mrs x9, trcvmidcctlr0 // encoding: [0x49,0x32,0x31,0xd5]
-// CHECK: mrs x11, trcvmidcctlr1 // encoding: [0x4b,0x33,0x31,0xd5]
-// CHECK: mrs x22, trcitctrl // encoding: [0x96,0x70,0x31,0xd5]
-// CHECK: mrs x23, trcclaimset // encoding: [0xd7,0x78,0x31,0xd5]
-// CHECK: mrs x14, trcclaimclr // encoding: [0xce,0x79,0x31,0xd5]
+// CHECK: mrs x8, {{trcstatr|TRCSTATR}} // encoding: [0x08,0x03,0x31,0xd5]
+// CHECK: mrs x9, {{trcidr8|TRCIDR8}} // encoding: [0xc9,0x00,0x31,0xd5]
+// CHECK: mrs x11, {{trcidr9|TRCIDR9}} // encoding: [0xcb,0x01,0x31,0xd5]
+// CHECK: mrs x25, {{trcidr10|TRCIDR10}} // encoding: [0xd9,0x02,0x31,0xd5]
+// CHECK: mrs x7, {{trcidr11|TRCIDR11}} // encoding: [0xc7,0x03,0x31,0xd5]
+// CHECK: mrs x7, {{trcidr12|TRCIDR12}} // encoding: [0xc7,0x04,0x31,0xd5]
+// CHECK: mrs x6, {{trcidr13|TRCIDR13}} // encoding: [0xc6,0x05,0x31,0xd5]
+// CHECK: mrs x27, {{trcidr0|TRCIDR0}} // encoding: [0xfb,0x08,0x31,0xd5]
+// CHECK: mrs x29, {{trcidr1|TRCIDR1}} // encoding: [0xfd,0x09,0x31,0xd5]
+// CHECK: mrs x4, {{trcidr2|TRCIDR2}} // encoding: [0xe4,0x0a,0x31,0xd5]
+// CHECK: mrs x8, {{trcidr3|TRCIDR3}} // encoding: [0xe8,0x0b,0x31,0xd5]
+// CHECK: mrs x15, {{trcidr4|TRCIDR4}} // encoding: [0xef,0x0c,0x31,0xd5]
+// CHECK: mrs x20, {{trcidr5|TRCIDR5}} // encoding: [0xf4,0x0d,0x31,0xd5]
+// CHECK: mrs x6, {{trcidr6|TRCIDR6}} // encoding: [0xe6,0x0e,0x31,0xd5]
+// CHECK: mrs x6, {{trcidr7|TRCIDR7}} // encoding: [0xe6,0x0f,0x31,0xd5]
+// CHECK: mrs x24, {{trcoslsr|TRCOSLSR}} // encoding: [0x98,0x11,0x31,0xd5]
+// CHECK: mrs x18, {{trcpdsr|TRCPDSR}} // encoding: [0x92,0x15,0x31,0xd5]
+// CHECK: mrs x28, {{trcdevaff0|TRCDEVAFF0}} // encoding: [0xdc,0x7a,0x31,0xd5]
+// CHECK: mrs x5, {{trcdevaff1|TRCDEVAFF1}} // encoding: [0xc5,0x7b,0x31,0xd5]
+// CHECK: mrs x5, {{trclsr|TRCLSR}} // encoding: [0xc5,0x7d,0x31,0xd5]
+// CHECK: mrs x11, {{trcauthstatus|TRCAUTHSTATUS}} // encoding: [0xcb,0x7e,0x31,0xd5]
+// CHECK: mrs x13, {{trcdevarch|TRCDEVARCH}} // encoding: [0xcd,0x7f,0x31,0xd5]
+// CHECK: mrs x18, {{trcdevid|TRCDEVID}} // encoding: [0xf2,0x72,0x31,0xd5]
+// CHECK: mrs x22, {{trcdevtype|TRCDEVTYPE}} // encoding: [0xf6,0x73,0x31,0xd5]
+// CHECK: mrs x14, {{trcpidr4|TRCPIDR4}} // encoding: [0xee,0x74,0x31,0xd5]
+// CHECK: mrs x5, {{trcpidr5|TRCPIDR5}} // encoding: [0xe5,0x75,0x31,0xd5]
+// CHECK: mrs x5, {{trcpidr6|TRCPIDR6}} // encoding: [0xe5,0x76,0x31,0xd5]
+// CHECK: mrs x9, {{trcpidr7|TRCPIDR7}} // encoding: [0xe9,0x77,0x31,0xd5]
+// CHECK: mrs x15, {{trcpidr0|TRCPIDR0}} // encoding: [0xef,0x78,0x31,0xd5]
+// CHECK: mrs x6, {{trcpidr1|TRCPIDR1}} // encoding: [0xe6,0x79,0x31,0xd5]
+// CHECK: mrs x11, {{trcpidr2|TRCPIDR2}} // encoding: [0xeb,0x7a,0x31,0xd5]
+// CHECK: mrs x20, {{trcpidr3|TRCPIDR3}} // encoding: [0xf4,0x7b,0x31,0xd5]
+// CHECK: mrs x17, {{trccidr0|TRCCIDR0}} // encoding: [0xf1,0x7c,0x31,0xd5]
+// CHECK: mrs x2, {{trccidr1|TRCCIDR1}} // encoding: [0xe2,0x7d,0x31,0xd5]
+// CHECK: mrs x20, {{trccidr2|TRCCIDR2}} // encoding: [0xf4,0x7e,0x31,0xd5]
+// CHECK: mrs x4, {{trccidr3|TRCCIDR3}} // encoding: [0xe4,0x7f,0x31,0xd5]
+// CHECK: mrs x11, {{trcprgctlr|TRCPRGCTLR}} // encoding: [0x0b,0x01,0x31,0xd5]
+// CHECK: mrs x23, {{trcprocselr|TRCPROCSELR}} // encoding: [0x17,0x02,0x31,0xd5]
+// CHECK: mrs x13, {{trcconfigr|TRCCONFIGR}} // encoding: [0x0d,0x04,0x31,0xd5]
+// CHECK: mrs x23, {{trcauxctlr|TRCAUXCTLR}} // encoding: [0x17,0x06,0x31,0xd5]
+// CHECK: mrs x9, {{trceventctl0r|TRCEVENTCTL0R}} // encoding: [0x09,0x08,0x31,0xd5]
+// CHECK: mrs x16, {{trceventctl1r|TRCEVENTCTL1R}} // encoding: [0x10,0x09,0x31,0xd5]
+// CHECK: mrs x4, {{trcstallctlr|TRCSTALLCTLR}} // encoding: [0x04,0x0b,0x31,0xd5]
+// CHECK: mrs x14, {{trctsctlr|TRCTSCTLR}} // encoding: [0x0e,0x0c,0x31,0xd5]
+// CHECK: mrs x24, {{trcsyncpr|TRCSYNCPR}} // encoding: [0x18,0x0d,0x31,0xd5]
+// CHECK: mrs x28, {{trcccctlr|TRCCCCTLR}} // encoding: [0x1c,0x0e,0x31,0xd5]
+// CHECK: mrs x15, {{trcbbctlr|TRCBBCTLR}} // encoding: [0x0f,0x0f,0x31,0xd5]
+// CHECK: mrs x1, {{trctraceidr|TRCTRACEIDR}} // encoding: [0x21,0x00,0x31,0xd5]
+// CHECK: mrs x20, {{trcqctlr|TRCQCTLR}} // encoding: [0x34,0x01,0x31,0xd5]
+// CHECK: mrs x2, {{trcvictlr|TRCVICTLR}} // encoding: [0x42,0x00,0x31,0xd5]
+// CHECK: mrs x12, {{trcviiectlr|TRCVIIECTLR}} // encoding: [0x4c,0x01,0x31,0xd5]
+// CHECK: mrs x16, {{trcvissctlr|TRCVISSCTLR}} // encoding: [0x50,0x02,0x31,0xd5]
+// CHECK: mrs x8, {{trcvipcssctlr|TRCVIPCSSCTLR}} // encoding: [0x48,0x03,0x31,0xd5]
+// CHECK: mrs x27, {{trcvdctlr|TRCVDCTLR}} // encoding: [0x5b,0x08,0x31,0xd5]
+// CHECK: mrs x9, {{trcvdsacctlr|TRCVDSACCTLR}} // encoding: [0x49,0x09,0x31,0xd5]
+// CHECK: mrs x0, {{trcvdarcctlr|TRCVDARCCTLR}} // encoding: [0x40,0x0a,0x31,0xd5]
+// CHECK: mrs x13, {{trcseqevr0|TRCSEQEVR0}} // encoding: [0x8d,0x00,0x31,0xd5]
+// CHECK: mrs x11, {{trcseqevr1|TRCSEQEVR1}} // encoding: [0x8b,0x01,0x31,0xd5]
+// CHECK: mrs x26, {{trcseqevr2|TRCSEQEVR2}} // encoding: [0x9a,0x02,0x31,0xd5]
+// CHECK: mrs x14, {{trcseqrstevr|TRCSEQRSTEVR}} // encoding: [0x8e,0x06,0x31,0xd5]
+// CHECK: mrs x4, {{trcseqstr|TRCSEQSTR}} // encoding: [0x84,0x07,0x31,0xd5]
+// CHECK: mrs x17, {{trcextinselr|TRCEXTINSELR}} // encoding: [0x91,0x08,0x31,0xd5]
+// CHECK: mrs x21, {{trccntrldvr0|TRCCNTRLDVR0}} // encoding: [0xb5,0x00,0x31,0xd5]
+// CHECK: mrs x10, {{trccntrldvr1|TRCCNTRLDVR1}} // encoding: [0xaa,0x01,0x31,0xd5]
+// CHECK: mrs x20, {{trccntrldvr2|TRCCNTRLDVR2}} // encoding: [0xb4,0x02,0x31,0xd5]
+// CHECK: mrs x5, {{trccntrldvr3|TRCCNTRLDVR3}} // encoding: [0xa5,0x03,0x31,0xd5]
+// CHECK: mrs x17, {{trccntctlr0|TRCCNTCTLR0}} // encoding: [0xb1,0x04,0x31,0xd5]
+// CHECK: mrs x1, {{trccntctlr1|TRCCNTCTLR1}} // encoding: [0xa1,0x05,0x31,0xd5]
+// CHECK: mrs x17, {{trccntctlr2|TRCCNTCTLR2}} // encoding: [0xb1,0x06,0x31,0xd5]
+// CHECK: mrs x6, {{trccntctlr3|TRCCNTCTLR3}} // encoding: [0xa6,0x07,0x31,0xd5]
+// CHECK: mrs x28, {{trccntvr0|TRCCNTVR0}} // encoding: [0xbc,0x08,0x31,0xd5]
+// CHECK: mrs x23, {{trccntvr1|TRCCNTVR1}} // encoding: [0xb7,0x09,0x31,0xd5]
+// CHECK: mrs x9, {{trccntvr2|TRCCNTVR2}} // encoding: [0xa9,0x0a,0x31,0xd5]
+// CHECK: mrs x6, {{trccntvr3|TRCCNTVR3}} // encoding: [0xa6,0x0b,0x31,0xd5]
+// CHECK: mrs x24, {{trcimspec0|TRCIMSPEC0}} // encoding: [0xf8,0x00,0x31,0xd5]
+// CHECK: mrs x24, {{trcimspec1|TRCIMSPEC1}} // encoding: [0xf8,0x01,0x31,0xd5]
+// CHECK: mrs x15, {{trcimspec2|TRCIMSPEC2}} // encoding: [0xef,0x02,0x31,0xd5]
+// CHECK: mrs x10, {{trcimspec3|TRCIMSPEC3}} // encoding: [0xea,0x03,0x31,0xd5]
+// CHECK: mrs x29, {{trcimspec4|TRCIMSPEC4}} // encoding: [0xfd,0x04,0x31,0xd5]
+// CHECK: mrs x18, {{trcimspec5|TRCIMSPEC5}} // encoding: [0xf2,0x05,0x31,0xd5]
+// CHECK: mrs x29, {{trcimspec6|TRCIMSPEC6}} // encoding: [0xfd,0x06,0x31,0xd5]
+// CHECK: mrs x2, {{trcimspec7|TRCIMSPEC7}} // encoding: [0xe2,0x07,0x31,0xd5]
+// CHECK: mrs x8, {{trcrsctlr2|TRCRSCTLR2}} // encoding: [0x08,0x12,0x31,0xd5]
+// CHECK: mrs x0, {{trcrsctlr3|TRCRSCTLR3}} // encoding: [0x00,0x13,0x31,0xd5]
+// CHECK: mrs x12, {{trcrsctlr4|TRCRSCTLR4}} // encoding: [0x0c,0x14,0x31,0xd5]
+// CHECK: mrs x26, {{trcrsctlr5|TRCRSCTLR5}} // encoding: [0x1a,0x15,0x31,0xd5]
+// CHECK: mrs x29, {{trcrsctlr6|TRCRSCTLR6}} // encoding: [0x1d,0x16,0x31,0xd5]
+// CHECK: mrs x17, {{trcrsctlr7|TRCRSCTLR7}} // encoding: [0x11,0x17,0x31,0xd5]
+// CHECK: mrs x0, {{trcrsctlr8|TRCRSCTLR8}} // encoding: [0x00,0x18,0x31,0xd5]
+// CHECK: mrs x1, {{trcrsctlr9|TRCRSCTLR9}} // encoding: [0x01,0x19,0x31,0xd5]
+// CHECK: mrs x17, {{trcrsctlr10|TRCRSCTLR10}} // encoding: [0x11,0x1a,0x31,0xd5]
+// CHECK: mrs x21, {{trcrsctlr11|TRCRSCTLR11}} // encoding: [0x15,0x1b,0x31,0xd5]
+// CHECK: mrs x1, {{trcrsctlr12|TRCRSCTLR12}} // encoding: [0x01,0x1c,0x31,0xd5]
+// CHECK: mrs x8, {{trcrsctlr13|TRCRSCTLR13}} // encoding: [0x08,0x1d,0x31,0xd5]
+// CHECK: mrs x24, {{trcrsctlr14|TRCRSCTLR14}} // encoding: [0x18,0x1e,0x31,0xd5]
+// CHECK: mrs x0, {{trcrsctlr15|TRCRSCTLR15}} // encoding: [0x00,0x1f,0x31,0xd5]
+// CHECK: mrs x2, {{trcrsctlr16|TRCRSCTLR16}} // encoding: [0x22,0x10,0x31,0xd5]
+// CHECK: mrs x29, {{trcrsctlr17|TRCRSCTLR17}} // encoding: [0x3d,0x11,0x31,0xd5]
+// CHECK: mrs x22, {{trcrsctlr18|TRCRSCTLR18}} // encoding: [0x36,0x12,0x31,0xd5]
+// CHECK: mrs x6, {{trcrsctlr19|TRCRSCTLR19}} // encoding: [0x26,0x13,0x31,0xd5]
+// CHECK: mrs x26, {{trcrsctlr20|TRCRSCTLR20}} // encoding: [0x3a,0x14,0x31,0xd5]
+// CHECK: mrs x26, {{trcrsctlr21|TRCRSCTLR21}} // encoding: [0x3a,0x15,0x31,0xd5]
+// CHECK: mrs x4, {{trcrsctlr22|TRCRSCTLR22}} // encoding: [0x24,0x16,0x31,0xd5]
+// CHECK: mrs x12, {{trcrsctlr23|TRCRSCTLR23}} // encoding: [0x2c,0x17,0x31,0xd5]
+// CHECK: mrs x1, {{trcrsctlr24|TRCRSCTLR24}} // encoding: [0x21,0x18,0x31,0xd5]
+// CHECK: mrs x0, {{trcrsctlr25|TRCRSCTLR25}} // encoding: [0x20,0x19,0x31,0xd5]
+// CHECK: mrs x17, {{trcrsctlr26|TRCRSCTLR26}} // encoding: [0x31,0x1a,0x31,0xd5]
+// CHECK: mrs x8, {{trcrsctlr27|TRCRSCTLR27}} // encoding: [0x28,0x1b,0x31,0xd5]
+// CHECK: mrs x10, {{trcrsctlr28|TRCRSCTLR28}} // encoding: [0x2a,0x1c,0x31,0xd5]
+// CHECK: mrs x25, {{trcrsctlr29|TRCRSCTLR29}} // encoding: [0x39,0x1d,0x31,0xd5]
+// CHECK: mrs x12, {{trcrsctlr30|TRCRSCTLR30}} // encoding: [0x2c,0x1e,0x31,0xd5]
+// CHECK: mrs x11, {{trcrsctlr31|TRCRSCTLR31}} // encoding: [0x2b,0x1f,0x31,0xd5]
+// CHECK: mrs x18, {{trcssccr0|TRCSSCCR0}} // encoding: [0x52,0x10,0x31,0xd5]
+// CHECK: mrs x12, {{trcssccr1|TRCSSCCR1}} // encoding: [0x4c,0x11,0x31,0xd5]
+// CHECK: mrs x3, {{trcssccr2|TRCSSCCR2}} // encoding: [0x43,0x12,0x31,0xd5]
+// CHECK: mrs x2, {{trcssccr3|TRCSSCCR3}} // encoding: [0x42,0x13,0x31,0xd5]
+// CHECK: mrs x21, {{trcssccr4|TRCSSCCR4}} // encoding: [0x55,0x14,0x31,0xd5]
+// CHECK: mrs x10, {{trcssccr5|TRCSSCCR5}} // encoding: [0x4a,0x15,0x31,0xd5]
+// CHECK: mrs x22, {{trcssccr6|TRCSSCCR6}} // encoding: [0x56,0x16,0x31,0xd5]
+// CHECK: mrs x23, {{trcssccr7|TRCSSCCR7}} // encoding: [0x57,0x17,0x31,0xd5]
+// CHECK: mrs x23, {{trcsscsr0|TRCSSCSR0}} // encoding: [0x57,0x18,0x31,0xd5]
+// CHECK: mrs x19, {{trcsscsr1|TRCSSCSR1}} // encoding: [0x53,0x19,0x31,0xd5]
+// CHECK: mrs x25, {{trcsscsr2|TRCSSCSR2}} // encoding: [0x59,0x1a,0x31,0xd5]
+// CHECK: mrs x17, {{trcsscsr3|TRCSSCSR3}} // encoding: [0x51,0x1b,0x31,0xd5]
+// CHECK: mrs x19, {{trcsscsr4|TRCSSCSR4}} // encoding: [0x53,0x1c,0x31,0xd5]
+// CHECK: mrs x11, {{trcsscsr5|TRCSSCSR5}} // encoding: [0x4b,0x1d,0x31,0xd5]
+// CHECK: mrs x5, {{trcsscsr6|TRCSSCSR6}} // encoding: [0x45,0x1e,0x31,0xd5]
+// CHECK: mrs x9, {{trcsscsr7|TRCSSCSR7}} // encoding: [0x49,0x1f,0x31,0xd5]
+// CHECK: mrs x1, {{trcsspcicr0|TRCSSPCICR0}} // encoding: [0x61,0x10,0x31,0xd5]
+// CHECK: mrs x12, {{trcsspcicr1|TRCSSPCICR1}} // encoding: [0x6c,0x11,0x31,0xd5]
+// CHECK: mrs x21, {{trcsspcicr2|TRCSSPCICR2}} // encoding: [0x75,0x12,0x31,0xd5]
+// CHECK: mrs x11, {{trcsspcicr3|TRCSSPCICR3}} // encoding: [0x6b,0x13,0x31,0xd5]
+// CHECK: mrs x3, {{trcsspcicr4|TRCSSPCICR4}} // encoding: [0x63,0x14,0x31,0xd5]
+// CHECK: mrs x9, {{trcsspcicr5|TRCSSPCICR5}} // encoding: [0x69,0x15,0x31,0xd5]
+// CHECK: mrs x5, {{trcsspcicr6|TRCSSPCICR6}} // encoding: [0x65,0x16,0x31,0xd5]
+// CHECK: mrs x2, {{trcsspcicr7|TRCSSPCICR7}} // encoding: [0x62,0x17,0x31,0xd5]
+// CHECK: mrs x26, {{trcpdcr|TRCPDCR}} // encoding: [0x9a,0x14,0x31,0xd5]
+// CHECK: mrs x8, {{trcacvr0|TRCACVR0}} // encoding: [0x08,0x20,0x31,0xd5]
+// CHECK: mrs x15, {{trcacvr1|TRCACVR1}} // encoding: [0x0f,0x22,0x31,0xd5]
+// CHECK: mrs x19, {{trcacvr2|TRCACVR2}} // encoding: [0x13,0x24,0x31,0xd5]
+// CHECK: mrs x8, {{trcacvr3|TRCACVR3}} // encoding: [0x08,0x26,0x31,0xd5]
+// CHECK: mrs x28, {{trcacvr4|TRCACVR4}} // encoding: [0x1c,0x28,0x31,0xd5]
+// CHECK: mrs x3, {{trcacvr5|TRCACVR5}} // encoding: [0x03,0x2a,0x31,0xd5]
+// CHECK: mrs x25, {{trcacvr6|TRCACVR6}} // encoding: [0x19,0x2c,0x31,0xd5]
+// CHECK: mrs x24, {{trcacvr7|TRCACVR7}} // encoding: [0x18,0x2e,0x31,0xd5]
+// CHECK: mrs x6, {{trcacvr8|TRCACVR8}} // encoding: [0x26,0x20,0x31,0xd5]
+// CHECK: mrs x3, {{trcacvr9|TRCACVR9}} // encoding: [0x23,0x22,0x31,0xd5]
+// CHECK: mrs x24, {{trcacvr10|TRCACVR10}} // encoding: [0x38,0x24,0x31,0xd5]
+// CHECK: mrs x3, {{trcacvr11|TRCACVR11}} // encoding: [0x23,0x26,0x31,0xd5]
+// CHECK: mrs x12, {{trcacvr12|TRCACVR12}} // encoding: [0x2c,0x28,0x31,0xd5]
+// CHECK: mrs x9, {{trcacvr13|TRCACVR13}} // encoding: [0x29,0x2a,0x31,0xd5]
+// CHECK: mrs x14, {{trcacvr14|TRCACVR14}} // encoding: [0x2e,0x2c,0x31,0xd5]
+// CHECK: mrs x3, {{trcacvr15|TRCACVR15}} // encoding: [0x23,0x2e,0x31,0xd5]
+// CHECK: mrs x21, {{trcacatr0|TRCACATR0}} // encoding: [0x55,0x20,0x31,0xd5]
+// CHECK: mrs x26, {{trcacatr1|TRCACATR1}} // encoding: [0x5a,0x22,0x31,0xd5]
+// CHECK: mrs x8, {{trcacatr2|TRCACATR2}} // encoding: [0x48,0x24,0x31,0xd5]
+// CHECK: mrs x22, {{trcacatr3|TRCACATR3}} // encoding: [0x56,0x26,0x31,0xd5]
+// CHECK: mrs x6, {{trcacatr4|TRCACATR4}} // encoding: [0x46,0x28,0x31,0xd5]
+// CHECK: mrs x29, {{trcacatr5|TRCACATR5}} // encoding: [0x5d,0x2a,0x31,0xd5]
+// CHECK: mrs x5, {{trcacatr6|TRCACATR6}} // encoding: [0x45,0x2c,0x31,0xd5]
+// CHECK: mrs x18, {{trcacatr7|TRCACATR7}} // encoding: [0x52,0x2e,0x31,0xd5]
+// CHECK: mrs x2, {{trcacatr8|TRCACATR8}} // encoding: [0x62,0x20,0x31,0xd5]
+// CHECK: mrs x19, {{trcacatr9|TRCACATR9}} // encoding: [0x73,0x22,0x31,0xd5]
+// CHECK: mrs x13, {{trcacatr10|TRCACATR10}} // encoding: [0x6d,0x24,0x31,0xd5]
+// CHECK: mrs x25, {{trcacatr11|TRCACATR11}} // encoding: [0x79,0x26,0x31,0xd5]
+// CHECK: mrs x18, {{trcacatr12|TRCACATR12}} // encoding: [0x72,0x28,0x31,0xd5]
+// CHECK: mrs x29, {{trcacatr13|TRCACATR13}} // encoding: [0x7d,0x2a,0x31,0xd5]
+// CHECK: mrs x9, {{trcacatr14|TRCACATR14}} // encoding: [0x69,0x2c,0x31,0xd5]
+// CHECK: mrs x18, {{trcacatr15|TRCACATR15}} // encoding: [0x72,0x2e,0x31,0xd5]
+// CHECK: mrs x29, {{trcdvcvr0|TRCDVCVR0}} // encoding: [0x9d,0x20,0x31,0xd5]
+// CHECK: mrs x15, {{trcdvcvr1|TRCDVCVR1}} // encoding: [0x8f,0x24,0x31,0xd5]
+// CHECK: mrs x15, {{trcdvcvr2|TRCDVCVR2}} // encoding: [0x8f,0x28,0x31,0xd5]
+// CHECK: mrs x15, {{trcdvcvr3|TRCDVCVR3}} // encoding: [0x8f,0x2c,0x31,0xd5]
+// CHECK: mrs x19, {{trcdvcvr4|TRCDVCVR4}} // encoding: [0xb3,0x20,0x31,0xd5]
+// CHECK: mrs x22, {{trcdvcvr5|TRCDVCVR5}} // encoding: [0xb6,0x24,0x31,0xd5]
+// CHECK: mrs x27, {{trcdvcvr6|TRCDVCVR6}} // encoding: [0xbb,0x28,0x31,0xd5]
+// CHECK: mrs x1, {{trcdvcvr7|TRCDVCVR7}} // encoding: [0xa1,0x2c,0x31,0xd5]
+// CHECK: mrs x29, {{trcdvcmr0|TRCDVCMR0}} // encoding: [0xdd,0x20,0x31,0xd5]
+// CHECK: mrs x9, {{trcdvcmr1|TRCDVCMR1}} // encoding: [0xc9,0x24,0x31,0xd5]
+// CHECK: mrs x1, {{trcdvcmr2|TRCDVCMR2}} // encoding: [0xc1,0x28,0x31,0xd5]
+// CHECK: mrs x2, {{trcdvcmr3|TRCDVCMR3}} // encoding: [0xc2,0x2c,0x31,0xd5]
+// CHECK: mrs x5, {{trcdvcmr4|TRCDVCMR4}} // encoding: [0xe5,0x20,0x31,0xd5]
+// CHECK: mrs x21, {{trcdvcmr5|TRCDVCMR5}} // encoding: [0xf5,0x24,0x31,0xd5]
+// CHECK: mrs x5, {{trcdvcmr6|TRCDVCMR6}} // encoding: [0xe5,0x28,0x31,0xd5]
+// CHECK: mrs x1, {{trcdvcmr7|TRCDVCMR7}} // encoding: [0xe1,0x2c,0x31,0xd5]
+// CHECK: mrs x21, {{trccidcvr0|TRCCIDCVR0}} // encoding: [0x15,0x30,0x31,0xd5]
+// CHECK: mrs x24, {{trccidcvr1|TRCCIDCVR1}} // encoding: [0x18,0x32,0x31,0xd5]
+// CHECK: mrs x24, {{trccidcvr2|TRCCIDCVR2}} // encoding: [0x18,0x34,0x31,0xd5]
+// CHECK: mrs x12, {{trccidcvr3|TRCCIDCVR3}} // encoding: [0x0c,0x36,0x31,0xd5]
+// CHECK: mrs x10, {{trccidcvr4|TRCCIDCVR4}} // encoding: [0x0a,0x38,0x31,0xd5]
+// CHECK: mrs x9, {{trccidcvr5|TRCCIDCVR5}} // encoding: [0x09,0x3a,0x31,0xd5]
+// CHECK: mrs x6, {{trccidcvr6|TRCCIDCVR6}} // encoding: [0x06,0x3c,0x31,0xd5]
+// CHECK: mrs x20, {{trccidcvr7|TRCCIDCVR7}} // encoding: [0x14,0x3e,0x31,0xd5]
+// CHECK: mrs x20, {{trcvmidcvr0|TRCVMIDCVR0}} // encoding: [0x34,0x30,0x31,0xd5]
+// CHECK: mrs x20, {{trcvmidcvr1|TRCVMIDCVR1}} // encoding: [0x34,0x32,0x31,0xd5]
+// CHECK: mrs x26, {{trcvmidcvr2|TRCVMIDCVR2}} // encoding: [0x3a,0x34,0x31,0xd5]
+// CHECK: mrs x1, {{trcvmidcvr3|TRCVMIDCVR3}} // encoding: [0x21,0x36,0x31,0xd5]
+// CHECK: mrs x14, {{trcvmidcvr4|TRCVMIDCVR4}} // encoding: [0x2e,0x38,0x31,0xd5]
+// CHECK: mrs x27, {{trcvmidcvr5|TRCVMIDCVR5}} // encoding: [0x3b,0x3a,0x31,0xd5]
+// CHECK: mrs x29, {{trcvmidcvr6|TRCVMIDCVR6}} // encoding: [0x3d,0x3c,0x31,0xd5]
+// CHECK: mrs x17, {{trcvmidcvr7|TRCVMIDCVR7}} // encoding: [0x31,0x3e,0x31,0xd5]
+// CHECK: mrs x10, {{trccidcctlr0|TRCCIDCCTLR0}} // encoding: [0x4a,0x30,0x31,0xd5]
+// CHECK: mrs x4, {{trccidcctlr1|TRCCIDCCTLR1}} // encoding: [0x44,0x31,0x31,0xd5]
+// CHECK: mrs x9, {{trcvmidcctlr0|TRCVMIDCCTLR0}} // encoding: [0x49,0x32,0x31,0xd5]
+// CHECK: mrs x11, {{trcvmidcctlr1|TRCVMIDCCTLR1}} // encoding: [0x4b,0x33,0x31,0xd5]
+// CHECK: mrs x22, {{trcitctrl|TRCITCTRL}} // encoding: [0x96,0x70,0x31,0xd5]
+// CHECK: mrs x23, {{trcclaimset|TRCCLAIMSET}} // encoding: [0xd7,0x78,0x31,0xd5]
+// CHECK: mrs x14, {{trcclaimclr|TRCCLAIMCLR}} // encoding: [0xce,0x79,0x31,0xd5]
msr trcoslar, x28
msr trclar, x14
@@ -590,177 +591,177 @@
msr trcitctrl, x1
msr trcclaimset, x7
msr trcclaimclr, x29
-// CHECK: msr trcoslar, x28 // encoding: [0x9c,0x10,0x11,0xd5]
-// CHECK: msr trclar, x14 // encoding: [0xce,0x7c,0x11,0xd5]
-// CHECK: msr trcprgctlr, x10 // encoding: [0x0a,0x01,0x11,0xd5]
-// CHECK: msr trcprocselr, x27 // encoding: [0x1b,0x02,0x11,0xd5]
-// CHECK: msr trcconfigr, x24 // encoding: [0x18,0x04,0x11,0xd5]
-// CHECK: msr trcauxctlr, x8 // encoding: [0x08,0x06,0x11,0xd5]
-// CHECK: msr trceventctl0r, x16 // encoding: [0x10,0x08,0x11,0xd5]
-// CHECK: msr trceventctl1r, x27 // encoding: [0x1b,0x09,0x11,0xd5]
-// CHECK: msr trcstallctlr, x26 // encoding: [0x1a,0x0b,0x11,0xd5]
-// CHECK: msr trctsctlr, x0 // encoding: [0x00,0x0c,0x11,0xd5]
-// CHECK: msr trcsyncpr, x14 // encoding: [0x0e,0x0d,0x11,0xd5]
-// CHECK: msr trcccctlr, x8 // encoding: [0x08,0x0e,0x11,0xd5]
-// CHECK: msr trcbbctlr, x6 // encoding: [0x06,0x0f,0x11,0xd5]
-// CHECK: msr trctraceidr, x23 // encoding: [0x37,0x00,0x11,0xd5]
-// CHECK: msr trcqctlr, x5 // encoding: [0x25,0x01,0x11,0xd5]
-// CHECK: msr trcvictlr, x0 // encoding: [0x40,0x00,0x11,0xd5]
-// CHECK: msr trcviiectlr, x0 // encoding: [0x40,0x01,0x11,0xd5]
-// CHECK: msr trcvissctlr, x1 // encoding: [0x41,0x02,0x11,0xd5]
-// CHECK: msr trcvipcssctlr, x0 // encoding: [0x40,0x03,0x11,0xd5]
-// CHECK: msr trcvdctlr, x7 // encoding: [0x47,0x08,0x11,0xd5]
-// CHECK: msr trcvdsacctlr, x18 // encoding: [0x52,0x09,0x11,0xd5]
-// CHECK: msr trcvdarcctlr, x24 // encoding: [0x58,0x0a,0x11,0xd5]
-// CHECK: msr trcseqevr0, x28 // encoding: [0x9c,0x00,0x11,0xd5]
-// CHECK: msr trcseqevr1, x21 // encoding: [0x95,0x01,0x11,0xd5]
-// CHECK: msr trcseqevr2, x16 // encoding: [0x90,0x02,0x11,0xd5]
-// CHECK: msr trcseqrstevr, x16 // encoding: [0x90,0x06,0x11,0xd5]
-// CHECK: msr trcseqstr, x25 // encoding: [0x99,0x07,0x11,0xd5]
-// CHECK: msr trcextinselr, x29 // encoding: [0x9d,0x08,0x11,0xd5]
-// CHECK: msr trccntrldvr0, x20 // encoding: [0xb4,0x00,0x11,0xd5]
-// CHECK: msr trccntrldvr1, x20 // encoding: [0xb4,0x01,0x11,0xd5]
-// CHECK: msr trccntrldvr2, x22 // encoding: [0xb6,0x02,0x11,0xd5]
-// CHECK: msr trccntrldvr3, x12 // encoding: [0xac,0x03,0x11,0xd5]
-// CHECK: msr trccntctlr0, x20 // encoding: [0xb4,0x04,0x11,0xd5]
-// CHECK: msr trccntctlr1, x4 // encoding: [0xa4,0x05,0x11,0xd5]
-// CHECK: msr trccntctlr2, x8 // encoding: [0xa8,0x06,0x11,0xd5]
-// CHECK: msr trccntctlr3, x16 // encoding: [0xb0,0x07,0x11,0xd5]
-// CHECK: msr trccntvr0, x5 // encoding: [0xa5,0x08,0x11,0xd5]
-// CHECK: msr trccntvr1, x27 // encoding: [0xbb,0x09,0x11,0xd5]
-// CHECK: msr trccntvr2, x21 // encoding: [0xb5,0x0a,0x11,0xd5]
-// CHECK: msr trccntvr3, x8 // encoding: [0xa8,0x0b,0x11,0xd5]
-// CHECK: msr trcimspec0, x6 // encoding: [0xe6,0x00,0x11,0xd5]
-// CHECK: msr trcimspec1, x27 // encoding: [0xfb,0x01,0x11,0xd5]
-// CHECK: msr trcimspec2, x23 // encoding: [0xf7,0x02,0x11,0xd5]
-// CHECK: msr trcimspec3, x15 // encoding: [0xef,0x03,0x11,0xd5]
-// CHECK: msr trcimspec4, x13 // encoding: [0xed,0x04,0x11,0xd5]
-// CHECK: msr trcimspec5, x25 // encoding: [0xf9,0x05,0x11,0xd5]
-// CHECK: msr trcimspec6, x19 // encoding: [0xf3,0x06,0x11,0xd5]
-// CHECK: msr trcimspec7, x27 // encoding: [0xfb,0x07,0x11,0xd5]
-// CHECK: msr trcrsctlr2, x4 // encoding: [0x04,0x12,0x11,0xd5]
-// CHECK: msr trcrsctlr3, x0 // encoding: [0x00,0x13,0x11,0xd5]
-// CHECK: msr trcrsctlr4, x21 // encoding: [0x15,0x14,0x11,0xd5]
-// CHECK: msr trcrsctlr5, x8 // encoding: [0x08,0x15,0x11,0xd5]
-// CHECK: msr trcrsctlr6, x20 // encoding: [0x14,0x16,0x11,0xd5]
-// CHECK: msr trcrsctlr7, x11 // encoding: [0x0b,0x17,0x11,0xd5]
-// CHECK: msr trcrsctlr8, x18 // encoding: [0x12,0x18,0x11,0xd5]
-// CHECK: msr trcrsctlr9, x24 // encoding: [0x18,0x19,0x11,0xd5]
-// CHECK: msr trcrsctlr10, x15 // encoding: [0x0f,0x1a,0x11,0xd5]
-// CHECK: msr trcrsctlr11, x21 // encoding: [0x15,0x1b,0x11,0xd5]
-// CHECK: msr trcrsctlr12, x4 // encoding: [0x04,0x1c,0x11,0xd5]
-// CHECK: msr trcrsctlr13, x28 // encoding: [0x1c,0x1d,0x11,0xd5]
-// CHECK: msr trcrsctlr14, x3 // encoding: [0x03,0x1e,0x11,0xd5]
-// CHECK: msr trcrsctlr15, x20 // encoding: [0x14,0x1f,0x11,0xd5]
-// CHECK: msr trcrsctlr16, x12 // encoding: [0x2c,0x10,0x11,0xd5]
-// CHECK: msr trcrsctlr17, x17 // encoding: [0x31,0x11,0x11,0xd5]
-// CHECK: msr trcrsctlr18, x10 // encoding: [0x2a,0x12,0x11,0xd5]
-// CHECK: msr trcrsctlr19, x11 // encoding: [0x2b,0x13,0x11,0xd5]
-// CHECK: msr trcrsctlr20, x3 // encoding: [0x23,0x14,0x11,0xd5]
-// CHECK: msr trcrsctlr21, x18 // encoding: [0x32,0x15,0x11,0xd5]
-// CHECK: msr trcrsctlr22, x26 // encoding: [0x3a,0x16,0x11,0xd5]
-// CHECK: msr trcrsctlr23, x5 // encoding: [0x25,0x17,0x11,0xd5]
-// CHECK: msr trcrsctlr24, x25 // encoding: [0x39,0x18,0x11,0xd5]
-// CHECK: msr trcrsctlr25, x5 // encoding: [0x25,0x19,0x11,0xd5]
-// CHECK: msr trcrsctlr26, x4 // encoding: [0x24,0x1a,0x11,0xd5]
-// CHECK: msr trcrsctlr27, x20 // encoding: [0x34,0x1b,0x11,0xd5]
-// CHECK: msr trcrsctlr28, x5 // encoding: [0x25,0x1c,0x11,0xd5]
-// CHECK: msr trcrsctlr29, x10 // encoding: [0x2a,0x1d,0x11,0xd5]
-// CHECK: msr trcrsctlr30, x24 // encoding: [0x38,0x1e,0x11,0xd5]
-// CHECK: msr trcrsctlr31, x20 // encoding: [0x34,0x1f,0x11,0xd5]
-// CHECK: msr trcssccr0, x23 // encoding: [0x57,0x10,0x11,0xd5]
-// CHECK: msr trcssccr1, x27 // encoding: [0x5b,0x11,0x11,0xd5]
-// CHECK: msr trcssccr2, x27 // encoding: [0x5b,0x12,0x11,0xd5]
-// CHECK: msr trcssccr3, x6 // encoding: [0x46,0x13,0x11,0xd5]
-// CHECK: msr trcssccr4, x3 // encoding: [0x43,0x14,0x11,0xd5]
-// CHECK: msr trcssccr5, x12 // encoding: [0x4c,0x15,0x11,0xd5]
-// CHECK: msr trcssccr6, x7 // encoding: [0x47,0x16,0x11,0xd5]
-// CHECK: msr trcssccr7, x6 // encoding: [0x46,0x17,0x11,0xd5]
-// CHECK: msr trcsscsr0, x20 // encoding: [0x54,0x18,0x11,0xd5]
-// CHECK: msr trcsscsr1, x17 // encoding: [0x51,0x19,0x11,0xd5]
-// CHECK: msr trcsscsr2, x11 // encoding: [0x4b,0x1a,0x11,0xd5]
-// CHECK: msr trcsscsr3, x4 // encoding: [0x44,0x1b,0x11,0xd5]
-// CHECK: msr trcsscsr4, x14 // encoding: [0x4e,0x1c,0x11,0xd5]
-// CHECK: msr trcsscsr5, x22 // encoding: [0x56,0x1d,0x11,0xd5]
-// CHECK: msr trcsscsr6, x3 // encoding: [0x43,0x1e,0x11,0xd5]
-// CHECK: msr trcsscsr7, x11 // encoding: [0x4b,0x1f,0x11,0xd5]
-// CHECK: msr trcsspcicr0, x2 // encoding: [0x62,0x10,0x11,0xd5]
-// CHECK: msr trcsspcicr1, x3 // encoding: [0x63,0x11,0x11,0xd5]
-// CHECK: msr trcsspcicr2, x5 // encoding: [0x65,0x12,0x11,0xd5]
-// CHECK: msr trcsspcicr3, x7 // encoding: [0x67,0x13,0x11,0xd5]
-// CHECK: msr trcsspcicr4, x11 // encoding: [0x6b,0x14,0x11,0xd5]
-// CHECK: msr trcsspcicr5, x13 // encoding: [0x6d,0x15,0x11,0xd5]
-// CHECK: msr trcsspcicr6, x17 // encoding: [0x71,0x16,0x11,0xd5]
-// CHECK: msr trcsspcicr7, x23 // encoding: [0x77,0x17,0x11,0xd5]
-// CHECK: msr trcpdcr, x3 // encoding: [0x83,0x14,0x11,0xd5]
-// CHECK: msr trcacvr0, x6 // encoding: [0x06,0x20,0x11,0xd5]
-// CHECK: msr trcacvr1, x20 // encoding: [0x14,0x22,0x11,0xd5]
-// CHECK: msr trcacvr2, x25 // encoding: [0x19,0x24,0x11,0xd5]
-// CHECK: msr trcacvr3, x1 // encoding: [0x01,0x26,0x11,0xd5]
-// CHECK: msr trcacvr4, x28 // encoding: [0x1c,0x28,0x11,0xd5]
-// CHECK: msr trcacvr5, x15 // encoding: [0x0f,0x2a,0x11,0xd5]
-// CHECK: msr trcacvr6, x25 // encoding: [0x19,0x2c,0x11,0xd5]
-// CHECK: msr trcacvr7, x12 // encoding: [0x0c,0x2e,0x11,0xd5]
-// CHECK: msr trcacvr8, x5 // encoding: [0x25,0x20,0x11,0xd5]
-// CHECK: msr trcacvr9, x25 // encoding: [0x39,0x22,0x11,0xd5]
-// CHECK: msr trcacvr10, x13 // encoding: [0x2d,0x24,0x11,0xd5]
-// CHECK: msr trcacvr11, x10 // encoding: [0x2a,0x26,0x11,0xd5]
-// CHECK: msr trcacvr12, x19 // encoding: [0x33,0x28,0x11,0xd5]
-// CHECK: msr trcacvr13, x10 // encoding: [0x2a,0x2a,0x11,0xd5]
-// CHECK: msr trcacvr14, x19 // encoding: [0x33,0x2c,0x11,0xd5]
-// CHECK: msr trcacvr15, x2 // encoding: [0x22,0x2e,0x11,0xd5]
-// CHECK: msr trcacatr0, x15 // encoding: [0x4f,0x20,0x11,0xd5]
-// CHECK: msr trcacatr1, x13 // encoding: [0x4d,0x22,0x11,0xd5]
-// CHECK: msr trcacatr2, x8 // encoding: [0x48,0x24,0x11,0xd5]
-// CHECK: msr trcacatr3, x1 // encoding: [0x41,0x26,0x11,0xd5]
-// CHECK: msr trcacatr4, x11 // encoding: [0x4b,0x28,0x11,0xd5]
-// CHECK: msr trcacatr5, x8 // encoding: [0x48,0x2a,0x11,0xd5]
-// CHECK: msr trcacatr6, x24 // encoding: [0x58,0x2c,0x11,0xd5]
-// CHECK: msr trcacatr7, x6 // encoding: [0x46,0x2e,0x11,0xd5]
-// CHECK: msr trcacatr8, x23 // encoding: [0x77,0x20,0x11,0xd5]
-// CHECK: msr trcacatr9, x5 // encoding: [0x65,0x22,0x11,0xd5]
-// CHECK: msr trcacatr10, x11 // encoding: [0x6b,0x24,0x11,0xd5]
-// CHECK: msr trcacatr11, x11 // encoding: [0x6b,0x26,0x11,0xd5]
-// CHECK: msr trcacatr12, x3 // encoding: [0x63,0x28,0x11,0xd5]
-// CHECK: msr trcacatr13, x28 // encoding: [0x7c,0x2a,0x11,0xd5]
-// CHECK: msr trcacatr14, x25 // encoding: [0x79,0x2c,0x11,0xd5]
-// CHECK: msr trcacatr15, x4 // encoding: [0x64,0x2e,0x11,0xd5]
-// CHECK: msr trcdvcvr0, x6 // encoding: [0x86,0x20,0x11,0xd5]
-// CHECK: msr trcdvcvr1, x3 // encoding: [0x83,0x24,0x11,0xd5]
-// CHECK: msr trcdvcvr2, x5 // encoding: [0x85,0x28,0x11,0xd5]
-// CHECK: msr trcdvcvr3, x11 // encoding: [0x8b,0x2c,0x11,0xd5]
-// CHECK: msr trcdvcvr4, x9 // encoding: [0xa9,0x20,0x11,0xd5]
-// CHECK: msr trcdvcvr5, x14 // encoding: [0xae,0x24,0x11,0xd5]
-// CHECK: msr trcdvcvr6, x10 // encoding: [0xaa,0x28,0x11,0xd5]
-// CHECK: msr trcdvcvr7, x12 // encoding: [0xac,0x2c,0x11,0xd5]
-// CHECK: msr trcdvcmr0, x8 // encoding: [0xc8,0x20,0x11,0xd5]
-// CHECK: msr trcdvcmr1, x8 // encoding: [0xc8,0x24,0x11,0xd5]
-// CHECK: msr trcdvcmr2, x22 // encoding: [0xd6,0x28,0x11,0xd5]
-// CHECK: msr trcdvcmr3, x22 // encoding: [0xd6,0x2c,0x11,0xd5]
-// CHECK: msr trcdvcmr4, x5 // encoding: [0xe5,0x20,0x11,0xd5]
-// CHECK: msr trcdvcmr5, x16 // encoding: [0xf0,0x24,0x11,0xd5]
-// CHECK: msr trcdvcmr6, x27 // encoding: [0xfb,0x28,0x11,0xd5]
-// CHECK: msr trcdvcmr7, x21 // encoding: [0xf5,0x2c,0x11,0xd5]
-// CHECK: msr trccidcvr0, x8 // encoding: [0x08,0x30,0x11,0xd5]
-// CHECK: msr trccidcvr1, x6 // encoding: [0x06,0x32,0x11,0xd5]
-// CHECK: msr trccidcvr2, x9 // encoding: [0x09,0x34,0x11,0xd5]
-// CHECK: msr trccidcvr3, x8 // encoding: [0x08,0x36,0x11,0xd5]
-// CHECK: msr trccidcvr4, x3 // encoding: [0x03,0x38,0x11,0xd5]
-// CHECK: msr trccidcvr5, x21 // encoding: [0x15,0x3a,0x11,0xd5]
-// CHECK: msr trccidcvr6, x12 // encoding: [0x0c,0x3c,0x11,0xd5]
-// CHECK: msr trccidcvr7, x7 // encoding: [0x07,0x3e,0x11,0xd5]
-// CHECK: msr trcvmidcvr0, x4 // encoding: [0x24,0x30,0x11,0xd5]
-// CHECK: msr trcvmidcvr1, x3 // encoding: [0x23,0x32,0x11,0xd5]
-// CHECK: msr trcvmidcvr2, x9 // encoding: [0x29,0x34,0x11,0xd5]
-// CHECK: msr trcvmidcvr3, x17 // encoding: [0x31,0x36,0x11,0xd5]
-// CHECK: msr trcvmidcvr4, x14 // encoding: [0x2e,0x38,0x11,0xd5]
-// CHECK: msr trcvmidcvr5, x12 // encoding: [0x2c,0x3a,0x11,0xd5]
-// CHECK: msr trcvmidcvr6, x10 // encoding: [0x2a,0x3c,0x11,0xd5]
-// CHECK: msr trcvmidcvr7, x3 // encoding: [0x23,0x3e,0x11,0xd5]
-// CHECK: msr trccidcctlr0, x14 // encoding: [0x4e,0x30,0x11,0xd5]
-// CHECK: msr trccidcctlr1, x22 // encoding: [0x56,0x31,0x11,0xd5]
-// CHECK: msr trcvmidcctlr0, x8 // encoding: [0x48,0x32,0x11,0xd5]
-// CHECK: msr trcvmidcctlr1, x15 // encoding: [0x4f,0x33,0x11,0xd5]
-// CHECK: msr trcitctrl, x1 // encoding: [0x81,0x70,0x11,0xd5]
-// CHECK: msr trcclaimset, x7 // encoding: [0xc7,0x78,0x11,0xd5]
-// CHECK: msr trcclaimclr, x29 // encoding: [0xdd,0x79,0x11,0xd5]
+// CHECK: msr {{trcoslar|TRCOSLAR}}, x28 // encoding: [0x9c,0x10,0x11,0xd5]
+// CHECK: msr {{trclar|TRCLAR}}, x14 // encoding: [0xce,0x7c,0x11,0xd5]
+// CHECK: msr {{trcprgctlr|TRCPRGCTLR}}, x10 // encoding: [0x0a,0x01,0x11,0xd5]
+// CHECK: msr {{trcprocselr|TRCPROCSELR}}, x27 // encoding: [0x1b,0x02,0x11,0xd5]
+// CHECK: msr {{trcconfigr|TRCCONFIGR}}, x24 // encoding: [0x18,0x04,0x11,0xd5]
+// CHECK: msr {{trcauxctlr|TRCAUXCTLR}}, x8 // encoding: [0x08,0x06,0x11,0xd5]
+// CHECK: msr {{trceventctl0r|TRCEVENTCTL0R}}, x16 // encoding: [0x10,0x08,0x11,0xd5]
+// CHECK: msr {{trceventctl1r|TRCEVENTCTL1R}}, x27 // encoding: [0x1b,0x09,0x11,0xd5]
+// CHECK: msr {{trcstallctlr|TRCSTALLCTLR}}, x26 // encoding: [0x1a,0x0b,0x11,0xd5]
+// CHECK: msr {{trctsctlr|TRCTSCTLR}}, x0 // encoding: [0x00,0x0c,0x11,0xd5]
+// CHECK: msr {{trcsyncpr|TRCSYNCPR}}, x14 // encoding: [0x0e,0x0d,0x11,0xd5]
+// CHECK: msr {{trcccctlr|TRCCCCTLR}}, x8 // encoding: [0x08,0x0e,0x11,0xd5]
+// CHECK: msr {{trcbbctlr|TRCBBCTLR}}, x6 // encoding: [0x06,0x0f,0x11,0xd5]
+// CHECK: msr {{trctraceidr|TRCTRACEIDR}}, x23 // encoding: [0x37,0x00,0x11,0xd5]
+// CHECK: msr {{trcqctlr|TRCQCTLR}}, x5 // encoding: [0x25,0x01,0x11,0xd5]
+// CHECK: msr {{trcvictlr|TRCVICTLR}}, x0 // encoding: [0x40,0x00,0x11,0xd5]
+// CHECK: msr {{trcviiectlr|TRCVIIECTLR}}, x0 // encoding: [0x40,0x01,0x11,0xd5]
+// CHECK: msr {{trcvissctlr|TRCVISSCTLR}}, x1 // encoding: [0x41,0x02,0x11,0xd5]
+// CHECK: msr {{trcvipcssctlr|TRCVIPCSSCTLR}}, x0 // encoding: [0x40,0x03,0x11,0xd5]
+// CHECK: msr {{trcvdctlr|TRCVDCTLR}}, x7 // encoding: [0x47,0x08,0x11,0xd5]
+// CHECK: msr {{trcvdsacctlr|TRCVDSACCTLR}}, x18 // encoding: [0x52,0x09,0x11,0xd5]
+// CHECK: msr {{trcvdarcctlr|TRCVDARCCTLR}}, x24 // encoding: [0x58,0x0a,0x11,0xd5]
+// CHECK: msr {{trcseqevr0|TRCSEQEVR0}}, x28 // encoding: [0x9c,0x00,0x11,0xd5]
+// CHECK: msr {{trcseqevr1|TRCSEQEVR1}}, x21 // encoding: [0x95,0x01,0x11,0xd5]
+// CHECK: msr {{trcseqevr2|TRCSEQEVR2}}, x16 // encoding: [0x90,0x02,0x11,0xd5]
+// CHECK: msr {{trcseqrstevr|TRCSEQRSTEVR}}, x16 // encoding: [0x90,0x06,0x11,0xd5]
+// CHECK: msr {{trcseqstr|TRCSEQSTR}}, x25 // encoding: [0x99,0x07,0x11,0xd5]
+// CHECK: msr {{trcextinselr|TRCEXTINSELR}}, x29 // encoding: [0x9d,0x08,0x11,0xd5]
+// CHECK: msr {{trccntrldvr0|TRCCNTRLDVR0}}, x20 // encoding: [0xb4,0x00,0x11,0xd5]
+// CHECK: msr {{trccntrldvr1|TRCCNTRLDVR1}}, x20 // encoding: [0xb4,0x01,0x11,0xd5]
+// CHECK: msr {{trccntrldvr2|TRCCNTRLDVR2}}, x22 // encoding: [0xb6,0x02,0x11,0xd5]
+// CHECK: msr {{trccntrldvr3|TRCCNTRLDVR3}}, x12 // encoding: [0xac,0x03,0x11,0xd5]
+// CHECK: msr {{trccntctlr0|TRCCNTCTLR0}}, x20 // encoding: [0xb4,0x04,0x11,0xd5]
+// CHECK: msr {{trccntctlr1|TRCCNTCTLR1}}, x4 // encoding: [0xa4,0x05,0x11,0xd5]
+// CHECK: msr {{trccntctlr2|TRCCNTCTLR2}}, x8 // encoding: [0xa8,0x06,0x11,0xd5]
+// CHECK: msr {{trccntctlr3|TRCCNTCTLR3}}, x16 // encoding: [0xb0,0x07,0x11,0xd5]
+// CHECK: msr {{trccntvr0|TRCCNTVR0}}, x5 // encoding: [0xa5,0x08,0x11,0xd5]
+// CHECK: msr {{trccntvr1|TRCCNTVR1}}, x27 // encoding: [0xbb,0x09,0x11,0xd5]
+// CHECK: msr {{trccntvr2|TRCCNTVR2}}, x21 // encoding: [0xb5,0x0a,0x11,0xd5]
+// CHECK: msr {{trccntvr3|TRCCNTVR3}}, x8 // encoding: [0xa8,0x0b,0x11,0xd5]
+// CHECK: msr {{trcimspec0|TRCIMSPEC0}}, x6 // encoding: [0xe6,0x00,0x11,0xd5]
+// CHECK: msr {{trcimspec1|TRCIMSPEC1}}, x27 // encoding: [0xfb,0x01,0x11,0xd5]
+// CHECK: msr {{trcimspec2|TRCIMSPEC2}}, x23 // encoding: [0xf7,0x02,0x11,0xd5]
+// CHECK: msr {{trcimspec3|TRCIMSPEC3}}, x15 // encoding: [0xef,0x03,0x11,0xd5]
+// CHECK: msr {{trcimspec4|TRCIMSPEC4}}, x13 // encoding: [0xed,0x04,0x11,0xd5]
+// CHECK: msr {{trcimspec5|TRCIMSPEC5}}, x25 // encoding: [0xf9,0x05,0x11,0xd5]
+// CHECK: msr {{trcimspec6|TRCIMSPEC6}}, x19 // encoding: [0xf3,0x06,0x11,0xd5]
+// CHECK: msr {{trcimspec7|TRCIMSPEC7}}, x27 // encoding: [0xfb,0x07,0x11,0xd5]
+// CHECK: msr {{trcrsctlr2|TRCRSCTLR2}}, x4 // encoding: [0x04,0x12,0x11,0xd5]
+// CHECK: msr {{trcrsctlr3|TRCRSCTLR3}}, x0 // encoding: [0x00,0x13,0x11,0xd5]
+// CHECK: msr {{trcrsctlr4|TRCRSCTLR4}}, x21 // encoding: [0x15,0x14,0x11,0xd5]
+// CHECK: msr {{trcrsctlr5|TRCRSCTLR5}}, x8 // encoding: [0x08,0x15,0x11,0xd5]
+// CHECK: msr {{trcrsctlr6|TRCRSCTLR6}}, x20 // encoding: [0x14,0x16,0x11,0xd5]
+// CHECK: msr {{trcrsctlr7|TRCRSCTLR7}}, x11 // encoding: [0x0b,0x17,0x11,0xd5]
+// CHECK: msr {{trcrsctlr8|TRCRSCTLR8}}, x18 // encoding: [0x12,0x18,0x11,0xd5]
+// CHECK: msr {{trcrsctlr9|TRCRSCTLR9}}, x24 // encoding: [0x18,0x19,0x11,0xd5]
+// CHECK: msr {{trcrsctlr10|TRCRSCTLR10}}, x15 // encoding: [0x0f,0x1a,0x11,0xd5]
+// CHECK: msr {{trcrsctlr11|TRCRSCTLR11}}, x21 // encoding: [0x15,0x1b,0x11,0xd5]
+// CHECK: msr {{trcrsctlr12|TRCRSCTLR12}}, x4 // encoding: [0x04,0x1c,0x11,0xd5]
+// CHECK: msr {{trcrsctlr13|TRCRSCTLR13}}, x28 // encoding: [0x1c,0x1d,0x11,0xd5]
+// CHECK: msr {{trcrsctlr14|TRCRSCTLR14}}, x3 // encoding: [0x03,0x1e,0x11,0xd5]
+// CHECK: msr {{trcrsctlr15|TRCRSCTLR15}}, x20 // encoding: [0x14,0x1f,0x11,0xd5]
+// CHECK: msr {{trcrsctlr16|TRCRSCTLR16}}, x12 // encoding: [0x2c,0x10,0x11,0xd5]
+// CHECK: msr {{trcrsctlr17|TRCRSCTLR17}}, x17 // encoding: [0x31,0x11,0x11,0xd5]
+// CHECK: msr {{trcrsctlr18|TRCRSCTLR18}}, x10 // encoding: [0x2a,0x12,0x11,0xd5]
+// CHECK: msr {{trcrsctlr19|TRCRSCTLR19}}, x11 // encoding: [0x2b,0x13,0x11,0xd5]
+// CHECK: msr {{trcrsctlr20|TRCRSCTLR20}}, x3 // encoding: [0x23,0x14,0x11,0xd5]
+// CHECK: msr {{trcrsctlr21|TRCRSCTLR21}}, x18 // encoding: [0x32,0x15,0x11,0xd5]
+// CHECK: msr {{trcrsctlr22|TRCRSCTLR22}}, x26 // encoding: [0x3a,0x16,0x11,0xd5]
+// CHECK: msr {{trcrsctlr23|TRCRSCTLR23}}, x5 // encoding: [0x25,0x17,0x11,0xd5]
+// CHECK: msr {{trcrsctlr24|TRCRSCTLR24}}, x25 // encoding: [0x39,0x18,0x11,0xd5]
+// CHECK: msr {{trcrsctlr25|TRCRSCTLR25}}, x5 // encoding: [0x25,0x19,0x11,0xd5]
+// CHECK: msr {{trcrsctlr26|TRCRSCTLR26}}, x4 // encoding: [0x24,0x1a,0x11,0xd5]
+// CHECK: msr {{trcrsctlr27|TRCRSCTLR27}}, x20 // encoding: [0x34,0x1b,0x11,0xd5]
+// CHECK: msr {{trcrsctlr28|TRCRSCTLR28}}, x5 // encoding: [0x25,0x1c,0x11,0xd5]
+// CHECK: msr {{trcrsctlr29|TRCRSCTLR29}}, x10 // encoding: [0x2a,0x1d,0x11,0xd5]
+// CHECK: msr {{trcrsctlr30|TRCRSCTLR30}}, x24 // encoding: [0x38,0x1e,0x11,0xd5]
+// CHECK: msr {{trcrsctlr31|TRCRSCTLR31}}, x20 // encoding: [0x34,0x1f,0x11,0xd5]
+// CHECK: msr {{trcssccr0|TRCSSCCR0}}, x23 // encoding: [0x57,0x10,0x11,0xd5]
+// CHECK: msr {{trcssccr1|TRCSSCCR1}}, x27 // encoding: [0x5b,0x11,0x11,0xd5]
+// CHECK: msr {{trcssccr2|TRCSSCCR2}}, x27 // encoding: [0x5b,0x12,0x11,0xd5]
+// CHECK: msr {{trcssccr3|TRCSSCCR3}}, x6 // encoding: [0x46,0x13,0x11,0xd5]
+// CHECK: msr {{trcssccr4|TRCSSCCR4}}, x3 // encoding: [0x43,0x14,0x11,0xd5]
+// CHECK: msr {{trcssccr5|TRCSSCCR5}}, x12 // encoding: [0x4c,0x15,0x11,0xd5]
+// CHECK: msr {{trcssccr6|TRCSSCCR6}}, x7 // encoding: [0x47,0x16,0x11,0xd5]
+// CHECK: msr {{trcssccr7|TRCSSCCR7}}, x6 // encoding: [0x46,0x17,0x11,0xd5]
+// CHECK: msr {{trcsscsr0|TRCSSCSR0}}, x20 // encoding: [0x54,0x18,0x11,0xd5]
+// CHECK: msr {{trcsscsr1|TRCSSCSR1}}, x17 // encoding: [0x51,0x19,0x11,0xd5]
+// CHECK: msr {{trcsscsr2|TRCSSCSR2}}, x11 // encoding: [0x4b,0x1a,0x11,0xd5]
+// CHECK: msr {{trcsscsr3|TRCSSCSR3}}, x4 // encoding: [0x44,0x1b,0x11,0xd5]
+// CHECK: msr {{trcsscsr4|TRCSSCSR4}}, x14 // encoding: [0x4e,0x1c,0x11,0xd5]
+// CHECK: msr {{trcsscsr5|TRCSSCSR5}}, x22 // encoding: [0x56,0x1d,0x11,0xd5]
+// CHECK: msr {{trcsscsr6|TRCSSCSR6}}, x3 // encoding: [0x43,0x1e,0x11,0xd5]
+// CHECK: msr {{trcsscsr7|TRCSSCSR7}}, x11 // encoding: [0x4b,0x1f,0x11,0xd5]
+// CHECK: msr {{trcsspcicr0|TRCSSPCICR0}}, x2 // encoding: [0x62,0x10,0x11,0xd5]
+// CHECK: msr {{trcsspcicr1|TRCSSPCICR1}}, x3 // encoding: [0x63,0x11,0x11,0xd5]
+// CHECK: msr {{trcsspcicr2|TRCSSPCICR2}}, x5 // encoding: [0x65,0x12,0x11,0xd5]
+// CHECK: msr {{trcsspcicr3|TRCSSPCICR3}}, x7 // encoding: [0x67,0x13,0x11,0xd5]
+// CHECK: msr {{trcsspcicr4|TRCSSPCICR4}}, x11 // encoding: [0x6b,0x14,0x11,0xd5]
+// CHECK: msr {{trcsspcicr5|TRCSSPCICR5}}, x13 // encoding: [0x6d,0x15,0x11,0xd5]
+// CHECK: msr {{trcsspcicr6|TRCSSPCICR6}}, x17 // encoding: [0x71,0x16,0x11,0xd5]
+// CHECK: msr {{trcsspcicr7|TRCSSPCICR7}}, x23 // encoding: [0x77,0x17,0x11,0xd5]
+// CHECK: msr {{trcpdcr|TRCPDCR}}, x3 // encoding: [0x83,0x14,0x11,0xd5]
+// CHECK: msr {{trcacvr0|TRCACVR0}}, x6 // encoding: [0x06,0x20,0x11,0xd5]
+// CHECK: msr {{trcacvr1|TRCACVR1}}, x20 // encoding: [0x14,0x22,0x11,0xd5]
+// CHECK: msr {{trcacvr2|TRCACVR2}}, x25 // encoding: [0x19,0x24,0x11,0xd5]
+// CHECK: msr {{trcacvr3|TRCACVR3}}, x1 // encoding: [0x01,0x26,0x11,0xd5]
+// CHECK: msr {{trcacvr4|TRCACVR4}}, x28 // encoding: [0x1c,0x28,0x11,0xd5]
+// CHECK: msr {{trcacvr5|TRCACVR5}}, x15 // encoding: [0x0f,0x2a,0x11,0xd5]
+// CHECK: msr {{trcacvr6|TRCACVR6}}, x25 // encoding: [0x19,0x2c,0x11,0xd5]
+// CHECK: msr {{trcacvr7|TRCACVR7}}, x12 // encoding: [0x0c,0x2e,0x11,0xd5]
+// CHECK: msr {{trcacvr8|TRCACVR8}}, x5 // encoding: [0x25,0x20,0x11,0xd5]
+// CHECK: msr {{trcacvr9|TRCACVR9}}, x25 // encoding: [0x39,0x22,0x11,0xd5]
+// CHECK: msr {{trcacvr10|TRCACVR10}}, x13 // encoding: [0x2d,0x24,0x11,0xd5]
+// CHECK: msr {{trcacvr11|TRCACVR11}}, x10 // encoding: [0x2a,0x26,0x11,0xd5]
+// CHECK: msr {{trcacvr12|TRCACVR12}}, x19 // encoding: [0x33,0x28,0x11,0xd5]
+// CHECK: msr {{trcacvr13|TRCACVR13}}, x10 // encoding: [0x2a,0x2a,0x11,0xd5]
+// CHECK: msr {{trcacvr14|TRCACVR14}}, x19 // encoding: [0x33,0x2c,0x11,0xd5]
+// CHECK: msr {{trcacvr15|TRCACVR15}}, x2 // encoding: [0x22,0x2e,0x11,0xd5]
+// CHECK: msr {{trcacatr0|TRCACATR0}}, x15 // encoding: [0x4f,0x20,0x11,0xd5]
+// CHECK: msr {{trcacatr1|TRCACATR1}}, x13 // encoding: [0x4d,0x22,0x11,0xd5]
+// CHECK: msr {{trcacatr2|TRCACATR2}}, x8 // encoding: [0x48,0x24,0x11,0xd5]
+// CHECK: msr {{trcacatr3|TRCACATR3}}, x1 // encoding: [0x41,0x26,0x11,0xd5]
+// CHECK: msr {{trcacatr4|TRCACATR4}}, x11 // encoding: [0x4b,0x28,0x11,0xd5]
+// CHECK: msr {{trcacatr5|TRCACATR5}}, x8 // encoding: [0x48,0x2a,0x11,0xd5]
+// CHECK: msr {{trcacatr6|TRCACATR6}}, x24 // encoding: [0x58,0x2c,0x11,0xd5]
+// CHECK: msr {{trcacatr7|TRCACATR7}}, x6 // encoding: [0x46,0x2e,0x11,0xd5]
+// CHECK: msr {{trcacatr8|TRCACATR8}}, x23 // encoding: [0x77,0x20,0x11,0xd5]
+// CHECK: msr {{trcacatr9|TRCACATR9}}, x5 // encoding: [0x65,0x22,0x11,0xd5]
+// CHECK: msr {{trcacatr10|TRCACATR10}}, x11 // encoding: [0x6b,0x24,0x11,0xd5]
+// CHECK: msr {{trcacatr11|TRCACATR11}}, x11 // encoding: [0x6b,0x26,0x11,0xd5]
+// CHECK: msr {{trcacatr12|TRCACATR12}}, x3 // encoding: [0x63,0x28,0x11,0xd5]
+// CHECK: msr {{trcacatr13|TRCACATR13}}, x28 // encoding: [0x7c,0x2a,0x11,0xd5]
+// CHECK: msr {{trcacatr14|TRCACATR14}}, x25 // encoding: [0x79,0x2c,0x11,0xd5]
+// CHECK: msr {{trcacatr15|TRCACATR15}}, x4 // encoding: [0x64,0x2e,0x11,0xd5]
+// CHECK: msr {{trcdvcvr0|TRCDVCVR0}}, x6 // encoding: [0x86,0x20,0x11,0xd5]
+// CHECK: msr {{trcdvcvr1|TRCDVCVR1}}, x3 // encoding: [0x83,0x24,0x11,0xd5]
+// CHECK: msr {{trcdvcvr2|TRCDVCVR2}}, x5 // encoding: [0x85,0x28,0x11,0xd5]
+// CHECK: msr {{trcdvcvr3|TRCDVCVR3}}, x11 // encoding: [0x8b,0x2c,0x11,0xd5]
+// CHECK: msr {{trcdvcvr4|TRCDVCVR4}}, x9 // encoding: [0xa9,0x20,0x11,0xd5]
+// CHECK: msr {{trcdvcvr5|TRCDVCVR5}}, x14 // encoding: [0xae,0x24,0x11,0xd5]
+// CHECK: msr {{trcdvcvr6|TRCDVCVR6}}, x10 // encoding: [0xaa,0x28,0x11,0xd5]
+// CHECK: msr {{trcdvcvr7|TRCDVCVR7}}, x12 // encoding: [0xac,0x2c,0x11,0xd5]
+// CHECK: msr {{trcdvcmr0|TRCDVCMR0}}, x8 // encoding: [0xc8,0x20,0x11,0xd5]
+// CHECK: msr {{trcdvcmr1|TRCDVCMR1}}, x8 // encoding: [0xc8,0x24,0x11,0xd5]
+// CHECK: msr {{trcdvcmr2|TRCDVCMR2}}, x22 // encoding: [0xd6,0x28,0x11,0xd5]
+// CHECK: msr {{trcdvcmr3|TRCDVCMR3}}, x22 // encoding: [0xd6,0x2c,0x11,0xd5]
+// CHECK: msr {{trcdvcmr4|TRCDVCMR4}}, x5 // encoding: [0xe5,0x20,0x11,0xd5]
+// CHECK: msr {{trcdvcmr5|TRCDVCMR5}}, x16 // encoding: [0xf0,0x24,0x11,0xd5]
+// CHECK: msr {{trcdvcmr6|TRCDVCMR6}}, x27 // encoding: [0xfb,0x28,0x11,0xd5]
+// CHECK: msr {{trcdvcmr7|TRCDVCMR7}}, x21 // encoding: [0xf5,0x2c,0x11,0xd5]
+// CHECK: msr {{trccidcvr0|TRCCIDCVR0}}, x8 // encoding: [0x08,0x30,0x11,0xd5]
+// CHECK: msr {{trccidcvr1|TRCCIDCVR1}}, x6 // encoding: [0x06,0x32,0x11,0xd5]
+// CHECK: msr {{trccidcvr2|TRCCIDCVR2}}, x9 // encoding: [0x09,0x34,0x11,0xd5]
+// CHECK: msr {{trccidcvr3|TRCCIDCVR3}}, x8 // encoding: [0x08,0x36,0x11,0xd5]
+// CHECK: msr {{trccidcvr4|TRCCIDCVR4}}, x3 // encoding: [0x03,0x38,0x11,0xd5]
+// CHECK: msr {{trccidcvr5|TRCCIDCVR5}}, x21 // encoding: [0x15,0x3a,0x11,0xd5]
+// CHECK: msr {{trccidcvr6|TRCCIDCVR6}}, x12 // encoding: [0x0c,0x3c,0x11,0xd5]
+// CHECK: msr {{trccidcvr7|TRCCIDCVR7}}, x7 // encoding: [0x07,0x3e,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr0|TRCVMIDCVR0}}, x4 // encoding: [0x24,0x30,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr1|TRCVMIDCVR1}}, x3 // encoding: [0x23,0x32,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr2|TRCVMIDCVR2}}, x9 // encoding: [0x29,0x34,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr3|TRCVMIDCVR3}}, x17 // encoding: [0x31,0x36,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr4|TRCVMIDCVR4}}, x14 // encoding: [0x2e,0x38,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr5|TRCVMIDCVR5}}, x12 // encoding: [0x2c,0x3a,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr6|TRCVMIDCVR6}}, x10 // encoding: [0x2a,0x3c,0x11,0xd5]
+// CHECK: msr {{trcvmidcvr7|TRCVMIDCVR7}}, x3 // encoding: [0x23,0x3e,0x11,0xd5]
+// CHECK: msr {{trccidcctlr0|TRCCIDCCTLR0}}, x14 // encoding: [0x4e,0x30,0x11,0xd5]
+// CHECK: msr {{trccidcctlr1|TRCCIDCCTLR1}}, x22 // encoding: [0x56,0x31,0x11,0xd5]
+// CHECK: msr {{trcvmidcctlr0|TRCVMIDCCTLR0}}, x8 // encoding: [0x48,0x32,0x11,0xd5]
+// CHECK: msr {{trcvmidcctlr1|TRCVMIDCCTLR1}}, x15 // encoding: [0x4f,0x33,0x11,0xd5]
+// CHECK: msr {{trcitctrl|TRCITCTRL}}, x1 // encoding: [0x81,0x70,0x11,0xd5]
+// CHECK: msr {{trcclaimset|TRCCLAIMSET}}, x7 // encoding: [0xc7,0x78,0x11,0xd5]
+// CHECK: msr {{trcclaimclr|TRCCLAIMCLR}}, x29 // encoding: [0xdd,0x79,0x11,0xd5]
diff --git a/test/MC/ARM/2013-03-18-Br-to-label-named-like-reg.s b/test/MC/ARM/2013-03-18-Br-to-label-named-like-reg.s
index 172abcf6f813..66fba3b6ac9b 100644
--- a/test/MC/ARM/2013-03-18-Br-to-label-named-like-reg.s
+++ b/test/MC/ARM/2013-03-18-Br-to-label-named-like-reg.s
@@ -1,5 +1,6 @@
-@ RUN: llvm-mc -arch arm %s
+@ RUN: llvm-mc -triple arm-eabi %s -o - | FileCheck %s
+
@ CHECK: test:
-@ CHECK: br r1
+@ CHECK: bl r1
test:
bl r1
diff --git a/test/MC/ARM/AlignedBundling/lit.local.cfg b/test/MC/ARM/AlignedBundling/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/MC/ARM/AlignedBundling/lit.local.cfg
+++ b/test/MC/ARM/AlignedBundling/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/ARM/Windows/mov32t-range.s b/test/MC/ARM/Windows/mov32t-range.s
new file mode 100644
index 000000000000..fef8ff2aca7a
--- /dev/null
+++ b/test/MC/ARM/Windows/mov32t-range.s
@@ -0,0 +1,37 @@
+@ RUN: llvm-mc -triple thumbv7-windows-itanium -filetype obj -o - %s \
+@ RUN: | llvm-readobj -r - | FileCheck -check-prefix CHECK-RELOCATIONS %s
+
+@ RUN: llvm-mc -triple thumbv7-windows-itanium -filetype obj -o - %s \
+@ RUN: | llvm-objdump -d - | FileCheck -check-prefix CHECK-ENCODING %s
+
+ .syntax unified
+ .thumb
+ .text
+
+ .def truncation
+ .scl 3
+ .type 32
+ .endef
+ .align 2
+ .thumb_func
+truncation:
+ movw r0, :lower16:.Lerange
+ movt r0, :upper16:.Lerange
+ bx lr
+
+ .section .rdata,"rd"
+.Lbuffer:
+ .zero 65536
+.Lerange:
+ .asciz "-erange"
+
+@ CHECK-RELOCATIONS: Relocations [
+@ CHECK-RELOCATIONS: .text {
+@ CHECK-RELOCATIONS: 0x0 IMAGE_REL_ARM_MOV32T .rdata
+@ CHECK-RELOCATIONS-NOT: 0x4 IMAGE_REL_ARM_MOV32T .rdata
+@ CHECK-RELOCATIONS: }
+@ CHECK-RELOCATIONS: ]
+
+@ CHECK-ENCODING: 0: 40 f2 00 00
+@ CHECK-ENCODING-NEXT: 4: c0 f2 01 00
+
diff --git a/test/MC/ARM/Windows/multiple-text-sections.s b/test/MC/ARM/Windows/multiple-text-sections.s
new file mode 100644
index 000000000000..241eee4bcad3
--- /dev/null
+++ b/test/MC/ARM/Windows/multiple-text-sections.s
@@ -0,0 +1,58 @@
+@ RUN: llvm-mc -triple thumbv7-windows-itanium -filetype obj -o - %s \
+@ RUN: | llvm-readobj -s - | FileCheck %s
+
+ .syntax unified
+ .text
+ .thumb
+
+ .section .text,"xr",one_only,a
+
+ .def a;
+ .scl 2;
+ .type 32;
+ .endef
+a:
+ movs r0, #65
+ bx lr
+
+ .section .text,"xr",one_only,b
+
+ .def b;
+ .scl 2;
+ .type 32;
+ .endef
+ .thumb_func
+b:
+ movs r0, #66
+ bx lr
+
+@ CHECK: Sections [
+@ CHECK: Section {
+@ CHECK: Name: .text
+@ CHECK: Characteristics [
+@ CHECK: IMAGE_SCN_CNT_CODE
+@ CHECK: IMAGE_SCN_MEM_16BIT
+@ CHECK: IMAGE_SCN_MEM_EXECUTE
+@ CHECK: IMAGE_SCN_MEM_READ
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: Section {
+@ CHECK: Name: .text
+@ CHECK: Characteristics [
+@ CHECK: IMAGE_SCN_CNT_CODE
+@ CHECK: IMAGE_SCN_MEM_16BIT
+@ CHECK: IMAGE_SCN_MEM_EXECUTE
+@ CHECK: IMAGE_SCN_MEM_READ
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: Section {
+@ CHECK: Name: .text
+@ CHECK: Characteristics [
+@ CHECK: IMAGE_SCN_CNT_CODE
+@ CHECK: IMAGE_SCN_MEM_16BIT
+@ CHECK: IMAGE_SCN_MEM_EXECUTE
+@ CHECK: IMAGE_SCN_MEM_READ
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
+
diff --git a/test/MC/ARM/Windows/text-attributes.s b/test/MC/ARM/Windows/text-attributes.s
new file mode 100644
index 000000000000..62aa028789fb
--- /dev/null
+++ b/test/MC/ARM/Windows/text-attributes.s
@@ -0,0 +1,30 @@
+@ RUN: llvm-mc -triple thumbv7-windows-itanium -filetype obj -o - %s \
+@ RUN: | llvm-readobj -s - | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .text
+
+ .def function
+ .type 32
+ .scl 2
+ .endef
+ .global function
+ .thumb_func
+function:
+ bx lr
+
+@ CHECK: Sections [
+@ CHECK: Section {
+@ CHECK: Name: .text
+@ CHECK: Characteristics [
+@ CHECK: IMAGE_SCN_ALIGN_4BYTES
+@ CHECK: IMAGE_SCN_CNT_CODE
+@ CHECK: IMAGE_SCN_MEM_16BIT
+@ CHECK: IMAGE_SCN_MEM_EXECUTE
+@ CHECK: IMAGE_SCN_MEM_PURGEABLE
+@ CHECK: IMAGE_SCN_MEM_READ
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
diff --git a/test/MC/ARM/arm-elf-symver.s b/test/MC/ARM/arm-elf-symver.s
new file mode 100644
index 000000000000..5fb1f6a0f3eb
--- /dev/null
+++ b/test/MC/ARM/arm-elf-symver.s
@@ -0,0 +1,143 @@
+@ RUN: llvm-mc -filetype=obj -triple arm-none-linux-gnueabi %s -o - | llvm-readobj -r -t | FileCheck %s
+@ RUN: llvm-mc -filetype=obj -triple thumb-none-linux-gnueabi %s -o - | llvm-readobj -r -t | FileCheck %s
+
+defined1:
+defined2:
+defined3:
+ .symver defined1, bar1@zed
+ .symver undefined1, bar2@zed
+
+ .symver defined2, bar3@@zed
+
+ .symver defined3, bar5@@@zed
+ .symver undefined3, bar6@@@zed
+
+ .long defined1
+ .long undefined1
+ .long defined2
+ .long defined3
+ .long undefined3
+
+ .global global1
+ .symver global1, g1@@zed
+global1:
+
+@ CHECK: Relocations [
+@ CHECK-NEXT: Section (2) .rel.text {
+@ CHECK-NEXT: 0x0 R_ARM_ABS32 .text 0x0
+@ CHECK-NEXT: 0x4 R_ARM_ABS32 bar2@zed 0x0
+@ CHECK-NEXT: 0x8 R_ARM_ABS32 .text 0x0
+@ CHECK-NEXT: 0xC R_ARM_ABS32 .text 0x0
+@ CHECK-NEXT: 0x10 R_ARM_ABS32 bar6@zed 0x0
+@ CHECK-NEXT: }
+@ CHECK-NEXT: ]
+
+@ CHECK: Symbol {
+@ CHECK: Name: bar1@zed
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: bar3@@zed
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: bar5@@zed
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: defined1
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: defined2
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: .text (0)
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: Section (0x3)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: .data (0)
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: Section (0x3)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .data (0x3)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: .bss (0)
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local (0x0)
+@ CHECK-NEXT: Type: Section (0x3)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .bss (0x4)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: g1@@zed
+@ CHECK-NEXT: Value: 0x14
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Global (0x1)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: global1
+@ CHECK-NEXT: Value: 0x14
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Global (0x1)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .text (0x1)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: bar2@zed
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Global (0x1)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: Undefined (0x0)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: Symbol {
+@ CHECK-NEXT: Name: bar6@zed
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Global (0x1)
+@ CHECK-NEXT: Type: None (0x0)
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: Undefined (0x0)
+@ CHECK-NEXT: }
+@ CHECK-NEXT: ]
diff --git a/test/MC/ARM/arm-ldrd.s b/test/MC/ARM/arm-ldrd.s
index c26ee25aad7d..af4bc735f39c 100644
--- a/test/MC/ARM/arm-ldrd.s
+++ b/test/MC/ARM/arm-ldrd.s
@@ -1,4 +1,4 @@
-// RUN: not llvm-mc -arch arm -mattr=+v5te %s 2>&1 | FileCheck %s
+// RUN: not llvm-mc -triple arm-eabi -mattr=+v5te %s -o /dev/null 2>&1 | FileCheck %s
//
// rdar://14479793
diff --git a/test/MC/ARM/arm-memory-instructions.s b/test/MC/ARM/arm-memory-instructions.s
index ad35dd26a04a..f41c779b8f6b 100644
--- a/test/MC/ARM/arm-memory-instructions.s
+++ b/test/MC/ARM/arm-memory-instructions.s
@@ -485,3 +485,14 @@ Lbaz: .quad 0
@ CHECK: strht r8, [r1], #-25 @ encoding: [0xb9,0x81,0x61,0xe0]
@ CHECK: strht r5, [r3], r4 @ encoding: [0xb4,0x50,0xa3,0xe0]
@ CHECK: strht r6, [r8], -r0 @ encoding: [0xb0,0x60,0x28,0xe0]
+
+@------------------------------------------------------------------------------
+@ GNU Assembler Compatibility
+@------------------------------------------------------------------------------
+
+ ldrd r0, [sp]
+ strd r0, [sp]
+
+@ CHECK: ldrd r0, r1, [sp] @ encoding: [0xd0,0x00,0xcd,0xe1]
+@ CHECK: strd r0, r1, [sp] @ encoding: [0xf0,0x00,0xcd,0xe1]
+
diff --git a/test/MC/ARM/arm-qualifier-diagnostics.s b/test/MC/ARM/arm-qualifier-diagnostics.s
new file mode 100644
index 000000000000..8b75eee5de20
--- /dev/null
+++ b/test/MC/ARM/arm-qualifier-diagnostics.s
@@ -0,0 +1,15 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o - %s 2>&1 | FileCheck %s
+
+ .syntax unified
+
+ .type function,%function
+function:
+ ldr.n r0, [r0]
+
+@ CHECK: error: instruction with .n (narrow) qualifier not allowed in arm mode
+@ CHECK: ldr.n r0, [r0]
+@ CHECK: ^
+@ CHECK-NOT: error: unexpected token in operand
+@ CHECK-NOT: ldr.n r0, [r0]
+@ CHECK-NOT: ^
+
diff --git a/test/MC/ARM/arm-thumb-cpus-default.s b/test/MC/ARM/arm-thumb-cpus-default.s
index 636ee3c50571..d7a18490ffc5 100644
--- a/test/MC/ARM/arm-thumb-cpus-default.s
+++ b/test/MC/ARM/arm-thumb-cpus-default.s
@@ -1,9 +1,20 @@
-@ RUN: llvm-mc -show-encoding -arch=arm < %s | FileCheck %s --check-prefix=CHECK-ARM-ONLY
-@ RUN: llvm-mc -show-encoding -triple=armv4t < %s | FileCheck %s --check-prefix=CHECK-ARM-THUMB
-@ RUN: llvm-mc -show-encoding -arch=arm -mcpu=cortex-a15 < %s| FileCheck %s --check-prefix=CHECK-ARM-THUMB
-@ RUN: llvm-mc -show-encoding -arch=arm -mcpu=cortex-m3 < %s | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
-@ RUN: llvm-mc -show-encoding -triple=armv7m < %s | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
-@ RUN: llvm-mc -show-encoding -triple=armv6m < %s | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
+@ RUN: llvm-mc -show-encoding -triple=arm-eabi < %s \
+@ RUN: | FileCheck %s --check-prefix=CHECK-ARM-ONLY
+
+@ RUN: llvm-mc -show-encoding -triple=armv4t-eabi < %s \
+@ RUN: | FileCheck %s --check-prefix=CHECK-ARM-THUMB
+
+@ RUN: llvm-mc -show-encoding -triple=arm-eabi -mcpu=cortex-a15 < %s \
+@ RUN: | FileCheck %s --check-prefix=CHECK-ARM-THUMB
+
+@ RUN: llvm-mc -show-encoding -triple=arm-eabi -mcpu=cortex-m3 < %s \
+@ RUN: | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
+
+@ RUN: llvm-mc -show-encoding -triple=armv7m-eabi < %s \
+@ RUN: | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
+
+@ RUN: llvm-mc -show-encoding -triple=armv6m-eabi < %s \
+@ RUN: | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
@ Make sure the architecture chosen by LLVM defaults to a compatible
@ ARM/Thumb mode.
diff --git a/test/MC/ARM/arm-thumb-cpus.s b/test/MC/ARM/arm-thumb-cpus.s
index 24be989db330..9005c7f2a174 100644
--- a/test/MC/ARM/arm-thumb-cpus.s
+++ b/test/MC/ARM/arm-thumb-cpus.s
@@ -1,9 +1,20 @@
-@ RUN: not llvm-mc -show-encoding -arch=arm < %s 2>&1 | FileCheck %s --check-prefix=CHECK-ARM-ONLY
-@ RUN: llvm-mc -show-encoding -triple=armv4t < %s 2>&1| FileCheck %s --check-prefix=CHECK-ARM-THUMB
-@ RUN: llvm-mc -show-encoding -arch=arm -mcpu=cortex-a15 < %s 2>&1| FileCheck %s --check-prefix=CHECK-ARM-THUMB
-@ RUN: not llvm-mc -show-encoding -arch=arm -mcpu=cortex-m3 < %s 2>&1 | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
-@ RUN: not llvm-mc -show-encoding -triple=armv7m < %s 2>&1 | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
-@ RUN: not llvm-mc -show-encoding -triple=armv6m < %s 2>&1 | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
+@ RUN: not llvm-mc -show-encoding -triple=arm-eabi < %s 2>&1 \
+@ RUN: | FileCheck %s --check-prefix=CHECK-ARM-ONLY
+
+@ RUN: llvm-mc -show-encoding -triple=armv4t < %s 2>&1 \
+@ RUN: | FileCheck %s --check-prefix=CHECK-ARM-THUMB
+
+@ RUN: llvm-mc -show-encoding -triple=arm-eabi -mcpu=cortex-a15 < %s 2>&1 \
+@ RUN: | FileCheck %s --check-prefix=CHECK-ARM-THUMB
+
+@ RUN: not llvm-mc -show-encoding -triple=arm-eabi -mcpu=cortex-m3 < %s 2>&1 \
+@ RUN: | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
+
+@ RUN: not llvm-mc -show-encoding -triple=armv7m-eabi < %s 2>&1 \
+@ RUN: | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
+
+@ RUN: not llvm-mc -show-encoding -triple=armv6m-eabi < %s 2>&1 \
+@ RUN: | FileCheck %s --check-prefix=CHECK-THUMB-ONLY
@ Make sure correct diagnostics are given for CPUs without support for
@ one or other of the execution states.
diff --git a/test/MC/ARM/arm_addrmode2.s b/test/MC/ARM/arm_addrmode2.s
index ca99233b9b5c..53290ab0dd3e 100644
--- a/test/MC/ARM/arm_addrmode2.s
+++ b/test/MC/ARM/arm_addrmode2.s
@@ -4,27 +4,35 @@
@ CHECK: ldrt r1, [r0], r2 @ encoding: [0x02,0x10,0xb0,0xe6]
@ CHECK: ldrt r1, [r0], r2, lsr #3 @ encoding: [0xa2,0x11,0xb0,0xe6]
@ CHECK: ldrt r1, [r0], #4 @ encoding: [0x04,0x10,0xb0,0xe4]
+@ CHECK: ldrt r1, [r0], #0 @ encoding: [0x00,0x10,0xb0,0xe4]
@ CHECK: ldrbt r1, [r0], r2 @ encoding: [0x02,0x10,0xf0,0xe6]
@ CHECK: ldrbt r1, [r0], r2, lsr #3 @ encoding: [0xa2,0x11,0xf0,0xe6]
@ CHECK: ldrbt r1, [r0], #4 @ encoding: [0x04,0x10,0xf0,0xe4]
+@ CHECK: ldrbt r1, [r0], #0 @ encoding: [0x00,0x10,0xf0,0xe4]
@ CHECK: strt r1, [r0], r2 @ encoding: [0x02,0x10,0xa0,0xe6]
@ CHECK: strt r1, [r0], r2, lsr #3 @ encoding: [0xa2,0x11,0xa0,0xe6]
@ CHECK: strt r1, [r0], #4 @ encoding: [0x04,0x10,0xa0,0xe4]
+@ CHECK: strt r1, [r0], #0 @ encoding: [0x00,0x10,0xa0,0xe4]
@ CHECK: strbt r1, [r0], r2 @ encoding: [0x02,0x10,0xe0,0xe6]
@ CHECK: strbt r1, [r0], r2, lsr #3 @ encoding: [0xa2,0x11,0xe0,0xe6]
@ CHECK: strbt r1, [r0], #4 @ encoding: [0x04,0x10,0xe0,0xe4]
+@ CHECK: strbt r1, [r0], #0 @ encoding: [0x00,0x10,0xe0,0xe4]
ldrt r1, [r0], r2
ldrt r1, [r0], r2, lsr #3
ldrt r1, [r0], #4
+ ldrt r1, [r0]
ldrbt r1, [r0], r2
ldrbt r1, [r0], r2, lsr #3
ldrbt r1, [r0], #4
+ ldrbt r1, [r0]
strt r1, [r0], r2
strt r1, [r0], r2, lsr #3
strt r1, [r0], #4
+ strt r1, [r0]
strbt r1, [r0], r2
strbt r1, [r0], r2, lsr #3
strbt r1, [r0], #4
+ strbt r1, [r0]
@ Pre-indexed
@ CHECK: ldr r1, [r0, r2, lsr #3]! @ encoding: [0xa2,0x11,0xb0,0xe7]
diff --git a/test/MC/ARM/arm_fixups.s b/test/MC/ARM/arm_fixups.s
index 99eb3c539416..1f56e1285245 100644
--- a/test/MC/ARM/arm_fixups.s
+++ b/test/MC/ARM/arm_fixups.s
@@ -1,9 +1,13 @@
@ RUN: llvm-mc -triple armv7-unknown-unknown %s --show-encoding > %t
@ RUN: FileCheck < %t %s
+@ RUN: llvm-mc -triple armebv7-unknown-unknown %s --show-encoding > %t
+@ RUN: FileCheck --check-prefix=CHECK-BE < %t %s
bl _printf
@ CHECK: bl _printf @ encoding: [A,A,A,0xeb]
@ CHECK: @ fixup A - offset: 0, value: _printf, kind: fixup_arm_uncondbl
+@ CHECK-BE: bl _printf @ encoding: [0xeb,A,A,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _printf, kind: fixup_arm_uncondbl
mov r9, :lower16:(_foo)
movw r9, :lower16:(_foo)
@@ -11,12 +15,20 @@
@ CHECK: movw r9, :lower16:_foo @ encoding: [A,0x90'A',0b0000AAAA,0xe3]
@ CHECK: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_movw_lo16
+@ CHECK-BE: movw r9, :lower16:_foo @ encoding: [0xe3,0b0000AAAA,0x90'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_movw_lo16
@ CHECK: movw r9, :lower16:_foo @ encoding: [A,0x90'A',0b0000AAAA,0xe3]
@ CHECK: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_movw_lo16
+@ CHECK-BE: movw r9, :lower16:_foo @ encoding: [0xe3,0b0000AAAA,0x90'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_movw_lo16
@ CHECK: movt r9, :upper16:_foo @ encoding: [A,0x90'A',0b0100AAAA,0xe3]
@ CHECK: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_movt_hi16
+@ CHECK-BE: movt r9, :upper16:_foo @ encoding: [0xe3,0b0100AAAA,0x90'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_movt_hi16
- mov r2, fred
+ mov r2, :lower16:fred
-@ CHECK: movw r2, fred @ encoding: [A,0x20'A',0b0000AAAA,0xe3]
+@ CHECK: movw r2, :lower16:fred @ encoding: [A,0x20'A',0b0000AAAA,0xe3]
@ CHECK: @ fixup A - offset: 0, value: fred, kind: fixup_arm_movw_lo16
+@ CHECK-BE: movw r2, :lower16:fred @ encoding: [0xe3,0b0000AAAA,0x20'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: fred, kind: fixup_arm_movw_lo16
diff --git a/test/MC/ARM/arm_word_directive.s b/test/MC/ARM/arm_word_directive.s
deleted file mode 100644
index e782479b6081..000000000000
--- a/test/MC/ARM/arm_word_directive.s
+++ /dev/null
@@ -1,6 +0,0 @@
-@ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unknown %s | FileCheck %s
-
-@ CHECK: TEST0:
-@ CHECK: .long 3
-TEST0:
- .word 3
diff --git a/test/MC/ARM/basic-arm-instructions.s b/test/MC/ARM/basic-arm-instructions.s
index 29bc6c07cc6c..e5e96170a775 100644
--- a/test/MC/ARM/basic-arm-instructions.s
+++ b/test/MC/ARM/basic-arm-instructions.s
@@ -1,4 +1,5 @@
@ RUN: llvm-mc -triple=armv7-apple-darwin -mcpu=cortex-a8 -show-encoding < %s | FileCheck %s
+@ RUN: llvm-mc -triple=armebv7-unknown-unknown -mcpu=cortex-a8 -show-encoding < %s | FileCheck --check-prefix=CHECK-BE %s
.syntax unified
.globl _func
@@ -135,8 +136,12 @@ Lforward:
@ CHECK: Lback:
@ CHECK: adr r2, Lback @ encoding: [A,0x20'A',0x0f'A',0xe2'A']
@ CHECK: @ fixup A - offset: 0, value: Lback, kind: fixup_arm_adr_pcrel_12
+@ CHECK-BE: adr r2, Lback @ encoding: [0xe2'A',0x0f'A',0x20'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: Lback, kind: fixup_arm_adr_pcrel_12
@ CHECK: adr r3, Lforward @ encoding: [A,0x30'A',0x0f'A',0xe2'A']
@ CHECK: @ fixup A - offset: 0, value: Lforward, kind: fixup_arm_adr_pcrel_12
+@ CHECK-BE: adr r3, Lforward @ encoding: [0xe2'A',0x0f'A',0x30'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: Lforward, kind: fixup_arm_adr_pcrel_12
@ CHECK: Lforward:
@ CHECK: adr r2, #3 @ encoding: [0x03,0x20,0x8f,0xe2]
@ CHECK: adr r2, #-3 @ encoding: [0x03,0x20,0x4f,0xe2]
@@ -310,9 +315,13 @@ Lforward:
beq _baz
@ CHECK: b _bar @ encoding: [A,A,A,0xea]
- @ fixup A - offset: 0, value: _bar, kind: fixup_arm_uncondbranch
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_uncondbranch
+@ CHECK-BE: b _bar @ encoding: [0xea,A,A,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_uncondbranch
@ CHECK: beq _baz @ encoding: [A,A,A,0x0a]
- @ fixup A - offset: 0, value: _baz, kind: fixup_arm_condbranch
+@ CHECK: @ fixup A - offset: 0, value: _baz, kind: fixup_arm_condbranch
+@ CHECK-BE: beq _baz @ encoding: [0x0a,A,A,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _baz, kind: fixup_arm_condbranch
@------------------------------------------------------------------------------
@@ -420,10 +429,16 @@ Lforward:
@ CHECK: bl _bar @ encoding: [A,A,A,0xeb]
@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_uncondbl
+@ CHECK-BE: bl _bar @ encoding: [0xeb,A,A,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_uncondbl
@ CHECK: bleq _bar @ encoding: [A,A,A,0x0b]
@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_condbl
+@ CHECK-BE: bleq _bar @ encoding: [0x0b,A,A,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_condbl
@ CHECK: blx _bar @ encoding: [A,A,A,0xfa]
- @ fixup A - offset: 0, value: _bar, kind: fixup_arm_blx
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_blx
+@ CHECK-BE: blx _bar @ encoding: [0xfa,A,A,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_blx
@ CHECK: blls #28634268 @ encoding: [0x27,0x3b,0x6d,0x9b]
@ CHECK: blx #32424576 @ encoding: [0xa0,0xb0,0x7b,0xfa]
@ CHECK: blx #16212288 @ encoding: [0x50,0xd8,0x3d,0xfa]
diff --git a/test/MC/ARM/basic-thumb-instructions.s b/test/MC/ARM/basic-thumb-instructions.s
index dec7f5b13334..30ab733375ba 100644
--- a/test/MC/ARM/basic-thumb-instructions.s
+++ b/test/MC/ARM/basic-thumb-instructions.s
@@ -4,6 +4,7 @@
@---
@ RUN: llvm-mc -triple=thumbv6-apple-darwin -show-encoding < %s | FileCheck %s
@ RUN: llvm-mc -triple=thumbv7-apple-darwin -show-encoding < %s | FileCheck %s
+@ RUN: llvm-mc -triple=thumbebv7-unknown-unknown -show-encoding < %s | FileCheck --check-prefix=CHECK-BE %s
.syntax unified
.globl _func
@@ -90,7 +91,9 @@ _func:
adr r3, #1020
@ CHECK: adr r2, _baz @ encoding: [A,0xa2]
- @ fixup A - offset: 0, value: _baz, kind: fixup_thumb_adr_pcrel_10
+@ CHECK: @ fixup A - offset: 0, value: _baz, kind: fixup_thumb_adr_pcrel_10
+@ CHECK-BE: adr r2, _baz @ encoding: [0xa2,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _baz, kind: fixup_thumb_adr_pcrel_10
@ CHECK: adr r5, #0 @ encoding: [0x00,0xa5]
@ CHECK: adr r2, #4 @ encoding: [0x01,0xa2]
@ CHECK: adr r3, #1020 @ encoding: [0xff,0xa3]
@@ -132,9 +135,13 @@ _func:
beq #160
@ CHECK: b _baz @ encoding: [A,0xe0'A']
- @ fixup A - offset: 0, value: _baz, kind: fixup_arm_thumb_br
+@ CHECK: @ fixup A - offset: 0, value: _baz, kind: fixup_arm_thumb_br
+@ CHECK-BE: b _baz @ encoding: [0xe0'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _baz, kind: fixup_arm_thumb_br
@ CHECK: beq _bar @ encoding: [A,0xd0]
- @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_bcc
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_bcc
+@ CHECK-BE: beq _bar @ encoding: [0xd0,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_bcc
@ CHECK: b #1838 @ encoding: [0x97,0xe3]
@ CHECK: b #-420 @ encoding: [0x2e,0xe7]
@ CHECK: beq #-256 @ encoding: [0x80,0xd0]
@@ -174,9 +181,13 @@ _func:
blx _baz
@ CHECK: bl _bar @ encoding: [A,0xf0'A',A,0xd0'A']
- @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_bl
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_bl
+@ CHECK-BE: bl _bar @ encoding: [0xf0'A',A,0xd0'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_bl
@ CHECK: blx _baz @ encoding: [A,0xf0'A',A,0xc0'A']
- @ fixup A - offset: 0, value: _baz, kind: fixup_arm_thumb_blx
+@ CHECK: @ fixup A - offset: 0, value: _baz, kind: fixup_arm_thumb_blx
+@ CHECK-BE: blx _baz @ encoding: [0xf0'A',A,0xc0'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _baz, kind: fixup_arm_thumb_blx
@------------------------------------------------------------------------------
@@ -272,7 +283,9 @@ _func:
ldr r3, #368
@ CHECK: ldr r1, _foo @ encoding: [A,0x49]
- @ fixup A - offset: 0, value: _foo, kind: fixup_arm_thumb_cp
+@ CHECK: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_thumb_cp
+@ CHECK-BE: ldr r1, _foo @ encoding: [0x49,A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _foo, kind: fixup_arm_thumb_cp
@ CHECK: ldr r3, [pc, #604] @ encoding: [0x97,0x4b]
@ CHECK: ldr r3, [pc, #368] @ encoding: [0x5c,0x4b]
diff --git a/test/MC/ARM/basic-thumb2-instructions.s b/test/MC/ARM/basic-thumb2-instructions.s
index 3a5f48832c15..05e0b2b574e9 100644
--- a/test/MC/ARM/basic-thumb2-instructions.s
+++ b/test/MC/ARM/basic-thumb2-instructions.s
@@ -1,4 +1,5 @@
@ RUN: llvm-mc -triple=thumbv7-apple-darwin -mcpu=cortex-a8 -show-encoding < %s | FileCheck %s
+@ RUN: llvm-mc -triple=thumbebv7-unknown-unknown -mcpu=cortex-a8 -show-encoding < %s | FileCheck --check-prefix=CHECK-BE %s
.syntax unified
.globl _func
@@ -227,12 +228,18 @@ _func:
bmi.w #-183396
@ CHECK: b.w _bar @ encoding: [A,0xf0'A',A,0x90'A']
- @ fixup A - offset: 0, value: _bar, kind: fixup_t2_uncondbranch
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_uncondbranch
+@ CHECK-BE: b.w _bar @ encoding: [0xf0'A',A,0x90'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_uncondbranch
@ CHECK: beq.w _bar @ encoding: [A,0xf0'A',A,0x80'A']
- @ fixup A - offset: 0, value: _bar, kind: fixup_t2_condbranch
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_condbranch
+@ CHECK-BE: beq.w _bar @ encoding: [0xf0'A',A,0x80'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_condbranch
@ CHECK: it eq @ encoding: [0x08,0xbf]
@ CHECK: beq.w _bar @ encoding: [A,0xf0'A',A,0x90'A']
- @ fixup A - offset: 0, value: _bar, kind: fixup_t2_uncondbranch
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_uncondbranch
+@ CHECK-BE: beq.w _bar @ encoding: [0xf0'A',A,0x90'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_uncondbranch
@ CHECK: bmi.w #-183396 @ encoding: [0x13,0xf5,0xce,0xa9]
@@ -332,9 +339,13 @@ _func:
@ CHECK: cbnz r7, #6 @ encoding: [0x1f,0xb9]
@ CHECK: cbnz r7, #12 @ encoding: [0x37,0xb9]
@ CHECK: cbz r6, _bar @ encoding: [0x06'A',0xb1'A']
- @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_cb
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_cb
+@ CHECK-BE: cbz r6, _bar @ encoding: [0xb1'A',0x06'A']
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_cb
@ CHECK: cbnz r6, _bar @ encoding: [0x06'A',0xb9'A']
- @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_cb
+@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_cb
+@ CHECK-BE: cbnz r6, _bar @ encoding: [0xb9'A',0x06'A']
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_arm_thumb_cb
@------------------------------------------------------------------------------
@@ -800,11 +811,20 @@ _func:
@------------------------------------------------------------------------------
ldr.w r5, _foo
ldr lr, (_strcmp-4)
+ ldr sp, _foo
@ CHECK: ldr.w r5, _foo @ encoding: [0x5f'A',0xf8'A',A,0x50'A']
@ CHECK: @ fixup A - offset: 0, value: _foo, kind: fixup_t2_ldst_pcrel_12
+@ CHECK-BE: ldr.w r5, _foo @ encoding: [0xf8'A',0x5f'A',0x50'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _foo, kind: fixup_t2_ldst_pcrel_12
@ CHECK: ldr.w lr, _strcmp-4 @ encoding: [0x5f'A',0xf8'A',A,0xe0'A']
@ CHECK: @ fixup A - offset: 0, value: _strcmp-4, kind: fixup_t2_ldst_pcrel_12
+@ CHECK-BE: ldr.w lr, _strcmp-4 @ encoding: [0xf8'A',0x5f'A',0xe0'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _strcmp-4, kind: fixup_t2_ldst_pcrel_12
+@ CHECK: ldr.w sp, _foo @ encoding: [0x5f'A',0xf8'A',A,0xd0'A']
+@ CHECK: @ fixup A - offset: 0, value: _foo, kind: fixup_t2_ldst_pcrel_12
+@ CHECK-BE: ldr.w sp, _foo @ encoding: [0xf8'A',0x5f'A',0xd0'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _foo, kind: fixup_t2_ldst_pcrel_12
ldr r7, [pc, #8]
ldr.n r7, [pc, #8]
@@ -818,19 +838,21 @@ _func:
ldr r8, [pc, #132]
ldr pc, [pc, #256]
ldr pc, [pc, #-400]
+ ldr sp, [pc, #4]
@ CHECK: ldr r7, [pc, #8] @ encoding: [0x02,0x4f]
@ CHECK: ldr r7, [pc, #8] @ encoding: [0x02,0x4f]
@ CHECK: ldr.w r7, [pc, #8] @ encoding: [0xdf,0xf8,0x08,0x70]
-@ CHECK: ldr r4, [pc, #1020] @ encoding: [0xff,0x4c]
+@ CHECK: ldr r4, [pc, #1020] @ encoding: [0xff,0x4c]
@ CHECK: ldr.w r3, [pc, #-1020] @ encoding: [0x5f,0xf8,0xfc,0x33]
-@ CHECK: ldr.w r6, [pc, #1024] @ encoding: [0xdf,0xf8,0x00,0x64]
-@ CHECK: ldr.w r0, [pc, #-1024] @ encoding: [0x5f,0xf8,0x00,0x04]
-@ CHECK: ldr.w r2, [pc, #4095] @ encoding: [0xdf,0xf8,0xff,0x2f]
-@ CHECK: ldr.w r1, [pc, #-4095] @ encoding: [0x5f,0xf8,0xff,0x1f]
-@ CHECK: ldr.w r8, [pc, #132] @ encoding: [0xdf,0xf8,0x84,0x80]
+@ CHECK: ldr.w r6, [pc, #1024] @ encoding: [0xdf,0xf8,0x00,0x64]
+@ CHECK: ldr.w r0, [pc, #-1024] @ encoding: [0x5f,0xf8,0x00,0x04]
+@ CHECK: ldr.w r2, [pc, #4095] @ encoding: [0xdf,0xf8,0xff,0x2f]
+@ CHECK: ldr.w r1, [pc, #-4095] @ encoding: [0x5f,0xf8,0xff,0x1f]
+@ CHECK: ldr.w r8, [pc, #132] @ encoding: [0xdf,0xf8,0x84,0x80]
@ CHECK: ldr.w pc, [pc, #256] @ encoding: [0xdf,0xf8,0x00,0xf1]
@ CHECK: ldr.w pc, [pc, #-400] @ encoding: [0x5f,0xf8,0x90,0xf1]
+@ CHECK: ldr.w sp, [pc, #4] @ encoding: [0xdf,0xf8,0x04,0xd0]
ldrb r9, [pc, #-0]
ldrsb r11, [pc, #-0]
@@ -839,9 +861,9 @@ _func:
ldr r5, [pc, #-0]
@ CHECK: ldrb.w r9, [pc, #-0] @ encoding: [0x1f,0xf8,0x00,0x90]
-@ CHECK: ldrsb.w r11, [pc, #-0] @ encoding: [0x1f,0xf9,0x00,0xb0]
+@ CHECK: ldrsb.w r11, [pc, #-0] @ encoding: [0x1f,0xf9,0x00,0xb0]
@ CHECK: ldrh.w r10, [pc, #-0] @ encoding: [0x3f,0xf8,0x00,0xa0]
-@ CHECK: ldrsh.w r1, [pc, #-0] @ encoding: [0x3f,0xf9,0x00,0x10]
+@ CHECK: ldrsh.w r1, [pc, #-0] @ encoding: [0x3f,0xf9,0x00,0x10]
@ CHECK: ldr.w r5, [pc, #-0] @ encoding: [0x5f,0xf8,0x00,0x50]
@------------------------------------------------------------------------------
@@ -1022,6 +1044,8 @@ _func:
@ CHECK: ldrh.w r5, _bar @ encoding: [0x3f'A',0xf8'A',A,0x50'A']
@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_ldst_pcrel_12
+@ CHECK-BE: ldrh.w r5, _bar @ encoding: [0xf8'A',0x3f'A',0x50'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_ldst_pcrel_12
@------------------------------------------------------------------------------
@@ -1091,6 +1115,8 @@ _func:
@ CHECK: ldrsb.w r5, _bar @ encoding: [0x1f'A',0xf9'A',A,0x50'A']
@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_ldst_pcrel_12
+@ CHECK-BE: ldrsb.w r5, _bar @ encoding: [0xf9'A',0x1f'A',0x50'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_ldst_pcrel_12
@------------------------------------------------------------------------------
@@ -1160,6 +1186,8 @@ _func:
@ CHECK: ldrsh.w r5, _bar @ encoding: [0x3f'A',0xf9'A',A,0x50'A']
@ CHECK: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_ldst_pcrel_12
+@ CHECK-BE: ldrsh.w r5, _bar @ encoding: [0xf9'A',0x3f'A',0x50'A',A]
+@ CHECK-BE: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_ldst_pcrel_12
@ TEMPORARILY DISABLED:
@ ldrsh.w r4, [pc, #1435]
@@ -2777,6 +2805,9 @@ _func:
strd r0, r1, [r2, #-0]
strd r0, r1, [r2, #-0]!
strd r0, r1, [r2], #-0
+ strd r0, r1, [r2, #256]
+ strd r0, r1, [r2, #256]!
+ strd r0, r1, [r2], #256
@ CHECK: strd r3, r5, [r6, #24] @ encoding: [0xc6,0xe9,0x06,0x35]
@ CHECK: strd r3, r5, [r6, #24]! @ encoding: [0xe6,0xe9,0x06,0x35]
@@ -2787,6 +2818,9 @@ _func:
@ CHECK: strd r0, r1, [r2, #-0] @ encoding: [0x42,0xe9,0x00,0x01]
@ CHECK: strd r0, r1, [r2, #-0]! @ encoding: [0x62,0xe9,0x00,0x01]
@ CHECK: strd r0, r1, [r2], #-0 @ encoding: [0x62,0xe8,0x00,0x01]
+@ CHECK: strd r0, r1, [r2, #256] @ encoding: [0xc2,0xe9,0x40,0x01]
+@ CHECK: strd r0, r1, [r2, #256]! @ encoding: [0xe2,0xe9,0x40,0x01]
+@ CHECK: strd r0, r1, [r2], #256 @ encoding: [0xe2,0xe8,0x40,0x01]
@------------------------------------------------------------------------------
diff --git a/test/MC/ARM/big-endian-arm-fixup.s b/test/MC/ARM/big-endian-arm-fixup.s
new file mode 100644
index 000000000000..5fb9cef5028d
--- /dev/null
+++ b/test/MC/ARM/big-endian-arm-fixup.s
@@ -0,0 +1,107 @@
+// RUN: llvm-mc -triple=armeb-eabi -mattr v7,vfp2 -filetype=obj < %s | llvm-objdump -s - | FileCheck %s
+
+ .syntax unified
+ .text
+ .align 2
+ .code 32
+
+@ARM::fixup_arm_condbl
+.section s_condbl,"ax",%progbits
+// CHECK-LABEL: Contents of section s_condbl
+// CHECK: 0000 0b000002
+ bleq condbl_label+16
+condbl_label:
+
+@ARM::fixup_arm_uncondbl
+.section s_uncondbl,"ax",%progbits
+// CHECK-LABEL: Contents of section s_uncondbl
+// CHECK: 0000 eb000002
+ bl uncond_label+16
+uncond_label:
+
+@ARM::fixup_arm_blx
+.section s_blx,"ax",%progbits
+// CHECK-LABEL: Contents of section s_blx
+// CHECK: 0000 fa000002
+ blx blx_label+16
+blx_label:
+
+@ARM::fixup_arm_uncondbranch
+.section s_uncondbranch,"ax",%progbits
+// CHECK-LABEL: Contents of section s_uncondbranch
+// CHECK: 0000 ea000003
+ b uncondbranch_label+16
+uncondbranch_label:
+
+@ARM::fixup_arm_condbranch
+.section s_condbranch,"ax",%progbits
+// CHECK-LABEL: Contents of section s_condbranch
+// CHECK: 0000 0a000003
+ beq condbranch_label+16
+condbranch_label:
+
+@ARM::fixup_arm_pcrel_10
+.section s_arm_pcrel_10,"ax",%progbits
+// CHECK-LABEL: Contents of section s_arm_pcrel_10
+// CHECK: 0000 ed9f0b03
+ vldr d0, arm_pcrel_10_label+16
+arm_pcrel_10_label:
+
+@ARM::fixup_arm_ldst_pcrel_12
+.section s_arm_ldst_pcrel_12,"ax",%progbits
+// CHECK-LABEL: Contents of section s_arm_ldst_pcrel_12
+// CHECK: 0000 e59f000c
+ ldr r0, arm_ldst_pcrel_12_label+16
+arm_ldst_pcrel_12_label:
+
+@ARM::fixup_arm_adr_pcrel_12
+.section s_arm_adr_pcrel_12,"ax",%progbits
+// CHECK-LABEL: Contents of section s_arm_adr_pcrel_12
+// CHECK: 0000 e28f0010
+ adr r0, arm_adr_pcrel_12_label+20
+arm_adr_pcrel_12_label:
+
+@ARM::fixup_arm_pcrel_10_unscaled
+.section s_arm_adr_pcrel_10_unscaled,"ax",%progbits
+// CHECK-LABEL: Contents of section s_arm_adr_pcrel_10_unscaled
+// CHECK: 0000 e1cf01d4
+ ldrd r0, r1, arm_adr_pcrel_10_unscaled_label+24
+arm_adr_pcrel_10_unscaled_label:
+
+@ARM::fixup_arm_movw_lo16
+.section s_movw,"ax",%progbits
+// CHECK-LABEL: Contents of section s_movw
+// CHECK: 0000 e3000008
+ movw r0, :lower16:(some_label+8)
+
+@ARM::fixup_arm_movt_hi16
+.section s_movt,"ax",%progbits
+// CHECK-LABEL: Contents of section s_movt
+// CHECK: 0000 e34f0ffc
+ movt r0, :upper16:GOT-(movt_label)
+movt_label:
+
+@FK_Data_1
+.section s_fk_data_1
+// CHECK-LABEL: Contents of section s_fk_data_1
+// CHECK: 0000 01
+fk_data1_l_label:
+.byte fk_data1_h_label-fk_data1_l_label
+fk_data1_h_label:
+
+@FK_Data_2
+.section s_fk_data_2
+// CHECK-LABEL: Contents of section s_fk_data_2
+// CHECK: 0000 0002
+fk_data2_l_label:
+.short fk_data2_h_label-fk_data2_l_label
+fk_data2_h_label:
+
+@FK_Data_4
+.section s_fk_data_4
+// CHECK-LABEL: Contents of section s_fk_data_4
+// CHECK: 0000 00000004
+fk_data4_l_label:
+.long fk_data4_h_label-fk_data4_l_label
+fk_data4_h_label:
+
diff --git a/test/MC/ARM/big-endian-thumb-fixup.s b/test/MC/ARM/big-endian-thumb-fixup.s
new file mode 100644
index 000000000000..5023fca26be1
--- /dev/null
+++ b/test/MC/ARM/big-endian-thumb-fixup.s
@@ -0,0 +1,63 @@
+// RUN: llvm-mc -triple=armeb-eabi -mattr v7,vfp2 -filetype=obj < %s | llvm-objdump -s - | FileCheck %s
+
+ .syntax unified
+ .text
+ .align 2
+ .code 16
+
+@ARM::fixup_arm_thumb_bl
+.section s_thumb_bl,"ax",%progbits
+// CHECK-LABEL: Contents of section s_thumb_bl
+// CHECK: 0000 f000f801
+ bl thumb_bl_label
+ nop
+thumb_bl_label:
+
+@ARM::fixup_arm_thumb_blx
+// CHECK-LABEL: Contents of section s_thumb_blx
+// CHECK: 0000 f000e802
+.section s_thumb_blx,"ax",%progbits
+ blx thumb_blx_label+8
+thumb_blx_label:
+
+@ARM::fixup_arm_thumb_br
+.section s_thumb_br,"ax",%progbits
+// CHECK-LABEL: Contents of section s_thumb_br
+// CHECK: 0000 e000bf00
+ b thumb_br_label
+ nop
+thumb_br_label:
+
+@ARM::fixup_arm_thumb_bcc
+.section s_thumb_bcc,"ax",%progbits
+// CHECK-LABEL: Contents of section s_thumb_bcc
+// CHECK: 0000 d000bf00
+ beq thumb_bcc_label
+ nop
+thumb_bcc_label:
+
+@ARM::fixup_arm_thumb_cb
+.section s_thumb_cb,"ax",%progbits
+// CHECK-LABEL: Contents of section s_thumb_cb
+// CHECK: 0000 b100bf00
+ cbz r0, thumb_cb_label
+ nop
+thumb_cb_label:
+
+@ARM::fixup_arm_thumb_cp
+.section s_thumb_cp,"ax",%progbits
+// CHECK-LABEL: Contents of section s_thumb_cp
+// CHECK: 0000 4801bf00
+ ldr r0, =thumb_cp_label
+ nop
+ nop
+thumb_cp_label:
+
+@ARM::fixup_arm_thumb_adr_pcrel_10
+.section s_thumb_adr_pcrel_10,"ax",%progbits
+// CHECK-LABEL: Contents of section s_thumb_adr_pcrel_10
+// CHECK: 0000 a000bf00
+ adr r0, thumb_adr_pcrel_10_label
+ nop
+thumb_adr_pcrel_10_label:
+
diff --git a/test/MC/ARM/big-endian-thumb2-fixup.s b/test/MC/ARM/big-endian-thumb2-fixup.s
new file mode 100644
index 000000000000..4fd5276fce6e
--- /dev/null
+++ b/test/MC/ARM/big-endian-thumb2-fixup.s
@@ -0,0 +1,49 @@
+// RUN: llvm-mc -triple=thumbeb-eabi -mattr v7,vfp2 -filetype=obj < %s | llvm-objdump -s - | FileCheck %s
+
+ .syntax unified
+ .text
+ .align 2
+
+@ARM::fixup_t2_movw_lo16
+.section s_movw,"ax",%progbits
+// CHECK-LABEL: Contents of section s_movw
+// CHECK: 0000 f2400008
+ movw r0, :lower16:(some_label+8)
+
+@ARM::fixup_t2_movt_hi16
+.section s_movt,"ax",%progbits
+// CHECK-LABEL: Contents of section s_movt
+// CHECK: 0000 f6cf70fc
+ movt r0, :upper16:GOT-(movt_label)
+movt_label:
+
+@ARM::fixup_t2_uncondbranch
+.section s_uncondbranch,"ax",%progbits
+// CHECK-LABEL: Contents of section s_uncondbranch
+// CHECK: 0000 f000b801 bf00
+ b.w uncond_label
+ nop
+uncond_label:
+
+@ARM::fixup_t2_condbranch
+.section s_condbranch,"ax",%progbits
+// CHECK-LABEL: Contents of section s_condbranch
+// CHECK: 0000 f0008001 bf00
+ beq.w cond_label
+ nop
+cond_label:
+
+@ARM::fixup_t2_ldst_pcrel_12
+.section s_ldst_precel_12,"ax",%progbits
+ ldr r0, ldst_precel_12_label
+ nop
+ nop
+ldst_precel_12_label:
+
+@ARM::fixup_t2_adr_pcrel_12
+.section s_adr_pcrel_12,"ax",%progbits
+ adr r0, adr_pcrel_12_label
+ nop
+ nop
+adr_pcrel_12_label:
+
diff --git a/test/MC/ARM/bkpt.s b/test/MC/ARM/bkpt.s
new file mode 100644
index 000000000000..fcd4040b186f
--- /dev/null
+++ b/test/MC/ARM/bkpt.s
@@ -0,0 +1,32 @@
+@ RUN: llvm-mc -triple armv7-unknown-unknown -filetype asm -o - %s | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .global thumb_default_bkpt
+ .type thumb_default_bkpt, %function
+ .thumb_func
+thumb_default_bkpt:
+ bkpt
+
+@ CHECK-LABEL: thumb_default_bkpt
+@ CHECK: bkpt #0
+
+ .global normal_bkpt
+ .type normal_bkpt, %function
+normal_bkpt:
+ bkpt #42
+
+@ CHECK-LABEL: normal_bkpt
+@ CHECK: bkpt #42
+
+ .arm
+
+ .global arm_default_bkpt
+ .type arm_default_bkpt, %function
+arm_default_bkpt:
+ bkpt
+
+@ CHECK-LABEL: arm_default_bkpt
+@ CHECK: bkpt #0
+
diff --git a/test/MC/ARM/cmp-immediate-fixup-error.s b/test/MC/ARM/cmp-immediate-fixup-error.s
new file mode 100644
index 000000000000..25a2368643db
--- /dev/null
+++ b/test/MC/ARM/cmp-immediate-fixup-error.s
@@ -0,0 +1,7 @@
+@ RUN: not llvm-mc -triple=arm-linux-gnueabi -filetype=obj < %s 2>&1 | FileCheck %s
+
+.text
+ cmp r0, #(l1 - unknownLabel + 4) >> 2
+@ CHECK: error: expected relocatable expression
+
+l1:
diff --git a/test/MC/ARM/cmp-immediate-fixup-error2.s b/test/MC/ARM/cmp-immediate-fixup-error2.s
new file mode 100644
index 000000000000..71f7fa141e02
--- /dev/null
+++ b/test/MC/ARM/cmp-immediate-fixup-error2.s
@@ -0,0 +1,7 @@
+@ RUN: not llvm-mc -triple=arm-linux-gnueabi -filetype=obj < %s 2>&1 | FileCheck %s
+
+.text
+ cmp r0, #(l1 - unknownLabel)
+@ CHECK: error: symbol 'unknownLabel' can not be undefined in a subtraction expression
+
+l1:
diff --git a/test/MC/ARM/cmp-immediate-fixup.s b/test/MC/ARM/cmp-immediate-fixup.s
new file mode 100644
index 000000000000..e21d5c20ccda
--- /dev/null
+++ b/test/MC/ARM/cmp-immediate-fixup.s
@@ -0,0 +1,9 @@
+@ PR18931
+@ RUN: llvm-mc < %s -triple=arm-linux-gnueabi -filetype=obj -o - \
+@ RUN: | llvm-objdump --disassemble -arch=arm - | FileCheck %s
+
+ .text
+@ CHECK: cmp r2, #1
+ cmp r2, #(l2 - l1 + 4) >> 2
+l1:
+l2:
diff --git a/test/MC/ARM/cmp-immediate-fixup2.s b/test/MC/ARM/cmp-immediate-fixup2.s
new file mode 100644
index 000000000000..c091145523c0
--- /dev/null
+++ b/test/MC/ARM/cmp-immediate-fixup2.s
@@ -0,0 +1,9 @@
+@ PR18931
+@ RUN: llvm-mc < %s -triple=arm-linux-gnueabi -filetype=obj -o - \
+@ RUN: | llvm-objdump --disassemble -arch=arm - | FileCheck %s
+
+ .text
+@ CHECK: cmp r2, #0
+ cmp r2, #(l2 - l1)
+l1:
+l2:
diff --git a/test/MC/ARM/coff-debugging-secrel.ll b/test/MC/ARM/coff-debugging-secrel.ll
new file mode 100644
index 000000000000..f37b19e6a70c
--- /dev/null
+++ b/test/MC/ARM/coff-debugging-secrel.ll
@@ -0,0 +1,49 @@
+; RUN: llc -mtriple thumbv7--windows-itanium -filetype obj -o - %s \
+; RUN: | llvm-readobj -r - | FileCheck %s -check-prefix CHECK-ITANIUM
+
+; RUN: llc -mtriple thumbv7--windows-msvc -filetype obj -o - %s \
+; RUN: | llvm-readobj -r - | FileCheck %s -check-prefix CHECK-MSVC
+
+; ModuleID = '/Users/compnerd/work/llvm/test/MC/ARM/reduced.c'
+target datalayout = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv7--windows-itanium"
+
+define arm_aapcs_vfpcc void @function() {
+entry:
+ ret void, !dbg !0
+}
+
+!llvm.dbg.cu = !{!7}
+!llvm.module.flags = !{!9, !10}
+
+!0 = metadata !{i32 1, i32 0, metadata !1, null}
+!1 = metadata !{i32 786478, metadata !2, metadata !3, metadata !"function", metadata !"function", metadata !"", i32 1, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, void ()* @function, null, null, metadata !6, i32 1} ; [ DW_TAG_subprogram ], [line 1], [def], [function]
+!2 = metadata !{metadata !"/Users/compnerd/work/llvm/test/MC/ARM/reduced.c", metadata !"/Users/compnerd/work/llvm"}
+!3 = metadata !{i32 786473, metadata !2} ; [ DW_TAG_file_type] [/Users/compnerd/work/llvm/test/MC/ARM/reduced.c]
+!4 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ], [line 0, size 0, align 0, offset 0] [from ]
+!5 = metadata !{null}
+!6 = metadata !{}
+!7 = metadata !{i32 786449, metadata !2, i32 12, metadata !"clang version 3.5.0", i1 false, metadata !"", i32 0, metadata !6, metadata !6, metadata !8, metadata !6, metadata !6, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/Users/compnerd/work/llvm/test/MC/ARM/reduced.c] [DW_LANG_C99]
+!8 = metadata !{metadata !1}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+
+; CHECK-ITANIUM: Relocations [
+; CHECK-ITANIUM: Section {{.*}} .debug_info {
+; CHECK-ITANIUM: 0x6 IMAGE_REL_ARM_SECREL .debug_abbrev
+; CHECK-ITANIUM: 0xC IMAGE_REL_ARM_SECREL .debug_str
+; CHECK-ITANIUM: 0x12 IMAGE_REL_ARM_SECREL .debug_str
+; CHECK-ITANIUM: 0x16 IMAGE_REL_ARM_SECREL .debug_line
+; CHECK-ITANIUM: }
+; CHECK-ITANIUM: Section {{.*}}.debug_pubnames {
+; CHECK-ITANIUM: 0x6 IMAGE_REL_ARM_SECREL .debug_info
+; CHECK-ITANIUM: }
+; CHECK-ITANIUM: ]
+
+; CHECK-MSVC: Relocations [
+; CHECK-MSVC: Section {{.*}} .debug$S {
+; CHECK-MSVC: 0xC IMAGE_REL_ARM_SECREL function
+; CHECK-MSVC: 0x10 IMAGE_REL_ARM_SECTION function
+; CHECK-MSVC: }
+; CHECK-MSVC: ]
+
diff --git a/test/MC/ARM/coff-file.s b/test/MC/ARM/coff-file.s
new file mode 100644
index 000000000000..f0dd29a29256
--- /dev/null
+++ b/test/MC/ARM/coff-file.s
@@ -0,0 +1,47 @@
+// RUN: llvm-mc -triple thumbv7-windows -filetype obj %s -o - | llvm-objdump -t - \
+// RUN: | FileCheck %s
+
+// RUN: llvm-mc -triple thumbv7-windows -filetype obj %s -o - \
+// RUN: | llvm-readobj -symbols | FileCheck %s -check-prefix CHECK-SCN
+
+ .file "null-padded.asm"
+// CHECK: (nx 1) {{0x[0-9]+}} .file
+// CHECK-NEXT: AUX null-padded.asm{{$}}
+
+ .file "eighteen-chars.asm"
+
+// CHECK: (nx 1) {{0x[0-9]+}} .file
+// CHECK-NEXT: AUX eighteen-chars.asm{{$}}
+
+ .file "multiple-auxiliary-entries.asm"
+
+// CHECK: (nx 2) {{0x[0-9]+}} .file
+// CHECK-NEXT: AUX multiple-auxiliary-entries.asm{{$}}
+
+// CHECK-SCN: Symbols [
+// CHECK-SCN: Symbol {
+// CHECK-SCN: Name: .file
+// CHECK-SCN: Section: (65534)
+// CHECK-SCN: StorageClass: File
+// CHECK-SCN: AuxFileRecord {
+// CHECK-SCN: FileName: null-padded.asm
+// CHECK-SCN: }
+// CHECK-SCN: }
+// CHECK-SCN: Symbol {
+// CHECK-SCN: Name: .file
+// CHECK-SCN: Section: (65534)
+// CHECK-SCN: StorageClass: File
+// CHECK-SCN: AuxFileRecord {
+// CHECK-SCN: FileName: eighteen-chars.asm
+// CHECK-SCN: }
+// CHECK-SCN: }
+// CHECK-SCN: Symbol {
+// CHECK-SCN: Name: .file
+// CHECK-SCN: Section: (65534)
+// CHECK-SCN: StorageClass: File
+// CHECK-SCN: AuxFileRecord {
+// CHECK-SCN: FileName: multiple-auxiliary-entries.asm
+// CHECK-SCN: }
+// CHECK-SCN: }
+// CHECK-SCN: ]
+
diff --git a/test/MC/ARM/coff-function-type-info.ll b/test/MC/ARM/coff-function-type-info.ll
new file mode 100644
index 000000000000..a9f7c186b630
--- /dev/null
+++ b/test/MC/ARM/coff-function-type-info.ll
@@ -0,0 +1,45 @@
+; RUN: llc -mtriple thumbv7-windows-itanium -filetype asm -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-ASM
+
+; RUN: llc -mtriple thumbv7-windows-itanium -filetype obj -o - %s \
+; RUN: | llvm-readobj -t | FileCheck %s -check-prefix CHECK-OBJECT
+
+define arm_aapcs_vfpcc void @external() {
+entry:
+ ret void
+}
+
+; CHECK-ASM: .def external
+; CHECK-ASM: .scl 2
+; CHECK-ASM: .type 32
+; CHECK-ASM: .endef
+; CHECK-ASM: .globl external
+
+define internal arm_aapcs_vfpcc void @internal() {
+entry:
+ ret void
+}
+
+; CHECK-ASM: .def internal
+; CHECK-ASM: .scl 3
+; CHECK-ASM: .type 32
+; CHECK-ASM: .endef
+; CHECK-ASM-NOT: .globl internal
+
+; CHECK-OBJECT: Symbol {
+; CHECK-OBJECT: Name: external
+; CHECK-OBJECT: Section: .text
+; CHECK-OBJECT: BaseType: Null
+; CHECK-OBJECT: ComplexType: Function
+; CHECK-OBJECT: StorageClass: External
+; CHECK-OBJECT: AuxSymbolCount: 0
+; CHECK-OBJECT: }
+; CHECK-OBJECT: Symbol {
+; CHECK-OBJECT: Name: internal
+; CHECK-OBJECT: Section: .text
+; CHECK-OBJECT: BaseType: Null
+; CHECK-OBJECT: ComplexType: Function
+; CHECK-OBJECT: StorageClass: Static
+; CHECK-OBJECT: AuxSymbolCount: 0
+; CHECK-OBJECT: }
+
diff --git a/test/MC/ARM/coff-relocations.s b/test/MC/ARM/coff-relocations.s
new file mode 100644
index 000000000000..6ebae709f6cf
--- /dev/null
+++ b/test/MC/ARM/coff-relocations.s
@@ -0,0 +1,101 @@
+@ RUN: llvm-mc -triple thumbv7-windows-itanium -filetype obj -o - %s \
+@ RUN: | llvm-readobj -r - | FileCheck %s -check-prefix CHECK-RELOCATION
+
+@ RUN: llvm-mc -triple thumbv7-windows-itanium -filetype obj -o - %s \
+@ RUN: | llvm-objdump -d - | FileCheck %s -check-prefix CHECK-ENCODING
+
+ .syntax unified
+ .text
+ .thumb
+
+ .global target
+
+ .thumb_func
+branch24t:
+ b target
+
+@ CHECK-ENCODING-LABEL: branch24t
+@ CHECK-ENCODING-NEXT: b.w #0
+
+ .thumb_func
+branch20t:
+ bcc target
+
+@ CHECK-ENCODING-LABEL: branch20t
+@ CHECK-ENCODING-NEXT: blo.w #0
+
+ .thumb_func
+blx23t:
+ bl target
+
+@ CHECK-ENCODING-LABEL: blx23t
+@ CHECK-ENCODING-NEXT: bl #0
+
+ .thumb_func
+mov32t:
+ movw r0, :lower16:target
+ movt r0, :upper16:target
+ blx r0
+
+@ CHECK-ENCODING-LABEL: mov32t
+@ CHECK-ENCODING-NEXT: movw r0, #0
+@ CHECK-ENCODING-NEXT: movt r0, #0
+@ CHECK-ENCODING-NEXT: blx r0
+
+ .thumb_func
+addr32:
+ ldr r0, .Laddr32
+ bx r0
+ trap
+.Laddr32:
+ .long target
+
+@ CHECK-ENCODING-LABEL: addr32
+@ CHECK-ENCODING-NEXT: ldr r0, [pc, #4]
+@ CHECK-ENCODING-NEXT: bx r0
+@ CHECK-ENCODING-NEXT: trap
+@ CHECK-ENCODING-NEXT: movs r0, r0
+@ CHECK-ENCODING-NEXT: movs r0, r0
+
+ .thumb_func
+addr32nb:
+ ldr r0, .Laddr32nb
+ bx r0
+ trap
+.Laddr32nb:
+ .long target(imgrel)
+
+@ CHECK-ENCODING-LABEL: addr32nb
+@ CHECK-ENCODING-NEXT: ldr.w r0, [pc, #4]
+@ CHECK-ENCODING-NEXT: bx r0
+@ CHECK-ENCODING-NEXT: trap
+@ CHECK-ENCODING-NEXT: movs r0, r0
+@ CHECK-ENCODING-NEXT: movs r0, r0
+
+ .thumb_func
+secrel:
+ ldr r0, .Lsecrel
+ bx r0
+ trap
+.Lsecrel:
+ .long target(secrel32)
+
+@ CHECK-ENCODING-LABEL: secrel
+@ CHECK-ENCODING-NEXT: ldr.w r0, [pc, #4]
+@ CHECK-ENCODING-NEXT: bx r0
+@ CHECK-ENCODING-NEXT: trap
+@ CHECK-ENCODING-NEXT: movs r0, r0
+@ CHECK-ENCODING-NEXT: movs r0, r0
+
+@ CHECK-RELOCATION: Relocations [
+@ CHECK-RELOCATION: Section (1) .text {
+@ CHECK-RELOCATION: 0x0 IMAGE_REL_ARM_BRANCH24T
+@ CHECK-RELOCATION: 0x4 IMAGE_REL_ARM_BRANCH20T
+@ CHECK-RELOCATION: 0x8 IMAGE_REL_ARM_BLX23T
+@ CHECK-RELOCATION: 0xC IMAGE_REL_ARM_MOV32T
+@ CHECK-RELOCATION: 0x1C IMAGE_REL_ARM_ADDR32
+@ CHECK-RELOCATION: 0x28 IMAGE_REL_ARM_ADDR32NB
+@ CHECK-RELOCATION: 0x34 IMAGE_REL_ARM_SECREL
+@ CHECK-RELOCATION: }
+@ CHECK-RELOCATION: ]
+
diff --git a/test/MC/ARM/comment.s b/test/MC/ARM/comment.s
new file mode 100644
index 000000000000..c24bc1aaa406
--- /dev/null
+++ b/test/MC/ARM/comment.s
@@ -0,0 +1,47 @@
+@ Tests to check that '@' does not get lexed as an identifier for arm
+@ RUN: llvm-mc %s -triple=armv7-linux-gnueabi | FileCheck %s
+@ RUN: llvm-mc %s -triple=armv7-linux-gnueabi 2>&1 | FileCheck %s --check-prefix=ERROR
+
+foo:
+ bl boo@plt should be ignored
+ bl goo@plt
+ .long bar@got to parse this as a comment
+ .long baz@got
+ add r0, r0@ignore this extra junk
+
+@ the symver directive should allow @ in the second symbol name
+defined1:
+defined2:
+defined3:
+bar:
+ .symver defined1, bar1@zed
+ .symver defined2, bar3@@zed
+ .symver defined3, bar5@@@zed
+
+far:
+ .long baz@got
+
+@CHECK-LABEL: foo:
+@CHECK: bl boo
+@CHECK-NOT: @
+@CHECK: bl goo
+@CHECK-NOT: @
+@CHECK: .long bar
+@CHECK-NOT: @
+@CHECK: .long baz
+@CHECK-NOT: @
+@CHECK: add r0, r0
+@CHECK-NOT: @
+
+@CHECK-LABEL: bar:
+@CHECK: bar1@zed = defined1
+@CHECK: bar3@@zed = defined2
+@CHECK: bar5@@@zed = defined3
+
+@ Make sure we did not mess up the parser state and it still lexes
+@ comments correctly by excluding the @ in normal symbols
+@CHECK-LABEL: far:
+@CHECK: .long baz
+@CHECK-NOT: @
+
+@ERROR-NOT: error:
diff --git a/test/MC/ARM/complex-operands.s b/test/MC/ARM/complex-operands.s
new file mode 100644
index 000000000000..72f8f88d2aa4
--- /dev/null
+++ b/test/MC/ARM/complex-operands.s
@@ -0,0 +1,40 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype asm -o - %s | FileCheck %s
+
+ .syntax unified
+
+ .data
+
+ .type .L_table_begin,%object
+.L_table_begin:
+ .rep 2
+ .long 0xd15ab1ed
+ .long 0x0ff1c1a1
+ .endr
+.L_table_end:
+
+ .text
+
+ .type return,%function
+return:
+ bx lr
+
+ .global arm_function
+ .type arm_function,%function
+arm_function:
+ mov r0, #:lower16:((.L_table_end - .L_table_begin) >> 2)
+ blx return
+
+@ CHECK-LABEL: arm_function
+@ CHECK: movw r0, :lower16:((.L_table_end-.L_table_begin)>>2)
+@ CHECK: blx return
+
+ .global thumb_function
+ .type thumb_function,%function
+thumb_function:
+ mov r0, #:lower16:((.L_table_end - .L_table_begin) >> 2)
+ blx return
+
+@ CHECK-LABEL: thumb_function
+@ CHECK: movw r0, :lower16:((.L_table_end-.L_table_begin)>>2)
+@ CHECK: blx return
+
diff --git a/test/MC/ARM/data-in-code.ll b/test/MC/ARM/data-in-code.ll
index 9fccf2e9f880..3bb017d7513f 100644
--- a/test/MC/ARM/data-in-code.ll
+++ b/test/MC/ARM/data-in-code.ll
@@ -144,6 +144,16 @@ exit:
;; ARM-NEXT: Other:
;; ARM-NEXT: Section: [[MIXED_SECT]]
+;; ARM: Symbol {
+;; ARM: Name: $d
+;; ARM-NEXT: Value: 0x0
+;; ARM-NEXT: Size: 0
+;; ARM-NEXT: Binding: Local (0x0)
+;; ARM-NEXT: Type: None (0x0)
+;; ARM-NEXT: Other: 0
+;; ARM-NEXT: Section: .ARM.exidx
+;; ARM-NEXT: }
+
;; ARM-NOT: ${{[atd]}}
;; TMB: Symbol {
diff --git a/test/MC/ARM/diagnostics.s b/test/MC/ARM/diagnostics.s
index 3c26f6d645c8..88c5fb5b688a 100644
--- a/test/MC/ARM/diagnostics.s
+++ b/test/MC/ARM/diagnostics.s
@@ -351,6 +351,24 @@
@ CHECK-ERRORS: ubfxgt r4, r5, #16, #17
@ CHECK-ERRORS: ^
+ @ Using pc for SBFX/UBFX
+ sbfx pc, r2, #1, #3
+ sbfx sp, pc, #4, #5
+ ubfx pc, r0, #0, #31
+ ubfx r14, pc, #1, #2
+@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: sbfx pc, r2, #1, #3
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: sbfx sp, pc, #4, #5
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: ubfx pc, r0, #0, #31
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: ubfx r14, pc, #1, #2
+@ CHECK-ERRORS: ^
+
@ Out of order Rt/Rt2 operands for ldrd
ldrd r4, r3, [r8]
ldrd r4, r3, [r8, #8]!
@@ -465,3 +483,11 @@
ldm sp!, {r0}^
@ CHECK-ERRORS: error: system STM cannot have writeback register
@ CHECK-ERRORS: error: writeback register only allowed on system LDM if PC in register-list
+
+foo2:
+ mov r0, foo2
+ movw r0, foo2
+@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
+@ CHECK-ERRORS: ^
diff --git a/test/MC/ARM/directive-align.s b/test/MC/ARM/directive-align.s
new file mode 100644
index 000000000000..d3e39cb1c563
--- /dev/null
+++ b/test/MC/ARM/directive-align.s
@@ -0,0 +1,28 @@
+@ RUN: llvm-mc -triple armv7-eabi %s | FileCheck %s
+
+ .data
+
+unaligned:
+ .byte 1
+ .align
+
+@ CHECK-LABEL: unaligned
+@ CHECK-NEXT: .byte 1
+@ CHECK-NEXT: .align 2
+
+aligned:
+ .long 0x1d10c1e5
+ .align
+
+@ CHECK-LABEL: aligned
+@ CHECK-NEXT: .long 487637477
+@ CHECK-NEXT: .align 2
+
+trailer:
+ .long 0xd1ab011c
+ .align 2
+
+@ CHECK-LABEL: trailer
+@ CHECK-NEXT: .long 3517645084
+@ CHECK-NEXT: .align 2
+
diff --git a/test/MC/ARM/directive-arch-armv2.s b/test/MC/ARM/directive-arch-armv2.s
new file mode 100644
index 000000000000..40857ca9fad8
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv2.s
@@ -0,0 +1,30 @@
+@ Test the .arch directive for armv2
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv2 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv2
+
+@ CHECK-ASM: .arch armv2
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv2a.s b/test/MC/ARM/directive-arch-armv2a.s
new file mode 100644
index 000000000000..62c2ace796f9
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv2a.s
@@ -0,0 +1,30 @@
+@ Test the .arch directive for armv2a
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv2a architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv2a
+
+@ CHECK-ASM: .arch armv2a
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 2A
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv3.s b/test/MC/ARM/directive-arch-armv3.s
new file mode 100644
index 000000000000..41cce659246b
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv3.s
@@ -0,0 +1,30 @@
+@ Test the .arch directive for armv3
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv3 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv3
+
+@ CHECK-ASM: .arch armv3
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 3
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv3m.s b/test/MC/ARM/directive-arch-armv3m.s
new file mode 100644
index 000000000000..8041da2e1e52
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv3m.s
@@ -0,0 +1,30 @@
+@ Test the .arch directive for armv3m
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv3m architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv3m
+
+@ CHECK-ASM: .arch armv3m
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 3M
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv4.s b/test/MC/ARM/directive-arch-armv4.s
new file mode 100644
index 000000000000..fb838428403f
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv4.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for armv4
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv4 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv4
+
+@ CHECK-ASM: .arch armv4
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 4
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v4
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
+@ Check that multiplication is supported
+ mul r4, r5, r6
+ mla r4, r5, r6, r3
+ smull r4, r5, r6, r3
+ umull r4, r5, r6, r3
+ smlal r4, r5, r6, r3
+ umlal r4, r5, r6, r3
+
diff --git a/test/MC/ARM/directive-arch-armv4t.s b/test/MC/ARM/directive-arch-armv4t.s
new file mode 100644
index 000000000000..33a5ae385753
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv4t.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv4t
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv4t architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv4t
+
+@ CHECK-ASM: .arch armv4t
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 4T
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v4T
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv5.s b/test/MC/ARM/directive-arch-armv5.s
new file mode 100644
index 000000000000..73a8c95db796
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv5.s
@@ -0,0 +1,30 @@
+@ Test the .arch directive for armv5
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv5 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv5
+
+@ CHECK-ASM: .arch armv5
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 5
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v5T
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv5t.s b/test/MC/ARM/directive-arch-armv5t.s
new file mode 100644
index 000000000000..66a75c4c89e2
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv5t.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv5t
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv5t architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv5t
+
+@ CHECK-ASM: .arch armv5t
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 5T
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v5T
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv5te.s b/test/MC/ARM/directive-arch-armv5te.s
new file mode 100644
index 000000000000..f3932d84a56f
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv5te.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv5te
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv5te architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv5te
+
+@ CHECK-ASM: .arch armv5te
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 5TE
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v5TE
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv6-m.s b/test/MC/ARM/directive-arch-armv6-m.s
new file mode 100644
index 000000000000..d89a627740bd
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv6-m.s
@@ -0,0 +1,30 @@
+@ Test the .arch directive for armv6-m
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv6-m architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv6-m
+
+@ CHECK-ASM: .arch armv6-m
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 6-M
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v6-M
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv6.s b/test/MC/ARM/directive-arch-armv6.s
new file mode 100644
index 000000000000..fb489207355e
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv6.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv6
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv6 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv6
+
+@ CHECK-ASM: .arch armv6
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 6
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v6
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv6j.s b/test/MC/ARM/directive-arch-armv6j.s
new file mode 100644
index 000000000000..e27beef1ebaf
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv6j.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv6j
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv6j architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv6j
+
+@ CHECK-ASM: .arch armv6j
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 6J
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v6
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv6t2.s b/test/MC/ARM/directive-arch-armv6t2.s
new file mode 100644
index 000000000000..85f4491b0c6f
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv6t2.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv6t2
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv6t2 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv6t2
+
+@ CHECK-ASM: .arch armv6t2
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 6T2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v6T2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv6z.s b/test/MC/ARM/directive-arch-armv6z.s
new file mode 100644
index 000000000000..78a9ab1d5de7
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv6z.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for armv6z
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv6z architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv6z
+
+@ CHECK-ASM: .arch armv6z
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 6Z
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v6KZ
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: Virtualization_use
+@ CHECK-ATTR: Description: TrustZone
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv6zk.s b/test/MC/ARM/directive-arch-armv6zk.s
new file mode 100644
index 000000000000..48d9cc1a2bf1
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv6zk.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for armv6zk
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv6zk architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv6zk
+
+@ CHECK-ASM: .arch armv6zk
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 6ZK
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v6KZ
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: Virtualization_use
+@ CHECK-ATTR: Description: TrustZone
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv7-a.s b/test/MC/ARM/directive-arch-armv7-a.s
new file mode 100644
index 000000000000..792429a4e047
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv7-a.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for armv7-a
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv7-a architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv7-a
+
+@ CHECK-ASM: .arch armv7-a
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 7-A
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Application
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv7-m.s b/test/MC/ARM/directive-arch-armv7-m.s
new file mode 100644
index 000000000000..058f23ba2bea
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv7-m.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv7-m
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv7-m architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv7-m
+
+@ CHECK-ASM: .arch armv7-m
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 7-M
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Microcontroller
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv7-r.s b/test/MC/ARM/directive-arch-armv7-r.s
new file mode 100644
index 000000000000..99481f70c5f2
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv7-r.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for armv7-r
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv7-r architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv7-r
+
+@ CHECK-ASM: .arch armv7-r
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 7-R
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Real-time
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv7.s b/test/MC/ARM/directive-arch-armv7.s
new file mode 100644
index 000000000000..0cd499666ea6
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv7.s
@@ -0,0 +1,30 @@
+@ Test the .arch directive for armv7
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv7 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv7
+
+@ CHECK-ASM: .arch armv7
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv7a.s b/test/MC/ARM/directive-arch-armv7a.s
new file mode 100644
index 000000000000..3bb202fb278b
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv7a.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for armv7-a
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv7-a architecture when using the armv7a alias.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv7a
+
+@ CHECK-ASM: .arch armv7-a
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 7-A
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Application
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv7m.s b/test/MC/ARM/directive-arch-armv7m.s
new file mode 100644
index 000000000000..0e9f546387bf
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv7m.s
@@ -0,0 +1,34 @@
+@ Test the .arch directive for armv7-m
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv7-m architecture when using the armv7m alias.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv7m
+
+@ CHECK-ASM: .arch armv7-m
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 7-M
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Microcontroller
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv7r.s b/test/MC/ARM/directive-arch-armv7r.s
new file mode 100644
index 000000000000..9009d1341281
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv7r.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for armv7-r
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv7-r architecture when using the armv7r alias.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv7r
+
+@ CHECK-ASM: .arch armv7-r
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 7-R
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v7
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Real-time
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv8-a.s b/test/MC/ARM/directive-arch-armv8-a.s
new file mode 100644
index 000000000000..636378155e70
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv8-a.s
@@ -0,0 +1,46 @@
+@ Test the .arch directive for armv8-a
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv8-a architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv8-a
+
+@ CHECK-ASM: .arch armv8-a
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 8-A
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v8
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Application
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: MPextension_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: Virtualization_use
+@ CHECK-ATTR: Description: TrustZone + Virtualization Extensions
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-armv8a.s b/test/MC/ARM/directive-arch-armv8a.s
new file mode 100644
index 000000000000..4a1915c53436
--- /dev/null
+++ b/test/MC/ARM/directive-arch-armv8a.s
@@ -0,0 +1,46 @@
+@ Test the .arch directive for armv8-a
+
+@ This test case will check the default .ARM.attributes value for the
+@ armv8-a architecture when using the armv8a alias.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch armv8a
+
+@ CHECK-ASM: .arch armv8-a
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: 8-A
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v8
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch_profile
+@ CHECK-ATTR: Description: Application
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: MPextension_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: Virtualization_use
+@ CHECK-ATTR: Description: TrustZone + Virtualization Extensions
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-iwmmxt.s b/test/MC/ARM/directive-arch-iwmmxt.s
new file mode 100644
index 000000000000..db25ec683fe5
--- /dev/null
+++ b/test/MC/ARM/directive-arch-iwmmxt.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for iwmmxt
+
+@ This test case will check the default .ARM.attributes value for the
+@ iwmmxt architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch iwmmxt
+
+@ CHECK-ASM: .arch iwmmxt
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: IWMMXT
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v5TE
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: WMMX_arch
+@ CHECK-ATTR: Description: WMMXv1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch-iwmmxt2.s b/test/MC/ARM/directive-arch-iwmmxt2.s
new file mode 100644
index 000000000000..de94f97b449e
--- /dev/null
+++ b/test/MC/ARM/directive-arch-iwmmxt2.s
@@ -0,0 +1,38 @@
+@ Test the .arch directive for iwmmxt2
+
+@ This test case will check the default .ARM.attributes value for the
+@ iwmmxt2 architecture.
+
+@ RUN: llvm-mc -triple arm-eabi -filetype asm %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ASM
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .arch iwmmxt2
+
+@ CHECK-ASM: .arch iwmmxt2
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_name
+@ CHECK-ATTR: Value: IWMMXT2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: CPU_arch
+@ CHECK-ATTR: Description: ARM v5TE
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: ARM_ISA_use
+@ CHECK-ATTR: Description: Permitted
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: THUMB_ISA_use
+@ CHECK-ATTR: Description: Thumb-1
+@ CHECK-ATTR: }
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: WMMX_arch
+@ CHECK-ATTR: Description: WMMXv2
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-arch_extension-crc.s b/test/MC/ARM/directive-arch_extension-crc.s
new file mode 100644
index 000000000000..9e4dedabca48
--- /dev/null
+++ b/test/MC/ARM/directive-arch_extension-crc.s
@@ -0,0 +1,57 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple armv8-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V8
+
+ .syntax unified
+
+ .arm
+
+ .arch_extension crc
+@ CHECK-V7: error: architectural extension 'crc' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension crc
+@ CHECK-V7-NEXT: ^
+
+ .type crc,%function
+crc:
+ crc32b r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+ crc32h r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+ crc32w r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+
+ crc32cb r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+ crc32ch r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+ crc32cw r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+
+ .arch_extension nocrc
+@ CHECK-V7: error: architectural extension 'crc' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension nocrc
+@ CHECK-V7-NEXT: ^
+
+ .type nocrc,%function
+nocrc:
+ crc32b r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+@ CHECK-V8: error: instruction requires: crc arm-mode
+ crc32h r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+@ CHECK-V8: error: instruction requires: crc arm-mode
+ crc32w r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+@ CHECK-V8: error: instruction requires: crc arm-mode
+
+ crc32cb r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+@ CHECK-V8: error: instruction requires: crc arm-mode
+ crc32ch r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+@ CHECK-V8: error: instruction requires: crc arm-mode
+ crc32cw r0, r1, r2
+@ CHECK-V7: error: instruction requires: crc armv8
+@ CHECK-V8: error: instruction requires: crc arm-mode
+
diff --git a/test/MC/ARM/directive-arch_extension-crypto.s b/test/MC/ARM/directive-arch_extension-crypto.s
new file mode 100644
index 000000000000..898ba06cf017
--- /dev/null
+++ b/test/MC/ARM/directive-arch_extension-crypto.s
@@ -0,0 +1,108 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple armv8-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V8
+@ RUN: not llvm-mc -triple thumbv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple thumbv8-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V8
+
+ .syntax unified
+
+ .arch_extension crypto
+@ CHECK-V7: error: architectural extension 'crypto' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension crypto
+@ CHECK-V7-NEXT: ^
+
+ .type crypto,%function
+crypto:
+ vmull.p64 q0, d0, d1
+@ CHECK-V7: error: instruction requires: crypto armv8
+
+ aesd.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+ aese.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+ aesimc.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+ aesmc.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+
+ sha1h.32 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha1su1.32 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha256su0.32 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+
+ sha1c.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha1m.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha1p.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha1su0.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha256h.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha256h2.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+ sha256su1.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+
+ .arch_extension nocrypto
+@ CHECK-V7: error: architectural extension 'crypto' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension nocrypto
+@ CHECK-V7-NEXT: ^
+
+ .type nocrypto,%function
+nocrypto:
+ vmull.p64 q0, d0, d1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+
+ aesd.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ aese.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ aesimc.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ aesmc.8 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+
+ sha1h.32 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha1su1.32 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha256su0.32 q0, q1
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+
+ sha1c.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha1m.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha1p.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha1su0.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha256h.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha256h2.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+ sha256su1.32 q0, q1, q2
+@ CHECK-V7: error: instruction requires: crypto armv8
+@ CHECK-V8: error: instruction requires: crypto
+
diff --git a/test/MC/ARM/directive-arch_extension-fp.s b/test/MC/ARM/directive-arch_extension-fp.s
new file mode 100644
index 000000000000..0327dd74337e
--- /dev/null
+++ b/test/MC/ARM/directive-arch_extension-fp.s
@@ -0,0 +1,344 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple armv8-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V8
+@ RUN: not llvm-mc -triple thumbv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple thumbv8-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V8
+
+ .syntax unified
+
+ .arch_extension fp
+@ CHECK-V7: error: architectural extension 'fp' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension fp
+@ CHECK-V7-NEXT: ^
+
+ .type fp,%function
+fp:
+ vmrs r0, mvfr2
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vselgt.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vselge.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vseleq.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vselvs.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vmaxnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vminnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vselgt.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vselge.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vseleq.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vselvs.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vmaxnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vminnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vcvtb.f64.f16 d0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtb.f16.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtt.f64.f16 d0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtt.f16.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vcvta.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvta.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvta.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvta.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vrintz.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintz.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintz.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintz.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vrinta.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrinta.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrinta.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrinta.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ .arch_extension nofp
+@ CHECK-V7: error: architectural extension 'fp' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension nofp
+@ CHECK-V7-NEXT: ^
+
+ .type nofp,%function
+nofp:
+ vmrs r0, mvfr2
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+
+ vselgt.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vselge.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vseleq.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vselvs.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vmaxnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vminnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+
+ vselgt.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vselge.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vseleq.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vselvs.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vmaxnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vminnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
+ vcvtb.f64.f16 d0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtb.f16.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtt.f64.f16 d0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtt.f16.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
+ vcvta.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvta.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvta.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvta.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtn.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtn.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtn.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtn.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtp.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtp.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtp.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtp.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtm.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtm.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtm.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtm.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
+ vrintz.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintz.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintz.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintz.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintr.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintr.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintr.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintr.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintx.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintx.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintx.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintx.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
+ vrinta.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrinta.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrinta.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrinta.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintn.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintn.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintn.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintn.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintp.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintp.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintp.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintp.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintm.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintm.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintm.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintm.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
diff --git a/test/MC/ARM/directive-arch_extension-idiv.s b/test/MC/ARM/directive-arch_extension-idiv.s
new file mode 100644
index 000000000000..c63bbfbb4cab
--- /dev/null
+++ b/test/MC/ARM/directive-arch_extension-idiv.s
@@ -0,0 +1,53 @@
+@ RUN: not llvm-mc -triple armv6-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv6 -check-prefix CHECK-V6
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv7 -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple armv7m-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv7M -check-prefix CHECK-V7M
+@ RUN: not llvm-mc -triple thumbv6-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv6 -check-prefix CHECK-V6
+@ RUN: not llvm-mc -triple thumbv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv7 -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple thumbv7m-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv7M -check-prefix CHECK-V7M
+
+ .syntax unified
+
+ .arch_extension idiv
+@ CHECK-V6: error: architectural extension 'idiv' is not allowed for the current base architecture
+@ CHECK-V6-NEXT: .arch_extension idiv
+@ CHECK-V6-NEXT: ^
+@ CHECK-V7M: error: architectural extension 'idiv' is not allowed for the current base architecture
+@ CHECK-V7M-NEXT: .arch_extension idiv
+@ CHECK-V7M-NEXT: ^
+
+ .type idiv,%function
+idiv:
+ udiv r0, r1, r2
+@ CHECK-ARMv6: error: instruction requires: divide in ARM
+@ CHECK-THUMBv6: error: instruction requires: divide in ARM arm-mode
+ sdiv r0, r1, r2
+@ CHECK-ARMv6: error: instruction requires: divide in ARM
+@ CHECK-THUMBv6: error: instruction requires: divide in ARM arm-mode
+
+ .arch_extension noidiv
+@ CHECK-V6: error: architectural extension 'idiv' is not allowed for the current base architecture
+@ CHECK-V6-NEXT: .arch_extension noidiv
+@ CHECK-V6-NEXT: ^
+@ CHECK-V7M: error: architectural extension 'idiv' is not allowed for the current base architecture
+@ CHECK-V7M-NEXT: .arch_extension noidiv
+@ CHECK-V7M-NEXT: ^
+
+ .type noidiv,%function
+noidiv:
+ udiv r0, r1, r2
+@ CHECK-ARMv6: error: instruction requires: divide in ARM
+@ CHECK-THUMBv6: error: instruction requires: divide in ARM arm-mode
+@ CHECK-ARMv7: error: instruction requires: divide in ARM arm-mode
+@ CHECK-THUMBv7: error: instruction requires: divide in THUMB
+ sdiv r0, r1, r2
+@ CHECK-ARMv6: error: instruction requires: divide in ARM
+@ CHECK-THUMBv6: error: instruction requires: divide in ARM arm-mode
+@ CHECK-ARMv7: error: instruction requires: divide in ARM arm-mode
+@ CHECK-THUMBv7: error: instruction requires: divide in THUMB
+
diff --git a/test/MC/ARM/directive-arch_extension-mp.s b/test/MC/ARM/directive-arch_extension-mp.s
new file mode 100644
index 000000000000..9046c2153155
--- /dev/null
+++ b/test/MC/ARM/directive-arch_extension-mp.s
@@ -0,0 +1,38 @@
+@ RUN: not llvm-mc -triple armv6-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv6 -check-prefix CHECK-V6
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv7 -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple armv7m-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv7M -check-prefix CHECK-V7M
+@ RUN: not llvm-mc -triple thumbv6-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv6 -check-prefix CHECK-V6
+@ RUN: not llvm-mc -triple thumbv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv7 -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple thumbv7m-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv7M -check-prefix CHECK-V7M
+
+ .syntax unified
+
+ .arch_extension mp
+@ CHECK-V6: error: architectural extension 'mp' is not allowed for the current base architecture
+@ CHECK-V6-NEXT: .arch_extension mp
+@ CHECK-V6-NEXT: ^
+
+ .type mp,%function
+mp:
+ pldw [r0]
+@ CHECK-V6: error: instruction requires: mp-extensions armv7
+@ CHECK-V7M: error: instruction requires: mp-extensions
+
+ .arch_extension nomp
+@ CHECK-V6: error: architectural extension 'mp' is not allowed for the current base architecture
+@ CHECK-V6-NEXT: .arch_extension nomp
+@ CHECK-V6-NEXT: ^
+
+ .type nomp,%function
+nomp:
+ pldw [r0]
+@ CHECK-V6: error: instruction requires: mp-extensions armv7
+@ CHECK-V7: error: instruction requires: mp-extensions
+@ CHECK-V7M: error: instruction requires: mp-extensions
+
diff --git a/test/MC/ARM/directive-arch_extension-sec.s b/test/MC/ARM/directive-arch_extension-sec.s
new file mode 100644
index 000000000000..55ead8506ab1
--- /dev/null
+++ b/test/MC/ARM/directive-arch_extension-sec.s
@@ -0,0 +1,31 @@
+@ RUN: not llvm-mc -triple armv6-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv6 -check-prefix CHECK-V6
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-ARMv7 -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple thumbv6-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv6 -check-prefix CHECK-V6
+@ RUN: not llvm-mc -triple thumbv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-THUMBv7 -check-prefix CHECK-V7
+
+ .syntax unified
+
+ .arch_extension sec
+@ CHECK-V6: error: architectural extension 'sec' is not allowed for the current base architecture
+@ CHECK-V6-NEXT: .arch_extension sec
+@ CHECK-V6-NEXT: ^
+
+ .type sec,%function
+sec:
+ smc #0
+@ CHECK-V6: error: instruction requires: TrustZone
+
+ .arch_extension nosec
+@ CHECK-V6: error: architectural extension 'sec' is not allowed for the current base architecture
+@ CHECK-V6-NEXT: .arch_extension nosec
+@ CHECK-V6-NEXT: ^
+
+ .type nosec,%function
+nosec:
+ smc #0
+@ CHECK-V7: error: instruction requires: TrustZone
+
diff --git a/test/MC/ARM/directive-arch_extension-simd.s b/test/MC/ARM/directive-arch_extension-simd.s
new file mode 100644
index 000000000000..c9dbf21541c1
--- /dev/null
+++ b/test/MC/ARM/directive-arch_extension-simd.s
@@ -0,0 +1,275 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple armv8-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V8
+@ RUN: not llvm-mc -triple thumbv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V7
+@ RUN: not llvm-mc -triple thumbv8-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s -check-prefix CHECK-V8
+
+ .syntax unified
+
+ .arch_extension simd
+@ CHECK-V7: error: architectural extension 'simd' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension simd
+@ CHECK-V7-NEXT: ^
+
+ .type simd,%function
+simd:
+ vmaxnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vminnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vmaxnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vminnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vcvta.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvta.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvta.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvta.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtn.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtp.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vcvtm.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vrintz.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintz.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintz.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintz.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintr.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintx.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ vrinta.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrinta.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrinta.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrinta.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintn.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintp.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+ vrintm.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+
+ .arch_extension nosimd
+@ CHECK-V7: error: architectural extension 'simd' is not allowed for the current base architecture
+@ CHECK-V7-NEXT: .arch_extension nosimd
+@ CHECK-V7-NEXT: ^
+
+ .type nosimd,%function
+nosimd:
+ vmaxnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vminnm.f32 s0, s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+
+ vmaxnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vminnm.f64 d0, d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
+ vcvta.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvta.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvta.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvta.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtn.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtn.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtn.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtn.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtp.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtp.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtp.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtp.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtm.s32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtm.u32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vcvtm.s32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vcvtm.u32.f64 s0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
+ vrintz.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintz.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintz.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintz.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintr.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintr.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintr.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintr.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintx.f32 s0, s1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintx.f64 d0, d1
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintx.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintx.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
+ vrinta.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrinta.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrinta.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrinta.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintn.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintn.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintn.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintn.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintp.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintp.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintp.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintp.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintm.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintm.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+ vrintm.f32.f32 s0, s0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: FPARMv8
+ vrintm.f64.f64 d0, d0
+@ CHECK-V7: error: instruction requires: FPARMv8
+@ CHECK-V8: error: instruction requires: double precision VFP FPARMv8
+
diff --git a/test/MC/ARM/directive-eabi_attribute-2.s b/test/MC/ARM/directive-eabi_attribute-2.s
new file mode 100644
index 000000000000..8f00ac807802
--- /dev/null
+++ b/test/MC/ARM/directive-eabi_attribute-2.s
@@ -0,0 +1,98 @@
+@ RUN: llvm-mc -triple armv7-elf -filetype asm -o - %s | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .eabi_attribute Tag_CPU_raw_name, "Cortex-A9"
+@ CHECK: .eabi_attribute 4, "Cortex-A9"
+ .eabi_attribute Tag_CPU_name, "cortex-a9"
+@ CHECK: .cpu cortex-a9
+ .eabi_attribute Tag_CPU_arch, 10
+@ CHECK: .eabi_attribute 6, 10
+ .eabi_attribute Tag_CPU_arch_profile, 'A'
+@ CHECK: .eabi_attribute 7, 65
+ .eabi_attribute Tag_ARM_ISA_use, 0
+@ CHECK: .eabi_attribute 8, 0
+ .eabi_attribute Tag_THUMB_ISA_use, 2
+@ CHECK: .eabi_attribute 9, 2
+ .eabi_attribute Tag_FP_arch, 3
+@ CHECK: .eabi_attribute 10, 3
+ .eabi_attribute Tag_WMMX_arch, 0
+@ CHECK: .eabi_attribute 11, 0
+ .eabi_attribute Tag_Advanced_SIMD_arch, 1
+@ CHECK: .eabi_attribute 12, 1
+ .eabi_attribute Tag_PCS_config, 2
+@ CHECK: .eabi_attribute 13, 2
+ .eabi_attribute Tag_ABI_PCS_R9_use, 0
+@ CHECK: .eabi_attribute 14, 0
+ .eabi_attribute Tag_ABI_PCS_RW_data, 0
+@ CHECK: .eabi_attribute 15, 0
+ .eabi_attribute Tag_ABI_PCS_RO_data, 0
+@ CHECK: .eabi_attribute 16, 0
+ .eabi_attribute Tag_ABI_PCS_GOT_use, 0
+@ CHECK: .eabi_attribute 17, 0
+ .eabi_attribute Tag_ABI_PCS_wchar_t, 4
+@ CHECK: .eabi_attribute 18, 4
+ .eabi_attribute Tag_ABI_FP_rounding, 1
+@ CHECK: .eabi_attribute 19, 1
+ .eabi_attribute Tag_ABI_FP_denormal, 2
+@ CHECK: .eabi_attribute 20, 2
+ .eabi_attribute Tag_ABI_FP_exceptions, 1
+@ CHECK: .eabi_attribute 21, 1
+ .eabi_attribute Tag_ABI_FP_user_exceptions, 1
+@ CHECK: .eabi_attribute 22, 1
+ .eabi_attribute Tag_ABI_FP_number_model, 3
+@ CHECK: .eabi_attribute 23, 3
+ .eabi_attribute Tag_ABI_align_needed, 1
+@ CHECK: .eabi_attribute 24, 1
+ .eabi_attribute Tag_ABI_align_preserved, 2
+@ CHECK: .eabi_attribute 25, 2
+ .eabi_attribute Tag_ABI_enum_size, 3
+@ CHECK: .eabi_attribute 26, 3
+ .eabi_attribute Tag_ABI_HardFP_use, 0
+@ CHECK: .eabi_attribute 27, 0
+ .eabi_attribute Tag_ABI_VFP_args, 1
+@ CHECK: .eabi_attribute 28, 1
+ .eabi_attribute Tag_ABI_WMMX_args, 0
+@ CHECK: .eabi_attribute 29, 0
+ .eabi_attribute Tag_ABI_FP_optimization_goals, 1
+@ CHECK: .eabi_attribute 31, 1
+ .eabi_attribute Tag_compatibility, 1
+@ CHECK: .eabi_attribute 32, 1
+ .eabi_attribute Tag_compatibility, 1, "aeabi"
+@ CHECK: .eabi_attribute 32, 1, "aeabi"
+ .eabi_attribute Tag_CPU_unaligned_access, 0
+@ CHECK: .eabi_attribute 34, 0
+ .eabi_attribute Tag_FP_HP_extension, 0
+@ CHECK: .eabi_attribute 36, 0
+ .eabi_attribute Tag_ABI_FP_16bit_format, 0
+@ CHECK: .eabi_attribute 38, 0
+ .eabi_attribute Tag_MPextension_use, 0
+@ CHECK: .eabi_attribute 42, 0
+ .eabi_attribute Tag_DIV_use, 0
+@ CHECK: .eabi_attribute 44, 0
+ .eabi_attribute Tag_nodefaults, 0
+@ CHECK: .eabi_attribute 64, 0
+ .eabi_attribute Tag_also_compatible_with, "gnu"
+@ CHECK: .eabi_attribute 65, "gnu"
+ .eabi_attribute Tag_T2EE_use, 0
+@ CHECK: .eabi_attribute 66, 0
+ .eabi_attribute Tag_conformance, "2.09"
+@ CHECK: .eabi_attribute 67, "2.09"
+ .eabi_attribute Tag_Virtualization_use, 0
+@ CHECK: .eabi_attribute 68, 0
+
+@ ===--- Compatibility Checks ---===
+
+ .eabi_attribute Tag_ABI_align8_needed, 1
+@ CHECK: .eabi_attribute 24, 1
+ .eabi_attribute Tag_ABI_align8_preserved, 2
+@ CHECK: .eabi_attribute 25, 2
+
+@ ===--- GNU AS Compatibility Checks ---===
+
+ .eabi_attribute 2 * 2 + 1, "cortex-a9"
+@ CHECK: .cpu cortex-a9
+ .eabi_attribute 2 * 2 + 2, 5 * 2
+@ CHECK: .eabi_attribute 6, 10
+
diff --git a/test/MC/ARM/directive-eabi_attribute-diagnostics.s b/test/MC/ARM/directive-eabi_attribute-diagnostics.s
new file mode 100644
index 000000000000..d1ae352b25f4
--- /dev/null
+++ b/test/MC/ARM/directive-eabi_attribute-diagnostics.s
@@ -0,0 +1,36 @@
+@ RUN: not llvm-mc -triple armv7-elf -filetype asm -o /dev/null %s 2>&1 \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .eabi_attribute Tag_unknown_name, 0
+@ CHECK: error: attribute name not recognised: Tag_unknown_name
+@ CHECK: .eabi_attribute Tag_unknown_name
+@ CHECK: ^
+
+ .eabi_attribute [non_constant_expression], 0
+@ CHECK: error: expected numeric constant
+@ CHECK: .eabi_attribute [non_constant_expression], 0
+@ CHECK: ^
+
+ .eabi_attribute 42, "forty two"
+@ CHECK: error: expected numeric constant
+@ CHECK: .eabi_attribute 42, "forty two"
+@ CHECK: ^
+
+ .eabi_attribute 43, 43
+@ CHECK: error: bad string constant
+@ CHECK: .eabi_attribute 43, 43
+@ CHECK: ^
+
+ .eabi_attribute 0
+@ CHECK: error: comma expected
+@ CHECK: .eabi_attribute 0
+@ CHECK: ^
+
+ .eabi_attribute Tag_MPextension_use_old, 0
+@ CHECK: error: attribute name not recognised: Tag_MPextension_use_old
+@ CHECK: .eabi_attribute Tag_MPextension_use_old, 0
+@ CHECK: ^
+
diff --git a/test/MC/ARM/directive-eabi_attribute-overwrite.s b/test/MC/ARM/directive-eabi_attribute-overwrite.s
new file mode 100644
index 000000000000..6fdded3d8312
--- /dev/null
+++ b/test/MC/ARM/directive-eabi_attribute-overwrite.s
@@ -0,0 +1,17 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s -check-prefix CHECK-ATTR
+
+ .syntax unified
+ .thumb
+
+ .eabi_attribute Tag_compatibility, 1
+ .eabi_attribute Tag_compatibility, 1, "aeabi"
+
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: Value: 1, AEABI
+@ CHECK-ATTR: TagName: compatibility
+@ CHECK-ATTR: Description: AEABI Conformant
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-even.s b/test/MC/ARM/directive-even.s
new file mode 100644
index 000000000000..b92c9f95f5d4
--- /dev/null
+++ b/test/MC/ARM/directive-even.s
@@ -0,0 +1,70 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s | llvm-readobj -s -sd \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+
+ .text
+
+ .even
+
+ .global aligned_function
+ .type aligned_function,%function
+aligned_function:
+ bkpt
+
+ .space 5
+
+ .even
+
+ .global unaligned_function
+ .type unaligned_function,%function
+unaligned_function:
+ bkpt
+
+@ CHECK: Section {
+@ CHECK: Name: .text
+@ CHECK: SectionData (
+@ CHECK: 0000: 700020E1 00000000 00007000 20E1
+@ CHECK: )
+@ CHECK: }
+
+ .data
+
+ .space 15
+
+ .even
+
+ .global classifiable
+ .type classifiable,%object
+classifiable:
+ .byte 0xf1
+ .byte 0x51
+ .byte 0xa5
+ .byte 0xc1
+ .byte 0x00
+ .byte 0x00
+ .byte 0x1e
+ .byte 0xab
+
+ .even
+
+ .global declassified
+ .type declassified,%object
+declassified:
+ .byte 0x51
+ .byte 0xa5
+ .byte 0xc1
+ .byte 0xde
+ .byte 0x00
+ .byte 0x00
+ .byte 0xed
+ .byte 0xf1
+
+@ CHECK: Section {
+@ CHECK: Name: .data
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 00000000 00000000 00000000
+@ CHECK: 0010: F151A5C1 00001EAB 51A5C1DE 0000EDF1
+@ CHECK: )
+@ CHECK: }
+
diff --git a/test/MC/ARM/directive-fpu-multiple.s b/test/MC/ARM/directive-fpu-multiple.s
index 6a93f246822f..de2baaf27165 100644
--- a/test/MC/ARM/directive-fpu-multiple.s
+++ b/test/MC/ARM/directive-fpu-multiple.s
@@ -3,24 +3,16 @@
@ The later .fpu directive should overwrite the earlier one.
@ See also: directive-fpu-multiple2.s.
-@ RUN: llvm-mc < %s -triple arm-unknown-linux-gnueabi -filetype=obj \
-@ RUN: | llvm-readobj -s -sd | FileCheck %s
+@ RUN: llvm-mc -triple arm-eabi -filetype obj %s | llvm-readobj -arm-attributes \
+@ RUN: | FileCheck %s -check-prefix CHECK-ATTR
.fpu neon
.fpu vfpv4
-@ CHECK: Name: .ARM.attributes
-@ CHECK-NEXT: Type: SHT_ARM_ATTRIBUTES (0x70000003)
-@ CHECK-NEXT: Flags [ (0x0)
-@ CHECK-NEXT: ]
-@ CHECK-NEXT: Address: 0x0
-@ CHECK-NEXT: Offset: 0x34
-@ CHECK-NEXT: Size: 18
-@ CHECK-NEXT: Link: 0
-@ CHECK-NEXT: Info: 0
-@ CHECK-NEXT: AddressAlignment: 1
-@ CHECK-NEXT: EntrySize: 0
-@ CHECK-NEXT: SectionData (
-@ CHECK-NEXT: 0000: 41110000 00616561 62690001 07000000
-@ CHECK-NEXT: 0010: 0A05
-@ CHECK-NEXT: )
+@ CHECK-ATTR: FileAttributes {
+@ CHECK-ATTR: Attribute {
+@ CHECK-ATTR: TagName: FP_arch
+@ CHECK-ATTR: Description: VFPv4
+@ CHECK-ATTR: }
+@ CHECK-ATTR: }
+
diff --git a/test/MC/ARM/directive-fpu-softvfp.s b/test/MC/ARM/directive-fpu-softvfp.s
new file mode 100644
index 000000000000..f10845fcb341
--- /dev/null
+++ b/test/MC/ARM/directive-fpu-softvfp.s
@@ -0,0 +1,8 @@
+@ RUN: llvm-mc < %s -triple armv7-unknown-linux-gnueabi -filetype=obj -o /dev/null
+
+@ Check softvfp as the FPU name.
+
+@ Expected result: The integrated-as should be able to assemble this file
+@ without problems.
+
+ .fpu softvfp
diff --git a/test/MC/ARM/directive-literals.s b/test/MC/ARM/directive-literals.s
new file mode 100644
index 000000000000..eb09867d8aa7
--- /dev/null
+++ b/test/MC/ARM/directive-literals.s
@@ -0,0 +1,26 @@
+@ RUN: llvm-mc -triple arm %s | FileCheck %s
+
+ .data
+
+short:
+ .short 0
+ .short 0xdefe
+
+@ CHECK-LABEL: short
+@ CHECK-NEXT: .short 0
+@ CHECK-NEXT: .short 57086
+
+hword:
+ .hword 0
+ .hword 0xdefe
+
+@ CHECK-LABEL: hword
+@ CHECK-NEXT: .short 0
+@ CHECK-NEXT: .short 57086
+
+word:
+ .word 3
+
+@ CHECK-LABEL: word
+@ CHECK-NEXT: .long 3
+
diff --git a/test/MC/ARM/directive-object_arch-2.s b/test/MC/ARM/directive-object_arch-2.s
new file mode 100644
index 000000000000..3aca434a1e4c
--- /dev/null
+++ b/test/MC/ARM/directive-object_arch-2.s
@@ -0,0 +1,22 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s
+
+ .syntax unified
+
+ .object_arch armv4
+ .arch armv7
+
+@ CHECK: FileAttributes {
+@ CHECK: Attribute {
+@ CHECK: Tag: 5
+@ CHECK: TagName: CPU_name
+@ CHECK: Value: 7
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 6
+@ CHECK: Value: 1
+@ CHECK: TagName: CPU_arch
+@ CHECK: Description: ARM v4
+@ CHECK: }
+@ CHECK: }
+
diff --git a/test/MC/ARM/directive-object_arch-3.s b/test/MC/ARM/directive-object_arch-3.s
new file mode 100644
index 000000000000..5dd26197ab16
--- /dev/null
+++ b/test/MC/ARM/directive-object_arch-3.s
@@ -0,0 +1,11 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype asm -o - %s | FileCheck %s
+
+ .syntax unified
+
+ .arch armv7
+ .object_arch armv4
+
+@ CHECK: .text
+@ CHECK: .arch armv7
+@ CHECK: .object_arch armv4
+
diff --git a/test/MC/ARM/directive-object_arch-diagnostics.s b/test/MC/ARM/directive-object_arch-diagnostics.s
new file mode 100644
index 000000000000..91b15c8d2d83
--- /dev/null
+++ b/test/MC/ARM/directive-object_arch-diagnostics.s
@@ -0,0 +1,23 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null %s 2>&1 \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+
+ .object_arch i686
+
+@ CHECK: error: unknown architecture 'i686'
+@ CHECK: .object_arch i686
+@ CHECK: ^
+
+ .object_arch armv4!
+
+@ CHECK: error: unexpected token
+@ CHECK: .object_arch armv4!
+@ CHECK: ^
+
+ .object_arch, invalid
+
+@ CHECK: error: unexpected token
+@ CHECK: .object_arch, invalid
+@ CHECK: ^
+
diff --git a/test/MC/ARM/directive-object_arch.s b/test/MC/ARM/directive-object_arch.s
new file mode 100644
index 000000000000..0707077630e0
--- /dev/null
+++ b/test/MC/ARM/directive-object_arch.s
@@ -0,0 +1,22 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s \
+@ RUN: | llvm-readobj -arm-attributes | FileCheck %s
+
+ .syntax unified
+
+ .arch armv7
+ .object_arch armv4
+
+@ CHECK: FileAttributes {
+@ CHECK: Attribute {
+@ CHECK: Tag: 5
+@ CHECK: TagName: CPU_name
+@ CHECK: Value: 7
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 6
+@ CHECK: Value: 1
+@ CHECK: TagName: CPU_arch
+@ CHECK: Description: ARM v4
+@ CHECK: }
+@ CHECK: }
+
diff --git a/test/MC/ARM/directive-tlsdescseq-diagnostics.s b/test/MC/ARM/directive-tlsdescseq-diagnostics.s
new file mode 100644
index 000000000000..0d33b5894fea
--- /dev/null
+++ b/test/MC/ARM/directive-tlsdescseq-diagnostics.s
@@ -0,0 +1,35 @@
+@ RUN: not llvm-mc -triple armv7-linux-gnu -filetype asm -o /dev/null %s 2>&1 \
+@ RUN: | FileCheck %s
+
+ .type missing_variable,%function
+missing_variable:
+.tlsdescseq
+
+@ CHECK: error: expected variable after '.tlsdescseq' directive
+@ CHECK: .tlsdescseq
+@ CHECK: ^
+
+ .type bad_expression,%function
+bad_expression:
+.tlsdescseq variable(tlsdesc)
+
+@ CHECK: error: unexpected token
+@ CHECK: .tlsdescseq variable(tlsdesc)
+@ CHECK: ^
+
+ .type trailing_garbage,%function
+trailing_garbage:
+.tlsdescseq variable,
+
+@ CHECK: error: unexpected token
+@ CHECK: .tlsdescseq variable,
+@ CHECK: ^
+
+ .type invalid_use,%function
+invalid_use:
+ blx invalid(tlsdescseq)
+
+@ CHECK: error: invalid variant 'tlsdescseq'
+@ CHECK: blx invalid(tlsdescseq)
+@ CHECK: ^
+
diff --git a/test/MC/ARM/directive-tlsdescseq.s b/test/MC/ARM/directive-tlsdescseq.s
new file mode 100644
index 000000000000..12db0589d7ad
--- /dev/null
+++ b/test/MC/ARM/directive-tlsdescseq.s
@@ -0,0 +1,33 @@
+@ RUN: llvm-mc -triple armv7-linux-gnu -filetype obj -o - %s | llvm-readobj -r \
+@ RUN: | FileCheck %s
+@ RUN: llvm-mc -triple armv7-linux-gnu -filetype asm -o - %s \
+@ RUN: | FileCheck -check-prefix CHECK-ASM %s
+
+ .type tlsdescseq,%function
+tlsdescseq:
+ ldr r1, [pc, #8]
+1:
+.tlsdescseq variable
+ add r2, pc, r1
+.tlsdescseq variable
+ ldr r3, [r1, #4]
+.tlsdescseq variable
+ blx r3
+2:
+ .word variable(tlsdesc) + (. - 1b)
+
+@ CHECK: Relocations [
+@ CHECK: 0x4 R_ARM_TLS_DESCSEQ variable 0x0
+@ CHECK: 0x8 R_ARM_TLS_DESCSEQ variable 0x0
+@ CHECK: 0xC R_ARM_TLS_DESCSEQ variable 0x0
+@ CHECK: 0x10 R_ARM_TLS_GOTDESC variable 0x0
+@ CHECK: ]
+
+@ CHECK-ASM: ldr r1, [pc, #8]
+@ CHECK-ASM: .tlsdescseq variable
+@ CHECK-ASM: add r2, pc, r1
+@ CHECK-ASM: .tlsdescseq variable
+@ CHECK-ASM: ldr r3, [r1, #4]
+@ CHECK-ASM: .tlsdescseq variable
+@ CHECK-ASM: blx r3
+
diff --git a/test/MC/ARM/directive-word-diagnostics.s b/test/MC/ARM/directive-word-diagnostics.s
new file mode 100644
index 000000000000..e68595b77878
--- /dev/null
+++ b/test/MC/ARM/directive-word-diagnostics.s
@@ -0,0 +1,12 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s
+
+ .cpu armv7
+
+ .type double_diagnostics,%function
+double_diagnostics:
+ .word invalid(invalid) + 32
+
+@ CHECK: error: invalid variant 'invalid'
+@ CHECK-NOT: error: unexpected token at start of statement
+
diff --git a/test/MC/ARM/dot-req-case-insensitive.s b/test/MC/ARM/dot-req-case-insensitive.s
new file mode 100644
index 000000000000..c1ca5667fcb5
--- /dev/null
+++ b/test/MC/ARM/dot-req-case-insensitive.s
@@ -0,0 +1,20 @@
+@ RUN: llvm-mc -triple=arm < %s | FileCheck %s
+ .syntax unified
+_foo:
+
+ OBJECT .req r2
+ mov r4, OBJECT
+ mov r4, oBjEcT
+ .unreq oBJECT
+
+_foo2:
+ OBJECT .req r5
+ mov r4, OBJECT
+ .unreq OBJECT
+
+@ CHECK-LABEL: _foo:
+@ CHECK: mov r4, r2
+@ CHECK: mov r4, r2
+
+@ CHECK-LABEL: _foo2:
+@ CHECK: mov r4, r5
diff --git a/test/MC/ARM/dwarf-asm-multiple-sections.s b/test/MC/ARM/dwarf-asm-multiple-sections.s
new file mode 100644
index 000000000000..ed1b89eff3cd
--- /dev/null
+++ b/test/MC/ARM/dwarf-asm-multiple-sections.s
@@ -0,0 +1,79 @@
+// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -fdebug-compilation-dir=/tmp
+// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF %s
+// RUN: llvm-objdump -r %t | FileCheck -check-prefix RELOC %s
+// RUN: not llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 2 2>&1 | FileCheck -check-prefix VERSION %s
+// RUN: not llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 1 2>&1 | FileCheck -check-prefix DWARF1 %s
+// RUN: not llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -dwarf-version 5 2>&1 | FileCheck -check-prefix DWARF5 %s
+ .section .text, "ax"
+a:
+ mov r0, r0
+
+ .section foo, "ax"
+b:
+ mov r1, r1
+
+// DWARF: .debug_abbrev contents:
+// DWARF: Abbrev table for offset: 0x00000000
+// DWARF: [1] DW_TAG_compile_unit DW_CHILDREN_yes
+// DWARF: DW_AT_stmt_list DW_FORM_data4
+// DWARF: DW_AT_ranges DW_FORM_data4
+// DWARF: DW_AT_name DW_FORM_string
+// DWARF: DW_AT_comp_dir DW_FORM_string
+// DWARF: DW_AT_producer DW_FORM_string
+// DWARF: DW_AT_language DW_FORM_data2
+
+// DWARF: .debug_info contents:
+// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_compile_unit [1]
+// DWARF-NOT: DW_TAG_
+// DWARF: DW_AT_ranges [DW_FORM_data4] (0x00000000)
+
+// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_label [2] *
+// DWARF-NEXT: DW_AT_name [DW_FORM_string] ("a")
+
+// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_label [2] *
+// DWARF-NEXT: DW_AT_name [DW_FORM_string] ("b")
+
+
+// DWARF: .debug_aranges contents:
+// DWARF-NEXT: Address Range Header: length = 0x00000024, version = 0x0002, cu_offset = 0x00000000, addr_size = 0x04, seg_size = 0x00
+// DWARF-NEXT: [0x00000000 - 0x00000004)
+// DWARF-NEXT: [0x00000000 - 0x00000004)
+
+
+// DWARF: .debug_line contents:
+// DWARF: 0x0000000000000000 9 0 1 0 0 is_stmt
+// DWARF-NEXT: 0x0000000000000004 9 0 1 0 0 is_stmt end_sequence
+// DWARF-NEXT: 0x0000000000000000 13 0 1 0 0 is_stmt
+// DWARF-NEXT: 0x0000000000000004 13 0 1 0 0 is_stmt end_sequence
+
+
+// DWARF: .debug_ranges contents:
+// DWARF: 00000000 ffffffff 00000000
+// DWARF: 00000000 00000000 00000004
+// DWARF: 00000000 ffffffff 00000000
+// DWARF: 00000000 00000000 00000004
+// DWARF: 00000000 <End of list>
+
+
+
+// RELOC: RELOCATION RECORDS FOR [.rel.debug_info]:
+// RELOC-NEXT: 00000006 R_ARM_ABS32 .debug_abbrev
+// RELOC-NEXT: 0000000c R_ARM_ABS32 .debug_line
+// RELOC-NEXT: 00000010 R_ARM_ABS32 .debug_ranges
+// RELOC-NEXT: R_ARM_ABS32 .text
+// RELOC-NEXT: R_ARM_ABS32 foo
+
+// RELOC: RELOCATION RECORDS FOR [.rel.debug_ranges]:
+// RELOC-NEXT: 00000004 R_ARM_ABS32 .text
+// RELOC-NEXT: 00000014 R_ARM_ABS32 foo
+
+// RELOC: RELOCATION RECORDS FOR [.rel.debug_aranges]:
+// RELOC-NEXT: 00000006 R_ARM_ABS32 .debug_info
+// RELOC-NEXT: 00000010 R_ARM_ABS32 .text
+// RELOC-NEXT: 00000018 R_ARM_ABS32 foo
+
+
+// VERSION: {{.*}} error: DWARF2 only supports one section per compilation unit
+
+// DWARF1: Dwarf version 1 is not supported.
+// DWARF5: Dwarf version 5 is not supported.
diff --git a/test/MC/ARM/dwarf-asm-no-code.s b/test/MC/ARM/dwarf-asm-no-code.s
new file mode 100644
index 000000000000..7d06a4190091
--- /dev/null
+++ b/test/MC/ARM/dwarf-asm-no-code.s
@@ -0,0 +1,27 @@
+// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -fdebug-compilation-dir=/tmp
+// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF %s
+// RUN: llvm-objdump -r %t | FileCheck -check-prefix RELOC %s
+
+// If there is no code in an assembly file, no debug info is produced
+
+.section .data, "aw"
+a:
+.long 42
+
+// DWARF: .debug_abbrev contents:
+// DWARF-NEXT: < EMPTY >
+
+// DWARF: .debug_info contents:
+
+// DWARF: .debug_aranges contents:
+
+// DWARF: .debug_line contents:
+
+// DWARF: .debug_ranges contents:
+
+
+// RELOC-NOT: RELOCATION RECORDS FOR [.rel.debug_info]:
+
+// RELOC-NOT: RELOCATION RECORDS FOR [.rel.debug_ranges]:
+
+// RELOC-NOT: RELOCATION RECORDS FOR [.rel.debug_aranges]:
diff --git a/test/MC/ARM/dwarf-asm-nonstandard-section.s b/test/MC/ARM/dwarf-asm-nonstandard-section.s
new file mode 100644
index 000000000000..497a39ad1162
--- /dev/null
+++ b/test/MC/ARM/dwarf-asm-nonstandard-section.s
@@ -0,0 +1,57 @@
+// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -fdebug-compilation-dir=/tmp
+// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF %s
+// RUN: llvm-objdump -r %t | FileCheck -check-prefix RELOC %s
+
+ .section foo, "ax"
+b:
+ mov r1, r1
+
+// DWARF: .debug_abbrev contents:
+// DWARF: Abbrev table for offset: 0x00000000
+// DWARF: [1] DW_TAG_compile_unit DW_CHILDREN_yes
+// DWARF: DW_AT_stmt_list DW_FORM_data4
+// DWARF: DW_AT_low_pc DW_FORM_addr
+// DWARF: DW_AT_high_pc DW_FORM_addr
+// DWARF: DW_AT_name DW_FORM_string
+// DWARF: DW_AT_comp_dir DW_FORM_string
+// DWARF: DW_AT_producer DW_FORM_string
+// DWARF: DW_AT_language DW_FORM_data2
+
+// DWARF: .debug_info contents:
+// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_compile_unit [1]
+// DWARF-NOT: DW_TAG_
+// DWARF: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+// DWARF: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000004)
+
+// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_label [2] *
+// DWARF-NEXT: DW_AT_name [DW_FORM_string] ("b")
+
+
+// DWARF: .debug_aranges contents:
+// DWARF-NEXT: Address Range Header: length = 0x0000001c, version = 0x0002, cu_offset = 0x00000000, addr_size = 0x04, seg_size = 0x00
+// DWARF-NEXT: [0x00000000 - 0x00000004)
+
+
+// DWARF: .debug_line contents:
+// DWARF: 0x0000000000000000 7 0 1 0 0 is_stmt
+// DWARF-NEXT: 0x0000000000000004 7 0 1 0 0 is_stmt end_sequence
+
+
+// DWARF: .debug_ranges contents:
+// DWARF-NOT: {{[0-9a-f]}}
+// DWARF: .debug_pubnames contents:
+
+
+
+// RELOC: RELOCATION RECORDS FOR [.rel.debug_info]:
+// RELOC-NEXT: 00000006 R_ARM_ABS32 .debug_abbrev
+// RELOC-NEXT: 0000000c R_ARM_ABS32 .debug_line
+// RELOC-NEXT: R_ARM_ABS32 foo
+// RELOC-NEXT: R_ARM_ABS32 foo
+// RELOC-NEXT: R_ARM_ABS32 foo
+
+// RELOC-NOT: RELOCATION RECORDS FOR [.rel.debug_ranges]:
+
+// RELOC: RELOCATION RECORDS FOR [.rel.debug_aranges]:
+// RELOC-NEXT: 00000006 R_ARM_ABS32 .debug_info
+// RELOC-NEXT: 00000010 R_ARM_ABS32 foo
diff --git a/test/MC/ARM/dwarf-asm-single-section.s b/test/MC/ARM/dwarf-asm-single-section.s
new file mode 100644
index 000000000000..c57e6498a38a
--- /dev/null
+++ b/test/MC/ARM/dwarf-asm-single-section.s
@@ -0,0 +1,56 @@
+// RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o %t -g -fdebug-compilation-dir=/tmp
+// RUN: llvm-dwarfdump %t | FileCheck -check-prefix DWARF %s
+// RUN: llvm-objdump -r %t | FileCheck -check-prefix RELOC %s
+
+ .section .text, "ax"
+a:
+ mov r0, r0
+
+
+// DWARF: .debug_abbrev contents:
+// DWARF: Abbrev table for offset: 0x00000000
+// DWARF: [1] DW_TAG_compile_unit DW_CHILDREN_yes
+// DWARF: DW_AT_stmt_list DW_FORM_data4
+// DWARF: DW_AT_low_pc DW_FORM_addr
+// DWARF: DW_AT_high_pc DW_FORM_addr
+// DWARF: DW_AT_name DW_FORM_string
+// DWARF: DW_AT_comp_dir DW_FORM_string
+// DWARF: DW_AT_producer DW_FORM_string
+// DWARF: DW_AT_language DW_FORM_data2
+
+// DWARF: .debug_info contents:
+// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_compile_unit [1]
+// DWARF-NOT: DW_TAG_
+// DWARF: DW_AT_low_pc [DW_FORM_addr] (0x0000000000000000)
+// DWARF: DW_AT_high_pc [DW_FORM_addr] (0x0000000000000004)
+
+// DWARF: 0x{{[0-9a-f]+}}: DW_TAG_label [2] *
+// DWARF-NEXT: DW_AT_name [DW_FORM_string] ("a")
+
+
+// DWARF: .debug_aranges contents:
+// DWARF-NEXT: Address Range Header: length = 0x0000001c, version = 0x0002, cu_offset = 0x00000000, addr_size = 0x04, seg_size = 0x00
+// DWARF-NEXT: [0x00000000 - 0x00000004)
+
+// DWARF: .debug_line contents:
+// DWARF: 0x0000000000000000 7 0 1 0 0 is_stmt
+// DWARF-NEXT: 0x0000000000000004 7 0 1 0 0 is_stmt end_sequence
+
+
+// DWARF: .debug_ranges contents:
+// DWARF-NOT: {{[0-9a-f]}}
+// DWARF: .debug_pubnames contents:
+
+
+// RELOC: RELOCATION RECORDS FOR [.rel.debug_info]:
+// RELOC-NEXT: 00000006 R_ARM_ABS32 .debug_abbrev
+// RELOC-NEXT: 0000000c R_ARM_ABS32 .debug_line
+// RELOC-NEXT: R_ARM_ABS32 .text
+// RELOC-NEXT: R_ARM_ABS32 .text
+// RELOC-NEXT: R_ARM_ABS32 .text
+
+// RELOC-NOT: RELOCATION RECORDS FOR [.rel.debug_ranges]:
+
+// RELOC: RELOCATION RECORDS FOR [.rel.debug_aranges]:
+// RELOC-NEXT: 00000006 R_ARM_ABS32 .debug_info
+// RELOC-NEXT: 00000010 R_ARM_ABS32 .text
diff --git a/test/MC/ARM/dwarf-cfi-initial-state.s b/test/MC/ARM/dwarf-cfi-initial-state.s
new file mode 100644
index 000000000000..0d1c08af7282
--- /dev/null
+++ b/test/MC/ARM/dwarf-cfi-initial-state.s
@@ -0,0 +1,17 @@
+# RUN: llvm-mc < %s -triple=armv7-linux-gnueabi -filetype=obj -o - \
+# RUN: | llvm-dwarfdump - | FileCheck %s
+
+_proc:
+.cfi_sections .debug_frame
+.cfi_startproc
+bx lr
+.cfi_endproc
+
+# CHECK: .debug_frame contents:
+# CHECK: CIE
+# CHECK-NOT: DW_CFA
+# When llvm-dwarfdump prints the full info for the DW_CFA_def_cfa
+# field, we can check that here too.
+# CHECK: DW_CFA_def_cfa:
+# CHECK-NOT: DW_CFA
+# CHECK: FDE
diff --git a/test/MC/ARM/eh-directive-cantunwind-diagnostics.s b/test/MC/ARM/eh-directive-cantunwind-diagnostics.s
index 640cc3005fcb..9eca16498e14 100644
--- a/test/MC/ARM/eh-directive-cantunwind-diagnostics.s
+++ b/test/MC/ARM/eh-directive-cantunwind-diagnostics.s
@@ -24,7 +24,7 @@ func1:
@ CHECK: error: .personality can't be used with .cantunwind directive
@ CHECK: .personality __gxx_personality_v0
@ CHECK: ^
-@ CHECK: error: .cantunwind was specified here
+@ CHECK: note: .cantunwind was specified here
@ CHECK: .cantunwind
@ CHECK: ^
.fnend
@@ -44,7 +44,7 @@ func2:
@ CHECK: error: .handlerdata can't be used with .cantunwind directive
@ CHECK: .handlerdata
@ CHECK: ^
-@ CHECK: error: .cantunwind was specified here
+@ CHECK: note: .cantunwind was specified here
@ CHECK: .cantunwind
@ CHECK: ^
.fnend
@@ -64,7 +64,7 @@ func3:
@ CHECK: error: .cantunwind can't be used with .personality directive
@ CHECK: .cantunwind
@ CHECK: ^
-@ CHECK: error: .personality was specified here
+@ CHECK: note: .personality was specified here
@ CHECK: .personality __gxx_personality_v0
@ CHECK: ^
.fnend
@@ -84,7 +84,7 @@ func4:
@ CHECK: error: .cantunwind can't be used with .handlerdata directive
@ CHECK: .cantunwind
@ CHECK: ^
-@ CHECK: error: .handlerdata was specified here
+@ CHECK: note: .handlerdata was specified here
@ CHECK: .handlerdata
@ CHECK: ^
.fnend
diff --git a/test/MC/ARM/eh-directive-fnstart-diagnostics.s b/test/MC/ARM/eh-directive-fnstart-diagnostics.s
index 75ddd9faebc1..11364dee39b0 100644
--- a/test/MC/ARM/eh-directive-fnstart-diagnostics.s
+++ b/test/MC/ARM/eh-directive-fnstart-diagnostics.s
@@ -24,7 +24,7 @@ func1:
@ CHECK: error: .fnstart starts before the end of previous one
@ CHECK: .fnstart
@ CHECK: ^
-@ CHECK: error: previous .fnstart starts here
+@ CHECK: note: .fnstart was specified here
@ CHECK: .fnstart
@ CHECK: ^
func2:
diff --git a/test/MC/ARM/eh-directive-movsp-diagnostics.s b/test/MC/ARM/eh-directive-movsp-diagnostics.s
new file mode 100644
index 000000000000..519e7d742f37
--- /dev/null
+++ b/test/MC/ARM/eh-directive-movsp-diagnostics.s
@@ -0,0 +1,102 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .global false_start
+ .type false_start,%function
+ .thumb_func
+false_start:
+ .movsp r7
+
+@ CHECK: error: .fnstart must precede .movsp directive
+@ CHECK: .movsp r7
+@ CHECK: ^
+
+ .global beyond_saving
+ .type beyond_saving,%function
+ .thumb_func
+beyond_saving:
+ .fnstart
+ .setfp r11, sp, #8
+ add r11, sp, #8
+ .movsp r7
+ mov r7, r11
+ .fnend
+
+@ CHECK: error: unexpected .movsp directive
+@ CHECK: .movsp r7
+@ CHECK: ^
+
+
+ .global sp_invalid
+ .type sp_invalid,%function
+ .thumb_func
+sp_invalid:
+ .fnstart
+ .movsp r13
+ mov sp, sp
+ .fnend
+
+@ CHECK: error: sp and pc are not permitted in .movsp directive
+@ CHECK: .movsp r13
+@ CHECK: ^
+
+
+ .global pc_invalid
+ .type pc_invalid,%function
+ .thumb_func
+pc_invalid:
+ .fnstart
+ .movsp r15
+ mov sp, pc
+ .fnend
+
+@ CHECK: error: sp and pc are not permitted in .movsp directive
+@ CHECK: .movsp r15
+@ CHECK: ^
+
+
+ .global constant_required
+ .type constant_required,%function
+ .thumb_func
+constant_required:
+ .fnstart
+ .movsp r11,
+ mov sp, r11
+ .fnend
+
+@ CHECK: error: expected #constant
+@ CHECK: .movsp r11,
+@ CHECK: ^
+
+
+ .global constant_constant
+ .type constant_constant,%function
+ .thumb_func
+constant_constant:
+ .fnstart
+ .movsp r11, #constant
+ mov sp, r11
+ .fnend
+
+@ CHECK: error: offset must be an immediate constant
+@ CHECK: .movsp r11, #constant
+@ CHECK: ^
+
+
+ .arm
+
+ .global register_required
+ .type register_required,%function
+register_required:
+ .fnstart
+ .movsp #42
+ mov sp, #42
+ .fnend
+
+@ CHECK: error: register expected
+@ CHECK: .movsp #42
+@ CHECK: ^
+
diff --git a/test/MC/ARM/eh-directive-movsp.s b/test/MC/ARM/eh-directive-movsp.s
new file mode 100644
index 000000000000..620f5b769d4c
--- /dev/null
+++ b/test/MC/ARM/eh-directive-movsp.s
@@ -0,0 +1,44 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s | llvm-readobj -s -sd \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .section .duplicate
+
+ .global duplicate
+ .type duplicate,%function
+duplicate:
+ .fnstart
+ .setfp sp, sp, #8
+ add sp, sp, #8
+ .movsp r11
+ mov r11, sp
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.duplicate
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 B09B9B80
+@ CHECK: )
+@ CHECK: }
+
+
+ .section .squash
+
+ .global squash
+ .type squash,%function
+squash:
+ .fnstart
+ .movsp ip
+ mov ip, sp
+ .save {fp, ip, lr}
+ stmfd sp!, {fp, ip, lr}
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.squash
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 9C808580
+@ CHECK: )
+@ CHECK: }
diff --git a/test/MC/ARM/eh-directive-personalityindex-diagnostics.s b/test/MC/ARM/eh-directive-personalityindex-diagnostics.s
new file mode 100644
index 000000000000..2dc2c8045a65
--- /dev/null
+++ b/test/MC/ARM/eh-directive-personalityindex-diagnostics.s
@@ -0,0 +1,122 @@
+@ RUN: not llvm-mc -triple armv7-linux-eabi -filetype asm -o /dev/null %s 2>&1 \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .global function
+ .type function,%function
+ .thumb_func
+function:
+ .personalityindex 0
+
+@ CHECK: error: .fnstart must precede .personalityindex directive
+@ CHECK: .personalityindex 0
+@ CHECK: ^
+
+ .global ununwindable
+ .type ununwindable,%function
+ .thumb_func
+ununwindable:
+ .fnstart
+ .cantunwind
+ .personalityindex 0
+ .fnend
+
+@ CHECK: error: .personalityindex cannot be used with .cantunwind
+@ CHECK: .personalityindex 0
+@ CHECK: ^
+@ CHECK: note: .cantunwind was specified here
+@ CHECK: .cantunwind
+@ CHECK: ^
+
+ .global nodata
+ .type nodata,%function
+ .thumb_func
+nodata:
+ .fnstart
+ .handlerdata
+ .personalityindex 0
+ .fnend
+
+@ CHECK: error: .personalityindex must precede .handlerdata directive
+@ CHECK: .personalityindex 0
+@ CHECK: ^
+@ CHECK: note: .handlerdata was specified here
+@ CHECK: .handlerdata
+@ CHECK: ^
+
+ .global multiple_personality
+ .type multiple_personality,%function
+ .thumb_func
+multiple_personality:
+ .fnstart
+ .personality __aeabi_personality_pr0
+ .personalityindex 0
+ .fnend
+
+@ CHECK: error: multiple personality directives
+@ CHECK: .personalityindex 0
+@ CHECK: ^
+@ CHECK: note: .personality was specified here
+@ CHECK: .personality __aeabi_personality_pr0
+@ CHECK: ^
+@ CHECK: note: .personalityindex was specified here
+@ CHECK: .personalityindex 0
+@ CHECK: ^
+
+ .global multiple_personality_indicies
+ .type multiple_personality_indicies,%function
+ .thumb_func
+multiple_personality_indicies:
+ .fnstart
+ .personalityindex 0
+ .personalityindex 1
+ .fnend
+
+@ CHECK: error: multiple personality directives
+@ CHECK: .personalityindex 1
+@ CHECK: ^
+@ CHECK: note: .personalityindex was specified here
+@ CHECK: .personalityindex 0
+@ CHECK: ^
+@ CHECK: note: .personalityindex was specified here
+@ CHECK: .personalityindex 1
+@ CHECK: ^
+
+ .global invalid_expression
+ .type invalid_expression,%function
+ .thumb_func
+invalid_expression:
+ .fnstart
+ .personalityindex <expression>
+ .fnend
+
+@ CHECK: error: unknown token in expression
+@ CHECK: .personalityindex <expression>
+@ CHECK: ^
+
+ .global nonconstant_expression
+ .type nonconstant_expression,%function
+ .thumb_func
+nonconstant_expression:
+ .fnstart
+ .personalityindex nonconstant_expression
+ .fnend
+
+@ CHECK: error: index must be a constant number
+@ CHECK: .personalityindex nonconstant_expression
+@ CHECK: ^
+
+ .global bad_index
+ .type bad_index,%function
+ .thumb_func
+bad_index:
+ .fnstart
+ .personalityindex 42
+ .fnend
+
+@ CHECK: error: personality routine index should be in range [0-3]
+@ CHECK: .personalityindex 42
+@ CHECK: ^
+
diff --git a/test/MC/ARM/eh-directive-personalityindex.s b/test/MC/ARM/eh-directive-personalityindex.s
new file mode 100644
index 000000000000..551722750933
--- /dev/null
+++ b/test/MC/ARM/eh-directive-personalityindex.s
@@ -0,0 +1,202 @@
+@ RUN: llvm-mc -triple armv7-linux-eabi -filetype obj -o - %s \
+@ RUN: | llvm-readobj -s -sd -sr | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+
+ .section .pr0
+
+ .global pr0
+ .type pr0,%function
+ .thumb_func
+pr0:
+ .fnstart
+ .personalityindex 0
+ bx lr
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.pr0
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 B0B0B080
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .rel.ARM.exidx.pr0
+@ CHECK: Relocations [
+@ CHECK: 0x0 R_ARM_PREL31 .pr0 0x0
+@ CHECK: 0x0 R_ARM_NONE __aeabi_unwind_cpp_pr0 0x0
+@ CHECK: ]
+@ CHECK: }
+
+ .section .pr0.nontrivial
+
+ .global pr0_nontrivial
+ .type pr0_nontrivial,%function
+ .thumb_func
+pr0_nontrivial:
+ .fnstart
+ .personalityindex 0
+ .pad #0x10
+ sub sp, sp, #0x10
+ add sp, sp, #0x10
+ bx lr
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.pr0.nontrivial
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 B0B00380
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .rel.ARM.exidx.pr0.nontrivial
+@ CHECK: Relocations [
+@ CHECK: 0x0 R_ARM_PREL31 .pr0.nontrivial 0x0
+@ CHECK: 0x0 R_ARM_NONE __aeabi_unwind_cpp_pr0 0x0
+@ CHECK: ]
+@ CHECK: }
+
+ .section .pr1
+
+ .global pr1
+ .type pr1,%function
+ .thumb_func
+pr1:
+ .fnstart
+ .personalityindex 1
+ bx lr
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.extab.pr1
+@ CHECK: SectionData (
+@ CHECK: 0000: B0B00081 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.pr1
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .rel.ARM.exidx.pr1
+@ CHECK: Relocations [
+@ CHECK: 0x0 R_ARM_PREL31 .pr1 0x0
+@ CHECK: 0x0 R_ARM_NONE __aeabi_unwind_cpp_pr1 0x0
+@ CHECK: 0x4 R_ARM_PREL31 .ARM.extab.pr1 0x0
+@ CHECK: ]
+@ CHECK: }
+
+ .section .pr1.nontrivial
+
+ .global pr1_nontrivial
+ .type pr1_nontrivial,%function
+ .thumb_func
+pr1_nontrivial:
+ .fnstart
+ .personalityindex 1
+ .pad #0x10
+ sub sp, sp, #0x10
+ add sp, sp, #0x10
+ bx lr
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.extab.pr1.nontrivial
+@ CHECK: SectionData (
+@ CHECK: 0000: B0030081 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.pr1.nontrivial
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .rel.ARM.exidx.pr1.nontrivial
+@ CHECK: Relocations [
+@ CHECK: 0x0 R_ARM_PREL31 .pr1.nontrivial 0x0
+@ CHECK: 0x0 R_ARM_NONE __aeabi_unwind_cpp_pr1 0x0
+@ CHECK: 0x4 R_ARM_PREL31 .ARM.extab.pr1.nontrivial 0x0
+@ CHECK: ]
+@ CHECK: }
+
+ .section .pr2
+
+ .global pr2
+ .type pr2,%function
+ .thumb_func
+pr2:
+ .fnstart
+ .personalityindex 2
+ bx lr
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.extab.pr2
+@ CHECK: SectionData (
+@ CHECK: 0000: B0B00082 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.pr2
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .rel.ARM.exidx.pr2
+@ CHECK: Relocations [
+@ CHECK: 0x0 R_ARM_PREL31 .pr2 0x0
+@ CHECK: 0x0 R_ARM_NONE __aeabi_unwind_cpp_pr2 0x0
+@ CHECK: 0x4 R_ARM_PREL31 .ARM.extab.pr2 0x0
+@ CHECK: ]
+@ CHECK: }
+
+ .section .pr2.nontrivial
+ .type pr2_nontrivial,%function
+ .thumb_func
+pr2_nontrivial:
+ .fnstart
+ .personalityindex 2
+ .pad #0x10
+ sub sp, sp, #0x10
+ add sp, sp, #0x10
+ bx lr
+ .fnend
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.extab.pr2.nontrivial
+@ CHECK: SectionData (
+@ CHECK: 0000: B0030082 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .ARM.exidx.pr2.nontrivial
+@ CHECK: SectionData (
+@ CHECK: 0000: 00000000 00000000
+@ CHECK: )
+@ CHECK: }
+
+@ CHECK: Section {
+@ CHECK: Name: .rel.ARM.exidx.pr2.nontrivial
+@ CHECK: Relocations [
+@ CHECK: 0x0 R_ARM_PREL31 .pr2.nontrivial 0x0
+@ CHECK: 0x0 R_ARM_NONE __aeabi_unwind_cpp_pr2 0x0
+@ CHECK: 0x4 R_ARM_PREL31 .ARM.extab.pr2.nontrivial 0x0
+@ CHECK: ]
+@ CHECK: }
+
diff --git a/test/MC/ARM/eh-directive-save-diagnoatics.s b/test/MC/ARM/eh-directive-save-diagnostics.s
index 0e6d7404a3af..0e6d7404a3af 100644
--- a/test/MC/ARM/eh-directive-save-diagnoatics.s
+++ b/test/MC/ARM/eh-directive-save-diagnostics.s
diff --git a/test/MC/ARM/eh-directive-setfp.s b/test/MC/ARM/eh-directive-setfp.s
index dfa79e622d2e..ce7fe10b43c4 100644
--- a/test/MC/ARM/eh-directive-setfp.s
+++ b/test/MC/ARM/eh-directive-setfp.s
@@ -9,7 +9,7 @@
@ then libunwind will reconstruct the stack pointer from the frame pointer.
@ The reconstruction code is implemented by two different unwind opcode:
@ (i) the unwind opcode to copy stack offset from the other register, and
-@ (ii) the unwind opcode to add or substract the stack offset.
+@ (ii) the unwind opcode to add or subtract the stack offset.
@
@ This file includes several cases separated by different range of -offset
@
diff --git a/test/MC/ARM/eh-directive-unwind_raw-diagnostics.s b/test/MC/ARM/eh-directive-unwind_raw-diagnostics.s
new file mode 100644
index 000000000000..72a208ed83d1
--- /dev/null
+++ b/test/MC/ARM/eh-directive-unwind_raw-diagnostics.s
@@ -0,0 +1,73 @@
+@ RUN: not llvm-mc -triple armv7-linux-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+
+ .type require_fnstart,%function
+require_fnstart:
+ .unwind_raw 0, 0
+
+@ CHECK: error: .fnstart must precede .unwind_raw directive
+@ CHECK: .unwind_raw 0, 0
+@ CHECK: ^
+
+ .type check_arguments,%function
+check_arguments:
+ .fnstart
+ .unwind_raw
+ .fnend
+
+@ CHECK: error: expected expression
+@ CHECK: .unwind_raw
+@ CHECK: ^
+
+ .type check_stack_offset,%function
+check_stack_offset:
+ .fnstart
+ .unwind_raw ., 0
+ .fnend
+
+@ CHECK: error: offset must be a constant
+@ CHECK: .unwind_raw ., 0
+@ CHECK: ^
+
+ .type comma_check,%function
+comma_check:
+ .fnstart
+ .unwind_raw 0
+ .fnend
+
+@ CHECK: error: expected comma
+@ CHECK: .unwind_raw 0
+@ CHECK: ^
+
+ .type require_opcode,%function
+require_opcode:
+ .fnstart
+ .unwind_raw 0,
+ .fnend
+
+@ CHECK: error: expected opcode expression
+@ CHECK: .unwind_raw 0,
+@ CHECK: ^
+
+ .type require_opcode_constant,%function
+require_opcode_constant:
+ .fnstart
+ .unwind_raw 0, .
+ .fnend
+
+@ CHECK: error: opcode value must be a constant
+@ CHECK: .unwind_raw 0, .
+@ CHECK: ^
+
+ .type check_opcode_range,%function
+check_opcode_range:
+ .fnstart
+ .unwind_raw 0, 0x100
+ .fnend
+
+@ CHECK: error: invalid opcode
+@ CHECK: .unwind_raw 0, 0x100
+@ CHECK: ^
+
diff --git a/test/MC/ARM/eh-directive-unwind_raw.s b/test/MC/ARM/eh-directive-unwind_raw.s
new file mode 100644
index 000000000000..c617aa37c497
--- /dev/null
+++ b/test/MC/ARM/eh-directive-unwind_raw.s
@@ -0,0 +1,110 @@
+@ RUN: llvm-mc -triple armv7-linux-eabi -filetype obj -o - %s | llvm-readobj -u \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+
+ .type save,%function
+ .thumb_func
+save:
+ .fnstart
+ .unwind_raw 4, 0xb1, 0x01
+ push {r0}
+ pop {r0}
+ bx lr
+ .fnend
+
+ .type empty,%function
+ .thumb_func
+empty:
+ .fnstart
+ .unwind_raw 0, 0xb0
+ bx lr
+ .fnend
+
+ .type extended,%function
+ .thumb_func
+extended:
+ .fnstart
+ .unwind_raw 12, 0x9b, 0x40, 0x84, 0x80, 0xb0, 0xb0
+ @ .save {fp, lr}
+ stmfd sp!, {fp, lr}
+ @ .setfp fp, sp, #4
+ add fp, sp, #4
+ @ .pad #8
+ sub sp, sp, #8
+ add sp, sp, #8
+ sub fp, sp, #4
+ ldmfd sp!, {fp, lr}
+ bx lr
+ .fnend
+
+ .type refuse,%function
+ .thumb_func
+refuse:
+ .fnstart
+ .unwind_raw 0, 0x80, 0x00
+ bx lr
+ .fnend
+
+ .type stack_adjust,%function
+ .thumb_func
+stack_adjust:
+ .fnstart
+ .setfp fp, sp, #32
+ .unwind_raw 24, 0xc2
+ .fnend
+
+@ CHECK: UnwindInformation {
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx
+@ CHECK: Entries [
+@ CHECK: Entry {
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0xB1 0x01 ; pop {r0}
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: Entry {
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: Entry {
+@ CHECK: ExceptionHandlingTable: .ARM.extab
+@ CHECK: Model: Compact
+@ CHECK: PersonalityIndex: 1
+@ CHECK: Opcodes [
+@ CHECK: 0x9B ; vsp = r11
+@ CHECK: 0x40 ; vsp = vsp - 4
+@ CHECK: 0x84 0x80 ; pop {fp, lr}
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: Entry {
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0x80 0x00 ; refuse to unwind
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: Entry {
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0x9B ; vsp = r11
+@ CHECK: 0x4D ; vsp = vsp - 56
+@ CHECK: 0xC2 ; pop {wR10, wR11, wR12}
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: }
+
diff --git a/test/MC/ARM/elf-jump24-fixup.s b/test/MC/ARM/elf-jump24-fixup.s
index 75a4b869dc60..affdcda8d74e 100644
--- a/test/MC/ARM/elf-jump24-fixup.s
+++ b/test/MC/ARM/elf-jump24-fixup.s
@@ -6,4 +6,4 @@
foo:
b.w bar
-@ CHECK: {{[0-9]+}} R_ARM_THM_JUMP24 bar
+@ CHECK: {{[0-9a-f]+}} R_ARM_THM_JUMP24 bar
diff --git a/test/MC/ARM/elf-thumbfunc-reloc.ll b/test/MC/ARM/elf-thumbfunc-reloc.ll
index 9fd360e1a013..f502739a2afd 100644
--- a/test/MC/ARM/elf-thumbfunc-reloc.ll
+++ b/test/MC/ARM/elf-thumbfunc-reloc.ll
@@ -3,7 +3,7 @@
; RUN: FileCheck %s
; FIXME: This file needs to be in .s form!
-; We wanna test relocatable thumb function call,
+; We want to test relocatable thumb function call,
; but ARMAsmParser cannot handle "bl foo(PLT)" yet
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:32-n32"
@@ -32,6 +32,10 @@ entry:
; CHECK-NEXT: Section (2) .rel.text {
; CHECK-NEXT: 0x8 R_ARM_THM_CALL foo 0x0
; CHECK-NEXT: }
+; CHECK-NEXT: Section (7) .rel.ARM.exidx {
+; CHECK-NEXT: 0x0 R_ARM_PREL31 .text 0x0
+; CHECK-NEXT: 0x8 R_ARM_PREL31 .text 0x0
+; CHECK-NEXT: }
; CHECK-NEXT: ]
; make sure foo is thumb function: bit 0 = 1
diff --git a/test/MC/ARM/elf-thumbfunc-reloc.s b/test/MC/ARM/elf-thumbfunc-reloc.s
index 614702012f0c..ea7d507a7e83 100644
--- a/test/MC/ARM/elf-thumbfunc-reloc.s
+++ b/test/MC/ARM/elf-thumbfunc-reloc.s
@@ -5,7 +5,6 @@
.syntax unified
.text
- .globl f
.align 2
.type f,%function
.code 16
@@ -16,9 +15,21 @@ f:
bl g
pop {r7, pc}
+ .section .data.rel.local,"aw",%progbits
+ptr:
+ .long f
+
+
@@ make sure an R_ARM_THM_CALL relocation is generated for the call to g
@CHECK: Relocations [
@CHECK-NEXT: Section (2) .rel.text {
@CHECK-NEXT: 0x4 R_ARM_THM_CALL g 0x0
@CHECK-NEXT: }
+
+
+@@ make sure the relocation is against f. That is one way to make sure it includes
+@@ the thumb bit.
+@CHECK-NEXT: Section (6) .rel.data.rel.local {
+@CHECK-NEXT: 0x0 R_ARM_ABS32 f 0x0
+@CHECK-NEXT: }
@CHECK-NEXT: ]
diff --git a/test/MC/ARM/elf-thumbfunc.s b/test/MC/ARM/elf-thumbfunc.s
index 0ea11821b96b..af061b50bc3d 100644
--- a/test/MC/ARM/elf-thumbfunc.s
+++ b/test/MC/ARM/elf-thumbfunc.s
@@ -11,7 +11,17 @@
foo:
bx lr
-@@ make sure foo is thumb function: bit 0 = 1 (st_value)
+ .global bar
+bar = foo
+
+@@ make sure foo and bar are thumb functions: bit 0 = 1 (st_value)
+@CHECK: Symbol {
+@CHECK: Name: bar
+@CHECK-NEXT: Value: 0x1
+@CHECK-NEXT: Size: 0
+@CHECK-NEXT: Binding: Global
+@CHECK-NEXT: Type: Function
+
@CHECK: Symbol {
@CHECK: Name: foo
@CHECK-NEXT: Value: 0x1
diff --git a/test/MC/ARM/fconst.s b/test/MC/ARM/fconst.s
new file mode 100644
index 000000000000..e2c1b3918ff9
--- /dev/null
+++ b/test/MC/ARM/fconst.s
@@ -0,0 +1,22 @@
+@ RUN: llvm-mc -mcpu=cortex-a8 -triple armv7-apple-darwin -show-encoding < %s | FileCheck %s
+
+@ fconstd/fconsts aliases
+ fconsts s4, #0x0
+ fconsts s4, #0x70
+ fconstd d3, #0x0
+ fconstd d3, #0x70
+
+ fconstsne s5, #0x1
+ fconstsgt s5, #0x20
+ fconstdlt d2, #0x3
+ fconstdge d2, #0x40
+
+@ CHECK: vmov.f32 s4, #2.000000e+00 @ encoding: [0x00,0x2a,0xb0,0xee]
+@ CHECK: vmov.f32 s4, #1.000000e+00 @ encoding: [0x00,0x2a,0xb7,0xee]
+@ CHECK: vmov.f64 d3, #2.000000e+00 @ encoding: [0x00,0x3b,0xb0,0xee]
+@ CHECK: vmov.f64 d3, #1.000000e+00 @ encoding: [0x00,0x3b,0xb7,0xee]
+
+@ CHECK: vmovne.f32 s5, #2.125000e+00 @ encoding: [0x01,0x2a,0xf0,0x1e]
+@ CHECK: vmovgt.f32 s5, #8.000000e+00 @ encoding: [0x00,0x2a,0xf2,0xce]
+@ CHECK: vmovlt.f64 d2, #2.375000e+00 @ encoding: [0x03,0x2b,0xb0,0xbe]
+@ CHECK: vmovge.f64 d2, #1.250000e-01 @ encoding: [0x00,0x2b,0xb4,0xae]
diff --git a/test/MC/ARM/fixup-cpu-mode.s b/test/MC/ARM/fixup-cpu-mode.s
new file mode 100644
index 000000000000..17f29f94d2d6
--- /dev/null
+++ b/test/MC/ARM/fixup-cpu-mode.s
@@ -0,0 +1,9 @@
+// RUN: llvm-mc -filetype=obj -triple thumbv7-linux-gnu %s -o %t
+// RUN: llvm-objdump -triple thumbv7-linux-gnu -d %t | FileCheck %s
+
+//PR18303
+.code 16
+.global edata
+b edata // CHECK: b.w
+.code 32
+
diff --git a/test/MC/ARM/fp-const-errors.s b/test/MC/ARM/fp-const-errors.s
new file mode 100644
index 000000000000..2a68ddbe727e
--- /dev/null
+++ b/test/MC/ARM/fp-const-errors.s
@@ -0,0 +1,22 @@
+@ RUN: not llvm-mc -mcpu=cortex-a8 -triple armv7-none-linux-gnueabi < %s 2>&1 | FileCheck %s
+
+@ Test for floating point constants that are out of the 8-bit encoded value range
+vmov.f32 s2, #32.0
+@ CHECK: error: invalid operand for instruction
+
+vmov.f64 d2, #32.0
+@ CHECK: error: invalid operand for instruction
+
+@ Test that vmov.f instructions do not accept an 8-bit encoded float as an operand
+vmov.f32 s1, #0x70
+@ CHECK: error: invalid floating point immediate
+
+vmov.f64 d2, #0x70
+@ CHECK: error: invalid floating point immediate
+
+@ Test that fconst instructions do not accept a float constant as an operand
+fconsts s1, #1.0
+@ CHECK: error: invalid floating point immediate
+
+fconstd d2, #1.0
+@ CHECK: error: invalid floating point immediate
diff --git a/test/MC/ARM/gas-compl-copr-reg.s b/test/MC/ARM/gas-compl-copr-reg.s
new file mode 100644
index 000000000000..ab0b02395831
--- /dev/null
+++ b/test/MC/ARM/gas-compl-copr-reg.s
@@ -0,0 +1,14 @@
+@ RUN: llvm-mc -triple=armv7-linux-gnueabi -show-encoding < %s | FileCheck %s
+
+@ CHECK: ldc p12, c4, [r0, #4] @ encoding: [0x01,0x4c,0x90,0xed]
+@ CHECK: stc p14, c6, [r2, #-224] @ encoding: [0x38,0x6e,0x02,0xed]
+
+ ldc p12, cr4, [r0, #4]
+ stc p14, cr6, [r2, #-224]
+@ RUN: llvm-mc -triple=armv7-linux-gnueabi -show-encoding < %s | FileCheck %s
+
+@ CHECK: ldc p12, c4, [r0, #4] @ encoding: [0x01,0x4c,0x90,0xed]
+@ CHECK: stc p14, c6, [r2, #-224] @ encoding: [0x38,0x6e,0x02,0xed]
+
+ ldc p12, cr4, [r0, #4]
+ stc p14, cr6, [r2, #-224]
diff --git a/test/MC/ARM/inst-arm-suffixes.s b/test/MC/ARM/inst-arm-suffixes.s
new file mode 100644
index 000000000000..a80ef47a6074
--- /dev/null
+++ b/test/MC/ARM/inst-arm-suffixes.s
@@ -0,0 +1,15 @@
+@ RUN: not llvm-mc %s -triple armv7-linux-gnueabi -filetype asm -o - 2>&1 \
+@ RUN: | FileCheck -check-prefix CHECK-ERROR %s
+
+ .syntax unified
+ .arm
+
+ .align 2
+ .global suffixes_invalid_in_arm
+ .type suffixes_invalid_in_arm,%function
+suffixes_invalid_in_arm:
+ .inst.n 2
+@ CHECK-ERROR: width suffixes are invalid in ARM mode
+ .inst.w 4
+@ CHECK-ERROR: width suffixes are invalid in ARM mode
+
diff --git a/test/MC/ARM/inst-constant-required.s b/test/MC/ARM/inst-constant-required.s
new file mode 100644
index 000000000000..d4863dd2cd77
--- /dev/null
+++ b/test/MC/ARM/inst-constant-required.s
@@ -0,0 +1,15 @@
+@ RUN: not llvm-mc %s -triple=armv7-linux-gnueabi -filetype asm -o - 2>&1 \
+@ RUN: | FileCheck -check-prefix CHECK-ERROR %s
+
+ .syntax unified
+ .arm
+
+ .align 2
+ .global constant_expression_required
+ .type constant_expression_required,%function
+constant_expression_required:
+.Label:
+ movs r0, r0
+ .inst .Label
+@ CHECK-ERROR: expected constant expression
+
diff --git a/test/MC/ARM/inst-directive-emit.s b/test/MC/ARM/inst-directive-emit.s
new file mode 100644
index 000000000000..13b7edfa1f3a
--- /dev/null
+++ b/test/MC/ARM/inst-directive-emit.s
@@ -0,0 +1,20 @@
+@ RUN: llvm-mc %s -triple armv7-linux-gnueabi -filetype asm -o - | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .align 2
+ .global emit_asm
+ .type emit_asm,%function
+emit_asm:
+ .inst.w 0xf2400000, 0xf2c00000
+
+@ CHECK: .text
+@ CHECK: .code 16
+@ CHECK: .align 2
+@ CHECK: .globl emit_asm
+@ CHECK: .type emit_asm,%function
+@ CHECK: emit_asm:
+@ CHECK: inst.w 0xF2400000
+@ CHECK: inst.w 0xF2C00000
+
diff --git a/test/MC/ARM/inst-directive.s b/test/MC/ARM/inst-directive.s
new file mode 100644
index 000000000000..a3fd8c23f44d
--- /dev/null
+++ b/test/MC/ARM/inst-directive.s
@@ -0,0 +1,81 @@
+@ RUN: llvm-mc %s -triple=armv7-linux-gnueabi -filetype=obj -o - \
+@ RUN: | llvm-readobj -s -sd | FileCheck %s
+
+ .syntax unified
+
+@-------------------------------------------------------------------------------
+@ arm_inst
+@-------------------------------------------------------------------------------
+ .arm
+
+ .section .inst.arm_inst
+
+ .align 2
+ .global arm_inst
+ .type arm_inst,%function
+arm_inst:
+ .inst 0xdefe
+
+@ CHECK: Section {
+@ CHECK: Name: .inst.arm_inst
+@ CHECK: SectionData (
+@ CHECK-NEXT: 0000: FEDE0000
+@ CHECK-NEXT: )
+
+@-------------------------------------------------------------------------------
+@ thumb_inst_n
+@-------------------------------------------------------------------------------
+ .thumb
+
+ .section .inst.thumb_inst_n
+
+ .align 2
+ .global thumb_inst_n
+ .type thumb_inst_n,%function
+thumb_inst_n:
+ .inst.n 0xdefe
+
+@ CHECK: Section {
+@ CHECK: Name: .inst.thumb_inst_n
+@ CHECK: SectionData (
+@ CHECK-NEXT: 0000: FEDE
+@ CHECK-NEXT: )
+
+@-------------------------------------------------------------------------------
+@ thumb_inst_w
+@-------------------------------------------------------------------------------
+ .thumb
+
+ .section .inst.thumb_inst_w
+
+ .align 2
+ .global thumb_inst_w
+ .type thumb_inst_w,%function
+thumb_inst_w:
+ .inst.w 0x00000000
+
+@ CHECK: Section {
+@ CHECK: Name: .inst.thumb_inst_w
+@ CHECK: SectionData (
+@ CHECK-NEXT: 0000: 00000000
+@ CHECK-NEXT: )
+
+@-------------------------------------------------------------------------------
+@ thumb_inst_w
+@-------------------------------------------------------------------------------
+ .thumb
+
+ .section .inst.thumb_inst_inst
+
+ .align 2
+ .global thumb_inst_inst
+ .type thumb_inst_inst,%function
+thumb_inst_inst:
+ .inst.w 0xf2400000, 0xf2c00000
+
+@ CHECK: Section {
+@ CHECK: Name: .inst.thumb_inst_inst
+@ CHECK: SectionData (
+@ CHECK-NEXT: 0000: 40F20000 C0F20000
+@ CHECK-NEXT: )
+
diff --git a/test/MC/ARM/inst-overflow.s b/test/MC/ARM/inst-overflow.s
new file mode 100644
index 000000000000..133d53f74307
--- /dev/null
+++ b/test/MC/ARM/inst-overflow.s
@@ -0,0 +1,14 @@
+@ RUN: not llvm-mc %s -triple armv7-linux-gnueabi -filetype asm -o - 2>&1 \
+@ RUN: | FileCheck -check-prefix CHECK-ERROR %s
+
+ .syntax unified
+ .arm
+
+ .align 2
+ .global constant_overflow
+ .type constant_overflow,%function
+constant_overflow:
+ .inst 1 << 32
+@ CHECK-ERROR: inst operand is too big
+
+
diff --git a/test/MC/ARM/inst-thumb-overflow-2.s b/test/MC/ARM/inst-thumb-overflow-2.s
new file mode 100644
index 000000000000..1b3d642cf666
--- /dev/null
+++ b/test/MC/ARM/inst-thumb-overflow-2.s
@@ -0,0 +1,13 @@
+@ RUN: not llvm-mc %s -triple armv7-linux-gnueabi -filetype asm -o - 2>&1 \
+@ RUN: | FileCheck -check-prefix CHECK-ERRORS %s
+
+ .syntax unified
+ .thumb
+
+ .align 2
+ .global constant_overflow
+ .type constant_overflow,%function
+constant_overflow:
+ .inst.w 1 << 32
+@ CHECK-ERRORS: inst.w operand is too big
+
diff --git a/test/MC/ARM/inst-thumb-overflow.s b/test/MC/ARM/inst-thumb-overflow.s
new file mode 100644
index 000000000000..34626f97ff03
--- /dev/null
+++ b/test/MC/ARM/inst-thumb-overflow.s
@@ -0,0 +1,13 @@
+@ RUN: not llvm-mc %s -triple armv7-linux-gnueabi -filetype asm -o - 2>&1 \
+@ RUN: | FileCheck -check-prefix CHECK-ERROR %s
+
+ .syntax unified
+ .thumb
+
+ .align 2
+ .global constant_overflow
+ .type constant_overflow,%function
+constant_overflow:
+ .inst.n 1 << 31
+@ CHECK-ERROR: inst.n operand is too big, use inst.w instead
+
diff --git a/test/MC/ARM/inst-thumb-suffixes.s b/test/MC/ARM/inst-thumb-suffixes.s
new file mode 100644
index 000000000000..40def3c3b89e
--- /dev/null
+++ b/test/MC/ARM/inst-thumb-suffixes.s
@@ -0,0 +1,13 @@
+@ RUN: not llvm-mc %s -triple armv7-linux-gnueabi -filetype asm -o - 2>&1 \
+@ RUN: | FileCheck -check-prefix CHECK-ERROR %s
+
+ .syntax unified
+ .thumb
+
+ .align 2
+ .global suffixes_required_in_thumb
+ .type suffixes_required_in_thumb,%function
+suffixes_required_in_thumb:
+ .inst 0x0000
+@ CHECK-ERROR: cannot determine Thumb instruction size, use inst.n/inst.w instead
+
diff --git a/test/MC/ARM/invalid-vector-index.s b/test/MC/ARM/invalid-vector-index.s
new file mode 100644
index 000000000000..b58e1bdcc30d
--- /dev/null
+++ b/test/MC/ARM/invalid-vector-index.s
@@ -0,0 +1,5 @@
+@ RUN: not llvm-mc -triple=armv7-apple-darwin < %s 2>&1 | FileCheck %s
+
+ldrd r6, r7 [r2, #15]
+
+@ CHECK: error: immediate value expected for vector index
diff --git a/test/MC/ARM/ldr-pseudo-darwin.s b/test/MC/ARM/ldr-pseudo-darwin.s
new file mode 100644
index 000000000000..f04f533cfae7
--- /dev/null
+++ b/test/MC/ARM/ldr-pseudo-darwin.s
@@ -0,0 +1,247 @@
+@ This test has a partner (ldr-pseudo.s) that contains matching
+@ tests for the ldr-pseudo on linux targets. We need separate files
+@ because the syntax for switching sections and temporary labels differs
+@ between darwin and linux. Any tests added here should have a matching
+@ test added there.
+
+@RUN: llvm-mc -triple armv7-apple-darwin %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv5-apple-darwin %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv7-apple-darwin %s | FileCheck %s
+
+@
+@ Check that large constants are converted to ldr from constant pool
+@
+@ simple test
+.section __TEXT,b,regular,pure_instructions
+@ CHECK-LABEL: f3:
+f3:
+ ldr r0, =0x10001
+@ CHECK: ldr r0, Ltmp0
+
+@ loading multiple constants
+.section __TEXT,c,regular,pure_instructions
+@ CHECK-LABEL: f4:
+f4:
+ ldr r0, =0x10002
+@ CHECK: ldr r0, Ltmp1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10003
+@ CHECK: ldr r0, Ltmp2
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ TODO: the same constants should have the same constant pool location
+.section __TEXT,d,regular,pure_instructions
+@ CHECK-LABEL: f5:
+f5:
+ ldr r0, =0x10004
+@ CHECK: ldr r0, Ltmp3
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10004
+@ CHECK: ldr r0, Ltmp4
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ a section defined in multiple pieces should be merged and use a single constant pool
+.section __TEXT,e,regular,pure_instructions
+@ CHECK-LABEL: f6:
+f6:
+ ldr r0, =0x10006
+@ CHECK: ldr r0, Ltmp5
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+.section __TEXT,f,regular,pure_instructions
+@ CHECK-LABEL: f7:
+f7:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+.section __TEXT,e,regular,pure_instructions
+@ CHECK-LABEL: f8:
+f8:
+ adds r0, r0, #1
+ ldr r0, =0x10007
+@ CHECK: ldr r0, Ltmp6
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@
+@ Check that symbols can be loaded using ldr pseudo
+@
+
+@ load an undefined symbol
+.section __TEXT,g,regular,pure_instructions
+@ CHECK-LABEL: f9:
+f9:
+ ldr r0, =foo
+@ CHECK: ldr r0, Ltmp7
+
+@ load a symbol from another section
+.section __TEXT,h,regular,pure_instructions
+@ CHECK-LABEL: f10:
+f10:
+ ldr r0, =f5
+@ CHECK: ldr r0, Ltmp8
+
+@ load a symbol from the same section
+.section __TEXT,i,regular,pure_instructions
+@ CHECK-LABEL: f11:
+f11:
+ ldr r0, =f12
+@ CHECK: ldr r0, Ltmp9
+
+@ CHECK-LABEL: f12:
+f12:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+.section __TEXT,j,regular,pure_instructions
+@ mix of symbols and constants
+@ CHECK-LABEL: f13:
+f13:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x101
+@ CHECK: ldr r0, Ltmp10
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =bar
+@ CHECK: ldr r0, Ltmp11
+ adds r0, r0, #1
+ adds r0, r0, #1
+@
+@ Check for correct usage in other contexts
+@
+
+@ usage in macro
+.macro useit_in_a_macro
+ ldr r0, =0x10008
+ ldr r0, =baz
+.endm
+.section __TEXT,k,regular,pure_instructions
+@ CHECK-LABEL: f14:
+f14:
+ useit_in_a_macro
+@ CHECK: ldr r0, Ltmp12
+@ CHECK: ldr r0, Ltmp13
+
+@ usage with expressions
+.section __TEXT,l,regular,pure_instructions
+@ CHECK-LABEL: f15:
+f15:
+ ldr r0, =0x10001+8
+@ CHECK: ldr r0, Ltmp14
+ adds r0, r0, #1
+ ldr r0, =bar+4
+@ CHECK: ldr r0, Ltmp15
+ adds r0, r0, #1
+
+@
+@ Constant Pools
+@
+@ CHECK: .section __TEXT,b,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp0:
+@ CHECK: .long 65537
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,c,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp1:
+@ CHECK: .long 65538
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp2:
+@ CHECK: .long 65539
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,d,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp3:
+@ CHECK: .long 65540
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp4:
+@ CHECK: .long 65540
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,e,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp5:
+@ CHECK: .long 65542
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp6:
+@ CHECK: .long 65543
+@ CHECK: .end_data_region
+
+@ Should not switch to section because it has no constant pool
+@ CHECK-NOT: .section __TEXT,f,regular,pure_instructions
+
+@ CHECK: .section __TEXT,g,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp7:
+@ CHECK: .long foo
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,h,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp8:
+@ CHECK: .long f5
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,i,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp9:
+@ CHECK: .long f12
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,j,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp10:
+@ CHECK: .long 257
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp11:
+@ CHECK: .long bar
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,k,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp12:
+@ CHECK: .long 65544
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp13:
+@ CHECK: .long baz
+@ CHECK: .end_data_region
+
+@ CHECK: .section __TEXT,l,regular,pure_instructions
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp14:
+@ CHECK: .long 65545
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp15:
+@ CHECK: .long bar+4
+@ CHECK: .end_data_region
diff --git a/test/MC/ARM/ldr-pseudo-obj-errors.s b/test/MC/ARM/ldr-pseudo-obj-errors.s
new file mode 100644
index 000000000000..dce88f058f83
--- /dev/null
+++ b/test/MC/ARM/ldr-pseudo-obj-errors.s
@@ -0,0 +1,17 @@
+@RUN: not llvm-mc -triple=armv7-unknown-linux-gnueabi -filetype=obj %s -o %t1 2> %t2
+@RUN: cat %t2 | FileCheck %s
+@RUN: not llvm-mc -triple=armv7-darwin-apple -filetype=obj %s -o %t1_darwin 2> %t2_darwin
+@RUN: cat %t2_darwin | FileCheck %s
+
+@These tests look for errors that should be reported for invalid object layout
+@with the ldr pseudo. They are tested separately from parse errors because they
+@only trigger when the file has successfully parsed and the object file is about
+@to be written out.
+
+.text
+foo:
+ ldr r0, =0x101
+ .space 8000
+@ CHECK: error: out of range pc-relative fixup value
+@ CHECK: ldr r0, =0x101
+@ CHECK: ^
diff --git a/test/MC/ARM/ldr-pseudo-parse-errors.s b/test/MC/ARM/ldr-pseudo-parse-errors.s
new file mode 100644
index 000000000000..2e6114d6fe08
--- /dev/null
+++ b/test/MC/ARM/ldr-pseudo-parse-errors.s
@@ -0,0 +1,10 @@
+@RUN: not llvm-mc -triple=armv7-unknown-linux-gnueabi < %s 2>&1 | FileCheck %s
+@RUN: not llvm-mc -triple=armv7-apple-darwin < %s 2>&1 | FileCheck %s
+
+.text
+bar:
+ mov r0, =0x101
+@ CHECK: error: unexpected token in operand
+@ CHECK: mov r0, =0x101
+@ CHECK: ^
+
diff --git a/test/MC/ARM/ldr-pseudo.s b/test/MC/ARM/ldr-pseudo.s
new file mode 100644
index 000000000000..df0d88a9a56e
--- /dev/null
+++ b/test/MC/ARM/ldr-pseudo.s
@@ -0,0 +1,221 @@
+@ This test has a partner (ldr-pseudo-darwin.s) that contains matching
+@ tests for the ldr-pseudo on darwin targets. We need separate files
+@ because the syntax for switching sections and temporary labels differs
+@ between darwin and linux. Any tests added here should have a matching
+@ test added there.
+
+@RUN: llvm-mc -triple armv7-unknown-linux-gnueabi %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv5-unknown-linux-gnueabi %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv7-unknown-linux-gnueabi %s | FileCheck %s
+
+@
+@ Check that large constants are converted to ldr from constant pool
+@
+@ simple test
+.section b,"ax",%progbits
+@ CHECK-LABEL: f3:
+f3:
+ ldr r0, =0x10001
+@ CHECK: ldr r0, .Ltmp[[TMP0:[0-9]+]]
+
+@ loading multiple constants
+.section c,"ax",%progbits
+@ CHECK-LABEL: f4:
+f4:
+ ldr r0, =0x10002
+@ CHECK: ldr r0, .Ltmp[[TMP1:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10003
+@ CHECK: ldr r0, .Ltmp[[TMP2:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ TODO: the same constants should have the same constant pool location
+.section d,"ax",%progbits
+@ CHECK-LABEL: f5:
+f5:
+ ldr r0, =0x10004
+@ CHECK: ldr r0, .Ltmp[[TMP3:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10004
+@ CHECK: ldr r0, .Ltmp[[TMP4:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ a section defined in multiple pieces should be merged and use a single constant pool
+.section e,"ax",%progbits
+@ CHECK-LABEL: f6:
+f6:
+ ldr r0, =0x10006
+@ CHECK: ldr r0, .Ltmp[[TMP5:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+.section f, "ax", %progbits
+@ CHECK-LABEL: f7:
+f7:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+.section e, "ax", %progbits
+@ CHECK-LABEL: f8:
+f8:
+ adds r0, r0, #1
+ ldr r0, =0x10007
+@ CHECK: ldr r0, .Ltmp[[TMP6:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@
+@ Check that symbols can be loaded using ldr pseudo
+@
+
+@ load an undefined symbol
+.section g,"ax",%progbits
+@ CHECK-LABEL: f9:
+f9:
+ ldr r0, =foo
+@ CHECK: ldr r0, .Ltmp[[TMP7:[0-9]+]]
+
+@ load a symbol from another section
+.section h,"ax",%progbits
+@ CHECK-LABEL: f10:
+f10:
+ ldr r0, =f5
+@ CHECK: ldr r0, .Ltmp[[TMP8:[0-9]+]]
+
+@ load a symbol from the same section
+.section i,"ax",%progbits
+@ CHECK-LABEL: f11:
+f11:
+ ldr r0, =f12
+@ CHECK: ldr r0, .Ltmp[[TMP9:[0-9]+]]
+
+@ CHECK-LABEL: f12:
+f12:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+.section j,"ax",%progbits
+@ mix of symbols and constants
+@ CHECK-LABEL: f13:
+f13:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x101
+@ CHECK: ldr r0, .Ltmp[[TMP10:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =bar
+@ CHECK: ldr r0, .Ltmp[[TMP11:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+@
+@ Check for correct usage in other contexts
+@
+
+@ usage in macro
+.macro useit_in_a_macro
+ ldr r0, =0x10008
+ ldr r0, =baz
+.endm
+.section k,"ax",%progbits
+@ CHECK-LABEL: f14:
+f14:
+ useit_in_a_macro
+@ CHECK: ldr r0, .Ltmp[[TMP12:[0-9]+]]
+@ CHECK: ldr r0, .Ltmp[[TMP13:[0-9]+]]
+
+@ usage with expressions
+.section l, "ax", %progbits
+@ CHECK-LABEL: f15:
+f15:
+ ldr r0, =0x10001+8
+@ CHECK: ldr r0, .Ltmp[[TMP14:[0-9]+]]
+ adds r0, r0, #1
+ ldr r0, =bar+4
+@ CHECK: ldr r0, .Ltmp[[TMP15:[0-9]+]]
+ adds r0, r0, #1
+
+@
+@ Constant Pools
+@
+@ CHECK: .section b,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP0]]
+@ CHECK: .long 65537
+
+@ CHECK: .section c,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP1]]
+@ CHECK: .long 65538
+@ CHECK: .Ltmp[[TMP2]]
+@ CHECK: .long 65539
+
+@ CHECK: .section d,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP3]]
+@ CHECK: .long 65540
+@ CHECK: .Ltmp[[TMP4]]
+@ CHECK: .long 65540
+
+@ CHECK: .section e,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP5]]
+@ CHECK: .long 65542
+@ CHECK: .Ltmp[[TMP6]]
+@ CHECK: .long 65543
+
+@ Should not switch to section because it has no constant pool
+@ CHECK-NOT: .section f,"ax",%progbits
+
+@ CHECK: .section g,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP7]]
+@ CHECK: .long foo
+
+@ CHECK: .section h,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP8]]
+@ CHECK: .long f5
+
+@ CHECK: .section i,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP9]]
+@ CHECK: .long f12
+
+@ CHECK: .section j,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP10]]
+@ CHECK: .long 257
+@ CHECK: .Ltmp[[TMP11]]
+@ CHECK: .long bar
+
+@ CHECK: .section k,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP12]]
+@ CHECK: .long 65544
+@ CHECK: .Ltmp[[TMP13]]
+@ CHECK: .long baz
+
+@ CHECK: .section l,"ax",%progbits
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP14]]
+@ CHECK: .long 65545
+@ CHECK: .Ltmp[[TMP15]]
+@ CHECK: .long bar+4
diff --git a/test/MC/ARM/ldrd-strd-gnu-arm-bad-imm.s b/test/MC/ARM/ldrd-strd-gnu-arm-bad-imm.s
new file mode 100644
index 000000000000..fbe459c4168e
--- /dev/null
+++ b/test/MC/ARM/ldrd-strd-gnu-arm-bad-imm.s
@@ -0,0 +1,9 @@
+@ RUN: not llvm-mc -triple=armv7-linux-gnueabi %s 2>&1 | FileCheck %s
+.text
+@ CHECK: error: instruction requires: thumb2
+@ CHECK: ldrd r0, [r0, #512]
+ ldrd r0, [r0, #512]
+
+@ CHECK: error: instruction requires: thumb2
+@ CHECK: strd r0, [r0, #512]
+ strd r0, [r0, #512]
diff --git a/test/MC/ARM/ldrd-strd-gnu-arm.s b/test/MC/ARM/ldrd-strd-gnu-arm.s
new file mode 100644
index 000000000000..57d21c7a1464
--- /dev/null
+++ b/test/MC/ARM/ldrd-strd-gnu-arm.s
@@ -0,0 +1,20 @@
+@ PR18921
+@ RUN: llvm-mc -triple=armv7-linux-gnueabi -show-encoding < %s | FileCheck %s
+.text
+
+@ CHECK-NOT: .code 16
+
+
+@ CHECK: ldrd r0, r1, [r10, #32]! @ encoding: [0xd0,0x02,0xea,0xe1]
+@ CHECK: ldrd r0, r1, [r10], #32 @ encoding: [0xd0,0x02,0xca,0xe0]
+@ CHECK: ldrd r0, r1, [r10, #32] @ encoding: [0xd0,0x02,0xca,0xe1]
+ ldrd r0, [r10, #32]!
+ ldrd r0, [r10], #32
+ ldrd r0, [r10, #32]
+
+@ CHECK: strd r0, r1, [r10, #32]! @ encoding: [0xf0,0x02,0xea,0xe1]
+@ CHECK: strd r0, r1, [r10], #32 @ encoding: [0xf0,0x02,0xca,0xe0]
+@ CHECK: strd r0, r1, [r10, #32] @ encoding: [0xf0,0x02,0xca,0xe1]
+ strd r0, [r10, #32]!
+ strd r0, [r10], #32
+ strd r0, [r10, #32]
diff --git a/test/MC/ARM/ldrd-strd-gnu-sp.s b/test/MC/ARM/ldrd-strd-gnu-sp.s
new file mode 100644
index 000000000000..21efae985255
--- /dev/null
+++ b/test/MC/ARM/ldrd-strd-gnu-sp.s
@@ -0,0 +1,9 @@
+// PR19320
+// RUN: llvm-mc -triple=armv7-linux-gnueabi -show-encoding < %s | FileCheck %s
+.text
+
+// CHECK: ldrd r12, sp, [r0, #32] @ encoding: [0xd0,0xc2,0xc0,0xe1]
+ ldrd r12, [r0, #32]
+
+// CHECK: strd r12, sp, [r0, #32] @ encoding: [0xf0,0xc2,0xc0,0xe1]
+ strd r12, [r0, #32]
diff --git a/test/MC/ARM/ldrd-strd-gnu-thumb-bad-regs.s b/test/MC/ARM/ldrd-strd-gnu-thumb-bad-regs.s
new file mode 100644
index 000000000000..9d81a27f0ca5
--- /dev/null
+++ b/test/MC/ARM/ldrd-strd-gnu-thumb-bad-regs.s
@@ -0,0 +1,10 @@
+@ RUN: not llvm-mc -triple=armv7-linux-gnueabi %s 2>&1 | FileCheck %s
+.text
+.thumb
+@ CHECK: error: invalid operand for instruction
+@ CHECK: ldrd r12, [r0, #512]
+ ldrd r12, [r0, #512]
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: strd r12, [r0, #512]
+ strd r12, [r0, #512]
diff --git a/test/MC/ARM/ldrd-strd-gnu-thumb.s b/test/MC/ARM/ldrd-strd-gnu-thumb.s
new file mode 100644
index 000000000000..67d2aa7f548d
--- /dev/null
+++ b/test/MC/ARM/ldrd-strd-gnu-thumb.s
@@ -0,0 +1,20 @@
+@ PR18921
+@ RUN: llvm-mc -triple=armv7-linux-gnueabi -show-encoding < %s | FileCheck %s
+.text
+.thumb
+
+@ CHECK: .code 16
+
+@ CHECK: ldrd r0, r1, [r10, #512]! @ encoding: [0xfa,0xe9,0x80,0x01]
+@ CHECK: ldrd r0, r1, [r10], #512 @ encoding: [0xfa,0xe8,0x80,0x01]
+@ CHECK: ldrd r0, r1, [r10, #512] @ encoding: [0xda,0xe9,0x80,0x01]
+ ldrd r0, [r10, #512]!
+ ldrd r0, [r10], #512
+ ldrd r0, [r10, #512]
+
+@ CHECK: strd r0, r1, [r10, #512]! @ encoding: [0xea,0xe9,0x80,0x01]
+@ CHECK: strd r0, r1, [r10], #512 @ encoding: [0xea,0xe8,0x80,0x01]
+@ CHECK: strd r0, r1, [r10, #512] @ encoding: [0xca,0xe9,0x80,0x01]
+ strd r0, [r10, #512]!
+ strd r0, [r10], #512
+ strd r0, [r10, #512]
diff --git a/test/MC/ARM/lit.local.cfg b/test/MC/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/MC/ARM/lit.local.cfg
+++ b/test/MC/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/ARM/ltorg-darwin.s b/test/MC/ARM/ltorg-darwin.s
new file mode 100644
index 000000000000..3402f4053cb4
--- /dev/null
+++ b/test/MC/ARM/ltorg-darwin.s
@@ -0,0 +1,151 @@
+@ This test has a partner (ltorg.s) that contains matching
+@ tests for the .ltorg directive on linux targets. We need separate files
+@ because the syntax for switching sections and temporary labels differs
+@ between darwin and linux. Any tests added here should have a matching
+@ test added there.
+
+@RUN: llvm-mc -triple armv7-apple-darwin %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv5-apple-darwin %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv7-apple-darwin %s | FileCheck %s
+
+@ check that ltorg dumps the constant pool at the current location
+.section __TEXT,a,regular,pure_instructions
+@ CHECK-LABEL: f2:
+f2:
+ ldr r0, =0x10001
+@ CHECK: ldr r0, Ltmp0
+ adds r0, r0, #1
+ adds r0, r0, #1
+ b f3
+.ltorg
+@ constant pool
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp0:
+@ CHECK: .long 65537
+@ CHECK: .end_data_region
+
+@ CHECK-LABEL: f3:
+f3:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ check that ltorg clears the constant pool after dumping it
+.section __TEXT,b,regular,pure_instructions
+@ CHECK-LABEL: f4:
+f4:
+ ldr r0, =0x10002
+@ CHECK: ldr r0, Ltmp1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ b f5
+.ltorg
+@ constant pool
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp1:
+@ CHECK: .long 65538
+@ CHECK: .end_data_region
+
+@ CHECK-LABEL: f5:
+f5:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10003
+@ CHECK: ldr r0, Ltmp2
+ adds r0, r0, #1
+ b f6
+.ltorg
+@ constant pool
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp2:
+@ CHECK: .long 65539
+@ CHECK: .end_data_region
+
+@ CHECK-LABEL: f6:
+f6:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ check that ltorg does not issue an error if there is no constant pool
+.section __TEXT,c,regular,pure_instructions
+@ CHECK-LABEL: f7:
+f7:
+ adds r0, r0, #1
+ b f8
+ .ltorg
+f8:
+ adds r0, r0, #1
+
+@ check that ltorg works for labels
+.section __TEXT,d,regular,pure_instructions
+@ CHECK-LABEL: f9:
+f9:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =bar
+@ CHECK: ldr r0, Ltmp3
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ b f10
+.ltorg
+@ constant pool
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp3:
+@ CHECK: .long bar
+@ CHECK: .end_data_region
+
+@ CHECK-LABEL: f10:
+f10:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ check that use of ltorg does not prevent dumping non-empty constant pools at end of section
+.section __TEXT,e,regular,pure_instructions
+@ CHECK-LABEL: f11:
+f11:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10004
+@ CHECK: ldr r0, Ltmp4
+ b f12
+ .ltorg
+@ constant pool
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp4:
+@ CHECK: .long 65540
+@ CHECK: .end_data_region
+
+@ CHECK-LABEL: f12:
+f12:
+ adds r0, r0, #1
+ ldr r0, =0x10005
+@ CHECK: ldr r0, Ltmp5
+
+.section __TEXT,f,regular,pure_instructions
+@ CHECK-LABEL: f13:
+f13:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ should not have a constant pool at end of section with empty constant pools
+@ CHECK-NOT: .section __TEXT,a,regular,pure_instructions
+@ CHECK-NOT: .section __TEXT,b,regular,pure_instructions
+@ CHECK-NOT: .section __TEXT,c,regular,pure_instructions
+@ CHECK-NOT: .section __TEXT,d,regular,pure_instructions
+
+@ should have a non-empty constant pool at end of this section
+@ CHECK: .section __TEXT,e,regular,pure_instructions
+@ constant pool
+@ CHECK: .data_region
+@ CHECK: .align 2
+@ CHECK-LABEL: Ltmp5:
+@ CHECK: .long 65541
+@ CHECK: .end_data_region
+
+@ should not have a constant pool at end of section with empty constant pools
+@ CHECK-NOT: .section __TEXT,f,regular,pure_instructions
diff --git a/test/MC/ARM/ltorg.s b/test/MC/ARM/ltorg.s
new file mode 100644
index 000000000000..e28862cb2a47
--- /dev/null
+++ b/test/MC/ARM/ltorg.s
@@ -0,0 +1,138 @@
+@ This test has a partner (ltorg-darwin.s) that contains matching
+@ tests for the .ltorg directive on darwin targets. We need separate files
+@ because the syntax for switching sections and temporary labels differs
+@ between darwin and linux. Any tests added here should have a matching
+@ test added there.
+
+@RUN: llvm-mc -triple armv7-unknown-linux-gnueabi %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv5-unknown-linux-gnueabi %s | FileCheck %s
+@RUN: llvm-mc -triple thumbv7-unknown-linux-gnueabi %s | FileCheck %s
+
+@ check that ltorg dumps the constant pool at the current location
+.section a,"ax",%progbits
+@ CHECK-LABEL: f2:
+f2:
+ ldr r0, =0x10001
+@ CHECK: ldr r0, .Ltmp[[TMP0:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ b f3
+.ltorg
+@ constant pool
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP0]]
+@ CHECK: .long 65537
+
+@ CHECK-LABEL: f3:
+f3:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ check that ltorg clears the constant pool after dumping it
+.section b,"ax",%progbits
+@ CHECK-LABEL: f4:
+f4:
+ ldr r0, =0x10002
+@ CHECK: ldr r0, .Ltmp[[TMP1:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ b f5
+.ltorg
+@ constant pool
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP1]]
+@ CHECK: .long 65538
+
+@ CHECK-LABEL: f5:
+f5:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10003
+@ CHECK: ldr r0, .Ltmp[[TMP2:[0-9]+]]
+ adds r0, r0, #1
+ b f6
+.ltorg
+@ constant pool
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP2]]
+@ CHECK: .long 65539
+
+@ CHECK-LABEL: f6:
+f6:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ check that ltorg does not issue an error if there is no constant pool
+.section c,"ax",%progbits
+@ CHECK-LABEL: f7:
+f7:
+ adds r0, r0, #1
+ b f8
+ .ltorg
+f8:
+ adds r0, r0, #1
+
+@ check that ltorg works for labels
+.section d,"ax",%progbits
+@ CHECK-LABEL: f9:
+f9:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =bar
+@ CHECK: ldr r0, .Ltmp[[TMP3:[0-9]+]]
+ adds r0, r0, #1
+ adds r0, r0, #1
+ adds r0, r0, #1
+ b f10
+.ltorg
+@ constant pool
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP3]]
+@ CHECK: .long bar
+
+@ CHECK-LABEL: f10:
+f10:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ check that use of ltorg does not prevent dumping non-empty constant pools at end of section
+.section e,"ax",%progbits
+@ CHECK-LABEL: f11:
+f11:
+ adds r0, r0, #1
+ adds r0, r0, #1
+ ldr r0, =0x10004
+@ CHECK: ldr r0, .Ltmp[[TMP4:[0-9]+]]
+ b f12
+ .ltorg
+@ constant pool
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP4]]
+@ CHECK: .long 65540
+@ CHECK-LABEL: f12:
+f12:
+ adds r0, r0, #1
+ ldr r0, =0x10005
+@ CHECK: ldr r0, .Ltmp[[TMP5:[0-9]+]]
+
+.section f,"ax",%progbits
+@ CHECK-LABEL: f13:
+f13:
+ adds r0, r0, #1
+ adds r0, r0, #1
+
+@ should not have a constant pool at end of section with empty constant pools
+@ CHECK-NOT: .section a,"ax",%progbits
+@ CHECK-NOT: .section b,"ax",%progbits
+@ CHECK-NOT: .section c,"ax",%progbits
+@ CHECK-NOT: .section d,"ax",%progbits
+
+@ should have a non-empty constant pool at end of this section
+@ CHECK: .section e,"ax",%progbits
+@ constant pool
+@ CHECK: .align 2
+@ CHECK: .Ltmp[[TMP5]]
+@ CHECK: .long 65541
+
+@ should not have a constant pool at end of section with empty constant pools
+@ CHECK-NOT: .section f,"ax",%progbits
diff --git a/test/MC/ARM/macho-relocs-with-addend.s b/test/MC/ARM/macho-relocs-with-addend.s
new file mode 100644
index 000000000000..fee930eee1b3
--- /dev/null
+++ b/test/MC/ARM/macho-relocs-with-addend.s
@@ -0,0 +1,34 @@
+@ RUN: llvm-mc -triple thumbv7-apple-ios7.0 -filetype=obj -o - %s | \
+@ RUN: llvm-readobj -r - | FileCheck %s
+
+ @ MachO relocations that end up expressed as internal
+ @ (scattered) relocations still need to have the type set correctly.
+
+ .text
+ .thumb_func
+ .thumb
+ .globl _with_thumb
+_with_thumb:
+ bl _dest+10
+ blx _dest+20
+
+ .globl _with_arm
+ .arm
+_with_arm:
+ bl _dest+10
+ blx _dest+20
+ bne _dest+30
+ b _dest+40
+
+ .data
+_dest:
+ .word 42
+
+@ CHECK: Relocations [
+@ CHECK-NEXT: Section __text {
+@ CHECK-NEXT: 0x14 1 2 n/a ARM_RELOC_BR24 1 0x18
+@ CHECK-NEXT: 0x10 1 2 n/a ARM_RELOC_BR24 1 0x18
+@ CHECK-NEXT: 0xC 1 2 n/a ARM_RELOC_BR24 1 0x18
+@ CHECK-NEXT: 0x8 1 2 n/a ARM_RELOC_BR24 1 0x18
+@ CHECK-NEXT: 0x4 1 2 n/a ARM_THUMB_RELOC_BR22 1 0x18
+@ CHECK-NEXT: 0x0 1 2 n/a ARM_THUMB_RELOC_BR22 1 0x18
diff --git a/test/MC/ARM/mul-v4.s b/test/MC/ARM/mul-v4.s
new file mode 100644
index 000000000000..e21468084ffb
--- /dev/null
+++ b/test/MC/ARM/mul-v4.s
@@ -0,0 +1,39 @@
+@ PR17647: MUL/MLA/SMLAL/UMLAL should be available to IAS for ARMv4 and higher
+
+@ RUN: llvm-mc < %s -triple armv4-unknown-unknown -show-encoding | FileCheck %s --check-prefix=ARMV4
+
+@ ARMV4: mul r0, r1, r2 @ encoding: [0x91,0x02,0x00,0xe0]
+@ ARMV4: muls r0, r1, r2 @ encoding: [0x91,0x02,0x10,0xe0]
+@ ARMV4: mulne r0, r1, r2 @ encoding: [0x91,0x02,0x00,0x10]
+@ ARMV4: mulseq r0, r1, r2 @ encoding: [0x91,0x02,0x10,0x00]
+mul r0, r1, r2
+muls r0, r1, r2
+mulne r0, r1, r2
+mulseq r0, r1, r2
+
+@ ARMV4: mla r0, r1, r2, r3 @ encoding: [0x91,0x32,0x20,0xe0]
+@ ARMV4: mlas r0, r1, r2, r3 @ encoding: [0x91,0x32,0x30,0xe0]
+@ ARMV4: mlane r0, r1, r2, r3 @ encoding: [0x91,0x32,0x20,0x10]
+@ ARMV4: mlaseq r0, r1, r2, r3 @ encoding: [0x91,0x32,0x30,0x00]
+mla r0, r1, r2, r3
+mlas r0, r1, r2, r3
+mlane r0, r1, r2, r3
+mlaseq r0, r1, r2, r3
+
+@ ARMV4: smlal r2, r3, r0, r1 @ encoding: [0x90,0x21,0xe3,0xe0]
+@ ARMV4: smlals r2, r3, r0, r1 @ encoding: [0x90,0x21,0xf3,0xe0]
+@ ARMV4: smlalne r2, r3, r0, r1 @ encoding: [0x90,0x21,0xe3,0x10]
+@ ARMV4: smlalseq r2, r3, r0, r1 @ encoding: [0x90,0x21,0xf3,0x00]
+smlal r2,r3,r0,r1
+smlals r2,r3,r0,r1
+smlalne r2,r3,r0,r1
+smlalseq r2,r3,r0,r1
+
+@ ARMV4: umlal r2, r3, r0, r1 @ encoding: [0x90,0x21,0xa3,0xe0]
+@ ARMV4: umlals r2, r3, r0, r1 @ encoding: [0x90,0x21,0xb3,0xe0]
+@ ARMV4: umlalne r2, r3, r0, r1 @ encoding: [0x90,0x21,0xa3,0x10]
+@ ARMV4: umlalseq r2, r3, r0, r1 @ encoding: [0x90,0x21,0xb3,0x00]
+umlal r2,r3,r0,r1
+umlals r2,r3,r0,r1
+umlalne r2,r3,r0,r1
+umlalseq r2,r3,r0,r1
diff --git a/test/MC/ARM/neon-vld-encoding.s b/test/MC/ARM/neon-vld-encoding.s
index 648e91705782..b96784e4689c 100644
--- a/test/MC/ARM/neon-vld-encoding.s
+++ b/test/MC/ARM/neon-vld-encoding.s
@@ -367,7 +367,7 @@
@ CHECK: vld3.16 {d16[], d17[], d18[]}, [r2]! @ encoding: [0x4d,0x0e,0xe2,0xf4]
@ CHECK: vld3.32 {d16[], d17[], d18[]}, [r3]! @ encoding: [0x8d,0x0e,0xe3,0xf4]
@ CHECK: vld3.8 {d17[], d18[], d19[]}, [r7]! @ encoding: [0x2d,0x1e,0xe7,0xf4]
-@ CHECK: vld3.16 {d17[], d18[], d19[]}, [r7]! @ encoding: [0x6d,0x1e,0xe7,0xf4]
+@ CHECK: vld3.16 {d17[], d19[], d21[]}, [r7]! @ encoding: [0x6d,0x1e,0xe7,0xf4]
@ CHECK: vld3.32 {d16[], d18[], d20[]}, [r8]! @ encoding: [0xad,0x0e,0xe8,0xf4]
@ CHECK: vld3.8 {d16[], d17[], d18[]}, [r1], r8 @ encoding: [0x08,0x0e,0xe1,0xf4]
@ CHECK: vld3.16 {d16[], d17[], d18[]}, [r2], r7 @ encoding: [0x47,0x0e,0xe2,0xf4]
@@ -403,7 +403,7 @@
@ CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1:32]! @ encoding: [0x3d,0x03,0xe1,0xf4]
@ CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [r2:64]! @ encoding: [0x5d,0x07,0xe2,0xf4]
@ CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [r3:128]! @ encoding: [0xad,0x0b,0xe3,0xf4]
-@ CHECK: vld4.16 {d17[1], d18[1], d19[1], d20[1]}, [r7]! @ encoding: [0x6d,0x17,0xe7,0xf4]
+@ CHECK: vld4.16 {d17[1], d19[1], d21[1], d23[1]}, [r7]! @ encoding: [0x6d,0x17,0xe7,0xf4]
@ CHECK: vld4.32 {d16[1], d18[1], d20[1], d22[1]}, [r8]! @ encoding: [0xcd,0x0b,0xe8,0xf4]
@ CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1:32], r8 @ encoding: [0x38,0x03,0xe1,0xf4]
@ CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [r2], r7 @ encoding: [0x47,0x07,0xe2,0xf4]
diff --git a/test/MC/ARM/neon-vld-vst-align.s b/test/MC/ARM/neon-vld-vst-align.s
new file mode 100644
index 000000000000..c3628ced909e
--- /dev/null
+++ b/test/MC/ARM/neon-vld-vst-align.s
@@ -0,0 +1,8354 @@
+@ RUN: not llvm-mc -triple=thumbv7-apple-darwin -show-encoding < %s > %t 2> %t.err
+@ RUN: FileCheck < %t %s
+@ RUN: FileCheck --check-prefix=CHECK-ERRORS < %t.err %s
+
+ vld1.8 {d0}, [r4]
+ vld1.8 {d0}, [r4:16]
+ vld1.8 {d0}, [r4:32]
+ vld1.8 {d0}, [r4:64]
+ vld1.8 {d0}, [r4:128]
+ vld1.8 {d0}, [r4:256]
+
+@ CHECK: vld1.8 {d0}, [r4] @ encoding: [0x24,0xf9,0x0f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0}, [r4]!
+ vld1.8 {d0}, [r4:16]!
+ vld1.8 {d0}, [r4:32]!
+ vld1.8 {d0}, [r4:64]!
+ vld1.8 {d0}, [r4:128]!
+ vld1.8 {d0}, [r4:256]!
+
+@ CHECK: vld1.8 {d0}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0}, [r4], r6
+ vld1.8 {d0}, [r4:16], r6
+ vld1.8 {d0}, [r4:32], r6
+ vld1.8 {d0}, [r4:64], r6
+ vld1.8 {d0}, [r4:128], r6
+ vld1.8 {d0}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1}, [r4]
+ vld1.8 {d0, d1}, [r4:16]
+ vld1.8 {d0, d1}, [r4:32]
+ vld1.8 {d0, d1}, [r4:64]
+ vld1.8 {d0, d1}, [r4:128]
+ vld1.8 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.8 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x0f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x0a]
+@ CHECK: vld1.8 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1}, [r4]!
+ vld1.8 {d0, d1}, [r4:16]!
+ vld1.8 {d0, d1}, [r4:32]!
+ vld1.8 {d0, d1}, [r4:64]!
+ vld1.8 {d0, d1}, [r4:128]!
+ vld1.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.8 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x0a]
+@ CHECK: vld1.8 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1}, [r4], r6
+ vld1.8 {d0, d1}, [r4:16], r6
+ vld1.8 {d0, d1}, [r4:32], r6
+ vld1.8 {d0, d1}, [r4:64], r6
+ vld1.8 {d0, d1}, [r4:128], r6
+ vld1.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x0a]
+@ CHECK: vld1.8 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2}, [r4]
+ vld1.8 {d0, d1, d2}, [r4:16]
+ vld1.8 {d0, d1, d2}, [r4:32]
+ vld1.8 {d0, d1, d2}, [r4:64]
+ vld1.8 {d0, d1, d2}, [r4:128]
+ vld1.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.8 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x0f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2}, [r4]!
+ vld1.8 {d0, d1, d2}, [r4:16]!
+ vld1.8 {d0, d1, d2}, [r4:32]!
+ vld1.8 {d0, d1, d2}, [r4:64]!
+ vld1.8 {d0, d1, d2}, [r4:128]!
+ vld1.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.8 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2}, [r4], r6
+ vld1.8 {d0, d1, d2}, [r4:16], r6
+ vld1.8 {d0, d1, d2}, [r4:32], r6
+ vld1.8 {d0, d1, d2}, [r4:64], r6
+ vld1.8 {d0, d1, d2}, [r4:128], r6
+ vld1.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2, d3}, [r4]
+ vld1.8 {d0, d1, d2, d3}, [r4:16]
+ vld1.8 {d0, d1, d2, d3}, [r4:32]
+ vld1.8 {d0, d1, d2, d3}, [r4:64]
+ vld1.8 {d0, d1, d2, d3}, [r4:128]
+ vld1.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x0f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x02]
+
+ vld1.8 {d0, d1, d2, d3}, [r4]!
+ vld1.8 {d0, d1, d2, d3}, [r4:16]!
+ vld1.8 {d0, d1, d2, d3}, [r4:32]!
+ vld1.8 {d0, d1, d2, d3}, [r4:64]!
+ vld1.8 {d0, d1, d2, d3}, [r4:128]!
+ vld1.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x02]
+
+ vld1.8 {d0, d1, d2, d3}, [r4], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x02]
+
+ vld1.8 {d0[2]}, [r4]
+ vld1.8 {d0[2]}, [r4:16]
+ vld1.8 {d0[2]}, [r4:32]
+ vld1.8 {d0[2]}, [r4:64]
+ vld1.8 {d0[2]}, [r4:128]
+ vld1.8 {d0[2]}, [r4:256]
+
+@ CHECK: vld1.8 {d0[2]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[2]}, [r4]!
+ vld1.8 {d0[2]}, [r4:16]!
+ vld1.8 {d0[2]}, [r4:32]!
+ vld1.8 {d0[2]}, [r4:64]!
+ vld1.8 {d0[2]}, [r4:128]!
+ vld1.8 {d0[2]}, [r4:256]!
+
+@ CHECK: vld1.8 {d0[2]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[2]}, [r4], r6
+ vld1.8 {d0[2]}, [r4:16], r6
+ vld1.8 {d0[2]}, [r4:32], r6
+ vld1.8 {d0[2]}, [r4:64], r6
+ vld1.8 {d0[2]}, [r4:128], r6
+ vld1.8 {d0[2]}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[]}, [r4]
+ vld1.8 {d0[]}, [r4:16]
+ vld1.8 {d0[]}, [r4:32]
+ vld1.8 {d0[]}, [r4:64]
+ vld1.8 {d0[]}, [r4:128]
+ vld1.8 {d0[]}, [r4:256]
+
+@ CHECK: vld1.8 {d0[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[]}, [r4]!
+ vld1.8 {d0[]}, [r4:16]!
+ vld1.8 {d0[]}, [r4:32]!
+ vld1.8 {d0[]}, [r4:64]!
+ vld1.8 {d0[]}, [r4:128]!
+ vld1.8 {d0[]}, [r4:256]!
+
+@ CHECK: vld1.8 {d0[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[]}, [r4], r6
+ vld1.8 {d0[]}, [r4:16], r6
+ vld1.8 {d0[]}, [r4:32], r6
+ vld1.8 {d0[]}, [r4:64], r6
+ vld1.8 {d0[]}, [r4:128], r6
+ vld1.8 {d0[]}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[], d1[]}, [r4]
+ vld1.8 {d0[], d1[]}, [r4:16]
+ vld1.8 {d0[], d1[]}, [r4:32]
+ vld1.8 {d0[], d1[]}, [r4:64]
+ vld1.8 {d0[], d1[]}, [r4:128]
+ vld1.8 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld1.8 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[], d1[]}, [r4]!
+ vld1.8 {d0[], d1[]}, [r4:16]!
+ vld1.8 {d0[], d1[]}, [r4:32]!
+ vld1.8 {d0[], d1[]}, [r4:64]!
+ vld1.8 {d0[], d1[]}, [r4:128]!
+ vld1.8 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld1.8 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[], d1[]}, [r4], r6
+ vld1.8 {d0[], d1[]}, [r4:16], r6
+ vld1.8 {d0[], d1[]}, [r4:32], r6
+ vld1.8 {d0[], d1[]}, [r4:64], r6
+ vld1.8 {d0[], d1[]}, [r4:128], r6
+ vld1.8 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0}, [r4]
+ vld1.16 {d0}, [r4:16]
+ vld1.16 {d0}, [r4:32]
+ vld1.16 {d0}, [r4:64]
+ vld1.16 {d0}, [r4:128]
+ vld1.16 {d0}, [r4:256]
+
+@ CHECK: vld1.16 {d0}, [r4] @ encoding: [0x24,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0}, [r4]!
+ vld1.16 {d0}, [r4:16]!
+ vld1.16 {d0}, [r4:32]!
+ vld1.16 {d0}, [r4:64]!
+ vld1.16 {d0}, [r4:128]!
+ vld1.16 {d0}, [r4:256]!
+
+@ CHECK: vld1.16 {d0}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0}, [r4], r6
+ vld1.16 {d0}, [r4:16], r6
+ vld1.16 {d0}, [r4:32], r6
+ vld1.16 {d0}, [r4:64], r6
+ vld1.16 {d0}, [r4:128], r6
+ vld1.16 {d0}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1}, [r4]
+ vld1.16 {d0, d1}, [r4:16]
+ vld1.16 {d0, d1}, [r4:32]
+ vld1.16 {d0, d1}, [r4:64]
+ vld1.16 {d0, d1}, [r4:128]
+ vld1.16 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.16 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x4f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x0a]
+@ CHECK: vld1.16 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1}, [r4]!
+ vld1.16 {d0, d1}, [r4:16]!
+ vld1.16 {d0, d1}, [r4:32]!
+ vld1.16 {d0, d1}, [r4:64]!
+ vld1.16 {d0, d1}, [r4:128]!
+ vld1.16 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.16 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x0a]
+@ CHECK: vld1.16 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1}, [r4], r6
+ vld1.16 {d0, d1}, [r4:16], r6
+ vld1.16 {d0, d1}, [r4:32], r6
+ vld1.16 {d0, d1}, [r4:64], r6
+ vld1.16 {d0, d1}, [r4:128], r6
+ vld1.16 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x0a]
+@ CHECK: vld1.16 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2}, [r4]
+ vld1.16 {d0, d1, d2}, [r4:16]
+ vld1.16 {d0, d1, d2}, [r4:32]
+ vld1.16 {d0, d1, d2}, [r4:64]
+ vld1.16 {d0, d1, d2}, [r4:128]
+ vld1.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.16 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2}, [r4]!
+ vld1.16 {d0, d1, d2}, [r4:16]!
+ vld1.16 {d0, d1, d2}, [r4:32]!
+ vld1.16 {d0, d1, d2}, [r4:64]!
+ vld1.16 {d0, d1, d2}, [r4:128]!
+ vld1.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.16 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2}, [r4], r6
+ vld1.16 {d0, d1, d2}, [r4:16], r6
+ vld1.16 {d0, d1, d2}, [r4:32], r6
+ vld1.16 {d0, d1, d2}, [r4:64], r6
+ vld1.16 {d0, d1, d2}, [r4:128], r6
+ vld1.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2, d3}, [r4]
+ vld1.16 {d0, d1, d2, d3}, [r4:16]
+ vld1.16 {d0, d1, d2, d3}, [r4:32]
+ vld1.16 {d0, d1, d2, d3}, [r4:64]
+ vld1.16 {d0, d1, d2, d3}, [r4:128]
+ vld1.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x4f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x02]
+
+ vld1.16 {d0, d1, d2, d3}, [r4]!
+ vld1.16 {d0, d1, d2, d3}, [r4:16]!
+ vld1.16 {d0, d1, d2, d3}, [r4:32]!
+ vld1.16 {d0, d1, d2, d3}, [r4:64]!
+ vld1.16 {d0, d1, d2, d3}, [r4:128]!
+ vld1.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x02]
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x02]
+
+ vld1.16 {d0, d1, d2, d3}, [r4], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x02]
+
+ vld1.16 {d0[2]}, [r4]
+ vld1.16 {d0[2]}, [r4:16]
+ vld1.16 {d0[2]}, [r4:32]
+ vld1.16 {d0[2]}, [r4:64]
+ vld1.16 {d0[2]}, [r4:128]
+ vld1.16 {d0[2]}, [r4:256]
+
+@ CHECK: vld1.16 {d0[2]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x04]
+@ CHECK: vld1.16 {d0[2]}, [r4:16] @ encoding: [0xa4,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[2]}, [r4]!
+ vld1.16 {d0[2]}, [r4:16]!
+ vld1.16 {d0[2]}, [r4:32]!
+ vld1.16 {d0[2]}, [r4:64]!
+ vld1.16 {d0[2]}, [r4:128]!
+ vld1.16 {d0[2]}, [r4:256]!
+
+@ CHECK: vld1.16 {d0[2]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x04]
+@ CHECK: vld1.16 {d0[2]}, [r4:16]! @ encoding: [0xa4,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[2]}, [r4], r6
+ vld1.16 {d0[2]}, [r4:16], r6
+ vld1.16 {d0[2]}, [r4:32], r6
+ vld1.16 {d0[2]}, [r4:64], r6
+ vld1.16 {d0[2]}, [r4:128], r6
+ vld1.16 {d0[2]}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x04]
+@ CHECK: vld1.16 {d0[2]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[]}, [r4]
+ vld1.16 {d0[]}, [r4:16]
+ vld1.16 {d0[]}, [r4:32]
+ vld1.16 {d0[]}, [r4:64]
+ vld1.16 {d0[]}, [r4:128]
+ vld1.16 {d0[]}, [r4:256]
+
+@ CHECK: vld1.16 {d0[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0c]
+@ CHECK: vld1.16 {d0[]}, [r4:16] @ encoding: [0xa4,0xf9,0x5f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[]}, [r4]!
+ vld1.16 {d0[]}, [r4:16]!
+ vld1.16 {d0[]}, [r4:32]!
+ vld1.16 {d0[]}, [r4:64]!
+ vld1.16 {d0[]}, [r4:128]!
+ vld1.16 {d0[]}, [r4:256]!
+
+@ CHECK: vld1.16 {d0[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0c]
+@ CHECK: vld1.16 {d0[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x5d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[]}, [r4], r6
+ vld1.16 {d0[]}, [r4:16], r6
+ vld1.16 {d0[]}, [r4:32], r6
+ vld1.16 {d0[]}, [r4:64], r6
+ vld1.16 {d0[]}, [r4:128], r6
+ vld1.16 {d0[]}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0c]
+@ CHECK: vld1.16 {d0[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x56,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[], d1[]}, [r4]
+ vld1.16 {d0[], d1[]}, [r4:16]
+ vld1.16 {d0[], d1[]}, [r4:32]
+ vld1.16 {d0[], d1[]}, [r4:64]
+ vld1.16 {d0[], d1[]}, [r4:128]
+ vld1.16 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld1.16 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0c]
+@ CHECK: vld1.16 {d0[], d1[]}, [r4:16] @ encoding: [0xa4,0xf9,0x7f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[], d1[]}, [r4]!
+ vld1.16 {d0[], d1[]}, [r4:16]!
+ vld1.16 {d0[], d1[]}, [r4:32]!
+ vld1.16 {d0[], d1[]}, [r4:64]!
+ vld1.16 {d0[], d1[]}, [r4:128]!
+ vld1.16 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld1.16 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0c]
+@ CHECK: vld1.16 {d0[], d1[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x7d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[], d1[]}, [r4], r6
+ vld1.16 {d0[], d1[]}, [r4:16], r6
+ vld1.16 {d0[], d1[]}, [r4:32], r6
+ vld1.16 {d0[], d1[]}, [r4:64], r6
+ vld1.16 {d0[], d1[]}, [r4:128], r6
+ vld1.16 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0c]
+@ CHECK: vld1.16 {d0[], d1[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x76,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0}, [r4]
+ vld1.32 {d0}, [r4:16]
+ vld1.32 {d0}, [r4:32]
+ vld1.32 {d0}, [r4:64]
+ vld1.32 {d0}, [r4:128]
+ vld1.32 {d0}, [r4:256]
+
+@ CHECK: vld1.32 {d0}, [r4] @ encoding: [0x24,0xf9,0x8f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0}, [r4]!
+ vld1.32 {d0}, [r4:16]!
+ vld1.32 {d0}, [r4:32]!
+ vld1.32 {d0}, [r4:64]!
+ vld1.32 {d0}, [r4:128]!
+ vld1.32 {d0}, [r4:256]!
+
+@ CHECK: vld1.32 {d0}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0}, [r4], r6
+ vld1.32 {d0}, [r4:16], r6
+ vld1.32 {d0}, [r4:32], r6
+ vld1.32 {d0}, [r4:64], r6
+ vld1.32 {d0}, [r4:128], r6
+ vld1.32 {d0}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1}, [r4]
+ vld1.32 {d0, d1}, [r4:16]
+ vld1.32 {d0, d1}, [r4:32]
+ vld1.32 {d0, d1}, [r4:64]
+ vld1.32 {d0, d1}, [r4:128]
+ vld1.32 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.32 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x0a]
+@ CHECK: vld1.32 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1}, [r4]!
+ vld1.32 {d0, d1}, [r4:16]!
+ vld1.32 {d0, d1}, [r4:32]!
+ vld1.32 {d0, d1}, [r4:64]!
+ vld1.32 {d0, d1}, [r4:128]!
+ vld1.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.32 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x0a]
+@ CHECK: vld1.32 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1}, [r4], r6
+ vld1.32 {d0, d1}, [r4:16], r6
+ vld1.32 {d0, d1}, [r4:32], r6
+ vld1.32 {d0, d1}, [r4:64], r6
+ vld1.32 {d0, d1}, [r4:128], r6
+ vld1.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x0a]
+@ CHECK: vld1.32 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2}, [r4]
+ vld1.32 {d0, d1, d2}, [r4:16]
+ vld1.32 {d0, d1, d2}, [r4:32]
+ vld1.32 {d0, d1, d2}, [r4:64]
+ vld1.32 {d0, d1, d2}, [r4:128]
+ vld1.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.32 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x8f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2}, [r4]!
+ vld1.32 {d0, d1, d2}, [r4:16]!
+ vld1.32 {d0, d1, d2}, [r4:32]!
+ vld1.32 {d0, d1, d2}, [r4:64]!
+ vld1.32 {d0, d1, d2}, [r4:128]!
+ vld1.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.32 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2}, [r4], r6
+ vld1.32 {d0, d1, d2}, [r4:16], r6
+ vld1.32 {d0, d1, d2}, [r4:32], r6
+ vld1.32 {d0, d1, d2}, [r4:64], r6
+ vld1.32 {d0, d1, d2}, [r4:128], r6
+ vld1.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2, d3}, [r4]
+ vld1.32 {d0, d1, d2, d3}, [r4:16]
+ vld1.32 {d0, d1, d2, d3}, [r4:32]
+ vld1.32 {d0, d1, d2, d3}, [r4:64]
+ vld1.32 {d0, d1, d2, d3}, [r4:128]
+ vld1.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x8f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x02]
+
+ vld1.32 {d0, d1, d2, d3}, [r4]!
+ vld1.32 {d0, d1, d2, d3}, [r4:16]!
+ vld1.32 {d0, d1, d2, d3}, [r4:32]!
+ vld1.32 {d0, d1, d2, d3}, [r4:64]!
+ vld1.32 {d0, d1, d2, d3}, [r4:128]!
+ vld1.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x02]
+
+ vld1.32 {d0, d1, d2, d3}, [r4], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x02]
+
+ vld1.32 {d0[1]}, [r4]
+ vld1.32 {d0[1]}, [r4:16]
+ vld1.32 {d0[1]}, [r4:32]
+ vld1.32 {d0[1]}, [r4:64]
+ vld1.32 {d0[1]}, [r4:128]
+ vld1.32 {d0[1]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4]!
+ vld1.32 {d0[1]}, [r4:16]!
+ vld1.32 {d0[1]}, [r4:32]!
+ vld1.32 {d0[1]}, [r4:64]!
+ vld1.32 {d0[1]}, [r4:128]!
+ vld1.32 {d0[1]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4], r6
+ vld1.32 {d0[1]}, [r4:16], r6
+ vld1.32 {d0[1]}, [r4:32], r6
+ vld1.32 {d0[1]}, [r4:64], r6
+ vld1.32 {d0[1]}, [r4:128], r6
+ vld1.32 {d0[1]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[]}, [r4]
+ vld1.32 {d0[]}, [r4:16]
+ vld1.32 {d0[]}, [r4:32]
+ vld1.32 {d0[]}, [r4:64]
+ vld1.32 {d0[]}, [r4:128]
+ vld1.32 {d0[]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[]}, [r4:32] @ encoding: [0xa4,0xf9,0x9f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[]}, [r4]!
+ vld1.32 {d0[]}, [r4:16]!
+ vld1.32 {d0[]}, [r4:32]!
+ vld1.32 {d0[]}, [r4:64]!
+ vld1.32 {d0[]}, [r4:128]!
+ vld1.32 {d0[]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x9d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[]}, [r4], r6
+ vld1.32 {d0[]}, [r4:16], r6
+ vld1.32 {d0[]}, [r4:32], r6
+ vld1.32 {d0[]}, [r4:64], r6
+ vld1.32 {d0[]}, [r4:128], r6
+ vld1.32 {d0[]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x96,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[], d1[]}, [r4]
+ vld1.32 {d0[], d1[]}, [r4:16]
+ vld1.32 {d0[], d1[]}, [r4:32]
+ vld1.32 {d0[], d1[]}, [r4:64]
+ vld1.32 {d0[], d1[]}, [r4:128]
+ vld1.32 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[], d1[]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[], d1[]}, [r4]!
+ vld1.32 {d0[], d1[]}, [r4:16]!
+ vld1.32 {d0[], d1[]}, [r4:32]!
+ vld1.32 {d0[], d1[]}, [r4:64]!
+ vld1.32 {d0[], d1[]}, [r4:128]!
+ vld1.32 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[], d1[]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[], d1[]}, [r4], r6
+ vld1.32 {d0[], d1[]}, [r4:16], r6
+ vld1.32 {d0[], d1[]}, [r4:32], r6
+ vld1.32 {d0[], d1[]}, [r4:64], r6
+ vld1.32 {d0[], d1[]}, [r4:128], r6
+ vld1.32 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[], d1[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4]
+ vld1.32 {d0[1]}, [r4:16]
+ vld1.32 {d0[1]}, [r4:32]
+ vld1.32 {d0[1]}, [r4:64]
+ vld1.32 {d0[1]}, [r4:128]
+ vld1.32 {d0[1]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4]!
+ vld1.32 {d0[1]}, [r4:16]!
+ vld1.32 {d0[1]}, [r4:32]!
+ vld1.32 {d0[1]}, [r4:64]!
+ vld1.32 {d0[1]}, [r4:128]!
+ vld1.32 {d0[1]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4], r6
+ vld1.32 {d0[1]}, [r4:16], r6
+ vld1.32 {d0[1]}, [r4:32], r6
+ vld1.32 {d0[1]}, [r4:64], r6
+ vld1.32 {d0[1]}, [r4:128], r6
+ vld1.32 {d0[1]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0}, [r4]
+ vld1.64 {d0}, [r4:16]
+ vld1.64 {d0}, [r4:32]
+ vld1.64 {d0}, [r4:64]
+ vld1.64 {d0}, [r4:128]
+ vld1.64 {d0}, [r4:256]
+
+@ CHECK: vld1.64 {d0}, [r4] @ encoding: [0x24,0xf9,0xcf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0}, [r4]!
+ vld1.64 {d0}, [r4:16]!
+ vld1.64 {d0}, [r4:32]!
+ vld1.64 {d0}, [r4:64]!
+ vld1.64 {d0}, [r4:128]!
+ vld1.64 {d0}, [r4:256]!
+
+@ CHECK: vld1.64 {d0}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0}, [r4], r6
+ vld1.64 {d0}, [r4:16], r6
+ vld1.64 {d0}, [r4:32], r6
+ vld1.64 {d0}, [r4:64], r6
+ vld1.64 {d0}, [r4:128], r6
+ vld1.64 {d0}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1}, [r4]
+ vld1.64 {d0, d1}, [r4:16]
+ vld1.64 {d0, d1}, [r4:32]
+ vld1.64 {d0, d1}, [r4:64]
+ vld1.64 {d0, d1}, [r4:128]
+ vld1.64 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.64 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x0a]
+@ CHECK: vld1.64 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0xef,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1}, [r4]!
+ vld1.64 {d0, d1}, [r4:16]!
+ vld1.64 {d0, d1}, [r4:32]!
+ vld1.64 {d0, d1}, [r4:64]!
+ vld1.64 {d0, d1}, [r4:128]!
+ vld1.64 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.64 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x0a]
+@ CHECK: vld1.64 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0xed,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1}, [r4], r6
+ vld1.64 {d0, d1}, [r4:16], r6
+ vld1.64 {d0, d1}, [r4:32], r6
+ vld1.64 {d0, d1}, [r4:64], r6
+ vld1.64 {d0, d1}, [r4:128], r6
+ vld1.64 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x0a]
+@ CHECK: vld1.64 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0xe6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2}, [r4]
+ vld1.64 {d0, d1, d2}, [r4:16]
+ vld1.64 {d0, d1, d2}, [r4:32]
+ vld1.64 {d0, d1, d2}, [r4:64]
+ vld1.64 {d0, d1, d2}, [r4:128]
+ vld1.64 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.64 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0xcf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2}, [r4]!
+ vld1.64 {d0, d1, d2}, [r4:16]!
+ vld1.64 {d0, d1, d2}, [r4:32]!
+ vld1.64 {d0, d1, d2}, [r4:64]!
+ vld1.64 {d0, d1, d2}, [r4:128]!
+ vld1.64 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.64 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2}, [r4], r6
+ vld1.64 {d0, d1, d2}, [r4:16], r6
+ vld1.64 {d0, d1, d2}, [r4:32], r6
+ vld1.64 {d0, d1, d2}, [r4:64], r6
+ vld1.64 {d0, d1, d2}, [r4:128], r6
+ vld1.64 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2, d3}, [r4]
+ vld1.64 {d0, d1, d2, d3}, [r4:16]
+ vld1.64 {d0, d1, d2, d3}, [r4:32]
+ vld1.64 {d0, d1, d2, d3}, [r4:64]
+ vld1.64 {d0, d1, d2, d3}, [r4:128]
+ vld1.64 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0xcf,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xef,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xff,0x02]
+
+ vld1.64 {d0, d1, d2, d3}, [r4]!
+ vld1.64 {d0, d1, d2, d3}, [r4:16]!
+ vld1.64 {d0, d1, d2, d3}, [r4:32]!
+ vld1.64 {d0, d1, d2, d3}, [r4:64]!
+ vld1.64 {d0, d1, d2, d3}, [r4:128]!
+ vld1.64 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xed,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xfd,0x02]
+
+ vld1.64 {d0, d1, d2, d3}, [r4], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xe6,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xf6,0x02]
+
+ vld2.8 {d0, d1}, [r4]
+ vld2.8 {d0, d1}, [r4:16]
+ vld2.8 {d0, d1}, [r4:32]
+ vld2.8 {d0, d1}, [r4:64]
+ vld2.8 {d0, d1}, [r4:128]
+ vld2.8 {d0, d1}, [r4:256]
+
+@ CHECK: vld2.8 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x0f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x08]
+@ CHECK: vld2.8 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d1}, [r4]!
+ vld2.8 {d0, d1}, [r4:16]!
+ vld2.8 {d0, d1}, [r4:32]!
+ vld2.8 {d0, d1}, [r4:64]!
+ vld2.8 {d0, d1}, [r4:128]!
+ vld2.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vld2.8 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x08]
+@ CHECK: vld2.8 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d1}, [r4], r6
+ vld2.8 {d0, d1}, [r4:16], r6
+ vld2.8 {d0, d1}, [r4:32], r6
+ vld2.8 {d0, d1}, [r4:64], r6
+ vld2.8 {d0, d1}, [r4:128], r6
+ vld2.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x08]
+@ CHECK: vld2.8 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d2}, [r4]
+ vld2.8 {d0, d2}, [r4:16]
+ vld2.8 {d0, d2}, [r4:32]
+ vld2.8 {d0, d2}, [r4:64]
+ vld2.8 {d0, d2}, [r4:128]
+ vld2.8 {d0, d2}, [r4:256]
+
+@ CHECK: vld2.8 {d0, d2}, [r4] @ encoding: [0x24,0xf9,0x0f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d2}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x09]
+@ CHECK: vld2.8 {d0, d2}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d2}, [r4]!
+ vld2.8 {d0, d2}, [r4:16]!
+ vld2.8 {d0, d2}, [r4:32]!
+ vld2.8 {d0, d2}, [r4:64]!
+ vld2.8 {d0, d2}, [r4:128]!
+ vld2.8 {d0, d2}, [r4:256]!
+
+@ CHECK: vld2.8 {d0, d2}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x09]
+@ CHECK: vld2.8 {d0, d2}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d2}, [r4], r6
+ vld2.8 {d0, d2}, [r4:16], r6
+ vld2.8 {d0, d2}, [r4:32], r6
+ vld2.8 {d0, d2}, [r4:64], r6
+ vld2.8 {d0, d2}, [r4:128], r6
+ vld2.8 {d0, d2}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x09]
+@ CHECK: vld2.8 {d0, d2}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d1, d2, d3}, [r4]
+ vld2.8 {d0, d1, d2, d3}, [r4:16]
+ vld2.8 {d0, d1, d2, d3}, [r4:32]
+ vld2.8 {d0, d1, d2, d3}, [r4:64]
+ vld2.8 {d0, d1, d2, d3}, [r4:128]
+ vld2.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x0f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x03]
+
+ vld2.8 {d0, d1, d2, d3}, [r4]!
+ vld2.8 {d0, d1, d2, d3}, [r4:16]!
+ vld2.8 {d0, d1, d2, d3}, [r4:32]!
+ vld2.8 {d0, d1, d2, d3}, [r4:64]!
+ vld2.8 {d0, d1, d2, d3}, [r4:128]!
+ vld2.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x03]
+
+ vld2.8 {d0, d1, d2, d3}, [r4], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:16], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:32], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:64], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:128], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x03]
+
+ vld2.8 {d0[2], d1[2]}, [r4]
+ vld2.8 {d0[2], d1[2]}, [r4:16]
+ vld2.8 {d0[2], d1[2]}, [r4:32]
+ vld2.8 {d0[2], d1[2]}, [r4:64]
+ vld2.8 {d0[2], d1[2]}, [r4:128]
+ vld2.8 {d0[2], d1[2]}, [r4:256]
+
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x01]
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4:16] @ encoding: [0xa4,0xf9,0x5f,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[2], d1[2]}, [r4]!
+ vld2.8 {d0[2], d1[2]}, [r4:16]!
+ vld2.8 {d0[2], d1[2]}, [r4:32]!
+ vld2.8 {d0[2], d1[2]}, [r4:64]!
+ vld2.8 {d0[2], d1[2]}, [r4:128]!
+ vld2.8 {d0[2], d1[2]}, [r4:256]!
+
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x01]
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4:16]! @ encoding: [0xa4,0xf9,0x5d,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[2], d1[2]}, [r4], r6
+ vld2.8 {d0[2], d1[2]}, [r4:16], r6
+ vld2.8 {d0[2], d1[2]}, [r4:32], r6
+ vld2.8 {d0[2], d1[2]}, [r4:64], r6
+ vld2.8 {d0[2], d1[2]}, [r4:128], r6
+ vld2.8 {d0[2], d1[2]}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x01]
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x56,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d1[]}, [r4]
+ vld2.8 {d0[], d1[]}, [r4:16]
+ vld2.8 {d0[], d1[]}, [r4:32]
+ vld2.8 {d0[], d1[]}, [r4:64]
+ vld2.8 {d0[], d1[]}, [r4:128]
+ vld2.8 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld2.8 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0d]
+@ CHECK: vld2.8 {d0[], d1[]}, [r4:16] @ encoding: [0xa4,0xf9,0x1f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d1[]}, [r4]!
+ vld2.8 {d0[], d1[]}, [r4:16]!
+ vld2.8 {d0[], d1[]}, [r4:32]!
+ vld2.8 {d0[], d1[]}, [r4:64]!
+ vld2.8 {d0[], d1[]}, [r4:128]!
+ vld2.8 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld2.8 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0d]
+@ CHECK: vld2.8 {d0[], d1[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x1d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d1[]}, [r4], r6
+ vld2.8 {d0[], d1[]}, [r4:16], r6
+ vld2.8 {d0[], d1[]}, [r4:32], r6
+ vld2.8 {d0[], d1[]}, [r4:64], r6
+ vld2.8 {d0[], d1[]}, [r4:128], r6
+ vld2.8 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0d]
+@ CHECK: vld2.8 {d0[], d1[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x16,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d2[]}, [r4]
+ vld2.8 {d0[], d2[]}, [r4:16]
+ vld2.8 {d0[], d2[]}, [r4:32]
+ vld2.8 {d0[], d2[]}, [r4:64]
+ vld2.8 {d0[], d2[]}, [r4:128]
+ vld2.8 {d0[], d2[]}, [r4:256]
+
+@ CHECK: vld2.8 {d0[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0d]
+@ CHECK: vld2.8 {d0[], d2[]}, [r4:16] @ encoding: [0xa4,0xf9,0x3f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d2[]}, [r4]!
+ vld2.8 {d0[], d2[]}, [r4:16]!
+ vld2.8 {d0[], d2[]}, [r4:32]!
+ vld2.8 {d0[], d2[]}, [r4:64]!
+ vld2.8 {d0[], d2[]}, [r4:128]!
+ vld2.8 {d0[], d2[]}, [r4:256]!
+
+@ CHECK: vld2.8 {d0[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0d]
+@ CHECK: vld2.8 {d0[], d2[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x3d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d2[]}, [r4], r6
+ vld2.8 {d0[], d2[]}, [r4:16], r6
+ vld2.8 {d0[], d2[]}, [r4:32], r6
+ vld2.8 {d0[], d2[]}, [r4:64], r6
+ vld2.8 {d0[], d2[]}, [r4:128], r6
+ vld2.8 {d0[], d2[]}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0d]
+@ CHECK: vld2.8 {d0[], d2[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x36,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1}, [r4]
+ vld2.16 {d0, d1}, [r4:16]
+ vld2.16 {d0, d1}, [r4:32]
+ vld2.16 {d0, d1}, [r4:64]
+ vld2.16 {d0, d1}, [r4:128]
+ vld2.16 {d0, d1}, [r4:256]
+
+@ CHECK: vld2.16 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x4f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x08]
+@ CHECK: vld2.16 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1}, [r4]!
+ vld2.16 {d0, d1}, [r4:16]!
+ vld2.16 {d0, d1}, [r4:32]!
+ vld2.16 {d0, d1}, [r4:64]!
+ vld2.16 {d0, d1}, [r4:128]!
+ vld2.16 {d0, d1}, [r4:256]!
+
+@ CHECK: vld2.16 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x08]
+@ CHECK: vld2.16 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1}, [r4], r6
+ vld2.16 {d0, d1}, [r4:16], r6
+ vld2.16 {d0, d1}, [r4:32], r6
+ vld2.16 {d0, d1}, [r4:64], r6
+ vld2.16 {d0, d1}, [r4:128], r6
+ vld2.16 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x08]
+@ CHECK: vld2.16 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d2}, [r4]
+ vld2.16 {d0, d2}, [r4:16]
+ vld2.16 {d0, d2}, [r4:32]
+ vld2.16 {d0, d2}, [r4:64]
+ vld2.16 {d0, d2}, [r4:128]
+ vld2.16 {d0, d2}, [r4:256]
+
+@ CHECK: vld2.16 {d0, d2}, [r4] @ encoding: [0x24,0xf9,0x4f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d2}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x09]
+@ CHECK: vld2.16 {d0, d2}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d2}, [r4]!
+ vld2.16 {d0, d2}, [r4:16]!
+ vld2.16 {d0, d2}, [r4:32]!
+ vld2.16 {d0, d2}, [r4:64]!
+ vld2.16 {d0, d2}, [r4:128]!
+ vld2.16 {d0, d2}, [r4:256]!
+
+@ CHECK: vld2.16 {d0, d2}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x09]
+@ CHECK: vld2.16 {d0, d2}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d2}, [r4], r6
+ vld2.16 {d0, d2}, [r4:16], r6
+ vld2.16 {d0, d2}, [r4:32], r6
+ vld2.16 {d0, d2}, [r4:64], r6
+ vld2.16 {d0, d2}, [r4:128], r6
+ vld2.16 {d0, d2}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x09]
+@ CHECK: vld2.16 {d0, d2}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1, d2, d3}, [r4]
+ vld2.16 {d0, d1, d2, d3}, [r4:16]
+ vld2.16 {d0, d1, d2, d3}, [r4:32]
+ vld2.16 {d0, d1, d2, d3}, [r4:64]
+ vld2.16 {d0, d1, d2, d3}, [r4:128]
+ vld2.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x4f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x03]
+
+ vld2.16 {d0, d1, d2, d3}, [r4]!
+ vld2.16 {d0, d1, d2, d3}, [r4:16]!
+ vld2.16 {d0, d1, d2, d3}, [r4:32]!
+ vld2.16 {d0, d1, d2, d3}, [r4:64]!
+ vld2.16 {d0, d1, d2, d3}, [r4:128]!
+ vld2.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x03]
+
+ vld2.16 {d0, d1, d2, d3}, [r4], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:16], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:32], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:64], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:128], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x03]
+
+ vld2.16 {d0[2], d1[2]}, [r4]
+ vld2.16 {d0[2], d1[2]}, [r4:16]
+ vld2.16 {d0[2], d1[2]}, [r4:32]
+ vld2.16 {d0[2], d1[2]}, [r4:64]
+ vld2.16 {d0[2], d1[2]}, [r4:128]
+ vld2.16 {d0[2], d1[2]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4:32] @ encoding: [0xa4,0xf9,0x9f,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d1[2]}, [r4]!
+ vld2.16 {d0[2], d1[2]}, [r4:16]!
+ vld2.16 {d0[2], d1[2]}, [r4:32]!
+ vld2.16 {d0[2], d1[2]}, [r4:64]!
+ vld2.16 {d0[2], d1[2]}, [r4:128]!
+ vld2.16 {d0[2], d1[2]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4:32]! @ encoding: [0xa4,0xf9,0x9d,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d1[2]}, [r4], r6
+ vld2.16 {d0[2], d1[2]}, [r4:16], r6
+ vld2.16 {d0[2], d1[2]}, [r4:32], r6
+ vld2.16 {d0[2], d1[2]}, [r4:64], r6
+ vld2.16 {d0[2], d1[2]}, [r4:128], r6
+ vld2.16 {d0[2], d1[2]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x96,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d2[2]}, [r4]
+ vld2.16 {d0[2], d2[2]}, [r4:16]
+ vld2.16 {d0[2], d2[2]}, [r4:32]
+ vld2.16 {d0[2], d2[2]}, [r4:64]
+ vld2.16 {d0[2], d2[2]}, [r4:128]
+ vld2.16 {d0[2], d2[2]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d2[2]}, [r4]!
+ vld2.16 {d0[2], d2[2]}, [r4:16]!
+ vld2.16 {d0[2], d2[2]}, [r4:32]!
+ vld2.16 {d0[2], d2[2]}, [r4:64]!
+ vld2.16 {d0[2], d2[2]}, [r4:128]!
+ vld2.16 {d0[2], d2[2]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d2[2]}, [r4], r6
+ vld2.16 {d0[2], d2[2]}, [r4:16], r6
+ vld2.16 {d0[2], d2[2]}, [r4:32], r6
+ vld2.16 {d0[2], d2[2]}, [r4:64], r6
+ vld2.16 {d0[2], d2[2]}, [r4:128], r6
+ vld2.16 {d0[2], d2[2]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d1[]}, [r4]
+ vld2.16 {d0[], d1[]}, [r4:16]
+ vld2.16 {d0[], d1[]}, [r4:32]
+ vld2.16 {d0[], d1[]}, [r4:64]
+ vld2.16 {d0[], d1[]}, [r4:128]
+ vld2.16 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d1[]}, [r4:32] @ encoding: [0xa4,0xf9,0x5f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d1[]}, [r4]!
+ vld2.16 {d0[], d1[]}, [r4:16]!
+ vld2.16 {d0[], d1[]}, [r4:32]!
+ vld2.16 {d0[], d1[]}, [r4:64]!
+ vld2.16 {d0[], d1[]}, [r4:128]!
+ vld2.16 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d1[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x5d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d1[]}, [r4], r6
+ vld2.16 {d0[], d1[]}, [r4:16], r6
+ vld2.16 {d0[], d1[]}, [r4:32], r6
+ vld2.16 {d0[], d1[]}, [r4:64], r6
+ vld2.16 {d0[], d1[]}, [r4:128], r6
+ vld2.16 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d1[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x56,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d2[]}, [r4]
+ vld2.16 {d0[], d2[]}, [r4:16]
+ vld2.16 {d0[], d2[]}, [r4:32]
+ vld2.16 {d0[], d2[]}, [r4:64]
+ vld2.16 {d0[], d2[]}, [r4:128]
+ vld2.16 {d0[], d2[]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d2[]}, [r4:32] @ encoding: [0xa4,0xf9,0x7f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d2[]}, [r4]!
+ vld2.16 {d0[], d2[]}, [r4:16]!
+ vld2.16 {d0[], d2[]}, [r4:32]!
+ vld2.16 {d0[], d2[]}, [r4:64]!
+ vld2.16 {d0[], d2[]}, [r4:128]!
+ vld2.16 {d0[], d2[]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d2[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x7d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d2[]}, [r4], r6
+ vld2.16 {d0[], d2[]}, [r4:16], r6
+ vld2.16 {d0[], d2[]}, [r4:32], r6
+ vld2.16 {d0[], d2[]}, [r4:64], r6
+ vld2.16 {d0[], d2[]}, [r4:128], r6
+ vld2.16 {d0[], d2[]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d2[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x76,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1}, [r4]
+ vld2.32 {d0, d1}, [r4:16]
+ vld2.32 {d0, d1}, [r4:32]
+ vld2.32 {d0, d1}, [r4:64]
+ vld2.32 {d0, d1}, [r4:128]
+ vld2.32 {d0, d1}, [r4:256]
+
+@ CHECK: vld2.32 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x08]
+@ CHECK: vld2.32 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1}, [r4]!
+ vld2.32 {d0, d1}, [r4:16]!
+ vld2.32 {d0, d1}, [r4:32]!
+ vld2.32 {d0, d1}, [r4:64]!
+ vld2.32 {d0, d1}, [r4:128]!
+ vld2.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vld2.32 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x08]
+@ CHECK: vld2.32 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1}, [r4], r6
+ vld2.32 {d0, d1}, [r4:16], r6
+ vld2.32 {d0, d1}, [r4:32], r6
+ vld2.32 {d0, d1}, [r4:64], r6
+ vld2.32 {d0, d1}, [r4:128], r6
+ vld2.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x08]
+@ CHECK: vld2.32 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d2}, [r4]
+ vld2.32 {d0, d2}, [r4:16]
+ vld2.32 {d0, d2}, [r4:32]
+ vld2.32 {d0, d2}, [r4:64]
+ vld2.32 {d0, d2}, [r4:128]
+ vld2.32 {d0, d2}, [r4:256]
+
+@ CHECK: vld2.32 {d0, d2}, [r4] @ encoding: [0x24,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d2}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x09]
+@ CHECK: vld2.32 {d0, d2}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d2}, [r4]!
+ vld2.32 {d0, d2}, [r4:16]!
+ vld2.32 {d0, d2}, [r4:32]!
+ vld2.32 {d0, d2}, [r4:64]!
+ vld2.32 {d0, d2}, [r4:128]!
+ vld2.32 {d0, d2}, [r4:256]!
+
+@ CHECK: vld2.32 {d0, d2}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x09]
+@ CHECK: vld2.32 {d0, d2}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d2}, [r4], r6
+ vld2.32 {d0, d2}, [r4:16], r6
+ vld2.32 {d0, d2}, [r4:32], r6
+ vld2.32 {d0, d2}, [r4:64], r6
+ vld2.32 {d0, d2}, [r4:128], r6
+ vld2.32 {d0, d2}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x09]
+@ CHECK: vld2.32 {d0, d2}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1, d2, d3}, [r4]
+ vld2.32 {d0, d1, d2, d3}, [r4:16]
+ vld2.32 {d0, d1, d2, d3}, [r4:32]
+ vld2.32 {d0, d1, d2, d3}, [r4:64]
+ vld2.32 {d0, d1, d2, d3}, [r4:128]
+ vld2.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x8f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x03]
+
+ vld2.32 {d0, d1, d2, d3}, [r4]!
+ vld2.32 {d0, d1, d2, d3}, [r4:16]!
+ vld2.32 {d0, d1, d2, d3}, [r4:32]!
+ vld2.32 {d0, d1, d2, d3}, [r4:64]!
+ vld2.32 {d0, d1, d2, d3}, [r4:128]!
+ vld2.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x03]
+
+ vld2.32 {d0, d1, d2, d3}, [r4], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:16], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:32], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:64], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:128], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x03]
+
+ vld2.32 {d0[1], d1[1]}, [r4]
+ vld2.32 {d0[1], d1[1]}, [r4:16]
+ vld2.32 {d0[1], d1[1]}, [r4:32]
+ vld2.32 {d0[1], d1[1]}, [r4:64]
+ vld2.32 {d0[1], d1[1]}, [r4:128]
+ vld2.32 {d0[1], d1[1]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d1[1]}, [r4]!
+ vld2.32 {d0[1], d1[1]}, [r4:16]!
+ vld2.32 {d0[1], d1[1]}, [r4:32]!
+ vld2.32 {d0[1], d1[1]}, [r4:64]!
+ vld2.32 {d0[1], d1[1]}, [r4:128]!
+ vld2.32 {d0[1], d1[1]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d1[1]}, [r4], r6
+ vld2.32 {d0[1], d1[1]}, [r4:16], r6
+ vld2.32 {d0[1], d1[1]}, [r4:32], r6
+ vld2.32 {d0[1], d1[1]}, [r4:64], r6
+ vld2.32 {d0[1], d1[1]}, [r4:128], r6
+ vld2.32 {d0[1], d1[1]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d2[1]}, [r4]
+ vld2.32 {d0[1], d2[1]}, [r4:16]
+ vld2.32 {d0[1], d2[1]}, [r4:32]
+ vld2.32 {d0[1], d2[1]}, [r4:64]
+ vld2.32 {d0[1], d2[1]}, [r4:128]
+ vld2.32 {d0[1], d2[1]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0xcf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4:64] @ encoding: [0xa4,0xf9,0xdf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d2[1]}, [r4]!
+ vld2.32 {d0[1], d2[1]}, [r4:16]!
+ vld2.32 {d0[1], d2[1]}, [r4:32]!
+ vld2.32 {d0[1], d2[1]}, [r4:64]!
+ vld2.32 {d0[1], d2[1]}, [r4:128]!
+ vld2.32 {d0[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0xcd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0xdd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d2[1]}, [r4], r6
+ vld2.32 {d0[1], d2[1]}, [r4:16], r6
+ vld2.32 {d0[1], d2[1]}, [r4:32], r6
+ vld2.32 {d0[1], d2[1]}, [r4:64], r6
+ vld2.32 {d0[1], d2[1]}, [r4:128], r6
+ vld2.32 {d0[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0xc6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xd6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d1[]}, [r4]
+ vld2.32 {d0[], d1[]}, [r4:16]
+ vld2.32 {d0[], d1[]}, [r4:32]
+ vld2.32 {d0[], d1[]}, [r4:64]
+ vld2.32 {d0[], d1[]}, [r4:128]
+ vld2.32 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d1[]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d1[]}, [r4]!
+ vld2.32 {d0[], d1[]}, [r4:16]!
+ vld2.32 {d0[], d1[]}, [r4:32]!
+ vld2.32 {d0[], d1[]}, [r4:64]!
+ vld2.32 {d0[], d1[]}, [r4:128]!
+ vld2.32 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d1[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d1[]}, [r4], r6
+ vld2.32 {d0[], d1[]}, [r4:16], r6
+ vld2.32 {d0[], d1[]}, [r4:32], r6
+ vld2.32 {d0[], d1[]}, [r4:64], r6
+ vld2.32 {d0[], d1[]}, [r4:128], r6
+ vld2.32 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d1[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d2[]}, [r4]
+ vld2.32 {d0[], d2[]}, [r4:16]
+ vld2.32 {d0[], d2[]}, [r4:32]
+ vld2.32 {d0[], d2[]}, [r4:64]
+ vld2.32 {d0[], d2[]}, [r4:128]
+ vld2.32 {d0[], d2[]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d2[]}, [r4:64] @ encoding: [0xa4,0xf9,0xbf,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d2[]}, [r4]!
+ vld2.32 {d0[], d2[]}, [r4:16]!
+ vld2.32 {d0[], d2[]}, [r4:32]!
+ vld2.32 {d0[], d2[]}, [r4:64]!
+ vld2.32 {d0[], d2[]}, [r4:128]!
+ vld2.32 {d0[], d2[]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d2[]}, [r4:64]! @ encoding: [0xa4,0xf9,0xbd,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d2[]}, [r4], r6
+ vld2.32 {d0[], d2[]}, [r4:16], r6
+ vld2.32 {d0[], d2[]}, [r4:32], r6
+ vld2.32 {d0[], d2[]}, [r4:64], r6
+ vld2.32 {d0[], d2[]}, [r4:128], r6
+ vld2.32 {d0[], d2[]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d2[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xb6,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d1, d2}, [r4]
+ vld3.8 {d0, d1, d2}, [r4:16]
+ vld3.8 {d0, d1, d2}, [r4:32]
+ vld3.8 {d0, d1, d2}, [r4:64]
+ vld3.8 {d0, d1, d2}, [r4:128]
+ vld3.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld3.8 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x0f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d1, d2}, [r4]!
+ vld3.8 {d0, d1, d2}, [r4:16]!
+ vld3.8 {d0, d1, d2}, [r4:32]!
+ vld3.8 {d0, d1, d2}, [r4:64]!
+ vld3.8 {d0, d1, d2}, [r4:128]!
+ vld3.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld3.8 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d1, d2}, [r4], r6
+ vld3.8 {d0, d1, d2}, [r4:16], r6
+ vld3.8 {d0, d1, d2}, [r4:32], r6
+ vld3.8 {d0, d1, d2}, [r4:64], r6
+ vld3.8 {d0, d1, d2}, [r4:128], r6
+ vld3.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d2, d4}, [r4]
+ vld3.8 {d0, d2, d4}, [r4:16]
+ vld3.8 {d0, d2, d4}, [r4:32]
+ vld3.8 {d0, d2, d4}, [r4:64]
+ vld3.8 {d0, d2, d4}, [r4:128]
+ vld3.8 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vld3.8 {d0, d2, d4}, [r4] @ encoding: [0x24,0xf9,0x0f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d2, d4}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d2, d4}, [r4]!
+ vld3.8 {d0, d2, d4}, [r4:16]!
+ vld3.8 {d0, d2, d4}, [r4:32]!
+ vld3.8 {d0, d2, d4}, [r4:64]!
+ vld3.8 {d0, d2, d4}, [r4:128]!
+ vld3.8 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vld3.8 {d0, d2, d4}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d2, d4}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d2, d4}, [r4], r6
+ vld3.8 {d0, d2, d4}, [r4:16], r6
+ vld3.8 {d0, d2, d4}, [r4:32], r6
+ vld3.8 {d0, d2, d4}, [r4:64], r6
+ vld3.8 {d0, d2, d4}, [r4:128], r6
+ vld3.8 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0, d2, d4}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vld3.8 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld3.8 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d1[], d2[]}, [r4]
+ vld3.8 {d0[], d1[], d2[]}, [r4:16]
+ vld3.8 {d0[], d1[], d2[]}, [r4:32]
+ vld3.8 {d0[], d1[], d2[]}, [r4:64]
+ vld3.8 {d0[], d1[], d2[]}, [r4:128]
+ vld3.8 {d0[], d1[], d2[]}, [r4:256]
+
+@ CHECK: vld3.8 {d0[], d1[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d1[], d2[]}, [r4]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:16]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:32]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:64]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:128]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:256]!
+
+@ CHECK: vld3.8 {d0[], d1[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d1[], d2[]}, [r4], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:16], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:32], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:64], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:128], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0[], d1[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d2[], d4[]}, [r4]
+ vld3.8 {d0[], d2[], d4[]}, [r4:16]
+ vld3.8 {d0[], d2[], d4[]}, [r4:32]
+ vld3.8 {d0[], d2[], d4[]}, [r4:64]
+ vld3.8 {d0[], d2[], d4[]}, [r4:128]
+ vld3.8 {d0[], d2[], d4[]}, [r4:256]
+
+@ CHECK: vld3.8 {d0[], d2[], d4[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d2[], d4[]}, [r4]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:16]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:32]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:64]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:128]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:256]!
+
+@ CHECK: vld3.8 {d0[], d2[], d4[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d2[], d4[]}, [r4], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:16], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:32], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:64], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:128], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0[], d2[], d4[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d1, d2}, [r4]
+ vld3.16 {d0, d1, d2}, [r4:16]
+ vld3.16 {d0, d1, d2}, [r4:32]
+ vld3.16 {d0, d1, d2}, [r4:64]
+ vld3.16 {d0, d1, d2}, [r4:128]
+ vld3.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld3.16 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x4f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d1, d2}, [r4]!
+ vld3.16 {d0, d1, d2}, [r4:16]!
+ vld3.16 {d0, d1, d2}, [r4:32]!
+ vld3.16 {d0, d1, d2}, [r4:64]!
+ vld3.16 {d0, d1, d2}, [r4:128]!
+ vld3.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld3.16 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d1, d2}, [r4], r6
+ vld3.16 {d0, d1, d2}, [r4:16], r6
+ vld3.16 {d0, d1, d2}, [r4:32], r6
+ vld3.16 {d0, d1, d2}, [r4:64], r6
+ vld3.16 {d0, d1, d2}, [r4:128], r6
+ vld3.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d2, d4}, [r4]
+ vld3.16 {d0, d2, d4}, [r4:16]
+ vld3.16 {d0, d2, d4}, [r4:32]
+ vld3.16 {d0, d2, d4}, [r4:64]
+ vld3.16 {d0, d2, d4}, [r4:128]
+ vld3.16 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vld3.16 {d0, d2, d4}, [r4] @ encoding: [0x24,0xf9,0x4f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d2, d4}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d2, d4}, [r4]!
+ vld3.16 {d0, d2, d4}, [r4:16]!
+ vld3.16 {d0, d2, d4}, [r4:32]!
+ vld3.16 {d0, d2, d4}, [r4:64]!
+ vld3.16 {d0, d2, d4}, [r4:128]!
+ vld3.16 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vld3.16 {d0, d2, d4}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d2, d4}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d2, d4}, [r4], r6
+ vld3.16 {d0, d2, d4}, [r4:16], r6
+ vld3.16 {d0, d2, d4}, [r4:32], r6
+ vld3.16 {d0, d2, d4}, [r4:64], r6
+ vld3.16 {d0, d2, d4}, [r4:128], r6
+ vld3.16 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0, d2, d4}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d1[], d2[]}, [r4]
+ vld3.16 {d0[], d1[], d2[]}, [r4:16]
+ vld3.16 {d0[], d1[], d2[]}, [r4:32]
+ vld3.16 {d0[], d1[], d2[]}, [r4:64]
+ vld3.16 {d0[], d1[], d2[]}, [r4:128]
+ vld3.16 {d0[], d1[], d2[]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[], d1[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d1[], d2[]}, [r4]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:16]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:32]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:64]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:128]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[], d1[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d1[], d2[]}, [r4], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:16], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:32], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:64], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:128], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[], d1[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d2[], d4[]}, [r4]
+ vld3.16 {d0[], d2[], d4[]}, [r4:16]
+ vld3.16 {d0[], d2[], d4[]}, [r4:32]
+ vld3.16 {d0[], d2[], d4[]}, [r4:64]
+ vld3.16 {d0[], d2[], d4[]}, [r4:128]
+ vld3.16 {d0[], d2[], d4[]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[], d2[], d4[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d2[], d4[]}, [r4]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:16]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:32]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:64]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:128]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[], d2[], d4[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d2[], d4[]}, [r4], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:16], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:32], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:64], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:128], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[], d2[], d4[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:256], r6
+@ CHECK-ERRORS:                                          ^
+
+ vld3.32 {d0, d1, d2}, [r4]
+ vld3.32 {d0, d1, d2}, [r4:16]
+ vld3.32 {d0, d1, d2}, [r4:32]
+ vld3.32 {d0, d1, d2}, [r4:64]
+ vld3.32 {d0, d1, d2}, [r4:128]
+ vld3.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld3.32 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x8f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d1, d2}, [r4]!
+ vld3.32 {d0, d1, d2}, [r4:16]!
+ vld3.32 {d0, d1, d2}, [r4:32]!
+ vld3.32 {d0, d1, d2}, [r4:64]!
+ vld3.32 {d0, d1, d2}, [r4:128]!
+ vld3.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld3.32 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d1, d2}, [r4], r6
+ vld3.32 {d0, d1, d2}, [r4:16], r6
+ vld3.32 {d0, d1, d2}, [r4:32], r6
+ vld3.32 {d0, d1, d2}, [r4:64], r6
+ vld3.32 {d0, d1, d2}, [r4:128], r6
+ vld3.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d2, d4}, [r4]
+ vld3.32 {d0, d2, d4}, [r4:16]
+ vld3.32 {d0, d2, d4}, [r4:32]
+ vld3.32 {d0, d2, d4}, [r4:64]
+ vld3.32 {d0, d2, d4}, [r4:128]
+ vld3.32 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vld3.32 {d0, d2, d4}, [r4] @ encoding: [0x24,0xf9,0x8f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d2, d4}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d2, d4}, [r4]!
+ vld3.32 {d0, d2, d4}, [r4:16]!
+ vld3.32 {d0, d2, d4}, [r4:32]!
+ vld3.32 {d0, d2, d4}, [r4:64]!
+ vld3.32 {d0, d2, d4}, [r4:128]!
+ vld3.32 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vld3.32 {d0, d2, d4}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d2, d4}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d2, d4}, [r4], r6
+ vld3.32 {d0, d2, d4}, [r4:16], r6
+ vld3.32 {d0, d2, d4}, [r4:32], r6
+ vld3.32 {d0, d2, d4}, [r4:64], r6
+ vld3.32 {d0, d2, d4}, [r4:128], r6
+ vld3.32 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0, d2, d4}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0xa4,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0xa4,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d1[], d2[]}, [r4]
+ vld3.32 {d0[], d1[], d2[]}, [r4:16]
+ vld3.32 {d0[], d1[], d2[]}, [r4:32]
+ vld3.32 {d0[], d1[], d2[]}, [r4:64]
+ vld3.32 {d0[], d1[], d2[]}, [r4:128]
+ vld3.32 {d0[], d1[], d2[]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[], d1[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d1[], d2[]}, [r4]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:16]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:32]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:64]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:128]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[], d1[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d1[], d2[]}, [r4], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:16], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:32], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:64], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:128], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[], d1[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d2[], d4[]}, [r4]
+ vld3.32 {d0[], d2[], d4[]}, [r4:16]
+ vld3.32 {d0[], d2[], d4[]}, [r4:32]
+ vld3.32 {d0[], d2[], d4[]}, [r4:64]
+ vld3.32 {d0[], d2[], d4[]}, [r4:128]
+ vld3.32 {d0[], d2[], d4[]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[], d2[], d4[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d2[], d4[]}, [r4]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:16]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:32]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:64]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:128]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[], d2[], d4[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d2[], d4[]}, [r4], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:16], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:32], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:64], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:128], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[], d2[], d4[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0, d1, d2, d3}, [r4]
+ vld4.8 {d0, d1, d2, d3}, [r4:16]
+ vld4.8 {d0, d1, d2, d3}, [r4:32]
+ vld4.8 {d0, d1, d2, d3}, [r4:64]
+ vld4.8 {d0, d1, d2, d3}, [r4:128]
+ vld4.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x0f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x00]
+
+ vld4.8 {d0, d1, d2, d3}, [r4]!
+ vld4.8 {d0, d1, d2, d3}, [r4:16]!
+ vld4.8 {d0, d1, d2, d3}, [r4:32]!
+ vld4.8 {d0, d1, d2, d3}, [r4:64]!
+ vld4.8 {d0, d1, d2, d3}, [r4:128]!
+ vld4.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x00]
+
+ vld4.8 {d0, d1, d2, d3}, [r4], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:16], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:32], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:64], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:128], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x00]
+
+ vld4.8 {d0, d2, d4, d6}, [r4]
+ vld4.8 {d0, d2, d4, d6}, [r4:16]
+ vld4.8 {d0, d2, d4, d6}, [r4:32]
+ vld4.8 {d0, d2, d4, d6}, [r4:64]
+ vld4.8 {d0, d2, d4, d6}, [r4:128]
+ vld4.8 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4] @ encoding: [0x24,0xf9,0x0f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x01]
+
+ vld4.8 {d0, d2, d4, d6}, [r4]!
+ vld4.8 {d0, d2, d4, d6}, [r4:16]!
+ vld4.8 {d0, d2, d4, d6}, [r4:32]!
+ vld4.8 {d0, d2, d4, d6}, [r4:64]!
+ vld4.8 {d0, d2, d4, d6}, [r4:128]!
+ vld4.8 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x01]
+
+ vld4.8 {d0, d2, d4, d6}, [r4], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:16], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:32], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:64], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:128], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x01]
+
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32] @ encoding: [0xa4,0xf9,0x3f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]! @ encoding: [0xa4,0xf9,0x3d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x36,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]
+
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32] @ encoding: [0xa4,0xf9,0x1f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]!
+
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x1d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x16,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]
+
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32] @ encoding: [0xa4,0xf9,0x3f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4]!
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]!
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32]!
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]!
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]!
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]!
+
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x3d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x36,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0, d1, d2, d3}, [r4]
+ vld4.16 {d0, d1, d2, d3}, [r4:16]
+ vld4.16 {d0, d1, d2, d3}, [r4:32]
+ vld4.16 {d0, d1, d2, d3}, [r4:64]
+ vld4.16 {d0, d1, d2, d3}, [r4:128]
+ vld4.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x00]
+
+ vld4.16 {d0, d1, d2, d3}, [r4]!
+ vld4.16 {d0, d1, d2, d3}, [r4:16]!
+ vld4.16 {d0, d1, d2, d3}, [r4:32]!
+ vld4.16 {d0, d1, d2, d3}, [r4:64]!
+ vld4.16 {d0, d1, d2, d3}, [r4:128]!
+ vld4.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x00]
+
+ vld4.16 {d0, d1, d2, d3}, [r4], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:16], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:32], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:64], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:128], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x00]
+
+ vld4.16 {d0, d2, d4, d6}, [r4]
+ vld4.16 {d0, d2, d4, d6}, [r4:16]
+ vld4.16 {d0, d2, d4, d6}, [r4:32]
+ vld4.16 {d0, d2, d4, d6}, [r4:64]
+ vld4.16 {d0, d2, d4, d6}, [r4:128]
+ vld4.16 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4] @ encoding: [0x24,0xf9,0x4f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x01]
+
+ vld4.16 {d0, d2, d4, d6}, [r4]!
+ vld4.16 {d0, d2, d4, d6}, [r4:16]!
+ vld4.16 {d0, d2, d4, d6}, [r4:32]!
+ vld4.16 {d0, d2, d4, d6}, [r4:64]!
+ vld4.16 {d0, d2, d4, d6}, [r4:128]!
+ vld4.16 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x01]
+
+ vld4.16 {d0, d2, d4, d6}, [r4], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:16], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:32], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:64], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:128], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x01]
+
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x7f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x7d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x76,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64] @ encoding: [0xa4,0xf9,0x5f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x5d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x56,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64] @ encoding: [0xa4,0xf9,0x7f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x7d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x76,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0, d1, d2, d3}, [r4]
+ vld4.32 {d0, d1, d2, d3}, [r4:16]
+ vld4.32 {d0, d1, d2, d3}, [r4:32]
+ vld4.32 {d0, d1, d2, d3}, [r4:64]
+ vld4.32 {d0, d1, d2, d3}, [r4:128]
+ vld4.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x8f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x00]
+
+ vld4.32 {d0, d1, d2, d3}, [r4]!
+ vld4.32 {d0, d1, d2, d3}, [r4:16]!
+ vld4.32 {d0, d1, d2, d3}, [r4:32]!
+ vld4.32 {d0, d1, d2, d3}, [r4:64]!
+ vld4.32 {d0, d1, d2, d3}, [r4:128]!
+ vld4.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x00]
+
+ vld4.32 {d0, d1, d2, d3}, [r4], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:16], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:32], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:64], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:128], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x00]
+
+ vld4.32 {d0, d2, d4, d6}, [r4]
+ vld4.32 {d0, d2, d4, d6}, [r4:16]
+ vld4.32 {d0, d2, d4, d6}, [r4:32]
+ vld4.32 {d0, d2, d4, d6}, [r4:64]
+ vld4.32 {d0, d2, d4, d6}, [r4:128]
+ vld4.32 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4] @ encoding: [0x24,0xf9,0x8f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x01]
+
+ vld4.32 {d0, d2, d4, d6}, [r4]!
+ vld4.32 {d0, d2, d4, d6}, [r4:16]!
+ vld4.32 {d0, d2, d4, d6}, [r4:32]!
+ vld4.32 {d0, d2, d4, d6}, [r4:64]!
+ vld4.32 {d0, d2, d4, d6}, [r4:128]!
+ vld4.32 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x01]
+
+ vld4.32 {d0, d2, d4, d6}, [r4], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:16], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:32], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:64], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:128], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x01]
+
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x0b]
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128] @ encoding: [0xa4,0xf9,0xaf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x0b]
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]! @ encoding: [0xa4,0xf9,0xad,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x0b]
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xa6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0xa4,0xf9,0xcf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0xa4,0xf9,0xdf,0x0b]
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128] @ encoding: [0xa4,0xf9,0xef,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0xa4,0xf9,0xcd,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0xdd,0x0b]
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]! @ encoding: [0xa4,0xf9,0xed,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0xc6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xd6,0x0b]
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xe6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x0f]
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128] @ encoding: [0xa4,0xf9,0xdf,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x0f]
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128]! @ encoding: [0xa4,0xf9,0xdd,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x0f]
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xd6,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64] @ encoding: [0xa4,0xf9,0xbf,0x0f]
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128] @ encoding: [0xa4,0xf9,0xff,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64]! @ encoding: [0xa4,0xf9,0xbd,0x0f]
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128]! @ encoding: [0xa4,0xf9,0xfd,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xb6,0x0f]
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xf6,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0}, [r4]
+ vst1.8 {d0}, [r4:16]
+ vst1.8 {d0}, [r4:32]
+ vst1.8 {d0}, [r4:64]
+ vst1.8 {d0}, [r4:128]
+ vst1.8 {d0}, [r4:256]
+
+@ CHECK: vst1.8 {d0}, [r4] @ encoding: [0x04,0xf9,0x0f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0}, [r4]!
+ vst1.8 {d0}, [r4:16]!
+ vst1.8 {d0}, [r4:32]!
+ vst1.8 {d0}, [r4:64]!
+ vst1.8 {d0}, [r4:128]!
+ vst1.8 {d0}, [r4:256]!
+
+@ CHECK: vst1.8 {d0}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0}, [r4], r6
+ vst1.8 {d0}, [r4:16], r6
+ vst1.8 {d0}, [r4:32], r6
+ vst1.8 {d0}, [r4:64], r6
+ vst1.8 {d0}, [r4:128], r6
+ vst1.8 {d0}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1}, [r4]
+ vst1.8 {d0, d1}, [r4:16]
+ vst1.8 {d0, d1}, [r4:32]
+ vst1.8 {d0, d1}, [r4:64]
+ vst1.8 {d0, d1}, [r4:128]
+ vst1.8 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.8 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x0f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x0a]
+@ CHECK: vst1.8 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1}, [r4]!
+ vst1.8 {d0, d1}, [r4:16]!
+ vst1.8 {d0, d1}, [r4:32]!
+ vst1.8 {d0, d1}, [r4:64]!
+ vst1.8 {d0, d1}, [r4:128]!
+ vst1.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.8 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x0a]
+@ CHECK: vst1.8 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1}, [r4], r6
+ vst1.8 {d0, d1}, [r4:16], r6
+ vst1.8 {d0, d1}, [r4:32], r6
+ vst1.8 {d0, d1}, [r4:64], r6
+ vst1.8 {d0, d1}, [r4:128], r6
+ vst1.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x0a]
+@ CHECK: vst1.8 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2}, [r4]
+ vst1.8 {d0, d1, d2}, [r4:16]
+ vst1.8 {d0, d1, d2}, [r4:32]
+ vst1.8 {d0, d1, d2}, [r4:64]
+ vst1.8 {d0, d1, d2}, [r4:128]
+ vst1.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.8 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x0f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2}, [r4]!
+ vst1.8 {d0, d1, d2}, [r4:16]!
+ vst1.8 {d0, d1, d2}, [r4:32]!
+ vst1.8 {d0, d1, d2}, [r4:64]!
+ vst1.8 {d0, d1, d2}, [r4:128]!
+ vst1.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.8 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2}, [r4], r6
+ vst1.8 {d0, d1, d2}, [r4:16], r6
+ vst1.8 {d0, d1, d2}, [r4:32], r6
+ vst1.8 {d0, d1, d2}, [r4:64], r6
+ vst1.8 {d0, d1, d2}, [r4:128], r6
+ vst1.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2, d3}, [r4]
+ vst1.8 {d0, d1, d2, d3}, [r4:16]
+ vst1.8 {d0, d1, d2, d3}, [r4:32]
+ vst1.8 {d0, d1, d2, d3}, [r4:64]
+ vst1.8 {d0, d1, d2, d3}, [r4:128]
+ vst1.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x0f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x02]
+
+ vst1.8 {d0, d1, d2, d3}, [r4]!
+ vst1.8 {d0, d1, d2, d3}, [r4:16]!
+ vst1.8 {d0, d1, d2, d3}, [r4:32]!
+ vst1.8 {d0, d1, d2, d3}, [r4:64]!
+ vst1.8 {d0, d1, d2, d3}, [r4:128]!
+ vst1.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x02]
+
+ vst1.8 {d0, d1, d2, d3}, [r4], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x02]
+
+ vst1.8 {d0[2]}, [r4]
+ vst1.8 {d0[2]}, [r4:16]
+ vst1.8 {d0[2]}, [r4:32]
+ vst1.8 {d0[2]}, [r4:64]
+ vst1.8 {d0[2]}, [r4:128]
+ vst1.8 {d0[2]}, [r4:256]
+
+@ CHECK: vst1.8 {d0[2]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0[2]}, [r4]!
+ vst1.8 {d0[2]}, [r4:16]!
+ vst1.8 {d0[2]}, [r4:32]!
+ vst1.8 {d0[2]}, [r4:64]!
+ vst1.8 {d0[2]}, [r4:128]!
+ vst1.8 {d0[2]}, [r4:256]!
+
+@ CHECK: vst1.8 {d0[2]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0[2]}, [r4], r6
+ vst1.8 {d0[2]}, [r4:16], r6
+ vst1.8 {d0[2]}, [r4:32], r6
+ vst1.8 {d0[2]}, [r4:64], r6
+ vst1.8 {d0[2]}, [r4:128], r6
+ vst1.8 {d0[2]}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0[2]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0}, [r4]
+ vst1.16 {d0}, [r4:16]
+ vst1.16 {d0}, [r4:32]
+ vst1.16 {d0}, [r4:64]
+ vst1.16 {d0}, [r4:128]
+ vst1.16 {d0}, [r4:256]
+
+@ CHECK: vst1.16 {d0}, [r4] @ encoding: [0x04,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0}, [r4]!
+ vst1.16 {d0}, [r4:16]!
+ vst1.16 {d0}, [r4:32]!
+ vst1.16 {d0}, [r4:64]!
+ vst1.16 {d0}, [r4:128]!
+ vst1.16 {d0}, [r4:256]!
+
+@ CHECK: vst1.16 {d0}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0}, [r4], r6
+ vst1.16 {d0}, [r4:16], r6
+ vst1.16 {d0}, [r4:32], r6
+ vst1.16 {d0}, [r4:64], r6
+ vst1.16 {d0}, [r4:128], r6
+ vst1.16 {d0}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1}, [r4]
+ vst1.16 {d0, d1}, [r4:16]
+ vst1.16 {d0, d1}, [r4:32]
+ vst1.16 {d0, d1}, [r4:64]
+ vst1.16 {d0, d1}, [r4:128]
+ vst1.16 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.16 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x4f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x0a]
+@ CHECK: vst1.16 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1}, [r4]!
+ vst1.16 {d0, d1}, [r4:16]!
+ vst1.16 {d0, d1}, [r4:32]!
+ vst1.16 {d0, d1}, [r4:64]!
+ vst1.16 {d0, d1}, [r4:128]!
+ vst1.16 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.16 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x0a]
+@ CHECK: vst1.16 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1}, [r4], r6
+ vst1.16 {d0, d1}, [r4:16], r6
+ vst1.16 {d0, d1}, [r4:32], r6
+ vst1.16 {d0, d1}, [r4:64], r6
+ vst1.16 {d0, d1}, [r4:128], r6
+ vst1.16 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x0a]
+@ CHECK: vst1.16 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2}, [r4]
+ vst1.16 {d0, d1, d2}, [r4:16]
+ vst1.16 {d0, d1, d2}, [r4:32]
+ vst1.16 {d0, d1, d2}, [r4:64]
+ vst1.16 {d0, d1, d2}, [r4:128]
+ vst1.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.16 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2}, [r4]!
+ vst1.16 {d0, d1, d2}, [r4:16]!
+ vst1.16 {d0, d1, d2}, [r4:32]!
+ vst1.16 {d0, d1, d2}, [r4:64]!
+ vst1.16 {d0, d1, d2}, [r4:128]!
+ vst1.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.16 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2}, [r4], r6
+ vst1.16 {d0, d1, d2}, [r4:16], r6
+ vst1.16 {d0, d1, d2}, [r4:32], r6
+ vst1.16 {d0, d1, d2}, [r4:64], r6
+ vst1.16 {d0, d1, d2}, [r4:128], r6
+ vst1.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2, d3}, [r4]
+ vst1.16 {d0, d1, d2, d3}, [r4:16]
+ vst1.16 {d0, d1, d2, d3}, [r4:32]
+ vst1.16 {d0, d1, d2, d3}, [r4:64]
+ vst1.16 {d0, d1, d2, d3}, [r4:128]
+ vst1.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x4f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x7f,0x02]
+
+ vst1.16 {d0, d1, d2, d3}, [r4]!
+ vst1.16 {d0, d1, d2, d3}, [r4:16]!
+ vst1.16 {d0, d1, d2, d3}, [r4:32]!
+ vst1.16 {d0, d1, d2, d3}, [r4:64]!
+ vst1.16 {d0, d1, d2, d3}, [r4:128]!
+ vst1.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x7d,0x02]
+
+ vst1.16 {d0, d1, d2, d3}, [r4], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x76,0x02]
+
+ vst1.16 {d0[2]}, [r4]
+ vst1.16 {d0[2]}, [r4:16]
+ vst1.16 {d0[2]}, [r4:32]
+ vst1.16 {d0[2]}, [r4:64]
+ vst1.16 {d0[2]}, [r4:128]
+ vst1.16 {d0[2]}, [r4:256]
+
+@ CHECK: vst1.16 {d0[2]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x04]
+@ CHECK: vst1.16 {d0[2]}, [r4:16] @ encoding: [0x84,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0[2]}, [r4]!
+ vst1.16 {d0[2]}, [r4:16]!
+ vst1.16 {d0[2]}, [r4:32]!
+ vst1.16 {d0[2]}, [r4:64]!
+ vst1.16 {d0[2]}, [r4:128]!
+ vst1.16 {d0[2]}, [r4:256]!
+
+@ CHECK: vst1.16 {d0[2]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x04]
+@ CHECK: vst1.16 {d0[2]}, [r4:16]! @ encoding: [0x84,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0[2]}, [r4], r6
+ vst1.16 {d0[2]}, [r4:16], r6
+ vst1.16 {d0[2]}, [r4:32], r6
+ vst1.16 {d0[2]}, [r4:64], r6
+ vst1.16 {d0[2]}, [r4:128], r6
+ vst1.16 {d0[2]}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0[2]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x04]
+@ CHECK: vst1.16 {d0[2]}, [r4:16], r6 @ encoding: [0x84,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0}, [r4]
+ vst1.32 {d0}, [r4:16]
+ vst1.32 {d0}, [r4:32]
+ vst1.32 {d0}, [r4:64]
+ vst1.32 {d0}, [r4:128]
+ vst1.32 {d0}, [r4:256]
+
+@ CHECK: vst1.32 {d0}, [r4] @ encoding: [0x04,0xf9,0x8f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0}, [r4]!
+ vst1.32 {d0}, [r4:16]!
+ vst1.32 {d0}, [r4:32]!
+ vst1.32 {d0}, [r4:64]!
+ vst1.32 {d0}, [r4:128]!
+ vst1.32 {d0}, [r4:256]!
+
+@ CHECK: vst1.32 {d0}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0}, [r4], r6
+ vst1.32 {d0}, [r4:16], r6
+ vst1.32 {d0}, [r4:32], r6
+ vst1.32 {d0}, [r4:64], r6
+ vst1.32 {d0}, [r4:128], r6
+ vst1.32 {d0}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1}, [r4]
+ vst1.32 {d0, d1}, [r4:16]
+ vst1.32 {d0, d1}, [r4:32]
+ vst1.32 {d0, d1}, [r4:64]
+ vst1.32 {d0, d1}, [r4:128]
+ vst1.32 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.32 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x0a]
+@ CHECK: vst1.32 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1}, [r4]!
+ vst1.32 {d0, d1}, [r4:16]!
+ vst1.32 {d0, d1}, [r4:32]!
+ vst1.32 {d0, d1}, [r4:64]!
+ vst1.32 {d0, d1}, [r4:128]!
+ vst1.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.32 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x0a]
+@ CHECK: vst1.32 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1}, [r4], r6
+ vst1.32 {d0, d1}, [r4:16], r6
+ vst1.32 {d0, d1}, [r4:32], r6
+ vst1.32 {d0, d1}, [r4:64], r6
+ vst1.32 {d0, d1}, [r4:128], r6
+ vst1.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x0a]
+@ CHECK: vst1.32 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2}, [r4]
+ vst1.32 {d0, d1, d2}, [r4:16]
+ vst1.32 {d0, d1, d2}, [r4:32]
+ vst1.32 {d0, d1, d2}, [r4:64]
+ vst1.32 {d0, d1, d2}, [r4:128]
+ vst1.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.32 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x8f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2}, [r4]!
+ vst1.32 {d0, d1, d2}, [r4:16]!
+ vst1.32 {d0, d1, d2}, [r4:32]!
+ vst1.32 {d0, d1, d2}, [r4:64]!
+ vst1.32 {d0, d1, d2}, [r4:128]!
+ vst1.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.32 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2}, [r4], r6
+ vst1.32 {d0, d1, d2}, [r4:16], r6
+ vst1.32 {d0, d1, d2}, [r4:32], r6
+ vst1.32 {d0, d1, d2}, [r4:64], r6
+ vst1.32 {d0, d1, d2}, [r4:128], r6
+ vst1.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2, d3}, [r4]
+ vst1.32 {d0, d1, d2, d3}, [r4:16]
+ vst1.32 {d0, d1, d2, d3}, [r4:32]
+ vst1.32 {d0, d1, d2, d3}, [r4:64]
+ vst1.32 {d0, d1, d2, d3}, [r4:128]
+ vst1.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x8f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x02]
+
+ vst1.32 {d0, d1, d2, d3}, [r4]!
+ vst1.32 {d0, d1, d2, d3}, [r4:16]!
+ vst1.32 {d0, d1, d2, d3}, [r4:32]!
+ vst1.32 {d0, d1, d2, d3}, [r4:64]!
+ vst1.32 {d0, d1, d2, d3}, [r4:128]!
+ vst1.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x02]
+
+ vst1.32 {d0, d1, d2, d3}, [r4], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x02]
+
+ vst1.32 {d0[1]}, [r4]
+ vst1.32 {d0[1]}, [r4:16]
+ vst1.32 {d0[1]}, [r4:32]
+ vst1.32 {d0[1]}, [r4:64]
+ vst1.32 {d0[1]}, [r4:128]
+ vst1.32 {d0[1]}, [r4:256]
+
+@ CHECK: vst1.32 {d0[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0[1]}, [r4:32] @ encoding: [0x84,0xf9,0xbf,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0[1]}, [r4]!
+ vst1.32 {d0[1]}, [r4:16]!
+ vst1.32 {d0[1]}, [r4:32]!
+ vst1.32 {d0[1]}, [r4:64]!
+ vst1.32 {d0[1]}, [r4:128]!
+ vst1.32 {d0[1]}, [r4:256]!
+
+@ CHECK: vst1.32 {d0[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0[1]}, [r4:32]! @ encoding: [0x84,0xf9,0xbd,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0[1]}, [r4], r6
+ vst1.32 {d0[1]}, [r4:16], r6
+ vst1.32 {d0[1]}, [r4:32], r6
+ vst1.32 {d0[1]}, [r4:64], r6
+ vst1.32 {d0[1]}, [r4:128], r6
+ vst1.32 {d0[1]}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0[1]}, [r4:32], r6 @ encoding: [0x84,0xf9,0xb6,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0}, [r4]
+ vst1.64 {d0}, [r4:16]
+ vst1.64 {d0}, [r4:32]
+ vst1.64 {d0}, [r4:64]
+ vst1.64 {d0}, [r4:128]
+ vst1.64 {d0}, [r4:256]
+
+@ CHECK: vst1.64 {d0}, [r4] @ encoding: [0x04,0xf9,0xcf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0}, [r4]!
+ vst1.64 {d0}, [r4:16]!
+ vst1.64 {d0}, [r4:32]!
+ vst1.64 {d0}, [r4:64]!
+ vst1.64 {d0}, [r4:128]!
+ vst1.64 {d0}, [r4:256]!
+
+@ CHECK: vst1.64 {d0}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0}, [r4], r6
+ vst1.64 {d0}, [r4:16], r6
+ vst1.64 {d0}, [r4:32], r6
+ vst1.64 {d0}, [r4:64], r6
+ vst1.64 {d0}, [r4:128], r6
+ vst1.64 {d0}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1}, [r4]
+ vst1.64 {d0, d1}, [r4:16]
+ vst1.64 {d0, d1}, [r4:32]
+ vst1.64 {d0, d1}, [r4:64]
+ vst1.64 {d0, d1}, [r4:128]
+ vst1.64 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.64 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x0a]
+@ CHECK: vst1.64 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0xef,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1}, [r4]!
+ vst1.64 {d0, d1}, [r4:16]!
+ vst1.64 {d0, d1}, [r4:32]!
+ vst1.64 {d0, d1}, [r4:64]!
+ vst1.64 {d0, d1}, [r4:128]!
+ vst1.64 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.64 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x0a]
+@ CHECK: vst1.64 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0xed,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1}, [r4], r6
+ vst1.64 {d0, d1}, [r4:16], r6
+ vst1.64 {d0, d1}, [r4:32], r6
+ vst1.64 {d0, d1}, [r4:64], r6
+ vst1.64 {d0, d1}, [r4:128], r6
+ vst1.64 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x0a]
+@ CHECK: vst1.64 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0xe6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2}, [r4]
+ vst1.64 {d0, d1, d2}, [r4:16]
+ vst1.64 {d0, d1, d2}, [r4:32]
+ vst1.64 {d0, d1, d2}, [r4:64]
+ vst1.64 {d0, d1, d2}, [r4:128]
+ vst1.64 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.64 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0xcf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2}, [r4]!
+ vst1.64 {d0, d1, d2}, [r4:16]!
+ vst1.64 {d0, d1, d2}, [r4:32]!
+ vst1.64 {d0, d1, d2}, [r4:64]!
+ vst1.64 {d0, d1, d2}, [r4:128]!
+ vst1.64 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.64 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2}, [r4], r6
+ vst1.64 {d0, d1, d2}, [r4:16], r6
+ vst1.64 {d0, d1, d2}, [r4:32], r6
+ vst1.64 {d0, d1, d2}, [r4:64], r6
+ vst1.64 {d0, d1, d2}, [r4:128], r6
+ vst1.64 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2, d3}, [r4]
+ vst1.64 {d0, d1, d2, d3}, [r4:16]
+ vst1.64 {d0, d1, d2, d3}, [r4:32]
+ vst1.64 {d0, d1, d2, d3}, [r4:64]
+ vst1.64 {d0, d1, d2, d3}, [r4:128]
+ vst1.64 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0xcf,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xef,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xff,0x02]
+
+ vst1.64 {d0, d1, d2, d3}, [r4]!
+ vst1.64 {d0, d1, d2, d3}, [r4:16]!
+ vst1.64 {d0, d1, d2, d3}, [r4:32]!
+ vst1.64 {d0, d1, d2, d3}, [r4:64]!
+ vst1.64 {d0, d1, d2, d3}, [r4:128]!
+ vst1.64 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xed,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xfd,0x02]
+
+ vst1.64 {d0, d1, d2, d3}, [r4], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xe6,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xf6,0x02]
+
+ vst2.8 {d0, d1}, [r4]
+ vst2.8 {d0, d1}, [r4:16]
+ vst2.8 {d0, d1}, [r4:32]
+ vst2.8 {d0, d1}, [r4:64]
+ vst2.8 {d0, d1}, [r4:128]
+ vst2.8 {d0, d1}, [r4:256]
+
+@ CHECK: vst2.8 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x0f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x08]
+@ CHECK: vst2.8 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d1}, [r4]!
+ vst2.8 {d0, d1}, [r4:16]!
+ vst2.8 {d0, d1}, [r4:32]!
+ vst2.8 {d0, d1}, [r4:64]!
+ vst2.8 {d0, d1}, [r4:128]!
+ vst2.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vst2.8 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x08]
+@ CHECK: vst2.8 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d1}, [r4], r6
+ vst2.8 {d0, d1}, [r4:16], r6
+ vst2.8 {d0, d1}, [r4:32], r6
+ vst2.8 {d0, d1}, [r4:64], r6
+ vst2.8 {d0, d1}, [r4:128], r6
+ vst2.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x08]
+@ CHECK: vst2.8 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d2}, [r4]
+ vst2.8 {d0, d2}, [r4:16]
+ vst2.8 {d0, d2}, [r4:32]
+ vst2.8 {d0, d2}, [r4:64]
+ vst2.8 {d0, d2}, [r4:128]
+ vst2.8 {d0, d2}, [r4:256]
+
+@ CHECK: vst2.8 {d0, d2}, [r4] @ encoding: [0x04,0xf9,0x0f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d2}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x09]
+@ CHECK: vst2.8 {d0, d2}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d2}, [r4]!
+ vst2.8 {d0, d2}, [r4:16]!
+ vst2.8 {d0, d2}, [r4:32]!
+ vst2.8 {d0, d2}, [r4:64]!
+ vst2.8 {d0, d2}, [r4:128]!
+ vst2.8 {d0, d2}, [r4:256]!
+
+@ CHECK: vst2.8 {d0, d2}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x09]
+@ CHECK: vst2.8 {d0, d2}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d2}, [r4], r6
+ vst2.8 {d0, d2}, [r4:16], r6
+ vst2.8 {d0, d2}, [r4:32], r6
+ vst2.8 {d0, d2}, [r4:64], r6
+ vst2.8 {d0, d2}, [r4:128], r6
+ vst2.8 {d0, d2}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x09]
+@ CHECK: vst2.8 {d0, d2}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d1, d2, d3}, [r4]
+ vst2.8 {d0, d1, d2, d3}, [r4:16]
+ vst2.8 {d0, d1, d2, d3}, [r4:32]
+ vst2.8 {d0, d1, d2, d3}, [r4:64]
+ vst2.8 {d0, d1, d2, d3}, [r4:128]
+ vst2.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x0f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x03]
+
+ vst2.8 {d0, d1, d2, d3}, [r4]!
+ vst2.8 {d0, d1, d2, d3}, [r4:16]!
+ vst2.8 {d0, d1, d2, d3}, [r4:32]!
+ vst2.8 {d0, d1, d2, d3}, [r4:64]!
+ vst2.8 {d0, d1, d2, d3}, [r4:128]!
+ vst2.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x03]
+
+ vst2.8 {d0, d1, d2, d3}, [r4], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:16], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:32], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:64], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:128], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x03]
+
+ vst2.8 {d0[2], d1[2]}, [r4]
+ vst2.8 {d0[2], d1[2]}, [r4:16]
+ vst2.8 {d0[2], d1[2]}, [r4:32]
+ vst2.8 {d0[2], d1[2]}, [r4:64]
+ vst2.8 {d0[2], d1[2]}, [r4:128]
+ vst2.8 {d0[2], d1[2]}, [r4:256]
+
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x01]
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4:16] @ encoding: [0x84,0xf9,0x5f,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0[2], d1[2]}, [r4]!
+ vst2.8 {d0[2], d1[2]}, [r4:16]!
+ vst2.8 {d0[2], d1[2]}, [r4:32]!
+ vst2.8 {d0[2], d1[2]}, [r4:64]!
+ vst2.8 {d0[2], d1[2]}, [r4:128]!
+ vst2.8 {d0[2], d1[2]}, [r4:256]!
+
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x01]
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4:16]! @ encoding: [0x84,0xf9,0x5d,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0[2], d1[2]}, [r4], r6
+ vst2.8 {d0[2], d1[2]}, [r4:16], r6
+ vst2.8 {d0[2], d1[2]}, [r4:32], r6
+ vst2.8 {d0[2], d1[2]}, [r4:64], r6
+ vst2.8 {d0[2], d1[2]}, [r4:128], r6
+ vst2.8 {d0[2], d1[2]}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x01]
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4:16], r6 @ encoding: [0x84,0xf9,0x56,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1}, [r4]
+ vst2.32 {d0, d1}, [r4:16]
+ vst2.32 {d0, d1}, [r4:32]
+ vst2.32 {d0, d1}, [r4:64]
+ vst2.32 {d0, d1}, [r4:128]
+ vst2.32 {d0, d1}, [r4:256]
+
+@ CHECK: vst2.32 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x08]
+@ CHECK: vst2.32 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1}, [r4]!
+ vst2.32 {d0, d1}, [r4:16]!
+ vst2.32 {d0, d1}, [r4:32]!
+ vst2.32 {d0, d1}, [r4:64]!
+ vst2.32 {d0, d1}, [r4:128]!
+ vst2.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vst2.32 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x08]
+@ CHECK: vst2.32 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1}, [r4], r6
+ vst2.32 {d0, d1}, [r4:16], r6
+ vst2.32 {d0, d1}, [r4:32], r6
+ vst2.32 {d0, d1}, [r4:64], r6
+ vst2.32 {d0, d1}, [r4:128], r6
+ vst2.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x08]
+@ CHECK: vst2.32 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d2}, [r4]
+ vst2.32 {d0, d2}, [r4:16]
+ vst2.32 {d0, d2}, [r4:32]
+ vst2.32 {d0, d2}, [r4:64]
+ vst2.32 {d0, d2}, [r4:128]
+ vst2.32 {d0, d2}, [r4:256]
+
+@ CHECK: vst2.32 {d0, d2}, [r4] @ encoding: [0x04,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d2}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x09]
+@ CHECK: vst2.32 {d0, d2}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d2}, [r4]!
+ vst2.32 {d0, d2}, [r4:16]!
+ vst2.32 {d0, d2}, [r4:32]!
+ vst2.32 {d0, d2}, [r4:64]!
+ vst2.32 {d0, d2}, [r4:128]!
+ vst2.32 {d0, d2}, [r4:256]!
+
+@ CHECK: vst2.32 {d0, d2}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x09]
+@ CHECK: vst2.32 {d0, d2}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d2}, [r4], r6
+ vst2.32 {d0, d2}, [r4:16], r6
+ vst2.32 {d0, d2}, [r4:32], r6
+ vst2.32 {d0, d2}, [r4:64], r6
+ vst2.32 {d0, d2}, [r4:128], r6
+ vst2.32 {d0, d2}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x09]
+@ CHECK: vst2.32 {d0, d2}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1, d2, d3}, [r4]
+ vst2.32 {d0, d1, d2, d3}, [r4:16]
+ vst2.32 {d0, d1, d2, d3}, [r4:32]
+ vst2.32 {d0, d1, d2, d3}, [r4:64]
+ vst2.32 {d0, d1, d2, d3}, [r4:128]
+ vst2.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x8f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x03]
+
+ vst2.32 {d0, d1, d2, d3}, [r4]!
+ vst2.32 {d0, d1, d2, d3}, [r4:16]!
+ vst2.32 {d0, d1, d2, d3}, [r4:32]!
+ vst2.32 {d0, d1, d2, d3}, [r4:64]!
+ vst2.32 {d0, d1, d2, d3}, [r4:128]!
+ vst2.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x03]
+
+ vst2.32 {d0, d1, d2, d3}, [r4], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:16], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:32], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:64], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:128], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x03]
+
+ vst2.32 {d0[1], d1[1]}, [r4]
+ vst2.32 {d0[1], d1[1]}, [r4:16]
+ vst2.32 {d0[1], d1[1]}, [r4:32]
+ vst2.32 {d0[1], d1[1]}, [r4:64]
+ vst2.32 {d0[1], d1[1]}, [r4:128]
+ vst2.32 {d0[1], d1[1]}, [r4:256]
+
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4:64] @ encoding: [0x84,0xf9,0x9f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d1[1]}, [r4]!
+ vst2.32 {d0[1], d1[1]}, [r4:16]!
+ vst2.32 {d0[1], d1[1]}, [r4:32]!
+ vst2.32 {d0[1], d1[1]}, [r4:64]!
+ vst2.32 {d0[1], d1[1]}, [r4:128]!
+ vst2.32 {d0[1], d1[1]}, [r4:256]!
+
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x9d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d1[1]}, [r4], r6
+ vst2.32 {d0[1], d1[1]}, [r4:16], r6
+ vst2.32 {d0[1], d1[1]}, [r4:32], r6
+ vst2.32 {d0[1], d1[1]}, [r4:64], r6
+ vst2.32 {d0[1], d1[1]}, [r4:128], r6
+ vst2.32 {d0[1], d1[1]}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x96,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d2[1]}, [r4]
+ vst2.32 {d0[1], d2[1]}, [r4:16]
+ vst2.32 {d0[1], d2[1]}, [r4:32]
+ vst2.32 {d0[1], d2[1]}, [r4:64]
+ vst2.32 {d0[1], d2[1]}, [r4:128]
+ vst2.32 {d0[1], d2[1]}, [r4:256]
+
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4:64] @ encoding: [0x84,0xf9,0xdf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d2[1]}, [r4]!
+ vst2.32 {d0[1], d2[1]}, [r4:16]!
+ vst2.32 {d0[1], d2[1]}, [r4:32]!
+ vst2.32 {d0[1], d2[1]}, [r4:64]!
+ vst2.32 {d0[1], d2[1]}, [r4:128]!
+ vst2.32 {d0[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4:64]! @ encoding: [0x84,0xf9,0xdd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d2[1]}, [r4], r6
+ vst2.32 {d0[1], d2[1]}, [r4:16], r6
+ vst2.32 {d0[1], d2[1]}, [r4:32], r6
+ vst2.32 {d0[1], d2[1]}, [r4:64], r6
+ vst2.32 {d0[1], d2[1]}, [r4:128], r6
+ vst2.32 {d0[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0xd6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d1, d2}, [r4]
+ vst3.8 {d0, d1, d2}, [r4:16]
+ vst3.8 {d0, d1, d2}, [r4:32]
+ vst3.8 {d0, d1, d2}, [r4:64]
+ vst3.8 {d0, d1, d2}, [r4:128]
+ vst3.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst3.8 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x0f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d1, d2}, [r4]!
+ vst3.8 {d0, d1, d2}, [r4:16]!
+ vst3.8 {d0, d1, d2}, [r4:32]!
+ vst3.8 {d0, d1, d2}, [r4:64]!
+ vst3.8 {d0, d1, d2}, [r4:128]!
+ vst3.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst3.8 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d1, d2}, [r4], r6
+ vst3.8 {d0, d1, d2}, [r4:16], r6
+ vst3.8 {d0, d1, d2}, [r4:32], r6
+ vst3.8 {d0, d1, d2}, [r4:64], r6
+ vst3.8 {d0, d1, d2}, [r4:128], r6
+ vst3.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst3.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d2, d4}, [r4]
+ vst3.8 {d0, d2, d4}, [r4:16]
+ vst3.8 {d0, d2, d4}, [r4:32]
+ vst3.8 {d0, d2, d4}, [r4:64]
+ vst3.8 {d0, d2, d4}, [r4:128]
+ vst3.8 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vst3.8 {d0, d2, d4}, [r4] @ encoding: [0x04,0xf9,0x0f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d2, d4}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d2, d4}, [r4]!
+ vst3.8 {d0, d2, d4}, [r4:16]!
+ vst3.8 {d0, d2, d4}, [r4:32]!
+ vst3.8 {d0, d2, d4}, [r4:64]!
+ vst3.8 {d0, d2, d4}, [r4:128]!
+ vst3.8 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vst3.8 {d0, d2, d4}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d2, d4}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d2, d4}, [r4], r6
+ vst3.8 {d0, d2, d4}, [r4:16], r6
+ vst3.8 {d0, d2, d4}, [r4:32], r6
+ vst3.8 {d0, d2, d4}, [r4:64], r6
+ vst3.8 {d0, d2, d4}, [r4:128], r6
+ vst3.8 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vst3.8 {d0, d2, d4}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vst3.8 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0x2f,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst3.8 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0x2d,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst3.8 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x26,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d1, d2}, [r4]
+ vst3.16 {d0, d1, d2}, [r4:16]
+ vst3.16 {d0, d1, d2}, [r4:32]
+ vst3.16 {d0, d1, d2}, [r4:64]
+ vst3.16 {d0, d1, d2}, [r4:128]
+ vst3.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst3.16 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x4f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d1, d2}, [r4]!
+ vst3.16 {d0, d1, d2}, [r4:16]!
+ vst3.16 {d0, d1, d2}, [r4:32]!
+ vst3.16 {d0, d1, d2}, [r4:64]!
+ vst3.16 {d0, d1, d2}, [r4:128]!
+ vst3.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst3.16 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d1, d2}, [r4], r6
+ vst3.16 {d0, d1, d2}, [r4:16], r6
+ vst3.16 {d0, d1, d2}, [r4:32], r6
+ vst3.16 {d0, d1, d2}, [r4:64], r6
+ vst3.16 {d0, d1, d2}, [r4:128], r6
+ vst3.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d2, d4}, [r4]
+ vst3.16 {d0, d2, d4}, [r4:16]
+ vst3.16 {d0, d2, d4}, [r4:32]
+ vst3.16 {d0, d2, d4}, [r4:64]
+ vst3.16 {d0, d2, d4}, [r4:128]
+ vst3.16 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vst3.16 {d0, d2, d4}, [r4] @ encoding: [0x04,0xf9,0x4f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d2, d4}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d2, d4}, [r4]!
+ vst3.16 {d0, d2, d4}, [r4:16]!
+ vst3.16 {d0, d2, d4}, [r4:32]!
+ vst3.16 {d0, d2, d4}, [r4:64]!
+ vst3.16 {d0, d2, d4}, [r4:128]!
+ vst3.16 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vst3.16 {d0, d2, d4}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d2, d4}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d2, d4}, [r4], r6
+ vst3.16 {d0, d2, d4}, [r4:16], r6
+ vst3.16 {d0, d2, d4}, [r4:32], r6
+ vst3.16 {d0, d2, d4}, [r4:64], r6
+ vst3.16 {d0, d2, d4}, [r4:128], r6
+ vst3.16 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0, d2, d4}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vst3.16 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst3.16 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vst3.16 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0x84,0xf9,0x6f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vst3.16 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0x84,0xf9,0x6d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x66,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d1, d2}, [r4]
+ vst3.32 {d0, d1, d2}, [r4:16]
+ vst3.32 {d0, d1, d2}, [r4:32]
+ vst3.32 {d0, d1, d2}, [r4:64]
+ vst3.32 {d0, d1, d2}, [r4:128]
+ vst3.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst3.32 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x8f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d1, d2}, [r4]!
+ vst3.32 {d0, d1, d2}, [r4:16]!
+ vst3.32 {d0, d1, d2}, [r4:32]!
+ vst3.32 {d0, d1, d2}, [r4:64]!
+ vst3.32 {d0, d1, d2}, [r4:128]!
+ vst3.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst3.32 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d1, d2}, [r4], r6
+ vst3.32 {d0, d1, d2}, [r4:16], r6
+ vst3.32 {d0, d1, d2}, [r4:32], r6
+ vst3.32 {d0, d1, d2}, [r4:64], r6
+ vst3.32 {d0, d1, d2}, [r4:128], r6
+ vst3.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d2, d4}, [r4]
+ vst3.32 {d0, d2, d4}, [r4:16]
+ vst3.32 {d0, d2, d4}, [r4:32]
+ vst3.32 {d0, d2, d4}, [r4:64]
+ vst3.32 {d0, d2, d4}, [r4:128]
+ vst3.32 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vst3.32 {d0, d2, d4}, [r4] @ encoding: [0x04,0xf9,0x8f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d2, d4}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d2, d4}, [r4]!
+ vst3.32 {d0, d2, d4}, [r4:16]!
+ vst3.32 {d0, d2, d4}, [r4:32]!
+ vst3.32 {d0, d2, d4}, [r4:64]!
+ vst3.32 {d0, d2, d4}, [r4:128]!
+ vst3.32 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vst3.32 {d0, d2, d4}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d2, d4}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d2, d4}, [r4], r6
+ vst3.32 {d0, d2, d4}, [r4:16], r6
+ vst3.32 {d0, d2, d4}, [r4:32], r6
+ vst3.32 {d0, d2, d4}, [r4:64], r6
+ vst3.32 {d0, d2, d4}, [r4:128], r6
+ vst3.32 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0, d2, d4}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vst3.32 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst3.32 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vst3.32 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vst3.32 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.8 {d0, d1, d2, d3}, [r4]
+ vst4.8 {d0, d1, d2, d3}, [r4:16]
+ vst4.8 {d0, d1, d2, d3}, [r4:32]
+ vst4.8 {d0, d1, d2, d3}, [r4:64]
+ vst4.8 {d0, d1, d2, d3}, [r4:128]
+ vst4.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x0f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x00]
+
+ vst4.8 {d0, d1, d2, d3}, [r4]!
+ vst4.8 {d0, d1, d2, d3}, [r4:16]!
+ vst4.8 {d0, d1, d2, d3}, [r4:32]!
+ vst4.8 {d0, d1, d2, d3}, [r4:64]!
+ vst4.8 {d0, d1, d2, d3}, [r4:128]!
+ vst4.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x00]
+
+ vst4.8 {d0, d1, d2, d3}, [r4], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:16], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:32], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:64], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:128], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x00]
+
+ vst4.8 {d0, d2, d4, d6}, [r4]
+ vst4.8 {d0, d2, d4, d6}, [r4:16]
+ vst4.8 {d0, d2, d4, d6}, [r4:32]
+ vst4.8 {d0, d2, d4, d6}, [r4:64]
+ vst4.8 {d0, d2, d4, d6}, [r4:128]
+ vst4.8 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4] @ encoding: [0x04,0xf9,0x0f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x01]
+
+ vst4.8 {d0, d2, d4, d6}, [r4]!
+ vst4.8 {d0, d2, d4, d6}, [r4:16]!
+ vst4.8 {d0, d2, d4, d6}, [r4:32]!
+ vst4.8 {d0, d2, d4, d6}, [r4:64]!
+ vst4.8 {d0, d2, d4, d6}, [r4:128]!
+ vst4.8 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x01]
+
+ vst4.8 {d0, d2, d4, d6}, [r4], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:16], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:32], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:64], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:128], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x01]
+
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0x84,0xf9,0x2f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32] @ encoding: [0x84,0xf9,0x3f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x2d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]! @ encoding: [0x84,0xf9,0x3d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x26,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6 @ encoding: [0x84,0xf9,0x36,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0, d1, d2, d3}, [r4]
+ vst4.16 {d0, d1, d2, d3}, [r4:16]
+ vst4.16 {d0, d1, d2, d3}, [r4:32]
+ vst4.16 {d0, d1, d2, d3}, [r4:64]
+ vst4.16 {d0, d1, d2, d3}, [r4:128]
+ vst4.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x7f,0x00]
+
+ vst4.16 {d0, d1, d2, d3}, [r4]!
+ vst4.16 {d0, d1, d2, d3}, [r4:16]!
+ vst4.16 {d0, d1, d2, d3}, [r4:32]!
+ vst4.16 {d0, d1, d2, d3}, [r4:64]!
+ vst4.16 {d0, d1, d2, d3}, [r4:128]!
+ vst4.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x7d,0x00]
+
+ vst4.16 {d0, d1, d2, d3}, [r4], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:16], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:32], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:64], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:128], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x76,0x00]
+
+ vst4.16 {d0, d2, d4, d6}, [r4]
+ vst4.16 {d0, d2, d4, d6}, [r4:16]
+ vst4.16 {d0, d2, d4, d6}, [r4:32]
+ vst4.16 {d0, d2, d4, d6}, [r4:64]
+ vst4.16 {d0, d2, d4, d6}, [r4:128]
+ vst4.16 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4] @ encoding: [0x04,0xf9,0x4f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x04,0xf9,0x7f,0x01]
+
+ vst4.16 {d0, d2, d4, d6}, [r4]!
+ vst4.16 {d0, d2, d4, d6}, [r4:16]!
+ vst4.16 {d0, d2, d4, d6}, [r4:32]!
+ vst4.16 {d0, d2, d4, d6}, [r4:64]!
+ vst4.16 {d0, d2, d4, d6}, [r4:128]!
+ vst4.16 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x04,0xf9,0x7d,0x01]
+
+ vst4.16 {d0, d2, d4, d6}, [r4], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:16], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:32], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:64], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:128], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x04,0xf9,0x76,0x01]
+
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0x84,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0x84,0xf9,0x6f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0x84,0xf9,0x7f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0x84,0xf9,0x6d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x7d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x66,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x76,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0, d1, d2, d3}, [r4]
+ vst4.32 {d0, d1, d2, d3}, [r4:16]
+ vst4.32 {d0, d1, d2, d3}, [r4:32]
+ vst4.32 {d0, d1, d2, d3}, [r4:64]
+ vst4.32 {d0, d1, d2, d3}, [r4:128]
+ vst4.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x8f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x00]
+
+ vst4.32 {d0, d1, d2, d3}, [r4]!
+ vst4.32 {d0, d1, d2, d3}, [r4:16]!
+ vst4.32 {d0, d1, d2, d3}, [r4:32]!
+ vst4.32 {d0, d1, d2, d3}, [r4:64]!
+ vst4.32 {d0, d1, d2, d3}, [r4:128]!
+ vst4.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x00]
+
+ vst4.32 {d0, d1, d2, d3}, [r4], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:16], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:32], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:64], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:128], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x00]
+
+ vst4.32 {d0, d2, d4, d6}, [r4]
+ vst4.32 {d0, d2, d4, d6}, [r4:16]
+ vst4.32 {d0, d2, d4, d6}, [r4:32]
+ vst4.32 {d0, d2, d4, d6}, [r4:64]
+ vst4.32 {d0, d2, d4, d6}, [r4:128]
+ vst4.32 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4] @ encoding: [0x04,0xf9,0x8f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x01]
+
+ vst4.32 {d0, d2, d4, d6}, [r4]!
+ vst4.32 {d0, d2, d4, d6}, [r4:16]!
+ vst4.32 {d0, d2, d4, d6}, [r4:32]!
+ vst4.32 {d0, d2, d4, d6}, [r4:64]!
+ vst4.32 {d0, d2, d4, d6}, [r4:128]!
+ vst4.32 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x01]
+
+ vst4.32 {d0, d2, d4, d6}, [r4], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:16], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:32], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:64], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:128], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x01]
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0x84,0xf9,0x9f,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128] @ encoding: [0x84,0xf9,0xaf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x9d,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xad,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x96,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xa6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0x84,0xf9,0xdf,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128] @ encoding: [0x84,0xf9,0xef,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0x84,0xf9,0xdd,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xed,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0xd6,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xe6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x9d,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xad,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x96,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xa6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0x84,0xf9,0xdf,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128] @ encoding: [0x84,0xf9,0xef,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0x84,0xf9,0xdd,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xed,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0xd6,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xe6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
diff --git a/test/MC/ARM/not-armv4.s b/test/MC/ARM/not-armv4.s
new file mode 100644
index 000000000000..a1ba611a61e4
--- /dev/null
+++ b/test/MC/ARM/not-armv4.s
@@ -0,0 +1,8 @@
+@ RUN: not llvm-mc < %s -triple armv4-unknown-unknown -show-encoding 2>&1 | FileCheck %s
+
+@ PR18524
+@ CHECK: error: instruction requires: armv5t
+clz r4,r9
+
+@ CHECK: error: instruction requires: armv6t2
+rbit r4,r9
diff --git a/test/MC/ARM/pool.s b/test/MC/ARM/pool.s
new file mode 100644
index 000000000000..782f67e1fea5
--- /dev/null
+++ b/test/MC/ARM/pool.s
@@ -0,0 +1,19 @@
+@ RUN: llvm-mc -triple thumbv7-linux-gnueabi -o - %s | FileCheck %s
+
+ .syntax unified
+
+ .align 2
+ .global pool
+ .type pool,%function
+pool:
+ ldr r0, =0xba5eba11
+ bx lr
+ .pool
+
+@ CHECK-LABEL: pool
+@ CHECK: ldr r0, .Ltmp0
+@ CHECK: .align 2
+@ CHECK-LABEL: .Ltmp0:
+@ CHECK: .long 3126770193
+
+
diff --git a/test/MC/ARM/simple-fp-encoding.s b/test/MC/ARM/simple-fp-encoding.s
index d840e9cd798a..539dd2c4d976 100644
--- a/test/MC/ARM/simple-fp-encoding.s
+++ b/test/MC/ARM/simple-fp-encoding.s
@@ -395,3 +395,46 @@
@ CHECK: vmov.i32 d4, #0x0 @ encoding: [0x10,0x40,0x80,0xf2]
@ CHECK: vmov.i32 d4, #0x42000000 @ encoding: [0x12,0x46,0x84,0xf2]
+
+@ Test encoding of floating point constants for vmov functions
+@ vfp3
+ vmov.f32 s5, #1.0
+ vmov.f32 s5, #0.125
+ vmov.f32 s5, #-1.875
+ vmov.f32 s5, #-0.59375
+
+ vmov.f64 d6, #1.0
+ vmov.f64 d6, #0.125
+ vmov.f64 d6, #-1.875
+ vmov.f64 d6, #-0.59375
+
+@ neon
+ vmov.f32 d7, #1.0
+ vmov.f32 d7, #0.125
+ vmov.f32 d7, #-1.875
+ vmov.f32 d7, #-0.59375
+
+ vmov.f32 q8, #1.0
+ vmov.f32 q8, #0.125
+ vmov.f32 q8, #-1.875
+ vmov.f32 q8, #-0.59375
+
+@ CHECK: vmov.f32 s5, #1.000000e+00 @ encoding: [0x00,0x2a,0xf7,0xee]
+@ CHECK: vmov.f32 s5, #1.250000e-01 @ encoding: [0x00,0x2a,0xf4,0xee]
+@ CHECK: vmov.f32 s5, #-1.875000e+00 @ encoding: [0x0e,0x2a,0xff,0xee]
+@ CHECK: vmov.f32 s5, #-5.937500e-01 @ encoding: [0x03,0x2a,0xfe,0xee]
+
+@ CHECK: vmov.f64 d6, #1.000000e+00 @ encoding: [0x00,0x6b,0xb7,0xee]
+@ CHECK: vmov.f64 d6, #1.250000e-01 @ encoding: [0x00,0x6b,0xb4,0xee]
+@ CHECK: vmov.f64 d6, #-1.875000e+00 @ encoding: [0x0e,0x6b,0xbf,0xee]
+@ CHECK: vmov.f64 d6, #-5.937500e-01 @ encoding: [0x03,0x6b,0xbe,0xee]
+
+@ CHECK: vmov.f32 d7, #1.000000e+00 @ encoding: [0x10,0x7f,0x87,0xf2]
+@ CHECK: vmov.f32 d7, #1.250000e-01 @ encoding: [0x10,0x7f,0x84,0xf2]
+@ CHECK: vmov.f32 d7, #-1.875000e+00 @ encoding: [0x1e,0x7f,0x87,0xf3]
+@ CHECK: vmov.f32 d7, #-5.937500e-01 @ encoding: [0x13,0x7f,0x86,0xf3]
+
+@ CHECK: vmov.f32 q8, #1.000000e+00 @ encoding: [0x50,0x0f,0xc7,0xf2]
+@ CHECK: vmov.f32 q8, #1.250000e-01 @ encoding: [0x50,0x0f,0xc4,0xf2]
+@ CHECK: vmov.f32 q8, #-1.875000e+00 @ encoding: [0x5e,0x0f,0xc7,0xf3]
+@ CHECK: vmov.f32 q8, #-5.937500e-01 @ encoding: [0x53,0x0f,0xc6,0xf3]
diff --git a/test/MC/ARM/symbol-variants-errors.s b/test/MC/ARM/symbol-variants-errors.s
new file mode 100644
index 000000000000..03401cd98b7e
--- /dev/null
+++ b/test/MC/ARM/symbol-variants-errors.s
@@ -0,0 +1,23 @@
+@ RUN: not llvm-mc < %s -triple armv7-none-linux-gnueabi 2>&1 | FileCheck %s
+
+@ check for invalid variant
+f1:
+ bl bar(blargh)
+@CHECK: error: invalid variant 'blargh'
+@CHECK: bl bar(blargh)
+@CHECK: ^
+
+@ check for missing closing paren
+f2:
+ .word bar(got
+@CHECK: error: unexpected token in variant, expected ')'
+@CHECK: .word bar(got
+@CHECK: ^
+
+@ check for invalid symbol before variant end
+f3:
+ .word bar(got+2)
+
+@CHECK: error: unexpected token in variant, expected ')'
+@CHECK: .word bar(got+2)
+@CHECK: ^
diff --git a/test/MC/ARM/symbol-variants.s b/test/MC/ARM/symbol-variants.s
new file mode 100644
index 000000000000..a10fe5029e0a
--- /dev/null
+++ b/test/MC/ARM/symbol-variants.s
@@ -0,0 +1,91 @@
+@ RUN: llvm-mc < %s -triple armv7-none-linux-gnueabi -filetype=obj | llvm-objdump -triple armv7-none-linux-gnueabi -r - | FileCheck %s --check-prefix=CHECK --check-prefix=ARM
+@ RUN: llvm-mc < %s -triple thumbv7-none-linux-gnueabi -filetype=obj | llvm-objdump -triple thumbv7-none-linux-gnueabi -r - | FileCheck %s --check-prefix=CHECK --check-prefix=THUMB
+
+@ CHECK-LABEL: RELOCATION RECORDS FOR [.rel.text]
+.Lsym:
+
+@ empty
+.word f00
+.word f01
+@CHECK: 0 R_ARM_ABS32 f00
+@CHECK: 4 R_ARM_ABS32 f01
+
+@ none
+.word f02(NONE)
+.word f03(none)
+@CHECK: 8 R_ARM_NONE f02
+@CHECK: c R_ARM_NONE f03
+
+@ plt
+bl f04(PLT)
+bl f05(plt)
+@ARM: 10 R_ARM_PLT32 f04
+@ARM: 14 R_ARM_PLT32 f05
+@THUMB: 10 R_ARM_THM_CALL f04
+@THUMB: 14 R_ARM_THM_CALL f05
+
+@ got
+.word f06(GOT)
+.word f07(got)
+@CHECK: 18 R_ARM_GOT_BREL f06
+@CHECK: 1c R_ARM_GOT_BREL f07
+
+@ gotoff
+.word f08(GOTOFF)
+.word f09(gotoff)
+@CHECK: 20 R_ARM_GOTOFF32 f08
+@CHECK: 24 R_ARM_GOTOFF32 f09
+
+@ tpoff
+.word f10(TPOFF)
+.word f11(tpoff)
+@CHECK: 28 R_ARM_TLS_LE32 f10
+@CHECK: 2c R_ARM_TLS_LE32 f11
+
+@ tlsgd
+.word f12(TLSGD)
+.word f13(tlsgd)
+@CHECK: 30 R_ARM_TLS_GD32 f12
+@CHECK: 34 R_ARM_TLS_GD32 f13
+
+@ target1
+.word f14(TARGET1)
+.word f15(target1)
+@CHECK: 38 R_ARM_TARGET1 f14
+@CHECK: 3c R_ARM_TARGET1 f15
+
+@ target2
+.word f16(TARGET2)
+.word f17(target2)
+@CHECK: 40 R_ARM_TARGET2 f16
+@CHECK: 44 R_ARM_TARGET2 f17
+
+@ prel31
+.word f18(PREL31)
+.word f19(prel31)
+@CHECK: 48 R_ARM_PREL31 f18
+@CHECK: 4c R_ARM_PREL31 f19
+
+@ tlsldo
+.word f20(TLSLDO)
+.word f21(tlsldo)
+@CHECK: 50 R_ARM_TLS_LDO32 f20
+@CHECK: 54 R_ARM_TLS_LDO32 f21
+
+@ tlscall
+.word f22(TLSCALL)
+.word f23(tlscall)
+@ CHECK: 58 R_ARM_TLS_CALL f22
+@ CHECK: 5c R_ARM_TLS_CALL f23
+
+@ tlsdesc
+.word f24(TLSDESC)
+.word f25(tlsdesc)
+@ CHECK: 60 R_ARM_TLS_GOTDESC f24
+@ CHECK: 64 R_ARM_TLS_GOTDESC f25
+
+@ got_prel
+.word f26(GOT_PREL) + (. - .Lsym)
+ ldr r3, =f27(GOT_PREL)
+@ CHECK: 68 R_ARM_GOT_PREL f26
+@ CHECK: 70 R_ARM_GOT_PREL f27
diff --git a/test/MC/ARM/target-expressions.s b/test/MC/ARM/target-expressions.s
new file mode 100644
index 000000000000..c87cd22b2bad
--- /dev/null
+++ b/test/MC/ARM/target-expressions.s
@@ -0,0 +1,80 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype asm -o - %s | FileCheck %s
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s | llvm-readobj -r \
+@ RUN: | FileCheck -check-prefix CHECK-RELOCATIONS %s
+
+ .syntax unified
+
+ .type function,%function
+function:
+ bx lr
+
+ .global external
+ .type external,%function
+
+.set deadbeat, 0xdeadbea7
+
+ .type test,%function
+test:
+ movw r0, :lower16:function
+ movt r0, :upper16:function
+
+ movw r1, #:lower16:function
+ movt r1, #:upper16:function
+
+ movw r2, :lower16:deadbeat
+ movt r2, :upper16:deadbeat
+
+ movw r3, #:lower16:deadbeat
+ movt r3, #:upper16:deadbeat
+
+ movw r4, :lower16:0xD1510D6E
+ movt r4, :upper16:0xD1510D6E
+
+ movw r5, #:lower16:0xD1510D6E
+ movt r5, #:upper16:0xD1510D6E
+
+ movw r0, :lower16:external
+ movt r0, :upper16:external
+
+ movw r1, #:lower16:external
+ movt r1, #:upper16:external
+
+ movw r2, #:lower16:(16 + 16)
+ movt r2, #:upper16:(16 + 16)
+
+ movw r3, :lower16:(16 + 16)
+ movt r3, :upper16:(16 + 16)
+
+@ CHECK-LABEL: test:
+@ CHECK: movw r0, :lower16:function
+@ CHECK: movt r0, :upper16:function
+@ CHECK: movw r1, :lower16:function
+@ CHECK: movt r1, :upper16:function
+@ CHECK: movw r2, :lower16:(3735928487)
+@ CHECK: movt r2, :upper16:(3735928487)
+@ CHECK: movw r3, :lower16:(3735928487)
+@ CHECK: movt r3, :upper16:(3735928487)
+@ CHECK: movw r4, :lower16:(3511749998)
+@ CHECK: movt r4, :upper16:(3511749998)
+@ CHECK: movw r5, :lower16:(3511749998)
+@ CHECK: movt r5, :upper16:(3511749998)
+@ CHECK: movw r0, :lower16:external
+@ CHECK: movt r0, :upper16:external
+@ CHECK: movw r1, :lower16:external
+@ CHECK: movt r1, :upper16:external
+@ CHECK: movw r2, :lower16:(32)
+@ CHECK: movt r2, :upper16:(32)
+@ CHECK: movw r3, :lower16:(32)
+@ CHECK: movt r3, :upper16:(32)
+
+@ CHECK-RELOCATIONS: Relocations [
+@ CHECK-RELOCATIONS: 0x4 R_ARM_MOVW_ABS_NC function 0x0
+@ CHECK-RELOCATIONS: 0x8 R_ARM_MOVT_ABS function 0x0
+@ CHECK-RELOCATIONS: 0xC R_ARM_MOVW_ABS_NC function 0x0
+@ CHECK-RELOCATIONS: 0x10 R_ARM_MOVT_ABS function 0x0
+@ CHECK-RELOCATIONS: 0x34 R_ARM_MOVW_ABS_NC external 0x0
+@ CHECK-RELOCATIONS: 0x38 R_ARM_MOVT_ABS external 0x0
+@ CHECK-RELOCATIONS: 0x3C R_ARM_MOVW_ABS_NC external 0x0
+@ CHECK-RELOCATIONS: 0x40 R_ARM_MOVT_ABS external 0x0
+@ CHECK-RELOCATIONS: ]
+
diff --git a/test/MC/ARM/thumb-far-jump.s b/test/MC/ARM/thumb-far-jump.s
new file mode 100644
index 000000000000..2fd2c567d133
--- /dev/null
+++ b/test/MC/ARM/thumb-far-jump.s
@@ -0,0 +1,26 @@
+@ RUN: llvm-mc < %s -triple thumbv5-linux-gnueabi -filetype=obj -o - \
+@ RUN: | llvm-readobj -r | FileCheck %s
+ .syntax unified
+
+ .text
+ .align 2
+ .globl main
+ .type main,%function
+ .thumb_func
+main:
+ bl end
+ .space 8192
+end:
+ bl main2
+ bx lr
+
+ .text
+ .align 2
+ .globl main2
+ .type main2,%function
+ .thumb_func
+main2:
+ bx lr
+
+@ CHECK-NOT: 0x0 R_ARM_THM_CALL end 0x0
+@ CHECK: 0x2004 R_ARM_THM_CALL main2 0x0
diff --git a/test/MC/ARM/thumb-st_other.s b/test/MC/ARM/thumb-st_other.s
new file mode 100644
index 000000000000..8750c2bba5fd
--- /dev/null
+++ b/test/MC/ARM/thumb-st_other.s
@@ -0,0 +1,19 @@
+@ Check the value of st_other for a thumb function.
+
+@ ARM does not define any st_other flags for thumb functions. The value
+@ of st_other should always be 0.
+
+@ RUN: llvm-mc < %s -triple thumbv5-linux-gnueabi -filetype=obj -o - \
+@ RUN: | llvm-readobj -t | FileCheck %s
+
+ .syntax unified
+ .text
+ .align 2
+ .thumb_func
+ .global main
+ .type main,%function
+main:
+ bx lr
+
+@ CHECK: Name: main
+@ CHECK: Other: 0
diff --git a/test/MC/ARM/thumb-types.s b/test/MC/ARM/thumb-types.s
new file mode 100644
index 000000000000..b3aaf7d80532
--- /dev/null
+++ b/test/MC/ARM/thumb-types.s
@@ -0,0 +1,108 @@
+@ RUN: llvm-mc -triple armv7-elf -filetype obj -o - %s | llvm-readobj -t \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+
+ .thumb
+
+ .type implicit_function,%function
+implicit_function:
+ nop
+
+ .type implicit_data,%object
+implicit_data:
+ .long 0
+
+ .arm
+ .type arm_function,%function
+arm_function:
+ nop
+
+ .thumb
+
+ .text
+
+untyped_text_label:
+ nop
+
+ .type explicit_function,%function
+explicit_function:
+ nop
+
+ .long tls(TPOFF)
+
+ .type indirect_function,%gnu_indirect_function
+indirect_function:
+ nop
+
+ .data
+
+untyped_data_label:
+ nop
+
+ .type explicit_data,%object
+explicit_data:
+ .long 0
+
+ .section .tdata,"awT",%progbits
+ .type tls,%object
+ .align 2
+tls:
+ .long 42
+ .size tls, 4
+
+
+@ CHECK: Symbol {
+@ CHECK: Name: arm_function
+@ CHECK: Value: 0x6
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: explicit_data
+@ CHECK: Value: 0x2
+@ CHECK: Type: Object
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: explicit_function
+@ CHECK: Value: 0xD
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: implicit_data
+@ CHECK: Value: 0x2
+@ CHECK: Type: Object
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: implicit_function
+@ CHECK: Value: 0x1
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: indirect_function
+@ CHECK: Value: 0x13
+@ CHECK: Type: GNU_IFunc
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: tls
+@ CHECK: Value: 0x0
+@ CHECK: Type: TLS
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: untyped_data_label
+@ CHECK: Value: 0x0
+@ CHECK: Type: None
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: untyped_text_label
+@ CHECK: Value: 0xA
+@ CHECK: Type: None
+@ CHECK: }
+
diff --git a/test/MC/ARM/thumb2-cbn-to-next-inst.s b/test/MC/ARM/thumb2-cbn-to-next-inst.s
new file mode 100644
index 000000000000..a7ad11b24508
--- /dev/null
+++ b/test/MC/ARM/thumb2-cbn-to-next-inst.s
@@ -0,0 +1,33 @@
+@ RUN: llvm-mc -triple thumbv7-apple-darwin -filetype=obj -o %t.o %s
+@ RUN: llvm-objdump -triple thumbv7-apple-darwin -d %t.o | FileCheck %s
+
+.thumb
+start:
+.thumb_func start
+ add r1, r2, r3
+ cbnz r2, L1 @ this can't be encoded, must turn into a nop
+L1:
+ add r4, r5, r6
+ cbnz r2, L2
+ sub r7, r8, r9
+L2:
+ add r7, r8, r9
+ cbz r2, L3 @ this can't be encoded, must turn into a nop
+L3:
+ add r10, r11, r12
+ cbz r2, L4
+ sub r7, r8, r9
+L4:
+ add r3, r4, r5
+
+@ CHECK: 0: 02 eb 03 01 add.w r1, r2, r3
+@ CHECK: 4: 00 bf nop
+@ CHECK: 6: 05 eb 06 04 add.w r4, r5, r6
+@ CHECK: a: 0a b9 cbnz r2, #2
+@ CHECK: c: a8 eb 09 07 sub.w r7, r8, r9
+@ CHECK: 10: 08 eb 09 07 add.w r7, r8, r9
+@ CHECK: 14: 00 bf nop
+@ CHECK: 16: 0b eb 0c 0a add.w r10, r11, r12
+@ CHECK: 1a: 0a b1 cbz r2, #2
+@ CHECK: 1c: a8 eb 09 07 sub.w r7, r8, r9
+@ CHECK: 20: 04 eb 05 03 add.w r3, r4, r5
diff --git a/test/MC/ARM/thumb2-diagnostics.s b/test/MC/ARM/thumb2-diagnostics.s
index 6ac2db02cca7..b2b14bc13d6a 100644
--- a/test/MC/ARM/thumb2-diagnostics.s
+++ b/test/MC/ARM/thumb2-diagnostics.s
@@ -70,3 +70,21 @@
@ CHECK-ERRORS: error: branch target out of range
@ CHECK-ERRORS: error: branch target out of range
@ CHECK-ERRORS: error: branch target out of range
+
+foo2:
+ mov r0, foo2
+ movw r0, foo2
+ movt r0, foo2
+@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: immediate expression for mov requires :lower16: or :upper16
+@ CHECK-ERRORS: ^
+
+ and sp, r1, #80008000
+ and pc, r1, #80008000
+@ CHECK-ERRORS: error: invalid operand for instruction
+@ CHECK-ERRORS: error: invalid operand for instruction
+
+
diff --git a/test/MC/ARM/thumb2-ldrd.s b/test/MC/ARM/thumb2-ldrd.s
index 4463c21fe8c0..5166ff00ec18 100644
--- a/test/MC/ARM/thumb2-ldrd.s
+++ b/test/MC/ARM/thumb2-ldrd.s
@@ -1,9 +1,16 @@
-// RUN: not llvm-mc -arch thumb -mattr=+thumb2 \
-// RUN: < %s >/dev/null 2> %t
-// RUN: grep "error: destination operands can't be identical" %t | count 4
-// rdar://14479780
+@ RUN: not llvm-mc -triple thumb-eabi -mattr=+thumb2 %s -o /dev/null 2>&1 \
+@ RUN: | FileCheck %s
+
+@ rdar://14479780
ldrd r0, r0, [pc, #0]
ldrd r0, r0, [r1, #4]
ldrd r0, r0, [r1], #4
ldrd r0, r0, [r1, #4]!
+
+@ CHECK: error: destination operands can't be identical
+@ CHECK: error: destination operands can't be identical
+@ CHECK: error: destination operands can't be identical
+@ CHECK: error: destination operands can't be identical
+@ CHECK-NOT: error: destination operands can't be identical
+
diff --git a/test/MC/ARM/thumb2-mclass.s b/test/MC/ARM/thumb2-mclass.s
index b7af7236207d..d9c96dfcdd34 100644
--- a/test/MC/ARM/thumb2-mclass.s
+++ b/test/MC/ARM/thumb2-mclass.s
@@ -1,9 +1,10 @@
@ RUN: llvm-mc -triple=thumbv7m-apple-darwin -show-encoding < %s | FileCheck %s
+@ RUN: llvm-mc -triple=thumbv6m -show-encoding < %s | FileCheck %s
.syntax unified
.globl _func
@ Check that the assembler can handle the documented syntax from the ARM ARM.
-@ These tests test instruction encodings specific to v7m & v7m (FeatureMClass).
+@ These tests test instruction encodings specific to v6m & v7m (FeatureMClass).
@------------------------------------------------------------------------------
@ MRS
@@ -19,9 +20,6 @@
mrs r0, msp
mrs r0, psp
mrs r0, primask
- mrs r0, basepri
- mrs r0, basepri_max
- mrs r0, faultmask
mrs r0, control
@ CHECK: mrs r0, apsr @ encoding: [0xef,0xf3,0x00,0x80]
@@ -34,9 +32,6 @@
@ CHECK: mrs r0, msp @ encoding: [0xef,0xf3,0x08,0x80]
@ CHECK: mrs r0, psp @ encoding: [0xef,0xf3,0x09,0x80]
@ CHECK: mrs r0, primask @ encoding: [0xef,0xf3,0x10,0x80]
-@ CHECK: mrs r0, basepri @ encoding: [0xef,0xf3,0x11,0x80]
-@ CHECK: mrs r0, basepri_max @ encoding: [0xef,0xf3,0x12,0x80]
-@ CHECK: mrs r0, faultmask @ encoding: [0xef,0xf3,0x13,0x80]
@ CHECK: mrs r0, control @ encoding: [0xef,0xf3,0x14,0x80]
@------------------------------------------------------------------------------
@@ -65,9 +60,6 @@
msr msp, r0
msr psp, r0
msr primask, r0
- msr basepri, r0
- msr basepri_max, r0
- msr faultmask, r0
msr control, r0
@ CHECK: msr apsr, r0 @ encoding: [0x80,0xf3,0x00,0x88]
@@ -92,7 +84,4 @@
@ CHECK: msr msp, r0 @ encoding: [0x80,0xf3,0x08,0x88]
@ CHECK: msr psp, r0 @ encoding: [0x80,0xf3,0x09,0x88]
@ CHECK: msr primask, r0 @ encoding: [0x80,0xf3,0x10,0x88]
-@ CHECK: msr basepri, r0 @ encoding: [0x80,0xf3,0x11,0x88]
-@ CHECK: msr basepri_max, r0 @ encoding: [0x80,0xf3,0x12,0x88]
-@ CHECK: msr faultmask, r0 @ encoding: [0x80,0xf3,0x13,0x88]
@ CHECK: msr control, r0 @ encoding: [0x80,0xf3,0x14,0x88]
diff --git a/test/MC/ARM/thumb2-strd.s b/test/MC/ARM/thumb2-strd.s
new file mode 100644
index 000000000000..3f8025d2cd70
--- /dev/null
+++ b/test/MC/ARM/thumb2-strd.s
@@ -0,0 +1,10 @@
+@ RUN: not llvm-mc -triple=armv7-linux-gnueabi %s 2>&1 | FileCheck %s
+.text
+.thumb
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: error: invalid operand for instruction
+@ CHECK: error: invalid operand for instruction
+strd r12, SP, [r0, #256]
+strd r12, SP, [r0, #256]!
+strd r12, SP, [r0], #256
diff --git a/test/MC/ARM/thumb2be-b.w-encoding.s b/test/MC/ARM/thumb2be-b.w-encoding.s
new file mode 100644
index 000000000000..2c3e31b3d986
--- /dev/null
+++ b/test/MC/ARM/thumb2be-b.w-encoding.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc -triple=thumbv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-LE
+@ RUN: llvm-mc -triple=thumbebv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-BE
+
+b.w bar
+@ CHECK-LE: b.w bar @ encoding: [A,0xf0'A',A,0x90'A']
+@ CHECK-LE-NEXT: @ fixup A - offset: 0, value: bar, kind: fixup_t2_uncondbranch
+@ CHECK-BE: b.w bar @ encoding: [0xf0'A',A,0x90'A',A]
+@ CHECK-BE-NEXT: @ fixup A - offset: 0, value: bar, kind: fixup_t2_uncondbranch
+
diff --git a/test/MC/ARM/thumb2be-beq.w-encoding.s b/test/MC/ARM/thumb2be-beq.w-encoding.s
new file mode 100644
index 000000000000..e39e541f69f1
--- /dev/null
+++ b/test/MC/ARM/thumb2be-beq.w-encoding.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc -triple=thumbv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-LE
+@ RUN: llvm-mc -triple=thumbebv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-BE
+
+beq.w bar
+@ CHECK-LE: beq.w bar @ encoding: [A,0xf0'A',A,0x80'A']
+@ CHECK-LE-NEXT: @ fixup A - offset: 0, value: bar, kind: fixup_t2_condbranch
+@ CHECK-BE: beq.w bar @ encoding: [0xf0'A',A,0x80'A',A]
+@ CHECK-BE-NEXT: @ fixup A - offset: 0, value: bar, kind: fixup_t2_condbranch
+
diff --git a/test/MC/ARM/thumb2be-movt-encoding.s b/test/MC/ARM/thumb2be-movt-encoding.s
new file mode 100644
index 000000000000..cc6c04ef18b5
--- /dev/null
+++ b/test/MC/ARM/thumb2be-movt-encoding.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc -triple=thumbv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-LE
+@ RUN: llvm-mc -triple=thumbebv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-BE
+
+movt r9, :upper16:(_bar)
+@ CHECK-LE: movt r9, :upper16:_bar @ encoding: [0xc0'A',0xf2'A',0b0000AAAA,0x09]
+@ CHECK-LE-NEXT: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_movt_hi16
+@ CHECK-BE: movt r9, :upper16:_bar @ encoding: [0xf2,0b1100AAAA,0x09'A',A]
+@ CHECK-BE-NEXT: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_movt_hi16
+
diff --git a/test/MC/ARM/thumb2be-movw-encoding.s b/test/MC/ARM/thumb2be-movw-encoding.s
new file mode 100644
index 000000000000..3bff457961be
--- /dev/null
+++ b/test/MC/ARM/thumb2be-movw-encoding.s
@@ -0,0 +1,9 @@
+@ RUN: llvm-mc -triple=thumbv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-LE
+@ RUN: llvm-mc -triple=thumbebv7-none-linux-gnueabi -show-encoding < %s | FileCheck %s --check-prefix=CHECK-BE
+
+movw r9, :lower16:(_bar)
+@ CHECK-LE: movw r9, :lower16:_bar @ encoding: [0x40'A',0xf2'A',0b0000AAAA,0x09]
+@ CHECK-LE-NEXT: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_movw_lo16
+@ CHECK-BE: movw r9, :lower16:_bar @ encoding: [0xf2,0b0100AAAA,0x09'A',A]
+@ CHECK-BE-NEXT: @ fixup A - offset: 0, value: _bar, kind: fixup_t2_movw_lo16
+
diff --git a/test/MC/ARM/thumb_set-diagnostics.s b/test/MC/ARM/thumb_set-diagnostics.s
new file mode 100644
index 000000000000..5f1844de01ff
--- /dev/null
+++ b/test/MC/ARM/thumb_set-diagnostics.s
@@ -0,0 +1,43 @@
+@ RUN: not llvm-mc -triple armv7-eabi -o /dev/null 2>&1 %s | FileCheck %s
+
+ .syntax unified
+
+ .thumb
+
+ .thumb_set
+
+@ CHECK: error: expected identifier after '.thumb_set'
+@ CHECK: .thumb_set
+@ CHECK: ^
+
+ .thumb_set ., 0x0b5e55ed
+
+@ CHECK: error: expected identifier after '.thumb_set'
+@ CHECK: .thumb_set ., 0x0b5e55ed
+@ CHECK: ^
+
+ .thumb_set labelled, 0x1abe11ed
+ .thumb_set invalid, :lower16:labelled
+
+@ CHECK: error: unknown token in expression
+@ CHECK: .thumb_set invalid, :lower16:labelled
+@ CHECK: ^
+
+ .thumb_set missing_comma
+
+@ CHECK: error: expected comma after name 'missing_comma'
+@ CHECK: .thumb_set missing_comma
+@ CHECK: ^
+
+ .thumb_set missing_expression,
+
+@ CHECK: error: missing expression
+@ CHECK: .thumb_set missing_expression,
+@ CHECK: ^
+
+ .thumb_set trailer_trash, 0x11fe1e55,
+
+@ CHECK: error: unexpected token
+@ CHECK: .thumb_set trailer_trash, 0x11fe1e55,
+@ CHECK: ^
+
diff --git a/test/MC/ARM/thumb_set.s b/test/MC/ARM/thumb_set.s
new file mode 100644
index 000000000000..d2a0dc04730c
--- /dev/null
+++ b/test/MC/ARM/thumb_set.s
@@ -0,0 +1,154 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s | llvm-readobj -t \
+@ RUN: | FileCheck %s
+
+@ RUN: llvm-mc -triple armv7-eabi -filetype asm -o - %s \
+@ RUN: | FileCheck --check-prefix=ASM %s
+
+ .syntax unified
+
+ .arm
+
+ .type arm_func,%function
+arm_func:
+ nop
+
+ .thumb_set alias_arm_func, arm_func
+
+ alias_arm_func2 = alias_arm_func
+ alias_arm_func3 = alias_arm_func2
+
+@ ASM: .thumb_set alias_arm_func, arm_func
+
+ .thumb
+
+ .type thumb_func,%function
+ .thumb_func
+thumb_func:
+ nop
+
+ .thumb_set alias_thumb_func, thumb_func
+
+ .thumb_set seedless, 0x5eed1e55
+ .thumb_set eggsalad, seedless + 0x87788358
+ .thumb_set faceless, ~eggsalad + 0xe133c002
+
+ .thumb_set alias_undefined_data, badblood
+
+ .data
+
+ .type badblood,%object
+badblood:
+ .long 0xbadb100d
+
+ .type bedazzle,%object
+bedazzle:
+ .long 0xbeda221e
+
+ .text
+ .thumb
+
+ .thumb_set alias_defined_data, bedazzle
+
+ .type alpha,%function
+alpha:
+ nop
+
+ .type beta,%function
+beta:
+ bkpt
+
+ .thumb_set beta, alpha
+
+@ CHECK: Symbol {
+@ CHECK: Name: alias_arm_func
+@ CHECK: Value: 0x1
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: alias_arm_func2
+@ CHECK: Value: 0x1
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: alias_arm_func3
+@ CHECK: Value: 0x1
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: alias_defined_data
+@ CHECK: Value: 0x5
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: alias_thumb_func
+@ CHECK: Value: 0x5
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: alias_undefined_data
+@ CHECK: Value: 0x0
+@ CHECK: Type: Object
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: alpha
+@ CHECK: Value: 0x7
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: arm_func
+@ CHECK: Value: 0x0
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: badblood
+@ CHECK-NEXT: Value: 0x0
+@ CHECK-NEXT: Size: 0
+@ CHECK-NEXT: Binding: Local
+@ CHECK-NEXT: Type: Object
+@ CHECK-NEXT: Other: 0
+@ CHECK-NEXT: Section: .data
+@ CHECK-NEXT: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: bedazzle
+@ CHECK: Value: 0x4
+@ CHECK: Type: Object
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: beta
+@ CHECK: Value: 0x7
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: eggsalad
+@ CHECK: Value: 0xE665A1AD
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: faceless
+@ CHECK: Value: 0xFACE1E55
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: seedless
+@ CHECK: Value: 0x5EED1E55
+@ CHECK: Type: Function
+@ CHECK: }
+
+@ CHECK: Symbol {
+@ CHECK: Name: thumb_func
+@ CHECK: Value: 0x5
+@ CHECK: Type: Function
+@ CHECK: }
diff --git a/test/MC/ARM/thumbv7m.s b/test/MC/ARM/thumbv7m.s
new file mode 100644
index 000000000000..33ed44cd3da4
--- /dev/null
+++ b/test/MC/ARM/thumbv7m.s
@@ -0,0 +1,45 @@
+@ RUN: llvm-mc -triple=thumbv7m-apple-darwin -show-encoding < %s | FileCheck %s
+@ RUN: not llvm-mc -triple=thumbv6 -show-encoding 2>&1 < %s | FileCheck %s --check-prefix=CHECK-V6M
+ .syntax unified
+ .globl _func
+
+@ Check that the assembler can handle the documented syntax from the ARM ARM.
+@ These tests test instruction encodings specific to ARMv7m.
+
+@------------------------------------------------------------------------------
+@ MRS
+@------------------------------------------------------------------------------
+
+ mrs r0, basepri
+ mrs r0, basepri_max
+ mrs r0, faultmask
+
+@ CHECK: mrs r0, basepri @ encoding: [0xef,0xf3,0x11,0x80]
+@ CHECK: mrs r0, basepri_max @ encoding: [0xef,0xf3,0x12,0x80]
+@ CHECK: mrs r0, faultmask @ encoding: [0xef,0xf3,0x13,0x80]
+
+@------------------------------------------------------------------------------
+@ MSR
+@------------------------------------------------------------------------------
+
+ msr basepri, r0
+ msr basepri_max, r0
+ msr faultmask, r0
+
+@ CHECK: msr basepri, r0 @ encoding: [0x80,0xf3,0x11,0x88]
+@ CHECK: msr basepri_max, r0 @ encoding: [0x80,0xf3,0x12,0x88]
+@ CHECK: msr faultmask, r0 @ encoding: [0x80,0xf3,0x13,0x88]
+
+@ CHECK-V6M: error: invalid operand for instruction
+@ CHECK-V6M-NEXT: mrs r0, basepri
+@ CHECK-V6M: error: invalid operand for instruction
+@ CHECK-V6M-NEXT: mrs r0, basepri_max
+@ CHECK-V6M: error: invalid operand for instruction
+@ CHECK-V6M-NEXT: mrs r0, faultmask
+@ CHECK-V6M: error: invalid operand for instruction
+@ CHECK-V6M-NEXT: msr basepri, r0
+@ CHECK-V6M: error: invalid operand for instruction
+@ CHECK-V6M-NEXT: msr basepri_max, r0
+@ CHECK-V6M: error: invalid operand for instruction
+@ CHECK-V6M-NEXT: msr faultmask, r0
+
diff --git a/test/MC/ARM/udf-arm-diagnostics.s b/test/MC/ARM/udf-arm-diagnostics.s
new file mode 100644
index 000000000000..9ec9bf2124f0
--- /dev/null
+++ b/test/MC/ARM/udf-arm-diagnostics.s
@@ -0,0 +1,19 @@
+@ RUN: not llvm-mc -triple arm-eabi %s 2>&1 | FileCheck %s
+
+ .syntax unified
+ .text
+ .arm
+
+undefined:
+ udfpl
+
+@ CHECK: error: instruction 'udf' is not predicable, but condition code specified
+@ CHECK: udfpl
+@ CHECK: ^
+
+ udf #65536
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: udf #65536
+@ CHECK: ^
+
diff --git a/test/MC/ARM/udf-arm.s b/test/MC/ARM/udf-arm.s
new file mode 100644
index 000000000000..a9d19ca66e6c
--- /dev/null
+++ b/test/MC/ARM/udf-arm.s
@@ -0,0 +1,11 @@
+@ RUN: llvm-mc -triple arm-eabi -show-encoding %s | FileCheck %s
+
+ .syntax unified
+ .text
+ .arm
+
+undefined:
+ udf #0
+
+@ CHECK: udf #0 @ encoding: [0xf0,0x00,0xf0,0xe7]
+
diff --git a/test/MC/ARM/udf-thumb-2-diagnostics.s b/test/MC/ARM/udf-thumb-2-diagnostics.s
new file mode 100644
index 000000000000..f8375601a031
--- /dev/null
+++ b/test/MC/ARM/udf-thumb-2-diagnostics.s
@@ -0,0 +1,25 @@
+@ RUN: not llvm-mc -triple thumbv7-eabi -mattr +thumb2 %s 2>&1 | FileCheck %s
+
+ .syntax unified
+ .text
+ .thumb
+
+undefined:
+ udfpl
+
+@ CHECK: error: instruction 'udf' is not predicable, but condition code specified
+@ CHECK: udfpl
+@ CHECK: ^
+
+ udf #256
+
+@ CHECK: error: instruction requires: arm-mode
+@ CHECK: udf #256
+@ CHECK: ^
+
+ udf.w #65536
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: udf.w #65536
+@ CHECK: ^
+
diff --git a/test/MC/ARM/udf-thumb-2.s b/test/MC/ARM/udf-thumb-2.s
new file mode 100644
index 000000000000..beb6549cb08f
--- /dev/null
+++ b/test/MC/ARM/udf-thumb-2.s
@@ -0,0 +1,13 @@
+@ RUN: llvm-mc -triple thumbv7-eabi -mattr +thumb2 -show-encoding %s | FileCheck %s
+
+ .syntax unified
+ .text
+ .thumb
+
+undefined:
+ udf #0
+ udf.w #0
+
+@ CHECK: udf #0 @ encoding: [0x00,0xde]
+@ CHECK: udf.w #0 @ encoding: [0xf0,0xf7,0x00,0xa0]
+
diff --git a/test/MC/ARM/udf-thumb-diagnostics.s b/test/MC/ARM/udf-thumb-diagnostics.s
new file mode 100644
index 000000000000..51388d0f10d6
--- /dev/null
+++ b/test/MC/ARM/udf-thumb-diagnostics.s
@@ -0,0 +1,19 @@
+@ RUN: not llvm-mc -triple thumbv6m-eabi %s 2>&1 | FileCheck %s
+
+ .syntax unified
+ .text
+ .thumb
+
+undefined:
+ udfpl
+
+@ CHECK: error: conditional execution not supported in Thumb1
+@ CHECK: udfpl
+@ CHECK: ^
+
+ udf #256
+
+@ CHECK: error: instruction requires: arm-mode
+@ CHECK: udf #256
+@ CHECK: ^
+
diff --git a/test/MC/ARM/udf-thumb.s b/test/MC/ARM/udf-thumb.s
new file mode 100644
index 000000000000..10b3aff1aa30
--- /dev/null
+++ b/test/MC/ARM/udf-thumb.s
@@ -0,0 +1,11 @@
+@ RUN: llvm-mc -triple thumbv6m-eabi -show-encoding %s | FileCheck %s
+
+ .syntax unified
+ .text
+ .thumb
+
+undefined:
+ udf #0
+
+@ CHECK: udf #0 @ encoding: [0x00,0xde]
+
diff --git a/test/MC/ARM/unwind-stack-diagnostics.s b/test/MC/ARM/unwind-stack-diagnostics.s
new file mode 100644
index 000000000000..28d5672d3aa5
--- /dev/null
+++ b/test/MC/ARM/unwind-stack-diagnostics.s
@@ -0,0 +1,30 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+ .thumb
+
+ .text
+
+ .global multiple_personality_disorder
+ .type multiple_personality_disorder,%function
+multiple_personality_disorder:
+ .fnstart
+ .personality __gcc_personality_v0
+ .personality __gxx_personality_v0
+ .personality __gxx_personality_sj0
+ .cantunwind
+
+@ CHECK: error: .cantunwind can't be used with .personality directive
+@ CHECK: .cantunwind
+@ CHECK: ^
+@ CHECK: note: .personality was specified here
+@ CHECK: .personality __gcc_personality_v0
+@ CHECK: ^
+@ CHECK: note: .personality was specified here
+@ CHECK: .personality __gxx_personality_v0
+@ CHECK: ^
+@ CHECK: note: .personality was specified here
+@ CHECK: .personality __gxx_personality_sj0
+@ CHECK: ^
+
diff --git a/test/MC/ARM/variant-diagnostics.s b/test/MC/ARM/variant-diagnostics.s
new file mode 100644
index 000000000000..535ee2686f18
--- /dev/null
+++ b/test/MC/ARM/variant-diagnostics.s
@@ -0,0 +1,13 @@
+@ RUN: not llvm-mc -triple armv7-linux-eabi -filetype asm -o /dev/null 2>&1 %s \
+@ RUN: | FileCheck %s
+
+ .arch armv7
+
+ .type invalid_variant,%function
+invalid_variant:
+ bx target(invalid)
+
+@ CHECK: error: invalid variant 'invalid'
+@ CHECK: bx target(invalid)
+@ CHECK: ^
+
diff --git a/test/MC/ARM/vfp-aliases-diagnostics.s b/test/MC/ARM/vfp-aliases-diagnostics.s
new file mode 100644
index 000000000000..d1ab18e99828
--- /dev/null
+++ b/test/MC/ARM/vfp-aliases-diagnostics.s
@@ -0,0 +1,114 @@
+@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null %s 2>&1 \
+@ RUN: | FileCheck %s
+
+ .syntax unified
+ .fpu vfp
+
+ .type aliases,%function
+aliases:
+ fstmfdd sp!, {s0}
+ fstmead sp!, {s0}
+ fstmdbd sp!, {s0}
+ fstmiad sp!, {s0}
+ fstmfds sp!, {d0}
+ fstmeas sp!, {d0}
+ fstmdbs sp!, {d0}
+ fstmias sp!, {d0}
+
+ fldmias sp!, {d0}
+ fldmdbs sp!, {d0}
+ fldmeas sp!, {d0}
+ fldmfds sp!, {d0}
+ fldmiad sp!, {s0}
+ fldmdbd sp!, {s0}
+ fldmead sp!, {s0}
+ fldmfdd sp!, {s0}
+
+ fstmeax sp!, {s0}
+ fldmfdx sp!, {s0}
+
+ fstmfdx sp!, {s0}
+ fldmeax sp!, {s0}
+
+@ CHECK-LABEL: aliases
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmfdd sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmead sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmdbd sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmiad sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fstmfds sp!, {d0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fstmeas sp!, {d0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fstmdbs sp!, {d0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fstmias sp!, {d0}
+@ CHECK: ^
+
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fldmias sp!, {d0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fldmdbs sp!, {d0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fldmeas sp!, {d0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon single precision register expected
+@ CHECK: fldmfds sp!, {d0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fldmiad sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fldmdbd sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fldmead sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fldmfdd sp!, {s0}
+@ CHECK: ^
+
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmeax sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fldmfdx sp!, {s0}
+@ CHECK: ^
+
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmfdx sp!, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fldmeax sp!, {s0}
+@ CHECK: ^
+
+ fstmiaxcs r0, {s0}
+ fstmiaxhs r0, {s0}
+ fstmiaxls r0, {s0}
+ fstmiaxvs r0, {s0}
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmiaxcs r0, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmiaxhs r0, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmiaxls r0, {s0}
+@ CHECK: ^
+@ CHECK: error: VFP/Neon double precision register expected
+@ CHECK: fstmiaxvs r0, {s0}
+@ CHECK: ^
+
diff --git a/test/MC/ARM/vfp-aliases.s b/test/MC/ARM/vfp-aliases.s
new file mode 100644
index 000000000000..4074feabbf18
--- /dev/null
+++ b/test/MC/ARM/vfp-aliases.s
@@ -0,0 +1,62 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype asm -o - %s | FileCheck %s
+
+ .syntax unified
+ .fpu vfp
+
+ .type aliases,%function
+aliases:
+ fstmfdd sp!, {d0}
+ fstmead sp!, {d0}
+ fstmdbd sp!, {d0}
+ fstmiad sp!, {d0}
+ fstmfds sp!, {s0}
+ fstmeas sp!, {s0}
+ fstmdbs sp!, {s0}
+ fstmias sp!, {s0}
+
+ fldmias sp!, {s0}
+ fldmdbs sp!, {s0}
+ fldmeas sp!, {s0}
+ fldmfds sp!, {s0}
+ fldmiad sp!, {d0}
+ fldmdbd sp!, {d0}
+ fldmead sp!, {d0}
+ fldmfdd sp!, {d0}
+
+ fstmeax sp!, {d0}
+ fldmfdx sp!, {d0}
+
+ fstmfdx sp!, {d0}
+ fldmeax sp!, {d0}
+
+@ CHECK-LABEL: aliases
+@ CHECK: vpush {d0}
+@ CHECK: vstmia sp!, {d0}
+@ CHECK: vpush {d0}
+@ CHECK: vstmia sp!, {d0}
+@ CHECK: vpush {s0}
+@ CHECK: vstmia sp!, {s0}
+@ CHECK: vpush {s0}
+@ CHECK: vstmia sp!, {s0}
+@ CHECK: vpop {s0}
+@ CHECK: vldmdb sp!, {s0}
+@ CHECK: vldmdb sp!, {s0}
+@ CHECK: vpop {s0}
+@ CHECK: vpop {d0}
+@ CHECK: vldmdb sp!, {d0}
+@ CHECK: vldmdb sp!, {d0}
+@ CHECK: vpop {d0}
+@ CHECK: fstmiax sp!, {d0}
+@ CHECK: fldmiax sp!, {d0}
+@ CHECK: fstmdbx sp!, {d0}
+@ CHECK: fldmdbx sp!, {d0}
+
+ fstmiaxcs r0, {d0}
+ fstmiaxhs r0, {d0}
+ fstmiaxls r0, {d0}
+ fstmiaxvs r0, {d0}
+@ CHECK: fstmiaxhs r0, {d0}
+@ CHECK: fstmiaxhs r0, {d0}
+@ CHECK: fstmiaxls r0, {d0}
+@ CHECK: fstmiaxvs r0, {d0}
+
diff --git a/test/MC/ARM/vmov-vmvn-byte-replicate.s b/test/MC/ARM/vmov-vmvn-byte-replicate.s
new file mode 100644
index 000000000000..5931160afbc5
--- /dev/null
+++ b/test/MC/ARM/vmov-vmvn-byte-replicate.s
@@ -0,0 +1,31 @@
+@ PR18921, "vmov" part.
+@ RUN: llvm-mc -triple=armv7-linux-gnueabi -show-encoding < %s | FileCheck %s
+.text
+
+@ CHECK: vmov.i8 d2, #0xff @ encoding: [0x1f,0x2e,0x87,0xf3]
+@ CHECK: vmov.i8 q2, #0xff @ encoding: [0x5f,0x4e,0x87,0xf3]
+@ CHECK: vmov.i8 d2, #0xab @ encoding: [0x1b,0x2e,0x82,0xf3]
+@ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
+@ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
+@ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
+
+@ CHECK: vmov.i8 d2, #0x0 @ encoding: [0x10,0x2e,0x80,0xf2]
+@ CHECK: vmov.i8 q2, #0x0 @ encoding: [0x50,0x4e,0x80,0xf2]
+@ CHECK: vmov.i8 d2, #0x54 @ encoding: [0x14,0x2e,0x85,0xf2]
+@ CHECK: vmov.i8 q2, #0x54 @ encoding: [0x54,0x4e,0x85,0xf2]
+@ CHECK: vmov.i8 d2, #0x54 @ encoding: [0x14,0x2e,0x85,0xf2]
+@ CHECK: vmov.i8 q2, #0x54 @ encoding: [0x54,0x4e,0x85,0xf2]
+
+ vmov.i32 d2, #0xffffffff
+ vmov.i32 q2, #0xffffffff
+ vmov.i32 d2, #0xabababab
+ vmov.i32 q2, #0xabababab
+ vmov.i16 q2, #0xabab
+ vmov.i16 q2, #0xabab
+
+ vmvn.i32 d2, #0xffffffff
+ vmvn.i32 q2, #0xffffffff
+ vmvn.i32 d2, #0xabababab
+ vmvn.i32 q2, #0xabababab
+ vmvn.i16 d2, #0xabab
+ vmvn.i16 q2, #0xabab
diff --git a/test/MC/ARM/vmov-vmvn-illegal-cases.s b/test/MC/ARM/vmov-vmvn-illegal-cases.s
new file mode 100644
index 000000000000..4609b7793551
--- /dev/null
+++ b/test/MC/ARM/vmov-vmvn-illegal-cases.s
@@ -0,0 +1,30 @@
+@ RUN: not llvm-mc -triple=armv7-linux-gnueabi %s 2>&1 | FileCheck %s
+.text
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmov.i32 d2, #0xffffffab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmov.i32 q2, #0xffffffab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmov.i16 q2, #0xffab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmov.i16 q2, #0xffab
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmvn.i32 d2, #0xffffffab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmvn.i32 q2, #0xffffffab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmvn.i16 q2, #0xffab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vmvn.i16 q2, #0xffab
+
+ vmov.i32 d2, #0xffffffab
+ vmov.i32 q2, #0xffffffab
+ vmov.i16 q2, #0xffab
+ vmov.i16 q2, #0xffab
+
+ vmvn.i32 d2, #0xffffffab
+ vmvn.i32 q2, #0xffffffab
+ vmvn.i16 q2, #0xffab
+ vmvn.i16 q2, #0xffab
diff --git a/test/MC/ARM/vorr-vbic-illegal-cases.s b/test/MC/ARM/vorr-vbic-illegal-cases.s
new file mode 100644
index 000000000000..16ab6b5bc74b
--- /dev/null
+++ b/test/MC/ARM/vorr-vbic-illegal-cases.s
@@ -0,0 +1,42 @@
+@ RUN: not llvm-mc -triple=armv7-linux-gnueabi %s 2>&1 | FileCheck %s
+.text
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vorr.i32 d2, #0xffffffff
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vorr.i32 q2, #0xffffffff
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vorr.i32 d2, #0xabababab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vorr.i32 q2, #0xabababab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vorr.i16 q2, #0xabab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vorr.i16 q2, #0xabab
+
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vbic.i32 d2, #0xffffffff
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vbic.i32 q2, #0xffffffff
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vbic.i32 d2, #0xabababab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vbic.i32 q2, #0xabababab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vbic.i16 d2, #0xabab
+@ CHECK: error: invalid operand for instruction
+@ CHECK: vbic.i16 q2, #0xabab
+
+ vorr.i32 d2, #0xffffffff
+ vorr.i32 q2, #0xffffffff
+ vorr.i32 d2, #0xabababab
+ vorr.i32 q2, #0xabababab
+ vorr.i16 q2, #0xabab
+ vorr.i16 q2, #0xabab
+
+ vbic.i32 d2, #0xffffffff
+ vbic.i32 q2, #0xffffffff
+ vbic.i32 d2, #0xabababab
+ vbic.i32 q2, #0xabababab
+ vbic.i16 d2, #0xabab
+ vbic.i16 q2, #0xabab
diff --git a/test/MC/ARM/xscale-attributes.ll b/test/MC/ARM/xscale-attributes.ll
deleted file mode 100644
index 718fd8fcc78b..000000000000
--- a/test/MC/ARM/xscale-attributes.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale -o - | \
-; RUN: FileCheck -check-prefix=ASM %s
-
-; RUN: llc %s -mtriple=thumbv5-linux-gnueabi -filetype=obj \
-; RUN: -mcpu=xscale -o - | llvm-readobj -s -sd | \
-; RUN: FileCheck -check-prefix=OBJ %s
-
-; FIXME: The OBJ test should be a .s to .o test and the ASM test should
-; be moved to test/CodeGen/ARM.
-
-define void @foo() nounwind {
-entry:
- ret void
-}
-
-; ASM: .eabi_attribute 6, 5
-; ASM-NEXT: .eabi_attribute 8, 1
-; ASM-NEXT: .eabi_attribute 9, 1
-
-; OBJ: Sections [
-; OBJ: Section {
-; OBJ: Index: 4
-; OBJ-NEXT: Name: .ARM.attributes (12)
-; OBJ-NEXT: Type: SHT_ARM_ATTRIBUTES
-; OBJ-NEXT: Flags [ (0x0)
-; OBJ-NEXT: ]
-; OBJ-NEXT: Address: 0x0
-; OBJ-NEXT: Offset: 0x38
-; OBJ-NEXT: Size: 40
-; OBJ-NEXT: Link: 0
-; OBJ-NEXT: Info: 0
-; OBJ-NEXT: AddressAlignment: 1
-; OBJ-NEXT: EntrySize: 0
-; OBJ-NEXT: SectionData (
-; OBJ-NEXT: 0000: 41270000 00616561 62690001 1D000000
-; OBJ-NEXT: 0010: 05585343 414C4500 06050801 09011401
-; OBJ-NEXT: 0020: 15011703 18011901
-; OBJ-NEXT: )
-; OBJ-NEXT: }
diff --git a/test/MC/AsmParser/conditional_asm.s b/test/MC/AsmParser/conditional_asm.s
index f619ef9bb428..ecbceb1dc362 100644
--- a/test/MC/AsmParser/conditional_asm.s
+++ b/test/MC/AsmParser/conditional_asm.s
@@ -10,3 +10,72 @@
.byte 0
.endif
.endif
+
+# CHECK: .byte 0
+# CHECK-NOT: .byte 1
+.ifeq 32 - 32
+ .byte 0
+.else
+ .byte 1
+.endif
+
+# CHECK: .byte 0
+# CHECK: .byte 1
+# CHECK-NOT: .byte 2
+.ifge 32 - 31
+ .byte 0
+.endif
+.ifge 32 - 32
+ .byte 1
+.endif
+.ifge 32 - 33
+ .byte 2
+.endif
+
+# CHECK: .byte 0
+# CHECK-NOT: .byte 1
+# CHECK-NOT: .byte 2
+.ifgt 32 - 31
+ .byte 0
+.endif
+.ifgt 32 - 32
+ .byte 1
+.endif
+.ifgt 32 - 33
+ .byte 2
+.endif
+
+# CHECK-NOT: .byte 0
+# CHECK: .byte 1
+# CHECK: .byte 2
+.ifle 32 - 31
+ .byte 0
+.endif
+.ifle 32 - 32
+ .byte 1
+.endif
+.ifle 32 - 33
+ .byte 2
+.endif
+
+# CHECK-NOT: .byte 0
+# CHECK-NOT: .byte 1
+# CHECK: .byte 2
+.iflt 32 - 31
+ .byte 0
+.endif
+.iflt 32 - 32
+ .byte 1
+.endif
+.iflt 32 - 33
+ .byte 2
+.endif
+
+# CHECK: .byte 1
+# CHECK-NOT: .byte 0
+.ifne 32 - 32
+ .byte 0
+.else
+ .byte 1
+.endif
+
diff --git a/test/MC/AsmParser/directive-err-diagnostics.s b/test/MC/AsmParser/directive-err-diagnostics.s
new file mode 100644
index 000000000000..ccc5450d938b
--- /dev/null
+++ b/test/MC/AsmParser/directive-err-diagnostics.s
@@ -0,0 +1,17 @@
+// RUN: not llvm-mc -triple i386 %s 2>&1 | FileCheck %s
+
+ .error 0
+// CHECK: error: .error argument must be a string
+// CHECK: .error 0
+// CHECK: ^
+
+ .ifeqs "0", "1"
+ .ifeqs "", ""
+ .error "message"
+ .endif
+ .endif
+// CHECK-NOT: error: message
+// CHECK-NOT: error: invalid instruction mnemonic 'message'
+// CHECK-NOT: .error "message"
+// CHECK-NOT: ^
+
diff --git a/test/MC/AsmParser/directive-err.s b/test/MC/AsmParser/directive-err.s
new file mode 100644
index 000000000000..63b89397f7ce
--- /dev/null
+++ b/test/MC/AsmParser/directive-err.s
@@ -0,0 +1,30 @@
+// RUN: not llvm-mc -triple i386 %s 2>&1 | FileCheck %s
+
+ .err
+// CHECK: error: .err encountered
+// CHECK-NEXT: .err
+// CHECK-NEXT: ^
+
+ .ifc a,a
+ .err
+ .endif
+// CHECK: error: .err encountered
+// CHECK-NEXT: .err
+// CHECK-NEXT: ^
+
+ .ifnc a,a
+ .err
+ .endif
+// CHECK-NOT: error: .err encountered
+
+ .error "This is my error. There are many like it, but this one is mine."
+// CHECK: error: This is my error. There are many like it, but this one is mine.
+
+ .ifc one, two
+ .error "My error is my best friend."
+ .endif
+// CHECK-NOT: error: My error is my best friend.
+
+ .error
+// CHECK: error: .error directive invoked in source file
+
diff --git a/test/MC/AsmParser/directive_end-2.s b/test/MC/AsmParser/directive_end-2.s
new file mode 100644
index 000000000000..96188da80fc3
--- /dev/null
+++ b/test/MC/AsmParser/directive_end-2.s
@@ -0,0 +1,14 @@
+# RUN: llvm-mc -triple i386-unknown-unknown %s -I %p -filetype obj -o - \
+# RUN: | llvm-readobj -t | FileCheck %s
+
+rock:
+ movl $42, %eax
+
+.include "directive_end.s"
+
+hard_place:
+ movl $42, %ebx
+
+# CHECK: Symbol {
+# CHECK: Name: rock
+# CHECK-NOT: Name: hard_place
diff --git a/test/MC/AsmParser/directive_end.s b/test/MC/AsmParser/directive_end.s
new file mode 100644
index 000000000000..ec43cad8e967
--- /dev/null
+++ b/test/MC/AsmParser/directive_end.s
@@ -0,0 +1,11 @@
+# RUN: llvm-mc -triple i386-unknown-unknown %s -filetype obj -o - \
+# RUN: | llvm-readobj -t | FileCheck %s
+
+ .end
+
+its_a_tarp:
+ int $0x3
+
+# CHECK: Symbol {
+# CHECK-NOT: Name: its_a_tarp
+
diff --git a/test/MC/AsmParser/directive_file.s b/test/MC/AsmParser/directive_file.s
index 9b99e0f24e99..d7290ebe1dbe 100644
--- a/test/MC/AsmParser/directive_file.s
+++ b/test/MC/AsmParser/directive_file.s
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple i386-unknown-unknown %s | FileCheck %s
+# RUN: llvm-mc -triple i386-unknown-unknown %s -filetype=null
.file "hello"
.file 1 "worl\144" # "\144" is "d"
diff --git a/test/MC/AsmParser/directive_fill.s b/test/MC/AsmParser/directive_fill.s
index bb3ced091c80..28d7fa2a3435 100644
--- a/test/MC/AsmParser/directive_fill.s
+++ b/test/MC/AsmParser/directive_fill.s
@@ -1,4 +1,5 @@
-# RUN: llvm-mc -triple i386-unknown-unknown %s | FileCheck %s
+# RUN: llvm-mc -triple i386-unknown-unknown %s 2> %t.err | FileCheck %s
+# RUN: FileCheck --check-prefix=CHECK-WARNINGS %s < %t.err
# CHECK: TEST0:
# CHECK: .byte 10
@@ -12,7 +13,8 @@ TEST1:
.fill 2, 2, 3
# CHECK: TEST2:
-# CHECK: .quad 4
+# CHECK: .long 4
+# CHECK: .long 0
TEST2:
.fill 1, 8, 4
@@ -31,3 +33,43 @@ TEST3:
# CHECK: .short 0
TEST4:
.fill 4, 2
+
+# CHECK: TEST5
+# CHECK: .short 2
+# CHECK: .byte 0
+# CHECK: .short 2
+# CHECK: .byte 0
+# CHECK: .short 2
+# CHECK: .byte 0
+# CHECK: .short 2
+# CHECK: .byte 0
+TEST5:
+ .fill 4, 3, 2
+
+# CHECK: TEST6
+# CHECK: .long 2
+# CHECK: .long 0
+# CHECK-WARNINGS: '.fill' directive with size greater than 8 has been truncated to 8
+TEST6:
+ .fill 1, 9, 2
+
+# CHECK: TEST7
+# CHECK: .long 0
+# CHECK: .long 0
+# CHECK-WARNINGS: '.fill' directive pattern has been truncated to 32-bits
+TEST7:
+ .fill 1, 8, 1<<32
+
+# CHECK-WARNINGS: '.fill' directive with negative repeat count has no effect
+TEST8:
+ .fill -1, 8, 1
+
+# CHECK-WARNINGS: '.fill' directive with negative size has no effect
+TEST9:
+ .fill 1, -1, 1
+
+# CHECK: TEST10
+# CHECK: .short 22136
+# CHECK: .byte 52
+TEST10:
+ .fill 1, 3, 0x12345678
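
The new directive_fill.s expectations above encode how ".fill repeat, size, value" lays out each unit: the value is emitted little-endian, the fill pattern is truncated to 32 bits, and sizes above 8 are clamped with a warning. A minimal Python sketch of that behaviour (fill_bytes is a hypothetical helper for illustration, not LLVM's implementation):

    def fill_bytes(repeat, size, value):
        # Illustrative model of the .fill semantics exercised by the tests above.
        if repeat < 0 or size < 0:
            return b""                      # negative repeat/size: no output (warning)
        size = min(size, 8)                 # size > 8 is truncated to 8 (warning)
        value &= 0xFFFFFFFF                 # the fill pattern is truncated to 32 bits
        unit = value.to_bytes(4, "little").ljust(size, b"\0")[:size]
        return unit * repeat

    assert fill_bytes(1, 3, 0x12345678) == bytes([0x78, 0x56, 0x34])   # TEST10
    assert fill_bytes(1, 8, 1 << 32) == bytes(8)                       # TEST7
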
diff --git a/test/MC/AsmParser/directive_line.s b/test/MC/AsmParser/directive_line.s
index 94ce44602998..110b68a46216 100644
--- a/test/MC/AsmParser/directive_line.s
+++ b/test/MC/AsmParser/directive_line.s
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple i386-unknown-unknown %s
+# RUN: llvm-mc -triple i386-unknown-unknown %s -filetype=null
# FIXME: Actually test the output.
.line
diff --git a/test/MC/AsmParser/directive_loc.s b/test/MC/AsmParser/directive_loc.s
index 700a32ca389f..404ebcecdd0a 100644
--- a/test/MC/AsmParser/directive_loc.s
+++ b/test/MC/AsmParser/directive_loc.s
@@ -1,9 +1,15 @@
-# RUN: llvm-mc -triple i386-unknown-unknown %s
-# FIXME: Actually test the output.
+# RUN: llvm-mc -triple i386-unknown-unknown %s | FileCheck %s
+# RUN: llvm-mc -triple i386-unknown-unknown %s -filetype=null
.file 1 "hello"
+# CHECK: .file 1 "hello"
.loc 1
.loc 1 2
+# CHECK: .loc 1 2 0
.loc 1 2 3
+# CHECK: .loc 1 2 3
.loc 1 2 discriminator 1
+# CHECK: 1 2 0 discriminator 1
+ .loc 1 2 0 isa 3
+# CHECK: 1 2 0 isa 3
.loc 1 0
diff --git a/test/MC/AsmParser/directive_rept-diagnostics.s b/test/MC/AsmParser/directive_rept-diagnostics.s
new file mode 100644
index 000000000000..cbef15869004
--- /dev/null
+++ b/test/MC/AsmParser/directive_rept-diagnostics.s
@@ -0,0 +1,41 @@
+# RUN: not llvm-mc -triple i686-elf -filetype asm -o /dev/null %s 2>&1 \
+# RUN: | FileCheck %s
+
+ .data
+
+ .global invalid_expression
+ .type invalid_expression,@object
+invalid_expression:
+ .rept *
+
+# CHECK: error: unknown token in expression
+# CHECK: .rept *
+# CHECK: ^
+
+ .global bad_token
+ .type bad_token,@object
+bad_token:
+ .rept bad_token
+
+# CHECK: error: unexpected token in '.rept' directive
+# CHECK: .rept bad_token
+# CHECK: ^
+
+ .global negative
+ .type negative,@object
+negative:
+ .rept -32
+
+# CHECK: error: Count is negative
+# CHECK: .rept -32
+# CHECK: ^
+
+ .global trailer
+ .type trailer,@object
+trailer:
+ .rep 0 trailer
+
+# CHECK: error: unexpected token in '.rep' directive
+# CHECK: .rep 0 trailer
+# CHECK: ^
+
diff --git a/test/MC/AsmParser/directive_rept.s b/test/MC/AsmParser/directive_rept.s
new file mode 100644
index 000000000000..4f8ed7c0b80c
--- /dev/null
+++ b/test/MC/AsmParser/directive_rept.s
@@ -0,0 +1,30 @@
+# RUN: llvm-mc -triple i686-elf -filetype asm -o - %s | FileCheck %s
+
+ .data
+
+ .global two_bad_calls
+ .type two_bad_calls,@function
+two_bad_calls:
+ .rept 2
+ .long 0xbadca11
+ .endr
+
+# CHECK-LABEL: two_bad_calls
+# CHECK: .long 195938833
+# CHECK: .long 195938833
+
+ .global half_a_dozen_daffodils
+ .type half_a_dozen_daffodils,@function
+half_a_dozen_daffodils:
+ .rep 6
+ .long 0xdaff0d11
+ .endr
+
+# CHECK-LABEL: half_a_dozen_daffodils
+# CHECK: .long 3674148113
+# CHECK: .long 3674148113
+# CHECK: .long 3674148113
+# CHECK: .long 3674148113
+# CHECK: .long 3674148113
+# CHECK: .long 3674148113
+
diff --git a/test/MC/AsmParser/directive_seh.s b/test/MC/AsmParser/directive_seh.s
index 98fc6061f9dc..1821747a2ba4 100644
--- a/test/MC/AsmParser/directive_seh.s
+++ b/test/MC/AsmParser/directive_seh.s
@@ -1,36 +1,25 @@
# RUN: llvm-mc -triple x86_64-pc-win32 %s | FileCheck %s
-# CHECK: .seh_proc func
-# CHECK: .seh_pushframe @code
-# CHECK: .seh_stackalloc 24
-# CHECK: .seh_savereg 6, 16
-# CHECK: .seh_savexmm 8, 0
-# CHECK: .seh_pushreg 3
-# CHECK: .seh_setframe 3, 0
-# CHECK: .seh_endprologue
-# CHECK: .seh_handler __C_specific_handler, @except
-# CHECK-NOT: .section{{.*}}.xdata
-# CHECK: .seh_handlerdata
-# CHECK: .text
-# CHECK: .seh_startchained
-# CHECK: .seh_endprologue
-# CHECK: .seh_endchained
-# CHECK: .seh_endproc
-
.text
.globl func
.def func; .scl 2; .type 32; .endef
.seh_proc func
+# CHECK: .seh_proc func
func:
.seh_pushframe @code
+# CHECK: .seh_pushframe @code
subq $24, %rsp
.seh_stackalloc 24
+# CHECK: .seh_stackalloc 24
movq %rsi, 16(%rsp)
.seh_savereg %rsi, 16
+# CHECK: .seh_savereg 6, 16
movups %xmm8, (%rsp)
.seh_savexmm %xmm8, 0
+# CHECK: .seh_savexmm 8, 0
pushq %rbx
.seh_pushreg 3
+# CHECK: .seh_pushreg 3
mov %rsp, %rbx
.seh_setframe 3, 0
.seh_endprologue
@@ -41,8 +30,18 @@ func:
.seh_startchained
.seh_endprologue
.seh_endchained
+# CHECK: .seh_setframe 3, 0
+# CHECK: .seh_endprologue
+# CHECK: .seh_handler __C_specific_handler, @except
+# CHECK-NOT: .section{{.*}}.xdata
+# CHECK: .seh_handlerdata
+# CHECK: .text
+# CHECK: .seh_startchained
+# CHECK: .seh_endprologue
+# CHECK: .seh_endchained
lea (%rbx), %rsp
pop %rbx
addq $24, %rsp
ret
.seh_endproc
+# CHECK: .seh_endproc
diff --git a/test/MC/AsmParser/directive_values.s b/test/MC/AsmParser/directive_values.s
index ed932b297462..6941a1a59409 100644
--- a/test/MC/AsmParser/directive_values.s
+++ b/test/MC/AsmParser/directive_values.s
@@ -69,3 +69,15 @@ TEST8:
.long 0x200000L+1
# CHECK: .long 2097153
# CHECK: .long 2097153
+
+TEST9:
+ .octa 0x1234567812345678abcdef, 340282366920938463463374607431768211455
+ .octa 0b00111010010110100101101001011010010110100101101001011010010110100101101001011010010110100101101001011010010110100101101001011010
+# CHECK: TEST9
+# CHECK: .quad 8652035380128501231
+# CHECK: .quad 1193046
+# CHECK: .quad -1
+# CHECK: .quad -1
+# CHECK: .quad 6510615555426900570
+# CHECK: .quad 4204772546213206618
+
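
For the new TEST9 above, the CHECK lines reflect that each 128-bit ".octa" constant is emitted as two 64-bit words, low quadword first. A small Python sketch of that split (octa_to_quads is a hypothetical helper for illustration):

    def octa_to_quads(value):
        # Split a 128-bit .octa constant into (low, high) 64-bit words,
        # matching the order of the .quad directives checked above.
        value &= (1 << 128) - 1
        return value & ((1 << 64) - 1), value >> 64

    low, high = octa_to_quads(0x1234567812345678abcdef)
    assert low == 0x7812345678abcdef    # .quad 8652035380128501231
    assert high == 0x123456             # .quad 1193046
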
diff --git a/test/MC/AsmParser/dot-symbol-assignment-backwards.s b/test/MC/AsmParser/dot-symbol-assignment-backwards.s
new file mode 100644
index 000000000000..2619788203a9
--- /dev/null
+++ b/test/MC/AsmParser/dot-symbol-assignment-backwards.s
@@ -0,0 +1,12 @@
+# RUN: not llvm-mc -filetype=obj -triple i386-unknown-unknown %s 2> %t
+# RUN: FileCheck -input-file %t %s
+
+. = 0x10
+ .byte 1
+
+. = . + 10
+ .byte 2
+
+# CHECK: LLVM ERROR: invalid .org offset '24' (at offset '28')
+. = 0x18
+ .byte 3
diff --git a/test/MC/AsmParser/dot-symbol-assignment.s b/test/MC/AsmParser/dot-symbol-assignment.s
new file mode 100644
index 000000000000..65651d77485e
--- /dev/null
+++ b/test/MC/AsmParser/dot-symbol-assignment.s
@@ -0,0 +1,31 @@
+# RUN: llvm-mc -triple i386-unknown-unknown %s | FileCheck %s
+
+ .extern start
+
+# CHECK: .org 1024, 0
+. = 0x400
+ lgdt 0x400 + 0x100
+
+ ljmpl $0x08, $(0x400 + 0x150)
+
+
+# CHECK: .org 1280, 0
+. = 0x400 + 0x100
+ .word (3*8)-1
+ .quad (0x400 + 0x110)
+
+# CHECK: .org 1296, 0
+. = 0x400 + 0x110
+ .quad 0x0
+ .quad 0x0020980000000000
+ .quad 0x0000900000000000
+
+ .code64
+
+# CHECK: .org 1360, 0
+. = 0x400 + 0x150
+ movabsq $start, %rcx
+ jmp *%rcx
+
+
+. = 0x300
diff --git a/test/MC/AsmParser/dot-symbol-non-absolute.s b/test/MC/AsmParser/dot-symbol-non-absolute.s
new file mode 100644
index 000000000000..7342365fe1b3
--- /dev/null
+++ b/test/MC/AsmParser/dot-symbol-non-absolute.s
@@ -0,0 +1,9 @@
+# RUN: not llvm-mc -filetype=obj -triple i386-unknown-unknown %s 2> %t
+# RUN: FileCheck -input-file %t %s
+
+
+ .extern foo
+
+# CHECK: error: expected absolute expression
+. = foo + 10
+ .byte 1
diff --git a/test/MC/AsmParser/dot-symbol.s b/test/MC/AsmParser/dot-symbol.s
index 4a38a4053242..4bae3231868c 100644
--- a/test/MC/AsmParser/dot-symbol.s
+++ b/test/MC/AsmParser/dot-symbol.s
@@ -1,12 +1,9 @@
# Historically 'as' treats '.' as a reference to the current location in
-# arbitrary contects. We don't support this in general.
+# arbitrary contexts. We don't support this in general.
# RUN: not llvm-mc -triple i386-unknown-unknown %s 2> %t
# RUN: FileCheck -input-file %t %s
-# CHECK: assignment to pseudo-symbol '.' is unsupported (use '.space' or '.org').
-. = . + 8
-
# CHECK: invalid use of pseudo-symbol '.' as a label
.:
.long 0
diff --git a/test/MC/AsmParser/exprs.s b/test/MC/AsmParser/exprs.s
index a7e10020b67b..c5fc9b594a0c 100644
--- a/test/MC/AsmParser/exprs.s
+++ b/test/MC/AsmParser/exprs.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple i386-unknown-unknown %s > %t
+// RUN: llvm-mc -triple i386-apple-darwin %s
.macro check_expr
.if ($0) != ($1)
diff --git a/test/MC/AsmParser/if-diagnostics.s b/test/MC/AsmParser/if-diagnostics.s
new file mode 100644
index 000000000000..d102a5686d98
--- /dev/null
+++ b/test/MC/AsmParser/if-diagnostics.s
@@ -0,0 +1,29 @@
+// RUN: not llvm-mc -triple i386 %s -o /dev/null 2>&1 | FileCheck %s
+
+.if
+.endif
+
+// CHECK: error: unknown token in expression
+// CHECK: .if
+// CHECK: ^
+
+.ifeq 0, 3
+.endif
+
+// CHECK:error: unexpected token in '.if' directive
+// CHECK: .ifeq 0, 3
+// CHECK: ^
+
+.iflt "string1"
+.endif
+
+// CHECK: error: expected absolute expression
+// CHECK: .iflt "string1"
+// CHECK: ^
+
+.ifge test
+.endif
+
+// CHECK: error: expected absolute expression
+// CHECK: .ifge test
+// CHECK: ^
diff --git a/test/MC/AsmParser/ifc.s b/test/MC/AsmParser/ifc.s
index 20e55c0a24fc..24944a21e4fe 100644
--- a/test/MC/AsmParser/ifc.s
+++ b/test/MC/AsmParser/ifc.s
@@ -63,3 +63,8 @@
.else
.byte 0
.endif
+
+# CHECK-NOT: .byte 0
+# CHECK: .byte 1
+.ifnc equal, equal ; .byte 0 ; .else ; .byte 1 ; .endif
+
diff --git a/test/MC/AsmParser/ifeqs-diagnostics.s b/test/MC/AsmParser/ifeqs-diagnostics.s
new file mode 100644
index 000000000000..1e5e8c3751f6
--- /dev/null
+++ b/test/MC/AsmParser/ifeqs-diagnostics.s
@@ -0,0 +1,22 @@
+// RUN: not llvm-mc -triple i386 %s -o /dev/null 2>&1 | FileCheck %s
+
+.ifeqs
+
+// CHECK: error: expected string parameter for '.ifeqs' directive
+// CHECK: .ifeqs
+// CHECK: ^
+
+.ifeqs "string1"
+
+// CHECK: error: expected comma after first string for '.ifeqs' directive
+// CHECK: .ifeqs "string1"
+// CHECK: ^
+
+.ifeqs "string1",
+
+// CHECK: error: expected string parameter for '.ifeqs' directive
+// CHECK: .ifeqs "string1",
+// CHECK: ^
+
+// CHECK-NOT: error: unmatched .ifs or .elses
+
diff --git a/test/MC/AsmParser/ifeqs.s b/test/MC/AsmParser/ifeqs.s
new file mode 100644
index 000000000000..05a26a237fcd
--- /dev/null
+++ b/test/MC/AsmParser/ifeqs.s
@@ -0,0 +1,20 @@
+// RUN: llvm-mc -triple i386 %s | FileCheck %s
+
+.ifeqs "alpha", "alpha"
+ .byte 1
+.else
+ .byte 0
+.endif
+
+// CHECK-NOT: .byte 0
+// CHECK: .byte 1
+
+.ifeqs "alpha", "alpha "
+ .byte 0
+.else
+ .byte 1
+.endif
+
+// CHECK-NOT: .byte 0
+// CHECK: .byte 1
+
diff --git a/test/MC/AsmParser/invalid-input-assertion.s b/test/MC/AsmParser/invalid-input-assertion.s
new file mode 100644
index 000000000000..2557f6e4aa6f
--- /dev/null
+++ b/test/MC/AsmParser/invalid-input-assertion.s
@@ -0,0 +1,9 @@
+// RUN: not llvm-mc -triple i686-linux -o /dev/null %s
+
+ .macro macro parameter=0
+ .if \parameter
+ .else
+ .endm
+
+ macro 1
+
diff --git a/test/MC/AsmParser/lit.local.cfg b/test/MC/AsmParser/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/MC/AsmParser/lit.local.cfg
+++ b/test/MC/AsmParser/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/AsmParser/macro-def-in-instantiation.s b/test/MC/AsmParser/macro-def-in-instantiation.s
index b6483b3b32b2..773df701aab4 100644
--- a/test/MC/AsmParser/macro-def-in-instantiation.s
+++ b/test/MC/AsmParser/macro-def-in-instantiation.s
@@ -11,3 +11,23 @@ $4
.data
// CHECK: .byte 10
.mybyte 10
+
+// PR18599
+.macro macro_a
+
+.macro macro_b
+.byte 10
+.macro macro_c
+.endm
+
+macro_c
+.purgem macro_c
+.endm
+
+macro_b
+.endm
+
+macro_a
+macro_b
+// CHECK: .byte 10
+// CHECK: .byte 10
diff --git a/test/MC/AsmParser/macro-err1.s b/test/MC/AsmParser/macro-err1.s
index 924deb0cf6e1..bd9c837d8be0 100644
--- a/test/MC/AsmParser/macro-err1.s
+++ b/test/MC/AsmParser/macro-err1.s
@@ -7,4 +7,4 @@
foo 42, 42
-// CHECK: Too many arguments
+// CHECK: too many positional arguments
diff --git a/test/MC/AsmParser/macro-irp.s b/test/MC/AsmParser/macro-irp.s
index a368b7446dc0..2f26eabae53c 100644
--- a/test/MC/AsmParser/macro-irp.s
+++ b/test/MC/AsmParser/macro-irp.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple x86_64-unknown-unknown %s | FileCheck %s
+// RUN: llvm-mc -triple i386-unknown-unknown %s | FileCheck %s
.irp reg,%eax,%ebx
pushl \reg
diff --git a/test/MC/AsmParser/macro-qualifier-diagnostics.s b/test/MC/AsmParser/macro-qualifier-diagnostics.s
new file mode 100644
index 000000000000..4ebad2b69e48
--- /dev/null
+++ b/test/MC/AsmParser/macro-qualifier-diagnostics.s
@@ -0,0 +1,64 @@
+# RUN: not llvm-mc -triple i386 -o /dev/null %s 2>&1 | FileCheck %s
+
+ .macro missing_qualifier parameter:
+
+# CHECK: error: missing parameter qualifier for 'parameter' in macro 'missing_qualifier'
+# CHECK: .macro missing_qualifier parameter:
+# CHECK: ^
+
+ .macro non_identifier_qualifier parameter:0
+
+# CHECK: error: missing parameter qualifier for 'parameter' in macro 'non_identifier_qualifier'
+# CHECK: .macro non_identifier_qualifier parameter:0
+# CHECK: ^
+
+ .macro invalid_qualifier parameter:invalid_qualifier
+
+# CHECK: error: invalid_qualifier is not a valid parameter qualifier for 'parameter' in macro 'invalid_qualifier'
+# CHECK: .macro invalid_qualifier parameter:invalid_qualifier
+# CHECK: ^
+
+ .macro pointless_default parameter:req=default
+ .endm
+
+# CHECK: warning: pointless default value for required parameter 'parameter' in macro 'pointless_default'
+# CHECK: .macro pointless_default parameter:req=default
+# CHECK: ^
+
+ .macro missing_required_parameter parameter:req
+ .endm
+
+ missing_required_parameter
+
+# CHECK: error: missing value for required parameter 'parameter' in macro 'missing_required_parameter'
+# CHECK: missing_required_parameter
+# CHECK: ^
+
+ .macro missing_second_required_argument first=0 second:req
+ .endm
+
+ missing_second_required_argument
+
+# CHECK: error: missing value for required parameter 'second' in macro 'missing_second_required_argument'
+# CHECK: missing_second_required_argument
+# CHECK: ^
+
+ .macro second_third_required first=0 second:req third:req
+ .endm
+
+ second_third_required 0
+
+# CHECK: error: missing value for required parameter 'second' in macro 'second_third_required'
+# CHECK: second_third_required 0
+# CHECK: ^
+
+# CHECK: error: missing value for required parameter 'third' in macro 'second_third_required'
+# CHECK: second_third_required 0
+# CHECK: ^
+
+ second_third_required third=3 first=1
+
+# CHECK: error: missing value for required parameter 'second' in macro 'second_third_required'
+# CHECK: second_third_required third=3 first=1
+# CHECK: ^
+
diff --git a/test/MC/AsmParser/macro-qualifier.s b/test/MC/AsmParser/macro-qualifier.s
new file mode 100644
index 000000000000..7714e5cfa936
--- /dev/null
+++ b/test/MC/AsmParser/macro-qualifier.s
@@ -0,0 +1,16 @@
+# RUN: llvm-mc -triple i386 -o - %s | FileCheck %s
+
+ .macro required parameter:req
+ .long \parameter
+ .endm
+
+ required 0
+# CHECK: .long 0
+
+ .macro required_with_default parameter:req=0
+ .long \parameter
+ .endm
+
+ required 1
+# CHECK: .long 1
+
diff --git a/test/MC/AsmParser/macros-argument-parsing-diagnostics.s b/test/MC/AsmParser/macros-argument-parsing-diagnostics.s
new file mode 100644
index 000000000000..a1970e0c9d57
--- /dev/null
+++ b/test/MC/AsmParser/macros-argument-parsing-diagnostics.s
@@ -0,0 +1,24 @@
+# RUN: not llvm-mc -triple i386 -filetype asm -o /dev/null %s 2>&1 | FileCheck %s
+
+ .macro double first = -1, second = -1
+ # begin entry
+ .long \first
+ .long \second
+ # end entry
+ .endm
+
+ double 0, 1, 2
+# CHECK: error: too many positional arguments
+# CHECK: double 0, 1, 2
+# CHECK: ^
+
+ double second = 1, 2
+# CHECK: error: cannot mix positional and keyword arguments
+# CHECK: double second = 1, 2
+# CHECK: ^
+
+ double third = 0
+# CHECK: error: parameter named 'third' does not exist for macro 'double'
+# CHECK: double third = 0
+# CHECK: ^
+
diff --git a/test/MC/AsmParser/macros-argument-parsing.s b/test/MC/AsmParser/macros-argument-parsing.s
new file mode 100644
index 000000000000..39242cf1736d
--- /dev/null
+++ b/test/MC/AsmParser/macros-argument-parsing.s
@@ -0,0 +1,91 @@
+# RUN: llvm-mc -triple i386 -filetype asm -o - %s | FileCheck %s
+
+ .macro it, cond
+ .endm
+
+ it ne
+ .long 1
+
+# CHECK: .long 1
+
+ .macro double first = -1, second = -1
+ # begin entry
+ .long \first
+ .long \second
+ # end entry
+ .endm
+
+ double
+# CHECK: .long -1
+# CHECK: .long -1
+
+ double 1
+# CHECK: .long 1
+# CHECK: .long -1
+
+ double 2, 3
+# CHECK: .long 2
+# CHECK: .long 3
+
+ double , 4
+# CHECK: .long -1
+# CHECK: .long 4
+
+ double 5, second = 6
+# CHECK: .long 5
+# CHECK: .long 6
+
+ double first = 7
+# CHECK: .long 7
+# CHECK: .long -1
+
+ double second = 8
+# CHECK: .long -1
+# CHECK: .long 8
+
+ double second = 9, first = 10
+# CHECK: .long 10
+# CHECK: .long 9
+
+ double second + 11
+# CHECK: .long second+11
+# CHECK: .long -1
+
+ double , second + 12
+# CHECK: .long -1
+# CHECK: .long second+12
+
+ double second
+# CHECK: .long second
+# CHECK: .long -1
+
+ .macro mixed arg0 = 0, arg1 = 1 arg2 = 2, arg3 = 3
+ # begin entry
+ .long \arg0
+ .long \arg1
+ .long \arg2
+ .long \arg3
+ # end entry
+ .endm
+
+mixed 1, 2 3
+
+# CHECK: .long 1
+# CHECK: .long 2
+# CHECK: .long 3
+# CHECK: .long 3
+
+mixed 1 2, 3
+
+# CHECK: .long 1
+# CHECK: .long 2
+# CHECK: .long 3
+# CHECK: .long 3
+
+mixed 1 2, 3 4
+
+# CHECK: .long 1
+# CHECK: .long 2
+# CHECK: .long 3
+# CHECK: .long 4
+
diff --git a/test/MC/AsmParser/macros-darwin-vararg.s b/test/MC/AsmParser/macros-darwin-vararg.s
new file mode 100644
index 000000000000..a650c0871d23
--- /dev/null
+++ b/test/MC/AsmParser/macros-darwin-vararg.s
@@ -0,0 +1,8 @@
+// RUN: not llvm-mc -triple i386-apple-darwin10 %s 2>&1 | FileCheck %s
+
+// CHECK: error: vararg is not a valid parameter qualifier for 'arg' in macro 'abc'
+// CHECK: .macro abc arg:vararg
+
+.macro abc arg:vararg
+ \arg
+.endm
diff --git a/test/MC/AsmParser/macros-darwin.s b/test/MC/AsmParser/macros-darwin.s
index 95965d3fe1cb..e22038e7d1b2 100644
--- a/test/MC/AsmParser/macros-darwin.s
+++ b/test/MC/AsmParser/macros-darwin.s
@@ -1,9 +1,93 @@
-// RUN: llvm-mc -triple i386-apple-darwin10 %s | FileCheck %s
+// RUN: not llvm-mc -triple i386-apple-darwin10 %s 2> %t.err | FileCheck %s
+// RUN: FileCheck --check-prefix=CHECK-ERRORS %s < %t.err
-.macro test1
+.macro .test0
+.macrobody0
+.endmacro
+.macro .test1
+.test0
+.endmacro
+
+.test1
+// CHECK-ERRORS: <instantiation>:1:1: error: unknown directive
+// CHECK-ERRORS-NEXT: macrobody0
+// CHECK-ERRORS-NEXT: ^
+// CHECK-ERRORS: <instantiation>:1:1: note: while in macro instantiation
+// CHECK-ERRORS-NEXT: .test0
+// CHECK-ERRORS-NEXT: ^
+// CHECK-ERRORS: 11:1: note: while in macro instantiation
+// CHECK-ERRORS-NEXT: .test1
+// CHECK-ERRORS-NEXT: ^
+
+.macro test2
+.byte $0
+.endmacro
+// CHECK: .byte 10
+test2 10
+
+.macro test3
.globl "$0 $1 $2 $$3 $n"
.endmacro
// CHECK: .globl "1 23 $3 2"
-test1 1, 2 3
+test3 1, 2 3
+
+// CHECK: .globl "1 (23) $3 2"
+test3 1, (2 3)
+
+// CHECK: .globl "12 $3 1"
+test3 1 2
+
+.macro test4
+.globl "$0 -- $1"
+.endmacro
+
+// CHECK: .globl "(ab)(,)) -- (cd)"
+test4 (a b)(,)),(cd)
+
+// CHECK: .globl "(ab)(,)) -- (cd)"
+test4 (a b)(,)),(cd)
+
+.macro test5 _a
+.globl "\_a"
+.endm
+
+// CHECK: .globl zed1
+test5 zed1
+
+.macro test6 $a
+.globl "\$a"
+.endm
+
+// CHECK: .globl zed2
+test6 zed2
+
+.macro test7 .a
+.globl "\.a"
+.endm
+
+// CHECK: .globl zed3
+test7 zed3
+
+.macro test8 _a, _b, _c
+.globl "\_a,\_b,\_c"
+.endmacro
+
+.macro test9 _a _b _c
+.globl "\_a \_b \_c"
+.endmacro
+
+// CHECK: .globl "a,b,c"
+test8 a, b, c
+// CHECK: .globl "%1,%2,%3"
+test8 %1, %2, %3 #a comment
+// CHECK: .globl "x-y,z,1"
+test8 x - y, z, 1
+// CHECK: .globl "1 2 3"
+test9 1, 2,3
+
+// CHECK: .globl "1,23,"
+test8 1,2 3
+// CHECK: .globl "12,3,"
+test8 1 2, 3
diff --git a/test/MC/AsmParser/macros-gas.s b/test/MC/AsmParser/macros-gas.s
new file mode 100644
index 000000000000..d907a2517f8c
--- /dev/null
+++ b/test/MC/AsmParser/macros-gas.s
@@ -0,0 +1,105 @@
+// RUN: not llvm-mc -triple i386-linux-gnu %s 2> %t.err | FileCheck %s
+// RUN: FileCheck --check-prefix=CHECK-ERRORS %s < %t.err
+
+.macro .test0
+.macrobody0
+.endm
+.macro .test1
+.test0
+.endm
+
+.test1
+// CHECK-ERRORS: <instantiation>:1:1: error: unknown directive
+// CHECK-ERRORS-NEXT: macrobody0
+// CHECK-ERRORS-NEXT: ^
+// CHECK-ERRORS: <instantiation>:1:1: note: while in macro instantiation
+// CHECK-ERRORS-NEXT: .test0
+// CHECK-ERRORS-NEXT: ^
+// CHECK-ERRORS: 11:1: note: while in macro instantiation
+// CHECK-ERRORS-NEXT: .test1
+// CHECK-ERRORS-NEXT: ^
+
+.macro test2 _a
+.byte \_a
+.endm
+// CHECK: .byte 10
+test2 10
+
+.macro test3 _a _b _c
+.ascii "\_a \_b \_c \\_c"
+.endm
+
+// CHECK: .ascii "1 2 3 \003"
+test3 1, 2, 3
+
+// CHECK: .ascii "1 2 3 \003"
+test3 1, 2 3
+
+.macro test3_prime _a _b _c
+.ascii "\_a \_b \_c"
+.endm
+
+// CHECK: .ascii "1 (23) "
+test3_prime 1, (2 3)
+
+// CHECK: .ascii "1 (23) "
+test3_prime 1 (2 3)
+
+// CHECK: .ascii "1 2 "
+test3_prime 1 2
+
+.macro test5 _a
+.globl \_a
+.endm
+
+// CHECK: .globl zed1
+test5 zed1
+
+.macro test6 $a
+.globl \$a
+.endm
+
+// CHECK: .globl zed2
+test6 zed2
+
+.macro test7 .a
+.globl \.a
+.endm
+
+// CHECK: .globl zed3
+test7 zed3
+
+.macro test8 _a, _b, _c
+.ascii "\_a,\_b,\_c"
+.endm
+
+.macro test9 _a _b _c
+.ascii "\_a \_b \_c"
+.endm
+
+// CHECK: .ascii "a,b,c"
+test8 a, b, c
+// CHECK: .ascii "%1,%2,%3"
+test8 %1 %2 %3 #a comment
+// CHECK: .ascii "x-y,z,1"
+test8 x - y z 1
+// CHECK: .ascii "1 2 3"
+test9 1, 2,3
+
+// CHECK: .ascii "1,2,3"
+test8 1,2 3
+
+// CHECK: .ascii "1,2,3"
+test8 1 2, 3
+
+.macro test10
+.ascii "$20"
+.endm
+
+test10
+// CHECK: .ascii "$20"
+
+test10 42
+// CHECK-ERRORS: 102:10: error: Wrong number of arguments
+// CHECK-ERRORS-NEXT: test10 42
+// CHECK-ERRORS-NEXT: ^
diff --git a/test/MC/AsmParser/macros.s b/test/MC/AsmParser/macros.s
deleted file mode 100644
index b1cb851fcd6b..000000000000
--- a/test/MC/AsmParser/macros.s
+++ /dev/null
@@ -1,93 +0,0 @@
-// RUN: not llvm-mc -triple i386-unknown-unknown %s 2> %t.err | FileCheck %s
-// RUN: FileCheck --check-prefix=CHECK-ERRORS %s < %t.err
-
-.macro .test0
-.macrobody0
-.endmacro
-.macro .test1
-.test0
-.endmacro
-
-.test1
-// CHECK-ERRORS: <instantiation>:1:1: error: unknown directive
-// CHECK-ERRORS-NEXT: macrobody0
-// CHECK-ERRORS-NEXT: ^
-// CHECK-ERRORS: <instantiation>:1:1: note: while in macro instantiation
-// CHECK-ERRORS-NEXT: .test0
-// CHECK-ERRORS-NEXT: ^
-// CHECK-ERRORS: 11:1: note: while in macro instantiation
-// CHECK-ERRORS-NEXT: .test1
-// CHECK-ERRORS-NEXT: ^
-
-.macro test2
-.byte $0
-.endmacro
-test2 10
-
-.macro test3
-.globl "$0 $1 $2 $$3 $n"
-.endmacro
-
-// CHECK: .globl "1 (23) $3 2"
-test3 1, (2 3)
-
-// CHECK: .globl "1 2 $3 2"
-test3 1 2
-
-.macro test4
-.globl "$0 -- $1"
-.endmacro
-
-// CHECK: .globl "(ab)(,)) -- (cd)"
-test4 (a b)(,)),(cd)
-
-// CHECK: .globl "(ab)(,)) -- (cd)"
-test4 (a b)(,)),(cd)
-
-.macro test5 _a
-.globl "\_a"
-.endm
-
-// CHECK: .globl zed1
-test5 zed1
-
-.macro test6 $a
-.globl "\$a"
-.endm
-
-// CHECK: .globl zed2
-test6 zed2
-
-.macro test7 .a
-.globl "\.a"
-.endm
-
-// CHECK: .globl zed3
-test7 zed3
-
-.macro test8 _a, _b, _c
-.globl "\_a,\_b,\_c"
-.endmacro
-
-.macro test9 _a _b _c
-.globl "\_a \_b \_c"
-.endmacro
-
-// CHECK: .globl "a,b,c"
-test8 a, b, c
-// CHECK: .globl "%1,%2,%3"
-test8 %1 %2 %3 #a comment
-// CHECK: .globl "x-y,z,1"
-test8 x - y z 1
-// CHECK: .globl "1 2 3"
-test9 1, 2,3
-
-test8 1,2 3
-// CHECK-ERRORS: error: macro argument '_c' is missing
-// CHECK-ERRORS-NEXT: test8 1,2 3
-// CHECK-ERRORS-NEXT: ^
-
-test8 1 2, 3
-// CHECK-ERRORS: error: expected ' ' for macro argument separator
-// CHECK-ERRORS-NEXT:test8 1 2, 3
-// CHECK-ERRORS-NEXT: ^
diff --git a/test/MC/AsmParser/vararg-default-value.s b/test/MC/AsmParser/vararg-default-value.s
new file mode 100644
index 000000000000..77cd1e8817e7
--- /dev/null
+++ b/test/MC/AsmParser/vararg-default-value.s
@@ -0,0 +1,15 @@
+// RUN: llvm-mc -triple x86_64-linux-gnu %s | FileCheck %s
+.macro abc arg:vararg=nop
+ \arg
+.endm
+
+.macro abcd arg0=%eax arg1:vararg=%ebx
+ movl \arg0, \arg1
+.endm
+
+.text
+
+// CHECK: nop
+ abc
+// CHECK: movl %eax, %ebx
+ abcd ,
diff --git a/test/MC/AsmParser/vararg.s b/test/MC/AsmParser/vararg.s
new file mode 100644
index 000000000000..e3236b072d12
--- /dev/null
+++ b/test/MC/AsmParser/vararg.s
@@ -0,0 +1,51 @@
+// RUN: llvm-mc -triple x86_64-linux-gnu %s | FileCheck %s
+.macro ifcc arg:vararg
+.if cc
+ \arg
+.endif
+.endm
+
+.macro ifcc2 arg0 arg1:vararg
+.if cc
+ movl \arg0, \arg1
+.endif
+.endm
+
+.macro ifcc3 arg0, arg1:vararg
+.if cc
+ movl \arg0, \arg1
+.endif
+.endm
+
+.macro ifcc4 arg0, arg1:vararg
+.if cc
+ movl \arg1, \arg0
+.endif
+.endm
+
+.text
+
+// CHECK: movl %esp, %ebp
+// CHECK: subl $0, %esp
+// CHECK: movl %eax, %ebx
+// CHECK: movl %ecx, %ebx
+// CHECK: movl %ecx, %eax
+// CHECK: movl %eax, %ecx
+// CHECK: movl %ecx, %eax
+// CHECK: movl %eax, %ecx
+.set cc,1
+ ifcc movl %esp, %ebp
+ subl $0, %esp
+
+ ifcc2 %eax %ebx
+ ifcc2 %ecx, %ebx
+ ifcc3 %ecx %eax
+ ifcc3 %eax, %ecx
+ ifcc4 %eax %ecx ## test
+ ifcc4 %ecx, %eax ## test
+
+// CHECK-NOT movl
+// CHECK: subl $1, %esp
+.set cc,0
+ ifcc movl %esp, %ebp
+ subl $1, %esp
diff --git a/test/MC/COFF/alias.s b/test/MC/COFF/alias.s
index f6f6d46bf6a3..fcaa4204d49b 100644
--- a/test/MC/COFF/alias.s
+++ b/test/MC/COFF/alias.s
@@ -1,4 +1,5 @@
-// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s -o - | llvm-readobj -t -r | FileCheck %s
+// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s -o - \
+// RUN: | llvm-readobj -t -r | FileCheck %s
local1:
external_aliased_to_local = local1
@@ -36,7 +37,7 @@ weak_aliased_to_external = external2
// CHECK-NEXT: AuxSymbolCount: 1
// CHECK: }
// CHECK: Symbol {
-// CHECK-NEXT: Name: local1
+// CHECK: Name: local1
// CHECK-NEXT: Value: 0
// CHECK-NEXT: Section: .text (1)
// CHECK-NEXT: BaseType: Null (0x0)
@@ -45,7 +46,7 @@ weak_aliased_to_external = external2
// CHECK-NEXT: AuxSymbolCount: 0
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: global_aliased_to_external
+// CHECK: Name: global_aliased_to_external
// CHECK-NEXT: Value: 0
// CHECK-NEXT: Section: (0)
// CHECK-NEXT: BaseType: Null (0x0)
@@ -68,7 +69,7 @@ weak_aliased_to_external = external2
// CHECK-NEXT: Section: .text (1)
// CHECK-NEXT: BaseType: Null (0x0)
// CHECK-NEXT: ComplexType: Null (0x0)
-// CHECK-NEXT: StorageClass: Static (0x3)
+// CHECK-NEXT: StorageClass: External (0x2)
// CHECK-NEXT: AuxSymbolCount: 0
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
@@ -89,7 +90,7 @@ weak_aliased_to_external = external2
// CHECK-NEXT: StorageClass: WeakExternal (0x69)
// CHECK-NEXT: AuxSymbolCount: 1
// CHECK-NEXT: AuxWeakExternal {
-// CHECK-NEXT: Linked: external2 (9)
+// CHECK-NEXT: Linked: external2
// CHECK-NEXT: Search: Library (0x2)
// CHECK-NEXT: Unused: (00 00 00 00 00 00 00 00 00 00)
// CHECK-NEXT: }
diff --git a/test/MC/COFF/bad-expr.s b/test/MC/COFF/bad-expr.s
new file mode 100644
index 000000000000..ecbdd415c3a6
--- /dev/null
+++ b/test/MC/COFF/bad-expr.s
@@ -0,0 +1,7 @@
+// RUN: not llvm-mc -filetype=obj -triple i386-pc-win32 %s 2>&1 | FileCheck %s
+
+// CHECK: symbol '__ImageBase' can not be undefined in a subtraction expression
+
+ .data
+_x:
+ .long _x-__ImageBase
diff --git a/test/MC/COFF/basic-coff-64.s b/test/MC/COFF/basic-coff-64.s
index 89d17452d0d7..38a9e578a4ca 100644
--- a/test/MC/COFF/basic-coff-64.s
+++ b/test/MC/COFF/basic-coff-64.s
@@ -25,10 +25,10 @@ _main: # @main
// CHECK: ImageFileHeader {
// CHECK: Machine: IMAGE_FILE_MACHINE_AMD64
-// CHECK: SectionCount: 2
+// CHECK: SectionCount: 3
// CHECK: TimeDateStamp: {{[0-9]+}}
// CHECK: PointerToSymbolTable: 0x{{[0-9A-F]+}}
-// CHECK: SymbolCount: 6
+// CHECK: SymbolCount: 8
// CHECK: OptionalHeaderSize: 0
// CHECK: Characteristics [ (0x0)
// CHECK: ]
diff --git a/test/MC/COFF/basic-coff.s b/test/MC/COFF/basic-coff.s
index 9b299707a130..38bfa6d1014c 100644
--- a/test/MC/COFF/basic-coff.s
+++ b/test/MC/COFF/basic-coff.s
@@ -25,10 +25,10 @@ L_.str: # @.str
// CHECK: ImageFileHeader {
// CHECK: Machine: IMAGE_FILE_MACHINE_I386
-// CHECK: SectionCount: 2
+// CHECK: SectionCount: 3
// CHECK: TimeDateStamp: {{[0-9]+}}
// CHECK: PointerToSymbolTable: 0x{{[0-9A-F]+}}
-// CHECK: SymbolCount: 6
+// CHECK: SymbolCount: 8
// CHECK: OptionalHeaderSize: 0
// CHECK: Characteristics [ (0x0)
// CHECK: ]
diff --git a/test/MC/COFF/bss.s b/test/MC/COFF/bss.s
index 86294c18683e..17ae0daa1c3a 100644
--- a/test/MC/COFF/bss.s
+++ b/test/MC/COFF/bss.s
@@ -1,4 +1,4 @@
-// The purpose of this test is to verify that bss sections are emited correctly.
+// The purpose of this test is to verify that bss sections are emitted correctly.
// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s | llvm-readobj -s | FileCheck %s
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-win32 %s | llvm-readobj -s | FileCheck %s
diff --git a/test/MC/COFF/bss_section.ll b/test/MC/COFF/bss_section.ll
index 60924f13064f..477b3dfbd3a6 100644
--- a/test/MC/COFF/bss_section.ll
+++ b/test/MC/COFF/bss_section.ll
@@ -4,3 +4,6 @@
@"\01?thingy@@3Ufoo@@B" = global %struct.foo zeroinitializer, align 4
; CHECK: .bss
+
+@thingy_linkonce = linkonce_odr global %struct.foo zeroinitializer, align 4
+; CHECK: .section .bss,"bw",discard,_thingy_linkonce
diff --git a/test/MC/COFF/comm.ll b/test/MC/COFF/comm.ll
index 74da557fb5cc..6fe122ef1d93 100644
--- a/test/MC/COFF/comm.ll
+++ b/test/MC/COFF/comm.ll
@@ -9,5 +9,5 @@
; CHECK: .lcomm _a,1
; CHECK: .lcomm _b,8,8
; .comm uses log2 alignment
-; CHECK: .comm _c,1,0
-; CHECK: .comm _d,8,3
+; CHECK: .comm _c,1
+; CHECK: .comm _d,8
diff --git a/test/MC/COFF/comm.s b/test/MC/COFF/comm.s
new file mode 100644
index 000000000000..37db75f9cc4a
--- /dev/null
+++ b/test/MC/COFF/comm.s
@@ -0,0 +1,25 @@
+// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s | llvm-readobj -t | FileCheck %s
+
+.lcomm _a,4,4
+.comm _b, 4
+
+
+// CHECK: Symbol {
+// CHECK: Name: _a
+// CHECK-NEXT: Value:
+// CHECK-NEXT: Section: .bss
+// CHECK-NEXT: BaseType: Null
+// CHECK-NEXT: ComplexType: Null
+// CHECK-NEXT: StorageClass: Static
+// CHECK-NEXT: AuxSymbolCount: 0
+// CHECK-NEXT: }
+
+// CHECK: Symbol {
+// CHECK: Name: _b
+// CHECK-NEXT: Value: 4
+// CHECK-NEXT: Section: (0)
+// CHECK-NEXT: BaseType: Null
+// CHECK-NEXT: ComplexType: Null
+// CHECK-NEXT: StorageClass: External
+// CHECK-NEXT: AuxSymbolCount: 0
+// CHECK-NEXT: }
diff --git a/test/MC/COFF/directive-section-characteristics.ll b/test/MC/COFF/directive-section-characteristics.ll
new file mode 100644
index 000000000000..ca8102af641d
--- /dev/null
+++ b/test/MC/COFF/directive-section-characteristics.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple i686-windows -filetype obj -o - %s | llvm-readobj -sections \
+; RUN: | FileCheck %s
+
+define dllexport void @function() {
+entry:
+ ret void
+}
+
+; CHECK: Section {
+; CHECK: Name: .drectve
+; CHECK: Characteristics [
+; CHECK: IMAGE_SCN_ALIGN_1BYTES
+; CHECK: IMAGE_SCN_LNK_INFO
+; CHECK: IMAGE_SCN_LNK_REMOVE
+; CHECK: ]
+; CHECK: }
+
diff --git a/test/MC/COFF/early-dce.s b/test/MC/COFF/early-dce.s
new file mode 100644
index 000000000000..ec1a9bda76fc
--- /dev/null
+++ b/test/MC/COFF/early-dce.s
@@ -0,0 +1,16 @@
+# RUN: llvm-mc -triple i686-windows -g -filetype obj -o - %s \
+# RUN: | llvm-readobj -s -t | FileCheck %s
+
+ .section .rdata
+
+ .align 8
+ .global data
+data:
+ .quad 0
+
+# CHECK: Sections [
+# CHECK: Section {
+# CHECK: Name: .text
+# CHECK: }
+# CHECK: ]
+
diff --git a/test/MC/COFF/feat00.s b/test/MC/COFF/feat00.s
index d08f407cef58..bfd47ad4abc0 100644
--- a/test/MC/COFF/feat00.s
+++ b/test/MC/COFF/feat00.s
@@ -6,7 +6,7 @@
// CHECK: Symbol {
// CHECK: Name: @feat.00
// CHECK: Value: 123
-// CHECK: Section: (-1)
+// CHECK: Section: (65535)
// CHECK: BaseType: Null (0x0)
// CHECK: ComplexType: Null (0x0)
// CHECK: StorageClass: External (0x2)
diff --git a/test/MC/COFF/file.s b/test/MC/COFF/file.s
new file mode 100644
index 000000000000..132e82b2e25d
--- /dev/null
+++ b/test/MC/COFF/file.s
@@ -0,0 +1,47 @@
+// RUN: llvm-mc -triple i686-windows -filetype obj %s -o - | llvm-objdump -t - \
+// RUN: | FileCheck %s
+
+// RUN: llvm-mc -triple i686-windows -filetype obj %s -o - \
+// RUN: | llvm-readobj -symbols | FileCheck %s -check-prefix CHECK-SCN
+
+ .file "null-padded.asm"
+// CHECK: (nx 1) {{0x[0-9]+}} .file
+// CHECK-NEXT: AUX null-padded.asm{{$}}
+
+ .file "eighteen-chars.asm"
+
+// CHECK: (nx 1) {{0x[0-9]+}} .file
+// CHECK-NEXT: AUX eighteen-chars.asm{{$}}
+
+ .file "multiple-auxiliary-entries.asm"
+
+// CHECK: (nx 2) {{0x[0-9]+}} .file
+// CHECK-NEXT: AUX multiple-auxiliary-entries.asm{{$}}
+
+// CHECK-SCN: Symbols [
+// CHECK-SCN: Symbol {
+// CHECK-SCN: Name: .file
+// CHECK-SCN: Section: (65534)
+// CHECK-SCN: StorageClass: File
+// CHECK-SCN: AuxFileRecord {
+// CHECK-SCN: FileName: null-padded.asm
+// CHECK-SCN: }
+// CHECK-SCN: }
+// CHECK-SCN: Symbol {
+// CHECK-SCN: Name: .file
+// CHECK-SCN: Section: (65534)
+// CHECK-SCN: StorageClass: File
+// CHECK-SCN: AuxFileRecord {
+// CHECK-SCN: FileName: eighteen-chars.asm
+// CHECK-SCN: }
+// CHECK-SCN: }
+// CHECK-SCN: Symbol {
+// CHECK-SCN: Name: .file
+// CHECK-SCN: Section: (65534)
+// CHECK-SCN: StorageClass: File
+// CHECK-SCN: AuxFileRecord {
+// CHECK-SCN: FileName: multiple-auxiliary-entries.asm
+// CHECK-SCN: }
+// CHECK-SCN: }
+// CHECK-SCN: ]
+
diff --git a/test/MC/COFF/global_ctors_dtors.ll b/test/MC/COFF/global_ctors_dtors.ll
index 2a25219a778c..ca17f24a68e0 100644
--- a/test/MC/COFF/global_ctors_dtors.ll
+++ b/test/MC/COFF/global_ctors_dtors.ll
@@ -9,8 +9,14 @@
@.str2 = private unnamed_addr constant [12 x i8] c"destructing\00", align 1
@.str3 = private unnamed_addr constant [5 x i8] c"main\00", align 1
-@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @a_global_ctor }]
-@llvm.global_dtors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @a_global_dtor }]
+%ini = type { i32, void()*, i8* }
+
+@llvm.global_ctors = appending global [3 x %ini ] [
+ %ini { i32 65535, void ()* @a_global_ctor, i8* null },
+ %ini { i32 65535, void ()* @b_global_ctor, i8* bitcast (i32* @b to i8*) },
+ %ini { i32 65535, void ()* @c_global_ctor, i8* bitcast (i32* @c to i8*) }
+]
+@llvm.global_dtors = appending global [1 x %ini ] [%ini { i32 65535, void ()* @a_global_dtor, i8* null }]
declare i32 @puts(i8*)
@@ -19,6 +25,20 @@ define void @a_global_ctor() nounwind {
ret void
}
+@b = global i32 zeroinitializer
+
+@c = available_externally dllimport global i32 zeroinitializer
+
+define void @b_global_ctor() nounwind {
+ store i32 42, i32* @b
+ ret void
+}
+
+define void @c_global_ctor() nounwind {
+ store i32 42, i32* @c
+ ret void
+}
+
define void @a_global_dtor() nounwind {
%1 = call i32 @puts(i8* getelementptr inbounds ([12 x i8]* @.str2, i32 0, i32 0))
ret void
@@ -29,11 +49,17 @@ define i32 @main() nounwind {
ret i32 0
}
-; WIN32: .section .CRT$XCU,"r"
+; WIN32: .section .CRT$XCU,"rd"
; WIN32: a_global_ctor
-; WIN32: .section .CRT$XTX,"r"
+; WIN32: .section .CRT$XCU,"rd",associative,{{_?}}b
+; WIN32: b_global_ctor
+; WIN32-NOT: c_global_ctor
+; WIN32: .section .CRT$XTX,"rd"
; WIN32: a_global_dtor
-; MINGW32: .section .ctors,"w"
+; MINGW32: .section .ctors,"wd"
; MINGW32: a_global_ctor
-; MINGW32: .section .dtors,"w"
+; MINGW32: .section .ctors,"wd",associative,{{_?}}b
+; MINGW32: b_global_ctor
+; MINGW32-NOT: c_global_ctor
+; MINGW32: .section .dtors,"wd"
; MINGW32: a_global_dtor
diff --git a/test/MC/COFF/initialised-data.ll b/test/MC/COFF/initialised-data.ll
new file mode 100644
index 000000000000..c4284696b8ca
--- /dev/null
+++ b/test/MC/COFF/initialised-data.ll
@@ -0,0 +1,7 @@
+; RUN: llc -mtriple i686-windows %s -o - | FileCheck %s
+; RUN: llc -mtriple x86_64-windows %s -o - | FileCheck %s
+
+@data = dllexport constant [5 x i8] c"data\00", align 1
+
+; CHECK: .section .rdata,"rd"
+
diff --git a/test/MC/COFF/invalid-def.s b/test/MC/COFF/invalid-def.s
new file mode 100644
index 000000000000..42821c22cf71
--- /dev/null
+++ b/test/MC/COFF/invalid-def.s
@@ -0,0 +1,5 @@
+# RUN: not llvm-mc -triple i686-windows -filetype obj -o /dev/null %s
+
+ .def first
+ .def second
+
diff --git a/test/MC/COFF/invalid-endef.s b/test/MC/COFF/invalid-endef.s
new file mode 100644
index 000000000000..c6fd8f596268
--- /dev/null
+++ b/test/MC/COFF/invalid-endef.s
@@ -0,0 +1,4 @@
+# RUN: not llvm-mc -triple i686-windows -filetype obj -o /dev/null %s
+
+ .endef
+
diff --git a/test/MC/COFF/invalid-scl-range.s b/test/MC/COFF/invalid-scl-range.s
new file mode 100644
index 000000000000..57225059821e
--- /dev/null
+++ b/test/MC/COFF/invalid-scl-range.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -triple i686-windows -filetype obj -o /dev/null %s
+
+ .def storage_class_range
+ .scl 1337
+ .endef
+
diff --git a/test/MC/COFF/invalid-scl.s b/test/MC/COFF/invalid-scl.s
new file mode 100644
index 000000000000..8565a5afe0e9
--- /dev/null
+++ b/test/MC/COFF/invalid-scl.s
@@ -0,0 +1,4 @@
+# RUN: not llvm-mc -triple i686-windows -filetype obj -o /dev/null %s
+
+ .scl 1337
+
diff --git a/test/MC/COFF/invalid-type-range.s b/test/MC/COFF/invalid-type-range.s
new file mode 100644
index 000000000000..92874cc4586b
--- /dev/null
+++ b/test/MC/COFF/invalid-type-range.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -triple i686-windows -filetype obj -o /dev/null %s
+
+ .def invalid_type_range
+ .type 65536
+ .endef
+
diff --git a/test/MC/COFF/invalid-type.s b/test/MC/COFF/invalid-type.s
new file mode 100644
index 000000000000..a1e131e99e55
--- /dev/null
+++ b/test/MC/COFF/invalid-type.s
@@ -0,0 +1,4 @@
+# RUN: not llvm-mc -triple i686-windows -filetype obj -o /dev/null %s
+
+ .type 65536
+
diff --git a/test/MC/COFF/ir-to-imgrel.ll b/test/MC/COFF/ir-to-imgrel.ll
new file mode 100644
index 000000000000..39884d2a15eb
--- /dev/null
+++ b/test/MC/COFF/ir-to-imgrel.ll
@@ -0,0 +1,6 @@
+; RUN: llc -mtriple=x86_64-pc-win32 %s -o - | FileCheck %s --check-prefix=X64
+
+@__ImageBase = external global i8
+
+; X64: .quad "?x@@3HA"@IMGREL32
+@"\01?x@@3HA" = global i64 sub nsw (i64 ptrtoint (i64* @"\01?x@@3HA" to i64), i64 ptrtoint (i8* @__ImageBase to i64)), align 8
diff --git a/test/MC/COFF/linker-options.ll b/test/MC/COFF/linker-options.ll
index de119417447e..0be74e57ad6e 100755
--- a/test/MC/COFF/linker-options.ll
+++ b/test/MC/COFF/linker-options.ll
@@ -5,6 +5,7 @@
metadata !{ metadata !"/DEFAULTLIB:msvcrt.lib" },
metadata !{ metadata !"/DEFAULTLIB:msvcrt.lib",
metadata !"/DEFAULTLIB:secur32.lib" },
+ metadata !{ metadata !"/DEFAULTLIB:C:\5Cpath to\5Casan_rt.lib" },
metadata !{ metadata !"/with spaces" } } }
!llvm.module.flags = !{ !0 }
@@ -17,5 +18,6 @@ define dllexport void @foo() {
; CHECK: .ascii " /DEFAULTLIB:msvcrt.lib"
; CHECK: .ascii " /DEFAULTLIB:msvcrt.lib"
; CHECK: .ascii " /DEFAULTLIB:secur32.lib"
+; CHECK: .ascii " \"/DEFAULTLIB:C:\\path to\\asan_rt.lib\""
; CHECK: .ascii " \"/with spaces\""
; CHECK: .ascii " /EXPORT:_foo"
diff --git a/test/MC/COFF/linkonce-invalid.s b/test/MC/COFF/linkonce-invalid.s
index 90ce4a7ad317..cc3a27c90333 100644
--- a/test/MC/COFF/linkonce-invalid.s
+++ b/test/MC/COFF/linkonce-invalid.s
@@ -19,21 +19,9 @@
// CHECK: error: unexpected token in directive
.linkonce discard foo
-// CHECK: error: expected associated section name
+// CHECK: error: cannot make section associative with .linkonce
.linkonce associative
-// CHECK: error: cannot associate unknown section 'unknown'
-.linkonce associative unknown
-
-// CHECK: error: cannot associate a section with itself
-.linkonce associative invalid
-
-// CHECK: error: associated section must be a COMDAT section
-.linkonce associative non_comdat
-
-// CHECK: error: associated section cannot be itself associative
-.linkonce associative assoc
-
// CHECK: error: section 'multi' is already linkonce
.section multi
.linkonce discard
diff --git a/test/MC/COFF/linkonce.s b/test/MC/COFF/linkonce.s
index e7b7f475a3c6..f2e350645bfc 100644
--- a/test/MC/COFF/linkonce.s
+++ b/test/MC/COFF/linkonce.s
@@ -24,7 +24,6 @@
.long 1
.section s6
-.linkonce associative s1
.long 1
.section s7
@@ -39,11 +38,6 @@
.linkonce discard
.long 1
-// Check that valid '.section' names can be associated.
-.section multi
-.linkonce associative .foo$bar
-.long 1
-
// CHECK: Sections [
// CHECK: Section {
@@ -79,7 +73,6 @@
// CHECK: Section {
// CHECK: Name: s6
// CHECK: Characteristics [
-// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
@@ -94,86 +87,64 @@
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
-// CHECK: Section {
-// CHECK: Name: multi
-// CHECK: Characteristics [
-// CHECK: IMAGE_SCN_LNK_COMDAT
-// CHECK: ]
-// CHECK: }
// CHECK: ]
// CHECK: Symbols [
// CHECK: Symbol {
// CHECK: Name: s1
-// CHECK: Section: s1 (1)
+// CHECK: Section: s1 (4)
// CHECK: AuxSectionDef {
-// CHECK: Number: 1
+// CHECK: Number: 4
// CHECK: Selection: Any (0x2)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: s2
-// CHECK: Section: s2 (2)
+// CHECK: Section: s2 (5)
// CHECK: AuxSectionDef {
-// CHECK: Number: 2
+// CHECK: Number: 5
// CHECK: Selection: NoDuplicates (0x1)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: s3
-// CHECK: Section: s3 (3)
+// CHECK: Section: s3 (6)
// CHECK: AuxSectionDef {
-// CHECK: Number: 3
+// CHECK: Number: 6
// CHECK: Selection: Any (0x2)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: s4
-// CHECK: Section: s4 (4)
+// CHECK: Section: s4 (7)
// CHECK: AuxSectionDef {
-// CHECK: Number: 4
+// CHECK: Number: 7
// CHECK: Selection: SameSize (0x3)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: s5
-// CHECK: Section: s5 (5)
+// CHECK: Section: s5 (8)
// CHECK: AuxSectionDef {
-// CHECK: Number: 5
+// CHECK: Number: 8
// CHECK: Selection: ExactMatch (0x4)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: s6
-// CHECK: Section: s6 (6)
-// CHECK: AuxSectionDef {
-// CHECK: Number: 1
-// CHECK: Selection: Associative (0x5)
-// CHECK: AssocSection: s1
-// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: s7
-// CHECK: Section: s7 (7)
+// CHECK: Section: s7 (10)
// CHECK: AuxSectionDef {
-// CHECK: Number: 7
+// CHECK: Number: 10
// CHECK: Selection: Largest (0x6)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: s8
-// CHECK: Section: s8 (8)
+// CHECK: Section: s8 (11)
// CHECK: AuxSectionDef {
-// CHECK: Number: 8
+// CHECK: Number: 11
// CHECK: Selection: Newest (0x7)
// CHECK: }
// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: multi
-// CHECK: Value: 0
-// CHECK: Section: multi (10)
-// CHECK: AuxSectionDef {
-// CHECK: Number: 9
-// CHECK: Selection: Associative (0x5)
-// CHECK: AssocSection: .foo$bar
-// CHECK: }
-// CHECK: }
diff --git a/test/MC/COFF/lit.local.cfg b/test/MC/COFF/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/MC/COFF/lit.local.cfg
+++ b/test/MC/COFF/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/COFF/lset0.s b/test/MC/COFF/lset0.s
index f5020c83ef60..f4a13bf4f893 100755
--- a/test/MC/COFF/lset0.s
+++ b/test/MC/COFF/lset0.s
@@ -1,12 +1,11 @@
-// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s -o - | llvm-nm | FileCheck %s
+// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s -o - | llvm-nm - | FileCheck %s --check-prefix=GLOBAL
+// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s -o - | llvm-nm - | FileCheck %s --check-prefix=LOCAL
not_global = 123
global = 456
.globl global
-.Llocal = 789
+Llocal = 789
-// CHECK-NOT: not_global
-// CHECK-NOT: Llocal
-// CHECK: global
-// CHECK-NOT: not_global
-// CHECK-NOT: Llocal
+// LOCAL-NOT: local
+// GLOBAL: A global
+// GLOBAL: a not_global
diff --git a/test/MC/COFF/offset.s b/test/MC/COFF/offset.s
new file mode 100644
index 000000000000..d0d3710fcce2
--- /dev/null
+++ b/test/MC/COFF/offset.s
@@ -0,0 +1,19 @@
+// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s -o - | llvm-readobj -t -r | FileCheck %s
+
+ .data
+ .globl test1_foo
+test1_foo:
+ .long 42
+
+ .globl test1_zed
+test1_zed = test1_foo + 1
+
+// CHECK: Symbol {
+// CHECK: Name: test1_zed
+// CHECK-NEXT: Value: 1
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: BaseType: Null
+// CHECK-NEXT: ComplexType: Null
+// CHECK-NEXT: StorageClass: External
+// CHECK-NEXT: AuxSymbolCount: 0
+// CHECK-NEXT: }
diff --git a/test/MC/COFF/secidx-diagnostic.s b/test/MC/COFF/secidx-diagnostic.s
new file mode 100644
index 000000000000..3e496c3fd45c
--- /dev/null
+++ b/test/MC/COFF/secidx-diagnostic.s
@@ -0,0 +1,8 @@
+// RUN: not llvm-mc -filetype=obj -triple i686-pc-win32 %s 2>%t
+// RUN: FileCheck %s < %t
+
+// CHECK: symbol 'bar' can not be undefined
+
+.data
+foo:
+ .secidx bar
diff --git a/test/MC/COFF/secidx.s b/test/MC/COFF/secidx.s
new file mode 100644
index 000000000000..619d777e0177
--- /dev/null
+++ b/test/MC/COFF/secidx.s
@@ -0,0 +1,16 @@
+// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s | llvm-readobj -s -sr | FileCheck %s
+
+// check that we produce the correct relocation for .secidx
+
+Lfoo:
+ .secidx Lfoo
+ .secidx Lbar
+
+.section spam
+Lbar:
+ ret
+
+// CHECK: Relocations [
+// CHECK-NEXT: 0x0 IMAGE_REL_I386_SECTION .text
+// CHECK-NEXT: 0x4 IMAGE_REL_I386_SECTION spam
+// CHECK-NEXT: ]
diff --git a/test/MC/COFF/section-comdat-conflict.s b/test/MC/COFF/section-comdat-conflict.s
new file mode 100644
index 000000000000..7ed452a5cdcb
--- /dev/null
+++ b/test/MC/COFF/section-comdat-conflict.s
@@ -0,0 +1,13 @@
+// RUN: not llvm-mc -triple i386-pc-win32 -filetype=obj < %s 2>&1 | FileCheck %s
+
+// CHECK: conflicting sections for symbol
+
+ .section .xyz
+ .global bar
+bar:
+ .long 42
+
+ .section .abcd,"xr",discard,bar
+ .global foo
+foo:
+ .long 42
diff --git a/test/MC/COFF/section-comdat-conflict2.s b/test/MC/COFF/section-comdat-conflict2.s
new file mode 100644
index 000000000000..e2dfc2d68b2f
--- /dev/null
+++ b/test/MC/COFF/section-comdat-conflict2.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -triple i386-pc-win32 -filetype=obj < %s 2>&1 | FileCheck %s
+
+// CHECK: two sections have the same comdat
+
+ .section .xyz,"xr",discard,bar
+ .section .abcd,"xr",discard,bar
diff --git a/test/MC/COFF/section-comdat.s b/test/MC/COFF/section-comdat.s
index dd5be871b050..e7052d8f5ae3 100644
--- a/test/MC/COFF/section-comdat.s
+++ b/test/MC/COFF/section-comdat.s
@@ -1,8 +1,7 @@
// RUN: llvm-mc -triple i386-pc-win32 -filetype=obj %s | llvm-readobj -s -t | FileCheck %s
// RUN: llvm-mc -triple x86_64-pc-win32 -filetype=obj %s | llvm-readobj -s -t | FileCheck %s
-.section assocSec
-.linkonce
+.section assocSec, "dr", discard, "assocSym"
.long 1
.section secName, "dr", discard, "Symbol1"
@@ -25,7 +24,7 @@ Symbol3:
Symbol4:
.long 1
-.section SecName, "dr", associative assocSec, "Symbol5"
+.section SecName, "dr", associative, "assocSym"
.globl Symbol5
Symbol5:
.long 1
@@ -40,58 +39,63 @@ Symbol6:
Symbol7:
.long 1
+.section assocSec, "dr", associative, "assocSym"
+.globl Symbol8
+Symbol8:
+.long 1
+
// CHECK: Sections [
// CHECK: Section {
-// CHECK: Number: 1
+// CHECK: Number: 4
// CHECK: Name: assocSec
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 2
+// CHECK: Number: 5
// CHECK: Name: secName
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 3
+// CHECK: Number: 6
// CHECK: Name: secName
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 4
+// CHECK: Number: 7
// CHECK: Name: SecName
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 5
+// CHECK: Number: 8
// CHECK: Name: SecName
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 6
+// CHECK: Number: 9
// CHECK: Name: SecName
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 7
+// CHECK: Number: 10
// CHECK: Name: SecName
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
// CHECK: ]
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 8
+// CHECK: Number: 11
// CHECK: Name: SecName
// CHECK: Characteristics [
// CHECK: IMAGE_SCN_LNK_COMDAT
@@ -101,88 +105,104 @@ Symbol7:
// CHECK: Symbols [
// CHECK: Symbol {
// CHECK: Name: assocSec
-// CHECK: Section: assocSec (1)
+// CHECK: Section: assocSec (4)
// CHECK: AuxSectionDef {
// CHECK: Selection: Any
// CHECK: }
// CHECK: }
// CHECK: Symbol {
+// CHECK: Name: assocSym
+// CHECK: Section: assocSec
+// CHECK: }
+// CHECK: Symbol {
// CHECK: Name: secName
-// CHECK: Section: secName (2)
+// CHECK: Section: secName (5)
// CHECK: AuxSectionDef {
// CHECK: Selection: Any
// CHECK: }
// CHECK: }
// CHECK: Symbol {
+// CHECK: Name: Symbol1
+// CHECK: Section: secName (5)
+// CHECK: }
+// CHECK: Symbol {
// CHECK: Name: secName
-// CHECK: Section: secName (3)
+// CHECK: Section: secName (6)
// CHECK: AuxSectionDef {
// CHECK: Selection: NoDuplicates
// CHECK: }
// CHECK: }
// CHECK: Symbol {
+// CHECK: Name: Symbol2
+// CHECK: Section: secName (6)
+// CHECK: }
+// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (4)
+// CHECK: Section: SecName (7)
// CHECK: AuxSectionDef {
// CHECK: Selection: SameSize
// CHECK: }
// CHECK: }
// CHECK: Symbol {
+// CHECK: Name: Symbol3
+// CHECK: Section: SecName (7)
+// CHECK: }
+// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (5)
+// CHECK: Section: SecName (8)
// CHECK: AuxSymbolCount: 1
// CHECK: AuxSectionDef {
// CHECK: Selection: ExactMatch
// CHECK: }
// CHECK: }
// CHECK: Symbol {
+// CHECK: Name: Symbol4
+// CHECK: Section: SecName (8)
+// CHECK: }
+// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (6)
+// CHECK: Section: SecName (9)
// CHECK: AuxSectionDef {
// CHECK: Selection: Associative
-// CHECK: AssocSection: assocSec (1)
+// CHECK: AssocSection: assocSec (4)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (7)
+// CHECK: Section: SecName (10)
// CHECK: AuxSectionDef {
// CHECK: Selection: Largest
// CHECK: }
// CHECK: }
// CHECK: Symbol {
+// CHECK: Name: Symbol6
+// CHECK: Section: SecName (10)
+// CHECK: }
+// CHECK: Symbol {
// CHECK: Name: SecName
-// CHECK: Section: SecName (8)
+// CHECK: Section: SecName (11)
// CHECK: AuxSectionDef {
// CHECK: Selection: Newest (0x7)
// CHECK: }
// CHECK: }
// CHECK: Symbol {
-// CHECK: Name: Symbol1
-// CHECK: Section: secName (2)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: Symbol2
-// CHECK: Section: secName (3)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: Symbol3
-// CHECK: Section: SecName (4)
+// CHECK: Name: Symbol7
+// CHECK: Section: SecName (11)
// CHECK: }
// CHECK: Symbol {
-// CHECK: Name: Symbol4
-// CHECK: Section: SecName (5)
+// CHECK: Name: assocSec
+// CHECK: Section: assocSec (12)
+// CHECK: AuxSectionDef {
+// CHECK: Selection: Associative (0x5)
+// CHECK: AssocSection: assocSec (4)
+// CHECK: }
// CHECK: }
// CHECK: Symbol {
// CHECK: Name: Symbol5
-// CHECK: Section: SecName (6)
+// CHECK: Section: SecName (9)
// CHECK: }
// CHECK: Symbol {
-// CHECK: Name: Symbol6
-// CHECK: Section: SecName (7)
-// CHECK: }
-// CHECK: Symbol {
-// CHECK: Name: Symbol7
-// CHECK: Section: SecName (8)
+// CHECK: Name: Symbol8
+// CHECK: Section: assocSec (12)
// CHECK: }
// CHECK: ]
diff --git a/test/MC/COFF/section-name-encoding.s b/test/MC/COFF/section-name-encoding.s
index 0f531f397565..7edd6d7446d9 100644
--- a/test/MC/COFF/section-name-encoding.s
+++ b/test/MC/COFF/section-name-encoding.s
@@ -3,17 +3,18 @@
// Encodings for different lengths:
// [0, 8]: raw name
// (8, 999999]: base 10 string table index (/9999999)
+// (999999, 0xFFFFFFFF]: base 64 string table index (//AAAAAA)
//
// RUN: llvm-mc -triple x86_64-pc-win32 -filetype=obj %s | llvm-readobj -s | FileCheck %s
// Raw encoding
// CHECK: Section {
-// CHECK: Number: 1
+// CHECK: Number: 4
// CHECK: Name: s (73 00 00 00 00 00 00 00)
// CHECK: }
// CHECK: Section {
-// CHECK: Number: 2
+// CHECK: Number: 5
// CHECK: Name: s1234567 (73 31 32 33 34 35 36 37)
// CHECK: }
.section s; .long 1
@@ -24,7 +25,7 @@
// /4
// CHECK: Section {
-// CHECK: Number: 3
+// CHECK: Number: 6
// CHECK: Name: s12345678 (2F 34 00 00 00 00 00 00)
// CHECK: }
.section s12345678; .long 1
@@ -52,11 +53,36 @@ pad_sections aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
// /1000029 == 4 + 10 + (5 * (2 + (20 * 10 * 1000) + 1))
// v | | v ~~~~~~~~~~~~~~ v
-// table size v v "p0" pad NUL seperator
+// table size v v "p0" pad NUL separator
// "s12345678\0" # of pad sections
//
// CHECK: Section {
-// CHECK: Number: 9
+// CHECK: Number: 12
// CHECK: Name: seven_digit (2F 31 30 30 30 30 32 39)
// CHECK: }
.section seven_digit; .long 1
+
+
+// Generate padding sections to increase the string table size to at least
+// 10,000,000 bytes.
+.macro pad_sections_ex pad
+ // 9x \pad
+ pad_sections \pad\pad\pad\pad\pad\pad\pad\pad\pad
+.endm
+
+// 1000x 'a'
+pad_sections_ex aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+
+
+// //AAmJa4 == 1000029 + 12 + (5 * (2 + (9 * 20 * 10 * 1000) + 1)) == 38*64^3 + 9*64^2 + 26*64 + 56
+// v | | v ~~~~~~~~~~~~~~~~~~ v
+// seven_digit offset v v "p0" pad NUL separator
+// "seven_digit\0" # of pad sections
+//
+// "2F 2F 41 41 6D 4A 61 34" is "//AAmJa4", which decodes to "0 0 38 9 26 56".
+//
+// CHECK: Section {
+// CHECK: Number: 18
+// CHECK: Name: double_slash (2F 2F 41 41 6D 4A 61 34)
+// CHECK: }
+.section double_slash; .long 1
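
The comments in section-name-encoding.s above describe how COFF refers to section names longer than 8 bytes through the string table: smaller offsets are written as "/<decimal>", and once the offset no longer fits in seven decimal digits it becomes "//" followed by six base-64 digits ("//AAmJa4" above decodes to offset 10000056). A rough Python sketch of that encoding, assuming the standard COFF base-64 alphabet:

    def encode_coff_long_section_name(offset):
        # Sketch of the string-table reference placed in the 8-byte name field;
        # not LLVM's implementation.
        if offset < 10**7:
            return "/%d" % offset                    # e.g. "/1000029"
        alphabet = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                    "abcdefghijklmnopqrstuvwxyz0123456789+/")
        digits = ""
        for _ in range(6):                           # six base-64 digits, MSB first
            digits = alphabet[offset % 64] + digits
            offset //= 64
        return "//" + digits

    assert encode_coff_long_section_name(10000056) == "//AAmJa4"
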
diff --git a/test/MC/COFF/seh-stackalloc-zero.s b/test/MC/COFF/seh-stackalloc-zero.s
new file mode 100644
index 000000000000..898ac844417d
--- /dev/null
+++ b/test/MC/COFF/seh-stackalloc-zero.s
@@ -0,0 +1,11 @@
+// RUN: not llvm-mc -triple x86_64-pc-win32 -filetype=obj %s -o %t.o 2>&1 | FileCheck %s
+
+// CHECK: Allocation size must be non-zero!
+
+ .globl smallFunc
+ .def smallFunc; .scl 2; .type 32; .endef
+ .seh_proc smallFunc
+ .seh_stackalloc 0
+smallFunc:
+ ret
+ .seh_endproc
diff --git a/test/MC/COFF/seh.s b/test/MC/COFF/seh.s
index 72d42f4b2ea3..cd884b4fa009 100644
--- a/test/MC/COFF/seh.s
+++ b/test/MC/COFF/seh.s
@@ -35,13 +35,13 @@
// CHECK-NEXT: ]
// CHECK-NEXT: Relocations [
-// CHECK-NEXT: Section (2) .xdata {
+// CHECK-NEXT: Section (4) .xdata {
// CHECK-NEXT: 0x14 IMAGE_REL_AMD64_ADDR32NB __C_specific_handler
// CHECK-NEXT: 0x20 IMAGE_REL_AMD64_ADDR32NB func
// CHECK-NEXT: 0x24 IMAGE_REL_AMD64_ADDR32NB func
// CHECK-NEXT: 0x28 IMAGE_REL_AMD64_ADDR32NB .xdata
// CHECK-NEXT: }
-// CHECK-NEXT: Section (3) .pdata {
+// CHECK-NEXT: Section (5) .pdata {
// CHECK-NEXT: 0x0 IMAGE_REL_AMD64_ADDR32NB func
// CHECK-NEXT: 0x4 IMAGE_REL_AMD64_ADDR32NB func
// CHECK-NEXT: 0x8 IMAGE_REL_AMD64_ADDR32NB .xdata
diff --git a/test/MC/COFF/symbol-alias.s b/test/MC/COFF/symbol-alias.s
index ccada3793ca3..71ccec31b821 100644
--- a/test/MC/COFF/symbol-alias.s
+++ b/test/MC/COFF/symbol-alias.s
@@ -51,7 +51,7 @@ _bar_alias = _bar
// CHECK-NEXT: Value: [[FOO_VALUE]]
// CHECK-NEXT: Section: [[FOO_SECTION_NUMBER]]
// CHECK-NEXT: BaseType: [[FOO_SIMPLE_TYPE]]
-// CHECK-NEXT: ComplexType: [[FOO_COMPLEX_TYPE]]
+// CHECK-NEXT: ComplexType: Null (0x0)
// CHECK-NEXT: StorageClass: [[FOO_STORAGE_CLASS]]
// CHECK-NEXT: AuxSymbolCount: [[FOO_NUMBER_OF_AUX_SYMBOLS]]
diff --git a/test/MC/COFF/symbol-fragment-offset-64.s b/test/MC/COFF/symbol-fragment-offset-64.s
index b8244709aa75..deac88869b20 100644
--- a/test/MC/COFF/symbol-fragment-offset-64.s
+++ b/test/MC/COFF/symbol-fragment-offset-64.s
@@ -36,10 +36,10 @@ _main: # @main
// CHECK: {
// CHECK: Machine: IMAGE_FILE_MACHINE_AMD64
-// CHECK: SectionCount: 2
+// CHECK: SectionCount: 3
// CHECK: TimeDateStamp: {{[0-9]+}}
// CHECK: PointerToSymbolTable: 0x{{[0-9A-F]+}}
-// CHECK: SymbolCount: 7
+// CHECK: SymbolCount: 9
// CHECK: OptionalHeaderSize: 0
// CHECK: Characteristics [ (0x0)
// CHECK: ]
diff --git a/test/MC/COFF/symbol-fragment-offset.s b/test/MC/COFF/symbol-fragment-offset.s
index 71b1703972ab..b09c5af1b61e 100644
--- a/test/MC/COFF/symbol-fragment-offset.s
+++ b/test/MC/COFF/symbol-fragment-offset.s
@@ -36,10 +36,10 @@ L_.str2:
// CHECK: {
// CHECK: Machine: IMAGE_FILE_MACHINE_I386 (0x14C)
-// CHECK: SectionCount: 2
+// CHECK: SectionCount: 3
// CHECK: TimeDateStamp: {{[0-9]+}}
// CHECK: PointerToSymbolTable: 0x{{[0-9A-F]+}}
-// CHECK: SymbolCount: 7
+// CHECK: SymbolCount: 9
// CHECK: OptionalHeaderSize: 0
// CHECK: Characteristics [ (0x0)
// CHECK: ]
diff --git a/test/MC/COFF/timestamp.s b/test/MC/COFF/timestamp.s
new file mode 100644
index 000000000000..18736a2b2d68
--- /dev/null
+++ b/test/MC/COFF/timestamp.s
@@ -0,0 +1,4 @@
+// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s -o - | llvm-readobj -h | FileCheck %s
+
+// CHECK: ImageFileHeader {
+// CHECK: TimeDateStamp: {{.*}} (0x0)
diff --git a/test/MC/COFF/tricky-names.ll b/test/MC/COFF/tricky-names.ll
index 6e041d38d345..458aa41fb2be 100644
--- a/test/MC/COFF/tricky-names.ll
+++ b/test/MC/COFF/tricky-names.ll
@@ -25,11 +25,11 @@ define weak i32 @"\01??_B?$num_put@_WV?$back_insert_iterator@V?$basic_string@_WU
; ASM-NOT: .globl "@foo.bar"
; READOBJ: Symbol
-; READOBJ: Name: .text$??_B?$num_put@_WV?$back_insert_iterator@V?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@@std@@@std@@51
-; READOBJ: Section: .text$??_B?$num_put@_WV?$back_insert_iterator@V?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@@std@@@std@@51
+; READOBJ: Name: .text
+; READOBJ: Section: .text
; READOBJ: Symbol
; READOBJ: Name: ??_B?$num_put@_WV?$back_insert_iterator@V?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@@std@@@std@@51
-; READOBJ: Section: .text$??_B?$num_put@_WV?$back_insert_iterator@V?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@@std@@@std@@51
+; READOBJ: Section: .text
; READOBJ: Symbol
; READOBJ: Name: ??__E_Generic_object@?$_Error_objects@H@std@@YAXXZ
; READOBJ: Symbol
diff --git a/test/MC/COFF/weak-symbol.ll b/test/MC/COFF/weak-symbol.ll
index 7f2e87cb75a7..fd78307c1f22 100644
--- a/test/MC/COFF/weak-symbol.ll
+++ b/test/MC/COFF/weak-symbol.ll
@@ -8,37 +8,41 @@
; RUN: llc -mtriple=x86_64-pc-mingw32 %s -o - | FileCheck %s --check-prefix=X64
; Mangled function
-; X86: .section .text$_Z3foo
-; X86: .linkonce discard
+; X86: .section .text,"xr",discard,__Z3foo
; X86: .globl __Z3foo
;
-; X64: .section .text$_Z3foo
-; X64: .linkonce discard
+; X64: .section .text,"xr",discard,_Z3foo
; X64: .globl _Z3foo
define weak void @_Z3foo() {
ret void
}
; Unmangled function
-; X86: .section .sect$f
-; X86: .linkonce discard
+; X86: .section .sect,"xr",discard,_f
; X86: .globl _f
;
-; X64: .section .sect$f
-; X64: .linkonce discard
+; X64: .section .sect,"xr",discard,f
; X64: .globl f
define weak void @f() section ".sect" {
ret void
}
; Weak global
-; X86: .section .data$a
-; X86: .linkonce discard
+; X86: .section .data,"rd",discard,_a
; X86: .globl _a
; X86: .zero 12
;
-; X64: .section .data$a
-; X64: .linkonce discard
+; X64: .section .data,"rd",discard,a
; X64: .globl a
; X64: .zero 12
@a = weak unnamed_addr constant { i32, i32, i32 } { i32 0, i32 0, i32 0}, section ".data"
+
+; X86: .section .tls$,"wd",discard,_b
+; X86: .globl _b
+; X86: .long 0
+;
+; X64: .section .tls$,"wd",discard,b
+; X64: .globl b
+; X64: .long 0
+
+@b = weak_odr thread_local global i32 0, align 4
diff --git a/test/MC/COFF/weak.s b/test/MC/COFF/weak.s
index b9df0f1df2fd..accd3f452eaf 100644
--- a/test/MC/COFF/weak.s
+++ b/test/MC/COFF/weak.s
@@ -52,7 +52,7 @@ LBB0_2: # %return
// CHECK: Symbol {
// CHECK: Name: .weak._test_weak.default
// CHECK-NEXT: Value: 0
-// CHECK-NEXT: Section: (-1)
+// CHECK-NEXT: Section: (65535)
// CHECK-NEXT: BaseType: Null
// CHECK-NEXT: ComplexType: Null
// CHECK-NEXT: StorageClass: External
diff --git a/test/MC/Disassembler/AArch64/a64-ignored-fields.txt b/test/MC/Disassembler/AArch64/a64-ignored-fields.txt
index 799ecdfdcdf3..1860bf6601b7 100644
--- a/test/MC/Disassembler/AArch64/a64-ignored-fields.txt
+++ b/test/MC/Disassembler/AArch64/a64-ignored-fields.txt
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple=aarch64 -mattr=fp-armv8 -disassemble -show-encoding < %s | FileCheck %s
+# RUN: llvm-mc -triple=arm64 -mattr=fp-armv8 -disassemble -show-encoding < %s | FileCheck %s
# The "Rm" bits are ignored, but the canonical representation has them filled
# with 0s. This is what we should produce even if the input bit-pattern had
diff --git a/test/MC/Disassembler/AArch64/arm64-advsimd.txt b/test/MC/Disassembler/AArch64/arm64-advsimd.txt
new file mode 100644
index 000000000000..cceee672dfd7
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-advsimd.txt
@@ -0,0 +1,2283 @@
+# RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -output-asm-variant=1 --disassemble < %s | FileCheck %s
+
+0x00 0xb8 0x20 0x0e
+0x00 0xb8 0x20 0x4e
+0x00 0xb8 0x60 0x0e
+0x00 0xb8 0x60 0x4e
+0x00 0xb8 0xa0 0x0e
+0x00 0xb8 0xa0 0x4e
+
+# CHECK: abs.8b v0, v0
+# CHECK: abs.16b v0, v0
+# CHECK: abs.4h v0, v0
+# CHECK: abs.8h v0, v0
+# CHECK: abs.2s v0, v0
+# CHECK: abs.4s v0, v0
+
+0x00 0x84 0x20 0x0e
+0x00 0x84 0x20 0x4e
+0x00 0x84 0x60 0x0e
+0x00 0x84 0x60 0x4e
+0x00 0x84 0xa0 0x0e
+0x00 0x84 0xa0 0x4e
+0x00 0x84 0xe0 0x4e
+
+# CHECK: add.8b v0, v0, v0
+# CHECK: add.16b v0, v0, v0
+# CHECK: add.4h v0, v0, v0
+# CHECK: add.8h v0, v0, v0
+# CHECK: add.2s v0, v0, v0
+# CHECK: add.4s v0, v0, v0
+# CHECK: add.2d v0, v0, v0
+
+0x41 0x84 0xe3 0x5e
+
+# CHECK: add d1, d2, d3
+
+0x00 0x40 0x20 0x0e
+0x00 0x40 0x20 0x4e
+0x00 0x40 0x60 0x0e
+0x00 0x40 0x60 0x4e
+0x00 0x40 0xa0 0x0e
+0x00 0x40 0xa0 0x4e
+
+# CHECK: addhn.8b v0, v0, v0
+# CHECK: addhn2.16b v0, v0, v0
+# CHECK: addhn.4h v0, v0, v0
+# CHECK: addhn2.8h v0, v0, v0
+# CHECK: addhn.2s v0, v0, v0
+# CHECK: addhn2.4s v0, v0, v0
+
+0x00 0xbc 0x20 0x0e
+0x00 0xbc 0x20 0x4e
+0x00 0xbc 0x60 0x0e
+0x00 0xbc 0x60 0x4e
+0x00 0xbc 0xa0 0x0e
+0x00 0xbc 0xa0 0x4e
+0x00 0xbc 0xe0 0x4e
+
+# CHECK: addp.8b v0, v0, v0
+# CHECK: addp.16b v0, v0, v0
+# CHECK: addp.4h v0, v0, v0
+# CHECK: addp.8h v0, v0, v0
+# CHECK: addp.2s v0, v0, v0
+# CHECK: addp.4s v0, v0, v0
+# CHECK: addp.2d v0, v0, v0
+
+0x00 0xb8 0xf1 0x5e
+
+# CHECK: addp.2d d0, v0
+
+0x00 0xb8 0x31 0x0e
+0x00 0xb8 0x31 0x4e
+0x00 0xb8 0x71 0x0e
+0x00 0xb8 0x71 0x4e
+0x00 0xb8 0xb1 0x4e
+
+# CHECK: addv.8b b0, v0
+# CHECK: addv.16b b0, v0
+# CHECK: addv.4h h0, v0
+# CHECK: addv.8h h0, v0
+# CHECK: addv.4s s0, v0
+
+
+# INS/DUP
+0x60 0x0c 0x08 0x4e
+0x60 0x0c 0x04 0x4e
+0x60 0x0c 0x04 0x0e
+0x60 0x0c 0x02 0x4e
+0x60 0x0c 0x02 0x0e
+0x60 0x0c 0x01 0x4e
+0x60 0x0c 0x01 0x0e
+
+# CHECK: dup.2d v0, x3
+# CHECK: dup.4s v0, w3
+# CHECK: dup.2s v0, w3
+# CHECK: dup.8h v0, w3
+# CHECK: dup.4h v0, w3
+# CHECK: dup.16b v0, w3
+# CHECK: dup.8b v0, w3
+
+0x60 0x04 0x18 0x4e
+0x60 0x04 0x0c 0x0e
+0x60 0x04 0x0c 0x4e
+0x60 0x04 0x06 0x0e
+0x60 0x04 0x06 0x4e
+0x60 0x04 0x03 0x0e
+0x60 0x04 0x03 0x4e
+
+# CHECK: dup.2d v0, v3[1]
+# CHECK: dup.2s v0, v3[1]
+# CHECK: dup.4s v0, v3[1]
+# CHECK: dup.4h v0, v3[1]
+# CHECK: dup.8h v0, v3[1]
+# CHECK: dup.8b v0, v3[1]
+# CHECK: dup.16b v0, v3[1]
+
+
+0x43 0x2c 0x14 0x4e
+0x43 0x2c 0x14 0x4e
+0x43 0x3c 0x14 0x0e
+0x43 0x3c 0x14 0x0e
+0x43 0x3c 0x18 0x4e
+0x43 0x3c 0x18 0x4e
+
+# CHECK: smov.s x3, v2[2]
+# CHECK: smov.s x3, v2[2]
+# CHECK: mov.s w3, v2[2]
+# CHECK: mov.s w3, v2[2]
+# CHECK: mov.d x3, v2[1]
+# CHECK: mov.d x3, v2[1]
+
+0xa2 0x1c 0x18 0x4e
+0xa2 0x1c 0x0c 0x4e
+0xa2 0x1c 0x06 0x4e
+0xa2 0x1c 0x03 0x4e
+
+0xa2 0x1c 0x18 0x4e
+0xa2 0x1c 0x0c 0x4e
+0xa2 0x1c 0x06 0x4e
+0xa2 0x1c 0x03 0x4e
+
+# CHECK: ins.d v2[1], x5
+# CHECK: ins.s v2[1], w5
+# CHECK: ins.h v2[1], w5
+# CHECK: ins.b v2[1], w5
+
+# CHECK: ins.d v2[1], x5
+# CHECK: ins.s v2[1], w5
+# CHECK: ins.h v2[1], w5
+# CHECK: ins.b v2[1], w5
+
+0xe2 0x45 0x18 0x6e
+0xe2 0x25 0x0c 0x6e
+0xe2 0x15 0x06 0x6e
+0xe2 0x0d 0x03 0x6e
+
+0xe2 0x05 0x18 0x6e
+0xe2 0x45 0x1c 0x6e
+0xe2 0x35 0x1e 0x6e
+0xe2 0x2d 0x15 0x6e
+
+# CHECK: ins.d v2[1], v15[1]
+# CHECK: ins.s v2[1], v15[1]
+# CHECK: ins.h v2[1], v15[1]
+# CHECK: ins.b v2[1], v15[1]
+
+# CHECK: ins.d v2[1], v15[0]
+# CHECK: ins.s v2[3], v15[2]
+# CHECK: ins.h v2[7], v15[3]
+# CHECK: ins.b v2[10], v15[5]
+
+0x00 0x1c 0x20 0x0e
+0x00 0x1c 0x20 0x4e
+
+# CHECK: and.8b v0, v0, v0
+# CHECK: and.16b v0, v0, v0
+
+0x00 0x1c 0x60 0x0e
+
+# CHECK: bic.8b v0, v0, v0
+
+0x00 0x8c 0x20 0x2e
+0x00 0x3c 0x20 0x0e
+0x00 0x34 0x20 0x0e
+0x00 0x34 0x20 0x2e
+0x00 0x3c 0x20 0x2e
+0x00 0x8c 0x20 0x0e
+0x00 0xd4 0xa0 0x2e
+0x00 0xec 0x20 0x2e
+0x00 0xec 0xa0 0x2e
+0x00 0xd4 0x20 0x2e
+0x00 0xd4 0x20 0x0e
+0x00 0xe4 0x20 0x0e
+0x00 0xe4 0x20 0x2e
+0x00 0xe4 0xa0 0x2e
+0x00 0xfc 0x20 0x2e
+0x00 0xc4 0x20 0x2e
+0x00 0xc4 0x20 0x0e
+0x00 0xf4 0x20 0x2e
+0x00 0xf4 0x20 0x0e
+0x00 0xc4 0xa0 0x2e
+0x00 0xc4 0xa0 0x0e
+0x00 0xf4 0xa0 0x2e
+0x00 0xf4 0xa0 0x0e
+0x00 0xcc 0x20 0x0e
+0x00 0xcc 0xa0 0x0e
+0x00 0xdc 0x20 0x0e
+0x00 0xdc 0x20 0x2e
+0x00 0xfc 0x20 0x0e
+0x00 0xfc 0xa0 0x0e
+0x00 0xd4 0xa0 0x0e
+0x00 0x94 0x20 0x0e
+0x00 0x94 0x20 0x2e
+0x00 0x9c 0x20 0x0e
+0x00 0x9c 0x20 0x2e
+0x00 0x7c 0x20 0x0e
+0x00 0x74 0x20 0x0e
+0x00 0x04 0x20 0x0e
+0x00 0x24 0x20 0x0e
+0x00 0xa4 0x20 0x0e
+0x00 0x64 0x20 0x0e
+0x00 0xac 0x20 0x0e
+0x00 0x6c 0x20 0x0e
+0x00 0x0c 0x20 0x0e
+0x00 0xb4 0x60 0x0e
+0x00 0xb4 0x60 0x2e
+0x00 0x5c 0x20 0x0e
+0x00 0x4c 0x20 0x0e
+0x00 0x2c 0x20 0x0e
+0x00 0x14 0x20 0x0e
+0x00 0x54 0x20 0x0e
+0x00 0x44 0x20 0x0e
+0x00 0x84 0x20 0x2e
+0x00 0x7c 0x20 0x2e
+0x00 0x74 0x20 0x2e
+0x00 0x04 0x20 0x2e
+0x00 0x24 0x20 0x2e
+0x00 0xa4 0x20 0x2e
+0x00 0x64 0x20 0x2e
+0x00 0xac 0x20 0x2e
+0x00 0x6c 0x20 0x2e
+0x00 0x0c 0x20 0x2e
+0x00 0x5c 0x20 0x2e
+0x00 0x4c 0x20 0x2e
+0x00 0x2c 0x20 0x2e
+0x00 0x14 0x20 0x2e
+0x00 0x54 0x20 0x2e
+0x00 0x44 0x20 0x2e
+
+# CHECK: cmeq.8b v0, v0, v0
+# CHECK: cmge.8b v0, v0, v0
+# CHECK: cmgt.8b v0, v0, v0
+# CHECK: cmhi.8b v0, v0, v0
+# CHECK: cmhs.8b v0, v0, v0
+# CHECK: cmtst.8b v0, v0, v0
+# CHECK: fabd.2s v0, v0, v0
+# CHECK: facge.2s v0, v0, v0
+# CHECK: facgt.2s v0, v0, v0
+# CHECK: faddp.2s v0, v0, v0
+# CHECK: fadd.2s v0, v0, v0
+# CHECK: fcmeq.2s v0, v0, v0
+# CHECK: fcmge.2s v0, v0, v0
+# CHECK: fcmgt.2s v0, v0, v0
+# CHECK: fdiv.2s v0, v0, v0
+# CHECK: fmaxnmp.2s v0, v0, v0
+# CHECK: fmaxnm.2s v0, v0, v0
+# CHECK: fmaxp.2s v0, v0, v0
+# CHECK: fmax.2s v0, v0, v0
+# CHECK: fminnmp.2s v0, v0, v0
+# CHECK: fminnm.2s v0, v0, v0
+# CHECK: fminp.2s v0, v0, v0
+# CHECK: fmin.2s v0, v0, v0
+# CHECK: fmla.2s v0, v0, v0
+# CHECK: fmls.2s v0, v0, v0
+# CHECK: fmulx.2s v0, v0, v0
+# CHECK: fmul.2s v0, v0, v0
+# CHECK: frecps.2s v0, v0, v0
+# CHECK: frsqrts.2s v0, v0, v0
+# CHECK: fsub.2s v0, v0, v0
+# CHECK: mla.8b v0, v0, v0
+# CHECK: mls.8b v0, v0, v0
+# CHECK: mul.8b v0, v0, v0
+# CHECK: pmul.8b v0, v0, v0
+# CHECK: saba.8b v0, v0, v0
+# CHECK: sabd.8b v0, v0, v0
+# CHECK: shadd.8b v0, v0, v0
+# CHECK: shsub.8b v0, v0, v0
+# CHECK: smaxp.8b v0, v0, v0
+# CHECK: smax.8b v0, v0, v0
+# CHECK: sminp.8b v0, v0, v0
+# CHECK: smin.8b v0, v0, v0
+# CHECK: sqadd.8b v0, v0, v0
+# CHECK: sqdmulh.4h v0, v0, v0
+# CHECK: sqrdmulh.4h v0, v0, v0
+# CHECK: sqrshl.8b v0, v0, v0
+# CHECK: sqshl.8b v0, v0, v0
+# CHECK: sqsub.8b v0, v0, v0
+# CHECK: srhadd.8b v0, v0, v0
+# CHECK: srshl.8b v0, v0, v0
+# CHECK: sshl.8b v0, v0, v0
+# CHECK: sub.8b v0, v0, v0
+# CHECK: uaba.8b v0, v0, v0
+# CHECK: uabd.8b v0, v0, v0
+# CHECK: uhadd.8b v0, v0, v0
+# CHECK: uhsub.8b v0, v0, v0
+# CHECK: umaxp.8b v0, v0, v0
+# CHECK: umax.8b v0, v0, v0
+# CHECK: uminp.8b v0, v0, v0
+# CHECK: umin.8b v0, v0, v0
+# CHECK: uqadd.8b v0, v0, v0
+# CHECK: uqrshl.8b v0, v0, v0
+# CHECK: uqshl.8b v0, v0, v0
+# CHECK: uqsub.8b v0, v0, v0
+# CHECK: urhadd.8b v0, v0, v0
+# CHECK: urshl.8b v0, v0, v0
+# CHECK: ushl.8b v0, v0, v0
+
+0x00 0x1c 0xe0 0x2e
+0x00 0x1c 0xa0 0x2e
+0x00 0x1c 0x60 0x2e
+0x00 0x1c 0x20 0x2e
+0x00 0x1c 0xe0 0x0e
+0x00 0x1c 0xa1 0x0e
+
+# CHECK: bif.8b v0, v0, v0
+# CHECK: bit.8b v0, v0, v0
+# CHECK: bsl.8b v0, v0, v0
+# CHECK: eor.8b v0, v0, v0
+# CHECK: orn.8b v0, v0, v0
+# CHECK: orr.8b v0, v0, v1
+
+0x00 0x68 0x20 0x0e
+0x00 0x68 0x20 0x4e
+0x00 0x68 0x60 0x0e
+0x00 0x68 0x60 0x4e
+0x00 0x68 0xa0 0x0e
+0x00 0x68 0xa0 0x4e
+
+# CHECK: sadalp.4h v0, v0
+# CHECK: sadalp.8h v0, v0
+# CHECK: sadalp.2s v0, v0
+# CHECK: sadalp.4s v0, v0
+# CHECK: sadalp.1d v0, v0
+# CHECK: sadalp.2d v0, v0
+
+0x00 0x48 0x20 0x0e
+0x00 0x48 0x20 0x2e
+0x00 0x58 0x20 0x0e
+0x00 0xf8 0xa0 0x0e
+0x00 0xc8 0x21 0x0e
+0x00 0xc8 0x21 0x2e
+0x00 0xb8 0x21 0x0e
+0x00 0xb8 0x21 0x2e
+0x00 0xa8 0x21 0x0e
+0x00 0xa8 0x21 0x2e
+0x00 0xa8 0xa1 0x0e
+0x00 0xa8 0xa1 0x2e
+0x00 0xb8 0xa1 0x0e
+0x00 0xb8 0xa1 0x2e
+0x00 0xf8 0xa0 0x2e
+0x00 0xd8 0xa1 0x0e
+0x00 0xd8 0xa1 0x2e
+0x00 0xf8 0xa1 0x2e
+0x00 0xb8 0x20 0x2e
+0x00 0x58 0x20 0x2e
+0x00 0x58 0x60 0x2e
+0x00 0x18 0x20 0x0e
+0x00 0x08 0x20 0x2e
+0x00 0x08 0x20 0x0e
+0x00 0x68 0x20 0x0e
+0x00 0x28 0x20 0x0e
+0x00 0xd8 0x21 0x0e
+0x00 0x38 0x21 0x2e
+0x00 0x78 0x20 0x0e
+0x00 0x78 0x20 0x2e
+0x00 0x48 0x21 0x0e
+0x00 0x28 0x21 0x2e
+0x00 0x38 0x20 0x0e
+0x00 0x68 0x20 0x2e
+0x00 0x28 0x20 0x2e
+0x00 0xd8 0x21 0x2e
+0x00 0x48 0x21 0x2e
+0x00 0xc8 0xa1 0x0e
+0x00 0xc8 0xa1 0x2e
+0x00 0x38 0x20 0x2e
+0x00 0x28 0x21 0x0e
+0x00 0x48 0x20 0x0e
+0x00 0x48 0x20 0x2e
+0x00 0x58 0x20 0x0e
+0x00 0xf8 0xa0 0x0e
+0x00 0xc8 0x21 0x0e
+0x00 0xc8 0x21 0x2e
+0x00 0xb8 0x21 0x0e
+0x00 0xb8 0x21 0x2e
+0x00 0xa8 0x21 0x0e
+0x00 0xa8 0x21 0x2e
+0x00 0xa8 0xa1 0x0e
+0x00 0xa8 0xa1 0x2e
+0x00 0xb8 0xa1 0x0e
+0x00 0xb8 0xa1 0x2e
+0x00 0xf8 0xa0 0x2e
+0x00 0xd8 0xa1 0x0e
+0x00 0xd8 0xa1 0x2e
+0x00 0xf8 0xa1 0x2e
+0x00 0xb8 0x20 0x2e
+0x00 0x58 0x20 0x2e
+0x00 0x58 0x60 0x2e
+0x00 0x18 0x20 0x0e
+0x00 0x08 0x20 0x2e
+0x00 0x08 0x20 0x0e
+0x00 0x68 0x20 0x0e
+0x00 0x28 0x20 0x0e
+0x00 0xd8 0x21 0x0e
+0x00 0x38 0x21 0x2e
+0x00 0x78 0x20 0x0e
+0x00 0x78 0x20 0x2e
+0x00 0x48 0x21 0x0e
+0x00 0x28 0x21 0x2e
+0x00 0x38 0x20 0x0e
+0x00 0x68 0x20 0x2e
+0x00 0x28 0x20 0x2e
+0x00 0xd8 0x21 0x2e
+0x00 0x48 0x21 0x2e
+0x00 0xc8 0xa1 0x0e
+0x00 0xc8 0xa1 0x2e
+0x00 0x38 0x20 0x2e
+0x00 0x28 0x21 0x0e
+
+# CHECK: cls.8b v0, v0
+# CHECK: clz.8b v0, v0
+# CHECK: cnt.8b v0, v0
+# CHECK: fabs.2s v0, v0
+# CHECK: fcvtas.2s v0, v0
+# CHECK: fcvtau.2s v0, v0
+# CHECK: fcvtms.2s v0, v0
+# CHECK: fcvtmu.2s v0, v0
+# CHECK: fcvtns.2s v0, v0
+# CHECK: fcvtnu.2s v0, v0
+# CHECK: fcvtps.2s v0, v0
+# CHECK: fcvtpu.2s v0, v0
+# CHECK: fcvtzs.2s v0, v0
+# CHECK: fcvtzu.2s v0, v0
+# CHECK: fneg.2s v0, v0
+# CHECK: frecpe.2s v0, v0
+# CHECK: frsqrte.2s v0, v0
+# CHECK: fsqrt.2s v0, v0
+# CHECK: neg.8b v0, v0
+# CHECK: mvn.8b v0, v0
+# CHECK: rbit.8b v0, v0
+# CHECK: rev16.8b v0, v0
+# CHECK: rev32.8b v0, v0
+# CHECK: rev64.8b v0, v0
+# CHECK: sadalp.4h v0, v0
+# CHECK: saddlp.4h v0, v0
+# CHECK: scvtf.2s v0, v0
+# CHECK: shll.8h v0, v0, #8
+# CHECK: sqabs.8b v0, v0
+# CHECK: sqneg.8b v0, v0
+# CHECK: sqxtn.8b v0, v0
+# CHECK: sqxtun.8b v0, v0
+# CHECK: suqadd.8b v0, v0
+# CHECK: uadalp.4h v0, v0
+# CHECK: uaddlp.4h v0, v0
+# CHECK: ucvtf.2s v0, v0
+# CHECK: uqxtn.8b v0, v0
+# CHECK: urecpe.2s v0, v0
+# CHECK: ursqrte.2s v0, v0
+# CHECK: usqadd.8b v0, v0
+# CHECK: xtn.8b v0, v0
+
+0x00 0x98 0x20 0x0e
+0x00 0x98 0x20 0x4e
+0x00 0x98 0x60 0x0e
+0x00 0x98 0x60 0x4e
+0x00 0x98 0xa0 0x0e
+0x00 0x98 0xa0 0x4e
+0x00 0x98 0xe0 0x4e
+
+# CHECK: cmeq.8b v0, v0, #0
+# CHECK: cmeq.16b v0, v0, #0
+# CHECK: cmeq.4h v0, v0, #0
+# CHECK: cmeq.8h v0, v0, #0
+# CHECK: cmeq.2s v0, v0, #0
+# CHECK: cmeq.4s v0, v0, #0
+# CHECK: cmeq.2d v0, v0, #0
+
+0x00 0x88 0x20 0x2e
+0x00 0x88 0x20 0x0e
+0x00 0x98 0x20 0x2e
+0x00 0xa8 0x20 0x0e
+0x00 0xd8 0xa0 0x0e
+0x00 0xc8 0xa0 0x2e
+0x00 0xc8 0xa0 0x0e
+0x00 0xd8 0xa0 0x2e
+0x00 0xe8 0xa0 0x0e
+
+# CHECK: cmge.8b v0, v0, #0
+# CHECK: cmgt.8b v0, v0, #0
+# CHECK: cmle.8b v0, v0, #0
+# CHECK: cmlt.8b v0, v0, #0
+# CHECK: fcmeq.2s v0, v0, #0
+# CHECK: fcmge.2s v0, v0, #0
+# CHECK: fcmgt.2s v0, v0, #0
+# CHECK: fcmle.2s v0, v0, #0
+# CHECK: fcmlt.2s v0, v0, #0
+
+0x00 0x78 0x21 0x0e
+0x00 0x78 0x21 0x4e
+0x00 0x78 0x61 0x0e
+0x00 0x78 0x61 0x4e
+0x00 0x68 0x21 0x0e
+0x00 0x68 0x21 0x4e
+0x00 0x68 0x61 0x0e
+0x00 0x68 0x61 0x4e
+0x00 0x68 0x61 0x2e
+0x00 0x68 0x61 0x6e
+
+# CHECK: fcvtl v0.4s, v0.4h
+# CHECK: fcvtl2 v0.4s, v0.8h
+# CHECK: fcvtl v0.2d, v0.2s
+# CHECK: fcvtl2 v0.2d, v0.4s
+# CHECK: fcvtn v0.4h, v0.4s
+# CHECK: fcvtn2 v0.8h, v0.4s
+# CHECK: fcvtn v0.2s, v0.2d
+# CHECK: fcvtn2 v0.4s, v0.2d
+# CHECK: fcvtxn v0.2s, v0.2d
+# CHECK: fcvtxn2 v0.4s, v0.2d
+
+#===-------------------------------------------------------------------------===
+# AdvSIMD modified immediate instructions
+#===-------------------------------------------------------------------------===
+
+0x20 0x14 0x00 0x2f
+0x20 0x34 0x00 0x2f
+0x20 0x54 0x00 0x2f
+0x20 0x74 0x00 0x2f
+
+# CHECK: bic.2s v0, #0x1
+# CHECK: bic.2s v0, #0x1, lsl #8
+# CHECK: bic.2s v0, #0x1, lsl #16
+# CHECK: bic.2s v0, #0x1, lsl #24
+
+0x20 0x94 0x00 0x2f
+0x20 0x94 0x00 0x2f
+0x20 0xb4 0x00 0x2f
+
+# CHECK: bic.4h v0, #0x1
+# CHECK: bic.4h v0, #0x1
+# FIXME: bic.4h v0, #0x1, lsl #8
+# 'bic.4h' should be selected over "fcvtnu.2s v0, v1, #0"
+
+0x20 0x14 0x00 0x6f
+0x20 0x34 0x00 0x6f
+0x20 0x54 0x00 0x6f
+0x20 0x74 0x00 0x6f
+
+# CHECK: bic.4s v0, #0x1
+# CHECK: bic.4s v0, #0x1, lsl #8
+# CHECK: bic.4s v0, #0x1, lsl #16
+# CHECK: bic.4s v0, #0x1, lsl #24
+
+0x20 0x94 0x00 0x6f
+0x20 0xb4 0x00 0x6f
+
+# CHECK: bic.8h v0, #0x1
+# FIXME: bic.8h v0, #0x1, lsl #8
+# "bic.8h" should be selected over "fcvtnu.4s v0, v1, #0"
+
+0x00 0xf4 0x02 0x6f
+
+# CHECK: fmov.2d v0, #0.12500000
+
+0x00 0xf4 0x02 0x0f
+0x00 0xf4 0x02 0x4f
+
+# CHECK: fmov.2s v0, #0.12500000
+# CHECK: fmov.4s v0, #0.12500000
+
+0x20 0x14 0x00 0x0f
+0x20 0x34 0x00 0x0f
+0x20 0x54 0x00 0x0f
+0x20 0x74 0x00 0x0f
+
+# CHECK: orr.2s v0, #0x1
+# CHECK: orr.2s v0, #0x1, lsl #8
+# CHECK: orr.2s v0, #0x1, lsl #16
+# CHECK: orr.2s v0, #0x1, lsl #24
+
+0x20 0x94 0x00 0x0f
+0x20 0xb4 0x00 0x0f
+
+# CHECK: orr.4h v0, #0x1
+# FIXME: orr.4h v0, #0x1, lsl #8
+# 'orr.4h' should be selected over "fcvtns.2s v0, v1, #0"
+
+0x20 0x14 0x00 0x4f
+0x20 0x34 0x00 0x4f
+0x20 0x54 0x00 0x4f
+0x20 0x74 0x00 0x4f
+
+# CHECK: orr.4s v0, #0x1
+# CHECK: orr.4s v0, #0x1, lsl #8
+# CHECK: orr.4s v0, #0x1, lsl #16
+# CHECK: orr.4s v0, #0x1, lsl #24
+
+0x20 0x94 0x00 0x4f
+0x20 0xb4 0x00 0x4f
+
+# CHECK: orr.8h v0, #0x1
+# CHECK: orr.8h v0, #0x1, lsl #8
+
+0x21 0x70 0x40 0x0c
+0x42 0xa0 0x40 0x4c
+0x64 0x64 0x40 0x0c
+0x87 0x24 0x40 0x4c
+0x0c 0xa8 0x40 0x0c
+0x0a 0x68 0x40 0x4c
+0x2d 0xac 0x40 0x0c
+0x4f 0x7c 0x40 0x4c
+0xe0 0x03 0x40 0x0d
+
+# CHECK: ld1.8b { v1 }, [x1]
+# CHECK: ld1.16b { v2, v3 }, [x2]
+# CHECK: ld1.4h { v4, v5, v6 }, [x3]
+# CHECK: ld1.8h { v7, v8, v9, v10 }, [x4]
+# CHECK: ld1.2s { v12, v13 }, [x0]
+# CHECK: ld1.4s { v10, v11, v12 }, [x0]
+# CHECK: ld1.1d { v13, v14 }, [x1]
+# CHECK: ld1.2d { v15 }, [x2]
+# CHECK: ld1.b { v0 }[0], [sp]
+
+0x41 0x70 0xdf 0x0c
+0x41 0xa0 0xdf 0x0c
+0x41 0x60 0xdf 0x0c
+0x41 0x20 0xdf 0x0c
+0x42 0x70 0xdf 0x4c
+0x42 0xa0 0xdf 0x4c
+0x42 0x60 0xdf 0x4c
+0x42 0x20 0xdf 0x4c
+0x64 0x74 0xdf 0x0c
+0x64 0xa4 0xdf 0x0c
+0x64 0x64 0xdf 0x0c
+0x64 0x24 0xdf 0x0c
+0x87 0x74 0xdf 0x4c
+0x87 0xa4 0xdf 0x4c
+0x87 0x64 0xdf 0x4c
+0x87 0x24 0xdf 0x4c
+0x0c 0x78 0xdf 0x0c
+0x0c 0xa8 0xdf 0x0c
+0x0c 0x68 0xdf 0x0c
+0x0c 0x28 0xdf 0x0c
+0x0a 0x78 0xdf 0x4c
+0x0a 0xa8 0xdf 0x4c
+0x0a 0x68 0xdf 0x4c
+0x0a 0x28 0xdf 0x4c
+0x2d 0x7c 0xdf 0x0c
+0x2d 0xac 0xdf 0x0c
+0x2d 0x6c 0xdf 0x0c
+0x2d 0x2c 0xdf 0x0c
+0x4f 0x7c 0xdf 0x4c
+0x4f 0xac 0xdf 0x4c
+0x4f 0x6c 0xdf 0x4c
+0x4f 0x2c 0xdf 0x4c
+
+# CHECK: ld1.8b { v1 }, [x2], #8
+# CHECK: ld1.8b { v1, v2 }, [x2], #16
+# CHECK: ld1.8b { v1, v2, v3 }, [x2], #24
+# CHECK: ld1.8b { v1, v2, v3, v4 }, [x2], #32
+# CHECK: ld1.16b { v2 }, [x2], #16
+# CHECK: ld1.16b { v2, v3 }, [x2], #32
+# CHECK: ld1.16b { v2, v3, v4 }, [x2], #48
+# CHECK: ld1.16b { v2, v3, v4, v5 }, [x2], #64
+# CHECK: ld1.4h { v4 }, [x3], #8
+# CHECK: ld1.4h { v4, v5 }, [x3], #16
+# CHECK: ld1.4h { v4, v5, v6 }, [x3], #24
+# CHECK: ld1.4h { v4, v5, v6, v7 }, [x3], #32
+# CHECK: ld1.8h { v7 }, [x4], #16
+# CHECK: ld1.8h { v7, v8 }, [x4], #32
+# CHECK: ld1.8h { v7, v8, v9 }, [x4], #48
+# CHECK: ld1.8h { v7, v8, v9, v10 }, [x4], #64
+# CHECK: ld1.2s { v12 }, [x0], #8
+# CHECK: ld1.2s { v12, v13 }, [x0], #16
+# CHECK: ld1.2s { v12, v13, v14 }, [x0], #24
+# CHECK: ld1.2s { v12, v13, v14, v15 }, [x0], #32
+# CHECK: ld1.4s { v10 }, [x0], #16
+# CHECK: ld1.4s { v10, v11 }, [x0], #32
+# CHECK: ld1.4s { v10, v11, v12 }, [x0], #48
+# CHECK: ld1.4s { v10, v11, v12, v13 }, [x0], #64
+# CHECK: ld1.1d { v13 }, [x1], #8
+# CHECK: ld1.1d { v13, v14 }, [x1], #16
+# CHECK: ld1.1d { v13, v14, v15 }, [x1], #24
+# CHECK: ld1.1d { v13, v14, v15, v16 }, [x1], #32
+# CHECK: ld1.2d { v15 }, [x2], #16
+# CHECK: ld1.2d { v15, v16 }, [x2], #32
+# CHECK: ld1.2d { v15, v16, v17 }, [x2], #48
+# CHECK: ld1.2d { v15, v16, v17, v18 }, [x2], #64
+
+0x21 0x70 0x00 0x0c
+0x42 0xa0 0x00 0x4c
+0x64 0x64 0x00 0x0c
+0x87 0x24 0x00 0x4c
+0x0c 0xa8 0x00 0x0c
+0x0a 0x68 0x00 0x4c
+0x2d 0xac 0x00 0x0c
+0x4f 0x7c 0x00 0x4c
+
+# CHECK: st1.8b { v1 }, [x1]
+# CHECK: st1.16b { v2, v3 }, [x2]
+# CHECK: st1.4h { v4, v5, v6 }, [x3]
+# CHECK: st1.8h { v7, v8, v9, v10 }, [x4]
+# CHECK: st1.2s { v12, v13 }, [x0]
+# CHECK: st1.4s { v10, v11, v12 }, [x0]
+# CHECK: st1.1d { v13, v14 }, [x1]
+# CHECK: st1.2d { v15 }, [x2]
+
+0x61 0x08 0x40 0x0d
+0x82 0x84 0x40 0x4d
+0xa3 0x58 0x40 0x0d
+0xc4 0x80 0x40 0x4d
+
+# CHECK: ld1.b { v1 }[2], [x3]
+# CHECK: ld1.d { v2 }[1], [x4]
+# CHECK: ld1.h { v3 }[3], [x5]
+# CHECK: ld1.s { v4 }[2], [x6]
+
+0x61 0x08 0xdf 0x0d
+0x82 0x84 0xdf 0x4d
+0xa3 0x58 0xdf 0x0d
+0xc4 0x80 0xdf 0x4d
+
+# CHECK: ld1.b { v1 }[2], [x3], #1
+# CHECK: ld1.d { v2 }[1], [x4], #8
+# CHECK: ld1.h { v3 }[3], [x5], #2
+# CHECK: ld1.s { v4 }[2], [x6], #4
+
+0x61 0x08 0x00 0x0d
+0x82 0x84 0x00 0x4d
+0xa3 0x58 0x00 0x0d
+0xc4 0x80 0x00 0x4d
+
+# CHECK: st1.b { v1 }[2], [x3]
+# CHECK: st1.d { v2 }[1], [x4]
+# CHECK: st1.h { v3 }[3], [x5]
+# CHECK: st1.s { v4 }[2], [x6]
+
+0x61 0x08 0x9f 0x0d
+0x82 0x84 0x9f 0x4d
+0xa3 0x58 0x9f 0x0d
+0xc4 0x80 0x9f 0x4d
+
+# CHECK: st1.b { v1 }[2], [x3], #1
+# CHECK: st1.d { v2 }[1], [x4], #8
+# CHECK: st1.h { v3 }[3], [x5], #2
+# CHECK: st1.s { v4 }[2], [x6], #4
+
+0x61 0x08 0xc4 0x0d
+0x82 0x84 0xc5 0x4d
+0xa3 0x58 0xc6 0x0d
+0xc4 0x80 0xc7 0x4d
+
+# CHECK: ld1.b { v1 }[2], [x3], x4
+# CHECK: ld1.d { v2 }[1], [x4], x5
+# CHECK: ld1.h { v3 }[3], [x5], x6
+# CHECK: ld1.s { v4 }[2], [x6], x7
+
+0x61 0x08 0x84 0x0d
+0x82 0x84 0x85 0x4d
+0xa3 0x58 0x86 0x0d
+0xc4 0x80 0x87 0x4d
+
+# CHECK: st1.b { v1 }[2], [x3], x4
+# CHECK: st1.d { v2 }[1], [x4], x5
+# CHECK: st1.h { v3 }[3], [x5], x6
+# CHECK: st1.s { v4 }[2], [x6], x7
+
+0x41 0x70 0xc3 0x0c
+0x42 0xa0 0xc4 0x4c
+0x64 0x64 0xc5 0x0c
+0x87 0x24 0xc6 0x4c
+0x0c 0xa8 0xc7 0x0c
+0x0a 0x68 0xc8 0x4c
+0x2d 0xac 0xc9 0x0c
+0x4f 0x7c 0xca 0x4c
+
+# CHECK: ld1.8b { v1 }, [x2], x3
+# CHECK: ld1.16b { v2, v3 }, [x2], x4
+# CHECK: ld1.4h { v4, v5, v6 }, [x3], x5
+# CHECK: ld1.8h { v7, v8, v9, v10 }, [x4], x6
+# CHECK: ld1.2s { v12, v13 }, [x0], x7
+# CHECK: ld1.4s { v10, v11, v12 }, [x0], x8
+# CHECK: ld1.1d { v13, v14 }, [x1], x9
+# CHECK: ld1.2d { v15 }, [x2], x10
+
+0x41 0x70 0x83 0x0c
+0x42 0xa0 0x84 0x4c
+0x64 0x64 0x85 0x0c
+0x87 0x24 0x86 0x4c
+0x0c 0xa8 0x87 0x0c
+0x0a 0x68 0x88 0x4c
+0x2d 0xac 0x89 0x0c
+0x4f 0x7c 0x8a 0x4c
+
+# CHECK: st1.8b { v1 }, [x2], x3
+# CHECK: st1.16b { v2, v3 }, [x2], x4
+# CHECK: st1.4h { v4, v5, v6 }, [x3], x5
+# CHECK: st1.8h { v7, v8, v9, v10 }, [x4], x6
+# CHECK: st1.2s { v12, v13 }, [x0], x7
+# CHECK: st1.4s { v10, v11, v12 }, [x0], x8
+# CHECK: st1.1d { v13, v14 }, [x1], x9
+# CHECK: st1.2d { v15 }, [x2], x10
+
+0x41 0x70 0x9f 0x0c
+0x41 0xa0 0x9f 0x0c
+0x41 0x60 0x9f 0x0c
+0x41 0x20 0x9f 0x0c
+0x42 0x70 0x9f 0x4c
+0x42 0xa0 0x9f 0x4c
+0x42 0x60 0x9f 0x4c
+0x42 0x20 0x9f 0x4c
+0x64 0x74 0x9f 0x0c
+0x64 0xa4 0x9f 0x0c
+0x64 0x64 0x9f 0x0c
+0x64 0x24 0x9f 0x0c
+0x87 0x74 0x9f 0x4c
+0x87 0xa4 0x9f 0x4c
+0x87 0x64 0x9f 0x4c
+0x87 0x24 0x9f 0x4c
+0x0c 0x78 0x9f 0x0c
+0x0c 0xa8 0x9f 0x0c
+0x0c 0x68 0x9f 0x0c
+0x0c 0x28 0x9f 0x0c
+0x0a 0x78 0x9f 0x4c
+0x0a 0xa8 0x9f 0x4c
+0x0a 0x68 0x9f 0x4c
+0x0a 0x28 0x9f 0x4c
+0x2d 0x7c 0x9f 0x0c
+0x2d 0xac 0x9f 0x0c
+0x2d 0x6c 0x9f 0x0c
+0x2d 0x2c 0x9f 0x0c
+0x4f 0x7c 0x9f 0x4c
+0x4f 0xac 0x9f 0x4c
+0x4f 0x6c 0x9f 0x4c
+0x4f 0x2c 0x9f 0x4c
+
+# CHECK: st1.8b { v1 }, [x2], #8
+# CHECK: st1.8b { v1, v2 }, [x2], #16
+# CHECK: st1.8b { v1, v2, v3 }, [x2], #24
+# CHECK: st1.8b { v1, v2, v3, v4 }, [x2], #32
+# CHECK: st1.16b { v2 }, [x2], #16
+# CHECK: st1.16b { v2, v3 }, [x2], #32
+# CHECK: st1.16b { v2, v3, v4 }, [x2], #48
+# CHECK: st1.16b { v2, v3, v4, v5 }, [x2], #64
+# CHECK: st1.4h { v4 }, [x3], #8
+# CHECK: st1.4h { v4, v5 }, [x3], #16
+# CHECK: st1.4h { v4, v5, v6 }, [x3], #24
+# CHECK: st1.4h { v4, v5, v6, v7 }, [x3], #32
+# CHECK: st1.8h { v7 }, [x4], #16
+# CHECK: st1.8h { v7, v8 }, [x4], #32
+# CHECK: st1.8h { v7, v8, v9 }, [x4], #48
+# CHECK: st1.8h { v7, v8, v9, v10 }, [x4], #64
+# CHECK: st1.2s { v12 }, [x0], #8
+# CHECK: st1.2s { v12, v13 }, [x0], #16
+# CHECK: st1.2s { v12, v13, v14 }, [x0], #24
+# CHECK: st1.2s { v12, v13, v14, v15 }, [x0], #32
+# CHECK: st1.4s { v10 }, [x0], #16
+# CHECK: st1.4s { v10, v11 }, [x0], #32
+# CHECK: st1.4s { v10, v11, v12 }, [x0], #48
+# CHECK: st1.4s { v10, v11, v12, v13 }, [x0], #64
+# CHECK: st1.1d { v13 }, [x1], #8
+# CHECK: st1.1d { v13, v14 }, [x1], #16
+# CHECK: st1.1d { v13, v14, v15 }, [x1], #24
+# CHECK: st1.1d { v13, v14, v15, v16 }, [x1], #32
+# CHECK: st1.2d { v15 }, [x2], #16
+# CHECK: st1.2d { v15, v16 }, [x2], #32
+# CHECK: st1.2d { v15, v16, v17 }, [x2], #48
+# CHECK: st1.2d { v15, v16, v17, v18 }, [x2], #64
+
+0x21 0xc0 0x40 0x0d
+0x21 0xc0 0xc2 0x0d
+0x64 0xc4 0x40 0x0d
+0x64 0xc4 0xc5 0x0d
+0xa9 0xc8 0x40 0x0d
+0xa9 0xc8 0xc6 0x0d
+0xec 0xcc 0x40 0x0d
+0xec 0xcc 0xc8 0x0d
+
+# CHECK: ld1r.8b { v1 }, [x1]
+# CHECK: ld1r.8b { v1 }, [x1], x2
+# CHECK: ld1r.4h { v4 }, [x3]
+# CHECK: ld1r.4h { v4 }, [x3], x5
+# CHECK: ld1r.2s { v9 }, [x5]
+# CHECK: ld1r.2s { v9 }, [x5], x6
+# CHECK: ld1r.1d { v12 }, [x7]
+# CHECK: ld1r.1d { v12 }, [x7], x8
+
+0x21 0xc0 0xdf 0x0d
+0x21 0xc4 0xdf 0x0d
+0x21 0xc8 0xdf 0x0d
+0x21 0xcc 0xdf 0x0d
+
+# CHECK: ld1r.8b { v1 }, [x1], #1
+# CHECK: ld1r.4h { v1 }, [x1], #2
+# CHECK: ld1r.2s { v1 }, [x1], #4
+# CHECK: ld1r.1d { v1 }, [x1], #8
+
+0x45 0x80 0x40 0x4c
+0x0a 0x88 0x40 0x0c
+
+# CHECK: ld2.16b { v5, v6 }, [x2]
+# CHECK: ld2.2s { v10, v11 }, [x0]
+
+0x45 0x80 0x00 0x4c
+0x0a 0x88 0x00 0x0c
+
+# CHECK: st2.16b { v5, v6 }, [x2]
+# CHECK: st2.2s { v10, v11 }, [x0]
+
+0x61 0x08 0x20 0x0d
+0x82 0x84 0x20 0x4d
+0xc3 0x50 0x20 0x0d
+0xe4 0x90 0x20 0x4d
+
+# CHECK: st2.b { v1, v2 }[2], [x3]
+# CHECK: st2.d { v2, v3 }[1], [x4]
+# CHECK: st2.h { v3, v4 }[2], [x6]
+# CHECK: st2.s { v4, v5 }[3], [x7]
+
+0x61 0x08 0xbf 0x0d
+0x82 0x84 0xbf 0x4d
+0xa3 0x58 0xbf 0x0d
+0xc4 0x80 0xbf 0x4d
+
+# CHECK: st2.b { v1, v2 }[2], [x3], #2
+# CHECK: st2.d { v2, v3 }[1], [x4], #16
+# CHECK: st2.h { v3, v4 }[3], [x5], #4
+# CHECK: st2.s { v4, v5 }[2], [x6], #8
+
+0x61 0x08 0x60 0x0d
+0x82 0x84 0x60 0x4d
+0xc3 0x50 0x60 0x0d
+0xe4 0x90 0x60 0x4d
+
+# CHECK: ld2.b { v1, v2 }[2], [x3]
+# CHECK: ld2.d { v2, v3 }[1], [x4]
+# CHECK: ld2.h { v3, v4 }[2], [x6]
+# CHECK: ld2.s { v4, v5 }[3], [x7]
+
+0x61 0x08 0xff 0x0d
+0x82 0x84 0xff 0x4d
+0xa3 0x58 0xff 0x0d
+0xc4 0x80 0xff 0x4d
+
+# CHECK: ld2.b { v1, v2 }[2], [x3], #2
+# CHECK: ld2.d { v2, v3 }[1], [x4], #16
+# CHECK: ld2.h { v3, v4 }[3], [x5], #4
+# CHECK: ld2.s { v4, v5 }[2], [x6], #8
+
+0x61 0x08 0xe4 0x0d
+0x82 0x84 0xe6 0x4d
+0xa3 0x58 0xe8 0x0d
+0xc4 0x80 0xea 0x4d
+
+# CHECK: ld2.b { v1, v2 }[2], [x3], x4
+# CHECK: ld2.d { v2, v3 }[1], [x4], x6
+# CHECK: ld2.h { v3, v4 }[3], [x5], x8
+# CHECK: ld2.s { v4, v5 }[2], [x6], x10
+
+0x61 0x08 0xa4 0x0d
+0x82 0x84 0xa6 0x4d
+0xa3 0x58 0xa8 0x0d
+0xc4 0x80 0xaa 0x4d
+
+# CHECK: st2.b { v1, v2 }[2], [x3], x4
+# CHECK: st2.d { v2, v3 }[1], [x4], x6
+# CHECK: st2.h { v3, v4 }[3], [x5], x8
+# CHECK: st2.s { v4, v5 }[2], [x6], x10
+
+0x64 0x84 0xc5 0x0c
+0x0c 0x88 0xc7 0x0c
+
+# CHECK: ld2.4h { v4, v5 }, [x3], x5
+# CHECK: ld2.2s { v12, v13 }, [x0], x7
+
+0x00 0x80 0xdf 0x0c
+0x00 0x80 0xdf 0x4c
+0x00 0x84 0xdf 0x0c
+0x00 0x84 0xdf 0x4c
+0x00 0x88 0xdf 0x0c
+0x00 0x88 0xdf 0x4c
+0x00 0x8c 0xdf 0x4c
+
+# CHECK: ld2.8b { v0, v1 }, [x0], #16
+# CHECK: ld2.16b { v0, v1 }, [x0], #32
+# CHECK: ld2.4h { v0, v1 }, [x0], #16
+# CHECK: ld2.8h { v0, v1 }, [x0], #32
+# CHECK: ld2.2s { v0, v1 }, [x0], #16
+# CHECK: ld2.4s { v0, v1 }, [x0], #32
+# CHECK: ld2.2d { v0, v1 }, [x0], #32
+
+0x64 0x84 0x85 0x0c
+0x0c 0x88 0x87 0x0c
+
+# CHECK: st2.4h { v4, v5 }, [x3], x5
+# CHECK: st2.2s { v12, v13 }, [x0], x7
+
+0x00 0x80 0x9f 0x0c
+0x00 0x80 0x9f 0x4c
+0x00 0x84 0x9f 0x0c
+0x00 0x84 0x9f 0x4c
+0x00 0x88 0x9f 0x0c
+0x00 0x88 0x9f 0x4c
+0x00 0x8c 0x9f 0x4c
+
+# CHECK: st2.8b { v0, v1 }, [x0], #16
+# CHECK: st2.16b { v0, v1 }, [x0], #32
+# CHECK: st2.4h { v0, v1 }, [x0], #16
+# CHECK: st2.8h { v0, v1 }, [x0], #32
+# CHECK: st2.2s { v0, v1 }, [x0], #16
+# CHECK: st2.4s { v0, v1 }, [x0], #32
+# CHECK: st2.2d { v0, v1 }, [x0], #32
+
+0x21 0xc0 0x60 0x0d
+0x21 0xc0 0xe2 0x0d
+0x21 0xc0 0x60 0x4d
+0x21 0xc0 0xe2 0x4d
+0x21 0xc4 0x60 0x0d
+0x21 0xc4 0xe2 0x0d
+0x21 0xc4 0x60 0x4d
+0x21 0xc4 0xe2 0x4d
+0x21 0xc8 0x60 0x0d
+0x21 0xc8 0xe2 0x0d
+0x21 0xcc 0x60 0x4d
+0x21 0xcc 0xe2 0x4d
+0x21 0xcc 0x60 0x0d
+0x21 0xcc 0xe2 0x0d
+
+# CHECK: ld2r.8b { v1, v2 }, [x1]
+# CHECK: ld2r.8b { v1, v2 }, [x1], x2
+# CHECK: ld2r.16b { v1, v2 }, [x1]
+# CHECK: ld2r.16b { v1, v2 }, [x1], x2
+# CHECK: ld2r.4h { v1, v2 }, [x1]
+# CHECK: ld2r.4h { v1, v2 }, [x1], x2
+# CHECK: ld2r.8h { v1, v2 }, [x1]
+# CHECK: ld2r.8h { v1, v2 }, [x1], x2
+# CHECK: ld2r.2s { v1, v2 }, [x1]
+# CHECK: ld2r.2s { v1, v2 }, [x1], x2
+# CHECK: ld2r.2d { v1, v2 }, [x1]
+# CHECK: ld2r.2d { v1, v2 }, [x1], x2
+# CHECK: ld2r.1d { v1, v2 }, [x1]
+# CHECK: ld2r.1d { v1, v2 }, [x1], x2
+
+0x21 0xc0 0xff 0x0d
+0x21 0xc0 0xff 0x4d
+0x21 0xc4 0xff 0x0d
+0x21 0xc4 0xff 0x4d
+0x21 0xc8 0xff 0x0d
+0x21 0xcc 0xff 0x4d
+0x21 0xcc 0xff 0x0d
+
+# CHECK: ld2r.8b { v1, v2 }, [x1], #2
+# CHECK: ld2r.16b { v1, v2 }, [x1], #2
+# CHECK: ld2r.4h { v1, v2 }, [x1], #4
+# CHECK: ld2r.8h { v1, v2 }, [x1], #4
+# CHECK: ld2r.2s { v1, v2 }, [x1], #8
+# CHECK: ld2r.2d { v1, v2 }, [x1], #16
+# CHECK: ld2r.1d { v1, v2 }, [x1], #16
+
+0x21 0x40 0x40 0x0c
+0x45 0x40 0x40 0x4c
+0x0a 0x48 0x40 0x0c
+
+# CHECK: ld3.8b { v1, v2, v3 }, [x1]
+# CHECK: ld3.16b { v5, v6, v7 }, [x2]
+# CHECK: ld3.2s { v10, v11, v12 }, [x0]
+
+0x21 0x40 0x00 0x0c
+0x45 0x40 0x00 0x4c
+0x0a 0x48 0x00 0x0c
+
+# CHECK: st3.8b { v1, v2, v3 }, [x1]
+# CHECK: st3.16b { v5, v6, v7 }, [x2]
+# CHECK: st3.2s { v10, v11, v12 }, [x0]
+
+0x61 0x28 0xc4 0x0d
+0x82 0xa4 0xc5 0x4d
+0xa3 0x78 0xc6 0x0d
+0xc4 0xa0 0xc7 0x4d
+
+# CHECK: ld3.b { v1, v2, v3 }[2], [x3], x4
+# CHECK: ld3.d { v2, v3, v4 }[1], [x4], x5
+# CHECK: ld3.h { v3, v4, v5 }[3], [x5], x6
+# CHECK: ld3.s { v4, v5, v6 }[2], [x6], x7
+
+0x61 0x28 0x84 0x0d
+0x82 0xa4 0x85 0x4d
+0xa3 0x78 0x86 0x0d
+0xc4 0xa0 0x87 0x4d
+
+# CHECK: st3.b { v1, v2, v3 }[2], [x3], x4
+# CHECK: st3.d { v2, v3, v4 }[1], [x4], x5
+# CHECK: st3.h { v3, v4, v5 }[3], [x5], x6
+# CHECK: st3.s { v4, v5, v6 }[2], [x6], x7
+
+0x61 0x28 0x9f 0x0d
+0x82 0xa4 0x9f 0x4d
+0xa3 0x78 0x9f 0x0d
+0xc4 0xa0 0x9f 0x4d
+
+# CHECK: st3.b { v1, v2, v3 }[2], [x3], #3
+# CHECK: st3.d { v2, v3, v4 }[1], [x4], #24
+# CHECK: st3.h { v3, v4, v5 }[3], [x5], #6
+# CHECK: st3.s { v4, v5, v6 }[2], [x6], #12
+
+0x41 0x40 0xc3 0x0c
+0x42 0x40 0xc4 0x4c
+0x64 0x44 0xc5 0x0c
+0x87 0x44 0xc6 0x4c
+0x0c 0x48 0xc7 0x0c
+0x0a 0x48 0xc8 0x4c
+0x4f 0x4c 0xca 0x4c
+
+# CHECK: ld3.8b { v1, v2, v3 }, [x2], x3
+# CHECK: ld3.16b { v2, v3, v4 }, [x2], x4
+# CHECK: ld3.4h { v4, v5, v6 }, [x3], x5
+# CHECK: ld3.8h { v7, v8, v9 }, [x4], x6
+# CHECK: ld3.2s { v12, v13, v14 }, [x0], x7
+# CHECK: ld3.4s { v10, v11, v12 }, [x0], x8
+# CHECK: ld3.2d { v15, v16, v17 }, [x2], x10
+
+0x00 0x40 0xdf 0x0c
+0x00 0x40 0xdf 0x4c
+0x00 0x44 0xdf 0x0c
+0x00 0x44 0xdf 0x4c
+0x00 0x48 0xdf 0x0c
+0x00 0x48 0xdf 0x4c
+0x00 0x4c 0xdf 0x4c
+
+# CHECK: ld3.8b { v0, v1, v2 }, [x0], #24
+# CHECK: ld3.16b { v0, v1, v2 }, [x0], #48
+# CHECK: ld3.4h { v0, v1, v2 }, [x0], #24
+# CHECK: ld3.8h { v0, v1, v2 }, [x0], #48
+# CHECK: ld3.2s { v0, v1, v2 }, [x0], #24
+# CHECK: ld3.4s { v0, v1, v2 }, [x0], #48
+# CHECK: ld3.2d { v0, v1, v2 }, [x0], #48
+
+0x41 0x40 0x83 0x0c
+0x42 0x40 0x84 0x4c
+0x64 0x44 0x85 0x0c
+0x87 0x44 0x86 0x4c
+0x0c 0x48 0x87 0x0c
+0x0a 0x48 0x88 0x4c
+0x4f 0x4c 0x8a 0x4c
+
+# CHECK: st3.8b { v1, v2, v3 }, [x2], x3
+# CHECK: st3.16b { v2, v3, v4 }, [x2], x4
+# CHECK: st3.4h { v4, v5, v6 }, [x3], x5
+# CHECK: st3.8h { v7, v8, v9 }, [x4], x6
+# CHECK: st3.2s { v12, v13, v14 }, [x0], x7
+# CHECK: st3.4s { v10, v11, v12 }, [x0], x8
+# CHECK: st3.2d { v15, v16, v17 }, [x2], x10
+
+0x00 0x40 0x9f 0x0c
+0x00 0x40 0x9f 0x4c
+0x00 0x44 0x9f 0x0c
+0x00 0x44 0x9f 0x4c
+0x00 0x48 0x9f 0x0c
+0x00 0x48 0x9f 0x4c
+0x00 0x4c 0x9f 0x4c
+
+# CHECK: st3.8b { v0, v1, v2 }, [x0], #24
+# CHECK: st3.16b { v0, v1, v2 }, [x0], #48
+# CHECK: st3.4h { v0, v1, v2 }, [x0], #24
+# CHECK: st3.8h { v0, v1, v2 }, [x0], #48
+# CHECK: st3.2s { v0, v1, v2 }, [x0], #24
+# CHECK: st3.4s { v0, v1, v2 }, [x0], #48
+# CHECK: st3.2d { v0, v1, v2 }, [x0], #48
+
+0x61 0x28 0x40 0x0d
+0x82 0xa4 0x40 0x4d
+0xc3 0x70 0x40 0x0d
+0xe4 0xb0 0x40 0x4d
+
+# CHECK: ld3.b { v1, v2, v3 }[2], [x3]
+# CHECK: ld3.d { v2, v3, v4 }[1], [x4]
+# CHECK: ld3.h { v3, v4, v5 }[2], [x6]
+# CHECK: ld3.s { v4, v5, v6 }[3], [x7]
+
+0x61 0x28 0xdf 0x0d
+0x82 0xa4 0xdf 0x4d
+0xa3 0x78 0xdf 0x0d
+0xc4 0xa0 0xdf 0x4d
+
+# CHECK: ld3.b { v1, v2, v3 }[2], [x3], #3
+# CHECK: ld3.d { v2, v3, v4 }[1], [x4], #24
+# CHECK: ld3.h { v3, v4, v5 }[3], [x5], #6
+# CHECK: ld3.s { v4, v5, v6 }[2], [x6], #12
+
+0x61 0x28 0x00 0x0d
+0x82 0xa4 0x00 0x4d
+0xc3 0x70 0x00 0x0d
+0xe4 0xb0 0x00 0x4d
+
+# CHECK: st3.b { v1, v2, v3 }[2], [x3]
+# CHECK: st3.d { v2, v3, v4 }[1], [x4]
+# CHECK: st3.h { v3, v4, v5 }[2], [x6]
+# CHECK: st3.s { v4, v5, v6 }[3], [x7]
+
+0x21 0xe0 0x40 0x0d
+0x21 0xe0 0xc2 0x0d
+0x21 0xe0 0x40 0x4d
+0x21 0xe0 0xc2 0x4d
+0x21 0xe4 0x40 0x0d
+0x21 0xe4 0xc2 0x0d
+0x21 0xe4 0x40 0x4d
+0x21 0xe4 0xc2 0x4d
+0x21 0xe8 0x40 0x0d
+0x21 0xe8 0xc2 0x0d
+0x21 0xec 0x40 0x4d
+0x21 0xec 0xc2 0x4d
+0x21 0xec 0x40 0x0d
+0x21 0xec 0xc2 0x0d
+
+# CHECK: ld3r.8b { v1, v2, v3 }, [x1]
+# CHECK: ld3r.8b { v1, v2, v3 }, [x1], x2
+# CHECK: ld3r.16b { v1, v2, v3 }, [x1]
+# CHECK: ld3r.16b { v1, v2, v3 }, [x1], x2
+# CHECK: ld3r.4h { v1, v2, v3 }, [x1]
+# CHECK: ld3r.4h { v1, v2, v3 }, [x1], x2
+# CHECK: ld3r.8h { v1, v2, v3 }, [x1]
+# CHECK: ld3r.8h { v1, v2, v3 }, [x1], x2
+# CHECK: ld3r.2s { v1, v2, v3 }, [x1]
+# CHECK: ld3r.2s { v1, v2, v3 }, [x1], x2
+# CHECK: ld3r.2d { v1, v2, v3 }, [x1]
+# CHECK: ld3r.2d { v1, v2, v3 }, [x1], x2
+# CHECK: ld3r.1d { v1, v2, v3 }, [x1]
+# CHECK: ld3r.1d { v1, v2, v3 }, [x1], x2
+
+0x21 0xe0 0xdf 0x0d
+0x21 0xe0 0xdf 0x4d
+0x21 0xe4 0xdf 0x0d
+0x21 0xe4 0xdf 0x4d
+0x21 0xe8 0xdf 0x0d
+0x21 0xec 0xdf 0x4d
+0x21 0xec 0xdf 0x0d
+
+# CHECK: ld3r.8b { v1, v2, v3 }, [x1], #3
+# CHECK: ld3r.16b { v1, v2, v3 }, [x1], #3
+# CHECK: ld3r.4h { v1, v2, v3 }, [x1], #6
+# CHECK: ld3r.8h { v1, v2, v3 }, [x1], #6
+# CHECK: ld3r.2s { v1, v2, v3 }, [x1], #12
+# CHECK: ld3r.2d { v1, v2, v3 }, [x1], #24
+# CHECK: ld3r.1d { v1, v2, v3 }, [x1], #24
+
+0x21 0x00 0x40 0x0c
+0x45 0x00 0x40 0x4c
+0x0a 0x08 0x40 0x0c
+
+# CHECK: ld4.8b { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4.16b { v5, v6, v7, v8 }, [x2]
+# CHECK: ld4.2s { v10, v11, v12, v13 }, [x0]
+
+0x21 0x00 0x00 0x0c
+0x45 0x00 0x00 0x4c
+0x0a 0x08 0x00 0x0c
+
+# CHECK: st4.8b { v1, v2, v3, v4 }, [x1]
+# CHECK: st4.16b { v5, v6, v7, v8 }, [x2]
+# CHECK: st4.2s { v10, v11, v12, v13 }, [x0]
+
+0x61 0x28 0xe4 0x0d
+0x82 0xa4 0xe5 0x4d
+0xa3 0x78 0xe6 0x0d
+0xc4 0xa0 0xe7 0x4d
+
+# CHECK: ld4.b { v1, v2, v3, v4 }[2], [x3], x4
+# CHECK: ld4.d { v2, v3, v4, v5 }[1], [x4], x5
+# CHECK: ld4.h { v3, v4, v5, v6 }[3], [x5], x6
+# CHECK: ld4.s { v4, v5, v6, v7 }[2], [x6], x7
+
+0x61 0x28 0xff 0x0d
+0x82 0xa4 0xff 0x4d
+0xa3 0x78 0xff 0x0d
+0xc4 0xa0 0xff 0x4d
+
+# CHECK: ld4.b { v1, v2, v3, v4 }[2], [x3], #4
+# CHECK: ld4.d { v2, v3, v4, v5 }[1], [x4], #32
+# CHECK: ld4.h { v3, v4, v5, v6 }[3], [x5], #8
+# CHECK: ld4.s { v4, v5, v6, v7 }[2], [x6], #16
+
+0x61 0x28 0xa4 0x0d
+0x82 0xa4 0xa5 0x4d
+0xa3 0x78 0xa6 0x0d
+0xc4 0xa0 0xa7 0x4d
+
+# CHECK: st4.b { v1, v2, v3, v4 }[2], [x3], x4
+# CHECK: st4.d { v2, v3, v4, v5 }[1], [x4], x5
+# CHECK: st4.h { v3, v4, v5, v6 }[3], [x5], x6
+# CHECK: st4.s { v4, v5, v6, v7 }[2], [x6], x7
+
+0x61 0x28 0xbf 0x0d
+0x82 0xa4 0xbf 0x4d
+0xa3 0x78 0xbf 0x0d
+0xc4 0xa0 0xbf 0x4d
+
+# CHECK: st4.b { v1, v2, v3, v4 }[2], [x3], #4
+# CHECK: st4.d { v2, v3, v4, v5 }[1], [x4], #32
+# CHECK: st4.h { v3, v4, v5, v6 }[3], [x5], #8
+# CHECK: st4.s { v4, v5, v6, v7 }[2], [x6], #16
+
+0x41 0x00 0xc3 0x0c
+0x42 0x00 0xc4 0x4c
+0x64 0x04 0xc5 0x0c
+0x87 0x04 0xc6 0x4c
+0x0c 0x08 0xc7 0x0c
+0x0a 0x08 0xc8 0x4c
+0x4f 0x0c 0xca 0x4c
+
+# CHECK: ld4.8b { v1, v2, v3, v4 }, [x2], x3
+# CHECK: ld4.16b { v2, v3, v4, v5 }, [x2], x4
+# CHECK: ld4.4h { v4, v5, v6, v7 }, [x3], x5
+# CHECK: ld4.8h { v7, v8, v9, v10 }, [x4], x6
+# CHECK: ld4.2s { v12, v13, v14, v15 }, [x0], x7
+# CHECK: ld4.4s { v10, v11, v12, v13 }, [x0], x8
+# CHECK: ld4.2d { v15, v16, v17, v18 }, [x2], x10
+
+0x00 0x00 0xdf 0x0c
+0x00 0x00 0xdf 0x4c
+0x00 0x04 0xdf 0x0c
+0x00 0x04 0xdf 0x4c
+0x00 0x08 0xdf 0x0c
+0x00 0x08 0xdf 0x4c
+0x00 0x0c 0xdf 0x4c
+
+# CHECK: ld4.8b { v0, v1, v2, v3 }, [x0], #32
+# CHECK: ld4.16b { v0, v1, v2, v3 }, [x0], #64
+# CHECK: ld4.4h { v0, v1, v2, v3 }, [x0], #32
+# CHECK: ld4.8h { v0, v1, v2, v3 }, [x0], #64
+# CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], #32
+# CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], #64
+# CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], #64
+
+0x00 0x00 0x9f 0x0c
+0x00 0x00 0x9f 0x4c
+0x00 0x04 0x9f 0x0c
+0x00 0x04 0x9f 0x4c
+0x00 0x08 0x9f 0x0c
+0x00 0x08 0x9f 0x4c
+0x00 0x0c 0x9f 0x4c
+
+# CHECK: st4.8b { v0, v1, v2, v3 }, [x0], #32
+# CHECK: st4.16b { v0, v1, v2, v3 }, [x0], #64
+# CHECK: st4.4h { v0, v1, v2, v3 }, [x0], #32
+# CHECK: st4.8h { v0, v1, v2, v3 }, [x0], #64
+# CHECK: st4.2s { v0, v1, v2, v3 }, [x0], #32
+# CHECK: st4.4s { v0, v1, v2, v3 }, [x0], #64
+# CHECK: st4.2d { v0, v1, v2, v3 }, [x0], #64
+
+0x41 0x00 0x83 0x0c
+0x42 0x00 0x84 0x4c
+0x64 0x04 0x85 0x0c
+0x87 0x04 0x86 0x4c
+0x0c 0x08 0x87 0x0c
+0x0a 0x08 0x88 0x4c
+0x4f 0x0c 0x8a 0x4c
+
+# CHECK: st4.8b { v1, v2, v3, v4 }, [x2], x3
+# CHECK: st4.16b { v2, v3, v4, v5 }, [x2], x4
+# CHECK: st4.4h { v4, v5, v6, v7 }, [x3], x5
+# CHECK: st4.8h { v7, v8, v9, v10 }, [x4], x6
+# CHECK: st4.2s { v12, v13, v14, v15 }, [x0], x7
+# CHECK: st4.4s { v10, v11, v12, v13 }, [x0], x8
+# CHECK: st4.2d { v15, v16, v17, v18 }, [x2], x10
+
+0x61 0x28 0x60 0x0d
+0x82 0xa4 0x60 0x4d
+0xc3 0x70 0x60 0x0d
+0xe4 0xb0 0x60 0x4d
+
+# CHECK: ld4.b { v1, v2, v3, v4 }[2], [x3]
+# CHECK: ld4.d { v2, v3, v4, v5 }[1], [x4]
+# CHECK: ld4.h { v3, v4, v5, v6 }[2], [x6]
+# CHECK: ld4.s { v4, v5, v6, v7 }[3], [x7]
+
+0x61 0x28 0x20 0x0d
+0x82 0xa4 0x20 0x4d
+0xc3 0x70 0x20 0x0d
+0xe4 0xb0 0x20 0x4d
+
+# CHECK: st4.b { v1, v2, v3, v4 }[2], [x3]
+# CHECK: st4.d { v2, v3, v4, v5 }[1], [x4]
+# CHECK: st4.h { v3, v4, v5, v6 }[2], [x6]
+# CHECK: st4.s { v4, v5, v6, v7 }[3], [x7]
+
+0x21 0xe0 0x60 0x0d
+0x21 0xe0 0xe2 0x0d
+0x21 0xe0 0x60 0x4d
+0x21 0xe0 0xe2 0x4d
+0x21 0xe4 0x60 0x0d
+0x21 0xe4 0xe2 0x0d
+0x21 0xe4 0x60 0x4d
+0x21 0xe4 0xe2 0x4d
+0x21 0xe8 0x60 0x0d
+0x21 0xe8 0xe2 0x0d
+0x21 0xec 0x60 0x4d
+0x21 0xec 0xe2 0x4d
+0x21 0xec 0x60 0x0d
+0x21 0xec 0xe2 0x0d
+
+# CHECK: ld4r.8b { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4r.8b { v1, v2, v3, v4 }, [x1], x2
+# CHECK: ld4r.16b { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4r.16b { v1, v2, v3, v4 }, [x1], x2
+# CHECK: ld4r.4h { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4r.4h { v1, v2, v3, v4 }, [x1], x2
+# CHECK: ld4r.8h { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4r.8h { v1, v2, v3, v4 }, [x1], x2
+# CHECK: ld4r.2s { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4r.2s { v1, v2, v3, v4 }, [x1], x2
+# CHECK: ld4r.2d { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4r.2d { v1, v2, v3, v4 }, [x1], x2
+# CHECK: ld4r.1d { v1, v2, v3, v4 }, [x1]
+# CHECK: ld4r.1d { v1, v2, v3, v4 }, [x1], x2
+
+0x21 0xe0 0xff 0x0d
+0x21 0xe0 0xff 0x4d
+0x21 0xe4 0xff 0x0d
+0x21 0xe4 0xff 0x4d
+0x21 0xe8 0xff 0x0d
+0x21 0xec 0xff 0x4d
+0x21 0xec 0xff 0x0d
+
+# CHECK: ld4r.8b { v1, v2, v3, v4 }, [x1], #4
+# CHECK: ld4r.16b { v1, v2, v3, v4 }, [x1], #4
+# CHECK: ld4r.4h { v1, v2, v3, v4 }, [x1], #8
+# CHECK: ld4r.8h { v1, v2, v3, v4 }, [x1], #8
+# CHECK: ld4r.2s { v1, v2, v3, v4 }, [x1], #16
+# CHECK: ld4r.2d { v1, v2, v3, v4 }, [x1], #32
+# CHECK: ld4r.1d { v1, v2, v3, v4 }, [x1], #32
+
+0x20 0xe4 0x00 0x2f
+0x20 0xe4 0x00 0x6f
+0x20 0xe4 0x00 0x0f
+0x20 0xe4 0x00 0x4f
+
+# CHECK: movi d0, #0x000000000000ff
+# CHECK: movi.2d v0, #0x000000000000ff
+# CHECK: movi.8b v0, #0x1
+# CHECK: movi.16b v0, #0x1
+
+0x20 0x04 0x00 0x0f
+0x20 0x24 0x00 0x0f
+0x20 0x44 0x00 0x0f
+0x20 0x64 0x00 0x0f
+
+# CHECK: movi.2s v0, #0x1
+# CHECK: movi.2s v0, #0x1, lsl #8
+# CHECK: movi.2s v0, #0x1, lsl #16
+# CHECK: movi.2s v0, #0x1, lsl #24
+
+0x20 0x04 0x00 0x4f
+0x20 0x24 0x00 0x4f
+0x20 0x44 0x00 0x4f
+0x20 0x64 0x00 0x4f
+
+# CHECK: movi.4s v0, #0x1
+# CHECK: movi.4s v0, #0x1, lsl #8
+# CHECK: movi.4s v0, #0x1, lsl #16
+# CHECK: movi.4s v0, #0x1, lsl #24
+
+0x20 0x84 0x00 0x0f
+0x20 0xa4 0x00 0x0f
+
+# CHECK: movi.4h v0, #0x1
+# CHECK: movi.4h v0, #0x1, lsl #8
+
+0x20 0x84 0x00 0x4f
+0x20 0xa4 0x00 0x4f
+
+# CHECK: movi.8h v0, #0x1
+# CHECK: movi.8h v0, #0x1, lsl #8
+
+0x20 0x04 0x00 0x2f
+0x20 0x24 0x00 0x2f
+0x20 0x44 0x00 0x2f
+0x20 0x64 0x00 0x2f
+
+# CHECK: mvni.2s v0, #0x1
+# CHECK: mvni.2s v0, #0x1, lsl #8
+# CHECK: mvni.2s v0, #0x1, lsl #16
+# CHECK: mvni.2s v0, #0x1, lsl #24
+
+0x20 0x04 0x00 0x6f
+0x20 0x24 0x00 0x6f
+0x20 0x44 0x00 0x6f
+0x20 0x64 0x00 0x6f
+
+# CHECK: mvni.4s v0, #0x1
+# CHECK: mvni.4s v0, #0x1, lsl #8
+# CHECK: mvni.4s v0, #0x1, lsl #16
+# CHECK: mvni.4s v0, #0x1, lsl #24
+
+0x20 0x84 0x00 0x2f
+0x20 0xa4 0x00 0x2f
+
+# CHECK: mvni.4h v0, #0x1
+# CHECK: mvni.4h v0, #0x1, lsl #8
+
+0x20 0x84 0x00 0x6f
+0x20 0xa4 0x00 0x6f
+
+# CHECK: mvni.8h v0, #0x1
+# CHECK: mvni.8h v0, #0x1, lsl #8
+
+0x20 0xc4 0x00 0x2f
+0x20 0xd4 0x00 0x2f
+0x20 0xc4 0x00 0x6f
+0x20 0xd4 0x00 0x6f
+
+# CHECK: mvni.2s v0, #0x1, msl #8
+# CHECK: mvni.2s v0, #0x1, msl #16
+# CHECK: mvni.4s v0, #0x1, msl #8
+# CHECK: mvni.4s v0, #0x1, msl #16
+
+0x00 0x88 0x21 0x2e
+0x00 0x98 0x21 0x2e
+0x00 0x98 0xa1 0x2e
+0x00 0x98 0x21 0x0e
+0x00 0x88 0x21 0x0e
+0x00 0x88 0xa1 0x0e
+0x00 0x98 0xa1 0x0e
+
+# CHECK: frinta.2s v0, v0
+# CHECK: frintx.2s v0, v0
+# CHECK: frinti.2s v0, v0
+# CHECK: frintm.2s v0, v0
+# CHECK: frintn.2s v0, v0
+# CHECK: frintp.2s v0, v0
+# CHECK: frintz.2s v0, v0
+
+#===-------------------------------------------------------------------------===
+# AdvSIMD scalar x index instructions
+#===-------------------------------------------------------------------------===
+
+0x00 0x18 0xa0 0x5f
+0x00 0x18 0xc0 0x5f
+0x00 0x58 0xa0 0x5f
+0x00 0x58 0xc0 0x5f
+0x00 0x98 0xa0 0x7f
+0x00 0x98 0xc0 0x7f
+0x00 0x98 0xa0 0x5f
+0x00 0x98 0xc0 0x5f
+0x00 0x38 0x70 0x5f
+0x00 0x38 0xa0 0x5f
+0x00 0x78 0x70 0x5f
+0x00 0xc8 0x70 0x5f
+0x00 0xc8 0xa0 0x5f
+0x00 0xb8 0x70 0x5f
+0x00 0xb8 0xa0 0x5f
+0x00 0xd8 0x70 0x5f
+0x00 0xd8 0xa0 0x5f
+
+# CHECK: fmla.s s0, s0, v0[3]
+# CHECK: fmla.d d0, d0, v0[1]
+# CHECK: fmls.s s0, s0, v0[3]
+# CHECK: fmls.d d0, d0, v0[1]
+# CHECK: fmulx.s s0, s0, v0[3]
+# CHECK: fmulx.d d0, d0, v0[1]
+# CHECK: fmul.s s0, s0, v0[3]
+# CHECK: fmul.d d0, d0, v0[1]
+# CHECK: sqdmlal.h s0, h0, v0[7]
+# CHECK: sqdmlal.s d0, s0, v0[3]
+# CHECK: sqdmlsl.h s0, h0, v0[7]
+# CHECK: sqdmulh.h h0, h0, v0[7]
+# CHECK: sqdmulh.s s0, s0, v0[3]
+# CHECK: sqdmull.h s0, h0, v0[7]
+# CHECK: sqdmull.s d0, s0, v0[3]
+# CHECK: sqrdmulh.h h0, h0, v0[7]
+# CHECK: sqrdmulh.s s0, s0, v0[3]
+
+#===-------------------------------------------------------------------------===
+# AdvSIMD vector x index instructions
+#===-------------------------------------------------------------------------===
+
+ 0x00 0x10 0x80 0x0f
+ 0x00 0x10 0xa0 0x4f
+ 0x00 0x18 0xc0 0x4f
+ 0x00 0x50 0x80 0x0f
+ 0x00 0x50 0xa0 0x4f
+ 0x00 0x58 0xc0 0x4f
+ 0x00 0x90 0x80 0x2f
+ 0x00 0x90 0xa0 0x6f
+ 0x00 0x98 0xc0 0x6f
+ 0x00 0x90 0x80 0x0f
+ 0x00 0x90 0xa0 0x4f
+ 0x00 0x98 0xc0 0x4f
+ 0x00 0x00 0x40 0x2f
+ 0x00 0x00 0x50 0x6f
+ 0x00 0x08 0x80 0x2f
+ 0x00 0x08 0xa0 0x6f
+ 0x00 0x40 0x40 0x2f
+ 0x00 0x40 0x50 0x6f
+ 0x00 0x48 0x80 0x2f
+ 0x00 0x48 0xa0 0x6f
+ 0x00 0x80 0x40 0x0f
+ 0x00 0x80 0x50 0x4f
+ 0x00 0x88 0x80 0x0f
+ 0x00 0x88 0xa0 0x4f
+ 0x00 0x20 0x40 0x0f
+ 0x00 0x20 0x50 0x4f
+ 0x00 0x28 0x80 0x0f
+ 0x00 0x28 0xa0 0x4f
+ 0x00 0x60 0x40 0x0f
+ 0x00 0x60 0x50 0x4f
+ 0x00 0x68 0x80 0x0f
+ 0x00 0x68 0xa0 0x4f
+ 0x00 0xa0 0x40 0x0f
+ 0x00 0xa0 0x50 0x4f
+ 0x00 0xa8 0x80 0x0f
+ 0x00 0xa8 0xa0 0x4f
+ 0x00 0x30 0x40 0x0f
+ 0x00 0x30 0x50 0x4f
+ 0x00 0x38 0x80 0x0f
+ 0x00 0x38 0xa0 0x4f
+ 0x00 0x70 0x40 0x0f
+ 0x00 0x70 0x50 0x4f
+ 0x00 0x78 0x80 0x0f
+ 0x00 0x78 0xa0 0x4f
+ 0x00 0xc0 0x40 0x0f
+ 0x00 0xc0 0x50 0x4f
+ 0x00 0xc8 0x80 0x0f
+ 0x00 0xc8 0xa0 0x4f
+ 0x00 0xb0 0x40 0x0f
+ 0x00 0xb0 0x50 0x4f
+ 0x00 0xb8 0x80 0x0f
+ 0x00 0xb8 0xa0 0x4f
+ 0x00 0xd0 0x40 0x0f
+ 0x00 0xd0 0x50 0x4f
+ 0x00 0xd8 0x80 0x0f
+ 0x00 0xd8 0xa0 0x4f
+ 0x00 0x20 0x40 0x2f
+ 0x00 0x20 0x50 0x6f
+ 0x00 0x28 0x80 0x2f
+ 0x00 0x28 0xa0 0x6f
+ 0x00 0x60 0x40 0x2f
+ 0x00 0x60 0x50 0x6f
+ 0x00 0x68 0x80 0x2f
+ 0x00 0x68 0xa0 0x6f
+ 0x00 0xa0 0x40 0x2f
+ 0x00 0xa0 0x50 0x6f
+ 0x00 0xa8 0x80 0x2f
+ 0x00 0xa8 0xa0 0x6f
+
+# CHECK: fmla.2s v0, v0, v0[0]
+# CHECK: fmla.4s v0, v0, v0[1]
+# CHECK: fmla.2d v0, v0, v0[1]
+# CHECK: fmls.2s v0, v0, v0[0]
+# CHECK: fmls.4s v0, v0, v0[1]
+# CHECK: fmls.2d v0, v0, v0[1]
+# CHECK: fmulx.2s v0, v0, v0[0]
+# CHECK: fmulx.4s v0, v0, v0[1]
+# CHECK: fmulx.2d v0, v0, v0[1]
+# CHECK: fmul.2s v0, v0, v0[0]
+# CHECK: fmul.4s v0, v0, v0[1]
+# CHECK: fmul.2d v0, v0, v0[1]
+# CHECK: mla.4h v0, v0, v0[0]
+# CHECK: mla.8h v0, v0, v0[1]
+# CHECK: mla.2s v0, v0, v0[2]
+# CHECK: mla.4s v0, v0, v0[3]
+# CHECK: mls.4h v0, v0, v0[0]
+# CHECK: mls.8h v0, v0, v0[1]
+# CHECK: mls.2s v0, v0, v0[2]
+# CHECK: mls.4s v0, v0, v0[3]
+# CHECK: mul.4h v0, v0, v0[0]
+# CHECK: mul.8h v0, v0, v0[1]
+# CHECK: mul.2s v0, v0, v0[2]
+# CHECK: mul.4s v0, v0, v0[3]
+# CHECK: smlal.4s v0, v0, v0[0]
+# CHECK: smlal2.4s v0, v0, v0[1]
+# CHECK: smlal.2d v0, v0, v0[2]
+# CHECK: smlal2.2d v0, v0, v0[3]
+# CHECK: smlsl.4s v0, v0, v0[0]
+# CHECK: smlsl2.4s v0, v0, v0[1]
+# CHECK: smlsl.2d v0, v0, v0[2]
+# CHECK: smlsl2.2d v0, v0, v0[3]
+# CHECK: smull.4s v0, v0, v0[0]
+# CHECK: smull2.4s v0, v0, v0[1]
+# CHECK: smull.2d v0, v0, v0[2]
+# CHECK: smull2.2d v0, v0, v0[3]
+# CHECK: sqdmlal.4s v0, v0, v0[0]
+# CHECK: sqdmlal2.4s v0, v0, v0[1]
+# CHECK: sqdmlal.2d v0, v0, v0[2]
+# CHECK: sqdmlal2.2d v0, v0, v0[3]
+# CHECK: sqdmlsl.4s v0, v0, v0[0]
+# CHECK: sqdmlsl2.4s v0, v0, v0[1]
+# CHECK: sqdmlsl.2d v0, v0, v0[2]
+# CHECK: sqdmlsl2.2d v0, v0, v0[3]
+# CHECK: sqdmulh.4h v0, v0, v0[0]
+# CHECK: sqdmulh.8h v0, v0, v0[1]
+# CHECK: sqdmulh.2s v0, v0, v0[2]
+# CHECK: sqdmulh.4s v0, v0, v0[3]
+# CHECK: sqdmull.4s v0, v0, v0[0]
+# CHECK: sqdmull2.4s v0, v0, v0[1]
+# CHECK: sqdmull.2d v0, v0, v0[2]
+# CHECK: sqdmull2.2d v0, v0, v0[3]
+# CHECK: sqrdmulh.4h v0, v0, v0[0]
+# CHECK: sqrdmulh.8h v0, v0, v0[1]
+# CHECK: sqrdmulh.2s v0, v0, v0[2]
+# CHECK: sqrdmulh.4s v0, v0, v0[3]
+# CHECK: umlal.4s v0, v0, v0[0]
+# CHECK: umlal2.4s v0, v0, v0[1]
+# CHECK: umlal.2d v0, v0, v0[2]
+# CHECK: umlal2.2d v0, v0, v0[3]
+# CHECK: umlsl.4s v0, v0, v0[0]
+# CHECK: umlsl2.4s v0, v0, v0[1]
+# CHECK: umlsl.2d v0, v0, v0[2]
+# CHECK: umlsl2.2d v0, v0, v0[3]
+# CHECK: umull.4s v0, v0, v0[0]
+# CHECK: umull2.4s v0, v0, v0[1]
+# CHECK: umull.2d v0, v0, v0[2]
+# CHECK: umull2.2d v0, v0, v0[3]
+
+
+#===-------------------------------------------------------------------------===
+# AdvSIMD scalar + shift instructions
+#===-------------------------------------------------------------------------===
+
+ 0x00 0x54 0x41 0x5f
+ 0x00 0x54 0x41 0x7f
+ 0x00 0x9c 0x09 0x5f
+ 0x00 0x9c 0x12 0x5f
+ 0x00 0x9c 0x23 0x5f
+ 0x00 0x8c 0x09 0x7f
+ 0x00 0x8c 0x12 0x7f
+ 0x00 0x8c 0x23 0x7f
+ 0x00 0x64 0x09 0x7f
+ 0x00 0x64 0x12 0x7f
+ 0x00 0x64 0x23 0x7f
+ 0x00 0x64 0x44 0x7f
+ 0x00 0x74 0x09 0x5f
+ 0x00 0x74 0x12 0x5f
+ 0x00 0x74 0x23 0x5f
+ 0x00 0x74 0x44 0x5f
+ 0x00 0x94 0x09 0x5f
+ 0x00 0x94 0x12 0x5f
+ 0x00 0x94 0x23 0x5f
+ 0x00 0x84 0x09 0x7f
+ 0x00 0x84 0x12 0x7f
+ 0x00 0x84 0x23 0x7f
+ 0x00 0x44 0x41 0x7f
+ 0x00 0x24 0x41 0x5f
+ 0x00 0x34 0x41 0x5f
+ 0x00 0x04 0x41 0x5f
+ 0x00 0xe4 0x21 0x7f
+ 0x00 0xe4 0x42 0x7f
+ 0x00 0x9c 0x09 0x7f
+ 0x00 0x9c 0x12 0x7f
+ 0x00 0x9c 0x23 0x7f
+ 0x00 0x74 0x09 0x7f
+ 0x00 0x74 0x12 0x7f
+ 0x00 0x74 0x23 0x7f
+ 0x00 0x74 0x44 0x7f
+ 0x00 0x94 0x09 0x7f
+ 0x00 0x94 0x12 0x7f
+ 0x00 0x94 0x23 0x7f
+ 0x00 0x24 0x41 0x7f
+ 0x00 0x34 0x41 0x7f
+ 0x00 0x04 0x41 0x7f
+ 0x00 0x14 0x41 0x7f
+
+# CHECK: shl d0, d0, #1
+# CHECK: sli d0, d0, #1
+# CHECK: sqrshrn b0, h0, #7
+# CHECK: sqrshrn h0, s0, #14
+# CHECK: sqrshrn s0, d0, #29
+# CHECK: sqrshrun b0, h0, #7
+# CHECK: sqrshrun h0, s0, #14
+# CHECK: sqrshrun s0, d0, #29
+# CHECK: sqshlu b0, b0, #1
+# CHECK: sqshlu h0, h0, #2
+# CHECK: sqshlu s0, s0, #3
+# CHECK: sqshlu d0, d0, #4
+# CHECK: sqshl b0, b0, #1
+# CHECK: sqshl h0, h0, #2
+# CHECK: sqshl s0, s0, #3
+# CHECK: sqshl d0, d0, #4
+# CHECK: sqshrn b0, h0, #7
+# CHECK: sqshrn h0, s0, #14
+# CHECK: sqshrn s0, d0, #29
+# CHECK: sqshrun b0, h0, #7
+# CHECK: sqshrun h0, s0, #14
+# CHECK: sqshrun s0, d0, #29
+# CHECK: sri d0, d0, #63
+# CHECK: srshr d0, d0, #63
+# CHECK: srsra d0, d0, #63
+# CHECK: sshr d0, d0, #63
+# CHECK: ucvtf s0, s0, #31
+# CHECK: ucvtf d0, d0, #62
+# CHECK: uqrshrn b0, h0, #7
+# CHECK: uqrshrn h0, s0, #14
+# CHECK: uqrshrn s0, d0, #29
+# CHECK: uqshl b0, b0, #1
+# CHECK: uqshl h0, h0, #2
+# CHECK: uqshl s0, s0, #3
+# CHECK: uqshl d0, d0, #4
+# CHECK: uqshrn b0, h0, #7
+# CHECK: uqshrn h0, s0, #14
+# CHECK: uqshrn s0, d0, #29
+# CHECK: urshr d0, d0, #63
+# CHECK: ursra d0, d0, #63
+# CHECK: ushr d0, d0, #63
+# CHECK: usra d0, d0, #63
+
+#===-------------------------------------------------------------------------===
+# AdvSIMD vector + shift instructions
+#===-------------------------------------------------------------------------===
+
+ 0x00 0xfc 0x21 0x0f
+ 0x00 0xfc 0x22 0x4f
+ 0x00 0xfc 0x43 0x4f
+ 0x00 0xfc 0x21 0x2f
+ 0x00 0xfc 0x22 0x6f
+ 0x00 0xfc 0x43 0x6f
+ 0x00 0x8c 0x09 0x0f
+ 0x00 0x8c 0x0a 0x4f
+ 0x00 0x8c 0x13 0x0f
+ 0x00 0x8c 0x14 0x4f
+ 0x00 0x8c 0x25 0x0f
+ 0x00 0x8c 0x26 0x4f
+ 0x00 0xe4 0x21 0x0f
+ 0x00 0xe4 0x22 0x4f
+ 0x00 0xe4 0x43 0x4f
+ 0x00 0x54 0x09 0x0f
+ 0x00 0x54 0x0a 0x4f
+ 0x00 0x54 0x13 0x0f
+ 0x00 0x54 0x14 0x4f
+ 0x00 0x54 0x25 0x0f
+ 0x00 0x54 0x26 0x4f
+ 0x00 0x54 0x47 0x4f
+ 0x00 0x84 0x09 0x0f
+ 0x00 0x84 0x0a 0x4f
+ 0x00 0x84 0x13 0x0f
+ 0x00 0x84 0x14 0x4f
+ 0x00 0x84 0x25 0x0f
+ 0x00 0x84 0x26 0x4f
+ 0x00 0x54 0x09 0x2f
+ 0x00 0x54 0x0a 0x6f
+ 0x00 0x54 0x13 0x2f
+ 0x00 0x54 0x14 0x6f
+ 0x00 0x54 0x25 0x2f
+ 0x00 0x54 0x26 0x6f
+ 0x00 0x54 0x47 0x6f
+ 0x00 0x9c 0x09 0x0f
+ 0x00 0x9c 0x0a 0x4f
+ 0x00 0x9c 0x13 0x0f
+ 0x00 0x9c 0x14 0x4f
+ 0x00 0x9c 0x25 0x0f
+ 0x00 0x9c 0x26 0x4f
+ 0x00 0x8c 0x09 0x2f
+ 0x00 0x8c 0x0a 0x6f
+ 0x00 0x8c 0x13 0x2f
+ 0x00 0x8c 0x14 0x6f
+ 0x00 0x8c 0x25 0x2f
+ 0x00 0x8c 0x26 0x6f
+ 0x00 0x64 0x09 0x2f
+ 0x00 0x64 0x0a 0x6f
+ 0x00 0x64 0x13 0x2f
+ 0x00 0x64 0x14 0x6f
+ 0x00 0x64 0x25 0x2f
+ 0x00 0x64 0x26 0x6f
+ 0x00 0x64 0x47 0x6f
+ 0x00 0x74 0x09 0x0f
+ 0x00 0x74 0x0a 0x4f
+ 0x00 0x74 0x13 0x0f
+ 0x00 0x74 0x14 0x4f
+ 0x00 0x74 0x25 0x0f
+ 0x00 0x74 0x26 0x4f
+ 0x00 0x74 0x47 0x4f
+ 0x00 0x94 0x09 0x0f
+ 0x00 0x94 0x0a 0x4f
+ 0x00 0x94 0x13 0x0f
+ 0x00 0x94 0x14 0x4f
+ 0x00 0x94 0x25 0x0f
+ 0x00 0x94 0x26 0x4f
+ 0x00 0x84 0x09 0x2f
+ 0x00 0x84 0x0a 0x6f
+ 0x00 0x84 0x13 0x2f
+ 0x00 0x84 0x14 0x6f
+ 0x00 0x84 0x25 0x2f
+ 0x00 0x84 0x26 0x6f
+ 0x00 0x44 0x09 0x2f
+ 0x00 0x44 0x0a 0x6f
+ 0x00 0x44 0x13 0x2f
+ 0x00 0x44 0x14 0x6f
+ 0x00 0x44 0x25 0x2f
+ 0x00 0x44 0x26 0x6f
+ 0x00 0x44 0x47 0x6f
+ 0x00 0x24 0x09 0x0f
+ 0x00 0x24 0x0a 0x4f
+ 0x00 0x24 0x13 0x0f
+ 0x00 0x24 0x14 0x4f
+ 0x00 0x24 0x25 0x0f
+ 0x00 0x24 0x26 0x4f
+ 0x00 0x24 0x47 0x4f
+ 0x00 0x34 0x09 0x0f
+ 0x00 0x34 0x0a 0x4f
+ 0x00 0x34 0x13 0x0f
+ 0x00 0x34 0x14 0x4f
+ 0x00 0x34 0x25 0x0f
+ 0x00 0x34 0x26 0x4f
+ 0x00 0x34 0x47 0x4f
+ 0x00 0xa4 0x09 0x0f
+ 0x00 0xa4 0x0a 0x4f
+ 0x00 0xa4 0x13 0x0f
+ 0x00 0xa4 0x14 0x4f
+ 0x00 0xa4 0x25 0x0f
+ 0x00 0xa4 0x26 0x4f
+ 0x00 0x04 0x09 0x0f
+ 0x00 0x04 0x0a 0x4f
+ 0x00 0x04 0x13 0x0f
+ 0x00 0x04 0x14 0x4f
+ 0x00 0x04 0x25 0x0f
+ 0x00 0x04 0x26 0x4f
+ 0x00 0x04 0x47 0x4f
+ 0x00 0x04 0x09 0x0f
+ 0x00 0x14 0x0a 0x4f
+ 0x00 0x14 0x13 0x0f
+ 0x00 0x14 0x14 0x4f
+ 0x00 0x14 0x25 0x0f
+ 0x00 0x14 0x26 0x4f
+ 0x00 0x14 0x47 0x4f
+ 0x00 0x14 0x40 0x5f
+ 0x00 0xe4 0x21 0x2f
+ 0x00 0xe4 0x22 0x6f
+ 0x00 0xe4 0x43 0x6f
+ 0x00 0x9c 0x09 0x2f
+ 0x00 0x9c 0x0a 0x6f
+ 0x00 0x9c 0x13 0x2f
+ 0x00 0x9c 0x14 0x6f
+ 0x00 0x9c 0x25 0x2f
+ 0x00 0x9c 0x26 0x6f
+ 0x00 0x74 0x09 0x2f
+ 0x00 0x74 0x0a 0x6f
+ 0x00 0x74 0x13 0x2f
+ 0x00 0x74 0x14 0x6f
+ 0x00 0x74 0x25 0x2f
+ 0x00 0x74 0x26 0x6f
+ 0x00 0x74 0x47 0x6f
+ 0x00 0x94 0x09 0x2f
+ 0x00 0x94 0x0a 0x6f
+ 0x00 0x94 0x13 0x2f
+ 0x00 0x94 0x14 0x6f
+ 0x00 0x94 0x25 0x2f
+ 0x00 0x94 0x26 0x6f
+ 0x00 0x24 0x09 0x2f
+ 0x00 0x24 0x0a 0x6f
+ 0x00 0x24 0x13 0x2f
+ 0x00 0x24 0x14 0x6f
+ 0x00 0x24 0x25 0x2f
+ 0x00 0x24 0x26 0x6f
+ 0x00 0x24 0x47 0x6f
+ 0x00 0x34 0x09 0x2f
+ 0x00 0x34 0x0a 0x6f
+ 0x00 0x34 0x13 0x2f
+ 0x00 0x34 0x14 0x6f
+ 0x00 0x34 0x25 0x2f
+ 0x00 0x34 0x26 0x6f
+ 0x00 0x34 0x47 0x6f
+ 0x00 0xa4 0x09 0x2f
+ 0x00 0xa4 0x0a 0x6f
+ 0x00 0xa4 0x13 0x2f
+ 0x00 0xa4 0x14 0x6f
+ 0x00 0xa4 0x25 0x2f
+ 0x00 0xa4 0x26 0x6f
+ 0x00 0x04 0x09 0x2f
+ 0x00 0x04 0x0a 0x6f
+ 0x00 0x04 0x13 0x2f
+ 0x00 0x04 0x14 0x6f
+ 0x00 0x04 0x25 0x2f
+ 0x00 0x04 0x26 0x6f
+ 0x00 0x04 0x47 0x6f
+ 0x00 0x14 0x09 0x2f
+ 0x00 0x14 0x0a 0x6f
+ 0x00 0x14 0x13 0x2f
+ 0x00 0x14 0x14 0x6f
+ 0x00 0x14 0x25 0x2f
+ 0x00 0x14 0x26 0x6f
+ 0x00 0x14 0x47 0x6f
+
+# CHECK: fcvtzs.2s v0, v0, #31
+# CHECK: fcvtzs.4s v0, v0, #30
+# CHECK: fcvtzs.2d v0, v0, #61
+# CHECK: fcvtzu.2s v0, v0, #31
+# CHECK: fcvtzu.4s v0, v0, #30
+# CHECK: fcvtzu.2d v0, v0, #61
+# CHECK: rshrn.8b v0, v0, #7
+# CHECK: rshrn2.16b v0, v0, #6
+# CHECK: rshrn.4h v0, v0, #13
+# CHECK: rshrn2.8h v0, v0, #12
+# CHECK: rshrn.2s v0, v0, #27
+# CHECK: rshrn2.4s v0, v0, #26
+# CHECK: scvtf.2s v0, v0, #31
+# CHECK: scvtf.4s v0, v0, #30
+# CHECK: scvtf.2d v0, v0, #61
+# CHECK: shl.8b v0, v0, #1
+# CHECK: shl.16b v0, v0, #2
+# CHECK: shl.4h v0, v0, #3
+# CHECK: shl.8h v0, v0, #4
+# CHECK: shl.2s v0, v0, #5
+# CHECK: shl.4s v0, v0, #6
+# CHECK: shl.2d v0, v0, #7
+# CHECK: shrn.8b v0, v0, #7
+# CHECK: shrn2.16b v0, v0, #6
+# CHECK: shrn.4h v0, v0, #13
+# CHECK: shrn2.8h v0, v0, #12
+# CHECK: shrn.2s v0, v0, #27
+# CHECK: shrn2.4s v0, v0, #26
+# CHECK: sli.8b v0, v0, #1
+# CHECK: sli.16b v0, v0, #2
+# CHECK: sli.4h v0, v0, #3
+# CHECK: sli.8h v0, v0, #4
+# CHECK: sli.2s v0, v0, #5
+# CHECK: sli.4s v0, v0, #6
+# CHECK: sli.2d v0, v0, #7
+# CHECK: sqrshrn.8b v0, v0, #7
+# CHECK: sqrshrn2.16b v0, v0, #6
+# CHECK: sqrshrn.4h v0, v0, #13
+# CHECK: sqrshrn2.8h v0, v0, #12
+# CHECK: sqrshrn.2s v0, v0, #27
+# CHECK: sqrshrn2.4s v0, v0, #26
+# CHECK: sqrshrun.8b v0, v0, #7
+# CHECK: sqrshrun2.16b v0, v0, #6
+# CHECK: sqrshrun.4h v0, v0, #13
+# CHECK: sqrshrun2.8h v0, v0, #12
+# CHECK: sqrshrun.2s v0, v0, #27
+# CHECK: sqrshrun2.4s v0, v0, #26
+# CHECK: sqshlu.8b v0, v0, #1
+# CHECK: sqshlu.16b v0, v0, #2
+# CHECK: sqshlu.4h v0, v0, #3
+# CHECK: sqshlu.8h v0, v0, #4
+# CHECK: sqshlu.2s v0, v0, #5
+# CHECK: sqshlu.4s v0, v0, #6
+# CHECK: sqshlu.2d v0, v0, #7
+# CHECK: sqshl.8b v0, v0, #1
+# CHECK: sqshl.16b v0, v0, #2
+# CHECK: sqshl.4h v0, v0, #3
+# CHECK: sqshl.8h v0, v0, #4
+# CHECK: sqshl.2s v0, v0, #5
+# CHECK: sqshl.4s v0, v0, #6
+# CHECK: sqshl.2d v0, v0, #7
+# CHECK: sqshrn.8b v0, v0, #7
+# CHECK: sqshrn2.16b v0, v0, #6
+# CHECK: sqshrn.4h v0, v0, #13
+# CHECK: sqshrn2.8h v0, v0, #12
+# CHECK: sqshrn.2s v0, v0, #27
+# CHECK: sqshrn2.4s v0, v0, #26
+# CHECK: sqshrun.8b v0, v0, #7
+# CHECK: sqshrun2.16b v0, v0, #6
+# CHECK: sqshrun.4h v0, v0, #13
+# CHECK: sqshrun2.8h v0, v0, #12
+# CHECK: sqshrun.2s v0, v0, #27
+# CHECK: sqshrun2.4s v0, v0, #26
+# CHECK: sri.8b v0, v0, #7
+# CHECK: sri.16b v0, v0, #6
+# CHECK: sri.4h v0, v0, #13
+# CHECK: sri.8h v0, v0, #12
+# CHECK: sri.2s v0, v0, #27
+# CHECK: sri.4s v0, v0, #26
+# CHECK: sri.2d v0, v0, #57
+# CHECK: srshr.8b v0, v0, #7
+# CHECK: srshr.16b v0, v0, #6
+# CHECK: srshr.4h v0, v0, #13
+# CHECK: srshr.8h v0, v0, #12
+# CHECK: srshr.2s v0, v0, #27
+# CHECK: srshr.4s v0, v0, #26
+# CHECK: srshr.2d v0, v0, #57
+# CHECK: srsra.8b v0, v0, #7
+# CHECK: srsra.16b v0, v0, #6
+# CHECK: srsra.4h v0, v0, #13
+# CHECK: srsra.8h v0, v0, #12
+# CHECK: srsra.2s v0, v0, #27
+# CHECK: srsra.4s v0, v0, #26
+# CHECK: srsra.2d v0, v0, #57
+# CHECK: sshll.8h v0, v0, #1
+# CHECK: sshll2.8h v0, v0, #2
+# CHECK: sshll.4s v0, v0, #3
+# CHECK: sshll2.4s v0, v0, #4
+# CHECK: sshll.2d v0, v0, #5
+# CHECK: sshll2.2d v0, v0, #6
+# CHECK: sshr.8b v0, v0, #7
+# CHECK: sshr.16b v0, v0, #6
+# CHECK: sshr.4h v0, v0, #13
+# CHECK: sshr.8h v0, v0, #12
+# CHECK: sshr.2s v0, v0, #27
+# CHECK: sshr.4s v0, v0, #26
+# CHECK: sshr.2d v0, v0, #57
+# CHECK: sshr.8b v0, v0, #7
+# CHECK: ssra.16b v0, v0, #6
+# CHECK: ssra.4h v0, v0, #13
+# CHECK: ssra.8h v0, v0, #12
+# CHECK: ssra.2s v0, v0, #27
+# CHECK: ssra.4s v0, v0, #26
+# CHECK: ssra.2d v0, v0, #57
+# CHECK: ssra d0, d0, #64
+# CHECK: ucvtf.2s v0, v0, #31
+# CHECK: ucvtf.4s v0, v0, #30
+# CHECK: ucvtf.2d v0, v0, #61
+# CHECK: uqrshrn.8b v0, v0, #7
+# CHECK: uqrshrn2.16b v0, v0, #6
+# CHECK: uqrshrn.4h v0, v0, #13
+# CHECK: uqrshrn2.8h v0, v0, #12
+# CHECK: uqrshrn.2s v0, v0, #27
+# CHECK: uqrshrn2.4s v0, v0, #26
+# CHECK: uqshl.8b v0, v0, #1
+# CHECK: uqshl.16b v0, v0, #2
+# CHECK: uqshl.4h v0, v0, #3
+# CHECK: uqshl.8h v0, v0, #4
+# CHECK: uqshl.2s v0, v0, #5
+# CHECK: uqshl.4s v0, v0, #6
+# CHECK: uqshl.2d v0, v0, #7
+# CHECK: uqshrn.8b v0, v0, #7
+# CHECK: uqshrn2.16b v0, v0, #6
+# CHECK: uqshrn.4h v0, v0, #13
+# CHECK: uqshrn2.8h v0, v0, #12
+# CHECK: uqshrn.2s v0, v0, #27
+# CHECK: uqshrn2.4s v0, v0, #26
+# CHECK: urshr.8b v0, v0, #7
+# CHECK: urshr.16b v0, v0, #6
+# CHECK: urshr.4h v0, v0, #13
+# CHECK: urshr.8h v0, v0, #12
+# CHECK: urshr.2s v0, v0, #27
+# CHECK: urshr.4s v0, v0, #26
+# CHECK: urshr.2d v0, v0, #57
+# CHECK: ursra.8b v0, v0, #7
+# CHECK: ursra.16b v0, v0, #6
+# CHECK: ursra.4h v0, v0, #13
+# CHECK: ursra.8h v0, v0, #12
+# CHECK: ursra.2s v0, v0, #27
+# CHECK: ursra.4s v0, v0, #26
+# CHECK: ursra.2d v0, v0, #57
+# CHECK: ushll.8h v0, v0, #1
+# CHECK: ushll2.8h v0, v0, #2
+# CHECK: ushll.4s v0, v0, #3
+# CHECK: ushll2.4s v0, v0, #4
+# CHECK: ushll.2d v0, v0, #5
+# CHECK: ushll2.2d v0, v0, #6
+# CHECK: ushr.8b v0, v0, #7
+# CHECK: ushr.16b v0, v0, #6
+# CHECK: ushr.4h v0, v0, #13
+# CHECK: ushr.8h v0, v0, #12
+# CHECK: ushr.2s v0, v0, #27
+# CHECK: ushr.4s v0, v0, #26
+# CHECK: ushr.2d v0, v0, #57
+# CHECK: usra.8b v0, v0, #7
+# CHECK: usra.16b v0, v0, #6
+# CHECK: usra.4h v0, v0, #13
+# CHECK: usra.8h v0, v0, #12
+# CHECK: usra.2s v0, v0, #27
+# CHECK: usra.4s v0, v0, #26
+# CHECK: usra.2d v0, v0, #57
+
+
+ 0x00 0xe0 0x20 0x0e
+ 0x00 0xe0 0x20 0x4e
+ 0x00 0xe0 0xe0 0x0e
+ 0x00 0xe0 0xe0 0x4e
+
+# CHECK: pmull.8h v0, v0, v0
+# CHECK: pmull2.8h v0, v0, v0
+# CHECK: pmull.1q v0, v0, v0
+# CHECK: pmull2.1q v0, v0, v0
+
+ 0x41 0xd8 0x70 0x7e
+ 0x83 0xd8 0x30 0x7e
+# CHECK: faddp.2d d1, v2
+# CHECK: faddp.2s s3, v4
+
+ 0x82 0x60 0x01 0x4e
+ 0x80 0x60 0x01 0x0e
+ 0xa2 0x00 0x01 0x4e
+ 0xa0 0x00 0x01 0x0e
+ 0xa2 0x40 0x01 0x4e
+ 0xa0 0x40 0x01 0x0e
+ 0xc2 0x20 0x01 0x4e
+ 0xc0 0x20 0x01 0x0e
+
+# CHECK: tbl.16b v2, { v4, v5, v6, v7 }, v1
+# CHECK: tbl.8b v0, { v4, v5, v6, v7 }, v1
+# CHECK: tbl.16b v2, { v5 }, v1
+# CHECK: tbl.8b v0, { v5 }, v1
+# CHECK: tbl.16b v2, { v5, v6, v7 }, v1
+# CHECK: tbl.8b v0, { v5, v6, v7 }, v1
+# CHECK: tbl.16b v2, { v6, v7 }, v1
+# CHECK: tbl.8b v0, { v6, v7 }, v1
+#
+ 0x82 0x70 0x01 0x4e
+ 0x80 0x70 0x01 0x0e
+ 0xa2 0x10 0x01 0x4e
+ 0xa0 0x10 0x01 0x0e
+ 0xa2 0x50 0x01 0x4e
+ 0xa0 0x50 0x01 0x0e
+ 0xc2 0x30 0x01 0x4e
+ 0xc0 0x30 0x01 0x0e
+
+# CHECK: tbx.16b v2, { v4, v5, v6, v7 }, v1
+# CHECK: tbx.8b v0, { v4, v5, v6, v7 }, v1
+# CHECK: tbx.16b v2, { v5 }, v1
+# CHECK: tbx.8b v0, { v5 }, v1
+# CHECK: tbx.16b v2, { v5, v6, v7 }, v1
+# CHECK: tbx.8b v0, { v5, v6, v7 }, v1
+# CHECK: tbx.16b v2, { v6, v7 }, v1
+# CHECK: tbx.8b v0, { v6, v7 }, v1
+#
+
+0x00 0x80 0x20 0x0e
+0x00 0x80 0x20 0x4e
+0x00 0x80 0xa0 0x0e
+0x00 0x80 0xa0 0x4e
+
+# CHECK: smlal.8h v0, v0, v0
+# CHECK: smlal2.8h v0, v0, v0
+# CHECK: smlal.2d v0, v0, v0
+# CHECK: smlal2.2d v0, v0, v0
+
+0x00 0x80 0x20 0x2e
+0x00 0x80 0x20 0x6e
+0x00 0x80 0xa0 0x2e
+0x00 0x80 0xa0 0x6e
+
+# CHECK: umlal.8h v0, v0, v0
+# CHECK: umlal2.8h v0, v0, v0
+# CHECK: umlal.2d v0, v0, v0
+# CHECK: umlal2.2d v0, v0, v0
+
+0x00 0x90 0x60 0x5e
+0x00 0x90 0xa0 0x5e
+0x00 0xb0 0x60 0x5e
+0x00 0xb0 0xa0 0x5e
+
+# CHECK: sqdmlal s0, h0, h0
+# CHECK: sqdmlal d0, s0, s0
+# CHECK: sqdmlsl s0, h0, h0
+# CHECK: sqdmlsl d0, s0, s0
+
+0xaa 0xc5 0xc7 0x4d
+0xaa 0xc9 0xc7 0x4d
+0xaa 0xc1 0xc7 0x4d
+
+# CHECK: ld1r.8h { v10 }, [x13], x7
+# CHECK: ld1r.4s { v10 }, [x13], x7
+# CHECK: ld1r.16b { v10 }, [x13], x7
+
+0x00 0xd0 0x60 0x5e
+0x00 0xd0 0xa0 0x5e
+# CHECK: sqdmull s0, h0, h0
+# CHECK: sqdmull d0, s0, s0
+
+0x00 0xd8 0xa1 0x7e
+0x00 0xd8 0xe1 0x7e
+
+# CHECK: frsqrte s0, s0
+# CHECK: frsqrte d0, d0
+
+0xca 0xcd 0xc7 0x4d
+0xea 0xc9 0xe7 0x4d
+0xea 0xe9 0xc7 0x4d
+0xea 0xe9 0xe7 0x4d
+# CHECK: ld1r.2d { v10 }, [x14], x7
+# CHECK: ld2r.4s { v10, v11 }, [x15], x7
+# CHECK: ld3r.4s { v10, v11, v12 }, [x15], x7
+# CHECK: ld4r.4s { v10, v11, v12, v13 }, [x15], x7
+
+#===-------------------------------------------------------------------------===
+# AdvSIMD scalar three same
+#===-------------------------------------------------------------------------===
+0x62 0xdc 0x21 0x5e
+# CHECK: fmulx s2, s3, s1
+0x62 0xdc 0x61 0x5e
+# CHECK: fmulx d2, d3, d1
+
+
+# rdar://12511369
+0xe8 0x6b 0xdf 0x4c
+# CHECK: ld1.4s { v8, v9, v10 }, [sp], #48
diff --git a/test/MC/Disassembler/AArch64/arm64-arithmetic.txt b/test/MC/Disassembler/AArch64/arm64-arithmetic.txt
new file mode 100644
index 000000000000..bd870edc8af7
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-arithmetic.txt
@@ -0,0 +1,526 @@
+# RUN: llvm-mc -triple arm64-apple-darwin --disassemble < %s | FileCheck %s
+
+#==---------------------------------------------------------------------------==
+# Add/Subtract with carry/borrow
+#==---------------------------------------------------------------------------==
+
+0x41 0x00 0x03 0x1a
+0x41 0x00 0x03 0x9a
+0x85 0x00 0x03 0x3a
+0x85 0x00 0x03 0xba
+
+# CHECK: adc w1, w2, w3
+# CHECK: adc x1, x2, x3
+# CHECK: adcs w5, w4, w3
+# CHECK: adcs x5, x4, x3
+
+0x41 0x00 0x03 0x5a
+0x41 0x00 0x03 0xda
+0x41 0x00 0x03 0x7a
+0x41 0x00 0x03 0xfa
+
+# CHECK: sbc w1, w2, w3
+# CHECK: sbc x1, x2, x3
+# CHECK: sbcs w1, w2, w3
+# CHECK: sbcs x1, x2, x3
+
+#==---------------------------------------------------------------------------==
+# Add/Subtract with (optionally shifted) immediate
+#==---------------------------------------------------------------------------==
+
+0x83 0x00 0x10 0x11
+0x83 0x00 0x10 0x91
+
+# CHECK: add w3, w4, #1024
+# CHECK: add x3, x4, #1024
+
+0x83 0x00 0x50 0x11
+0x83 0x00 0x40 0x11
+0x83 0x00 0x50 0x91
+0x83 0x00 0x40 0x91
+0xff 0x83 0x00 0x91
+
+# CHECK: add w3, w4, #1024, lsl #12
+# CHECK: add x3, x4, #1024, lsl #12
+# CHECK: add x3, x4, #0, lsl #12
+# CHECK: add sp, sp, #32
+
+0x83 0x00 0x10 0x31
+0x83 0x00 0x50 0x31
+0x83 0x00 0x10 0xb1
+0x83 0x00 0x50 0xb1
+0xff 0x83 0x00 0xb1
+
+# CHECK: adds w3, w4, #1024
+# CHECK: adds w3, w4, #1024, lsl #12
+# CHECK: adds x3, x4, #1024
+# CHECK: adds x3, x4, #1024, lsl #12
+# CHECK: cmn sp, #32
+
+0x83 0x00 0x10 0x51
+0x83 0x00 0x50 0x51
+0x83 0x00 0x10 0xd1
+0x83 0x00 0x50 0xd1
+0xff 0x83 0x00 0xd1
+
+# CHECK: sub w3, w4, #1024
+# CHECK: sub w3, w4, #1024, lsl #12
+# CHECK: sub x3, x4, #1024
+# CHECK: sub x3, x4, #1024, lsl #12
+# CHECK: sub sp, sp, #32
+
+0x83 0x00 0x10 0x71
+0x83 0x00 0x50 0x71
+0x83 0x00 0x10 0xf1
+0x83 0x00 0x50 0xf1
+0xff 0x83 0x00 0xf1
+
+# CHECK: subs w3, w4, #1024
+# CHECK: subs w3, w4, #1024, lsl #12
+# CHECK: subs x3, x4, #1024
+# CHECK: subs x3, x4, #1024, lsl #12
+# CHECK: cmp sp, #32
+
+#==---------------------------------------------------------------------------==
+# Add/Subtract register with (optional) shift
+#==---------------------------------------------------------------------------==
+
+0xac 0x01 0x0e 0x0b
+0xac 0x01 0x0e 0x8b
+0xac 0x31 0x0e 0x0b
+0xac 0x31 0x0e 0x8b
+0xac 0x29 0x4e 0x0b
+0xac 0x29 0x4e 0x8b
+0xac 0x1d 0x8e 0x0b
+0xac 0x9d 0x8e 0x8b
+
+# CHECK: add w12, w13, w14
+# CHECK: add x12, x13, x14
+# CHECK: add w12, w13, w14, lsl #12
+# CHECK: add x12, x13, x14, lsl #12
+# CHECK: add w12, w13, w14, lsr #10
+# CHECK: add x12, x13, x14, lsr #10
+# CHECK: add w12, w13, w14, asr #7
+# CHECK: add x12, x13, x14, asr #39
+
+0xac 0x01 0x0e 0x4b
+0xac 0x01 0x0e 0xcb
+0xac 0x31 0x0e 0x4b
+0xac 0x31 0x0e 0xcb
+0xac 0x29 0x4e 0x4b
+0xac 0x29 0x4e 0xcb
+0xac 0x1d 0x8e 0x4b
+0xac 0x9d 0x8e 0xcb
+
+# CHECK: sub w12, w13, w14
+# CHECK: sub x12, x13, x14
+# CHECK: sub w12, w13, w14, lsl #12
+# CHECK: sub x12, x13, x14, lsl #12
+# CHECK: sub w12, w13, w14, lsr #10
+# CHECK: sub x12, x13, x14, lsr #10
+# CHECK: sub w12, w13, w14, asr #7
+# CHECK: sub x12, x13, x14, asr #39
+
+0xac 0x01 0x0e 0x2b
+0xac 0x01 0x0e 0xab
+0xac 0x31 0x0e 0x2b
+0xac 0x31 0x0e 0xab
+0xac 0x29 0x4e 0x2b
+0xac 0x29 0x4e 0xab
+0xac 0x1d 0x8e 0x2b
+0xac 0x9d 0x8e 0xab
+
+# CHECK: adds w12, w13, w14
+# CHECK: adds x12, x13, x14
+# CHECK: adds w12, w13, w14, lsl #12
+# CHECK: adds x12, x13, x14, lsl #12
+# CHECK: adds w12, w13, w14, lsr #10
+# CHECK: adds x12, x13, x14, lsr #10
+# CHECK: adds w12, w13, w14, asr #7
+# CHECK: adds x12, x13, x14, asr #39
+
+0xac 0x01 0x0e 0x6b
+0xac 0x01 0x0e 0xeb
+0xac 0x31 0x0e 0x6b
+0xac 0x31 0x0e 0xeb
+0xac 0x29 0x4e 0x6b
+0xac 0x29 0x4e 0xeb
+0xac 0x1d 0x8e 0x6b
+0xac 0x9d 0x8e 0xeb
+
+# CHECK: subs w12, w13, w14
+# CHECK: subs x12, x13, x14
+# CHECK: subs w12, w13, w14, lsl #12
+# CHECK: subs x12, x13, x14, lsl #12
+# CHECK: subs w12, w13, w14, lsr #10
+# CHECK: subs x12, x13, x14, lsr #10
+# CHECK: subs w12, w13, w14, asr #7
+# CHECK: subs x12, x13, x14, asr #39
+
+#==---------------------------------------------------------------------------==
+# Add/Subtract with (optional) extend
+#==---------------------------------------------------------------------------==
+
+0x41 0x00 0x23 0x0b
+0x41 0x20 0x23 0x0b
+0x41 0x40 0x23 0x0b
+0x41 0x60 0x23 0x0b
+0x41 0x80 0x23 0x0b
+0x41 0xa0 0x23 0x0b
+0x41 0xc0 0x23 0x0b
+0x41 0xe0 0x23 0x0b
+
+# CHECK: add w1, w2, w3, uxtb
+# CHECK: add w1, w2, w3, uxth
+# CHECK: add w1, w2, w3
+# CHECK: add w1, w2, w3, uxtx
+# CHECK: add w1, w2, w3, sxtb
+# CHECK: add w1, w2, w3, sxth
+# CHECK: add w1, w2, w3, sxtw
+# CHECK: add w1, w2, w3, sxtx
+
+0x41 0x00 0x23 0x8b
+0x41 0x20 0x23 0x8b
+0x41 0x40 0x23 0x8b
+0x41 0x80 0x23 0x8b
+0x41 0xa0 0x23 0x8b
+0x41 0xc0 0x23 0x8b
+
+# CHECK: add x1, x2, w3, uxtb
+# CHECK: add x1, x2, w3, uxth
+# CHECK: add x1, x2, w3, uxtw
+# CHECK: add x1, x2, w3, sxtb
+# CHECK: add x1, x2, w3, sxth
+# CHECK: add x1, x2, w3, sxtw
+
+0xe1 0x43 0x23 0x0b
+0xe1 0x43 0x23 0x0b
+0x5f 0x60 0x23 0x8b
+0x5f 0x60 0x23 0x8b
+
+# CHECK: add w1, wsp, w3
+# CHECK: add w1, wsp, w3
+# CHECK: add sp, x2, x3
+# CHECK: add sp, x2, x3
+
+0x41 0x00 0x23 0x4b
+0x41 0x20 0x23 0x4b
+0x41 0x40 0x23 0x4b
+0x41 0x60 0x23 0x4b
+0x41 0x80 0x23 0x4b
+0x41 0xa0 0x23 0x4b
+0x41 0xc0 0x23 0x4b
+0x41 0xe0 0x23 0x4b
+
+# CHECK: sub w1, w2, w3, uxtb
+# CHECK: sub w1, w2, w3, uxth
+# CHECK: sub w1, w2, w3
+# CHECK: sub w1, w2, w3, uxtx
+# CHECK: sub w1, w2, w3, sxtb
+# CHECK: sub w1, w2, w3, sxth
+# CHECK: sub w1, w2, w3, sxtw
+# CHECK: sub w1, w2, w3, sxtx
+
+0x41 0x00 0x23 0xcb
+0x41 0x20 0x23 0xcb
+0x41 0x40 0x23 0xcb
+0x41 0x80 0x23 0xcb
+0x41 0xa0 0x23 0xcb
+0x41 0xc0 0x23 0xcb
+
+# CHECK: sub x1, x2, w3, uxtb
+# CHECK: sub x1, x2, w3, uxth
+# CHECK: sub x1, x2, w3, uxtw
+# CHECK: sub x1, x2, w3, sxtb
+# CHECK: sub x1, x2, w3, sxth
+# CHECK: sub x1, x2, w3, sxtw
+
+0xe1 0x43 0x23 0x4b
+0xe1 0x43 0x23 0x4b
+0x5f 0x60 0x23 0xcb
+0x5f 0x60 0x23 0xcb
+
+# CHECK: sub w1, wsp, w3
+# CHECK: sub w1, wsp, w3
+# CHECK: sub sp, x2, x3
+# CHECK: sub sp, x2, x3
+
+0x41 0x00 0x23 0x2b
+0x41 0x20 0x23 0x2b
+0x41 0x40 0x23 0x2b
+0x41 0x60 0x23 0x2b
+0x41 0x80 0x23 0x2b
+0x41 0xa0 0x23 0x2b
+0x41 0xc0 0x23 0x2b
+0x41 0xe0 0x23 0x2b
+
+# CHECK: adds w1, w2, w3, uxtb
+# CHECK: adds w1, w2, w3, uxth
+# CHECK: adds w1, w2, w3
+# CHECK: adds w1, w2, w3, uxtx
+# CHECK: adds w1, w2, w3, sxtb
+# CHECK: adds w1, w2, w3, sxth
+# CHECK: adds w1, w2, w3, sxtw
+# CHECK: adds w1, w2, w3, sxtx
+
+0x41 0x00 0x23 0xab
+0x41 0x20 0x23 0xab
+0x41 0x40 0x23 0xab
+0x41 0x80 0x23 0xab
+0x41 0xa0 0x23 0xab
+0x41 0xc0 0x23 0xab
+
+# CHECK: adds x1, x2, w3, uxtb
+# CHECK: adds x1, x2, w3, uxth
+# CHECK: adds x1, x2, w3, uxtw
+# CHECK: adds x1, x2, w3, sxtb
+# CHECK: adds x1, x2, w3, sxth
+# CHECK: adds x1, x2, w3, sxtw
+
+0xe1 0x43 0x23 0x2b
+0xe1 0x43 0x23 0x2b
+
+# CHECK: adds w1, wsp, w3
+# CHECK: adds w1, wsp, w3
+
+0x41 0x00 0x23 0x6b
+0x41 0x20 0x23 0x6b
+0x41 0x40 0x23 0x6b
+0x41 0x60 0x23 0x6b
+0x41 0x80 0x23 0x6b
+0x41 0xa0 0x23 0x6b
+0x41 0xc0 0x23 0x6b
+0x41 0xe0 0x23 0x6b
+
+# CHECK: subs w1, w2, w3, uxtb
+# CHECK: subs w1, w2, w3, uxth
+# CHECK: subs w1, w2, w3
+# CHECK: subs w1, w2, w3, uxtx
+# CHECK: subs w1, w2, w3, sxtb
+# CHECK: subs w1, w2, w3, sxth
+# CHECK: subs w1, w2, w3, sxtw
+# CHECK: subs w1, w2, w3, sxtx
+
+0x41 0x00 0x23 0xeb
+0x41 0x20 0x23 0xeb
+0x41 0x40 0x23 0xeb
+0x41 0x80 0x23 0xeb
+0x41 0xa0 0x23 0xeb
+0x41 0xc0 0x23 0xeb
+
+# CHECK: subs x1, x2, w3, uxtb
+# CHECK: subs x1, x2, w3, uxth
+# CHECK: subs x1, x2, w3, uxtw
+# CHECK: subs x1, x2, w3, sxtb
+# CHECK: subs x1, x2, w3, sxth
+# CHECK: subs x1, x2, w3, sxtw
+
+0xe1 0x43 0x23 0x6b
+0xe1 0x43 0x23 0x6b
+
+# CHECK: subs w1, wsp, w3
+# CHECK: subs w1, wsp, w3
+
+0x1f 0x41 0x28 0xeb
+0x3f 0x41 0x28 0x6b
+0xff 0x43 0x28 0x6b
+0xff 0x43 0x28 0xeb
+
+# CHECK: cmp x8, w8, uxtw
+# CHECK: cmp w9, w8, uxtw
+# CHECK: cmp wsp, w8
+# CHECK: cmp sp, w8
+
+0x3f 0x41 0x28 0x4b
+0xe1 0x43 0x28 0x4b
+0xff 0x43 0x28 0x4b
+0x3f 0x41 0x28 0xcb
+0xe1 0x43 0x28 0xcb
+0xff 0x43 0x28 0xcb
+0xe1 0x43 0x28 0x6b
+0xe1 0x43 0x28 0xeb
+
+# CHECK: sub wsp, w9, w8
+# CHECK: sub w1, wsp, w8
+# CHECK: sub wsp, wsp, w8
+# CHECK: sub sp, x9, w8
+# CHECK: sub x1, sp, w8
+# CHECK: sub sp, sp, w8
+# CHECK: subs w1, wsp, w8
+# CHECK: subs x1, sp, w8
+
+#==---------------------------------------------------------------------------==
+# Signed/Unsigned divide
+#==---------------------------------------------------------------------------==
+
+0x41 0x0c 0xc3 0x1a
+0x41 0x0c 0xc3 0x9a
+0x41 0x08 0xc3 0x1a
+0x41 0x08 0xc3 0x9a
+
+# CHECK: sdiv w1, w2, w3
+# CHECK: sdiv x1, x2, x3
+# CHECK: udiv w1, w2, w3
+# CHECK: udiv x1, x2, x3
+
+#==---------------------------------------------------------------------------==
+# Variable shifts
+#==---------------------------------------------------------------------------==
+
+ 0x41 0x28 0xc3 0x1a
+# CHECK: asr w1, w2, w3
+ 0x41 0x28 0xc3 0x9a
+# CHECK: asr x1, x2, x3
+ 0x41 0x20 0xc3 0x1a
+# CHECK: lsl w1, w2, w3
+ 0x41 0x20 0xc3 0x9a
+# CHECK: lsl x1, x2, x3
+ 0x41 0x24 0xc3 0x1a
+# CHECK: lsr w1, w2, w3
+ 0x41 0x24 0xc3 0x9a
+# CHECK: lsr x1, x2, x3
+ 0x41 0x2c 0xc3 0x1a
+# CHECK: ror w1, w2, w3
+ 0x41 0x2c 0xc3 0x9a
+# CHECK: ror x1, x2, x3
+
+#==---------------------------------------------------------------------------==
+# One operand instructions
+#==---------------------------------------------------------------------------==
+
+ 0x41 0x14 0xc0 0x5a
+# CHECK: cls w1, w2
+ 0x41 0x14 0xc0 0xda
+# CHECK: cls x1, x2
+ 0x41 0x10 0xc0 0x5a
+# CHECK: clz w1, w2
+ 0x41 0x10 0xc0 0xda
+# CHECK: clz x1, x2
+ 0x41 0x00 0xc0 0x5a
+# CHECK: rbit w1, w2
+ 0x41 0x00 0xc0 0xda
+# CHECK: rbit x1, x2
+ 0x41 0x08 0xc0 0x5a
+# CHECK: rev w1, w2
+ 0x41 0x0c 0xc0 0xda
+# CHECK: rev x1, x2
+ 0x41 0x04 0xc0 0x5a
+# CHECK: rev16 w1, w2
+ 0x41 0x04 0xc0 0xda
+# CHECK: rev16 x1, x2
+ 0x41 0x08 0xc0 0xda
+# CHECK: rev32 x1, x2
+
+#==---------------------------------------------------------------------------==
+# 6.6.1 Multiply-add instructions
+#==---------------------------------------------------------------------------==
+
+0x41 0x10 0x03 0x1b
+0x41 0x10 0x03 0x9b
+0x41 0x90 0x03 0x1b
+0x41 0x90 0x03 0x9b
+0x41 0x10 0x23 0x9b
+0x41 0x90 0x23 0x9b
+0x41 0x10 0xa3 0x9b
+0x41 0x90 0xa3 0x9b
+
+# CHECK: madd w1, w2, w3, w4
+# CHECK: madd x1, x2, x3, x4
+# CHECK: msub w1, w2, w3, w4
+# CHECK: msub x1, x2, x3, x4
+# CHECK: smaddl x1, w2, w3, x4
+# CHECK: smsubl x1, w2, w3, x4
+# CHECK: umaddl x1, w2, w3, x4
+# CHECK: umsubl x1, w2, w3, x4
+
+#==---------------------------------------------------------------------------==
+# Multiply-high instructions
+#==---------------------------------------------------------------------------==
+
+0x41 0x7c 0x43 0x9b
+0x41 0x7c 0xc3 0x9b
+
+# CHECK: smulh x1, x2, x3
+# CHECK: umulh x1, x2, x3
+
+#==---------------------------------------------------------------------------==
+# Move immediate instructions
+#==---------------------------------------------------------------------------==
+
+0x20 0x00 0x80 0x52
+0x20 0x00 0x80 0xd2
+0x20 0x00 0xa0 0x52
+0x20 0x00 0xa0 0xd2
+
+# CHECK: movz w0, #0x1
+# CHECK: movz x0, #0x1
+# CHECK: movz w0, #0x1, lsl #16
+# CHECK: movz x0, #0x1, lsl #16
+
+0x40 0x00 0x80 0x12
+0x40 0x00 0x80 0x92
+0x40 0x00 0xa0 0x12
+0x40 0x00 0xa0 0x92
+
+# CHECK: movn w0, #0x2
+# CHECK: movn x0, #0x2
+# CHECK: movn w0, #0x2, lsl #16
+# CHECK: movn x0, #0x2, lsl #16
+
+0x20 0x00 0x80 0x72
+0x20 0x00 0x80 0xf2
+0x20 0x00 0xa0 0x72
+0x20 0x00 0xa0 0xf2
+
+# CHECK: movk w0, #0x1
+# CHECK: movk x0, #0x1
+# CHECK: movk w0, #0x1, lsl #16
+# CHECK: movk x0, #0x1, lsl #16
+
+#==---------------------------------------------------------------------------==
+# Conditionally set flags instructions
+#==---------------------------------------------------------------------------==
+
+ 0x1f 0x00 0x00 0x31
+# CHECK: cmn w0, #0
+ 0x1f 0xfc 0x03 0xb1
+# CHECK: cmn x0, #255
+
+ 0x23 0x08 0x42 0x3a
+# CHECK: ccmn w1, #2, #3, eq
+ 0x23 0x08 0x42 0xba
+# CHECK: ccmn x1, #2, #3, eq
+ 0x23 0x08 0x42 0x7a
+# CHECK: ccmp w1, #2, #3, eq
+ 0x23 0x08 0x42 0xfa
+# CHECK: ccmp x1, #2, #3, eq
+
+ 0x23 0x00 0x42 0x3a
+# CHECK: ccmn w1, w2, #3, eq
+ 0x23 0x00 0x42 0xba
+# CHECK: ccmn x1, x2, #3, eq
+ 0x23 0x00 0x42 0x7a
+# CHECK: ccmp w1, w2, #3, eq
+ 0x23 0x00 0x42 0xfa
+# CHECK: ccmp x1, x2, #3, eq
+
+#==---------------------------------------------------------------------------==
+# Conditional select instructions
+#==---------------------------------------------------------------------------==
+
+ 0x41 0x00 0x83 0x1a
+# CHECK: csel w1, w2, w3, eq
+ 0x41 0x00 0x83 0x9a
+# CHECK: csel x1, x2, x3, eq
+ 0x41 0x04 0x83 0x1a
+# CHECK: csinc w1, w2, w3, eq
+ 0x41 0x04 0x83 0x9a
+# CHECK: csinc x1, x2, x3, eq
+ 0x41 0x00 0x83 0x5a
+# CHECK: csinv w1, w2, w3, eq
+ 0x41 0x00 0x83 0xda
+# CHECK: csinv x1, x2, x3, eq
+ 0x41 0x04 0x83 0x5a
+# CHECK: csneg w1, w2, w3, eq
+ 0x41 0x04 0x83 0xda
+# CHECK: csneg x1, x2, x3, eq
diff --git a/test/MC/Disassembler/AArch64/arm64-basic-a64-undefined.txt b/test/MC/Disassembler/AArch64/arm64-basic-a64-undefined.txt
new file mode 100644
index 000000000000..0e15af63e684
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-basic-a64-undefined.txt
@@ -0,0 +1,31 @@
+# These spawn another process, so they're rather expensive; not many are included.
+
+# LDR/STR: undefined if option field is 10x or 00x.
+# RUN: echo "0x00 0x08 0x20 0xf8" | llvm-mc -triple arm64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x88 0x20 0xf8" | llvm-mc -triple arm64 -disassemble 2>&1 | FileCheck %s
+
+# Instructions notionally in the add/sub (extended register) sheet, but with
+# invalid shift amount or "opt" field.
+# RUN: echo "0x00 0x10 0xa0 0x0b" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x10 0x60 0x0b" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x14 0x20 0x0b" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+
+# MOVK with sf == 0 and hw<1> == 1 is unallocated.
+# RUN: echo "0x00 0x00 0xc0 0x72" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+
+# ADD/SUB (shifted register) are reserved if shift == '11' or sf == '0' and imm6<5> == '1'.
+# RUN: echo "0x00 0x00 0xc0 0xeb" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x80 0x80 0x6b" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+
+# UBFM is undefined when sf == 0 and imms<5> or immr<5> is 1.
+# RUN: echo "0x00 0x80 0x00 0x53" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+
+# EXT on vectors of i8 must have imm<3> = 0.
+# RUN: echo "0x00 0x40 0x00 0x2e" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+
+# SCVTF on fixed point W-registers is undefined if scale<5> == 0.
+# Same with FCVTZS and FCVTZU.
+# RUN: echo "0x00 0x00 0x02 0x1e" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+# RUN: echo "0x00 0x00 0x18 0x1e" | llvm-mc -triple=arm64 -disassemble 2>&1 | FileCheck %s
+
+# CHECK: invalid instruction encoding
diff --git a/test/MC/Disassembler/AArch64/arm64-bitfield.txt b/test/MC/Disassembler/AArch64/arm64-bitfield.txt
new file mode 100644
index 000000000000..d620cb3b2176
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-bitfield.txt
@@ -0,0 +1,29 @@
+# RUN: llvm-mc -triple arm64-apple-darwin --disassemble < %s | FileCheck %s
+
+#==---------------------------------------------------------------------------==
+# 5.4.4 Bitfield Operations
+#==---------------------------------------------------------------------------==
+
+0x41 0x3c 0x01 0x33
+0x41 0x3c 0x41 0xb3
+0x41 0x3c 0x01 0x13
+0x41 0x3c 0x41 0x93
+0x41 0x3c 0x01 0x53
+0x41 0x3c 0x41 0xd3
+
+# CHECK: bfxil w1, w2, #1, #15
+# CHECK: bfxil x1, x2, #1, #15
+# CHECK: sbfx w1, w2, #1, #15
+# CHECK: sbfx x1, x2, #1, #15
+# CHECK: ubfx w1, w2, #1, #15
+# CHECK: ubfx x1, x2, #1, #15
+
+#==---------------------------------------------------------------------------==
+# 5.4.5 Extract (immediate)
+#==---------------------------------------------------------------------------==
+
+0x41 0x3c 0x83 0x13
+0x62 0x04 0xc4 0x93
+
+# CHECK: extr w1, w2, w3, #15
+# CHECK: extr x2, x3, x4, #1
diff --git a/test/MC/Disassembler/AArch64/arm64-branch.txt b/test/MC/Disassembler/AArch64/arm64-branch.txt
new file mode 100644
index 000000000000..6af1ad886a43
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-branch.txt
@@ -0,0 +1,75 @@
+# RUN: llvm-mc -triple arm64-apple-darwin --disassemble < %s | FileCheck %s
+
+#-----------------------------------------------------------------------------
+# Unconditional branch (register) instructions.
+#-----------------------------------------------------------------------------
+
+ 0xc0 0x03 0x5f 0xd6
+# CHECK: ret
+ 0x20 0x00 0x5f 0xd6
+# CHECK: ret x1
+ 0xe0 0x03 0xbf 0xd6
+# CHECK: drps
+ 0xe0 0x03 0x9f 0xd6
+# CHECK: eret
+ 0xa0 0x00 0x1f 0xd6
+# CHECK: br x5
+ 0x20 0x01 0x3f 0xd6
+# CHECK: blr x9
+ 0x0B 0x00 0x18 0x37
+# CHECK: tbnz w11, #3, #0
+
+#-----------------------------------------------------------------------------
+# Exception generation instructions.
+#-----------------------------------------------------------------------------
+
+ 0x20 0x00 0x20 0xd4
+# CHECK: brk #0x1
+ 0x41 0x00 0xa0 0xd4
+# CHECK: dcps1 #0x2
+ 0x62 0x00 0xa0 0xd4
+# CHECK: dcps2 #0x3
+ 0x83 0x00 0xa0 0xd4
+# CHECK: dcps3 #0x4
+ 0xa0 0x00 0x40 0xd4
+# CHECK: hlt #0x5
+ 0xc2 0x00 0x00 0xd4
+# CHECK: hvc #0x6
+ 0xe3 0x00 0x00 0xd4
+# CHECK: smc #0x7
+ 0x01 0x01 0x00 0xd4
+# CHECK: svc #0x8
+
+#-----------------------------------------------------------------------------
+# PC-relative branches (both positive and negative displacement)
+#-----------------------------------------------------------------------------
+
+ 0x07 0x00 0x00 0x14
+# CHECK: b #28
+ 0x06 0x00 0x00 0x94
+# CHECK: bl #24
+ 0xa1 0x00 0x00 0x54
+# CHECK: b.ne #20
+ 0x80 0x00 0x08 0x36
+# CHECK: tbz w0, #1, #16
+ 0xe1 0xff 0xf7 0x36
+# CHECK: tbz w1, #30, #-4
+ 0x60 0x00 0x08 0x37
+# CHECK: tbnz w0, #1, #12
+ 0x40 0x00 0x00 0xb4
+# CHECK: cbz x0, #8
+ 0x20 0x00 0x00 0xb5
+# CHECK: cbnz x0, #4
+ 0x1f 0x20 0x03 0xd5
+# CHECK: nop
+ 0xff 0xff 0xff 0x17
+# CHECK: b #-4
+ 0xc1 0xff 0xff 0x54
+# CHECK: b.ne #-8
+ 0xa0 0xff 0x0f 0x36
+# CHECK: tbz w0, #1, #-12
+ 0x80 0xff 0xff 0xb4
+# CHECK: cbz x0, #-16
+ 0x1f 0x20 0x03 0xd5
+# CHECK: nop
+
diff --git a/test/MC/Disassembler/AArch64/arm64-canonical-form.txt b/test/MC/Disassembler/AArch64/arm64-canonical-form.txt
new file mode 100644
index 000000000000..1c94b13b4acf
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-canonical-form.txt
@@ -0,0 +1,21 @@
+# RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon --disassemble < %s | FileCheck %s
+
+0x00 0x08 0x00 0xc8
+
+# CHECK: stxr w0, x0, [x0]
+
+0x00 0x00 0x40 0x9b
+
+# CHECK: smulh x0, x0, x0
+
+0x08 0x20 0x21 0x1e
+
+# CHECK: fcmp s0, #0.0
+
+0x1f 0x00 0x00 0x11
+
+# CHECK: mov wsp, w0
+
+0x00 0x7c 0x00 0x13
+
+# CHECK: asr w0, w0, #0
diff --git a/test/MC/Disassembler/AArch64/arm64-crc32.txt b/test/MC/Disassembler/AArch64/arm64-crc32.txt
new file mode 100644
index 000000000000..51717ee28627
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-crc32.txt
@@ -0,0 +1,18 @@
+# RUN: llvm-mc -triple=arm64 -mattr=+crc -disassemble < %s | FileCheck %s
+
+# CHECK: crc32b w5, w7, w20
+# CHECK: crc32h w28, wzr, w30
+# CHECK: crc32w w0, w1, w2
+# CHECK: crc32x w7, w9, x20
+# CHECK: crc32cb w9, w5, w4
+# CHECK: crc32ch w13, w17, w25
+# CHECK: crc32cw wzr, w3, w5
+# CHECK: crc32cx w18, w16, xzr
+0xe5 0x40 0xd4 0x1a
+0xfc 0x47 0xde 0x1a
+0x20 0x48 0xc2 0x1a
+0x27 0x4d 0xd4 0x9a
+0xa9 0x50 0xc4 0x1a
+0x2d 0x56 0xd9 0x1a
+0x7f 0x58 0xc5 0x1a
+0x12 0x5e 0xdf 0x9a
diff --git a/test/MC/Disassembler/AArch64/arm64-crypto.txt b/test/MC/Disassembler/AArch64/arm64-crypto.txt
new file mode 100644
index 000000000000..b905b92c636c
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-crypto.txt
@@ -0,0 +1,47 @@
+# RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto --disassemble < %s | FileCheck %s
+# RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -output-asm-variant=1 --disassemble < %s | FileCheck %s --check-prefix=CHECK-APPLE
+
+ 0x20 0x48 0x28 0x4e
+ 0x20 0x58 0x28 0x4e
+ 0x20 0x68 0x28 0x4e
+ 0x20 0x78 0x28 0x4e
+ 0x20 0x00 0x02 0x5e
+ 0x20 0x10 0x02 0x5e
+ 0x20 0x20 0x02 0x5e
+ 0x20 0x30 0x02 0x5e
+ 0x20 0x40 0x02 0x5e
+ 0x20 0x50 0x02 0x5e
+ 0x20 0x60 0x02 0x5e
+ 0x20 0x08 0x28 0x5e
+ 0x20 0x18 0x28 0x5e
+ 0x20 0x28 0x28 0x5e
+
+# CHECK: aese v0.16b, v1.16b
+# CHECK: aesd v0.16b, v1.16b
+# CHECK: aesmc v0.16b, v1.16b
+# CHECK: aesimc v0.16b, v1.16b
+# CHECK: sha1c q0, s1, v2.4s
+# CHECK: sha1p q0, s1, v2.4s
+# CHECK: sha1m q0, s1, v2.4s
+# CHECK: sha1su0 v0.4s, v1.4s, v2
+# CHECK: sha256h q0, q1, v2.4s
+# CHECK: sha256h2 q0, q1, v2.4s
+# CHECK: sha256su1 v0.4s, v1.4s, v2.4s
+# CHECK: sha1h s0, s1
+# CHECK: sha1su1 v0.4s, v1.4s
+# CHECK: sha256su0 v0.4s, v1.4s
+
+# CHECK-APPLE: aese.16b v0, v1
+# CHECK-APPLE: aesd.16b v0, v1
+# CHECK-APPLE: aesmc.16b v0, v1
+# CHECK-APPLE: aesimc.16b v0, v1
+# CHECK-APPLE: sha1c.4s q0, s1, v2
+# CHECK-APPLE: sha1p.4s q0, s1, v2
+# CHECK-APPLE: sha1m.4s q0, s1, v2
+# CHECK-APPLE: sha1su0.4s v0, v1, v2
+# CHECK-APPLE: sha256h.4s q0, q1, v2
+# CHECK-APPLE: sha256h2.4s q0, q1, v2
+# CHECK-APPLE: sha256su1.4s v0, v1, v2
+# CHECK-APPLE: sha1h s0, s1
+# CHECK-APPLE: sha1su1.4s v0, v1
+# CHECK-APPLE: sha256su0.4s v0, v1
diff --git a/test/MC/Disassembler/AArch64/arm64-invalid-logical.txt b/test/MC/Disassembler/AArch64/arm64-invalid-logical.txt
new file mode 100644
index 000000000000..8a4ecb664ed9
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-invalid-logical.txt
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -triple arm64-apple-darwin -disassemble < %s 2>&1 | FileCheck %s
+
+# rdar://15226511
+0x7b 0xbf 0x25 0x72
+# CHECK: invalid instruction encoding
+# CHECK-NEXT: 0x7b 0xbf 0x25 0x72
diff --git a/test/MC/Disassembler/AArch64/arm64-logical.txt b/test/MC/Disassembler/AArch64/arm64-logical.txt
new file mode 100644
index 000000000000..e3cb3ebe7e00
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-logical.txt
@@ -0,0 +1,223 @@
+# RUN: llvm-mc -triple arm64-apple-darwin --disassemble < %s | FileCheck %s
+
+#==---------------------------------------------------------------------------==
+# 5.4.2 Logical (immediate)
+#==---------------------------------------------------------------------------==
+
+0x00 0x00 0x00 0x12
+0x00 0x00 0x40 0x92
+0x41 0x0c 0x00 0x12
+0x41 0x0c 0x40 0x92
+0xbf 0xec 0x7c 0x92
+0x00 0x00 0x00 0x72
+0x00 0x00 0x40 0xf2
+0x41 0x0c 0x00 0x72
+0x41 0x0c 0x40 0xf2
+0x5f 0x0c 0x40 0xf2
+
+# CHECK: and w0, w0, #0x1
+# CHECK: and x0, x0, #0x1
+# CHECK: and w1, w2, #0xf
+# CHECK: and x1, x2, #0xf
+# CHECK: and sp, x5, #0xfffffffffffffff0
+# CHECK: ands w0, w0, #0x1
+# CHECK: ands x0, x0, #0x1
+# CHECK: ands w1, w2, #0xf
+# CHECK: ands x1, x2, #0xf
+# CHECK: tst x2, #0xf
+
+0x41 0x00 0x12 0x52
+0x41 0x00 0x71 0xd2
+0x5f 0x00 0x71 0xd2
+
+# CHECK: eor w1, w2, #0x4000
+# CHECK: eor x1, x2, #0x8000
+# CHECK: eor sp, x2, #0x8000
+
+0x41 0x00 0x12 0x32
+0x41 0x00 0x71 0xb2
+0x5f 0x00 0x71 0xb2
+
+# CHECK: orr w1, w2, #0x4000
+# CHECK: orr x1, x2, #0x8000
+# CHECK: orr sp, x2, #0x8000
+
+#==---------------------------------------------------------------------------==
+# 5.5.3 Logical (shifted register)
+#==---------------------------------------------------------------------------==
+
+0x41 0x00 0x03 0x0a
+0x41 0x00 0x03 0x8a
+0x41 0x08 0x03 0x0a
+0x41 0x08 0x03 0x8a
+0x41 0x08 0x43 0x0a
+0x41 0x08 0x43 0x8a
+0x41 0x08 0x83 0x0a
+0x41 0x08 0x83 0x8a
+0x41 0x08 0xc3 0x0a
+0x41 0x08 0xc3 0x8a
+
+# CHECK: and w1, w2, w3
+# CHECK: and x1, x2, x3
+# CHECK: and w1, w2, w3, lsl #2
+# CHECK: and x1, x2, x3, lsl #2
+# CHECK: and w1, w2, w3, lsr #2
+# CHECK: and x1, x2, x3, lsr #2
+# CHECK: and w1, w2, w3, asr #2
+# CHECK: and x1, x2, x3, asr #2
+# CHECK: and w1, w2, w3, ror #2
+# CHECK: and x1, x2, x3, ror #2
+
+0x41 0x00 0x03 0x6a
+0x41 0x00 0x03 0xea
+0x41 0x08 0x03 0x6a
+0x41 0x08 0x03 0xea
+0x41 0x08 0x43 0x6a
+0x41 0x08 0x43 0xea
+0x41 0x08 0x83 0x6a
+0x41 0x08 0x83 0xea
+0x41 0x08 0xc3 0x6a
+0x41 0x08 0xc3 0xea
+
+# CHECK: ands w1, w2, w3
+# CHECK: ands x1, x2, x3
+# CHECK: ands w1, w2, w3, lsl #2
+# CHECK: ands x1, x2, x3, lsl #2
+# CHECK: ands w1, w2, w3, lsr #2
+# CHECK: ands x1, x2, x3, lsr #2
+# CHECK: ands w1, w2, w3, asr #2
+# CHECK: ands x1, x2, x3, asr #2
+# CHECK: ands w1, w2, w3, ror #2
+# CHECK: ands x1, x2, x3, ror #2
+
+0x41 0x00 0x23 0x0a
+0x41 0x00 0x23 0x8a
+0x41 0x0c 0x23 0x0a
+0x41 0x0c 0x23 0x8a
+0x41 0x0c 0x63 0x0a
+0x41 0x0c 0x63 0x8a
+0x41 0x0c 0xa3 0x0a
+0x41 0x0c 0xa3 0x8a
+0x41 0x0c 0xe3 0x0a
+0x41 0x0c 0xe3 0x8a
+
+# CHECK: bic w1, w2, w3
+# CHECK: bic x1, x2, x3
+# CHECK: bic w1, w2, w3, lsl #3
+# CHECK: bic x1, x2, x3, lsl #3
+# CHECK: bic w1, w2, w3, lsr #3
+# CHECK: bic x1, x2, x3, lsr #3
+# CHECK: bic w1, w2, w3, asr #3
+# CHECK: bic x1, x2, x3, asr #3
+# CHECK: bic w1, w2, w3, ror #3
+# CHECK: bic x1, x2, x3, ror #3
+
+0x41 0x00 0x23 0x6a
+0x41 0x00 0x23 0xea
+0x41 0x0c 0x23 0x6a
+0x41 0x0c 0x23 0xea
+0x41 0x0c 0x63 0x6a
+0x41 0x0c 0x63 0xea
+0x41 0x0c 0xa3 0x6a
+0x41 0x0c 0xa3 0xea
+0x41 0x0c 0xe3 0x6a
+0x41 0x0c 0xe3 0xea
+
+# CHECK: bics w1, w2, w3
+# CHECK: bics x1, x2, x3
+# CHECK: bics w1, w2, w3, lsl #3
+# CHECK: bics x1, x2, x3, lsl #3
+# CHECK: bics w1, w2, w3, lsr #3
+# CHECK: bics x1, x2, x3, lsr #3
+# CHECK: bics w1, w2, w3, asr #3
+# CHECK: bics x1, x2, x3, asr #3
+# CHECK: bics w1, w2, w3, ror #3
+# CHECK: bics x1, x2, x3, ror #3
+
+0x41 0x00 0x23 0x4a
+0x41 0x00 0x23 0xca
+0x41 0x10 0x23 0x4a
+0x41 0x10 0x23 0xca
+0x41 0x10 0x63 0x4a
+0x41 0x10 0x63 0xca
+0x41 0x10 0xa3 0x4a
+0x41 0x10 0xa3 0xca
+0x41 0x10 0xe3 0x4a
+0x41 0x10 0xe3 0xca
+
+# CHECK: eon w1, w2, w3
+# CHECK: eon x1, x2, x3
+# CHECK: eon w1, w2, w3, lsl #4
+# CHECK: eon x1, x2, x3, lsl #4
+# CHECK: eon w1, w2, w3, lsr #4
+# CHECK: eon x1, x2, x3, lsr #4
+# CHECK: eon w1, w2, w3, asr #4
+# CHECK: eon x1, x2, x3, asr #4
+# CHECK: eon w1, w2, w3, ror #4
+# CHECK: eon x1, x2, x3, ror #4
+
+0x41 0x00 0x03 0x4a
+0x41 0x00 0x03 0xca
+0x41 0x14 0x03 0x4a
+0x41 0x14 0x03 0xca
+0x41 0x14 0x43 0x4a
+0x41 0x14 0x43 0xca
+0x41 0x14 0x83 0x4a
+0x41 0x14 0x83 0xca
+0x41 0x14 0xc3 0x4a
+0x41 0x14 0xc3 0xca
+
+# CHECK: eor w1, w2, w3
+# CHECK: eor x1, x2, x3
+# CHECK: eor w1, w2, w3, lsl #5
+# CHECK: eor x1, x2, x3, lsl #5
+# CHECK: eor w1, w2, w3, lsr #5
+# CHECK: eor x1, x2, x3, lsr #5
+# CHECK: eor w1, w2, w3, asr #5
+# CHECK: eor x1, x2, x3, asr #5
+# CHECK: eor w1, w2, w3, ror #5
+# CHECK: eor x1, x2, x3, ror #5
+
+0x41 0x00 0x03 0x2a
+0x41 0x00 0x03 0xaa
+0x41 0x18 0x03 0x2a
+0x41 0x18 0x03 0xaa
+0x41 0x18 0x43 0x2a
+0x41 0x18 0x43 0xaa
+0x41 0x18 0x83 0x2a
+0x41 0x18 0x83 0xaa
+0x41 0x18 0xc3 0x2a
+0x41 0x18 0xc3 0xaa
+
+# CHECK: orr w1, w2, w3
+# CHECK: orr x1, x2, x3
+# CHECK: orr w1, w2, w3, lsl #6
+# CHECK: orr x1, x2, x3, lsl #6
+# CHECK: orr w1, w2, w3, lsr #6
+# CHECK: orr x1, x2, x3, lsr #6
+# CHECK: orr w1, w2, w3, asr #6
+# CHECK: orr x1, x2, x3, asr #6
+# CHECK: orr w1, w2, w3, ror #6
+# CHECK: orr x1, x2, x3, ror #6
+
+0x41 0x00 0x23 0x2a
+0x41 0x00 0x23 0xaa
+0x41 0x1c 0x23 0x2a
+0x41 0x1c 0x23 0xaa
+0x41 0x1c 0x63 0x2a
+0x41 0x1c 0x63 0xaa
+0x41 0x1c 0xa3 0x2a
+0x41 0x1c 0xa3 0xaa
+0x41 0x1c 0xe3 0x2a
+0x41 0x1c 0xe3 0xaa
+
+# CHECK: orn w1, w2, w3
+# CHECK: orn x1, x2, x3
+# CHECK: orn w1, w2, w3, lsl #7
+# CHECK: orn x1, x2, x3, lsl #7
+# CHECK: orn w1, w2, w3, lsr #7
+# CHECK: orn x1, x2, x3, lsr #7
+# CHECK: orn w1, w2, w3, asr #7
+# CHECK: orn x1, x2, x3, asr #7
+# CHECK: orn w1, w2, w3, ror #7
+# CHECK: orn x1, x2, x3, ror #7
diff --git a/test/MC/Disassembler/AArch64/arm64-memory.txt b/test/MC/Disassembler/AArch64/arm64-memory.txt
new file mode 100644
index 000000000000..54556a10b8a8
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-memory.txt
@@ -0,0 +1,564 @@
+# RUN: llvm-mc --disassemble -triple arm64-apple-darwin < %s | FileCheck %s
+
+#-----------------------------------------------------------------------------
+# Indexed loads
+#-----------------------------------------------------------------------------
+
+ 0x85 0x14 0x40 0xb9
+ 0x64 0x00 0x40 0xf9
+ 0xe2 0x13 0x40 0xf9
+ 0xe5 0x07 0x40 0x3d
+ 0xe6 0x07 0x40 0x7d
+ 0xe7 0x07 0x40 0xbd
+ 0xe8 0x07 0x40 0xfd
+ 0xe9 0x07 0xc0 0x3d
+ 0x64 0x00 0x40 0x39
+ 0x20 0x78 0xa0 0xb8
+ 0x85 0x50 0x40 0x39
+
+# CHECK: ldr w5, [x4, #20]
+# CHECK: ldr x4, [x3]
+# CHECK: ldr x2, [sp, #32]
+# CHECK: ldr b5, [sp, #1]
+# CHECK: ldr h6, [sp, #2]
+# CHECK: ldr s7, [sp, #4]
+# CHECK: ldr d8, [sp, #8]
+# CHECK: ldr q9, [sp, #16]
+# CHECK: ldrb w4, [x3]
+# CHECK: ldrsw x0, [x1, x0, lsl #2]
+# CHECK: ldrb w5, [x4, #20]
+# CHECK: ldrsb w9, [x3]
+# CHECK: ldrsb x2, [sp, #128]
+# CHECK: ldrh w2, [sp, #32]
+# CHECK: ldrsh w3, [sp, #32]
+# CHECK: ldrsh x5, [x9, #24]
+# CHECK: ldrsw x9, [sp, #512]
+# CHECK: prfm pldl3strm, [sp, #32]
+
+ 0x69 0x00 0xc0 0x39
+ 0xe2 0x03 0x82 0x39
+ 0xe2 0x43 0x40 0x79
+ 0xe3 0x43 0xc0 0x79
+ 0x25 0x31 0x80 0x79
+ 0xe9 0x03 0x82 0xb9
+ 0xe5 0x13 0x80 0xf9
+ 0x40 0x00 0x80 0xf9
+ 0x41 0x00 0x80 0xf9
+ 0x42 0x00 0x80 0xf9
+ 0x43 0x00 0x80 0xf9
+ 0x44 0x00 0x80 0xf9
+ 0x45 0x00 0x80 0xf9
+ 0x50 0x00 0x80 0xf9
+ 0x51 0x00 0x80 0xf9
+ 0x52 0x00 0x80 0xf9
+ 0x53 0x00 0x80 0xf9
+ 0x54 0x00 0x80 0xf9
+ 0x55 0x00 0x80 0xf9
+
+# CHECK: prfm pldl1keep, [x2]
+# CHECK: prfm pldl1strm, [x2]
+# CHECK: prfm pldl2keep, [x2]
+# CHECK: prfm pldl2strm, [x2]
+# CHECK: prfm pldl3keep, [x2]
+# CHECK: prfm pldl3strm, [x2]
+# CHECK: prfm pstl1keep, [x2]
+# CHECK: prfm pstl1strm, [x2]
+# CHECK: prfm pstl2keep, [x2]
+# CHECK: prfm pstl2strm, [x2]
+# CHECK: prfm pstl3keep, [x2]
+# CHECK: prfm pstl3strm, [x2]
+
+#-----------------------------------------------------------------------------
+# Indexed stores
+#-----------------------------------------------------------------------------
+
+ 0x64 0x00 0x00 0xf9
+ 0xe2 0x13 0x00 0xf9
+ 0x85 0x14 0x00 0xb9
+ 0xe5 0x07 0x00 0x3d
+ 0xe6 0x07 0x00 0x7d
+ 0xe7 0x07 0x00 0xbd
+ 0xe8 0x07 0x00 0xfd
+ 0xe9 0x07 0x80 0x3d
+ 0x64 0x00 0x00 0x39
+ 0x85 0x50 0x00 0x39
+ 0xe2 0x43 0x00 0x79
+ 0x00 0xe8 0x20 0x38
+ 0x00 0x48 0x20 0x38
+
+# CHECK: str x4, [x3]
+# CHECK: str x2, [sp, #32]
+# CHECK: str w5, [x4, #20]
+# CHECK: str b5, [sp, #1]
+# CHECK: str h6, [sp, #2]
+# CHECK: str s7, [sp, #4]
+# CHECK: str d8, [sp, #8]
+# CHECK: str q9, [sp, #16]
+# CHECK: strb w4, [x3]
+# CHECK: strb w5, [x4, #20]
+# CHECK: strh w2, [sp, #32]
+# CHECK: strb w0, [x0, x0, sxtx]
+# CHECK: strb w0, [x0, w0, uxtw]
+
+#-----------------------------------------------------------------------------
+# Unscaled immediate loads and stores
+#-----------------------------------------------------------------------------
+
+ 0x62 0x00 0x40 0xb8
+ 0xe2 0x83 0x41 0xb8
+ 0x62 0x00 0x40 0xf8
+ 0xe2 0x83 0x41 0xf8
+ 0xe5 0x13 0x40 0x3c
+ 0xe6 0x23 0x40 0x7c
+ 0xe7 0x43 0x40 0xbc
+ 0xe8 0x83 0x40 0xfc
+ 0xe9 0x03 0xc1 0x3c
+ 0x69 0x00 0xc0 0x38
+ 0xe2 0x03 0x88 0x38
+ 0xe3 0x03 0xc2 0x78
+ 0x25 0x81 0x81 0x78
+ 0xe9 0x03 0x98 0xb8
+
+# CHECK: ldur w2, [x3]
+# CHECK: ldur w2, [sp, #24]
+# CHECK: ldur x2, [x3]
+# CHECK: ldur x2, [sp, #24]
+# CHECK: ldur b5, [sp, #1]
+# CHECK: ldur h6, [sp, #2]
+# CHECK: ldur s7, [sp, #4]
+# CHECK: ldur d8, [sp, #8]
+# CHECK: ldur q9, [sp, #16]
+# CHECK: ldursb w9, [x3]
+# CHECK: ldursb x2, [sp, #128]
+# CHECK: ldursh w3, [sp, #32]
+# CHECK: ldursh x5, [x9, #24]
+# CHECK: ldursw x9, [sp, #-128]
+
+ 0x64 0x00 0x00 0xb8
+ 0xe2 0x03 0x02 0xb8
+ 0x64 0x00 0x00 0xf8
+ 0xe2 0x03 0x02 0xf8
+ 0x85 0x40 0x01 0xb8
+ 0xe5 0x13 0x00 0x3c
+ 0xe6 0x23 0x00 0x7c
+ 0xe7 0x43 0x00 0xbc
+ 0xe8 0x83 0x00 0xfc
+ 0xe9 0x03 0x81 0x3c
+ 0x64 0x00 0x00 0x38
+ 0x85 0x40 0x01 0x38
+ 0xe2 0x03 0x02 0x78
+ 0xe5 0x03 0x82 0xf8
+
+# CHECK: stur w4, [x3]
+# CHECK: stur w2, [sp, #32]
+# CHECK: stur x4, [x3]
+# CHECK: stur x2, [sp, #32]
+# CHECK: stur w5, [x4, #20]
+# CHECK: stur b5, [sp, #1]
+# CHECK: stur h6, [sp, #2]
+# CHECK: stur s7, [sp, #4]
+# CHECK: stur d8, [sp, #8]
+# CHECK: stur q9, [sp, #16]
+# CHECK: sturb w4, [x3]
+# CHECK: sturb w5, [x4, #20]
+# CHECK: sturh w2, [sp, #32]
+# CHECK: prfum pldl3strm, [sp, #32]
+
+#-----------------------------------------------------------------------------
+# Unprivileged loads and stores
+#-----------------------------------------------------------------------------
+
+ 0x83 0x08 0x41 0xb8
+ 0x83 0x08 0x41 0xf8
+ 0x83 0x08 0x41 0x38
+ 0x69 0x08 0xc0 0x38
+ 0xe2 0x0b 0x88 0x38
+ 0x83 0x08 0x41 0x78
+ 0xe3 0x0b 0xc2 0x78
+ 0x25 0x89 0x81 0x78
+ 0xe9 0x0b 0x98 0xb8
+
+# CHECK: ldtr w3, [x4, #16]
+# CHECK: ldtr x3, [x4, #16]
+# CHECK: ldtrb w3, [x4, #16]
+# CHECK: ldtrsb w9, [x3]
+# CHECK: ldtrsb x2, [sp, #128]
+# CHECK: ldtrh w3, [x4, #16]
+# CHECK: ldtrsh w3, [sp, #32]
+# CHECK: ldtrsh x5, [x9, #24]
+# CHECK: ldtrsw x9, [sp, #-128]
+
+ 0x85 0x48 0x01 0xb8
+ 0x64 0x08 0x00 0xf8
+ 0xe2 0x0b 0x02 0xf8
+ 0x64 0x08 0x00 0x38
+ 0x85 0x48 0x01 0x38
+ 0xe2 0x0b 0x02 0x78
+
+# CHECK: sttr w5, [x4, #20]
+# CHECK: sttr x4, [x3]
+# CHECK: sttr x2, [sp, #32]
+# CHECK: sttrb w4, [x3]
+# CHECK: sttrb w5, [x4, #20]
+# CHECK: sttrh w2, [sp, #32]
+
+#-----------------------------------------------------------------------------
+# Pre-indexed loads and stores
+#-----------------------------------------------------------------------------
+
+ 0xfd 0x8c 0x40 0xf8
+ 0xfe 0x8c 0x40 0xf8
+ 0x05 0x1c 0x40 0x3c
+ 0x06 0x2c 0x40 0x7c
+ 0x07 0x4c 0x40 0xbc
+ 0x08 0x8c 0x40 0xfc
+ 0x09 0x0c 0xc1 0x3c
+
+# CHECK: ldr x29, [x7, #8]!
+# CHECK: ldr x30, [x7, #8]!
+# CHECK: ldr b5, [x0, #1]!
+# CHECK: ldr h6, [x0, #2]!
+# CHECK: ldr s7, [x0, #4]!
+# CHECK: ldr d8, [x0, #8]!
+# CHECK: ldr q9, [x0, #16]!
+
+ 0xfe 0x8c 0x1f 0xf8
+ 0xfd 0x8c 0x1f 0xf8
+ 0x05 0xfc 0x1f 0x3c
+ 0x06 0xec 0x1f 0x7c
+ 0x07 0xcc 0x1f 0xbc
+ 0x08 0x8c 0x1f 0xfc
+ 0x09 0x0c 0x9f 0x3c
+
+# CHECK: str x30, [x7, #-8]!
+# CHECK: str x29, [x7, #-8]!
+# CHECK: str b5, [x0, #-1]!
+# CHECK: str h6, [x0, #-2]!
+# CHECK: str s7, [x0, #-4]!
+# CHECK: str d8, [x0, #-8]!
+# CHECK: str q9, [x0, #-16]!
+
+#-----------------------------------------------------------------------------
+# post-indexed loads and stores
+#-----------------------------------------------------------------------------
+
+ 0xfe 0x84 0x1f 0xf8
+ 0xfd 0x84 0x1f 0xf8
+ 0x05 0xf4 0x1f 0x3c
+ 0x06 0xe4 0x1f 0x7c
+ 0x07 0xc4 0x1f 0xbc
+ 0x08 0x84 0x1f 0xfc
+ 0x09 0x04 0x9f 0x3c
+
+# CHECK: str x30, [x7], #-8
+# CHECK: str x29, [x7], #-8
+# CHECK: str b5, [x0], #-1
+# CHECK: str h6, [x0], #-2
+# CHECK: str s7, [x0], #-4
+# CHECK: str d8, [x0], #-8
+# CHECK: str q9, [x0], #-16
+
+ 0xfd 0x84 0x40 0xf8
+ 0xfe 0x84 0x40 0xf8
+ 0x05 0x14 0x40 0x3c
+ 0x06 0x24 0x40 0x7c
+ 0x07 0x44 0x40 0xbc
+ 0x08 0x84 0x40 0xfc
+ 0x09 0x04 0xc1 0x3c
+
+# CHECK: ldr x29, [x7], #8
+# CHECK: ldr x30, [x7], #8
+# CHECK: ldr b5, [x0], #1
+# CHECK: ldr h6, [x0], #2
+# CHECK: ldr s7, [x0], #4
+# CHECK: ldr d8, [x0], #8
+# CHECK: ldr q9, [x0], #16
+
+#-----------------------------------------------------------------------------
+# Load/Store pair (indexed offset)
+#-----------------------------------------------------------------------------
+
+ 0xe3 0x09 0x42 0x29
+ 0xe4 0x27 0x7f 0xa9
+ 0xc2 0x0d 0x42 0x69
+ 0xe2 0x0f 0x7e 0x69
+ 0x4a 0x04 0x48 0x2d
+ 0x4a 0x04 0x40 0x6d
+
+# CHECK: ldp w3, w2, [x15, #16]
+# CHECK: ldp x4, x9, [sp, #-16]
+# CHECK: ldpsw x2, x3, [x14, #16]
+# CHECK: ldpsw x2, x3, [sp, #-16]
+# CHECK: ldp s10, s1, [x2, #64]
+# CHECK: ldp d10, d1, [x2]
+
+ 0xe3 0x09 0x02 0x29
+ 0xe4 0x27 0x3f 0xa9
+ 0x4a 0x04 0x08 0x2d
+ 0x4a 0x04 0x00 0x6d
+
+# CHECK: stp w3, w2, [x15, #16]
+# CHECK: stp x4, x9, [sp, #-16]
+# CHECK: stp s10, s1, [x2, #64]
+# CHECK: stp d10, d1, [x2]
+
+#-----------------------------------------------------------------------------
+# Load/Store pair (pre-indexed)
+#-----------------------------------------------------------------------------
+
+ 0xe3 0x09 0xc2 0x29
+ 0xe4 0x27 0xff 0xa9
+ 0xc2 0x0d 0xc2 0x69
+ 0xe2 0x0f 0xfe 0x69
+ 0x4a 0x04 0xc8 0x2d
+ 0x4a 0x04 0xc1 0x6d
+
+# CHECK: ldp w3, w2, [x15, #16]!
+# CHECK: ldp x4, x9, [sp, #-16]!
+# CHECK: ldpsw x2, x3, [x14, #16]!
+# CHECK: ldpsw x2, x3, [sp, #-16]!
+# CHECK: ldp s10, s1, [x2, #64]!
+# CHECK: ldp d10, d1, [x2, #16]!
+
+ 0xe3 0x09 0x82 0x29
+ 0xe4 0x27 0xbf 0xa9
+ 0x4a 0x04 0x88 0x2d
+ 0x4a 0x04 0x81 0x6d
+
+# CHECK: stp w3, w2, [x15, #16]!
+# CHECK: stp x4, x9, [sp, #-16]!
+# CHECK: stp s10, s1, [x2, #64]!
+# CHECK: stp d10, d1, [x2, #16]!
+
+#-----------------------------------------------------------------------------
+# Load/Store pair (post-indexed)
+#-----------------------------------------------------------------------------
+
+ 0xe3 0x09 0xc2 0x28
+ 0xe4 0x27 0xff 0xa8
+ 0xc2 0x0d 0xc2 0x68
+ 0xe2 0x0f 0xfe 0x68
+ 0x4a 0x04 0xc8 0x2c
+ 0x4a 0x04 0xc1 0x6c
+
+# CHECK: ldp w3, w2, [x15], #16
+# CHECK: ldp x4, x9, [sp], #-16
+# CHECK: ldpsw x2, x3, [x14], #16
+# CHECK: ldpsw x2, x3, [sp], #-16
+# CHECK: ldp s10, s1, [x2], #64
+# CHECK: ldp d10, d1, [x2], #16
+
+ 0xe3 0x09 0x82 0x28
+ 0xe4 0x27 0xbf 0xa8
+ 0x4a 0x04 0x88 0x2c
+ 0x4a 0x04 0x81 0x6c
+
+# CHECK: stp w3, w2, [x15], #16
+# CHECK: stp x4, x9, [sp], #-16
+# CHECK: stp s10, s1, [x2], #64
+# CHECK: stp d10, d1, [x2], #16
+
+#-----------------------------------------------------------------------------
+# Load/Store pair (no-allocate)
+#-----------------------------------------------------------------------------
+
+ 0xe3 0x09 0x42 0x28
+ 0xe4 0x27 0x7f 0xa8
+ 0x4a 0x04 0x48 0x2c
+ 0x4a 0x04 0x40 0x6c
+
+# CHECK: ldnp w3, w2, [x15, #16]
+# CHECK: ldnp x4, x9, [sp, #-16]
+# CHECK: ldnp s10, s1, [x2, #64]
+# CHECK: ldnp d10, d1, [x2]
+
+ 0xe3 0x09 0x02 0x28
+ 0xe4 0x27 0x3f 0xa8
+ 0x4a 0x04 0x08 0x2c
+ 0x4a 0x04 0x00 0x6c
+
+# CHECK: stnp w3, w2, [x15, #16]
+# CHECK: stnp x4, x9, [sp, #-16]
+# CHECK: stnp s10, s1, [x2, #64]
+# CHECK: stnp d10, d1, [x2]
+
+#-----------------------------------------------------------------------------
+# Load/Store register offset
+#-----------------------------------------------------------------------------
+
+ 0x00 0x68 0x60 0xb8
+ 0x00 0x78 0x60 0xb8
+ 0x00 0x68 0x60 0xf8
+ 0x00 0x78 0x60 0xf8
+ 0x00 0xe8 0x60 0xf8
+
+# CHECK: ldr w0, [x0, x0]
+# CHECK: ldr w0, [x0, x0, lsl #2]
+# CHECK: ldr x0, [x0, x0]
+# CHECK: ldr x0, [x0, x0, lsl #3]
+# CHECK: ldr x0, [x0, x0, sxtx]
+
+ 0x21 0x68 0x62 0x3c
+ 0x21 0x78 0x62 0x3c
+ 0x21 0x68 0x62 0x7c
+ 0x21 0x78 0x62 0x7c
+ 0x21 0x68 0x62 0xbc
+ 0x21 0x78 0x62 0xbc
+ 0x21 0x68 0x62 0xfc
+ 0x21 0x78 0x62 0xfc
+ 0x21 0x68 0xe2 0x3c
+ 0x21 0x78 0xe2 0x3c
+
+# CHECK: ldr b1, [x1, x2]
+# CHECK: ldr b1, [x1, x2, lsl #0]
+# CHECK: ldr h1, [x1, x2]
+# CHECK: ldr h1, [x1, x2, lsl #1]
+# CHECK: ldr s1, [x1, x2]
+# CHECK: ldr s1, [x1, x2, lsl #2]
+# CHECK: ldr d1, [x1, x2]
+# CHECK: ldr d1, [x1, x2, lsl #3]
+# CHECK: ldr q1, [x1, x2]
+# CHECK: ldr q1, [x1, x2, lsl #4]
+
+ 0x00 0x48 0x20 0x7c
+ 0xe1 0x6b 0x23 0xfc
+ 0xe1 0x5b 0x23 0xfc
+ 0xe1 0x6b 0xa3 0x3c
+ 0xe1 0x5b 0xa3 0x3c
+
+# CHECK: str h0, [x0, w0, uxtw]
+# CHECK: str d1, [sp, x3]
+# CHECK: str d1, [sp, w3, uxtw #3]
+# CHECK: str q1, [sp, x3]
+# CHECK: str q1, [sp, w3, uxtw #4]
+
+#-----------------------------------------------------------------------------
+# Load/Store exclusive
+#-----------------------------------------------------------------------------
+
+ 0x26 0x7c 0x5f 0x08
+ 0x26 0x7c 0x5f 0x48
+ 0x27 0x0d 0x7f 0x88
+ 0x27 0x0d 0x7f 0xc8
+
+# CHECK: ldxrb w6, [x1]
+# CHECK: ldxrh w6, [x1]
+# CHECK: ldxp w7, w3, [x9]
+# CHECK: ldxp x7, x3, [x9]
+
+ 0x64 0x7c 0x01 0xc8
+ 0x64 0x7c 0x01 0x88
+ 0x64 0x7c 0x01 0x08
+ 0x64 0x7c 0x01 0x48
+ 0x22 0x18 0x21 0xc8
+ 0x22 0x18 0x21 0x88
+
+# CHECK: stxr w1, x4, [x3]
+# CHECK: stxr w1, w4, [x3]
+# CHECK: stxrb w1, w4, [x3]
+# CHECK: stxrh w1, w4, [x3]
+# CHECK: stxp w1, x2, x6, [x1]
+# CHECK: stxp w1, w2, w6, [x1]
+
+#-----------------------------------------------------------------------------
+# Load-acquire/Store-release non-exclusive
+#-----------------------------------------------------------------------------
+
+ 0xe4 0xff 0xdf 0x88
+ 0xe4 0xff 0xdf 0xc8
+ 0xe4 0xff 0xdf 0x08
+ 0xe4 0xff 0xdf 0x48
+
+# CHECK: ldar w4, [sp]
+# CHECK: ldar x4, [sp]
+# CHECK: ldarb w4, [sp]
+# CHECK: ldarh w4, [sp]
+
+ 0xc3 0xfc 0x9f 0x88
+ 0xc3 0xfc 0x9f 0xc8
+ 0xc3 0xfc 0x9f 0x08
+ 0xc3 0xfc 0x9f 0x48
+
+# CHECK: stlr w3, [x6]
+# CHECK: stlr x3, [x6]
+# CHECK: stlrb w3, [x6]
+# CHECK: stlrh w3, [x6]
+
+#-----------------------------------------------------------------------------
+# Load-acquire/Store-release exclusive
+#-----------------------------------------------------------------------------
+
+ 0x82 0xfc 0x5f 0x88
+ 0x82 0xfc 0x5f 0xc8
+ 0x82 0xfc 0x5f 0x08
+ 0x82 0xfc 0x5f 0x48
+ 0x22 0x98 0x7f 0x88
+ 0x22 0x98 0x7f 0xc8
+
+# CHECK: ldaxr w2, [x4]
+# CHECK: ldaxr x2, [x4]
+# CHECK: ldaxrb w2, [x4]
+# CHECK: ldaxrh w2, [x4]
+# CHECK: ldaxp w2, w6, [x1]
+# CHECK: ldaxp x2, x6, [x1]
+
+ 0x27 0xfc 0x08 0xc8
+ 0x27 0xfc 0x08 0x88
+ 0x27 0xfc 0x08 0x08
+ 0x27 0xfc 0x08 0x48
+ 0x22 0x98 0x21 0xc8
+ 0x22 0x98 0x21 0x88
+
+# CHECK: stlxr w8, x7, [x1]
+# CHECK: stlxr w8, w7, [x1]
+# CHECK: stlxrb w8, w7, [x1]
+# CHECK: stlxrh w8, w7, [x1]
+# CHECK: stlxp w1, x2, x6, [x1]
+# CHECK: stlxp w1, w2, w6, [x1]
+
+#-----------------------------------------------------------------------------
+# Load/Store with explicit LSL values
+#-----------------------------------------------------------------------------
+ 0x20 0x78 0xa0 0xb8
+ 0x20 0x78 0x60 0xf8
+ 0x20 0x78 0x20 0xf8
+ 0x20 0x78 0x60 0xb8
+ 0x20 0x78 0x20 0xb8
+ 0x20 0x78 0xe0 0x3c
+ 0x20 0x78 0xa0 0x3c
+ 0x20 0x78 0x60 0xfc
+ 0x20 0x78 0x20 0xfc
+ 0x20 0x78 0x60 0xbc
+ 0x20 0x78 0x20 0xbc
+ 0x20 0x78 0x60 0x7c
+ 0x20 0x78 0x60 0x3c
+ 0x20 0x78 0x60 0x38
+ 0x20 0x78 0x20 0x38
+ 0x20 0x78 0xe0 0x38
+ 0x20 0x78 0x60 0x78
+ 0x20 0x78 0x20 0x78
+ 0x20 0x78 0xe0 0x78
+ 0x20 0x78 0xa0 0x38
+ 0x20 0x78 0xa0 0x78
+
+# CHECK: ldrsw x0, [x1, x0, lsl #2]
+# CHECK: ldr x0, [x1, x0, lsl #3]
+# CHECK: str x0, [x1, x0, lsl #3]
+# CHECK: ldr w0, [x1, x0, lsl #2]
+# CHECK: str w0, [x1, x0, lsl #2]
+# CHECK: ldr q0, [x1, x0, lsl #4]
+# CHECK: str q0, [x1, x0, lsl #4]
+# CHECK: ldr d0, [x1, x0, lsl #3]
+# CHECK: str d0, [x1, x0, lsl #3]
+# CHECK: ldr s0, [x1, x0, lsl #2]
+# CHECK: str s0, [x1, x0, lsl #2]
+# CHECK: ldr h0, [x1, x0, lsl #1]
+# CHECK: ldr b0, [x1, x0, lsl #0]
+# CHECK: ldrb w0, [x1, x0, lsl #0]
+# CHECK: strb w0, [x1, x0, lsl #0]
+# CHECK: ldrsb w0, [x1, x0, lsl #0]
+# CHECK: ldrh w0, [x1, x0, lsl #1]
+# CHECK: strh w0, [x1, x0, lsl #1]
+# CHECK: ldrsh w0, [x1, x0, lsl #1]
+# CHECK: ldrsb x0, [x1, x0, lsl #0]
+# CHECK: ldrsh x0, [x1, x0, lsl #1]
diff --git a/test/MC/Disassembler/AArch64/arm64-non-apple-fmov.txt b/test/MC/Disassembler/AArch64/arm64-non-apple-fmov.txt
new file mode 100644
index 000000000000..75cb95ce1863
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-non-apple-fmov.txt
@@ -0,0 +1,7 @@
+# RUN: llvm-mc -triple arm64 -mattr=neon -disassemble < %s | FileCheck %s
+
+0x00 0x00 0xae 0x9e
+0x00 0x00 0xaf 0x9e
+
+# CHECK: fmov x0, v0.d[1]
+# CHECK: fmov v0.d[1], x0
diff --git a/test/MC/Disassembler/AArch64/arm64-scalar-fp.txt b/test/MC/Disassembler/AArch64/arm64-scalar-fp.txt
new file mode 100644
index 000000000000..f139700164ca
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-scalar-fp.txt
@@ -0,0 +1,255 @@
+# RUN: llvm-mc -triple arm64-apple-darwin -mattr=neon --disassemble -output-asm-variant=1 < %s | FileCheck %s
+
+#-----------------------------------------------------------------------------
+# Floating-point arithmetic
+#-----------------------------------------------------------------------------
+
+0x41 0xc0 0x20 0x1e
+0x41 0xc0 0x60 0x1e
+
+# CHECK: fabs s1, s2
+# CHECK: fabs d1, d2
+
+0x41 0x28 0x23 0x1e
+0x41 0x28 0x63 0x1e
+
+# CHECK: fadd s1, s2, s3
+# CHECK: fadd d1, d2, d3
+
+0x41 0x18 0x23 0x1e
+0x41 0x18 0x63 0x1e
+
+# CHECK: fdiv s1, s2, s3
+# CHECK: fdiv d1, d2, d3
+
+0x41 0x10 0x03 0x1f
+0x41 0x10 0x43 0x1f
+
+# CHECK: fmadd s1, s2, s3, s4
+# CHECK: fmadd d1, d2, d3, d4
+
+0x41 0x48 0x23 0x1e
+0x41 0x48 0x63 0x1e
+0x41 0x68 0x23 0x1e
+0x41 0x68 0x63 0x1e
+
+# CHECK: fmax s1, s2, s3
+# CHECK: fmax d1, d2, d3
+# CHECK: fmaxnm s1, s2, s3
+# CHECK: fmaxnm d1, d2, d3
+
+0x41 0x58 0x23 0x1e
+0x41 0x58 0x63 0x1e
+0x41 0x78 0x23 0x1e
+0x41 0x78 0x63 0x1e
+
+# CHECK: fmin s1, s2, s3
+# CHECK: fmin d1, d2, d3
+# CHECK: fminnm s1, s2, s3
+# CHECK: fminnm d1, d2, d3
+
+0x41 0x90 0x03 0x1f
+0x41 0x90 0x43 0x1f
+
+# CHECK: fmsub s1, s2, s3, s4
+# CHECK: fmsub d1, d2, d3, d4
+
+0x41 0x08 0x23 0x1e
+0x41 0x08 0x63 0x1e
+
+# CHECK: fmul s1, s2, s3
+# CHECK: fmul d1, d2, d3
+
+0x41 0x40 0x21 0x1e
+0x41 0x40 0x61 0x1e
+
+# CHECK: fneg s1, s2
+# CHECK: fneg d1, d2
+
+0x41 0x10 0x23 0x1f
+0x41 0x10 0x63 0x1f
+
+# CHECK: fnmadd s1, s2, s3, s4
+# CHECK: fnmadd d1, d2, d3, d4
+
+0x41 0x90 0x23 0x1f
+0x41 0x90 0x63 0x1f
+
+# CHECK: fnmsub s1, s2, s3, s4
+# CHECK: fnmsub d1, d2, d3, d4
+
+0x41 0x88 0x23 0x1e
+0x41 0x88 0x63 0x1e
+
+# CHECK: fnmul s1, s2, s3
+# CHECK: fnmul d1, d2, d3
+
+0x41 0xc0 0x21 0x1e
+0x41 0xc0 0x61 0x1e
+
+# CHECK: fsqrt s1, s2
+# CHECK: fsqrt d1, d2
+
+0x41 0x38 0x23 0x1e
+0x41 0x38 0x63 0x1e
+
+# CHECK: fsub s1, s2, s3
+# CHECK: fsub d1, d2, d3
+
+#-----------------------------------------------------------------------------
+# Floating-point comparison
+#-----------------------------------------------------------------------------
+
+0x20 0x04 0x22 0x1e
+0x20 0x04 0x62 0x1e
+0x30 0x04 0x22 0x1e
+0x30 0x04 0x62 0x1e
+
+# CHECK: fccmp s1, s2, #0, eq
+# CHECK: fccmp d1, d2, #0, eq
+# CHECK: fccmpe s1, s2, #0, eq
+# CHECK: fccmpe d1, d2, #0, eq
+
+0x20 0x20 0x22 0x1e
+0x20 0x20 0x62 0x1e
+0x28 0x20 0x20 0x1e
+0x28 0x20 0x60 0x1e
+0x30 0x20 0x22 0x1e
+0x30 0x20 0x62 0x1e
+0x38 0x20 0x20 0x1e
+0x38 0x20 0x60 0x1e
+
+# CHECK: fcmp s1, s2
+# CHECK: fcmp d1, d2
+# CHECK: fcmp s1, #0.0
+# CHECK: fcmp d1, #0.0
+# CHECK: fcmpe s1, s2
+# CHECK: fcmpe d1, d2
+# CHECK: fcmpe s1, #0.0
+# CHECK: fcmpe d1, #0.0
+
+#-----------------------------------------------------------------------------
+# Floating-point conditional select
+#-----------------------------------------------------------------------------
+
+0x41 0x0c 0x23 0x1e
+0x41 0x0c 0x63 0x1e
+
+# CHECK: fcsel s1, s2, s3, eq
+# CHECK: fcsel d1, d2, d3, eq
+
+#-----------------------------------------------------------------------------
+# Floating-point convert
+#-----------------------------------------------------------------------------
+
+0x41 0xc0 0x63 0x1e
+0x41 0x40 0x62 0x1e
+0x41 0xc0 0xe2 0x1e
+0x41 0x40 0xe2 0x1e
+0x41 0xc0 0x22 0x1e
+0x41 0xc0 0x23 0x1e
+
+# CHECK: fcvt h1, d2
+# CHECK: fcvt s1, d2
+# CHECK: fcvt d1, h2
+# CHECK: fcvt s1, h2
+# CHECK: fcvt d1, s2
+# CHECK: fcvt h1, s2
+
+0x41 0x00 0x44 0x1e
+0x41 0x04 0x44 0x1e
+0x41 0x00 0x44 0x9e
+0x41 0x04 0x44 0x9e
+0x41 0x00 0x04 0x1e
+0x41 0x04 0x04 0x1e
+0x41 0x00 0x04 0x9e
+0x41 0x04 0x04 0x9e
+
+#-----------------------------------------------------------------------------
+# Floating-point move
+#-----------------------------------------------------------------------------
+
+0x41 0x00 0x27 0x1e
+0x41 0x00 0x26 0x1e
+0x41 0x00 0x67 0x9e
+0x41 0x00 0x66 0x9e
+
+# CHECK: fmov s1, w2
+# CHECK: fmov w1, s2
+# CHECK: fmov d1, x2
+# CHECK: fmov x1, d2
+
+0x01 0x10 0x28 0x1e
+0x01 0x10 0x68 0x1e
+0x01 0xf0 0x7b 0x1e
+0x01 0xf0 0x6b 0x1e
+
+# CHECK: fmov s1, #0.12500000
+# CHECK: fmov d1, #0.12500000
+# CHECK: fmov d1, #-0.48437500
+# CHECK: fmov d1, #0.48437500
+
+0x41 0x40 0x20 0x1e
+0x41 0x40 0x60 0x1e
+
+# CHECK: fmov s1, s2
+# CHECK: fmov d1, d2
+
+#-----------------------------------------------------------------------------
+# Floating-point round to integral
+#-----------------------------------------------------------------------------
+
+0x41 0x40 0x26 0x1e
+0x41 0x40 0x66 0x1e
+
+# CHECK: frinta s1, s2
+# CHECK: frinta d1, d2
+
+0x41 0xc0 0x27 0x1e
+0x41 0xc0 0x67 0x1e
+
+# CHECK: frinti s1, s2
+# CHECK: frinti d1, d2
+
+0x41 0x40 0x25 0x1e
+0x41 0x40 0x65 0x1e
+
+# CHECK: frintm s1, s2
+# CHECK: frintm d1, d2
+
+0x41 0x40 0x24 0x1e
+0x41 0x40 0x64 0x1e
+
+# CHECK: frintn s1, s2
+# CHECK: frintn d1, d2
+
+0x41 0xc0 0x24 0x1e
+0x41 0xc0 0x64 0x1e
+
+# CHECK: frintp s1, s2
+# CHECK: frintp d1, d2
+
+0x41 0x40 0x27 0x1e
+0x41 0x40 0x67 0x1e
+
+# CHECK: frintx s1, s2
+# CHECK: frintx d1, d2
+
+0x41 0xc0 0x25 0x1e
+0x41 0xc0 0x65 0x1e
+
+# CHECK: frintz s1, s2
+# CHECK: frintz d1, d2
+
+ 0x00 0x3c 0xe0 0x7e
+ 0x00 0x8c 0xe0 0x5e
+
+# CHECK: cmhs d0, d0, d0
+# CHECK: cmtst d0, d0, d0
+
+0x00 0x00 0xaf 0x9e
+0x00 0x00 0xae 0x9e
+
+# CHECK: fmov.d v0[1], x0
+# CHECK: fmov.d x0, v0[1]
+
diff --git a/test/MC/Disassembler/AArch64/arm64-system.txt b/test/MC/Disassembler/AArch64/arm64-system.txt
new file mode 100644
index 000000000000..9027a60dd30d
--- /dev/null
+++ b/test/MC/Disassembler/AArch64/arm64-system.txt
@@ -0,0 +1,62 @@
+# RUN: llvm-mc -triple arm64-apple-darwin --disassemble < %s | FileCheck %s
+
+
+#-----------------------------------------------------------------------------
+# Hint encodings
+#-----------------------------------------------------------------------------
+
+ 0x1f 0x20 0x03 0xd5
+# CHECK: nop
+ 0x9f 0x20 0x03 0xd5
+# CHECK: sev
+ 0xbf 0x20 0x03 0xd5
+# CHECK: sevl
+ 0x5f 0x20 0x03 0xd5
+# CHECK: wfe
+ 0x7f 0x20 0x03 0xd5
+# CHECK: wfi
+ 0x3f 0x20 0x03 0xd5
+# CHECK: yield
+
+#-----------------------------------------------------------------------------
+# Single-immediate operand instructions
+#-----------------------------------------------------------------------------
+
+ 0x5f 0x3a 0x03 0xd5
+# CHECK: clrex #10
+ 0xdf 0x3f 0x03 0xd5
+# CHECK: isb{{$}}
+ 0xdf 0x31 0x03 0xd5
+# CHECK: isb #1
+ 0xbf 0x33 0x03 0xd5
+# CHECK: dmb osh
+ 0x9f 0x37 0x03 0xd5
+# CHECK: dsb nsh
+ 0x3f 0x76 0x08 0xd5
+# CHECK: dc ivac
+
+#-----------------------------------------------------------------------------
+# Generic system instructions
+#-----------------------------------------------------------------------------
+ 0xff 0x05 0x0a 0xd5
+ 0xe7 0x6a 0x0f 0xd5
+ 0xf4 0x3f 0x2e 0xd5
+ 0xbf 0x40 0x00 0xd5
+ 0x00 0xb0 0x18 0xd5
+ 0x00 0xb0 0x38 0xd5
+
+# CHECK: sys #2, c0, c5, #7
+# CHECK: sys #7, c6, c10, #7, x7
+# CHECK: sysl x20, #6, c3, c15, #7
+# CHECK: msr SPSEL, #0
+# CHECK: msr S3_0_C11_C0_0, x0
+# CHECK: mrs x0, S3_0_C11_C0_0
+
+ 0x40 0xc0 0x1e 0xd5
+ 0x40 0xc0 0x1c 0xd5
+ 0x40 0xc0 0x18 0xd5
+
+# CHECK: msr RMR_EL3, x0
+# CHECK: msr RMR_EL2, x0
+# CHECK: msr RMR_EL1, x0
+
diff --git a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
index 40926b1fddc2..23da001accb6 100644
--- a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
+++ b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple=aarch64 -mattr=+fp-armv8 -disassemble < %s | FileCheck %s
+# RUN: llvm-mc -triple=arm64 -mattr=+fp-armv8 -disassemble < %s | FileCheck %s
#------------------------------------------------------------------------------
# Add/sub (immediate)
@@ -187,7 +188,7 @@
# CHECK: sub w3, w5, w7
# CHECK: sub wzr, w3, w5
-# CHECK: sub w20, wzr, w4
+# CHECK: {{sub w20, wzr, w4|neg w20, w4}}
# CHECK: sub w4, w6, wzr
# CHECK: sub w11, w13, w15
# CHECK: sub w9, w3, wzr, lsl #10
@@ -214,7 +215,7 @@
# CHECK: sub x3, x5, x7
# CHECK: sub xzr, x3, x5
-# CHECK: sub x20, xzr, x4
+# CHECK: {{sub x20, xzr, x4|neg x20, x4}}
# CHECK: sub x4, x6, xzr
# CHECK: sub x11, x13, x15
# CHECK: sub x9, x3, xzr, lsl #10
@@ -241,7 +242,7 @@
# CHECK: subs w3, w5, w7
# CHECK: cmp w3, w5
-# CHECK: subs w20, wzr, w4
+# CHECK: {{subs w20, wzr, w4|negs w20, w4}}
# CHECK: subs w4, w6, wzr
# CHECK: subs w11, w13, w15
# CHECK: subs w9, w3, wzr, lsl #10
@@ -268,7 +269,7 @@
# CHECK: subs x3, x5, x7
# CHECK: cmp x3, x5
-# CHECK: subs x20, xzr, x4
+# CHECK: {{subs x20, xzr, x4|negs x20, x4}}
# CHECK: subs x4, x6, xzr
# CHECK: subs x11, x13, x15
# CHECK: subs x9, x3, xzr, lsl #10
@@ -393,18 +394,18 @@
0x9f 0xde 0x95 0xeb
0xdf 0xfe 0x97 0xeb
-# CHECK: sub w29, wzr, w30
-# CHECK: sub w30, wzr, wzr
-# CHECK: sub wzr, wzr, w0
-# CHECK: sub w28, wzr, w27
-# CHECK: sub w26, wzr, w25, lsl #29
-# CHECK: sub w24, wzr, w23, lsl #31
-# CHECK: sub w22, wzr, w21, lsr #0
-# CHECK: sub w20, wzr, w19, lsr #1
-# CHECK: sub w18, wzr, w17, lsr #31
-# CHECK: sub w16, wzr, w15, asr #0
-# CHECK: sub w14, wzr, w13, asr #12
-# CHECK: sub w12, wzr, w11, asr #31
+# CHECK: {{sub w29, wzr|neg w29}}, w30
+# CHECK: {{sub w30, wzr|neg w30}}, wzr
+# CHECK: {{sub wzr, wzr|neg wzr}}, w0
+# CHECK: {{sub w28, wzr|neg w28}}, w27
+# CHECK: {{sub w26, wzr|neg w26}}, w25, lsl #29
+# CHECK: {{sub w24, wzr|neg w24}}, w23, lsl #31
+# CHECK: {{sub w22, wzr|neg w22}}, w21, lsr #0
+# CHECK: {{sub w20, wzr|neg w20}}, w19, lsr #1
+# CHECK: {{sub w18, wzr|neg w18}}, w17, lsr #31
+# CHECK: {{sub w16, wzr|neg w16}}, w15, asr #0
+# CHECK: {{sub w14, wzr|neg w14}}, w13, asr #12
+# CHECK: {{sub w12, wzr|neg w12}}, w11, asr #31
0xfd 0x3 0x1e 0x4b
0xfe 0x3 0x1f 0x4b
0xff 0x3 0x0 0x4b
@@ -418,18 +419,18 @@
0xee 0x33 0x8d 0x4b
0xec 0x7f 0x8b 0x4b
-# CHECK: sub x29, xzr, x30
-# CHECK: sub x30, xzr, xzr
-# CHECK: sub xzr, xzr, x0
-# CHECK: sub x28, xzr, x27
-# CHECK: sub x26, xzr, x25, lsl #29
-# CHECK: sub x24, xzr, x23, lsl #31
-# CHECK: sub x22, xzr, x21, lsr #0
-# CHECK: sub x20, xzr, x19, lsr #1
-# CHECK: sub x18, xzr, x17, lsr #31
-# CHECK: sub x16, xzr, x15, asr #0
-# CHECK: sub x14, xzr, x13, asr #12
-# CHECK: sub x12, xzr, x11, asr #31
+# CHECK: {{sub x29, xzr|neg x29}}, x30
+# CHECK: {{sub x30, xzr|neg x30}}, xzr
+# CHECK: {{sub xzr, xzr|neg xzr}}, x0
+# CHECK: {{sub x28, xzr|neg x28}}, x27
+# CHECK: {{sub x26, xzr|neg x26}}, x25, lsl #29
+# CHECK: {{sub x24, xzr|neg x24}}, x23, lsl #31
+# CHECK: {{sub x22, xzr|neg x22}}, x21, lsr #0
+# CHECK: {{sub x20, xzr|neg x20}}, x19, lsr #1
+# CHECK: {{sub x18, xzr|neg x18}}, x17, lsr #31
+# CHECK: {{sub x16, xzr|neg x16}}, x15, asr #0
+# CHECK: {{sub x14, xzr|neg x14}}, x13, asr #12
+# CHECK: {{sub x12, xzr|neg x12}}, x11, asr #31
0xfd 0x3 0x1e 0xcb
0xfe 0x3 0x1f 0xcb
0xff 0x3 0x0 0xcb
@@ -443,18 +444,18 @@
0xee 0x33 0x8d 0xcb
0xec 0x7f 0x8b 0xcb
-# CHECK: subs w29, wzr, w30
-# CHECK: subs w30, wzr, wzr
+# CHECK: {{subs w29, wzr|negs w29}}, w30
+# CHECK: {{subs w30, wzr|negs w30}}, wzr
# CHECK: cmp wzr, w0
-# CHECK: subs w28, wzr, w27
-# CHECK: subs w26, wzr, w25, lsl #29
-# CHECK: subs w24, wzr, w23, lsl #31
-# CHECK: subs w22, wzr, w21, lsr #0
-# CHECK: subs w20, wzr, w19, lsr #1
-# CHECK: subs w18, wzr, w17, lsr #31
-# CHECK: subs w16, wzr, w15, asr #0
-# CHECK: subs w14, wzr, w13, asr #12
-# CHECK: subs w12, wzr, w11, asr #31
+# CHECK: {{subs w28, wzr|negs w28}}, w27
+# CHECK: {{subs w26, wzr|negs w26}}, w25, lsl #29
+# CHECK: {{subs w24, wzr|negs w24}}, w23, lsl #31
+# CHECK: {{subs w22, wzr|negs w22}}, w21, lsr #0
+# CHECK: {{subs w20, wzr|negs w20}}, w19, lsr #1
+# CHECK: {{subs w18, wzr|negs w18}}, w17, lsr #31
+# CHECK: {{subs w16, wzr|negs w16}}, w15, asr #0
+# CHECK: {{subs w14, wzr|negs w14}}, w13, asr #12
+# CHECK: {{subs w12, wzr|negs w12}}, w11, asr #31
0xfd 0x3 0x1e 0x6b
0xfe 0x3 0x1f 0x6b
0xff 0x3 0x0 0x6b
@@ -468,18 +469,18 @@
0xee 0x33 0x8d 0x6b
0xec 0x7f 0x8b 0x6b
-# CHECK: subs x29, xzr, x30
-# CHECK: subs x30, xzr, xzr
+# CHECK: {{subs x29, xzr|negs x29}}, x30
+# CHECK: {{subs x30, xzr|negs x30}}, xzr
# CHECK: cmp xzr, x0
-# CHECK: subs x28, xzr, x27
-# CHECK: subs x26, xzr, x25, lsl #29
-# CHECK: subs x24, xzr, x23, lsl #31
-# CHECK: subs x22, xzr, x21, lsr #0
-# CHECK: subs x20, xzr, x19, lsr #1
-# CHECK: subs x18, xzr, x17, lsr #31
-# CHECK: subs x16, xzr, x15, asr #0
-# CHECK: subs x14, xzr, x13, asr #12
-# CHECK: subs x12, xzr, x11, asr #31
+# CHECK: {{subs x28, xzr|negs x28}}, x27
+# CHECK: {{subs x26, xzr|negs x26}}, x25, lsl #29
+# CHECK: {{subs x24, xzr|negs x24}}, x23, lsl #31
+# CHECK: {{subs x22, xzr|negs x22}}, x21, lsr #0
+# CHECK: {{subs x20, xzr|negs x20}}, x19, lsr #1
+# CHECK: {{subs x18, xzr|negs x18}}, x17, lsr #31
+# CHECK: {{subs x16, xzr|negs x16}}, x15, asr #0
+# CHECK: {{subs x14, xzr|negs x14}}, x13, asr #12
+# CHECK: {{subs x12, xzr|negs x12}}, x11, asr #31
0xfd 0x3 0x1e 0xeb
0xfe 0x3 0x1f 0xeb
0xff 0x3 0x0 0xeb
@@ -940,53 +941,73 @@
0xe5 0x27 0x86 0xda
0x7 0x35 0x9f 0xda
-# CHECK: csinc w3, wzr, wzr, ne
-# CHECK: csinc x9, xzr, xzr, mi
-# CHECK: csinv w20, wzr, wzr, eq
-# CHECK: csinv x30, xzr, xzr, lt
+# CHECK: cset w3, eq
+# CHECK: cset x9, pl
+# CHECK: csetm w20, ne
+# CHECK: csetm x30, ge
+# "cset w2, nv" and "csetm x3, al" are invalid aliases for these two
+# CHECK: csinc w2, wzr, wzr, al
+# CHECK: csinv x3, xzr, xzr, nv
0xe3 0x17 0x9f 0x1a
0xe9 0x47 0x9f 0x9a
0xf4 0x3 0x9f 0x5a
0xfe 0xb3 0x9f 0xda
-
-# CHECK: csinc w3, w5, w5, le
-# CHECK: csinc wzr, w4, w4, gt
-# CHECK: csinc w9, wzr, wzr, ge
-# CHECK: csinc x3, x5, x5, le
-# CHECK: csinc xzr, x4, x4, gt
-# CHECK: csinc x9, xzr, xzr, ge
+0xe2,0xe7,0x9f,0x1a
+0xe3,0xf3,0x9f,0xda
+
+# CHECK: cinc w3, w5, gt
+# CHECK: cinc wzr, w4, le
+# CHECK: cset w9, lt
+# CHECK: cinc x3, x5, gt
+# CHECK: cinc xzr, x4, le
+# CHECK: cset x9, lt
+# "cinc w5, w6, al" and "cinc x1, x2, nv" are invalid aliases for these two
+# CHECK: csinc w5, w6, w6, nv
+# CHECK: csinc x1, x2, x2, al
0xa3 0xd4 0x85 0x1a
0x9f 0xc4 0x84 0x1a
0xe9 0xa7 0x9f 0x1a
0xa3 0xd4 0x85 0x9a
0x9f 0xc4 0x84 0x9a
0xe9 0xa7 0x9f 0x9a
-
-# CHECK: csinv w3, w5, w5, le
-# CHECK: csinv wzr, w4, w4, gt
-# CHECK: csinv w9, wzr, wzr, ge
-# CHECK: csinv x3, x5, x5, le
-# CHECK: csinv xzr, x4, x4, gt
-# CHECK: csinv x9, xzr, xzr, ge
+0xc5,0xf4,0x86,0x1a
+0x41,0xe4,0x82,0x9a
+
+# CHECK: cinv w3, w5, gt
+# CHECK: cinv wzr, w4, le
+# CHECK: csetm w9, lt
+# CHECK: cinv x3, x5, gt
+# CHECK: cinv xzr, x4, le
+# CHECK: csetm x9, lt
+# "cinv x1, x0, nv" and "cinv w9, w8, al" are invalid aliases for these two
+# CHECK: csinv x1, x0, x0, al
+# CHECK: csinv w9, w8, w8, nv
0xa3 0xd0 0x85 0x5a
0x9f 0xc0 0x84 0x5a
0xe9 0xa3 0x9f 0x5a
0xa3 0xd0 0x85 0xda
0x9f 0xc0 0x84 0xda
0xe9 0xa3 0x9f 0xda
-
-# CHECK: csneg w3, w5, w5, le
-# CHECK: csneg wzr, w4, w4, gt
-# CHECK: csneg w9, wzr, wzr, ge
-# CHECK: csneg x3, x5, x5, le
-# CHECK: csneg xzr, x4, x4, gt
-# CHECK: csneg x9, xzr, xzr, ge
+0x01 0xe0 0x80 0xda
+0x09,0xf1,0x88,0x5a
+
+# CHECK: cneg w3, w5, gt
+# CHECK: cneg wzr, w4, le
+# CHECK: cneg w9, wzr, lt
+# CHECK: cneg x3, x5, gt
+# CHECK: cneg xzr, x4, le
+# CHECK: cneg x9, xzr, lt
+# "cneg x4, x8, nv" and "cneg w5, w6, al" are invalid aliases for these two
+# CHECK: csneg x4, x8, x8, al
+# CHECK: csinv w9, w8, w8, nv
0xa3 0xd4 0x85 0x5a
0x9f 0xc4 0x84 0x5a
0xe9 0xa7 0x9f 0x5a
0xa3 0xd4 0x85 0xda
0x9f 0xc4 0x84 0xda
0xe9 0xa7 0x9f 0xda
+0x04,0xe5,0x88,0xda
+0x09,0xf1,0x88,0x5a
#------------------------------------------------------------------------------
# Data-processing (1 source)
@@ -1243,22 +1264,22 @@
#------------------------------------------------------------------------------
# CHECK: svc #0
-# CHECK: svc #65535
+# CHECK: svc #{{65535|0xffff}}
0x1 0x0 0x0 0xd4
0xe1 0xff 0x1f 0xd4
-# CHECK: hvc #1
-# CHECK: smc #12000
-# CHECK: brk #12
-# CHECK: hlt #123
+# CHECK: hvc #{{1|0x1}}
+# CHECK: smc #{{12000|0x2ee0}}
+# CHECK: brk #{{12|0xc}}
+# CHECK: hlt #{{123|0x7b}}
0x22 0x0 0x0 0xd4
0x3 0xdc 0x5 0xd4
0x80 0x1 0x20 0xd4
0x60 0xf 0x40 0xd4
-# CHECK: dcps1 #42
-# CHECK: dcps2 #9
-# CHECK: dcps3 #1000
+# CHECK: dcps1 #{{42|0x2a}}
+# CHECK: dcps2 #{{9|0x9}}
+# CHECK: dcps3 #{{1000|0x3e8}}
0x41 0x5 0xa0 0xd4
0x22 0x1 0xa0 0xd4
0x3 0x7d 0xa0 0xd4
@@ -1284,9 +1305,9 @@
0xa3 0x3c 0xc7 0x93
0xab 0xfd 0xd1 0x93
-# CHECK: extr x19, x23, x23, #24
-# CHECK: extr x29, xzr, xzr, #63
-# CHECK: extr w9, w13, w13, #31
+# CHECK: ror x19, x23, #24
+# CHECK: ror x29, xzr, #63
+# CHECK: ror w9, w13, #31
0xf3 0x62 0xd7 0x93
0xfd 0xff 0xdf 0x93
0xa9 0x7d 0x8d 0x13
@@ -2353,23 +2374,23 @@
0xec 0xff 0xbf 0x3d
# CHECK: prfm pldl1keep, [sp, #8]
-# CHECK: prfm pldl1strm, [x3, #0]
+# CHECK: prfm pldl1strm, [x3{{(, #0)?}}]
# CHECK: prfm pldl2keep, [x5, #16]
-# CHECK: prfm pldl2strm, [x2, #0]
-# CHECK: prfm pldl3keep, [x5, #0]
-# CHECK: prfm pldl3strm, [x6, #0]
+# CHECK: prfm pldl2strm, [x2{{(, #0)?}}]
+# CHECK: prfm pldl3keep, [x5{{(, #0)?}}]
+# CHECK: prfm pldl3strm, [x6{{(, #0)?}}]
# CHECK: prfm plil1keep, [sp, #8]
-# CHECK: prfm plil1strm, [x3, #0]
+# CHECK: prfm plil1strm, [x3{{(, #0)?}}]
# CHECK: prfm plil2keep, [x5, #16]
-# CHECK: prfm plil2strm, [x2, #0]
-# CHECK: prfm plil3keep, [x5, #0]
-# CHECK: prfm plil3strm, [x6, #0]
+# CHECK: prfm plil2strm, [x2{{(, #0)?}}]
+# CHECK: prfm plil3keep, [x5{{(, #0)?}}]
+# CHECK: prfm plil3strm, [x6{{(, #0)?}}]
# CHECK: prfm pstl1keep, [sp, #8]
-# CHECK: prfm pstl1strm, [x3, #0]
+# CHECK: prfm pstl1strm, [x3{{(, #0)?}}]
# CHECK: prfm pstl2keep, [x5, #16]
-# CHECK: prfm pstl2strm, [x2, #0]
-# CHECK: prfm pstl3keep, [x5, #0]
-# CHECK: prfm pstl3strm, [x6, #0]
+# CHECK: prfm pstl2strm, [x2{{(, #0)?}}]
+# CHECK: prfm pstl3keep, [x5{{(, #0)?}}]
+# CHECK: prfm pstl3strm, [x6{{(, #0)?}}]
0xe0 0x07 0x80 0xf9
0x61 0x00 0x80 0xf9
0xa2 0x08 0x80 0xf9
@@ -2722,15 +2743,15 @@
0xff 0xc7 0x0 0x52
0x30 0xc6 0x1 0x52
-# CHECK: ands wzr, w18, #0xcccccccc
+# CHECK: {{ands wzr,|tst}} w18, #0xcccccccc
# CHECK: ands w19, w20, #0x33333333
# CHECK: ands w21, w22, #0x99999999
0x5f 0xe6 0x2 0x72
0x93 0xe6 0x0 0x72
0xd5 0xe6 0x1 0x72
-# CHECK: ands wzr, w3, #0xaaaaaaaa
-# CHECK: ands wzr, wzr, #0x55555555
+# CHECK: {{ands wzr,|tst}} w3, #0xaaaaaaaa
+# CHECK: {{ands wzr,|tst}} wzr, #0x55555555
0x7f 0xf0 0x1 0x72
0xff 0xf3 0x0 0x72
@@ -2762,15 +2783,15 @@
0xff 0xc7 0x0 0xd2
0x30 0xc6 0x1 0xd2
-# CHECK: ands xzr, x18, #0xcccccccccccccccc
+# CHECK: {{ands xzr,|tst}} x18, #0xcccccccccccccccc
# CHECK: ands x19, x20, #0x3333333333333333
# CHECK: ands x21, x22, #0x9999999999999999
0x5f 0xe6 0x2 0xf2
0x93 0xe6 0x0 0xf2
0xd5 0xe6 0x1 0xf2
-# CHECK: ands xzr, x3, #0xaaaaaaaaaaaaaaaa
-# CHECK: ands xzr, xzr, #0x5555555555555555
+# CHECK: {{ands xzr,|tst}} x3, #0xaaaaaaaaaaaaaaaa
+# CHECK: {{ands xzr,|tst}} xzr, #0x5555555555555555
0x7f 0xf0 0x1 0xf2
0xff 0xf3 0x0 0xf2
@@ -2858,15 +2879,15 @@
# limitation in InstAlias. Lots of the "mov[nz]" instructions should
# be "mov".
-# CHECK: movz w1, #65535
+# CHECK: movz w1, #{{65535|0xffff}}
# CHECK: movz w2, #0, lsl #16
-# CHECK: movn w2, #1234
+# CHECK: movn w2, #{{1234|0x4d2}}
0xe1 0xff 0x9f 0x52
0x2 0x0 0xa0 0x52
0x42 0x9a 0x80 0x12
-# CHECK: movz x2, #1234, lsl #32
-# CHECK: movk xzr, #4321, lsl #48
+# CHECK: movz x2, #{{1234|0x4d2}}, lsl #32
+# CHECK: movk xzr, #{{4321|0x10e1}}, lsl #48
0x42 0x9a 0xc0 0xd2
0x3f 0x1c 0xe2 0xf2
@@ -2906,7 +2927,7 @@
#------------------------------------------------------------------------------
# CHECK: nop
-# CHECK: hint #127
+# CHECK: hint #{{127|0x7f}}
# CHECK: nop
# CHECK: yield
# CHECK: wfe
@@ -2998,9 +3019,9 @@
0xdf 0x3f 0x3 0xd5
0xdf 0x3c 0x3 0xd5
-# CHECK: msr spsel, #0
-# CHECK: msr daifset, #15
-# CHECK: msr daifclr, #12
+# CHECK: msr {{spsel|SPSEL}}, #0
+# CHECK: msr {{daifset|DAIFSET}}, #15
+# CHECK: msr {{daifclr|DAIFCLR}}, #12
0xbf 0x40 0x0 0xd5
0xdf 0x4f 0x3 0xd5
0xff 0x4c 0x3 0xd5
@@ -3014,21 +3035,21 @@
0xe9 0x59 0x2f 0xd5
0x41 0xff 0x28 0xd5
-# CHECK: sys #0, c7, c1, #0, xzr
-# CHECK: sys #0, c7, c5, #0, xzr
-# CHECK: sys #3, c7, c5, #1, x9
+# CHECK: {{sys #0, c7, c1, #0|ic ialluis}}
+# CHECK: {{sys #0, c7, c5, #0|ic iallu}}
+# CHECK: {{sys #3, c7, c5, #1|ic ivau}}, x9
0x1f 0x71 0x8 0xd5
0x1f 0x75 0x8 0xd5
0x29 0x75 0xb 0xd5
-# CHECK: sys #3, c7, c4, #1, x12
-# CHECK: sys #0, c7, c6, #1, xzr
-# CHECK: sys #0, c7, c6, #2, x2
-# CHECK: sys #3, c7, c10, #1, x9
-# CHECK: sys #0, c7, c10, #2, x10
-# CHECK: sys #3, c7, c11, #1, x0
-# CHECK: sys #3, c7, c14, #1, x3
-# CHECK: sys #0, c7, c14, #2, x30
+# CHECK: {{sys #3, c7, c4, #1|dc zva}}, x12
+# CHECK: {{sys #0, c7, c6, #1|dc ivac}}
+# CHECK: {{sys #0, c7, c6, #2|dc isw}}, x2
+# CHECK: {{sys #3, c7, c10, #1|dc cvac}}, x9
+# CHECK: {{sys #0, c7, c10, #2|dc csw}}, x10
+# CHECK: {{sys #3, c7, c11, #1|dc cvau}}, x0
+# CHECK: {{sys #3, c7, c14, #1|dc civac}}, x3
+# CHECK: {{sys #0, c7, c14, #2|dc cisw}}, x30
0x2c 0x74 0xb 0xd5
0x3f 0x76 0x8 0xd5
0x42 0x76 0x8 0xd5
@@ -3039,559 +3060,559 @@
0x5e 0x7e 0x8 0xd5
-# CHECK: msr teecr32_el1, x12
-# CHECK: msr osdtrrx_el1, x12
-# CHECK: msr mdccint_el1, x12
-# CHECK: msr mdscr_el1, x12
-# CHECK: msr osdtrtx_el1, x12
-# CHECK: msr dbgdtr_el0, x12
-# CHECK: msr dbgdtrtx_el0, x12
-# CHECK: msr oseccr_el1, x12
-# CHECK: msr dbgvcr32_el2, x12
-# CHECK: msr dbgbvr0_el1, x12
-# CHECK: msr dbgbvr1_el1, x12
-# CHECK: msr dbgbvr2_el1, x12
-# CHECK: msr dbgbvr3_el1, x12
-# CHECK: msr dbgbvr4_el1, x12
-# CHECK: msr dbgbvr5_el1, x12
-# CHECK: msr dbgbvr6_el1, x12
-# CHECK: msr dbgbvr7_el1, x12
-# CHECK: msr dbgbvr8_el1, x12
-# CHECK: msr dbgbvr9_el1, x12
-# CHECK: msr dbgbvr10_el1, x12
-# CHECK: msr dbgbvr11_el1, x12
-# CHECK: msr dbgbvr12_el1, x12
-# CHECK: msr dbgbvr13_el1, x12
-# CHECK: msr dbgbvr14_el1, x12
-# CHECK: msr dbgbvr15_el1, x12
-# CHECK: msr dbgbcr0_el1, x12
-# CHECK: msr dbgbcr1_el1, x12
-# CHECK: msr dbgbcr2_el1, x12
-# CHECK: msr dbgbcr3_el1, x12
-# CHECK: msr dbgbcr4_el1, x12
-# CHECK: msr dbgbcr5_el1, x12
-# CHECK: msr dbgbcr6_el1, x12
-# CHECK: msr dbgbcr7_el1, x12
-# CHECK: msr dbgbcr8_el1, x12
-# CHECK: msr dbgbcr9_el1, x12
-# CHECK: msr dbgbcr10_el1, x12
-# CHECK: msr dbgbcr11_el1, x12
-# CHECK: msr dbgbcr12_el1, x12
-# CHECK: msr dbgbcr13_el1, x12
-# CHECK: msr dbgbcr14_el1, x12
-# CHECK: msr dbgbcr15_el1, x12
-# CHECK: msr dbgwvr0_el1, x12
-# CHECK: msr dbgwvr1_el1, x12
-# CHECK: msr dbgwvr2_el1, x12
-# CHECK: msr dbgwvr3_el1, x12
-# CHECK: msr dbgwvr4_el1, x12
-# CHECK: msr dbgwvr5_el1, x12
-# CHECK: msr dbgwvr6_el1, x12
-# CHECK: msr dbgwvr7_el1, x12
-# CHECK: msr dbgwvr8_el1, x12
-# CHECK: msr dbgwvr9_el1, x12
-# CHECK: msr dbgwvr10_el1, x12
-# CHECK: msr dbgwvr11_el1, x12
-# CHECK: msr dbgwvr12_el1, x12
-# CHECK: msr dbgwvr13_el1, x12
-# CHECK: msr dbgwvr14_el1, x12
-# CHECK: msr dbgwvr15_el1, x12
-# CHECK: msr dbgwcr0_el1, x12
-# CHECK: msr dbgwcr1_el1, x12
-# CHECK: msr dbgwcr2_el1, x12
-# CHECK: msr dbgwcr3_el1, x12
-# CHECK: msr dbgwcr4_el1, x12
-# CHECK: msr dbgwcr5_el1, x12
-# CHECK: msr dbgwcr6_el1, x12
-# CHECK: msr dbgwcr7_el1, x12
-# CHECK: msr dbgwcr8_el1, x12
-# CHECK: msr dbgwcr9_el1, x12
-# CHECK: msr dbgwcr10_el1, x12
-# CHECK: msr dbgwcr11_el1, x12
-# CHECK: msr dbgwcr12_el1, x12
-# CHECK: msr dbgwcr13_el1, x12
-# CHECK: msr dbgwcr14_el1, x12
-# CHECK: msr dbgwcr15_el1, x12
-# CHECK: msr teehbr32_el1, x12
-# CHECK: msr oslar_el1, x12
-# CHECK: msr osdlr_el1, x12
-# CHECK: msr dbgprcr_el1, x12
-# CHECK: msr dbgclaimset_el1, x12
-# CHECK: msr dbgclaimclr_el1, x12
-# CHECK: msr csselr_el1, x12
-# CHECK: msr vpidr_el2, x12
-# CHECK: msr vmpidr_el2, x12
-# CHECK: msr sctlr_el1, x12
-# CHECK: msr sctlr_el2, x12
-# CHECK: msr sctlr_el3, x12
-# CHECK: msr actlr_el1, x12
-# CHECK: msr actlr_el2, x12
-# CHECK: msr actlr_el3, x12
-# CHECK: msr cpacr_el1, x12
-# CHECK: msr hcr_el2, x12
-# CHECK: msr scr_el3, x12
-# CHECK: msr mdcr_el2, x12
-# CHECK: msr sder32_el3, x12
-# CHECK: msr cptr_el2, x12
-# CHECK: msr cptr_el3, x12
-# CHECK: msr hstr_el2, x12
-# CHECK: msr hacr_el2, x12
-# CHECK: msr mdcr_el3, x12
-# CHECK: msr ttbr0_el1, x12
-# CHECK: msr ttbr0_el2, x12
-# CHECK: msr ttbr0_el3, x12
-# CHECK: msr ttbr1_el1, x12
-# CHECK: msr tcr_el1, x12
-# CHECK: msr tcr_el2, x12
-# CHECK: msr tcr_el3, x12
-# CHECK: msr vttbr_el2, x12
-# CHECK: msr vtcr_el2, x12
-# CHECK: msr dacr32_el2, x12
-# CHECK: msr spsr_el1, x12
-# CHECK: msr spsr_el2, x12
-# CHECK: msr spsr_el3, x12
-# CHECK: msr elr_el1, x12
-# CHECK: msr elr_el2, x12
-# CHECK: msr elr_el3, x12
-# CHECK: msr sp_el0, x12
-# CHECK: msr sp_el1, x12
-# CHECK: msr sp_el2, x12
-# CHECK: msr spsel, x12
-# CHECK: msr nzcv, x12
-# CHECK: msr daif, x12
-# CHECK: msr currentel, x12
-# CHECK: msr spsr_irq, x12
-# CHECK: msr spsr_abt, x12
-# CHECK: msr spsr_und, x12
-# CHECK: msr spsr_fiq, x12
-# CHECK: msr fpcr, x12
-# CHECK: msr fpsr, x12
-# CHECK: msr dspsr_el0, x12
-# CHECK: msr dlr_el0, x12
-# CHECK: msr ifsr32_el2, x12
-# CHECK: msr afsr0_el1, x12
-# CHECK: msr afsr0_el2, x12
-# CHECK: msr afsr0_el3, x12
-# CHECK: msr afsr1_el1, x12
-# CHECK: msr afsr1_el2, x12
-# CHECK: msr afsr1_el3, x12
-# CHECK: msr esr_el1, x12
-# CHECK: msr esr_el2, x12
-# CHECK: msr esr_el3, x12
-# CHECK: msr fpexc32_el2, x12
-# CHECK: msr far_el1, x12
-# CHECK: msr far_el2, x12
-# CHECK: msr far_el3, x12
-# CHECK: msr hpfar_el2, x12
-# CHECK: msr par_el1, x12
-# CHECK: msr pmcr_el0, x12
-# CHECK: msr pmcntenset_el0, x12
-# CHECK: msr pmcntenclr_el0, x12
-# CHECK: msr pmovsclr_el0, x12
-# CHECK: msr pmselr_el0, x12
-# CHECK: msr pmccntr_el0, x12
-# CHECK: msr pmxevtyper_el0, x12
-# CHECK: msr pmxevcntr_el0, x12
-# CHECK: msr pmuserenr_el0, x12
-# CHECK: msr pmintenset_el1, x12
-# CHECK: msr pmintenclr_el1, x12
-# CHECK: msr pmovsset_el0, x12
-# CHECK: msr mair_el1, x12
-# CHECK: msr mair_el2, x12
-# CHECK: msr mair_el3, x12
-# CHECK: msr amair_el1, x12
-# CHECK: msr amair_el2, x12
-# CHECK: msr amair_el3, x12
-# CHECK: msr vbar_el1, x12
-# CHECK: msr vbar_el2, x12
-# CHECK: msr vbar_el3, x12
-# CHECK: msr rmr_el1, x12
-# CHECK: msr rmr_el2, x12
-# CHECK: msr rmr_el3, x12
-# CHECK: msr tpidr_el0, x12
-# CHECK: msr tpidr_el2, x12
-# CHECK: msr tpidr_el3, x12
-# CHECK: msr tpidrro_el0, x12
-# CHECK: msr tpidr_el1, x12
-# CHECK: msr cntfrq_el0, x12
-# CHECK: msr cntvoff_el2, x12
-# CHECK: msr cntkctl_el1, x12
-# CHECK: msr cnthctl_el2, x12
-# CHECK: msr cntp_tval_el0, x12
-# CHECK: msr cnthp_tval_el2, x12
-# CHECK: msr cntps_tval_el1, x12
-# CHECK: msr cntp_ctl_el0, x12
-# CHECK: msr cnthp_ctl_el2, x12
-# CHECK: msr cntps_ctl_el1, x12
-# CHECK: msr cntp_cval_el0, x12
-# CHECK: msr cnthp_cval_el2, x12
-# CHECK: msr cntps_cval_el1, x12
-# CHECK: msr cntv_tval_el0, x12
-# CHECK: msr cntv_ctl_el0, x12
-# CHECK: msr cntv_cval_el0, x12
-# CHECK: msr pmevcntr0_el0, x12
-# CHECK: msr pmevcntr1_el0, x12
-# CHECK: msr pmevcntr2_el0, x12
-# CHECK: msr pmevcntr3_el0, x12
-# CHECK: msr pmevcntr4_el0, x12
-# CHECK: msr pmevcntr5_el0, x12
-# CHECK: msr pmevcntr6_el0, x12
-# CHECK: msr pmevcntr7_el0, x12
-# CHECK: msr pmevcntr8_el0, x12
-# CHECK: msr pmevcntr9_el0, x12
-# CHECK: msr pmevcntr10_el0, x12
-# CHECK: msr pmevcntr11_el0, x12
-# CHECK: msr pmevcntr12_el0, x12
-# CHECK: msr pmevcntr13_el0, x12
-# CHECK: msr pmevcntr14_el0, x12
-# CHECK: msr pmevcntr15_el0, x12
-# CHECK: msr pmevcntr16_el0, x12
-# CHECK: msr pmevcntr17_el0, x12
-# CHECK: msr pmevcntr18_el0, x12
-# CHECK: msr pmevcntr19_el0, x12
-# CHECK: msr pmevcntr20_el0, x12
-# CHECK: msr pmevcntr21_el0, x12
-# CHECK: msr pmevcntr22_el0, x12
-# CHECK: msr pmevcntr23_el0, x12
-# CHECK: msr pmevcntr24_el0, x12
-# CHECK: msr pmevcntr25_el0, x12
-# CHECK: msr pmevcntr26_el0, x12
-# CHECK: msr pmevcntr27_el0, x12
-# CHECK: msr pmevcntr28_el0, x12
-# CHECK: msr pmevcntr29_el0, x12
-# CHECK: msr pmevcntr30_el0, x12
-# CHECK: msr pmccfiltr_el0, x12
-# CHECK: msr pmevtyper0_el0, x12
-# CHECK: msr pmevtyper1_el0, x12
-# CHECK: msr pmevtyper2_el0, x12
-# CHECK: msr pmevtyper3_el0, x12
-# CHECK: msr pmevtyper4_el0, x12
-# CHECK: msr pmevtyper5_el0, x12
-# CHECK: msr pmevtyper6_el0, x12
-# CHECK: msr pmevtyper7_el0, x12
-# CHECK: msr pmevtyper8_el0, x12
-# CHECK: msr pmevtyper9_el0, x12
-# CHECK: msr pmevtyper10_el0, x12
-# CHECK: msr pmevtyper11_el0, x12
-# CHECK: msr pmevtyper12_el0, x12
-# CHECK: msr pmevtyper13_el0, x12
-# CHECK: msr pmevtyper14_el0, x12
-# CHECK: msr pmevtyper15_el0, x12
-# CHECK: msr pmevtyper16_el0, x12
-# CHECK: msr pmevtyper17_el0, x12
-# CHECK: msr pmevtyper18_el0, x12
-# CHECK: msr pmevtyper19_el0, x12
-# CHECK: msr pmevtyper20_el0, x12
-# CHECK: msr pmevtyper21_el0, x12
-# CHECK: msr pmevtyper22_el0, x12
-# CHECK: msr pmevtyper23_el0, x12
-# CHECK: msr pmevtyper24_el0, x12
-# CHECK: msr pmevtyper25_el0, x12
-# CHECK: msr pmevtyper26_el0, x12
-# CHECK: msr pmevtyper27_el0, x12
-# CHECK: msr pmevtyper28_el0, x12
-# CHECK: msr pmevtyper29_el0, x12
-# CHECK: msr pmevtyper30_el0, x12
-# CHECK: mrs x9, teecr32_el1
-# CHECK: mrs x9, osdtrrx_el1
-# CHECK: mrs x9, mdccsr_el0
-# CHECK: mrs x9, mdccint_el1
-# CHECK: mrs x9, mdscr_el1
-# CHECK: mrs x9, osdtrtx_el1
-# CHECK: mrs x9, dbgdtr_el0
-# CHECK: mrs x9, dbgdtrrx_el0
-# CHECK: mrs x9, oseccr_el1
-# CHECK: mrs x9, dbgvcr32_el2
-# CHECK: mrs x9, dbgbvr0_el1
-# CHECK: mrs x9, dbgbvr1_el1
-# CHECK: mrs x9, dbgbvr2_el1
-# CHECK: mrs x9, dbgbvr3_el1
-# CHECK: mrs x9, dbgbvr4_el1
-# CHECK: mrs x9, dbgbvr5_el1
-# CHECK: mrs x9, dbgbvr6_el1
-# CHECK: mrs x9, dbgbvr7_el1
-# CHECK: mrs x9, dbgbvr8_el1
-# CHECK: mrs x9, dbgbvr9_el1
-# CHECK: mrs x9, dbgbvr10_el1
-# CHECK: mrs x9, dbgbvr11_el1
-# CHECK: mrs x9, dbgbvr12_el1
-# CHECK: mrs x9, dbgbvr13_el1
-# CHECK: mrs x9, dbgbvr14_el1
-# CHECK: mrs x9, dbgbvr15_el1
-# CHECK: mrs x9, dbgbcr0_el1
-# CHECK: mrs x9, dbgbcr1_el1
-# CHECK: mrs x9, dbgbcr2_el1
-# CHECK: mrs x9, dbgbcr3_el1
-# CHECK: mrs x9, dbgbcr4_el1
-# CHECK: mrs x9, dbgbcr5_el1
-# CHECK: mrs x9, dbgbcr6_el1
-# CHECK: mrs x9, dbgbcr7_el1
-# CHECK: mrs x9, dbgbcr8_el1
-# CHECK: mrs x9, dbgbcr9_el1
-# CHECK: mrs x9, dbgbcr10_el1
-# CHECK: mrs x9, dbgbcr11_el1
-# CHECK: mrs x9, dbgbcr12_el1
-# CHECK: mrs x9, dbgbcr13_el1
-# CHECK: mrs x9, dbgbcr14_el1
-# CHECK: mrs x9, dbgbcr15_el1
-# CHECK: mrs x9, dbgwvr0_el1
-# CHECK: mrs x9, dbgwvr1_el1
-# CHECK: mrs x9, dbgwvr2_el1
-# CHECK: mrs x9, dbgwvr3_el1
-# CHECK: mrs x9, dbgwvr4_el1
-# CHECK: mrs x9, dbgwvr5_el1
-# CHECK: mrs x9, dbgwvr6_el1
-# CHECK: mrs x9, dbgwvr7_el1
-# CHECK: mrs x9, dbgwvr8_el1
-# CHECK: mrs x9, dbgwvr9_el1
-# CHECK: mrs x9, dbgwvr10_el1
-# CHECK: mrs x9, dbgwvr11_el1
-# CHECK: mrs x9, dbgwvr12_el1
-# CHECK: mrs x9, dbgwvr13_el1
-# CHECK: mrs x9, dbgwvr14_el1
-# CHECK: mrs x9, dbgwvr15_el1
-# CHECK: mrs x9, dbgwcr0_el1
-# CHECK: mrs x9, dbgwcr1_el1
-# CHECK: mrs x9, dbgwcr2_el1
-# CHECK: mrs x9, dbgwcr3_el1
-# CHECK: mrs x9, dbgwcr4_el1
-# CHECK: mrs x9, dbgwcr5_el1
-# CHECK: mrs x9, dbgwcr6_el1
-# CHECK: mrs x9, dbgwcr7_el1
-# CHECK: mrs x9, dbgwcr8_el1
-# CHECK: mrs x9, dbgwcr9_el1
-# CHECK: mrs x9, dbgwcr10_el1
-# CHECK: mrs x9, dbgwcr11_el1
-# CHECK: mrs x9, dbgwcr12_el1
-# CHECK: mrs x9, dbgwcr13_el1
-# CHECK: mrs x9, dbgwcr14_el1
-# CHECK: mrs x9, dbgwcr15_el1
-# CHECK: mrs x9, mdrar_el1
-# CHECK: mrs x9, teehbr32_el1
-# CHECK: mrs x9, oslsr_el1
-# CHECK: mrs x9, osdlr_el1
-# CHECK: mrs x9, dbgprcr_el1
-# CHECK: mrs x9, dbgclaimset_el1
-# CHECK: mrs x9, dbgclaimclr_el1
-# CHECK: mrs x9, dbgauthstatus_el1
-# CHECK: mrs x9, midr_el1
-# CHECK: mrs x9, ccsidr_el1
-# CHECK: mrs x9, csselr_el1
-# CHECK: mrs x9, vpidr_el2
-# CHECK: mrs x9, clidr_el1
-# CHECK: mrs x9, ctr_el0
-# CHECK: mrs x9, mpidr_el1
-# CHECK: mrs x9, vmpidr_el2
-# CHECK: mrs x9, revidr_el1
-# CHECK: mrs x9, aidr_el1
-# CHECK: mrs x9, dczid_el0
-# CHECK: mrs x9, id_pfr0_el1
-# CHECK: mrs x9, id_pfr1_el1
-# CHECK: mrs x9, id_dfr0_el1
-# CHECK: mrs x9, id_afr0_el1
-# CHECK: mrs x9, id_mmfr0_el1
-# CHECK: mrs x9, id_mmfr1_el1
-# CHECK: mrs x9, id_mmfr2_el1
-# CHECK: mrs x9, id_mmfr3_el1
-# CHECK: mrs x9, id_isar0_el1
-# CHECK: mrs x9, id_isar1_el1
-# CHECK: mrs x9, id_isar2_el1
-# CHECK: mrs x9, id_isar3_el1
-# CHECK: mrs x9, id_isar4_el1
-# CHECK: mrs x9, id_isar5_el1
-# CHECK: mrs x9, mvfr0_el1
-# CHECK: mrs x9, mvfr1_el1
-# CHECK: mrs x9, mvfr2_el1
-# CHECK: mrs x9, id_aa64pfr0_el1
-# CHECK: mrs x9, id_aa64pfr1_el1
-# CHECK: mrs x9, id_aa64dfr0_el1
-# CHECK: mrs x9, id_aa64dfr1_el1
-# CHECK: mrs x9, id_aa64afr0_el1
-# CHECK: mrs x9, id_aa64afr1_el1
-# CHECK: mrs x9, id_aa64isar0_el1
-# CHECK: mrs x9, id_aa64isar1_el1
-# CHECK: mrs x9, id_aa64mmfr0_el1
-# CHECK: mrs x9, id_aa64mmfr1_el1
-# CHECK: mrs x9, sctlr_el1
-# CHECK: mrs x9, sctlr_el2
-# CHECK: mrs x9, sctlr_el3
-# CHECK: mrs x9, actlr_el1
-# CHECK: mrs x9, actlr_el2
-# CHECK: mrs x9, actlr_el3
-# CHECK: mrs x9, cpacr_el1
-# CHECK: mrs x9, hcr_el2
-# CHECK: mrs x9, scr_el3
-# CHECK: mrs x9, mdcr_el2
-# CHECK: mrs x9, sder32_el3
-# CHECK: mrs x9, cptr_el2
-# CHECK: mrs x9, cptr_el3
-# CHECK: mrs x9, hstr_el2
-# CHECK: mrs x9, hacr_el2
-# CHECK: mrs x9, mdcr_el3
-# CHECK: mrs x9, ttbr0_el1
-# CHECK: mrs x9, ttbr0_el2
-# CHECK: mrs x9, ttbr0_el3
-# CHECK: mrs x9, ttbr1_el1
-# CHECK: mrs x9, tcr_el1
-# CHECK: mrs x9, tcr_el2
-# CHECK: mrs x9, tcr_el3
-# CHECK: mrs x9, vttbr_el2
-# CHECK: mrs x9, vtcr_el2
-# CHECK: mrs x9, dacr32_el2
-# CHECK: mrs x9, spsr_el1
-# CHECK: mrs x9, spsr_el2
-# CHECK: mrs x9, spsr_el3
-# CHECK: mrs x9, elr_el1
-# CHECK: mrs x9, elr_el2
-# CHECK: mrs x9, elr_el3
-# CHECK: mrs x9, sp_el0
-# CHECK: mrs x9, sp_el1
-# CHECK: mrs x9, sp_el2
-# CHECK: mrs x9, spsel
-# CHECK: mrs x9, nzcv
-# CHECK: mrs x9, daif
-# CHECK: mrs x9, currentel
-# CHECK: mrs x9, spsr_irq
-# CHECK: mrs x9, spsr_abt
-# CHECK: mrs x9, spsr_und
-# CHECK: mrs x9, spsr_fiq
-# CHECK: mrs x9, fpcr
-# CHECK: mrs x9, fpsr
-# CHECK: mrs x9, dspsr_el0
-# CHECK: mrs x9, dlr_el0
-# CHECK: mrs x9, ifsr32_el2
-# CHECK: mrs x9, afsr0_el1
-# CHECK: mrs x9, afsr0_el2
-# CHECK: mrs x9, afsr0_el3
-# CHECK: mrs x9, afsr1_el1
-# CHECK: mrs x9, afsr1_el2
-# CHECK: mrs x9, afsr1_el3
-# CHECK: mrs x9, esr_el1
-# CHECK: mrs x9, esr_el2
-# CHECK: mrs x9, esr_el3
-# CHECK: mrs x9, fpexc32_el2
-# CHECK: mrs x9, far_el1
-# CHECK: mrs x9, far_el2
-# CHECK: mrs x9, far_el3
-# CHECK: mrs x9, hpfar_el2
-# CHECK: mrs x9, par_el1
-# CHECK: mrs x9, pmcr_el0
-# CHECK: mrs x9, pmcntenset_el0
-# CHECK: mrs x9, pmcntenclr_el0
-# CHECK: mrs x9, pmovsclr_el0
-# CHECK: mrs x9, pmselr_el0
-# CHECK: mrs x9, pmceid0_el0
-# CHECK: mrs x9, pmceid1_el0
-# CHECK: mrs x9, pmccntr_el0
-# CHECK: mrs x9, pmxevtyper_el0
-# CHECK: mrs x9, pmxevcntr_el0
-# CHECK: mrs x9, pmuserenr_el0
-# CHECK: mrs x9, pmintenset_el1
-# CHECK: mrs x9, pmintenclr_el1
-# CHECK: mrs x9, pmovsset_el0
-# CHECK: mrs x9, mair_el1
-# CHECK: mrs x9, mair_el2
-# CHECK: mrs x9, mair_el3
-# CHECK: mrs x9, amair_el1
-# CHECK: mrs x9, amair_el2
-# CHECK: mrs x9, amair_el3
-# CHECK: mrs x9, vbar_el1
-# CHECK: mrs x9, vbar_el2
-# CHECK: mrs x9, vbar_el3
-# CHECK: mrs x9, rvbar_el1
-# CHECK: mrs x9, rvbar_el2
-# CHECK: mrs x9, rvbar_el3
-# CHECK: mrs x9, rmr_el1
-# CHECK: mrs x9, rmr_el2
-# CHECK: mrs x9, rmr_el3
-# CHECK: mrs x9, isr_el1
-# CHECK: mrs x9, contextidr_el1
-# CHECK: mrs x9, tpidr_el0
-# CHECK: mrs x9, tpidr_el2
-# CHECK: mrs x9, tpidr_el3
-# CHECK: mrs x9, tpidrro_el0
-# CHECK: mrs x9, tpidr_el1
-# CHECK: mrs x9, cntfrq_el0
-# CHECK: mrs x9, cntpct_el0
-# CHECK: mrs x9, cntvct_el0
-# CHECK: mrs x9, cntvoff_el2
-# CHECK: mrs x9, cntkctl_el1
-# CHECK: mrs x9, cnthctl_el2
-# CHECK: mrs x9, cntp_tval_el0
-# CHECK: mrs x9, cnthp_tval_el2
-# CHECK: mrs x9, cntps_tval_el1
-# CHECK: mrs x9, cntp_ctl_el0
-# CHECK: mrs x9, cnthp_ctl_el2
-# CHECK: mrs x9, cntps_ctl_el1
-# CHECK: mrs x9, cntp_cval_el0
-# CHECK: mrs x9, cnthp_cval_el2
-# CHECK: mrs x9, cntps_cval_el1
-# CHECK: mrs x9, cntv_tval_el0
-# CHECK: mrs x9, cntv_ctl_el0
-# CHECK: mrs x9, cntv_cval_el0
-# CHECK: mrs x9, pmevcntr0_el0
-# CHECK: mrs x9, pmevcntr1_el0
-# CHECK: mrs x9, pmevcntr2_el0
-# CHECK: mrs x9, pmevcntr3_el0
-# CHECK: mrs x9, pmevcntr4_el0
-# CHECK: mrs x9, pmevcntr5_el0
-# CHECK: mrs x9, pmevcntr6_el0
-# CHECK: mrs x9, pmevcntr7_el0
-# CHECK: mrs x9, pmevcntr8_el0
-# CHECK: mrs x9, pmevcntr9_el0
-# CHECK: mrs x9, pmevcntr10_el0
-# CHECK: mrs x9, pmevcntr11_el0
-# CHECK: mrs x9, pmevcntr12_el0
-# CHECK: mrs x9, pmevcntr13_el0
-# CHECK: mrs x9, pmevcntr14_el0
-# CHECK: mrs x9, pmevcntr15_el0
-# CHECK: mrs x9, pmevcntr16_el0
-# CHECK: mrs x9, pmevcntr17_el0
-# CHECK: mrs x9, pmevcntr18_el0
-# CHECK: mrs x9, pmevcntr19_el0
-# CHECK: mrs x9, pmevcntr20_el0
-# CHECK: mrs x9, pmevcntr21_el0
-# CHECK: mrs x9, pmevcntr22_el0
-# CHECK: mrs x9, pmevcntr23_el0
-# CHECK: mrs x9, pmevcntr24_el0
-# CHECK: mrs x9, pmevcntr25_el0
-# CHECK: mrs x9, pmevcntr26_el0
-# CHECK: mrs x9, pmevcntr27_el0
-# CHECK: mrs x9, pmevcntr28_el0
-# CHECK: mrs x9, pmevcntr29_el0
-# CHECK: mrs x9, pmevcntr30_el0
-# CHECK: mrs x9, pmccfiltr_el0
-# CHECK: mrs x9, pmevtyper0_el0
-# CHECK: mrs x9, pmevtyper1_el0
-# CHECK: mrs x9, pmevtyper2_el0
-# CHECK: mrs x9, pmevtyper3_el0
-# CHECK: mrs x9, pmevtyper4_el0
-# CHECK: mrs x9, pmevtyper5_el0
-# CHECK: mrs x9, pmevtyper6_el0
-# CHECK: mrs x9, pmevtyper7_el0
-# CHECK: mrs x9, pmevtyper8_el0
-# CHECK: mrs x9, pmevtyper9_el0
-# CHECK: mrs x9, pmevtyper10_el0
-# CHECK: mrs x9, pmevtyper11_el0
-# CHECK: mrs x9, pmevtyper12_el0
-# CHECK: mrs x9, pmevtyper13_el0
-# CHECK: mrs x9, pmevtyper14_el0
-# CHECK: mrs x9, pmevtyper15_el0
-# CHECK: mrs x9, pmevtyper16_el0
-# CHECK: mrs x9, pmevtyper17_el0
-# CHECK: mrs x9, pmevtyper18_el0
-# CHECK: mrs x9, pmevtyper19_el0
-# CHECK: mrs x9, pmevtyper20_el0
-# CHECK: mrs x9, pmevtyper21_el0
-# CHECK: mrs x9, pmevtyper22_el0
-# CHECK: mrs x9, pmevtyper23_el0
-# CHECK: mrs x9, pmevtyper24_el0
-# CHECK: mrs x9, pmevtyper25_el0
-# CHECK: mrs x9, pmevtyper26_el0
-# CHECK: mrs x9, pmevtyper27_el0
-# CHECK: mrs x9, pmevtyper28_el0
-# CHECK: mrs x9, pmevtyper29_el0
-# CHECK: mrs x9, pmevtyper30_el0
+# CHECK: msr {{teecr32_el1|TEECR32_EL1}}, x12
+# CHECK: msr {{osdtrrx_el1|OSDTRRX_EL1}}, x12
+# CHECK: msr {{mdccint_el1|MDCCINT_EL1}}, x12
+# CHECK: msr {{mdscr_el1|MDSCR_EL1}}, x12
+# CHECK: msr {{osdtrtx_el1|OSDTRTX_EL1}}, x12
+# CHECK: msr {{dbgdtr_el0|DBGDTR_EL0}}, x12
+# CHECK: msr {{dbgdtrtx_el0|DBGDTRTX_EL0}}, x12
+# CHECK: msr {{oseccr_el1|OSECCR_EL1}}, x12
+# CHECK: msr {{dbgvcr32_el2|DBGVCR32_EL2}}, x12
+# CHECK: msr {{dbgbvr0_el1|DBGBVR0_EL1}}, x12
+# CHECK: msr {{dbgbvr1_el1|DBGBVR1_EL1}}, x12
+# CHECK: msr {{dbgbvr2_el1|DBGBVR2_EL1}}, x12
+# CHECK: msr {{dbgbvr3_el1|DBGBVR3_EL1}}, x12
+# CHECK: msr {{dbgbvr4_el1|DBGBVR4_EL1}}, x12
+# CHECK: msr {{dbgbvr5_el1|DBGBVR5_EL1}}, x12
+# CHECK: msr {{dbgbvr6_el1|DBGBVR6_EL1}}, x12
+# CHECK: msr {{dbgbvr7_el1|DBGBVR7_EL1}}, x12
+# CHECK: msr {{dbgbvr8_el1|DBGBVR8_EL1}}, x12
+# CHECK: msr {{dbgbvr9_el1|DBGBVR9_EL1}}, x12
+# CHECK: msr {{dbgbvr10_el1|DBGBVR10_EL1}}, x12
+# CHECK: msr {{dbgbvr11_el1|DBGBVR11_EL1}}, x12
+# CHECK: msr {{dbgbvr12_el1|DBGBVR12_EL1}}, x12
+# CHECK: msr {{dbgbvr13_el1|DBGBVR13_EL1}}, x12
+# CHECK: msr {{dbgbvr14_el1|DBGBVR14_EL1}}, x12
+# CHECK: msr {{dbgbvr15_el1|DBGBVR15_EL1}}, x12
+# CHECK: msr {{dbgbcr0_el1|DBGBCR0_EL1}}, x12
+# CHECK: msr {{dbgbcr1_el1|DBGBCR1_EL1}}, x12
+# CHECK: msr {{dbgbcr2_el1|DBGBCR2_EL1}}, x12
+# CHECK: msr {{dbgbcr3_el1|DBGBCR3_EL1}}, x12
+# CHECK: msr {{dbgbcr4_el1|DBGBCR4_EL1}}, x12
+# CHECK: msr {{dbgbcr5_el1|DBGBCR5_EL1}}, x12
+# CHECK: msr {{dbgbcr6_el1|DBGBCR6_EL1}}, x12
+# CHECK: msr {{dbgbcr7_el1|DBGBCR7_EL1}}, x12
+# CHECK: msr {{dbgbcr8_el1|DBGBCR8_EL1}}, x12
+# CHECK: msr {{dbgbcr9_el1|DBGBCR9_EL1}}, x12
+# CHECK: msr {{dbgbcr10_el1|DBGBCR10_EL1}}, x12
+# CHECK: msr {{dbgbcr11_el1|DBGBCR11_EL1}}, x12
+# CHECK: msr {{dbgbcr12_el1|DBGBCR12_EL1}}, x12
+# CHECK: msr {{dbgbcr13_el1|DBGBCR13_EL1}}, x12
+# CHECK: msr {{dbgbcr14_el1|DBGBCR14_EL1}}, x12
+# CHECK: msr {{dbgbcr15_el1|DBGBCR15_EL1}}, x12
+# CHECK: msr {{dbgwvr0_el1|DBGWVR0_EL1}}, x12
+# CHECK: msr {{dbgwvr1_el1|DBGWVR1_EL1}}, x12
+# CHECK: msr {{dbgwvr2_el1|DBGWVR2_EL1}}, x12
+# CHECK: msr {{dbgwvr3_el1|DBGWVR3_EL1}}, x12
+# CHECK: msr {{dbgwvr4_el1|DBGWVR4_EL1}}, x12
+# CHECK: msr {{dbgwvr5_el1|DBGWVR5_EL1}}, x12
+# CHECK: msr {{dbgwvr6_el1|DBGWVR6_EL1}}, x12
+# CHECK: msr {{dbgwvr7_el1|DBGWVR7_EL1}}, x12
+# CHECK: msr {{dbgwvr8_el1|DBGWVR8_EL1}}, x12
+# CHECK: msr {{dbgwvr9_el1|DBGWVR9_EL1}}, x12
+# CHECK: msr {{dbgwvr10_el1|DBGWVR10_EL1}}, x12
+# CHECK: msr {{dbgwvr11_el1|DBGWVR11_EL1}}, x12
+# CHECK: msr {{dbgwvr12_el1|DBGWVR12_EL1}}, x12
+# CHECK: msr {{dbgwvr13_el1|DBGWVR13_EL1}}, x12
+# CHECK: msr {{dbgwvr14_el1|DBGWVR14_EL1}}, x12
+# CHECK: msr {{dbgwvr15_el1|DBGWVR15_EL1}}, x12
+# CHECK: msr {{dbgwcr0_el1|DBGWCR0_EL1}}, x12
+# CHECK: msr {{dbgwcr1_el1|DBGWCR1_EL1}}, x12
+# CHECK: msr {{dbgwcr2_el1|DBGWCR2_EL1}}, x12
+# CHECK: msr {{dbgwcr3_el1|DBGWCR3_EL1}}, x12
+# CHECK: msr {{dbgwcr4_el1|DBGWCR4_EL1}}, x12
+# CHECK: msr {{dbgwcr5_el1|DBGWCR5_EL1}}, x12
+# CHECK: msr {{dbgwcr6_el1|DBGWCR6_EL1}}, x12
+# CHECK: msr {{dbgwcr7_el1|DBGWCR7_EL1}}, x12
+# CHECK: msr {{dbgwcr8_el1|DBGWCR8_EL1}}, x12
+# CHECK: msr {{dbgwcr9_el1|DBGWCR9_EL1}}, x12
+# CHECK: msr {{dbgwcr10_el1|DBGWCR10_EL1}}, x12
+# CHECK: msr {{dbgwcr11_el1|DBGWCR11_EL1}}, x12
+# CHECK: msr {{dbgwcr12_el1|DBGWCR12_EL1}}, x12
+# CHECK: msr {{dbgwcr13_el1|DBGWCR13_EL1}}, x12
+# CHECK: msr {{dbgwcr14_el1|DBGWCR14_EL1}}, x12
+# CHECK: msr {{dbgwcr15_el1|DBGWCR15_EL1}}, x12
+# CHECK: msr {{teehbr32_el1|TEEHBR32_EL1}}, x12
+# CHECK: msr {{oslar_el1|OSLAR_EL1}}, x12
+# CHECK: msr {{osdlr_el1|OSDLR_EL1}}, x12
+# CHECK: msr {{dbgprcr_el1|DBGPRCR_EL1}}, x12
+# CHECK: msr {{dbgclaimset_el1|DBGCLAIMSET_EL1}}, x12
+# CHECK: msr {{dbgclaimclr_el1|DBGCLAIMCLR_EL1}}, x12
+# CHECK: msr {{csselr_el1|CSSELR_EL1}}, x12
+# CHECK: msr {{vpidr_el2|VPIDR_EL2}}, x12
+# CHECK: msr {{vmpidr_el2|VMPIDR_EL2}}, x12
+# CHECK: msr {{sctlr_el1|SCTLR_EL1}}, x12
+# CHECK: msr {{sctlr_el2|SCTLR_EL2}}, x12
+# CHECK: msr {{sctlr_el3|SCTLR_EL3}}, x12
+# CHECK: msr {{actlr_el1|ACTLR_EL1}}, x12
+# CHECK: msr {{actlr_el2|ACTLR_EL2}}, x12
+# CHECK: msr {{actlr_el3|ACTLR_EL3}}, x12
+# CHECK: msr {{cpacr_el1|CPACR_EL1}}, x12
+# CHECK: msr {{hcr_el2|HCR_EL2}}, x12
+# CHECK: msr {{scr_el3|SCR_EL3}}, x12
+# CHECK: msr {{mdcr_el2|MDCR_EL2}}, x12
+# CHECK: msr {{sder32_el3|SDER32_EL3}}, x12
+# CHECK: msr {{cptr_el2|CPTR_EL2}}, x12
+# CHECK: msr {{cptr_el3|CPTR_EL3}}, x12
+# CHECK: msr {{hstr_el2|HSTR_EL2}}, x12
+# CHECK: msr {{hacr_el2|HACR_EL2}}, x12
+# CHECK: msr {{mdcr_el3|MDCR_EL3}}, x12
+# CHECK: msr {{ttbr0_el1|TTBR0_EL1}}, x12
+# CHECK: msr {{ttbr0_el2|TTBR0_EL2}}, x12
+# CHECK: msr {{ttbr0_el3|TTBR0_EL3}}, x12
+# CHECK: msr {{ttbr1_el1|TTBR1_EL1}}, x12
+# CHECK: msr {{tcr_el1|TCR_EL1}}, x12
+# CHECK: msr {{tcr_el2|TCR_EL2}}, x12
+# CHECK: msr {{tcr_el3|TCR_EL3}}, x12
+# CHECK: msr {{vttbr_el2|VTTBR_EL2}}, x12
+# CHECK: msr {{vtcr_el2|VTCR_EL2}}, x12
+# CHECK: msr {{dacr32_el2|DACR32_EL2}}, x12
+# CHECK: msr {{spsr_el1|SPSR_EL1}}, x12
+# CHECK: msr {{spsr_el2|SPSR_EL2}}, x12
+# CHECK: msr {{spsr_el3|SPSR_EL3}}, x12
+# CHECK: msr {{elr_el1|ELR_EL1}}, x12
+# CHECK: msr {{elr_el2|ELR_EL2}}, x12
+# CHECK: msr {{elr_el3|ELR_EL3}}, x12
+# CHECK: msr {{sp_el0|SP_EL0}}, x12
+# CHECK: msr {{sp_el1|SP_EL1}}, x12
+# CHECK: msr {{sp_el2|SP_EL2}}, x12
+# CHECK: msr {{spsel|SPSEL}}, x12
+# CHECK: msr {{nzcv|NZCV}}, x12
+# CHECK: msr {{daif|DAIF}}, x12
+# CHECK: msr {{currentel|CURRENTEL}}, x12
+# CHECK: msr {{spsr_irq|SPSR_IRQ}}, x12
+# CHECK: msr {{spsr_abt|SPSR_ABT}}, x12
+# CHECK: msr {{spsr_und|SPSR_UND}}, x12
+# CHECK: msr {{spsr_fiq|SPSR_FIQ}}, x12
+# CHECK: msr {{fpcr|FPCR}}, x12
+# CHECK: msr {{fpsr|FPSR}}, x12
+# CHECK: msr {{dspsr_el0|DSPSR_EL0}}, x12
+# CHECK: msr {{dlr_el0|DLR_EL0}}, x12
+# CHECK: msr {{ifsr32_el2|IFSR32_EL2}}, x12
+# CHECK: msr {{afsr0_el1|AFSR0_EL1}}, x12
+# CHECK: msr {{afsr0_el2|AFSR0_EL2}}, x12
+# CHECK: msr {{afsr0_el3|AFSR0_EL3}}, x12
+# CHECK: msr {{afsr1_el1|AFSR1_EL1}}, x12
+# CHECK: msr {{afsr1_el2|AFSR1_EL2}}, x12
+# CHECK: msr {{afsr1_el3|AFSR1_EL3}}, x12
+# CHECK: msr {{esr_el1|ESR_EL1}}, x12
+# CHECK: msr {{esr_el2|ESR_EL2}}, x12
+# CHECK: msr {{esr_el3|ESR_EL3}}, x12
+# CHECK: msr {{fpexc32_el2|FPEXC32_EL2}}, x12
+# CHECK: msr {{far_el1|FAR_EL1}}, x12
+# CHECK: msr {{far_el2|FAR_EL2}}, x12
+# CHECK: msr {{far_el3|FAR_EL3}}, x12
+# CHECK: msr {{hpfar_el2|HPFAR_EL2}}, x12
+# CHECK: msr {{par_el1|PAR_EL1}}, x12
+# CHECK: msr {{pmcr_el0|PMCR_EL0}}, x12
+# CHECK: msr {{pmcntenset_el0|PMCNTENSET_EL0}}, x12
+# CHECK: msr {{pmcntenclr_el0|PMCNTENCLR_EL0}}, x12
+# CHECK: msr {{pmovsclr_el0|PMOVSCLR_EL0}}, x12
+# CHECK: msr {{pmselr_el0|PMSELR_EL0}}, x12
+# CHECK: msr {{pmccntr_el0|PMCCNTR_EL0}}, x12
+# CHECK: msr {{pmxevtyper_el0|PMXEVTYPER_EL0}}, x12
+# CHECK: msr {{pmxevcntr_el0|PMXEVCNTR_EL0}}, x12
+# CHECK: msr {{pmuserenr_el0|PMUSERENR_EL0}}, x12
+# CHECK: msr {{pmintenset_el1|PMINTENSET_EL1}}, x12
+# CHECK: msr {{pmintenclr_el1|PMINTENCLR_EL1}}, x12
+# CHECK: msr {{pmovsset_el0|PMOVSSET_EL0}}, x12
+# CHECK: msr {{mair_el1|MAIR_EL1}}, x12
+# CHECK: msr {{mair_el2|MAIR_EL2}}, x12
+# CHECK: msr {{mair_el3|MAIR_EL3}}, x12
+# CHECK: msr {{amair_el1|AMAIR_EL1}}, x12
+# CHECK: msr {{amair_el2|AMAIR_EL2}}, x12
+# CHECK: msr {{amair_el3|AMAIR_EL3}}, x12
+# CHECK: msr {{vbar_el1|VBAR_EL1}}, x12
+# CHECK: msr {{vbar_el2|VBAR_EL2}}, x12
+# CHECK: msr {{vbar_el3|VBAR_EL3}}, x12
+# CHECK: msr {{rmr_el1|RMR_EL1}}, x12
+# CHECK: msr {{rmr_el2|RMR_EL2}}, x12
+# CHECK: msr {{rmr_el3|RMR_EL3}}, x12
+# CHECK: msr {{tpidr_el0|TPIDR_EL0}}, x12
+# CHECK: msr {{tpidr_el2|TPIDR_EL2}}, x12
+# CHECK: msr {{tpidr_el3|TPIDR_EL3}}, x12
+# CHECK: msr {{tpidrro_el0|TPIDRRO_EL0}}, x12
+# CHECK: msr {{tpidr_el1|TPIDR_EL1}}, x12
+# CHECK: msr {{cntfrq_el0|CNTFRQ_EL0}}, x12
+# CHECK: msr {{cntvoff_el2|CNTVOFF_EL2}}, x12
+# CHECK: msr {{cntkctl_el1|CNTKCTL_EL1}}, x12
+# CHECK: msr {{cnthctl_el2|CNTHCTL_EL2}}, x12
+# CHECK: msr {{cntp_tval_el0|CNTP_TVAL_EL0}}, x12
+# CHECK: msr {{cnthp_tval_el2|CNTHP_TVAL_EL2}}, x12
+# CHECK: msr {{cntps_tval_el1|CNTPS_TVAL_EL1}}, x12
+# CHECK: msr {{cntp_ctl_el0|CNTP_CTL_EL0}}, x12
+# CHECK: msr {{cnthp_ctl_el2|CNTHP_CTL_EL2}}, x12
+# CHECK: msr {{cntps_ctl_el1|CNTPS_CTL_EL1}}, x12
+# CHECK: msr {{cntp_cval_el0|CNTP_CVAL_EL0}}, x12
+# CHECK: msr {{cnthp_cval_el2|CNTHP_CVAL_EL2}}, x12
+# CHECK: msr {{cntps_cval_el1|CNTPS_CVAL_EL1}}, x12
+# CHECK: msr {{cntv_tval_el0|CNTV_TVAL_EL0}}, x12
+# CHECK: msr {{cntv_ctl_el0|CNTV_CTL_EL0}}, x12
+# CHECK: msr {{cntv_cval_el0|CNTV_CVAL_EL0}}, x12
+# CHECK: msr {{pmevcntr0_el0|PMEVCNTR0_EL0}}, x12
+# CHECK: msr {{pmevcntr1_el0|PMEVCNTR1_EL0}}, x12
+# CHECK: msr {{pmevcntr2_el0|PMEVCNTR2_EL0}}, x12
+# CHECK: msr {{pmevcntr3_el0|PMEVCNTR3_EL0}}, x12
+# CHECK: msr {{pmevcntr4_el0|PMEVCNTR4_EL0}}, x12
+# CHECK: msr {{pmevcntr5_el0|PMEVCNTR5_EL0}}, x12
+# CHECK: msr {{pmevcntr6_el0|PMEVCNTR6_EL0}}, x12
+# CHECK: msr {{pmevcntr7_el0|PMEVCNTR7_EL0}}, x12
+# CHECK: msr {{pmevcntr8_el0|PMEVCNTR8_EL0}}, x12
+# CHECK: msr {{pmevcntr9_el0|PMEVCNTR9_EL0}}, x12
+# CHECK: msr {{pmevcntr10_el0|PMEVCNTR10_EL0}}, x12
+# CHECK: msr {{pmevcntr11_el0|PMEVCNTR11_EL0}}, x12
+# CHECK: msr {{pmevcntr12_el0|PMEVCNTR12_EL0}}, x12
+# CHECK: msr {{pmevcntr13_el0|PMEVCNTR13_EL0}}, x12
+# CHECK: msr {{pmevcntr14_el0|PMEVCNTR14_EL0}}, x12
+# CHECK: msr {{pmevcntr15_el0|PMEVCNTR15_EL0}}, x12
+# CHECK: msr {{pmevcntr16_el0|PMEVCNTR16_EL0}}, x12
+# CHECK: msr {{pmevcntr17_el0|PMEVCNTR17_EL0}}, x12
+# CHECK: msr {{pmevcntr18_el0|PMEVCNTR18_EL0}}, x12
+# CHECK: msr {{pmevcntr19_el0|PMEVCNTR19_EL0}}, x12
+# CHECK: msr {{pmevcntr20_el0|PMEVCNTR20_EL0}}, x12
+# CHECK: msr {{pmevcntr21_el0|PMEVCNTR21_EL0}}, x12
+# CHECK: msr {{pmevcntr22_el0|PMEVCNTR22_EL0}}, x12
+# CHECK: msr {{pmevcntr23_el0|PMEVCNTR23_EL0}}, x12
+# CHECK: msr {{pmevcntr24_el0|PMEVCNTR24_EL0}}, x12
+# CHECK: msr {{pmevcntr25_el0|PMEVCNTR25_EL0}}, x12
+# CHECK: msr {{pmevcntr26_el0|PMEVCNTR26_EL0}}, x12
+# CHECK: msr {{pmevcntr27_el0|PMEVCNTR27_EL0}}, x12
+# CHECK: msr {{pmevcntr28_el0|PMEVCNTR28_EL0}}, x12
+# CHECK: msr {{pmevcntr29_el0|PMEVCNTR29_EL0}}, x12
+# CHECK: msr {{pmevcntr30_el0|PMEVCNTR30_EL0}}, x12
+# CHECK: msr {{pmccfiltr_el0|PMCCFILTR_EL0}}, x12
+# CHECK: msr {{pmevtyper0_el0|PMEVTYPER0_EL0}}, x12
+# CHECK: msr {{pmevtyper1_el0|PMEVTYPER1_EL0}}, x12
+# CHECK: msr {{pmevtyper2_el0|PMEVTYPER2_EL0}}, x12
+# CHECK: msr {{pmevtyper3_el0|PMEVTYPER3_EL0}}, x12
+# CHECK: msr {{pmevtyper4_el0|PMEVTYPER4_EL0}}, x12
+# CHECK: msr {{pmevtyper5_el0|PMEVTYPER5_EL0}}, x12
+# CHECK: msr {{pmevtyper6_el0|PMEVTYPER6_EL0}}, x12
+# CHECK: msr {{pmevtyper7_el0|PMEVTYPER7_EL0}}, x12
+# CHECK: msr {{pmevtyper8_el0|PMEVTYPER8_EL0}}, x12
+# CHECK: msr {{pmevtyper9_el0|PMEVTYPER9_EL0}}, x12
+# CHECK: msr {{pmevtyper10_el0|PMEVTYPER10_EL0}}, x12
+# CHECK: msr {{pmevtyper11_el0|PMEVTYPER11_EL0}}, x12
+# CHECK: msr {{pmevtyper12_el0|PMEVTYPER12_EL0}}, x12
+# CHECK: msr {{pmevtyper13_el0|PMEVTYPER13_EL0}}, x12
+# CHECK: msr {{pmevtyper14_el0|PMEVTYPER14_EL0}}, x12
+# CHECK: msr {{pmevtyper15_el0|PMEVTYPER15_EL0}}, x12
+# CHECK: msr {{pmevtyper16_el0|PMEVTYPER16_EL0}}, x12
+# CHECK: msr {{pmevtyper17_el0|PMEVTYPER17_EL0}}, x12
+# CHECK: msr {{pmevtyper18_el0|PMEVTYPER18_EL0}}, x12
+# CHECK: msr {{pmevtyper19_el0|PMEVTYPER19_EL0}}, x12
+# CHECK: msr {{pmevtyper20_el0|PMEVTYPER20_EL0}}, x12
+# CHECK: msr {{pmevtyper21_el0|PMEVTYPER21_EL0}}, x12
+# CHECK: msr {{pmevtyper22_el0|PMEVTYPER22_EL0}}, x12
+# CHECK: msr {{pmevtyper23_el0|PMEVTYPER23_EL0}}, x12
+# CHECK: msr {{pmevtyper24_el0|PMEVTYPER24_EL0}}, x12
+# CHECK: msr {{pmevtyper25_el0|PMEVTYPER25_EL0}}, x12
+# CHECK: msr {{pmevtyper26_el0|PMEVTYPER26_EL0}}, x12
+# CHECK: msr {{pmevtyper27_el0|PMEVTYPER27_EL0}}, x12
+# CHECK: msr {{pmevtyper28_el0|PMEVTYPER28_EL0}}, x12
+# CHECK: msr {{pmevtyper29_el0|PMEVTYPER29_EL0}}, x12
+# CHECK: msr {{pmevtyper30_el0|PMEVTYPER30_EL0}}, x12
+# CHECK: mrs x9, {{teecr32_el1|TEECR32_EL1}}
+# CHECK: mrs x9, {{osdtrrx_el1|OSDTRRX_EL1}}
+# CHECK: mrs x9, {{mdccsr_el0|MDCCSR_EL0}}
+# CHECK: mrs x9, {{mdccint_el1|MDCCINT_EL1}}
+# CHECK: mrs x9, {{mdscr_el1|MDSCR_EL1}}
+# CHECK: mrs x9, {{osdtrtx_el1|OSDTRTX_EL1}}
+# CHECK: mrs x9, {{dbgdtr_el0|DBGDTR_EL0}}
+# CHECK: mrs x9, {{dbgdtrrx_el0|DBGDTRRX_EL0}}
+# CHECK: mrs x9, {{oseccr_el1|OSECCR_EL1}}
+# CHECK: mrs x9, {{dbgvcr32_el2|DBGVCR32_EL2}}
+# CHECK: mrs x9, {{dbgbvr0_el1|DBGBVR0_EL1}}
+# CHECK: mrs x9, {{dbgbvr1_el1|DBGBVR1_EL1}}
+# CHECK: mrs x9, {{dbgbvr2_el1|DBGBVR2_EL1}}
+# CHECK: mrs x9, {{dbgbvr3_el1|DBGBVR3_EL1}}
+# CHECK: mrs x9, {{dbgbvr4_el1|DBGBVR4_EL1}}
+# CHECK: mrs x9, {{dbgbvr5_el1|DBGBVR5_EL1}}
+# CHECK: mrs x9, {{dbgbvr6_el1|DBGBVR6_EL1}}
+# CHECK: mrs x9, {{dbgbvr7_el1|DBGBVR7_EL1}}
+# CHECK: mrs x9, {{dbgbvr8_el1|DBGBVR8_EL1}}
+# CHECK: mrs x9, {{dbgbvr9_el1|DBGBVR9_EL1}}
+# CHECK: mrs x9, {{dbgbvr10_el1|DBGBVR10_EL1}}
+# CHECK: mrs x9, {{dbgbvr11_el1|DBGBVR11_EL1}}
+# CHECK: mrs x9, {{dbgbvr12_el1|DBGBVR12_EL1}}
+# CHECK: mrs x9, {{dbgbvr13_el1|DBGBVR13_EL1}}
+# CHECK: mrs x9, {{dbgbvr14_el1|DBGBVR14_EL1}}
+# CHECK: mrs x9, {{dbgbvr15_el1|DBGBVR15_EL1}}
+# CHECK: mrs x9, {{dbgbcr0_el1|DBGBCR0_EL1}}
+# CHECK: mrs x9, {{dbgbcr1_el1|DBGBCR1_EL1}}
+# CHECK: mrs x9, {{dbgbcr2_el1|DBGBCR2_EL1}}
+# CHECK: mrs x9, {{dbgbcr3_el1|DBGBCR3_EL1}}
+# CHECK: mrs x9, {{dbgbcr4_el1|DBGBCR4_EL1}}
+# CHECK: mrs x9, {{dbgbcr5_el1|DBGBCR5_EL1}}
+# CHECK: mrs x9, {{dbgbcr6_el1|DBGBCR6_EL1}}
+# CHECK: mrs x9, {{dbgbcr7_el1|DBGBCR7_EL1}}
+# CHECK: mrs x9, {{dbgbcr8_el1|DBGBCR8_EL1}}
+# CHECK: mrs x9, {{dbgbcr9_el1|DBGBCR9_EL1}}
+# CHECK: mrs x9, {{dbgbcr10_el1|DBGBCR10_EL1}}
+# CHECK: mrs x9, {{dbgbcr11_el1|DBGBCR11_EL1}}
+# CHECK: mrs x9, {{dbgbcr12_el1|DBGBCR12_EL1}}
+# CHECK: mrs x9, {{dbgbcr13_el1|DBGBCR13_EL1}}
+# CHECK: mrs x9, {{dbgbcr14_el1|DBGBCR14_EL1}}
+# CHECK: mrs x9, {{dbgbcr15_el1|DBGBCR15_EL1}}
+# CHECK: mrs x9, {{dbgwvr0_el1|DBGWVR0_EL1}}
+# CHECK: mrs x9, {{dbgwvr1_el1|DBGWVR1_EL1}}
+# CHECK: mrs x9, {{dbgwvr2_el1|DBGWVR2_EL1}}
+# CHECK: mrs x9, {{dbgwvr3_el1|DBGWVR3_EL1}}
+# CHECK: mrs x9, {{dbgwvr4_el1|DBGWVR4_EL1}}
+# CHECK: mrs x9, {{dbgwvr5_el1|DBGWVR5_EL1}}
+# CHECK: mrs x9, {{dbgwvr6_el1|DBGWVR6_EL1}}
+# CHECK: mrs x9, {{dbgwvr7_el1|DBGWVR7_EL1}}
+# CHECK: mrs x9, {{dbgwvr8_el1|DBGWVR8_EL1}}
+# CHECK: mrs x9, {{dbgwvr9_el1|DBGWVR9_EL1}}
+# CHECK: mrs x9, {{dbgwvr10_el1|DBGWVR10_EL1}}
+# CHECK: mrs x9, {{dbgwvr11_el1|DBGWVR11_EL1}}
+# CHECK: mrs x9, {{dbgwvr12_el1|DBGWVR12_EL1}}
+# CHECK: mrs x9, {{dbgwvr13_el1|DBGWVR13_EL1}}
+# CHECK: mrs x9, {{dbgwvr14_el1|DBGWVR14_EL1}}
+# CHECK: mrs x9, {{dbgwvr15_el1|DBGWVR15_EL1}}
+# CHECK: mrs x9, {{dbgwcr0_el1|DBGWCR0_EL1}}
+# CHECK: mrs x9, {{dbgwcr1_el1|DBGWCR1_EL1}}
+# CHECK: mrs x9, {{dbgwcr2_el1|DBGWCR2_EL1}}
+# CHECK: mrs x9, {{dbgwcr3_el1|DBGWCR3_EL1}}
+# CHECK: mrs x9, {{dbgwcr4_el1|DBGWCR4_EL1}}
+# CHECK: mrs x9, {{dbgwcr5_el1|DBGWCR5_EL1}}
+# CHECK: mrs x9, {{dbgwcr6_el1|DBGWCR6_EL1}}
+# CHECK: mrs x9, {{dbgwcr7_el1|DBGWCR7_EL1}}
+# CHECK: mrs x9, {{dbgwcr8_el1|DBGWCR8_EL1}}
+# CHECK: mrs x9, {{dbgwcr9_el1|DBGWCR9_EL1}}
+# CHECK: mrs x9, {{dbgwcr10_el1|DBGWCR10_EL1}}
+# CHECK: mrs x9, {{dbgwcr11_el1|DBGWCR11_EL1}}
+# CHECK: mrs x9, {{dbgwcr12_el1|DBGWCR12_EL1}}
+# CHECK: mrs x9, {{dbgwcr13_el1|DBGWCR13_EL1}}
+# CHECK: mrs x9, {{dbgwcr14_el1|DBGWCR14_EL1}}
+# CHECK: mrs x9, {{dbgwcr15_el1|DBGWCR15_EL1}}
+# CHECK: mrs x9, {{mdrar_el1|MDRAR_EL1}}
+# CHECK: mrs x9, {{teehbr32_el1|TEEHBR32_EL1}}
+# CHECK: mrs x9, {{oslsr_el1|OSLSR_EL1}}
+# CHECK: mrs x9, {{osdlr_el1|OSDLR_EL1}}
+# CHECK: mrs x9, {{dbgprcr_el1|DBGPRCR_EL1}}
+# CHECK: mrs x9, {{dbgclaimset_el1|DBGCLAIMSET_EL1}}
+# CHECK: mrs x9, {{dbgclaimclr_el1|DBGCLAIMCLR_EL1}}
+# CHECK: mrs x9, {{dbgauthstatus_el1|DBGAUTHSTATUS_EL1}}
+# CHECK: mrs x9, {{midr_el1|MIDR_EL1}}
+# CHECK: mrs x9, {{ccsidr_el1|CCSIDR_EL1}}
+# CHECK: mrs x9, {{csselr_el1|CSSELR_EL1}}
+# CHECK: mrs x9, {{vpidr_el2|VPIDR_EL2}}
+# CHECK: mrs x9, {{clidr_el1|CLIDR_EL1}}
+# CHECK: mrs x9, {{ctr_el0|CTR_EL0}}
+# CHECK: mrs x9, {{mpidr_el1|MPIDR_EL1}}
+# CHECK: mrs x9, {{vmpidr_el2|VMPIDR_EL2}}
+# CHECK: mrs x9, {{revidr_el1|REVIDR_EL1}}
+# CHECK: mrs x9, {{aidr_el1|AIDR_EL1}}
+# CHECK: mrs x9, {{dczid_el0|DCZID_EL0}}
+# CHECK: mrs x9, {{id_pfr0_el1|ID_PFR0_EL1}}
+# CHECK: mrs x9, {{id_pfr1_el1|ID_PFR1_EL1}}
+# CHECK: mrs x9, {{id_dfr0_el1|ID_DFR0_EL1}}
+# CHECK: mrs x9, {{id_afr0_el1|ID_AFR0_EL1}}
+# CHECK: mrs x9, {{id_mmfr0_el1|ID_MMFR0_EL1}}
+# CHECK: mrs x9, {{id_mmfr1_el1|ID_MMFR1_EL1}}
+# CHECK: mrs x9, {{id_mmfr2_el1|ID_MMFR2_EL1}}
+# CHECK: mrs x9, {{id_mmfr3_el1|ID_MMFR3_EL1}}
+# CHECK: mrs x9, {{id_isar0_el1|ID_ISAR0_EL1}}
+# CHECK: mrs x9, {{id_isar1_el1|ID_ISAR1_EL1}}
+# CHECK: mrs x9, {{id_isar2_el1|ID_ISAR2_EL1}}
+# CHECK: mrs x9, {{id_isar3_el1|ID_ISAR3_EL1}}
+# CHECK: mrs x9, {{id_isar4_el1|ID_ISAR4_EL1}}
+# CHECK: mrs x9, {{id_isar5_el1|ID_ISAR5_EL1}}
+# CHECK: mrs x9, {{mvfr0_el1|MVFR0_EL1}}
+# CHECK: mrs x9, {{mvfr1_el1|MVFR1_EL1}}
+# CHECK: mrs x9, {{mvfr2_el1|MVFR2_EL1}}
+# CHECK: mrs x9, {{id_aa64pfr0_el1|ID_AA64PFR0_EL1}}
+# CHECK: mrs x9, {{id_aa64pfr1_el1|ID_AA64PFR1_EL1}}
+# CHECK: mrs x9, {{id_aa64dfr0_el1|ID_AA64DFR0_EL1}}
+# CHECK: mrs x9, {{id_aa64dfr1_el1|ID_AA64DFR1_EL1}}
+# CHECK: mrs x9, {{id_aa64afr0_el1|ID_AA64AFR0_EL1}}
+# CHECK: mrs x9, {{id_aa64afr1_el1|ID_AA64AFR1_EL1}}
+# CHECK: mrs x9, {{id_aa64isar0_el1|ID_AA64ISAR0_EL1}}
+# CHECK: mrs x9, {{id_aa64isar1_el1|ID_AA64ISAR1_EL1}}
+# CHECK: mrs x9, {{id_aa64mmfr0_el1|ID_AA64MMFR0_EL1}}
+# CHECK: mrs x9, {{id_aa64mmfr1_el1|ID_AA64MMFR1_EL1}}
+# CHECK: mrs x9, {{sctlr_el1|SCTLR_EL1}}
+# CHECK: mrs x9, {{sctlr_el2|SCTLR_EL2}}
+# CHECK: mrs x9, {{sctlr_el3|SCTLR_EL3}}
+# CHECK: mrs x9, {{actlr_el1|ACTLR_EL1}}
+# CHECK: mrs x9, {{actlr_el2|ACTLR_EL2}}
+# CHECK: mrs x9, {{actlr_el3|ACTLR_EL3}}
+# CHECK: mrs x9, {{cpacr_el1|CPACR_EL1}}
+# CHECK: mrs x9, {{hcr_el2|HCR_EL2}}
+# CHECK: mrs x9, {{scr_el3|SCR_EL3}}
+# CHECK: mrs x9, {{mdcr_el2|MDCR_EL2}}
+# CHECK: mrs x9, {{sder32_el3|SDER32_EL3}}
+# CHECK: mrs x9, {{cptr_el2|CPTR_EL2}}
+# CHECK: mrs x9, {{cptr_el3|CPTR_EL3}}
+# CHECK: mrs x9, {{hstr_el2|HSTR_EL2}}
+# CHECK: mrs x9, {{hacr_el2|HACR_EL2}}
+# CHECK: mrs x9, {{mdcr_el3|MDCR_EL3}}
+# CHECK: mrs x9, {{ttbr0_el1|TTBR0_EL1}}
+# CHECK: mrs x9, {{ttbr0_el2|TTBR0_EL2}}
+# CHECK: mrs x9, {{ttbr0_el3|TTBR0_EL3}}
+# CHECK: mrs x9, {{ttbr1_el1|TTBR1_EL1}}
+# CHECK: mrs x9, {{tcr_el1|TCR_EL1}}
+# CHECK: mrs x9, {{tcr_el2|TCR_EL2}}
+# CHECK: mrs x9, {{tcr_el3|TCR_EL3}}
+# CHECK: mrs x9, {{vttbr_el2|VTTBR_EL2}}
+# CHECK: mrs x9, {{vtcr_el2|VTCR_EL2}}
+# CHECK: mrs x9, {{dacr32_el2|DACR32_EL2}}
+# CHECK: mrs x9, {{spsr_el1|SPSR_EL1}}
+# CHECK: mrs x9, {{spsr_el2|SPSR_EL2}}
+# CHECK: mrs x9, {{spsr_el3|SPSR_EL3}}
+# CHECK: mrs x9, {{elr_el1|ELR_EL1}}
+# CHECK: mrs x9, {{elr_el2|ELR_EL2}}
+# CHECK: mrs x9, {{elr_el3|ELR_EL3}}
+# CHECK: mrs x9, {{sp_el0|SP_EL0}}
+# CHECK: mrs x9, {{sp_el1|SP_EL1}}
+# CHECK: mrs x9, {{sp_el2|SP_EL2}}
+# CHECK: mrs x9, {{spsel|SPSEL}}
+# CHECK: mrs x9, {{nzcv|NZCV}}
+# CHECK: mrs x9, {{daif|DAIF}}
+# CHECK: mrs x9, {{currentel|CURRENTEL}}
+# CHECK: mrs x9, {{spsr_irq|SPSR_IRQ}}
+# CHECK: mrs x9, {{spsr_abt|SPSR_ABT}}
+# CHECK: mrs x9, {{spsr_und|SPSR_UND}}
+# CHECK: mrs x9, {{spsr_fiq|SPSR_FIQ}}
+# CHECK: mrs x9, {{fpcr|FPCR}}
+# CHECK: mrs x9, {{fpsr|FPSR}}
+# CHECK: mrs x9, {{dspsr_el0|DSPSR_EL0}}
+# CHECK: mrs x9, {{dlr_el0|DLR_EL0}}
+# CHECK: mrs x9, {{ifsr32_el2|IFSR32_EL2}}
+# CHECK: mrs x9, {{afsr0_el1|AFSR0_EL1}}
+# CHECK: mrs x9, {{afsr0_el2|AFSR0_EL2}}
+# CHECK: mrs x9, {{afsr0_el3|AFSR0_EL3}}
+# CHECK: mrs x9, {{afsr1_el1|AFSR1_EL1}}
+# CHECK: mrs x9, {{afsr1_el2|AFSR1_EL2}}
+# CHECK: mrs x9, {{afsr1_el3|AFSR1_EL3}}
+# CHECK: mrs x9, {{esr_el1|ESR_EL1}}
+# CHECK: mrs x9, {{esr_el2|ESR_EL2}}
+# CHECK: mrs x9, {{esr_el3|ESR_EL3}}
+# CHECK: mrs x9, {{fpexc32_el2|FPEXC32_EL2}}
+# CHECK: mrs x9, {{far_el1|FAR_EL1}}
+# CHECK: mrs x9, {{far_el2|FAR_EL2}}
+# CHECK: mrs x9, {{far_el3|FAR_EL3}}
+# CHECK: mrs x9, {{hpfar_el2|HPFAR_EL2}}
+# CHECK: mrs x9, {{par_el1|PAR_EL1}}
+# CHECK: mrs x9, {{pmcr_el0|PMCR_EL0}}
+# CHECK: mrs x9, {{pmcntenset_el0|PMCNTENSET_EL0}}
+# CHECK: mrs x9, {{pmcntenclr_el0|PMCNTENCLR_EL0}}
+# CHECK: mrs x9, {{pmovsclr_el0|PMOVSCLR_EL0}}
+# CHECK: mrs x9, {{pmselr_el0|PMSELR_EL0}}
+# CHECK: mrs x9, {{pmceid0_el0|PMCEID0_EL0}}
+# CHECK: mrs x9, {{pmceid1_el0|PMCEID1_EL0}}
+# CHECK: mrs x9, {{pmccntr_el0|PMCCNTR_EL0}}
+# CHECK: mrs x9, {{pmxevtyper_el0|PMXEVTYPER_EL0}}
+# CHECK: mrs x9, {{pmxevcntr_el0|PMXEVCNTR_EL0}}
+# CHECK: mrs x9, {{pmuserenr_el0|PMUSERENR_EL0}}
+# CHECK: mrs x9, {{pmintenset_el1|PMINTENSET_EL1}}
+# CHECK: mrs x9, {{pmintenclr_el1|PMINTENCLR_EL1}}
+# CHECK: mrs x9, {{pmovsset_el0|PMOVSSET_EL0}}
+# CHECK: mrs x9, {{mair_el1|MAIR_EL1}}
+# CHECK: mrs x9, {{mair_el2|MAIR_EL2}}
+# CHECK: mrs x9, {{mair_el3|MAIR_EL3}}
+# CHECK: mrs x9, {{amair_el1|AMAIR_EL1}}
+# CHECK: mrs x9, {{amair_el2|AMAIR_EL2}}
+# CHECK: mrs x9, {{amair_el3|AMAIR_EL3}}
+# CHECK: mrs x9, {{vbar_el1|VBAR_EL1}}
+# CHECK: mrs x9, {{vbar_el2|VBAR_EL2}}
+# CHECK: mrs x9, {{vbar_el3|VBAR_EL3}}
+# CHECK: mrs x9, {{rvbar_el1|RVBAR_EL1}}
+# CHECK: mrs x9, {{rvbar_el2|RVBAR_EL2}}
+# CHECK: mrs x9, {{rvbar_el3|RVBAR_EL3}}
+# CHECK: mrs x9, {{rmr_el1|RMR_EL1}}
+# CHECK: mrs x9, {{rmr_el2|RMR_EL2}}
+# CHECK: mrs x9, {{rmr_el3|RMR_EL3}}
+# CHECK: mrs x9, {{isr_el1|ISR_EL1}}
+# CHECK: mrs x9, {{contextidr_el1|CONTEXTIDR_EL1}}
+# CHECK: mrs x9, {{tpidr_el0|TPIDR_EL0}}
+# CHECK: mrs x9, {{tpidr_el2|TPIDR_EL2}}
+# CHECK: mrs x9, {{tpidr_el3|TPIDR_EL3}}
+# CHECK: mrs x9, {{tpidrro_el0|TPIDRRO_EL0}}
+# CHECK: mrs x9, {{tpidr_el1|TPIDR_EL1}}
+# CHECK: mrs x9, {{cntfrq_el0|CNTFRQ_EL0}}
+# CHECK: mrs x9, {{cntpct_el0|CNTPCT_EL0}}
+# CHECK: mrs x9, {{cntvct_el0|CNTVCT_EL0}}
+# CHECK: mrs x9, {{cntvoff_el2|CNTVOFF_EL2}}
+# CHECK: mrs x9, {{cntkctl_el1|CNTKCTL_EL1}}
+# CHECK: mrs x9, {{cnthctl_el2|CNTHCTL_EL2}}
+# CHECK: mrs x9, {{cntp_tval_el0|CNTP_TVAL_EL0}}
+# CHECK: mrs x9, {{cnthp_tval_el2|CNTHP_TVAL_EL2}}
+# CHECK: mrs x9, {{cntps_tval_el1|CNTPS_TVAL_EL1}}
+# CHECK: mrs x9, {{cntp_ctl_el0|CNTP_CTL_EL0}}
+# CHECK: mrs x9, {{cnthp_ctl_el2|CNTHP_CTL_EL2}}
+# CHECK: mrs x9, {{cntps_ctl_el1|CNTPS_CTL_EL1}}
+# CHECK: mrs x9, {{cntp_cval_el0|CNTP_CVAL_EL0}}
+# CHECK: mrs x9, {{cnthp_cval_el2|CNTHP_CVAL_EL2}}
+# CHECK: mrs x9, {{cntps_cval_el1|CNTPS_CVAL_EL1}}
+# CHECK: mrs x9, {{cntv_tval_el0|CNTV_TVAL_EL0}}
+# CHECK: mrs x9, {{cntv_ctl_el0|CNTV_CTL_EL0}}
+# CHECK: mrs x9, {{cntv_cval_el0|CNTV_CVAL_EL0}}
+# CHECK: mrs x9, {{pmevcntr0_el0|PMEVCNTR0_EL0}}
+# CHECK: mrs x9, {{pmevcntr1_el0|PMEVCNTR1_EL0}}
+# CHECK: mrs x9, {{pmevcntr2_el0|PMEVCNTR2_EL0}}
+# CHECK: mrs x9, {{pmevcntr3_el0|PMEVCNTR3_EL0}}
+# CHECK: mrs x9, {{pmevcntr4_el0|PMEVCNTR4_EL0}}
+# CHECK: mrs x9, {{pmevcntr5_el0|PMEVCNTR5_EL0}}
+# CHECK: mrs x9, {{pmevcntr6_el0|PMEVCNTR6_EL0}}
+# CHECK: mrs x9, {{pmevcntr7_el0|PMEVCNTR7_EL0}}
+# CHECK: mrs x9, {{pmevcntr8_el0|PMEVCNTR8_EL0}}
+# CHECK: mrs x9, {{pmevcntr9_el0|PMEVCNTR9_EL0}}
+# CHECK: mrs x9, {{pmevcntr10_el0|PMEVCNTR10_EL0}}
+# CHECK: mrs x9, {{pmevcntr11_el0|PMEVCNTR11_EL0}}
+# CHECK: mrs x9, {{pmevcntr12_el0|PMEVCNTR12_EL0}}
+# CHECK: mrs x9, {{pmevcntr13_el0|PMEVCNTR13_EL0}}
+# CHECK: mrs x9, {{pmevcntr14_el0|PMEVCNTR14_EL0}}
+# CHECK: mrs x9, {{pmevcntr15_el0|PMEVCNTR15_EL0}}
+# CHECK: mrs x9, {{pmevcntr16_el0|PMEVCNTR16_EL0}}
+# CHECK: mrs x9, {{pmevcntr17_el0|PMEVCNTR17_EL0}}
+# CHECK: mrs x9, {{pmevcntr18_el0|PMEVCNTR18_EL0}}
+# CHECK: mrs x9, {{pmevcntr19_el0|PMEVCNTR19_EL0}}
+# CHECK: mrs x9, {{pmevcntr20_el0|PMEVCNTR20_EL0}}
+# CHECK: mrs x9, {{pmevcntr21_el0|PMEVCNTR21_EL0}}
+# CHECK: mrs x9, {{pmevcntr22_el0|PMEVCNTR22_EL0}}
+# CHECK: mrs x9, {{pmevcntr23_el0|PMEVCNTR23_EL0}}
+# CHECK: mrs x9, {{pmevcntr24_el0|PMEVCNTR24_EL0}}
+# CHECK: mrs x9, {{pmevcntr25_el0|PMEVCNTR25_EL0}}
+# CHECK: mrs x9, {{pmevcntr26_el0|PMEVCNTR26_EL0}}
+# CHECK: mrs x9, {{pmevcntr27_el0|PMEVCNTR27_EL0}}
+# CHECK: mrs x9, {{pmevcntr28_el0|PMEVCNTR28_EL0}}
+# CHECK: mrs x9, {{pmevcntr29_el0|PMEVCNTR29_EL0}}
+# CHECK: mrs x9, {{pmevcntr30_el0|PMEVCNTR30_EL0}}
+# CHECK: mrs x9, {{pmccfiltr_el0|PMCCFILTR_EL0}}
+# CHECK: mrs x9, {{pmevtyper0_el0|PMEVTYPER0_EL0}}
+# CHECK: mrs x9, {{pmevtyper1_el0|PMEVTYPER1_EL0}}
+# CHECK: mrs x9, {{pmevtyper2_el0|PMEVTYPER2_EL0}}
+# CHECK: mrs x9, {{pmevtyper3_el0|PMEVTYPER3_EL0}}
+# CHECK: mrs x9, {{pmevtyper4_el0|PMEVTYPER4_EL0}}
+# CHECK: mrs x9, {{pmevtyper5_el0|PMEVTYPER5_EL0}}
+# CHECK: mrs x9, {{pmevtyper6_el0|PMEVTYPER6_EL0}}
+# CHECK: mrs x9, {{pmevtyper7_el0|PMEVTYPER7_EL0}}
+# CHECK: mrs x9, {{pmevtyper8_el0|PMEVTYPER8_EL0}}
+# CHECK: mrs x9, {{pmevtyper9_el0|PMEVTYPER9_EL0}}
+# CHECK: mrs x9, {{pmevtyper10_el0|PMEVTYPER10_EL0}}
+# CHECK: mrs x9, {{pmevtyper11_el0|PMEVTYPER11_EL0}}
+# CHECK: mrs x9, {{pmevtyper12_el0|PMEVTYPER12_EL0}}
+# CHECK: mrs x9, {{pmevtyper13_el0|PMEVTYPER13_EL0}}
+# CHECK: mrs x9, {{pmevtyper14_el0|PMEVTYPER14_EL0}}
+# CHECK: mrs x9, {{pmevtyper15_el0|PMEVTYPER15_EL0}}
+# CHECK: mrs x9, {{pmevtyper16_el0|PMEVTYPER16_EL0}}
+# CHECK: mrs x9, {{pmevtyper17_el0|PMEVTYPER17_EL0}}
+# CHECK: mrs x9, {{pmevtyper18_el0|PMEVTYPER18_EL0}}
+# CHECK: mrs x9, {{pmevtyper19_el0|PMEVTYPER19_EL0}}
+# CHECK: mrs x9, {{pmevtyper20_el0|PMEVTYPER20_EL0}}
+# CHECK: mrs x9, {{pmevtyper21_el0|PMEVTYPER21_EL0}}
+# CHECK: mrs x9, {{pmevtyper22_el0|PMEVTYPER22_EL0}}
+# CHECK: mrs x9, {{pmevtyper23_el0|PMEVTYPER23_EL0}}
+# CHECK: mrs x9, {{pmevtyper24_el0|PMEVTYPER24_EL0}}
+# CHECK: mrs x9, {{pmevtyper25_el0|PMEVTYPER25_EL0}}
+# CHECK: mrs x9, {{pmevtyper26_el0|PMEVTYPER26_EL0}}
+# CHECK: mrs x9, {{pmevtyper27_el0|PMEVTYPER27_EL0}}
+# CHECK: mrs x9, {{pmevtyper28_el0|PMEVTYPER28_EL0}}
+# CHECK: mrs x9, {{pmevtyper29_el0|PMEVTYPER29_EL0}}
+# CHECK: mrs x9, {{pmevtyper30_el0|PMEVTYPER30_EL0}}
0xc 0x0 0x12 0xd5
0x4c 0x0 0x10 0xd5
@@ -4147,10 +4168,10 @@
0xa9 0xef 0x3b 0xd5
0xc9 0xef 0x3b 0xd5
-# CHECK: mrs x12, s3_7_c15_c1_5
-# CHECK: mrs x13, s3_2_c11_c15_7
-# CHECK: msr s3_0_c15_c0_0, x12
-# CHECK: msr s3_7_c11_c13_7, x5
+# CHECK: mrs x12, {{s3_7_c15_c1_5|S3_7_C15_C1_5}}
+# CHECK: mrs x13, {{s3_2_c11_c15_7|S3_2_C11_C15_7}}
+# CHECK: msr {{s3_0_c15_c0_0|S3_0_C15_C0_0}}, x12
+# CHECK: msr {{s3_7_c11_c13_7|S3_7_C11_C13_7}}, x5
0xac 0xf1 0x3f 0xd5
0xed 0xbf 0x3a 0xd5
0x0c 0xf0 0x18 0xd5
diff --git a/test/MC/Disassembler/AArch64/basic-a64-undefined.txt b/test/MC/Disassembler/AArch64/basic-a64-undefined.txt
index a17579cb1680..968a4541c79e 100644
--- a/test/MC/Disassembler/AArch64/basic-a64-undefined.txt
+++ b/test/MC/Disassembler/AArch64/basic-a64-undefined.txt
@@ -1,43 +1,66 @@
-# These spawn another process so they're rather expensive. Not many.
+# RUN: not llvm-mc -disassemble -triple=aarch64 %s 2> %t
+# RUN: FileCheck %s < %t
+# RUN: not llvm-mc -disassemble -triple=arm64 %s 2> %t
+# RUN: FileCheck %s < %t
# Instructions notionally in the add/sub (extended register) sheet, but with
# invalid shift amount or "opt" field.
-# RUN: echo "0x00 0x10 0xa0 0x0b" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x00 0x10 0x60 0x0b" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x00 0x14 0x20 0x0b" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+[0x00 0x10 0xa0 0x0b]
+[0x00 0x10 0x60 0x0b]
+[0x00 0x14 0x20 0x0b]
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
# Instructions notionally in the add/sub (immediate) sheet, but with
# invalid "shift" field.
-# RUN: echo "0xdf 0x3 0x80 0x91" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0xed 0x8e 0xc4 0x31" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x62 0xfc 0xbf 0x11" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x3 0xff 0xff 0x91" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+[0xdf 0x3 0x80 0x91]
+[0xed 0x8e 0xc4 0x31]
+[0x62 0xfc 0xbf 0x11]
+[0x3 0xff 0xff 0x91]
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
# Instructions notionally in the load/store (unsigned immediate) sheet.
# Only unallocated (int-register) variants are: opc=0b11, size=0b10, 0b11
-# RUN: echo "0xd7 0xfc 0xff 0xb9" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0xd7 0xfc 0xcf 0xf9" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+[0xd7 0xfc 0xff 0xb9]
+[0xd7 0xfc 0xcf 0xf9]
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
# Instructions notionally in the floating-point <-> fixed-point conversion
# Scale field is 64-<imm> and <imm> should be 1-32 for a 32-bit int register.
-# RUN: echo "0x23 0x01 0x18 0x1e" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x23 0x25 0x42 0x1e" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+[0x23 0x01 0x18 0x1e]
+[0x23 0x25 0x42 0x1e]
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
# Instructions notionally in the logical (shifted register) sheet, but with out
# of range shift: w-registers can only have 0-31.
-# RUN: echo "0x00 0x80 0x00 0x0a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+[0x00 0x80 0x00 0x0a]
+# CHECK: invalid instruction encoding
# Instructions notionally in the move wide (immediate) sheet, but with out
# of range shift: w-registers can only have 0 or 16.
-# RUN: echo "0x00 0x00 0xc0 0x12" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x12 0x34 0xe0 0x52" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-
-# Data-processing instructions are undefined when S=1 and for the 0b0000111 value in opcode:sf
-# RUN: echo "0x00 0x00 0xc0 0x5f" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x56 0x0c 0xc0 0x5a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+[0x00 0x00 0xc0 0x12]
+[0x12 0x34 0xe0 0x52]
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
-# Data-processing instructions (2 source) are undefined for a value of 0001xx:0:x or 0011xx:0:x for opcode:S:sf
-# RUN: echo "0x00 0x30 0xc1 0x1a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
-# RUN: echo "0x00 0x10 0xc1 0x1a" | llvm-mc -triple=aarch64 -disassemble 2>&1 | FileCheck %s
+# Data-processing instructions are undefined when S=1 and for the 0b0000111
+# value in opcode:sf
+[0x00 0x00 0xc0 0x5f]
+[0x56 0x0c 0xc0 0x5a]
+# CHECK: invalid instruction encoding
+# CHECK: invalid instruction encoding
+# Data-processing instructions (2 source) are undefined for a value of
+# 0001xx:0:x or 0011xx:0:x for opcode:S:sf
+[0x00 0x30 0xc1 0x1a]
+[0x00 0x10 0xc1 0x1a]
+# CHECK: invalid instruction encoding
# CHECK: invalid instruction encoding
+
+
diff --git a/test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt b/test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt
index 53638638d58c..2fccccbe841e 100644
--- a/test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt
+++ b/test/MC/Disassembler/AArch64/basic-a64-unpredictable.txt
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple=aarch64 -mattr=+fp-armv8 -disassemble < %s 2>&1 | FileCheck %s
+# RUN: llvm-mc -triple=arm64 -mattr=+fp-armv8 -disassemble < %s 2>&1 | FileCheck %s
#------------------------------------------------------------------------------
# Load-store exclusive
diff --git a/test/MC/Disassembler/AArch64/gicv3-regs.txt b/test/MC/Disassembler/AArch64/gicv3-regs.txt
index 4351f6460c75..851e83dab7f7 100644
--- a/test/MC/Disassembler/AArch64/gicv3-regs.txt
+++ b/test/MC/Disassembler/AArch64/gicv3-regs.txt
@@ -1,222 +1,223 @@
# RUN: llvm-mc -triple aarch64-none-linux-gnu -disassemble < %s | FileCheck %s
+# RUN: llvm-mc -triple arm64-none-linux-gnu -disassemble < %s | FileCheck %s
0x8 0xcc 0x38 0xd5
-# CHECK: mrs x8, icc_iar1_el1
+# CHECK: mrs x8, {{icc_iar1_el1|ICC_IAR1_EL1}}
0x1a 0xc8 0x38 0xd5
-# CHECK: mrs x26, icc_iar0_el1
+# CHECK: mrs x26, {{icc_iar0_el1|ICC_IAR0_EL1}}
0x42 0xcc 0x38 0xd5
-# CHECK: mrs x2, icc_hppir1_el1
+# CHECK: mrs x2, {{icc_hppir1_el1|ICC_HPPIR1_EL1}}
0x51 0xc8 0x38 0xd5
-# CHECK: mrs x17, icc_hppir0_el1
+# CHECK: mrs x17, {{icc_hppir0_el1|ICC_HPPIR0_EL1}}
0x7d 0xcb 0x38 0xd5
-# CHECK: mrs x29, icc_rpr_el1
+# CHECK: mrs x29, {{icc_rpr_el1|ICC_RPR_EL1}}
0x24 0xcb 0x3c 0xd5
-# CHECK: mrs x4, ich_vtr_el2
+# CHECK: mrs x4, {{ich_vtr_el2|ICH_VTR_EL2}}
0x78 0xcb 0x3c 0xd5
-# CHECK: mrs x24, ich_eisr_el2
+# CHECK: mrs x24, {{ich_eisr_el2|ICH_EISR_EL2}}
0xa9 0xcb 0x3c 0xd5
-# CHECK: mrs x9, ich_elsr_el2
+# CHECK: mrs x9, {{ich_elsr_el2|ICH_ELSR_EL2}}
0x78 0xcc 0x38 0xd5
-# CHECK: mrs x24, icc_bpr1_el1
+# CHECK: mrs x24, {{icc_bpr1_el1|ICC_BPR1_EL1}}
0x6e 0xc8 0x38 0xd5
-# CHECK: mrs x14, icc_bpr0_el1
+# CHECK: mrs x14, {{icc_bpr0_el1|ICC_BPR0_EL1}}
0x13 0x46 0x38 0xd5
-# CHECK: mrs x19, icc_pmr_el1
+# CHECK: mrs x19, {{icc_pmr_el1|ICC_PMR_EL1}}
0x97 0xcc 0x38 0xd5
-# CHECK: mrs x23, icc_ctlr_el1
+# CHECK: mrs x23, {{icc_ctlr_el1|ICC_CTLR_EL1}}
0x94 0xcc 0x3e 0xd5
-# CHECK: mrs x20, icc_ctlr_el3
+# CHECK: mrs x20, {{icc_ctlr_el3|ICC_CTLR_EL3}}
0xbc 0xcc 0x38 0xd5
-# CHECK: mrs x28, icc_sre_el1
+# CHECK: mrs x28, {{icc_sre_el1|ICC_SRE_EL1}}
0xb9 0xc9 0x3c 0xd5
-# CHECK: mrs x25, icc_sre_el2
+# CHECK: mrs x25, {{icc_sre_el2|ICC_SRE_EL2}}
0xa8 0xcc 0x3e 0xd5
-# CHECK: mrs x8, icc_sre_el3
+# CHECK: mrs x8, {{icc_sre_el3|ICC_SRE_EL3}}
0xd6 0xcc 0x38 0xd5
-# CHECK: mrs x22, icc_igrpen0_el1
+# CHECK: mrs x22, {{icc_igrpen0_el1|ICC_IGRPEN0_EL1}}
0xe5 0xcc 0x38 0xd5
-# CHECK: mrs x5, icc_igrpen1_el1
+# CHECK: mrs x5, {{icc_igrpen1_el1|ICC_IGRPEN1_EL1}}
0xe7 0xcc 0x3e 0xd5
-# CHECK: mrs x7, icc_igrpen1_el3
+# CHECK: mrs x7, {{icc_igrpen1_el3|ICC_IGRPEN1_EL3}}
0x16 0xcd 0x38 0xd5
-# CHECK: mrs x22, icc_seien_el1
+# CHECK: mrs x22, {{icc_seien_el1|ICC_SEIEN_EL1}}
0x84 0xc8 0x38 0xd5
-# CHECK: mrs x4, icc_ap0r0_el1
+# CHECK: mrs x4, {{icc_ap0r0_el1|ICC_AP0R0_EL1}}
0xab 0xc8 0x38 0xd5
-# CHECK: mrs x11, icc_ap0r1_el1
+# CHECK: mrs x11, {{icc_ap0r1_el1|ICC_AP0R1_EL1}}
0xdb 0xc8 0x38 0xd5
-# CHECK: mrs x27, icc_ap0r2_el1
+# CHECK: mrs x27, {{icc_ap0r2_el1|ICC_AP0R2_EL1}}
0xf5 0xc8 0x38 0xd5
-# CHECK: mrs x21, icc_ap0r3_el1
+# CHECK: mrs x21, {{icc_ap0r3_el1|ICC_AP0R3_EL1}}
0x2 0xc9 0x38 0xd5
-# CHECK: mrs x2, icc_ap1r0_el1
+# CHECK: mrs x2, {{icc_ap1r0_el1|ICC_AP1R0_EL1}}
0x35 0xc9 0x38 0xd5
-# CHECK: mrs x21, icc_ap1r1_el1
+# CHECK: mrs x21, {{icc_ap1r1_el1|ICC_AP1R1_EL1}}
0x4a 0xc9 0x38 0xd5
-# CHECK: mrs x10, icc_ap1r2_el1
+# CHECK: mrs x10, {{icc_ap1r2_el1|ICC_AP1R2_EL1}}
0x7b 0xc9 0x38 0xd5
-# CHECK: mrs x27, icc_ap1r3_el1
+# CHECK: mrs x27, {{icc_ap1r3_el1|ICC_AP1R3_EL1}}
0x14 0xc8 0x3c 0xd5
-# CHECK: mrs x20, ich_ap0r0_el2
+# CHECK: mrs x20, {{ich_ap0r0_el2|ICH_AP0R0_EL2}}
0x35 0xc8 0x3c 0xd5
-# CHECK: mrs x21, ich_ap0r1_el2
+# CHECK: mrs x21, {{ich_ap0r1_el2|ICH_AP0R1_EL2}}
0x45 0xc8 0x3c 0xd5
-# CHECK: mrs x5, ich_ap0r2_el2
+# CHECK: mrs x5, {{ich_ap0r2_el2|ICH_AP0R2_EL2}}
0x64 0xc8 0x3c 0xd5
-# CHECK: mrs x4, ich_ap0r3_el2
+# CHECK: mrs x4, {{ich_ap0r3_el2|ICH_AP0R3_EL2}}
0xf 0xc9 0x3c 0xd5
-# CHECK: mrs x15, ich_ap1r0_el2
+# CHECK: mrs x15, {{ich_ap1r0_el2|ICH_AP1R0_EL2}}
0x2c 0xc9 0x3c 0xd5
-# CHECK: mrs x12, ich_ap1r1_el2
+# CHECK: mrs x12, {{ich_ap1r1_el2|ICH_AP1R1_EL2}}
0x5b 0xc9 0x3c 0xd5
-# CHECK: mrs x27, ich_ap1r2_el2
+# CHECK: mrs x27, {{ich_ap1r2_el2|ICH_AP1R2_EL2}}
0x74 0xc9 0x3c 0xd5
-# CHECK: mrs x20, ich_ap1r3_el2
+# CHECK: mrs x20, {{ich_ap1r3_el2|ICH_AP1R3_EL2}}
0xa 0xcb 0x3c 0xd5
-# CHECK: mrs x10, ich_hcr_el2
+# CHECK: mrs x10, {{ich_hcr_el2|ICH_HCR_EL2}}
0x5b 0xcb 0x3c 0xd5
-# CHECK: mrs x27, ich_misr_el2
+# CHECK: mrs x27, {{ich_misr_el2|ICH_MISR_EL2}}
0xe6 0xcb 0x3c 0xd5
-# CHECK: mrs x6, ich_vmcr_el2
+# CHECK: mrs x6, {{ich_vmcr_el2|ICH_VMCR_EL2}}
0x93 0xc9 0x3c 0xd5
-# CHECK: mrs x19, ich_vseir_el2
+# CHECK: mrs x19, {{ich_vseir_el2|ICH_VSEIR_EL2}}
0x3 0xcc 0x3c 0xd5
-# CHECK: mrs x3, ich_lr0_el2
+# CHECK: mrs x3, {{ich_lr0_el2|ICH_LR0_EL2}}
0x21 0xcc 0x3c 0xd5
-# CHECK: mrs x1, ich_lr1_el2
+# CHECK: mrs x1, {{ich_lr1_el2|ICH_LR1_EL2}}
0x56 0xcc 0x3c 0xd5
-# CHECK: mrs x22, ich_lr2_el2
+# CHECK: mrs x22, {{ich_lr2_el2|ICH_LR2_EL2}}
0x75 0xcc 0x3c 0xd5
-# CHECK: mrs x21, ich_lr3_el2
+# CHECK: mrs x21, {{ich_lr3_el2|ICH_LR3_EL2}}
0x86 0xcc 0x3c 0xd5
-# CHECK: mrs x6, ich_lr4_el2
+# CHECK: mrs x6, {{ich_lr4_el2|ICH_LR4_EL2}}
0xaa 0xcc 0x3c 0xd5
-# CHECK: mrs x10, ich_lr5_el2
+# CHECK: mrs x10, {{ich_lr5_el2|ICH_LR5_EL2}}
0xcb 0xcc 0x3c 0xd5
-# CHECK: mrs x11, ich_lr6_el2
+# CHECK: mrs x11, {{ich_lr6_el2|ICH_LR6_EL2}}
0xec 0xcc 0x3c 0xd5
-# CHECK: mrs x12, ich_lr7_el2
+# CHECK: mrs x12, {{ich_lr7_el2|ICH_LR7_EL2}}
0x0 0xcd 0x3c 0xd5
-# CHECK: mrs x0, ich_lr8_el2
+# CHECK: mrs x0, {{ich_lr8_el2|ICH_LR8_EL2}}
0x35 0xcd 0x3c 0xd5
-# CHECK: mrs x21, ich_lr9_el2
+# CHECK: mrs x21, {{ich_lr9_el2|ICH_LR9_EL2}}
0x4d 0xcd 0x3c 0xd5
-# CHECK: mrs x13, ich_lr10_el2
+# CHECK: mrs x13, {{ich_lr10_el2|ICH_LR10_EL2}}
0x7a 0xcd 0x3c 0xd5
-# CHECK: mrs x26, ich_lr11_el2
+# CHECK: mrs x26, {{ich_lr11_el2|ICH_LR11_EL2}}
0x81 0xcd 0x3c 0xd5
-# CHECK: mrs x1, ich_lr12_el2
+# CHECK: mrs x1, {{ich_lr12_el2|ICH_LR12_EL2}}
0xa8 0xcd 0x3c 0xd5
-# CHECK: mrs x8, ich_lr13_el2
+# CHECK: mrs x8, {{ich_lr13_el2|ICH_LR13_EL2}}
0xc2 0xcd 0x3c 0xd5
-# CHECK: mrs x2, ich_lr14_el2
+# CHECK: mrs x2, {{ich_lr14_el2|ICH_LR14_EL2}}
0xe8 0xcd 0x3c 0xd5
-# CHECK: mrs x8, ich_lr15_el2
+# CHECK: mrs x8, {{ich_lr15_el2|ICH_LR15_EL2}}
0x3b 0xcc 0x18 0xd5
-# CHECK: msr icc_eoir1_el1, x27
+# CHECK: msr {{icc_eoir1_el1|ICC_EOIR1_EL1}}, x27
0x25 0xc8 0x18 0xd5
-# CHECK: msr icc_eoir0_el1, x5
+# CHECK: msr {{icc_eoir0_el1|ICC_EOIR0_EL1}}, x5
0x2d 0xcb 0x18 0xd5
-# CHECK: msr icc_dir_el1, x13
+# CHECK: msr {{icc_dir_el1|ICC_DIR_EL1}}, x13
0xb5 0xcb 0x18 0xd5
-# CHECK: msr icc_sgi1r_el1, x21
+# CHECK: msr {{icc_sgi1r_el1|ICC_SGI1R_EL1}}, x21
0xd9 0xcb 0x18 0xd5
-# CHECK: msr icc_asgi1r_el1, x25
+# CHECK: msr {{icc_asgi1r_el1|ICC_ASGI1R_EL1}}, x25
0xfc 0xcb 0x18 0xd5
-# CHECK: msr icc_sgi0r_el1, x28
+# CHECK: msr {{icc_sgi0r_el1|ICC_SGI0R_EL1}}, x28
0x67 0xcc 0x18 0xd5
-# CHECK: msr icc_bpr1_el1, x7
+# CHECK: msr {{icc_bpr1_el1|ICC_BPR1_EL1}}, x7
0x69 0xc8 0x18 0xd5
-# CHECK: msr icc_bpr0_el1, x9
+# CHECK: msr {{icc_bpr0_el1|ICC_BPR0_EL1}}, x9
0x1d 0x46 0x18 0xd5
-# CHECK: msr icc_pmr_el1, x29
+# CHECK: msr {{icc_pmr_el1|ICC_PMR_EL1}}, x29
0x98 0xcc 0x18 0xd5
-# CHECK: msr icc_ctlr_el1, x24
+# CHECK: msr {{icc_ctlr_el1|ICC_CTLR_EL1}}, x24
0x80 0xcc 0x1e 0xd5
-# CHECK: msr icc_ctlr_el3, x0
+# CHECK: msr {{icc_ctlr_el3|ICC_CTLR_EL3}}, x0
0xa2 0xcc 0x18 0xd5
-# CHECK: msr icc_sre_el1, x2
+# CHECK: msr {{icc_sre_el1|ICC_SRE_EL1}}, x2
0xa5 0xc9 0x1c 0xd5
-# CHECK: msr icc_sre_el2, x5
+# CHECK: msr {{icc_sre_el2|ICC_SRE_EL2}}, x5
0xaa 0xcc 0x1e 0xd5
-# CHECK: msr icc_sre_el3, x10
+# CHECK: msr {{icc_sre_el3|ICC_SRE_EL3}}, x10
0xd6 0xcc 0x18 0xd5
-# CHECK: msr icc_igrpen0_el1, x22
+# CHECK: msr {{icc_igrpen0_el1|ICC_IGRPEN0_EL1}}, x22
0xeb 0xcc 0x18 0xd5
-# CHECK: msr icc_igrpen1_el1, x11
+# CHECK: msr {{icc_igrpen1_el1|ICC_IGRPEN1_EL1}}, x11
0xe8 0xcc 0x1e 0xd5
-# CHECK: msr icc_igrpen1_el3, x8
+# CHECK: msr {{icc_igrpen1_el3|ICC_IGRPEN1_EL3}}, x8
0x4 0xcd 0x18 0xd5
-# CHECK: msr icc_seien_el1, x4
+# CHECK: msr {{icc_seien_el1|ICC_SEIEN_EL1}}, x4
0x9b 0xc8 0x18 0xd5
-# CHECK: msr icc_ap0r0_el1, x27
+# CHECK: msr {{icc_ap0r0_el1|ICC_AP0R0_EL1}}, x27
0xa5 0xc8 0x18 0xd5
-# CHECK: msr icc_ap0r1_el1, x5
+# CHECK: msr {{icc_ap0r1_el1|ICC_AP0R1_EL1}}, x5
0xd4 0xc8 0x18 0xd5
-# CHECK: msr icc_ap0r2_el1, x20
+# CHECK: msr {{icc_ap0r2_el1|ICC_AP0R2_EL1}}, x20
0xe0 0xc8 0x18 0xd5
-# CHECK: msr icc_ap0r3_el1, x0
+# CHECK: msr {{icc_ap0r3_el1|ICC_AP0R3_EL1}}, x0
0x2 0xc9 0x18 0xd5
-# CHECK: msr icc_ap1r0_el1, x2
+# CHECK: msr {{icc_ap1r0_el1|ICC_AP1R0_EL1}}, x2
0x3d 0xc9 0x18 0xd5
-# CHECK: msr icc_ap1r1_el1, x29
+# CHECK: msr {{icc_ap1r1_el1|ICC_AP1R1_EL1}}, x29
0x57 0xc9 0x18 0xd5
-# CHECK: msr icc_ap1r2_el1, x23
+# CHECK: msr {{icc_ap1r2_el1|ICC_AP1R2_EL1}}, x23
0x6b 0xc9 0x18 0xd5
-# CHECK: msr icc_ap1r3_el1, x11
+# CHECK: msr {{icc_ap1r3_el1|ICC_AP1R3_EL1}}, x11
0x2 0xc8 0x1c 0xd5
-# CHECK: msr ich_ap0r0_el2, x2
+# CHECK: msr {{ich_ap0r0_el2|ICH_AP0R0_EL2}}, x2
0x3b 0xc8 0x1c 0xd5
-# CHECK: msr ich_ap0r1_el2, x27
+# CHECK: msr {{ich_ap0r1_el2|ICH_AP0R1_EL2}}, x27
0x47 0xc8 0x1c 0xd5
-# CHECK: msr ich_ap0r2_el2, x7
+# CHECK: msr {{ich_ap0r2_el2|ICH_AP0R2_EL2}}, x7
0x61 0xc8 0x1c 0xd5
-# CHECK: msr ich_ap0r3_el2, x1
+# CHECK: msr {{ich_ap0r3_el2|ICH_AP0R3_EL2}}, x1
0x7 0xc9 0x1c 0xd5
-# CHECK: msr ich_ap1r0_el2, x7
+# CHECK: msr {{ich_ap1r0_el2|ICH_AP1R0_EL2}}, x7
0x2c 0xc9 0x1c 0xd5
-# CHECK: msr ich_ap1r1_el2, x12
+# CHECK: msr {{ich_ap1r1_el2|ICH_AP1R1_EL2}}, x12
0x4e 0xc9 0x1c 0xd5
-# CHECK: msr ich_ap1r2_el2, x14
+# CHECK: msr {{ich_ap1r2_el2|ICH_AP1R2_EL2}}, x14
0x6d 0xc9 0x1c 0xd5
-# CHECK: msr ich_ap1r3_el2, x13
+# CHECK: msr {{ich_ap1r3_el2|ICH_AP1R3_EL2}}, x13
0x1 0xcb 0x1c 0xd5
-# CHECK: msr ich_hcr_el2, x1
+# CHECK: msr {{ich_hcr_el2|ICH_HCR_EL2}}, x1
0x4a 0xcb 0x1c 0xd5
-# CHECK: msr ich_misr_el2, x10
+# CHECK: msr {{ich_misr_el2|ICH_MISR_EL2}}, x10
0xf8 0xcb 0x1c 0xd5
-# CHECK: msr ich_vmcr_el2, x24
+# CHECK: msr {{ich_vmcr_el2|ICH_VMCR_EL2}}, x24
0x9d 0xc9 0x1c 0xd5
-# CHECK: msr ich_vseir_el2, x29
+# CHECK: msr {{ich_vseir_el2|ICH_VSEIR_EL2}}, x29
0x1a 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr0_el2, x26
+# CHECK: msr {{ich_lr0_el2|ICH_LR0_EL2}}, x26
0x29 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr1_el2, x9
+# CHECK: msr {{ich_lr1_el2|ICH_LR1_EL2}}, x9
0x52 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr2_el2, x18
+# CHECK: msr {{ich_lr2_el2|ICH_LR2_EL2}}, x18
0x7a 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr3_el2, x26
+# CHECK: msr {{ich_lr3_el2|ICH_LR3_EL2}}, x26
0x96 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr4_el2, x22
+# CHECK: msr {{ich_lr4_el2|ICH_LR4_EL2}}, x22
0xba 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr5_el2, x26
+# CHECK: msr {{ich_lr5_el2|ICH_LR5_EL2}}, x26
0xdb 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr6_el2, x27
+# CHECK: msr {{ich_lr6_el2|ICH_LR6_EL2}}, x27
0xe8 0xcc 0x1c 0xd5
-# CHECK: msr ich_lr7_el2, x8
+# CHECK: msr {{ich_lr7_el2|ICH_LR7_EL2}}, x8
0x11 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr8_el2, x17
+# CHECK: msr {{ich_lr8_el2|ICH_LR8_EL2}}, x17
0x33 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr9_el2, x19
+# CHECK: msr {{ich_lr9_el2|ICH_LR9_EL2}}, x19
0x51 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr10_el2, x17
+# CHECK: msr {{ich_lr10_el2|ICH_LR10_EL2}}, x17
0x65 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr11_el2, x5
+# CHECK: msr {{ich_lr11_el2|ICH_LR11_EL2}}, x5
0x9d 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr12_el2, x29
+# CHECK: msr {{ich_lr12_el2|ICH_LR12_EL2}}, x29
0xa2 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr13_el2, x2
+# CHECK: msr {{ich_lr13_el2|ICH_LR13_EL2}}, x2
0xcd 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr14_el2, x13
+# CHECK: msr {{ich_lr14_el2|ICH_LR14_EL2}}, x13
0xfb 0xcd 0x1c 0xd5
-# CHECK: msr ich_lr15_el2, x27
+# CHECK: msr {{ich_lr15_el2|ICH_LR15_EL2}}, x27
diff --git a/test/MC/Disassembler/AArch64/ldp-offset-predictable.txt b/test/MC/Disassembler/AArch64/ldp-offset-predictable.txt
index 7ff495f4996d..3c443a999a28 100644
--- a/test/MC/Disassembler/AArch64/ldp-offset-predictable.txt
+++ b/test/MC/Disassembler/AArch64/ldp-offset-predictable.txt
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple=aarch64 -disassemble < %s 2>&1 | FileCheck %s
+# RUN: llvm-mc -triple=arm64 -disassemble < %s 2>&1 | FileCheck %s
# Stores are OK.
0xe0 0x83 0x00 0xa9
diff --git a/test/MC/Disassembler/AArch64/ldp-postind.predictable.txt b/test/MC/Disassembler/AArch64/ldp-postind.predictable.txt
index 637ebdbdba8a..6ba33ad99376 100644
--- a/test/MC/Disassembler/AArch64/ldp-postind.predictable.txt
+++ b/test/MC/Disassembler/AArch64/ldp-postind.predictable.txt
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple=aarch64 -mattr=+fp-armv8 -disassemble < %s 2>&1 | FileCheck %s
+# RUN: llvm-mc -triple=arm64 -mattr=+fp-armv8 -disassemble < %s 2>&1 | FileCheck %s
# None of these instructions should be classified as unpredictable:
diff --git a/test/MC/Disassembler/AArch64/ldp-preind.predictable.txt b/test/MC/Disassembler/AArch64/ldp-preind.predictable.txt
index f52d37ffda9b..19153406f01d 100644
--- a/test/MC/Disassembler/AArch64/ldp-preind.predictable.txt
+++ b/test/MC/Disassembler/AArch64/ldp-preind.predictable.txt
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple=aarch64 -mattr=+fp-armv8 -disassemble < %s 2>&1 | FileCheck %s
+# RUN: llvm-mc -triple=arm64 -mattr=+fp-armv8 -disassemble < %s 2>&1 | FileCheck %s
# None of these instructions should be classified as unpredictable:
diff --git a/test/MC/Disassembler/AArch64/lit.local.cfg b/test/MC/Disassembler/AArch64/lit.local.cfg
index 9a66a00189ea..180bb8a77f11 100644
--- a/test/MC/Disassembler/AArch64/lit.local.cfg
+++ b/test/MC/Disassembler/AArch64/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'AArch64' in targets:
+if 'AArch64' not in config.root.targets:
config.unsupported = True
diff --git a/test/MC/Disassembler/AArch64/neon-instructions.txt b/test/MC/Disassembler/AArch64/neon-instructions.txt
index 863730ac6be8..35906684f9ca 100644
--- a/test/MC/Disassembler/AArch64/neon-instructions.txt
+++ b/test/MC/Disassembler/AArch64/neon-instructions.txt
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon -disassemble < %s | FileCheck %s
+# RUN: llvm-mc -triple arm64-none-linux-gnu -mattr=+neon -disassemble < %s | FileCheck %s
#------------------------------------------------------------------------------
# Vector Integer Add/Sub
@@ -87,7 +88,7 @@
# Vector Bitwise OR - immediate
#------------------------------------------------------------------------------
# CHECK: movi v31.4s, #0xff, lsl #24
-# CHECK: mvni v0.2s, #0x0
+# CHECK: mvni v0.2s, #{{0x0|0}}
# CHECK: bic v15.4h, #0xf, lsl #8
# CHECK: orr v16.8h, #0x1f
0xff 0x67 0x07 0x4f
@@ -132,10 +133,8 @@
# Vector Move - register
#------------------------------------------------------------------------------
-# FIXME: these should print as "mov", but TableGen can't handle it.
-
-# CHECK: orr v1.16b, v15.16b, v15.16b
-# CHECK: orr v25.8b, v4.8b, v4.8b
+# CHECK: mov v1.16b, v15.16b
+# CHECK: mov v25.8b, v4.8b
0xe1 0x1d 0xaf 0x4e
0x99 0x1c 0xa4 0x0e
@@ -246,31 +245,31 @@
#----------------------------------------------------------------------
# Vector Compare Mask Equal to Zero (Integer)
#----------------------------------------------------------------------
-# CHECK: cmeq v31.16b, v15.16b, #0x0
+# CHECK: cmeq v31.16b, v15.16b, #{{0x0|0}}
0xff 0x99 0x20 0x4e
#----------------------------------------------------------------------
# Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
#----------------------------------------------------------------------
-# CHECK: cmge v3.8b, v15.8b, #0x0
+# CHECK: cmge v3.8b, v15.8b, #{{0x0|0}}
0xe3 0x89 0x20 0x2e
#----------------------------------------------------------------------
# Vector Compare Mask Greater Than Zero (Signed Integer)
#----------------------------------------------------------------------
-# CHECK: cmgt v22.2s, v9.2s, #0x0
+# CHECK: cmgt v22.2s, v9.2s, #{{0x0|0}}
0x36 0x89 0xa0 0x0e
#----------------------------------------------------------------------
# Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
#----------------------------------------------------------------------
-# CHECK: cmle v5.2d, v14.2d, #0x0
+# CHECK: cmle v5.2d, v14.2d, #{{0x0|0}}
0xc5 0x99 0xe0 0x6e
#----------------------------------------------------------------------
# Vector Compare Mask Less Than Zero (Signed Integer)
#----------------------------------------------------------------------
-# CHECK: cmlt v13.8h, v11.8h, #0x0
+# CHECK: cmlt v13.8h, v11.8h, #{{0x0|0}}
0x6d 0xa9 0x60 0x4e
#----------------------------------------------------------------------
@@ -1559,7 +1558,7 @@
#----------------------------------------------------------------------
# Scalar Compare Bitwise Equal To Zero
#----------------------------------------------------------------------
-# CHECK: cmeq d20, d21, #0x0
+# CHECK: cmeq d20, d21, #{{0x0|0}}
0xb4,0x9a,0xe0,0x5e
#----------------------------------------------------------------------
@@ -1578,7 +1577,7 @@
#----------------------------------------------------------------------
# Scalar Compare Signed Greater Than Or Equal To Zero
#----------------------------------------------------------------------
-# CHECK: cmge d20, d21, #0x0
+# CHECK: cmge d20, d21, #{{0x0|0}}
0xb4,0x8a,0xe0,0x7e
#----------------------------------------------------------------------
@@ -1596,19 +1595,19 @@
#----------------------------------------------------------------------
# Scalar Compare Signed Greater Than Zero
#----------------------------------------------------------------------
-# CHECK: cmgt d20, d21, #0x0
+# CHECK: cmgt d20, d21, #{{0x0|0}}
0xb4,0x8a,0xe0,0x5e
#----------------------------------------------------------------------
# Scalar Compare Signed Less Than Or Equal To Zero
#----------------------------------------------------------------------
-# CHECK: cmle d20, d21, #0x0
+# CHECK: cmle d20, d21, #{{0x0|0}}
0xb4,0x9a,0xe0,0x7e
#----------------------------------------------------------------------
# Scalar Compare Less Than Zero
#----------------------------------------------------------------------
-# CHECK: cmlt d20, d21, #0x0
+# CHECK: cmlt d20, d21, #{{0x0|0}}
0xb4,0xaa,0xe0,0x5e
#----------------------------------------------------------------------
@@ -2008,34 +2007,34 @@
#----------------------------------------------------------------------
# Vector load/store multiple N-element structure
#----------------------------------------------------------------------
-# CHECK: ld1 {v0.16b}, [x0]
-# CHECK: ld1 {v15.8h, v16.8h}, [x15]
-# CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp]
-# CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
+# CHECK: ld1 { v0.16b }, [x0]
+# CHECK: ld1 { v15.8h, v16.8h }, [x15]
+# CHECK: ld1 { v31.4s, v0.4s, v1.4s }, [sp]
+# CHECK: ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
0x00,0x70,0x40,0x4c
0xef,0xa5,0x40,0x4c
0xff,0x6b,0x40,0x4c
0x00,0x2c,0x40,0x4c
-# CHECK: ld2 {v0.8b, v1.8b}, [x0]
-# CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15]
-# CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
+# CHECK: ld2 { v0.8b, v1.8b }, [x0]
+# CHECK: ld3 { v15.4h, v16.4h, v17.4h }, [x15]
+# CHECK: ld4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
0x00,0x80,0x40,0x0c
0xef,0x45,0x40,0x0c
0xff,0x0b,0x40,0x0c
-# CHECK: st1 {v0.16b}, [x0]
-# CHECK: st1 {v15.8h, v16.8h}, [x15]
-# CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp]
-# CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
+# CHECK: st1 { v0.16b }, [x0]
+# CHECK: st1 { v15.8h, v16.8h }, [x15]
+# CHECK: st1 { v31.4s, v0.4s, v1.4s }, [sp]
+# CHECK: st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
0x00,0x70,0x00,0x4c
0xef,0xa5,0x00,0x4c
0xff,0x6b,0x00,0x4c
0x00,0x2c,0x00,0x4c
-# CHECK: st2 {v0.8b, v1.8b}, [x0]
-# CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15]
-# CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
+# CHECK: st2 { v0.8b, v1.8b }, [x0]
+# CHECK: st3 { v15.4h, v16.4h, v17.4h }, [x15]
+# CHECK: st4 { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
0x00,0x80,0x00,0x0c
0xef,0x45,0x00,0x0c
0xff,0x0b,0x00,0x0c
@@ -2043,35 +2042,35 @@
#----------------------------------------------------------------------
# Vector load/store multiple N-element structure (post-index)
#----------------------------------------------------------------------
-# CHECK: ld1 {v15.8h}, [x15], x2
-# CHECK: ld1 {v31.4s, v0.4s}, [sp], #32
-# CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0], #48
-# CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
+# CHECK: ld1 { v15.8h }, [x15], x2
+# CHECK: ld1 { v31.4s, v0.4s }, [sp], #32
+# CHECK: ld1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+# CHECK: ld1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
0xef,0x75,0xc2,0x4c
0xff,0xab,0xdf,0x4c
0x00,0x6c,0xdf,0x4c
0x00,0x20,0xc3,0x0c
-# CHECK: ld2 {v0.16b, v1.16b}, [x0], x1
-# CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
-# CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
+# CHECK: ld2 { v0.16b, v1.16b }, [x0], x1
+# CHECK: ld3 { v15.8h, v16.8h, v17.8h }, [x15], x2
+# CHECK: ld4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
0x00,0x80,0xc1,0x4c
0xef,0x45,0xc2,0x4c
0xff,0x0b,0xdf,0x4c
-# CHECK: st1 {v15.8h}, [x15], x2
-# CHECK: st1 {v31.4s, v0.4s}, [sp], #32
-# CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0], #48
-# CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
+# CHECK: st1 { v15.8h }, [x15], x2
+# CHECK: st1 { v31.4s, v0.4s }, [sp], #32
+# CHECK: st1 { v0.2d, v1.2d, v2.2d }, [x0], #48
+# CHECK: st1 { v0.8b, v1.8b, v2.8b, v3.8b }, [x0], x3
0xef,0x75,0x82,0x4c
0xff,0xab,0x9f,0x4c
0x00,0x6c,0x9f,0x4c
0x00,0x20,0x83,0x0c
-# CHECK: st2 {v0.16b, v1.16b}, [x0], x1
-# CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15], x2
-# CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
+# CHECK: st2 { v0.16b, v1.16b }, [x0], x1
+# CHECK: st3 { v15.8h, v16.8h, v17.8h }, [x15], x2
+# CHECK: st4 { v31.4s, v0.4s, v1.4s, v2.4s }, [sp], #64
0x00,0x80,0x81,0x4c
0xef,0x45,0x82,0x4c
0xff,0x0b,0x9f,0x4c
@@ -2080,14 +2079,14 @@
# Vector load single N-element structure to all lanes of N
# consecutive registers (N = 1,2,3,4)
#----------------------------------------------------------------------
-# CHECK: ld1r {v0.16b}, [x0]
-# CHECK: ld1r {v15.8h}, [x15]
-# CHECK: ld2r {v31.4s, v0.4s}, [sp]
-# CHECK: ld2r {v0.2d, v1.2d}, [x0]
-# CHECK: ld3r {v0.8b, v1.8b, v2.8b}, [x0]
-# CHECK: ld3r {v15.4h, v16.4h, v17.4h}, [x15]
-# CHECK: ld4r {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
-# CHECK: ld4r {v31.1d, v0.1d, v1.1d, v2.1d}, [sp]
+# CHECK: ld1r { v0.16b }, [x0]
+# CHECK: ld1r { v15.8h }, [x15]
+# CHECK: ld2r { v31.4s, v0.4s }, [sp]
+# CHECK: ld2r { v0.2d, v1.2d }, [x0]
+# CHECK: ld3r { v0.8b, v1.8b, v2.8b }, [x0]
+# CHECK: ld3r { v15.4h, v16.4h, v17.4h }, [x15]
+# CHECK: ld4r { v31.2s, v0.2s, v1.2s, v2.2s }, [sp]
+# CHECK: ld4r { v31.1d, v0.1d, v1.1d, v2.1d }, [sp]
0x00,0xc0,0x40,0x4d
0xef,0xc5,0x40,0x4d
0xff,0xcb,0x60,0x4d
@@ -2101,14 +2100,14 @@
# Vector load/store single N-element structure to/from one lane of N
# consecutive registers (N = 1,2,3,4)
#----------------------------------------------------------------------
-# CHECK: ld1 {v0.b}[9], [x0]
-# CHECK: ld2 {v15.h, v16.h}[7], [x15]
-# CHECK: ld3 {v31.s, v0.s, v1.s}[3], [sp]
-# CHECK: ld4 {v0.d, v1.d, v2.d, v3.d}[1], [x0]
-# CHECK: st1 {v0.d}[1], [x0]
-# CHECK: st2 {v31.s, v0.s}[3], [sp]
-# CHECK: st3 {v15.h, v16.h, v17.h}[7], [x15]
-# CHECK: st4 {v0.b, v1.b, v2.b, v3.b}[9], [x0]
+# CHECK: ld1 { v0.b }[9], [x0]
+# CHECK: ld2 { v15.h, v16.h }[7], [x15]
+# CHECK: ld3 { v31.s, v0.s, v1.s }[3], [sp]
+# CHECK: ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0]
+# CHECK: st1 { v0.d }[1], [x0]
+# CHECK: st2 { v31.s, v0.s }[3], [sp]
+# CHECK: st3 { v15.h, v16.h, v17.h }[7], [x15]
+# CHECK: st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0]
0x00,0x04,0x40,0x4d
0xef,0x59,0x60,0x4d
0xff,0xb3,0x40,0x4d
@@ -2122,14 +2121,14 @@
# Post-index of vector load single N-element structure to all lanes of N
# consecutive registers (N = 1,2,3,4)
#----------------------------------------------------------------------
-# CHECK: ld1r {v0.16b}, [x0], #1
-# CHECK: ld1r {v15.8h}, [x15], #2
-# CHECK: ld2r {v31.4s, v0.4s}, [sp], #8
-# CHECK: ld2r {v0.2d, v1.2d}, [x0], #16
-# CHECK: ld3r {v0.8b, v1.8b, v2.8b}, [x0], #3
-# CHECK: ld3r {v15.4h, v16.4h, v17.4h}, [x15], #6
-# CHECK: ld4r {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], x30
-# CHECK: ld4r {v31.1d, v0.1d, v1.1d, v2.1d}, [sp], x7
+# CHECK: ld1r { v0.16b }, [x0], #1
+# CHECK: ld1r { v15.8h }, [x15], #2
+# CHECK: ld2r { v31.4s, v0.4s }, [sp], #8
+# CHECK: ld2r { v0.2d, v1.2d }, [x0], #16
+# CHECK: ld3r { v0.8b, v1.8b, v2.8b }, [x0], #3
+# CHECK: ld3r { v15.4h, v16.4h, v17.4h }, [x15], #6
+# CHECK: ld4r { v31.2s, v0.2s, v1.2s, v2.2s }, [sp], x30
+# CHECK: ld4r { v31.1d, v0.1d, v1.1d, v2.1d }, [sp], x7
0x00,0xc0,0xdf,0x4d
0xef,0xc5,0xdf,0x4d
0xff,0xcb,0xff,0x4d
@@ -2143,15 +2142,15 @@
# Post-index of vector load/store single N-element structure to/from
# one lane of N consecutive registers (N = 1,2,3,4)
#----------------------------------------------------------------------
-# CHECK: ld1 {v0.b}[9], [x0], #1
-# CHECK: ld2 {v15.h, v16.h}[7], [x15], #4
-# CHECK: ld3 {v31.s, v0.s, v1.s}[3], [sp], x3
-# CHECK: ld4 {v0.d, v1.d, v2.d, v3.d}[1], [x0], #32
-# CHECK: ld4 {v0.h, v1.h, v2.h, v3.h}[7], [x0], x0
-# CHECK: st1 {v0.d}[1], [x0], #8
-# CHECK: st2 {v31.s, v0.s}[3], [sp], #8
-# CHECK: st3 {v15.h, v16.h, v17.h}[7], [x15], #6
-# CHECK: st4 {v0.b, v1.b, v2.b, v3.b}[9], [x0], x5
+# CHECK: ld1 { v0.b }[9], [x0], #1
+# CHECK: ld2 { v15.h, v16.h }[7], [x15], #4
+# CHECK: ld3 { v31.s, v0.s, v1.s }[3], [sp], x3
+# CHECK: ld4 { v0.d, v1.d, v2.d, v3.d }[1], [x0], #32
+# CHECK: ld4 { v0.h, v1.h, v2.h, v3.h }[7], [x0], x0
+# CHECK: st1 { v0.d }[1], [x0], #8
+# CHECK: st2 { v31.s, v0.s }[3], [sp], #8
+# CHECK: st3 { v15.h, v16.h, v17.h }[7], [x15], #6
+# CHECK: st4 { v0.b, v1.b, v2.b, v3.b }[9], [x0], x5
0x00,0x04,0xdf,0x4d
0xef,0x59,0xff,0x4d
0xff,0xb3,0xc3,0x4d
@@ -2167,8 +2166,8 @@
#----------------------------------------------------------------------
0x20,0x18,0x02,0x2e
0x20,0x18,0x02,0x6e
-# CHECK: ext v0.8b, v1.8b, v2.8b, #0x3
-# CHECK: ext v0.16b, v1.16b, v2.16b, #0x3
+# CHECK: ext v0.8b, v1.8b, v2.8b, #{{0x3|3}}
+# CHECK: ext v0.16b, v1.16b, v2.16b, #{{0x3|3}}
#----------------------------------------------------------------------
# unzip with 3 same vectors to get primary result
@@ -2481,10 +2480,10 @@
#----------------------------------------------------------------------
#Duplicate element (scalar)
#----------------------------------------------------------------------
-# CHECK: dup b0, v0.b[15]
-# CHECK: dup h2, v31.h[5]
-# CHECK: dup s17, v2.s[2]
-# CHECK: dup d6, v12.d[1]
+# CHECK: {{dup|mov}} b0, v0.b[15]
+# CHECK: {{dup|mov}} h2, v31.h[5]
+# CHECK: {{dup|mov}} s17, v2.s[2]
+# CHECK: {{dup|mov}} d6, v12.d[1]
0x00 0x04 0x1f 0x5e
0xe2 0x07 0x16 0x5e
0x51 0x04 0x14 0x5e
@@ -2497,37 +2496,37 @@
0xf0,0x23,0x02,0x0e
0x20,0x40,0x02,0x0e
0xf0,0x62,0x02,0x0e
-# CHECK: tbl v0.8b, {v1.16b}, v2.8b
-# CHECK: tbl v16.8b, {v31.16b, v0.16b}, v2.8b
-# CHECK: tbl v0.8b, {v1.16b, v2.16b, v3.16b}, v2.8b
-# CHECK: tbl v16.8b, {v23.16b, v24.16b, v25.16b, v26.16b}, v2.8b
+# CHECK: tbl v0.8b, { v1.16b }, v2.8b
+# CHECK: tbl v16.8b, { v31.16b, v0.16b }, v2.8b
+# CHECK: tbl v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b
+# CHECK: tbl v16.8b, { v23.16b, v24.16b, v25.16b, v26.16b }, v2.8b
0x20,0x00,0x02,0x4e
0xf0,0x23,0x02,0x4e
0x20,0x40,0x02,0x4e
0xe0,0x63,0x02,0x4e
-# CHECK: tbl v0.16b, {v1.16b}, v2.16b
-# CHECK: tbl v16.16b, {v31.16b, v0.16b}, v2.16b
-# CHECK: tbl v0.16b, {v1.16b, v2.16b, v3.16b}, v2.16b
-# CHECK: tbl v0.16b, {v31.16b, v0.16b, v1.16b, v2.16b}, v2.16b
+# CHECK: tbl v0.16b, { v1.16b }, v2.16b
+# CHECK: tbl v16.16b, { v31.16b, v0.16b }, v2.16b
+# CHECK: tbl v0.16b, { v1.16b, v2.16b, v3.16b }, v2.16b
+# CHECK: tbl v0.16b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.16b
0x20,0x10,0x02,0x0e
0xf0,0x33,0x02,0x0e
0x20,0x50,0x02,0x0e
0xf0,0x72,0x02,0x0e
-# CHECK: tbx v0.8b, {v1.16b}, v2.8b
-# CHECK: tbx v16.8b, {v31.16b, v0.16b}, v2.8b
-# CHECK: tbx v0.8b, {v1.16b, v2.16b, v3.16b}, v2.8b
-# CHECK: tbx v16.8b, {v23.16b, v24.16b, v25.16b, v26.16b}, v2.8b
+# CHECK: tbx v0.8b, { v1.16b }, v2.8b
+# CHECK: tbx v16.8b, { v31.16b, v0.16b }, v2.8b
+# CHECK: tbx v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b
+# CHECK: tbx v16.8b, { v23.16b, v24.16b, v25.16b, v26.16b }, v2.8b
0x20,0x10,0x02,0x4e
0xf0,0x33,0x02,0x4e
0x20,0x50,0x02,0x4e
0xf0,0x73,0x02,0x4e
-# CHECK: tbx v0.16b, {v1.16b}, v2.16b
-# CHECK: tbx v16.16b, {v31.16b, v0.16b}, v2.16b
-# CHECK: tbx v0.16b, {v1.16b, v2.16b, v3.16b}, v2.16b
-# CHECK: tbx v16.16b, {v31.16b, v0.16b, v1.16b, v2.16b}, v2.16b
+# CHECK: tbx v0.16b, { v1.16b }, v2.16b
+# CHECK: tbx v16.16b, { v31.16b, v0.16b }, v2.16b
+# CHECK: tbx v0.16b, { v1.16b, v2.16b, v3.16b }, v2.16b
+# CHECK: tbx v16.16b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.16b
#----------------------------------------------------------------------
# Scalar Floating-point Convert To Lower Precision Narrow, Rounding To
diff --git a/test/MC/Disassembler/AArch64/trace-regs.txt b/test/MC/Disassembler/AArch64/trace-regs.txt
index 10c5937f5dea..43171e3d2f66 100644
--- a/test/MC/Disassembler/AArch64/trace-regs.txt
+++ b/test/MC/Disassembler/AArch64/trace-regs.txt
@@ -1,736 +1,737 @@
# RUN: llvm-mc -triple aarch64-none-linux-gnu -disassemble < %s | FileCheck %s
+# RUN: llvm-mc -triple arm64-none-linux-gnu -disassemble < %s | FileCheck %s
0x8 0x3 0x31 0xd5
-# CHECK: mrs x8, trcstatr
+# CHECK: mrs x8, {{trcstatr|TRCSTATR}}
0xc9 0x0 0x31 0xd5
-# CHECK: mrs x9, trcidr8
+# CHECK: mrs x9, {{trcidr8|TRCIDR8}}
0xcb 0x1 0x31 0xd5
-# CHECK: mrs x11, trcidr9
+# CHECK: mrs x11, {{trcidr9|TRCIDR9}}
0xd9 0x2 0x31 0xd5
-# CHECK: mrs x25, trcidr10
+# CHECK: mrs x25, {{trcidr10|TRCIDR10}}
0xc7 0x3 0x31 0xd5
-# CHECK: mrs x7, trcidr11
+# CHECK: mrs x7, {{trcidr11|TRCIDR11}}
0xc7 0x4 0x31 0xd5
-# CHECK: mrs x7, trcidr12
+# CHECK: mrs x7, {{trcidr12|TRCIDR12}}
0xc6 0x5 0x31 0xd5
-# CHECK: mrs x6, trcidr13
+# CHECK: mrs x6, {{trcidr13|TRCIDR13}}
0xfb 0x8 0x31 0xd5
-# CHECK: mrs x27, trcidr0
+# CHECK: mrs x27, {{trcidr0|TRCIDR0}}
0xfd 0x9 0x31 0xd5
-# CHECK: mrs x29, trcidr1
+# CHECK: mrs x29, {{trcidr1|TRCIDR1}}
0xe4 0xa 0x31 0xd5
-# CHECK: mrs x4, trcidr2
+# CHECK: mrs x4, {{trcidr2|TRCIDR2}}
0xe8 0xb 0x31 0xd5
-# CHECK: mrs x8, trcidr3
+# CHECK: mrs x8, {{trcidr3|TRCIDR3}}
0xef 0xc 0x31 0xd5
-# CHECK: mrs x15, trcidr4
+# CHECK: mrs x15, {{trcidr4|TRCIDR4}}
0xf4 0xd 0x31 0xd5
-# CHECK: mrs x20, trcidr5
+# CHECK: mrs x20, {{trcidr5|TRCIDR5}}
0xe6 0xe 0x31 0xd5
-# CHECK: mrs x6, trcidr6
+# CHECK: mrs x6, {{trcidr6|TRCIDR6}}
0xe6 0xf 0x31 0xd5
-# CHECK: mrs x6, trcidr7
+# CHECK: mrs x6, {{trcidr7|TRCIDR7}}
0x98 0x11 0x31 0xd5
-# CHECK: mrs x24, trcoslsr
+# CHECK: mrs x24, {{trcoslsr|TRCOSLSR}}
0x92 0x15 0x31 0xd5
-# CHECK: mrs x18, trcpdsr
+# CHECK: mrs x18, {{trcpdsr|TRCPDSR}}
0xdc 0x7a 0x31 0xd5
-# CHECK: mrs x28, trcdevaff0
+# CHECK: mrs x28, {{trcdevaff0|TRCDEVAFF0}}
0xc5 0x7b 0x31 0xd5
-# CHECK: mrs x5, trcdevaff1
+# CHECK: mrs x5, {{trcdevaff1|TRCDEVAFF1}}
0xc5 0x7d 0x31 0xd5
-# CHECK: mrs x5, trclsr
+# CHECK: mrs x5, {{trclsr|TRCLSR}}
0xcb 0x7e 0x31 0xd5
-# CHECK: mrs x11, trcauthstatus
+# CHECK: mrs x11, {{trcauthstatus|TRCAUTHSTATUS}}
0xcd 0x7f 0x31 0xd5
-# CHECK: mrs x13, trcdevarch
+# CHECK: mrs x13, {{trcdevarch|TRCDEVARCH}}
0xf2 0x72 0x31 0xd5
-# CHECK: mrs x18, trcdevid
+# CHECK: mrs x18, {{trcdevid|TRCDEVID}}
0xf6 0x73 0x31 0xd5
-# CHECK: mrs x22, trcdevtype
+# CHECK: mrs x22, {{trcdevtype|TRCDEVTYPE}}
0xee 0x74 0x31 0xd5
-# CHECK: mrs x14, trcpidr4
+# CHECK: mrs x14, {{trcpidr4|TRCPIDR4}}
0xe5 0x75 0x31 0xd5
-# CHECK: mrs x5, trcpidr5
+# CHECK: mrs x5, {{trcpidr5|TRCPIDR5}}
0xe5 0x76 0x31 0xd5
-# CHECK: mrs x5, trcpidr6
+# CHECK: mrs x5, {{trcpidr6|TRCPIDR6}}
0xe9 0x77 0x31 0xd5
-# CHECK: mrs x9, trcpidr7
+# CHECK: mrs x9, {{trcpidr7|TRCPIDR7}}
0xef 0x78 0x31 0xd5
-# CHECK: mrs x15, trcpidr0
+# CHECK: mrs x15, {{trcpidr0|TRCPIDR0}}
0xe6 0x79 0x31 0xd5
-# CHECK: mrs x6, trcpidr1
+# CHECK: mrs x6, {{trcpidr1|TRCPIDR1}}
0xeb 0x7a 0x31 0xd5
-# CHECK: mrs x11, trcpidr2
+# CHECK: mrs x11, {{trcpidr2|TRCPIDR2}}
0xf4 0x7b 0x31 0xd5
-# CHECK: mrs x20, trcpidr3
+# CHECK: mrs x20, {{trcpidr3|TRCPIDR3}}
0xf1 0x7c 0x31 0xd5
-# CHECK: mrs x17, trccidr0
+# CHECK: mrs x17, {{trccidr0|TRCCIDR0}}
0xe2 0x7d 0x31 0xd5
-# CHECK: mrs x2, trccidr1
+# CHECK: mrs x2, {{trccidr1|TRCCIDR1}}
0xf4 0x7e 0x31 0xd5
-# CHECK: mrs x20, trccidr2
+# CHECK: mrs x20, {{trccidr2|TRCCIDR2}}
0xe4 0x7f 0x31 0xd5
-# CHECK: mrs x4, trccidr3
+# CHECK: mrs x4, {{trccidr3|TRCCIDR3}}
0xb 0x1 0x31 0xd5
-# CHECK: mrs x11, trcprgctlr
+# CHECK: mrs x11, {{trcprgctlr|TRCPRGCTLR}}
0x17 0x2 0x31 0xd5
-# CHECK: mrs x23, trcprocselr
+# CHECK: mrs x23, {{trcprocselr|TRCPROCSELR}}
0xd 0x4 0x31 0xd5
-# CHECK: mrs x13, trcconfigr
+# CHECK: mrs x13, {{trcconfigr|TRCCONFIGR}}
0x17 0x6 0x31 0xd5
-# CHECK: mrs x23, trcauxctlr
+# CHECK: mrs x23, {{trcauxctlr|TRCAUXCTLR}}
0x9 0x8 0x31 0xd5
-# CHECK: mrs x9, trceventctl0r
+# CHECK: mrs x9, {{trceventctl0r|TRCEVENTCTL0R}}
0x10 0x9 0x31 0xd5
-# CHECK: mrs x16, trceventctl1r
+# CHECK: mrs x16, {{trceventctl1r|TRCEVENTCTL1R}}
0x4 0xb 0x31 0xd5
-# CHECK: mrs x4, trcstallctlr
+# CHECK: mrs x4, {{trcstallctlr|TRCSTALLCTLR}}
0xe 0xc 0x31 0xd5
-# CHECK: mrs x14, trctsctlr
+# CHECK: mrs x14, {{trctsctlr|TRCTSCTLR}}
0x18 0xd 0x31 0xd5
-# CHECK: mrs x24, trcsyncpr
+# CHECK: mrs x24, {{trcsyncpr|TRCSYNCPR}}
0x1c 0xe 0x31 0xd5
-# CHECK: mrs x28, trcccctlr
+# CHECK: mrs x28, {{trcccctlr|TRCCCCTLR}}
0xf 0xf 0x31 0xd5
-# CHECK: mrs x15, trcbbctlr
+# CHECK: mrs x15, {{trcbbctlr|TRCBBCTLR}}
0x21 0x0 0x31 0xd5
-# CHECK: mrs x1, trctraceidr
+# CHECK: mrs x1, {{trctraceidr|TRCTRACEIDR}}
0x34 0x1 0x31 0xd5
-# CHECK: mrs x20, trcqctlr
+# CHECK: mrs x20, {{trcqctlr|TRCQCTLR}}
0x42 0x0 0x31 0xd5
-# CHECK: mrs x2, trcvictlr
+# CHECK: mrs x2, {{trcvictlr|TRCVICTLR}}
0x4c 0x1 0x31 0xd5
-# CHECK: mrs x12, trcviiectlr
+# CHECK: mrs x12, {{trcviiectlr|TRCVIIECTLR}}
0x50 0x2 0x31 0xd5
-# CHECK: mrs x16, trcvissctlr
+# CHECK: mrs x16, {{trcvissctlr|TRCVISSCTLR}}
0x48 0x3 0x31 0xd5
-# CHECK: mrs x8, trcvipcssctlr
+# CHECK: mrs x8, {{trcvipcssctlr|TRCVIPCSSCTLR}}
0x5b 0x8 0x31 0xd5
-# CHECK: mrs x27, trcvdctlr
+# CHECK: mrs x27, {{trcvdctlr|TRCVDCTLR}}
0x49 0x9 0x31 0xd5
-# CHECK: mrs x9, trcvdsacctlr
+# CHECK: mrs x9, {{trcvdsacctlr|TRCVDSACCTLR}}
0x40 0xa 0x31 0xd5
-# CHECK: mrs x0, trcvdarcctlr
+# CHECK: mrs x0, {{trcvdarcctlr|TRCVDARCCTLR}}
0x8d 0x0 0x31 0xd5
-# CHECK: mrs x13, trcseqevr0
+# CHECK: mrs x13, {{trcseqevr0|TRCSEQEVR0}}
0x8b 0x1 0x31 0xd5
-# CHECK: mrs x11, trcseqevr1
+# CHECK: mrs x11, {{trcseqevr1|TRCSEQEVR1}}
0x9a 0x2 0x31 0xd5
-# CHECK: mrs x26, trcseqevr2
+# CHECK: mrs x26, {{trcseqevr2|TRCSEQEVR2}}
0x8e 0x6 0x31 0xd5
-# CHECK: mrs x14, trcseqrstevr
+# CHECK: mrs x14, {{trcseqrstevr|TRCSEQRSTEVR}}
0x84 0x7 0x31 0xd5
-# CHECK: mrs x4, trcseqstr
+# CHECK: mrs x4, {{trcseqstr|TRCSEQSTR}}
0x91 0x8 0x31 0xd5
-# CHECK: mrs x17, trcextinselr
+# CHECK: mrs x17, {{trcextinselr|TRCEXTINSELR}}
0xb5 0x0 0x31 0xd5
-# CHECK: mrs x21, trccntrldvr0
+# CHECK: mrs x21, {{trccntrldvr0|TRCCNTRLDVR0}}
0xaa 0x1 0x31 0xd5
-# CHECK: mrs x10, trccntrldvr1
+# CHECK: mrs x10, {{trccntrldvr1|TRCCNTRLDVR1}}
0xb4 0x2 0x31 0xd5
-# CHECK: mrs x20, trccntrldvr2
+# CHECK: mrs x20, {{trccntrldvr2|TRCCNTRLDVR2}}
0xa5 0x3 0x31 0xd5
-# CHECK: mrs x5, trccntrldvr3
+# CHECK: mrs x5, {{trccntrldvr3|TRCCNTRLDVR3}}
0xb1 0x4 0x31 0xd5
-# CHECK: mrs x17, trccntctlr0
+# CHECK: mrs x17, {{trccntctlr0|TRCCNTCTLR0}}
0xa1 0x5 0x31 0xd5
-# CHECK: mrs x1, trccntctlr1
+# CHECK: mrs x1, {{trccntctlr1|TRCCNTCTLR1}}
0xb1 0x6 0x31 0xd5
-# CHECK: mrs x17, trccntctlr2
+# CHECK: mrs x17, {{trccntctlr2|TRCCNTCTLR2}}
0xa6 0x7 0x31 0xd5
-# CHECK: mrs x6, trccntctlr3
+# CHECK: mrs x6, {{trccntctlr3|TRCCNTCTLR3}}
0xbc 0x8 0x31 0xd5
-# CHECK: mrs x28, trccntvr0
+# CHECK: mrs x28, {{trccntvr0|TRCCNTVR0}}
0xb7 0x9 0x31 0xd5
-# CHECK: mrs x23, trccntvr1
+# CHECK: mrs x23, {{trccntvr1|TRCCNTVR1}}
0xa9 0xa 0x31 0xd5
-# CHECK: mrs x9, trccntvr2
+# CHECK: mrs x9, {{trccntvr2|TRCCNTVR2}}
0xa6 0xb 0x31 0xd5
-# CHECK: mrs x6, trccntvr3
+# CHECK: mrs x6, {{trccntvr3|TRCCNTVR3}}
0xf8 0x0 0x31 0xd5
-# CHECK: mrs x24, trcimspec0
+# CHECK: mrs x24, {{trcimspec0|TRCIMSPEC0}}
0xf8 0x1 0x31 0xd5
-# CHECK: mrs x24, trcimspec1
+# CHECK: mrs x24, {{trcimspec1|TRCIMSPEC1}}
0xef 0x2 0x31 0xd5
-# CHECK: mrs x15, trcimspec2
+# CHECK: mrs x15, {{trcimspec2|TRCIMSPEC2}}
0xea 0x3 0x31 0xd5
-# CHECK: mrs x10, trcimspec3
+# CHECK: mrs x10, {{trcimspec3|TRCIMSPEC3}}
0xfd 0x4 0x31 0xd5
-# CHECK: mrs x29, trcimspec4
+# CHECK: mrs x29, {{trcimspec4|TRCIMSPEC4}}
0xf2 0x5 0x31 0xd5
-# CHECK: mrs x18, trcimspec5
+# CHECK: mrs x18, {{trcimspec5|TRCIMSPEC5}}
0xfd 0x6 0x31 0xd5
-# CHECK: mrs x29, trcimspec6
+# CHECK: mrs x29, {{trcimspec6|TRCIMSPEC6}}
0xe2 0x7 0x31 0xd5
-# CHECK: mrs x2, trcimspec7
+# CHECK: mrs x2, {{trcimspec7|TRCIMSPEC7}}
0x8 0x12 0x31 0xd5
-# CHECK: mrs x8, trcrsctlr2
+# CHECK: mrs x8, {{trcrsctlr2|TRCRSCTLR2}}
0x0 0x13 0x31 0xd5
-# CHECK: mrs x0, trcrsctlr3
+# CHECK: mrs x0, {{trcrsctlr3|TRCRSCTLR3}}
0xc 0x14 0x31 0xd5
-# CHECK: mrs x12, trcrsctlr4
+# CHECK: mrs x12, {{trcrsctlr4|TRCRSCTLR4}}
0x1a 0x15 0x31 0xd5
-# CHECK: mrs x26, trcrsctlr5
+# CHECK: mrs x26, {{trcrsctlr5|TRCRSCTLR5}}
0x1d 0x16 0x31 0xd5
-# CHECK: mrs x29, trcrsctlr6
+# CHECK: mrs x29, {{trcrsctlr6|TRCRSCTLR6}}
0x11 0x17 0x31 0xd5
-# CHECK: mrs x17, trcrsctlr7
+# CHECK: mrs x17, {{trcrsctlr7|TRCRSCTLR7}}
0x0 0x18 0x31 0xd5
-# CHECK: mrs x0, trcrsctlr8
+# CHECK: mrs x0, {{trcrsctlr8|TRCRSCTLR8}}
0x1 0x19 0x31 0xd5
-# CHECK: mrs x1, trcrsctlr9
+# CHECK: mrs x1, {{trcrsctlr9|TRCRSCTLR9}}
0x11 0x1a 0x31 0xd5
-# CHECK: mrs x17, trcrsctlr10
+# CHECK: mrs x17, {{trcrsctlr10|TRCRSCTLR10}}
0x15 0x1b 0x31 0xd5
-# CHECK: mrs x21, trcrsctlr11
+# CHECK: mrs x21, {{trcrsctlr11|TRCRSCTLR11}}
0x1 0x1c 0x31 0xd5
-# CHECK: mrs x1, trcrsctlr12
+# CHECK: mrs x1, {{trcrsctlr12|TRCRSCTLR12}}
0x8 0x1d 0x31 0xd5
-# CHECK: mrs x8, trcrsctlr13
+# CHECK: mrs x8, {{trcrsctlr13|TRCRSCTLR13}}
0x18 0x1e 0x31 0xd5
-# CHECK: mrs x24, trcrsctlr14
+# CHECK: mrs x24, {{trcrsctlr14|TRCRSCTLR14}}
0x0 0x1f 0x31 0xd5
-# CHECK: mrs x0, trcrsctlr15
+# CHECK: mrs x0, {{trcrsctlr15|TRCRSCTLR15}}
0x22 0x10 0x31 0xd5
-# CHECK: mrs x2, trcrsctlr16
+# CHECK: mrs x2, {{trcrsctlr16|TRCRSCTLR16}}
0x3d 0x11 0x31 0xd5
-# CHECK: mrs x29, trcrsctlr17
+# CHECK: mrs x29, {{trcrsctlr17|TRCRSCTLR17}}
0x36 0x12 0x31 0xd5
-# CHECK: mrs x22, trcrsctlr18
+# CHECK: mrs x22, {{trcrsctlr18|TRCRSCTLR18}}
0x26 0x13 0x31 0xd5
-# CHECK: mrs x6, trcrsctlr19
+# CHECK: mrs x6, {{trcrsctlr19|TRCRSCTLR19}}
0x3a 0x14 0x31 0xd5
-# CHECK: mrs x26, trcrsctlr20
+# CHECK: mrs x26, {{trcrsctlr20|TRCRSCTLR20}}
0x3a 0x15 0x31 0xd5
-# CHECK: mrs x26, trcrsctlr21
+# CHECK: mrs x26, {{trcrsctlr21|TRCRSCTLR21}}
0x24 0x16 0x31 0xd5
-# CHECK: mrs x4, trcrsctlr22
+# CHECK: mrs x4, {{trcrsctlr22|TRCRSCTLR22}}
0x2c 0x17 0x31 0xd5
-# CHECK: mrs x12, trcrsctlr23
+# CHECK: mrs x12, {{trcrsctlr23|TRCRSCTLR23}}
0x21 0x18 0x31 0xd5
-# CHECK: mrs x1, trcrsctlr24
+# CHECK: mrs x1, {{trcrsctlr24|TRCRSCTLR24}}
0x20 0x19 0x31 0xd5
-# CHECK: mrs x0, trcrsctlr25
+# CHECK: mrs x0, {{trcrsctlr25|TRCRSCTLR25}}
0x31 0x1a 0x31 0xd5
-# CHECK: mrs x17, trcrsctlr26
+# CHECK: mrs x17, {{trcrsctlr26|TRCRSCTLR26}}
0x28 0x1b 0x31 0xd5
-# CHECK: mrs x8, trcrsctlr27
+# CHECK: mrs x8, {{trcrsctlr27|TRCRSCTLR27}}
0x2a 0x1c 0x31 0xd5
-# CHECK: mrs x10, trcrsctlr28
+# CHECK: mrs x10, {{trcrsctlr28|TRCRSCTLR28}}
0x39 0x1d 0x31 0xd5
-# CHECK: mrs x25, trcrsctlr29
+# CHECK: mrs x25, {{trcrsctlr29|TRCRSCTLR29}}
0x2c 0x1e 0x31 0xd5
-# CHECK: mrs x12, trcrsctlr30
+# CHECK: mrs x12, {{trcrsctlr30|TRCRSCTLR30}}
0x2b 0x1f 0x31 0xd5
-# CHECK: mrs x11, trcrsctlr31
+# CHECK: mrs x11, {{trcrsctlr31|TRCRSCTLR31}}
0x52 0x10 0x31 0xd5
-# CHECK: mrs x18, trcssccr0
+# CHECK: mrs x18, {{trcssccr0|TRCSSCCR0}}
0x4c 0x11 0x31 0xd5
-# CHECK: mrs x12, trcssccr1
+# CHECK: mrs x12, {{trcssccr1|TRCSSCCR1}}
0x43 0x12 0x31 0xd5
-# CHECK: mrs x3, trcssccr2
+# CHECK: mrs x3, {{trcssccr2|TRCSSCCR2}}
0x42 0x13 0x31 0xd5
-# CHECK: mrs x2, trcssccr3
+# CHECK: mrs x2, {{trcssccr3|TRCSSCCR3}}
0x55 0x14 0x31 0xd5
-# CHECK: mrs x21, trcssccr4
+# CHECK: mrs x21, {{trcssccr4|TRCSSCCR4}}
0x4a 0x15 0x31 0xd5
-# CHECK: mrs x10, trcssccr5
+# CHECK: mrs x10, {{trcssccr5|TRCSSCCR5}}
0x56 0x16 0x31 0xd5
-# CHECK: mrs x22, trcssccr6
+# CHECK: mrs x22, {{trcssccr6|TRCSSCCR6}}
0x57 0x17 0x31 0xd5
-# CHECK: mrs x23, trcssccr7
+# CHECK: mrs x23, {{trcssccr7|TRCSSCCR7}}
0x57 0x18 0x31 0xd5
-# CHECK: mrs x23, trcsscsr0
+# CHECK: mrs x23, {{trcsscsr0|TRCSSCSR0}}
0x53 0x19 0x31 0xd5
-# CHECK: mrs x19, trcsscsr1
+# CHECK: mrs x19, {{trcsscsr1|TRCSSCSR1}}
0x59 0x1a 0x31 0xd5
-# CHECK: mrs x25, trcsscsr2
+# CHECK: mrs x25, {{trcsscsr2|TRCSSCSR2}}
0x51 0x1b 0x31 0xd5
-# CHECK: mrs x17, trcsscsr3
+# CHECK: mrs x17, {{trcsscsr3|TRCSSCSR3}}
0x53 0x1c 0x31 0xd5
-# CHECK: mrs x19, trcsscsr4
+# CHECK: mrs x19, {{trcsscsr4|TRCSSCSR4}}
0x4b 0x1d 0x31 0xd5
-# CHECK: mrs x11, trcsscsr5
+# CHECK: mrs x11, {{trcsscsr5|TRCSSCSR5}}
0x45 0x1e 0x31 0xd5
-# CHECK: mrs x5, trcsscsr6
+# CHECK: mrs x5, {{trcsscsr6|TRCSSCSR6}}
0x49 0x1f 0x31 0xd5
-# CHECK: mrs x9, trcsscsr7
+# CHECK: mrs x9, {{trcsscsr7|TRCSSCSR7}}
0x9a 0x14 0x31 0xd5
-# CHECK: mrs x26, trcpdcr
+# CHECK: mrs x26, {{trcpdcr|TRCPDCR}}
0x8 0x20 0x31 0xd5
-# CHECK: mrs x8, trcacvr0
+# CHECK: mrs x8, {{trcacvr0|TRCACVR0}}
0xf 0x22 0x31 0xd5
-# CHECK: mrs x15, trcacvr1
+# CHECK: mrs x15, {{trcacvr1|TRCACVR1}}
0x13 0x24 0x31 0xd5
-# CHECK: mrs x19, trcacvr2
+# CHECK: mrs x19, {{trcacvr2|TRCACVR2}}
0x8 0x26 0x31 0xd5
-# CHECK: mrs x8, trcacvr3
+# CHECK: mrs x8, {{trcacvr3|TRCACVR3}}
0x1c 0x28 0x31 0xd5
-# CHECK: mrs x28, trcacvr4
+# CHECK: mrs x28, {{trcacvr4|TRCACVR4}}
0x3 0x2a 0x31 0xd5
-# CHECK: mrs x3, trcacvr5
+# CHECK: mrs x3, {{trcacvr5|TRCACVR5}}
0x19 0x2c 0x31 0xd5
-# CHECK: mrs x25, trcacvr6
+# CHECK: mrs x25, {{trcacvr6|TRCACVR6}}
0x18 0x2e 0x31 0xd5
-# CHECK: mrs x24, trcacvr7
+# CHECK: mrs x24, {{trcacvr7|TRCACVR7}}
0x26 0x20 0x31 0xd5
-# CHECK: mrs x6, trcacvr8
+# CHECK: mrs x6, {{trcacvr8|TRCACVR8}}
0x23 0x22 0x31 0xd5
-# CHECK: mrs x3, trcacvr9
+# CHECK: mrs x3, {{trcacvr9|TRCACVR9}}
0x38 0x24 0x31 0xd5
-# CHECK: mrs x24, trcacvr10
+# CHECK: mrs x24, {{trcacvr10|TRCACVR10}}
0x23 0x26 0x31 0xd5
-# CHECK: mrs x3, trcacvr11
+# CHECK: mrs x3, {{trcacvr11|TRCACVR11}}
0x2c 0x28 0x31 0xd5
-# CHECK: mrs x12, trcacvr12
+# CHECK: mrs x12, {{trcacvr12|TRCACVR12}}
0x29 0x2a 0x31 0xd5
-# CHECK: mrs x9, trcacvr13
+# CHECK: mrs x9, {{trcacvr13|TRCACVR13}}
0x2e 0x2c 0x31 0xd5
-# CHECK: mrs x14, trcacvr14
+# CHECK: mrs x14, {{trcacvr14|TRCACVR14}}
0x23 0x2e 0x31 0xd5
-# CHECK: mrs x3, trcacvr15
+# CHECK: mrs x3, {{trcacvr15|TRCACVR15}}
0x55 0x20 0x31 0xd5
-# CHECK: mrs x21, trcacatr0
+# CHECK: mrs x21, {{trcacatr0|TRCACATR0}}
0x5a 0x22 0x31 0xd5
-# CHECK: mrs x26, trcacatr1
+# CHECK: mrs x26, {{trcacatr1|TRCACATR1}}
0x48 0x24 0x31 0xd5
-# CHECK: mrs x8, trcacatr2
+# CHECK: mrs x8, {{trcacatr2|TRCACATR2}}
0x56 0x26 0x31 0xd5
-# CHECK: mrs x22, trcacatr3
+# CHECK: mrs x22, {{trcacatr3|TRCACATR3}}
0x46 0x28 0x31 0xd5
-# CHECK: mrs x6, trcacatr4
+# CHECK: mrs x6, {{trcacatr4|TRCACATR4}}
0x5d 0x2a 0x31 0xd5
-# CHECK: mrs x29, trcacatr5
+# CHECK: mrs x29, {{trcacatr5|TRCACATR5}}
0x45 0x2c 0x31 0xd5
-# CHECK: mrs x5, trcacatr6
+# CHECK: mrs x5, {{trcacatr6|TRCACATR6}}
0x52 0x2e 0x31 0xd5
-# CHECK: mrs x18, trcacatr7
+# CHECK: mrs x18, {{trcacatr7|TRCACATR7}}
0x62 0x20 0x31 0xd5
-# CHECK: mrs x2, trcacatr8
+# CHECK: mrs x2, {{trcacatr8|TRCACATR8}}
0x73 0x22 0x31 0xd5
-# CHECK: mrs x19, trcacatr9
+# CHECK: mrs x19, {{trcacatr9|TRCACATR9}}
0x6d 0x24 0x31 0xd5
-# CHECK: mrs x13, trcacatr10
+# CHECK: mrs x13, {{trcacatr10|TRCACATR10}}
0x79 0x26 0x31 0xd5
-# CHECK: mrs x25, trcacatr11
+# CHECK: mrs x25, {{trcacatr11|TRCACATR11}}
0x72 0x28 0x31 0xd5
-# CHECK: mrs x18, trcacatr12
+# CHECK: mrs x18, {{trcacatr12|TRCACATR12}}
0x7d 0x2a 0x31 0xd5
-# CHECK: mrs x29, trcacatr13
+# CHECK: mrs x29, {{trcacatr13|TRCACATR13}}
0x69 0x2c 0x31 0xd5
-# CHECK: mrs x9, trcacatr14
+# CHECK: mrs x9, {{trcacatr14|TRCACATR14}}
0x72 0x2e 0x31 0xd5
-# CHECK: mrs x18, trcacatr15
+# CHECK: mrs x18, {{trcacatr15|TRCACATR15}}
0x9d 0x20 0x31 0xd5
-# CHECK: mrs x29, trcdvcvr0
+# CHECK: mrs x29, {{trcdvcvr0|TRCDVCVR0}}
0x8f 0x24 0x31 0xd5
-# CHECK: mrs x15, trcdvcvr1
+# CHECK: mrs x15, {{trcdvcvr1|TRCDVCVR1}}
0x8f 0x28 0x31 0xd5
-# CHECK: mrs x15, trcdvcvr2
+# CHECK: mrs x15, {{trcdvcvr2|TRCDVCVR2}}
0x8f 0x2c 0x31 0xd5
-# CHECK: mrs x15, trcdvcvr3
+# CHECK: mrs x15, {{trcdvcvr3|TRCDVCVR3}}
0xb3 0x20 0x31 0xd5
-# CHECK: mrs x19, trcdvcvr4
+# CHECK: mrs x19, {{trcdvcvr4|TRCDVCVR4}}
0xb6 0x24 0x31 0xd5
-# CHECK: mrs x22, trcdvcvr5
+# CHECK: mrs x22, {{trcdvcvr5|TRCDVCVR5}}
0xbb 0x28 0x31 0xd5
-# CHECK: mrs x27, trcdvcvr6
+# CHECK: mrs x27, {{trcdvcvr6|TRCDVCVR6}}
0xa1 0x2c 0x31 0xd5
-# CHECK: mrs x1, trcdvcvr7
+# CHECK: mrs x1, {{trcdvcvr7|TRCDVCVR7}}
0xdd 0x20 0x31 0xd5
-# CHECK: mrs x29, trcdvcmr0
+# CHECK: mrs x29, {{trcdvcmr0|TRCDVCMR0}}
0xc9 0x24 0x31 0xd5
-# CHECK: mrs x9, trcdvcmr1
+# CHECK: mrs x9, {{trcdvcmr1|TRCDVCMR1}}
0xc1 0x28 0x31 0xd5
-# CHECK: mrs x1, trcdvcmr2
+# CHECK: mrs x1, {{trcdvcmr2|TRCDVCMR2}}
0xc2 0x2c 0x31 0xd5
-# CHECK: mrs x2, trcdvcmr3
+# CHECK: mrs x2, {{trcdvcmr3|TRCDVCMR3}}
0xe5 0x20 0x31 0xd5
-# CHECK: mrs x5, trcdvcmr4
+# CHECK: mrs x5, {{trcdvcmr4|TRCDVCMR4}}
0xf5 0x24 0x31 0xd5
-# CHECK: mrs x21, trcdvcmr5
+# CHECK: mrs x21, {{trcdvcmr5|TRCDVCMR5}}
0xe5 0x28 0x31 0xd5
-# CHECK: mrs x5, trcdvcmr6
+# CHECK: mrs x5, {{trcdvcmr6|TRCDVCMR6}}
0xe1 0x2c 0x31 0xd5
-# CHECK: mrs x1, trcdvcmr7
+# CHECK: mrs x1, {{trcdvcmr7|TRCDVCMR7}}
0x15 0x30 0x31 0xd5
-# CHECK: mrs x21, trccidcvr0
+# CHECK: mrs x21, {{trccidcvr0|TRCCIDCVR0}}
0x18 0x32 0x31 0xd5
-# CHECK: mrs x24, trccidcvr1
+# CHECK: mrs x24, {{trccidcvr1|TRCCIDCVR1}}
0x18 0x34 0x31 0xd5
-# CHECK: mrs x24, trccidcvr2
+# CHECK: mrs x24, {{trccidcvr2|TRCCIDCVR2}}
0xc 0x36 0x31 0xd5
-# CHECK: mrs x12, trccidcvr3
+# CHECK: mrs x12, {{trccidcvr3|TRCCIDCVR3}}
0xa 0x38 0x31 0xd5
-# CHECK: mrs x10, trccidcvr4
+# CHECK: mrs x10, {{trccidcvr4|TRCCIDCVR4}}
0x9 0x3a 0x31 0xd5
-# CHECK: mrs x9, trccidcvr5
+# CHECK: mrs x9, {{trccidcvr5|TRCCIDCVR5}}
0x6 0x3c 0x31 0xd5
-# CHECK: mrs x6, trccidcvr6
+# CHECK: mrs x6, {{trccidcvr6|TRCCIDCVR6}}
0x14 0x3e 0x31 0xd5
-# CHECK: mrs x20, trccidcvr7
+# CHECK: mrs x20, {{trccidcvr7|TRCCIDCVR7}}
0x34 0x30 0x31 0xd5
-# CHECK: mrs x20, trcvmidcvr0
+# CHECK: mrs x20, {{trcvmidcvr0|TRCVMIDCVR0}}
0x34 0x32 0x31 0xd5
-# CHECK: mrs x20, trcvmidcvr1
+# CHECK: mrs x20, {{trcvmidcvr1|TRCVMIDCVR1}}
0x3a 0x34 0x31 0xd5
-# CHECK: mrs x26, trcvmidcvr2
+# CHECK: mrs x26, {{trcvmidcvr2|TRCVMIDCVR2}}
0x21 0x36 0x31 0xd5
-# CHECK: mrs x1, trcvmidcvr3
+# CHECK: mrs x1, {{trcvmidcvr3|TRCVMIDCVR3}}
0x2e 0x38 0x31 0xd5
-# CHECK: mrs x14, trcvmidcvr4
+# CHECK: mrs x14, {{trcvmidcvr4|TRCVMIDCVR4}}
0x3b 0x3a 0x31 0xd5
-# CHECK: mrs x27, trcvmidcvr5
+# CHECK: mrs x27, {{trcvmidcvr5|TRCVMIDCVR5}}
0x3d 0x3c 0x31 0xd5
-# CHECK: mrs x29, trcvmidcvr6
+# CHECK: mrs x29, {{trcvmidcvr6|TRCVMIDCVR6}}
0x31 0x3e 0x31 0xd5
-# CHECK: mrs x17, trcvmidcvr7
+# CHECK: mrs x17, {{trcvmidcvr7|TRCVMIDCVR7}}
0x4a 0x30 0x31 0xd5
-# CHECK: mrs x10, trccidcctlr0
+# CHECK: mrs x10, {{trccidcctlr0|TRCCIDCCTLR0}}
0x44 0x31 0x31 0xd5
-# CHECK: mrs x4, trccidcctlr1
+# CHECK: mrs x4, {{trccidcctlr1|TRCCIDCCTLR1}}
0x49 0x32 0x31 0xd5
-# CHECK: mrs x9, trcvmidcctlr0
+# CHECK: mrs x9, {{trcvmidcctlr0|TRCVMIDCCTLR0}}
0x4b 0x33 0x31 0xd5
-# CHECK: mrs x11, trcvmidcctlr1
+# CHECK: mrs x11, {{trcvmidcctlr1|TRCVMIDCCTLR1}}
0x96 0x70 0x31 0xd5
-# CHECK: mrs x22, trcitctrl
+# CHECK: mrs x22, {{trcitctrl|TRCITCTRL}}
0xd7 0x78 0x31 0xd5
-# CHECK: mrs x23, trcclaimset
+# CHECK: mrs x23, {{trcclaimset|TRCCLAIMSET}}
0xce 0x79 0x31 0xd5
-# CHECK: mrs x14, trcclaimclr
+# CHECK: mrs x14, {{trcclaimclr|TRCCLAIMCLR}}
0x9c 0x10 0x11 0xd5
-# CHECK: msr trcoslar, x28
+# CHECK: msr {{trcoslar|TRCOSLAR}}, x28
0xce 0x7c 0x11 0xd5
-# CHECK: msr trclar, x14
+# CHECK: msr {{trclar|TRCLAR}}, x14
0xa 0x1 0x11 0xd5
-# CHECK: msr trcprgctlr, x10
+# CHECK: msr {{trcprgctlr|TRCPRGCTLR}}, x10
0x1b 0x2 0x11 0xd5
-# CHECK: msr trcprocselr, x27
+# CHECK: msr {{trcprocselr|TRCPROCSELR}}, x27
0x18 0x4 0x11 0xd5
-# CHECK: msr trcconfigr, x24
+# CHECK: msr {{trcconfigr|TRCCONFIGR}}, x24
0x8 0x6 0x11 0xd5
-# CHECK: msr trcauxctlr, x8
+# CHECK: msr {{trcauxctlr|TRCAUXCTLR}}, x8
0x10 0x8 0x11 0xd5
-# CHECK: msr trceventctl0r, x16
+# CHECK: msr {{trceventctl0r|TRCEVENTCTL0R}}, x16
0x1b 0x9 0x11 0xd5
-# CHECK: msr trceventctl1r, x27
+# CHECK: msr {{trceventctl1r|TRCEVENTCTL1R}}, x27
0x1a 0xb 0x11 0xd5
-# CHECK: msr trcstallctlr, x26
+# CHECK: msr {{trcstallctlr|TRCSTALLCTLR}}, x26
0x0 0xc 0x11 0xd5
-# CHECK: msr trctsctlr, x0
+# CHECK: msr {{trctsctlr|TRCTSCTLR}}, x0
0xe 0xd 0x11 0xd5
-# CHECK: msr trcsyncpr, x14
+# CHECK: msr {{trcsyncpr|TRCSYNCPR}}, x14
0x8 0xe 0x11 0xd5
-# CHECK: msr trcccctlr, x8
+# CHECK: msr {{trcccctlr|TRCCCCTLR}}, x8
0x6 0xf 0x11 0xd5
-# CHECK: msr trcbbctlr, x6
+# CHECK: msr {{trcbbctlr|TRCBBCTLR}}, x6
0x37 0x0 0x11 0xd5
-# CHECK: msr trctraceidr, x23
+# CHECK: msr {{trctraceidr|TRCTRACEIDR}}, x23
0x25 0x1 0x11 0xd5
-# CHECK: msr trcqctlr, x5
+# CHECK: msr {{trcqctlr|TRCQCTLR}}, x5
0x40 0x0 0x11 0xd5
-# CHECK: msr trcvictlr, x0
+# CHECK: msr {{trcvictlr|TRCVICTLR}}, x0
0x40 0x1 0x11 0xd5
-# CHECK: msr trcviiectlr, x0
+# CHECK: msr {{trcviiectlr|TRCVIIECTLR}}, x0
0x41 0x2 0x11 0xd5
-# CHECK: msr trcvissctlr, x1
+# CHECK: msr {{trcvissctlr|TRCVISSCTLR}}, x1
0x40 0x3 0x11 0xd5
-# CHECK: msr trcvipcssctlr, x0
+# CHECK: msr {{trcvipcssctlr|TRCVIPCSSCTLR}}, x0
0x47 0x8 0x11 0xd5
-# CHECK: msr trcvdctlr, x7
+# CHECK: msr {{trcvdctlr|TRCVDCTLR}}, x7
0x52 0x9 0x11 0xd5
-# CHECK: msr trcvdsacctlr, x18
+# CHECK: msr {{trcvdsacctlr|TRCVDSACCTLR}}, x18
0x58 0xa 0x11 0xd5
-# CHECK: msr trcvdarcctlr, x24
+# CHECK: msr {{trcvdarcctlr|TRCVDARCCTLR}}, x24
0x9c 0x0 0x11 0xd5
-# CHECK: msr trcseqevr0, x28
+# CHECK: msr {{trcseqevr0|TRCSEQEVR0}}, x28
0x95 0x1 0x11 0xd5
-# CHECK: msr trcseqevr1, x21
+# CHECK: msr {{trcseqevr1|TRCSEQEVR1}}, x21
0x90 0x2 0x11 0xd5
-# CHECK: msr trcseqevr2, x16
+# CHECK: msr {{trcseqevr2|TRCSEQEVR2}}, x16
0x90 0x6 0x11 0xd5
-# CHECK: msr trcseqrstevr, x16
+# CHECK: msr {{trcseqrstevr|TRCSEQRSTEVR}}, x16
0x99 0x7 0x11 0xd5
-# CHECK: msr trcseqstr, x25
+# CHECK: msr {{trcseqstr|TRCSEQSTR}}, x25
0x9d 0x8 0x11 0xd5
-# CHECK: msr trcextinselr, x29
+# CHECK: msr {{trcextinselr|TRCEXTINSELR}}, x29
0xb4 0x0 0x11 0xd5
-# CHECK: msr trccntrldvr0, x20
+# CHECK: msr {{trccntrldvr0|TRCCNTRLDVR0}}, x20
0xb4 0x1 0x11 0xd5
-# CHECK: msr trccntrldvr1, x20
+# CHECK: msr {{trccntrldvr1|TRCCNTRLDVR1}}, x20
0xb6 0x2 0x11 0xd5
-# CHECK: msr trccntrldvr2, x22
+# CHECK: msr {{trccntrldvr2|TRCCNTRLDVR2}}, x22
0xac 0x3 0x11 0xd5
-# CHECK: msr trccntrldvr3, x12
+# CHECK: msr {{trccntrldvr3|TRCCNTRLDVR3}}, x12
0xb4 0x4 0x11 0xd5
-# CHECK: msr trccntctlr0, x20
+# CHECK: msr {{trccntctlr0|TRCCNTCTLR0}}, x20
0xa4 0x5 0x11 0xd5
-# CHECK: msr trccntctlr1, x4
+# CHECK: msr {{trccntctlr1|TRCCNTCTLR1}}, x4
0xa8 0x6 0x11 0xd5
-# CHECK: msr trccntctlr2, x8
+# CHECK: msr {{trccntctlr2|TRCCNTCTLR2}}, x8
0xb0 0x7 0x11 0xd5
-# CHECK: msr trccntctlr3, x16
+# CHECK: msr {{trccntctlr3|TRCCNTCTLR3}}, x16
0xa5 0x8 0x11 0xd5
-# CHECK: msr trccntvr0, x5
+# CHECK: msr {{trccntvr0|TRCCNTVR0}}, x5
0xbb 0x9 0x11 0xd5
-# CHECK: msr trccntvr1, x27
+# CHECK: msr {{trccntvr1|TRCCNTVR1}}, x27
0xb5 0xa 0x11 0xd5
-# CHECK: msr trccntvr2, x21
+# CHECK: msr {{trccntvr2|TRCCNTVR2}}, x21
0xa8 0xb 0x11 0xd5
-# CHECK: msr trccntvr3, x8
+# CHECK: msr {{trccntvr3|TRCCNTVR3}}, x8
0xe6 0x0 0x11 0xd5
-# CHECK: msr trcimspec0, x6
+# CHECK: msr {{trcimspec0|TRCIMSPEC0}}, x6
0xfb 0x1 0x11 0xd5
-# CHECK: msr trcimspec1, x27
+# CHECK: msr {{trcimspec1|TRCIMSPEC1}}, x27
0xf7 0x2 0x11 0xd5
-# CHECK: msr trcimspec2, x23
+# CHECK: msr {{trcimspec2|TRCIMSPEC2}}, x23
0xef 0x3 0x11 0xd5
-# CHECK: msr trcimspec3, x15
+# CHECK: msr {{trcimspec3|TRCIMSPEC3}}, x15
0xed 0x4 0x11 0xd5
-# CHECK: msr trcimspec4, x13
+# CHECK: msr {{trcimspec4|TRCIMSPEC4}}, x13
0xf9 0x5 0x11 0xd5
-# CHECK: msr trcimspec5, x25
+# CHECK: msr {{trcimspec5|TRCIMSPEC5}}, x25
0xf3 0x6 0x11 0xd5
-# CHECK: msr trcimspec6, x19
+# CHECK: msr {{trcimspec6|TRCIMSPEC6}}, x19
0xfb 0x7 0x11 0xd5
-# CHECK: msr trcimspec7, x27
+# CHECK: msr {{trcimspec7|TRCIMSPEC7}}, x27
0x4 0x12 0x11 0xd5
-# CHECK: msr trcrsctlr2, x4
+# CHECK: msr {{trcrsctlr2|TRCRSCTLR2}}, x4
0x0 0x13 0x11 0xd5
-# CHECK: msr trcrsctlr3, x0
+# CHECK: msr {{trcrsctlr3|TRCRSCTLR3}}, x0
0x15 0x14 0x11 0xd5
-# CHECK: msr trcrsctlr4, x21
+# CHECK: msr {{trcrsctlr4|TRCRSCTLR4}}, x21
0x8 0x15 0x11 0xd5
-# CHECK: msr trcrsctlr5, x8
+# CHECK: msr {{trcrsctlr5|TRCRSCTLR5}}, x8
0x14 0x16 0x11 0xd5
-# CHECK: msr trcrsctlr6, x20
+# CHECK: msr {{trcrsctlr6|TRCRSCTLR6}}, x20
0xb 0x17 0x11 0xd5
-# CHECK: msr trcrsctlr7, x11
+# CHECK: msr {{trcrsctlr7|TRCRSCTLR7}}, x11
0x12 0x18 0x11 0xd5
-# CHECK: msr trcrsctlr8, x18
+# CHECK: msr {{trcrsctlr8|TRCRSCTLR8}}, x18
0x18 0x19 0x11 0xd5
-# CHECK: msr trcrsctlr9, x24
+# CHECK: msr {{trcrsctlr9|TRCRSCTLR9}}, x24
0xf 0x1a 0x11 0xd5
-# CHECK: msr trcrsctlr10, x15
+# CHECK: msr {{trcrsctlr10|TRCRSCTLR10}}, x15
0x15 0x1b 0x11 0xd5
-# CHECK: msr trcrsctlr11, x21
+# CHECK: msr {{trcrsctlr11|TRCRSCTLR11}}, x21
0x4 0x1c 0x11 0xd5
-# CHECK: msr trcrsctlr12, x4
+# CHECK: msr {{trcrsctlr12|TRCRSCTLR12}}, x4
0x1c 0x1d 0x11 0xd5
-# CHECK: msr trcrsctlr13, x28
+# CHECK: msr {{trcrsctlr13|TRCRSCTLR13}}, x28
0x3 0x1e 0x11 0xd5
-# CHECK: msr trcrsctlr14, x3
+# CHECK: msr {{trcrsctlr14|TRCRSCTLR14}}, x3
0x14 0x1f 0x11 0xd5
-# CHECK: msr trcrsctlr15, x20
+# CHECK: msr {{trcrsctlr15|TRCRSCTLR15}}, x20
0x2c 0x10 0x11 0xd5
-# CHECK: msr trcrsctlr16, x12
+# CHECK: msr {{trcrsctlr16|TRCRSCTLR16}}, x12
0x31 0x11 0x11 0xd5
-# CHECK: msr trcrsctlr17, x17
+# CHECK: msr {{trcrsctlr17|TRCRSCTLR17}}, x17
0x2a 0x12 0x11 0xd5
-# CHECK: msr trcrsctlr18, x10
+# CHECK: msr {{trcrsctlr18|TRCRSCTLR18}}, x10
0x2b 0x13 0x11 0xd5
-# CHECK: msr trcrsctlr19, x11
+# CHECK: msr {{trcrsctlr19|TRCRSCTLR19}}, x11
0x23 0x14 0x11 0xd5
-# CHECK: msr trcrsctlr20, x3
+# CHECK: msr {{trcrsctlr20|TRCRSCTLR20}}, x3
0x32 0x15 0x11 0xd5
-# CHECK: msr trcrsctlr21, x18
+# CHECK: msr {{trcrsctlr21|TRCRSCTLR21}}, x18
0x3a 0x16 0x11 0xd5
-# CHECK: msr trcrsctlr22, x26
+# CHECK: msr {{trcrsctlr22|TRCRSCTLR22}}, x26
0x25 0x17 0x11 0xd5
-# CHECK: msr trcrsctlr23, x5
+# CHECK: msr {{trcrsctlr23|TRCRSCTLR23}}, x5
0x39 0x18 0x11 0xd5
-# CHECK: msr trcrsctlr24, x25
+# CHECK: msr {{trcrsctlr24|TRCRSCTLR24}}, x25
0x25 0x19 0x11 0xd5
-# CHECK: msr trcrsctlr25, x5
+# CHECK: msr {{trcrsctlr25|TRCRSCTLR25}}, x5
0x24 0x1a 0x11 0xd5
-# CHECK: msr trcrsctlr26, x4
+# CHECK: msr {{trcrsctlr26|TRCRSCTLR26}}, x4
0x34 0x1b 0x11 0xd5
-# CHECK: msr trcrsctlr27, x20
+# CHECK: msr {{trcrsctlr27|TRCRSCTLR27}}, x20
0x25 0x1c 0x11 0xd5
-# CHECK: msr trcrsctlr28, x5
+# CHECK: msr {{trcrsctlr28|TRCRSCTLR28}}, x5
0x2a 0x1d 0x11 0xd5
-# CHECK: msr trcrsctlr29, x10
+# CHECK: msr {{trcrsctlr29|TRCRSCTLR29}}, x10
0x38 0x1e 0x11 0xd5
-# CHECK: msr trcrsctlr30, x24
+# CHECK: msr {{trcrsctlr30|TRCRSCTLR30}}, x24
0x34 0x1f 0x11 0xd5
-# CHECK: msr trcrsctlr31, x20
+# CHECK: msr {{trcrsctlr31|TRCRSCTLR31}}, x20
0x57 0x10 0x11 0xd5
-# CHECK: msr trcssccr0, x23
+# CHECK: msr {{trcssccr0|TRCSSCCR0}}, x23
0x5b 0x11 0x11 0xd5
-# CHECK: msr trcssccr1, x27
+# CHECK: msr {{trcssccr1|TRCSSCCR1}}, x27
0x5b 0x12 0x11 0xd5
-# CHECK: msr trcssccr2, x27
+# CHECK: msr {{trcssccr2|TRCSSCCR2}}, x27
0x46 0x13 0x11 0xd5
-# CHECK: msr trcssccr3, x6
+# CHECK: msr {{trcssccr3|TRCSSCCR3}}, x6
0x43 0x14 0x11 0xd5
-# CHECK: msr trcssccr4, x3
+# CHECK: msr {{trcssccr4|TRCSSCCR4}}, x3
0x4c 0x15 0x11 0xd5
-# CHECK: msr trcssccr5, x12
+# CHECK: msr {{trcssccr5|TRCSSCCR5}}, x12
0x47 0x16 0x11 0xd5
-# CHECK: msr trcssccr6, x7
+# CHECK: msr {{trcssccr6|TRCSSCCR6}}, x7
0x46 0x17 0x11 0xd5
-# CHECK: msr trcssccr7, x6
+# CHECK: msr {{trcssccr7|TRCSSCCR7}}, x6
0x54 0x18 0x11 0xd5
-# CHECK: msr trcsscsr0, x20
+# CHECK: msr {{trcsscsr0|TRCSSCSR0}}, x20
0x51 0x19 0x11 0xd5
-# CHECK: msr trcsscsr1, x17
+# CHECK: msr {{trcsscsr1|TRCSSCSR1}}, x17
0x4b 0x1a 0x11 0xd5
-# CHECK: msr trcsscsr2, x11
+# CHECK: msr {{trcsscsr2|TRCSSCSR2}}, x11
0x44 0x1b 0x11 0xd5
-# CHECK: msr trcsscsr3, x4
+# CHECK: msr {{trcsscsr3|TRCSSCSR3}}, x4
0x4e 0x1c 0x11 0xd5
-# CHECK: msr trcsscsr4, x14
+# CHECK: msr {{trcsscsr4|TRCSSCSR4}}, x14
0x56 0x1d 0x11 0xd5
-# CHECK: msr trcsscsr5, x22
+# CHECK: msr {{trcsscsr5|TRCSSCSR5}}, x22
0x43 0x1e 0x11 0xd5
-# CHECK: msr trcsscsr6, x3
+# CHECK: msr {{trcsscsr6|TRCSSCSR6}}, x3
0x4b 0x1f 0x11 0xd5
-# CHECK: msr trcsscsr7, x11
+# CHECK: msr {{trcsscsr7|TRCSSCSR7}}, x11
0x83 0x14 0x11 0xd5
-# CHECK: msr trcpdcr, x3
+# CHECK: msr {{trcpdcr|TRCPDCR}}, x3
0x6 0x20 0x11 0xd5
-# CHECK: msr trcacvr0, x6
+# CHECK: msr {{trcacvr0|TRCACVR0}}, x6
0x14 0x22 0x11 0xd5
-# CHECK: msr trcacvr1, x20
+# CHECK: msr {{trcacvr1|TRCACVR1}}, x20
0x19 0x24 0x11 0xd5
-# CHECK: msr trcacvr2, x25
+# CHECK: msr {{trcacvr2|TRCACVR2}}, x25
0x1 0x26 0x11 0xd5
-# CHECK: msr trcacvr3, x1
+# CHECK: msr {{trcacvr3|TRCACVR3}}, x1
0x1c 0x28 0x11 0xd5
-# CHECK: msr trcacvr4, x28
+# CHECK: msr {{trcacvr4|TRCACVR4}}, x28
0xf 0x2a 0x11 0xd5
-# CHECK: msr trcacvr5, x15
+# CHECK: msr {{trcacvr5|TRCACVR5}}, x15
0x19 0x2c 0x11 0xd5
-# CHECK: msr trcacvr6, x25
+# CHECK: msr {{trcacvr6|TRCACVR6}}, x25
0xc 0x2e 0x11 0xd5
-# CHECK: msr trcacvr7, x12
+# CHECK: msr {{trcacvr7|TRCACVR7}}, x12
0x25 0x20 0x11 0xd5
-# CHECK: msr trcacvr8, x5
+# CHECK: msr {{trcacvr8|TRCACVR8}}, x5
0x39 0x22 0x11 0xd5
-# CHECK: msr trcacvr9, x25
+# CHECK: msr {{trcacvr9|TRCACVR9}}, x25
0x2d 0x24 0x11 0xd5
-# CHECK: msr trcacvr10, x13
+# CHECK: msr {{trcacvr10|TRCACVR10}}, x13
0x2a 0x26 0x11 0xd5
-# CHECK: msr trcacvr11, x10
+# CHECK: msr {{trcacvr11|TRCACVR11}}, x10
0x33 0x28 0x11 0xd5
-# CHECK: msr trcacvr12, x19
+# CHECK: msr {{trcacvr12|TRCACVR12}}, x19
0x2a 0x2a 0x11 0xd5
-# CHECK: msr trcacvr13, x10
+# CHECK: msr {{trcacvr13|TRCACVR13}}, x10
0x33 0x2c 0x11 0xd5
-# CHECK: msr trcacvr14, x19
+# CHECK: msr {{trcacvr14|TRCACVR14}}, x19
0x22 0x2e 0x11 0xd5
-# CHECK: msr trcacvr15, x2
+# CHECK: msr {{trcacvr15|TRCACVR15}}, x2
0x4f 0x20 0x11 0xd5
-# CHECK: msr trcacatr0, x15
+# CHECK: msr {{trcacatr0|TRCACATR0}}, x15
0x4d 0x22 0x11 0xd5
-# CHECK: msr trcacatr1, x13
+# CHECK: msr {{trcacatr1|TRCACATR1}}, x13
0x48 0x24 0x11 0xd5
-# CHECK: msr trcacatr2, x8
+# CHECK: msr {{trcacatr2|TRCACATR2}}, x8
0x41 0x26 0x11 0xd5
-# CHECK: msr trcacatr3, x1
+# CHECK: msr {{trcacatr3|TRCACATR3}}, x1
0x4b 0x28 0x11 0xd5
-# CHECK: msr trcacatr4, x11
+# CHECK: msr {{trcacatr4|TRCACATR4}}, x11
0x48 0x2a 0x11 0xd5
-# CHECK: msr trcacatr5, x8
+# CHECK: msr {{trcacatr5|TRCACATR5}}, x8
0x58 0x2c 0x11 0xd5
-# CHECK: msr trcacatr6, x24
+# CHECK: msr {{trcacatr6|TRCACATR6}}, x24
0x46 0x2e 0x11 0xd5
-# CHECK: msr trcacatr7, x6
+# CHECK: msr {{trcacatr7|TRCACATR7}}, x6
0x77 0x20 0x11 0xd5
-# CHECK: msr trcacatr8, x23
+# CHECK: msr {{trcacatr8|TRCACATR8}}, x23
0x65 0x22 0x11 0xd5
-# CHECK: msr trcacatr9, x5
+# CHECK: msr {{trcacatr9|TRCACATR9}}, x5
0x6b 0x24 0x11 0xd5
-# CHECK: msr trcacatr10, x11
+# CHECK: msr {{trcacatr10|TRCACATR10}}, x11
0x6b 0x26 0x11 0xd5
-# CHECK: msr trcacatr11, x11
+# CHECK: msr {{trcacatr11|TRCACATR11}}, x11
0x63 0x28 0x11 0xd5
-# CHECK: msr trcacatr12, x3
+# CHECK: msr {{trcacatr12|TRCACATR12}}, x3
0x7c 0x2a 0x11 0xd5
-# CHECK: msr trcacatr13, x28
+# CHECK: msr {{trcacatr13|TRCACATR13}}, x28
0x79 0x2c 0x11 0xd5
-# CHECK: msr trcacatr14, x25
+# CHECK: msr {{trcacatr14|TRCACATR14}}, x25
0x64 0x2e 0x11 0xd5
-# CHECK: msr trcacatr15, x4
+# CHECK: msr {{trcacatr15|TRCACATR15}}, x4
0x86 0x20 0x11 0xd5
-# CHECK: msr trcdvcvr0, x6
+# CHECK: msr {{trcdvcvr0|TRCDVCVR0}}, x6
0x83 0x24 0x11 0xd5
-# CHECK: msr trcdvcvr1, x3
+# CHECK: msr {{trcdvcvr1|TRCDVCVR1}}, x3
0x85 0x28 0x11 0xd5
-# CHECK: msr trcdvcvr2, x5
+# CHECK: msr {{trcdvcvr2|TRCDVCVR2}}, x5
0x8b 0x2c 0x11 0xd5
-# CHECK: msr trcdvcvr3, x11
+# CHECK: msr {{trcdvcvr3|TRCDVCVR3}}, x11
0xa9 0x20 0x11 0xd5
-# CHECK: msr trcdvcvr4, x9
+# CHECK: msr {{trcdvcvr4|TRCDVCVR4}}, x9
0xae 0x24 0x11 0xd5
-# CHECK: msr trcdvcvr5, x14
+# CHECK: msr {{trcdvcvr5|TRCDVCVR5}}, x14
0xaa 0x28 0x11 0xd5
-# CHECK: msr trcdvcvr6, x10
+# CHECK: msr {{trcdvcvr6|TRCDVCVR6}}, x10
0xac 0x2c 0x11 0xd5
-# CHECK: msr trcdvcvr7, x12
+# CHECK: msr {{trcdvcvr7|TRCDVCVR7}}, x12
0xc8 0x20 0x11 0xd5
-# CHECK: msr trcdvcmr0, x8
+# CHECK: msr {{trcdvcmr0|TRCDVCMR0}}, x8
0xc8 0x24 0x11 0xd5
-# CHECK: msr trcdvcmr1, x8
+# CHECK: msr {{trcdvcmr1|TRCDVCMR1}}, x8
0xd6 0x28 0x11 0xd5
-# CHECK: msr trcdvcmr2, x22
+# CHECK: msr {{trcdvcmr2|TRCDVCMR2}}, x22
0xd6 0x2c 0x11 0xd5
-# CHECK: msr trcdvcmr3, x22
+# CHECK: msr {{trcdvcmr3|TRCDVCMR3}}, x22
0xe5 0x20 0x11 0xd5
-# CHECK: msr trcdvcmr4, x5
+# CHECK: msr {{trcdvcmr4|TRCDVCMR4}}, x5
0xf0 0x24 0x11 0xd5
-# CHECK: msr trcdvcmr5, x16
+# CHECK: msr {{trcdvcmr5|TRCDVCMR5}}, x16
0xfb 0x28 0x11 0xd5
-# CHECK: msr trcdvcmr6, x27
+# CHECK: msr {{trcdvcmr6|TRCDVCMR6}}, x27
0xf5 0x2c 0x11 0xd5
-# CHECK: msr trcdvcmr7, x21
+# CHECK: msr {{trcdvcmr7|TRCDVCMR7}}, x21
0x8 0x30 0x11 0xd5
-# CHECK: msr trccidcvr0, x8
+# CHECK: msr {{trccidcvr0|TRCCIDCVR0}}, x8
0x6 0x32 0x11 0xd5
-# CHECK: msr trccidcvr1, x6
+# CHECK: msr {{trccidcvr1|TRCCIDCVR1}}, x6
0x9 0x34 0x11 0xd5
-# CHECK: msr trccidcvr2, x9
+# CHECK: msr {{trccidcvr2|TRCCIDCVR2}}, x9
0x8 0x36 0x11 0xd5
-# CHECK: msr trccidcvr3, x8
+# CHECK: msr {{trccidcvr3|TRCCIDCVR3}}, x8
0x3 0x38 0x11 0xd5
-# CHECK: msr trccidcvr4, x3
+# CHECK: msr {{trccidcvr4|TRCCIDCVR4}}, x3
0x15 0x3a 0x11 0xd5
-# CHECK: msr trccidcvr5, x21
+# CHECK: msr {{trccidcvr5|TRCCIDCVR5}}, x21
0xc 0x3c 0x11 0xd5
-# CHECK: msr trccidcvr6, x12
+# CHECK: msr {{trccidcvr6|TRCCIDCVR6}}, x12
0x7 0x3e 0x11 0xd5
-# CHECK: msr trccidcvr7, x7
+# CHECK: msr {{trccidcvr7|TRCCIDCVR7}}, x7
0x24 0x30 0x11 0xd5
-# CHECK: msr trcvmidcvr0, x4
+# CHECK: msr {{trcvmidcvr0|TRCVMIDCVR0}}, x4
0x23 0x32 0x11 0xd5
-# CHECK: msr trcvmidcvr1, x3
+# CHECK: msr {{trcvmidcvr1|TRCVMIDCVR1}}, x3
0x29 0x34 0x11 0xd5
-# CHECK: msr trcvmidcvr2, x9
+# CHECK: msr {{trcvmidcvr2|TRCVMIDCVR2}}, x9
0x31 0x36 0x11 0xd5
-# CHECK: msr trcvmidcvr3, x17
+# CHECK: msr {{trcvmidcvr3|TRCVMIDCVR3}}, x17
0x2e 0x38 0x11 0xd5
-# CHECK: msr trcvmidcvr4, x14
+# CHECK: msr {{trcvmidcvr4|TRCVMIDCVR4}}, x14
0x2c 0x3a 0x11 0xd5
-# CHECK: msr trcvmidcvr5, x12
+# CHECK: msr {{trcvmidcvr5|TRCVMIDCVR5}}, x12
0x2a 0x3c 0x11 0xd5
-# CHECK: msr trcvmidcvr6, x10
+# CHECK: msr {{trcvmidcvr6|TRCVMIDCVR6}}, x10
0x23 0x3e 0x11 0xd5
-# CHECK: msr trcvmidcvr7, x3
+# CHECK: msr {{trcvmidcvr7|TRCVMIDCVR7}}, x3
0x4e 0x30 0x11 0xd5
-# CHECK: msr trccidcctlr0, x14
+# CHECK: msr {{trccidcctlr0|TRCCIDCCTLR0}}, x14
0x56 0x31 0x11 0xd5
-# CHECK: msr trccidcctlr1, x22
+# CHECK: msr {{trccidcctlr1|TRCCIDCCTLR1}}, x22
0x48 0x32 0x11 0xd5
-# CHECK: msr trcvmidcctlr0, x8
+# CHECK: msr {{trcvmidcctlr0|TRCVMIDCCTLR0}}, x8
0x4f 0x33 0x11 0xd5
-# CHECK: msr trcvmidcctlr1, x15
+# CHECK: msr {{trcvmidcctlr1|TRCVMIDCCTLR1}}, x15
0x81 0x70 0x11 0xd5
-# CHECK: msr trcitctrl, x1
+# CHECK: msr {{trcitctrl|TRCITCTRL}}, x1
0xc7 0x78 0x11 0xd5
-# CHECK: msr trcclaimset, x7
+# CHECK: msr {{trcclaimset|TRCCLAIMSET}}, x7
0xdd 0x79 0x11 0xd5
-# CHECK: msr trcclaimclr, x29
+# CHECK: msr {{trcclaimclr|TRCCLAIMCLR}}, x29
diff --git a/test/MC/Disassembler/ARM/addrmode2-reencoding.txt b/test/MC/Disassembler/ARM/addrmode2-reencoding.txt
new file mode 100644
index 000000000000..08d2de687a96
--- /dev/null
+++ b/test/MC/Disassembler/ARM/addrmode2-reencoding.txt
@@ -0,0 +1,12 @@
+# RUN: llvm-mc -triple armv7 -show-encoding -disassemble < %s | FileCheck %s
+
+0x00 0x10 0xb0 0xe4
+0x00 0x10 0xf0 0xe4
+0x00 0x10 0xa0 0xe4
+0x00 0x10 0xe0 0xe4
+
+# CHECK: ldrt r1, [r0], #0 @ encoding: [0x00,0x10,0xb0,0xe4]
+# CHECK: ldrbt r1, [r0], #0 @ encoding: [0x00,0x10,0xf0,0xe4]
+# CHECK: strt r1, [r0], #0 @ encoding: [0x00,0x10,0xa0,0xe4]
+# CHECK: strbt r1, [r0], #0 @ encoding: [0x00,0x10,0xe0,0xe4]
+
diff --git a/test/MC/Disassembler/ARM/hex-immediates.txt b/test/MC/Disassembler/ARM/hex-immediates.txt
index 2634d7ed3368..875d6679d239 100644
--- a/test/MC/Disassembler/ARM/hex-immediates.txt
+++ b/test/MC/Disassembler/ARM/hex-immediates.txt
@@ -1,5 +1,11 @@
-# RUN: llvm-mc -triple=thumbv7-apple-darwin -mcpu=cortex-a8 -hdis < %s | FileCheck %s
+# RUN: llvm-mc -triple=thumbv7-apple-darwin -mcpu=cortex-a8 --disassemble --print-imm-hex < %s | FileCheck %s
# CHECK: ldr r4, [pc, #0x20]
0x08 0x4c
# CHECK: sub sp, #0x84
0xa1 0xb0
+# CHECK: ldr r0, [sp, #0xb4]
+0x2d 0x98
+# CHECK: str.w r8, [sp, #0xb4]
+0xcd 0xf8 0xb4 0x80
+# CHECK: ldr.w r8, [sp, #0xb4]
+0xdd 0xf8 0xb4 0x80
diff --git a/test/MC/Disassembler/ARM/invalid-thumbv7.txt b/test/MC/Disassembler/ARM/invalid-thumbv7.txt
index 2c84b8a7aa57..5257633e579f 100644
--- a/test/MC/Disassembler/ARM/invalid-thumbv7.txt
+++ b/test/MC/Disassembler/ARM/invalid-thumbv7.txt
@@ -21,17 +21,6 @@
# CHECK: warning: invalid instruction encoding
# CHECK-NEXT: [0xaf 0xf7 0x44 0x8b]
-# Opcode=2249 Name=tBcc Format=ARM_FORMAT_THUMBFRM(25)
-# 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
-# -------------------------------------------------------------------------------------------------
-# | 0: 0: 0: 0| 0: 0: 0: 0| 0: 0: 0: 0| 0: 0: 0: 0| 1: 1: 0: 1| 1: 1: 1: 0| 0: 1: 1: 0| 1: 1: 1: 1|
-# -------------------------------------------------------------------------------------------------
-#
-# if cond = '1110' then UNDEFINED
-[0x6f 0xde]
-# CHECK: invalid instruction encoding
-# CHECK-NEXT: [0x6f 0xde]
-
#------------------------------------------------------------------------------
# Undefined encoding for it
#------------------------------------------------------------------------------
@@ -249,34 +238,6 @@
# CHECK-NEXT: [0xe4 0xe9 0x02 0x46]
#------------------------------------------------------------------------------
-# Undefined encodings for NEON/VFP instructions with invalid predicate bits
-#------------------------------------------------------------------------------
-
-# VABS
-[0x40 0xde 0x00 0x0a]
-# CHECK: invalid instruction encoding
-# CHECK-NEXT: [0x40 0xde 0x00 0x0a]
-
-
-# VMLA
-[0xf0 0xde 0xe0 0x0b]
-# CHECK: invalid instruction encoding
-# CHECK-NEXT: [0xf0 0xde 0xe0 0x0b]
-
-# VMOV/VDUP between scalar and core registers with invalid predicate bits (pred != 0b1110)
-
-# VMOV
-[0x00 0xde 0x10 0x0b]
-# CHECK: invalid instruction encoding
-# CHECK-NEXT: [0x00 0xde 0x10 0x0b]
-
-# VDUP
-[0xff 0xde 0xf0 0xfb]
-# CHECK: invalid instruction encoding
-# CHECK-NEXT: [0xff 0xde 0xf0 0xfb]
-
-
-#------------------------------------------------------------------------------
# Undefined encodings for NEON vld instructions
#------------------------------------------------------------------------------
diff --git a/test/MC/Disassembler/ARM/lit.local.cfg b/test/MC/Disassembler/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/MC/Disassembler/ARM/lit.local.cfg
+++ b/test/MC/Disassembler/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/Disassembler/Mips/lit.local.cfg b/test/MC/Disassembler/Mips/lit.local.cfg
index 1fa54b428cd9..a3183a25afaa 100644
--- a/test/MC/Disassembler/Mips/lit.local.cfg
+++ b/test/MC/Disassembler/Mips/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Mips' in targets:
+if not 'Mips' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/Disassembler/Mips/micromips.txt b/test/MC/Disassembler/Mips/micromips.txt
index b2d0cc02fc72..1458ce2ed4b3 100644
--- a/test/MC/Disassembler/Mips/micromips.txt
+++ b/test/MC/Disassembler/Mips/micromips.txt
@@ -145,6 +145,9 @@
# CHECK: sw $5, 4($6)
0xf8 0xa6 0x00 0x04
+# CHECK: lwu $2, 8($4)
+0x60 0x44 0xe0 0x08
+
# CHECK: lwl $4, 16($5)
0x60 0x85 0x00 0x10
@@ -285,3 +288,9 @@
# CHECK: tnei $9, 17767
0x41 0x89 0x45 0x67
+
+# CHECK: ll $2, 8($4)
+0x60 0x44 0x30 0x08
+
+# CHECK: sc $2, 8($4)
+0x60 0x44 0xb0 0x08
diff --git a/test/MC/Disassembler/Mips/micromips_le.txt b/test/MC/Disassembler/Mips/micromips_le.txt
index 5b2fe30dd065..bdfe88eaffbf 100644
--- a/test/MC/Disassembler/Mips/micromips_le.txt
+++ b/test/MC/Disassembler/Mips/micromips_le.txt
@@ -145,6 +145,9 @@
# CHECK: sw $5, 4($6)
0xa6 0xf8 0x04 0x00
+# CHECK: lwu $2, 8($4)
+0x44 0x60 0x08 0xe0
+
# CHECK: lwl $4, 16($5)
0x85 0x60 0x10 0x00
@@ -285,3 +288,9 @@
# CHECK: tnei $9, 17767
0x89 0x41 0x67 0x45
+
+# CHECK: ll $2, 8($4)
+0x44 0x60 0x08 0x30
+
+# CHECK: sc $2, 8($4)
+0x44 0x60 0x08 0xb0
diff --git a/test/MC/Disassembler/Mips/mips32.txt b/test/MC/Disassembler/Mips/mips32.txt
index 6d02925ff7b6..bfb145e39596 100644
--- a/test/MC/Disassembler/Mips/mips32.txt
+++ b/test/MC/Disassembler/Mips/mips32.txt
@@ -206,6 +206,9 @@
# CHECK: jal 1328
0x0c 0x00 0x01 0x4c
+# CHECK: jalx 1328
+0x74 0x00 0x01 0x4c
+
# CHECK: jalr $7
0x00 0xe0 0xf8 0x09
diff --git a/test/MC/Disassembler/Mips/mips32_le.txt b/test/MC/Disassembler/Mips/mips32_le.txt
index 61e6fc868d04..533fc69598c1 100644
--- a/test/MC/Disassembler/Mips/mips32_le.txt
+++ b/test/MC/Disassembler/Mips/mips32_le.txt
@@ -206,6 +206,9 @@
# CHECK: jal 1328
0x4c 0x01 0x00 0x0c
+# CHECK: jalx 1328
+0x4c 0x01 0x00 0x74
+
# CHECK: jalr $7
0x09 0xf8 0xe0 0x00
diff --git a/test/MC/Disassembler/Mips/mips32r2.txt b/test/MC/Disassembler/Mips/mips32r2.txt
index 11d9058221c2..299f6f0c8a3e 100644
--- a/test/MC/Disassembler/Mips/mips32r2.txt
+++ b/test/MC/Disassembler/Mips/mips32r2.txt
@@ -215,6 +215,9 @@
# CHECK: jal 1328
0x0c 0x00 0x01 0x4c
+# CHECK: jalx 1328
+0x74 0x00 0x01 0x4c
+
# CHECK: jalr $7
0x00 0xe0 0xf8 0x09
diff --git a/test/MC/Disassembler/Mips/mips32r2_le.txt b/test/MC/Disassembler/Mips/mips32r2_le.txt
index adafcf1258cc..0362ca6d8d67 100644
--- a/test/MC/Disassembler/Mips/mips32r2_le.txt
+++ b/test/MC/Disassembler/Mips/mips32r2_le.txt
@@ -215,6 +215,9 @@
# CHECK: jal 1328
0x4c 0x01 0x00 0x0c
+# CHECK: jalx 1328
+0x4c 0x01 0x00 0x74
+
# CHECK: jalr $7
0x09 0xf8 0xe0 0x00
diff --git a/test/MC/Disassembler/Mips/mips32r6.txt b/test/MC/Disassembler/Mips/mips32r6.txt
new file mode 100644
index 000000000000..afef8ada152b
--- /dev/null
+++ b/test/MC/Disassembler/Mips/mips32r6.txt
@@ -0,0 +1,127 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r6 | FileCheck %s
+
+0xec 0x80 0x00 0x19 # CHECK: addiupc $4, 100
+0x7c 0x43 0x22 0xa0 # CHECK: align $4, $2, $3, 2
+0xec 0x7f 0x00 0x38 # CHECK: aluipc $3, 56
+0x3c 0x62 0xff 0xe9 # CHECK: aui $3, $2, -23
+0xec 0x7e 0xff 0xff # CHECK: auipc $3, -1
+0xe8 0x37 0x96 0xb8 # CHECK: balc 14572256
+0xc8 0x37 0x96 0xb8 # CHECK: bc 14572256
+
+# FIXME: Don't check the immediate on these for the moment, the encode/decode
+# functions are not inverses of each other.
+# The immediate should be 4 but the disassembler currently emits 8
+0x45 0x20 0x00 0x01 # CHECK: bc1eqz $f0,
+0x45 0x3f 0x00 0x01 # CHECK: bc1eqz $f31,
+0x45 0xa0 0x00 0x01 # CHECK: bc1nez $f0,
+0x45 0xbf 0x00 0x01 # CHECK: bc1nez $f31,
+# FIXME: Don't check the immediate on these for the moment, the encode/decode
+# functions are not inverses of each other.
+# The immediate should be 8 but the disassembler currently emits 12
+0x49 0x20 0x00 0x02 # CHECK: bc2eqz $0,
+0x49 0x3f 0x00 0x02 # CHECK: bc2eqz $31,
+0x49 0xa0 0x00 0x02 # CHECK: bc2nez $0,
+0x49 0xbf 0x00 0x02 # CHECK: bc2nez $31,
+
+0x20 0xa6 0x00 0x40 # CHECK: beqc $5, $6, 256
+# FIXME: Don't check the immediate on the bcczal instructions for the moment, the
+# encode/decode functions are not inverses of each other.
+0x20 0x02 0x01 0x4d # CHECK: beqzalc $2,
+0x60 0xa6 0x00 0x40 # CHECK: bnec $5, $6, 256
+0x60 0x02 0x01 0x4d # CHECK: bnezalc $2,
+0xd8 0xa0 0x46 0x90 # CHECK: beqzc $5, 72256
+0x58 0x43 0x00 0x40 # CHECK: bgec $2, $3, 256
+0x18 0x43 0x00 0x40 # CHECK: bgeuc $2, $3, 256
+0x18 0x42 0x01 0x4d # CHECK: bgezalc $2,
+0xf8 0xa0 0x46 0x90 # CHECK: bnezc $5, 72256
+0x5c 0xa5 0x00 0x40 # CHECK: bltzc $5, 256
+0x58 0xa5 0x00 0x40 # CHECK: bgezc $5, 256
+0x1c 0x02 0x01 0x4d # CHECK: bgtzalc $2,
+0x58 0x05 0x00 0x40 # CHECK: blezc $5, 256
+0x1c 0x42 0x01 0x4d # CHECK: bltzalc $2,
+0x5c 0x05 0x00 0x40 # CHECK: bgtzc $5, 256
+0x7c 0x02 0x20 0x20 # CHECK: bitswap $4, $2
+0x18 0x02 0x01 0x4d # CHECK: blezalc $2,
+0x5c 0xa6 0x00 0x40 # CHECK: bltc $5, $6, 256
+0x1c 0xa6 0x00 0x40 # CHECK: bltuc $5, $6, 256
+0x60 0x00 0x00 0x01 # CHECK: bnvc $zero, $zero, 4
+0x60 0x40 0x00 0x01 # CHECK: bnvc $2, $zero, 4
+0x60 0x82 0x00 0x01 # CHECK: bnvc $4, $2, 4
+0x20 0x00 0x00 0x01 # CHECK: bovc $zero, $zero, 4
+0x20 0x40 0x00 0x01 # CHECK: bovc $2, $zero, 4
+0x20 0x82 0x00 0x01 # CHECK: bovc $4, $2, 4
+0x46 0x84 0x18 0x80 # CHECK: cmp.af.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x80 # CHECK: cmp.af.d $f2, $f3, $f4
+0x46 0x84 0x18 0x81 # CHECK: cmp.un.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x81 # CHECK: cmp.un.d $f2, $f3, $f4
+0x46 0x84 0x18 0x82 # CHECK: cmp.eq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x82 # CHECK: cmp.eq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x83 # CHECK: cmp.ueq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x83 # CHECK: cmp.ueq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x84 # CHECK: cmp.lt.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x84 # CHECK: cmp.lt.d $f2, $f3, $f4
+0x46 0x84 0x18 0x85 # CHECK: cmp.ult.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x85 # CHECK: cmp.ult.d $f2, $f3, $f4
+0x46 0x84 0x18 0x86 # CHECK: cmp.le.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x86 # CHECK: cmp.le.d $f2, $f3, $f4
+0x46 0x84 0x18 0x87 # CHECK: cmp.ule.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x87 # CHECK: cmp.ule.d $f2, $f3, $f4
+0x46 0x84 0x18 0x88 # CHECK: cmp.saf.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x88 # CHECK: cmp.saf.d $f2, $f3, $f4
+0x46 0x84 0x18 0x89 # CHECK: cmp.sun.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x89 # CHECK: cmp.sun.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8a # CHECK: cmp.seq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8a # CHECK: cmp.seq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8b # CHECK: cmp.sueq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8b # CHECK: cmp.sueq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8c # CHECK: cmp.slt.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8c # CHECK: cmp.slt.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8d # CHECK: cmp.sult.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8d # CHECK: cmp.sult.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8e # CHECK: cmp.sle.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8e # CHECK: cmp.sle.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8f # CHECK: cmp.sule.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8f # CHECK: cmp.sule.d $f2, $f3, $f4
+0x00 0x64 0x10 0x9a # CHECK: div $2, $3, $4
+0x00 0x64 0x10 0x9b # CHECK: divu $2, $3, $4
+# 0xf8 0x05 0x01 0x00 # CHECK-TODO: jialc $5, 256
+# 0xd8 0x05 0x01 0x00 # CHECK-TODO: jic $5, 256
+0xec 0x48 0x00 0x43 # CHECK: lwpc $2, 268
+0xec 0x50 0x00 0x43 # CHECK: lwupc $2, 268
+0x00 0x64 0x10 0xda # CHECK: mod $2, $3, $4
+0x00 0x64 0x10 0xdb # CHECK: modu $2, $3, $4
+0x00 0x64 0x10 0x98 # CHECK: mul $2, $3, $4
+0x00 0x64 0x10 0xd8 # CHECK: muh $2, $3, $4
+0x00 0x64 0x10 0x99 # CHECK: mulu $2, $3, $4
+0x00 0x64 0x10 0xd9 # CHECK: muhu $2, $3, $4
+0x46 0x04 0x18 0x98 # CHECK: maddf.s $f2, $f3, $f4
+0x46 0x24 0x18 0x98 # CHECK: maddf.d $f2, $f3, $f4
+0x46 0x04 0x18 0x99 # CHECK: msubf.s $f2, $f3, $f4
+0x46 0x24 0x18 0x99 # CHECK: msubf.d $f2, $f3, $f4
+0x46 0x22 0x08 0x10 # CHECK: sel.d $f0, $f1, $f2
+0x46 0x02 0x08 0x10 # CHECK: sel.s $f0, $f1, $f2
+0x00 0x64 0x10 0x35 # CHECK: seleqz $2, $3, $4
+0x00 0x64 0x10 0x37 # CHECK: selnez $2, $3, $4
+0x46 0x04 0x10 0x1d # CHECK: max.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1d # CHECK: max.d $f0, $f2, $f4
+0x46 0x04 0x10 0x1c # CHECK: min.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1c # CHECK: min.d $f0, $f2, $f4
+0x46 0x04 0x10 0x1f # CHECK: maxa.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1f # CHECK: maxa.d $f0, $f2, $f4
+0x46 0x04 0x10 0x1e # CHECK: mina.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1e # CHECK: mina.d $f0, $f2, $f4
+0x46 0x04 0x10 0x14 # CHECK: seleqz.s $f0, $f2, $f4
+0x46 0x24 0x10 0x14 # CHECK: seleqz.d $f0, $f2, $f4
+0x46 0x04 0x10 0x17 # CHECK: selnez.s $f0, $f2, $f4
+0x46 0x24 0x10 0x17 # CHECK: selnez.d $f0, $f2, $f4
+0x46 0x00 0x20 0x9a # CHECK: rint.s $f2, $f4
+0x46 0x20 0x20 0x9a # CHECK: rint.d $f2, $f4
+0x46 0x00 0x20 0x9b # CHECK: class.s $f2, $f4
+0x46 0x20 0x20 0x9b # CHECK: class.d $f2, $f4
+0x00 0x80 0x04 0x09 # CHECK: jr.hb $4
+0x00 0x80 0xfc 0x09 # CHECK: jalr.hb $4
+0x00 0xa0 0x24 0x09 # CHECK: jalr.hb $4, $5
+0x7e 0x42 0xb3 0xb6 # CHECK: ll $2, -153($18)
+0x7e 0x6f 0xec 0x26 # CHECK: sc $15, -40($19)
+0x00 0xa0 0x58 0x51 # CHECK: clo $11, $5
+0x03 0x80 0xe8 0x50 # CHECK: clz $sp, $gp
diff --git a/test/MC/Disassembler/Mips/mips64r6.txt b/test/MC/Disassembler/Mips/mips64r6.txt
new file mode 100644
index 000000000000..3ddef9ab42ab
--- /dev/null
+++ b/test/MC/Disassembler/Mips/mips64r6.txt
@@ -0,0 +1,145 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips64r6 | FileCheck %s
+
+0xec 0x80 0x00 0x19 # CHECK: addiupc $4, 100
+0x7c 0x43 0x22 0xa0 # CHECK: align $4, $2, $3, 2
+0xec 0x7f 0x00 0x38 # CHECK: aluipc $3, 56
+0x3c 0x62 0xff 0xe9 # CHECK: aui $3, $2, -23
+0xec 0x7e 0xff 0xff # CHECK: auipc $3, -1
+0xe8 0x37 0x96 0xb8 # CHECK: balc 14572256
+0xc8 0x37 0x96 0xb8 # CHECK: bc 14572256
+
+# FIXME: Don't check the immediate on these for the moment, the encode/decode
+# functions are not inverses of each other.
+# The immediate should be 4 but the disassembler currently emits 8
+0x45 0x20 0x00 0x01 # CHECK: bc1eqz $f0,
+0x45 0x3f 0x00 0x01 # CHECK: bc1eqz $f31,
+0x45 0xa0 0x00 0x01 # CHECK: bc1nez $f0,
+0x45 0xbf 0x00 0x01 # CHECK: bc1nez $f31,
+# FIXME: Don't check the immediate on these for the moment, the encode/decode
+# functions are not inverses of each other.
+# The immediate should be 8 but the disassembler currently emits 12
+0x49 0x20 0x00 0x02 # CHECK: bc2eqz $0,
+0x49 0x3f 0x00 0x02 # CHECK: bc2eqz $31,
+0x49 0xa0 0x00 0x02 # CHECK: bc2nez $0,
+0x49 0xbf 0x00 0x02 # CHECK: bc2nez $31,
+
+0x20 0xa6 0x00 0x40 # CHECK: beqc $5, $6, 256
+# FIXME: Don't check the immediate on the bcczal instructions for the moment, the
+# encode/decode functions are not inverses of each other.
+0x20 0x02 0x01 0x4d # CHECK: beqzalc $2,
+0x60 0xa6 0x00 0x40 # CHECK: bnec $5, $6, 256
+0x60 0x02 0x01 0x4d # CHECK: bnezalc $2,
+0xd8 0xa0 0x46 0x90 # CHECK: beqzc $5, 72256
+0x58 0x43 0x00 0x40 # CHECK: bgec $2, $3, 256
+0x18 0x43 0x00 0x40 # CHECK: bgeuc $2, $3, 256
+0x18 0x42 0x01 0x4d # CHECK: bgezalc $2,
+0xf8 0xa0 0x46 0x90 # CHECK: bnezc $5, 72256
+0x5c 0xa5 0x00 0x40 # CHECK: bltzc $5, 256
+0x58 0xa5 0x00 0x40 # CHECK: bgezc $5, 256
+0x1c 0x02 0x01 0x4d # CHECK: bgtzalc $2,
+0x58 0x05 0x00 0x40 # CHECK: blezc $5, 256
+0x1c 0x42 0x01 0x4d # CHECK: bltzalc $2,
+0x5c 0x05 0x00 0x40 # CHECK: bgtzc $5, 256
+0x7c 0x02 0x20 0x20 # CHECK: bitswap $4, $2
+0x18 0x02 0x01 0x4d # CHECK: blezalc $2,
+0x5c 0xa6 0x00 0x40 # CHECK: bltc $5, $6, 256
+0x1c 0xa6 0x00 0x40 # CHECK: bltuc $5, $6, 256
+0x60 0x00 0x00 0x01 # CHECK: bnvc $zero, $zero, 4
+0x60 0x40 0x00 0x01 # CHECK: bnvc $2, $zero, 4
+0x60 0x82 0x00 0x01 # CHECK: bnvc $4, $2, 4
+0x20 0x00 0x00 0x01 # CHECK: bovc $zero, $zero, 4
+0x20 0x40 0x00 0x01 # CHECK: bovc $2, $zero, 4
+0x20 0x82 0x00 0x01 # CHECK: bovc $4, $2, 4
+0x46 0x84 0x18 0x80 # CHECK: cmp.af.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x80 # CHECK: cmp.af.d $f2, $f3, $f4
+0x46 0x84 0x18 0x81 # CHECK: cmp.un.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x81 # CHECK: cmp.un.d $f2, $f3, $f4
+0x46 0x84 0x18 0x82 # CHECK: cmp.eq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x82 # CHECK: cmp.eq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x83 # CHECK: cmp.ueq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x83 # CHECK: cmp.ueq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x84 # CHECK: cmp.lt.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x84 # CHECK: cmp.lt.d $f2, $f3, $f4
+0x46 0x84 0x18 0x85 # CHECK: cmp.ult.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x85 # CHECK: cmp.ult.d $f2, $f3, $f4
+0x46 0x84 0x18 0x86 # CHECK: cmp.le.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x86 # CHECK: cmp.le.d $f2, $f3, $f4
+0x46 0x84 0x18 0x87 # CHECK: cmp.ule.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x87 # CHECK: cmp.ule.d $f2, $f3, $f4
+0x46 0x84 0x18 0x88 # CHECK: cmp.saf.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x88 # CHECK: cmp.saf.d $f2, $f3, $f4
+0x46 0x84 0x18 0x89 # CHECK: cmp.sun.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x89 # CHECK: cmp.sun.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8a # CHECK: cmp.seq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8a # CHECK: cmp.seq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8b # CHECK: cmp.sueq.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8b # CHECK: cmp.sueq.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8c # CHECK: cmp.slt.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8c # CHECK: cmp.slt.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8d # CHECK: cmp.sult.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8d # CHECK: cmp.sult.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8e # CHECK: cmp.sle.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8e # CHECK: cmp.sle.d $f2, $f3, $f4
+0x46 0x84 0x18 0x8f # CHECK: cmp.sule.s $f2, $f3, $f4
+0x46 0xa4 0x18 0x8f # CHECK: cmp.sule.d $f2, $f3, $f4
+0x7c 0x43 0x23 0x64 # CHECK: dalign $4, $2, $3, 5
+0x74 0x62 0x12 0x34 # CHECK: daui $3, $2, 4660
+0x04 0x66 0x56 0x78 # CHECK: dahi $3, 22136
+0x04 0x7e 0xab 0xcd # CHECK: dati $3, -21555
+0x7c 0x02 0x20 0x24 # CHECK: dbitswap $4, $2
+0x00 0x64 0x10 0x9a # CHECK: div $2, $3, $4
+0x00 0x64 0x10 0x9b # CHECK: divu $2, $3, $4
+# 0xf8 0x05 0x01 0x00 # CHECK-TODO: jialc $5, 256
+# 0xd8 0x05 0x01 0x00 # CHECK-TODO: jic $5, 256
+0xec 0x48 0x00 0x43 # CHECK: lwpc $2, 268
+0xec 0x50 0x00 0x43 # CHECK: lwupc $2, 268
+0x00 0x64 0x10 0xda # CHECK: mod $2, $3, $4
+0x00 0x64 0x10 0xdb # CHECK: modu $2, $3, $4
+0x00 0x64 0x10 0x9e # CHECK: ddiv $2, $3, $4
+0x00 0x64 0x10 0x9f # CHECK: ddivu $2, $3, $4
+0x00 0x64 0x10 0xde # CHECK: dmod $2, $3, $4
+0x00 0x64 0x10 0xdf # CHECK: dmodu $2, $3, $4
+0x00 0x64 0x10 0x98 # CHECK: mul $2, $3, $4
+0x00 0x64 0x10 0xd8 # CHECK: muh $2, $3, $4
+0x00 0x64 0x10 0x99 # CHECK: mulu $2, $3, $4
+0x00 0x64 0x10 0xd9 # CHECK: muhu $2, $3, $4
+0x00 0x64 0x10 0x9c # CHECK: dmul $2, $3, $4
+0x00 0x64 0x10 0xdc # CHECK: dmuh $2, $3, $4
+0x00 0x64 0x10 0x9d # CHECK: dmulu $2, $3, $4
+0x00 0x64 0x10 0xdd # CHECK: dmuhu $2, $3, $4
+0x46 0x04 0x18 0x98 # CHECK: maddf.s $f2, $f3, $f4
+0x46 0x24 0x18 0x98 # CHECK: maddf.d $f2, $f3, $f4
+0x46 0x04 0x18 0x99 # CHECK: msubf.s $f2, $f3, $f4
+0x46 0x24 0x18 0x99 # CHECK: msubf.d $f2, $f3, $f4
+0x46 0x22 0x08 0x10 # CHECK: sel.d $f0, $f1, $f2
+0x46 0x02 0x08 0x10 # CHECK: sel.s $f0, $f1, $f2
+0x00 0x64 0x10 0x35 # CHECK: seleqz $2, $3, $4
+0x00 0x64 0x10 0x37 # CHECK: selnez $2, $3, $4
+0x46 0x04 0x10 0x1d # CHECK: max.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1d # CHECK: max.d $f0, $f2, $f4
+0x46 0x04 0x10 0x1c # CHECK: min.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1c # CHECK: min.d $f0, $f2, $f4
+0x46 0x04 0x10 0x1f # CHECK: maxa.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1f # CHECK: maxa.d $f0, $f2, $f4
+0x46 0x04 0x10 0x1e # CHECK: mina.s $f0, $f2, $f4
+0x46 0x24 0x10 0x1e # CHECK: mina.d $f0, $f2, $f4
+0x46 0x04 0x10 0x14 # CHECK: seleqz.s $f0, $f2, $f4
+0x46 0x24 0x10 0x14 # CHECK: seleqz.d $f0, $f2, $f4
+0x46 0x04 0x10 0x17 # CHECK: selnez.s $f0, $f2, $f4
+0x46 0x24 0x10 0x17 # CHECK: selnez.d $f0, $f2, $f4
+0x46 0x00 0x20 0x9a # CHECK: rint.s $f2, $f4
+0x46 0x20 0x20 0x9a # CHECK: rint.d $f2, $f4
+0x46 0x00 0x20 0x9b # CHECK: class.s $f2, $f4
+0x46 0x20 0x20 0x9b # CHECK: class.d $f2, $f4
+0xec 0x58 0x3c 0x48 # CHECK: ldpc $2, 123456
+0x00 0x80 0x04 0x09 # CHECK: jr.hb $4
+0x00 0x80 0xfc 0x09 # CHECK: jalr.hb $4
+0x00 0xa0 0x24 0x09 # CHECK: jalr.hb $4, $5
+0x7e 0x42 0xb3 0xb6 # CHECK: ll $2, -153($18)
+0x7f 0xe0 0x38 0x37 # CHECK: lld $zero, 112($ra)
+0x7e 0x6f 0xec 0x26 # CHECK: sc $15, -40($19)
+0x7f 0xaf 0xe6 0xa7 # CHECK: scd $15, -51($sp)
+0x00 0xa0 0x58 0x51 # CHECK: clo $11, $5
+0x03 0x80 0xe8 0x50 # CHECK: clz $sp, $gp
+0x00 0xc0 0x90 0x53 # CHECK: dclo $18, $6
+0x03 0x20 0x80 0x52 # CHECK: dclz $16, $25
diff --git a/test/MC/Disassembler/Mips/msa/test_2r.txt b/test/MC/Disassembler/Mips/msa/test_2r.txt
new file mode 100644
index 000000000000..7faa13c971c4
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_2r.txt
@@ -0,0 +1,17 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x7b 0x00 0x4f 0x9e # CHECK: fill.b $w30, $9
+0x7b 0x01 0xbf 0xde # CHECK: fill.h $w31, $23
+0x7b 0x02 0xc4 0x1e # CHECK: fill.w $w16, $24
+0x7b 0x08 0x05 0x5e # CHECK: nloc.b $w21, $w0
+0x7b 0x09 0xfc 0x9e # CHECK: nloc.h $w18, $w31
+0x7b 0x0a 0xb8 0x9e # CHECK: nloc.w $w2, $w23
+0x7b 0x0b 0x51 0x1e # CHECK: nloc.d $w4, $w10
+0x7b 0x0c 0x17 0xde # CHECK: nlzc.b $w31, $w2
+0x7b 0x0d 0xb6 0xde # CHECK: nlzc.h $w27, $w22
+0x7b 0x0e 0xea 0x9e # CHECK: nlzc.w $w10, $w29
+0x7b 0x0f 0x4e 0x5e # CHECK: nlzc.d $w25, $w9
+0x7b 0x04 0x95 0x1e # CHECK: pcnt.b $w20, $w18
+0x7b 0x05 0x40 0x1e # CHECK: pcnt.h $w0, $w8
+0x7b 0x06 0x4d 0xde # CHECK: pcnt.w $w23, $w9
+0x7b 0x07 0xc5 0x5e # CHECK: pcnt.d $w21, $w24
diff --git a/test/MC/Disassembler/Mips/msa/test_2r_msa64.txt b/test/MC/Disassembler/Mips/msa/test_2r_msa64.txt
new file mode 100644
index 000000000000..f212390ebc78
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_2r_msa64.txt
@@ -0,0 +1,3 @@
+# RUN: llvm-mc --disassemble %s -triple=mips64-unknown-linux -mcpu=mips64r2 -mattr=+msa | FileCheck %s
+
+0x7b 0x03 0x4e 0xde # CHECK: fill.d $w27, $9
diff --git a/test/MC/Disassembler/Mips/msa/test_2rf.txt b/test/MC/Disassembler/Mips/msa/test_2rf.txt
new file mode 100644
index 000000000000..e004f11b9461
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_2rf.txt
@@ -0,0 +1,34 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x7b 0x20 0x66 0x9e # CHECK: fclass.w $w26, $w12
+0x7b 0x21 0x8e 0x1e # CHECK: fclass.d $w24, $w17
+0x7b 0x30 0x02 0x1e # CHECK: fexupl.w $w8, $w0
+0x7b 0x31 0xec 0x5e # CHECK: fexupl.d $w17, $w29
+0x7b 0x32 0x23 0x5e # CHECK: fexupr.w $w13, $w4
+0x7b 0x33 0x11 0x5e # CHECK: fexupr.d $w5, $w2
+0x7b 0x3c 0xed 0x1e # CHECK: ffint_s.w $w20, $w29
+0x7b 0x3d 0x7b 0x1e # CHECK: ffint_s.d $w12, $w15
+0x7b 0x3e 0xd9 0xde # CHECK: ffint_u.w $w7, $w27
+0x7b 0x3f 0x84 0xde # CHECK: ffint_u.d $w19, $w16
+0x7b 0x34 0x6f 0xde # CHECK: ffql.w $w31, $w13
+0x7b 0x35 0x6b 0x1e # CHECK: ffql.d $w12, $w13
+0x7b 0x36 0xf6 0xde # CHECK: ffqr.w $w27, $w30
+0x7b 0x37 0x7f 0x9e # CHECK: ffqr.d $w30, $w15
+0x7b 0x2e 0xfe 0x5e # CHECK: flog2.w $w25, $w31
+0x7b 0x2f 0x54 0x9e # CHECK: flog2.d $w18, $w10
+0x7b 0x2c 0x79 0xde # CHECK: frint.w $w7, $w15
+0x7b 0x2d 0xb5 0x5e # CHECK: frint.d $w21, $w22
+0x7b 0x2a 0x04 0xde # CHECK: frcp.w $w19, $w0
+0x7b 0x2b 0x71 0x1e # CHECK: frcp.d $w4, $w14
+0x7b 0x28 0x8b 0x1e # CHECK: frsqrt.w $w12, $w17
+0x7b 0x29 0x5d 0xde # CHECK: frsqrt.d $w23, $w11
+0x7b 0x26 0x58 0x1e # CHECK: fsqrt.w $w0, $w11
+0x7b 0x27 0x63 0xde # CHECK: fsqrt.d $w15, $w12
+0x7b 0x38 0x2f 0x9e # CHECK: ftint_s.w $w30, $w5
+0x7b 0x39 0xb9 0x5e # CHECK: ftint_s.d $w5, $w23
+0x7b 0x3a 0x75 0x1e # CHECK: ftint_u.w $w20, $w14
+0x7b 0x3b 0xad 0xde # CHECK: ftint_u.d $w23, $w21
+0x7b 0x22 0x8f 0x5e # CHECK: ftrunc_s.w $w29, $w17
+0x7b 0x23 0xdb 0x1e # CHECK: ftrunc_s.d $w12, $w27
+0x7b 0x24 0x7c 0x5e # CHECK: ftrunc_u.w $w17, $w15
+0x7b 0x25 0xd9 0x5e # CHECK: ftrunc_u.d $w5, $w27
diff --git a/test/MC/Disassembler/Mips/msa/test_3r.txt b/test/MC/Disassembler/Mips/msa/test_3r.txt
new file mode 100644
index 000000000000..2ef3a8927e8e
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_3r.txt
@@ -0,0 +1,244 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x78 0x04 0x4e 0x90 # CHECK: add_a.b $w26, $w9, $w4
+0x78 0x3f 0xdd 0xd0 # CHECK: add_a.h $w23, $w27, $w31
+0x78 0x56 0x32 0xd0 # CHECK: add_a.w $w11, $w6, $w22
+0x78 0x60 0x51 0x90 # CHECK: add_a.d $w6, $w10, $w0
+0x78 0x93 0xc4 0xd0 # CHECK: adds_a.b $w19, $w24, $w19
+0x78 0xa4 0x36 0x50 # CHECK: adds_a.h $w25, $w6, $w4
+0x78 0xdb 0x8e 0x50 # CHECK: adds_a.w $w25, $w17, $w27
+0x78 0xfa 0x93 0xd0 # CHECK: adds_a.d $w15, $w18, $w26
+0x79 0x13 0x5f 0x50 # CHECK: adds_s.b $w29, $w11, $w19
+0x79 0x3a 0xb9 0x50 # CHECK: adds_s.h $w5, $w23, $w26
+0x79 0x4d 0x74 0x10 # CHECK: adds_s.w $w16, $w14, $w13
+0x79 0x7c 0x70 0x90 # CHECK: adds_s.d $w2, $w14, $w28
+0x79 0x8e 0x88 0xd0 # CHECK: adds_u.b $w3, $w17, $w14
+0x79 0xa4 0xf2 0x90 # CHECK: adds_u.h $w10, $w30, $w4
+0x79 0xd4 0x93 0xd0 # CHECK: adds_u.w $w15, $w18, $w20
+0x79 0xe9 0x57 0x90 # CHECK: adds_u.d $w30, $w10, $w9
+0x78 0x15 0xa6 0x0e # CHECK: addv.b $w24, $w20, $w21
+0x78 0x3b 0x69 0x0e # CHECK: addv.h $w4, $w13, $w27
+0x78 0x4e 0x5c 0xce # CHECK: addv.w $w19, $w11, $w14
+0x78 0x7f 0xa8 0x8e # CHECK: addv.d $w2, $w21, $w31
+0x7a 0x03 0x85 0xd1 # CHECK: asub_s.b $w23, $w16, $w3
+0x7a 0x39 0x8d 0x91 # CHECK: asub_s.h $w22, $w17, $w25
+0x7a 0x49 0x0e 0x11 # CHECK: asub_s.w $w24, $w1, $w9
+0x7a 0x6c 0x63 0x51 # CHECK: asub_s.d $w13, $w12, $w12
+0x7a 0x8b 0xea 0x91 # CHECK: asub_u.b $w10, $w29, $w11
+0x7a 0xaf 0x4c 0x91 # CHECK: asub_u.h $w18, $w9, $w15
+0x7a 0xdf 0x9a 0x91 # CHECK: asub_u.w $w10, $w19, $w31
+0x7a 0xe0 0x54 0x51 # CHECK: asub_u.d $w17, $w10, $w0
+0x7a 0x01 0x28 0x90 # CHECK: ave_s.b $w2, $w5, $w1
+0x7a 0x29 0x9c 0x10 # CHECK: ave_s.h $w16, $w19, $w9
+0x7a 0x45 0xfc 0x50 # CHECK: ave_s.w $w17, $w31, $w5
+0x7a 0x6a 0xce 0xd0 # CHECK: ave_s.d $w27, $w25, $w10
+0x7a 0x89 0x9c 0x10 # CHECK: ave_u.b $w16, $w19, $w9
+0x7a 0xab 0xe7 0x10 # CHECK: ave_u.h $w28, $w28, $w11
+0x7a 0xcb 0x62 0xd0 # CHECK: ave_u.w $w11, $w12, $w11
+0x7a 0xfc 0x9f 0x90 # CHECK: ave_u.d $w30, $w19, $w28
+0x7b 0x02 0x86 0x90 # CHECK: aver_s.b $w26, $w16, $w2
+0x7b 0x3b 0xdf 0xd0 # CHECK: aver_s.h $w31, $w27, $w27
+0x7b 0x59 0x97 0x10 # CHECK: aver_s.w $w28, $w18, $w25
+0x7b 0x7b 0xaf 0x50 # CHECK: aver_s.d $w29, $w21, $w27
+0x7b 0x83 0xd7 0x50 # CHECK: aver_u.b $w29, $w26, $w3
+0x7b 0xa9 0x94 0x90 # CHECK: aver_u.h $w18, $w18, $w9
+0x7b 0xdd 0xcc 0x50 # CHECK: aver_u.w $w17, $w25, $w29
+0x7b 0xf3 0xb5 0x90 # CHECK: aver_u.d $w22, $w22, $w19
+0x79 0x9d 0x78 0x8d # CHECK: bclr.b $w2, $w15, $w29
+0x79 0xbc 0xac 0x0d # CHECK: bclr.h $w16, $w21, $w28
+0x79 0xc9 0x14 0xcd # CHECK: bclr.w $w19, $w2, $w9
+0x79 0xe4 0xfe 0xcd # CHECK: bclr.d $w27, $w31, $w4
+0x7b 0x18 0x81 0x4d # CHECK: binsl.b $w5, $w16, $w24
+0x7b 0x2a 0x2f 0x8d # CHECK: binsl.h $w30, $w5, $w10
+0x7b 0x4d 0x7b 0x8d # CHECK: binsl.w $w14, $w15, $w13
+0x7b 0x6c 0xa5 0xcd # CHECK: binsl.d $w23, $w20, $w12
+0x7b 0x82 0x5d 0x8d # CHECK: binsr.b $w22, $w11, $w2
+0x7b 0xa6 0xd0 0x0d # CHECK: binsr.h $w0, $w26, $w6
+0x7b 0xdc 0x1e 0x8d # CHECK: binsr.w $w26, $w3, $w28
+0x7b 0xf5 0x00 0x0d # CHECK: binsr.d $w0, $w0, $w21
+0x7a 0x98 0x58 0x0d # CHECK: bneg.b $w0, $w11, $w24
+0x7a 0xa4 0x87 0x0d # CHECK: bneg.h $w28, $w16, $w4
+0x7a 0xd3 0xd0 0xcd # CHECK: bneg.w $w3, $w26, $w19
+0x7a 0xef 0xeb 0x4d # CHECK: bneg.d $w13, $w29, $w15
+0x7a 0x1f 0x2f 0xcd # CHECK: bset.b $w31, $w5, $w31
+0x7a 0x26 0x63 0x8d # CHECK: bset.h $w14, $w12, $w6
+0x7a 0x4c 0x4f 0xcd # CHECK: bset.w $w31, $w9, $w12
+0x7a 0x65 0xb1 0x4d # CHECK: bset.d $w5, $w22, $w5
+0x78 0x12 0xff 0xcf # CHECK: ceq.b $w31, $w31, $w18
+0x78 0x29 0xda 0x8f # CHECK: ceq.h $w10, $w27, $w9
+0x78 0x4e 0x2a 0x4f # CHECK: ceq.w $w9, $w5, $w14
+0x78 0x60 0x89 0x4f # CHECK: ceq.d $w5, $w17, $w0
+0x7a 0x09 0x25 0xcf # CHECK: cle_s.b $w23, $w4, $w9
+0x7a 0x33 0xdd 0x8f # CHECK: cle_s.h $w22, $w27, $w19
+0x7a 0x4a 0xd7 0x8f # CHECK: cle_s.w $w30, $w26, $w10
+0x7a 0x6a 0x2c 0x8f # CHECK: cle_s.d $w18, $w5, $w10
+0x7a 0x80 0xc8 0x4f # CHECK: cle_u.b $w1, $w25, $w0
+0x7a 0xbd 0x01 0xcf # CHECK: cle_u.h $w7, $w0, $w29
+0x7a 0xc1 0x96 0x4f # CHECK: cle_u.w $w25, $w18, $w1
+0x7a 0xfe 0x01 0x8f # CHECK: cle_u.d $w6, $w0, $w30
+0x79 0x15 0x16 0x4f # CHECK: clt_s.b $w25, $w2, $w21
+0x79 0x29 0x98 0x8f # CHECK: clt_s.h $w2, $w19, $w9
+0x79 0x50 0x45 0xcf # CHECK: clt_s.w $w23, $w8, $w16
+0x79 0x6c 0xf1 0xcf # CHECK: clt_s.d $w7, $w30, $w12
+0x79 0x8d 0xf8 0x8f # CHECK: clt_u.b $w2, $w31, $w13
+0x79 0xb7 0xfc 0x0f # CHECK: clt_u.h $w16, $w31, $w23
+0x79 0xc9 0xc0 0xcf # CHECK: clt_u.w $w3, $w24, $w9
+0x79 0xe1 0x01 0xcf # CHECK: clt_u.d $w7, $w0, $w1
+0x7a 0x12 0x1f 0x52 # CHECK: div_s.b $w29, $w3, $w18
+0x7a 0x2d 0x84 0x52 # CHECK: div_s.h $w17, $w16, $w13
+0x7a 0x5e 0xc9 0x12 # CHECK: div_s.w $w4, $w25, $w30
+0x7a 0x74 0x4f 0xd2 # CHECK: div_s.d $w31, $w9, $w20
+0x7a 0x8a 0xe9 0x92 # CHECK: div_u.b $w6, $w29, $w10
+0x7a 0xae 0xae 0x12 # CHECK: div_u.h $w24, $w21, $w14
+0x7a 0xd9 0x77 0x52 # CHECK: div_u.w $w29, $w14, $w25
+0x7a 0xf5 0x0f 0xd2 # CHECK: div_u.d $w31, $w1, $w21
+0x78 0x39 0xb5 0xd3 # CHECK: dotp_s.h $w23, $w22, $w25
+0x78 0x45 0x75 0x13 # CHECK: dotp_s.w $w20, $w14, $w5
+0x78 0x76 0x14 0x53 # CHECK: dotp_s.d $w17, $w2, $w22
+0x78 0xa6 0x13 0x53 # CHECK: dotp_u.h $w13, $w2, $w6
+0x78 0xd5 0xb3 0xd3 # CHECK: dotp_u.w $w15, $w22, $w21
+0x78 0xfa 0x81 0x13 # CHECK: dotp_u.d $w4, $w16, $w26
+0x79 0x36 0xe0 0x53 # CHECK: dpadd_s.h $w1, $w28, $w22
+0x79 0x4c 0x0a 0x93 # CHECK: dpadd_s.w $w10, $w1, $w12
+0x79 0x7b 0xa8 0xd3 # CHECK: dpadd_s.d $w3, $w21, $w27
+0x79 0xb4 0x2c 0x53 # CHECK: dpadd_u.h $w17, $w5, $w20
+0x79 0xd0 0x46 0x13 # CHECK: dpadd_u.w $w24, $w8, $w16
+0x79 0xf0 0xeb 0xd3 # CHECK: dpadd_u.d $w15, $w29, $w16
+0x7a 0x2c 0x59 0x13 # CHECK: dpsub_s.h $w4, $w11, $w12
+0x7a 0x46 0x39 0x13 # CHECK: dpsub_s.w $w4, $w7, $w6
+0x7a 0x7c 0x67 0xd3 # CHECK: dpsub_s.d $w31, $w12, $w28
+0x7a 0xb1 0xc9 0x13 # CHECK: dpsub_u.h $w4, $w25, $w17
+0x7a 0xd0 0xcc 0xd3 # CHECK: dpsub_u.w $w19, $w25, $w16
+0x7a 0xfa 0x51 0xd3 # CHECK: dpsub_u.d $w7, $w10, $w26
+0x7a 0x22 0xc7 0x15 # CHECK: hadd_s.h $w28, $w24, $w2
+0x7a 0x4b 0x8e 0x15 # CHECK: hadd_s.w $w24, $w17, $w11
+0x7a 0x74 0x7c 0x55 # CHECK: hadd_s.d $w17, $w15, $w20
+0x7a 0xb1 0xeb 0x15 # CHECK: hadd_u.h $w12, $w29, $w17
+0x7a 0xc6 0x2a 0x55 # CHECK: hadd_u.w $w9, $w5, $w6
+0x7a 0xe6 0xa0 0x55 # CHECK: hadd_u.d $w1, $w20, $w6
+0x7b 0x3d 0x74 0x15 # CHECK: hsub_s.h $w16, $w14, $w29
+0x7b 0x4b 0x6a 0x55 # CHECK: hsub_s.w $w9, $w13, $w11
+0x7b 0x6e 0x97 0x95 # CHECK: hsub_s.d $w30, $w18, $w14
+0x7b 0xae 0x61 0xd5 # CHECK: hsub_u.h $w7, $w12, $w14
+0x7b 0xc5 0x2d 0x55 # CHECK: hsub_u.w $w21, $w5, $w5
+0x7b 0xff 0x62 0xd5 # CHECK: hsub_u.d $w11, $w12, $w31
+0x7b 0x1e 0x84 0x94 # CHECK: ilvev.b $w18, $w16, $w30
+0x7b 0x2d 0x03 0x94 # CHECK: ilvev.h $w14, $w0, $w13
+0x7b 0x56 0xcb 0x14 # CHECK: ilvev.w $w12, $w25, $w22
+0x7b 0x63 0xdf 0x94 # CHECK: ilvev.d $w30, $w27, $w3
+0x7a 0x15 0x1f 0x54 # CHECK: ilvl.b $w29, $w3, $w21
+0x7a 0x31 0x56 0xd4 # CHECK: ilvl.h $w27, $w10, $w17
+0x7a 0x40 0x09 0x94 # CHECK: ilvl.w $w6, $w1, $w0
+0x7a 0x78 0x80 0xd4 # CHECK: ilvl.d $w3, $w16, $w24
+0x7b 0x94 0x2a 0xd4 # CHECK: ilvod.b $w11, $w5, $w20
+0x7b 0xbf 0x6c 0x94 # CHECK: ilvod.h $w18, $w13, $w31
+0x7b 0xd8 0x87 0x54 # CHECK: ilvod.w $w29, $w16, $w24
+0x7b 0xfd 0x65 0x94 # CHECK: ilvod.d $w22, $w12, $w29
+0x7a 0x86 0xf1 0x14 # CHECK: ilvr.b $w4, $w30, $w6
+0x7a 0xbd 0x9f 0x14 # CHECK: ilvr.h $w28, $w19, $w29
+0x7a 0xd5 0xa4 0x94 # CHECK: ilvr.w $w18, $w20, $w21
+0x7a 0xec 0xf5 0xd4 # CHECK: ilvr.d $w23, $w30, $w12
+0x78 0x9d 0xfc 0x52 # CHECK: maddv.b $w17, $w31, $w29
+0x78 0xa9 0xc1 0xd2 # CHECK: maddv.h $w7, $w24, $w9
+0x78 0xd4 0xb5 0x92 # CHECK: maddv.w $w22, $w22, $w20
+0x78 0xf4 0xd7 0x92 # CHECK: maddv.d $w30, $w26, $w20
+0x7b 0x17 0x5d 0xce # CHECK: max_a.b $w23, $w11, $w23
+0x7b 0x3e 0x2d 0x0e # CHECK: max_a.h $w20, $w5, $w30
+0x7b 0x5e 0x91 0xce # CHECK: max_a.w $w7, $w18, $w30
+0x7b 0x7f 0x42 0x0e # CHECK: max_a.d $w8, $w8, $w31
+0x79 0x13 0x0a 0x8e # CHECK: max_s.b $w10, $w1, $w19
+0x79 0x31 0xeb 0xce # CHECK: max_s.h $w15, $w29, $w17
+0x79 0x4e 0xeb 0xce # CHECK: max_s.w $w15, $w29, $w14
+0x79 0x63 0xc6 0x4e # CHECK: max_s.d $w25, $w24, $w3
+0x79 0x85 0xc3 0x0e # CHECK: max_u.b $w12, $w24, $w5
+0x79 0xa7 0x31 0x4e # CHECK: max_u.h $w5, $w6, $w7
+0x79 0xc7 0x24 0x0e # CHECK: max_u.w $w16, $w4, $w7
+0x79 0xf8 0x66 0x8e # CHECK: max_u.d $w26, $w12, $w24
+0x7b 0x81 0xd1 0x0e # CHECK: min_a.b $w4, $w26, $w1
+0x7b 0xbf 0x6b 0x0e # CHECK: min_a.h $w12, $w13, $w31
+0x7b 0xc0 0xa7 0x0e # CHECK: min_a.w $w28, $w20, $w0
+0x7b 0xf3 0xa3 0x0e # CHECK: min_a.d $w12, $w20, $w19
+0x7a 0x0e 0x1c 0xce # CHECK: min_s.b $w19, $w3, $w14
+0x7a 0x28 0xae 0xce # CHECK: min_s.h $w27, $w21, $w8
+0x7a 0x5e 0x70 0x0e # CHECK: min_s.w $w0, $w14, $w30
+0x7a 0x75 0x41 0x8e # CHECK: min_s.d $w6, $w8, $w21
+0x7a 0x88 0xd5 0x8e # CHECK: min_u.b $w22, $w26, $w8
+0x7a 0xac 0xd9 0xce # CHECK: min_u.h $w7, $w27, $w12
+0x7a 0xce 0xa2 0x0e # CHECK: min_u.w $w8, $w20, $w14
+0x7a 0xef 0x76 0x8e # CHECK: min_u.d $w26, $w14, $w15
+0x7b 0x1a 0x0c 0x92 # CHECK: mod_s.b $w18, $w1, $w26
+0x7b 0x3c 0xf7 0xd2 # CHECK: mod_s.h $w31, $w30, $w28
+0x7b 0x4d 0x30 0x92 # CHECK: mod_s.w $w2, $w6, $w13
+0x7b 0x76 0xdd 0x52 # CHECK: mod_s.d $w21, $w27, $w22
+0x7b 0x8d 0x3c 0x12 # CHECK: mod_u.b $w16, $w7, $w13
+0x7b 0xa7 0x46 0x12 # CHECK: mod_u.h $w24, $w8, $w7
+0x7b 0xd1 0x17 0x92 # CHECK: mod_u.w $w30, $w2, $w17
+0x7b 0xf9 0x17 0xd2 # CHECK: mod_u.d $w31, $w2, $w25
+0x79 0x0c 0x2b 0x92 # CHECK: msubv.b $w14, $w5, $w12
+0x79 0x3e 0x39 0x92 # CHECK: msubv.h $w6, $w7, $w30
+0x79 0x55 0x13 0x52 # CHECK: msubv.w $w13, $w2, $w21
+0x79 0x7b 0x74 0x12 # CHECK: msubv.d $w16, $w14, $w27
+0x78 0x0d 0x1d 0x12 # CHECK: mulv.b $w20, $w3, $w13
+0x78 0x2e 0xd6 0xd2 # CHECK: mulv.h $w27, $w26, $w14
+0x78 0x43 0xea 0x92 # CHECK: mulv.w $w10, $w29, $w3
+0x78 0x7d 0x99 0xd2 # CHECK: mulv.d $w7, $w19, $w29
+0x79 0x07 0xd9 0x54 # CHECK: pckev.b $w5, $w27, $w7
+0x79 0x3b 0x20 0x54 # CHECK: pckev.h $w1, $w4, $w27
+0x79 0x40 0xa7 0x94 # CHECK: pckev.w $w30, $w20, $w0
+0x79 0x6f 0x09 0x94 # CHECK: pckev.d $w6, $w1, $w15
+0x79 0x9e 0xe4 0x94 # CHECK: pckod.b $w18, $w28, $w30
+0x79 0xa8 0x2e 0x94 # CHECK: pckod.h $w26, $w5, $w8
+0x79 0xc2 0x22 0x54 # CHECK: pckod.w $w9, $w4, $w2
+0x79 0xf4 0xb7 0x94 # CHECK: pckod.d $w30, $w22, $w20
+0x78 0x0c 0xb9 0x54 # CHECK: sld.b $w5, $w23[$12]
+0x78 0x23 0xb8 0x54 # CHECK: sld.h $w1, $w23[$3]
+0x78 0x49 0x45 0x14 # CHECK: sld.w $w20, $w8[$9]
+0x78 0x7e 0xb9 0xd4 # CHECK: sld.d $w7, $w23[$fp]
+0x78 0x11 0x00 0xcd # CHECK: sll.b $w3, $w0, $w17
+0x78 0x23 0xdc 0x4d # CHECK: sll.h $w17, $w27, $w3
+0x78 0x46 0x3c 0x0d # CHECK: sll.w $w16, $w7, $w6
+0x78 0x7a 0x02 0x4d # CHECK: sll.d $w9, $w0, $w26
+0x78 0x81 0x0f 0x14 # CHECK: splat.b $w28, $w1[$1]
+0x78 0xab 0x58 0x94 # CHECK: splat.h $w2, $w11[$11]
+0x78 0xcb 0x05 0x94 # CHECK: splat.w $w22, $w0[$11]
+0x78 0xe2 0x00 0x14 # CHECK: splat.d $w0, $w0[$2]
+0x78 0x91 0x27 0x0d # CHECK: sra.b $w28, $w4, $w17
+0x78 0xa3 0x4b 0x4d # CHECK: sra.h $w13, $w9, $w3
+0x78 0xd3 0xae 0xcd # CHECK: sra.w $w27, $w21, $w19
+0x78 0xf7 0x47 0x8d # CHECK: sra.d $w30, $w8, $w23
+0x78 0x92 0x94 0xd5 # CHECK: srar.b $w19, $w18, $w18
+0x78 0xa8 0xb9 0xd5 # CHECK: srar.h $w7, $w23, $w8
+0x78 0xc2 0x60 0x55 # CHECK: srar.w $w1, $w12, $w2
+0x78 0xee 0x3d 0x55 # CHECK: srar.d $w21, $w7, $w14
+0x79 0x13 0x1b 0x0d # CHECK: srl.b $w12, $w3, $w19
+0x79 0x34 0xfd 0xcd # CHECK: srl.h $w23, $w31, $w20
+0x79 0x4b 0xdc 0x8d # CHECK: srl.w $w18, $w27, $w11
+0x79 0x7a 0x60 0xcd # CHECK: srl.d $w3, $w12, $w26
+0x79 0x0b 0xab 0xd5 # CHECK: srlr.b $w15, $w21, $w11
+0x79 0x33 0x6d 0x55 # CHECK: srlr.h $w21, $w13, $w19
+0x79 0x43 0xf1 0x95 # CHECK: srlr.w $w6, $w30, $w3
+0x79 0x6e 0x10 0x55 # CHECK: srlr.d $w1, $w2, $w14
+0x78 0x01 0x7e 0x51 # CHECK: subs_s.b $w25, $w15, $w1
+0x78 0x36 0xcf 0x11 # CHECK: subs_s.h $w28, $w25, $w22
+0x78 0x55 0x62 0x91 # CHECK: subs_s.w $w10, $w12, $w21
+0x78 0x72 0xa1 0x11 # CHECK: subs_s.d $w4, $w20, $w18
+0x78 0x99 0x35 0x51 # CHECK: subs_u.b $w21, $w6, $w25
+0x78 0xa7 0x50 0xd1 # CHECK: subs_u.h $w3, $w10, $w7
+0x78 0xca 0x7a 0x51 # CHECK: subs_u.w $w9, $w15, $w10
+0x78 0xea 0x99 0xd1 # CHECK: subs_u.d $w7, $w19, $w10
+0x79 0x0c 0x39 0x91 # CHECK: subsus_u.b $w6, $w7, $w12
+0x79 0x33 0xe9 0x91 # CHECK: subsus_u.h $w6, $w29, $w19
+0x79 0x47 0x79 0xd1 # CHECK: subsus_u.w $w7, $w15, $w7
+0x79 0x6f 0x1a 0x51 # CHECK: subsus_u.d $w9, $w3, $w15
+0x79 0x9f 0x1d 0x91 # CHECK: subsuu_s.b $w22, $w3, $w31
+0x79 0xb6 0xbc 0xd1 # CHECK: subsuu_s.h $w19, $w23, $w22
+0x79 0xcd 0x52 0x51 # CHECK: subsuu_s.w $w9, $w10, $w13
+0x79 0xe0 0x31 0x51 # CHECK: subsuu_s.d $w5, $w6, $w0
+0x78 0x93 0x69 0x8e # CHECK: subv.b $w6, $w13, $w19
+0x78 0xac 0xc9 0x0e # CHECK: subv.h $w4, $w25, $w12
+0x78 0xcb 0xde 0xce # CHECK: subv.w $w27, $w27, $w11
+0x78 0xea 0xc2 0x4e # CHECK: subv.d $w9, $w24, $w10
+0x78 0x05 0x80 0xd5 # CHECK: vshf.b $w3, $w16, $w5
+0x78 0x28 0x9d 0x15 # CHECK: vshf.h $w20, $w19, $w8
+0x78 0x59 0xf4 0x15 # CHECK: vshf.w $w16, $w30, $w25
+0x78 0x6f 0x5c 0xd5 # CHECK: vshf.d $w19, $w11, $w15
diff --git a/test/MC/Disassembler/Mips/msa/test_3rf.txt b/test/MC/Disassembler/Mips/msa/test_3rf.txt
new file mode 100644
index 000000000000..3b7b07ce2e8e
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_3rf.txt
@@ -0,0 +1,84 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x78 0x1c 0x9f 0x1b # CHECK: fadd.w $w28, $w19, $w28
+0x78 0x3d 0x13 0x5b # CHECK: fadd.d $w13, $w2, $w29
+0x78 0x19 0x5b 0x9a # CHECK: fcaf.w $w14, $w11, $w25
+0x78 0x33 0x08 0x5a # CHECK: fcaf.d $w1, $w1, $w19
+0x78 0x90 0xb8 0x5a # CHECK: fceq.w $w1, $w23, $w16
+0x78 0xb0 0x40 0x1a # CHECK: fceq.d $w0, $w8, $w16
+0x79 0x98 0x4c 0x1a # CHECK: fcle.w $w16, $w9, $w24
+0x79 0xa1 0x76 0xda # CHECK: fcle.d $w27, $w14, $w1
+0x79 0x08 0x47 0x1a # CHECK: fclt.w $w28, $w8, $w8
+0x79 0x2b 0xcf 0x9a # CHECK: fclt.d $w30, $w25, $w11
+0x78 0xd7 0x90 0x9c # CHECK: fcne.w $w2, $w18, $w23
+0x78 0xef 0xa3 0x9c # CHECK: fcne.d $w14, $w20, $w15
+0x78 0x59 0x92 0x9c # CHECK: fcor.w $w10, $w18, $w25
+0x78 0x6b 0xcc 0x5c # CHECK: fcor.d $w17, $w25, $w11
+0x78 0xd5 0x13 0x9a # CHECK: fcueq.w $w14, $w2, $w21
+0x78 0xe7 0x1f 0x5a # CHECK: fcueq.d $w29, $w3, $w7
+0x79 0xc3 0x2c 0x5a # CHECK: fcule.w $w17, $w5, $w3
+0x79 0xfe 0x0f 0xda # CHECK: fcule.d $w31, $w1, $w30
+0x79 0x49 0xc9 0x9a # CHECK: fcult.w $w6, $w25, $w9
+0x79 0x71 0x46 0xda # CHECK: fcult.d $w27, $w8, $w17
+0x78 0x48 0xa1 0x1a # CHECK: fcun.w $w4, $w20, $w8
+0x78 0x63 0x5f 0x5a # CHECK: fcun.d $w29, $w11, $w3
+0x78 0x93 0x93 0x5c # CHECK: fcune.w $w13, $w18, $w19
+0x78 0xb5 0xd4 0x1c # CHECK: fcune.d $w16, $w26, $w21
+0x78 0xc2 0xc3 0x5b # CHECK: fdiv.w $w13, $w24, $w2
+0x78 0xf9 0x24 0xdb # CHECK: fdiv.d $w19, $w4, $w25
+0x7a 0x10 0x02 0x1b # CHECK: fexdo.h $w8, $w0, $w16
+0x7a 0x3b 0x68 0x1b # CHECK: fexdo.w $w0, $w13, $w27
+0x79 0xc3 0x04 0x5b # CHECK: fexp2.w $w17, $w0, $w3
+0x79 0xea 0x05 0x9b # CHECK: fexp2.d $w22, $w0, $w10
+0x79 0x17 0x37 0x5b # CHECK: fmadd.w $w29, $w6, $w23
+0x79 0x35 0xe2 0xdb # CHECK: fmadd.d $w11, $w28, $w21
+0x7b 0x8d 0xb8 0x1b # CHECK: fmax.w $w0, $w23, $w13
+0x7b 0xa8 0x96 0x9b # CHECK: fmax.d $w26, $w18, $w8
+0x7b 0xca 0x82 0x9b # CHECK: fmax_a.w $w10, $w16, $w10
+0x7b 0xf6 0x4f 0x9b # CHECK: fmax_a.d $w30, $w9, $w22
+0x7b 0x1e 0x0e 0x1b # CHECK: fmin.w $w24, $w1, $w30
+0x7b 0x2a 0xde 0xdb # CHECK: fmin.d $w27, $w27, $w10
+0x7b 0x54 0xea 0x9b # CHECK: fmin_a.w $w10, $w29, $w20
+0x7b 0x78 0xf3 0x5b # CHECK: fmin_a.d $w13, $w30, $w24
+0x79 0x40 0xcc 0x5b # CHECK: fmsub.w $w17, $w25, $w0
+0x79 0x70 0x92 0x1b # CHECK: fmsub.d $w8, $w18, $w16
+0x78 0x8f 0x78 0xdb # CHECK: fmul.w $w3, $w15, $w15
+0x78 0xaa 0xf2 0x5b # CHECK: fmul.d $w9, $w30, $w10
+0x7a 0x0a 0x2e 0x5a # CHECK: fsaf.w $w25, $w5, $w10
+0x7a 0x3d 0x1e 0x5a # CHECK: fsaf.d $w25, $w3, $w29
+0x7a 0x8d 0x8a 0xda # CHECK: fseq.w $w11, $w17, $w13
+0x7a 0xbf 0x07 0x5a # CHECK: fseq.d $w29, $w0, $w31
+0x7b 0x9f 0xff 0x9a # CHECK: fsle.w $w30, $w31, $w31
+0x7b 0xb8 0xbc 0x9a # CHECK: fsle.d $w18, $w23, $w24
+0x7b 0x06 0x2b 0x1a # CHECK: fslt.w $w12, $w5, $w6
+0x7b 0x35 0xd4 0x1a # CHECK: fslt.d $w16, $w26, $w21
+0x7a 0xcc 0x0f 0x9c # CHECK: fsne.w $w30, $w1, $w12
+0x7a 0xf7 0x6b 0x9c # CHECK: fsne.d $w14, $w13, $w23
+0x7a 0x5b 0x6e 0xdc # CHECK: fsor.w $w27, $w13, $w27
+0x7a 0x6b 0xc3 0x1c # CHECK: fsor.d $w12, $w24, $w11
+0x78 0x41 0xd7 0xdb # CHECK: fsub.w $w31, $w26, $w1
+0x78 0x7b 0x8c 0xdb # CHECK: fsub.d $w19, $w17, $w27
+0x7a 0xd9 0xc4 0x1a # CHECK: fsueq.w $w16, $w24, $w25
+0x7a 0xee 0x74 0x9a # CHECK: fsueq.d $w18, $w14, $w14
+0x7b 0xcd 0xf5 0xda # CHECK: fsule.w $w23, $w30, $w13
+0x7b 0xfa 0x58 0x9a # CHECK: fsule.d $w2, $w11, $w26
+0x7b 0x56 0xd2 0xda # CHECK: fsult.w $w11, $w26, $w22
+0x7b 0x7e 0xb9 0x9a # CHECK: fsult.d $w6, $w23, $w30
+0x7a 0x5c 0x90 0xda # CHECK: fsun.w $w3, $w18, $w28
+0x7a 0x73 0x5c 0x9a # CHECK: fsun.d $w18, $w11, $w19
+0x7a 0x82 0xfc 0x1c # CHECK: fsune.w $w16, $w31, $w2
+0x7a 0xb1 0xd0 0xdc # CHECK: fsune.d $w3, $w26, $w17
+0x7a 0x98 0x24 0x1b # CHECK: ftq.h $w16, $w4, $w24
+0x7a 0xb9 0x29 0x5b # CHECK: ftq.w $w5, $w5, $w25
+0x79 0x4a 0xa4 0x1c # CHECK: madd_q.h $w16, $w20, $w10
+0x79 0x69 0x17 0x1c # CHECK: madd_q.w $w28, $w2, $w9
+0x7b 0x49 0x92 0x1c # CHECK: maddr_q.h $w8, $w18, $w9
+0x7b 0x70 0x67 0x5c # CHECK: maddr_q.w $w29, $w12, $w16
+0x79 0x8a 0xd6 0x1c # CHECK: msub_q.h $w24, $w26, $w10
+0x79 0xbc 0xf3 0x5c # CHECK: msub_q.w $w13, $w30, $w28
+0x7b 0x8b 0xab 0x1c # CHECK: msubr_q.h $w12, $w21, $w11
+0x7b 0xb4 0x70 0x5c # CHECK: msubr_q.w $w1, $w14, $w20
+0x79 0x1e 0x81 0x9c # CHECK: mul_q.h $w6, $w16, $w30
+0x79 0x24 0x0c 0x1c # CHECK: mul_q.w $w16, $w1, $w4
+0x7b 0x13 0xa1 0x9c # CHECK: mulr_q.h $w6, $w20, $w19
+0x7b 0x34 0x0e 0xdc # CHECK: mulr_q.w $w27, $w1, $w20
diff --git a/test/MC/Disassembler/Mips/msa/test_bit.txt b/test/MC/Disassembler/Mips/msa/test_bit.txt
new file mode 100644
index 000000000000..422d71e19ea5
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_bit.txt
@@ -0,0 +1,50 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x79 0xf2 0xf5 0x49 # CHECK: bclri.b $w21, $w30, 2
+0x79 0xe0 0xae 0x09 # CHECK: bclri.h $w24, $w21, 0
+0x79 0xc3 0xf5 0xc9 # CHECK: bclri.w $w23, $w30, 3
+0x79 0x80 0x5a 0x49 # CHECK: bclri.d $w9, $w11, 0
+0x7b 0x71 0x66 0x49 # CHECK: binsli.b $w25, $w12, 1
+0x7b 0x60 0xb5 0x49 # CHECK: binsli.h $w21, $w22, 0
+0x7b 0x40 0x25 0x89 # CHECK: binsli.w $w22, $w4, 0
+0x7b 0x06 0x11 0x89 # CHECK: binsli.d $w6, $w2, 6
+0x7b 0xf0 0x9b 0xc9 # CHECK: binsri.b $w15, $w19, 0
+0x7b 0xe1 0xf2 0x09 # CHECK: binsri.h $w8, $w30, 1
+0x7b 0xc5 0x98 0x89 # CHECK: binsri.w $w2, $w19, 5
+0x7b 0x81 0xa4 0x89 # CHECK: binsri.d $w18, $w20, 1
+0x7a 0xf0 0x9e 0x09 # CHECK: bnegi.b $w24, $w19, 0
+0x7a 0xe3 0x5f 0x09 # CHECK: bnegi.h $w28, $w11, 3
+0x7a 0xc5 0xd8 0x49 # CHECK: bnegi.w $w1, $w27, 5
+0x7a 0x81 0xa9 0x09 # CHECK: bnegi.d $w4, $w21, 1
+0x7a 0x70 0x44 0x89 # CHECK: bseti.b $w18, $w8, 0
+0x7a 0x62 0x76 0x09 # CHECK: bseti.h $w24, $w14, 2
+0x7a 0x44 0x92 0x49 # CHECK: bseti.w $w9, $w18, 4
+0x7a 0x01 0x79 0xc9 # CHECK: bseti.d $w7, $w15, 1
+0x78 0x72 0xff 0xca # CHECK: sat_s.b $w31, $w31, 2
+0x78 0x60 0x9c 0xca # CHECK: sat_s.h $w19, $w19, 0
+0x78 0x40 0xec 0xca # CHECK: sat_s.w $w19, $w29, 0
+0x78 0x00 0xb2 0xca # CHECK: sat_s.d $w11, $w22, 0
+0x78 0xf3 0x68 0x4a # CHECK: sat_u.b $w1, $w13, 3
+0x78 0xe4 0xc7 0x8a # CHECK: sat_u.h $w30, $w24, 4
+0x78 0xc0 0x6f 0xca # CHECK: sat_u.w $w31, $w13, 0
+0x78 0x85 0x87 0x4a # CHECK: sat_u.d $w29, $w16, 5
+0x78 0x71 0x55 0xc9 # CHECK: slli.b $w23, $w10, 1
+0x78 0x61 0x92 0x49 # CHECK: slli.h $w9, $w18, 1
+0x78 0x44 0xea 0xc9 # CHECK: slli.w $w11, $w29, 4
+0x78 0x01 0xa6 0x49 # CHECK: slli.d $w25, $w20, 1
+0x78 0xf1 0xee 0x09 # CHECK: srai.b $w24, $w29, 1
+0x78 0xe0 0x30 0x49 # CHECK: srai.h $w1, $w6, 0
+0x78 0xc1 0xd1 0xc9 # CHECK: srai.w $w7, $w26, 1
+0x78 0x83 0xcd 0x09 # CHECK: srai.d $w20, $w25, 3
+0x79 0x70 0xc9 0x4a # CHECK: srari.b $w5, $w25, 0
+0x79 0x64 0x31 0xca # CHECK: srari.h $w7, $w6, 4
+0x79 0x45 0x5c 0x4a # CHECK: srari.w $w17, $w11, 5
+0x79 0x05 0xcd 0x4a # CHECK: srari.d $w21, $w25, 5
+0x79 0x72 0x00 0x89 # CHECK: srli.b $w2, $w0, 2
+0x79 0x62 0xff 0xc9 # CHECK: srli.h $w31, $w31, 2
+0x79 0x44 0x49 0x49 # CHECK: srli.w $w5, $w9, 4
+0x79 0x05 0xd6 0xc9 # CHECK: srli.d $w27, $w26, 5
+0x79 0xf0 0x1c 0x8a # CHECK: srlri.b $w18, $w3, 0
+0x79 0xe3 0x10 0x4a # CHECK: srlri.h $w1, $w2, 3
+0x79 0xc2 0xb2 0xca # CHECK: srlri.w $w11, $w22, 2
+0x79 0x86 0x56 0x0a # CHECK: srlri.d $w24, $w10, 6
diff --git a/test/MC/Disassembler/Mips/msa/test_ctrlregs.txt b/test/MC/Disassembler/Mips/msa/test_ctrlregs.txt
new file mode 100644
index 000000000000..fb5b0be7ffee
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_ctrlregs.txt
@@ -0,0 +1,35 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x78 0x7e 0x00 0x59 # CHECK: cfcmsa $1, $0
+0x78 0x7e 0x00 0x59 # CHECK: cfcmsa $1, $0
+0x78 0x7e 0x08 0x99 # CHECK: cfcmsa $2, $1
+0x78 0x7e 0x08 0x99 # CHECK: cfcmsa $2, $1
+0x78 0x7e 0x10 0xd9 # CHECK: cfcmsa $3, $2
+0x78 0x7e 0x10 0xd9 # CHECK: cfcmsa $3, $2
+0x78 0x7e 0x19 0x19 # CHECK: cfcmsa $4, $3
+0x78 0x7e 0x19 0x19 # CHECK: cfcmsa $4, $3
+0x78 0x7e 0x21 0x59 # CHECK: cfcmsa $5, $4
+0x78 0x7e 0x21 0x59 # CHECK: cfcmsa $5, $4
+0x78 0x7e 0x29 0x99 # CHECK: cfcmsa $6, $5
+0x78 0x7e 0x29 0x99 # CHECK: cfcmsa $6, $5
+0x78 0x7e 0x31 0xd9 # CHECK: cfcmsa $7, $6
+0x78 0x7e 0x31 0xd9 # CHECK: cfcmsa $7, $6
+0x78 0x7e 0x3a 0x19 # CHECK: cfcmsa $8, $7
+0x78 0x7e 0x3a 0x19 # CHECK: cfcmsa $8, $7
+
+0x78 0x3e 0x08 0x19 # CHECK: ctcmsa $0, $1
+0x78 0x3e 0x08 0x19 # CHECK: ctcmsa $0, $1
+0x78 0x3e 0x10 0x59 # CHECK: ctcmsa $1, $2
+0x78 0x3e 0x10 0x59 # CHECK: ctcmsa $1, $2
+0x78 0x3e 0x18 0x99 # CHECK: ctcmsa $2, $3
+0x78 0x3e 0x18 0x99 # CHECK: ctcmsa $2, $3
+0x78 0x3e 0x20 0xd9 # CHECK: ctcmsa $3, $4
+0x78 0x3e 0x20 0xd9 # CHECK: ctcmsa $3, $4
+0x78 0x3e 0x29 0x19 # CHECK: ctcmsa $4, $5
+0x78 0x3e 0x29 0x19 # CHECK: ctcmsa $4, $5
+0x78 0x3e 0x31 0x59 # CHECK: ctcmsa $5, $6
+0x78 0x3e 0x31 0x59 # CHECK: ctcmsa $5, $6
+0x78 0x3e 0x39 0x99 # CHECK: ctcmsa $6, $7
+0x78 0x3e 0x39 0x99 # CHECK: ctcmsa $6, $7
+0x78 0x3e 0x41 0xd9 # CHECK: ctcmsa $7, $8
+0x78 0x3e 0x41 0xd9 # CHECK: ctcmsa $7, $8
diff --git a/test/MC/Disassembler/Mips/msa/test_dlsa.txt b/test/MC/Disassembler/Mips/msa/test_dlsa.txt
new file mode 100644
index 000000000000..2a1d90bd17ba
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_dlsa.txt
@@ -0,0 +1,6 @@
+# RUN: llvm-mc --disassemble %s -triple=mips64-unknown-linux -mcpu=mips64r2 -mattr=+msa | FileCheck %s
+
+0x01 0x2a 0x40 0x15 # CHECK: dlsa $8, $9, $10, 1
+0x01 0x2a 0x40 0x55 # CHECK: dlsa $8, $9, $10, 2
+0x01 0x2a 0x40 0x95 # CHECK: dlsa $8, $9, $10, 3
+0x01 0x2a 0x40 0xd5 # CHECK: dlsa $8, $9, $10, 4
diff --git a/test/MC/Disassembler/Mips/msa/test_elm.txt b/test/MC/Disassembler/Mips/msa/test_elm.txt
new file mode 100644
index 000000000000..832587b23412
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_elm.txt
@@ -0,0 +1,17 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x78 0x82 0x43 0x59 # CHECK: copy_s.b $13, $w8[2]
+0x78 0xa0 0xc8 0x59 # CHECK: copy_s.h $1, $w25[0]
+0x78 0xb1 0x2d 0x99 # CHECK: copy_s.w $22, $w5[1]
+0x78 0xc4 0xa5 0x99 # CHECK: copy_u.b $22, $w20[4]
+0x78 0xe0 0x25 0x19 # CHECK: copy_u.h $20, $w4[0]
+0x78 0xf2 0x6f 0x99 # CHECK: copy_u.w $fp, $w13[2]
+0x78 0x04 0xe8 0x19 # CHECK: sldi.b $w0, $w29[4]
+0x78 0x20 0x8a 0x19 # CHECK: sldi.h $w8, $w17[0]
+0x78 0x32 0xdd 0x19 # CHECK: sldi.w $w20, $w27[2]
+0x78 0x38 0x61 0x19 # CHECK: sldi.d $w4, $w12[0]
+0x78 0x42 0x1e 0x59 # CHECK: splati.b $w25, $w3[2]
+0x78 0x61 0xe6 0x19 # CHECK: splati.h $w24, $w28[1]
+0x78 0x70 0x93 0x59 # CHECK: splati.w $w13, $w18[0]
+0x78 0x78 0x0f 0x19 # CHECK: splati.d $w28, $w1[0]
+0x78 0xbe 0xc5 0xd9 # CHECK: move.v $w23, $w24
diff --git a/test/MC/Disassembler/Mips/msa/test_elm_insert.txt b/test/MC/Disassembler/Mips/msa/test_elm_insert.txt
new file mode 100644
index 000000000000..605d495e526d
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_elm_insert.txt
@@ -0,0 +1,5 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x79 0x03 0xed 0xd9 # CHECK: insert.b $w23[3], $sp
+0x79 0x22 0x2d 0x19 # CHECK: insert.h $w20[2], $5
+0x79 0x32 0x7a 0x19 # CHECK: insert.w $w8[2], $15
diff --git a/test/MC/Disassembler/Mips/msa/test_elm_insert_msa64.txt b/test/MC/Disassembler/Mips/msa/test_elm_insert_msa64.txt
new file mode 100644
index 000000000000..62920f399e41
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_elm_insert_msa64.txt
@@ -0,0 +1,3 @@
+# RUN: llvm-mc --disassemble %s -triple=mips64-unknown-linux -mcpu=mips64r2 -mattr=+msa | FileCheck %s
+
+0x79 0x39 0xe8 0x59 # CHECK: insert.d $w1[1], $sp
diff --git a/test/MC/Disassembler/Mips/msa/test_elm_insve.txt b/test/MC/Disassembler/Mips/msa/test_elm_insve.txt
new file mode 100644
index 000000000000..c5c3ba05829c
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_elm_insve.txt
@@ -0,0 +1,6 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x79 0x43 0x4e 0x59 # CHECK: insve.b $w25[3], $w9[0]
+0x79 0x62 0x16 0x19 # CHECK: insve.h $w24[2], $w2[0]
+0x79 0x72 0x68 0x19 # CHECK: insve.w $w0[2], $w13[0]
+0x79 0x78 0x90 0xd9 # CHECK: insve.d $w3[0], $w18[0]
diff --git a/test/MC/Disassembler/Mips/msa/test_elm_msa64.txt b/test/MC/Disassembler/Mips/msa/test_elm_msa64.txt
new file mode 100644
index 000000000000..70c831ac2743
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_elm_msa64.txt
@@ -0,0 +1,6 @@
+# RUN: llvm-mc --disassemble %s -triple=mips64-unknown-linux -mcpu=mips64r2 -mattr=+msa | FileCheck %s
+
+# CHECK: copy_s.d $19, $w31[0]
+0x78 0xb8 0xfc 0xd9
+# CHECK: copy_u.d $18, $w29[1]
+0x78 0xf9 0xec 0x99
diff --git a/test/MC/Disassembler/Mips/msa/test_i10.txt b/test/MC/Disassembler/Mips/msa/test_i10.txt
new file mode 100644
index 000000000000..ac95d8819a3f
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_i10.txt
@@ -0,0 +1,6 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32 -mattr=+msa | FileCheck %s
+
+0x7b 0x06 0x32 0x07 # CHECK: ldi.b $w8, 198
+0x7b 0x29 0xcd 0x07 # CHECK: ldi.h $w20, 313
+0x7b 0x4f 0x66 0x07 # CHECK: ldi.w $w24, 492
+0x7b 0x7a 0x66 0xc7 # CHECK: ldi.d $w27, 844
diff --git a/test/MC/Disassembler/Mips/msa/test_i5.txt b/test/MC/Disassembler/Mips/msa/test_i5.txt
new file mode 100644
index 000000000000..bf5bc5184e74
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_i5.txt
@@ -0,0 +1,46 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32 -mattr=+msa | FileCheck %s
+
+0x78 0x1e 0xf8 0xc6 # CHECK: addvi.b $w3, $w31, 30
+0x78 0x3a 0x6e 0x06 # CHECK: addvi.h $w24, $w13, 26
+0x78 0x5a 0xa6 0x86 # CHECK: addvi.w $w26, $w20, 26
+0x78 0x75 0x0c 0x06 # CHECK: addvi.d $w16, $w1, 21
+0x78 0x18 0xae 0x07 # CHECK: ceqi.b $w24, $w21, 24
+0x78 0x22 0x7f 0xc7 # CHECK: ceqi.h $w31, $w15, 2
+0x78 0x5f 0x0b 0x07 # CHECK: ceqi.w $w12, $w1, 31
+0x78 0x67 0xb6 0x07 # CHECK: ceqi.d $w24, $w22, 7
+0x7a 0x01 0x83 0x07 # CHECK: clei_s.b $w12, $w16, 1
+0x7a 0x37 0x50 0x87 # CHECK: clei_s.h $w2, $w10, 23
+0x7a 0x56 0x59 0x07 # CHECK: clei_s.w $w4, $w11, 22
+0x7a 0x76 0xe8 0x07 # CHECK: clei_s.d $w0, $w29, 22
+0x7a 0x83 0x8d 0x47 # CHECK: clei_u.b $w21, $w17, 3
+0x7a 0xb1 0x3f 0x47 # CHECK: clei_u.h $w29, $w7, 17
+0x7a 0xc2 0x08 0x47 # CHECK: clei_u.w $w1, $w1, 2
+0x7a 0xfd 0xde 0xc7 # CHECK: clei_u.d $w27, $w27, 29
+0x79 0x19 0x6c 0xc7 # CHECK: clti_s.b $w19, $w13, 25
+0x79 0x34 0x53 0xc7 # CHECK: clti_s.h $w15, $w10, 20
+0x79 0x4b 0x63 0x07 # CHECK: clti_s.w $w12, $w12, 11
+0x79 0x71 0xa7 0x47 # CHECK: clti_s.d $w29, $w20, 17
+0x79 0x9d 0x4b 0x87 # CHECK: clti_u.b $w14, $w9, 29
+0x79 0xb9 0xce 0x07 # CHECK: clti_u.h $w24, $w25, 25
+0x79 0xd6 0x08 0x47 # CHECK: clti_u.w $w1, $w1, 22
+0x79 0xe1 0xcd 0x47 # CHECK: clti_u.d $w21, $w25, 1
+0x79 0x01 0xad 0x86 # CHECK: maxi_s.b $w22, $w21, 1
+0x79 0x38 0x2f 0x46 # CHECK: maxi_s.h $w29, $w5, 24
+0x79 0x54 0x50 0x46 # CHECK: maxi_s.w $w1, $w10, 20
+0x79 0x70 0xeb 0x46 # CHECK: maxi_s.d $w13, $w29, 16
+0x79 0x8c 0x05 0x06 # CHECK: maxi_u.b $w20, $w0, 12
+0x79 0xa3 0x70 0x46 # CHECK: maxi_u.h $w1, $w14, 3
+0x79 0xcb 0xb6 0xc6 # CHECK: maxi_u.w $w27, $w22, 11
+0x79 0xe4 0x36 0x86 # CHECK: maxi_u.d $w26, $w6, 4
+0x7a 0x01 0x09 0x06 # CHECK: mini_s.b $w4, $w1, 1
+0x7a 0x37 0xde 0xc6 # CHECK: mini_s.h $w27, $w27, 23
+0x7a 0x49 0x5f 0x06 # CHECK: mini_s.w $w28, $w11, 9
+0x7a 0x6a 0x52 0xc6 # CHECK: mini_s.d $w11, $w10, 10
+0x7a 0x9b 0xbc 0x86 # CHECK: mini_u.b $w18, $w23, 27
+0x7a 0xb2 0xd1 0xc6 # CHECK: mini_u.h $w7, $w26, 18
+0x7a 0xda 0x62 0xc6 # CHECK: mini_u.w $w11, $w12, 26
+0x7a 0xe2 0x7a 0xc6 # CHECK: mini_u.d $w11, $w15, 2
+0x78 0x93 0xa6 0x06 # CHECK: subvi.b $w24, $w20, 19
+0x78 0xa4 0x9a 0xc6 # CHECK: subvi.h $w11, $w19, 4
+0x78 0xcb 0x53 0x06 # CHECK: subvi.w $w12, $w10, 11
+0x78 0xe7 0x84 0xc6 # CHECK: subvi.d $w19, $w16, 7
diff --git a/test/MC/Disassembler/Mips/msa/test_i8.txt b/test/MC/Disassembler/Mips/msa/test_i8.txt
new file mode 100644
index 000000000000..e08c39ba2638
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_i8.txt
@@ -0,0 +1,12 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32 -mattr=+msa | FileCheck %s
+
+0x78 0x30 0xe8 0x80 # CHECK: andi.b $w2, $w29, 48
+0x78 0x7e 0xb1 0x81 # CHECK: bmnzi.b $w6, $w22, 126
+0x79 0x58 0x0e 0xc1 # CHECK: bmzi.b $w27, $w1, 88
+0x7a 0xbd 0x1f 0x41 # CHECK: bseli.b $w29, $w3, 189
+0x7a 0x38 0x88 0x40 # CHECK: nori.b $w1, $w17, 56
+0x79 0x87 0xa6 0x80 # CHECK: ori.b $w26, $w20, 135
+0x78 0x69 0xf4 0xc2 # CHECK: shf.b $w19, $w30, 105
+0x79 0x4c 0x44 0x42 # CHECK: shf.h $w17, $w8, 76
+0x7a 0x5d 0x1b 0x82 # CHECK: shf.w $w14, $w3, 93
+0x7b 0x14 0x54 0x00 # CHECK: xori.b $w16, $w10, 20
diff --git a/test/MC/Disassembler/Mips/msa/test_lsa.txt b/test/MC/Disassembler/Mips/msa/test_lsa.txt
new file mode 100644
index 000000000000..c3e950bbf946
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_lsa.txt
@@ -0,0 +1,6 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32r2 -mattr=+msa | FileCheck %s
+
+0x01 0x2a 0x40 0x05 # CHECK: lsa $8, $9, $10, 1
+0x01 0x2a 0x40 0x45 # CHECK: lsa $8, $9, $10, 2
+0x01 0x2a 0x40 0x85 # CHECK: lsa $8, $9, $10, 3
+0x01 0x2a 0x40 0xc5 # CHECK: lsa $8, $9, $10, 4
diff --git a/test/MC/Disassembler/Mips/msa/test_mi10.txt b/test/MC/Disassembler/Mips/msa/test_mi10.txt
new file mode 100644
index 000000000000..b75b49ee1fa4
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_mi10.txt
@@ -0,0 +1,28 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32 -mattr=+msa | FileCheck %s
+
+0x7a 0x00 0x08 0x20 # CHECK: ld.b $w0, -512($1)
+0x78 0x00 0x10 0x60 # CHECK: ld.b $w1, 0($2)
+0x79 0xff 0x18 0xa0 # CHECK: ld.b $w2, 511($3)
+
+0x7a 0x00 0x20 0xe1 # CHECK: ld.h $w3, -1024($4)
+0x7b 0x00 0x29 0x21 # CHECK: ld.h $w4, -512($5)
+0x78 0x00 0x31 0x61 # CHECK: ld.h $w5, 0($6)
+0x79 0x00 0x39 0xa1 # CHECK: ld.h $w6, 512($7)
+0x79 0xff 0x41 0xe1 # CHECK: ld.h $w7, 1022($8)
+
+0x7a 0x00 0x4a 0x22 # CHECK: ld.w $w8, -2048($9)
+0x7b 0x00 0x52 0x62 # CHECK: ld.w $w9, -1024($10)
+0x7b 0x80 0x5a 0xa2 # CHECK: ld.w $w10, -512($11)
+0x78 0x80 0x62 0xe2 # CHECK: ld.w $w11, 512($12)
+0x79 0x00 0x6b 0x22 # CHECK: ld.w $w12, 1024($13)
+0x79 0xff 0x73 0x62 # CHECK: ld.w $w13, 2044($14)
+
+0x7a 0x00 0x7b 0xa3 # CHECK: ld.d $w14, -4096($15)
+0x7b 0x00 0x83 0xe3 # CHECK: ld.d $w15, -2048($16)
+0x7b 0x80 0x8c 0x23 # CHECK: ld.d $w16, -1024($17)
+0x7b 0xc0 0x94 0x63 # CHECK: ld.d $w17, -512($18)
+0x78 0x00 0x9c 0xa3 # CHECK: ld.d $w18, 0($19)
+0x78 0x40 0xa4 0xe3 # CHECK: ld.d $w19, 512($20)
+0x78 0x80 0xad 0x23 # CHECK: ld.d $w20, 1024($21)
+0x79 0x00 0xb5 0x63 # CHECK: ld.d $w21, 2048($22)
+0x79 0xff 0xbd 0xa3 # CHECK: ld.d $w22, 4088($23)
diff --git a/test/MC/Disassembler/Mips/msa/test_vec.txt b/test/MC/Disassembler/Mips/msa/test_vec.txt
new file mode 100644
index 000000000000..eff984f8f12f
--- /dev/null
+++ b/test/MC/Disassembler/Mips/msa/test_vec.txt
@@ -0,0 +1,9 @@
+# RUN: llvm-mc --disassemble %s -triple=mips-unknown-linux -mcpu=mips32 -mattr=+msa | FileCheck %s
+
+0x78 0x1b 0xa6 0x5e # CHECK: and.v $w25, $w20, $w27
+0x78 0x87 0x34 0x5e # CHECK: bmnz.v $w17, $w6, $w7
+0x78 0xa9 0x88 0xde # CHECK: bmz.v $w3, $w17, $w9
+0x78 0xce 0x02 0x1e # CHECK: bsel.v $w8, $w0, $w14
+0x78 0x40 0xf9 0xde # CHECK: nor.v $w7, $w31, $w0
+0x78 0x3e 0xd6 0x1e # CHECK: or.v $w24, $w26, $w30
+0x78 0x6f 0xd9 0xde # CHECK: xor.v $w7, $w27, $w15
diff --git a/test/MC/Disassembler/PowerPC/lit.local.cfg b/test/MC/Disassembler/PowerPC/lit.local.cfg
new file mode 100644
index 000000000000..5d33887ff0a4
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'PowerPC' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/MC/Disassembler/PowerPC/ppc64-encoding-bookII.txt b/test/MC/Disassembler/PowerPC/ppc64-encoding-bookII.txt
new file mode 100644
index 000000000000..5e6033d4299a
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-encoding-bookII.txt
@@ -0,0 +1,74 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-unknown -mcpu=pwr7 | FileCheck %s
+
+# CHECK: icbi 2, 3
+0x7c 0x02 0x1f 0xac
+
+# CHECK: dcbt 2, 3
+0x7c 0x02 0x1a 0x2c
+
+# CHECK: dcbtst 2, 3
+0x7c 0x02 0x19 0xec
+
+# CHECK: dcbz 2, 3
+0x7c 0x02 0x1f 0xec
+
+# CHECK: dcbst 2, 3
+0x7c 0x02 0x18 0x6c
+
+# CHECK: isync
+0x4c 0x00 0x01 0x2c
+
+# CHECK: stwcx. 2, 3, 4
+0x7c 0x43 0x21 0x2d
+
+# CHECK: stdcx. 2, 3, 4
+0x7c 0x43 0x21 0xad
+
+# CHECK: sync 2
+0x7c 0x40 0x04 0xac
+
+# CHECK: eieio
+0x7c 0x00 0x06 0xac
+
+# CHECK: wait 2
+0x7c 0x40 0x00 0x7c
+
+# CHECK: dcbf 2, 3
+0x7c 0x02 0x18 0xac
+
+# CHECK: lwarx 2, 3, 4
+0x7c 0x43 0x20 0x28
+
+# CHECK: ldarx 2, 3, 4
+0x7c 0x43 0x20 0xa8
+
+# CHECK: sync 0
+0x7c 0x00 0x04 0xac
+
+# CHECK: sync 0
+0x7c 0x00 0x04 0xac
+
+# CHECK: sync 1
+0x7c 0x20 0x04 0xac
+
+# CHECK: sync 2
+0x7c 0x40 0x04 0xac
+
+# CHECK: wait 0
+0x7c 0x00 0x00 0x7c
+
+# CHECK: wait 1
+0x7c 0x20 0x00 0x7c
+
+# CHECK: wait 2
+0x7c 0x40 0x00 0x7c
+
+# CHECK: mftb 2, 123
+0x7c 0x5b 0x1a 0xe6
+
+# CHECK: mftb 2, 268
+0x7c 0x4c 0x42 0xe6
+
+# CHECK: mftb 2, 269
+0x7c 0x4d 0x42 0xe6
+
diff --git a/test/MC/Disassembler/PowerPC/ppc64-encoding-bookIII.txt b/test/MC/Disassembler/PowerPC/ppc64-encoding-bookIII.txt
new file mode 100644
index 000000000000..c5d615568cc3
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-encoding-bookIII.txt
@@ -0,0 +1,107 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-unknown -mcpu=pwr7 | FileCheck %s
+
+# CHECK: mtmsr 4, 0
+0x7c 0x80 0x01 0x24
+
+# CHECK: mtmsr 4, 1
+0x7c 0x81 0x01 0x24
+
+# CHECK: mfmsr 4
+0x7c 0x80 0x00 0xa6
+
+# CHECK: mtmsrd 4, 0
+0x7c 0x80 0x01 0x64
+
+# CHECK: mtmsrd 4, 1
+0x7c 0x81 0x01 0x64
+
+# CHECK: mfspr 4, 272
+0x7c 0x90 0x42 0xa6
+
+# CHECK: mfspr 4, 273
+0x7c 0x91 0x42 0xa6
+
+# CHECK: mfspr 4, 274
+0x7c 0x92 0x42 0xa6
+
+# CHECK: mfspr 4, 275
+0x7c 0x93 0x42 0xa6
+
+# CHECK: mtspr 272, 4
+0x7c 0x90 0x43 0xa6
+
+# CHECK: mtspr 273, 4
+0x7c 0x91 0x43 0xa6
+
+# CHECK: mtspr 274, 4
+0x7c 0x92 0x43 0xa6
+
+# CHECK: mtspr 275, 4
+0x7c 0x93 0x43 0xa6
+
+# CHECK: mtspr 272, 4
+0x7c 0x90 0x43 0xa6
+
+# CHECK: mtspr 273, 4
+0x7c 0x91 0x43 0xa6
+
+# CHECK: mtspr 274, 4
+0x7c 0x92 0x43 0xa6
+
+# CHECK: mtspr 275, 4
+0x7c 0x93 0x43 0xa6
+
+# CHECK: mtspr 280, 4
+0x7c 0x98 0x43 0xa6
+
+# CHECK: mfspr 4, 22
+0x7c 0x96 0x02 0xa6
+
+# CHECK: mtspr 22, 4
+0x7c 0x96 0x03 0xa6
+
+# CHECK: mfspr 4, 287
+0x7c 0x9f 0x42 0xa6
+
+# CHECK: mfspr 4, 25
+0x7c 0x99 0x02 0xa6
+
+# CHECK: mtspr 25, 4
+0x7c 0x99 0x03 0xa6
+
+# CHECK: mfspr 4, 26
+0x7c 0x9a 0x02 0xa6
+
+# CHECK: mtspr 26, 4
+0x7c 0x9a 0x03 0xa6
+
+# CHECK: mfspr 4, 27
+0x7c 0x9b 0x02 0xa6
+
+# CHECK: mtspr 27, 4
+0x7c 0x9b 0x03 0xa6
+
+# CHECK: slbie 4
+0x7c 0x00 0x23 0x64
+
+# CHECK: slbmte 4, 5
+0x7c 0x80 0x2b 0x24
+
+# CHECK: slbmfee 4, 5
+0x7c 0x80 0x2f 0x26
+
+# CHECK: slbia
+0x7c 0x00 0x03 0xe4
+
+# CHECK: tlbsync
+0x7c 0x00 0x04 0x6c
+
+# CHECK: tlbiel 4
+0x7c 0x00 0x22 0x24
+
+# CHECK: tlbie 4,0
+0x7c 0x00 0x22 0x64
+
+# CHECK: tlbie 4,0
+0x7c 0x00 0x22 0x64
+
diff --git a/test/MC/Disassembler/PowerPC/ppc64-encoding-ext.txt b/test/MC/Disassembler/PowerPC/ppc64-encoding-ext.txt
new file mode 100644
index 000000000000..108df30aa8c8
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-encoding-ext.txt
@@ -0,0 +1,2253 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-unknown -mcpu=pwr7 | FileCheck %s
+
+# FIXME: decode as beqlr 0
+# CHECK: bclr 12, 2, 0
+0x4d 0x82 0x00 0x20
+
+# FIXME: decode as beqlr 1
+# CHECK: bclr 12, 6, 0
+0x4d 0x86 0x00 0x20
+
+# FIXME: decode as beqlr 2
+# CHECK: bclr 12, 10, 0
+0x4d 0x8a 0x00 0x20
+
+# FIXME: decode as beqlr 3
+# CHECK: bclr 12, 14, 0
+0x4d 0x8e 0x00 0x20
+
+# FIXME: decode as beqlr 4
+# CHECK: bclr 12, 18, 0
+0x4d 0x92 0x00 0x20
+
+# FIXME: decode as beqlr 5
+# CHECK: bclr 12, 22, 0
+0x4d 0x96 0x00 0x20
+
+# FIXME: decode as beqlr 6
+# CHECK: bclr 12, 26, 0
+0x4d 0x9a 0x00 0x20
+
+# FIXME: decode as beqlr 7
+# CHECK: bclr 12, 30, 0
+0x4d 0x9e 0x00 0x20
+
+# CHECK: bclr 12, 0, 0
+0x4d 0x80 0x00 0x20
+
+# CHECK: bclr 12, 1, 0
+0x4d 0x81 0x00 0x20
+
+# CHECK: bclr 12, 2, 0
+0x4d 0x82 0x00 0x20
+
+# CHECK: bclr 12, 3, 0
+0x4d 0x83 0x00 0x20
+
+# CHECK: bclr 12, 3, 0
+0x4d 0x83 0x00 0x20
+
+# CHECK: bclr 12, 4, 0
+0x4d 0x84 0x00 0x20
+
+# CHECK: bclr 12, 5, 0
+0x4d 0x85 0x00 0x20
+
+# CHECK: bclr 12, 6, 0
+0x4d 0x86 0x00 0x20
+
+# CHECK: bclr 12, 7, 0
+0x4d 0x87 0x00 0x20
+
+# CHECK: bclr 12, 7, 0
+0x4d 0x87 0x00 0x20
+
+# CHECK: bclr 12, 8, 0
+0x4d 0x88 0x00 0x20
+
+# CHECK: bclr 12, 9, 0
+0x4d 0x89 0x00 0x20
+
+# CHECK: bclr 12, 10, 0
+0x4d 0x8a 0x00 0x20
+
+# CHECK: bclr 12, 11, 0
+0x4d 0x8b 0x00 0x20
+
+# CHECK: bclr 12, 11, 0
+0x4d 0x8b 0x00 0x20
+
+# CHECK: bclr 12, 12, 0
+0x4d 0x8c 0x00 0x20
+
+# CHECK: bclr 12, 13, 0
+0x4d 0x8d 0x00 0x20
+
+# CHECK: bclr 12, 14, 0
+0x4d 0x8e 0x00 0x20
+
+# CHECK: bclr 12, 15, 0
+0x4d 0x8f 0x00 0x20
+
+# CHECK: bclr 12, 15, 0
+0x4d 0x8f 0x00 0x20
+
+# CHECK: bclr 12, 16, 0
+0x4d 0x90 0x00 0x20
+
+# CHECK: bclr 12, 17, 0
+0x4d 0x91 0x00 0x20
+
+# CHECK: bclr 12, 18, 0
+0x4d 0x92 0x00 0x20
+
+# CHECK: bclr 12, 19, 0
+0x4d 0x93 0x00 0x20
+
+# CHECK: bclr 12, 19, 0
+0x4d 0x93 0x00 0x20
+
+# CHECK: bclr 12, 20, 0
+0x4d 0x94 0x00 0x20
+
+# CHECK: bclr 12, 21, 0
+0x4d 0x95 0x00 0x20
+
+# CHECK: bclr 12, 22, 0
+0x4d 0x96 0x00 0x20
+
+# CHECK: bclr 12, 23, 0
+0x4d 0x97 0x00 0x20
+
+# CHECK: bclr 12, 23, 0
+0x4d 0x97 0x00 0x20
+
+# CHECK: bclr 12, 24, 0
+0x4d 0x98 0x00 0x20
+
+# CHECK: bclr 12, 25, 0
+0x4d 0x99 0x00 0x20
+
+# CHECK: bclr 12, 26, 0
+0x4d 0x9a 0x00 0x20
+
+# CHECK: bclr 12, 27, 0
+0x4d 0x9b 0x00 0x20
+
+# CHECK: bclr 12, 27, 0
+0x4d 0x9b 0x00 0x20
+
+# CHECK: bclr 12, 28, 0
+0x4d 0x9c 0x00 0x20
+
+# CHECK: bclr 12, 29, 0
+0x4d 0x9d 0x00 0x20
+
+# CHECK: bclr 12, 30, 0
+0x4d 0x9e 0x00 0x20
+
+# CHECK: bclr 12, 31, 0
+0x4d 0x9f 0x00 0x20
+
+# CHECK: bclr 12, 31, 0
+0x4d 0x9f 0x00 0x20
+
+# CHECK: blr
+0x4e 0x80 0x00 0x20
+
+# CHECK: bctr
+0x4e 0x80 0x04 0x20
+
+# CHECK: blrl
+0x4e 0x80 0x00 0x21
+
+# CHECK: bctrl
+0x4e 0x80 0x04 0x21
+
+# CHECK: bclr 12, 2, 0
+0x4d 0x82 0x00 0x20
+
+# CHECK: bcctr 12, 2, 0
+0x4d 0x82 0x04 0x20
+
+# CHECK: bclrl 12, 2, 0
+0x4d 0x82 0x00 0x21
+
+# CHECK: bcctrl 12, 2, 0
+0x4d 0x82 0x04 0x21
+
+# CHECK: bclr 15, 2, 0
+0x4d 0xe2 0x00 0x20
+
+# CHECK: bcctr 15, 2, 0
+0x4d 0xe2 0x04 0x20
+
+# CHECK: bclrl 15, 2, 0
+0x4d 0xe2 0x00 0x21
+
+# CHECK: bcctrl 15, 2, 0
+0x4d 0xe2 0x04 0x21
+
+# CHECK: bclr 14, 2, 0
+0x4d 0xc2 0x00 0x20
+
+# CHECK: bcctr 14, 2, 0
+0x4d 0xc2 0x04 0x20
+
+# CHECK: bclrl 14, 2, 0
+0x4d 0xc2 0x00 0x21
+
+# CHECK: bcctrl 14, 2, 0
+0x4d 0xc2 0x04 0x21
+
+# CHECK: bclr 4, 2, 0
+0x4c 0x82 0x00 0x20
+
+# CHECK: bcctr 4, 2, 0
+0x4c 0x82 0x04 0x20
+
+# CHECK: bclrl 4, 2, 0
+0x4c 0x82 0x00 0x21
+
+# CHECK: bcctrl 4, 2, 0
+0x4c 0x82 0x04 0x21
+
+# CHECK: bclr 7, 2, 0
+0x4c 0xe2 0x00 0x20
+
+# CHECK: bcctr 7, 2, 0
+0x4c 0xe2 0x04 0x20
+
+# CHECK: bclrl 7, 2, 0
+0x4c 0xe2 0x00 0x21
+
+# CHECK: bcctrl 7, 2, 0
+0x4c 0xe2 0x04 0x21
+
+# CHECK: bclr 6, 2, 0
+0x4c 0xc2 0x00 0x20
+
+# CHECK: bcctr 6, 2, 0
+0x4c 0xc2 0x04 0x20
+
+# CHECK: bclrl 6, 2, 0
+0x4c 0xc2 0x00 0x21
+
+# CHECK: bcctrl 6, 2, 0
+0x4c 0xc2 0x04 0x21
+
+# CHECK: bdnzlr
+0x4e 0x00 0x00 0x20
+
+# CHECK: bdnzlrl
+0x4e 0x00 0x00 0x21
+
+# CHECK: bdnzlr+
+0x4f 0x20 0x00 0x20
+
+# CHECK: bdnzlrl+
+0x4f 0x20 0x00 0x21
+
+# CHECK: bdnzlr-
+0x4f 0x00 0x00 0x20
+
+# CHECK: bdnzlrl-
+0x4f 0x00 0x00 0x21
+
+# CHECK: bclr 8, 2, 0
+0x4d 0x02 0x00 0x20
+
+# CHECK: bclrl 8, 2, 0
+0x4d 0x02 0x00 0x21
+
+# CHECK: bclr 0, 2, 0
+0x4c 0x02 0x00 0x20
+
+# CHECK: bclrl 0, 2, 0
+0x4c 0x02 0x00 0x21
+
+# CHECK: bdzlr
+0x4e 0x40 0x00 0x20
+
+# CHECK: bdzlrl
+0x4e 0x40 0x00 0x21
+
+# CHECK: bdzlr+
+0x4f 0x60 0x00 0x20
+
+# CHECK: bdzlrl+
+0x4f 0x60 0x00 0x21
+
+# CHECK: bdzlr-
+0x4f 0x40 0x00 0x20
+
+# CHECK: bdzlrl-
+0x4f 0x40 0x00 0x21
+
+# CHECK: bclr 10, 2, 0
+0x4d 0x42 0x00 0x20
+
+# CHECK: bclrl 10, 2, 0
+0x4d 0x42 0x00 0x21
+
+# CHECK: bclr 2, 2, 0
+0x4c 0x42 0x00 0x20
+
+# CHECK: bclrl 2, 2, 0
+0x4c 0x42 0x00 0x21
+
+# FIXME: decode as bltlr 2
+# CHECK: bclr 12, 8, 0
+0x4d 0x88 0x00 0x20
+
+# FIXME: decode as bltlr 0
+# CHECK: bclr 12, 0, 0
+0x4d 0x80 0x00 0x20
+
+# FIXME: decode as bltctr 2
+# CHECK: bcctr 12, 8, 0
+0x4d 0x88 0x04 0x20
+
+# FIXME: decode as bltctr 0
+# CHECK: bcctr 12, 0, 0
+0x4d 0x80 0x04 0x20
+
+# FIXME: decode as bltlrl 2
+# CHECK: bclrl 12, 8, 0
+0x4d 0x88 0x00 0x21
+
+# FIXME: decode as bltlrl 0
+# CHECK: bclrl 12, 0, 0
+0x4d 0x80 0x00 0x21
+
+# FIXME: decode as bltctrl 2
+# CHECK: bcctrl 12, 8, 0
+0x4d 0x88 0x04 0x21
+
+# FIXME: decode as bltctrl 0
+# CHECK: bcctrl 12, 0, 0
+0x4d 0x80 0x04 0x21
+
+# FIXME: decode as bltlr+ 2
+# CHECK: bclr 15, 8, 0
+0x4d 0xe8 0x00 0x20
+
+# FIXME: decode as bltlr+ 0
+# CHECK: bclr 15, 0, 0
+0x4d 0xe0 0x00 0x20
+
+# FIXME: decode as bltctr+ 2
+# CHECK: bcctr 15, 8, 0
+0x4d 0xe8 0x04 0x20
+
+# FIXME: decode as bltctr+ 0
+# CHECK: bcctr 15, 0, 0
+0x4d 0xe0 0x04 0x20
+
+# FIXME: decode as bltlrl+ 2
+# CHECK: bclrl 15, 8, 0
+0x4d 0xe8 0x00 0x21
+
+# FIXME: decode as bltlrl+ 0
+# CHECK: bclrl 15, 0, 0
+0x4d 0xe0 0x00 0x21
+
+# FIXME: decode as bltctrl+ 2
+# CHECK: bcctrl 15, 8, 0
+0x4d 0xe8 0x04 0x21
+
+# FIXME: decode as bltctrl+ 0
+# CHECK: bcctrl 15, 0, 0
+0x4d 0xe0 0x04 0x21
+
+# FIXME: decode as bltlr- 2
+# CHECK: bclr 14, 8, 0
+0x4d 0xc8 0x00 0x20
+
+# FIXME: decode as bltlr- 0
+# CHECK: bclr 14, 0, 0
+0x4d 0xc0 0x00 0x20
+
+# FIXME: decode as bltctr- 2
+# CHECK: bcctr 14, 8, 0
+0x4d 0xc8 0x04 0x20
+
+# FIXME: decode as bltctr- 0
+# CHECK: bcctr 14, 0, 0
+0x4d 0xc0 0x04 0x20
+
+# FIXME: decode as bltlrl- 2
+# CHECK: bclrl 14, 8, 0
+0x4d 0xc8 0x00 0x21
+
+# FIXME: decode as bltlrl- 0
+# CHECK: bclrl 14, 0, 0
+0x4d 0xc0 0x00 0x21
+
+# FIXME: decode as bltctrl- 2
+# CHECK: bcctrl 14, 8, 0
+0x4d 0xc8 0x04 0x21
+
+# FIXME: decode as bltctrl- 0
+# CHECK: bcctrl 14, 0, 0
+0x4d 0xc0 0x04 0x21
+
+# FIXME: decode as blelr 2
+# CHECK: bclr 4, 9, 0
+0x4c 0x89 0x00 0x20
+
+# FIXME: decode as blelr 0
+# CHECK: bclr 4, 1, 0
+0x4c 0x81 0x00 0x20
+
+# FIXME: decode as blectr 2
+# CHECK: bcctr 4, 9, 0
+0x4c 0x89 0x04 0x20
+
+# FIXME: decode as blectr 0
+# CHECK: bcctr 4, 1, 0
+0x4c 0x81 0x04 0x20
+
+# FIXME: decode as blelrl 2
+# CHECK: bclrl 4, 9, 0
+0x4c 0x89 0x00 0x21
+
+# FIXME: decode as blelrl 0
+# CHECK: bclrl 4, 1, 0
+0x4c 0x81 0x00 0x21
+
+# FIXME: decode as blectrl 2
+# CHECK: bcctrl 4, 9, 0
+0x4c 0x89 0x04 0x21
+
+# FIXME: decode as blectrl 0
+# CHECK: bcctrl 4, 1, 0
+0x4c 0x81 0x04 0x21
+
+# FIXME: decode as blelr+ 2
+# CHECK: bclr 7, 9, 0
+0x4c 0xe9 0x00 0x20
+
+# FIXME: decode as blelr+ 0
+# CHECK: bclr 7, 1, 0
+0x4c 0xe1 0x00 0x20
+
+# FIXME: decode as blectr+ 2
+# CHECK: bcctr 7, 9, 0
+0x4c 0xe9 0x04 0x20
+
+# FIXME: decode as blectr+ 0
+# CHECK: bcctr 7, 1, 0
+0x4c 0xe1 0x04 0x20
+
+# FIXME: decode as blelrl+ 2
+# CHECK: bclrl 7, 9, 0
+0x4c 0xe9 0x00 0x21
+
+# FIXME: decode as blelrl+ 0
+# CHECK: bclrl 7, 1, 0
+0x4c 0xe1 0x00 0x21
+
+# FIXME: decode as blectrl+ 2
+# CHECK: bcctrl 7, 9, 0
+0x4c 0xe9 0x04 0x21
+
+# FIXME: decode as blectrl+ 0
+# CHECK: bcctrl 7, 1, 0
+0x4c 0xe1 0x04 0x21
+
+# FIXME: decode as blelr- 2
+# CHECK: bclr 6, 9, 0
+0x4c 0xc9 0x00 0x20
+
+# FIXME: decode as blelr- 0
+# CHECK: bclr 6, 1, 0
+0x4c 0xc1 0x00 0x20
+
+# FIXME: decode as blectr- 2
+# CHECK: bcctr 6, 9, 0
+0x4c 0xc9 0x04 0x20
+
+# FIXME: decode as blectr- 0
+# CHECK: bcctr 6, 1, 0
+0x4c 0xc1 0x04 0x20
+
+# FIXME: decode as blelrl- 2
+# CHECK: bclrl 6, 9, 0
+0x4c 0xc9 0x00 0x21
+
+# FIXME: decode as blelrl- 0
+# CHECK: bclrl 6, 1, 0
+0x4c 0xc1 0x00 0x21
+
+# FIXME: decode as blectrl- 2
+# CHECK: bcctrl 6, 9, 0
+0x4c 0xc9 0x04 0x21
+
+# FIXME: decode as blectrl- 0
+# CHECK: bcctrl 6, 1, 0
+0x4c 0xc1 0x04 0x21
+
+# FIXME: decode as beqlr 2
+# CHECK: bclr 12, 10, 0
+0x4d 0x8a 0x00 0x20
+
+# FIXME: decode as beqlr 0
+# CHECK: bclr 12, 2, 0
+0x4d 0x82 0x00 0x20
+
+# FIXME: decode as beqctr 2
+# CHECK: bcctr 12, 10, 0
+0x4d 0x8a 0x04 0x20
+
+# FIXME: decode as beqctr 0
+# CHECK: bcctr 12, 2, 0
+0x4d 0x82 0x04 0x20
+
+# FIXME: decode as beqlrl 2
+# CHECK: bclrl 12, 10, 0
+0x4d 0x8a 0x00 0x21
+
+# FIXME: decode as beqlrl 0
+# CHECK: bclrl 12, 2, 0
+0x4d 0x82 0x00 0x21
+
+# FIXME: decode as beqctrl 2
+# CHECK: bcctrl 12, 10, 0
+0x4d 0x8a 0x04 0x21
+
+# FIXME: decode as beqctrl 0
+# CHECK: bcctrl 12, 2, 0
+0x4d 0x82 0x04 0x21
+
+# FIXME: decode as beqlr+ 2
+# CHECK: bclr 15, 10, 0
+0x4d 0xea 0x00 0x20
+
+# FIXME: decode as beqlr+ 0
+# CHECK: bclr 15, 2, 0
+0x4d 0xe2 0x00 0x20
+
+# FIXME: decode as beqctr+ 2
+# CHECK: bcctr 15, 10, 0
+0x4d 0xea 0x04 0x20
+
+# FIXME: decode as beqctr+ 0
+# CHECK: bcctr 15, 2, 0
+0x4d 0xe2 0x04 0x20
+
+# FIXME: decode as beqlrl+ 2
+# CHECK: bclrl 15, 10, 0
+0x4d 0xea 0x00 0x21
+
+# FIXME: decode as beqlrl+ 0
+# CHECK: bclrl 15, 2, 0
+0x4d 0xe2 0x00 0x21
+
+# FIXME: decode as beqctrl+ 2
+# CHECK: bcctrl 15, 10, 0
+0x4d 0xea 0x04 0x21
+
+# FIXME: decode as beqctrl+ 0
+# CHECK: bcctrl 15, 2, 0
+0x4d 0xe2 0x04 0x21
+
+# FIXME: decode as beqlr- 2
+# CHECK: bclr 14, 10, 0
+0x4d 0xca 0x00 0x20
+
+# FIXME: decode as beqlr- 0
+# CHECK: bclr 14, 2, 0
+0x4d 0xc2 0x00 0x20
+
+# FIXME: decode as beqctr- 2
+# CHECK: bcctr 14, 10, 0
+0x4d 0xca 0x04 0x20
+
+# FIXME: decode as beqctr- 0
+# CHECK: bcctr 14, 2, 0
+0x4d 0xc2 0x04 0x20
+
+# FIXME: decode as beqlrl- 2
+# CHECK: bclrl 14, 10, 0
+0x4d 0xca 0x00 0x21
+
+# FIXME: decode as beqlrl- 0
+# CHECK: bclrl 14, 2, 0
+0x4d 0xc2 0x00 0x21
+
+# FIXME: decode as beqctrl- 2
+# CHECK: bcctrl 14, 10, 0
+0x4d 0xca 0x04 0x21
+
+# FIXME: decode as beqctrl- 0
+# CHECK: bcctrl 14, 2, 0
+0x4d 0xc2 0x04 0x21
+
+# FIXME: decode as bgelr 2
+# CHECK: bclr 4, 8, 0
+0x4c 0x88 0x00 0x20
+
+# FIXME: decode as bgelr 0
+# CHECK: bclr 4, 0, 0
+0x4c 0x80 0x00 0x20
+
+# FIXME: decode as bgectr 2
+# CHECK: bcctr 4, 8, 0
+0x4c 0x88 0x04 0x20
+
+# FIXME: decode as bgectr 0
+# CHECK: bcctr 4, 0, 0
+0x4c 0x80 0x04 0x20
+
+# FIXME: decode as bgelrl 2
+# CHECK: bclrl 4, 8, 0
+0x4c 0x88 0x00 0x21
+
+# FIXME: decode as bgelrl 0
+# CHECK: bclrl 4, 0, 0
+0x4c 0x80 0x00 0x21
+
+# FIXME: decode as bgectrl 2
+# CHECK: bcctrl 4, 8, 0
+0x4c 0x88 0x04 0x21
+
+# FIXME: decode as bgectrl 0
+# CHECK: bcctrl 4, 0, 0
+0x4c 0x80 0x04 0x21
+
+# FIXME: decode as bgelr+ 2
+# CHECK: bclr 7, 8, 0
+0x4c 0xe8 0x00 0x20
+
+# FIXME: decode as bgelr+ 0
+# CHECK: bclr 7, 0, 0
+0x4c 0xe0 0x00 0x20
+
+# FIXME: decode as bgectr+ 2
+# CHECK: bcctr 7, 8, 0
+0x4c 0xe8 0x04 0x20
+
+# FIXME: decode as bgectr+ 0
+# CHECK: bcctr 7, 0, 0
+0x4c 0xe0 0x04 0x20
+
+# FIXME: decode as bgelrl+ 2
+# CHECK: bclrl 7, 8, 0
+0x4c 0xe8 0x00 0x21
+
+# FIXME: decode as bgelrl+ 0
+# CHECK: bclrl 7, 0, 0
+0x4c 0xe0 0x00 0x21
+
+# FIXME: decode as bgectrl+ 2
+# CHECK: bcctrl 7, 8, 0
+0x4c 0xe8 0x04 0x21
+
+# FIXME: decode as bgectrl+ 0
+# CHECK: bcctrl 7, 0, 0
+0x4c 0xe0 0x04 0x21
+
+# FIXME: decode as bgelr- 2
+# CHECK: bclr 6, 8, 0
+0x4c 0xc8 0x00 0x20
+
+# FIXME: decode as bgelr- 0
+# CHECK: bclr 6, 0, 0
+0x4c 0xc0 0x00 0x20
+
+# FIXME: decode as bgectr- 2
+# CHECK: bcctr 6, 8, 0
+0x4c 0xc8 0x04 0x20
+
+# FIXME: decode as bgectr- 0
+# CHECK: bcctr 6, 0, 0
+0x4c 0xc0 0x04 0x20
+
+# FIXME: decode as bgelrl- 2
+# CHECK: bclrl 6, 8, 0
+0x4c 0xc8 0x00 0x21
+
+# FIXME: decode as bgelrl- 0
+# CHECK: bclrl 6, 0, 0
+0x4c 0xc0 0x00 0x21
+
+# FIXME: decode as bgectrl- 2
+# CHECK: bcctrl 6, 8, 0
+0x4c 0xc8 0x04 0x21
+
+# FIXME: decode as bgectrl- 0
+# CHECK: bcctrl 6, 0, 0
+0x4c 0xc0 0x04 0x21
+
+# FIXME: decode as bgtlr 2
+# CHECK: bclr 12, 9, 0
+0x4d 0x89 0x00 0x20
+
+# FIXME: decode as bgtlr 0
+# CHECK: bclr 12, 1, 0
+0x4d 0x81 0x00 0x20
+
+# FIXME: decode as bgtctr 2
+# CHECK: bcctr 12, 9, 0
+0x4d 0x89 0x04 0x20
+
+# FIXME: decode as bgtctr 0
+# CHECK: bcctr 12, 1, 0
+0x4d 0x81 0x04 0x20
+
+# FIXME: decode as bgtlrl 2
+# CHECK: bclrl 12, 9, 0
+0x4d 0x89 0x00 0x21
+
+# FIXME: decode as bgtlrl 0
+# CHECK: bclrl 12, 1, 0
+0x4d 0x81 0x00 0x21
+
+# FIXME: decode as bgtctrl 2
+# CHECK: bcctrl 12, 9, 0
+0x4d 0x89 0x04 0x21
+
+# FIXME: decode as bgtctrl 0
+# CHECK: bcctrl 12, 1, 0
+0x4d 0x81 0x04 0x21
+
+# FIXME: decode as bgtlr+ 2
+# CHECK: bclr 15, 9, 0
+0x4d 0xe9 0x00 0x20
+
+# FIXME: decode as bgtlr+ 0
+# CHECK: bclr 15, 1, 0
+0x4d 0xe1 0x00 0x20
+
+# FIXME: decode as bgtctr+ 2
+# CHECK: bcctr 15, 9, 0
+0x4d 0xe9 0x04 0x20
+
+# FIXME: decode as bgtctr+ 0
+# CHECK: bcctr 15, 1, 0
+0x4d 0xe1 0x04 0x20
+
+# FIXME: decode as bgtlrl+ 2
+# CHECK: bclrl 15, 9, 0
+0x4d 0xe9 0x00 0x21
+
+# FIXME: decode as bgtlrl+ 0
+# CHECK: bclrl 15, 1, 0
+0x4d 0xe1 0x00 0x21
+
+# FIXME: decode as bgtctrl+ 2
+# CHECK: bcctrl 15, 9, 0
+0x4d 0xe9 0x04 0x21
+
+# FIXME: decode as bgtctrl+ 0
+# CHECK: bcctrl 15, 1, 0
+0x4d 0xe1 0x04 0x21
+
+# FIXME: decode as bgtlr- 2
+# CHECK: bclr 14, 9, 0
+0x4d 0xc9 0x00 0x20
+
+# FIXME: decode as bgtlr- 0
+# CHECK: bclr 14, 1, 0
+0x4d 0xc1 0x00 0x20
+
+# FIXME: decode as bgtctr- 2
+# CHECK: bcctr 14, 9, 0
+0x4d 0xc9 0x04 0x20
+
+# FIXME: decode as bgtctr- 0
+# CHECK: bcctr 14, 1, 0
+0x4d 0xc1 0x04 0x20
+
+# FIXME: decode as bgtlrl- 2
+# CHECK: bclrl 14, 9, 0
+0x4d 0xc9 0x00 0x21
+
+# FIXME: decode as bgtlrl- 0
+# CHECK: bclrl 14, 1, 0
+0x4d 0xc1 0x00 0x21
+
+# FIXME: decode as bgtctrl- 2
+# CHECK: bcctrl 14, 9, 0
+0x4d 0xc9 0x04 0x21
+
+# FIXME: decode as bgtctrl- 0
+# CHECK: bcctrl 14, 1, 0
+0x4d 0xc1 0x04 0x21
+
+# FIXME: decode as bgelr 2
+# CHECK: bclr 4, 8, 0
+0x4c 0x88 0x00 0x20
+
+# FIXME: decode as bgelr 0
+# CHECK: bclr 4, 0, 0
+0x4c 0x80 0x00 0x20
+
+# FIXME: decode as bgectr 2
+# CHECK: bcctr 4, 8, 0
+0x4c 0x88 0x04 0x20
+
+# FIXME: decode as bgectr 0
+# CHECK: bcctr 4, 0, 0
+0x4c 0x80 0x04 0x20
+
+# FIXME: decode as bgelrl 2
+# CHECK: bclrl 4, 8, 0
+0x4c 0x88 0x00 0x21
+
+# FIXME: decode as bgelrl 0
+# CHECK: bclrl 4, 0, 0
+0x4c 0x80 0x00 0x21
+
+# FIXME: decode as bgectrl 2
+# CHECK: bcctrl 4, 8, 0
+0x4c 0x88 0x04 0x21
+
+# FIXME: decode as bgectrl 0
+# CHECK: bcctrl 4, 0, 0
+0x4c 0x80 0x04 0x21
+
+# FIXME: decode as bgelr+ 2
+# CHECK: bclr 7, 8, 0
+0x4c 0xe8 0x00 0x20
+
+# FIXME: decode as bgelr+ 0
+# CHECK: bclr 7, 0, 0
+0x4c 0xe0 0x00 0x20
+
+# FIXME: decode as bgectr+ 2
+# CHECK: bcctr 7, 8, 0
+0x4c 0xe8 0x04 0x20
+
+# FIXME: decode as bgectr+ 0
+# CHECK: bcctr 7, 0, 0
+0x4c 0xe0 0x04 0x20
+
+# FIXME: decode as bgelrl+ 2
+# CHECK: bclrl 7, 8, 0
+0x4c 0xe8 0x00 0x21
+
+# FIXME: decode as bgelrl+ 0
+# CHECK: bclrl 7, 0, 0
+0x4c 0xe0 0x00 0x21
+
+# FIXME: decode as bgectrl+ 2
+# CHECK: bcctrl 7, 8, 0
+0x4c 0xe8 0x04 0x21
+
+# FIXME: decode as bgectrl+ 0
+# CHECK: bcctrl 7, 0, 0
+0x4c 0xe0 0x04 0x21
+
+# FIXME: decode as bgelr- 2
+# CHECK: bclr 6, 8, 0
+0x4c 0xc8 0x00 0x20
+
+# FIXME: decode as bgelr- 0
+# CHECK: bclr 6, 0, 0
+0x4c 0xc0 0x00 0x20
+
+# FIXME: decode as bgectr- 2
+# CHECK: bcctr 6, 8, 0
+0x4c 0xc8 0x04 0x20
+
+# FIXME: decode as bgectr- 0
+# CHECK: bcctr 6, 0, 0
+0x4c 0xc0 0x04 0x20
+
+# FIXME: decode as bgelrl- 2
+# CHECK: bclrl 6, 8, 0
+0x4c 0xc8 0x00 0x21
+
+# FIXME: decode as bgelrl- 0
+# CHECK: bclrl 6, 0, 0
+0x4c 0xc0 0x00 0x21
+
+# FIXME: decode as bgectrl- 2
+# CHECK: bcctrl 6, 8, 0
+0x4c 0xc8 0x04 0x21
+
+# FIXME: decode as bgectrl- 0
+# CHECK: bcctrl 6, 0, 0
+0x4c 0xc0 0x04 0x21
+
+# FIXME: decode as bnelr 2
+# CHECK: bclr 4, 10, 0
+0x4c 0x8a 0x00 0x20
+
+# FIXME: decode as bnelr 0
+# CHECK: bclr 4, 2, 0
+0x4c 0x82 0x00 0x20
+
+# FIXME: decode as bnectr 2
+# CHECK: bcctr 4, 10, 0
+0x4c 0x8a 0x04 0x20
+
+# FIXME: decode as bnectr 0
+# CHECK: bcctr 4, 2, 0
+0x4c 0x82 0x04 0x20
+
+# FIXME: decode as bnelrl 2
+# CHECK: bclrl 4, 10, 0
+0x4c 0x8a 0x00 0x21
+
+# FIXME: decode as bnelrl 0
+# CHECK: bclrl 4, 2, 0
+0x4c 0x82 0x00 0x21
+
+# FIXME: decode as bnectrl 2
+# CHECK: bcctrl 4, 10, 0
+0x4c 0x8a 0x04 0x21
+
+# FIXME: decode as bnectrl 0
+# CHECK: bcctrl 4, 2, 0
+0x4c 0x82 0x04 0x21
+
+# FIXME: decode as bnelr+ 2
+# CHECK: bclr 7, 10, 0
+0x4c 0xea 0x00 0x20
+
+# FIXME: decode as bnelr+ 0
+# CHECK: bclr 7, 2, 0
+0x4c 0xe2 0x00 0x20
+
+# FIXME: decode as bnectr+ 2
+# CHECK: bcctr 7, 10, 0
+0x4c 0xea 0x04 0x20
+
+# FIXME: decode as bnectr+ 0
+# CHECK: bcctr 7, 2, 0
+0x4c 0xe2 0x04 0x20
+
+# FIXME: decode as bnelrl+ 2
+# CHECK: bclrl 7, 10, 0
+0x4c 0xea 0x00 0x21
+
+# FIXME: decode as bnelrl+ 0
+# CHECK: bclrl 7, 2, 0
+0x4c 0xe2 0x00 0x21
+
+# FIXME: decode as bnectrl+ 2
+# CHECK: bcctrl 7, 10, 0
+0x4c 0xea 0x04 0x21
+
+# FIXME: decode as bnectrl+ 0
+# CHECK: bcctrl 7, 2, 0
+0x4c 0xe2 0x04 0x21
+
+# FIXME: decode as bnelr- 2
+# CHECK: bclr 6, 10, 0
+0x4c 0xca 0x00 0x20
+
+# FIXME: decode as bnelr- 0
+# CHECK: bclr 6, 2, 0
+0x4c 0xc2 0x00 0x20
+
+# FIXME: decode as bnectr- 2
+# CHECK: bcctr 6, 10, 0
+0x4c 0xca 0x04 0x20
+
+# FIXME: decode as bnectr- 0
+# CHECK: bcctr 6, 2, 0
+0x4c 0xc2 0x04 0x20
+
+# FIXME: decode as bnelrl- 2
+# CHECK: bclrl 6, 10, 0
+0x4c 0xca 0x00 0x21
+
+# FIXME: decode as bnelrl- 0
+# CHECK: bclrl 6, 2, 0
+0x4c 0xc2 0x00 0x21
+
+# FIXME: decode as bnectrl- 2
+# CHECK: bcctrl 6, 10, 0
+0x4c 0xca 0x04 0x21
+
+# FIXME: decode as bnectrl- 0
+# CHECK: bcctrl 6, 2, 0
+0x4c 0xc2 0x04 0x21
+
+# FIXME: decode as blelr 2
+# CHECK: bclr 4, 9, 0
+0x4c 0x89 0x00 0x20
+
+# FIXME: decode as blelr 0
+# CHECK: bclr 4, 1, 0
+0x4c 0x81 0x00 0x20
+
+# FIXME: decode as blectr 2
+# CHECK: bcctr 4, 9, 0
+0x4c 0x89 0x04 0x20
+
+# FIXME: decode as blectr 0
+# CHECK: bcctr 4, 1, 0
+0x4c 0x81 0x04 0x20
+
+# FIXME: decode as blelrl 2
+# CHECK: bclrl 4, 9, 0
+0x4c 0x89 0x00 0x21
+
+# FIXME: decode as blelrl 0
+# CHECK: bclrl 4, 1, 0
+0x4c 0x81 0x00 0x21
+
+# FIXME: decode as blectrl 2
+# CHECK: bcctrl 4, 9, 0
+0x4c 0x89 0x04 0x21
+
+# FIXME: decode as blectrl 0
+# CHECK: bcctrl 4, 1, 0
+0x4c 0x81 0x04 0x21
+
+# FIXME: decode as blelr+ 2
+# CHECK: bclr 7, 9, 0
+0x4c 0xe9 0x00 0x20
+
+# FIXME: decode as blelr+ 0
+# CHECK: bclr 7, 1, 0
+0x4c 0xe1 0x00 0x20
+
+# FIXME: decode as blectr+ 2
+# CHECK: bcctr 7, 9, 0
+0x4c 0xe9 0x04 0x20
+
+# FIXME: decode as blectr+ 0
+# CHECK: bcctr 7, 1, 0
+0x4c 0xe1 0x04 0x20
+
+# FIXME: decode as blelrl+ 2
+# CHECK: bclrl 7, 9, 0
+0x4c 0xe9 0x00 0x21
+
+# FIXME: decode as blelrl+ 0
+# CHECK: bclrl 7, 1, 0
+0x4c 0xe1 0x00 0x21
+
+# FIXME: decode as blectrl+ 2
+# CHECK: bcctrl 7, 9, 0
+0x4c 0xe9 0x04 0x21
+
+# FIXME: decode as blectrl+ 0
+# CHECK: bcctrl 7, 1, 0
+0x4c 0xe1 0x04 0x21
+
+# FIXME: decode as blelr- 2
+# CHECK: bclr 6, 9, 0
+0x4c 0xc9 0x00 0x20
+
+# FIXME: decode as blelr- 0
+# CHECK: bclr 6, 1, 0
+0x4c 0xc1 0x00 0x20
+
+# FIXME: decode as blectr- 2
+# CHECK: bcctr 6, 9, 0
+0x4c 0xc9 0x04 0x20
+
+# FIXME: decode as blectr- 0
+# CHECK: bcctr 6, 1, 0
+0x4c 0xc1 0x04 0x20
+
+# FIXME: decode as blelrl- 2
+# CHECK: bclrl 6, 9, 0
+0x4c 0xc9 0x00 0x21
+
+# FIXME: decode as blelrl- 0
+# CHECK: bclrl 6, 1, 0
+0x4c 0xc1 0x00 0x21
+
+# FIXME: decode as blectrl- 2
+# CHECK: bcctrl 6, 9, 0
+0x4c 0xc9 0x04 0x21
+
+# FIXME: decode as blectrl- 0
+# CHECK: bcctrl 6, 1, 0
+0x4c 0xc1 0x04 0x21
+
+# FIXME: decode as bunlr 2
+# CHECK: bclr 12, 11, 0
+0x4d 0x8b 0x00 0x20
+
+# FIXME: decode as bunlr 0
+# CHECK: bclr 12, 3, 0
+0x4d 0x83 0x00 0x20
+
+# FIXME: decode as bunctr 2
+# CHECK: bcctr 12, 11, 0
+0x4d 0x8b 0x04 0x20
+
+# FIXME: decode as bunctr 0
+# CHECK: bcctr 12, 3, 0
+0x4d 0x83 0x04 0x20
+
+# FIXME: decode as bunlrl 2
+# CHECK: bclrl 12, 11, 0
+0x4d 0x8b 0x00 0x21
+
+# FIXME: decode as bunlrl 0
+# CHECK: bclrl 12, 3, 0
+0x4d 0x83 0x00 0x21
+
+# FIXME: decode as bunctrl 2
+# CHECK: bcctrl 12, 11, 0
+0x4d 0x8b 0x04 0x21
+
+# FIXME: decode as bunctrl 0
+# CHECK: bcctrl 12, 3, 0
+0x4d 0x83 0x04 0x21
+
+# FIXME: decode as bunlr+ 2
+# CHECK: bclr 15, 11, 0
+0x4d 0xeb 0x00 0x20
+
+# FIXME: decode as bunlr+ 0
+# CHECK: bclr 15, 3, 0
+0x4d 0xe3 0x00 0x20
+
+# FIXME: decode as bunctr+ 2
+# CHECK: bcctr 15, 11, 0
+0x4d 0xeb 0x04 0x20
+
+# FIXME: decode as bunctr+ 0
+# CHECK: bcctr 15, 3, 0
+0x4d 0xe3 0x04 0x20
+
+# FIXME: decode as bunlrl+ 2
+# CHECK: bclrl 15, 11, 0
+0x4d 0xeb 0x00 0x21
+
+# FIXME: decode as bunlrl+ 0
+# CHECK: bclrl 15, 3, 0
+0x4d 0xe3 0x00 0x21
+
+# FIXME: decode as bunctrl+ 2
+# CHECK: bcctrl 15, 11, 0
+0x4d 0xeb 0x04 0x21
+
+# FIXME: decode as bunctrl+ 0
+# CHECK: bcctrl 15, 3, 0
+0x4d 0xe3 0x04 0x21
+
+# FIXME: decode as bunlr- 2
+# CHECK: bclr 14, 11, 0
+0x4d 0xcb 0x00 0x20
+
+# FIXME: decode as bunlr- 0
+# CHECK: bclr 14, 3, 0
+0x4d 0xc3 0x00 0x20
+
+# FIXME: decode as bunctr- 2
+# CHECK: bcctr 14, 11, 0
+0x4d 0xcb 0x04 0x20
+
+# FIXME: decode as bunctr- 0
+# CHECK: bcctr 14, 3, 0
+0x4d 0xc3 0x04 0x20
+
+# FIXME: decode as bunlrl- 2
+# CHECK: bclrl 14, 11, 0
+0x4d 0xcb 0x00 0x21
+
+# FIXME: decode as bunlrl- 0
+# CHECK: bclrl 14, 3, 0
+0x4d 0xc3 0x00 0x21
+
+# FIXME: decode as bunctrl- 2
+# CHECK: bcctrl 14, 11, 0
+0x4d 0xcb 0x04 0x21
+
+# FIXME: decode as bunctrl- 0
+# CHECK: bcctrl 14, 3, 0
+0x4d 0xc3 0x04 0x21
+
+# FIXME: decode as bnulr 2
+# CHECK: bclr 4, 11, 0
+0x4c 0x8b 0x00 0x20
+
+# FIXME: decode as bnulr 0
+# CHECK: bclr 4, 3, 0
+0x4c 0x83 0x00 0x20
+
+# FIXME: decode as bnuctr 2
+# CHECK: bcctr 4, 11, 0
+0x4c 0x8b 0x04 0x20
+
+# FIXME: decode as bnuctr 0
+# CHECK: bcctr 4, 3, 0
+0x4c 0x83 0x04 0x20
+
+# FIXME: decode as bnulrl 2
+# CHECK: bclrl 4, 11, 0
+0x4c 0x8b 0x00 0x21
+
+# FIXME: decode as bnulrl 0
+# CHECK: bclrl 4, 3, 0
+0x4c 0x83 0x00 0x21
+
+# FIXME: decode as bnuctrl 2
+# CHECK: bcctrl 4, 11, 0
+0x4c 0x8b 0x04 0x21
+
+# FIXME: decode as bnuctrl 0
+# CHECK: bcctrl 4, 3, 0
+0x4c 0x83 0x04 0x21
+
+# FIXME: decode as bnulr+ 2
+# CHECK: bclr 7, 11, 0
+0x4c 0xeb 0x00 0x20
+
+# FIXME: decode as bnulr+ 0
+# CHECK: bclr 7, 3, 0
+0x4c 0xe3 0x00 0x20
+
+# FIXME: decode as bnuctr+ 2
+# CHECK: bcctr 7, 11, 0
+0x4c 0xeb 0x04 0x20
+
+# FIXME: decode as bnuctr+ 0
+# CHECK: bcctr 7, 3, 0
+0x4c 0xe3 0x04 0x20
+
+# FIXME: decode as bnulrl+ 2
+# CHECK: bclrl 7, 11, 0
+0x4c 0xeb 0x00 0x21
+
+# FIXME: decode as bnulrl+ 0
+# CHECK: bclrl 7, 3, 0
+0x4c 0xe3 0x00 0x21
+
+# FIXME: decode as bnuctrl+ 2
+# CHECK: bcctrl 7, 11, 0
+0x4c 0xeb 0x04 0x21
+
+# FIXME: decode as bnuctrl+ 0
+# CHECK: bcctrl 7, 3, 0
+0x4c 0xe3 0x04 0x21
+
+# FIXME: decode as bnulr- 2
+# CHECK: bclr 6, 11, 0
+0x4c 0xcb 0x00 0x20
+
+# FIXME: decode as bnulr- 0
+# CHECK: bclr 6, 3, 0
+0x4c 0xc3 0x00 0x20
+
+# FIXME: decode as bnuctr- 2
+# CHECK: bcctr 6, 11, 0
+0x4c 0xcb 0x04 0x20
+
+# FIXME: decode as bnuctr- 0
+# CHECK: bcctr 6, 3, 0
+0x4c 0xc3 0x04 0x20
+
+# FIXME: decode as bnulrl- 2
+# CHECK: bclrl 6, 11, 0
+0x4c 0xcb 0x00 0x21
+
+# FIXME: decode as bnulrl- 0
+# CHECK: bclrl 6, 3, 0
+0x4c 0xc3 0x00 0x21
+
+# FIXME: decode as bnuctrl- 2
+# CHECK: bcctrl 6, 11, 0
+0x4c 0xcb 0x04 0x21
+
+# FIXME: decode as bnuctrl- 0
+# CHECK: bcctrl 6, 3, 0
+0x4c 0xc3 0x04 0x21
+
+# FIXME: decode as bunlr 2
+# CHECK: bclr 12, 11, 0
+0x4d 0x8b 0x00 0x20
+
+# FIXME: decode as bunlr 0
+# CHECK: bclr 12, 3, 0
+0x4d 0x83 0x00 0x20
+
+# FIXME: decode as bunctr 2
+# CHECK: bcctr 12, 11, 0
+0x4d 0x8b 0x04 0x20
+
+# FIXME: decode as bunctr 0
+# CHECK: bcctr 12, 3, 0
+0x4d 0x83 0x04 0x20
+
+# FIXME: decode as bunlrl 2
+# CHECK: bclrl 12, 11, 0
+0x4d 0x8b 0x00 0x21
+
+# FIXME: decode as bunlrl 0
+# CHECK: bclrl 12, 3, 0
+0x4d 0x83 0x00 0x21
+
+# FIXME: decode as bunctrl 2
+# CHECK: bcctrl 12, 11, 0
+0x4d 0x8b 0x04 0x21
+
+# FIXME: decode as bunctrl 0
+# CHECK: bcctrl 12, 3, 0
+0x4d 0x83 0x04 0x21
+
+# FIXME: decode as bunlr+ 2
+# CHECK: bclr 15, 11, 0
+0x4d 0xeb 0x00 0x20
+
+# FIXME: decode as bunlr+ 0
+# CHECK: bclr 15, 3, 0
+0x4d 0xe3 0x00 0x20
+
+# FIXME: decode as bunctr+ 2
+# CHECK: bcctr 15, 11, 0
+0x4d 0xeb 0x04 0x20
+
+# FIXME: decode as bunctr+ 0
+# CHECK: bcctr 15, 3, 0
+0x4d 0xe3 0x04 0x20
+
+# FIXME: decode as bunlrl+ 2
+# CHECK: bclrl 15, 11, 0
+0x4d 0xeb 0x00 0x21
+
+# FIXME: decode as bunlrl+ 0
+# CHECK: bclrl 15, 3, 0
+0x4d 0xe3 0x00 0x21
+
+# FIXME: decode as bunctrl+ 2
+# CHECK: bcctrl 15, 11, 0
+0x4d 0xeb 0x04 0x21
+
+# FIXME: decode as bunctrl+ 0
+# CHECK: bcctrl 15, 3, 0
+0x4d 0xe3 0x04 0x21
+
+# FIXME: decode as bunlr- 2
+# CHECK: bclr 14, 11, 0
+0x4d 0xcb 0x00 0x20
+
+# FIXME: decode as bunlr- 0
+# CHECK: bclr 14, 3, 0
+0x4d 0xc3 0x00 0x20
+
+# FIXME: decode as bunctr- 2
+# CHECK: bcctr 14, 11, 0
+0x4d 0xcb 0x04 0x20
+
+# FIXME: decode as bunctr- 0
+# CHECK: bcctr 14, 3, 0
+0x4d 0xc3 0x04 0x20
+
+# FIXME: decode as bunlrl- 2
+# CHECK: bclrl 14, 11, 0
+0x4d 0xcb 0x00 0x21
+
+# FIXME: decode as bunlrl- 0
+# CHECK: bclrl 14, 3, 0
+0x4d 0xc3 0x00 0x21
+
+# FIXME: decode as bunctrl- 2
+# CHECK: bcctrl 14, 11, 0
+0x4d 0xcb 0x04 0x21
+
+# FIXME: decode as bunctrl- 0
+# CHECK: bcctrl 14, 3, 0
+0x4d 0xc3 0x04 0x21
+
+# FIXME: decode as bnulr 2
+# CHECK: bclr 4, 11, 0
+0x4c 0x8b 0x00 0x20
+
+# FIXME: decode as bnulr 0
+# CHECK: bclr 4, 3, 0
+0x4c 0x83 0x00 0x20
+
+# FIXME: decode as bnuctr 2
+# CHECK: bcctr 4, 11, 0
+0x4c 0x8b 0x04 0x20
+
+# FIXME: decode as bnuctr 0
+# CHECK: bcctr 4, 3, 0
+0x4c 0x83 0x04 0x20
+
+# FIXME: decode as bnulrl 2
+# CHECK: bclrl 4, 11, 0
+0x4c 0x8b 0x00 0x21
+
+# FIXME: decode as bnulrl 0
+# CHECK: bclrl 4, 3, 0
+0x4c 0x83 0x00 0x21
+
+# FIXME: decode as bnuctrl 2
+# CHECK: bcctrl 4, 11, 0
+0x4c 0x8b 0x04 0x21
+
+# FIXME: decode as bnuctrl 0
+# CHECK: bcctrl 4, 3, 0
+0x4c 0x83 0x04 0x21
+
+# FIXME: decode as bnulr+ 2
+# CHECK: bclr 7, 11, 0
+0x4c 0xeb 0x00 0x20
+
+# FIXME: decode as bnulr+ 0
+# CHECK: bclr 7, 3, 0
+0x4c 0xe3 0x00 0x20
+
+# FIXME: decode as bnuctr+ 2
+# CHECK: bcctr 7, 11, 0
+0x4c 0xeb 0x04 0x20
+
+# FIXME: decode as bnuctr+ 0
+# CHECK: bcctr 7, 3, 0
+0x4c 0xe3 0x04 0x20
+
+# FIXME: decode as bnulrl+ 2
+# CHECK: bclrl 7, 11, 0
+0x4c 0xeb 0x00 0x21
+
+# FIXME: decode as bnulrl+ 0
+# CHECK: bclrl 7, 3, 0
+0x4c 0xe3 0x00 0x21
+
+# FIXME: decode as bnuctrl+ 2
+# CHECK: bcctrl 7, 11, 0
+0x4c 0xeb 0x04 0x21
+
+# FIXME: decode as bnuctrl+ 0
+# CHECK: bcctrl 7, 3, 0
+0x4c 0xe3 0x04 0x21
+
+# FIXME: decode as bnulr- 2
+# CHECK: bclr 6, 11, 0
+0x4c 0xcb 0x00 0x20
+
+# FIXME: decode as bnulr- 0
+# CHECK: bclr 6, 3, 0
+0x4c 0xc3 0x00 0x20
+
+# FIXME: decode as bnuctr- 2
+# CHECK: bcctr 6, 11, 0
+0x4c 0xcb 0x04 0x20
+
+# FIXME: decode as bnuctr- 0
+# CHECK: bcctr 6, 3, 0
+0x4c 0xc3 0x04 0x20
+
+# FIXME: decode as bnulrl- 2
+# CHECK: bclrl 6, 11, 0
+0x4c 0xcb 0x00 0x21
+
+# FIXME: decode as bnulrl- 0
+# CHECK: bclrl 6, 3, 0
+0x4c 0xc3 0x00 0x21
+
+# FIXME: decode as bnuctrl- 2
+# CHECK: bcctrl 6, 11, 0
+0x4c 0xcb 0x04 0x21
+
+# FIXME: decode as bnuctrl- 0
+# CHECK: bcctrl 6, 3, 0
+0x4c 0xc3 0x04 0x21
+
+# FIXME: test bc 12, 2, target
+# FIXME: test bca 12, 2, target
+# FIXME: test bcl 12, 2, target
+# FIXME: test bcla 12, 2, target
+# FIXME: test bc 15, 2, target
+# FIXME: test bca 15, 2, target
+# FIXME: test bcl 15, 2, target
+# FIXME: test bcla 15, 2, target
+# FIXME: test bc 14, 2, target
+# FIXME: test bca 14, 2, target
+# FIXME: test bcl 14, 2, target
+# FIXME: test bcla 14, 2, target
+# FIXME: test bc 4, 2, target
+# FIXME: test bca 4, 2, target
+# FIXME: test bcl 4, 2, target
+# FIXME: test bcla 4, 2, target
+# FIXME: test bc 7, 2, target
+# FIXME: test bca 7, 2, target
+# FIXME: test bcl 7, 2, target
+# FIXME: test bcla 7, 2, target
+# FIXME: test bc 6, 2, target
+# FIXME: test bca 6, 2, target
+# FIXME: test bcl 6, 2, target
+# FIXME: test bcla 6, 2, target
+# FIXME: test bdnz target
+# FIXME: test bdnza target
+# FIXME: test bdnzl target
+# FIXME: test bdnzla target
+# FIXME: test bdnz+ target
+# FIXME: test bdnza+ target
+# FIXME: test bdnzl+ target
+# FIXME: test bdnzla+ target
+# FIXME: test bdnz- target
+# FIXME: test bdnza- target
+# FIXME: test bdnzl- target
+# FIXME: test bdnzla- target
+# FIXME: test bc 8, 2, target
+# FIXME: test bca 8, 2, target
+# FIXME: test bcl 8, 2, target
+# FIXME: test bcla 8, 2, target
+# FIXME: test bc 0, 2, target
+# FIXME: test bca 0, 2, target
+# FIXME: test bcl 0, 2, target
+# FIXME: test bcla 0, 2, target
+# FIXME: test bdz target
+# FIXME: test bdza target
+# FIXME: test bdzl target
+# FIXME: test bdzla target
+# FIXME: test bdz+ target
+# FIXME: test bdza+ target
+# FIXME: test bdzl+ target
+# FIXME: test bdzla+ target
+# FIXME: test bdz- target
+# FIXME: test bdza- target
+# FIXME: test bdzl- target
+# FIXME: test bdzla- target
+# FIXME: test bc 10, 2, target
+# FIXME: test bca 10, 2, target
+# FIXME: test bcl 10, 2, target
+# FIXME: test bcla 10, 2, target
+# FIXME: test bc 2, 2, target
+# FIXME: test bca 2, 2, target
+# FIXME: test bcl 2, 2, target
+# FIXME: test bcla 2, 2, target
+# FIXME: test blt 2, target
+# FIXME: test blt 0, target
+# FIXME: test blta 2, target
+# FIXME: test blta 0, target
+# FIXME: test bltl 2, target
+# FIXME: test bltl 0, target
+# FIXME: test bltla 2, target
+# FIXME: test bltla 0, target
+# FIXME: test blt+ 2, target
+# FIXME: test blt+ 0, target
+# FIXME: test blta+ 2, target
+# FIXME: test blta+ 0, target
+# FIXME: test bltl+ 2, target
+# FIXME: test bltl+ 0, target
+# FIXME: test bltla+ 2, target
+# FIXME: test bltla+ 0, target
+# FIXME: test blt- 2, target
+# FIXME: test blt- 0, target
+# FIXME: test blta- 2, target
+# FIXME: test blta- 0, target
+# FIXME: test bltl- 2, target
+# FIXME: test bltl- 0, target
+# FIXME: test bltla- 2, target
+# FIXME: test bltla- 0, target
+# FIXME: test ble 2, target
+# FIXME: test ble 0, target
+# FIXME: test blea 2, target
+# FIXME: test blea 0, target
+# FIXME: test blel 2, target
+# FIXME: test blel 0, target
+# FIXME: test blela 2, target
+# FIXME: test blela 0, target
+# FIXME: test ble+ 2, target
+# FIXME: test ble+ 0, target
+# FIXME: test blea+ 2, target
+# FIXME: test blea+ 0, target
+# FIXME: test blel+ 2, target
+# FIXME: test blel+ 0, target
+# FIXME: test blela+ 2, target
+# FIXME: test blela+ 0, target
+# FIXME: test ble- 2, target
+# FIXME: test ble- 0, target
+# FIXME: test blea- 2, target
+# FIXME: test blea- 0, target
+# FIXME: test blel- 2, target
+# FIXME: test blel- 0, target
+# FIXME: test blela- 2, target
+# FIXME: test blela- 0, target
+# FIXME: test beq 2, target
+# FIXME: test beq 0, target
+# FIXME: test beqa 2, target
+# FIXME: test beqa 0, target
+# FIXME: test beql 2, target
+# FIXME: test beql 0, target
+# FIXME: test beqla 2, target
+# FIXME: test beqla 0, target
+# FIXME: test beq+ 2, target
+# FIXME: test beq+ 0, target
+# FIXME: test beqa+ 2, target
+# FIXME: test beqa+ 0, target
+# FIXME: test beql+ 2, target
+# FIXME: test beql+ 0, target
+# FIXME: test beqla+ 2, target
+# FIXME: test beqla+ 0, target
+# FIXME: test beq- 2, target
+# FIXME: test beq- 0, target
+# FIXME: test beqa- 2, target
+# FIXME: test beqa- 0, target
+# FIXME: test beql- 2, target
+# FIXME: test beql- 0, target
+# FIXME: test beqla- 2, target
+# FIXME: test beqla- 0, target
+# FIXME: test bge 2, target
+# FIXME: test bge 0, target
+# FIXME: test bgea 2, target
+# FIXME: test bgea 0, target
+# FIXME: test bgel 2, target
+# FIXME: test bgel 0, target
+# FIXME: test bgela 2, target
+# FIXME: test bgela 0, target
+# FIXME: test bge+ 2, target
+# FIXME: test bge+ 0, target
+# FIXME: test bgea+ 2, target
+# FIXME: test bgea+ 0, target
+# FIXME: test bgel+ 2, target
+# FIXME: test bgel+ 0, target
+# FIXME: test bgela+ 2, target
+# FIXME: test bgela+ 0, target
+# FIXME: test bge- 2, target
+# FIXME: test bge- 0, target
+# FIXME: test bgea- 2, target
+# FIXME: test bgea- 0, target
+# FIXME: test bgel- 2, target
+# FIXME: test bgel- 0, target
+# FIXME: test bgela- 2, target
+# FIXME: test bgela- 0, target
+# FIXME: test bgt 2, target
+# FIXME: test bgt 0, target
+# FIXME: test bgta 2, target
+# FIXME: test bgta 0, target
+# FIXME: test bgtl 2, target
+# FIXME: test bgtl 0, target
+# FIXME: test bgtla 2, target
+# FIXME: test bgtla 0, target
+# FIXME: test bgt+ 2, target
+# FIXME: test bgt+ 0, target
+# FIXME: test bgta+ 2, target
+# FIXME: test bgta+ 0, target
+# FIXME: test bgtl+ 2, target
+# FIXME: test bgtl+ 0, target
+# FIXME: test bgtla+ 2, target
+# FIXME: test bgtla+ 0, target
+# FIXME: test bgt- 2, target
+# FIXME: test bgt- 0, target
+# FIXME: test bgta- 2, target
+# FIXME: test bgta- 0, target
+# FIXME: test bgtl- 2, target
+# FIXME: test bgtl- 0, target
+# FIXME: test bgtla- 2, target
+# FIXME: test bgtla- 0, target
+# FIXME: test bge 2, target
+# FIXME: test bge 0, target
+# FIXME: test bgea 2, target
+# FIXME: test bgea 0, target
+# FIXME: test bgel 2, target
+# FIXME: test bgel 0, target
+# FIXME: test bgela 2, target
+# FIXME: test bgela 0, target
+# FIXME: test bge+ 2, target
+# FIXME: test bge+ 0, target
+# FIXME: test bgea+ 2, target
+# FIXME: test bgea+ 0, target
+# FIXME: test bgel+ 2, target
+# FIXME: test bgel+ 0, target
+# FIXME: test bgela+ 2, target
+# FIXME: test bgela+ 0, target
+# FIXME: test bge- 2, target
+# FIXME: test bge- 0, target
+# FIXME: test bgea- 2, target
+# FIXME: test bgea- 0, target
+# FIXME: test bgel- 2, target
+# FIXME: test bgel- 0, target
+# FIXME: test bgela- 2, target
+# FIXME: test bgela- 0, target
+# FIXME: test bne 2, target
+# FIXME: test bne 0, target
+# FIXME: test bnea 2, target
+# FIXME: test bnea 0, target
+# FIXME: test bnel 2, target
+# FIXME: test bnel 0, target
+# FIXME: test bnela 2, target
+# FIXME: test bnela 0, target
+# FIXME: test bne+ 2, target
+# FIXME: test bne+ 0, target
+# FIXME: test bnea+ 2, target
+# FIXME: test bnea+ 0, target
+# FIXME: test bnel+ 2, target
+# FIXME: test bnel+ 0, target
+# FIXME: test bnela+ 2, target
+# FIXME: test bnela+ 0, target
+# FIXME: test bne- 2, target
+# FIXME: test bne- 0, target
+# FIXME: test bnea- 2, target
+# FIXME: test bnea- 0, target
+# FIXME: test bnel- 2, target
+# FIXME: test bnel- 0, target
+# FIXME: test bnela- 2, target
+# FIXME: test bnela- 0, target
+# FIXME: test ble 2, target
+# FIXME: test ble 0, target
+# FIXME: test blea 2, target
+# FIXME: test blea 0, target
+# FIXME: test blel 2, target
+# FIXME: test blel 0, target
+# FIXME: test blela 2, target
+# FIXME: test blela 0, target
+# FIXME: test ble+ 2, target
+# FIXME: test ble+ 0, target
+# FIXME: test blea+ 2, target
+# FIXME: test blea+ 0, target
+# FIXME: test blel+ 2, target
+# FIXME: test blel+ 0, target
+# FIXME: test blela+ 2, target
+# FIXME: test blela+ 0, target
+# FIXME: test ble- 2, target
+# FIXME: test ble- 0, target
+# FIXME: test blea- 2, target
+# FIXME: test blea- 0, target
+# FIXME: test blel- 2, target
+# FIXME: test blel- 0, target
+# FIXME: test blela- 2, target
+# FIXME: test blela- 0, target
+# FIXME: test bun 2, target
+# FIXME: test bun 0, target
+# FIXME: test buna 2, target
+# FIXME: test buna 0, target
+# FIXME: test bunl 2, target
+# FIXME: test bunl 0, target
+# FIXME: test bunla 2, target
+# FIXME: test bunla 0, target
+# FIXME: test bun+ 2, target
+# FIXME: test bun+ 0, target
+# FIXME: test buna+ 2, target
+# FIXME: test buna+ 0, target
+# FIXME: test bunl+ 2, target
+# FIXME: test bunl+ 0, target
+# FIXME: test bunla+ 2, target
+# FIXME: test bunla+ 0, target
+# FIXME: test bun- 2, target
+# FIXME: test bun- 0, target
+# FIXME: test buna- 2, target
+# FIXME: test buna- 0, target
+# FIXME: test bunl- 2, target
+# FIXME: test bunl- 0, target
+# FIXME: test bunla- 2, target
+# FIXME: test bunla- 0, target
+# FIXME: test bnu 2, target
+# FIXME: test bnu 0, target
+# FIXME: test bnua 2, target
+# FIXME: test bnua 0, target
+# FIXME: test bnul 2, target
+# FIXME: test bnul 0, target
+# FIXME: test bnula 2, target
+# FIXME: test bnula 0, target
+# FIXME: test bnu+ 2, target
+# FIXME: test bnu+ 0, target
+# FIXME: test bnua+ 2, target
+# FIXME: test bnua+ 0, target
+# FIXME: test bnul+ 2, target
+# FIXME: test bnul+ 0, target
+# FIXME: test bnula+ 2, target
+# FIXME: test bnula+ 0, target
+# FIXME: test bnu- 2, target
+# FIXME: test bnu- 0, target
+# FIXME: test bnua- 2, target
+# FIXME: test bnua- 0, target
+# FIXME: test bnul- 2, target
+# FIXME: test bnul- 0, target
+# FIXME: test bnula- 2, target
+# FIXME: test bnula- 0, target
+# FIXME: test bun 2, target
+# FIXME: test bun 0, target
+# FIXME: test buna 2, target
+# FIXME: test buna 0, target
+# FIXME: test bunl 2, target
+# FIXME: test bunl 0, target
+# FIXME: test bunla 2, target
+# FIXME: test bunla 0, target
+# FIXME: test bun+ 2, target
+# FIXME: test bun+ 0, target
+# FIXME: test buna+ 2, target
+# FIXME: test buna+ 0, target
+# FIXME: test bunl+ 2, target
+# FIXME: test bunl+ 0, target
+# FIXME: test bunla+ 2, target
+# FIXME: test bunla+ 0, target
+# FIXME: test bun- 2, target
+# FIXME: test bun- 0, target
+# FIXME: test buna- 2, target
+# FIXME: test buna- 0, target
+# FIXME: test bunl- 2, target
+# FIXME: test bunl- 0, target
+# FIXME: test bunla- 2, target
+# FIXME: test bunla- 0, target
+# FIXME: test bnu 2, target
+# FIXME: test bnu 0, target
+# FIXME: test bnua 2, target
+# FIXME: test bnua 0, target
+# FIXME: test bnul 2, target
+# FIXME: test bnul 0, target
+# FIXME: test bnula 2, target
+# FIXME: test bnula 0, target
+# FIXME: test bnu+ 2, target
+# FIXME: test bnu+ 0, target
+# FIXME: test bnua+ 2, target
+# FIXME: test bnua+ 0, target
+# FIXME: test bnul+ 2, target
+# FIXME: test bnul+ 0, target
+# FIXME: test bnula+ 2, target
+# FIXME: test bnula+ 0, target
+# FIXME: test bnu- 2, target
+# FIXME: test bnu- 0, target
+# FIXME: test bnua- 2, target
+# FIXME: test bnua- 0, target
+# FIXME: test bnul- 2, target
+# FIXME: test bnul- 0, target
+# FIXME: test bnula- 2, target
+# FIXME: test bnula- 0, target
+
+# CHECK: creqv 2, 2, 2
+0x4c 0x42 0x12 0x42
+
+# CHECK: crxor 2, 2, 2
+0x4c 0x42 0x11 0x82
+
+# CHECK: cror 2, 3, 3
+0x4c 0x43 0x1b 0x82
+
+# CHECK: crnor 2, 3, 3
+0x4c 0x43 0x18 0x42
+
+# CHECK: addi 2, 3, -128
+0x38 0x43 0xff 0x80
+
+# CHECK: addis 2, 3, -128
+0x3c 0x43 0xff 0x80
+
+# CHECK: addic 2, 3, -128
+0x30 0x43 0xff 0x80
+
+# CHECK: addic. 2, 3, -128
+0x34 0x43 0xff 0x80
+
+# CHECK: subf 2, 4, 3
+0x7c 0x44 0x18 0x50
+
+# CHECK: subf. 2, 4, 3
+0x7c 0x44 0x18 0x51
+
+# CHECK: subfc 2, 4, 3
+0x7c 0x44 0x18 0x10
+
+# CHECK: subfc. 2, 4, 3
+0x7c 0x44 0x18 0x11
+
+# CHECK: cmpdi 2, 3, 128
+0x2d 0x23 0x00 0x80
+
+# CHECK: cmpdi 0, 3, 128
+0x2c 0x23 0x00 0x80
+
+# CHECK: cmpd 2, 3, 4
+0x7d 0x23 0x20 0x00
+
+# CHECK: cmpd 0, 3, 4
+0x7c 0x23 0x20 0x00
+
+# CHECK: cmpldi 2, 3, 128
+0x29 0x23 0x00 0x80
+
+# CHECK: cmpldi 0, 3, 128
+0x28 0x23 0x00 0x80
+
+# CHECK: cmpld 2, 3, 4
+0x7d 0x23 0x20 0x40
+
+# CHECK: cmpld 0, 3, 4
+0x7c 0x23 0x20 0x40
+
+# CHECK: cmpwi 2, 3, 128
+0x2d 0x03 0x00 0x80
+
+# CHECK: cmpwi 0, 3, 128
+0x2c 0x03 0x00 0x80
+
+# CHECK: cmpw 2, 3, 4
+0x7d 0x03 0x20 0x00
+
+# CHECK: cmpw 0, 3, 4
+0x7c 0x03 0x20 0x00
+
+# CHECK: cmplwi 2, 3, 128
+0x29 0x03 0x00 0x80
+
+# CHECK: cmplwi 0, 3, 128
+0x28 0x03 0x00 0x80
+
+# CHECK: cmplw 2, 3, 4
+0x7d 0x03 0x20 0x40
+
+# CHECK: cmplw 0, 3, 4
+0x7c 0x03 0x20 0x40
+
+# CHECK: twi 16, 3, 4
+0x0e 0x03 0x00 0x04
+
+# CHECK: tw 16, 3, 4
+0x7e 0x03 0x20 0x08
+
+# CHECK: tdi 16, 3, 4
+0x0a 0x03 0x00 0x04
+
+# CHECK: td 16, 3, 4
+0x7e 0x03 0x20 0x88
+
+# CHECK: twi 20, 3, 4
+0x0e 0x83 0x00 0x04
+
+# CHECK: tw 20, 3, 4
+0x7e 0x83 0x20 0x08
+
+# CHECK: tdi 20, 3, 4
+0x0a 0x83 0x00 0x04
+
+# CHECK: td 20, 3, 4
+0x7e 0x83 0x20 0x88
+
+# CHECK: twi 4, 3, 4
+0x0c 0x83 0x00 0x04
+
+# CHECK: tw 4, 3, 4
+0x7c 0x83 0x20 0x08
+
+# CHECK: tdi 4, 3, 4
+0x08 0x83 0x00 0x04
+
+# CHECK: td 4, 3, 4
+0x7c 0x83 0x20 0x88
+
+# CHECK: twi 12, 3, 4
+0x0d 0x83 0x00 0x04
+
+# CHECK: tw 12, 3, 4
+0x7d 0x83 0x20 0x08
+
+# CHECK: tdi 12, 3, 4
+0x09 0x83 0x00 0x04
+
+# CHECK: td 12, 3, 4
+0x7d 0x83 0x20 0x88
+
+# CHECK: twi 8, 3, 4
+0x0d 0x03 0x00 0x04
+
+# CHECK: tw 8, 3, 4
+0x7d 0x03 0x20 0x08
+
+# CHECK: tdi 8, 3, 4
+0x09 0x03 0x00 0x04
+
+# CHECK: td 8, 3, 4
+0x7d 0x03 0x20 0x88
+
+# CHECK: twi 12, 3, 4
+0x0d 0x83 0x00 0x04
+
+# CHECK: tw 12, 3, 4
+0x7d 0x83 0x20 0x08
+
+# CHECK: tdi 12, 3, 4
+0x09 0x83 0x00 0x04
+
+# CHECK: td 12, 3, 4
+0x7d 0x83 0x20 0x88
+
+# CHECK: twi 24, 3, 4
+0x0f 0x03 0x00 0x04
+
+# CHECK: tw 24, 3, 4
+0x7f 0x03 0x20 0x08
+
+# CHECK: tdi 24, 3, 4
+0x0b 0x03 0x00 0x04
+
+# CHECK: td 24, 3, 4
+0x7f 0x03 0x20 0x88
+
+# CHECK: twi 20, 3, 4
+0x0e 0x83 0x00 0x04
+
+# CHECK: tw 20, 3, 4
+0x7e 0x83 0x20 0x08
+
+# CHECK: tdi 20, 3, 4
+0x0a 0x83 0x00 0x04
+
+# CHECK: td 20, 3, 4
+0x7e 0x83 0x20 0x88
+
+# CHECK: twi 2, 3, 4
+0x0c 0x43 0x00 0x04
+
+# CHECK: tw 2, 3, 4
+0x7c 0x43 0x20 0x08
+
+# CHECK: tdi 2, 3, 4
+0x08 0x43 0x00 0x04
+
+# CHECK: td 2, 3, 4
+0x7c 0x43 0x20 0x88
+
+# CHECK: twi 6, 3, 4
+0x0c 0xc3 0x00 0x04
+
+# CHECK: tw 6, 3, 4
+0x7c 0xc3 0x20 0x08
+
+# CHECK: tdi 6, 3, 4
+0x08 0xc3 0x00 0x04
+
+# CHECK: td 6, 3, 4
+0x7c 0xc3 0x20 0x88
+
+# CHECK: twi 5, 3, 4
+0x0c 0xa3 0x00 0x04
+
+# CHECK: tw 5, 3, 4
+0x7c 0xa3 0x20 0x08
+
+# CHECK: tdi 5, 3, 4
+0x08 0xa3 0x00 0x04
+
+# CHECK: td 5, 3, 4
+0x7c 0xa3 0x20 0x88
+
+# CHECK: twi 1, 3, 4
+0x0c 0x23 0x00 0x04
+
+# CHECK: tw 1, 3, 4
+0x7c 0x23 0x20 0x08
+
+# CHECK: tdi 1, 3, 4
+0x08 0x23 0x00 0x04
+
+# CHECK: td 1, 3, 4
+0x7c 0x23 0x20 0x88
+
+# CHECK: twi 5, 3, 4
+0x0c 0xa3 0x00 0x04
+
+# CHECK: tw 5, 3, 4
+0x7c 0xa3 0x20 0x08
+
+# CHECK: tdi 5, 3, 4
+0x08 0xa3 0x00 0x04
+
+# CHECK: td 5, 3, 4
+0x7c 0xa3 0x20 0x88
+
+# CHECK: twi 6, 3, 4
+0x0c 0xc3 0x00 0x04
+
+# CHECK: tw 6, 3, 4
+0x7c 0xc3 0x20 0x08
+
+# CHECK: tdi 6, 3, 4
+0x08 0xc3 0x00 0x04
+
+# CHECK: td 6, 3, 4
+0x7c 0xc3 0x20 0x88
+
+# CHECK: twi 31, 3, 4
+0x0f 0xe3 0x00 0x04
+
+# CHECK: tw 31, 3, 4
+0x7f 0xe3 0x20 0x08
+
+# CHECK: tdi 31, 3, 4
+0x0b 0xe3 0x00 0x04
+
+# CHECK: td 31, 3, 4
+0x7f 0xe3 0x20 0x88
+
+# CHECK: trap
+0x7f 0xe0 0x00 0x08
+
+# CHECK: rldicr 2, 3, 5, 3
+0x78 0x62 0x28 0xc4
+
+# CHECK: rldicr. 2, 3, 5, 3
+0x78 0x62 0x28 0xc5
+
+# CHECK: rldicl 2, 3, 9, 60
+0x78 0x62 0x4f 0x20
+
+# CHECK: rldicl. 2, 3, 9, 60
+0x78 0x62 0x4f 0x21
+
+# CHECK: rldimi 2, 3, 55, 5
+0x78 0x62 0xb9 0x4e
+
+# CHECK: rldimi. 2, 3, 55, 5
+0x78 0x62 0xb9 0x4f
+
+# CHECK: rldicl 2, 3, 4, 0
+0x78 0x62 0x20 0x00
+
+# CHECK: rldicl. 2, 3, 4, 0
+0x78 0x62 0x20 0x01
+
+# CHECK: rldicl 2, 3, 60, 0
+0x78 0x62 0xe0 0x02
+
+# CHECK: rldicl. 2, 3, 60, 0
+0x78 0x62 0xe0 0x03
+
+# CHECK: rldcl 2, 3, 4, 0
+0x78 0x62 0x20 0x10
+
+# CHECK: rldcl. 2, 3, 4, 0
+0x78 0x62 0x20 0x11
+
+# CHECK: sldi 2, 3, 4
+0x78 0x62 0x26 0xe4
+
+# CHECK: rldicr. 2, 3, 4, 59
+0x78 0x62 0x26 0xe5
+
+# CHECK: rldicl 2, 3, 60, 4
+0x78 0x62 0xe1 0x02
+
+# CHECK: rldicl. 2, 3, 60, 4
+0x78 0x62 0xe1 0x03
+
+# CHECK: rldicl 2, 3, 0, 4
+0x78 0x62 0x01 0x00
+
+# CHECK: rldicl. 2, 3, 0, 4
+0x78 0x62 0x01 0x01
+
+# CHECK: rldicr 2, 3, 0, 59
+0x78 0x62 0x06 0xe4
+
+# CHECK: rldicr. 2, 3, 0, 59
+0x78 0x62 0x06 0xe5
+
+# CHECK: rldic 2, 3, 4, 1
+0x78 0x62 0x20 0x48
+
+# CHECK: rldic. 2, 3, 4, 1
+0x78 0x62 0x20 0x49
+
+# CHECK: rlwinm 2, 3, 5, 0, 3
+0x54 0x62 0x28 0x06
+
+# CHECK: rlwinm. 2, 3, 5, 0, 3
+0x54 0x62 0x28 0x07
+
+# CHECK: rlwinm 2, 3, 9, 28, 31
+0x54 0x62 0x4f 0x3e
+
+# CHECK: rlwinm. 2, 3, 9, 28, 31
+0x54 0x62 0x4f 0x3f
+
+# CHECK: rlwimi 2, 3, 27, 5, 8
+0x50 0x62 0xd9 0x50
+
+# CHECK: rlwimi. 2, 3, 27, 5, 8
+0x50 0x62 0xd9 0x51
+
+# CHECK: rlwimi 2, 3, 23, 5, 8
+0x50 0x62 0xb9 0x50
+
+# CHECK: rlwimi. 2, 3, 23, 5, 8
+0x50 0x62 0xb9 0x51
+
+# CHECK: rlwinm 2, 3, 4, 0, 31
+0x54 0x62 0x20 0x3e
+
+# CHECK: rlwinm. 2, 3, 4, 0, 31
+0x54 0x62 0x20 0x3f
+
+# CHECK: rlwinm 2, 3, 28, 0, 31
+0x54 0x62 0xe0 0x3e
+
+# CHECK: rlwinm. 2, 3, 28, 0, 31
+0x54 0x62 0xe0 0x3f
+
+# CHECK: rlwnm 2, 3, 4, 0, 31
+0x5c 0x62 0x20 0x3e
+
+# CHECK: rlwnm. 2, 3, 4, 0, 31
+0x5c 0x62 0x20 0x3f
+
+# CHECK: slwi 2, 3, 4
+0x54 0x62 0x20 0x36
+
+# CHECK: rlwinm. 2, 3, 4, 0, 27
+0x54 0x62 0x20 0x37
+
+# CHECK: srwi 2, 3, 4
+0x54 0x62 0xe1 0x3e
+
+# CHECK: rlwinm. 2, 3, 28, 4, 31
+0x54 0x62 0xe1 0x3f
+
+# CHECK: rlwinm 2, 3, 0, 4, 31
+0x54 0x62 0x01 0x3e
+
+# CHECK: rlwinm. 2, 3, 0, 4, 31
+0x54 0x62 0x01 0x3f
+
+# CHECK: rlwinm 2, 3, 0, 0, 27
+0x54 0x62 0x00 0x36
+
+# CHECK: rlwinm. 2, 3, 0, 0, 27
+0x54 0x62 0x00 0x37
+
+# CHECK: rlwinm 2, 3, 4, 1, 27
+0x54 0x62 0x20 0x76
+
+# CHECK: rlwinm. 2, 3, 4, 1, 27
+0x54 0x62 0x20 0x77
+
+# CHECK: mtspr 1, 2
+0x7c 0x41 0x03 0xa6
+
+# CHECK: mfspr 2, 1
+0x7c 0x41 0x02 0xa6
+
+# CHECK: mtlr 2
+0x7c 0x48 0x03 0xa6
+
+# CHECK: mflr 2
+0x7c 0x48 0x02 0xa6
+
+# CHECK: mtctr 2
+0x7c 0x49 0x03 0xa6
+
+# CHECK: mfctr 2
+0x7c 0x49 0x02 0xa6
+
+# CHECK: nop
+0x60 0x00 0x00 0x00
+
+# CHECK: xori 0, 0, 0
+0x68 0x00 0x00 0x00
+
+# CHECK: li 2, 128
+0x38 0x40 0x00 0x80
+
+# CHECK: lis 2, 128
+0x3c 0x40 0x00 0x80
+
+# CHECK: mr 2, 3
+0x7c 0x62 0x1b 0x78
+
+# CHECK: or. 2, 3, 3
+0x7c 0x62 0x1b 0x79
+
+# CHECK: nor 2, 3, 3
+0x7c 0x62 0x18 0xf8
+
+# CHECK: nor. 2, 3, 3
+0x7c 0x62 0x18 0xf9
+
+# CHECK: mtcrf 255, 2
+0x7c 0x4f 0xf1 0x20
+
diff --git a/test/MC/Disassembler/PowerPC/ppc64-encoding-fp.txt b/test/MC/Disassembler/PowerPC/ppc64-encoding-fp.txt
new file mode 100644
index 000000000000..1c01c9dcf82a
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-encoding-fp.txt
@@ -0,0 +1,329 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-unknown -mcpu=pwr7 | FileCheck %s
+
+# CHECK: lfs 2, 128(4)
+0xc0 0x44 0x00 0x80
+
+# CHECK: lfsx 2, 3, 4
+0x7c 0x43 0x24 0x2e
+
+# CHECK: lfsu 2, 128(4)
+0xc4 0x44 0x00 0x80
+
+# CHECK: lfsux 2, 3, 4
+0x7c 0x43 0x24 0x6e
+
+# CHECK: lfd 2, 128(4)
+0xc8 0x44 0x00 0x80
+
+# CHECK: lfdx 2, 3, 4
+0x7c 0x43 0x24 0xae
+
+# CHECK: lfdu 2, 128(4)
+0xcc 0x44 0x00 0x80
+
+# CHECK: lfdux 2, 3, 4
+0x7c 0x43 0x24 0xee
+
+# CHECK: lfiwax 2, 3, 4
+0x7c 0x43 0x26 0xae
+
+# CHECK: lfiwzx 2, 3, 4
+0x7c 0x43 0x26 0xee
+
+# CHECK: stfs 2, 128(4)
+0xd0 0x44 0x00 0x80
+
+# CHECK: stfsx 2, 3, 4
+0x7c 0x43 0x25 0x2e
+
+# CHECK: stfsu 2, 128(4)
+0xd4 0x44 0x00 0x80
+
+# CHECK: stfsux 2, 3, 4
+0x7c 0x43 0x25 0x6e
+
+# CHECK: stfd 2, 128(4)
+0xd8 0x44 0x00 0x80
+
+# CHECK: stfdx 2, 3, 4
+0x7c 0x43 0x25 0xae
+
+# CHECK: stfdu 2, 128(4)
+0xdc 0x44 0x00 0x80
+
+# CHECK: stfdux 2, 3, 4
+0x7c 0x43 0x25 0xee
+
+# CHECK: stfiwx 2, 3, 4
+0x7c 0x43 0x27 0xae
+
+# CHECK: fmr 2, 3
+0xfc 0x40 0x18 0x90
+
+# CHECK: fmr. 2, 3
+0xfc 0x40 0x18 0x91
+
+# CHECK: fneg 2, 3
+0xfc 0x40 0x18 0x50
+
+# CHECK: fneg. 2, 3
+0xfc 0x40 0x18 0x51
+
+# CHECK: fabs 2, 3
+0xfc 0x40 0x1a 0x10
+
+# CHECK: fabs. 2, 3
+0xfc 0x40 0x1a 0x11
+
+# CHECK: fnabs 2, 3
+0xfc 0x40 0x19 0x10
+
+# CHECK: fnabs. 2, 3
+0xfc 0x40 0x19 0x11
+
+# CHECK: fcpsgn 2, 3, 4
+0xfc 0x43 0x20 0x10
+
+# CHECK: fcpsgn. 2, 3, 4
+0xfc 0x43 0x20 0x11
+
+# CHECK: fadd 2, 3, 4
+0xfc 0x43 0x20 0x2a
+
+# CHECK: fadd. 2, 3, 4
+0xfc 0x43 0x20 0x2b
+
+# CHECK: fadds 2, 3, 4
+0xec 0x43 0x20 0x2a
+
+# CHECK: fadds. 2, 3, 4
+0xec 0x43 0x20 0x2b
+
+# CHECK: fsub 2, 3, 4
+0xfc 0x43 0x20 0x28
+
+# CHECK: fsub. 2, 3, 4
+0xfc 0x43 0x20 0x29
+
+# CHECK: fsubs 2, 3, 4
+0xec 0x43 0x20 0x28
+
+# CHECK: fsubs. 2, 3, 4
+0xec 0x43 0x20 0x29
+
+# CHECK: fmul 2, 3, 4
+0xfc 0x43 0x01 0x32
+
+# CHECK: fmul. 2, 3, 4
+0xfc 0x43 0x01 0x33
+
+# CHECK: fmuls 2, 3, 4
+0xec 0x43 0x01 0x32
+
+# CHECK: fmuls. 2, 3, 4
+0xec 0x43 0x01 0x33
+
+# CHECK: fdiv 2, 3, 4
+0xfc 0x43 0x20 0x24
+
+# CHECK: fdiv. 2, 3, 4
+0xfc 0x43 0x20 0x25
+
+# CHECK: fdivs 2, 3, 4
+0xec 0x43 0x20 0x24
+
+# CHECK: fdivs. 2, 3, 4
+0xec 0x43 0x20 0x25
+
+# CHECK: fsqrt 2, 3
+0xfc 0x40 0x18 0x2c
+
+# CHECK: fsqrt. 2, 3
+0xfc 0x40 0x18 0x2d
+
+# CHECK: fsqrts 2, 3
+0xec 0x40 0x18 0x2c
+
+# CHECK: fsqrts. 2, 3
+0xec 0x40 0x18 0x2d
+
+# CHECK: fre 2, 3
+0xfc 0x40 0x18 0x30
+
+# CHECK: fre. 2, 3
+0xfc 0x40 0x18 0x31
+
+# CHECK: fres 2, 3
+0xec 0x40 0x18 0x30
+
+# CHECK: fres. 2, 3
+0xec 0x40 0x18 0x31
+
+# CHECK: frsqrte 2, 3
+0xfc 0x40 0x18 0x34
+
+# CHECK: frsqrte. 2, 3
+0xfc 0x40 0x18 0x35
+
+# CHECK: frsqrtes 2, 3
+0xec 0x40 0x18 0x34
+
+# CHECK: frsqrtes. 2, 3
+0xec 0x40 0x18 0x35
+
+# CHECK: fmadd 2, 3, 4, 5
+0xfc 0x43 0x29 0x3a
+
+# CHECK: fmadd. 2, 3, 4, 5
+0xfc 0x43 0x29 0x3b
+
+# CHECK: fmadds 2, 3, 4, 5
+0xec 0x43 0x29 0x3a
+
+# CHECK: fmadds. 2, 3, 4, 5
+0xec 0x43 0x29 0x3b
+
+# CHECK: fmsub 2, 3, 4, 5
+0xfc 0x43 0x29 0x38
+
+# CHECK: fmsub. 2, 3, 4, 5
+0xfc 0x43 0x29 0x39
+
+# CHECK: fmsubs 2, 3, 4, 5
+0xec 0x43 0x29 0x38
+
+# CHECK: fmsubs. 2, 3, 4, 5
+0xec 0x43 0x29 0x39
+
+# CHECK: fnmadd 2, 3, 4, 5
+0xfc 0x43 0x29 0x3e
+
+# CHECK: fnmadd. 2, 3, 4, 5
+0xfc 0x43 0x29 0x3f
+
+# CHECK: fnmadds 2, 3, 4, 5
+0xec 0x43 0x29 0x3e
+
+# CHECK: fnmadds. 2, 3, 4, 5
+0xec 0x43 0x29 0x3f
+
+# CHECK: fnmsub 2, 3, 4, 5
+0xfc 0x43 0x29 0x3c
+
+# CHECK: fnmsub. 2, 3, 4, 5
+0xfc 0x43 0x29 0x3d
+
+# CHECK: fnmsubs 2, 3, 4, 5
+0xec 0x43 0x29 0x3c
+
+# CHECK: fnmsubs. 2, 3, 4, 5
+0xec 0x43 0x29 0x3d
+
+# CHECK: frsp 2, 3
+0xfc 0x40 0x18 0x18
+
+# CHECK: frsp. 2, 3
+0xfc 0x40 0x18 0x19
+
+# CHECK: fctid 2, 3
+0xfc 0x40 0x1e 0x5c
+
+# CHECK: fctid. 2, 3
+0xfc 0x40 0x1e 0x5d
+
+# CHECK: fctidz 2, 3
+0xfc 0x40 0x1e 0x5e
+
+# CHECK: fctidz. 2, 3
+0xfc 0x40 0x1e 0x5f
+
+# CHECK: fctiduz 2, 3
+0xfc 0x40 0x1f 0x5e
+
+# CHECK: fctiduz. 2, 3
+0xfc 0x40 0x1f 0x5f
+
+# CHECK: fctiw 2, 3
+0xfc 0x40 0x18 0x1c
+
+# CHECK: fctiw. 2, 3
+0xfc 0x40 0x18 0x1d
+
+# CHECK: fctiwz 2, 3
+0xfc 0x40 0x18 0x1e
+
+# CHECK: fctiwz. 2, 3
+0xfc 0x40 0x18 0x1f
+
+# CHECK: fctiwuz 2, 3
+0xfc 0x40 0x19 0x1e
+
+# CHECK: fctiwuz. 2, 3
+0xfc 0x40 0x19 0x1f
+
+# CHECK: fcfid 2, 3
+0xfc 0x40 0x1e 0x9c
+
+# CHECK: fcfid. 2, 3
+0xfc 0x40 0x1e 0x9d
+
+# CHECK: fcfidu 2, 3
+0xfc 0x40 0x1f 0x9c
+
+# CHECK: fcfidu. 2, 3
+0xfc 0x40 0x1f 0x9d
+
+# CHECK: fcfids 2, 3
+0xec 0x40 0x1e 0x9c
+
+# CHECK: fcfids. 2, 3
+0xec 0x40 0x1e 0x9d
+
+# CHECK: fcfidus 2, 3
+0xec 0x40 0x1f 0x9c
+
+# CHECK: fcfidus. 2, 3
+0xec 0x40 0x1f 0x9d
+
+# CHECK: frin 2, 3
+0xfc 0x40 0x1b 0x10
+
+# CHECK: frin. 2, 3
+0xfc 0x40 0x1b 0x11
+
+# CHECK: frip 2, 3
+0xfc 0x40 0x1b 0x90
+
+# CHECK: frip. 2, 3
+0xfc 0x40 0x1b 0x91
+
+# CHECK: friz 2, 3
+0xfc 0x40 0x1b 0x50
+
+# CHECK: friz. 2, 3
+0xfc 0x40 0x1b 0x51
+
+# CHECK: frim 2, 3
+0xfc 0x40 0x1b 0xd0
+
+# CHECK: frim. 2, 3
+0xfc 0x40 0x1b 0xd1
+
+# CHECK: fcmpu 2, 3, 4
+0xfd 0x03 0x20 0x00
+
+# CHECK: fsel 2, 3, 4, 5
+0xfc 0x43 0x29 0x2e
+
+# CHECK: fsel. 2, 3, 4, 5
+0xfc 0x43 0x29 0x2f
+
+# CHECK: mffs 2
+0xfc 0x40 0x04 0x8e
+
+# CHECK: mtfsb0 31
+0xff 0xe0 0x00 0x8c
+
+# CHECK: mtfsb1 31
+0xff 0xe0 0x00 0x4c
+
diff --git a/test/MC/Disassembler/PowerPC/ppc64-encoding-vmx.txt b/test/MC/Disassembler/PowerPC/ppc64-encoding-vmx.txt
new file mode 100644
index 000000000000..3896bf755434
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-encoding-vmx.txt
@@ -0,0 +1,509 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-unknown -mcpu=pwr7 | FileCheck %s
+
+# CHECK: lvebx 2, 3, 4
+0x7c 0x43 0x20 0x0e
+
+# CHECK: lvehx 2, 3, 4
+0x7c 0x43 0x20 0x4e
+
+# CHECK: lvewx 2, 3, 4
+0x7c 0x43 0x20 0x8e
+
+# CHECK: lvx 2, 3, 4
+0x7c 0x43 0x20 0xce
+
+# CHECK: lvxl 2, 3, 4
+0x7c 0x43 0x22 0xce
+
+# CHECK: stvebx 2, 3, 4
+0x7c 0x43 0x21 0x0e
+
+# CHECK: stvehx 2, 3, 4
+0x7c 0x43 0x21 0x4e
+
+# CHECK: stvewx 2, 3, 4
+0x7c 0x43 0x21 0x8e
+
+# CHECK: stvx 2, 3, 4
+0x7c 0x43 0x21 0xce
+
+# CHECK: stvxl 2, 3, 4
+0x7c 0x43 0x23 0xce
+
+# CHECK: lvsl 2, 3, 4
+0x7c 0x43 0x20 0x0c
+
+# CHECK: lvsr 2, 3, 4
+0x7c 0x43 0x20 0x4c
+
+# CHECK: vpkpx 2, 3, 4
+0x10 0x43 0x23 0x0e
+
+# CHECK: vpkshss 2, 3, 4
+0x10 0x43 0x21 0x8e
+
+# CHECK: vpkshus 2, 3, 4
+0x10 0x43 0x21 0x0e
+
+# CHECK: vpkswss 2, 3, 4
+0x10 0x43 0x21 0xce
+
+# CHECK: vpkswus 2, 3, 4
+0x10 0x43 0x21 0x4e
+
+# CHECK: vpkuhum 2, 3, 4
+0x10 0x43 0x20 0x0e
+
+# CHECK: vpkuhus 2, 3, 4
+0x10 0x43 0x20 0x8e
+
+# CHECK: vpkuwum 2, 3, 4
+0x10 0x43 0x20 0x4e
+
+# CHECK: vpkuwus 2, 3, 4
+0x10 0x43 0x20 0xce
+
+# CHECK: vupkhpx 2, 3
+0x10 0x40 0x1b 0x4e
+
+# CHECK: vupkhsb 2, 3
+0x10 0x40 0x1a 0x0e
+
+# CHECK: vupkhsh 2, 3
+0x10 0x40 0x1a 0x4e
+
+# CHECK: vupklpx 2, 3
+0x10 0x40 0x1b 0xce
+
+# CHECK: vupklsb 2, 3
+0x10 0x40 0x1a 0x8e
+
+# CHECK: vupklsh 2, 3
+0x10 0x40 0x1a 0xce
+
+# CHECK: vmrghb 2, 3, 4
+0x10 0x43 0x20 0x0c
+
+# CHECK: vmrghh 2, 3, 4
+0x10 0x43 0x20 0x4c
+
+# CHECK: vmrghw 2, 3, 4
+0x10 0x43 0x20 0x8c
+
+# CHECK: vmrglb 2, 3, 4
+0x10 0x43 0x21 0x0c
+
+# CHECK: vmrglh 2, 3, 4
+0x10 0x43 0x21 0x4c
+
+# CHECK: vmrglw 2, 3, 4
+0x10 0x43 0x21 0x8c
+
+# CHECK: vspltb 2, 3, 1
+0x10 0x41 0x1a 0x0c
+
+# CHECK: vsplth 2, 3, 1
+0x10 0x41 0x1a 0x4c
+
+# CHECK: vspltw 2, 3, 1
+0x10 0x41 0x1a 0x8c
+
+# CHECK: vspltisb 2, 3
+0x10 0x43 0x03 0x0c
+
+# CHECK: vspltish 2, 3
+0x10 0x43 0x03 0x4c
+
+# CHECK: vspltisw 2, 3
+0x10 0x43 0x03 0x8c
+
+# CHECK: vperm 2, 3, 4, 5
+0x10 0x43 0x21 0x6b
+
+# CHECK: vsel 2, 3, 4, 5
+0x10 0x43 0x21 0x6a
+
+# CHECK: vsl 2, 3, 4
+0x10 0x43 0x21 0xc4
+
+# CHECK: vsldoi 2, 3, 4, 5
+0x10 0x43 0x21 0x6c
+
+# CHECK: vslo 2, 3, 4
+0x10 0x43 0x24 0x0c
+
+# CHECK: vsr 2, 3, 4
+0x10 0x43 0x22 0xc4
+
+# CHECK: vsro 2, 3, 4
+0x10 0x43 0x24 0x4c
+
+# CHECK: vaddcuw 2, 3, 4
+0x10 0x43 0x21 0x80
+
+# CHECK: vaddsbs 2, 3, 4
+0x10 0x43 0x23 0x00
+
+# CHECK: vaddshs 2, 3, 4
+0x10 0x43 0x23 0x40
+
+# CHECK: vaddsws 2, 3, 4
+0x10 0x43 0x23 0x80
+
+# CHECK: vaddubm 2, 3, 4
+0x10 0x43 0x20 0x00
+
+# CHECK: vadduhm 2, 3, 4
+0x10 0x43 0x20 0x40
+
+# CHECK: vadduwm 2, 3, 4
+0x10 0x43 0x20 0x80
+
+# CHECK: vaddubs 2, 3, 4
+0x10 0x43 0x22 0x00
+
+# CHECK: vadduhs 2, 3, 4
+0x10 0x43 0x22 0x40
+
+# CHECK: vadduws 2, 3, 4
+0x10 0x43 0x22 0x80
+
+# CHECK: vsubcuw 2, 3, 4
+0x10 0x43 0x25 0x80
+
+# CHECK: vsubsbs 2, 3, 4
+0x10 0x43 0x27 0x00
+
+# CHECK: vsubshs 2, 3, 4
+0x10 0x43 0x27 0x40
+
+# CHECK: vsubsws 2, 3, 4
+0x10 0x43 0x27 0x80
+
+# CHECK: vsububm 2, 3, 4
+0x10 0x43 0x24 0x00
+
+# CHECK: vsubuhm 2, 3, 4
+0x10 0x43 0x24 0x40
+
+# CHECK: vsubuwm 2, 3, 4
+0x10 0x43 0x24 0x80
+
+# CHECK: vsububs 2, 3, 4
+0x10 0x43 0x26 0x00
+
+# CHECK: vsubuhs 2, 3, 4
+0x10 0x43 0x26 0x40
+
+# CHECK: vsubuws 2, 3, 4
+0x10 0x43 0x26 0x80
+
+# CHECK: vmulesb 2, 3, 4
+0x10 0x43 0x23 0x08
+
+# CHECK: vmulesh 2, 3, 4
+0x10 0x43 0x23 0x48
+
+# CHECK: vmuleub 2, 3, 4
+0x10 0x43 0x22 0x08
+
+# CHECK: vmuleuh 2, 3, 4
+0x10 0x43 0x22 0x48
+
+# CHECK: vmulosb 2, 3, 4
+0x10 0x43 0x21 0x08
+
+# CHECK: vmulosh 2, 3, 4
+0x10 0x43 0x21 0x48
+
+# CHECK: vmuloub 2, 3, 4
+0x10 0x43 0x20 0x08
+
+# CHECK: vmulouh 2, 3, 4
+0x10 0x43 0x20 0x48
+
+# CHECK: vmhaddshs 2, 3, 4, 5
+0x10 0x43 0x21 0x60
+
+# CHECK: vmhraddshs 2, 3, 4, 5
+0x10 0x43 0x21 0x61
+
+# CHECK: vmladduhm 2, 3, 4, 5
+0x10 0x43 0x21 0x62
+
+# CHECK: vmsumubm 2, 3, 4, 5
+0x10 0x43 0x21 0x64
+
+# CHECK: vmsummbm 2, 3, 4, 5
+0x10 0x43 0x21 0x65
+
+# CHECK: vmsumshm 2, 3, 4, 5
+0x10 0x43 0x21 0x68
+
+# CHECK: vmsumshs 2, 3, 4, 5
+0x10 0x43 0x21 0x69
+
+# CHECK: vmsumuhm 2, 3, 4, 5
+0x10 0x43 0x21 0x66
+
+# CHECK: vmsumuhs 2, 3, 4, 5
+0x10 0x43 0x21 0x67
+
+# CHECK: vsumsws 2, 3, 4
+0x10 0x43 0x27 0x88
+
+# CHECK: vsum2sws 2, 3, 4
+0x10 0x43 0x26 0x88
+
+# CHECK: vsum4sbs 2, 3, 4
+0x10 0x43 0x27 0x08
+
+# CHECK: vsum4shs 2, 3, 4
+0x10 0x43 0x26 0x48
+
+# CHECK: vsum4ubs 2, 3, 4
+0x10 0x43 0x26 0x08
+
+# CHECK: vavgsb 2, 3, 4
+0x10 0x43 0x25 0x02
+
+# CHECK: vavgsh 2, 3, 4
+0x10 0x43 0x25 0x42
+
+# CHECK: vavgsw 2, 3, 4
+0x10 0x43 0x25 0x82
+
+# CHECK: vavgub 2, 3, 4
+0x10 0x43 0x24 0x02
+
+# CHECK: vavguh 2, 3, 4
+0x10 0x43 0x24 0x42
+
+# CHECK: vavguw 2, 3, 4
+0x10 0x43 0x24 0x82
+
+# CHECK: vmaxsb 2, 3, 4
+0x10 0x43 0x21 0x02
+
+# CHECK: vmaxsh 2, 3, 4
+0x10 0x43 0x21 0x42
+
+# CHECK: vmaxsw 2, 3, 4
+0x10 0x43 0x21 0x82
+
+# CHECK: vmaxub 2, 3, 4
+0x10 0x43 0x20 0x02
+
+# CHECK: vmaxuh 2, 3, 4
+0x10 0x43 0x20 0x42
+
+# CHECK: vmaxuw 2, 3, 4
+0x10 0x43 0x20 0x82
+
+# CHECK: vminsb 2, 3, 4
+0x10 0x43 0x23 0x02
+
+# CHECK: vminsh 2, 3, 4
+0x10 0x43 0x23 0x42
+
+# CHECK: vminsw 2, 3, 4
+0x10 0x43 0x23 0x82
+
+# CHECK: vminub 2, 3, 4
+0x10 0x43 0x22 0x02
+
+# CHECK: vminuh 2, 3, 4
+0x10 0x43 0x22 0x42
+
+# CHECK: vminuw 2, 3, 4
+0x10 0x43 0x22 0x82
+
+# CHECK: vcmpequb 2, 3, 4
+0x10 0x43 0x20 0x06
+
+# CHECK: vcmpequb. 2, 3, 4
+0x10 0x43 0x24 0x06
+
+# CHECK: vcmpequh 2, 3, 4
+0x10 0x43 0x20 0x46
+
+# CHECK: vcmpequh. 2, 3, 4
+0x10 0x43 0x24 0x46
+
+# CHECK: vcmpequw 2, 3, 4
+0x10 0x43 0x20 0x86
+
+# CHECK: vcmpequw. 2, 3, 4
+0x10 0x43 0x24 0x86
+
+# CHECK: vcmpgtsb 2, 3, 4
+0x10 0x43 0x23 0x06
+
+# CHECK: vcmpgtsb. 2, 3, 4
+0x10 0x43 0x27 0x06
+
+# CHECK: vcmpgtsh 2, 3, 4
+0x10 0x43 0x23 0x46
+
+# CHECK: vcmpgtsh. 2, 3, 4
+0x10 0x43 0x27 0x46
+
+# CHECK: vcmpgtsw 2, 3, 4
+0x10 0x43 0x23 0x86
+
+# CHECK: vcmpgtsw. 2, 3, 4
+0x10 0x43 0x27 0x86
+
+# CHECK: vcmpgtub 2, 3, 4
+0x10 0x43 0x22 0x06
+
+# CHECK: vcmpgtub. 2, 3, 4
+0x10 0x43 0x26 0x06
+
+# CHECK: vcmpgtuh 2, 3, 4
+0x10 0x43 0x22 0x46
+
+# CHECK: vcmpgtuh. 2, 3, 4
+0x10 0x43 0x26 0x46
+
+# CHECK: vcmpgtuw 2, 3, 4
+0x10 0x43 0x22 0x86
+
+# CHECK: vcmpgtuw. 2, 3, 4
+0x10 0x43 0x26 0x86
+
+# CHECK: vand 2, 3, 4
+0x10 0x43 0x24 0x04
+
+# CHECK: vandc 2, 3, 4
+0x10 0x43 0x24 0x44
+
+# CHECK: vnor 2, 3, 4
+0x10 0x43 0x25 0x04
+
+# CHECK: vor 2, 3, 4
+0x10 0x43 0x24 0x84
+
+# CHECK: vxor 2, 3, 4
+0x10 0x43 0x24 0xc4
+
+# CHECK: vrlb 2, 3, 4
+0x10 0x43 0x20 0x04
+
+# CHECK: vrlh 2, 3, 4
+0x10 0x43 0x20 0x44
+
+# CHECK: vrlw 2, 3, 4
+0x10 0x43 0x20 0x84
+
+# CHECK: vslb 2, 3, 4
+0x10 0x43 0x21 0x04
+
+# CHECK: vslh 2, 3, 4
+0x10 0x43 0x21 0x44
+
+# CHECK: vslw 2, 3, 4
+0x10 0x43 0x21 0x84
+
+# CHECK: vsrb 2, 3, 4
+0x10 0x43 0x22 0x04
+
+# CHECK: vsrh 2, 3, 4
+0x10 0x43 0x22 0x44
+
+# CHECK: vsrw 2, 3, 4
+0x10 0x43 0x22 0x84
+
+# CHECK: vsrab 2, 3, 4
+0x10 0x43 0x23 0x04
+
+# CHECK: vsrah 2, 3, 4
+0x10 0x43 0x23 0x44
+
+# CHECK: vsraw 2, 3, 4
+0x10 0x43 0x23 0x84
+
+# CHECK: vaddfp 2, 3, 4
+0x10 0x43 0x20 0x0a
+
+# CHECK: vsubfp 2, 3, 4
+0x10 0x43 0x20 0x4a
+
+# CHECK: vmaddfp 2, 3, 4, 5
+0x10 0x43 0x29 0x2e
+
+# CHECK: vnmsubfp 2, 3, 4, 5
+0x10 0x43 0x29 0x2f
+
+# CHECK: vmaxfp 2, 3, 4
+0x10 0x43 0x24 0x0a
+
+# CHECK: vminfp 2, 3, 4
+0x10 0x43 0x24 0x4a
+
+# CHECK: vctsxs 2, 3, 4
+0x10 0x44 0x1b 0xca
+
+# CHECK: vctuxs 2, 3, 4
+0x10 0x44 0x1b 0x8a
+
+# CHECK: vcfsx 2, 3, 4
+0x10 0x44 0x1b 0x4a
+
+# CHECK: vcfux 2, 3, 4
+0x10 0x44 0x1b 0x0a
+
+# CHECK: vrfim 2, 3
+0x10 0x40 0x1a 0xca
+
+# CHECK: vrfin 2, 3
+0x10 0x40 0x1a 0x0a
+
+# CHECK: vrfip 2, 3
+0x10 0x40 0x1a 0x8a
+
+# CHECK: vrfiz 2, 3
+0x10 0x40 0x1a 0x4a
+
+# CHECK: vcmpbfp 2, 3, 4
+0x10 0x43 0x23 0xc6
+
+# CHECK: vcmpbfp. 2, 3, 4
+0x10 0x43 0x27 0xc6
+
+# CHECK: vcmpeqfp 2, 3, 4
+0x10 0x43 0x20 0xc6
+
+# CHECK: vcmpeqfp. 2, 3, 4
+0x10 0x43 0x24 0xc6
+
+# CHECK: vcmpgefp 2, 3, 4
+0x10 0x43 0x21 0xc6
+
+# CHECK: vcmpgefp. 2, 3, 4
+0x10 0x43 0x25 0xc6
+
+# CHECK: vcmpgtfp 2, 3, 4
+0x10 0x43 0x22 0xc6
+
+# CHECK: vcmpgtfp. 2, 3, 4
+0x10 0x43 0x26 0xc6
+
+# CHECK: vexptefp 2, 3
+0x10 0x40 0x19 0x8a
+
+# CHECK: vlogefp 2, 3
+0x10 0x40 0x19 0xca
+
+# CHECK: vrefp 2, 3
+0x10 0x40 0x19 0x0a
+
+# CHECK: vrsqrtefp 2, 3
+0x10 0x40 0x19 0x4a
+
+# CHECK: mtvscr 2
+0x10 0x00 0x16 0x44
+
+# CHECK: mfvscr 2
+0x10 0x40 0x06 0x04
+
diff --git a/test/MC/Disassembler/PowerPC/ppc64-encoding.txt b/test/MC/Disassembler/PowerPC/ppc64-encoding.txt
new file mode 100644
index 000000000000..33a8c0ed5ded
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-encoding.txt
@@ -0,0 +1,621 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-unknown -mcpu=pwr7 | FileCheck %s
+
+# FIXME: test b target
+
+# FIXME: test ba target
+
+# FIXME: test bl target
+
+# FIXME: test bla target
+
+# FIXME: test bc 4, 10, target
+
+# FIXME: test bca 4, 10, target
+
+# FIXME: test bcl 4, 10, target
+
+# FIXME: test bcla 4, 10, target
+
+# CHECK: bclr 4, 10, 3
+0x4c 0x8a 0x18 0x20
+
+# CHECK: bclr 4, 10, 0
+0x4c 0x8a 0x00 0x20
+
+# CHECK: bclrl 4, 10, 3
+0x4c 0x8a 0x18 0x21
+
+# CHECK: bclrl 4, 10, 0
+0x4c 0x8a 0x00 0x21
+
+# CHECK: bcctr 4, 10, 3
+0x4c 0x8a 0x1c 0x20
+
+# CHECK: bcctr 4, 10, 0
+0x4c 0x8a 0x04 0x20
+
+# CHECK: bcctrl 4, 10, 3
+0x4c 0x8a 0x1c 0x21
+
+# CHECK: bcctrl 4, 10, 0
+0x4c 0x8a 0x04 0x21
+
+# CHECK: crand 2, 3, 4
+0x4c 0x43 0x22 0x02
+
+# CHECK: crnand 2, 3, 4
+0x4c 0x43 0x21 0xc2
+
+# CHECK: cror 2, 3, 4
+0x4c 0x43 0x23 0x82
+
+# CHECK: crxor 2, 3, 4
+0x4c 0x43 0x21 0x82
+
+# CHECK: crnor 2, 3, 4
+0x4c 0x43 0x20 0x42
+
+# CHECK: creqv 2, 3, 4
+0x4c 0x43 0x22 0x42
+
+# CHECK: crandc 2, 3, 4
+0x4c 0x43 0x21 0x02
+
+# CHECK: crorc 2, 3, 4
+0x4c 0x43 0x23 0x42
+
+# CHECK: mcrf 2, 3
+0x4d 0x0c 0x00 0x00
+
+# CHECK: sc 1
+0x44 0x00 0x00 0x22
+
+# CHECK: sc 0
+0x44 0x00 0x00 0x02
+
+# CHECK: lbz 2, 128(4)
+0x88 0x44 0x00 0x80
+
+# CHECK: lbzx 2, 3, 4
+0x7c 0x43 0x20 0xae
+
+# CHECK: lbzu 2, 128(4)
+0x8c 0x44 0x00 0x80
+
+# CHECK: lbzux 2, 3, 4
+0x7c 0x43 0x20 0xee
+
+# CHECK: lhz 2, 128(4)
+0xa0 0x44 0x00 0x80
+
+# CHECK: lhzx 2, 3, 4
+0x7c 0x43 0x22 0x2e
+
+# CHECK: lhzu 2, 128(4)
+0xa4 0x44 0x00 0x80
+
+# CHECK: lhzux 2, 3, 4
+0x7c 0x43 0x22 0x6e
+
+# CHECK: lha 2, 128(4)
+0xa8 0x44 0x00 0x80
+
+# CHECK: lhax 2, 3, 4
+0x7c 0x43 0x22 0xae
+
+# CHECK: lhau 2, 128(4)
+0xac 0x44 0x00 0x80
+
+# CHECK: lhaux 2, 3, 4
+0x7c 0x43 0x22 0xee
+
+# CHECK: lwz 2, 128(4)
+0x80 0x44 0x00 0x80
+
+# CHECK: lwzx 2, 3, 4
+0x7c 0x43 0x20 0x2e
+
+# CHECK: lwzu 2, 128(4)
+0x84 0x44 0x00 0x80
+
+# CHECK: lwzux 2, 3, 4
+0x7c 0x43 0x20 0x6e
+
+# CHECK: lwa 2, 128(4)
+0xe8 0x44 0x00 0x82
+
+# CHECK: lwax 2, 3, 4
+0x7c 0x43 0x22 0xaa
+
+# CHECK: lwaux 2, 3, 4
+0x7c 0x43 0x22 0xea
+
+# CHECK: ld 2, 128(4)
+0xe8 0x44 0x00 0x80
+
+# CHECK: ldx 2, 3, 4
+0x7c 0x43 0x20 0x2a
+
+# CHECK: ldu 2, 128(4)
+0xe8 0x44 0x00 0x81
+
+# CHECK: ldux 2, 3, 4
+0x7c 0x43 0x20 0x6a
+
+# CHECK: stb 2, 128(4)
+0x98 0x44 0x00 0x80
+
+# CHECK: stbx 2, 3, 4
+0x7c 0x43 0x21 0xae
+
+# CHECK: stbu 2, 128(4)
+0x9c 0x44 0x00 0x80
+
+# CHECK: stbux 2, 3, 4
+0x7c 0x43 0x21 0xee
+
+# CHECK: sth 2, 128(4)
+0xb0 0x44 0x00 0x80
+
+# CHECK: sthx 2, 3, 4
+0x7c 0x43 0x23 0x2e
+
+# CHECK: sthu 2, 128(4)
+0xb4 0x44 0x00 0x80
+
+# CHECK: sthux 2, 3, 4
+0x7c 0x43 0x23 0x6e
+
+# CHECK: stw 2, 128(4)
+0x90 0x44 0x00 0x80
+
+# CHECK: stwx 2, 3, 4
+0x7c 0x43 0x21 0x2e
+
+# CHECK: stwu 2, 128(4)
+0x94 0x44 0x00 0x80
+
+# CHECK: stwux 2, 3, 4
+0x7c 0x43 0x21 0x6e
+
+# CHECK: std 2, 128(4)
+0xf8 0x44 0x00 0x80
+
+# CHECK: stdx 2, 3, 4
+0x7c 0x43 0x21 0x2a
+
+# CHECK: stdu 2, 128(4)
+0xf8 0x44 0x00 0x81
+
+# CHECK: stdux 2, 3, 4
+0x7c 0x43 0x21 0x6a
+
+# CHECK: lhbrx 2, 3, 4
+0x7c 0x43 0x26 0x2c
+
+# CHECK: sthbrx 2, 3, 4
+0x7c 0x43 0x27 0x2c
+
+# CHECK: lwbrx 2, 3, 4
+0x7c 0x43 0x24 0x2c
+
+# CHECK: stwbrx 2, 3, 4
+0x7c 0x43 0x25 0x2c
+
+# CHECK: ldbrx 2, 3, 4
+0x7c 0x43 0x24 0x28
+
+# CHECK: stdbrx 2, 3, 4
+0x7c 0x43 0x25 0x28
+
+# CHECK: lmw 2, 128(1)
+0xb8 0x41 0x00 0x80
+
+# CHECK: stmw 2, 128(1)
+0xbc 0x41 0x00 0x80
+
+# CHECK: addi 2, 3, 128
+0x38 0x43 0x00 0x80
+
+# CHECK: addis 2, 3, 128
+0x3c 0x43 0x00 0x80
+
+# CHECK: add 2, 3, 4
+0x7c 0x43 0x22 0x14
+
+# CHECK: add. 2, 3, 4
+0x7c 0x43 0x22 0x15
+
+# CHECK: subf 2, 3, 4
+0x7c 0x43 0x20 0x50
+
+# CHECK: subf. 2, 3, 4
+0x7c 0x43 0x20 0x51
+
+# CHECK: addic 2, 3, 128
+0x30 0x43 0x00 0x80
+
+# CHECK: addic. 2, 3, 128
+0x34 0x43 0x00 0x80
+
+# CHECK: subfic 2, 3, 4
+0x20 0x43 0x00 0x04
+
+# CHECK: addc 2, 3, 4
+0x7c 0x43 0x20 0x14
+
+# CHECK: addc. 2, 3, 4
+0x7c 0x43 0x20 0x15
+
+# CHECK: subfc 2, 3, 4
+0x7c 0x43 0x20 0x10
+
+# CHECK: subfc. 2, 3, 4
+0x7c 0x43 0x20 0x11
+
+# CHECK: adde 2, 3, 4
+0x7c 0x43 0x21 0x14
+
+# CHECK: adde. 2, 3, 4
+0x7c 0x43 0x21 0x15
+
+# CHECK: subfe 2, 3, 4
+0x7c 0x43 0x21 0x10
+
+# CHECK: subfe. 2, 3, 4
+0x7c 0x43 0x21 0x11
+
+# CHECK: addme 2, 3
+0x7c 0x43 0x01 0xd4
+
+# CHECK: addme. 2, 3
+0x7c 0x43 0x01 0xd5
+
+# CHECK: subfme 2, 3
+0x7c 0x43 0x01 0xd0
+
+# CHECK: subfme. 2, 3
+0x7c 0x43 0x01 0xd1
+
+# CHECK: addze 2, 3
+0x7c 0x43 0x01 0x94
+
+# CHECK: addze. 2, 3
+0x7c 0x43 0x01 0x95
+
+# CHECK: subfze 2, 3
+0x7c 0x43 0x01 0x90
+
+# CHECK: subfze. 2, 3
+0x7c 0x43 0x01 0x91
+
+# CHECK: neg 2, 3
+0x7c 0x43 0x00 0xd0
+
+# CHECK: neg. 2, 3
+0x7c 0x43 0x00 0xd1
+
+# CHECK: mulli 2, 3, 128
+0x1c 0x43 0x00 0x80
+
+# CHECK: mulhw 2, 3, 4
+0x7c 0x43 0x20 0x96
+
+# CHECK: mulhw. 2, 3, 4
+0x7c 0x43 0x20 0x97
+
+# CHECK: mullw 2, 3, 4
+0x7c 0x43 0x21 0xd6
+
+# CHECK: mullw. 2, 3, 4
+0x7c 0x43 0x21 0xd7
+
+# CHECK: mulhwu 2, 3, 4
+0x7c 0x43 0x20 0x16
+
+# CHECK: mulhwu. 2, 3, 4
+0x7c 0x43 0x20 0x17
+
+# CHECK: divw 2, 3, 4
+0x7c 0x43 0x23 0xd6
+
+# CHECK: divw. 2, 3, 4
+0x7c 0x43 0x23 0xd7
+
+# CHECK: divwu 2, 3, 4
+0x7c 0x43 0x23 0x96
+
+# CHECK: divwu. 2, 3, 4
+0x7c 0x43 0x23 0x97
+
+# CHECK: mulld 2, 3, 4
+0x7c 0x43 0x21 0xd2
+
+# CHECK: mulld. 2, 3, 4
+0x7c 0x43 0x21 0xd3
+
+# CHECK: mulhd 2, 3, 4
+0x7c 0x43 0x20 0x92
+
+# CHECK: mulhd. 2, 3, 4
+0x7c 0x43 0x20 0x93
+
+# CHECK: mulhdu 2, 3, 4
+0x7c 0x43 0x20 0x12
+
+# CHECK: mulhdu. 2, 3, 4
+0x7c 0x43 0x20 0x13
+
+# CHECK: divd 2, 3, 4
+0x7c 0x43 0x23 0xd2
+
+# CHECK: divd. 2, 3, 4
+0x7c 0x43 0x23 0xd3
+
+# CHECK: divdu 2, 3, 4
+0x7c 0x43 0x23 0x92
+
+# CHECK: divdu. 2, 3, 4
+0x7c 0x43 0x23 0x93
+
+# CHECK: cmpdi 2, 3, 128
+0x2d 0x23 0x00 0x80
+
+# CHECK: cmpd 2, 3, 4
+0x7d 0x23 0x20 0x00
+
+# CHECK: cmpldi 2, 3, 128
+0x29 0x23 0x00 0x80
+
+# CHECK: cmpld 2, 3, 4
+0x7d 0x23 0x20 0x40
+
+# CHECK: cmpwi 2, 3, 128
+0x2d 0x03 0x00 0x80
+
+# CHECK: cmpw 2, 3, 4
+0x7d 0x03 0x20 0x00
+
+# CHECK: cmplwi 2, 3, 128
+0x29 0x03 0x00 0x80
+
+# CHECK: cmplw 2, 3, 4
+0x7d 0x03 0x20 0x40
+
+# CHECK: twi 2, 3, 4
+0x0c 0x43 0x00 0x04
+
+# CHECK: tw 2, 3, 4
+0x7c 0x43 0x20 0x08
+
+# CHECK: tdi 2, 3, 4
+0x08 0x43 0x00 0x04
+
+# CHECK: td 2, 3, 4
+0x7c 0x43 0x20 0x88
+
+# CHECK: isel 2, 3, 4, 5
+0x7c 0x43 0x21 0x5e
+
+# CHECK: andi. 2, 3, 128
+0x70 0x62 0x00 0x80
+
+# CHECK: andis. 2, 3, 128
+0x74 0x62 0x00 0x80
+
+# CHECK: ori 2, 3, 128
+0x60 0x62 0x00 0x80
+
+# CHECK: oris 2, 3, 128
+0x64 0x62 0x00 0x80
+
+# CHECK: xori 2, 3, 128
+0x68 0x62 0x00 0x80
+
+# CHECK: xoris 2, 3, 128
+0x6c 0x62 0x00 0x80
+
+# CHECK: and 2, 3, 4
+0x7c 0x62 0x20 0x38
+
+# CHECK: and. 2, 3, 4
+0x7c 0x62 0x20 0x39
+
+# CHECK: xor 2, 3, 4
+0x7c 0x62 0x22 0x78
+
+# CHECK: xor. 2, 3, 4
+0x7c 0x62 0x22 0x79
+
+# CHECK: nand 2, 3, 4
+0x7c 0x62 0x23 0xb8
+
+# CHECK: nand. 2, 3, 4
+0x7c 0x62 0x23 0xb9
+
+# CHECK: or 2, 3, 4
+0x7c 0x62 0x23 0x78
+
+# CHECK: or. 2, 3, 4
+0x7c 0x62 0x23 0x79
+
+# CHECK: nor 2, 3, 4
+0x7c 0x62 0x20 0xf8
+
+# CHECK: nor. 2, 3, 4
+0x7c 0x62 0x20 0xf9
+
+# CHECK: eqv 2, 3, 4
+0x7c 0x62 0x22 0x38
+
+# CHECK: eqv. 2, 3, 4
+0x7c 0x62 0x22 0x39
+
+# CHECK: andc 2, 3, 4
+0x7c 0x62 0x20 0x78
+
+# CHECK: andc. 2, 3, 4
+0x7c 0x62 0x20 0x79
+
+# CHECK: orc 2, 3, 4
+0x7c 0x62 0x23 0x38
+
+# CHECK: orc. 2, 3, 4
+0x7c 0x62 0x23 0x39
+
+# CHECK: extsb 2, 3
+0x7c 0x62 0x07 0x74
+
+# CHECK: extsb. 2, 3
+0x7c 0x62 0x07 0x75
+
+# CHECK: extsh 2, 3
+0x7c 0x62 0x07 0x34
+
+# CHECK: extsh. 2, 3
+0x7c 0x62 0x07 0x35
+
+# CHECK: cntlzw 2, 3
+0x7c 0x62 0x00 0x34
+
+# CHECK: cntlzw. 2, 3
+0x7c 0x62 0x00 0x35
+
+# CHECK: popcntw 2, 3
+0x7c 0x62 0x02 0xf4
+
+# CHECK: extsw 2, 3
+0x7c 0x62 0x07 0xb4
+
+# CHECK: extsw. 2, 3
+0x7c 0x62 0x07 0xb5
+
+# CHECK: cntlzd 2, 3
+0x7c 0x62 0x00 0x74
+
+# CHECK: cntlzd. 2, 3
+0x7c 0x62 0x00 0x75
+
+# CHECK: popcntd 2, 3
+0x7c 0x62 0x03 0xf4
+
+# CHECK: rlwinm 2, 3, 4, 5, 6
+0x54 0x62 0x21 0x4c
+
+# CHECK: rlwinm. 2, 3, 4, 5, 6
+0x54 0x62 0x21 0x4d
+
+# CHECK: rlwnm 2, 3, 4, 5, 6
+0x5c 0x62 0x21 0x4c
+
+# CHECK: rlwnm. 2, 3, 4, 5, 6
+0x5c 0x62 0x21 0x4d
+
+# CHECK: rlwimi 2, 3, 4, 5, 6
+0x50 0x62 0x21 0x4c
+
+# CHECK: rlwimi. 2, 3, 4, 5, 6
+0x50 0x62 0x21 0x4d
+
+# CHECK: rldicl 2, 3, 4, 5
+0x78 0x62 0x21 0x40
+
+# CHECK: rldicl. 2, 3, 4, 5
+0x78 0x62 0x21 0x41
+
+# CHECK: rldicr 2, 3, 4, 5
+0x78 0x62 0x21 0x44
+
+# CHECK: rldicr. 2, 3, 4, 5
+0x78 0x62 0x21 0x45
+
+# CHECK: rldic 2, 3, 4, 5
+0x78 0x62 0x21 0x48
+
+# CHECK: rldic. 2, 3, 4, 5
+0x78 0x62 0x21 0x49
+
+# CHECK: rldcl 2, 3, 4, 5
+0x78 0x62 0x21 0x50
+
+# CHECK: rldcl. 2, 3, 4, 5
+0x78 0x62 0x21 0x51
+
+# CHECK: rldcr 2, 3, 4, 5
+0x78 0x62 0x21 0x52
+
+# CHECK: rldcr. 2, 3, 4, 5
+0x78 0x62 0x21 0x53
+
+# CHECK: rldimi 2, 3, 4, 5
+0x78 0x62 0x21 0x4c
+
+# CHECK: rldimi. 2, 3, 4, 5
+0x78 0x62 0x21 0x4d
+
+# CHECK: slw 2, 3, 4
+0x7c 0x62 0x20 0x30
+
+# CHECK: slw. 2, 3, 4
+0x7c 0x62 0x20 0x31
+
+# CHECK: srw 2, 3, 4
+0x7c 0x62 0x24 0x30
+
+# CHECK: srw. 2, 3, 4
+0x7c 0x62 0x24 0x31
+
+# CHECK: srawi 2, 3, 4
+0x7c 0x62 0x26 0x70
+
+# CHECK: srawi. 2, 3, 4
+0x7c 0x62 0x26 0x71
+
+# CHECK: sraw 2, 3, 4
+0x7c 0x62 0x26 0x30
+
+# CHECK: sraw. 2, 3, 4
+0x7c 0x62 0x26 0x31
+
+# CHECK: sld 2, 3, 4
+0x7c 0x62 0x20 0x36
+
+# CHECK: sld. 2, 3, 4
+0x7c 0x62 0x20 0x37
+
+# CHECK: srd 2, 3, 4
+0x7c 0x62 0x24 0x36
+
+# CHECK: srd. 2, 3, 4
+0x7c 0x62 0x24 0x37
+
+# CHECK: sradi 2, 3, 4
+0x7c 0x62 0x26 0x74
+
+# CHECK: sradi. 2, 3, 4
+0x7c 0x62 0x26 0x75
+
+# CHECK: srad 2, 3, 4
+0x7c 0x62 0x26 0x34
+
+# CHECK: srad. 2, 3, 4
+0x7c 0x62 0x26 0x35
+
+# CHECK: mtspr 600, 2
+0x7c 0x58 0x93 0xa6
+
+# CHECK: mfspr 2, 600
+0x7c 0x58 0x92 0xa6
+
+# CHECK: mtcrf 123, 2
+0x7c 0x47 0xb1 0x20
+
+# CHECK: mfcr 2
+0x7c 0x40 0x00 0x26
+
+# CHECK: mtocrf 16, 2
+0x7c 0x51 0x01 0x20
+
+# CHECK: mfocrf 16, 8
+0x7e 0x10 0x80 0x26
+
diff --git a/test/MC/Disassembler/PowerPC/ppc64-operands.txt b/test/MC/Disassembler/PowerPC/ppc64-operands.txt
new file mode 100644
index 000000000000..a2da3227f7cd
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/ppc64-operands.txt
@@ -0,0 +1,94 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-unknown -mcpu=pwr7 | FileCheck %s
+
+# CHECK: add 1, 2, 3
+0x7c 0x22 0x1a 0x14
+
+# CHECK: add 1, 2, 3
+0x7c 0x22 0x1a 0x14
+
+# CHECK: add 0, 0, 0
+0x7c 0x00 0x02 0x14
+
+# CHECK: add 31, 31, 31
+0x7f 0xff 0xfa 0x14
+
+# CHECK: li 1, 0
+0x38 0x20 0x00 0x00
+
+# CHECK: addi 1, 2, 0
+0x38 0x22 0x00 0x00
+
+# CHECK: li 1, -32768
+0x38 0x20 0x80 0x00
+
+# CHECK: li 1, 32767
+0x38 0x20 0x7f 0xff
+
+# CHECK: ori 1, 2, 0
+0x60 0x41 0x00 0x00
+
+# CHECK: ori 1, 2, 65535
+0x60 0x41 0xff 0xff
+
+# CHECK: lis 1, 0
+0x3c 0x20 0x00 0x00
+
+# CHECK: lis 1, -1
+0x3c 0x20 0xff 0xff
+
+# CHECK: lwz 1, 0(0)
+0x80 0x20 0x00 0x00
+
+# CHECK: lwz 1, 0(0)
+0x80 0x20 0x00 0x00
+
+# CHECK: lwz 1, 0(31)
+0x80 0x3f 0x00 0x00
+
+# CHECK: lwz 1, 0(31)
+0x80 0x3f 0x00 0x00
+
+# CHECK: lwz 1, -32768(2)
+0x80 0x22 0x80 0x00
+
+# CHECK: lwz 1, 32767(2)
+0x80 0x22 0x7f 0xff
+
+# CHECK: ld 1, 0(0)
+0xe8 0x20 0x00 0x00
+
+# CHECK: ld 1, 0(0)
+0xe8 0x20 0x00 0x00
+
+# CHECK: ld 1, 0(31)
+0xe8 0x3f 0x00 0x00
+
+# CHECK: ld 1, 0(31)
+0xe8 0x3f 0x00 0x00
+
+# CHECK: ld 1, -32768(2)
+0xe8 0x22 0x80 0x00
+
+# CHECK: ld 1, 32764(2)
+0xe8 0x22 0x7f 0xfc
+
+# CHECK: ld 1, 4(2)
+0xe8 0x22 0x00 0x04
+
+# CHECK: ld 1, -4(2)
+0xe8 0x22 0xff 0xfc
+
+# CHECK: b .+1024
+0x48 0x00 0x04 0x00
+
+# CHECK: ba 1024
+0x48 0x00 0x04 0x02
+
+# FIXME: decode as beq 0, .+1024
+# CHECK: bc 12, 2, .+1024
+0x41 0x82 0x04 0x00
+
+# FIXME: decode as beqa 0, 1024
+# CHECK: bca 12, 2, 1024
+0x41 0x82 0x04 0x02
+
diff --git a/test/MC/Disassembler/PowerPC/vsx.txt b/test/MC/Disassembler/PowerPC/vsx.txt
new file mode 100644
index 000000000000..b5e2751225e4
--- /dev/null
+++ b/test/MC/Disassembler/PowerPC/vsx.txt
@@ -0,0 +1,452 @@
+# RUN: llvm-mc --disassemble %s -triple powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+
+# CHECK: lxsdx 7, 5, 31
+0x7c 0xe5 0xfc 0x98
+
+# CHECK: lxvd2x 7, 5, 31
+0x7c 0xe5 0xfe 0x98
+
+# CHECK: lxvdsx 7, 5, 31
+0x7c 0xe5 0xfa 0x98
+
+# CHECK: lxvw4x 7, 5, 31
+0x7c 0xe5 0xfe 0x18
+
+# CHECK: stxsdx 8, 5, 31
+0x7d 0x05 0xfd 0x98
+
+# CHECK: stxvd2x 8, 5, 31
+0x7d 0x05 0xff 0x98
+
+# CHECK: stxvw4x 8, 5, 31
+0x7d 0x05 0xff 0x18
+
+# CHECK: xsabsdp 7, 27
+0xf0 0xe0 0xdd 0x64
+
+# CHECK: xsadddp 7, 63, 27
+0xf0 0xff 0xd9 0x04
+
+# CHECK: xscmpodp 6, 63, 27
+0xf3 0x1f 0xd9 0x5c
+
+# CHECK: xscmpudp 6, 63, 27
+0xf3 0x1f 0xd9 0x1c
+
+# CHECK: xscpsgndp 7, 63, 27
+0xf0 0xff 0xdd 0x84
+
+# CHECK: xscvdpsp 7, 27
+0xf0 0xe0 0xdc 0x24
+
+# CHECK: xscvdpsxds 7, 27
+0xf0 0xe0 0xdd 0x60
+
+# CHECK: xscvdpsxws 7, 27
+0xf0 0xe0 0xd9 0x60
+
+# CHECK: xscvdpuxds 7, 27
+0xf0 0xe0 0xdd 0x20
+
+# CHECK: xscvdpuxws 7, 27
+0xf0 0xe0 0xd9 0x20
+
+# CHECK: xscvspdp 7, 27
+0xf0 0xe0 0xdd 0x24
+
+# CHECK: xscvsxddp 7, 27
+0xf0 0xe0 0xdd 0xe0
+
+# CHECK: xscvuxddp 7, 27
+0xf0 0xe0 0xdd 0xa0
+
+# CHECK: xsdivdp 7, 63, 27
+0xf0 0xff 0xd9 0xc4
+
+# CHECK: xsmaddadp 7, 63, 27
+0xf0 0xff 0xd9 0x0c
+
+# CHECK: xsmaddmdp 7, 63, 27
+0xf0 0xff 0xd9 0x4c
+
+# CHECK: xsmaxdp 7, 63, 27
+0xf0 0xff 0xdd 0x04
+
+# CHECK: xsmindp 7, 63, 27
+0xf0 0xff 0xdd 0x44
+
+# CHECK: xsmsubadp 7, 63, 27
+0xf0 0xff 0xd9 0x8c
+
+# CHECK: xsmsubmdp 7, 63, 27
+0xf0 0xff 0xd9 0xcc
+
+# CHECK: xsmuldp 7, 63, 27
+0xf0 0xff 0xd9 0x84
+
+# CHECK: xsnabsdp 7, 27
+0xf0 0xe0 0xdd 0xa4
+
+# CHECK: xsnegdp 7, 27
+0xf0 0xe0 0xdd 0xe4
+
+# CHECK: xsnmaddadp 7, 63, 27
+0xf0 0xff 0xdd 0x0c
+
+# CHECK: xsnmaddmdp 7, 63, 27
+0xf0 0xff 0xdd 0x4c
+
+# CHECK: xsnmsubadp 7, 63, 27
+0xf0 0xff 0xdd 0x8c
+
+# CHECK: xsnmsubmdp 7, 63, 27
+0xf0 0xff 0xdd 0xcc
+
+# CHECK: xsrdpi 7, 27
+0xf0 0xe0 0xd9 0x24
+
+# CHECK: xsrdpic 7, 27
+0xf0 0xe0 0xd9 0xac
+
+# CHECK: xsrdpim 7, 27
+0xf0 0xe0 0xd9 0xe4
+
+# CHECK: xsrdpip 7, 27
+0xf0 0xe0 0xd9 0xa4
+
+# CHECK: xsrdpiz 7, 27
+0xf0 0xe0 0xd9 0x64
+
+# CHECK: xsredp 7, 27
+0xf0 0xe0 0xd9 0x68
+
+# CHECK: xsrsqrtedp 7, 27
+0xf0 0xe0 0xd9 0x28
+
+# CHECK: xssqrtdp 7, 27
+0xf0 0xe0 0xd9 0x2c
+
+# CHECK: xssubdp 7, 63, 27
+0xf0 0xff 0xd9 0x44
+
+# CHECK: xstdivdp 6, 63, 27
+0xf3 0x1f 0xd9 0xec
+
+# CHECK: xstsqrtdp 6, 27
+0xf3 0x00 0xd9 0xa8
+
+# CHECK: xvabsdp 7, 27
+0xf0 0xe0 0xdf 0x64
+
+# CHECK: xvabssp 7, 27
+0xf0 0xe0 0xde 0x64
+
+# CHECK: xvadddp 7, 63, 27
+0xf0 0xff 0xdb 0x04
+
+# CHECK: xvaddsp 7, 63, 27
+0xf0 0xff 0xda 0x04
+
+# CHECK: xvcmpeqdp 7, 63, 27
+0xf0 0xff 0xdb 0x1c
+
+# CHECK: xvcmpeqdp. 7, 63, 27
+0xf0 0xff 0xdf 0x1c
+
+# CHECK: xvcmpeqsp 7, 63, 27
+0xf0 0xff 0xda 0x1c
+
+# CHECK: xvcmpeqsp. 7, 63, 27
+0xf0 0xff 0xde 0x1c
+
+# CHECK: xvcmpgedp 7, 63, 27
+0xf0 0xff 0xdb 0x9c
+
+# CHECK: xvcmpgedp. 7, 63, 27
+0xf0 0xff 0xdf 0x9c
+
+# CHECK: xvcmpgesp 7, 63, 27
+0xf0 0xff 0xda 0x9c
+
+# CHECK: xvcmpgesp. 7, 63, 27
+0xf0 0xff 0xde 0x9c
+
+# CHECK: xvcmpgtdp 7, 63, 27
+0xf0 0xff 0xdb 0x5c
+
+# CHECK: xvcmpgtdp. 7, 63, 27
+0xf0 0xff 0xdf 0x5c
+
+# CHECK: xvcmpgtsp 7, 63, 27
+0xf0 0xff 0xda 0x5c
+
+# CHECK: xvcmpgtsp. 7, 63, 27
+0xf0 0xff 0xde 0x5c
+
+# CHECK: xvcpsgndp 7, 63, 27
+0xf0 0xff 0xdf 0x84
+
+# CHECK: xvcpsgnsp 7, 63, 27
+0xf0 0xff 0xde 0x84
+
+# CHECK: xvcvdpsp 7, 27
+0xf0 0xe0 0xde 0x24
+
+# CHECK: xvcvdpsxds 7, 27
+0xf0 0xe0 0xdf 0x60
+
+# CHECK: xvcvdpsxws 7, 27
+0xf0 0xe0 0xdb 0x60
+
+# CHECK: xvcvdpuxds 7, 27
+0xf0 0xe0 0xdf 0x20
+
+# CHECK: xvcvdpuxws 7, 27
+0xf0 0xe0 0xdb 0x20
+
+# CHECK: xvcvspdp 7, 27
+0xf0 0xe0 0xdf 0x24
+
+# CHECK: xvcvspsxds 7, 27
+0xf0 0xe0 0xde 0x60
+
+# CHECK: xvcvspsxws 7, 27
+0xf0 0xe0 0xda 0x60
+
+# CHECK: xvcvspuxds 7, 27
+0xf0 0xe0 0xde 0x20
+
+# CHECK: xvcvspuxws 7, 27
+0xf0 0xe0 0xda 0x20
+
+# CHECK: xvcvsxddp 7, 27
+0xf0 0xe0 0xdf 0xe0
+
+# CHECK: xvcvsxdsp 7, 27
+0xf0 0xe0 0xde 0xe0
+
+# CHECK: xvcvsxwdp 7, 27
+0xf0 0xe0 0xdb 0xe0
+
+# CHECK: xvcvsxwsp 7, 27
+0xf0 0xe0 0xda 0xe0
+
+# CHECK: xvcvuxddp 7, 27
+0xf0 0xe0 0xdf 0xa0
+
+# CHECK: xvcvuxdsp 7, 27
+0xf0 0xe0 0xde 0xa0
+
+# CHECK: xvcvuxwdp 7, 27
+0xf0 0xe0 0xdb 0xa0
+
+# CHECK: xvcvuxwsp 7, 27
+0xf0 0xe0 0xda 0xa0
+
+# CHECK: xvdivdp 7, 63, 27
+0xf0 0xff 0xdb 0xc4
+
+# CHECK: xvdivsp 7, 63, 27
+0xf0 0xff 0xda 0xc4
+
+# CHECK: xvmaddadp 7, 63, 27
+0xf0 0xff 0xdb 0x0c
+
+# CHECK: xvmaddasp 7, 63, 27
+0xf0 0xff 0xda 0x0c
+
+# CHECK: xvmaddmdp 7, 63, 27
+0xf0 0xff 0xdb 0x4c
+
+# CHECK: xvmaddmsp 7, 63, 27
+0xf0 0xff 0xda 0x4c
+
+# CHECK: xvmaxdp 7, 63, 27
+0xf0 0xff 0xdf 0x04
+
+# CHECK: xvmaxsp 7, 63, 27
+0xf0 0xff 0xde 0x04
+
+# CHECK: xvmindp 7, 63, 27
+0xf0 0xff 0xdf 0x44
+
+# CHECK: xvminsp 7, 63, 27
+0xf0 0xff 0xde 0x44
+
+# FIXME: decode as xvmovdp 7, 63
+# CHECK: xvcpsgndp 7, 63, 63
+0xf0 0xff 0xff 0x86
+
+# FIXME: decode as xvmovsp 7, 63
+# CHECK: xvcpsgnsp 7, 63, 63
+0xf0 0xff 0xfe 0x86
+
+# CHECK: xvmsubadp 7, 63, 27
+0xf0 0xff 0xdb 0x8c
+
+# CHECK: xvmsubasp 7, 63, 27
+0xf0 0xff 0xda 0x8c
+
+# CHECK: xvmsubmdp 7, 63, 27
+0xf0 0xff 0xdb 0xcc
+
+# CHECK: xvmsubmsp 7, 63, 27
+0xf0 0xff 0xda 0xcc
+
+# CHECK: xvmuldp 7, 63, 27
+0xf0 0xff 0xdb 0x84
+
+# CHECK: xvmulsp 7, 63, 27
+0xf0 0xff 0xda 0x84
+
+# CHECK: xvnabsdp 7, 27
+0xf0 0xe0 0xdf 0xa4
+
+# CHECK: xvnabssp 7, 27
+0xf0 0xe0 0xde 0xa4
+
+# CHECK: xvnegdp 7, 27
+0xf0 0xe0 0xdf 0xe4
+
+# CHECK: xvnegsp 7, 27
+0xf0 0xe0 0xde 0xe4
+
+# CHECK: xvnmaddadp 7, 63, 27
+0xf0 0xff 0xdf 0x0c
+
+# CHECK: xvnmaddasp 7, 63, 27
+0xf0 0xff 0xde 0x0c
+
+# CHECK: xvnmaddmdp 7, 63, 27
+0xf0 0xff 0xdf 0x4c
+
+# CHECK: xvnmaddmsp 7, 63, 27
+0xf0 0xff 0xde 0x4c
+
+# CHECK: xvnmsubadp 7, 63, 27
+0xf0 0xff 0xdf 0x8c
+
+# CHECK: xvnmsubasp 7, 63, 27
+0xf0 0xff 0xde 0x8c
+
+# CHECK: xvnmsubmdp 7, 63, 27
+0xf0 0xff 0xdf 0xcc
+
+# CHECK: xvnmsubmsp 7, 63, 27
+0xf0 0xff 0xde 0xcc
+
+# CHECK: xvrdpi 7, 27
+0xf0 0xe0 0xdb 0x24
+
+# CHECK: xvrdpic 7, 27
+0xf0 0xe0 0xdb 0xac
+
+# CHECK: xvrdpim 7, 27
+0xf0 0xe0 0xdb 0xe4
+
+# CHECK: xvrdpip 7, 27
+0xf0 0xe0 0xdb 0xa4
+
+# CHECK: xvrdpiz 7, 27
+0xf0 0xe0 0xdb 0x64
+
+# CHECK: xvredp 7, 27
+0xf0 0xe0 0xdb 0x68
+
+# CHECK: xvresp 7, 27
+0xf0 0xe0 0xda 0x68
+
+# CHECK: xvrspi 7, 27
+0xf0 0xe0 0xda 0x24
+
+# CHECK: xvrspic 7, 27
+0xf0 0xe0 0xda 0xac
+
+# CHECK: xvrspim 7, 27
+0xf0 0xe0 0xda 0xe4
+
+# CHECK: xvrspip 7, 27
+0xf0 0xe0 0xda 0xa4
+
+# CHECK: xvrspiz 7, 27
+0xf0 0xe0 0xda 0x64
+
+# CHECK: xvrsqrtedp 7, 27
+0xf0 0xe0 0xdb 0x28
+
+# CHECK: xvrsqrtesp 7, 27
+0xf0 0xe0 0xda 0x28
+
+# CHECK: xvsqrtdp 7, 27
+0xf0 0xe0 0xdb 0x2c
+
+# CHECK: xvsqrtsp 7, 27
+0xf0 0xe0 0xda 0x2c
+
+# CHECK: xvsubdp 7, 63, 27
+0xf0 0xff 0xdb 0x44
+
+# CHECK: xvsubsp 7, 63, 27
+0xf0 0xff 0xda 0x44
+
+# CHECK: xvtdivdp 6, 63, 27
+0xf3 0x1f 0xdb 0xec
+
+# CHECK: xvtdivsp 6, 63, 27
+0xf3 0x1f 0xda 0xec
+
+# CHECK: xvtsqrtdp 6, 27
+0xf3 0x00 0xdb 0xa8
+
+# CHECK: xvtsqrtsp 6, 27
+0xf3 0x00 0xda 0xa8
+
+# CHECK: xxland 7, 63, 27
+0xf0 0xff 0xdc 0x14
+
+# CHECK: xxlandc 7, 63, 27
+0xf0 0xff 0xdc 0x54
+
+# CHECK: xxlnor 7, 63, 27
+0xf0 0xff 0xdd 0x14
+
+# CHECK: xxlor 7, 63, 27
+0xf0 0xff 0xdc 0x94
+
+# CHECK: xxlxor 7, 63, 27
+0xf0 0xff 0xdc 0xd4
+
+# FIXME: decode as xxmrghd 7, 63, 27
+# CHECK: xxpermdi 7, 63, 27, 0
+0xf0 0xff 0xd8 0x54
+
+# CHECK: xxmrghw 7, 63, 27
+0xf0 0xff 0xd8 0x94
+
+# FIXME: decode as xxmrgld 7, 63, 27
+# CHECK: xxpermdi 7, 63, 27, 3
+0xf0 0xff 0xdb 0x54
+
+# CHECK: xxmrglw 7, 63, 27
+0xf0 0xff 0xd9 0x94
+
+# CHECK: xxpermdi 7, 63, 27, 2
+0xf0 0xff 0xda 0x54
+
+# CHECK: xxsel 7, 63, 27, 14
+0xf0 0xff 0xdb 0xb4
+
+# CHECK: xxsldwi 7, 63, 27, 1
+0xf0 0xff 0xd9 0x14
+
+# FIXME: decode as xxspltd 7, 63, 1
+# CHECK: xxpermdi 7, 63, 63, 3
+0xf0 0xff 0xfb 0x56
+
+# CHECK: xxspltw 7, 27, 3
+0xf0 0xe3 0xda 0x90
+
+# FIXME: decode as xxswapd 7, 63
+# CHECK: xxpermdi 7, 63, 63, 2
+0xf0 0xff 0xfa 0x56
+
diff --git a/test/MC/Disassembler/Sparc/lit.local.cfg b/test/MC/Disassembler/Sparc/lit.local.cfg
new file mode 100644
index 000000000000..fa6a54e50132
--- /dev/null
+++ b/test/MC/Disassembler/Sparc/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'Sparc' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/MC/Disassembler/Sparc/sparc-fp.txt b/test/MC/Disassembler/Sparc/sparc-fp.txt
new file mode 100644
index 000000000000..b8a5017383d7
--- /dev/null
+++ b/test/MC/Disassembler/Sparc/sparc-fp.txt
@@ -0,0 +1,148 @@
+# RUN: llvm-mc --disassemble %s -triple=sparc64-linux-gnu | FileCheck %s
+
+
+# CHECK: fitos %f0, %f4
+0x89 0xa0 0x18 0x80
+
+# CHECK: fitod %f0, %f4
+0x89 0xa0 0x19 0x00
+
+# CHECK: fitoq %f0, %f4
+0x89 0xa0 0x19 0x80
+
+# CHECK: fstoi %f0, %f4
+0x89 0xa0 0x1a 0x20
+
+# CHECK: fdtoi %f0, %f4
+0x89 0xa0 0x1a 0x40
+
+# CHECK: fqtoi %f0, %f4
+0x89 0xa0 0x1a 0x60
+
+# CHECK: fstod %f0, %f4
+0x89 0xa0 0x19 0x20
+# CHECK: fstoq %f0, %f4
+0x89 0xa0 0x19 0xa0
+
+# CHECK: fdtos %f0, %f4
+0x89 0xa0 0x18 0xc0
+
+# CHECK: fdtoq %f0, %f4
+0x89 0xa0 0x19 0xc0
+
+# CHECK: fqtos %f0, %f4
+0x89 0xa0 0x18 0xe0
+
+# CHECK: fqtod %f0, %f4
+0x89 0xa0 0x19 0x60
+
+# CHECK: fmovs %f0, %f4
+0x89 0xa0 0x00 0x20
+
+# CHECK: fmovd %f0, %f4
+0x89 0xa0 0x00 0x40
+
+# CHECK: fmovq %f0, %f4
+0x89 0xa0 0x00 0x60
+
+# CHECK: fnegs %f0, %f4
+0x89 0xa0 0x00 0xa0
+
+# CHECK: fnegd %f0, %f4
+0x89 0xa0 0x00 0xc0
+
+# CHECK: fnegq %f0, %f4
+0x89 0xa0 0x00 0xe0
+
+# CHECK: fabss %f0, %f4
+0x89 0xa0 0x01 0x20
+
+# CHECK: fabsd %f0, %f4
+0x89 0xa0 0x01 0x40
+
+# CHECK: fabsq %f0, %f4
+0x89 0xa0 0x01 0x60
+
+# CHECK: fsqrts %f0, %f4
+0x89 0xa0 0x05 0x20
+
+# CHECK: fsqrtd %f0, %f4
+0x89 0xa0 0x05 0x40
+
+# CHECK: fsqrtq %f0, %f4
+0x89 0xa0 0x05 0x60
+
+# CHECK: fadds %f0, %f4, %f8
+0x91 0xa0 0x08 0x24
+
+# CHECK: faddd %f0, %f4, %f8
+0x91 0xa0 0x08 0x44
+
+# CHECK: faddq %f0, %f4, %f8
+0x91 0xa0 0x08 0x64
+
+# CHECK: faddd %f32, %f34, %f62
+0xbf 0xa0 0x48 0x43
+
+# CHECK: faddq %f32, %f36, %f60
+0xbb 0xa0 0x48 0x65
+
+# CHECK: fsubs %f0, %f4, %f8
+0x91 0xa0 0x08 0xa4
+
+# CHECK: fsubd %f0, %f4, %f8
+0x91 0xa0 0x08 0xc4
+
+# CHECK: fsubq %f0, %f4, %f8
+0x91 0xa0 0x08 0xe4
+
+# CHECK: fmuls %f0, %f4, %f8
+0x91 0xa0 0x09 0x24
+
+# CHECK: fmuld %f0, %f4, %f8
+0x91 0xa0 0x09 0x44
+
+# CHECK: fmulq %f0, %f4, %f8
+0x91 0xa0 0x09 0x64
+
+# CHECK: fsmuld %f0, %f4, %f8
+0x91 0xa0 0x0d 0x24
+
+# CHECK: fdmulq %f0, %f4, %f8
+0x91 0xa0 0x0d 0xc4
+
+# CHECK: fdivs %f0, %f4, %f8
+0x91 0xa0 0x09 0xa4
+
+# CHECK: fdivd %f0, %f4, %f8
+0x91 0xa0 0x09 0xc4
+
+# CHECK: fdivq %f0, %f4, %f8
+0x91 0xa0 0x09 0xe4
+
+# CHECK: fcmps %f0, %f4
+0x81 0xa8 0x0a 0x24
+
+# CHECK: fcmpd %f0, %f4
+0x81 0xa8 0x0a 0x44
+
+# CHECK: fcmpq %f0, %f4
+0x81 0xa8 0x0a 0x64
+
+# CHECK: fxtos %f0, %f4
+0x89 0xa0 0x10 0x80
+
+# CHECK: fxtod %f0, %f4
+0x89 0xa0 0x11 0x00
+
+# CHECK: fxtoq %f0, %f4
+0x89 0xa0 0x11 0x80
+
+# CHECK: fstox %f0, %f4
+0x89 0xa0 0x10 0x20
+
+# CHECK: fdtox %f0, %f4
+0x89 0xa0 0x10 0x40
+
+# CHECK: fqtox %f0, %f4
+0x89 0xa0 0x10 0x60
diff --git a/test/MC/Disassembler/Sparc/sparc-mem.txt b/test/MC/Disassembler/Sparc/sparc-mem.txt
new file mode 100644
index 000000000000..6ad4be167214
--- /dev/null
+++ b/test/MC/Disassembler/Sparc/sparc-mem.txt
@@ -0,0 +1,163 @@
+# RUN: llvm-mc --disassemble %s -triple=sparcv9-unknown-linux | FileCheck %s
+
+# CHECK: ldsb [%i0+%l6], %o2
+0xd4 0x4e 0x00 0x16
+
+# CHECK: ldsb [%i0+32], %o2
+0xd4 0x4e 0x20 0x20
+
+# CHECK: ldsb [%g1], %o4
+0xd8 0x48 0x60 0x00
+
+# CHECK: ldsh [%i0+%l6], %o2
+0xd4 0x56 0x00 0x16
+
+# CHECK: ldsh [%i0+32], %o2
+0xd4 0x56 0x20 0x20
+
+# CHECK: ldsh [%g1], %o4
+0xd8 0x50 0x60 0x00
+
+# CHECK: ldub [%i0+%l6], %o2
+0xd4 0x0e 0x00 0x16
+
+# CHECK: ldub [%i0+32], %o2
+0xd4 0x0e 0x20 0x20
+
+# CHECK: ldub [%g1], %o2
+0xd4 0x08 0x60 0x00
+
+# CHECK: lduh [%i0+%l6], %o2
+0xd4 0x16 0x00 0x16
+
+# CHECK: lduh [%i0+32], %o2
+0xd4 0x16 0x20 0x20
+
+# CHECK: lduh [%g1], %o2
+0xd4 0x10 0x60 0x00
+
+# CHECK: ld [%i0+%l6], %o2
+0xd4 0x06 0x00 0x16
+
+# CHECK: ld [%i0+32], %o2
+0xd4 0x06 0x20 0x20
+
+# CHECK: ld [%g1], %o2
+0xd4 0x00 0x60 0x00
+
+# CHECK: ld [%i0+%l6], %f2
+0xc5 0x06 0x00 0x16
+
+# CHECK: ld [%i0+32], %f2
+0xc5 0x06 0x20 0x20
+
+# CHECK: ld [%g1], %f2
+0xc5 0x00 0x60 0x00
+
+# CHECK: ldd [%i0+%l6], %f2
+0xc5 0x1e 0x00 0x16
+
+# CHECK: ldd [%i0+32], %f2
+0xc5 0x1e 0x20 0x20
+
+# CHECK: ldd [%g1], %f2
+0xc5 0x18 0x60 0x00
+
+# CHECK: ldq [%i0+%l6], %f4
+0xc9 0x16 0x00 0x16
+
+# CHECK: ldq [%i0+32], %f4
+0xc9 0x16 0x20 0x20
+
+# CHECK: ldq [%g1], %f4
+0xc9 0x10 0x60 0x00
+
+# CHECK: ldx [%i0+%l6], %o2
+0xd4 0x5e 0x00 0x16
+
+# CHECK: ldx [%i0+32], %o2
+0xd4 0x5e 0x20 0x20
+
+# CHECK: ldx [%g1], %o2
+0xd4 0x58 0x60 0x00
+
+# CHECK: ldsw [%i0+%l6], %o2
+0xd4 0x46 0x00 0x16
+
+# CHECK: ldsw [%i0+32], %o2
+0xd4 0x46 0x20 0x20
+
+# CHECK: ldsw [%g1], %o2
+0xd4 0x40 0x60 0x00
+
+# CHECK: stb %o2, [%i0+%l6]
+0xd4 0x2e 0x00 0x16
+
+# CHECK: stb %o2, [%i0+32]
+0xd4 0x2e 0x20 0x20
+
+# CHECK: stb %o2, [%g1]
+0xd4 0x28 0x60 0x00
+
+# CHECK: sth %o2, [%i0+%l6]
+0xd4 0x36 0x00 0x16
+
+# CHECK: sth %o2, [%i0+32]
+0xd4 0x36 0x20 0x20
+
+# CHECK: sth %o2, [%g1]
+0xd4 0x30 0x60 0x00
+
+# CHECK: st %o2, [%i0+%l6]
+0xd4 0x26 0x00 0x16
+
+# CHECK: st %o2, [%i0+32]
+0xd4 0x26 0x20 0x20
+
+# CHECK: st %o2, [%g1]
+0xd4 0x20 0x60 0x00
+
+# CHECK: st %f2, [%i0+%l6]
+0xc5 0x26 0x00 0x16
+
+# CHECK: st %f2, [%i0+32]
+0xc5 0x26 0x20 0x20
+
+# CHECK: st %f2, [%g1]
+0xc5 0x20 0x60 0x00
+
+# CHECK: std %f2, [%i0+%l6]
+0xc5 0x3e 0x00 0x16
+
+# CHECK: std %f2, [%i0+32]
+0xc5 0x3e 0x20 0x20
+
+# CHECK: std %f2, [%g1]
+0xc5 0x38 0x60 0x00
+
+# CHECK: stq %f4, [%i0+%l6]
+0xc9 0x36 0x00 0x16
+
+# CHECK: stq %f4, [%i0+32]
+0xc9 0x36 0x20 0x20
+
+# CHECK: stq %f4, [%g1]
+0xc9 0x30 0x60 0x00
+
+# CHECK: stx %o2, [%i0+%l6]
+0xd4 0x76 0x00 0x16
+
+# CHECK: stx %o2, [%i0+32]
+0xd4 0x76 0x20 0x20
+
+# CHECK: stx %o2, [%g1]
+0xd4 0x70 0x60 0x00
+
+# CHECK: swap [%i0+%l6], %o2
+0xd4 0x7e 0x00 0x16
+
+# CHECK: swap [%i0+32], %o2
+0xd4 0x7e 0x20 0x20
+
+# CHECK: swap [%g1], %o2
+0xd4 0x78 0x60 0x00
diff --git a/test/MC/Disassembler/Sparc/sparc.txt b/test/MC/Disassembler/Sparc/sparc.txt
new file mode 100644
index 000000000000..a9420246361f
--- /dev/null
+++ b/test/MC/Disassembler/Sparc/sparc.txt
@@ -0,0 +1,202 @@
+# RUN: llvm-mc --disassemble %s -triple=sparc-unknown-linux | FileCheck %s
+
+# CHECK: add %g0, %g0, %g0
+0x80 0x00 0x00 0x00
+
+# CHECK: add %g1, %g2, %g3
+0x86 0x00 0x40 0x02
+
+# CHECK: add %o0, %o1, %l0
+0xa0 0x02 0x00 0x09
+
+# CHECK: add %o0, 10, %l0
+0xa0 0x02 0x20 0x0a
+
+# CHECK: addcc %g1, %g2, %g3
+0x86 0x80 0x40 0x02
+
+# CHECK: addxcc %g1, %g2, %g3
+0x86 0xc0 0x40 0x02
+
+# CHECK: udiv %g1, %g2, %g3
+0x86 0x70 0x40 0x02
+
+# CHECK: sdiv %g1, %g2, %g3
+0x86 0x78 0x40 0x02
+
+# CHECK: and %g1, %g2, %g3
+0x86 0x08 0x40 0x02
+
+# CHECK: andn %g1, %g2, %g3
+0x86 0x28 0x40 0x02
+
+# CHECK: or %g1, %g2, %g3
+0x86 0x10 0x40 0x02
+
+# CHECK: orn %g1, %g2, %g3
+0x86 0x30 0x40 0x02
+
+# CHECK: xor %g1, %g2, %g3
+0x86 0x18 0x40 0x02
+
+# CHECK: xnor %g1, %g2, %g3
+0x86 0x38 0x40 0x02
+
+# CHECK: umul %g1, %g2, %g3
+0x86 0x50 0x40 0x02
+
+# CHECK: smul %g1, %g2, %g3
+0x86 0x58 0x40 0x02
+
+# CHECK: nop
+0x01 0x00 0x00 0x00
+
+# CHECK: sethi 10, %l0
+0x21 0x00 0x00 0x0a
+
+# CHECK: sll %g1, %g2, %g3
+0x87 0x28 0x40 0x02
+
+# CHECK: sll %g1, 31, %g3
+0x87 0x28 0x60 0x1f
+
+# CHECK: srl %g1, %g2, %g3
+0x87 0x30 0x40 0x02
+
+# CHECK: srl %g1, 31, %g3
+0x87 0x30 0x60 0x1f
+
+# CHECK: sra %g1, %g2, %g3
+0x87 0x38 0x40 0x02
+
+# CHECK: sra %g1, 31, %g3
+0x87 0x38 0x60 0x1f
+
+# CHECK: sub %g1, %g2, %g3
+0x86 0x20 0x40 0x02
+
+# CHECK: subcc %g1, %g2, %g3
+0x86 0xa0 0x40 0x02
+
+# CHECK: subxcc %g1, %g2, %g3
+0x86 0xe0 0x40 0x02
+
+# CHECK: ba 4194303
+0x10 0xbf 0xff 0xff
+
+# CHECK: bne 4194303
+0x12 0xbf 0xff 0xff
+
+# CHECK: be 4194303
+0x02 0xbf 0xff 0xff
+
+# CHECK: bg 4194303
+0x14 0xbf 0xff 0xff
+
+# CHECK: ble 4194303
+0x04 0xbf 0xff 0xff
+
+# CHECK: bge 4194303
+0x16 0xbf 0xff 0xff
+
+# CHECK: bl 4194303
+0x06 0xbf 0xff 0xff
+
+# CHECK: bgu 4194303
+0x18 0xbf 0xff 0xff
+
+# CHECK: bleu 4194303
+0x08 0xbf 0xff 0xff
+
+# CHECK: bcc 4194303
+0x1a 0xbf 0xff 0xff
+
+# CHECK: bcs 4194303
+0x0a 0xbf 0xff 0xff
+
+# CHECK: bpos 4194303
+0x1c 0xbf 0xff 0xff
+
+# CHECK: bneg 4194303
+0x0c 0xbf 0xff 0xff
+
+# CHECK: bvc 4194303
+0x1e 0xbf 0xff 0xff
+
+# CHECK: bvs 4194303
+0x0e 0xbf 0xff 0xff
+
+# CHECK: fbu 4194303
+0x0f 0xbf 0xff 0xff
+
+# CHECK: fbg 4194303
+0x0d 0xbf 0xff 0xff
+
+# CHECK: fbug 4194303
+0x0b 0xbf 0xff 0xff
+
+# CHECK: fbl 4194303
+0x09 0xbf 0xff 0xff
+
+# CHECK: fbul 4194303
+0x07 0xbf 0xff 0xff
+
+# CHECK: fblg 4194303
+0x05 0xbf 0xff 0xff
+
+# CHECK: fbne 4194303
+0x03 0xbf 0xff 0xff
+
+# CHECK: fbe 4194303
+0x13 0xbf 0xff 0xff
+
+# CHECK: fbue 4194303
+0x15 0xbf 0xff 0xff
+
+# CHECK: fbge 4194303
+0x17 0xbf 0xff 0xff
+
+# CHECK: fbuge 4194303
+0x19 0xbf 0xff 0xff
+
+# CHECK: fble 4194303
+0x1b 0xbf 0xff 0xff
+
+# CHECK: fbule 4194303
+0x1d 0xbf 0xff 0xff
+
+# CHECK: fbo 4194303
+0x1f 0xbf 0xff 0xff
+
+# CHECK: restore
+0x81 0xe8 0x00 0x00
+
+# CHECK: call 16
+0x40 0x00 0x00 0x04
+
+# CHECK: add %g1, -10, %g2
+0x84 0x00 0x7f 0xf6
+
+# CHECK: save %sp, -196, %sp
+0x9d 0xe3 0xbf 0x3c
+
+# CHECK: cmp %g1, -2
+0x80 0xa0 0x7f 0xfe
+
+# CHECK: wr %g1, -2, %y
+0x81 0x80 0x7f 0xfe
+
+# CHECK: unimp 12
+0x00 0x00 0x00 0x0c
+
+# CHECK: jmp %g1+12
+0x81,0xc0,0x60,0x0c
+
+# CHECK: retl
+0x81 0xc3 0xe0 0x08
+
+# CHECK: ret
+0x81,0xc7,0xe0,0x08
+
+# CHECK: rett %i7+8
+0x81 0xcf 0xe0 0x08
diff --git a/test/MC/Disassembler/SystemZ/insns.txt b/test/MC/Disassembler/SystemZ/insns.txt
index 78d348d7c194..54a3c5b1d6a1 100644
--- a/test/MC/Disassembler/SystemZ/insns.txt
+++ b/test/MC/Disassembler/SystemZ/insns.txt
@@ -907,6 +907,42 @@
# CHECK: cdgbr %f15, %r15
0xb3 0xa5 0x00 0xff
+# CHECK: cdlfbr %f0, 0, %r0, 1
+0xb3 0x91 0x01 0x00
+
+# CHECK: cdlfbr %f0, 0, %r0, 15
+0xb3 0x91 0x0f 0x00
+
+# CHECK: cdlfbr %f0, 0, %r15, 1
+0xb3 0x91 0x01 0x0f
+
+# CHECK: cdlfbr %f0, 15, %r0, 1
+0xb3 0x91 0xf1 0x00
+
+# CHECK: cdlfbr %f4, 5, %r6, 7
+0xb3 0x91 0x57 0x46
+
+# CHECK: cdlfbr %f15, 0, %r0, 1
+0xb3 0x91 0x01 0xf0
+
+# CHECK: cdlgbr %f0, 0, %r0, 1
+0xb3 0xa1 0x01 0x00
+
+# CHECK: cdlgbr %f0, 0, %r0, 15
+0xb3 0xa1 0x0f 0x00
+
+# CHECK: cdlgbr %f0, 0, %r15, 1
+0xb3 0xa1 0x01 0x0f
+
+# CHECK: cdlgbr %f0, 15, %r0, 1
+0xb3 0xa1 0xf1 0x00
+
+# CHECK: cdlgbr %f4, 5, %r6, 7
+0xb3 0xa1 0x57 0x46
+
+# CHECK: cdlgbr %f15, 0, %r0, 1
+0xb3 0xa1 0x01 0xf0
+
# CHECK: cebr %f0, %f0
0xb3 0x09 0x00 0x00
@@ -970,6 +1006,42 @@
# CHECK: cegbr %f15, %r15
0xb3 0xa4 0x00 0xff
+# CHECK: celfbr %f0, 0, %r0, 1
+0xb3 0x90 0x01 0x00
+
+# CHECK: celfbr %f0, 0, %r0, 15
+0xb3 0x90 0x0f 0x00
+
+# CHECK: celfbr %f0, 0, %r15, 1
+0xb3 0x90 0x01 0x0f
+
+# CHECK: celfbr %f0, 15, %r0, 1
+0xb3 0x90 0xf1 0x00
+
+# CHECK: celfbr %f4, 5, %r6, 7
+0xb3 0x90 0x57 0x46
+
+# CHECK: celfbr %f15, 0, %r0, 1
+0xb3 0x90 0x01 0xf0
+
+# CHECK: celgbr %f0, 0, %r0, 1
+0xb3 0xa0 0x01 0x00
+
+# CHECK: celgbr %f0, 0, %r0, 15
+0xb3 0xa0 0x0f 0x00
+
+# CHECK: celgbr %f0, 0, %r15, 1
+0xb3 0xa0 0x01 0x0f
+
+# CHECK: celgbr %f0, 15, %r0, 1
+0xb3 0xa0 0xf1 0x00
+
+# CHECK: celgbr %f4, 5, %r6, 7
+0xb3 0xa0 0x57 0x46
+
+# CHECK: celgbr %f15, 0, %r0, 1
+0xb3 0xa0 0x01 0xf0
+
# CHECK: cfdbr %r0, 0, %f0
0xb3 0x99 0x00 0x00
@@ -1480,6 +1552,114 @@
# CHECK: clc 0(256,%r15), 0
0xd5 0xff 0xf0 0x00 0x00 0x00
+# CHECK: clfdbr %r0, 0, %f0, 1
+0xb3 0x9d 0x01 0x00
+
+# CHECK: clfdbr %r0, 0, %f0, 15
+0xb3 0x9d 0x0f 0x00
+
+# CHECK: clfdbr %r0, 0, %f15, 1
+0xb3 0x9d 0x01 0x0f
+
+# CHECK: clfdbr %r0, 15, %f0, 1
+0xb3 0x9d 0xf1 0x00
+
+# CHECK: clfdbr %r4, 5, %f6, 7
+0xb3 0x9d 0x57 0x46
+
+# CHECK: clfdbr %r15, 0, %f0, 1
+0xb3 0x9d 0x01 0xf0
+
+# CHECK: clfebr %r0, 0, %f0, 1
+0xb3 0x9c 0x01 0x00
+
+# CHECK: clfebr %r0, 0, %f0, 15
+0xb3 0x9c 0x0f 0x00
+
+# CHECK: clfebr %r0, 0, %f15, 1
+0xb3 0x9c 0x01 0x0f
+
+# CHECK: clfebr %r0, 15, %f0, 1
+0xb3 0x9c 0xf1 0x00
+
+# CHECK: clfebr %r4, 5, %f6, 7
+0xb3 0x9c 0x57 0x46
+
+# CHECK: clfebr %r15, 0, %f0, 1
+0xb3 0x9c 0x01 0xf0
+
+# CHECK: clfxbr %r0, 0, %f0, 1
+0xb3 0x9e 0x01 0x00
+
+# CHECK: clfxbr %r0, 0, %f0, 15
+0xb3 0x9e 0x0f 0x00
+
+# CHECK: clfxbr %r0, 0, %f13, 1
+0xb3 0x9e 0x01 0x0d
+
+# CHECK: clfxbr %r0, 15, %f0, 1
+0xb3 0x9e 0xf1 0x00
+
+# CHECK: clfxbr %r4, 5, %f8, 9
+0xb3 0x9e 0x59 0x48
+
+# CHECK: clfxbr %r15, 0, %f0, 1
+0xb3 0x9e 0x01 0xf0
+
+# CHECK: clgdbr %r0, 0, %f0, 1
+0xb3 0xad 0x01 0x00
+
+# CHECK: clgdbr %r0, 0, %f0, 15
+0xb3 0xad 0x0f 0x00
+
+# CHECK: clgdbr %r0, 0, %f15, 1
+0xb3 0xad 0x01 0x0f
+
+# CHECK: clgdbr %r0, 15, %f0, 1
+0xb3 0xad 0xf1 0x00
+
+# CHECK: clgdbr %r4, 5, %f6, 7
+0xb3 0xad 0x57 0x46
+
+# CHECK: clgdbr %r15, 0, %f0, 1
+0xb3 0xad 0x01 0xf0
+
+# CHECK: clgebr %r0, 0, %f0, 1
+0xb3 0xac 0x01 0x00
+
+# CHECK: clgebr %r0, 0, %f0, 15
+0xb3 0xac 0x0f 0x00
+
+# CHECK: clgebr %r0, 0, %f15, 1
+0xb3 0xac 0x01 0x0f
+
+# CHECK: clgebr %r0, 15, %f0, 1
+0xb3 0xac 0xf1 0x00
+
+# CHECK: clgebr %r4, 5, %f6, 7
+0xb3 0xac 0x57 0x46
+
+# CHECK: clgebr %r15, 0, %f0, 1
+0xb3 0xac 0x01 0xf0
+
+# CHECK: clgxbr %r0, 0, %f0, 1
+0xb3 0xae 0x01 0x00
+
+# CHECK: clgxbr %r0, 0, %f0, 15
+0xb3 0xae 0x0f 0x00
+
+# CHECK: clgxbr %r0, 0, %f13, 1
+0xb3 0xae 0x01 0x0d
+
+# CHECK: clgxbr %r0, 15, %f0, 1
+0xb3 0xae 0xf1 0x00
+
+# CHECK: clgxbr %r4, 5, %f8, 9
+0xb3 0xae 0x59 0x48
+
+# CHECK: clgxbr %r15, 0, %f0, 1
+0xb3 0xae 0x01 0xf0
+
# CHECK: clfhsi 0, 0
0xe5 0x5d 0x00 0x00 0x00 0x00
@@ -1996,6 +2176,42 @@
# CHECK: cxgbr %f13, %r15
0xb3 0xa6 0x00 0xdf
+# CHECK: cxlfbr %f0, 0, %r0, 1
+0xb3 0x92 0x01 0x00
+
+# CHECK: cxlfbr %f0, 0, %r0, 15
+0xb3 0x92 0x0f 0x00
+
+# CHECK: cxlfbr %f0, 0, %r15, 1
+0xb3 0x92 0x01 0x0f
+
+# CHECK: cxlfbr %f0, 15, %r0, 1
+0xb3 0x92 0xf1 0x00
+
+# CHECK: cxlfbr %f4, 5, %r6, 7
+0xb3 0x92 0x57 0x46
+
+# CHECK: cxlfbr %f13, 0, %r0, 1
+0xb3 0x92 0x01 0xd0
+
+# CHECK: cxlgbr %f0, 0, %r0, 1
+0xb3 0xa2 0x01 0x00
+
+# CHECK: cxlgbr %f0, 0, %r0, 15
+0xb3 0xa2 0x0f 0x00
+
+# CHECK: cxlgbr %f0, 0, %r15, 1
+0xb3 0xa2 0x01 0x0f
+
+# CHECK: cxlgbr %f0, 15, %r0, 1
+0xb3 0xa2 0xf1 0x00
+
+# CHECK: cxlgbr %f4, 5, %r6, 7
+0xb3 0xa2 0x57 0x46
+
+# CHECK: cxlgbr %f13, 0, %r0, 1
+0xb3 0xa2 0x01 0xd0
+
# CHECK: cy %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x59
@@ -2545,6 +2761,336 @@
# CHECK: la %r15, 0
0x41 0xf0 0x00 0x00
+# CHECK: laa %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xf8
+
+# CHECK: laa %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xf8
+
+# CHECK: laa %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xf8
+
+# CHECK: laa %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xf8
+
+# CHECK: laa %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xf8
+
+# CHECK: laa %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xf8
+
+# CHECK: laa %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xf8
+
+# CHECK: laa %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xf8
+
+# CHECK: laa %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xf8
+
+# CHECK: laa %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xf8
+
+# CHECK: laa %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xf8
+
+# CHECK: laag %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xe8
+
+# CHECK: laag %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xe8
+
+# CHECK: laag %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xe8
+
+# CHECK: laag %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xe8
+
+# CHECK: laag %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xe8
+
+# CHECK: laag %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xe8
+
+# CHECK: laag %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xe8
+
+# CHECK: laag %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xe8
+
+# CHECK: laag %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xe8
+
+# CHECK: laag %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xe8
+
+# CHECK: laag %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xe8
+
+# CHECK: laal %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xfa
+
+# CHECK: laal %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xfa
+
+# CHECK: laal %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xfa
+
+# CHECK: laal %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xfa
+
+# CHECK: laal %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xfa
+
+# CHECK: laal %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xfa
+
+# CHECK: laal %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xfa
+
+# CHECK: laal %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xfa
+
+# CHECK: laal %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xfa
+
+# CHECK: laal %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xfa
+
+# CHECK: laal %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xfa
+
+# CHECK: laalg %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xea
+
+# CHECK: laalg %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xea
+
+# CHECK: laalg %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xea
+
+# CHECK: laalg %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xea
+
+# CHECK: laalg %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xea
+
+# CHECK: laalg %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xea
+
+# CHECK: laalg %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xea
+
+# CHECK: laalg %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xea
+
+# CHECK: laalg %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xea
+
+# CHECK: laalg %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xea
+
+# CHECK: laalg %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xea
+
+# CHECK: lan %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xf4
+
+# CHECK: lan %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xf4
+
+# CHECK: lan %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xf4
+
+# CHECK: lan %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xf4
+
+# CHECK: lan %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xf4
+
+# CHECK: lan %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xf4
+
+# CHECK: lan %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xf4
+
+# CHECK: lan %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xf4
+
+# CHECK: lan %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xf4
+
+# CHECK: lan %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xf4
+
+# CHECK: lan %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xf4
+
+# CHECK: lang %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xe4
+
+# CHECK: lang %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xe4
+
+# CHECK: lang %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xe4
+
+# CHECK: lang %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xe4
+
+# CHECK: lang %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xe4
+
+# CHECK: lang %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xe4
+
+# CHECK: lang %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xe4
+
+# CHECK: lang %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xe4
+
+# CHECK: lang %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xe4
+
+# CHECK: lang %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xe4
+
+# CHECK: lang %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xe4
+
+# CHECK: lao %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xf6
+
+# CHECK: lao %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xf6
+
+# CHECK: lao %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xf6
+
+# CHECK: lao %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xf6
+
+# CHECK: lao %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xf6
+
+# CHECK: lao %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xf6
+
+# CHECK: lao %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xf6
+
+# CHECK: lao %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xf6
+
+# CHECK: lao %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xf6
+
+# CHECK: lao %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xf6
+
+# CHECK: lao %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xf6
+
+# CHECK: laog %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xe6
+
+# CHECK: laog %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xe6
+
+# CHECK: laog %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xe6
+
+# CHECK: laog %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xe6
+
+# CHECK: laog %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xe6
+
+# CHECK: laog %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xe6
+
+# CHECK: laog %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xe6
+
+# CHECK: laog %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xe6
+
+# CHECK: laog %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xe6
+
+# CHECK: laog %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xe6
+
+# CHECK: laog %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xe6
+
+# CHECK: lax %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xf7
+
+# CHECK: lax %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xf7
+
+# CHECK: lax %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xf7
+
+# CHECK: lax %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xf7
+
+# CHECK: lax %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xf7
+
+# CHECK: lax %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xf7
+
+# CHECK: lax %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xf7
+
+# CHECK: lax %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xf7
+
+# CHECK: lax %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xf7
+
+# CHECK: lax %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xf7
+
+# CHECK: lax %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xf7
+
+# CHECK: laxg %r0, %r0, -524288
+0xeb 0x00 0x00 0x00 0x80 0xe7
+
+# CHECK: laxg %r0, %r0, -1
+0xeb 0x00 0x0f 0xff 0xff 0xe7
+
+# CHECK: laxg %r0, %r0, 0
+0xeb 0x00 0x00 0x00 0x00 0xe7
+
+# CHECK: laxg %r0, %r0, 1
+0xeb 0x00 0x00 0x01 0x00 0xe7
+
+# CHECK: laxg %r0, %r0, 524287
+0xeb 0x00 0x0f 0xff 0x7f 0xe7
+
+# CHECK: laxg %r0, %r0, 0(%r1)
+0xeb 0x00 0x10 0x00 0x00 0xe7
+
+# CHECK: laxg %r0, %r0, 0(%r15)
+0xeb 0x00 0xf0 0x00 0x00 0xe7
+
+# CHECK: laxg %r0, %r0, 524287(%r1)
+0xeb 0x00 0x1f 0xff 0x7f 0xe7
+
+# CHECK: laxg %r0, %r0, 524287(%r15)
+0xeb 0x00 0xff 0xff 0x7f 0xe7
+
+# CHECK: laxg %r0, %r15, 0
+0xeb 0x0f 0x00 0x00 0x00 0xe7
+
+# CHECK: laxg %r15, %r0, 0
+0xeb 0xf0 0x00 0x00 0x00 0xe7
+
# CHECK: lay %r0, -524288
0xe3 0x00 0x00 0x00 0x80 0x71
@@ -2809,6 +3355,24 @@
# CHECK: ldxbr %f13, %f13
0xb3 0x45 0x00 0xdd
+# CHECK: ldxbra %f0, 0, %f0, 1
+0xb3 0x45 0x01 0x00
+
+# CHECK: ldxbra %f0, 0, %f0, 15
+0xb3 0x45 0x0f 0x00
+
+# CHECK: ldxbra %f0, 0, %f13, 1
+0xb3 0x45 0x01 0x0d
+
+# CHECK: ldxbra %f0, 15, %f0, 1
+0xb3 0x45 0xf1 0x00
+
+# CHECK: ldxbra %f4, 5, %f8, 9
+0xb3 0x45 0x59 0x48
+
+# CHECK: ldxbra %f13, 0, %f0, 1
+0xb3 0x45 0x01 0xd0
+
# CHECK: ldy %f0, -524288
0xed 0x00 0x00 0x00 0x80 0x65
@@ -2854,6 +3418,24 @@
# CHECK: ledbr %f15, %f15
0xb3 0x44 0x00 0xff
+# CHECK: ledbra %f0, 0, %f0, 1
+0xb3 0x44 0x01 0x00
+
+# CHECK: ledbra %f0, 0, %f0, 15
+0xb3 0x44 0x0f 0x00
+
+# CHECK: ledbra %f0, 0, %f15, 1
+0xb3 0x44 0x01 0x0f
+
+# CHECK: ledbra %f0, 15, %f0, 1
+0xb3 0x44 0xf1 0x00
+
+# CHECK: ledbra %f4, 5, %f6, 7
+0xb3 0x44 0x57 0x46
+
+# CHECK: ledbra %f15, 0, %f0, 1
+0xb3 0x44 0x01 0xf0
+
# CHECK: ler %f0, %f9
0x38 0x09
@@ -2902,6 +3484,24 @@
# CHECK: lexbr %f13, %f13
0xb3 0x46 0x00 0xdd
+# CHECK: lexbra %f0, 0, %f0, 1
+0xb3 0x46 0x01 0x00
+
+# CHECK: lexbra %f0, 0, %f0, 15
+0xb3 0x46 0x0f 0x00
+
+# CHECK: lexbra %f0, 0, %f13, 1
+0xb3 0x46 0x01 0x0d
+
+# CHECK: lexbra %f0, 15, %f0, 1
+0xb3 0x46 0xf1 0x00
+
+# CHECK: lexbra %f4, 5, %f8, 9
+0xb3 0x46 0x59 0x48
+
+# CHECK: lexbra %f13, 0, %f0, 1
+0xb3 0x46 0x01 0xd0
+
# CHECK: ley %f0, -524288
0xed 0x00 0x00 0x00 0x80 0x64
diff --git a/test/MC/Disassembler/SystemZ/lit.local.cfg b/test/MC/Disassembler/SystemZ/lit.local.cfg
index b12af09434be..5c02dd3614a4 100644
--- a/test/MC/Disassembler/SystemZ/lit.local.cfg
+++ b/test/MC/Disassembler/SystemZ/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'SystemZ' in targets:
+if not 'SystemZ' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/Disassembler/X86/avx-512.txt b/test/MC/Disassembler/X86/avx-512.txt
new file mode 100644
index 000000000000..f78db552935e
--- /dev/null
+++ b/test/MC/Disassembler/X86/avx-512.txt
@@ -0,0 +1,104 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64-apple-darwin9 -mcpu=knl | FileCheck %s
+
+# CHECK: vpbroadcastd %xmm18, %zmm28 {%k7} {z}
+0x62 0x22 0x7d 0xcf 0x58 0xe2
+
+# CHECK: vbroadcastss (%rsp), %zmm28
+0x62 0x62 0x7d 0x48 0x18 0x24 0x24
+
+# CHECK: vblendmpd (%rsi), %zmm2, %zmm8 {%k7}
+0x62 0x72 0xed 0x4f 0x65 0x06
+
+# CHECK: vpermpd (%rsi,%r10,4), %zmm2, %zmm8
+0x62 0x32 0xed 0x48 0x16 0x04 0x96
+
+# CHECK: vpbroadcastmw2d %k2, %zmm8
+0x62 0xd2 0x7e 0x48 0x3a 0xd0
+
+# CHECK: vpbroadcastq (%r9,%rax), %zmm28
+0x62 0x42 0xfd 0x48 0x59 0x24 0x01
+
+# CHECK: vbroadcastss %xmm0, %zmm1
+0x62 0xf2 0x7d 0x48 0x18 0xc8
+
+# CHECK: vextracti32x4 $4, %zmm0, (%r10)
+0x62 0xd3 0x7d 0x48 0x39 0x02 0x04
+
+# CHECK: vextracti32x4 $4, %zmm0, %xmm1
+0x62 0xf3 0x7d 0x48 0x39 0xc1 0x04
+
+# CHECK: vinserti32x4 $1, %xmm21, %zmm5, %zmm17
+0x62 0xa3 0x55 0x48 0x38 0xcd 0x01
+
+# CHECK: vmovaps %zmm21, %zmm5 {%k3}
+0x62 0xb1 0x7c 0x4b 0x28 0xed
+
+# CHECK: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k2}
+0x62 0xf2 0x7d 0x4a 0x92 0x0c 0x86
+
+# CHECK: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
+0x62 0xf2 0xfd 0x4a 0x92 0x0c 0x86
+
+# CHECK: vpslld $16, %zmm21, %zmm22
+0x62 0xb1 0x4d 0x40 0x72 0xf5 0x10
+
+# CHECK: vpord %zmm22, %zmm21, %zmm23
+0x62 0xa1 0x55 0x40 0xeb 0xfe
+
+#####################################################
+# MASK INSTRUCTIONS #
+#####################################################
+
+# CHECK: kshiftlw $3, %k1, %k2
+0xc4 0xe3 0xf9 0x32 0xd1 0x03
+
+# CHECK: kmovw (%rdi), %k1
+0xc5 0xf8 0x90 0x0f
+
+# CHECK: kmovw %k1, %eax
+0xc5 0xf8 0x93 0xc1
+
+# CHECK: kandw %k1, %k2, %k3
+0xc5 0xec 0x41 0xd9
+
+# CHECK: kmovw %k5, %k1
+0xc5 0xf8 0x90 0xcd
+
+#####################################################
+# COMPRESSED DISPLACEMENT #
+#####################################################
+
+# TupleType = FVM
+# CHECK: vmovdqu32 %zmm0, -448(%rcx)
+0x62 0xf1 0x7e 0x48 0x7f 0x41 0xf9
+
+# TupleType = T1S, 64-bit eltsize
+# CHECK: vaddsd 256(%rdx), %xmm0, %xmm16
+0x62 0xe1 0xff 0x08 0x58 0x42 0x20
+
+# TupleType = T1S, 32-bit eltsize
+# CHECK: vaddss 256(%rdx), %xmm0, %xmm16
+0x62 0xe1 0x7e 0x08 0x58 0x42 0x40
+
+# TupleType = FV
+# CHECK: vaddpd 256(%rdx), %zmm0, %zmm16
+0x62 0xe1 0xfd 0x48 0x58 0x42 0x04
+
+# TupleType = FV, broadcast, 64-bit eltsize
+# CHECK: vaddpd 256(%rdx){1to8}, %zmm0, %zmm16
+0x62 0xe1 0xfd 0x58 0x58 0x42 0x20
+
+# TupleType = FV, broadcast, 32-bit eltsize
+# CHECK: vaddps 256(%rdx){1to16}, %zmm0, %zmm16
+0x62 0xe1 0x7c 0x58 0x58 0x42 0x40
+
+# TupleType = T4
+# CHECK: vbroadcasti32x4 256(%rdx), %zmm16
+0x62 0xe2 0x7d 0x48 0x5a 0x42 0x10
+
+# Cases where we can't use cdisp8
+# CHECK: vaddss 255(%rdx), %xmm0, %xmm16
+0x62 0xe1 0x7e 0x08 0x58 0x82 0xff 0x00 0x00 0x00
+
+# CHECK: vaddss 1024(%rdx), %xmm0, %xmm16
+0x62 0xe1 0x7e 0x08 0x58 0x82 0x00 0x04 0x00 0x00
diff --git a/test/MC/Disassembler/X86/fp-stack.txt b/test/MC/Disassembler/X86/fp-stack.txt
new file mode 100644
index 000000000000..f9aa402a3913
--- /dev/null
+++ b/test/MC/Disassembler/X86/fp-stack.txt
@@ -0,0 +1,1037 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s
+# RUN: llvm-mc --disassemble %s -triple=i686-apple-darwin9 | FileCheck %s
+
+# CHECK: fadd %st(0)
+0xd8,0xc0
+
+# CHECK: fadd %st(1)
+0xd8,0xc1
+
+# CHECK: fadd %st(2)
+0xd8,0xc2
+
+# CHECK: fadd %st(3)
+0xd8,0xc3
+
+# CHECK: fadd %st(4)
+0xd8,0xc4
+
+# CHECK: fadd %st(5)
+0xd8,0xc5
+
+# CHECK: fadd %st(6)
+0xd8,0xc6
+
+# CHECK: fadd %st(7)
+0xd8,0xc7
+
+# CHECK: fmul %st(0)
+0xd8,0xc8
+
+# CHECK: fmul %st(1)
+0xd8,0xc9
+
+# CHECK: fmul %st(2)
+0xd8,0xca
+
+# CHECK: fmul %st(3)
+0xd8,0xcb
+
+# CHECK: fmul %st(4)
+0xd8,0xcc
+
+# CHECK: fmul %st(5)
+0xd8,0xcd
+
+# CHECK: fmul %st(6)
+0xd8,0xce
+
+# CHECK: fmul %st(7)
+0xd8,0xcf
+
+# CHECK: fcom %st(0)
+0xd8,0xd0
+
+# CHECK: fcom %st(1)
+0xd8,0xd1
+
+# CHECK: fcom %st(2)
+0xd8,0xd2
+
+# CHECK: fcom %st(3)
+0xd8,0xd3
+
+# CHECK: fcom %st(4)
+0xd8,0xd4
+
+# CHECK: fcom %st(5)
+0xd8,0xd5
+
+# CHECK: fcom %st(6)
+0xd8,0xd6
+
+# CHECK: fcom %st(7)
+0xd8,0xd7
+
+# CHECK: fcomp %st(0)
+0xd8,0xd8
+
+# CHECK: fcomp %st(1)
+0xd8,0xd9
+
+# CHECK: fcomp %st(2)
+0xd8,0xda
+
+# CHECK: fcomp %st(3)
+0xd8,0xdb
+
+# CHECK: fcomp %st(4)
+0xd8,0xdc
+
+# CHECK: fcomp %st(5)
+0xd8,0xdd
+
+# CHECK: fcomp %st(6)
+0xd8,0xde
+
+# CHECK: fcomp %st(7)
+0xd8,0xdf
+
+# CHECK: fsub %st(0)
+0xd8,0xe0
+
+# CHECK: fsub %st(1)
+0xd8,0xe1
+
+# CHECK: fsub %st(2)
+0xd8,0xe2
+
+# CHECK: fsub %st(3)
+0xd8,0xe3
+
+# CHECK: fsub %st(4)
+0xd8,0xe4
+
+# CHECK: fsub %st(5)
+0xd8,0xe5
+
+# CHECK: fsub %st(6)
+0xd8,0xe6
+
+# CHECK: fsub %st(7)
+0xd8,0xe7
+
+# CHECK: fsubr %st(0)
+0xd8,0xe8
+
+# CHECK: fsubr %st(1)
+0xd8,0xe9
+
+# CHECK: fsubr %st(2)
+0xd8,0xea
+
+# CHECK: fsubr %st(3)
+0xd8,0xeb
+
+# CHECK: fsubr %st(4)
+0xd8,0xec
+
+# CHECK: fsubr %st(5)
+0xd8,0xed
+
+# CHECK: fsubr %st(6)
+0xd8,0xee
+
+# CHECK: fsubr %st(7)
+0xd8,0xef
+
+# CHECK: fdiv %st(0)
+0xd8,0xf0
+
+# CHECK: fdiv %st(1)
+0xd8,0xf1
+
+# CHECK: fdiv %st(2)
+0xd8,0xf2
+
+# CHECK: fdiv %st(3)
+0xd8,0xf3
+
+# CHECK: fdiv %st(4)
+0xd8,0xf4
+
+# CHECK: fdiv %st(5)
+0xd8,0xf5
+
+# CHECK: fdiv %st(6)
+0xd8,0xf6
+
+# CHECK: fdiv %st(7)
+0xd8,0xf7
+
+# CHECK: fdivr %st(0)
+0xd8,0xf8
+
+# CHECK: fdivr %st(1)
+0xd8,0xf9
+
+# CHECK: fdivr %st(2)
+0xd8,0xfa
+
+# CHECK: fdivr %st(3)
+0xd8,0xfb
+
+# CHECK: fdivr %st(4)
+0xd8,0xfc
+
+# CHECK: fdivr %st(5)
+0xd8,0xfd
+
+# CHECK: fdivr %st(6)
+0xd8,0xfe
+
+# CHECK: fdivr %st(7)
+0xd8,0xff
+
+# CHECK: fld %st(0)
+0xd9,0xc0
+
+# CHECK: fld %st(1)
+0xd9,0xc1
+
+# CHECK: fld %st(2)
+0xd9,0xc2
+
+# CHECK: fld %st(3)
+0xd9,0xc3
+
+# CHECK: fld %st(4)
+0xd9,0xc4
+
+# CHECK: fld %st(5)
+0xd9,0xc5
+
+# CHECK: fld %st(6)
+0xd9,0xc6
+
+# CHECK: fld %st(7)
+0xd9,0xc7
+
+# CHECK: fxch %st(0)
+0xd9,0xc8
+
+# CHECK: fxch %st(1)
+0xd9,0xc9
+
+# CHECK: fxch %st(2)
+0xd9,0xca
+
+# CHECK: fxch %st(3)
+0xd9,0xcb
+
+# CHECK: fxch %st(4)
+0xd9,0xcc
+
+# CHECK: fxch %st(5)
+0xd9,0xcd
+
+# CHECK: fxch %st(6)
+0xd9,0xce
+
+# CHECK: fxch %st(7)
+0xd9,0xcf
+
+# CHECK: fnop
+0xd9,0xd0
+
+# CHECK: fchs
+0xd9,0xe0
+
+# CHECK: fabs
+0xd9,0xe1
+
+# CHECK: ftst
+0xd9,0xe4
+
+# CHECK: fxam
+0xd9,0xe5
+
+# CHECK: fld1
+0xd9,0xe8
+
+# CHECK: fldl2t
+0xd9,0xe9
+
+# CHECK: fldl2e
+0xd9,0xea
+
+# CHECK: fldpi
+0xd9,0xeb
+
+# CHECK: fldlg2
+0xd9,0xec
+
+# CHECK: fldln2
+0xd9,0xed
+
+# CHECK: fldz
+0xd9,0xee
+
+# CHECK: f2xm1
+0xd9,0xf0
+
+# CHECK: fyl2x
+0xd9,0xf1
+
+# CHECK: fptan
+0xd9,0xf2
+
+# CHECK: fpatan
+0xd9,0xf3
+
+# CHECK: fxtract
+0xd9,0xf4
+
+# CHECK: fprem1
+0xd9,0xf5
+
+# CHECK: fdecstp
+0xd9,0xf6
+
+# CHECK: fincstp
+0xd9,0xf7
+
+# CHECK: fprem
+0xd9,0xf8
+
+# CHECK: fyl2xp1
+0xd9,0xf9
+
+# CHECK: fsqrt
+0xd9,0xfa
+
+# CHECK: fsincos
+0xd9,0xfb
+
+# CHECK: frndint
+0xd9,0xfc
+
+# CHECK: fscale
+0xd9,0xfd
+
+# CHECK: fsin
+0xd9,0xfe
+
+# CHECK: fcos
+0xd9,0xff
+
+# CHECK: fcmovb %st(0), %st(0)
+0xda,0xc0
+
+# CHECK: fcmovb %st(1), %st(0)
+0xda,0xc1
+
+# CHECK: fcmovb %st(2), %st(0)
+0xda,0xc2
+
+# CHECK: fcmovb %st(3), %st(0)
+0xda,0xc3
+
+# CHECK: fcmovb %st(4), %st(0)
+0xda,0xc4
+
+# CHECK: fcmovb %st(5), %st(0)
+0xda,0xc5
+
+# CHECK: fcmovb %st(6), %st(0)
+0xda,0xc6
+
+# CHECK: fcmovb %st(7), %st(0)
+0xda,0xc7
+
+# CHECK: fcmove %st(0), %st(0)
+0xda,0xc8
+
+# CHECK: fcmove %st(1), %st(0)
+0xda,0xc9
+
+# CHECK: fcmove %st(2), %st(0)
+0xda,0xca
+
+# CHECK: fcmove %st(3), %st(0)
+0xda,0xcb
+
+# CHECK: fcmove %st(4), %st(0)
+0xda,0xcc
+
+# CHECK: fcmove %st(5), %st(0)
+0xda,0xcd
+
+# CHECK: fcmove %st(6), %st(0)
+0xda,0xce
+
+# CHECK: fcmove %st(7), %st(0)
+0xda,0xcf
+
+# CHECK: fcmovbe %st(0), %st(0)
+0xda,0xd0
+
+# CHECK: fcmovbe %st(1), %st(0)
+0xda,0xd1
+
+# CHECK: fcmovbe %st(2), %st(0)
+0xda,0xd2
+
+# CHECK: fcmovbe %st(3), %st(0)
+0xda,0xd3
+
+# CHECK: fcmovbe %st(4), %st(0)
+0xda,0xd4
+
+# CHECK: fcmovbe %st(5), %st(0)
+0xda,0xd5
+
+# CHECK: fcmovbe %st(6), %st(0)
+0xda,0xd6
+
+# CHECK: fcmovbe %st(7), %st(0)
+0xda,0xd7
+
+# CHECK: fcmovu %st(0), %st(0)
+0xda,0xd8
+
+# CHECK: fcmovu %st(1), %st(0)
+0xda,0xd9
+
+# CHECK: fcmovu %st(2), %st(0)
+0xda,0xda
+
+# CHECK: fcmovu %st(3), %st(0)
+0xda,0xdb
+
+# CHECK: fcmovu %st(4), %st(0)
+0xda,0xdc
+
+# CHECK: fcmovu %st(5), %st(0)
+0xda,0xdd
+
+# CHECK: fcmovu %st(6), %st(0)
+0xda,0xde
+
+# CHECK: fcmovu %st(7), %st(0)
+0xda,0xdf
+
+# CHECK: fucompp
+0xda,0xe9
+
+# CHECK: fcmovnb %st(0), %st(0)
+0xdb,0xc0
+
+# CHECK: fcmovnb %st(1), %st(0)
+0xdb,0xc1
+
+# CHECK: fcmovnb %st(2), %st(0)
+0xdb,0xc2
+
+# CHECK: fcmovnb %st(3), %st(0)
+0xdb,0xc3
+
+# CHECK: fcmovnb %st(4), %st(0)
+0xdb,0xc4
+
+# CHECK: fcmovnb %st(5), %st(0)
+0xdb,0xc5
+
+# CHECK: fcmovnb %st(6), %st(0)
+0xdb,0xc6
+
+# CHECK: fcmovnb %st(7), %st(0)
+0xdb,0xc7
+
+# CHECK: fcmovne %st(0), %st(0)
+0xdb,0xc8
+
+# CHECK: fcmovne %st(1), %st(0)
+0xdb,0xc9
+
+# CHECK: fcmovne %st(2), %st(0)
+0xdb,0xca
+
+# CHECK: fcmovne %st(3), %st(0)
+0xdb,0xcb
+
+# CHECK: fcmovne %st(4), %st(0)
+0xdb,0xcc
+
+# CHECK: fcmovne %st(5), %st(0)
+0xdb,0xcd
+
+# CHECK: fcmovne %st(6), %st(0)
+0xdb,0xce
+
+# CHECK: fcmovne %st(7), %st(0)
+0xdb,0xcf
+
+# CHECK: fcmovnbe %st(0), %st(0)
+0xdb,0xd0
+
+# CHECK: fcmovnbe %st(1), %st(0)
+0xdb,0xd1
+
+# CHECK: fcmovnbe %st(2), %st(0)
+0xdb,0xd2
+
+# CHECK: fcmovnbe %st(3), %st(0)
+0xdb,0xd3
+
+# CHECK: fcmovnbe %st(4), %st(0)
+0xdb,0xd4
+
+# CHECK: fcmovnbe %st(5), %st(0)
+0xdb,0xd5
+
+# CHECK: fcmovnbe %st(6), %st(0)
+0xdb,0xd6
+
+# CHECK: fcmovnbe %st(7), %st(0)
+0xdb,0xd7
+
+# CHECK: fcmovnu %st(0), %st(0)
+0xdb,0xd8
+
+# CHECK: fcmovnu %st(1), %st(0)
+0xdb,0xd9
+
+# CHECK: fcmovnu %st(2), %st(0)
+0xdb,0xda
+
+# CHECK: fcmovnu %st(3), %st(0)
+0xdb,0xdb
+
+# CHECK: fcmovnu %st(4), %st(0)
+0xdb,0xdc
+
+# CHECK: fcmovnu %st(5), %st(0)
+0xdb,0xdd
+
+# CHECK: fcmovnu %st(6), %st(0)
+0xdb,0xde
+
+# CHECK: fcmovnu %st(7), %st(0)
+0xdb,0xdf
+
+# CHECK: fnclex
+0xdb,0xe2
+
+# CHECK: fninit
+0xdb,0xe3
+
+# CHECK: fucomi %st(0)
+0xdb,0xe8
+
+# CHECK: fucomi %st(1)
+0xdb,0xe9
+
+# CHECK: fucomi %st(2)
+0xdb,0xea
+
+# CHECK: fucomi %st(3)
+0xdb,0xeb
+
+# CHECK: fucomi %st(4)
+0xdb,0xec
+
+# CHECK: fucomi %st(5)
+0xdb,0xed
+
+# CHECK: fucomi %st(6)
+0xdb,0xee
+
+# CHECK: fucomi %st(7)
+0xdb,0xef
+
+# CHECK: fcomi %st(0)
+0xdb,0xf0
+
+# CHECK: fcomi %st(1)
+0xdb,0xf1
+
+# CHECK: fcomi %st(2)
+0xdb,0xf2
+
+# CHECK: fcomi %st(3)
+0xdb,0xf3
+
+# CHECK: fcomi %st(4)
+0xdb,0xf4
+
+# CHECK: fcomi %st(5)
+0xdb,0xf5
+
+# CHECK: fcomi %st(6)
+0xdb,0xf6
+
+# CHECK: fcomi %st(7)
+0xdb,0xf7
+
+# CHECK: fadd %st(0), %st(0)
+0xdc,0xc0
+
+# CHECK: fadd %st(0), %st(1)
+0xdc,0xc1
+
+# CHECK: fadd %st(0), %st(2)
+0xdc,0xc2
+
+# CHECK: fadd %st(0), %st(3)
+0xdc,0xc3
+
+# CHECK: fadd %st(0), %st(4)
+0xdc,0xc4
+
+# CHECK: fadd %st(0), %st(5)
+0xdc,0xc5
+
+# CHECK: fadd %st(0), %st(6)
+0xdc,0xc6
+
+# CHECK: fadd %st(0), %st(7)
+0xdc,0xc7
+
+# CHECK: fmul %st(0), %st(0)
+0xdc,0xc8
+
+# CHECK: fmul %st(0), %st(1)
+0xdc,0xc9
+
+# CHECK: fmul %st(0), %st(2)
+0xdc,0xca
+
+# CHECK: fmul %st(0), %st(3)
+0xdc,0xcb
+
+# CHECK: fmul %st(0), %st(4)
+0xdc,0xcc
+
+# CHECK: fmul %st(0), %st(5)
+0xdc,0xcd
+
+# CHECK: fmul %st(0), %st(6)
+0xdc,0xce
+
+# CHECK: fmul %st(0), %st(7)
+0xdc,0xcf
+
+# CHECK: fsub %st(0), %st(0)
+0xdc,0xe0
+
+# CHECK: fsub %st(0), %st(1)
+0xdc,0xe1
+
+# CHECK: fsub %st(0), %st(2)
+0xdc,0xe2
+
+# CHECK: fsub %st(0), %st(3)
+0xdc,0xe3
+
+# CHECK: fsub %st(0), %st(4)
+0xdc,0xe4
+
+# CHECK: fsub %st(0), %st(5)
+0xdc,0xe5
+
+# CHECK: fsub %st(0), %st(6)
+0xdc,0xe6
+
+# CHECK: fsub %st(0), %st(7)
+0xdc,0xe7
+
+# CHECK: fsubr %st(0), %st(0)
+0xdc,0xe8
+
+# CHECK: fsubr %st(0), %st(1)
+0xdc,0xe9
+
+# CHECK: fsubr %st(0), %st(2)
+0xdc,0xea
+
+# CHECK: fsubr %st(0), %st(3)
+0xdc,0xeb
+
+# CHECK: fsubr %st(0), %st(4)
+0xdc,0xec
+
+# CHECK: fsubr %st(0), %st(5)
+0xdc,0xed
+
+# CHECK: fsubr %st(0), %st(6)
+0xdc,0xee
+
+# CHECK: fsubr %st(0), %st(7)
+0xdc,0xef
+
+# CHECK: fdiv %st(0), %st(0)
+0xdc,0xf0
+
+# CHECK: fdiv %st(0), %st(1)
+0xdc,0xf1
+
+# CHECK: fdiv %st(0), %st(2)
+0xdc,0xf2
+
+# CHECK: fdiv %st(0), %st(3)
+0xdc,0xf3
+
+# CHECK: fdiv %st(0), %st(4)
+0xdc,0xf4
+
+# CHECK: fdiv %st(0), %st(5)
+0xdc,0xf5
+
+# CHECK: fdiv %st(0), %st(6)
+0xdc,0xf6
+
+# CHECK: fdiv %st(0), %st(7)
+0xdc,0xf7
+
+# CHECK: fdivr %st(0), %st(0)
+0xdc,0xf8
+
+# CHECK: fdivr %st(0), %st(1)
+0xdc,0xf9
+
+# CHECK: fdivr %st(0), %st(2)
+0xdc,0xfa
+
+# CHECK: fdivr %st(0), %st(3)
+0xdc,0xfb
+
+# CHECK: fdivr %st(0), %st(4)
+0xdc,0xfc
+
+# CHECK: fdivr %st(0), %st(5)
+0xdc,0xfd
+
+# CHECK: fdivr %st(0), %st(6)
+0xdc,0xfe
+
+# CHECK: fdivr %st(0), %st(7)
+0xdc,0xff
+
+# CHECK: ffree %st(0)
+0xdd,0xc0
+
+# CHECK: ffree %st(1)
+0xdd,0xc1
+
+# CHECK: ffree %st(2)
+0xdd,0xc2
+
+# CHECK: ffree %st(3)
+0xdd,0xc3
+
+# CHECK: ffree %st(4)
+0xdd,0xc4
+
+# CHECK: ffree %st(5)
+0xdd,0xc5
+
+# CHECK: ffree %st(6)
+0xdd,0xc6
+
+# CHECK: ffree %st(7)
+0xdd,0xc7
+
+# CHECK: fst %st(0)
+0xdd,0xd0
+
+# CHECK: fst %st(1)
+0xdd,0xd1
+
+# CHECK: fst %st(2)
+0xdd,0xd2
+
+# CHECK: fst %st(3)
+0xdd,0xd3
+
+# CHECK: fst %st(4)
+0xdd,0xd4
+
+# CHECK: fst %st(5)
+0xdd,0xd5
+
+# CHECK: fst %st(6)
+0xdd,0xd6
+
+# CHECK: fst %st(7)
+0xdd,0xd7
+
+# CHECK: fstp %st(0)
+0xdd,0xd8
+
+# CHECK: fstp %st(1)
+0xdd,0xd9
+
+# CHECK: fstp %st(2)
+0xdd,0xda
+
+# CHECK: fstp %st(3)
+0xdd,0xdb
+
+# CHECK: fstp %st(4)
+0xdd,0xdc
+
+# CHECK: fstp %st(5)
+0xdd,0xdd
+
+# CHECK: fstp %st(6)
+0xdd,0xde
+
+# CHECK: fstp %st(7)
+0xdd,0xdf
+
+# CHECK: fucom %st(0)
+0xdd,0xe0
+
+# CHECK: fucom %st(1)
+0xdd,0xe1
+
+# CHECK: fucom %st(2)
+0xdd,0xe2
+
+# CHECK: fucom %st(3)
+0xdd,0xe3
+
+# CHECK: fucom %st(4)
+0xdd,0xe4
+
+# CHECK: fucom %st(5)
+0xdd,0xe5
+
+# CHECK: fucom %st(6)
+0xdd,0xe6
+
+# CHECK: fucom %st(7)
+0xdd,0xe7
+
+# CHECK: fucomp %st(0)
+0xdd,0xe8
+
+# CHECK: fucomp %st(1)
+0xdd,0xe9
+
+# CHECK: fucomp %st(2)
+0xdd,0xea
+
+# CHECK: fucomp %st(3)
+0xdd,0xeb
+
+# CHECK: fucomp %st(4)
+0xdd,0xec
+
+# CHECK: fucomp %st(5)
+0xdd,0xed
+
+# CHECK: fucomp %st(6)
+0xdd,0xee
+
+# CHECK: fucomp %st(7)
+0xdd,0xef
+
+# CHECK: faddp %st(0)
+0xde,0xc0
+
+# CHECK: faddp %st(1)
+0xde,0xc1
+
+# CHECK: faddp %st(2)
+0xde,0xc2
+
+# CHECK: faddp %st(3)
+0xde,0xc3
+
+# CHECK: faddp %st(4)
+0xde,0xc4
+
+# CHECK: faddp %st(5)
+0xde,0xc5
+
+# CHECK: faddp %st(6)
+0xde,0xc6
+
+# CHECK: faddp %st(7)
+0xde,0xc7
+
+# CHECK: fmulp %st(0)
+0xde,0xc8
+
+# CHECK: fmulp %st(1)
+0xde,0xc9
+
+# CHECK: fmulp %st(2)
+0xde,0xca
+
+# CHECK: fmulp %st(3)
+0xde,0xcb
+
+# CHECK: fmulp %st(4)
+0xde,0xcc
+
+# CHECK: fmulp %st(5)
+0xde,0xcd
+
+# CHECK: fmulp %st(6)
+0xde,0xce
+
+# CHECK: fmulp %st(7)
+0xde,0xcf
+
+# CHECK: fcompp
+0xde,0xd9
+
+# CHECK: fsubp %st(0)
+0xde,0xe0
+
+# CHECK: fsubp %st(1)
+0xde,0xe1
+
+# CHECK: fsubp %st(2)
+0xde,0xe2
+
+# CHECK: fsubp %st(3)
+0xde,0xe3
+
+# CHECK: fsubp %st(4)
+0xde,0xe4
+
+# CHECK: fsubp %st(5)
+0xde,0xe5
+
+# CHECK: fsubp %st(6)
+0xde,0xe6
+
+# CHECK: fsubp %st(7)
+0xde,0xe7
+
+# CHECK: fsubrp %st(0)
+0xde,0xe8
+
+# CHECK: fsubrp %st(1)
+0xde,0xe9
+
+# CHECK: fsubrp %st(2)
+0xde,0xea
+
+# CHECK: fsubrp %st(3)
+0xde,0xeb
+
+# CHECK: fsubrp %st(4)
+0xde,0xec
+
+# CHECK: fsubrp %st(5)
+0xde,0xed
+
+# CHECK: fsubrp %st(6)
+0xde,0xee
+
+# CHECK: fsubrp %st(7)
+0xde,0xef
+
+# CHECK: fdivp %st(0)
+0xde,0xf0
+
+# CHECK: fdivp %st(1)
+0xde,0xf1
+
+# CHECK: fdivp %st(2)
+0xde,0xf2
+
+# CHECK: fdivp %st(3)
+0xde,0xf3
+
+# CHECK: fdivp %st(4)
+0xde,0xf4
+
+# CHECK: fdivp %st(5)
+0xde,0xf5
+
+# CHECK: fdivp %st(6)
+0xde,0xf6
+
+# CHECK: fdivp %st(7)
+0xde,0xf7
+
+# CHECK: fdivrp %st(0)
+0xde,0xf8
+
+# CHECK: fdivrp %st(1)
+0xde,0xf9
+
+# CHECK: fdivrp %st(2)
+0xde,0xfa
+
+# CHECK: fdivrp %st(3)
+0xde,0xfb
+
+# CHECK: fdivrp %st(4)
+0xde,0xfc
+
+# CHECK: fdivrp %st(5)
+0xde,0xfd
+
+# CHECK: fdivrp %st(6)
+0xde,0xfe
+
+# CHECK: fdivrp %st(7)
+0xde,0xff
+
+# CHECK: fnstsw %ax
+0xdf,0xe0
+
+# CHECK: fucompi %st(0)
+0xdf,0xe8
+
+# CHECK: fucompi %st(1)
+0xdf,0xe9
+
+# CHECK: fucompi %st(2)
+0xdf,0xea
+
+# CHECK: fucompi %st(3)
+0xdf,0xeb
+
+# CHECK: fucompi %st(4)
+0xdf,0xec
+
+# CHECK: fucompi %st(5)
+0xdf,0xed
+
+# CHECK: fucompi %st(6)
+0xdf,0xee
+
+# CHECK: fucompi %st(7)
+0xdf,0xef
+
+# CHECK: fcompi %st(0)
+0xdf,0xf0
+
+# CHECK: fcompi %st(1)
+0xdf,0xf1
+
+# CHECK: fcompi %st(2)
+0xdf,0xf2
+
+# CHECK: fcompi %st(3)
+0xdf,0xf3
+
+# CHECK: fcompi %st(4)
+0xdf,0xf4
+
+# CHECK: fcompi %st(5)
+0xdf,0xf5
+
+# CHECK: fcompi %st(6)
+0xdf,0xf6
+
+# CHECK: fcompi %st(7)
+0xdf,0xf7
diff --git a/test/MC/Disassembler/X86/hex-immediates.txt b/test/MC/Disassembler/X86/hex-immediates.txt
index 80d24487ee74..fb76c26bcb76 100644
--- a/test/MC/Disassembler/X86/hex-immediates.txt
+++ b/test/MC/Disassembler/X86/hex-immediates.txt
@@ -1,4 +1,4 @@
-# RUN: llvm-mc --hdis %s -triple=x86_64-apple-darwin9 2>&1 | FileCheck %s
+# RUN: llvm-mc --print-imm-hex --disassemble %s -triple=x86_64-apple-darwin9 2>&1 | FileCheck %s
# CHECK: movabsq $0x7fffffffffffffff, %rcx
0x48 0xb9 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0x7f
diff --git a/test/MC/Disassembler/X86/lit.local.cfg b/test/MC/Disassembler/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/MC/Disassembler/X86/lit.local.cfg
+++ b/test/MC/Disassembler/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/Disassembler/X86/missing-sib.txt b/test/MC/Disassembler/X86/missing-sib.txt
new file mode 100644
index 000000000000..814f68406d38
--- /dev/null
+++ b/test/MC/Disassembler/X86/missing-sib.txt
@@ -0,0 +1,4 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64-apple-darwin9 2>&1 | grep "invalid instruction encoding"
+
+# This instruction would decode as jmp32m if it didn't run out of bytes
+0xff 0x24
diff --git a/test/MC/Disassembler/X86/moffs.txt b/test/MC/Disassembler/X86/moffs.txt
new file mode 100644
index 000000000000..dd2664cb7737
--- /dev/null
+++ b/test/MC/Disassembler/X86/moffs.txt
@@ -0,0 +1,86 @@
+# RUN: llvm-mc --disassemble --print-imm-hex %s -triple=i686-linux-gnu-code16 | FileCheck --check-prefix=16 %s
+# RUN: llvm-mc --disassemble --print-imm-hex %s -triple=i686-linux-gnu | FileCheck --check-prefix=32 %s
+# RUN: llvm-mc --disassemble --print-imm-hex %s -triple=x86_64-linux-gnu | FileCheck --check-prefix=64 %s
+
+# 16: movb 0x5a5a, %al
+# 32: movb 0x5a5a5a5a, %al
+# 64: movabsb 0x5a5a5a5a5a5a5a5a, %al
+0xa0 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movb 0x5a5a5a5a, %al
+# 32: movb 0x5a5a, %al
+# 64: movabsb 0x5a5a5a5a, %al
+0x67 0xa0 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movw 0x5a5a, %ax
+# 32: movl 0x5a5a5a5a, %eax
+# 64: movabsl 0x5a5a5a5a5a5a5a5a, %eax
+0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movw 0x5a5a5a5a, %ax
+# 32: movl 0x5a5a, %eax
+# 64: movabsl 0x5a5a5a5a, %eax
+0x67 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl 0x5a5a, %eax
+# 32: movw 0x5a5a5a5a, %ax
+# 64: movabsw 0x5a5a5a5a5a5a5a5a, %ax
+0x66 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl 0x5a5a5a5a, %eax
+# 32: movw 0x5a5a, %ax
+# 64: movabsw 0x5a5a5a5a, %ax
+0x66 0x67 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl 0x5a5a5a5a, %eax
+# 32: movw 0x5a5a, %ax
+# 64: movabsw 0x5a5a5a5a, %ax
+0x67 0x66 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl %es:0x5a5a5a5a, %eax
+# 32: movw %es:0x5a5a, %ax
+# 64: movabsw %es:0x5a5a5a5a, %ax
+0x67 0x26 0x66 0xa1 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+
+
+# 16: movb %al, 0x5a5a
+# 32: movb %al, 0x5a5a5a5a
+# 64: movabsb %al, 0x5a5a5a5a5a5a5a5a
+0xa2 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movb %al, 0x5a5a5a5a
+# 32: movb %al, 0x5a5a
+# 64: movabsb %al, 0x5a5a5a5a
+0x67 0xa2 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movw %ax, 0x5a5a
+# 32: movl %eax, 0x5a5a5a5a
+# 64: movabsl %eax, 0x5a5a5a5a5a5a5a5a
+0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movw %ax, %gs:0x5a5a5a5a
+# 32: movl %eax, %gs:0x5a5a
+# 64: movabsl %eax, %gs:0x5a5a5a5a
+0x65 0x67 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl %eax, 0x5a5a
+# 32: movw %ax, 0x5a5a5a5a
+# 64: movabsw %ax, 0x5a5a5a5a5a5a5a5a
+0x66 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl %eax, 0x5a5a5a5a
+# 32: movw %ax, 0x5a5a
+# 64: movabsw %ax, 0x5a5a5a5a
+0x66 0x67 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl %eax, 0x5a5a5a5a
+# 32: movw %ax, 0x5a5a
+# 64: movabsw %ax, 0x5a5a5a5a
+0x67 0x66 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
+# 16: movl %eax, %es:0x5a5a5a5a
+# 32: movw %ax, %es:0x5a5a
+# 64: movabsw %ax, %es:0x5a5a5a5a
+0x67 0x26 0x66 0xa3 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
+
diff --git a/test/MC/Disassembler/X86/padlock.txt b/test/MC/Disassembler/X86/padlock.txt
new file mode 100644
index 000000000000..2060a33dcd49
--- /dev/null
+++ b/test/MC/Disassembler/X86/padlock.txt
@@ -0,0 +1,28 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64-apple-darwin9 | FileCheck %s
+
+# CHECK: xstore
+0x0f 0xa7 0xc0
+
+# CHECK: xcryptecb
+0x0f 0xa7 0xc8
+
+# CHECK: xcryptcbc
+0x0f 0xa7 0xd0
+
+# CHECK: xcryptctr
+0x0f 0xa7 0xd8
+
+# CHECK: xcryptcfb
+0x0f 0xa7 0xe0
+
+# CHECK: xcryptofb
+0x0f 0xa7 0xe8
+
+# CHECK: xsha1
+0x0f 0xa6 0xc8
+
+# CHECK: xsha256
+0x0f 0xa6 0xd0
+
+# CHECK: montmul
+0x0f 0xa6 0xc0
diff --git a/test/MC/Disassembler/X86/prefixes.txt b/test/MC/Disassembler/X86/prefixes.txt
index 56596e387511..b8830dc3f3b9 100644
--- a/test/MC/Disassembler/X86/prefixes.txt
+++ b/test/MC/Disassembler/X86/prefixes.txt
@@ -44,6 +44,10 @@
# CHECK-NEXT: nop
0xf0 0x90
+# Test that immediate is printed correctly within opsize prefix
+# CHECK: addw $-12, %ax
+0x66,0x83,0xc0,0xf4
+
# Test that multiple redundant prefixes work (redundant, but valid x86).
# CHECK: rep
# CHECK-NEXT: rep
diff --git a/test/MC/Disassembler/X86/simple-tests.txt b/test/MC/Disassembler/X86/simple-tests.txt
index 7ca087438972..e6e9c7bc9cc5 100644
--- a/test/MC/Disassembler/X86/simple-tests.txt
+++ b/test/MC/Disassembler/X86/simple-tests.txt
@@ -359,6 +359,18 @@
# CHECK: xchgq %r8, %rax
0x49 0x90
+# CHECK: xchgl %r9d, %eax
+0x41 0x91
+
+# CHECK: xchgq %r9, %rax
+0x49 0x91
+
+# CHECK: xchgl %ecx, %eax
+0x91
+
+# CHECK: xchgq %rcx, %rax
+0x48 0x91
+
# CHECK: addb $0, %al
0x04 0x00
diff --git a/test/MC/Disassembler/X86/x86-16.txt b/test/MC/Disassembler/X86/x86-16.txt
new file mode 100644
index 000000000000..93974d433e3d
--- /dev/null
+++ b/test/MC/Disassembler/X86/x86-16.txt
@@ -0,0 +1,788 @@
+# RUN: llvm-mc --disassemble %s -triple=i686-linux-gnu-code16 | FileCheck %s
+
+# CHECK: movl $305419896, %ebx
+0x66 0xbb 0x78 0x56 0x34 0x12
+
+# CHECK: pause
+0xf3 0x90
+
+# CHECK: sfence
+0x0f 0xae 0xf8
+
+# CHECK: lfence
+0x0f 0xae 0xe8
+
+# CHECK: mfence
+0x0f 0xae 0xf0
+
+# CHECK: stgi
+0x0f 0x01 0xdc
+
+# CHECK: clgi
+0x0f 0x01 0xdd
+
+# CHECK: rdtscp
+0x0f 0x01 0xf9
+
+# CHECK: movl %eax, 16(%ebp)
+0x67 0x66 0x89 0x45 0x10
+
+# CHECK: movl %eax, -16(%ebp)
+0x67 0x66 0x89 0x45 0xf0
+
+# CHECK: testb %bl, %cl
+0x84 0xcb
+
+# CHECK: cmpl %eax, %ebx
+0x66 0x39 0xc3
+
+# CHECK: addw %ax, %ax
+0x01 0xc0
+
+# CHECK: shrl %eax
+0x66 0xd1 0xe8
+
+# CHECK: shll %eax
+0x66 0xd1 0xe0
+
+# CHECK: shll %eax
+0x66 0xd1 0xe0
+
+# CHECK: movb 0, %al
+0xa0 0x00 0x00
+
+# CHECK: movw 0, %ax
+0xa1 0x00 0x00
+
+# CHECK: movl 0, %eax
+0x66 0xa1 0x00 0x00
+
+# CHECK: into
+0xce
+
+# CHECK: int3
+0xcc
+
+# CHECK: int $4
+0xcd 0x04
+
+# CHECK: int $127
+0xcd 0x7f
+
+# CHECK: pushfw
+0x9c
+
+# CHECK: pushfl
+0x66 0x9c
+
+# CHECK: popfw
+0x9d
+
+# CHECK: popfl
+0x66 0x9d
+
+# CHECK: retl
+0x66 0xc3
+
+# CHECK: cmoval %eax, %edx
+0x66 0x0f 0x47 0xd0
+
+# CHECK: cmovael %eax, %edx
+0x66 0x0f 0x43 0xd0
+
+# CHECK: cmovbel %eax, %edx
+0x66 0x0f 0x46 0xd0
+
+# CHECK: cmovbl %eax, %edx
+0x66 0x0f 0x42 0xd0
+
+# CHECK: cmovbw %bx, %bx
+0x0f 0x42 0xdb
+
+# CHECK: cmovbel %eax, %edx
+0x66 0x0f 0x46 0xd0
+
+# CHECK: cmovbl %eax, %edx
+0x66 0x0f 0x42 0xd0
+
+# CHECK: cmovel %eax, %edx
+0x66 0x0f 0x44 0xd0
+
+# CHECK: cmovgl %eax, %edx
+0x66 0x0f 0x4f 0xd0
+
+# CHECK: cmovgel %eax, %edx
+0x66 0x0f 0x4d 0xd0
+
+# CHECK: cmovll %eax, %edx
+0x66 0x0f 0x4c 0xd0
+
+# CHECK: cmovlel %eax, %edx
+0x66 0x0f 0x4e 0xd0
+
+# CHECK: cmovbel %eax, %edx
+0x66 0x0f 0x46 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovael %eax, %edx
+0x66 0x0f 0x43 0xd0
+
+# CHECK: cmoval %eax, %edx
+0x66 0x0f 0x47 0xd0
+
+# CHECK: cmovael %eax, %edx
+0x66 0x0f 0x43 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovlel %eax, %edx
+0x66 0x0f 0x4e 0xd0
+
+# CHECK: cmovgel %eax, %edx
+0x66 0x0f 0x4d 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovlel %eax, %edx
+0x66 0x0f 0x4e 0xd0
+
+# CHECK: cmovll %eax, %edx
+0x66 0x0f 0x4c 0xd0
+
+# CHECK: cmovgel %eax, %edx
+0x66 0x0f 0x4d 0xd0
+
+# CHECK: cmovgl %eax, %edx
+0x66 0x0f 0x4f 0xd0
+
+# CHECK: cmovnol %eax, %edx
+0x66 0x0f 0x41 0xd0
+
+# CHECK: cmovnpl %eax, %edx
+0x66 0x0f 0x4b 0xd0
+
+# CHECK: cmovnsl %eax, %edx
+0x66 0x0f 0x49 0xd0
+
+# CHECK: cmovnel %eax, %edx
+0x66 0x0f 0x45 0xd0
+
+# CHECK: cmovol %eax, %edx
+0x66 0x0f 0x40 0xd0
+
+# CHECK: cmovpl %eax, %edx
+0x66 0x0f 0x4a 0xd0
+
+# CHECK: cmovsl %eax, %edx
+0x66 0x0f 0x48 0xd0
+
+# CHECK: cmovel %eax, %edx
+0x66 0x0f 0x44 0xd0
+
+# CHECK: fmul %st(0)
+0xd8 0xc8
+
+# CHECK: fadd %st(0)
+0xd8 0xc0
+
+# CHECK: fsub %st(0)
+0xd8 0xe0
+
+# CHECK: fsubr %st(0)
+0xd8 0xe8
+
+# CHECK: fdivr %st(0)
+0xd8 0xf8
+
+# CHECK: fdiv %st(0)
+0xd8 0xf0
+
+# CHECK: movl %cs, %eax
+0x66 0x8c 0xc8
+
+# CHECK: movw %cs, %ax
+0x8c 0xc8
+
+# CHECK: movl %cs, (%eax)
+0x67 0x66 0x8c 0x08
+
+# CHECK: movw %cs, (%eax)
+0x67 0x8c 0x08
+
+# CHECK: movl %eax, %cs
+0x66 0x8e 0xc8
+
+# CHECK: movl (%eax), %cs
+0x67 0x66 0x8e 0x08
+
+# CHECK: movw (%eax), %cs
+0x67 0x8e 0x08
+
+# CHECKX: movl %cr0, %eax
+0x0f 0x20 0xc0
+
+# CHECKX: movl %cr1, %eax
+0x0f 0x20 0xc8
+
+# CHECKX: movl %cr2, %eax
+0x0f 0x20 0xd0
+
+# CHECKX: movl %cr3, %eax
+0x0f 0x20 0xd8
+
+# CHECKX: movl %cr4, %eax
+0x0f 0x20 0xe0
+
+# CHECKX: movl %dr0, %eax
+0x0f 0x21 0xc0
+
+# CHECKX: movl %dr1, %eax
+0x0f 0x21 0xc8
+
+# CHECKX: movl %dr1, %eax
+0x0f 0x21 0xc8
+
+# CHECKX: movl %dr2, %eax
+0x0f 0x21 0xd0
+
+# CHECKX: movl %dr3, %eax
+0x0f 0x21 0xd8
+
+# CHECKX: movl %dr4, %eax
+0x0f 0x21 0xe0
+
+# CHECKX: movl %dr5, %eax
+0x0f 0x21 0xe8
+
+# CHECKX: movl %dr6, %eax
+0x0f 0x21 0xf0
+
+# CHECKX: movl %dr7, %eax
+0x0f 0x21 0xf8
+
+# CHECK: wait
+0x9b
+
+# CHECK: movl %gs:124, %eax
+0x65 0x66 0x8b 0x06 0x7c 0x00
+
+# CHECK: pushaw
+0x60
+
+# CHECK: popaw
+0x61
+
+# CHECK: pushaw
+0x60
+
+# CHECK: popaw
+0x61
+
+# CHECK: pushal
+0x66 0x60
+
+# CHECK: popal
+0x66 0x61
+
+# CHECK: jmpw *8(%eax)
+0x67 0xff 0x60 0x08
+
+# CHECK: jmpl *8(%eax)
+0x67 0x66 0xff 0x60 0x08
+
+# CHECK: lcalll $2, $4660
+0x66 0x9a 0x34 0x12 0x00 0x00 0x02 0x00
+
+# CHECK: jcxz
+0xe3 0x00
+
+# CHECK: jecxz
+0x67 0xe3 0x00
+
+# CHECK: iretw
+0xcf
+
+# CHECK: iretw
+0xcf
+
+# CHECK: iretl
+0x66 0xcf
+
+# CHECK: sysretl
+0x0f 0x07
+
+# CHECK: sysretl
+0x0f 0x07
+
+# CHECK: testl -24(%ebp), %ecx
+0x67 0x66 0x85 0x4d 0xe8
+
+# CHECK: testl -24(%ebp), %ecx
+0x67 0x66 0x85 0x4d 0xe8
+
+# CHECK: pushw %cs
+0x0e
+
+# CHECK: pushw %ds
+0x1e
+
+# CHECK: pushw %ss
+0x16
+
+# CHECK: pushw %es
+0x06
+
+# CHECK: pushw %fs
+0x0f 0xa0
+
+# CHECK: pushw %gs
+0x0f 0xa8
+
+# CHECK: pushw %cs
+0x0e
+
+# CHECK: pushw %ds
+0x1e
+
+# CHECK: pushw %ss
+0x16
+
+# CHECK: pushw %es
+0x06
+
+# CHECK: pushw %fs
+0x0f 0xa0
+
+# CHECK: pushw %gs
+0x0f 0xa8
+
+# CHECK: pushl %cs
+0x66 0x0e
+
+# CHECK: pushl %ds
+0x66 0x1e
+
+# CHECK: pushl %ss
+0x66 0x16
+
+# CHECK: pushl %es
+0x66 0x06
+
+# CHECK: pushl %fs
+0x66 0x0f 0xa0
+
+# CHECK: pushl %gs
+0x66 0x0f 0xa8
+
+# CHECK: popw %ss
+0x17
+
+# CHECK: popw %ds
+0x1f
+
+# CHECK: popw %es
+0x07
+
+# CHECK: popl %ss
+0x66 0x17
+
+# CHECK: popl %ds
+0x66 0x1f
+
+# CHECK: popl %es
+0x66 0x07
+
+# CHECK: pushfl
+0x66 0x9c
+
+# CHECK: popfl
+0x66 0x9d
+
+# CHECK: pushfl
+0x66 0x9c
+
+# CHECK: popfl
+0x66 0x9d
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: setae %bl
+0x0f 0x93 0xc3
+
+# CHECK: setae %bl
+0x0f 0x93 0xc3
+
+# CHECK: setbe %bl
+0x0f 0x96 0xc3
+
+# CHECK: seta %bl
+0x0f 0x97 0xc3
+
+# CHECK: setp %bl
+0x0f 0x9a 0xc3
+
+# CHECK: setnp %bl
+0x0f 0x9b 0xc3
+
+# CHECK: setl %bl
+0x0f 0x9c 0xc3
+
+# CHECK: setge %bl
+0x0f 0x9d 0xc3
+
+# CHECK: setle %bl
+0x0f 0x9e 0xc3
+
+# CHECK: setg %bl
+0x0f 0x9f 0xc3
+
+# CHECK: setne %cl
+0x0f 0x95 0xc1
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: setb %bl
+0x0f 0x92 0xc3
+
+# CHECK: lcalll $31438, $31438
+0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: lcalll $31438, $31438
+0x66 0x9a 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: ljmpl $31438, $31438
+0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: ljmpl $31438, $31438
+0x66 0xea 0xce 0x7a 0x00 0x00 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: lcallw $31438, $31438
+0x9a 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: ljmpw $31438, $31438
+0xea 0xce 0x7a 0xce 0x7a
+
+# CHECK: calll
+0x66 0xe8 0x00 0x00 0x00 0x00
+
+# CHECK: callw
+0xe8 0x00 0x00
+
+# CHECK: incb %al
+0xfe 0xc0
+
+# CHECK: incw %ax
+0x40
+
+# CHECK: incl %eax
+0x66 0x40
+
+# CHECK: decb %al
+0xfe 0xc8
+
+# CHECK: decw %ax
+0x48
+
+# CHECK: decl %eax
+0x66 0x48
+
+# CHECK: pshufw $14, %mm4, %mm0
+0x0f 0x70 0xc4 0x0e
+
+# CHECK: pshufw $90, %mm4, %mm0
+0x0f 0x70 0xc4 0x5a
+
+# CHECK: aaa
+0x37
+
+# CHECK: aad $1
+0xd5 0x01
+
+# CHECK: aad
+0xd5 0x0a
+
+# CHECK: aad
+0xd5 0x0a
+
+# CHECK: aam $2
+0xd4 0x02
+
+# CHECK: aam
+0xd4 0x0a
+
+# CHECK: aam
+0xd4 0x0a
+
+# CHECK: aas
+0x3f
+
+# CHECK: daa
+0x27
+
+# CHECK: das
+0x2f
+
+# CHECK: retw $31438
+0xc2 0xce 0x7a
+
+# CHECK: lretw $31438
+0xca 0xce 0x7a
+
+# CHECK: retw $31438
+0xc2 0xce 0x7a
+
+# CHECK: lretw $31438
+0xca 0xce 0x7a
+
+# CHECK: retl $31438
+0x66 0xc2 0xce 0x7a
+
+# CHECK: lretl $31438
+0x66 0xca 0xce 0x7a
+
+# CHECK: bound 2(%eax), %bx
+0x67 0x62 0x58 0x02
+
+# CHECK: bound 4(%ebx), %ecx
+0x67 0x66 0x62 0x4b 0x04
+
+# CHECK: arpl %bx, %bx
+0x63 0xdb
+
+# CHECK: arpl %bx, 6(%ecx)
+0x67 0x63 0x59 0x06
+
+# CHECK: lgdtw 4(%eax)
+0x67 0x0f 0x01 0x50 0x04
+
+# CHECK: lgdtw 4(%eax)
+0x67 0x0f 0x01 0x50 0x04
+
+# CHECK: lgdtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x50 0x04
+
+# CHECK: lidtw 4(%eax)
+0x67 0x0f 0x01 0x58 0x04
+
+# CHECK: lidtw 4(%eax)
+0x67 0x0f 0x01 0x58 0x04
+
+# CHECK: lidtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x58 0x04
+
+# CHECK: sgdtw 4(%eax)
+0x67 0x0f 0x01 0x40 0x04
+
+# CHECK: sgdtw 4(%eax)
+0x67 0x0f 0x01 0x40 0x04
+
+# CHECK: sgdtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x40 0x04
+
+# CHECK: sidtw 4(%eax)
+0x67 0x0f 0x01 0x48 0x04
+
+# CHECK: sidtw 4(%eax)
+0x67 0x0f 0x01 0x48 0x04
+
+# CHECK: sidtl 4(%eax)
+0x67 0x66 0x0f 0x01 0x48 0x04
+
+# CHECK: fcompi %st(2)
+0xdf 0xf2
+
+# CHECK: fcompi %st(2)
+0xdf 0xf2
+
+# CHECK: fcompi %st(1)
+0xdf 0xf1
+
+# CHECK: fucompi %st(2)
+0xdf 0xea
+
+# CHECK: fucompi %st(2)
+0xdf 0xea
+
+# CHECK: fucompi %st(1)
+0xdf 0xe9
+
+# CHECK: fldcw 32493
+0xd9 0x2e 0xed 0x7e
+
+# CHECK: fldcw 32493
+0xd9 0x2e 0xed 0x7e
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstcw 32493
+0xd9 0x3e 0xed 0x7e
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnstsw 32493
+0xdd 0x3e 0xed 0x7e
+
+# CHECK: verr 32493
+0x0f 0x00 0x26 0xed 0x7e
+
+# CHECK: verr 32493
+0x0f 0x00 0x26 0xed 0x7e
+
+# CHECK: wait
+0x9b
+
+# CHECK: fnclex
+0xdb 0xe2
+
+# CHECK: fnclex
+0xdb 0xe2
+
+# CHECK: ud2
+0x0f 0x0b
+
+# CHECK: ud2
+0x0f 0x0b
+
+# CHECK: ud2b
+0x0f 0xb9
+
+# CHECK: loope
+0xe1 0x00
+
+# CHECK: loopne
+0xe0 0x00
+
+# CHECK: outsb
+0x6e
+
+# CHECK: outsw
+0x6f
+
+# CHECK: outsl
+0x66 0x6f
+
+# CHECK: insb
+0x6c
+
+# CHECK: insw
+0x6d
+
+# CHECK: insl
+0x66 0x6d
+
+# CHECK: movsb
+0xa4
+
+# CHECK: movsw
+0xa5
+
+# CHECK: movsl
+0x66 0xa5
+
+# CHECK: lodsb
+0xac
+
+# CHECK: lodsw
+0xad
+
+# CHECK: lodsl
+0x66 0xad
+
+# CHECK: stosb
+0xaa
+
+# CHECK: stosw
+0xab
+
+# CHECK: stosl
+0x66 0xab
+
+# CHECK: strw %ax
+0x0f 0x00 0xc8
+
+# CHECK: strl %eax
+0x66 0x0f 0x00 0xc8
+
+# CHECK: fsubp %st(1)
+0xde 0xe1
+
+# CHECK: fsubp %st(2)
+0xde 0xe2
+
+# CHECKX: nop
+0x66 0x90
+
+# CHECKX: nop
+0x90
+
+# CHECK: xchgl %ecx, %eax
+0x66 0x91
+
+# CHECK: xchgl %ecx, %eax
+0x66 0x91
+
+# CHECK: retw
+0xc3
+
+# CHECK: retl
+0x66 0xc3
+
+# CHECK: lretw
+0xcb
+
+# CHECK: lretl
+0x66 0xcb
+
diff --git a/test/MC/Disassembler/X86/x86-32.txt b/test/MC/Disassembler/X86/x86-32.txt
index b6a62c4f6975..c9c508680c5a 100644
--- a/test/MC/Disassembler/X86/x86-32.txt
+++ b/test/MC/Disassembler/X86/x86-32.txt
@@ -696,3 +696,18 @@
# CHECK: vmovq %xmm0, %xmm0
0xc5 0xfa 0x7e 0xc0
+
+# CHECK: movl %fs:0, %eax
+0x64 0xa1 0x00 0x00 0x00 0x00
+
+# CHECK: movb $-1, %al
+0xc6 0xc0 0xff
+
+# CHECK: movw $65535, %ax
+0x66 0xc7 0xc0 0xff 0xff
+
+# CHECK: movl $4294967295, %eax
+0xc7 0xc0 0xff 0xff 0xff 0xff
+
+# CHECK: movq %mm0, %mm1
+0x0f 0x7f 0xc1
diff --git a/test/MC/Disassembler/XCore/lit.local.cfg b/test/MC/Disassembler/XCore/lit.local.cfg
index 4d17d4642045..bb48713fe33e 100644
--- a/test/MC/Disassembler/XCore/lit.local.cfg
+++ b/test/MC/Disassembler/XCore/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'XCore' in targets:
+if not 'XCore' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/ELF/ARM/bss-non-zero-value.s b/test/MC/ELF/ARM/bss-non-zero-value.s
new file mode 100644
index 000000000000..999b8b019c9c
--- /dev/null
+++ b/test/MC/ELF/ARM/bss-non-zero-value.s
@@ -0,0 +1,9 @@
+// RUN: not llvm-mc -filetype=obj -triple arm-linux-gnu %s -o %t 2>%t.out
+// RUN: FileCheck --input-file=%t.out %s
+// CHECK: non-zero initializer found in section '.bss'
+ .bss
+ .globl a
+ .align 2
+a:
+ .long 1
+ .size a, 4
diff --git a/test/MC/ELF/ARM/gnu-type-hash-diagnostics.s b/test/MC/ELF/ARM/gnu-type-hash-diagnostics.s
new file mode 100644
index 000000000000..eb364755c4d7
--- /dev/null
+++ b/test/MC/ELF/ARM/gnu-type-hash-diagnostics.s
@@ -0,0 +1,9 @@
+@ RUN: not llvm-mc -triple arm-elf -filetype asm -o /dev/null %s 2>&1 | FileCheck %s
+
+ .syntax unified
+
+ .type TYPE #32
+// CHECK: error: expected symbol type in directive
+// CHECK: .type TYPE #32
+// CHECK: ^
+
diff --git a/test/MC/ELF/ARM/gnu-type-hash.s b/test/MC/ELF/ARM/gnu-type-hash.s
new file mode 100644
index 000000000000..ae5c47c567cd
--- /dev/null
+++ b/test/MC/ELF/ARM/gnu-type-hash.s
@@ -0,0 +1,16 @@
+@ RUN: llvm-mc -triple arm-elf -filetype asm -o - %s | FileCheck %s
+
+ .syntax unified
+
+ .type TYPE #STT_FUNC
+// CHECK: .type TYPE,%function
+
+ .type type #function
+// CHECK: .type type,%function
+
+ .type comma_TYPE, #STT_FUNC
+// CHECK: .type comma_TYPE,%function
+
+ .type comma_type, #function
+// CHECK: .type comma_type,%function
+
diff --git a/test/MC/ELF/ARM/lit.local.cfg b/test/MC/ELF/ARM/lit.local.cfg
new file mode 100644
index 000000000000..d825cc04bf37
--- /dev/null
+++ b/test/MC/ELF/ARM/lit.local.cfg
@@ -0,0 +1,3 @@
+# We have to reset config.unsupported here because the parent directory is
+# predicated on 'X86'.
+config.unsupported = not 'ARM' in config.root.targets
diff --git a/test/MC/ELF/abs.s b/test/MC/ELF/abs.s
index 1836f4005973..4adf1cb56afd 100644
--- a/test/MC/ELF/abs.s
+++ b/test/MC/ELF/abs.s
@@ -13,5 +13,5 @@
// CHECK-NEXT: Binding: Local
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF1)
+// CHECK-NEXT: Section: Absolute (0xFFF1)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/alias-reloc.s b/test/MC/ELF/alias-reloc.s
index 0ee73e9ca24a..74930c63aeb8 100644
--- a/test/MC/ELF/alias-reloc.s
+++ b/test/MC/ELF/alias-reloc.s
@@ -32,7 +32,7 @@ foo2:
// CHECK-NEXT: Binding: Local (0x0)
// CHECK-NEXT: Type: None (0x0)
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: bar
diff --git a/test/MC/ELF/alias.s b/test/MC/ELF/alias.s
index 8da75f7ef2de..2e65ace6ba11 100644
--- a/test/MC/ELF/alias.s
+++ b/test/MC/ELF/alias.s
@@ -10,10 +10,13 @@ foo3:
.globl bar3
bar3 = foo3
-// Test that bar4 is also a function
- .type foo4,@function
+
+// Test that bar4 and bar5 are also functions and have the same value as foo4.
+ .byte 0
+ .type foo4,@function
foo4:
bar4 = foo4
+bar5 = bar4
.long foo2
@@ -25,7 +28,7 @@ bar4 = foo4
// CHECK-NEXT: Binding: Local (0x0)
// CHECK-NEXT: Type: None (0x0)
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: bar
@@ -38,7 +41,16 @@ bar4 = foo4
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: bar4
-// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Value: 0x1
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Function
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: bar5
+// CHECK-NEXT: Value: 0x1
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
// CHECK-NEXT: Type: Function
@@ -65,7 +77,7 @@ bar4 = foo4
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: foo4
-// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Value: 0x1
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
// CHECK-NEXT: Type: Function
@@ -97,6 +109,6 @@ bar4 = foo4
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/test/MC/ELF/bad-expr.s b/test/MC/ELF/bad-expr.s
new file mode 100644
index 000000000000..1cad919fee3c
--- /dev/null
+++ b/test/MC/ELF/bad-expr.s
@@ -0,0 +1,8 @@
+// RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o /dev/null 2>%t
+// RUN: FileCheck --input-file=%t %s
+
+// CHECK: symbol '__executable_start' can not be undefined in a subtraction expression
+
+ .data
+x:
+ .quad x-__executable_start
diff --git a/test/MC/ELF/bad-expr2.s b/test/MC/ELF/bad-expr2.s
new file mode 100644
index 000000000000..3da916b7fa11
--- /dev/null
+++ b/test/MC/ELF/bad-expr2.s
@@ -0,0 +1,12 @@
+// RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o /dev/null \
+// RUN: 2>&1 | FileCheck %s
+
+// CHECK: No relocation available to represent this relative expression
+// CHECK: call foo - bar
+
+
+ call foo - bar
+ .section .foo
+foo:
+ .section .bar
+bar:
diff --git a/test/MC/ELF/bad-expr3.s b/test/MC/ELF/bad-expr3.s
new file mode 100644
index 000000000000..990167cda53f
--- /dev/null
+++ b/test/MC/ELF/bad-expr3.s
@@ -0,0 +1,10 @@
+// RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o /dev/null \
+// RUN: 2>&1 | FileCheck %s
+
+// CHECK: Cannot represent a difference across sections
+
+ .long foo - bar
+ .section .zed
+foo:
+ .section .bah
+bar:
diff --git a/test/MC/ELF/basic-elf-32.s b/test/MC/ELF/basic-elf-32.s
index 1f618e1bb6c6..e12fc526369e 100644
--- a/test/MC/ELF/basic-elf-32.s
+++ b/test/MC/ELF/basic-elf-32.s
@@ -46,9 +46,9 @@ main: # @main
// CHECK: Relocations [
// CHECK: Section (2) .rel.text {
-// CHECK: 0x6 R_386_32 .rodata.str1.1
+// CHECK: 0x6 R_386_32 .L.str1
// CHECK: 0xB R_386_PC32 puts
-// CHECK: 0x12 R_386_32 .rodata.str1.1
+// CHECK: 0x12 R_386_32 .L.str2
// CHECK: 0x17 R_386_PC32 puts
// CHECK: }
// CHECK: ]
diff --git a/test/MC/ELF/cfi-adjust-cfa-offset.s b/test/MC/ELF/cfi-adjust-cfa-offset.s
index b3768cb9834c..9d639f70d8dd 100644
--- a/test/MC/ELF/cfi-adjust-cfa-offset.s
+++ b/test/MC/ELF/cfi-adjust-cfa-offset.s
@@ -28,7 +28,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 1C000000 1C000000
// CHECK-NEXT: 0020: 00000000 0A000000 00440E10 410E1444
// CHECK-NEXT: 0030: 0E080000 00000000
diff --git a/test/MC/ELF/cfi-advance-loc2.s b/test/MC/ELF/cfi-advance-loc2.s
index d7a53c462b70..98caa0185f59 100644
--- a/test/MC/ELF/cfi-advance-loc2.s
+++ b/test/MC/ELF/cfi-advance-loc2.s
@@ -26,7 +26,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 01010000 00030001 0E080000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-def-cfa-offset.s b/test/MC/ELF/cfi-def-cfa-offset.s
index eac2c731fa93..59f740055d47 100644
--- a/test/MC/ELF/cfi-def-cfa-offset.s
+++ b/test/MC/ELF/cfi-def-cfa-offset.s
@@ -27,7 +27,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 0A000000 00440E10 450E0800
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-def-cfa-register.s b/test/MC/ELF/cfi-def-cfa-register.s
index 00d8b99af9d6..178ba32882dc 100644
--- a/test/MC/ELF/cfi-def-cfa-register.s
+++ b/test/MC/ELF/cfi-def-cfa-register.s
@@ -23,7 +23,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00410D06 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-def-cfa.s b/test/MC/ELF/cfi-def-cfa.s
index 36e147f5a4da..dfb0d4b59396 100644
--- a/test/MC/ELF/cfi-def-cfa.s
+++ b/test/MC/ELF/cfi-def-cfa.s
@@ -23,7 +23,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00410C07 08000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-escape.s b/test/MC/ELF/cfi-escape.s
index 839d6717debc..5394ee414aa7 100644
--- a/test/MC/ELF/cfi-escape.s
+++ b/test/MC/ELF/cfi-escape.s
@@ -24,7 +24,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00411507 7F000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-offset.s b/test/MC/ELF/cfi-offset.s
index 951a6001e519..a65b4fc783c7 100644
--- a/test/MC/ELF/cfi-offset.s
+++ b/test/MC/ELF/cfi-offset.s
@@ -23,7 +23,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00418602 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-register.s b/test/MC/ELF/cfi-register.s
index 4abbb53b8fc9..94417702c13c 100644
--- a/test/MC/ELF/cfi-register.s
+++ b/test/MC/ELF/cfi-register.s
@@ -24,7 +24,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00410906 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-rel-offset.s b/test/MC/ELF/cfi-rel-offset.s
index 34254c862a46..0dc69c89cf4c 100644
--- a/test/MC/ELF/cfi-rel-offset.s
+++ b/test/MC/ELF/cfi-rel-offset.s
@@ -31,7 +31,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 24000000 1C000000
// CHECK-NEXT: 0020: 00000000 05000000 00410E08 410D0641
// CHECK-NEXT: 0030: 11067F41 0E104186 02000000 00000000
diff --git a/test/MC/ELF/cfi-rel-offset2.s b/test/MC/ELF/cfi-rel-offset2.s
index 3de769f39fa0..360e7b0ea0f5 100644
--- a/test/MC/ELF/cfi-rel-offset2.s
+++ b/test/MC/ELF/cfi-rel-offset2.s
@@ -23,7 +23,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 01000000 00411106 7F000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-remember.s b/test/MC/ELF/cfi-remember.s
index 98c759d4fffc..3a38948b6a3e 100644
--- a/test/MC/ELF/cfi-remember.s
+++ b/test/MC/ELF/cfi-remember.s
@@ -26,7 +26,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 03000000 00410A41 0B000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-restore.s b/test/MC/ELF/cfi-restore.s
index d25b5ff2e93f..e225797f54d6 100644
--- a/test/MC/ELF/cfi-restore.s
+++ b/test/MC/ELF/cfi-restore.s
@@ -24,7 +24,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 0041C600 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-same-value.s b/test/MC/ELF/cfi-same-value.s
index 9f5ae4be9ed4..2d37f4d0b43e 100644
--- a/test/MC/ELF/cfi-same-value.s
+++ b/test/MC/ELF/cfi-same-value.s
@@ -24,7 +24,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00410806 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-sections.s b/test/MC/ELF/cfi-sections.s
index 15a79e5c055e..b0ba543e5bdb 100644
--- a/test/MC/ELF/cfi-sections.s
+++ b/test/MC/ELF/cfi-sections.s
@@ -26,7 +26,7 @@ f2:
// ELF_64-NEXT: AddressAlignment: 8
// ELF_64-NEXT: EntrySize: 0
// ELF_64-NEXT: SectionData (
-// ELF_64-NEXT: 0000: 14000000 FFFFFFFF 01000178 100C0708
+// ELF_64-NEXT: 0000: 14000000 FFFFFFFF 03000178 100C0708
// ELF_64-NEXT: 0010: 90010000 00000000 14000000 00000000
// ELF_64-NEXT: 0020: 00000000 00000000 01000000 00000000
// ELF_64-NEXT: 0030: 14000000 00000000 00000000 00000000
@@ -47,7 +47,7 @@ f2:
// ELF_32-NEXT: AddressAlignment: 4
// ELF_32-NEXT: EntrySize: 0
// ELF_32-NEXT: SectionData (
-// ELF_32-NEXT: 0000: 10000000 FFFFFFFF 0100017C 080C0404
+// ELF_32-NEXT: 0000: 10000000 FFFFFFFF 0300017C 080C0404
// ELF_32-NEXT: 0010: 88010000 0C000000 00000000 00000000
// ELF_32-NEXT: 0020: 01000000 0C000000 00000000 01000000
// ELF_32-NEXT: 0030: 01000000
diff --git a/test/MC/ELF/cfi-signal-frame.s b/test/MC/ELF/cfi-signal-frame.s
index 023311962189..98deb0a1de5c 100644
--- a/test/MC/ELF/cfi-signal-frame.s
+++ b/test/MC/ELF/cfi-signal-frame.s
@@ -23,10 +23,10 @@ g:
// CHECK-NEXT: AddressAlignment: 8
// CHECK-NEXT: EntrySize: 0
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5253 00017810
+// CHECK-NEXT: 0000: 14000000 00000000 037A5253 00017810
// CHECK-NEXT: 0010: 011B0C07 08900100 10000000 1C000000
// CHECK-NEXT: 0020: 00000000 00000000 00000000 14000000
-// CHECK-NEXT: 0030: 00000000 017A5200 01781001 1B0C0708
+// CHECK-NEXT: 0030: 00000000 037A5200 01781001 1B0C0708
// CHECK-NEXT: 0040: 90010000 10000000 1C000000 00000000
// CHECK-NEXT: 0050: 00000000 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-undefined.s b/test/MC/ELF/cfi-undefined.s
index 9773a36a3b03..568b3159cc44 100644
--- a/test/MC/ELF/cfi-undefined.s
+++ b/test/MC/ELF/cfi-undefined.s
@@ -24,7 +24,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00410706 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-version.ll b/test/MC/ELF/cfi-version.ll
new file mode 100644
index 000000000000..10daa1dd408e
--- /dev/null
+++ b/test/MC/ELF/cfi-version.ll
@@ -0,0 +1,45 @@
+; RUN: %llc_dwarf %s -o - -dwarf-version 2 -filetype=obj | llvm-dwarfdump - | FileCheck %s --check-prefix=DWARF2
+; RUN: %llc_dwarf %s -o - -dwarf-version 3 -filetype=obj | llvm-dwarfdump - | FileCheck %s --check-prefix=DWARF34
+; RUN: %llc_dwarf %s -o - -dwarf-version 4 -filetype=obj | llvm-dwarfdump - | FileCheck %s --check-prefix=DWARF34
+
+; .debug_frame is not emitted for targeting Windows x64.
+; REQUIRES: debug_frame
+
+; Function Attrs: nounwind
+define i32 @foo() #0 {
+entry:
+ %call = call i32 bitcast (i32 (...)* @bar to i32 ()*)(), !dbg !12
+ %add = add nsw i32 %call, 1, !dbg !12
+ ret i32 %add, !dbg !12
+}
+
+declare i32 @bar(...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/test.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"test.c", metadata !"/tmp"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @foo, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/test.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5.0 "}
+!12 = metadata !{i32 2, i32 0, metadata !4, null}
+
+; DWARF2: .debug_frame contents:
+; DWARF2: Version: 1
+; DWARF2-NEXT: Augmentation:
+
+; DWARF34: .debug_frame contents:
+; DWARF34: Version: 3
+; DWARF34-NEXT: Augmentation:
diff --git a/test/MC/ELF/cfi-window-save.s b/test/MC/ELF/cfi-window-save.s
index c7d438a19260..b083901c137a 100644
--- a/test/MC/ELF/cfi-window-save.s
+++ b/test/MC/ELF/cfi-window-save.s
@@ -26,7 +26,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 02000000 00412D00 00000000
// CHECK-NEXT: )
diff --git a/test/MC/ELF/cfi-zero-addr-delta.s b/test/MC/ELF/cfi-zero-addr-delta.s
index 05cb0ae35bd2..8662839b5274 100644
--- a/test/MC/ELF/cfi-zero-addr-delta.s
+++ b/test/MC/ELF/cfi-zero-addr-delta.s
@@ -30,7 +30,7 @@ f:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001
+// CHECK-NEXT: 0000: 14000000 00000000 037A5200 01781001
// CHECK-NEXT: 0010: 1B0C0708 90010000 1C000000 1C000000
// CHECK-NEXT: 0020: 00000000 04000000 00410E10 410A0E08
// CHECK-NEXT: 0030: 410B0000 00000000
diff --git a/test/MC/ELF/cfi.s b/test/MC/ELF/cfi.s
index b8b6e6b52af1..21be615c5f39 100644
--- a/test/MC/ELF/cfi.s
+++ b/test/MC/ELF/cfi.s
@@ -212,6 +212,11 @@ f36:
nop
.cfi_endproc
+f37:
+ .cfi_startproc simple
+ nop
+ .cfi_endproc
+
// CHECK: Section {
// CHECK: Index: 4
// CHECK-NEXT: Name: .eh_frame
@@ -221,7 +226,7 @@ f36:
// CHECK-NEXT: ]
// CHECK-NEXT: Address: 0x0
// CHECK-NEXT: Offset: 0x68
-// CHECK-NEXT: Size: 1736
+// CHECK-NEXT: Size: 1776
// CHECK-NEXT: Link: 0
// CHECK-NEXT: Info: 0
// CHECK-NEXT: AddressAlignment: 8
@@ -229,115 +234,117 @@ f36:
// CHECK-NEXT: Relocations [
// CHECK-NEXT: ]
// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A4C52 00017810
+// CHECK-NEXT: 0000: 14000000 00000000 037A4C52 00017810
// CHECK-NEXT: 0010: 02031B0C 07089001 14000000 1C000000
// CHECK-NEXT: 0020: 00000000 01000000 04000000 00000000
-// CHECK-NEXT: 0030: 20000000 00000000 017A504C 52000178
+// CHECK-NEXT: 0030: 20000000 00000000 037A504C 52000178
// CHECK-NEXT: 0040: 100B0000 00000000 00000003 1B0C0708
// CHECK-NEXT: 0050: 90010000 14000000 28000000 00000000
// CHECK-NEXT: 0060: 01000000 04000000 00000000 14000000
// CHECK-NEXT: 0070: 70000000 00000000 01000000 04000000
-// CHECK-NEXT: 0080: 00000000 20000000 00000000 017A504C
+// CHECK-NEXT: 0080: 00000000 20000000 00000000 037A504C
// CHECK-NEXT: 0090: 52000178 100B0000 00000000 00000002
// CHECK-NEXT: 00A0: 1B0C0708 90010000 10000000 28000000
// CHECK-NEXT: 00B0: 00000000 01000000 02000000 18000000
-// CHECK-NEXT: 00C0: 00000000 017A5052 00017810 04020000
+// CHECK-NEXT: 00C0: 00000000 037A5052 00017810 04020000
// CHECK-NEXT: 00D0: 1B0C0708 90010000 10000000 20000000
// CHECK-NEXT: 00E0: 00000000 01000000 00000000 18000000
-// CHECK-NEXT: 00F0: 00000000 017A5052 00017810 06030000
+// CHECK-NEXT: 00F0: 00000000 037A5052 00017810 06030000
// CHECK-NEXT: 0100: 00001B0C 07089001 10000000 20000000
// CHECK-NEXT: 0110: 00000000 01000000 00000000 1C000000
-// CHECK-NEXT: 0120: 00000000 017A5052 00017810 0A040000
+// CHECK-NEXT: 0120: 00000000 037A5052 00017810 0A040000
// CHECK-NEXT: 0130: 00000000 00001B0C 07089001 10000000
// CHECK-NEXT: 0140: 24000000 00000000 01000000 00000000
-// CHECK-NEXT: 0150: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0150: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 0160: 040A0000 1B0C0708 90010000 10000000
// CHECK-NEXT: 0170: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 0180: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0180: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 0190: 060B0000 00001B0C 07089001 10000000
// CHECK-NEXT: 01A0: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 01B0: 1C000000 00000000 017A5052 00017810
+// CHECK-NEXT: 01B0: 1C000000 00000000 037A5052 00017810
// CHECK-NEXT: 01C0: 0A0C0000 00000000 00001B0C 07089001
// CHECK-NEXT: 01D0: 10000000 24000000 00000000 01000000
-// CHECK-NEXT: 01E0: 00000000 1C000000 00000000 017A5052
+// CHECK-NEXT: 01E0: 00000000 1C000000 00000000 037A5052
// CHECK-NEXT: 01F0: 00017810 0A080000 00000000 00001B0C
// CHECK-NEXT: 0200: 07089001 10000000 24000000 00000000
// CHECK-NEXT: 0210: 01000000 00000000 1C000000 00000000
-// CHECK-NEXT: 0220: 017A5052 00017810 0A100000 00000000
+// CHECK-NEXT: 0220: 037A5052 00017810 0A100000 00000000
// CHECK-NEXT: 0230: 00001B0C 07089001 10000000 24000000
// CHECK-NEXT: 0240: 00000000 01000000 00000000 18000000
-// CHECK-NEXT: 0250: 00000000 017A5052 00017810 04120000
+// CHECK-NEXT: 0250: 00000000 037A5052 00017810 04120000
// CHECK-NEXT: 0260: 1B0C0708 90010000 10000000 20000000
// CHECK-NEXT: 0270: 00000000 01000000 00000000 18000000
-// CHECK-NEXT: 0280: 00000000 017A5052 00017810 06130000
+// CHECK-NEXT: 0280: 00000000 037A5052 00017810 06130000
// CHECK-NEXT: 0290: 00001B0C 07089001 10000000 20000000
// CHECK-NEXT: 02A0: 00000000 01000000 00000000 1C000000
-// CHECK-NEXT: 02B0: 00000000 017A5052 00017810 0A140000
+// CHECK-NEXT: 02B0: 00000000 037A5052 00017810 0A140000
// CHECK-NEXT: 02C0: 00000000 00001B0C 07089001 10000000
// CHECK-NEXT: 02D0: 24000000 00000000 01000000 00000000
-// CHECK-NEXT: 02E0: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 02E0: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 02F0: 041A0000 1B0C0708 90010000 10000000
// CHECK-NEXT: 0300: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 0310: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0310: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 0320: 061B0000 00001B0C 07089001 10000000
// CHECK-NEXT: 0330: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 0340: 1C000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0340: 1C000000 00000000 037A5052 00017810
// CHECK-NEXT: 0350: 0A1C0000 00000000 00001B0C 07089001
// CHECK-NEXT: 0360: 10000000 24000000 00000000 01000000
-// CHECK-NEXT: 0370: 00000000 1C000000 00000000 017A5052
+// CHECK-NEXT: 0370: 00000000 1C000000 00000000 037A5052
// CHECK-NEXT: 0380: 00017810 0A180000 00000000 00001B0C
// CHECK-NEXT: 0390: 07089001 10000000 24000000 00000000
// CHECK-NEXT: 03A0: 01000000 00000000 1C000000 00000000
-// CHECK-NEXT: 03B0: 017A5052 00017810 0A800000 00000000
+// CHECK-NEXT: 03B0: 037A5052 00017810 0A800000 00000000
// CHECK-NEXT: 03C0: 00001B0C 07089001 10000000 24000000
// CHECK-NEXT: 03D0: 00000000 01000000 00000000 18000000
-// CHECK-NEXT: 03E0: 00000000 017A5052 00017810 04820000
+// CHECK-NEXT: 03E0: 00000000 037A5052 00017810 04820000
// CHECK-NEXT: 03F0: 1B0C0708 90010000 10000000 20000000
// CHECK-NEXT: 0400: 00000000 01000000 00000000 18000000
-// CHECK-NEXT: 0410: 00000000 017A5052 00017810 06830000
+// CHECK-NEXT: 0410: 00000000 037A5052 00017810 06830000
// CHECK-NEXT: 0420: 00001B0C 07089001 10000000 20000000
// CHECK-NEXT: 0430: 00000000 01000000 00000000 1C000000
-// CHECK-NEXT: 0440: 00000000 017A5052 00017810 0A840000
+// CHECK-NEXT: 0440: 00000000 037A5052 00017810 0A840000
// CHECK-NEXT: 0450: 00000000 00001B0C 07089001 10000000
// CHECK-NEXT: 0460: 24000000 00000000 01000000 00000000
-// CHECK-NEXT: 0470: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0470: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 0480: 048A0000 1B0C0708 90010000 10000000
// CHECK-NEXT: 0490: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 04A0: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 04A0: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 04B0: 068B0000 00001B0C 07089001 10000000
// CHECK-NEXT: 04C0: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 04D0: 1C000000 00000000 017A5052 00017810
+// CHECK-NEXT: 04D0: 1C000000 00000000 037A5052 00017810
// CHECK-NEXT: 04E0: 0A8C0000 00000000 00001B0C 07089001
// CHECK-NEXT: 04F0: 10000000 24000000 00000000 01000000
-// CHECK-NEXT: 0500: 00000000 1C000000 00000000 017A5052
+// CHECK-NEXT: 0500: 00000000 1C000000 00000000 037A5052
// CHECK-NEXT: 0510: 00017810 0A880000 00000000 00001B0C
// CHECK-NEXT: 0520: 07089001 10000000 24000000 00000000
// CHECK-NEXT: 0530: 01000000 00000000 1C000000 00000000
-// CHECK-NEXT: 0540: 017A5052 00017810 0A900000 00000000
+// CHECK-NEXT: 0540: 037A5052 00017810 0A900000 00000000
// CHECK-NEXT: 0550: 00001B0C 07089001 10000000 24000000
// CHECK-NEXT: 0560: 00000000 01000000 00000000 18000000
-// CHECK-NEXT: 0570: 00000000 017A5052 00017810 04920000
+// CHECK-NEXT: 0570: 00000000 037A5052 00017810 04920000
// CHECK-NEXT: 0580: 1B0C0708 90010000 10000000 20000000
// CHECK-NEXT: 0590: 00000000 01000000 00000000 18000000
-// CHECK-NEXT: 05A0: 00000000 017A5052 00017810 06930000
+// CHECK-NEXT: 05A0: 00000000 037A5052 00017810 06930000
// CHECK-NEXT: 05B0: 00001B0C 07089001 10000000 20000000
// CHECK-NEXT: 05C0: 00000000 01000000 00000000 1C000000
-// CHECK-NEXT: 05D0: 00000000 017A5052 00017810 0A940000
+// CHECK-NEXT: 05D0: 00000000 037A5052 00017810 0A940000
// CHECK-NEXT: 05E0: 00000000 00001B0C 07089001 10000000
// CHECK-NEXT: 05F0: 24000000 00000000 01000000 00000000
-// CHECK-NEXT: 0600: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0600: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 0610: 049A0000 1B0C0708 90010000 10000000
// CHECK-NEXT: 0620: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 0630: 18000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0630: 18000000 00000000 037A5052 00017810
// CHECK-NEXT: 0640: 069B0000 00001B0C 07089001 10000000
// CHECK-NEXT: 0650: 20000000 00000000 01000000 00000000
-// CHECK-NEXT: 0660: 1C000000 00000000 017A5052 00017810
+// CHECK-NEXT: 0660: 1C000000 00000000 037A5052 00017810
// CHECK-NEXT: 0670: 0A9C0000 00000000 00001B0C 07089001
// CHECK-NEXT: 0680: 10000000 24000000 00000000 01000000
-// CHECK-NEXT: 0690: 00000000 1C000000 00000000 017A5052
+// CHECK-NEXT: 0690: 00000000 1C000000 00000000 037A5052
// CHECK-NEXT: 06A0: 00017810 0A980000 00000000 00001B0C
// CHECK-NEXT: 06B0: 07089001 10000000 24000000 00000000
-// CHECK-NEXT: 06C0: 01000000 00000000
+// CHECK-NEXT: 06C0: 01000000 00000000 10000000 00000000
+// CHECK-NEXT: 06D0: 037A5200 01781001 1B000000 10000000
+// CHECK-NEXT: 06E0: 18000000 00000000 01000000 00000000
// CHECK-NEXT: )
// CHECK-NEXT: }
@@ -348,8 +355,8 @@ f36:
// CHECK-NEXT: Flags [
// CHECK-NEXT: ]
// CHECK-NEXT: Address: 0x0
-// CHECK-NEXT: Offset: 0xE30
-// CHECK-NEXT: Size: 1728
+// CHECK-NEXT: Offset: 0xE70
+// CHECK-NEXT: Size: 1752
// CHECK-NEXT: Link: 7
// CHECK-NEXT: Info: 4
// CHECK-NEXT: AddressAlignment: 8
@@ -427,5 +434,6 @@ f36:
// CHECK-NEXT: 0x688 R_X86_64_PC32 .text 0x21
// CHECK-NEXT: 0x6A6 R_X86_64_PC64 foo 0x0
// CHECK-NEXT: 0x6BC R_X86_64_PC32 .text 0x22
+// CHECK-NEXT: 0x6E4 R_X86_64_PC32 .text 0x23
// CHECK-NEXT: ]
// CHECK: }
diff --git a/test/MC/ELF/comdat.s b/test/MC/ELF/comdat.s
index 3e4a001f05ad..68b0f328f9bc 100644
--- a/test/MC/ELF/comdat.s
+++ b/test/MC/ELF/comdat.s
@@ -49,7 +49,7 @@
// Test that g1 and g2 are local, but g3 is an undefined global.
// CHECK: Symbol {
-// CHECK: Name: g1 (1)
+// CHECK: Name: g1
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -58,7 +58,7 @@
// CHECK-NEXT: Section: .foo (0x7)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: g2 (4)
+// CHECK-NEXT: Name: g2
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -68,13 +68,13 @@
// CHECK-NEXT: }
// CHECK: Symbol {
-// CHECK: Name: g3 (7)
+// CHECK: Name: g3
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/common.s b/test/MC/ELF/common.s
index 4fc2154d850c..bd96564a5ca1 100644
--- a/test/MC/ELF/common.s
+++ b/test/MC/ELF/common.s
@@ -9,7 +9,7 @@
.comm common1,1,1
// CHECK: Symbol {
-// CHECK: Name: common1 (1)
+// CHECK: Name: common1
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 1
// CHECK-NEXT: Binding: Local
@@ -25,7 +25,7 @@
.comm common2,1,1
// CHECK: Symbol {
-// CHECK: Name: common2 (9)
+// CHECK: Name: common2
// CHECK-NEXT: Value: 0x1
// CHECK-NEXT: Size: 1
// CHECK-NEXT: Binding: Local
@@ -39,7 +39,7 @@
.comm common6,8,16
// CHECK: Symbol {
-// CHECK: Name: common6 (17)
+// CHECK: Name: common6
// CHECK-NEXT: Value: 0x10
// CHECK-NEXT: Size: 8
// CHECK-NEXT: Binding: Local
@@ -54,13 +54,13 @@
.comm common3,4,4
// CHECK: Symbol {
-// CHECK: Name: common3 (25)
+// CHECK: Name: common3
// CHECK-NEXT: Value: 0x4
// CHECK-NEXT: Size: 4
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: Object
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF2)
+// CHECK-NEXT: Section: Common (0xFFF2)
// CHECK-NEXT: }
@@ -76,24 +76,24 @@ foo:
.comm common4,40,16
// CHECK: Symbol {
-// CHECK: Name: common4 (37)
+// CHECK: Name: common4
// CHECK-NEXT: Value: 0x10
// CHECK-NEXT: Size: 40
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: Object
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF2)
+// CHECK-NEXT: Section: Common (0xFFF2)
// CHECK-NEXT: }
.comm common5,4,4
// CHECK: Symbol {
-// CHECK: Name: common5 (45)
+// CHECK: Name: common5
// CHECK-NEXT: Value: 0x4
// CHECK-NEXT: Size: 4
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: Object
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF2)
+// CHECK-NEXT: Section: Common (0xFFF2)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/comp-dir.s b/test/MC/ELF/comp-dir.s
index 1b91f64a502c..c8d996faf326 100644
--- a/test/MC/ELF/comp-dir.s
+++ b/test/MC/ELF/comp-dir.s
@@ -1,5 +1,4 @@
// REQUIRES: shell
-// XFAIL: mingw
// RUN: llvm-mc -triple=x86_64-linux-unknown -g -fdebug-compilation-dir=/test/comp/dir %s -filetype=obj -o %t.o
// RUN: llvm-dwarfdump -debug-dump=info %t.o | FileCheck %s
diff --git a/test/MC/ELF/compression.s b/test/MC/ELF/compression.s
new file mode 100644
index 000000000000..07b689eef1f1
--- /dev/null
+++ b/test/MC/ELF/compression.s
@@ -0,0 +1,80 @@
+// RUN: llvm-mc -filetype=obj -compress-debug-sections -triple x86_64-pc-linux-gnu < %s -o %t
+// RUN: llvm-objdump -s %t | FileCheck %s
+// RUN: llvm-dwarfdump -debug-dump=info %t | FileCheck --check-prefix=INFO %s
+// RUN: llvm-mc -filetype=obj -compress-debug-sections -triple i386-pc-linux-gnu < %s \
+// RUN: | llvm-readobj -symbols - | FileCheck --check-prefix=386-SYMBOLS %s
+
+// REQUIRES: zlib
+
+// CHECK: Contents of section .zdebug_line:
+// Check for the 'ZLIB' file magic at the start of the section only
+// CHECK-NEXT: ZLIB
+// CHECK-NOT: ZLIB
+// CHECK: Contents of
+
+// Don't compress small sections, such as this simple debug_abbrev example
+// CHECK: Contents of section .debug_abbrev:
+// CHECK-NOT: ZLIB
+// CHECK-NOT: Contents of
+
+// CHECK: Contents of section .debug_info:
+
+// FIXME: Handle compressing alignment fragments to support compressing debug_frame
+// CHECK: Contents of section .debug_frame:
+// CHECK-NOT: ZLIB
+// CHECK: Contents of
+
+// Decompress one valid dwarf section just to check that this roundtrips
+// INFO: 0x00000000: Compile Unit: length = 0x0000000c version = 0x0004 abbr_offset = 0x0000 addr_size = 0x08 (next unit at 0x00000010)
+
+// On 32-bit x86, named symbols are used for temporary symbols in merge
+// sections, so make sure we handle symbols inside compressed sections
+// 386-SYMBOLS: Name: .Linfo_string0
+// 386-SYMBOLS-NOT: }
+// 386-SYMBOLS: Section: .zdebug_str
+
+ .section .debug_line,"",@progbits
+
+ .section .debug_abbrev,"",@progbits
+.Lsection_abbrev:
+ .byte 1 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 0 # DW_CHILDREN_no
+ .byte 27 # DW_AT_comp_dir
+ .byte 14 # DW_FORM_strp
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+
+ .section .debug_info,"",@progbits
+ .long 12 # Length of Unit
+ .short 4 # DWARF version number
+ .long .Lsection_abbrev # Offset Into Abbrev. Section
+ .byte 8 # Address Size (in bytes)
+ .byte 1 # Abbrev [1] DW_TAG_compile_unit
+ .long .Linfo_string0 # DW_AT_comp_dir
+
+ .text
+foo:
+ .cfi_startproc
+ .file 1 "Driver.ii"
+# pad out the line table to make sure it's big enough to warrant compression
+ .loc 1 2 0
+ nop
+ .loc 1 3 0
+ nop
+ .loc 1 4 0
+ nop
+ .loc 1 5 0
+ nop
+ .loc 1 6 0
+ nop
+ .loc 1 7 0
+ nop
+ .loc 1 8 0
+ nop
+ .cfi_endproc
+ .cfi_sections .debug_frame
+
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "compress this "
diff --git a/test/MC/ELF/discriminator.s b/test/MC/ELF/discriminator.s
new file mode 100644
index 000000000000..8a695b96ede1
--- /dev/null
+++ b/test/MC/ELF/discriminator.s
@@ -0,0 +1,61 @@
+# RUN: llvm-mc -triple i386-unknown-unknown %s -filetype=obj -o %t.o
+# RUN: llvm-readobj -r %t.o | FileCheck %s
+# RUN: llvm-dwarfdump -debug-dump=line %t.o | FileCheck %s -check-prefix=DWARF-DUMP
+
+ .file 1 "foo.c"
+ .text
+ .globl foo
+ .type foo, @function
+ .align 4
+foo:
+ .loc 1 2 discriminator 1
+ ret
+ .size foo, .-foo
+
+ .section .debug_info,"",@progbits
+.L.debug_info_begin0:
+ .long 34 # Length of Unit
+ .short 4 # DWARF version number
+ .long .L.debug_abbrev_begin # Offset Into Abbrev. Section
+ .byte 8 # Address Size (in bytes)
+ .byte 1 # Abbrev [1] 0xb:0x1b DW_TAG_compile_unit
+ .long .Linfo_string0 # DW_AT_producer
+ .short 12 # DW_AT_language
+ .long .Linfo_string1 # DW_AT_name
+ .quad 0 # DW_AT_low_pc
+ .long 0 # DW_AT_stmt_list
+ .long .Linfo_string2 # DW_AT_comp_dir
+ # DW_AT_APPLE_optimized
+ .section .debug_abbrev,"",@progbits
+.L.debug_abbrev_begin:
+ .byte 1 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 0 # DW_CHILDREN_no
+ .byte 37 # DW_AT_producer
+ .byte 14 # DW_FORM_strp
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 14 # DW_FORM_strp
+ .byte 17 # DW_AT_low_pc
+ .byte 1 # DW_FORM_addr
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 14 # DW_FORM_strp
+ .ascii "\341\177" # DW_AT_APPLE_optimized
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+.L.debug_abbrev_end:
+
+
+# CHECK: Relocations [
+# CHECK: Section ({{[^ ]+}}) .rel.debug_line {
+# CHECK-NEXT: 0x2D R_386_32 .text 0x0
+# CHECK-NEXT: }
+
+# DWARF-DUMP: Address Line Column File ISA Discriminator Flags
+# DWARF-DUMP: ------------------ ------ ------ ------ --- ------------- -------------
+# DWARF-DUMP: 0x0001021300000000 1 0 1 0 1 is_stmt
diff --git a/test/MC/ELF/dot-symbol-assignment.s b/test/MC/ELF/dot-symbol-assignment.s
new file mode 100644
index 000000000000..00fe205082be
--- /dev/null
+++ b/test/MC/ELF/dot-symbol-assignment.s
@@ -0,0 +1,22 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -sections -section-data | FileCheck %s
+
+one:
+ .quad 0xffffffffffffffff
+
+. = . + 16
+two:
+ .quad 0xeeeeeeeeeeeeeeee
+
+. = 0x20
+three:
+ .quad 0xdddddddddddddddd
+
+// CHECK: Section {
+// CHECK: Name: .text
+// CHECK-NEXT: Type:
+// CHECK-NEXT: Flags [
+// CHECK: SectionData (
+// CHECK-NEXT: 0000: FFFFFFFF FFFFFFFF 00000000 00000000
+// CHECK-NEXT: 0010: 00000000 00000000 EEEEEEEE EEEEEEEE
+// CHECK-NEXT: 0020: DDDDDDDD DDDDDDDD
+// CHECK-NEXT: )
diff --git a/test/MC/ELF/file-double.s b/test/MC/ELF/file-double.s
index b0731e67ed0d..b5da8c5a8588 100644
--- a/test/MC/ELF/file-double.s
+++ b/test/MC/ELF/file-double.s
@@ -11,24 +11,24 @@ foo.c:
bar.c:
// CHECK: Symbol {
-// CHECK: Name: foo.c (1)
+// CHECK: Name: foo.c
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
// CHECK-NEXT: Type: File
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF1)
+// CHECK-NEXT: Section: Absolute (0xFFF1)
// CHECK-NEXT: }
-// CHECK: Name: bar.c (7)
+// CHECK: Name: bar.c
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
// CHECK-NEXT: Type: File
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF1)
+// CHECK-NEXT: Section: Absolute (0xFFF1)
// CHECK-NEXT: }
// CHECK: Symbol {
-// CHECK: Name: bar.c (7)
+// CHECK: Name: bar.c
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
@@ -37,7 +37,7 @@ bar.c:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK: Symbol {
-// CHECK: Name: foo.c (1)
+// CHECK: Name: foo.c
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
diff --git a/test/MC/ELF/file.s b/test/MC/ELF/file.s
index 7e287f7e3fa9..6b6cb44e2b28 100644
--- a/test/MC/ELF/file.s
+++ b/test/MC/ELF/file.s
@@ -12,7 +12,7 @@ foa:
// CHECK-NEXT: Binding: Local
// CHECK-NEXT: Type: File
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF1)
+// CHECK-NEXT: Section: Absolute (0xFFF1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: foa (5)
diff --git a/test/MC/ELF/gen-dwarf.s b/test/MC/ELF/gen-dwarf.s
index a702bc8610c8..7f0c0594ce95 100644
--- a/test/MC/ELF/gen-dwarf.s
+++ b/test/MC/ELF/gen-dwarf.s
@@ -1,4 +1,9 @@
-// RUN: llvm-mc -g -triple i686-pc-linux-gnu %s -filetype=obj -o - | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc -g -dwarf-version 2 -triple i686-pc-linux-gnu %s -filetype=obj -o - | llvm-readobj -r | FileCheck %s
+// RUN: not llvm-mc -g -dwarf-version 1 -triple i686-pc-linux-gnu %s -filetype=asm -o - 2>&1 | FileCheck --check-prefix=DWARF1 %s
+// RUN: llvm-mc -g -dwarf-version 2 -triple i686-pc-linux-gnu %s -filetype=asm -o - | FileCheck --check-prefix=ASM --check-prefix=DWARF2 %s
+// RUN: llvm-mc -g -dwarf-version 3 -triple i686-pc-linux-gnu %s -filetype=asm -o - | FileCheck --check-prefix=ASM --check-prefix=DWARF3 %s
+// RUN: llvm-mc -g -triple i686-pc-linux-gnu %s -filetype=asm -o - | FileCheck --check-prefix=ASM --check-prefix=DWARF4 %s
+// RUN: not llvm-mc -g -dwarf-version 5 -triple i686-pc-linux-gnu %s -filetype=asm -o - 2>&1 | FileCheck --check-prefix=DWARF5 %s
// Test that on ELF:
@@ -23,4 +28,27 @@ foo:
// CHECK-NEXT: 0x6 R_386_32 .debug_info 0x0
// CHECK-NEXT: 0x10 R_386_32 .text 0x0
// CHECK-NEXT: }
-// CHECK-NEXT: ]
+// CHECK: ]
+
+// First instance of the section is just to give it a label for debug_aranges to refer to
+// ASM: .section .debug_info
+
+// ASM: .section .debug_abbrev
+// ASM-NEXT: [[ABBREV_LABEL:.Ltmp[0-9]+]]
+
+// Second instance of the section has the CU
+// ASM: .section .debug_info
+// Dwarf version
+// DWARF2: .short 2
+// DWARF3: .short 3
+// DWARF4: .short 4
+// ASM-NEXT: .long [[ABBREV_LABEL]]
+// First .byte 1 is the abbreviation number for the compile_unit abbrev
+// ASM: .byte 1
+// ASM-NEXT: .long [[LINE_LABEL:.L[a-z0-9]+]]
+
+// ASM: .section .debug_line
+// ASM-NEXT: [[LINE_LABEL]]
+
+// DWARF1: Dwarf version 1 is not supported.
+// DWARF5: Dwarf version 5 is not supported.
diff --git a/test/MC/ELF/gnu-type-diagnostics.s b/test/MC/ELF/gnu-type-diagnostics.s
new file mode 100644
index 000000000000..df87d6df082e
--- /dev/null
+++ b/test/MC/ELF/gnu-type-diagnostics.s
@@ -0,0 +1,18 @@
+// RUN: not llvm-mc -triple i686-elf -filetype asm -o /dev/null %s 2>&1 | FileCheck %s
+
+ .type TYPE FUNC
+// CHECK: error: unsupported attribute in '.type' directive
+// CHECK: .type TYPE FUNC
+// CHECK: ^
+
+ .type type stt_func
+// CHECK: error: unsupported attribute in '.type' directive
+// CHECK: .type type stt_func
+// CHECK: ^
+
+ .type symbol 32
+// CHECK: error: expected STT_<TYPE_IN_UPPER_CASE>, '#<type>', '@<type>', '%<type>' or "<type>"
+// CHECK: .type symbol 32
+// CHECK: ^
+
+
diff --git a/test/MC/ELF/gnu-type.s b/test/MC/ELF/gnu-type.s
new file mode 100644
index 000000000000..19029e48ee96
--- /dev/null
+++ b/test/MC/ELF/gnu-type.s
@@ -0,0 +1,38 @@
+// RUN: llvm-mc -triple i686-elf -filetype asm -o - %s | FileCheck %s
+
+ .type TYPE STT_FUNC
+// CHECK: .type TYPE,@function
+
+ .type comma_TYPE, STT_FUNC
+// CHECK: .type comma_TYPE,@function
+
+ .type at_TYPE, @STT_FUNC
+// CHECK: .type at_TYPE,@function
+
+ .type percent_TYPE, %STT_FUNC
+// CHECK: .type percent_TYPE,@function
+
+ .type string_TYPE, "STT_FUNC"
+// CHECK: .type string_TYPE,@function
+
+ .type type function
+// CHECK: .type type,@function
+
+ .type comma_type, function
+// CHECK: .type comma_type,@function
+
+ .type at_type, @function
+// CHECK: .type at_type,@function
+
+ .type percent_type, %function
+// CHECK: .type percent_type,@function
+
+ .type string_type, "function"
+// CHECK: .type string_type,@function
+
+ .type special gnu_unique_object
+// CHECK: .type special,@gnu_unique_object
+
+ .type comma_special, gnu_unique_object
+// CHECK: .type comma_special,@gnu_unique_object
+
diff --git a/test/MC/ELF/ifunc-reloc.s b/test/MC/ELF/ifunc-reloc.s
new file mode 100644
index 000000000000..01954631cca1
--- /dev/null
+++ b/test/MC/ELF/ifunc-reloc.s
@@ -0,0 +1,16 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -r | FileCheck %s
+ .global sym
+ .type sym, @gnu_indirect_function
+alias:
+ .global alias
+ .type alias, @function
+ .set sym, alias
+
+
+ callq sym
+
+// CHECK: Relocations [
+// CHECK-NEXT: Section (2) .rela.text {
+// CHECK-NEXT: 0x1 R_X86_64_PC32 sym 0xFFFFFFFFFFFFFFFC
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
diff --git a/test/MC/ELF/lcomm.s b/test/MC/ELF/lcomm.s
index 430b79b54b0a..7d8ac3fcafa0 100644
--- a/test/MC/ELF/lcomm.s
+++ b/test/MC/ELF/lcomm.s
@@ -4,7 +4,7 @@
.lcomm B, 32 << 20
// CHECK: Symbol {
-// CHECK: Name: A (1)
+// CHECK: Name: A
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 5
// CHECK-NEXT: Binding: Local
@@ -13,7 +13,7 @@
// CHECK-NEXT: Section: .bss (0x3)
// CHECK-NEXT: }
// CHECK: Symbol {
-// CHECK: Name: B (3)
+// CHECK: Name: B
// CHECK-NEXT: Value: 0x5
// CHECK-NEXT: Size: 33554432
// CHECK-NEXT: Binding: Local
diff --git a/test/MC/ELF/lit.local.cfg b/test/MC/ELF/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/MC/ELF/lit.local.cfg
+++ b/test/MC/ELF/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/ELF/local-reloc.s b/test/MC/ELF/local-reloc.s
index 0c745197c814..19b950986680 100644
--- a/test/MC/ELF/local-reloc.s
+++ b/test/MC/ELF/local-reloc.s
@@ -6,7 +6,7 @@
movl foo, %r14d
foo:
-// CHECKT: Relocations [
+// CHECK: Relocations [
// CHECK: Section (2) .rela.text {
// CHECK-NEXT: 0x{{[^ ]+}} R_X86_64_32S .text 0x{{[^ ]+}}
// CHECK-NEXT: }
diff --git a/test/MC/ELF/many-section.s b/test/MC/ELF/many-section.s
deleted file mode 100644
index b729e668168e..000000000000
--- a/test/MC/ELF/many-section.s
+++ /dev/null
@@ -1,93319 +0,0 @@
-// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o %t
-// RUN: llvm-nm -a %t | FileCheck %s
-
-// CHECK: s000a
-// CHECK-NOT: U
-// CHECK: szzzb
-
-.section saaaa
-.section saaab
-.section saaba
-.section saabb
-.section saaca
-.section saacb
-.section saada
-.section saadb
-.section saaea
-.section saaeb
-.section saafa
-.section saafb
-.section saaga
-.section saagb
-.section saaha
-.section saahb
-.section saaia
-.section saaib
-.section saaja
-.section saajb
-.section saaka
-.section saakb
-.section saala
-.section saalb
-.section saama
-.section saamb
-.section saana
-.section saanb
-.section saaoa
-.section saaob
-.section saapa
-.section saapb
-.section saaqa
-.section saaqb
-.section saara
-.section saarb
-.section saasa
-.section saasb
-.section saata
-.section saatb
-.section saaua
-.section saaub
-.section saava
-.section saavb
-.section saawa
-.section saawb
-.section saaxa
-.section saaxb
-.section saaya
-.section saayb
-.section saaza
-.section saazb
-.section saa1a
-.section saa1b
-.section saa2a
-.section saa2b
-.section saa3a
-.section saa3b
-.section saa4a
-.section saa4b
-.section saa5a
-.section saa5b
-.section saa6a
-.section saa6b
-.section saa7a
-.section saa7b
-.section saa8a
-.section saa8b
-.section saa9a
-.section saa9b
-.section saa0a
-.section saa0b
-.section sabaa
-.section sabab
-.section sabba
-.section sabbb
-.section sabca
-.section sabcb
-.section sabda
-.section sabdb
-.section sabea
-.section sabeb
-.section sabfa
-.section sabfb
-.section sabga
-.section sabgb
-.section sabha
-.section sabhb
-.section sabia
-.section sabib
-.section sabja
-.section sabjb
-.section sabka
-.section sabkb
-.section sabla
-.section sablb
-.section sabma
-.section sabmb
-.section sabna
-.section sabnb
-.section saboa
-.section sabob
-.section sabpa
-.section sabpb
-.section sabqa
-.section sabqb
-.section sabra
-.section sabrb
-.section sabsa
-.section sabsb
-.section sabta
-.section sabtb
-.section sabua
-.section sabub
-.section sabva
-.section sabvb
-.section sabwa
-.section sabwb
-.section sabxa
-.section sabxb
-.section sabya
-.section sabyb
-.section sabza
-.section sabzb
-.section sab1a
-.section sab1b
-.section sab2a
-.section sab2b
-.section sab3a
-.section sab3b
-.section sab4a
-.section sab4b
-.section sab5a
-.section sab5b
-.section sab6a
-.section sab6b
-.section sab7a
-.section sab7b
-.section sab8a
-.section sab8b
-.section sab9a
-.section sab9b
-.section sab0a
-.section sab0b
-.section sacaa
-.section sacab
-.section sacba
-.section sacbb
-.section sacca
-.section saccb
-.section sacda
-.section sacdb
-.section sacea
-.section saceb
-.section sacfa
-.section sacfb
-.section sacga
-.section sacgb
-.section sacha
-.section sachb
-.section sacia
-.section sacib
-.section sacja
-.section sacjb
-.section sacka
-.section sackb
-.section sacla
-.section saclb
-.section sacma
-.section sacmb
-.section sacna
-.section sacnb
-.section sacoa
-.section sacob
-.section sacpa
-.section sacpb
-.section sacqa
-.section sacqb
-.section sacra
-.section sacrb
-.section sacsa
-.section sacsb
-.section sacta
-.section sactb
-.section sacua
-.section sacub
-.section sacva
-.section sacvb
-.section sacwa
-.section sacwb
-.section sacxa
-.section sacxb
-.section sacya
-.section sacyb
-.section sacza
-.section saczb
-.section sac1a
-.section sac1b
-.section sac2a
-.section sac2b
-.section sac3a
-.section sac3b
-.section sac4a
-.section sac4b
-.section sac5a
-.section sac5b
-.section sac6a
-.section sac6b
-.section sac7a
-.section sac7b
-.section sac8a
-.section sac8b
-.section sac9a
-.section sac9b
-.section sac0a
-.section sac0b
-.section sadaa
-.section sadab
-.section sadba
-.section sadbb
-.section sadca
-.section sadcb
-.section sadda
-.section saddb
-.section sadea
-.section sadeb
-.section sadfa
-.section sadfb
-.section sadga
-.section sadgb
-.section sadha
-.section sadhb
-.section sadia
-.section sadib
-.section sadja
-.section sadjb
-.section sadka
-.section sadkb
-.section sadla
-.section sadlb
-.section sadma
-.section sadmb
-.section sadna
-.section sadnb
-.section sadoa
-.section sadob
-.section sadpa
-.section sadpb
-.section sadqa
-.section sadqb
-.section sadra
-.section sadrb
-.section sadsa
-.section sadsb
-.section sadta
-.section sadtb
-.section sadua
-.section sadub
-.section sadva
-.section sadvb
-.section sadwa
-.section sadwb
-.section sadxa
-.section sadxb
-.section sadya
-.section sadyb
-.section sadza
-.section sadzb
-.section sad1a
-.section sad1b
-.section sad2a
-.section sad2b
-.section sad3a
-.section sad3b
-.section sad4a
-.section sad4b
-.section sad5a
-.section sad5b
-.section sad6a
-.section sad6b
-.section sad7a
-.section sad7b
-.section sad8a
-.section sad8b
-.section sad9a
-.section sad9b
-.section sad0a
-.section sad0b
-.section saeaa
-.section saeab
-.section saeba
-.section saebb
-.section saeca
-.section saecb
-.section saeda
-.section saedb
-.section saeea
-.section saeeb
-.section saefa
-.section saefb
-.section saega
-.section saegb
-.section saeha
-.section saehb
-.section saeia
-.section saeib
-.section saeja
-.section saejb
-.section saeka
-.section saekb
-.section saela
-.section saelb
-.section saema
-.section saemb
-.section saena
-.section saenb
-.section saeoa
-.section saeob
-.section saepa
-.section saepb
-.section saeqa
-.section saeqb
-.section saera
-.section saerb
-.section saesa
-.section saesb
-.section saeta
-.section saetb
-.section saeua
-.section saeub
-.section saeva
-.section saevb
-.section saewa
-.section saewb
-.section saexa
-.section saexb
-.section saeya
-.section saeyb
-.section saeza
-.section saezb
-.section sae1a
-.section sae1b
-.section sae2a
-.section sae2b
-.section sae3a
-.section sae3b
-.section sae4a
-.section sae4b
-.section sae5a
-.section sae5b
-.section sae6a
-.section sae6b
-.section sae7a
-.section sae7b
-.section sae8a
-.section sae8b
-.section sae9a
-.section sae9b
-.section sae0a
-.section sae0b
-.section safaa
-.section safab
-.section safba
-.section safbb
-.section safca
-.section safcb
-.section safda
-.section safdb
-.section safea
-.section safeb
-.section saffa
-.section saffb
-.section safga
-.section safgb
-.section safha
-.section safhb
-.section safia
-.section safib
-.section safja
-.section safjb
-.section safka
-.section safkb
-.section safla
-.section saflb
-.section safma
-.section safmb
-.section safna
-.section safnb
-.section safoa
-.section safob
-.section safpa
-.section safpb
-.section safqa
-.section safqb
-.section safra
-.section safrb
-.section safsa
-.section safsb
-.section safta
-.section saftb
-.section safua
-.section safub
-.section safva
-.section safvb
-.section safwa
-.section safwb
-.section safxa
-.section safxb
-.section safya
-.section safyb
-.section safza
-.section safzb
-.section saf1a
-.section saf1b
-.section saf2a
-.section saf2b
-.section saf3a
-.section saf3b
-.section saf4a
-.section saf4b
-.section saf5a
-.section saf5b
-.section saf6a
-.section saf6b
-.section saf7a
-.section saf7b
-.section saf8a
-.section saf8b
-.section saf9a
-.section saf9b
-.section saf0a
-.section saf0b
-.section sagaa
-.section sagab
-.section sagba
-.section sagbb
-.section sagca
-.section sagcb
-.section sagda
-.section sagdb
-.section sagea
-.section sageb
-.section sagfa
-.section sagfb
-.section sagga
-.section saggb
-.section sagha
-.section saghb
-.section sagia
-.section sagib
-.section sagja
-.section sagjb
-.section sagka
-.section sagkb
-.section sagla
-.section saglb
-.section sagma
-.section sagmb
-.section sagna
-.section sagnb
-.section sagoa
-.section sagob
-.section sagpa
-.section sagpb
-.section sagqa
-.section sagqb
-.section sagra
-.section sagrb
-.section sagsa
-.section sagsb
-.section sagta
-.section sagtb
-.section sagua
-.section sagub
-.section sagva
-.section sagvb
-.section sagwa
-.section sagwb
-.section sagxa
-.section sagxb
-.section sagya
-.section sagyb
-.section sagza
-.section sagzb
-.section sag1a
-.section sag1b
-.section sag2a
-.section sag2b
-.section sag3a
-.section sag3b
-.section sag4a
-.section sag4b
-.section sag5a
-.section sag5b
-.section sag6a
-.section sag6b
-.section sag7a
-.section sag7b
-.section sag8a
-.section sag8b
-.section sag9a
-.section sag9b
-.section sag0a
-.section sag0b
-.section sahaa
-.section sahab
-.section sahba
-.section sahbb
-.section sahca
-.section sahcb
-.section sahda
-.section sahdb
-.section sahea
-.section saheb
-.section sahfa
-.section sahfb
-.section sahga
-.section sahgb
-.section sahha
-.section sahhb
-.section sahia
-.section sahib
-.section sahja
-.section sahjb
-.section sahka
-.section sahkb
-.section sahla
-.section sahlb
-.section sahma
-.section sahmb
-.section sahna
-.section sahnb
-.section sahoa
-.section sahob
-.section sahpa
-.section sahpb
-.section sahqa
-.section sahqb
-.section sahra
-.section sahrb
-.section sahsa
-.section sahsb
-.section sahta
-.section sahtb
-.section sahua
-.section sahub
-.section sahva
-.section sahvb
-.section sahwa
-.section sahwb
-.section sahxa
-.section sahxb
-.section sahya
-.section sahyb
-.section sahza
-.section sahzb
-.section sah1a
-.section sah1b
-.section sah2a
-.section sah2b
-.section sah3a
-.section sah3b
-.section sah4a
-.section sah4b
-.section sah5a
-.section sah5b
-.section sah6a
-.section sah6b
-.section sah7a
-.section sah7b
-.section sah8a
-.section sah8b
-.section sah9a
-.section sah9b
-.section sah0a
-.section sah0b
-.section saiaa
-.section saiab
-.section saiba
-.section saibb
-.section saica
-.section saicb
-.section saida
-.section saidb
-.section saiea
-.section saieb
-.section saifa
-.section saifb
-.section saiga
-.section saigb
-.section saiha
-.section saihb
-.section saiia
-.section saiib
-.section saija
-.section saijb
-.section saika
-.section saikb
-.section saila
-.section sailb
-.section saima
-.section saimb
-.section saina
-.section sainb
-.section saioa
-.section saiob
-.section saipa
-.section saipb
-.section saiqa
-.section saiqb
-.section saira
-.section sairb
-.section saisa
-.section saisb
-.section saita
-.section saitb
-.section saiua
-.section saiub
-.section saiva
-.section saivb
-.section saiwa
-.section saiwb
-.section saixa
-.section saixb
-.section saiya
-.section saiyb
-.section saiza
-.section saizb
-.section sai1a
-.section sai1b
-.section sai2a
-.section sai2b
-.section sai3a
-.section sai3b
-.section sai4a
-.section sai4b
-.section sai5a
-.section sai5b
-.section sai6a
-.section sai6b
-.section sai7a
-.section sai7b
-.section sai8a
-.section sai8b
-.section sai9a
-.section sai9b
-.section sai0a
-.section sai0b
-.section sajaa
-.section sajab
-.section sajba
-.section sajbb
-.section sajca
-.section sajcb
-.section sajda
-.section sajdb
-.section sajea
-.section sajeb
-.section sajfa
-.section sajfb
-.section sajga
-.section sajgb
-.section sajha
-.section sajhb
-.section sajia
-.section sajib
-.section sajja
-.section sajjb
-.section sajka
-.section sajkb
-.section sajla
-.section sajlb
-.section sajma
-.section sajmb
-.section sajna
-.section sajnb
-.section sajoa
-.section sajob
-.section sajpa
-.section sajpb
-.section sajqa
-.section sajqb
-.section sajra
-.section sajrb
-.section sajsa
-.section sajsb
-.section sajta
-.section sajtb
-.section sajua
-.section sajub
-.section sajva
-.section sajvb
-.section sajwa
-.section sajwb
-.section sajxa
-.section sajxb
-.section sajya
-.section sajyb
-.section sajza
-.section sajzb
-.section saj1a
-.section saj1b
-.section saj2a
-.section saj2b
-.section saj3a
-.section saj3b
-.section saj4a
-.section saj4b
-.section saj5a
-.section saj5b
-.section saj6a
-.section saj6b
-.section saj7a
-.section saj7b
-.section saj8a
-.section saj8b
-.section saj9a
-.section saj9b
-.section saj0a
-.section saj0b
-.section sakaa
-.section sakab
-.section sakba
-.section sakbb
-.section sakca
-.section sakcb
-.section sakda
-.section sakdb
-.section sakea
-.section sakeb
-.section sakfa
-.section sakfb
-.section sakga
-.section sakgb
-.section sakha
-.section sakhb
-.section sakia
-.section sakib
-.section sakja
-.section sakjb
-.section sakka
-.section sakkb
-.section sakla
-.section saklb
-.section sakma
-.section sakmb
-.section sakna
-.section saknb
-.section sakoa
-.section sakob
-.section sakpa
-.section sakpb
-.section sakqa
-.section sakqb
-.section sakra
-.section sakrb
-.section saksa
-.section saksb
-.section sakta
-.section saktb
-.section sakua
-.section sakub
-.section sakva
-.section sakvb
-.section sakwa
-.section sakwb
-.section sakxa
-.section sakxb
-.section sakya
-.section sakyb
-.section sakza
-.section sakzb
-.section sak1a
-.section sak1b
-.section sak2a
-.section sak2b
-.section sak3a
-.section sak3b
-.section sak4a
-.section sak4b
-.section sak5a
-.section sak5b
-.section sak6a
-.section sak6b
-.section sak7a
-.section sak7b
-.section sak8a
-.section sak8b
-.section sak9a
-.section sak9b
-.section sak0a
-.section sak0b
-.section salaa
-.section salab
-.section salba
-.section salbb
-.section salca
-.section salcb
-.section salda
-.section saldb
-.section salea
-.section saleb
-.section salfa
-.section salfb
-.section salga
-.section salgb
-.section salha
-.section salhb
-.section salia
-.section salib
-.section salja
-.section saljb
-.section salka
-.section salkb
-.section salla
-.section sallb
-.section salma
-.section salmb
-.section salna
-.section salnb
-.section saloa
-.section salob
-.section salpa
-.section salpb
-.section salqa
-.section salqb
-.section salra
-.section salrb
-.section salsa
-.section salsb
-.section salta
-.section saltb
-.section salua
-.section salub
-.section salva
-.section salvb
-.section salwa
-.section salwb
-.section salxa
-.section salxb
-.section salya
-.section salyb
-.section salza
-.section salzb
-.section sal1a
-.section sal1b
-.section sal2a
-.section sal2b
-.section sal3a
-.section sal3b
-.section sal4a
-.section sal4b
-.section sal5a
-.section sal5b
-.section sal6a
-.section sal6b
-.section sal7a
-.section sal7b
-.section sal8a
-.section sal8b
-.section sal9a
-.section sal9b
-.section sal0a
-.section sal0b
-.section samaa
-.section samab
-.section samba
-.section sambb
-.section samca
-.section samcb
-.section samda
-.section samdb
-.section samea
-.section sameb
-.section samfa
-.section samfb
-.section samga
-.section samgb
-.section samha
-.section samhb
-.section samia
-.section samib
-.section samja
-.section samjb
-.section samka
-.section samkb
-.section samla
-.section samlb
-.section samma
-.section sammb
-.section samna
-.section samnb
-.section samoa
-.section samob
-.section sampa
-.section sampb
-.section samqa
-.section samqb
-.section samra
-.section samrb
-.section samsa
-.section samsb
-.section samta
-.section samtb
-.section samua
-.section samub
-.section samva
-.section samvb
-.section samwa
-.section samwb
-.section samxa
-.section samxb
-.section samya
-.section samyb
-.section samza
-.section samzb
-.section sam1a
-.section sam1b
-.section sam2a
-.section sam2b
-.section sam3a
-.section sam3b
-.section sam4a
-.section sam4b
-.section sam5a
-.section sam5b
-.section sam6a
-.section sam6b
-.section sam7a
-.section sam7b
-.section sam8a
-.section sam8b
-.section sam9a
-.section sam9b
-.section sam0a
-.section sam0b
-.section sanaa
-.section sanab
-.section sanba
-.section sanbb
-.section sanca
-.section sancb
-.section sanda
-.section sandb
-.section sanea
-.section saneb
-.section sanfa
-.section sanfb
-.section sanga
-.section sangb
-.section sanha
-.section sanhb
-.section sania
-.section sanib
-.section sanja
-.section sanjb
-.section sanka
-.section sankb
-.section sanla
-.section sanlb
-.section sanma
-.section sanmb
-.section sanna
-.section sannb
-.section sanoa
-.section sanob
-.section sanpa
-.section sanpb
-.section sanqa
-.section sanqb
-.section sanra
-.section sanrb
-.section sansa
-.section sansb
-.section santa
-.section santb
-.section sanua
-.section sanub
-.section sanva
-.section sanvb
-.section sanwa
-.section sanwb
-.section sanxa
-.section sanxb
-.section sanya
-.section sanyb
-.section sanza
-.section sanzb
-.section san1a
-.section san1b
-.section san2a
-.section san2b
-.section san3a
-.section san3b
-.section san4a
-.section san4b
-.section san5a
-.section san5b
-.section san6a
-.section san6b
-.section san7a
-.section san7b
-.section san8a
-.section san8b
-.section san9a
-.section san9b
-.section san0a
-.section san0b
-.section saoaa
-.section saoab
-.section saoba
-.section saobb
-.section saoca
-.section saocb
-.section saoda
-.section saodb
-.section saoea
-.section saoeb
-.section saofa
-.section saofb
-.section saoga
-.section saogb
-.section saoha
-.section saohb
-.section saoia
-.section saoib
-.section saoja
-.section saojb
-.section saoka
-.section saokb
-.section saola
-.section saolb
-.section saoma
-.section saomb
-.section saona
-.section saonb
-.section saooa
-.section saoob
-.section saopa
-.section saopb
-.section saoqa
-.section saoqb
-.section saora
-.section saorb
-.section saosa
-.section saosb
-.section saota
-.section saotb
-.section saoua
-.section saoub
-.section saova
-.section saovb
-.section saowa
-.section saowb
-.section saoxa
-.section saoxb
-.section saoya
-.section saoyb
-.section saoza
-.section saozb
-.section sao1a
-.section sao1b
-.section sao2a
-.section sao2b
-.section sao3a
-.section sao3b
-.section sao4a
-.section sao4b
-.section sao5a
-.section sao5b
-.section sao6a
-.section sao6b
-.section sao7a
-.section sao7b
-.section sao8a
-.section sao8b
-.section sao9a
-.section sao9b
-.section sao0a
-.section sao0b
-.section sapaa
-.section sapab
-.section sapba
-.section sapbb
-.section sapca
-.section sapcb
-.section sapda
-.section sapdb
-.section sapea
-.section sapeb
-.section sapfa
-.section sapfb
-.section sapga
-.section sapgb
-.section sapha
-.section saphb
-.section sapia
-.section sapib
-.section sapja
-.section sapjb
-.section sapka
-.section sapkb
-.section sapla
-.section saplb
-.section sapma
-.section sapmb
-.section sapna
-.section sapnb
-.section sapoa
-.section sapob
-.section sappa
-.section sappb
-.section sapqa
-.section sapqb
-.section sapra
-.section saprb
-.section sapsa
-.section sapsb
-.section sapta
-.section saptb
-.section sapua
-.section sapub
-.section sapva
-.section sapvb
-.section sapwa
-.section sapwb
-.section sapxa
-.section sapxb
-.section sapya
-.section sapyb
-.section sapza
-.section sapzb
-.section sap1a
-.section sap1b
-.section sap2a
-.section sap2b
-.section sap3a
-.section sap3b
-.section sap4a
-.section sap4b
-.section sap5a
-.section sap5b
-.section sap6a
-.section sap6b
-.section sap7a
-.section sap7b
-.section sap8a
-.section sap8b
-.section sap9a
-.section sap9b
-.section sap0a
-.section sap0b
-.section saqaa
-.section saqab
-.section saqba
-.section saqbb
-.section saqca
-.section saqcb
-.section saqda
-.section saqdb
-.section saqea
-.section saqeb
-.section saqfa
-.section saqfb
-.section saqga
-.section saqgb
-.section saqha
-.section saqhb
-.section saqia
-.section saqib
-.section saqja
-.section saqjb
-.section saqka
-.section saqkb
-.section saqla
-.section saqlb
-.section saqma
-.section saqmb
-.section saqna
-.section saqnb
-.section saqoa
-.section saqob
-.section saqpa
-.section saqpb
-.section saqqa
-.section saqqb
-.section saqra
-.section saqrb
-.section saqsa
-.section saqsb
-.section saqta
-.section saqtb
-.section saqua
-.section saqub
-.section saqva
-.section saqvb
-.section saqwa
-.section saqwb
-.section saqxa
-.section saqxb
-.section saqya
-.section saqyb
-.section saqza
-.section saqzb
-.section saq1a
-.section saq1b
-.section saq2a
-.section saq2b
-.section saq3a
-.section saq3b
-.section saq4a
-.section saq4b
-.section saq5a
-.section saq5b
-.section saq6a
-.section saq6b
-.section saq7a
-.section saq7b
-.section saq8a
-.section saq8b
-.section saq9a
-.section saq9b
-.section saq0a
-.section saq0b
-.section saraa
-.section sarab
-.section sarba
-.section sarbb
-.section sarca
-.section sarcb
-.section sarda
-.section sardb
-.section sarea
-.section sareb
-.section sarfa
-.section sarfb
-.section sarga
-.section sargb
-.section sarha
-.section sarhb
-.section saria
-.section sarib
-.section sarja
-.section sarjb
-.section sarka
-.section sarkb
-.section sarla
-.section sarlb
-.section sarma
-.section sarmb
-.section sarna
-.section sarnb
-.section saroa
-.section sarob
-.section sarpa
-.section sarpb
-.section sarqa
-.section sarqb
-.section sarra
-.section sarrb
-.section sarsa
-.section sarsb
-.section sarta
-.section sartb
-.section sarua
-.section sarub
-.section sarva
-.section sarvb
-.section sarwa
-.section sarwb
-.section sarxa
-.section sarxb
-.section sarya
-.section saryb
-.section sarza
-.section sarzb
-.section sar1a
-.section sar1b
-.section sar2a
-.section sar2b
-.section sar3a
-.section sar3b
-.section sar4a
-.section sar4b
-.section sar5a
-.section sar5b
-.section sar6a
-.section sar6b
-.section sar7a
-.section sar7b
-.section sar8a
-.section sar8b
-.section sar9a
-.section sar9b
-.section sar0a
-.section sar0b
-.section sasaa
-.section sasab
-.section sasba
-.section sasbb
-.section sasca
-.section sascb
-.section sasda
-.section sasdb
-.section sasea
-.section saseb
-.section sasfa
-.section sasfb
-.section sasga
-.section sasgb
-.section sasha
-.section sashb
-.section sasia
-.section sasib
-.section sasja
-.section sasjb
-.section saska
-.section saskb
-.section sasla
-.section saslb
-.section sasma
-.section sasmb
-.section sasna
-.section sasnb
-.section sasoa
-.section sasob
-.section saspa
-.section saspb
-.section sasqa
-.section sasqb
-.section sasra
-.section sasrb
-.section sassa
-.section sassb
-.section sasta
-.section sastb
-.section sasua
-.section sasub
-.section sasva
-.section sasvb
-.section saswa
-.section saswb
-.section sasxa
-.section sasxb
-.section sasya
-.section sasyb
-.section sasza
-.section saszb
-.section sas1a
-.section sas1b
-.section sas2a
-.section sas2b
-.section sas3a
-.section sas3b
-.section sas4a
-.section sas4b
-.section sas5a
-.section sas5b
-.section sas6a
-.section sas6b
-.section sas7a
-.section sas7b
-.section sas8a
-.section sas8b
-.section sas9a
-.section sas9b
-.section sas0a
-.section sas0b
-.section sataa
-.section satab
-.section satba
-.section satbb
-.section satca
-.section satcb
-.section satda
-.section satdb
-.section satea
-.section sateb
-.section satfa
-.section satfb
-.section satga
-.section satgb
-.section satha
-.section sathb
-.section satia
-.section satib
-.section satja
-.section satjb
-.section satka
-.section satkb
-.section satla
-.section satlb
-.section satma
-.section satmb
-.section satna
-.section satnb
-.section satoa
-.section satob
-.section satpa
-.section satpb
-.section satqa
-.section satqb
-.section satra
-.section satrb
-.section satsa
-.section satsb
-.section satta
-.section sattb
-.section satua
-.section satub
-.section satva
-.section satvb
-.section satwa
-.section satwb
-.section satxa
-.section satxb
-.section satya
-.section satyb
-.section satza
-.section satzb
-.section sat1a
-.section sat1b
-.section sat2a
-.section sat2b
-.section sat3a
-.section sat3b
-.section sat4a
-.section sat4b
-.section sat5a
-.section sat5b
-.section sat6a
-.section sat6b
-.section sat7a
-.section sat7b
-.section sat8a
-.section sat8b
-.section sat9a
-.section sat9b
-.section sat0a
-.section sat0b
-.section sauaa
-.section sauab
-.section sauba
-.section saubb
-.section sauca
-.section saucb
-.section sauda
-.section saudb
-.section sauea
-.section saueb
-.section saufa
-.section saufb
-.section sauga
-.section saugb
-.section sauha
-.section sauhb
-.section sauia
-.section sauib
-.section sauja
-.section saujb
-.section sauka
-.section saukb
-.section saula
-.section saulb
-.section sauma
-.section saumb
-.section sauna
-.section saunb
-.section sauoa
-.section sauob
-.section saupa
-.section saupb
-.section sauqa
-.section sauqb
-.section saura
-.section saurb
-.section sausa
-.section sausb
-.section sauta
-.section sautb
-.section sauua
-.section sauub
-.section sauva
-.section sauvb
-.section sauwa
-.section sauwb
-.section sauxa
-.section sauxb
-.section sauya
-.section sauyb
-.section sauza
-.section sauzb
-.section sau1a
-.section sau1b
-.section sau2a
-.section sau2b
-.section sau3a
-.section sau3b
-.section sau4a
-.section sau4b
-.section sau5a
-.section sau5b
-.section sau6a
-.section sau6b
-.section sau7a
-.section sau7b
-.section sau8a
-.section sau8b
-.section sau9a
-.section sau9b
-.section sau0a
-.section sau0b
-.section savaa
-.section savab
-.section savba
-.section savbb
-.section savca
-.section savcb
-.section savda
-.section savdb
-.section savea
-.section saveb
-.section savfa
-.section savfb
-.section savga
-.section savgb
-.section savha
-.section savhb
-.section savia
-.section savib
-.section savja
-.section savjb
-.section savka
-.section savkb
-.section savla
-.section savlb
-.section savma
-.section savmb
-.section savna
-.section savnb
-.section savoa
-.section savob
-.section savpa
-.section savpb
-.section savqa
-.section savqb
-.section savra
-.section savrb
-.section savsa
-.section savsb
-.section savta
-.section savtb
-.section savua
-.section savub
-.section savva
-.section savvb
-.section savwa
-.section savwb
-.section savxa
-.section savxb
-.section savya
-.section savyb
-.section savza
-.section savzb
-.section sav1a
-.section sav1b
-.section sav2a
-.section sav2b
-.section sav3a
-.section sav3b
-.section sav4a
-.section sav4b
-.section sav5a
-.section sav5b
-.section sav6a
-.section sav6b
-.section sav7a
-.section sav7b
-.section sav8a
-.section sav8b
-.section sav9a
-.section sav9b
-.section sav0a
-.section sav0b
-.section sawaa
-.section sawab
-.section sawba
-.section sawbb
-.section sawca
-.section sawcb
-.section sawda
-.section sawdb
-.section sawea
-.section saweb
-.section sawfa
-.section sawfb
-.section sawga
-.section sawgb
-.section sawha
-.section sawhb
-.section sawia
-.section sawib
-.section sawja
-.section sawjb
-.section sawka
-.section sawkb
-.section sawla
-.section sawlb
-.section sawma
-.section sawmb
-.section sawna
-.section sawnb
-.section sawoa
-.section sawob
-.section sawpa
-.section sawpb
-.section sawqa
-.section sawqb
-.section sawra
-.section sawrb
-.section sawsa
-.section sawsb
-.section sawta
-.section sawtb
-.section sawua
-.section sawub
-.section sawva
-.section sawvb
-.section sawwa
-.section sawwb
-.section sawxa
-.section sawxb
-.section sawya
-.section sawyb
-.section sawza
-.section sawzb
-.section saw1a
-.section saw1b
-.section saw2a
-.section saw2b
-.section saw3a
-.section saw3b
-.section saw4a
-.section saw4b
-.section saw5a
-.section saw5b
-.section saw6a
-.section saw6b
-.section saw7a
-.section saw7b
-.section saw8a
-.section saw8b
-.section saw9a
-.section saw9b
-.section saw0a
-.section saw0b
-.section saxaa
-.section saxab
-.section saxba
-.section saxbb
-.section saxca
-.section saxcb
-.section saxda
-.section saxdb
-.section saxea
-.section saxeb
-.section saxfa
-.section saxfb
-.section saxga
-.section saxgb
-.section saxha
-.section saxhb
-.section saxia
-.section saxib
-.section saxja
-.section saxjb
-.section saxka
-.section saxkb
-.section saxla
-.section saxlb
-.section saxma
-.section saxmb
-.section saxna
-.section saxnb
-.section saxoa
-.section saxob
-.section saxpa
-.section saxpb
-.section saxqa
-.section saxqb
-.section saxra
-.section saxrb
-.section saxsa
-.section saxsb
-.section saxta
-.section saxtb
-.section saxua
-.section saxub
-.section saxva
-.section saxvb
-.section saxwa
-.section saxwb
-.section saxxa
-.section saxxb
-.section saxya
-.section saxyb
-.section saxza
-.section saxzb
-.section sax1a
-.section sax1b
-.section sax2a
-.section sax2b
-.section sax3a
-.section sax3b
-.section sax4a
-.section sax4b
-.section sax5a
-.section sax5b
-.section sax6a
-.section sax6b
-.section sax7a
-.section sax7b
-.section sax8a
-.section sax8b
-.section sax9a
-.section sax9b
-.section sax0a
-.section sax0b
-.section sayaa
-.section sayab
-.section sayba
-.section saybb
-.section sayca
-.section saycb
-.section sayda
-.section saydb
-.section sayea
-.section sayeb
-.section sayfa
-.section sayfb
-.section sayga
-.section saygb
-.section sayha
-.section sayhb
-.section sayia
-.section sayib
-.section sayja
-.section sayjb
-.section sayka
-.section saykb
-.section sayla
-.section saylb
-.section sayma
-.section saymb
-.section sayna
-.section saynb
-.section sayoa
-.section sayob
-.section saypa
-.section saypb
-.section sayqa
-.section sayqb
-.section sayra
-.section sayrb
-.section saysa
-.section saysb
-.section sayta
-.section saytb
-.section sayua
-.section sayub
-.section sayva
-.section sayvb
-.section saywa
-.section saywb
-.section sayxa
-.section sayxb
-.section sayya
-.section sayyb
-.section sayza
-.section sayzb
-.section say1a
-.section say1b
-.section say2a
-.section say2b
-.section say3a
-.section say3b
-.section say4a
-.section say4b
-.section say5a
-.section say5b
-.section say6a
-.section say6b
-.section say7a
-.section say7b
-.section say8a
-.section say8b
-.section say9a
-.section say9b
-.section say0a
-.section say0b
-.section sazaa
-.section sazab
-.section sazba
-.section sazbb
-.section sazca
-.section sazcb
-.section sazda
-.section sazdb
-.section sazea
-.section sazeb
-.section sazfa
-.section sazfb
-.section sazga
-.section sazgb
-.section sazha
-.section sazhb
-.section sazia
-.section sazib
-.section sazja
-.section sazjb
-.section sazka
-.section sazkb
-.section sazla
-.section sazlb
-.section sazma
-.section sazmb
-.section sazna
-.section saznb
-.section sazoa
-.section sazob
-.section sazpa
-.section sazpb
-.section sazqa
-.section sazqb
-.section sazra
-.section sazrb
-.section sazsa
-.section sazsb
-.section sazta
-.section saztb
-.section sazua
-.section sazub
-.section sazva
-.section sazvb
-.section sazwa
-.section sazwb
-.section sazxa
-.section sazxb
-.section sazya
-.section sazyb
-.section sazza
-.section sazzb
-.section saz1a
-.section saz1b
-.section saz2a
-.section saz2b
-.section saz3a
-.section saz3b
-.section saz4a
-.section saz4b
-.section saz5a
-.section saz5b
-.section saz6a
-.section saz6b
-.section saz7a
-.section saz7b
-.section saz8a
-.section saz8b
-.section saz9a
-.section saz9b
-.section saz0a
-.section saz0b
-.section sa1aa
-.section sa1ab
-.section sa1ba
-.section sa1bb
-.section sa1ca
-.section sa1cb
-.section sa1da
-.section sa1db
-.section sa1ea
-.section sa1eb
-.section sa1fa
-.section sa1fb
-.section sa1ga
-.section sa1gb
-.section sa1ha
-.section sa1hb
-.section sa1ia
-.section sa1ib
-.section sa1ja
-.section sa1jb
-.section sa1ka
-.section sa1kb
-.section sa1la
-.section sa1lb
-.section sa1ma
-.section sa1mb
-.section sa1na
-.section sa1nb
-.section sa1oa
-.section sa1ob
-.section sa1pa
-.section sa1pb
-.section sa1qa
-.section sa1qb
-.section sa1ra
-.section sa1rb
-.section sa1sa
-.section sa1sb
-.section sa1ta
-.section sa1tb
-.section sa1ua
-.section sa1ub
-.section sa1va
-.section sa1vb
-.section sa1wa
-.section sa1wb
-.section sa1xa
-.section sa1xb
-.section sa1ya
-.section sa1yb
-.section sa1za
-.section sa1zb
-.section sa11a
-.section sa11b
-.section sa12a
-.section sa12b
-.section sa13a
-.section sa13b
-.section sa14a
-.section sa14b
-.section sa15a
-.section sa15b
-.section sa16a
-.section sa16b
-.section sa17a
-.section sa17b
-.section sa18a
-.section sa18b
-.section sa19a
-.section sa19b
-.section sa10a
-.section sa10b
-.section sa2aa
-.section sa2ab
-.section sa2ba
-.section sa2bb
-.section sa2ca
-.section sa2cb
-.section sa2da
-.section sa2db
-.section sa2ea
-.section sa2eb
-.section sa2fa
-.section sa2fb
-.section sa2ga
-.section sa2gb
-.section sa2ha
-.section sa2hb
-.section sa2ia
-.section sa2ib
-.section sa2ja
-.section sa2jb
-.section sa2ka
-.section sa2kb
-.section sa2la
-.section sa2lb
-.section sa2ma
-.section sa2mb
-.section sa2na
-.section sa2nb
-.section sa2oa
-.section sa2ob
-.section sa2pa
-.section sa2pb
-.section sa2qa
-.section sa2qb
-.section sa2ra
-.section sa2rb
-.section sa2sa
-.section sa2sb
-.section sa2ta
-.section sa2tb
-.section sa2ua
-.section sa2ub
-.section sa2va
-.section sa2vb
-.section sa2wa
-.section sa2wb
-.section sa2xa
-.section sa2xb
-.section sa2ya
-.section sa2yb
-.section sa2za
-.section sa2zb
-.section sa21a
-.section sa21b
-.section sa22a
-.section sa22b
-.section sa23a
-.section sa23b
-.section sa24a
-.section sa24b
-.section sa25a
-.section sa25b
-.section sa26a
-.section sa26b
-.section sa27a
-.section sa27b
-.section sa28a
-.section sa28b
-.section sa29a
-.section sa29b
-.section sa20a
-.section sa20b
-.section sa3aa
-.section sa3ab
-.section sa3ba
-.section sa3bb
-.section sa3ca
-.section sa3cb
-.section sa3da
-.section sa3db
-.section sa3ea
-.section sa3eb
-.section sa3fa
-.section sa3fb
-.section sa3ga
-.section sa3gb
-.section sa3ha
-.section sa3hb
-.section sa3ia
-.section sa3ib
-.section sa3ja
-.section sa3jb
-.section sa3ka
-.section sa3kb
-.section sa3la
-.section sa3lb
-.section sa3ma
-.section sa3mb
-.section sa3na
-.section sa3nb
-.section sa3oa
-.section sa3ob
-.section sa3pa
-.section sa3pb
-.section sa3qa
-.section sa3qb
-.section sa3ra
-.section sa3rb
-.section sa3sa
-.section sa3sb
-.section sa3ta
-.section sa3tb
-.section sa3ua
-.section sa3ub
-.section sa3va
-.section sa3vb
-.section sa3wa
-.section sa3wb
-.section sa3xa
-.section sa3xb
-.section sa3ya
-.section sa3yb
-.section sa3za
-.section sa3zb
-.section sa31a
-.section sa31b
-.section sa32a
-.section sa32b
-.section sa33a
-.section sa33b
-.section sa34a
-.section sa34b
-.section sa35a
-.section sa35b
-.section sa36a
-.section sa36b
-.section sa37a
-.section sa37b
-.section sa38a
-.section sa38b
-.section sa39a
-.section sa39b
-.section sa30a
-.section sa30b
-.section sa4aa
-.section sa4ab
-.section sa4ba
-.section sa4bb
-.section sa4ca
-.section sa4cb
-.section sa4da
-.section sa4db
-.section sa4ea
-.section sa4eb
-.section sa4fa
-.section sa4fb
-.section sa4ga
-.section sa4gb
-.section sa4ha
-.section sa4hb
-.section sa4ia
-.section sa4ib
-.section sa4ja
-.section sa4jb
-.section sa4ka
-.section sa4kb
-.section sa4la
-.section sa4lb
-.section sa4ma
-.section sa4mb
-.section sa4na
-.section sa4nb
-.section sa4oa
-.section sa4ob
-.section sa4pa
-.section sa4pb
-.section sa4qa
-.section sa4qb
-.section sa4ra
-.section sa4rb
-.section sa4sa
-.section sa4sb
-.section sa4ta
-.section sa4tb
-.section sa4ua
-.section sa4ub
-.section sa4va
-.section sa4vb
-.section sa4wa
-.section sa4wb
-.section sa4xa
-.section sa4xb
-.section sa4ya
-.section sa4yb
-.section sa4za
-.section sa4zb
-.section sa41a
-.section sa41b
-.section sa42a
-.section sa42b
-.section sa43a
-.section sa43b
-.section sa44a
-.section sa44b
-.section sa45a
-.section sa45b
-.section sa46a
-.section sa46b
-.section sa47a
-.section sa47b
-.section sa48a
-.section sa48b
-.section sa49a
-.section sa49b
-.section sa40a
-.section sa40b
-.section sa5aa
-.section sa5ab
-.section sa5ba
-.section sa5bb
-.section sa5ca
-.section sa5cb
-.section sa5da
-.section sa5db
-.section sa5ea
-.section sa5eb
-.section sa5fa
-.section sa5fb
-.section sa5ga
-.section sa5gb
-.section sa5ha
-.section sa5hb
-.section sa5ia
-.section sa5ib
-.section sa5ja
-.section sa5jb
-.section sa5ka
-.section sa5kb
-.section sa5la
-.section sa5lb
-.section sa5ma
-.section sa5mb
-.section sa5na
-.section sa5nb
-.section sa5oa
-.section sa5ob
-.section sa5pa
-.section sa5pb
-.section sa5qa
-.section sa5qb
-.section sa5ra
-.section sa5rb
-.section sa5sa
-.section sa5sb
-.section sa5ta
-.section sa5tb
-.section sa5ua
-.section sa5ub
-.section sa5va
-.section sa5vb
-.section sa5wa
-.section sa5wb
-.section sa5xa
-.section sa5xb
-.section sa5ya
-.section sa5yb
-.section sa5za
-.section sa5zb
-.section sa51a
-.section sa51b
-.section sa52a
-.section sa52b
-.section sa53a
-.section sa53b
-.section sa54a
-.section sa54b
-.section sa55a
-.section sa55b
-.section sa56a
-.section sa56b
-.section sa57a
-.section sa57b
-.section sa58a
-.section sa58b
-.section sa59a
-.section sa59b
-.section sa50a
-.section sa50b
-.section sa6aa
-.section sa6ab
-.section sa6ba
-.section sa6bb
-.section sa6ca
-.section sa6cb
-.section sa6da
-.section sa6db
-.section sa6ea
-.section sa6eb
-.section sa6fa
-.section sa6fb
-.section sa6ga
-.section sa6gb
-.section sa6ha
-.section sa6hb
-.section sa6ia
-.section sa6ib
-.section sa6ja
-.section sa6jb
-.section sa6ka
-.section sa6kb
-.section sa6la
-.section sa6lb
-.section sa6ma
-.section sa6mb
-.section sa6na
-.section sa6nb
-.section sa6oa
-.section sa6ob
-.section sa6pa
-.section sa6pb
-.section sa6qa
-.section sa6qb
-.section sa6ra
-.section sa6rb
-.section sa6sa
-.section sa6sb
-.section sa6ta
-.section sa6tb
-.section sa6ua
-.section sa6ub
-.section sa6va
-.section sa6vb
-.section sa6wa
-.section sa6wb
-.section sa6xa
-.section sa6xb
-.section sa6ya
-.section sa6yb
-.section sa6za
-.section sa6zb
-.section sa61a
-.section sa61b
-.section sa62a
-.section sa62b
-.section sa63a
-.section sa63b
-.section sa64a
-.section sa64b
-.section sa65a
-.section sa65b
-.section sa66a
-.section sa66b
-.section sa67a
-.section sa67b
-.section sa68a
-.section sa68b
-.section sa69a
-.section sa69b
-.section sa60a
-.section sa60b
-.section sa7aa
-.section sa7ab
-.section sa7ba
-.section sa7bb
-.section sa7ca
-.section sa7cb
-.section sa7da
-.section sa7db
-.section sa7ea
-.section sa7eb
-.section sa7fa
-.section sa7fb
-.section sa7ga
-.section sa7gb
-.section sa7ha
-.section sa7hb
-.section sa7ia
-.section sa7ib
-.section sa7ja
-.section sa7jb
-.section sa7ka
-.section sa7kb
-.section sa7la
-.section sa7lb
-.section sa7ma
-.section sa7mb
-.section sa7na
-.section sa7nb
-.section sa7oa
-.section sa7ob
-.section sa7pa
-.section sa7pb
-.section sa7qa
-.section sa7qb
-.section sa7ra
-.section sa7rb
-.section sa7sa
-.section sa7sb
-.section sa7ta
-.section sa7tb
-.section sa7ua
-.section sa7ub
-.section sa7va
-.section sa7vb
-.section sa7wa
-.section sa7wb
-.section sa7xa
-.section sa7xb
-.section sa7ya
-.section sa7yb
-.section sa7za
-.section sa7zb
-.section sa71a
-.section sa71b
-.section sa72a
-.section sa72b
-.section sa73a
-.section sa73b
-.section sa74a
-.section sa74b
-.section sa75a
-.section sa75b
-.section sa76a
-.section sa76b
-.section sa77a
-.section sa77b
-.section sa78a
-.section sa78b
-.section sa79a
-.section sa79b
-.section sa70a
-.section sa70b
-.section sa8aa
-.section sa8ab
-.section sa8ba
-.section sa8bb
-.section sa8ca
-.section sa8cb
-.section sa8da
-.section sa8db
-.section sa8ea
-.section sa8eb
-.section sa8fa
-.section sa8fb
-.section sa8ga
-.section sa8gb
-.section sa8ha
-.section sa8hb
-.section sa8ia
-.section sa8ib
-.section sa8ja
-.section sa8jb
-.section sa8ka
-.section sa8kb
-.section sa8la
-.section sa8lb
-.section sa8ma
-.section sa8mb
-.section sa8na
-.section sa8nb
-.section sa8oa
-.section sa8ob
-.section sa8pa
-.section sa8pb
-.section sa8qa
-.section sa8qb
-.section sa8ra
-.section sa8rb
-.section sa8sa
-.section sa8sb
-.section sa8ta
-.section sa8tb
-.section sa8ua
-.section sa8ub
-.section sa8va
-.section sa8vb
-.section sa8wa
-.section sa8wb
-.section sa8xa
-.section sa8xb
-.section sa8ya
-.section sa8yb
-.section sa8za
-.section sa8zb
-.section sa81a
-.section sa81b
-.section sa82a
-.section sa82b
-.section sa83a
-.section sa83b
-.section sa84a
-.section sa84b
-.section sa85a
-.section sa85b
-.section sa86a
-.section sa86b
-.section sa87a
-.section sa87b
-.section sa88a
-.section sa88b
-.section sa89a
-.section sa89b
-.section sa80a
-.section sa80b
-.section sa9aa
-.section sa9ab
-.section sa9ba
-.section sa9bb
-.section sa9ca
-.section sa9cb
-.section sa9da
-.section sa9db
-.section sa9ea
-.section sa9eb
-.section sa9fa
-.section sa9fb
-.section sa9ga
-.section sa9gb
-.section sa9ha
-.section sa9hb
-.section sa9ia
-.section sa9ib
-.section sa9ja
-.section sa9jb
-.section sa9ka
-.section sa9kb
-.section sa9la
-.section sa9lb
-.section sa9ma
-.section sa9mb
-.section sa9na
-.section sa9nb
-.section sa9oa
-.section sa9ob
-.section sa9pa
-.section sa9pb
-.section sa9qa
-.section sa9qb
-.section sa9ra
-.section sa9rb
-.section sa9sa
-.section sa9sb
-.section sa9ta
-.section sa9tb
-.section sa9ua
-.section sa9ub
-.section sa9va
-.section sa9vb
-.section sa9wa
-.section sa9wb
-.section sa9xa
-.section sa9xb
-.section sa9ya
-.section sa9yb
-.section sa9za
-.section sa9zb
-.section sa91a
-.section sa91b
-.section sa92a
-.section sa92b
-.section sa93a
-.section sa93b
-.section sa94a
-.section sa94b
-.section sa95a
-.section sa95b
-.section sa96a
-.section sa96b
-.section sa97a
-.section sa97b
-.section sa98a
-.section sa98b
-.section sa99a
-.section sa99b
-.section sa90a
-.section sa90b
-.section sa0aa
-.section sa0ab
-.section sa0ba
-.section sa0bb
-.section sa0ca
-.section sa0cb
-.section sa0da
-.section sa0db
-.section sa0ea
-.section sa0eb
-.section sa0fa
-.section sa0fb
-.section sa0ga
-.section sa0gb
-.section sa0ha
-.section sa0hb
-.section sa0ia
-.section sa0ib
-.section sa0ja
-.section sa0jb
-.section sa0ka
-.section sa0kb
-.section sa0la
-.section sa0lb
-.section sa0ma
-.section sa0mb
-.section sa0na
-.section sa0nb
-.section sa0oa
-.section sa0ob
-.section sa0pa
-.section sa0pb
-.section sa0qa
-.section sa0qb
-.section sa0ra
-.section sa0rb
-.section sa0sa
-.section sa0sb
-.section sa0ta
-.section sa0tb
-.section sa0ua
-.section sa0ub
-.section sa0va
-.section sa0vb
-.section sa0wa
-.section sa0wb
-.section sa0xa
-.section sa0xb
-.section sa0ya
-.section sa0yb
-.section sa0za
-.section sa0zb
-.section sa01a
-.section sa01b
-.section sa02a
-.section sa02b
-.section sa03a
-.section sa03b
-.section sa04a
-.section sa04b
-.section sa05a
-.section sa05b
-.section sa06a
-.section sa06b
-.section sa07a
-.section sa07b
-.section sa08a
-.section sa08b
-.section sa09a
-.section sa09b
-.section sa00a
-.section sa00b
-.section sbaaa
-.section sbaab
-.section sbaba
-.section sbabb
-.section sbaca
-.section sbacb
-.section sbada
-.section sbadb
-.section sbaea
-.section sbaeb
-.section sbafa
-.section sbafb
-.section sbaga
-.section sbagb
-.section sbaha
-.section sbahb
-.section sbaia
-.section sbaib
-.section sbaja
-.section sbajb
-.section sbaka
-.section sbakb
-.section sbala
-.section sbalb
-.section sbama
-.section sbamb
-.section sbana
-.section sbanb
-.section sbaoa
-.section sbaob
-.section sbapa
-.section sbapb
-.section sbaqa
-.section sbaqb
-.section sbara
-.section sbarb
-.section sbasa
-.section sbasb
-.section sbata
-.section sbatb
-.section sbaua
-.section sbaub
-.section sbava
-.section sbavb
-.section sbawa
-.section sbawb
-.section sbaxa
-.section sbaxb
-.section sbaya
-.section sbayb
-.section sbaza
-.section sbazb
-.section sba1a
-.section sba1b
-.section sba2a
-.section sba2b
-.section sba3a
-.section sba3b
-.section sba4a
-.section sba4b
-.section sba5a
-.section sba5b
-.section sba6a
-.section sba6b
-.section sba7a
-.section sba7b
-.section sba8a
-.section sba8b
-.section sba9a
-.section sba9b
-.section sba0a
-.section sba0b
-.section sbbaa
-.section sbbab
-.section sbbba
-.section sbbbb
-.section sbbca
-.section sbbcb
-.section sbbda
-.section sbbdb
-.section sbbea
-.section sbbeb
-.section sbbfa
-.section sbbfb
-.section sbbga
-.section sbbgb
-.section sbbha
-.section sbbhb
-.section sbbia
-.section sbbib
-.section sbbja
-.section sbbjb
-.section sbbka
-.section sbbkb
-.section sbbla
-.section sbblb
-.section sbbma
-.section sbbmb
-.section sbbna
-.section sbbnb
-.section sbboa
-.section sbbob
-.section sbbpa
-.section sbbpb
-.section sbbqa
-.section sbbqb
-.section sbbra
-.section sbbrb
-.section sbbsa
-.section sbbsb
-.section sbbta
-.section sbbtb
-.section sbbua
-.section sbbub
-.section sbbva
-.section sbbvb
-.section sbbwa
-.section sbbwb
-.section sbbxa
-.section sbbxb
-.section sbbya
-.section sbbyb
-.section sbbza
-.section sbbzb
-.section sbb1a
-.section sbb1b
-.section sbb2a
-.section sbb2b
-.section sbb3a
-.section sbb3b
-.section sbb4a
-.section sbb4b
-.section sbb5a
-.section sbb5b
-.section sbb6a
-.section sbb6b
-.section sbb7a
-.section sbb7b
-.section sbb8a
-.section sbb8b
-.section sbb9a
-.section sbb9b
-.section sbb0a
-.section sbb0b
-.section sbcaa
-.section sbcab
-.section sbcba
-.section sbcbb
-.section sbcca
-.section sbccb
-.section sbcda
-.section sbcdb
-.section sbcea
-.section sbceb
-.section sbcfa
-.section sbcfb
-.section sbcga
-.section sbcgb
-.section sbcha
-.section sbchb
-.section sbcia
-.section sbcib
-.section sbcja
-.section sbcjb
-.section sbcka
-.section sbckb
-.section sbcla
-.section sbclb
-.section sbcma
-.section sbcmb
-.section sbcna
-.section sbcnb
-.section sbcoa
-.section sbcob
-.section sbcpa
-.section sbcpb
-.section sbcqa
-.section sbcqb
-.section sbcra
-.section sbcrb
-.section sbcsa
-.section sbcsb
-.section sbcta
-.section sbctb
-.section sbcua
-.section sbcub
-.section sbcva
-.section sbcvb
-.section sbcwa
-.section sbcwb
-.section sbcxa
-.section sbcxb
-.section sbcya
-.section sbcyb
-.section sbcza
-.section sbczb
-.section sbc1a
-.section sbc1b
-.section sbc2a
-.section sbc2b
-.section sbc3a
-.section sbc3b
-.section sbc4a
-.section sbc4b
-.section sbc5a
-.section sbc5b
-.section sbc6a
-.section sbc6b
-.section sbc7a
-.section sbc7b
-.section sbc8a
-.section sbc8b
-.section sbc9a
-.section sbc9b
-.section sbc0a
-.section sbc0b
-.section sbdaa
-.section sbdab
-.section sbdba
-.section sbdbb
-.section sbdca
-.section sbdcb
-.section sbdda
-.section sbddb
-.section sbdea
-.section sbdeb
-.section sbdfa
-.section sbdfb
-.section sbdga
-.section sbdgb
-.section sbdha
-.section sbdhb
-.section sbdia
-.section sbdib
-.section sbdja
-.section sbdjb
-.section sbdka
-.section sbdkb
-.section sbdla
-.section sbdlb
-.section sbdma
-.section sbdmb
-.section sbdna
-.section sbdnb
-.section sbdoa
-.section sbdob
-.section sbdpa
-.section sbdpb
-.section sbdqa
-.section sbdqb
-.section sbdra
-.section sbdrb
-.section sbdsa
-.section sbdsb
-.section sbdta
-.section sbdtb
-.section sbdua
-.section sbdub
-.section sbdva
-.section sbdvb
-.section sbdwa
-.section sbdwb
-.section sbdxa
-.section sbdxb
-.section sbdya
-.section sbdyb
-.section sbdza
-.section sbdzb
-.section sbd1a
-.section sbd1b
-.section sbd2a
-.section sbd2b
-.section sbd3a
-.section sbd3b
-.section sbd4a
-.section sbd4b
-.section sbd5a
-.section sbd5b
-.section sbd6a
-.section sbd6b
-.section sbd7a
-.section sbd7b
-.section sbd8a
-.section sbd8b
-.section sbd9a
-.section sbd9b
-.section sbd0a
-.section sbd0b
-.section sbeaa
-.section sbeab
-.section sbeba
-.section sbebb
-.section sbeca
-.section sbecb
-.section sbeda
-.section sbedb
-.section sbeea
-.section sbeeb
-.section sbefa
-.section sbefb
-.section sbega
-.section sbegb
-.section sbeha
-.section sbehb
-.section sbeia
-.section sbeib
-.section sbeja
-.section sbejb
-.section sbeka
-.section sbekb
-.section sbela
-.section sbelb
-.section sbema
-.section sbemb
-.section sbena
-.section sbenb
-.section sbeoa
-.section sbeob
-.section sbepa
-.section sbepb
-.section sbeqa
-.section sbeqb
-.section sbera
-.section sberb
-.section sbesa
-.section sbesb
-.section sbeta
-.section sbetb
-.section sbeua
-.section sbeub
-.section sbeva
-.section sbevb
-.section sbewa
-.section sbewb
-.section sbexa
-.section sbexb
-.section sbeya
-.section sbeyb
-.section sbeza
-.section sbezb
-.section sbe1a
-.section sbe1b
-.section sbe2a
-.section sbe2b
-.section sbe3a
-.section sbe3b
-.section sbe4a
-.section sbe4b
-.section sbe5a
-.section sbe5b
-.section sbe6a
-.section sbe6b
-.section sbe7a
-.section sbe7b
-.section sbe8a
-.section sbe8b
-.section sbe9a
-.section sbe9b
-.section sbe0a
-.section sbe0b
-.section sbfaa
-.section sbfab
-.section sbfba
-.section sbfbb
-.section sbfca
-.section sbfcb
-.section sbfda
-.section sbfdb
-.section sbfea
-.section sbfeb
-.section sbffa
-.section sbffb
-.section sbfga
-.section sbfgb
-.section sbfha
-.section sbfhb
-.section sbfia
-.section sbfib
-.section sbfja
-.section sbfjb
-.section sbfka
-.section sbfkb
-.section sbfla
-.section sbflb
-.section sbfma
-.section sbfmb
-.section sbfna
-.section sbfnb
-.section sbfoa
-.section sbfob
-.section sbfpa
-.section sbfpb
-.section sbfqa
-.section sbfqb
-.section sbfra
-.section sbfrb
-.section sbfsa
-.section sbfsb
-.section sbfta
-.section sbftb
-.section sbfua
-.section sbfub
-.section sbfva
-.section sbfvb
-.section sbfwa
-.section sbfwb
-.section sbfxa
-.section sbfxb
-.section sbfya
-.section sbfyb
-.section sbfza
-.section sbfzb
-.section sbf1a
-.section sbf1b
-.section sbf2a
-.section sbf2b
-.section sbf3a
-.section sbf3b
-.section sbf4a
-.section sbf4b
-.section sbf5a
-.section sbf5b
-.section sbf6a
-.section sbf6b
-.section sbf7a
-.section sbf7b
-.section sbf8a
-.section sbf8b
-.section sbf9a
-.section sbf9b
-.section sbf0a
-.section sbf0b
-.section sbgaa
-.section sbgab
-.section sbgba
-.section sbgbb
-.section sbgca
-.section sbgcb
-.section sbgda
-.section sbgdb
-.section sbgea
-.section sbgeb
-.section sbgfa
-.section sbgfb
-.section sbgga
-.section sbggb
-.section sbgha
-.section sbghb
-.section sbgia
-.section sbgib
-.section sbgja
-.section sbgjb
-.section sbgka
-.section sbgkb
-.section sbgla
-.section sbglb
-.section sbgma
-.section sbgmb
-.section sbgna
-.section sbgnb
-.section sbgoa
-.section sbgob
-.section sbgpa
-.section sbgpb
-.section sbgqa
-.section sbgqb
-.section sbgra
-.section sbgrb
-.section sbgsa
-.section sbgsb
-.section sbgta
-.section sbgtb
-.section sbgua
-.section sbgub
-.section sbgva
-.section sbgvb
-.section sbgwa
-.section sbgwb
-.section sbgxa
-.section sbgxb
-.section sbgya
-.section sbgyb
-.section sbgza
-.section sbgzb
-.section sbg1a
-.section sbg1b
-.section sbg2a
-.section sbg2b
-.section sbg3a
-.section sbg3b
-.section sbg4a
-.section sbg4b
-.section sbg5a
-.section sbg5b
-.section sbg6a
-.section sbg6b
-.section sbg7a
-.section sbg7b
-.section sbg8a
-.section sbg8b
-.section sbg9a
-.section sbg9b
-.section sbg0a
-.section sbg0b
-.section sbhaa
-.section sbhab
-.section sbhba
-.section sbhbb
-.section sbhca
-.section sbhcb
-.section sbhda
-.section sbhdb
-.section sbhea
-.section sbheb
-.section sbhfa
-.section sbhfb
-.section sbhga
-.section sbhgb
-.section sbhha
-.section sbhhb
-.section sbhia
-.section sbhib
-.section sbhja
-.section sbhjb
-.section sbhka
-.section sbhkb
-.section sbhla
-.section sbhlb
-.section sbhma
-.section sbhmb
-.section sbhna
-.section sbhnb
-.section sbhoa
-.section sbhob
-.section sbhpa
-.section sbhpb
-.section sbhqa
-.section sbhqb
-.section sbhra
-.section sbhrb
-.section sbhsa
-.section sbhsb
-.section sbhta
-.section sbhtb
-.section sbhua
-.section sbhub
-.section sbhva
-.section sbhvb
-.section sbhwa
-.section sbhwb
-.section sbhxa
-.section sbhxb
-.section sbhya
-.section sbhyb
-.section sbhza
-.section sbhzb
-.section sbh1a
-.section sbh1b
-.section sbh2a
-.section sbh2b
-.section sbh3a
-.section sbh3b
-.section sbh4a
-.section sbh4b
-.section sbh5a
-.section sbh5b
-.section sbh6a
-.section sbh6b
-.section sbh7a
-.section sbh7b
-.section sbh8a
-.section sbh8b
-.section sbh9a
-.section sbh9b
-.section sbh0a
-.section sbh0b
-.section sbiaa
-.section sbiab
-.section sbiba
-.section sbibb
-.section sbica
-.section sbicb
-.section sbida
-.section sbidb
-.section sbiea
-.section sbieb
-.section sbifa
-.section sbifb
-.section sbiga
-.section sbigb
-.section sbiha
-.section sbihb
-.section sbiia
-.section sbiib
-.section sbija
-.section sbijb
-.section sbika
-.section sbikb
-.section sbila
-.section sbilb
-.section sbima
-.section sbimb
-.section sbina
-.section sbinb
-.section sbioa
-.section sbiob
-.section sbipa
-.section sbipb
-.section sbiqa
-.section sbiqb
-.section sbira
-.section sbirb
-.section sbisa
-.section sbisb
-.section sbita
-.section sbitb
-.section sbiua
-.section sbiub
-.section sbiva
-.section sbivb
-.section sbiwa
-.section sbiwb
-.section sbixa
-.section sbixb
-.section sbiya
-.section sbiyb
-.section sbiza
-.section sbizb
-.section sbi1a
-.section sbi1b
-.section sbi2a
-.section sbi2b
-.section sbi3a
-.section sbi3b
-.section sbi4a
-.section sbi4b
-.section sbi5a
-.section sbi5b
-.section sbi6a
-.section sbi6b
-.section sbi7a
-.section sbi7b
-.section sbi8a
-.section sbi8b
-.section sbi9a
-.section sbi9b
-.section sbi0a
-.section sbi0b
-.section sbjaa
-.section sbjab
-.section sbjba
-.section sbjbb
-.section sbjca
-.section sbjcb
-.section sbjda
-.section sbjdb
-.section sbjea
-.section sbjeb
-.section sbjfa
-.section sbjfb
-.section sbjga
-.section sbjgb
-.section sbjha
-.section sbjhb
-.section sbjia
-.section sbjib
-.section sbjja
-.section sbjjb
-.section sbjka
-.section sbjkb
-.section sbjla
-.section sbjlb
-.section sbjma
-.section sbjmb
-.section sbjna
-.section sbjnb
-.section sbjoa
-.section sbjob
-.section sbjpa
-.section sbjpb
-.section sbjqa
-.section sbjqb
-.section sbjra
-.section sbjrb
-.section sbjsa
-.section sbjsb
-.section sbjta
-.section sbjtb
-.section sbjua
-.section sbjub
-.section sbjva
-.section sbjvb
-.section sbjwa
-.section sbjwb
-.section sbjxa
-.section sbjxb
-.section sbjya
-.section sbjyb
-.section sbjza
-.section sbjzb
-.section sbj1a
-.section sbj1b
-.section sbj2a
-.section sbj2b
-.section sbj3a
-.section sbj3b
-.section sbj4a
-.section sbj4b
-.section sbj5a
-.section sbj5b
-.section sbj6a
-.section sbj6b
-.section sbj7a
-.section sbj7b
-.section sbj8a
-.section sbj8b
-.section sbj9a
-.section sbj9b
-.section sbj0a
-.section sbj0b
-.section sbkaa
-.section sbkab
-.section sbkba
-.section sbkbb
-.section sbkca
-.section sbkcb
-.section sbkda
-.section sbkdb
-.section sbkea
-.section sbkeb
-.section sbkfa
-.section sbkfb
-.section sbkga
-.section sbkgb
-.section sbkha
-.section sbkhb
-.section sbkia
-.section sbkib
-.section sbkja
-.section sbkjb
-.section sbkka
-.section sbkkb
-.section sbkla
-.section sbklb
-.section sbkma
-.section sbkmb
-.section sbkna
-.section sbknb
-.section sbkoa
-.section sbkob
-.section sbkpa
-.section sbkpb
-.section sbkqa
-.section sbkqb
-.section sbkra
-.section sbkrb
-.section sbksa
-.section sbksb
-.section sbkta
-.section sbktb
-.section sbkua
-.section sbkub
-.section sbkva
-.section sbkvb
-.section sbkwa
-.section sbkwb
-.section sbkxa
-.section sbkxb
-.section sbkya
-.section sbkyb
-.section sbkza
-.section sbkzb
-.section sbk1a
-.section sbk1b
-.section sbk2a
-.section sbk2b
-.section sbk3a
-.section sbk3b
-.section sbk4a
-.section sbk4b
-.section sbk5a
-.section sbk5b
-.section sbk6a
-.section sbk6b
-.section sbk7a
-.section sbk7b
-.section sbk8a
-.section sbk8b
-.section sbk9a
-.section sbk9b
-.section sbk0a
-.section sbk0b
-.section sblaa
-.section sblab
-.section sblba
-.section sblbb
-.section sblca
-.section sblcb
-.section sblda
-.section sbldb
-.section sblea
-.section sbleb
-.section sblfa
-.section sblfb
-.section sblga
-.section sblgb
-.section sblha
-.section sblhb
-.section sblia
-.section sblib
-.section sblja
-.section sbljb
-.section sblka
-.section sblkb
-.section sblla
-.section sbllb
-.section sblma
-.section sblmb
-.section sblna
-.section sblnb
-.section sbloa
-.section sblob
-.section sblpa
-.section sblpb
-.section sblqa
-.section sblqb
-.section sblra
-.section sblrb
-.section sblsa
-.section sblsb
-.section sblta
-.section sbltb
-.section sblua
-.section sblub
-.section sblva
-.section sblvb
-.section sblwa
-.section sblwb
-.section sblxa
-.section sblxb
-.section sblya
-.section sblyb
-.section sblza
-.section sblzb
-.section sbl1a
-.section sbl1b
-.section sbl2a
-.section sbl2b
-.section sbl3a
-.section sbl3b
-.section sbl4a
-.section sbl4b
-.section sbl5a
-.section sbl5b
-.section sbl6a
-.section sbl6b
-.section sbl7a
-.section sbl7b
-.section sbl8a
-.section sbl8b
-.section sbl9a
-.section sbl9b
-.section sbl0a
-.section sbl0b
-.section sbmaa
-.section sbmab
-.section sbmba
-.section sbmbb
-.section sbmca
-.section sbmcb
-.section sbmda
-.section sbmdb
-.section sbmea
-.section sbmeb
-.section sbmfa
-.section sbmfb
-.section sbmga
-.section sbmgb
-.section sbmha
-.section sbmhb
-.section sbmia
-.section sbmib
-.section sbmja
-.section sbmjb
-.section sbmka
-.section sbmkb
-.section sbmla
-.section sbmlb
-.section sbmma
-.section sbmmb
-.section sbmna
-.section sbmnb
-.section sbmoa
-.section sbmob
-.section sbmpa
-.section sbmpb
-.section sbmqa
-.section sbmqb
-.section sbmra
-.section sbmrb
-.section sbmsa
-.section sbmsb
-.section sbmta
-.section sbmtb
-.section sbmua
-.section sbmub
-.section sbmva
-.section sbmvb
-.section sbmwa
-.section sbmwb
-.section sbmxa
-.section sbmxb
-.section sbmya
-.section sbmyb
-.section sbmza
-.section sbmzb
-.section sbm1a
-.section sbm1b
-.section sbm2a
-.section sbm2b
-.section sbm3a
-.section sbm3b
-.section sbm4a
-.section sbm4b
-.section sbm5a
-.section sbm5b
-.section sbm6a
-.section sbm6b
-.section sbm7a
-.section sbm7b
-.section sbm8a
-.section sbm8b
-.section sbm9a
-.section sbm9b
-.section sbm0a
-.section sbm0b
-.section sbnaa
-.section sbnab
-.section sbnba
-.section sbnbb
-.section sbnca
-.section sbncb
-.section sbnda
-.section sbndb
-.section sbnea
-.section sbneb
-.section sbnfa
-.section sbnfb
-.section sbnga
-.section sbngb
-.section sbnha
-.section sbnhb
-.section sbnia
-.section sbnib
-.section sbnja
-.section sbnjb
-.section sbnka
-.section sbnkb
-.section sbnla
-.section sbnlb
-.section sbnma
-.section sbnmb
-.section sbnna
-.section sbnnb
-.section sbnoa
-.section sbnob
-.section sbnpa
-.section sbnpb
-.section sbnqa
-.section sbnqb
-.section sbnra
-.section sbnrb
-.section sbnsa
-.section sbnsb
-.section sbnta
-.section sbntb
-.section sbnua
-.section sbnub
-.section sbnva
-.section sbnvb
-.section sbnwa
-.section sbnwb
-.section sbnxa
-.section sbnxb
-.section sbnya
-.section sbnyb
-.section sbnza
-.section sbnzb
-.section sbn1a
-.section sbn1b
-.section sbn2a
-.section sbn2b
-.section sbn3a
-.section sbn3b
-.section sbn4a
-.section sbn4b
-.section sbn5a
-.section sbn5b
-.section sbn6a
-.section sbn6b
-.section sbn7a
-.section sbn7b
-.section sbn8a
-.section sbn8b
-.section sbn9a
-.section sbn9b
-.section sbn0a
-.section sbn0b
-.section sboaa
-.section sboab
-.section sboba
-.section sbobb
-.section sboca
-.section sbocb
-.section sboda
-.section sbodb
-.section sboea
-.section sboeb
-.section sbofa
-.section sbofb
-.section sboga
-.section sbogb
-.section sboha
-.section sbohb
-.section sboia
-.section sboib
-.section sboja
-.section sbojb
-.section sboka
-.section sbokb
-.section sbola
-.section sbolb
-.section sboma
-.section sbomb
-.section sbona
-.section sbonb
-.section sbooa
-.section sboob
-.section sbopa
-.section sbopb
-.section sboqa
-.section sboqb
-.section sbora
-.section sborb
-.section sbosa
-.section sbosb
-.section sbota
-.section sbotb
-.section sboua
-.section sboub
-.section sbova
-.section sbovb
-.section sbowa
-.section sbowb
-.section sboxa
-.section sboxb
-.section sboya
-.section sboyb
-.section sboza
-.section sbozb
-.section sbo1a
-.section sbo1b
-.section sbo2a
-.section sbo2b
-.section sbo3a
-.section sbo3b
-.section sbo4a
-.section sbo4b
-.section sbo5a
-.section sbo5b
-.section sbo6a
-.section sbo6b
-.section sbo7a
-.section sbo7b
-.section sbo8a
-.section sbo8b
-.section sbo9a
-.section sbo9b
-.section sbo0a
-.section sbo0b
-.section sbpaa
-.section sbpab
-.section sbpba
-.section sbpbb
-.section sbpca
-.section sbpcb
-.section sbpda
-.section sbpdb
-.section sbpea
-.section sbpeb
-.section sbpfa
-.section sbpfb
-.section sbpga
-.section sbpgb
-.section sbpha
-.section sbphb
-.section sbpia
-.section sbpib
-.section sbpja
-.section sbpjb
-.section sbpka
-.section sbpkb
-.section sbpla
-.section sbplb
-.section sbpma
-.section sbpmb
-.section sbpna
-.section sbpnb
-.section sbpoa
-.section sbpob
-.section sbppa
-.section sbppb
-.section sbpqa
-.section sbpqb
-.section sbpra
-.section sbprb
-.section sbpsa
-.section sbpsb
-.section sbpta
-.section sbptb
-.section sbpua
-.section sbpub
-.section sbpva
-.section sbpvb
-.section sbpwa
-.section sbpwb
-.section sbpxa
-.section sbpxb
-.section sbpya
-.section sbpyb
-.section sbpza
-.section sbpzb
-.section sbp1a
-.section sbp1b
-.section sbp2a
-.section sbp2b
-.section sbp3a
-.section sbp3b
-.section sbp4a
-.section sbp4b
-.section sbp5a
-.section sbp5b
-.section sbp6a
-.section sbp6b
-.section sbp7a
-.section sbp7b
-.section sbp8a
-.section sbp8b
-.section sbp9a
-.section sbp9b
-.section sbp0a
-.section sbp0b
-.section sbqaa
-.section sbqab
-.section sbqba
-.section sbqbb
-.section sbqca
-.section sbqcb
-.section sbqda
-.section sbqdb
-.section sbqea
-.section sbqeb
-.section sbqfa
-.section sbqfb
-.section sbqga
-.section sbqgb
-.section sbqha
-.section sbqhb
-.section sbqia
-.section sbqib
-.section sbqja
-.section sbqjb
-.section sbqka
-.section sbqkb
-.section sbqla
-.section sbqlb
-.section sbqma
-.section sbqmb
-.section sbqna
-.section sbqnb
-.section sbqoa
-.section sbqob
-.section sbqpa
-.section sbqpb
-.section sbqqa
-.section sbqqb
-.section sbqra
-.section sbqrb
-.section sbqsa
-.section sbqsb
-.section sbqta
-.section sbqtb
-.section sbqua
-.section sbqub
-.section sbqva
-.section sbqvb
-.section sbqwa
-.section sbqwb
-.section sbqxa
-.section sbqxb
-.section sbqya
-.section sbqyb
-.section sbqza
-.section sbqzb
-.section sbq1a
-.section sbq1b
-.section sbq2a
-.section sbq2b
-.section sbq3a
-.section sbq3b
-.section sbq4a
-.section sbq4b
-.section sbq5a
-.section sbq5b
-.section sbq6a
-.section sbq6b
-.section sbq7a
-.section sbq7b
-.section sbq8a
-.section sbq8b
-.section sbq9a
-.section sbq9b
-.section sbq0a
-.section sbq0b
-.section sbraa
-.section sbrab
-.section sbrba
-.section sbrbb
-.section sbrca
-.section sbrcb
-.section sbrda
-.section sbrdb
-.section sbrea
-.section sbreb
-.section sbrfa
-.section sbrfb
-.section sbrga
-.section sbrgb
-.section sbrha
-.section sbrhb
-.section sbria
-.section sbrib
-.section sbrja
-.section sbrjb
-.section sbrka
-.section sbrkb
-.section sbrla
-.section sbrlb
-.section sbrma
-.section sbrmb
-.section sbrna
-.section sbrnb
-.section sbroa
-.section sbrob
-.section sbrpa
-.section sbrpb
-.section sbrqa
-.section sbrqb
-.section sbrra
-.section sbrrb
-.section sbrsa
-.section sbrsb
-.section sbrta
-.section sbrtb
-.section sbrua
-.section sbrub
-.section sbrva
-.section sbrvb
-.section sbrwa
-.section sbrwb
-.section sbrxa
-.section sbrxb
-.section sbrya
-.section sbryb
-.section sbrza
-.section sbrzb
-.section sbr1a
-.section sbr1b
-.section sbr2a
-.section sbr2b
-.section sbr3a
-.section sbr3b
-.section sbr4a
-.section sbr4b
-.section sbr5a
-.section sbr5b
-.section sbr6a
-.section sbr6b
-.section sbr7a
-.section sbr7b
-.section sbr8a
-.section sbr8b
-.section sbr9a
-.section sbr9b
-.section sbr0a
-.section sbr0b
-.section sbsaa
-.section sbsab
-.section sbsba
-.section sbsbb
-.section sbsca
-.section sbscb
-.section sbsda
-.section sbsdb
-.section sbsea
-.section sbseb
-.section sbsfa
-.section sbsfb
-.section sbsga
-.section sbsgb
-.section sbsha
-.section sbshb
-.section sbsia
-.section sbsib
-.section sbsja
-.section sbsjb
-.section sbska
-.section sbskb
-.section sbsla
-.section sbslb
-.section sbsma
-.section sbsmb
-.section sbsna
-.section sbsnb
-.section sbsoa
-.section sbsob
-.section sbspa
-.section sbspb
-.section sbsqa
-.section sbsqb
-.section sbsra
-.section sbsrb
-.section sbssa
-.section sbssb
-.section sbsta
-.section sbstb
-.section sbsua
-.section sbsub
-.section sbsva
-.section sbsvb
-.section sbswa
-.section sbswb
-.section sbsxa
-.section sbsxb
-.section sbsya
-.section sbsyb
-.section sbsza
-.section sbszb
-.section sbs1a
-.section sbs1b
-.section sbs2a
-.section sbs2b
-.section sbs3a
-.section sbs3b
-.section sbs4a
-.section sbs4b
-.section sbs5a
-.section sbs5b
-.section sbs6a
-.section sbs6b
-.section sbs7a
-.section sbs7b
-.section sbs8a
-.section sbs8b
-.section sbs9a
-.section sbs9b
-.section sbs0a
-.section sbs0b
-.section sbtaa
-.section sbtab
-.section sbtba
-.section sbtbb
-.section sbtca
-.section sbtcb
-.section sbtda
-.section sbtdb
-.section sbtea
-.section sbteb
-.section sbtfa
-.section sbtfb
-.section sbtga
-.section sbtgb
-.section sbtha
-.section sbthb
-.section sbtia
-.section sbtib
-.section sbtja
-.section sbtjb
-.section sbtka
-.section sbtkb
-.section sbtla
-.section sbtlb
-.section sbtma
-.section sbtmb
-.section sbtna
-.section sbtnb
-.section sbtoa
-.section sbtob
-.section sbtpa
-.section sbtpb
-.section sbtqa
-.section sbtqb
-.section sbtra
-.section sbtrb
-.section sbtsa
-.section sbtsb
-.section sbtta
-.section sbttb
-.section sbtua
-.section sbtub
-.section sbtva
-.section sbtvb
-.section sbtwa
-.section sbtwb
-.section sbtxa
-.section sbtxb
-.section sbtya
-.section sbtyb
-.section sbtza
-.section sbtzb
-.section sbt1a
-.section sbt1b
-.section sbt2a
-.section sbt2b
-.section sbt3a
-.section sbt3b
-.section sbt4a
-.section sbt4b
-.section sbt5a
-.section sbt5b
-.section sbt6a
-.section sbt6b
-.section sbt7a
-.section sbt7b
-.section sbt8a
-.section sbt8b
-.section sbt9a
-.section sbt9b
-.section sbt0a
-.section sbt0b
-.section sbuaa
-.section sbuab
-.section sbuba
-.section sbubb
-.section sbuca
-.section sbucb
-.section sbuda
-.section sbudb
-.section sbuea
-.section sbueb
-.section sbufa
-.section sbufb
-.section sbuga
-.section sbugb
-.section sbuha
-.section sbuhb
-.section sbuia
-.section sbuib
-.section sbuja
-.section sbujb
-.section sbuka
-.section sbukb
-.section sbula
-.section sbulb
-.section sbuma
-.section sbumb
-.section sbuna
-.section sbunb
-.section sbuoa
-.section sbuob
-.section sbupa
-.section sbupb
-.section sbuqa
-.section sbuqb
-.section sbura
-.section sburb
-.section sbusa
-.section sbusb
-.section sbuta
-.section sbutb
-.section sbuua
-.section sbuub
-.section sbuva
-.section sbuvb
-.section sbuwa
-.section sbuwb
-.section sbuxa
-.section sbuxb
-.section sbuya
-.section sbuyb
-.section sbuza
-.section sbuzb
-.section sbu1a
-.section sbu1b
-.section sbu2a
-.section sbu2b
-.section sbu3a
-.section sbu3b
-.section sbu4a
-.section sbu4b
-.section sbu5a
-.section sbu5b
-.section sbu6a
-.section sbu6b
-.section sbu7a
-.section sbu7b
-.section sbu8a
-.section sbu8b
-.section sbu9a
-.section sbu9b
-.section sbu0a
-.section sbu0b
-.section sbvaa
-.section sbvab
-.section sbvba
-.section sbvbb
-.section sbvca
-.section sbvcb
-.section sbvda
-.section sbvdb
-.section sbvea
-.section sbveb
-.section sbvfa
-.section sbvfb
-.section sbvga
-.section sbvgb
-.section sbvha
-.section sbvhb
-.section sbvia
-.section sbvib
-.section sbvja
-.section sbvjb
-.section sbvka
-.section sbvkb
-.section sbvla
-.section sbvlb
-.section sbvma
-.section sbvmb
-.section sbvna
-.section sbvnb
-.section sbvoa
-.section sbvob
-.section sbvpa
-.section sbvpb
-.section sbvqa
-.section sbvqb
-.section sbvra
-.section sbvrb
-.section sbvsa
-.section sbvsb
-.section sbvta
-.section sbvtb
-.section sbvua
-.section sbvub
-.section sbvva
-.section sbvvb
-.section sbvwa
-.section sbvwb
-.section sbvxa
-.section sbvxb
-.section sbvya
-.section sbvyb
-.section sbvza
-.section sbvzb
-.section sbv1a
-.section sbv1b
-.section sbv2a
-.section sbv2b
-.section sbv3a
-.section sbv3b
-.section sbv4a
-.section sbv4b
-.section sbv5a
-.section sbv5b
-.section sbv6a
-.section sbv6b
-.section sbv7a
-.section sbv7b
-.section sbv8a
-.section sbv8b
-.section sbv9a
-.section sbv9b
-.section sbv0a
-.section sbv0b
-.section sbwaa
-.section sbwab
-.section sbwba
-.section sbwbb
-.section sbwca
-.section sbwcb
-.section sbwda
-.section sbwdb
-.section sbwea
-.section sbweb
-.section sbwfa
-.section sbwfb
-.section sbwga
-.section sbwgb
-.section sbwha
-.section sbwhb
-.section sbwia
-.section sbwib
-.section sbwja
-.section sbwjb
-.section sbwka
-.section sbwkb
-.section sbwla
-.section sbwlb
-.section sbwma
-.section sbwmb
-.section sbwna
-.section sbwnb
-.section sbwoa
-.section sbwob
-.section sbwpa
-.section sbwpb
-.section sbwqa
-.section sbwqb
-.section sbwra
-.section sbwrb
-.section sbwsa
-.section sbwsb
-.section sbwta
-.section sbwtb
-.section sbwua
-.section sbwub
-.section sbwva
-.section sbwvb
-.section sbwwa
-.section sbwwb
-.section sbwxa
-.section sbwxb
-.section sbwya
-.section sbwyb
-.section sbwza
-.section sbwzb
-.section sbw1a
-.section sbw1b
-.section sbw2a
-.section sbw2b
-.section sbw3a
-.section sbw3b
-.section sbw4a
-.section sbw4b
-.section sbw5a
-.section sbw5b
-.section sbw6a
-.section sbw6b
-.section sbw7a
-.section sbw7b
-.section sbw8a
-.section sbw8b
-.section sbw9a
-.section sbw9b
-.section sbw0a
-.section sbw0b
-.section sbxaa
-.section sbxab
-.section sbxba
-.section sbxbb
-.section sbxca
-.section sbxcb
-.section sbxda
-.section sbxdb
-.section sbxea
-.section sbxeb
-.section sbxfa
-.section sbxfb
-.section sbxga
-.section sbxgb
-.section sbxha
-.section sbxhb
-.section sbxia
-.section sbxib
-.section sbxja
-.section sbxjb
-.section sbxka
-.section sbxkb
-.section sbxla
-.section sbxlb
-.section sbxma
-.section sbxmb
-.section sbxna
-.section sbxnb
-.section sbxoa
-.section sbxob
-.section sbxpa
-.section sbxpb
-.section sbxqa
-.section sbxqb
-.section sbxra
-.section sbxrb
-.section sbxsa
-.section sbxsb
-.section sbxta
-.section sbxtb
-.section sbxua
-.section sbxub
-.section sbxva
-.section sbxvb
-.section sbxwa
-.section sbxwb
-.section sbxxa
-.section sbxxb
-.section sbxya
-.section sbxyb
-.section sbxza
-.section sbxzb
-.section sbx1a
-.section sbx1b
-.section sbx2a
-.section sbx2b
-.section sbx3a
-.section sbx3b
-.section sbx4a
-.section sbx4b
-.section sbx5a
-.section sbx5b
-.section sbx6a
-.section sbx6b
-.section sbx7a
-.section sbx7b
-.section sbx8a
-.section sbx8b
-.section sbx9a
-.section sbx9b
-.section sbx0a
-.section sbx0b
-.section sbyaa
-.section sbyab
-.section sbyba
-.section sbybb
-.section sbyca
-.section sbycb
-.section sbyda
-.section sbydb
-.section sbyea
-.section sbyeb
-.section sbyfa
-.section sbyfb
-.section sbyga
-.section sbygb
-.section sbyha
-.section sbyhb
-.section sbyia
-.section sbyib
-.section sbyja
-.section sbyjb
-.section sbyka
-.section sbykb
-.section sbyla
-.section sbylb
-.section sbyma
-.section sbymb
-.section sbyna
-.section sbynb
-.section sbyoa
-.section sbyob
-.section sbypa
-.section sbypb
-.section sbyqa
-.section sbyqb
-.section sbyra
-.section sbyrb
-.section sbysa
-.section sbysb
-.section sbyta
-.section sbytb
-.section sbyua
-.section sbyub
-.section sbyva
-.section sbyvb
-.section sbywa
-.section sbywb
-.section sbyxa
-.section sbyxb
-.section sbyya
-.section sbyyb
-.section sbyza
-.section sbyzb
-.section sby1a
-.section sby1b
-.section sby2a
-.section sby2b
-.section sby3a
-.section sby3b
-.section sby4a
-.section sby4b
-.section sby5a
-.section sby5b
-.section sby6a
-.section sby6b
-.section sby7a
-.section sby7b
-.section sby8a
-.section sby8b
-.section sby9a
-.section sby9b
-.section sby0a
-.section sby0b
-.section sbzaa
-.section sbzab
-.section sbzba
-.section sbzbb
-.section sbzca
-.section sbzcb
-.section sbzda
-.section sbzdb
-.section sbzea
-.section sbzeb
-.section sbzfa
-.section sbzfb
-.section sbzga
-.section sbzgb
-.section sbzha
-.section sbzhb
-.section sbzia
-.section sbzib
-.section sbzja
-.section sbzjb
-.section sbzka
-.section sbzkb
-.section sbzla
-.section sbzlb
-.section sbzma
-.section sbzmb
-.section sbzna
-.section sbznb
-.section sbzoa
-.section sbzob
-.section sbzpa
-.section sbzpb
-.section sbzqa
-.section sbzqb
-.section sbzra
-.section sbzrb
-.section sbzsa
-.section sbzsb
-.section sbzta
-.section sbztb
-.section sbzua
-.section sbzub
-.section sbzva
-.section sbzvb
-.section sbzwa
-.section sbzwb
-.section sbzxa
-.section sbzxb
-.section sbzya
-.section sbzyb
-.section sbzza
-.section sbzzb
-.section sbz1a
-.section sbz1b
-.section sbz2a
-.section sbz2b
-.section sbz3a
-.section sbz3b
-.section sbz4a
-.section sbz4b
-.section sbz5a
-.section sbz5b
-.section sbz6a
-.section sbz6b
-.section sbz7a
-.section sbz7b
-.section sbz8a
-.section sbz8b
-.section sbz9a
-.section sbz9b
-.section sbz0a
-.section sbz0b
-.section sb1aa
-.section sb1ab
-.section sb1ba
-.section sb1bb
-.section sb1ca
-.section sb1cb
-.section sb1da
-.section sb1db
-.section sb1ea
-.section sb1eb
-.section sb1fa
-.section sb1fb
-.section sb1ga
-.section sb1gb
-.section sb1ha
-.section sb1hb
-.section sb1ia
-.section sb1ib
-.section sb1ja
-.section sb1jb
-.section sb1ka
-.section sb1kb
-.section sb1la
-.section sb1lb
-.section sb1ma
-.section sb1mb
-.section sb1na
-.section sb1nb
-.section sb1oa
-.section sb1ob
-.section sb1pa
-.section sb1pb
-.section sb1qa
-.section sb1qb
-.section sb1ra
-.section sb1rb
-.section sb1sa
-.section sb1sb
-.section sb1ta
-.section sb1tb
-.section sb1ua
-.section sb1ub
-.section sb1va
-.section sb1vb
-.section sb1wa
-.section sb1wb
-.section sb1xa
-.section sb1xb
-.section sb1ya
-.section sb1yb
-.section sb1za
-.section sb1zb
-.section sb11a
-.section sb11b
-.section sb12a
-.section sb12b
-.section sb13a
-.section sb13b
-.section sb14a
-.section sb14b
-.section sb15a
-.section sb15b
-.section sb16a
-.section sb16b
-.section sb17a
-.section sb17b
-.section sb18a
-.section sb18b
-.section sb19a
-.section sb19b
-.section sb10a
-.section sb10b
-.section sb2aa
-.section sb2ab
-.section sb2ba
-.section sb2bb
-.section sb2ca
-.section sb2cb
-.section sb2da
-.section sb2db
-.section sb2ea
-.section sb2eb
-.section sb2fa
-.section sb2fb
-.section sb2ga
-.section sb2gb
-.section sb2ha
-.section sb2hb
-.section sb2ia
-.section sb2ib
-.section sb2ja
-.section sb2jb
-.section sb2ka
-.section sb2kb
-.section sb2la
-.section sb2lb
-.section sb2ma
-.section sb2mb
-.section sb2na
-.section sb2nb
-.section sb2oa
-.section sb2ob
-.section sb2pa
-.section sb2pb
-.section sb2qa
-.section sb2qb
-.section sb2ra
-.section sb2rb
-.section sb2sa
-.section sb2sb
-.section sb2ta
-.section sb2tb
-.section sb2ua
-.section sb2ub
-.section sb2va
-.section sb2vb
-.section sb2wa
-.section sb2wb
-.section sb2xa
-.section sb2xb
-.section sb2ya
-.section sb2yb
-.section sb2za
-.section sb2zb
-.section sb21a
-.section sb21b
-.section sb22a
-.section sb22b
-.section sb23a
-.section sb23b
-.section sb24a
-.section sb24b
-.section sb25a
-.section sb25b
-.section sb26a
-.section sb26b
-.section sb27a
-.section sb27b
-.section sb28a
-.section sb28b
-.section sb29a
-.section sb29b
-.section sb20a
-.section sb20b
-.section sb3aa
-.section sb3ab
-.section sb3ba
-.section sb3bb
-.section sb3ca
-.section sb3cb
-.section sb3da
-.section sb3db
-.section sb3ea
-.section sb3eb
-.section sb3fa
-.section sb3fb
-.section sb3ga
-.section sb3gb
-.section sb3ha
-.section sb3hb
-.section sb3ia
-.section sb3ib
-.section sb3ja
-.section sb3jb
-.section sb3ka
-.section sb3kb
-.section sb3la
-.section sb3lb
-.section sb3ma
-.section sb3mb
-.section sb3na
-.section sb3nb
-.section sb3oa
-.section sb3ob
-.section sb3pa
-.section sb3pb
-.section sb3qa
-.section sb3qb
-.section sb3ra
-.section sb3rb
-.section sb3sa
-.section sb3sb
-.section sb3ta
-.section sb3tb
-.section sb3ua
-.section sb3ub
-.section sb3va
-.section sb3vb
-.section sb3wa
-.section sb3wb
-.section sb3xa
-.section sb3xb
-.section sb3ya
-.section sb3yb
-.section sb3za
-.section sb3zb
-.section sb31a
-.section sb31b
-.section sb32a
-.section sb32b
-.section sb33a
-.section sb33b
-.section sb34a
-.section sb34b
-.section sb35a
-.section sb35b
-.section sb36a
-.section sb36b
-.section sb37a
-.section sb37b
-.section sb38a
-.section sb38b
-.section sb39a
-.section sb39b
-.section sb30a
-.section sb30b
-.section sb4aa
-.section sb4ab
-.section sb4ba
-.section sb4bb
-.section sb4ca
-.section sb4cb
-.section sb4da
-.section sb4db
-.section sb4ea
-.section sb4eb
-.section sb4fa
-.section sb4fb
-.section sb4ga
-.section sb4gb
-.section sb4ha
-.section sb4hb
-.section sb4ia
-.section sb4ib
-.section sb4ja
-.section sb4jb
-.section sb4ka
-.section sb4kb
-.section sb4la
-.section sb4lb
-.section sb4ma
-.section sb4mb
-.section sb4na
-.section sb4nb
-.section sb4oa
-.section sb4ob
-.section sb4pa
-.section sb4pb
-.section sb4qa
-.section sb4qb
-.section sb4ra
-.section sb4rb
-.section sb4sa
-.section sb4sb
-.section sb4ta
-.section sb4tb
-.section sb4ua
-.section sb4ub
-.section sb4va
-.section sb4vb
-.section sb4wa
-.section sb4wb
-.section sb4xa
-.section sb4xb
-.section sb4ya
-.section sb4yb
-.section sb4za
-.section sb4zb
-.section sb41a
-.section sb41b
-.section sb42a
-.section sb42b
-.section sb43a
-.section sb43b
-.section sb44a
-.section sb44b
-.section sb45a
-.section sb45b
-.section sb46a
-.section sb46b
-.section sb47a
-.section sb47b
-.section sb48a
-.section sb48b
-.section sb49a
-.section sb49b
-.section sb40a
-.section sb40b
-.section sb5aa
-.section sb5ab
-.section sb5ba
-.section sb5bb
-.section sb5ca
-.section sb5cb
-.section sb5da
-.section sb5db
-.section sb5ea
-.section sb5eb
-.section sb5fa
-.section sb5fb
-.section sb5ga
-.section sb5gb
-.section sb5ha
-.section sb5hb
-.section sb5ia
-.section sb5ib
-.section sb5ja
-.section sb5jb
-.section sb5ka
-.section sb5kb
-.section sb5la
-.section sb5lb
-.section sb5ma
-.section sb5mb
-.section sb5na
-.section sb5nb
-.section sb5oa
-.section sb5ob
-.section sb5pa
-.section sb5pb
-.section sb5qa
-.section sb5qb
-.section sb5ra
-.section sb5rb
-.section sb5sa
-.section sb5sb
-.section sb5ta
-.section sb5tb
-.section sb5ua
-.section sb5ub
-.section sb5va
-.section sb5vb
-.section sb5wa
-.section sb5wb
-.section sb5xa
-.section sb5xb
-.section sb5ya
-.section sb5yb
-.section sb5za
-.section sb5zb
-.section sb51a
-.section sb51b
-.section sb52a
-.section sb52b
-.section sb53a
-.section sb53b
-.section sb54a
-.section sb54b
-.section sb55a
-.section sb55b
-.section sb56a
-.section sb56b
-.section sb57a
-.section sb57b
-.section sb58a
-.section sb58b
-.section sb59a
-.section sb59b
-.section sb50a
-.section sb50b
-.section sb6aa
-.section sb6ab
-.section sb6ba
-.section sb6bb
-.section sb6ca
-.section sb6cb
-.section sb6da
-.section sb6db
-.section sb6ea
-.section sb6eb
-.section sb6fa
-.section sb6fb
-.section sb6ga
-.section sb6gb
-.section sb6ha
-.section sb6hb
-.section sb6ia
-.section sb6ib
-.section sb6ja
-.section sb6jb
-.section sb6ka
-.section sb6kb
-.section sb6la
-.section sb6lb
-.section sb6ma
-.section sb6mb
-.section sb6na
-.section sb6nb
-.section sb6oa
-.section sb6ob
-.section sb6pa
-.section sb6pb
-.section sb6qa
-.section sb6qb
-.section sb6ra
-.section sb6rb
-.section sb6sa
-.section sb6sb
-.section sb6ta
-.section sb6tb
-.section sb6ua
-.section sb6ub
-.section sb6va
-.section sb6vb
-.section sb6wa
-.section sb6wb
-.section sb6xa
-.section sb6xb
-.section sb6ya
-.section sb6yb
-.section sb6za
-.section sb6zb
-.section sb61a
-.section sb61b
-.section sb62a
-.section sb62b
-.section sb63a
-.section sb63b
-.section sb64a
-.section sb64b
-.section sb65a
-.section sb65b
-.section sb66a
-.section sb66b
-.section sb67a
-.section sb67b
-.section sb68a
-.section sb68b
-.section sb69a
-.section sb69b
-.section sb60a
-.section sb60b
-.section sb7aa
-.section sb7ab
-.section sb7ba
-.section sb7bb
-.section sb7ca
-.section sb7cb
-.section sb7da
-.section sb7db
-.section sb7ea
-.section sb7eb
-.section sb7fa
-.section sb7fb
-.section sb7ga
-.section sb7gb
-.section sb7ha
-.section sb7hb
-.section sb7ia
-.section sb7ib
-.section sb7ja
-.section sb7jb
-.section sb7ka
-.section sb7kb
-.section sb7la
-.section sb7lb
-.section sb7ma
-.section sb7mb
-.section sb7na
-.section sb7nb
-.section sb7oa
-.section sb7ob
-.section sb7pa
-.section sb7pb
-.section sb7qa
-.section sb7qb
-.section sb7ra
-.section sb7rb
-.section sb7sa
-.section sb7sb
-.section sb7ta
-.section sb7tb
-.section sb7ua
-.section sb7ub
-.section sb7va
-.section sb7vb
-.section sb7wa
-.section sb7wb
-.section sb7xa
-.section sb7xb
-.section sb7ya
-.section sb7yb
-.section sb7za
-.section sb7zb
-.section sb71a
-.section sb71b
-.section sb72a
-.section sb72b
-.section sb73a
-.section sb73b
-.section sb74a
-.section sb74b
-.section sb75a
-.section sb75b
-.section sb76a
-.section sb76b
-.section sb77a
-.section sb77b
-.section sb78a
-.section sb78b
-.section sb79a
-.section sb79b
-.section sb70a
-.section sb70b
-.section sb8aa
-.section sb8ab
-.section sb8ba
-.section sb8bb
-.section sb8ca
-.section sb8cb
-.section sb8da
-.section sb8db
-.section sb8ea
-.section sb8eb
-.section sb8fa
-.section sb8fb
-.section sb8ga
-.section sb8gb
-.section sb8ha
-.section sb8hb
-.section sb8ia
-.section sb8ib
-.section sb8ja
-.section sb8jb
-.section sb8ka
-.section sb8kb
-.section sb8la
-.section sb8lb
-.section sb8ma
-.section sb8mb
-.section sb8na
-.section sb8nb
-.section sb8oa
-.section sb8ob
-.section sb8pa
-.section sb8pb
-.section sb8qa
-.section sb8qb
-.section sb8ra
-.section sb8rb
-.section sb8sa
-.section sb8sb
-.section sb8ta
-.section sb8tb
-.section sb8ua
-.section sb8ub
-.section sb8va
-.section sb8vb
-.section sb8wa
-.section sb8wb
-.section sb8xa
-.section sb8xb
-.section sb8ya
-.section sb8yb
-.section sb8za
-.section sb8zb
-.section sb81a
-.section sb81b
-.section sb82a
-.section sb82b
-.section sb83a
-.section sb83b
-.section sb84a
-.section sb84b
-.section sb85a
-.section sb85b
-.section sb86a
-.section sb86b
-.section sb87a
-.section sb87b
-.section sb88a
-.section sb88b
-.section sb89a
-.section sb89b
-.section sb80a
-.section sb80b
-.section sb9aa
-.section sb9ab
-.section sb9ba
-.section sb9bb
-.section sb9ca
-.section sb9cb
-.section sb9da
-.section sb9db
-.section sb9ea
-.section sb9eb
-.section sb9fa
-.section sb9fb
-.section sb9ga
-.section sb9gb
-.section sb9ha
-.section sb9hb
-.section sb9ia
-.section sb9ib
-.section sb9ja
-.section sb9jb
-.section sb9ka
-.section sb9kb
-.section sb9la
-.section sb9lb
-.section sb9ma
-.section sb9mb
-.section sb9na
-.section sb9nb
-.section sb9oa
-.section sb9ob
-.section sb9pa
-.section sb9pb
-.section sb9qa
-.section sb9qb
-.section sb9ra
-.section sb9rb
-.section sb9sa
-.section sb9sb
-.section sb9ta
-.section sb9tb
-.section sb9ua
-.section sb9ub
-.section sb9va
-.section sb9vb
-.section sb9wa
-.section sb9wb
-.section sb9xa
-.section sb9xb
-.section sb9ya
-.section sb9yb
-.section sb9za
-.section sb9zb
-.section sb91a
-.section sb91b
-.section sb92a
-.section sb92b
-.section sb93a
-.section sb93b
-.section sb94a
-.section sb94b
-.section sb95a
-.section sb95b
-.section sb96a
-.section sb96b
-.section sb97a
-.section sb97b
-.section sb98a
-.section sb98b
-.section sb99a
-.section sb99b
-.section sb90a
-.section sb90b
-.section sb0aa
-.section sb0ab
-.section sb0ba
-.section sb0bb
-.section sb0ca
-.section sb0cb
-.section sb0da
-.section sb0db
-.section sb0ea
-.section sb0eb
-.section sb0fa
-.section sb0fb
-.section sb0ga
-.section sb0gb
-.section sb0ha
-.section sb0hb
-.section sb0ia
-.section sb0ib
-.section sb0ja
-.section sb0jb
-.section sb0ka
-.section sb0kb
-.section sb0la
-.section sb0lb
-.section sb0ma
-.section sb0mb
-.section sb0na
-.section sb0nb
-.section sb0oa
-.section sb0ob
-.section sb0pa
-.section sb0pb
-.section sb0qa
-.section sb0qb
-.section sb0ra
-.section sb0rb
-.section sb0sa
-.section sb0sb
-.section sb0ta
-.section sb0tb
-.section sb0ua
-.section sb0ub
-.section sb0va
-.section sb0vb
-.section sb0wa
-.section sb0wb
-.section sb0xa
-.section sb0xb
-.section sb0ya
-.section sb0yb
-.section sb0za
-.section sb0zb
-.section sb01a
-.section sb01b
-.section sb02a
-.section sb02b
-.section sb03a
-.section sb03b
-.section sb04a
-.section sb04b
-.section sb05a
-.section sb05b
-.section sb06a
-.section sb06b
-.section sb07a
-.section sb07b
-.section sb08a
-.section sb08b
-.section sb09a
-.section sb09b
-.section sb00a
-.section sb00b
-.section scaaa
-.section scaab
-.section scaba
-.section scabb
-.section scaca
-.section scacb
-.section scada
-.section scadb
-.section scaea
-.section scaeb
-.section scafa
-.section scafb
-.section scaga
-.section scagb
-.section scaha
-.section scahb
-.section scaia
-.section scaib
-.section scaja
-.section scajb
-.section scaka
-.section scakb
-.section scala
-.section scalb
-.section scama
-.section scamb
-.section scana
-.section scanb
-.section scaoa
-.section scaob
-.section scapa
-.section scapb
-.section scaqa
-.section scaqb
-.section scara
-.section scarb
-.section scasa
-.section scasb
-.section scata
-.section scatb
-.section scaua
-.section scaub
-.section scava
-.section scavb
-.section scawa
-.section scawb
-.section scaxa
-.section scaxb
-.section scaya
-.section scayb
-.section scaza
-.section scazb
-.section sca1a
-.section sca1b
-.section sca2a
-.section sca2b
-.section sca3a
-.section sca3b
-.section sca4a
-.section sca4b
-.section sca5a
-.section sca5b
-.section sca6a
-.section sca6b
-.section sca7a
-.section sca7b
-.section sca8a
-.section sca8b
-.section sca9a
-.section sca9b
-.section sca0a
-.section sca0b
-.section scbaa
-.section scbab
-.section scbba
-.section scbbb
-.section scbca
-.section scbcb
-.section scbda
-.section scbdb
-.section scbea
-.section scbeb
-.section scbfa
-.section scbfb
-.section scbga
-.section scbgb
-.section scbha
-.section scbhb
-.section scbia
-.section scbib
-.section scbja
-.section scbjb
-.section scbka
-.section scbkb
-.section scbla
-.section scblb
-.section scbma
-.section scbmb
-.section scbna
-.section scbnb
-.section scboa
-.section scbob
-.section scbpa
-.section scbpb
-.section scbqa
-.section scbqb
-.section scbra
-.section scbrb
-.section scbsa
-.section scbsb
-.section scbta
-.section scbtb
-.section scbua
-.section scbub
-.section scbva
-.section scbvb
-.section scbwa
-.section scbwb
-.section scbxa
-.section scbxb
-.section scbya
-.section scbyb
-.section scbza
-.section scbzb
-.section scb1a
-.section scb1b
-.section scb2a
-.section scb2b
-.section scb3a
-.section scb3b
-.section scb4a
-.section scb4b
-.section scb5a
-.section scb5b
-.section scb6a
-.section scb6b
-.section scb7a
-.section scb7b
-.section scb8a
-.section scb8b
-.section scb9a
-.section scb9b
-.section scb0a
-.section scb0b
-.section sccaa
-.section sccab
-.section sccba
-.section sccbb
-.section sccca
-.section scccb
-.section sccda
-.section sccdb
-.section sccea
-.section scceb
-.section sccfa
-.section sccfb
-.section sccga
-.section sccgb
-.section sccha
-.section scchb
-.section sccia
-.section sccib
-.section sccja
-.section sccjb
-.section sccka
-.section scckb
-.section sccla
-.section scclb
-.section sccma
-.section sccmb
-.section sccna
-.section sccnb
-.section sccoa
-.section sccob
-.section sccpa
-.section sccpb
-.section sccqa
-.section sccqb
-.section sccra
-.section sccrb
-.section sccsa
-.section sccsb
-.section sccta
-.section scctb
-.section sccua
-.section sccub
-.section sccva
-.section sccvb
-.section sccwa
-.section sccwb
-.section sccxa
-.section sccxb
-.section sccya
-.section sccyb
-.section sccza
-.section scczb
-.section scc1a
-.section scc1b
-.section scc2a
-.section scc2b
-.section scc3a
-.section scc3b
-.section scc4a
-.section scc4b
-.section scc5a
-.section scc5b
-.section scc6a
-.section scc6b
-.section scc7a
-.section scc7b
-.section scc8a
-.section scc8b
-.section scc9a
-.section scc9b
-.section scc0a
-.section scc0b
-.section scdaa
-.section scdab
-.section scdba
-.section scdbb
-.section scdca
-.section scdcb
-.section scdda
-.section scddb
-.section scdea
-.section scdeb
-.section scdfa
-.section scdfb
-.section scdga
-.section scdgb
-.section scdha
-.section scdhb
-.section scdia
-.section scdib
-.section scdja
-.section scdjb
-.section scdka
-.section scdkb
-.section scdla
-.section scdlb
-.section scdma
-.section scdmb
-.section scdna
-.section scdnb
-.section scdoa
-.section scdob
-.section scdpa
-.section scdpb
-.section scdqa
-.section scdqb
-.section scdra
-.section scdrb
-.section scdsa
-.section scdsb
-.section scdta
-.section scdtb
-.section scdua
-.section scdub
-.section scdva
-.section scdvb
-.section scdwa
-.section scdwb
-.section scdxa
-.section scdxb
-.section scdya
-.section scdyb
-.section scdza
-.section scdzb
-.section scd1a
-.section scd1b
-.section scd2a
-.section scd2b
-.section scd3a
-.section scd3b
-.section scd4a
-.section scd4b
-.section scd5a
-.section scd5b
-.section scd6a
-.section scd6b
-.section scd7a
-.section scd7b
-.section scd8a
-.section scd8b
-.section scd9a
-.section scd9b
-.section scd0a
-.section scd0b
-.section sceaa
-.section sceab
-.section sceba
-.section scebb
-.section sceca
-.section scecb
-.section sceda
-.section scedb
-.section sceea
-.section sceeb
-.section scefa
-.section scefb
-.section scega
-.section scegb
-.section sceha
-.section scehb
-.section sceia
-.section sceib
-.section sceja
-.section scejb
-.section sceka
-.section scekb
-.section scela
-.section scelb
-.section scema
-.section scemb
-.section scena
-.section scenb
-.section sceoa
-.section sceob
-.section scepa
-.section scepb
-.section sceqa
-.section sceqb
-.section scera
-.section scerb
-.section scesa
-.section scesb
-.section sceta
-.section scetb
-.section sceua
-.section sceub
-.section sceva
-.section scevb
-.section scewa
-.section scewb
-.section scexa
-.section scexb
-.section sceya
-.section sceyb
-.section sceza
-.section scezb
-.section sce1a
-.section sce1b
-.section sce2a
-.section sce2b
-.section sce3a
-.section sce3b
-.section sce4a
-.section sce4b
-.section sce5a
-.section sce5b
-.section sce6a
-.section sce6b
-.section sce7a
-.section sce7b
-.section sce8a
-.section sce8b
-.section sce9a
-.section sce9b
-.section sce0a
-.section sce0b
-.section scfaa
-.section scfab
-.section scfba
-.section scfbb
-.section scfca
-.section scfcb
-.section scfda
-.section scfdb
-.section scfea
-.section scfeb
-.section scffa
-.section scffb
-.section scfga
-.section scfgb
-.section scfha
-.section scfhb
-.section scfia
-.section scfib
-.section scfja
-.section scfjb
-.section scfka
-.section scfkb
-.section scfla
-.section scflb
-.section scfma
-.section scfmb
-.section scfna
-.section scfnb
-.section scfoa
-.section scfob
-.section scfpa
-.section scfpb
-.section scfqa
-.section scfqb
-.section scfra
-.section scfrb
-.section scfsa
-.section scfsb
-.section scfta
-.section scftb
-.section scfua
-.section scfub
-.section scfva
-.section scfvb
-.section scfwa
-.section scfwb
-.section scfxa
-.section scfxb
-.section scfya
-.section scfyb
-.section scfza
-.section scfzb
-.section scf1a
-.section scf1b
-.section scf2a
-.section scf2b
-.section scf3a
-.section scf3b
-.section scf4a
-.section scf4b
-.section scf5a
-.section scf5b
-.section scf6a
-.section scf6b
-.section scf7a
-.section scf7b
-.section scf8a
-.section scf8b
-.section scf9a
-.section scf9b
-.section scf0a
-.section scf0b
-.section scgaa
-.section scgab
-.section scgba
-.section scgbb
-.section scgca
-.section scgcb
-.section scgda
-.section scgdb
-.section scgea
-.section scgeb
-.section scgfa
-.section scgfb
-.section scgga
-.section scggb
-.section scgha
-.section scghb
-.section scgia
-.section scgib
-.section scgja
-.section scgjb
-.section scgka
-.section scgkb
-.section scgla
-.section scglb
-.section scgma
-.section scgmb
-.section scgna
-.section scgnb
-.section scgoa
-.section scgob
-.section scgpa
-.section scgpb
-.section scgqa
-.section scgqb
-.section scgra
-.section scgrb
-.section scgsa
-.section scgsb
-.section scgta
-.section scgtb
-.section scgua
-.section scgub
-.section scgva
-.section scgvb
-.section scgwa
-.section scgwb
-.section scgxa
-.section scgxb
-.section scgya
-.section scgyb
-.section scgza
-.section scgzb
-.section scg1a
-.section scg1b
-.section scg2a
-.section scg2b
-.section scg3a
-.section scg3b
-.section scg4a
-.section scg4b
-.section scg5a
-.section scg5b
-.section scg6a
-.section scg6b
-.section scg7a
-.section scg7b
-.section scg8a
-.section scg8b
-.section scg9a
-.section scg9b
-.section scg0a
-.section scg0b
-.section schaa
-.section schab
-.section schba
-.section schbb
-.section schca
-.section schcb
-.section schda
-.section schdb
-.section schea
-.section scheb
-.section schfa
-.section schfb
-.section schga
-.section schgb
-.section schha
-.section schhb
-.section schia
-.section schib
-.section schja
-.section schjb
-.section schka
-.section schkb
-.section schla
-.section schlb
-.section schma
-.section schmb
-.section schna
-.section schnb
-.section schoa
-.section schob
-.section schpa
-.section schpb
-.section schqa
-.section schqb
-.section schra
-.section schrb
-.section schsa
-.section schsb
-.section schta
-.section schtb
-.section schua
-.section schub
-.section schva
-.section schvb
-.section schwa
-.section schwb
-.section schxa
-.section schxb
-.section schya
-.section schyb
-.section schza
-.section schzb
-.section sch1a
-.section sch1b
-.section sch2a
-.section sch2b
-.section sch3a
-.section sch3b
-.section sch4a
-.section sch4b
-.section sch5a
-.section sch5b
-.section sch6a
-.section sch6b
-.section sch7a
-.section sch7b
-.section sch8a
-.section sch8b
-.section sch9a
-.section sch9b
-.section sch0a
-.section sch0b
-.section sciaa
-.section sciab
-.section sciba
-.section scibb
-.section scica
-.section scicb
-.section scida
-.section scidb
-.section sciea
-.section scieb
-.section scifa
-.section scifb
-.section sciga
-.section scigb
-.section sciha
-.section scihb
-.section sciia
-.section sciib
-.section scija
-.section scijb
-.section scika
-.section scikb
-.section scila
-.section scilb
-.section scima
-.section scimb
-.section scina
-.section scinb
-.section scioa
-.section sciob
-.section scipa
-.section scipb
-.section sciqa
-.section sciqb
-.section scira
-.section scirb
-.section scisa
-.section scisb
-.section scita
-.section scitb
-.section sciua
-.section sciub
-.section sciva
-.section scivb
-.section sciwa
-.section sciwb
-.section scixa
-.section scixb
-.section sciya
-.section sciyb
-.section sciza
-.section scizb
-.section sci1a
-.section sci1b
-.section sci2a
-.section sci2b
-.section sci3a
-.section sci3b
-.section sci4a
-.section sci4b
-.section sci5a
-.section sci5b
-.section sci6a
-.section sci6b
-.section sci7a
-.section sci7b
-.section sci8a
-.section sci8b
-.section sci9a
-.section sci9b
-.section sci0a
-.section sci0b
-.section scjaa
-.section scjab
-.section scjba
-.section scjbb
-.section scjca
-.section scjcb
-.section scjda
-.section scjdb
-.section scjea
-.section scjeb
-.section scjfa
-.section scjfb
-.section scjga
-.section scjgb
-.section scjha
-.section scjhb
-.section scjia
-.section scjib
-.section scjja
-.section scjjb
-.section scjka
-.section scjkb
-.section scjla
-.section scjlb
-.section scjma
-.section scjmb
-.section scjna
-.section scjnb
-.section scjoa
-.section scjob
-.section scjpa
-.section scjpb
-.section scjqa
-.section scjqb
-.section scjra
-.section scjrb
-.section scjsa
-.section scjsb
-.section scjta
-.section scjtb
-.section scjua
-.section scjub
-.section scjva
-.section scjvb
-.section scjwa
-.section scjwb
-.section scjxa
-.section scjxb
-.section scjya
-.section scjyb
-.section scjza
-.section scjzb
-.section scj1a
-.section scj1b
-.section scj2a
-.section scj2b
-.section scj3a
-.section scj3b
-.section scj4a
-.section scj4b
-.section scj5a
-.section scj5b
-.section scj6a
-.section scj6b
-.section scj7a
-.section scj7b
-.section scj8a
-.section scj8b
-.section scj9a
-.section scj9b
-.section scj0a
-.section scj0b
-.section sckaa
-.section sckab
-.section sckba
-.section sckbb
-.section sckca
-.section sckcb
-.section sckda
-.section sckdb
-.section sckea
-.section sckeb
-.section sckfa
-.section sckfb
-.section sckga
-.section sckgb
-.section sckha
-.section sckhb
-.section sckia
-.section sckib
-.section sckja
-.section sckjb
-.section sckka
-.section sckkb
-.section sckla
-.section scklb
-.section sckma
-.section sckmb
-.section sckna
-.section scknb
-.section sckoa
-.section sckob
-.section sckpa
-.section sckpb
-.section sckqa
-.section sckqb
-.section sckra
-.section sckrb
-.section scksa
-.section scksb
-.section sckta
-.section scktb
-.section sckua
-.section sckub
-.section sckva
-.section sckvb
-.section sckwa
-.section sckwb
-.section sckxa
-.section sckxb
-.section sckya
-.section sckyb
-.section sckza
-.section sckzb
-.section sck1a
-.section sck1b
-.section sck2a
-.section sck2b
-.section sck3a
-.section sck3b
-.section sck4a
-.section sck4b
-.section sck5a
-.section sck5b
-.section sck6a
-.section sck6b
-.section sck7a
-.section sck7b
-.section sck8a
-.section sck8b
-.section sck9a
-.section sck9b
-.section sck0a
-.section sck0b
-.section sclaa
-.section sclab
-.section sclba
-.section sclbb
-.section sclca
-.section sclcb
-.section sclda
-.section scldb
-.section sclea
-.section scleb
-.section sclfa
-.section sclfb
-.section sclga
-.section sclgb
-.section sclha
-.section sclhb
-.section sclia
-.section sclib
-.section sclja
-.section scljb
-.section sclka
-.section sclkb
-.section sclla
-.section scllb
-.section sclma
-.section sclmb
-.section sclna
-.section sclnb
-.section scloa
-.section sclob
-.section sclpa
-.section sclpb
-.section sclqa
-.section sclqb
-.section sclra
-.section sclrb
-.section sclsa
-.section sclsb
-.section sclta
-.section scltb
-.section sclua
-.section sclub
-.section sclva
-.section sclvb
-.section sclwa
-.section sclwb
-.section sclxa
-.section sclxb
-.section sclya
-.section sclyb
-.section sclza
-.section sclzb
-.section scl1a
-.section scl1b
-.section scl2a
-.section scl2b
-.section scl3a
-.section scl3b
-.section scl4a
-.section scl4b
-.section scl5a
-.section scl5b
-.section scl6a
-.section scl6b
-.section scl7a
-.section scl7b
-.section scl8a
-.section scl8b
-.section scl9a
-.section scl9b
-.section scl0a
-.section scl0b
-.section scmaa
-.section scmab
-.section scmba
-.section scmbb
-.section scmca
-.section scmcb
-.section scmda
-.section scmdb
-.section scmea
-.section scmeb
-.section scmfa
-.section scmfb
-.section scmga
-.section scmgb
-.section scmha
-.section scmhb
-.section scmia
-.section scmib
-.section scmja
-.section scmjb
-.section scmka
-.section scmkb
-.section scmla
-.section scmlb
-.section scmma
-.section scmmb
-.section scmna
-.section scmnb
-.section scmoa
-.section scmob
-.section scmpa
-.section scmpb
-.section scmqa
-.section scmqb
-.section scmra
-.section scmrb
-.section scmsa
-.section scmsb
-.section scmta
-.section scmtb
-.section scmua
-.section scmub
-.section scmva
-.section scmvb
-.section scmwa
-.section scmwb
-.section scmxa
-.section scmxb
-.section scmya
-.section scmyb
-.section scmza
-.section scmzb
-.section scm1a
-.section scm1b
-.section scm2a
-.section scm2b
-.section scm3a
-.section scm3b
-.section scm4a
-.section scm4b
-.section scm5a
-.section scm5b
-.section scm6a
-.section scm6b
-.section scm7a
-.section scm7b
-.section scm8a
-.section scm8b
-.section scm9a
-.section scm9b
-.section scm0a
-.section scm0b
-.section scnaa
-.section scnab
-.section scnba
-.section scnbb
-.section scnca
-.section scncb
-.section scnda
-.section scndb
-.section scnea
-.section scneb
-.section scnfa
-.section scnfb
-.section scnga
-.section scngb
-.section scnha
-.section scnhb
-.section scnia
-.section scnib
-.section scnja
-.section scnjb
-.section scnka
-.section scnkb
-.section scnla
-.section scnlb
-.section scnma
-.section scnmb
-.section scnna
-.section scnnb
-.section scnoa
-.section scnob
-.section scnpa
-.section scnpb
-.section scnqa
-.section scnqb
-.section scnra
-.section scnrb
-.section scnsa
-.section scnsb
-.section scnta
-.section scntb
-.section scnua
-.section scnub
-.section scnva
-.section scnvb
-.section scnwa
-.section scnwb
-.section scnxa
-.section scnxb
-.section scnya
-.section scnyb
-.section scnza
-.section scnzb
-.section scn1a
-.section scn1b
-.section scn2a
-.section scn2b
-.section scn3a
-.section scn3b
-.section scn4a
-.section scn4b
-.section scn5a
-.section scn5b
-.section scn6a
-.section scn6b
-.section scn7a
-.section scn7b
-.section scn8a
-.section scn8b
-.section scn9a
-.section scn9b
-.section scn0a
-.section scn0b
-.section scoaa
-.section scoab
-.section scoba
-.section scobb
-.section scoca
-.section scocb
-.section scoda
-.section scodb
-.section scoea
-.section scoeb
-.section scofa
-.section scofb
-.section scoga
-.section scogb
-.section scoha
-.section scohb
-.section scoia
-.section scoib
-.section scoja
-.section scojb
-.section scoka
-.section scokb
-.section scola
-.section scolb
-.section scoma
-.section scomb
-.section scona
-.section sconb
-.section scooa
-.section scoob
-.section scopa
-.section scopb
-.section scoqa
-.section scoqb
-.section scora
-.section scorb
-.section scosa
-.section scosb
-.section scota
-.section scotb
-.section scoua
-.section scoub
-.section scova
-.section scovb
-.section scowa
-.section scowb
-.section scoxa
-.section scoxb
-.section scoya
-.section scoyb
-.section scoza
-.section scozb
-.section sco1a
-.section sco1b
-.section sco2a
-.section sco2b
-.section sco3a
-.section sco3b
-.section sco4a
-.section sco4b
-.section sco5a
-.section sco5b
-.section sco6a
-.section sco6b
-.section sco7a
-.section sco7b
-.section sco8a
-.section sco8b
-.section sco9a
-.section sco9b
-.section sco0a
-.section sco0b
-.section scpaa
-.section scpab
-.section scpba
-.section scpbb
-.section scpca
-.section scpcb
-.section scpda
-.section scpdb
-.section scpea
-.section scpeb
-.section scpfa
-.section scpfb
-.section scpga
-.section scpgb
-.section scpha
-.section scphb
-.section scpia
-.section scpib
-.section scpja
-.section scpjb
-.section scpka
-.section scpkb
-.section scpla
-.section scplb
-.section scpma
-.section scpmb
-.section scpna
-.section scpnb
-.section scpoa
-.section scpob
-.section scppa
-.section scppb
-.section scpqa
-.section scpqb
-.section scpra
-.section scprb
-.section scpsa
-.section scpsb
-.section scpta
-.section scptb
-.section scpua
-.section scpub
-.section scpva
-.section scpvb
-.section scpwa
-.section scpwb
-.section scpxa
-.section scpxb
-.section scpya
-.section scpyb
-.section scpza
-.section scpzb
-.section scp1a
-.section scp1b
-.section scp2a
-.section scp2b
-.section scp3a
-.section scp3b
-.section scp4a
-.section scp4b
-.section scp5a
-.section scp5b
-.section scp6a
-.section scp6b
-.section scp7a
-.section scp7b
-.section scp8a
-.section scp8b
-.section scp9a
-.section scp9b
-.section scp0a
-.section scp0b
-.section scqaa
-.section scqab
-.section scqba
-.section scqbb
-.section scqca
-.section scqcb
-.section scqda
-.section scqdb
-.section scqea
-.section scqeb
-.section scqfa
-.section scqfb
-.section scqga
-.section scqgb
-.section scqha
-.section scqhb
-.section scqia
-.section scqib
-.section scqja
-.section scqjb
-.section scqka
-.section scqkb
-.section scqla
-.section scqlb
-.section scqma
-.section scqmb
-.section scqna
-.section scqnb
-.section scqoa
-.section scqob
-.section scqpa
-.section scqpb
-.section scqqa
-.section scqqb
-.section scqra
-.section scqrb
-.section scqsa
-.section scqsb
-.section scqta
-.section scqtb
-.section scqua
-.section scqub
-.section scqva
-.section scqvb
-.section scqwa
-.section scqwb
-.section scqxa
-.section scqxb
-.section scqya
-.section scqyb
-.section scqza
-.section scqzb
-.section scq1a
-.section scq1b
-.section scq2a
-.section scq2b
-.section scq3a
-.section scq3b
-.section scq4a
-.section scq4b
-.section scq5a
-.section scq5b
-.section scq6a
-.section scq6b
-.section scq7a
-.section scq7b
-.section scq8a
-.section scq8b
-.section scq9a
-.section scq9b
-.section scq0a
-.section scq0b
-.section scraa
-.section scrab
-.section scrba
-.section scrbb
-.section scrca
-.section scrcb
-.section scrda
-.section scrdb
-.section screa
-.section screb
-.section scrfa
-.section scrfb
-.section scrga
-.section scrgb
-.section scrha
-.section scrhb
-.section scria
-.section scrib
-.section scrja
-.section scrjb
-.section scrka
-.section scrkb
-.section scrla
-.section scrlb
-.section scrma
-.section scrmb
-.section scrna
-.section scrnb
-.section scroa
-.section scrob
-.section scrpa
-.section scrpb
-.section scrqa
-.section scrqb
-.section scrra
-.section scrrb
-.section scrsa
-.section scrsb
-.section scrta
-.section scrtb
-.section scrua
-.section scrub
-.section scrva
-.section scrvb
-.section scrwa
-.section scrwb
-.section scrxa
-.section scrxb
-.section scrya
-.section scryb
-.section scrza
-.section scrzb
-.section scr1a
-.section scr1b
-.section scr2a
-.section scr2b
-.section scr3a
-.section scr3b
-.section scr4a
-.section scr4b
-.section scr5a
-.section scr5b
-.section scr6a
-.section scr6b
-.section scr7a
-.section scr7b
-.section scr8a
-.section scr8b
-.section scr9a
-.section scr9b
-.section scr0a
-.section scr0b
-.section scsaa
-.section scsab
-.section scsba
-.section scsbb
-.section scsca
-.section scscb
-.section scsda
-.section scsdb
-.section scsea
-.section scseb
-.section scsfa
-.section scsfb
-.section scsga
-.section scsgb
-.section scsha
-.section scshb
-.section scsia
-.section scsib
-.section scsja
-.section scsjb
-.section scska
-.section scskb
-.section scsla
-.section scslb
-.section scsma
-.section scsmb
-.section scsna
-.section scsnb
-.section scsoa
-.section scsob
-.section scspa
-.section scspb
-.section scsqa
-.section scsqb
-.section scsra
-.section scsrb
-.section scssa
-.section scssb
-.section scsta
-.section scstb
-.section scsua
-.section scsub
-.section scsva
-.section scsvb
-.section scswa
-.section scswb
-.section scsxa
-.section scsxb
-.section scsya
-.section scsyb
-.section scsza
-.section scszb
-.section scs1a
-.section scs1b
-.section scs2a
-.section scs2b
-.section scs3a
-.section scs3b
-.section scs4a
-.section scs4b
-.section scs5a
-.section scs5b
-.section scs6a
-.section scs6b
-.section scs7a
-.section scs7b
-.section scs8a
-.section scs8b
-.section scs9a
-.section scs9b
-.section scs0a
-.section scs0b
-.section sctaa
-.section sctab
-.section sctba
-.section sctbb
-.section sctca
-.section sctcb
-.section sctda
-.section sctdb
-.section sctea
-.section scteb
-.section sctfa
-.section sctfb
-.section sctga
-.section sctgb
-.section sctha
-.section scthb
-.section sctia
-.section sctib
-.section sctja
-.section sctjb
-.section sctka
-.section sctkb
-.section sctla
-.section sctlb
-.section sctma
-.section sctmb
-.section sctna
-.section sctnb
-.section sctoa
-.section sctob
-.section sctpa
-.section sctpb
-.section sctqa
-.section sctqb
-.section sctra
-.section sctrb
-.section sctsa
-.section sctsb
-.section sctta
-.section scttb
-.section sctua
-.section sctub
-.section sctva
-.section sctvb
-.section sctwa
-.section sctwb
-.section sctxa
-.section sctxb
-.section sctya
-.section sctyb
-.section sctza
-.section sctzb
-.section sct1a
-.section sct1b
-.section sct2a
-.section sct2b
-.section sct3a
-.section sct3b
-.section sct4a
-.section sct4b
-.section sct5a
-.section sct5b
-.section sct6a
-.section sct6b
-.section sct7a
-.section sct7b
-.section sct8a
-.section sct8b
-.section sct9a
-.section sct9b
-.section sct0a
-.section sct0b
-.section scuaa
-.section scuab
-.section scuba
-.section scubb
-.section scuca
-.section scucb
-.section scuda
-.section scudb
-.section scuea
-.section scueb
-.section scufa
-.section scufb
-.section scuga
-.section scugb
-.section scuha
-.section scuhb
-.section scuia
-.section scuib
-.section scuja
-.section scujb
-.section scuka
-.section scukb
-.section scula
-.section sculb
-.section scuma
-.section scumb
-.section scuna
-.section scunb
-.section scuoa
-.section scuob
-.section scupa
-.section scupb
-.section scuqa
-.section scuqb
-.section scura
-.section scurb
-.section scusa
-.section scusb
-.section scuta
-.section scutb
-.section scuua
-.section scuub
-.section scuva
-.section scuvb
-.section scuwa
-.section scuwb
-.section scuxa
-.section scuxb
-.section scuya
-.section scuyb
-.section scuza
-.section scuzb
-.section scu1a
-.section scu1b
-.section scu2a
-.section scu2b
-.section scu3a
-.section scu3b
-.section scu4a
-.section scu4b
-.section scu5a
-.section scu5b
-.section scu6a
-.section scu6b
-.section scu7a
-.section scu7b
-.section scu8a
-.section scu8b
-.section scu9a
-.section scu9b
-.section scu0a
-.section scu0b
-.section scvaa
-.section scvab
-.section scvba
-.section scvbb
-.section scvca
-.section scvcb
-.section scvda
-.section scvdb
-.section scvea
-.section scveb
-.section scvfa
-.section scvfb
-.section scvga
-.section scvgb
-.section scvha
-.section scvhb
-.section scvia
-.section scvib
-.section scvja
-.section scvjb
-.section scvka
-.section scvkb
-.section scvla
-.section scvlb
-.section scvma
-.section scvmb
-.section scvna
-.section scvnb
-.section scvoa
-.section scvob
-.section scvpa
-.section scvpb
-.section scvqa
-.section scvqb
-.section scvra
-.section scvrb
-.section scvsa
-.section scvsb
-.section scvta
-.section scvtb
-.section scvua
-.section scvub
-.section scvva
-.section scvvb
-.section scvwa
-.section scvwb
-.section scvxa
-.section scvxb
-.section scvya
-.section scvyb
-.section scvza
-.section scvzb
-.section scv1a
-.section scv1b
-.section scv2a
-.section scv2b
-.section scv3a
-.section scv3b
-.section scv4a
-.section scv4b
-.section scv5a
-.section scv5b
-.section scv6a
-.section scv6b
-.section scv7a
-.section scv7b
-.section scv8a
-.section scv8b
-.section scv9a
-.section scv9b
-.section scv0a
-.section scv0b
-.section scwaa
-.section scwab
-.section scwba
-.section scwbb
-.section scwca
-.section scwcb
-.section scwda
-.section scwdb
-.section scwea
-.section scweb
-.section scwfa
-.section scwfb
-.section scwga
-.section scwgb
-.section scwha
-.section scwhb
-.section scwia
-.section scwib
-.section scwja
-.section scwjb
-.section scwka
-.section scwkb
-.section scwla
-.section scwlb
-.section scwma
-.section scwmb
-.section scwna
-.section scwnb
-.section scwoa
-.section scwob
-.section scwpa
-.section scwpb
-.section scwqa
-.section scwqb
-.section scwra
-.section scwrb
-.section scwsa
-.section scwsb
-.section scwta
-.section scwtb
-.section scwua
-.section scwub
-.section scwva
-.section scwvb
-.section scwwa
-.section scwwb
-.section scwxa
-.section scwxb
-.section scwya
-.section scwyb
-.section scwza
-.section scwzb
-.section scw1a
-.section scw1b
-.section scw2a
-.section scw2b
-.section scw3a
-.section scw3b
-.section scw4a
-.section scw4b
-.section scw5a
-.section scw5b
-.section scw6a
-.section scw6b
-.section scw7a
-.section scw7b
-.section scw8a
-.section scw8b
-.section scw9a
-.section scw9b
-.section scw0a
-.section scw0b
-.section scxaa
-.section scxab
-.section scxba
-.section scxbb
-.section scxca
-.section scxcb
-.section scxda
-.section scxdb
-.section scxea
-.section scxeb
-.section scxfa
-.section scxfb
-.section scxga
-.section scxgb
-.section scxha
-.section scxhb
-.section scxia
-.section scxib
-.section scxja
-.section scxjb
-.section scxka
-.section scxkb
-.section scxla
-.section scxlb
-.section scxma
-.section scxmb
-.section scxna
-.section scxnb
-.section scxoa
-.section scxob
-.section scxpa
-.section scxpb
-.section scxqa
-.section scxqb
-.section scxra
-.section scxrb
-.section scxsa
-.section scxsb
-.section scxta
-.section scxtb
-.section scxua
-.section scxub
-.section scxva
-.section scxvb
-.section scxwa
-.section scxwb
-.section scxxa
-.section scxxb
-.section scxya
-.section scxyb
-.section scxza
-.section scxzb
-.section scx1a
-.section scx1b
-.section scx2a
-.section scx2b
-.section scx3a
-.section scx3b
-.section scx4a
-.section scx4b
-.section scx5a
-.section scx5b
-.section scx6a
-.section scx6b
-.section scx7a
-.section scx7b
-.section scx8a
-.section scx8b
-.section scx9a
-.section scx9b
-.section scx0a
-.section scx0b
-.section scyaa
-.section scyab
-.section scyba
-.section scybb
-.section scyca
-.section scycb
-.section scyda
-.section scydb
-.section scyea
-.section scyeb
-.section scyfa
-.section scyfb
-.section scyga
-.section scygb
-.section scyha
-.section scyhb
-.section scyia
-.section scyib
-.section scyja
-.section scyjb
-.section scyka
-.section scykb
-.section scyla
-.section scylb
-.section scyma
-.section scymb
-.section scyna
-.section scynb
-.section scyoa
-.section scyob
-.section scypa
-.section scypb
-.section scyqa
-.section scyqb
-.section scyra
-.section scyrb
-.section scysa
-.section scysb
-.section scyta
-.section scytb
-.section scyua
-.section scyub
-.section scyva
-.section scyvb
-.section scywa
-.section scywb
-.section scyxa
-.section scyxb
-.section scyya
-.section scyyb
-.section scyza
-.section scyzb
-.section scy1a
-.section scy1b
-.section scy2a
-.section scy2b
-.section scy3a
-.section scy3b
-.section scy4a
-.section scy4b
-.section scy5a
-.section scy5b
-.section scy6a
-.section scy6b
-.section scy7a
-.section scy7b
-.section scy8a
-.section scy8b
-.section scy9a
-.section scy9b
-.section scy0a
-.section scy0b
-.section sczaa
-.section sczab
-.section sczba
-.section sczbb
-.section sczca
-.section sczcb
-.section sczda
-.section sczdb
-.section sczea
-.section sczeb
-.section sczfa
-.section sczfb
-.section sczga
-.section sczgb
-.section sczha
-.section sczhb
-.section sczia
-.section sczib
-.section sczja
-.section sczjb
-.section sczka
-.section sczkb
-.section sczla
-.section sczlb
-.section sczma
-.section sczmb
-.section sczna
-.section scznb
-.section sczoa
-.section sczob
-.section sczpa
-.section sczpb
-.section sczqa
-.section sczqb
-.section sczra
-.section sczrb
-.section sczsa
-.section sczsb
-.section sczta
-.section scztb
-.section sczua
-.section sczub
-.section sczva
-.section sczvb
-.section sczwa
-.section sczwb
-.section sczxa
-.section sczxb
-.section sczya
-.section sczyb
-.section sczza
-.section sczzb
-.section scz1a
-.section scz1b
-.section scz2a
-.section scz2b
-.section scz3a
-.section scz3b
-.section scz4a
-.section scz4b
-.section scz5a
-.section scz5b
-.section scz6a
-.section scz6b
-.section scz7a
-.section scz7b
-.section scz8a
-.section scz8b
-.section scz9a
-.section scz9b
-.section scz0a
-.section scz0b
-.section sc1aa
-.section sc1ab
-.section sc1ba
-.section sc1bb
-.section sc1ca
-.section sc1cb
-.section sc1da
-.section sc1db
-.section sc1ea
-.section sc1eb
-.section sc1fa
-.section sc1fb
-.section sc1ga
-.section sc1gb
-.section sc1ha
-.section sc1hb
-.section sc1ia
-.section sc1ib
-.section sc1ja
-.section sc1jb
-.section sc1ka
-.section sc1kb
-.section sc1la
-.section sc1lb
-.section sc1ma
-.section sc1mb
-.section sc1na
-.section sc1nb
-.section sc1oa
-.section sc1ob
-.section sc1pa
-.section sc1pb
-.section sc1qa
-.section sc1qb
-.section sc1ra
-.section sc1rb
-.section sc1sa
-.section sc1sb
-.section sc1ta
-.section sc1tb
-.section sc1ua
-.section sc1ub
-.section sc1va
-.section sc1vb
-.section sc1wa
-.section sc1wb
-.section sc1xa
-.section sc1xb
-.section sc1ya
-.section sc1yb
-.section sc1za
-.section sc1zb
-.section sc11a
-.section sc11b
-.section sc12a
-.section sc12b
-.section sc13a
-.section sc13b
-.section sc14a
-.section sc14b
-.section sc15a
-.section sc15b
-.section sc16a
-.section sc16b
-.section sc17a
-.section sc17b
-.section sc18a
-.section sc18b
-.section sc19a
-.section sc19b
-.section sc10a
-.section sc10b
-.section sc2aa
-.section sc2ab
-.section sc2ba
-.section sc2bb
-.section sc2ca
-.section sc2cb
-.section sc2da
-.section sc2db
-.section sc2ea
-.section sc2eb
-.section sc2fa
-.section sc2fb
-.section sc2ga
-.section sc2gb
-.section sc2ha
-.section sc2hb
-.section sc2ia
-.section sc2ib
-.section sc2ja
-.section sc2jb
-.section sc2ka
-.section sc2kb
-.section sc2la
-.section sc2lb
-.section sc2ma
-.section sc2mb
-.section sc2na
-.section sc2nb
-.section sc2oa
-.section sc2ob
-.section sc2pa
-.section sc2pb
-.section sc2qa
-.section sc2qb
-.section sc2ra
-.section sc2rb
-.section sc2sa
-.section sc2sb
-.section sc2ta
-.section sc2tb
-.section sc2ua
-.section sc2ub
-.section sc2va
-.section sc2vb
-.section sc2wa
-.section sc2wb
-.section sc2xa
-.section sc2xb
-.section sc2ya
-.section sc2yb
-.section sc2za
-.section sc2zb
-.section sc21a
-.section sc21b
-.section sc22a
-.section sc22b
-.section sc23a
-.section sc23b
-.section sc24a
-.section sc24b
-.section sc25a
-.section sc25b
-.section sc26a
-.section sc26b
-.section sc27a
-.section sc27b
-.section sc28a
-.section sc28b
-.section sc29a
-.section sc29b
-.section sc20a
-.section sc20b
-.section sc3aa
-.section sc3ab
-.section sc3ba
-.section sc3bb
-.section sc3ca
-.section sc3cb
-.section sc3da
-.section sc3db
-.section sc3ea
-.section sc3eb
-.section sc3fa
-.section sc3fb
-.section sc3ga
-.section sc3gb
-.section sc3ha
-.section sc3hb
-.section sc3ia
-.section sc3ib
-.section sc3ja
-.section sc3jb
-.section sc3ka
-.section sc3kb
-.section sc3la
-.section sc3lb
-.section sc3ma
-.section sc3mb
-.section sc3na
-.section sc3nb
-.section sc3oa
-.section sc3ob
-.section sc3pa
-.section sc3pb
-.section sc3qa
-.section sc3qb
-.section sc3ra
-.section sc3rb
-.section sc3sa
-.section sc3sb
-.section sc3ta
-.section sc3tb
-.section sc3ua
-.section sc3ub
-.section sc3va
-.section sc3vb
-.section sc3wa
-.section sc3wb
-.section sc3xa
-.section sc3xb
-.section sc3ya
-.section sc3yb
-.section sc3za
-.section sc3zb
-.section sc31a
-.section sc31b
-.section sc32a
-.section sc32b
-.section sc33a
-.section sc33b
-.section sc34a
-.section sc34b
-.section sc35a
-.section sc35b
-.section sc36a
-.section sc36b
-.section sc37a
-.section sc37b
-.section sc38a
-.section sc38b
-.section sc39a
-.section sc39b
-.section sc30a
-.section sc30b
-.section sc4aa
-.section sc4ab
-.section sc4ba
-.section sc4bb
-.section sc4ca
-.section sc4cb
-.section sc4da
-.section sc4db
-.section sc4ea
-.section sc4eb
-.section sc4fa
-.section sc4fb
-.section sc4ga
-.section sc4gb
-.section sc4ha
-.section sc4hb
-.section sc4ia
-.section sc4ib
-.section sc4ja
-.section sc4jb
-.section sc4ka
-.section sc4kb
-.section sc4la
-.section sc4lb
-.section sc4ma
-.section sc4mb
-.section sc4na
-.section sc4nb
-.section sc4oa
-.section sc4ob
-.section sc4pa
-.section sc4pb
-.section sc4qa
-.section sc4qb
-.section sc4ra
-.section sc4rb
-.section sc4sa
-.section sc4sb
-.section sc4ta
-.section sc4tb
-.section sc4ua
-.section sc4ub
-.section sc4va
-.section sc4vb
-.section sc4wa
-.section sc4wb
-.section sc4xa
-.section sc4xb
-.section sc4ya
-.section sc4yb
-.section sc4za
-.section sc4zb
-.section sc41a
-.section sc41b
-.section sc42a
-.section sc42b
-.section sc43a
-.section sc43b
-.section sc44a
-.section sc44b
-.section sc45a
-.section sc45b
-.section sc46a
-.section sc46b
-.section sc47a
-.section sc47b
-.section sc48a
-.section sc48b
-.section sc49a
-.section sc49b
-.section sc40a
-.section sc40b
-.section sc5aa
-.section sc5ab
-.section sc5ba
-.section sc5bb
-.section sc5ca
-.section sc5cb
-.section sc5da
-.section sc5db
-.section sc5ea
-.section sc5eb
-.section sc5fa
-.section sc5fb
-.section sc5ga
-.section sc5gb
-.section sc5ha
-.section sc5hb
-.section sc5ia
-.section sc5ib
-.section sc5ja
-.section sc5jb
-.section sc5ka
-.section sc5kb
-.section sc5la
-.section sc5lb
-.section sc5ma
-.section sc5mb
-.section sc5na
-.section sc5nb
-.section sc5oa
-.section sc5ob
-.section sc5pa
-.section sc5pb
-.section sc5qa
-.section sc5qb
-.section sc5ra
-.section sc5rb
-.section sc5sa
-.section sc5sb
-.section sc5ta
-.section sc5tb
-.section sc5ua
-.section sc5ub
-.section sc5va
-.section sc5vb
-.section sc5wa
-.section sc5wb
-.section sc5xa
-.section sc5xb
-.section sc5ya
-.section sc5yb
-.section sc5za
-.section sc5zb
-.section sc51a
-.section sc51b
-.section sc52a
-.section sc52b
-.section sc53a
-.section sc53b
-.section sc54a
-.section sc54b
-.section sc55a
-.section sc55b
-.section sc56a
-.section sc56b
-.section sc57a
-.section sc57b
-.section sc58a
-.section sc58b
-.section sc59a
-.section sc59b
-.section sc50a
-.section sc50b
-.section sc6aa
-.section sc6ab
-.section sc6ba
-.section sc6bb
-.section sc6ca
-.section sc6cb
-.section sc6da
-.section sc6db
-.section sc6ea
-.section sc6eb
-.section sc6fa
-.section sc6fb
-.section sc6ga
-.section sc6gb
-.section sc6ha
-.section sc6hb
-.section sc6ia
-.section sc6ib
-.section sc6ja
-.section sc6jb
-.section sc6ka
-.section sc6kb
-.section sc6la
-.section sc6lb
-.section sc6ma
-.section sc6mb
-.section sc6na
-.section sc6nb
-.section sc6oa
-.section sc6ob
-.section sc6pa
-.section sc6pb
-.section sc6qa
-.section sc6qb
-.section sc6ra
-.section sc6rb
-.section sc6sa
-.section sc6sb
-.section sc6ta
-.section sc6tb
-.section sc6ua
-.section sc6ub
-.section sc6va
-.section sc6vb
-.section sc6wa
-.section sc6wb
-.section sc6xa
-.section sc6xb
-.section sc6ya
-.section sc6yb
-.section sc6za
-.section sc6zb
-.section sc61a
-.section sc61b
-.section sc62a
-.section sc62b
-.section sc63a
-.section sc63b
-.section sc64a
-.section sc64b
-.section sc65a
-.section sc65b
-.section sc66a
-.section sc66b
-.section sc67a
-.section sc67b
-.section sc68a
-.section sc68b
-.section sc69a
-.section sc69b
-.section sc60a
-.section sc60b
-.section sc7aa
-.section sc7ab
-.section sc7ba
-.section sc7bb
-.section sc7ca
-.section sc7cb
-.section sc7da
-.section sc7db
-.section sc7ea
-.section sc7eb
-.section sc7fa
-.section sc7fb
-.section sc7ga
-.section sc7gb
-.section sc7ha
-.section sc7hb
-.section sc7ia
-.section sc7ib
-.section sc7ja
-.section sc7jb
-.section sc7ka
-.section sc7kb
-.section sc7la
-.section sc7lb
-.section sc7ma
-.section sc7mb
-.section sc7na
-.section sc7nb
-.section sc7oa
-.section sc7ob
-.section sc7pa
-.section sc7pb
-.section sc7qa
-.section sc7qb
-.section sc7ra
-.section sc7rb
-.section sc7sa
-.section sc7sb
-.section sc7ta
-.section sc7tb
-.section sc7ua
-.section sc7ub
-.section sc7va
-.section sc7vb
-.section sc7wa
-.section sc7wb
-.section sc7xa
-.section sc7xb
-.section sc7ya
-.section sc7yb
-.section sc7za
-.section sc7zb
-.section sc71a
-.section sc71b
-.section sc72a
-.section sc72b
-.section sc73a
-.section sc73b
-.section sc74a
-.section sc74b
-.section sc75a
-.section sc75b
-.section sc76a
-.section sc76b
-.section sc77a
-.section sc77b
-.section sc78a
-.section sc78b
-.section sc79a
-.section sc79b
-.section sc70a
-.section sc70b
-.section sc8aa
-.section sc8ab
-.section sc8ba
-.section sc8bb
-.section sc8ca
-.section sc8cb
-.section sc8da
-.section sc8db
-.section sc8ea
-.section sc8eb
-.section sc8fa
-.section sc8fb
-.section sc8ga
-.section sc8gb
-.section sc8ha
-.section sc8hb
-.section sc8ia
-.section sc8ib
-.section sc8ja
-.section sc8jb
-.section sc8ka
-.section sc8kb
-.section sc8la
-.section sc8lb
-.section sc8ma
-.section sc8mb
-.section sc8na
-.section sc8nb
-.section sc8oa
-.section sc8ob
-.section sc8pa
-.section sc8pb
-.section sc8qa
-.section sc8qb
-.section sc8ra
-.section sc8rb
-.section sc8sa
-.section sc8sb
-.section sc8ta
-.section sc8tb
-.section sc8ua
-.section sc8ub
-.section sc8va
-.section sc8vb
-.section sc8wa
-.section sc8wb
-.section sc8xa
-.section sc8xb
-.section sc8ya
-.section sc8yb
-.section sc8za
-.section sc8zb
-.section sc81a
-.section sc81b
-.section sc82a
-.section sc82b
-.section sc83a
-.section sc83b
-.section sc84a
-.section sc84b
-.section sc85a
-.section sc85b
-.section sc86a
-.section sc86b
-.section sc87a
-.section sc87b
-.section sc88a
-.section sc88b
-.section sc89a
-.section sc89b
-.section sc80a
-.section sc80b
-.section sc9aa
-.section sc9ab
-.section sc9ba
-.section sc9bb
-.section sc9ca
-.section sc9cb
-.section sc9da
-.section sc9db
-.section sc9ea
-.section sc9eb
-.section sc9fa
-.section sc9fb
-.section sc9ga
-.section sc9gb
-.section sc9ha
-.section sc9hb
-.section sc9ia
-.section sc9ib
-.section sc9ja
-.section sc9jb
-.section sc9ka
-.section sc9kb
-.section sc9la
-.section sc9lb
-.section sc9ma
-.section sc9mb
-.section sc9na
-.section sc9nb
-.section sc9oa
-.section sc9ob
-.section sc9pa
-.section sc9pb
-.section sc9qa
-.section sc9qb
-.section sc9ra
-.section sc9rb
-.section sc9sa
-.section sc9sb
-.section sc9ta
-.section sc9tb
-.section sc9ua
-.section sc9ub
-.section sc9va
-.section sc9vb
-.section sc9wa
-.section sc9wb
-.section sc9xa
-.section sc9xb
-.section sc9ya
-.section sc9yb
-.section sc9za
-.section sc9zb
-.section sc91a
-.section sc91b
-.section sc92a
-.section sc92b
-.section sc93a
-.section sc93b
-.section sc94a
-.section sc94b
-.section sc95a
-.section sc95b
-.section sc96a
-.section sc96b
-.section sc97a
-.section sc97b
-.section sc98a
-.section sc98b
-.section sc99a
-.section sc99b
-.section sc90a
-.section sc90b
-.section sc0aa
-.section sc0ab
-.section sc0ba
-.section sc0bb
-.section sc0ca
-.section sc0cb
-.section sc0da
-.section sc0db
-.section sc0ea
-.section sc0eb
-.section sc0fa
-.section sc0fb
-.section sc0ga
-.section sc0gb
-.section sc0ha
-.section sc0hb
-.section sc0ia
-.section sc0ib
-.section sc0ja
-.section sc0jb
-.section sc0ka
-.section sc0kb
-.section sc0la
-.section sc0lb
-.section sc0ma
-.section sc0mb
-.section sc0na
-.section sc0nb
-.section sc0oa
-.section sc0ob
-.section sc0pa
-.section sc0pb
-.section sc0qa
-.section sc0qb
-.section sc0ra
-.section sc0rb
-.section sc0sa
-.section sc0sb
-.section sc0ta
-.section sc0tb
-.section sc0ua
-.section sc0ub
-.section sc0va
-.section sc0vb
-.section sc0wa
-.section sc0wb
-.section sc0xa
-.section sc0xb
-.section sc0ya
-.section sc0yb
-.section sc0za
-.section sc0zb
-.section sc01a
-.section sc01b
-.section sc02a
-.section sc02b
-.section sc03a
-.section sc03b
-.section sc04a
-.section sc04b
-.section sc05a
-.section sc05b
-.section sc06a
-.section sc06b
-.section sc07a
-.section sc07b
-.section sc08a
-.section sc08b
-.section sc09a
-.section sc09b
-.section sc00a
-.section sc00b
-.section sdaaa
-.section sdaab
-.section sdaba
-.section sdabb
-.section sdaca
-.section sdacb
-.section sdada
-.section sdadb
-.section sdaea
-.section sdaeb
-.section sdafa
-.section sdafb
-.section sdaga
-.section sdagb
-.section sdaha
-.section sdahb
-.section sdaia
-.section sdaib
-.section sdaja
-.section sdajb
-.section sdaka
-.section sdakb
-.section sdala
-.section sdalb
-.section sdama
-.section sdamb
-.section sdana
-.section sdanb
-.section sdaoa
-.section sdaob
-.section sdapa
-.section sdapb
-.section sdaqa
-.section sdaqb
-.section sdara
-.section sdarb
-.section sdasa
-.section sdasb
-.section sdata
-.section sdatb
-.section sdaua
-.section sdaub
-.section sdava
-.section sdavb
-.section sdawa
-.section sdawb
-.section sdaxa
-.section sdaxb
-.section sdaya
-.section sdayb
-.section sdaza
-.section sdazb
-.section sda1a
-.section sda1b
-.section sda2a
-.section sda2b
-.section sda3a
-.section sda3b
-.section sda4a
-.section sda4b
-.section sda5a
-.section sda5b
-.section sda6a
-.section sda6b
-.section sda7a
-.section sda7b
-.section sda8a
-.section sda8b
-.section sda9a
-.section sda9b
-.section sda0a
-.section sda0b
-.section sdbaa
-.section sdbab
-.section sdbba
-.section sdbbb
-.section sdbca
-.section sdbcb
-.section sdbda
-.section sdbdb
-.section sdbea
-.section sdbeb
-.section sdbfa
-.section sdbfb
-.section sdbga
-.section sdbgb
-.section sdbha
-.section sdbhb
-.section sdbia
-.section sdbib
-.section sdbja
-.section sdbjb
-.section sdbka
-.section sdbkb
-.section sdbla
-.section sdblb
-.section sdbma
-.section sdbmb
-.section sdbna
-.section sdbnb
-.section sdboa
-.section sdbob
-.section sdbpa
-.section sdbpb
-.section sdbqa
-.section sdbqb
-.section sdbra
-.section sdbrb
-.section sdbsa
-.section sdbsb
-.section sdbta
-.section sdbtb
-.section sdbua
-.section sdbub
-.section sdbva
-.section sdbvb
-.section sdbwa
-.section sdbwb
-.section sdbxa
-.section sdbxb
-.section sdbya
-.section sdbyb
-.section sdbza
-.section sdbzb
-.section sdb1a
-.section sdb1b
-.section sdb2a
-.section sdb2b
-.section sdb3a
-.section sdb3b
-.section sdb4a
-.section sdb4b
-.section sdb5a
-.section sdb5b
-.section sdb6a
-.section sdb6b
-.section sdb7a
-.section sdb7b
-.section sdb8a
-.section sdb8b
-.section sdb9a
-.section sdb9b
-.section sdb0a
-.section sdb0b
-.section sdcaa
-.section sdcab
-.section sdcba
-.section sdcbb
-.section sdcca
-.section sdccb
-.section sdcda
-.section sdcdb
-.section sdcea
-.section sdceb
-.section sdcfa
-.section sdcfb
-.section sdcga
-.section sdcgb
-.section sdcha
-.section sdchb
-.section sdcia
-.section sdcib
-.section sdcja
-.section sdcjb
-.section sdcka
-.section sdckb
-.section sdcla
-.section sdclb
-.section sdcma
-.section sdcmb
-.section sdcna
-.section sdcnb
-.section sdcoa
-.section sdcob
-.section sdcpa
-.section sdcpb
-.section sdcqa
-.section sdcqb
-.section sdcra
-.section sdcrb
-.section sdcsa
-.section sdcsb
-.section sdcta
-.section sdctb
-.section sdcua
-.section sdcub
-.section sdcva
-.section sdcvb
-.section sdcwa
-.section sdcwb
-.section sdcxa
-.section sdcxb
-.section sdcya
-.section sdcyb
-.section sdcza
-.section sdczb
-.section sdc1a
-.section sdc1b
-.section sdc2a
-.section sdc2b
-.section sdc3a
-.section sdc3b
-.section sdc4a
-.section sdc4b
-.section sdc5a
-.section sdc5b
-.section sdc6a
-.section sdc6b
-.section sdc7a
-.section sdc7b
-.section sdc8a
-.section sdc8b
-.section sdc9a
-.section sdc9b
-.section sdc0a
-.section sdc0b
-.section sddaa
-.section sddab
-.section sddba
-.section sddbb
-.section sddca
-.section sddcb
-.section sddda
-.section sdddb
-.section sddea
-.section sddeb
-.section sddfa
-.section sddfb
-.section sddga
-.section sddgb
-.section sddha
-.section sddhb
-.section sddia
-.section sddib
-.section sddja
-.section sddjb
-.section sddka
-.section sddkb
-.section sddla
-.section sddlb
-.section sddma
-.section sddmb
-.section sddna
-.section sddnb
-.section sddoa
-.section sddob
-.section sddpa
-.section sddpb
-.section sddqa
-.section sddqb
-.section sddra
-.section sddrb
-.section sddsa
-.section sddsb
-.section sddta
-.section sddtb
-.section sddua
-.section sddub
-.section sddva
-.section sddvb
-.section sddwa
-.section sddwb
-.section sddxa
-.section sddxb
-.section sddya
-.section sddyb
-.section sddza
-.section sddzb
-.section sdd1a
-.section sdd1b
-.section sdd2a
-.section sdd2b
-.section sdd3a
-.section sdd3b
-.section sdd4a
-.section sdd4b
-.section sdd5a
-.section sdd5b
-.section sdd6a
-.section sdd6b
-.section sdd7a
-.section sdd7b
-.section sdd8a
-.section sdd8b
-.section sdd9a
-.section sdd9b
-.section sdd0a
-.section sdd0b
-.section sdeaa
-.section sdeab
-.section sdeba
-.section sdebb
-.section sdeca
-.section sdecb
-.section sdeda
-.section sdedb
-.section sdeea
-.section sdeeb
-.section sdefa
-.section sdefb
-.section sdega
-.section sdegb
-.section sdeha
-.section sdehb
-.section sdeia
-.section sdeib
-.section sdeja
-.section sdejb
-.section sdeka
-.section sdekb
-.section sdela
-.section sdelb
-.section sdema
-.section sdemb
-.section sdena
-.section sdenb
-.section sdeoa
-.section sdeob
-.section sdepa
-.section sdepb
-.section sdeqa
-.section sdeqb
-.section sdera
-.section sderb
-.section sdesa
-.section sdesb
-.section sdeta
-.section sdetb
-.section sdeua
-.section sdeub
-.section sdeva
-.section sdevb
-.section sdewa
-.section sdewb
-.section sdexa
-.section sdexb
-.section sdeya
-.section sdeyb
-.section sdeza
-.section sdezb
-.section sde1a
-.section sde1b
-.section sde2a
-.section sde2b
-.section sde3a
-.section sde3b
-.section sde4a
-.section sde4b
-.section sde5a
-.section sde5b
-.section sde6a
-.section sde6b
-.section sde7a
-.section sde7b
-.section sde8a
-.section sde8b
-.section sde9a
-.section sde9b
-.section sde0a
-.section sde0b
-.section sdfaa
-.section sdfab
-.section sdfba
-.section sdfbb
-.section sdfca
-.section sdfcb
-.section sdfda
-.section sdfdb
-.section sdfea
-.section sdfeb
-.section sdffa
-.section sdffb
-.section sdfga
-.section sdfgb
-.section sdfha
-.section sdfhb
-.section sdfia
-.section sdfib
-.section sdfja
-.section sdfjb
-.section sdfka
-.section sdfkb
-.section sdfla
-.section sdflb
-.section sdfma
-.section sdfmb
-.section sdfna
-.section sdfnb
-.section sdfoa
-.section sdfob
-.section sdfpa
-.section sdfpb
-.section sdfqa
-.section sdfqb
-.section sdfra
-.section sdfrb
-.section sdfsa
-.section sdfsb
-.section sdfta
-.section sdftb
-.section sdfua
-.section sdfub
-.section sdfva
-.section sdfvb
-.section sdfwa
-.section sdfwb
-.section sdfxa
-.section sdfxb
-.section sdfya
-.section sdfyb
-.section sdfza
-.section sdfzb
-.section sdf1a
-.section sdf1b
-.section sdf2a
-.section sdf2b
-.section sdf3a
-.section sdf3b
-.section sdf4a
-.section sdf4b
-.section sdf5a
-.section sdf5b
-.section sdf6a
-.section sdf6b
-.section sdf7a
-.section sdf7b
-.section sdf8a
-.section sdf8b
-.section sdf9a
-.section sdf9b
-.section sdf0a
-.section sdf0b
-.section sdgaa
-.section sdgab
-.section sdgba
-.section sdgbb
-.section sdgca
-.section sdgcb
-.section sdgda
-.section sdgdb
-.section sdgea
-.section sdgeb
-.section sdgfa
-.section sdgfb
-.section sdgga
-.section sdggb
-.section sdgha
-.section sdghb
-.section sdgia
-.section sdgib
-.section sdgja
-.section sdgjb
-.section sdgka
-.section sdgkb
-.section sdgla
-.section sdglb
-.section sdgma
-.section sdgmb
-.section sdgna
-.section sdgnb
-.section sdgoa
-.section sdgob
-.section sdgpa
-.section sdgpb
-.section sdgqa
-.section sdgqb
-.section sdgra
-.section sdgrb
-.section sdgsa
-.section sdgsb
-.section sdgta
-.section sdgtb
-.section sdgua
-.section sdgub
-.section sdgva
-.section sdgvb
-.section sdgwa
-.section sdgwb
-.section sdgxa
-.section sdgxb
-.section sdgya
-.section sdgyb
-.section sdgza
-.section sdgzb
-.section sdg1a
-.section sdg1b
-.section sdg2a
-.section sdg2b
-.section sdg3a
-.section sdg3b
-.section sdg4a
-.section sdg4b
-.section sdg5a
-.section sdg5b
-.section sdg6a
-.section sdg6b
-.section sdg7a
-.section sdg7b
-.section sdg8a
-.section sdg8b
-.section sdg9a
-.section sdg9b
-.section sdg0a
-.section sdg0b
-.section sdhaa
-.section sdhab
-.section sdhba
-.section sdhbb
-.section sdhca
-.section sdhcb
-.section sdhda
-.section sdhdb
-.section sdhea
-.section sdheb
-.section sdhfa
-.section sdhfb
-.section sdhga
-.section sdhgb
-.section sdhha
-.section sdhhb
-.section sdhia
-.section sdhib
-.section sdhja
-.section sdhjb
-.section sdhka
-.section sdhkb
-.section sdhla
-.section sdhlb
-.section sdhma
-.section sdhmb
-.section sdhna
-.section sdhnb
-.section sdhoa
-.section sdhob
-.section sdhpa
-.section sdhpb
-.section sdhqa
-.section sdhqb
-.section sdhra
-.section sdhrb
-.section sdhsa
-.section sdhsb
-.section sdhta
-.section sdhtb
-.section sdhua
-.section sdhub
-.section sdhva
-.section sdhvb
-.section sdhwa
-.section sdhwb
-.section sdhxa
-.section sdhxb
-.section sdhya
-.section sdhyb
-.section sdhza
-.section sdhzb
-.section sdh1a
-.section sdh1b
-.section sdh2a
-.section sdh2b
-.section sdh3a
-.section sdh3b
-.section sdh4a
-.section sdh4b
-.section sdh5a
-.section sdh5b
-.section sdh6a
-.section sdh6b
-.section sdh7a
-.section sdh7b
-.section sdh8a
-.section sdh8b
-.section sdh9a
-.section sdh9b
-.section sdh0a
-.section sdh0b
-.section sdiaa
-.section sdiab
-.section sdiba
-.section sdibb
-.section sdica
-.section sdicb
-.section sdida
-.section sdidb
-.section sdiea
-.section sdieb
-.section sdifa
-.section sdifb
-.section sdiga
-.section sdigb
-.section sdiha
-.section sdihb
-.section sdiia
-.section sdiib
-.section sdija
-.section sdijb
-.section sdika
-.section sdikb
-.section sdila
-.section sdilb
-.section sdima
-.section sdimb
-.section sdina
-.section sdinb
-.section sdioa
-.section sdiob
-.section sdipa
-.section sdipb
-.section sdiqa
-.section sdiqb
-.section sdira
-.section sdirb
-.section sdisa
-.section sdisb
-.section sdita
-.section sditb
-.section sdiua
-.section sdiub
-.section sdiva
-.section sdivb
-.section sdiwa
-.section sdiwb
-.section sdixa
-.section sdixb
-.section sdiya
-.section sdiyb
-.section sdiza
-.section sdizb
-.section sdi1a
-.section sdi1b
-.section sdi2a
-.section sdi2b
-.section sdi3a
-.section sdi3b
-.section sdi4a
-.section sdi4b
-.section sdi5a
-.section sdi5b
-.section sdi6a
-.section sdi6b
-.section sdi7a
-.section sdi7b
-.section sdi8a
-.section sdi8b
-.section sdi9a
-.section sdi9b
-.section sdi0a
-.section sdi0b
-.section sdjaa
-.section sdjab
-.section sdjba
-.section sdjbb
-.section sdjca
-.section sdjcb
-.section sdjda
-.section sdjdb
-.section sdjea
-.section sdjeb
-.section sdjfa
-.section sdjfb
-.section sdjga
-.section sdjgb
-.section sdjha
-.section sdjhb
-.section sdjia
-.section sdjib
-.section sdjja
-.section sdjjb
-.section sdjka
-.section sdjkb
-.section sdjla
-.section sdjlb
-.section sdjma
-.section sdjmb
-.section sdjna
-.section sdjnb
-.section sdjoa
-.section sdjob
-.section sdjpa
-.section sdjpb
-.section sdjqa
-.section sdjqb
-.section sdjra
-.section sdjrb
-.section sdjsa
-.section sdjsb
-.section sdjta
-.section sdjtb
-.section sdjua
-.section sdjub
-.section sdjva
-.section sdjvb
-.section sdjwa
-.section sdjwb
-.section sdjxa
-.section sdjxb
-.section sdjya
-.section sdjyb
-.section sdjza
-.section sdjzb
-.section sdj1a
-.section sdj1b
-.section sdj2a
-.section sdj2b
-.section sdj3a
-.section sdj3b
-.section sdj4a
-.section sdj4b
-.section sdj5a
-.section sdj5b
-.section sdj6a
-.section sdj6b
-.section sdj7a
-.section sdj7b
-.section sdj8a
-.section sdj8b
-.section sdj9a
-.section sdj9b
-.section sdj0a
-.section sdj0b
-.section sdkaa
-.section sdkab
-.section sdkba
-.section sdkbb
-.section sdkca
-.section sdkcb
-.section sdkda
-.section sdkdb
-.section sdkea
-.section sdkeb
-.section sdkfa
-.section sdkfb
-.section sdkga
-.section sdkgb
-.section sdkha
-.section sdkhb
-.section sdkia
-.section sdkib
-.section sdkja
-.section sdkjb
-.section sdkka
-.section sdkkb
-.section sdkla
-.section sdklb
-.section sdkma
-.section sdkmb
-.section sdkna
-.section sdknb
-.section sdkoa
-.section sdkob
-.section sdkpa
-.section sdkpb
-.section sdkqa
-.section sdkqb
-.section sdkra
-.section sdkrb
-.section sdksa
-.section sdksb
-.section sdkta
-.section sdktb
-.section sdkua
-.section sdkub
-.section sdkva
-.section sdkvb
-.section sdkwa
-.section sdkwb
-.section sdkxa
-.section sdkxb
-.section sdkya
-.section sdkyb
-.section sdkza
-.section sdkzb
-.section sdk1a
-.section sdk1b
-.section sdk2a
-.section sdk2b
-.section sdk3a
-.section sdk3b
-.section sdk4a
-.section sdk4b
-.section sdk5a
-.section sdk5b
-.section sdk6a
-.section sdk6b
-.section sdk7a
-.section sdk7b
-.section sdk8a
-.section sdk8b
-.section sdk9a
-.section sdk9b
-.section sdk0a
-.section sdk0b
-.section sdlaa
-.section sdlab
-.section sdlba
-.section sdlbb
-.section sdlca
-.section sdlcb
-.section sdlda
-.section sdldb
-.section sdlea
-.section sdleb
-.section sdlfa
-.section sdlfb
-.section sdlga
-.section sdlgb
-.section sdlha
-.section sdlhb
-.section sdlia
-.section sdlib
-.section sdlja
-.section sdljb
-.section sdlka
-.section sdlkb
-.section sdlla
-.section sdllb
-.section sdlma
-.section sdlmb
-.section sdlna
-.section sdlnb
-.section sdloa
-.section sdlob
-.section sdlpa
-.section sdlpb
-.section sdlqa
-.section sdlqb
-.section sdlra
-.section sdlrb
-.section sdlsa
-.section sdlsb
-.section sdlta
-.section sdltb
-.section sdlua
-.section sdlub
-.section sdlva
-.section sdlvb
-.section sdlwa
-.section sdlwb
-.section sdlxa
-.section sdlxb
-.section sdlya
-.section sdlyb
-.section sdlza
-.section sdlzb
-.section sdl1a
-.section sdl1b
-.section sdl2a
-.section sdl2b
-.section sdl3a
-.section sdl3b
-.section sdl4a
-.section sdl4b
-.section sdl5a
-.section sdl5b
-.section sdl6a
-.section sdl6b
-.section sdl7a
-.section sdl7b
-.section sdl8a
-.section sdl8b
-.section sdl9a
-.section sdl9b
-.section sdl0a
-.section sdl0b
-.section sdmaa
-.section sdmab
-.section sdmba
-.section sdmbb
-.section sdmca
-.section sdmcb
-.section sdmda
-.section sdmdb
-.section sdmea
-.section sdmeb
-.section sdmfa
-.section sdmfb
-.section sdmga
-.section sdmgb
-.section sdmha
-.section sdmhb
-.section sdmia
-.section sdmib
-.section sdmja
-.section sdmjb
-.section sdmka
-.section sdmkb
-.section sdmla
-.section sdmlb
-.section sdmma
-.section sdmmb
-.section sdmna
-.section sdmnb
-.section sdmoa
-.section sdmob
-.section sdmpa
-.section sdmpb
-.section sdmqa
-.section sdmqb
-.section sdmra
-.section sdmrb
-.section sdmsa
-.section sdmsb
-.section sdmta
-.section sdmtb
-.section sdmua
-.section sdmub
-.section sdmva
-.section sdmvb
-.section sdmwa
-.section sdmwb
-.section sdmxa
-.section sdmxb
-.section sdmya
-.section sdmyb
-.section sdmza
-.section sdmzb
-.section sdm1a
-.section sdm1b
-.section sdm2a
-.section sdm2b
-.section sdm3a
-.section sdm3b
-.section sdm4a
-.section sdm4b
-.section sdm5a
-.section sdm5b
-.section sdm6a
-.section sdm6b
-.section sdm7a
-.section sdm7b
-.section sdm8a
-.section sdm8b
-.section sdm9a
-.section sdm9b
-.section sdm0a
-.section sdm0b
-.section sdnaa
-.section sdnab
-.section sdnba
-.section sdnbb
-.section sdnca
-.section sdncb
-.section sdnda
-.section sdndb
-.section sdnea
-.section sdneb
-.section sdnfa
-.section sdnfb
-.section sdnga
-.section sdngb
-.section sdnha
-.section sdnhb
-.section sdnia
-.section sdnib
-.section sdnja
-.section sdnjb
-.section sdnka
-.section sdnkb
-.section sdnla
-.section sdnlb
-.section sdnma
-.section sdnmb
-.section sdnna
-.section sdnnb
-.section sdnoa
-.section sdnob
-.section sdnpa
-.section sdnpb
-.section sdnqa
-.section sdnqb
-.section sdnra
-.section sdnrb
-.section sdnsa
-.section sdnsb
-.section sdnta
-.section sdntb
-.section sdnua
-.section sdnub
-.section sdnva
-.section sdnvb
-.section sdnwa
-.section sdnwb
-.section sdnxa
-.section sdnxb
-.section sdnya
-.section sdnyb
-.section sdnza
-.section sdnzb
-.section sdn1a
-.section sdn1b
-.section sdn2a
-.section sdn2b
-.section sdn3a
-.section sdn3b
-.section sdn4a
-.section sdn4b
-.section sdn5a
-.section sdn5b
-.section sdn6a
-.section sdn6b
-.section sdn7a
-.section sdn7b
-.section sdn8a
-.section sdn8b
-.section sdn9a
-.section sdn9b
-.section sdn0a
-.section sdn0b
-.section sdoaa
-.section sdoab
-.section sdoba
-.section sdobb
-.section sdoca
-.section sdocb
-.section sdoda
-.section sdodb
-.section sdoea
-.section sdoeb
-.section sdofa
-.section sdofb
-.section sdoga
-.section sdogb
-.section sdoha
-.section sdohb
-.section sdoia
-.section sdoib
-.section sdoja
-.section sdojb
-.section sdoka
-.section sdokb
-.section sdola
-.section sdolb
-.section sdoma
-.section sdomb
-.section sdona
-.section sdonb
-.section sdooa
-.section sdoob
-.section sdopa
-.section sdopb
-.section sdoqa
-.section sdoqb
-.section sdora
-.section sdorb
-.section sdosa
-.section sdosb
-.section sdota
-.section sdotb
-.section sdoua
-.section sdoub
-.section sdova
-.section sdovb
-.section sdowa
-.section sdowb
-.section sdoxa
-.section sdoxb
-.section sdoya
-.section sdoyb
-.section sdoza
-.section sdozb
-.section sdo1a
-.section sdo1b
-.section sdo2a
-.section sdo2b
-.section sdo3a
-.section sdo3b
-.section sdo4a
-.section sdo4b
-.section sdo5a
-.section sdo5b
-.section sdo6a
-.section sdo6b
-.section sdo7a
-.section sdo7b
-.section sdo8a
-.section sdo8b
-.section sdo9a
-.section sdo9b
-.section sdo0a
-.section sdo0b
-.section sdpaa
-.section sdpab
-.section sdpba
-.section sdpbb
-.section sdpca
-.section sdpcb
-.section sdpda
-.section sdpdb
-.section sdpea
-.section sdpeb
-.section sdpfa
-.section sdpfb
-.section sdpga
-.section sdpgb
-.section sdpha
-.section sdphb
-.section sdpia
-.section sdpib
-.section sdpja
-.section sdpjb
-.section sdpka
-.section sdpkb
-.section sdpla
-.section sdplb
-.section sdpma
-.section sdpmb
-.section sdpna
-.section sdpnb
-.section sdpoa
-.section sdpob
-.section sdppa
-.section sdppb
-.section sdpqa
-.section sdpqb
-.section sdpra
-.section sdprb
-.section sdpsa
-.section sdpsb
-.section sdpta
-.section sdptb
-.section sdpua
-.section sdpub
-.section sdpva
-.section sdpvb
-.section sdpwa
-.section sdpwb
-.section sdpxa
-.section sdpxb
-.section sdpya
-.section sdpyb
-.section sdpza
-.section sdpzb
-.section sdp1a
-.section sdp1b
-.section sdp2a
-.section sdp2b
-.section sdp3a
-.section sdp3b
-.section sdp4a
-.section sdp4b
-.section sdp5a
-.section sdp5b
-.section sdp6a
-.section sdp6b
-.section sdp7a
-.section sdp7b
-.section sdp8a
-.section sdp8b
-.section sdp9a
-.section sdp9b
-.section sdp0a
-.section sdp0b
-.section sdqaa
-.section sdqab
-.section sdqba
-.section sdqbb
-.section sdqca
-.section sdqcb
-.section sdqda
-.section sdqdb
-.section sdqea
-.section sdqeb
-.section sdqfa
-.section sdqfb
-.section sdqga
-.section sdqgb
-.section sdqha
-.section sdqhb
-.section sdqia
-.section sdqib
-.section sdqja
-.section sdqjb
-.section sdqka
-.section sdqkb
-.section sdqla
-.section sdqlb
-.section sdqma
-.section sdqmb
-.section sdqna
-.section sdqnb
-.section sdqoa
-.section sdqob
-.section sdqpa
-.section sdqpb
-.section sdqqa
-.section sdqqb
-.section sdqra
-.section sdqrb
-.section sdqsa
-.section sdqsb
-.section sdqta
-.section sdqtb
-.section sdqua
-.section sdqub
-.section sdqva
-.section sdqvb
-.section sdqwa
-.section sdqwb
-.section sdqxa
-.section sdqxb
-.section sdqya
-.section sdqyb
-.section sdqza
-.section sdqzb
-.section sdq1a
-.section sdq1b
-.section sdq2a
-.section sdq2b
-.section sdq3a
-.section sdq3b
-.section sdq4a
-.section sdq4b
-.section sdq5a
-.section sdq5b
-.section sdq6a
-.section sdq6b
-.section sdq7a
-.section sdq7b
-.section sdq8a
-.section sdq8b
-.section sdq9a
-.section sdq9b
-.section sdq0a
-.section sdq0b
-.section sdraa
-.section sdrab
-.section sdrba
-.section sdrbb
-.section sdrca
-.section sdrcb
-.section sdrda
-.section sdrdb
-.section sdrea
-.section sdreb
-.section sdrfa
-.section sdrfb
-.section sdrga
-.section sdrgb
-.section sdrha
-.section sdrhb
-.section sdria
-.section sdrib
-.section sdrja
-.section sdrjb
-.section sdrka
-.section sdrkb
-.section sdrla
-.section sdrlb
-.section sdrma
-.section sdrmb
-.section sdrna
-.section sdrnb
-.section sdroa
-.section sdrob
-.section sdrpa
-.section sdrpb
-.section sdrqa
-.section sdrqb
-.section sdrra
-.section sdrrb
-.section sdrsa
-.section sdrsb
-.section sdrta
-.section sdrtb
-.section sdrua
-.section sdrub
-.section sdrva
-.section sdrvb
-.section sdrwa
-.section sdrwb
-.section sdrxa
-.section sdrxb
-.section sdrya
-.section sdryb
-.section sdrza
-.section sdrzb
-.section sdr1a
-.section sdr1b
-.section sdr2a
-.section sdr2b
-.section sdr3a
-.section sdr3b
-.section sdr4a
-.section sdr4b
-.section sdr5a
-.section sdr5b
-.section sdr6a
-.section sdr6b
-.section sdr7a
-.section sdr7b
-.section sdr8a
-.section sdr8b
-.section sdr9a
-.section sdr9b
-.section sdr0a
-.section sdr0b
-.section sdsaa
-.section sdsab
-.section sdsba
-.section sdsbb
-.section sdsca
-.section sdscb
-.section sdsda
-.section sdsdb
-.section sdsea
-.section sdseb
-.section sdsfa
-.section sdsfb
-.section sdsga
-.section sdsgb
-.section sdsha
-.section sdshb
-.section sdsia
-.section sdsib
-.section sdsja
-.section sdsjb
-.section sdska
-.section sdskb
-.section sdsla
-.section sdslb
-.section sdsma
-.section sdsmb
-.section sdsna
-.section sdsnb
-.section sdsoa
-.section sdsob
-.section sdspa
-.section sdspb
-.section sdsqa
-.section sdsqb
-.section sdsra
-.section sdsrb
-.section sdssa
-.section sdssb
-.section sdsta
-.section sdstb
-.section sdsua
-.section sdsub
-.section sdsva
-.section sdsvb
-.section sdswa
-.section sdswb
-.section sdsxa
-.section sdsxb
-.section sdsya
-.section sdsyb
-.section sdsza
-.section sdszb
-.section sds1a
-.section sds1b
-.section sds2a
-.section sds2b
-.section sds3a
-.section sds3b
-.section sds4a
-.section sds4b
-.section sds5a
-.section sds5b
-.section sds6a
-.section sds6b
-.section sds7a
-.section sds7b
-.section sds8a
-.section sds8b
-.section sds9a
-.section sds9b
-.section sds0a
-.section sds0b
-.section sdtaa
-.section sdtab
-.section sdtba
-.section sdtbb
-.section sdtca
-.section sdtcb
-.section sdtda
-.section sdtdb
-.section sdtea
-.section sdteb
-.section sdtfa
-.section sdtfb
-.section sdtga
-.section sdtgb
-.section sdtha
-.section sdthb
-.section sdtia
-.section sdtib
-.section sdtja
-.section sdtjb
-.section sdtka
-.section sdtkb
-.section sdtla
-.section sdtlb
-.section sdtma
-.section sdtmb
-.section sdtna
-.section sdtnb
-.section sdtoa
-.section sdtob
-.section sdtpa
-.section sdtpb
-.section sdtqa
-.section sdtqb
-.section sdtra
-.section sdtrb
-.section sdtsa
-.section sdtsb
-.section sdtta
-.section sdttb
-.section sdtua
-.section sdtub
-.section sdtva
-.section sdtvb
-.section sdtwa
-.section sdtwb
-.section sdtxa
-.section sdtxb
-.section sdtya
-.section sdtyb
-.section sdtza
-.section sdtzb
-.section sdt1a
-.section sdt1b
-.section sdt2a
-.section sdt2b
-.section sdt3a
-.section sdt3b
-.section sdt4a
-.section sdt4b
-.section sdt5a
-.section sdt5b
-.section sdt6a
-.section sdt6b
-.section sdt7a
-.section sdt7b
-.section sdt8a
-.section sdt8b
-.section sdt9a
-.section sdt9b
-.section sdt0a
-.section sdt0b
-.section sduaa
-.section sduab
-.section sduba
-.section sdubb
-.section sduca
-.section sducb
-.section sduda
-.section sdudb
-.section sduea
-.section sdueb
-.section sdufa
-.section sdufb
-.section sduga
-.section sdugb
-.section sduha
-.section sduhb
-.section sduia
-.section sduib
-.section sduja
-.section sdujb
-.section sduka
-.section sdukb
-.section sdula
-.section sdulb
-.section sduma
-.section sdumb
-.section sduna
-.section sdunb
-.section sduoa
-.section sduob
-.section sdupa
-.section sdupb
-.section sduqa
-.section sduqb
-.section sdura
-.section sdurb
-.section sdusa
-.section sdusb
-.section sduta
-.section sdutb
-.section sduua
-.section sduub
-.section sduva
-.section sduvb
-.section sduwa
-.section sduwb
-.section sduxa
-.section sduxb
-.section sduya
-.section sduyb
-.section sduza
-.section sduzb
-.section sdu1a
-.section sdu1b
-.section sdu2a
-.section sdu2b
-.section sdu3a
-.section sdu3b
-.section sdu4a
-.section sdu4b
-.section sdu5a
-.section sdu5b
-.section sdu6a
-.section sdu6b
-.section sdu7a
-.section sdu7b
-.section sdu8a
-.section sdu8b
-.section sdu9a
-.section sdu9b
-.section sdu0a
-.section sdu0b
-.section sdvaa
-.section sdvab
-.section sdvba
-.section sdvbb
-.section sdvca
-.section sdvcb
-.section sdvda
-.section sdvdb
-.section sdvea
-.section sdveb
-.section sdvfa
-.section sdvfb
-.section sdvga
-.section sdvgb
-.section sdvha
-.section sdvhb
-.section sdvia
-.section sdvib
-.section sdvja
-.section sdvjb
-.section sdvka
-.section sdvkb
-.section sdvla
-.section sdvlb
-.section sdvma
-.section sdvmb
-.section sdvna
-.section sdvnb
-.section sdvoa
-.section sdvob
-.section sdvpa
-.section sdvpb
-.section sdvqa
-.section sdvqb
-.section sdvra
-.section sdvrb
-.section sdvsa
-.section sdvsb
-.section sdvta
-.section sdvtb
-.section sdvua
-.section sdvub
-.section sdvva
-.section sdvvb
-.section sdvwa
-.section sdvwb
-.section sdvxa
-.section sdvxb
-.section sdvya
-.section sdvyb
-.section sdvza
-.section sdvzb
-.section sdv1a
-.section sdv1b
-.section sdv2a
-.section sdv2b
-.section sdv3a
-.section sdv3b
-.section sdv4a
-.section sdv4b
-.section sdv5a
-.section sdv5b
-.section sdv6a
-.section sdv6b
-.section sdv7a
-.section sdv7b
-.section sdv8a
-.section sdv8b
-.section sdv9a
-.section sdv9b
-.section sdv0a
-.section sdv0b
-.section sdwaa
-.section sdwab
-.section sdwba
-.section sdwbb
-.section sdwca
-.section sdwcb
-.section sdwda
-.section sdwdb
-.section sdwea
-.section sdweb
-.section sdwfa
-.section sdwfb
-.section sdwga
-.section sdwgb
-.section sdwha
-.section sdwhb
-.section sdwia
-.section sdwib
-.section sdwja
-.section sdwjb
-.section sdwka
-.section sdwkb
-.section sdwla
-.section sdwlb
-.section sdwma
-.section sdwmb
-.section sdwna
-.section sdwnb
-.section sdwoa
-.section sdwob
-.section sdwpa
-.section sdwpb
-.section sdwqa
-.section sdwqb
-.section sdwra
-.section sdwrb
-.section sdwsa
-.section sdwsb
-.section sdwta
-.section sdwtb
-.section sdwua
-.section sdwub
-.section sdwva
-.section sdwvb
-.section sdwwa
-.section sdwwb
-.section sdwxa
-.section sdwxb
-.section sdwya
-.section sdwyb
-.section sdwza
-.section sdwzb
-.section sdw1a
-.section sdw1b
-.section sdw2a
-.section sdw2b
-.section sdw3a
-.section sdw3b
-.section sdw4a
-.section sdw4b
-.section sdw5a
-.section sdw5b
-.section sdw6a
-.section sdw6b
-.section sdw7a
-.section sdw7b
-.section sdw8a
-.section sdw8b
-.section sdw9a
-.section sdw9b
-.section sdw0a
-.section sdw0b
-.section sdxaa
-.section sdxab
-.section sdxba
-.section sdxbb
-.section sdxca
-.section sdxcb
-.section sdxda
-.section sdxdb
-.section sdxea
-.section sdxeb
-.section sdxfa
-.section sdxfb
-.section sdxga
-.section sdxgb
-.section sdxha
-.section sdxhb
-.section sdxia
-.section sdxib
-.section sdxja
-.section sdxjb
-.section sdxka
-.section sdxkb
-.section sdxla
-.section sdxlb
-.section sdxma
-.section sdxmb
-.section sdxna
-.section sdxnb
-.section sdxoa
-.section sdxob
-.section sdxpa
-.section sdxpb
-.section sdxqa
-.section sdxqb
-.section sdxra
-.section sdxrb
-.section sdxsa
-.section sdxsb
-.section sdxta
-.section sdxtb
-.section sdxua
-.section sdxub
-.section sdxva
-.section sdxvb
-.section sdxwa
-.section sdxwb
-.section sdxxa
-.section sdxxb
-.section sdxya
-.section sdxyb
-.section sdxza
-.section sdxzb
-.section sdx1a
-.section sdx1b
-.section sdx2a
-.section sdx2b
-.section sdx3a
-.section sdx3b
-.section sdx4a
-.section sdx4b
-.section sdx5a
-.section sdx5b
-.section sdx6a
-.section sdx6b
-.section sdx7a
-.section sdx7b
-.section sdx8a
-.section sdx8b
-.section sdx9a
-.section sdx9b
-.section sdx0a
-.section sdx0b
-.section sdyaa
-.section sdyab
-.section sdyba
-.section sdybb
-.section sdyca
-.section sdycb
-.section sdyda
-.section sdydb
-.section sdyea
-.section sdyeb
-.section sdyfa
-.section sdyfb
-.section sdyga
-.section sdygb
-.section sdyha
-.section sdyhb
-.section sdyia
-.section sdyib
-.section sdyja
-.section sdyjb
-.section sdyka
-.section sdykb
-.section sdyla
-.section sdylb
-.section sdyma
-.section sdymb
-.section sdyna
-.section sdynb
-.section sdyoa
-.section sdyob
-.section sdypa
-.section sdypb
-.section sdyqa
-.section sdyqb
-.section sdyra
-.section sdyrb
-.section sdysa
-.section sdysb
-.section sdyta
-.section sdytb
-.section sdyua
-.section sdyub
-.section sdyva
-.section sdyvb
-.section sdywa
-.section sdywb
-.section sdyxa
-.section sdyxb
-.section sdyya
-.section sdyyb
-.section sdyza
-.section sdyzb
-.section sdy1a
-.section sdy1b
-.section sdy2a
-.section sdy2b
-.section sdy3a
-.section sdy3b
-.section sdy4a
-.section sdy4b
-.section sdy5a
-.section sdy5b
-.section sdy6a
-.section sdy6b
-.section sdy7a
-.section sdy7b
-.section sdy8a
-.section sdy8b
-.section sdy9a
-.section sdy9b
-.section sdy0a
-.section sdy0b
-.section sdzaa
-.section sdzab
-.section sdzba
-.section sdzbb
-.section sdzca
-.section sdzcb
-.section sdzda
-.section sdzdb
-.section sdzea
-.section sdzeb
-.section sdzfa
-.section sdzfb
-.section sdzga
-.section sdzgb
-.section sdzha
-.section sdzhb
-.section sdzia
-.section sdzib
-.section sdzja
-.section sdzjb
-.section sdzka
-.section sdzkb
-.section sdzla
-.section sdzlb
-.section sdzma
-.section sdzmb
-.section sdzna
-.section sdznb
-.section sdzoa
-.section sdzob
-.section sdzpa
-.section sdzpb
-.section sdzqa
-.section sdzqb
-.section sdzra
-.section sdzrb
-.section sdzsa
-.section sdzsb
-.section sdzta
-.section sdztb
-.section sdzua
-.section sdzub
-.section sdzva
-.section sdzvb
-.section sdzwa
-.section sdzwb
-.section sdzxa
-.section sdzxb
-.section sdzya
-.section sdzyb
-.section sdzza
-.section sdzzb
-.section sdz1a
-.section sdz1b
-.section sdz2a
-.section sdz2b
-.section sdz3a
-.section sdz3b
-.section sdz4a
-.section sdz4b
-.section sdz5a
-.section sdz5b
-.section sdz6a
-.section sdz6b
-.section sdz7a
-.section sdz7b
-.section sdz8a
-.section sdz8b
-.section sdz9a
-.section sdz9b
-.section sdz0a
-.section sdz0b
-.section sd1aa
-.section sd1ab
-.section sd1ba
-.section sd1bb
-.section sd1ca
-.section sd1cb
-.section sd1da
-.section sd1db
-.section sd1ea
-.section sd1eb
-.section sd1fa
-.section sd1fb
-.section sd1ga
-.section sd1gb
-.section sd1ha
-.section sd1hb
-.section sd1ia
-.section sd1ib
-.section sd1ja
-.section sd1jb
-.section sd1ka
-.section sd1kb
-.section sd1la
-.section sd1lb
-.section sd1ma
-.section sd1mb
-.section sd1na
-.section sd1nb
-.section sd1oa
-.section sd1ob
-.section sd1pa
-.section sd1pb
-.section sd1qa
-.section sd1qb
-.section sd1ra
-.section sd1rb
-.section sd1sa
-.section sd1sb
-.section sd1ta
-.section sd1tb
-.section sd1ua
-.section sd1ub
-.section sd1va
-.section sd1vb
-.section sd1wa
-.section sd1wb
-.section sd1xa
-.section sd1xb
-.section sd1ya
-.section sd1yb
-.section sd1za
-.section sd1zb
-.section sd11a
-.section sd11b
-.section sd12a
-.section sd12b
-.section sd13a
-.section sd13b
-.section sd14a
-.section sd14b
-.section sd15a
-.section sd15b
-.section sd16a
-.section sd16b
-.section sd17a
-.section sd17b
-.section sd18a
-.section sd18b
-.section sd19a
-.section sd19b
-.section sd10a
-.section sd10b
-.section sd2aa
-.section sd2ab
-.section sd2ba
-.section sd2bb
-.section sd2ca
-.section sd2cb
-.section sd2da
-.section sd2db
-.section sd2ea
-.section sd2eb
-.section sd2fa
-.section sd2fb
-.section sd2ga
-.section sd2gb
-.section sd2ha
-.section sd2hb
-.section sd2ia
-.section sd2ib
-.section sd2ja
-.section sd2jb
-.section sd2ka
-.section sd2kb
-.section sd2la
-.section sd2lb
-.section sd2ma
-.section sd2mb
-.section sd2na
-.section sd2nb
-.section sd2oa
-.section sd2ob
-.section sd2pa
-.section sd2pb
-.section sd2qa
-.section sd2qb
-.section sd2ra
-.section sd2rb
-.section sd2sa
-.section sd2sb
-.section sd2ta
-.section sd2tb
-.section sd2ua
-.section sd2ub
-.section sd2va
-.section sd2vb
-.section sd2wa
-.section sd2wb
-.section sd2xa
-.section sd2xb
-.section sd2ya
-.section sd2yb
-.section sd2za
-.section sd2zb
-.section sd21a
-.section sd21b
-.section sd22a
-.section sd22b
-.section sd23a
-.section sd23b
-.section sd24a
-.section sd24b
-.section sd25a
-.section sd25b
-.section sd26a
-.section sd26b
-.section sd27a
-.section sd27b
-.section sd28a
-.section sd28b
-.section sd29a
-.section sd29b
-.section sd20a
-.section sd20b
-.section sd3aa
-.section sd3ab
-.section sd3ba
-.section sd3bb
-.section sd3ca
-.section sd3cb
-.section sd3da
-.section sd3db
-.section sd3ea
-.section sd3eb
-.section sd3fa
-.section sd3fb
-.section sd3ga
-.section sd3gb
-.section sd3ha
-.section sd3hb
-.section sd3ia
-.section sd3ib
-.section sd3ja
-.section sd3jb
-.section sd3ka
-.section sd3kb
-.section sd3la
-.section sd3lb
-.section sd3ma
-.section sd3mb
-.section sd3na
-.section sd3nb
-.section sd3oa
-.section sd3ob
-.section sd3pa
-.section sd3pb
-.section sd3qa
-.section sd3qb
-.section sd3ra
-.section sd3rb
-.section sd3sa
-.section sd3sb
-.section sd3ta
-.section sd3tb
-.section sd3ua
-.section sd3ub
-.section sd3va
-.section sd3vb
-.section sd3wa
-.section sd3wb
-.section sd3xa
-.section sd3xb
-.section sd3ya
-.section sd3yb
-.section sd3za
-.section sd3zb
-.section sd31a
-.section sd31b
-.section sd32a
-.section sd32b
-.section sd33a
-.section sd33b
-.section sd34a
-.section sd34b
-.section sd35a
-.section sd35b
-.section sd36a
-.section sd36b
-.section sd37a
-.section sd37b
-.section sd38a
-.section sd38b
-.section sd39a
-.section sd39b
-.section sd30a
-.section sd30b
-.section sd4aa
-.section sd4ab
-.section sd4ba
-.section sd4bb
-.section sd4ca
-.section sd4cb
-.section sd4da
-.section sd4db
-.section sd4ea
-.section sd4eb
-.section sd4fa
-.section sd4fb
-.section sd4ga
-.section sd4gb
-.section sd4ha
-.section sd4hb
-.section sd4ia
-.section sd4ib
-.section sd4ja
-.section sd4jb
-.section sd4ka
-.section sd4kb
-.section sd4la
-.section sd4lb
-.section sd4ma
-.section sd4mb
-.section sd4na
-.section sd4nb
-.section sd4oa
-.section sd4ob
-.section sd4pa
-.section sd4pb
-.section sd4qa
-.section sd4qb
-.section sd4ra
-.section sd4rb
-.section sd4sa
-.section sd4sb
-.section sd4ta
-.section sd4tb
-.section sd4ua
-.section sd4ub
-.section sd4va
-.section sd4vb
-.section sd4wa
-.section sd4wb
-.section sd4xa
-.section sd4xb
-.section sd4ya
-.section sd4yb
-.section sd4za
-.section sd4zb
-.section sd41a
-.section sd41b
-.section sd42a
-.section sd42b
-.section sd43a
-.section sd43b
-.section sd44a
-.section sd44b
-.section sd45a
-.section sd45b
-.section sd46a
-.section sd46b
-.section sd47a
-.section sd47b
-.section sd48a
-.section sd48b
-.section sd49a
-.section sd49b
-.section sd40a
-.section sd40b
-.section sd5aa
-.section sd5ab
-.section sd5ba
-.section sd5bb
-.section sd5ca
-.section sd5cb
-.section sd5da
-.section sd5db
-.section sd5ea
-.section sd5eb
-.section sd5fa
-.section sd5fb
-.section sd5ga
-.section sd5gb
-.section sd5ha
-.section sd5hb
-.section sd5ia
-.section sd5ib
-.section sd5ja
-.section sd5jb
-.section sd5ka
-.section sd5kb
-.section sd5la
-.section sd5lb
-.section sd5ma
-.section sd5mb
-.section sd5na
-.section sd5nb
-.section sd5oa
-.section sd5ob
-.section sd5pa
-.section sd5pb
-.section sd5qa
-.section sd5qb
-.section sd5ra
-.section sd5rb
-.section sd5sa
-.section sd5sb
-.section sd5ta
-.section sd5tb
-.section sd5ua
-.section sd5ub
-.section sd5va
-.section sd5vb
-.section sd5wa
-.section sd5wb
-.section sd5xa
-.section sd5xb
-.section sd5ya
-.section sd5yb
-.section sd5za
-.section sd5zb
-.section sd51a
-.section sd51b
-.section sd52a
-.section sd52b
-.section sd53a
-.section sd53b
-.section sd54a
-.section sd54b
-.section sd55a
-.section sd55b
-.section sd56a
-.section sd56b
-.section sd57a
-.section sd57b
-.section sd58a
-.section sd58b
-.section sd59a
-.section sd59b
-.section sd50a
-.section sd50b
-.section sd6aa
-.section sd6ab
-.section sd6ba
-.section sd6bb
-.section sd6ca
-.section sd6cb
-.section sd6da
-.section sd6db
-.section sd6ea
-.section sd6eb
-.section sd6fa
-.section sd6fb
-.section sd6ga
-.section sd6gb
-.section sd6ha
-.section sd6hb
-.section sd6ia
-.section sd6ib
-.section sd6ja
-.section sd6jb
-.section sd6ka
-.section sd6kb
-.section sd6la
-.section sd6lb
-.section sd6ma
-.section sd6mb
-.section sd6na
-.section sd6nb
-.section sd6oa
-.section sd6ob
-.section sd6pa
-.section sd6pb
-.section sd6qa
-.section sd6qb
-.section sd6ra
-.section sd6rb
-.section sd6sa
-.section sd6sb
-.section sd6ta
-.section sd6tb
-.section sd6ua
-.section sd6ub
-.section sd6va
-.section sd6vb
-.section sd6wa
-.section sd6wb
-.section sd6xa
-.section sd6xb
-.section sd6ya
-.section sd6yb
-.section sd6za
-.section sd6zb
-.section sd61a
-.section sd61b
-.section sd62a
-.section sd62b
-.section sd63a
-.section sd63b
-.section sd64a
-.section sd64b
-.section sd65a
-.section sd65b
-.section sd66a
-.section sd66b
-.section sd67a
-.section sd67b
-.section sd68a
-.section sd68b
-.section sd69a
-.section sd69b
-.section sd60a
-.section sd60b
-.section sd7aa
-.section sd7ab
-.section sd7ba
-.section sd7bb
-.section sd7ca
-.section sd7cb
-.section sd7da
-.section sd7db
-.section sd7ea
-.section sd7eb
-.section sd7fa
-.section sd7fb
-.section sd7ga
-.section sd7gb
-.section sd7ha
-.section sd7hb
-.section sd7ia
-.section sd7ib
-.section sd7ja
-.section sd7jb
-.section sd7ka
-.section sd7kb
-.section sd7la
-.section sd7lb
-.section sd7ma
-.section sd7mb
-.section sd7na
-.section sd7nb
-.section sd7oa
-.section sd7ob
-.section sd7pa
-.section sd7pb
-.section sd7qa
-.section sd7qb
-.section sd7ra
-.section sd7rb
-.section sd7sa
-.section sd7sb
-.section sd7ta
-.section sd7tb
-.section sd7ua
-.section sd7ub
-.section sd7va
-.section sd7vb
-.section sd7wa
-.section sd7wb
-.section sd7xa
-.section sd7xb
-.section sd7ya
-.section sd7yb
-.section sd7za
-.section sd7zb
-.section sd71a
-.section sd71b
-.section sd72a
-.section sd72b
-.section sd73a
-.section sd73b
-.section sd74a
-.section sd74b
-.section sd75a
-.section sd75b
-.section sd76a
-.section sd76b
-.section sd77a
-.section sd77b
-.section sd78a
-.section sd78b
-.section sd79a
-.section sd79b
-.section sd70a
-.section sd70b
-.section sd8aa
-.section sd8ab
-.section sd8ba
-.section sd8bb
-.section sd8ca
-.section sd8cb
-.section sd8da
-.section sd8db
-.section sd8ea
-.section sd8eb
-.section sd8fa
-.section sd8fb
-.section sd8ga
-.section sd8gb
-.section sd8ha
-.section sd8hb
-.section sd8ia
-.section sd8ib
-.section sd8ja
-.section sd8jb
-.section sd8ka
-.section sd8kb
-.section sd8la
-.section sd8lb
-.section sd8ma
-.section sd8mb
-.section sd8na
-.section sd8nb
-.section sd8oa
-.section sd8ob
-.section sd8pa
-.section sd8pb
-.section sd8qa
-.section sd8qb
-.section sd8ra
-.section sd8rb
-.section sd8sa
-.section sd8sb
-.section sd8ta
-.section sd8tb
-.section sd8ua
-.section sd8ub
-.section sd8va
-.section sd8vb
-.section sd8wa
-.section sd8wb
-.section sd8xa
-.section sd8xb
-.section sd8ya
-.section sd8yb
-.section sd8za
-.section sd8zb
-.section sd81a
-.section sd81b
-.section sd82a
-.section sd82b
-.section sd83a
-.section sd83b
-.section sd84a
-.section sd84b
-.section sd85a
-.section sd85b
-.section sd86a
-.section sd86b
-.section sd87a
-.section sd87b
-.section sd88a
-.section sd88b
-.section sd89a
-.section sd89b
-.section sd80a
-.section sd80b
-.section sd9aa
-.section sd9ab
-.section sd9ba
-.section sd9bb
-.section sd9ca
-.section sd9cb
-.section sd9da
-.section sd9db
-.section sd9ea
-.section sd9eb
-.section sd9fa
-.section sd9fb
-.section sd9ga
-.section sd9gb
-.section sd9ha
-.section sd9hb
-.section sd9ia
-.section sd9ib
-.section sd9ja
-.section sd9jb
-.section sd9ka
-.section sd9kb
-.section sd9la
-.section sd9lb
-.section sd9ma
-.section sd9mb
-.section sd9na
-.section sd9nb
-.section sd9oa
-.section sd9ob
-.section sd9pa
-.section sd9pb
-.section sd9qa
-.section sd9qb
-.section sd9ra
-.section sd9rb
-.section sd9sa
-.section sd9sb
-.section sd9ta
-.section sd9tb
-.section sd9ua
-.section sd9ub
-.section sd9va
-.section sd9vb
-.section sd9wa
-.section sd9wb
-.section sd9xa
-.section sd9xb
-.section sd9ya
-.section sd9yb
-.section sd9za
-.section sd9zb
-.section sd91a
-.section sd91b
-.section sd92a
-.section sd92b
-.section sd93a
-.section sd93b
-.section sd94a
-.section sd94b
-.section sd95a
-.section sd95b
-.section sd96a
-.section sd96b
-.section sd97a
-.section sd97b
-.section sd98a
-.section sd98b
-.section sd99a
-.section sd99b
-.section sd90a
-.section sd90b
-.section sd0aa
-.section sd0ab
-.section sd0ba
-.section sd0bb
-.section sd0ca
-.section sd0cb
-.section sd0da
-.section sd0db
-.section sd0ea
-.section sd0eb
-.section sd0fa
-.section sd0fb
-.section sd0ga
-.section sd0gb
-.section sd0ha
-.section sd0hb
-.section sd0ia
-.section sd0ib
-.section sd0ja
-.section sd0jb
-.section sd0ka
-.section sd0kb
-.section sd0la
-.section sd0lb
-.section sd0ma
-.section sd0mb
-.section sd0na
-.section sd0nb
-.section sd0oa
-.section sd0ob
-.section sd0pa
-.section sd0pb
-.section sd0qa
-.section sd0qb
-.section sd0ra
-.section sd0rb
-.section sd0sa
-.section sd0sb
-.section sd0ta
-.section sd0tb
-.section sd0ua
-.section sd0ub
-.section sd0va
-.section sd0vb
-.section sd0wa
-.section sd0wb
-.section sd0xa
-.section sd0xb
-.section sd0ya
-.section sd0yb
-.section sd0za
-.section sd0zb
-.section sd01a
-.section sd01b
-.section sd02a
-.section sd02b
-.section sd03a
-.section sd03b
-.section sd04a
-.section sd04b
-.section sd05a
-.section sd05b
-.section sd06a
-.section sd06b
-.section sd07a
-.section sd07b
-.section sd08a
-.section sd08b
-.section sd09a
-.section sd09b
-.section sd00a
-.section sd00b
-.section seaaa
-.section seaab
-.section seaba
-.section seabb
-.section seaca
-.section seacb
-.section seada
-.section seadb
-.section seaea
-.section seaeb
-.section seafa
-.section seafb
-.section seaga
-.section seagb
-.section seaha
-.section seahb
-.section seaia
-.section seaib
-.section seaja
-.section seajb
-.section seaka
-.section seakb
-.section seala
-.section sealb
-.section seama
-.section seamb
-.section seana
-.section seanb
-.section seaoa
-.section seaob
-.section seapa
-.section seapb
-.section seaqa
-.section seaqb
-.section seara
-.section searb
-.section seasa
-.section seasb
-.section seata
-.section seatb
-.section seaua
-.section seaub
-.section seava
-.section seavb
-.section seawa
-.section seawb
-.section seaxa
-.section seaxb
-.section seaya
-.section seayb
-.section seaza
-.section seazb
-.section sea1a
-.section sea1b
-.section sea2a
-.section sea2b
-.section sea3a
-.section sea3b
-.section sea4a
-.section sea4b
-.section sea5a
-.section sea5b
-.section sea6a
-.section sea6b
-.section sea7a
-.section sea7b
-.section sea8a
-.section sea8b
-.section sea9a
-.section sea9b
-.section sea0a
-.section sea0b
-.section sebaa
-.section sebab
-.section sebba
-.section sebbb
-.section sebca
-.section sebcb
-.section sebda
-.section sebdb
-.section sebea
-.section sebeb
-.section sebfa
-.section sebfb
-.section sebga
-.section sebgb
-.section sebha
-.section sebhb
-.section sebia
-.section sebib
-.section sebja
-.section sebjb
-.section sebka
-.section sebkb
-.section sebla
-.section seblb
-.section sebma
-.section sebmb
-.section sebna
-.section sebnb
-.section seboa
-.section sebob
-.section sebpa
-.section sebpb
-.section sebqa
-.section sebqb
-.section sebra
-.section sebrb
-.section sebsa
-.section sebsb
-.section sebta
-.section sebtb
-.section sebua
-.section sebub
-.section sebva
-.section sebvb
-.section sebwa
-.section sebwb
-.section sebxa
-.section sebxb
-.section sebya
-.section sebyb
-.section sebza
-.section sebzb
-.section seb1a
-.section seb1b
-.section seb2a
-.section seb2b
-.section seb3a
-.section seb3b
-.section seb4a
-.section seb4b
-.section seb5a
-.section seb5b
-.section seb6a
-.section seb6b
-.section seb7a
-.section seb7b
-.section seb8a
-.section seb8b
-.section seb9a
-.section seb9b
-.section seb0a
-.section seb0b
-.section secaa
-.section secab
-.section secba
-.section secbb
-.section secca
-.section seccb
-.section secda
-.section secdb
-.section secea
-.section seceb
-.section secfa
-.section secfb
-.section secga
-.section secgb
-.section secha
-.section sechb
-.section secia
-.section secib
-.section secja
-.section secjb
-.section secka
-.section seckb
-.section secla
-.section seclb
-.section secma
-.section secmb
-.section secna
-.section secnb
-.section secoa
-.section secob
-.section secpa
-.section secpb
-.section secqa
-.section secqb
-.section secra
-.section secrb
-.section secsa
-.section secsb
-.section secta
-.section sectb
-.section secua
-.section secub
-.section secva
-.section secvb
-.section secwa
-.section secwb
-.section secxa
-.section secxb
-.section secya
-.section secyb
-.section secza
-.section seczb
-.section sec1a
-.section sec1b
-.section sec2a
-.section sec2b
-.section sec3a
-.section sec3b
-.section sec4a
-.section sec4b
-.section sec5a
-.section sec5b
-.section sec6a
-.section sec6b
-.section sec7a
-.section sec7b
-.section sec8a
-.section sec8b
-.section sec9a
-.section sec9b
-.section sec0a
-.section sec0b
-.section sedaa
-.section sedab
-.section sedba
-.section sedbb
-.section sedca
-.section sedcb
-.section sedda
-.section seddb
-.section sedea
-.section sedeb
-.section sedfa
-.section sedfb
-.section sedga
-.section sedgb
-.section sedha
-.section sedhb
-.section sedia
-.section sedib
-.section sedja
-.section sedjb
-.section sedka
-.section sedkb
-.section sedla
-.section sedlb
-.section sedma
-.section sedmb
-.section sedna
-.section sednb
-.section sedoa
-.section sedob
-.section sedpa
-.section sedpb
-.section sedqa
-.section sedqb
-.section sedra
-.section sedrb
-.section sedsa
-.section sedsb
-.section sedta
-.section sedtb
-.section sedua
-.section sedub
-.section sedva
-.section sedvb
-.section sedwa
-.section sedwb
-.section sedxa
-.section sedxb
-.section sedya
-.section sedyb
-.section sedza
-.section sedzb
-.section sed1a
-.section sed1b
-.section sed2a
-.section sed2b
-.section sed3a
-.section sed3b
-.section sed4a
-.section sed4b
-.section sed5a
-.section sed5b
-.section sed6a
-.section sed6b
-.section sed7a
-.section sed7b
-.section sed8a
-.section sed8b
-.section sed9a
-.section sed9b
-.section sed0a
-.section sed0b
-.section seeaa
-.section seeab
-.section seeba
-.section seebb
-.section seeca
-.section seecb
-.section seeda
-.section seedb
-.section seeea
-.section seeeb
-.section seefa
-.section seefb
-.section seega
-.section seegb
-.section seeha
-.section seehb
-.section seeia
-.section seeib
-.section seeja
-.section seejb
-.section seeka
-.section seekb
-.section seela
-.section seelb
-.section seema
-.section seemb
-.section seena
-.section seenb
-.section seeoa
-.section seeob
-.section seepa
-.section seepb
-.section seeqa
-.section seeqb
-.section seera
-.section seerb
-.section seesa
-.section seesb
-.section seeta
-.section seetb
-.section seeua
-.section seeub
-.section seeva
-.section seevb
-.section seewa
-.section seewb
-.section seexa
-.section seexb
-.section seeya
-.section seeyb
-.section seeza
-.section seezb
-.section see1a
-.section see1b
-.section see2a
-.section see2b
-.section see3a
-.section see3b
-.section see4a
-.section see4b
-.section see5a
-.section see5b
-.section see6a
-.section see6b
-.section see7a
-.section see7b
-.section see8a
-.section see8b
-.section see9a
-.section see9b
-.section see0a
-.section see0b
-.section sefaa
-.section sefab
-.section sefba
-.section sefbb
-.section sefca
-.section sefcb
-.section sefda
-.section sefdb
-.section sefea
-.section sefeb
-.section seffa
-.section seffb
-.section sefga
-.section sefgb
-.section sefha
-.section sefhb
-.section sefia
-.section sefib
-.section sefja
-.section sefjb
-.section sefka
-.section sefkb
-.section sefla
-.section seflb
-.section sefma
-.section sefmb
-.section sefna
-.section sefnb
-.section sefoa
-.section sefob
-.section sefpa
-.section sefpb
-.section sefqa
-.section sefqb
-.section sefra
-.section sefrb
-.section sefsa
-.section sefsb
-.section sefta
-.section seftb
-.section sefua
-.section sefub
-.section sefva
-.section sefvb
-.section sefwa
-.section sefwb
-.section sefxa
-.section sefxb
-.section sefya
-.section sefyb
-.section sefza
-.section sefzb
-.section sef1a
-.section sef1b
-.section sef2a
-.section sef2b
-.section sef3a
-.section sef3b
-.section sef4a
-.section sef4b
-.section sef5a
-.section sef5b
-.section sef6a
-.section sef6b
-.section sef7a
-.section sef7b
-.section sef8a
-.section sef8b
-.section sef9a
-.section sef9b
-.section sef0a
-.section sef0b
-.section segaa
-.section segab
-.section segba
-.section segbb
-.section segca
-.section segcb
-.section segda
-.section segdb
-.section segea
-.section segeb
-.section segfa
-.section segfb
-.section segga
-.section seggb
-.section segha
-.section seghb
-.section segia
-.section segib
-.section segja
-.section segjb
-.section segka
-.section segkb
-.section segla
-.section seglb
-.section segma
-.section segmb
-.section segna
-.section segnb
-.section segoa
-.section segob
-.section segpa
-.section segpb
-.section segqa
-.section segqb
-.section segra
-.section segrb
-.section segsa
-.section segsb
-.section segta
-.section segtb
-.section segua
-.section segub
-.section segva
-.section segvb
-.section segwa
-.section segwb
-.section segxa
-.section segxb
-.section segya
-.section segyb
-.section segza
-.section segzb
-.section seg1a
-.section seg1b
-.section seg2a
-.section seg2b
-.section seg3a
-.section seg3b
-.section seg4a
-.section seg4b
-.section seg5a
-.section seg5b
-.section seg6a
-.section seg6b
-.section seg7a
-.section seg7b
-.section seg8a
-.section seg8b
-.section seg9a
-.section seg9b
-.section seg0a
-.section seg0b
-.section sehaa
-.section sehab
-.section sehba
-.section sehbb
-.section sehca
-.section sehcb
-.section sehda
-.section sehdb
-.section sehea
-.section seheb
-.section sehfa
-.section sehfb
-.section sehga
-.section sehgb
-.section sehha
-.section sehhb
-.section sehia
-.section sehib
-.section sehja
-.section sehjb
-.section sehka
-.section sehkb
-.section sehla
-.section sehlb
-.section sehma
-.section sehmb
-.section sehna
-.section sehnb
-.section sehoa
-.section sehob
-.section sehpa
-.section sehpb
-.section sehqa
-.section sehqb
-.section sehra
-.section sehrb
-.section sehsa
-.section sehsb
-.section sehta
-.section sehtb
-.section sehua
-.section sehub
-.section sehva
-.section sehvb
-.section sehwa
-.section sehwb
-.section sehxa
-.section sehxb
-.section sehya
-.section sehyb
-.section sehza
-.section sehzb
-.section seh1a
-.section seh1b
-.section seh2a
-.section seh2b
-.section seh3a
-.section seh3b
-.section seh4a
-.section seh4b
-.section seh5a
-.section seh5b
-.section seh6a
-.section seh6b
-.section seh7a
-.section seh7b
-.section seh8a
-.section seh8b
-.section seh9a
-.section seh9b
-.section seh0a
-.section seh0b
-.section seiaa
-.section seiab
-.section seiba
-.section seibb
-.section seica
-.section seicb
-.section seida
-.section seidb
-.section seiea
-.section seieb
-.section seifa
-.section seifb
-.section seiga
-.section seigb
-.section seiha
-.section seihb
-.section seiia
-.section seiib
-.section seija
-.section seijb
-.section seika
-.section seikb
-.section seila
-.section seilb
-.section seima
-.section seimb
-.section seina
-.section seinb
-.section seioa
-.section seiob
-.section seipa
-.section seipb
-.section seiqa
-.section seiqb
-.section seira
-.section seirb
-.section seisa
-.section seisb
-.section seita
-.section seitb
-.section seiua
-.section seiub
-.section seiva
-.section seivb
-.section seiwa
-.section seiwb
-.section seixa
-.section seixb
-.section seiya
-.section seiyb
-.section seiza
-.section seizb
-.section sei1a
-.section sei1b
-.section sei2a
-.section sei2b
-.section sei3a
-.section sei3b
-.section sei4a
-.section sei4b
-.section sei5a
-.section sei5b
-.section sei6a
-.section sei6b
-.section sei7a
-.section sei7b
-.section sei8a
-.section sei8b
-.section sei9a
-.section sei9b
-.section sei0a
-.section sei0b
-.section sejaa
-.section sejab
-.section sejba
-.section sejbb
-.section sejca
-.section sejcb
-.section sejda
-.section sejdb
-.section sejea
-.section sejeb
-.section sejfa
-.section sejfb
-.section sejga
-.section sejgb
-.section sejha
-.section sejhb
-.section sejia
-.section sejib
-.section sejja
-.section sejjb
-.section sejka
-.section sejkb
-.section sejla
-.section sejlb
-.section sejma
-.section sejmb
-.section sejna
-.section sejnb
-.section sejoa
-.section sejob
-.section sejpa
-.section sejpb
-.section sejqa
-.section sejqb
-.section sejra
-.section sejrb
-.section sejsa
-.section sejsb
-.section sejta
-.section sejtb
-.section sejua
-.section sejub
-.section sejva
-.section sejvb
-.section sejwa
-.section sejwb
-.section sejxa
-.section sejxb
-.section sejya
-.section sejyb
-.section sejza
-.section sejzb
-.section sej1a
-.section sej1b
-.section sej2a
-.section sej2b
-.section sej3a
-.section sej3b
-.section sej4a
-.section sej4b
-.section sej5a
-.section sej5b
-.section sej6a
-.section sej6b
-.section sej7a
-.section sej7b
-.section sej8a
-.section sej8b
-.section sej9a
-.section sej9b
-.section sej0a
-.section sej0b
-.section sekaa
-.section sekab
-.section sekba
-.section sekbb
-.section sekca
-.section sekcb
-.section sekda
-.section sekdb
-.section sekea
-.section sekeb
-.section sekfa
-.section sekfb
-.section sekga
-.section sekgb
-.section sekha
-.section sekhb
-.section sekia
-.section sekib
-.section sekja
-.section sekjb
-.section sekka
-.section sekkb
-.section sekla
-.section seklb
-.section sekma
-.section sekmb
-.section sekna
-.section seknb
-.section sekoa
-.section sekob
-.section sekpa
-.section sekpb
-.section sekqa
-.section sekqb
-.section sekra
-.section sekrb
-.section seksa
-.section seksb
-.section sekta
-.section sektb
-.section sekua
-.section sekub
-.section sekva
-.section sekvb
-.section sekwa
-.section sekwb
-.section sekxa
-.section sekxb
-.section sekya
-.section sekyb
-.section sekza
-.section sekzb
-.section sek1a
-.section sek1b
-.section sek2a
-.section sek2b
-.section sek3a
-.section sek3b
-.section sek4a
-.section sek4b
-.section sek5a
-.section sek5b
-.section sek6a
-.section sek6b
-.section sek7a
-.section sek7b
-.section sek8a
-.section sek8b
-.section sek9a
-.section sek9b
-.section sek0a
-.section sek0b
-.section selaa
-.section selab
-.section selba
-.section selbb
-.section selca
-.section selcb
-.section selda
-.section seldb
-.section selea
-.section seleb
-.section selfa
-.section selfb
-.section selga
-.section selgb
-.section selha
-.section selhb
-.section selia
-.section selib
-.section selja
-.section seljb
-.section selka
-.section selkb
-.section sella
-.section sellb
-.section selma
-.section selmb
-.section selna
-.section selnb
-.section seloa
-.section selob
-.section selpa
-.section selpb
-.section selqa
-.section selqb
-.section selra
-.section selrb
-.section selsa
-.section selsb
-.section selta
-.section seltb
-.section selua
-.section selub
-.section selva
-.section selvb
-.section selwa
-.section selwb
-.section selxa
-.section selxb
-.section selya
-.section selyb
-.section selza
-.section selzb
-.section sel1a
-.section sel1b
-.section sel2a
-.section sel2b
-.section sel3a
-.section sel3b
-.section sel4a
-.section sel4b
-.section sel5a
-.section sel5b
-.section sel6a
-.section sel6b
-.section sel7a
-.section sel7b
-.section sel8a
-.section sel8b
-.section sel9a
-.section sel9b
-.section sel0a
-.section sel0b
-.section semaa
-.section semab
-.section semba
-.section sembb
-.section semca
-.section semcb
-.section semda
-.section semdb
-.section semea
-.section semeb
-.section semfa
-.section semfb
-.section semga
-.section semgb
-.section semha
-.section semhb
-.section semia
-.section semib
-.section semja
-.section semjb
-.section semka
-.section semkb
-.section semla
-.section semlb
-.section semma
-.section semmb
-.section semna
-.section semnb
-.section semoa
-.section semob
-.section sempa
-.section sempb
-.section semqa
-.section semqb
-.section semra
-.section semrb
-.section semsa
-.section semsb
-.section semta
-.section semtb
-.section semua
-.section semub
-.section semva
-.section semvb
-.section semwa
-.section semwb
-.section semxa
-.section semxb
-.section semya
-.section semyb
-.section semza
-.section semzb
-.section sem1a
-.section sem1b
-.section sem2a
-.section sem2b
-.section sem3a
-.section sem3b
-.section sem4a
-.section sem4b
-.section sem5a
-.section sem5b
-.section sem6a
-.section sem6b
-.section sem7a
-.section sem7b
-.section sem8a
-.section sem8b
-.section sem9a
-.section sem9b
-.section sem0a
-.section sem0b
-.section senaa
-.section senab
-.section senba
-.section senbb
-.section senca
-.section sencb
-.section senda
-.section sendb
-.section senea
-.section seneb
-.section senfa
-.section senfb
-.section senga
-.section sengb
-.section senha
-.section senhb
-.section senia
-.section senib
-.section senja
-.section senjb
-.section senka
-.section senkb
-.section senla
-.section senlb
-.section senma
-.section senmb
-.section senna
-.section sennb
-.section senoa
-.section senob
-.section senpa
-.section senpb
-.section senqa
-.section senqb
-.section senra
-.section senrb
-.section sensa
-.section sensb
-.section senta
-.section sentb
-.section senua
-.section senub
-.section senva
-.section senvb
-.section senwa
-.section senwb
-.section senxa
-.section senxb
-.section senya
-.section senyb
-.section senza
-.section senzb
-.section sen1a
-.section sen1b
-.section sen2a
-.section sen2b
-.section sen3a
-.section sen3b
-.section sen4a
-.section sen4b
-.section sen5a
-.section sen5b
-.section sen6a
-.section sen6b
-.section sen7a
-.section sen7b
-.section sen8a
-.section sen8b
-.section sen9a
-.section sen9b
-.section sen0a
-.section sen0b
-.section seoaa
-.section seoab
-.section seoba
-.section seobb
-.section seoca
-.section seocb
-.section seoda
-.section seodb
-.section seoea
-.section seoeb
-.section seofa
-.section seofb
-.section seoga
-.section seogb
-.section seoha
-.section seohb
-.section seoia
-.section seoib
-.section seoja
-.section seojb
-.section seoka
-.section seokb
-.section seola
-.section seolb
-.section seoma
-.section seomb
-.section seona
-.section seonb
-.section seooa
-.section seoob
-.section seopa
-.section seopb
-.section seoqa
-.section seoqb
-.section seora
-.section seorb
-.section seosa
-.section seosb
-.section seota
-.section seotb
-.section seoua
-.section seoub
-.section seova
-.section seovb
-.section seowa
-.section seowb
-.section seoxa
-.section seoxb
-.section seoya
-.section seoyb
-.section seoza
-.section seozb
-.section seo1a
-.section seo1b
-.section seo2a
-.section seo2b
-.section seo3a
-.section seo3b
-.section seo4a
-.section seo4b
-.section seo5a
-.section seo5b
-.section seo6a
-.section seo6b
-.section seo7a
-.section seo7b
-.section seo8a
-.section seo8b
-.section seo9a
-.section seo9b
-.section seo0a
-.section seo0b
-.section sepaa
-.section sepab
-.section sepba
-.section sepbb
-.section sepca
-.section sepcb
-.section sepda
-.section sepdb
-.section sepea
-.section sepeb
-.section sepfa
-.section sepfb
-.section sepga
-.section sepgb
-.section sepha
-.section sephb
-.section sepia
-.section sepib
-.section sepja
-.section sepjb
-.section sepka
-.section sepkb
-.section sepla
-.section seplb
-.section sepma
-.section sepmb
-.section sepna
-.section sepnb
-.section sepoa
-.section sepob
-.section seppa
-.section seppb
-.section sepqa
-.section sepqb
-.section sepra
-.section seprb
-.section sepsa
-.section sepsb
-.section septa
-.section septb
-.section sepua
-.section sepub
-.section sepva
-.section sepvb
-.section sepwa
-.section sepwb
-.section sepxa
-.section sepxb
-.section sepya
-.section sepyb
-.section sepza
-.section sepzb
-.section sep1a
-.section sep1b
-.section sep2a
-.section sep2b
-.section sep3a
-.section sep3b
-.section sep4a
-.section sep4b
-.section sep5a
-.section sep5b
-.section sep6a
-.section sep6b
-.section sep7a
-.section sep7b
-.section sep8a
-.section sep8b
-.section sep9a
-.section sep9b
-.section sep0a
-.section sep0b
-.section seqaa
-.section seqab
-.section seqba
-.section seqbb
-.section seqca
-.section seqcb
-.section seqda
-.section seqdb
-.section seqea
-.section seqeb
-.section seqfa
-.section seqfb
-.section seqga
-.section seqgb
-.section seqha
-.section seqhb
-.section seqia
-.section seqib
-.section seqja
-.section seqjb
-.section seqka
-.section seqkb
-.section seqla
-.section seqlb
-.section seqma
-.section seqmb
-.section seqna
-.section seqnb
-.section seqoa
-.section seqob
-.section seqpa
-.section seqpb
-.section seqqa
-.section seqqb
-.section seqra
-.section seqrb
-.section seqsa
-.section seqsb
-.section seqta
-.section seqtb
-.section sequa
-.section sequb
-.section seqva
-.section seqvb
-.section seqwa
-.section seqwb
-.section seqxa
-.section seqxb
-.section seqya
-.section seqyb
-.section seqza
-.section seqzb
-.section seq1a
-.section seq1b
-.section seq2a
-.section seq2b
-.section seq3a
-.section seq3b
-.section seq4a
-.section seq4b
-.section seq5a
-.section seq5b
-.section seq6a
-.section seq6b
-.section seq7a
-.section seq7b
-.section seq8a
-.section seq8b
-.section seq9a
-.section seq9b
-.section seq0a
-.section seq0b
-.section seraa
-.section serab
-.section serba
-.section serbb
-.section serca
-.section sercb
-.section serda
-.section serdb
-.section serea
-.section sereb
-.section serfa
-.section serfb
-.section serga
-.section sergb
-.section serha
-.section serhb
-.section seria
-.section serib
-.section serja
-.section serjb
-.section serka
-.section serkb
-.section serla
-.section serlb
-.section serma
-.section sermb
-.section serna
-.section sernb
-.section seroa
-.section serob
-.section serpa
-.section serpb
-.section serqa
-.section serqb
-.section serra
-.section serrb
-.section sersa
-.section sersb
-.section serta
-.section sertb
-.section serua
-.section serub
-.section serva
-.section servb
-.section serwa
-.section serwb
-.section serxa
-.section serxb
-.section serya
-.section seryb
-.section serza
-.section serzb
-.section ser1a
-.section ser1b
-.section ser2a
-.section ser2b
-.section ser3a
-.section ser3b
-.section ser4a
-.section ser4b
-.section ser5a
-.section ser5b
-.section ser6a
-.section ser6b
-.section ser7a
-.section ser7b
-.section ser8a
-.section ser8b
-.section ser9a
-.section ser9b
-.section ser0a
-.section ser0b
-.section sesaa
-.section sesab
-.section sesba
-.section sesbb
-.section sesca
-.section sescb
-.section sesda
-.section sesdb
-.section sesea
-.section seseb
-.section sesfa
-.section sesfb
-.section sesga
-.section sesgb
-.section sesha
-.section seshb
-.section sesia
-.section sesib
-.section sesja
-.section sesjb
-.section seska
-.section seskb
-.section sesla
-.section seslb
-.section sesma
-.section sesmb
-.section sesna
-.section sesnb
-.section sesoa
-.section sesob
-.section sespa
-.section sespb
-.section sesqa
-.section sesqb
-.section sesra
-.section sesrb
-.section sessa
-.section sessb
-.section sesta
-.section sestb
-.section sesua
-.section sesub
-.section sesva
-.section sesvb
-.section seswa
-.section seswb
-.section sesxa
-.section sesxb
-.section sesya
-.section sesyb
-.section sesza
-.section seszb
-.section ses1a
-.section ses1b
-.section ses2a
-.section ses2b
-.section ses3a
-.section ses3b
-.section ses4a
-.section ses4b
-.section ses5a
-.section ses5b
-.section ses6a
-.section ses6b
-.section ses7a
-.section ses7b
-.section ses8a
-.section ses8b
-.section ses9a
-.section ses9b
-.section ses0a
-.section ses0b
-.section setaa
-.section setab
-.section setba
-.section setbb
-.section setca
-.section setcb
-.section setda
-.section setdb
-.section setea
-.section seteb
-.section setfa
-.section setfb
-.section setga
-.section setgb
-.section setha
-.section sethb
-.section setia
-.section setib
-.section setja
-.section setjb
-.section setka
-.section setkb
-.section setla
-.section setlb
-.section setma
-.section setmb
-.section setna
-.section setnb
-.section setoa
-.section setob
-.section setpa
-.section setpb
-.section setqa
-.section setqb
-.section setra
-.section setrb
-.section setsa
-.section setsb
-.section setta
-.section settb
-.section setua
-.section setub
-.section setva
-.section setvb
-.section setwa
-.section setwb
-.section setxa
-.section setxb
-.section setya
-.section setyb
-.section setza
-.section setzb
-.section set1a
-.section set1b
-.section set2a
-.section set2b
-.section set3a
-.section set3b
-.section set4a
-.section set4b
-.section set5a
-.section set5b
-.section set6a
-.section set6b
-.section set7a
-.section set7b
-.section set8a
-.section set8b
-.section set9a
-.section set9b
-.section set0a
-.section set0b
-.section seuaa
-.section seuab
-.section seuba
-.section seubb
-.section seuca
-.section seucb
-.section seuda
-.section seudb
-.section seuea
-.section seueb
-.section seufa
-.section seufb
-.section seuga
-.section seugb
-.section seuha
-.section seuhb
-.section seuia
-.section seuib
-.section seuja
-.section seujb
-.section seuka
-.section seukb
-.section seula
-.section seulb
-.section seuma
-.section seumb
-.section seuna
-.section seunb
-.section seuoa
-.section seuob
-.section seupa
-.section seupb
-.section seuqa
-.section seuqb
-.section seura
-.section seurb
-.section seusa
-.section seusb
-.section seuta
-.section seutb
-.section seuua
-.section seuub
-.section seuva
-.section seuvb
-.section seuwa
-.section seuwb
-.section seuxa
-.section seuxb
-.section seuya
-.section seuyb
-.section seuza
-.section seuzb
-.section seu1a
-.section seu1b
-.section seu2a
-.section seu2b
-.section seu3a
-.section seu3b
-.section seu4a
-.section seu4b
-.section seu5a
-.section seu5b
-.section seu6a
-.section seu6b
-.section seu7a
-.section seu7b
-.section seu8a
-.section seu8b
-.section seu9a
-.section seu9b
-.section seu0a
-.section seu0b
-.section sevaa
-.section sevab
-.section sevba
-.section sevbb
-.section sevca
-.section sevcb
-.section sevda
-.section sevdb
-.section sevea
-.section seveb
-.section sevfa
-.section sevfb
-.section sevga
-.section sevgb
-.section sevha
-.section sevhb
-.section sevia
-.section sevib
-.section sevja
-.section sevjb
-.section sevka
-.section sevkb
-.section sevla
-.section sevlb
-.section sevma
-.section sevmb
-.section sevna
-.section sevnb
-.section sevoa
-.section sevob
-.section sevpa
-.section sevpb
-.section sevqa
-.section sevqb
-.section sevra
-.section sevrb
-.section sevsa
-.section sevsb
-.section sevta
-.section sevtb
-.section sevua
-.section sevub
-.section sevva
-.section sevvb
-.section sevwa
-.section sevwb
-.section sevxa
-.section sevxb
-.section sevya
-.section sevyb
-.section sevza
-.section sevzb
-.section sev1a
-.section sev1b
-.section sev2a
-.section sev2b
-.section sev3a
-.section sev3b
-.section sev4a
-.section sev4b
-.section sev5a
-.section sev5b
-.section sev6a
-.section sev6b
-.section sev7a
-.section sev7b
-.section sev8a
-.section sev8b
-.section sev9a
-.section sev9b
-.section sev0a
-.section sev0b
-.section sewaa
-.section sewab
-.section sewba
-.section sewbb
-.section sewca
-.section sewcb
-.section sewda
-.section sewdb
-.section sewea
-.section seweb
-.section sewfa
-.section sewfb
-.section sewga
-.section sewgb
-.section sewha
-.section sewhb
-.section sewia
-.section sewib
-.section sewja
-.section sewjb
-.section sewka
-.section sewkb
-.section sewla
-.section sewlb
-.section sewma
-.section sewmb
-.section sewna
-.section sewnb
-.section sewoa
-.section sewob
-.section sewpa
-.section sewpb
-.section sewqa
-.section sewqb
-.section sewra
-.section sewrb
-.section sewsa
-.section sewsb
-.section sewta
-.section sewtb
-.section sewua
-.section sewub
-.section sewva
-.section sewvb
-.section sewwa
-.section sewwb
-.section sewxa
-.section sewxb
-.section sewya
-.section sewyb
-.section sewza
-.section sewzb
-.section sew1a
-.section sew1b
-.section sew2a
-.section sew2b
-.section sew3a
-.section sew3b
-.section sew4a
-.section sew4b
-.section sew5a
-.section sew5b
-.section sew6a
-.section sew6b
-.section sew7a
-.section sew7b
-.section sew8a
-.section sew8b
-.section sew9a
-.section sew9b
-.section sew0a
-.section sew0b
-.section sexaa
-.section sexab
-.section sexba
-.section sexbb
-.section sexca
-.section sexcb
-.section sexda
-.section sexdb
-.section sexea
-.section sexeb
-.section sexfa
-.section sexfb
-.section sexga
-.section sexgb
-.section sexha
-.section sexhb
-.section sexia
-.section sexib
-.section sexja
-.section sexjb
-.section sexka
-.section sexkb
-.section sexla
-.section sexlb
-.section sexma
-.section sexmb
-.section sexna
-.section sexnb
-.section sexoa
-.section sexob
-.section sexpa
-.section sexpb
-.section sexqa
-.section sexqb
-.section sexra
-.section sexrb
-.section sexsa
-.section sexsb
-.section sexta
-.section sextb
-.section sexua
-.section sexub
-.section sexva
-.section sexvb
-.section sexwa
-.section sexwb
-.section sexxa
-.section sexxb
-.section sexya
-.section sexyb
-.section sexza
-.section sexzb
-.section sex1a
-.section sex1b
-.section sex2a
-.section sex2b
-.section sex3a
-.section sex3b
-.section sex4a
-.section sex4b
-.section sex5a
-.section sex5b
-.section sex6a
-.section sex6b
-.section sex7a
-.section sex7b
-.section sex8a
-.section sex8b
-.section sex9a
-.section sex9b
-.section sex0a
-.section sex0b
-.section seyaa
-.section seyab
-.section seyba
-.section seybb
-.section seyca
-.section seycb
-.section seyda
-.section seydb
-.section seyea
-.section seyeb
-.section seyfa
-.section seyfb
-.section seyga
-.section seygb
-.section seyha
-.section seyhb
-.section seyia
-.section seyib
-.section seyja
-.section seyjb
-.section seyka
-.section seykb
-.section seyla
-.section seylb
-.section seyma
-.section seymb
-.section seyna
-.section seynb
-.section seyoa
-.section seyob
-.section seypa
-.section seypb
-.section seyqa
-.section seyqb
-.section seyra
-.section seyrb
-.section seysa
-.section seysb
-.section seyta
-.section seytb
-.section seyua
-.section seyub
-.section seyva
-.section seyvb
-.section seywa
-.section seywb
-.section seyxa
-.section seyxb
-.section seyya
-.section seyyb
-.section seyza
-.section seyzb
-.section sey1a
-.section sey1b
-.section sey2a
-.section sey2b
-.section sey3a
-.section sey3b
-.section sey4a
-.section sey4b
-.section sey5a
-.section sey5b
-.section sey6a
-.section sey6b
-.section sey7a
-.section sey7b
-.section sey8a
-.section sey8b
-.section sey9a
-.section sey9b
-.section sey0a
-.section sey0b
-.section sezaa
-.section sezab
-.section sezba
-.section sezbb
-.section sezca
-.section sezcb
-.section sezda
-.section sezdb
-.section sezea
-.section sezeb
-.section sezfa
-.section sezfb
-.section sezga
-.section sezgb
-.section sezha
-.section sezhb
-.section sezia
-.section sezib
-.section sezja
-.section sezjb
-.section sezka
-.section sezkb
-.section sezla
-.section sezlb
-.section sezma
-.section sezmb
-.section sezna
-.section seznb
-.section sezoa
-.section sezob
-.section sezpa
-.section sezpb
-.section sezqa
-.section sezqb
-.section sezra
-.section sezrb
-.section sezsa
-.section sezsb
-.section sezta
-.section seztb
-.section sezua
-.section sezub
-.section sezva
-.section sezvb
-.section sezwa
-.section sezwb
-.section sezxa
-.section sezxb
-.section sezya
-.section sezyb
-.section sezza
-.section sezzb
-.section sez1a
-.section sez1b
-.section sez2a
-.section sez2b
-.section sez3a
-.section sez3b
-.section sez4a
-.section sez4b
-.section sez5a
-.section sez5b
-.section sez6a
-.section sez6b
-.section sez7a
-.section sez7b
-.section sez8a
-.section sez8b
-.section sez9a
-.section sez9b
-.section sez0a
-.section sez0b
-.section se1aa
-.section se1ab
-.section se1ba
-.section se1bb
-.section se1ca
-.section se1cb
-.section se1da
-.section se1db
-.section se1ea
-.section se1eb
-.section se1fa
-.section se1fb
-.section se1ga
-.section se1gb
-.section se1ha
-.section se1hb
-.section se1ia
-.section se1ib
-.section se1ja
-.section se1jb
-.section se1ka
-.section se1kb
-.section se1la
-.section se1lb
-.section se1ma
-.section se1mb
-.section se1na
-.section se1nb
-.section se1oa
-.section se1ob
-.section se1pa
-.section se1pb
-.section se1qa
-.section se1qb
-.section se1ra
-.section se1rb
-.section se1sa
-.section se1sb
-.section se1ta
-.section se1tb
-.section se1ua
-.section se1ub
-.section se1va
-.section se1vb
-.section se1wa
-.section se1wb
-.section se1xa
-.section se1xb
-.section se1ya
-.section se1yb
-.section se1za
-.section se1zb
-.section se11a
-.section se11b
-.section se12a
-.section se12b
-.section se13a
-.section se13b
-.section se14a
-.section se14b
-.section se15a
-.section se15b
-.section se16a
-.section se16b
-.section se17a
-.section se17b
-.section se18a
-.section se18b
-.section se19a
-.section se19b
-.section se10a
-.section se10b
-.section se2aa
-.section se2ab
-.section se2ba
-.section se2bb
-.section se2ca
-.section se2cb
-.section se2da
-.section se2db
-.section se2ea
-.section se2eb
-.section se2fa
-.section se2fb
-.section se2ga
-.section se2gb
-.section se2ha
-.section se2hb
-.section se2ia
-.section se2ib
-.section se2ja
-.section se2jb
-.section se2ka
-.section se2kb
-.section se2la
-.section se2lb
-.section se2ma
-.section se2mb
-.section se2na
-.section se2nb
-.section se2oa
-.section se2ob
-.section se2pa
-.section se2pb
-.section se2qa
-.section se2qb
-.section se2ra
-.section se2rb
-.section se2sa
-.section se2sb
-.section se2ta
-.section se2tb
-.section se2ua
-.section se2ub
-.section se2va
-.section se2vb
-.section se2wa
-.section se2wb
-.section se2xa
-.section se2xb
-.section se2ya
-.section se2yb
-.section se2za
-.section se2zb
-.section se21a
-.section se21b
-.section se22a
-.section se22b
-.section se23a
-.section se23b
-.section se24a
-.section se24b
-.section se25a
-.section se25b
-.section se26a
-.section se26b
-.section se27a
-.section se27b
-.section se28a
-.section se28b
-.section se29a
-.section se29b
-.section se20a
-.section se20b
-.section se3aa
-.section se3ab
-.section se3ba
-.section se3bb
-.section se3ca
-.section se3cb
-.section se3da
-.section se3db
-.section se3ea
-.section se3eb
-.section se3fa
-.section se3fb
-.section se3ga
-.section se3gb
-.section se3ha
-.section se3hb
-.section se3ia
-.section se3ib
-.section se3ja
-.section se3jb
-.section se3ka
-.section se3kb
-.section se3la
-.section se3lb
-.section se3ma
-.section se3mb
-.section se3na
-.section se3nb
-.section se3oa
-.section se3ob
-.section se3pa
-.section se3pb
-.section se3qa
-.section se3qb
-.section se3ra
-.section se3rb
-.section se3sa
-.section se3sb
-.section se3ta
-.section se3tb
-.section se3ua
-.section se3ub
-.section se3va
-.section se3vb
-.section se3wa
-.section se3wb
-.section se3xa
-.section se3xb
-.section se3ya
-.section se3yb
-.section se3za
-.section se3zb
-.section se31a
-.section se31b
-.section se32a
-.section se32b
-.section se33a
-.section se33b
-.section se34a
-.section se34b
-.section se35a
-.section se35b
-.section se36a
-.section se36b
-.section se37a
-.section se37b
-.section se38a
-.section se38b
-.section se39a
-.section se39b
-.section se30a
-.section se30b
-.section se4aa
-.section se4ab
-.section se4ba
-.section se4bb
-.section se4ca
-.section se4cb
-.section se4da
-.section se4db
-.section se4ea
-.section se4eb
-.section se4fa
-.section se4fb
-.section se4ga
-.section se4gb
-.section se4ha
-.section se4hb
-.section se4ia
-.section se4ib
-.section se4ja
-.section se4jb
-.section se4ka
-.section se4kb
-.section se4la
-.section se4lb
-.section se4ma
-.section se4mb
-.section se4na
-.section se4nb
-.section se4oa
-.section se4ob
-.section se4pa
-.section se4pb
-.section se4qa
-.section se4qb
-.section se4ra
-.section se4rb
-.section se4sa
-.section se4sb
-.section se4ta
-.section se4tb
-.section se4ua
-.section se4ub
-.section se4va
-.section se4vb
-.section se4wa
-.section se4wb
-.section se4xa
-.section se4xb
-.section se4ya
-.section se4yb
-.section se4za
-.section se4zb
-.section se41a
-.section se41b
-.section se42a
-.section se42b
-.section se43a
-.section se43b
-.section se44a
-.section se44b
-.section se45a
-.section se45b
-.section se46a
-.section se46b
-.section se47a
-.section se47b
-.section se48a
-.section se48b
-.section se49a
-.section se49b
-.section se40a
-.section se40b
-.section se5aa
-.section se5ab
-.section se5ba
-.section se5bb
-.section se5ca
-.section se5cb
-.section se5da
-.section se5db
-.section se5ea
-.section se5eb
-.section se5fa
-.section se5fb
-.section se5ga
-.section se5gb
-.section se5ha
-.section se5hb
-.section se5ia
-.section se5ib
-.section se5ja
-.section se5jb
-.section se5ka
-.section se5kb
-.section se5la
-.section se5lb
-.section se5ma
-.section se5mb
-.section se5na
-.section se5nb
-.section se5oa
-.section se5ob
-.section se5pa
-.section se5pb
-.section se5qa
-.section se5qb
-.section se5ra
-.section se5rb
-.section se5sa
-.section se5sb
-.section se5ta
-.section se5tb
-.section se5ua
-.section se5ub
-.section se5va
-.section se5vb
-.section se5wa
-.section se5wb
-.section se5xa
-.section se5xb
-.section se5ya
-.section se5yb
-.section se5za
-.section se5zb
-.section se51a
-.section se51b
-.section se52a
-.section se52b
-.section se53a
-.section se53b
-.section se54a
-.section se54b
-.section se55a
-.section se55b
-.section se56a
-.section se56b
-.section se57a
-.section se57b
-.section se58a
-.section se58b
-.section se59a
-.section se59b
-.section se50a
-.section se50b
-.section se6aa
-.section se6ab
-.section se6ba
-.section se6bb
-.section se6ca
-.section se6cb
-.section se6da
-.section se6db
-.section se6ea
-.section se6eb
-.section se6fa
-.section se6fb
-.section se6ga
-.section se6gb
-.section se6ha
-.section se6hb
-.section se6ia
-.section se6ib
-.section se6ja
-.section se6jb
-.section se6ka
-.section se6kb
-.section se6la
-.section se6lb
-.section se6ma
-.section se6mb
-.section se6na
-.section se6nb
-.section se6oa
-.section se6ob
-.section se6pa
-.section se6pb
-.section se6qa
-.section se6qb
-.section se6ra
-.section se6rb
-.section se6sa
-.section se6sb
-.section se6ta
-.section se6tb
-.section se6ua
-.section se6ub
-.section se6va
-.section se6vb
-.section se6wa
-.section se6wb
-.section se6xa
-.section se6xb
-.section se6ya
-.section se6yb
-.section se6za
-.section se6zb
-.section se61a
-.section se61b
-.section se62a
-.section se62b
-.section se63a
-.section se63b
-.section se64a
-.section se64b
-.section se65a
-.section se65b
-.section se66a
-.section se66b
-.section se67a
-.section se67b
-.section se68a
-.section se68b
-.section se69a
-.section se69b
-.section se60a
-.section se60b
-.section se7aa
-.section se7ab
-.section se7ba
-.section se7bb
-.section se7ca
-.section se7cb
-.section se7da
-.section se7db
-.section se7ea
-.section se7eb
-.section se7fa
-.section se7fb
-.section se7ga
-.section se7gb
-.section se7ha
-.section se7hb
-.section se7ia
-.section se7ib
-.section se7ja
-.section se7jb
-.section se7ka
-.section se7kb
-.section se7la
-.section se7lb
-.section se7ma
-.section se7mb
-.section se7na
-.section se7nb
-.section se7oa
-.section se7ob
-.section se7pa
-.section se7pb
-.section se7qa
-.section se7qb
-.section se7ra
-.section se7rb
-.section se7sa
-.section se7sb
-.section se7ta
-.section se7tb
-.section se7ua
-.section se7ub
-.section se7va
-.section se7vb
-.section se7wa
-.section se7wb
-.section se7xa
-.section se7xb
-.section se7ya
-.section se7yb
-.section se7za
-.section se7zb
-.section se71a
-.section se71b
-.section se72a
-.section se72b
-.section se73a
-.section se73b
-.section se74a
-.section se74b
-.section se75a
-.section se75b
-.section se76a
-.section se76b
-.section se77a
-.section se77b
-.section se78a
-.section se78b
-.section se79a
-.section se79b
-.section se70a
-.section se70b
-.section se8aa
-.section se8ab
-.section se8ba
-.section se8bb
-.section se8ca
-.section se8cb
-.section se8da
-.section se8db
-.section se8ea
-.section se8eb
-.section se8fa
-.section se8fb
-.section se8ga
-.section se8gb
-.section se8ha
-.section se8hb
-.section se8ia
-.section se8ib
-.section se8ja
-.section se8jb
-.section se8ka
-.section se8kb
-.section se8la
-.section se8lb
-.section se8ma
-.section se8mb
-.section se8na
-.section se8nb
-.section se8oa
-.section se8ob
-.section se8pa
-.section se8pb
-.section se8qa
-.section se8qb
-.section se8ra
-.section se8rb
-.section se8sa
-.section se8sb
-.section se8ta
-.section se8tb
-.section se8ua
-.section se8ub
-.section se8va
-.section se8vb
-.section se8wa
-.section se8wb
-.section se8xa
-.section se8xb
-.section se8ya
-.section se8yb
-.section se8za
-.section se8zb
-.section se81a
-.section se81b
-.section se82a
-.section se82b
-.section se83a
-.section se83b
-.section se84a
-.section se84b
-.section se85a
-.section se85b
-.section se86a
-.section se86b
-.section se87a
-.section se87b
-.section se88a
-.section se88b
-.section se89a
-.section se89b
-.section se80a
-.section se80b
-.section se9aa
-.section se9ab
-.section se9ba
-.section se9bb
-.section se9ca
-.section se9cb
-.section se9da
-.section se9db
-.section se9ea
-.section se9eb
-.section se9fa
-.section se9fb
-.section se9ga
-.section se9gb
-.section se9ha
-.section se9hb
-.section se9ia
-.section se9ib
-.section se9ja
-.section se9jb
-.section se9ka
-.section se9kb
-.section se9la
-.section se9lb
-.section se9ma
-.section se9mb
-.section se9na
-.section se9nb
-.section se9oa
-.section se9ob
-.section se9pa
-.section se9pb
-.section se9qa
-.section se9qb
-.section se9ra
-.section se9rb
-.section se9sa
-.section se9sb
-.section se9ta
-.section se9tb
-.section se9ua
-.section se9ub
-.section se9va
-.section se9vb
-.section se9wa
-.section se9wb
-.section se9xa
-.section se9xb
-.section se9ya
-.section se9yb
-.section se9za
-.section se9zb
-.section se91a
-.section se91b
-.section se92a
-.section se92b
-.section se93a
-.section se93b
-.section se94a
-.section se94b
-.section se95a
-.section se95b
-.section se96a
-.section se96b
-.section se97a
-.section se97b
-.section se98a
-.section se98b
-.section se99a
-.section se99b
-.section se90a
-.section se90b
-.section se0aa
-.section se0ab
-.section se0ba
-.section se0bb
-.section se0ca
-.section se0cb
-.section se0da
-.section se0db
-.section se0ea
-.section se0eb
-.section se0fa
-.section se0fb
-.section se0ga
-.section se0gb
-.section se0ha
-.section se0hb
-.section se0ia
-.section se0ib
-.section se0ja
-.section se0jb
-.section se0ka
-.section se0kb
-.section se0la
-.section se0lb
-.section se0ma
-.section se0mb
-.section se0na
-.section se0nb
-.section se0oa
-.section se0ob
-.section se0pa
-.section se0pb
-.section se0qa
-.section se0qb
-.section se0ra
-.section se0rb
-.section se0sa
-.section se0sb
-.section se0ta
-.section se0tb
-.section se0ua
-.section se0ub
-.section se0va
-.section se0vb
-.section se0wa
-.section se0wb
-.section se0xa
-.section se0xb
-.section se0ya
-.section se0yb
-.section se0za
-.section se0zb
-.section se01a
-.section se01b
-.section se02a
-.section se02b
-.section se03a
-.section se03b
-.section se04a
-.section se04b
-.section se05a
-.section se05b
-.section se06a
-.section se06b
-.section se07a
-.section se07b
-.section se08a
-.section se08b
-.section se09a
-.section se09b
-.section se00a
-.section se00b
-.section sfaaa
-.section sfaab
-.section sfaba
-.section sfabb
-.section sfaca
-.section sfacb
-.section sfada
-.section sfadb
-.section sfaea
-.section sfaeb
-.section sfafa
-.section sfafb
-.section sfaga
-.section sfagb
-.section sfaha
-.section sfahb
-.section sfaia
-.section sfaib
-.section sfaja
-.section sfajb
-.section sfaka
-.section sfakb
-.section sfala
-.section sfalb
-.section sfama
-.section sfamb
-.section sfana
-.section sfanb
-.section sfaoa
-.section sfaob
-.section sfapa
-.section sfapb
-.section sfaqa
-.section sfaqb
-.section sfara
-.section sfarb
-.section sfasa
-.section sfasb
-.section sfata
-.section sfatb
-.section sfaua
-.section sfaub
-.section sfava
-.section sfavb
-.section sfawa
-.section sfawb
-.section sfaxa
-.section sfaxb
-.section sfaya
-.section sfayb
-.section sfaza
-.section sfazb
-.section sfa1a
-.section sfa1b
-.section sfa2a
-.section sfa2b
-.section sfa3a
-.section sfa3b
-.section sfa4a
-.section sfa4b
-.section sfa5a
-.section sfa5b
-.section sfa6a
-.section sfa6b
-.section sfa7a
-.section sfa7b
-.section sfa8a
-.section sfa8b
-.section sfa9a
-.section sfa9b
-.section sfa0a
-.section sfa0b
-.section sfbaa
-.section sfbab
-.section sfbba
-.section sfbbb
-.section sfbca
-.section sfbcb
-.section sfbda
-.section sfbdb
-.section sfbea
-.section sfbeb
-.section sfbfa
-.section sfbfb
-.section sfbga
-.section sfbgb
-.section sfbha
-.section sfbhb
-.section sfbia
-.section sfbib
-.section sfbja
-.section sfbjb
-.section sfbka
-.section sfbkb
-.section sfbla
-.section sfblb
-.section sfbma
-.section sfbmb
-.section sfbna
-.section sfbnb
-.section sfboa
-.section sfbob
-.section sfbpa
-.section sfbpb
-.section sfbqa
-.section sfbqb
-.section sfbra
-.section sfbrb
-.section sfbsa
-.section sfbsb
-.section sfbta
-.section sfbtb
-.section sfbua
-.section sfbub
-.section sfbva
-.section sfbvb
-.section sfbwa
-.section sfbwb
-.section sfbxa
-.section sfbxb
-.section sfbya
-.section sfbyb
-.section sfbza
-.section sfbzb
-.section sfb1a
-.section sfb1b
-.section sfb2a
-.section sfb2b
-.section sfb3a
-.section sfb3b
-.section sfb4a
-.section sfb4b
-.section sfb5a
-.section sfb5b
-.section sfb6a
-.section sfb6b
-.section sfb7a
-.section sfb7b
-.section sfb8a
-.section sfb8b
-.section sfb9a
-.section sfb9b
-.section sfb0a
-.section sfb0b
-.section sfcaa
-.section sfcab
-.section sfcba
-.section sfcbb
-.section sfcca
-.section sfccb
-.section sfcda
-.section sfcdb
-.section sfcea
-.section sfceb
-.section sfcfa
-.section sfcfb
-.section sfcga
-.section sfcgb
-.section sfcha
-.section sfchb
-.section sfcia
-.section sfcib
-.section sfcja
-.section sfcjb
-.section sfcka
-.section sfckb
-.section sfcla
-.section sfclb
-.section sfcma
-.section sfcmb
-.section sfcna
-.section sfcnb
-.section sfcoa
-.section sfcob
-.section sfcpa
-.section sfcpb
-.section sfcqa
-.section sfcqb
-.section sfcra
-.section sfcrb
-.section sfcsa
-.section sfcsb
-.section sfcta
-.section sfctb
-.section sfcua
-.section sfcub
-.section sfcva
-.section sfcvb
-.section sfcwa
-.section sfcwb
-.section sfcxa
-.section sfcxb
-.section sfcya
-.section sfcyb
-.section sfcza
-.section sfczb
-.section sfc1a
-.section sfc1b
-.section sfc2a
-.section sfc2b
-.section sfc3a
-.section sfc3b
-.section sfc4a
-.section sfc4b
-.section sfc5a
-.section sfc5b
-.section sfc6a
-.section sfc6b
-.section sfc7a
-.section sfc7b
-.section sfc8a
-.section sfc8b
-.section sfc9a
-.section sfc9b
-.section sfc0a
-.section sfc0b
-.section sfdaa
-.section sfdab
-.section sfdba
-.section sfdbb
-.section sfdca
-.section sfdcb
-.section sfdda
-.section sfddb
-.section sfdea
-.section sfdeb
-.section sfdfa
-.section sfdfb
-.section sfdga
-.section sfdgb
-.section sfdha
-.section sfdhb
-.section sfdia
-.section sfdib
-.section sfdja
-.section sfdjb
-.section sfdka
-.section sfdkb
-.section sfdla
-.section sfdlb
-.section sfdma
-.section sfdmb
-.section sfdna
-.section sfdnb
-.section sfdoa
-.section sfdob
-.section sfdpa
-.section sfdpb
-.section sfdqa
-.section sfdqb
-.section sfdra
-.section sfdrb
-.section sfdsa
-.section sfdsb
-.section sfdta
-.section sfdtb
-.section sfdua
-.section sfdub
-.section sfdva
-.section sfdvb
-.section sfdwa
-.section sfdwb
-.section sfdxa
-.section sfdxb
-.section sfdya
-.section sfdyb
-.section sfdza
-.section sfdzb
-.section sfd1a
-.section sfd1b
-.section sfd2a
-.section sfd2b
-.section sfd3a
-.section sfd3b
-.section sfd4a
-.section sfd4b
-.section sfd5a
-.section sfd5b
-.section sfd6a
-.section sfd6b
-.section sfd7a
-.section sfd7b
-.section sfd8a
-.section sfd8b
-.section sfd9a
-.section sfd9b
-.section sfd0a
-.section sfd0b
-.section sfeaa
-.section sfeab
-.section sfeba
-.section sfebb
-.section sfeca
-.section sfecb
-.section sfeda
-.section sfedb
-.section sfeea
-.section sfeeb
-.section sfefa
-.section sfefb
-.section sfega
-.section sfegb
-.section sfeha
-.section sfehb
-.section sfeia
-.section sfeib
-.section sfeja
-.section sfejb
-.section sfeka
-.section sfekb
-.section sfela
-.section sfelb
-.section sfema
-.section sfemb
-.section sfena
-.section sfenb
-.section sfeoa
-.section sfeob
-.section sfepa
-.section sfepb
-.section sfeqa
-.section sfeqb
-.section sfera
-.section sferb
-.section sfesa
-.section sfesb
-.section sfeta
-.section sfetb
-.section sfeua
-.section sfeub
-.section sfeva
-.section sfevb
-.section sfewa
-.section sfewb
-.section sfexa
-.section sfexb
-.section sfeya
-.section sfeyb
-.section sfeza
-.section sfezb
-.section sfe1a
-.section sfe1b
-.section sfe2a
-.section sfe2b
-.section sfe3a
-.section sfe3b
-.section sfe4a
-.section sfe4b
-.section sfe5a
-.section sfe5b
-.section sfe6a
-.section sfe6b
-.section sfe7a
-.section sfe7b
-.section sfe8a
-.section sfe8b
-.section sfe9a
-.section sfe9b
-.section sfe0a
-.section sfe0b
-.section sffaa
-.section sffab
-.section sffba
-.section sffbb
-.section sffca
-.section sffcb
-.section sffda
-.section sffdb
-.section sffea
-.section sffeb
-.section sfffa
-.section sfffb
-.section sffga
-.section sffgb
-.section sffha
-.section sffhb
-.section sffia
-.section sffib
-.section sffja
-.section sffjb
-.section sffka
-.section sffkb
-.section sffla
-.section sfflb
-.section sffma
-.section sffmb
-.section sffna
-.section sffnb
-.section sffoa
-.section sffob
-.section sffpa
-.section sffpb
-.section sffqa
-.section sffqb
-.section sffra
-.section sffrb
-.section sffsa
-.section sffsb
-.section sffta
-.section sfftb
-.section sffua
-.section sffub
-.section sffva
-.section sffvb
-.section sffwa
-.section sffwb
-.section sffxa
-.section sffxb
-.section sffya
-.section sffyb
-.section sffza
-.section sffzb
-.section sff1a
-.section sff1b
-.section sff2a
-.section sff2b
-.section sff3a
-.section sff3b
-.section sff4a
-.section sff4b
-.section sff5a
-.section sff5b
-.section sff6a
-.section sff6b
-.section sff7a
-.section sff7b
-.section sff8a
-.section sff8b
-.section sff9a
-.section sff9b
-.section sff0a
-.section sff0b
-.section sfgaa
-.section sfgab
-.section sfgba
-.section sfgbb
-.section sfgca
-.section sfgcb
-.section sfgda
-.section sfgdb
-.section sfgea
-.section sfgeb
-.section sfgfa
-.section sfgfb
-.section sfgga
-.section sfggb
-.section sfgha
-.section sfghb
-.section sfgia
-.section sfgib
-.section sfgja
-.section sfgjb
-.section sfgka
-.section sfgkb
-.section sfgla
-.section sfglb
-.section sfgma
-.section sfgmb
-.section sfgna
-.section sfgnb
-.section sfgoa
-.section sfgob
-.section sfgpa
-.section sfgpb
-.section sfgqa
-.section sfgqb
-.section sfgra
-.section sfgrb
-.section sfgsa
-.section sfgsb
-.section sfgta
-.section sfgtb
-.section sfgua
-.section sfgub
-.section sfgva
-.section sfgvb
-.section sfgwa
-.section sfgwb
-.section sfgxa
-.section sfgxb
-.section sfgya
-.section sfgyb
-.section sfgza
-.section sfgzb
-.section sfg1a
-.section sfg1b
-.section sfg2a
-.section sfg2b
-.section sfg3a
-.section sfg3b
-.section sfg4a
-.section sfg4b
-.section sfg5a
-.section sfg5b
-.section sfg6a
-.section sfg6b
-.section sfg7a
-.section sfg7b
-.section sfg8a
-.section sfg8b
-.section sfg9a
-.section sfg9b
-.section sfg0a
-.section sfg0b
-.section sfhaa
-.section sfhab
-.section sfhba
-.section sfhbb
-.section sfhca
-.section sfhcb
-.section sfhda
-.section sfhdb
-.section sfhea
-.section sfheb
-.section sfhfa
-.section sfhfb
-.section sfhga
-.section sfhgb
-.section sfhha
-.section sfhhb
-.section sfhia
-.section sfhib
-.section sfhja
-.section sfhjb
-.section sfhka
-.section sfhkb
-.section sfhla
-.section sfhlb
-.section sfhma
-.section sfhmb
-.section sfhna
-.section sfhnb
-.section sfhoa
-.section sfhob
-.section sfhpa
-.section sfhpb
-.section sfhqa
-.section sfhqb
-.section sfhra
-.section sfhrb
-.section sfhsa
-.section sfhsb
-.section sfhta
-.section sfhtb
-.section sfhua
-.section sfhub
-.section sfhva
-.section sfhvb
-.section sfhwa
-.section sfhwb
-.section sfhxa
-.section sfhxb
-.section sfhya
-.section sfhyb
-.section sfhza
-.section sfhzb
-.section sfh1a
-.section sfh1b
-.section sfh2a
-.section sfh2b
-.section sfh3a
-.section sfh3b
-.section sfh4a
-.section sfh4b
-.section sfh5a
-.section sfh5b
-.section sfh6a
-.section sfh6b
-.section sfh7a
-.section sfh7b
-.section sfh8a
-.section sfh8b
-.section sfh9a
-.section sfh9b
-.section sfh0a
-.section sfh0b
-.section sfiaa
-.section sfiab
-.section sfiba
-.section sfibb
-.section sfica
-.section sficb
-.section sfida
-.section sfidb
-.section sfiea
-.section sfieb
-.section sfifa
-.section sfifb
-.section sfiga
-.section sfigb
-.section sfiha
-.section sfihb
-.section sfiia
-.section sfiib
-.section sfija
-.section sfijb
-.section sfika
-.section sfikb
-.section sfila
-.section sfilb
-.section sfima
-.section sfimb
-.section sfina
-.section sfinb
-.section sfioa
-.section sfiob
-.section sfipa
-.section sfipb
-.section sfiqa
-.section sfiqb
-.section sfira
-.section sfirb
-.section sfisa
-.section sfisb
-.section sfita
-.section sfitb
-.section sfiua
-.section sfiub
-.section sfiva
-.section sfivb
-.section sfiwa
-.section sfiwb
-.section sfixa
-.section sfixb
-.section sfiya
-.section sfiyb
-.section sfiza
-.section sfizb
-.section sfi1a
-.section sfi1b
-.section sfi2a
-.section sfi2b
-.section sfi3a
-.section sfi3b
-.section sfi4a
-.section sfi4b
-.section sfi5a
-.section sfi5b
-.section sfi6a
-.section sfi6b
-.section sfi7a
-.section sfi7b
-.section sfi8a
-.section sfi8b
-.section sfi9a
-.section sfi9b
-.section sfi0a
-.section sfi0b
-.section sfjaa
-.section sfjab
-.section sfjba
-.section sfjbb
-.section sfjca
-.section sfjcb
-.section sfjda
-.section sfjdb
-.section sfjea
-.section sfjeb
-.section sfjfa
-.section sfjfb
-.section sfjga
-.section sfjgb
-.section sfjha
-.section sfjhb
-.section sfjia
-.section sfjib
-.section sfjja
-.section sfjjb
-.section sfjka
-.section sfjkb
-.section sfjla
-.section sfjlb
-.section sfjma
-.section sfjmb
-.section sfjna
-.section sfjnb
-.section sfjoa
-.section sfjob
-.section sfjpa
-.section sfjpb
-.section sfjqa
-.section sfjqb
-.section sfjra
-.section sfjrb
-.section sfjsa
-.section sfjsb
-.section sfjta
-.section sfjtb
-.section sfjua
-.section sfjub
-.section sfjva
-.section sfjvb
-.section sfjwa
-.section sfjwb
-.section sfjxa
-.section sfjxb
-.section sfjya
-.section sfjyb
-.section sfjza
-.section sfjzb
-.section sfj1a
-.section sfj1b
-.section sfj2a
-.section sfj2b
-.section sfj3a
-.section sfj3b
-.section sfj4a
-.section sfj4b
-.section sfj5a
-.section sfj5b
-.section sfj6a
-.section sfj6b
-.section sfj7a
-.section sfj7b
-.section sfj8a
-.section sfj8b
-.section sfj9a
-.section sfj9b
-.section sfj0a
-.section sfj0b
-.section sfkaa
-.section sfkab
-.section sfkba
-.section sfkbb
-.section sfkca
-.section sfkcb
-.section sfkda
-.section sfkdb
-.section sfkea
-.section sfkeb
-.section sfkfa
-.section sfkfb
-.section sfkga
-.section sfkgb
-.section sfkha
-.section sfkhb
-.section sfkia
-.section sfkib
-.section sfkja
-.section sfkjb
-.section sfkka
-.section sfkkb
-.section sfkla
-.section sfklb
-.section sfkma
-.section sfkmb
-.section sfkna
-.section sfknb
-.section sfkoa
-.section sfkob
-.section sfkpa
-.section sfkpb
-.section sfkqa
-.section sfkqb
-.section sfkra
-.section sfkrb
-.section sfksa
-.section sfksb
-.section sfkta
-.section sfktb
-.section sfkua
-.section sfkub
-.section sfkva
-.section sfkvb
-.section sfkwa
-.section sfkwb
-.section sfkxa
-.section sfkxb
-.section sfkya
-.section sfkyb
-.section sfkza
-.section sfkzb
-.section sfk1a
-.section sfk1b
-.section sfk2a
-.section sfk2b
-.section sfk3a
-.section sfk3b
-.section sfk4a
-.section sfk4b
-.section sfk5a
-.section sfk5b
-.section sfk6a
-.section sfk6b
-.section sfk7a
-.section sfk7b
-.section sfk8a
-.section sfk8b
-.section sfk9a
-.section sfk9b
-.section sfk0a
-.section sfk0b
-.section sflaa
-.section sflab
-.section sflba
-.section sflbb
-.section sflca
-.section sflcb
-.section sflda
-.section sfldb
-.section sflea
-.section sfleb
-.section sflfa
-.section sflfb
-.section sflga
-.section sflgb
-.section sflha
-.section sflhb
-.section sflia
-.section sflib
-.section sflja
-.section sfljb
-.section sflka
-.section sflkb
-.section sflla
-.section sfllb
-.section sflma
-.section sflmb
-.section sflna
-.section sflnb
-.section sfloa
-.section sflob
-.section sflpa
-.section sflpb
-.section sflqa
-.section sflqb
-.section sflra
-.section sflrb
-.section sflsa
-.section sflsb
-.section sflta
-.section sfltb
-.section sflua
-.section sflub
-.section sflva
-.section sflvb
-.section sflwa
-.section sflwb
-.section sflxa
-.section sflxb
-.section sflya
-.section sflyb
-.section sflza
-.section sflzb
-.section sfl1a
-.section sfl1b
-.section sfl2a
-.section sfl2b
-.section sfl3a
-.section sfl3b
-.section sfl4a
-.section sfl4b
-.section sfl5a
-.section sfl5b
-.section sfl6a
-.section sfl6b
-.section sfl7a
-.section sfl7b
-.section sfl8a
-.section sfl8b
-.section sfl9a
-.section sfl9b
-.section sfl0a
-.section sfl0b
-.section sfmaa
-.section sfmab
-.section sfmba
-.section sfmbb
-.section sfmca
-.section sfmcb
-.section sfmda
-.section sfmdb
-.section sfmea
-.section sfmeb
-.section sfmfa
-.section sfmfb
-.section sfmga
-.section sfmgb
-.section sfmha
-.section sfmhb
-.section sfmia
-.section sfmib
-.section sfmja
-.section sfmjb
-.section sfmka
-.section sfmkb
-.section sfmla
-.section sfmlb
-.section sfmma
-.section sfmmb
-.section sfmna
-.section sfmnb
-.section sfmoa
-.section sfmob
-.section sfmpa
-.section sfmpb
-.section sfmqa
-.section sfmqb
-.section sfmra
-.section sfmrb
-.section sfmsa
-.section sfmsb
-.section sfmta
-.section sfmtb
-.section sfmua
-.section sfmub
-.section sfmva
-.section sfmvb
-.section sfmwa
-.section sfmwb
-.section sfmxa
-.section sfmxb
-.section sfmya
-.section sfmyb
-.section sfmza
-.section sfmzb
-.section sfm1a
-.section sfm1b
-.section sfm2a
-.section sfm2b
-.section sfm3a
-.section sfm3b
-.section sfm4a
-.section sfm4b
-.section sfm5a
-.section sfm5b
-.section sfm6a
-.section sfm6b
-.section sfm7a
-.section sfm7b
-.section sfm8a
-.section sfm8b
-.section sfm9a
-.section sfm9b
-.section sfm0a
-.section sfm0b
-.section sfnaa
-.section sfnab
-.section sfnba
-.section sfnbb
-.section sfnca
-.section sfncb
-.section sfnda
-.section sfndb
-.section sfnea
-.section sfneb
-.section sfnfa
-.section sfnfb
-.section sfnga
-.section sfngb
-.section sfnha
-.section sfnhb
-.section sfnia
-.section sfnib
-.section sfnja
-.section sfnjb
-.section sfnka
-.section sfnkb
-.section sfnla
-.section sfnlb
-.section sfnma
-.section sfnmb
-.section sfnna
-.section sfnnb
-.section sfnoa
-.section sfnob
-.section sfnpa
-.section sfnpb
-.section sfnqa
-.section sfnqb
-.section sfnra
-.section sfnrb
-.section sfnsa
-.section sfnsb
-.section sfnta
-.section sfntb
-.section sfnua
-.section sfnub
-.section sfnva
-.section sfnvb
-.section sfnwa
-.section sfnwb
-.section sfnxa
-.section sfnxb
-.section sfnya
-.section sfnyb
-.section sfnza
-.section sfnzb
-.section sfn1a
-.section sfn1b
-.section sfn2a
-.section sfn2b
-.section sfn3a
-.section sfn3b
-.section sfn4a
-.section sfn4b
-.section sfn5a
-.section sfn5b
-.section sfn6a
-.section sfn6b
-.section sfn7a
-.section sfn7b
-.section sfn8a
-.section sfn8b
-.section sfn9a
-.section sfn9b
-.section sfn0a
-.section sfn0b
-.section sfoaa
-.section sfoab
-.section sfoba
-.section sfobb
-.section sfoca
-.section sfocb
-.section sfoda
-.section sfodb
-.section sfoea
-.section sfoeb
-.section sfofa
-.section sfofb
-.section sfoga
-.section sfogb
-.section sfoha
-.section sfohb
-.section sfoia
-.section sfoib
-.section sfoja
-.section sfojb
-.section sfoka
-.section sfokb
-.section sfola
-.section sfolb
-.section sfoma
-.section sfomb
-.section sfona
-.section sfonb
-.section sfooa
-.section sfoob
-.section sfopa
-.section sfopb
-.section sfoqa
-.section sfoqb
-.section sfora
-.section sforb
-.section sfosa
-.section sfosb
-.section sfota
-.section sfotb
-.section sfoua
-.section sfoub
-.section sfova
-.section sfovb
-.section sfowa
-.section sfowb
-.section sfoxa
-.section sfoxb
-.section sfoya
-.section sfoyb
-.section sfoza
-.section sfozb
-.section sfo1a
-.section sfo1b
-.section sfo2a
-.section sfo2b
-.section sfo3a
-.section sfo3b
-.section sfo4a
-.section sfo4b
-.section sfo5a
-.section sfo5b
-.section sfo6a
-.section sfo6b
-.section sfo7a
-.section sfo7b
-.section sfo8a
-.section sfo8b
-.section sfo9a
-.section sfo9b
-.section sfo0a
-.section sfo0b
-.section sfpaa
-.section sfpab
-.section sfpba
-.section sfpbb
-.section sfpca
-.section sfpcb
-.section sfpda
-.section sfpdb
-.section sfpea
-.section sfpeb
-.section sfpfa
-.section sfpfb
-.section sfpga
-.section sfpgb
-.section sfpha
-.section sfphb
-.section sfpia
-.section sfpib
-.section sfpja
-.section sfpjb
-.section sfpka
-.section sfpkb
-.section sfpla
-.section sfplb
-.section sfpma
-.section sfpmb
-.section sfpna
-.section sfpnb
-.section sfpoa
-.section sfpob
-.section sfppa
-.section sfppb
-.section sfpqa
-.section sfpqb
-.section sfpra
-.section sfprb
-.section sfpsa
-.section sfpsb
-.section sfpta
-.section sfptb
-.section sfpua
-.section sfpub
-.section sfpva
-.section sfpvb
-.section sfpwa
-.section sfpwb
-.section sfpxa
-.section sfpxb
-.section sfpya
-.section sfpyb
-.section sfpza
-.section sfpzb
-.section sfp1a
-.section sfp1b
-.section sfp2a
-.section sfp2b
-.section sfp3a
-.section sfp3b
-.section sfp4a
-.section sfp4b
-.section sfp5a
-.section sfp5b
-.section sfp6a
-.section sfp6b
-.section sfp7a
-.section sfp7b
-.section sfp8a
-.section sfp8b
-.section sfp9a
-.section sfp9b
-.section sfp0a
-.section sfp0b
-.section sfqaa
-.section sfqab
-.section sfqba
-.section sfqbb
-.section sfqca
-.section sfqcb
-.section sfqda
-.section sfqdb
-.section sfqea
-.section sfqeb
-.section sfqfa
-.section sfqfb
-.section sfqga
-.section sfqgb
-.section sfqha
-.section sfqhb
-.section sfqia
-.section sfqib
-.section sfqja
-.section sfqjb
-.section sfqka
-.section sfqkb
-.section sfqla
-.section sfqlb
-.section sfqma
-.section sfqmb
-.section sfqna
-.section sfqnb
-.section sfqoa
-.section sfqob
-.section sfqpa
-.section sfqpb
-.section sfqqa
-.section sfqqb
-.section sfqra
-.section sfqrb
-.section sfqsa
-.section sfqsb
-.section sfqta
-.section sfqtb
-.section sfqua
-.section sfqub
-.section sfqva
-.section sfqvb
-.section sfqwa
-.section sfqwb
-.section sfqxa
-.section sfqxb
-.section sfqya
-.section sfqyb
-.section sfqza
-.section sfqzb
-.section sfq1a
-.section sfq1b
-.section sfq2a
-.section sfq2b
-.section sfq3a
-.section sfq3b
-.section sfq4a
-.section sfq4b
-.section sfq5a
-.section sfq5b
-.section sfq6a
-.section sfq6b
-.section sfq7a
-.section sfq7b
-.section sfq8a
-.section sfq8b
-.section sfq9a
-.section sfq9b
-.section sfq0a
-.section sfq0b
-.section sfraa
-.section sfrab
-.section sfrba
-.section sfrbb
-.section sfrca
-.section sfrcb
-.section sfrda
-.section sfrdb
-.section sfrea
-.section sfreb
-.section sfrfa
-.section sfrfb
-.section sfrga
-.section sfrgb
-.section sfrha
-.section sfrhb
-.section sfria
-.section sfrib
-.section sfrja
-.section sfrjb
-.section sfrka
-.section sfrkb
-.section sfrla
-.section sfrlb
-.section sfrma
-.section sfrmb
-.section sfrna
-.section sfrnb
-.section sfroa
-.section sfrob
-.section sfrpa
-.section sfrpb
-.section sfrqa
-.section sfrqb
-.section sfrra
-.section sfrrb
-.section sfrsa
-.section sfrsb
-.section sfrta
-.section sfrtb
-.section sfrua
-.section sfrub
-.section sfrva
-.section sfrvb
-.section sfrwa
-.section sfrwb
-.section sfrxa
-.section sfrxb
-.section sfrya
-.section sfryb
-.section sfrza
-.section sfrzb
-.section sfr1a
-.section sfr1b
-.section sfr2a
-.section sfr2b
-.section sfr3a
-.section sfr3b
-.section sfr4a
-.section sfr4b
-.section sfr5a
-.section sfr5b
-.section sfr6a
-.section sfr6b
-.section sfr7a
-.section sfr7b
-.section sfr8a
-.section sfr8b
-.section sfr9a
-.section sfr9b
-.section sfr0a
-.section sfr0b
-.section sfsaa
-.section sfsab
-.section sfsba
-.section sfsbb
-.section sfsca
-.section sfscb
-.section sfsda
-.section sfsdb
-.section sfsea
-.section sfseb
-.section sfsfa
-.section sfsfb
-.section sfsga
-.section sfsgb
-.section sfsha
-.section sfshb
-.section sfsia
-.section sfsib
-.section sfsja
-.section sfsjb
-.section sfska
-.section sfskb
-.section sfsla
-.section sfslb
-.section sfsma
-.section sfsmb
-.section sfsna
-.section sfsnb
-.section sfsoa
-.section sfsob
-.section sfspa
-.section sfspb
-.section sfsqa
-.section sfsqb
-.section sfsra
-.section sfsrb
-.section sfssa
-.section sfssb
-.section sfsta
-.section sfstb
-.section sfsua
-.section sfsub
-.section sfsva
-.section sfsvb
-.section sfswa
-.section sfswb
-.section sfsxa
-.section sfsxb
-.section sfsya
-.section sfsyb
-.section sfsza
-.section sfszb
-.section sfs1a
-.section sfs1b
-.section sfs2a
-.section sfs2b
-.section sfs3a
-.section sfs3b
-.section sfs4a
-.section sfs4b
-.section sfs5a
-.section sfs5b
-.section sfs6a
-.section sfs6b
-.section sfs7a
-.section sfs7b
-.section sfs8a
-.section sfs8b
-.section sfs9a
-.section sfs9b
-.section sfs0a
-.section sfs0b
-.section sftaa
-.section sftab
-.section sftba
-.section sftbb
-.section sftca
-.section sftcb
-.section sftda
-.section sftdb
-.section sftea
-.section sfteb
-.section sftfa
-.section sftfb
-.section sftga
-.section sftgb
-.section sftha
-.section sfthb
-.section sftia
-.section sftib
-.section sftja
-.section sftjb
-.section sftka
-.section sftkb
-.section sftla
-.section sftlb
-.section sftma
-.section sftmb
-.section sftna
-.section sftnb
-.section sftoa
-.section sftob
-.section sftpa
-.section sftpb
-.section sftqa
-.section sftqb
-.section sftra
-.section sftrb
-.section sftsa
-.section sftsb
-.section sftta
-.section sfttb
-.section sftua
-.section sftub
-.section sftva
-.section sftvb
-.section sftwa
-.section sftwb
-.section sftxa
-.section sftxb
-.section sftya
-.section sftyb
-.section sftza
-.section sftzb
-.section sft1a
-.section sft1b
-.section sft2a
-.section sft2b
-.section sft3a
-.section sft3b
-.section sft4a
-.section sft4b
-.section sft5a
-.section sft5b
-.section sft6a
-.section sft6b
-.section sft7a
-.section sft7b
-.section sft8a
-.section sft8b
-.section sft9a
-.section sft9b
-.section sft0a
-.section sft0b
-.section sfuaa
-.section sfuab
-.section sfuba
-.section sfubb
-.section sfuca
-.section sfucb
-.section sfuda
-.section sfudb
-.section sfuea
-.section sfueb
-.section sfufa
-.section sfufb
-.section sfuga
-.section sfugb
-.section sfuha
-.section sfuhb
-.section sfuia
-.section sfuib
-.section sfuja
-.section sfujb
-.section sfuka
-.section sfukb
-.section sfula
-.section sfulb
-.section sfuma
-.section sfumb
-.section sfuna
-.section sfunb
-.section sfuoa
-.section sfuob
-.section sfupa
-.section sfupb
-.section sfuqa
-.section sfuqb
-.section sfura
-.section sfurb
-.section sfusa
-.section sfusb
-.section sfuta
-.section sfutb
-.section sfuua
-.section sfuub
-.section sfuva
-.section sfuvb
-.section sfuwa
-.section sfuwb
-.section sfuxa
-.section sfuxb
-.section sfuya
-.section sfuyb
-.section sfuza
-.section sfuzb
-.section sfu1a
-.section sfu1b
-.section sfu2a
-.section sfu2b
-.section sfu3a
-.section sfu3b
-.section sfu4a
-.section sfu4b
-.section sfu5a
-.section sfu5b
-.section sfu6a
-.section sfu6b
-.section sfu7a
-.section sfu7b
-.section sfu8a
-.section sfu8b
-.section sfu9a
-.section sfu9b
-.section sfu0a
-.section sfu0b
-.section sfvaa
-.section sfvab
-.section sfvba
-.section sfvbb
-.section sfvca
-.section sfvcb
-.section sfvda
-.section sfvdb
-.section sfvea
-.section sfveb
-.section sfvfa
-.section sfvfb
-.section sfvga
-.section sfvgb
-.section sfvha
-.section sfvhb
-.section sfvia
-.section sfvib
-.section sfvja
-.section sfvjb
-.section sfvka
-.section sfvkb
-.section sfvla
-.section sfvlb
-.section sfvma
-.section sfvmb
-.section sfvna
-.section sfvnb
-.section sfvoa
-.section sfvob
-.section sfvpa
-.section sfvpb
-.section sfvqa
-.section sfvqb
-.section sfvra
-.section sfvrb
-.section sfvsa
-.section sfvsb
-.section sfvta
-.section sfvtb
-.section sfvua
-.section sfvub
-.section sfvva
-.section sfvvb
-.section sfvwa
-.section sfvwb
-.section sfvxa
-.section sfvxb
-.section sfvya
-.section sfvyb
-.section sfvza
-.section sfvzb
-.section sfv1a
-.section sfv1b
-.section sfv2a
-.section sfv2b
-.section sfv3a
-.section sfv3b
-.section sfv4a
-.section sfv4b
-.section sfv5a
-.section sfv5b
-.section sfv6a
-.section sfv6b
-.section sfv7a
-.section sfv7b
-.section sfv8a
-.section sfv8b
-.section sfv9a
-.section sfv9b
-.section sfv0a
-.section sfv0b
-.section sfwaa
-.section sfwab
-.section sfwba
-.section sfwbb
-.section sfwca
-.section sfwcb
-.section sfwda
-.section sfwdb
-.section sfwea
-.section sfweb
-.section sfwfa
-.section sfwfb
-.section sfwga
-.section sfwgb
-.section sfwha
-.section sfwhb
-.section sfwia
-.section sfwib
-.section sfwja
-.section sfwjb
-.section sfwka
-.section sfwkb
-.section sfwla
-.section sfwlb
-.section sfwma
-.section sfwmb
-.section sfwna
-.section sfwnb
-.section sfwoa
-.section sfwob
-.section sfwpa
-.section sfwpb
-.section sfwqa
-.section sfwqb
-.section sfwra
-.section sfwrb
-.section sfwsa
-.section sfwsb
-.section sfwta
-.section sfwtb
-.section sfwua
-.section sfwub
-.section sfwva
-.section sfwvb
-.section sfwwa
-.section sfwwb
-.section sfwxa
-.section sfwxb
-.section sfwya
-.section sfwyb
-.section sfwza
-.section sfwzb
-.section sfw1a
-.section sfw1b
-.section sfw2a
-.section sfw2b
-.section sfw3a
-.section sfw3b
-.section sfw4a
-.section sfw4b
-.section sfw5a
-.section sfw5b
-.section sfw6a
-.section sfw6b
-.section sfw7a
-.section sfw7b
-.section sfw8a
-.section sfw8b
-.section sfw9a
-.section sfw9b
-.section sfw0a
-.section sfw0b
-.section sfxaa
-.section sfxab
-.section sfxba
-.section sfxbb
-.section sfxca
-.section sfxcb
-.section sfxda
-.section sfxdb
-.section sfxea
-.section sfxeb
-.section sfxfa
-.section sfxfb
-.section sfxga
-.section sfxgb
-.section sfxha
-.section sfxhb
-.section sfxia
-.section sfxib
-.section sfxja
-.section sfxjb
-.section sfxka
-.section sfxkb
-.section sfxla
-.section sfxlb
-.section sfxma
-.section sfxmb
-.section sfxna
-.section sfxnb
-.section sfxoa
-.section sfxob
-.section sfxpa
-.section sfxpb
-.section sfxqa
-.section sfxqb
-.section sfxra
-.section sfxrb
-.section sfxsa
-.section sfxsb
-.section sfxta
-.section sfxtb
-.section sfxua
-.section sfxub
-.section sfxva
-.section sfxvb
-.section sfxwa
-.section sfxwb
-.section sfxxa
-.section sfxxb
-.section sfxya
-.section sfxyb
-.section sfxza
-.section sfxzb
-.section sfx1a
-.section sfx1b
-.section sfx2a
-.section sfx2b
-.section sfx3a
-.section sfx3b
-.section sfx4a
-.section sfx4b
-.section sfx5a
-.section sfx5b
-.section sfx6a
-.section sfx6b
-.section sfx7a
-.section sfx7b
-.section sfx8a
-.section sfx8b
-.section sfx9a
-.section sfx9b
-.section sfx0a
-.section sfx0b
-.section sfyaa
-.section sfyab
-.section sfyba
-.section sfybb
-.section sfyca
-.section sfycb
-.section sfyda
-.section sfydb
-.section sfyea
-.section sfyeb
-.section sfyfa
-.section sfyfb
-.section sfyga
-.section sfygb
-.section sfyha
-.section sfyhb
-.section sfyia
-.section sfyib
-.section sfyja
-.section sfyjb
-.section sfyka
-.section sfykb
-.section sfyla
-.section sfylb
-.section sfyma
-.section sfymb
-.section sfyna
-.section sfynb
-.section sfyoa
-.section sfyob
-.section sfypa
-.section sfypb
-.section sfyqa
-.section sfyqb
-.section sfyra
-.section sfyrb
-.section sfysa
-.section sfysb
-.section sfyta
-.section sfytb
-.section sfyua
-.section sfyub
-.section sfyva
-.section sfyvb
-.section sfywa
-.section sfywb
-.section sfyxa
-.section sfyxb
-.section sfyya
-.section sfyyb
-.section sfyza
-.section sfyzb
-.section sfy1a
-.section sfy1b
-.section sfy2a
-.section sfy2b
-.section sfy3a
-.section sfy3b
-.section sfy4a
-.section sfy4b
-.section sfy5a
-.section sfy5b
-.section sfy6a
-.section sfy6b
-.section sfy7a
-.section sfy7b
-.section sfy8a
-.section sfy8b
-.section sfy9a
-.section sfy9b
-.section sfy0a
-.section sfy0b
-.section sfzaa
-.section sfzab
-.section sfzba
-.section sfzbb
-.section sfzca
-.section sfzcb
-.section sfzda
-.section sfzdb
-.section sfzea
-.section sfzeb
-.section sfzfa
-.section sfzfb
-.section sfzga
-.section sfzgb
-.section sfzha
-.section sfzhb
-.section sfzia
-.section sfzib
-.section sfzja
-.section sfzjb
-.section sfzka
-.section sfzkb
-.section sfzla
-.section sfzlb
-.section sfzma
-.section sfzmb
-.section sfzna
-.section sfznb
-.section sfzoa
-.section sfzob
-.section sfzpa
-.section sfzpb
-.section sfzqa
-.section sfzqb
-.section sfzra
-.section sfzrb
-.section sfzsa
-.section sfzsb
-.section sfzta
-.section sfztb
-.section sfzua
-.section sfzub
-.section sfzva
-.section sfzvb
-.section sfzwa
-.section sfzwb
-.section sfzxa
-.section sfzxb
-.section sfzya
-.section sfzyb
-.section sfzza
-.section sfzzb
-.section sfz1a
-.section sfz1b
-.section sfz2a
-.section sfz2b
-.section sfz3a
-.section sfz3b
-.section sfz4a
-.section sfz4b
-.section sfz5a
-.section sfz5b
-.section sfz6a
-.section sfz6b
-.section sfz7a
-.section sfz7b
-.section sfz8a
-.section sfz8b
-.section sfz9a
-.section sfz9b
-.section sfz0a
-.section sfz0b
-.section sf1aa
-.section sf1ab
-.section sf1ba
-.section sf1bb
-.section sf1ca
-.section sf1cb
-.section sf1da
-.section sf1db
-.section sf1ea
-.section sf1eb
-.section sf1fa
-.section sf1fb
-.section sf1ga
-.section sf1gb
-.section sf1ha
-.section sf1hb
-.section sf1ia
-.section sf1ib
-.section sf1ja
-.section sf1jb
-.section sf1ka
-.section sf1kb
-.section sf1la
-.section sf1lb
-.section sf1ma
-.section sf1mb
-.section sf1na
-.section sf1nb
-.section sf1oa
-.section sf1ob
-.section sf1pa
-.section sf1pb
-.section sf1qa
-.section sf1qb
-.section sf1ra
-.section sf1rb
-.section sf1sa
-.section sf1sb
-.section sf1ta
-.section sf1tb
-.section sf1ua
-.section sf1ub
-.section sf1va
-.section sf1vb
-.section sf1wa
-.section sf1wb
-.section sf1xa
-.section sf1xb
-.section sf1ya
-.section sf1yb
-.section sf1za
-.section sf1zb
-.section sf11a
-.section sf11b
-.section sf12a
-.section sf12b
-.section sf13a
-.section sf13b
-.section sf14a
-.section sf14b
-.section sf15a
-.section sf15b
-.section sf16a
-.section sf16b
-.section sf17a
-.section sf17b
-.section sf18a
-.section sf18b
-.section sf19a
-.section sf19b
-.section sf10a
-.section sf10b
-.section sf2aa
-.section sf2ab
-.section sf2ba
-.section sf2bb
-.section sf2ca
-.section sf2cb
-.section sf2da
-.section sf2db
-.section sf2ea
-.section sf2eb
-.section sf2fa
-.section sf2fb
-.section sf2ga
-.section sf2gb
-.section sf2ha
-.section sf2hb
-.section sf2ia
-.section sf2ib
-.section sf2ja
-.section sf2jb
-.section sf2ka
-.section sf2kb
-.section sf2la
-.section sf2lb
-.section sf2ma
-.section sf2mb
-.section sf2na
-.section sf2nb
-.section sf2oa
-.section sf2ob
-.section sf2pa
-.section sf2pb
-.section sf2qa
-.section sf2qb
-.section sf2ra
-.section sf2rb
-.section sf2sa
-.section sf2sb
-.section sf2ta
-.section sf2tb
-.section sf2ua
-.section sf2ub
-.section sf2va
-.section sf2vb
-.section sf2wa
-.section sf2wb
-.section sf2xa
-.section sf2xb
-.section sf2ya
-.section sf2yb
-.section sf2za
-.section sf2zb
-.section sf21a
-.section sf21b
-.section sf22a
-.section sf22b
-.section sf23a
-.section sf23b
-.section sf24a
-.section sf24b
-.section sf25a
-.section sf25b
-.section sf26a
-.section sf26b
-.section sf27a
-.section sf27b
-.section sf28a
-.section sf28b
-.section sf29a
-.section sf29b
-.section sf20a
-.section sf20b
-.section sf3aa
-.section sf3ab
-.section sf3ba
-.section sf3bb
-.section sf3ca
-.section sf3cb
-.section sf3da
-.section sf3db
-.section sf3ea
-.section sf3eb
-.section sf3fa
-.section sf3fb
-.section sf3ga
-.section sf3gb
-.section sf3ha
-.section sf3hb
-.section sf3ia
-.section sf3ib
-.section sf3ja
-.section sf3jb
-.section sf3ka
-.section sf3kb
-.section sf3la
-.section sf3lb
-.section sf3ma
-.section sf3mb
-.section sf3na
-.section sf3nb
-.section sf3oa
-.section sf3ob
-.section sf3pa
-.section sf3pb
-.section sf3qa
-.section sf3qb
-.section sf3ra
-.section sf3rb
-.section sf3sa
-.section sf3sb
-.section sf3ta
-.section sf3tb
-.section sf3ua
-.section sf3ub
-.section sf3va
-.section sf3vb
-.section sf3wa
-.section sf3wb
-.section sf3xa
-.section sf3xb
-.section sf3ya
-.section sf3yb
-.section sf3za
-.section sf3zb
-.section sf31a
-.section sf31b
-.section sf32a
-.section sf32b
-.section sf33a
-.section sf33b
-.section sf34a
-.section sf34b
-.section sf35a
-.section sf35b
-.section sf36a
-.section sf36b
-.section sf37a
-.section sf37b
-.section sf38a
-.section sf38b
-.section sf39a
-.section sf39b
-.section sf30a
-.section sf30b
-.section sf4aa
-.section sf4ab
-.section sf4ba
-.section sf4bb
-.section sf4ca
-.section sf4cb
-.section sf4da
-.section sf4db
-.section sf4ea
-.section sf4eb
-.section sf4fa
-.section sf4fb
-.section sf4ga
-.section sf4gb
-.section sf4ha
-.section sf4hb
-.section sf4ia
-.section sf4ib
-.section sf4ja
-.section sf4jb
-.section sf4ka
-.section sf4kb
-.section sf4la
-.section sf4lb
-.section sf4ma
-.section sf4mb
-.section sf4na
-.section sf4nb
-.section sf4oa
-.section sf4ob
-.section sf4pa
-.section sf4pb
-.section sf4qa
-.section sf4qb
-.section sf4ra
-.section sf4rb
-.section sf4sa
-.section sf4sb
-.section sf4ta
-.section sf4tb
-.section sf4ua
-.section sf4ub
-.section sf4va
-.section sf4vb
-.section sf4wa
-.section sf4wb
-.section sf4xa
-.section sf4xb
-.section sf4ya
-.section sf4yb
-.section sf4za
-.section sf4zb
-.section sf41a
-.section sf41b
-.section sf42a
-.section sf42b
-.section sf43a
-.section sf43b
-.section sf44a
-.section sf44b
-.section sf45a
-.section sf45b
-.section sf46a
-.section sf46b
-.section sf47a
-.section sf47b
-.section sf48a
-.section sf48b
-.section sf49a
-.section sf49b
-.section sf40a
-.section sf40b
-.section sf5aa
-.section sf5ab
-.section sf5ba
-.section sf5bb
-.section sf5ca
-.section sf5cb
-.section sf5da
-.section sf5db
-.section sf5ea
-.section sf5eb
-.section sf5fa
-.section sf5fb
-.section sf5ga
-.section sf5gb
-.section sf5ha
-.section sf5hb
-.section sf5ia
-.section sf5ib
-.section sf5ja
-.section sf5jb
-.section sf5ka
-.section sf5kb
-.section sf5la
-.section sf5lb
-.section sf5ma
-.section sf5mb
-.section sf5na
-.section sf5nb
-.section sf5oa
-.section sf5ob
-.section sf5pa
-.section sf5pb
-.section sf5qa
-.section sf5qb
-.section sf5ra
-.section sf5rb
-.section sf5sa
-.section sf5sb
-.section sf5ta
-.section sf5tb
-.section sf5ua
-.section sf5ub
-.section sf5va
-.section sf5vb
-.section sf5wa
-.section sf5wb
-.section sf5xa
-.section sf5xb
-.section sf5ya
-.section sf5yb
-.section sf5za
-.section sf5zb
-.section sf51a
-.section sf51b
-.section sf52a
-.section sf52b
-.section sf53a
-.section sf53b
-.section sf54a
-.section sf54b
-.section sf55a
-.section sf55b
-.section sf56a
-.section sf56b
-.section sf57a
-.section sf57b
-.section sf58a
-.section sf58b
-.section sf59a
-.section sf59b
-.section sf50a
-.section sf50b
-.section sf6aa
-.section sf6ab
-.section sf6ba
-.section sf6bb
-.section sf6ca
-.section sf6cb
-.section sf6da
-.section sf6db
-.section sf6ea
-.section sf6eb
-.section sf6fa
-.section sf6fb
-.section sf6ga
-.section sf6gb
-.section sf6ha
-.section sf6hb
-.section sf6ia
-.section sf6ib
-.section sf6ja
-.section sf6jb
-.section sf6ka
-.section sf6kb
-.section sf6la
-.section sf6lb
-.section sf6ma
-.section sf6mb
-.section sf6na
-.section sf6nb
-.section sf6oa
-.section sf6ob
-.section sf6pa
-.section sf6pb
-.section sf6qa
-.section sf6qb
-.section sf6ra
-.section sf6rb
-.section sf6sa
-.section sf6sb
-.section sf6ta
-.section sf6tb
-.section sf6ua
-.section sf6ub
-.section sf6va
-.section sf6vb
-.section sf6wa
-.section sf6wb
-.section sf6xa
-.section sf6xb
-.section sf6ya
-.section sf6yb
-.section sf6za
-.section sf6zb
-.section sf61a
-.section sf61b
-.section sf62a
-.section sf62b
-.section sf63a
-.section sf63b
-.section sf64a
-.section sf64b
-.section sf65a
-.section sf65b
-.section sf66a
-.section sf66b
-.section sf67a
-.section sf67b
-.section sf68a
-.section sf68b
-.section sf69a
-.section sf69b
-.section sf60a
-.section sf60b
-.section sf7aa
-.section sf7ab
-.section sf7ba
-.section sf7bb
-.section sf7ca
-.section sf7cb
-.section sf7da
-.section sf7db
-.section sf7ea
-.section sf7eb
-.section sf7fa
-.section sf7fb
-.section sf7ga
-.section sf7gb
-.section sf7ha
-.section sf7hb
-.section sf7ia
-.section sf7ib
-.section sf7ja
-.section sf7jb
-.section sf7ka
-.section sf7kb
-.section sf7la
-.section sf7lb
-.section sf7ma
-.section sf7mb
-.section sf7na
-.section sf7nb
-.section sf7oa
-.section sf7ob
-.section sf7pa
-.section sf7pb
-.section sf7qa
-.section sf7qb
-.section sf7ra
-.section sf7rb
-.section sf7sa
-.section sf7sb
-.section sf7ta
-.section sf7tb
-.section sf7ua
-.section sf7ub
-.section sf7va
-.section sf7vb
-.section sf7wa
-.section sf7wb
-.section sf7xa
-.section sf7xb
-.section sf7ya
-.section sf7yb
-.section sf7za
-.section sf7zb
-.section sf71a
-.section sf71b
-.section sf72a
-.section sf72b
-.section sf73a
-.section sf73b
-.section sf74a
-.section sf74b
-.section sf75a
-.section sf75b
-.section sf76a
-.section sf76b
-.section sf77a
-.section sf77b
-.section sf78a
-.section sf78b
-.section sf79a
-.section sf79b
-.section sf70a
-.section sf70b
-.section sf8aa
-.section sf8ab
-.section sf8ba
-.section sf8bb
-.section sf8ca
-.section sf8cb
-.section sf8da
-.section sf8db
-.section sf8ea
-.section sf8eb
-.section sf8fa
-.section sf8fb
-.section sf8ga
-.section sf8gb
-.section sf8ha
-.section sf8hb
-.section sf8ia
-.section sf8ib
-.section sf8ja
-.section sf8jb
-.section sf8ka
-.section sf8kb
-.section sf8la
-.section sf8lb
-.section sf8ma
-.section sf8mb
-.section sf8na
-.section sf8nb
-.section sf8oa
-.section sf8ob
-.section sf8pa
-.section sf8pb
-.section sf8qa
-.section sf8qb
-.section sf8ra
-.section sf8rb
-.section sf8sa
-.section sf8sb
-.section sf8ta
-.section sf8tb
-.section sf8ua
-.section sf8ub
-.section sf8va
-.section sf8vb
-.section sf8wa
-.section sf8wb
-.section sf8xa
-.section sf8xb
-.section sf8ya
-.section sf8yb
-.section sf8za
-.section sf8zb
-.section sf81a
-.section sf81b
-.section sf82a
-.section sf82b
-.section sf83a
-.section sf83b
-.section sf84a
-.section sf84b
-.section sf85a
-.section sf85b
-.section sf86a
-.section sf86b
-.section sf87a
-.section sf87b
-.section sf88a
-.section sf88b
-.section sf89a
-.section sf89b
-.section sf80a
-.section sf80b
-.section sf9aa
-.section sf9ab
-.section sf9ba
-.section sf9bb
-.section sf9ca
-.section sf9cb
-.section sf9da
-.section sf9db
-.section sf9ea
-.section sf9eb
-.section sf9fa
-.section sf9fb
-.section sf9ga
-.section sf9gb
-.section sf9ha
-.section sf9hb
-.section sf9ia
-.section sf9ib
-.section sf9ja
-.section sf9jb
-.section sf9ka
-.section sf9kb
-.section sf9la
-.section sf9lb
-.section sf9ma
-.section sf9mb
-.section sf9na
-.section sf9nb
-.section sf9oa
-.section sf9ob
-.section sf9pa
-.section sf9pb
-.section sf9qa
-.section sf9qb
-.section sf9ra
-.section sf9rb
-.section sf9sa
-.section sf9sb
-.section sf9ta
-.section sf9tb
-.section sf9ua
-.section sf9ub
-.section sf9va
-.section sf9vb
-.section sf9wa
-.section sf9wb
-.section sf9xa
-.section sf9xb
-.section sf9ya
-.section sf9yb
-.section sf9za
-.section sf9zb
-.section sf91a
-.section sf91b
-.section sf92a
-.section sf92b
-.section sf93a
-.section sf93b
-.section sf94a
-.section sf94b
-.section sf95a
-.section sf95b
-.section sf96a
-.section sf96b
-.section sf97a
-.section sf97b
-.section sf98a
-.section sf98b
-.section sf99a
-.section sf99b
-.section sf90a
-.section sf90b
-.section sf0aa
-.section sf0ab
-.section sf0ba
-.section sf0bb
-.section sf0ca
-.section sf0cb
-.section sf0da
-.section sf0db
-.section sf0ea
-.section sf0eb
-.section sf0fa
-.section sf0fb
-.section sf0ga
-.section sf0gb
-.section sf0ha
-.section sf0hb
-.section sf0ia
-.section sf0ib
-.section sf0ja
-.section sf0jb
-.section sf0ka
-.section sf0kb
-.section sf0la
-.section sf0lb
-.section sf0ma
-.section sf0mb
-.section sf0na
-.section sf0nb
-.section sf0oa
-.section sf0ob
-.section sf0pa
-.section sf0pb
-.section sf0qa
-.section sf0qb
-.section sf0ra
-.section sf0rb
-.section sf0sa
-.section sf0sb
-.section sf0ta
-.section sf0tb
-.section sf0ua
-.section sf0ub
-.section sf0va
-.section sf0vb
-.section sf0wa
-.section sf0wb
-.section sf0xa
-.section sf0xb
-.section sf0ya
-.section sf0yb
-.section sf0za
-.section sf0zb
-.section sf01a
-.section sf01b
-.section sf02a
-.section sf02b
-.section sf03a
-.section sf03b
-.section sf04a
-.section sf04b
-.section sf05a
-.section sf05b
-.section sf06a
-.section sf06b
-.section sf07a
-.section sf07b
-.section sf08a
-.section sf08b
-.section sf09a
-.section sf09b
-.section sf00a
-.section sf00b
-.section sgaaa
-.section sgaab
-.section sgaba
-.section sgabb
-.section sgaca
-.section sgacb
-.section sgada
-.section sgadb
-.section sgaea
-.section sgaeb
-.section sgafa
-.section sgafb
-.section sgaga
-.section sgagb
-.section sgaha
-.section sgahb
-.section sgaia
-.section sgaib
-.section sgaja
-.section sgajb
-.section sgaka
-.section sgakb
-.section sgala
-.section sgalb
-.section sgama
-.section sgamb
-.section sgana
-.section sganb
-.section sgaoa
-.section sgaob
-.section sgapa
-.section sgapb
-.section sgaqa
-.section sgaqb
-.section sgara
-.section sgarb
-.section sgasa
-.section sgasb
-.section sgata
-.section sgatb
-.section sgaua
-.section sgaub
-.section sgava
-.section sgavb
-.section sgawa
-.section sgawb
-.section sgaxa
-.section sgaxb
-.section sgaya
-.section sgayb
-.section sgaza
-.section sgazb
-.section sga1a
-.section sga1b
-.section sga2a
-.section sga2b
-.section sga3a
-.section sga3b
-.section sga4a
-.section sga4b
-.section sga5a
-.section sga5b
-.section sga6a
-.section sga6b
-.section sga7a
-.section sga7b
-.section sga8a
-.section sga8b
-.section sga9a
-.section sga9b
-.section sga0a
-.section sga0b
-.section sgbaa
-.section sgbab
-.section sgbba
-.section sgbbb
-.section sgbca
-.section sgbcb
-.section sgbda
-.section sgbdb
-.section sgbea
-.section sgbeb
-.section sgbfa
-.section sgbfb
-.section sgbga
-.section sgbgb
-.section sgbha
-.section sgbhb
-.section sgbia
-.section sgbib
-.section sgbja
-.section sgbjb
-.section sgbka
-.section sgbkb
-.section sgbla
-.section sgblb
-.section sgbma
-.section sgbmb
-.section sgbna
-.section sgbnb
-.section sgboa
-.section sgbob
-.section sgbpa
-.section sgbpb
-.section sgbqa
-.section sgbqb
-.section sgbra
-.section sgbrb
-.section sgbsa
-.section sgbsb
-.section sgbta
-.section sgbtb
-.section sgbua
-.section sgbub
-.section sgbva
-.section sgbvb
-.section sgbwa
-.section sgbwb
-.section sgbxa
-.section sgbxb
-.section sgbya
-.section sgbyb
-.section sgbza
-.section sgbzb
-.section sgb1a
-.section sgb1b
-.section sgb2a
-.section sgb2b
-.section sgb3a
-.section sgb3b
-.section sgb4a
-.section sgb4b
-.section sgb5a
-.section sgb5b
-.section sgb6a
-.section sgb6b
-.section sgb7a
-.section sgb7b
-.section sgb8a
-.section sgb8b
-.section sgb9a
-.section sgb9b
-.section sgb0a
-.section sgb0b
-.section sgcaa
-.section sgcab
-.section sgcba
-.section sgcbb
-.section sgcca
-.section sgccb
-.section sgcda
-.section sgcdb
-.section sgcea
-.section sgceb
-.section sgcfa
-.section sgcfb
-.section sgcga
-.section sgcgb
-.section sgcha
-.section sgchb
-.section sgcia
-.section sgcib
-.section sgcja
-.section sgcjb
-.section sgcka
-.section sgckb
-.section sgcla
-.section sgclb
-.section sgcma
-.section sgcmb
-.section sgcna
-.section sgcnb
-.section sgcoa
-.section sgcob
-.section sgcpa
-.section sgcpb
-.section sgcqa
-.section sgcqb
-.section sgcra
-.section sgcrb
-.section sgcsa
-.section sgcsb
-.section sgcta
-.section sgctb
-.section sgcua
-.section sgcub
-.section sgcva
-.section sgcvb
-.section sgcwa
-.section sgcwb
-.section sgcxa
-.section sgcxb
-.section sgcya
-.section sgcyb
-.section sgcza
-.section sgczb
-.section sgc1a
-.section sgc1b
-.section sgc2a
-.section sgc2b
-.section sgc3a
-.section sgc3b
-.section sgc4a
-.section sgc4b
-.section sgc5a
-.section sgc5b
-.section sgc6a
-.section sgc6b
-.section sgc7a
-.section sgc7b
-.section sgc8a
-.section sgc8b
-.section sgc9a
-.section sgc9b
-.section sgc0a
-.section sgc0b
-.section sgdaa
-.section sgdab
-.section sgdba
-.section sgdbb
-.section sgdca
-.section sgdcb
-.section sgdda
-.section sgddb
-.section sgdea
-.section sgdeb
-.section sgdfa
-.section sgdfb
-.section sgdga
-.section sgdgb
-.section sgdha
-.section sgdhb
-.section sgdia
-.section sgdib
-.section sgdja
-.section sgdjb
-.section sgdka
-.section sgdkb
-.section sgdla
-.section sgdlb
-.section sgdma
-.section sgdmb
-.section sgdna
-.section sgdnb
-.section sgdoa
-.section sgdob
-.section sgdpa
-.section sgdpb
-.section sgdqa
-.section sgdqb
-.section sgdra
-.section sgdrb
-.section sgdsa
-.section sgdsb
-.section sgdta
-.section sgdtb
-.section sgdua
-.section sgdub
-.section sgdva
-.section sgdvb
-.section sgdwa
-.section sgdwb
-.section sgdxa
-.section sgdxb
-.section sgdya
-.section sgdyb
-.section sgdza
-.section sgdzb
-.section sgd1a
-.section sgd1b
-.section sgd2a
-.section sgd2b
-.section sgd3a
-.section sgd3b
-.section sgd4a
-.section sgd4b
-.section sgd5a
-.section sgd5b
-.section sgd6a
-.section sgd6b
-.section sgd7a
-.section sgd7b
-.section sgd8a
-.section sgd8b
-.section sgd9a
-.section sgd9b
-.section sgd0a
-.section sgd0b
-.section sgeaa
-.section sgeab
-.section sgeba
-.section sgebb
-.section sgeca
-.section sgecb
-.section sgeda
-.section sgedb
-.section sgeea
-.section sgeeb
-.section sgefa
-.section sgefb
-.section sgega
-.section sgegb
-.section sgeha
-.section sgehb
-.section sgeia
-.section sgeib
-.section sgeja
-.section sgejb
-.section sgeka
-.section sgekb
-.section sgela
-.section sgelb
-.section sgema
-.section sgemb
-.section sgena
-.section sgenb
-.section sgeoa
-.section sgeob
-.section sgepa
-.section sgepb
-.section sgeqa
-.section sgeqb
-.section sgera
-.section sgerb
-.section sgesa
-.section sgesb
-.section sgeta
-.section sgetb
-.section sgeua
-.section sgeub
-.section sgeva
-.section sgevb
-.section sgewa
-.section sgewb
-.section sgexa
-.section sgexb
-.section sgeya
-.section sgeyb
-.section sgeza
-.section sgezb
-.section sge1a
-.section sge1b
-.section sge2a
-.section sge2b
-.section sge3a
-.section sge3b
-.section sge4a
-.section sge4b
-.section sge5a
-.section sge5b
-.section sge6a
-.section sge6b
-.section sge7a
-.section sge7b
-.section sge8a
-.section sge8b
-.section sge9a
-.section sge9b
-.section sge0a
-.section sge0b
-.section sgfaa
-.section sgfab
-.section sgfba
-.section sgfbb
-.section sgfca
-.section sgfcb
-.section sgfda
-.section sgfdb
-.section sgfea
-.section sgfeb
-.section sgffa
-.section sgffb
-.section sgfga
-.section sgfgb
-.section sgfha
-.section sgfhb
-.section sgfia
-.section sgfib
-.section sgfja
-.section sgfjb
-.section sgfka
-.section sgfkb
-.section sgfla
-.section sgflb
-.section sgfma
-.section sgfmb
-.section sgfna
-.section sgfnb
-.section sgfoa
-.section sgfob
-.section sgfpa
-.section sgfpb
-.section sgfqa
-.section sgfqb
-.section sgfra
-.section sgfrb
-.section sgfsa
-.section sgfsb
-.section sgfta
-.section sgftb
-.section sgfua
-.section sgfub
-.section sgfva
-.section sgfvb
-.section sgfwa
-.section sgfwb
-.section sgfxa
-.section sgfxb
-.section sgfya
-.section sgfyb
-.section sgfza
-.section sgfzb
-.section sgf1a
-.section sgf1b
-.section sgf2a
-.section sgf2b
-.section sgf3a
-.section sgf3b
-.section sgf4a
-.section sgf4b
-.section sgf5a
-.section sgf5b
-.section sgf6a
-.section sgf6b
-.section sgf7a
-.section sgf7b
-.section sgf8a
-.section sgf8b
-.section sgf9a
-.section sgf9b
-.section sgf0a
-.section sgf0b
-.section sggaa
-.section sggab
-.section sggba
-.section sggbb
-.section sggca
-.section sggcb
-.section sggda
-.section sggdb
-.section sggea
-.section sggeb
-.section sggfa
-.section sggfb
-.section sggga
-.section sgggb
-.section sggha
-.section sgghb
-.section sggia
-.section sggib
-.section sggja
-.section sggjb
-.section sggka
-.section sggkb
-.section sggla
-.section sgglb
-.section sggma
-.section sggmb
-.section sggna
-.section sggnb
-.section sggoa
-.section sggob
-.section sggpa
-.section sggpb
-.section sggqa
-.section sggqb
-.section sggra
-.section sggrb
-.section sggsa
-.section sggsb
-.section sggta
-.section sggtb
-.section sggua
-.section sggub
-.section sggva
-.section sggvb
-.section sggwa
-.section sggwb
-.section sggxa
-.section sggxb
-.section sggya
-.section sggyb
-.section sggza
-.section sggzb
-.section sgg1a
-.section sgg1b
-.section sgg2a
-.section sgg2b
-.section sgg3a
-.section sgg3b
-.section sgg4a
-.section sgg4b
-.section sgg5a
-.section sgg5b
-.section sgg6a
-.section sgg6b
-.section sgg7a
-.section sgg7b
-.section sgg8a
-.section sgg8b
-.section sgg9a
-.section sgg9b
-.section sgg0a
-.section sgg0b
-.section sghaa
-.section sghab
-.section sghba
-.section sghbb
-.section sghca
-.section sghcb
-.section sghda
-.section sghdb
-.section sghea
-.section sgheb
-.section sghfa
-.section sghfb
-.section sghga
-.section sghgb
-.section sghha
-.section sghhb
-.section sghia
-.section sghib
-.section sghja
-.section sghjb
-.section sghka
-.section sghkb
-.section sghla
-.section sghlb
-.section sghma
-.section sghmb
-.section sghna
-.section sghnb
-.section sghoa
-.section sghob
-.section sghpa
-.section sghpb
-.section sghqa
-.section sghqb
-.section sghra
-.section sghrb
-.section sghsa
-.section sghsb
-.section sghta
-.section sghtb
-.section sghua
-.section sghub
-.section sghva
-.section sghvb
-.section sghwa
-.section sghwb
-.section sghxa
-.section sghxb
-.section sghya
-.section sghyb
-.section sghza
-.section sghzb
-.section sgh1a
-.section sgh1b
-.section sgh2a
-.section sgh2b
-.section sgh3a
-.section sgh3b
-.section sgh4a
-.section sgh4b
-.section sgh5a
-.section sgh5b
-.section sgh6a
-.section sgh6b
-.section sgh7a
-.section sgh7b
-.section sgh8a
-.section sgh8b
-.section sgh9a
-.section sgh9b
-.section sgh0a
-.section sgh0b
-.section sgiaa
-.section sgiab
-.section sgiba
-.section sgibb
-.section sgica
-.section sgicb
-.section sgida
-.section sgidb
-.section sgiea
-.section sgieb
-.section sgifa
-.section sgifb
-.section sgiga
-.section sgigb
-.section sgiha
-.section sgihb
-.section sgiia
-.section sgiib
-.section sgija
-.section sgijb
-.section sgika
-.section sgikb
-.section sgila
-.section sgilb
-.section sgima
-.section sgimb
-.section sgina
-.section sginb
-.section sgioa
-.section sgiob
-.section sgipa
-.section sgipb
-.section sgiqa
-.section sgiqb
-.section sgira
-.section sgirb
-.section sgisa
-.section sgisb
-.section sgita
-.section sgitb
-.section sgiua
-.section sgiub
-.section sgiva
-.section sgivb
-.section sgiwa
-.section sgiwb
-.section sgixa
-.section sgixb
-.section sgiya
-.section sgiyb
-.section sgiza
-.section sgizb
-.section sgi1a
-.section sgi1b
-.section sgi2a
-.section sgi2b
-.section sgi3a
-.section sgi3b
-.section sgi4a
-.section sgi4b
-.section sgi5a
-.section sgi5b
-.section sgi6a
-.section sgi6b
-.section sgi7a
-.section sgi7b
-.section sgi8a
-.section sgi8b
-.section sgi9a
-.section sgi9b
-.section sgi0a
-.section sgi0b
-.section sgjaa
-.section sgjab
-.section sgjba
-.section sgjbb
-.section sgjca
-.section sgjcb
-.section sgjda
-.section sgjdb
-.section sgjea
-.section sgjeb
-.section sgjfa
-.section sgjfb
-.section sgjga
-.section sgjgb
-.section sgjha
-.section sgjhb
-.section sgjia
-.section sgjib
-.section sgjja
-.section sgjjb
-.section sgjka
-.section sgjkb
-.section sgjla
-.section sgjlb
-.section sgjma
-.section sgjmb
-.section sgjna
-.section sgjnb
-.section sgjoa
-.section sgjob
-.section sgjpa
-.section sgjpb
-.section sgjqa
-.section sgjqb
-.section sgjra
-.section sgjrb
-.section sgjsa
-.section sgjsb
-.section sgjta
-.section sgjtb
-.section sgjua
-.section sgjub
-.section sgjva
-.section sgjvb
-.section sgjwa
-.section sgjwb
-.section sgjxa
-.section sgjxb
-.section sgjya
-.section sgjyb
-.section sgjza
-.section sgjzb
-.section sgj1a
-.section sgj1b
-.section sgj2a
-.section sgj2b
-.section sgj3a
-.section sgj3b
-.section sgj4a
-.section sgj4b
-.section sgj5a
-.section sgj5b
-.section sgj6a
-.section sgj6b
-.section sgj7a
-.section sgj7b
-.section sgj8a
-.section sgj8b
-.section sgj9a
-.section sgj9b
-.section sgj0a
-.section sgj0b
-.section sgkaa
-.section sgkab
-.section sgkba
-.section sgkbb
-.section sgkca
-.section sgkcb
-.section sgkda
-.section sgkdb
-.section sgkea
-.section sgkeb
-.section sgkfa
-.section sgkfb
-.section sgkga
-.section sgkgb
-.section sgkha
-.section sgkhb
-.section sgkia
-.section sgkib
-.section sgkja
-.section sgkjb
-.section sgkka
-.section sgkkb
-.section sgkla
-.section sgklb
-.section sgkma
-.section sgkmb
-.section sgkna
-.section sgknb
-.section sgkoa
-.section sgkob
-.section sgkpa
-.section sgkpb
-.section sgkqa
-.section sgkqb
-.section sgkra
-.section sgkrb
-.section sgksa
-.section sgksb
-.section sgkta
-.section sgktb
-.section sgkua
-.section sgkub
-.section sgkva
-.section sgkvb
-.section sgkwa
-.section sgkwb
-.section sgkxa
-.section sgkxb
-.section sgkya
-.section sgkyb
-.section sgkza
-.section sgkzb
-.section sgk1a
-.section sgk1b
-.section sgk2a
-.section sgk2b
-.section sgk3a
-.section sgk3b
-.section sgk4a
-.section sgk4b
-.section sgk5a
-.section sgk5b
-.section sgk6a
-.section sgk6b
-.section sgk7a
-.section sgk7b
-.section sgk8a
-.section sgk8b
-.section sgk9a
-.section sgk9b
-.section sgk0a
-.section sgk0b
-.section sglaa
-.section sglab
-.section sglba
-.section sglbb
-.section sglca
-.section sglcb
-.section sglda
-.section sgldb
-.section sglea
-.section sgleb
-.section sglfa
-.section sglfb
-.section sglga
-.section sglgb
-.section sglha
-.section sglhb
-.section sglia
-.section sglib
-.section sglja
-.section sgljb
-.section sglka
-.section sglkb
-.section sglla
-.section sgllb
-.section sglma
-.section sglmb
-.section sglna
-.section sglnb
-.section sgloa
-.section sglob
-.section sglpa
-.section sglpb
-.section sglqa
-.section sglqb
-.section sglra
-.section sglrb
-.section sglsa
-.section sglsb
-.section sglta
-.section sgltb
-.section sglua
-.section sglub
-.section sglva
-.section sglvb
-.section sglwa
-.section sglwb
-.section sglxa
-.section sglxb
-.section sglya
-.section sglyb
-.section sglza
-.section sglzb
-.section sgl1a
-.section sgl1b
-.section sgl2a
-.section sgl2b
-.section sgl3a
-.section sgl3b
-.section sgl4a
-.section sgl4b
-.section sgl5a
-.section sgl5b
-.section sgl6a
-.section sgl6b
-.section sgl7a
-.section sgl7b
-.section sgl8a
-.section sgl8b
-.section sgl9a
-.section sgl9b
-.section sgl0a
-.section sgl0b
-.section sgmaa
-.section sgmab
-.section sgmba
-.section sgmbb
-.section sgmca
-.section sgmcb
-.section sgmda
-.section sgmdb
-.section sgmea
-.section sgmeb
-.section sgmfa
-.section sgmfb
-.section sgmga
-.section sgmgb
-.section sgmha
-.section sgmhb
-.section sgmia
-.section sgmib
-.section sgmja
-.section sgmjb
-.section sgmka
-.section sgmkb
-.section sgmla
-.section sgmlb
-.section sgmma
-.section sgmmb
-.section sgmna
-.section sgmnb
-.section sgmoa
-.section sgmob
-.section sgmpa
-.section sgmpb
-.section sgmqa
-.section sgmqb
-.section sgmra
-.section sgmrb
-.section sgmsa
-.section sgmsb
-.section sgmta
-.section sgmtb
-.section sgmua
-.section sgmub
-.section sgmva
-.section sgmvb
-.section sgmwa
-.section sgmwb
-.section sgmxa
-.section sgmxb
-.section sgmya
-.section sgmyb
-.section sgmza
-.section sgmzb
-.section sgm1a
-.section sgm1b
-.section sgm2a
-.section sgm2b
-.section sgm3a
-.section sgm3b
-.section sgm4a
-.section sgm4b
-.section sgm5a
-.section sgm5b
-.section sgm6a
-.section sgm6b
-.section sgm7a
-.section sgm7b
-.section sgm8a
-.section sgm8b
-.section sgm9a
-.section sgm9b
-.section sgm0a
-.section sgm0b
-.section sgnaa
-.section sgnab
-.section sgnba
-.section sgnbb
-.section sgnca
-.section sgncb
-.section sgnda
-.section sgndb
-.section sgnea
-.section sgneb
-.section sgnfa
-.section sgnfb
-.section sgnga
-.section sgngb
-.section sgnha
-.section sgnhb
-.section sgnia
-.section sgnib
-.section sgnja
-.section sgnjb
-.section sgnka
-.section sgnkb
-.section sgnla
-.section sgnlb
-.section sgnma
-.section sgnmb
-.section sgnna
-.section sgnnb
-.section sgnoa
-.section sgnob
-.section sgnpa
-.section sgnpb
-.section sgnqa
-.section sgnqb
-.section sgnra
-.section sgnrb
-.section sgnsa
-.section sgnsb
-.section sgnta
-.section sgntb
-.section sgnua
-.section sgnub
-.section sgnva
-.section sgnvb
-.section sgnwa
-.section sgnwb
-.section sgnxa
-.section sgnxb
-.section sgnya
-.section sgnyb
-.section sgnza
-.section sgnzb
-.section sgn1a
-.section sgn1b
-.section sgn2a
-.section sgn2b
-.section sgn3a
-.section sgn3b
-.section sgn4a
-.section sgn4b
-.section sgn5a
-.section sgn5b
-.section sgn6a
-.section sgn6b
-.section sgn7a
-.section sgn7b
-.section sgn8a
-.section sgn8b
-.section sgn9a
-.section sgn9b
-.section sgn0a
-.section sgn0b
-.section sgoaa
-.section sgoab
-.section sgoba
-.section sgobb
-.section sgoca
-.section sgocb
-.section sgoda
-.section sgodb
-.section sgoea
-.section sgoeb
-.section sgofa
-.section sgofb
-.section sgoga
-.section sgogb
-.section sgoha
-.section sgohb
-.section sgoia
-.section sgoib
-.section sgoja
-.section sgojb
-.section sgoka
-.section sgokb
-.section sgola
-.section sgolb
-.section sgoma
-.section sgomb
-.section sgona
-.section sgonb
-.section sgooa
-.section sgoob
-.section sgopa
-.section sgopb
-.section sgoqa
-.section sgoqb
-.section sgora
-.section sgorb
-.section sgosa
-.section sgosb
-.section sgota
-.section sgotb
-.section sgoua
-.section sgoub
-.section sgova
-.section sgovb
-.section sgowa
-.section sgowb
-.section sgoxa
-.section sgoxb
-.section sgoya
-.section sgoyb
-.section sgoza
-.section sgozb
-.section sgo1a
-.section sgo1b
-.section sgo2a
-.section sgo2b
-.section sgo3a
-.section sgo3b
-.section sgo4a
-.section sgo4b
-.section sgo5a
-.section sgo5b
-.section sgo6a
-.section sgo6b
-.section sgo7a
-.section sgo7b
-.section sgo8a
-.section sgo8b
-.section sgo9a
-.section sgo9b
-.section sgo0a
-.section sgo0b
-.section sgpaa
-.section sgpab
-.section sgpba
-.section sgpbb
-.section sgpca
-.section sgpcb
-.section sgpda
-.section sgpdb
-.section sgpea
-.section sgpeb
-.section sgpfa
-.section sgpfb
-.section sgpga
-.section sgpgb
-.section sgpha
-.section sgphb
-.section sgpia
-.section sgpib
-.section sgpja
-.section sgpjb
-.section sgpka
-.section sgpkb
-.section sgpla
-.section sgplb
-.section sgpma
-.section sgpmb
-.section sgpna
-.section sgpnb
-.section sgpoa
-.section sgpob
-.section sgppa
-.section sgppb
-.section sgpqa
-.section sgpqb
-.section sgpra
-.section sgprb
-.section sgpsa
-.section sgpsb
-.section sgpta
-.section sgptb
-.section sgpua
-.section sgpub
-.section sgpva
-.section sgpvb
-.section sgpwa
-.section sgpwb
-.section sgpxa
-.section sgpxb
-.section sgpya
-.section sgpyb
-.section sgpza
-.section sgpzb
-.section sgp1a
-.section sgp1b
-.section sgp2a
-.section sgp2b
-.section sgp3a
-.section sgp3b
-.section sgp4a
-.section sgp4b
-.section sgp5a
-.section sgp5b
-.section sgp6a
-.section sgp6b
-.section sgp7a
-.section sgp7b
-.section sgp8a
-.section sgp8b
-.section sgp9a
-.section sgp9b
-.section sgp0a
-.section sgp0b
-.section sgqaa
-.section sgqab
-.section sgqba
-.section sgqbb
-.section sgqca
-.section sgqcb
-.section sgqda
-.section sgqdb
-.section sgqea
-.section sgqeb
-.section sgqfa
-.section sgqfb
-.section sgqga
-.section sgqgb
-.section sgqha
-.section sgqhb
-.section sgqia
-.section sgqib
-.section sgqja
-.section sgqjb
-.section sgqka
-.section sgqkb
-.section sgqla
-.section sgqlb
-.section sgqma
-.section sgqmb
-.section sgqna
-.section sgqnb
-.section sgqoa
-.section sgqob
-.section sgqpa
-.section sgqpb
-.section sgqqa
-.section sgqqb
-.section sgqra
-.section sgqrb
-.section sgqsa
-.section sgqsb
-.section sgqta
-.section sgqtb
-.section sgqua
-.section sgqub
-.section sgqva
-.section sgqvb
-.section sgqwa
-.section sgqwb
-.section sgqxa
-.section sgqxb
-.section sgqya
-.section sgqyb
-.section sgqza
-.section sgqzb
-.section sgq1a
-.section sgq1b
-.section sgq2a
-.section sgq2b
-.section sgq3a
-.section sgq3b
-.section sgq4a
-.section sgq4b
-.section sgq5a
-.section sgq5b
-.section sgq6a
-.section sgq6b
-.section sgq7a
-.section sgq7b
-.section sgq8a
-.section sgq8b
-.section sgq9a
-.section sgq9b
-.section sgq0a
-.section sgq0b
-.section sgraa
-.section sgrab
-.section sgrba
-.section sgrbb
-.section sgrca
-.section sgrcb
-.section sgrda
-.section sgrdb
-.section sgrea
-.section sgreb
-.section sgrfa
-.section sgrfb
-.section sgrga
-.section sgrgb
-.section sgrha
-.section sgrhb
-.section sgria
-.section sgrib
-.section sgrja
-.section sgrjb
-.section sgrka
-.section sgrkb
-.section sgrla
-.section sgrlb
-.section sgrma
-.section sgrmb
-.section sgrna
-.section sgrnb
-.section sgroa
-.section sgrob
-.section sgrpa
-.section sgrpb
-.section sgrqa
-.section sgrqb
-.section sgrra
-.section sgrrb
-.section sgrsa
-.section sgrsb
-.section sgrta
-.section sgrtb
-.section sgrua
-.section sgrub
-.section sgrva
-.section sgrvb
-.section sgrwa
-.section sgrwb
-.section sgrxa
-.section sgrxb
-.section sgrya
-.section sgryb
-.section sgrza
-.section sgrzb
-.section sgr1a
-.section sgr1b
-.section sgr2a
-.section sgr2b
-.section sgr3a
-.section sgr3b
-.section sgr4a
-.section sgr4b
-.section sgr5a
-.section sgr5b
-.section sgr6a
-.section sgr6b
-.section sgr7a
-.section sgr7b
-.section sgr8a
-.section sgr8b
-.section sgr9a
-.section sgr9b
-.section sgr0a
-.section sgr0b
-.section sgsaa
-.section sgsab
-.section sgsba
-.section sgsbb
-.section sgsca
-.section sgscb
-.section sgsda
-.section sgsdb
-.section sgsea
-.section sgseb
-.section sgsfa
-.section sgsfb
-.section sgsga
-.section sgsgb
-.section sgsha
-.section sgshb
-.section sgsia
-.section sgsib
-.section sgsja
-.section sgsjb
-.section sgska
-.section sgskb
-.section sgsla
-.section sgslb
-.section sgsma
-.section sgsmb
-.section sgsna
-.section sgsnb
-.section sgsoa
-.section sgsob
-.section sgspa
-.section sgspb
-.section sgsqa
-.section sgsqb
-.section sgsra
-.section sgsrb
-.section sgssa
-.section sgssb
-.section sgsta
-.section sgstb
-.section sgsua
-.section sgsub
-.section sgsva
-.section sgsvb
-.section sgswa
-.section sgswb
-.section sgsxa
-.section sgsxb
-.section sgsya
-.section sgsyb
-.section sgsza
-.section sgszb
-.section sgs1a
-.section sgs1b
-.section sgs2a
-.section sgs2b
-.section sgs3a
-.section sgs3b
-.section sgs4a
-.section sgs4b
-.section sgs5a
-.section sgs5b
-.section sgs6a
-.section sgs6b
-.section sgs7a
-.section sgs7b
-.section sgs8a
-.section sgs8b
-.section sgs9a
-.section sgs9b
-.section sgs0a
-.section sgs0b
-.section sgtaa
-.section sgtab
-.section sgtba
-.section sgtbb
-.section sgtca
-.section sgtcb
-.section sgtda
-.section sgtdb
-.section sgtea
-.section sgteb
-.section sgtfa
-.section sgtfb
-.section sgtga
-.section sgtgb
-.section sgtha
-.section sgthb
-.section sgtia
-.section sgtib
-.section sgtja
-.section sgtjb
-.section sgtka
-.section sgtkb
-.section sgtla
-.section sgtlb
-.section sgtma
-.section sgtmb
-.section sgtna
-.section sgtnb
-.section sgtoa
-.section sgtob
-.section sgtpa
-.section sgtpb
-.section sgtqa
-.section sgtqb
-.section sgtra
-.section sgtrb
-.section sgtsa
-.section sgtsb
-.section sgtta
-.section sgttb
-.section sgtua
-.section sgtub
-.section sgtva
-.section sgtvb
-.section sgtwa
-.section sgtwb
-.section sgtxa
-.section sgtxb
-.section sgtya
-.section sgtyb
-.section sgtza
-.section sgtzb
-.section sgt1a
-.section sgt1b
-.section sgt2a
-.section sgt2b
-.section sgt3a
-.section sgt3b
-.section sgt4a
-.section sgt4b
-.section sgt5a
-.section sgt5b
-.section sgt6a
-.section sgt6b
-.section sgt7a
-.section sgt7b
-.section sgt8a
-.section sgt8b
-.section sgt9a
-.section sgt9b
-.section sgt0a
-.section sgt0b
-.section sguaa
-.section sguab
-.section sguba
-.section sgubb
-.section sguca
-.section sgucb
-.section sguda
-.section sgudb
-.section sguea
-.section sgueb
-.section sgufa
-.section sgufb
-.section sguga
-.section sgugb
-.section sguha
-.section sguhb
-.section sguia
-.section sguib
-.section sguja
-.section sgujb
-.section sguka
-.section sgukb
-.section sgula
-.section sgulb
-.section sguma
-.section sgumb
-.section sguna
-.section sgunb
-.section sguoa
-.section sguob
-.section sgupa
-.section sgupb
-.section sguqa
-.section sguqb
-.section sgura
-.section sgurb
-.section sgusa
-.section sgusb
-.section sguta
-.section sgutb
-.section sguua
-.section sguub
-.section sguva
-.section sguvb
-.section sguwa
-.section sguwb
-.section sguxa
-.section sguxb
-.section sguya
-.section sguyb
-.section sguza
-.section sguzb
-.section sgu1a
-.section sgu1b
-.section sgu2a
-.section sgu2b
-.section sgu3a
-.section sgu3b
-.section sgu4a
-.section sgu4b
-.section sgu5a
-.section sgu5b
-.section sgu6a
-.section sgu6b
-.section sgu7a
-.section sgu7b
-.section sgu8a
-.section sgu8b
-.section sgu9a
-.section sgu9b
-.section sgu0a
-.section sgu0b
-.section sgvaa
-.section sgvab
-.section sgvba
-.section sgvbb
-.section sgvca
-.section sgvcb
-.section sgvda
-.section sgvdb
-.section sgvea
-.section sgveb
-.section sgvfa
-.section sgvfb
-.section sgvga
-.section sgvgb
-.section sgvha
-.section sgvhb
-.section sgvia
-.section sgvib
-.section sgvja
-.section sgvjb
-.section sgvka
-.section sgvkb
-.section sgvla
-.section sgvlb
-.section sgvma
-.section sgvmb
-.section sgvna
-.section sgvnb
-.section sgvoa
-.section sgvob
-.section sgvpa
-.section sgvpb
-.section sgvqa
-.section sgvqb
-.section sgvra
-.section sgvrb
-.section sgvsa
-.section sgvsb
-.section sgvta
-.section sgvtb
-.section sgvua
-.section sgvub
-.section sgvva
-.section sgvvb
-.section sgvwa
-.section sgvwb
-.section sgvxa
-.section sgvxb
-.section sgvya
-.section sgvyb
-.section sgvza
-.section sgvzb
-.section sgv1a
-.section sgv1b
-.section sgv2a
-.section sgv2b
-.section sgv3a
-.section sgv3b
-.section sgv4a
-.section sgv4b
-.section sgv5a
-.section sgv5b
-.section sgv6a
-.section sgv6b
-.section sgv7a
-.section sgv7b
-.section sgv8a
-.section sgv8b
-.section sgv9a
-.section sgv9b
-.section sgv0a
-.section sgv0b
-.section sgwaa
-.section sgwab
-.section sgwba
-.section sgwbb
-.section sgwca
-.section sgwcb
-.section sgwda
-.section sgwdb
-.section sgwea
-.section sgweb
-.section sgwfa
-.section sgwfb
-.section sgwga
-.section sgwgb
-.section sgwha
-.section sgwhb
-.section sgwia
-.section sgwib
-.section sgwja
-.section sgwjb
-.section sgwka
-.section sgwkb
-.section sgwla
-.section sgwlb
-.section sgwma
-.section sgwmb
-.section sgwna
-.section sgwnb
-.section sgwoa
-.section sgwob
-.section sgwpa
-.section sgwpb
-.section sgwqa
-.section sgwqb
-.section sgwra
-.section sgwrb
-.section sgwsa
-.section sgwsb
-.section sgwta
-.section sgwtb
-.section sgwua
-.section sgwub
-.section sgwva
-.section sgwvb
-.section sgwwa
-.section sgwwb
-.section sgwxa
-.section sgwxb
-.section sgwya
-.section sgwyb
-.section sgwza
-.section sgwzb
-.section sgw1a
-.section sgw1b
-.section sgw2a
-.section sgw2b
-.section sgw3a
-.section sgw3b
-.section sgw4a
-.section sgw4b
-.section sgw5a
-.section sgw5b
-.section sgw6a
-.section sgw6b
-.section sgw7a
-.section sgw7b
-.section sgw8a
-.section sgw8b
-.section sgw9a
-.section sgw9b
-.section sgw0a
-.section sgw0b
-.section sgxaa
-.section sgxab
-.section sgxba
-.section sgxbb
-.section sgxca
-.section sgxcb
-.section sgxda
-.section sgxdb
-.section sgxea
-.section sgxeb
-.section sgxfa
-.section sgxfb
-.section sgxga
-.section sgxgb
-.section sgxha
-.section sgxhb
-.section sgxia
-.section sgxib
-.section sgxja
-.section sgxjb
-.section sgxka
-.section sgxkb
-.section sgxla
-.section sgxlb
-.section sgxma
-.section sgxmb
-.section sgxna
-.section sgxnb
-.section sgxoa
-.section sgxob
-.section sgxpa
-.section sgxpb
-.section sgxqa
-.section sgxqb
-.section sgxra
-.section sgxrb
-.section sgxsa
-.section sgxsb
-.section sgxta
-.section sgxtb
-.section sgxua
-.section sgxub
-.section sgxva
-.section sgxvb
-.section sgxwa
-.section sgxwb
-.section sgxxa
-.section sgxxb
-.section sgxya
-.section sgxyb
-.section sgxza
-.section sgxzb
-.section sgx1a
-.section sgx1b
-.section sgx2a
-.section sgx2b
-.section sgx3a
-.section sgx3b
-.section sgx4a
-.section sgx4b
-.section sgx5a
-.section sgx5b
-.section sgx6a
-.section sgx6b
-.section sgx7a
-.section sgx7b
-.section sgx8a
-.section sgx8b
-.section sgx9a
-.section sgx9b
-.section sgx0a
-.section sgx0b
-.section sgyaa
-.section sgyab
-.section sgyba
-.section sgybb
-.section sgyca
-.section sgycb
-.section sgyda
-.section sgydb
-.section sgyea
-.section sgyeb
-.section sgyfa
-.section sgyfb
-.section sgyga
-.section sgygb
-.section sgyha
-.section sgyhb
-.section sgyia
-.section sgyib
-.section sgyja
-.section sgyjb
-.section sgyka
-.section sgykb
-.section sgyla
-.section sgylb
-.section sgyma
-.section sgymb
-.section sgyna
-.section sgynb
-.section sgyoa
-.section sgyob
-.section sgypa
-.section sgypb
-.section sgyqa
-.section sgyqb
-.section sgyra
-.section sgyrb
-.section sgysa
-.section sgysb
-.section sgyta
-.section sgytb
-.section sgyua
-.section sgyub
-.section sgyva
-.section sgyvb
-.section sgywa
-.section sgywb
-.section sgyxa
-.section sgyxb
-.section sgyya
-.section sgyyb
-.section sgyza
-.section sgyzb
-.section sgy1a
-.section sgy1b
-.section sgy2a
-.section sgy2b
-.section sgy3a
-.section sgy3b
-.section sgy4a
-.section sgy4b
-.section sgy5a
-.section sgy5b
-.section sgy6a
-.section sgy6b
-.section sgy7a
-.section sgy7b
-.section sgy8a
-.section sgy8b
-.section sgy9a
-.section sgy9b
-.section sgy0a
-.section sgy0b
-.section sgzaa
-.section sgzab
-.section sgzba
-.section sgzbb
-.section sgzca
-.section sgzcb
-.section sgzda
-.section sgzdb
-.section sgzea
-.section sgzeb
-.section sgzfa
-.section sgzfb
-.section sgzga
-.section sgzgb
-.section sgzha
-.section sgzhb
-.section sgzia
-.section sgzib
-.section sgzja
-.section sgzjb
-.section sgzka
-.section sgzkb
-.section sgzla
-.section sgzlb
-.section sgzma
-.section sgzmb
-.section sgzna
-.section sgznb
-.section sgzoa
-.section sgzob
-.section sgzpa
-.section sgzpb
-.section sgzqa
-.section sgzqb
-.section sgzra
-.section sgzrb
-.section sgzsa
-.section sgzsb
-.section sgzta
-.section sgztb
-.section sgzua
-.section sgzub
-.section sgzva
-.section sgzvb
-.section sgzwa
-.section sgzwb
-.section sgzxa
-.section sgzxb
-.section sgzya
-.section sgzyb
-.section sgzza
-.section sgzzb
-.section sgz1a
-.section sgz1b
-.section sgz2a
-.section sgz2b
-.section sgz3a
-.section sgz3b
-.section sgz4a
-.section sgz4b
-.section sgz5a
-.section sgz5b
-.section sgz6a
-.section sgz6b
-.section sgz7a
-.section sgz7b
-.section sgz8a
-.section sgz8b
-.section sgz9a
-.section sgz9b
-.section sgz0a
-.section sgz0b
-.section sg1aa
-.section sg1ab
-.section sg1ba
-.section sg1bb
-.section sg1ca
-.section sg1cb
-.section sg1da
-.section sg1db
-.section sg1ea
-.section sg1eb
-.section sg1fa
-.section sg1fb
-.section sg1ga
-.section sg1gb
-.section sg1ha
-.section sg1hb
-.section sg1ia
-.section sg1ib
-.section sg1ja
-.section sg1jb
-.section sg1ka
-.section sg1kb
-.section sg1la
-.section sg1lb
-.section sg1ma
-.section sg1mb
-.section sg1na
-.section sg1nb
-.section sg1oa
-.section sg1ob
-.section sg1pa
-.section sg1pb
-.section sg1qa
-.section sg1qb
-.section sg1ra
-.section sg1rb
-.section sg1sa
-.section sg1sb
-.section sg1ta
-.section sg1tb
-.section sg1ua
-.section sg1ub
-.section sg1va
-.section sg1vb
-.section sg1wa
-.section sg1wb
-.section sg1xa
-.section sg1xb
-.section sg1ya
-.section sg1yb
-.section sg1za
-.section sg1zb
-.section sg11a
-.section sg11b
-.section sg12a
-.section sg12b
-.section sg13a
-.section sg13b
-.section sg14a
-.section sg14b
-.section sg15a
-.section sg15b
-.section sg16a
-.section sg16b
-.section sg17a
-.section sg17b
-.section sg18a
-.section sg18b
-.section sg19a
-.section sg19b
-.section sg10a
-.section sg10b
-.section sg2aa
-.section sg2ab
-.section sg2ba
-.section sg2bb
-.section sg2ca
-.section sg2cb
-.section sg2da
-.section sg2db
-.section sg2ea
-.section sg2eb
-.section sg2fa
-.section sg2fb
-.section sg2ga
-.section sg2gb
-.section sg2ha
-.section sg2hb
-.section sg2ia
-.section sg2ib
-.section sg2ja
-.section sg2jb
-.section sg2ka
-.section sg2kb
-.section sg2la
-.section sg2lb
-.section sg2ma
-.section sg2mb
-.section sg2na
-.section sg2nb
-.section sg2oa
-.section sg2ob
-.section sg2pa
-.section sg2pb
-.section sg2qa
-.section sg2qb
-.section sg2ra
-.section sg2rb
-.section sg2sa
-.section sg2sb
-.section sg2ta
-.section sg2tb
-.section sg2ua
-.section sg2ub
-.section sg2va
-.section sg2vb
-.section sg2wa
-.section sg2wb
-.section sg2xa
-.section sg2xb
-.section sg2ya
-.section sg2yb
-.section sg2za
-.section sg2zb
-.section sg21a
-.section sg21b
-.section sg22a
-.section sg22b
-.section sg23a
-.section sg23b
-.section sg24a
-.section sg24b
-.section sg25a
-.section sg25b
-.section sg26a
-.section sg26b
-.section sg27a
-.section sg27b
-.section sg28a
-.section sg28b
-.section sg29a
-.section sg29b
-.section sg20a
-.section sg20b
-.section sg3aa
-.section sg3ab
-.section sg3ba
-.section sg3bb
-.section sg3ca
-.section sg3cb
-.section sg3da
-.section sg3db
-.section sg3ea
-.section sg3eb
-.section sg3fa
-.section sg3fb
-.section sg3ga
-.section sg3gb
-.section sg3ha
-.section sg3hb
-.section sg3ia
-.section sg3ib
-.section sg3ja
-.section sg3jb
-.section sg3ka
-.section sg3kb
-.section sg3la
-.section sg3lb
-.section sg3ma
-.section sg3mb
-.section sg3na
-.section sg3nb
-.section sg3oa
-.section sg3ob
-.section sg3pa
-.section sg3pb
-.section sg3qa
-.section sg3qb
-.section sg3ra
-.section sg3rb
-.section sg3sa
-.section sg3sb
-.section sg3ta
-.section sg3tb
-.section sg3ua
-.section sg3ub
-.section sg3va
-.section sg3vb
-.section sg3wa
-.section sg3wb
-.section sg3xa
-.section sg3xb
-.section sg3ya
-.section sg3yb
-.section sg3za
-.section sg3zb
-.section sg31a
-.section sg31b
-.section sg32a
-.section sg32b
-.section sg33a
-.section sg33b
-.section sg34a
-.section sg34b
-.section sg35a
-.section sg35b
-.section sg36a
-.section sg36b
-.section sg37a
-.section sg37b
-.section sg38a
-.section sg38b
-.section sg39a
-.section sg39b
-.section sg30a
-.section sg30b
-.section sg4aa
-.section sg4ab
-.section sg4ba
-.section sg4bb
-.section sg4ca
-.section sg4cb
-.section sg4da
-.section sg4db
-.section sg4ea
-.section sg4eb
-.section sg4fa
-.section sg4fb
-.section sg4ga
-.section sg4gb
-.section sg4ha
-.section sg4hb
-.section sg4ia
-.section sg4ib
-.section sg4ja
-.section sg4jb
-.section sg4ka
-.section sg4kb
-.section sg4la
-.section sg4lb
-.section sg4ma
-.section sg4mb
-.section sg4na
-.section sg4nb
-.section sg4oa
-.section sg4ob
-.section sg4pa
-.section sg4pb
-.section sg4qa
-.section sg4qb
-.section sg4ra
-.section sg4rb
-.section sg4sa
-.section sg4sb
-.section sg4ta
-.section sg4tb
-.section sg4ua
-.section sg4ub
-.section sg4va
-.section sg4vb
-.section sg4wa
-.section sg4wb
-.section sg4xa
-.section sg4xb
-.section sg4ya
-.section sg4yb
-.section sg4za
-.section sg4zb
-.section sg41a
-.section sg41b
-.section sg42a
-.section sg42b
-.section sg43a
-.section sg43b
-.section sg44a
-.section sg44b
-.section sg45a
-.section sg45b
-.section sg46a
-.section sg46b
-.section sg47a
-.section sg47b
-.section sg48a
-.section sg48b
-.section sg49a
-.section sg49b
-.section sg40a
-.section sg40b
-.section sg5aa
-.section sg5ab
-.section sg5ba
-.section sg5bb
-.section sg5ca
-.section sg5cb
-.section sg5da
-.section sg5db
-.section sg5ea
-.section sg5eb
-.section sg5fa
-.section sg5fb
-.section sg5ga
-.section sg5gb
-.section sg5ha
-.section sg5hb
-.section sg5ia
-.section sg5ib
-.section sg5ja
-.section sg5jb
-.section sg5ka
-.section sg5kb
-.section sg5la
-.section sg5lb
-.section sg5ma
-.section sg5mb
-.section sg5na
-.section sg5nb
-.section sg5oa
-.section sg5ob
-.section sg5pa
-.section sg5pb
-.section sg5qa
-.section sg5qb
-.section sg5ra
-.section sg5rb
-.section sg5sa
-.section sg5sb
-.section sg5ta
-.section sg5tb
-.section sg5ua
-.section sg5ub
-.section sg5va
-.section sg5vb
-.section sg5wa
-.section sg5wb
-.section sg5xa
-.section sg5xb
-.section sg5ya
-.section sg5yb
-.section sg5za
-.section sg5zb
-.section sg51a
-.section sg51b
-.section sg52a
-.section sg52b
-.section sg53a
-.section sg53b
-.section sg54a
-.section sg54b
-.section sg55a
-.section sg55b
-.section sg56a
-.section sg56b
-.section sg57a
-.section sg57b
-.section sg58a
-.section sg58b
-.section sg59a
-.section sg59b
-.section sg50a
-.section sg50b
-.section sg6aa
-.section sg6ab
-.section sg6ba
-.section sg6bb
-.section sg6ca
-.section sg6cb
-.section sg6da
-.section sg6db
-.section sg6ea
-.section sg6eb
-.section sg6fa
-.section sg6fb
-.section sg6ga
-.section sg6gb
-.section sg6ha
-.section sg6hb
-.section sg6ia
-.section sg6ib
-.section sg6ja
-.section sg6jb
-.section sg6ka
-.section sg6kb
-.section sg6la
-.section sg6lb
-.section sg6ma
-.section sg6mb
-.section sg6na
-.section sg6nb
-.section sg6oa
-.section sg6ob
-.section sg6pa
-.section sg6pb
-.section sg6qa
-.section sg6qb
-.section sg6ra
-.section sg6rb
-.section sg6sa
-.section sg6sb
-.section sg6ta
-.section sg6tb
-.section sg6ua
-.section sg6ub
-.section sg6va
-.section sg6vb
-.section sg6wa
-.section sg6wb
-.section sg6xa
-.section sg6xb
-.section sg6ya
-.section sg6yb
-.section sg6za
-.section sg6zb
-.section sg61a
-.section sg61b
-.section sg62a
-.section sg62b
-.section sg63a
-.section sg63b
-.section sg64a
-.section sg64b
-.section sg65a
-.section sg65b
-.section sg66a
-.section sg66b
-.section sg67a
-.section sg67b
-.section sg68a
-.section sg68b
-.section sg69a
-.section sg69b
-.section sg60a
-.section sg60b
-.section sg7aa
-.section sg7ab
-.section sg7ba
-.section sg7bb
-.section sg7ca
-.section sg7cb
-.section sg7da
-.section sg7db
-.section sg7ea
-.section sg7eb
-.section sg7fa
-.section sg7fb
-.section sg7ga
-.section sg7gb
-.section sg7ha
-.section sg7hb
-.section sg7ia
-.section sg7ib
-.section sg7ja
-.section sg7jb
-.section sg7ka
-.section sg7kb
-.section sg7la
-.section sg7lb
-.section sg7ma
-.section sg7mb
-.section sg7na
-.section sg7nb
-.section sg7oa
-.section sg7ob
-.section sg7pa
-.section sg7pb
-.section sg7qa
-.section sg7qb
-.section sg7ra
-.section sg7rb
-.section sg7sa
-.section sg7sb
-.section sg7ta
-.section sg7tb
-.section sg7ua
-.section sg7ub
-.section sg7va
-.section sg7vb
-.section sg7wa
-.section sg7wb
-.section sg7xa
-.section sg7xb
-.section sg7ya
-.section sg7yb
-.section sg7za
-.section sg7zb
-.section sg71a
-.section sg71b
-.section sg72a
-.section sg72b
-.section sg73a
-.section sg73b
-.section sg74a
-.section sg74b
-.section sg75a
-.section sg75b
-.section sg76a
-.section sg76b
-.section sg77a
-.section sg77b
-.section sg78a
-.section sg78b
-.section sg79a
-.section sg79b
-.section sg70a
-.section sg70b
-.section sg8aa
-.section sg8ab
-.section sg8ba
-.section sg8bb
-.section sg8ca
-.section sg8cb
-.section sg8da
-.section sg8db
-.section sg8ea
-.section sg8eb
-.section sg8fa
-.section sg8fb
-.section sg8ga
-.section sg8gb
-.section sg8ha
-.section sg8hb
-.section sg8ia
-.section sg8ib
-.section sg8ja
-.section sg8jb
-.section sg8ka
-.section sg8kb
-.section sg8la
-.section sg8lb
-.section sg8ma
-.section sg8mb
-.section sg8na
-.section sg8nb
-.section sg8oa
-.section sg8ob
-.section sg8pa
-.section sg8pb
-.section sg8qa
-.section sg8qb
-.section sg8ra
-.section sg8rb
-.section sg8sa
-.section sg8sb
-.section sg8ta
-.section sg8tb
-.section sg8ua
-.section sg8ub
-.section sg8va
-.section sg8vb
-.section sg8wa
-.section sg8wb
-.section sg8xa
-.section sg8xb
-.section sg8ya
-.section sg8yb
-.section sg8za
-.section sg8zb
-.section sg81a
-.section sg81b
-.section sg82a
-.section sg82b
-.section sg83a
-.section sg83b
-.section sg84a
-.section sg84b
-.section sg85a
-.section sg85b
-.section sg86a
-.section sg86b
-.section sg87a
-.section sg87b
-.section sg88a
-.section sg88b
-.section sg89a
-.section sg89b
-.section sg80a
-.section sg80b
-.section sg9aa
-.section sg9ab
-.section sg9ba
-.section sg9bb
-.section sg9ca
-.section sg9cb
-.section sg9da
-.section sg9db
-.section sg9ea
-.section sg9eb
-.section sg9fa
-.section sg9fb
-.section sg9ga
-.section sg9gb
-.section sg9ha
-.section sg9hb
-.section sg9ia
-.section sg9ib
-.section sg9ja
-.section sg9jb
-.section sg9ka
-.section sg9kb
-.section sg9la
-.section sg9lb
-.section sg9ma
-.section sg9mb
-.section sg9na
-.section sg9nb
-.section sg9oa
-.section sg9ob
-.section sg9pa
-.section sg9pb
-.section sg9qa
-.section sg9qb
-.section sg9ra
-.section sg9rb
-.section sg9sa
-.section sg9sb
-.section sg9ta
-.section sg9tb
-.section sg9ua
-.section sg9ub
-.section sg9va
-.section sg9vb
-.section sg9wa
-.section sg9wb
-.section sg9xa
-.section sg9xb
-.section sg9ya
-.section sg9yb
-.section sg9za
-.section sg9zb
-.section sg91a
-.section sg91b
-.section sg92a
-.section sg92b
-.section sg93a
-.section sg93b
-.section sg94a
-.section sg94b
-.section sg95a
-.section sg95b
-.section sg96a
-.section sg96b
-.section sg97a
-.section sg97b
-.section sg98a
-.section sg98b
-.section sg99a
-.section sg99b
-.section sg90a
-.section sg90b
-.section sg0aa
-.section sg0ab
-.section sg0ba
-.section sg0bb
-.section sg0ca
-.section sg0cb
-.section sg0da
-.section sg0db
-.section sg0ea
-.section sg0eb
-.section sg0fa
-.section sg0fb
-.section sg0ga
-.section sg0gb
-.section sg0ha
-.section sg0hb
-.section sg0ia
-.section sg0ib
-.section sg0ja
-.section sg0jb
-.section sg0ka
-.section sg0kb
-.section sg0la
-.section sg0lb
-.section sg0ma
-.section sg0mb
-.section sg0na
-.section sg0nb
-.section sg0oa
-.section sg0ob
-.section sg0pa
-.section sg0pb
-.section sg0qa
-.section sg0qb
-.section sg0ra
-.section sg0rb
-.section sg0sa
-.section sg0sb
-.section sg0ta
-.section sg0tb
-.section sg0ua
-.section sg0ub
-.section sg0va
-.section sg0vb
-.section sg0wa
-.section sg0wb
-.section sg0xa
-.section sg0xb
-.section sg0ya
-.section sg0yb
-.section sg0za
-.section sg0zb
-.section sg01a
-.section sg01b
-.section sg02a
-.section sg02b
-.section sg03a
-.section sg03b
-.section sg04a
-.section sg04b
-.section sg05a
-.section sg05b
-.section sg06a
-.section sg06b
-.section sg07a
-.section sg07b
-.section sg08a
-.section sg08b
-.section sg09a
-.section sg09b
-.section sg00a
-.section sg00b
-.section shaaa
-.section shaab
-.section shaba
-.section shabb
-.section shaca
-.section shacb
-.section shada
-.section shadb
-.section shaea
-.section shaeb
-.section shafa
-.section shafb
-.section shaga
-.section shagb
-.section shaha
-.section shahb
-.section shaia
-.section shaib
-.section shaja
-.section shajb
-.section shaka
-.section shakb
-.section shala
-.section shalb
-.section shama
-.section shamb
-.section shana
-.section shanb
-.section shaoa
-.section shaob
-.section shapa
-.section shapb
-.section shaqa
-.section shaqb
-.section shara
-.section sharb
-.section shasa
-.section shasb
-.section shata
-.section shatb
-.section shaua
-.section shaub
-.section shava
-.section shavb
-.section shawa
-.section shawb
-.section shaxa
-.section shaxb
-.section shaya
-.section shayb
-.section shaza
-.section shazb
-.section sha1a
-.section sha1b
-.section sha2a
-.section sha2b
-.section sha3a
-.section sha3b
-.section sha4a
-.section sha4b
-.section sha5a
-.section sha5b
-.section sha6a
-.section sha6b
-.section sha7a
-.section sha7b
-.section sha8a
-.section sha8b
-.section sha9a
-.section sha9b
-.section sha0a
-.section sha0b
-.section shbaa
-.section shbab
-.section shbba
-.section shbbb
-.section shbca
-.section shbcb
-.section shbda
-.section shbdb
-.section shbea
-.section shbeb
-.section shbfa
-.section shbfb
-.section shbga
-.section shbgb
-.section shbha
-.section shbhb
-.section shbia
-.section shbib
-.section shbja
-.section shbjb
-.section shbka
-.section shbkb
-.section shbla
-.section shblb
-.section shbma
-.section shbmb
-.section shbna
-.section shbnb
-.section shboa
-.section shbob
-.section shbpa
-.section shbpb
-.section shbqa
-.section shbqb
-.section shbra
-.section shbrb
-.section shbsa
-.section shbsb
-.section shbta
-.section shbtb
-.section shbua
-.section shbub
-.section shbva
-.section shbvb
-.section shbwa
-.section shbwb
-.section shbxa
-.section shbxb
-.section shbya
-.section shbyb
-.section shbza
-.section shbzb
-.section shb1a
-.section shb1b
-.section shb2a
-.section shb2b
-.section shb3a
-.section shb3b
-.section shb4a
-.section shb4b
-.section shb5a
-.section shb5b
-.section shb6a
-.section shb6b
-.section shb7a
-.section shb7b
-.section shb8a
-.section shb8b
-.section shb9a
-.section shb9b
-.section shb0a
-.section shb0b
-.section shcaa
-.section shcab
-.section shcba
-.section shcbb
-.section shcca
-.section shccb
-.section shcda
-.section shcdb
-.section shcea
-.section shceb
-.section shcfa
-.section shcfb
-.section shcga
-.section shcgb
-.section shcha
-.section shchb
-.section shcia
-.section shcib
-.section shcja
-.section shcjb
-.section shcka
-.section shckb
-.section shcla
-.section shclb
-.section shcma
-.section shcmb
-.section shcna
-.section shcnb
-.section shcoa
-.section shcob
-.section shcpa
-.section shcpb
-.section shcqa
-.section shcqb
-.section shcra
-.section shcrb
-.section shcsa
-.section shcsb
-.section shcta
-.section shctb
-.section shcua
-.section shcub
-.section shcva
-.section shcvb
-.section shcwa
-.section shcwb
-.section shcxa
-.section shcxb
-.section shcya
-.section shcyb
-.section shcza
-.section shczb
-.section shc1a
-.section shc1b
-.section shc2a
-.section shc2b
-.section shc3a
-.section shc3b
-.section shc4a
-.section shc4b
-.section shc5a
-.section shc5b
-.section shc6a
-.section shc6b
-.section shc7a
-.section shc7b
-.section shc8a
-.section shc8b
-.section shc9a
-.section shc9b
-.section shc0a
-.section shc0b
-.section shdaa
-.section shdab
-.section shdba
-.section shdbb
-.section shdca
-.section shdcb
-.section shdda
-.section shddb
-.section shdea
-.section shdeb
-.section shdfa
-.section shdfb
-.section shdga
-.section shdgb
-.section shdha
-.section shdhb
-.section shdia
-.section shdib
-.section shdja
-.section shdjb
-.section shdka
-.section shdkb
-.section shdla
-.section shdlb
-.section shdma
-.section shdmb
-.section shdna
-.section shdnb
-.section shdoa
-.section shdob
-.section shdpa
-.section shdpb
-.section shdqa
-.section shdqb
-.section shdra
-.section shdrb
-.section shdsa
-.section shdsb
-.section shdta
-.section shdtb
-.section shdua
-.section shdub
-.section shdva
-.section shdvb
-.section shdwa
-.section shdwb
-.section shdxa
-.section shdxb
-.section shdya
-.section shdyb
-.section shdza
-.section shdzb
-.section shd1a
-.section shd1b
-.section shd2a
-.section shd2b
-.section shd3a
-.section shd3b
-.section shd4a
-.section shd4b
-.section shd5a
-.section shd5b
-.section shd6a
-.section shd6b
-.section shd7a
-.section shd7b
-.section shd8a
-.section shd8b
-.section shd9a
-.section shd9b
-.section shd0a
-.section shd0b
-.section sheaa
-.section sheab
-.section sheba
-.section shebb
-.section sheca
-.section shecb
-.section sheda
-.section shedb
-.section sheea
-.section sheeb
-.section shefa
-.section shefb
-.section shega
-.section shegb
-.section sheha
-.section shehb
-.section sheia
-.section sheib
-.section sheja
-.section shejb
-.section sheka
-.section shekb
-.section shela
-.section shelb
-.section shema
-.section shemb
-.section shena
-.section shenb
-.section sheoa
-.section sheob
-.section shepa
-.section shepb
-.section sheqa
-.section sheqb
-.section shera
-.section sherb
-.section shesa
-.section shesb
-.section sheta
-.section shetb
-.section sheua
-.section sheub
-.section sheva
-.section shevb
-.section shewa
-.section shewb
-.section shexa
-.section shexb
-.section sheya
-.section sheyb
-.section sheza
-.section shezb
-.section she1a
-.section she1b
-.section she2a
-.section she2b
-.section she3a
-.section she3b
-.section she4a
-.section she4b
-.section she5a
-.section she5b
-.section she6a
-.section she6b
-.section she7a
-.section she7b
-.section she8a
-.section she8b
-.section she9a
-.section she9b
-.section she0a
-.section she0b
-.section shfaa
-.section shfab
-.section shfba
-.section shfbb
-.section shfca
-.section shfcb
-.section shfda
-.section shfdb
-.section shfea
-.section shfeb
-.section shffa
-.section shffb
-.section shfga
-.section shfgb
-.section shfha
-.section shfhb
-.section shfia
-.section shfib
-.section shfja
-.section shfjb
-.section shfka
-.section shfkb
-.section shfla
-.section shflb
-.section shfma
-.section shfmb
-.section shfna
-.section shfnb
-.section shfoa
-.section shfob
-.section shfpa
-.section shfpb
-.section shfqa
-.section shfqb
-.section shfra
-.section shfrb
-.section shfsa
-.section shfsb
-.section shfta
-.section shftb
-.section shfua
-.section shfub
-.section shfva
-.section shfvb
-.section shfwa
-.section shfwb
-.section shfxa
-.section shfxb
-.section shfya
-.section shfyb
-.section shfza
-.section shfzb
-.section shf1a
-.section shf1b
-.section shf2a
-.section shf2b
-.section shf3a
-.section shf3b
-.section shf4a
-.section shf4b
-.section shf5a
-.section shf5b
-.section shf6a
-.section shf6b
-.section shf7a
-.section shf7b
-.section shf8a
-.section shf8b
-.section shf9a
-.section shf9b
-.section shf0a
-.section shf0b
-.section shgaa
-.section shgab
-.section shgba
-.section shgbb
-.section shgca
-.section shgcb
-.section shgda
-.section shgdb
-.section shgea
-.section shgeb
-.section shgfa
-.section shgfb
-.section shgga
-.section shggb
-.section shgha
-.section shghb
-.section shgia
-.section shgib
-.section shgja
-.section shgjb
-.section shgka
-.section shgkb
-.section shgla
-.section shglb
-.section shgma
-.section shgmb
-.section shgna
-.section shgnb
-.section shgoa
-.section shgob
-.section shgpa
-.section shgpb
-.section shgqa
-.section shgqb
-.section shgra
-.section shgrb
-.section shgsa
-.section shgsb
-.section shgta
-.section shgtb
-.section shgua
-.section shgub
-.section shgva
-.section shgvb
-.section shgwa
-.section shgwb
-.section shgxa
-.section shgxb
-.section shgya
-.section shgyb
-.section shgza
-.section shgzb
-.section shg1a
-.section shg1b
-.section shg2a
-.section shg2b
-.section shg3a
-.section shg3b
-.section shg4a
-.section shg4b
-.section shg5a
-.section shg5b
-.section shg6a
-.section shg6b
-.section shg7a
-.section shg7b
-.section shg8a
-.section shg8b
-.section shg9a
-.section shg9b
-.section shg0a
-.section shg0b
-.section shhaa
-.section shhab
-.section shhba
-.section shhbb
-.section shhca
-.section shhcb
-.section shhda
-.section shhdb
-.section shhea
-.section shheb
-.section shhfa
-.section shhfb
-.section shhga
-.section shhgb
-.section shhha
-.section shhhb
-.section shhia
-.section shhib
-.section shhja
-.section shhjb
-.section shhka
-.section shhkb
-.section shhla
-.section shhlb
-.section shhma
-.section shhmb
-.section shhna
-.section shhnb
-.section shhoa
-.section shhob
-.section shhpa
-.section shhpb
-.section shhqa
-.section shhqb
-.section shhra
-.section shhrb
-.section shhsa
-.section shhsb
-.section shhta
-.section shhtb
-.section shhua
-.section shhub
-.section shhva
-.section shhvb
-.section shhwa
-.section shhwb
-.section shhxa
-.section shhxb
-.section shhya
-.section shhyb
-.section shhza
-.section shhzb
-.section shh1a
-.section shh1b
-.section shh2a
-.section shh2b
-.section shh3a
-.section shh3b
-.section shh4a
-.section shh4b
-.section shh5a
-.section shh5b
-.section shh6a
-.section shh6b
-.section shh7a
-.section shh7b
-.section shh8a
-.section shh8b
-.section shh9a
-.section shh9b
-.section shh0a
-.section shh0b
-.section shiaa
-.section shiab
-.section shiba
-.section shibb
-.section shica
-.section shicb
-.section shida
-.section shidb
-.section shiea
-.section shieb
-.section shifa
-.section shifb
-.section shiga
-.section shigb
-.section shiha
-.section shihb
-.section shiia
-.section shiib
-.section shija
-.section shijb
-.section shika
-.section shikb
-.section shila
-.section shilb
-.section shima
-.section shimb
-.section shina
-.section shinb
-.section shioa
-.section shiob
-.section shipa
-.section shipb
-.section shiqa
-.section shiqb
-.section shira
-.section shirb
-.section shisa
-.section shisb
-.section shita
-.section shitb
-.section shiua
-.section shiub
-.section shiva
-.section shivb
-.section shiwa
-.section shiwb
-.section shixa
-.section shixb
-.section shiya
-.section shiyb
-.section shiza
-.section shizb
-.section shi1a
-.section shi1b
-.section shi2a
-.section shi2b
-.section shi3a
-.section shi3b
-.section shi4a
-.section shi4b
-.section shi5a
-.section shi5b
-.section shi6a
-.section shi6b
-.section shi7a
-.section shi7b
-.section shi8a
-.section shi8b
-.section shi9a
-.section shi9b
-.section shi0a
-.section shi0b
-.section shjaa
-.section shjab
-.section shjba
-.section shjbb
-.section shjca
-.section shjcb
-.section shjda
-.section shjdb
-.section shjea
-.section shjeb
-.section shjfa
-.section shjfb
-.section shjga
-.section shjgb
-.section shjha
-.section shjhb
-.section shjia
-.section shjib
-.section shjja
-.section shjjb
-.section shjka
-.section shjkb
-.section shjla
-.section shjlb
-.section shjma
-.section shjmb
-.section shjna
-.section shjnb
-.section shjoa
-.section shjob
-.section shjpa
-.section shjpb
-.section shjqa
-.section shjqb
-.section shjra
-.section shjrb
-.section shjsa
-.section shjsb
-.section shjta
-.section shjtb
-.section shjua
-.section shjub
-.section shjva
-.section shjvb
-.section shjwa
-.section shjwb
-.section shjxa
-.section shjxb
-.section shjya
-.section shjyb
-.section shjza
-.section shjzb
-.section shj1a
-.section shj1b
-.section shj2a
-.section shj2b
-.section shj3a
-.section shj3b
-.section shj4a
-.section shj4b
-.section shj5a
-.section shj5b
-.section shj6a
-.section shj6b
-.section shj7a
-.section shj7b
-.section shj8a
-.section shj8b
-.section shj9a
-.section shj9b
-.section shj0a
-.section shj0b
-.section shkaa
-.section shkab
-.section shkba
-.section shkbb
-.section shkca
-.section shkcb
-.section shkda
-.section shkdb
-.section shkea
-.section shkeb
-.section shkfa
-.section shkfb
-.section shkga
-.section shkgb
-.section shkha
-.section shkhb
-.section shkia
-.section shkib
-.section shkja
-.section shkjb
-.section shkka
-.section shkkb
-.section shkla
-.section shklb
-.section shkma
-.section shkmb
-.section shkna
-.section shknb
-.section shkoa
-.section shkob
-.section shkpa
-.section shkpb
-.section shkqa
-.section shkqb
-.section shkra
-.section shkrb
-.section shksa
-.section shksb
-.section shkta
-.section shktb
-.section shkua
-.section shkub
-.section shkva
-.section shkvb
-.section shkwa
-.section shkwb
-.section shkxa
-.section shkxb
-.section shkya
-.section shkyb
-.section shkza
-.section shkzb
-.section shk1a
-.section shk1b
-.section shk2a
-.section shk2b
-.section shk3a
-.section shk3b
-.section shk4a
-.section shk4b
-.section shk5a
-.section shk5b
-.section shk6a
-.section shk6b
-.section shk7a
-.section shk7b
-.section shk8a
-.section shk8b
-.section shk9a
-.section shk9b
-.section shk0a
-.section shk0b
-.section shlaa
-.section shlab
-.section shlba
-.section shlbb
-.section shlca
-.section shlcb
-.section shlda
-.section shldb
-.section shlea
-.section shleb
-.section shlfa
-.section shlfb
-.section shlga
-.section shlgb
-.section shlha
-.section shlhb
-.section shlia
-.section shlib
-.section shlja
-.section shljb
-.section shlka
-.section shlkb
-.section shlla
-.section shllb
-.section shlma
-.section shlmb
-.section shlna
-.section shlnb
-.section shloa
-.section shlob
-.section shlpa
-.section shlpb
-.section shlqa
-.section shlqb
-.section shlra
-.section shlrb
-.section shlsa
-.section shlsb
-.section shlta
-.section shltb
-.section shlua
-.section shlub
-.section shlva
-.section shlvb
-.section shlwa
-.section shlwb
-.section shlxa
-.section shlxb
-.section shlya
-.section shlyb
-.section shlza
-.section shlzb
-.section shl1a
-.section shl1b
-.section shl2a
-.section shl2b
-.section shl3a
-.section shl3b
-.section shl4a
-.section shl4b
-.section shl5a
-.section shl5b
-.section shl6a
-.section shl6b
-.section shl7a
-.section shl7b
-.section shl8a
-.section shl8b
-.section shl9a
-.section shl9b
-.section shl0a
-.section shl0b
-.section shmaa
-.section shmab
-.section shmba
-.section shmbb
-.section shmca
-.section shmcb
-.section shmda
-.section shmdb
-.section shmea
-.section shmeb
-.section shmfa
-.section shmfb
-.section shmga
-.section shmgb
-.section shmha
-.section shmhb
-.section shmia
-.section shmib
-.section shmja
-.section shmjb
-.section shmka
-.section shmkb
-.section shmla
-.section shmlb
-.section shmma
-.section shmmb
-.section shmna
-.section shmnb
-.section shmoa
-.section shmob
-.section shmpa
-.section shmpb
-.section shmqa
-.section shmqb
-.section shmra
-.section shmrb
-.section shmsa
-.section shmsb
-.section shmta
-.section shmtb
-.section shmua
-.section shmub
-.section shmva
-.section shmvb
-.section shmwa
-.section shmwb
-.section shmxa
-.section shmxb
-.section shmya
-.section shmyb
-.section shmza
-.section shmzb
-.section shm1a
-.section shm1b
-.section shm2a
-.section shm2b
-.section shm3a
-.section shm3b
-.section shm4a
-.section shm4b
-.section shm5a
-.section shm5b
-.section shm6a
-.section shm6b
-.section shm7a
-.section shm7b
-.section shm8a
-.section shm8b
-.section shm9a
-.section shm9b
-.section shm0a
-.section shm0b
-.section shnaa
-.section shnab
-.section shnba
-.section shnbb
-.section shnca
-.section shncb
-.section shnda
-.section shndb
-.section shnea
-.section shneb
-.section shnfa
-.section shnfb
-.section shnga
-.section shngb
-.section shnha
-.section shnhb
-.section shnia
-.section shnib
-.section shnja
-.section shnjb
-.section shnka
-.section shnkb
-.section shnla
-.section shnlb
-.section shnma
-.section shnmb
-.section shnna
-.section shnnb
-.section shnoa
-.section shnob
-.section shnpa
-.section shnpb
-.section shnqa
-.section shnqb
-.section shnra
-.section shnrb
-.section shnsa
-.section shnsb
-.section shnta
-.section shntb
-.section shnua
-.section shnub
-.section shnva
-.section shnvb
-.section shnwa
-.section shnwb
-.section shnxa
-.section shnxb
-.section shnya
-.section shnyb
-.section shnza
-.section shnzb
-.section shn1a
-.section shn1b
-.section shn2a
-.section shn2b
-.section shn3a
-.section shn3b
-.section shn4a
-.section shn4b
-.section shn5a
-.section shn5b
-.section shn6a
-.section shn6b
-.section shn7a
-.section shn7b
-.section shn8a
-.section shn8b
-.section shn9a
-.section shn9b
-.section shn0a
-.section shn0b
-.section shoaa
-.section shoab
-.section shoba
-.section shobb
-.section shoca
-.section shocb
-.section shoda
-.section shodb
-.section shoea
-.section shoeb
-.section shofa
-.section shofb
-.section shoga
-.section shogb
-.section shoha
-.section shohb
-.section shoia
-.section shoib
-.section shoja
-.section shojb
-.section shoka
-.section shokb
-.section shola
-.section sholb
-.section shoma
-.section shomb
-.section shona
-.section shonb
-.section shooa
-.section shoob
-.section shopa
-.section shopb
-.section shoqa
-.section shoqb
-.section shora
-.section shorb
-.section shosa
-.section shosb
-.section shota
-.section shotb
-.section shoua
-.section shoub
-.section shova
-.section shovb
-.section showa
-.section showb
-.section shoxa
-.section shoxb
-.section shoya
-.section shoyb
-.section shoza
-.section shozb
-.section sho1a
-.section sho1b
-.section sho2a
-.section sho2b
-.section sho3a
-.section sho3b
-.section sho4a
-.section sho4b
-.section sho5a
-.section sho5b
-.section sho6a
-.section sho6b
-.section sho7a
-.section sho7b
-.section sho8a
-.section sho8b
-.section sho9a
-.section sho9b
-.section sho0a
-.section sho0b
-.section shpaa
-.section shpab
-.section shpba
-.section shpbb
-.section shpca
-.section shpcb
-.section shpda
-.section shpdb
-.section shpea
-.section shpeb
-.section shpfa
-.section shpfb
-.section shpga
-.section shpgb
-.section shpha
-.section shphb
-.section shpia
-.section shpib
-.section shpja
-.section shpjb
-.section shpka
-.section shpkb
-.section shpla
-.section shplb
-.section shpma
-.section shpmb
-.section shpna
-.section shpnb
-.section shpoa
-.section shpob
-.section shppa
-.section shppb
-.section shpqa
-.section shpqb
-.section shpra
-.section shprb
-.section shpsa
-.section shpsb
-.section shpta
-.section shptb
-.section shpua
-.section shpub
-.section shpva
-.section shpvb
-.section shpwa
-.section shpwb
-.section shpxa
-.section shpxb
-.section shpya
-.section shpyb
-.section shpza
-.section shpzb
-.section shp1a
-.section shp1b
-.section shp2a
-.section shp2b
-.section shp3a
-.section shp3b
-.section shp4a
-.section shp4b
-.section shp5a
-.section shp5b
-.section shp6a
-.section shp6b
-.section shp7a
-.section shp7b
-.section shp8a
-.section shp8b
-.section shp9a
-.section shp9b
-.section shp0a
-.section shp0b
-.section shqaa
-.section shqab
-.section shqba
-.section shqbb
-.section shqca
-.section shqcb
-.section shqda
-.section shqdb
-.section shqea
-.section shqeb
-.section shqfa
-.section shqfb
-.section shqga
-.section shqgb
-.section shqha
-.section shqhb
-.section shqia
-.section shqib
-.section shqja
-.section shqjb
-.section shqka
-.section shqkb
-.section shqla
-.section shqlb
-.section shqma
-.section shqmb
-.section shqna
-.section shqnb
-.section shqoa
-.section shqob
-.section shqpa
-.section shqpb
-.section shqqa
-.section shqqb
-.section shqra
-.section shqrb
-.section shqsa
-.section shqsb
-.section shqta
-.section shqtb
-.section shqua
-.section shqub
-.section shqva
-.section shqvb
-.section shqwa
-.section shqwb
-.section shqxa
-.section shqxb
-.section shqya
-.section shqyb
-.section shqza
-.section shqzb
-.section shq1a
-.section shq1b
-.section shq2a
-.section shq2b
-.section shq3a
-.section shq3b
-.section shq4a
-.section shq4b
-.section shq5a
-.section shq5b
-.section shq6a
-.section shq6b
-.section shq7a
-.section shq7b
-.section shq8a
-.section shq8b
-.section shq9a
-.section shq9b
-.section shq0a
-.section shq0b
-.section shraa
-.section shrab
-.section shrba
-.section shrbb
-.section shrca
-.section shrcb
-.section shrda
-.section shrdb
-.section shrea
-.section shreb
-.section shrfa
-.section shrfb
-.section shrga
-.section shrgb
-.section shrha
-.section shrhb
-.section shria
-.section shrib
-.section shrja
-.section shrjb
-.section shrka
-.section shrkb
-.section shrla
-.section shrlb
-.section shrma
-.section shrmb
-.section shrna
-.section shrnb
-.section shroa
-.section shrob
-.section shrpa
-.section shrpb
-.section shrqa
-.section shrqb
-.section shrra
-.section shrrb
-.section shrsa
-.section shrsb
-.section shrta
-.section shrtb
-.section shrua
-.section shrub
-.section shrva
-.section shrvb
-.section shrwa
-.section shrwb
-.section shrxa
-.section shrxb
-.section shrya
-.section shryb
-.section shrza
-.section shrzb
-.section shr1a
-.section shr1b
-.section shr2a
-.section shr2b
-.section shr3a
-.section shr3b
-.section shr4a
-.section shr4b
-.section shr5a
-.section shr5b
-.section shr6a
-.section shr6b
-.section shr7a
-.section shr7b
-.section shr8a
-.section shr8b
-.section shr9a
-.section shr9b
-.section shr0a
-.section shr0b
-.section shsaa
-.section shsab
-.section shsba
-.section shsbb
-.section shsca
-.section shscb
-.section shsda
-.section shsdb
-.section shsea
-.section shseb
-.section shsfa
-.section shsfb
-.section shsga
-.section shsgb
-.section shsha
-.section shshb
-.section shsia
-.section shsib
-.section shsja
-.section shsjb
-.section shska
-.section shskb
-.section shsla
-.section shslb
-.section shsma
-.section shsmb
-.section shsna
-.section shsnb
-.section shsoa
-.section shsob
-.section shspa
-.section shspb
-.section shsqa
-.section shsqb
-.section shsra
-.section shsrb
-.section shssa
-.section shssb
-.section shsta
-.section shstb
-.section shsua
-.section shsub
-.section shsva
-.section shsvb
-.section shswa
-.section shswb
-.section shsxa
-.section shsxb
-.section shsya
-.section shsyb
-.section shsza
-.section shszb
-.section shs1a
-.section shs1b
-.section shs2a
-.section shs2b
-.section shs3a
-.section shs3b
-.section shs4a
-.section shs4b
-.section shs5a
-.section shs5b
-.section shs6a
-.section shs6b
-.section shs7a
-.section shs7b
-.section shs8a
-.section shs8b
-.section shs9a
-.section shs9b
-.section shs0a
-.section shs0b
-.section shtaa
-.section shtab
-.section shtba
-.section shtbb
-.section shtca
-.section shtcb
-.section shtda
-.section shtdb
-.section shtea
-.section shteb
-.section shtfa
-.section shtfb
-.section shtga
-.section shtgb
-.section shtha
-.section shthb
-.section shtia
-.section shtib
-.section shtja
-.section shtjb
-.section shtka
-.section shtkb
-.section shtla
-.section shtlb
-.section shtma
-.section shtmb
-.section shtna
-.section shtnb
-.section shtoa
-.section shtob
-.section shtpa
-.section shtpb
-.section shtqa
-.section shtqb
-.section shtra
-.section shtrb
-.section shtsa
-.section shtsb
-.section shtta
-.section shttb
-.section shtua
-.section shtub
-.section shtva
-.section shtvb
-.section shtwa
-.section shtwb
-.section shtxa
-.section shtxb
-.section shtya
-.section shtyb
-.section shtza
-.section shtzb
-.section sht1a
-.section sht1b
-.section sht2a
-.section sht2b
-.section sht3a
-.section sht3b
-.section sht4a
-.section sht4b
-.section sht5a
-.section sht5b
-.section sht6a
-.section sht6b
-.section sht7a
-.section sht7b
-.section sht8a
-.section sht8b
-.section sht9a
-.section sht9b
-.section sht0a
-.section sht0b
-.section shuaa
-.section shuab
-.section shuba
-.section shubb
-.section shuca
-.section shucb
-.section shuda
-.section shudb
-.section shuea
-.section shueb
-.section shufa
-.section shufb
-.section shuga
-.section shugb
-.section shuha
-.section shuhb
-.section shuia
-.section shuib
-.section shuja
-.section shujb
-.section shuka
-.section shukb
-.section shula
-.section shulb
-.section shuma
-.section shumb
-.section shuna
-.section shunb
-.section shuoa
-.section shuob
-.section shupa
-.section shupb
-.section shuqa
-.section shuqb
-.section shura
-.section shurb
-.section shusa
-.section shusb
-.section shuta
-.section shutb
-.section shuua
-.section shuub
-.section shuva
-.section shuvb
-.section shuwa
-.section shuwb
-.section shuxa
-.section shuxb
-.section shuya
-.section shuyb
-.section shuza
-.section shuzb
-.section shu1a
-.section shu1b
-.section shu2a
-.section shu2b
-.section shu3a
-.section shu3b
-.section shu4a
-.section shu4b
-.section shu5a
-.section shu5b
-.section shu6a
-.section shu6b
-.section shu7a
-.section shu7b
-.section shu8a
-.section shu8b
-.section shu9a
-.section shu9b
-.section shu0a
-.section shu0b
-.section shvaa
-.section shvab
-.section shvba
-.section shvbb
-.section shvca
-.section shvcb
-.section shvda
-.section shvdb
-.section shvea
-.section shveb
-.section shvfa
-.section shvfb
-.section shvga
-.section shvgb
-.section shvha
-.section shvhb
-.section shvia
-.section shvib
-.section shvja
-.section shvjb
-.section shvka
-.section shvkb
-.section shvla
-.section shvlb
-.section shvma
-.section shvmb
-.section shvna
-.section shvnb
-.section shvoa
-.section shvob
-.section shvpa
-.section shvpb
-.section shvqa
-.section shvqb
-.section shvra
-.section shvrb
-.section shvsa
-.section shvsb
-.section shvta
-.section shvtb
-.section shvua
-.section shvub
-.section shvva
-.section shvvb
-.section shvwa
-.section shvwb
-.section shvxa
-.section shvxb
-.section shvya
-.section shvyb
-.section shvza
-.section shvzb
-.section shv1a
-.section shv1b
-.section shv2a
-.section shv2b
-.section shv3a
-.section shv3b
-.section shv4a
-.section shv4b
-.section shv5a
-.section shv5b
-.section shv6a
-.section shv6b
-.section shv7a
-.section shv7b
-.section shv8a
-.section shv8b
-.section shv9a
-.section shv9b
-.section shv0a
-.section shv0b
-.section shwaa
-.section shwab
-.section shwba
-.section shwbb
-.section shwca
-.section shwcb
-.section shwda
-.section shwdb
-.section shwea
-.section shweb
-.section shwfa
-.section shwfb
-.section shwga
-.section shwgb
-.section shwha
-.section shwhb
-.section shwia
-.section shwib
-.section shwja
-.section shwjb
-.section shwka
-.section shwkb
-.section shwla
-.section shwlb
-.section shwma
-.section shwmb
-.section shwna
-.section shwnb
-.section shwoa
-.section shwob
-.section shwpa
-.section shwpb
-.section shwqa
-.section shwqb
-.section shwra
-.section shwrb
-.section shwsa
-.section shwsb
-.section shwta
-.section shwtb
-.section shwua
-.section shwub
-.section shwva
-.section shwvb
-.section shwwa
-.section shwwb
-.section shwxa
-.section shwxb
-.section shwya
-.section shwyb
-.section shwza
-.section shwzb
-.section shw1a
-.section shw1b
-.section shw2a
-.section shw2b
-.section shw3a
-.section shw3b
-.section shw4a
-.section shw4b
-.section shw5a
-.section shw5b
-.section shw6a
-.section shw6b
-.section shw7a
-.section shw7b
-.section shw8a
-.section shw8b
-.section shw9a
-.section shw9b
-.section shw0a
-.section shw0b
-.section shxaa
-.section shxab
-.section shxba
-.section shxbb
-.section shxca
-.section shxcb
-.section shxda
-.section shxdb
-.section shxea
-.section shxeb
-.section shxfa
-.section shxfb
-.section shxga
-.section shxgb
-.section shxha
-.section shxhb
-.section shxia
-.section shxib
-.section shxja
-.section shxjb
-.section shxka
-.section shxkb
-.section shxla
-.section shxlb
-.section shxma
-.section shxmb
-.section shxna
-.section shxnb
-.section shxoa
-.section shxob
-.section shxpa
-.section shxpb
-.section shxqa
-.section shxqb
-.section shxra
-.section shxrb
-.section shxsa
-.section shxsb
-.section shxta
-.section shxtb
-.section shxua
-.section shxub
-.section shxva
-.section shxvb
-.section shxwa
-.section shxwb
-.section shxxa
-.section shxxb
-.section shxya
-.section shxyb
-.section shxza
-.section shxzb
-.section shx1a
-.section shx1b
-.section shx2a
-.section shx2b
-.section shx3a
-.section shx3b
-.section shx4a
-.section shx4b
-.section shx5a
-.section shx5b
-.section shx6a
-.section shx6b
-.section shx7a
-.section shx7b
-.section shx8a
-.section shx8b
-.section shx9a
-.section shx9b
-.section shx0a
-.section shx0b
-.section shyaa
-.section shyab
-.section shyba
-.section shybb
-.section shyca
-.section shycb
-.section shyda
-.section shydb
-.section shyea
-.section shyeb
-.section shyfa
-.section shyfb
-.section shyga
-.section shygb
-.section shyha
-.section shyhb
-.section shyia
-.section shyib
-.section shyja
-.section shyjb
-.section shyka
-.section shykb
-.section shyla
-.section shylb
-.section shyma
-.section shymb
-.section shyna
-.section shynb
-.section shyoa
-.section shyob
-.section shypa
-.section shypb
-.section shyqa
-.section shyqb
-.section shyra
-.section shyrb
-.section shysa
-.section shysb
-.section shyta
-.section shytb
-.section shyua
-.section shyub
-.section shyva
-.section shyvb
-.section shywa
-.section shywb
-.section shyxa
-.section shyxb
-.section shyya
-.section shyyb
-.section shyza
-.section shyzb
-.section shy1a
-.section shy1b
-.section shy2a
-.section shy2b
-.section shy3a
-.section shy3b
-.section shy4a
-.section shy4b
-.section shy5a
-.section shy5b
-.section shy6a
-.section shy6b
-.section shy7a
-.section shy7b
-.section shy8a
-.section shy8b
-.section shy9a
-.section shy9b
-.section shy0a
-.section shy0b
-.section shzaa
-.section shzab
-.section shzba
-.section shzbb
-.section shzca
-.section shzcb
-.section shzda
-.section shzdb
-.section shzea
-.section shzeb
-.section shzfa
-.section shzfb
-.section shzga
-.section shzgb
-.section shzha
-.section shzhb
-.section shzia
-.section shzib
-.section shzja
-.section shzjb
-.section shzka
-.section shzkb
-.section shzla
-.section shzlb
-.section shzma
-.section shzmb
-.section shzna
-.section shznb
-.section shzoa
-.section shzob
-.section shzpa
-.section shzpb
-.section shzqa
-.section shzqb
-.section shzra
-.section shzrb
-.section shzsa
-.section shzsb
-.section shzta
-.section shztb
-.section shzua
-.section shzub
-.section shzva
-.section shzvb
-.section shzwa
-.section shzwb
-.section shzxa
-.section shzxb
-.section shzya
-.section shzyb
-.section shzza
-.section shzzb
-.section shz1a
-.section shz1b
-.section shz2a
-.section shz2b
-.section shz3a
-.section shz3b
-.section shz4a
-.section shz4b
-.section shz5a
-.section shz5b
-.section shz6a
-.section shz6b
-.section shz7a
-.section shz7b
-.section shz8a
-.section shz8b
-.section shz9a
-.section shz9b
-.section shz0a
-.section shz0b
-.section sh1aa
-.section sh1ab
-.section sh1ba
-.section sh1bb
-.section sh1ca
-.section sh1cb
-.section sh1da
-.section sh1db
-.section sh1ea
-.section sh1eb
-.section sh1fa
-.section sh1fb
-.section sh1ga
-.section sh1gb
-.section sh1ha
-.section sh1hb
-.section sh1ia
-.section sh1ib
-.section sh1ja
-.section sh1jb
-.section sh1ka
-.section sh1kb
-.section sh1la
-.section sh1lb
-.section sh1ma
-.section sh1mb
-.section sh1na
-.section sh1nb
-.section sh1oa
-.section sh1ob
-.section sh1pa
-.section sh1pb
-.section sh1qa
-.section sh1qb
-.section sh1ra
-.section sh1rb
-.section sh1sa
-.section sh1sb
-.section sh1ta
-.section sh1tb
-.section sh1ua
-.section sh1ub
-.section sh1va
-.section sh1vb
-.section sh1wa
-.section sh1wb
-.section sh1xa
-.section sh1xb
-.section sh1ya
-.section sh1yb
-.section sh1za
-.section sh1zb
-.section sh11a
-.section sh11b
-.section sh12a
-.section sh12b
-.section sh13a
-.section sh13b
-.section sh14a
-.section sh14b
-.section sh15a
-.section sh15b
-.section sh16a
-.section sh16b
-.section sh17a
-.section sh17b
-.section sh18a
-.section sh18b
-.section sh19a
-.section sh19b
-.section sh10a
-.section sh10b
-.section sh2aa
-.section sh2ab
-.section sh2ba
-.section sh2bb
-.section sh2ca
-.section sh2cb
-.section sh2da
-.section sh2db
-.section sh2ea
-.section sh2eb
-.section sh2fa
-.section sh2fb
-.section sh2ga
-.section sh2gb
-.section sh2ha
-.section sh2hb
-.section sh2ia
-.section sh2ib
-.section sh2ja
-.section sh2jb
-.section sh2ka
-.section sh2kb
-.section sh2la
-.section sh2lb
-.section sh2ma
-.section sh2mb
-.section sh2na
-.section sh2nb
-.section sh2oa
-.section sh2ob
-.section sh2pa
-.section sh2pb
-.section sh2qa
-.section sh2qb
-.section sh2ra
-.section sh2rb
-.section sh2sa
-.section sh2sb
-.section sh2ta
-.section sh2tb
-.section sh2ua
-.section sh2ub
-.section sh2va
-.section sh2vb
-.section sh2wa
-.section sh2wb
-.section sh2xa
-.section sh2xb
-.section sh2ya
-.section sh2yb
-.section sh2za
-.section sh2zb
-.section sh21a
-.section sh21b
-.section sh22a
-.section sh22b
-.section sh23a
-.section sh23b
-.section sh24a
-.section sh24b
-.section sh25a
-.section sh25b
-.section sh26a
-.section sh26b
-.section sh27a
-.section sh27b
-.section sh28a
-.section sh28b
-.section sh29a
-.section sh29b
-.section sh20a
-.section sh20b
-.section sh3aa
-.section sh3ab
-.section sh3ba
-.section sh3bb
-.section sh3ca
-.section sh3cb
-.section sh3da
-.section sh3db
-.section sh3ea
-.section sh3eb
-.section sh3fa
-.section sh3fb
-.section sh3ga
-.section sh3gb
-.section sh3ha
-.section sh3hb
-.section sh3ia
-.section sh3ib
-.section sh3ja
-.section sh3jb
-.section sh3ka
-.section sh3kb
-.section sh3la
-.section sh3lb
-.section sh3ma
-.section sh3mb
-.section sh3na
-.section sh3nb
-.section sh3oa
-.section sh3ob
-.section sh3pa
-.section sh3pb
-.section sh3qa
-.section sh3qb
-.section sh3ra
-.section sh3rb
-.section sh3sa
-.section sh3sb
-.section sh3ta
-.section sh3tb
-.section sh3ua
-.section sh3ub
-.section sh3va
-.section sh3vb
-.section sh3wa
-.section sh3wb
-.section sh3xa
-.section sh3xb
-.section sh3ya
-.section sh3yb
-.section sh3za
-.section sh3zb
-.section sh31a
-.section sh31b
-.section sh32a
-.section sh32b
-.section sh33a
-.section sh33b
-.section sh34a
-.section sh34b
-.section sh35a
-.section sh35b
-.section sh36a
-.section sh36b
-.section sh37a
-.section sh37b
-.section sh38a
-.section sh38b
-.section sh39a
-.section sh39b
-.section sh30a
-.section sh30b
-.section sh4aa
-.section sh4ab
-.section sh4ba
-.section sh4bb
-.section sh4ca
-.section sh4cb
-.section sh4da
-.section sh4db
-.section sh4ea
-.section sh4eb
-.section sh4fa
-.section sh4fb
-.section sh4ga
-.section sh4gb
-.section sh4ha
-.section sh4hb
-.section sh4ia
-.section sh4ib
-.section sh4ja
-.section sh4jb
-.section sh4ka
-.section sh4kb
-.section sh4la
-.section sh4lb
-.section sh4ma
-.section sh4mb
-.section sh4na
-.section sh4nb
-.section sh4oa
-.section sh4ob
-.section sh4pa
-.section sh4pb
-.section sh4qa
-.section sh4qb
-.section sh4ra
-.section sh4rb
-.section sh4sa
-.section sh4sb
-.section sh4ta
-.section sh4tb
-.section sh4ua
-.section sh4ub
-.section sh4va
-.section sh4vb
-.section sh4wa
-.section sh4wb
-.section sh4xa
-.section sh4xb
-.section sh4ya
-.section sh4yb
-.section sh4za
-.section sh4zb
-.section sh41a
-.section sh41b
-.section sh42a
-.section sh42b
-.section sh43a
-.section sh43b
-.section sh44a
-.section sh44b
-.section sh45a
-.section sh45b
-.section sh46a
-.section sh46b
-.section sh47a
-.section sh47b
-.section sh48a
-.section sh48b
-.section sh49a
-.section sh49b
-.section sh40a
-.section sh40b
-.section sh5aa
-.section sh5ab
-.section sh5ba
-.section sh5bb
-.section sh5ca
-.section sh5cb
-.section sh5da
-.section sh5db
-.section sh5ea
-.section sh5eb
-.section sh5fa
-.section sh5fb
-.section sh5ga
-.section sh5gb
-.section sh5ha
-.section sh5hb
-.section sh5ia
-.section sh5ib
-.section sh5ja
-.section sh5jb
-.section sh5ka
-.section sh5kb
-.section sh5la
-.section sh5lb
-.section sh5ma
-.section sh5mb
-.section sh5na
-.section sh5nb
-.section sh5oa
-.section sh5ob
-.section sh5pa
-.section sh5pb
-.section sh5qa
-.section sh5qb
-.section sh5ra
-.section sh5rb
-.section sh5sa
-.section sh5sb
-.section sh5ta
-.section sh5tb
-.section sh5ua
-.section sh5ub
-.section sh5va
-.section sh5vb
-.section sh5wa
-.section sh5wb
-.section sh5xa
-.section sh5xb
-.section sh5ya
-.section sh5yb
-.section sh5za
-.section sh5zb
-.section sh51a
-.section sh51b
-.section sh52a
-.section sh52b
-.section sh53a
-.section sh53b
-.section sh54a
-.section sh54b
-.section sh55a
-.section sh55b
-.section sh56a
-.section sh56b
-.section sh57a
-.section sh57b
-.section sh58a
-.section sh58b
-.section sh59a
-.section sh59b
-.section sh50a
-.section sh50b
-.section sh6aa
-.section sh6ab
-.section sh6ba
-.section sh6bb
-.section sh6ca
-.section sh6cb
-.section sh6da
-.section sh6db
-.section sh6ea
-.section sh6eb
-.section sh6fa
-.section sh6fb
-.section sh6ga
-.section sh6gb
-.section sh6ha
-.section sh6hb
-.section sh6ia
-.section sh6ib
-.section sh6ja
-.section sh6jb
-.section sh6ka
-.section sh6kb
-.section sh6la
-.section sh6lb
-.section sh6ma
-.section sh6mb
-.section sh6na
-.section sh6nb
-.section sh6oa
-.section sh6ob
-.section sh6pa
-.section sh6pb
-.section sh6qa
-.section sh6qb
-.section sh6ra
-.section sh6rb
-.section sh6sa
-.section sh6sb
-.section sh6ta
-.section sh6tb
-.section sh6ua
-.section sh6ub
-.section sh6va
-.section sh6vb
-.section sh6wa
-.section sh6wb
-.section sh6xa
-.section sh6xb
-.section sh6ya
-.section sh6yb
-.section sh6za
-.section sh6zb
-.section sh61a
-.section sh61b
-.section sh62a
-.section sh62b
-.section sh63a
-.section sh63b
-.section sh64a
-.section sh64b
-.section sh65a
-.section sh65b
-.section sh66a
-.section sh66b
-.section sh67a
-.section sh67b
-.section sh68a
-.section sh68b
-.section sh69a
-.section sh69b
-.section sh60a
-.section sh60b
-.section sh7aa
-.section sh7ab
-.section sh7ba
-.section sh7bb
-.section sh7ca
-.section sh7cb
-.section sh7da
-.section sh7db
-.section sh7ea
-.section sh7eb
-.section sh7fa
-.section sh7fb
-.section sh7ga
-.section sh7gb
-.section sh7ha
-.section sh7hb
-.section sh7ia
-.section sh7ib
-.section sh7ja
-.section sh7jb
-.section sh7ka
-.section sh7kb
-.section sh7la
-.section sh7lb
-.section sh7ma
-.section sh7mb
-.section sh7na
-.section sh7nb
-.section sh7oa
-.section sh7ob
-.section sh7pa
-.section sh7pb
-.section sh7qa
-.section sh7qb
-.section sh7ra
-.section sh7rb
-.section sh7sa
-.section sh7sb
-.section sh7ta
-.section sh7tb
-.section sh7ua
-.section sh7ub
-.section sh7va
-.section sh7vb
-.section sh7wa
-.section sh7wb
-.section sh7xa
-.section sh7xb
-.section sh7ya
-.section sh7yb
-.section sh7za
-.section sh7zb
-.section sh71a
-.section sh71b
-.section sh72a
-.section sh72b
-.section sh73a
-.section sh73b
-.section sh74a
-.section sh74b
-.section sh75a
-.section sh75b
-.section sh76a
-.section sh76b
-.section sh77a
-.section sh77b
-.section sh78a
-.section sh78b
-.section sh79a
-.section sh79b
-.section sh70a
-.section sh70b
-.section sh8aa
-.section sh8ab
-.section sh8ba
-.section sh8bb
-.section sh8ca
-.section sh8cb
-.section sh8da
-.section sh8db
-.section sh8ea
-.section sh8eb
-.section sh8fa
-.section sh8fb
-.section sh8ga
-.section sh8gb
-.section sh8ha
-.section sh8hb
-.section sh8ia
-.section sh8ib
-.section sh8ja
-.section sh8jb
-.section sh8ka
-.section sh8kb
-.section sh8la
-.section sh8lb
-.section sh8ma
-.section sh8mb
-.section sh8na
-.section sh8nb
-.section sh8oa
-.section sh8ob
-.section sh8pa
-.section sh8pb
-.section sh8qa
-.section sh8qb
-.section sh8ra
-.section sh8rb
-.section sh8sa
-.section sh8sb
-.section sh8ta
-.section sh8tb
-.section sh8ua
-.section sh8ub
-.section sh8va
-.section sh8vb
-.section sh8wa
-.section sh8wb
-.section sh8xa
-.section sh8xb
-.section sh8ya
-.section sh8yb
-.section sh8za
-.section sh8zb
-.section sh81a
-.section sh81b
-.section sh82a
-.section sh82b
-.section sh83a
-.section sh83b
-.section sh84a
-.section sh84b
-.section sh85a
-.section sh85b
-.section sh86a
-.section sh86b
-.section sh87a
-.section sh87b
-.section sh88a
-.section sh88b
-.section sh89a
-.section sh89b
-.section sh80a
-.section sh80b
-.section sh9aa
-.section sh9ab
-.section sh9ba
-.section sh9bb
-.section sh9ca
-.section sh9cb
-.section sh9da
-.section sh9db
-.section sh9ea
-.section sh9eb
-.section sh9fa
-.section sh9fb
-.section sh9ga
-.section sh9gb
-.section sh9ha
-.section sh9hb
-.section sh9ia
-.section sh9ib
-.section sh9ja
-.section sh9jb
-.section sh9ka
-.section sh9kb
-.section sh9la
-.section sh9lb
-.section sh9ma
-.section sh9mb
-.section sh9na
-.section sh9nb
-.section sh9oa
-.section sh9ob
-.section sh9pa
-.section sh9pb
-.section sh9qa
-.section sh9qb
-.section sh9ra
-.section sh9rb
-.section sh9sa
-.section sh9sb
-.section sh9ta
-.section sh9tb
-.section sh9ua
-.section sh9ub
-.section sh9va
-.section sh9vb
-.section sh9wa
-.section sh9wb
-.section sh9xa
-.section sh9xb
-.section sh9ya
-.section sh9yb
-.section sh9za
-.section sh9zb
-.section sh91a
-.section sh91b
-.section sh92a
-.section sh92b
-.section sh93a
-.section sh93b
-.section sh94a
-.section sh94b
-.section sh95a
-.section sh95b
-.section sh96a
-.section sh96b
-.section sh97a
-.section sh97b
-.section sh98a
-.section sh98b
-.section sh99a
-.section sh99b
-.section sh90a
-.section sh90b
-.section sh0aa
-.section sh0ab
-.section sh0ba
-.section sh0bb
-.section sh0ca
-.section sh0cb
-.section sh0da
-.section sh0db
-.section sh0ea
-.section sh0eb
-.section sh0fa
-.section sh0fb
-.section sh0ga
-.section sh0gb
-.section sh0ha
-.section sh0hb
-.section sh0ia
-.section sh0ib
-.section sh0ja
-.section sh0jb
-.section sh0ka
-.section sh0kb
-.section sh0la
-.section sh0lb
-.section sh0ma
-.section sh0mb
-.section sh0na
-.section sh0nb
-.section sh0oa
-.section sh0ob
-.section sh0pa
-.section sh0pb
-.section sh0qa
-.section sh0qb
-.section sh0ra
-.section sh0rb
-.section sh0sa
-.section sh0sb
-.section sh0ta
-.section sh0tb
-.section sh0ua
-.section sh0ub
-.section sh0va
-.section sh0vb
-.section sh0wa
-.section sh0wb
-.section sh0xa
-.section sh0xb
-.section sh0ya
-.section sh0yb
-.section sh0za
-.section sh0zb
-.section sh01a
-.section sh01b
-.section sh02a
-.section sh02b
-.section sh03a
-.section sh03b
-.section sh04a
-.section sh04b
-.section sh05a
-.section sh05b
-.section sh06a
-.section sh06b
-.section sh07a
-.section sh07b
-.section sh08a
-.section sh08b
-.section sh09a
-.section sh09b
-.section sh00a
-.section sh00b
-.section siaaa
-.section siaab
-.section siaba
-.section siabb
-.section siaca
-.section siacb
-.section siada
-.section siadb
-.section siaea
-.section siaeb
-.section siafa
-.section siafb
-.section siaga
-.section siagb
-.section siaha
-.section siahb
-.section siaia
-.section siaib
-.section siaja
-.section siajb
-.section siaka
-.section siakb
-.section siala
-.section sialb
-.section siama
-.section siamb
-.section siana
-.section sianb
-.section siaoa
-.section siaob
-.section siapa
-.section siapb
-.section siaqa
-.section siaqb
-.section siara
-.section siarb
-.section siasa
-.section siasb
-.section siata
-.section siatb
-.section siaua
-.section siaub
-.section siava
-.section siavb
-.section siawa
-.section siawb
-.section siaxa
-.section siaxb
-.section siaya
-.section siayb
-.section siaza
-.section siazb
-.section sia1a
-.section sia1b
-.section sia2a
-.section sia2b
-.section sia3a
-.section sia3b
-.section sia4a
-.section sia4b
-.section sia5a
-.section sia5b
-.section sia6a
-.section sia6b
-.section sia7a
-.section sia7b
-.section sia8a
-.section sia8b
-.section sia9a
-.section sia9b
-.section sia0a
-.section sia0b
-.section sibaa
-.section sibab
-.section sibba
-.section sibbb
-.section sibca
-.section sibcb
-.section sibda
-.section sibdb
-.section sibea
-.section sibeb
-.section sibfa
-.section sibfb
-.section sibga
-.section sibgb
-.section sibha
-.section sibhb
-.section sibia
-.section sibib
-.section sibja
-.section sibjb
-.section sibka
-.section sibkb
-.section sibla
-.section siblb
-.section sibma
-.section sibmb
-.section sibna
-.section sibnb
-.section siboa
-.section sibob
-.section sibpa
-.section sibpb
-.section sibqa
-.section sibqb
-.section sibra
-.section sibrb
-.section sibsa
-.section sibsb
-.section sibta
-.section sibtb
-.section sibua
-.section sibub
-.section sibva
-.section sibvb
-.section sibwa
-.section sibwb
-.section sibxa
-.section sibxb
-.section sibya
-.section sibyb
-.section sibza
-.section sibzb
-.section sib1a
-.section sib1b
-.section sib2a
-.section sib2b
-.section sib3a
-.section sib3b
-.section sib4a
-.section sib4b
-.section sib5a
-.section sib5b
-.section sib6a
-.section sib6b
-.section sib7a
-.section sib7b
-.section sib8a
-.section sib8b
-.section sib9a
-.section sib9b
-.section sib0a
-.section sib0b
-.section sicaa
-.section sicab
-.section sicba
-.section sicbb
-.section sicca
-.section siccb
-.section sicda
-.section sicdb
-.section sicea
-.section siceb
-.section sicfa
-.section sicfb
-.section sicga
-.section sicgb
-.section sicha
-.section sichb
-.section sicia
-.section sicib
-.section sicja
-.section sicjb
-.section sicka
-.section sickb
-.section sicla
-.section siclb
-.section sicma
-.section sicmb
-.section sicna
-.section sicnb
-.section sicoa
-.section sicob
-.section sicpa
-.section sicpb
-.section sicqa
-.section sicqb
-.section sicra
-.section sicrb
-.section sicsa
-.section sicsb
-.section sicta
-.section sictb
-.section sicua
-.section sicub
-.section sicva
-.section sicvb
-.section sicwa
-.section sicwb
-.section sicxa
-.section sicxb
-.section sicya
-.section sicyb
-.section sicza
-.section siczb
-.section sic1a
-.section sic1b
-.section sic2a
-.section sic2b
-.section sic3a
-.section sic3b
-.section sic4a
-.section sic4b
-.section sic5a
-.section sic5b
-.section sic6a
-.section sic6b
-.section sic7a
-.section sic7b
-.section sic8a
-.section sic8b
-.section sic9a
-.section sic9b
-.section sic0a
-.section sic0b
-.section sidaa
-.section sidab
-.section sidba
-.section sidbb
-.section sidca
-.section sidcb
-.section sidda
-.section siddb
-.section sidea
-.section sideb
-.section sidfa
-.section sidfb
-.section sidga
-.section sidgb
-.section sidha
-.section sidhb
-.section sidia
-.section sidib
-.section sidja
-.section sidjb
-.section sidka
-.section sidkb
-.section sidla
-.section sidlb
-.section sidma
-.section sidmb
-.section sidna
-.section sidnb
-.section sidoa
-.section sidob
-.section sidpa
-.section sidpb
-.section sidqa
-.section sidqb
-.section sidra
-.section sidrb
-.section sidsa
-.section sidsb
-.section sidta
-.section sidtb
-.section sidua
-.section sidub
-.section sidva
-.section sidvb
-.section sidwa
-.section sidwb
-.section sidxa
-.section sidxb
-.section sidya
-.section sidyb
-.section sidza
-.section sidzb
-.section sid1a
-.section sid1b
-.section sid2a
-.section sid2b
-.section sid3a
-.section sid3b
-.section sid4a
-.section sid4b
-.section sid5a
-.section sid5b
-.section sid6a
-.section sid6b
-.section sid7a
-.section sid7b
-.section sid8a
-.section sid8b
-.section sid9a
-.section sid9b
-.section sid0a
-.section sid0b
-.section sieaa
-.section sieab
-.section sieba
-.section siebb
-.section sieca
-.section siecb
-.section sieda
-.section siedb
-.section sieea
-.section sieeb
-.section siefa
-.section siefb
-.section siega
-.section siegb
-.section sieha
-.section siehb
-.section sieia
-.section sieib
-.section sieja
-.section siejb
-.section sieka
-.section siekb
-.section siela
-.section sielb
-.section siema
-.section siemb
-.section siena
-.section sienb
-.section sieoa
-.section sieob
-.section siepa
-.section siepb
-.section sieqa
-.section sieqb
-.section siera
-.section sierb
-.section siesa
-.section siesb
-.section sieta
-.section sietb
-.section sieua
-.section sieub
-.section sieva
-.section sievb
-.section siewa
-.section siewb
-.section siexa
-.section siexb
-.section sieya
-.section sieyb
-.section sieza
-.section siezb
-.section sie1a
-.section sie1b
-.section sie2a
-.section sie2b
-.section sie3a
-.section sie3b
-.section sie4a
-.section sie4b
-.section sie5a
-.section sie5b
-.section sie6a
-.section sie6b
-.section sie7a
-.section sie7b
-.section sie8a
-.section sie8b
-.section sie9a
-.section sie9b
-.section sie0a
-.section sie0b
-.section sifaa
-.section sifab
-.section sifba
-.section sifbb
-.section sifca
-.section sifcb
-.section sifda
-.section sifdb
-.section sifea
-.section sifeb
-.section siffa
-.section siffb
-.section sifga
-.section sifgb
-.section sifha
-.section sifhb
-.section sifia
-.section sifib
-.section sifja
-.section sifjb
-.section sifka
-.section sifkb
-.section sifla
-.section siflb
-.section sifma
-.section sifmb
-.section sifna
-.section sifnb
-.section sifoa
-.section sifob
-.section sifpa
-.section sifpb
-.section sifqa
-.section sifqb
-.section sifra
-.section sifrb
-.section sifsa
-.section sifsb
-.section sifta
-.section siftb
-.section sifua
-.section sifub
-.section sifva
-.section sifvb
-.section sifwa
-.section sifwb
-.section sifxa
-.section sifxb
-.section sifya
-.section sifyb
-.section sifza
-.section sifzb
-.section sif1a
-.section sif1b
-.section sif2a
-.section sif2b
-.section sif3a
-.section sif3b
-.section sif4a
-.section sif4b
-.section sif5a
-.section sif5b
-.section sif6a
-.section sif6b
-.section sif7a
-.section sif7b
-.section sif8a
-.section sif8b
-.section sif9a
-.section sif9b
-.section sif0a
-.section sif0b
-.section sigaa
-.section sigab
-.section sigba
-.section sigbb
-.section sigca
-.section sigcb
-.section sigda
-.section sigdb
-.section sigea
-.section sigeb
-.section sigfa
-.section sigfb
-.section sigga
-.section siggb
-.section sigha
-.section sighb
-.section sigia
-.section sigib
-.section sigja
-.section sigjb
-.section sigka
-.section sigkb
-.section sigla
-.section siglb
-.section sigma
-.section sigmb
-.section signa
-.section signb
-.section sigoa
-.section sigob
-.section sigpa
-.section sigpb
-.section sigqa
-.section sigqb
-.section sigra
-.section sigrb
-.section sigsa
-.section sigsb
-.section sigta
-.section sigtb
-.section sigua
-.section sigub
-.section sigva
-.section sigvb
-.section sigwa
-.section sigwb
-.section sigxa
-.section sigxb
-.section sigya
-.section sigyb
-.section sigza
-.section sigzb
-.section sig1a
-.section sig1b
-.section sig2a
-.section sig2b
-.section sig3a
-.section sig3b
-.section sig4a
-.section sig4b
-.section sig5a
-.section sig5b
-.section sig6a
-.section sig6b
-.section sig7a
-.section sig7b
-.section sig8a
-.section sig8b
-.section sig9a
-.section sig9b
-.section sig0a
-.section sig0b
-.section sihaa
-.section sihab
-.section sihba
-.section sihbb
-.section sihca
-.section sihcb
-.section sihda
-.section sihdb
-.section sihea
-.section siheb
-.section sihfa
-.section sihfb
-.section sihga
-.section sihgb
-.section sihha
-.section sihhb
-.section sihia
-.section sihib
-.section sihja
-.section sihjb
-.section sihka
-.section sihkb
-.section sihla
-.section sihlb
-.section sihma
-.section sihmb
-.section sihna
-.section sihnb
-.section sihoa
-.section sihob
-.section sihpa
-.section sihpb
-.section sihqa
-.section sihqb
-.section sihra
-.section sihrb
-.section sihsa
-.section sihsb
-.section sihta
-.section sihtb
-.section sihua
-.section sihub
-.section sihva
-.section sihvb
-.section sihwa
-.section sihwb
-.section sihxa
-.section sihxb
-.section sihya
-.section sihyb
-.section sihza
-.section sihzb
-.section sih1a
-.section sih1b
-.section sih2a
-.section sih2b
-.section sih3a
-.section sih3b
-.section sih4a
-.section sih4b
-.section sih5a
-.section sih5b
-.section sih6a
-.section sih6b
-.section sih7a
-.section sih7b
-.section sih8a
-.section sih8b
-.section sih9a
-.section sih9b
-.section sih0a
-.section sih0b
-.section siiaa
-.section siiab
-.section siiba
-.section siibb
-.section siica
-.section siicb
-.section siida
-.section siidb
-.section siiea
-.section siieb
-.section siifa
-.section siifb
-.section siiga
-.section siigb
-.section siiha
-.section siihb
-.section siiia
-.section siiib
-.section siija
-.section siijb
-.section siika
-.section siikb
-.section siila
-.section siilb
-.section siima
-.section siimb
-.section siina
-.section siinb
-.section siioa
-.section siiob
-.section siipa
-.section siipb
-.section siiqa
-.section siiqb
-.section siira
-.section siirb
-.section siisa
-.section siisb
-.section siita
-.section siitb
-.section siiua
-.section siiub
-.section siiva
-.section siivb
-.section siiwa
-.section siiwb
-.section siixa
-.section siixb
-.section siiya
-.section siiyb
-.section siiza
-.section siizb
-.section sii1a
-.section sii1b
-.section sii2a
-.section sii2b
-.section sii3a
-.section sii3b
-.section sii4a
-.section sii4b
-.section sii5a
-.section sii5b
-.section sii6a
-.section sii6b
-.section sii7a
-.section sii7b
-.section sii8a
-.section sii8b
-.section sii9a
-.section sii9b
-.section sii0a
-.section sii0b
-.section sijaa
-.section sijab
-.section sijba
-.section sijbb
-.section sijca
-.section sijcb
-.section sijda
-.section sijdb
-.section sijea
-.section sijeb
-.section sijfa
-.section sijfb
-.section sijga
-.section sijgb
-.section sijha
-.section sijhb
-.section sijia
-.section sijib
-.section sijja
-.section sijjb
-.section sijka
-.section sijkb
-.section sijla
-.section sijlb
-.section sijma
-.section sijmb
-.section sijna
-.section sijnb
-.section sijoa
-.section sijob
-.section sijpa
-.section sijpb
-.section sijqa
-.section sijqb
-.section sijra
-.section sijrb
-.section sijsa
-.section sijsb
-.section sijta
-.section sijtb
-.section sijua
-.section sijub
-.section sijva
-.section sijvb
-.section sijwa
-.section sijwb
-.section sijxa
-.section sijxb
-.section sijya
-.section sijyb
-.section sijza
-.section sijzb
-.section sij1a
-.section sij1b
-.section sij2a
-.section sij2b
-.section sij3a
-.section sij3b
-.section sij4a
-.section sij4b
-.section sij5a
-.section sij5b
-.section sij6a
-.section sij6b
-.section sij7a
-.section sij7b
-.section sij8a
-.section sij8b
-.section sij9a
-.section sij9b
-.section sij0a
-.section sij0b
-.section sikaa
-.section sikab
-.section sikba
-.section sikbb
-.section sikca
-.section sikcb
-.section sikda
-.section sikdb
-.section sikea
-.section sikeb
-.section sikfa
-.section sikfb
-.section sikga
-.section sikgb
-.section sikha
-.section sikhb
-.section sikia
-.section sikib
-.section sikja
-.section sikjb
-.section sikka
-.section sikkb
-.section sikla
-.section siklb
-.section sikma
-.section sikmb
-.section sikna
-.section siknb
-.section sikoa
-.section sikob
-.section sikpa
-.section sikpb
-.section sikqa
-.section sikqb
-.section sikra
-.section sikrb
-.section siksa
-.section siksb
-.section sikta
-.section siktb
-.section sikua
-.section sikub
-.section sikva
-.section sikvb
-.section sikwa
-.section sikwb
-.section sikxa
-.section sikxb
-.section sikya
-.section sikyb
-.section sikza
-.section sikzb
-.section sik1a
-.section sik1b
-.section sik2a
-.section sik2b
-.section sik3a
-.section sik3b
-.section sik4a
-.section sik4b
-.section sik5a
-.section sik5b
-.section sik6a
-.section sik6b
-.section sik7a
-.section sik7b
-.section sik8a
-.section sik8b
-.section sik9a
-.section sik9b
-.section sik0a
-.section sik0b
-.section silaa
-.section silab
-.section silba
-.section silbb
-.section silca
-.section silcb
-.section silda
-.section sildb
-.section silea
-.section sileb
-.section silfa
-.section silfb
-.section silga
-.section silgb
-.section silha
-.section silhb
-.section silia
-.section silib
-.section silja
-.section siljb
-.section silka
-.section silkb
-.section silla
-.section sillb
-.section silma
-.section silmb
-.section silna
-.section silnb
-.section siloa
-.section silob
-.section silpa
-.section silpb
-.section silqa
-.section silqb
-.section silra
-.section silrb
-.section silsa
-.section silsb
-.section silta
-.section siltb
-.section silua
-.section silub
-.section silva
-.section silvb
-.section silwa
-.section silwb
-.section silxa
-.section silxb
-.section silya
-.section silyb
-.section silza
-.section silzb
-.section sil1a
-.section sil1b
-.section sil2a
-.section sil2b
-.section sil3a
-.section sil3b
-.section sil4a
-.section sil4b
-.section sil5a
-.section sil5b
-.section sil6a
-.section sil6b
-.section sil7a
-.section sil7b
-.section sil8a
-.section sil8b
-.section sil9a
-.section sil9b
-.section sil0a
-.section sil0b
-.section simaa
-.section simab
-.section simba
-.section simbb
-.section simca
-.section simcb
-.section simda
-.section simdb
-.section simea
-.section simeb
-.section simfa
-.section simfb
-.section simga
-.section simgb
-.section simha
-.section simhb
-.section simia
-.section simib
-.section simja
-.section simjb
-.section simka
-.section simkb
-.section simla
-.section simlb
-.section simma
-.section simmb
-.section simna
-.section simnb
-.section simoa
-.section simob
-.section simpa
-.section simpb
-.section simqa
-.section simqb
-.section simra
-.section simrb
-.section simsa
-.section simsb
-.section simta
-.section simtb
-.section simua
-.section simub
-.section simva
-.section simvb
-.section simwa
-.section simwb
-.section simxa
-.section simxb
-.section simya
-.section simyb
-.section simza
-.section simzb
-.section sim1a
-.section sim1b
-.section sim2a
-.section sim2b
-.section sim3a
-.section sim3b
-.section sim4a
-.section sim4b
-.section sim5a
-.section sim5b
-.section sim6a
-.section sim6b
-.section sim7a
-.section sim7b
-.section sim8a
-.section sim8b
-.section sim9a
-.section sim9b
-.section sim0a
-.section sim0b
-.section sinaa
-.section sinab
-.section sinba
-.section sinbb
-.section sinca
-.section sincb
-.section sinda
-.section sindb
-.section sinea
-.section sineb
-.section sinfa
-.section sinfb
-.section singa
-.section singb
-.section sinha
-.section sinhb
-.section sinia
-.section sinib
-.section sinja
-.section sinjb
-.section sinka
-.section sinkb
-.section sinla
-.section sinlb
-.section sinma
-.section sinmb
-.section sinna
-.section sinnb
-.section sinoa
-.section sinob
-.section sinpa
-.section sinpb
-.section sinqa
-.section sinqb
-.section sinra
-.section sinrb
-.section sinsa
-.section sinsb
-.section sinta
-.section sintb
-.section sinua
-.section sinub
-.section sinva
-.section sinvb
-.section sinwa
-.section sinwb
-.section sinxa
-.section sinxb
-.section sinya
-.section sinyb
-.section sinza
-.section sinzb
-.section sin1a
-.section sin1b
-.section sin2a
-.section sin2b
-.section sin3a
-.section sin3b
-.section sin4a
-.section sin4b
-.section sin5a
-.section sin5b
-.section sin6a
-.section sin6b
-.section sin7a
-.section sin7b
-.section sin8a
-.section sin8b
-.section sin9a
-.section sin9b
-.section sin0a
-.section sin0b
-.section sioaa
-.section sioab
-.section sioba
-.section siobb
-.section sioca
-.section siocb
-.section sioda
-.section siodb
-.section sioea
-.section sioeb
-.section siofa
-.section siofb
-.section sioga
-.section siogb
-.section sioha
-.section siohb
-.section sioia
-.section sioib
-.section sioja
-.section siojb
-.section sioka
-.section siokb
-.section siola
-.section siolb
-.section sioma
-.section siomb
-.section siona
-.section sionb
-.section siooa
-.section sioob
-.section siopa
-.section siopb
-.section sioqa
-.section sioqb
-.section siora
-.section siorb
-.section siosa
-.section siosb
-.section siota
-.section siotb
-.section sioua
-.section sioub
-.section siova
-.section siovb
-.section siowa
-.section siowb
-.section sioxa
-.section sioxb
-.section sioya
-.section sioyb
-.section sioza
-.section siozb
-.section sio1a
-.section sio1b
-.section sio2a
-.section sio2b
-.section sio3a
-.section sio3b
-.section sio4a
-.section sio4b
-.section sio5a
-.section sio5b
-.section sio6a
-.section sio6b
-.section sio7a
-.section sio7b
-.section sio8a
-.section sio8b
-.section sio9a
-.section sio9b
-.section sio0a
-.section sio0b
-.section sipaa
-.section sipab
-.section sipba
-.section sipbb
-.section sipca
-.section sipcb
-.section sipda
-.section sipdb
-.section sipea
-.section sipeb
-.section sipfa
-.section sipfb
-.section sipga
-.section sipgb
-.section sipha
-.section siphb
-.section sipia
-.section sipib
-.section sipja
-.section sipjb
-.section sipka
-.section sipkb
-.section sipla
-.section siplb
-.section sipma
-.section sipmb
-.section sipna
-.section sipnb
-.section sipoa
-.section sipob
-.section sippa
-.section sippb
-.section sipqa
-.section sipqb
-.section sipra
-.section siprb
-.section sipsa
-.section sipsb
-.section sipta
-.section siptb
-.section sipua
-.section sipub
-.section sipva
-.section sipvb
-.section sipwa
-.section sipwb
-.section sipxa
-.section sipxb
-.section sipya
-.section sipyb
-.section sipza
-.section sipzb
-.section sip1a
-.section sip1b
-.section sip2a
-.section sip2b
-.section sip3a
-.section sip3b
-.section sip4a
-.section sip4b
-.section sip5a
-.section sip5b
-.section sip6a
-.section sip6b
-.section sip7a
-.section sip7b
-.section sip8a
-.section sip8b
-.section sip9a
-.section sip9b
-.section sip0a
-.section sip0b
-.section siqaa
-.section siqab
-.section siqba
-.section siqbb
-.section siqca
-.section siqcb
-.section siqda
-.section siqdb
-.section siqea
-.section siqeb
-.section siqfa
-.section siqfb
-.section siqga
-.section siqgb
-.section siqha
-.section siqhb
-.section siqia
-.section siqib
-.section siqja
-.section siqjb
-.section siqka
-.section siqkb
-.section siqla
-.section siqlb
-.section siqma
-.section siqmb
-.section siqna
-.section siqnb
-.section siqoa
-.section siqob
-.section siqpa
-.section siqpb
-.section siqqa
-.section siqqb
-.section siqra
-.section siqrb
-.section siqsa
-.section siqsb
-.section siqta
-.section siqtb
-.section siqua
-.section siqub
-.section siqva
-.section siqvb
-.section siqwa
-.section siqwb
-.section siqxa
-.section siqxb
-.section siqya
-.section siqyb
-.section siqza
-.section siqzb
-.section siq1a
-.section siq1b
-.section siq2a
-.section siq2b
-.section siq3a
-.section siq3b
-.section siq4a
-.section siq4b
-.section siq5a
-.section siq5b
-.section siq6a
-.section siq6b
-.section siq7a
-.section siq7b
-.section siq8a
-.section siq8b
-.section siq9a
-.section siq9b
-.section siq0a
-.section siq0b
-.section siraa
-.section sirab
-.section sirba
-.section sirbb
-.section sirca
-.section sircb
-.section sirda
-.section sirdb
-.section sirea
-.section sireb
-.section sirfa
-.section sirfb
-.section sirga
-.section sirgb
-.section sirha
-.section sirhb
-.section siria
-.section sirib
-.section sirja
-.section sirjb
-.section sirka
-.section sirkb
-.section sirla
-.section sirlb
-.section sirma
-.section sirmb
-.section sirna
-.section sirnb
-.section siroa
-.section sirob
-.section sirpa
-.section sirpb
-.section sirqa
-.section sirqb
-.section sirra
-.section sirrb
-.section sirsa
-.section sirsb
-.section sirta
-.section sirtb
-.section sirua
-.section sirub
-.section sirva
-.section sirvb
-.section sirwa
-.section sirwb
-.section sirxa
-.section sirxb
-.section sirya
-.section siryb
-.section sirza
-.section sirzb
-.section sir1a
-.section sir1b
-.section sir2a
-.section sir2b
-.section sir3a
-.section sir3b
-.section sir4a
-.section sir4b
-.section sir5a
-.section sir5b
-.section sir6a
-.section sir6b
-.section sir7a
-.section sir7b
-.section sir8a
-.section sir8b
-.section sir9a
-.section sir9b
-.section sir0a
-.section sir0b
-.section sisaa
-.section sisab
-.section sisba
-.section sisbb
-.section sisca
-.section siscb
-.section sisda
-.section sisdb
-.section sisea
-.section siseb
-.section sisfa
-.section sisfb
-.section sisga
-.section sisgb
-.section sisha
-.section sishb
-.section sisia
-.section sisib
-.section sisja
-.section sisjb
-.section siska
-.section siskb
-.section sisla
-.section sislb
-.section sisma
-.section sismb
-.section sisna
-.section sisnb
-.section sisoa
-.section sisob
-.section sispa
-.section sispb
-.section sisqa
-.section sisqb
-.section sisra
-.section sisrb
-.section sissa
-.section sissb
-.section sista
-.section sistb
-.section sisua
-.section sisub
-.section sisva
-.section sisvb
-.section siswa
-.section siswb
-.section sisxa
-.section sisxb
-.section sisya
-.section sisyb
-.section sisza
-.section siszb
-.section sis1a
-.section sis1b
-.section sis2a
-.section sis2b
-.section sis3a
-.section sis3b
-.section sis4a
-.section sis4b
-.section sis5a
-.section sis5b
-.section sis6a
-.section sis6b
-.section sis7a
-.section sis7b
-.section sis8a
-.section sis8b
-.section sis9a
-.section sis9b
-.section sis0a
-.section sis0b
-.section sitaa
-.section sitab
-.section sitba
-.section sitbb
-.section sitca
-.section sitcb
-.section sitda
-.section sitdb
-.section sitea
-.section siteb
-.section sitfa
-.section sitfb
-.section sitga
-.section sitgb
-.section sitha
-.section sithb
-.section sitia
-.section sitib
-.section sitja
-.section sitjb
-.section sitka
-.section sitkb
-.section sitla
-.section sitlb
-.section sitma
-.section sitmb
-.section sitna
-.section sitnb
-.section sitoa
-.section sitob
-.section sitpa
-.section sitpb
-.section sitqa
-.section sitqb
-.section sitra
-.section sitrb
-.section sitsa
-.section sitsb
-.section sitta
-.section sittb
-.section situa
-.section situb
-.section sitva
-.section sitvb
-.section sitwa
-.section sitwb
-.section sitxa
-.section sitxb
-.section sitya
-.section sityb
-.section sitza
-.section sitzb
-.section sit1a
-.section sit1b
-.section sit2a
-.section sit2b
-.section sit3a
-.section sit3b
-.section sit4a
-.section sit4b
-.section sit5a
-.section sit5b
-.section sit6a
-.section sit6b
-.section sit7a
-.section sit7b
-.section sit8a
-.section sit8b
-.section sit9a
-.section sit9b
-.section sit0a
-.section sit0b
-.section siuaa
-.section siuab
-.section siuba
-.section siubb
-.section siuca
-.section siucb
-.section siuda
-.section siudb
-.section siuea
-.section siueb
-.section siufa
-.section siufb
-.section siuga
-.section siugb
-.section siuha
-.section siuhb
-.section siuia
-.section siuib
-.section siuja
-.section siujb
-.section siuka
-.section siukb
-.section siula
-.section siulb
-.section siuma
-.section siumb
-.section siuna
-.section siunb
-.section siuoa
-.section siuob
-.section siupa
-.section siupb
-.section siuqa
-.section siuqb
-.section siura
-.section siurb
-.section siusa
-.section siusb
-.section siuta
-.section siutb
-.section siuua
-.section siuub
-.section siuva
-.section siuvb
-.section siuwa
-.section siuwb
-.section siuxa
-.section siuxb
-.section siuya
-.section siuyb
-.section siuza
-.section siuzb
-.section siu1a
-.section siu1b
-.section siu2a
-.section siu2b
-.section siu3a
-.section siu3b
-.section siu4a
-.section siu4b
-.section siu5a
-.section siu5b
-.section siu6a
-.section siu6b
-.section siu7a
-.section siu7b
-.section siu8a
-.section siu8b
-.section siu9a
-.section siu9b
-.section siu0a
-.section siu0b
-.section sivaa
-.section sivab
-.section sivba
-.section sivbb
-.section sivca
-.section sivcb
-.section sivda
-.section sivdb
-.section sivea
-.section siveb
-.section sivfa
-.section sivfb
-.section sivga
-.section sivgb
-.section sivha
-.section sivhb
-.section sivia
-.section sivib
-.section sivja
-.section sivjb
-.section sivka
-.section sivkb
-.section sivla
-.section sivlb
-.section sivma
-.section sivmb
-.section sivna
-.section sivnb
-.section sivoa
-.section sivob
-.section sivpa
-.section sivpb
-.section sivqa
-.section sivqb
-.section sivra
-.section sivrb
-.section sivsa
-.section sivsb
-.section sivta
-.section sivtb
-.section sivua
-.section sivub
-.section sivva
-.section sivvb
-.section sivwa
-.section sivwb
-.section sivxa
-.section sivxb
-.section sivya
-.section sivyb
-.section sivza
-.section sivzb
-.section siv1a
-.section siv1b
-.section siv2a
-.section siv2b
-.section siv3a
-.section siv3b
-.section siv4a
-.section siv4b
-.section siv5a
-.section siv5b
-.section siv6a
-.section siv6b
-.section siv7a
-.section siv7b
-.section siv8a
-.section siv8b
-.section siv9a
-.section siv9b
-.section siv0a
-.section siv0b
-.section siwaa
-.section siwab
-.section siwba
-.section siwbb
-.section siwca
-.section siwcb
-.section siwda
-.section siwdb
-.section siwea
-.section siweb
-.section siwfa
-.section siwfb
-.section siwga
-.section siwgb
-.section siwha
-.section siwhb
-.section siwia
-.section siwib
-.section siwja
-.section siwjb
-.section siwka
-.section siwkb
-.section siwla
-.section siwlb
-.section siwma
-.section siwmb
-.section siwna
-.section siwnb
-.section siwoa
-.section siwob
-.section siwpa
-.section siwpb
-.section siwqa
-.section siwqb
-.section siwra
-.section siwrb
-.section siwsa
-.section siwsb
-.section siwta
-.section siwtb
-.section siwua
-.section siwub
-.section siwva
-.section siwvb
-.section siwwa
-.section siwwb
-.section siwxa
-.section siwxb
-.section siwya
-.section siwyb
-.section siwza
-.section siwzb
-.section siw1a
-.section siw1b
-.section siw2a
-.section siw2b
-.section siw3a
-.section siw3b
-.section siw4a
-.section siw4b
-.section siw5a
-.section siw5b
-.section siw6a
-.section siw6b
-.section siw7a
-.section siw7b
-.section siw8a
-.section siw8b
-.section siw9a
-.section siw9b
-.section siw0a
-.section siw0b
-.section sixaa
-.section sixab
-.section sixba
-.section sixbb
-.section sixca
-.section sixcb
-.section sixda
-.section sixdb
-.section sixea
-.section sixeb
-.section sixfa
-.section sixfb
-.section sixga
-.section sixgb
-.section sixha
-.section sixhb
-.section sixia
-.section sixib
-.section sixja
-.section sixjb
-.section sixka
-.section sixkb
-.section sixla
-.section sixlb
-.section sixma
-.section sixmb
-.section sixna
-.section sixnb
-.section sixoa
-.section sixob
-.section sixpa
-.section sixpb
-.section sixqa
-.section sixqb
-.section sixra
-.section sixrb
-.section sixsa
-.section sixsb
-.section sixta
-.section sixtb
-.section sixua
-.section sixub
-.section sixva
-.section sixvb
-.section sixwa
-.section sixwb
-.section sixxa
-.section sixxb
-.section sixya
-.section sixyb
-.section sixza
-.section sixzb
-.section six1a
-.section six1b
-.section six2a
-.section six2b
-.section six3a
-.section six3b
-.section six4a
-.section six4b
-.section six5a
-.section six5b
-.section six6a
-.section six6b
-.section six7a
-.section six7b
-.section six8a
-.section six8b
-.section six9a
-.section six9b
-.section six0a
-.section six0b
-.section siyaa
-.section siyab
-.section siyba
-.section siybb
-.section siyca
-.section siycb
-.section siyda
-.section siydb
-.section siyea
-.section siyeb
-.section siyfa
-.section siyfb
-.section siyga
-.section siygb
-.section siyha
-.section siyhb
-.section siyia
-.section siyib
-.section siyja
-.section siyjb
-.section siyka
-.section siykb
-.section siyla
-.section siylb
-.section siyma
-.section siymb
-.section siyna
-.section siynb
-.section siyoa
-.section siyob
-.section siypa
-.section siypb
-.section siyqa
-.section siyqb
-.section siyra
-.section siyrb
-.section siysa
-.section siysb
-.section siyta
-.section siytb
-.section siyua
-.section siyub
-.section siyva
-.section siyvb
-.section siywa
-.section siywb
-.section siyxa
-.section siyxb
-.section siyya
-.section siyyb
-.section siyza
-.section siyzb
-.section siy1a
-.section siy1b
-.section siy2a
-.section siy2b
-.section siy3a
-.section siy3b
-.section siy4a
-.section siy4b
-.section siy5a
-.section siy5b
-.section siy6a
-.section siy6b
-.section siy7a
-.section siy7b
-.section siy8a
-.section siy8b
-.section siy9a
-.section siy9b
-.section siy0a
-.section siy0b
-.section sizaa
-.section sizab
-.section sizba
-.section sizbb
-.section sizca
-.section sizcb
-.section sizda
-.section sizdb
-.section sizea
-.section sizeb
-.section sizfa
-.section sizfb
-.section sizga
-.section sizgb
-.section sizha
-.section sizhb
-.section sizia
-.section sizib
-.section sizja
-.section sizjb
-.section sizka
-.section sizkb
-.section sizla
-.section sizlb
-.section sizma
-.section sizmb
-.section sizna
-.section siznb
-.section sizoa
-.section sizob
-.section sizpa
-.section sizpb
-.section sizqa
-.section sizqb
-.section sizra
-.section sizrb
-.section sizsa
-.section sizsb
-.section sizta
-.section siztb
-.section sizua
-.section sizub
-.section sizva
-.section sizvb
-.section sizwa
-.section sizwb
-.section sizxa
-.section sizxb
-.section sizya
-.section sizyb
-.section sizza
-.section sizzb
-.section siz1a
-.section siz1b
-.section siz2a
-.section siz2b
-.section siz3a
-.section siz3b
-.section siz4a
-.section siz4b
-.section siz5a
-.section siz5b
-.section siz6a
-.section siz6b
-.section siz7a
-.section siz7b
-.section siz8a
-.section siz8b
-.section siz9a
-.section siz9b
-.section siz0a
-.section siz0b
-.section si1aa
-.section si1ab
-.section si1ba
-.section si1bb
-.section si1ca
-.section si1cb
-.section si1da
-.section si1db
-.section si1ea
-.section si1eb
-.section si1fa
-.section si1fb
-.section si1ga
-.section si1gb
-.section si1ha
-.section si1hb
-.section si1ia
-.section si1ib
-.section si1ja
-.section si1jb
-.section si1ka
-.section si1kb
-.section si1la
-.section si1lb
-.section si1ma
-.section si1mb
-.section si1na
-.section si1nb
-.section si1oa
-.section si1ob
-.section si1pa
-.section si1pb
-.section si1qa
-.section si1qb
-.section si1ra
-.section si1rb
-.section si1sa
-.section si1sb
-.section si1ta
-.section si1tb
-.section si1ua
-.section si1ub
-.section si1va
-.section si1vb
-.section si1wa
-.section si1wb
-.section si1xa
-.section si1xb
-.section si1ya
-.section si1yb
-.section si1za
-.section si1zb
-.section si11a
-.section si11b
-.section si12a
-.section si12b
-.section si13a
-.section si13b
-.section si14a
-.section si14b
-.section si15a
-.section si15b
-.section si16a
-.section si16b
-.section si17a
-.section si17b
-.section si18a
-.section si18b
-.section si19a
-.section si19b
-.section si10a
-.section si10b
-.section si2aa
-.section si2ab
-.section si2ba
-.section si2bb
-.section si2ca
-.section si2cb
-.section si2da
-.section si2db
-.section si2ea
-.section si2eb
-.section si2fa
-.section si2fb
-.section si2ga
-.section si2gb
-.section si2ha
-.section si2hb
-.section si2ia
-.section si2ib
-.section si2ja
-.section si2jb
-.section si2ka
-.section si2kb
-.section si2la
-.section si2lb
-.section si2ma
-.section si2mb
-.section si2na
-.section si2nb
-.section si2oa
-.section si2ob
-.section si2pa
-.section si2pb
-.section si2qa
-.section si2qb
-.section si2ra
-.section si2rb
-.section si2sa
-.section si2sb
-.section si2ta
-.section si2tb
-.section si2ua
-.section si2ub
-.section si2va
-.section si2vb
-.section si2wa
-.section si2wb
-.section si2xa
-.section si2xb
-.section si2ya
-.section si2yb
-.section si2za
-.section si2zb
-.section si21a
-.section si21b
-.section si22a
-.section si22b
-.section si23a
-.section si23b
-.section si24a
-.section si24b
-.section si25a
-.section si25b
-.section si26a
-.section si26b
-.section si27a
-.section si27b
-.section si28a
-.section si28b
-.section si29a
-.section si29b
-.section si20a
-.section si20b
-.section si3aa
-.section si3ab
-.section si3ba
-.section si3bb
-.section si3ca
-.section si3cb
-.section si3da
-.section si3db
-.section si3ea
-.section si3eb
-.section si3fa
-.section si3fb
-.section si3ga
-.section si3gb
-.section si3ha
-.section si3hb
-.section si3ia
-.section si3ib
-.section si3ja
-.section si3jb
-.section si3ka
-.section si3kb
-.section si3la
-.section si3lb
-.section si3ma
-.section si3mb
-.section si3na
-.section si3nb
-.section si3oa
-.section si3ob
-.section si3pa
-.section si3pb
-.section si3qa
-.section si3qb
-.section si3ra
-.section si3rb
-.section si3sa
-.section si3sb
-.section si3ta
-.section si3tb
-.section si3ua
-.section si3ub
-.section si3va
-.section si3vb
-.section si3wa
-.section si3wb
-.section si3xa
-.section si3xb
-.section si3ya
-.section si3yb
-.section si3za
-.section si3zb
-.section si31a
-.section si31b
-.section si32a
-.section si32b
-.section si33a
-.section si33b
-.section si34a
-.section si34b
-.section si35a
-.section si35b
-.section si36a
-.section si36b
-.section si37a
-.section si37b
-.section si38a
-.section si38b
-.section si39a
-.section si39b
-.section si30a
-.section si30b
-.section si4aa
-.section si4ab
-.section si4ba
-.section si4bb
-.section si4ca
-.section si4cb
-.section si4da
-.section si4db
-.section si4ea
-.section si4eb
-.section si4fa
-.section si4fb
-.section si4ga
-.section si4gb
-.section si4ha
-.section si4hb
-.section si4ia
-.section si4ib
-.section si4ja
-.section si4jb
-.section si4ka
-.section si4kb
-.section si4la
-.section si4lb
-.section si4ma
-.section si4mb
-.section si4na
-.section si4nb
-.section si4oa
-.section si4ob
-.section si4pa
-.section si4pb
-.section si4qa
-.section si4qb
-.section si4ra
-.section si4rb
-.section si4sa
-.section si4sb
-.section si4ta
-.section si4tb
-.section si4ua
-.section si4ub
-.section si4va
-.section si4vb
-.section si4wa
-.section si4wb
-.section si4xa
-.section si4xb
-.section si4ya
-.section si4yb
-.section si4za
-.section si4zb
-.section si41a
-.section si41b
-.section si42a
-.section si42b
-.section si43a
-.section si43b
-.section si44a
-.section si44b
-.section si45a
-.section si45b
-.section si46a
-.section si46b
-.section si47a
-.section si47b
-.section si48a
-.section si48b
-.section si49a
-.section si49b
-.section si40a
-.section si40b
-.section si5aa
-.section si5ab
-.section si5ba
-.section si5bb
-.section si5ca
-.section si5cb
-.section si5da
-.section si5db
-.section si5ea
-.section si5eb
-.section si5fa
-.section si5fb
-.section si5ga
-.section si5gb
-.section si5ha
-.section si5hb
-.section si5ia
-.section si5ib
-.section si5ja
-.section si5jb
-.section si5ka
-.section si5kb
-.section si5la
-.section si5lb
-.section si5ma
-.section si5mb
-.section si5na
-.section si5nb
-.section si5oa
-.section si5ob
-.section si5pa
-.section si5pb
-.section si5qa
-.section si5qb
-.section si5ra
-.section si5rb
-.section si5sa
-.section si5sb
-.section si5ta
-.section si5tb
-.section si5ua
-.section si5ub
-.section si5va
-.section si5vb
-.section si5wa
-.section si5wb
-.section si5xa
-.section si5xb
-.section si5ya
-.section si5yb
-.section si5za
-.section si5zb
-.section si51a
-.section si51b
-.section si52a
-.section si52b
-.section si53a
-.section si53b
-.section si54a
-.section si54b
-.section si55a
-.section si55b
-.section si56a
-.section si56b
-.section si57a
-.section si57b
-.section si58a
-.section si58b
-.section si59a
-.section si59b
-.section si50a
-.section si50b
-.section si6aa
-.section si6ab
-.section si6ba
-.section si6bb
-.section si6ca
-.section si6cb
-.section si6da
-.section si6db
-.section si6ea
-.section si6eb
-.section si6fa
-.section si6fb
-.section si6ga
-.section si6gb
-.section si6ha
-.section si6hb
-.section si6ia
-.section si6ib
-.section si6ja
-.section si6jb
-.section si6ka
-.section si6kb
-.section si6la
-.section si6lb
-.section si6ma
-.section si6mb
-.section si6na
-.section si6nb
-.section si6oa
-.section si6ob
-.section si6pa
-.section si6pb
-.section si6qa
-.section si6qb
-.section si6ra
-.section si6rb
-.section si6sa
-.section si6sb
-.section si6ta
-.section si6tb
-.section si6ua
-.section si6ub
-.section si6va
-.section si6vb
-.section si6wa
-.section si6wb
-.section si6xa
-.section si6xb
-.section si6ya
-.section si6yb
-.section si6za
-.section si6zb
-.section si61a
-.section si61b
-.section si62a
-.section si62b
-.section si63a
-.section si63b
-.section si64a
-.section si64b
-.section si65a
-.section si65b
-.section si66a
-.section si66b
-.section si67a
-.section si67b
-.section si68a
-.section si68b
-.section si69a
-.section si69b
-.section si60a
-.section si60b
-.section si7aa
-.section si7ab
-.section si7ba
-.section si7bb
-.section si7ca
-.section si7cb
-.section si7da
-.section si7db
-.section si7ea
-.section si7eb
-.section si7fa
-.section si7fb
-.section si7ga
-.section si7gb
-.section si7ha
-.section si7hb
-.section si7ia
-.section si7ib
-.section si7ja
-.section si7jb
-.section si7ka
-.section si7kb
-.section si7la
-.section si7lb
-.section si7ma
-.section si7mb
-.section si7na
-.section si7nb
-.section si7oa
-.section si7ob
-.section si7pa
-.section si7pb
-.section si7qa
-.section si7qb
-.section si7ra
-.section si7rb
-.section si7sa
-.section si7sb
-.section si7ta
-.section si7tb
-.section si7ua
-.section si7ub
-.section si7va
-.section si7vb
-.section si7wa
-.section si7wb
-.section si7xa
-.section si7xb
-.section si7ya
-.section si7yb
-.section si7za
-.section si7zb
-.section si71a
-.section si71b
-.section si72a
-.section si72b
-.section si73a
-.section si73b
-.section si74a
-.section si74b
-.section si75a
-.section si75b
-.section si76a
-.section si76b
-.section si77a
-.section si77b
-.section si78a
-.section si78b
-.section si79a
-.section si79b
-.section si70a
-.section si70b
-.section si8aa
-.section si8ab
-.section si8ba
-.section si8bb
-.section si8ca
-.section si8cb
-.section si8da
-.section si8db
-.section si8ea
-.section si8eb
-.section si8fa
-.section si8fb
-.section si8ga
-.section si8gb
-.section si8ha
-.section si8hb
-.section si8ia
-.section si8ib
-.section si8ja
-.section si8jb
-.section si8ka
-.section si8kb
-.section si8la
-.section si8lb
-.section si8ma
-.section si8mb
-.section si8na
-.section si8nb
-.section si8oa
-.section si8ob
-.section si8pa
-.section si8pb
-.section si8qa
-.section si8qb
-.section si8ra
-.section si8rb
-.section si8sa
-.section si8sb
-.section si8ta
-.section si8tb
-.section si8ua
-.section si8ub
-.section si8va
-.section si8vb
-.section si8wa
-.section si8wb
-.section si8xa
-.section si8xb
-.section si8ya
-.section si8yb
-.section si8za
-.section si8zb
-.section si81a
-.section si81b
-.section si82a
-.section si82b
-.section si83a
-.section si83b
-.section si84a
-.section si84b
-.section si85a
-.section si85b
-.section si86a
-.section si86b
-.section si87a
-.section si87b
-.section si88a
-.section si88b
-.section si89a
-.section si89b
-.section si80a
-.section si80b
-.section si9aa
-.section si9ab
-.section si9ba
-.section si9bb
-.section si9ca
-.section si9cb
-.section si9da
-.section si9db
-.section si9ea
-.section si9eb
-.section si9fa
-.section si9fb
-.section si9ga
-.section si9gb
-.section si9ha
-.section si9hb
-.section si9ia
-.section si9ib
-.section si9ja
-.section si9jb
-.section si9ka
-.section si9kb
-.section si9la
-.section si9lb
-.section si9ma
-.section si9mb
-.section si9na
-.section si9nb
-.section si9oa
-.section si9ob
-.section si9pa
-.section si9pb
-.section si9qa
-.section si9qb
-.section si9ra
-.section si9rb
-.section si9sa
-.section si9sb
-.section si9ta
-.section si9tb
-.section si9ua
-.section si9ub
-.section si9va
-.section si9vb
-.section si9wa
-.section si9wb
-.section si9xa
-.section si9xb
-.section si9ya
-.section si9yb
-.section si9za
-.section si9zb
-.section si91a
-.section si91b
-.section si92a
-.section si92b
-.section si93a
-.section si93b
-.section si94a
-.section si94b
-.section si95a
-.section si95b
-.section si96a
-.section si96b
-.section si97a
-.section si97b
-.section si98a
-.section si98b
-.section si99a
-.section si99b
-.section si90a
-.section si90b
-.section si0aa
-.section si0ab
-.section si0ba
-.section si0bb
-.section si0ca
-.section si0cb
-.section si0da
-.section si0db
-.section si0ea
-.section si0eb
-.section si0fa
-.section si0fb
-.section si0ga
-.section si0gb
-.section si0ha
-.section si0hb
-.section si0ia
-.section si0ib
-.section si0ja
-.section si0jb
-.section si0ka
-.section si0kb
-.section si0la
-.section si0lb
-.section si0ma
-.section si0mb
-.section si0na
-.section si0nb
-.section si0oa
-.section si0ob
-.section si0pa
-.section si0pb
-.section si0qa
-.section si0qb
-.section si0ra
-.section si0rb
-.section si0sa
-.section si0sb
-.section si0ta
-.section si0tb
-.section si0ua
-.section si0ub
-.section si0va
-.section si0vb
-.section si0wa
-.section si0wb
-.section si0xa
-.section si0xb
-.section si0ya
-.section si0yb
-.section si0za
-.section si0zb
-.section si01a
-.section si01b
-.section si02a
-.section si02b
-.section si03a
-.section si03b
-.section si04a
-.section si04b
-.section si05a
-.section si05b
-.section si06a
-.section si06b
-.section si07a
-.section si07b
-.section si08a
-.section si08b
-.section si09a
-.section si09b
-.section si00a
-.section si00b
-.section sjaaa
-.section sjaab
-.section sjaba
-.section sjabb
-.section sjaca
-.section sjacb
-.section sjada
-.section sjadb
-.section sjaea
-.section sjaeb
-.section sjafa
-.section sjafb
-.section sjaga
-.section sjagb
-.section sjaha
-.section sjahb
-.section sjaia
-.section sjaib
-.section sjaja
-.section sjajb
-.section sjaka
-.section sjakb
-.section sjala
-.section sjalb
-.section sjama
-.section sjamb
-.section sjana
-.section sjanb
-.section sjaoa
-.section sjaob
-.section sjapa
-.section sjapb
-.section sjaqa
-.section sjaqb
-.section sjara
-.section sjarb
-.section sjasa
-.section sjasb
-.section sjata
-.section sjatb
-.section sjaua
-.section sjaub
-.section sjava
-.section sjavb
-.section sjawa
-.section sjawb
-.section sjaxa
-.section sjaxb
-.section sjaya
-.section sjayb
-.section sjaza
-.section sjazb
-.section sja1a
-.section sja1b
-.section sja2a
-.section sja2b
-.section sja3a
-.section sja3b
-.section sja4a
-.section sja4b
-.section sja5a
-.section sja5b
-.section sja6a
-.section sja6b
-.section sja7a
-.section sja7b
-.section sja8a
-.section sja8b
-.section sja9a
-.section sja9b
-.section sja0a
-.section sja0b
-.section sjbaa
-.section sjbab
-.section sjbba
-.section sjbbb
-.section sjbca
-.section sjbcb
-.section sjbda
-.section sjbdb
-.section sjbea
-.section sjbeb
-.section sjbfa
-.section sjbfb
-.section sjbga
-.section sjbgb
-.section sjbha
-.section sjbhb
-.section sjbia
-.section sjbib
-.section sjbja
-.section sjbjb
-.section sjbka
-.section sjbkb
-.section sjbla
-.section sjblb
-.section sjbma
-.section sjbmb
-.section sjbna
-.section sjbnb
-.section sjboa
-.section sjbob
-.section sjbpa
-.section sjbpb
-.section sjbqa
-.section sjbqb
-.section sjbra
-.section sjbrb
-.section sjbsa
-.section sjbsb
-.section sjbta
-.section sjbtb
-.section sjbua
-.section sjbub
-.section sjbva
-.section sjbvb
-.section sjbwa
-.section sjbwb
-.section sjbxa
-.section sjbxb
-.section sjbya
-.section sjbyb
-.section sjbza
-.section sjbzb
-.section sjb1a
-.section sjb1b
-.section sjb2a
-.section sjb2b
-.section sjb3a
-.section sjb3b
-.section sjb4a
-.section sjb4b
-.section sjb5a
-.section sjb5b
-.section sjb6a
-.section sjb6b
-.section sjb7a
-.section sjb7b
-.section sjb8a
-.section sjb8b
-.section sjb9a
-.section sjb9b
-.section sjb0a
-.section sjb0b
-.section sjcaa
-.section sjcab
-.section sjcba
-.section sjcbb
-.section sjcca
-.section sjccb
-.section sjcda
-.section sjcdb
-.section sjcea
-.section sjceb
-.section sjcfa
-.section sjcfb
-.section sjcga
-.section sjcgb
-.section sjcha
-.section sjchb
-.section sjcia
-.section sjcib
-.section sjcja
-.section sjcjb
-.section sjcka
-.section sjckb
-.section sjcla
-.section sjclb
-.section sjcma
-.section sjcmb
-.section sjcna
-.section sjcnb
-.section sjcoa
-.section sjcob
-.section sjcpa
-.section sjcpb
-.section sjcqa
-.section sjcqb
-.section sjcra
-.section sjcrb
-.section sjcsa
-.section sjcsb
-.section sjcta
-.section sjctb
-.section sjcua
-.section sjcub
-.section sjcva
-.section sjcvb
-.section sjcwa
-.section sjcwb
-.section sjcxa
-.section sjcxb
-.section sjcya
-.section sjcyb
-.section sjcza
-.section sjczb
-.section sjc1a
-.section sjc1b
-.section sjc2a
-.section sjc2b
-.section sjc3a
-.section sjc3b
-.section sjc4a
-.section sjc4b
-.section sjc5a
-.section sjc5b
-.section sjc6a
-.section sjc6b
-.section sjc7a
-.section sjc7b
-.section sjc8a
-.section sjc8b
-.section sjc9a
-.section sjc9b
-.section sjc0a
-.section sjc0b
-.section sjdaa
-.section sjdab
-.section sjdba
-.section sjdbb
-.section sjdca
-.section sjdcb
-.section sjdda
-.section sjddb
-.section sjdea
-.section sjdeb
-.section sjdfa
-.section sjdfb
-.section sjdga
-.section sjdgb
-.section sjdha
-.section sjdhb
-.section sjdia
-.section sjdib
-.section sjdja
-.section sjdjb
-.section sjdka
-.section sjdkb
-.section sjdla
-.section sjdlb
-.section sjdma
-.section sjdmb
-.section sjdna
-.section sjdnb
-.section sjdoa
-.section sjdob
-.section sjdpa
-.section sjdpb
-.section sjdqa
-.section sjdqb
-.section sjdra
-.section sjdrb
-.section sjdsa
-.section sjdsb
-.section sjdta
-.section sjdtb
-.section sjdua
-.section sjdub
-.section sjdva
-.section sjdvb
-.section sjdwa
-.section sjdwb
-.section sjdxa
-.section sjdxb
-.section sjdya
-.section sjdyb
-.section sjdza
-.section sjdzb
-.section sjd1a
-.section sjd1b
-.section sjd2a
-.section sjd2b
-.section sjd3a
-.section sjd3b
-.section sjd4a
-.section sjd4b
-.section sjd5a
-.section sjd5b
-.section sjd6a
-.section sjd6b
-.section sjd7a
-.section sjd7b
-.section sjd8a
-.section sjd8b
-.section sjd9a
-.section sjd9b
-.section sjd0a
-.section sjd0b
-.section sjeaa
-.section sjeab
-.section sjeba
-.section sjebb
-.section sjeca
-.section sjecb
-.section sjeda
-.section sjedb
-.section sjeea
-.section sjeeb
-.section sjefa
-.section sjefb
-.section sjega
-.section sjegb
-.section sjeha
-.section sjehb
-.section sjeia
-.section sjeib
-.section sjeja
-.section sjejb
-.section sjeka
-.section sjekb
-.section sjela
-.section sjelb
-.section sjema
-.section sjemb
-.section sjena
-.section sjenb
-.section sjeoa
-.section sjeob
-.section sjepa
-.section sjepb
-.section sjeqa
-.section sjeqb
-.section sjera
-.section sjerb
-.section sjesa
-.section sjesb
-.section sjeta
-.section sjetb
-.section sjeua
-.section sjeub
-.section sjeva
-.section sjevb
-.section sjewa
-.section sjewb
-.section sjexa
-.section sjexb
-.section sjeya
-.section sjeyb
-.section sjeza
-.section sjezb
-.section sje1a
-.section sje1b
-.section sje2a
-.section sje2b
-.section sje3a
-.section sje3b
-.section sje4a
-.section sje4b
-.section sje5a
-.section sje5b
-.section sje6a
-.section sje6b
-.section sje7a
-.section sje7b
-.section sje8a
-.section sje8b
-.section sje9a
-.section sje9b
-.section sje0a
-.section sje0b
-.section sjfaa
-.section sjfab
-.section sjfba
-.section sjfbb
-.section sjfca
-.section sjfcb
-.section sjfda
-.section sjfdb
-.section sjfea
-.section sjfeb
-.section sjffa
-.section sjffb
-.section sjfga
-.section sjfgb
-.section sjfha
-.section sjfhb
-.section sjfia
-.section sjfib
-.section sjfja
-.section sjfjb
-.section sjfka
-.section sjfkb
-.section sjfla
-.section sjflb
-.section sjfma
-.section sjfmb
-.section sjfna
-.section sjfnb
-.section sjfoa
-.section sjfob
-.section sjfpa
-.section sjfpb
-.section sjfqa
-.section sjfqb
-.section sjfra
-.section sjfrb
-.section sjfsa
-.section sjfsb
-.section sjfta
-.section sjftb
-.section sjfua
-.section sjfub
-.section sjfva
-.section sjfvb
-.section sjfwa
-.section sjfwb
-.section sjfxa
-.section sjfxb
-.section sjfya
-.section sjfyb
-.section sjfza
-.section sjfzb
-.section sjf1a
-.section sjf1b
-.section sjf2a
-.section sjf2b
-.section sjf3a
-.section sjf3b
-.section sjf4a
-.section sjf4b
-.section sjf5a
-.section sjf5b
-.section sjf6a
-.section sjf6b
-.section sjf7a
-.section sjf7b
-.section sjf8a
-.section sjf8b
-.section sjf9a
-.section sjf9b
-.section sjf0a
-.section sjf0b
-.section sjgaa
-.section sjgab
-.section sjgba
-.section sjgbb
-.section sjgca
-.section sjgcb
-.section sjgda
-.section sjgdb
-.section sjgea
-.section sjgeb
-.section sjgfa
-.section sjgfb
-.section sjgga
-.section sjggb
-.section sjgha
-.section sjghb
-.section sjgia
-.section sjgib
-.section sjgja
-.section sjgjb
-.section sjgka
-.section sjgkb
-.section sjgla
-.section sjglb
-.section sjgma
-.section sjgmb
-.section sjgna
-.section sjgnb
-.section sjgoa
-.section sjgob
-.section sjgpa
-.section sjgpb
-.section sjgqa
-.section sjgqb
-.section sjgra
-.section sjgrb
-.section sjgsa
-.section sjgsb
-.section sjgta
-.section sjgtb
-.section sjgua
-.section sjgub
-.section sjgva
-.section sjgvb
-.section sjgwa
-.section sjgwb
-.section sjgxa
-.section sjgxb
-.section sjgya
-.section sjgyb
-.section sjgza
-.section sjgzb
-.section sjg1a
-.section sjg1b
-.section sjg2a
-.section sjg2b
-.section sjg3a
-.section sjg3b
-.section sjg4a
-.section sjg4b
-.section sjg5a
-.section sjg5b
-.section sjg6a
-.section sjg6b
-.section sjg7a
-.section sjg7b
-.section sjg8a
-.section sjg8b
-.section sjg9a
-.section sjg9b
-.section sjg0a
-.section sjg0b
-.section sjhaa
-.section sjhab
-.section sjhba
-.section sjhbb
-.section sjhca
-.section sjhcb
-.section sjhda
-.section sjhdb
-.section sjhea
-.section sjheb
-.section sjhfa
-.section sjhfb
-.section sjhga
-.section sjhgb
-.section sjhha
-.section sjhhb
-.section sjhia
-.section sjhib
-.section sjhja
-.section sjhjb
-.section sjhka
-.section sjhkb
-.section sjhla
-.section sjhlb
-.section sjhma
-.section sjhmb
-.section sjhna
-.section sjhnb
-.section sjhoa
-.section sjhob
-.section sjhpa
-.section sjhpb
-.section sjhqa
-.section sjhqb
-.section sjhra
-.section sjhrb
-.section sjhsa
-.section sjhsb
-.section sjhta
-.section sjhtb
-.section sjhua
-.section sjhub
-.section sjhva
-.section sjhvb
-.section sjhwa
-.section sjhwb
-.section sjhxa
-.section sjhxb
-.section sjhya
-.section sjhyb
-.section sjhza
-.section sjhzb
-.section sjh1a
-.section sjh1b
-.section sjh2a
-.section sjh2b
-.section sjh3a
-.section sjh3b
-.section sjh4a
-.section sjh4b
-.section sjh5a
-.section sjh5b
-.section sjh6a
-.section sjh6b
-.section sjh7a
-.section sjh7b
-.section sjh8a
-.section sjh8b
-.section sjh9a
-.section sjh9b
-.section sjh0a
-.section sjh0b
-.section sjiaa
-.section sjiab
-.section sjiba
-.section sjibb
-.section sjica
-.section sjicb
-.section sjida
-.section sjidb
-.section sjiea
-.section sjieb
-.section sjifa
-.section sjifb
-.section sjiga
-.section sjigb
-.section sjiha
-.section sjihb
-.section sjiia
-.section sjiib
-.section sjija
-.section sjijb
-.section sjika
-.section sjikb
-.section sjila
-.section sjilb
-.section sjima
-.section sjimb
-.section sjina
-.section sjinb
-.section sjioa
-.section sjiob
-.section sjipa
-.section sjipb
-.section sjiqa
-.section sjiqb
-.section sjira
-.section sjirb
-.section sjisa
-.section sjisb
-.section sjita
-.section sjitb
-.section sjiua
-.section sjiub
-.section sjiva
-.section sjivb
-.section sjiwa
-.section sjiwb
-.section sjixa
-.section sjixb
-.section sjiya
-.section sjiyb
-.section sjiza
-.section sjizb
-.section sji1a
-.section sji1b
-.section sji2a
-.section sji2b
-.section sji3a
-.section sji3b
-.section sji4a
-.section sji4b
-.section sji5a
-.section sji5b
-.section sji6a
-.section sji6b
-.section sji7a
-.section sji7b
-.section sji8a
-.section sji8b
-.section sji9a
-.section sji9b
-.section sji0a
-.section sji0b
-.section sjjaa
-.section sjjab
-.section sjjba
-.section sjjbb
-.section sjjca
-.section sjjcb
-.section sjjda
-.section sjjdb
-.section sjjea
-.section sjjeb
-.section sjjfa
-.section sjjfb
-.section sjjga
-.section sjjgb
-.section sjjha
-.section sjjhb
-.section sjjia
-.section sjjib
-.section sjjja
-.section sjjjb
-.section sjjka
-.section sjjkb
-.section sjjla
-.section sjjlb
-.section sjjma
-.section sjjmb
-.section sjjna
-.section sjjnb
-.section sjjoa
-.section sjjob
-.section sjjpa
-.section sjjpb
-.section sjjqa
-.section sjjqb
-.section sjjra
-.section sjjrb
-.section sjjsa
-.section sjjsb
-.section sjjta
-.section sjjtb
-.section sjjua
-.section sjjub
-.section sjjva
-.section sjjvb
-.section sjjwa
-.section sjjwb
-.section sjjxa
-.section sjjxb
-.section sjjya
-.section sjjyb
-.section sjjza
-.section sjjzb
-.section sjj1a
-.section sjj1b
-.section sjj2a
-.section sjj2b
-.section sjj3a
-.section sjj3b
-.section sjj4a
-.section sjj4b
-.section sjj5a
-.section sjj5b
-.section sjj6a
-.section sjj6b
-.section sjj7a
-.section sjj7b
-.section sjj8a
-.section sjj8b
-.section sjj9a
-.section sjj9b
-.section sjj0a
-.section sjj0b
-.section sjkaa
-.section sjkab
-.section sjkba
-.section sjkbb
-.section sjkca
-.section sjkcb
-.section sjkda
-.section sjkdb
-.section sjkea
-.section sjkeb
-.section sjkfa
-.section sjkfb
-.section sjkga
-.section sjkgb
-.section sjkha
-.section sjkhb
-.section sjkia
-.section sjkib
-.section sjkja
-.section sjkjb
-.section sjkka
-.section sjkkb
-.section sjkla
-.section sjklb
-.section sjkma
-.section sjkmb
-.section sjkna
-.section sjknb
-.section sjkoa
-.section sjkob
-.section sjkpa
-.section sjkpb
-.section sjkqa
-.section sjkqb
-.section sjkra
-.section sjkrb
-.section sjksa
-.section sjksb
-.section sjkta
-.section sjktb
-.section sjkua
-.section sjkub
-.section sjkva
-.section sjkvb
-.section sjkwa
-.section sjkwb
-.section sjkxa
-.section sjkxb
-.section sjkya
-.section sjkyb
-.section sjkza
-.section sjkzb
-.section sjk1a
-.section sjk1b
-.section sjk2a
-.section sjk2b
-.section sjk3a
-.section sjk3b
-.section sjk4a
-.section sjk4b
-.section sjk5a
-.section sjk5b
-.section sjk6a
-.section sjk6b
-.section sjk7a
-.section sjk7b
-.section sjk8a
-.section sjk8b
-.section sjk9a
-.section sjk9b
-.section sjk0a
-.section sjk0b
-.section sjlaa
-.section sjlab
-.section sjlba
-.section sjlbb
-.section sjlca
-.section sjlcb
-.section sjlda
-.section sjldb
-.section sjlea
-.section sjleb
-.section sjlfa
-.section sjlfb
-.section sjlga
-.section sjlgb
-.section sjlha
-.section sjlhb
-.section sjlia
-.section sjlib
-.section sjlja
-.section sjljb
-.section sjlka
-.section sjlkb
-.section sjlla
-.section sjllb
-.section sjlma
-.section sjlmb
-.section sjlna
-.section sjlnb
-.section sjloa
-.section sjlob
-.section sjlpa
-.section sjlpb
-.section sjlqa
-.section sjlqb
-.section sjlra
-.section sjlrb
-.section sjlsa
-.section sjlsb
-.section sjlta
-.section sjltb
-.section sjlua
-.section sjlub
-.section sjlva
-.section sjlvb
-.section sjlwa
-.section sjlwb
-.section sjlxa
-.section sjlxb
-.section sjlya
-.section sjlyb
-.section sjlza
-.section sjlzb
-.section sjl1a
-.section sjl1b
-.section sjl2a
-.section sjl2b
-.section sjl3a
-.section sjl3b
-.section sjl4a
-.section sjl4b
-.section sjl5a
-.section sjl5b
-.section sjl6a
-.section sjl6b
-.section sjl7a
-.section sjl7b
-.section sjl8a
-.section sjl8b
-.section sjl9a
-.section sjl9b
-.section sjl0a
-.section sjl0b
-.section sjmaa
-.section sjmab
-.section sjmba
-.section sjmbb
-.section sjmca
-.section sjmcb
-.section sjmda
-.section sjmdb
-.section sjmea
-.section sjmeb
-.section sjmfa
-.section sjmfb
-.section sjmga
-.section sjmgb
-.section sjmha
-.section sjmhb
-.section sjmia
-.section sjmib
-.section sjmja
-.section sjmjb
-.section sjmka
-.section sjmkb
-.section sjmla
-.section sjmlb
-.section sjmma
-.section sjmmb
-.section sjmna
-.section sjmnb
-.section sjmoa
-.section sjmob
-.section sjmpa
-.section sjmpb
-.section sjmqa
-.section sjmqb
-.section sjmra
-.section sjmrb
-.section sjmsa
-.section sjmsb
-.section sjmta
-.section sjmtb
-.section sjmua
-.section sjmub
-.section sjmva
-.section sjmvb
-.section sjmwa
-.section sjmwb
-.section sjmxa
-.section sjmxb
-.section sjmya
-.section sjmyb
-.section sjmza
-.section sjmzb
-.section sjm1a
-.section sjm1b
-.section sjm2a
-.section sjm2b
-.section sjm3a
-.section sjm3b
-.section sjm4a
-.section sjm4b
-.section sjm5a
-.section sjm5b
-.section sjm6a
-.section sjm6b
-.section sjm7a
-.section sjm7b
-.section sjm8a
-.section sjm8b
-.section sjm9a
-.section sjm9b
-.section sjm0a
-.section sjm0b
-.section sjnaa
-.section sjnab
-.section sjnba
-.section sjnbb
-.section sjnca
-.section sjncb
-.section sjnda
-.section sjndb
-.section sjnea
-.section sjneb
-.section sjnfa
-.section sjnfb
-.section sjnga
-.section sjngb
-.section sjnha
-.section sjnhb
-.section sjnia
-.section sjnib
-.section sjnja
-.section sjnjb
-.section sjnka
-.section sjnkb
-.section sjnla
-.section sjnlb
-.section sjnma
-.section sjnmb
-.section sjnna
-.section sjnnb
-.section sjnoa
-.section sjnob
-.section sjnpa
-.section sjnpb
-.section sjnqa
-.section sjnqb
-.section sjnra
-.section sjnrb
-.section sjnsa
-.section sjnsb
-.section sjnta
-.section sjntb
-.section sjnua
-.section sjnub
-.section sjnva
-.section sjnvb
-.section sjnwa
-.section sjnwb
-.section sjnxa
-.section sjnxb
-.section sjnya
-.section sjnyb
-.section sjnza
-.section sjnzb
-.section sjn1a
-.section sjn1b
-.section sjn2a
-.section sjn2b
-.section sjn3a
-.section sjn3b
-.section sjn4a
-.section sjn4b
-.section sjn5a
-.section sjn5b
-.section sjn6a
-.section sjn6b
-.section sjn7a
-.section sjn7b
-.section sjn8a
-.section sjn8b
-.section sjn9a
-.section sjn9b
-.section sjn0a
-.section sjn0b
-.section sjoaa
-.section sjoab
-.section sjoba
-.section sjobb
-.section sjoca
-.section sjocb
-.section sjoda
-.section sjodb
-.section sjoea
-.section sjoeb
-.section sjofa
-.section sjofb
-.section sjoga
-.section sjogb
-.section sjoha
-.section sjohb
-.section sjoia
-.section sjoib
-.section sjoja
-.section sjojb
-.section sjoka
-.section sjokb
-.section sjola
-.section sjolb
-.section sjoma
-.section sjomb
-.section sjona
-.section sjonb
-.section sjooa
-.section sjoob
-.section sjopa
-.section sjopb
-.section sjoqa
-.section sjoqb
-.section sjora
-.section sjorb
-.section sjosa
-.section sjosb
-.section sjota
-.section sjotb
-.section sjoua
-.section sjoub
-.section sjova
-.section sjovb
-.section sjowa
-.section sjowb
-.section sjoxa
-.section sjoxb
-.section sjoya
-.section sjoyb
-.section sjoza
-.section sjozb
-.section sjo1a
-.section sjo1b
-.section sjo2a
-.section sjo2b
-.section sjo3a
-.section sjo3b
-.section sjo4a
-.section sjo4b
-.section sjo5a
-.section sjo5b
-.section sjo6a
-.section sjo6b
-.section sjo7a
-.section sjo7b
-.section sjo8a
-.section sjo8b
-.section sjo9a
-.section sjo9b
-.section sjo0a
-.section sjo0b
-.section sjpaa
-.section sjpab
-.section sjpba
-.section sjpbb
-.section sjpca
-.section sjpcb
-.section sjpda
-.section sjpdb
-.section sjpea
-.section sjpeb
-.section sjpfa
-.section sjpfb
-.section sjpga
-.section sjpgb
-.section sjpha
-.section sjphb
-.section sjpia
-.section sjpib
-.section sjpja
-.section sjpjb
-.section sjpka
-.section sjpkb
-.section sjpla
-.section sjplb
-.section sjpma
-.section sjpmb
-.section sjpna
-.section sjpnb
-.section sjpoa
-.section sjpob
-.section sjppa
-.section sjppb
-.section sjpqa
-.section sjpqb
-.section sjpra
-.section sjprb
-.section sjpsa
-.section sjpsb
-.section sjpta
-.section sjptb
-.section sjpua
-.section sjpub
-.section sjpva
-.section sjpvb
-.section sjpwa
-.section sjpwb
-.section sjpxa
-.section sjpxb
-.section sjpya
-.section sjpyb
-.section sjpza
-.section sjpzb
-.section sjp1a
-.section sjp1b
-.section sjp2a
-.section sjp2b
-.section sjp3a
-.section sjp3b
-.section sjp4a
-.section sjp4b
-.section sjp5a
-.section sjp5b
-.section sjp6a
-.section sjp6b
-.section sjp7a
-.section sjp7b
-.section sjp8a
-.section sjp8b
-.section sjp9a
-.section sjp9b
-.section sjp0a
-.section sjp0b
-.section sjqaa
-.section sjqab
-.section sjqba
-.section sjqbb
-.section sjqca
-.section sjqcb
-.section sjqda
-.section sjqdb
-.section sjqea
-.section sjqeb
-.section sjqfa
-.section sjqfb
-.section sjqga
-.section sjqgb
-.section sjqha
-.section sjqhb
-.section sjqia
-.section sjqib
-.section sjqja
-.section sjqjb
-.section sjqka
-.section sjqkb
-.section sjqla
-.section sjqlb
-.section sjqma
-.section sjqmb
-.section sjqna
-.section sjqnb
-.section sjqoa
-.section sjqob
-.section sjqpa
-.section sjqpb
-.section sjqqa
-.section sjqqb
-.section sjqra
-.section sjqrb
-.section sjqsa
-.section sjqsb
-.section sjqta
-.section sjqtb
-.section sjqua
-.section sjqub
-.section sjqva
-.section sjqvb
-.section sjqwa
-.section sjqwb
-.section sjqxa
-.section sjqxb
-.section sjqya
-.section sjqyb
-.section sjqza
-.section sjqzb
-.section sjq1a
-.section sjq1b
-.section sjq2a
-.section sjq2b
-.section sjq3a
-.section sjq3b
-.section sjq4a
-.section sjq4b
-.section sjq5a
-.section sjq5b
-.section sjq6a
-.section sjq6b
-.section sjq7a
-.section sjq7b
-.section sjq8a
-.section sjq8b
-.section sjq9a
-.section sjq9b
-.section sjq0a
-.section sjq0b
-.section sjraa
-.section sjrab
-.section sjrba
-.section sjrbb
-.section sjrca
-.section sjrcb
-.section sjrda
-.section sjrdb
-.section sjrea
-.section sjreb
-.section sjrfa
-.section sjrfb
-.section sjrga
-.section sjrgb
-.section sjrha
-.section sjrhb
-.section sjria
-.section sjrib
-.section sjrja
-.section sjrjb
-.section sjrka
-.section sjrkb
-.section sjrla
-.section sjrlb
-.section sjrma
-.section sjrmb
-.section sjrna
-.section sjrnb
-.section sjroa
-.section sjrob
-.section sjrpa
-.section sjrpb
-.section sjrqa
-.section sjrqb
-.section sjrra
-.section sjrrb
-.section sjrsa
-.section sjrsb
-.section sjrta
-.section sjrtb
-.section sjrua
-.section sjrub
-.section sjrva
-.section sjrvb
-.section sjrwa
-.section sjrwb
-.section sjrxa
-.section sjrxb
-.section sjrya
-.section sjryb
-.section sjrza
-.section sjrzb
-.section sjr1a
-.section sjr1b
-.section sjr2a
-.section sjr2b
-.section sjr3a
-.section sjr3b
-.section sjr4a
-.section sjr4b
-.section sjr5a
-.section sjr5b
-.section sjr6a
-.section sjr6b
-.section sjr7a
-.section sjr7b
-.section sjr8a
-.section sjr8b
-.section sjr9a
-.section sjr9b
-.section sjr0a
-.section sjr0b
-.section sjsaa
-.section sjsab
-.section sjsba
-.section sjsbb
-.section sjsca
-.section sjscb
-.section sjsda
-.section sjsdb
-.section sjsea
-.section sjseb
-.section sjsfa
-.section sjsfb
-.section sjsga
-.section sjsgb
-.section sjsha
-.section sjshb
-.section sjsia
-.section sjsib
-.section sjsja
-.section sjsjb
-.section sjska
-.section sjskb
-.section sjsla
-.section sjslb
-.section sjsma
-.section sjsmb
-.section sjsna
-.section sjsnb
-.section sjsoa
-.section sjsob
-.section sjspa
-.section sjspb
-.section sjsqa
-.section sjsqb
-.section sjsra
-.section sjsrb
-.section sjssa
-.section sjssb
-.section sjsta
-.section sjstb
-.section sjsua
-.section sjsub
-.section sjsva
-.section sjsvb
-.section sjswa
-.section sjswb
-.section sjsxa
-.section sjsxb
-.section sjsya
-.section sjsyb
-.section sjsza
-.section sjszb
-.section sjs1a
-.section sjs1b
-.section sjs2a
-.section sjs2b
-.section sjs3a
-.section sjs3b
-.section sjs4a
-.section sjs4b
-.section sjs5a
-.section sjs5b
-.section sjs6a
-.section sjs6b
-.section sjs7a
-.section sjs7b
-.section sjs8a
-.section sjs8b
-.section sjs9a
-.section sjs9b
-.section sjs0a
-.section sjs0b
-.section sjtaa
-.section sjtab
-.section sjtba
-.section sjtbb
-.section sjtca
-.section sjtcb
-.section sjtda
-.section sjtdb
-.section sjtea
-.section sjteb
-.section sjtfa
-.section sjtfb
-.section sjtga
-.section sjtgb
-.section sjtha
-.section sjthb
-.section sjtia
-.section sjtib
-.section sjtja
-.section sjtjb
-.section sjtka
-.section sjtkb
-.section sjtla
-.section sjtlb
-.section sjtma
-.section sjtmb
-.section sjtna
-.section sjtnb
-.section sjtoa
-.section sjtob
-.section sjtpa
-.section sjtpb
-.section sjtqa
-.section sjtqb
-.section sjtra
-.section sjtrb
-.section sjtsa
-.section sjtsb
-.section sjtta
-.section sjttb
-.section sjtua
-.section sjtub
-.section sjtva
-.section sjtvb
-.section sjtwa
-.section sjtwb
-.section sjtxa
-.section sjtxb
-.section sjtya
-.section sjtyb
-.section sjtza
-.section sjtzb
-.section sjt1a
-.section sjt1b
-.section sjt2a
-.section sjt2b
-.section sjt3a
-.section sjt3b
-.section sjt4a
-.section sjt4b
-.section sjt5a
-.section sjt5b
-.section sjt6a
-.section sjt6b
-.section sjt7a
-.section sjt7b
-.section sjt8a
-.section sjt8b
-.section sjt9a
-.section sjt9b
-.section sjt0a
-.section sjt0b
-.section sjuaa
-.section sjuab
-.section sjuba
-.section sjubb
-.section sjuca
-.section sjucb
-.section sjuda
-.section sjudb
-.section sjuea
-.section sjueb
-.section sjufa
-.section sjufb
-.section sjuga
-.section sjugb
-.section sjuha
-.section sjuhb
-.section sjuia
-.section sjuib
-.section sjuja
-.section sjujb
-.section sjuka
-.section sjukb
-.section sjula
-.section sjulb
-.section sjuma
-.section sjumb
-.section sjuna
-.section sjunb
-.section sjuoa
-.section sjuob
-.section sjupa
-.section sjupb
-.section sjuqa
-.section sjuqb
-.section sjura
-.section sjurb
-.section sjusa
-.section sjusb
-.section sjuta
-.section sjutb
-.section sjuua
-.section sjuub
-.section sjuva
-.section sjuvb
-.section sjuwa
-.section sjuwb
-.section sjuxa
-.section sjuxb
-.section sjuya
-.section sjuyb
-.section sjuza
-.section sjuzb
-.section sju1a
-.section sju1b
-.section sju2a
-.section sju2b
-.section sju3a
-.section sju3b
-.section sju4a
-.section sju4b
-.section sju5a
-.section sju5b
-.section sju6a
-.section sju6b
-.section sju7a
-.section sju7b
-.section sju8a
-.section sju8b
-.section sju9a
-.section sju9b
-.section sju0a
-.section sju0b
-.section sjvaa
-.section sjvab
-.section sjvba
-.section sjvbb
-.section sjvca
-.section sjvcb
-.section sjvda
-.section sjvdb
-.section sjvea
-.section sjveb
-.section sjvfa
-.section sjvfb
-.section sjvga
-.section sjvgb
-.section sjvha
-.section sjvhb
-.section sjvia
-.section sjvib
-.section sjvja
-.section sjvjb
-.section sjvka
-.section sjvkb
-.section sjvla
-.section sjvlb
-.section sjvma
-.section sjvmb
-.section sjvna
-.section sjvnb
-.section sjvoa
-.section sjvob
-.section sjvpa
-.section sjvpb
-.section sjvqa
-.section sjvqb
-.section sjvra
-.section sjvrb
-.section sjvsa
-.section sjvsb
-.section sjvta
-.section sjvtb
-.section sjvua
-.section sjvub
-.section sjvva
-.section sjvvb
-.section sjvwa
-.section sjvwb
-.section sjvxa
-.section sjvxb
-.section sjvya
-.section sjvyb
-.section sjvza
-.section sjvzb
-.section sjv1a
-.section sjv1b
-.section sjv2a
-.section sjv2b
-.section sjv3a
-.section sjv3b
-.section sjv4a
-.section sjv4b
-.section sjv5a
-.section sjv5b
-.section sjv6a
-.section sjv6b
-.section sjv7a
-.section sjv7b
-.section sjv8a
-.section sjv8b
-.section sjv9a
-.section sjv9b
-.section sjv0a
-.section sjv0b
-.section sjwaa
-.section sjwab
-.section sjwba
-.section sjwbb
-.section sjwca
-.section sjwcb
-.section sjwda
-.section sjwdb
-.section sjwea
-.section sjweb
-.section sjwfa
-.section sjwfb
-.section sjwga
-.section sjwgb
-.section sjwha
-.section sjwhb
-.section sjwia
-.section sjwib
-.section sjwja
-.section sjwjb
-.section sjwka
-.section sjwkb
-.section sjwla
-.section sjwlb
-.section sjwma
-.section sjwmb
-.section sjwna
-.section sjwnb
-.section sjwoa
-.section sjwob
-.section sjwpa
-.section sjwpb
-.section sjwqa
-.section sjwqb
-.section sjwra
-.section sjwrb
-.section sjwsa
-.section sjwsb
-.section sjwta
-.section sjwtb
-.section sjwua
-.section sjwub
-.section sjwva
-.section sjwvb
-.section sjwwa
-.section sjwwb
-.section sjwxa
-.section sjwxb
-.section sjwya
-.section sjwyb
-.section sjwza
-.section sjwzb
-.section sjw1a
-.section sjw1b
-.section sjw2a
-.section sjw2b
-.section sjw3a
-.section sjw3b
-.section sjw4a
-.section sjw4b
-.section sjw5a
-.section sjw5b
-.section sjw6a
-.section sjw6b
-.section sjw7a
-.section sjw7b
-.section sjw8a
-.section sjw8b
-.section sjw9a
-.section sjw9b
-.section sjw0a
-.section sjw0b
-.section sjxaa
-.section sjxab
-.section sjxba
-.section sjxbb
-.section sjxca
-.section sjxcb
-.section sjxda
-.section sjxdb
-.section sjxea
-.section sjxeb
-.section sjxfa
-.section sjxfb
-.section sjxga
-.section sjxgb
-.section sjxha
-.section sjxhb
-.section sjxia
-.section sjxib
-.section sjxja
-.section sjxjb
-.section sjxka
-.section sjxkb
-.section sjxla
-.section sjxlb
-.section sjxma
-.section sjxmb
-.section sjxna
-.section sjxnb
-.section sjxoa
-.section sjxob
-.section sjxpa
-.section sjxpb
-.section sjxqa
-.section sjxqb
-.section sjxra
-.section sjxrb
-.section sjxsa
-.section sjxsb
-.section sjxta
-.section sjxtb
-.section sjxua
-.section sjxub
-.section sjxva
-.section sjxvb
-.section sjxwa
-.section sjxwb
-.section sjxxa
-.section sjxxb
-.section sjxya
-.section sjxyb
-.section sjxza
-.section sjxzb
-.section sjx1a
-.section sjx1b
-.section sjx2a
-.section sjx2b
-.section sjx3a
-.section sjx3b
-.section sjx4a
-.section sjx4b
-.section sjx5a
-.section sjx5b
-.section sjx6a
-.section sjx6b
-.section sjx7a
-.section sjx7b
-.section sjx8a
-.section sjx8b
-.section sjx9a
-.section sjx9b
-.section sjx0a
-.section sjx0b
-.section sjyaa
-.section sjyab
-.section sjyba
-.section sjybb
-.section sjyca
-.section sjycb
-.section sjyda
-.section sjydb
-.section sjyea
-.section sjyeb
-.section sjyfa
-.section sjyfb
-.section sjyga
-.section sjygb
-.section sjyha
-.section sjyhb
-.section sjyia
-.section sjyib
-.section sjyja
-.section sjyjb
-.section sjyka
-.section sjykb
-.section sjyla
-.section sjylb
-.section sjyma
-.section sjymb
-.section sjyna
-.section sjynb
-.section sjyoa
-.section sjyob
-.section sjypa
-.section sjypb
-.section sjyqa
-.section sjyqb
-.section sjyra
-.section sjyrb
-.section sjysa
-.section sjysb
-.section sjyta
-.section sjytb
-.section sjyua
-.section sjyub
-.section sjyva
-.section sjyvb
-.section sjywa
-.section sjywb
-.section sjyxa
-.section sjyxb
-.section sjyya
-.section sjyyb
-.section sjyza
-.section sjyzb
-.section sjy1a
-.section sjy1b
-.section sjy2a
-.section sjy2b
-.section sjy3a
-.section sjy3b
-.section sjy4a
-.section sjy4b
-.section sjy5a
-.section sjy5b
-.section sjy6a
-.section sjy6b
-.section sjy7a
-.section sjy7b
-.section sjy8a
-.section sjy8b
-.section sjy9a
-.section sjy9b
-.section sjy0a
-.section sjy0b
-.section sjzaa
-.section sjzab
-.section sjzba
-.section sjzbb
-.section sjzca
-.section sjzcb
-.section sjzda
-.section sjzdb
-.section sjzea
-.section sjzeb
-.section sjzfa
-.section sjzfb
-.section sjzga
-.section sjzgb
-.section sjzha
-.section sjzhb
-.section sjzia
-.section sjzib
-.section sjzja
-.section sjzjb
-.section sjzka
-.section sjzkb
-.section sjzla
-.section sjzlb
-.section sjzma
-.section sjzmb
-.section sjzna
-.section sjznb
-.section sjzoa
-.section sjzob
-.section sjzpa
-.section sjzpb
-.section sjzqa
-.section sjzqb
-.section sjzra
-.section sjzrb
-.section sjzsa
-.section sjzsb
-.section sjzta
-.section sjztb
-.section sjzua
-.section sjzub
-.section sjzva
-.section sjzvb
-.section sjzwa
-.section sjzwb
-.section sjzxa
-.section sjzxb
-.section sjzya
-.section sjzyb
-.section sjzza
-.section sjzzb
-.section sjz1a
-.section sjz1b
-.section sjz2a
-.section sjz2b
-.section sjz3a
-.section sjz3b
-.section sjz4a
-.section sjz4b
-.section sjz5a
-.section sjz5b
-.section sjz6a
-.section sjz6b
-.section sjz7a
-.section sjz7b
-.section sjz8a
-.section sjz8b
-.section sjz9a
-.section sjz9b
-.section sjz0a
-.section sjz0b
-.section sj1aa
-.section sj1ab
-.section sj1ba
-.section sj1bb
-.section sj1ca
-.section sj1cb
-.section sj1da
-.section sj1db
-.section sj1ea
-.section sj1eb
-.section sj1fa
-.section sj1fb
-.section sj1ga
-.section sj1gb
-.section sj1ha
-.section sj1hb
-.section sj1ia
-.section sj1ib
-.section sj1ja
-.section sj1jb
-.section sj1ka
-.section sj1kb
-.section sj1la
-.section sj1lb
-.section sj1ma
-.section sj1mb
-.section sj1na
-.section sj1nb
-.section sj1oa
-.section sj1ob
-.section sj1pa
-.section sj1pb
-.section sj1qa
-.section sj1qb
-.section sj1ra
-.section sj1rb
-.section sj1sa
-.section sj1sb
-.section sj1ta
-.section sj1tb
-.section sj1ua
-.section sj1ub
-.section sj1va
-.section sj1vb
-.section sj1wa
-.section sj1wb
-.section sj1xa
-.section sj1xb
-.section sj1ya
-.section sj1yb
-.section sj1za
-.section sj1zb
-.section sj11a
-.section sj11b
-.section sj12a
-.section sj12b
-.section sj13a
-.section sj13b
-.section sj14a
-.section sj14b
-.section sj15a
-.section sj15b
-.section sj16a
-.section sj16b
-.section sj17a
-.section sj17b
-.section sj18a
-.section sj18b
-.section sj19a
-.section sj19b
-.section sj10a
-.section sj10b
-.section sj2aa
-.section sj2ab
-.section sj2ba
-.section sj2bb
-.section sj2ca
-.section sj2cb
-.section sj2da
-.section sj2db
-.section sj2ea
-.section sj2eb
-.section sj2fa
-.section sj2fb
-.section sj2ga
-.section sj2gb
-.section sj2ha
-.section sj2hb
-.section sj2ia
-.section sj2ib
-.section sj2ja
-.section sj2jb
-.section sj2ka
-.section sj2kb
-.section sj2la
-.section sj2lb
-.section sj2ma
-.section sj2mb
-.section sj2na
-.section sj2nb
-.section sj2oa
-.section sj2ob
-.section sj2pa
-.section sj2pb
-.section sj2qa
-.section sj2qb
-.section sj2ra
-.section sj2rb
-.section sj2sa
-.section sj2sb
-.section sj2ta
-.section sj2tb
-.section sj2ua
-.section sj2ub
-.section sj2va
-.section sj2vb
-.section sj2wa
-.section sj2wb
-.section sj2xa
-.section sj2xb
-.section sj2ya
-.section sj2yb
-.section sj2za
-.section sj2zb
-.section sj21a
-.section sj21b
-.section sj22a
-.section sj22b
-.section sj23a
-.section sj23b
-.section sj24a
-.section sj24b
-.section sj25a
-.section sj25b
-.section sj26a
-.section sj26b
-.section sj27a
-.section sj27b
-.section sj28a
-.section sj28b
-.section sj29a
-.section sj29b
-.section sj20a
-.section sj20b
-.section sj3aa
-.section sj3ab
-.section sj3ba
-.section sj3bb
-.section sj3ca
-.section sj3cb
-.section sj3da
-.section sj3db
-.section sj3ea
-.section sj3eb
-.section sj3fa
-.section sj3fb
-.section sj3ga
-.section sj3gb
-.section sj3ha
-.section sj3hb
-.section sj3ia
-.section sj3ib
-.section sj3ja
-.section sj3jb
-.section sj3ka
-.section sj3kb
-.section sj3la
-.section sj3lb
-.section sj3ma
-.section sj3mb
-.section sj3na
-.section sj3nb
-.section sj3oa
-.section sj3ob
-.section sj3pa
-.section sj3pb
-.section sj3qa
-.section sj3qb
-.section sj3ra
-.section sj3rb
-.section sj3sa
-.section sj3sb
-.section sj3ta
-.section sj3tb
-.section sj3ua
-.section sj3ub
-.section sj3va
-.section sj3vb
-.section sj3wa
-.section sj3wb
-.section sj3xa
-.section sj3xb
-.section sj3ya
-.section sj3yb
-.section sj3za
-.section sj3zb
-.section sj31a
-.section sj31b
-.section sj32a
-.section sj32b
-.section sj33a
-.section sj33b
-.section sj34a
-.section sj34b
-.section sj35a
-.section sj35b
-.section sj36a
-.section sj36b
-.section sj37a
-.section sj37b
-.section sj38a
-.section sj38b
-.section sj39a
-.section sj39b
-.section sj30a
-.section sj30b
-.section sj4aa
-.section sj4ab
-.section sj4ba
-.section sj4bb
-.section sj4ca
-.section sj4cb
-.section sj4da
-.section sj4db
-.section sj4ea
-.section sj4eb
-.section sj4fa
-.section sj4fb
-.section sj4ga
-.section sj4gb
-.section sj4ha
-.section sj4hb
-.section sj4ia
-.section sj4ib
-.section sj4ja
-.section sj4jb
-.section sj4ka
-.section sj4kb
-.section sj4la
-.section sj4lb
-.section sj4ma
-.section sj4mb
-.section sj4na
-.section sj4nb
-.section sj4oa
-.section sj4ob
-.section sj4pa
-.section sj4pb
-.section sj4qa
-.section sj4qb
-.section sj4ra
-.section sj4rb
-.section sj4sa
-.section sj4sb
-.section sj4ta
-.section sj4tb
-.section sj4ua
-.section sj4ub
-.section sj4va
-.section sj4vb
-.section sj4wa
-.section sj4wb
-.section sj4xa
-.section sj4xb
-.section sj4ya
-.section sj4yb
-.section sj4za
-.section sj4zb
-.section sj41a
-.section sj41b
-.section sj42a
-.section sj42b
-.section sj43a
-.section sj43b
-.section sj44a
-.section sj44b
-.section sj45a
-.section sj45b
-.section sj46a
-.section sj46b
-.section sj47a
-.section sj47b
-.section sj48a
-.section sj48b
-.section sj49a
-.section sj49b
-.section sj40a
-.section sj40b
-.section sj5aa
-.section sj5ab
-.section sj5ba
-.section sj5bb
-.section sj5ca
-.section sj5cb
-.section sj5da
-.section sj5db
-.section sj5ea
-.section sj5eb
-.section sj5fa
-.section sj5fb
-.section sj5ga
-.section sj5gb
-.section sj5ha
-.section sj5hb
-.section sj5ia
-.section sj5ib
-.section sj5ja
-.section sj5jb
-.section sj5ka
-.section sj5kb
-.section sj5la
-.section sj5lb
-.section sj5ma
-.section sj5mb
-.section sj5na
-.section sj5nb
-.section sj5oa
-.section sj5ob
-.section sj5pa
-.section sj5pb
-.section sj5qa
-.section sj5qb
-.section sj5ra
-.section sj5rb
-.section sj5sa
-.section sj5sb
-.section sj5ta
-.section sj5tb
-.section sj5ua
-.section sj5ub
-.section sj5va
-.section sj5vb
-.section sj5wa
-.section sj5wb
-.section sj5xa
-.section sj5xb
-.section sj5ya
-.section sj5yb
-.section sj5za
-.section sj5zb
-.section sj51a
-.section sj51b
-.section sj52a
-.section sj52b
-.section sj53a
-.section sj53b
-.section sj54a
-.section sj54b
-.section sj55a
-.section sj55b
-.section sj56a
-.section sj56b
-.section sj57a
-.section sj57b
-.section sj58a
-.section sj58b
-.section sj59a
-.section sj59b
-.section sj50a
-.section sj50b
-.section sj6aa
-.section sj6ab
-.section sj6ba
-.section sj6bb
-.section sj6ca
-.section sj6cb
-.section sj6da
-.section sj6db
-.section sj6ea
-.section sj6eb
-.section sj6fa
-.section sj6fb
-.section sj6ga
-.section sj6gb
-.section sj6ha
-.section sj6hb
-.section sj6ia
-.section sj6ib
-.section sj6ja
-.section sj6jb
-.section sj6ka
-.section sj6kb
-.section sj6la
-.section sj6lb
-.section sj6ma
-.section sj6mb
-.section sj6na
-.section sj6nb
-.section sj6oa
-.section sj6ob
-.section sj6pa
-.section sj6pb
-.section sj6qa
-.section sj6qb
-.section sj6ra
-.section sj6rb
-.section sj6sa
-.section sj6sb
-.section sj6ta
-.section sj6tb
-.section sj6ua
-.section sj6ub
-.section sj6va
-.section sj6vb
-.section sj6wa
-.section sj6wb
-.section sj6xa
-.section sj6xb
-.section sj6ya
-.section sj6yb
-.section sj6za
-.section sj6zb
-.section sj61a
-.section sj61b
-.section sj62a
-.section sj62b
-.section sj63a
-.section sj63b
-.section sj64a
-.section sj64b
-.section sj65a
-.section sj65b
-.section sj66a
-.section sj66b
-.section sj67a
-.section sj67b
-.section sj68a
-.section sj68b
-.section sj69a
-.section sj69b
-.section sj60a
-.section sj60b
-.section sj7aa
-.section sj7ab
-.section sj7ba
-.section sj7bb
-.section sj7ca
-.section sj7cb
-.section sj7da
-.section sj7db
-.section sj7ea
-.section sj7eb
-.section sj7fa
-.section sj7fb
-.section sj7ga
-.section sj7gb
-.section sj7ha
-.section sj7hb
-.section sj7ia
-.section sj7ib
-.section sj7ja
-.section sj7jb
-.section sj7ka
-.section sj7kb
-.section sj7la
-.section sj7lb
-.section sj7ma
-.section sj7mb
-.section sj7na
-.section sj7nb
-.section sj7oa
-.section sj7ob
-.section sj7pa
-.section sj7pb
-.section sj7qa
-.section sj7qb
-.section sj7ra
-.section sj7rb
-.section sj7sa
-.section sj7sb
-.section sj7ta
-.section sj7tb
-.section sj7ua
-.section sj7ub
-.section sj7va
-.section sj7vb
-.section sj7wa
-.section sj7wb
-.section sj7xa
-.section sj7xb
-.section sj7ya
-.section sj7yb
-.section sj7za
-.section sj7zb
-.section sj71a
-.section sj71b
-.section sj72a
-.section sj72b
-.section sj73a
-.section sj73b
-.section sj74a
-.section sj74b
-.section sj75a
-.section sj75b
-.section sj76a
-.section sj76b
-.section sj77a
-.section sj77b
-.section sj78a
-.section sj78b
-.section sj79a
-.section sj79b
-.section sj70a
-.section sj70b
-.section sj8aa
-.section sj8ab
-.section sj8ba
-.section sj8bb
-.section sj8ca
-.section sj8cb
-.section sj8da
-.section sj8db
-.section sj8ea
-.section sj8eb
-.section sj8fa
-.section sj8fb
-.section sj8ga
-.section sj8gb
-.section sj8ha
-.section sj8hb
-.section sj8ia
-.section sj8ib
-.section sj8ja
-.section sj8jb
-.section sj8ka
-.section sj8kb
-.section sj8la
-.section sj8lb
-.section sj8ma
-.section sj8mb
-.section sj8na
-.section sj8nb
-.section sj8oa
-.section sj8ob
-.section sj8pa
-.section sj8pb
-.section sj8qa
-.section sj8qb
-.section sj8ra
-.section sj8rb
-.section sj8sa
-.section sj8sb
-.section sj8ta
-.section sj8tb
-.section sj8ua
-.section sj8ub
-.section sj8va
-.section sj8vb
-.section sj8wa
-.section sj8wb
-.section sj8xa
-.section sj8xb
-.section sj8ya
-.section sj8yb
-.section sj8za
-.section sj8zb
-.section sj81a
-.section sj81b
-.section sj82a
-.section sj82b
-.section sj83a
-.section sj83b
-.section sj84a
-.section sj84b
-.section sj85a
-.section sj85b
-.section sj86a
-.section sj86b
-.section sj87a
-.section sj87b
-.section sj88a
-.section sj88b
-.section sj89a
-.section sj89b
-.section sj80a
-.section sj80b
-.section sj9aa
-.section sj9ab
-.section sj9ba
-.section sj9bb
-.section sj9ca
-.section sj9cb
-.section sj9da
-.section sj9db
-.section sj9ea
-.section sj9eb
-.section sj9fa
-.section sj9fb
-.section sj9ga
-.section sj9gb
-.section sj9ha
-.section sj9hb
-.section sj9ia
-.section sj9ib
-.section sj9ja
-.section sj9jb
-.section sj9ka
-.section sj9kb
-.section sj9la
-.section sj9lb
-.section sj9ma
-.section sj9mb
-.section sj9na
-.section sj9nb
-.section sj9oa
-.section sj9ob
-.section sj9pa
-.section sj9pb
-.section sj9qa
-.section sj9qb
-.section sj9ra
-.section sj9rb
-.section sj9sa
-.section sj9sb
-.section sj9ta
-.section sj9tb
-.section sj9ua
-.section sj9ub
-.section sj9va
-.section sj9vb
-.section sj9wa
-.section sj9wb
-.section sj9xa
-.section sj9xb
-.section sj9ya
-.section sj9yb
-.section sj9za
-.section sj9zb
-.section sj91a
-.section sj91b
-.section sj92a
-.section sj92b
-.section sj93a
-.section sj93b
-.section sj94a
-.section sj94b
-.section sj95a
-.section sj95b
-.section sj96a
-.section sj96b
-.section sj97a
-.section sj97b
-.section sj98a
-.section sj98b
-.section sj99a
-.section sj99b
-.section sj90a
-.section sj90b
-.section sj0aa
-.section sj0ab
-.section sj0ba
-.section sj0bb
-.section sj0ca
-.section sj0cb
-.section sj0da
-.section sj0db
-.section sj0ea
-.section sj0eb
-.section sj0fa
-.section sj0fb
-.section sj0ga
-.section sj0gb
-.section sj0ha
-.section sj0hb
-.section sj0ia
-.section sj0ib
-.section sj0ja
-.section sj0jb
-.section sj0ka
-.section sj0kb
-.section sj0la
-.section sj0lb
-.section sj0ma
-.section sj0mb
-.section sj0na
-.section sj0nb
-.section sj0oa
-.section sj0ob
-.section sj0pa
-.section sj0pb
-.section sj0qa
-.section sj0qb
-.section sj0ra
-.section sj0rb
-.section sj0sa
-.section sj0sb
-.section sj0ta
-.section sj0tb
-.section sj0ua
-.section sj0ub
-.section sj0va
-.section sj0vb
-.section sj0wa
-.section sj0wb
-.section sj0xa
-.section sj0xb
-.section sj0ya
-.section sj0yb
-.section sj0za
-.section sj0zb
-.section sj01a
-.section sj01b
-.section sj02a
-.section sj02b
-.section sj03a
-.section sj03b
-.section sj04a
-.section sj04b
-.section sj05a
-.section sj05b
-.section sj06a
-.section sj06b
-.section sj07a
-.section sj07b
-.section sj08a
-.section sj08b
-.section sj09a
-.section sj09b
-.section sj00a
-.section sj00b
-.section skaaa
-.section skaab
-.section skaba
-.section skabb
-.section skaca
-.section skacb
-.section skada
-.section skadb
-.section skaea
-.section skaeb
-.section skafa
-.section skafb
-.section skaga
-.section skagb
-.section skaha
-.section skahb
-.section skaia
-.section skaib
-.section skaja
-.section skajb
-.section skaka
-.section skakb
-.section skala
-.section skalb
-.section skama
-.section skamb
-.section skana
-.section skanb
-.section skaoa
-.section skaob
-.section skapa
-.section skapb
-.section skaqa
-.section skaqb
-.section skara
-.section skarb
-.section skasa
-.section skasb
-.section skata
-.section skatb
-.section skaua
-.section skaub
-.section skava
-.section skavb
-.section skawa
-.section skawb
-.section skaxa
-.section skaxb
-.section skaya
-.section skayb
-.section skaza
-.section skazb
-.section ska1a
-.section ska1b
-.section ska2a
-.section ska2b
-.section ska3a
-.section ska3b
-.section ska4a
-.section ska4b
-.section ska5a
-.section ska5b
-.section ska6a
-.section ska6b
-.section ska7a
-.section ska7b
-.section ska8a
-.section ska8b
-.section ska9a
-.section ska9b
-.section ska0a
-.section ska0b
-.section skbaa
-.section skbab
-.section skbba
-.section skbbb
-.section skbca
-.section skbcb
-.section skbda
-.section skbdb
-.section skbea
-.section skbeb
-.section skbfa
-.section skbfb
-.section skbga
-.section skbgb
-.section skbha
-.section skbhb
-.section skbia
-.section skbib
-.section skbja
-.section skbjb
-.section skbka
-.section skbkb
-.section skbla
-.section skblb
-.section skbma
-.section skbmb
-.section skbna
-.section skbnb
-.section skboa
-.section skbob
-.section skbpa
-.section skbpb
-.section skbqa
-.section skbqb
-.section skbra
-.section skbrb
-.section skbsa
-.section skbsb
-.section skbta
-.section skbtb
-.section skbua
-.section skbub
-.section skbva
-.section skbvb
-.section skbwa
-.section skbwb
-.section skbxa
-.section skbxb
-.section skbya
-.section skbyb
-.section skbza
-.section skbzb
-.section skb1a
-.section skb1b
-.section skb2a
-.section skb2b
-.section skb3a
-.section skb3b
-.section skb4a
-.section skb4b
-.section skb5a
-.section skb5b
-.section skb6a
-.section skb6b
-.section skb7a
-.section skb7b
-.section skb8a
-.section skb8b
-.section skb9a
-.section skb9b
-.section skb0a
-.section skb0b
-.section skcaa
-.section skcab
-.section skcba
-.section skcbb
-.section skcca
-.section skccb
-.section skcda
-.section skcdb
-.section skcea
-.section skceb
-.section skcfa
-.section skcfb
-.section skcga
-.section skcgb
-.section skcha
-.section skchb
-.section skcia
-.section skcib
-.section skcja
-.section skcjb
-.section skcka
-.section skckb
-.section skcla
-.section skclb
-.section skcma
-.section skcmb
-.section skcna
-.section skcnb
-.section skcoa
-.section skcob
-.section skcpa
-.section skcpb
-.section skcqa
-.section skcqb
-.section skcra
-.section skcrb
-.section skcsa
-.section skcsb
-.section skcta
-.section skctb
-.section skcua
-.section skcub
-.section skcva
-.section skcvb
-.section skcwa
-.section skcwb
-.section skcxa
-.section skcxb
-.section skcya
-.section skcyb
-.section skcza
-.section skczb
-.section skc1a
-.section skc1b
-.section skc2a
-.section skc2b
-.section skc3a
-.section skc3b
-.section skc4a
-.section skc4b
-.section skc5a
-.section skc5b
-.section skc6a
-.section skc6b
-.section skc7a
-.section skc7b
-.section skc8a
-.section skc8b
-.section skc9a
-.section skc9b
-.section skc0a
-.section skc0b
-.section skdaa
-.section skdab
-.section skdba
-.section skdbb
-.section skdca
-.section skdcb
-.section skdda
-.section skddb
-.section skdea
-.section skdeb
-.section skdfa
-.section skdfb
-.section skdga
-.section skdgb
-.section skdha
-.section skdhb
-.section skdia
-.section skdib
-.section skdja
-.section skdjb
-.section skdka
-.section skdkb
-.section skdla
-.section skdlb
-.section skdma
-.section skdmb
-.section skdna
-.section skdnb
-.section skdoa
-.section skdob
-.section skdpa
-.section skdpb
-.section skdqa
-.section skdqb
-.section skdra
-.section skdrb
-.section skdsa
-.section skdsb
-.section skdta
-.section skdtb
-.section skdua
-.section skdub
-.section skdva
-.section skdvb
-.section skdwa
-.section skdwb
-.section skdxa
-.section skdxb
-.section skdya
-.section skdyb
-.section skdza
-.section skdzb
-.section skd1a
-.section skd1b
-.section skd2a
-.section skd2b
-.section skd3a
-.section skd3b
-.section skd4a
-.section skd4b
-.section skd5a
-.section skd5b
-.section skd6a
-.section skd6b
-.section skd7a
-.section skd7b
-.section skd8a
-.section skd8b
-.section skd9a
-.section skd9b
-.section skd0a
-.section skd0b
-.section skeaa
-.section skeab
-.section skeba
-.section skebb
-.section skeca
-.section skecb
-.section skeda
-.section skedb
-.section skeea
-.section skeeb
-.section skefa
-.section skefb
-.section skega
-.section skegb
-.section skeha
-.section skehb
-.section skeia
-.section skeib
-.section skeja
-.section skejb
-.section skeka
-.section skekb
-.section skela
-.section skelb
-.section skema
-.section skemb
-.section skena
-.section skenb
-.section skeoa
-.section skeob
-.section skepa
-.section skepb
-.section skeqa
-.section skeqb
-.section skera
-.section skerb
-.section skesa
-.section skesb
-.section sketa
-.section sketb
-.section skeua
-.section skeub
-.section skeva
-.section skevb
-.section skewa
-.section skewb
-.section skexa
-.section skexb
-.section skeya
-.section skeyb
-.section skeza
-.section skezb
-.section ske1a
-.section ske1b
-.section ske2a
-.section ske2b
-.section ske3a
-.section ske3b
-.section ske4a
-.section ske4b
-.section ske5a
-.section ske5b
-.section ske6a
-.section ske6b
-.section ske7a
-.section ske7b
-.section ske8a
-.section ske8b
-.section ske9a
-.section ske9b
-.section ske0a
-.section ske0b
-.section skfaa
-.section skfab
-.section skfba
-.section skfbb
-.section skfca
-.section skfcb
-.section skfda
-.section skfdb
-.section skfea
-.section skfeb
-.section skffa
-.section skffb
-.section skfga
-.section skfgb
-.section skfha
-.section skfhb
-.section skfia
-.section skfib
-.section skfja
-.section skfjb
-.section skfka
-.section skfkb
-.section skfla
-.section skflb
-.section skfma
-.section skfmb
-.section skfna
-.section skfnb
-.section skfoa
-.section skfob
-.section skfpa
-.section skfpb
-.section skfqa
-.section skfqb
-.section skfra
-.section skfrb
-.section skfsa
-.section skfsb
-.section skfta
-.section skftb
-.section skfua
-.section skfub
-.section skfva
-.section skfvb
-.section skfwa
-.section skfwb
-.section skfxa
-.section skfxb
-.section skfya
-.section skfyb
-.section skfza
-.section skfzb
-.section skf1a
-.section skf1b
-.section skf2a
-.section skf2b
-.section skf3a
-.section skf3b
-.section skf4a
-.section skf4b
-.section skf5a
-.section skf5b
-.section skf6a
-.section skf6b
-.section skf7a
-.section skf7b
-.section skf8a
-.section skf8b
-.section skf9a
-.section skf9b
-.section skf0a
-.section skf0b
-.section skgaa
-.section skgab
-.section skgba
-.section skgbb
-.section skgca
-.section skgcb
-.section skgda
-.section skgdb
-.section skgea
-.section skgeb
-.section skgfa
-.section skgfb
-.section skgga
-.section skggb
-.section skgha
-.section skghb
-.section skgia
-.section skgib
-.section skgja
-.section skgjb
-.section skgka
-.section skgkb
-.section skgla
-.section skglb
-.section skgma
-.section skgmb
-.section skgna
-.section skgnb
-.section skgoa
-.section skgob
-.section skgpa
-.section skgpb
-.section skgqa
-.section skgqb
-.section skgra
-.section skgrb
-.section skgsa
-.section skgsb
-.section skgta
-.section skgtb
-.section skgua
-.section skgub
-.section skgva
-.section skgvb
-.section skgwa
-.section skgwb
-.section skgxa
-.section skgxb
-.section skgya
-.section skgyb
-.section skgza
-.section skgzb
-.section skg1a
-.section skg1b
-.section skg2a
-.section skg2b
-.section skg3a
-.section skg3b
-.section skg4a
-.section skg4b
-.section skg5a
-.section skg5b
-.section skg6a
-.section skg6b
-.section skg7a
-.section skg7b
-.section skg8a
-.section skg8b
-.section skg9a
-.section skg9b
-.section skg0a
-.section skg0b
-.section skhaa
-.section skhab
-.section skhba
-.section skhbb
-.section skhca
-.section skhcb
-.section skhda
-.section skhdb
-.section skhea
-.section skheb
-.section skhfa
-.section skhfb
-.section skhga
-.section skhgb
-.section skhha
-.section skhhb
-.section skhia
-.section skhib
-.section skhja
-.section skhjb
-.section skhka
-.section skhkb
-.section skhla
-.section skhlb
-.section skhma
-.section skhmb
-.section skhna
-.section skhnb
-.section skhoa
-.section skhob
-.section skhpa
-.section skhpb
-.section skhqa
-.section skhqb
-.section skhra
-.section skhrb
-.section skhsa
-.section skhsb
-.section skhta
-.section skhtb
-.section skhua
-.section skhub
-.section skhva
-.section skhvb
-.section skhwa
-.section skhwb
-.section skhxa
-.section skhxb
-.section skhya
-.section skhyb
-.section skhza
-.section skhzb
-.section skh1a
-.section skh1b
-.section skh2a
-.section skh2b
-.section skh3a
-.section skh3b
-.section skh4a
-.section skh4b
-.section skh5a
-.section skh5b
-.section skh6a
-.section skh6b
-.section skh7a
-.section skh7b
-.section skh8a
-.section skh8b
-.section skh9a
-.section skh9b
-.section skh0a
-.section skh0b
-.section skiaa
-.section skiab
-.section skiba
-.section skibb
-.section skica
-.section skicb
-.section skida
-.section skidb
-.section skiea
-.section skieb
-.section skifa
-.section skifb
-.section skiga
-.section skigb
-.section skiha
-.section skihb
-.section skiia
-.section skiib
-.section skija
-.section skijb
-.section skika
-.section skikb
-.section skila
-.section skilb
-.section skima
-.section skimb
-.section skina
-.section skinb
-.section skioa
-.section skiob
-.section skipa
-.section skipb
-.section skiqa
-.section skiqb
-.section skira
-.section skirb
-.section skisa
-.section skisb
-.section skita
-.section skitb
-.section skiua
-.section skiub
-.section skiva
-.section skivb
-.section skiwa
-.section skiwb
-.section skixa
-.section skixb
-.section skiya
-.section skiyb
-.section skiza
-.section skizb
-.section ski1a
-.section ski1b
-.section ski2a
-.section ski2b
-.section ski3a
-.section ski3b
-.section ski4a
-.section ski4b
-.section ski5a
-.section ski5b
-.section ski6a
-.section ski6b
-.section ski7a
-.section ski7b
-.section ski8a
-.section ski8b
-.section ski9a
-.section ski9b
-.section ski0a
-.section ski0b
-.section skjaa
-.section skjab
-.section skjba
-.section skjbb
-.section skjca
-.section skjcb
-.section skjda
-.section skjdb
-.section skjea
-.section skjeb
-.section skjfa
-.section skjfb
-.section skjga
-.section skjgb
-.section skjha
-.section skjhb
-.section skjia
-.section skjib
-.section skjja
-.section skjjb
-.section skjka
-.section skjkb
-.section skjla
-.section skjlb
-.section skjma
-.section skjmb
-.section skjna
-.section skjnb
-.section skjoa
-.section skjob
-.section skjpa
-.section skjpb
-.section skjqa
-.section skjqb
-.section skjra
-.section skjrb
-.section skjsa
-.section skjsb
-.section skjta
-.section skjtb
-.section skjua
-.section skjub
-.section skjva
-.section skjvb
-.section skjwa
-.section skjwb
-.section skjxa
-.section skjxb
-.section skjya
-.section skjyb
-.section skjza
-.section skjzb
-.section skj1a
-.section skj1b
-.section skj2a
-.section skj2b
-.section skj3a
-.section skj3b
-.section skj4a
-.section skj4b
-.section skj5a
-.section skj5b
-.section skj6a
-.section skj6b
-.section skj7a
-.section skj7b
-.section skj8a
-.section skj8b
-.section skj9a
-.section skj9b
-.section skj0a
-.section skj0b
-.section skkaa
-.section skkab
-.section skkba
-.section skkbb
-.section skkca
-.section skkcb
-.section skkda
-.section skkdb
-.section skkea
-.section skkeb
-.section skkfa
-.section skkfb
-.section skkga
-.section skkgb
-.section skkha
-.section skkhb
-.section skkia
-.section skkib
-.section skkja
-.section skkjb
-.section skkka
-.section skkkb
-.section skkla
-.section skklb
-.section skkma
-.section skkmb
-.section skkna
-.section skknb
-.section skkoa
-.section skkob
-.section skkpa
-.section skkpb
-.section skkqa
-.section skkqb
-.section skkra
-.section skkrb
-.section skksa
-.section skksb
-.section skkta
-.section skktb
-.section skkua
-.section skkub
-.section skkva
-.section skkvb
-.section skkwa
-.section skkwb
-.section skkxa
-.section skkxb
-.section skkya
-.section skkyb
-.section skkza
-.section skkzb
-.section skk1a
-.section skk1b
-.section skk2a
-.section skk2b
-.section skk3a
-.section skk3b
-.section skk4a
-.section skk4b
-.section skk5a
-.section skk5b
-.section skk6a
-.section skk6b
-.section skk7a
-.section skk7b
-.section skk8a
-.section skk8b
-.section skk9a
-.section skk9b
-.section skk0a
-.section skk0b
-.section sklaa
-.section sklab
-.section sklba
-.section sklbb
-.section sklca
-.section sklcb
-.section sklda
-.section skldb
-.section sklea
-.section skleb
-.section sklfa
-.section sklfb
-.section sklga
-.section sklgb
-.section sklha
-.section sklhb
-.section sklia
-.section sklib
-.section sklja
-.section skljb
-.section sklka
-.section sklkb
-.section sklla
-.section skllb
-.section sklma
-.section sklmb
-.section sklna
-.section sklnb
-.section skloa
-.section sklob
-.section sklpa
-.section sklpb
-.section sklqa
-.section sklqb
-.section sklra
-.section sklrb
-.section sklsa
-.section sklsb
-.section sklta
-.section skltb
-.section sklua
-.section sklub
-.section sklva
-.section sklvb
-.section sklwa
-.section sklwb
-.section sklxa
-.section sklxb
-.section sklya
-.section sklyb
-.section sklza
-.section sklzb
-.section skl1a
-.section skl1b
-.section skl2a
-.section skl2b
-.section skl3a
-.section skl3b
-.section skl4a
-.section skl4b
-.section skl5a
-.section skl5b
-.section skl6a
-.section skl6b
-.section skl7a
-.section skl7b
-.section skl8a
-.section skl8b
-.section skl9a
-.section skl9b
-.section skl0a
-.section skl0b
-.section skmaa
-.section skmab
-.section skmba
-.section skmbb
-.section skmca
-.section skmcb
-.section skmda
-.section skmdb
-.section skmea
-.section skmeb
-.section skmfa
-.section skmfb
-.section skmga
-.section skmgb
-.section skmha
-.section skmhb
-.section skmia
-.section skmib
-.section skmja
-.section skmjb
-.section skmka
-.section skmkb
-.section skmla
-.section skmlb
-.section skmma
-.section skmmb
-.section skmna
-.section skmnb
-.section skmoa
-.section skmob
-.section skmpa
-.section skmpb
-.section skmqa
-.section skmqb
-.section skmra
-.section skmrb
-.section skmsa
-.section skmsb
-.section skmta
-.section skmtb
-.section skmua
-.section skmub
-.section skmva
-.section skmvb
-.section skmwa
-.section skmwb
-.section skmxa
-.section skmxb
-.section skmya
-.section skmyb
-.section skmza
-.section skmzb
-.section skm1a
-.section skm1b
-.section skm2a
-.section skm2b
-.section skm3a
-.section skm3b
-.section skm4a
-.section skm4b
-.section skm5a
-.section skm5b
-.section skm6a
-.section skm6b
-.section skm7a
-.section skm7b
-.section skm8a
-.section skm8b
-.section skm9a
-.section skm9b
-.section skm0a
-.section skm0b
-.section sknaa
-.section sknab
-.section sknba
-.section sknbb
-.section sknca
-.section skncb
-.section sknda
-.section skndb
-.section sknea
-.section skneb
-.section sknfa
-.section sknfb
-.section sknga
-.section skngb
-.section sknha
-.section sknhb
-.section sknia
-.section sknib
-.section sknja
-.section sknjb
-.section sknka
-.section sknkb
-.section sknla
-.section sknlb
-.section sknma
-.section sknmb
-.section sknna
-.section sknnb
-.section sknoa
-.section sknob
-.section sknpa
-.section sknpb
-.section sknqa
-.section sknqb
-.section sknra
-.section sknrb
-.section sknsa
-.section sknsb
-.section sknta
-.section skntb
-.section sknua
-.section sknub
-.section sknva
-.section sknvb
-.section sknwa
-.section sknwb
-.section sknxa
-.section sknxb
-.section sknya
-.section sknyb
-.section sknza
-.section sknzb
-.section skn1a
-.section skn1b
-.section skn2a
-.section skn2b
-.section skn3a
-.section skn3b
-.section skn4a
-.section skn4b
-.section skn5a
-.section skn5b
-.section skn6a
-.section skn6b
-.section skn7a
-.section skn7b
-.section skn8a
-.section skn8b
-.section skn9a
-.section skn9b
-.section skn0a
-.section skn0b
-.section skoaa
-.section skoab
-.section skoba
-.section skobb
-.section skoca
-.section skocb
-.section skoda
-.section skodb
-.section skoea
-.section skoeb
-.section skofa
-.section skofb
-.section skoga
-.section skogb
-.section skoha
-.section skohb
-.section skoia
-.section skoib
-.section skoja
-.section skojb
-.section skoka
-.section skokb
-.section skola
-.section skolb
-.section skoma
-.section skomb
-.section skona
-.section skonb
-.section skooa
-.section skoob
-.section skopa
-.section skopb
-.section skoqa
-.section skoqb
-.section skora
-.section skorb
-.section skosa
-.section skosb
-.section skota
-.section skotb
-.section skoua
-.section skoub
-.section skova
-.section skovb
-.section skowa
-.section skowb
-.section skoxa
-.section skoxb
-.section skoya
-.section skoyb
-.section skoza
-.section skozb
-.section sko1a
-.section sko1b
-.section sko2a
-.section sko2b
-.section sko3a
-.section sko3b
-.section sko4a
-.section sko4b
-.section sko5a
-.section sko5b
-.section sko6a
-.section sko6b
-.section sko7a
-.section sko7b
-.section sko8a
-.section sko8b
-.section sko9a
-.section sko9b
-.section sko0a
-.section sko0b
-.section skpaa
-.section skpab
-.section skpba
-.section skpbb
-.section skpca
-.section skpcb
-.section skpda
-.section skpdb
-.section skpea
-.section skpeb
-.section skpfa
-.section skpfb
-.section skpga
-.section skpgb
-.section skpha
-.section skphb
-.section skpia
-.section skpib
-.section skpja
-.section skpjb
-.section skpka
-.section skpkb
-.section skpla
-.section skplb
-.section skpma
-.section skpmb
-.section skpna
-.section skpnb
-.section skpoa
-.section skpob
-.section skppa
-.section skppb
-.section skpqa
-.section skpqb
-.section skpra
-.section skprb
-.section skpsa
-.section skpsb
-.section skpta
-.section skptb
-.section skpua
-.section skpub
-.section skpva
-.section skpvb
-.section skpwa
-.section skpwb
-.section skpxa
-.section skpxb
-.section skpya
-.section skpyb
-.section skpza
-.section skpzb
-.section skp1a
-.section skp1b
-.section skp2a
-.section skp2b
-.section skp3a
-.section skp3b
-.section skp4a
-.section skp4b
-.section skp5a
-.section skp5b
-.section skp6a
-.section skp6b
-.section skp7a
-.section skp7b
-.section skp8a
-.section skp8b
-.section skp9a
-.section skp9b
-.section skp0a
-.section skp0b
-.section skqaa
-.section skqab
-.section skqba
-.section skqbb
-.section skqca
-.section skqcb
-.section skqda
-.section skqdb
-.section skqea
-.section skqeb
-.section skqfa
-.section skqfb
-.section skqga
-.section skqgb
-.section skqha
-.section skqhb
-.section skqia
-.section skqib
-.section skqja
-.section skqjb
-.section skqka
-.section skqkb
-.section skqla
-.section skqlb
-.section skqma
-.section skqmb
-.section skqna
-.section skqnb
-.section skqoa
-.section skqob
-.section skqpa
-.section skqpb
-.section skqqa
-.section skqqb
-.section skqra
-.section skqrb
-.section skqsa
-.section skqsb
-.section skqta
-.section skqtb
-.section skqua
-.section skqub
-.section skqva
-.section skqvb
-.section skqwa
-.section skqwb
-.section skqxa
-.section skqxb
-.section skqya
-.section skqyb
-.section skqza
-.section skqzb
-.section skq1a
-.section skq1b
-.section skq2a
-.section skq2b
-.section skq3a
-.section skq3b
-.section skq4a
-.section skq4b
-.section skq5a
-.section skq5b
-.section skq6a
-.section skq6b
-.section skq7a
-.section skq7b
-.section skq8a
-.section skq8b
-.section skq9a
-.section skq9b
-.section skq0a
-.section skq0b
-.section skraa
-.section skrab
-.section skrba
-.section skrbb
-.section skrca
-.section skrcb
-.section skrda
-.section skrdb
-.section skrea
-.section skreb
-.section skrfa
-.section skrfb
-.section skrga
-.section skrgb
-.section skrha
-.section skrhb
-.section skria
-.section skrib
-.section skrja
-.section skrjb
-.section skrka
-.section skrkb
-.section skrla
-.section skrlb
-.section skrma
-.section skrmb
-.section skrna
-.section skrnb
-.section skroa
-.section skrob
-.section skrpa
-.section skrpb
-.section skrqa
-.section skrqb
-.section skrra
-.section skrrb
-.section skrsa
-.section skrsb
-.section skrta
-.section skrtb
-.section skrua
-.section skrub
-.section skrva
-.section skrvb
-.section skrwa
-.section skrwb
-.section skrxa
-.section skrxb
-.section skrya
-.section skryb
-.section skrza
-.section skrzb
-.section skr1a
-.section skr1b
-.section skr2a
-.section skr2b
-.section skr3a
-.section skr3b
-.section skr4a
-.section skr4b
-.section skr5a
-.section skr5b
-.section skr6a
-.section skr6b
-.section skr7a
-.section skr7b
-.section skr8a
-.section skr8b
-.section skr9a
-.section skr9b
-.section skr0a
-.section skr0b
-.section sksaa
-.section sksab
-.section sksba
-.section sksbb
-.section sksca
-.section skscb
-.section sksda
-.section sksdb
-.section sksea
-.section skseb
-.section sksfa
-.section sksfb
-.section sksga
-.section sksgb
-.section sksha
-.section skshb
-.section sksia
-.section sksib
-.section sksja
-.section sksjb
-.section skska
-.section skskb
-.section sksla
-.section skslb
-.section sksma
-.section sksmb
-.section sksna
-.section sksnb
-.section sksoa
-.section sksob
-.section skspa
-.section skspb
-.section sksqa
-.section sksqb
-.section sksra
-.section sksrb
-.section skssa
-.section skssb
-.section sksta
-.section skstb
-.section sksua
-.section sksub
-.section sksva
-.section sksvb
-.section skswa
-.section skswb
-.section sksxa
-.section sksxb
-.section sksya
-.section sksyb
-.section sksza
-.section skszb
-.section sks1a
-.section sks1b
-.section sks2a
-.section sks2b
-.section sks3a
-.section sks3b
-.section sks4a
-.section sks4b
-.section sks5a
-.section sks5b
-.section sks6a
-.section sks6b
-.section sks7a
-.section sks7b
-.section sks8a
-.section sks8b
-.section sks9a
-.section sks9b
-.section sks0a
-.section sks0b
-.section sktaa
-.section sktab
-.section sktba
-.section sktbb
-.section sktca
-.section sktcb
-.section sktda
-.section sktdb
-.section sktea
-.section skteb
-.section sktfa
-.section sktfb
-.section sktga
-.section sktgb
-.section sktha
-.section skthb
-.section sktia
-.section sktib
-.section sktja
-.section sktjb
-.section sktka
-.section sktkb
-.section sktla
-.section sktlb
-.section sktma
-.section sktmb
-.section sktna
-.section sktnb
-.section sktoa
-.section sktob
-.section sktpa
-.section sktpb
-.section sktqa
-.section sktqb
-.section sktra
-.section sktrb
-.section sktsa
-.section sktsb
-.section sktta
-.section skttb
-.section sktua
-.section sktub
-.section sktva
-.section sktvb
-.section sktwa
-.section sktwb
-.section sktxa
-.section sktxb
-.section sktya
-.section sktyb
-.section sktza
-.section sktzb
-.section skt1a
-.section skt1b
-.section skt2a
-.section skt2b
-.section skt3a
-.section skt3b
-.section skt4a
-.section skt4b
-.section skt5a
-.section skt5b
-.section skt6a
-.section skt6b
-.section skt7a
-.section skt7b
-.section skt8a
-.section skt8b
-.section skt9a
-.section skt9b
-.section skt0a
-.section skt0b
-.section skuaa
-.section skuab
-.section skuba
-.section skubb
-.section skuca
-.section skucb
-.section skuda
-.section skudb
-.section skuea
-.section skueb
-.section skufa
-.section skufb
-.section skuga
-.section skugb
-.section skuha
-.section skuhb
-.section skuia
-.section skuib
-.section skuja
-.section skujb
-.section skuka
-.section skukb
-.section skula
-.section skulb
-.section skuma
-.section skumb
-.section skuna
-.section skunb
-.section skuoa
-.section skuob
-.section skupa
-.section skupb
-.section skuqa
-.section skuqb
-.section skura
-.section skurb
-.section skusa
-.section skusb
-.section skuta
-.section skutb
-.section skuua
-.section skuub
-.section skuva
-.section skuvb
-.section skuwa
-.section skuwb
-.section skuxa
-.section skuxb
-.section skuya
-.section skuyb
-.section skuza
-.section skuzb
-.section sku1a
-.section sku1b
-.section sku2a
-.section sku2b
-.section sku3a
-.section sku3b
-.section sku4a
-.section sku4b
-.section sku5a
-.section sku5b
-.section sku6a
-.section sku6b
-.section sku7a
-.section sku7b
-.section sku8a
-.section sku8b
-.section sku9a
-.section sku9b
-.section sku0a
-.section sku0b
-.section skvaa
-.section skvab
-.section skvba
-.section skvbb
-.section skvca
-.section skvcb
-.section skvda
-.section skvdb
-.section skvea
-.section skveb
-.section skvfa
-.section skvfb
-.section skvga
-.section skvgb
-.section skvha
-.section skvhb
-.section skvia
-.section skvib
-.section skvja
-.section skvjb
-.section skvka
-.section skvkb
-.section skvla
-.section skvlb
-.section skvma
-.section skvmb
-.section skvna
-.section skvnb
-.section skvoa
-.section skvob
-.section skvpa
-.section skvpb
-.section skvqa
-.section skvqb
-.section skvra
-.section skvrb
-.section skvsa
-.section skvsb
-.section skvta
-.section skvtb
-.section skvua
-.section skvub
-.section skvva
-.section skvvb
-.section skvwa
-.section skvwb
-.section skvxa
-.section skvxb
-.section skvya
-.section skvyb
-.section skvza
-.section skvzb
-.section skv1a
-.section skv1b
-.section skv2a
-.section skv2b
-.section skv3a
-.section skv3b
-.section skv4a
-.section skv4b
-.section skv5a
-.section skv5b
-.section skv6a
-.section skv6b
-.section skv7a
-.section skv7b
-.section skv8a
-.section skv8b
-.section skv9a
-.section skv9b
-.section skv0a
-.section skv0b
-.section skwaa
-.section skwab
-.section skwba
-.section skwbb
-.section skwca
-.section skwcb
-.section skwda
-.section skwdb
-.section skwea
-.section skweb
-.section skwfa
-.section skwfb
-.section skwga
-.section skwgb
-.section skwha
-.section skwhb
-.section skwia
-.section skwib
-.section skwja
-.section skwjb
-.section skwka
-.section skwkb
-.section skwla
-.section skwlb
-.section skwma
-.section skwmb
-.section skwna
-.section skwnb
-.section skwoa
-.section skwob
-.section skwpa
-.section skwpb
-.section skwqa
-.section skwqb
-.section skwra
-.section skwrb
-.section skwsa
-.section skwsb
-.section skwta
-.section skwtb
-.section skwua
-.section skwub
-.section skwva
-.section skwvb
-.section skwwa
-.section skwwb
-.section skwxa
-.section skwxb
-.section skwya
-.section skwyb
-.section skwza
-.section skwzb
-.section skw1a
-.section skw1b
-.section skw2a
-.section skw2b
-.section skw3a
-.section skw3b
-.section skw4a
-.section skw4b
-.section skw5a
-.section skw5b
-.section skw6a
-.section skw6b
-.section skw7a
-.section skw7b
-.section skw8a
-.section skw8b
-.section skw9a
-.section skw9b
-.section skw0a
-.section skw0b
-.section skxaa
-.section skxab
-.section skxba
-.section skxbb
-.section skxca
-.section skxcb
-.section skxda
-.section skxdb
-.section skxea
-.section skxeb
-.section skxfa
-.section skxfb
-.section skxga
-.section skxgb
-.section skxha
-.section skxhb
-.section skxia
-.section skxib
-.section skxja
-.section skxjb
-.section skxka
-.section skxkb
-.section skxla
-.section skxlb
-.section skxma
-.section skxmb
-.section skxna
-.section skxnb
-.section skxoa
-.section skxob
-.section skxpa
-.section skxpb
-.section skxqa
-.section skxqb
-.section skxra
-.section skxrb
-.section skxsa
-.section skxsb
-.section skxta
-.section skxtb
-.section skxua
-.section skxub
-.section skxva
-.section skxvb
-.section skxwa
-.section skxwb
-.section skxxa
-.section skxxb
-.section skxya
-.section skxyb
-.section skxza
-.section skxzb
-.section skx1a
-.section skx1b
-.section skx2a
-.section skx2b
-.section skx3a
-.section skx3b
-.section skx4a
-.section skx4b
-.section skx5a
-.section skx5b
-.section skx6a
-.section skx6b
-.section skx7a
-.section skx7b
-.section skx8a
-.section skx8b
-.section skx9a
-.section skx9b
-.section skx0a
-.section skx0b
-.section skyaa
-.section skyab
-.section skyba
-.section skybb
-.section skyca
-.section skycb
-.section skyda
-.section skydb
-.section skyea
-.section skyeb
-.section skyfa
-.section skyfb
-.section skyga
-.section skygb
-.section skyha
-.section skyhb
-.section skyia
-.section skyib
-.section skyja
-.section skyjb
-.section skyka
-.section skykb
-.section skyla
-.section skylb
-.section skyma
-.section skymb
-.section skyna
-.section skynb
-.section skyoa
-.section skyob
-.section skypa
-.section skypb
-.section skyqa
-.section skyqb
-.section skyra
-.section skyrb
-.section skysa
-.section skysb
-.section skyta
-.section skytb
-.section skyua
-.section skyub
-.section skyva
-.section skyvb
-.section skywa
-.section skywb
-.section skyxa
-.section skyxb
-.section skyya
-.section skyyb
-.section skyza
-.section skyzb
-.section sky1a
-.section sky1b
-.section sky2a
-.section sky2b
-.section sky3a
-.section sky3b
-.section sky4a
-.section sky4b
-.section sky5a
-.section sky5b
-.section sky6a
-.section sky6b
-.section sky7a
-.section sky7b
-.section sky8a
-.section sky8b
-.section sky9a
-.section sky9b
-.section sky0a
-.section sky0b
-.section skzaa
-.section skzab
-.section skzba
-.section skzbb
-.section skzca
-.section skzcb
-.section skzda
-.section skzdb
-.section skzea
-.section skzeb
-.section skzfa
-.section skzfb
-.section skzga
-.section skzgb
-.section skzha
-.section skzhb
-.section skzia
-.section skzib
-.section skzja
-.section skzjb
-.section skzka
-.section skzkb
-.section skzla
-.section skzlb
-.section skzma
-.section skzmb
-.section skzna
-.section skznb
-.section skzoa
-.section skzob
-.section skzpa
-.section skzpb
-.section skzqa
-.section skzqb
-.section skzra
-.section skzrb
-.section skzsa
-.section skzsb
-.section skzta
-.section skztb
-.section skzua
-.section skzub
-.section skzva
-.section skzvb
-.section skzwa
-.section skzwb
-.section skzxa
-.section skzxb
-.section skzya
-.section skzyb
-.section skzza
-.section skzzb
-.section skz1a
-.section skz1b
-.section skz2a
-.section skz2b
-.section skz3a
-.section skz3b
-.section skz4a
-.section skz4b
-.section skz5a
-.section skz5b
-.section skz6a
-.section skz6b
-.section skz7a
-.section skz7b
-.section skz8a
-.section skz8b
-.section skz9a
-.section skz9b
-.section skz0a
-.section skz0b
-.section sk1aa
-.section sk1ab
-.section sk1ba
-.section sk1bb
-.section sk1ca
-.section sk1cb
-.section sk1da
-.section sk1db
-.section sk1ea
-.section sk1eb
-.section sk1fa
-.section sk1fb
-.section sk1ga
-.section sk1gb
-.section sk1ha
-.section sk1hb
-.section sk1ia
-.section sk1ib
-.section sk1ja
-.section sk1jb
-.section sk1ka
-.section sk1kb
-.section sk1la
-.section sk1lb
-.section sk1ma
-.section sk1mb
-.section sk1na
-.section sk1nb
-.section sk1oa
-.section sk1ob
-.section sk1pa
-.section sk1pb
-.section sk1qa
-.section sk1qb
-.section sk1ra
-.section sk1rb
-.section sk1sa
-.section sk1sb
-.section sk1ta
-.section sk1tb
-.section sk1ua
-.section sk1ub
-.section sk1va
-.section sk1vb
-.section sk1wa
-.section sk1wb
-.section sk1xa
-.section sk1xb
-.section sk1ya
-.section sk1yb
-.section sk1za
-.section sk1zb
-.section sk11a
-.section sk11b
-.section sk12a
-.section sk12b
-.section sk13a
-.section sk13b
-.section sk14a
-.section sk14b
-.section sk15a
-.section sk15b
-.section sk16a
-.section sk16b
-.section sk17a
-.section sk17b
-.section sk18a
-.section sk18b
-.section sk19a
-.section sk19b
-.section sk10a
-.section sk10b
-.section sk2aa
-.section sk2ab
-.section sk2ba
-.section sk2bb
-.section sk2ca
-.section sk2cb
-.section sk2da
-.section sk2db
-.section sk2ea
-.section sk2eb
-.section sk2fa
-.section sk2fb
-.section sk2ga
-.section sk2gb
-.section sk2ha
-.section sk2hb
-.section sk2ia
-.section sk2ib
-.section sk2ja
-.section sk2jb
-.section sk2ka
-.section sk2kb
-.section sk2la
-.section sk2lb
-.section sk2ma
-.section sk2mb
-.section sk2na
-.section sk2nb
-.section sk2oa
-.section sk2ob
-.section sk2pa
-.section sk2pb
-.section sk2qa
-.section sk2qb
-.section sk2ra
-.section sk2rb
-.section sk2sa
-.section sk2sb
-.section sk2ta
-.section sk2tb
-.section sk2ua
-.section sk2ub
-.section sk2va
-.section sk2vb
-.section sk2wa
-.section sk2wb
-.section sk2xa
-.section sk2xb
-.section sk2ya
-.section sk2yb
-.section sk2za
-.section sk2zb
-.section sk21a
-.section sk21b
-.section sk22a
-.section sk22b
-.section sk23a
-.section sk23b
-.section sk24a
-.section sk24b
-.section sk25a
-.section sk25b
-.section sk26a
-.section sk26b
-.section sk27a
-.section sk27b
-.section sk28a
-.section sk28b
-.section sk29a
-.section sk29b
-.section sk20a
-.section sk20b
-.section sk3aa
-.section sk3ab
-.section sk3ba
-.section sk3bb
-.section sk3ca
-.section sk3cb
-.section sk3da
-.section sk3db
-.section sk3ea
-.section sk3eb
-.section sk3fa
-.section sk3fb
-.section sk3ga
-.section sk3gb
-.section sk3ha
-.section sk3hb
-.section sk3ia
-.section sk3ib
-.section sk3ja
-.section sk3jb
-.section sk3ka
-.section sk3kb
-.section sk3la
-.section sk3lb
-.section sk3ma
-.section sk3mb
-.section sk3na
-.section sk3nb
-.section sk3oa
-.section sk3ob
-.section sk3pa
-.section sk3pb
-.section sk3qa
-.section sk3qb
-.section sk3ra
-.section sk3rb
-.section sk3sa
-.section sk3sb
-.section sk3ta
-.section sk3tb
-.section sk3ua
-.section sk3ub
-.section sk3va
-.section sk3vb
-.section sk3wa
-.section sk3wb
-.section sk3xa
-.section sk3xb
-.section sk3ya
-.section sk3yb
-.section sk3za
-.section sk3zb
-.section sk31a
-.section sk31b
-.section sk32a
-.section sk32b
-.section sk33a
-.section sk33b
-.section sk34a
-.section sk34b
-.section sk35a
-.section sk35b
-.section sk36a
-.section sk36b
-.section sk37a
-.section sk37b
-.section sk38a
-.section sk38b
-.section sk39a
-.section sk39b
-.section sk30a
-.section sk30b
-.section sk4aa
-.section sk4ab
-.section sk4ba
-.section sk4bb
-.section sk4ca
-.section sk4cb
-.section sk4da
-.section sk4db
-.section sk4ea
-.section sk4eb
-.section sk4fa
-.section sk4fb
-.section sk4ga
-.section sk4gb
-.section sk4ha
-.section sk4hb
-.section sk4ia
-.section sk4ib
-.section sk4ja
-.section sk4jb
-.section sk4ka
-.section sk4kb
-.section sk4la
-.section sk4lb
-.section sk4ma
-.section sk4mb
-.section sk4na
-.section sk4nb
-.section sk4oa
-.section sk4ob
-.section sk4pa
-.section sk4pb
-.section sk4qa
-.section sk4qb
-.section sk4ra
-.section sk4rb
-.section sk4sa
-.section sk4sb
-.section sk4ta
-.section sk4tb
-.section sk4ua
-.section sk4ub
-.section sk4va
-.section sk4vb
-.section sk4wa
-.section sk4wb
-.section sk4xa
-.section sk4xb
-.section sk4ya
-.section sk4yb
-.section sk4za
-.section sk4zb
-.section sk41a
-.section sk41b
-.section sk42a
-.section sk42b
-.section sk43a
-.section sk43b
-.section sk44a
-.section sk44b
-.section sk45a
-.section sk45b
-.section sk46a
-.section sk46b
-.section sk47a
-.section sk47b
-.section sk48a
-.section sk48b
-.section sk49a
-.section sk49b
-.section sk40a
-.section sk40b
-.section sk5aa
-.section sk5ab
-.section sk5ba
-.section sk5bb
-.section sk5ca
-.section sk5cb
-.section sk5da
-.section sk5db
-.section sk5ea
-.section sk5eb
-.section sk5fa
-.section sk5fb
-.section sk5ga
-.section sk5gb
-.section sk5ha
-.section sk5hb
-.section sk5ia
-.section sk5ib
-.section sk5ja
-.section sk5jb
-.section sk5ka
-.section sk5kb
-.section sk5la
-.section sk5lb
-.section sk5ma
-.section sk5mb
-.section sk5na
-.section sk5nb
-.section sk5oa
-.section sk5ob
-.section sk5pa
-.section sk5pb
-.section sk5qa
-.section sk5qb
-.section sk5ra
-.section sk5rb
-.section sk5sa
-.section sk5sb
-.section sk5ta
-.section sk5tb
-.section sk5ua
-.section sk5ub
-.section sk5va
-.section sk5vb
-.section sk5wa
-.section sk5wb
-.section sk5xa
-.section sk5xb
-.section sk5ya
-.section sk5yb
-.section sk5za
-.section sk5zb
-.section sk51a
-.section sk51b
-.section sk52a
-.section sk52b
-.section sk53a
-.section sk53b
-.section sk54a
-.section sk54b
-.section sk55a
-.section sk55b
-.section sk56a
-.section sk56b
-.section sk57a
-.section sk57b
-.section sk58a
-.section sk58b
-.section sk59a
-.section sk59b
-.section sk50a
-.section sk50b
-.section sk6aa
-.section sk6ab
-.section sk6ba
-.section sk6bb
-.section sk6ca
-.section sk6cb
-.section sk6da
-.section sk6db
-.section sk6ea
-.section sk6eb
-.section sk6fa
-.section sk6fb
-.section sk6ga
-.section sk6gb
-.section sk6ha
-.section sk6hb
-.section sk6ia
-.section sk6ib
-.section sk6ja
-.section sk6jb
-.section sk6ka
-.section sk6kb
-.section sk6la
-.section sk6lb
-.section sk6ma
-.section sk6mb
-.section sk6na
-.section sk6nb
-.section sk6oa
-.section sk6ob
-.section sk6pa
-.section sk6pb
-.section sk6qa
-.section sk6qb
-.section sk6ra
-.section sk6rb
-.section sk6sa
-.section sk6sb
-.section sk6ta
-.section sk6tb
-.section sk6ua
-.section sk6ub
-.section sk6va
-.section sk6vb
-.section sk6wa
-.section sk6wb
-.section sk6xa
-.section sk6xb
-.section sk6ya
-.section sk6yb
-.section sk6za
-.section sk6zb
-.section sk61a
-.section sk61b
-.section sk62a
-.section sk62b
-.section sk63a
-.section sk63b
-.section sk64a
-.section sk64b
-.section sk65a
-.section sk65b
-.section sk66a
-.section sk66b
-.section sk67a
-.section sk67b
-.section sk68a
-.section sk68b
-.section sk69a
-.section sk69b
-.section sk60a
-.section sk60b
-.section sk7aa
-.section sk7ab
-.section sk7ba
-.section sk7bb
-.section sk7ca
-.section sk7cb
-.section sk7da
-.section sk7db
-.section sk7ea
-.section sk7eb
-.section sk7fa
-.section sk7fb
-.section sk7ga
-.section sk7gb
-.section sk7ha
-.section sk7hb
-.section sk7ia
-.section sk7ib
-.section sk7ja
-.section sk7jb
-.section sk7ka
-.section sk7kb
-.section sk7la
-.section sk7lb
-.section sk7ma
-.section sk7mb
-.section sk7na
-.section sk7nb
-.section sk7oa
-.section sk7ob
-.section sk7pa
-.section sk7pb
-.section sk7qa
-.section sk7qb
-.section sk7ra
-.section sk7rb
-.section sk7sa
-.section sk7sb
-.section sk7ta
-.section sk7tb
-.section sk7ua
-.section sk7ub
-.section sk7va
-.section sk7vb
-.section sk7wa
-.section sk7wb
-.section sk7xa
-.section sk7xb
-.section sk7ya
-.section sk7yb
-.section sk7za
-.section sk7zb
-.section sk71a
-.section sk71b
-.section sk72a
-.section sk72b
-.section sk73a
-.section sk73b
-.section sk74a
-.section sk74b
-.section sk75a
-.section sk75b
-.section sk76a
-.section sk76b
-.section sk77a
-.section sk77b
-.section sk78a
-.section sk78b
-.section sk79a
-.section sk79b
-.section sk70a
-.section sk70b
-.section sk8aa
-.section sk8ab
-.section sk8ba
-.section sk8bb
-.section sk8ca
-.section sk8cb
-.section sk8da
-.section sk8db
-.section sk8ea
-.section sk8eb
-.section sk8fa
-.section sk8fb
-.section sk8ga
-.section sk8gb
-.section sk8ha
-.section sk8hb
-.section sk8ia
-.section sk8ib
-.section sk8ja
-.section sk8jb
-.section sk8ka
-.section sk8kb
-.section sk8la
-.section sk8lb
-.section sk8ma
-.section sk8mb
-.section sk8na
-.section sk8nb
-.section sk8oa
-.section sk8ob
-.section sk8pa
-.section sk8pb
-.section sk8qa
-.section sk8qb
-.section sk8ra
-.section sk8rb
-.section sk8sa
-.section sk8sb
-.section sk8ta
-.section sk8tb
-.section sk8ua
-.section sk8ub
-.section sk8va
-.section sk8vb
-.section sk8wa
-.section sk8wb
-.section sk8xa
-.section sk8xb
-.section sk8ya
-.section sk8yb
-.section sk8za
-.section sk8zb
-.section sk81a
-.section sk81b
-.section sk82a
-.section sk82b
-.section sk83a
-.section sk83b
-.section sk84a
-.section sk84b
-.section sk85a
-.section sk85b
-.section sk86a
-.section sk86b
-.section sk87a
-.section sk87b
-.section sk88a
-.section sk88b
-.section sk89a
-.section sk89b
-.section sk80a
-.section sk80b
-.section sk9aa
-.section sk9ab
-.section sk9ba
-.section sk9bb
-.section sk9ca
-.section sk9cb
-.section sk9da
-.section sk9db
-.section sk9ea
-.section sk9eb
-.section sk9fa
-.section sk9fb
-.section sk9ga
-.section sk9gb
-.section sk9ha
-.section sk9hb
-.section sk9ia
-.section sk9ib
-.section sk9ja
-.section sk9jb
-.section sk9ka
-.section sk9kb
-.section sk9la
-.section sk9lb
-.section sk9ma
-.section sk9mb
-.section sk9na
-.section sk9nb
-.section sk9oa
-.section sk9ob
-.section sk9pa
-.section sk9pb
-.section sk9qa
-.section sk9qb
-.section sk9ra
-.section sk9rb
-.section sk9sa
-.section sk9sb
-.section sk9ta
-.section sk9tb
-.section sk9ua
-.section sk9ub
-.section sk9va
-.section sk9vb
-.section sk9wa
-.section sk9wb
-.section sk9xa
-.section sk9xb
-.section sk9ya
-.section sk9yb
-.section sk9za
-.section sk9zb
-.section sk91a
-.section sk91b
-.section sk92a
-.section sk92b
-.section sk93a
-.section sk93b
-.section sk94a
-.section sk94b
-.section sk95a
-.section sk95b
-.section sk96a
-.section sk96b
-.section sk97a
-.section sk97b
-.section sk98a
-.section sk98b
-.section sk99a
-.section sk99b
-.section sk90a
-.section sk90b
-.section sk0aa
-.section sk0ab
-.section sk0ba
-.section sk0bb
-.section sk0ca
-.section sk0cb
-.section sk0da
-.section sk0db
-.section sk0ea
-.section sk0eb
-.section sk0fa
-.section sk0fb
-.section sk0ga
-.section sk0gb
-.section sk0ha
-.section sk0hb
-.section sk0ia
-.section sk0ib
-.section sk0ja
-.section sk0jb
-.section sk0ka
-.section sk0kb
-.section sk0la
-.section sk0lb
-.section sk0ma
-.section sk0mb
-.section sk0na
-.section sk0nb
-.section sk0oa
-.section sk0ob
-.section sk0pa
-.section sk0pb
-.section sk0qa
-.section sk0qb
-.section sk0ra
-.section sk0rb
-.section sk0sa
-.section sk0sb
-.section sk0ta
-.section sk0tb
-.section sk0ua
-.section sk0ub
-.section sk0va
-.section sk0vb
-.section sk0wa
-.section sk0wb
-.section sk0xa
-.section sk0xb
-.section sk0ya
-.section sk0yb
-.section sk0za
-.section sk0zb
-.section sk01a
-.section sk01b
-.section sk02a
-.section sk02b
-.section sk03a
-.section sk03b
-.section sk04a
-.section sk04b
-.section sk05a
-.section sk05b
-.section sk06a
-.section sk06b
-.section sk07a
-.section sk07b
-.section sk08a
-.section sk08b
-.section sk09a
-.section sk09b
-.section sk00a
-.section sk00b
-.section slaaa
-.section slaab
-.section slaba
-.section slabb
-.section slaca
-.section slacb
-.section slada
-.section sladb
-.section slaea
-.section slaeb
-.section slafa
-.section slafb
-.section slaga
-.section slagb
-.section slaha
-.section slahb
-.section slaia
-.section slaib
-.section slaja
-.section slajb
-.section slaka
-.section slakb
-.section slala
-.section slalb
-.section slama
-.section slamb
-.section slana
-.section slanb
-.section slaoa
-.section slaob
-.section slapa
-.section slapb
-.section slaqa
-.section slaqb
-.section slara
-.section slarb
-.section slasa
-.section slasb
-.section slata
-.section slatb
-.section slaua
-.section slaub
-.section slava
-.section slavb
-.section slawa
-.section slawb
-.section slaxa
-.section slaxb
-.section slaya
-.section slayb
-.section slaza
-.section slazb
-.section sla1a
-.section sla1b
-.section sla2a
-.section sla2b
-.section sla3a
-.section sla3b
-.section sla4a
-.section sla4b
-.section sla5a
-.section sla5b
-.section sla6a
-.section sla6b
-.section sla7a
-.section sla7b
-.section sla8a
-.section sla8b
-.section sla9a
-.section sla9b
-.section sla0a
-.section sla0b
-.section slbaa
-.section slbab
-.section slbba
-.section slbbb
-.section slbca
-.section slbcb
-.section slbda
-.section slbdb
-.section slbea
-.section slbeb
-.section slbfa
-.section slbfb
-.section slbga
-.section slbgb
-.section slbha
-.section slbhb
-.section slbia
-.section slbib
-.section slbja
-.section slbjb
-.section slbka
-.section slbkb
-.section slbla
-.section slblb
-.section slbma
-.section slbmb
-.section slbna
-.section slbnb
-.section slboa
-.section slbob
-.section slbpa
-.section slbpb
-.section slbqa
-.section slbqb
-.section slbra
-.section slbrb
-.section slbsa
-.section slbsb
-.section slbta
-.section slbtb
-.section slbua
-.section slbub
-.section slbva
-.section slbvb
-.section slbwa
-.section slbwb
-.section slbxa
-.section slbxb
-.section slbya
-.section slbyb
-.section slbza
-.section slbzb
-.section slb1a
-.section slb1b
-.section slb2a
-.section slb2b
-.section slb3a
-.section slb3b
-.section slb4a
-.section slb4b
-.section slb5a
-.section slb5b
-.section slb6a
-.section slb6b
-.section slb7a
-.section slb7b
-.section slb8a
-.section slb8b
-.section slb9a
-.section slb9b
-.section slb0a
-.section slb0b
-.section slcaa
-.section slcab
-.section slcba
-.section slcbb
-.section slcca
-.section slccb
-.section slcda
-.section slcdb
-.section slcea
-.section slceb
-.section slcfa
-.section slcfb
-.section slcga
-.section slcgb
-.section slcha
-.section slchb
-.section slcia
-.section slcib
-.section slcja
-.section slcjb
-.section slcka
-.section slckb
-.section slcla
-.section slclb
-.section slcma
-.section slcmb
-.section slcna
-.section slcnb
-.section slcoa
-.section slcob
-.section slcpa
-.section slcpb
-.section slcqa
-.section slcqb
-.section slcra
-.section slcrb
-.section slcsa
-.section slcsb
-.section slcta
-.section slctb
-.section slcua
-.section slcub
-.section slcva
-.section slcvb
-.section slcwa
-.section slcwb
-.section slcxa
-.section slcxb
-.section slcya
-.section slcyb
-.section slcza
-.section slczb
-.section slc1a
-.section slc1b
-.section slc2a
-.section slc2b
-.section slc3a
-.section slc3b
-.section slc4a
-.section slc4b
-.section slc5a
-.section slc5b
-.section slc6a
-.section slc6b
-.section slc7a
-.section slc7b
-.section slc8a
-.section slc8b
-.section slc9a
-.section slc9b
-.section slc0a
-.section slc0b
-.section sldaa
-.section sldab
-.section sldba
-.section sldbb
-.section sldca
-.section sldcb
-.section sldda
-.section slddb
-.section sldea
-.section sldeb
-.section sldfa
-.section sldfb
-.section sldga
-.section sldgb
-.section sldha
-.section sldhb
-.section sldia
-.section sldib
-.section sldja
-.section sldjb
-.section sldka
-.section sldkb
-.section sldla
-.section sldlb
-.section sldma
-.section sldmb
-.section sldna
-.section sldnb
-.section sldoa
-.section sldob
-.section sldpa
-.section sldpb
-.section sldqa
-.section sldqb
-.section sldra
-.section sldrb
-.section sldsa
-.section sldsb
-.section sldta
-.section sldtb
-.section sldua
-.section sldub
-.section sldva
-.section sldvb
-.section sldwa
-.section sldwb
-.section sldxa
-.section sldxb
-.section sldya
-.section sldyb
-.section sldza
-.section sldzb
-.section sld1a
-.section sld1b
-.section sld2a
-.section sld2b
-.section sld3a
-.section sld3b
-.section sld4a
-.section sld4b
-.section sld5a
-.section sld5b
-.section sld6a
-.section sld6b
-.section sld7a
-.section sld7b
-.section sld8a
-.section sld8b
-.section sld9a
-.section sld9b
-.section sld0a
-.section sld0b
-.section sleaa
-.section sleab
-.section sleba
-.section slebb
-.section sleca
-.section slecb
-.section sleda
-.section sledb
-.section sleea
-.section sleeb
-.section slefa
-.section slefb
-.section slega
-.section slegb
-.section sleha
-.section slehb
-.section sleia
-.section sleib
-.section sleja
-.section slejb
-.section sleka
-.section slekb
-.section slela
-.section slelb
-.section slema
-.section slemb
-.section slena
-.section slenb
-.section sleoa
-.section sleob
-.section slepa
-.section slepb
-.section sleqa
-.section sleqb
-.section slera
-.section slerb
-.section slesa
-.section slesb
-.section sleta
-.section sletb
-.section sleua
-.section sleub
-.section sleva
-.section slevb
-.section slewa
-.section slewb
-.section slexa
-.section slexb
-.section sleya
-.section sleyb
-.section sleza
-.section slezb
-.section sle1a
-.section sle1b
-.section sle2a
-.section sle2b
-.section sle3a
-.section sle3b
-.section sle4a
-.section sle4b
-.section sle5a
-.section sle5b
-.section sle6a
-.section sle6b
-.section sle7a
-.section sle7b
-.section sle8a
-.section sle8b
-.section sle9a
-.section sle9b
-.section sle0a
-.section sle0b
-.section slfaa
-.section slfab
-.section slfba
-.section slfbb
-.section slfca
-.section slfcb
-.section slfda
-.section slfdb
-.section slfea
-.section slfeb
-.section slffa
-.section slffb
-.section slfga
-.section slfgb
-.section slfha
-.section slfhb
-.section slfia
-.section slfib
-.section slfja
-.section slfjb
-.section slfka
-.section slfkb
-.section slfla
-.section slflb
-.section slfma
-.section slfmb
-.section slfna
-.section slfnb
-.section slfoa
-.section slfob
-.section slfpa
-.section slfpb
-.section slfqa
-.section slfqb
-.section slfra
-.section slfrb
-.section slfsa
-.section slfsb
-.section slfta
-.section slftb
-.section slfua
-.section slfub
-.section slfva
-.section slfvb
-.section slfwa
-.section slfwb
-.section slfxa
-.section slfxb
-.section slfya
-.section slfyb
-.section slfza
-.section slfzb
-.section slf1a
-.section slf1b
-.section slf2a
-.section slf2b
-.section slf3a
-.section slf3b
-.section slf4a
-.section slf4b
-.section slf5a
-.section slf5b
-.section slf6a
-.section slf6b
-.section slf7a
-.section slf7b
-.section slf8a
-.section slf8b
-.section slf9a
-.section slf9b
-.section slf0a
-.section slf0b
-.section slgaa
-.section slgab
-.section slgba
-.section slgbb
-.section slgca
-.section slgcb
-.section slgda
-.section slgdb
-.section slgea
-.section slgeb
-.section slgfa
-.section slgfb
-.section slgga
-.section slggb
-.section slgha
-.section slghb
-.section slgia
-.section slgib
-.section slgja
-.section slgjb
-.section slgka
-.section slgkb
-.section slgla
-.section slglb
-.section slgma
-.section slgmb
-.section slgna
-.section slgnb
-.section slgoa
-.section slgob
-.section slgpa
-.section slgpb
-.section slgqa
-.section slgqb
-.section slgra
-.section slgrb
-.section slgsa
-.section slgsb
-.section slgta
-.section slgtb
-.section slgua
-.section slgub
-.section slgva
-.section slgvb
-.section slgwa
-.section slgwb
-.section slgxa
-.section slgxb
-.section slgya
-.section slgyb
-.section slgza
-.section slgzb
-.section slg1a
-.section slg1b
-.section slg2a
-.section slg2b
-.section slg3a
-.section slg3b
-.section slg4a
-.section slg4b
-.section slg5a
-.section slg5b
-.section slg6a
-.section slg6b
-.section slg7a
-.section slg7b
-.section slg8a
-.section slg8b
-.section slg9a
-.section slg9b
-.section slg0a
-.section slg0b
-.section slhaa
-.section slhab
-.section slhba
-.section slhbb
-.section slhca
-.section slhcb
-.section slhda
-.section slhdb
-.section slhea
-.section slheb
-.section slhfa
-.section slhfb
-.section slhga
-.section slhgb
-.section slhha
-.section slhhb
-.section slhia
-.section slhib
-.section slhja
-.section slhjb
-.section slhka
-.section slhkb
-.section slhla
-.section slhlb
-.section slhma
-.section slhmb
-.section slhna
-.section slhnb
-.section slhoa
-.section slhob
-.section slhpa
-.section slhpb
-.section slhqa
-.section slhqb
-.section slhra
-.section slhrb
-.section slhsa
-.section slhsb
-.section slhta
-.section slhtb
-.section slhua
-.section slhub
-.section slhva
-.section slhvb
-.section slhwa
-.section slhwb
-.section slhxa
-.section slhxb
-.section slhya
-.section slhyb
-.section slhza
-.section slhzb
-.section slh1a
-.section slh1b
-.section slh2a
-.section slh2b
-.section slh3a
-.section slh3b
-.section slh4a
-.section slh4b
-.section slh5a
-.section slh5b
-.section slh6a
-.section slh6b
-.section slh7a
-.section slh7b
-.section slh8a
-.section slh8b
-.section slh9a
-.section slh9b
-.section slh0a
-.section slh0b
-.section sliaa
-.section sliab
-.section sliba
-.section slibb
-.section slica
-.section slicb
-.section slida
-.section slidb
-.section sliea
-.section slieb
-.section slifa
-.section slifb
-.section sliga
-.section sligb
-.section sliha
-.section slihb
-.section sliia
-.section sliib
-.section slija
-.section slijb
-.section slika
-.section slikb
-.section slila
-.section slilb
-.section slima
-.section slimb
-.section slina
-.section slinb
-.section slioa
-.section sliob
-.section slipa
-.section slipb
-.section sliqa
-.section sliqb
-.section slira
-.section slirb
-.section slisa
-.section slisb
-.section slita
-.section slitb
-.section sliua
-.section sliub
-.section sliva
-.section slivb
-.section sliwa
-.section sliwb
-.section slixa
-.section slixb
-.section sliya
-.section sliyb
-.section sliza
-.section slizb
-.section sli1a
-.section sli1b
-.section sli2a
-.section sli2b
-.section sli3a
-.section sli3b
-.section sli4a
-.section sli4b
-.section sli5a
-.section sli5b
-.section sli6a
-.section sli6b
-.section sli7a
-.section sli7b
-.section sli8a
-.section sli8b
-.section sli9a
-.section sli9b
-.section sli0a
-.section sli0b
-.section sljaa
-.section sljab
-.section sljba
-.section sljbb
-.section sljca
-.section sljcb
-.section sljda
-.section sljdb
-.section sljea
-.section sljeb
-.section sljfa
-.section sljfb
-.section sljga
-.section sljgb
-.section sljha
-.section sljhb
-.section sljia
-.section sljib
-.section sljja
-.section sljjb
-.section sljka
-.section sljkb
-.section sljla
-.section sljlb
-.section sljma
-.section sljmb
-.section sljna
-.section sljnb
-.section sljoa
-.section sljob
-.section sljpa
-.section sljpb
-.section sljqa
-.section sljqb
-.section sljra
-.section sljrb
-.section sljsa
-.section sljsb
-.section sljta
-.section sljtb
-.section sljua
-.section sljub
-.section sljva
-.section sljvb
-.section sljwa
-.section sljwb
-.section sljxa
-.section sljxb
-.section sljya
-.section sljyb
-.section sljza
-.section sljzb
-.section slj1a
-.section slj1b
-.section slj2a
-.section slj2b
-.section slj3a
-.section slj3b
-.section slj4a
-.section slj4b
-.section slj5a
-.section slj5b
-.section slj6a
-.section slj6b
-.section slj7a
-.section slj7b
-.section slj8a
-.section slj8b
-.section slj9a
-.section slj9b
-.section slj0a
-.section slj0b
-.section slkaa
-.section slkab
-.section slkba
-.section slkbb
-.section slkca
-.section slkcb
-.section slkda
-.section slkdb
-.section slkea
-.section slkeb
-.section slkfa
-.section slkfb
-.section slkga
-.section slkgb
-.section slkha
-.section slkhb
-.section slkia
-.section slkib
-.section slkja
-.section slkjb
-.section slkka
-.section slkkb
-.section slkla
-.section slklb
-.section slkma
-.section slkmb
-.section slkna
-.section slknb
-.section slkoa
-.section slkob
-.section slkpa
-.section slkpb
-.section slkqa
-.section slkqb
-.section slkra
-.section slkrb
-.section slksa
-.section slksb
-.section slkta
-.section slktb
-.section slkua
-.section slkub
-.section slkva
-.section slkvb
-.section slkwa
-.section slkwb
-.section slkxa
-.section slkxb
-.section slkya
-.section slkyb
-.section slkza
-.section slkzb
-.section slk1a
-.section slk1b
-.section slk2a
-.section slk2b
-.section slk3a
-.section slk3b
-.section slk4a
-.section slk4b
-.section slk5a
-.section slk5b
-.section slk6a
-.section slk6b
-.section slk7a
-.section slk7b
-.section slk8a
-.section slk8b
-.section slk9a
-.section slk9b
-.section slk0a
-.section slk0b
-.section sllaa
-.section sllab
-.section sllba
-.section sllbb
-.section sllca
-.section sllcb
-.section sllda
-.section slldb
-.section sllea
-.section slleb
-.section sllfa
-.section sllfb
-.section sllga
-.section sllgb
-.section sllha
-.section sllhb
-.section sllia
-.section sllib
-.section sllja
-.section slljb
-.section sllka
-.section sllkb
-.section sllla
-.section slllb
-.section sllma
-.section sllmb
-.section sllna
-.section sllnb
-.section slloa
-.section sllob
-.section sllpa
-.section sllpb
-.section sllqa
-.section sllqb
-.section sllra
-.section sllrb
-.section sllsa
-.section sllsb
-.section sllta
-.section slltb
-.section sllua
-.section sllub
-.section sllva
-.section sllvb
-.section sllwa
-.section sllwb
-.section sllxa
-.section sllxb
-.section sllya
-.section sllyb
-.section sllza
-.section sllzb
-.section sll1a
-.section sll1b
-.section sll2a
-.section sll2b
-.section sll3a
-.section sll3b
-.section sll4a
-.section sll4b
-.section sll5a
-.section sll5b
-.section sll6a
-.section sll6b
-.section sll7a
-.section sll7b
-.section sll8a
-.section sll8b
-.section sll9a
-.section sll9b
-.section sll0a
-.section sll0b
-.section slmaa
-.section slmab
-.section slmba
-.section slmbb
-.section slmca
-.section slmcb
-.section slmda
-.section slmdb
-.section slmea
-.section slmeb
-.section slmfa
-.section slmfb
-.section slmga
-.section slmgb
-.section slmha
-.section slmhb
-.section slmia
-.section slmib
-.section slmja
-.section slmjb
-.section slmka
-.section slmkb
-.section slmla
-.section slmlb
-.section slmma
-.section slmmb
-.section slmna
-.section slmnb
-.section slmoa
-.section slmob
-.section slmpa
-.section slmpb
-.section slmqa
-.section slmqb
-.section slmra
-.section slmrb
-.section slmsa
-.section slmsb
-.section slmta
-.section slmtb
-.section slmua
-.section slmub
-.section slmva
-.section slmvb
-.section slmwa
-.section slmwb
-.section slmxa
-.section slmxb
-.section slmya
-.section slmyb
-.section slmza
-.section slmzb
-.section slm1a
-.section slm1b
-.section slm2a
-.section slm2b
-.section slm3a
-.section slm3b
-.section slm4a
-.section slm4b
-.section slm5a
-.section slm5b
-.section slm6a
-.section slm6b
-.section slm7a
-.section slm7b
-.section slm8a
-.section slm8b
-.section slm9a
-.section slm9b
-.section slm0a
-.section slm0b
-.section slnaa
-.section slnab
-.section slnba
-.section slnbb
-.section slnca
-.section slncb
-.section slnda
-.section slndb
-.section slnea
-.section slneb
-.section slnfa
-.section slnfb
-.section slnga
-.section slngb
-.section slnha
-.section slnhb
-.section slnia
-.section slnib
-.section slnja
-.section slnjb
-.section slnka
-.section slnkb
-.section slnla
-.section slnlb
-.section slnma
-.section slnmb
-.section slnna
-.section slnnb
-.section slnoa
-.section slnob
-.section slnpa
-.section slnpb
-.section slnqa
-.section slnqb
-.section slnra
-.section slnrb
-.section slnsa
-.section slnsb
-.section slnta
-.section slntb
-.section slnua
-.section slnub
-.section slnva
-.section slnvb
-.section slnwa
-.section slnwb
-.section slnxa
-.section slnxb
-.section slnya
-.section slnyb
-.section slnza
-.section slnzb
-.section sln1a
-.section sln1b
-.section sln2a
-.section sln2b
-.section sln3a
-.section sln3b
-.section sln4a
-.section sln4b
-.section sln5a
-.section sln5b
-.section sln6a
-.section sln6b
-.section sln7a
-.section sln7b
-.section sln8a
-.section sln8b
-.section sln9a
-.section sln9b
-.section sln0a
-.section sln0b
-.section sloaa
-.section sloab
-.section sloba
-.section slobb
-.section sloca
-.section slocb
-.section sloda
-.section slodb
-.section sloea
-.section sloeb
-.section slofa
-.section slofb
-.section sloga
-.section slogb
-.section sloha
-.section slohb
-.section sloia
-.section sloib
-.section sloja
-.section slojb
-.section sloka
-.section slokb
-.section slola
-.section slolb
-.section sloma
-.section slomb
-.section slona
-.section slonb
-.section slooa
-.section sloob
-.section slopa
-.section slopb
-.section sloqa
-.section sloqb
-.section slora
-.section slorb
-.section slosa
-.section slosb
-.section slota
-.section slotb
-.section sloua
-.section sloub
-.section slova
-.section slovb
-.section slowa
-.section slowb
-.section sloxa
-.section sloxb
-.section sloya
-.section sloyb
-.section sloza
-.section slozb
-.section slo1a
-.section slo1b
-.section slo2a
-.section slo2b
-.section slo3a
-.section slo3b
-.section slo4a
-.section slo4b
-.section slo5a
-.section slo5b
-.section slo6a
-.section slo6b
-.section slo7a
-.section slo7b
-.section slo8a
-.section slo8b
-.section slo9a
-.section slo9b
-.section slo0a
-.section slo0b
-.section slpaa
-.section slpab
-.section slpba
-.section slpbb
-.section slpca
-.section slpcb
-.section slpda
-.section slpdb
-.section slpea
-.section slpeb
-.section slpfa
-.section slpfb
-.section slpga
-.section slpgb
-.section slpha
-.section slphb
-.section slpia
-.section slpib
-.section slpja
-.section slpjb
-.section slpka
-.section slpkb
-.section slpla
-.section slplb
-.section slpma
-.section slpmb
-.section slpna
-.section slpnb
-.section slpoa
-.section slpob
-.section slppa
-.section slppb
-.section slpqa
-.section slpqb
-.section slpra
-.section slprb
-.section slpsa
-.section slpsb
-.section slpta
-.section slptb
-.section slpua
-.section slpub
-.section slpva
-.section slpvb
-.section slpwa
-.section slpwb
-.section slpxa
-.section slpxb
-.section slpya
-.section slpyb
-.section slpza
-.section slpzb
-.section slp1a
-.section slp1b
-.section slp2a
-.section slp2b
-.section slp3a
-.section slp3b
-.section slp4a
-.section slp4b
-.section slp5a
-.section slp5b
-.section slp6a
-.section slp6b
-.section slp7a
-.section slp7b
-.section slp8a
-.section slp8b
-.section slp9a
-.section slp9b
-.section slp0a
-.section slp0b
-.section slqaa
-.section slqab
-.section slqba
-.section slqbb
-.section slqca
-.section slqcb
-.section slqda
-.section slqdb
-.section slqea
-.section slqeb
-.section slqfa
-.section slqfb
-.section slqga
-.section slqgb
-.section slqha
-.section slqhb
-.section slqia
-.section slqib
-.section slqja
-.section slqjb
-.section slqka
-.section slqkb
-.section slqla
-.section slqlb
-.section slqma
-.section slqmb
-.section slqna
-.section slqnb
-.section slqoa
-.section slqob
-.section slqpa
-.section slqpb
-.section slqqa
-.section slqqb
-.section slqra
-.section slqrb
-.section slqsa
-.section slqsb
-.section slqta
-.section slqtb
-.section slqua
-.section slqub
-.section slqva
-.section slqvb
-.section slqwa
-.section slqwb
-.section slqxa
-.section slqxb
-.section slqya
-.section slqyb
-.section slqza
-.section slqzb
-.section slq1a
-.section slq1b
-.section slq2a
-.section slq2b
-.section slq3a
-.section slq3b
-.section slq4a
-.section slq4b
-.section slq5a
-.section slq5b
-.section slq6a
-.section slq6b
-.section slq7a
-.section slq7b
-.section slq8a
-.section slq8b
-.section slq9a
-.section slq9b
-.section slq0a
-.section slq0b
-.section slraa
-.section slrab
-.section slrba
-.section slrbb
-.section slrca
-.section slrcb
-.section slrda
-.section slrdb
-.section slrea
-.section slreb
-.section slrfa
-.section slrfb
-.section slrga
-.section slrgb
-.section slrha
-.section slrhb
-.section slria
-.section slrib
-.section slrja
-.section slrjb
-.section slrka
-.section slrkb
-.section slrla
-.section slrlb
-.section slrma
-.section slrmb
-.section slrna
-.section slrnb
-.section slroa
-.section slrob
-.section slrpa
-.section slrpb
-.section slrqa
-.section slrqb
-.section slrra
-.section slrrb
-.section slrsa
-.section slrsb
-.section slrta
-.section slrtb
-.section slrua
-.section slrub
-.section slrva
-.section slrvb
-.section slrwa
-.section slrwb
-.section slrxa
-.section slrxb
-.section slrya
-.section slryb
-.section slrza
-.section slrzb
-.section slr1a
-.section slr1b
-.section slr2a
-.section slr2b
-.section slr3a
-.section slr3b
-.section slr4a
-.section slr4b
-.section slr5a
-.section slr5b
-.section slr6a
-.section slr6b
-.section slr7a
-.section slr7b
-.section slr8a
-.section slr8b
-.section slr9a
-.section slr9b
-.section slr0a
-.section slr0b
-.section slsaa
-.section slsab
-.section slsba
-.section slsbb
-.section slsca
-.section slscb
-.section slsda
-.section slsdb
-.section slsea
-.section slseb
-.section slsfa
-.section slsfb
-.section slsga
-.section slsgb
-.section slsha
-.section slshb
-.section slsia
-.section slsib
-.section slsja
-.section slsjb
-.section slska
-.section slskb
-.section slsla
-.section slslb
-.section slsma
-.section slsmb
-.section slsna
-.section slsnb
-.section slsoa
-.section slsob
-.section slspa
-.section slspb
-.section slsqa
-.section slsqb
-.section slsra
-.section slsrb
-.section slssa
-.section slssb
-.section slsta
-.section slstb
-.section slsua
-.section slsub
-.section slsva
-.section slsvb
-.section slswa
-.section slswb
-.section slsxa
-.section slsxb
-.section slsya
-.section slsyb
-.section slsza
-.section slszb
-.section sls1a
-.section sls1b
-.section sls2a
-.section sls2b
-.section sls3a
-.section sls3b
-.section sls4a
-.section sls4b
-.section sls5a
-.section sls5b
-.section sls6a
-.section sls6b
-.section sls7a
-.section sls7b
-.section sls8a
-.section sls8b
-.section sls9a
-.section sls9b
-.section sls0a
-.section sls0b
-.section sltaa
-.section sltab
-.section sltba
-.section sltbb
-.section sltca
-.section sltcb
-.section sltda
-.section sltdb
-.section sltea
-.section slteb
-.section sltfa
-.section sltfb
-.section sltga
-.section sltgb
-.section sltha
-.section slthb
-.section sltia
-.section sltib
-.section sltja
-.section sltjb
-.section sltka
-.section sltkb
-.section sltla
-.section sltlb
-.section sltma
-.section sltmb
-.section sltna
-.section sltnb
-.section sltoa
-.section sltob
-.section sltpa
-.section sltpb
-.section sltqa
-.section sltqb
-.section sltra
-.section sltrb
-.section sltsa
-.section sltsb
-.section sltta
-.section slttb
-.section sltua
-.section sltub
-.section sltva
-.section sltvb
-.section sltwa
-.section sltwb
-.section sltxa
-.section sltxb
-.section sltya
-.section sltyb
-.section sltza
-.section sltzb
-.section slt1a
-.section slt1b
-.section slt2a
-.section slt2b
-.section slt3a
-.section slt3b
-.section slt4a
-.section slt4b
-.section slt5a
-.section slt5b
-.section slt6a
-.section slt6b
-.section slt7a
-.section slt7b
-.section slt8a
-.section slt8b
-.section slt9a
-.section slt9b
-.section slt0a
-.section slt0b
-.section sluaa
-.section sluab
-.section sluba
-.section slubb
-.section sluca
-.section slucb
-.section sluda
-.section sludb
-.section sluea
-.section slueb
-.section slufa
-.section slufb
-.section sluga
-.section slugb
-.section sluha
-.section sluhb
-.section sluia
-.section sluib
-.section sluja
-.section slujb
-.section sluka
-.section slukb
-.section slula
-.section slulb
-.section sluma
-.section slumb
-.section sluna
-.section slunb
-.section sluoa
-.section sluob
-.section slupa
-.section slupb
-.section sluqa
-.section sluqb
-.section slura
-.section slurb
-.section slusa
-.section slusb
-.section sluta
-.section slutb
-.section sluua
-.section sluub
-.section sluva
-.section sluvb
-.section sluwa
-.section sluwb
-.section sluxa
-.section sluxb
-.section sluya
-.section sluyb
-.section sluza
-.section sluzb
-.section slu1a
-.section slu1b
-.section slu2a
-.section slu2b
-.section slu3a
-.section slu3b
-.section slu4a
-.section slu4b
-.section slu5a
-.section slu5b
-.section slu6a
-.section slu6b
-.section slu7a
-.section slu7b
-.section slu8a
-.section slu8b
-.section slu9a
-.section slu9b
-.section slu0a
-.section slu0b
-.section slvaa
-.section slvab
-.section slvba
-.section slvbb
-.section slvca
-.section slvcb
-.section slvda
-.section slvdb
-.section slvea
-.section slveb
-.section slvfa
-.section slvfb
-.section slvga
-.section slvgb
-.section slvha
-.section slvhb
-.section slvia
-.section slvib
-.section slvja
-.section slvjb
-.section slvka
-.section slvkb
-.section slvla
-.section slvlb
-.section slvma
-.section slvmb
-.section slvna
-.section slvnb
-.section slvoa
-.section slvob
-.section slvpa
-.section slvpb
-.section slvqa
-.section slvqb
-.section slvra
-.section slvrb
-.section slvsa
-.section slvsb
-.section slvta
-.section slvtb
-.section slvua
-.section slvub
-.section slvva
-.section slvvb
-.section slvwa
-.section slvwb
-.section slvxa
-.section slvxb
-.section slvya
-.section slvyb
-.section slvza
-.section slvzb
-.section slv1a
-.section slv1b
-.section slv2a
-.section slv2b
-.section slv3a
-.section slv3b
-.section slv4a
-.section slv4b
-.section slv5a
-.section slv5b
-.section slv6a
-.section slv6b
-.section slv7a
-.section slv7b
-.section slv8a
-.section slv8b
-.section slv9a
-.section slv9b
-.section slv0a
-.section slv0b
-.section slwaa
-.section slwab
-.section slwba
-.section slwbb
-.section slwca
-.section slwcb
-.section slwda
-.section slwdb
-.section slwea
-.section slweb
-.section slwfa
-.section slwfb
-.section slwga
-.section slwgb
-.section slwha
-.section slwhb
-.section slwia
-.section slwib
-.section slwja
-.section slwjb
-.section slwka
-.section slwkb
-.section slwla
-.section slwlb
-.section slwma
-.section slwmb
-.section slwna
-.section slwnb
-.section slwoa
-.section slwob
-.section slwpa
-.section slwpb
-.section slwqa
-.section slwqb
-.section slwra
-.section slwrb
-.section slwsa
-.section slwsb
-.section slwta
-.section slwtb
-.section slwua
-.section slwub
-.section slwva
-.section slwvb
-.section slwwa
-.section slwwb
-.section slwxa
-.section slwxb
-.section slwya
-.section slwyb
-.section slwza
-.section slwzb
-.section slw1a
-.section slw1b
-.section slw2a
-.section slw2b
-.section slw3a
-.section slw3b
-.section slw4a
-.section slw4b
-.section slw5a
-.section slw5b
-.section slw6a
-.section slw6b
-.section slw7a
-.section slw7b
-.section slw8a
-.section slw8b
-.section slw9a
-.section slw9b
-.section slw0a
-.section slw0b
-.section slxaa
-.section slxab
-.section slxba
-.section slxbb
-.section slxca
-.section slxcb
-.section slxda
-.section slxdb
-.section slxea
-.section slxeb
-.section slxfa
-.section slxfb
-.section slxga
-.section slxgb
-.section slxha
-.section slxhb
-.section slxia
-.section slxib
-.section slxja
-.section slxjb
-.section slxka
-.section slxkb
-.section slxla
-.section slxlb
-.section slxma
-.section slxmb
-.section slxna
-.section slxnb
-.section slxoa
-.section slxob
-.section slxpa
-.section slxpb
-.section slxqa
-.section slxqb
-.section slxra
-.section slxrb
-.section slxsa
-.section slxsb
-.section slxta
-.section slxtb
-.section slxua
-.section slxub
-.section slxva
-.section slxvb
-.section slxwa
-.section slxwb
-.section slxxa
-.section slxxb
-.section slxya
-.section slxyb
-.section slxza
-.section slxzb
-.section slx1a
-.section slx1b
-.section slx2a
-.section slx2b
-.section slx3a
-.section slx3b
-.section slx4a
-.section slx4b
-.section slx5a
-.section slx5b
-.section slx6a
-.section slx6b
-.section slx7a
-.section slx7b
-.section slx8a
-.section slx8b
-.section slx9a
-.section slx9b
-.section slx0a
-.section slx0b
-.section slyaa
-.section slyab
-.section slyba
-.section slybb
-.section slyca
-.section slycb
-.section slyda
-.section slydb
-.section slyea
-.section slyeb
-.section slyfa
-.section slyfb
-.section slyga
-.section slygb
-.section slyha
-.section slyhb
-.section slyia
-.section slyib
-.section slyja
-.section slyjb
-.section slyka
-.section slykb
-.section slyla
-.section slylb
-.section slyma
-.section slymb
-.section slyna
-.section slynb
-.section slyoa
-.section slyob
-.section slypa
-.section slypb
-.section slyqa
-.section slyqb
-.section slyra
-.section slyrb
-.section slysa
-.section slysb
-.section slyta
-.section slytb
-.section slyua
-.section slyub
-.section slyva
-.section slyvb
-.section slywa
-.section slywb
-.section slyxa
-.section slyxb
-.section slyya
-.section slyyb
-.section slyza
-.section slyzb
-.section sly1a
-.section sly1b
-.section sly2a
-.section sly2b
-.section sly3a
-.section sly3b
-.section sly4a
-.section sly4b
-.section sly5a
-.section sly5b
-.section sly6a
-.section sly6b
-.section sly7a
-.section sly7b
-.section sly8a
-.section sly8b
-.section sly9a
-.section sly9b
-.section sly0a
-.section sly0b
-.section slzaa
-.section slzab
-.section slzba
-.section slzbb
-.section slzca
-.section slzcb
-.section slzda
-.section slzdb
-.section slzea
-.section slzeb
-.section slzfa
-.section slzfb
-.section slzga
-.section slzgb
-.section slzha
-.section slzhb
-.section slzia
-.section slzib
-.section slzja
-.section slzjb
-.section slzka
-.section slzkb
-.section slzla
-.section slzlb
-.section slzma
-.section slzmb
-.section slzna
-.section slznb
-.section slzoa
-.section slzob
-.section slzpa
-.section slzpb
-.section slzqa
-.section slzqb
-.section slzra
-.section slzrb
-.section slzsa
-.section slzsb
-.section slzta
-.section slztb
-.section slzua
-.section slzub
-.section slzva
-.section slzvb
-.section slzwa
-.section slzwb
-.section slzxa
-.section slzxb
-.section slzya
-.section slzyb
-.section slzza
-.section slzzb
-.section slz1a
-.section slz1b
-.section slz2a
-.section slz2b
-.section slz3a
-.section slz3b
-.section slz4a
-.section slz4b
-.section slz5a
-.section slz5b
-.section slz6a
-.section slz6b
-.section slz7a
-.section slz7b
-.section slz8a
-.section slz8b
-.section slz9a
-.section slz9b
-.section slz0a
-.section slz0b
-.section sl1aa
-.section sl1ab
-.section sl1ba
-.section sl1bb
-.section sl1ca
-.section sl1cb
-.section sl1da
-.section sl1db
-.section sl1ea
-.section sl1eb
-.section sl1fa
-.section sl1fb
-.section sl1ga
-.section sl1gb
-.section sl1ha
-.section sl1hb
-.section sl1ia
-.section sl1ib
-.section sl1ja
-.section sl1jb
-.section sl1ka
-.section sl1kb
-.section sl1la
-.section sl1lb
-.section sl1ma
-.section sl1mb
-.section sl1na
-.section sl1nb
-.section sl1oa
-.section sl1ob
-.section sl1pa
-.section sl1pb
-.section sl1qa
-.section sl1qb
-.section sl1ra
-.section sl1rb
-.section sl1sa
-.section sl1sb
-.section sl1ta
-.section sl1tb
-.section sl1ua
-.section sl1ub
-.section sl1va
-.section sl1vb
-.section sl1wa
-.section sl1wb
-.section sl1xa
-.section sl1xb
-.section sl1ya
-.section sl1yb
-.section sl1za
-.section sl1zb
-.section sl11a
-.section sl11b
-.section sl12a
-.section sl12b
-.section sl13a
-.section sl13b
-.section sl14a
-.section sl14b
-.section sl15a
-.section sl15b
-.section sl16a
-.section sl16b
-.section sl17a
-.section sl17b
-.section sl18a
-.section sl18b
-.section sl19a
-.section sl19b
-.section sl10a
-.section sl10b
-.section sl2aa
-.section sl2ab
-.section sl2ba
-.section sl2bb
-.section sl2ca
-.section sl2cb
-.section sl2da
-.section sl2db
-.section sl2ea
-.section sl2eb
-.section sl2fa
-.section sl2fb
-.section sl2ga
-.section sl2gb
-.section sl2ha
-.section sl2hb
-.section sl2ia
-.section sl2ib
-.section sl2ja
-.section sl2jb
-.section sl2ka
-.section sl2kb
-.section sl2la
-.section sl2lb
-.section sl2ma
-.section sl2mb
-.section sl2na
-.section sl2nb
-.section sl2oa
-.section sl2ob
-.section sl2pa
-.section sl2pb
-.section sl2qa
-.section sl2qb
-.section sl2ra
-.section sl2rb
-.section sl2sa
-.section sl2sb
-.section sl2ta
-.section sl2tb
-.section sl2ua
-.section sl2ub
-.section sl2va
-.section sl2vb
-.section sl2wa
-.section sl2wb
-.section sl2xa
-.section sl2xb
-.section sl2ya
-.section sl2yb
-.section sl2za
-.section sl2zb
-.section sl21a
-.section sl21b
-.section sl22a
-.section sl22b
-.section sl23a
-.section sl23b
-.section sl24a
-.section sl24b
-.section sl25a
-.section sl25b
-.section sl26a
-.section sl26b
-.section sl27a
-.section sl27b
-.section sl28a
-.section sl28b
-.section sl29a
-.section sl29b
-.section sl20a
-.section sl20b
-.section sl3aa
-.section sl3ab
-.section sl3ba
-.section sl3bb
-.section sl3ca
-.section sl3cb
-.section sl3da
-.section sl3db
-.section sl3ea
-.section sl3eb
-.section sl3fa
-.section sl3fb
-.section sl3ga
-.section sl3gb
-.section sl3ha
-.section sl3hb
-.section sl3ia
-.section sl3ib
-.section sl3ja
-.section sl3jb
-.section sl3ka
-.section sl3kb
-.section sl3la
-.section sl3lb
-.section sl3ma
-.section sl3mb
-.section sl3na
-.section sl3nb
-.section sl3oa
-.section sl3ob
-.section sl3pa
-.section sl3pb
-.section sl3qa
-.section sl3qb
-.section sl3ra
-.section sl3rb
-.section sl3sa
-.section sl3sb
-.section sl3ta
-.section sl3tb
-.section sl3ua
-.section sl3ub
-.section sl3va
-.section sl3vb
-.section sl3wa
-.section sl3wb
-.section sl3xa
-.section sl3xb
-.section sl3ya
-.section sl3yb
-.section sl3za
-.section sl3zb
-.section sl31a
-.section sl31b
-.section sl32a
-.section sl32b
-.section sl33a
-.section sl33b
-.section sl34a
-.section sl34b
-.section sl35a
-.section sl35b
-.section sl36a
-.section sl36b
-.section sl37a
-.section sl37b
-.section sl38a
-.section sl38b
-.section sl39a
-.section sl39b
-.section sl30a
-.section sl30b
-.section sl4aa
-.section sl4ab
-.section sl4ba
-.section sl4bb
-.section sl4ca
-.section sl4cb
-.section sl4da
-.section sl4db
-.section sl4ea
-.section sl4eb
-.section sl4fa
-.section sl4fb
-.section sl4ga
-.section sl4gb
-.section sl4ha
-.section sl4hb
-.section sl4ia
-.section sl4ib
-.section sl4ja
-.section sl4jb
-.section sl4ka
-.section sl4kb
-.section sl4la
-.section sl4lb
-.section sl4ma
-.section sl4mb
-.section sl4na
-.section sl4nb
-.section sl4oa
-.section sl4ob
-.section sl4pa
-.section sl4pb
-.section sl4qa
-.section sl4qb
-.section sl4ra
-.section sl4rb
-.section sl4sa
-.section sl4sb
-.section sl4ta
-.section sl4tb
-.section sl4ua
-.section sl4ub
-.section sl4va
-.section sl4vb
-.section sl4wa
-.section sl4wb
-.section sl4xa
-.section sl4xb
-.section sl4ya
-.section sl4yb
-.section sl4za
-.section sl4zb
-.section sl41a
-.section sl41b
-.section sl42a
-.section sl42b
-.section sl43a
-.section sl43b
-.section sl44a
-.section sl44b
-.section sl45a
-.section sl45b
-.section sl46a
-.section sl46b
-.section sl47a
-.section sl47b
-.section sl48a
-.section sl48b
-.section sl49a
-.section sl49b
-.section sl40a
-.section sl40b
-.section sl5aa
-.section sl5ab
-.section sl5ba
-.section sl5bb
-.section sl5ca
-.section sl5cb
-.section sl5da
-.section sl5db
-.section sl5ea
-.section sl5eb
-.section sl5fa
-.section sl5fb
-.section sl5ga
-.section sl5gb
-.section sl5ha
-.section sl5hb
-.section sl5ia
-.section sl5ib
-.section sl5ja
-.section sl5jb
-.section sl5ka
-.section sl5kb
-.section sl5la
-.section sl5lb
-.section sl5ma
-.section sl5mb
-.section sl5na
-.section sl5nb
-.section sl5oa
-.section sl5ob
-.section sl5pa
-.section sl5pb
-.section sl5qa
-.section sl5qb
-.section sl5ra
-.section sl5rb
-.section sl5sa
-.section sl5sb
-.section sl5ta
-.section sl5tb
-.section sl5ua
-.section sl5ub
-.section sl5va
-.section sl5vb
-.section sl5wa
-.section sl5wb
-.section sl5xa
-.section sl5xb
-.section sl5ya
-.section sl5yb
-.section sl5za
-.section sl5zb
-.section sl51a
-.section sl51b
-.section sl52a
-.section sl52b
-.section sl53a
-.section sl53b
-.section sl54a
-.section sl54b
-.section sl55a
-.section sl55b
-.section sl56a
-.section sl56b
-.section sl57a
-.section sl57b
-.section sl58a
-.section sl58b
-.section sl59a
-.section sl59b
-.section sl50a
-.section sl50b
-.section sl6aa
-.section sl6ab
-.section sl6ba
-.section sl6bb
-.section sl6ca
-.section sl6cb
-.section sl6da
-.section sl6db
-.section sl6ea
-.section sl6eb
-.section sl6fa
-.section sl6fb
-.section sl6ga
-.section sl6gb
-.section sl6ha
-.section sl6hb
-.section sl6ia
-.section sl6ib
-.section sl6ja
-.section sl6jb
-.section sl6ka
-.section sl6kb
-.section sl6la
-.section sl6lb
-.section sl6ma
-.section sl6mb
-.section sl6na
-.section sl6nb
-.section sl6oa
-.section sl6ob
-.section sl6pa
-.section sl6pb
-.section sl6qa
-.section sl6qb
-.section sl6ra
-.section sl6rb
-.section sl6sa
-.section sl6sb
-.section sl6ta
-.section sl6tb
-.section sl6ua
-.section sl6ub
-.section sl6va
-.section sl6vb
-.section sl6wa
-.section sl6wb
-.section sl6xa
-.section sl6xb
-.section sl6ya
-.section sl6yb
-.section sl6za
-.section sl6zb
-.section sl61a
-.section sl61b
-.section sl62a
-.section sl62b
-.section sl63a
-.section sl63b
-.section sl64a
-.section sl64b
-.section sl65a
-.section sl65b
-.section sl66a
-.section sl66b
-.section sl67a
-.section sl67b
-.section sl68a
-.section sl68b
-.section sl69a
-.section sl69b
-.section sl60a
-.section sl60b
-.section sl7aa
-.section sl7ab
-.section sl7ba
-.section sl7bb
-.section sl7ca
-.section sl7cb
-.section sl7da
-.section sl7db
-.section sl7ea
-.section sl7eb
-.section sl7fa
-.section sl7fb
-.section sl7ga
-.section sl7gb
-.section sl7ha
-.section sl7hb
-.section sl7ia
-.section sl7ib
-.section sl7ja
-.section sl7jb
-.section sl7ka
-.section sl7kb
-.section sl7la
-.section sl7lb
-.section sl7ma
-.section sl7mb
-.section sl7na
-.section sl7nb
-.section sl7oa
-.section sl7ob
-.section sl7pa
-.section sl7pb
-.section sl7qa
-.section sl7qb
-.section sl7ra
-.section sl7rb
-.section sl7sa
-.section sl7sb
-.section sl7ta
-.section sl7tb
-.section sl7ua
-.section sl7ub
-.section sl7va
-.section sl7vb
-.section sl7wa
-.section sl7wb
-.section sl7xa
-.section sl7xb
-.section sl7ya
-.section sl7yb
-.section sl7za
-.section sl7zb
-.section sl71a
-.section sl71b
-.section sl72a
-.section sl72b
-.section sl73a
-.section sl73b
-.section sl74a
-.section sl74b
-.section sl75a
-.section sl75b
-.section sl76a
-.section sl76b
-.section sl77a
-.section sl77b
-.section sl78a
-.section sl78b
-.section sl79a
-.section sl79b
-.section sl70a
-.section sl70b
-.section sl8aa
-.section sl8ab
-.section sl8ba
-.section sl8bb
-.section sl8ca
-.section sl8cb
-.section sl8da
-.section sl8db
-.section sl8ea
-.section sl8eb
-.section sl8fa
-.section sl8fb
-.section sl8ga
-.section sl8gb
-.section sl8ha
-.section sl8hb
-.section sl8ia
-.section sl8ib
-.section sl8ja
-.section sl8jb
-.section sl8ka
-.section sl8kb
-.section sl8la
-.section sl8lb
-.section sl8ma
-.section sl8mb
-.section sl8na
-.section sl8nb
-.section sl8oa
-.section sl8ob
-.section sl8pa
-.section sl8pb
-.section sl8qa
-.section sl8qb
-.section sl8ra
-.section sl8rb
-.section sl8sa
-.section sl8sb
-.section sl8ta
-.section sl8tb
-.section sl8ua
-.section sl8ub
-.section sl8va
-.section sl8vb
-.section sl8wa
-.section sl8wb
-.section sl8xa
-.section sl8xb
-.section sl8ya
-.section sl8yb
-.section sl8za
-.section sl8zb
-.section sl81a
-.section sl81b
-.section sl82a
-.section sl82b
-.section sl83a
-.section sl83b
-.section sl84a
-.section sl84b
-.section sl85a
-.section sl85b
-.section sl86a
-.section sl86b
-.section sl87a
-.section sl87b
-.section sl88a
-.section sl88b
-.section sl89a
-.section sl89b
-.section sl80a
-.section sl80b
-.section sl9aa
-.section sl9ab
-.section sl9ba
-.section sl9bb
-.section sl9ca
-.section sl9cb
-.section sl9da
-.section sl9db
-.section sl9ea
-.section sl9eb
-.section sl9fa
-.section sl9fb
-.section sl9ga
-.section sl9gb
-.section sl9ha
-.section sl9hb
-.section sl9ia
-.section sl9ib
-.section sl9ja
-.section sl9jb
-.section sl9ka
-.section sl9kb
-.section sl9la
-.section sl9lb
-.section sl9ma
-.section sl9mb
-.section sl9na
-.section sl9nb
-.section sl9oa
-.section sl9ob
-.section sl9pa
-.section sl9pb
-.section sl9qa
-.section sl9qb
-.section sl9ra
-.section sl9rb
-.section sl9sa
-.section sl9sb
-.section sl9ta
-.section sl9tb
-.section sl9ua
-.section sl9ub
-.section sl9va
-.section sl9vb
-.section sl9wa
-.section sl9wb
-.section sl9xa
-.section sl9xb
-.section sl9ya
-.section sl9yb
-.section sl9za
-.section sl9zb
-.section sl91a
-.section sl91b
-.section sl92a
-.section sl92b
-.section sl93a
-.section sl93b
-.section sl94a
-.section sl94b
-.section sl95a
-.section sl95b
-.section sl96a
-.section sl96b
-.section sl97a
-.section sl97b
-.section sl98a
-.section sl98b
-.section sl99a
-.section sl99b
-.section sl90a
-.section sl90b
-.section sl0aa
-.section sl0ab
-.section sl0ba
-.section sl0bb
-.section sl0ca
-.section sl0cb
-.section sl0da
-.section sl0db
-.section sl0ea
-.section sl0eb
-.section sl0fa
-.section sl0fb
-.section sl0ga
-.section sl0gb
-.section sl0ha
-.section sl0hb
-.section sl0ia
-.section sl0ib
-.section sl0ja
-.section sl0jb
-.section sl0ka
-.section sl0kb
-.section sl0la
-.section sl0lb
-.section sl0ma
-.section sl0mb
-.section sl0na
-.section sl0nb
-.section sl0oa
-.section sl0ob
-.section sl0pa
-.section sl0pb
-.section sl0qa
-.section sl0qb
-.section sl0ra
-.section sl0rb
-.section sl0sa
-.section sl0sb
-.section sl0ta
-.section sl0tb
-.section sl0ua
-.section sl0ub
-.section sl0va
-.section sl0vb
-.section sl0wa
-.section sl0wb
-.section sl0xa
-.section sl0xb
-.section sl0ya
-.section sl0yb
-.section sl0za
-.section sl0zb
-.section sl01a
-.section sl01b
-.section sl02a
-.section sl02b
-.section sl03a
-.section sl03b
-.section sl04a
-.section sl04b
-.section sl05a
-.section sl05b
-.section sl06a
-.section sl06b
-.section sl07a
-.section sl07b
-.section sl08a
-.section sl08b
-.section sl09a
-.section sl09b
-.section sl00a
-.section sl00b
-.section smaaa
-.section smaab
-.section smaba
-.section smabb
-.section smaca
-.section smacb
-.section smada
-.section smadb
-.section smaea
-.section smaeb
-.section smafa
-.section smafb
-.section smaga
-.section smagb
-.section smaha
-.section smahb
-.section smaia
-.section smaib
-.section smaja
-.section smajb
-.section smaka
-.section smakb
-.section smala
-.section smalb
-.section smama
-.section smamb
-.section smana
-.section smanb
-.section smaoa
-.section smaob
-.section smapa
-.section smapb
-.section smaqa
-.section smaqb
-.section smara
-.section smarb
-.section smasa
-.section smasb
-.section smata
-.section smatb
-.section smaua
-.section smaub
-.section smava
-.section smavb
-.section smawa
-.section smawb
-.section smaxa
-.section smaxb
-.section smaya
-.section smayb
-.section smaza
-.section smazb
-.section sma1a
-.section sma1b
-.section sma2a
-.section sma2b
-.section sma3a
-.section sma3b
-.section sma4a
-.section sma4b
-.section sma5a
-.section sma5b
-.section sma6a
-.section sma6b
-.section sma7a
-.section sma7b
-.section sma8a
-.section sma8b
-.section sma9a
-.section sma9b
-.section sma0a
-.section sma0b
-.section smbaa
-.section smbab
-.section smbba
-.section smbbb
-.section smbca
-.section smbcb
-.section smbda
-.section smbdb
-.section smbea
-.section smbeb
-.section smbfa
-.section smbfb
-.section smbga
-.section smbgb
-.section smbha
-.section smbhb
-.section smbia
-.section smbib
-.section smbja
-.section smbjb
-.section smbka
-.section smbkb
-.section smbla
-.section smblb
-.section smbma
-.section smbmb
-.section smbna
-.section smbnb
-.section smboa
-.section smbob
-.section smbpa
-.section smbpb
-.section smbqa
-.section smbqb
-.section smbra
-.section smbrb
-.section smbsa
-.section smbsb
-.section smbta
-.section smbtb
-.section smbua
-.section smbub
-.section smbva
-.section smbvb
-.section smbwa
-.section smbwb
-.section smbxa
-.section smbxb
-.section smbya
-.section smbyb
-.section smbza
-.section smbzb
-.section smb1a
-.section smb1b
-.section smb2a
-.section smb2b
-.section smb3a
-.section smb3b
-.section smb4a
-.section smb4b
-.section smb5a
-.section smb5b
-.section smb6a
-.section smb6b
-.section smb7a
-.section smb7b
-.section smb8a
-.section smb8b
-.section smb9a
-.section smb9b
-.section smb0a
-.section smb0b
-.section smcaa
-.section smcab
-.section smcba
-.section smcbb
-.section smcca
-.section smccb
-.section smcda
-.section smcdb
-.section smcea
-.section smceb
-.section smcfa
-.section smcfb
-.section smcga
-.section smcgb
-.section smcha
-.section smchb
-.section smcia
-.section smcib
-.section smcja
-.section smcjb
-.section smcka
-.section smckb
-.section smcla
-.section smclb
-.section smcma
-.section smcmb
-.section smcna
-.section smcnb
-.section smcoa
-.section smcob
-.section smcpa
-.section smcpb
-.section smcqa
-.section smcqb
-.section smcra
-.section smcrb
-.section smcsa
-.section smcsb
-.section smcta
-.section smctb
-.section smcua
-.section smcub
-.section smcva
-.section smcvb
-.section smcwa
-.section smcwb
-.section smcxa
-.section smcxb
-.section smcya
-.section smcyb
-.section smcza
-.section smczb
-.section smc1a
-.section smc1b
-.section smc2a
-.section smc2b
-.section smc3a
-.section smc3b
-.section smc4a
-.section smc4b
-.section smc5a
-.section smc5b
-.section smc6a
-.section smc6b
-.section smc7a
-.section smc7b
-.section smc8a
-.section smc8b
-.section smc9a
-.section smc9b
-.section smc0a
-.section smc0b
-.section smdaa
-.section smdab
-.section smdba
-.section smdbb
-.section smdca
-.section smdcb
-.section smdda
-.section smddb
-.section smdea
-.section smdeb
-.section smdfa
-.section smdfb
-.section smdga
-.section smdgb
-.section smdha
-.section smdhb
-.section smdia
-.section smdib
-.section smdja
-.section smdjb
-.section smdka
-.section smdkb
-.section smdla
-.section smdlb
-.section smdma
-.section smdmb
-.section smdna
-.section smdnb
-.section smdoa
-.section smdob
-.section smdpa
-.section smdpb
-.section smdqa
-.section smdqb
-.section smdra
-.section smdrb
-.section smdsa
-.section smdsb
-.section smdta
-.section smdtb
-.section smdua
-.section smdub
-.section smdva
-.section smdvb
-.section smdwa
-.section smdwb
-.section smdxa
-.section smdxb
-.section smdya
-.section smdyb
-.section smdza
-.section smdzb
-.section smd1a
-.section smd1b
-.section smd2a
-.section smd2b
-.section smd3a
-.section smd3b
-.section smd4a
-.section smd4b
-.section smd5a
-.section smd5b
-.section smd6a
-.section smd6b
-.section smd7a
-.section smd7b
-.section smd8a
-.section smd8b
-.section smd9a
-.section smd9b
-.section smd0a
-.section smd0b
-.section smeaa
-.section smeab
-.section smeba
-.section smebb
-.section smeca
-.section smecb
-.section smeda
-.section smedb
-.section smeea
-.section smeeb
-.section smefa
-.section smefb
-.section smega
-.section smegb
-.section smeha
-.section smehb
-.section smeia
-.section smeib
-.section smeja
-.section smejb
-.section smeka
-.section smekb
-.section smela
-.section smelb
-.section smema
-.section smemb
-.section smena
-.section smenb
-.section smeoa
-.section smeob
-.section smepa
-.section smepb
-.section smeqa
-.section smeqb
-.section smera
-.section smerb
-.section smesa
-.section smesb
-.section smeta
-.section smetb
-.section smeua
-.section smeub
-.section smeva
-.section smevb
-.section smewa
-.section smewb
-.section smexa
-.section smexb
-.section smeya
-.section smeyb
-.section smeza
-.section smezb
-.section sme1a
-.section sme1b
-.section sme2a
-.section sme2b
-.section sme3a
-.section sme3b
-.section sme4a
-.section sme4b
-.section sme5a
-.section sme5b
-.section sme6a
-.section sme6b
-.section sme7a
-.section sme7b
-.section sme8a
-.section sme8b
-.section sme9a
-.section sme9b
-.section sme0a
-.section sme0b
-.section smfaa
-.section smfab
-.section smfba
-.section smfbb
-.section smfca
-.section smfcb
-.section smfda
-.section smfdb
-.section smfea
-.section smfeb
-.section smffa
-.section smffb
-.section smfga
-.section smfgb
-.section smfha
-.section smfhb
-.section smfia
-.section smfib
-.section smfja
-.section smfjb
-.section smfka
-.section smfkb
-.section smfla
-.section smflb
-.section smfma
-.section smfmb
-.section smfna
-.section smfnb
-.section smfoa
-.section smfob
-.section smfpa
-.section smfpb
-.section smfqa
-.section smfqb
-.section smfra
-.section smfrb
-.section smfsa
-.section smfsb
-.section smfta
-.section smftb
-.section smfua
-.section smfub
-.section smfva
-.section smfvb
-.section smfwa
-.section smfwb
-.section smfxa
-.section smfxb
-.section smfya
-.section smfyb
-.section smfza
-.section smfzb
-.section smf1a
-.section smf1b
-.section smf2a
-.section smf2b
-.section smf3a
-.section smf3b
-.section smf4a
-.section smf4b
-.section smf5a
-.section smf5b
-.section smf6a
-.section smf6b
-.section smf7a
-.section smf7b
-.section smf8a
-.section smf8b
-.section smf9a
-.section smf9b
-.section smf0a
-.section smf0b
-.section smgaa
-.section smgab
-.section smgba
-.section smgbb
-.section smgca
-.section smgcb
-.section smgda
-.section smgdb
-.section smgea
-.section smgeb
-.section smgfa
-.section smgfb
-.section smgga
-.section smggb
-.section smgha
-.section smghb
-.section smgia
-.section smgib
-.section smgja
-.section smgjb
-.section smgka
-.section smgkb
-.section smgla
-.section smglb
-.section smgma
-.section smgmb
-.section smgna
-.section smgnb
-.section smgoa
-.section smgob
-.section smgpa
-.section smgpb
-.section smgqa
-.section smgqb
-.section smgra
-.section smgrb
-.section smgsa
-.section smgsb
-.section smgta
-.section smgtb
-.section smgua
-.section smgub
-.section smgva
-.section smgvb
-.section smgwa
-.section smgwb
-.section smgxa
-.section smgxb
-.section smgya
-.section smgyb
-.section smgza
-.section smgzb
-.section smg1a
-.section smg1b
-.section smg2a
-.section smg2b
-.section smg3a
-.section smg3b
-.section smg4a
-.section smg4b
-.section smg5a
-.section smg5b
-.section smg6a
-.section smg6b
-.section smg7a
-.section smg7b
-.section smg8a
-.section smg8b
-.section smg9a
-.section smg9b
-.section smg0a
-.section smg0b
-.section smhaa
-.section smhab
-.section smhba
-.section smhbb
-.section smhca
-.section smhcb
-.section smhda
-.section smhdb
-.section smhea
-.section smheb
-.section smhfa
-.section smhfb
-.section smhga
-.section smhgb
-.section smhha
-.section smhhb
-.section smhia
-.section smhib
-.section smhja
-.section smhjb
-.section smhka
-.section smhkb
-.section smhla
-.section smhlb
-.section smhma
-.section smhmb
-.section smhna
-.section smhnb
-.section smhoa
-.section smhob
-.section smhpa
-.section smhpb
-.section smhqa
-.section smhqb
-.section smhra
-.section smhrb
-.section smhsa
-.section smhsb
-.section smhta
-.section smhtb
-.section smhua
-.section smhub
-.section smhva
-.section smhvb
-.section smhwa
-.section smhwb
-.section smhxa
-.section smhxb
-.section smhya
-.section smhyb
-.section smhza
-.section smhzb
-.section smh1a
-.section smh1b
-.section smh2a
-.section smh2b
-.section smh3a
-.section smh3b
-.section smh4a
-.section smh4b
-.section smh5a
-.section smh5b
-.section smh6a
-.section smh6b
-.section smh7a
-.section smh7b
-.section smh8a
-.section smh8b
-.section smh9a
-.section smh9b
-.section smh0a
-.section smh0b
-.section smiaa
-.section smiab
-.section smiba
-.section smibb
-.section smica
-.section smicb
-.section smida
-.section smidb
-.section smiea
-.section smieb
-.section smifa
-.section smifb
-.section smiga
-.section smigb
-.section smiha
-.section smihb
-.section smiia
-.section smiib
-.section smija
-.section smijb
-.section smika
-.section smikb
-.section smila
-.section smilb
-.section smima
-.section smimb
-.section smina
-.section sminb
-.section smioa
-.section smiob
-.section smipa
-.section smipb
-.section smiqa
-.section smiqb
-.section smira
-.section smirb
-.section smisa
-.section smisb
-.section smita
-.section smitb
-.section smiua
-.section smiub
-.section smiva
-.section smivb
-.section smiwa
-.section smiwb
-.section smixa
-.section smixb
-.section smiya
-.section smiyb
-.section smiza
-.section smizb
-.section smi1a
-.section smi1b
-.section smi2a
-.section smi2b
-.section smi3a
-.section smi3b
-.section smi4a
-.section smi4b
-.section smi5a
-.section smi5b
-.section smi6a
-.section smi6b
-.section smi7a
-.section smi7b
-.section smi8a
-.section smi8b
-.section smi9a
-.section smi9b
-.section smi0a
-.section smi0b
-.section smjaa
-.section smjab
-.section smjba
-.section smjbb
-.section smjca
-.section smjcb
-.section smjda
-.section smjdb
-.section smjea
-.section smjeb
-.section smjfa
-.section smjfb
-.section smjga
-.section smjgb
-.section smjha
-.section smjhb
-.section smjia
-.section smjib
-.section smjja
-.section smjjb
-.section smjka
-.section smjkb
-.section smjla
-.section smjlb
-.section smjma
-.section smjmb
-.section smjna
-.section smjnb
-.section smjoa
-.section smjob
-.section smjpa
-.section smjpb
-.section smjqa
-.section smjqb
-.section smjra
-.section smjrb
-.section smjsa
-.section smjsb
-.section smjta
-.section smjtb
-.section smjua
-.section smjub
-.section smjva
-.section smjvb
-.section smjwa
-.section smjwb
-.section smjxa
-.section smjxb
-.section smjya
-.section smjyb
-.section smjza
-.section smjzb
-.section smj1a
-.section smj1b
-.section smj2a
-.section smj2b
-.section smj3a
-.section smj3b
-.section smj4a
-.section smj4b
-.section smj5a
-.section smj5b
-.section smj6a
-.section smj6b
-.section smj7a
-.section smj7b
-.section smj8a
-.section smj8b
-.section smj9a
-.section smj9b
-.section smj0a
-.section smj0b
-.section smkaa
-.section smkab
-.section smkba
-.section smkbb
-.section smkca
-.section smkcb
-.section smkda
-.section smkdb
-.section smkea
-.section smkeb
-.section smkfa
-.section smkfb
-.section smkga
-.section smkgb
-.section smkha
-.section smkhb
-.section smkia
-.section smkib
-.section smkja
-.section smkjb
-.section smkka
-.section smkkb
-.section smkla
-.section smklb
-.section smkma
-.section smkmb
-.section smkna
-.section smknb
-.section smkoa
-.section smkob
-.section smkpa
-.section smkpb
-.section smkqa
-.section smkqb
-.section smkra
-.section smkrb
-.section smksa
-.section smksb
-.section smkta
-.section smktb
-.section smkua
-.section smkub
-.section smkva
-.section smkvb
-.section smkwa
-.section smkwb
-.section smkxa
-.section smkxb
-.section smkya
-.section smkyb
-.section smkza
-.section smkzb
-.section smk1a
-.section smk1b
-.section smk2a
-.section smk2b
-.section smk3a
-.section smk3b
-.section smk4a
-.section smk4b
-.section smk5a
-.section smk5b
-.section smk6a
-.section smk6b
-.section smk7a
-.section smk7b
-.section smk8a
-.section smk8b
-.section smk9a
-.section smk9b
-.section smk0a
-.section smk0b
-.section smlaa
-.section smlab
-.section smlba
-.section smlbb
-.section smlca
-.section smlcb
-.section smlda
-.section smldb
-.section smlea
-.section smleb
-.section smlfa
-.section smlfb
-.section smlga
-.section smlgb
-.section smlha
-.section smlhb
-.section smlia
-.section smlib
-.section smlja
-.section smljb
-.section smlka
-.section smlkb
-.section smlla
-.section smllb
-.section smlma
-.section smlmb
-.section smlna
-.section smlnb
-.section smloa
-.section smlob
-.section smlpa
-.section smlpb
-.section smlqa
-.section smlqb
-.section smlra
-.section smlrb
-.section smlsa
-.section smlsb
-.section smlta
-.section smltb
-.section smlua
-.section smlub
-.section smlva
-.section smlvb
-.section smlwa
-.section smlwb
-.section smlxa
-.section smlxb
-.section smlya
-.section smlyb
-.section smlza
-.section smlzb
-.section sml1a
-.section sml1b
-.section sml2a
-.section sml2b
-.section sml3a
-.section sml3b
-.section sml4a
-.section sml4b
-.section sml5a
-.section sml5b
-.section sml6a
-.section sml6b
-.section sml7a
-.section sml7b
-.section sml8a
-.section sml8b
-.section sml9a
-.section sml9b
-.section sml0a
-.section sml0b
-.section smmaa
-.section smmab
-.section smmba
-.section smmbb
-.section smmca
-.section smmcb
-.section smmda
-.section smmdb
-.section smmea
-.section smmeb
-.section smmfa
-.section smmfb
-.section smmga
-.section smmgb
-.section smmha
-.section smmhb
-.section smmia
-.section smmib
-.section smmja
-.section smmjb
-.section smmka
-.section smmkb
-.section smmla
-.section smmlb
-.section smmma
-.section smmmb
-.section smmna
-.section smmnb
-.section smmoa
-.section smmob
-.section smmpa
-.section smmpb
-.section smmqa
-.section smmqb
-.section smmra
-.section smmrb
-.section smmsa
-.section smmsb
-.section smmta
-.section smmtb
-.section smmua
-.section smmub
-.section smmva
-.section smmvb
-.section smmwa
-.section smmwb
-.section smmxa
-.section smmxb
-.section smmya
-.section smmyb
-.section smmza
-.section smmzb
-.section smm1a
-.section smm1b
-.section smm2a
-.section smm2b
-.section smm3a
-.section smm3b
-.section smm4a
-.section smm4b
-.section smm5a
-.section smm5b
-.section smm6a
-.section smm6b
-.section smm7a
-.section smm7b
-.section smm8a
-.section smm8b
-.section smm9a
-.section smm9b
-.section smm0a
-.section smm0b
-.section smnaa
-.section smnab
-.section smnba
-.section smnbb
-.section smnca
-.section smncb
-.section smnda
-.section smndb
-.section smnea
-.section smneb
-.section smnfa
-.section smnfb
-.section smnga
-.section smngb
-.section smnha
-.section smnhb
-.section smnia
-.section smnib
-.section smnja
-.section smnjb
-.section smnka
-.section smnkb
-.section smnla
-.section smnlb
-.section smnma
-.section smnmb
-.section smnna
-.section smnnb
-.section smnoa
-.section smnob
-.section smnpa
-.section smnpb
-.section smnqa
-.section smnqb
-.section smnra
-.section smnrb
-.section smnsa
-.section smnsb
-.section smnta
-.section smntb
-.section smnua
-.section smnub
-.section smnva
-.section smnvb
-.section smnwa
-.section smnwb
-.section smnxa
-.section smnxb
-.section smnya
-.section smnyb
-.section smnza
-.section smnzb
-.section smn1a
-.section smn1b
-.section smn2a
-.section smn2b
-.section smn3a
-.section smn3b
-.section smn4a
-.section smn4b
-.section smn5a
-.section smn5b
-.section smn6a
-.section smn6b
-.section smn7a
-.section smn7b
-.section smn8a
-.section smn8b
-.section smn9a
-.section smn9b
-.section smn0a
-.section smn0b
-.section smoaa
-.section smoab
-.section smoba
-.section smobb
-.section smoca
-.section smocb
-.section smoda
-.section smodb
-.section smoea
-.section smoeb
-.section smofa
-.section smofb
-.section smoga
-.section smogb
-.section smoha
-.section smohb
-.section smoia
-.section smoib
-.section smoja
-.section smojb
-.section smoka
-.section smokb
-.section smola
-.section smolb
-.section smoma
-.section smomb
-.section smona
-.section smonb
-.section smooa
-.section smoob
-.section smopa
-.section smopb
-.section smoqa
-.section smoqb
-.section smora
-.section smorb
-.section smosa
-.section smosb
-.section smota
-.section smotb
-.section smoua
-.section smoub
-.section smova
-.section smovb
-.section smowa
-.section smowb
-.section smoxa
-.section smoxb
-.section smoya
-.section smoyb
-.section smoza
-.section smozb
-.section smo1a
-.section smo1b
-.section smo2a
-.section smo2b
-.section smo3a
-.section smo3b
-.section smo4a
-.section smo4b
-.section smo5a
-.section smo5b
-.section smo6a
-.section smo6b
-.section smo7a
-.section smo7b
-.section smo8a
-.section smo8b
-.section smo9a
-.section smo9b
-.section smo0a
-.section smo0b
-.section smpaa
-.section smpab
-.section smpba
-.section smpbb
-.section smpca
-.section smpcb
-.section smpda
-.section smpdb
-.section smpea
-.section smpeb
-.section smpfa
-.section smpfb
-.section smpga
-.section smpgb
-.section smpha
-.section smphb
-.section smpia
-.section smpib
-.section smpja
-.section smpjb
-.section smpka
-.section smpkb
-.section smpla
-.section smplb
-.section smpma
-.section smpmb
-.section smpna
-.section smpnb
-.section smpoa
-.section smpob
-.section smppa
-.section smppb
-.section smpqa
-.section smpqb
-.section smpra
-.section smprb
-.section smpsa
-.section smpsb
-.section smpta
-.section smptb
-.section smpua
-.section smpub
-.section smpva
-.section smpvb
-.section smpwa
-.section smpwb
-.section smpxa
-.section smpxb
-.section smpya
-.section smpyb
-.section smpza
-.section smpzb
-.section smp1a
-.section smp1b
-.section smp2a
-.section smp2b
-.section smp3a
-.section smp3b
-.section smp4a
-.section smp4b
-.section smp5a
-.section smp5b
-.section smp6a
-.section smp6b
-.section smp7a
-.section smp7b
-.section smp8a
-.section smp8b
-.section smp9a
-.section smp9b
-.section smp0a
-.section smp0b
-.section smqaa
-.section smqab
-.section smqba
-.section smqbb
-.section smqca
-.section smqcb
-.section smqda
-.section smqdb
-.section smqea
-.section smqeb
-.section smqfa
-.section smqfb
-.section smqga
-.section smqgb
-.section smqha
-.section smqhb
-.section smqia
-.section smqib
-.section smqja
-.section smqjb
-.section smqka
-.section smqkb
-.section smqla
-.section smqlb
-.section smqma
-.section smqmb
-.section smqna
-.section smqnb
-.section smqoa
-.section smqob
-.section smqpa
-.section smqpb
-.section smqqa
-.section smqqb
-.section smqra
-.section smqrb
-.section smqsa
-.section smqsb
-.section smqta
-.section smqtb
-.section smqua
-.section smqub
-.section smqva
-.section smqvb
-.section smqwa
-.section smqwb
-.section smqxa
-.section smqxb
-.section smqya
-.section smqyb
-.section smqza
-.section smqzb
-.section smq1a
-.section smq1b
-.section smq2a
-.section smq2b
-.section smq3a
-.section smq3b
-.section smq4a
-.section smq4b
-.section smq5a
-.section smq5b
-.section smq6a
-.section smq6b
-.section smq7a
-.section smq7b
-.section smq8a
-.section smq8b
-.section smq9a
-.section smq9b
-.section smq0a
-.section smq0b
-.section smraa
-.section smrab
-.section smrba
-.section smrbb
-.section smrca
-.section smrcb
-.section smrda
-.section smrdb
-.section smrea
-.section smreb
-.section smrfa
-.section smrfb
-.section smrga
-.section smrgb
-.section smrha
-.section smrhb
-.section smria
-.section smrib
-.section smrja
-.section smrjb
-.section smrka
-.section smrkb
-.section smrla
-.section smrlb
-.section smrma
-.section smrmb
-.section smrna
-.section smrnb
-.section smroa
-.section smrob
-.section smrpa
-.section smrpb
-.section smrqa
-.section smrqb
-.section smrra
-.section smrrb
-.section smrsa
-.section smrsb
-.section smrta
-.section smrtb
-.section smrua
-.section smrub
-.section smrva
-.section smrvb
-.section smrwa
-.section smrwb
-.section smrxa
-.section smrxb
-.section smrya
-.section smryb
-.section smrza
-.section smrzb
-.section smr1a
-.section smr1b
-.section smr2a
-.section smr2b
-.section smr3a
-.section smr3b
-.section smr4a
-.section smr4b
-.section smr5a
-.section smr5b
-.section smr6a
-.section smr6b
-.section smr7a
-.section smr7b
-.section smr8a
-.section smr8b
-.section smr9a
-.section smr9b
-.section smr0a
-.section smr0b
-.section smsaa
-.section smsab
-.section smsba
-.section smsbb
-.section smsca
-.section smscb
-.section smsda
-.section smsdb
-.section smsea
-.section smseb
-.section smsfa
-.section smsfb
-.section smsga
-.section smsgb
-.section smsha
-.section smshb
-.section smsia
-.section smsib
-.section smsja
-.section smsjb
-.section smska
-.section smskb
-.section smsla
-.section smslb
-.section smsma
-.section smsmb
-.section smsna
-.section smsnb
-.section smsoa
-.section smsob
-.section smspa
-.section smspb
-.section smsqa
-.section smsqb
-.section smsra
-.section smsrb
-.section smssa
-.section smssb
-.section smsta
-.section smstb
-.section smsua
-.section smsub
-.section smsva
-.section smsvb
-.section smswa
-.section smswb
-.section smsxa
-.section smsxb
-.section smsya
-.section smsyb
-.section smsza
-.section smszb
-.section sms1a
-.section sms1b
-.section sms2a
-.section sms2b
-.section sms3a
-.section sms3b
-.section sms4a
-.section sms4b
-.section sms5a
-.section sms5b
-.section sms6a
-.section sms6b
-.section sms7a
-.section sms7b
-.section sms8a
-.section sms8b
-.section sms9a
-.section sms9b
-.section sms0a
-.section sms0b
-.section smtaa
-.section smtab
-.section smtba
-.section smtbb
-.section smtca
-.section smtcb
-.section smtda
-.section smtdb
-.section smtea
-.section smteb
-.section smtfa
-.section smtfb
-.section smtga
-.section smtgb
-.section smtha
-.section smthb
-.section smtia
-.section smtib
-.section smtja
-.section smtjb
-.section smtka
-.section smtkb
-.section smtla
-.section smtlb
-.section smtma
-.section smtmb
-.section smtna
-.section smtnb
-.section smtoa
-.section smtob
-.section smtpa
-.section smtpb
-.section smtqa
-.section smtqb
-.section smtra
-.section smtrb
-.section smtsa
-.section smtsb
-.section smtta
-.section smttb
-.section smtua
-.section smtub
-.section smtva
-.section smtvb
-.section smtwa
-.section smtwb
-.section smtxa
-.section smtxb
-.section smtya
-.section smtyb
-.section smtza
-.section smtzb
-.section smt1a
-.section smt1b
-.section smt2a
-.section smt2b
-.section smt3a
-.section smt3b
-.section smt4a
-.section smt4b
-.section smt5a
-.section smt5b
-.section smt6a
-.section smt6b
-.section smt7a
-.section smt7b
-.section smt8a
-.section smt8b
-.section smt9a
-.section smt9b
-.section smt0a
-.section smt0b
-.section smuaa
-.section smuab
-.section smuba
-.section smubb
-.section smuca
-.section smucb
-.section smuda
-.section smudb
-.section smuea
-.section smueb
-.section smufa
-.section smufb
-.section smuga
-.section smugb
-.section smuha
-.section smuhb
-.section smuia
-.section smuib
-.section smuja
-.section smujb
-.section smuka
-.section smukb
-.section smula
-.section smulb
-.section smuma
-.section smumb
-.section smuna
-.section smunb
-.section smuoa
-.section smuob
-.section smupa
-.section smupb
-.section smuqa
-.section smuqb
-.section smura
-.section smurb
-.section smusa
-.section smusb
-.section smuta
-.section smutb
-.section smuua
-.section smuub
-.section smuva
-.section smuvb
-.section smuwa
-.section smuwb
-.section smuxa
-.section smuxb
-.section smuya
-.section smuyb
-.section smuza
-.section smuzb
-.section smu1a
-.section smu1b
-.section smu2a
-.section smu2b
-.section smu3a
-.section smu3b
-.section smu4a
-.section smu4b
-.section smu5a
-.section smu5b
-.section smu6a
-.section smu6b
-.section smu7a
-.section smu7b
-.section smu8a
-.section smu8b
-.section smu9a
-.section smu9b
-.section smu0a
-.section smu0b
-.section smvaa
-.section smvab
-.section smvba
-.section smvbb
-.section smvca
-.section smvcb
-.section smvda
-.section smvdb
-.section smvea
-.section smveb
-.section smvfa
-.section smvfb
-.section smvga
-.section smvgb
-.section smvha
-.section smvhb
-.section smvia
-.section smvib
-.section smvja
-.section smvjb
-.section smvka
-.section smvkb
-.section smvla
-.section smvlb
-.section smvma
-.section smvmb
-.section smvna
-.section smvnb
-.section smvoa
-.section smvob
-.section smvpa
-.section smvpb
-.section smvqa
-.section smvqb
-.section smvra
-.section smvrb
-.section smvsa
-.section smvsb
-.section smvta
-.section smvtb
-.section smvua
-.section smvub
-.section smvva
-.section smvvb
-.section smvwa
-.section smvwb
-.section smvxa
-.section smvxb
-.section smvya
-.section smvyb
-.section smvza
-.section smvzb
-.section smv1a
-.section smv1b
-.section smv2a
-.section smv2b
-.section smv3a
-.section smv3b
-.section smv4a
-.section smv4b
-.section smv5a
-.section smv5b
-.section smv6a
-.section smv6b
-.section smv7a
-.section smv7b
-.section smv8a
-.section smv8b
-.section smv9a
-.section smv9b
-.section smv0a
-.section smv0b
-.section smwaa
-.section smwab
-.section smwba
-.section smwbb
-.section smwca
-.section smwcb
-.section smwda
-.section smwdb
-.section smwea
-.section smweb
-.section smwfa
-.section smwfb
-.section smwga
-.section smwgb
-.section smwha
-.section smwhb
-.section smwia
-.section smwib
-.section smwja
-.section smwjb
-.section smwka
-.section smwkb
-.section smwla
-.section smwlb
-.section smwma
-.section smwmb
-.section smwna
-.section smwnb
-.section smwoa
-.section smwob
-.section smwpa
-.section smwpb
-.section smwqa
-.section smwqb
-.section smwra
-.section smwrb
-.section smwsa
-.section smwsb
-.section smwta
-.section smwtb
-.section smwua
-.section smwub
-.section smwva
-.section smwvb
-.section smwwa
-.section smwwb
-.section smwxa
-.section smwxb
-.section smwya
-.section smwyb
-.section smwza
-.section smwzb
-.section smw1a
-.section smw1b
-.section smw2a
-.section smw2b
-.section smw3a
-.section smw3b
-.section smw4a
-.section smw4b
-.section smw5a
-.section smw5b
-.section smw6a
-.section smw6b
-.section smw7a
-.section smw7b
-.section smw8a
-.section smw8b
-.section smw9a
-.section smw9b
-.section smw0a
-.section smw0b
-.section smxaa
-.section smxab
-.section smxba
-.section smxbb
-.section smxca
-.section smxcb
-.section smxda
-.section smxdb
-.section smxea
-.section smxeb
-.section smxfa
-.section smxfb
-.section smxga
-.section smxgb
-.section smxha
-.section smxhb
-.section smxia
-.section smxib
-.section smxja
-.section smxjb
-.section smxka
-.section smxkb
-.section smxla
-.section smxlb
-.section smxma
-.section smxmb
-.section smxna
-.section smxnb
-.section smxoa
-.section smxob
-.section smxpa
-.section smxpb
-.section smxqa
-.section smxqb
-.section smxra
-.section smxrb
-.section smxsa
-.section smxsb
-.section smxta
-.section smxtb
-.section smxua
-.section smxub
-.section smxva
-.section smxvb
-.section smxwa
-.section smxwb
-.section smxxa
-.section smxxb
-.section smxya
-.section smxyb
-.section smxza
-.section smxzb
-.section smx1a
-.section smx1b
-.section smx2a
-.section smx2b
-.section smx3a
-.section smx3b
-.section smx4a
-.section smx4b
-.section smx5a
-.section smx5b
-.section smx6a
-.section smx6b
-.section smx7a
-.section smx7b
-.section smx8a
-.section smx8b
-.section smx9a
-.section smx9b
-.section smx0a
-.section smx0b
-.section smyaa
-.section smyab
-.section smyba
-.section smybb
-.section smyca
-.section smycb
-.section smyda
-.section smydb
-.section smyea
-.section smyeb
-.section smyfa
-.section smyfb
-.section smyga
-.section smygb
-.section smyha
-.section smyhb
-.section smyia
-.section smyib
-.section smyja
-.section smyjb
-.section smyka
-.section smykb
-.section smyla
-.section smylb
-.section smyma
-.section smymb
-.section smyna
-.section smynb
-.section smyoa
-.section smyob
-.section smypa
-.section smypb
-.section smyqa
-.section smyqb
-.section smyra
-.section smyrb
-.section smysa
-.section smysb
-.section smyta
-.section smytb
-.section smyua
-.section smyub
-.section smyva
-.section smyvb
-.section smywa
-.section smywb
-.section smyxa
-.section smyxb
-.section smyya
-.section smyyb
-.section smyza
-.section smyzb
-.section smy1a
-.section smy1b
-.section smy2a
-.section smy2b
-.section smy3a
-.section smy3b
-.section smy4a
-.section smy4b
-.section smy5a
-.section smy5b
-.section smy6a
-.section smy6b
-.section smy7a
-.section smy7b
-.section smy8a
-.section smy8b
-.section smy9a
-.section smy9b
-.section smy0a
-.section smy0b
-.section smzaa
-.section smzab
-.section smzba
-.section smzbb
-.section smzca
-.section smzcb
-.section smzda
-.section smzdb
-.section smzea
-.section smzeb
-.section smzfa
-.section smzfb
-.section smzga
-.section smzgb
-.section smzha
-.section smzhb
-.section smzia
-.section smzib
-.section smzja
-.section smzjb
-.section smzka
-.section smzkb
-.section smzla
-.section smzlb
-.section smzma
-.section smzmb
-.section smzna
-.section smznb
-.section smzoa
-.section smzob
-.section smzpa
-.section smzpb
-.section smzqa
-.section smzqb
-.section smzra
-.section smzrb
-.section smzsa
-.section smzsb
-.section smzta
-.section smztb
-.section smzua
-.section smzub
-.section smzva
-.section smzvb
-.section smzwa
-.section smzwb
-.section smzxa
-.section smzxb
-.section smzya
-.section smzyb
-.section smzza
-.section smzzb
-.section smz1a
-.section smz1b
-.section smz2a
-.section smz2b
-.section smz3a
-.section smz3b
-.section smz4a
-.section smz4b
-.section smz5a
-.section smz5b
-.section smz6a
-.section smz6b
-.section smz7a
-.section smz7b
-.section smz8a
-.section smz8b
-.section smz9a
-.section smz9b
-.section smz0a
-.section smz0b
-.section sm1aa
-.section sm1ab
-.section sm1ba
-.section sm1bb
-.section sm1ca
-.section sm1cb
-.section sm1da
-.section sm1db
-.section sm1ea
-.section sm1eb
-.section sm1fa
-.section sm1fb
-.section sm1ga
-.section sm1gb
-.section sm1ha
-.section sm1hb
-.section sm1ia
-.section sm1ib
-.section sm1ja
-.section sm1jb
-.section sm1ka
-.section sm1kb
-.section sm1la
-.section sm1lb
-.section sm1ma
-.section sm1mb
-.section sm1na
-.section sm1nb
-.section sm1oa
-.section sm1ob
-.section sm1pa
-.section sm1pb
-.section sm1qa
-.section sm1qb
-.section sm1ra
-.section sm1rb
-.section sm1sa
-.section sm1sb
-.section sm1ta
-.section sm1tb
-.section sm1ua
-.section sm1ub
-.section sm1va
-.section sm1vb
-.section sm1wa
-.section sm1wb
-.section sm1xa
-.section sm1xb
-.section sm1ya
-.section sm1yb
-.section sm1za
-.section sm1zb
-.section sm11a
-.section sm11b
-.section sm12a
-.section sm12b
-.section sm13a
-.section sm13b
-.section sm14a
-.section sm14b
-.section sm15a
-.section sm15b
-.section sm16a
-.section sm16b
-.section sm17a
-.section sm17b
-.section sm18a
-.section sm18b
-.section sm19a
-.section sm19b
-.section sm10a
-.section sm10b
-.section sm2aa
-.section sm2ab
-.section sm2ba
-.section sm2bb
-.section sm2ca
-.section sm2cb
-.section sm2da
-.section sm2db
-.section sm2ea
-.section sm2eb
-.section sm2fa
-.section sm2fb
-.section sm2ga
-.section sm2gb
-.section sm2ha
-.section sm2hb
-.section sm2ia
-.section sm2ib
-.section sm2ja
-.section sm2jb
-.section sm2ka
-.section sm2kb
-.section sm2la
-.section sm2lb
-.section sm2ma
-.section sm2mb
-.section sm2na
-.section sm2nb
-.section sm2oa
-.section sm2ob
-.section sm2pa
-.section sm2pb
-.section sm2qa
-.section sm2qb
-.section sm2ra
-.section sm2rb
-.section sm2sa
-.section sm2sb
-.section sm2ta
-.section sm2tb
-.section sm2ua
-.section sm2ub
-.section sm2va
-.section sm2vb
-.section sm2wa
-.section sm2wb
-.section sm2xa
-.section sm2xb
-.section sm2ya
-.section sm2yb
-.section sm2za
-.section sm2zb
-.section sm21a
-.section sm21b
-.section sm22a
-.section sm22b
-.section sm23a
-.section sm23b
-.section sm24a
-.section sm24b
-.section sm25a
-.section sm25b
-.section sm26a
-.section sm26b
-.section sm27a
-.section sm27b
-.section sm28a
-.section sm28b
-.section sm29a
-.section sm29b
-.section sm20a
-.section sm20b
-.section sm3aa
-.section sm3ab
-.section sm3ba
-.section sm3bb
-.section sm3ca
-.section sm3cb
-.section sm3da
-.section sm3db
-.section sm3ea
-.section sm3eb
-.section sm3fa
-.section sm3fb
-.section sm3ga
-.section sm3gb
-.section sm3ha
-.section sm3hb
-.section sm3ia
-.section sm3ib
-.section sm3ja
-.section sm3jb
-.section sm3ka
-.section sm3kb
-.section sm3la
-.section sm3lb
-.section sm3ma
-.section sm3mb
-.section sm3na
-.section sm3nb
-.section sm3oa
-.section sm3ob
-.section sm3pa
-.section sm3pb
-.section sm3qa
-.section sm3qb
-.section sm3ra
-.section sm3rb
-.section sm3sa
-.section sm3sb
-.section sm3ta
-.section sm3tb
-.section sm3ua
-.section sm3ub
-.section sm3va
-.section sm3vb
-.section sm3wa
-.section sm3wb
-.section sm3xa
-.section sm3xb
-.section sm3ya
-.section sm3yb
-.section sm3za
-.section sm3zb
-.section sm31a
-.section sm31b
-.section sm32a
-.section sm32b
-.section sm33a
-.section sm33b
-.section sm34a
-.section sm34b
-.section sm35a
-.section sm35b
-.section sm36a
-.section sm36b
-.section sm37a
-.section sm37b
-.section sm38a
-.section sm38b
-.section sm39a
-.section sm39b
-.section sm30a
-.section sm30b
-.section sm4aa
-.section sm4ab
-.section sm4ba
-.section sm4bb
-.section sm4ca
-.section sm4cb
-.section sm4da
-.section sm4db
-.section sm4ea
-.section sm4eb
-.section sm4fa
-.section sm4fb
-.section sm4ga
-.section sm4gb
-.section sm4ha
-.section sm4hb
-.section sm4ia
-.section sm4ib
-.section sm4ja
-.section sm4jb
-.section sm4ka
-.section sm4kb
-.section sm4la
-.section sm4lb
-.section sm4ma
-.section sm4mb
-.section sm4na
-.section sm4nb
-.section sm4oa
-.section sm4ob
-.section sm4pa
-.section sm4pb
-.section sm4qa
-.section sm4qb
-.section sm4ra
-.section sm4rb
-.section sm4sa
-.section sm4sb
-.section sm4ta
-.section sm4tb
-.section sm4ua
-.section sm4ub
-.section sm4va
-.section sm4vb
-.section sm4wa
-.section sm4wb
-.section sm4xa
-.section sm4xb
-.section sm4ya
-.section sm4yb
-.section sm4za
-.section sm4zb
-.section sm41a
-.section sm41b
-.section sm42a
-.section sm42b
-.section sm43a
-.section sm43b
-.section sm44a
-.section sm44b
-.section sm45a
-.section sm45b
-.section sm46a
-.section sm46b
-.section sm47a
-.section sm47b
-.section sm48a
-.section sm48b
-.section sm49a
-.section sm49b
-.section sm40a
-.section sm40b
-.section sm5aa
-.section sm5ab
-.section sm5ba
-.section sm5bb
-.section sm5ca
-.section sm5cb
-.section sm5da
-.section sm5db
-.section sm5ea
-.section sm5eb
-.section sm5fa
-.section sm5fb
-.section sm5ga
-.section sm5gb
-.section sm5ha
-.section sm5hb
-.section sm5ia
-.section sm5ib
-.section sm5ja
-.section sm5jb
-.section sm5ka
-.section sm5kb
-.section sm5la
-.section sm5lb
-.section sm5ma
-.section sm5mb
-.section sm5na
-.section sm5nb
-.section sm5oa
-.section sm5ob
-.section sm5pa
-.section sm5pb
-.section sm5qa
-.section sm5qb
-.section sm5ra
-.section sm5rb
-.section sm5sa
-.section sm5sb
-.section sm5ta
-.section sm5tb
-.section sm5ua
-.section sm5ub
-.section sm5va
-.section sm5vb
-.section sm5wa
-.section sm5wb
-.section sm5xa
-.section sm5xb
-.section sm5ya
-.section sm5yb
-.section sm5za
-.section sm5zb
-.section sm51a
-.section sm51b
-.section sm52a
-.section sm52b
-.section sm53a
-.section sm53b
-.section sm54a
-.section sm54b
-.section sm55a
-.section sm55b
-.section sm56a
-.section sm56b
-.section sm57a
-.section sm57b
-.section sm58a
-.section sm58b
-.section sm59a
-.section sm59b
-.section sm50a
-.section sm50b
-.section sm6aa
-.section sm6ab
-.section sm6ba
-.section sm6bb
-.section sm6ca
-.section sm6cb
-.section sm6da
-.section sm6db
-.section sm6ea
-.section sm6eb
-.section sm6fa
-.section sm6fb
-.section sm6ga
-.section sm6gb
-.section sm6ha
-.section sm6hb
-.section sm6ia
-.section sm6ib
-.section sm6ja
-.section sm6jb
-.section sm6ka
-.section sm6kb
-.section sm6la
-.section sm6lb
-.section sm6ma
-.section sm6mb
-.section sm6na
-.section sm6nb
-.section sm6oa
-.section sm6ob
-.section sm6pa
-.section sm6pb
-.section sm6qa
-.section sm6qb
-.section sm6ra
-.section sm6rb
-.section sm6sa
-.section sm6sb
-.section sm6ta
-.section sm6tb
-.section sm6ua
-.section sm6ub
-.section sm6va
-.section sm6vb
-.section sm6wa
-.section sm6wb
-.section sm6xa
-.section sm6xb
-.section sm6ya
-.section sm6yb
-.section sm6za
-.section sm6zb
-.section sm61a
-.section sm61b
-.section sm62a
-.section sm62b
-.section sm63a
-.section sm63b
-.section sm64a
-.section sm64b
-.section sm65a
-.section sm65b
-.section sm66a
-.section sm66b
-.section sm67a
-.section sm67b
-.section sm68a
-.section sm68b
-.section sm69a
-.section sm69b
-.section sm60a
-.section sm60b
-.section sm7aa
-.section sm7ab
-.section sm7ba
-.section sm7bb
-.section sm7ca
-.section sm7cb
-.section sm7da
-.section sm7db
-.section sm7ea
-.section sm7eb
-.section sm7fa
-.section sm7fb
-.section sm7ga
-.section sm7gb
-.section sm7ha
-.section sm7hb
-.section sm7ia
-.section sm7ib
-.section sm7ja
-.section sm7jb
-.section sm7ka
-.section sm7kb
-.section sm7la
-.section sm7lb
-.section sm7ma
-.section sm7mb
-.section sm7na
-.section sm7nb
-.section sm7oa
-.section sm7ob
-.section sm7pa
-.section sm7pb
-.section sm7qa
-.section sm7qb
-.section sm7ra
-.section sm7rb
-.section sm7sa
-.section sm7sb
-.section sm7ta
-.section sm7tb
-.section sm7ua
-.section sm7ub
-.section sm7va
-.section sm7vb
-.section sm7wa
-.section sm7wb
-.section sm7xa
-.section sm7xb
-.section sm7ya
-.section sm7yb
-.section sm7za
-.section sm7zb
-.section sm71a
-.section sm71b
-.section sm72a
-.section sm72b
-.section sm73a
-.section sm73b
-.section sm74a
-.section sm74b
-.section sm75a
-.section sm75b
-.section sm76a
-.section sm76b
-.section sm77a
-.section sm77b
-.section sm78a
-.section sm78b
-.section sm79a
-.section sm79b
-.section sm70a
-.section sm70b
-.section sm8aa
-.section sm8ab
-.section sm8ba
-.section sm8bb
-.section sm8ca
-.section sm8cb
-.section sm8da
-.section sm8db
-.section sm8ea
-.section sm8eb
-.section sm8fa
-.section sm8fb
-.section sm8ga
-.section sm8gb
-.section sm8ha
-.section sm8hb
-.section sm8ia
-.section sm8ib
-.section sm8ja
-.section sm8jb
-.section sm8ka
-.section sm8kb
-.section sm8la
-.section sm8lb
-.section sm8ma
-.section sm8mb
-.section sm8na
-.section sm8nb
-.section sm8oa
-.section sm8ob
-.section sm8pa
-.section sm8pb
-.section sm8qa
-.section sm8qb
-.section sm8ra
-.section sm8rb
-.section sm8sa
-.section sm8sb
-.section sm8ta
-.section sm8tb
-.section sm8ua
-.section sm8ub
-.section sm8va
-.section sm8vb
-.section sm8wa
-.section sm8wb
-.section sm8xa
-.section sm8xb
-.section sm8ya
-.section sm8yb
-.section sm8za
-.section sm8zb
-.section sm81a
-.section sm81b
-.section sm82a
-.section sm82b
-.section sm83a
-.section sm83b
-.section sm84a
-.section sm84b
-.section sm85a
-.section sm85b
-.section sm86a
-.section sm86b
-.section sm87a
-.section sm87b
-.section sm88a
-.section sm88b
-.section sm89a
-.section sm89b
-.section sm80a
-.section sm80b
-.section sm9aa
-.section sm9ab
-.section sm9ba
-.section sm9bb
-.section sm9ca
-.section sm9cb
-.section sm9da
-.section sm9db
-.section sm9ea
-.section sm9eb
-.section sm9fa
-.section sm9fb
-.section sm9ga
-.section sm9gb
-.section sm9ha
-.section sm9hb
-.section sm9ia
-.section sm9ib
-.section sm9ja
-.section sm9jb
-.section sm9ka
-.section sm9kb
-.section sm9la
-.section sm9lb
-.section sm9ma
-.section sm9mb
-.section sm9na
-.section sm9nb
-.section sm9oa
-.section sm9ob
-.section sm9pa
-.section sm9pb
-.section sm9qa
-.section sm9qb
-.section sm9ra
-.section sm9rb
-.section sm9sa
-.section sm9sb
-.section sm9ta
-.section sm9tb
-.section sm9ua
-.section sm9ub
-.section sm9va
-.section sm9vb
-.section sm9wa
-.section sm9wb
-.section sm9xa
-.section sm9xb
-.section sm9ya
-.section sm9yb
-.section sm9za
-.section sm9zb
-.section sm91a
-.section sm91b
-.section sm92a
-.section sm92b
-.section sm93a
-.section sm93b
-.section sm94a
-.section sm94b
-.section sm95a
-.section sm95b
-.section sm96a
-.section sm96b
-.section sm97a
-.section sm97b
-.section sm98a
-.section sm98b
-.section sm99a
-.section sm99b
-.section sm90a
-.section sm90b
-.section sm0aa
-.section sm0ab
-.section sm0ba
-.section sm0bb
-.section sm0ca
-.section sm0cb
-.section sm0da
-.section sm0db
-.section sm0ea
-.section sm0eb
-.section sm0fa
-.section sm0fb
-.section sm0ga
-.section sm0gb
-.section sm0ha
-.section sm0hb
-.section sm0ia
-.section sm0ib
-.section sm0ja
-.section sm0jb
-.section sm0ka
-.section sm0kb
-.section sm0la
-.section sm0lb
-.section sm0ma
-.section sm0mb
-.section sm0na
-.section sm0nb
-.section sm0oa
-.section sm0ob
-.section sm0pa
-.section sm0pb
-.section sm0qa
-.section sm0qb
-.section sm0ra
-.section sm0rb
-.section sm0sa
-.section sm0sb
-.section sm0ta
-.section sm0tb
-.section sm0ua
-.section sm0ub
-.section sm0va
-.section sm0vb
-.section sm0wa
-.section sm0wb
-.section sm0xa
-.section sm0xb
-.section sm0ya
-.section sm0yb
-.section sm0za
-.section sm0zb
-.section sm01a
-.section sm01b
-.section sm02a
-.section sm02b
-.section sm03a
-.section sm03b
-.section sm04a
-.section sm04b
-.section sm05a
-.section sm05b
-.section sm06a
-.section sm06b
-.section sm07a
-.section sm07b
-.section sm08a
-.section sm08b
-.section sm09a
-.section sm09b
-.section sm00a
-.section sm00b
-.section snaaa
-.section snaab
-.section snaba
-.section snabb
-.section snaca
-.section snacb
-.section snada
-.section snadb
-.section snaea
-.section snaeb
-.section snafa
-.section snafb
-.section snaga
-.section snagb
-.section snaha
-.section snahb
-.section snaia
-.section snaib
-.section snaja
-.section snajb
-.section snaka
-.section snakb
-.section snala
-.section snalb
-.section snama
-.section snamb
-.section snana
-.section snanb
-.section snaoa
-.section snaob
-.section snapa
-.section snapb
-.section snaqa
-.section snaqb
-.section snara
-.section snarb
-.section snasa
-.section snasb
-.section snata
-.section snatb
-.section snaua
-.section snaub
-.section snava
-.section snavb
-.section snawa
-.section snawb
-.section snaxa
-.section snaxb
-.section snaya
-.section snayb
-.section snaza
-.section snazb
-.section sna1a
-.section sna1b
-.section sna2a
-.section sna2b
-.section sna3a
-.section sna3b
-.section sna4a
-.section sna4b
-.section sna5a
-.section sna5b
-.section sna6a
-.section sna6b
-.section sna7a
-.section sna7b
-.section sna8a
-.section sna8b
-.section sna9a
-.section sna9b
-.section sna0a
-.section sna0b
-.section snbaa
-.section snbab
-.section snbba
-.section snbbb
-.section snbca
-.section snbcb
-.section snbda
-.section snbdb
-.section snbea
-.section snbeb
-.section snbfa
-.section snbfb
-.section snbga
-.section snbgb
-.section snbha
-.section snbhb
-.section snbia
-.section snbib
-.section snbja
-.section snbjb
-.section snbka
-.section snbkb
-.section snbla
-.section snblb
-.section snbma
-.section snbmb
-.section snbna
-.section snbnb
-.section snboa
-.section snbob
-.section snbpa
-.section snbpb
-.section snbqa
-.section snbqb
-.section snbra
-.section snbrb
-.section snbsa
-.section snbsb
-.section snbta
-.section snbtb
-.section snbua
-.section snbub
-.section snbva
-.section snbvb
-.section snbwa
-.section snbwb
-.section snbxa
-.section snbxb
-.section snbya
-.section snbyb
-.section snbza
-.section snbzb
-.section snb1a
-.section snb1b
-.section snb2a
-.section snb2b
-.section snb3a
-.section snb3b
-.section snb4a
-.section snb4b
-.section snb5a
-.section snb5b
-.section snb6a
-.section snb6b
-.section snb7a
-.section snb7b
-.section snb8a
-.section snb8b
-.section snb9a
-.section snb9b
-.section snb0a
-.section snb0b
-.section sncaa
-.section sncab
-.section sncba
-.section sncbb
-.section sncca
-.section snccb
-.section sncda
-.section sncdb
-.section sncea
-.section snceb
-.section sncfa
-.section sncfb
-.section sncga
-.section sncgb
-.section sncha
-.section snchb
-.section sncia
-.section sncib
-.section sncja
-.section sncjb
-.section sncka
-.section snckb
-.section sncla
-.section snclb
-.section sncma
-.section sncmb
-.section sncna
-.section sncnb
-.section sncoa
-.section sncob
-.section sncpa
-.section sncpb
-.section sncqa
-.section sncqb
-.section sncra
-.section sncrb
-.section sncsa
-.section sncsb
-.section sncta
-.section snctb
-.section sncua
-.section sncub
-.section sncva
-.section sncvb
-.section sncwa
-.section sncwb
-.section sncxa
-.section sncxb
-.section sncya
-.section sncyb
-.section sncza
-.section snczb
-.section snc1a
-.section snc1b
-.section snc2a
-.section snc2b
-.section snc3a
-.section snc3b
-.section snc4a
-.section snc4b
-.section snc5a
-.section snc5b
-.section snc6a
-.section snc6b
-.section snc7a
-.section snc7b
-.section snc8a
-.section snc8b
-.section snc9a
-.section snc9b
-.section snc0a
-.section snc0b
-.section sndaa
-.section sndab
-.section sndba
-.section sndbb
-.section sndca
-.section sndcb
-.section sndda
-.section snddb
-.section sndea
-.section sndeb
-.section sndfa
-.section sndfb
-.section sndga
-.section sndgb
-.section sndha
-.section sndhb
-.section sndia
-.section sndib
-.section sndja
-.section sndjb
-.section sndka
-.section sndkb
-.section sndla
-.section sndlb
-.section sndma
-.section sndmb
-.section sndna
-.section sndnb
-.section sndoa
-.section sndob
-.section sndpa
-.section sndpb
-.section sndqa
-.section sndqb
-.section sndra
-.section sndrb
-.section sndsa
-.section sndsb
-.section sndta
-.section sndtb
-.section sndua
-.section sndub
-.section sndva
-.section sndvb
-.section sndwa
-.section sndwb
-.section sndxa
-.section sndxb
-.section sndya
-.section sndyb
-.section sndza
-.section sndzb
-.section snd1a
-.section snd1b
-.section snd2a
-.section snd2b
-.section snd3a
-.section snd3b
-.section snd4a
-.section snd4b
-.section snd5a
-.section snd5b
-.section snd6a
-.section snd6b
-.section snd7a
-.section snd7b
-.section snd8a
-.section snd8b
-.section snd9a
-.section snd9b
-.section snd0a
-.section snd0b
-.section sneaa
-.section sneab
-.section sneba
-.section snebb
-.section sneca
-.section snecb
-.section sneda
-.section snedb
-.section sneea
-.section sneeb
-.section snefa
-.section snefb
-.section snega
-.section snegb
-.section sneha
-.section snehb
-.section sneia
-.section sneib
-.section sneja
-.section snejb
-.section sneka
-.section snekb
-.section snela
-.section snelb
-.section snema
-.section snemb
-.section snena
-.section snenb
-.section sneoa
-.section sneob
-.section snepa
-.section snepb
-.section sneqa
-.section sneqb
-.section snera
-.section snerb
-.section snesa
-.section snesb
-.section sneta
-.section snetb
-.section sneua
-.section sneub
-.section sneva
-.section snevb
-.section snewa
-.section snewb
-.section snexa
-.section snexb
-.section sneya
-.section sneyb
-.section sneza
-.section snezb
-.section sne1a
-.section sne1b
-.section sne2a
-.section sne2b
-.section sne3a
-.section sne3b
-.section sne4a
-.section sne4b
-.section sne5a
-.section sne5b
-.section sne6a
-.section sne6b
-.section sne7a
-.section sne7b
-.section sne8a
-.section sne8b
-.section sne9a
-.section sne9b
-.section sne0a
-.section sne0b
-.section snfaa
-.section snfab
-.section snfba
-.section snfbb
-.section snfca
-.section snfcb
-.section snfda
-.section snfdb
-.section snfea
-.section snfeb
-.section snffa
-.section snffb
-.section snfga
-.section snfgb
-.section snfha
-.section snfhb
-.section snfia
-.section snfib
-.section snfja
-.section snfjb
-.section snfka
-.section snfkb
-.section snfla
-.section snflb
-.section snfma
-.section snfmb
-.section snfna
-.section snfnb
-.section snfoa
-.section snfob
-.section snfpa
-.section snfpb
-.section snfqa
-.section snfqb
-.section snfra
-.section snfrb
-.section snfsa
-.section snfsb
-.section snfta
-.section snftb
-.section snfua
-.section snfub
-.section snfva
-.section snfvb
-.section snfwa
-.section snfwb
-.section snfxa
-.section snfxb
-.section snfya
-.section snfyb
-.section snfza
-.section snfzb
-.section snf1a
-.section snf1b
-.section snf2a
-.section snf2b
-.section snf3a
-.section snf3b
-.section snf4a
-.section snf4b
-.section snf5a
-.section snf5b
-.section snf6a
-.section snf6b
-.section snf7a
-.section snf7b
-.section snf8a
-.section snf8b
-.section snf9a
-.section snf9b
-.section snf0a
-.section snf0b
-.section sngaa
-.section sngab
-.section sngba
-.section sngbb
-.section sngca
-.section sngcb
-.section sngda
-.section sngdb
-.section sngea
-.section sngeb
-.section sngfa
-.section sngfb
-.section sngga
-.section snggb
-.section sngha
-.section snghb
-.section sngia
-.section sngib
-.section sngja
-.section sngjb
-.section sngka
-.section sngkb
-.section sngla
-.section snglb
-.section sngma
-.section sngmb
-.section sngna
-.section sngnb
-.section sngoa
-.section sngob
-.section sngpa
-.section sngpb
-.section sngqa
-.section sngqb
-.section sngra
-.section sngrb
-.section sngsa
-.section sngsb
-.section sngta
-.section sngtb
-.section sngua
-.section sngub
-.section sngva
-.section sngvb
-.section sngwa
-.section sngwb
-.section sngxa
-.section sngxb
-.section sngya
-.section sngyb
-.section sngza
-.section sngzb
-.section sng1a
-.section sng1b
-.section sng2a
-.section sng2b
-.section sng3a
-.section sng3b
-.section sng4a
-.section sng4b
-.section sng5a
-.section sng5b
-.section sng6a
-.section sng6b
-.section sng7a
-.section sng7b
-.section sng8a
-.section sng8b
-.section sng9a
-.section sng9b
-.section sng0a
-.section sng0b
-.section snhaa
-.section snhab
-.section snhba
-.section snhbb
-.section snhca
-.section snhcb
-.section snhda
-.section snhdb
-.section snhea
-.section snheb
-.section snhfa
-.section snhfb
-.section snhga
-.section snhgb
-.section snhha
-.section snhhb
-.section snhia
-.section snhib
-.section snhja
-.section snhjb
-.section snhka
-.section snhkb
-.section snhla
-.section snhlb
-.section snhma
-.section snhmb
-.section snhna
-.section snhnb
-.section snhoa
-.section snhob
-.section snhpa
-.section snhpb
-.section snhqa
-.section snhqb
-.section snhra
-.section snhrb
-.section snhsa
-.section snhsb
-.section snhta
-.section snhtb
-.section snhua
-.section snhub
-.section snhva
-.section snhvb
-.section snhwa
-.section snhwb
-.section snhxa
-.section snhxb
-.section snhya
-.section snhyb
-.section snhza
-.section snhzb
-.section snh1a
-.section snh1b
-.section snh2a
-.section snh2b
-.section snh3a
-.section snh3b
-.section snh4a
-.section snh4b
-.section snh5a
-.section snh5b
-.section snh6a
-.section snh6b
-.section snh7a
-.section snh7b
-.section snh8a
-.section snh8b
-.section snh9a
-.section snh9b
-.section snh0a
-.section snh0b
-.section sniaa
-.section sniab
-.section sniba
-.section snibb
-.section snica
-.section snicb
-.section snida
-.section snidb
-.section sniea
-.section snieb
-.section snifa
-.section snifb
-.section sniga
-.section snigb
-.section sniha
-.section snihb
-.section sniia
-.section sniib
-.section snija
-.section snijb
-.section snika
-.section snikb
-.section snila
-.section snilb
-.section snima
-.section snimb
-.section snina
-.section sninb
-.section snioa
-.section sniob
-.section snipa
-.section snipb
-.section sniqa
-.section sniqb
-.section snira
-.section snirb
-.section snisa
-.section snisb
-.section snita
-.section snitb
-.section sniua
-.section sniub
-.section sniva
-.section snivb
-.section sniwa
-.section sniwb
-.section snixa
-.section snixb
-.section sniya
-.section sniyb
-.section sniza
-.section snizb
-.section sni1a
-.section sni1b
-.section sni2a
-.section sni2b
-.section sni3a
-.section sni3b
-.section sni4a
-.section sni4b
-.section sni5a
-.section sni5b
-.section sni6a
-.section sni6b
-.section sni7a
-.section sni7b
-.section sni8a
-.section sni8b
-.section sni9a
-.section sni9b
-.section sni0a
-.section sni0b
-.section snjaa
-.section snjab
-.section snjba
-.section snjbb
-.section snjca
-.section snjcb
-.section snjda
-.section snjdb
-.section snjea
-.section snjeb
-.section snjfa
-.section snjfb
-.section snjga
-.section snjgb
-.section snjha
-.section snjhb
-.section snjia
-.section snjib
-.section snjja
-.section snjjb
-.section snjka
-.section snjkb
-.section snjla
-.section snjlb
-.section snjma
-.section snjmb
-.section snjna
-.section snjnb
-.section snjoa
-.section snjob
-.section snjpa
-.section snjpb
-.section snjqa
-.section snjqb
-.section snjra
-.section snjrb
-.section snjsa
-.section snjsb
-.section snjta
-.section snjtb
-.section snjua
-.section snjub
-.section snjva
-.section snjvb
-.section snjwa
-.section snjwb
-.section snjxa
-.section snjxb
-.section snjya
-.section snjyb
-.section snjza
-.section snjzb
-.section snj1a
-.section snj1b
-.section snj2a
-.section snj2b
-.section snj3a
-.section snj3b
-.section snj4a
-.section snj4b
-.section snj5a
-.section snj5b
-.section snj6a
-.section snj6b
-.section snj7a
-.section snj7b
-.section snj8a
-.section snj8b
-.section snj9a
-.section snj9b
-.section snj0a
-.section snj0b
-.section snkaa
-.section snkab
-.section snkba
-.section snkbb
-.section snkca
-.section snkcb
-.section snkda
-.section snkdb
-.section snkea
-.section snkeb
-.section snkfa
-.section snkfb
-.section snkga
-.section snkgb
-.section snkha
-.section snkhb
-.section snkia
-.section snkib
-.section snkja
-.section snkjb
-.section snkka
-.section snkkb
-.section snkla
-.section snklb
-.section snkma
-.section snkmb
-.section snkna
-.section snknb
-.section snkoa
-.section snkob
-.section snkpa
-.section snkpb
-.section snkqa
-.section snkqb
-.section snkra
-.section snkrb
-.section snksa
-.section snksb
-.section snkta
-.section snktb
-.section snkua
-.section snkub
-.section snkva
-.section snkvb
-.section snkwa
-.section snkwb
-.section snkxa
-.section snkxb
-.section snkya
-.section snkyb
-.section snkza
-.section snkzb
-.section snk1a
-.section snk1b
-.section snk2a
-.section snk2b
-.section snk3a
-.section snk3b
-.section snk4a
-.section snk4b
-.section snk5a
-.section snk5b
-.section snk6a
-.section snk6b
-.section snk7a
-.section snk7b
-.section snk8a
-.section snk8b
-.section snk9a
-.section snk9b
-.section snk0a
-.section snk0b
-.section snlaa
-.section snlab
-.section snlba
-.section snlbb
-.section snlca
-.section snlcb
-.section snlda
-.section snldb
-.section snlea
-.section snleb
-.section snlfa
-.section snlfb
-.section snlga
-.section snlgb
-.section snlha
-.section snlhb
-.section snlia
-.section snlib
-.section snlja
-.section snljb
-.section snlka
-.section snlkb
-.section snlla
-.section snllb
-.section snlma
-.section snlmb
-.section snlna
-.section snlnb
-.section snloa
-.section snlob
-.section snlpa
-.section snlpb
-.section snlqa
-.section snlqb
-.section snlra
-.section snlrb
-.section snlsa
-.section snlsb
-.section snlta
-.section snltb
-.section snlua
-.section snlub
-.section snlva
-.section snlvb
-.section snlwa
-.section snlwb
-.section snlxa
-.section snlxb
-.section snlya
-.section snlyb
-.section snlza
-.section snlzb
-.section snl1a
-.section snl1b
-.section snl2a
-.section snl2b
-.section snl3a
-.section snl3b
-.section snl4a
-.section snl4b
-.section snl5a
-.section snl5b
-.section snl6a
-.section snl6b
-.section snl7a
-.section snl7b
-.section snl8a
-.section snl8b
-.section snl9a
-.section snl9b
-.section snl0a
-.section snl0b
-.section snmaa
-.section snmab
-.section snmba
-.section snmbb
-.section snmca
-.section snmcb
-.section snmda
-.section snmdb
-.section snmea
-.section snmeb
-.section snmfa
-.section snmfb
-.section snmga
-.section snmgb
-.section snmha
-.section snmhb
-.section snmia
-.section snmib
-.section snmja
-.section snmjb
-.section snmka
-.section snmkb
-.section snmla
-.section snmlb
-.section snmma
-.section snmmb
-.section snmna
-.section snmnb
-.section snmoa
-.section snmob
-.section snmpa
-.section snmpb
-.section snmqa
-.section snmqb
-.section snmra
-.section snmrb
-.section snmsa
-.section snmsb
-.section snmta
-.section snmtb
-.section snmua
-.section snmub
-.section snmva
-.section snmvb
-.section snmwa
-.section snmwb
-.section snmxa
-.section snmxb
-.section snmya
-.section snmyb
-.section snmza
-.section snmzb
-.section snm1a
-.section snm1b
-.section snm2a
-.section snm2b
-.section snm3a
-.section snm3b
-.section snm4a
-.section snm4b
-.section snm5a
-.section snm5b
-.section snm6a
-.section snm6b
-.section snm7a
-.section snm7b
-.section snm8a
-.section snm8b
-.section snm9a
-.section snm9b
-.section snm0a
-.section snm0b
-.section snnaa
-.section snnab
-.section snnba
-.section snnbb
-.section snnca
-.section snncb
-.section snnda
-.section snndb
-.section snnea
-.section snneb
-.section snnfa
-.section snnfb
-.section snnga
-.section snngb
-.section snnha
-.section snnhb
-.section snnia
-.section snnib
-.section snnja
-.section snnjb
-.section snnka
-.section snnkb
-.section snnla
-.section snnlb
-.section snnma
-.section snnmb
-.section snnna
-.section snnnb
-.section snnoa
-.section snnob
-.section snnpa
-.section snnpb
-.section snnqa
-.section snnqb
-.section snnra
-.section snnrb
-.section snnsa
-.section snnsb
-.section snnta
-.section snntb
-.section snnua
-.section snnub
-.section snnva
-.section snnvb
-.section snnwa
-.section snnwb
-.section snnxa
-.section snnxb
-.section snnya
-.section snnyb
-.section snnza
-.section snnzb
-.section snn1a
-.section snn1b
-.section snn2a
-.section snn2b
-.section snn3a
-.section snn3b
-.section snn4a
-.section snn4b
-.section snn5a
-.section snn5b
-.section snn6a
-.section snn6b
-.section snn7a
-.section snn7b
-.section snn8a
-.section snn8b
-.section snn9a
-.section snn9b
-.section snn0a
-.section snn0b
-.section snoaa
-.section snoab
-.section snoba
-.section snobb
-.section snoca
-.section snocb
-.section snoda
-.section snodb
-.section snoea
-.section snoeb
-.section snofa
-.section snofb
-.section snoga
-.section snogb
-.section snoha
-.section snohb
-.section snoia
-.section snoib
-.section snoja
-.section snojb
-.section snoka
-.section snokb
-.section snola
-.section snolb
-.section snoma
-.section snomb
-.section snona
-.section snonb
-.section snooa
-.section snoob
-.section snopa
-.section snopb
-.section snoqa
-.section snoqb
-.section snora
-.section snorb
-.section snosa
-.section snosb
-.section snota
-.section snotb
-.section snoua
-.section snoub
-.section snova
-.section snovb
-.section snowa
-.section snowb
-.section snoxa
-.section snoxb
-.section snoya
-.section snoyb
-.section snoza
-.section snozb
-.section sno1a
-.section sno1b
-.section sno2a
-.section sno2b
-.section sno3a
-.section sno3b
-.section sno4a
-.section sno4b
-.section sno5a
-.section sno5b
-.section sno6a
-.section sno6b
-.section sno7a
-.section sno7b
-.section sno8a
-.section sno8b
-.section sno9a
-.section sno9b
-.section sno0a
-.section sno0b
-.section snpaa
-.section snpab
-.section snpba
-.section snpbb
-.section snpca
-.section snpcb
-.section snpda
-.section snpdb
-.section snpea
-.section snpeb
-.section snpfa
-.section snpfb
-.section snpga
-.section snpgb
-.section snpha
-.section snphb
-.section snpia
-.section snpib
-.section snpja
-.section snpjb
-.section snpka
-.section snpkb
-.section snpla
-.section snplb
-.section snpma
-.section snpmb
-.section snpna
-.section snpnb
-.section snpoa
-.section snpob
-.section snppa
-.section snppb
-.section snpqa
-.section snpqb
-.section snpra
-.section snprb
-.section snpsa
-.section snpsb
-.section snpta
-.section snptb
-.section snpua
-.section snpub
-.section snpva
-.section snpvb
-.section snpwa
-.section snpwb
-.section snpxa
-.section snpxb
-.section snpya
-.section snpyb
-.section snpza
-.section snpzb
-.section snp1a
-.section snp1b
-.section snp2a
-.section snp2b
-.section snp3a
-.section snp3b
-.section snp4a
-.section snp4b
-.section snp5a
-.section snp5b
-.section snp6a
-.section snp6b
-.section snp7a
-.section snp7b
-.section snp8a
-.section snp8b
-.section snp9a
-.section snp9b
-.section snp0a
-.section snp0b
-.section snqaa
-.section snqab
-.section snqba
-.section snqbb
-.section snqca
-.section snqcb
-.section snqda
-.section snqdb
-.section snqea
-.section snqeb
-.section snqfa
-.section snqfb
-.section snqga
-.section snqgb
-.section snqha
-.section snqhb
-.section snqia
-.section snqib
-.section snqja
-.section snqjb
-.section snqka
-.section snqkb
-.section snqla
-.section snqlb
-.section snqma
-.section snqmb
-.section snqna
-.section snqnb
-.section snqoa
-.section snqob
-.section snqpa
-.section snqpb
-.section snqqa
-.section snqqb
-.section snqra
-.section snqrb
-.section snqsa
-.section snqsb
-.section snqta
-.section snqtb
-.section snqua
-.section snqub
-.section snqva
-.section snqvb
-.section snqwa
-.section snqwb
-.section snqxa
-.section snqxb
-.section snqya
-.section snqyb
-.section snqza
-.section snqzb
-.section snq1a
-.section snq1b
-.section snq2a
-.section snq2b
-.section snq3a
-.section snq3b
-.section snq4a
-.section snq4b
-.section snq5a
-.section snq5b
-.section snq6a
-.section snq6b
-.section snq7a
-.section snq7b
-.section snq8a
-.section snq8b
-.section snq9a
-.section snq9b
-.section snq0a
-.section snq0b
-.section snraa
-.section snrab
-.section snrba
-.section snrbb
-.section snrca
-.section snrcb
-.section snrda
-.section snrdb
-.section snrea
-.section snreb
-.section snrfa
-.section snrfb
-.section snrga
-.section snrgb
-.section snrha
-.section snrhb
-.section snria
-.section snrib
-.section snrja
-.section snrjb
-.section snrka
-.section snrkb
-.section snrla
-.section snrlb
-.section snrma
-.section snrmb
-.section snrna
-.section snrnb
-.section snroa
-.section snrob
-.section snrpa
-.section snrpb
-.section snrqa
-.section snrqb
-.section snrra
-.section snrrb
-.section snrsa
-.section snrsb
-.section snrta
-.section snrtb
-.section snrua
-.section snrub
-.section snrva
-.section snrvb
-.section snrwa
-.section snrwb
-.section snrxa
-.section snrxb
-.section snrya
-.section snryb
-.section snrza
-.section snrzb
-.section snr1a
-.section snr1b
-.section snr2a
-.section snr2b
-.section snr3a
-.section snr3b
-.section snr4a
-.section snr4b
-.section snr5a
-.section snr5b
-.section snr6a
-.section snr6b
-.section snr7a
-.section snr7b
-.section snr8a
-.section snr8b
-.section snr9a
-.section snr9b
-.section snr0a
-.section snr0b
-.section snsaa
-.section snsab
-.section snsba
-.section snsbb
-.section snsca
-.section snscb
-.section snsda
-.section snsdb
-.section snsea
-.section snseb
-.section snsfa
-.section snsfb
-.section snsga
-.section snsgb
-.section snsha
-.section snshb
-.section snsia
-.section snsib
-.section snsja
-.section snsjb
-.section snska
-.section snskb
-.section snsla
-.section snslb
-.section snsma
-.section snsmb
-.section snsna
-.section snsnb
-.section snsoa
-.section snsob
-.section snspa
-.section snspb
-.section snsqa
-.section snsqb
-.section snsra
-.section snsrb
-.section snssa
-.section snssb
-.section snsta
-.section snstb
-.section snsua
-.section snsub
-.section snsva
-.section snsvb
-.section snswa
-.section snswb
-.section snsxa
-.section snsxb
-.section snsya
-.section snsyb
-.section snsza
-.section snszb
-.section sns1a
-.section sns1b
-.section sns2a
-.section sns2b
-.section sns3a
-.section sns3b
-.section sns4a
-.section sns4b
-.section sns5a
-.section sns5b
-.section sns6a
-.section sns6b
-.section sns7a
-.section sns7b
-.section sns8a
-.section sns8b
-.section sns9a
-.section sns9b
-.section sns0a
-.section sns0b
-.section sntaa
-.section sntab
-.section sntba
-.section sntbb
-.section sntca
-.section sntcb
-.section sntda
-.section sntdb
-.section sntea
-.section snteb
-.section sntfa
-.section sntfb
-.section sntga
-.section sntgb
-.section sntha
-.section snthb
-.section sntia
-.section sntib
-.section sntja
-.section sntjb
-.section sntka
-.section sntkb
-.section sntla
-.section sntlb
-.section sntma
-.section sntmb
-.section sntna
-.section sntnb
-.section sntoa
-.section sntob
-.section sntpa
-.section sntpb
-.section sntqa
-.section sntqb
-.section sntra
-.section sntrb
-.section sntsa
-.section sntsb
-.section sntta
-.section snttb
-.section sntua
-.section sntub
-.section sntva
-.section sntvb
-.section sntwa
-.section sntwb
-.section sntxa
-.section sntxb
-.section sntya
-.section sntyb
-.section sntza
-.section sntzb
-.section snt1a
-.section snt1b
-.section snt2a
-.section snt2b
-.section snt3a
-.section snt3b
-.section snt4a
-.section snt4b
-.section snt5a
-.section snt5b
-.section snt6a
-.section snt6b
-.section snt7a
-.section snt7b
-.section snt8a
-.section snt8b
-.section snt9a
-.section snt9b
-.section snt0a
-.section snt0b
-.section snuaa
-.section snuab
-.section snuba
-.section snubb
-.section snuca
-.section snucb
-.section snuda
-.section snudb
-.section snuea
-.section snueb
-.section snufa
-.section snufb
-.section snuga
-.section snugb
-.section snuha
-.section snuhb
-.section snuia
-.section snuib
-.section snuja
-.section snujb
-.section snuka
-.section snukb
-.section snula
-.section snulb
-.section snuma
-.section snumb
-.section snuna
-.section snunb
-.section snuoa
-.section snuob
-.section snupa
-.section snupb
-.section snuqa
-.section snuqb
-.section snura
-.section snurb
-.section snusa
-.section snusb
-.section snuta
-.section snutb
-.section snuua
-.section snuub
-.section snuva
-.section snuvb
-.section snuwa
-.section snuwb
-.section snuxa
-.section snuxb
-.section snuya
-.section snuyb
-.section snuza
-.section snuzb
-.section snu1a
-.section snu1b
-.section snu2a
-.section snu2b
-.section snu3a
-.section snu3b
-.section snu4a
-.section snu4b
-.section snu5a
-.section snu5b
-.section snu6a
-.section snu6b
-.section snu7a
-.section snu7b
-.section snu8a
-.section snu8b
-.section snu9a
-.section snu9b
-.section snu0a
-.section snu0b
-.section snvaa
-.section snvab
-.section snvba
-.section snvbb
-.section snvca
-.section snvcb
-.section snvda
-.section snvdb
-.section snvea
-.section snveb
-.section snvfa
-.section snvfb
-.section snvga
-.section snvgb
-.section snvha
-.section snvhb
-.section snvia
-.section snvib
-.section snvja
-.section snvjb
-.section snvka
-.section snvkb
-.section snvla
-.section snvlb
-.section snvma
-.section snvmb
-.section snvna
-.section snvnb
-.section snvoa
-.section snvob
-.section snvpa
-.section snvpb
-.section snvqa
-.section snvqb
-.section snvra
-.section snvrb
-.section snvsa
-.section snvsb
-.section snvta
-.section snvtb
-.section snvua
-.section snvub
-.section snvva
-.section snvvb
-.section snvwa
-.section snvwb
-.section snvxa
-.section snvxb
-.section snvya
-.section snvyb
-.section snvza
-.section snvzb
-.section snv1a
-.section snv1b
-.section snv2a
-.section snv2b
-.section snv3a
-.section snv3b
-.section snv4a
-.section snv4b
-.section snv5a
-.section snv5b
-.section snv6a
-.section snv6b
-.section snv7a
-.section snv7b
-.section snv8a
-.section snv8b
-.section snv9a
-.section snv9b
-.section snv0a
-.section snv0b
-.section snwaa
-.section snwab
-.section snwba
-.section snwbb
-.section snwca
-.section snwcb
-.section snwda
-.section snwdb
-.section snwea
-.section snweb
-.section snwfa
-.section snwfb
-.section snwga
-.section snwgb
-.section snwha
-.section snwhb
-.section snwia
-.section snwib
-.section snwja
-.section snwjb
-.section snwka
-.section snwkb
-.section snwla
-.section snwlb
-.section snwma
-.section snwmb
-.section snwna
-.section snwnb
-.section snwoa
-.section snwob
-.section snwpa
-.section snwpb
-.section snwqa
-.section snwqb
-.section snwra
-.section snwrb
-.section snwsa
-.section snwsb
-.section snwta
-.section snwtb
-.section snwua
-.section snwub
-.section snwva
-.section snwvb
-.section snwwa
-.section snwwb
-.section snwxa
-.section snwxb
-.section snwya
-.section snwyb
-.section snwza
-.section snwzb
-.section snw1a
-.section snw1b
-.section snw2a
-.section snw2b
-.section snw3a
-.section snw3b
-.section snw4a
-.section snw4b
-.section snw5a
-.section snw5b
-.section snw6a
-.section snw6b
-.section snw7a
-.section snw7b
-.section snw8a
-.section snw8b
-.section snw9a
-.section snw9b
-.section snw0a
-.section snw0b
-.section snxaa
-.section snxab
-.section snxba
-.section snxbb
-.section snxca
-.section snxcb
-.section snxda
-.section snxdb
-.section snxea
-.section snxeb
-.section snxfa
-.section snxfb
-.section snxga
-.section snxgb
-.section snxha
-.section snxhb
-.section snxia
-.section snxib
-.section snxja
-.section snxjb
-.section snxka
-.section snxkb
-.section snxla
-.section snxlb
-.section snxma
-.section snxmb
-.section snxna
-.section snxnb
-.section snxoa
-.section snxob
-.section snxpa
-.section snxpb
-.section snxqa
-.section snxqb
-.section snxra
-.section snxrb
-.section snxsa
-.section snxsb
-.section snxta
-.section snxtb
-.section snxua
-.section snxub
-.section snxva
-.section snxvb
-.section snxwa
-.section snxwb
-.section snxxa
-.section snxxb
-.section snxya
-.section snxyb
-.section snxza
-.section snxzb
-.section snx1a
-.section snx1b
-.section snx2a
-.section snx2b
-.section snx3a
-.section snx3b
-.section snx4a
-.section snx4b
-.section snx5a
-.section snx5b
-.section snx6a
-.section snx6b
-.section snx7a
-.section snx7b
-.section snx8a
-.section snx8b
-.section snx9a
-.section snx9b
-.section snx0a
-.section snx0b
-.section snyaa
-.section snyab
-.section snyba
-.section snybb
-.section snyca
-.section snycb
-.section snyda
-.section snydb
-.section snyea
-.section snyeb
-.section snyfa
-.section snyfb
-.section snyga
-.section snygb
-.section snyha
-.section snyhb
-.section snyia
-.section snyib
-.section snyja
-.section snyjb
-.section snyka
-.section snykb
-.section snyla
-.section snylb
-.section snyma
-.section snymb
-.section snyna
-.section snynb
-.section snyoa
-.section snyob
-.section snypa
-.section snypb
-.section snyqa
-.section snyqb
-.section snyra
-.section snyrb
-.section snysa
-.section snysb
-.section snyta
-.section snytb
-.section snyua
-.section snyub
-.section snyva
-.section snyvb
-.section snywa
-.section snywb
-.section snyxa
-.section snyxb
-.section snyya
-.section snyyb
-.section snyza
-.section snyzb
-.section sny1a
-.section sny1b
-.section sny2a
-.section sny2b
-.section sny3a
-.section sny3b
-.section sny4a
-.section sny4b
-.section sny5a
-.section sny5b
-.section sny6a
-.section sny6b
-.section sny7a
-.section sny7b
-.section sny8a
-.section sny8b
-.section sny9a
-.section sny9b
-.section sny0a
-.section sny0b
-.section snzaa
-.section snzab
-.section snzba
-.section snzbb
-.section snzca
-.section snzcb
-.section snzda
-.section snzdb
-.section snzea
-.section snzeb
-.section snzfa
-.section snzfb
-.section snzga
-.section snzgb
-.section snzha
-.section snzhb
-.section snzia
-.section snzib
-.section snzja
-.section snzjb
-.section snzka
-.section snzkb
-.section snzla
-.section snzlb
-.section snzma
-.section snzmb
-.section snzna
-.section snznb
-.section snzoa
-.section snzob
-.section snzpa
-.section snzpb
-.section snzqa
-.section snzqb
-.section snzra
-.section snzrb
-.section snzsa
-.section snzsb
-.section snzta
-.section snztb
-.section snzua
-.section snzub
-.section snzva
-.section snzvb
-.section snzwa
-.section snzwb
-.section snzxa
-.section snzxb
-.section snzya
-.section snzyb
-.section snzza
-.section snzzb
-.section snz1a
-.section snz1b
-.section snz2a
-.section snz2b
-.section snz3a
-.section snz3b
-.section snz4a
-.section snz4b
-.section snz5a
-.section snz5b
-.section snz6a
-.section snz6b
-.section snz7a
-.section snz7b
-.section snz8a
-.section snz8b
-.section snz9a
-.section snz9b
-.section snz0a
-.section snz0b
-.section sn1aa
-.section sn1ab
-.section sn1ba
-.section sn1bb
-.section sn1ca
-.section sn1cb
-.section sn1da
-.section sn1db
-.section sn1ea
-.section sn1eb
-.section sn1fa
-.section sn1fb
-.section sn1ga
-.section sn1gb
-.section sn1ha
-.section sn1hb
-.section sn1ia
-.section sn1ib
-.section sn1ja
-.section sn1jb
-.section sn1ka
-.section sn1kb
-.section sn1la
-.section sn1lb
-.section sn1ma
-.section sn1mb
-.section sn1na
-.section sn1nb
-.section sn1oa
-.section sn1ob
-.section sn1pa
-.section sn1pb
-.section sn1qa
-.section sn1qb
-.section sn1ra
-.section sn1rb
-.section sn1sa
-.section sn1sb
-.section sn1ta
-.section sn1tb
-.section sn1ua
-.section sn1ub
-.section sn1va
-.section sn1vb
-.section sn1wa
-.section sn1wb
-.section sn1xa
-.section sn1xb
-.section sn1ya
-.section sn1yb
-.section sn1za
-.section sn1zb
-.section sn11a
-.section sn11b
-.section sn12a
-.section sn12b
-.section sn13a
-.section sn13b
-.section sn14a
-.section sn14b
-.section sn15a
-.section sn15b
-.section sn16a
-.section sn16b
-.section sn17a
-.section sn17b
-.section sn18a
-.section sn18b
-.section sn19a
-.section sn19b
-.section sn10a
-.section sn10b
-.section sn2aa
-.section sn2ab
-.section sn2ba
-.section sn2bb
-.section sn2ca
-.section sn2cb
-.section sn2da
-.section sn2db
-.section sn2ea
-.section sn2eb
-.section sn2fa
-.section sn2fb
-.section sn2ga
-.section sn2gb
-.section sn2ha
-.section sn2hb
-.section sn2ia
-.section sn2ib
-.section sn2ja
-.section sn2jb
-.section sn2ka
-.section sn2kb
-.section sn2la
-.section sn2lb
-.section sn2ma
-.section sn2mb
-.section sn2na
-.section sn2nb
-.section sn2oa
-.section sn2ob
-.section sn2pa
-.section sn2pb
-.section sn2qa
-.section sn2qb
-.section sn2ra
-.section sn2rb
-.section sn2sa
-.section sn2sb
-.section sn2ta
-.section sn2tb
-.section sn2ua
-.section sn2ub
-.section sn2va
-.section sn2vb
-.section sn2wa
-.section sn2wb
-.section sn2xa
-.section sn2xb
-.section sn2ya
-.section sn2yb
-.section sn2za
-.section sn2zb
-.section sn21a
-.section sn21b
-.section sn22a
-.section sn22b
-.section sn23a
-.section sn23b
-.section sn24a
-.section sn24b
-.section sn25a
-.section sn25b
-.section sn26a
-.section sn26b
-.section sn27a
-.section sn27b
-.section sn28a
-.section sn28b
-.section sn29a
-.section sn29b
-.section sn20a
-.section sn20b
-.section sn3aa
-.section sn3ab
-.section sn3ba
-.section sn3bb
-.section sn3ca
-.section sn3cb
-.section sn3da
-.section sn3db
-.section sn3ea
-.section sn3eb
-.section sn3fa
-.section sn3fb
-.section sn3ga
-.section sn3gb
-.section sn3ha
-.section sn3hb
-.section sn3ia
-.section sn3ib
-.section sn3ja
-.section sn3jb
-.section sn3ka
-.section sn3kb
-.section sn3la
-.section sn3lb
-.section sn3ma
-.section sn3mb
-.section sn3na
-.section sn3nb
-.section sn3oa
-.section sn3ob
-.section sn3pa
-.section sn3pb
-.section sn3qa
-.section sn3qb
-.section sn3ra
-.section sn3rb
-.section sn3sa
-.section sn3sb
-.section sn3ta
-.section sn3tb
-.section sn3ua
-.section sn3ub
-.section sn3va
-.section sn3vb
-.section sn3wa
-.section sn3wb
-.section sn3xa
-.section sn3xb
-.section sn3ya
-.section sn3yb
-.section sn3za
-.section sn3zb
-.section sn31a
-.section sn31b
-.section sn32a
-.section sn32b
-.section sn33a
-.section sn33b
-.section sn34a
-.section sn34b
-.section sn35a
-.section sn35b
-.section sn36a
-.section sn36b
-.section sn37a
-.section sn37b
-.section sn38a
-.section sn38b
-.section sn39a
-.section sn39b
-.section sn30a
-.section sn30b
-.section sn4aa
-.section sn4ab
-.section sn4ba
-.section sn4bb
-.section sn4ca
-.section sn4cb
-.section sn4da
-.section sn4db
-.section sn4ea
-.section sn4eb
-.section sn4fa
-.section sn4fb
-.section sn4ga
-.section sn4gb
-.section sn4ha
-.section sn4hb
-.section sn4ia
-.section sn4ib
-.section sn4ja
-.section sn4jb
-.section sn4ka
-.section sn4kb
-.section sn4la
-.section sn4lb
-.section sn4ma
-.section sn4mb
-.section sn4na
-.section sn4nb
-.section sn4oa
-.section sn4ob
-.section sn4pa
-.section sn4pb
-.section sn4qa
-.section sn4qb
-.section sn4ra
-.section sn4rb
-.section sn4sa
-.section sn4sb
-.section sn4ta
-.section sn4tb
-.section sn4ua
-.section sn4ub
-.section sn4va
-.section sn4vb
-.section sn4wa
-.section sn4wb
-.section sn4xa
-.section sn4xb
-.section sn4ya
-.section sn4yb
-.section sn4za
-.section sn4zb
-.section sn41a
-.section sn41b
-.section sn42a
-.section sn42b
-.section sn43a
-.section sn43b
-.section sn44a
-.section sn44b
-.section sn45a
-.section sn45b
-.section sn46a
-.section sn46b
-.section sn47a
-.section sn47b
-.section sn48a
-.section sn48b
-.section sn49a
-.section sn49b
-.section sn40a
-.section sn40b
-.section sn5aa
-.section sn5ab
-.section sn5ba
-.section sn5bb
-.section sn5ca
-.section sn5cb
-.section sn5da
-.section sn5db
-.section sn5ea
-.section sn5eb
-.section sn5fa
-.section sn5fb
-.section sn5ga
-.section sn5gb
-.section sn5ha
-.section sn5hb
-.section sn5ia
-.section sn5ib
-.section sn5ja
-.section sn5jb
-.section sn5ka
-.section sn5kb
-.section sn5la
-.section sn5lb
-.section sn5ma
-.section sn5mb
-.section sn5na
-.section sn5nb
-.section sn5oa
-.section sn5ob
-.section sn5pa
-.section sn5pb
-.section sn5qa
-.section sn5qb
-.section sn5ra
-.section sn5rb
-.section sn5sa
-.section sn5sb
-.section sn5ta
-.section sn5tb
-.section sn5ua
-.section sn5ub
-.section sn5va
-.section sn5vb
-.section sn5wa
-.section sn5wb
-.section sn5xa
-.section sn5xb
-.section sn5ya
-.section sn5yb
-.section sn5za
-.section sn5zb
-.section sn51a
-.section sn51b
-.section sn52a
-.section sn52b
-.section sn53a
-.section sn53b
-.section sn54a
-.section sn54b
-.section sn55a
-.section sn55b
-.section sn56a
-.section sn56b
-.section sn57a
-.section sn57b
-.section sn58a
-.section sn58b
-.section sn59a
-.section sn59b
-.section sn50a
-.section sn50b
-.section sn6aa
-.section sn6ab
-.section sn6ba
-.section sn6bb
-.section sn6ca
-.section sn6cb
-.section sn6da
-.section sn6db
-.section sn6ea
-.section sn6eb
-.section sn6fa
-.section sn6fb
-.section sn6ga
-.section sn6gb
-.section sn6ha
-.section sn6hb
-.section sn6ia
-.section sn6ib
-.section sn6ja
-.section sn6jb
-.section sn6ka
-.section sn6kb
-.section sn6la
-.section sn6lb
-.section sn6ma
-.section sn6mb
-.section sn6na
-.section sn6nb
-.section sn6oa
-.section sn6ob
-.section sn6pa
-.section sn6pb
-.section sn6qa
-.section sn6qb
-.section sn6ra
-.section sn6rb
-.section sn6sa
-.section sn6sb
-.section sn6ta
-.section sn6tb
-.section sn6ua
-.section sn6ub
-.section sn6va
-.section sn6vb
-.section sn6wa
-.section sn6wb
-.section sn6xa
-.section sn6xb
-.section sn6ya
-.section sn6yb
-.section sn6za
-.section sn6zb
-.section sn61a
-.section sn61b
-.section sn62a
-.section sn62b
-.section sn63a
-.section sn63b
-.section sn64a
-.section sn64b
-.section sn65a
-.section sn65b
-.section sn66a
-.section sn66b
-.section sn67a
-.section sn67b
-.section sn68a
-.section sn68b
-.section sn69a
-.section sn69b
-.section sn60a
-.section sn60b
-.section sn7aa
-.section sn7ab
-.section sn7ba
-.section sn7bb
-.section sn7ca
-.section sn7cb
-.section sn7da
-.section sn7db
-.section sn7ea
-.section sn7eb
-.section sn7fa
-.section sn7fb
-.section sn7ga
-.section sn7gb
-.section sn7ha
-.section sn7hb
-.section sn7ia
-.section sn7ib
-.section sn7ja
-.section sn7jb
-.section sn7ka
-.section sn7kb
-.section sn7la
-.section sn7lb
-.section sn7ma
-.section sn7mb
-.section sn7na
-.section sn7nb
-.section sn7oa
-.section sn7ob
-.section sn7pa
-.section sn7pb
-.section sn7qa
-.section sn7qb
-.section sn7ra
-.section sn7rb
-.section sn7sa
-.section sn7sb
-.section sn7ta
-.section sn7tb
-.section sn7ua
-.section sn7ub
-.section sn7va
-.section sn7vb
-.section sn7wa
-.section sn7wb
-.section sn7xa
-.section sn7xb
-.section sn7ya
-.section sn7yb
-.section sn7za
-.section sn7zb
-.section sn71a
-.section sn71b
-.section sn72a
-.section sn72b
-.section sn73a
-.section sn73b
-.section sn74a
-.section sn74b
-.section sn75a
-.section sn75b
-.section sn76a
-.section sn76b
-.section sn77a
-.section sn77b
-.section sn78a
-.section sn78b
-.section sn79a
-.section sn79b
-.section sn70a
-.section sn70b
-.section sn8aa
-.section sn8ab
-.section sn8ba
-.section sn8bb
-.section sn8ca
-.section sn8cb
-.section sn8da
-.section sn8db
-.section sn8ea
-.section sn8eb
-.section sn8fa
-.section sn8fb
-.section sn8ga
-.section sn8gb
-.section sn8ha
-.section sn8hb
-.section sn8ia
-.section sn8ib
-.section sn8ja
-.section sn8jb
-.section sn8ka
-.section sn8kb
-.section sn8la
-.section sn8lb
-.section sn8ma
-.section sn8mb
-.section sn8na
-.section sn8nb
-.section sn8oa
-.section sn8ob
-.section sn8pa
-.section sn8pb
-.section sn8qa
-.section sn8qb
-.section sn8ra
-.section sn8rb
-.section sn8sa
-.section sn8sb
-.section sn8ta
-.section sn8tb
-.section sn8ua
-.section sn8ub
-.section sn8va
-.section sn8vb
-.section sn8wa
-.section sn8wb
-.section sn8xa
-.section sn8xb
-.section sn8ya
-.section sn8yb
-.section sn8za
-.section sn8zb
-.section sn81a
-.section sn81b
-.section sn82a
-.section sn82b
-.section sn83a
-.section sn83b
-.section sn84a
-.section sn84b
-.section sn85a
-.section sn85b
-.section sn86a
-.section sn86b
-.section sn87a
-.section sn87b
-.section sn88a
-.section sn88b
-.section sn89a
-.section sn89b
-.section sn80a
-.section sn80b
-.section sn9aa
-.section sn9ab
-.section sn9ba
-.section sn9bb
-.section sn9ca
-.section sn9cb
-.section sn9da
-.section sn9db
-.section sn9ea
-.section sn9eb
-.section sn9fa
-.section sn9fb
-.section sn9ga
-.section sn9gb
-.section sn9ha
-.section sn9hb
-.section sn9ia
-.section sn9ib
-.section sn9ja
-.section sn9jb
-.section sn9ka
-.section sn9kb
-.section sn9la
-.section sn9lb
-.section sn9ma
-.section sn9mb
-.section sn9na
-.section sn9nb
-.section sn9oa
-.section sn9ob
-.section sn9pa
-.section sn9pb
-.section sn9qa
-.section sn9qb
-.section sn9ra
-.section sn9rb
-.section sn9sa
-.section sn9sb
-.section sn9ta
-.section sn9tb
-.section sn9ua
-.section sn9ub
-.section sn9va
-.section sn9vb
-.section sn9wa
-.section sn9wb
-.section sn9xa
-.section sn9xb
-.section sn9ya
-.section sn9yb
-.section sn9za
-.section sn9zb
-.section sn91a
-.section sn91b
-.section sn92a
-.section sn92b
-.section sn93a
-.section sn93b
-.section sn94a
-.section sn94b
-.section sn95a
-.section sn95b
-.section sn96a
-.section sn96b
-.section sn97a
-.section sn97b
-.section sn98a
-.section sn98b
-.section sn99a
-.section sn99b
-.section sn90a
-.section sn90b
-.section sn0aa
-.section sn0ab
-.section sn0ba
-.section sn0bb
-.section sn0ca
-.section sn0cb
-.section sn0da
-.section sn0db
-.section sn0ea
-.section sn0eb
-.section sn0fa
-.section sn0fb
-.section sn0ga
-.section sn0gb
-.section sn0ha
-.section sn0hb
-.section sn0ia
-.section sn0ib
-.section sn0ja
-.section sn0jb
-.section sn0ka
-.section sn0kb
-.section sn0la
-.section sn0lb
-.section sn0ma
-.section sn0mb
-.section sn0na
-.section sn0nb
-.section sn0oa
-.section sn0ob
-.section sn0pa
-.section sn0pb
-.section sn0qa
-.section sn0qb
-.section sn0ra
-.section sn0rb
-.section sn0sa
-.section sn0sb
-.section sn0ta
-.section sn0tb
-.section sn0ua
-.section sn0ub
-.section sn0va
-.section sn0vb
-.section sn0wa
-.section sn0wb
-.section sn0xa
-.section sn0xb
-.section sn0ya
-.section sn0yb
-.section sn0za
-.section sn0zb
-.section sn01a
-.section sn01b
-.section sn02a
-.section sn02b
-.section sn03a
-.section sn03b
-.section sn04a
-.section sn04b
-.section sn05a
-.section sn05b
-.section sn06a
-.section sn06b
-.section sn07a
-.section sn07b
-.section sn08a
-.section sn08b
-.section sn09a
-.section sn09b
-.section sn00a
-.section sn00b
-.section soaaa
-.section soaab
-.section soaba
-.section soabb
-.section soaca
-.section soacb
-.section soada
-.section soadb
-.section soaea
-.section soaeb
-.section soafa
-.section soafb
-.section soaga
-.section soagb
-.section soaha
-.section soahb
-.section soaia
-.section soaib
-.section soaja
-.section soajb
-.section soaka
-.section soakb
-.section soala
-.section soalb
-.section soama
-.section soamb
-.section soana
-.section soanb
-.section soaoa
-.section soaob
-.section soapa
-.section soapb
-.section soaqa
-.section soaqb
-.section soara
-.section soarb
-.section soasa
-.section soasb
-.section soata
-.section soatb
-.section soaua
-.section soaub
-.section soava
-.section soavb
-.section soawa
-.section soawb
-.section soaxa
-.section soaxb
-.section soaya
-.section soayb
-.section soaza
-.section soazb
-.section soa1a
-.section soa1b
-.section soa2a
-.section soa2b
-.section soa3a
-.section soa3b
-.section soa4a
-.section soa4b
-.section soa5a
-.section soa5b
-.section soa6a
-.section soa6b
-.section soa7a
-.section soa7b
-.section soa8a
-.section soa8b
-.section soa9a
-.section soa9b
-.section soa0a
-.section soa0b
-.section sobaa
-.section sobab
-.section sobba
-.section sobbb
-.section sobca
-.section sobcb
-.section sobda
-.section sobdb
-.section sobea
-.section sobeb
-.section sobfa
-.section sobfb
-.section sobga
-.section sobgb
-.section sobha
-.section sobhb
-.section sobia
-.section sobib
-.section sobja
-.section sobjb
-.section sobka
-.section sobkb
-.section sobla
-.section soblb
-.section sobma
-.section sobmb
-.section sobna
-.section sobnb
-.section soboa
-.section sobob
-.section sobpa
-.section sobpb
-.section sobqa
-.section sobqb
-.section sobra
-.section sobrb
-.section sobsa
-.section sobsb
-.section sobta
-.section sobtb
-.section sobua
-.section sobub
-.section sobva
-.section sobvb
-.section sobwa
-.section sobwb
-.section sobxa
-.section sobxb
-.section sobya
-.section sobyb
-.section sobza
-.section sobzb
-.section sob1a
-.section sob1b
-.section sob2a
-.section sob2b
-.section sob3a
-.section sob3b
-.section sob4a
-.section sob4b
-.section sob5a
-.section sob5b
-.section sob6a
-.section sob6b
-.section sob7a
-.section sob7b
-.section sob8a
-.section sob8b
-.section sob9a
-.section sob9b
-.section sob0a
-.section sob0b
-.section socaa
-.section socab
-.section socba
-.section socbb
-.section socca
-.section soccb
-.section socda
-.section socdb
-.section socea
-.section soceb
-.section socfa
-.section socfb
-.section socga
-.section socgb
-.section socha
-.section sochb
-.section socia
-.section socib
-.section socja
-.section socjb
-.section socka
-.section sockb
-.section socla
-.section soclb
-.section socma
-.section socmb
-.section socna
-.section socnb
-.section socoa
-.section socob
-.section socpa
-.section socpb
-.section socqa
-.section socqb
-.section socra
-.section socrb
-.section socsa
-.section socsb
-.section socta
-.section soctb
-.section socua
-.section socub
-.section socva
-.section socvb
-.section socwa
-.section socwb
-.section socxa
-.section socxb
-.section socya
-.section socyb
-.section socza
-.section soczb
-.section soc1a
-.section soc1b
-.section soc2a
-.section soc2b
-.section soc3a
-.section soc3b
-.section soc4a
-.section soc4b
-.section soc5a
-.section soc5b
-.section soc6a
-.section soc6b
-.section soc7a
-.section soc7b
-.section soc8a
-.section soc8b
-.section soc9a
-.section soc9b
-.section soc0a
-.section soc0b
-.section sodaa
-.section sodab
-.section sodba
-.section sodbb
-.section sodca
-.section sodcb
-.section sodda
-.section soddb
-.section sodea
-.section sodeb
-.section sodfa
-.section sodfb
-.section sodga
-.section sodgb
-.section sodha
-.section sodhb
-.section sodia
-.section sodib
-.section sodja
-.section sodjb
-.section sodka
-.section sodkb
-.section sodla
-.section sodlb
-.section sodma
-.section sodmb
-.section sodna
-.section sodnb
-.section sodoa
-.section sodob
-.section sodpa
-.section sodpb
-.section sodqa
-.section sodqb
-.section sodra
-.section sodrb
-.section sodsa
-.section sodsb
-.section sodta
-.section sodtb
-.section sodua
-.section sodub
-.section sodva
-.section sodvb
-.section sodwa
-.section sodwb
-.section sodxa
-.section sodxb
-.section sodya
-.section sodyb
-.section sodza
-.section sodzb
-.section sod1a
-.section sod1b
-.section sod2a
-.section sod2b
-.section sod3a
-.section sod3b
-.section sod4a
-.section sod4b
-.section sod5a
-.section sod5b
-.section sod6a
-.section sod6b
-.section sod7a
-.section sod7b
-.section sod8a
-.section sod8b
-.section sod9a
-.section sod9b
-.section sod0a
-.section sod0b
-.section soeaa
-.section soeab
-.section soeba
-.section soebb
-.section soeca
-.section soecb
-.section soeda
-.section soedb
-.section soeea
-.section soeeb
-.section soefa
-.section soefb
-.section soega
-.section soegb
-.section soeha
-.section soehb
-.section soeia
-.section soeib
-.section soeja
-.section soejb
-.section soeka
-.section soekb
-.section soela
-.section soelb
-.section soema
-.section soemb
-.section soena
-.section soenb
-.section soeoa
-.section soeob
-.section soepa
-.section soepb
-.section soeqa
-.section soeqb
-.section soera
-.section soerb
-.section soesa
-.section soesb
-.section soeta
-.section soetb
-.section soeua
-.section soeub
-.section soeva
-.section soevb
-.section soewa
-.section soewb
-.section soexa
-.section soexb
-.section soeya
-.section soeyb
-.section soeza
-.section soezb
-.section soe1a
-.section soe1b
-.section soe2a
-.section soe2b
-.section soe3a
-.section soe3b
-.section soe4a
-.section soe4b
-.section soe5a
-.section soe5b
-.section soe6a
-.section soe6b
-.section soe7a
-.section soe7b
-.section soe8a
-.section soe8b
-.section soe9a
-.section soe9b
-.section soe0a
-.section soe0b
-.section sofaa
-.section sofab
-.section sofba
-.section sofbb
-.section sofca
-.section sofcb
-.section sofda
-.section sofdb
-.section sofea
-.section sofeb
-.section soffa
-.section soffb
-.section sofga
-.section sofgb
-.section sofha
-.section sofhb
-.section sofia
-.section sofib
-.section sofja
-.section sofjb
-.section sofka
-.section sofkb
-.section sofla
-.section soflb
-.section sofma
-.section sofmb
-.section sofna
-.section sofnb
-.section sofoa
-.section sofob
-.section sofpa
-.section sofpb
-.section sofqa
-.section sofqb
-.section sofra
-.section sofrb
-.section sofsa
-.section sofsb
-.section softa
-.section softb
-.section sofua
-.section sofub
-.section sofva
-.section sofvb
-.section sofwa
-.section sofwb
-.section sofxa
-.section sofxb
-.section sofya
-.section sofyb
-.section sofza
-.section sofzb
-.section sof1a
-.section sof1b
-.section sof2a
-.section sof2b
-.section sof3a
-.section sof3b
-.section sof4a
-.section sof4b
-.section sof5a
-.section sof5b
-.section sof6a
-.section sof6b
-.section sof7a
-.section sof7b
-.section sof8a
-.section sof8b
-.section sof9a
-.section sof9b
-.section sof0a
-.section sof0b
-.section sogaa
-.section sogab
-.section sogba
-.section sogbb
-.section sogca
-.section sogcb
-.section sogda
-.section sogdb
-.section sogea
-.section sogeb
-.section sogfa
-.section sogfb
-.section sogga
-.section soggb
-.section sogha
-.section soghb
-.section sogia
-.section sogib
-.section sogja
-.section sogjb
-.section sogka
-.section sogkb
-.section sogla
-.section soglb
-.section sogma
-.section sogmb
-.section sogna
-.section sognb
-.section sogoa
-.section sogob
-.section sogpa
-.section sogpb
-.section sogqa
-.section sogqb
-.section sogra
-.section sogrb
-.section sogsa
-.section sogsb
-.section sogta
-.section sogtb
-.section sogua
-.section sogub
-.section sogva
-.section sogvb
-.section sogwa
-.section sogwb
-.section sogxa
-.section sogxb
-.section sogya
-.section sogyb
-.section sogza
-.section sogzb
-.section sog1a
-.section sog1b
-.section sog2a
-.section sog2b
-.section sog3a
-.section sog3b
-.section sog4a
-.section sog4b
-.section sog5a
-.section sog5b
-.section sog6a
-.section sog6b
-.section sog7a
-.section sog7b
-.section sog8a
-.section sog8b
-.section sog9a
-.section sog9b
-.section sog0a
-.section sog0b
-.section sohaa
-.section sohab
-.section sohba
-.section sohbb
-.section sohca
-.section sohcb
-.section sohda
-.section sohdb
-.section sohea
-.section soheb
-.section sohfa
-.section sohfb
-.section sohga
-.section sohgb
-.section sohha
-.section sohhb
-.section sohia
-.section sohib
-.section sohja
-.section sohjb
-.section sohka
-.section sohkb
-.section sohla
-.section sohlb
-.section sohma
-.section sohmb
-.section sohna
-.section sohnb
-.section sohoa
-.section sohob
-.section sohpa
-.section sohpb
-.section sohqa
-.section sohqb
-.section sohra
-.section sohrb
-.section sohsa
-.section sohsb
-.section sohta
-.section sohtb
-.section sohua
-.section sohub
-.section sohva
-.section sohvb
-.section sohwa
-.section sohwb
-.section sohxa
-.section sohxb
-.section sohya
-.section sohyb
-.section sohza
-.section sohzb
-.section soh1a
-.section soh1b
-.section soh2a
-.section soh2b
-.section soh3a
-.section soh3b
-.section soh4a
-.section soh4b
-.section soh5a
-.section soh5b
-.section soh6a
-.section soh6b
-.section soh7a
-.section soh7b
-.section soh8a
-.section soh8b
-.section soh9a
-.section soh9b
-.section soh0a
-.section soh0b
-.section soiaa
-.section soiab
-.section soiba
-.section soibb
-.section soica
-.section soicb
-.section soida
-.section soidb
-.section soiea
-.section soieb
-.section soifa
-.section soifb
-.section soiga
-.section soigb
-.section soiha
-.section soihb
-.section soiia
-.section soiib
-.section soija
-.section soijb
-.section soika
-.section soikb
-.section soila
-.section soilb
-.section soima
-.section soimb
-.section soina
-.section soinb
-.section soioa
-.section soiob
-.section soipa
-.section soipb
-.section soiqa
-.section soiqb
-.section soira
-.section soirb
-.section soisa
-.section soisb
-.section soita
-.section soitb
-.section soiua
-.section soiub
-.section soiva
-.section soivb
-.section soiwa
-.section soiwb
-.section soixa
-.section soixb
-.section soiya
-.section soiyb
-.section soiza
-.section soizb
-.section soi1a
-.section soi1b
-.section soi2a
-.section soi2b
-.section soi3a
-.section soi3b
-.section soi4a
-.section soi4b
-.section soi5a
-.section soi5b
-.section soi6a
-.section soi6b
-.section soi7a
-.section soi7b
-.section soi8a
-.section soi8b
-.section soi9a
-.section soi9b
-.section soi0a
-.section soi0b
-.section sojaa
-.section sojab
-.section sojba
-.section sojbb
-.section sojca
-.section sojcb
-.section sojda
-.section sojdb
-.section sojea
-.section sojeb
-.section sojfa
-.section sojfb
-.section sojga
-.section sojgb
-.section sojha
-.section sojhb
-.section sojia
-.section sojib
-.section sojja
-.section sojjb
-.section sojka
-.section sojkb
-.section sojla
-.section sojlb
-.section sojma
-.section sojmb
-.section sojna
-.section sojnb
-.section sojoa
-.section sojob
-.section sojpa
-.section sojpb
-.section sojqa
-.section sojqb
-.section sojra
-.section sojrb
-.section sojsa
-.section sojsb
-.section sojta
-.section sojtb
-.section sojua
-.section sojub
-.section sojva
-.section sojvb
-.section sojwa
-.section sojwb
-.section sojxa
-.section sojxb
-.section sojya
-.section sojyb
-.section sojza
-.section sojzb
-.section soj1a
-.section soj1b
-.section soj2a
-.section soj2b
-.section soj3a
-.section soj3b
-.section soj4a
-.section soj4b
-.section soj5a
-.section soj5b
-.section soj6a
-.section soj6b
-.section soj7a
-.section soj7b
-.section soj8a
-.section soj8b
-.section soj9a
-.section soj9b
-.section soj0a
-.section soj0b
-.section sokaa
-.section sokab
-.section sokba
-.section sokbb
-.section sokca
-.section sokcb
-.section sokda
-.section sokdb
-.section sokea
-.section sokeb
-.section sokfa
-.section sokfb
-.section sokga
-.section sokgb
-.section sokha
-.section sokhb
-.section sokia
-.section sokib
-.section sokja
-.section sokjb
-.section sokka
-.section sokkb
-.section sokla
-.section soklb
-.section sokma
-.section sokmb
-.section sokna
-.section soknb
-.section sokoa
-.section sokob
-.section sokpa
-.section sokpb
-.section sokqa
-.section sokqb
-.section sokra
-.section sokrb
-.section soksa
-.section soksb
-.section sokta
-.section soktb
-.section sokua
-.section sokub
-.section sokva
-.section sokvb
-.section sokwa
-.section sokwb
-.section sokxa
-.section sokxb
-.section sokya
-.section sokyb
-.section sokza
-.section sokzb
-.section sok1a
-.section sok1b
-.section sok2a
-.section sok2b
-.section sok3a
-.section sok3b
-.section sok4a
-.section sok4b
-.section sok5a
-.section sok5b
-.section sok6a
-.section sok6b
-.section sok7a
-.section sok7b
-.section sok8a
-.section sok8b
-.section sok9a
-.section sok9b
-.section sok0a
-.section sok0b
-.section solaa
-.section solab
-.section solba
-.section solbb
-.section solca
-.section solcb
-.section solda
-.section soldb
-.section solea
-.section soleb
-.section solfa
-.section solfb
-.section solga
-.section solgb
-.section solha
-.section solhb
-.section solia
-.section solib
-.section solja
-.section soljb
-.section solka
-.section solkb
-.section solla
-.section sollb
-.section solma
-.section solmb
-.section solna
-.section solnb
-.section soloa
-.section solob
-.section solpa
-.section solpb
-.section solqa
-.section solqb
-.section solra
-.section solrb
-.section solsa
-.section solsb
-.section solta
-.section soltb
-.section solua
-.section solub
-.section solva
-.section solvb
-.section solwa
-.section solwb
-.section solxa
-.section solxb
-.section solya
-.section solyb
-.section solza
-.section solzb
-.section sol1a
-.section sol1b
-.section sol2a
-.section sol2b
-.section sol3a
-.section sol3b
-.section sol4a
-.section sol4b
-.section sol5a
-.section sol5b
-.section sol6a
-.section sol6b
-.section sol7a
-.section sol7b
-.section sol8a
-.section sol8b
-.section sol9a
-.section sol9b
-.section sol0a
-.section sol0b
-.section somaa
-.section somab
-.section somba
-.section sombb
-.section somca
-.section somcb
-.section somda
-.section somdb
-.section somea
-.section someb
-.section somfa
-.section somfb
-.section somga
-.section somgb
-.section somha
-.section somhb
-.section somia
-.section somib
-.section somja
-.section somjb
-.section somka
-.section somkb
-.section somla
-.section somlb
-.section somma
-.section sommb
-.section somna
-.section somnb
-.section somoa
-.section somob
-.section sompa
-.section sompb
-.section somqa
-.section somqb
-.section somra
-.section somrb
-.section somsa
-.section somsb
-.section somta
-.section somtb
-.section somua
-.section somub
-.section somva
-.section somvb
-.section somwa
-.section somwb
-.section somxa
-.section somxb
-.section somya
-.section somyb
-.section somza
-.section somzb
-.section som1a
-.section som1b
-.section som2a
-.section som2b
-.section som3a
-.section som3b
-.section som4a
-.section som4b
-.section som5a
-.section som5b
-.section som6a
-.section som6b
-.section som7a
-.section som7b
-.section som8a
-.section som8b
-.section som9a
-.section som9b
-.section som0a
-.section som0b
-.section sonaa
-.section sonab
-.section sonba
-.section sonbb
-.section sonca
-.section soncb
-.section sonda
-.section sondb
-.section sonea
-.section soneb
-.section sonfa
-.section sonfb
-.section songa
-.section songb
-.section sonha
-.section sonhb
-.section sonia
-.section sonib
-.section sonja
-.section sonjb
-.section sonka
-.section sonkb
-.section sonla
-.section sonlb
-.section sonma
-.section sonmb
-.section sonna
-.section sonnb
-.section sonoa
-.section sonob
-.section sonpa
-.section sonpb
-.section sonqa
-.section sonqb
-.section sonra
-.section sonrb
-.section sonsa
-.section sonsb
-.section sonta
-.section sontb
-.section sonua
-.section sonub
-.section sonva
-.section sonvb
-.section sonwa
-.section sonwb
-.section sonxa
-.section sonxb
-.section sonya
-.section sonyb
-.section sonza
-.section sonzb
-.section son1a
-.section son1b
-.section son2a
-.section son2b
-.section son3a
-.section son3b
-.section son4a
-.section son4b
-.section son5a
-.section son5b
-.section son6a
-.section son6b
-.section son7a
-.section son7b
-.section son8a
-.section son8b
-.section son9a
-.section son9b
-.section son0a
-.section son0b
-.section sooaa
-.section sooab
-.section sooba
-.section soobb
-.section sooca
-.section soocb
-.section sooda
-.section soodb
-.section sooea
-.section sooeb
-.section soofa
-.section soofb
-.section sooga
-.section soogb
-.section sooha
-.section soohb
-.section sooia
-.section sooib
-.section sooja
-.section soojb
-.section sooka
-.section sookb
-.section soola
-.section soolb
-.section sooma
-.section soomb
-.section soona
-.section soonb
-.section soooa
-.section sooob
-.section soopa
-.section soopb
-.section sooqa
-.section sooqb
-.section soora
-.section soorb
-.section soosa
-.section soosb
-.section soota
-.section sootb
-.section sooua
-.section sooub
-.section soova
-.section soovb
-.section soowa
-.section soowb
-.section sooxa
-.section sooxb
-.section sooya
-.section sooyb
-.section sooza
-.section soozb
-.section soo1a
-.section soo1b
-.section soo2a
-.section soo2b
-.section soo3a
-.section soo3b
-.section soo4a
-.section soo4b
-.section soo5a
-.section soo5b
-.section soo6a
-.section soo6b
-.section soo7a
-.section soo7b
-.section soo8a
-.section soo8b
-.section soo9a
-.section soo9b
-.section soo0a
-.section soo0b
-.section sopaa
-.section sopab
-.section sopba
-.section sopbb
-.section sopca
-.section sopcb
-.section sopda
-.section sopdb
-.section sopea
-.section sopeb
-.section sopfa
-.section sopfb
-.section sopga
-.section sopgb
-.section sopha
-.section sophb
-.section sopia
-.section sopib
-.section sopja
-.section sopjb
-.section sopka
-.section sopkb
-.section sopla
-.section soplb
-.section sopma
-.section sopmb
-.section sopna
-.section sopnb
-.section sopoa
-.section sopob
-.section soppa
-.section soppb
-.section sopqa
-.section sopqb
-.section sopra
-.section soprb
-.section sopsa
-.section sopsb
-.section sopta
-.section soptb
-.section sopua
-.section sopub
-.section sopva
-.section sopvb
-.section sopwa
-.section sopwb
-.section sopxa
-.section sopxb
-.section sopya
-.section sopyb
-.section sopza
-.section sopzb
-.section sop1a
-.section sop1b
-.section sop2a
-.section sop2b
-.section sop3a
-.section sop3b
-.section sop4a
-.section sop4b
-.section sop5a
-.section sop5b
-.section sop6a
-.section sop6b
-.section sop7a
-.section sop7b
-.section sop8a
-.section sop8b
-.section sop9a
-.section sop9b
-.section sop0a
-.section sop0b
-.section soqaa
-.section soqab
-.section soqba
-.section soqbb
-.section soqca
-.section soqcb
-.section soqda
-.section soqdb
-.section soqea
-.section soqeb
-.section soqfa
-.section soqfb
-.section soqga
-.section soqgb
-.section soqha
-.section soqhb
-.section soqia
-.section soqib
-.section soqja
-.section soqjb
-.section soqka
-.section soqkb
-.section soqla
-.section soqlb
-.section soqma
-.section soqmb
-.section soqna
-.section soqnb
-.section soqoa
-.section soqob
-.section soqpa
-.section soqpb
-.section soqqa
-.section soqqb
-.section soqra
-.section soqrb
-.section soqsa
-.section soqsb
-.section soqta
-.section soqtb
-.section soqua
-.section soqub
-.section soqva
-.section soqvb
-.section soqwa
-.section soqwb
-.section soqxa
-.section soqxb
-.section soqya
-.section soqyb
-.section soqza
-.section soqzb
-.section soq1a
-.section soq1b
-.section soq2a
-.section soq2b
-.section soq3a
-.section soq3b
-.section soq4a
-.section soq4b
-.section soq5a
-.section soq5b
-.section soq6a
-.section soq6b
-.section soq7a
-.section soq7b
-.section soq8a
-.section soq8b
-.section soq9a
-.section soq9b
-.section soq0a
-.section soq0b
-.section soraa
-.section sorab
-.section sorba
-.section sorbb
-.section sorca
-.section sorcb
-.section sorda
-.section sordb
-.section sorea
-.section soreb
-.section sorfa
-.section sorfb
-.section sorga
-.section sorgb
-.section sorha
-.section sorhb
-.section soria
-.section sorib
-.section sorja
-.section sorjb
-.section sorka
-.section sorkb
-.section sorla
-.section sorlb
-.section sorma
-.section sormb
-.section sorna
-.section sornb
-.section soroa
-.section sorob
-.section sorpa
-.section sorpb
-.section sorqa
-.section sorqb
-.section sorra
-.section sorrb
-.section sorsa
-.section sorsb
-.section sorta
-.section sortb
-.section sorua
-.section sorub
-.section sorva
-.section sorvb
-.section sorwa
-.section sorwb
-.section sorxa
-.section sorxb
-.section sorya
-.section soryb
-.section sorza
-.section sorzb
-.section sor1a
-.section sor1b
-.section sor2a
-.section sor2b
-.section sor3a
-.section sor3b
-.section sor4a
-.section sor4b
-.section sor5a
-.section sor5b
-.section sor6a
-.section sor6b
-.section sor7a
-.section sor7b
-.section sor8a
-.section sor8b
-.section sor9a
-.section sor9b
-.section sor0a
-.section sor0b
-.section sosaa
-.section sosab
-.section sosba
-.section sosbb
-.section sosca
-.section soscb
-.section sosda
-.section sosdb
-.section sosea
-.section soseb
-.section sosfa
-.section sosfb
-.section sosga
-.section sosgb
-.section sosha
-.section soshb
-.section sosia
-.section sosib
-.section sosja
-.section sosjb
-.section soska
-.section soskb
-.section sosla
-.section soslb
-.section sosma
-.section sosmb
-.section sosna
-.section sosnb
-.section sosoa
-.section sosob
-.section sospa
-.section sospb
-.section sosqa
-.section sosqb
-.section sosra
-.section sosrb
-.section sossa
-.section sossb
-.section sosta
-.section sostb
-.section sosua
-.section sosub
-.section sosva
-.section sosvb
-.section soswa
-.section soswb
-.section sosxa
-.section sosxb
-.section sosya
-.section sosyb
-.section sosza
-.section soszb
-.section sos1a
-.section sos1b
-.section sos2a
-.section sos2b
-.section sos3a
-.section sos3b
-.section sos4a
-.section sos4b
-.section sos5a
-.section sos5b
-.section sos6a
-.section sos6b
-.section sos7a
-.section sos7b
-.section sos8a
-.section sos8b
-.section sos9a
-.section sos9b
-.section sos0a
-.section sos0b
-.section sotaa
-.section sotab
-.section sotba
-.section sotbb
-.section sotca
-.section sotcb
-.section sotda
-.section sotdb
-.section sotea
-.section soteb
-.section sotfa
-.section sotfb
-.section sotga
-.section sotgb
-.section sotha
-.section sothb
-.section sotia
-.section sotib
-.section sotja
-.section sotjb
-.section sotka
-.section sotkb
-.section sotla
-.section sotlb
-.section sotma
-.section sotmb
-.section sotna
-.section sotnb
-.section sotoa
-.section sotob
-.section sotpa
-.section sotpb
-.section sotqa
-.section sotqb
-.section sotra
-.section sotrb
-.section sotsa
-.section sotsb
-.section sotta
-.section sottb
-.section sotua
-.section sotub
-.section sotva
-.section sotvb
-.section sotwa
-.section sotwb
-.section sotxa
-.section sotxb
-.section sotya
-.section sotyb
-.section sotza
-.section sotzb
-.section sot1a
-.section sot1b
-.section sot2a
-.section sot2b
-.section sot3a
-.section sot3b
-.section sot4a
-.section sot4b
-.section sot5a
-.section sot5b
-.section sot6a
-.section sot6b
-.section sot7a
-.section sot7b
-.section sot8a
-.section sot8b
-.section sot9a
-.section sot9b
-.section sot0a
-.section sot0b
-.section souaa
-.section souab
-.section souba
-.section soubb
-.section souca
-.section soucb
-.section souda
-.section soudb
-.section souea
-.section soueb
-.section soufa
-.section soufb
-.section souga
-.section sougb
-.section souha
-.section souhb
-.section souia
-.section souib
-.section souja
-.section soujb
-.section souka
-.section soukb
-.section soula
-.section soulb
-.section souma
-.section soumb
-.section souna
-.section sounb
-.section souoa
-.section souob
-.section soupa
-.section soupb
-.section souqa
-.section souqb
-.section soura
-.section sourb
-.section sousa
-.section sousb
-.section souta
-.section soutb
-.section souua
-.section souub
-.section souva
-.section souvb
-.section souwa
-.section souwb
-.section souxa
-.section souxb
-.section souya
-.section souyb
-.section souza
-.section souzb
-.section sou1a
-.section sou1b
-.section sou2a
-.section sou2b
-.section sou3a
-.section sou3b
-.section sou4a
-.section sou4b
-.section sou5a
-.section sou5b
-.section sou6a
-.section sou6b
-.section sou7a
-.section sou7b
-.section sou8a
-.section sou8b
-.section sou9a
-.section sou9b
-.section sou0a
-.section sou0b
-.section sovaa
-.section sovab
-.section sovba
-.section sovbb
-.section sovca
-.section sovcb
-.section sovda
-.section sovdb
-.section sovea
-.section soveb
-.section sovfa
-.section sovfb
-.section sovga
-.section sovgb
-.section sovha
-.section sovhb
-.section sovia
-.section sovib
-.section sovja
-.section sovjb
-.section sovka
-.section sovkb
-.section sovla
-.section sovlb
-.section sovma
-.section sovmb
-.section sovna
-.section sovnb
-.section sovoa
-.section sovob
-.section sovpa
-.section sovpb
-.section sovqa
-.section sovqb
-.section sovra
-.section sovrb
-.section sovsa
-.section sovsb
-.section sovta
-.section sovtb
-.section sovua
-.section sovub
-.section sovva
-.section sovvb
-.section sovwa
-.section sovwb
-.section sovxa
-.section sovxb
-.section sovya
-.section sovyb
-.section sovza
-.section sovzb
-.section sov1a
-.section sov1b
-.section sov2a
-.section sov2b
-.section sov3a
-.section sov3b
-.section sov4a
-.section sov4b
-.section sov5a
-.section sov5b
-.section sov6a
-.section sov6b
-.section sov7a
-.section sov7b
-.section sov8a
-.section sov8b
-.section sov9a
-.section sov9b
-.section sov0a
-.section sov0b
-.section sowaa
-.section sowab
-.section sowba
-.section sowbb
-.section sowca
-.section sowcb
-.section sowda
-.section sowdb
-.section sowea
-.section soweb
-.section sowfa
-.section sowfb
-.section sowga
-.section sowgb
-.section sowha
-.section sowhb
-.section sowia
-.section sowib
-.section sowja
-.section sowjb
-.section sowka
-.section sowkb
-.section sowla
-.section sowlb
-.section sowma
-.section sowmb
-.section sowna
-.section sownb
-.section sowoa
-.section sowob
-.section sowpa
-.section sowpb
-.section sowqa
-.section sowqb
-.section sowra
-.section sowrb
-.section sowsa
-.section sowsb
-.section sowta
-.section sowtb
-.section sowua
-.section sowub
-.section sowva
-.section sowvb
-.section sowwa
-.section sowwb
-.section sowxa
-.section sowxb
-.section sowya
-.section sowyb
-.section sowza
-.section sowzb
-.section sow1a
-.section sow1b
-.section sow2a
-.section sow2b
-.section sow3a
-.section sow3b
-.section sow4a
-.section sow4b
-.section sow5a
-.section sow5b
-.section sow6a
-.section sow6b
-.section sow7a
-.section sow7b
-.section sow8a
-.section sow8b
-.section sow9a
-.section sow9b
-.section sow0a
-.section sow0b
-.section soxaa
-.section soxab
-.section soxba
-.section soxbb
-.section soxca
-.section soxcb
-.section soxda
-.section soxdb
-.section soxea
-.section soxeb
-.section soxfa
-.section soxfb
-.section soxga
-.section soxgb
-.section soxha
-.section soxhb
-.section soxia
-.section soxib
-.section soxja
-.section soxjb
-.section soxka
-.section soxkb
-.section soxla
-.section soxlb
-.section soxma
-.section soxmb
-.section soxna
-.section soxnb
-.section soxoa
-.section soxob
-.section soxpa
-.section soxpb
-.section soxqa
-.section soxqb
-.section soxra
-.section soxrb
-.section soxsa
-.section soxsb
-.section soxta
-.section soxtb
-.section soxua
-.section soxub
-.section soxva
-.section soxvb
-.section soxwa
-.section soxwb
-.section soxxa
-.section soxxb
-.section soxya
-.section soxyb
-.section soxza
-.section soxzb
-.section sox1a
-.section sox1b
-.section sox2a
-.section sox2b
-.section sox3a
-.section sox3b
-.section sox4a
-.section sox4b
-.section sox5a
-.section sox5b
-.section sox6a
-.section sox6b
-.section sox7a
-.section sox7b
-.section sox8a
-.section sox8b
-.section sox9a
-.section sox9b
-.section sox0a
-.section sox0b
-.section soyaa
-.section soyab
-.section soyba
-.section soybb
-.section soyca
-.section soycb
-.section soyda
-.section soydb
-.section soyea
-.section soyeb
-.section soyfa
-.section soyfb
-.section soyga
-.section soygb
-.section soyha
-.section soyhb
-.section soyia
-.section soyib
-.section soyja
-.section soyjb
-.section soyka
-.section soykb
-.section soyla
-.section soylb
-.section soyma
-.section soymb
-.section soyna
-.section soynb
-.section soyoa
-.section soyob
-.section soypa
-.section soypb
-.section soyqa
-.section soyqb
-.section soyra
-.section soyrb
-.section soysa
-.section soysb
-.section soyta
-.section soytb
-.section soyua
-.section soyub
-.section soyva
-.section soyvb
-.section soywa
-.section soywb
-.section soyxa
-.section soyxb
-.section soyya
-.section soyyb
-.section soyza
-.section soyzb
-.section soy1a
-.section soy1b
-.section soy2a
-.section soy2b
-.section soy3a
-.section soy3b
-.section soy4a
-.section soy4b
-.section soy5a
-.section soy5b
-.section soy6a
-.section soy6b
-.section soy7a
-.section soy7b
-.section soy8a
-.section soy8b
-.section soy9a
-.section soy9b
-.section soy0a
-.section soy0b
-.section sozaa
-.section sozab
-.section sozba
-.section sozbb
-.section sozca
-.section sozcb
-.section sozda
-.section sozdb
-.section sozea
-.section sozeb
-.section sozfa
-.section sozfb
-.section sozga
-.section sozgb
-.section sozha
-.section sozhb
-.section sozia
-.section sozib
-.section sozja
-.section sozjb
-.section sozka
-.section sozkb
-.section sozla
-.section sozlb
-.section sozma
-.section sozmb
-.section sozna
-.section soznb
-.section sozoa
-.section sozob
-.section sozpa
-.section sozpb
-.section sozqa
-.section sozqb
-.section sozra
-.section sozrb
-.section sozsa
-.section sozsb
-.section sozta
-.section soztb
-.section sozua
-.section sozub
-.section sozva
-.section sozvb
-.section sozwa
-.section sozwb
-.section sozxa
-.section sozxb
-.section sozya
-.section sozyb
-.section sozza
-.section sozzb
-.section soz1a
-.section soz1b
-.section soz2a
-.section soz2b
-.section soz3a
-.section soz3b
-.section soz4a
-.section soz4b
-.section soz5a
-.section soz5b
-.section soz6a
-.section soz6b
-.section soz7a
-.section soz7b
-.section soz8a
-.section soz8b
-.section soz9a
-.section soz9b
-.section soz0a
-.section soz0b
-.section so1aa
-.section so1ab
-.section so1ba
-.section so1bb
-.section so1ca
-.section so1cb
-.section so1da
-.section so1db
-.section so1ea
-.section so1eb
-.section so1fa
-.section so1fb
-.section so1ga
-.section so1gb
-.section so1ha
-.section so1hb
-.section so1ia
-.section so1ib
-.section so1ja
-.section so1jb
-.section so1ka
-.section so1kb
-.section so1la
-.section so1lb
-.section so1ma
-.section so1mb
-.section so1na
-.section so1nb
-.section so1oa
-.section so1ob
-.section so1pa
-.section so1pb
-.section so1qa
-.section so1qb
-.section so1ra
-.section so1rb
-.section so1sa
-.section so1sb
-.section so1ta
-.section so1tb
-.section so1ua
-.section so1ub
-.section so1va
-.section so1vb
-.section so1wa
-.section so1wb
-.section so1xa
-.section so1xb
-.section so1ya
-.section so1yb
-.section so1za
-.section so1zb
-.section so11a
-.section so11b
-.section so12a
-.section so12b
-.section so13a
-.section so13b
-.section so14a
-.section so14b
-.section so15a
-.section so15b
-.section so16a
-.section so16b
-.section so17a
-.section so17b
-.section so18a
-.section so18b
-.section so19a
-.section so19b
-.section so10a
-.section so10b
-.section so2aa
-.section so2ab
-.section so2ba
-.section so2bb
-.section so2ca
-.section so2cb
-.section so2da
-.section so2db
-.section so2ea
-.section so2eb
-.section so2fa
-.section so2fb
-.section so2ga
-.section so2gb
-.section so2ha
-.section so2hb
-.section so2ia
-.section so2ib
-.section so2ja
-.section so2jb
-.section so2ka
-.section so2kb
-.section so2la
-.section so2lb
-.section so2ma
-.section so2mb
-.section so2na
-.section so2nb
-.section so2oa
-.section so2ob
-.section so2pa
-.section so2pb
-.section so2qa
-.section so2qb
-.section so2ra
-.section so2rb
-.section so2sa
-.section so2sb
-.section so2ta
-.section so2tb
-.section so2ua
-.section so2ub
-.section so2va
-.section so2vb
-.section so2wa
-.section so2wb
-.section so2xa
-.section so2xb
-.section so2ya
-.section so2yb
-.section so2za
-.section so2zb
-.section so21a
-.section so21b
-.section so22a
-.section so22b
-.section so23a
-.section so23b
-.section so24a
-.section so24b
-.section so25a
-.section so25b
-.section so26a
-.section so26b
-.section so27a
-.section so27b
-.section so28a
-.section so28b
-.section so29a
-.section so29b
-.section so20a
-.section so20b
-.section so3aa
-.section so3ab
-.section so3ba
-.section so3bb
-.section so3ca
-.section so3cb
-.section so3da
-.section so3db
-.section so3ea
-.section so3eb
-.section so3fa
-.section so3fb
-.section so3ga
-.section so3gb
-.section so3ha
-.section so3hb
-.section so3ia
-.section so3ib
-.section so3ja
-.section so3jb
-.section so3ka
-.section so3kb
-.section so3la
-.section so3lb
-.section so3ma
-.section so3mb
-.section so3na
-.section so3nb
-.section so3oa
-.section so3ob
-.section so3pa
-.section so3pb
-.section so3qa
-.section so3qb
-.section so3ra
-.section so3rb
-.section so3sa
-.section so3sb
-.section so3ta
-.section so3tb
-.section so3ua
-.section so3ub
-.section so3va
-.section so3vb
-.section so3wa
-.section so3wb
-.section so3xa
-.section so3xb
-.section so3ya
-.section so3yb
-.section so3za
-.section so3zb
-.section so31a
-.section so31b
-.section so32a
-.section so32b
-.section so33a
-.section so33b
-.section so34a
-.section so34b
-.section so35a
-.section so35b
-.section so36a
-.section so36b
-.section so37a
-.section so37b
-.section so38a
-.section so38b
-.section so39a
-.section so39b
-.section so30a
-.section so30b
-.section so4aa
-.section so4ab
-.section so4ba
-.section so4bb
-.section so4ca
-.section so4cb
-.section so4da
-.section so4db
-.section so4ea
-.section so4eb
-.section so4fa
-.section so4fb
-.section so4ga
-.section so4gb
-.section so4ha
-.section so4hb
-.section so4ia
-.section so4ib
-.section so4ja
-.section so4jb
-.section so4ka
-.section so4kb
-.section so4la
-.section so4lb
-.section so4ma
-.section so4mb
-.section so4na
-.section so4nb
-.section so4oa
-.section so4ob
-.section so4pa
-.section so4pb
-.section so4qa
-.section so4qb
-.section so4ra
-.section so4rb
-.section so4sa
-.section so4sb
-.section so4ta
-.section so4tb
-.section so4ua
-.section so4ub
-.section so4va
-.section so4vb
-.section so4wa
-.section so4wb
-.section so4xa
-.section so4xb
-.section so4ya
-.section so4yb
-.section so4za
-.section so4zb
-.section so41a
-.section so41b
-.section so42a
-.section so42b
-.section so43a
-.section so43b
-.section so44a
-.section so44b
-.section so45a
-.section so45b
-.section so46a
-.section so46b
-.section so47a
-.section so47b
-.section so48a
-.section so48b
-.section so49a
-.section so49b
-.section so40a
-.section so40b
-.section so5aa
-.section so5ab
-.section so5ba
-.section so5bb
-.section so5ca
-.section so5cb
-.section so5da
-.section so5db
-.section so5ea
-.section so5eb
-.section so5fa
-.section so5fb
-.section so5ga
-.section so5gb
-.section so5ha
-.section so5hb
-.section so5ia
-.section so5ib
-.section so5ja
-.section so5jb
-.section so5ka
-.section so5kb
-.section so5la
-.section so5lb
-.section so5ma
-.section so5mb
-.section so5na
-.section so5nb
-.section so5oa
-.section so5ob
-.section so5pa
-.section so5pb
-.section so5qa
-.section so5qb
-.section so5ra
-.section so5rb
-.section so5sa
-.section so5sb
-.section so5ta
-.section so5tb
-.section so5ua
-.section so5ub
-.section so5va
-.section so5vb
-.section so5wa
-.section so5wb
-.section so5xa
-.section so5xb
-.section so5ya
-.section so5yb
-.section so5za
-.section so5zb
-.section so51a
-.section so51b
-.section so52a
-.section so52b
-.section so53a
-.section so53b
-.section so54a
-.section so54b
-.section so55a
-.section so55b
-.section so56a
-.section so56b
-.section so57a
-.section so57b
-.section so58a
-.section so58b
-.section so59a
-.section so59b
-.section so50a
-.section so50b
-.section so6aa
-.section so6ab
-.section so6ba
-.section so6bb
-.section so6ca
-.section so6cb
-.section so6da
-.section so6db
-.section so6ea
-.section so6eb
-.section so6fa
-.section so6fb
-.section so6ga
-.section so6gb
-.section so6ha
-.section so6hb
-.section so6ia
-.section so6ib
-.section so6ja
-.section so6jb
-.section so6ka
-.section so6kb
-.section so6la
-.section so6lb
-.section so6ma
-.section so6mb
-.section so6na
-.section so6nb
-.section so6oa
-.section so6ob
-.section so6pa
-.section so6pb
-.section so6qa
-.section so6qb
-.section so6ra
-.section so6rb
-.section so6sa
-.section so6sb
-.section so6ta
-.section so6tb
-.section so6ua
-.section so6ub
-.section so6va
-.section so6vb
-.section so6wa
-.section so6wb
-.section so6xa
-.section so6xb
-.section so6ya
-.section so6yb
-.section so6za
-.section so6zb
-.section so61a
-.section so61b
-.section so62a
-.section so62b
-.section so63a
-.section so63b
-.section so64a
-.section so64b
-.section so65a
-.section so65b
-.section so66a
-.section so66b
-.section so67a
-.section so67b
-.section so68a
-.section so68b
-.section so69a
-.section so69b
-.section so60a
-.section so60b
-.section so7aa
-.section so7ab
-.section so7ba
-.section so7bb
-.section so7ca
-.section so7cb
-.section so7da
-.section so7db
-.section so7ea
-.section so7eb
-.section so7fa
-.section so7fb
-.section so7ga
-.section so7gb
-.section so7ha
-.section so7hb
-.section so7ia
-.section so7ib
-.section so7ja
-.section so7jb
-.section so7ka
-.section so7kb
-.section so7la
-.section so7lb
-.section so7ma
-.section so7mb
-.section so7na
-.section so7nb
-.section so7oa
-.section so7ob
-.section so7pa
-.section so7pb
-.section so7qa
-.section so7qb
-.section so7ra
-.section so7rb
-.section so7sa
-.section so7sb
-.section so7ta
-.section so7tb
-.section so7ua
-.section so7ub
-.section so7va
-.section so7vb
-.section so7wa
-.section so7wb
-.section so7xa
-.section so7xb
-.section so7ya
-.section so7yb
-.section so7za
-.section so7zb
-.section so71a
-.section so71b
-.section so72a
-.section so72b
-.section so73a
-.section so73b
-.section so74a
-.section so74b
-.section so75a
-.section so75b
-.section so76a
-.section so76b
-.section so77a
-.section so77b
-.section so78a
-.section so78b
-.section so79a
-.section so79b
-.section so70a
-.section so70b
-.section so8aa
-.section so8ab
-.section so8ba
-.section so8bb
-.section so8ca
-.section so8cb
-.section so8da
-.section so8db
-.section so8ea
-.section so8eb
-.section so8fa
-.section so8fb
-.section so8ga
-.section so8gb
-.section so8ha
-.section so8hb
-.section so8ia
-.section so8ib
-.section so8ja
-.section so8jb
-.section so8ka
-.section so8kb
-.section so8la
-.section so8lb
-.section so8ma
-.section so8mb
-.section so8na
-.section so8nb
-.section so8oa
-.section so8ob
-.section so8pa
-.section so8pb
-.section so8qa
-.section so8qb
-.section so8ra
-.section so8rb
-.section so8sa
-.section so8sb
-.section so8ta
-.section so8tb
-.section so8ua
-.section so8ub
-.section so8va
-.section so8vb
-.section so8wa
-.section so8wb
-.section so8xa
-.section so8xb
-.section so8ya
-.section so8yb
-.section so8za
-.section so8zb
-.section so81a
-.section so81b
-.section so82a
-.section so82b
-.section so83a
-.section so83b
-.section so84a
-.section so84b
-.section so85a
-.section so85b
-.section so86a
-.section so86b
-.section so87a
-.section so87b
-.section so88a
-.section so88b
-.section so89a
-.section so89b
-.section so80a
-.section so80b
-.section so9aa
-.section so9ab
-.section so9ba
-.section so9bb
-.section so9ca
-.section so9cb
-.section so9da
-.section so9db
-.section so9ea
-.section so9eb
-.section so9fa
-.section so9fb
-.section so9ga
-.section so9gb
-.section so9ha
-.section so9hb
-.section so9ia
-.section so9ib
-.section so9ja
-.section so9jb
-.section so9ka
-.section so9kb
-.section so9la
-.section so9lb
-.section so9ma
-.section so9mb
-.section so9na
-.section so9nb
-.section so9oa
-.section so9ob
-.section so9pa
-.section so9pb
-.section so9qa
-.section so9qb
-.section so9ra
-.section so9rb
-.section so9sa
-.section so9sb
-.section so9ta
-.section so9tb
-.section so9ua
-.section so9ub
-.section so9va
-.section so9vb
-.section so9wa
-.section so9wb
-.section so9xa
-.section so9xb
-.section so9ya
-.section so9yb
-.section so9za
-.section so9zb
-.section so91a
-.section so91b
-.section so92a
-.section so92b
-.section so93a
-.section so93b
-.section so94a
-.section so94b
-.section so95a
-.section so95b
-.section so96a
-.section so96b
-.section so97a
-.section so97b
-.section so98a
-.section so98b
-.section so99a
-.section so99b
-.section so90a
-.section so90b
-.section so0aa
-.section so0ab
-.section so0ba
-.section so0bb
-.section so0ca
-.section so0cb
-.section so0da
-.section so0db
-.section so0ea
-.section so0eb
-.section so0fa
-.section so0fb
-.section so0ga
-.section so0gb
-.section so0ha
-.section so0hb
-.section so0ia
-.section so0ib
-.section so0ja
-.section so0jb
-.section so0ka
-.section so0kb
-.section so0la
-.section so0lb
-.section so0ma
-.section so0mb
-.section so0na
-.section so0nb
-.section so0oa
-.section so0ob
-.section so0pa
-.section so0pb
-.section so0qa
-.section so0qb
-.section so0ra
-.section so0rb
-.section so0sa
-.section so0sb
-.section so0ta
-.section so0tb
-.section so0ua
-.section so0ub
-.section so0va
-.section so0vb
-.section so0wa
-.section so0wb
-.section so0xa
-.section so0xb
-.section so0ya
-.section so0yb
-.section so0za
-.section so0zb
-.section so01a
-.section so01b
-.section so02a
-.section so02b
-.section so03a
-.section so03b
-.section so04a
-.section so04b
-.section so05a
-.section so05b
-.section so06a
-.section so06b
-.section so07a
-.section so07b
-.section so08a
-.section so08b
-.section so09a
-.section so09b
-.section so00a
-.section so00b
-.section spaaa
-.section spaab
-.section spaba
-.section spabb
-.section spaca
-.section spacb
-.section spada
-.section spadb
-.section spaea
-.section spaeb
-.section spafa
-.section spafb
-.section spaga
-.section spagb
-.section spaha
-.section spahb
-.section spaia
-.section spaib
-.section spaja
-.section spajb
-.section spaka
-.section spakb
-.section spala
-.section spalb
-.section spama
-.section spamb
-.section spana
-.section spanb
-.section spaoa
-.section spaob
-.section spapa
-.section spapb
-.section spaqa
-.section spaqb
-.section spara
-.section sparb
-.section spasa
-.section spasb
-.section spata
-.section spatb
-.section spaua
-.section spaub
-.section spava
-.section spavb
-.section spawa
-.section spawb
-.section spaxa
-.section spaxb
-.section spaya
-.section spayb
-.section spaza
-.section spazb
-.section spa1a
-.section spa1b
-.section spa2a
-.section spa2b
-.section spa3a
-.section spa3b
-.section spa4a
-.section spa4b
-.section spa5a
-.section spa5b
-.section spa6a
-.section spa6b
-.section spa7a
-.section spa7b
-.section spa8a
-.section spa8b
-.section spa9a
-.section spa9b
-.section spa0a
-.section spa0b
-.section spbaa
-.section spbab
-.section spbba
-.section spbbb
-.section spbca
-.section spbcb
-.section spbda
-.section spbdb
-.section spbea
-.section spbeb
-.section spbfa
-.section spbfb
-.section spbga
-.section spbgb
-.section spbha
-.section spbhb
-.section spbia
-.section spbib
-.section spbja
-.section spbjb
-.section spbka
-.section spbkb
-.section spbla
-.section spblb
-.section spbma
-.section spbmb
-.section spbna
-.section spbnb
-.section spboa
-.section spbob
-.section spbpa
-.section spbpb
-.section spbqa
-.section spbqb
-.section spbra
-.section spbrb
-.section spbsa
-.section spbsb
-.section spbta
-.section spbtb
-.section spbua
-.section spbub
-.section spbva
-.section spbvb
-.section spbwa
-.section spbwb
-.section spbxa
-.section spbxb
-.section spbya
-.section spbyb
-.section spbza
-.section spbzb
-.section spb1a
-.section spb1b
-.section spb2a
-.section spb2b
-.section spb3a
-.section spb3b
-.section spb4a
-.section spb4b
-.section spb5a
-.section spb5b
-.section spb6a
-.section spb6b
-.section spb7a
-.section spb7b
-.section spb8a
-.section spb8b
-.section spb9a
-.section spb9b
-.section spb0a
-.section spb0b
-.section spcaa
-.section spcab
-.section spcba
-.section spcbb
-.section spcca
-.section spccb
-.section spcda
-.section spcdb
-.section spcea
-.section spceb
-.section spcfa
-.section spcfb
-.section spcga
-.section spcgb
-.section spcha
-.section spchb
-.section spcia
-.section spcib
-.section spcja
-.section spcjb
-.section spcka
-.section spckb
-.section spcla
-.section spclb
-.section spcma
-.section spcmb
-.section spcna
-.section spcnb
-.section spcoa
-.section spcob
-.section spcpa
-.section spcpb
-.section spcqa
-.section spcqb
-.section spcra
-.section spcrb
-.section spcsa
-.section spcsb
-.section spcta
-.section spctb
-.section spcua
-.section spcub
-.section spcva
-.section spcvb
-.section spcwa
-.section spcwb
-.section spcxa
-.section spcxb
-.section spcya
-.section spcyb
-.section spcza
-.section spczb
-.section spc1a
-.section spc1b
-.section spc2a
-.section spc2b
-.section spc3a
-.section spc3b
-.section spc4a
-.section spc4b
-.section spc5a
-.section spc5b
-.section spc6a
-.section spc6b
-.section spc7a
-.section spc7b
-.section spc8a
-.section spc8b
-.section spc9a
-.section spc9b
-.section spc0a
-.section spc0b
-.section spdaa
-.section spdab
-.section spdba
-.section spdbb
-.section spdca
-.section spdcb
-.section spdda
-.section spddb
-.section spdea
-.section spdeb
-.section spdfa
-.section spdfb
-.section spdga
-.section spdgb
-.section spdha
-.section spdhb
-.section spdia
-.section spdib
-.section spdja
-.section spdjb
-.section spdka
-.section spdkb
-.section spdla
-.section spdlb
-.section spdma
-.section spdmb
-.section spdna
-.section spdnb
-.section spdoa
-.section spdob
-.section spdpa
-.section spdpb
-.section spdqa
-.section spdqb
-.section spdra
-.section spdrb
-.section spdsa
-.section spdsb
-.section spdta
-.section spdtb
-.section spdua
-.section spdub
-.section spdva
-.section spdvb
-.section spdwa
-.section spdwb
-.section spdxa
-.section spdxb
-.section spdya
-.section spdyb
-.section spdza
-.section spdzb
-.section spd1a
-.section spd1b
-.section spd2a
-.section spd2b
-.section spd3a
-.section spd3b
-.section spd4a
-.section spd4b
-.section spd5a
-.section spd5b
-.section spd6a
-.section spd6b
-.section spd7a
-.section spd7b
-.section spd8a
-.section spd8b
-.section spd9a
-.section spd9b
-.section spd0a
-.section spd0b
-.section speaa
-.section speab
-.section speba
-.section spebb
-.section speca
-.section specb
-.section speda
-.section spedb
-.section speea
-.section speeb
-.section spefa
-.section spefb
-.section spega
-.section spegb
-.section speha
-.section spehb
-.section speia
-.section speib
-.section speja
-.section spejb
-.section speka
-.section spekb
-.section spela
-.section spelb
-.section spema
-.section spemb
-.section spena
-.section spenb
-.section speoa
-.section speob
-.section spepa
-.section spepb
-.section speqa
-.section speqb
-.section spera
-.section sperb
-.section spesa
-.section spesb
-.section speta
-.section spetb
-.section speua
-.section speub
-.section speva
-.section spevb
-.section spewa
-.section spewb
-.section spexa
-.section spexb
-.section speya
-.section speyb
-.section speza
-.section spezb
-.section spe1a
-.section spe1b
-.section spe2a
-.section spe2b
-.section spe3a
-.section spe3b
-.section spe4a
-.section spe4b
-.section spe5a
-.section spe5b
-.section spe6a
-.section spe6b
-.section spe7a
-.section spe7b
-.section spe8a
-.section spe8b
-.section spe9a
-.section spe9b
-.section spe0a
-.section spe0b
-.section spfaa
-.section spfab
-.section spfba
-.section spfbb
-.section spfca
-.section spfcb
-.section spfda
-.section spfdb
-.section spfea
-.section spfeb
-.section spffa
-.section spffb
-.section spfga
-.section spfgb
-.section spfha
-.section spfhb
-.section spfia
-.section spfib
-.section spfja
-.section spfjb
-.section spfka
-.section spfkb
-.section spfla
-.section spflb
-.section spfma
-.section spfmb
-.section spfna
-.section spfnb
-.section spfoa
-.section spfob
-.section spfpa
-.section spfpb
-.section spfqa
-.section spfqb
-.section spfra
-.section spfrb
-.section spfsa
-.section spfsb
-.section spfta
-.section spftb
-.section spfua
-.section spfub
-.section spfva
-.section spfvb
-.section spfwa
-.section spfwb
-.section spfxa
-.section spfxb
-.section spfya
-.section spfyb
-.section spfza
-.section spfzb
-.section spf1a
-.section spf1b
-.section spf2a
-.section spf2b
-.section spf3a
-.section spf3b
-.section spf4a
-.section spf4b
-.section spf5a
-.section spf5b
-.section spf6a
-.section spf6b
-.section spf7a
-.section spf7b
-.section spf8a
-.section spf8b
-.section spf9a
-.section spf9b
-.section spf0a
-.section spf0b
-.section spgaa
-.section spgab
-.section spgba
-.section spgbb
-.section spgca
-.section spgcb
-.section spgda
-.section spgdb
-.section spgea
-.section spgeb
-.section spgfa
-.section spgfb
-.section spgga
-.section spggb
-.section spgha
-.section spghb
-.section spgia
-.section spgib
-.section spgja
-.section spgjb
-.section spgka
-.section spgkb
-.section spgla
-.section spglb
-.section spgma
-.section spgmb
-.section spgna
-.section spgnb
-.section spgoa
-.section spgob
-.section spgpa
-.section spgpb
-.section spgqa
-.section spgqb
-.section spgra
-.section spgrb
-.section spgsa
-.section spgsb
-.section spgta
-.section spgtb
-.section spgua
-.section spgub
-.section spgva
-.section spgvb
-.section spgwa
-.section spgwb
-.section spgxa
-.section spgxb
-.section spgya
-.section spgyb
-.section spgza
-.section spgzb
-.section spg1a
-.section spg1b
-.section spg2a
-.section spg2b
-.section spg3a
-.section spg3b
-.section spg4a
-.section spg4b
-.section spg5a
-.section spg5b
-.section spg6a
-.section spg6b
-.section spg7a
-.section spg7b
-.section spg8a
-.section spg8b
-.section spg9a
-.section spg9b
-.section spg0a
-.section spg0b
-.section sphaa
-.section sphab
-.section sphba
-.section sphbb
-.section sphca
-.section sphcb
-.section sphda
-.section sphdb
-.section sphea
-.section spheb
-.section sphfa
-.section sphfb
-.section sphga
-.section sphgb
-.section sphha
-.section sphhb
-.section sphia
-.section sphib
-.section sphja
-.section sphjb
-.section sphka
-.section sphkb
-.section sphla
-.section sphlb
-.section sphma
-.section sphmb
-.section sphna
-.section sphnb
-.section sphoa
-.section sphob
-.section sphpa
-.section sphpb
-.section sphqa
-.section sphqb
-.section sphra
-.section sphrb
-.section sphsa
-.section sphsb
-.section sphta
-.section sphtb
-.section sphua
-.section sphub
-.section sphva
-.section sphvb
-.section sphwa
-.section sphwb
-.section sphxa
-.section sphxb
-.section sphya
-.section sphyb
-.section sphza
-.section sphzb
-.section sph1a
-.section sph1b
-.section sph2a
-.section sph2b
-.section sph3a
-.section sph3b
-.section sph4a
-.section sph4b
-.section sph5a
-.section sph5b
-.section sph6a
-.section sph6b
-.section sph7a
-.section sph7b
-.section sph8a
-.section sph8b
-.section sph9a
-.section sph9b
-.section sph0a
-.section sph0b
-.section spiaa
-.section spiab
-.section spiba
-.section spibb
-.section spica
-.section spicb
-.section spida
-.section spidb
-.section spiea
-.section spieb
-.section spifa
-.section spifb
-.section spiga
-.section spigb
-.section spiha
-.section spihb
-.section spiia
-.section spiib
-.section spija
-.section spijb
-.section spika
-.section spikb
-.section spila
-.section spilb
-.section spima
-.section spimb
-.section spina
-.section spinb
-.section spioa
-.section spiob
-.section spipa
-.section spipb
-.section spiqa
-.section spiqb
-.section spira
-.section spirb
-.section spisa
-.section spisb
-.section spita
-.section spitb
-.section spiua
-.section spiub
-.section spiva
-.section spivb
-.section spiwa
-.section spiwb
-.section spixa
-.section spixb
-.section spiya
-.section spiyb
-.section spiza
-.section spizb
-.section spi1a
-.section spi1b
-.section spi2a
-.section spi2b
-.section spi3a
-.section spi3b
-.section spi4a
-.section spi4b
-.section spi5a
-.section spi5b
-.section spi6a
-.section spi6b
-.section spi7a
-.section spi7b
-.section spi8a
-.section spi8b
-.section spi9a
-.section spi9b
-.section spi0a
-.section spi0b
-.section spjaa
-.section spjab
-.section spjba
-.section spjbb
-.section spjca
-.section spjcb
-.section spjda
-.section spjdb
-.section spjea
-.section spjeb
-.section spjfa
-.section spjfb
-.section spjga
-.section spjgb
-.section spjha
-.section spjhb
-.section spjia
-.section spjib
-.section spjja
-.section spjjb
-.section spjka
-.section spjkb
-.section spjla
-.section spjlb
-.section spjma
-.section spjmb
-.section spjna
-.section spjnb
-.section spjoa
-.section spjob
-.section spjpa
-.section spjpb
-.section spjqa
-.section spjqb
-.section spjra
-.section spjrb
-.section spjsa
-.section spjsb
-.section spjta
-.section spjtb
-.section spjua
-.section spjub
-.section spjva
-.section spjvb
-.section spjwa
-.section spjwb
-.section spjxa
-.section spjxb
-.section spjya
-.section spjyb
-.section spjza
-.section spjzb
-.section spj1a
-.section spj1b
-.section spj2a
-.section spj2b
-.section spj3a
-.section spj3b
-.section spj4a
-.section spj4b
-.section spj5a
-.section spj5b
-.section spj6a
-.section spj6b
-.section spj7a
-.section spj7b
-.section spj8a
-.section spj8b
-.section spj9a
-.section spj9b
-.section spj0a
-.section spj0b
-.section spkaa
-.section spkab
-.section spkba
-.section spkbb
-.section spkca
-.section spkcb
-.section spkda
-.section spkdb
-.section spkea
-.section spkeb
-.section spkfa
-.section spkfb
-.section spkga
-.section spkgb
-.section spkha
-.section spkhb
-.section spkia
-.section spkib
-.section spkja
-.section spkjb
-.section spkka
-.section spkkb
-.section spkla
-.section spklb
-.section spkma
-.section spkmb
-.section spkna
-.section spknb
-.section spkoa
-.section spkob
-.section spkpa
-.section spkpb
-.section spkqa
-.section spkqb
-.section spkra
-.section spkrb
-.section spksa
-.section spksb
-.section spkta
-.section spktb
-.section spkua
-.section spkub
-.section spkva
-.section spkvb
-.section spkwa
-.section spkwb
-.section spkxa
-.section spkxb
-.section spkya
-.section spkyb
-.section spkza
-.section spkzb
-.section spk1a
-.section spk1b
-.section spk2a
-.section spk2b
-.section spk3a
-.section spk3b
-.section spk4a
-.section spk4b
-.section spk5a
-.section spk5b
-.section spk6a
-.section spk6b
-.section spk7a
-.section spk7b
-.section spk8a
-.section spk8b
-.section spk9a
-.section spk9b
-.section spk0a
-.section spk0b
-.section splaa
-.section splab
-.section splba
-.section splbb
-.section splca
-.section splcb
-.section splda
-.section spldb
-.section splea
-.section spleb
-.section splfa
-.section splfb
-.section splga
-.section splgb
-.section splha
-.section splhb
-.section splia
-.section splib
-.section splja
-.section spljb
-.section splka
-.section splkb
-.section splla
-.section spllb
-.section splma
-.section splmb
-.section splna
-.section splnb
-.section sploa
-.section splob
-.section splpa
-.section splpb
-.section splqa
-.section splqb
-.section splra
-.section splrb
-.section splsa
-.section splsb
-.section splta
-.section spltb
-.section splua
-.section splub
-.section splva
-.section splvb
-.section splwa
-.section splwb
-.section splxa
-.section splxb
-.section splya
-.section splyb
-.section splza
-.section splzb
-.section spl1a
-.section spl1b
-.section spl2a
-.section spl2b
-.section spl3a
-.section spl3b
-.section spl4a
-.section spl4b
-.section spl5a
-.section spl5b
-.section spl6a
-.section spl6b
-.section spl7a
-.section spl7b
-.section spl8a
-.section spl8b
-.section spl9a
-.section spl9b
-.section spl0a
-.section spl0b
-.section spmaa
-.section spmab
-.section spmba
-.section spmbb
-.section spmca
-.section spmcb
-.section spmda
-.section spmdb
-.section spmea
-.section spmeb
-.section spmfa
-.section spmfb
-.section spmga
-.section spmgb
-.section spmha
-.section spmhb
-.section spmia
-.section spmib
-.section spmja
-.section spmjb
-.section spmka
-.section spmkb
-.section spmla
-.section spmlb
-.section spmma
-.section spmmb
-.section spmna
-.section spmnb
-.section spmoa
-.section spmob
-.section spmpa
-.section spmpb
-.section spmqa
-.section spmqb
-.section spmra
-.section spmrb
-.section spmsa
-.section spmsb
-.section spmta
-.section spmtb
-.section spmua
-.section spmub
-.section spmva
-.section spmvb
-.section spmwa
-.section spmwb
-.section spmxa
-.section spmxb
-.section spmya
-.section spmyb
-.section spmza
-.section spmzb
-.section spm1a
-.section spm1b
-.section spm2a
-.section spm2b
-.section spm3a
-.section spm3b
-.section spm4a
-.section spm4b
-.section spm5a
-.section spm5b
-.section spm6a
-.section spm6b
-.section spm7a
-.section spm7b
-.section spm8a
-.section spm8b
-.section spm9a
-.section spm9b
-.section spm0a
-.section spm0b
-.section spnaa
-.section spnab
-.section spnba
-.section spnbb
-.section spnca
-.section spncb
-.section spnda
-.section spndb
-.section spnea
-.section spneb
-.section spnfa
-.section spnfb
-.section spnga
-.section spngb
-.section spnha
-.section spnhb
-.section spnia
-.section spnib
-.section spnja
-.section spnjb
-.section spnka
-.section spnkb
-.section spnla
-.section spnlb
-.section spnma
-.section spnmb
-.section spnna
-.section spnnb
-.section spnoa
-.section spnob
-.section spnpa
-.section spnpb
-.section spnqa
-.section spnqb
-.section spnra
-.section spnrb
-.section spnsa
-.section spnsb
-.section spnta
-.section spntb
-.section spnua
-.section spnub
-.section spnva
-.section spnvb
-.section spnwa
-.section spnwb
-.section spnxa
-.section spnxb
-.section spnya
-.section spnyb
-.section spnza
-.section spnzb
-.section spn1a
-.section spn1b
-.section spn2a
-.section spn2b
-.section spn3a
-.section spn3b
-.section spn4a
-.section spn4b
-.section spn5a
-.section spn5b
-.section spn6a
-.section spn6b
-.section spn7a
-.section spn7b
-.section spn8a
-.section spn8b
-.section spn9a
-.section spn9b
-.section spn0a
-.section spn0b
-.section spoaa
-.section spoab
-.section spoba
-.section spobb
-.section spoca
-.section spocb
-.section spoda
-.section spodb
-.section spoea
-.section spoeb
-.section spofa
-.section spofb
-.section spoga
-.section spogb
-.section spoha
-.section spohb
-.section spoia
-.section spoib
-.section spoja
-.section spojb
-.section spoka
-.section spokb
-.section spola
-.section spolb
-.section spoma
-.section spomb
-.section spona
-.section sponb
-.section spooa
-.section spoob
-.section spopa
-.section spopb
-.section spoqa
-.section spoqb
-.section spora
-.section sporb
-.section sposa
-.section sposb
-.section spota
-.section spotb
-.section spoua
-.section spoub
-.section spova
-.section spovb
-.section spowa
-.section spowb
-.section spoxa
-.section spoxb
-.section spoya
-.section spoyb
-.section spoza
-.section spozb
-.section spo1a
-.section spo1b
-.section spo2a
-.section spo2b
-.section spo3a
-.section spo3b
-.section spo4a
-.section spo4b
-.section spo5a
-.section spo5b
-.section spo6a
-.section spo6b
-.section spo7a
-.section spo7b
-.section spo8a
-.section spo8b
-.section spo9a
-.section spo9b
-.section spo0a
-.section spo0b
-.section sppaa
-.section sppab
-.section sppba
-.section sppbb
-.section sppca
-.section sppcb
-.section sppda
-.section sppdb
-.section sppea
-.section sppeb
-.section sppfa
-.section sppfb
-.section sppga
-.section sppgb
-.section sppha
-.section spphb
-.section sppia
-.section sppib
-.section sppja
-.section sppjb
-.section sppka
-.section sppkb
-.section sppla
-.section spplb
-.section sppma
-.section sppmb
-.section sppna
-.section sppnb
-.section sppoa
-.section sppob
-.section spppa
-.section spppb
-.section sppqa
-.section sppqb
-.section sppra
-.section spprb
-.section sppsa
-.section sppsb
-.section sppta
-.section spptb
-.section sppua
-.section sppub
-.section sppva
-.section sppvb
-.section sppwa
-.section sppwb
-.section sppxa
-.section sppxb
-.section sppya
-.section sppyb
-.section sppza
-.section sppzb
-.section spp1a
-.section spp1b
-.section spp2a
-.section spp2b
-.section spp3a
-.section spp3b
-.section spp4a
-.section spp4b
-.section spp5a
-.section spp5b
-.section spp6a
-.section spp6b
-.section spp7a
-.section spp7b
-.section spp8a
-.section spp8b
-.section spp9a
-.section spp9b
-.section spp0a
-.section spp0b
-.section spqaa
-.section spqab
-.section spqba
-.section spqbb
-.section spqca
-.section spqcb
-.section spqda
-.section spqdb
-.section spqea
-.section spqeb
-.section spqfa
-.section spqfb
-.section spqga
-.section spqgb
-.section spqha
-.section spqhb
-.section spqia
-.section spqib
-.section spqja
-.section spqjb
-.section spqka
-.section spqkb
-.section spqla
-.section spqlb
-.section spqma
-.section spqmb
-.section spqna
-.section spqnb
-.section spqoa
-.section spqob
-.section spqpa
-.section spqpb
-.section spqqa
-.section spqqb
-.section spqra
-.section spqrb
-.section spqsa
-.section spqsb
-.section spqta
-.section spqtb
-.section spqua
-.section spqub
-.section spqva
-.section spqvb
-.section spqwa
-.section spqwb
-.section spqxa
-.section spqxb
-.section spqya
-.section spqyb
-.section spqza
-.section spqzb
-.section spq1a
-.section spq1b
-.section spq2a
-.section spq2b
-.section spq3a
-.section spq3b
-.section spq4a
-.section spq4b
-.section spq5a
-.section spq5b
-.section spq6a
-.section spq6b
-.section spq7a
-.section spq7b
-.section spq8a
-.section spq8b
-.section spq9a
-.section spq9b
-.section spq0a
-.section spq0b
-.section spraa
-.section sprab
-.section sprba
-.section sprbb
-.section sprca
-.section sprcb
-.section sprda
-.section sprdb
-.section sprea
-.section spreb
-.section sprfa
-.section sprfb
-.section sprga
-.section sprgb
-.section sprha
-.section sprhb
-.section spria
-.section sprib
-.section sprja
-.section sprjb
-.section sprka
-.section sprkb
-.section sprla
-.section sprlb
-.section sprma
-.section sprmb
-.section sprna
-.section sprnb
-.section sproa
-.section sprob
-.section sprpa
-.section sprpb
-.section sprqa
-.section sprqb
-.section sprra
-.section sprrb
-.section sprsa
-.section sprsb
-.section sprta
-.section sprtb
-.section sprua
-.section sprub
-.section sprva
-.section sprvb
-.section sprwa
-.section sprwb
-.section sprxa
-.section sprxb
-.section sprya
-.section spryb
-.section sprza
-.section sprzb
-.section spr1a
-.section spr1b
-.section spr2a
-.section spr2b
-.section spr3a
-.section spr3b
-.section spr4a
-.section spr4b
-.section spr5a
-.section spr5b
-.section spr6a
-.section spr6b
-.section spr7a
-.section spr7b
-.section spr8a
-.section spr8b
-.section spr9a
-.section spr9b
-.section spr0a
-.section spr0b
-.section spsaa
-.section spsab
-.section spsba
-.section spsbb
-.section spsca
-.section spscb
-.section spsda
-.section spsdb
-.section spsea
-.section spseb
-.section spsfa
-.section spsfb
-.section spsga
-.section spsgb
-.section spsha
-.section spshb
-.section spsia
-.section spsib
-.section spsja
-.section spsjb
-.section spska
-.section spskb
-.section spsla
-.section spslb
-.section spsma
-.section spsmb
-.section spsna
-.section spsnb
-.section spsoa
-.section spsob
-.section spspa
-.section spspb
-.section spsqa
-.section spsqb
-.section spsra
-.section spsrb
-.section spssa
-.section spssb
-.section spsta
-.section spstb
-.section spsua
-.section spsub
-.section spsva
-.section spsvb
-.section spswa
-.section spswb
-.section spsxa
-.section spsxb
-.section spsya
-.section spsyb
-.section spsza
-.section spszb
-.section sps1a
-.section sps1b
-.section sps2a
-.section sps2b
-.section sps3a
-.section sps3b
-.section sps4a
-.section sps4b
-.section sps5a
-.section sps5b
-.section sps6a
-.section sps6b
-.section sps7a
-.section sps7b
-.section sps8a
-.section sps8b
-.section sps9a
-.section sps9b
-.section sps0a
-.section sps0b
-.section sptaa
-.section sptab
-.section sptba
-.section sptbb
-.section sptca
-.section sptcb
-.section sptda
-.section sptdb
-.section sptea
-.section spteb
-.section sptfa
-.section sptfb
-.section sptga
-.section sptgb
-.section sptha
-.section spthb
-.section sptia
-.section sptib
-.section sptja
-.section sptjb
-.section sptka
-.section sptkb
-.section sptla
-.section sptlb
-.section sptma
-.section sptmb
-.section sptna
-.section sptnb
-.section sptoa
-.section sptob
-.section sptpa
-.section sptpb
-.section sptqa
-.section sptqb
-.section sptra
-.section sptrb
-.section sptsa
-.section sptsb
-.section sptta
-.section spttb
-.section sptua
-.section sptub
-.section sptva
-.section sptvb
-.section sptwa
-.section sptwb
-.section sptxa
-.section sptxb
-.section sptya
-.section sptyb
-.section sptza
-.section sptzb
-.section spt1a
-.section spt1b
-.section spt2a
-.section spt2b
-.section spt3a
-.section spt3b
-.section spt4a
-.section spt4b
-.section spt5a
-.section spt5b
-.section spt6a
-.section spt6b
-.section spt7a
-.section spt7b
-.section spt8a
-.section spt8b
-.section spt9a
-.section spt9b
-.section spt0a
-.section spt0b
-.section spuaa
-.section spuab
-.section spuba
-.section spubb
-.section spuca
-.section spucb
-.section spuda
-.section spudb
-.section spuea
-.section spueb
-.section spufa
-.section spufb
-.section spuga
-.section spugb
-.section spuha
-.section spuhb
-.section spuia
-.section spuib
-.section spuja
-.section spujb
-.section spuka
-.section spukb
-.section spula
-.section spulb
-.section spuma
-.section spumb
-.section spuna
-.section spunb
-.section spuoa
-.section spuob
-.section spupa
-.section spupb
-.section spuqa
-.section spuqb
-.section spura
-.section spurb
-.section spusa
-.section spusb
-.section sputa
-.section sputb
-.section spuua
-.section spuub
-.section spuva
-.section spuvb
-.section spuwa
-.section spuwb
-.section spuxa
-.section spuxb
-.section spuya
-.section spuyb
-.section spuza
-.section spuzb
-.section spu1a
-.section spu1b
-.section spu2a
-.section spu2b
-.section spu3a
-.section spu3b
-.section spu4a
-.section spu4b
-.section spu5a
-.section spu5b
-.section spu6a
-.section spu6b
-.section spu7a
-.section spu7b
-.section spu8a
-.section spu8b
-.section spu9a
-.section spu9b
-.section spu0a
-.section spu0b
-.section spvaa
-.section spvab
-.section spvba
-.section spvbb
-.section spvca
-.section spvcb
-.section spvda
-.section spvdb
-.section spvea
-.section spveb
-.section spvfa
-.section spvfb
-.section spvga
-.section spvgb
-.section spvha
-.section spvhb
-.section spvia
-.section spvib
-.section spvja
-.section spvjb
-.section spvka
-.section spvkb
-.section spvla
-.section spvlb
-.section spvma
-.section spvmb
-.section spvna
-.section spvnb
-.section spvoa
-.section spvob
-.section spvpa
-.section spvpb
-.section spvqa
-.section spvqb
-.section spvra
-.section spvrb
-.section spvsa
-.section spvsb
-.section spvta
-.section spvtb
-.section spvua
-.section spvub
-.section spvva
-.section spvvb
-.section spvwa
-.section spvwb
-.section spvxa
-.section spvxb
-.section spvya
-.section spvyb
-.section spvza
-.section spvzb
-.section spv1a
-.section spv1b
-.section spv2a
-.section spv2b
-.section spv3a
-.section spv3b
-.section spv4a
-.section spv4b
-.section spv5a
-.section spv5b
-.section spv6a
-.section spv6b
-.section spv7a
-.section spv7b
-.section spv8a
-.section spv8b
-.section spv9a
-.section spv9b
-.section spv0a
-.section spv0b
-.section spwaa
-.section spwab
-.section spwba
-.section spwbb
-.section spwca
-.section spwcb
-.section spwda
-.section spwdb
-.section spwea
-.section spweb
-.section spwfa
-.section spwfb
-.section spwga
-.section spwgb
-.section spwha
-.section spwhb
-.section spwia
-.section spwib
-.section spwja
-.section spwjb
-.section spwka
-.section spwkb
-.section spwla
-.section spwlb
-.section spwma
-.section spwmb
-.section spwna
-.section spwnb
-.section spwoa
-.section spwob
-.section spwpa
-.section spwpb
-.section spwqa
-.section spwqb
-.section spwra
-.section spwrb
-.section spwsa
-.section spwsb
-.section spwta
-.section spwtb
-.section spwua
-.section spwub
-.section spwva
-.section spwvb
-.section spwwa
-.section spwwb
-.section spwxa
-.section spwxb
-.section spwya
-.section spwyb
-.section spwza
-.section spwzb
-.section spw1a
-.section spw1b
-.section spw2a
-.section spw2b
-.section spw3a
-.section spw3b
-.section spw4a
-.section spw4b
-.section spw5a
-.section spw5b
-.section spw6a
-.section spw6b
-.section spw7a
-.section spw7b
-.section spw8a
-.section spw8b
-.section spw9a
-.section spw9b
-.section spw0a
-.section spw0b
-.section spxaa
-.section spxab
-.section spxba
-.section spxbb
-.section spxca
-.section spxcb
-.section spxda
-.section spxdb
-.section spxea
-.section spxeb
-.section spxfa
-.section spxfb
-.section spxga
-.section spxgb
-.section spxha
-.section spxhb
-.section spxia
-.section spxib
-.section spxja
-.section spxjb
-.section spxka
-.section spxkb
-.section spxla
-.section spxlb
-.section spxma
-.section spxmb
-.section spxna
-.section spxnb
-.section spxoa
-.section spxob
-.section spxpa
-.section spxpb
-.section spxqa
-.section spxqb
-.section spxra
-.section spxrb
-.section spxsa
-.section spxsb
-.section spxta
-.section spxtb
-.section spxua
-.section spxub
-.section spxva
-.section spxvb
-.section spxwa
-.section spxwb
-.section spxxa
-.section spxxb
-.section spxya
-.section spxyb
-.section spxza
-.section spxzb
-.section spx1a
-.section spx1b
-.section spx2a
-.section spx2b
-.section spx3a
-.section spx3b
-.section spx4a
-.section spx4b
-.section spx5a
-.section spx5b
-.section spx6a
-.section spx6b
-.section spx7a
-.section spx7b
-.section spx8a
-.section spx8b
-.section spx9a
-.section spx9b
-.section spx0a
-.section spx0b
-.section spyaa
-.section spyab
-.section spyba
-.section spybb
-.section spyca
-.section spycb
-.section spyda
-.section spydb
-.section spyea
-.section spyeb
-.section spyfa
-.section spyfb
-.section spyga
-.section spygb
-.section spyha
-.section spyhb
-.section spyia
-.section spyib
-.section spyja
-.section spyjb
-.section spyka
-.section spykb
-.section spyla
-.section spylb
-.section spyma
-.section spymb
-.section spyna
-.section spynb
-.section spyoa
-.section spyob
-.section spypa
-.section spypb
-.section spyqa
-.section spyqb
-.section spyra
-.section spyrb
-.section spysa
-.section spysb
-.section spyta
-.section spytb
-.section spyua
-.section spyub
-.section spyva
-.section spyvb
-.section spywa
-.section spywb
-.section spyxa
-.section spyxb
-.section spyya
-.section spyyb
-.section spyza
-.section spyzb
-.section spy1a
-.section spy1b
-.section spy2a
-.section spy2b
-.section spy3a
-.section spy3b
-.section spy4a
-.section spy4b
-.section spy5a
-.section spy5b
-.section spy6a
-.section spy6b
-.section spy7a
-.section spy7b
-.section spy8a
-.section spy8b
-.section spy9a
-.section spy9b
-.section spy0a
-.section spy0b
-.section spzaa
-.section spzab
-.section spzba
-.section spzbb
-.section spzca
-.section spzcb
-.section spzda
-.section spzdb
-.section spzea
-.section spzeb
-.section spzfa
-.section spzfb
-.section spzga
-.section spzgb
-.section spzha
-.section spzhb
-.section spzia
-.section spzib
-.section spzja
-.section spzjb
-.section spzka
-.section spzkb
-.section spzla
-.section spzlb
-.section spzma
-.section spzmb
-.section spzna
-.section spznb
-.section spzoa
-.section spzob
-.section spzpa
-.section spzpb
-.section spzqa
-.section spzqb
-.section spzra
-.section spzrb
-.section spzsa
-.section spzsb
-.section spzta
-.section spztb
-.section spzua
-.section spzub
-.section spzva
-.section spzvb
-.section spzwa
-.section spzwb
-.section spzxa
-.section spzxb
-.section spzya
-.section spzyb
-.section spzza
-.section spzzb
-.section spz1a
-.section spz1b
-.section spz2a
-.section spz2b
-.section spz3a
-.section spz3b
-.section spz4a
-.section spz4b
-.section spz5a
-.section spz5b
-.section spz6a
-.section spz6b
-.section spz7a
-.section spz7b
-.section spz8a
-.section spz8b
-.section spz9a
-.section spz9b
-.section spz0a
-.section spz0b
-.section sp1aa
-.section sp1ab
-.section sp1ba
-.section sp1bb
-.section sp1ca
-.section sp1cb
-.section sp1da
-.section sp1db
-.section sp1ea
-.section sp1eb
-.section sp1fa
-.section sp1fb
-.section sp1ga
-.section sp1gb
-.section sp1ha
-.section sp1hb
-.section sp1ia
-.section sp1ib
-.section sp1ja
-.section sp1jb
-.section sp1ka
-.section sp1kb
-.section sp1la
-.section sp1lb
-.section sp1ma
-.section sp1mb
-.section sp1na
-.section sp1nb
-.section sp1oa
-.section sp1ob
-.section sp1pa
-.section sp1pb
-.section sp1qa
-.section sp1qb
-.section sp1ra
-.section sp1rb
-.section sp1sa
-.section sp1sb
-.section sp1ta
-.section sp1tb
-.section sp1ua
-.section sp1ub
-.section sp1va
-.section sp1vb
-.section sp1wa
-.section sp1wb
-.section sp1xa
-.section sp1xb
-.section sp1ya
-.section sp1yb
-.section sp1za
-.section sp1zb
-.section sp11a
-.section sp11b
-.section sp12a
-.section sp12b
-.section sp13a
-.section sp13b
-.section sp14a
-.section sp14b
-.section sp15a
-.section sp15b
-.section sp16a
-.section sp16b
-.section sp17a
-.section sp17b
-.section sp18a
-.section sp18b
-.section sp19a
-.section sp19b
-.section sp10a
-.section sp10b
-.section sp2aa
-.section sp2ab
-.section sp2ba
-.section sp2bb
-.section sp2ca
-.section sp2cb
-.section sp2da
-.section sp2db
-.section sp2ea
-.section sp2eb
-.section sp2fa
-.section sp2fb
-.section sp2ga
-.section sp2gb
-.section sp2ha
-.section sp2hb
-.section sp2ia
-.section sp2ib
-.section sp2ja
-.section sp2jb
-.section sp2ka
-.section sp2kb
-.section sp2la
-.section sp2lb
-.section sp2ma
-.section sp2mb
-.section sp2na
-.section sp2nb
-.section sp2oa
-.section sp2ob
-.section sp2pa
-.section sp2pb
-.section sp2qa
-.section sp2qb
-.section sp2ra
-.section sp2rb
-.section sp2sa
-.section sp2sb
-.section sp2ta
-.section sp2tb
-.section sp2ua
-.section sp2ub
-.section sp2va
-.section sp2vb
-.section sp2wa
-.section sp2wb
-.section sp2xa
-.section sp2xb
-.section sp2ya
-.section sp2yb
-.section sp2za
-.section sp2zb
-.section sp21a
-.section sp21b
-.section sp22a
-.section sp22b
-.section sp23a
-.section sp23b
-.section sp24a
-.section sp24b
-.section sp25a
-.section sp25b
-.section sp26a
-.section sp26b
-.section sp27a
-.section sp27b
-.section sp28a
-.section sp28b
-.section sp29a
-.section sp29b
-.section sp20a
-.section sp20b
-.section sp3aa
-.section sp3ab
-.section sp3ba
-.section sp3bb
-.section sp3ca
-.section sp3cb
-.section sp3da
-.section sp3db
-.section sp3ea
-.section sp3eb
-.section sp3fa
-.section sp3fb
-.section sp3ga
-.section sp3gb
-.section sp3ha
-.section sp3hb
-.section sp3ia
-.section sp3ib
-.section sp3ja
-.section sp3jb
-.section sp3ka
-.section sp3kb
-.section sp3la
-.section sp3lb
-.section sp3ma
-.section sp3mb
-.section sp3na
-.section sp3nb
-.section sp3oa
-.section sp3ob
-.section sp3pa
-.section sp3pb
-.section sp3qa
-.section sp3qb
-.section sp3ra
-.section sp3rb
-.section sp3sa
-.section sp3sb
-.section sp3ta
-.section sp3tb
-.section sp3ua
-.section sp3ub
-.section sp3va
-.section sp3vb
-.section sp3wa
-.section sp3wb
-.section sp3xa
-.section sp3xb
-.section sp3ya
-.section sp3yb
-.section sp3za
-.section sp3zb
-.section sp31a
-.section sp31b
-.section sp32a
-.section sp32b
-.section sp33a
-.section sp33b
-.section sp34a
-.section sp34b
-.section sp35a
-.section sp35b
-.section sp36a
-.section sp36b
-.section sp37a
-.section sp37b
-.section sp38a
-.section sp38b
-.section sp39a
-.section sp39b
-.section sp30a
-.section sp30b
-.section sp4aa
-.section sp4ab
-.section sp4ba
-.section sp4bb
-.section sp4ca
-.section sp4cb
-.section sp4da
-.section sp4db
-.section sp4ea
-.section sp4eb
-.section sp4fa
-.section sp4fb
-.section sp4ga
-.section sp4gb
-.section sp4ha
-.section sp4hb
-.section sp4ia
-.section sp4ib
-.section sp4ja
-.section sp4jb
-.section sp4ka
-.section sp4kb
-.section sp4la
-.section sp4lb
-.section sp4ma
-.section sp4mb
-.section sp4na
-.section sp4nb
-.section sp4oa
-.section sp4ob
-.section sp4pa
-.section sp4pb
-.section sp4qa
-.section sp4qb
-.section sp4ra
-.section sp4rb
-.section sp4sa
-.section sp4sb
-.section sp4ta
-.section sp4tb
-.section sp4ua
-.section sp4ub
-.section sp4va
-.section sp4vb
-.section sp4wa
-.section sp4wb
-.section sp4xa
-.section sp4xb
-.section sp4ya
-.section sp4yb
-.section sp4za
-.section sp4zb
-.section sp41a
-.section sp41b
-.section sp42a
-.section sp42b
-.section sp43a
-.section sp43b
-.section sp44a
-.section sp44b
-.section sp45a
-.section sp45b
-.section sp46a
-.section sp46b
-.section sp47a
-.section sp47b
-.section sp48a
-.section sp48b
-.section sp49a
-.section sp49b
-.section sp40a
-.section sp40b
-.section sp5aa
-.section sp5ab
-.section sp5ba
-.section sp5bb
-.section sp5ca
-.section sp5cb
-.section sp5da
-.section sp5db
-.section sp5ea
-.section sp5eb
-.section sp5fa
-.section sp5fb
-.section sp5ga
-.section sp5gb
-.section sp5ha
-.section sp5hb
-.section sp5ia
-.section sp5ib
-.section sp5ja
-.section sp5jb
-.section sp5ka
-.section sp5kb
-.section sp5la
-.section sp5lb
-.section sp5ma
-.section sp5mb
-.section sp5na
-.section sp5nb
-.section sp5oa
-.section sp5ob
-.section sp5pa
-.section sp5pb
-.section sp5qa
-.section sp5qb
-.section sp5ra
-.section sp5rb
-.section sp5sa
-.section sp5sb
-.section sp5ta
-.section sp5tb
-.section sp5ua
-.section sp5ub
-.section sp5va
-.section sp5vb
-.section sp5wa
-.section sp5wb
-.section sp5xa
-.section sp5xb
-.section sp5ya
-.section sp5yb
-.section sp5za
-.section sp5zb
-.section sp51a
-.section sp51b
-.section sp52a
-.section sp52b
-.section sp53a
-.section sp53b
-.section sp54a
-.section sp54b
-.section sp55a
-.section sp55b
-.section sp56a
-.section sp56b
-.section sp57a
-.section sp57b
-.section sp58a
-.section sp58b
-.section sp59a
-.section sp59b
-.section sp50a
-.section sp50b
-.section sp6aa
-.section sp6ab
-.section sp6ba
-.section sp6bb
-.section sp6ca
-.section sp6cb
-.section sp6da
-.section sp6db
-.section sp6ea
-.section sp6eb
-.section sp6fa
-.section sp6fb
-.section sp6ga
-.section sp6gb
-.section sp6ha
-.section sp6hb
-.section sp6ia
-.section sp6ib
-.section sp6ja
-.section sp6jb
-.section sp6ka
-.section sp6kb
-.section sp6la
-.section sp6lb
-.section sp6ma
-.section sp6mb
-.section sp6na
-.section sp6nb
-.section sp6oa
-.section sp6ob
-.section sp6pa
-.section sp6pb
-.section sp6qa
-.section sp6qb
-.section sp6ra
-.section sp6rb
-.section sp6sa
-.section sp6sb
-.section sp6ta
-.section sp6tb
-.section sp6ua
-.section sp6ub
-.section sp6va
-.section sp6vb
-.section sp6wa
-.section sp6wb
-.section sp6xa
-.section sp6xb
-.section sp6ya
-.section sp6yb
-.section sp6za
-.section sp6zb
-.section sp61a
-.section sp61b
-.section sp62a
-.section sp62b
-.section sp63a
-.section sp63b
-.section sp64a
-.section sp64b
-.section sp65a
-.section sp65b
-.section sp66a
-.section sp66b
-.section sp67a
-.section sp67b
-.section sp68a
-.section sp68b
-.section sp69a
-.section sp69b
-.section sp60a
-.section sp60b
-.section sp7aa
-.section sp7ab
-.section sp7ba
-.section sp7bb
-.section sp7ca
-.section sp7cb
-.section sp7da
-.section sp7db
-.section sp7ea
-.section sp7eb
-.section sp7fa
-.section sp7fb
-.section sp7ga
-.section sp7gb
-.section sp7ha
-.section sp7hb
-.section sp7ia
-.section sp7ib
-.section sp7ja
-.section sp7jb
-.section sp7ka
-.section sp7kb
-.section sp7la
-.section sp7lb
-.section sp7ma
-.section sp7mb
-.section sp7na
-.section sp7nb
-.section sp7oa
-.section sp7ob
-.section sp7pa
-.section sp7pb
-.section sp7qa
-.section sp7qb
-.section sp7ra
-.section sp7rb
-.section sp7sa
-.section sp7sb
-.section sp7ta
-.section sp7tb
-.section sp7ua
-.section sp7ub
-.section sp7va
-.section sp7vb
-.section sp7wa
-.section sp7wb
-.section sp7xa
-.section sp7xb
-.section sp7ya
-.section sp7yb
-.section sp7za
-.section sp7zb
-.section sp71a
-.section sp71b
-.section sp72a
-.section sp72b
-.section sp73a
-.section sp73b
-.section sp74a
-.section sp74b
-.section sp75a
-.section sp75b
-.section sp76a
-.section sp76b
-.section sp77a
-.section sp77b
-.section sp78a
-.section sp78b
-.section sp79a
-.section sp79b
-.section sp70a
-.section sp70b
-.section sp8aa
-.section sp8ab
-.section sp8ba
-.section sp8bb
-.section sp8ca
-.section sp8cb
-.section sp8da
-.section sp8db
-.section sp8ea
-.section sp8eb
-.section sp8fa
-.section sp8fb
-.section sp8ga
-.section sp8gb
-.section sp8ha
-.section sp8hb
-.section sp8ia
-.section sp8ib
-.section sp8ja
-.section sp8jb
-.section sp8ka
-.section sp8kb
-.section sp8la
-.section sp8lb
-.section sp8ma
-.section sp8mb
-.section sp8na
-.section sp8nb
-.section sp8oa
-.section sp8ob
-.section sp8pa
-.section sp8pb
-.section sp8qa
-.section sp8qb
-.section sp8ra
-.section sp8rb
-.section sp8sa
-.section sp8sb
-.section sp8ta
-.section sp8tb
-.section sp8ua
-.section sp8ub
-.section sp8va
-.section sp8vb
-.section sp8wa
-.section sp8wb
-.section sp8xa
-.section sp8xb
-.section sp8ya
-.section sp8yb
-.section sp8za
-.section sp8zb
-.section sp81a
-.section sp81b
-.section sp82a
-.section sp82b
-.section sp83a
-.section sp83b
-.section sp84a
-.section sp84b
-.section sp85a
-.section sp85b
-.section sp86a
-.section sp86b
-.section sp87a
-.section sp87b
-.section sp88a
-.section sp88b
-.section sp89a
-.section sp89b
-.section sp80a
-.section sp80b
-.section sp9aa
-.section sp9ab
-.section sp9ba
-.section sp9bb
-.section sp9ca
-.section sp9cb
-.section sp9da
-.section sp9db
-.section sp9ea
-.section sp9eb
-.section sp9fa
-.section sp9fb
-.section sp9ga
-.section sp9gb
-.section sp9ha
-.section sp9hb
-.section sp9ia
-.section sp9ib
-.section sp9ja
-.section sp9jb
-.section sp9ka
-.section sp9kb
-.section sp9la
-.section sp9lb
-.section sp9ma
-.section sp9mb
-.section sp9na
-.section sp9nb
-.section sp9oa
-.section sp9ob
-.section sp9pa
-.section sp9pb
-.section sp9qa
-.section sp9qb
-.section sp9ra
-.section sp9rb
-.section sp9sa
-.section sp9sb
-.section sp9ta
-.section sp9tb
-.section sp9ua
-.section sp9ub
-.section sp9va
-.section sp9vb
-.section sp9wa
-.section sp9wb
-.section sp9xa
-.section sp9xb
-.section sp9ya
-.section sp9yb
-.section sp9za
-.section sp9zb
-.section sp91a
-.section sp91b
-.section sp92a
-.section sp92b
-.section sp93a
-.section sp93b
-.section sp94a
-.section sp94b
-.section sp95a
-.section sp95b
-.section sp96a
-.section sp96b
-.section sp97a
-.section sp97b
-.section sp98a
-.section sp98b
-.section sp99a
-.section sp99b
-.section sp90a
-.section sp90b
-.section sp0aa
-.section sp0ab
-.section sp0ba
-.section sp0bb
-.section sp0ca
-.section sp0cb
-.section sp0da
-.section sp0db
-.section sp0ea
-.section sp0eb
-.section sp0fa
-.section sp0fb
-.section sp0ga
-.section sp0gb
-.section sp0ha
-.section sp0hb
-.section sp0ia
-.section sp0ib
-.section sp0ja
-.section sp0jb
-.section sp0ka
-.section sp0kb
-.section sp0la
-.section sp0lb
-.section sp0ma
-.section sp0mb
-.section sp0na
-.section sp0nb
-.section sp0oa
-.section sp0ob
-.section sp0pa
-.section sp0pb
-.section sp0qa
-.section sp0qb
-.section sp0ra
-.section sp0rb
-.section sp0sa
-.section sp0sb
-.section sp0ta
-.section sp0tb
-.section sp0ua
-.section sp0ub
-.section sp0va
-.section sp0vb
-.section sp0wa
-.section sp0wb
-.section sp0xa
-.section sp0xb
-.section sp0ya
-.section sp0yb
-.section sp0za
-.section sp0zb
-.section sp01a
-.section sp01b
-.section sp02a
-.section sp02b
-.section sp03a
-.section sp03b
-.section sp04a
-.section sp04b
-.section sp05a
-.section sp05b
-.section sp06a
-.section sp06b
-.section sp07a
-.section sp07b
-.section sp08a
-.section sp08b
-.section sp09a
-.section sp09b
-.section sp00a
-.section sp00b
-.section sqaaa
-.section sqaab
-.section sqaba
-.section sqabb
-.section sqaca
-.section sqacb
-.section sqada
-.section sqadb
-.section sqaea
-.section sqaeb
-.section sqafa
-.section sqafb
-.section sqaga
-.section sqagb
-.section sqaha
-.section sqahb
-.section sqaia
-.section sqaib
-.section sqaja
-.section sqajb
-.section sqaka
-.section sqakb
-.section sqala
-.section sqalb
-.section sqama
-.section sqamb
-.section sqana
-.section sqanb
-.section sqaoa
-.section sqaob
-.section sqapa
-.section sqapb
-.section sqaqa
-.section sqaqb
-.section sqara
-.section sqarb
-.section sqasa
-.section sqasb
-.section sqata
-.section sqatb
-.section sqaua
-.section sqaub
-.section sqava
-.section sqavb
-.section sqawa
-.section sqawb
-.section sqaxa
-.section sqaxb
-.section sqaya
-.section sqayb
-.section sqaza
-.section sqazb
-.section sqa1a
-.section sqa1b
-.section sqa2a
-.section sqa2b
-.section sqa3a
-.section sqa3b
-.section sqa4a
-.section sqa4b
-.section sqa5a
-.section sqa5b
-.section sqa6a
-.section sqa6b
-.section sqa7a
-.section sqa7b
-.section sqa8a
-.section sqa8b
-.section sqa9a
-.section sqa9b
-.section sqa0a
-.section sqa0b
-.section sqbaa
-.section sqbab
-.section sqbba
-.section sqbbb
-.section sqbca
-.section sqbcb
-.section sqbda
-.section sqbdb
-.section sqbea
-.section sqbeb
-.section sqbfa
-.section sqbfb
-.section sqbga
-.section sqbgb
-.section sqbha
-.section sqbhb
-.section sqbia
-.section sqbib
-.section sqbja
-.section sqbjb
-.section sqbka
-.section sqbkb
-.section sqbla
-.section sqblb
-.section sqbma
-.section sqbmb
-.section sqbna
-.section sqbnb
-.section sqboa
-.section sqbob
-.section sqbpa
-.section sqbpb
-.section sqbqa
-.section sqbqb
-.section sqbra
-.section sqbrb
-.section sqbsa
-.section sqbsb
-.section sqbta
-.section sqbtb
-.section sqbua
-.section sqbub
-.section sqbva
-.section sqbvb
-.section sqbwa
-.section sqbwb
-.section sqbxa
-.section sqbxb
-.section sqbya
-.section sqbyb
-.section sqbza
-.section sqbzb
-.section sqb1a
-.section sqb1b
-.section sqb2a
-.section sqb2b
-.section sqb3a
-.section sqb3b
-.section sqb4a
-.section sqb4b
-.section sqb5a
-.section sqb5b
-.section sqb6a
-.section sqb6b
-.section sqb7a
-.section sqb7b
-.section sqb8a
-.section sqb8b
-.section sqb9a
-.section sqb9b
-.section sqb0a
-.section sqb0b
-.section sqcaa
-.section sqcab
-.section sqcba
-.section sqcbb
-.section sqcca
-.section sqccb
-.section sqcda
-.section sqcdb
-.section sqcea
-.section sqceb
-.section sqcfa
-.section sqcfb
-.section sqcga
-.section sqcgb
-.section sqcha
-.section sqchb
-.section sqcia
-.section sqcib
-.section sqcja
-.section sqcjb
-.section sqcka
-.section sqckb
-.section sqcla
-.section sqclb
-.section sqcma
-.section sqcmb
-.section sqcna
-.section sqcnb
-.section sqcoa
-.section sqcob
-.section sqcpa
-.section sqcpb
-.section sqcqa
-.section sqcqb
-.section sqcra
-.section sqcrb
-.section sqcsa
-.section sqcsb
-.section sqcta
-.section sqctb
-.section sqcua
-.section sqcub
-.section sqcva
-.section sqcvb
-.section sqcwa
-.section sqcwb
-.section sqcxa
-.section sqcxb
-.section sqcya
-.section sqcyb
-.section sqcza
-.section sqczb
-.section sqc1a
-.section sqc1b
-.section sqc2a
-.section sqc2b
-.section sqc3a
-.section sqc3b
-.section sqc4a
-.section sqc4b
-.section sqc5a
-.section sqc5b
-.section sqc6a
-.section sqc6b
-.section sqc7a
-.section sqc7b
-.section sqc8a
-.section sqc8b
-.section sqc9a
-.section sqc9b
-.section sqc0a
-.section sqc0b
-.section sqdaa
-.section sqdab
-.section sqdba
-.section sqdbb
-.section sqdca
-.section sqdcb
-.section sqdda
-.section sqddb
-.section sqdea
-.section sqdeb
-.section sqdfa
-.section sqdfb
-.section sqdga
-.section sqdgb
-.section sqdha
-.section sqdhb
-.section sqdia
-.section sqdib
-.section sqdja
-.section sqdjb
-.section sqdka
-.section sqdkb
-.section sqdla
-.section sqdlb
-.section sqdma
-.section sqdmb
-.section sqdna
-.section sqdnb
-.section sqdoa
-.section sqdob
-.section sqdpa
-.section sqdpb
-.section sqdqa
-.section sqdqb
-.section sqdra
-.section sqdrb
-.section sqdsa
-.section sqdsb
-.section sqdta
-.section sqdtb
-.section sqdua
-.section sqdub
-.section sqdva
-.section sqdvb
-.section sqdwa
-.section sqdwb
-.section sqdxa
-.section sqdxb
-.section sqdya
-.section sqdyb
-.section sqdza
-.section sqdzb
-.section sqd1a
-.section sqd1b
-.section sqd2a
-.section sqd2b
-.section sqd3a
-.section sqd3b
-.section sqd4a
-.section sqd4b
-.section sqd5a
-.section sqd5b
-.section sqd6a
-.section sqd6b
-.section sqd7a
-.section sqd7b
-.section sqd8a
-.section sqd8b
-.section sqd9a
-.section sqd9b
-.section sqd0a
-.section sqd0b
-.section sqeaa
-.section sqeab
-.section sqeba
-.section sqebb
-.section sqeca
-.section sqecb
-.section sqeda
-.section sqedb
-.section sqeea
-.section sqeeb
-.section sqefa
-.section sqefb
-.section sqega
-.section sqegb
-.section sqeha
-.section sqehb
-.section sqeia
-.section sqeib
-.section sqeja
-.section sqejb
-.section sqeka
-.section sqekb
-.section sqela
-.section sqelb
-.section sqema
-.section sqemb
-.section sqena
-.section sqenb
-.section sqeoa
-.section sqeob
-.section sqepa
-.section sqepb
-.section sqeqa
-.section sqeqb
-.section sqera
-.section sqerb
-.section sqesa
-.section sqesb
-.section sqeta
-.section sqetb
-.section sqeua
-.section sqeub
-.section sqeva
-.section sqevb
-.section sqewa
-.section sqewb
-.section sqexa
-.section sqexb
-.section sqeya
-.section sqeyb
-.section sqeza
-.section sqezb
-.section sqe1a
-.section sqe1b
-.section sqe2a
-.section sqe2b
-.section sqe3a
-.section sqe3b
-.section sqe4a
-.section sqe4b
-.section sqe5a
-.section sqe5b
-.section sqe6a
-.section sqe6b
-.section sqe7a
-.section sqe7b
-.section sqe8a
-.section sqe8b
-.section sqe9a
-.section sqe9b
-.section sqe0a
-.section sqe0b
-.section sqfaa
-.section sqfab
-.section sqfba
-.section sqfbb
-.section sqfca
-.section sqfcb
-.section sqfda
-.section sqfdb
-.section sqfea
-.section sqfeb
-.section sqffa
-.section sqffb
-.section sqfga
-.section sqfgb
-.section sqfha
-.section sqfhb
-.section sqfia
-.section sqfib
-.section sqfja
-.section sqfjb
-.section sqfka
-.section sqfkb
-.section sqfla
-.section sqflb
-.section sqfma
-.section sqfmb
-.section sqfna
-.section sqfnb
-.section sqfoa
-.section sqfob
-.section sqfpa
-.section sqfpb
-.section sqfqa
-.section sqfqb
-.section sqfra
-.section sqfrb
-.section sqfsa
-.section sqfsb
-.section sqfta
-.section sqftb
-.section sqfua
-.section sqfub
-.section sqfva
-.section sqfvb
-.section sqfwa
-.section sqfwb
-.section sqfxa
-.section sqfxb
-.section sqfya
-.section sqfyb
-.section sqfza
-.section sqfzb
-.section sqf1a
-.section sqf1b
-.section sqf2a
-.section sqf2b
-.section sqf3a
-.section sqf3b
-.section sqf4a
-.section sqf4b
-.section sqf5a
-.section sqf5b
-.section sqf6a
-.section sqf6b
-.section sqf7a
-.section sqf7b
-.section sqf8a
-.section sqf8b
-.section sqf9a
-.section sqf9b
-.section sqf0a
-.section sqf0b
-.section sqgaa
-.section sqgab
-.section sqgba
-.section sqgbb
-.section sqgca
-.section sqgcb
-.section sqgda
-.section sqgdb
-.section sqgea
-.section sqgeb
-.section sqgfa
-.section sqgfb
-.section sqgga
-.section sqggb
-.section sqgha
-.section sqghb
-.section sqgia
-.section sqgib
-.section sqgja
-.section sqgjb
-.section sqgka
-.section sqgkb
-.section sqgla
-.section sqglb
-.section sqgma
-.section sqgmb
-.section sqgna
-.section sqgnb
-.section sqgoa
-.section sqgob
-.section sqgpa
-.section sqgpb
-.section sqgqa
-.section sqgqb
-.section sqgra
-.section sqgrb
-.section sqgsa
-.section sqgsb
-.section sqgta
-.section sqgtb
-.section sqgua
-.section sqgub
-.section sqgva
-.section sqgvb
-.section sqgwa
-.section sqgwb
-.section sqgxa
-.section sqgxb
-.section sqgya
-.section sqgyb
-.section sqgza
-.section sqgzb
-.section sqg1a
-.section sqg1b
-.section sqg2a
-.section sqg2b
-.section sqg3a
-.section sqg3b
-.section sqg4a
-.section sqg4b
-.section sqg5a
-.section sqg5b
-.section sqg6a
-.section sqg6b
-.section sqg7a
-.section sqg7b
-.section sqg8a
-.section sqg8b
-.section sqg9a
-.section sqg9b
-.section sqg0a
-.section sqg0b
-.section sqhaa
-.section sqhab
-.section sqhba
-.section sqhbb
-.section sqhca
-.section sqhcb
-.section sqhda
-.section sqhdb
-.section sqhea
-.section sqheb
-.section sqhfa
-.section sqhfb
-.section sqhga
-.section sqhgb
-.section sqhha
-.section sqhhb
-.section sqhia
-.section sqhib
-.section sqhja
-.section sqhjb
-.section sqhka
-.section sqhkb
-.section sqhla
-.section sqhlb
-.section sqhma
-.section sqhmb
-.section sqhna
-.section sqhnb
-.section sqhoa
-.section sqhob
-.section sqhpa
-.section sqhpb
-.section sqhqa
-.section sqhqb
-.section sqhra
-.section sqhrb
-.section sqhsa
-.section sqhsb
-.section sqhta
-.section sqhtb
-.section sqhua
-.section sqhub
-.section sqhva
-.section sqhvb
-.section sqhwa
-.section sqhwb
-.section sqhxa
-.section sqhxb
-.section sqhya
-.section sqhyb
-.section sqhza
-.section sqhzb
-.section sqh1a
-.section sqh1b
-.section sqh2a
-.section sqh2b
-.section sqh3a
-.section sqh3b
-.section sqh4a
-.section sqh4b
-.section sqh5a
-.section sqh5b
-.section sqh6a
-.section sqh6b
-.section sqh7a
-.section sqh7b
-.section sqh8a
-.section sqh8b
-.section sqh9a
-.section sqh9b
-.section sqh0a
-.section sqh0b
-.section sqiaa
-.section sqiab
-.section sqiba
-.section sqibb
-.section sqica
-.section sqicb
-.section sqida
-.section sqidb
-.section sqiea
-.section sqieb
-.section sqifa
-.section sqifb
-.section sqiga
-.section sqigb
-.section sqiha
-.section sqihb
-.section sqiia
-.section sqiib
-.section sqija
-.section sqijb
-.section sqika
-.section sqikb
-.section sqila
-.section sqilb
-.section sqima
-.section sqimb
-.section sqina
-.section sqinb
-.section sqioa
-.section sqiob
-.section sqipa
-.section sqipb
-.section sqiqa
-.section sqiqb
-.section sqira
-.section sqirb
-.section sqisa
-.section sqisb
-.section sqita
-.section sqitb
-.section sqiua
-.section sqiub
-.section sqiva
-.section sqivb
-.section sqiwa
-.section sqiwb
-.section sqixa
-.section sqixb
-.section sqiya
-.section sqiyb
-.section sqiza
-.section sqizb
-.section sqi1a
-.section sqi1b
-.section sqi2a
-.section sqi2b
-.section sqi3a
-.section sqi3b
-.section sqi4a
-.section sqi4b
-.section sqi5a
-.section sqi5b
-.section sqi6a
-.section sqi6b
-.section sqi7a
-.section sqi7b
-.section sqi8a
-.section sqi8b
-.section sqi9a
-.section sqi9b
-.section sqi0a
-.section sqi0b
-.section sqjaa
-.section sqjab
-.section sqjba
-.section sqjbb
-.section sqjca
-.section sqjcb
-.section sqjda
-.section sqjdb
-.section sqjea
-.section sqjeb
-.section sqjfa
-.section sqjfb
-.section sqjga
-.section sqjgb
-.section sqjha
-.section sqjhb
-.section sqjia
-.section sqjib
-.section sqjja
-.section sqjjb
-.section sqjka
-.section sqjkb
-.section sqjla
-.section sqjlb
-.section sqjma
-.section sqjmb
-.section sqjna
-.section sqjnb
-.section sqjoa
-.section sqjob
-.section sqjpa
-.section sqjpb
-.section sqjqa
-.section sqjqb
-.section sqjra
-.section sqjrb
-.section sqjsa
-.section sqjsb
-.section sqjta
-.section sqjtb
-.section sqjua
-.section sqjub
-.section sqjva
-.section sqjvb
-.section sqjwa
-.section sqjwb
-.section sqjxa
-.section sqjxb
-.section sqjya
-.section sqjyb
-.section sqjza
-.section sqjzb
-.section sqj1a
-.section sqj1b
-.section sqj2a
-.section sqj2b
-.section sqj3a
-.section sqj3b
-.section sqj4a
-.section sqj4b
-.section sqj5a
-.section sqj5b
-.section sqj6a
-.section sqj6b
-.section sqj7a
-.section sqj7b
-.section sqj8a
-.section sqj8b
-.section sqj9a
-.section sqj9b
-.section sqj0a
-.section sqj0b
-.section sqkaa
-.section sqkab
-.section sqkba
-.section sqkbb
-.section sqkca
-.section sqkcb
-.section sqkda
-.section sqkdb
-.section sqkea
-.section sqkeb
-.section sqkfa
-.section sqkfb
-.section sqkga
-.section sqkgb
-.section sqkha
-.section sqkhb
-.section sqkia
-.section sqkib
-.section sqkja
-.section sqkjb
-.section sqkka
-.section sqkkb
-.section sqkla
-.section sqklb
-.section sqkma
-.section sqkmb
-.section sqkna
-.section sqknb
-.section sqkoa
-.section sqkob
-.section sqkpa
-.section sqkpb
-.section sqkqa
-.section sqkqb
-.section sqkra
-.section sqkrb
-.section sqksa
-.section sqksb
-.section sqkta
-.section sqktb
-.section sqkua
-.section sqkub
-.section sqkva
-.section sqkvb
-.section sqkwa
-.section sqkwb
-.section sqkxa
-.section sqkxb
-.section sqkya
-.section sqkyb
-.section sqkza
-.section sqkzb
-.section sqk1a
-.section sqk1b
-.section sqk2a
-.section sqk2b
-.section sqk3a
-.section sqk3b
-.section sqk4a
-.section sqk4b
-.section sqk5a
-.section sqk5b
-.section sqk6a
-.section sqk6b
-.section sqk7a
-.section sqk7b
-.section sqk8a
-.section sqk8b
-.section sqk9a
-.section sqk9b
-.section sqk0a
-.section sqk0b
-.section sqlaa
-.section sqlab
-.section sqlba
-.section sqlbb
-.section sqlca
-.section sqlcb
-.section sqlda
-.section sqldb
-.section sqlea
-.section sqleb
-.section sqlfa
-.section sqlfb
-.section sqlga
-.section sqlgb
-.section sqlha
-.section sqlhb
-.section sqlia
-.section sqlib
-.section sqlja
-.section sqljb
-.section sqlka
-.section sqlkb
-.section sqlla
-.section sqllb
-.section sqlma
-.section sqlmb
-.section sqlna
-.section sqlnb
-.section sqloa
-.section sqlob
-.section sqlpa
-.section sqlpb
-.section sqlqa
-.section sqlqb
-.section sqlra
-.section sqlrb
-.section sqlsa
-.section sqlsb
-.section sqlta
-.section sqltb
-.section sqlua
-.section sqlub
-.section sqlva
-.section sqlvb
-.section sqlwa
-.section sqlwb
-.section sqlxa
-.section sqlxb
-.section sqlya
-.section sqlyb
-.section sqlza
-.section sqlzb
-.section sql1a
-.section sql1b
-.section sql2a
-.section sql2b
-.section sql3a
-.section sql3b
-.section sql4a
-.section sql4b
-.section sql5a
-.section sql5b
-.section sql6a
-.section sql6b
-.section sql7a
-.section sql7b
-.section sql8a
-.section sql8b
-.section sql9a
-.section sql9b
-.section sql0a
-.section sql0b
-.section sqmaa
-.section sqmab
-.section sqmba
-.section sqmbb
-.section sqmca
-.section sqmcb
-.section sqmda
-.section sqmdb
-.section sqmea
-.section sqmeb
-.section sqmfa
-.section sqmfb
-.section sqmga
-.section sqmgb
-.section sqmha
-.section sqmhb
-.section sqmia
-.section sqmib
-.section sqmja
-.section sqmjb
-.section sqmka
-.section sqmkb
-.section sqmla
-.section sqmlb
-.section sqmma
-.section sqmmb
-.section sqmna
-.section sqmnb
-.section sqmoa
-.section sqmob
-.section sqmpa
-.section sqmpb
-.section sqmqa
-.section sqmqb
-.section sqmra
-.section sqmrb
-.section sqmsa
-.section sqmsb
-.section sqmta
-.section sqmtb
-.section sqmua
-.section sqmub
-.section sqmva
-.section sqmvb
-.section sqmwa
-.section sqmwb
-.section sqmxa
-.section sqmxb
-.section sqmya
-.section sqmyb
-.section sqmza
-.section sqmzb
-.section sqm1a
-.section sqm1b
-.section sqm2a
-.section sqm2b
-.section sqm3a
-.section sqm3b
-.section sqm4a
-.section sqm4b
-.section sqm5a
-.section sqm5b
-.section sqm6a
-.section sqm6b
-.section sqm7a
-.section sqm7b
-.section sqm8a
-.section sqm8b
-.section sqm9a
-.section sqm9b
-.section sqm0a
-.section sqm0b
-.section sqnaa
-.section sqnab
-.section sqnba
-.section sqnbb
-.section sqnca
-.section sqncb
-.section sqnda
-.section sqndb
-.section sqnea
-.section sqneb
-.section sqnfa
-.section sqnfb
-.section sqnga
-.section sqngb
-.section sqnha
-.section sqnhb
-.section sqnia
-.section sqnib
-.section sqnja
-.section sqnjb
-.section sqnka
-.section sqnkb
-.section sqnla
-.section sqnlb
-.section sqnma
-.section sqnmb
-.section sqnna
-.section sqnnb
-.section sqnoa
-.section sqnob
-.section sqnpa
-.section sqnpb
-.section sqnqa
-.section sqnqb
-.section sqnra
-.section sqnrb
-.section sqnsa
-.section sqnsb
-.section sqnta
-.section sqntb
-.section sqnua
-.section sqnub
-.section sqnva
-.section sqnvb
-.section sqnwa
-.section sqnwb
-.section sqnxa
-.section sqnxb
-.section sqnya
-.section sqnyb
-.section sqnza
-.section sqnzb
-.section sqn1a
-.section sqn1b
-.section sqn2a
-.section sqn2b
-.section sqn3a
-.section sqn3b
-.section sqn4a
-.section sqn4b
-.section sqn5a
-.section sqn5b
-.section sqn6a
-.section sqn6b
-.section sqn7a
-.section sqn7b
-.section sqn8a
-.section sqn8b
-.section sqn9a
-.section sqn9b
-.section sqn0a
-.section sqn0b
-.section sqoaa
-.section sqoab
-.section sqoba
-.section sqobb
-.section sqoca
-.section sqocb
-.section sqoda
-.section sqodb
-.section sqoea
-.section sqoeb
-.section sqofa
-.section sqofb
-.section sqoga
-.section sqogb
-.section sqoha
-.section sqohb
-.section sqoia
-.section sqoib
-.section sqoja
-.section sqojb
-.section sqoka
-.section sqokb
-.section sqola
-.section sqolb
-.section sqoma
-.section sqomb
-.section sqona
-.section sqonb
-.section sqooa
-.section sqoob
-.section sqopa
-.section sqopb
-.section sqoqa
-.section sqoqb
-.section sqora
-.section sqorb
-.section sqosa
-.section sqosb
-.section sqota
-.section sqotb
-.section sqoua
-.section sqoub
-.section sqova
-.section sqovb
-.section sqowa
-.section sqowb
-.section sqoxa
-.section sqoxb
-.section sqoya
-.section sqoyb
-.section sqoza
-.section sqozb
-.section sqo1a
-.section sqo1b
-.section sqo2a
-.section sqo2b
-.section sqo3a
-.section sqo3b
-.section sqo4a
-.section sqo4b
-.section sqo5a
-.section sqo5b
-.section sqo6a
-.section sqo6b
-.section sqo7a
-.section sqo7b
-.section sqo8a
-.section sqo8b
-.section sqo9a
-.section sqo9b
-.section sqo0a
-.section sqo0b
-.section sqpaa
-.section sqpab
-.section sqpba
-.section sqpbb
-.section sqpca
-.section sqpcb
-.section sqpda
-.section sqpdb
-.section sqpea
-.section sqpeb
-.section sqpfa
-.section sqpfb
-.section sqpga
-.section sqpgb
-.section sqpha
-.section sqphb
-.section sqpia
-.section sqpib
-.section sqpja
-.section sqpjb
-.section sqpka
-.section sqpkb
-.section sqpla
-.section sqplb
-.section sqpma
-.section sqpmb
-.section sqpna
-.section sqpnb
-.section sqpoa
-.section sqpob
-.section sqppa
-.section sqppb
-.section sqpqa
-.section sqpqb
-.section sqpra
-.section sqprb
-.section sqpsa
-.section sqpsb
-.section sqpta
-.section sqptb
-.section sqpua
-.section sqpub
-.section sqpva
-.section sqpvb
-.section sqpwa
-.section sqpwb
-.section sqpxa
-.section sqpxb
-.section sqpya
-.section sqpyb
-.section sqpza
-.section sqpzb
-.section sqp1a
-.section sqp1b
-.section sqp2a
-.section sqp2b
-.section sqp3a
-.section sqp3b
-.section sqp4a
-.section sqp4b
-.section sqp5a
-.section sqp5b
-.section sqp6a
-.section sqp6b
-.section sqp7a
-.section sqp7b
-.section sqp8a
-.section sqp8b
-.section sqp9a
-.section sqp9b
-.section sqp0a
-.section sqp0b
-.section sqqaa
-.section sqqab
-.section sqqba
-.section sqqbb
-.section sqqca
-.section sqqcb
-.section sqqda
-.section sqqdb
-.section sqqea
-.section sqqeb
-.section sqqfa
-.section sqqfb
-.section sqqga
-.section sqqgb
-.section sqqha
-.section sqqhb
-.section sqqia
-.section sqqib
-.section sqqja
-.section sqqjb
-.section sqqka
-.section sqqkb
-.section sqqla
-.section sqqlb
-.section sqqma
-.section sqqmb
-.section sqqna
-.section sqqnb
-.section sqqoa
-.section sqqob
-.section sqqpa
-.section sqqpb
-.section sqqqa
-.section sqqqb
-.section sqqra
-.section sqqrb
-.section sqqsa
-.section sqqsb
-.section sqqta
-.section sqqtb
-.section sqqua
-.section sqqub
-.section sqqva
-.section sqqvb
-.section sqqwa
-.section sqqwb
-.section sqqxa
-.section sqqxb
-.section sqqya
-.section sqqyb
-.section sqqza
-.section sqqzb
-.section sqq1a
-.section sqq1b
-.section sqq2a
-.section sqq2b
-.section sqq3a
-.section sqq3b
-.section sqq4a
-.section sqq4b
-.section sqq5a
-.section sqq5b
-.section sqq6a
-.section sqq6b
-.section sqq7a
-.section sqq7b
-.section sqq8a
-.section sqq8b
-.section sqq9a
-.section sqq9b
-.section sqq0a
-.section sqq0b
-.section sqraa
-.section sqrab
-.section sqrba
-.section sqrbb
-.section sqrca
-.section sqrcb
-.section sqrda
-.section sqrdb
-.section sqrea
-.section sqreb
-.section sqrfa
-.section sqrfb
-.section sqrga
-.section sqrgb
-.section sqrha
-.section sqrhb
-.section sqria
-.section sqrib
-.section sqrja
-.section sqrjb
-.section sqrka
-.section sqrkb
-.section sqrla
-.section sqrlb
-.section sqrma
-.section sqrmb
-.section sqrna
-.section sqrnb
-.section sqroa
-.section sqrob
-.section sqrpa
-.section sqrpb
-.section sqrqa
-.section sqrqb
-.section sqrra
-.section sqrrb
-.section sqrsa
-.section sqrsb
-.section sqrta
-.section sqrtb
-.section sqrua
-.section sqrub
-.section sqrva
-.section sqrvb
-.section sqrwa
-.section sqrwb
-.section sqrxa
-.section sqrxb
-.section sqrya
-.section sqryb
-.section sqrza
-.section sqrzb
-.section sqr1a
-.section sqr1b
-.section sqr2a
-.section sqr2b
-.section sqr3a
-.section sqr3b
-.section sqr4a
-.section sqr4b
-.section sqr5a
-.section sqr5b
-.section sqr6a
-.section sqr6b
-.section sqr7a
-.section sqr7b
-.section sqr8a
-.section sqr8b
-.section sqr9a
-.section sqr9b
-.section sqr0a
-.section sqr0b
-.section sqsaa
-.section sqsab
-.section sqsba
-.section sqsbb
-.section sqsca
-.section sqscb
-.section sqsda
-.section sqsdb
-.section sqsea
-.section sqseb
-.section sqsfa
-.section sqsfb
-.section sqsga
-.section sqsgb
-.section sqsha
-.section sqshb
-.section sqsia
-.section sqsib
-.section sqsja
-.section sqsjb
-.section sqska
-.section sqskb
-.section sqsla
-.section sqslb
-.section sqsma
-.section sqsmb
-.section sqsna
-.section sqsnb
-.section sqsoa
-.section sqsob
-.section sqspa
-.section sqspb
-.section sqsqa
-.section sqsqb
-.section sqsra
-.section sqsrb
-.section sqssa
-.section sqssb
-.section sqsta
-.section sqstb
-.section sqsua
-.section sqsub
-.section sqsva
-.section sqsvb
-.section sqswa
-.section sqswb
-.section sqsxa
-.section sqsxb
-.section sqsya
-.section sqsyb
-.section sqsza
-.section sqszb
-.section sqs1a
-.section sqs1b
-.section sqs2a
-.section sqs2b
-.section sqs3a
-.section sqs3b
-.section sqs4a
-.section sqs4b
-.section sqs5a
-.section sqs5b
-.section sqs6a
-.section sqs6b
-.section sqs7a
-.section sqs7b
-.section sqs8a
-.section sqs8b
-.section sqs9a
-.section sqs9b
-.section sqs0a
-.section sqs0b
-.section sqtaa
-.section sqtab
-.section sqtba
-.section sqtbb
-.section sqtca
-.section sqtcb
-.section sqtda
-.section sqtdb
-.section sqtea
-.section sqteb
-.section sqtfa
-.section sqtfb
-.section sqtga
-.section sqtgb
-.section sqtha
-.section sqthb
-.section sqtia
-.section sqtib
-.section sqtja
-.section sqtjb
-.section sqtka
-.section sqtkb
-.section sqtla
-.section sqtlb
-.section sqtma
-.section sqtmb
-.section sqtna
-.section sqtnb
-.section sqtoa
-.section sqtob
-.section sqtpa
-.section sqtpb
-.section sqtqa
-.section sqtqb
-.section sqtra
-.section sqtrb
-.section sqtsa
-.section sqtsb
-.section sqtta
-.section sqttb
-.section sqtua
-.section sqtub
-.section sqtva
-.section sqtvb
-.section sqtwa
-.section sqtwb
-.section sqtxa
-.section sqtxb
-.section sqtya
-.section sqtyb
-.section sqtza
-.section sqtzb
-.section sqt1a
-.section sqt1b
-.section sqt2a
-.section sqt2b
-.section sqt3a
-.section sqt3b
-.section sqt4a
-.section sqt4b
-.section sqt5a
-.section sqt5b
-.section sqt6a
-.section sqt6b
-.section sqt7a
-.section sqt7b
-.section sqt8a
-.section sqt8b
-.section sqt9a
-.section sqt9b
-.section sqt0a
-.section sqt0b
-.section squaa
-.section squab
-.section squba
-.section squbb
-.section squca
-.section squcb
-.section squda
-.section squdb
-.section squea
-.section squeb
-.section squfa
-.section squfb
-.section squga
-.section squgb
-.section squha
-.section squhb
-.section squia
-.section squib
-.section squja
-.section squjb
-.section squka
-.section squkb
-.section squla
-.section squlb
-.section squma
-.section squmb
-.section squna
-.section squnb
-.section squoa
-.section squob
-.section squpa
-.section squpb
-.section squqa
-.section squqb
-.section squra
-.section squrb
-.section squsa
-.section squsb
-.section squta
-.section squtb
-.section squua
-.section squub
-.section squva
-.section squvb
-.section squwa
-.section squwb
-.section squxa
-.section squxb
-.section squya
-.section squyb
-.section squza
-.section squzb
-.section squ1a
-.section squ1b
-.section squ2a
-.section squ2b
-.section squ3a
-.section squ3b
-.section squ4a
-.section squ4b
-.section squ5a
-.section squ5b
-.section squ6a
-.section squ6b
-.section squ7a
-.section squ7b
-.section squ8a
-.section squ8b
-.section squ9a
-.section squ9b
-.section squ0a
-.section squ0b
-.section sqvaa
-.section sqvab
-.section sqvba
-.section sqvbb
-.section sqvca
-.section sqvcb
-.section sqvda
-.section sqvdb
-.section sqvea
-.section sqveb
-.section sqvfa
-.section sqvfb
-.section sqvga
-.section sqvgb
-.section sqvha
-.section sqvhb
-.section sqvia
-.section sqvib
-.section sqvja
-.section sqvjb
-.section sqvka
-.section sqvkb
-.section sqvla
-.section sqvlb
-.section sqvma
-.section sqvmb
-.section sqvna
-.section sqvnb
-.section sqvoa
-.section sqvob
-.section sqvpa
-.section sqvpb
-.section sqvqa
-.section sqvqb
-.section sqvra
-.section sqvrb
-.section sqvsa
-.section sqvsb
-.section sqvta
-.section sqvtb
-.section sqvua
-.section sqvub
-.section sqvva
-.section sqvvb
-.section sqvwa
-.section sqvwb
-.section sqvxa
-.section sqvxb
-.section sqvya
-.section sqvyb
-.section sqvza
-.section sqvzb
-.section sqv1a
-.section sqv1b
-.section sqv2a
-.section sqv2b
-.section sqv3a
-.section sqv3b
-.section sqv4a
-.section sqv4b
-.section sqv5a
-.section sqv5b
-.section sqv6a
-.section sqv6b
-.section sqv7a
-.section sqv7b
-.section sqv8a
-.section sqv8b
-.section sqv9a
-.section sqv9b
-.section sqv0a
-.section sqv0b
-.section sqwaa
-.section sqwab
-.section sqwba
-.section sqwbb
-.section sqwca
-.section sqwcb
-.section sqwda
-.section sqwdb
-.section sqwea
-.section sqweb
-.section sqwfa
-.section sqwfb
-.section sqwga
-.section sqwgb
-.section sqwha
-.section sqwhb
-.section sqwia
-.section sqwib
-.section sqwja
-.section sqwjb
-.section sqwka
-.section sqwkb
-.section sqwla
-.section sqwlb
-.section sqwma
-.section sqwmb
-.section sqwna
-.section sqwnb
-.section sqwoa
-.section sqwob
-.section sqwpa
-.section sqwpb
-.section sqwqa
-.section sqwqb
-.section sqwra
-.section sqwrb
-.section sqwsa
-.section sqwsb
-.section sqwta
-.section sqwtb
-.section sqwua
-.section sqwub
-.section sqwva
-.section sqwvb
-.section sqwwa
-.section sqwwb
-.section sqwxa
-.section sqwxb
-.section sqwya
-.section sqwyb
-.section sqwza
-.section sqwzb
-.section sqw1a
-.section sqw1b
-.section sqw2a
-.section sqw2b
-.section sqw3a
-.section sqw3b
-.section sqw4a
-.section sqw4b
-.section sqw5a
-.section sqw5b
-.section sqw6a
-.section sqw6b
-.section sqw7a
-.section sqw7b
-.section sqw8a
-.section sqw8b
-.section sqw9a
-.section sqw9b
-.section sqw0a
-.section sqw0b
-.section sqxaa
-.section sqxab
-.section sqxba
-.section sqxbb
-.section sqxca
-.section sqxcb
-.section sqxda
-.section sqxdb
-.section sqxea
-.section sqxeb
-.section sqxfa
-.section sqxfb
-.section sqxga
-.section sqxgb
-.section sqxha
-.section sqxhb
-.section sqxia
-.section sqxib
-.section sqxja
-.section sqxjb
-.section sqxka
-.section sqxkb
-.section sqxla
-.section sqxlb
-.section sqxma
-.section sqxmb
-.section sqxna
-.section sqxnb
-.section sqxoa
-.section sqxob
-.section sqxpa
-.section sqxpb
-.section sqxqa
-.section sqxqb
-.section sqxra
-.section sqxrb
-.section sqxsa
-.section sqxsb
-.section sqxta
-.section sqxtb
-.section sqxua
-.section sqxub
-.section sqxva
-.section sqxvb
-.section sqxwa
-.section sqxwb
-.section sqxxa
-.section sqxxb
-.section sqxya
-.section sqxyb
-.section sqxza
-.section sqxzb
-.section sqx1a
-.section sqx1b
-.section sqx2a
-.section sqx2b
-.section sqx3a
-.section sqx3b
-.section sqx4a
-.section sqx4b
-.section sqx5a
-.section sqx5b
-.section sqx6a
-.section sqx6b
-.section sqx7a
-.section sqx7b
-.section sqx8a
-.section sqx8b
-.section sqx9a
-.section sqx9b
-.section sqx0a
-.section sqx0b
-.section sqyaa
-.section sqyab
-.section sqyba
-.section sqybb
-.section sqyca
-.section sqycb
-.section sqyda
-.section sqydb
-.section sqyea
-.section sqyeb
-.section sqyfa
-.section sqyfb
-.section sqyga
-.section sqygb
-.section sqyha
-.section sqyhb
-.section sqyia
-.section sqyib
-.section sqyja
-.section sqyjb
-.section sqyka
-.section sqykb
-.section sqyla
-.section sqylb
-.section sqyma
-.section sqymb
-.section sqyna
-.section sqynb
-.section sqyoa
-.section sqyob
-.section sqypa
-.section sqypb
-.section sqyqa
-.section sqyqb
-.section sqyra
-.section sqyrb
-.section sqysa
-.section sqysb
-.section sqyta
-.section sqytb
-.section sqyua
-.section sqyub
-.section sqyva
-.section sqyvb
-.section sqywa
-.section sqywb
-.section sqyxa
-.section sqyxb
-.section sqyya
-.section sqyyb
-.section sqyza
-.section sqyzb
-.section sqy1a
-.section sqy1b
-.section sqy2a
-.section sqy2b
-.section sqy3a
-.section sqy3b
-.section sqy4a
-.section sqy4b
-.section sqy5a
-.section sqy5b
-.section sqy6a
-.section sqy6b
-.section sqy7a
-.section sqy7b
-.section sqy8a
-.section sqy8b
-.section sqy9a
-.section sqy9b
-.section sqy0a
-.section sqy0b
-.section sqzaa
-.section sqzab
-.section sqzba
-.section sqzbb
-.section sqzca
-.section sqzcb
-.section sqzda
-.section sqzdb
-.section sqzea
-.section sqzeb
-.section sqzfa
-.section sqzfb
-.section sqzga
-.section sqzgb
-.section sqzha
-.section sqzhb
-.section sqzia
-.section sqzib
-.section sqzja
-.section sqzjb
-.section sqzka
-.section sqzkb
-.section sqzla
-.section sqzlb
-.section sqzma
-.section sqzmb
-.section sqzna
-.section sqznb
-.section sqzoa
-.section sqzob
-.section sqzpa
-.section sqzpb
-.section sqzqa
-.section sqzqb
-.section sqzra
-.section sqzrb
-.section sqzsa
-.section sqzsb
-.section sqzta
-.section sqztb
-.section sqzua
-.section sqzub
-.section sqzva
-.section sqzvb
-.section sqzwa
-.section sqzwb
-.section sqzxa
-.section sqzxb
-.section sqzya
-.section sqzyb
-.section sqzza
-.section sqzzb
-.section sqz1a
-.section sqz1b
-.section sqz2a
-.section sqz2b
-.section sqz3a
-.section sqz3b
-.section sqz4a
-.section sqz4b
-.section sqz5a
-.section sqz5b
-.section sqz6a
-.section sqz6b
-.section sqz7a
-.section sqz7b
-.section sqz8a
-.section sqz8b
-.section sqz9a
-.section sqz9b
-.section sqz0a
-.section sqz0b
-.section sq1aa
-.section sq1ab
-.section sq1ba
-.section sq1bb
-.section sq1ca
-.section sq1cb
-.section sq1da
-.section sq1db
-.section sq1ea
-.section sq1eb
-.section sq1fa
-.section sq1fb
-.section sq1ga
-.section sq1gb
-.section sq1ha
-.section sq1hb
-.section sq1ia
-.section sq1ib
-.section sq1ja
-.section sq1jb
-.section sq1ka
-.section sq1kb
-.section sq1la
-.section sq1lb
-.section sq1ma
-.section sq1mb
-.section sq1na
-.section sq1nb
-.section sq1oa
-.section sq1ob
-.section sq1pa
-.section sq1pb
-.section sq1qa
-.section sq1qb
-.section sq1ra
-.section sq1rb
-.section sq1sa
-.section sq1sb
-.section sq1ta
-.section sq1tb
-.section sq1ua
-.section sq1ub
-.section sq1va
-.section sq1vb
-.section sq1wa
-.section sq1wb
-.section sq1xa
-.section sq1xb
-.section sq1ya
-.section sq1yb
-.section sq1za
-.section sq1zb
-.section sq11a
-.section sq11b
-.section sq12a
-.section sq12b
-.section sq13a
-.section sq13b
-.section sq14a
-.section sq14b
-.section sq15a
-.section sq15b
-.section sq16a
-.section sq16b
-.section sq17a
-.section sq17b
-.section sq18a
-.section sq18b
-.section sq19a
-.section sq19b
-.section sq10a
-.section sq10b
-.section sq2aa
-.section sq2ab
-.section sq2ba
-.section sq2bb
-.section sq2ca
-.section sq2cb
-.section sq2da
-.section sq2db
-.section sq2ea
-.section sq2eb
-.section sq2fa
-.section sq2fb
-.section sq2ga
-.section sq2gb
-.section sq2ha
-.section sq2hb
-.section sq2ia
-.section sq2ib
-.section sq2ja
-.section sq2jb
-.section sq2ka
-.section sq2kb
-.section sq2la
-.section sq2lb
-.section sq2ma
-.section sq2mb
-.section sq2na
-.section sq2nb
-.section sq2oa
-.section sq2ob
-.section sq2pa
-.section sq2pb
-.section sq2qa
-.section sq2qb
-.section sq2ra
-.section sq2rb
-.section sq2sa
-.section sq2sb
-.section sq2ta
-.section sq2tb
-.section sq2ua
-.section sq2ub
-.section sq2va
-.section sq2vb
-.section sq2wa
-.section sq2wb
-.section sq2xa
-.section sq2xb
-.section sq2ya
-.section sq2yb
-.section sq2za
-.section sq2zb
-.section sq21a
-.section sq21b
-.section sq22a
-.section sq22b
-.section sq23a
-.section sq23b
-.section sq24a
-.section sq24b
-.section sq25a
-.section sq25b
-.section sq26a
-.section sq26b
-.section sq27a
-.section sq27b
-.section sq28a
-.section sq28b
-.section sq29a
-.section sq29b
-.section sq20a
-.section sq20b
-.section sq3aa
-.section sq3ab
-.section sq3ba
-.section sq3bb
-.section sq3ca
-.section sq3cb
-.section sq3da
-.section sq3db
-.section sq3ea
-.section sq3eb
-.section sq3fa
-.section sq3fb
-.section sq3ga
-.section sq3gb
-.section sq3ha
-.section sq3hb
-.section sq3ia
-.section sq3ib
-.section sq3ja
-.section sq3jb
-.section sq3ka
-.section sq3kb
-.section sq3la
-.section sq3lb
-.section sq3ma
-.section sq3mb
-.section sq3na
-.section sq3nb
-.section sq3oa
-.section sq3ob
-.section sq3pa
-.section sq3pb
-.section sq3qa
-.section sq3qb
-.section sq3ra
-.section sq3rb
-.section sq3sa
-.section sq3sb
-.section sq3ta
-.section sq3tb
-.section sq3ua
-.section sq3ub
-.section sq3va
-.section sq3vb
-.section sq3wa
-.section sq3wb
-.section sq3xa
-.section sq3xb
-.section sq3ya
-.section sq3yb
-.section sq3za
-.section sq3zb
-.section sq31a
-.section sq31b
-.section sq32a
-.section sq32b
-.section sq33a
-.section sq33b
-.section sq34a
-.section sq34b
-.section sq35a
-.section sq35b
-.section sq36a
-.section sq36b
-.section sq37a
-.section sq37b
-.section sq38a
-.section sq38b
-.section sq39a
-.section sq39b
-.section sq30a
-.section sq30b
-.section sq4aa
-.section sq4ab
-.section sq4ba
-.section sq4bb
-.section sq4ca
-.section sq4cb
-.section sq4da
-.section sq4db
-.section sq4ea
-.section sq4eb
-.section sq4fa
-.section sq4fb
-.section sq4ga
-.section sq4gb
-.section sq4ha
-.section sq4hb
-.section sq4ia
-.section sq4ib
-.section sq4ja
-.section sq4jb
-.section sq4ka
-.section sq4kb
-.section sq4la
-.section sq4lb
-.section sq4ma
-.section sq4mb
-.section sq4na
-.section sq4nb
-.section sq4oa
-.section sq4ob
-.section sq4pa
-.section sq4pb
-.section sq4qa
-.section sq4qb
-.section sq4ra
-.section sq4rb
-.section sq4sa
-.section sq4sb
-.section sq4ta
-.section sq4tb
-.section sq4ua
-.section sq4ub
-.section sq4va
-.section sq4vb
-.section sq4wa
-.section sq4wb
-.section sq4xa
-.section sq4xb
-.section sq4ya
-.section sq4yb
-.section sq4za
-.section sq4zb
-.section sq41a
-.section sq41b
-.section sq42a
-.section sq42b
-.section sq43a
-.section sq43b
-.section sq44a
-.section sq44b
-.section sq45a
-.section sq45b
-.section sq46a
-.section sq46b
-.section sq47a
-.section sq47b
-.section sq48a
-.section sq48b
-.section sq49a
-.section sq49b
-.section sq40a
-.section sq40b
-.section sq5aa
-.section sq5ab
-.section sq5ba
-.section sq5bb
-.section sq5ca
-.section sq5cb
-.section sq5da
-.section sq5db
-.section sq5ea
-.section sq5eb
-.section sq5fa
-.section sq5fb
-.section sq5ga
-.section sq5gb
-.section sq5ha
-.section sq5hb
-.section sq5ia
-.section sq5ib
-.section sq5ja
-.section sq5jb
-.section sq5ka
-.section sq5kb
-.section sq5la
-.section sq5lb
-.section sq5ma
-.section sq5mb
-.section sq5na
-.section sq5nb
-.section sq5oa
-.section sq5ob
-.section sq5pa
-.section sq5pb
-.section sq5qa
-.section sq5qb
-.section sq5ra
-.section sq5rb
-.section sq5sa
-.section sq5sb
-.section sq5ta
-.section sq5tb
-.section sq5ua
-.section sq5ub
-.section sq5va
-.section sq5vb
-.section sq5wa
-.section sq5wb
-.section sq5xa
-.section sq5xb
-.section sq5ya
-.section sq5yb
-.section sq5za
-.section sq5zb
-.section sq51a
-.section sq51b
-.section sq52a
-.section sq52b
-.section sq53a
-.section sq53b
-.section sq54a
-.section sq54b
-.section sq55a
-.section sq55b
-.section sq56a
-.section sq56b
-.section sq57a
-.section sq57b
-.section sq58a
-.section sq58b
-.section sq59a
-.section sq59b
-.section sq50a
-.section sq50b
-.section sq6aa
-.section sq6ab
-.section sq6ba
-.section sq6bb
-.section sq6ca
-.section sq6cb
-.section sq6da
-.section sq6db
-.section sq6ea
-.section sq6eb
-.section sq6fa
-.section sq6fb
-.section sq6ga
-.section sq6gb
-.section sq6ha
-.section sq6hb
-.section sq6ia
-.section sq6ib
-.section sq6ja
-.section sq6jb
-.section sq6ka
-.section sq6kb
-.section sq6la
-.section sq6lb
-.section sq6ma
-.section sq6mb
-.section sq6na
-.section sq6nb
-.section sq6oa
-.section sq6ob
-.section sq6pa
-.section sq6pb
-.section sq6qa
-.section sq6qb
-.section sq6ra
-.section sq6rb
-.section sq6sa
-.section sq6sb
-.section sq6ta
-.section sq6tb
-.section sq6ua
-.section sq6ub
-.section sq6va
-.section sq6vb
-.section sq6wa
-.section sq6wb
-.section sq6xa
-.section sq6xb
-.section sq6ya
-.section sq6yb
-.section sq6za
-.section sq6zb
-.section sq61a
-.section sq61b
-.section sq62a
-.section sq62b
-.section sq63a
-.section sq63b
-.section sq64a
-.section sq64b
-.section sq65a
-.section sq65b
-.section sq66a
-.section sq66b
-.section sq67a
-.section sq67b
-.section sq68a
-.section sq68b
-.section sq69a
-.section sq69b
-.section sq60a
-.section sq60b
-.section sq7aa
-.section sq7ab
-.section sq7ba
-.section sq7bb
-.section sq7ca
-.section sq7cb
-.section sq7da
-.section sq7db
-.section sq7ea
-.section sq7eb
-.section sq7fa
-.section sq7fb
-.section sq7ga
-.section sq7gb
-.section sq7ha
-.section sq7hb
-.section sq7ia
-.section sq7ib
-.section sq7ja
-.section sq7jb
-.section sq7ka
-.section sq7kb
-.section sq7la
-.section sq7lb
-.section sq7ma
-.section sq7mb
-.section sq7na
-.section sq7nb
-.section sq7oa
-.section sq7ob
-.section sq7pa
-.section sq7pb
-.section sq7qa
-.section sq7qb
-.section sq7ra
-.section sq7rb
-.section sq7sa
-.section sq7sb
-.section sq7ta
-.section sq7tb
-.section sq7ua
-.section sq7ub
-.section sq7va
-.section sq7vb
-.section sq7wa
-.section sq7wb
-.section sq7xa
-.section sq7xb
-.section sq7ya
-.section sq7yb
-.section sq7za
-.section sq7zb
-.section sq71a
-.section sq71b
-.section sq72a
-.section sq72b
-.section sq73a
-.section sq73b
-.section sq74a
-.section sq74b
-.section sq75a
-.section sq75b
-.section sq76a
-.section sq76b
-.section sq77a
-.section sq77b
-.section sq78a
-.section sq78b
-.section sq79a
-.section sq79b
-.section sq70a
-.section sq70b
-.section sq8aa
-.section sq8ab
-.section sq8ba
-.section sq8bb
-.section sq8ca
-.section sq8cb
-.section sq8da
-.section sq8db
-.section sq8ea
-.section sq8eb
-.section sq8fa
-.section sq8fb
-.section sq8ga
-.section sq8gb
-.section sq8ha
-.section sq8hb
-.section sq8ia
-.section sq8ib
-.section sq8ja
-.section sq8jb
-.section sq8ka
-.section sq8kb
-.section sq8la
-.section sq8lb
-.section sq8ma
-.section sq8mb
-.section sq8na
-.section sq8nb
-.section sq8oa
-.section sq8ob
-.section sq8pa
-.section sq8pb
-.section sq8qa
-.section sq8qb
-.section sq8ra
-.section sq8rb
-.section sq8sa
-.section sq8sb
-.section sq8ta
-.section sq8tb
-.section sq8ua
-.section sq8ub
-.section sq8va
-.section sq8vb
-.section sq8wa
-.section sq8wb
-.section sq8xa
-.section sq8xb
-.section sq8ya
-.section sq8yb
-.section sq8za
-.section sq8zb
-.section sq81a
-.section sq81b
-.section sq82a
-.section sq82b
-.section sq83a
-.section sq83b
-.section sq84a
-.section sq84b
-.section sq85a
-.section sq85b
-.section sq86a
-.section sq86b
-.section sq87a
-.section sq87b
-.section sq88a
-.section sq88b
-.section sq89a
-.section sq89b
-.section sq80a
-.section sq80b
-.section sq9aa
-.section sq9ab
-.section sq9ba
-.section sq9bb
-.section sq9ca
-.section sq9cb
-.section sq9da
-.section sq9db
-.section sq9ea
-.section sq9eb
-.section sq9fa
-.section sq9fb
-.section sq9ga
-.section sq9gb
-.section sq9ha
-.section sq9hb
-.section sq9ia
-.section sq9ib
-.section sq9ja
-.section sq9jb
-.section sq9ka
-.section sq9kb
-.section sq9la
-.section sq9lb
-.section sq9ma
-.section sq9mb
-.section sq9na
-.section sq9nb
-.section sq9oa
-.section sq9ob
-.section sq9pa
-.section sq9pb
-.section sq9qa
-.section sq9qb
-.section sq9ra
-.section sq9rb
-.section sq9sa
-.section sq9sb
-.section sq9ta
-.section sq9tb
-.section sq9ua
-.section sq9ub
-.section sq9va
-.section sq9vb
-.section sq9wa
-.section sq9wb
-.section sq9xa
-.section sq9xb
-.section sq9ya
-.section sq9yb
-.section sq9za
-.section sq9zb
-.section sq91a
-.section sq91b
-.section sq92a
-.section sq92b
-.section sq93a
-.section sq93b
-.section sq94a
-.section sq94b
-.section sq95a
-.section sq95b
-.section sq96a
-.section sq96b
-.section sq97a
-.section sq97b
-.section sq98a
-.section sq98b
-.section sq99a
-.section sq99b
-.section sq90a
-.section sq90b
-.section sq0aa
-.section sq0ab
-.section sq0ba
-.section sq0bb
-.section sq0ca
-.section sq0cb
-.section sq0da
-.section sq0db
-.section sq0ea
-.section sq0eb
-.section sq0fa
-.section sq0fb
-.section sq0ga
-.section sq0gb
-.section sq0ha
-.section sq0hb
-.section sq0ia
-.section sq0ib
-.section sq0ja
-.section sq0jb
-.section sq0ka
-.section sq0kb
-.section sq0la
-.section sq0lb
-.section sq0ma
-.section sq0mb
-.section sq0na
-.section sq0nb
-.section sq0oa
-.section sq0ob
-.section sq0pa
-.section sq0pb
-.section sq0qa
-.section sq0qb
-.section sq0ra
-.section sq0rb
-.section sq0sa
-.section sq0sb
-.section sq0ta
-.section sq0tb
-.section sq0ua
-.section sq0ub
-.section sq0va
-.section sq0vb
-.section sq0wa
-.section sq0wb
-.section sq0xa
-.section sq0xb
-.section sq0ya
-.section sq0yb
-.section sq0za
-.section sq0zb
-.section sq01a
-.section sq01b
-.section sq02a
-.section sq02b
-.section sq03a
-.section sq03b
-.section sq04a
-.section sq04b
-.section sq05a
-.section sq05b
-.section sq06a
-.section sq06b
-.section sq07a
-.section sq07b
-.section sq08a
-.section sq08b
-.section sq09a
-.section sq09b
-.section sq00a
-.section sq00b
-.section sraaa
-.section sraab
-.section sraba
-.section srabb
-.section sraca
-.section sracb
-.section srada
-.section sradb
-.section sraea
-.section sraeb
-.section srafa
-.section srafb
-.section sraga
-.section sragb
-.section sraha
-.section srahb
-.section sraia
-.section sraib
-.section sraja
-.section srajb
-.section sraka
-.section srakb
-.section srala
-.section sralb
-.section srama
-.section sramb
-.section srana
-.section sranb
-.section sraoa
-.section sraob
-.section srapa
-.section srapb
-.section sraqa
-.section sraqb
-.section srara
-.section srarb
-.section srasa
-.section srasb
-.section srata
-.section sratb
-.section sraua
-.section sraub
-.section srava
-.section sravb
-.section srawa
-.section srawb
-.section sraxa
-.section sraxb
-.section sraya
-.section srayb
-.section sraza
-.section srazb
-.section sra1a
-.section sra1b
-.section sra2a
-.section sra2b
-.section sra3a
-.section sra3b
-.section sra4a
-.section sra4b
-.section sra5a
-.section sra5b
-.section sra6a
-.section sra6b
-.section sra7a
-.section sra7b
-.section sra8a
-.section sra8b
-.section sra9a
-.section sra9b
-.section sra0a
-.section sra0b
-.section srbaa
-.section srbab
-.section srbba
-.section srbbb
-.section srbca
-.section srbcb
-.section srbda
-.section srbdb
-.section srbea
-.section srbeb
-.section srbfa
-.section srbfb
-.section srbga
-.section srbgb
-.section srbha
-.section srbhb
-.section srbia
-.section srbib
-.section srbja
-.section srbjb
-.section srbka
-.section srbkb
-.section srbla
-.section srblb
-.section srbma
-.section srbmb
-.section srbna
-.section srbnb
-.section srboa
-.section srbob
-.section srbpa
-.section srbpb
-.section srbqa
-.section srbqb
-.section srbra
-.section srbrb
-.section srbsa
-.section srbsb
-.section srbta
-.section srbtb
-.section srbua
-.section srbub
-.section srbva
-.section srbvb
-.section srbwa
-.section srbwb
-.section srbxa
-.section srbxb
-.section srbya
-.section srbyb
-.section srbza
-.section srbzb
-.section srb1a
-.section srb1b
-.section srb2a
-.section srb2b
-.section srb3a
-.section srb3b
-.section srb4a
-.section srb4b
-.section srb5a
-.section srb5b
-.section srb6a
-.section srb6b
-.section srb7a
-.section srb7b
-.section srb8a
-.section srb8b
-.section srb9a
-.section srb9b
-.section srb0a
-.section srb0b
-.section srcaa
-.section srcab
-.section srcba
-.section srcbb
-.section srcca
-.section srccb
-.section srcda
-.section srcdb
-.section srcea
-.section srceb
-.section srcfa
-.section srcfb
-.section srcga
-.section srcgb
-.section srcha
-.section srchb
-.section srcia
-.section srcib
-.section srcja
-.section srcjb
-.section srcka
-.section srckb
-.section srcla
-.section srclb
-.section srcma
-.section srcmb
-.section srcna
-.section srcnb
-.section srcoa
-.section srcob
-.section srcpa
-.section srcpb
-.section srcqa
-.section srcqb
-.section srcra
-.section srcrb
-.section srcsa
-.section srcsb
-.section srcta
-.section srctb
-.section srcua
-.section srcub
-.section srcva
-.section srcvb
-.section srcwa
-.section srcwb
-.section srcxa
-.section srcxb
-.section srcya
-.section srcyb
-.section srcza
-.section srczb
-.section src1a
-.section src1b
-.section src2a
-.section src2b
-.section src3a
-.section src3b
-.section src4a
-.section src4b
-.section src5a
-.section src5b
-.section src6a
-.section src6b
-.section src7a
-.section src7b
-.section src8a
-.section src8b
-.section src9a
-.section src9b
-.section src0a
-.section src0b
-.section srdaa
-.section srdab
-.section srdba
-.section srdbb
-.section srdca
-.section srdcb
-.section srdda
-.section srddb
-.section srdea
-.section srdeb
-.section srdfa
-.section srdfb
-.section srdga
-.section srdgb
-.section srdha
-.section srdhb
-.section srdia
-.section srdib
-.section srdja
-.section srdjb
-.section srdka
-.section srdkb
-.section srdla
-.section srdlb
-.section srdma
-.section srdmb
-.section srdna
-.section srdnb
-.section srdoa
-.section srdob
-.section srdpa
-.section srdpb
-.section srdqa
-.section srdqb
-.section srdra
-.section srdrb
-.section srdsa
-.section srdsb
-.section srdta
-.section srdtb
-.section srdua
-.section srdub
-.section srdva
-.section srdvb
-.section srdwa
-.section srdwb
-.section srdxa
-.section srdxb
-.section srdya
-.section srdyb
-.section srdza
-.section srdzb
-.section srd1a
-.section srd1b
-.section srd2a
-.section srd2b
-.section srd3a
-.section srd3b
-.section srd4a
-.section srd4b
-.section srd5a
-.section srd5b
-.section srd6a
-.section srd6b
-.section srd7a
-.section srd7b
-.section srd8a
-.section srd8b
-.section srd9a
-.section srd9b
-.section srd0a
-.section srd0b
-.section sreaa
-.section sreab
-.section sreba
-.section srebb
-.section sreca
-.section srecb
-.section sreda
-.section sredb
-.section sreea
-.section sreeb
-.section srefa
-.section srefb
-.section srega
-.section sregb
-.section sreha
-.section srehb
-.section sreia
-.section sreib
-.section sreja
-.section srejb
-.section sreka
-.section srekb
-.section srela
-.section srelb
-.section srema
-.section sremb
-.section srena
-.section srenb
-.section sreoa
-.section sreob
-.section srepa
-.section srepb
-.section sreqa
-.section sreqb
-.section srera
-.section srerb
-.section sresa
-.section sresb
-.section sreta
-.section sretb
-.section sreua
-.section sreub
-.section sreva
-.section srevb
-.section srewa
-.section srewb
-.section srexa
-.section srexb
-.section sreya
-.section sreyb
-.section sreza
-.section srezb
-.section sre1a
-.section sre1b
-.section sre2a
-.section sre2b
-.section sre3a
-.section sre3b
-.section sre4a
-.section sre4b
-.section sre5a
-.section sre5b
-.section sre6a
-.section sre6b
-.section sre7a
-.section sre7b
-.section sre8a
-.section sre8b
-.section sre9a
-.section sre9b
-.section sre0a
-.section sre0b
-.section srfaa
-.section srfab
-.section srfba
-.section srfbb
-.section srfca
-.section srfcb
-.section srfda
-.section srfdb
-.section srfea
-.section srfeb
-.section srffa
-.section srffb
-.section srfga
-.section srfgb
-.section srfha
-.section srfhb
-.section srfia
-.section srfib
-.section srfja
-.section srfjb
-.section srfka
-.section srfkb
-.section srfla
-.section srflb
-.section srfma
-.section srfmb
-.section srfna
-.section srfnb
-.section srfoa
-.section srfob
-.section srfpa
-.section srfpb
-.section srfqa
-.section srfqb
-.section srfra
-.section srfrb
-.section srfsa
-.section srfsb
-.section srfta
-.section srftb
-.section srfua
-.section srfub
-.section srfva
-.section srfvb
-.section srfwa
-.section srfwb
-.section srfxa
-.section srfxb
-.section srfya
-.section srfyb
-.section srfza
-.section srfzb
-.section srf1a
-.section srf1b
-.section srf2a
-.section srf2b
-.section srf3a
-.section srf3b
-.section srf4a
-.section srf4b
-.section srf5a
-.section srf5b
-.section srf6a
-.section srf6b
-.section srf7a
-.section srf7b
-.section srf8a
-.section srf8b
-.section srf9a
-.section srf9b
-.section srf0a
-.section srf0b
-.section srgaa
-.section srgab
-.section srgba
-.section srgbb
-.section srgca
-.section srgcb
-.section srgda
-.section srgdb
-.section srgea
-.section srgeb
-.section srgfa
-.section srgfb
-.section srgga
-.section srggb
-.section srgha
-.section srghb
-.section srgia
-.section srgib
-.section srgja
-.section srgjb
-.section srgka
-.section srgkb
-.section srgla
-.section srglb
-.section srgma
-.section srgmb
-.section srgna
-.section srgnb
-.section srgoa
-.section srgob
-.section srgpa
-.section srgpb
-.section srgqa
-.section srgqb
-.section srgra
-.section srgrb
-.section srgsa
-.section srgsb
-.section srgta
-.section srgtb
-.section srgua
-.section srgub
-.section srgva
-.section srgvb
-.section srgwa
-.section srgwb
-.section srgxa
-.section srgxb
-.section srgya
-.section srgyb
-.section srgza
-.section srgzb
-.section srg1a
-.section srg1b
-.section srg2a
-.section srg2b
-.section srg3a
-.section srg3b
-.section srg4a
-.section srg4b
-.section srg5a
-.section srg5b
-.section srg6a
-.section srg6b
-.section srg7a
-.section srg7b
-.section srg8a
-.section srg8b
-.section srg9a
-.section srg9b
-.section srg0a
-.section srg0b
-.section srhaa
-.section srhab
-.section srhba
-.section srhbb
-.section srhca
-.section srhcb
-.section srhda
-.section srhdb
-.section srhea
-.section srheb
-.section srhfa
-.section srhfb
-.section srhga
-.section srhgb
-.section srhha
-.section srhhb
-.section srhia
-.section srhib
-.section srhja
-.section srhjb
-.section srhka
-.section srhkb
-.section srhla
-.section srhlb
-.section srhma
-.section srhmb
-.section srhna
-.section srhnb
-.section srhoa
-.section srhob
-.section srhpa
-.section srhpb
-.section srhqa
-.section srhqb
-.section srhra
-.section srhrb
-.section srhsa
-.section srhsb
-.section srhta
-.section srhtb
-.section srhua
-.section srhub
-.section srhva
-.section srhvb
-.section srhwa
-.section srhwb
-.section srhxa
-.section srhxb
-.section srhya
-.section srhyb
-.section srhza
-.section srhzb
-.section srh1a
-.section srh1b
-.section srh2a
-.section srh2b
-.section srh3a
-.section srh3b
-.section srh4a
-.section srh4b
-.section srh5a
-.section srh5b
-.section srh6a
-.section srh6b
-.section srh7a
-.section srh7b
-.section srh8a
-.section srh8b
-.section srh9a
-.section srh9b
-.section srh0a
-.section srh0b
-.section sriaa
-.section sriab
-.section sriba
-.section sribb
-.section srica
-.section sricb
-.section srida
-.section sridb
-.section sriea
-.section srieb
-.section srifa
-.section srifb
-.section sriga
-.section srigb
-.section sriha
-.section srihb
-.section sriia
-.section sriib
-.section srija
-.section srijb
-.section srika
-.section srikb
-.section srila
-.section srilb
-.section srima
-.section srimb
-.section srina
-.section srinb
-.section srioa
-.section sriob
-.section sripa
-.section sripb
-.section sriqa
-.section sriqb
-.section srira
-.section srirb
-.section srisa
-.section srisb
-.section srita
-.section sritb
-.section sriua
-.section sriub
-.section sriva
-.section srivb
-.section sriwa
-.section sriwb
-.section srixa
-.section srixb
-.section sriya
-.section sriyb
-.section sriza
-.section srizb
-.section sri1a
-.section sri1b
-.section sri2a
-.section sri2b
-.section sri3a
-.section sri3b
-.section sri4a
-.section sri4b
-.section sri5a
-.section sri5b
-.section sri6a
-.section sri6b
-.section sri7a
-.section sri7b
-.section sri8a
-.section sri8b
-.section sri9a
-.section sri9b
-.section sri0a
-.section sri0b
-.section srjaa
-.section srjab
-.section srjba
-.section srjbb
-.section srjca
-.section srjcb
-.section srjda
-.section srjdb
-.section srjea
-.section srjeb
-.section srjfa
-.section srjfb
-.section srjga
-.section srjgb
-.section srjha
-.section srjhb
-.section srjia
-.section srjib
-.section srjja
-.section srjjb
-.section srjka
-.section srjkb
-.section srjla
-.section srjlb
-.section srjma
-.section srjmb
-.section srjna
-.section srjnb
-.section srjoa
-.section srjob
-.section srjpa
-.section srjpb
-.section srjqa
-.section srjqb
-.section srjra
-.section srjrb
-.section srjsa
-.section srjsb
-.section srjta
-.section srjtb
-.section srjua
-.section srjub
-.section srjva
-.section srjvb
-.section srjwa
-.section srjwb
-.section srjxa
-.section srjxb
-.section srjya
-.section srjyb
-.section srjza
-.section srjzb
-.section srj1a
-.section srj1b
-.section srj2a
-.section srj2b
-.section srj3a
-.section srj3b
-.section srj4a
-.section srj4b
-.section srj5a
-.section srj5b
-.section srj6a
-.section srj6b
-.section srj7a
-.section srj7b
-.section srj8a
-.section srj8b
-.section srj9a
-.section srj9b
-.section srj0a
-.section srj0b
-.section srkaa
-.section srkab
-.section srkba
-.section srkbb
-.section srkca
-.section srkcb
-.section srkda
-.section srkdb
-.section srkea
-.section srkeb
-.section srkfa
-.section srkfb
-.section srkga
-.section srkgb
-.section srkha
-.section srkhb
-.section srkia
-.section srkib
-.section srkja
-.section srkjb
-.section srkka
-.section srkkb
-.section srkla
-.section srklb
-.section srkma
-.section srkmb
-.section srkna
-.section srknb
-.section srkoa
-.section srkob
-.section srkpa
-.section srkpb
-.section srkqa
-.section srkqb
-.section srkra
-.section srkrb
-.section srksa
-.section srksb
-.section srkta
-.section srktb
-.section srkua
-.section srkub
-.section srkva
-.section srkvb
-.section srkwa
-.section srkwb
-.section srkxa
-.section srkxb
-.section srkya
-.section srkyb
-.section srkza
-.section srkzb
-.section srk1a
-.section srk1b
-.section srk2a
-.section srk2b
-.section srk3a
-.section srk3b
-.section srk4a
-.section srk4b
-.section srk5a
-.section srk5b
-.section srk6a
-.section srk6b
-.section srk7a
-.section srk7b
-.section srk8a
-.section srk8b
-.section srk9a
-.section srk9b
-.section srk0a
-.section srk0b
-.section srlaa
-.section srlab
-.section srlba
-.section srlbb
-.section srlca
-.section srlcb
-.section srlda
-.section srldb
-.section srlea
-.section srleb
-.section srlfa
-.section srlfb
-.section srlga
-.section srlgb
-.section srlha
-.section srlhb
-.section srlia
-.section srlib
-.section srlja
-.section srljb
-.section srlka
-.section srlkb
-.section srlla
-.section srllb
-.section srlma
-.section srlmb
-.section srlna
-.section srlnb
-.section srloa
-.section srlob
-.section srlpa
-.section srlpb
-.section srlqa
-.section srlqb
-.section srlra
-.section srlrb
-.section srlsa
-.section srlsb
-.section srlta
-.section srltb
-.section srlua
-.section srlub
-.section srlva
-.section srlvb
-.section srlwa
-.section srlwb
-.section srlxa
-.section srlxb
-.section srlya
-.section srlyb
-.section srlza
-.section srlzb
-.section srl1a
-.section srl1b
-.section srl2a
-.section srl2b
-.section srl3a
-.section srl3b
-.section srl4a
-.section srl4b
-.section srl5a
-.section srl5b
-.section srl6a
-.section srl6b
-.section srl7a
-.section srl7b
-.section srl8a
-.section srl8b
-.section srl9a
-.section srl9b
-.section srl0a
-.section srl0b
-.section srmaa
-.section srmab
-.section srmba
-.section srmbb
-.section srmca
-.section srmcb
-.section srmda
-.section srmdb
-.section srmea
-.section srmeb
-.section srmfa
-.section srmfb
-.section srmga
-.section srmgb
-.section srmha
-.section srmhb
-.section srmia
-.section srmib
-.section srmja
-.section srmjb
-.section srmka
-.section srmkb
-.section srmla
-.section srmlb
-.section srmma
-.section srmmb
-.section srmna
-.section srmnb
-.section srmoa
-.section srmob
-.section srmpa
-.section srmpb
-.section srmqa
-.section srmqb
-.section srmra
-.section srmrb
-.section srmsa
-.section srmsb
-.section srmta
-.section srmtb
-.section srmua
-.section srmub
-.section srmva
-.section srmvb
-.section srmwa
-.section srmwb
-.section srmxa
-.section srmxb
-.section srmya
-.section srmyb
-.section srmza
-.section srmzb
-.section srm1a
-.section srm1b
-.section srm2a
-.section srm2b
-.section srm3a
-.section srm3b
-.section srm4a
-.section srm4b
-.section srm5a
-.section srm5b
-.section srm6a
-.section srm6b
-.section srm7a
-.section srm7b
-.section srm8a
-.section srm8b
-.section srm9a
-.section srm9b
-.section srm0a
-.section srm0b
-.section srnaa
-.section srnab
-.section srnba
-.section srnbb
-.section srnca
-.section srncb
-.section srnda
-.section srndb
-.section srnea
-.section srneb
-.section srnfa
-.section srnfb
-.section srnga
-.section srngb
-.section srnha
-.section srnhb
-.section srnia
-.section srnib
-.section srnja
-.section srnjb
-.section srnka
-.section srnkb
-.section srnla
-.section srnlb
-.section srnma
-.section srnmb
-.section srnna
-.section srnnb
-.section srnoa
-.section srnob
-.section srnpa
-.section srnpb
-.section srnqa
-.section srnqb
-.section srnra
-.section srnrb
-.section srnsa
-.section srnsb
-.section srnta
-.section srntb
-.section srnua
-.section srnub
-.section srnva
-.section srnvb
-.section srnwa
-.section srnwb
-.section srnxa
-.section srnxb
-.section srnya
-.section srnyb
-.section srnza
-.section srnzb
-.section srn1a
-.section srn1b
-.section srn2a
-.section srn2b
-.section srn3a
-.section srn3b
-.section srn4a
-.section srn4b
-.section srn5a
-.section srn5b
-.section srn6a
-.section srn6b
-.section srn7a
-.section srn7b
-.section srn8a
-.section srn8b
-.section srn9a
-.section srn9b
-.section srn0a
-.section srn0b
-.section sroaa
-.section sroab
-.section sroba
-.section srobb
-.section sroca
-.section srocb
-.section sroda
-.section srodb
-.section sroea
-.section sroeb
-.section srofa
-.section srofb
-.section sroga
-.section srogb
-.section sroha
-.section srohb
-.section sroia
-.section sroib
-.section sroja
-.section srojb
-.section sroka
-.section srokb
-.section srola
-.section srolb
-.section sroma
-.section sromb
-.section srona
-.section sronb
-.section srooa
-.section sroob
-.section sropa
-.section sropb
-.section sroqa
-.section sroqb
-.section srora
-.section srorb
-.section srosa
-.section srosb
-.section srota
-.section srotb
-.section sroua
-.section sroub
-.section srova
-.section srovb
-.section srowa
-.section srowb
-.section sroxa
-.section sroxb
-.section sroya
-.section sroyb
-.section sroza
-.section srozb
-.section sro1a
-.section sro1b
-.section sro2a
-.section sro2b
-.section sro3a
-.section sro3b
-.section sro4a
-.section sro4b
-.section sro5a
-.section sro5b
-.section sro6a
-.section sro6b
-.section sro7a
-.section sro7b
-.section sro8a
-.section sro8b
-.section sro9a
-.section sro9b
-.section sro0a
-.section sro0b
-.section srpaa
-.section srpab
-.section srpba
-.section srpbb
-.section srpca
-.section srpcb
-.section srpda
-.section srpdb
-.section srpea
-.section srpeb
-.section srpfa
-.section srpfb
-.section srpga
-.section srpgb
-.section srpha
-.section srphb
-.section srpia
-.section srpib
-.section srpja
-.section srpjb
-.section srpka
-.section srpkb
-.section srpla
-.section srplb
-.section srpma
-.section srpmb
-.section srpna
-.section srpnb
-.section srpoa
-.section srpob
-.section srppa
-.section srppb
-.section srpqa
-.section srpqb
-.section srpra
-.section srprb
-.section srpsa
-.section srpsb
-.section srpta
-.section srptb
-.section srpua
-.section srpub
-.section srpva
-.section srpvb
-.section srpwa
-.section srpwb
-.section srpxa
-.section srpxb
-.section srpya
-.section srpyb
-.section srpza
-.section srpzb
-.section srp1a
-.section srp1b
-.section srp2a
-.section srp2b
-.section srp3a
-.section srp3b
-.section srp4a
-.section srp4b
-.section srp5a
-.section srp5b
-.section srp6a
-.section srp6b
-.section srp7a
-.section srp7b
-.section srp8a
-.section srp8b
-.section srp9a
-.section srp9b
-.section srp0a
-.section srp0b
-.section srqaa
-.section srqab
-.section srqba
-.section srqbb
-.section srqca
-.section srqcb
-.section srqda
-.section srqdb
-.section srqea
-.section srqeb
-.section srqfa
-.section srqfb
-.section srqga
-.section srqgb
-.section srqha
-.section srqhb
-.section srqia
-.section srqib
-.section srqja
-.section srqjb
-.section srqka
-.section srqkb
-.section srqla
-.section srqlb
-.section srqma
-.section srqmb
-.section srqna
-.section srqnb
-.section srqoa
-.section srqob
-.section srqpa
-.section srqpb
-.section srqqa
-.section srqqb
-.section srqra
-.section srqrb
-.section srqsa
-.section srqsb
-.section srqta
-.section srqtb
-.section srqua
-.section srqub
-.section srqva
-.section srqvb
-.section srqwa
-.section srqwb
-.section srqxa
-.section srqxb
-.section srqya
-.section srqyb
-.section srqza
-.section srqzb
-.section srq1a
-.section srq1b
-.section srq2a
-.section srq2b
-.section srq3a
-.section srq3b
-.section srq4a
-.section srq4b
-.section srq5a
-.section srq5b
-.section srq6a
-.section srq6b
-.section srq7a
-.section srq7b
-.section srq8a
-.section srq8b
-.section srq9a
-.section srq9b
-.section srq0a
-.section srq0b
-.section srraa
-.section srrab
-.section srrba
-.section srrbb
-.section srrca
-.section srrcb
-.section srrda
-.section srrdb
-.section srrea
-.section srreb
-.section srrfa
-.section srrfb
-.section srrga
-.section srrgb
-.section srrha
-.section srrhb
-.section srria
-.section srrib
-.section srrja
-.section srrjb
-.section srrka
-.section srrkb
-.section srrla
-.section srrlb
-.section srrma
-.section srrmb
-.section srrna
-.section srrnb
-.section srroa
-.section srrob
-.section srrpa
-.section srrpb
-.section srrqa
-.section srrqb
-.section srrra
-.section srrrb
-.section srrsa
-.section srrsb
-.section srrta
-.section srrtb
-.section srrua
-.section srrub
-.section srrva
-.section srrvb
-.section srrwa
-.section srrwb
-.section srrxa
-.section srrxb
-.section srrya
-.section srryb
-.section srrza
-.section srrzb
-.section srr1a
-.section srr1b
-.section srr2a
-.section srr2b
-.section srr3a
-.section srr3b
-.section srr4a
-.section srr4b
-.section srr5a
-.section srr5b
-.section srr6a
-.section srr6b
-.section srr7a
-.section srr7b
-.section srr8a
-.section srr8b
-.section srr9a
-.section srr9b
-.section srr0a
-.section srr0b
-.section srsaa
-.section srsab
-.section srsba
-.section srsbb
-.section srsca
-.section srscb
-.section srsda
-.section srsdb
-.section srsea
-.section srseb
-.section srsfa
-.section srsfb
-.section srsga
-.section srsgb
-.section srsha
-.section srshb
-.section srsia
-.section srsib
-.section srsja
-.section srsjb
-.section srska
-.section srskb
-.section srsla
-.section srslb
-.section srsma
-.section srsmb
-.section srsna
-.section srsnb
-.section srsoa
-.section srsob
-.section srspa
-.section srspb
-.section srsqa
-.section srsqb
-.section srsra
-.section srsrb
-.section srssa
-.section srssb
-.section srsta
-.section srstb
-.section srsua
-.section srsub
-.section srsva
-.section srsvb
-.section srswa
-.section srswb
-.section srsxa
-.section srsxb
-.section srsya
-.section srsyb
-.section srsza
-.section srszb
-.section srs1a
-.section srs1b
-.section srs2a
-.section srs2b
-.section srs3a
-.section srs3b
-.section srs4a
-.section srs4b
-.section srs5a
-.section srs5b
-.section srs6a
-.section srs6b
-.section srs7a
-.section srs7b
-.section srs8a
-.section srs8b
-.section srs9a
-.section srs9b
-.section srs0a
-.section srs0b
-.section srtaa
-.section srtab
-.section srtba
-.section srtbb
-.section srtca
-.section srtcb
-.section srtda
-.section srtdb
-.section srtea
-.section srteb
-.section srtfa
-.section srtfb
-.section srtga
-.section srtgb
-.section srtha
-.section srthb
-.section srtia
-.section srtib
-.section srtja
-.section srtjb
-.section srtka
-.section srtkb
-.section srtla
-.section srtlb
-.section srtma
-.section srtmb
-.section srtna
-.section srtnb
-.section srtoa
-.section srtob
-.section srtpa
-.section srtpb
-.section srtqa
-.section srtqb
-.section srtra
-.section srtrb
-.section srtsa
-.section srtsb
-.section srtta
-.section srttb
-.section srtua
-.section srtub
-.section srtva
-.section srtvb
-.section srtwa
-.section srtwb
-.section srtxa
-.section srtxb
-.section srtya
-.section srtyb
-.section srtza
-.section srtzb
-.section srt1a
-.section srt1b
-.section srt2a
-.section srt2b
-.section srt3a
-.section srt3b
-.section srt4a
-.section srt4b
-.section srt5a
-.section srt5b
-.section srt6a
-.section srt6b
-.section srt7a
-.section srt7b
-.section srt8a
-.section srt8b
-.section srt9a
-.section srt9b
-.section srt0a
-.section srt0b
-.section sruaa
-.section sruab
-.section sruba
-.section srubb
-.section sruca
-.section srucb
-.section sruda
-.section srudb
-.section sruea
-.section srueb
-.section srufa
-.section srufb
-.section sruga
-.section srugb
-.section sruha
-.section sruhb
-.section sruia
-.section sruib
-.section sruja
-.section srujb
-.section sruka
-.section srukb
-.section srula
-.section srulb
-.section sruma
-.section srumb
-.section sruna
-.section srunb
-.section sruoa
-.section sruob
-.section srupa
-.section srupb
-.section sruqa
-.section sruqb
-.section srura
-.section srurb
-.section srusa
-.section srusb
-.section sruta
-.section srutb
-.section sruua
-.section sruub
-.section sruva
-.section sruvb
-.section sruwa
-.section sruwb
-.section sruxa
-.section sruxb
-.section sruya
-.section sruyb
-.section sruza
-.section sruzb
-.section sru1a
-.section sru1b
-.section sru2a
-.section sru2b
-.section sru3a
-.section sru3b
-.section sru4a
-.section sru4b
-.section sru5a
-.section sru5b
-.section sru6a
-.section sru6b
-.section sru7a
-.section sru7b
-.section sru8a
-.section sru8b
-.section sru9a
-.section sru9b
-.section sru0a
-.section sru0b
-.section srvaa
-.section srvab
-.section srvba
-.section srvbb
-.section srvca
-.section srvcb
-.section srvda
-.section srvdb
-.section srvea
-.section srveb
-.section srvfa
-.section srvfb
-.section srvga
-.section srvgb
-.section srvha
-.section srvhb
-.section srvia
-.section srvib
-.section srvja
-.section srvjb
-.section srvka
-.section srvkb
-.section srvla
-.section srvlb
-.section srvma
-.section srvmb
-.section srvna
-.section srvnb
-.section srvoa
-.section srvob
-.section srvpa
-.section srvpb
-.section srvqa
-.section srvqb
-.section srvra
-.section srvrb
-.section srvsa
-.section srvsb
-.section srvta
-.section srvtb
-.section srvua
-.section srvub
-.section srvva
-.section srvvb
-.section srvwa
-.section srvwb
-.section srvxa
-.section srvxb
-.section srvya
-.section srvyb
-.section srvza
-.section srvzb
-.section srv1a
-.section srv1b
-.section srv2a
-.section srv2b
-.section srv3a
-.section srv3b
-.section srv4a
-.section srv4b
-.section srv5a
-.section srv5b
-.section srv6a
-.section srv6b
-.section srv7a
-.section srv7b
-.section srv8a
-.section srv8b
-.section srv9a
-.section srv9b
-.section srv0a
-.section srv0b
-.section srwaa
-.section srwab
-.section srwba
-.section srwbb
-.section srwca
-.section srwcb
-.section srwda
-.section srwdb
-.section srwea
-.section srweb
-.section srwfa
-.section srwfb
-.section srwga
-.section srwgb
-.section srwha
-.section srwhb
-.section srwia
-.section srwib
-.section srwja
-.section srwjb
-.section srwka
-.section srwkb
-.section srwla
-.section srwlb
-.section srwma
-.section srwmb
-.section srwna
-.section srwnb
-.section srwoa
-.section srwob
-.section srwpa
-.section srwpb
-.section srwqa
-.section srwqb
-.section srwra
-.section srwrb
-.section srwsa
-.section srwsb
-.section srwta
-.section srwtb
-.section srwua
-.section srwub
-.section srwva
-.section srwvb
-.section srwwa
-.section srwwb
-.section srwxa
-.section srwxb
-.section srwya
-.section srwyb
-.section srwza
-.section srwzb
-.section srw1a
-.section srw1b
-.section srw2a
-.section srw2b
-.section srw3a
-.section srw3b
-.section srw4a
-.section srw4b
-.section srw5a
-.section srw5b
-.section srw6a
-.section srw6b
-.section srw7a
-.section srw7b
-.section srw8a
-.section srw8b
-.section srw9a
-.section srw9b
-.section srw0a
-.section srw0b
-.section srxaa
-.section srxab
-.section srxba
-.section srxbb
-.section srxca
-.section srxcb
-.section srxda
-.section srxdb
-.section srxea
-.section srxeb
-.section srxfa
-.section srxfb
-.section srxga
-.section srxgb
-.section srxha
-.section srxhb
-.section srxia
-.section srxib
-.section srxja
-.section srxjb
-.section srxka
-.section srxkb
-.section srxla
-.section srxlb
-.section srxma
-.section srxmb
-.section srxna
-.section srxnb
-.section srxoa
-.section srxob
-.section srxpa
-.section srxpb
-.section srxqa
-.section srxqb
-.section srxra
-.section srxrb
-.section srxsa
-.section srxsb
-.section srxta
-.section srxtb
-.section srxua
-.section srxub
-.section srxva
-.section srxvb
-.section srxwa
-.section srxwb
-.section srxxa
-.section srxxb
-.section srxya
-.section srxyb
-.section srxza
-.section srxzb
-.section srx1a
-.section srx1b
-.section srx2a
-.section srx2b
-.section srx3a
-.section srx3b
-.section srx4a
-.section srx4b
-.section srx5a
-.section srx5b
-.section srx6a
-.section srx6b
-.section srx7a
-.section srx7b
-.section srx8a
-.section srx8b
-.section srx9a
-.section srx9b
-.section srx0a
-.section srx0b
-.section sryaa
-.section sryab
-.section sryba
-.section srybb
-.section sryca
-.section srycb
-.section sryda
-.section srydb
-.section sryea
-.section sryeb
-.section sryfa
-.section sryfb
-.section sryga
-.section srygb
-.section sryha
-.section sryhb
-.section sryia
-.section sryib
-.section sryja
-.section sryjb
-.section sryka
-.section srykb
-.section sryla
-.section srylb
-.section sryma
-.section srymb
-.section sryna
-.section srynb
-.section sryoa
-.section sryob
-.section srypa
-.section srypb
-.section sryqa
-.section sryqb
-.section sryra
-.section sryrb
-.section srysa
-.section srysb
-.section sryta
-.section srytb
-.section sryua
-.section sryub
-.section sryva
-.section sryvb
-.section srywa
-.section srywb
-.section sryxa
-.section sryxb
-.section sryya
-.section sryyb
-.section sryza
-.section sryzb
-.section sry1a
-.section sry1b
-.section sry2a
-.section sry2b
-.section sry3a
-.section sry3b
-.section sry4a
-.section sry4b
-.section sry5a
-.section sry5b
-.section sry6a
-.section sry6b
-.section sry7a
-.section sry7b
-.section sry8a
-.section sry8b
-.section sry9a
-.section sry9b
-.section sry0a
-.section sry0b
-.section srzaa
-.section srzab
-.section srzba
-.section srzbb
-.section srzca
-.section srzcb
-.section srzda
-.section srzdb
-.section srzea
-.section srzeb
-.section srzfa
-.section srzfb
-.section srzga
-.section srzgb
-.section srzha
-.section srzhb
-.section srzia
-.section srzib
-.section srzja
-.section srzjb
-.section srzka
-.section srzkb
-.section srzla
-.section srzlb
-.section srzma
-.section srzmb
-.section srzna
-.section srznb
-.section srzoa
-.section srzob
-.section srzpa
-.section srzpb
-.section srzqa
-.section srzqb
-.section srzra
-.section srzrb
-.section srzsa
-.section srzsb
-.section srzta
-.section srztb
-.section srzua
-.section srzub
-.section srzva
-.section srzvb
-.section srzwa
-.section srzwb
-.section srzxa
-.section srzxb
-.section srzya
-.section srzyb
-.section srzza
-.section srzzb
-.section srz1a
-.section srz1b
-.section srz2a
-.section srz2b
-.section srz3a
-.section srz3b
-.section srz4a
-.section srz4b
-.section srz5a
-.section srz5b
-.section srz6a
-.section srz6b
-.section srz7a
-.section srz7b
-.section srz8a
-.section srz8b
-.section srz9a
-.section srz9b
-.section srz0a
-.section srz0b
-.section sr1aa
-.section sr1ab
-.section sr1ba
-.section sr1bb
-.section sr1ca
-.section sr1cb
-.section sr1da
-.section sr1db
-.section sr1ea
-.section sr1eb
-.section sr1fa
-.section sr1fb
-.section sr1ga
-.section sr1gb
-.section sr1ha
-.section sr1hb
-.section sr1ia
-.section sr1ib
-.section sr1ja
-.section sr1jb
-.section sr1ka
-.section sr1kb
-.section sr1la
-.section sr1lb
-.section sr1ma
-.section sr1mb
-.section sr1na
-.section sr1nb
-.section sr1oa
-.section sr1ob
-.section sr1pa
-.section sr1pb
-.section sr1qa
-.section sr1qb
-.section sr1ra
-.section sr1rb
-.section sr1sa
-.section sr1sb
-.section sr1ta
-.section sr1tb
-.section sr1ua
-.section sr1ub
-.section sr1va
-.section sr1vb
-.section sr1wa
-.section sr1wb
-.section sr1xa
-.section sr1xb
-.section sr1ya
-.section sr1yb
-.section sr1za
-.section sr1zb
-.section sr11a
-.section sr11b
-.section sr12a
-.section sr12b
-.section sr13a
-.section sr13b
-.section sr14a
-.section sr14b
-.section sr15a
-.section sr15b
-.section sr16a
-.section sr16b
-.section sr17a
-.section sr17b
-.section sr18a
-.section sr18b
-.section sr19a
-.section sr19b
-.section sr10a
-.section sr10b
-.section sr2aa
-.section sr2ab
-.section sr2ba
-.section sr2bb
-.section sr2ca
-.section sr2cb
-.section sr2da
-.section sr2db
-.section sr2ea
-.section sr2eb
-.section sr2fa
-.section sr2fb
-.section sr2ga
-.section sr2gb
-.section sr2ha
-.section sr2hb
-.section sr2ia
-.section sr2ib
-.section sr2ja
-.section sr2jb
-.section sr2ka
-.section sr2kb
-.section sr2la
-.section sr2lb
-.section sr2ma
-.section sr2mb
-.section sr2na
-.section sr2nb
-.section sr2oa
-.section sr2ob
-.section sr2pa
-.section sr2pb
-.section sr2qa
-.section sr2qb
-.section sr2ra
-.section sr2rb
-.section sr2sa
-.section sr2sb
-.section sr2ta
-.section sr2tb
-.section sr2ua
-.section sr2ub
-.section sr2va
-.section sr2vb
-.section sr2wa
-.section sr2wb
-.section sr2xa
-.section sr2xb
-.section sr2ya
-.section sr2yb
-.section sr2za
-.section sr2zb
-.section sr21a
-.section sr21b
-.section sr22a
-.section sr22b
-.section sr23a
-.section sr23b
-.section sr24a
-.section sr24b
-.section sr25a
-.section sr25b
-.section sr26a
-.section sr26b
-.section sr27a
-.section sr27b
-.section sr28a
-.section sr28b
-.section sr29a
-.section sr29b
-.section sr20a
-.section sr20b
-.section sr3aa
-.section sr3ab
-.section sr3ba
-.section sr3bb
-.section sr3ca
-.section sr3cb
-.section sr3da
-.section sr3db
-.section sr3ea
-.section sr3eb
-.section sr3fa
-.section sr3fb
-.section sr3ga
-.section sr3gb
-.section sr3ha
-.section sr3hb
-.section sr3ia
-.section sr3ib
-.section sr3ja
-.section sr3jb
-.section sr3ka
-.section sr3kb
-.section sr3la
-.section sr3lb
-.section sr3ma
-.section sr3mb
-.section sr3na
-.section sr3nb
-.section sr3oa
-.section sr3ob
-.section sr3pa
-.section sr3pb
-.section sr3qa
-.section sr3qb
-.section sr3ra
-.section sr3rb
-.section sr3sa
-.section sr3sb
-.section sr3ta
-.section sr3tb
-.section sr3ua
-.section sr3ub
-.section sr3va
-.section sr3vb
-.section sr3wa
-.section sr3wb
-.section sr3xa
-.section sr3xb
-.section sr3ya
-.section sr3yb
-.section sr3za
-.section sr3zb
-.section sr31a
-.section sr31b
-.section sr32a
-.section sr32b
-.section sr33a
-.section sr33b
-.section sr34a
-.section sr34b
-.section sr35a
-.section sr35b
-.section sr36a
-.section sr36b
-.section sr37a
-.section sr37b
-.section sr38a
-.section sr38b
-.section sr39a
-.section sr39b
-.section sr30a
-.section sr30b
-.section sr4aa
-.section sr4ab
-.section sr4ba
-.section sr4bb
-.section sr4ca
-.section sr4cb
-.section sr4da
-.section sr4db
-.section sr4ea
-.section sr4eb
-.section sr4fa
-.section sr4fb
-.section sr4ga
-.section sr4gb
-.section sr4ha
-.section sr4hb
-.section sr4ia
-.section sr4ib
-.section sr4ja
-.section sr4jb
-.section sr4ka
-.section sr4kb
-.section sr4la
-.section sr4lb
-.section sr4ma
-.section sr4mb
-.section sr4na
-.section sr4nb
-.section sr4oa
-.section sr4ob
-.section sr4pa
-.section sr4pb
-.section sr4qa
-.section sr4qb
-.section sr4ra
-.section sr4rb
-.section sr4sa
-.section sr4sb
-.section sr4ta
-.section sr4tb
-.section sr4ua
-.section sr4ub
-.section sr4va
-.section sr4vb
-.section sr4wa
-.section sr4wb
-.section sr4xa
-.section sr4xb
-.section sr4ya
-.section sr4yb
-.section sr4za
-.section sr4zb
-.section sr41a
-.section sr41b
-.section sr42a
-.section sr42b
-.section sr43a
-.section sr43b
-.section sr44a
-.section sr44b
-.section sr45a
-.section sr45b
-.section sr46a
-.section sr46b
-.section sr47a
-.section sr47b
-.section sr48a
-.section sr48b
-.section sr49a
-.section sr49b
-.section sr40a
-.section sr40b
-.section sr5aa
-.section sr5ab
-.section sr5ba
-.section sr5bb
-.section sr5ca
-.section sr5cb
-.section sr5da
-.section sr5db
-.section sr5ea
-.section sr5eb
-.section sr5fa
-.section sr5fb
-.section sr5ga
-.section sr5gb
-.section sr5ha
-.section sr5hb
-.section sr5ia
-.section sr5ib
-.section sr5ja
-.section sr5jb
-.section sr5ka
-.section sr5kb
-.section sr5la
-.section sr5lb
-.section sr5ma
-.section sr5mb
-.section sr5na
-.section sr5nb
-.section sr5oa
-.section sr5ob
-.section sr5pa
-.section sr5pb
-.section sr5qa
-.section sr5qb
-.section sr5ra
-.section sr5rb
-.section sr5sa
-.section sr5sb
-.section sr5ta
-.section sr5tb
-.section sr5ua
-.section sr5ub
-.section sr5va
-.section sr5vb
-.section sr5wa
-.section sr5wb
-.section sr5xa
-.section sr5xb
-.section sr5ya
-.section sr5yb
-.section sr5za
-.section sr5zb
-.section sr51a
-.section sr51b
-.section sr52a
-.section sr52b
-.section sr53a
-.section sr53b
-.section sr54a
-.section sr54b
-.section sr55a
-.section sr55b
-.section sr56a
-.section sr56b
-.section sr57a
-.section sr57b
-.section sr58a
-.section sr58b
-.section sr59a
-.section sr59b
-.section sr50a
-.section sr50b
-.section sr6aa
-.section sr6ab
-.section sr6ba
-.section sr6bb
-.section sr6ca
-.section sr6cb
-.section sr6da
-.section sr6db
-.section sr6ea
-.section sr6eb
-.section sr6fa
-.section sr6fb
-.section sr6ga
-.section sr6gb
-.section sr6ha
-.section sr6hb
-.section sr6ia
-.section sr6ib
-.section sr6ja
-.section sr6jb
-.section sr6ka
-.section sr6kb
-.section sr6la
-.section sr6lb
-.section sr6ma
-.section sr6mb
-.section sr6na
-.section sr6nb
-.section sr6oa
-.section sr6ob
-.section sr6pa
-.section sr6pb
-.section sr6qa
-.section sr6qb
-.section sr6ra
-.section sr6rb
-.section sr6sa
-.section sr6sb
-.section sr6ta
-.section sr6tb
-.section sr6ua
-.section sr6ub
-.section sr6va
-.section sr6vb
-.section sr6wa
-.section sr6wb
-.section sr6xa
-.section sr6xb
-.section sr6ya
-.section sr6yb
-.section sr6za
-.section sr6zb
-.section sr61a
-.section sr61b
-.section sr62a
-.section sr62b
-.section sr63a
-.section sr63b
-.section sr64a
-.section sr64b
-.section sr65a
-.section sr65b
-.section sr66a
-.section sr66b
-.section sr67a
-.section sr67b
-.section sr68a
-.section sr68b
-.section sr69a
-.section sr69b
-.section sr60a
-.section sr60b
-.section sr7aa
-.section sr7ab
-.section sr7ba
-.section sr7bb
-.section sr7ca
-.section sr7cb
-.section sr7da
-.section sr7db
-.section sr7ea
-.section sr7eb
-.section sr7fa
-.section sr7fb
-.section sr7ga
-.section sr7gb
-.section sr7ha
-.section sr7hb
-.section sr7ia
-.section sr7ib
-.section sr7ja
-.section sr7jb
-.section sr7ka
-.section sr7kb
-.section sr7la
-.section sr7lb
-.section sr7ma
-.section sr7mb
-.section sr7na
-.section sr7nb
-.section sr7oa
-.section sr7ob
-.section sr7pa
-.section sr7pb
-.section sr7qa
-.section sr7qb
-.section sr7ra
-.section sr7rb
-.section sr7sa
-.section sr7sb
-.section sr7ta
-.section sr7tb
-.section sr7ua
-.section sr7ub
-.section sr7va
-.section sr7vb
-.section sr7wa
-.section sr7wb
-.section sr7xa
-.section sr7xb
-.section sr7ya
-.section sr7yb
-.section sr7za
-.section sr7zb
-.section sr71a
-.section sr71b
-.section sr72a
-.section sr72b
-.section sr73a
-.section sr73b
-.section sr74a
-.section sr74b
-.section sr75a
-.section sr75b
-.section sr76a
-.section sr76b
-.section sr77a
-.section sr77b
-.section sr78a
-.section sr78b
-.section sr79a
-.section sr79b
-.section sr70a
-.section sr70b
-.section sr8aa
-.section sr8ab
-.section sr8ba
-.section sr8bb
-.section sr8ca
-.section sr8cb
-.section sr8da
-.section sr8db
-.section sr8ea
-.section sr8eb
-.section sr8fa
-.section sr8fb
-.section sr8ga
-.section sr8gb
-.section sr8ha
-.section sr8hb
-.section sr8ia
-.section sr8ib
-.section sr8ja
-.section sr8jb
-.section sr8ka
-.section sr8kb
-.section sr8la
-.section sr8lb
-.section sr8ma
-.section sr8mb
-.section sr8na
-.section sr8nb
-.section sr8oa
-.section sr8ob
-.section sr8pa
-.section sr8pb
-.section sr8qa
-.section sr8qb
-.section sr8ra
-.section sr8rb
-.section sr8sa
-.section sr8sb
-.section sr8ta
-.section sr8tb
-.section sr8ua
-.section sr8ub
-.section sr8va
-.section sr8vb
-.section sr8wa
-.section sr8wb
-.section sr8xa
-.section sr8xb
-.section sr8ya
-.section sr8yb
-.section sr8za
-.section sr8zb
-.section sr81a
-.section sr81b
-.section sr82a
-.section sr82b
-.section sr83a
-.section sr83b
-.section sr84a
-.section sr84b
-.section sr85a
-.section sr85b
-.section sr86a
-.section sr86b
-.section sr87a
-.section sr87b
-.section sr88a
-.section sr88b
-.section sr89a
-.section sr89b
-.section sr80a
-.section sr80b
-.section sr9aa
-.section sr9ab
-.section sr9ba
-.section sr9bb
-.section sr9ca
-.section sr9cb
-.section sr9da
-.section sr9db
-.section sr9ea
-.section sr9eb
-.section sr9fa
-.section sr9fb
-.section sr9ga
-.section sr9gb
-.section sr9ha
-.section sr9hb
-.section sr9ia
-.section sr9ib
-.section sr9ja
-.section sr9jb
-.section sr9ka
-.section sr9kb
-.section sr9la
-.section sr9lb
-.section sr9ma
-.section sr9mb
-.section sr9na
-.section sr9nb
-.section sr9oa
-.section sr9ob
-.section sr9pa
-.section sr9pb
-.section sr9qa
-.section sr9qb
-.section sr9ra
-.section sr9rb
-.section sr9sa
-.section sr9sb
-.section sr9ta
-.section sr9tb
-.section sr9ua
-.section sr9ub
-.section sr9va
-.section sr9vb
-.section sr9wa
-.section sr9wb
-.section sr9xa
-.section sr9xb
-.section sr9ya
-.section sr9yb
-.section sr9za
-.section sr9zb
-.section sr91a
-.section sr91b
-.section sr92a
-.section sr92b
-.section sr93a
-.section sr93b
-.section sr94a
-.section sr94b
-.section sr95a
-.section sr95b
-.section sr96a
-.section sr96b
-.section sr97a
-.section sr97b
-.section sr98a
-.section sr98b
-.section sr99a
-.section sr99b
-.section sr90a
-.section sr90b
-.section sr0aa
-.section sr0ab
-.section sr0ba
-.section sr0bb
-.section sr0ca
-.section sr0cb
-.section sr0da
-.section sr0db
-.section sr0ea
-.section sr0eb
-.section sr0fa
-.section sr0fb
-.section sr0ga
-.section sr0gb
-.section sr0ha
-.section sr0hb
-.section sr0ia
-.section sr0ib
-.section sr0ja
-.section sr0jb
-.section sr0ka
-.section sr0kb
-.section sr0la
-.section sr0lb
-.section sr0ma
-.section sr0mb
-.section sr0na
-.section sr0nb
-.section sr0oa
-.section sr0ob
-.section sr0pa
-.section sr0pb
-.section sr0qa
-.section sr0qb
-.section sr0ra
-.section sr0rb
-.section sr0sa
-.section sr0sb
-.section sr0ta
-.section sr0tb
-.section sr0ua
-.section sr0ub
-.section sr0va
-.section sr0vb
-.section sr0wa
-.section sr0wb
-.section sr0xa
-.section sr0xb
-.section sr0ya
-.section sr0yb
-.section sr0za
-.section sr0zb
-.section sr01a
-.section sr01b
-.section sr02a
-.section sr02b
-.section sr03a
-.section sr03b
-.section sr04a
-.section sr04b
-.section sr05a
-.section sr05b
-.section sr06a
-.section sr06b
-.section sr07a
-.section sr07b
-.section sr08a
-.section sr08b
-.section sr09a
-.section sr09b
-.section sr00a
-.section sr00b
-.section ssaaa
-.section ssaab
-.section ssaba
-.section ssabb
-.section ssaca
-.section ssacb
-.section ssada
-.section ssadb
-.section ssaea
-.section ssaeb
-.section ssafa
-.section ssafb
-.section ssaga
-.section ssagb
-.section ssaha
-.section ssahb
-.section ssaia
-.section ssaib
-.section ssaja
-.section ssajb
-.section ssaka
-.section ssakb
-.section ssala
-.section ssalb
-.section ssama
-.section ssamb
-.section ssana
-.section ssanb
-.section ssaoa
-.section ssaob
-.section ssapa
-.section ssapb
-.section ssaqa
-.section ssaqb
-.section ssara
-.section ssarb
-.section ssasa
-.section ssasb
-.section ssata
-.section ssatb
-.section ssaua
-.section ssaub
-.section ssava
-.section ssavb
-.section ssawa
-.section ssawb
-.section ssaxa
-.section ssaxb
-.section ssaya
-.section ssayb
-.section ssaza
-.section ssazb
-.section ssa1a
-.section ssa1b
-.section ssa2a
-.section ssa2b
-.section ssa3a
-.section ssa3b
-.section ssa4a
-.section ssa4b
-.section ssa5a
-.section ssa5b
-.section ssa6a
-.section ssa6b
-.section ssa7a
-.section ssa7b
-.section ssa8a
-.section ssa8b
-.section ssa9a
-.section ssa9b
-.section ssa0a
-.section ssa0b
-.section ssbaa
-.section ssbab
-.section ssbba
-.section ssbbb
-.section ssbca
-.section ssbcb
-.section ssbda
-.section ssbdb
-.section ssbea
-.section ssbeb
-.section ssbfa
-.section ssbfb
-.section ssbga
-.section ssbgb
-.section ssbha
-.section ssbhb
-.section ssbia
-.section ssbib
-.section ssbja
-.section ssbjb
-.section ssbka
-.section ssbkb
-.section ssbla
-.section ssblb
-.section ssbma
-.section ssbmb
-.section ssbna
-.section ssbnb
-.section ssboa
-.section ssbob
-.section ssbpa
-.section ssbpb
-.section ssbqa
-.section ssbqb
-.section ssbra
-.section ssbrb
-.section ssbsa
-.section ssbsb
-.section ssbta
-.section ssbtb
-.section ssbua
-.section ssbub
-.section ssbva
-.section ssbvb
-.section ssbwa
-.section ssbwb
-.section ssbxa
-.section ssbxb
-.section ssbya
-.section ssbyb
-.section ssbza
-.section ssbzb
-.section ssb1a
-.section ssb1b
-.section ssb2a
-.section ssb2b
-.section ssb3a
-.section ssb3b
-.section ssb4a
-.section ssb4b
-.section ssb5a
-.section ssb5b
-.section ssb6a
-.section ssb6b
-.section ssb7a
-.section ssb7b
-.section ssb8a
-.section ssb8b
-.section ssb9a
-.section ssb9b
-.section ssb0a
-.section ssb0b
-.section sscaa
-.section sscab
-.section sscba
-.section sscbb
-.section sscca
-.section ssccb
-.section sscda
-.section sscdb
-.section sscea
-.section ssceb
-.section sscfa
-.section sscfb
-.section sscga
-.section sscgb
-.section sscha
-.section sschb
-.section sscia
-.section sscib
-.section sscja
-.section sscjb
-.section sscka
-.section ssckb
-.section sscla
-.section ssclb
-.section sscma
-.section sscmb
-.section sscna
-.section sscnb
-.section sscoa
-.section sscob
-.section sscpa
-.section sscpb
-.section sscqa
-.section sscqb
-.section sscra
-.section sscrb
-.section sscsa
-.section sscsb
-.section sscta
-.section ssctb
-.section sscua
-.section sscub
-.section sscva
-.section sscvb
-.section sscwa
-.section sscwb
-.section sscxa
-.section sscxb
-.section sscya
-.section sscyb
-.section sscza
-.section ssczb
-.section ssc1a
-.section ssc1b
-.section ssc2a
-.section ssc2b
-.section ssc3a
-.section ssc3b
-.section ssc4a
-.section ssc4b
-.section ssc5a
-.section ssc5b
-.section ssc6a
-.section ssc6b
-.section ssc7a
-.section ssc7b
-.section ssc8a
-.section ssc8b
-.section ssc9a
-.section ssc9b
-.section ssc0a
-.section ssc0b
-.section ssdaa
-.section ssdab
-.section ssdba
-.section ssdbb
-.section ssdca
-.section ssdcb
-.section ssdda
-.section ssddb
-.section ssdea
-.section ssdeb
-.section ssdfa
-.section ssdfb
-.section ssdga
-.section ssdgb
-.section ssdha
-.section ssdhb
-.section ssdia
-.section ssdib
-.section ssdja
-.section ssdjb
-.section ssdka
-.section ssdkb
-.section ssdla
-.section ssdlb
-.section ssdma
-.section ssdmb
-.section ssdna
-.section ssdnb
-.section ssdoa
-.section ssdob
-.section ssdpa
-.section ssdpb
-.section ssdqa
-.section ssdqb
-.section ssdra
-.section ssdrb
-.section ssdsa
-.section ssdsb
-.section ssdta
-.section ssdtb
-.section ssdua
-.section ssdub
-.section ssdva
-.section ssdvb
-.section ssdwa
-.section ssdwb
-.section ssdxa
-.section ssdxb
-.section ssdya
-.section ssdyb
-.section ssdza
-.section ssdzb
-.section ssd1a
-.section ssd1b
-.section ssd2a
-.section ssd2b
-.section ssd3a
-.section ssd3b
-.section ssd4a
-.section ssd4b
-.section ssd5a
-.section ssd5b
-.section ssd6a
-.section ssd6b
-.section ssd7a
-.section ssd7b
-.section ssd8a
-.section ssd8b
-.section ssd9a
-.section ssd9b
-.section ssd0a
-.section ssd0b
-.section sseaa
-.section sseab
-.section sseba
-.section ssebb
-.section sseca
-.section ssecb
-.section sseda
-.section ssedb
-.section sseea
-.section sseeb
-.section ssefa
-.section ssefb
-.section ssega
-.section ssegb
-.section sseha
-.section ssehb
-.section sseia
-.section sseib
-.section sseja
-.section ssejb
-.section sseka
-.section ssekb
-.section ssela
-.section sselb
-.section ssema
-.section ssemb
-.section ssena
-.section ssenb
-.section sseoa
-.section sseob
-.section ssepa
-.section ssepb
-.section sseqa
-.section sseqb
-.section ssera
-.section sserb
-.section ssesa
-.section ssesb
-.section sseta
-.section ssetb
-.section sseua
-.section sseub
-.section sseva
-.section ssevb
-.section ssewa
-.section ssewb
-.section ssexa
-.section ssexb
-.section sseya
-.section sseyb
-.section sseza
-.section ssezb
-.section sse1a
-.section sse1b
-.section sse2a
-.section sse2b
-.section sse3a
-.section sse3b
-.section sse4a
-.section sse4b
-.section sse5a
-.section sse5b
-.section sse6a
-.section sse6b
-.section sse7a
-.section sse7b
-.section sse8a
-.section sse8b
-.section sse9a
-.section sse9b
-.section sse0a
-.section sse0b
-.section ssfaa
-.section ssfab
-.section ssfba
-.section ssfbb
-.section ssfca
-.section ssfcb
-.section ssfda
-.section ssfdb
-.section ssfea
-.section ssfeb
-.section ssffa
-.section ssffb
-.section ssfga
-.section ssfgb
-.section ssfha
-.section ssfhb
-.section ssfia
-.section ssfib
-.section ssfja
-.section ssfjb
-.section ssfka
-.section ssfkb
-.section ssfla
-.section ssflb
-.section ssfma
-.section ssfmb
-.section ssfna
-.section ssfnb
-.section ssfoa
-.section ssfob
-.section ssfpa
-.section ssfpb
-.section ssfqa
-.section ssfqb
-.section ssfra
-.section ssfrb
-.section ssfsa
-.section ssfsb
-.section ssfta
-.section ssftb
-.section ssfua
-.section ssfub
-.section ssfva
-.section ssfvb
-.section ssfwa
-.section ssfwb
-.section ssfxa
-.section ssfxb
-.section ssfya
-.section ssfyb
-.section ssfza
-.section ssfzb
-.section ssf1a
-.section ssf1b
-.section ssf2a
-.section ssf2b
-.section ssf3a
-.section ssf3b
-.section ssf4a
-.section ssf4b
-.section ssf5a
-.section ssf5b
-.section ssf6a
-.section ssf6b
-.section ssf7a
-.section ssf7b
-.section ssf8a
-.section ssf8b
-.section ssf9a
-.section ssf9b
-.section ssf0a
-.section ssf0b
-.section ssgaa
-.section ssgab
-.section ssgba
-.section ssgbb
-.section ssgca
-.section ssgcb
-.section ssgda
-.section ssgdb
-.section ssgea
-.section ssgeb
-.section ssgfa
-.section ssgfb
-.section ssgga
-.section ssggb
-.section ssgha
-.section ssghb
-.section ssgia
-.section ssgib
-.section ssgja
-.section ssgjb
-.section ssgka
-.section ssgkb
-.section ssgla
-.section ssglb
-.section ssgma
-.section ssgmb
-.section ssgna
-.section ssgnb
-.section ssgoa
-.section ssgob
-.section ssgpa
-.section ssgpb
-.section ssgqa
-.section ssgqb
-.section ssgra
-.section ssgrb
-.section ssgsa
-.section ssgsb
-.section ssgta
-.section ssgtb
-.section ssgua
-.section ssgub
-.section ssgva
-.section ssgvb
-.section ssgwa
-.section ssgwb
-.section ssgxa
-.section ssgxb
-.section ssgya
-.section ssgyb
-.section ssgza
-.section ssgzb
-.section ssg1a
-.section ssg1b
-.section ssg2a
-.section ssg2b
-.section ssg3a
-.section ssg3b
-.section ssg4a
-.section ssg4b
-.section ssg5a
-.section ssg5b
-.section ssg6a
-.section ssg6b
-.section ssg7a
-.section ssg7b
-.section ssg8a
-.section ssg8b
-.section ssg9a
-.section ssg9b
-.section ssg0a
-.section ssg0b
-.section sshaa
-.section sshab
-.section sshba
-.section sshbb
-.section sshca
-.section sshcb
-.section sshda
-.section sshdb
-.section sshea
-.section ssheb
-.section sshfa
-.section sshfb
-.section sshga
-.section sshgb
-.section sshha
-.section sshhb
-.section sshia
-.section sshib
-.section sshja
-.section sshjb
-.section sshka
-.section sshkb
-.section sshla
-.section sshlb
-.section sshma
-.section sshmb
-.section sshna
-.section sshnb
-.section sshoa
-.section sshob
-.section sshpa
-.section sshpb
-.section sshqa
-.section sshqb
-.section sshra
-.section sshrb
-.section sshsa
-.section sshsb
-.section sshta
-.section sshtb
-.section sshua
-.section sshub
-.section sshva
-.section sshvb
-.section sshwa
-.section sshwb
-.section sshxa
-.section sshxb
-.section sshya
-.section sshyb
-.section sshza
-.section sshzb
-.section ssh1a
-.section ssh1b
-.section ssh2a
-.section ssh2b
-.section ssh3a
-.section ssh3b
-.section ssh4a
-.section ssh4b
-.section ssh5a
-.section ssh5b
-.section ssh6a
-.section ssh6b
-.section ssh7a
-.section ssh7b
-.section ssh8a
-.section ssh8b
-.section ssh9a
-.section ssh9b
-.section ssh0a
-.section ssh0b
-.section ssiaa
-.section ssiab
-.section ssiba
-.section ssibb
-.section ssica
-.section ssicb
-.section ssida
-.section ssidb
-.section ssiea
-.section ssieb
-.section ssifa
-.section ssifb
-.section ssiga
-.section ssigb
-.section ssiha
-.section ssihb
-.section ssiia
-.section ssiib
-.section ssija
-.section ssijb
-.section ssika
-.section ssikb
-.section ssila
-.section ssilb
-.section ssima
-.section ssimb
-.section ssina
-.section ssinb
-.section ssioa
-.section ssiob
-.section ssipa
-.section ssipb
-.section ssiqa
-.section ssiqb
-.section ssira
-.section ssirb
-.section ssisa
-.section ssisb
-.section ssita
-.section ssitb
-.section ssiua
-.section ssiub
-.section ssiva
-.section ssivb
-.section ssiwa
-.section ssiwb
-.section ssixa
-.section ssixb
-.section ssiya
-.section ssiyb
-.section ssiza
-.section ssizb
-.section ssi1a
-.section ssi1b
-.section ssi2a
-.section ssi2b
-.section ssi3a
-.section ssi3b
-.section ssi4a
-.section ssi4b
-.section ssi5a
-.section ssi5b
-.section ssi6a
-.section ssi6b
-.section ssi7a
-.section ssi7b
-.section ssi8a
-.section ssi8b
-.section ssi9a
-.section ssi9b
-.section ssi0a
-.section ssi0b
-.section ssjaa
-.section ssjab
-.section ssjba
-.section ssjbb
-.section ssjca
-.section ssjcb
-.section ssjda
-.section ssjdb
-.section ssjea
-.section ssjeb
-.section ssjfa
-.section ssjfb
-.section ssjga
-.section ssjgb
-.section ssjha
-.section ssjhb
-.section ssjia
-.section ssjib
-.section ssjja
-.section ssjjb
-.section ssjka
-.section ssjkb
-.section ssjla
-.section ssjlb
-.section ssjma
-.section ssjmb
-.section ssjna
-.section ssjnb
-.section ssjoa
-.section ssjob
-.section ssjpa
-.section ssjpb
-.section ssjqa
-.section ssjqb
-.section ssjra
-.section ssjrb
-.section ssjsa
-.section ssjsb
-.section ssjta
-.section ssjtb
-.section ssjua
-.section ssjub
-.section ssjva
-.section ssjvb
-.section ssjwa
-.section ssjwb
-.section ssjxa
-.section ssjxb
-.section ssjya
-.section ssjyb
-.section ssjza
-.section ssjzb
-.section ssj1a
-.section ssj1b
-.section ssj2a
-.section ssj2b
-.section ssj3a
-.section ssj3b
-.section ssj4a
-.section ssj4b
-.section ssj5a
-.section ssj5b
-.section ssj6a
-.section ssj6b
-.section ssj7a
-.section ssj7b
-.section ssj8a
-.section ssj8b
-.section ssj9a
-.section ssj9b
-.section ssj0a
-.section ssj0b
-.section sskaa
-.section sskab
-.section sskba
-.section sskbb
-.section sskca
-.section sskcb
-.section sskda
-.section sskdb
-.section sskea
-.section sskeb
-.section sskfa
-.section sskfb
-.section sskga
-.section sskgb
-.section sskha
-.section sskhb
-.section sskia
-.section sskib
-.section sskja
-.section sskjb
-.section sskka
-.section sskkb
-.section sskla
-.section ssklb
-.section sskma
-.section sskmb
-.section sskna
-.section ssknb
-.section sskoa
-.section sskob
-.section sskpa
-.section sskpb
-.section sskqa
-.section sskqb
-.section sskra
-.section sskrb
-.section ssksa
-.section ssksb
-.section sskta
-.section ssktb
-.section sskua
-.section sskub
-.section sskva
-.section sskvb
-.section sskwa
-.section sskwb
-.section sskxa
-.section sskxb
-.section sskya
-.section sskyb
-.section sskza
-.section sskzb
-.section ssk1a
-.section ssk1b
-.section ssk2a
-.section ssk2b
-.section ssk3a
-.section ssk3b
-.section ssk4a
-.section ssk4b
-.section ssk5a
-.section ssk5b
-.section ssk6a
-.section ssk6b
-.section ssk7a
-.section ssk7b
-.section ssk8a
-.section ssk8b
-.section ssk9a
-.section ssk9b
-.section ssk0a
-.section ssk0b
-.section sslaa
-.section sslab
-.section sslba
-.section sslbb
-.section sslca
-.section sslcb
-.section sslda
-.section ssldb
-.section sslea
-.section ssleb
-.section sslfa
-.section sslfb
-.section sslga
-.section sslgb
-.section sslha
-.section sslhb
-.section sslia
-.section sslib
-.section sslja
-.section ssljb
-.section sslka
-.section sslkb
-.section sslla
-.section ssllb
-.section sslma
-.section sslmb
-.section sslna
-.section sslnb
-.section ssloa
-.section sslob
-.section sslpa
-.section sslpb
-.section sslqa
-.section sslqb
-.section sslra
-.section sslrb
-.section sslsa
-.section sslsb
-.section sslta
-.section ssltb
-.section sslua
-.section sslub
-.section sslva
-.section sslvb
-.section sslwa
-.section sslwb
-.section sslxa
-.section sslxb
-.section sslya
-.section sslyb
-.section sslza
-.section sslzb
-.section ssl1a
-.section ssl1b
-.section ssl2a
-.section ssl2b
-.section ssl3a
-.section ssl3b
-.section ssl4a
-.section ssl4b
-.section ssl5a
-.section ssl5b
-.section ssl6a
-.section ssl6b
-.section ssl7a
-.section ssl7b
-.section ssl8a
-.section ssl8b
-.section ssl9a
-.section ssl9b
-.section ssl0a
-.section ssl0b
-.section ssmaa
-.section ssmab
-.section ssmba
-.section ssmbb
-.section ssmca
-.section ssmcb
-.section ssmda
-.section ssmdb
-.section ssmea
-.section ssmeb
-.section ssmfa
-.section ssmfb
-.section ssmga
-.section ssmgb
-.section ssmha
-.section ssmhb
-.section ssmia
-.section ssmib
-.section ssmja
-.section ssmjb
-.section ssmka
-.section ssmkb
-.section ssmla
-.section ssmlb
-.section ssmma
-.section ssmmb
-.section ssmna
-.section ssmnb
-.section ssmoa
-.section ssmob
-.section ssmpa
-.section ssmpb
-.section ssmqa
-.section ssmqb
-.section ssmra
-.section ssmrb
-.section ssmsa
-.section ssmsb
-.section ssmta
-.section ssmtb
-.section ssmua
-.section ssmub
-.section ssmva
-.section ssmvb
-.section ssmwa
-.section ssmwb
-.section ssmxa
-.section ssmxb
-.section ssmya
-.section ssmyb
-.section ssmza
-.section ssmzb
-.section ssm1a
-.section ssm1b
-.section ssm2a
-.section ssm2b
-.section ssm3a
-.section ssm3b
-.section ssm4a
-.section ssm4b
-.section ssm5a
-.section ssm5b
-.section ssm6a
-.section ssm6b
-.section ssm7a
-.section ssm7b
-.section ssm8a
-.section ssm8b
-.section ssm9a
-.section ssm9b
-.section ssm0a
-.section ssm0b
-.section ssnaa
-.section ssnab
-.section ssnba
-.section ssnbb
-.section ssnca
-.section ssncb
-.section ssnda
-.section ssndb
-.section ssnea
-.section ssneb
-.section ssnfa
-.section ssnfb
-.section ssnga
-.section ssngb
-.section ssnha
-.section ssnhb
-.section ssnia
-.section ssnib
-.section ssnja
-.section ssnjb
-.section ssnka
-.section ssnkb
-.section ssnla
-.section ssnlb
-.section ssnma
-.section ssnmb
-.section ssnna
-.section ssnnb
-.section ssnoa
-.section ssnob
-.section ssnpa
-.section ssnpb
-.section ssnqa
-.section ssnqb
-.section ssnra
-.section ssnrb
-.section ssnsa
-.section ssnsb
-.section ssnta
-.section ssntb
-.section ssnua
-.section ssnub
-.section ssnva
-.section ssnvb
-.section ssnwa
-.section ssnwb
-.section ssnxa
-.section ssnxb
-.section ssnya
-.section ssnyb
-.section ssnza
-.section ssnzb
-.section ssn1a
-.section ssn1b
-.section ssn2a
-.section ssn2b
-.section ssn3a
-.section ssn3b
-.section ssn4a
-.section ssn4b
-.section ssn5a
-.section ssn5b
-.section ssn6a
-.section ssn6b
-.section ssn7a
-.section ssn7b
-.section ssn8a
-.section ssn8b
-.section ssn9a
-.section ssn9b
-.section ssn0a
-.section ssn0b
-.section ssoaa
-.section ssoab
-.section ssoba
-.section ssobb
-.section ssoca
-.section ssocb
-.section ssoda
-.section ssodb
-.section ssoea
-.section ssoeb
-.section ssofa
-.section ssofb
-.section ssoga
-.section ssogb
-.section ssoha
-.section ssohb
-.section ssoia
-.section ssoib
-.section ssoja
-.section ssojb
-.section ssoka
-.section ssokb
-.section ssola
-.section ssolb
-.section ssoma
-.section ssomb
-.section ssona
-.section ssonb
-.section ssooa
-.section ssoob
-.section ssopa
-.section ssopb
-.section ssoqa
-.section ssoqb
-.section ssora
-.section ssorb
-.section ssosa
-.section ssosb
-.section ssota
-.section ssotb
-.section ssoua
-.section ssoub
-.section ssova
-.section ssovb
-.section ssowa
-.section ssowb
-.section ssoxa
-.section ssoxb
-.section ssoya
-.section ssoyb
-.section ssoza
-.section ssozb
-.section sso1a
-.section sso1b
-.section sso2a
-.section sso2b
-.section sso3a
-.section sso3b
-.section sso4a
-.section sso4b
-.section sso5a
-.section sso5b
-.section sso6a
-.section sso6b
-.section sso7a
-.section sso7b
-.section sso8a
-.section sso8b
-.section sso9a
-.section sso9b
-.section sso0a
-.section sso0b
-.section sspaa
-.section sspab
-.section sspba
-.section sspbb
-.section sspca
-.section sspcb
-.section sspda
-.section sspdb
-.section sspea
-.section sspeb
-.section sspfa
-.section sspfb
-.section sspga
-.section sspgb
-.section sspha
-.section ssphb
-.section sspia
-.section sspib
-.section sspja
-.section sspjb
-.section sspka
-.section sspkb
-.section sspla
-.section ssplb
-.section sspma
-.section sspmb
-.section sspna
-.section sspnb
-.section sspoa
-.section sspob
-.section ssppa
-.section ssppb
-.section sspqa
-.section sspqb
-.section sspra
-.section ssprb
-.section sspsa
-.section sspsb
-.section sspta
-.section ssptb
-.section sspua
-.section sspub
-.section sspva
-.section sspvb
-.section sspwa
-.section sspwb
-.section sspxa
-.section sspxb
-.section sspya
-.section sspyb
-.section sspza
-.section sspzb
-.section ssp1a
-.section ssp1b
-.section ssp2a
-.section ssp2b
-.section ssp3a
-.section ssp3b
-.section ssp4a
-.section ssp4b
-.section ssp5a
-.section ssp5b
-.section ssp6a
-.section ssp6b
-.section ssp7a
-.section ssp7b
-.section ssp8a
-.section ssp8b
-.section ssp9a
-.section ssp9b
-.section ssp0a
-.section ssp0b
-.section ssqaa
-.section ssqab
-.section ssqba
-.section ssqbb
-.section ssqca
-.section ssqcb
-.section ssqda
-.section ssqdb
-.section ssqea
-.section ssqeb
-.section ssqfa
-.section ssqfb
-.section ssqga
-.section ssqgb
-.section ssqha
-.section ssqhb
-.section ssqia
-.section ssqib
-.section ssqja
-.section ssqjb
-.section ssqka
-.section ssqkb
-.section ssqla
-.section ssqlb
-.section ssqma
-.section ssqmb
-.section ssqna
-.section ssqnb
-.section ssqoa
-.section ssqob
-.section ssqpa
-.section ssqpb
-.section ssqqa
-.section ssqqb
-.section ssqra
-.section ssqrb
-.section ssqsa
-.section ssqsb
-.section ssqta
-.section ssqtb
-.section ssqua
-.section ssqub
-.section ssqva
-.section ssqvb
-.section ssqwa
-.section ssqwb
-.section ssqxa
-.section ssqxb
-.section ssqya
-.section ssqyb
-.section ssqza
-.section ssqzb
-.section ssq1a
-.section ssq1b
-.section ssq2a
-.section ssq2b
-.section ssq3a
-.section ssq3b
-.section ssq4a
-.section ssq4b
-.section ssq5a
-.section ssq5b
-.section ssq6a
-.section ssq6b
-.section ssq7a
-.section ssq7b
-.section ssq8a
-.section ssq8b
-.section ssq9a
-.section ssq9b
-.section ssq0a
-.section ssq0b
-.section ssraa
-.section ssrab
-.section ssrba
-.section ssrbb
-.section ssrca
-.section ssrcb
-.section ssrda
-.section ssrdb
-.section ssrea
-.section ssreb
-.section ssrfa
-.section ssrfb
-.section ssrga
-.section ssrgb
-.section ssrha
-.section ssrhb
-.section ssria
-.section ssrib
-.section ssrja
-.section ssrjb
-.section ssrka
-.section ssrkb
-.section ssrla
-.section ssrlb
-.section ssrma
-.section ssrmb
-.section ssrna
-.section ssrnb
-.section ssroa
-.section ssrob
-.section ssrpa
-.section ssrpb
-.section ssrqa
-.section ssrqb
-.section ssrra
-.section ssrrb
-.section ssrsa
-.section ssrsb
-.section ssrta
-.section ssrtb
-.section ssrua
-.section ssrub
-.section ssrva
-.section ssrvb
-.section ssrwa
-.section ssrwb
-.section ssrxa
-.section ssrxb
-.section ssrya
-.section ssryb
-.section ssrza
-.section ssrzb
-.section ssr1a
-.section ssr1b
-.section ssr2a
-.section ssr2b
-.section ssr3a
-.section ssr3b
-.section ssr4a
-.section ssr4b
-.section ssr5a
-.section ssr5b
-.section ssr6a
-.section ssr6b
-.section ssr7a
-.section ssr7b
-.section ssr8a
-.section ssr8b
-.section ssr9a
-.section ssr9b
-.section ssr0a
-.section ssr0b
-.section sssaa
-.section sssab
-.section sssba
-.section sssbb
-.section sssca
-.section ssscb
-.section sssda
-.section sssdb
-.section sssea
-.section ssseb
-.section sssfa
-.section sssfb
-.section sssga
-.section sssgb
-.section sssha
-.section ssshb
-.section sssia
-.section sssib
-.section sssja
-.section sssjb
-.section ssska
-.section ssskb
-.section sssla
-.section ssslb
-.section sssma
-.section sssmb
-.section sssna
-.section sssnb
-.section sssoa
-.section sssob
-.section ssspa
-.section ssspb
-.section sssqa
-.section sssqb
-.section sssra
-.section sssrb
-.section ssssa
-.section ssssb
-.section sssta
-.section ssstb
-.section sssua
-.section sssub
-.section sssva
-.section sssvb
-.section ssswa
-.section ssswb
-.section sssxa
-.section sssxb
-.section sssya
-.section sssyb
-.section sssza
-.section ssszb
-.section sss1a
-.section sss1b
-.section sss2a
-.section sss2b
-.section sss3a
-.section sss3b
-.section sss4a
-.section sss4b
-.section sss5a
-.section sss5b
-.section sss6a
-.section sss6b
-.section sss7a
-.section sss7b
-.section sss8a
-.section sss8b
-.section sss9a
-.section sss9b
-.section sss0a
-.section sss0b
-.section sstaa
-.section sstab
-.section sstba
-.section sstbb
-.section sstca
-.section sstcb
-.section sstda
-.section sstdb
-.section sstea
-.section ssteb
-.section sstfa
-.section sstfb
-.section sstga
-.section sstgb
-.section sstha
-.section ssthb
-.section sstia
-.section sstib
-.section sstja
-.section sstjb
-.section sstka
-.section sstkb
-.section sstla
-.section sstlb
-.section sstma
-.section sstmb
-.section sstna
-.section sstnb
-.section sstoa
-.section sstob
-.section sstpa
-.section sstpb
-.section sstqa
-.section sstqb
-.section sstra
-.section sstrb
-.section sstsa
-.section sstsb
-.section sstta
-.section ssttb
-.section sstua
-.section sstub
-.section sstva
-.section sstvb
-.section sstwa
-.section sstwb
-.section sstxa
-.section sstxb
-.section sstya
-.section sstyb
-.section sstza
-.section sstzb
-.section sst1a
-.section sst1b
-.section sst2a
-.section sst2b
-.section sst3a
-.section sst3b
-.section sst4a
-.section sst4b
-.section sst5a
-.section sst5b
-.section sst6a
-.section sst6b
-.section sst7a
-.section sst7b
-.section sst8a
-.section sst8b
-.section sst9a
-.section sst9b
-.section sst0a
-.section sst0b
-.section ssuaa
-.section ssuab
-.section ssuba
-.section ssubb
-.section ssuca
-.section ssucb
-.section ssuda
-.section ssudb
-.section ssuea
-.section ssueb
-.section ssufa
-.section ssufb
-.section ssuga
-.section ssugb
-.section ssuha
-.section ssuhb
-.section ssuia
-.section ssuib
-.section ssuja
-.section ssujb
-.section ssuka
-.section ssukb
-.section ssula
-.section ssulb
-.section ssuma
-.section ssumb
-.section ssuna
-.section ssunb
-.section ssuoa
-.section ssuob
-.section ssupa
-.section ssupb
-.section ssuqa
-.section ssuqb
-.section ssura
-.section ssurb
-.section ssusa
-.section ssusb
-.section ssuta
-.section ssutb
-.section ssuua
-.section ssuub
-.section ssuva
-.section ssuvb
-.section ssuwa
-.section ssuwb
-.section ssuxa
-.section ssuxb
-.section ssuya
-.section ssuyb
-.section ssuza
-.section ssuzb
-.section ssu1a
-.section ssu1b
-.section ssu2a
-.section ssu2b
-.section ssu3a
-.section ssu3b
-.section ssu4a
-.section ssu4b
-.section ssu5a
-.section ssu5b
-.section ssu6a
-.section ssu6b
-.section ssu7a
-.section ssu7b
-.section ssu8a
-.section ssu8b
-.section ssu9a
-.section ssu9b
-.section ssu0a
-.section ssu0b
-.section ssvaa
-.section ssvab
-.section ssvba
-.section ssvbb
-.section ssvca
-.section ssvcb
-.section ssvda
-.section ssvdb
-.section ssvea
-.section ssveb
-.section ssvfa
-.section ssvfb
-.section ssvga
-.section ssvgb
-.section ssvha
-.section ssvhb
-.section ssvia
-.section ssvib
-.section ssvja
-.section ssvjb
-.section ssvka
-.section ssvkb
-.section ssvla
-.section ssvlb
-.section ssvma
-.section ssvmb
-.section ssvna
-.section ssvnb
-.section ssvoa
-.section ssvob
-.section ssvpa
-.section ssvpb
-.section ssvqa
-.section ssvqb
-.section ssvra
-.section ssvrb
-.section ssvsa
-.section ssvsb
-.section ssvta
-.section ssvtb
-.section ssvua
-.section ssvub
-.section ssvva
-.section ssvvb
-.section ssvwa
-.section ssvwb
-.section ssvxa
-.section ssvxb
-.section ssvya
-.section ssvyb
-.section ssvza
-.section ssvzb
-.section ssv1a
-.section ssv1b
-.section ssv2a
-.section ssv2b
-.section ssv3a
-.section ssv3b
-.section ssv4a
-.section ssv4b
-.section ssv5a
-.section ssv5b
-.section ssv6a
-.section ssv6b
-.section ssv7a
-.section ssv7b
-.section ssv8a
-.section ssv8b
-.section ssv9a
-.section ssv9b
-.section ssv0a
-.section ssv0b
-.section sswaa
-.section sswab
-.section sswba
-.section sswbb
-.section sswca
-.section sswcb
-.section sswda
-.section sswdb
-.section sswea
-.section ssweb
-.section sswfa
-.section sswfb
-.section sswga
-.section sswgb
-.section sswha
-.section sswhb
-.section sswia
-.section sswib
-.section sswja
-.section sswjb
-.section sswka
-.section sswkb
-.section sswla
-.section sswlb
-.section sswma
-.section sswmb
-.section sswna
-.section sswnb
-.section sswoa
-.section sswob
-.section sswpa
-.section sswpb
-.section sswqa
-.section sswqb
-.section sswra
-.section sswrb
-.section sswsa
-.section sswsb
-.section sswta
-.section sswtb
-.section sswua
-.section sswub
-.section sswva
-.section sswvb
-.section sswwa
-.section sswwb
-.section sswxa
-.section sswxb
-.section sswya
-.section sswyb
-.section sswza
-.section sswzb
-.section ssw1a
-.section ssw1b
-.section ssw2a
-.section ssw2b
-.section ssw3a
-.section ssw3b
-.section ssw4a
-.section ssw4b
-.section ssw5a
-.section ssw5b
-.section ssw6a
-.section ssw6b
-.section ssw7a
-.section ssw7b
-.section ssw8a
-.section ssw8b
-.section ssw9a
-.section ssw9b
-.section ssw0a
-.section ssw0b
-.section ssxaa
-.section ssxab
-.section ssxba
-.section ssxbb
-.section ssxca
-.section ssxcb
-.section ssxda
-.section ssxdb
-.section ssxea
-.section ssxeb
-.section ssxfa
-.section ssxfb
-.section ssxga
-.section ssxgb
-.section ssxha
-.section ssxhb
-.section ssxia
-.section ssxib
-.section ssxja
-.section ssxjb
-.section ssxka
-.section ssxkb
-.section ssxla
-.section ssxlb
-.section ssxma
-.section ssxmb
-.section ssxna
-.section ssxnb
-.section ssxoa
-.section ssxob
-.section ssxpa
-.section ssxpb
-.section ssxqa
-.section ssxqb
-.section ssxra
-.section ssxrb
-.section ssxsa
-.section ssxsb
-.section ssxta
-.section ssxtb
-.section ssxua
-.section ssxub
-.section ssxva
-.section ssxvb
-.section ssxwa
-.section ssxwb
-.section ssxxa
-.section ssxxb
-.section ssxya
-.section ssxyb
-.section ssxza
-.section ssxzb
-.section ssx1a
-.section ssx1b
-.section ssx2a
-.section ssx2b
-.section ssx3a
-.section ssx3b
-.section ssx4a
-.section ssx4b
-.section ssx5a
-.section ssx5b
-.section ssx6a
-.section ssx6b
-.section ssx7a
-.section ssx7b
-.section ssx8a
-.section ssx8b
-.section ssx9a
-.section ssx9b
-.section ssx0a
-.section ssx0b
-.section ssyaa
-.section ssyab
-.section ssyba
-.section ssybb
-.section ssyca
-.section ssycb
-.section ssyda
-.section ssydb
-.section ssyea
-.section ssyeb
-.section ssyfa
-.section ssyfb
-.section ssyga
-.section ssygb
-.section ssyha
-.section ssyhb
-.section ssyia
-.section ssyib
-.section ssyja
-.section ssyjb
-.section ssyka
-.section ssykb
-.section ssyla
-.section ssylb
-.section ssyma
-.section ssymb
-.section ssyna
-.section ssynb
-.section ssyoa
-.section ssyob
-.section ssypa
-.section ssypb
-.section ssyqa
-.section ssyqb
-.section ssyra
-.section ssyrb
-.section ssysa
-.section ssysb
-.section ssyta
-.section ssytb
-.section ssyua
-.section ssyub
-.section ssyva
-.section ssyvb
-.section ssywa
-.section ssywb
-.section ssyxa
-.section ssyxb
-.section ssyya
-.section ssyyb
-.section ssyza
-.section ssyzb
-.section ssy1a
-.section ssy1b
-.section ssy2a
-.section ssy2b
-.section ssy3a
-.section ssy3b
-.section ssy4a
-.section ssy4b
-.section ssy5a
-.section ssy5b
-.section ssy6a
-.section ssy6b
-.section ssy7a
-.section ssy7b
-.section ssy8a
-.section ssy8b
-.section ssy9a
-.section ssy9b
-.section ssy0a
-.section ssy0b
-.section sszaa
-.section sszab
-.section sszba
-.section sszbb
-.section sszca
-.section sszcb
-.section sszda
-.section sszdb
-.section sszea
-.section sszeb
-.section sszfa
-.section sszfb
-.section sszga
-.section sszgb
-.section sszha
-.section sszhb
-.section sszia
-.section sszib
-.section sszja
-.section sszjb
-.section sszka
-.section sszkb
-.section sszla
-.section sszlb
-.section sszma
-.section sszmb
-.section sszna
-.section ssznb
-.section sszoa
-.section sszob
-.section sszpa
-.section sszpb
-.section sszqa
-.section sszqb
-.section sszra
-.section sszrb
-.section sszsa
-.section sszsb
-.section sszta
-.section ssztb
-.section sszua
-.section sszub
-.section sszva
-.section sszvb
-.section sszwa
-.section sszwb
-.section sszxa
-.section sszxb
-.section sszya
-.section sszyb
-.section sszza
-.section sszzb
-.section ssz1a
-.section ssz1b
-.section ssz2a
-.section ssz2b
-.section ssz3a
-.section ssz3b
-.section ssz4a
-.section ssz4b
-.section ssz5a
-.section ssz5b
-.section ssz6a
-.section ssz6b
-.section ssz7a
-.section ssz7b
-.section ssz8a
-.section ssz8b
-.section ssz9a
-.section ssz9b
-.section ssz0a
-.section ssz0b
-.section ss1aa
-.section ss1ab
-.section ss1ba
-.section ss1bb
-.section ss1ca
-.section ss1cb
-.section ss1da
-.section ss1db
-.section ss1ea
-.section ss1eb
-.section ss1fa
-.section ss1fb
-.section ss1ga
-.section ss1gb
-.section ss1ha
-.section ss1hb
-.section ss1ia
-.section ss1ib
-.section ss1ja
-.section ss1jb
-.section ss1ka
-.section ss1kb
-.section ss1la
-.section ss1lb
-.section ss1ma
-.section ss1mb
-.section ss1na
-.section ss1nb
-.section ss1oa
-.section ss1ob
-.section ss1pa
-.section ss1pb
-.section ss1qa
-.section ss1qb
-.section ss1ra
-.section ss1rb
-.section ss1sa
-.section ss1sb
-.section ss1ta
-.section ss1tb
-.section ss1ua
-.section ss1ub
-.section ss1va
-.section ss1vb
-.section ss1wa
-.section ss1wb
-.section ss1xa
-.section ss1xb
-.section ss1ya
-.section ss1yb
-.section ss1za
-.section ss1zb
-.section ss11a
-.section ss11b
-.section ss12a
-.section ss12b
-.section ss13a
-.section ss13b
-.section ss14a
-.section ss14b
-.section ss15a
-.section ss15b
-.section ss16a
-.section ss16b
-.section ss17a
-.section ss17b
-.section ss18a
-.section ss18b
-.section ss19a
-.section ss19b
-.section ss10a
-.section ss10b
-.section ss2aa
-.section ss2ab
-.section ss2ba
-.section ss2bb
-.section ss2ca
-.section ss2cb
-.section ss2da
-.section ss2db
-.section ss2ea
-.section ss2eb
-.section ss2fa
-.section ss2fb
-.section ss2ga
-.section ss2gb
-.section ss2ha
-.section ss2hb
-.section ss2ia
-.section ss2ib
-.section ss2ja
-.section ss2jb
-.section ss2ka
-.section ss2kb
-.section ss2la
-.section ss2lb
-.section ss2ma
-.section ss2mb
-.section ss2na
-.section ss2nb
-.section ss2oa
-.section ss2ob
-.section ss2pa
-.section ss2pb
-.section ss2qa
-.section ss2qb
-.section ss2ra
-.section ss2rb
-.section ss2sa
-.section ss2sb
-.section ss2ta
-.section ss2tb
-.section ss2ua
-.section ss2ub
-.section ss2va
-.section ss2vb
-.section ss2wa
-.section ss2wb
-.section ss2xa
-.section ss2xb
-.section ss2ya
-.section ss2yb
-.section ss2za
-.section ss2zb
-.section ss21a
-.section ss21b
-.section ss22a
-.section ss22b
-.section ss23a
-.section ss23b
-.section ss24a
-.section ss24b
-.section ss25a
-.section ss25b
-.section ss26a
-.section ss26b
-.section ss27a
-.section ss27b
-.section ss28a
-.section ss28b
-.section ss29a
-.section ss29b
-.section ss20a
-.section ss20b
-.section ss3aa
-.section ss3ab
-.section ss3ba
-.section ss3bb
-.section ss3ca
-.section ss3cb
-.section ss3da
-.section ss3db
-.section ss3ea
-.section ss3eb
-.section ss3fa
-.section ss3fb
-.section ss3ga
-.section ss3gb
-.section ss3ha
-.section ss3hb
-.section ss3ia
-.section ss3ib
-.section ss3ja
-.section ss3jb
-.section ss3ka
-.section ss3kb
-.section ss3la
-.section ss3lb
-.section ss3ma
-.section ss3mb
-.section ss3na
-.section ss3nb
-.section ss3oa
-.section ss3ob
-.section ss3pa
-.section ss3pb
-.section ss3qa
-.section ss3qb
-.section ss3ra
-.section ss3rb
-.section ss3sa
-.section ss3sb
-.section ss3ta
-.section ss3tb
-.section ss3ua
-.section ss3ub
-.section ss3va
-.section ss3vb
-.section ss3wa
-.section ss3wb
-.section ss3xa
-.section ss3xb
-.section ss3ya
-.section ss3yb
-.section ss3za
-.section ss3zb
-.section ss31a
-.section ss31b
-.section ss32a
-.section ss32b
-.section ss33a
-.section ss33b
-.section ss34a
-.section ss34b
-.section ss35a
-.section ss35b
-.section ss36a
-.section ss36b
-.section ss37a
-.section ss37b
-.section ss38a
-.section ss38b
-.section ss39a
-.section ss39b
-.section ss30a
-.section ss30b
-.section ss4aa
-.section ss4ab
-.section ss4ba
-.section ss4bb
-.section ss4ca
-.section ss4cb
-.section ss4da
-.section ss4db
-.section ss4ea
-.section ss4eb
-.section ss4fa
-.section ss4fb
-.section ss4ga
-.section ss4gb
-.section ss4ha
-.section ss4hb
-.section ss4ia
-.section ss4ib
-.section ss4ja
-.section ss4jb
-.section ss4ka
-.section ss4kb
-.section ss4la
-.section ss4lb
-.section ss4ma
-.section ss4mb
-.section ss4na
-.section ss4nb
-.section ss4oa
-.section ss4ob
-.section ss4pa
-.section ss4pb
-.section ss4qa
-.section ss4qb
-.section ss4ra
-.section ss4rb
-.section ss4sa
-.section ss4sb
-.section ss4ta
-.section ss4tb
-.section ss4ua
-.section ss4ub
-.section ss4va
-.section ss4vb
-.section ss4wa
-.section ss4wb
-.section ss4xa
-.section ss4xb
-.section ss4ya
-.section ss4yb
-.section ss4za
-.section ss4zb
-.section ss41a
-.section ss41b
-.section ss42a
-.section ss42b
-.section ss43a
-.section ss43b
-.section ss44a
-.section ss44b
-.section ss45a
-.section ss45b
-.section ss46a
-.section ss46b
-.section ss47a
-.section ss47b
-.section ss48a
-.section ss48b
-.section ss49a
-.section ss49b
-.section ss40a
-.section ss40b
-.section ss5aa
-.section ss5ab
-.section ss5ba
-.section ss5bb
-.section ss5ca
-.section ss5cb
-.section ss5da
-.section ss5db
-.section ss5ea
-.section ss5eb
-.section ss5fa
-.section ss5fb
-.section ss5ga
-.section ss5gb
-.section ss5ha
-.section ss5hb
-.section ss5ia
-.section ss5ib
-.section ss5ja
-.section ss5jb
-.section ss5ka
-.section ss5kb
-.section ss5la
-.section ss5lb
-.section ss5ma
-.section ss5mb
-.section ss5na
-.section ss5nb
-.section ss5oa
-.section ss5ob
-.section ss5pa
-.section ss5pb
-.section ss5qa
-.section ss5qb
-.section ss5ra
-.section ss5rb
-.section ss5sa
-.section ss5sb
-.section ss5ta
-.section ss5tb
-.section ss5ua
-.section ss5ub
-.section ss5va
-.section ss5vb
-.section ss5wa
-.section ss5wb
-.section ss5xa
-.section ss5xb
-.section ss5ya
-.section ss5yb
-.section ss5za
-.section ss5zb
-.section ss51a
-.section ss51b
-.section ss52a
-.section ss52b
-.section ss53a
-.section ss53b
-.section ss54a
-.section ss54b
-.section ss55a
-.section ss55b
-.section ss56a
-.section ss56b
-.section ss57a
-.section ss57b
-.section ss58a
-.section ss58b
-.section ss59a
-.section ss59b
-.section ss50a
-.section ss50b
-.section ss6aa
-.section ss6ab
-.section ss6ba
-.section ss6bb
-.section ss6ca
-.section ss6cb
-.section ss6da
-.section ss6db
-.section ss6ea
-.section ss6eb
-.section ss6fa
-.section ss6fb
-.section ss6ga
-.section ss6gb
-.section ss6ha
-.section ss6hb
-.section ss6ia
-.section ss6ib
-.section ss6ja
-.section ss6jb
-.section ss6ka
-.section ss6kb
-.section ss6la
-.section ss6lb
-.section ss6ma
-.section ss6mb
-.section ss6na
-.section ss6nb
-.section ss6oa
-.section ss6ob
-.section ss6pa
-.section ss6pb
-.section ss6qa
-.section ss6qb
-.section ss6ra
-.section ss6rb
-.section ss6sa
-.section ss6sb
-.section ss6ta
-.section ss6tb
-.section ss6ua
-.section ss6ub
-.section ss6va
-.section ss6vb
-.section ss6wa
-.section ss6wb
-.section ss6xa
-.section ss6xb
-.section ss6ya
-.section ss6yb
-.section ss6za
-.section ss6zb
-.section ss61a
-.section ss61b
-.section ss62a
-.section ss62b
-.section ss63a
-.section ss63b
-.section ss64a
-.section ss64b
-.section ss65a
-.section ss65b
-.section ss66a
-.section ss66b
-.section ss67a
-.section ss67b
-.section ss68a
-.section ss68b
-.section ss69a
-.section ss69b
-.section ss60a
-.section ss60b
-.section ss7aa
-.section ss7ab
-.section ss7ba
-.section ss7bb
-.section ss7ca
-.section ss7cb
-.section ss7da
-.section ss7db
-.section ss7ea
-.section ss7eb
-.section ss7fa
-.section ss7fb
-.section ss7ga
-.section ss7gb
-.section ss7ha
-.section ss7hb
-.section ss7ia
-.section ss7ib
-.section ss7ja
-.section ss7jb
-.section ss7ka
-.section ss7kb
-.section ss7la
-.section ss7lb
-.section ss7ma
-.section ss7mb
-.section ss7na
-.section ss7nb
-.section ss7oa
-.section ss7ob
-.section ss7pa
-.section ss7pb
-.section ss7qa
-.section ss7qb
-.section ss7ra
-.section ss7rb
-.section ss7sa
-.section ss7sb
-.section ss7ta
-.section ss7tb
-.section ss7ua
-.section ss7ub
-.section ss7va
-.section ss7vb
-.section ss7wa
-.section ss7wb
-.section ss7xa
-.section ss7xb
-.section ss7ya
-.section ss7yb
-.section ss7za
-.section ss7zb
-.section ss71a
-.section ss71b
-.section ss72a
-.section ss72b
-.section ss73a
-.section ss73b
-.section ss74a
-.section ss74b
-.section ss75a
-.section ss75b
-.section ss76a
-.section ss76b
-.section ss77a
-.section ss77b
-.section ss78a
-.section ss78b
-.section ss79a
-.section ss79b
-.section ss70a
-.section ss70b
-.section ss8aa
-.section ss8ab
-.section ss8ba
-.section ss8bb
-.section ss8ca
-.section ss8cb
-.section ss8da
-.section ss8db
-.section ss8ea
-.section ss8eb
-.section ss8fa
-.section ss8fb
-.section ss8ga
-.section ss8gb
-.section ss8ha
-.section ss8hb
-.section ss8ia
-.section ss8ib
-.section ss8ja
-.section ss8jb
-.section ss8ka
-.section ss8kb
-.section ss8la
-.section ss8lb
-.section ss8ma
-.section ss8mb
-.section ss8na
-.section ss8nb
-.section ss8oa
-.section ss8ob
-.section ss8pa
-.section ss8pb
-.section ss8qa
-.section ss8qb
-.section ss8ra
-.section ss8rb
-.section ss8sa
-.section ss8sb
-.section ss8ta
-.section ss8tb
-.section ss8ua
-.section ss8ub
-.section ss8va
-.section ss8vb
-.section ss8wa
-.section ss8wb
-.section ss8xa
-.section ss8xb
-.section ss8ya
-.section ss8yb
-.section ss8za
-.section ss8zb
-.section ss81a
-.section ss81b
-.section ss82a
-.section ss82b
-.section ss83a
-.section ss83b
-.section ss84a
-.section ss84b
-.section ss85a
-.section ss85b
-.section ss86a
-.section ss86b
-.section ss87a
-.section ss87b
-.section ss88a
-.section ss88b
-.section ss89a
-.section ss89b
-.section ss80a
-.section ss80b
-.section ss9aa
-.section ss9ab
-.section ss9ba
-.section ss9bb
-.section ss9ca
-.section ss9cb
-.section ss9da
-.section ss9db
-.section ss9ea
-.section ss9eb
-.section ss9fa
-.section ss9fb
-.section ss9ga
-.section ss9gb
-.section ss9ha
-.section ss9hb
-.section ss9ia
-.section ss9ib
-.section ss9ja
-.section ss9jb
-.section ss9ka
-.section ss9kb
-.section ss9la
-.section ss9lb
-.section ss9ma
-.section ss9mb
-.section ss9na
-.section ss9nb
-.section ss9oa
-.section ss9ob
-.section ss9pa
-.section ss9pb
-.section ss9qa
-.section ss9qb
-.section ss9ra
-.section ss9rb
-.section ss9sa
-.section ss9sb
-.section ss9ta
-.section ss9tb
-.section ss9ua
-.section ss9ub
-.section ss9va
-.section ss9vb
-.section ss9wa
-.section ss9wb
-.section ss9xa
-.section ss9xb
-.section ss9ya
-.section ss9yb
-.section ss9za
-.section ss9zb
-.section ss91a
-.section ss91b
-.section ss92a
-.section ss92b
-.section ss93a
-.section ss93b
-.section ss94a
-.section ss94b
-.section ss95a
-.section ss95b
-.section ss96a
-.section ss96b
-.section ss97a
-.section ss97b
-.section ss98a
-.section ss98b
-.section ss99a
-.section ss99b
-.section ss90a
-.section ss90b
-.section ss0aa
-.section ss0ab
-.section ss0ba
-.section ss0bb
-.section ss0ca
-.section ss0cb
-.section ss0da
-.section ss0db
-.section ss0ea
-.section ss0eb
-.section ss0fa
-.section ss0fb
-.section ss0ga
-.section ss0gb
-.section ss0ha
-.section ss0hb
-.section ss0ia
-.section ss0ib
-.section ss0ja
-.section ss0jb
-.section ss0ka
-.section ss0kb
-.section ss0la
-.section ss0lb
-.section ss0ma
-.section ss0mb
-.section ss0na
-.section ss0nb
-.section ss0oa
-.section ss0ob
-.section ss0pa
-.section ss0pb
-.section ss0qa
-.section ss0qb
-.section ss0ra
-.section ss0rb
-.section ss0sa
-.section ss0sb
-.section ss0ta
-.section ss0tb
-.section ss0ua
-.section ss0ub
-.section ss0va
-.section ss0vb
-.section ss0wa
-.section ss0wb
-.section ss0xa
-.section ss0xb
-.section ss0ya
-.section ss0yb
-.section ss0za
-.section ss0zb
-.section ss01a
-.section ss01b
-.section ss02a
-.section ss02b
-.section ss03a
-.section ss03b
-.section ss04a
-.section ss04b
-.section ss05a
-.section ss05b
-.section ss06a
-.section ss06b
-.section ss07a
-.section ss07b
-.section ss08a
-.section ss08b
-.section ss09a
-.section ss09b
-.section ss00a
-.section ss00b
-.section staaa
-.section staab
-.section staba
-.section stabb
-.section staca
-.section stacb
-.section stada
-.section stadb
-.section staea
-.section staeb
-.section stafa
-.section stafb
-.section staga
-.section stagb
-.section staha
-.section stahb
-.section staia
-.section staib
-.section staja
-.section stajb
-.section staka
-.section stakb
-.section stala
-.section stalb
-.section stama
-.section stamb
-.section stana
-.section stanb
-.section staoa
-.section staob
-.section stapa
-.section stapb
-.section staqa
-.section staqb
-.section stara
-.section starb
-.section stasa
-.section stasb
-.section stata
-.section statb
-.section staua
-.section staub
-.section stava
-.section stavb
-.section stawa
-.section stawb
-.section staxa
-.section staxb
-.section staya
-.section stayb
-.section staza
-.section stazb
-.section sta1a
-.section sta1b
-.section sta2a
-.section sta2b
-.section sta3a
-.section sta3b
-.section sta4a
-.section sta4b
-.section sta5a
-.section sta5b
-.section sta6a
-.section sta6b
-.section sta7a
-.section sta7b
-.section sta8a
-.section sta8b
-.section sta9a
-.section sta9b
-.section sta0a
-.section sta0b
-.section stbaa
-.section stbab
-.section stbba
-.section stbbb
-.section stbca
-.section stbcb
-.section stbda
-.section stbdb
-.section stbea
-.section stbeb
-.section stbfa
-.section stbfb
-.section stbga
-.section stbgb
-.section stbha
-.section stbhb
-.section stbia
-.section stbib
-.section stbja
-.section stbjb
-.section stbka
-.section stbkb
-.section stbla
-.section stblb
-.section stbma
-.section stbmb
-.section stbna
-.section stbnb
-.section stboa
-.section stbob
-.section stbpa
-.section stbpb
-.section stbqa
-.section stbqb
-.section stbra
-.section stbrb
-.section stbsa
-.section stbsb
-.section stbta
-.section stbtb
-.section stbua
-.section stbub
-.section stbva
-.section stbvb
-.section stbwa
-.section stbwb
-.section stbxa
-.section stbxb
-.section stbya
-.section stbyb
-.section stbza
-.section stbzb
-.section stb1a
-.section stb1b
-.section stb2a
-.section stb2b
-.section stb3a
-.section stb3b
-.section stb4a
-.section stb4b
-.section stb5a
-.section stb5b
-.section stb6a
-.section stb6b
-.section stb7a
-.section stb7b
-.section stb8a
-.section stb8b
-.section stb9a
-.section stb9b
-.section stb0a
-.section stb0b
-.section stcaa
-.section stcab
-.section stcba
-.section stcbb
-.section stcca
-.section stccb
-.section stcda
-.section stcdb
-.section stcea
-.section stceb
-.section stcfa
-.section stcfb
-.section stcga
-.section stcgb
-.section stcha
-.section stchb
-.section stcia
-.section stcib
-.section stcja
-.section stcjb
-.section stcka
-.section stckb
-.section stcla
-.section stclb
-.section stcma
-.section stcmb
-.section stcna
-.section stcnb
-.section stcoa
-.section stcob
-.section stcpa
-.section stcpb
-.section stcqa
-.section stcqb
-.section stcra
-.section stcrb
-.section stcsa
-.section stcsb
-.section stcta
-.section stctb
-.section stcua
-.section stcub
-.section stcva
-.section stcvb
-.section stcwa
-.section stcwb
-.section stcxa
-.section stcxb
-.section stcya
-.section stcyb
-.section stcza
-.section stczb
-.section stc1a
-.section stc1b
-.section stc2a
-.section stc2b
-.section stc3a
-.section stc3b
-.section stc4a
-.section stc4b
-.section stc5a
-.section stc5b
-.section stc6a
-.section stc6b
-.section stc7a
-.section stc7b
-.section stc8a
-.section stc8b
-.section stc9a
-.section stc9b
-.section stc0a
-.section stc0b
-.section stdaa
-.section stdab
-.section stdba
-.section stdbb
-.section stdca
-.section stdcb
-.section stdda
-.section stddb
-.section stdea
-.section stdeb
-.section stdfa
-.section stdfb
-.section stdga
-.section stdgb
-.section stdha
-.section stdhb
-.section stdia
-.section stdib
-.section stdja
-.section stdjb
-.section stdka
-.section stdkb
-.section stdla
-.section stdlb
-.section stdma
-.section stdmb
-.section stdna
-.section stdnb
-.section stdoa
-.section stdob
-.section stdpa
-.section stdpb
-.section stdqa
-.section stdqb
-.section stdra
-.section stdrb
-.section stdsa
-.section stdsb
-.section stdta
-.section stdtb
-.section stdua
-.section stdub
-.section stdva
-.section stdvb
-.section stdwa
-.section stdwb
-.section stdxa
-.section stdxb
-.section stdya
-.section stdyb
-.section stdza
-.section stdzb
-.section std1a
-.section std1b
-.section std2a
-.section std2b
-.section std3a
-.section std3b
-.section std4a
-.section std4b
-.section std5a
-.section std5b
-.section std6a
-.section std6b
-.section std7a
-.section std7b
-.section std8a
-.section std8b
-.section std9a
-.section std9b
-.section std0a
-.section std0b
-.section steaa
-.section steab
-.section steba
-.section stebb
-.section steca
-.section stecb
-.section steda
-.section stedb
-.section steea
-.section steeb
-.section stefa
-.section stefb
-.section stega
-.section stegb
-.section steha
-.section stehb
-.section steia
-.section steib
-.section steja
-.section stejb
-.section steka
-.section stekb
-.section stela
-.section stelb
-.section stema
-.section stemb
-.section stena
-.section stenb
-.section steoa
-.section steob
-.section stepa
-.section stepb
-.section steqa
-.section steqb
-.section stera
-.section sterb
-.section stesa
-.section stesb
-.section steta
-.section stetb
-.section steua
-.section steub
-.section steva
-.section stevb
-.section stewa
-.section stewb
-.section stexa
-.section stexb
-.section steya
-.section steyb
-.section steza
-.section stezb
-.section ste1a
-.section ste1b
-.section ste2a
-.section ste2b
-.section ste3a
-.section ste3b
-.section ste4a
-.section ste4b
-.section ste5a
-.section ste5b
-.section ste6a
-.section ste6b
-.section ste7a
-.section ste7b
-.section ste8a
-.section ste8b
-.section ste9a
-.section ste9b
-.section ste0a
-.section ste0b
-.section stfaa
-.section stfab
-.section stfba
-.section stfbb
-.section stfca
-.section stfcb
-.section stfda
-.section stfdb
-.section stfea
-.section stfeb
-.section stffa
-.section stffb
-.section stfga
-.section stfgb
-.section stfha
-.section stfhb
-.section stfia
-.section stfib
-.section stfja
-.section stfjb
-.section stfka
-.section stfkb
-.section stfla
-.section stflb
-.section stfma
-.section stfmb
-.section stfna
-.section stfnb
-.section stfoa
-.section stfob
-.section stfpa
-.section stfpb
-.section stfqa
-.section stfqb
-.section stfra
-.section stfrb
-.section stfsa
-.section stfsb
-.section stfta
-.section stftb
-.section stfua
-.section stfub
-.section stfva
-.section stfvb
-.section stfwa
-.section stfwb
-.section stfxa
-.section stfxb
-.section stfya
-.section stfyb
-.section stfza
-.section stfzb
-.section stf1a
-.section stf1b
-.section stf2a
-.section stf2b
-.section stf3a
-.section stf3b
-.section stf4a
-.section stf4b
-.section stf5a
-.section stf5b
-.section stf6a
-.section stf6b
-.section stf7a
-.section stf7b
-.section stf8a
-.section stf8b
-.section stf9a
-.section stf9b
-.section stf0a
-.section stf0b
-.section stgaa
-.section stgab
-.section stgba
-.section stgbb
-.section stgca
-.section stgcb
-.section stgda
-.section stgdb
-.section stgea
-.section stgeb
-.section stgfa
-.section stgfb
-.section stgga
-.section stggb
-.section stgha
-.section stghb
-.section stgia
-.section stgib
-.section stgja
-.section stgjb
-.section stgka
-.section stgkb
-.section stgla
-.section stglb
-.section stgma
-.section stgmb
-.section stgna
-.section stgnb
-.section stgoa
-.section stgob
-.section stgpa
-.section stgpb
-.section stgqa
-.section stgqb
-.section stgra
-.section stgrb
-.section stgsa
-.section stgsb
-.section stgta
-.section stgtb
-.section stgua
-.section stgub
-.section stgva
-.section stgvb
-.section stgwa
-.section stgwb
-.section stgxa
-.section stgxb
-.section stgya
-.section stgyb
-.section stgza
-.section stgzb
-.section stg1a
-.section stg1b
-.section stg2a
-.section stg2b
-.section stg3a
-.section stg3b
-.section stg4a
-.section stg4b
-.section stg5a
-.section stg5b
-.section stg6a
-.section stg6b
-.section stg7a
-.section stg7b
-.section stg8a
-.section stg8b
-.section stg9a
-.section stg9b
-.section stg0a
-.section stg0b
-.section sthaa
-.section sthab
-.section sthba
-.section sthbb
-.section sthca
-.section sthcb
-.section sthda
-.section sthdb
-.section sthea
-.section stheb
-.section sthfa
-.section sthfb
-.section sthga
-.section sthgb
-.section sthha
-.section sthhb
-.section sthia
-.section sthib
-.section sthja
-.section sthjb
-.section sthka
-.section sthkb
-.section sthla
-.section sthlb
-.section sthma
-.section sthmb
-.section sthna
-.section sthnb
-.section sthoa
-.section sthob
-.section sthpa
-.section sthpb
-.section sthqa
-.section sthqb
-.section sthra
-.section sthrb
-.section sthsa
-.section sthsb
-.section sthta
-.section sthtb
-.section sthua
-.section sthub
-.section sthva
-.section sthvb
-.section sthwa
-.section sthwb
-.section sthxa
-.section sthxb
-.section sthya
-.section sthyb
-.section sthza
-.section sthzb
-.section sth1a
-.section sth1b
-.section sth2a
-.section sth2b
-.section sth3a
-.section sth3b
-.section sth4a
-.section sth4b
-.section sth5a
-.section sth5b
-.section sth6a
-.section sth6b
-.section sth7a
-.section sth7b
-.section sth8a
-.section sth8b
-.section sth9a
-.section sth9b
-.section sth0a
-.section sth0b
-.section stiaa
-.section stiab
-.section stiba
-.section stibb
-.section stica
-.section sticb
-.section stida
-.section stidb
-.section stiea
-.section stieb
-.section stifa
-.section stifb
-.section stiga
-.section stigb
-.section stiha
-.section stihb
-.section stiia
-.section stiib
-.section stija
-.section stijb
-.section stika
-.section stikb
-.section stila
-.section stilb
-.section stima
-.section stimb
-.section stina
-.section stinb
-.section stioa
-.section stiob
-.section stipa
-.section stipb
-.section stiqa
-.section stiqb
-.section stira
-.section stirb
-.section stisa
-.section stisb
-.section stita
-.section stitb
-.section stiua
-.section stiub
-.section stiva
-.section stivb
-.section stiwa
-.section stiwb
-.section stixa
-.section stixb
-.section stiya
-.section stiyb
-.section stiza
-.section stizb
-.section sti1a
-.section sti1b
-.section sti2a
-.section sti2b
-.section sti3a
-.section sti3b
-.section sti4a
-.section sti4b
-.section sti5a
-.section sti5b
-.section sti6a
-.section sti6b
-.section sti7a
-.section sti7b
-.section sti8a
-.section sti8b
-.section sti9a
-.section sti9b
-.section sti0a
-.section sti0b
-.section stjaa
-.section stjab
-.section stjba
-.section stjbb
-.section stjca
-.section stjcb
-.section stjda
-.section stjdb
-.section stjea
-.section stjeb
-.section stjfa
-.section stjfb
-.section stjga
-.section stjgb
-.section stjha
-.section stjhb
-.section stjia
-.section stjib
-.section stjja
-.section stjjb
-.section stjka
-.section stjkb
-.section stjla
-.section stjlb
-.section stjma
-.section stjmb
-.section stjna
-.section stjnb
-.section stjoa
-.section stjob
-.section stjpa
-.section stjpb
-.section stjqa
-.section stjqb
-.section stjra
-.section stjrb
-.section stjsa
-.section stjsb
-.section stjta
-.section stjtb
-.section stjua
-.section stjub
-.section stjva
-.section stjvb
-.section stjwa
-.section stjwb
-.section stjxa
-.section stjxb
-.section stjya
-.section stjyb
-.section stjza
-.section stjzb
-.section stj1a
-.section stj1b
-.section stj2a
-.section stj2b
-.section stj3a
-.section stj3b
-.section stj4a
-.section stj4b
-.section stj5a
-.section stj5b
-.section stj6a
-.section stj6b
-.section stj7a
-.section stj7b
-.section stj8a
-.section stj8b
-.section stj9a
-.section stj9b
-.section stj0a
-.section stj0b
-.section stkaa
-.section stkab
-.section stkba
-.section stkbb
-.section stkca
-.section stkcb
-.section stkda
-.section stkdb
-.section stkea
-.section stkeb
-.section stkfa
-.section stkfb
-.section stkga
-.section stkgb
-.section stkha
-.section stkhb
-.section stkia
-.section stkib
-.section stkja
-.section stkjb
-.section stkka
-.section stkkb
-.section stkla
-.section stklb
-.section stkma
-.section stkmb
-.section stkna
-.section stknb
-.section stkoa
-.section stkob
-.section stkpa
-.section stkpb
-.section stkqa
-.section stkqb
-.section stkra
-.section stkrb
-.section stksa
-.section stksb
-.section stkta
-.section stktb
-.section stkua
-.section stkub
-.section stkva
-.section stkvb
-.section stkwa
-.section stkwb
-.section stkxa
-.section stkxb
-.section stkya
-.section stkyb
-.section stkza
-.section stkzb
-.section stk1a
-.section stk1b
-.section stk2a
-.section stk2b
-.section stk3a
-.section stk3b
-.section stk4a
-.section stk4b
-.section stk5a
-.section stk5b
-.section stk6a
-.section stk6b
-.section stk7a
-.section stk7b
-.section stk8a
-.section stk8b
-.section stk9a
-.section stk9b
-.section stk0a
-.section stk0b
-.section stlaa
-.section stlab
-.section stlba
-.section stlbb
-.section stlca
-.section stlcb
-.section stlda
-.section stldb
-.section stlea
-.section stleb
-.section stlfa
-.section stlfb
-.section stlga
-.section stlgb
-.section stlha
-.section stlhb
-.section stlia
-.section stlib
-.section stlja
-.section stljb
-.section stlka
-.section stlkb
-.section stlla
-.section stllb
-.section stlma
-.section stlmb
-.section stlna
-.section stlnb
-.section stloa
-.section stlob
-.section stlpa
-.section stlpb
-.section stlqa
-.section stlqb
-.section stlra
-.section stlrb
-.section stlsa
-.section stlsb
-.section stlta
-.section stltb
-.section stlua
-.section stlub
-.section stlva
-.section stlvb
-.section stlwa
-.section stlwb
-.section stlxa
-.section stlxb
-.section stlya
-.section stlyb
-.section stlza
-.section stlzb
-.section stl1a
-.section stl1b
-.section stl2a
-.section stl2b
-.section stl3a
-.section stl3b
-.section stl4a
-.section stl4b
-.section stl5a
-.section stl5b
-.section stl6a
-.section stl6b
-.section stl7a
-.section stl7b
-.section stl8a
-.section stl8b
-.section stl9a
-.section stl9b
-.section stl0a
-.section stl0b
-.section stmaa
-.section stmab
-.section stmba
-.section stmbb
-.section stmca
-.section stmcb
-.section stmda
-.section stmdb
-.section stmea
-.section stmeb
-.section stmfa
-.section stmfb
-.section stmga
-.section stmgb
-.section stmha
-.section stmhb
-.section stmia
-.section stmib
-.section stmja
-.section stmjb
-.section stmka
-.section stmkb
-.section stmla
-.section stmlb
-.section stmma
-.section stmmb
-.section stmna
-.section stmnb
-.section stmoa
-.section stmob
-.section stmpa
-.section stmpb
-.section stmqa
-.section stmqb
-.section stmra
-.section stmrb
-.section stmsa
-.section stmsb
-.section stmta
-.section stmtb
-.section stmua
-.section stmub
-.section stmva
-.section stmvb
-.section stmwa
-.section stmwb
-.section stmxa
-.section stmxb
-.section stmya
-.section stmyb
-.section stmza
-.section stmzb
-.section stm1a
-.section stm1b
-.section stm2a
-.section stm2b
-.section stm3a
-.section stm3b
-.section stm4a
-.section stm4b
-.section stm5a
-.section stm5b
-.section stm6a
-.section stm6b
-.section stm7a
-.section stm7b
-.section stm8a
-.section stm8b
-.section stm9a
-.section stm9b
-.section stm0a
-.section stm0b
-.section stnaa
-.section stnab
-.section stnba
-.section stnbb
-.section stnca
-.section stncb
-.section stnda
-.section stndb
-.section stnea
-.section stneb
-.section stnfa
-.section stnfb
-.section stnga
-.section stngb
-.section stnha
-.section stnhb
-.section stnia
-.section stnib
-.section stnja
-.section stnjb
-.section stnka
-.section stnkb
-.section stnla
-.section stnlb
-.section stnma
-.section stnmb
-.section stnna
-.section stnnb
-.section stnoa
-.section stnob
-.section stnpa
-.section stnpb
-.section stnqa
-.section stnqb
-.section stnra
-.section stnrb
-.section stnsa
-.section stnsb
-.section stnta
-.section stntb
-.section stnua
-.section stnub
-.section stnva
-.section stnvb
-.section stnwa
-.section stnwb
-.section stnxa
-.section stnxb
-.section stnya
-.section stnyb
-.section stnza
-.section stnzb
-.section stn1a
-.section stn1b
-.section stn2a
-.section stn2b
-.section stn3a
-.section stn3b
-.section stn4a
-.section stn4b
-.section stn5a
-.section stn5b
-.section stn6a
-.section stn6b
-.section stn7a
-.section stn7b
-.section stn8a
-.section stn8b
-.section stn9a
-.section stn9b
-.section stn0a
-.section stn0b
-.section stoaa
-.section stoab
-.section stoba
-.section stobb
-.section stoca
-.section stocb
-.section stoda
-.section stodb
-.section stoea
-.section stoeb
-.section stofa
-.section stofb
-.section stoga
-.section stogb
-.section stoha
-.section stohb
-.section stoia
-.section stoib
-.section stoja
-.section stojb
-.section stoka
-.section stokb
-.section stola
-.section stolb
-.section stoma
-.section stomb
-.section stona
-.section stonb
-.section stooa
-.section stoob
-.section stopa
-.section stopb
-.section stoqa
-.section stoqb
-.section stora
-.section storb
-.section stosa
-.section stosb
-.section stota
-.section stotb
-.section stoua
-.section stoub
-.section stova
-.section stovb
-.section stowa
-.section stowb
-.section stoxa
-.section stoxb
-.section stoya
-.section stoyb
-.section stoza
-.section stozb
-.section sto1a
-.section sto1b
-.section sto2a
-.section sto2b
-.section sto3a
-.section sto3b
-.section sto4a
-.section sto4b
-.section sto5a
-.section sto5b
-.section sto6a
-.section sto6b
-.section sto7a
-.section sto7b
-.section sto8a
-.section sto8b
-.section sto9a
-.section sto9b
-.section sto0a
-.section sto0b
-.section stpaa
-.section stpab
-.section stpba
-.section stpbb
-.section stpca
-.section stpcb
-.section stpda
-.section stpdb
-.section stpea
-.section stpeb
-.section stpfa
-.section stpfb
-.section stpga
-.section stpgb
-.section stpha
-.section stphb
-.section stpia
-.section stpib
-.section stpja
-.section stpjb
-.section stpka
-.section stpkb
-.section stpla
-.section stplb
-.section stpma
-.section stpmb
-.section stpna
-.section stpnb
-.section stpoa
-.section stpob
-.section stppa
-.section stppb
-.section stpqa
-.section stpqb
-.section stpra
-.section stprb
-.section stpsa
-.section stpsb
-.section stpta
-.section stptb
-.section stpua
-.section stpub
-.section stpva
-.section stpvb
-.section stpwa
-.section stpwb
-.section stpxa
-.section stpxb
-.section stpya
-.section stpyb
-.section stpza
-.section stpzb
-.section stp1a
-.section stp1b
-.section stp2a
-.section stp2b
-.section stp3a
-.section stp3b
-.section stp4a
-.section stp4b
-.section stp5a
-.section stp5b
-.section stp6a
-.section stp6b
-.section stp7a
-.section stp7b
-.section stp8a
-.section stp8b
-.section stp9a
-.section stp9b
-.section stp0a
-.section stp0b
-.section stqaa
-.section stqab
-.section stqba
-.section stqbb
-.section stqca
-.section stqcb
-.section stqda
-.section stqdb
-.section stqea
-.section stqeb
-.section stqfa
-.section stqfb
-.section stqga
-.section stqgb
-.section stqha
-.section stqhb
-.section stqia
-.section stqib
-.section stqja
-.section stqjb
-.section stqka
-.section stqkb
-.section stqla
-.section stqlb
-.section stqma
-.section stqmb
-.section stqna
-.section stqnb
-.section stqoa
-.section stqob
-.section stqpa
-.section stqpb
-.section stqqa
-.section stqqb
-.section stqra
-.section stqrb
-.section stqsa
-.section stqsb
-.section stqta
-.section stqtb
-.section stqua
-.section stqub
-.section stqva
-.section stqvb
-.section stqwa
-.section stqwb
-.section stqxa
-.section stqxb
-.section stqya
-.section stqyb
-.section stqza
-.section stqzb
-.section stq1a
-.section stq1b
-.section stq2a
-.section stq2b
-.section stq3a
-.section stq3b
-.section stq4a
-.section stq4b
-.section stq5a
-.section stq5b
-.section stq6a
-.section stq6b
-.section stq7a
-.section stq7b
-.section stq8a
-.section stq8b
-.section stq9a
-.section stq9b
-.section stq0a
-.section stq0b
-.section straa
-.section strab
-.section strba
-.section strbb
-.section strca
-.section strcb
-.section strda
-.section strdb
-.section strea
-.section streb
-.section strfa
-.section strfb
-.section strga
-.section strgb
-.section strha
-.section strhb
-.section stria
-.section strib
-.section strja
-.section strjb
-.section strka
-.section strkb
-.section strla
-.section strlb
-.section strma
-.section strmb
-.section strna
-.section strnb
-.section stroa
-.section strob
-.section strpa
-.section strpb
-.section strqa
-.section strqb
-.section strra
-.section strrb
-.section strsa
-.section strsb
-.section strta
-.section strtb
-.section strua
-.section strub
-.section strva
-.section strvb
-.section strwa
-.section strwb
-.section strxa
-.section strxb
-.section strya
-.section stryb
-.section strza
-.section strzb
-.section str1a
-.section str1b
-.section str2a
-.section str2b
-.section str3a
-.section str3b
-.section str4a
-.section str4b
-.section str5a
-.section str5b
-.section str6a
-.section str6b
-.section str7a
-.section str7b
-.section str8a
-.section str8b
-.section str9a
-.section str9b
-.section str0a
-.section str0b
-.section stsaa
-.section stsab
-.section stsba
-.section stsbb
-.section stsca
-.section stscb
-.section stsda
-.section stsdb
-.section stsea
-.section stseb
-.section stsfa
-.section stsfb
-.section stsga
-.section stsgb
-.section stsha
-.section stshb
-.section stsia
-.section stsib
-.section stsja
-.section stsjb
-.section stska
-.section stskb
-.section stsla
-.section stslb
-.section stsma
-.section stsmb
-.section stsna
-.section stsnb
-.section stsoa
-.section stsob
-.section stspa
-.section stspb
-.section stsqa
-.section stsqb
-.section stsra
-.section stsrb
-.section stssa
-.section stssb
-.section ststa
-.section ststb
-.section stsua
-.section stsub
-.section stsva
-.section stsvb
-.section stswa
-.section stswb
-.section stsxa
-.section stsxb
-.section stsya
-.section stsyb
-.section stsza
-.section stszb
-.section sts1a
-.section sts1b
-.section sts2a
-.section sts2b
-.section sts3a
-.section sts3b
-.section sts4a
-.section sts4b
-.section sts5a
-.section sts5b
-.section sts6a
-.section sts6b
-.section sts7a
-.section sts7b
-.section sts8a
-.section sts8b
-.section sts9a
-.section sts9b
-.section sts0a
-.section sts0b
-.section sttaa
-.section sttab
-.section sttba
-.section sttbb
-.section sttca
-.section sttcb
-.section sttda
-.section sttdb
-.section sttea
-.section stteb
-.section sttfa
-.section sttfb
-.section sttga
-.section sttgb
-.section sttha
-.section stthb
-.section sttia
-.section sttib
-.section sttja
-.section sttjb
-.section sttka
-.section sttkb
-.section sttla
-.section sttlb
-.section sttma
-.section sttmb
-.section sttna
-.section sttnb
-.section sttoa
-.section sttob
-.section sttpa
-.section sttpb
-.section sttqa
-.section sttqb
-.section sttra
-.section sttrb
-.section sttsa
-.section sttsb
-.section sttta
-.section stttb
-.section sttua
-.section sttub
-.section sttva
-.section sttvb
-.section sttwa
-.section sttwb
-.section sttxa
-.section sttxb
-.section sttya
-.section sttyb
-.section sttza
-.section sttzb
-.section stt1a
-.section stt1b
-.section stt2a
-.section stt2b
-.section stt3a
-.section stt3b
-.section stt4a
-.section stt4b
-.section stt5a
-.section stt5b
-.section stt6a
-.section stt6b
-.section stt7a
-.section stt7b
-.section stt8a
-.section stt8b
-.section stt9a
-.section stt9b
-.section stt0a
-.section stt0b
-.section stuaa
-.section stuab
-.section stuba
-.section stubb
-.section stuca
-.section stucb
-.section studa
-.section studb
-.section stuea
-.section stueb
-.section stufa
-.section stufb
-.section stuga
-.section stugb
-.section stuha
-.section stuhb
-.section stuia
-.section stuib
-.section stuja
-.section stujb
-.section stuka
-.section stukb
-.section stula
-.section stulb
-.section stuma
-.section stumb
-.section stuna
-.section stunb
-.section stuoa
-.section stuob
-.section stupa
-.section stupb
-.section stuqa
-.section stuqb
-.section stura
-.section sturb
-.section stusa
-.section stusb
-.section stuta
-.section stutb
-.section stuua
-.section stuub
-.section stuva
-.section stuvb
-.section stuwa
-.section stuwb
-.section stuxa
-.section stuxb
-.section stuya
-.section stuyb
-.section stuza
-.section stuzb
-.section stu1a
-.section stu1b
-.section stu2a
-.section stu2b
-.section stu3a
-.section stu3b
-.section stu4a
-.section stu4b
-.section stu5a
-.section stu5b
-.section stu6a
-.section stu6b
-.section stu7a
-.section stu7b
-.section stu8a
-.section stu8b
-.section stu9a
-.section stu9b
-.section stu0a
-.section stu0b
-.section stvaa
-.section stvab
-.section stvba
-.section stvbb
-.section stvca
-.section stvcb
-.section stvda
-.section stvdb
-.section stvea
-.section stveb
-.section stvfa
-.section stvfb
-.section stvga
-.section stvgb
-.section stvha
-.section stvhb
-.section stvia
-.section stvib
-.section stvja
-.section stvjb
-.section stvka
-.section stvkb
-.section stvla
-.section stvlb
-.section stvma
-.section stvmb
-.section stvna
-.section stvnb
-.section stvoa
-.section stvob
-.section stvpa
-.section stvpb
-.section stvqa
-.section stvqb
-.section stvra
-.section stvrb
-.section stvsa
-.section stvsb
-.section stvta
-.section stvtb
-.section stvua
-.section stvub
-.section stvva
-.section stvvb
-.section stvwa
-.section stvwb
-.section stvxa
-.section stvxb
-.section stvya
-.section stvyb
-.section stvza
-.section stvzb
-.section stv1a
-.section stv1b
-.section stv2a
-.section stv2b
-.section stv3a
-.section stv3b
-.section stv4a
-.section stv4b
-.section stv5a
-.section stv5b
-.section stv6a
-.section stv6b
-.section stv7a
-.section stv7b
-.section stv8a
-.section stv8b
-.section stv9a
-.section stv9b
-.section stv0a
-.section stv0b
-.section stwaa
-.section stwab
-.section stwba
-.section stwbb
-.section stwca
-.section stwcb
-.section stwda
-.section stwdb
-.section stwea
-.section stweb
-.section stwfa
-.section stwfb
-.section stwga
-.section stwgb
-.section stwha
-.section stwhb
-.section stwia
-.section stwib
-.section stwja
-.section stwjb
-.section stwka
-.section stwkb
-.section stwla
-.section stwlb
-.section stwma
-.section stwmb
-.section stwna
-.section stwnb
-.section stwoa
-.section stwob
-.section stwpa
-.section stwpb
-.section stwqa
-.section stwqb
-.section stwra
-.section stwrb
-.section stwsa
-.section stwsb
-.section stwta
-.section stwtb
-.section stwua
-.section stwub
-.section stwva
-.section stwvb
-.section stwwa
-.section stwwb
-.section stwxa
-.section stwxb
-.section stwya
-.section stwyb
-.section stwza
-.section stwzb
-.section stw1a
-.section stw1b
-.section stw2a
-.section stw2b
-.section stw3a
-.section stw3b
-.section stw4a
-.section stw4b
-.section stw5a
-.section stw5b
-.section stw6a
-.section stw6b
-.section stw7a
-.section stw7b
-.section stw8a
-.section stw8b
-.section stw9a
-.section stw9b
-.section stw0a
-.section stw0b
-.section stxaa
-.section stxab
-.section stxba
-.section stxbb
-.section stxca
-.section stxcb
-.section stxda
-.section stxdb
-.section stxea
-.section stxeb
-.section stxfa
-.section stxfb
-.section stxga
-.section stxgb
-.section stxha
-.section stxhb
-.section stxia
-.section stxib
-.section stxja
-.section stxjb
-.section stxka
-.section stxkb
-.section stxla
-.section stxlb
-.section stxma
-.section stxmb
-.section stxna
-.section stxnb
-.section stxoa
-.section stxob
-.section stxpa
-.section stxpb
-.section stxqa
-.section stxqb
-.section stxra
-.section stxrb
-.section stxsa
-.section stxsb
-.section stxta
-.section stxtb
-.section stxua
-.section stxub
-.section stxva
-.section stxvb
-.section stxwa
-.section stxwb
-.section stxxa
-.section stxxb
-.section stxya
-.section stxyb
-.section stxza
-.section stxzb
-.section stx1a
-.section stx1b
-.section stx2a
-.section stx2b
-.section stx3a
-.section stx3b
-.section stx4a
-.section stx4b
-.section stx5a
-.section stx5b
-.section stx6a
-.section stx6b
-.section stx7a
-.section stx7b
-.section stx8a
-.section stx8b
-.section stx9a
-.section stx9b
-.section stx0a
-.section stx0b
-.section styaa
-.section styab
-.section styba
-.section stybb
-.section styca
-.section stycb
-.section styda
-.section stydb
-.section styea
-.section styeb
-.section styfa
-.section styfb
-.section styga
-.section stygb
-.section styha
-.section styhb
-.section styia
-.section styib
-.section styja
-.section styjb
-.section styka
-.section stykb
-.section styla
-.section stylb
-.section styma
-.section stymb
-.section styna
-.section stynb
-.section styoa
-.section styob
-.section stypa
-.section stypb
-.section styqa
-.section styqb
-.section styra
-.section styrb
-.section stysa
-.section stysb
-.section styta
-.section stytb
-.section styua
-.section styub
-.section styva
-.section styvb
-.section stywa
-.section stywb
-.section styxa
-.section styxb
-.section styya
-.section styyb
-.section styza
-.section styzb
-.section sty1a
-.section sty1b
-.section sty2a
-.section sty2b
-.section sty3a
-.section sty3b
-.section sty4a
-.section sty4b
-.section sty5a
-.section sty5b
-.section sty6a
-.section sty6b
-.section sty7a
-.section sty7b
-.section sty8a
-.section sty8b
-.section sty9a
-.section sty9b
-.section sty0a
-.section sty0b
-.section stzaa
-.section stzab
-.section stzba
-.section stzbb
-.section stzca
-.section stzcb
-.section stzda
-.section stzdb
-.section stzea
-.section stzeb
-.section stzfa
-.section stzfb
-.section stzga
-.section stzgb
-.section stzha
-.section stzhb
-.section stzia
-.section stzib
-.section stzja
-.section stzjb
-.section stzka
-.section stzkb
-.section stzla
-.section stzlb
-.section stzma
-.section stzmb
-.section stzna
-.section stznb
-.section stzoa
-.section stzob
-.section stzpa
-.section stzpb
-.section stzqa
-.section stzqb
-.section stzra
-.section stzrb
-.section stzsa
-.section stzsb
-.section stzta
-.section stztb
-.section stzua
-.section stzub
-.section stzva
-.section stzvb
-.section stzwa
-.section stzwb
-.section stzxa
-.section stzxb
-.section stzya
-.section stzyb
-.section stzza
-.section stzzb
-.section stz1a
-.section stz1b
-.section stz2a
-.section stz2b
-.section stz3a
-.section stz3b
-.section stz4a
-.section stz4b
-.section stz5a
-.section stz5b
-.section stz6a
-.section stz6b
-.section stz7a
-.section stz7b
-.section stz8a
-.section stz8b
-.section stz9a
-.section stz9b
-.section stz0a
-.section stz0b
-.section st1aa
-.section st1ab
-.section st1ba
-.section st1bb
-.section st1ca
-.section st1cb
-.section st1da
-.section st1db
-.section st1ea
-.section st1eb
-.section st1fa
-.section st1fb
-.section st1ga
-.section st1gb
-.section st1ha
-.section st1hb
-.section st1ia
-.section st1ib
-.section st1ja
-.section st1jb
-.section st1ka
-.section st1kb
-.section st1la
-.section st1lb
-.section st1ma
-.section st1mb
-.section st1na
-.section st1nb
-.section st1oa
-.section st1ob
-.section st1pa
-.section st1pb
-.section st1qa
-.section st1qb
-.section st1ra
-.section st1rb
-.section st1sa
-.section st1sb
-.section st1ta
-.section st1tb
-.section st1ua
-.section st1ub
-.section st1va
-.section st1vb
-.section st1wa
-.section st1wb
-.section st1xa
-.section st1xb
-.section st1ya
-.section st1yb
-.section st1za
-.section st1zb
-.section st11a
-.section st11b
-.section st12a
-.section st12b
-.section st13a
-.section st13b
-.section st14a
-.section st14b
-.section st15a
-.section st15b
-.section st16a
-.section st16b
-.section st17a
-.section st17b
-.section st18a
-.section st18b
-.section st19a
-.section st19b
-.section st10a
-.section st10b
-.section st2aa
-.section st2ab
-.section st2ba
-.section st2bb
-.section st2ca
-.section st2cb
-.section st2da
-.section st2db
-.section st2ea
-.section st2eb
-.section st2fa
-.section st2fb
-.section st2ga
-.section st2gb
-.section st2ha
-.section st2hb
-.section st2ia
-.section st2ib
-.section st2ja
-.section st2jb
-.section st2ka
-.section st2kb
-.section st2la
-.section st2lb
-.section st2ma
-.section st2mb
-.section st2na
-.section st2nb
-.section st2oa
-.section st2ob
-.section st2pa
-.section st2pb
-.section st2qa
-.section st2qb
-.section st2ra
-.section st2rb
-.section st2sa
-.section st2sb
-.section st2ta
-.section st2tb
-.section st2ua
-.section st2ub
-.section st2va
-.section st2vb
-.section st2wa
-.section st2wb
-.section st2xa
-.section st2xb
-.section st2ya
-.section st2yb
-.section st2za
-.section st2zb
-.section st21a
-.section st21b
-.section st22a
-.section st22b
-.section st23a
-.section st23b
-.section st24a
-.section st24b
-.section st25a
-.section st25b
-.section st26a
-.section st26b
-.section st27a
-.section st27b
-.section st28a
-.section st28b
-.section st29a
-.section st29b
-.section st20a
-.section st20b
-.section st3aa
-.section st3ab
-.section st3ba
-.section st3bb
-.section st3ca
-.section st3cb
-.section st3da
-.section st3db
-.section st3ea
-.section st3eb
-.section st3fa
-.section st3fb
-.section st3ga
-.section st3gb
-.section st3ha
-.section st3hb
-.section st3ia
-.section st3ib
-.section st3ja
-.section st3jb
-.section st3ka
-.section st3kb
-.section st3la
-.section st3lb
-.section st3ma
-.section st3mb
-.section st3na
-.section st3nb
-.section st3oa
-.section st3ob
-.section st3pa
-.section st3pb
-.section st3qa
-.section st3qb
-.section st3ra
-.section st3rb
-.section st3sa
-.section st3sb
-.section st3ta
-.section st3tb
-.section st3ua
-.section st3ub
-.section st3va
-.section st3vb
-.section st3wa
-.section st3wb
-.section st3xa
-.section st3xb
-.section st3ya
-.section st3yb
-.section st3za
-.section st3zb
-.section st31a
-.section st31b
-.section st32a
-.section st32b
-.section st33a
-.section st33b
-.section st34a
-.section st34b
-.section st35a
-.section st35b
-.section st36a
-.section st36b
-.section st37a
-.section st37b
-.section st38a
-.section st38b
-.section st39a
-.section st39b
-.section st30a
-.section st30b
-.section st4aa
-.section st4ab
-.section st4ba
-.section st4bb
-.section st4ca
-.section st4cb
-.section st4da
-.section st4db
-.section st4ea
-.section st4eb
-.section st4fa
-.section st4fb
-.section st4ga
-.section st4gb
-.section st4ha
-.section st4hb
-.section st4ia
-.section st4ib
-.section st4ja
-.section st4jb
-.section st4ka
-.section st4kb
-.section st4la
-.section st4lb
-.section st4ma
-.section st4mb
-.section st4na
-.section st4nb
-.section st4oa
-.section st4ob
-.section st4pa
-.section st4pb
-.section st4qa
-.section st4qb
-.section st4ra
-.section st4rb
-.section st4sa
-.section st4sb
-.section st4ta
-.section st4tb
-.section st4ua
-.section st4ub
-.section st4va
-.section st4vb
-.section st4wa
-.section st4wb
-.section st4xa
-.section st4xb
-.section st4ya
-.section st4yb
-.section st4za
-.section st4zb
-.section st41a
-.section st41b
-.section st42a
-.section st42b
-.section st43a
-.section st43b
-.section st44a
-.section st44b
-.section st45a
-.section st45b
-.section st46a
-.section st46b
-.section st47a
-.section st47b
-.section st48a
-.section st48b
-.section st49a
-.section st49b
-.section st40a
-.section st40b
-.section st5aa
-.section st5ab
-.section st5ba
-.section st5bb
-.section st5ca
-.section st5cb
-.section st5da
-.section st5db
-.section st5ea
-.section st5eb
-.section st5fa
-.section st5fb
-.section st5ga
-.section st5gb
-.section st5ha
-.section st5hb
-.section st5ia
-.section st5ib
-.section st5ja
-.section st5jb
-.section st5ka
-.section st5kb
-.section st5la
-.section st5lb
-.section st5ma
-.section st5mb
-.section st5na
-.section st5nb
-.section st5oa
-.section st5ob
-.section st5pa
-.section st5pb
-.section st5qa
-.section st5qb
-.section st5ra
-.section st5rb
-.section st5sa
-.section st5sb
-.section st5ta
-.section st5tb
-.section st5ua
-.section st5ub
-.section st5va
-.section st5vb
-.section st5wa
-.section st5wb
-.section st5xa
-.section st5xb
-.section st5ya
-.section st5yb
-.section st5za
-.section st5zb
-.section st51a
-.section st51b
-.section st52a
-.section st52b
-.section st53a
-.section st53b
-.section st54a
-.section st54b
-.section st55a
-.section st55b
-.section st56a
-.section st56b
-.section st57a
-.section st57b
-.section st58a
-.section st58b
-.section st59a
-.section st59b
-.section st50a
-.section st50b
-.section st6aa
-.section st6ab
-.section st6ba
-.section st6bb
-.section st6ca
-.section st6cb
-.section st6da
-.section st6db
-.section st6ea
-.section st6eb
-.section st6fa
-.section st6fb
-.section st6ga
-.section st6gb
-.section st6ha
-.section st6hb
-.section st6ia
-.section st6ib
-.section st6ja
-.section st6jb
-.section st6ka
-.section st6kb
-.section st6la
-.section st6lb
-.section st6ma
-.section st6mb
-.section st6na
-.section st6nb
-.section st6oa
-.section st6ob
-.section st6pa
-.section st6pb
-.section st6qa
-.section st6qb
-.section st6ra
-.section st6rb
-.section st6sa
-.section st6sb
-.section st6ta
-.section st6tb
-.section st6ua
-.section st6ub
-.section st6va
-.section st6vb
-.section st6wa
-.section st6wb
-.section st6xa
-.section st6xb
-.section st6ya
-.section st6yb
-.section st6za
-.section st6zb
-.section st61a
-.section st61b
-.section st62a
-.section st62b
-.section st63a
-.section st63b
-.section st64a
-.section st64b
-.section st65a
-.section st65b
-.section st66a
-.section st66b
-.section st67a
-.section st67b
-.section st68a
-.section st68b
-.section st69a
-.section st69b
-.section st60a
-.section st60b
-.section st7aa
-.section st7ab
-.section st7ba
-.section st7bb
-.section st7ca
-.section st7cb
-.section st7da
-.section st7db
-.section st7ea
-.section st7eb
-.section st7fa
-.section st7fb
-.section st7ga
-.section st7gb
-.section st7ha
-.section st7hb
-.section st7ia
-.section st7ib
-.section st7ja
-.section st7jb
-.section st7ka
-.section st7kb
-.section st7la
-.section st7lb
-.section st7ma
-.section st7mb
-.section st7na
-.section st7nb
-.section st7oa
-.section st7ob
-.section st7pa
-.section st7pb
-.section st7qa
-.section st7qb
-.section st7ra
-.section st7rb
-.section st7sa
-.section st7sb
-.section st7ta
-.section st7tb
-.section st7ua
-.section st7ub
-.section st7va
-.section st7vb
-.section st7wa
-.section st7wb
-.section st7xa
-.section st7xb
-.section st7ya
-.section st7yb
-.section st7za
-.section st7zb
-.section st71a
-.section st71b
-.section st72a
-.section st72b
-.section st73a
-.section st73b
-.section st74a
-.section st74b
-.section st75a
-.section st75b
-.section st76a
-.section st76b
-.section st77a
-.section st77b
-.section st78a
-.section st78b
-.section st79a
-.section st79b
-.section st70a
-.section st70b
-.section st8aa
-.section st8ab
-.section st8ba
-.section st8bb
-.section st8ca
-.section st8cb
-.section st8da
-.section st8db
-.section st8ea
-.section st8eb
-.section st8fa
-.section st8fb
-.section st8ga
-.section st8gb
-.section st8ha
-.section st8hb
-.section st8ia
-.section st8ib
-.section st8ja
-.section st8jb
-.section st8ka
-.section st8kb
-.section st8la
-.section st8lb
-.section st8ma
-.section st8mb
-.section st8na
-.section st8nb
-.section st8oa
-.section st8ob
-.section st8pa
-.section st8pb
-.section st8qa
-.section st8qb
-.section st8ra
-.section st8rb
-.section st8sa
-.section st8sb
-.section st8ta
-.section st8tb
-.section st8ua
-.section st8ub
-.section st8va
-.section st8vb
-.section st8wa
-.section st8wb
-.section st8xa
-.section st8xb
-.section st8ya
-.section st8yb
-.section st8za
-.section st8zb
-.section st81a
-.section st81b
-.section st82a
-.section st82b
-.section st83a
-.section st83b
-.section st84a
-.section st84b
-.section st85a
-.section st85b
-.section st86a
-.section st86b
-.section st87a
-.section st87b
-.section st88a
-.section st88b
-.section st89a
-.section st89b
-.section st80a
-.section st80b
-.section st9aa
-.section st9ab
-.section st9ba
-.section st9bb
-.section st9ca
-.section st9cb
-.section st9da
-.section st9db
-.section st9ea
-.section st9eb
-.section st9fa
-.section st9fb
-.section st9ga
-.section st9gb
-.section st9ha
-.section st9hb
-.section st9ia
-.section st9ib
-.section st9ja
-.section st9jb
-.section st9ka
-.section st9kb
-.section st9la
-.section st9lb
-.section st9ma
-.section st9mb
-.section st9na
-.section st9nb
-.section st9oa
-.section st9ob
-.section st9pa
-.section st9pb
-.section st9qa
-.section st9qb
-.section st9ra
-.section st9rb
-.section st9sa
-.section st9sb
-.section st9ta
-.section st9tb
-.section st9ua
-.section st9ub
-.section st9va
-.section st9vb
-.section st9wa
-.section st9wb
-.section st9xa
-.section st9xb
-.section st9ya
-.section st9yb
-.section st9za
-.section st9zb
-.section st91a
-.section st91b
-.section st92a
-.section st92b
-.section st93a
-.section st93b
-.section st94a
-.section st94b
-.section st95a
-.section st95b
-.section st96a
-.section st96b
-.section st97a
-.section st97b
-.section st98a
-.section st98b
-.section st99a
-.section st99b
-.section st90a
-.section st90b
-.section st0aa
-.section st0ab
-.section st0ba
-.section st0bb
-.section st0ca
-.section st0cb
-.section st0da
-.section st0db
-.section st0ea
-.section st0eb
-.section st0fa
-.section st0fb
-.section st0ga
-.section st0gb
-.section st0ha
-.section st0hb
-.section st0ia
-.section st0ib
-.section st0ja
-.section st0jb
-.section st0ka
-.section st0kb
-.section st0la
-.section st0lb
-.section st0ma
-.section st0mb
-.section st0na
-.section st0nb
-.section st0oa
-.section st0ob
-.section st0pa
-.section st0pb
-.section st0qa
-.section st0qb
-.section st0ra
-.section st0rb
-.section st0sa
-.section st0sb
-.section st0ta
-.section st0tb
-.section st0ua
-.section st0ub
-.section st0va
-.section st0vb
-.section st0wa
-.section st0wb
-.section st0xa
-.section st0xb
-.section st0ya
-.section st0yb
-.section st0za
-.section st0zb
-.section st01a
-.section st01b
-.section st02a
-.section st02b
-.section st03a
-.section st03b
-.section st04a
-.section st04b
-.section st05a
-.section st05b
-.section st06a
-.section st06b
-.section st07a
-.section st07b
-.section st08a
-.section st08b
-.section st09a
-.section st09b
-.section st00a
-.section st00b
-.section suaaa
-.section suaab
-.section suaba
-.section suabb
-.section suaca
-.section suacb
-.section suada
-.section suadb
-.section suaea
-.section suaeb
-.section suafa
-.section suafb
-.section suaga
-.section suagb
-.section suaha
-.section suahb
-.section suaia
-.section suaib
-.section suaja
-.section suajb
-.section suaka
-.section suakb
-.section suala
-.section sualb
-.section suama
-.section suamb
-.section suana
-.section suanb
-.section suaoa
-.section suaob
-.section suapa
-.section suapb
-.section suaqa
-.section suaqb
-.section suara
-.section suarb
-.section suasa
-.section suasb
-.section suata
-.section suatb
-.section suaua
-.section suaub
-.section suava
-.section suavb
-.section suawa
-.section suawb
-.section suaxa
-.section suaxb
-.section suaya
-.section suayb
-.section suaza
-.section suazb
-.section sua1a
-.section sua1b
-.section sua2a
-.section sua2b
-.section sua3a
-.section sua3b
-.section sua4a
-.section sua4b
-.section sua5a
-.section sua5b
-.section sua6a
-.section sua6b
-.section sua7a
-.section sua7b
-.section sua8a
-.section sua8b
-.section sua9a
-.section sua9b
-.section sua0a
-.section sua0b
-.section subaa
-.section subab
-.section subba
-.section subbb
-.section subca
-.section subcb
-.section subda
-.section subdb
-.section subea
-.section subeb
-.section subfa
-.section subfb
-.section subga
-.section subgb
-.section subha
-.section subhb
-.section subia
-.section subib
-.section subja
-.section subjb
-.section subka
-.section subkb
-.section subla
-.section sublb
-.section subma
-.section submb
-.section subna
-.section subnb
-.section suboa
-.section subob
-.section subpa
-.section subpb
-.section subqa
-.section subqb
-.section subra
-.section subrb
-.section subsa
-.section subsb
-.section subta
-.section subtb
-.section subua
-.section subub
-.section subva
-.section subvb
-.section subwa
-.section subwb
-.section subxa
-.section subxb
-.section subya
-.section subyb
-.section subza
-.section subzb
-.section sub1a
-.section sub1b
-.section sub2a
-.section sub2b
-.section sub3a
-.section sub3b
-.section sub4a
-.section sub4b
-.section sub5a
-.section sub5b
-.section sub6a
-.section sub6b
-.section sub7a
-.section sub7b
-.section sub8a
-.section sub8b
-.section sub9a
-.section sub9b
-.section sub0a
-.section sub0b
-.section sucaa
-.section sucab
-.section sucba
-.section sucbb
-.section succa
-.section succb
-.section sucda
-.section sucdb
-.section sucea
-.section suceb
-.section sucfa
-.section sucfb
-.section sucga
-.section sucgb
-.section sucha
-.section suchb
-.section sucia
-.section sucib
-.section sucja
-.section sucjb
-.section sucka
-.section suckb
-.section sucla
-.section suclb
-.section sucma
-.section sucmb
-.section sucna
-.section sucnb
-.section sucoa
-.section sucob
-.section sucpa
-.section sucpb
-.section sucqa
-.section sucqb
-.section sucra
-.section sucrb
-.section sucsa
-.section sucsb
-.section sucta
-.section suctb
-.section sucua
-.section sucub
-.section sucva
-.section sucvb
-.section sucwa
-.section sucwb
-.section sucxa
-.section sucxb
-.section sucya
-.section sucyb
-.section sucza
-.section suczb
-.section suc1a
-.section suc1b
-.section suc2a
-.section suc2b
-.section suc3a
-.section suc3b
-.section suc4a
-.section suc4b
-.section suc5a
-.section suc5b
-.section suc6a
-.section suc6b
-.section suc7a
-.section suc7b
-.section suc8a
-.section suc8b
-.section suc9a
-.section suc9b
-.section suc0a
-.section suc0b
-.section sudaa
-.section sudab
-.section sudba
-.section sudbb
-.section sudca
-.section sudcb
-.section sudda
-.section suddb
-.section sudea
-.section sudeb
-.section sudfa
-.section sudfb
-.section sudga
-.section sudgb
-.section sudha
-.section sudhb
-.section sudia
-.section sudib
-.section sudja
-.section sudjb
-.section sudka
-.section sudkb
-.section sudla
-.section sudlb
-.section sudma
-.section sudmb
-.section sudna
-.section sudnb
-.section sudoa
-.section sudob
-.section sudpa
-.section sudpb
-.section sudqa
-.section sudqb
-.section sudra
-.section sudrb
-.section sudsa
-.section sudsb
-.section sudta
-.section sudtb
-.section sudua
-.section sudub
-.section sudva
-.section sudvb
-.section sudwa
-.section sudwb
-.section sudxa
-.section sudxb
-.section sudya
-.section sudyb
-.section sudza
-.section sudzb
-.section sud1a
-.section sud1b
-.section sud2a
-.section sud2b
-.section sud3a
-.section sud3b
-.section sud4a
-.section sud4b
-.section sud5a
-.section sud5b
-.section sud6a
-.section sud6b
-.section sud7a
-.section sud7b
-.section sud8a
-.section sud8b
-.section sud9a
-.section sud9b
-.section sud0a
-.section sud0b
-.section sueaa
-.section sueab
-.section sueba
-.section suebb
-.section sueca
-.section suecb
-.section sueda
-.section suedb
-.section sueea
-.section sueeb
-.section suefa
-.section suefb
-.section suega
-.section suegb
-.section sueha
-.section suehb
-.section sueia
-.section sueib
-.section sueja
-.section suejb
-.section sueka
-.section suekb
-.section suela
-.section suelb
-.section suema
-.section suemb
-.section suena
-.section suenb
-.section sueoa
-.section sueob
-.section suepa
-.section suepb
-.section sueqa
-.section sueqb
-.section suera
-.section suerb
-.section suesa
-.section suesb
-.section sueta
-.section suetb
-.section sueua
-.section sueub
-.section sueva
-.section suevb
-.section suewa
-.section suewb
-.section suexa
-.section suexb
-.section sueya
-.section sueyb
-.section sueza
-.section suezb
-.section sue1a
-.section sue1b
-.section sue2a
-.section sue2b
-.section sue3a
-.section sue3b
-.section sue4a
-.section sue4b
-.section sue5a
-.section sue5b
-.section sue6a
-.section sue6b
-.section sue7a
-.section sue7b
-.section sue8a
-.section sue8b
-.section sue9a
-.section sue9b
-.section sue0a
-.section sue0b
-.section sufaa
-.section sufab
-.section sufba
-.section sufbb
-.section sufca
-.section sufcb
-.section sufda
-.section sufdb
-.section sufea
-.section sufeb
-.section suffa
-.section suffb
-.section sufga
-.section sufgb
-.section sufha
-.section sufhb
-.section sufia
-.section sufib
-.section sufja
-.section sufjb
-.section sufka
-.section sufkb
-.section sufla
-.section suflb
-.section sufma
-.section sufmb
-.section sufna
-.section sufnb
-.section sufoa
-.section sufob
-.section sufpa
-.section sufpb
-.section sufqa
-.section sufqb
-.section sufra
-.section sufrb
-.section sufsa
-.section sufsb
-.section sufta
-.section suftb
-.section sufua
-.section sufub
-.section sufva
-.section sufvb
-.section sufwa
-.section sufwb
-.section sufxa
-.section sufxb
-.section sufya
-.section sufyb
-.section sufza
-.section sufzb
-.section suf1a
-.section suf1b
-.section suf2a
-.section suf2b
-.section suf3a
-.section suf3b
-.section suf4a
-.section suf4b
-.section suf5a
-.section suf5b
-.section suf6a
-.section suf6b
-.section suf7a
-.section suf7b
-.section suf8a
-.section suf8b
-.section suf9a
-.section suf9b
-.section suf0a
-.section suf0b
-.section sugaa
-.section sugab
-.section sugba
-.section sugbb
-.section sugca
-.section sugcb
-.section sugda
-.section sugdb
-.section sugea
-.section sugeb
-.section sugfa
-.section sugfb
-.section sugga
-.section suggb
-.section sugha
-.section sughb
-.section sugia
-.section sugib
-.section sugja
-.section sugjb
-.section sugka
-.section sugkb
-.section sugla
-.section suglb
-.section sugma
-.section sugmb
-.section sugna
-.section sugnb
-.section sugoa
-.section sugob
-.section sugpa
-.section sugpb
-.section sugqa
-.section sugqb
-.section sugra
-.section sugrb
-.section sugsa
-.section sugsb
-.section sugta
-.section sugtb
-.section sugua
-.section sugub
-.section sugva
-.section sugvb
-.section sugwa
-.section sugwb
-.section sugxa
-.section sugxb
-.section sugya
-.section sugyb
-.section sugza
-.section sugzb
-.section sug1a
-.section sug1b
-.section sug2a
-.section sug2b
-.section sug3a
-.section sug3b
-.section sug4a
-.section sug4b
-.section sug5a
-.section sug5b
-.section sug6a
-.section sug6b
-.section sug7a
-.section sug7b
-.section sug8a
-.section sug8b
-.section sug9a
-.section sug9b
-.section sug0a
-.section sug0b
-.section suhaa
-.section suhab
-.section suhba
-.section suhbb
-.section suhca
-.section suhcb
-.section suhda
-.section suhdb
-.section suhea
-.section suheb
-.section suhfa
-.section suhfb
-.section suhga
-.section suhgb
-.section suhha
-.section suhhb
-.section suhia
-.section suhib
-.section suhja
-.section suhjb
-.section suhka
-.section suhkb
-.section suhla
-.section suhlb
-.section suhma
-.section suhmb
-.section suhna
-.section suhnb
-.section suhoa
-.section suhob
-.section suhpa
-.section suhpb
-.section suhqa
-.section suhqb
-.section suhra
-.section suhrb
-.section suhsa
-.section suhsb
-.section suhta
-.section suhtb
-.section suhua
-.section suhub
-.section suhva
-.section suhvb
-.section suhwa
-.section suhwb
-.section suhxa
-.section suhxb
-.section suhya
-.section suhyb
-.section suhza
-.section suhzb
-.section suh1a
-.section suh1b
-.section suh2a
-.section suh2b
-.section suh3a
-.section suh3b
-.section suh4a
-.section suh4b
-.section suh5a
-.section suh5b
-.section suh6a
-.section suh6b
-.section suh7a
-.section suh7b
-.section suh8a
-.section suh8b
-.section suh9a
-.section suh9b
-.section suh0a
-.section suh0b
-.section suiaa
-.section suiab
-.section suiba
-.section suibb
-.section suica
-.section suicb
-.section suida
-.section suidb
-.section suiea
-.section suieb
-.section suifa
-.section suifb
-.section suiga
-.section suigb
-.section suiha
-.section suihb
-.section suiia
-.section suiib
-.section suija
-.section suijb
-.section suika
-.section suikb
-.section suila
-.section suilb
-.section suima
-.section suimb
-.section suina
-.section suinb
-.section suioa
-.section suiob
-.section suipa
-.section suipb
-.section suiqa
-.section suiqb
-.section suira
-.section suirb
-.section suisa
-.section suisb
-.section suita
-.section suitb
-.section suiua
-.section suiub
-.section suiva
-.section suivb
-.section suiwa
-.section suiwb
-.section suixa
-.section suixb
-.section suiya
-.section suiyb
-.section suiza
-.section suizb
-.section sui1a
-.section sui1b
-.section sui2a
-.section sui2b
-.section sui3a
-.section sui3b
-.section sui4a
-.section sui4b
-.section sui5a
-.section sui5b
-.section sui6a
-.section sui6b
-.section sui7a
-.section sui7b
-.section sui8a
-.section sui8b
-.section sui9a
-.section sui9b
-.section sui0a
-.section sui0b
-.section sujaa
-.section sujab
-.section sujba
-.section sujbb
-.section sujca
-.section sujcb
-.section sujda
-.section sujdb
-.section sujea
-.section sujeb
-.section sujfa
-.section sujfb
-.section sujga
-.section sujgb
-.section sujha
-.section sujhb
-.section sujia
-.section sujib
-.section sujja
-.section sujjb
-.section sujka
-.section sujkb
-.section sujla
-.section sujlb
-.section sujma
-.section sujmb
-.section sujna
-.section sujnb
-.section sujoa
-.section sujob
-.section sujpa
-.section sujpb
-.section sujqa
-.section sujqb
-.section sujra
-.section sujrb
-.section sujsa
-.section sujsb
-.section sujta
-.section sujtb
-.section sujua
-.section sujub
-.section sujva
-.section sujvb
-.section sujwa
-.section sujwb
-.section sujxa
-.section sujxb
-.section sujya
-.section sujyb
-.section sujza
-.section sujzb
-.section suj1a
-.section suj1b
-.section suj2a
-.section suj2b
-.section suj3a
-.section suj3b
-.section suj4a
-.section suj4b
-.section suj5a
-.section suj5b
-.section suj6a
-.section suj6b
-.section suj7a
-.section suj7b
-.section suj8a
-.section suj8b
-.section suj9a
-.section suj9b
-.section suj0a
-.section suj0b
-.section sukaa
-.section sukab
-.section sukba
-.section sukbb
-.section sukca
-.section sukcb
-.section sukda
-.section sukdb
-.section sukea
-.section sukeb
-.section sukfa
-.section sukfb
-.section sukga
-.section sukgb
-.section sukha
-.section sukhb
-.section sukia
-.section sukib
-.section sukja
-.section sukjb
-.section sukka
-.section sukkb
-.section sukla
-.section suklb
-.section sukma
-.section sukmb
-.section sukna
-.section suknb
-.section sukoa
-.section sukob
-.section sukpa
-.section sukpb
-.section sukqa
-.section sukqb
-.section sukra
-.section sukrb
-.section suksa
-.section suksb
-.section sukta
-.section suktb
-.section sukua
-.section sukub
-.section sukva
-.section sukvb
-.section sukwa
-.section sukwb
-.section sukxa
-.section sukxb
-.section sukya
-.section sukyb
-.section sukza
-.section sukzb
-.section suk1a
-.section suk1b
-.section suk2a
-.section suk2b
-.section suk3a
-.section suk3b
-.section suk4a
-.section suk4b
-.section suk5a
-.section suk5b
-.section suk6a
-.section suk6b
-.section suk7a
-.section suk7b
-.section suk8a
-.section suk8b
-.section suk9a
-.section suk9b
-.section suk0a
-.section suk0b
-.section sulaa
-.section sulab
-.section sulba
-.section sulbb
-.section sulca
-.section sulcb
-.section sulda
-.section suldb
-.section sulea
-.section suleb
-.section sulfa
-.section sulfb
-.section sulga
-.section sulgb
-.section sulha
-.section sulhb
-.section sulia
-.section sulib
-.section sulja
-.section suljb
-.section sulka
-.section sulkb
-.section sulla
-.section sullb
-.section sulma
-.section sulmb
-.section sulna
-.section sulnb
-.section suloa
-.section sulob
-.section sulpa
-.section sulpb
-.section sulqa
-.section sulqb
-.section sulra
-.section sulrb
-.section sulsa
-.section sulsb
-.section sulta
-.section sultb
-.section sulua
-.section sulub
-.section sulva
-.section sulvb
-.section sulwa
-.section sulwb
-.section sulxa
-.section sulxb
-.section sulya
-.section sulyb
-.section sulza
-.section sulzb
-.section sul1a
-.section sul1b
-.section sul2a
-.section sul2b
-.section sul3a
-.section sul3b
-.section sul4a
-.section sul4b
-.section sul5a
-.section sul5b
-.section sul6a
-.section sul6b
-.section sul7a
-.section sul7b
-.section sul8a
-.section sul8b
-.section sul9a
-.section sul9b
-.section sul0a
-.section sul0b
-.section sumaa
-.section sumab
-.section sumba
-.section sumbb
-.section sumca
-.section sumcb
-.section sumda
-.section sumdb
-.section sumea
-.section sumeb
-.section sumfa
-.section sumfb
-.section sumga
-.section sumgb
-.section sumha
-.section sumhb
-.section sumia
-.section sumib
-.section sumja
-.section sumjb
-.section sumka
-.section sumkb
-.section sumla
-.section sumlb
-.section summa
-.section summb
-.section sumna
-.section sumnb
-.section sumoa
-.section sumob
-.section sumpa
-.section sumpb
-.section sumqa
-.section sumqb
-.section sumra
-.section sumrb
-.section sumsa
-.section sumsb
-.section sumta
-.section sumtb
-.section sumua
-.section sumub
-.section sumva
-.section sumvb
-.section sumwa
-.section sumwb
-.section sumxa
-.section sumxb
-.section sumya
-.section sumyb
-.section sumza
-.section sumzb
-.section sum1a
-.section sum1b
-.section sum2a
-.section sum2b
-.section sum3a
-.section sum3b
-.section sum4a
-.section sum4b
-.section sum5a
-.section sum5b
-.section sum6a
-.section sum6b
-.section sum7a
-.section sum7b
-.section sum8a
-.section sum8b
-.section sum9a
-.section sum9b
-.section sum0a
-.section sum0b
-.section sunaa
-.section sunab
-.section sunba
-.section sunbb
-.section sunca
-.section suncb
-.section sunda
-.section sundb
-.section sunea
-.section suneb
-.section sunfa
-.section sunfb
-.section sunga
-.section sungb
-.section sunha
-.section sunhb
-.section sunia
-.section sunib
-.section sunja
-.section sunjb
-.section sunka
-.section sunkb
-.section sunla
-.section sunlb
-.section sunma
-.section sunmb
-.section sunna
-.section sunnb
-.section sunoa
-.section sunob
-.section sunpa
-.section sunpb
-.section sunqa
-.section sunqb
-.section sunra
-.section sunrb
-.section sunsa
-.section sunsb
-.section sunta
-.section suntb
-.section sunua
-.section sunub
-.section sunva
-.section sunvb
-.section sunwa
-.section sunwb
-.section sunxa
-.section sunxb
-.section sunya
-.section sunyb
-.section sunza
-.section sunzb
-.section sun1a
-.section sun1b
-.section sun2a
-.section sun2b
-.section sun3a
-.section sun3b
-.section sun4a
-.section sun4b
-.section sun5a
-.section sun5b
-.section sun6a
-.section sun6b
-.section sun7a
-.section sun7b
-.section sun8a
-.section sun8b
-.section sun9a
-.section sun9b
-.section sun0a
-.section sun0b
-.section suoaa
-.section suoab
-.section suoba
-.section suobb
-.section suoca
-.section suocb
-.section suoda
-.section suodb
-.section suoea
-.section suoeb
-.section suofa
-.section suofb
-.section suoga
-.section suogb
-.section suoha
-.section suohb
-.section suoia
-.section suoib
-.section suoja
-.section suojb
-.section suoka
-.section suokb
-.section suola
-.section suolb
-.section suoma
-.section suomb
-.section suona
-.section suonb
-.section suooa
-.section suoob
-.section suopa
-.section suopb
-.section suoqa
-.section suoqb
-.section suora
-.section suorb
-.section suosa
-.section suosb
-.section suota
-.section suotb
-.section suoua
-.section suoub
-.section suova
-.section suovb
-.section suowa
-.section suowb
-.section suoxa
-.section suoxb
-.section suoya
-.section suoyb
-.section suoza
-.section suozb
-.section suo1a
-.section suo1b
-.section suo2a
-.section suo2b
-.section suo3a
-.section suo3b
-.section suo4a
-.section suo4b
-.section suo5a
-.section suo5b
-.section suo6a
-.section suo6b
-.section suo7a
-.section suo7b
-.section suo8a
-.section suo8b
-.section suo9a
-.section suo9b
-.section suo0a
-.section suo0b
-.section supaa
-.section supab
-.section supba
-.section supbb
-.section supca
-.section supcb
-.section supda
-.section supdb
-.section supea
-.section supeb
-.section supfa
-.section supfb
-.section supga
-.section supgb
-.section supha
-.section suphb
-.section supia
-.section supib
-.section supja
-.section supjb
-.section supka
-.section supkb
-.section supla
-.section suplb
-.section supma
-.section supmb
-.section supna
-.section supnb
-.section supoa
-.section supob
-.section suppa
-.section suppb
-.section supqa
-.section supqb
-.section supra
-.section suprb
-.section supsa
-.section supsb
-.section supta
-.section suptb
-.section supua
-.section supub
-.section supva
-.section supvb
-.section supwa
-.section supwb
-.section supxa
-.section supxb
-.section supya
-.section supyb
-.section supza
-.section supzb
-.section sup1a
-.section sup1b
-.section sup2a
-.section sup2b
-.section sup3a
-.section sup3b
-.section sup4a
-.section sup4b
-.section sup5a
-.section sup5b
-.section sup6a
-.section sup6b
-.section sup7a
-.section sup7b
-.section sup8a
-.section sup8b
-.section sup9a
-.section sup9b
-.section sup0a
-.section sup0b
-.section suqaa
-.section suqab
-.section suqba
-.section suqbb
-.section suqca
-.section suqcb
-.section suqda
-.section suqdb
-.section suqea
-.section suqeb
-.section suqfa
-.section suqfb
-.section suqga
-.section suqgb
-.section suqha
-.section suqhb
-.section suqia
-.section suqib
-.section suqja
-.section suqjb
-.section suqka
-.section suqkb
-.section suqla
-.section suqlb
-.section suqma
-.section suqmb
-.section suqna
-.section suqnb
-.section suqoa
-.section suqob
-.section suqpa
-.section suqpb
-.section suqqa
-.section suqqb
-.section suqra
-.section suqrb
-.section suqsa
-.section suqsb
-.section suqta
-.section suqtb
-.section suqua
-.section suqub
-.section suqva
-.section suqvb
-.section suqwa
-.section suqwb
-.section suqxa
-.section suqxb
-.section suqya
-.section suqyb
-.section suqza
-.section suqzb
-.section suq1a
-.section suq1b
-.section suq2a
-.section suq2b
-.section suq3a
-.section suq3b
-.section suq4a
-.section suq4b
-.section suq5a
-.section suq5b
-.section suq6a
-.section suq6b
-.section suq7a
-.section suq7b
-.section suq8a
-.section suq8b
-.section suq9a
-.section suq9b
-.section suq0a
-.section suq0b
-.section suraa
-.section surab
-.section surba
-.section surbb
-.section surca
-.section surcb
-.section surda
-.section surdb
-.section surea
-.section sureb
-.section surfa
-.section surfb
-.section surga
-.section surgb
-.section surha
-.section surhb
-.section suria
-.section surib
-.section surja
-.section surjb
-.section surka
-.section surkb
-.section surla
-.section surlb
-.section surma
-.section surmb
-.section surna
-.section surnb
-.section suroa
-.section surob
-.section surpa
-.section surpb
-.section surqa
-.section surqb
-.section surra
-.section surrb
-.section sursa
-.section sursb
-.section surta
-.section surtb
-.section surua
-.section surub
-.section surva
-.section survb
-.section surwa
-.section surwb
-.section surxa
-.section surxb
-.section surya
-.section suryb
-.section surza
-.section surzb
-.section sur1a
-.section sur1b
-.section sur2a
-.section sur2b
-.section sur3a
-.section sur3b
-.section sur4a
-.section sur4b
-.section sur5a
-.section sur5b
-.section sur6a
-.section sur6b
-.section sur7a
-.section sur7b
-.section sur8a
-.section sur8b
-.section sur9a
-.section sur9b
-.section sur0a
-.section sur0b
-.section susaa
-.section susab
-.section susba
-.section susbb
-.section susca
-.section suscb
-.section susda
-.section susdb
-.section susea
-.section suseb
-.section susfa
-.section susfb
-.section susga
-.section susgb
-.section susha
-.section sushb
-.section susia
-.section susib
-.section susja
-.section susjb
-.section suska
-.section suskb
-.section susla
-.section suslb
-.section susma
-.section susmb
-.section susna
-.section susnb
-.section susoa
-.section susob
-.section suspa
-.section suspb
-.section susqa
-.section susqb
-.section susra
-.section susrb
-.section sussa
-.section sussb
-.section susta
-.section sustb
-.section susua
-.section susub
-.section susva
-.section susvb
-.section suswa
-.section suswb
-.section susxa
-.section susxb
-.section susya
-.section susyb
-.section susza
-.section suszb
-.section sus1a
-.section sus1b
-.section sus2a
-.section sus2b
-.section sus3a
-.section sus3b
-.section sus4a
-.section sus4b
-.section sus5a
-.section sus5b
-.section sus6a
-.section sus6b
-.section sus7a
-.section sus7b
-.section sus8a
-.section sus8b
-.section sus9a
-.section sus9b
-.section sus0a
-.section sus0b
-.section sutaa
-.section sutab
-.section sutba
-.section sutbb
-.section sutca
-.section sutcb
-.section sutda
-.section sutdb
-.section sutea
-.section suteb
-.section sutfa
-.section sutfb
-.section sutga
-.section sutgb
-.section sutha
-.section suthb
-.section sutia
-.section sutib
-.section sutja
-.section sutjb
-.section sutka
-.section sutkb
-.section sutla
-.section sutlb
-.section sutma
-.section sutmb
-.section sutna
-.section sutnb
-.section sutoa
-.section sutob
-.section sutpa
-.section sutpb
-.section sutqa
-.section sutqb
-.section sutra
-.section sutrb
-.section sutsa
-.section sutsb
-.section sutta
-.section suttb
-.section sutua
-.section sutub
-.section sutva
-.section sutvb
-.section sutwa
-.section sutwb
-.section sutxa
-.section sutxb
-.section sutya
-.section sutyb
-.section sutza
-.section sutzb
-.section sut1a
-.section sut1b
-.section sut2a
-.section sut2b
-.section sut3a
-.section sut3b
-.section sut4a
-.section sut4b
-.section sut5a
-.section sut5b
-.section sut6a
-.section sut6b
-.section sut7a
-.section sut7b
-.section sut8a
-.section sut8b
-.section sut9a
-.section sut9b
-.section sut0a
-.section sut0b
-.section suuaa
-.section suuab
-.section suuba
-.section suubb
-.section suuca
-.section suucb
-.section suuda
-.section suudb
-.section suuea
-.section suueb
-.section suufa
-.section suufb
-.section suuga
-.section suugb
-.section suuha
-.section suuhb
-.section suuia
-.section suuib
-.section suuja
-.section suujb
-.section suuka
-.section suukb
-.section suula
-.section suulb
-.section suuma
-.section suumb
-.section suuna
-.section suunb
-.section suuoa
-.section suuob
-.section suupa
-.section suupb
-.section suuqa
-.section suuqb
-.section suura
-.section suurb
-.section suusa
-.section suusb
-.section suuta
-.section suutb
-.section suuua
-.section suuub
-.section suuva
-.section suuvb
-.section suuwa
-.section suuwb
-.section suuxa
-.section suuxb
-.section suuya
-.section suuyb
-.section suuza
-.section suuzb
-.section suu1a
-.section suu1b
-.section suu2a
-.section suu2b
-.section suu3a
-.section suu3b
-.section suu4a
-.section suu4b
-.section suu5a
-.section suu5b
-.section suu6a
-.section suu6b
-.section suu7a
-.section suu7b
-.section suu8a
-.section suu8b
-.section suu9a
-.section suu9b
-.section suu0a
-.section suu0b
-.section suvaa
-.section suvab
-.section suvba
-.section suvbb
-.section suvca
-.section suvcb
-.section suvda
-.section suvdb
-.section suvea
-.section suveb
-.section suvfa
-.section suvfb
-.section suvga
-.section suvgb
-.section suvha
-.section suvhb
-.section suvia
-.section suvib
-.section suvja
-.section suvjb
-.section suvka
-.section suvkb
-.section suvla
-.section suvlb
-.section suvma
-.section suvmb
-.section suvna
-.section suvnb
-.section suvoa
-.section suvob
-.section suvpa
-.section suvpb
-.section suvqa
-.section suvqb
-.section suvra
-.section suvrb
-.section suvsa
-.section suvsb
-.section suvta
-.section suvtb
-.section suvua
-.section suvub
-.section suvva
-.section suvvb
-.section suvwa
-.section suvwb
-.section suvxa
-.section suvxb
-.section suvya
-.section suvyb
-.section suvza
-.section suvzb
-.section suv1a
-.section suv1b
-.section suv2a
-.section suv2b
-.section suv3a
-.section suv3b
-.section suv4a
-.section suv4b
-.section suv5a
-.section suv5b
-.section suv6a
-.section suv6b
-.section suv7a
-.section suv7b
-.section suv8a
-.section suv8b
-.section suv9a
-.section suv9b
-.section suv0a
-.section suv0b
-.section suwaa
-.section suwab
-.section suwba
-.section suwbb
-.section suwca
-.section suwcb
-.section suwda
-.section suwdb
-.section suwea
-.section suweb
-.section suwfa
-.section suwfb
-.section suwga
-.section suwgb
-.section suwha
-.section suwhb
-.section suwia
-.section suwib
-.section suwja
-.section suwjb
-.section suwka
-.section suwkb
-.section suwla
-.section suwlb
-.section suwma
-.section suwmb
-.section suwna
-.section suwnb
-.section suwoa
-.section suwob
-.section suwpa
-.section suwpb
-.section suwqa
-.section suwqb
-.section suwra
-.section suwrb
-.section suwsa
-.section suwsb
-.section suwta
-.section suwtb
-.section suwua
-.section suwub
-.section suwva
-.section suwvb
-.section suwwa
-.section suwwb
-.section suwxa
-.section suwxb
-.section suwya
-.section suwyb
-.section suwza
-.section suwzb
-.section suw1a
-.section suw1b
-.section suw2a
-.section suw2b
-.section suw3a
-.section suw3b
-.section suw4a
-.section suw4b
-.section suw5a
-.section suw5b
-.section suw6a
-.section suw6b
-.section suw7a
-.section suw7b
-.section suw8a
-.section suw8b
-.section suw9a
-.section suw9b
-.section suw0a
-.section suw0b
-.section suxaa
-.section suxab
-.section suxba
-.section suxbb
-.section suxca
-.section suxcb
-.section suxda
-.section suxdb
-.section suxea
-.section suxeb
-.section suxfa
-.section suxfb
-.section suxga
-.section suxgb
-.section suxha
-.section suxhb
-.section suxia
-.section suxib
-.section suxja
-.section suxjb
-.section suxka
-.section suxkb
-.section suxla
-.section suxlb
-.section suxma
-.section suxmb
-.section suxna
-.section suxnb
-.section suxoa
-.section suxob
-.section suxpa
-.section suxpb
-.section suxqa
-.section suxqb
-.section suxra
-.section suxrb
-.section suxsa
-.section suxsb
-.section suxta
-.section suxtb
-.section suxua
-.section suxub
-.section suxva
-.section suxvb
-.section suxwa
-.section suxwb
-.section suxxa
-.section suxxb
-.section suxya
-.section suxyb
-.section suxza
-.section suxzb
-.section sux1a
-.section sux1b
-.section sux2a
-.section sux2b
-.section sux3a
-.section sux3b
-.section sux4a
-.section sux4b
-.section sux5a
-.section sux5b
-.section sux6a
-.section sux6b
-.section sux7a
-.section sux7b
-.section sux8a
-.section sux8b
-.section sux9a
-.section sux9b
-.section sux0a
-.section sux0b
-.section suyaa
-.section suyab
-.section suyba
-.section suybb
-.section suyca
-.section suycb
-.section suyda
-.section suydb
-.section suyea
-.section suyeb
-.section suyfa
-.section suyfb
-.section suyga
-.section suygb
-.section suyha
-.section suyhb
-.section suyia
-.section suyib
-.section suyja
-.section suyjb
-.section suyka
-.section suykb
-.section suyla
-.section suylb
-.section suyma
-.section suymb
-.section suyna
-.section suynb
-.section suyoa
-.section suyob
-.section suypa
-.section suypb
-.section suyqa
-.section suyqb
-.section suyra
-.section suyrb
-.section suysa
-.section suysb
-.section suyta
-.section suytb
-.section suyua
-.section suyub
-.section suyva
-.section suyvb
-.section suywa
-.section suywb
-.section suyxa
-.section suyxb
-.section suyya
-.section suyyb
-.section suyza
-.section suyzb
-.section suy1a
-.section suy1b
-.section suy2a
-.section suy2b
-.section suy3a
-.section suy3b
-.section suy4a
-.section suy4b
-.section suy5a
-.section suy5b
-.section suy6a
-.section suy6b
-.section suy7a
-.section suy7b
-.section suy8a
-.section suy8b
-.section suy9a
-.section suy9b
-.section suy0a
-.section suy0b
-.section suzaa
-.section suzab
-.section suzba
-.section suzbb
-.section suzca
-.section suzcb
-.section suzda
-.section suzdb
-.section suzea
-.section suzeb
-.section suzfa
-.section suzfb
-.section suzga
-.section suzgb
-.section suzha
-.section suzhb
-.section suzia
-.section suzib
-.section suzja
-.section suzjb
-.section suzka
-.section suzkb
-.section suzla
-.section suzlb
-.section suzma
-.section suzmb
-.section suzna
-.section suznb
-.section suzoa
-.section suzob
-.section suzpa
-.section suzpb
-.section suzqa
-.section suzqb
-.section suzra
-.section suzrb
-.section suzsa
-.section suzsb
-.section suzta
-.section suztb
-.section suzua
-.section suzub
-.section suzva
-.section suzvb
-.section suzwa
-.section suzwb
-.section suzxa
-.section suzxb
-.section suzya
-.section suzyb
-.section suzza
-.section suzzb
-.section suz1a
-.section suz1b
-.section suz2a
-.section suz2b
-.section suz3a
-.section suz3b
-.section suz4a
-.section suz4b
-.section suz5a
-.section suz5b
-.section suz6a
-.section suz6b
-.section suz7a
-.section suz7b
-.section suz8a
-.section suz8b
-.section suz9a
-.section suz9b
-.section suz0a
-.section suz0b
-.section su1aa
-.section su1ab
-.section su1ba
-.section su1bb
-.section su1ca
-.section su1cb
-.section su1da
-.section su1db
-.section su1ea
-.section su1eb
-.section su1fa
-.section su1fb
-.section su1ga
-.section su1gb
-.section su1ha
-.section su1hb
-.section su1ia
-.section su1ib
-.section su1ja
-.section su1jb
-.section su1ka
-.section su1kb
-.section su1la
-.section su1lb
-.section su1ma
-.section su1mb
-.section su1na
-.section su1nb
-.section su1oa
-.section su1ob
-.section su1pa
-.section su1pb
-.section su1qa
-.section su1qb
-.section su1ra
-.section su1rb
-.section su1sa
-.section su1sb
-.section su1ta
-.section su1tb
-.section su1ua
-.section su1ub
-.section su1va
-.section su1vb
-.section su1wa
-.section su1wb
-.section su1xa
-.section su1xb
-.section su1ya
-.section su1yb
-.section su1za
-.section su1zb
-.section su11a
-.section su11b
-.section su12a
-.section su12b
-.section su13a
-.section su13b
-.section su14a
-.section su14b
-.section su15a
-.section su15b
-.section su16a
-.section su16b
-.section su17a
-.section su17b
-.section su18a
-.section su18b
-.section su19a
-.section su19b
-.section su10a
-.section su10b
-.section su2aa
-.section su2ab
-.section su2ba
-.section su2bb
-.section su2ca
-.section su2cb
-.section su2da
-.section su2db
-.section su2ea
-.section su2eb
-.section su2fa
-.section su2fb
-.section su2ga
-.section su2gb
-.section su2ha
-.section su2hb
-.section su2ia
-.section su2ib
-.section su2ja
-.section su2jb
-.section su2ka
-.section su2kb
-.section su2la
-.section su2lb
-.section su2ma
-.section su2mb
-.section su2na
-.section su2nb
-.section su2oa
-.section su2ob
-.section su2pa
-.section su2pb
-.section su2qa
-.section su2qb
-.section su2ra
-.section su2rb
-.section su2sa
-.section su2sb
-.section su2ta
-.section su2tb
-.section su2ua
-.section su2ub
-.section su2va
-.section su2vb
-.section su2wa
-.section su2wb
-.section su2xa
-.section su2xb
-.section su2ya
-.section su2yb
-.section su2za
-.section su2zb
-.section su21a
-.section su21b
-.section su22a
-.section su22b
-.section su23a
-.section su23b
-.section su24a
-.section su24b
-.section su25a
-.section su25b
-.section su26a
-.section su26b
-.section su27a
-.section su27b
-.section su28a
-.section su28b
-.section su29a
-.section su29b
-.section su20a
-.section su20b
-.section su3aa
-.section su3ab
-.section su3ba
-.section su3bb
-.section su3ca
-.section su3cb
-.section su3da
-.section su3db
-.section su3ea
-.section su3eb
-.section su3fa
-.section su3fb
-.section su3ga
-.section su3gb
-.section su3ha
-.section su3hb
-.section su3ia
-.section su3ib
-.section su3ja
-.section su3jb
-.section su3ka
-.section su3kb
-.section su3la
-.section su3lb
-.section su3ma
-.section su3mb
-.section su3na
-.section su3nb
-.section su3oa
-.section su3ob
-.section su3pa
-.section su3pb
-.section su3qa
-.section su3qb
-.section su3ra
-.section su3rb
-.section su3sa
-.section su3sb
-.section su3ta
-.section su3tb
-.section su3ua
-.section su3ub
-.section su3va
-.section su3vb
-.section su3wa
-.section su3wb
-.section su3xa
-.section su3xb
-.section su3ya
-.section su3yb
-.section su3za
-.section su3zb
-.section su31a
-.section su31b
-.section su32a
-.section su32b
-.section su33a
-.section su33b
-.section su34a
-.section su34b
-.section su35a
-.section su35b
-.section su36a
-.section su36b
-.section su37a
-.section su37b
-.section su38a
-.section su38b
-.section su39a
-.section su39b
-.section su30a
-.section su30b
-.section su4aa
-.section su4ab
-.section su4ba
-.section su4bb
-.section su4ca
-.section su4cb
-.section su4da
-.section su4db
-.section su4ea
-.section su4eb
-.section su4fa
-.section su4fb
-.section su4ga
-.section su4gb
-.section su4ha
-.section su4hb
-.section su4ia
-.section su4ib
-.section su4ja
-.section su4jb
-.section su4ka
-.section su4kb
-.section su4la
-.section su4lb
-.section su4ma
-.section su4mb
-.section su4na
-.section su4nb
-.section su4oa
-.section su4ob
-.section su4pa
-.section su4pb
-.section su4qa
-.section su4qb
-.section su4ra
-.section su4rb
-.section su4sa
-.section su4sb
-.section su4ta
-.section su4tb
-.section su4ua
-.section su4ub
-.section su4va
-.section su4vb
-.section su4wa
-.section su4wb
-.section su4xa
-.section su4xb
-.section su4ya
-.section su4yb
-.section su4za
-.section su4zb
-.section su41a
-.section su41b
-.section su42a
-.section su42b
-.section su43a
-.section su43b
-.section su44a
-.section su44b
-.section su45a
-.section su45b
-.section su46a
-.section su46b
-.section su47a
-.section su47b
-.section su48a
-.section su48b
-.section su49a
-.section su49b
-.section su40a
-.section su40b
-.section su5aa
-.section su5ab
-.section su5ba
-.section su5bb
-.section su5ca
-.section su5cb
-.section su5da
-.section su5db
-.section su5ea
-.section su5eb
-.section su5fa
-.section su5fb
-.section su5ga
-.section su5gb
-.section su5ha
-.section su5hb
-.section su5ia
-.section su5ib
-.section su5ja
-.section su5jb
-.section su5ka
-.section su5kb
-.section su5la
-.section su5lb
-.section su5ma
-.section su5mb
-.section su5na
-.section su5nb
-.section su5oa
-.section su5ob
-.section su5pa
-.section su5pb
-.section su5qa
-.section su5qb
-.section su5ra
-.section su5rb
-.section su5sa
-.section su5sb
-.section su5ta
-.section su5tb
-.section su5ua
-.section su5ub
-.section su5va
-.section su5vb
-.section su5wa
-.section su5wb
-.section su5xa
-.section su5xb
-.section su5ya
-.section su5yb
-.section su5za
-.section su5zb
-.section su51a
-.section su51b
-.section su52a
-.section su52b
-.section su53a
-.section su53b
-.section su54a
-.section su54b
-.section su55a
-.section su55b
-.section su56a
-.section su56b
-.section su57a
-.section su57b
-.section su58a
-.section su58b
-.section su59a
-.section su59b
-.section su50a
-.section su50b
-.section su6aa
-.section su6ab
-.section su6ba
-.section su6bb
-.section su6ca
-.section su6cb
-.section su6da
-.section su6db
-.section su6ea
-.section su6eb
-.section su6fa
-.section su6fb
-.section su6ga
-.section su6gb
-.section su6ha
-.section su6hb
-.section su6ia
-.section su6ib
-.section su6ja
-.section su6jb
-.section su6ka
-.section su6kb
-.section su6la
-.section su6lb
-.section su6ma
-.section su6mb
-.section su6na
-.section su6nb
-.section su6oa
-.section su6ob
-.section su6pa
-.section su6pb
-.section su6qa
-.section su6qb
-.section su6ra
-.section su6rb
-.section su6sa
-.section su6sb
-.section su6ta
-.section su6tb
-.section su6ua
-.section su6ub
-.section su6va
-.section su6vb
-.section su6wa
-.section su6wb
-.section su6xa
-.section su6xb
-.section su6ya
-.section su6yb
-.section su6za
-.section su6zb
-.section su61a
-.section su61b
-.section su62a
-.section su62b
-.section su63a
-.section su63b
-.section su64a
-.section su64b
-.section su65a
-.section su65b
-.section su66a
-.section su66b
-.section su67a
-.section su67b
-.section su68a
-.section su68b
-.section su69a
-.section su69b
-.section su60a
-.section su60b
-.section su7aa
-.section su7ab
-.section su7ba
-.section su7bb
-.section su7ca
-.section su7cb
-.section su7da
-.section su7db
-.section su7ea
-.section su7eb
-.section su7fa
-.section su7fb
-.section su7ga
-.section su7gb
-.section su7ha
-.section su7hb
-.section su7ia
-.section su7ib
-.section su7ja
-.section su7jb
-.section su7ka
-.section su7kb
-.section su7la
-.section su7lb
-.section su7ma
-.section su7mb
-.section su7na
-.section su7nb
-.section su7oa
-.section su7ob
-.section su7pa
-.section su7pb
-.section su7qa
-.section su7qb
-.section su7ra
-.section su7rb
-.section su7sa
-.section su7sb
-.section su7ta
-.section su7tb
-.section su7ua
-.section su7ub
-.section su7va
-.section su7vb
-.section su7wa
-.section su7wb
-.section su7xa
-.section su7xb
-.section su7ya
-.section su7yb
-.section su7za
-.section su7zb
-.section su71a
-.section su71b
-.section su72a
-.section su72b
-.section su73a
-.section su73b
-.section su74a
-.section su74b
-.section su75a
-.section su75b
-.section su76a
-.section su76b
-.section su77a
-.section su77b
-.section su78a
-.section su78b
-.section su79a
-.section su79b
-.section su70a
-.section su70b
-.section su8aa
-.section su8ab
-.section su8ba
-.section su8bb
-.section su8ca
-.section su8cb
-.section su8da
-.section su8db
-.section su8ea
-.section su8eb
-.section su8fa
-.section su8fb
-.section su8ga
-.section su8gb
-.section su8ha
-.section su8hb
-.section su8ia
-.section su8ib
-.section su8ja
-.section su8jb
-.section su8ka
-.section su8kb
-.section su8la
-.section su8lb
-.section su8ma
-.section su8mb
-.section su8na
-.section su8nb
-.section su8oa
-.section su8ob
-.section su8pa
-.section su8pb
-.section su8qa
-.section su8qb
-.section su8ra
-.section su8rb
-.section su8sa
-.section su8sb
-.section su8ta
-.section su8tb
-.section su8ua
-.section su8ub
-.section su8va
-.section su8vb
-.section su8wa
-.section su8wb
-.section su8xa
-.section su8xb
-.section su8ya
-.section su8yb
-.section su8za
-.section su8zb
-.section su81a
-.section su81b
-.section su82a
-.section su82b
-.section su83a
-.section su83b
-.section su84a
-.section su84b
-.section su85a
-.section su85b
-.section su86a
-.section su86b
-.section su87a
-.section su87b
-.section su88a
-.section su88b
-.section su89a
-.section su89b
-.section su80a
-.section su80b
-.section su9aa
-.section su9ab
-.section su9ba
-.section su9bb
-.section su9ca
-.section su9cb
-.section su9da
-.section su9db
-.section su9ea
-.section su9eb
-.section su9fa
-.section su9fb
-.section su9ga
-.section su9gb
-.section su9ha
-.section su9hb
-.section su9ia
-.section su9ib
-.section su9ja
-.section su9jb
-.section su9ka
-.section su9kb
-.section su9la
-.section su9lb
-.section su9ma
-.section su9mb
-.section su9na
-.section su9nb
-.section su9oa
-.section su9ob
-.section su9pa
-.section su9pb
-.section su9qa
-.section su9qb
-.section su9ra
-.section su9rb
-.section su9sa
-.section su9sb
-.section su9ta
-.section su9tb
-.section su9ua
-.section su9ub
-.section su9va
-.section su9vb
-.section su9wa
-.section su9wb
-.section su9xa
-.section su9xb
-.section su9ya
-.section su9yb
-.section su9za
-.section su9zb
-.section su91a
-.section su91b
-.section su92a
-.section su92b
-.section su93a
-.section su93b
-.section su94a
-.section su94b
-.section su95a
-.section su95b
-.section su96a
-.section su96b
-.section su97a
-.section su97b
-.section su98a
-.section su98b
-.section su99a
-.section su99b
-.section su90a
-.section su90b
-.section su0aa
-.section su0ab
-.section su0ba
-.section su0bb
-.section su0ca
-.section su0cb
-.section su0da
-.section su0db
-.section su0ea
-.section su0eb
-.section su0fa
-.section su0fb
-.section su0ga
-.section su0gb
-.section su0ha
-.section su0hb
-.section su0ia
-.section su0ib
-.section su0ja
-.section su0jb
-.section su0ka
-.section su0kb
-.section su0la
-.section su0lb
-.section su0ma
-.section su0mb
-.section su0na
-.section su0nb
-.section su0oa
-.section su0ob
-.section su0pa
-.section su0pb
-.section su0qa
-.section su0qb
-.section su0ra
-.section su0rb
-.section su0sa
-.section su0sb
-.section su0ta
-.section su0tb
-.section su0ua
-.section su0ub
-.section su0va
-.section su0vb
-.section su0wa
-.section su0wb
-.section su0xa
-.section su0xb
-.section su0ya
-.section su0yb
-.section su0za
-.section su0zb
-.section su01a
-.section su01b
-.section su02a
-.section su02b
-.section su03a
-.section su03b
-.section su04a
-.section su04b
-.section su05a
-.section su05b
-.section su06a
-.section su06b
-.section su07a
-.section su07b
-.section su08a
-.section su08b
-.section su09a
-.section su09b
-.section su00a
-.section su00b
-.section svaaa
-.section svaab
-.section svaba
-.section svabb
-.section svaca
-.section svacb
-.section svada
-.section svadb
-.section svaea
-.section svaeb
-.section svafa
-.section svafb
-.section svaga
-.section svagb
-.section svaha
-.section svahb
-.section svaia
-.section svaib
-.section svaja
-.section svajb
-.section svaka
-.section svakb
-.section svala
-.section svalb
-.section svama
-.section svamb
-.section svana
-.section svanb
-.section svaoa
-.section svaob
-.section svapa
-.section svapb
-.section svaqa
-.section svaqb
-.section svara
-.section svarb
-.section svasa
-.section svasb
-.section svata
-.section svatb
-.section svaua
-.section svaub
-.section svava
-.section svavb
-.section svawa
-.section svawb
-.section svaxa
-.section svaxb
-.section svaya
-.section svayb
-.section svaza
-.section svazb
-.section sva1a
-.section sva1b
-.section sva2a
-.section sva2b
-.section sva3a
-.section sva3b
-.section sva4a
-.section sva4b
-.section sva5a
-.section sva5b
-.section sva6a
-.section sva6b
-.section sva7a
-.section sva7b
-.section sva8a
-.section sva8b
-.section sva9a
-.section sva9b
-.section sva0a
-.section sva0b
-.section svbaa
-.section svbab
-.section svbba
-.section svbbb
-.section svbca
-.section svbcb
-.section svbda
-.section svbdb
-.section svbea
-.section svbeb
-.section svbfa
-.section svbfb
-.section svbga
-.section svbgb
-.section svbha
-.section svbhb
-.section svbia
-.section svbib
-.section svbja
-.section svbjb
-.section svbka
-.section svbkb
-.section svbla
-.section svblb
-.section svbma
-.section svbmb
-.section svbna
-.section svbnb
-.section svboa
-.section svbob
-.section svbpa
-.section svbpb
-.section svbqa
-.section svbqb
-.section svbra
-.section svbrb
-.section svbsa
-.section svbsb
-.section svbta
-.section svbtb
-.section svbua
-.section svbub
-.section svbva
-.section svbvb
-.section svbwa
-.section svbwb
-.section svbxa
-.section svbxb
-.section svbya
-.section svbyb
-.section svbza
-.section svbzb
-.section svb1a
-.section svb1b
-.section svb2a
-.section svb2b
-.section svb3a
-.section svb3b
-.section svb4a
-.section svb4b
-.section svb5a
-.section svb5b
-.section svb6a
-.section svb6b
-.section svb7a
-.section svb7b
-.section svb8a
-.section svb8b
-.section svb9a
-.section svb9b
-.section svb0a
-.section svb0b
-.section svcaa
-.section svcab
-.section svcba
-.section svcbb
-.section svcca
-.section svccb
-.section svcda
-.section svcdb
-.section svcea
-.section svceb
-.section svcfa
-.section svcfb
-.section svcga
-.section svcgb
-.section svcha
-.section svchb
-.section svcia
-.section svcib
-.section svcja
-.section svcjb
-.section svcka
-.section svckb
-.section svcla
-.section svclb
-.section svcma
-.section svcmb
-.section svcna
-.section svcnb
-.section svcoa
-.section svcob
-.section svcpa
-.section svcpb
-.section svcqa
-.section svcqb
-.section svcra
-.section svcrb
-.section svcsa
-.section svcsb
-.section svcta
-.section svctb
-.section svcua
-.section svcub
-.section svcva
-.section svcvb
-.section svcwa
-.section svcwb
-.section svcxa
-.section svcxb
-.section svcya
-.section svcyb
-.section svcza
-.section svczb
-.section svc1a
-.section svc1b
-.section svc2a
-.section svc2b
-.section svc3a
-.section svc3b
-.section svc4a
-.section svc4b
-.section svc5a
-.section svc5b
-.section svc6a
-.section svc6b
-.section svc7a
-.section svc7b
-.section svc8a
-.section svc8b
-.section svc9a
-.section svc9b
-.section svc0a
-.section svc0b
-.section svdaa
-.section svdab
-.section svdba
-.section svdbb
-.section svdca
-.section svdcb
-.section svdda
-.section svddb
-.section svdea
-.section svdeb
-.section svdfa
-.section svdfb
-.section svdga
-.section svdgb
-.section svdha
-.section svdhb
-.section svdia
-.section svdib
-.section svdja
-.section svdjb
-.section svdka
-.section svdkb
-.section svdla
-.section svdlb
-.section svdma
-.section svdmb
-.section svdna
-.section svdnb
-.section svdoa
-.section svdob
-.section svdpa
-.section svdpb
-.section svdqa
-.section svdqb
-.section svdra
-.section svdrb
-.section svdsa
-.section svdsb
-.section svdta
-.section svdtb
-.section svdua
-.section svdub
-.section svdva
-.section svdvb
-.section svdwa
-.section svdwb
-.section svdxa
-.section svdxb
-.section svdya
-.section svdyb
-.section svdza
-.section svdzb
-.section svd1a
-.section svd1b
-.section svd2a
-.section svd2b
-.section svd3a
-.section svd3b
-.section svd4a
-.section svd4b
-.section svd5a
-.section svd5b
-.section svd6a
-.section svd6b
-.section svd7a
-.section svd7b
-.section svd8a
-.section svd8b
-.section svd9a
-.section svd9b
-.section svd0a
-.section svd0b
-.section sveaa
-.section sveab
-.section sveba
-.section svebb
-.section sveca
-.section svecb
-.section sveda
-.section svedb
-.section sveea
-.section sveeb
-.section svefa
-.section svefb
-.section svega
-.section svegb
-.section sveha
-.section svehb
-.section sveia
-.section sveib
-.section sveja
-.section svejb
-.section sveka
-.section svekb
-.section svela
-.section svelb
-.section svema
-.section svemb
-.section svena
-.section svenb
-.section sveoa
-.section sveob
-.section svepa
-.section svepb
-.section sveqa
-.section sveqb
-.section svera
-.section sverb
-.section svesa
-.section svesb
-.section sveta
-.section svetb
-.section sveua
-.section sveub
-.section sveva
-.section svevb
-.section svewa
-.section svewb
-.section svexa
-.section svexb
-.section sveya
-.section sveyb
-.section sveza
-.section svezb
-.section sve1a
-.section sve1b
-.section sve2a
-.section sve2b
-.section sve3a
-.section sve3b
-.section sve4a
-.section sve4b
-.section sve5a
-.section sve5b
-.section sve6a
-.section sve6b
-.section sve7a
-.section sve7b
-.section sve8a
-.section sve8b
-.section sve9a
-.section sve9b
-.section sve0a
-.section sve0b
-.section svfaa
-.section svfab
-.section svfba
-.section svfbb
-.section svfca
-.section svfcb
-.section svfda
-.section svfdb
-.section svfea
-.section svfeb
-.section svffa
-.section svffb
-.section svfga
-.section svfgb
-.section svfha
-.section svfhb
-.section svfia
-.section svfib
-.section svfja
-.section svfjb
-.section svfka
-.section svfkb
-.section svfla
-.section svflb
-.section svfma
-.section svfmb
-.section svfna
-.section svfnb
-.section svfoa
-.section svfob
-.section svfpa
-.section svfpb
-.section svfqa
-.section svfqb
-.section svfra
-.section svfrb
-.section svfsa
-.section svfsb
-.section svfta
-.section svftb
-.section svfua
-.section svfub
-.section svfva
-.section svfvb
-.section svfwa
-.section svfwb
-.section svfxa
-.section svfxb
-.section svfya
-.section svfyb
-.section svfza
-.section svfzb
-.section svf1a
-.section svf1b
-.section svf2a
-.section svf2b
-.section svf3a
-.section svf3b
-.section svf4a
-.section svf4b
-.section svf5a
-.section svf5b
-.section svf6a
-.section svf6b
-.section svf7a
-.section svf7b
-.section svf8a
-.section svf8b
-.section svf9a
-.section svf9b
-.section svf0a
-.section svf0b
-.section svgaa
-.section svgab
-.section svgba
-.section svgbb
-.section svgca
-.section svgcb
-.section svgda
-.section svgdb
-.section svgea
-.section svgeb
-.section svgfa
-.section svgfb
-.section svgga
-.section svggb
-.section svgha
-.section svghb
-.section svgia
-.section svgib
-.section svgja
-.section svgjb
-.section svgka
-.section svgkb
-.section svgla
-.section svglb
-.section svgma
-.section svgmb
-.section svgna
-.section svgnb
-.section svgoa
-.section svgob
-.section svgpa
-.section svgpb
-.section svgqa
-.section svgqb
-.section svgra
-.section svgrb
-.section svgsa
-.section svgsb
-.section svgta
-.section svgtb
-.section svgua
-.section svgub
-.section svgva
-.section svgvb
-.section svgwa
-.section svgwb
-.section svgxa
-.section svgxb
-.section svgya
-.section svgyb
-.section svgza
-.section svgzb
-.section svg1a
-.section svg1b
-.section svg2a
-.section svg2b
-.section svg3a
-.section svg3b
-.section svg4a
-.section svg4b
-.section svg5a
-.section svg5b
-.section svg6a
-.section svg6b
-.section svg7a
-.section svg7b
-.section svg8a
-.section svg8b
-.section svg9a
-.section svg9b
-.section svg0a
-.section svg0b
-.section svhaa
-.section svhab
-.section svhba
-.section svhbb
-.section svhca
-.section svhcb
-.section svhda
-.section svhdb
-.section svhea
-.section svheb
-.section svhfa
-.section svhfb
-.section svhga
-.section svhgb
-.section svhha
-.section svhhb
-.section svhia
-.section svhib
-.section svhja
-.section svhjb
-.section svhka
-.section svhkb
-.section svhla
-.section svhlb
-.section svhma
-.section svhmb
-.section svhna
-.section svhnb
-.section svhoa
-.section svhob
-.section svhpa
-.section svhpb
-.section svhqa
-.section svhqb
-.section svhra
-.section svhrb
-.section svhsa
-.section svhsb
-.section svhta
-.section svhtb
-.section svhua
-.section svhub
-.section svhva
-.section svhvb
-.section svhwa
-.section svhwb
-.section svhxa
-.section svhxb
-.section svhya
-.section svhyb
-.section svhza
-.section svhzb
-.section svh1a
-.section svh1b
-.section svh2a
-.section svh2b
-.section svh3a
-.section svh3b
-.section svh4a
-.section svh4b
-.section svh5a
-.section svh5b
-.section svh6a
-.section svh6b
-.section svh7a
-.section svh7b
-.section svh8a
-.section svh8b
-.section svh9a
-.section svh9b
-.section svh0a
-.section svh0b
-.section sviaa
-.section sviab
-.section sviba
-.section svibb
-.section svica
-.section svicb
-.section svida
-.section svidb
-.section sviea
-.section svieb
-.section svifa
-.section svifb
-.section sviga
-.section svigb
-.section sviha
-.section svihb
-.section sviia
-.section sviib
-.section svija
-.section svijb
-.section svika
-.section svikb
-.section svila
-.section svilb
-.section svima
-.section svimb
-.section svina
-.section svinb
-.section svioa
-.section sviob
-.section svipa
-.section svipb
-.section sviqa
-.section sviqb
-.section svira
-.section svirb
-.section svisa
-.section svisb
-.section svita
-.section svitb
-.section sviua
-.section sviub
-.section sviva
-.section svivb
-.section sviwa
-.section sviwb
-.section svixa
-.section svixb
-.section sviya
-.section sviyb
-.section sviza
-.section svizb
-.section svi1a
-.section svi1b
-.section svi2a
-.section svi2b
-.section svi3a
-.section svi3b
-.section svi4a
-.section svi4b
-.section svi5a
-.section svi5b
-.section svi6a
-.section svi6b
-.section svi7a
-.section svi7b
-.section svi8a
-.section svi8b
-.section svi9a
-.section svi9b
-.section svi0a
-.section svi0b
-.section svjaa
-.section svjab
-.section svjba
-.section svjbb
-.section svjca
-.section svjcb
-.section svjda
-.section svjdb
-.section svjea
-.section svjeb
-.section svjfa
-.section svjfb
-.section svjga
-.section svjgb
-.section svjha
-.section svjhb
-.section svjia
-.section svjib
-.section svjja
-.section svjjb
-.section svjka
-.section svjkb
-.section svjla
-.section svjlb
-.section svjma
-.section svjmb
-.section svjna
-.section svjnb
-.section svjoa
-.section svjob
-.section svjpa
-.section svjpb
-.section svjqa
-.section svjqb
-.section svjra
-.section svjrb
-.section svjsa
-.section svjsb
-.section svjta
-.section svjtb
-.section svjua
-.section svjub
-.section svjva
-.section svjvb
-.section svjwa
-.section svjwb
-.section svjxa
-.section svjxb
-.section svjya
-.section svjyb
-.section svjza
-.section svjzb
-.section svj1a
-.section svj1b
-.section svj2a
-.section svj2b
-.section svj3a
-.section svj3b
-.section svj4a
-.section svj4b
-.section svj5a
-.section svj5b
-.section svj6a
-.section svj6b
-.section svj7a
-.section svj7b
-.section svj8a
-.section svj8b
-.section svj9a
-.section svj9b
-.section svj0a
-.section svj0b
-.section svkaa
-.section svkab
-.section svkba
-.section svkbb
-.section svkca
-.section svkcb
-.section svkda
-.section svkdb
-.section svkea
-.section svkeb
-.section svkfa
-.section svkfb
-.section svkga
-.section svkgb
-.section svkha
-.section svkhb
-.section svkia
-.section svkib
-.section svkja
-.section svkjb
-.section svkka
-.section svkkb
-.section svkla
-.section svklb
-.section svkma
-.section svkmb
-.section svkna
-.section svknb
-.section svkoa
-.section svkob
-.section svkpa
-.section svkpb
-.section svkqa
-.section svkqb
-.section svkra
-.section svkrb
-.section svksa
-.section svksb
-.section svkta
-.section svktb
-.section svkua
-.section svkub
-.section svkva
-.section svkvb
-.section svkwa
-.section svkwb
-.section svkxa
-.section svkxb
-.section svkya
-.section svkyb
-.section svkza
-.section svkzb
-.section svk1a
-.section svk1b
-.section svk2a
-.section svk2b
-.section svk3a
-.section svk3b
-.section svk4a
-.section svk4b
-.section svk5a
-.section svk5b
-.section svk6a
-.section svk6b
-.section svk7a
-.section svk7b
-.section svk8a
-.section svk8b
-.section svk9a
-.section svk9b
-.section svk0a
-.section svk0b
-.section svlaa
-.section svlab
-.section svlba
-.section svlbb
-.section svlca
-.section svlcb
-.section svlda
-.section svldb
-.section svlea
-.section svleb
-.section svlfa
-.section svlfb
-.section svlga
-.section svlgb
-.section svlha
-.section svlhb
-.section svlia
-.section svlib
-.section svlja
-.section svljb
-.section svlka
-.section svlkb
-.section svlla
-.section svllb
-.section svlma
-.section svlmb
-.section svlna
-.section svlnb
-.section svloa
-.section svlob
-.section svlpa
-.section svlpb
-.section svlqa
-.section svlqb
-.section svlra
-.section svlrb
-.section svlsa
-.section svlsb
-.section svlta
-.section svltb
-.section svlua
-.section svlub
-.section svlva
-.section svlvb
-.section svlwa
-.section svlwb
-.section svlxa
-.section svlxb
-.section svlya
-.section svlyb
-.section svlza
-.section svlzb
-.section svl1a
-.section svl1b
-.section svl2a
-.section svl2b
-.section svl3a
-.section svl3b
-.section svl4a
-.section svl4b
-.section svl5a
-.section svl5b
-.section svl6a
-.section svl6b
-.section svl7a
-.section svl7b
-.section svl8a
-.section svl8b
-.section svl9a
-.section svl9b
-.section svl0a
-.section svl0b
-.section svmaa
-.section svmab
-.section svmba
-.section svmbb
-.section svmca
-.section svmcb
-.section svmda
-.section svmdb
-.section svmea
-.section svmeb
-.section svmfa
-.section svmfb
-.section svmga
-.section svmgb
-.section svmha
-.section svmhb
-.section svmia
-.section svmib
-.section svmja
-.section svmjb
-.section svmka
-.section svmkb
-.section svmla
-.section svmlb
-.section svmma
-.section svmmb
-.section svmna
-.section svmnb
-.section svmoa
-.section svmob
-.section svmpa
-.section svmpb
-.section svmqa
-.section svmqb
-.section svmra
-.section svmrb
-.section svmsa
-.section svmsb
-.section svmta
-.section svmtb
-.section svmua
-.section svmub
-.section svmva
-.section svmvb
-.section svmwa
-.section svmwb
-.section svmxa
-.section svmxb
-.section svmya
-.section svmyb
-.section svmza
-.section svmzb
-.section svm1a
-.section svm1b
-.section svm2a
-.section svm2b
-.section svm3a
-.section svm3b
-.section svm4a
-.section svm4b
-.section svm5a
-.section svm5b
-.section svm6a
-.section svm6b
-.section svm7a
-.section svm7b
-.section svm8a
-.section svm8b
-.section svm9a
-.section svm9b
-.section svm0a
-.section svm0b
-.section svnaa
-.section svnab
-.section svnba
-.section svnbb
-.section svnca
-.section svncb
-.section svnda
-.section svndb
-.section svnea
-.section svneb
-.section svnfa
-.section svnfb
-.section svnga
-.section svngb
-.section svnha
-.section svnhb
-.section svnia
-.section svnib
-.section svnja
-.section svnjb
-.section svnka
-.section svnkb
-.section svnla
-.section svnlb
-.section svnma
-.section svnmb
-.section svnna
-.section svnnb
-.section svnoa
-.section svnob
-.section svnpa
-.section svnpb
-.section svnqa
-.section svnqb
-.section svnra
-.section svnrb
-.section svnsa
-.section svnsb
-.section svnta
-.section svntb
-.section svnua
-.section svnub
-.section svnva
-.section svnvb
-.section svnwa
-.section svnwb
-.section svnxa
-.section svnxb
-.section svnya
-.section svnyb
-.section svnza
-.section svnzb
-.section svn1a
-.section svn1b
-.section svn2a
-.section svn2b
-.section svn3a
-.section svn3b
-.section svn4a
-.section svn4b
-.section svn5a
-.section svn5b
-.section svn6a
-.section svn6b
-.section svn7a
-.section svn7b
-.section svn8a
-.section svn8b
-.section svn9a
-.section svn9b
-.section svn0a
-.section svn0b
-.section svoaa
-.section svoab
-.section svoba
-.section svobb
-.section svoca
-.section svocb
-.section svoda
-.section svodb
-.section svoea
-.section svoeb
-.section svofa
-.section svofb
-.section svoga
-.section svogb
-.section svoha
-.section svohb
-.section svoia
-.section svoib
-.section svoja
-.section svojb
-.section svoka
-.section svokb
-.section svola
-.section svolb
-.section svoma
-.section svomb
-.section svona
-.section svonb
-.section svooa
-.section svoob
-.section svopa
-.section svopb
-.section svoqa
-.section svoqb
-.section svora
-.section svorb
-.section svosa
-.section svosb
-.section svota
-.section svotb
-.section svoua
-.section svoub
-.section svova
-.section svovb
-.section svowa
-.section svowb
-.section svoxa
-.section svoxb
-.section svoya
-.section svoyb
-.section svoza
-.section svozb
-.section svo1a
-.section svo1b
-.section svo2a
-.section svo2b
-.section svo3a
-.section svo3b
-.section svo4a
-.section svo4b
-.section svo5a
-.section svo5b
-.section svo6a
-.section svo6b
-.section svo7a
-.section svo7b
-.section svo8a
-.section svo8b
-.section svo9a
-.section svo9b
-.section svo0a
-.section svo0b
-.section svpaa
-.section svpab
-.section svpba
-.section svpbb
-.section svpca
-.section svpcb
-.section svpda
-.section svpdb
-.section svpea
-.section svpeb
-.section svpfa
-.section svpfb
-.section svpga
-.section svpgb
-.section svpha
-.section svphb
-.section svpia
-.section svpib
-.section svpja
-.section svpjb
-.section svpka
-.section svpkb
-.section svpla
-.section svplb
-.section svpma
-.section svpmb
-.section svpna
-.section svpnb
-.section svpoa
-.section svpob
-.section svppa
-.section svppb
-.section svpqa
-.section svpqb
-.section svpra
-.section svprb
-.section svpsa
-.section svpsb
-.section svpta
-.section svptb
-.section svpua
-.section svpub
-.section svpva
-.section svpvb
-.section svpwa
-.section svpwb
-.section svpxa
-.section svpxb
-.section svpya
-.section svpyb
-.section svpza
-.section svpzb
-.section svp1a
-.section svp1b
-.section svp2a
-.section svp2b
-.section svp3a
-.section svp3b
-.section svp4a
-.section svp4b
-.section svp5a
-.section svp5b
-.section svp6a
-.section svp6b
-.section svp7a
-.section svp7b
-.section svp8a
-.section svp8b
-.section svp9a
-.section svp9b
-.section svp0a
-.section svp0b
-.section svqaa
-.section svqab
-.section svqba
-.section svqbb
-.section svqca
-.section svqcb
-.section svqda
-.section svqdb
-.section svqea
-.section svqeb
-.section svqfa
-.section svqfb
-.section svqga
-.section svqgb
-.section svqha
-.section svqhb
-.section svqia
-.section svqib
-.section svqja
-.section svqjb
-.section svqka
-.section svqkb
-.section svqla
-.section svqlb
-.section svqma
-.section svqmb
-.section svqna
-.section svqnb
-.section svqoa
-.section svqob
-.section svqpa
-.section svqpb
-.section svqqa
-.section svqqb
-.section svqra
-.section svqrb
-.section svqsa
-.section svqsb
-.section svqta
-.section svqtb
-.section svqua
-.section svqub
-.section svqva
-.section svqvb
-.section svqwa
-.section svqwb
-.section svqxa
-.section svqxb
-.section svqya
-.section svqyb
-.section svqza
-.section svqzb
-.section svq1a
-.section svq1b
-.section svq2a
-.section svq2b
-.section svq3a
-.section svq3b
-.section svq4a
-.section svq4b
-.section svq5a
-.section svq5b
-.section svq6a
-.section svq6b
-.section svq7a
-.section svq7b
-.section svq8a
-.section svq8b
-.section svq9a
-.section svq9b
-.section svq0a
-.section svq0b
-.section svraa
-.section svrab
-.section svrba
-.section svrbb
-.section svrca
-.section svrcb
-.section svrda
-.section svrdb
-.section svrea
-.section svreb
-.section svrfa
-.section svrfb
-.section svrga
-.section svrgb
-.section svrha
-.section svrhb
-.section svria
-.section svrib
-.section svrja
-.section svrjb
-.section svrka
-.section svrkb
-.section svrla
-.section svrlb
-.section svrma
-.section svrmb
-.section svrna
-.section svrnb
-.section svroa
-.section svrob
-.section svrpa
-.section svrpb
-.section svrqa
-.section svrqb
-.section svrra
-.section svrrb
-.section svrsa
-.section svrsb
-.section svrta
-.section svrtb
-.section svrua
-.section svrub
-.section svrva
-.section svrvb
-.section svrwa
-.section svrwb
-.section svrxa
-.section svrxb
-.section svrya
-.section svryb
-.section svrza
-.section svrzb
-.section svr1a
-.section svr1b
-.section svr2a
-.section svr2b
-.section svr3a
-.section svr3b
-.section svr4a
-.section svr4b
-.section svr5a
-.section svr5b
-.section svr6a
-.section svr6b
-.section svr7a
-.section svr7b
-.section svr8a
-.section svr8b
-.section svr9a
-.section svr9b
-.section svr0a
-.section svr0b
-.section svsaa
-.section svsab
-.section svsba
-.section svsbb
-.section svsca
-.section svscb
-.section svsda
-.section svsdb
-.section svsea
-.section svseb
-.section svsfa
-.section svsfb
-.section svsga
-.section svsgb
-.section svsha
-.section svshb
-.section svsia
-.section svsib
-.section svsja
-.section svsjb
-.section svska
-.section svskb
-.section svsla
-.section svslb
-.section svsma
-.section svsmb
-.section svsna
-.section svsnb
-.section svsoa
-.section svsob
-.section svspa
-.section svspb
-.section svsqa
-.section svsqb
-.section svsra
-.section svsrb
-.section svssa
-.section svssb
-.section svsta
-.section svstb
-.section svsua
-.section svsub
-.section svsva
-.section svsvb
-.section svswa
-.section svswb
-.section svsxa
-.section svsxb
-.section svsya
-.section svsyb
-.section svsza
-.section svszb
-.section svs1a
-.section svs1b
-.section svs2a
-.section svs2b
-.section svs3a
-.section svs3b
-.section svs4a
-.section svs4b
-.section svs5a
-.section svs5b
-.section svs6a
-.section svs6b
-.section svs7a
-.section svs7b
-.section svs8a
-.section svs8b
-.section svs9a
-.section svs9b
-.section svs0a
-.section svs0b
-.section svtaa
-.section svtab
-.section svtba
-.section svtbb
-.section svtca
-.section svtcb
-.section svtda
-.section svtdb
-.section svtea
-.section svteb
-.section svtfa
-.section svtfb
-.section svtga
-.section svtgb
-.section svtha
-.section svthb
-.section svtia
-.section svtib
-.section svtja
-.section svtjb
-.section svtka
-.section svtkb
-.section svtla
-.section svtlb
-.section svtma
-.section svtmb
-.section svtna
-.section svtnb
-.section svtoa
-.section svtob
-.section svtpa
-.section svtpb
-.section svtqa
-.section svtqb
-.section svtra
-.section svtrb
-.section svtsa
-.section svtsb
-.section svtta
-.section svttb
-.section svtua
-.section svtub
-.section svtva
-.section svtvb
-.section svtwa
-.section svtwb
-.section svtxa
-.section svtxb
-.section svtya
-.section svtyb
-.section svtza
-.section svtzb
-.section svt1a
-.section svt1b
-.section svt2a
-.section svt2b
-.section svt3a
-.section svt3b
-.section svt4a
-.section svt4b
-.section svt5a
-.section svt5b
-.section svt6a
-.section svt6b
-.section svt7a
-.section svt7b
-.section svt8a
-.section svt8b
-.section svt9a
-.section svt9b
-.section svt0a
-.section svt0b
-.section svuaa
-.section svuab
-.section svuba
-.section svubb
-.section svuca
-.section svucb
-.section svuda
-.section svudb
-.section svuea
-.section svueb
-.section svufa
-.section svufb
-.section svuga
-.section svugb
-.section svuha
-.section svuhb
-.section svuia
-.section svuib
-.section svuja
-.section svujb
-.section svuka
-.section svukb
-.section svula
-.section svulb
-.section svuma
-.section svumb
-.section svuna
-.section svunb
-.section svuoa
-.section svuob
-.section svupa
-.section svupb
-.section svuqa
-.section svuqb
-.section svura
-.section svurb
-.section svusa
-.section svusb
-.section svuta
-.section svutb
-.section svuua
-.section svuub
-.section svuva
-.section svuvb
-.section svuwa
-.section svuwb
-.section svuxa
-.section svuxb
-.section svuya
-.section svuyb
-.section svuza
-.section svuzb
-.section svu1a
-.section svu1b
-.section svu2a
-.section svu2b
-.section svu3a
-.section svu3b
-.section svu4a
-.section svu4b
-.section svu5a
-.section svu5b
-.section svu6a
-.section svu6b
-.section svu7a
-.section svu7b
-.section svu8a
-.section svu8b
-.section svu9a
-.section svu9b
-.section svu0a
-.section svu0b
-.section svvaa
-.section svvab
-.section svvba
-.section svvbb
-.section svvca
-.section svvcb
-.section svvda
-.section svvdb
-.section svvea
-.section svveb
-.section svvfa
-.section svvfb
-.section svvga
-.section svvgb
-.section svvha
-.section svvhb
-.section svvia
-.section svvib
-.section svvja
-.section svvjb
-.section svvka
-.section svvkb
-.section svvla
-.section svvlb
-.section svvma
-.section svvmb
-.section svvna
-.section svvnb
-.section svvoa
-.section svvob
-.section svvpa
-.section svvpb
-.section svvqa
-.section svvqb
-.section svvra
-.section svvrb
-.section svvsa
-.section svvsb
-.section svvta
-.section svvtb
-.section svvua
-.section svvub
-.section svvva
-.section svvvb
-.section svvwa
-.section svvwb
-.section svvxa
-.section svvxb
-.section svvya
-.section svvyb
-.section svvza
-.section svvzb
-.section svv1a
-.section svv1b
-.section svv2a
-.section svv2b
-.section svv3a
-.section svv3b
-.section svv4a
-.section svv4b
-.section svv5a
-.section svv5b
-.section svv6a
-.section svv6b
-.section svv7a
-.section svv7b
-.section svv8a
-.section svv8b
-.section svv9a
-.section svv9b
-.section svv0a
-.section svv0b
-.section svwaa
-.section svwab
-.section svwba
-.section svwbb
-.section svwca
-.section svwcb
-.section svwda
-.section svwdb
-.section svwea
-.section svweb
-.section svwfa
-.section svwfb
-.section svwga
-.section svwgb
-.section svwha
-.section svwhb
-.section svwia
-.section svwib
-.section svwja
-.section svwjb
-.section svwka
-.section svwkb
-.section svwla
-.section svwlb
-.section svwma
-.section svwmb
-.section svwna
-.section svwnb
-.section svwoa
-.section svwob
-.section svwpa
-.section svwpb
-.section svwqa
-.section svwqb
-.section svwra
-.section svwrb
-.section svwsa
-.section svwsb
-.section svwta
-.section svwtb
-.section svwua
-.section svwub
-.section svwva
-.section svwvb
-.section svwwa
-.section svwwb
-.section svwxa
-.section svwxb
-.section svwya
-.section svwyb
-.section svwza
-.section svwzb
-.section svw1a
-.section svw1b
-.section svw2a
-.section svw2b
-.section svw3a
-.section svw3b
-.section svw4a
-.section svw4b
-.section svw5a
-.section svw5b
-.section svw6a
-.section svw6b
-.section svw7a
-.section svw7b
-.section svw8a
-.section svw8b
-.section svw9a
-.section svw9b
-.section svw0a
-.section svw0b
-.section svxaa
-.section svxab
-.section svxba
-.section svxbb
-.section svxca
-.section svxcb
-.section svxda
-.section svxdb
-.section svxea
-.section svxeb
-.section svxfa
-.section svxfb
-.section svxga
-.section svxgb
-.section svxha
-.section svxhb
-.section svxia
-.section svxib
-.section svxja
-.section svxjb
-.section svxka
-.section svxkb
-.section svxla
-.section svxlb
-.section svxma
-.section svxmb
-.section svxna
-.section svxnb
-.section svxoa
-.section svxob
-.section svxpa
-.section svxpb
-.section svxqa
-.section svxqb
-.section svxra
-.section svxrb
-.section svxsa
-.section svxsb
-.section svxta
-.section svxtb
-.section svxua
-.section svxub
-.section svxva
-.section svxvb
-.section svxwa
-.section svxwb
-.section svxxa
-.section svxxb
-.section svxya
-.section svxyb
-.section svxza
-.section svxzb
-.section svx1a
-.section svx1b
-.section svx2a
-.section svx2b
-.section svx3a
-.section svx3b
-.section svx4a
-.section svx4b
-.section svx5a
-.section svx5b
-.section svx6a
-.section svx6b
-.section svx7a
-.section svx7b
-.section svx8a
-.section svx8b
-.section svx9a
-.section svx9b
-.section svx0a
-.section svx0b
-.section svyaa
-.section svyab
-.section svyba
-.section svybb
-.section svyca
-.section svycb
-.section svyda
-.section svydb
-.section svyea
-.section svyeb
-.section svyfa
-.section svyfb
-.section svyga
-.section svygb
-.section svyha
-.section svyhb
-.section svyia
-.section svyib
-.section svyja
-.section svyjb
-.section svyka
-.section svykb
-.section svyla
-.section svylb
-.section svyma
-.section svymb
-.section svyna
-.section svynb
-.section svyoa
-.section svyob
-.section svypa
-.section svypb
-.section svyqa
-.section svyqb
-.section svyra
-.section svyrb
-.section svysa
-.section svysb
-.section svyta
-.section svytb
-.section svyua
-.section svyub
-.section svyva
-.section svyvb
-.section svywa
-.section svywb
-.section svyxa
-.section svyxb
-.section svyya
-.section svyyb
-.section svyza
-.section svyzb
-.section svy1a
-.section svy1b
-.section svy2a
-.section svy2b
-.section svy3a
-.section svy3b
-.section svy4a
-.section svy4b
-.section svy5a
-.section svy5b
-.section svy6a
-.section svy6b
-.section svy7a
-.section svy7b
-.section svy8a
-.section svy8b
-.section svy9a
-.section svy9b
-.section svy0a
-.section svy0b
-.section svzaa
-.section svzab
-.section svzba
-.section svzbb
-.section svzca
-.section svzcb
-.section svzda
-.section svzdb
-.section svzea
-.section svzeb
-.section svzfa
-.section svzfb
-.section svzga
-.section svzgb
-.section svzha
-.section svzhb
-.section svzia
-.section svzib
-.section svzja
-.section svzjb
-.section svzka
-.section svzkb
-.section svzla
-.section svzlb
-.section svzma
-.section svzmb
-.section svzna
-.section svznb
-.section svzoa
-.section svzob
-.section svzpa
-.section svzpb
-.section svzqa
-.section svzqb
-.section svzra
-.section svzrb
-.section svzsa
-.section svzsb
-.section svzta
-.section svztb
-.section svzua
-.section svzub
-.section svzva
-.section svzvb
-.section svzwa
-.section svzwb
-.section svzxa
-.section svzxb
-.section svzya
-.section svzyb
-.section svzza
-.section svzzb
-.section svz1a
-.section svz1b
-.section svz2a
-.section svz2b
-.section svz3a
-.section svz3b
-.section svz4a
-.section svz4b
-.section svz5a
-.section svz5b
-.section svz6a
-.section svz6b
-.section svz7a
-.section svz7b
-.section svz8a
-.section svz8b
-.section svz9a
-.section svz9b
-.section svz0a
-.section svz0b
-.section sv1aa
-.section sv1ab
-.section sv1ba
-.section sv1bb
-.section sv1ca
-.section sv1cb
-.section sv1da
-.section sv1db
-.section sv1ea
-.section sv1eb
-.section sv1fa
-.section sv1fb
-.section sv1ga
-.section sv1gb
-.section sv1ha
-.section sv1hb
-.section sv1ia
-.section sv1ib
-.section sv1ja
-.section sv1jb
-.section sv1ka
-.section sv1kb
-.section sv1la
-.section sv1lb
-.section sv1ma
-.section sv1mb
-.section sv1na
-.section sv1nb
-.section sv1oa
-.section sv1ob
-.section sv1pa
-.section sv1pb
-.section sv1qa
-.section sv1qb
-.section sv1ra
-.section sv1rb
-.section sv1sa
-.section sv1sb
-.section sv1ta
-.section sv1tb
-.section sv1ua
-.section sv1ub
-.section sv1va
-.section sv1vb
-.section sv1wa
-.section sv1wb
-.section sv1xa
-.section sv1xb
-.section sv1ya
-.section sv1yb
-.section sv1za
-.section sv1zb
-.section sv11a
-.section sv11b
-.section sv12a
-.section sv12b
-.section sv13a
-.section sv13b
-.section sv14a
-.section sv14b
-.section sv15a
-.section sv15b
-.section sv16a
-.section sv16b
-.section sv17a
-.section sv17b
-.section sv18a
-.section sv18b
-.section sv19a
-.section sv19b
-.section sv10a
-.section sv10b
-.section sv2aa
-.section sv2ab
-.section sv2ba
-.section sv2bb
-.section sv2ca
-.section sv2cb
-.section sv2da
-.section sv2db
-.section sv2ea
-.section sv2eb
-.section sv2fa
-.section sv2fb
-.section sv2ga
-.section sv2gb
-.section sv2ha
-.section sv2hb
-.section sv2ia
-.section sv2ib
-.section sv2ja
-.section sv2jb
-.section sv2ka
-.section sv2kb
-.section sv2la
-.section sv2lb
-.section sv2ma
-.section sv2mb
-.section sv2na
-.section sv2nb
-.section sv2oa
-.section sv2ob
-.section sv2pa
-.section sv2pb
-.section sv2qa
-.section sv2qb
-.section sv2ra
-.section sv2rb
-.section sv2sa
-.section sv2sb
-.section sv2ta
-.section sv2tb
-.section sv2ua
-.section sv2ub
-.section sv2va
-.section sv2vb
-.section sv2wa
-.section sv2wb
-.section sv2xa
-.section sv2xb
-.section sv2ya
-.section sv2yb
-.section sv2za
-.section sv2zb
-.section sv21a
-.section sv21b
-.section sv22a
-.section sv22b
-.section sv23a
-.section sv23b
-.section sv24a
-.section sv24b
-.section sv25a
-.section sv25b
-.section sv26a
-.section sv26b
-.section sv27a
-.section sv27b
-.section sv28a
-.section sv28b
-.section sv29a
-.section sv29b
-.section sv20a
-.section sv20b
-.section sv3aa
-.section sv3ab
-.section sv3ba
-.section sv3bb
-.section sv3ca
-.section sv3cb
-.section sv3da
-.section sv3db
-.section sv3ea
-.section sv3eb
-.section sv3fa
-.section sv3fb
-.section sv3ga
-.section sv3gb
-.section sv3ha
-.section sv3hb
-.section sv3ia
-.section sv3ib
-.section sv3ja
-.section sv3jb
-.section sv3ka
-.section sv3kb
-.section sv3la
-.section sv3lb
-.section sv3ma
-.section sv3mb
-.section sv3na
-.section sv3nb
-.section sv3oa
-.section sv3ob
-.section sv3pa
-.section sv3pb
-.section sv3qa
-.section sv3qb
-.section sv3ra
-.section sv3rb
-.section sv3sa
-.section sv3sb
-.section sv3ta
-.section sv3tb
-.section sv3ua
-.section sv3ub
-.section sv3va
-.section sv3vb
-.section sv3wa
-.section sv3wb
-.section sv3xa
-.section sv3xb
-.section sv3ya
-.section sv3yb
-.section sv3za
-.section sv3zb
-.section sv31a
-.section sv31b
-.section sv32a
-.section sv32b
-.section sv33a
-.section sv33b
-.section sv34a
-.section sv34b
-.section sv35a
-.section sv35b
-.section sv36a
-.section sv36b
-.section sv37a
-.section sv37b
-.section sv38a
-.section sv38b
-.section sv39a
-.section sv39b
-.section sv30a
-.section sv30b
-.section sv4aa
-.section sv4ab
-.section sv4ba
-.section sv4bb
-.section sv4ca
-.section sv4cb
-.section sv4da
-.section sv4db
-.section sv4ea
-.section sv4eb
-.section sv4fa
-.section sv4fb
-.section sv4ga
-.section sv4gb
-.section sv4ha
-.section sv4hb
-.section sv4ia
-.section sv4ib
-.section sv4ja
-.section sv4jb
-.section sv4ka
-.section sv4kb
-.section sv4la
-.section sv4lb
-.section sv4ma
-.section sv4mb
-.section sv4na
-.section sv4nb
-.section sv4oa
-.section sv4ob
-.section sv4pa
-.section sv4pb
-.section sv4qa
-.section sv4qb
-.section sv4ra
-.section sv4rb
-.section sv4sa
-.section sv4sb
-.section sv4ta
-.section sv4tb
-.section sv4ua
-.section sv4ub
-.section sv4va
-.section sv4vb
-.section sv4wa
-.section sv4wb
-.section sv4xa
-.section sv4xb
-.section sv4ya
-.section sv4yb
-.section sv4za
-.section sv4zb
-.section sv41a
-.section sv41b
-.section sv42a
-.section sv42b
-.section sv43a
-.section sv43b
-.section sv44a
-.section sv44b
-.section sv45a
-.section sv45b
-.section sv46a
-.section sv46b
-.section sv47a
-.section sv47b
-.section sv48a
-.section sv48b
-.section sv49a
-.section sv49b
-.section sv40a
-.section sv40b
-.section sv5aa
-.section sv5ab
-.section sv5ba
-.section sv5bb
-.section sv5ca
-.section sv5cb
-.section sv5da
-.section sv5db
-.section sv5ea
-.section sv5eb
-.section sv5fa
-.section sv5fb
-.section sv5ga
-.section sv5gb
-.section sv5ha
-.section sv5hb
-.section sv5ia
-.section sv5ib
-.section sv5ja
-.section sv5jb
-.section sv5ka
-.section sv5kb
-.section sv5la
-.section sv5lb
-.section sv5ma
-.section sv5mb
-.section sv5na
-.section sv5nb
-.section sv5oa
-.section sv5ob
-.section sv5pa
-.section sv5pb
-.section sv5qa
-.section sv5qb
-.section sv5ra
-.section sv5rb
-.section sv5sa
-.section sv5sb
-.section sv5ta
-.section sv5tb
-.section sv5ua
-.section sv5ub
-.section sv5va
-.section sv5vb
-.section sv5wa
-.section sv5wb
-.section sv5xa
-.section sv5xb
-.section sv5ya
-.section sv5yb
-.section sv5za
-.section sv5zb
-.section sv51a
-.section sv51b
-.section sv52a
-.section sv52b
-.section sv53a
-.section sv53b
-.section sv54a
-.section sv54b
-.section sv55a
-.section sv55b
-.section sv56a
-.section sv56b
-.section sv57a
-.section sv57b
-.section sv58a
-.section sv58b
-.section sv59a
-.section sv59b
-.section sv50a
-.section sv50b
-.section sv6aa
-.section sv6ab
-.section sv6ba
-.section sv6bb
-.section sv6ca
-.section sv6cb
-.section sv6da
-.section sv6db
-.section sv6ea
-.section sv6eb
-.section sv6fa
-.section sv6fb
-.section sv6ga
-.section sv6gb
-.section sv6ha
-.section sv6hb
-.section sv6ia
-.section sv6ib
-.section sv6ja
-.section sv6jb
-.section sv6ka
-.section sv6kb
-.section sv6la
-.section sv6lb
-.section sv6ma
-.section sv6mb
-.section sv6na
-.section sv6nb
-.section sv6oa
-.section sv6ob
-.section sv6pa
-.section sv6pb
-.section sv6qa
-.section sv6qb
-.section sv6ra
-.section sv6rb
-.section sv6sa
-.section sv6sb
-.section sv6ta
-.section sv6tb
-.section sv6ua
-.section sv6ub
-.section sv6va
-.section sv6vb
-.section sv6wa
-.section sv6wb
-.section sv6xa
-.section sv6xb
-.section sv6ya
-.section sv6yb
-.section sv6za
-.section sv6zb
-.section sv61a
-.section sv61b
-.section sv62a
-.section sv62b
-.section sv63a
-.section sv63b
-.section sv64a
-.section sv64b
-.section sv65a
-.section sv65b
-.section sv66a
-.section sv66b
-.section sv67a
-.section sv67b
-.section sv68a
-.section sv68b
-.section sv69a
-.section sv69b
-.section sv60a
-.section sv60b
-.section sv7aa
-.section sv7ab
-.section sv7ba
-.section sv7bb
-.section sv7ca
-.section sv7cb
-.section sv7da
-.section sv7db
-.section sv7ea
-.section sv7eb
-.section sv7fa
-.section sv7fb
-.section sv7ga
-.section sv7gb
-.section sv7ha
-.section sv7hb
-.section sv7ia
-.section sv7ib
-.section sv7ja
-.section sv7jb
-.section sv7ka
-.section sv7kb
-.section sv7la
-.section sv7lb
-.section sv7ma
-.section sv7mb
-.section sv7na
-.section sv7nb
-.section sv7oa
-.section sv7ob
-.section sv7pa
-.section sv7pb
-.section sv7qa
-.section sv7qb
-.section sv7ra
-.section sv7rb
-.section sv7sa
-.section sv7sb
-.section sv7ta
-.section sv7tb
-.section sv7ua
-.section sv7ub
-.section sv7va
-.section sv7vb
-.section sv7wa
-.section sv7wb
-.section sv7xa
-.section sv7xb
-.section sv7ya
-.section sv7yb
-.section sv7za
-.section sv7zb
-.section sv71a
-.section sv71b
-.section sv72a
-.section sv72b
-.section sv73a
-.section sv73b
-.section sv74a
-.section sv74b
-.section sv75a
-.section sv75b
-.section sv76a
-.section sv76b
-.section sv77a
-.section sv77b
-.section sv78a
-.section sv78b
-.section sv79a
-.section sv79b
-.section sv70a
-.section sv70b
-.section sv8aa
-.section sv8ab
-.section sv8ba
-.section sv8bb
-.section sv8ca
-.section sv8cb
-.section sv8da
-.section sv8db
-.section sv8ea
-.section sv8eb
-.section sv8fa
-.section sv8fb
-.section sv8ga
-.section sv8gb
-.section sv8ha
-.section sv8hb
-.section sv8ia
-.section sv8ib
-.section sv8ja
-.section sv8jb
-.section sv8ka
-.section sv8kb
-.section sv8la
-.section sv8lb
-.section sv8ma
-.section sv8mb
-.section sv8na
-.section sv8nb
-.section sv8oa
-.section sv8ob
-.section sv8pa
-.section sv8pb
-.section sv8qa
-.section sv8qb
-.section sv8ra
-.section sv8rb
-.section sv8sa
-.section sv8sb
-.section sv8ta
-.section sv8tb
-.section sv8ua
-.section sv8ub
-.section sv8va
-.section sv8vb
-.section sv8wa
-.section sv8wb
-.section sv8xa
-.section sv8xb
-.section sv8ya
-.section sv8yb
-.section sv8za
-.section sv8zb
-.section sv81a
-.section sv81b
-.section sv82a
-.section sv82b
-.section sv83a
-.section sv83b
-.section sv84a
-.section sv84b
-.section sv85a
-.section sv85b
-.section sv86a
-.section sv86b
-.section sv87a
-.section sv87b
-.section sv88a
-.section sv88b
-.section sv89a
-.section sv89b
-.section sv80a
-.section sv80b
-.section sv9aa
-.section sv9ab
-.section sv9ba
-.section sv9bb
-.section sv9ca
-.section sv9cb
-.section sv9da
-.section sv9db
-.section sv9ea
-.section sv9eb
-.section sv9fa
-.section sv9fb
-.section sv9ga
-.section sv9gb
-.section sv9ha
-.section sv9hb
-.section sv9ia
-.section sv9ib
-.section sv9ja
-.section sv9jb
-.section sv9ka
-.section sv9kb
-.section sv9la
-.section sv9lb
-.section sv9ma
-.section sv9mb
-.section sv9na
-.section sv9nb
-.section sv9oa
-.section sv9ob
-.section sv9pa
-.section sv9pb
-.section sv9qa
-.section sv9qb
-.section sv9ra
-.section sv9rb
-.section sv9sa
-.section sv9sb
-.section sv9ta
-.section sv9tb
-.section sv9ua
-.section sv9ub
-.section sv9va
-.section sv9vb
-.section sv9wa
-.section sv9wb
-.section sv9xa
-.section sv9xb
-.section sv9ya
-.section sv9yb
-.section sv9za
-.section sv9zb
-.section sv91a
-.section sv91b
-.section sv92a
-.section sv92b
-.section sv93a
-.section sv93b
-.section sv94a
-.section sv94b
-.section sv95a
-.section sv95b
-.section sv96a
-.section sv96b
-.section sv97a
-.section sv97b
-.section sv98a
-.section sv98b
-.section sv99a
-.section sv99b
-.section sv90a
-.section sv90b
-.section sv0aa
-.section sv0ab
-.section sv0ba
-.section sv0bb
-.section sv0ca
-.section sv0cb
-.section sv0da
-.section sv0db
-.section sv0ea
-.section sv0eb
-.section sv0fa
-.section sv0fb
-.section sv0ga
-.section sv0gb
-.section sv0ha
-.section sv0hb
-.section sv0ia
-.section sv0ib
-.section sv0ja
-.section sv0jb
-.section sv0ka
-.section sv0kb
-.section sv0la
-.section sv0lb
-.section sv0ma
-.section sv0mb
-.section sv0na
-.section sv0nb
-.section sv0oa
-.section sv0ob
-.section sv0pa
-.section sv0pb
-.section sv0qa
-.section sv0qb
-.section sv0ra
-.section sv0rb
-.section sv0sa
-.section sv0sb
-.section sv0ta
-.section sv0tb
-.section sv0ua
-.section sv0ub
-.section sv0va
-.section sv0vb
-.section sv0wa
-.section sv0wb
-.section sv0xa
-.section sv0xb
-.section sv0ya
-.section sv0yb
-.section sv0za
-.section sv0zb
-.section sv01a
-.section sv01b
-.section sv02a
-.section sv02b
-.section sv03a
-.section sv03b
-.section sv04a
-.section sv04b
-.section sv05a
-.section sv05b
-.section sv06a
-.section sv06b
-.section sv07a
-.section sv07b
-.section sv08a
-.section sv08b
-.section sv09a
-.section sv09b
-.section sv00a
-.section sv00b
-.section swaaa
-.section swaab
-.section swaba
-.section swabb
-.section swaca
-.section swacb
-.section swada
-.section swadb
-.section swaea
-.section swaeb
-.section swafa
-.section swafb
-.section swaga
-.section swagb
-.section swaha
-.section swahb
-.section swaia
-.section swaib
-.section swaja
-.section swajb
-.section swaka
-.section swakb
-.section swala
-.section swalb
-.section swama
-.section swamb
-.section swana
-.section swanb
-.section swaoa
-.section swaob
-.section swapa
-.section swapb
-.section swaqa
-.section swaqb
-.section swara
-.section swarb
-.section swasa
-.section swasb
-.section swata
-.section swatb
-.section swaua
-.section swaub
-.section swava
-.section swavb
-.section swawa
-.section swawb
-.section swaxa
-.section swaxb
-.section swaya
-.section swayb
-.section swaza
-.section swazb
-.section swa1a
-.section swa1b
-.section swa2a
-.section swa2b
-.section swa3a
-.section swa3b
-.section swa4a
-.section swa4b
-.section swa5a
-.section swa5b
-.section swa6a
-.section swa6b
-.section swa7a
-.section swa7b
-.section swa8a
-.section swa8b
-.section swa9a
-.section swa9b
-.section swa0a
-.section swa0b
-.section swbaa
-.section swbab
-.section swbba
-.section swbbb
-.section swbca
-.section swbcb
-.section swbda
-.section swbdb
-.section swbea
-.section swbeb
-.section swbfa
-.section swbfb
-.section swbga
-.section swbgb
-.section swbha
-.section swbhb
-.section swbia
-.section swbib
-.section swbja
-.section swbjb
-.section swbka
-.section swbkb
-.section swbla
-.section swblb
-.section swbma
-.section swbmb
-.section swbna
-.section swbnb
-.section swboa
-.section swbob
-.section swbpa
-.section swbpb
-.section swbqa
-.section swbqb
-.section swbra
-.section swbrb
-.section swbsa
-.section swbsb
-.section swbta
-.section swbtb
-.section swbua
-.section swbub
-.section swbva
-.section swbvb
-.section swbwa
-.section swbwb
-.section swbxa
-.section swbxb
-.section swbya
-.section swbyb
-.section swbza
-.section swbzb
-.section swb1a
-.section swb1b
-.section swb2a
-.section swb2b
-.section swb3a
-.section swb3b
-.section swb4a
-.section swb4b
-.section swb5a
-.section swb5b
-.section swb6a
-.section swb6b
-.section swb7a
-.section swb7b
-.section swb8a
-.section swb8b
-.section swb9a
-.section swb9b
-.section swb0a
-.section swb0b
-.section swcaa
-.section swcab
-.section swcba
-.section swcbb
-.section swcca
-.section swccb
-.section swcda
-.section swcdb
-.section swcea
-.section swceb
-.section swcfa
-.section swcfb
-.section swcga
-.section swcgb
-.section swcha
-.section swchb
-.section swcia
-.section swcib
-.section swcja
-.section swcjb
-.section swcka
-.section swckb
-.section swcla
-.section swclb
-.section swcma
-.section swcmb
-.section swcna
-.section swcnb
-.section swcoa
-.section swcob
-.section swcpa
-.section swcpb
-.section swcqa
-.section swcqb
-.section swcra
-.section swcrb
-.section swcsa
-.section swcsb
-.section swcta
-.section swctb
-.section swcua
-.section swcub
-.section swcva
-.section swcvb
-.section swcwa
-.section swcwb
-.section swcxa
-.section swcxb
-.section swcya
-.section swcyb
-.section swcza
-.section swczb
-.section swc1a
-.section swc1b
-.section swc2a
-.section swc2b
-.section swc3a
-.section swc3b
-.section swc4a
-.section swc4b
-.section swc5a
-.section swc5b
-.section swc6a
-.section swc6b
-.section swc7a
-.section swc7b
-.section swc8a
-.section swc8b
-.section swc9a
-.section swc9b
-.section swc0a
-.section swc0b
-.section swdaa
-.section swdab
-.section swdba
-.section swdbb
-.section swdca
-.section swdcb
-.section swdda
-.section swddb
-.section swdea
-.section swdeb
-.section swdfa
-.section swdfb
-.section swdga
-.section swdgb
-.section swdha
-.section swdhb
-.section swdia
-.section swdib
-.section swdja
-.section swdjb
-.section swdka
-.section swdkb
-.section swdla
-.section swdlb
-.section swdma
-.section swdmb
-.section swdna
-.section swdnb
-.section swdoa
-.section swdob
-.section swdpa
-.section swdpb
-.section swdqa
-.section swdqb
-.section swdra
-.section swdrb
-.section swdsa
-.section swdsb
-.section swdta
-.section swdtb
-.section swdua
-.section swdub
-.section swdva
-.section swdvb
-.section swdwa
-.section swdwb
-.section swdxa
-.section swdxb
-.section swdya
-.section swdyb
-.section swdza
-.section swdzb
-.section swd1a
-.section swd1b
-.section swd2a
-.section swd2b
-.section swd3a
-.section swd3b
-.section swd4a
-.section swd4b
-.section swd5a
-.section swd5b
-.section swd6a
-.section swd6b
-.section swd7a
-.section swd7b
-.section swd8a
-.section swd8b
-.section swd9a
-.section swd9b
-.section swd0a
-.section swd0b
-.section sweaa
-.section sweab
-.section sweba
-.section swebb
-.section sweca
-.section swecb
-.section sweda
-.section swedb
-.section sweea
-.section sweeb
-.section swefa
-.section swefb
-.section swega
-.section swegb
-.section sweha
-.section swehb
-.section sweia
-.section sweib
-.section sweja
-.section swejb
-.section sweka
-.section swekb
-.section swela
-.section swelb
-.section swema
-.section swemb
-.section swena
-.section swenb
-.section sweoa
-.section sweob
-.section swepa
-.section swepb
-.section sweqa
-.section sweqb
-.section swera
-.section swerb
-.section swesa
-.section swesb
-.section sweta
-.section swetb
-.section sweua
-.section sweub
-.section sweva
-.section swevb
-.section swewa
-.section swewb
-.section swexa
-.section swexb
-.section sweya
-.section sweyb
-.section sweza
-.section swezb
-.section swe1a
-.section swe1b
-.section swe2a
-.section swe2b
-.section swe3a
-.section swe3b
-.section swe4a
-.section swe4b
-.section swe5a
-.section swe5b
-.section swe6a
-.section swe6b
-.section swe7a
-.section swe7b
-.section swe8a
-.section swe8b
-.section swe9a
-.section swe9b
-.section swe0a
-.section swe0b
-.section swfaa
-.section swfab
-.section swfba
-.section swfbb
-.section swfca
-.section swfcb
-.section swfda
-.section swfdb
-.section swfea
-.section swfeb
-.section swffa
-.section swffb
-.section swfga
-.section swfgb
-.section swfha
-.section swfhb
-.section swfia
-.section swfib
-.section swfja
-.section swfjb
-.section swfka
-.section swfkb
-.section swfla
-.section swflb
-.section swfma
-.section swfmb
-.section swfna
-.section swfnb
-.section swfoa
-.section swfob
-.section swfpa
-.section swfpb
-.section swfqa
-.section swfqb
-.section swfra
-.section swfrb
-.section swfsa
-.section swfsb
-.section swfta
-.section swftb
-.section swfua
-.section swfub
-.section swfva
-.section swfvb
-.section swfwa
-.section swfwb
-.section swfxa
-.section swfxb
-.section swfya
-.section swfyb
-.section swfza
-.section swfzb
-.section swf1a
-.section swf1b
-.section swf2a
-.section swf2b
-.section swf3a
-.section swf3b
-.section swf4a
-.section swf4b
-.section swf5a
-.section swf5b
-.section swf6a
-.section swf6b
-.section swf7a
-.section swf7b
-.section swf8a
-.section swf8b
-.section swf9a
-.section swf9b
-.section swf0a
-.section swf0b
-.section swgaa
-.section swgab
-.section swgba
-.section swgbb
-.section swgca
-.section swgcb
-.section swgda
-.section swgdb
-.section swgea
-.section swgeb
-.section swgfa
-.section swgfb
-.section swgga
-.section swggb
-.section swgha
-.section swghb
-.section swgia
-.section swgib
-.section swgja
-.section swgjb
-.section swgka
-.section swgkb
-.section swgla
-.section swglb
-.section swgma
-.section swgmb
-.section swgna
-.section swgnb
-.section swgoa
-.section swgob
-.section swgpa
-.section swgpb
-.section swgqa
-.section swgqb
-.section swgra
-.section swgrb
-.section swgsa
-.section swgsb
-.section swgta
-.section swgtb
-.section swgua
-.section swgub
-.section swgva
-.section swgvb
-.section swgwa
-.section swgwb
-.section swgxa
-.section swgxb
-.section swgya
-.section swgyb
-.section swgza
-.section swgzb
-.section swg1a
-.section swg1b
-.section swg2a
-.section swg2b
-.section swg3a
-.section swg3b
-.section swg4a
-.section swg4b
-.section swg5a
-.section swg5b
-.section swg6a
-.section swg6b
-.section swg7a
-.section swg7b
-.section swg8a
-.section swg8b
-.section swg9a
-.section swg9b
-.section swg0a
-.section swg0b
-.section swhaa
-.section swhab
-.section swhba
-.section swhbb
-.section swhca
-.section swhcb
-.section swhda
-.section swhdb
-.section swhea
-.section swheb
-.section swhfa
-.section swhfb
-.section swhga
-.section swhgb
-.section swhha
-.section swhhb
-.section swhia
-.section swhib
-.section swhja
-.section swhjb
-.section swhka
-.section swhkb
-.section swhla
-.section swhlb
-.section swhma
-.section swhmb
-.section swhna
-.section swhnb
-.section swhoa
-.section swhob
-.section swhpa
-.section swhpb
-.section swhqa
-.section swhqb
-.section swhra
-.section swhrb
-.section swhsa
-.section swhsb
-.section swhta
-.section swhtb
-.section swhua
-.section swhub
-.section swhva
-.section swhvb
-.section swhwa
-.section swhwb
-.section swhxa
-.section swhxb
-.section swhya
-.section swhyb
-.section swhza
-.section swhzb
-.section swh1a
-.section swh1b
-.section swh2a
-.section swh2b
-.section swh3a
-.section swh3b
-.section swh4a
-.section swh4b
-.section swh5a
-.section swh5b
-.section swh6a
-.section swh6b
-.section swh7a
-.section swh7b
-.section swh8a
-.section swh8b
-.section swh9a
-.section swh9b
-.section swh0a
-.section swh0b
-.section swiaa
-.section swiab
-.section swiba
-.section swibb
-.section swica
-.section swicb
-.section swida
-.section swidb
-.section swiea
-.section swieb
-.section swifa
-.section swifb
-.section swiga
-.section swigb
-.section swiha
-.section swihb
-.section swiia
-.section swiib
-.section swija
-.section swijb
-.section swika
-.section swikb
-.section swila
-.section swilb
-.section swima
-.section swimb
-.section swina
-.section swinb
-.section swioa
-.section swiob
-.section swipa
-.section swipb
-.section swiqa
-.section swiqb
-.section swira
-.section swirb
-.section swisa
-.section swisb
-.section swita
-.section switb
-.section swiua
-.section swiub
-.section swiva
-.section swivb
-.section swiwa
-.section swiwb
-.section swixa
-.section swixb
-.section swiya
-.section swiyb
-.section swiza
-.section swizb
-.section swi1a
-.section swi1b
-.section swi2a
-.section swi2b
-.section swi3a
-.section swi3b
-.section swi4a
-.section swi4b
-.section swi5a
-.section swi5b
-.section swi6a
-.section swi6b
-.section swi7a
-.section swi7b
-.section swi8a
-.section swi8b
-.section swi9a
-.section swi9b
-.section swi0a
-.section swi0b
-.section swjaa
-.section swjab
-.section swjba
-.section swjbb
-.section swjca
-.section swjcb
-.section swjda
-.section swjdb
-.section swjea
-.section swjeb
-.section swjfa
-.section swjfb
-.section swjga
-.section swjgb
-.section swjha
-.section swjhb
-.section swjia
-.section swjib
-.section swjja
-.section swjjb
-.section swjka
-.section swjkb
-.section swjla
-.section swjlb
-.section swjma
-.section swjmb
-.section swjna
-.section swjnb
-.section swjoa
-.section swjob
-.section swjpa
-.section swjpb
-.section swjqa
-.section swjqb
-.section swjra
-.section swjrb
-.section swjsa
-.section swjsb
-.section swjta
-.section swjtb
-.section swjua
-.section swjub
-.section swjva
-.section swjvb
-.section swjwa
-.section swjwb
-.section swjxa
-.section swjxb
-.section swjya
-.section swjyb
-.section swjza
-.section swjzb
-.section swj1a
-.section swj1b
-.section swj2a
-.section swj2b
-.section swj3a
-.section swj3b
-.section swj4a
-.section swj4b
-.section swj5a
-.section swj5b
-.section swj6a
-.section swj6b
-.section swj7a
-.section swj7b
-.section swj8a
-.section swj8b
-.section swj9a
-.section swj9b
-.section swj0a
-.section swj0b
-.section swkaa
-.section swkab
-.section swkba
-.section swkbb
-.section swkca
-.section swkcb
-.section swkda
-.section swkdb
-.section swkea
-.section swkeb
-.section swkfa
-.section swkfb
-.section swkga
-.section swkgb
-.section swkha
-.section swkhb
-.section swkia
-.section swkib
-.section swkja
-.section swkjb
-.section swkka
-.section swkkb
-.section swkla
-.section swklb
-.section swkma
-.section swkmb
-.section swkna
-.section swknb
-.section swkoa
-.section swkob
-.section swkpa
-.section swkpb
-.section swkqa
-.section swkqb
-.section swkra
-.section swkrb
-.section swksa
-.section swksb
-.section swkta
-.section swktb
-.section swkua
-.section swkub
-.section swkva
-.section swkvb
-.section swkwa
-.section swkwb
-.section swkxa
-.section swkxb
-.section swkya
-.section swkyb
-.section swkza
-.section swkzb
-.section swk1a
-.section swk1b
-.section swk2a
-.section swk2b
-.section swk3a
-.section swk3b
-.section swk4a
-.section swk4b
-.section swk5a
-.section swk5b
-.section swk6a
-.section swk6b
-.section swk7a
-.section swk7b
-.section swk8a
-.section swk8b
-.section swk9a
-.section swk9b
-.section swk0a
-.section swk0b
-.section swlaa
-.section swlab
-.section swlba
-.section swlbb
-.section swlca
-.section swlcb
-.section swlda
-.section swldb
-.section swlea
-.section swleb
-.section swlfa
-.section swlfb
-.section swlga
-.section swlgb
-.section swlha
-.section swlhb
-.section swlia
-.section swlib
-.section swlja
-.section swljb
-.section swlka
-.section swlkb
-.section swlla
-.section swllb
-.section swlma
-.section swlmb
-.section swlna
-.section swlnb
-.section swloa
-.section swlob
-.section swlpa
-.section swlpb
-.section swlqa
-.section swlqb
-.section swlra
-.section swlrb
-.section swlsa
-.section swlsb
-.section swlta
-.section swltb
-.section swlua
-.section swlub
-.section swlva
-.section swlvb
-.section swlwa
-.section swlwb
-.section swlxa
-.section swlxb
-.section swlya
-.section swlyb
-.section swlza
-.section swlzb
-.section swl1a
-.section swl1b
-.section swl2a
-.section swl2b
-.section swl3a
-.section swl3b
-.section swl4a
-.section swl4b
-.section swl5a
-.section swl5b
-.section swl6a
-.section swl6b
-.section swl7a
-.section swl7b
-.section swl8a
-.section swl8b
-.section swl9a
-.section swl9b
-.section swl0a
-.section swl0b
-.section swmaa
-.section swmab
-.section swmba
-.section swmbb
-.section swmca
-.section swmcb
-.section swmda
-.section swmdb
-.section swmea
-.section swmeb
-.section swmfa
-.section swmfb
-.section swmga
-.section swmgb
-.section swmha
-.section swmhb
-.section swmia
-.section swmib
-.section swmja
-.section swmjb
-.section swmka
-.section swmkb
-.section swmla
-.section swmlb
-.section swmma
-.section swmmb
-.section swmna
-.section swmnb
-.section swmoa
-.section swmob
-.section swmpa
-.section swmpb
-.section swmqa
-.section swmqb
-.section swmra
-.section swmrb
-.section swmsa
-.section swmsb
-.section swmta
-.section swmtb
-.section swmua
-.section swmub
-.section swmva
-.section swmvb
-.section swmwa
-.section swmwb
-.section swmxa
-.section swmxb
-.section swmya
-.section swmyb
-.section swmza
-.section swmzb
-.section swm1a
-.section swm1b
-.section swm2a
-.section swm2b
-.section swm3a
-.section swm3b
-.section swm4a
-.section swm4b
-.section swm5a
-.section swm5b
-.section swm6a
-.section swm6b
-.section swm7a
-.section swm7b
-.section swm8a
-.section swm8b
-.section swm9a
-.section swm9b
-.section swm0a
-.section swm0b
-.section swnaa
-.section swnab
-.section swnba
-.section swnbb
-.section swnca
-.section swncb
-.section swnda
-.section swndb
-.section swnea
-.section swneb
-.section swnfa
-.section swnfb
-.section swnga
-.section swngb
-.section swnha
-.section swnhb
-.section swnia
-.section swnib
-.section swnja
-.section swnjb
-.section swnka
-.section swnkb
-.section swnla
-.section swnlb
-.section swnma
-.section swnmb
-.section swnna
-.section swnnb
-.section swnoa
-.section swnob
-.section swnpa
-.section swnpb
-.section swnqa
-.section swnqb
-.section swnra
-.section swnrb
-.section swnsa
-.section swnsb
-.section swnta
-.section swntb
-.section swnua
-.section swnub
-.section swnva
-.section swnvb
-.section swnwa
-.section swnwb
-.section swnxa
-.section swnxb
-.section swnya
-.section swnyb
-.section swnza
-.section swnzb
-.section swn1a
-.section swn1b
-.section swn2a
-.section swn2b
-.section swn3a
-.section swn3b
-.section swn4a
-.section swn4b
-.section swn5a
-.section swn5b
-.section swn6a
-.section swn6b
-.section swn7a
-.section swn7b
-.section swn8a
-.section swn8b
-.section swn9a
-.section swn9b
-.section swn0a
-.section swn0b
-.section swoaa
-.section swoab
-.section swoba
-.section swobb
-.section swoca
-.section swocb
-.section swoda
-.section swodb
-.section swoea
-.section swoeb
-.section swofa
-.section swofb
-.section swoga
-.section swogb
-.section swoha
-.section swohb
-.section swoia
-.section swoib
-.section swoja
-.section swojb
-.section swoka
-.section swokb
-.section swola
-.section swolb
-.section swoma
-.section swomb
-.section swona
-.section swonb
-.section swooa
-.section swoob
-.section swopa
-.section swopb
-.section swoqa
-.section swoqb
-.section swora
-.section sworb
-.section swosa
-.section swosb
-.section swota
-.section swotb
-.section swoua
-.section swoub
-.section swova
-.section swovb
-.section swowa
-.section swowb
-.section swoxa
-.section swoxb
-.section swoya
-.section swoyb
-.section swoza
-.section swozb
-.section swo1a
-.section swo1b
-.section swo2a
-.section swo2b
-.section swo3a
-.section swo3b
-.section swo4a
-.section swo4b
-.section swo5a
-.section swo5b
-.section swo6a
-.section swo6b
-.section swo7a
-.section swo7b
-.section swo8a
-.section swo8b
-.section swo9a
-.section swo9b
-.section swo0a
-.section swo0b
-.section swpaa
-.section swpab
-.section swpba
-.section swpbb
-.section swpca
-.section swpcb
-.section swpda
-.section swpdb
-.section swpea
-.section swpeb
-.section swpfa
-.section swpfb
-.section swpga
-.section swpgb
-.section swpha
-.section swphb
-.section swpia
-.section swpib
-.section swpja
-.section swpjb
-.section swpka
-.section swpkb
-.section swpla
-.section swplb
-.section swpma
-.section swpmb
-.section swpna
-.section swpnb
-.section swpoa
-.section swpob
-.section swppa
-.section swppb
-.section swpqa
-.section swpqb
-.section swpra
-.section swprb
-.section swpsa
-.section swpsb
-.section swpta
-.section swptb
-.section swpua
-.section swpub
-.section swpva
-.section swpvb
-.section swpwa
-.section swpwb
-.section swpxa
-.section swpxb
-.section swpya
-.section swpyb
-.section swpza
-.section swpzb
-.section swp1a
-.section swp1b
-.section swp2a
-.section swp2b
-.section swp3a
-.section swp3b
-.section swp4a
-.section swp4b
-.section swp5a
-.section swp5b
-.section swp6a
-.section swp6b
-.section swp7a
-.section swp7b
-.section swp8a
-.section swp8b
-.section swp9a
-.section swp9b
-.section swp0a
-.section swp0b
-.section swqaa
-.section swqab
-.section swqba
-.section swqbb
-.section swqca
-.section swqcb
-.section swqda
-.section swqdb
-.section swqea
-.section swqeb
-.section swqfa
-.section swqfb
-.section swqga
-.section swqgb
-.section swqha
-.section swqhb
-.section swqia
-.section swqib
-.section swqja
-.section swqjb
-.section swqka
-.section swqkb
-.section swqla
-.section swqlb
-.section swqma
-.section swqmb
-.section swqna
-.section swqnb
-.section swqoa
-.section swqob
-.section swqpa
-.section swqpb
-.section swqqa
-.section swqqb
-.section swqra
-.section swqrb
-.section swqsa
-.section swqsb
-.section swqta
-.section swqtb
-.section swqua
-.section swqub
-.section swqva
-.section swqvb
-.section swqwa
-.section swqwb
-.section swqxa
-.section swqxb
-.section swqya
-.section swqyb
-.section swqza
-.section swqzb
-.section swq1a
-.section swq1b
-.section swq2a
-.section swq2b
-.section swq3a
-.section swq3b
-.section swq4a
-.section swq4b
-.section swq5a
-.section swq5b
-.section swq6a
-.section swq6b
-.section swq7a
-.section swq7b
-.section swq8a
-.section swq8b
-.section swq9a
-.section swq9b
-.section swq0a
-.section swq0b
-.section swraa
-.section swrab
-.section swrba
-.section swrbb
-.section swrca
-.section swrcb
-.section swrda
-.section swrdb
-.section swrea
-.section swreb
-.section swrfa
-.section swrfb
-.section swrga
-.section swrgb
-.section swrha
-.section swrhb
-.section swria
-.section swrib
-.section swrja
-.section swrjb
-.section swrka
-.section swrkb
-.section swrla
-.section swrlb
-.section swrma
-.section swrmb
-.section swrna
-.section swrnb
-.section swroa
-.section swrob
-.section swrpa
-.section swrpb
-.section swrqa
-.section swrqb
-.section swrra
-.section swrrb
-.section swrsa
-.section swrsb
-.section swrta
-.section swrtb
-.section swrua
-.section swrub
-.section swrva
-.section swrvb
-.section swrwa
-.section swrwb
-.section swrxa
-.section swrxb
-.section swrya
-.section swryb
-.section swrza
-.section swrzb
-.section swr1a
-.section swr1b
-.section swr2a
-.section swr2b
-.section swr3a
-.section swr3b
-.section swr4a
-.section swr4b
-.section swr5a
-.section swr5b
-.section swr6a
-.section swr6b
-.section swr7a
-.section swr7b
-.section swr8a
-.section swr8b
-.section swr9a
-.section swr9b
-.section swr0a
-.section swr0b
-.section swsaa
-.section swsab
-.section swsba
-.section swsbb
-.section swsca
-.section swscb
-.section swsda
-.section swsdb
-.section swsea
-.section swseb
-.section swsfa
-.section swsfb
-.section swsga
-.section swsgb
-.section swsha
-.section swshb
-.section swsia
-.section swsib
-.section swsja
-.section swsjb
-.section swska
-.section swskb
-.section swsla
-.section swslb
-.section swsma
-.section swsmb
-.section swsna
-.section swsnb
-.section swsoa
-.section swsob
-.section swspa
-.section swspb
-.section swsqa
-.section swsqb
-.section swsra
-.section swsrb
-.section swssa
-.section swssb
-.section swsta
-.section swstb
-.section swsua
-.section swsub
-.section swsva
-.section swsvb
-.section swswa
-.section swswb
-.section swsxa
-.section swsxb
-.section swsya
-.section swsyb
-.section swsza
-.section swszb
-.section sws1a
-.section sws1b
-.section sws2a
-.section sws2b
-.section sws3a
-.section sws3b
-.section sws4a
-.section sws4b
-.section sws5a
-.section sws5b
-.section sws6a
-.section sws6b
-.section sws7a
-.section sws7b
-.section sws8a
-.section sws8b
-.section sws9a
-.section sws9b
-.section sws0a
-.section sws0b
-.section swtaa
-.section swtab
-.section swtba
-.section swtbb
-.section swtca
-.section swtcb
-.section swtda
-.section swtdb
-.section swtea
-.section swteb
-.section swtfa
-.section swtfb
-.section swtga
-.section swtgb
-.section swtha
-.section swthb
-.section swtia
-.section swtib
-.section swtja
-.section swtjb
-.section swtka
-.section swtkb
-.section swtla
-.section swtlb
-.section swtma
-.section swtmb
-.section swtna
-.section swtnb
-.section swtoa
-.section swtob
-.section swtpa
-.section swtpb
-.section swtqa
-.section swtqb
-.section swtra
-.section swtrb
-.section swtsa
-.section swtsb
-.section swtta
-.section swttb
-.section swtua
-.section swtub
-.section swtva
-.section swtvb
-.section swtwa
-.section swtwb
-.section swtxa
-.section swtxb
-.section swtya
-.section swtyb
-.section swtza
-.section swtzb
-.section swt1a
-.section swt1b
-.section swt2a
-.section swt2b
-.section swt3a
-.section swt3b
-.section swt4a
-.section swt4b
-.section swt5a
-.section swt5b
-.section swt6a
-.section swt6b
-.section swt7a
-.section swt7b
-.section swt8a
-.section swt8b
-.section swt9a
-.section swt9b
-.section swt0a
-.section swt0b
-.section swuaa
-.section swuab
-.section swuba
-.section swubb
-.section swuca
-.section swucb
-.section swuda
-.section swudb
-.section swuea
-.section swueb
-.section swufa
-.section swufb
-.section swuga
-.section swugb
-.section swuha
-.section swuhb
-.section swuia
-.section swuib
-.section swuja
-.section swujb
-.section swuka
-.section swukb
-.section swula
-.section swulb
-.section swuma
-.section swumb
-.section swuna
-.section swunb
-.section swuoa
-.section swuob
-.section swupa
-.section swupb
-.section swuqa
-.section swuqb
-.section swura
-.section swurb
-.section swusa
-.section swusb
-.section swuta
-.section swutb
-.section swuua
-.section swuub
-.section swuva
-.section swuvb
-.section swuwa
-.section swuwb
-.section swuxa
-.section swuxb
-.section swuya
-.section swuyb
-.section swuza
-.section swuzb
-.section swu1a
-.section swu1b
-.section swu2a
-.section swu2b
-.section swu3a
-.section swu3b
-.section swu4a
-.section swu4b
-.section swu5a
-.section swu5b
-.section swu6a
-.section swu6b
-.section swu7a
-.section swu7b
-.section swu8a
-.section swu8b
-.section swu9a
-.section swu9b
-.section swu0a
-.section swu0b
-.section swvaa
-.section swvab
-.section swvba
-.section swvbb
-.section swvca
-.section swvcb
-.section swvda
-.section swvdb
-.section swvea
-.section swveb
-.section swvfa
-.section swvfb
-.section swvga
-.section swvgb
-.section swvha
-.section swvhb
-.section swvia
-.section swvib
-.section swvja
-.section swvjb
-.section swvka
-.section swvkb
-.section swvla
-.section swvlb
-.section swvma
-.section swvmb
-.section swvna
-.section swvnb
-.section swvoa
-.section swvob
-.section swvpa
-.section swvpb
-.section swvqa
-.section swvqb
-.section swvra
-.section swvrb
-.section swvsa
-.section swvsb
-.section swvta
-.section swvtb
-.section swvua
-.section swvub
-.section swvva
-.section swvvb
-.section swvwa
-.section swvwb
-.section swvxa
-.section swvxb
-.section swvya
-.section swvyb
-.section swvza
-.section swvzb
-.section swv1a
-.section swv1b
-.section swv2a
-.section swv2b
-.section swv3a
-.section swv3b
-.section swv4a
-.section swv4b
-.section swv5a
-.section swv5b
-.section swv6a
-.section swv6b
-.section swv7a
-.section swv7b
-.section swv8a
-.section swv8b
-.section swv9a
-.section swv9b
-.section swv0a
-.section swv0b
-.section swwaa
-.section swwab
-.section swwba
-.section swwbb
-.section swwca
-.section swwcb
-.section swwda
-.section swwdb
-.section swwea
-.section swweb
-.section swwfa
-.section swwfb
-.section swwga
-.section swwgb
-.section swwha
-.section swwhb
-.section swwia
-.section swwib
-.section swwja
-.section swwjb
-.section swwka
-.section swwkb
-.section swwla
-.section swwlb
-.section swwma
-.section swwmb
-.section swwna
-.section swwnb
-.section swwoa
-.section swwob
-.section swwpa
-.section swwpb
-.section swwqa
-.section swwqb
-.section swwra
-.section swwrb
-.section swwsa
-.section swwsb
-.section swwta
-.section swwtb
-.section swwua
-.section swwub
-.section swwva
-.section swwvb
-.section swwwa
-.section swwwb
-.section swwxa
-.section swwxb
-.section swwya
-.section swwyb
-.section swwza
-.section swwzb
-.section sww1a
-.section sww1b
-.section sww2a
-.section sww2b
-.section sww3a
-.section sww3b
-.section sww4a
-.section sww4b
-.section sww5a
-.section sww5b
-.section sww6a
-.section sww6b
-.section sww7a
-.section sww7b
-.section sww8a
-.section sww8b
-.section sww9a
-.section sww9b
-.section sww0a
-.section sww0b
-.section swxaa
-.section swxab
-.section swxba
-.section swxbb
-.section swxca
-.section swxcb
-.section swxda
-.section swxdb
-.section swxea
-.section swxeb
-.section swxfa
-.section swxfb
-.section swxga
-.section swxgb
-.section swxha
-.section swxhb
-.section swxia
-.section swxib
-.section swxja
-.section swxjb
-.section swxka
-.section swxkb
-.section swxla
-.section swxlb
-.section swxma
-.section swxmb
-.section swxna
-.section swxnb
-.section swxoa
-.section swxob
-.section swxpa
-.section swxpb
-.section swxqa
-.section swxqb
-.section swxra
-.section swxrb
-.section swxsa
-.section swxsb
-.section swxta
-.section swxtb
-.section swxua
-.section swxub
-.section swxva
-.section swxvb
-.section swxwa
-.section swxwb
-.section swxxa
-.section swxxb
-.section swxya
-.section swxyb
-.section swxza
-.section swxzb
-.section swx1a
-.section swx1b
-.section swx2a
-.section swx2b
-.section swx3a
-.section swx3b
-.section swx4a
-.section swx4b
-.section swx5a
-.section swx5b
-.section swx6a
-.section swx6b
-.section swx7a
-.section swx7b
-.section swx8a
-.section swx8b
-.section swx9a
-.section swx9b
-.section swx0a
-.section swx0b
-.section swyaa
-.section swyab
-.section swyba
-.section swybb
-.section swyca
-.section swycb
-.section swyda
-.section swydb
-.section swyea
-.section swyeb
-.section swyfa
-.section swyfb
-.section swyga
-.section swygb
-.section swyha
-.section swyhb
-.section swyia
-.section swyib
-.section swyja
-.section swyjb
-.section swyka
-.section swykb
-.section swyla
-.section swylb
-.section swyma
-.section swymb
-.section swyna
-.section swynb
-.section swyoa
-.section swyob
-.section swypa
-.section swypb
-.section swyqa
-.section swyqb
-.section swyra
-.section swyrb
-.section swysa
-.section swysb
-.section swyta
-.section swytb
-.section swyua
-.section swyub
-.section swyva
-.section swyvb
-.section swywa
-.section swywb
-.section swyxa
-.section swyxb
-.section swyya
-.section swyyb
-.section swyza
-.section swyzb
-.section swy1a
-.section swy1b
-.section swy2a
-.section swy2b
-.section swy3a
-.section swy3b
-.section swy4a
-.section swy4b
-.section swy5a
-.section swy5b
-.section swy6a
-.section swy6b
-.section swy7a
-.section swy7b
-.section swy8a
-.section swy8b
-.section swy9a
-.section swy9b
-.section swy0a
-.section swy0b
-.section swzaa
-.section swzab
-.section swzba
-.section swzbb
-.section swzca
-.section swzcb
-.section swzda
-.section swzdb
-.section swzea
-.section swzeb
-.section swzfa
-.section swzfb
-.section swzga
-.section swzgb
-.section swzha
-.section swzhb
-.section swzia
-.section swzib
-.section swzja
-.section swzjb
-.section swzka
-.section swzkb
-.section swzla
-.section swzlb
-.section swzma
-.section swzmb
-.section swzna
-.section swznb
-.section swzoa
-.section swzob
-.section swzpa
-.section swzpb
-.section swzqa
-.section swzqb
-.section swzra
-.section swzrb
-.section swzsa
-.section swzsb
-.section swzta
-.section swztb
-.section swzua
-.section swzub
-.section swzva
-.section swzvb
-.section swzwa
-.section swzwb
-.section swzxa
-.section swzxb
-.section swzya
-.section swzyb
-.section swzza
-.section swzzb
-.section swz1a
-.section swz1b
-.section swz2a
-.section swz2b
-.section swz3a
-.section swz3b
-.section swz4a
-.section swz4b
-.section swz5a
-.section swz5b
-.section swz6a
-.section swz6b
-.section swz7a
-.section swz7b
-.section swz8a
-.section swz8b
-.section swz9a
-.section swz9b
-.section swz0a
-.section swz0b
-.section sw1aa
-.section sw1ab
-.section sw1ba
-.section sw1bb
-.section sw1ca
-.section sw1cb
-.section sw1da
-.section sw1db
-.section sw1ea
-.section sw1eb
-.section sw1fa
-.section sw1fb
-.section sw1ga
-.section sw1gb
-.section sw1ha
-.section sw1hb
-.section sw1ia
-.section sw1ib
-.section sw1ja
-.section sw1jb
-.section sw1ka
-.section sw1kb
-.section sw1la
-.section sw1lb
-.section sw1ma
-.section sw1mb
-.section sw1na
-.section sw1nb
-.section sw1oa
-.section sw1ob
-.section sw1pa
-.section sw1pb
-.section sw1qa
-.section sw1qb
-.section sw1ra
-.section sw1rb
-.section sw1sa
-.section sw1sb
-.section sw1ta
-.section sw1tb
-.section sw1ua
-.section sw1ub
-.section sw1va
-.section sw1vb
-.section sw1wa
-.section sw1wb
-.section sw1xa
-.section sw1xb
-.section sw1ya
-.section sw1yb
-.section sw1za
-.section sw1zb
-.section sw11a
-.section sw11b
-.section sw12a
-.section sw12b
-.section sw13a
-.section sw13b
-.section sw14a
-.section sw14b
-.section sw15a
-.section sw15b
-.section sw16a
-.section sw16b
-.section sw17a
-.section sw17b
-.section sw18a
-.section sw18b
-.section sw19a
-.section sw19b
-.section sw10a
-.section sw10b
-.section sw2aa
-.section sw2ab
-.section sw2ba
-.section sw2bb
-.section sw2ca
-.section sw2cb
-.section sw2da
-.section sw2db
-.section sw2ea
-.section sw2eb
-.section sw2fa
-.section sw2fb
-.section sw2ga
-.section sw2gb
-.section sw2ha
-.section sw2hb
-.section sw2ia
-.section sw2ib
-.section sw2ja
-.section sw2jb
-.section sw2ka
-.section sw2kb
-.section sw2la
-.section sw2lb
-.section sw2ma
-.section sw2mb
-.section sw2na
-.section sw2nb
-.section sw2oa
-.section sw2ob
-.section sw2pa
-.section sw2pb
-.section sw2qa
-.section sw2qb
-.section sw2ra
-.section sw2rb
-.section sw2sa
-.section sw2sb
-.section sw2ta
-.section sw2tb
-.section sw2ua
-.section sw2ub
-.section sw2va
-.section sw2vb
-.section sw2wa
-.section sw2wb
-.section sw2xa
-.section sw2xb
-.section sw2ya
-.section sw2yb
-.section sw2za
-.section sw2zb
-.section sw21a
-.section sw21b
-.section sw22a
-.section sw22b
-.section sw23a
-.section sw23b
-.section sw24a
-.section sw24b
-.section sw25a
-.section sw25b
-.section sw26a
-.section sw26b
-.section sw27a
-.section sw27b
-.section sw28a
-.section sw28b
-.section sw29a
-.section sw29b
-.section sw20a
-.section sw20b
-.section sw3aa
-.section sw3ab
-.section sw3ba
-.section sw3bb
-.section sw3ca
-.section sw3cb
-.section sw3da
-.section sw3db
-.section sw3ea
-.section sw3eb
-.section sw3fa
-.section sw3fb
-.section sw3ga
-.section sw3gb
-.section sw3ha
-.section sw3hb
-.section sw3ia
-.section sw3ib
-.section sw3ja
-.section sw3jb
-.section sw3ka
-.section sw3kb
-.section sw3la
-.section sw3lb
-.section sw3ma
-.section sw3mb
-.section sw3na
-.section sw3nb
-.section sw3oa
-.section sw3ob
-.section sw3pa
-.section sw3pb
-.section sw3qa
-.section sw3qb
-.section sw3ra
-.section sw3rb
-.section sw3sa
-.section sw3sb
-.section sw3ta
-.section sw3tb
-.section sw3ua
-.section sw3ub
-.section sw3va
-.section sw3vb
-.section sw3wa
-.section sw3wb
-.section sw3xa
-.section sw3xb
-.section sw3ya
-.section sw3yb
-.section sw3za
-.section sw3zb
-.section sw31a
-.section sw31b
-.section sw32a
-.section sw32b
-.section sw33a
-.section sw33b
-.section sw34a
-.section sw34b
-.section sw35a
-.section sw35b
-.section sw36a
-.section sw36b
-.section sw37a
-.section sw37b
-.section sw38a
-.section sw38b
-.section sw39a
-.section sw39b
-.section sw30a
-.section sw30b
-.section sw4aa
-.section sw4ab
-.section sw4ba
-.section sw4bb
-.section sw4ca
-.section sw4cb
-.section sw4da
-.section sw4db
-.section sw4ea
-.section sw4eb
-.section sw4fa
-.section sw4fb
-.section sw4ga
-.section sw4gb
-.section sw4ha
-.section sw4hb
-.section sw4ia
-.section sw4ib
-.section sw4ja
-.section sw4jb
-.section sw4ka
-.section sw4kb
-.section sw4la
-.section sw4lb
-.section sw4ma
-.section sw4mb
-.section sw4na
-.section sw4nb
-.section sw4oa
-.section sw4ob
-.section sw4pa
-.section sw4pb
-.section sw4qa
-.section sw4qb
-.section sw4ra
-.section sw4rb
-.section sw4sa
-.section sw4sb
-.section sw4ta
-.section sw4tb
-.section sw4ua
-.section sw4ub
-.section sw4va
-.section sw4vb
-.section sw4wa
-.section sw4wb
-.section sw4xa
-.section sw4xb
-.section sw4ya
-.section sw4yb
-.section sw4za
-.section sw4zb
-.section sw41a
-.section sw41b
-.section sw42a
-.section sw42b
-.section sw43a
-.section sw43b
-.section sw44a
-.section sw44b
-.section sw45a
-.section sw45b
-.section sw46a
-.section sw46b
-.section sw47a
-.section sw47b
-.section sw48a
-.section sw48b
-.section sw49a
-.section sw49b
-.section sw40a
-.section sw40b
-.section sw5aa
-.section sw5ab
-.section sw5ba
-.section sw5bb
-.section sw5ca
-.section sw5cb
-.section sw5da
-.section sw5db
-.section sw5ea
-.section sw5eb
-.section sw5fa
-.section sw5fb
-.section sw5ga
-.section sw5gb
-.section sw5ha
-.section sw5hb
-.section sw5ia
-.section sw5ib
-.section sw5ja
-.section sw5jb
-.section sw5ka
-.section sw5kb
-.section sw5la
-.section sw5lb
-.section sw5ma
-.section sw5mb
-.section sw5na
-.section sw5nb
-.section sw5oa
-.section sw5ob
-.section sw5pa
-.section sw5pb
-.section sw5qa
-.section sw5qb
-.section sw5ra
-.section sw5rb
-.section sw5sa
-.section sw5sb
-.section sw5ta
-.section sw5tb
-.section sw5ua
-.section sw5ub
-.section sw5va
-.section sw5vb
-.section sw5wa
-.section sw5wb
-.section sw5xa
-.section sw5xb
-.section sw5ya
-.section sw5yb
-.section sw5za
-.section sw5zb
-.section sw51a
-.section sw51b
-.section sw52a
-.section sw52b
-.section sw53a
-.section sw53b
-.section sw54a
-.section sw54b
-.section sw55a
-.section sw55b
-.section sw56a
-.section sw56b
-.section sw57a
-.section sw57b
-.section sw58a
-.section sw58b
-.section sw59a
-.section sw59b
-.section sw50a
-.section sw50b
-.section sw6aa
-.section sw6ab
-.section sw6ba
-.section sw6bb
-.section sw6ca
-.section sw6cb
-.section sw6da
-.section sw6db
-.section sw6ea
-.section sw6eb
-.section sw6fa
-.section sw6fb
-.section sw6ga
-.section sw6gb
-.section sw6ha
-.section sw6hb
-.section sw6ia
-.section sw6ib
-.section sw6ja
-.section sw6jb
-.section sw6ka
-.section sw6kb
-.section sw6la
-.section sw6lb
-.section sw6ma
-.section sw6mb
-.section sw6na
-.section sw6nb
-.section sw6oa
-.section sw6ob
-.section sw6pa
-.section sw6pb
-.section sw6qa
-.section sw6qb
-.section sw6ra
-.section sw6rb
-.section sw6sa
-.section sw6sb
-.section sw6ta
-.section sw6tb
-.section sw6ua
-.section sw6ub
-.section sw6va
-.section sw6vb
-.section sw6wa
-.section sw6wb
-.section sw6xa
-.section sw6xb
-.section sw6ya
-.section sw6yb
-.section sw6za
-.section sw6zb
-.section sw61a
-.section sw61b
-.section sw62a
-.section sw62b
-.section sw63a
-.section sw63b
-.section sw64a
-.section sw64b
-.section sw65a
-.section sw65b
-.section sw66a
-.section sw66b
-.section sw67a
-.section sw67b
-.section sw68a
-.section sw68b
-.section sw69a
-.section sw69b
-.section sw60a
-.section sw60b
-.section sw7aa
-.section sw7ab
-.section sw7ba
-.section sw7bb
-.section sw7ca
-.section sw7cb
-.section sw7da
-.section sw7db
-.section sw7ea
-.section sw7eb
-.section sw7fa
-.section sw7fb
-.section sw7ga
-.section sw7gb
-.section sw7ha
-.section sw7hb
-.section sw7ia
-.section sw7ib
-.section sw7ja
-.section sw7jb
-.section sw7ka
-.section sw7kb
-.section sw7la
-.section sw7lb
-.section sw7ma
-.section sw7mb
-.section sw7na
-.section sw7nb
-.section sw7oa
-.section sw7ob
-.section sw7pa
-.section sw7pb
-.section sw7qa
-.section sw7qb
-.section sw7ra
-.section sw7rb
-.section sw7sa
-.section sw7sb
-.section sw7ta
-.section sw7tb
-.section sw7ua
-.section sw7ub
-.section sw7va
-.section sw7vb
-.section sw7wa
-.section sw7wb
-.section sw7xa
-.section sw7xb
-.section sw7ya
-.section sw7yb
-.section sw7za
-.section sw7zb
-.section sw71a
-.section sw71b
-.section sw72a
-.section sw72b
-.section sw73a
-.section sw73b
-.section sw74a
-.section sw74b
-.section sw75a
-.section sw75b
-.section sw76a
-.section sw76b
-.section sw77a
-.section sw77b
-.section sw78a
-.section sw78b
-.section sw79a
-.section sw79b
-.section sw70a
-.section sw70b
-.section sw8aa
-.section sw8ab
-.section sw8ba
-.section sw8bb
-.section sw8ca
-.section sw8cb
-.section sw8da
-.section sw8db
-.section sw8ea
-.section sw8eb
-.section sw8fa
-.section sw8fb
-.section sw8ga
-.section sw8gb
-.section sw8ha
-.section sw8hb
-.section sw8ia
-.section sw8ib
-.section sw8ja
-.section sw8jb
-.section sw8ka
-.section sw8kb
-.section sw8la
-.section sw8lb
-.section sw8ma
-.section sw8mb
-.section sw8na
-.section sw8nb
-.section sw8oa
-.section sw8ob
-.section sw8pa
-.section sw8pb
-.section sw8qa
-.section sw8qb
-.section sw8ra
-.section sw8rb
-.section sw8sa
-.section sw8sb
-.section sw8ta
-.section sw8tb
-.section sw8ua
-.section sw8ub
-.section sw8va
-.section sw8vb
-.section sw8wa
-.section sw8wb
-.section sw8xa
-.section sw8xb
-.section sw8ya
-.section sw8yb
-.section sw8za
-.section sw8zb
-.section sw81a
-.section sw81b
-.section sw82a
-.section sw82b
-.section sw83a
-.section sw83b
-.section sw84a
-.section sw84b
-.section sw85a
-.section sw85b
-.section sw86a
-.section sw86b
-.section sw87a
-.section sw87b
-.section sw88a
-.section sw88b
-.section sw89a
-.section sw89b
-.section sw80a
-.section sw80b
-.section sw9aa
-.section sw9ab
-.section sw9ba
-.section sw9bb
-.section sw9ca
-.section sw9cb
-.section sw9da
-.section sw9db
-.section sw9ea
-.section sw9eb
-.section sw9fa
-.section sw9fb
-.section sw9ga
-.section sw9gb
-.section sw9ha
-.section sw9hb
-.section sw9ia
-.section sw9ib
-.section sw9ja
-.section sw9jb
-.section sw9ka
-.section sw9kb
-.section sw9la
-.section sw9lb
-.section sw9ma
-.section sw9mb
-.section sw9na
-.section sw9nb
-.section sw9oa
-.section sw9ob
-.section sw9pa
-.section sw9pb
-.section sw9qa
-.section sw9qb
-.section sw9ra
-.section sw9rb
-.section sw9sa
-.section sw9sb
-.section sw9ta
-.section sw9tb
-.section sw9ua
-.section sw9ub
-.section sw9va
-.section sw9vb
-.section sw9wa
-.section sw9wb
-.section sw9xa
-.section sw9xb
-.section sw9ya
-.section sw9yb
-.section sw9za
-.section sw9zb
-.section sw91a
-.section sw91b
-.section sw92a
-.section sw92b
-.section sw93a
-.section sw93b
-.section sw94a
-.section sw94b
-.section sw95a
-.section sw95b
-.section sw96a
-.section sw96b
-.section sw97a
-.section sw97b
-.section sw98a
-.section sw98b
-.section sw99a
-.section sw99b
-.section sw90a
-.section sw90b
-.section sw0aa
-.section sw0ab
-.section sw0ba
-.section sw0bb
-.section sw0ca
-.section sw0cb
-.section sw0da
-.section sw0db
-.section sw0ea
-.section sw0eb
-.section sw0fa
-.section sw0fb
-.section sw0ga
-.section sw0gb
-.section sw0ha
-.section sw0hb
-.section sw0ia
-.section sw0ib
-.section sw0ja
-.section sw0jb
-.section sw0ka
-.section sw0kb
-.section sw0la
-.section sw0lb
-.section sw0ma
-.section sw0mb
-.section sw0na
-.section sw0nb
-.section sw0oa
-.section sw0ob
-.section sw0pa
-.section sw0pb
-.section sw0qa
-.section sw0qb
-.section sw0ra
-.section sw0rb
-.section sw0sa
-.section sw0sb
-.section sw0ta
-.section sw0tb
-.section sw0ua
-.section sw0ub
-.section sw0va
-.section sw0vb
-.section sw0wa
-.section sw0wb
-.section sw0xa
-.section sw0xb
-.section sw0ya
-.section sw0yb
-.section sw0za
-.section sw0zb
-.section sw01a
-.section sw01b
-.section sw02a
-.section sw02b
-.section sw03a
-.section sw03b
-.section sw04a
-.section sw04b
-.section sw05a
-.section sw05b
-.section sw06a
-.section sw06b
-.section sw07a
-.section sw07b
-.section sw08a
-.section sw08b
-.section sw09a
-.section sw09b
-.section sw00a
-.section sw00b
-.section sxaaa
-.section sxaab
-.section sxaba
-.section sxabb
-.section sxaca
-.section sxacb
-.section sxada
-.section sxadb
-.section sxaea
-.section sxaeb
-.section sxafa
-.section sxafb
-.section sxaga
-.section sxagb
-.section sxaha
-.section sxahb
-.section sxaia
-.section sxaib
-.section sxaja
-.section sxajb
-.section sxaka
-.section sxakb
-.section sxala
-.section sxalb
-.section sxama
-.section sxamb
-.section sxana
-.section sxanb
-.section sxaoa
-.section sxaob
-.section sxapa
-.section sxapb
-.section sxaqa
-.section sxaqb
-.section sxara
-.section sxarb
-.section sxasa
-.section sxasb
-.section sxata
-.section sxatb
-.section sxaua
-.section sxaub
-.section sxava
-.section sxavb
-.section sxawa
-.section sxawb
-.section sxaxa
-.section sxaxb
-.section sxaya
-.section sxayb
-.section sxaza
-.section sxazb
-.section sxa1a
-.section sxa1b
-.section sxa2a
-.section sxa2b
-.section sxa3a
-.section sxa3b
-.section sxa4a
-.section sxa4b
-.section sxa5a
-.section sxa5b
-.section sxa6a
-.section sxa6b
-.section sxa7a
-.section sxa7b
-.section sxa8a
-.section sxa8b
-.section sxa9a
-.section sxa9b
-.section sxa0a
-.section sxa0b
-.section sxbaa
-.section sxbab
-.section sxbba
-.section sxbbb
-.section sxbca
-.section sxbcb
-.section sxbda
-.section sxbdb
-.section sxbea
-.section sxbeb
-.section sxbfa
-.section sxbfb
-.section sxbga
-.section sxbgb
-.section sxbha
-.section sxbhb
-.section sxbia
-.section sxbib
-.section sxbja
-.section sxbjb
-.section sxbka
-.section sxbkb
-.section sxbla
-.section sxblb
-.section sxbma
-.section sxbmb
-.section sxbna
-.section sxbnb
-.section sxboa
-.section sxbob
-.section sxbpa
-.section sxbpb
-.section sxbqa
-.section sxbqb
-.section sxbra
-.section sxbrb
-.section sxbsa
-.section sxbsb
-.section sxbta
-.section sxbtb
-.section sxbua
-.section sxbub
-.section sxbva
-.section sxbvb
-.section sxbwa
-.section sxbwb
-.section sxbxa
-.section sxbxb
-.section sxbya
-.section sxbyb
-.section sxbza
-.section sxbzb
-.section sxb1a
-.section sxb1b
-.section sxb2a
-.section sxb2b
-.section sxb3a
-.section sxb3b
-.section sxb4a
-.section sxb4b
-.section sxb5a
-.section sxb5b
-.section sxb6a
-.section sxb6b
-.section sxb7a
-.section sxb7b
-.section sxb8a
-.section sxb8b
-.section sxb9a
-.section sxb9b
-.section sxb0a
-.section sxb0b
-.section sxcaa
-.section sxcab
-.section sxcba
-.section sxcbb
-.section sxcca
-.section sxccb
-.section sxcda
-.section sxcdb
-.section sxcea
-.section sxceb
-.section sxcfa
-.section sxcfb
-.section sxcga
-.section sxcgb
-.section sxcha
-.section sxchb
-.section sxcia
-.section sxcib
-.section sxcja
-.section sxcjb
-.section sxcka
-.section sxckb
-.section sxcla
-.section sxclb
-.section sxcma
-.section sxcmb
-.section sxcna
-.section sxcnb
-.section sxcoa
-.section sxcob
-.section sxcpa
-.section sxcpb
-.section sxcqa
-.section sxcqb
-.section sxcra
-.section sxcrb
-.section sxcsa
-.section sxcsb
-.section sxcta
-.section sxctb
-.section sxcua
-.section sxcub
-.section sxcva
-.section sxcvb
-.section sxcwa
-.section sxcwb
-.section sxcxa
-.section sxcxb
-.section sxcya
-.section sxcyb
-.section sxcza
-.section sxczb
-.section sxc1a
-.section sxc1b
-.section sxc2a
-.section sxc2b
-.section sxc3a
-.section sxc3b
-.section sxc4a
-.section sxc4b
-.section sxc5a
-.section sxc5b
-.section sxc6a
-.section sxc6b
-.section sxc7a
-.section sxc7b
-.section sxc8a
-.section sxc8b
-.section sxc9a
-.section sxc9b
-.section sxc0a
-.section sxc0b
-.section sxdaa
-.section sxdab
-.section sxdba
-.section sxdbb
-.section sxdca
-.section sxdcb
-.section sxdda
-.section sxddb
-.section sxdea
-.section sxdeb
-.section sxdfa
-.section sxdfb
-.section sxdga
-.section sxdgb
-.section sxdha
-.section sxdhb
-.section sxdia
-.section sxdib
-.section sxdja
-.section sxdjb
-.section sxdka
-.section sxdkb
-.section sxdla
-.section sxdlb
-.section sxdma
-.section sxdmb
-.section sxdna
-.section sxdnb
-.section sxdoa
-.section sxdob
-.section sxdpa
-.section sxdpb
-.section sxdqa
-.section sxdqb
-.section sxdra
-.section sxdrb
-.section sxdsa
-.section sxdsb
-.section sxdta
-.section sxdtb
-.section sxdua
-.section sxdub
-.section sxdva
-.section sxdvb
-.section sxdwa
-.section sxdwb
-.section sxdxa
-.section sxdxb
-.section sxdya
-.section sxdyb
-.section sxdza
-.section sxdzb
-.section sxd1a
-.section sxd1b
-.section sxd2a
-.section sxd2b
-.section sxd3a
-.section sxd3b
-.section sxd4a
-.section sxd4b
-.section sxd5a
-.section sxd5b
-.section sxd6a
-.section sxd6b
-.section sxd7a
-.section sxd7b
-.section sxd8a
-.section sxd8b
-.section sxd9a
-.section sxd9b
-.section sxd0a
-.section sxd0b
-.section sxeaa
-.section sxeab
-.section sxeba
-.section sxebb
-.section sxeca
-.section sxecb
-.section sxeda
-.section sxedb
-.section sxeea
-.section sxeeb
-.section sxefa
-.section sxefb
-.section sxega
-.section sxegb
-.section sxeha
-.section sxehb
-.section sxeia
-.section sxeib
-.section sxeja
-.section sxejb
-.section sxeka
-.section sxekb
-.section sxela
-.section sxelb
-.section sxema
-.section sxemb
-.section sxena
-.section sxenb
-.section sxeoa
-.section sxeob
-.section sxepa
-.section sxepb
-.section sxeqa
-.section sxeqb
-.section sxera
-.section sxerb
-.section sxesa
-.section sxesb
-.section sxeta
-.section sxetb
-.section sxeua
-.section sxeub
-.section sxeva
-.section sxevb
-.section sxewa
-.section sxewb
-.section sxexa
-.section sxexb
-.section sxeya
-.section sxeyb
-.section sxeza
-.section sxezb
-.section sxe1a
-.section sxe1b
-.section sxe2a
-.section sxe2b
-.section sxe3a
-.section sxe3b
-.section sxe4a
-.section sxe4b
-.section sxe5a
-.section sxe5b
-.section sxe6a
-.section sxe6b
-.section sxe7a
-.section sxe7b
-.section sxe8a
-.section sxe8b
-.section sxe9a
-.section sxe9b
-.section sxe0a
-.section sxe0b
-.section sxfaa
-.section sxfab
-.section sxfba
-.section sxfbb
-.section sxfca
-.section sxfcb
-.section sxfda
-.section sxfdb
-.section sxfea
-.section sxfeb
-.section sxffa
-.section sxffb
-.section sxfga
-.section sxfgb
-.section sxfha
-.section sxfhb
-.section sxfia
-.section sxfib
-.section sxfja
-.section sxfjb
-.section sxfka
-.section sxfkb
-.section sxfla
-.section sxflb
-.section sxfma
-.section sxfmb
-.section sxfna
-.section sxfnb
-.section sxfoa
-.section sxfob
-.section sxfpa
-.section sxfpb
-.section sxfqa
-.section sxfqb
-.section sxfra
-.section sxfrb
-.section sxfsa
-.section sxfsb
-.section sxfta
-.section sxftb
-.section sxfua
-.section sxfub
-.section sxfva
-.section sxfvb
-.section sxfwa
-.section sxfwb
-.section sxfxa
-.section sxfxb
-.section sxfya
-.section sxfyb
-.section sxfza
-.section sxfzb
-.section sxf1a
-.section sxf1b
-.section sxf2a
-.section sxf2b
-.section sxf3a
-.section sxf3b
-.section sxf4a
-.section sxf4b
-.section sxf5a
-.section sxf5b
-.section sxf6a
-.section sxf6b
-.section sxf7a
-.section sxf7b
-.section sxf8a
-.section sxf8b
-.section sxf9a
-.section sxf9b
-.section sxf0a
-.section sxf0b
-.section sxgaa
-.section sxgab
-.section sxgba
-.section sxgbb
-.section sxgca
-.section sxgcb
-.section sxgda
-.section sxgdb
-.section sxgea
-.section sxgeb
-.section sxgfa
-.section sxgfb
-.section sxgga
-.section sxggb
-.section sxgha
-.section sxghb
-.section sxgia
-.section sxgib
-.section sxgja
-.section sxgjb
-.section sxgka
-.section sxgkb
-.section sxgla
-.section sxglb
-.section sxgma
-.section sxgmb
-.section sxgna
-.section sxgnb
-.section sxgoa
-.section sxgob
-.section sxgpa
-.section sxgpb
-.section sxgqa
-.section sxgqb
-.section sxgra
-.section sxgrb
-.section sxgsa
-.section sxgsb
-.section sxgta
-.section sxgtb
-.section sxgua
-.section sxgub
-.section sxgva
-.section sxgvb
-.section sxgwa
-.section sxgwb
-.section sxgxa
-.section sxgxb
-.section sxgya
-.section sxgyb
-.section sxgza
-.section sxgzb
-.section sxg1a
-.section sxg1b
-.section sxg2a
-.section sxg2b
-.section sxg3a
-.section sxg3b
-.section sxg4a
-.section sxg4b
-.section sxg5a
-.section sxg5b
-.section sxg6a
-.section sxg6b
-.section sxg7a
-.section sxg7b
-.section sxg8a
-.section sxg8b
-.section sxg9a
-.section sxg9b
-.section sxg0a
-.section sxg0b
-.section sxhaa
-.section sxhab
-.section sxhba
-.section sxhbb
-.section sxhca
-.section sxhcb
-.section sxhda
-.section sxhdb
-.section sxhea
-.section sxheb
-.section sxhfa
-.section sxhfb
-.section sxhga
-.section sxhgb
-.section sxhha
-.section sxhhb
-.section sxhia
-.section sxhib
-.section sxhja
-.section sxhjb
-.section sxhka
-.section sxhkb
-.section sxhla
-.section sxhlb
-.section sxhma
-.section sxhmb
-.section sxhna
-.section sxhnb
-.section sxhoa
-.section sxhob
-.section sxhpa
-.section sxhpb
-.section sxhqa
-.section sxhqb
-.section sxhra
-.section sxhrb
-.section sxhsa
-.section sxhsb
-.section sxhta
-.section sxhtb
-.section sxhua
-.section sxhub
-.section sxhva
-.section sxhvb
-.section sxhwa
-.section sxhwb
-.section sxhxa
-.section sxhxb
-.section sxhya
-.section sxhyb
-.section sxhza
-.section sxhzb
-.section sxh1a
-.section sxh1b
-.section sxh2a
-.section sxh2b
-.section sxh3a
-.section sxh3b
-.section sxh4a
-.section sxh4b
-.section sxh5a
-.section sxh5b
-.section sxh6a
-.section sxh6b
-.section sxh7a
-.section sxh7b
-.section sxh8a
-.section sxh8b
-.section sxh9a
-.section sxh9b
-.section sxh0a
-.section sxh0b
-.section sxiaa
-.section sxiab
-.section sxiba
-.section sxibb
-.section sxica
-.section sxicb
-.section sxida
-.section sxidb
-.section sxiea
-.section sxieb
-.section sxifa
-.section sxifb
-.section sxiga
-.section sxigb
-.section sxiha
-.section sxihb
-.section sxiia
-.section sxiib
-.section sxija
-.section sxijb
-.section sxika
-.section sxikb
-.section sxila
-.section sxilb
-.section sxima
-.section sximb
-.section sxina
-.section sxinb
-.section sxioa
-.section sxiob
-.section sxipa
-.section sxipb
-.section sxiqa
-.section sxiqb
-.section sxira
-.section sxirb
-.section sxisa
-.section sxisb
-.section sxita
-.section sxitb
-.section sxiua
-.section sxiub
-.section sxiva
-.section sxivb
-.section sxiwa
-.section sxiwb
-.section sxixa
-.section sxixb
-.section sxiya
-.section sxiyb
-.section sxiza
-.section sxizb
-.section sxi1a
-.section sxi1b
-.section sxi2a
-.section sxi2b
-.section sxi3a
-.section sxi3b
-.section sxi4a
-.section sxi4b
-.section sxi5a
-.section sxi5b
-.section sxi6a
-.section sxi6b
-.section sxi7a
-.section sxi7b
-.section sxi8a
-.section sxi8b
-.section sxi9a
-.section sxi9b
-.section sxi0a
-.section sxi0b
-.section sxjaa
-.section sxjab
-.section sxjba
-.section sxjbb
-.section sxjca
-.section sxjcb
-.section sxjda
-.section sxjdb
-.section sxjea
-.section sxjeb
-.section sxjfa
-.section sxjfb
-.section sxjga
-.section sxjgb
-.section sxjha
-.section sxjhb
-.section sxjia
-.section sxjib
-.section sxjja
-.section sxjjb
-.section sxjka
-.section sxjkb
-.section sxjla
-.section sxjlb
-.section sxjma
-.section sxjmb
-.section sxjna
-.section sxjnb
-.section sxjoa
-.section sxjob
-.section sxjpa
-.section sxjpb
-.section sxjqa
-.section sxjqb
-.section sxjra
-.section sxjrb
-.section sxjsa
-.section sxjsb
-.section sxjta
-.section sxjtb
-.section sxjua
-.section sxjub
-.section sxjva
-.section sxjvb
-.section sxjwa
-.section sxjwb
-.section sxjxa
-.section sxjxb
-.section sxjya
-.section sxjyb
-.section sxjza
-.section sxjzb
-.section sxj1a
-.section sxj1b
-.section sxj2a
-.section sxj2b
-.section sxj3a
-.section sxj3b
-.section sxj4a
-.section sxj4b
-.section sxj5a
-.section sxj5b
-.section sxj6a
-.section sxj6b
-.section sxj7a
-.section sxj7b
-.section sxj8a
-.section sxj8b
-.section sxj9a
-.section sxj9b
-.section sxj0a
-.section sxj0b
-.section sxkaa
-.section sxkab
-.section sxkba
-.section sxkbb
-.section sxkca
-.section sxkcb
-.section sxkda
-.section sxkdb
-.section sxkea
-.section sxkeb
-.section sxkfa
-.section sxkfb
-.section sxkga
-.section sxkgb
-.section sxkha
-.section sxkhb
-.section sxkia
-.section sxkib
-.section sxkja
-.section sxkjb
-.section sxkka
-.section sxkkb
-.section sxkla
-.section sxklb
-.section sxkma
-.section sxkmb
-.section sxkna
-.section sxknb
-.section sxkoa
-.section sxkob
-.section sxkpa
-.section sxkpb
-.section sxkqa
-.section sxkqb
-.section sxkra
-.section sxkrb
-.section sxksa
-.section sxksb
-.section sxkta
-.section sxktb
-.section sxkua
-.section sxkub
-.section sxkva
-.section sxkvb
-.section sxkwa
-.section sxkwb
-.section sxkxa
-.section sxkxb
-.section sxkya
-.section sxkyb
-.section sxkza
-.section sxkzb
-.section sxk1a
-.section sxk1b
-.section sxk2a
-.section sxk2b
-.section sxk3a
-.section sxk3b
-.section sxk4a
-.section sxk4b
-.section sxk5a
-.section sxk5b
-.section sxk6a
-.section sxk6b
-.section sxk7a
-.section sxk7b
-.section sxk8a
-.section sxk8b
-.section sxk9a
-.section sxk9b
-.section sxk0a
-.section sxk0b
-.section sxlaa
-.section sxlab
-.section sxlba
-.section sxlbb
-.section sxlca
-.section sxlcb
-.section sxlda
-.section sxldb
-.section sxlea
-.section sxleb
-.section sxlfa
-.section sxlfb
-.section sxlga
-.section sxlgb
-.section sxlha
-.section sxlhb
-.section sxlia
-.section sxlib
-.section sxlja
-.section sxljb
-.section sxlka
-.section sxlkb
-.section sxlla
-.section sxllb
-.section sxlma
-.section sxlmb
-.section sxlna
-.section sxlnb
-.section sxloa
-.section sxlob
-.section sxlpa
-.section sxlpb
-.section sxlqa
-.section sxlqb
-.section sxlra
-.section sxlrb
-.section sxlsa
-.section sxlsb
-.section sxlta
-.section sxltb
-.section sxlua
-.section sxlub
-.section sxlva
-.section sxlvb
-.section sxlwa
-.section sxlwb
-.section sxlxa
-.section sxlxb
-.section sxlya
-.section sxlyb
-.section sxlza
-.section sxlzb
-.section sxl1a
-.section sxl1b
-.section sxl2a
-.section sxl2b
-.section sxl3a
-.section sxl3b
-.section sxl4a
-.section sxl4b
-.section sxl5a
-.section sxl5b
-.section sxl6a
-.section sxl6b
-.section sxl7a
-.section sxl7b
-.section sxl8a
-.section sxl8b
-.section sxl9a
-.section sxl9b
-.section sxl0a
-.section sxl0b
-.section sxmaa
-.section sxmab
-.section sxmba
-.section sxmbb
-.section sxmca
-.section sxmcb
-.section sxmda
-.section sxmdb
-.section sxmea
-.section sxmeb
-.section sxmfa
-.section sxmfb
-.section sxmga
-.section sxmgb
-.section sxmha
-.section sxmhb
-.section sxmia
-.section sxmib
-.section sxmja
-.section sxmjb
-.section sxmka
-.section sxmkb
-.section sxmla
-.section sxmlb
-.section sxmma
-.section sxmmb
-.section sxmna
-.section sxmnb
-.section sxmoa
-.section sxmob
-.section sxmpa
-.section sxmpb
-.section sxmqa
-.section sxmqb
-.section sxmra
-.section sxmrb
-.section sxmsa
-.section sxmsb
-.section sxmta
-.section sxmtb
-.section sxmua
-.section sxmub
-.section sxmva
-.section sxmvb
-.section sxmwa
-.section sxmwb
-.section sxmxa
-.section sxmxb
-.section sxmya
-.section sxmyb
-.section sxmza
-.section sxmzb
-.section sxm1a
-.section sxm1b
-.section sxm2a
-.section sxm2b
-.section sxm3a
-.section sxm3b
-.section sxm4a
-.section sxm4b
-.section sxm5a
-.section sxm5b
-.section sxm6a
-.section sxm6b
-.section sxm7a
-.section sxm7b
-.section sxm8a
-.section sxm8b
-.section sxm9a
-.section sxm9b
-.section sxm0a
-.section sxm0b
-.section sxnaa
-.section sxnab
-.section sxnba
-.section sxnbb
-.section sxnca
-.section sxncb
-.section sxnda
-.section sxndb
-.section sxnea
-.section sxneb
-.section sxnfa
-.section sxnfb
-.section sxnga
-.section sxngb
-.section sxnha
-.section sxnhb
-.section sxnia
-.section sxnib
-.section sxnja
-.section sxnjb
-.section sxnka
-.section sxnkb
-.section sxnla
-.section sxnlb
-.section sxnma
-.section sxnmb
-.section sxnna
-.section sxnnb
-.section sxnoa
-.section sxnob
-.section sxnpa
-.section sxnpb
-.section sxnqa
-.section sxnqb
-.section sxnra
-.section sxnrb
-.section sxnsa
-.section sxnsb
-.section sxnta
-.section sxntb
-.section sxnua
-.section sxnub
-.section sxnva
-.section sxnvb
-.section sxnwa
-.section sxnwb
-.section sxnxa
-.section sxnxb
-.section sxnya
-.section sxnyb
-.section sxnza
-.section sxnzb
-.section sxn1a
-.section sxn1b
-.section sxn2a
-.section sxn2b
-.section sxn3a
-.section sxn3b
-.section sxn4a
-.section sxn4b
-.section sxn5a
-.section sxn5b
-.section sxn6a
-.section sxn6b
-.section sxn7a
-.section sxn7b
-.section sxn8a
-.section sxn8b
-.section sxn9a
-.section sxn9b
-.section sxn0a
-.section sxn0b
-.section sxoaa
-.section sxoab
-.section sxoba
-.section sxobb
-.section sxoca
-.section sxocb
-.section sxoda
-.section sxodb
-.section sxoea
-.section sxoeb
-.section sxofa
-.section sxofb
-.section sxoga
-.section sxogb
-.section sxoha
-.section sxohb
-.section sxoia
-.section sxoib
-.section sxoja
-.section sxojb
-.section sxoka
-.section sxokb
-.section sxola
-.section sxolb
-.section sxoma
-.section sxomb
-.section sxona
-.section sxonb
-.section sxooa
-.section sxoob
-.section sxopa
-.section sxopb
-.section sxoqa
-.section sxoqb
-.section sxora
-.section sxorb
-.section sxosa
-.section sxosb
-.section sxota
-.section sxotb
-.section sxoua
-.section sxoub
-.section sxova
-.section sxovb
-.section sxowa
-.section sxowb
-.section sxoxa
-.section sxoxb
-.section sxoya
-.section sxoyb
-.section sxoza
-.section sxozb
-.section sxo1a
-.section sxo1b
-.section sxo2a
-.section sxo2b
-.section sxo3a
-.section sxo3b
-.section sxo4a
-.section sxo4b
-.section sxo5a
-.section sxo5b
-.section sxo6a
-.section sxo6b
-.section sxo7a
-.section sxo7b
-.section sxo8a
-.section sxo8b
-.section sxo9a
-.section sxo9b
-.section sxo0a
-.section sxo0b
-.section sxpaa
-.section sxpab
-.section sxpba
-.section sxpbb
-.section sxpca
-.section sxpcb
-.section sxpda
-.section sxpdb
-.section sxpea
-.section sxpeb
-.section sxpfa
-.section sxpfb
-.section sxpga
-.section sxpgb
-.section sxpha
-.section sxphb
-.section sxpia
-.section sxpib
-.section sxpja
-.section sxpjb
-.section sxpka
-.section sxpkb
-.section sxpla
-.section sxplb
-.section sxpma
-.section sxpmb
-.section sxpna
-.section sxpnb
-.section sxpoa
-.section sxpob
-.section sxppa
-.section sxppb
-.section sxpqa
-.section sxpqb
-.section sxpra
-.section sxprb
-.section sxpsa
-.section sxpsb
-.section sxpta
-.section sxptb
-.section sxpua
-.section sxpub
-.section sxpva
-.section sxpvb
-.section sxpwa
-.section sxpwb
-.section sxpxa
-.section sxpxb
-.section sxpya
-.section sxpyb
-.section sxpza
-.section sxpzb
-.section sxp1a
-.section sxp1b
-.section sxp2a
-.section sxp2b
-.section sxp3a
-.section sxp3b
-.section sxp4a
-.section sxp4b
-.section sxp5a
-.section sxp5b
-.section sxp6a
-.section sxp6b
-.section sxp7a
-.section sxp7b
-.section sxp8a
-.section sxp8b
-.section sxp9a
-.section sxp9b
-.section sxp0a
-.section sxp0b
-.section sxqaa
-.section sxqab
-.section sxqba
-.section sxqbb
-.section sxqca
-.section sxqcb
-.section sxqda
-.section sxqdb
-.section sxqea
-.section sxqeb
-.section sxqfa
-.section sxqfb
-.section sxqga
-.section sxqgb
-.section sxqha
-.section sxqhb
-.section sxqia
-.section sxqib
-.section sxqja
-.section sxqjb
-.section sxqka
-.section sxqkb
-.section sxqla
-.section sxqlb
-.section sxqma
-.section sxqmb
-.section sxqna
-.section sxqnb
-.section sxqoa
-.section sxqob
-.section sxqpa
-.section sxqpb
-.section sxqqa
-.section sxqqb
-.section sxqra
-.section sxqrb
-.section sxqsa
-.section sxqsb
-.section sxqta
-.section sxqtb
-.section sxqua
-.section sxqub
-.section sxqva
-.section sxqvb
-.section sxqwa
-.section sxqwb
-.section sxqxa
-.section sxqxb
-.section sxqya
-.section sxqyb
-.section sxqza
-.section sxqzb
-.section sxq1a
-.section sxq1b
-.section sxq2a
-.section sxq2b
-.section sxq3a
-.section sxq3b
-.section sxq4a
-.section sxq4b
-.section sxq5a
-.section sxq5b
-.section sxq6a
-.section sxq6b
-.section sxq7a
-.section sxq7b
-.section sxq8a
-.section sxq8b
-.section sxq9a
-.section sxq9b
-.section sxq0a
-.section sxq0b
-.section sxraa
-.section sxrab
-.section sxrba
-.section sxrbb
-.section sxrca
-.section sxrcb
-.section sxrda
-.section sxrdb
-.section sxrea
-.section sxreb
-.section sxrfa
-.section sxrfb
-.section sxrga
-.section sxrgb
-.section sxrha
-.section sxrhb
-.section sxria
-.section sxrib
-.section sxrja
-.section sxrjb
-.section sxrka
-.section sxrkb
-.section sxrla
-.section sxrlb
-.section sxrma
-.section sxrmb
-.section sxrna
-.section sxrnb
-.section sxroa
-.section sxrob
-.section sxrpa
-.section sxrpb
-.section sxrqa
-.section sxrqb
-.section sxrra
-.section sxrrb
-.section sxrsa
-.section sxrsb
-.section sxrta
-.section sxrtb
-.section sxrua
-.section sxrub
-.section sxrva
-.section sxrvb
-.section sxrwa
-.section sxrwb
-.section sxrxa
-.section sxrxb
-.section sxrya
-.section sxryb
-.section sxrza
-.section sxrzb
-.section sxr1a
-.section sxr1b
-.section sxr2a
-.section sxr2b
-.section sxr3a
-.section sxr3b
-.section sxr4a
-.section sxr4b
-.section sxr5a
-.section sxr5b
-.section sxr6a
-.section sxr6b
-.section sxr7a
-.section sxr7b
-.section sxr8a
-.section sxr8b
-.section sxr9a
-.section sxr9b
-.section sxr0a
-.section sxr0b
-.section sxsaa
-.section sxsab
-.section sxsba
-.section sxsbb
-.section sxsca
-.section sxscb
-.section sxsda
-.section sxsdb
-.section sxsea
-.section sxseb
-.section sxsfa
-.section sxsfb
-.section sxsga
-.section sxsgb
-.section sxsha
-.section sxshb
-.section sxsia
-.section sxsib
-.section sxsja
-.section sxsjb
-.section sxska
-.section sxskb
-.section sxsla
-.section sxslb
-.section sxsma
-.section sxsmb
-.section sxsna
-.section sxsnb
-.section sxsoa
-.section sxsob
-.section sxspa
-.section sxspb
-.section sxsqa
-.section sxsqb
-.section sxsra
-.section sxsrb
-.section sxssa
-.section sxssb
-.section sxsta
-.section sxstb
-.section sxsua
-.section sxsub
-.section sxsva
-.section sxsvb
-.section sxswa
-.section sxswb
-.section sxsxa
-.section sxsxb
-.section sxsya
-.section sxsyb
-.section sxsza
-.section sxszb
-.section sxs1a
-.section sxs1b
-.section sxs2a
-.section sxs2b
-.section sxs3a
-.section sxs3b
-.section sxs4a
-.section sxs4b
-.section sxs5a
-.section sxs5b
-.section sxs6a
-.section sxs6b
-.section sxs7a
-.section sxs7b
-.section sxs8a
-.section sxs8b
-.section sxs9a
-.section sxs9b
-.section sxs0a
-.section sxs0b
-.section sxtaa
-.section sxtab
-.section sxtba
-.section sxtbb
-.section sxtca
-.section sxtcb
-.section sxtda
-.section sxtdb
-.section sxtea
-.section sxteb
-.section sxtfa
-.section sxtfb
-.section sxtga
-.section sxtgb
-.section sxtha
-.section sxthb
-.section sxtia
-.section sxtib
-.section sxtja
-.section sxtjb
-.section sxtka
-.section sxtkb
-.section sxtla
-.section sxtlb
-.section sxtma
-.section sxtmb
-.section sxtna
-.section sxtnb
-.section sxtoa
-.section sxtob
-.section sxtpa
-.section sxtpb
-.section sxtqa
-.section sxtqb
-.section sxtra
-.section sxtrb
-.section sxtsa
-.section sxtsb
-.section sxtta
-.section sxttb
-.section sxtua
-.section sxtub
-.section sxtva
-.section sxtvb
-.section sxtwa
-.section sxtwb
-.section sxtxa
-.section sxtxb
-.section sxtya
-.section sxtyb
-.section sxtza
-.section sxtzb
-.section sxt1a
-.section sxt1b
-.section sxt2a
-.section sxt2b
-.section sxt3a
-.section sxt3b
-.section sxt4a
-.section sxt4b
-.section sxt5a
-.section sxt5b
-.section sxt6a
-.section sxt6b
-.section sxt7a
-.section sxt7b
-.section sxt8a
-.section sxt8b
-.section sxt9a
-.section sxt9b
-.section sxt0a
-.section sxt0b
-.section sxuaa
-.section sxuab
-.section sxuba
-.section sxubb
-.section sxuca
-.section sxucb
-.section sxuda
-.section sxudb
-.section sxuea
-.section sxueb
-.section sxufa
-.section sxufb
-.section sxuga
-.section sxugb
-.section sxuha
-.section sxuhb
-.section sxuia
-.section sxuib
-.section sxuja
-.section sxujb
-.section sxuka
-.section sxukb
-.section sxula
-.section sxulb
-.section sxuma
-.section sxumb
-.section sxuna
-.section sxunb
-.section sxuoa
-.section sxuob
-.section sxupa
-.section sxupb
-.section sxuqa
-.section sxuqb
-.section sxura
-.section sxurb
-.section sxusa
-.section sxusb
-.section sxuta
-.section sxutb
-.section sxuua
-.section sxuub
-.section sxuva
-.section sxuvb
-.section sxuwa
-.section sxuwb
-.section sxuxa
-.section sxuxb
-.section sxuya
-.section sxuyb
-.section sxuza
-.section sxuzb
-.section sxu1a
-.section sxu1b
-.section sxu2a
-.section sxu2b
-.section sxu3a
-.section sxu3b
-.section sxu4a
-.section sxu4b
-.section sxu5a
-.section sxu5b
-.section sxu6a
-.section sxu6b
-.section sxu7a
-.section sxu7b
-.section sxu8a
-.section sxu8b
-.section sxu9a
-.section sxu9b
-.section sxu0a
-.section sxu0b
-.section sxvaa
-.section sxvab
-.section sxvba
-.section sxvbb
-.section sxvca
-.section sxvcb
-.section sxvda
-.section sxvdb
-.section sxvea
-.section sxveb
-.section sxvfa
-.section sxvfb
-.section sxvga
-.section sxvgb
-.section sxvha
-.section sxvhb
-.section sxvia
-.section sxvib
-.section sxvja
-.section sxvjb
-.section sxvka
-.section sxvkb
-.section sxvla
-.section sxvlb
-.section sxvma
-.section sxvmb
-.section sxvna
-.section sxvnb
-.section sxvoa
-.section sxvob
-.section sxvpa
-.section sxvpb
-.section sxvqa
-.section sxvqb
-.section sxvra
-.section sxvrb
-.section sxvsa
-.section sxvsb
-.section sxvta
-.section sxvtb
-.section sxvua
-.section sxvub
-.section sxvva
-.section sxvvb
-.section sxvwa
-.section sxvwb
-.section sxvxa
-.section sxvxb
-.section sxvya
-.section sxvyb
-.section sxvza
-.section sxvzb
-.section sxv1a
-.section sxv1b
-.section sxv2a
-.section sxv2b
-.section sxv3a
-.section sxv3b
-.section sxv4a
-.section sxv4b
-.section sxv5a
-.section sxv5b
-.section sxv6a
-.section sxv6b
-.section sxv7a
-.section sxv7b
-.section sxv8a
-.section sxv8b
-.section sxv9a
-.section sxv9b
-.section sxv0a
-.section sxv0b
-.section sxwaa
-.section sxwab
-.section sxwba
-.section sxwbb
-.section sxwca
-.section sxwcb
-.section sxwda
-.section sxwdb
-.section sxwea
-.section sxweb
-.section sxwfa
-.section sxwfb
-.section sxwga
-.section sxwgb
-.section sxwha
-.section sxwhb
-.section sxwia
-.section sxwib
-.section sxwja
-.section sxwjb
-.section sxwka
-.section sxwkb
-.section sxwla
-.section sxwlb
-.section sxwma
-.section sxwmb
-.section sxwna
-.section sxwnb
-.section sxwoa
-.section sxwob
-.section sxwpa
-.section sxwpb
-.section sxwqa
-.section sxwqb
-.section sxwra
-.section sxwrb
-.section sxwsa
-.section sxwsb
-.section sxwta
-.section sxwtb
-.section sxwua
-.section sxwub
-.section sxwva
-.section sxwvb
-.section sxwwa
-.section sxwwb
-.section sxwxa
-.section sxwxb
-.section sxwya
-.section sxwyb
-.section sxwza
-.section sxwzb
-.section sxw1a
-.section sxw1b
-.section sxw2a
-.section sxw2b
-.section sxw3a
-.section sxw3b
-.section sxw4a
-.section sxw4b
-.section sxw5a
-.section sxw5b
-.section sxw6a
-.section sxw6b
-.section sxw7a
-.section sxw7b
-.section sxw8a
-.section sxw8b
-.section sxw9a
-.section sxw9b
-.section sxw0a
-.section sxw0b
-.section sxxaa
-.section sxxab
-.section sxxba
-.section sxxbb
-.section sxxca
-.section sxxcb
-.section sxxda
-.section sxxdb
-.section sxxea
-.section sxxeb
-.section sxxfa
-.section sxxfb
-.section sxxga
-.section sxxgb
-.section sxxha
-.section sxxhb
-.section sxxia
-.section sxxib
-.section sxxja
-.section sxxjb
-.section sxxka
-.section sxxkb
-.section sxxla
-.section sxxlb
-.section sxxma
-.section sxxmb
-.section sxxna
-.section sxxnb
-.section sxxoa
-.section sxxob
-.section sxxpa
-.section sxxpb
-.section sxxqa
-.section sxxqb
-.section sxxra
-.section sxxrb
-.section sxxsa
-.section sxxsb
-.section sxxta
-.section sxxtb
-.section sxxua
-.section sxxub
-.section sxxva
-.section sxxvb
-.section sxxwa
-.section sxxwb
-.section sxxxa
-.section sxxxb
-.section sxxya
-.section sxxyb
-.section sxxza
-.section sxxzb
-.section sxx1a
-.section sxx1b
-.section sxx2a
-.section sxx2b
-.section sxx3a
-.section sxx3b
-.section sxx4a
-.section sxx4b
-.section sxx5a
-.section sxx5b
-.section sxx6a
-.section sxx6b
-.section sxx7a
-.section sxx7b
-.section sxx8a
-.section sxx8b
-.section sxx9a
-.section sxx9b
-.section sxx0a
-.section sxx0b
-.section sxyaa
-.section sxyab
-.section sxyba
-.section sxybb
-.section sxyca
-.section sxycb
-.section sxyda
-.section sxydb
-.section sxyea
-.section sxyeb
-.section sxyfa
-.section sxyfb
-.section sxyga
-.section sxygb
-.section sxyha
-.section sxyhb
-.section sxyia
-.section sxyib
-.section sxyja
-.section sxyjb
-.section sxyka
-.section sxykb
-.section sxyla
-.section sxylb
-.section sxyma
-.section sxymb
-.section sxyna
-.section sxynb
-.section sxyoa
-.section sxyob
-.section sxypa
-.section sxypb
-.section sxyqa
-.section sxyqb
-.section sxyra
-.section sxyrb
-.section sxysa
-.section sxysb
-.section sxyta
-.section sxytb
-.section sxyua
-.section sxyub
-.section sxyva
-.section sxyvb
-.section sxywa
-.section sxywb
-.section sxyxa
-.section sxyxb
-.section sxyya
-.section sxyyb
-.section sxyza
-.section sxyzb
-.section sxy1a
-.section sxy1b
-.section sxy2a
-.section sxy2b
-.section sxy3a
-.section sxy3b
-.section sxy4a
-.section sxy4b
-.section sxy5a
-.section sxy5b
-.section sxy6a
-.section sxy6b
-.section sxy7a
-.section sxy7b
-.section sxy8a
-.section sxy8b
-.section sxy9a
-.section sxy9b
-.section sxy0a
-.section sxy0b
-.section sxzaa
-.section sxzab
-.section sxzba
-.section sxzbb
-.section sxzca
-.section sxzcb
-.section sxzda
-.section sxzdb
-.section sxzea
-.section sxzeb
-.section sxzfa
-.section sxzfb
-.section sxzga
-.section sxzgb
-.section sxzha
-.section sxzhb
-.section sxzia
-.section sxzib
-.section sxzja
-.section sxzjb
-.section sxzka
-.section sxzkb
-.section sxzla
-.section sxzlb
-.section sxzma
-.section sxzmb
-.section sxzna
-.section sxznb
-.section sxzoa
-.section sxzob
-.section sxzpa
-.section sxzpb
-.section sxzqa
-.section sxzqb
-.section sxzra
-.section sxzrb
-.section sxzsa
-.section sxzsb
-.section sxzta
-.section sxztb
-.section sxzua
-.section sxzub
-.section sxzva
-.section sxzvb
-.section sxzwa
-.section sxzwb
-.section sxzxa
-.section sxzxb
-.section sxzya
-.section sxzyb
-.section sxzza
-.section sxzzb
-.section sxz1a
-.section sxz1b
-.section sxz2a
-.section sxz2b
-.section sxz3a
-.section sxz3b
-.section sxz4a
-.section sxz4b
-.section sxz5a
-.section sxz5b
-.section sxz6a
-.section sxz6b
-.section sxz7a
-.section sxz7b
-.section sxz8a
-.section sxz8b
-.section sxz9a
-.section sxz9b
-.section sxz0a
-.section sxz0b
-.section sx1aa
-.section sx1ab
-.section sx1ba
-.section sx1bb
-.section sx1ca
-.section sx1cb
-.section sx1da
-.section sx1db
-.section sx1ea
-.section sx1eb
-.section sx1fa
-.section sx1fb
-.section sx1ga
-.section sx1gb
-.section sx1ha
-.section sx1hb
-.section sx1ia
-.section sx1ib
-.section sx1ja
-.section sx1jb
-.section sx1ka
-.section sx1kb
-.section sx1la
-.section sx1lb
-.section sx1ma
-.section sx1mb
-.section sx1na
-.section sx1nb
-.section sx1oa
-.section sx1ob
-.section sx1pa
-.section sx1pb
-.section sx1qa
-.section sx1qb
-.section sx1ra
-.section sx1rb
-.section sx1sa
-.section sx1sb
-.section sx1ta
-.section sx1tb
-.section sx1ua
-.section sx1ub
-.section sx1va
-.section sx1vb
-.section sx1wa
-.section sx1wb
-.section sx1xa
-.section sx1xb
-.section sx1ya
-.section sx1yb
-.section sx1za
-.section sx1zb
-.section sx11a
-.section sx11b
-.section sx12a
-.section sx12b
-.section sx13a
-.section sx13b
-.section sx14a
-.section sx14b
-.section sx15a
-.section sx15b
-.section sx16a
-.section sx16b
-.section sx17a
-.section sx17b
-.section sx18a
-.section sx18b
-.section sx19a
-.section sx19b
-.section sx10a
-.section sx10b
-.section sx2aa
-.section sx2ab
-.section sx2ba
-.section sx2bb
-.section sx2ca
-.section sx2cb
-.section sx2da
-.section sx2db
-.section sx2ea
-.section sx2eb
-.section sx2fa
-.section sx2fb
-.section sx2ga
-.section sx2gb
-.section sx2ha
-.section sx2hb
-.section sx2ia
-.section sx2ib
-.section sx2ja
-.section sx2jb
-.section sx2ka
-.section sx2kb
-.section sx2la
-.section sx2lb
-.section sx2ma
-.section sx2mb
-.section sx2na
-.section sx2nb
-.section sx2oa
-.section sx2ob
-.section sx2pa
-.section sx2pb
-.section sx2qa
-.section sx2qb
-.section sx2ra
-.section sx2rb
-.section sx2sa
-.section sx2sb
-.section sx2ta
-.section sx2tb
-.section sx2ua
-.section sx2ub
-.section sx2va
-.section sx2vb
-.section sx2wa
-.section sx2wb
-.section sx2xa
-.section sx2xb
-.section sx2ya
-.section sx2yb
-.section sx2za
-.section sx2zb
-.section sx21a
-.section sx21b
-.section sx22a
-.section sx22b
-.section sx23a
-.section sx23b
-.section sx24a
-.section sx24b
-.section sx25a
-.section sx25b
-.section sx26a
-.section sx26b
-.section sx27a
-.section sx27b
-.section sx28a
-.section sx28b
-.section sx29a
-.section sx29b
-.section sx20a
-.section sx20b
-.section sx3aa
-.section sx3ab
-.section sx3ba
-.section sx3bb
-.section sx3ca
-.section sx3cb
-.section sx3da
-.section sx3db
-.section sx3ea
-.section sx3eb
-.section sx3fa
-.section sx3fb
-.section sx3ga
-.section sx3gb
-.section sx3ha
-.section sx3hb
-.section sx3ia
-.section sx3ib
-.section sx3ja
-.section sx3jb
-.section sx3ka
-.section sx3kb
-.section sx3la
-.section sx3lb
-.section sx3ma
-.section sx3mb
-.section sx3na
-.section sx3nb
-.section sx3oa
-.section sx3ob
-.section sx3pa
-.section sx3pb
-.section sx3qa
-.section sx3qb
-.section sx3ra
-.section sx3rb
-.section sx3sa
-.section sx3sb
-.section sx3ta
-.section sx3tb
-.section sx3ua
-.section sx3ub
-.section sx3va
-.section sx3vb
-.section sx3wa
-.section sx3wb
-.section sx3xa
-.section sx3xb
-.section sx3ya
-.section sx3yb
-.section sx3za
-.section sx3zb
-.section sx31a
-.section sx31b
-.section sx32a
-.section sx32b
-.section sx33a
-.section sx33b
-.section sx34a
-.section sx34b
-.section sx35a
-.section sx35b
-.section sx36a
-.section sx36b
-.section sx37a
-.section sx37b
-.section sx38a
-.section sx38b
-.section sx39a
-.section sx39b
-.section sx30a
-.section sx30b
-.section sx4aa
-.section sx4ab
-.section sx4ba
-.section sx4bb
-.section sx4ca
-.section sx4cb
-.section sx4da
-.section sx4db
-.section sx4ea
-.section sx4eb
-.section sx4fa
-.section sx4fb
-.section sx4ga
-.section sx4gb
-.section sx4ha
-.section sx4hb
-.section sx4ia
-.section sx4ib
-.section sx4ja
-.section sx4jb
-.section sx4ka
-.section sx4kb
-.section sx4la
-.section sx4lb
-.section sx4ma
-.section sx4mb
-.section sx4na
-.section sx4nb
-.section sx4oa
-.section sx4ob
-.section sx4pa
-.section sx4pb
-.section sx4qa
-.section sx4qb
-.section sx4ra
-.section sx4rb
-.section sx4sa
-.section sx4sb
-.section sx4ta
-.section sx4tb
-.section sx4ua
-.section sx4ub
-.section sx4va
-.section sx4vb
-.section sx4wa
-.section sx4wb
-.section sx4xa
-.section sx4xb
-.section sx4ya
-.section sx4yb
-.section sx4za
-.section sx4zb
-.section sx41a
-.section sx41b
-.section sx42a
-.section sx42b
-.section sx43a
-.section sx43b
-.section sx44a
-.section sx44b
-.section sx45a
-.section sx45b
-.section sx46a
-.section sx46b
-.section sx47a
-.section sx47b
-.section sx48a
-.section sx48b
-.section sx49a
-.section sx49b
-.section sx40a
-.section sx40b
-.section sx5aa
-.section sx5ab
-.section sx5ba
-.section sx5bb
-.section sx5ca
-.section sx5cb
-.section sx5da
-.section sx5db
-.section sx5ea
-.section sx5eb
-.section sx5fa
-.section sx5fb
-.section sx5ga
-.section sx5gb
-.section sx5ha
-.section sx5hb
-.section sx5ia
-.section sx5ib
-.section sx5ja
-.section sx5jb
-.section sx5ka
-.section sx5kb
-.section sx5la
-.section sx5lb
-.section sx5ma
-.section sx5mb
-.section sx5na
-.section sx5nb
-.section sx5oa
-.section sx5ob
-.section sx5pa
-.section sx5pb
-.section sx5qa
-.section sx5qb
-.section sx5ra
-.section sx5rb
-.section sx5sa
-.section sx5sb
-.section sx5ta
-.section sx5tb
-.section sx5ua
-.section sx5ub
-.section sx5va
-.section sx5vb
-.section sx5wa
-.section sx5wb
-.section sx5xa
-.section sx5xb
-.section sx5ya
-.section sx5yb
-.section sx5za
-.section sx5zb
-.section sx51a
-.section sx51b
-.section sx52a
-.section sx52b
-.section sx53a
-.section sx53b
-.section sx54a
-.section sx54b
-.section sx55a
-.section sx55b
-.section sx56a
-.section sx56b
-.section sx57a
-.section sx57b
-.section sx58a
-.section sx58b
-.section sx59a
-.section sx59b
-.section sx50a
-.section sx50b
-.section sx6aa
-.section sx6ab
-.section sx6ba
-.section sx6bb
-.section sx6ca
-.section sx6cb
-.section sx6da
-.section sx6db
-.section sx6ea
-.section sx6eb
-.section sx6fa
-.section sx6fb
-.section sx6ga
-.section sx6gb
-.section sx6ha
-.section sx6hb
-.section sx6ia
-.section sx6ib
-.section sx6ja
-.section sx6jb
-.section sx6ka
-.section sx6kb
-.section sx6la
-.section sx6lb
-.section sx6ma
-.section sx6mb
-.section sx6na
-.section sx6nb
-.section sx6oa
-.section sx6ob
-.section sx6pa
-.section sx6pb
-.section sx6qa
-.section sx6qb
-.section sx6ra
-.section sx6rb
-.section sx6sa
-.section sx6sb
-.section sx6ta
-.section sx6tb
-.section sx6ua
-.section sx6ub
-.section sx6va
-.section sx6vb
-.section sx6wa
-.section sx6wb
-.section sx6xa
-.section sx6xb
-.section sx6ya
-.section sx6yb
-.section sx6za
-.section sx6zb
-.section sx61a
-.section sx61b
-.section sx62a
-.section sx62b
-.section sx63a
-.section sx63b
-.section sx64a
-.section sx64b
-.section sx65a
-.section sx65b
-.section sx66a
-.section sx66b
-.section sx67a
-.section sx67b
-.section sx68a
-.section sx68b
-.section sx69a
-.section sx69b
-.section sx60a
-.section sx60b
-.section sx7aa
-.section sx7ab
-.section sx7ba
-.section sx7bb
-.section sx7ca
-.section sx7cb
-.section sx7da
-.section sx7db
-.section sx7ea
-.section sx7eb
-.section sx7fa
-.section sx7fb
-.section sx7ga
-.section sx7gb
-.section sx7ha
-.section sx7hb
-.section sx7ia
-.section sx7ib
-.section sx7ja
-.section sx7jb
-.section sx7ka
-.section sx7kb
-.section sx7la
-.section sx7lb
-.section sx7ma
-.section sx7mb
-.section sx7na
-.section sx7nb
-.section sx7oa
-.section sx7ob
-.section sx7pa
-.section sx7pb
-.section sx7qa
-.section sx7qb
-.section sx7ra
-.section sx7rb
-.section sx7sa
-.section sx7sb
-.section sx7ta
-.section sx7tb
-.section sx7ua
-.section sx7ub
-.section sx7va
-.section sx7vb
-.section sx7wa
-.section sx7wb
-.section sx7xa
-.section sx7xb
-.section sx7ya
-.section sx7yb
-.section sx7za
-.section sx7zb
-.section sx71a
-.section sx71b
-.section sx72a
-.section sx72b
-.section sx73a
-.section sx73b
-.section sx74a
-.section sx74b
-.section sx75a
-.section sx75b
-.section sx76a
-.section sx76b
-.section sx77a
-.section sx77b
-.section sx78a
-.section sx78b
-.section sx79a
-.section sx79b
-.section sx70a
-.section sx70b
-.section sx8aa
-.section sx8ab
-.section sx8ba
-.section sx8bb
-.section sx8ca
-.section sx8cb
-.section sx8da
-.section sx8db
-.section sx8ea
-.section sx8eb
-.section sx8fa
-.section sx8fb
-.section sx8ga
-.section sx8gb
-.section sx8ha
-.section sx8hb
-.section sx8ia
-.section sx8ib
-.section sx8ja
-.section sx8jb
-.section sx8ka
-.section sx8kb
-.section sx8la
-.section sx8lb
-.section sx8ma
-.section sx8mb
-.section sx8na
-.section sx8nb
-.section sx8oa
-.section sx8ob
-.section sx8pa
-.section sx8pb
-.section sx8qa
-.section sx8qb
-.section sx8ra
-.section sx8rb
-.section sx8sa
-.section sx8sb
-.section sx8ta
-.section sx8tb
-.section sx8ua
-.section sx8ub
-.section sx8va
-.section sx8vb
-.section sx8wa
-.section sx8wb
-.section sx8xa
-.section sx8xb
-.section sx8ya
-.section sx8yb
-.section sx8za
-.section sx8zb
-.section sx81a
-.section sx81b
-.section sx82a
-.section sx82b
-.section sx83a
-.section sx83b
-.section sx84a
-.section sx84b
-.section sx85a
-.section sx85b
-.section sx86a
-.section sx86b
-.section sx87a
-.section sx87b
-.section sx88a
-.section sx88b
-.section sx89a
-.section sx89b
-.section sx80a
-.section sx80b
-.section sx9aa
-.section sx9ab
-.section sx9ba
-.section sx9bb
-.section sx9ca
-.section sx9cb
-.section sx9da
-.section sx9db
-.section sx9ea
-.section sx9eb
-.section sx9fa
-.section sx9fb
-.section sx9ga
-.section sx9gb
-.section sx9ha
-.section sx9hb
-.section sx9ia
-.section sx9ib
-.section sx9ja
-.section sx9jb
-.section sx9ka
-.section sx9kb
-.section sx9la
-.section sx9lb
-.section sx9ma
-.section sx9mb
-.section sx9na
-.section sx9nb
-.section sx9oa
-.section sx9ob
-.section sx9pa
-.section sx9pb
-.section sx9qa
-.section sx9qb
-.section sx9ra
-.section sx9rb
-.section sx9sa
-.section sx9sb
-.section sx9ta
-.section sx9tb
-.section sx9ua
-.section sx9ub
-.section sx9va
-.section sx9vb
-.section sx9wa
-.section sx9wb
-.section sx9xa
-.section sx9xb
-.section sx9ya
-.section sx9yb
-.section sx9za
-.section sx9zb
-.section sx91a
-.section sx91b
-.section sx92a
-.section sx92b
-.section sx93a
-.section sx93b
-.section sx94a
-.section sx94b
-.section sx95a
-.section sx95b
-.section sx96a
-.section sx96b
-.section sx97a
-.section sx97b
-.section sx98a
-.section sx98b
-.section sx99a
-.section sx99b
-.section sx90a
-.section sx90b
-.section sx0aa
-.section sx0ab
-.section sx0ba
-.section sx0bb
-.section sx0ca
-.section sx0cb
-.section sx0da
-.section sx0db
-.section sx0ea
-.section sx0eb
-.section sx0fa
-.section sx0fb
-.section sx0ga
-.section sx0gb
-.section sx0ha
-.section sx0hb
-.section sx0ia
-.section sx0ib
-.section sx0ja
-.section sx0jb
-.section sx0ka
-.section sx0kb
-.section sx0la
-.section sx0lb
-.section sx0ma
-.section sx0mb
-.section sx0na
-.section sx0nb
-.section sx0oa
-.section sx0ob
-.section sx0pa
-.section sx0pb
-.section sx0qa
-.section sx0qb
-.section sx0ra
-.section sx0rb
-.section sx0sa
-.section sx0sb
-.section sx0ta
-.section sx0tb
-.section sx0ua
-.section sx0ub
-.section sx0va
-.section sx0vb
-.section sx0wa
-.section sx0wb
-.section sx0xa
-.section sx0xb
-.section sx0ya
-.section sx0yb
-.section sx0za
-.section sx0zb
-.section sx01a
-.section sx01b
-.section sx02a
-.section sx02b
-.section sx03a
-.section sx03b
-.section sx04a
-.section sx04b
-.section sx05a
-.section sx05b
-.section sx06a
-.section sx06b
-.section sx07a
-.section sx07b
-.section sx08a
-.section sx08b
-.section sx09a
-.section sx09b
-.section sx00a
-.section sx00b
-.section syaaa
-.section syaab
-.section syaba
-.section syabb
-.section syaca
-.section syacb
-.section syada
-.section syadb
-.section syaea
-.section syaeb
-.section syafa
-.section syafb
-.section syaga
-.section syagb
-.section syaha
-.section syahb
-.section syaia
-.section syaib
-.section syaja
-.section syajb
-.section syaka
-.section syakb
-.section syala
-.section syalb
-.section syama
-.section syamb
-.section syana
-.section syanb
-.section syaoa
-.section syaob
-.section syapa
-.section syapb
-.section syaqa
-.section syaqb
-.section syara
-.section syarb
-.section syasa
-.section syasb
-.section syata
-.section syatb
-.section syaua
-.section syaub
-.section syava
-.section syavb
-.section syawa
-.section syawb
-.section syaxa
-.section syaxb
-.section syaya
-.section syayb
-.section syaza
-.section syazb
-.section sya1a
-.section sya1b
-.section sya2a
-.section sya2b
-.section sya3a
-.section sya3b
-.section sya4a
-.section sya4b
-.section sya5a
-.section sya5b
-.section sya6a
-.section sya6b
-.section sya7a
-.section sya7b
-.section sya8a
-.section sya8b
-.section sya9a
-.section sya9b
-.section sya0a
-.section sya0b
-.section sybaa
-.section sybab
-.section sybba
-.section sybbb
-.section sybca
-.section sybcb
-.section sybda
-.section sybdb
-.section sybea
-.section sybeb
-.section sybfa
-.section sybfb
-.section sybga
-.section sybgb
-.section sybha
-.section sybhb
-.section sybia
-.section sybib
-.section sybja
-.section sybjb
-.section sybka
-.section sybkb
-.section sybla
-.section syblb
-.section sybma
-.section sybmb
-.section sybna
-.section sybnb
-.section syboa
-.section sybob
-.section sybpa
-.section sybpb
-.section sybqa
-.section sybqb
-.section sybra
-.section sybrb
-.section sybsa
-.section sybsb
-.section sybta
-.section sybtb
-.section sybua
-.section sybub
-.section sybva
-.section sybvb
-.section sybwa
-.section sybwb
-.section sybxa
-.section sybxb
-.section sybya
-.section sybyb
-.section sybza
-.section sybzb
-.section syb1a
-.section syb1b
-.section syb2a
-.section syb2b
-.section syb3a
-.section syb3b
-.section syb4a
-.section syb4b
-.section syb5a
-.section syb5b
-.section syb6a
-.section syb6b
-.section syb7a
-.section syb7b
-.section syb8a
-.section syb8b
-.section syb9a
-.section syb9b
-.section syb0a
-.section syb0b
-.section sycaa
-.section sycab
-.section sycba
-.section sycbb
-.section sycca
-.section syccb
-.section sycda
-.section sycdb
-.section sycea
-.section syceb
-.section sycfa
-.section sycfb
-.section sycga
-.section sycgb
-.section sycha
-.section sychb
-.section sycia
-.section sycib
-.section sycja
-.section sycjb
-.section sycka
-.section syckb
-.section sycla
-.section syclb
-.section sycma
-.section sycmb
-.section sycna
-.section sycnb
-.section sycoa
-.section sycob
-.section sycpa
-.section sycpb
-.section sycqa
-.section sycqb
-.section sycra
-.section sycrb
-.section sycsa
-.section sycsb
-.section sycta
-.section syctb
-.section sycua
-.section sycub
-.section sycva
-.section sycvb
-.section sycwa
-.section sycwb
-.section sycxa
-.section sycxb
-.section sycya
-.section sycyb
-.section sycza
-.section syczb
-.section syc1a
-.section syc1b
-.section syc2a
-.section syc2b
-.section syc3a
-.section syc3b
-.section syc4a
-.section syc4b
-.section syc5a
-.section syc5b
-.section syc6a
-.section syc6b
-.section syc7a
-.section syc7b
-.section syc8a
-.section syc8b
-.section syc9a
-.section syc9b
-.section syc0a
-.section syc0b
-.section sydaa
-.section sydab
-.section sydba
-.section sydbb
-.section sydca
-.section sydcb
-.section sydda
-.section syddb
-.section sydea
-.section sydeb
-.section sydfa
-.section sydfb
-.section sydga
-.section sydgb
-.section sydha
-.section sydhb
-.section sydia
-.section sydib
-.section sydja
-.section sydjb
-.section sydka
-.section sydkb
-.section sydla
-.section sydlb
-.section sydma
-.section sydmb
-.section sydna
-.section sydnb
-.section sydoa
-.section sydob
-.section sydpa
-.section sydpb
-.section sydqa
-.section sydqb
-.section sydra
-.section sydrb
-.section sydsa
-.section sydsb
-.section sydta
-.section sydtb
-.section sydua
-.section sydub
-.section sydva
-.section sydvb
-.section sydwa
-.section sydwb
-.section sydxa
-.section sydxb
-.section sydya
-.section sydyb
-.section sydza
-.section sydzb
-.section syd1a
-.section syd1b
-.section syd2a
-.section syd2b
-.section syd3a
-.section syd3b
-.section syd4a
-.section syd4b
-.section syd5a
-.section syd5b
-.section syd6a
-.section syd6b
-.section syd7a
-.section syd7b
-.section syd8a
-.section syd8b
-.section syd9a
-.section syd9b
-.section syd0a
-.section syd0b
-.section syeaa
-.section syeab
-.section syeba
-.section syebb
-.section syeca
-.section syecb
-.section syeda
-.section syedb
-.section syeea
-.section syeeb
-.section syefa
-.section syefb
-.section syega
-.section syegb
-.section syeha
-.section syehb
-.section syeia
-.section syeib
-.section syeja
-.section syejb
-.section syeka
-.section syekb
-.section syela
-.section syelb
-.section syema
-.section syemb
-.section syena
-.section syenb
-.section syeoa
-.section syeob
-.section syepa
-.section syepb
-.section syeqa
-.section syeqb
-.section syera
-.section syerb
-.section syesa
-.section syesb
-.section syeta
-.section syetb
-.section syeua
-.section syeub
-.section syeva
-.section syevb
-.section syewa
-.section syewb
-.section syexa
-.section syexb
-.section syeya
-.section syeyb
-.section syeza
-.section syezb
-.section sye1a
-.section sye1b
-.section sye2a
-.section sye2b
-.section sye3a
-.section sye3b
-.section sye4a
-.section sye4b
-.section sye5a
-.section sye5b
-.section sye6a
-.section sye6b
-.section sye7a
-.section sye7b
-.section sye8a
-.section sye8b
-.section sye9a
-.section sye9b
-.section sye0a
-.section sye0b
-.section syfaa
-.section syfab
-.section syfba
-.section syfbb
-.section syfca
-.section syfcb
-.section syfda
-.section syfdb
-.section syfea
-.section syfeb
-.section syffa
-.section syffb
-.section syfga
-.section syfgb
-.section syfha
-.section syfhb
-.section syfia
-.section syfib
-.section syfja
-.section syfjb
-.section syfka
-.section syfkb
-.section syfla
-.section syflb
-.section syfma
-.section syfmb
-.section syfna
-.section syfnb
-.section syfoa
-.section syfob
-.section syfpa
-.section syfpb
-.section syfqa
-.section syfqb
-.section syfra
-.section syfrb
-.section syfsa
-.section syfsb
-.section syfta
-.section syftb
-.section syfua
-.section syfub
-.section syfva
-.section syfvb
-.section syfwa
-.section syfwb
-.section syfxa
-.section syfxb
-.section syfya
-.section syfyb
-.section syfza
-.section syfzb
-.section syf1a
-.section syf1b
-.section syf2a
-.section syf2b
-.section syf3a
-.section syf3b
-.section syf4a
-.section syf4b
-.section syf5a
-.section syf5b
-.section syf6a
-.section syf6b
-.section syf7a
-.section syf7b
-.section syf8a
-.section syf8b
-.section syf9a
-.section syf9b
-.section syf0a
-.section syf0b
-.section sygaa
-.section sygab
-.section sygba
-.section sygbb
-.section sygca
-.section sygcb
-.section sygda
-.section sygdb
-.section sygea
-.section sygeb
-.section sygfa
-.section sygfb
-.section sygga
-.section syggb
-.section sygha
-.section syghb
-.section sygia
-.section sygib
-.section sygja
-.section sygjb
-.section sygka
-.section sygkb
-.section sygla
-.section syglb
-.section sygma
-.section sygmb
-.section sygna
-.section sygnb
-.section sygoa
-.section sygob
-.section sygpa
-.section sygpb
-.section sygqa
-.section sygqb
-.section sygra
-.section sygrb
-.section sygsa
-.section sygsb
-.section sygta
-.section sygtb
-.section sygua
-.section sygub
-.section sygva
-.section sygvb
-.section sygwa
-.section sygwb
-.section sygxa
-.section sygxb
-.section sygya
-.section sygyb
-.section sygza
-.section sygzb
-.section syg1a
-.section syg1b
-.section syg2a
-.section syg2b
-.section syg3a
-.section syg3b
-.section syg4a
-.section syg4b
-.section syg5a
-.section syg5b
-.section syg6a
-.section syg6b
-.section syg7a
-.section syg7b
-.section syg8a
-.section syg8b
-.section syg9a
-.section syg9b
-.section syg0a
-.section syg0b
-.section syhaa
-.section syhab
-.section syhba
-.section syhbb
-.section syhca
-.section syhcb
-.section syhda
-.section syhdb
-.section syhea
-.section syheb
-.section syhfa
-.section syhfb
-.section syhga
-.section syhgb
-.section syhha
-.section syhhb
-.section syhia
-.section syhib
-.section syhja
-.section syhjb
-.section syhka
-.section syhkb
-.section syhla
-.section syhlb
-.section syhma
-.section syhmb
-.section syhna
-.section syhnb
-.section syhoa
-.section syhob
-.section syhpa
-.section syhpb
-.section syhqa
-.section syhqb
-.section syhra
-.section syhrb
-.section syhsa
-.section syhsb
-.section syhta
-.section syhtb
-.section syhua
-.section syhub
-.section syhva
-.section syhvb
-.section syhwa
-.section syhwb
-.section syhxa
-.section syhxb
-.section syhya
-.section syhyb
-.section syhza
-.section syhzb
-.section syh1a
-.section syh1b
-.section syh2a
-.section syh2b
-.section syh3a
-.section syh3b
-.section syh4a
-.section syh4b
-.section syh5a
-.section syh5b
-.section syh6a
-.section syh6b
-.section syh7a
-.section syh7b
-.section syh8a
-.section syh8b
-.section syh9a
-.section syh9b
-.section syh0a
-.section syh0b
-.section syiaa
-.section syiab
-.section syiba
-.section syibb
-.section syica
-.section syicb
-.section syida
-.section syidb
-.section syiea
-.section syieb
-.section syifa
-.section syifb
-.section syiga
-.section syigb
-.section syiha
-.section syihb
-.section syiia
-.section syiib
-.section syija
-.section syijb
-.section syika
-.section syikb
-.section syila
-.section syilb
-.section syima
-.section syimb
-.section syina
-.section syinb
-.section syioa
-.section syiob
-.section syipa
-.section syipb
-.section syiqa
-.section syiqb
-.section syira
-.section syirb
-.section syisa
-.section syisb
-.section syita
-.section syitb
-.section syiua
-.section syiub
-.section syiva
-.section syivb
-.section syiwa
-.section syiwb
-.section syixa
-.section syixb
-.section syiya
-.section syiyb
-.section syiza
-.section syizb
-.section syi1a
-.section syi1b
-.section syi2a
-.section syi2b
-.section syi3a
-.section syi3b
-.section syi4a
-.section syi4b
-.section syi5a
-.section syi5b
-.section syi6a
-.section syi6b
-.section syi7a
-.section syi7b
-.section syi8a
-.section syi8b
-.section syi9a
-.section syi9b
-.section syi0a
-.section syi0b
-.section syjaa
-.section syjab
-.section syjba
-.section syjbb
-.section syjca
-.section syjcb
-.section syjda
-.section syjdb
-.section syjea
-.section syjeb
-.section syjfa
-.section syjfb
-.section syjga
-.section syjgb
-.section syjha
-.section syjhb
-.section syjia
-.section syjib
-.section syjja
-.section syjjb
-.section syjka
-.section syjkb
-.section syjla
-.section syjlb
-.section syjma
-.section syjmb
-.section syjna
-.section syjnb
-.section syjoa
-.section syjob
-.section syjpa
-.section syjpb
-.section syjqa
-.section syjqb
-.section syjra
-.section syjrb
-.section syjsa
-.section syjsb
-.section syjta
-.section syjtb
-.section syjua
-.section syjub
-.section syjva
-.section syjvb
-.section syjwa
-.section syjwb
-.section syjxa
-.section syjxb
-.section syjya
-.section syjyb
-.section syjza
-.section syjzb
-.section syj1a
-.section syj1b
-.section syj2a
-.section syj2b
-.section syj3a
-.section syj3b
-.section syj4a
-.section syj4b
-.section syj5a
-.section syj5b
-.section syj6a
-.section syj6b
-.section syj7a
-.section syj7b
-.section syj8a
-.section syj8b
-.section syj9a
-.section syj9b
-.section syj0a
-.section syj0b
-.section sykaa
-.section sykab
-.section sykba
-.section sykbb
-.section sykca
-.section sykcb
-.section sykda
-.section sykdb
-.section sykea
-.section sykeb
-.section sykfa
-.section sykfb
-.section sykga
-.section sykgb
-.section sykha
-.section sykhb
-.section sykia
-.section sykib
-.section sykja
-.section sykjb
-.section sykka
-.section sykkb
-.section sykla
-.section syklb
-.section sykma
-.section sykmb
-.section sykna
-.section syknb
-.section sykoa
-.section sykob
-.section sykpa
-.section sykpb
-.section sykqa
-.section sykqb
-.section sykra
-.section sykrb
-.section syksa
-.section syksb
-.section sykta
-.section syktb
-.section sykua
-.section sykub
-.section sykva
-.section sykvb
-.section sykwa
-.section sykwb
-.section sykxa
-.section sykxb
-.section sykya
-.section sykyb
-.section sykza
-.section sykzb
-.section syk1a
-.section syk1b
-.section syk2a
-.section syk2b
-.section syk3a
-.section syk3b
-.section syk4a
-.section syk4b
-.section syk5a
-.section syk5b
-.section syk6a
-.section syk6b
-.section syk7a
-.section syk7b
-.section syk8a
-.section syk8b
-.section syk9a
-.section syk9b
-.section syk0a
-.section syk0b
-.section sylaa
-.section sylab
-.section sylba
-.section sylbb
-.section sylca
-.section sylcb
-.section sylda
-.section syldb
-.section sylea
-.section syleb
-.section sylfa
-.section sylfb
-.section sylga
-.section sylgb
-.section sylha
-.section sylhb
-.section sylia
-.section sylib
-.section sylja
-.section syljb
-.section sylka
-.section sylkb
-.section sylla
-.section syllb
-.section sylma
-.section sylmb
-.section sylna
-.section sylnb
-.section syloa
-.section sylob
-.section sylpa
-.section sylpb
-.section sylqa
-.section sylqb
-.section sylra
-.section sylrb
-.section sylsa
-.section sylsb
-.section sylta
-.section syltb
-.section sylua
-.section sylub
-.section sylva
-.section sylvb
-.section sylwa
-.section sylwb
-.section sylxa
-.section sylxb
-.section sylya
-.section sylyb
-.section sylza
-.section sylzb
-.section syl1a
-.section syl1b
-.section syl2a
-.section syl2b
-.section syl3a
-.section syl3b
-.section syl4a
-.section syl4b
-.section syl5a
-.section syl5b
-.section syl6a
-.section syl6b
-.section syl7a
-.section syl7b
-.section syl8a
-.section syl8b
-.section syl9a
-.section syl9b
-.section syl0a
-.section syl0b
-.section symaa
-.section symab
-.section symba
-.section symbb
-.section symca
-.section symcb
-.section symda
-.section symdb
-.section symea
-.section symeb
-.section symfa
-.section symfb
-.section symga
-.section symgb
-.section symha
-.section symhb
-.section symia
-.section symib
-.section symja
-.section symjb
-.section symka
-.section symkb
-.section symla
-.section symlb
-.section symma
-.section symmb
-.section symna
-.section symnb
-.section symoa
-.section symob
-.section sympa
-.section sympb
-.section symqa
-.section symqb
-.section symra
-.section symrb
-.section symsa
-.section symsb
-.section symta
-.section symtb
-.section symua
-.section symub
-.section symva
-.section symvb
-.section symwa
-.section symwb
-.section symxa
-.section symxb
-.section symya
-.section symyb
-.section symza
-.section symzb
-.section sym1a
-.section sym1b
-.section sym2a
-.section sym2b
-.section sym3a
-.section sym3b
-.section sym4a
-.section sym4b
-.section sym5a
-.section sym5b
-.section sym6a
-.section sym6b
-.section sym7a
-.section sym7b
-.section sym8a
-.section sym8b
-.section sym9a
-.section sym9b
-.section sym0a
-.section sym0b
-.section synaa
-.section synab
-.section synba
-.section synbb
-.section synca
-.section syncb
-.section synda
-.section syndb
-.section synea
-.section syneb
-.section synfa
-.section synfb
-.section synga
-.section syngb
-.section synha
-.section synhb
-.section synia
-.section synib
-.section synja
-.section synjb
-.section synka
-.section synkb
-.section synla
-.section synlb
-.section synma
-.section synmb
-.section synna
-.section synnb
-.section synoa
-.section synob
-.section synpa
-.section synpb
-.section synqa
-.section synqb
-.section synra
-.section synrb
-.section synsa
-.section synsb
-.section synta
-.section syntb
-.section synua
-.section synub
-.section synva
-.section synvb
-.section synwa
-.section synwb
-.section synxa
-.section synxb
-.section synya
-.section synyb
-.section synza
-.section synzb
-.section syn1a
-.section syn1b
-.section syn2a
-.section syn2b
-.section syn3a
-.section syn3b
-.section syn4a
-.section syn4b
-.section syn5a
-.section syn5b
-.section syn6a
-.section syn6b
-.section syn7a
-.section syn7b
-.section syn8a
-.section syn8b
-.section syn9a
-.section syn9b
-.section syn0a
-.section syn0b
-.section syoaa
-.section syoab
-.section syoba
-.section syobb
-.section syoca
-.section syocb
-.section syoda
-.section syodb
-.section syoea
-.section syoeb
-.section syofa
-.section syofb
-.section syoga
-.section syogb
-.section syoha
-.section syohb
-.section syoia
-.section syoib
-.section syoja
-.section syojb
-.section syoka
-.section syokb
-.section syola
-.section syolb
-.section syoma
-.section syomb
-.section syona
-.section syonb
-.section syooa
-.section syoob
-.section syopa
-.section syopb
-.section syoqa
-.section syoqb
-.section syora
-.section syorb
-.section syosa
-.section syosb
-.section syota
-.section syotb
-.section syoua
-.section syoub
-.section syova
-.section syovb
-.section syowa
-.section syowb
-.section syoxa
-.section syoxb
-.section syoya
-.section syoyb
-.section syoza
-.section syozb
-.section syo1a
-.section syo1b
-.section syo2a
-.section syo2b
-.section syo3a
-.section syo3b
-.section syo4a
-.section syo4b
-.section syo5a
-.section syo5b
-.section syo6a
-.section syo6b
-.section syo7a
-.section syo7b
-.section syo8a
-.section syo8b
-.section syo9a
-.section syo9b
-.section syo0a
-.section syo0b
-.section sypaa
-.section sypab
-.section sypba
-.section sypbb
-.section sypca
-.section sypcb
-.section sypda
-.section sypdb
-.section sypea
-.section sypeb
-.section sypfa
-.section sypfb
-.section sypga
-.section sypgb
-.section sypha
-.section syphb
-.section sypia
-.section sypib
-.section sypja
-.section sypjb
-.section sypka
-.section sypkb
-.section sypla
-.section syplb
-.section sypma
-.section sypmb
-.section sypna
-.section sypnb
-.section sypoa
-.section sypob
-.section syppa
-.section syppb
-.section sypqa
-.section sypqb
-.section sypra
-.section syprb
-.section sypsa
-.section sypsb
-.section sypta
-.section syptb
-.section sypua
-.section sypub
-.section sypva
-.section sypvb
-.section sypwa
-.section sypwb
-.section sypxa
-.section sypxb
-.section sypya
-.section sypyb
-.section sypza
-.section sypzb
-.section syp1a
-.section syp1b
-.section syp2a
-.section syp2b
-.section syp3a
-.section syp3b
-.section syp4a
-.section syp4b
-.section syp5a
-.section syp5b
-.section syp6a
-.section syp6b
-.section syp7a
-.section syp7b
-.section syp8a
-.section syp8b
-.section syp9a
-.section syp9b
-.section syp0a
-.section syp0b
-.section syqaa
-.section syqab
-.section syqba
-.section syqbb
-.section syqca
-.section syqcb
-.section syqda
-.section syqdb
-.section syqea
-.section syqeb
-.section syqfa
-.section syqfb
-.section syqga
-.section syqgb
-.section syqha
-.section syqhb
-.section syqia
-.section syqib
-.section syqja
-.section syqjb
-.section syqka
-.section syqkb
-.section syqla
-.section syqlb
-.section syqma
-.section syqmb
-.section syqna
-.section syqnb
-.section syqoa
-.section syqob
-.section syqpa
-.section syqpb
-.section syqqa
-.section syqqb
-.section syqra
-.section syqrb
-.section syqsa
-.section syqsb
-.section syqta
-.section syqtb
-.section syqua
-.section syqub
-.section syqva
-.section syqvb
-.section syqwa
-.section syqwb
-.section syqxa
-.section syqxb
-.section syqya
-.section syqyb
-.section syqza
-.section syqzb
-.section syq1a
-.section syq1b
-.section syq2a
-.section syq2b
-.section syq3a
-.section syq3b
-.section syq4a
-.section syq4b
-.section syq5a
-.section syq5b
-.section syq6a
-.section syq6b
-.section syq7a
-.section syq7b
-.section syq8a
-.section syq8b
-.section syq9a
-.section syq9b
-.section syq0a
-.section syq0b
-.section syraa
-.section syrab
-.section syrba
-.section syrbb
-.section syrca
-.section syrcb
-.section syrda
-.section syrdb
-.section syrea
-.section syreb
-.section syrfa
-.section syrfb
-.section syrga
-.section syrgb
-.section syrha
-.section syrhb
-.section syria
-.section syrib
-.section syrja
-.section syrjb
-.section syrka
-.section syrkb
-.section syrla
-.section syrlb
-.section syrma
-.section syrmb
-.section syrna
-.section syrnb
-.section syroa
-.section syrob
-.section syrpa
-.section syrpb
-.section syrqa
-.section syrqb
-.section syrra
-.section syrrb
-.section syrsa
-.section syrsb
-.section syrta
-.section syrtb
-.section syrua
-.section syrub
-.section syrva
-.section syrvb
-.section syrwa
-.section syrwb
-.section syrxa
-.section syrxb
-.section syrya
-.section syryb
-.section syrza
-.section syrzb
-.section syr1a
-.section syr1b
-.section syr2a
-.section syr2b
-.section syr3a
-.section syr3b
-.section syr4a
-.section syr4b
-.section syr5a
-.section syr5b
-.section syr6a
-.section syr6b
-.section syr7a
-.section syr7b
-.section syr8a
-.section syr8b
-.section syr9a
-.section syr9b
-.section syr0a
-.section syr0b
-.section sysaa
-.section sysab
-.section sysba
-.section sysbb
-.section sysca
-.section syscb
-.section sysda
-.section sysdb
-.section sysea
-.section syseb
-.section sysfa
-.section sysfb
-.section sysga
-.section sysgb
-.section sysha
-.section syshb
-.section sysia
-.section sysib
-.section sysja
-.section sysjb
-.section syska
-.section syskb
-.section sysla
-.section syslb
-.section sysma
-.section sysmb
-.section sysna
-.section sysnb
-.section sysoa
-.section sysob
-.section syspa
-.section syspb
-.section sysqa
-.section sysqb
-.section sysra
-.section sysrb
-.section syssa
-.section syssb
-.section systa
-.section systb
-.section sysua
-.section sysub
-.section sysva
-.section sysvb
-.section syswa
-.section syswb
-.section sysxa
-.section sysxb
-.section sysya
-.section sysyb
-.section sysza
-.section syszb
-.section sys1a
-.section sys1b
-.section sys2a
-.section sys2b
-.section sys3a
-.section sys3b
-.section sys4a
-.section sys4b
-.section sys5a
-.section sys5b
-.section sys6a
-.section sys6b
-.section sys7a
-.section sys7b
-.section sys8a
-.section sys8b
-.section sys9a
-.section sys9b
-.section sys0a
-.section sys0b
-.section sytaa
-.section sytab
-.section sytba
-.section sytbb
-.section sytca
-.section sytcb
-.section sytda
-.section sytdb
-.section sytea
-.section syteb
-.section sytfa
-.section sytfb
-.section sytga
-.section sytgb
-.section sytha
-.section sythb
-.section sytia
-.section sytib
-.section sytja
-.section sytjb
-.section sytka
-.section sytkb
-.section sytla
-.section sytlb
-.section sytma
-.section sytmb
-.section sytna
-.section sytnb
-.section sytoa
-.section sytob
-.section sytpa
-.section sytpb
-.section sytqa
-.section sytqb
-.section sytra
-.section sytrb
-.section sytsa
-.section sytsb
-.section sytta
-.section syttb
-.section sytua
-.section sytub
-.section sytva
-.section sytvb
-.section sytwa
-.section sytwb
-.section sytxa
-.section sytxb
-.section sytya
-.section sytyb
-.section sytza
-.section sytzb
-.section syt1a
-.section syt1b
-.section syt2a
-.section syt2b
-.section syt3a
-.section syt3b
-.section syt4a
-.section syt4b
-.section syt5a
-.section syt5b
-.section syt6a
-.section syt6b
-.section syt7a
-.section syt7b
-.section syt8a
-.section syt8b
-.section syt9a
-.section syt9b
-.section syt0a
-.section syt0b
-.section syuaa
-.section syuab
-.section syuba
-.section syubb
-.section syuca
-.section syucb
-.section syuda
-.section syudb
-.section syuea
-.section syueb
-.section syufa
-.section syufb
-.section syuga
-.section syugb
-.section syuha
-.section syuhb
-.section syuia
-.section syuib
-.section syuja
-.section syujb
-.section syuka
-.section syukb
-.section syula
-.section syulb
-.section syuma
-.section syumb
-.section syuna
-.section syunb
-.section syuoa
-.section syuob
-.section syupa
-.section syupb
-.section syuqa
-.section syuqb
-.section syura
-.section syurb
-.section syusa
-.section syusb
-.section syuta
-.section syutb
-.section syuua
-.section syuub
-.section syuva
-.section syuvb
-.section syuwa
-.section syuwb
-.section syuxa
-.section syuxb
-.section syuya
-.section syuyb
-.section syuza
-.section syuzb
-.section syu1a
-.section syu1b
-.section syu2a
-.section syu2b
-.section syu3a
-.section syu3b
-.section syu4a
-.section syu4b
-.section syu5a
-.section syu5b
-.section syu6a
-.section syu6b
-.section syu7a
-.section syu7b
-.section syu8a
-.section syu8b
-.section syu9a
-.section syu9b
-.section syu0a
-.section syu0b
-.section syvaa
-.section syvab
-.section syvba
-.section syvbb
-.section syvca
-.section syvcb
-.section syvda
-.section syvdb
-.section syvea
-.section syveb
-.section syvfa
-.section syvfb
-.section syvga
-.section syvgb
-.section syvha
-.section syvhb
-.section syvia
-.section syvib
-.section syvja
-.section syvjb
-.section syvka
-.section syvkb
-.section syvla
-.section syvlb
-.section syvma
-.section syvmb
-.section syvna
-.section syvnb
-.section syvoa
-.section syvob
-.section syvpa
-.section syvpb
-.section syvqa
-.section syvqb
-.section syvra
-.section syvrb
-.section syvsa
-.section syvsb
-.section syvta
-.section syvtb
-.section syvua
-.section syvub
-.section syvva
-.section syvvb
-.section syvwa
-.section syvwb
-.section syvxa
-.section syvxb
-.section syvya
-.section syvyb
-.section syvza
-.section syvzb
-.section syv1a
-.section syv1b
-.section syv2a
-.section syv2b
-.section syv3a
-.section syv3b
-.section syv4a
-.section syv4b
-.section syv5a
-.section syv5b
-.section syv6a
-.section syv6b
-.section syv7a
-.section syv7b
-.section syv8a
-.section syv8b
-.section syv9a
-.section syv9b
-.section syv0a
-.section syv0b
-.section sywaa
-.section sywab
-.section sywba
-.section sywbb
-.section sywca
-.section sywcb
-.section sywda
-.section sywdb
-.section sywea
-.section syweb
-.section sywfa
-.section sywfb
-.section sywga
-.section sywgb
-.section sywha
-.section sywhb
-.section sywia
-.section sywib
-.section sywja
-.section sywjb
-.section sywka
-.section sywkb
-.section sywla
-.section sywlb
-.section sywma
-.section sywmb
-.section sywna
-.section sywnb
-.section sywoa
-.section sywob
-.section sywpa
-.section sywpb
-.section sywqa
-.section sywqb
-.section sywra
-.section sywrb
-.section sywsa
-.section sywsb
-.section sywta
-.section sywtb
-.section sywua
-.section sywub
-.section sywva
-.section sywvb
-.section sywwa
-.section sywwb
-.section sywxa
-.section sywxb
-.section sywya
-.section sywyb
-.section sywza
-.section sywzb
-.section syw1a
-.section syw1b
-.section syw2a
-.section syw2b
-.section syw3a
-.section syw3b
-.section syw4a
-.section syw4b
-.section syw5a
-.section syw5b
-.section syw6a
-.section syw6b
-.section syw7a
-.section syw7b
-.section syw8a
-.section syw8b
-.section syw9a
-.section syw9b
-.section syw0a
-.section syw0b
-.section syxaa
-.section syxab
-.section syxba
-.section syxbb
-.section syxca
-.section syxcb
-.section syxda
-.section syxdb
-.section syxea
-.section syxeb
-.section syxfa
-.section syxfb
-.section syxga
-.section syxgb
-.section syxha
-.section syxhb
-.section syxia
-.section syxib
-.section syxja
-.section syxjb
-.section syxka
-.section syxkb
-.section syxla
-.section syxlb
-.section syxma
-.section syxmb
-.section syxna
-.section syxnb
-.section syxoa
-.section syxob
-.section syxpa
-.section syxpb
-.section syxqa
-.section syxqb
-.section syxra
-.section syxrb
-.section syxsa
-.section syxsb
-.section syxta
-.section syxtb
-.section syxua
-.section syxub
-.section syxva
-.section syxvb
-.section syxwa
-.section syxwb
-.section syxxa
-.section syxxb
-.section syxya
-.section syxyb
-.section syxza
-.section syxzb
-.section syx1a
-.section syx1b
-.section syx2a
-.section syx2b
-.section syx3a
-.section syx3b
-.section syx4a
-.section syx4b
-.section syx5a
-.section syx5b
-.section syx6a
-.section syx6b
-.section syx7a
-.section syx7b
-.section syx8a
-.section syx8b
-.section syx9a
-.section syx9b
-.section syx0a
-.section syx0b
-.section syyaa
-.section syyab
-.section syyba
-.section syybb
-.section syyca
-.section syycb
-.section syyda
-.section syydb
-.section syyea
-.section syyeb
-.section syyfa
-.section syyfb
-.section syyga
-.section syygb
-.section syyha
-.section syyhb
-.section syyia
-.section syyib
-.section syyja
-.section syyjb
-.section syyka
-.section syykb
-.section syyla
-.section syylb
-.section syyma
-.section syymb
-.section syyna
-.section syynb
-.section syyoa
-.section syyob
-.section syypa
-.section syypb
-.section syyqa
-.section syyqb
-.section syyra
-.section syyrb
-.section syysa
-.section syysb
-.section syyta
-.section syytb
-.section syyua
-.section syyub
-.section syyva
-.section syyvb
-.section syywa
-.section syywb
-.section syyxa
-.section syyxb
-.section syyya
-.section syyyb
-.section syyza
-.section syyzb
-.section syy1a
-.section syy1b
-.section syy2a
-.section syy2b
-.section syy3a
-.section syy3b
-.section syy4a
-.section syy4b
-.section syy5a
-.section syy5b
-.section syy6a
-.section syy6b
-.section syy7a
-.section syy7b
-.section syy8a
-.section syy8b
-.section syy9a
-.section syy9b
-.section syy0a
-.section syy0b
-.section syzaa
-.section syzab
-.section syzba
-.section syzbb
-.section syzca
-.section syzcb
-.section syzda
-.section syzdb
-.section syzea
-.section syzeb
-.section syzfa
-.section syzfb
-.section syzga
-.section syzgb
-.section syzha
-.section syzhb
-.section syzia
-.section syzib
-.section syzja
-.section syzjb
-.section syzka
-.section syzkb
-.section syzla
-.section syzlb
-.section syzma
-.section syzmb
-.section syzna
-.section syznb
-.section syzoa
-.section syzob
-.section syzpa
-.section syzpb
-.section syzqa
-.section syzqb
-.section syzra
-.section syzrb
-.section syzsa
-.section syzsb
-.section syzta
-.section syztb
-.section syzua
-.section syzub
-.section syzva
-.section syzvb
-.section syzwa
-.section syzwb
-.section syzxa
-.section syzxb
-.section syzya
-.section syzyb
-.section syzza
-.section syzzb
-.section syz1a
-.section syz1b
-.section syz2a
-.section syz2b
-.section syz3a
-.section syz3b
-.section syz4a
-.section syz4b
-.section syz5a
-.section syz5b
-.section syz6a
-.section syz6b
-.section syz7a
-.section syz7b
-.section syz8a
-.section syz8b
-.section syz9a
-.section syz9b
-.section syz0a
-.section syz0b
-.section sy1aa
-.section sy1ab
-.section sy1ba
-.section sy1bb
-.section sy1ca
-.section sy1cb
-.section sy1da
-.section sy1db
-.section sy1ea
-.section sy1eb
-.section sy1fa
-.section sy1fb
-.section sy1ga
-.section sy1gb
-.section sy1ha
-.section sy1hb
-.section sy1ia
-.section sy1ib
-.section sy1ja
-.section sy1jb
-.section sy1ka
-.section sy1kb
-.section sy1la
-.section sy1lb
-.section sy1ma
-.section sy1mb
-.section sy1na
-.section sy1nb
-.section sy1oa
-.section sy1ob
-.section sy1pa
-.section sy1pb
-.section sy1qa
-.section sy1qb
-.section sy1ra
-.section sy1rb
-.section sy1sa
-.section sy1sb
-.section sy1ta
-.section sy1tb
-.section sy1ua
-.section sy1ub
-.section sy1va
-.section sy1vb
-.section sy1wa
-.section sy1wb
-.section sy1xa
-.section sy1xb
-.section sy1ya
-.section sy1yb
-.section sy1za
-.section sy1zb
-.section sy11a
-.section sy11b
-.section sy12a
-.section sy12b
-.section sy13a
-.section sy13b
-.section sy14a
-.section sy14b
-.section sy15a
-.section sy15b
-.section sy16a
-.section sy16b
-.section sy17a
-.section sy17b
-.section sy18a
-.section sy18b
-.section sy19a
-.section sy19b
-.section sy10a
-.section sy10b
-.section sy2aa
-.section sy2ab
-.section sy2ba
-.section sy2bb
-.section sy2ca
-.section sy2cb
-.section sy2da
-.section sy2db
-.section sy2ea
-.section sy2eb
-.section sy2fa
-.section sy2fb
-.section sy2ga
-.section sy2gb
-.section sy2ha
-.section sy2hb
-.section sy2ia
-.section sy2ib
-.section sy2ja
-.section sy2jb
-.section sy2ka
-.section sy2kb
-.section sy2la
-.section sy2lb
-.section sy2ma
-.section sy2mb
-.section sy2na
-.section sy2nb
-.section sy2oa
-.section sy2ob
-.section sy2pa
-.section sy2pb
-.section sy2qa
-.section sy2qb
-.section sy2ra
-.section sy2rb
-.section sy2sa
-.section sy2sb
-.section sy2ta
-.section sy2tb
-.section sy2ua
-.section sy2ub
-.section sy2va
-.section sy2vb
-.section sy2wa
-.section sy2wb
-.section sy2xa
-.section sy2xb
-.section sy2ya
-.section sy2yb
-.section sy2za
-.section sy2zb
-.section sy21a
-.section sy21b
-.section sy22a
-.section sy22b
-.section sy23a
-.section sy23b
-.section sy24a
-.section sy24b
-.section sy25a
-.section sy25b
-.section sy26a
-.section sy26b
-.section sy27a
-.section sy27b
-.section sy28a
-.section sy28b
-.section sy29a
-.section sy29b
-.section sy20a
-.section sy20b
-.section sy3aa
-.section sy3ab
-.section sy3ba
-.section sy3bb
-.section sy3ca
-.section sy3cb
-.section sy3da
-.section sy3db
-.section sy3ea
-.section sy3eb
-.section sy3fa
-.section sy3fb
-.section sy3ga
-.section sy3gb
-.section sy3ha
-.section sy3hb
-.section sy3ia
-.section sy3ib
-.section sy3ja
-.section sy3jb
-.section sy3ka
-.section sy3kb
-.section sy3la
-.section sy3lb
-.section sy3ma
-.section sy3mb
-.section sy3na
-.section sy3nb
-.section sy3oa
-.section sy3ob
-.section sy3pa
-.section sy3pb
-.section sy3qa
-.section sy3qb
-.section sy3ra
-.section sy3rb
-.section sy3sa
-.section sy3sb
-.section sy3ta
-.section sy3tb
-.section sy3ua
-.section sy3ub
-.section sy3va
-.section sy3vb
-.section sy3wa
-.section sy3wb
-.section sy3xa
-.section sy3xb
-.section sy3ya
-.section sy3yb
-.section sy3za
-.section sy3zb
-.section sy31a
-.section sy31b
-.section sy32a
-.section sy32b
-.section sy33a
-.section sy33b
-.section sy34a
-.section sy34b
-.section sy35a
-.section sy35b
-.section sy36a
-.section sy36b
-.section sy37a
-.section sy37b
-.section sy38a
-.section sy38b
-.section sy39a
-.section sy39b
-.section sy30a
-.section sy30b
-.section sy4aa
-.section sy4ab
-.section sy4ba
-.section sy4bb
-.section sy4ca
-.section sy4cb
-.section sy4da
-.section sy4db
-.section sy4ea
-.section sy4eb
-.section sy4fa
-.section sy4fb
-.section sy4ga
-.section sy4gb
-.section sy4ha
-.section sy4hb
-.section sy4ia
-.section sy4ib
-.section sy4ja
-.section sy4jb
-.section sy4ka
-.section sy4kb
-.section sy4la
-.section sy4lb
-.section sy4ma
-.section sy4mb
-.section sy4na
-.section sy4nb
-.section sy4oa
-.section sy4ob
-.section sy4pa
-.section sy4pb
-.section sy4qa
-.section sy4qb
-.section sy4ra
-.section sy4rb
-.section sy4sa
-.section sy4sb
-.section sy4ta
-.section sy4tb
-.section sy4ua
-.section sy4ub
-.section sy4va
-.section sy4vb
-.section sy4wa
-.section sy4wb
-.section sy4xa
-.section sy4xb
-.section sy4ya
-.section sy4yb
-.section sy4za
-.section sy4zb
-.section sy41a
-.section sy41b
-.section sy42a
-.section sy42b
-.section sy43a
-.section sy43b
-.section sy44a
-.section sy44b
-.section sy45a
-.section sy45b
-.section sy46a
-.section sy46b
-.section sy47a
-.section sy47b
-.section sy48a
-.section sy48b
-.section sy49a
-.section sy49b
-.section sy40a
-.section sy40b
-.section sy5aa
-.section sy5ab
-.section sy5ba
-.section sy5bb
-.section sy5ca
-.section sy5cb
-.section sy5da
-.section sy5db
-.section sy5ea
-.section sy5eb
-.section sy5fa
-.section sy5fb
-.section sy5ga
-.section sy5gb
-.section sy5ha
-.section sy5hb
-.section sy5ia
-.section sy5ib
-.section sy5ja
-.section sy5jb
-.section sy5ka
-.section sy5kb
-.section sy5la
-.section sy5lb
-.section sy5ma
-.section sy5mb
-.section sy5na
-.section sy5nb
-.section sy5oa
-.section sy5ob
-.section sy5pa
-.section sy5pb
-.section sy5qa
-.section sy5qb
-.section sy5ra
-.section sy5rb
-.section sy5sa
-.section sy5sb
-.section sy5ta
-.section sy5tb
-.section sy5ua
-.section sy5ub
-.section sy5va
-.section sy5vb
-.section sy5wa
-.section sy5wb
-.section sy5xa
-.section sy5xb
-.section sy5ya
-.section sy5yb
-.section sy5za
-.section sy5zb
-.section sy51a
-.section sy51b
-.section sy52a
-.section sy52b
-.section sy53a
-.section sy53b
-.section sy54a
-.section sy54b
-.section sy55a
-.section sy55b
-.section sy56a
-.section sy56b
-.section sy57a
-.section sy57b
-.section sy58a
-.section sy58b
-.section sy59a
-.section sy59b
-.section sy50a
-.section sy50b
-.section sy6aa
-.section sy6ab
-.section sy6ba
-.section sy6bb
-.section sy6ca
-.section sy6cb
-.section sy6da
-.section sy6db
-.section sy6ea
-.section sy6eb
-.section sy6fa
-.section sy6fb
-.section sy6ga
-.section sy6gb
-.section sy6ha
-.section sy6hb
-.section sy6ia
-.section sy6ib
-.section sy6ja
-.section sy6jb
-.section sy6ka
-.section sy6kb
-.section sy6la
-.section sy6lb
-.section sy6ma
-.section sy6mb
-.section sy6na
-.section sy6nb
-.section sy6oa
-.section sy6ob
-.section sy6pa
-.section sy6pb
-.section sy6qa
-.section sy6qb
-.section sy6ra
-.section sy6rb
-.section sy6sa
-.section sy6sb
-.section sy6ta
-.section sy6tb
-.section sy6ua
-.section sy6ub
-.section sy6va
-.section sy6vb
-.section sy6wa
-.section sy6wb
-.section sy6xa
-.section sy6xb
-.section sy6ya
-.section sy6yb
-.section sy6za
-.section sy6zb
-.section sy61a
-.section sy61b
-.section sy62a
-.section sy62b
-.section sy63a
-.section sy63b
-.section sy64a
-.section sy64b
-.section sy65a
-.section sy65b
-.section sy66a
-.section sy66b
-.section sy67a
-.section sy67b
-.section sy68a
-.section sy68b
-.section sy69a
-.section sy69b
-.section sy60a
-.section sy60b
-.section sy7aa
-.section sy7ab
-.section sy7ba
-.section sy7bb
-.section sy7ca
-.section sy7cb
-.section sy7da
-.section sy7db
-.section sy7ea
-.section sy7eb
-.section sy7fa
-.section sy7fb
-.section sy7ga
-.section sy7gb
-.section sy7ha
-.section sy7hb
-.section sy7ia
-.section sy7ib
-.section sy7ja
-.section sy7jb
-.section sy7ka
-.section sy7kb
-.section sy7la
-.section sy7lb
-.section sy7ma
-.section sy7mb
-.section sy7na
-.section sy7nb
-.section sy7oa
-.section sy7ob
-.section sy7pa
-.section sy7pb
-.section sy7qa
-.section sy7qb
-.section sy7ra
-.section sy7rb
-.section sy7sa
-.section sy7sb
-.section sy7ta
-.section sy7tb
-.section sy7ua
-.section sy7ub
-.section sy7va
-.section sy7vb
-.section sy7wa
-.section sy7wb
-.section sy7xa
-.section sy7xb
-.section sy7ya
-.section sy7yb
-.section sy7za
-.section sy7zb
-.section sy71a
-.section sy71b
-.section sy72a
-.section sy72b
-.section sy73a
-.section sy73b
-.section sy74a
-.section sy74b
-.section sy75a
-.section sy75b
-.section sy76a
-.section sy76b
-.section sy77a
-.section sy77b
-.section sy78a
-.section sy78b
-.section sy79a
-.section sy79b
-.section sy70a
-.section sy70b
-.section sy8aa
-.section sy8ab
-.section sy8ba
-.section sy8bb
-.section sy8ca
-.section sy8cb
-.section sy8da
-.section sy8db
-.section sy8ea
-.section sy8eb
-.section sy8fa
-.section sy8fb
-.section sy8ga
-.section sy8gb
-.section sy8ha
-.section sy8hb
-.section sy8ia
-.section sy8ib
-.section sy8ja
-.section sy8jb
-.section sy8ka
-.section sy8kb
-.section sy8la
-.section sy8lb
-.section sy8ma
-.section sy8mb
-.section sy8na
-.section sy8nb
-.section sy8oa
-.section sy8ob
-.section sy8pa
-.section sy8pb
-.section sy8qa
-.section sy8qb
-.section sy8ra
-.section sy8rb
-.section sy8sa
-.section sy8sb
-.section sy8ta
-.section sy8tb
-.section sy8ua
-.section sy8ub
-.section sy8va
-.section sy8vb
-.section sy8wa
-.section sy8wb
-.section sy8xa
-.section sy8xb
-.section sy8ya
-.section sy8yb
-.section sy8za
-.section sy8zb
-.section sy81a
-.section sy81b
-.section sy82a
-.section sy82b
-.section sy83a
-.section sy83b
-.section sy84a
-.section sy84b
-.section sy85a
-.section sy85b
-.section sy86a
-.section sy86b
-.section sy87a
-.section sy87b
-.section sy88a
-.section sy88b
-.section sy89a
-.section sy89b
-.section sy80a
-.section sy80b
-.section sy9aa
-.section sy9ab
-.section sy9ba
-.section sy9bb
-.section sy9ca
-.section sy9cb
-.section sy9da
-.section sy9db
-.section sy9ea
-.section sy9eb
-.section sy9fa
-.section sy9fb
-.section sy9ga
-.section sy9gb
-.section sy9ha
-.section sy9hb
-.section sy9ia
-.section sy9ib
-.section sy9ja
-.section sy9jb
-.section sy9ka
-.section sy9kb
-.section sy9la
-.section sy9lb
-.section sy9ma
-.section sy9mb
-.section sy9na
-.section sy9nb
-.section sy9oa
-.section sy9ob
-.section sy9pa
-.section sy9pb
-.section sy9qa
-.section sy9qb
-.section sy9ra
-.section sy9rb
-.section sy9sa
-.section sy9sb
-.section sy9ta
-.section sy9tb
-.section sy9ua
-.section sy9ub
-.section sy9va
-.section sy9vb
-.section sy9wa
-.section sy9wb
-.section sy9xa
-.section sy9xb
-.section sy9ya
-.section sy9yb
-.section sy9za
-.section sy9zb
-.section sy91a
-.section sy91b
-.section sy92a
-.section sy92b
-.section sy93a
-.section sy93b
-.section sy94a
-.section sy94b
-.section sy95a
-.section sy95b
-.section sy96a
-.section sy96b
-.section sy97a
-.section sy97b
-.section sy98a
-.section sy98b
-.section sy99a
-.section sy99b
-.section sy90a
-.section sy90b
-.section sy0aa
-.section sy0ab
-.section sy0ba
-.section sy0bb
-.section sy0ca
-.section sy0cb
-.section sy0da
-.section sy0db
-.section sy0ea
-.section sy0eb
-.section sy0fa
-.section sy0fb
-.section sy0ga
-.section sy0gb
-.section sy0ha
-.section sy0hb
-.section sy0ia
-.section sy0ib
-.section sy0ja
-.section sy0jb
-.section sy0ka
-.section sy0kb
-.section sy0la
-.section sy0lb
-.section sy0ma
-.section sy0mb
-.section sy0na
-.section sy0nb
-.section sy0oa
-.section sy0ob
-.section sy0pa
-.section sy0pb
-.section sy0qa
-.section sy0qb
-.section sy0ra
-.section sy0rb
-.section sy0sa
-.section sy0sb
-.section sy0ta
-.section sy0tb
-.section sy0ua
-.section sy0ub
-.section sy0va
-.section sy0vb
-.section sy0wa
-.section sy0wb
-.section sy0xa
-.section sy0xb
-.section sy0ya
-.section sy0yb
-.section sy0za
-.section sy0zb
-.section sy01a
-.section sy01b
-.section sy02a
-.section sy02b
-.section sy03a
-.section sy03b
-.section sy04a
-.section sy04b
-.section sy05a
-.section sy05b
-.section sy06a
-.section sy06b
-.section sy07a
-.section sy07b
-.section sy08a
-.section sy08b
-.section sy09a
-.section sy09b
-.section sy00a
-.section sy00b
-.section szaaa
-.section szaab
-.section szaba
-.section szabb
-.section szaca
-.section szacb
-.section szada
-.section szadb
-.section szaea
-.section szaeb
-.section szafa
-.section szafb
-.section szaga
-.section szagb
-.section szaha
-.section szahb
-.section szaia
-.section szaib
-.section szaja
-.section szajb
-.section szaka
-.section szakb
-.section szala
-.section szalb
-.section szama
-.section szamb
-.section szana
-.section szanb
-.section szaoa
-.section szaob
-.section szapa
-.section szapb
-.section szaqa
-.section szaqb
-.section szara
-.section szarb
-.section szasa
-.section szasb
-.section szata
-.section szatb
-.section szaua
-.section szaub
-.section szava
-.section szavb
-.section szawa
-.section szawb
-.section szaxa
-.section szaxb
-.section szaya
-.section szayb
-.section szaza
-.section szazb
-.section sza1a
-.section sza1b
-.section sza2a
-.section sza2b
-.section sza3a
-.section sza3b
-.section sza4a
-.section sza4b
-.section sza5a
-.section sza5b
-.section sza6a
-.section sza6b
-.section sza7a
-.section sza7b
-.section sza8a
-.section sza8b
-.section sza9a
-.section sza9b
-.section sza0a
-.section sza0b
-.section szbaa
-.section szbab
-.section szbba
-.section szbbb
-.section szbca
-.section szbcb
-.section szbda
-.section szbdb
-.section szbea
-.section szbeb
-.section szbfa
-.section szbfb
-.section szbga
-.section szbgb
-.section szbha
-.section szbhb
-.section szbia
-.section szbib
-.section szbja
-.section szbjb
-.section szbka
-.section szbkb
-.section szbla
-.section szblb
-.section szbma
-.section szbmb
-.section szbna
-.section szbnb
-.section szboa
-.section szbob
-.section szbpa
-.section szbpb
-.section szbqa
-.section szbqb
-.section szbra
-.section szbrb
-.section szbsa
-.section szbsb
-.section szbta
-.section szbtb
-.section szbua
-.section szbub
-.section szbva
-.section szbvb
-.section szbwa
-.section szbwb
-.section szbxa
-.section szbxb
-.section szbya
-.section szbyb
-.section szbza
-.section szbzb
-.section szb1a
-.section szb1b
-.section szb2a
-.section szb2b
-.section szb3a
-.section szb3b
-.section szb4a
-.section szb4b
-.section szb5a
-.section szb5b
-.section szb6a
-.section szb6b
-.section szb7a
-.section szb7b
-.section szb8a
-.section szb8b
-.section szb9a
-.section szb9b
-.section szb0a
-.section szb0b
-.section szcaa
-.section szcab
-.section szcba
-.section szcbb
-.section szcca
-.section szccb
-.section szcda
-.section szcdb
-.section szcea
-.section szceb
-.section szcfa
-.section szcfb
-.section szcga
-.section szcgb
-.section szcha
-.section szchb
-.section szcia
-.section szcib
-.section szcja
-.section szcjb
-.section szcka
-.section szckb
-.section szcla
-.section szclb
-.section szcma
-.section szcmb
-.section szcna
-.section szcnb
-.section szcoa
-.section szcob
-.section szcpa
-.section szcpb
-.section szcqa
-.section szcqb
-.section szcra
-.section szcrb
-.section szcsa
-.section szcsb
-.section szcta
-.section szctb
-.section szcua
-.section szcub
-.section szcva
-.section szcvb
-.section szcwa
-.section szcwb
-.section szcxa
-.section szcxb
-.section szcya
-.section szcyb
-.section szcza
-.section szczb
-.section szc1a
-.section szc1b
-.section szc2a
-.section szc2b
-.section szc3a
-.section szc3b
-.section szc4a
-.section szc4b
-.section szc5a
-.section szc5b
-.section szc6a
-.section szc6b
-.section szc7a
-.section szc7b
-.section szc8a
-.section szc8b
-.section szc9a
-.section szc9b
-.section szc0a
-.section szc0b
-.section szdaa
-.section szdab
-.section szdba
-.section szdbb
-.section szdca
-.section szdcb
-.section szdda
-.section szddb
-.section szdea
-.section szdeb
-.section szdfa
-.section szdfb
-.section szdga
-.section szdgb
-.section szdha
-.section szdhb
-.section szdia
-.section szdib
-.section szdja
-.section szdjb
-.section szdka
-.section szdkb
-.section szdla
-.section szdlb
-.section szdma
-.section szdmb
-.section szdna
-.section szdnb
-.section szdoa
-.section szdob
-.section szdpa
-.section szdpb
-.section szdqa
-.section szdqb
-.section szdra
-.section szdrb
-.section szdsa
-.section szdsb
-.section szdta
-.section szdtb
-.section szdua
-.section szdub
-.section szdva
-.section szdvb
-.section szdwa
-.section szdwb
-.section szdxa
-.section szdxb
-.section szdya
-.section szdyb
-.section szdza
-.section szdzb
-.section szd1a
-.section szd1b
-.section szd2a
-.section szd2b
-.section szd3a
-.section szd3b
-.section szd4a
-.section szd4b
-.section szd5a
-.section szd5b
-.section szd6a
-.section szd6b
-.section szd7a
-.section szd7b
-.section szd8a
-.section szd8b
-.section szd9a
-.section szd9b
-.section szd0a
-.section szd0b
-.section szeaa
-.section szeab
-.section szeba
-.section szebb
-.section szeca
-.section szecb
-.section szeda
-.section szedb
-.section szeea
-.section szeeb
-.section szefa
-.section szefb
-.section szega
-.section szegb
-.section szeha
-.section szehb
-.section szeia
-.section szeib
-.section szeja
-.section szejb
-.section szeka
-.section szekb
-.section szela
-.section szelb
-.section szema
-.section szemb
-.section szena
-.section szenb
-.section szeoa
-.section szeob
-.section szepa
-.section szepb
-.section szeqa
-.section szeqb
-.section szera
-.section szerb
-.section szesa
-.section szesb
-.section szeta
-.section szetb
-.section szeua
-.section szeub
-.section szeva
-.section szevb
-.section szewa
-.section szewb
-.section szexa
-.section szexb
-.section szeya
-.section szeyb
-.section szeza
-.section szezb
-.section sze1a
-.section sze1b
-.section sze2a
-.section sze2b
-.section sze3a
-.section sze3b
-.section sze4a
-.section sze4b
-.section sze5a
-.section sze5b
-.section sze6a
-.section sze6b
-.section sze7a
-.section sze7b
-.section sze8a
-.section sze8b
-.section sze9a
-.section sze9b
-.section sze0a
-.section sze0b
-.section szfaa
-.section szfab
-.section szfba
-.section szfbb
-.section szfca
-.section szfcb
-.section szfda
-.section szfdb
-.section szfea
-.section szfeb
-.section szffa
-.section szffb
-.section szfga
-.section szfgb
-.section szfha
-.section szfhb
-.section szfia
-.section szfib
-.section szfja
-.section szfjb
-.section szfka
-.section szfkb
-.section szfla
-.section szflb
-.section szfma
-.section szfmb
-.section szfna
-.section szfnb
-.section szfoa
-.section szfob
-.section szfpa
-.section szfpb
-.section szfqa
-.section szfqb
-.section szfra
-.section szfrb
-.section szfsa
-.section szfsb
-.section szfta
-.section szftb
-.section szfua
-.section szfub
-.section szfva
-.section szfvb
-.section szfwa
-.section szfwb
-.section szfxa
-.section szfxb
-.section szfya
-.section szfyb
-.section szfza
-.section szfzb
-.section szf1a
-.section szf1b
-.section szf2a
-.section szf2b
-.section szf3a
-.section szf3b
-.section szf4a
-.section szf4b
-.section szf5a
-.section szf5b
-.section szf6a
-.section szf6b
-.section szf7a
-.section szf7b
-.section szf8a
-.section szf8b
-.section szf9a
-.section szf9b
-.section szf0a
-.section szf0b
-.section szgaa
-.section szgab
-.section szgba
-.section szgbb
-.section szgca
-.section szgcb
-.section szgda
-.section szgdb
-.section szgea
-.section szgeb
-.section szgfa
-.section szgfb
-.section szgga
-.section szggb
-.section szgha
-.section szghb
-.section szgia
-.section szgib
-.section szgja
-.section szgjb
-.section szgka
-.section szgkb
-.section szgla
-.section szglb
-.section szgma
-.section szgmb
-.section szgna
-.section szgnb
-.section szgoa
-.section szgob
-.section szgpa
-.section szgpb
-.section szgqa
-.section szgqb
-.section szgra
-.section szgrb
-.section szgsa
-.section szgsb
-.section szgta
-.section szgtb
-.section szgua
-.section szgub
-.section szgva
-.section szgvb
-.section szgwa
-.section szgwb
-.section szgxa
-.section szgxb
-.section szgya
-.section szgyb
-.section szgza
-.section szgzb
-.section szg1a
-.section szg1b
-.section szg2a
-.section szg2b
-.section szg3a
-.section szg3b
-.section szg4a
-.section szg4b
-.section szg5a
-.section szg5b
-.section szg6a
-.section szg6b
-.section szg7a
-.section szg7b
-.section szg8a
-.section szg8b
-.section szg9a
-.section szg9b
-.section szg0a
-.section szg0b
-.section szhaa
-.section szhab
-.section szhba
-.section szhbb
-.section szhca
-.section szhcb
-.section szhda
-.section szhdb
-.section szhea
-.section szheb
-.section szhfa
-.section szhfb
-.section szhga
-.section szhgb
-.section szhha
-.section szhhb
-.section szhia
-.section szhib
-.section szhja
-.section szhjb
-.section szhka
-.section szhkb
-.section szhla
-.section szhlb
-.section szhma
-.section szhmb
-.section szhna
-.section szhnb
-.section szhoa
-.section szhob
-.section szhpa
-.section szhpb
-.section szhqa
-.section szhqb
-.section szhra
-.section szhrb
-.section szhsa
-.section szhsb
-.section szhta
-.section szhtb
-.section szhua
-.section szhub
-.section szhva
-.section szhvb
-.section szhwa
-.section szhwb
-.section szhxa
-.section szhxb
-.section szhya
-.section szhyb
-.section szhza
-.section szhzb
-.section szh1a
-.section szh1b
-.section szh2a
-.section szh2b
-.section szh3a
-.section szh3b
-.section szh4a
-.section szh4b
-.section szh5a
-.section szh5b
-.section szh6a
-.section szh6b
-.section szh7a
-.section szh7b
-.section szh8a
-.section szh8b
-.section szh9a
-.section szh9b
-.section szh0a
-.section szh0b
-.section sziaa
-.section sziab
-.section sziba
-.section szibb
-.section szica
-.section szicb
-.section szida
-.section szidb
-.section sziea
-.section szieb
-.section szifa
-.section szifb
-.section sziga
-.section szigb
-.section sziha
-.section szihb
-.section sziia
-.section sziib
-.section szija
-.section szijb
-.section szika
-.section szikb
-.section szila
-.section szilb
-.section szima
-.section szimb
-.section szina
-.section szinb
-.section szioa
-.section sziob
-.section szipa
-.section szipb
-.section sziqa
-.section sziqb
-.section szira
-.section szirb
-.section szisa
-.section szisb
-.section szita
-.section szitb
-.section sziua
-.section sziub
-.section sziva
-.section szivb
-.section sziwa
-.section sziwb
-.section szixa
-.section szixb
-.section sziya
-.section sziyb
-.section sziza
-.section szizb
-.section szi1a
-.section szi1b
-.section szi2a
-.section szi2b
-.section szi3a
-.section szi3b
-.section szi4a
-.section szi4b
-.section szi5a
-.section szi5b
-.section szi6a
-.section szi6b
-.section szi7a
-.section szi7b
-.section szi8a
-.section szi8b
-.section szi9a
-.section szi9b
-.section szi0a
-.section szi0b
-.section szjaa
-.section szjab
-.section szjba
-.section szjbb
-.section szjca
-.section szjcb
-.section szjda
-.section szjdb
-.section szjea
-.section szjeb
-.section szjfa
-.section szjfb
-.section szjga
-.section szjgb
-.section szjha
-.section szjhb
-.section szjia
-.section szjib
-.section szjja
-.section szjjb
-.section szjka
-.section szjkb
-.section szjla
-.section szjlb
-.section szjma
-.section szjmb
-.section szjna
-.section szjnb
-.section szjoa
-.section szjob
-.section szjpa
-.section szjpb
-.section szjqa
-.section szjqb
-.section szjra
-.section szjrb
-.section szjsa
-.section szjsb
-.section szjta
-.section szjtb
-.section szjua
-.section szjub
-.section szjva
-.section szjvb
-.section szjwa
-.section szjwb
-.section szjxa
-.section szjxb
-.section szjya
-.section szjyb
-.section szjza
-.section szjzb
-.section szj1a
-.section szj1b
-.section szj2a
-.section szj2b
-.section szj3a
-.section szj3b
-.section szj4a
-.section szj4b
-.section szj5a
-.section szj5b
-.section szj6a
-.section szj6b
-.section szj7a
-.section szj7b
-.section szj8a
-.section szj8b
-.section szj9a
-.section szj9b
-.section szj0a
-.section szj0b
-.section szkaa
-.section szkab
-.section szkba
-.section szkbb
-.section szkca
-.section szkcb
-.section szkda
-.section szkdb
-.section szkea
-.section szkeb
-.section szkfa
-.section szkfb
-.section szkga
-.section szkgb
-.section szkha
-.section szkhb
-.section szkia
-.section szkib
-.section szkja
-.section szkjb
-.section szkka
-.section szkkb
-.section szkla
-.section szklb
-.section szkma
-.section szkmb
-.section szkna
-.section szknb
-.section szkoa
-.section szkob
-.section szkpa
-.section szkpb
-.section szkqa
-.section szkqb
-.section szkra
-.section szkrb
-.section szksa
-.section szksb
-.section szkta
-.section szktb
-.section szkua
-.section szkub
-.section szkva
-.section szkvb
-.section szkwa
-.section szkwb
-.section szkxa
-.section szkxb
-.section szkya
-.section szkyb
-.section szkza
-.section szkzb
-.section szk1a
-.section szk1b
-.section szk2a
-.section szk2b
-.section szk3a
-.section szk3b
-.section szk4a
-.section szk4b
-.section szk5a
-.section szk5b
-.section szk6a
-.section szk6b
-.section szk7a
-.section szk7b
-.section szk8a
-.section szk8b
-.section szk9a
-.section szk9b
-.section szk0a
-.section szk0b
-.section szlaa
-.section szlab
-.section szlba
-.section szlbb
-.section szlca
-.section szlcb
-.section szlda
-.section szldb
-.section szlea
-.section szleb
-.section szlfa
-.section szlfb
-.section szlga
-.section szlgb
-.section szlha
-.section szlhb
-.section szlia
-.section szlib
-.section szlja
-.section szljb
-.section szlka
-.section szlkb
-.section szlla
-.section szllb
-.section szlma
-.section szlmb
-.section szlna
-.section szlnb
-.section szloa
-.section szlob
-.section szlpa
-.section szlpb
-.section szlqa
-.section szlqb
-.section szlra
-.section szlrb
-.section szlsa
-.section szlsb
-.section szlta
-.section szltb
-.section szlua
-.section szlub
-.section szlva
-.section szlvb
-.section szlwa
-.section szlwb
-.section szlxa
-.section szlxb
-.section szlya
-.section szlyb
-.section szlza
-.section szlzb
-.section szl1a
-.section szl1b
-.section szl2a
-.section szl2b
-.section szl3a
-.section szl3b
-.section szl4a
-.section szl4b
-.section szl5a
-.section szl5b
-.section szl6a
-.section szl6b
-.section szl7a
-.section szl7b
-.section szl8a
-.section szl8b
-.section szl9a
-.section szl9b
-.section szl0a
-.section szl0b
-.section szmaa
-.section szmab
-.section szmba
-.section szmbb
-.section szmca
-.section szmcb
-.section szmda
-.section szmdb
-.section szmea
-.section szmeb
-.section szmfa
-.section szmfb
-.section szmga
-.section szmgb
-.section szmha
-.section szmhb
-.section szmia
-.section szmib
-.section szmja
-.section szmjb
-.section szmka
-.section szmkb
-.section szmla
-.section szmlb
-.section szmma
-.section szmmb
-.section szmna
-.section szmnb
-.section szmoa
-.section szmob
-.section szmpa
-.section szmpb
-.section szmqa
-.section szmqb
-.section szmra
-.section szmrb
-.section szmsa
-.section szmsb
-.section szmta
-.section szmtb
-.section szmua
-.section szmub
-.section szmva
-.section szmvb
-.section szmwa
-.section szmwb
-.section szmxa
-.section szmxb
-.section szmya
-.section szmyb
-.section szmza
-.section szmzb
-.section szm1a
-.section szm1b
-.section szm2a
-.section szm2b
-.section szm3a
-.section szm3b
-.section szm4a
-.section szm4b
-.section szm5a
-.section szm5b
-.section szm6a
-.section szm6b
-.section szm7a
-.section szm7b
-.section szm8a
-.section szm8b
-.section szm9a
-.section szm9b
-.section szm0a
-.section szm0b
-.section sznaa
-.section sznab
-.section sznba
-.section sznbb
-.section sznca
-.section szncb
-.section sznda
-.section szndb
-.section sznea
-.section szneb
-.section sznfa
-.section sznfb
-.section sznga
-.section szngb
-.section sznha
-.section sznhb
-.section sznia
-.section sznib
-.section sznja
-.section sznjb
-.section sznka
-.section sznkb
-.section sznla
-.section sznlb
-.section sznma
-.section sznmb
-.section sznna
-.section sznnb
-.section sznoa
-.section sznob
-.section sznpa
-.section sznpb
-.section sznqa
-.section sznqb
-.section sznra
-.section sznrb
-.section sznsa
-.section sznsb
-.section sznta
-.section szntb
-.section sznua
-.section sznub
-.section sznva
-.section sznvb
-.section sznwa
-.section sznwb
-.section sznxa
-.section sznxb
-.section sznya
-.section sznyb
-.section sznza
-.section sznzb
-.section szn1a
-.section szn1b
-.section szn2a
-.section szn2b
-.section szn3a
-.section szn3b
-.section szn4a
-.section szn4b
-.section szn5a
-.section szn5b
-.section szn6a
-.section szn6b
-.section szn7a
-.section szn7b
-.section szn8a
-.section szn8b
-.section szn9a
-.section szn9b
-.section szn0a
-.section szn0b
-.section szoaa
-.section szoab
-.section szoba
-.section szobb
-.section szoca
-.section szocb
-.section szoda
-.section szodb
-.section szoea
-.section szoeb
-.section szofa
-.section szofb
-.section szoga
-.section szogb
-.section szoha
-.section szohb
-.section szoia
-.section szoib
-.section szoja
-.section szojb
-.section szoka
-.section szokb
-.section szola
-.section szolb
-.section szoma
-.section szomb
-.section szona
-.section szonb
-.section szooa
-.section szoob
-.section szopa
-.section szopb
-.section szoqa
-.section szoqb
-.section szora
-.section szorb
-.section szosa
-.section szosb
-.section szota
-.section szotb
-.section szoua
-.section szoub
-.section szova
-.section szovb
-.section szowa
-.section szowb
-.section szoxa
-.section szoxb
-.section szoya
-.section szoyb
-.section szoza
-.section szozb
-.section szo1a
-.section szo1b
-.section szo2a
-.section szo2b
-.section szo3a
-.section szo3b
-.section szo4a
-.section szo4b
-.section szo5a
-.section szo5b
-.section szo6a
-.section szo6b
-.section szo7a
-.section szo7b
-.section szo8a
-.section szo8b
-.section szo9a
-.section szo9b
-.section szo0a
-.section szo0b
-.section szpaa
-.section szpab
-.section szpba
-.section szpbb
-.section szpca
-.section szpcb
-.section szpda
-.section szpdb
-.section szpea
-.section szpeb
-.section szpfa
-.section szpfb
-.section szpga
-.section szpgb
-.section szpha
-.section szphb
-.section szpia
-.section szpib
-.section szpja
-.section szpjb
-.section szpka
-.section szpkb
-.section szpla
-.section szplb
-.section szpma
-.section szpmb
-.section szpna
-.section szpnb
-.section szpoa
-.section szpob
-.section szppa
-.section szppb
-.section szpqa
-.section szpqb
-.section szpra
-.section szprb
-.section szpsa
-.section szpsb
-.section szpta
-.section szptb
-.section szpua
-.section szpub
-.section szpva
-.section szpvb
-.section szpwa
-.section szpwb
-.section szpxa
-.section szpxb
-.section szpya
-.section szpyb
-.section szpza
-.section szpzb
-.section szp1a
-.section szp1b
-.section szp2a
-.section szp2b
-.section szp3a
-.section szp3b
-.section szp4a
-.section szp4b
-.section szp5a
-.section szp5b
-.section szp6a
-.section szp6b
-.section szp7a
-.section szp7b
-.section szp8a
-.section szp8b
-.section szp9a
-.section szp9b
-.section szp0a
-.section szp0b
-.section szqaa
-.section szqab
-.section szqba
-.section szqbb
-.section szqca
-.section szqcb
-.section szqda
-.section szqdb
-.section szqea
-.section szqeb
-.section szqfa
-.section szqfb
-.section szqga
-.section szqgb
-.section szqha
-.section szqhb
-.section szqia
-.section szqib
-.section szqja
-.section szqjb
-.section szqka
-.section szqkb
-.section szqla
-.section szqlb
-.section szqma
-.section szqmb
-.section szqna
-.section szqnb
-.section szqoa
-.section szqob
-.section szqpa
-.section szqpb
-.section szqqa
-.section szqqb
-.section szqra
-.section szqrb
-.section szqsa
-.section szqsb
-.section szqta
-.section szqtb
-.section szqua
-.section szqub
-.section szqva
-.section szqvb
-.section szqwa
-.section szqwb
-.section szqxa
-.section szqxb
-.section szqya
-.section szqyb
-.section szqza
-.section szqzb
-.section szq1a
-.section szq1b
-.section szq2a
-.section szq2b
-.section szq3a
-.section szq3b
-.section szq4a
-.section szq4b
-.section szq5a
-.section szq5b
-.section szq6a
-.section szq6b
-.section szq7a
-.section szq7b
-.section szq8a
-.section szq8b
-.section szq9a
-.section szq9b
-.section szq0a
-.section szq0b
-.section szraa
-.section szrab
-.section szrba
-.section szrbb
-.section szrca
-.section szrcb
-.section szrda
-.section szrdb
-.section szrea
-.section szreb
-.section szrfa
-.section szrfb
-.section szrga
-.section szrgb
-.section szrha
-.section szrhb
-.section szria
-.section szrib
-.section szrja
-.section szrjb
-.section szrka
-.section szrkb
-.section szrla
-.section szrlb
-.section szrma
-.section szrmb
-.section szrna
-.section szrnb
-.section szroa
-.section szrob
-.section szrpa
-.section szrpb
-.section szrqa
-.section szrqb
-.section szrra
-.section szrrb
-.section szrsa
-.section szrsb
-.section szrta
-.section szrtb
-.section szrua
-.section szrub
-.section szrva
-.section szrvb
-.section szrwa
-.section szrwb
-.section szrxa
-.section szrxb
-.section szrya
-.section szryb
-.section szrza
-.section szrzb
-.section szr1a
-.section szr1b
-.section szr2a
-.section szr2b
-.section szr3a
-.section szr3b
-.section szr4a
-.section szr4b
-.section szr5a
-.section szr5b
-.section szr6a
-.section szr6b
-.section szr7a
-.section szr7b
-.section szr8a
-.section szr8b
-.section szr9a
-.section szr9b
-.section szr0a
-.section szr0b
-.section szsaa
-.section szsab
-.section szsba
-.section szsbb
-.section szsca
-.section szscb
-.section szsda
-.section szsdb
-.section szsea
-.section szseb
-.section szsfa
-.section szsfb
-.section szsga
-.section szsgb
-.section szsha
-.section szshb
-.section szsia
-.section szsib
-.section szsja
-.section szsjb
-.section szska
-.section szskb
-.section szsla
-.section szslb
-.section szsma
-.section szsmb
-.section szsna
-.section szsnb
-.section szsoa
-.section szsob
-.section szspa
-.section szspb
-.section szsqa
-.section szsqb
-.section szsra
-.section szsrb
-.section szssa
-.section szssb
-.section szsta
-.section szstb
-.section szsua
-.section szsub
-.section szsva
-.section szsvb
-.section szswa
-.section szswb
-.section szsxa
-.section szsxb
-.section szsya
-.section szsyb
-.section szsza
-.section szszb
-.section szs1a
-.section szs1b
-.section szs2a
-.section szs2b
-.section szs3a
-.section szs3b
-.section szs4a
-.section szs4b
-.section szs5a
-.section szs5b
-.section szs6a
-.section szs6b
-.section szs7a
-.section szs7b
-.section szs8a
-.section szs8b
-.section szs9a
-.section szs9b
-.section szs0a
-.section szs0b
-.section sztaa
-.section sztab
-.section sztba
-.section sztbb
-.section sztca
-.section sztcb
-.section sztda
-.section sztdb
-.section sztea
-.section szteb
-.section sztfa
-.section sztfb
-.section sztga
-.section sztgb
-.section sztha
-.section szthb
-.section sztia
-.section sztib
-.section sztja
-.section sztjb
-.section sztka
-.section sztkb
-.section sztla
-.section sztlb
-.section sztma
-.section sztmb
-.section sztna
-.section sztnb
-.section sztoa
-.section sztob
-.section sztpa
-.section sztpb
-.section sztqa
-.section sztqb
-.section sztra
-.section sztrb
-.section sztsa
-.section sztsb
-.section sztta
-.section szttb
-.section sztua
-.section sztub
-.section sztva
-.section sztvb
-.section sztwa
-.section sztwb
-.section sztxa
-.section sztxb
-.section sztya
-.section sztyb
-.section sztza
-.section sztzb
-.section szt1a
-.section szt1b
-.section szt2a
-.section szt2b
-.section szt3a
-.section szt3b
-.section szt4a
-.section szt4b
-.section szt5a
-.section szt5b
-.section szt6a
-.section szt6b
-.section szt7a
-.section szt7b
-.section szt8a
-.section szt8b
-.section szt9a
-.section szt9b
-.section szt0a
-.section szt0b
-.section szuaa
-.section szuab
-.section szuba
-.section szubb
-.section szuca
-.section szucb
-.section szuda
-.section szudb
-.section szuea
-.section szueb
-.section szufa
-.section szufb
-.section szuga
-.section szugb
-.section szuha
-.section szuhb
-.section szuia
-.section szuib
-.section szuja
-.section szujb
-.section szuka
-.section szukb
-.section szula
-.section szulb
-.section szuma
-.section szumb
-.section szuna
-.section szunb
-.section szuoa
-.section szuob
-.section szupa
-.section szupb
-.section szuqa
-.section szuqb
-.section szura
-.section szurb
-.section szusa
-.section szusb
-.section szuta
-.section szutb
-.section szuua
-.section szuub
-.section szuva
-.section szuvb
-.section szuwa
-.section szuwb
-.section szuxa
-.section szuxb
-.section szuya
-.section szuyb
-.section szuza
-.section szuzb
-.section szu1a
-.section szu1b
-.section szu2a
-.section szu2b
-.section szu3a
-.section szu3b
-.section szu4a
-.section szu4b
-.section szu5a
-.section szu5b
-.section szu6a
-.section szu6b
-.section szu7a
-.section szu7b
-.section szu8a
-.section szu8b
-.section szu9a
-.section szu9b
-.section szu0a
-.section szu0b
-.section szvaa
-.section szvab
-.section szvba
-.section szvbb
-.section szvca
-.section szvcb
-.section szvda
-.section szvdb
-.section szvea
-.section szveb
-.section szvfa
-.section szvfb
-.section szvga
-.section szvgb
-.section szvha
-.section szvhb
-.section szvia
-.section szvib
-.section szvja
-.section szvjb
-.section szvka
-.section szvkb
-.section szvla
-.section szvlb
-.section szvma
-.section szvmb
-.section szvna
-.section szvnb
-.section szvoa
-.section szvob
-.section szvpa
-.section szvpb
-.section szvqa
-.section szvqb
-.section szvra
-.section szvrb
-.section szvsa
-.section szvsb
-.section szvta
-.section szvtb
-.section szvua
-.section szvub
-.section szvva
-.section szvvb
-.section szvwa
-.section szvwb
-.section szvxa
-.section szvxb
-.section szvya
-.section szvyb
-.section szvza
-.section szvzb
-.section szv1a
-.section szv1b
-.section szv2a
-.section szv2b
-.section szv3a
-.section szv3b
-.section szv4a
-.section szv4b
-.section szv5a
-.section szv5b
-.section szv6a
-.section szv6b
-.section szv7a
-.section szv7b
-.section szv8a
-.section szv8b
-.section szv9a
-.section szv9b
-.section szv0a
-.section szv0b
-.section szwaa
-.section szwab
-.section szwba
-.section szwbb
-.section szwca
-.section szwcb
-.section szwda
-.section szwdb
-.section szwea
-.section szweb
-.section szwfa
-.section szwfb
-.section szwga
-.section szwgb
-.section szwha
-.section szwhb
-.section szwia
-.section szwib
-.section szwja
-.section szwjb
-.section szwka
-.section szwkb
-.section szwla
-.section szwlb
-.section szwma
-.section szwmb
-.section szwna
-.section szwnb
-.section szwoa
-.section szwob
-.section szwpa
-.section szwpb
-.section szwqa
-.section szwqb
-.section szwra
-.section szwrb
-.section szwsa
-.section szwsb
-.section szwta
-.section szwtb
-.section szwua
-.section szwub
-.section szwva
-.section szwvb
-.section szwwa
-.section szwwb
-.section szwxa
-.section szwxb
-.section szwya
-.section szwyb
-.section szwza
-.section szwzb
-.section szw1a
-.section szw1b
-.section szw2a
-.section szw2b
-.section szw3a
-.section szw3b
-.section szw4a
-.section szw4b
-.section szw5a
-.section szw5b
-.section szw6a
-.section szw6b
-.section szw7a
-.section szw7b
-.section szw8a
-.section szw8b
-.section szw9a
-.section szw9b
-.section szw0a
-.section szw0b
-.section szxaa
-.section szxab
-.section szxba
-.section szxbb
-.section szxca
-.section szxcb
-.section szxda
-.section szxdb
-.section szxea
-.section szxeb
-.section szxfa
-.section szxfb
-.section szxga
-.section szxgb
-.section szxha
-.section szxhb
-.section szxia
-.section szxib
-.section szxja
-.section szxjb
-.section szxka
-.section szxkb
-.section szxla
-.section szxlb
-.section szxma
-.section szxmb
-.section szxna
-.section szxnb
-.section szxoa
-.section szxob
-.section szxpa
-.section szxpb
-.section szxqa
-.section szxqb
-.section szxra
-.section szxrb
-.section szxsa
-.section szxsb
-.section szxta
-.section szxtb
-.section szxua
-.section szxub
-.section szxva
-.section szxvb
-.section szxwa
-.section szxwb
-.section szxxa
-.section szxxb
-.section szxya
-.section szxyb
-.section szxza
-.section szxzb
-.section szx1a
-.section szx1b
-.section szx2a
-.section szx2b
-.section szx3a
-.section szx3b
-.section szx4a
-.section szx4b
-.section szx5a
-.section szx5b
-.section szx6a
-.section szx6b
-.section szx7a
-.section szx7b
-.section szx8a
-.section szx8b
-.section szx9a
-.section szx9b
-.section szx0a
-.section szx0b
-.section szyaa
-.section szyab
-.section szyba
-.section szybb
-.section szyca
-.section szycb
-.section szyda
-.section szydb
-.section szyea
-.section szyeb
-.section szyfa
-.section szyfb
-.section szyga
-.section szygb
-.section szyha
-.section szyhb
-.section szyia
-.section szyib
-.section szyja
-.section szyjb
-.section szyka
-.section szykb
-.section szyla
-.section szylb
-.section szyma
-.section szymb
-.section szyna
-.section szynb
-.section szyoa
-.section szyob
-.section szypa
-.section szypb
-.section szyqa
-.section szyqb
-.section szyra
-.section szyrb
-.section szysa
-.section szysb
-.section szyta
-.section szytb
-.section szyua
-.section szyub
-.section szyva
-.section szyvb
-.section szywa
-.section szywb
-.section szyxa
-.section szyxb
-.section szyya
-.section szyyb
-.section szyza
-.section szyzb
-.section szy1a
-.section szy1b
-.section szy2a
-.section szy2b
-.section szy3a
-.section szy3b
-.section szy4a
-.section szy4b
-.section szy5a
-.section szy5b
-.section szy6a
-.section szy6b
-.section szy7a
-.section szy7b
-.section szy8a
-.section szy8b
-.section szy9a
-.section szy9b
-.section szy0a
-.section szy0b
-.section szzaa
-.section szzab
-.section szzba
-.section szzbb
-.section szzca
-.section szzcb
-.section szzda
-.section szzdb
-.section szzea
-.section szzeb
-.section szzfa
-.section szzfb
-.section szzga
-.section szzgb
-.section szzha
-.section szzhb
-.section szzia
-.section szzib
-.section szzja
-.section szzjb
-.section szzka
-.section szzkb
-.section szzla
-.section szzlb
-.section szzma
-.section szzmb
-.section szzna
-.section szznb
-.section szzoa
-.section szzob
-.section szzpa
-.section szzpb
-.section szzqa
-.section szzqb
-.section szzra
-.section szzrb
-.section szzsa
-.section szzsb
-.section szzta
-.section szztb
-.section szzua
-.section szzub
-.section szzva
-.section szzvb
-.section szzwa
-.section szzwb
-.section szzxa
-.section szzxb
-.section szzya
-.section szzyb
-.section szzza
-.section szzzb
-.section szz1a
-.section szz1b
-.section szz2a
-.section szz2b
-.section szz3a
-.section szz3b
-.section szz4a
-.section szz4b
-.section szz5a
-.section szz5b
-.section szz6a
-.section szz6b
-.section szz7a
-.section szz7b
-.section szz8a
-.section szz8b
-.section szz9a
-.section szz9b
-.section szz0a
-.section szz0b
-.section sz1aa
-.section sz1ab
-.section sz1ba
-.section sz1bb
-.section sz1ca
-.section sz1cb
-.section sz1da
-.section sz1db
-.section sz1ea
-.section sz1eb
-.section sz1fa
-.section sz1fb
-.section sz1ga
-.section sz1gb
-.section sz1ha
-.section sz1hb
-.section sz1ia
-.section sz1ib
-.section sz1ja
-.section sz1jb
-.section sz1ka
-.section sz1kb
-.section sz1la
-.section sz1lb
-.section sz1ma
-.section sz1mb
-.section sz1na
-.section sz1nb
-.section sz1oa
-.section sz1ob
-.section sz1pa
-.section sz1pb
-.section sz1qa
-.section sz1qb
-.section sz1ra
-.section sz1rb
-.section sz1sa
-.section sz1sb
-.section sz1ta
-.section sz1tb
-.section sz1ua
-.section sz1ub
-.section sz1va
-.section sz1vb
-.section sz1wa
-.section sz1wb
-.section sz1xa
-.section sz1xb
-.section sz1ya
-.section sz1yb
-.section sz1za
-.section sz1zb
-.section sz11a
-.section sz11b
-.section sz12a
-.section sz12b
-.section sz13a
-.section sz13b
-.section sz14a
-.section sz14b
-.section sz15a
-.section sz15b
-.section sz16a
-.section sz16b
-.section sz17a
-.section sz17b
-.section sz18a
-.section sz18b
-.section sz19a
-.section sz19b
-.section sz10a
-.section sz10b
-.section sz2aa
-.section sz2ab
-.section sz2ba
-.section sz2bb
-.section sz2ca
-.section sz2cb
-.section sz2da
-.section sz2db
-.section sz2ea
-.section sz2eb
-.section sz2fa
-.section sz2fb
-.section sz2ga
-.section sz2gb
-.section sz2ha
-.section sz2hb
-.section sz2ia
-.section sz2ib
-.section sz2ja
-.section sz2jb
-.section sz2ka
-.section sz2kb
-.section sz2la
-.section sz2lb
-.section sz2ma
-.section sz2mb
-.section sz2na
-.section sz2nb
-.section sz2oa
-.section sz2ob
-.section sz2pa
-.section sz2pb
-.section sz2qa
-.section sz2qb
-.section sz2ra
-.section sz2rb
-.section sz2sa
-.section sz2sb
-.section sz2ta
-.section sz2tb
-.section sz2ua
-.section sz2ub
-.section sz2va
-.section sz2vb
-.section sz2wa
-.section sz2wb
-.section sz2xa
-.section sz2xb
-.section sz2ya
-.section sz2yb
-.section sz2za
-.section sz2zb
-.section sz21a
-.section sz21b
-.section sz22a
-.section sz22b
-.section sz23a
-.section sz23b
-.section sz24a
-.section sz24b
-.section sz25a
-.section sz25b
-.section sz26a
-.section sz26b
-.section sz27a
-.section sz27b
-.section sz28a
-.section sz28b
-.section sz29a
-.section sz29b
-.section sz20a
-.section sz20b
-.section sz3aa
-.section sz3ab
-.section sz3ba
-.section sz3bb
-.section sz3ca
-.section sz3cb
-.section sz3da
-.section sz3db
-.section sz3ea
-.section sz3eb
-.section sz3fa
-.section sz3fb
-.section sz3ga
-.section sz3gb
-.section sz3ha
-.section sz3hb
-.section sz3ia
-.section sz3ib
-.section sz3ja
-.section sz3jb
-.section sz3ka
-.section sz3kb
-.section sz3la
-.section sz3lb
-.section sz3ma
-.section sz3mb
-.section sz3na
-.section sz3nb
-.section sz3oa
-.section sz3ob
-.section sz3pa
-.section sz3pb
-.section sz3qa
-.section sz3qb
-.section sz3ra
-.section sz3rb
-.section sz3sa
-.section sz3sb
-.section sz3ta
-.section sz3tb
-.section sz3ua
-.section sz3ub
-.section sz3va
-.section sz3vb
-.section sz3wa
-.section sz3wb
-.section sz3xa
-.section sz3xb
-.section sz3ya
-.section sz3yb
-.section sz3za
-.section sz3zb
-.section sz31a
-.section sz31b
-.section sz32a
-.section sz32b
-.section sz33a
-.section sz33b
-.section sz34a
-.section sz34b
-.section sz35a
-.section sz35b
-.section sz36a
-.section sz36b
-.section sz37a
-.section sz37b
-.section sz38a
-.section sz38b
-.section sz39a
-.section sz39b
-.section sz30a
-.section sz30b
-.section sz4aa
-.section sz4ab
-.section sz4ba
-.section sz4bb
-.section sz4ca
-.section sz4cb
-.section sz4da
-.section sz4db
-.section sz4ea
-.section sz4eb
-.section sz4fa
-.section sz4fb
-.section sz4ga
-.section sz4gb
-.section sz4ha
-.section sz4hb
-.section sz4ia
-.section sz4ib
-.section sz4ja
-.section sz4jb
-.section sz4ka
-.section sz4kb
-.section sz4la
-.section sz4lb
-.section sz4ma
-.section sz4mb
-.section sz4na
-.section sz4nb
-.section sz4oa
-.section sz4ob
-.section sz4pa
-.section sz4pb
-.section sz4qa
-.section sz4qb
-.section sz4ra
-.section sz4rb
-.section sz4sa
-.section sz4sb
-.section sz4ta
-.section sz4tb
-.section sz4ua
-.section sz4ub
-.section sz4va
-.section sz4vb
-.section sz4wa
-.section sz4wb
-.section sz4xa
-.section sz4xb
-.section sz4ya
-.section sz4yb
-.section sz4za
-.section sz4zb
-.section sz41a
-.section sz41b
-.section sz42a
-.section sz42b
-.section sz43a
-.section sz43b
-.section sz44a
-.section sz44b
-.section sz45a
-.section sz45b
-.section sz46a
-.section sz46b
-.section sz47a
-.section sz47b
-.section sz48a
-.section sz48b
-.section sz49a
-.section sz49b
-.section sz40a
-.section sz40b
-.section sz5aa
-.section sz5ab
-.section sz5ba
-.section sz5bb
-.section sz5ca
-.section sz5cb
-.section sz5da
-.section sz5db
-.section sz5ea
-.section sz5eb
-.section sz5fa
-.section sz5fb
-.section sz5ga
-.section sz5gb
-.section sz5ha
-.section sz5hb
-.section sz5ia
-.section sz5ib
-.section sz5ja
-.section sz5jb
-.section sz5ka
-.section sz5kb
-.section sz5la
-.section sz5lb
-.section sz5ma
-.section sz5mb
-.section sz5na
-.section sz5nb
-.section sz5oa
-.section sz5ob
-.section sz5pa
-.section sz5pb
-.section sz5qa
-.section sz5qb
-.section sz5ra
-.section sz5rb
-.section sz5sa
-.section sz5sb
-.section sz5ta
-.section sz5tb
-.section sz5ua
-.section sz5ub
-.section sz5va
-.section sz5vb
-.section sz5wa
-.section sz5wb
-.section sz5xa
-.section sz5xb
-.section sz5ya
-.section sz5yb
-.section sz5za
-.section sz5zb
-.section sz51a
-.section sz51b
-.section sz52a
-.section sz52b
-.section sz53a
-.section sz53b
-.section sz54a
-.section sz54b
-.section sz55a
-.section sz55b
-.section sz56a
-.section sz56b
-.section sz57a
-.section sz57b
-.section sz58a
-.section sz58b
-.section sz59a
-.section sz59b
-.section sz50a
-.section sz50b
-.section sz6aa
-.section sz6ab
-.section sz6ba
-.section sz6bb
-.section sz6ca
-.section sz6cb
-.section sz6da
-.section sz6db
-.section sz6ea
-.section sz6eb
-.section sz6fa
-.section sz6fb
-.section sz6ga
-.section sz6gb
-.section sz6ha
-.section sz6hb
-.section sz6ia
-.section sz6ib
-.section sz6ja
-.section sz6jb
-.section sz6ka
-.section sz6kb
-.section sz6la
-.section sz6lb
-.section sz6ma
-.section sz6mb
-.section sz6na
-.section sz6nb
-.section sz6oa
-.section sz6ob
-.section sz6pa
-.section sz6pb
-.section sz6qa
-.section sz6qb
-.section sz6ra
-.section sz6rb
-.section sz6sa
-.section sz6sb
-.section sz6ta
-.section sz6tb
-.section sz6ua
-.section sz6ub
-.section sz6va
-.section sz6vb
-.section sz6wa
-.section sz6wb
-.section sz6xa
-.section sz6xb
-.section sz6ya
-.section sz6yb
-.section sz6za
-.section sz6zb
-.section sz61a
-.section sz61b
-.section sz62a
-.section sz62b
-.section sz63a
-.section sz63b
-.section sz64a
-.section sz64b
-.section sz65a
-.section sz65b
-.section sz66a
-.section sz66b
-.section sz67a
-.section sz67b
-.section sz68a
-.section sz68b
-.section sz69a
-.section sz69b
-.section sz60a
-.section sz60b
-.section sz7aa
-.section sz7ab
-.section sz7ba
-.section sz7bb
-.section sz7ca
-.section sz7cb
-.section sz7da
-.section sz7db
-.section sz7ea
-.section sz7eb
-.section sz7fa
-.section sz7fb
-.section sz7ga
-.section sz7gb
-.section sz7ha
-.section sz7hb
-.section sz7ia
-.section sz7ib
-.section sz7ja
-.section sz7jb
-.section sz7ka
-.section sz7kb
-.section sz7la
-.section sz7lb
-.section sz7ma
-.section sz7mb
-.section sz7na
-.section sz7nb
-.section sz7oa
-.section sz7ob
-.section sz7pa
-.section sz7pb
-.section sz7qa
-.section sz7qb
-.section sz7ra
-.section sz7rb
-.section sz7sa
-.section sz7sb
-.section sz7ta
-.section sz7tb
-.section sz7ua
-.section sz7ub
-.section sz7va
-.section sz7vb
-.section sz7wa
-.section sz7wb
-.section sz7xa
-.section sz7xb
-.section sz7ya
-.section sz7yb
-.section sz7za
-.section sz7zb
-.section sz71a
-.section sz71b
-.section sz72a
-.section sz72b
-.section sz73a
-.section sz73b
-.section sz74a
-.section sz74b
-.section sz75a
-.section sz75b
-.section sz76a
-.section sz76b
-.section sz77a
-.section sz77b
-.section sz78a
-.section sz78b
-.section sz79a
-.section sz79b
-.section sz70a
-.section sz70b
-.section sz8aa
-.section sz8ab
-.section sz8ba
-.section sz8bb
-.section sz8ca
-.section sz8cb
-.section sz8da
-.section sz8db
-.section sz8ea
-.section sz8eb
-.section sz8fa
-.section sz8fb
-.section sz8ga
-.section sz8gb
-.section sz8ha
-.section sz8hb
-.section sz8ia
-.section sz8ib
-.section sz8ja
-.section sz8jb
-.section sz8ka
-.section sz8kb
-.section sz8la
-.section sz8lb
-.section sz8ma
-.section sz8mb
-.section sz8na
-.section sz8nb
-.section sz8oa
-.section sz8ob
-.section sz8pa
-.section sz8pb
-.section sz8qa
-.section sz8qb
-.section sz8ra
-.section sz8rb
-.section sz8sa
-.section sz8sb
-.section sz8ta
-.section sz8tb
-.section sz8ua
-.section sz8ub
-.section sz8va
-.section sz8vb
-.section sz8wa
-.section sz8wb
-.section sz8xa
-.section sz8xb
-.section sz8ya
-.section sz8yb
-.section sz8za
-.section sz8zb
-.section sz81a
-.section sz81b
-.section sz82a
-.section sz82b
-.section sz83a
-.section sz83b
-.section sz84a
-.section sz84b
-.section sz85a
-.section sz85b
-.section sz86a
-.section sz86b
-.section sz87a
-.section sz87b
-.section sz88a
-.section sz88b
-.section sz89a
-.section sz89b
-.section sz80a
-.section sz80b
-.section sz9aa
-.section sz9ab
-.section sz9ba
-.section sz9bb
-.section sz9ca
-.section sz9cb
-.section sz9da
-.section sz9db
-.section sz9ea
-.section sz9eb
-.section sz9fa
-.section sz9fb
-.section sz9ga
-.section sz9gb
-.section sz9ha
-.section sz9hb
-.section sz9ia
-.section sz9ib
-.section sz9ja
-.section sz9jb
-.section sz9ka
-.section sz9kb
-.section sz9la
-.section sz9lb
-.section sz9ma
-.section sz9mb
-.section sz9na
-.section sz9nb
-.section sz9oa
-.section sz9ob
-.section sz9pa
-.section sz9pb
-.section sz9qa
-.section sz9qb
-.section sz9ra
-.section sz9rb
-.section sz9sa
-.section sz9sb
-.section sz9ta
-.section sz9tb
-.section sz9ua
-.section sz9ub
-.section sz9va
-.section sz9vb
-.section sz9wa
-.section sz9wb
-.section sz9xa
-.section sz9xb
-.section sz9ya
-.section sz9yb
-.section sz9za
-.section sz9zb
-.section sz91a
-.section sz91b
-.section sz92a
-.section sz92b
-.section sz93a
-.section sz93b
-.section sz94a
-.section sz94b
-.section sz95a
-.section sz95b
-.section sz96a
-.section sz96b
-.section sz97a
-.section sz97b
-.section sz98a
-.section sz98b
-.section sz99a
-.section sz99b
-.section sz90a
-.section sz90b
-.section sz0aa
-.section sz0ab
-.section sz0ba
-.section sz0bb
-.section sz0ca
-.section sz0cb
-.section sz0da
-.section sz0db
-.section sz0ea
-.section sz0eb
-.section sz0fa
-.section sz0fb
-.section sz0ga
-.section sz0gb
-.section sz0ha
-.section sz0hb
-.section sz0ia
-.section sz0ib
-.section sz0ja
-.section sz0jb
-.section sz0ka
-.section sz0kb
-.section sz0la
-.section sz0lb
-.section sz0ma
-.section sz0mb
-.section sz0na
-.section sz0nb
-.section sz0oa
-.section sz0ob
-.section sz0pa
-.section sz0pb
-.section sz0qa
-.section sz0qb
-.section sz0ra
-.section sz0rb
-.section sz0sa
-.section sz0sb
-.section sz0ta
-.section sz0tb
-.section sz0ua
-.section sz0ub
-.section sz0va
-.section sz0vb
-.section sz0wa
-.section sz0wb
-.section sz0xa
-.section sz0xb
-.section sz0ya
-.section sz0yb
-.section sz0za
-.section sz0zb
-.section sz01a
-.section sz01b
-.section sz02a
-.section sz02b
-.section sz03a
-.section sz03b
-.section sz04a
-.section sz04b
-.section sz05a
-.section sz05b
-.section sz06a
-.section sz06b
-.section sz07a
-.section sz07b
-.section sz08a
-.section sz08b
-.section sz09a
-.section sz09b
-.section sz00a
-.section sz00b
-.section s1aaa
-.section s1aab
-.section s1aba
-.section s1abb
-.section s1aca
-.section s1acb
-.section s1ada
-.section s1adb
-.section s1aea
-.section s1aeb
-.section s1afa
-.section s1afb
-.section s1aga
-.section s1agb
-.section s1aha
-.section s1ahb
-.section s1aia
-.section s1aib
-.section s1aja
-.section s1ajb
-.section s1aka
-.section s1akb
-.section s1ala
-.section s1alb
-.section s1ama
-.section s1amb
-.section s1ana
-.section s1anb
-.section s1aoa
-.section s1aob
-.section s1apa
-.section s1apb
-.section s1aqa
-.section s1aqb
-.section s1ara
-.section s1arb
-.section s1asa
-.section s1asb
-.section s1ata
-.section s1atb
-.section s1aua
-.section s1aub
-.section s1ava
-.section s1avb
-.section s1awa
-.section s1awb
-.section s1axa
-.section s1axb
-.section s1aya
-.section s1ayb
-.section s1aza
-.section s1azb
-.section s1a1a
-.section s1a1b
-.section s1a2a
-.section s1a2b
-.section s1a3a
-.section s1a3b
-.section s1a4a
-.section s1a4b
-.section s1a5a
-.section s1a5b
-.section s1a6a
-.section s1a6b
-.section s1a7a
-.section s1a7b
-.section s1a8a
-.section s1a8b
-.section s1a9a
-.section s1a9b
-.section s1a0a
-.section s1a0b
-.section s1baa
-.section s1bab
-.section s1bba
-.section s1bbb
-.section s1bca
-.section s1bcb
-.section s1bda
-.section s1bdb
-.section s1bea
-.section s1beb
-.section s1bfa
-.section s1bfb
-.section s1bga
-.section s1bgb
-.section s1bha
-.section s1bhb
-.section s1bia
-.section s1bib
-.section s1bja
-.section s1bjb
-.section s1bka
-.section s1bkb
-.section s1bla
-.section s1blb
-.section s1bma
-.section s1bmb
-.section s1bna
-.section s1bnb
-.section s1boa
-.section s1bob
-.section s1bpa
-.section s1bpb
-.section s1bqa
-.section s1bqb
-.section s1bra
-.section s1brb
-.section s1bsa
-.section s1bsb
-.section s1bta
-.section s1btb
-.section s1bua
-.section s1bub
-.section s1bva
-.section s1bvb
-.section s1bwa
-.section s1bwb
-.section s1bxa
-.section s1bxb
-.section s1bya
-.section s1byb
-.section s1bza
-.section s1bzb
-.section s1b1a
-.section s1b1b
-.section s1b2a
-.section s1b2b
-.section s1b3a
-.section s1b3b
-.section s1b4a
-.section s1b4b
-.section s1b5a
-.section s1b5b
-.section s1b6a
-.section s1b6b
-.section s1b7a
-.section s1b7b
-.section s1b8a
-.section s1b8b
-.section s1b9a
-.section s1b9b
-.section s1b0a
-.section s1b0b
-.section s1caa
-.section s1cab
-.section s1cba
-.section s1cbb
-.section s1cca
-.section s1ccb
-.section s1cda
-.section s1cdb
-.section s1cea
-.section s1ceb
-.section s1cfa
-.section s1cfb
-.section s1cga
-.section s1cgb
-.section s1cha
-.section s1chb
-.section s1cia
-.section s1cib
-.section s1cja
-.section s1cjb
-.section s1cka
-.section s1ckb
-.section s1cla
-.section s1clb
-.section s1cma
-.section s1cmb
-.section s1cna
-.section s1cnb
-.section s1coa
-.section s1cob
-.section s1cpa
-.section s1cpb
-.section s1cqa
-.section s1cqb
-.section s1cra
-.section s1crb
-.section s1csa
-.section s1csb
-.section s1cta
-.section s1ctb
-.section s1cua
-.section s1cub
-.section s1cva
-.section s1cvb
-.section s1cwa
-.section s1cwb
-.section s1cxa
-.section s1cxb
-.section s1cya
-.section s1cyb
-.section s1cza
-.section s1czb
-.section s1c1a
-.section s1c1b
-.section s1c2a
-.section s1c2b
-.section s1c3a
-.section s1c3b
-.section s1c4a
-.section s1c4b
-.section s1c5a
-.section s1c5b
-.section s1c6a
-.section s1c6b
-.section s1c7a
-.section s1c7b
-.section s1c8a
-.section s1c8b
-.section s1c9a
-.section s1c9b
-.section s1c0a
-.section s1c0b
-.section s1daa
-.section s1dab
-.section s1dba
-.section s1dbb
-.section s1dca
-.section s1dcb
-.section s1dda
-.section s1ddb
-.section s1dea
-.section s1deb
-.section s1dfa
-.section s1dfb
-.section s1dga
-.section s1dgb
-.section s1dha
-.section s1dhb
-.section s1dia
-.section s1dib
-.section s1dja
-.section s1djb
-.section s1dka
-.section s1dkb
-.section s1dla
-.section s1dlb
-.section s1dma
-.section s1dmb
-.section s1dna
-.section s1dnb
-.section s1doa
-.section s1dob
-.section s1dpa
-.section s1dpb
-.section s1dqa
-.section s1dqb
-.section s1dra
-.section s1drb
-.section s1dsa
-.section s1dsb
-.section s1dta
-.section s1dtb
-.section s1dua
-.section s1dub
-.section s1dva
-.section s1dvb
-.section s1dwa
-.section s1dwb
-.section s1dxa
-.section s1dxb
-.section s1dya
-.section s1dyb
-.section s1dza
-.section s1dzb
-.section s1d1a
-.section s1d1b
-.section s1d2a
-.section s1d2b
-.section s1d3a
-.section s1d3b
-.section s1d4a
-.section s1d4b
-.section s1d5a
-.section s1d5b
-.section s1d6a
-.section s1d6b
-.section s1d7a
-.section s1d7b
-.section s1d8a
-.section s1d8b
-.section s1d9a
-.section s1d9b
-.section s1d0a
-.section s1d0b
-.section s1eaa
-.section s1eab
-.section s1eba
-.section s1ebb
-.section s1eca
-.section s1ecb
-.section s1eda
-.section s1edb
-.section s1eea
-.section s1eeb
-.section s1efa
-.section s1efb
-.section s1ega
-.section s1egb
-.section s1eha
-.section s1ehb
-.section s1eia
-.section s1eib
-.section s1eja
-.section s1ejb
-.section s1eka
-.section s1ekb
-.section s1ela
-.section s1elb
-.section s1ema
-.section s1emb
-.section s1ena
-.section s1enb
-.section s1eoa
-.section s1eob
-.section s1epa
-.section s1epb
-.section s1eqa
-.section s1eqb
-.section s1era
-.section s1erb
-.section s1esa
-.section s1esb
-.section s1eta
-.section s1etb
-.section s1eua
-.section s1eub
-.section s1eva
-.section s1evb
-.section s1ewa
-.section s1ewb
-.section s1exa
-.section s1exb
-.section s1eya
-.section s1eyb
-.section s1eza
-.section s1ezb
-.section s1e1a
-.section s1e1b
-.section s1e2a
-.section s1e2b
-.section s1e3a
-.section s1e3b
-.section s1e4a
-.section s1e4b
-.section s1e5a
-.section s1e5b
-.section s1e6a
-.section s1e6b
-.section s1e7a
-.section s1e7b
-.section s1e8a
-.section s1e8b
-.section s1e9a
-.section s1e9b
-.section s1e0a
-.section s1e0b
-.section s1faa
-.section s1fab
-.section s1fba
-.section s1fbb
-.section s1fca
-.section s1fcb
-.section s1fda
-.section s1fdb
-.section s1fea
-.section s1feb
-.section s1ffa
-.section s1ffb
-.section s1fga
-.section s1fgb
-.section s1fha
-.section s1fhb
-.section s1fia
-.section s1fib
-.section s1fja
-.section s1fjb
-.section s1fka
-.section s1fkb
-.section s1fla
-.section s1flb
-.section s1fma
-.section s1fmb
-.section s1fna
-.section s1fnb
-.section s1foa
-.section s1fob
-.section s1fpa
-.section s1fpb
-.section s1fqa
-.section s1fqb
-.section s1fra
-.section s1frb
-.section s1fsa
-.section s1fsb
-.section s1fta
-.section s1ftb
-.section s1fua
-.section s1fub
-.section s1fva
-.section s1fvb
-.section s1fwa
-.section s1fwb
-.section s1fxa
-.section s1fxb
-.section s1fya
-.section s1fyb
-.section s1fza
-.section s1fzb
-.section s1f1a
-.section s1f1b
-.section s1f2a
-.section s1f2b
-.section s1f3a
-.section s1f3b
-.section s1f4a
-.section s1f4b
-.section s1f5a
-.section s1f5b
-.section s1f6a
-.section s1f6b
-.section s1f7a
-.section s1f7b
-.section s1f8a
-.section s1f8b
-.section s1f9a
-.section s1f9b
-.section s1f0a
-.section s1f0b
-.section s1gaa
-.section s1gab
-.section s1gba
-.section s1gbb
-.section s1gca
-.section s1gcb
-.section s1gda
-.section s1gdb
-.section s1gea
-.section s1geb
-.section s1gfa
-.section s1gfb
-.section s1gga
-.section s1ggb
-.section s1gha
-.section s1ghb
-.section s1gia
-.section s1gib
-.section s1gja
-.section s1gjb
-.section s1gka
-.section s1gkb
-.section s1gla
-.section s1glb
-.section s1gma
-.section s1gmb
-.section s1gna
-.section s1gnb
-.section s1goa
-.section s1gob
-.section s1gpa
-.section s1gpb
-.section s1gqa
-.section s1gqb
-.section s1gra
-.section s1grb
-.section s1gsa
-.section s1gsb
-.section s1gta
-.section s1gtb
-.section s1gua
-.section s1gub
-.section s1gva
-.section s1gvb
-.section s1gwa
-.section s1gwb
-.section s1gxa
-.section s1gxb
-.section s1gya
-.section s1gyb
-.section s1gza
-.section s1gzb
-.section s1g1a
-.section s1g1b
-.section s1g2a
-.section s1g2b
-.section s1g3a
-.section s1g3b
-.section s1g4a
-.section s1g4b
-.section s1g5a
-.section s1g5b
-.section s1g6a
-.section s1g6b
-.section s1g7a
-.section s1g7b
-.section s1g8a
-.section s1g8b
-.section s1g9a
-.section s1g9b
-.section s1g0a
-.section s1g0b
-.section s1haa
-.section s1hab
-.section s1hba
-.section s1hbb
-.section s1hca
-.section s1hcb
-.section s1hda
-.section s1hdb
-.section s1hea
-.section s1heb
-.section s1hfa
-.section s1hfb
-.section s1hga
-.section s1hgb
-.section s1hha
-.section s1hhb
-.section s1hia
-.section s1hib
-.section s1hja
-.section s1hjb
-.section s1hka
-.section s1hkb
-.section s1hla
-.section s1hlb
-.section s1hma
-.section s1hmb
-.section s1hna
-.section s1hnb
-.section s1hoa
-.section s1hob
-.section s1hpa
-.section s1hpb
-.section s1hqa
-.section s1hqb
-.section s1hra
-.section s1hrb
-.section s1hsa
-.section s1hsb
-.section s1hta
-.section s1htb
-.section s1hua
-.section s1hub
-.section s1hva
-.section s1hvb
-.section s1hwa
-.section s1hwb
-.section s1hxa
-.section s1hxb
-.section s1hya
-.section s1hyb
-.section s1hza
-.section s1hzb
-.section s1h1a
-.section s1h1b
-.section s1h2a
-.section s1h2b
-.section s1h3a
-.section s1h3b
-.section s1h4a
-.section s1h4b
-.section s1h5a
-.section s1h5b
-.section s1h6a
-.section s1h6b
-.section s1h7a
-.section s1h7b
-.section s1h8a
-.section s1h8b
-.section s1h9a
-.section s1h9b
-.section s1h0a
-.section s1h0b
-.section s1iaa
-.section s1iab
-.section s1iba
-.section s1ibb
-.section s1ica
-.section s1icb
-.section s1ida
-.section s1idb
-.section s1iea
-.section s1ieb
-.section s1ifa
-.section s1ifb
-.section s1iga
-.section s1igb
-.section s1iha
-.section s1ihb
-.section s1iia
-.section s1iib
-.section s1ija
-.section s1ijb
-.section s1ika
-.section s1ikb
-.section s1ila
-.section s1ilb
-.section s1ima
-.section s1imb
-.section s1ina
-.section s1inb
-.section s1ioa
-.section s1iob
-.section s1ipa
-.section s1ipb
-.section s1iqa
-.section s1iqb
-.section s1ira
-.section s1irb
-.section s1isa
-.section s1isb
-.section s1ita
-.section s1itb
-.section s1iua
-.section s1iub
-.section s1iva
-.section s1ivb
-.section s1iwa
-.section s1iwb
-.section s1ixa
-.section s1ixb
-.section s1iya
-.section s1iyb
-.section s1iza
-.section s1izb
-.section s1i1a
-.section s1i1b
-.section s1i2a
-.section s1i2b
-.section s1i3a
-.section s1i3b
-.section s1i4a
-.section s1i4b
-.section s1i5a
-.section s1i5b
-.section s1i6a
-.section s1i6b
-.section s1i7a
-.section s1i7b
-.section s1i8a
-.section s1i8b
-.section s1i9a
-.section s1i9b
-.section s1i0a
-.section s1i0b
-.section s1jaa
-.section s1jab
-.section s1jba
-.section s1jbb
-.section s1jca
-.section s1jcb
-.section s1jda
-.section s1jdb
-.section s1jea
-.section s1jeb
-.section s1jfa
-.section s1jfb
-.section s1jga
-.section s1jgb
-.section s1jha
-.section s1jhb
-.section s1jia
-.section s1jib
-.section s1jja
-.section s1jjb
-.section s1jka
-.section s1jkb
-.section s1jla
-.section s1jlb
-.section s1jma
-.section s1jmb
-.section s1jna
-.section s1jnb
-.section s1joa
-.section s1job
-.section s1jpa
-.section s1jpb
-.section s1jqa
-.section s1jqb
-.section s1jra
-.section s1jrb
-.section s1jsa
-.section s1jsb
-.section s1jta
-.section s1jtb
-.section s1jua
-.section s1jub
-.section s1jva
-.section s1jvb
-.section s1jwa
-.section s1jwb
-.section s1jxa
-.section s1jxb
-.section s1jya
-.section s1jyb
-.section s1jza
-.section s1jzb
-.section s1j1a
-.section s1j1b
-.section s1j2a
-.section s1j2b
-.section s1j3a
-.section s1j3b
-.section s1j4a
-.section s1j4b
-.section s1j5a
-.section s1j5b
-.section s1j6a
-.section s1j6b
-.section s1j7a
-.section s1j7b
-.section s1j8a
-.section s1j8b
-.section s1j9a
-.section s1j9b
-.section s1j0a
-.section s1j0b
-.section s1kaa
-.section s1kab
-.section s1kba
-.section s1kbb
-.section s1kca
-.section s1kcb
-.section s1kda
-.section s1kdb
-.section s1kea
-.section s1keb
-.section s1kfa
-.section s1kfb
-.section s1kga
-.section s1kgb
-.section s1kha
-.section s1khb
-.section s1kia
-.section s1kib
-.section s1kja
-.section s1kjb
-.section s1kka
-.section s1kkb
-.section s1kla
-.section s1klb
-.section s1kma
-.section s1kmb
-.section s1kna
-.section s1knb
-.section s1koa
-.section s1kob
-.section s1kpa
-.section s1kpb
-.section s1kqa
-.section s1kqb
-.section s1kra
-.section s1krb
-.section s1ksa
-.section s1ksb
-.section s1kta
-.section s1ktb
-.section s1kua
-.section s1kub
-.section s1kva
-.section s1kvb
-.section s1kwa
-.section s1kwb
-.section s1kxa
-.section s1kxb
-.section s1kya
-.section s1kyb
-.section s1kza
-.section s1kzb
-.section s1k1a
-.section s1k1b
-.section s1k2a
-.section s1k2b
-.section s1k3a
-.section s1k3b
-.section s1k4a
-.section s1k4b
-.section s1k5a
-.section s1k5b
-.section s1k6a
-.section s1k6b
-.section s1k7a
-.section s1k7b
-.section s1k8a
-.section s1k8b
-.section s1k9a
-.section s1k9b
-.section s1k0a
-.section s1k0b
-.section s1laa
-.section s1lab
-.section s1lba
-.section s1lbb
-.section s1lca
-.section s1lcb
-.section s1lda
-.section s1ldb
-.section s1lea
-.section s1leb
-.section s1lfa
-.section s1lfb
-.section s1lga
-.section s1lgb
-.section s1lha
-.section s1lhb
-.section s1lia
-.section s1lib
-.section s1lja
-.section s1ljb
-.section s1lka
-.section s1lkb
-.section s1lla
-.section s1llb
-.section s1lma
-.section s1lmb
-.section s1lna
-.section s1lnb
-.section s1loa
-.section s1lob
-.section s1lpa
-.section s1lpb
-.section s1lqa
-.section s1lqb
-.section s1lra
-.section s1lrb
-.section s1lsa
-.section s1lsb
-.section s1lta
-.section s1ltb
-.section s1lua
-.section s1lub
-.section s1lva
-.section s1lvb
-.section s1lwa
-.section s1lwb
-.section s1lxa
-.section s1lxb
-.section s1lya
-.section s1lyb
-.section s1lza
-.section s1lzb
-.section s1l1a
-.section s1l1b
-.section s1l2a
-.section s1l2b
-.section s1l3a
-.section s1l3b
-.section s1l4a
-.section s1l4b
-.section s1l5a
-.section s1l5b
-.section s1l6a
-.section s1l6b
-.section s1l7a
-.section s1l7b
-.section s1l8a
-.section s1l8b
-.section s1l9a
-.section s1l9b
-.section s1l0a
-.section s1l0b
-.section s1maa
-.section s1mab
-.section s1mba
-.section s1mbb
-.section s1mca
-.section s1mcb
-.section s1mda
-.section s1mdb
-.section s1mea
-.section s1meb
-.section s1mfa
-.section s1mfb
-.section s1mga
-.section s1mgb
-.section s1mha
-.section s1mhb
-.section s1mia
-.section s1mib
-.section s1mja
-.section s1mjb
-.section s1mka
-.section s1mkb
-.section s1mla
-.section s1mlb
-.section s1mma
-.section s1mmb
-.section s1mna
-.section s1mnb
-.section s1moa
-.section s1mob
-.section s1mpa
-.section s1mpb
-.section s1mqa
-.section s1mqb
-.section s1mra
-.section s1mrb
-.section s1msa
-.section s1msb
-.section s1mta
-.section s1mtb
-.section s1mua
-.section s1mub
-.section s1mva
-.section s1mvb
-.section s1mwa
-.section s1mwb
-.section s1mxa
-.section s1mxb
-.section s1mya
-.section s1myb
-.section s1mza
-.section s1mzb
-.section s1m1a
-.section s1m1b
-.section s1m2a
-.section s1m2b
-.section s1m3a
-.section s1m3b
-.section s1m4a
-.section s1m4b
-.section s1m5a
-.section s1m5b
-.section s1m6a
-.section s1m6b
-.section s1m7a
-.section s1m7b
-.section s1m8a
-.section s1m8b
-.section s1m9a
-.section s1m9b
-.section s1m0a
-.section s1m0b
-.section s1naa
-.section s1nab
-.section s1nba
-.section s1nbb
-.section s1nca
-.section s1ncb
-.section s1nda
-.section s1ndb
-.section s1nea
-.section s1neb
-.section s1nfa
-.section s1nfb
-.section s1nga
-.section s1ngb
-.section s1nha
-.section s1nhb
-.section s1nia
-.section s1nib
-.section s1nja
-.section s1njb
-.section s1nka
-.section s1nkb
-.section s1nla
-.section s1nlb
-.section s1nma
-.section s1nmb
-.section s1nna
-.section s1nnb
-.section s1noa
-.section s1nob
-.section s1npa
-.section s1npb
-.section s1nqa
-.section s1nqb
-.section s1nra
-.section s1nrb
-.section s1nsa
-.section s1nsb
-.section s1nta
-.section s1ntb
-.section s1nua
-.section s1nub
-.section s1nva
-.section s1nvb
-.section s1nwa
-.section s1nwb
-.section s1nxa
-.section s1nxb
-.section s1nya
-.section s1nyb
-.section s1nza
-.section s1nzb
-.section s1n1a
-.section s1n1b
-.section s1n2a
-.section s1n2b
-.section s1n3a
-.section s1n3b
-.section s1n4a
-.section s1n4b
-.section s1n5a
-.section s1n5b
-.section s1n6a
-.section s1n6b
-.section s1n7a
-.section s1n7b
-.section s1n8a
-.section s1n8b
-.section s1n9a
-.section s1n9b
-.section s1n0a
-.section s1n0b
-.section s1oaa
-.section s1oab
-.section s1oba
-.section s1obb
-.section s1oca
-.section s1ocb
-.section s1oda
-.section s1odb
-.section s1oea
-.section s1oeb
-.section s1ofa
-.section s1ofb
-.section s1oga
-.section s1ogb
-.section s1oha
-.section s1ohb
-.section s1oia
-.section s1oib
-.section s1oja
-.section s1ojb
-.section s1oka
-.section s1okb
-.section s1ola
-.section s1olb
-.section s1oma
-.section s1omb
-.section s1ona
-.section s1onb
-.section s1ooa
-.section s1oob
-.section s1opa
-.section s1opb
-.section s1oqa
-.section s1oqb
-.section s1ora
-.section s1orb
-.section s1osa
-.section s1osb
-.section s1ota
-.section s1otb
-.section s1oua
-.section s1oub
-.section s1ova
-.section s1ovb
-.section s1owa
-.section s1owb
-.section s1oxa
-.section s1oxb
-.section s1oya
-.section s1oyb
-.section s1oza
-.section s1ozb
-.section s1o1a
-.section s1o1b
-.section s1o2a
-.section s1o2b
-.section s1o3a
-.section s1o3b
-.section s1o4a
-.section s1o4b
-.section s1o5a
-.section s1o5b
-.section s1o6a
-.section s1o6b
-.section s1o7a
-.section s1o7b
-.section s1o8a
-.section s1o8b
-.section s1o9a
-.section s1o9b
-.section s1o0a
-.section s1o0b
-.section s1paa
-.section s1pab
-.section s1pba
-.section s1pbb
-.section s1pca
-.section s1pcb
-.section s1pda
-.section s1pdb
-.section s1pea
-.section s1peb
-.section s1pfa
-.section s1pfb
-.section s1pga
-.section s1pgb
-.section s1pha
-.section s1phb
-.section s1pia
-.section s1pib
-.section s1pja
-.section s1pjb
-.section s1pka
-.section s1pkb
-.section s1pla
-.section s1plb
-.section s1pma
-.section s1pmb
-.section s1pna
-.section s1pnb
-.section s1poa
-.section s1pob
-.section s1ppa
-.section s1ppb
-.section s1pqa
-.section s1pqb
-.section s1pra
-.section s1prb
-.section s1psa
-.section s1psb
-.section s1pta
-.section s1ptb
-.section s1pua
-.section s1pub
-.section s1pva
-.section s1pvb
-.section s1pwa
-.section s1pwb
-.section s1pxa
-.section s1pxb
-.section s1pya
-.section s1pyb
-.section s1pza
-.section s1pzb
-.section s1p1a
-.section s1p1b
-.section s1p2a
-.section s1p2b
-.section s1p3a
-.section s1p3b
-.section s1p4a
-.section s1p4b
-.section s1p5a
-.section s1p5b
-.section s1p6a
-.section s1p6b
-.section s1p7a
-.section s1p7b
-.section s1p8a
-.section s1p8b
-.section s1p9a
-.section s1p9b
-.section s1p0a
-.section s1p0b
-.section s1qaa
-.section s1qab
-.section s1qba
-.section s1qbb
-.section s1qca
-.section s1qcb
-.section s1qda
-.section s1qdb
-.section s1qea
-.section s1qeb
-.section s1qfa
-.section s1qfb
-.section s1qga
-.section s1qgb
-.section s1qha
-.section s1qhb
-.section s1qia
-.section s1qib
-.section s1qja
-.section s1qjb
-.section s1qka
-.section s1qkb
-.section s1qla
-.section s1qlb
-.section s1qma
-.section s1qmb
-.section s1qna
-.section s1qnb
-.section s1qoa
-.section s1qob
-.section s1qpa
-.section s1qpb
-.section s1qqa
-.section s1qqb
-.section s1qra
-.section s1qrb
-.section s1qsa
-.section s1qsb
-.section s1qta
-.section s1qtb
-.section s1qua
-.section s1qub
-.section s1qva
-.section s1qvb
-.section s1qwa
-.section s1qwb
-.section s1qxa
-.section s1qxb
-.section s1qya
-.section s1qyb
-.section s1qza
-.section s1qzb
-.section s1q1a
-.section s1q1b
-.section s1q2a
-.section s1q2b
-.section s1q3a
-.section s1q3b
-.section s1q4a
-.section s1q4b
-.section s1q5a
-.section s1q5b
-.section s1q6a
-.section s1q6b
-.section s1q7a
-.section s1q7b
-.section s1q8a
-.section s1q8b
-.section s1q9a
-.section s1q9b
-.section s1q0a
-.section s1q0b
-.section s1raa
-.section s1rab
-.section s1rba
-.section s1rbb
-.section s1rca
-.section s1rcb
-.section s1rda
-.section s1rdb
-.section s1rea
-.section s1reb
-.section s1rfa
-.section s1rfb
-.section s1rga
-.section s1rgb
-.section s1rha
-.section s1rhb
-.section s1ria
-.section s1rib
-.section s1rja
-.section s1rjb
-.section s1rka
-.section s1rkb
-.section s1rla
-.section s1rlb
-.section s1rma
-.section s1rmb
-.section s1rna
-.section s1rnb
-.section s1roa
-.section s1rob
-.section s1rpa
-.section s1rpb
-.section s1rqa
-.section s1rqb
-.section s1rra
-.section s1rrb
-.section s1rsa
-.section s1rsb
-.section s1rta
-.section s1rtb
-.section s1rua
-.section s1rub
-.section s1rva
-.section s1rvb
-.section s1rwa
-.section s1rwb
-.section s1rxa
-.section s1rxb
-.section s1rya
-.section s1ryb
-.section s1rza
-.section s1rzb
-.section s1r1a
-.section s1r1b
-.section s1r2a
-.section s1r2b
-.section s1r3a
-.section s1r3b
-.section s1r4a
-.section s1r4b
-.section s1r5a
-.section s1r5b
-.section s1r6a
-.section s1r6b
-.section s1r7a
-.section s1r7b
-.section s1r8a
-.section s1r8b
-.section s1r9a
-.section s1r9b
-.section s1r0a
-.section s1r0b
-.section s1saa
-.section s1sab
-.section s1sba
-.section s1sbb
-.section s1sca
-.section s1scb
-.section s1sda
-.section s1sdb
-.section s1sea
-.section s1seb
-.section s1sfa
-.section s1sfb
-.section s1sga
-.section s1sgb
-.section s1sha
-.section s1shb
-.section s1sia
-.section s1sib
-.section s1sja
-.section s1sjb
-.section s1ska
-.section s1skb
-.section s1sla
-.section s1slb
-.section s1sma
-.section s1smb
-.section s1sna
-.section s1snb
-.section s1soa
-.section s1sob
-.section s1spa
-.section s1spb
-.section s1sqa
-.section s1sqb
-.section s1sra
-.section s1srb
-.section s1ssa
-.section s1ssb
-.section s1sta
-.section s1stb
-.section s1sua
-.section s1sub
-.section s1sva
-.section s1svb
-.section s1swa
-.section s1swb
-.section s1sxa
-.section s1sxb
-.section s1sya
-.section s1syb
-.section s1sza
-.section s1szb
-.section s1s1a
-.section s1s1b
-.section s1s2a
-.section s1s2b
-.section s1s3a
-.section s1s3b
-.section s1s4a
-.section s1s4b
-.section s1s5a
-.section s1s5b
-.section s1s6a
-.section s1s6b
-.section s1s7a
-.section s1s7b
-.section s1s8a
-.section s1s8b
-.section s1s9a
-.section s1s9b
-.section s1s0a
-.section s1s0b
-.section s1taa
-.section s1tab
-.section s1tba
-.section s1tbb
-.section s1tca
-.section s1tcb
-.section s1tda
-.section s1tdb
-.section s1tea
-.section s1teb
-.section s1tfa
-.section s1tfb
-.section s1tga
-.section s1tgb
-.section s1tha
-.section s1thb
-.section s1tia
-.section s1tib
-.section s1tja
-.section s1tjb
-.section s1tka
-.section s1tkb
-.section s1tla
-.section s1tlb
-.section s1tma
-.section s1tmb
-.section s1tna
-.section s1tnb
-.section s1toa
-.section s1tob
-.section s1tpa
-.section s1tpb
-.section s1tqa
-.section s1tqb
-.section s1tra
-.section s1trb
-.section s1tsa
-.section s1tsb
-.section s1tta
-.section s1ttb
-.section s1tua
-.section s1tub
-.section s1tva
-.section s1tvb
-.section s1twa
-.section s1twb
-.section s1txa
-.section s1txb
-.section s1tya
-.section s1tyb
-.section s1tza
-.section s1tzb
-.section s1t1a
-.section s1t1b
-.section s1t2a
-.section s1t2b
-.section s1t3a
-.section s1t3b
-.section s1t4a
-.section s1t4b
-.section s1t5a
-.section s1t5b
-.section s1t6a
-.section s1t6b
-.section s1t7a
-.section s1t7b
-.section s1t8a
-.section s1t8b
-.section s1t9a
-.section s1t9b
-.section s1t0a
-.section s1t0b
-.section s1uaa
-.section s1uab
-.section s1uba
-.section s1ubb
-.section s1uca
-.section s1ucb
-.section s1uda
-.section s1udb
-.section s1uea
-.section s1ueb
-.section s1ufa
-.section s1ufb
-.section s1uga
-.section s1ugb
-.section s1uha
-.section s1uhb
-.section s1uia
-.section s1uib
-.section s1uja
-.section s1ujb
-.section s1uka
-.section s1ukb
-.section s1ula
-.section s1ulb
-.section s1uma
-.section s1umb
-.section s1una
-.section s1unb
-.section s1uoa
-.section s1uob
-.section s1upa
-.section s1upb
-.section s1uqa
-.section s1uqb
-.section s1ura
-.section s1urb
-.section s1usa
-.section s1usb
-.section s1uta
-.section s1utb
-.section s1uua
-.section s1uub
-.section s1uva
-.section s1uvb
-.section s1uwa
-.section s1uwb
-.section s1uxa
-.section s1uxb
-.section s1uya
-.section s1uyb
-.section s1uza
-.section s1uzb
-.section s1u1a
-.section s1u1b
-.section s1u2a
-.section s1u2b
-.section s1u3a
-.section s1u3b
-.section s1u4a
-.section s1u4b
-.section s1u5a
-.section s1u5b
-.section s1u6a
-.section s1u6b
-.section s1u7a
-.section s1u7b
-.section s1u8a
-.section s1u8b
-.section s1u9a
-.section s1u9b
-.section s1u0a
-.section s1u0b
-.section s1vaa
-.section s1vab
-.section s1vba
-.section s1vbb
-.section s1vca
-.section s1vcb
-.section s1vda
-.section s1vdb
-.section s1vea
-.section s1veb
-.section s1vfa
-.section s1vfb
-.section s1vga
-.section s1vgb
-.section s1vha
-.section s1vhb
-.section s1via
-.section s1vib
-.section s1vja
-.section s1vjb
-.section s1vka
-.section s1vkb
-.section s1vla
-.section s1vlb
-.section s1vma
-.section s1vmb
-.section s1vna
-.section s1vnb
-.section s1voa
-.section s1vob
-.section s1vpa
-.section s1vpb
-.section s1vqa
-.section s1vqb
-.section s1vra
-.section s1vrb
-.section s1vsa
-.section s1vsb
-.section s1vta
-.section s1vtb
-.section s1vua
-.section s1vub
-.section s1vva
-.section s1vvb
-.section s1vwa
-.section s1vwb
-.section s1vxa
-.section s1vxb
-.section s1vya
-.section s1vyb
-.section s1vza
-.section s1vzb
-.section s1v1a
-.section s1v1b
-.section s1v2a
-.section s1v2b
-.section s1v3a
-.section s1v3b
-.section s1v4a
-.section s1v4b
-.section s1v5a
-.section s1v5b
-.section s1v6a
-.section s1v6b
-.section s1v7a
-.section s1v7b
-.section s1v8a
-.section s1v8b
-.section s1v9a
-.section s1v9b
-.section s1v0a
-.section s1v0b
-.section s1waa
-.section s1wab
-.section s1wba
-.section s1wbb
-.section s1wca
-.section s1wcb
-.section s1wda
-.section s1wdb
-.section s1wea
-.section s1web
-.section s1wfa
-.section s1wfb
-.section s1wga
-.section s1wgb
-.section s1wha
-.section s1whb
-.section s1wia
-.section s1wib
-.section s1wja
-.section s1wjb
-.section s1wka
-.section s1wkb
-.section s1wla
-.section s1wlb
-.section s1wma
-.section s1wmb
-.section s1wna
-.section s1wnb
-.section s1woa
-.section s1wob
-.section s1wpa
-.section s1wpb
-.section s1wqa
-.section s1wqb
-.section s1wra
-.section s1wrb
-.section s1wsa
-.section s1wsb
-.section s1wta
-.section s1wtb
-.section s1wua
-.section s1wub
-.section s1wva
-.section s1wvb
-.section s1wwa
-.section s1wwb
-.section s1wxa
-.section s1wxb
-.section s1wya
-.section s1wyb
-.section s1wza
-.section s1wzb
-.section s1w1a
-.section s1w1b
-.section s1w2a
-.section s1w2b
-.section s1w3a
-.section s1w3b
-.section s1w4a
-.section s1w4b
-.section s1w5a
-.section s1w5b
-.section s1w6a
-.section s1w6b
-.section s1w7a
-.section s1w7b
-.section s1w8a
-.section s1w8b
-.section s1w9a
-.section s1w9b
-.section s1w0a
-.section s1w0b
-.section s1xaa
-.section s1xab
-.section s1xba
-.section s1xbb
-.section s1xca
-.section s1xcb
-.section s1xda
-.section s1xdb
-.section s1xea
-.section s1xeb
-.section s1xfa
-.section s1xfb
-.section s1xga
-.section s1xgb
-.section s1xha
-.section s1xhb
-.section s1xia
-.section s1xib
-.section s1xja
-.section s1xjb
-.section s1xka
-.section s1xkb
-.section s1xla
-.section s1xlb
-.section s1xma
-.section s1xmb
-.section s1xna
-.section s1xnb
-.section s1xoa
-.section s1xob
-.section s1xpa
-.section s1xpb
-.section s1xqa
-.section s1xqb
-.section s1xra
-.section s1xrb
-.section s1xsa
-.section s1xsb
-.section s1xta
-.section s1xtb
-.section s1xua
-.section s1xub
-.section s1xva
-.section s1xvb
-.section s1xwa
-.section s1xwb
-.section s1xxa
-.section s1xxb
-.section s1xya
-.section s1xyb
-.section s1xza
-.section s1xzb
-.section s1x1a
-.section s1x1b
-.section s1x2a
-.section s1x2b
-.section s1x3a
-.section s1x3b
-.section s1x4a
-.section s1x4b
-.section s1x5a
-.section s1x5b
-.section s1x6a
-.section s1x6b
-.section s1x7a
-.section s1x7b
-.section s1x8a
-.section s1x8b
-.section s1x9a
-.section s1x9b
-.section s1x0a
-.section s1x0b
-.section s1yaa
-.section s1yab
-.section s1yba
-.section s1ybb
-.section s1yca
-.section s1ycb
-.section s1yda
-.section s1ydb
-.section s1yea
-.section s1yeb
-.section s1yfa
-.section s1yfb
-.section s1yga
-.section s1ygb
-.section s1yha
-.section s1yhb
-.section s1yia
-.section s1yib
-.section s1yja
-.section s1yjb
-.section s1yka
-.section s1ykb
-.section s1yla
-.section s1ylb
-.section s1yma
-.section s1ymb
-.section s1yna
-.section s1ynb
-.section s1yoa
-.section s1yob
-.section s1ypa
-.section s1ypb
-.section s1yqa
-.section s1yqb
-.section s1yra
-.section s1yrb
-.section s1ysa
-.section s1ysb
-.section s1yta
-.section s1ytb
-.section s1yua
-.section s1yub
-.section s1yva
-.section s1yvb
-.section s1ywa
-.section s1ywb
-.section s1yxa
-.section s1yxb
-.section s1yya
-.section s1yyb
-.section s1yza
-.section s1yzb
-.section s1y1a
-.section s1y1b
-.section s1y2a
-.section s1y2b
-.section s1y3a
-.section s1y3b
-.section s1y4a
-.section s1y4b
-.section s1y5a
-.section s1y5b
-.section s1y6a
-.section s1y6b
-.section s1y7a
-.section s1y7b
-.section s1y8a
-.section s1y8b
-.section s1y9a
-.section s1y9b
-.section s1y0a
-.section s1y0b
-.section s1zaa
-.section s1zab
-.section s1zba
-.section s1zbb
-.section s1zca
-.section s1zcb
-.section s1zda
-.section s1zdb
-.section s1zea
-.section s1zeb
-.section s1zfa
-.section s1zfb
-.section s1zga
-.section s1zgb
-.section s1zha
-.section s1zhb
-.section s1zia
-.section s1zib
-.section s1zja
-.section s1zjb
-.section s1zka
-.section s1zkb
-.section s1zla
-.section s1zlb
-.section s1zma
-.section s1zmb
-.section s1zna
-.section s1znb
-.section s1zoa
-.section s1zob
-.section s1zpa
-.section s1zpb
-.section s1zqa
-.section s1zqb
-.section s1zra
-.section s1zrb
-.section s1zsa
-.section s1zsb
-.section s1zta
-.section s1ztb
-.section s1zua
-.section s1zub
-.section s1zva
-.section s1zvb
-.section s1zwa
-.section s1zwb
-.section s1zxa
-.section s1zxb
-.section s1zya
-.section s1zyb
-.section s1zza
-.section s1zzb
-.section s1z1a
-.section s1z1b
-.section s1z2a
-.section s1z2b
-.section s1z3a
-.section s1z3b
-.section s1z4a
-.section s1z4b
-.section s1z5a
-.section s1z5b
-.section s1z6a
-.section s1z6b
-.section s1z7a
-.section s1z7b
-.section s1z8a
-.section s1z8b
-.section s1z9a
-.section s1z9b
-.section s1z0a
-.section s1z0b
-.section s11aa
-.section s11ab
-.section s11ba
-.section s11bb
-.section s11ca
-.section s11cb
-.section s11da
-.section s11db
-.section s11ea
-.section s11eb
-.section s11fa
-.section s11fb
-.section s11ga
-.section s11gb
-.section s11ha
-.section s11hb
-.section s11ia
-.section s11ib
-.section s11ja
-.section s11jb
-.section s11ka
-.section s11kb
-.section s11la
-.section s11lb
-.section s11ma
-.section s11mb
-.section s11na
-.section s11nb
-.section s11oa
-.section s11ob
-.section s11pa
-.section s11pb
-.section s11qa
-.section s11qb
-.section s11ra
-.section s11rb
-.section s11sa
-.section s11sb
-.section s11ta
-.section s11tb
-.section s11ua
-.section s11ub
-.section s11va
-.section s11vb
-.section s11wa
-.section s11wb
-.section s11xa
-.section s11xb
-.section s11ya
-.section s11yb
-.section s11za
-.section s11zb
-.section s111a
-.section s111b
-.section s112a
-.section s112b
-.section s113a
-.section s113b
-.section s114a
-.section s114b
-.section s115a
-.section s115b
-.section s116a
-.section s116b
-.section s117a
-.section s117b
-.section s118a
-.section s118b
-.section s119a
-.section s119b
-.section s110a
-.section s110b
-.section s12aa
-.section s12ab
-.section s12ba
-.section s12bb
-.section s12ca
-.section s12cb
-.section s12da
-.section s12db
-.section s12ea
-.section s12eb
-.section s12fa
-.section s12fb
-.section s12ga
-.section s12gb
-.section s12ha
-.section s12hb
-.section s12ia
-.section s12ib
-.section s12ja
-.section s12jb
-.section s12ka
-.section s12kb
-.section s12la
-.section s12lb
-.section s12ma
-.section s12mb
-.section s12na
-.section s12nb
-.section s12oa
-.section s12ob
-.section s12pa
-.section s12pb
-.section s12qa
-.section s12qb
-.section s12ra
-.section s12rb
-.section s12sa
-.section s12sb
-.section s12ta
-.section s12tb
-.section s12ua
-.section s12ub
-.section s12va
-.section s12vb
-.section s12wa
-.section s12wb
-.section s12xa
-.section s12xb
-.section s12ya
-.section s12yb
-.section s12za
-.section s12zb
-.section s121a
-.section s121b
-.section s122a
-.section s122b
-.section s123a
-.section s123b
-.section s124a
-.section s124b
-.section s125a
-.section s125b
-.section s126a
-.section s126b
-.section s127a
-.section s127b
-.section s128a
-.section s128b
-.section s129a
-.section s129b
-.section s120a
-.section s120b
-.section s13aa
-.section s13ab
-.section s13ba
-.section s13bb
-.section s13ca
-.section s13cb
-.section s13da
-.section s13db
-.section s13ea
-.section s13eb
-.section s13fa
-.section s13fb
-.section s13ga
-.section s13gb
-.section s13ha
-.section s13hb
-.section s13ia
-.section s13ib
-.section s13ja
-.section s13jb
-.section s13ka
-.section s13kb
-.section s13la
-.section s13lb
-.section s13ma
-.section s13mb
-.section s13na
-.section s13nb
-.section s13oa
-.section s13ob
-.section s13pa
-.section s13pb
-.section s13qa
-.section s13qb
-.section s13ra
-.section s13rb
-.section s13sa
-.section s13sb
-.section s13ta
-.section s13tb
-.section s13ua
-.section s13ub
-.section s13va
-.section s13vb
-.section s13wa
-.section s13wb
-.section s13xa
-.section s13xb
-.section s13ya
-.section s13yb
-.section s13za
-.section s13zb
-.section s131a
-.section s131b
-.section s132a
-.section s132b
-.section s133a
-.section s133b
-.section s134a
-.section s134b
-.section s135a
-.section s135b
-.section s136a
-.section s136b
-.section s137a
-.section s137b
-.section s138a
-.section s138b
-.section s139a
-.section s139b
-.section s130a
-.section s130b
-.section s14aa
-.section s14ab
-.section s14ba
-.section s14bb
-.section s14ca
-.section s14cb
-.section s14da
-.section s14db
-.section s14ea
-.section s14eb
-.section s14fa
-.section s14fb
-.section s14ga
-.section s14gb
-.section s14ha
-.section s14hb
-.section s14ia
-.section s14ib
-.section s14ja
-.section s14jb
-.section s14ka
-.section s14kb
-.section s14la
-.section s14lb
-.section s14ma
-.section s14mb
-.section s14na
-.section s14nb
-.section s14oa
-.section s14ob
-.section s14pa
-.section s14pb
-.section s14qa
-.section s14qb
-.section s14ra
-.section s14rb
-.section s14sa
-.section s14sb
-.section s14ta
-.section s14tb
-.section s14ua
-.section s14ub
-.section s14va
-.section s14vb
-.section s14wa
-.section s14wb
-.section s14xa
-.section s14xb
-.section s14ya
-.section s14yb
-.section s14za
-.section s14zb
-.section s141a
-.section s141b
-.section s142a
-.section s142b
-.section s143a
-.section s143b
-.section s144a
-.section s144b
-.section s145a
-.section s145b
-.section s146a
-.section s146b
-.section s147a
-.section s147b
-.section s148a
-.section s148b
-.section s149a
-.section s149b
-.section s140a
-.section s140b
-.section s15aa
-.section s15ab
-.section s15ba
-.section s15bb
-.section s15ca
-.section s15cb
-.section s15da
-.section s15db
-.section s15ea
-.section s15eb
-.section s15fa
-.section s15fb
-.section s15ga
-.section s15gb
-.section s15ha
-.section s15hb
-.section s15ia
-.section s15ib
-.section s15ja
-.section s15jb
-.section s15ka
-.section s15kb
-.section s15la
-.section s15lb
-.section s15ma
-.section s15mb
-.section s15na
-.section s15nb
-.section s15oa
-.section s15ob
-.section s15pa
-.section s15pb
-.section s15qa
-.section s15qb
-.section s15ra
-.section s15rb
-.section s15sa
-.section s15sb
-.section s15ta
-.section s15tb
-.section s15ua
-.section s15ub
-.section s15va
-.section s15vb
-.section s15wa
-.section s15wb
-.section s15xa
-.section s15xb
-.section s15ya
-.section s15yb
-.section s15za
-.section s15zb
-.section s151a
-.section s151b
-.section s152a
-.section s152b
-.section s153a
-.section s153b
-.section s154a
-.section s154b
-.section s155a
-.section s155b
-.section s156a
-.section s156b
-.section s157a
-.section s157b
-.section s158a
-.section s158b
-.section s159a
-.section s159b
-.section s150a
-.section s150b
-.section s16aa
-.section s16ab
-.section s16ba
-.section s16bb
-.section s16ca
-.section s16cb
-.section s16da
-.section s16db
-.section s16ea
-.section s16eb
-.section s16fa
-.section s16fb
-.section s16ga
-.section s16gb
-.section s16ha
-.section s16hb
-.section s16ia
-.section s16ib
-.section s16ja
-.section s16jb
-.section s16ka
-.section s16kb
-.section s16la
-.section s16lb
-.section s16ma
-.section s16mb
-.section s16na
-.section s16nb
-.section s16oa
-.section s16ob
-.section s16pa
-.section s16pb
-.section s16qa
-.section s16qb
-.section s16ra
-.section s16rb
-.section s16sa
-.section s16sb
-.section s16ta
-.section s16tb
-.section s16ua
-.section s16ub
-.section s16va
-.section s16vb
-.section s16wa
-.section s16wb
-.section s16xa
-.section s16xb
-.section s16ya
-.section s16yb
-.section s16za
-.section s16zb
-.section s161a
-.section s161b
-.section s162a
-.section s162b
-.section s163a
-.section s163b
-.section s164a
-.section s164b
-.section s165a
-.section s165b
-.section s166a
-.section s166b
-.section s167a
-.section s167b
-.section s168a
-.section s168b
-.section s169a
-.section s169b
-.section s160a
-.section s160b
-.section s17aa
-.section s17ab
-.section s17ba
-.section s17bb
-.section s17ca
-.section s17cb
-.section s17da
-.section s17db
-.section s17ea
-.section s17eb
-.section s17fa
-.section s17fb
-.section s17ga
-.section s17gb
-.section s17ha
-.section s17hb
-.section s17ia
-.section s17ib
-.section s17ja
-.section s17jb
-.section s17ka
-.section s17kb
-.section s17la
-.section s17lb
-.section s17ma
-.section s17mb
-.section s17na
-.section s17nb
-.section s17oa
-.section s17ob
-.section s17pa
-.section s17pb
-.section s17qa
-.section s17qb
-.section s17ra
-.section s17rb
-.section s17sa
-.section s17sb
-.section s17ta
-.section s17tb
-.section s17ua
-.section s17ub
-.section s17va
-.section s17vb
-.section s17wa
-.section s17wb
-.section s17xa
-.section s17xb
-.section s17ya
-.section s17yb
-.section s17za
-.section s17zb
-.section s171a
-.section s171b
-.section s172a
-.section s172b
-.section s173a
-.section s173b
-.section s174a
-.section s174b
-.section s175a
-.section s175b
-.section s176a
-.section s176b
-.section s177a
-.section s177b
-.section s178a
-.section s178b
-.section s179a
-.section s179b
-.section s170a
-.section s170b
-.section s18aa
-.section s18ab
-.section s18ba
-.section s18bb
-.section s18ca
-.section s18cb
-.section s18da
-.section s18db
-.section s18ea
-.section s18eb
-.section s18fa
-.section s18fb
-.section s18ga
-.section s18gb
-.section s18ha
-.section s18hb
-.section s18ia
-.section s18ib
-.section s18ja
-.section s18jb
-.section s18ka
-.section s18kb
-.section s18la
-.section s18lb
-.section s18ma
-.section s18mb
-.section s18na
-.section s18nb
-.section s18oa
-.section s18ob
-.section s18pa
-.section s18pb
-.section s18qa
-.section s18qb
-.section s18ra
-.section s18rb
-.section s18sa
-.section s18sb
-.section s18ta
-.section s18tb
-.section s18ua
-.section s18ub
-.section s18va
-.section s18vb
-.section s18wa
-.section s18wb
-.section s18xa
-.section s18xb
-.section s18ya
-.section s18yb
-.section s18za
-.section s18zb
-.section s181a
-.section s181b
-.section s182a
-.section s182b
-.section s183a
-.section s183b
-.section s184a
-.section s184b
-.section s185a
-.section s185b
-.section s186a
-.section s186b
-.section s187a
-.section s187b
-.section s188a
-.section s188b
-.section s189a
-.section s189b
-.section s180a
-.section s180b
-.section s19aa
-.section s19ab
-.section s19ba
-.section s19bb
-.section s19ca
-.section s19cb
-.section s19da
-.section s19db
-.section s19ea
-.section s19eb
-.section s19fa
-.section s19fb
-.section s19ga
-.section s19gb
-.section s19ha
-.section s19hb
-.section s19ia
-.section s19ib
-.section s19ja
-.section s19jb
-.section s19ka
-.section s19kb
-.section s19la
-.section s19lb
-.section s19ma
-.section s19mb
-.section s19na
-.section s19nb
-.section s19oa
-.section s19ob
-.section s19pa
-.section s19pb
-.section s19qa
-.section s19qb
-.section s19ra
-.section s19rb
-.section s19sa
-.section s19sb
-.section s19ta
-.section s19tb
-.section s19ua
-.section s19ub
-.section s19va
-.section s19vb
-.section s19wa
-.section s19wb
-.section s19xa
-.section s19xb
-.section s19ya
-.section s19yb
-.section s19za
-.section s19zb
-.section s191a
-.section s191b
-.section s192a
-.section s192b
-.section s193a
-.section s193b
-.section s194a
-.section s194b
-.section s195a
-.section s195b
-.section s196a
-.section s196b
-.section s197a
-.section s197b
-.section s198a
-.section s198b
-.section s199a
-.section s199b
-.section s190a
-.section s190b
-.section s10aa
-.section s10ab
-.section s10ba
-.section s10bb
-.section s10ca
-.section s10cb
-.section s10da
-.section s10db
-.section s10ea
-.section s10eb
-.section s10fa
-.section s10fb
-.section s10ga
-.section s10gb
-.section s10ha
-.section s10hb
-.section s10ia
-.section s10ib
-.section s10ja
-.section s10jb
-.section s10ka
-.section s10kb
-.section s10la
-.section s10lb
-.section s10ma
-.section s10mb
-.section s10na
-.section s10nb
-.section s10oa
-.section s10ob
-.section s10pa
-.section s10pb
-.section s10qa
-.section s10qb
-.section s10ra
-.section s10rb
-.section s10sa
-.section s10sb
-.section s10ta
-.section s10tb
-.section s10ua
-.section s10ub
-.section s10va
-.section s10vb
-.section s10wa
-.section s10wb
-.section s10xa
-.section s10xb
-.section s10ya
-.section s10yb
-.section s10za
-.section s10zb
-.section s101a
-.section s101b
-.section s102a
-.section s102b
-.section s103a
-.section s103b
-.section s104a
-.section s104b
-.section s105a
-.section s105b
-.section s106a
-.section s106b
-.section s107a
-.section s107b
-.section s108a
-.section s108b
-.section s109a
-.section s109b
-.section s100a
-.section s100b
-.section s2aaa
-.section s2aab
-.section s2aba
-.section s2abb
-.section s2aca
-.section s2acb
-.section s2ada
-.section s2adb
-.section s2aea
-.section s2aeb
-.section s2afa
-.section s2afb
-.section s2aga
-.section s2agb
-.section s2aha
-.section s2ahb
-.section s2aia
-.section s2aib
-.section s2aja
-.section s2ajb
-.section s2aka
-.section s2akb
-.section s2ala
-.section s2alb
-.section s2ama
-.section s2amb
-.section s2ana
-.section s2anb
-.section s2aoa
-.section s2aob
-.section s2apa
-.section s2apb
-.section s2aqa
-.section s2aqb
-.section s2ara
-.section s2arb
-.section s2asa
-.section s2asb
-.section s2ata
-.section s2atb
-.section s2aua
-.section s2aub
-.section s2ava
-.section s2avb
-.section s2awa
-.section s2awb
-.section s2axa
-.section s2axb
-.section s2aya
-.section s2ayb
-.section s2aza
-.section s2azb
-.section s2a1a
-.section s2a1b
-.section s2a2a
-.section s2a2b
-.section s2a3a
-.section s2a3b
-.section s2a4a
-.section s2a4b
-.section s2a5a
-.section s2a5b
-.section s2a6a
-.section s2a6b
-.section s2a7a
-.section s2a7b
-.section s2a8a
-.section s2a8b
-.section s2a9a
-.section s2a9b
-.section s2a0a
-.section s2a0b
-.section s2baa
-.section s2bab
-.section s2bba
-.section s2bbb
-.section s2bca
-.section s2bcb
-.section s2bda
-.section s2bdb
-.section s2bea
-.section s2beb
-.section s2bfa
-.section s2bfb
-.section s2bga
-.section s2bgb
-.section s2bha
-.section s2bhb
-.section s2bia
-.section s2bib
-.section s2bja
-.section s2bjb
-.section s2bka
-.section s2bkb
-.section s2bla
-.section s2blb
-.section s2bma
-.section s2bmb
-.section s2bna
-.section s2bnb
-.section s2boa
-.section s2bob
-.section s2bpa
-.section s2bpb
-.section s2bqa
-.section s2bqb
-.section s2bra
-.section s2brb
-.section s2bsa
-.section s2bsb
-.section s2bta
-.section s2btb
-.section s2bua
-.section s2bub
-.section s2bva
-.section s2bvb
-.section s2bwa
-.section s2bwb
-.section s2bxa
-.section s2bxb
-.section s2bya
-.section s2byb
-.section s2bza
-.section s2bzb
-.section s2b1a
-.section s2b1b
-.section s2b2a
-.section s2b2b
-.section s2b3a
-.section s2b3b
-.section s2b4a
-.section s2b4b
-.section s2b5a
-.section s2b5b
-.section s2b6a
-.section s2b6b
-.section s2b7a
-.section s2b7b
-.section s2b8a
-.section s2b8b
-.section s2b9a
-.section s2b9b
-.section s2b0a
-.section s2b0b
-.section s2caa
-.section s2cab
-.section s2cba
-.section s2cbb
-.section s2cca
-.section s2ccb
-.section s2cda
-.section s2cdb
-.section s2cea
-.section s2ceb
-.section s2cfa
-.section s2cfb
-.section s2cga
-.section s2cgb
-.section s2cha
-.section s2chb
-.section s2cia
-.section s2cib
-.section s2cja
-.section s2cjb
-.section s2cka
-.section s2ckb
-.section s2cla
-.section s2clb
-.section s2cma
-.section s2cmb
-.section s2cna
-.section s2cnb
-.section s2coa
-.section s2cob
-.section s2cpa
-.section s2cpb
-.section s2cqa
-.section s2cqb
-.section s2cra
-.section s2crb
-.section s2csa
-.section s2csb
-.section s2cta
-.section s2ctb
-.section s2cua
-.section s2cub
-.section s2cva
-.section s2cvb
-.section s2cwa
-.section s2cwb
-.section s2cxa
-.section s2cxb
-.section s2cya
-.section s2cyb
-.section s2cza
-.section s2czb
-.section s2c1a
-.section s2c1b
-.section s2c2a
-.section s2c2b
-.section s2c3a
-.section s2c3b
-.section s2c4a
-.section s2c4b
-.section s2c5a
-.section s2c5b
-.section s2c6a
-.section s2c6b
-.section s2c7a
-.section s2c7b
-.section s2c8a
-.section s2c8b
-.section s2c9a
-.section s2c9b
-.section s2c0a
-.section s2c0b
-.section s2daa
-.section s2dab
-.section s2dba
-.section s2dbb
-.section s2dca
-.section s2dcb
-.section s2dda
-.section s2ddb
-.section s2dea
-.section s2deb
-.section s2dfa
-.section s2dfb
-.section s2dga
-.section s2dgb
-.section s2dha
-.section s2dhb
-.section s2dia
-.section s2dib
-.section s2dja
-.section s2djb
-.section s2dka
-.section s2dkb
-.section s2dla
-.section s2dlb
-.section s2dma
-.section s2dmb
-.section s2dna
-.section s2dnb
-.section s2doa
-.section s2dob
-.section s2dpa
-.section s2dpb
-.section s2dqa
-.section s2dqb
-.section s2dra
-.section s2drb
-.section s2dsa
-.section s2dsb
-.section s2dta
-.section s2dtb
-.section s2dua
-.section s2dub
-.section s2dva
-.section s2dvb
-.section s2dwa
-.section s2dwb
-.section s2dxa
-.section s2dxb
-.section s2dya
-.section s2dyb
-.section s2dza
-.section s2dzb
-.section s2d1a
-.section s2d1b
-.section s2d2a
-.section s2d2b
-.section s2d3a
-.section s2d3b
-.section s2d4a
-.section s2d4b
-.section s2d5a
-.section s2d5b
-.section s2d6a
-.section s2d6b
-.section s2d7a
-.section s2d7b
-.section s2d8a
-.section s2d8b
-.section s2d9a
-.section s2d9b
-.section s2d0a
-.section s2d0b
-.section s2eaa
-.section s2eab
-.section s2eba
-.section s2ebb
-.section s2eca
-.section s2ecb
-.section s2eda
-.section s2edb
-.section s2eea
-.section s2eeb
-.section s2efa
-.section s2efb
-.section s2ega
-.section s2egb
-.section s2eha
-.section s2ehb
-.section s2eia
-.section s2eib
-.section s2eja
-.section s2ejb
-.section s2eka
-.section s2ekb
-.section s2ela
-.section s2elb
-.section s2ema
-.section s2emb
-.section s2ena
-.section s2enb
-.section s2eoa
-.section s2eob
-.section s2epa
-.section s2epb
-.section s2eqa
-.section s2eqb
-.section s2era
-.section s2erb
-.section s2esa
-.section s2esb
-.section s2eta
-.section s2etb
-.section s2eua
-.section s2eub
-.section s2eva
-.section s2evb
-.section s2ewa
-.section s2ewb
-.section s2exa
-.section s2exb
-.section s2eya
-.section s2eyb
-.section s2eza
-.section s2ezb
-.section s2e1a
-.section s2e1b
-.section s2e2a
-.section s2e2b
-.section s2e3a
-.section s2e3b
-.section s2e4a
-.section s2e4b
-.section s2e5a
-.section s2e5b
-.section s2e6a
-.section s2e6b
-.section s2e7a
-.section s2e7b
-.section s2e8a
-.section s2e8b
-.section s2e9a
-.section s2e9b
-.section s2e0a
-.section s2e0b
-.section s2faa
-.section s2fab
-.section s2fba
-.section s2fbb
-.section s2fca
-.section s2fcb
-.section s2fda
-.section s2fdb
-.section s2fea
-.section s2feb
-.section s2ffa
-.section s2ffb
-.section s2fga
-.section s2fgb
-.section s2fha
-.section s2fhb
-.section s2fia
-.section s2fib
-.section s2fja
-.section s2fjb
-.section s2fka
-.section s2fkb
-.section s2fla
-.section s2flb
-.section s2fma
-.section s2fmb
-.section s2fna
-.section s2fnb
-.section s2foa
-.section s2fob
-.section s2fpa
-.section s2fpb
-.section s2fqa
-.section s2fqb
-.section s2fra
-.section s2frb
-.section s2fsa
-.section s2fsb
-.section s2fta
-.section s2ftb
-.section s2fua
-.section s2fub
-.section s2fva
-.section s2fvb
-.section s2fwa
-.section s2fwb
-.section s2fxa
-.section s2fxb
-.section s2fya
-.section s2fyb
-.section s2fza
-.section s2fzb
-.section s2f1a
-.section s2f1b
-.section s2f2a
-.section s2f2b
-.section s2f3a
-.section s2f3b
-.section s2f4a
-.section s2f4b
-.section s2f5a
-.section s2f5b
-.section s2f6a
-.section s2f6b
-.section s2f7a
-.section s2f7b
-.section s2f8a
-.section s2f8b
-.section s2f9a
-.section s2f9b
-.section s2f0a
-.section s2f0b
-.section s2gaa
-.section s2gab
-.section s2gba
-.section s2gbb
-.section s2gca
-.section s2gcb
-.section s2gda
-.section s2gdb
-.section s2gea
-.section s2geb
-.section s2gfa
-.section s2gfb
-.section s2gga
-.section s2ggb
-.section s2gha
-.section s2ghb
-.section s2gia
-.section s2gib
-.section s2gja
-.section s2gjb
-.section s2gka
-.section s2gkb
-.section s2gla
-.section s2glb
-.section s2gma
-.section s2gmb
-.section s2gna
-.section s2gnb
-.section s2goa
-.section s2gob
-.section s2gpa
-.section s2gpb
-.section s2gqa
-.section s2gqb
-.section s2gra
-.section s2grb
-.section s2gsa
-.section s2gsb
-.section s2gta
-.section s2gtb
-.section s2gua
-.section s2gub
-.section s2gva
-.section s2gvb
-.section s2gwa
-.section s2gwb
-.section s2gxa
-.section s2gxb
-.section s2gya
-.section s2gyb
-.section s2gza
-.section s2gzb
-.section s2g1a
-.section s2g1b
-.section s2g2a
-.section s2g2b
-.section s2g3a
-.section s2g3b
-.section s2g4a
-.section s2g4b
-.section s2g5a
-.section s2g5b
-.section s2g6a
-.section s2g6b
-.section s2g7a
-.section s2g7b
-.section s2g8a
-.section s2g8b
-.section s2g9a
-.section s2g9b
-.section s2g0a
-.section s2g0b
-.section s2haa
-.section s2hab
-.section s2hba
-.section s2hbb
-.section s2hca
-.section s2hcb
-.section s2hda
-.section s2hdb
-.section s2hea
-.section s2heb
-.section s2hfa
-.section s2hfb
-.section s2hga
-.section s2hgb
-.section s2hha
-.section s2hhb
-.section s2hia
-.section s2hib
-.section s2hja
-.section s2hjb
-.section s2hka
-.section s2hkb
-.section s2hla
-.section s2hlb
-.section s2hma
-.section s2hmb
-.section s2hna
-.section s2hnb
-.section s2hoa
-.section s2hob
-.section s2hpa
-.section s2hpb
-.section s2hqa
-.section s2hqb
-.section s2hra
-.section s2hrb
-.section s2hsa
-.section s2hsb
-.section s2hta
-.section s2htb
-.section s2hua
-.section s2hub
-.section s2hva
-.section s2hvb
-.section s2hwa
-.section s2hwb
-.section s2hxa
-.section s2hxb
-.section s2hya
-.section s2hyb
-.section s2hza
-.section s2hzb
-.section s2h1a
-.section s2h1b
-.section s2h2a
-.section s2h2b
-.section s2h3a
-.section s2h3b
-.section s2h4a
-.section s2h4b
-.section s2h5a
-.section s2h5b
-.section s2h6a
-.section s2h6b
-.section s2h7a
-.section s2h7b
-.section s2h8a
-.section s2h8b
-.section s2h9a
-.section s2h9b
-.section s2h0a
-.section s2h0b
-.section s2iaa
-.section s2iab
-.section s2iba
-.section s2ibb
-.section s2ica
-.section s2icb
-.section s2ida
-.section s2idb
-.section s2iea
-.section s2ieb
-.section s2ifa
-.section s2ifb
-.section s2iga
-.section s2igb
-.section s2iha
-.section s2ihb
-.section s2iia
-.section s2iib
-.section s2ija
-.section s2ijb
-.section s2ika
-.section s2ikb
-.section s2ila
-.section s2ilb
-.section s2ima
-.section s2imb
-.section s2ina
-.section s2inb
-.section s2ioa
-.section s2iob
-.section s2ipa
-.section s2ipb
-.section s2iqa
-.section s2iqb
-.section s2ira
-.section s2irb
-.section s2isa
-.section s2isb
-.section s2ita
-.section s2itb
-.section s2iua
-.section s2iub
-.section s2iva
-.section s2ivb
-.section s2iwa
-.section s2iwb
-.section s2ixa
-.section s2ixb
-.section s2iya
-.section s2iyb
-.section s2iza
-.section s2izb
-.section s2i1a
-.section s2i1b
-.section s2i2a
-.section s2i2b
-.section s2i3a
-.section s2i3b
-.section s2i4a
-.section s2i4b
-.section s2i5a
-.section s2i5b
-.section s2i6a
-.section s2i6b
-.section s2i7a
-.section s2i7b
-.section s2i8a
-.section s2i8b
-.section s2i9a
-.section s2i9b
-.section s2i0a
-.section s2i0b
-.section s2jaa
-.section s2jab
-.section s2jba
-.section s2jbb
-.section s2jca
-.section s2jcb
-.section s2jda
-.section s2jdb
-.section s2jea
-.section s2jeb
-.section s2jfa
-.section s2jfb
-.section s2jga
-.section s2jgb
-.section s2jha
-.section s2jhb
-.section s2jia
-.section s2jib
-.section s2jja
-.section s2jjb
-.section s2jka
-.section s2jkb
-.section s2jla
-.section s2jlb
-.section s2jma
-.section s2jmb
-.section s2jna
-.section s2jnb
-.section s2joa
-.section s2job
-.section s2jpa
-.section s2jpb
-.section s2jqa
-.section s2jqb
-.section s2jra
-.section s2jrb
-.section s2jsa
-.section s2jsb
-.section s2jta
-.section s2jtb
-.section s2jua
-.section s2jub
-.section s2jva
-.section s2jvb
-.section s2jwa
-.section s2jwb
-.section s2jxa
-.section s2jxb
-.section s2jya
-.section s2jyb
-.section s2jza
-.section s2jzb
-.section s2j1a
-.section s2j1b
-.section s2j2a
-.section s2j2b
-.section s2j3a
-.section s2j3b
-.section s2j4a
-.section s2j4b
-.section s2j5a
-.section s2j5b
-.section s2j6a
-.section s2j6b
-.section s2j7a
-.section s2j7b
-.section s2j8a
-.section s2j8b
-.section s2j9a
-.section s2j9b
-.section s2j0a
-.section s2j0b
-.section s2kaa
-.section s2kab
-.section s2kba
-.section s2kbb
-.section s2kca
-.section s2kcb
-.section s2kda
-.section s2kdb
-.section s2kea
-.section s2keb
-.section s2kfa
-.section s2kfb
-.section s2kga
-.section s2kgb
-.section s2kha
-.section s2khb
-.section s2kia
-.section s2kib
-.section s2kja
-.section s2kjb
-.section s2kka
-.section s2kkb
-.section s2kla
-.section s2klb
-.section s2kma
-.section s2kmb
-.section s2kna
-.section s2knb
-.section s2koa
-.section s2kob
-.section s2kpa
-.section s2kpb
-.section s2kqa
-.section s2kqb
-.section s2kra
-.section s2krb
-.section s2ksa
-.section s2ksb
-.section s2kta
-.section s2ktb
-.section s2kua
-.section s2kub
-.section s2kva
-.section s2kvb
-.section s2kwa
-.section s2kwb
-.section s2kxa
-.section s2kxb
-.section s2kya
-.section s2kyb
-.section s2kza
-.section s2kzb
-.section s2k1a
-.section s2k1b
-.section s2k2a
-.section s2k2b
-.section s2k3a
-.section s2k3b
-.section s2k4a
-.section s2k4b
-.section s2k5a
-.section s2k5b
-.section s2k6a
-.section s2k6b
-.section s2k7a
-.section s2k7b
-.section s2k8a
-.section s2k8b
-.section s2k9a
-.section s2k9b
-.section s2k0a
-.section s2k0b
-.section s2laa
-.section s2lab
-.section s2lba
-.section s2lbb
-.section s2lca
-.section s2lcb
-.section s2lda
-.section s2ldb
-.section s2lea
-.section s2leb
-.section s2lfa
-.section s2lfb
-.section s2lga
-.section s2lgb
-.section s2lha
-.section s2lhb
-.section s2lia
-.section s2lib
-.section s2lja
-.section s2ljb
-.section s2lka
-.section s2lkb
-.section s2lla
-.section s2llb
-.section s2lma
-.section s2lmb
-.section s2lna
-.section s2lnb
-.section s2loa
-.section s2lob
-.section s2lpa
-.section s2lpb
-.section s2lqa
-.section s2lqb
-.section s2lra
-.section s2lrb
-.section s2lsa
-.section s2lsb
-.section s2lta
-.section s2ltb
-.section s2lua
-.section s2lub
-.section s2lva
-.section s2lvb
-.section s2lwa
-.section s2lwb
-.section s2lxa
-.section s2lxb
-.section s2lya
-.section s2lyb
-.section s2lza
-.section s2lzb
-.section s2l1a
-.section s2l1b
-.section s2l2a
-.section s2l2b
-.section s2l3a
-.section s2l3b
-.section s2l4a
-.section s2l4b
-.section s2l5a
-.section s2l5b
-.section s2l6a
-.section s2l6b
-.section s2l7a
-.section s2l7b
-.section s2l8a
-.section s2l8b
-.section s2l9a
-.section s2l9b
-.section s2l0a
-.section s2l0b
-.section s2maa
-.section s2mab
-.section s2mba
-.section s2mbb
-.section s2mca
-.section s2mcb
-.section s2mda
-.section s2mdb
-.section s2mea
-.section s2meb
-.section s2mfa
-.section s2mfb
-.section s2mga
-.section s2mgb
-.section s2mha
-.section s2mhb
-.section s2mia
-.section s2mib
-.section s2mja
-.section s2mjb
-.section s2mka
-.section s2mkb
-.section s2mla
-.section s2mlb
-.section s2mma
-.section s2mmb
-.section s2mna
-.section s2mnb
-.section s2moa
-.section s2mob
-.section s2mpa
-.section s2mpb
-.section s2mqa
-.section s2mqb
-.section s2mra
-.section s2mrb
-.section s2msa
-.section s2msb
-.section s2mta
-.section s2mtb
-.section s2mua
-.section s2mub
-.section s2mva
-.section s2mvb
-.section s2mwa
-.section s2mwb
-.section s2mxa
-.section s2mxb
-.section s2mya
-.section s2myb
-.section s2mza
-.section s2mzb
-.section s2m1a
-.section s2m1b
-.section s2m2a
-.section s2m2b
-.section s2m3a
-.section s2m3b
-.section s2m4a
-.section s2m4b
-.section s2m5a
-.section s2m5b
-.section s2m6a
-.section s2m6b
-.section s2m7a
-.section s2m7b
-.section s2m8a
-.section s2m8b
-.section s2m9a
-.section s2m9b
-.section s2m0a
-.section s2m0b
-.section s2naa
-.section s2nab
-.section s2nba
-.section s2nbb
-.section s2nca
-.section s2ncb
-.section s2nda
-.section s2ndb
-.section s2nea
-.section s2neb
-.section s2nfa
-.section s2nfb
-.section s2nga
-.section s2ngb
-.section s2nha
-.section s2nhb
-.section s2nia
-.section s2nib
-.section s2nja
-.section s2njb
-.section s2nka
-.section s2nkb
-.section s2nla
-.section s2nlb
-.section s2nma
-.section s2nmb
-.section s2nna
-.section s2nnb
-.section s2noa
-.section s2nob
-.section s2npa
-.section s2npb
-.section s2nqa
-.section s2nqb
-.section s2nra
-.section s2nrb
-.section s2nsa
-.section s2nsb
-.section s2nta
-.section s2ntb
-.section s2nua
-.section s2nub
-.section s2nva
-.section s2nvb
-.section s2nwa
-.section s2nwb
-.section s2nxa
-.section s2nxb
-.section s2nya
-.section s2nyb
-.section s2nza
-.section s2nzb
-.section s2n1a
-.section s2n1b
-.section s2n2a
-.section s2n2b
-.section s2n3a
-.section s2n3b
-.section s2n4a
-.section s2n4b
-.section s2n5a
-.section s2n5b
-.section s2n6a
-.section s2n6b
-.section s2n7a
-.section s2n7b
-.section s2n8a
-.section s2n8b
-.section s2n9a
-.section s2n9b
-.section s2n0a
-.section s2n0b
-.section s2oaa
-.section s2oab
-.section s2oba
-.section s2obb
-.section s2oca
-.section s2ocb
-.section s2oda
-.section s2odb
-.section s2oea
-.section s2oeb
-.section s2ofa
-.section s2ofb
-.section s2oga
-.section s2ogb
-.section s2oha
-.section s2ohb
-.section s2oia
-.section s2oib
-.section s2oja
-.section s2ojb
-.section s2oka
-.section s2okb
-.section s2ola
-.section s2olb
-.section s2oma
-.section s2omb
-.section s2ona
-.section s2onb
-.section s2ooa
-.section s2oob
-.section s2opa
-.section s2opb
-.section s2oqa
-.section s2oqb
-.section s2ora
-.section s2orb
-.section s2osa
-.section s2osb
-.section s2ota
-.section s2otb
-.section s2oua
-.section s2oub
-.section s2ova
-.section s2ovb
-.section s2owa
-.section s2owb
-.section s2oxa
-.section s2oxb
-.section s2oya
-.section s2oyb
-.section s2oza
-.section s2ozb
-.section s2o1a
-.section s2o1b
-.section s2o2a
-.section s2o2b
-.section s2o3a
-.section s2o3b
-.section s2o4a
-.section s2o4b
-.section s2o5a
-.section s2o5b
-.section s2o6a
-.section s2o6b
-.section s2o7a
-.section s2o7b
-.section s2o8a
-.section s2o8b
-.section s2o9a
-.section s2o9b
-.section s2o0a
-.section s2o0b
-.section s2paa
-.section s2pab
-.section s2pba
-.section s2pbb
-.section s2pca
-.section s2pcb
-.section s2pda
-.section s2pdb
-.section s2pea
-.section s2peb
-.section s2pfa
-.section s2pfb
-.section s2pga
-.section s2pgb
-.section s2pha
-.section s2phb
-.section s2pia
-.section s2pib
-.section s2pja
-.section s2pjb
-.section s2pka
-.section s2pkb
-.section s2pla
-.section s2plb
-.section s2pma
-.section s2pmb
-.section s2pna
-.section s2pnb
-.section s2poa
-.section s2pob
-.section s2ppa
-.section s2ppb
-.section s2pqa
-.section s2pqb
-.section s2pra
-.section s2prb
-.section s2psa
-.section s2psb
-.section s2pta
-.section s2ptb
-.section s2pua
-.section s2pub
-.section s2pva
-.section s2pvb
-.section s2pwa
-.section s2pwb
-.section s2pxa
-.section s2pxb
-.section s2pya
-.section s2pyb
-.section s2pza
-.section s2pzb
-.section s2p1a
-.section s2p1b
-.section s2p2a
-.section s2p2b
-.section s2p3a
-.section s2p3b
-.section s2p4a
-.section s2p4b
-.section s2p5a
-.section s2p5b
-.section s2p6a
-.section s2p6b
-.section s2p7a
-.section s2p7b
-.section s2p8a
-.section s2p8b
-.section s2p9a
-.section s2p9b
-.section s2p0a
-.section s2p0b
-.section s2qaa
-.section s2qab
-.section s2qba
-.section s2qbb
-.section s2qca
-.section s2qcb
-.section s2qda
-.section s2qdb
-.section s2qea
-.section s2qeb
-.section s2qfa
-.section s2qfb
-.section s2qga
-.section s2qgb
-.section s2qha
-.section s2qhb
-.section s2qia
-.section s2qib
-.section s2qja
-.section s2qjb
-.section s2qka
-.section s2qkb
-.section s2qla
-.section s2qlb
-.section s2qma
-.section s2qmb
-.section s2qna
-.section s2qnb
-.section s2qoa
-.section s2qob
-.section s2qpa
-.section s2qpb
-.section s2qqa
-.section s2qqb
-.section s2qra
-.section s2qrb
-.section s2qsa
-.section s2qsb
-.section s2qta
-.section s2qtb
-.section s2qua
-.section s2qub
-.section s2qva
-.section s2qvb
-.section s2qwa
-.section s2qwb
-.section s2qxa
-.section s2qxb
-.section s2qya
-.section s2qyb
-.section s2qza
-.section s2qzb
-.section s2q1a
-.section s2q1b
-.section s2q2a
-.section s2q2b
-.section s2q3a
-.section s2q3b
-.section s2q4a
-.section s2q4b
-.section s2q5a
-.section s2q5b
-.section s2q6a
-.section s2q6b
-.section s2q7a
-.section s2q7b
-.section s2q8a
-.section s2q8b
-.section s2q9a
-.section s2q9b
-.section s2q0a
-.section s2q0b
-.section s2raa
-.section s2rab
-.section s2rba
-.section s2rbb
-.section s2rca
-.section s2rcb
-.section s2rda
-.section s2rdb
-.section s2rea
-.section s2reb
-.section s2rfa
-.section s2rfb
-.section s2rga
-.section s2rgb
-.section s2rha
-.section s2rhb
-.section s2ria
-.section s2rib
-.section s2rja
-.section s2rjb
-.section s2rka
-.section s2rkb
-.section s2rla
-.section s2rlb
-.section s2rma
-.section s2rmb
-.section s2rna
-.section s2rnb
-.section s2roa
-.section s2rob
-.section s2rpa
-.section s2rpb
-.section s2rqa
-.section s2rqb
-.section s2rra
-.section s2rrb
-.section s2rsa
-.section s2rsb
-.section s2rta
-.section s2rtb
-.section s2rua
-.section s2rub
-.section s2rva
-.section s2rvb
-.section s2rwa
-.section s2rwb
-.section s2rxa
-.section s2rxb
-.section s2rya
-.section s2ryb
-.section s2rza
-.section s2rzb
-.section s2r1a
-.section s2r1b
-.section s2r2a
-.section s2r2b
-.section s2r3a
-.section s2r3b
-.section s2r4a
-.section s2r4b
-.section s2r5a
-.section s2r5b
-.section s2r6a
-.section s2r6b
-.section s2r7a
-.section s2r7b
-.section s2r8a
-.section s2r8b
-.section s2r9a
-.section s2r9b
-.section s2r0a
-.section s2r0b
-.section s2saa
-.section s2sab
-.section s2sba
-.section s2sbb
-.section s2sca
-.section s2scb
-.section s2sda
-.section s2sdb
-.section s2sea
-.section s2seb
-.section s2sfa
-.section s2sfb
-.section s2sga
-.section s2sgb
-.section s2sha
-.section s2shb
-.section s2sia
-.section s2sib
-.section s2sja
-.section s2sjb
-.section s2ska
-.section s2skb
-.section s2sla
-.section s2slb
-.section s2sma
-.section s2smb
-.section s2sna
-.section s2snb
-.section s2soa
-.section s2sob
-.section s2spa
-.section s2spb
-.section s2sqa
-.section s2sqb
-.section s2sra
-.section s2srb
-.section s2ssa
-.section s2ssb
-.section s2sta
-.section s2stb
-.section s2sua
-.section s2sub
-.section s2sva
-.section s2svb
-.section s2swa
-.section s2swb
-.section s2sxa
-.section s2sxb
-.section s2sya
-.section s2syb
-.section s2sza
-.section s2szb
-.section s2s1a
-.section s2s1b
-.section s2s2a
-.section s2s2b
-.section s2s3a
-.section s2s3b
-.section s2s4a
-.section s2s4b
-.section s2s5a
-.section s2s5b
-.section s2s6a
-.section s2s6b
-.section s2s7a
-.section s2s7b
-.section s2s8a
-.section s2s8b
-.section s2s9a
-.section s2s9b
-.section s2s0a
-.section s2s0b
-.section s2taa
-.section s2tab
-.section s2tba
-.section s2tbb
-.section s2tca
-.section s2tcb
-.section s2tda
-.section s2tdb
-.section s2tea
-.section s2teb
-.section s2tfa
-.section s2tfb
-.section s2tga
-.section s2tgb
-.section s2tha
-.section s2thb
-.section s2tia
-.section s2tib
-.section s2tja
-.section s2tjb
-.section s2tka
-.section s2tkb
-.section s2tla
-.section s2tlb
-.section s2tma
-.section s2tmb
-.section s2tna
-.section s2tnb
-.section s2toa
-.section s2tob
-.section s2tpa
-.section s2tpb
-.section s2tqa
-.section s2tqb
-.section s2tra
-.section s2trb
-.section s2tsa
-.section s2tsb
-.section s2tta
-.section s2ttb
-.section s2tua
-.section s2tub
-.section s2tva
-.section s2tvb
-.section s2twa
-.section s2twb
-.section s2txa
-.section s2txb
-.section s2tya
-.section s2tyb
-.section s2tza
-.section s2tzb
-.section s2t1a
-.section s2t1b
-.section s2t2a
-.section s2t2b
-.section s2t3a
-.section s2t3b
-.section s2t4a
-.section s2t4b
-.section s2t5a
-.section s2t5b
-.section s2t6a
-.section s2t6b
-.section s2t7a
-.section s2t7b
-.section s2t8a
-.section s2t8b
-.section s2t9a
-.section s2t9b
-.section s2t0a
-.section s2t0b
-.section s2uaa
-.section s2uab
-.section s2uba
-.section s2ubb
-.section s2uca
-.section s2ucb
-.section s2uda
-.section s2udb
-.section s2uea
-.section s2ueb
-.section s2ufa
-.section s2ufb
-.section s2uga
-.section s2ugb
-.section s2uha
-.section s2uhb
-.section s2uia
-.section s2uib
-.section s2uja
-.section s2ujb
-.section s2uka
-.section s2ukb
-.section s2ula
-.section s2ulb
-.section s2uma
-.section s2umb
-.section s2una
-.section s2unb
-.section s2uoa
-.section s2uob
-.section s2upa
-.section s2upb
-.section s2uqa
-.section s2uqb
-.section s2ura
-.section s2urb
-.section s2usa
-.section s2usb
-.section s2uta
-.section s2utb
-.section s2uua
-.section s2uub
-.section s2uva
-.section s2uvb
-.section s2uwa
-.section s2uwb
-.section s2uxa
-.section s2uxb
-.section s2uya
-.section s2uyb
-.section s2uza
-.section s2uzb
-.section s2u1a
-.section s2u1b
-.section s2u2a
-.section s2u2b
-.section s2u3a
-.section s2u3b
-.section s2u4a
-.section s2u4b
-.section s2u5a
-.section s2u5b
-.section s2u6a
-.section s2u6b
-.section s2u7a
-.section s2u7b
-.section s2u8a
-.section s2u8b
-.section s2u9a
-.section s2u9b
-.section s2u0a
-.section s2u0b
-.section s2vaa
-.section s2vab
-.section s2vba
-.section s2vbb
-.section s2vca
-.section s2vcb
-.section s2vda
-.section s2vdb
-.section s2vea
-.section s2veb
-.section s2vfa
-.section s2vfb
-.section s2vga
-.section s2vgb
-.section s2vha
-.section s2vhb
-.section s2via
-.section s2vib
-.section s2vja
-.section s2vjb
-.section s2vka
-.section s2vkb
-.section s2vla
-.section s2vlb
-.section s2vma
-.section s2vmb
-.section s2vna
-.section s2vnb
-.section s2voa
-.section s2vob
-.section s2vpa
-.section s2vpb
-.section s2vqa
-.section s2vqb
-.section s2vra
-.section s2vrb
-.section s2vsa
-.section s2vsb
-.section s2vta
-.section s2vtb
-.section s2vua
-.section s2vub
-.section s2vva
-.section s2vvb
-.section s2vwa
-.section s2vwb
-.section s2vxa
-.section s2vxb
-.section s2vya
-.section s2vyb
-.section s2vza
-.section s2vzb
-.section s2v1a
-.section s2v1b
-.section s2v2a
-.section s2v2b
-.section s2v3a
-.section s2v3b
-.section s2v4a
-.section s2v4b
-.section s2v5a
-.section s2v5b
-.section s2v6a
-.section s2v6b
-.section s2v7a
-.section s2v7b
-.section s2v8a
-.section s2v8b
-.section s2v9a
-.section s2v9b
-.section s2v0a
-.section s2v0b
-.section s2waa
-.section s2wab
-.section s2wba
-.section s2wbb
-.section s2wca
-.section s2wcb
-.section s2wda
-.section s2wdb
-.section s2wea
-.section s2web
-.section s2wfa
-.section s2wfb
-.section s2wga
-.section s2wgb
-.section s2wha
-.section s2whb
-.section s2wia
-.section s2wib
-.section s2wja
-.section s2wjb
-.section s2wka
-.section s2wkb
-.section s2wla
-.section s2wlb
-.section s2wma
-.section s2wmb
-.section s2wna
-.section s2wnb
-.section s2woa
-.section s2wob
-.section s2wpa
-.section s2wpb
-.section s2wqa
-.section s2wqb
-.section s2wra
-.section s2wrb
-.section s2wsa
-.section s2wsb
-.section s2wta
-.section s2wtb
-.section s2wua
-.section s2wub
-.section s2wva
-.section s2wvb
-.section s2wwa
-.section s2wwb
-.section s2wxa
-.section s2wxb
-.section s2wya
-.section s2wyb
-.section s2wza
-.section s2wzb
-.section s2w1a
-.section s2w1b
-.section s2w2a
-.section s2w2b
-.section s2w3a
-.section s2w3b
-.section s2w4a
-.section s2w4b
-.section s2w5a
-.section s2w5b
-.section s2w6a
-.section s2w6b
-.section s2w7a
-.section s2w7b
-.section s2w8a
-.section s2w8b
-.section s2w9a
-.section s2w9b
-.section s2w0a
-.section s2w0b
-.section s2xaa
-.section s2xab
-.section s2xba
-.section s2xbb
-.section s2xca
-.section s2xcb
-.section s2xda
-.section s2xdb
-.section s2xea
-.section s2xeb
-.section s2xfa
-.section s2xfb
-.section s2xga
-.section s2xgb
-.section s2xha
-.section s2xhb
-.section s2xia
-.section s2xib
-.section s2xja
-.section s2xjb
-.section s2xka
-.section s2xkb
-.section s2xla
-.section s2xlb
-.section s2xma
-.section s2xmb
-.section s2xna
-.section s2xnb
-.section s2xoa
-.section s2xob
-.section s2xpa
-.section s2xpb
-.section s2xqa
-.section s2xqb
-.section s2xra
-.section s2xrb
-.section s2xsa
-.section s2xsb
-.section s2xta
-.section s2xtb
-.section s2xua
-.section s2xub
-.section s2xva
-.section s2xvb
-.section s2xwa
-.section s2xwb
-.section s2xxa
-.section s2xxb
-.section s2xya
-.section s2xyb
-.section s2xza
-.section s2xzb
-.section s2x1a
-.section s2x1b
-.section s2x2a
-.section s2x2b
-.section s2x3a
-.section s2x3b
-.section s2x4a
-.section s2x4b
-.section s2x5a
-.section s2x5b
-.section s2x6a
-.section s2x6b
-.section s2x7a
-.section s2x7b
-.section s2x8a
-.section s2x8b
-.section s2x9a
-.section s2x9b
-.section s2x0a
-.section s2x0b
-.section s2yaa
-.section s2yab
-.section s2yba
-.section s2ybb
-.section s2yca
-.section s2ycb
-.section s2yda
-.section s2ydb
-.section s2yea
-.section s2yeb
-.section s2yfa
-.section s2yfb
-.section s2yga
-.section s2ygb
-.section s2yha
-.section s2yhb
-.section s2yia
-.section s2yib
-.section s2yja
-.section s2yjb
-.section s2yka
-.section s2ykb
-.section s2yla
-.section s2ylb
-.section s2yma
-.section s2ymb
-.section s2yna
-.section s2ynb
-.section s2yoa
-.section s2yob
-.section s2ypa
-.section s2ypb
-.section s2yqa
-.section s2yqb
-.section s2yra
-.section s2yrb
-.section s2ysa
-.section s2ysb
-.section s2yta
-.section s2ytb
-.section s2yua
-.section s2yub
-.section s2yva
-.section s2yvb
-.section s2ywa
-.section s2ywb
-.section s2yxa
-.section s2yxb
-.section s2yya
-.section s2yyb
-.section s2yza
-.section s2yzb
-.section s2y1a
-.section s2y1b
-.section s2y2a
-.section s2y2b
-.section s2y3a
-.section s2y3b
-.section s2y4a
-.section s2y4b
-.section s2y5a
-.section s2y5b
-.section s2y6a
-.section s2y6b
-.section s2y7a
-.section s2y7b
-.section s2y8a
-.section s2y8b
-.section s2y9a
-.section s2y9b
-.section s2y0a
-.section s2y0b
-.section s2zaa
-.section s2zab
-.section s2zba
-.section s2zbb
-.section s2zca
-.section s2zcb
-.section s2zda
-.section s2zdb
-.section s2zea
-.section s2zeb
-.section s2zfa
-.section s2zfb
-.section s2zga
-.section s2zgb
-.section s2zha
-.section s2zhb
-.section s2zia
-.section s2zib
-.section s2zja
-.section s2zjb
-.section s2zka
-.section s2zkb
-.section s2zla
-.section s2zlb
-.section s2zma
-.section s2zmb
-.section s2zna
-.section s2znb
-.section s2zoa
-.section s2zob
-.section s2zpa
-.section s2zpb
-.section s2zqa
-.section s2zqb
-.section s2zra
-.section s2zrb
-.section s2zsa
-.section s2zsb
-.section s2zta
-.section s2ztb
-.section s2zua
-.section s2zub
-.section s2zva
-.section s2zvb
-.section s2zwa
-.section s2zwb
-.section s2zxa
-.section s2zxb
-.section s2zya
-.section s2zyb
-.section s2zza
-.section s2zzb
-.section s2z1a
-.section s2z1b
-.section s2z2a
-.section s2z2b
-.section s2z3a
-.section s2z3b
-.section s2z4a
-.section s2z4b
-.section s2z5a
-.section s2z5b
-.section s2z6a
-.section s2z6b
-.section s2z7a
-.section s2z7b
-.section s2z8a
-.section s2z8b
-.section s2z9a
-.section s2z9b
-.section s2z0a
-.section s2z0b
-.section s21aa
-.section s21ab
-.section s21ba
-.section s21bb
-.section s21ca
-.section s21cb
-.section s21da
-.section s21db
-.section s21ea
-.section s21eb
-.section s21fa
-.section s21fb
-.section s21ga
-.section s21gb
-.section s21ha
-.section s21hb
-.section s21ia
-.section s21ib
-.section s21ja
-.section s21jb
-.section s21ka
-.section s21kb
-.section s21la
-.section s21lb
-.section s21ma
-.section s21mb
-.section s21na
-.section s21nb
-.section s21oa
-.section s21ob
-.section s21pa
-.section s21pb
-.section s21qa
-.section s21qb
-.section s21ra
-.section s21rb
-.section s21sa
-.section s21sb
-.section s21ta
-.section s21tb
-.section s21ua
-.section s21ub
-.section s21va
-.section s21vb
-.section s21wa
-.section s21wb
-.section s21xa
-.section s21xb
-.section s21ya
-.section s21yb
-.section s21za
-.section s21zb
-.section s211a
-.section s211b
-.section s212a
-.section s212b
-.section s213a
-.section s213b
-.section s214a
-.section s214b
-.section s215a
-.section s215b
-.section s216a
-.section s216b
-.section s217a
-.section s217b
-.section s218a
-.section s218b
-.section s219a
-.section s219b
-.section s210a
-.section s210b
-.section s22aa
-.section s22ab
-.section s22ba
-.section s22bb
-.section s22ca
-.section s22cb
-.section s22da
-.section s22db
-.section s22ea
-.section s22eb
-.section s22fa
-.section s22fb
-.section s22ga
-.section s22gb
-.section s22ha
-.section s22hb
-.section s22ia
-.section s22ib
-.section s22ja
-.section s22jb
-.section s22ka
-.section s22kb
-.section s22la
-.section s22lb
-.section s22ma
-.section s22mb
-.section s22na
-.section s22nb
-.section s22oa
-.section s22ob
-.section s22pa
-.section s22pb
-.section s22qa
-.section s22qb
-.section s22ra
-.section s22rb
-.section s22sa
-.section s22sb
-.section s22ta
-.section s22tb
-.section s22ua
-.section s22ub
-.section s22va
-.section s22vb
-.section s22wa
-.section s22wb
-.section s22xa
-.section s22xb
-.section s22ya
-.section s22yb
-.section s22za
-.section s22zb
-.section s221a
-.section s221b
-.section s222a
-.section s222b
-.section s223a
-.section s223b
-.section s224a
-.section s224b
-.section s225a
-.section s225b
-.section s226a
-.section s226b
-.section s227a
-.section s227b
-.section s228a
-.section s228b
-.section s229a
-.section s229b
-.section s220a
-.section s220b
-.section s23aa
-.section s23ab
-.section s23ba
-.section s23bb
-.section s23ca
-.section s23cb
-.section s23da
-.section s23db
-.section s23ea
-.section s23eb
-.section s23fa
-.section s23fb
-.section s23ga
-.section s23gb
-.section s23ha
-.section s23hb
-.section s23ia
-.section s23ib
-.section s23ja
-.section s23jb
-.section s23ka
-.section s23kb
-.section s23la
-.section s23lb
-.section s23ma
-.section s23mb
-.section s23na
-.section s23nb
-.section s23oa
-.section s23ob
-.section s23pa
-.section s23pb
-.section s23qa
-.section s23qb
-.section s23ra
-.section s23rb
-.section s23sa
-.section s23sb
-.section s23ta
-.section s23tb
-.section s23ua
-.section s23ub
-.section s23va
-.section s23vb
-.section s23wa
-.section s23wb
-.section s23xa
-.section s23xb
-.section s23ya
-.section s23yb
-.section s23za
-.section s23zb
-.section s231a
-.section s231b
-.section s232a
-.section s232b
-.section s233a
-.section s233b
-.section s234a
-.section s234b
-.section s235a
-.section s235b
-.section s236a
-.section s236b
-.section s237a
-.section s237b
-.section s238a
-.section s238b
-.section s239a
-.section s239b
-.section s230a
-.section s230b
-.section s24aa
-.section s24ab
-.section s24ba
-.section s24bb
-.section s24ca
-.section s24cb
-.section s24da
-.section s24db
-.section s24ea
-.section s24eb
-.section s24fa
-.section s24fb
-.section s24ga
-.section s24gb
-.section s24ha
-.section s24hb
-.section s24ia
-.section s24ib
-.section s24ja
-.section s24jb
-.section s24ka
-.section s24kb
-.section s24la
-.section s24lb
-.section s24ma
-.section s24mb
-.section s24na
-.section s24nb
-.section s24oa
-.section s24ob
-.section s24pa
-.section s24pb
-.section s24qa
-.section s24qb
-.section s24ra
-.section s24rb
-.section s24sa
-.section s24sb
-.section s24ta
-.section s24tb
-.section s24ua
-.section s24ub
-.section s24va
-.section s24vb
-.section s24wa
-.section s24wb
-.section s24xa
-.section s24xb
-.section s24ya
-.section s24yb
-.section s24za
-.section s24zb
-.section s241a
-.section s241b
-.section s242a
-.section s242b
-.section s243a
-.section s243b
-.section s244a
-.section s244b
-.section s245a
-.section s245b
-.section s246a
-.section s246b
-.section s247a
-.section s247b
-.section s248a
-.section s248b
-.section s249a
-.section s249b
-.section s240a
-.section s240b
-.section s25aa
-.section s25ab
-.section s25ba
-.section s25bb
-.section s25ca
-.section s25cb
-.section s25da
-.section s25db
-.section s25ea
-.section s25eb
-.section s25fa
-.section s25fb
-.section s25ga
-.section s25gb
-.section s25ha
-.section s25hb
-.section s25ia
-.section s25ib
-.section s25ja
-.section s25jb
-.section s25ka
-.section s25kb
-.section s25la
-.section s25lb
-.section s25ma
-.section s25mb
-.section s25na
-.section s25nb
-.section s25oa
-.section s25ob
-.section s25pa
-.section s25pb
-.section s25qa
-.section s25qb
-.section s25ra
-.section s25rb
-.section s25sa
-.section s25sb
-.section s25ta
-.section s25tb
-.section s25ua
-.section s25ub
-.section s25va
-.section s25vb
-.section s25wa
-.section s25wb
-.section s25xa
-.section s25xb
-.section s25ya
-.section s25yb
-.section s25za
-.section s25zb
-.section s251a
-.section s251b
-.section s252a
-.section s252b
-.section s253a
-.section s253b
-.section s254a
-.section s254b
-.section s255a
-.section s255b
-.section s256a
-.section s256b
-.section s257a
-.section s257b
-.section s258a
-.section s258b
-.section s259a
-.section s259b
-.section s250a
-.section s250b
-.section s26aa
-.section s26ab
-.section s26ba
-.section s26bb
-.section s26ca
-.section s26cb
-.section s26da
-.section s26db
-.section s26ea
-.section s26eb
-.section s26fa
-.section s26fb
-.section s26ga
-.section s26gb
-.section s26ha
-.section s26hb
-.section s26ia
-.section s26ib
-.section s26ja
-.section s26jb
-.section s26ka
-.section s26kb
-.section s26la
-.section s26lb
-.section s26ma
-.section s26mb
-.section s26na
-.section s26nb
-.section s26oa
-.section s26ob
-.section s26pa
-.section s26pb
-.section s26qa
-.section s26qb
-.section s26ra
-.section s26rb
-.section s26sa
-.section s26sb
-.section s26ta
-.section s26tb
-.section s26ua
-.section s26ub
-.section s26va
-.section s26vb
-.section s26wa
-.section s26wb
-.section s26xa
-.section s26xb
-.section s26ya
-.section s26yb
-.section s26za
-.section s26zb
-.section s261a
-.section s261b
-.section s262a
-.section s262b
-.section s263a
-.section s263b
-.section s264a
-.section s264b
-.section s265a
-.section s265b
-.section s266a
-.section s266b
-.section s267a
-.section s267b
-.section s268a
-.section s268b
-.section s269a
-.section s269b
-.section s260a
-.section s260b
-.section s27aa
-.section s27ab
-.section s27ba
-.section s27bb
-.section s27ca
-.section s27cb
-.section s27da
-.section s27db
-.section s27ea
-.section s27eb
-.section s27fa
-.section s27fb
-.section s27ga
-.section s27gb
-.section s27ha
-.section s27hb
-.section s27ia
-.section s27ib
-.section s27ja
-.section s27jb
-.section s27ka
-.section s27kb
-.section s27la
-.section s27lb
-.section s27ma
-.section s27mb
-.section s27na
-.section s27nb
-.section s27oa
-.section s27ob
-.section s27pa
-.section s27pb
-.section s27qa
-.section s27qb
-.section s27ra
-.section s27rb
-.section s27sa
-.section s27sb
-.section s27ta
-.section s27tb
-.section s27ua
-.section s27ub
-.section s27va
-.section s27vb
-.section s27wa
-.section s27wb
-.section s27xa
-.section s27xb
-.section s27ya
-.section s27yb
-.section s27za
-.section s27zb
-.section s271a
-.section s271b
-.section s272a
-.section s272b
-.section s273a
-.section s273b
-.section s274a
-.section s274b
-.section s275a
-.section s275b
-.section s276a
-.section s276b
-.section s277a
-.section s277b
-.section s278a
-.section s278b
-.section s279a
-.section s279b
-.section s270a
-.section s270b
-.section s28aa
-.section s28ab
-.section s28ba
-.section s28bb
-.section s28ca
-.section s28cb
-.section s28da
-.section s28db
-.section s28ea
-.section s28eb
-.section s28fa
-.section s28fb
-.section s28ga
-.section s28gb
-.section s28ha
-.section s28hb
-.section s28ia
-.section s28ib
-.section s28ja
-.section s28jb
-.section s28ka
-.section s28kb
-.section s28la
-.section s28lb
-.section s28ma
-.section s28mb
-.section s28na
-.section s28nb
-.section s28oa
-.section s28ob
-.section s28pa
-.section s28pb
-.section s28qa
-.section s28qb
-.section s28ra
-.section s28rb
-.section s28sa
-.section s28sb
-.section s28ta
-.section s28tb
-.section s28ua
-.section s28ub
-.section s28va
-.section s28vb
-.section s28wa
-.section s28wb
-.section s28xa
-.section s28xb
-.section s28ya
-.section s28yb
-.section s28za
-.section s28zb
-.section s281a
-.section s281b
-.section s282a
-.section s282b
-.section s283a
-.section s283b
-.section s284a
-.section s284b
-.section s285a
-.section s285b
-.section s286a
-.section s286b
-.section s287a
-.section s287b
-.section s288a
-.section s288b
-.section s289a
-.section s289b
-.section s280a
-.section s280b
-.section s29aa
-.section s29ab
-.section s29ba
-.section s29bb
-.section s29ca
-.section s29cb
-.section s29da
-.section s29db
-.section s29ea
-.section s29eb
-.section s29fa
-.section s29fb
-.section s29ga
-.section s29gb
-.section s29ha
-.section s29hb
-.section s29ia
-.section s29ib
-.section s29ja
-.section s29jb
-.section s29ka
-.section s29kb
-.section s29la
-.section s29lb
-.section s29ma
-.section s29mb
-.section s29na
-.section s29nb
-.section s29oa
-.section s29ob
-.section s29pa
-.section s29pb
-.section s29qa
-.section s29qb
-.section s29ra
-.section s29rb
-.section s29sa
-.section s29sb
-.section s29ta
-.section s29tb
-.section s29ua
-.section s29ub
-.section s29va
-.section s29vb
-.section s29wa
-.section s29wb
-.section s29xa
-.section s29xb
-.section s29ya
-.section s29yb
-.section s29za
-.section s29zb
-.section s291a
-.section s291b
-.section s292a
-.section s292b
-.section s293a
-.section s293b
-.section s294a
-.section s294b
-.section s295a
-.section s295b
-.section s296a
-.section s296b
-.section s297a
-.section s297b
-.section s298a
-.section s298b
-.section s299a
-.section s299b
-.section s290a
-.section s290b
-.section s20aa
-.section s20ab
-.section s20ba
-.section s20bb
-.section s20ca
-.section s20cb
-.section s20da
-.section s20db
-.section s20ea
-.section s20eb
-.section s20fa
-.section s20fb
-.section s20ga
-.section s20gb
-.section s20ha
-.section s20hb
-.section s20ia
-.section s20ib
-.section s20ja
-.section s20jb
-.section s20ka
-.section s20kb
-.section s20la
-.section s20lb
-.section s20ma
-.section s20mb
-.section s20na
-.section s20nb
-.section s20oa
-.section s20ob
-.section s20pa
-.section s20pb
-.section s20qa
-.section s20qb
-.section s20ra
-.section s20rb
-.section s20sa
-.section s20sb
-.section s20ta
-.section s20tb
-.section s20ua
-.section s20ub
-.section s20va
-.section s20vb
-.section s20wa
-.section s20wb
-.section s20xa
-.section s20xb
-.section s20ya
-.section s20yb
-.section s20za
-.section s20zb
-.section s201a
-.section s201b
-.section s202a
-.section s202b
-.section s203a
-.section s203b
-.section s204a
-.section s204b
-.section s205a
-.section s205b
-.section s206a
-.section s206b
-.section s207a
-.section s207b
-.section s208a
-.section s208b
-.section s209a
-.section s209b
-.section s200a
-.section s200b
-.section s3aaa
-.section s3aab
-.section s3aba
-.section s3abb
-.section s3aca
-.section s3acb
-.section s3ada
-.section s3adb
-.section s3aea
-.section s3aeb
-.section s3afa
-.section s3afb
-.section s3aga
-.section s3agb
-.section s3aha
-.section s3ahb
-.section s3aia
-.section s3aib
-.section s3aja
-.section s3ajb
-.section s3aka
-.section s3akb
-.section s3ala
-.section s3alb
-.section s3ama
-.section s3amb
-.section s3ana
-.section s3anb
-.section s3aoa
-.section s3aob
-.section s3apa
-.section s3apb
-.section s3aqa
-.section s3aqb
-.section s3ara
-.section s3arb
-.section s3asa
-.section s3asb
-.section s3ata
-.section s3atb
-.section s3aua
-.section s3aub
-.section s3ava
-.section s3avb
-.section s3awa
-.section s3awb
-.section s3axa
-.section s3axb
-.section s3aya
-.section s3ayb
-.section s3aza
-.section s3azb
-.section s3a1a
-.section s3a1b
-.section s3a2a
-.section s3a2b
-.section s3a3a
-.section s3a3b
-.section s3a4a
-.section s3a4b
-.section s3a5a
-.section s3a5b
-.section s3a6a
-.section s3a6b
-.section s3a7a
-.section s3a7b
-.section s3a8a
-.section s3a8b
-.section s3a9a
-.section s3a9b
-.section s3a0a
-.section s3a0b
-.section s3baa
-.section s3bab
-.section s3bba
-.section s3bbb
-.section s3bca
-.section s3bcb
-.section s3bda
-.section s3bdb
-.section s3bea
-.section s3beb
-.section s3bfa
-.section s3bfb
-.section s3bga
-.section s3bgb
-.section s3bha
-.section s3bhb
-.section s3bia
-.section s3bib
-.section s3bja
-.section s3bjb
-.section s3bka
-.section s3bkb
-.section s3bla
-.section s3blb
-.section s3bma
-.section s3bmb
-.section s3bna
-.section s3bnb
-.section s3boa
-.section s3bob
-.section s3bpa
-.section s3bpb
-.section s3bqa
-.section s3bqb
-.section s3bra
-.section s3brb
-.section s3bsa
-.section s3bsb
-.section s3bta
-.section s3btb
-.section s3bua
-.section s3bub
-.section s3bva
-.section s3bvb
-.section s3bwa
-.section s3bwb
-.section s3bxa
-.section s3bxb
-.section s3bya
-.section s3byb
-.section s3bza
-.section s3bzb
-.section s3b1a
-.section s3b1b
-.section s3b2a
-.section s3b2b
-.section s3b3a
-.section s3b3b
-.section s3b4a
-.section s3b4b
-.section s3b5a
-.section s3b5b
-.section s3b6a
-.section s3b6b
-.section s3b7a
-.section s3b7b
-.section s3b8a
-.section s3b8b
-.section s3b9a
-.section s3b9b
-.section s3b0a
-.section s3b0b
-.section s3caa
-.section s3cab
-.section s3cba
-.section s3cbb
-.section s3cca
-.section s3ccb
-.section s3cda
-.section s3cdb
-.section s3cea
-.section s3ceb
-.section s3cfa
-.section s3cfb
-.section s3cga
-.section s3cgb
-.section s3cha
-.section s3chb
-.section s3cia
-.section s3cib
-.section s3cja
-.section s3cjb
-.section s3cka
-.section s3ckb
-.section s3cla
-.section s3clb
-.section s3cma
-.section s3cmb
-.section s3cna
-.section s3cnb
-.section s3coa
-.section s3cob
-.section s3cpa
-.section s3cpb
-.section s3cqa
-.section s3cqb
-.section s3cra
-.section s3crb
-.section s3csa
-.section s3csb
-.section s3cta
-.section s3ctb
-.section s3cua
-.section s3cub
-.section s3cva
-.section s3cvb
-.section s3cwa
-.section s3cwb
-.section s3cxa
-.section s3cxb
-.section s3cya
-.section s3cyb
-.section s3cza
-.section s3czb
-.section s3c1a
-.section s3c1b
-.section s3c2a
-.section s3c2b
-.section s3c3a
-.section s3c3b
-.section s3c4a
-.section s3c4b
-.section s3c5a
-.section s3c5b
-.section s3c6a
-.section s3c6b
-.section s3c7a
-.section s3c7b
-.section s3c8a
-.section s3c8b
-.section s3c9a
-.section s3c9b
-.section s3c0a
-.section s3c0b
-.section s3daa
-.section s3dab
-.section s3dba
-.section s3dbb
-.section s3dca
-.section s3dcb
-.section s3dda
-.section s3ddb
-.section s3dea
-.section s3deb
-.section s3dfa
-.section s3dfb
-.section s3dga
-.section s3dgb
-.section s3dha
-.section s3dhb
-.section s3dia
-.section s3dib
-.section s3dja
-.section s3djb
-.section s3dka
-.section s3dkb
-.section s3dla
-.section s3dlb
-.section s3dma
-.section s3dmb
-.section s3dna
-.section s3dnb
-.section s3doa
-.section s3dob
-.section s3dpa
-.section s3dpb
-.section s3dqa
-.section s3dqb
-.section s3dra
-.section s3drb
-.section s3dsa
-.section s3dsb
-.section s3dta
-.section s3dtb
-.section s3dua
-.section s3dub
-.section s3dva
-.section s3dvb
-.section s3dwa
-.section s3dwb
-.section s3dxa
-.section s3dxb
-.section s3dya
-.section s3dyb
-.section s3dza
-.section s3dzb
-.section s3d1a
-.section s3d1b
-.section s3d2a
-.section s3d2b
-.section s3d3a
-.section s3d3b
-.section s3d4a
-.section s3d4b
-.section s3d5a
-.section s3d5b
-.section s3d6a
-.section s3d6b
-.section s3d7a
-.section s3d7b
-.section s3d8a
-.section s3d8b
-.section s3d9a
-.section s3d9b
-.section s3d0a
-.section s3d0b
-.section s3eaa
-.section s3eab
-.section s3eba
-.section s3ebb
-.section s3eca
-.section s3ecb
-.section s3eda
-.section s3edb
-.section s3eea
-.section s3eeb
-.section s3efa
-.section s3efb
-.section s3ega
-.section s3egb
-.section s3eha
-.section s3ehb
-.section s3eia
-.section s3eib
-.section s3eja
-.section s3ejb
-.section s3eka
-.section s3ekb
-.section s3ela
-.section s3elb
-.section s3ema
-.section s3emb
-.section s3ena
-.section s3enb
-.section s3eoa
-.section s3eob
-.section s3epa
-.section s3epb
-.section s3eqa
-.section s3eqb
-.section s3era
-.section s3erb
-.section s3esa
-.section s3esb
-.section s3eta
-.section s3etb
-.section s3eua
-.section s3eub
-.section s3eva
-.section s3evb
-.section s3ewa
-.section s3ewb
-.section s3exa
-.section s3exb
-.section s3eya
-.section s3eyb
-.section s3eza
-.section s3ezb
-.section s3e1a
-.section s3e1b
-.section s3e2a
-.section s3e2b
-.section s3e3a
-.section s3e3b
-.section s3e4a
-.section s3e4b
-.section s3e5a
-.section s3e5b
-.section s3e6a
-.section s3e6b
-.section s3e7a
-.section s3e7b
-.section s3e8a
-.section s3e8b
-.section s3e9a
-.section s3e9b
-.section s3e0a
-.section s3e0b
-.section s3faa
-.section s3fab
-.section s3fba
-.section s3fbb
-.section s3fca
-.section s3fcb
-.section s3fda
-.section s3fdb
-.section s3fea
-.section s3feb
-.section s3ffa
-.section s3ffb
-.section s3fga
-.section s3fgb
-.section s3fha
-.section s3fhb
-.section s3fia
-.section s3fib
-.section s3fja
-.section s3fjb
-.section s3fka
-.section s3fkb
-.section s3fla
-.section s3flb
-.section s3fma
-.section s3fmb
-.section s3fna
-.section s3fnb
-.section s3foa
-.section s3fob
-.section s3fpa
-.section s3fpb
-.section s3fqa
-.section s3fqb
-.section s3fra
-.section s3frb
-.section s3fsa
-.section s3fsb
-.section s3fta
-.section s3ftb
-.section s3fua
-.section s3fub
-.section s3fva
-.section s3fvb
-.section s3fwa
-.section s3fwb
-.section s3fxa
-.section s3fxb
-.section s3fya
-.section s3fyb
-.section s3fza
-.section s3fzb
-.section s3f1a
-.section s3f1b
-.section s3f2a
-.section s3f2b
-.section s3f3a
-.section s3f3b
-.section s3f4a
-.section s3f4b
-.section s3f5a
-.section s3f5b
-.section s3f6a
-.section s3f6b
-.section s3f7a
-.section s3f7b
-.section s3f8a
-.section s3f8b
-.section s3f9a
-.section s3f9b
-.section s3f0a
-.section s3f0b
-.section s3gaa
-.section s3gab
-.section s3gba
-.section s3gbb
-.section s3gca
-.section s3gcb
-.section s3gda
-.section s3gdb
-.section s3gea
-.section s3geb
-.section s3gfa
-.section s3gfb
-.section s3gga
-.section s3ggb
-.section s3gha
-.section s3ghb
-.section s3gia
-.section s3gib
-.section s3gja
-.section s3gjb
-.section s3gka
-.section s3gkb
-.section s3gla
-.section s3glb
-.section s3gma
-.section s3gmb
-.section s3gna
-.section s3gnb
-.section s3goa
-.section s3gob
-.section s3gpa
-.section s3gpb
-.section s3gqa
-.section s3gqb
-.section s3gra
-.section s3grb
-.section s3gsa
-.section s3gsb
-.section s3gta
-.section s3gtb
-.section s3gua
-.section s3gub
-.section s3gva
-.section s3gvb
-.section s3gwa
-.section s3gwb
-.section s3gxa
-.section s3gxb
-.section s3gya
-.section s3gyb
-.section s3gza
-.section s3gzb
-.section s3g1a
-.section s3g1b
-.section s3g2a
-.section s3g2b
-.section s3g3a
-.section s3g3b
-.section s3g4a
-.section s3g4b
-.section s3g5a
-.section s3g5b
-.section s3g6a
-.section s3g6b
-.section s3g7a
-.section s3g7b
-.section s3g8a
-.section s3g8b
-.section s3g9a
-.section s3g9b
-.section s3g0a
-.section s3g0b
-.section s3haa
-.section s3hab
-.section s3hba
-.section s3hbb
-.section s3hca
-.section s3hcb
-.section s3hda
-.section s3hdb
-.section s3hea
-.section s3heb
-.section s3hfa
-.section s3hfb
-.section s3hga
-.section s3hgb
-.section s3hha
-.section s3hhb
-.section s3hia
-.section s3hib
-.section s3hja
-.section s3hjb
-.section s3hka
-.section s3hkb
-.section s3hla
-.section s3hlb
-.section s3hma
-.section s3hmb
-.section s3hna
-.section s3hnb
-.section s3hoa
-.section s3hob
-.section s3hpa
-.section s3hpb
-.section s3hqa
-.section s3hqb
-.section s3hra
-.section s3hrb
-.section s3hsa
-.section s3hsb
-.section s3hta
-.section s3htb
-.section s3hua
-.section s3hub
-.section s3hva
-.section s3hvb
-.section s3hwa
-.section s3hwb
-.section s3hxa
-.section s3hxb
-.section s3hya
-.section s3hyb
-.section s3hza
-.section s3hzb
-.section s3h1a
-.section s3h1b
-.section s3h2a
-.section s3h2b
-.section s3h3a
-.section s3h3b
-.section s3h4a
-.section s3h4b
-.section s3h5a
-.section s3h5b
-.section s3h6a
-.section s3h6b
-.section s3h7a
-.section s3h7b
-.section s3h8a
-.section s3h8b
-.section s3h9a
-.section s3h9b
-.section s3h0a
-.section s3h0b
-.section s3iaa
-.section s3iab
-.section s3iba
-.section s3ibb
-.section s3ica
-.section s3icb
-.section s3ida
-.section s3idb
-.section s3iea
-.section s3ieb
-.section s3ifa
-.section s3ifb
-.section s3iga
-.section s3igb
-.section s3iha
-.section s3ihb
-.section s3iia
-.section s3iib
-.section s3ija
-.section s3ijb
-.section s3ika
-.section s3ikb
-.section s3ila
-.section s3ilb
-.section s3ima
-.section s3imb
-.section s3ina
-.section s3inb
-.section s3ioa
-.section s3iob
-.section s3ipa
-.section s3ipb
-.section s3iqa
-.section s3iqb
-.section s3ira
-.section s3irb
-.section s3isa
-.section s3isb
-.section s3ita
-.section s3itb
-.section s3iua
-.section s3iub
-.section s3iva
-.section s3ivb
-.section s3iwa
-.section s3iwb
-.section s3ixa
-.section s3ixb
-.section s3iya
-.section s3iyb
-.section s3iza
-.section s3izb
-.section s3i1a
-.section s3i1b
-.section s3i2a
-.section s3i2b
-.section s3i3a
-.section s3i3b
-.section s3i4a
-.section s3i4b
-.section s3i5a
-.section s3i5b
-.section s3i6a
-.section s3i6b
-.section s3i7a
-.section s3i7b
-.section s3i8a
-.section s3i8b
-.section s3i9a
-.section s3i9b
-.section s3i0a
-.section s3i0b
-.section s3jaa
-.section s3jab
-.section s3jba
-.section s3jbb
-.section s3jca
-.section s3jcb
-.section s3jda
-.section s3jdb
-.section s3jea
-.section s3jeb
-.section s3jfa
-.section s3jfb
-.section s3jga
-.section s3jgb
-.section s3jha
-.section s3jhb
-.section s3jia
-.section s3jib
-.section s3jja
-.section s3jjb
-.section s3jka
-.section s3jkb
-.section s3jla
-.section s3jlb
-.section s3jma
-.section s3jmb
-.section s3jna
-.section s3jnb
-.section s3joa
-.section s3job
-.section s3jpa
-.section s3jpb
-.section s3jqa
-.section s3jqb
-.section s3jra
-.section s3jrb
-.section s3jsa
-.section s3jsb
-.section s3jta
-.section s3jtb
-.section s3jua
-.section s3jub
-.section s3jva
-.section s3jvb
-.section s3jwa
-.section s3jwb
-.section s3jxa
-.section s3jxb
-.section s3jya
-.section s3jyb
-.section s3jza
-.section s3jzb
-.section s3j1a
-.section s3j1b
-.section s3j2a
-.section s3j2b
-.section s3j3a
-.section s3j3b
-.section s3j4a
-.section s3j4b
-.section s3j5a
-.section s3j5b
-.section s3j6a
-.section s3j6b
-.section s3j7a
-.section s3j7b
-.section s3j8a
-.section s3j8b
-.section s3j9a
-.section s3j9b
-.section s3j0a
-.section s3j0b
-.section s3kaa
-.section s3kab
-.section s3kba
-.section s3kbb
-.section s3kca
-.section s3kcb
-.section s3kda
-.section s3kdb
-.section s3kea
-.section s3keb
-.section s3kfa
-.section s3kfb
-.section s3kga
-.section s3kgb
-.section s3kha
-.section s3khb
-.section s3kia
-.section s3kib
-.section s3kja
-.section s3kjb
-.section s3kka
-.section s3kkb
-.section s3kla
-.section s3klb
-.section s3kma
-.section s3kmb
-.section s3kna
-.section s3knb
-.section s3koa
-.section s3kob
-.section s3kpa
-.section s3kpb
-.section s3kqa
-.section s3kqb
-.section s3kra
-.section s3krb
-.section s3ksa
-.section s3ksb
-.section s3kta
-.section s3ktb
-.section s3kua
-.section s3kub
-.section s3kva
-.section s3kvb
-.section s3kwa
-.section s3kwb
-.section s3kxa
-.section s3kxb
-.section s3kya
-.section s3kyb
-.section s3kza
-.section s3kzb
-.section s3k1a
-.section s3k1b
-.section s3k2a
-.section s3k2b
-.section s3k3a
-.section s3k3b
-.section s3k4a
-.section s3k4b
-.section s3k5a
-.section s3k5b
-.section s3k6a
-.section s3k6b
-.section s3k7a
-.section s3k7b
-.section s3k8a
-.section s3k8b
-.section s3k9a
-.section s3k9b
-.section s3k0a
-.section s3k0b
-.section s3laa
-.section s3lab
-.section s3lba
-.section s3lbb
-.section s3lca
-.section s3lcb
-.section s3lda
-.section s3ldb
-.section s3lea
-.section s3leb
-.section s3lfa
-.section s3lfb
-.section s3lga
-.section s3lgb
-.section s3lha
-.section s3lhb
-.section s3lia
-.section s3lib
-.section s3lja
-.section s3ljb
-.section s3lka
-.section s3lkb
-.section s3lla
-.section s3llb
-.section s3lma
-.section s3lmb
-.section s3lna
-.section s3lnb
-.section s3loa
-.section s3lob
-.section s3lpa
-.section s3lpb
-.section s3lqa
-.section s3lqb
-.section s3lra
-.section s3lrb
-.section s3lsa
-.section s3lsb
-.section s3lta
-.section s3ltb
-.section s3lua
-.section s3lub
-.section s3lva
-.section s3lvb
-.section s3lwa
-.section s3lwb
-.section s3lxa
-.section s3lxb
-.section s3lya
-.section s3lyb
-.section s3lza
-.section s3lzb
-.section s3l1a
-.section s3l1b
-.section s3l2a
-.section s3l2b
-.section s3l3a
-.section s3l3b
-.section s3l4a
-.section s3l4b
-.section s3l5a
-.section s3l5b
-.section s3l6a
-.section s3l6b
-.section s3l7a
-.section s3l7b
-.section s3l8a
-.section s3l8b
-.section s3l9a
-.section s3l9b
-.section s3l0a
-.section s3l0b
-.section s3maa
-.section s3mab
-.section s3mba
-.section s3mbb
-.section s3mca
-.section s3mcb
-.section s3mda
-.section s3mdb
-.section s3mea
-.section s3meb
-.section s3mfa
-.section s3mfb
-.section s3mga
-.section s3mgb
-.section s3mha
-.section s3mhb
-.section s3mia
-.section s3mib
-.section s3mja
-.section s3mjb
-.section s3mka
-.section s3mkb
-.section s3mla
-.section s3mlb
-.section s3mma
-.section s3mmb
-.section s3mna
-.section s3mnb
-.section s3moa
-.section s3mob
-.section s3mpa
-.section s3mpb
-.section s3mqa
-.section s3mqb
-.section s3mra
-.section s3mrb
-.section s3msa
-.section s3msb
-.section s3mta
-.section s3mtb
-.section s3mua
-.section s3mub
-.section s3mva
-.section s3mvb
-.section s3mwa
-.section s3mwb
-.section s3mxa
-.section s3mxb
-.section s3mya
-.section s3myb
-.section s3mza
-.section s3mzb
-.section s3m1a
-.section s3m1b
-.section s3m2a
-.section s3m2b
-.section s3m3a
-.section s3m3b
-.section s3m4a
-.section s3m4b
-.section s3m5a
-.section s3m5b
-.section s3m6a
-.section s3m6b
-.section s3m7a
-.section s3m7b
-.section s3m8a
-.section s3m8b
-.section s3m9a
-.section s3m9b
-.section s3m0a
-.section s3m0b
-.section s3naa
-.section s3nab
-.section s3nba
-.section s3nbb
-.section s3nca
-.section s3ncb
-.section s3nda
-.section s3ndb
-.section s3nea
-.section s3neb
-.section s3nfa
-.section s3nfb
-.section s3nga
-.section s3ngb
-.section s3nha
-.section s3nhb
-.section s3nia
-.section s3nib
-.section s3nja
-.section s3njb
-.section s3nka
-.section s3nkb
-.section s3nla
-.section s3nlb
-.section s3nma
-.section s3nmb
-.section s3nna
-.section s3nnb
-.section s3noa
-.section s3nob
-.section s3npa
-.section s3npb
-.section s3nqa
-.section s3nqb
-.section s3nra
-.section s3nrb
-.section s3nsa
-.section s3nsb
-.section s3nta
-.section s3ntb
-.section s3nua
-.section s3nub
-.section s3nva
-.section s3nvb
-.section s3nwa
-.section s3nwb
-.section s3nxa
-.section s3nxb
-.section s3nya
-.section s3nyb
-.section s3nza
-.section s3nzb
-.section s3n1a
-.section s3n1b
-.section s3n2a
-.section s3n2b
-.section s3n3a
-.section s3n3b
-.section s3n4a
-.section s3n4b
-.section s3n5a
-.section s3n5b
-.section s3n6a
-.section s3n6b
-.section s3n7a
-.section s3n7b
-.section s3n8a
-.section s3n8b
-.section s3n9a
-.section s3n9b
-.section s3n0a
-.section s3n0b
-.section s3oaa
-.section s3oab
-.section s3oba
-.section s3obb
-.section s3oca
-.section s3ocb
-.section s3oda
-.section s3odb
-.section s3oea
-.section s3oeb
-.section s3ofa
-.section s3ofb
-.section s3oga
-.section s3ogb
-.section s3oha
-.section s3ohb
-.section s3oia
-.section s3oib
-.section s3oja
-.section s3ojb
-.section s3oka
-.section s3okb
-.section s3ola
-.section s3olb
-.section s3oma
-.section s3omb
-.section s3ona
-.section s3onb
-.section s3ooa
-.section s3oob
-.section s3opa
-.section s3opb
-.section s3oqa
-.section s3oqb
-.section s3ora
-.section s3orb
-.section s3osa
-.section s3osb
-.section s3ota
-.section s3otb
-.section s3oua
-.section s3oub
-.section s3ova
-.section s3ovb
-.section s3owa
-.section s3owb
-.section s3oxa
-.section s3oxb
-.section s3oya
-.section s3oyb
-.section s3oza
-.section s3ozb
-.section s3o1a
-.section s3o1b
-.section s3o2a
-.section s3o2b
-.section s3o3a
-.section s3o3b
-.section s3o4a
-.section s3o4b
-.section s3o5a
-.section s3o5b
-.section s3o6a
-.section s3o6b
-.section s3o7a
-.section s3o7b
-.section s3o8a
-.section s3o8b
-.section s3o9a
-.section s3o9b
-.section s3o0a
-.section s3o0b
-.section s3paa
-.section s3pab
-.section s3pba
-.section s3pbb
-.section s3pca
-.section s3pcb
-.section s3pda
-.section s3pdb
-.section s3pea
-.section s3peb
-.section s3pfa
-.section s3pfb
-.section s3pga
-.section s3pgb
-.section s3pha
-.section s3phb
-.section s3pia
-.section s3pib
-.section s3pja
-.section s3pjb
-.section s3pka
-.section s3pkb
-.section s3pla
-.section s3plb
-.section s3pma
-.section s3pmb
-.section s3pna
-.section s3pnb
-.section s3poa
-.section s3pob
-.section s3ppa
-.section s3ppb
-.section s3pqa
-.section s3pqb
-.section s3pra
-.section s3prb
-.section s3psa
-.section s3psb
-.section s3pta
-.section s3ptb
-.section s3pua
-.section s3pub
-.section s3pva
-.section s3pvb
-.section s3pwa
-.section s3pwb
-.section s3pxa
-.section s3pxb
-.section s3pya
-.section s3pyb
-.section s3pza
-.section s3pzb
-.section s3p1a
-.section s3p1b
-.section s3p2a
-.section s3p2b
-.section s3p3a
-.section s3p3b
-.section s3p4a
-.section s3p4b
-.section s3p5a
-.section s3p5b
-.section s3p6a
-.section s3p6b
-.section s3p7a
-.section s3p7b
-.section s3p8a
-.section s3p8b
-.section s3p9a
-.section s3p9b
-.section s3p0a
-.section s3p0b
-.section s3qaa
-.section s3qab
-.section s3qba
-.section s3qbb
-.section s3qca
-.section s3qcb
-.section s3qda
-.section s3qdb
-.section s3qea
-.section s3qeb
-.section s3qfa
-.section s3qfb
-.section s3qga
-.section s3qgb
-.section s3qha
-.section s3qhb
-.section s3qia
-.section s3qib
-.section s3qja
-.section s3qjb
-.section s3qka
-.section s3qkb
-.section s3qla
-.section s3qlb
-.section s3qma
-.section s3qmb
-.section s3qna
-.section s3qnb
-.section s3qoa
-.section s3qob
-.section s3qpa
-.section s3qpb
-.section s3qqa
-.section s3qqb
-.section s3qra
-.section s3qrb
-.section s3qsa
-.section s3qsb
-.section s3qta
-.section s3qtb
-.section s3qua
-.section s3qub
-.section s3qva
-.section s3qvb
-.section s3qwa
-.section s3qwb
-.section s3qxa
-.section s3qxb
-.section s3qya
-.section s3qyb
-.section s3qza
-.section s3qzb
-.section s3q1a
-.section s3q1b
-.section s3q2a
-.section s3q2b
-.section s3q3a
-.section s3q3b
-.section s3q4a
-.section s3q4b
-.section s3q5a
-.section s3q5b
-.section s3q6a
-.section s3q6b
-.section s3q7a
-.section s3q7b
-.section s3q8a
-.section s3q8b
-.section s3q9a
-.section s3q9b
-.section s3q0a
-.section s3q0b
-.section s3raa
-.section s3rab
-.section s3rba
-.section s3rbb
-.section s3rca
-.section s3rcb
-.section s3rda
-.section s3rdb
-.section s3rea
-.section s3reb
-.section s3rfa
-.section s3rfb
-.section s3rga
-.section s3rgb
-.section s3rha
-.section s3rhb
-.section s3ria
-.section s3rib
-.section s3rja
-.section s3rjb
-.section s3rka
-.section s3rkb
-.section s3rla
-.section s3rlb
-.section s3rma
-.section s3rmb
-.section s3rna
-.section s3rnb
-.section s3roa
-.section s3rob
-.section s3rpa
-.section s3rpb
-.section s3rqa
-.section s3rqb
-.section s3rra
-.section s3rrb
-.section s3rsa
-.section s3rsb
-.section s3rta
-.section s3rtb
-.section s3rua
-.section s3rub
-.section s3rva
-.section s3rvb
-.section s3rwa
-.section s3rwb
-.section s3rxa
-.section s3rxb
-.section s3rya
-.section s3ryb
-.section s3rza
-.section s3rzb
-.section s3r1a
-.section s3r1b
-.section s3r2a
-.section s3r2b
-.section s3r3a
-.section s3r3b
-.section s3r4a
-.section s3r4b
-.section s3r5a
-.section s3r5b
-.section s3r6a
-.section s3r6b
-.section s3r7a
-.section s3r7b
-.section s3r8a
-.section s3r8b
-.section s3r9a
-.section s3r9b
-.section s3r0a
-.section s3r0b
-.section s3saa
-.section s3sab
-.section s3sba
-.section s3sbb
-.section s3sca
-.section s3scb
-.section s3sda
-.section s3sdb
-.section s3sea
-.section s3seb
-.section s3sfa
-.section s3sfb
-.section s3sga
-.section s3sgb
-.section s3sha
-.section s3shb
-.section s3sia
-.section s3sib
-.section s3sja
-.section s3sjb
-.section s3ska
-.section s3skb
-.section s3sla
-.section s3slb
-.section s3sma
-.section s3smb
-.section s3sna
-.section s3snb
-.section s3soa
-.section s3sob
-.section s3spa
-.section s3spb
-.section s3sqa
-.section s3sqb
-.section s3sra
-.section s3srb
-.section s3ssa
-.section s3ssb
-.section s3sta
-.section s3stb
-.section s3sua
-.section s3sub
-.section s3sva
-.section s3svb
-.section s3swa
-.section s3swb
-.section s3sxa
-.section s3sxb
-.section s3sya
-.section s3syb
-.section s3sza
-.section s3szb
-.section s3s1a
-.section s3s1b
-.section s3s2a
-.section s3s2b
-.section s3s3a
-.section s3s3b
-.section s3s4a
-.section s3s4b
-.section s3s5a
-.section s3s5b
-.section s3s6a
-.section s3s6b
-.section s3s7a
-.section s3s7b
-.section s3s8a
-.section s3s8b
-.section s3s9a
-.section s3s9b
-.section s3s0a
-.section s3s0b
-.section s3taa
-.section s3tab
-.section s3tba
-.section s3tbb
-.section s3tca
-.section s3tcb
-.section s3tda
-.section s3tdb
-.section s3tea
-.section s3teb
-.section s3tfa
-.section s3tfb
-.section s3tga
-.section s3tgb
-.section s3tha
-.section s3thb
-.section s3tia
-.section s3tib
-.section s3tja
-.section s3tjb
-.section s3tka
-.section s3tkb
-.section s3tla
-.section s3tlb
-.section s3tma
-.section s3tmb
-.section s3tna
-.section s3tnb
-.section s3toa
-.section s3tob
-.section s3tpa
-.section s3tpb
-.section s3tqa
-.section s3tqb
-.section s3tra
-.section s3trb
-.section s3tsa
-.section s3tsb
-.section s3tta
-.section s3ttb
-.section s3tua
-.section s3tub
-.section s3tva
-.section s3tvb
-.section s3twa
-.section s3twb
-.section s3txa
-.section s3txb
-.section s3tya
-.section s3tyb
-.section s3tza
-.section s3tzb
-.section s3t1a
-.section s3t1b
-.section s3t2a
-.section s3t2b
-.section s3t3a
-.section s3t3b
-.section s3t4a
-.section s3t4b
-.section s3t5a
-.section s3t5b
-.section s3t6a
-.section s3t6b
-.section s3t7a
-.section s3t7b
-.section s3t8a
-.section s3t8b
-.section s3t9a
-.section s3t9b
-.section s3t0a
-.section s3t0b
-.section s3uaa
-.section s3uab
-.section s3uba
-.section s3ubb
-.section s3uca
-.section s3ucb
-.section s3uda
-.section s3udb
-.section s3uea
-.section s3ueb
-.section s3ufa
-.section s3ufb
-.section s3uga
-.section s3ugb
-.section s3uha
-.section s3uhb
-.section s3uia
-.section s3uib
-.section s3uja
-.section s3ujb
-.section s3uka
-.section s3ukb
-.section s3ula
-.section s3ulb
-.section s3uma
-.section s3umb
-.section s3una
-.section s3unb
-.section s3uoa
-.section s3uob
-.section s3upa
-.section s3upb
-.section s3uqa
-.section s3uqb
-.section s3ura
-.section s3urb
-.section s3usa
-.section s3usb
-.section s3uta
-.section s3utb
-.section s3uua
-.section s3uub
-.section s3uva
-.section s3uvb
-.section s3uwa
-.section s3uwb
-.section s3uxa
-.section s3uxb
-.section s3uya
-.section s3uyb
-.section s3uza
-.section s3uzb
-.section s3u1a
-.section s3u1b
-.section s3u2a
-.section s3u2b
-.section s3u3a
-.section s3u3b
-.section s3u4a
-.section s3u4b
-.section s3u5a
-.section s3u5b
-.section s3u6a
-.section s3u6b
-.section s3u7a
-.section s3u7b
-.section s3u8a
-.section s3u8b
-.section s3u9a
-.section s3u9b
-.section s3u0a
-.section s3u0b
-.section s3vaa
-.section s3vab
-.section s3vba
-.section s3vbb
-.section s3vca
-.section s3vcb
-.section s3vda
-.section s3vdb
-.section s3vea
-.section s3veb
-.section s3vfa
-.section s3vfb
-.section s3vga
-.section s3vgb
-.section s3vha
-.section s3vhb
-.section s3via
-.section s3vib
-.section s3vja
-.section s3vjb
-.section s3vka
-.section s3vkb
-.section s3vla
-.section s3vlb
-.section s3vma
-.section s3vmb
-.section s3vna
-.section s3vnb
-.section s3voa
-.section s3vob
-.section s3vpa
-.section s3vpb
-.section s3vqa
-.section s3vqb
-.section s3vra
-.section s3vrb
-.section s3vsa
-.section s3vsb
-.section s3vta
-.section s3vtb
-.section s3vua
-.section s3vub
-.section s3vva
-.section s3vvb
-.section s3vwa
-.section s3vwb
-.section s3vxa
-.section s3vxb
-.section s3vya
-.section s3vyb
-.section s3vza
-.section s3vzb
-.section s3v1a
-.section s3v1b
-.section s3v2a
-.section s3v2b
-.section s3v3a
-.section s3v3b
-.section s3v4a
-.section s3v4b
-.section s3v5a
-.section s3v5b
-.section s3v6a
-.section s3v6b
-.section s3v7a
-.section s3v7b
-.section s3v8a
-.section s3v8b
-.section s3v9a
-.section s3v9b
-.section s3v0a
-.section s3v0b
-.section s3waa
-.section s3wab
-.section s3wba
-.section s3wbb
-.section s3wca
-.section s3wcb
-.section s3wda
-.section s3wdb
-.section s3wea
-.section s3web
-.section s3wfa
-.section s3wfb
-.section s3wga
-.section s3wgb
-.section s3wha
-.section s3whb
-.section s3wia
-.section s3wib
-.section s3wja
-.section s3wjb
-.section s3wka
-.section s3wkb
-.section s3wla
-.section s3wlb
-.section s3wma
-.section s3wmb
-.section s3wna
-.section s3wnb
-.section s3woa
-.section s3wob
-.section s3wpa
-.section s3wpb
-.section s3wqa
-.section s3wqb
-.section s3wra
-.section s3wrb
-.section s3wsa
-.section s3wsb
-.section s3wta
-.section s3wtb
-.section s3wua
-.section s3wub
-.section s3wva
-.section s3wvb
-.section s3wwa
-.section s3wwb
-.section s3wxa
-.section s3wxb
-.section s3wya
-.section s3wyb
-.section s3wza
-.section s3wzb
-.section s3w1a
-.section s3w1b
-.section s3w2a
-.section s3w2b
-.section s3w3a
-.section s3w3b
-.section s3w4a
-.section s3w4b
-.section s3w5a
-.section s3w5b
-.section s3w6a
-.section s3w6b
-.section s3w7a
-.section s3w7b
-.section s3w8a
-.section s3w8b
-.section s3w9a
-.section s3w9b
-.section s3w0a
-.section s3w0b
-.section s3xaa
-.section s3xab
-.section s3xba
-.section s3xbb
-.section s3xca
-.section s3xcb
-.section s3xda
-.section s3xdb
-.section s3xea
-.section s3xeb
-.section s3xfa
-.section s3xfb
-.section s3xga
-.section s3xgb
-.section s3xha
-.section s3xhb
-.section s3xia
-.section s3xib
-.section s3xja
-.section s3xjb
-.section s3xka
-.section s3xkb
-.section s3xla
-.section s3xlb
-.section s3xma
-.section s3xmb
-.section s3xna
-.section s3xnb
-.section s3xoa
-.section s3xob
-.section s3xpa
-.section s3xpb
-.section s3xqa
-.section s3xqb
-.section s3xra
-.section s3xrb
-.section s3xsa
-.section s3xsb
-.section s3xta
-.section s3xtb
-.section s3xua
-.section s3xub
-.section s3xva
-.section s3xvb
-.section s3xwa
-.section s3xwb
-.section s3xxa
-.section s3xxb
-.section s3xya
-.section s3xyb
-.section s3xza
-.section s3xzb
-.section s3x1a
-.section s3x1b
-.section s3x2a
-.section s3x2b
-.section s3x3a
-.section s3x3b
-.section s3x4a
-.section s3x4b
-.section s3x5a
-.section s3x5b
-.section s3x6a
-.section s3x6b
-.section s3x7a
-.section s3x7b
-.section s3x8a
-.section s3x8b
-.section s3x9a
-.section s3x9b
-.section s3x0a
-.section s3x0b
-.section s3yaa
-.section s3yab
-.section s3yba
-.section s3ybb
-.section s3yca
-.section s3ycb
-.section s3yda
-.section s3ydb
-.section s3yea
-.section s3yeb
-.section s3yfa
-.section s3yfb
-.section s3yga
-.section s3ygb
-.section s3yha
-.section s3yhb
-.section s3yia
-.section s3yib
-.section s3yja
-.section s3yjb
-.section s3yka
-.section s3ykb
-.section s3yla
-.section s3ylb
-.section s3yma
-.section s3ymb
-.section s3yna
-.section s3ynb
-.section s3yoa
-.section s3yob
-.section s3ypa
-.section s3ypb
-.section s3yqa
-.section s3yqb
-.section s3yra
-.section s3yrb
-.section s3ysa
-.section s3ysb
-.section s3yta
-.section s3ytb
-.section s3yua
-.section s3yub
-.section s3yva
-.section s3yvb
-.section s3ywa
-.section s3ywb
-.section s3yxa
-.section s3yxb
-.section s3yya
-.section s3yyb
-.section s3yza
-.section s3yzb
-.section s3y1a
-.section s3y1b
-.section s3y2a
-.section s3y2b
-.section s3y3a
-.section s3y3b
-.section s3y4a
-.section s3y4b
-.section s3y5a
-.section s3y5b
-.section s3y6a
-.section s3y6b
-.section s3y7a
-.section s3y7b
-.section s3y8a
-.section s3y8b
-.section s3y9a
-.section s3y9b
-.section s3y0a
-.section s3y0b
-.section s3zaa
-.section s3zab
-.section s3zba
-.section s3zbb
-.section s3zca
-.section s3zcb
-.section s3zda
-.section s3zdb
-.section s3zea
-.section s3zeb
-.section s3zfa
-.section s3zfb
-.section s3zga
-.section s3zgb
-.section s3zha
-.section s3zhb
-.section s3zia
-.section s3zib
-.section s3zja
-.section s3zjb
-.section s3zka
-.section s3zkb
-.section s3zla
-.section s3zlb
-.section s3zma
-.section s3zmb
-.section s3zna
-.section s3znb
-.section s3zoa
-.section s3zob
-.section s3zpa
-.section s3zpb
-.section s3zqa
-.section s3zqb
-.section s3zra
-.section s3zrb
-.section s3zsa
-.section s3zsb
-.section s3zta
-.section s3ztb
-.section s3zua
-.section s3zub
-.section s3zva
-.section s3zvb
-.section s3zwa
-.section s3zwb
-.section s3zxa
-.section s3zxb
-.section s3zya
-.section s3zyb
-.section s3zza
-.section s3zzb
-.section s3z1a
-.section s3z1b
-.section s3z2a
-.section s3z2b
-.section s3z3a
-.section s3z3b
-.section s3z4a
-.section s3z4b
-.section s3z5a
-.section s3z5b
-.section s3z6a
-.section s3z6b
-.section s3z7a
-.section s3z7b
-.section s3z8a
-.section s3z8b
-.section s3z9a
-.section s3z9b
-.section s3z0a
-.section s3z0b
-.section s31aa
-.section s31ab
-.section s31ba
-.section s31bb
-.section s31ca
-.section s31cb
-.section s31da
-.section s31db
-.section s31ea
-.section s31eb
-.section s31fa
-.section s31fb
-.section s31ga
-.section s31gb
-.section s31ha
-.section s31hb
-.section s31ia
-.section s31ib
-.section s31ja
-.section s31jb
-.section s31ka
-.section s31kb
-.section s31la
-.section s31lb
-.section s31ma
-.section s31mb
-.section s31na
-.section s31nb
-.section s31oa
-.section s31ob
-.section s31pa
-.section s31pb
-.section s31qa
-.section s31qb
-.section s31ra
-.section s31rb
-.section s31sa
-.section s31sb
-.section s31ta
-.section s31tb
-.section s31ua
-.section s31ub
-.section s31va
-.section s31vb
-.section s31wa
-.section s31wb
-.section s31xa
-.section s31xb
-.section s31ya
-.section s31yb
-.section s31za
-.section s31zb
-.section s311a
-.section s311b
-.section s312a
-.section s312b
-.section s313a
-.section s313b
-.section s314a
-.section s314b
-.section s315a
-.section s315b
-.section s316a
-.section s316b
-.section s317a
-.section s317b
-.section s318a
-.section s318b
-.section s319a
-.section s319b
-.section s310a
-.section s310b
-.section s32aa
-.section s32ab
-.section s32ba
-.section s32bb
-.section s32ca
-.section s32cb
-.section s32da
-.section s32db
-.section s32ea
-.section s32eb
-.section s32fa
-.section s32fb
-.section s32ga
-.section s32gb
-.section s32ha
-.section s32hb
-.section s32ia
-.section s32ib
-.section s32ja
-.section s32jb
-.section s32ka
-.section s32kb
-.section s32la
-.section s32lb
-.section s32ma
-.section s32mb
-.section s32na
-.section s32nb
-.section s32oa
-.section s32ob
-.section s32pa
-.section s32pb
-.section s32qa
-.section s32qb
-.section s32ra
-.section s32rb
-.section s32sa
-.section s32sb
-.section s32ta
-.section s32tb
-.section s32ua
-.section s32ub
-.section s32va
-.section s32vb
-.section s32wa
-.section s32wb
-.section s32xa
-.section s32xb
-.section s32ya
-.section s32yb
-.section s32za
-.section s32zb
-.section s321a
-.section s321b
-.section s322a
-.section s322b
-.section s323a
-.section s323b
-.section s324a
-.section s324b
-.section s325a
-.section s325b
-.section s326a
-.section s326b
-.section s327a
-.section s327b
-.section s328a
-.section s328b
-.section s329a
-.section s329b
-.section s320a
-.section s320b
-.section s33aa
-.section s33ab
-.section s33ba
-.section s33bb
-.section s33ca
-.section s33cb
-.section s33da
-.section s33db
-.section s33ea
-.section s33eb
-.section s33fa
-.section s33fb
-.section s33ga
-.section s33gb
-.section s33ha
-.section s33hb
-.section s33ia
-.section s33ib
-.section s33ja
-.section s33jb
-.section s33ka
-.section s33kb
-.section s33la
-.section s33lb
-.section s33ma
-.section s33mb
-.section s33na
-.section s33nb
-.section s33oa
-.section s33ob
-.section s33pa
-.section s33pb
-.section s33qa
-.section s33qb
-.section s33ra
-.section s33rb
-.section s33sa
-.section s33sb
-.section s33ta
-.section s33tb
-.section s33ua
-.section s33ub
-.section s33va
-.section s33vb
-.section s33wa
-.section s33wb
-.section s33xa
-.section s33xb
-.section s33ya
-.section s33yb
-.section s33za
-.section s33zb
-.section s331a
-.section s331b
-.section s332a
-.section s332b
-.section s333a
-.section s333b
-.section s334a
-.section s334b
-.section s335a
-.section s335b
-.section s336a
-.section s336b
-.section s337a
-.section s337b
-.section s338a
-.section s338b
-.section s339a
-.section s339b
-.section s330a
-.section s330b
-.section s34aa
-.section s34ab
-.section s34ba
-.section s34bb
-.section s34ca
-.section s34cb
-.section s34da
-.section s34db
-.section s34ea
-.section s34eb
-.section s34fa
-.section s34fb
-.section s34ga
-.section s34gb
-.section s34ha
-.section s34hb
-.section s34ia
-.section s34ib
-.section s34ja
-.section s34jb
-.section s34ka
-.section s34kb
-.section s34la
-.section s34lb
-.section s34ma
-.section s34mb
-.section s34na
-.section s34nb
-.section s34oa
-.section s34ob
-.section s34pa
-.section s34pb
-.section s34qa
-.section s34qb
-.section s34ra
-.section s34rb
-.section s34sa
-.section s34sb
-.section s34ta
-.section s34tb
-.section s34ua
-.section s34ub
-.section s34va
-.section s34vb
-.section s34wa
-.section s34wb
-.section s34xa
-.section s34xb
-.section s34ya
-.section s34yb
-.section s34za
-.section s34zb
-.section s341a
-.section s341b
-.section s342a
-.section s342b
-.section s343a
-.section s343b
-.section s344a
-.section s344b
-.section s345a
-.section s345b
-.section s346a
-.section s346b
-.section s347a
-.section s347b
-.section s348a
-.section s348b
-.section s349a
-.section s349b
-.section s340a
-.section s340b
-.section s35aa
-.section s35ab
-.section s35ba
-.section s35bb
-.section s35ca
-.section s35cb
-.section s35da
-.section s35db
-.section s35ea
-.section s35eb
-.section s35fa
-.section s35fb
-.section s35ga
-.section s35gb
-.section s35ha
-.section s35hb
-.section s35ia
-.section s35ib
-.section s35ja
-.section s35jb
-.section s35ka
-.section s35kb
-.section s35la
-.section s35lb
-.section s35ma
-.section s35mb
-.section s35na
-.section s35nb
-.section s35oa
-.section s35ob
-.section s35pa
-.section s35pb
-.section s35qa
-.section s35qb
-.section s35ra
-.section s35rb
-.section s35sa
-.section s35sb
-.section s35ta
-.section s35tb
-.section s35ua
-.section s35ub
-.section s35va
-.section s35vb
-.section s35wa
-.section s35wb
-.section s35xa
-.section s35xb
-.section s35ya
-.section s35yb
-.section s35za
-.section s35zb
-.section s351a
-.section s351b
-.section s352a
-.section s352b
-.section s353a
-.section s353b
-.section s354a
-.section s354b
-.section s355a
-.section s355b
-.section s356a
-.section s356b
-.section s357a
-.section s357b
-.section s358a
-.section s358b
-.section s359a
-.section s359b
-.section s350a
-.section s350b
-.section s36aa
-.section s36ab
-.section s36ba
-.section s36bb
-.section s36ca
-.section s36cb
-.section s36da
-.section s36db
-.section s36ea
-.section s36eb
-.section s36fa
-.section s36fb
-.section s36ga
-.section s36gb
-.section s36ha
-.section s36hb
-.section s36ia
-.section s36ib
-.section s36ja
-.section s36jb
-.section s36ka
-.section s36kb
-.section s36la
-.section s36lb
-.section s36ma
-.section s36mb
-.section s36na
-.section s36nb
-.section s36oa
-.section s36ob
-.section s36pa
-.section s36pb
-.section s36qa
-.section s36qb
-.section s36ra
-.section s36rb
-.section s36sa
-.section s36sb
-.section s36ta
-.section s36tb
-.section s36ua
-.section s36ub
-.section s36va
-.section s36vb
-.section s36wa
-.section s36wb
-.section s36xa
-.section s36xb
-.section s36ya
-.section s36yb
-.section s36za
-.section s36zb
-.section s361a
-.section s361b
-.section s362a
-.section s362b
-.section s363a
-.section s363b
-.section s364a
-.section s364b
-.section s365a
-.section s365b
-.section s366a
-.section s366b
-.section s367a
-.section s367b
-.section s368a
-.section s368b
-.section s369a
-.section s369b
-.section s360a
-.section s360b
-.section s37aa
-.section s37ab
-.section s37ba
-.section s37bb
-.section s37ca
-.section s37cb
-.section s37da
-.section s37db
-.section s37ea
-.section s37eb
-.section s37fa
-.section s37fb
-.section s37ga
-.section s37gb
-.section s37ha
-.section s37hb
-.section s37ia
-.section s37ib
-.section s37ja
-.section s37jb
-.section s37ka
-.section s37kb
-.section s37la
-.section s37lb
-.section s37ma
-.section s37mb
-.section s37na
-.section s37nb
-.section s37oa
-.section s37ob
-.section s37pa
-.section s37pb
-.section s37qa
-.section s37qb
-.section s37ra
-.section s37rb
-.section s37sa
-.section s37sb
-.section s37ta
-.section s37tb
-.section s37ua
-.section s37ub
-.section s37va
-.section s37vb
-.section s37wa
-.section s37wb
-.section s37xa
-.section s37xb
-.section s37ya
-.section s37yb
-.section s37za
-.section s37zb
-.section s371a
-.section s371b
-.section s372a
-.section s372b
-.section s373a
-.section s373b
-.section s374a
-.section s374b
-.section s375a
-.section s375b
-.section s376a
-.section s376b
-.section s377a
-.section s377b
-.section s378a
-.section s378b
-.section s379a
-.section s379b
-.section s370a
-.section s370b
-.section s38aa
-.section s38ab
-.section s38ba
-.section s38bb
-.section s38ca
-.section s38cb
-.section s38da
-.section s38db
-.section s38ea
-.section s38eb
-.section s38fa
-.section s38fb
-.section s38ga
-.section s38gb
-.section s38ha
-.section s38hb
-.section s38ia
-.section s38ib
-.section s38ja
-.section s38jb
-.section s38ka
-.section s38kb
-.section s38la
-.section s38lb
-.section s38ma
-.section s38mb
-.section s38na
-.section s38nb
-.section s38oa
-.section s38ob
-.section s38pa
-.section s38pb
-.section s38qa
-.section s38qb
-.section s38ra
-.section s38rb
-.section s38sa
-.section s38sb
-.section s38ta
-.section s38tb
-.section s38ua
-.section s38ub
-.section s38va
-.section s38vb
-.section s38wa
-.section s38wb
-.section s38xa
-.section s38xb
-.section s38ya
-.section s38yb
-.section s38za
-.section s38zb
-.section s381a
-.section s381b
-.section s382a
-.section s382b
-.section s383a
-.section s383b
-.section s384a
-.section s384b
-.section s385a
-.section s385b
-.section s386a
-.section s386b
-.section s387a
-.section s387b
-.section s388a
-.section s388b
-.section s389a
-.section s389b
-.section s380a
-.section s380b
-.section s39aa
-.section s39ab
-.section s39ba
-.section s39bb
-.section s39ca
-.section s39cb
-.section s39da
-.section s39db
-.section s39ea
-.section s39eb
-.section s39fa
-.section s39fb
-.section s39ga
-.section s39gb
-.section s39ha
-.section s39hb
-.section s39ia
-.section s39ib
-.section s39ja
-.section s39jb
-.section s39ka
-.section s39kb
-.section s39la
-.section s39lb
-.section s39ma
-.section s39mb
-.section s39na
-.section s39nb
-.section s39oa
-.section s39ob
-.section s39pa
-.section s39pb
-.section s39qa
-.section s39qb
-.section s39ra
-.section s39rb
-.section s39sa
-.section s39sb
-.section s39ta
-.section s39tb
-.section s39ua
-.section s39ub
-.section s39va
-.section s39vb
-.section s39wa
-.section s39wb
-.section s39xa
-.section s39xb
-.section s39ya
-.section s39yb
-.section s39za
-.section s39zb
-.section s391a
-.section s391b
-.section s392a
-.section s392b
-.section s393a
-.section s393b
-.section s394a
-.section s394b
-.section s395a
-.section s395b
-.section s396a
-.section s396b
-.section s397a
-.section s397b
-.section s398a
-.section s398b
-.section s399a
-.section s399b
-.section s390a
-.section s390b
-.section s30aa
-.section s30ab
-.section s30ba
-.section s30bb
-.section s30ca
-.section s30cb
-.section s30da
-.section s30db
-.section s30ea
-.section s30eb
-.section s30fa
-.section s30fb
-.section s30ga
-.section s30gb
-.section s30ha
-.section s30hb
-.section s30ia
-.section s30ib
-.section s30ja
-.section s30jb
-.section s30ka
-.section s30kb
-.section s30la
-.section s30lb
-.section s30ma
-.section s30mb
-.section s30na
-.section s30nb
-.section s30oa
-.section s30ob
-.section s30pa
-.section s30pb
-.section s30qa
-.section s30qb
-.section s30ra
-.section s30rb
-.section s30sa
-.section s30sb
-.section s30ta
-.section s30tb
-.section s30ua
-.section s30ub
-.section s30va
-.section s30vb
-.section s30wa
-.section s30wb
-.section s30xa
-.section s30xb
-.section s30ya
-.section s30yb
-.section s30za
-.section s30zb
-.section s301a
-.section s301b
-.section s302a
-.section s302b
-.section s303a
-.section s303b
-.section s304a
-.section s304b
-.section s305a
-.section s305b
-.section s306a
-.section s306b
-.section s307a
-.section s307b
-.section s308a
-.section s308b
-.section s309a
-.section s309b
-.section s300a
-.section s300b
-.section s4aaa
-.section s4aab
-.section s4aba
-.section s4abb
-.section s4aca
-.section s4acb
-.section s4ada
-.section s4adb
-.section s4aea
-.section s4aeb
-.section s4afa
-.section s4afb
-.section s4aga
-.section s4agb
-.section s4aha
-.section s4ahb
-.section s4aia
-.section s4aib
-.section s4aja
-.section s4ajb
-.section s4aka
-.section s4akb
-.section s4ala
-.section s4alb
-.section s4ama
-.section s4amb
-.section s4ana
-.section s4anb
-.section s4aoa
-.section s4aob
-.section s4apa
-.section s4apb
-.section s4aqa
-.section s4aqb
-.section s4ara
-.section s4arb
-.section s4asa
-.section s4asb
-.section s4ata
-.section s4atb
-.section s4aua
-.section s4aub
-.section s4ava
-.section s4avb
-.section s4awa
-.section s4awb
-.section s4axa
-.section s4axb
-.section s4aya
-.section s4ayb
-.section s4aza
-.section s4azb
-.section s4a1a
-.section s4a1b
-.section s4a2a
-.section s4a2b
-.section s4a3a
-.section s4a3b
-.section s4a4a
-.section s4a4b
-.section s4a5a
-.section s4a5b
-.section s4a6a
-.section s4a6b
-.section s4a7a
-.section s4a7b
-.section s4a8a
-.section s4a8b
-.section s4a9a
-.section s4a9b
-.section s4a0a
-.section s4a0b
-.section s4baa
-.section s4bab
-.section s4bba
-.section s4bbb
-.section s4bca
-.section s4bcb
-.section s4bda
-.section s4bdb
-.section s4bea
-.section s4beb
-.section s4bfa
-.section s4bfb
-.section s4bga
-.section s4bgb
-.section s4bha
-.section s4bhb
-.section s4bia
-.section s4bib
-.section s4bja
-.section s4bjb
-.section s4bka
-.section s4bkb
-.section s4bla
-.section s4blb
-.section s4bma
-.section s4bmb
-.section s4bna
-.section s4bnb
-.section s4boa
-.section s4bob
-.section s4bpa
-.section s4bpb
-.section s4bqa
-.section s4bqb
-.section s4bra
-.section s4brb
-.section s4bsa
-.section s4bsb
-.section s4bta
-.section s4btb
-.section s4bua
-.section s4bub
-.section s4bva
-.section s4bvb
-.section s4bwa
-.section s4bwb
-.section s4bxa
-.section s4bxb
-.section s4bya
-.section s4byb
-.section s4bza
-.section s4bzb
-.section s4b1a
-.section s4b1b
-.section s4b2a
-.section s4b2b
-.section s4b3a
-.section s4b3b
-.section s4b4a
-.section s4b4b
-.section s4b5a
-.section s4b5b
-.section s4b6a
-.section s4b6b
-.section s4b7a
-.section s4b7b
-.section s4b8a
-.section s4b8b
-.section s4b9a
-.section s4b9b
-.section s4b0a
-.section s4b0b
-.section s4caa
-.section s4cab
-.section s4cba
-.section s4cbb
-.section s4cca
-.section s4ccb
-.section s4cda
-.section s4cdb
-.section s4cea
-.section s4ceb
-.section s4cfa
-.section s4cfb
-.section s4cga
-.section s4cgb
-.section s4cha
-.section s4chb
-.section s4cia
-.section s4cib
-.section s4cja
-.section s4cjb
-.section s4cka
-.section s4ckb
-.section s4cla
-.section s4clb
-.section s4cma
-.section s4cmb
-.section s4cna
-.section s4cnb
-.section s4coa
-.section s4cob
-.section s4cpa
-.section s4cpb
-.section s4cqa
-.section s4cqb
-.section s4cra
-.section s4crb
-.section s4csa
-.section s4csb
-.section s4cta
-.section s4ctb
-.section s4cua
-.section s4cub
-.section s4cva
-.section s4cvb
-.section s4cwa
-.section s4cwb
-.section s4cxa
-.section s4cxb
-.section s4cya
-.section s4cyb
-.section s4cza
-.section s4czb
-.section s4c1a
-.section s4c1b
-.section s4c2a
-.section s4c2b
-.section s4c3a
-.section s4c3b
-.section s4c4a
-.section s4c4b
-.section s4c5a
-.section s4c5b
-.section s4c6a
-.section s4c6b
-.section s4c7a
-.section s4c7b
-.section s4c8a
-.section s4c8b
-.section s4c9a
-.section s4c9b
-.section s4c0a
-.section s4c0b
-.section s4daa
-.section s4dab
-.section s4dba
-.section s4dbb
-.section s4dca
-.section s4dcb
-.section s4dda
-.section s4ddb
-.section s4dea
-.section s4deb
-.section s4dfa
-.section s4dfb
-.section s4dga
-.section s4dgb
-.section s4dha
-.section s4dhb
-.section s4dia
-.section s4dib
-.section s4dja
-.section s4djb
-.section s4dka
-.section s4dkb
-.section s4dla
-.section s4dlb
-.section s4dma
-.section s4dmb
-.section s4dna
-.section s4dnb
-.section s4doa
-.section s4dob
-.section s4dpa
-.section s4dpb
-.section s4dqa
-.section s4dqb
-.section s4dra
-.section s4drb
-.section s4dsa
-.section s4dsb
-.section s4dta
-.section s4dtb
-.section s4dua
-.section s4dub
-.section s4dva
-.section s4dvb
-.section s4dwa
-.section s4dwb
-.section s4dxa
-.section s4dxb
-.section s4dya
-.section s4dyb
-.section s4dza
-.section s4dzb
-.section s4d1a
-.section s4d1b
-.section s4d2a
-.section s4d2b
-.section s4d3a
-.section s4d3b
-.section s4d4a
-.section s4d4b
-.section s4d5a
-.section s4d5b
-.section s4d6a
-.section s4d6b
-.section s4d7a
-.section s4d7b
-.section s4d8a
-.section s4d8b
-.section s4d9a
-.section s4d9b
-.section s4d0a
-.section s4d0b
-.section s4eaa
-.section s4eab
-.section s4eba
-.section s4ebb
-.section s4eca
-.section s4ecb
-.section s4eda
-.section s4edb
-.section s4eea
-.section s4eeb
-.section s4efa
-.section s4efb
-.section s4ega
-.section s4egb
-.section s4eha
-.section s4ehb
-.section s4eia
-.section s4eib
-.section s4eja
-.section s4ejb
-.section s4eka
-.section s4ekb
-.section s4ela
-.section s4elb
-.section s4ema
-.section s4emb
-.section s4ena
-.section s4enb
-.section s4eoa
-.section s4eob
-.section s4epa
-.section s4epb
-.section s4eqa
-.section s4eqb
-.section s4era
-.section s4erb
-.section s4esa
-.section s4esb
-.section s4eta
-.section s4etb
-.section s4eua
-.section s4eub
-.section s4eva
-.section s4evb
-.section s4ewa
-.section s4ewb
-.section s4exa
-.section s4exb
-.section s4eya
-.section s4eyb
-.section s4eza
-.section s4ezb
-.section s4e1a
-.section s4e1b
-.section s4e2a
-.section s4e2b
-.section s4e3a
-.section s4e3b
-.section s4e4a
-.section s4e4b
-.section s4e5a
-.section s4e5b
-.section s4e6a
-.section s4e6b
-.section s4e7a
-.section s4e7b
-.section s4e8a
-.section s4e8b
-.section s4e9a
-.section s4e9b
-.section s4e0a
-.section s4e0b
-.section s4faa
-.section s4fab
-.section s4fba
-.section s4fbb
-.section s4fca
-.section s4fcb
-.section s4fda
-.section s4fdb
-.section s4fea
-.section s4feb
-.section s4ffa
-.section s4ffb
-.section s4fga
-.section s4fgb
-.section s4fha
-.section s4fhb
-.section s4fia
-.section s4fib
-.section s4fja
-.section s4fjb
-.section s4fka
-.section s4fkb
-.section s4fla
-.section s4flb
-.section s4fma
-.section s4fmb
-.section s4fna
-.section s4fnb
-.section s4foa
-.section s4fob
-.section s4fpa
-.section s4fpb
-.section s4fqa
-.section s4fqb
-.section s4fra
-.section s4frb
-.section s4fsa
-.section s4fsb
-.section s4fta
-.section s4ftb
-.section s4fua
-.section s4fub
-.section s4fva
-.section s4fvb
-.section s4fwa
-.section s4fwb
-.section s4fxa
-.section s4fxb
-.section s4fya
-.section s4fyb
-.section s4fza
-.section s4fzb
-.section s4f1a
-.section s4f1b
-.section s4f2a
-.section s4f2b
-.section s4f3a
-.section s4f3b
-.section s4f4a
-.section s4f4b
-.section s4f5a
-.section s4f5b
-.section s4f6a
-.section s4f6b
-.section s4f7a
-.section s4f7b
-.section s4f8a
-.section s4f8b
-.section s4f9a
-.section s4f9b
-.section s4f0a
-.section s4f0b
-.section s4gaa
-.section s4gab
-.section s4gba
-.section s4gbb
-.section s4gca
-.section s4gcb
-.section s4gda
-.section s4gdb
-.section s4gea
-.section s4geb
-.section s4gfa
-.section s4gfb
-.section s4gga
-.section s4ggb
-.section s4gha
-.section s4ghb
-.section s4gia
-.section s4gib
-.section s4gja
-.section s4gjb
-.section s4gka
-.section s4gkb
-.section s4gla
-.section s4glb
-.section s4gma
-.section s4gmb
-.section s4gna
-.section s4gnb
-.section s4goa
-.section s4gob
-.section s4gpa
-.section s4gpb
-.section s4gqa
-.section s4gqb
-.section s4gra
-.section s4grb
-.section s4gsa
-.section s4gsb
-.section s4gta
-.section s4gtb
-.section s4gua
-.section s4gub
-.section s4gva
-.section s4gvb
-.section s4gwa
-.section s4gwb
-.section s4gxa
-.section s4gxb
-.section s4gya
-.section s4gyb
-.section s4gza
-.section s4gzb
-.section s4g1a
-.section s4g1b
-.section s4g2a
-.section s4g2b
-.section s4g3a
-.section s4g3b
-.section s4g4a
-.section s4g4b
-.section s4g5a
-.section s4g5b
-.section s4g6a
-.section s4g6b
-.section s4g7a
-.section s4g7b
-.section s4g8a
-.section s4g8b
-.section s4g9a
-.section s4g9b
-.section s4g0a
-.section s4g0b
-.section s4haa
-.section s4hab
-.section s4hba
-.section s4hbb
-.section s4hca
-.section s4hcb
-.section s4hda
-.section s4hdb
-.section s4hea
-.section s4heb
-.section s4hfa
-.section s4hfb
-.section s4hga
-.section s4hgb
-.section s4hha
-.section s4hhb
-.section s4hia
-.section s4hib
-.section s4hja
-.section s4hjb
-.section s4hka
-.section s4hkb
-.section s4hla
-.section s4hlb
-.section s4hma
-.section s4hmb
-.section s4hna
-.section s4hnb
-.section s4hoa
-.section s4hob
-.section s4hpa
-.section s4hpb
-.section s4hqa
-.section s4hqb
-.section s4hra
-.section s4hrb
-.section s4hsa
-.section s4hsb
-.section s4hta
-.section s4htb
-.section s4hua
-.section s4hub
-.section s4hva
-.section s4hvb
-.section s4hwa
-.section s4hwb
-.section s4hxa
-.section s4hxb
-.section s4hya
-.section s4hyb
-.section s4hza
-.section s4hzb
-.section s4h1a
-.section s4h1b
-.section s4h2a
-.section s4h2b
-.section s4h3a
-.section s4h3b
-.section s4h4a
-.section s4h4b
-.section s4h5a
-.section s4h5b
-.section s4h6a
-.section s4h6b
-.section s4h7a
-.section s4h7b
-.section s4h8a
-.section s4h8b
-.section s4h9a
-.section s4h9b
-.section s4h0a
-.section s4h0b
-.section s4iaa
-.section s4iab
-.section s4iba
-.section s4ibb
-.section s4ica
-.section s4icb
-.section s4ida
-.section s4idb
-.section s4iea
-.section s4ieb
-.section s4ifa
-.section s4ifb
-.section s4iga
-.section s4igb
-.section s4iha
-.section s4ihb
-.section s4iia
-.section s4iib
-.section s4ija
-.section s4ijb
-.section s4ika
-.section s4ikb
-.section s4ila
-.section s4ilb
-.section s4ima
-.section s4imb
-.section s4ina
-.section s4inb
-.section s4ioa
-.section s4iob
-.section s4ipa
-.section s4ipb
-.section s4iqa
-.section s4iqb
-.section s4ira
-.section s4irb
-.section s4isa
-.section s4isb
-.section s4ita
-.section s4itb
-.section s4iua
-.section s4iub
-.section s4iva
-.section s4ivb
-.section s4iwa
-.section s4iwb
-.section s4ixa
-.section s4ixb
-.section s4iya
-.section s4iyb
-.section s4iza
-.section s4izb
-.section s4i1a
-.section s4i1b
-.section s4i2a
-.section s4i2b
-.section s4i3a
-.section s4i3b
-.section s4i4a
-.section s4i4b
-.section s4i5a
-.section s4i5b
-.section s4i6a
-.section s4i6b
-.section s4i7a
-.section s4i7b
-.section s4i8a
-.section s4i8b
-.section s4i9a
-.section s4i9b
-.section s4i0a
-.section s4i0b
-.section s4jaa
-.section s4jab
-.section s4jba
-.section s4jbb
-.section s4jca
-.section s4jcb
-.section s4jda
-.section s4jdb
-.section s4jea
-.section s4jeb
-.section s4jfa
-.section s4jfb
-.section s4jga
-.section s4jgb
-.section s4jha
-.section s4jhb
-.section s4jia
-.section s4jib
-.section s4jja
-.section s4jjb
-.section s4jka
-.section s4jkb
-.section s4jla
-.section s4jlb
-.section s4jma
-.section s4jmb
-.section s4jna
-.section s4jnb
-.section s4joa
-.section s4job
-.section s4jpa
-.section s4jpb
-.section s4jqa
-.section s4jqb
-.section s4jra
-.section s4jrb
-.section s4jsa
-.section s4jsb
-.section s4jta
-.section s4jtb
-.section s4jua
-.section s4jub
-.section s4jva
-.section s4jvb
-.section s4jwa
-.section s4jwb
-.section s4jxa
-.section s4jxb
-.section s4jya
-.section s4jyb
-.section s4jza
-.section s4jzb
-.section s4j1a
-.section s4j1b
-.section s4j2a
-.section s4j2b
-.section s4j3a
-.section s4j3b
-.section s4j4a
-.section s4j4b
-.section s4j5a
-.section s4j5b
-.section s4j6a
-.section s4j6b
-.section s4j7a
-.section s4j7b
-.section s4j8a
-.section s4j8b
-.section s4j9a
-.section s4j9b
-.section s4j0a
-.section s4j0b
-.section s4kaa
-.section s4kab
-.section s4kba
-.section s4kbb
-.section s4kca
-.section s4kcb
-.section s4kda
-.section s4kdb
-.section s4kea
-.section s4keb
-.section s4kfa
-.section s4kfb
-.section s4kga
-.section s4kgb
-.section s4kha
-.section s4khb
-.section s4kia
-.section s4kib
-.section s4kja
-.section s4kjb
-.section s4kka
-.section s4kkb
-.section s4kla
-.section s4klb
-.section s4kma
-.section s4kmb
-.section s4kna
-.section s4knb
-.section s4koa
-.section s4kob
-.section s4kpa
-.section s4kpb
-.section s4kqa
-.section s4kqb
-.section s4kra
-.section s4krb
-.section s4ksa
-.section s4ksb
-.section s4kta
-.section s4ktb
-.section s4kua
-.section s4kub
-.section s4kva
-.section s4kvb
-.section s4kwa
-.section s4kwb
-.section s4kxa
-.section s4kxb
-.section s4kya
-.section s4kyb
-.section s4kza
-.section s4kzb
-.section s4k1a
-.section s4k1b
-.section s4k2a
-.section s4k2b
-.section s4k3a
-.section s4k3b
-.section s4k4a
-.section s4k4b
-.section s4k5a
-.section s4k5b
-.section s4k6a
-.section s4k6b
-.section s4k7a
-.section s4k7b
-.section s4k8a
-.section s4k8b
-.section s4k9a
-.section s4k9b
-.section s4k0a
-.section s4k0b
-.section s4laa
-.section s4lab
-.section s4lba
-.section s4lbb
-.section s4lca
-.section s4lcb
-.section s4lda
-.section s4ldb
-.section s4lea
-.section s4leb
-.section s4lfa
-.section s4lfb
-.section s4lga
-.section s4lgb
-.section s4lha
-.section s4lhb
-.section s4lia
-.section s4lib
-.section s4lja
-.section s4ljb
-.section s4lka
-.section s4lkb
-.section s4lla
-.section s4llb
-.section s4lma
-.section s4lmb
-.section s4lna
-.section s4lnb
-.section s4loa
-.section s4lob
-.section s4lpa
-.section s4lpb
-.section s4lqa
-.section s4lqb
-.section s4lra
-.section s4lrb
-.section s4lsa
-.section s4lsb
-.section s4lta
-.section s4ltb
-.section s4lua
-.section s4lub
-.section s4lva
-.section s4lvb
-.section s4lwa
-.section s4lwb
-.section s4lxa
-.section s4lxb
-.section s4lya
-.section s4lyb
-.section s4lza
-.section s4lzb
-.section s4l1a
-.section s4l1b
-.section s4l2a
-.section s4l2b
-.section s4l3a
-.section s4l3b
-.section s4l4a
-.section s4l4b
-.section s4l5a
-.section s4l5b
-.section s4l6a
-.section s4l6b
-.section s4l7a
-.section s4l7b
-.section s4l8a
-.section s4l8b
-.section s4l9a
-.section s4l9b
-.section s4l0a
-.section s4l0b
-.section s4maa
-.section s4mab
-.section s4mba
-.section s4mbb
-.section s4mca
-.section s4mcb
-.section s4mda
-.section s4mdb
-.section s4mea
-.section s4meb
-.section s4mfa
-.section s4mfb
-.section s4mga
-.section s4mgb
-.section s4mha
-.section s4mhb
-.section s4mia
-.section s4mib
-.section s4mja
-.section s4mjb
-.section s4mka
-.section s4mkb
-.section s4mla
-.section s4mlb
-.section s4mma
-.section s4mmb
-.section s4mna
-.section s4mnb
-.section s4moa
-.section s4mob
-.section s4mpa
-.section s4mpb
-.section s4mqa
-.section s4mqb
-.section s4mra
-.section s4mrb
-.section s4msa
-.section s4msb
-.section s4mta
-.section s4mtb
-.section s4mua
-.section s4mub
-.section s4mva
-.section s4mvb
-.section s4mwa
-.section s4mwb
-.section s4mxa
-.section s4mxb
-.section s4mya
-.section s4myb
-.section s4mza
-.section s4mzb
-.section s4m1a
-.section s4m1b
-.section s4m2a
-.section s4m2b
-.section s4m3a
-.section s4m3b
-.section s4m4a
-.section s4m4b
-.section s4m5a
-.section s4m5b
-.section s4m6a
-.section s4m6b
-.section s4m7a
-.section s4m7b
-.section s4m8a
-.section s4m8b
-.section s4m9a
-.section s4m9b
-.section s4m0a
-.section s4m0b
-.section s4naa
-.section s4nab
-.section s4nba
-.section s4nbb
-.section s4nca
-.section s4ncb
-.section s4nda
-.section s4ndb
-.section s4nea
-.section s4neb
-.section s4nfa
-.section s4nfb
-.section s4nga
-.section s4ngb
-.section s4nha
-.section s4nhb
-.section s4nia
-.section s4nib
-.section s4nja
-.section s4njb
-.section s4nka
-.section s4nkb
-.section s4nla
-.section s4nlb
-.section s4nma
-.section s4nmb
-.section s4nna
-.section s4nnb
-.section s4noa
-.section s4nob
-.section s4npa
-.section s4npb
-.section s4nqa
-.section s4nqb
-.section s4nra
-.section s4nrb
-.section s4nsa
-.section s4nsb
-.section s4nta
-.section s4ntb
-.section s4nua
-.section s4nub
-.section s4nva
-.section s4nvb
-.section s4nwa
-.section s4nwb
-.section s4nxa
-.section s4nxb
-.section s4nya
-.section s4nyb
-.section s4nza
-.section s4nzb
-.section s4n1a
-.section s4n1b
-.section s4n2a
-.section s4n2b
-.section s4n3a
-.section s4n3b
-.section s4n4a
-.section s4n4b
-.section s4n5a
-.section s4n5b
-.section s4n6a
-.section s4n6b
-.section s4n7a
-.section s4n7b
-.section s4n8a
-.section s4n8b
-.section s4n9a
-.section s4n9b
-.section s4n0a
-.section s4n0b
-.section s4oaa
-.section s4oab
-.section s4oba
-.section s4obb
-.section s4oca
-.section s4ocb
-.section s4oda
-.section s4odb
-.section s4oea
-.section s4oeb
-.section s4ofa
-.section s4ofb
-.section s4oga
-.section s4ogb
-.section s4oha
-.section s4ohb
-.section s4oia
-.section s4oib
-.section s4oja
-.section s4ojb
-.section s4oka
-.section s4okb
-.section s4ola
-.section s4olb
-.section s4oma
-.section s4omb
-.section s4ona
-.section s4onb
-.section s4ooa
-.section s4oob
-.section s4opa
-.section s4opb
-.section s4oqa
-.section s4oqb
-.section s4ora
-.section s4orb
-.section s4osa
-.section s4osb
-.section s4ota
-.section s4otb
-.section s4oua
-.section s4oub
-.section s4ova
-.section s4ovb
-.section s4owa
-.section s4owb
-.section s4oxa
-.section s4oxb
-.section s4oya
-.section s4oyb
-.section s4oza
-.section s4ozb
-.section s4o1a
-.section s4o1b
-.section s4o2a
-.section s4o2b
-.section s4o3a
-.section s4o3b
-.section s4o4a
-.section s4o4b
-.section s4o5a
-.section s4o5b
-.section s4o6a
-.section s4o6b
-.section s4o7a
-.section s4o7b
-.section s4o8a
-.section s4o8b
-.section s4o9a
-.section s4o9b
-.section s4o0a
-.section s4o0b
-.section s4paa
-.section s4pab
-.section s4pba
-.section s4pbb
-.section s4pca
-.section s4pcb
-.section s4pda
-.section s4pdb
-.section s4pea
-.section s4peb
-.section s4pfa
-.section s4pfb
-.section s4pga
-.section s4pgb
-.section s4pha
-.section s4phb
-.section s4pia
-.section s4pib
-.section s4pja
-.section s4pjb
-.section s4pka
-.section s4pkb
-.section s4pla
-.section s4plb
-.section s4pma
-.section s4pmb
-.section s4pna
-.section s4pnb
-.section s4poa
-.section s4pob
-.section s4ppa
-.section s4ppb
-.section s4pqa
-.section s4pqb
-.section s4pra
-.section s4prb
-.section s4psa
-.section s4psb
-.section s4pta
-.section s4ptb
-.section s4pua
-.section s4pub
-.section s4pva
-.section s4pvb
-.section s4pwa
-.section s4pwb
-.section s4pxa
-.section s4pxb
-.section s4pya
-.section s4pyb
-.section s4pza
-.section s4pzb
-.section s4p1a
-.section s4p1b
-.section s4p2a
-.section s4p2b
-.section s4p3a
-.section s4p3b
-.section s4p4a
-.section s4p4b
-.section s4p5a
-.section s4p5b
-.section s4p6a
-.section s4p6b
-.section s4p7a
-.section s4p7b
-.section s4p8a
-.section s4p8b
-.section s4p9a
-.section s4p9b
-.section s4p0a
-.section s4p0b
-.section s4qaa
-.section s4qab
-.section s4qba
-.section s4qbb
-.section s4qca
-.section s4qcb
-.section s4qda
-.section s4qdb
-.section s4qea
-.section s4qeb
-.section s4qfa
-.section s4qfb
-.section s4qga
-.section s4qgb
-.section s4qha
-.section s4qhb
-.section s4qia
-.section s4qib
-.section s4qja
-.section s4qjb
-.section s4qka
-.section s4qkb
-.section s4qla
-.section s4qlb
-.section s4qma
-.section s4qmb
-.section s4qna
-.section s4qnb
-.section s4qoa
-.section s4qob
-.section s4qpa
-.section s4qpb
-.section s4qqa
-.section s4qqb
-.section s4qra
-.section s4qrb
-.section s4qsa
-.section s4qsb
-.section s4qta
-.section s4qtb
-.section s4qua
-.section s4qub
-.section s4qva
-.section s4qvb
-.section s4qwa
-.section s4qwb
-.section s4qxa
-.section s4qxb
-.section s4qya
-.section s4qyb
-.section s4qza
-.section s4qzb
-.section s4q1a
-.section s4q1b
-.section s4q2a
-.section s4q2b
-.section s4q3a
-.section s4q3b
-.section s4q4a
-.section s4q4b
-.section s4q5a
-.section s4q5b
-.section s4q6a
-.section s4q6b
-.section s4q7a
-.section s4q7b
-.section s4q8a
-.section s4q8b
-.section s4q9a
-.section s4q9b
-.section s4q0a
-.section s4q0b
-.section s4raa
-.section s4rab
-.section s4rba
-.section s4rbb
-.section s4rca
-.section s4rcb
-.section s4rda
-.section s4rdb
-.section s4rea
-.section s4reb
-.section s4rfa
-.section s4rfb
-.section s4rga
-.section s4rgb
-.section s4rha
-.section s4rhb
-.section s4ria
-.section s4rib
-.section s4rja
-.section s4rjb
-.section s4rka
-.section s4rkb
-.section s4rla
-.section s4rlb
-.section s4rma
-.section s4rmb
-.section s4rna
-.section s4rnb
-.section s4roa
-.section s4rob
-.section s4rpa
-.section s4rpb
-.section s4rqa
-.section s4rqb
-.section s4rra
-.section s4rrb
-.section s4rsa
-.section s4rsb
-.section s4rta
-.section s4rtb
-.section s4rua
-.section s4rub
-.section s4rva
-.section s4rvb
-.section s4rwa
-.section s4rwb
-.section s4rxa
-.section s4rxb
-.section s4rya
-.section s4ryb
-.section s4rza
-.section s4rzb
-.section s4r1a
-.section s4r1b
-.section s4r2a
-.section s4r2b
-.section s4r3a
-.section s4r3b
-.section s4r4a
-.section s4r4b
-.section s4r5a
-.section s4r5b
-.section s4r6a
-.section s4r6b
-.section s4r7a
-.section s4r7b
-.section s4r8a
-.section s4r8b
-.section s4r9a
-.section s4r9b
-.section s4r0a
-.section s4r0b
-.section s4saa
-.section s4sab
-.section s4sba
-.section s4sbb
-.section s4sca
-.section s4scb
-.section s4sda
-.section s4sdb
-.section s4sea
-.section s4seb
-.section s4sfa
-.section s4sfb
-.section s4sga
-.section s4sgb
-.section s4sha
-.section s4shb
-.section s4sia
-.section s4sib
-.section s4sja
-.section s4sjb
-.section s4ska
-.section s4skb
-.section s4sla
-.section s4slb
-.section s4sma
-.section s4smb
-.section s4sna
-.section s4snb
-.section s4soa
-.section s4sob
-.section s4spa
-.section s4spb
-.section s4sqa
-.section s4sqb
-.section s4sra
-.section s4srb
-.section s4ssa
-.section s4ssb
-.section s4sta
-.section s4stb
-.section s4sua
-.section s4sub
-.section s4sva
-.section s4svb
-.section s4swa
-.section s4swb
-.section s4sxa
-.section s4sxb
-.section s4sya
-.section s4syb
-.section s4sza
-.section s4szb
-.section s4s1a
-.section s4s1b
-.section s4s2a
-.section s4s2b
-.section s4s3a
-.section s4s3b
-.section s4s4a
-.section s4s4b
-.section s4s5a
-.section s4s5b
-.section s4s6a
-.section s4s6b
-.section s4s7a
-.section s4s7b
-.section s4s8a
-.section s4s8b
-.section s4s9a
-.section s4s9b
-.section s4s0a
-.section s4s0b
-.section s4taa
-.section s4tab
-.section s4tba
-.section s4tbb
-.section s4tca
-.section s4tcb
-.section s4tda
-.section s4tdb
-.section s4tea
-.section s4teb
-.section s4tfa
-.section s4tfb
-.section s4tga
-.section s4tgb
-.section s4tha
-.section s4thb
-.section s4tia
-.section s4tib
-.section s4tja
-.section s4tjb
-.section s4tka
-.section s4tkb
-.section s4tla
-.section s4tlb
-.section s4tma
-.section s4tmb
-.section s4tna
-.section s4tnb
-.section s4toa
-.section s4tob
-.section s4tpa
-.section s4tpb
-.section s4tqa
-.section s4tqb
-.section s4tra
-.section s4trb
-.section s4tsa
-.section s4tsb
-.section s4tta
-.section s4ttb
-.section s4tua
-.section s4tub
-.section s4tva
-.section s4tvb
-.section s4twa
-.section s4twb
-.section s4txa
-.section s4txb
-.section s4tya
-.section s4tyb
-.section s4tza
-.section s4tzb
-.section s4t1a
-.section s4t1b
-.section s4t2a
-.section s4t2b
-.section s4t3a
-.section s4t3b
-.section s4t4a
-.section s4t4b
-.section s4t5a
-.section s4t5b
-.section s4t6a
-.section s4t6b
-.section s4t7a
-.section s4t7b
-.section s4t8a
-.section s4t8b
-.section s4t9a
-.section s4t9b
-.section s4t0a
-.section s4t0b
-.section s4uaa
-.section s4uab
-.section s4uba
-.section s4ubb
-.section s4uca
-.section s4ucb
-.section s4uda
-.section s4udb
-.section s4uea
-.section s4ueb
-.section s4ufa
-.section s4ufb
-.section s4uga
-.section s4ugb
-.section s4uha
-.section s4uhb
-.section s4uia
-.section s4uib
-.section s4uja
-.section s4ujb
-.section s4uka
-.section s4ukb
-.section s4ula
-.section s4ulb
-.section s4uma
-.section s4umb
-.section s4una
-.section s4unb
-.section s4uoa
-.section s4uob
-.section s4upa
-.section s4upb
-.section s4uqa
-.section s4uqb
-.section s4ura
-.section s4urb
-.section s4usa
-.section s4usb
-.section s4uta
-.section s4utb
-.section s4uua
-.section s4uub
-.section s4uva
-.section s4uvb
-.section s4uwa
-.section s4uwb
-.section s4uxa
-.section s4uxb
-.section s4uya
-.section s4uyb
-.section s4uza
-.section s4uzb
-.section s4u1a
-.section s4u1b
-.section s4u2a
-.section s4u2b
-.section s4u3a
-.section s4u3b
-.section s4u4a
-.section s4u4b
-.section s4u5a
-.section s4u5b
-.section s4u6a
-.section s4u6b
-.section s4u7a
-.section s4u7b
-.section s4u8a
-.section s4u8b
-.section s4u9a
-.section s4u9b
-.section s4u0a
-.section s4u0b
-.section s4vaa
-.section s4vab
-.section s4vba
-.section s4vbb
-.section s4vca
-.section s4vcb
-.section s4vda
-.section s4vdb
-.section s4vea
-.section s4veb
-.section s4vfa
-.section s4vfb
-.section s4vga
-.section s4vgb
-.section s4vha
-.section s4vhb
-.section s4via
-.section s4vib
-.section s4vja
-.section s4vjb
-.section s4vka
-.section s4vkb
-.section s4vla
-.section s4vlb
-.section s4vma
-.section s4vmb
-.section s4vna
-.section s4vnb
-.section s4voa
-.section s4vob
-.section s4vpa
-.section s4vpb
-.section s4vqa
-.section s4vqb
-.section s4vra
-.section s4vrb
-.section s4vsa
-.section s4vsb
-.section s4vta
-.section s4vtb
-.section s4vua
-.section s4vub
-.section s4vva
-.section s4vvb
-.section s4vwa
-.section s4vwb
-.section s4vxa
-.section s4vxb
-.section s4vya
-.section s4vyb
-.section s4vza
-.section s4vzb
-.section s4v1a
-.section s4v1b
-.section s4v2a
-.section s4v2b
-.section s4v3a
-.section s4v3b
-.section s4v4a
-.section s4v4b
-.section s4v5a
-.section s4v5b
-.section s4v6a
-.section s4v6b
-.section s4v7a
-.section s4v7b
-.section s4v8a
-.section s4v8b
-.section s4v9a
-.section s4v9b
-.section s4v0a
-.section s4v0b
-.section s4waa
-.section s4wab
-.section s4wba
-.section s4wbb
-.section s4wca
-.section s4wcb
-.section s4wda
-.section s4wdb
-.section s4wea
-.section s4web
-.section s4wfa
-.section s4wfb
-.section s4wga
-.section s4wgb
-.section s4wha
-.section s4whb
-.section s4wia
-.section s4wib
-.section s4wja
-.section s4wjb
-.section s4wka
-.section s4wkb
-.section s4wla
-.section s4wlb
-.section s4wma
-.section s4wmb
-.section s4wna
-.section s4wnb
-.section s4woa
-.section s4wob
-.section s4wpa
-.section s4wpb
-.section s4wqa
-.section s4wqb
-.section s4wra
-.section s4wrb
-.section s4wsa
-.section s4wsb
-.section s4wta
-.section s4wtb
-.section s4wua
-.section s4wub
-.section s4wva
-.section s4wvb
-.section s4wwa
-.section s4wwb
-.section s4wxa
-.section s4wxb
-.section s4wya
-.section s4wyb
-.section s4wza
-.section s4wzb
-.section s4w1a
-.section s4w1b
-.section s4w2a
-.section s4w2b
-.section s4w3a
-.section s4w3b
-.section s4w4a
-.section s4w4b
-.section s4w5a
-.section s4w5b
-.section s4w6a
-.section s4w6b
-.section s4w7a
-.section s4w7b
-.section s4w8a
-.section s4w8b
-.section s4w9a
-.section s4w9b
-.section s4w0a
-.section s4w0b
-.section s4xaa
-.section s4xab
-.section s4xba
-.section s4xbb
-.section s4xca
-.section s4xcb
-.section s4xda
-.section s4xdb
-.section s4xea
-.section s4xeb
-.section s4xfa
-.section s4xfb
-.section s4xga
-.section s4xgb
-.section s4xha
-.section s4xhb
-.section s4xia
-.section s4xib
-.section s4xja
-.section s4xjb
-.section s4xka
-.section s4xkb
-.section s4xla
-.section s4xlb
-.section s4xma
-.section s4xmb
-.section s4xna
-.section s4xnb
-.section s4xoa
-.section s4xob
-.section s4xpa
-.section s4xpb
-.section s4xqa
-.section s4xqb
-.section s4xra
-.section s4xrb
-.section s4xsa
-.section s4xsb
-.section s4xta
-.section s4xtb
-.section s4xua
-.section s4xub
-.section s4xva
-.section s4xvb
-.section s4xwa
-.section s4xwb
-.section s4xxa
-.section s4xxb
-.section s4xya
-.section s4xyb
-.section s4xza
-.section s4xzb
-.section s4x1a
-.section s4x1b
-.section s4x2a
-.section s4x2b
-.section s4x3a
-.section s4x3b
-.section s4x4a
-.section s4x4b
-.section s4x5a
-.section s4x5b
-.section s4x6a
-.section s4x6b
-.section s4x7a
-.section s4x7b
-.section s4x8a
-.section s4x8b
-.section s4x9a
-.section s4x9b
-.section s4x0a
-.section s4x0b
-.section s4yaa
-.section s4yab
-.section s4yba
-.section s4ybb
-.section s4yca
-.section s4ycb
-.section s4yda
-.section s4ydb
-.section s4yea
-.section s4yeb
-.section s4yfa
-.section s4yfb
-.section s4yga
-.section s4ygb
-.section s4yha
-.section s4yhb
-.section s4yia
-.section s4yib
-.section s4yja
-.section s4yjb
-.section s4yka
-.section s4ykb
-.section s4yla
-.section s4ylb
-.section s4yma
-.section s4ymb
-.section s4yna
-.section s4ynb
-.section s4yoa
-.section s4yob
-.section s4ypa
-.section s4ypb
-.section s4yqa
-.section s4yqb
-.section s4yra
-.section s4yrb
-.section s4ysa
-.section s4ysb
-.section s4yta
-.section s4ytb
-.section s4yua
-.section s4yub
-.section s4yva
-.section s4yvb
-.section s4ywa
-.section s4ywb
-.section s4yxa
-.section s4yxb
-.section s4yya
-.section s4yyb
-.section s4yza
-.section s4yzb
-.section s4y1a
-.section s4y1b
-.section s4y2a
-.section s4y2b
-.section s4y3a
-.section s4y3b
-.section s4y4a
-.section s4y4b
-.section s4y5a
-.section s4y5b
-.section s4y6a
-.section s4y6b
-.section s4y7a
-.section s4y7b
-.section s4y8a
-.section s4y8b
-.section s4y9a
-.section s4y9b
-.section s4y0a
-.section s4y0b
-.section s4zaa
-.section s4zab
-.section s4zba
-.section s4zbb
-.section s4zca
-.section s4zcb
-.section s4zda
-.section s4zdb
-.section s4zea
-.section s4zeb
-.section s4zfa
-.section s4zfb
-.section s4zga
-.section s4zgb
-.section s4zha
-.section s4zhb
-.section s4zia
-.section s4zib
-.section s4zja
-.section s4zjb
-.section s4zka
-.section s4zkb
-.section s4zla
-.section s4zlb
-.section s4zma
-.section s4zmb
-.section s4zna
-.section s4znb
-.section s4zoa
-.section s4zob
-.section s4zpa
-.section s4zpb
-.section s4zqa
-.section s4zqb
-.section s4zra
-.section s4zrb
-.section s4zsa
-.section s4zsb
-.section s4zta
-.section s4ztb
-.section s4zua
-.section s4zub
-.section s4zva
-.section s4zvb
-.section s4zwa
-.section s4zwb
-.section s4zxa
-.section s4zxb
-.section s4zya
-.section s4zyb
-.section s4zza
-.section s4zzb
-.section s4z1a
-.section s4z1b
-.section s4z2a
-.section s4z2b
-.section s4z3a
-.section s4z3b
-.section s4z4a
-.section s4z4b
-.section s4z5a
-.section s4z5b
-.section s4z6a
-.section s4z6b
-.section s4z7a
-.section s4z7b
-.section s4z8a
-.section s4z8b
-.section s4z9a
-.section s4z9b
-.section s4z0a
-.section s4z0b
-.section s41aa
-.section s41ab
-.section s41ba
-.section s41bb
-.section s41ca
-.section s41cb
-.section s41da
-.section s41db
-.section s41ea
-.section s41eb
-.section s41fa
-.section s41fb
-.section s41ga
-.section s41gb
-.section s41ha
-.section s41hb
-.section s41ia
-.section s41ib
-.section s41ja
-.section s41jb
-.section s41ka
-.section s41kb
-.section s41la
-.section s41lb
-.section s41ma
-.section s41mb
-.section s41na
-.section s41nb
-.section s41oa
-.section s41ob
-.section s41pa
-.section s41pb
-.section s41qa
-.section s41qb
-.section s41ra
-.section s41rb
-.section s41sa
-.section s41sb
-.section s41ta
-.section s41tb
-.section s41ua
-.section s41ub
-.section s41va
-.section s41vb
-.section s41wa
-.section s41wb
-.section s41xa
-.section s41xb
-.section s41ya
-.section s41yb
-.section s41za
-.section s41zb
-.section s411a
-.section s411b
-.section s412a
-.section s412b
-.section s413a
-.section s413b
-.section s414a
-.section s414b
-.section s415a
-.section s415b
-.section s416a
-.section s416b
-.section s417a
-.section s417b
-.section s418a
-.section s418b
-.section s419a
-.section s419b
-.section s410a
-.section s410b
-.section s42aa
-.section s42ab
-.section s42ba
-.section s42bb
-.section s42ca
-.section s42cb
-.section s42da
-.section s42db
-.section s42ea
-.section s42eb
-.section s42fa
-.section s42fb
-.section s42ga
-.section s42gb
-.section s42ha
-.section s42hb
-.section s42ia
-.section s42ib
-.section s42ja
-.section s42jb
-.section s42ka
-.section s42kb
-.section s42la
-.section s42lb
-.section s42ma
-.section s42mb
-.section s42na
-.section s42nb
-.section s42oa
-.section s42ob
-.section s42pa
-.section s42pb
-.section s42qa
-.section s42qb
-.section s42ra
-.section s42rb
-.section s42sa
-.section s42sb
-.section s42ta
-.section s42tb
-.section s42ua
-.section s42ub
-.section s42va
-.section s42vb
-.section s42wa
-.section s42wb
-.section s42xa
-.section s42xb
-.section s42ya
-.section s42yb
-.section s42za
-.section s42zb
-.section s421a
-.section s421b
-.section s422a
-.section s422b
-.section s423a
-.section s423b
-.section s424a
-.section s424b
-.section s425a
-.section s425b
-.section s426a
-.section s426b
-.section s427a
-.section s427b
-.section s428a
-.section s428b
-.section s429a
-.section s429b
-.section s420a
-.section s420b
-.section s43aa
-.section s43ab
-.section s43ba
-.section s43bb
-.section s43ca
-.section s43cb
-.section s43da
-.section s43db
-.section s43ea
-.section s43eb
-.section s43fa
-.section s43fb
-.section s43ga
-.section s43gb
-.section s43ha
-.section s43hb
-.section s43ia
-.section s43ib
-.section s43ja
-.section s43jb
-.section s43ka
-.section s43kb
-.section s43la
-.section s43lb
-.section s43ma
-.section s43mb
-.section s43na
-.section s43nb
-.section s43oa
-.section s43ob
-.section s43pa
-.section s43pb
-.section s43qa
-.section s43qb
-.section s43ra
-.section s43rb
-.section s43sa
-.section s43sb
-.section s43ta
-.section s43tb
-.section s43ua
-.section s43ub
-.section s43va
-.section s43vb
-.section s43wa
-.section s43wb
-.section s43xa
-.section s43xb
-.section s43ya
-.section s43yb
-.section s43za
-.section s43zb
-.section s431a
-.section s431b
-.section s432a
-.section s432b
-.section s433a
-.section s433b
-.section s434a
-.section s434b
-.section s435a
-.section s435b
-.section s436a
-.section s436b
-.section s437a
-.section s437b
-.section s438a
-.section s438b
-.section s439a
-.section s439b
-.section s430a
-.section s430b
-.section s44aa
-.section s44ab
-.section s44ba
-.section s44bb
-.section s44ca
-.section s44cb
-.section s44da
-.section s44db
-.section s44ea
-.section s44eb
-.section s44fa
-.section s44fb
-.section s44ga
-.section s44gb
-.section s44ha
-.section s44hb
-.section s44ia
-.section s44ib
-.section s44ja
-.section s44jb
-.section s44ka
-.section s44kb
-.section s44la
-.section s44lb
-.section s44ma
-.section s44mb
-.section s44na
-.section s44nb
-.section s44oa
-.section s44ob
-.section s44pa
-.section s44pb
-.section s44qa
-.section s44qb
-.section s44ra
-.section s44rb
-.section s44sa
-.section s44sb
-.section s44ta
-.section s44tb
-.section s44ua
-.section s44ub
-.section s44va
-.section s44vb
-.section s44wa
-.section s44wb
-.section s44xa
-.section s44xb
-.section s44ya
-.section s44yb
-.section s44za
-.section s44zb
-.section s441a
-.section s441b
-.section s442a
-.section s442b
-.section s443a
-.section s443b
-.section s444a
-.section s444b
-.section s445a
-.section s445b
-.section s446a
-.section s446b
-.section s447a
-.section s447b
-.section s448a
-.section s448b
-.section s449a
-.section s449b
-.section s440a
-.section s440b
-.section s45aa
-.section s45ab
-.section s45ba
-.section s45bb
-.section s45ca
-.section s45cb
-.section s45da
-.section s45db
-.section s45ea
-.section s45eb
-.section s45fa
-.section s45fb
-.section s45ga
-.section s45gb
-.section s45ha
-.section s45hb
-.section s45ia
-.section s45ib
-.section s45ja
-.section s45jb
-.section s45ka
-.section s45kb
-.section s45la
-.section s45lb
-.section s45ma
-.section s45mb
-.section s45na
-.section s45nb
-.section s45oa
-.section s45ob
-.section s45pa
-.section s45pb
-.section s45qa
-.section s45qb
-.section s45ra
-.section s45rb
-.section s45sa
-.section s45sb
-.section s45ta
-.section s45tb
-.section s45ua
-.section s45ub
-.section s45va
-.section s45vb
-.section s45wa
-.section s45wb
-.section s45xa
-.section s45xb
-.section s45ya
-.section s45yb
-.section s45za
-.section s45zb
-.section s451a
-.section s451b
-.section s452a
-.section s452b
-.section s453a
-.section s453b
-.section s454a
-.section s454b
-.section s455a
-.section s455b
-.section s456a
-.section s456b
-.section s457a
-.section s457b
-.section s458a
-.section s458b
-.section s459a
-.section s459b
-.section s450a
-.section s450b
-.section s46aa
-.section s46ab
-.section s46ba
-.section s46bb
-.section s46ca
-.section s46cb
-.section s46da
-.section s46db
-.section s46ea
-.section s46eb
-.section s46fa
-.section s46fb
-.section s46ga
-.section s46gb
-.section s46ha
-.section s46hb
-.section s46ia
-.section s46ib
-.section s46ja
-.section s46jb
-.section s46ka
-.section s46kb
-.section s46la
-.section s46lb
-.section s46ma
-.section s46mb
-.section s46na
-.section s46nb
-.section s46oa
-.section s46ob
-.section s46pa
-.section s46pb
-.section s46qa
-.section s46qb
-.section s46ra
-.section s46rb
-.section s46sa
-.section s46sb
-.section s46ta
-.section s46tb
-.section s46ua
-.section s46ub
-.section s46va
-.section s46vb
-.section s46wa
-.section s46wb
-.section s46xa
-.section s46xb
-.section s46ya
-.section s46yb
-.section s46za
-.section s46zb
-.section s461a
-.section s461b
-.section s462a
-.section s462b
-.section s463a
-.section s463b
-.section s464a
-.section s464b
-.section s465a
-.section s465b
-.section s466a
-.section s466b
-.section s467a
-.section s467b
-.section s468a
-.section s468b
-.section s469a
-.section s469b
-.section s460a
-.section s460b
-.section s47aa
-.section s47ab
-.section s47ba
-.section s47bb
-.section s47ca
-.section s47cb
-.section s47da
-.section s47db
-.section s47ea
-.section s47eb
-.section s47fa
-.section s47fb
-.section s47ga
-.section s47gb
-.section s47ha
-.section s47hb
-.section s47ia
-.section s47ib
-.section s47ja
-.section s47jb
-.section s47ka
-.section s47kb
-.section s47la
-.section s47lb
-.section s47ma
-.section s47mb
-.section s47na
-.section s47nb
-.section s47oa
-.section s47ob
-.section s47pa
-.section s47pb
-.section s47qa
-.section s47qb
-.section s47ra
-.section s47rb
-.section s47sa
-.section s47sb
-.section s47ta
-.section s47tb
-.section s47ua
-.section s47ub
-.section s47va
-.section s47vb
-.section s47wa
-.section s47wb
-.section s47xa
-.section s47xb
-.section s47ya
-.section s47yb
-.section s47za
-.section s47zb
-.section s471a
-.section s471b
-.section s472a
-.section s472b
-.section s473a
-.section s473b
-.section s474a
-.section s474b
-.section s475a
-.section s475b
-.section s476a
-.section s476b
-.section s477a
-.section s477b
-.section s478a
-.section s478b
-.section s479a
-.section s479b
-.section s470a
-.section s470b
-.section s48aa
-.section s48ab
-.section s48ba
-.section s48bb
-.section s48ca
-.section s48cb
-.section s48da
-.section s48db
-.section s48ea
-.section s48eb
-.section s48fa
-.section s48fb
-.section s48ga
-.section s48gb
-.section s48ha
-.section s48hb
-.section s48ia
-.section s48ib
-.section s48ja
-.section s48jb
-.section s48ka
-.section s48kb
-.section s48la
-.section s48lb
-.section s48ma
-.section s48mb
-.section s48na
-.section s48nb
-.section s48oa
-.section s48ob
-.section s48pa
-.section s48pb
-.section s48qa
-.section s48qb
-.section s48ra
-.section s48rb
-.section s48sa
-.section s48sb
-.section s48ta
-.section s48tb
-.section s48ua
-.section s48ub
-.section s48va
-.section s48vb
-.section s48wa
-.section s48wb
-.section s48xa
-.section s48xb
-.section s48ya
-.section s48yb
-.section s48za
-.section s48zb
-.section s481a
-.section s481b
-.section s482a
-.section s482b
-.section s483a
-.section s483b
-.section s484a
-.section s484b
-.section s485a
-.section s485b
-.section s486a
-.section s486b
-.section s487a
-.section s487b
-.section s488a
-.section s488b
-.section s489a
-.section s489b
-.section s480a
-.section s480b
-.section s49aa
-.section s49ab
-.section s49ba
-.section s49bb
-.section s49ca
-.section s49cb
-.section s49da
-.section s49db
-.section s49ea
-.section s49eb
-.section s49fa
-.section s49fb
-.section s49ga
-.section s49gb
-.section s49ha
-.section s49hb
-.section s49ia
-.section s49ib
-.section s49ja
-.section s49jb
-.section s49ka
-.section s49kb
-.section s49la
-.section s49lb
-.section s49ma
-.section s49mb
-.section s49na
-.section s49nb
-.section s49oa
-.section s49ob
-.section s49pa
-.section s49pb
-.section s49qa
-.section s49qb
-.section s49ra
-.section s49rb
-.section s49sa
-.section s49sb
-.section s49ta
-.section s49tb
-.section s49ua
-.section s49ub
-.section s49va
-.section s49vb
-.section s49wa
-.section s49wb
-.section s49xa
-.section s49xb
-.section s49ya
-.section s49yb
-.section s49za
-.section s49zb
-.section s491a
-.section s491b
-.section s492a
-.section s492b
-.section s493a
-.section s493b
-.section s494a
-.section s494b
-.section s495a
-.section s495b
-.section s496a
-.section s496b
-.section s497a
-.section s497b
-.section s498a
-.section s498b
-.section s499a
-.section s499b
-.section s490a
-.section s490b
-.section s40aa
-.section s40ab
-.section s40ba
-.section s40bb
-.section s40ca
-.section s40cb
-.section s40da
-.section s40db
-.section s40ea
-.section s40eb
-.section s40fa
-.section s40fb
-.section s40ga
-.section s40gb
-.section s40ha
-.section s40hb
-.section s40ia
-.section s40ib
-.section s40ja
-.section s40jb
-.section s40ka
-.section s40kb
-.section s40la
-.section s40lb
-.section s40ma
-.section s40mb
-.section s40na
-.section s40nb
-.section s40oa
-.section s40ob
-.section s40pa
-.section s40pb
-.section s40qa
-.section s40qb
-.section s40ra
-.section s40rb
-.section s40sa
-.section s40sb
-.section s40ta
-.section s40tb
-.section s40ua
-.section s40ub
-.section s40va
-.section s40vb
-.section s40wa
-.section s40wb
-.section s40xa
-.section s40xb
-.section s40ya
-.section s40yb
-.section s40za
-.section s40zb
-.section s401a
-.section s401b
-.section s402a
-.section s402b
-.section s403a
-.section s403b
-.section s404a
-.section s404b
-.section s405a
-.section s405b
-.section s406a
-.section s406b
-.section s407a
-.section s407b
-.section s408a
-.section s408b
-.section s409a
-.section s409b
-.section s400a
-.section s400b
-.section s5aaa
-.section s5aab
-.section s5aba
-.section s5abb
-.section s5aca
-.section s5acb
-.section s5ada
-.section s5adb
-.section s5aea
-.section s5aeb
-.section s5afa
-.section s5afb
-.section s5aga
-.section s5agb
-.section s5aha
-.section s5ahb
-.section s5aia
-.section s5aib
-.section s5aja
-.section s5ajb
-.section s5aka
-.section s5akb
-.section s5ala
-.section s5alb
-.section s5ama
-.section s5amb
-.section s5ana
-.section s5anb
-.section s5aoa
-.section s5aob
-.section s5apa
-.section s5apb
-.section s5aqa
-.section s5aqb
-.section s5ara
-.section s5arb
-.section s5asa
-.section s5asb
-.section s5ata
-.section s5atb
-.section s5aua
-.section s5aub
-.section s5ava
-.section s5avb
-.section s5awa
-.section s5awb
-.section s5axa
-.section s5axb
-.section s5aya
-.section s5ayb
-.section s5aza
-.section s5azb
-.section s5a1a
-.section s5a1b
-.section s5a2a
-.section s5a2b
-.section s5a3a
-.section s5a3b
-.section s5a4a
-.section s5a4b
-.section s5a5a
-.section s5a5b
-.section s5a6a
-.section s5a6b
-.section s5a7a
-.section s5a7b
-.section s5a8a
-.section s5a8b
-.section s5a9a
-.section s5a9b
-.section s5a0a
-.section s5a0b
-.section s5baa
-.section s5bab
-.section s5bba
-.section s5bbb
-.section s5bca
-.section s5bcb
-.section s5bda
-.section s5bdb
-.section s5bea
-.section s5beb
-.section s5bfa
-.section s5bfb
-.section s5bga
-.section s5bgb
-.section s5bha
-.section s5bhb
-.section s5bia
-.section s5bib
-.section s5bja
-.section s5bjb
-.section s5bka
-.section s5bkb
-.section s5bla
-.section s5blb
-.section s5bma
-.section s5bmb
-.section s5bna
-.section s5bnb
-.section s5boa
-.section s5bob
-.section s5bpa
-.section s5bpb
-.section s5bqa
-.section s5bqb
-.section s5bra
-.section s5brb
-.section s5bsa
-.section s5bsb
-.section s5bta
-.section s5btb
-.section s5bua
-.section s5bub
-.section s5bva
-.section s5bvb
-.section s5bwa
-.section s5bwb
-.section s5bxa
-.section s5bxb
-.section s5bya
-.section s5byb
-.section s5bza
-.section s5bzb
-.section s5b1a
-.section s5b1b
-.section s5b2a
-.section s5b2b
-.section s5b3a
-.section s5b3b
-.section s5b4a
-.section s5b4b
-.section s5b5a
-.section s5b5b
-.section s5b6a
-.section s5b6b
-.section s5b7a
-.section s5b7b
-.section s5b8a
-.section s5b8b
-.section s5b9a
-.section s5b9b
-.section s5b0a
-.section s5b0b
-.section s5caa
-.section s5cab
-.section s5cba
-.section s5cbb
-.section s5cca
-.section s5ccb
-.section s5cda
-.section s5cdb
-.section s5cea
-.section s5ceb
-.section s5cfa
-.section s5cfb
-.section s5cga
-.section s5cgb
-.section s5cha
-.section s5chb
-.section s5cia
-.section s5cib
-.section s5cja
-.section s5cjb
-.section s5cka
-.section s5ckb
-.section s5cla
-.section s5clb
-.section s5cma
-.section s5cmb
-.section s5cna
-.section s5cnb
-.section s5coa
-.section s5cob
-.section s5cpa
-.section s5cpb
-.section s5cqa
-.section s5cqb
-.section s5cra
-.section s5crb
-.section s5csa
-.section s5csb
-.section s5cta
-.section s5ctb
-.section s5cua
-.section s5cub
-.section s5cva
-.section s5cvb
-.section s5cwa
-.section s5cwb
-.section s5cxa
-.section s5cxb
-.section s5cya
-.section s5cyb
-.section s5cza
-.section s5czb
-.section s5c1a
-.section s5c1b
-.section s5c2a
-.section s5c2b
-.section s5c3a
-.section s5c3b
-.section s5c4a
-.section s5c4b
-.section s5c5a
-.section s5c5b
-.section s5c6a
-.section s5c6b
-.section s5c7a
-.section s5c7b
-.section s5c8a
-.section s5c8b
-.section s5c9a
-.section s5c9b
-.section s5c0a
-.section s5c0b
-.section s5daa
-.section s5dab
-.section s5dba
-.section s5dbb
-.section s5dca
-.section s5dcb
-.section s5dda
-.section s5ddb
-.section s5dea
-.section s5deb
-.section s5dfa
-.section s5dfb
-.section s5dga
-.section s5dgb
-.section s5dha
-.section s5dhb
-.section s5dia
-.section s5dib
-.section s5dja
-.section s5djb
-.section s5dka
-.section s5dkb
-.section s5dla
-.section s5dlb
-.section s5dma
-.section s5dmb
-.section s5dna
-.section s5dnb
-.section s5doa
-.section s5dob
-.section s5dpa
-.section s5dpb
-.section s5dqa
-.section s5dqb
-.section s5dra
-.section s5drb
-.section s5dsa
-.section s5dsb
-.section s5dta
-.section s5dtb
-.section s5dua
-.section s5dub
-.section s5dva
-.section s5dvb
-.section s5dwa
-.section s5dwb
-.section s5dxa
-.section s5dxb
-.section s5dya
-.section s5dyb
-.section s5dza
-.section s5dzb
-.section s5d1a
-.section s5d1b
-.section s5d2a
-.section s5d2b
-.section s5d3a
-.section s5d3b
-.section s5d4a
-.section s5d4b
-.section s5d5a
-.section s5d5b
-.section s5d6a
-.section s5d6b
-.section s5d7a
-.section s5d7b
-.section s5d8a
-.section s5d8b
-.section s5d9a
-.section s5d9b
-.section s5d0a
-.section s5d0b
-.section s5eaa
-.section s5eab
-.section s5eba
-.section s5ebb
-.section s5eca
-.section s5ecb
-.section s5eda
-.section s5edb
-.section s5eea
-.section s5eeb
-.section s5efa
-.section s5efb
-.section s5ega
-.section s5egb
-.section s5eha
-.section s5ehb
-.section s5eia
-.section s5eib
-.section s5eja
-.section s5ejb
-.section s5eka
-.section s5ekb
-.section s5ela
-.section s5elb
-.section s5ema
-.section s5emb
-.section s5ena
-.section s5enb
-.section s5eoa
-.section s5eob
-.section s5epa
-.section s5epb
-.section s5eqa
-.section s5eqb
-.section s5era
-.section s5erb
-.section s5esa
-.section s5esb
-.section s5eta
-.section s5etb
-.section s5eua
-.section s5eub
-.section s5eva
-.section s5evb
-.section s5ewa
-.section s5ewb
-.section s5exa
-.section s5exb
-.section s5eya
-.section s5eyb
-.section s5eza
-.section s5ezb
-.section s5e1a
-.section s5e1b
-.section s5e2a
-.section s5e2b
-.section s5e3a
-.section s5e3b
-.section s5e4a
-.section s5e4b
-.section s5e5a
-.section s5e5b
-.section s5e6a
-.section s5e6b
-.section s5e7a
-.section s5e7b
-.section s5e8a
-.section s5e8b
-.section s5e9a
-.section s5e9b
-.section s5e0a
-.section s5e0b
-.section s5faa
-.section s5fab
-.section s5fba
-.section s5fbb
-.section s5fca
-.section s5fcb
-.section s5fda
-.section s5fdb
-.section s5fea
-.section s5feb
-.section s5ffa
-.section s5ffb
-.section s5fga
-.section s5fgb
-.section s5fha
-.section s5fhb
-.section s5fia
-.section s5fib
-.section s5fja
-.section s5fjb
-.section s5fka
-.section s5fkb
-.section s5fla
-.section s5flb
-.section s5fma
-.section s5fmb
-.section s5fna
-.section s5fnb
-.section s5foa
-.section s5fob
-.section s5fpa
-.section s5fpb
-.section s5fqa
-.section s5fqb
-.section s5fra
-.section s5frb
-.section s5fsa
-.section s5fsb
-.section s5fta
-.section s5ftb
-.section s5fua
-.section s5fub
-.section s5fva
-.section s5fvb
-.section s5fwa
-.section s5fwb
-.section s5fxa
-.section s5fxb
-.section s5fya
-.section s5fyb
-.section s5fza
-.section s5fzb
-.section s5f1a
-.section s5f1b
-.section s5f2a
-.section s5f2b
-.section s5f3a
-.section s5f3b
-.section s5f4a
-.section s5f4b
-.section s5f5a
-.section s5f5b
-.section s5f6a
-.section s5f6b
-.section s5f7a
-.section s5f7b
-.section s5f8a
-.section s5f8b
-.section s5f9a
-.section s5f9b
-.section s5f0a
-.section s5f0b
-.section s5gaa
-.section s5gab
-.section s5gba
-.section s5gbb
-.section s5gca
-.section s5gcb
-.section s5gda
-.section s5gdb
-.section s5gea
-.section s5geb
-.section s5gfa
-.section s5gfb
-.section s5gga
-.section s5ggb
-.section s5gha
-.section s5ghb
-.section s5gia
-.section s5gib
-.section s5gja
-.section s5gjb
-.section s5gka
-.section s5gkb
-.section s5gla
-.section s5glb
-.section s5gma
-.section s5gmb
-.section s5gna
-.section s5gnb
-.section s5goa
-.section s5gob
-.section s5gpa
-.section s5gpb
-.section s5gqa
-.section s5gqb
-.section s5gra
-.section s5grb
-.section s5gsa
-.section s5gsb
-.section s5gta
-.section s5gtb
-.section s5gua
-.section s5gub
-.section s5gva
-.section s5gvb
-.section s5gwa
-.section s5gwb
-.section s5gxa
-.section s5gxb
-.section s5gya
-.section s5gyb
-.section s5gza
-.section s5gzb
-.section s5g1a
-.section s5g1b
-.section s5g2a
-.section s5g2b
-.section s5g3a
-.section s5g3b
-.section s5g4a
-.section s5g4b
-.section s5g5a
-.section s5g5b
-.section s5g6a
-.section s5g6b
-.section s5g7a
-.section s5g7b
-.section s5g8a
-.section s5g8b
-.section s5g9a
-.section s5g9b
-.section s5g0a
-.section s5g0b
-.section s5haa
-.section s5hab
-.section s5hba
-.section s5hbb
-.section s5hca
-.section s5hcb
-.section s5hda
-.section s5hdb
-.section s5hea
-.section s5heb
-.section s5hfa
-.section s5hfb
-.section s5hga
-.section s5hgb
-.section s5hha
-.section s5hhb
-.section s5hia
-.section s5hib
-.section s5hja
-.section s5hjb
-.section s5hka
-.section s5hkb
-.section s5hla
-.section s5hlb
-.section s5hma
-.section s5hmb
-.section s5hna
-.section s5hnb
-.section s5hoa
-.section s5hob
-.section s5hpa
-.section s5hpb
-.section s5hqa
-.section s5hqb
-.section s5hra
-.section s5hrb
-.section s5hsa
-.section s5hsb
-.section s5hta
-.section s5htb
-.section s5hua
-.section s5hub
-.section s5hva
-.section s5hvb
-.section s5hwa
-.section s5hwb
-.section s5hxa
-.section s5hxb
-.section s5hya
-.section s5hyb
-.section s5hza
-.section s5hzb
-.section s5h1a
-.section s5h1b
-.section s5h2a
-.section s5h2b
-.section s5h3a
-.section s5h3b
-.section s5h4a
-.section s5h4b
-.section s5h5a
-.section s5h5b
-.section s5h6a
-.section s5h6b
-.section s5h7a
-.section s5h7b
-.section s5h8a
-.section s5h8b
-.section s5h9a
-.section s5h9b
-.section s5h0a
-.section s5h0b
-.section s5iaa
-.section s5iab
-.section s5iba
-.section s5ibb
-.section s5ica
-.section s5icb
-.section s5ida
-.section s5idb
-.section s5iea
-.section s5ieb
-.section s5ifa
-.section s5ifb
-.section s5iga
-.section s5igb
-.section s5iha
-.section s5ihb
-.section s5iia
-.section s5iib
-.section s5ija
-.section s5ijb
-.section s5ika
-.section s5ikb
-.section s5ila
-.section s5ilb
-.section s5ima
-.section s5imb
-.section s5ina
-.section s5inb
-.section s5ioa
-.section s5iob
-.section s5ipa
-.section s5ipb
-.section s5iqa
-.section s5iqb
-.section s5ira
-.section s5irb
-.section s5isa
-.section s5isb
-.section s5ita
-.section s5itb
-.section s5iua
-.section s5iub
-.section s5iva
-.section s5ivb
-.section s5iwa
-.section s5iwb
-.section s5ixa
-.section s5ixb
-.section s5iya
-.section s5iyb
-.section s5iza
-.section s5izb
-.section s5i1a
-.section s5i1b
-.section s5i2a
-.section s5i2b
-.section s5i3a
-.section s5i3b
-.section s5i4a
-.section s5i4b
-.section s5i5a
-.section s5i5b
-.section s5i6a
-.section s5i6b
-.section s5i7a
-.section s5i7b
-.section s5i8a
-.section s5i8b
-.section s5i9a
-.section s5i9b
-.section s5i0a
-.section s5i0b
-.section s5jaa
-.section s5jab
-.section s5jba
-.section s5jbb
-.section s5jca
-.section s5jcb
-.section s5jda
-.section s5jdb
-.section s5jea
-.section s5jeb
-.section s5jfa
-.section s5jfb
-.section s5jga
-.section s5jgb
-.section s5jha
-.section s5jhb
-.section s5jia
-.section s5jib
-.section s5jja
-.section s5jjb
-.section s5jka
-.section s5jkb
-.section s5jla
-.section s5jlb
-.section s5jma
-.section s5jmb
-.section s5jna
-.section s5jnb
-.section s5joa
-.section s5job
-.section s5jpa
-.section s5jpb
-.section s5jqa
-.section s5jqb
-.section s5jra
-.section s5jrb
-.section s5jsa
-.section s5jsb
-.section s5jta
-.section s5jtb
-.section s5jua
-.section s5jub
-.section s5jva
-.section s5jvb
-.section s5jwa
-.section s5jwb
-.section s5jxa
-.section s5jxb
-.section s5jya
-.section s5jyb
-.section s5jza
-.section s5jzb
-.section s5j1a
-.section s5j1b
-.section s5j2a
-.section s5j2b
-.section s5j3a
-.section s5j3b
-.section s5j4a
-.section s5j4b
-.section s5j5a
-.section s5j5b
-.section s5j6a
-.section s5j6b
-.section s5j7a
-.section s5j7b
-.section s5j8a
-.section s5j8b
-.section s5j9a
-.section s5j9b
-.section s5j0a
-.section s5j0b
-.section s5kaa
-.section s5kab
-.section s5kba
-.section s5kbb
-.section s5kca
-.section s5kcb
-.section s5kda
-.section s5kdb
-.section s5kea
-.section s5keb
-.section s5kfa
-.section s5kfb
-.section s5kga
-.section s5kgb
-.section s5kha
-.section s5khb
-.section s5kia
-.section s5kib
-.section s5kja
-.section s5kjb
-.section s5kka
-.section s5kkb
-.section s5kla
-.section s5klb
-.section s5kma
-.section s5kmb
-.section s5kna
-.section s5knb
-.section s5koa
-.section s5kob
-.section s5kpa
-.section s5kpb
-.section s5kqa
-.section s5kqb
-.section s5kra
-.section s5krb
-.section s5ksa
-.section s5ksb
-.section s5kta
-.section s5ktb
-.section s5kua
-.section s5kub
-.section s5kva
-.section s5kvb
-.section s5kwa
-.section s5kwb
-.section s5kxa
-.section s5kxb
-.section s5kya
-.section s5kyb
-.section s5kza
-.section s5kzb
-.section s5k1a
-.section s5k1b
-.section s5k2a
-.section s5k2b
-.section s5k3a
-.section s5k3b
-.section s5k4a
-.section s5k4b
-.section s5k5a
-.section s5k5b
-.section s5k6a
-.section s5k6b
-.section s5k7a
-.section s5k7b
-.section s5k8a
-.section s5k8b
-.section s5k9a
-.section s5k9b
-.section s5k0a
-.section s5k0b
-.section s5laa
-.section s5lab
-.section s5lba
-.section s5lbb
-.section s5lca
-.section s5lcb
-.section s5lda
-.section s5ldb
-.section s5lea
-.section s5leb
-.section s5lfa
-.section s5lfb
-.section s5lga
-.section s5lgb
-.section s5lha
-.section s5lhb
-.section s5lia
-.section s5lib
-.section s5lja
-.section s5ljb
-.section s5lka
-.section s5lkb
-.section s5lla
-.section s5llb
-.section s5lma
-.section s5lmb
-.section s5lna
-.section s5lnb
-.section s5loa
-.section s5lob
-.section s5lpa
-.section s5lpb
-.section s5lqa
-.section s5lqb
-.section s5lra
-.section s5lrb
-.section s5lsa
-.section s5lsb
-.section s5lta
-.section s5ltb
-.section s5lua
-.section s5lub
-.section s5lva
-.section s5lvb
-.section s5lwa
-.section s5lwb
-.section s5lxa
-.section s5lxb
-.section s5lya
-.section s5lyb
-.section s5lza
-.section s5lzb
-.section s5l1a
-.section s5l1b
-.section s5l2a
-.section s5l2b
-.section s5l3a
-.section s5l3b
-.section s5l4a
-.section s5l4b
-.section s5l5a
-.section s5l5b
-.section s5l6a
-.section s5l6b
-.section s5l7a
-.section s5l7b
-.section s5l8a
-.section s5l8b
-.section s5l9a
-.section s5l9b
-.section s5l0a
-.section s5l0b
-.section s5maa
-.section s5mab
-.section s5mba
-.section s5mbb
-.section s5mca
-.section s5mcb
-.section s5mda
-.section s5mdb
-.section s5mea
-.section s5meb
-.section s5mfa
-.section s5mfb
-.section s5mga
-.section s5mgb
-.section s5mha
-.section s5mhb
-.section s5mia
-.section s5mib
-.section s5mja
-.section s5mjb
-.section s5mka
-.section s5mkb
-.section s5mla
-.section s5mlb
-.section s5mma
-.section s5mmb
-.section s5mna
-.section s5mnb
-.section s5moa
-.section s5mob
-.section s5mpa
-.section s5mpb
-.section s5mqa
-.section s5mqb
-.section s5mra
-.section s5mrb
-.section s5msa
-.section s5msb
-.section s5mta
-.section s5mtb
-.section s5mua
-.section s5mub
-.section s5mva
-.section s5mvb
-.section s5mwa
-.section s5mwb
-.section s5mxa
-.section s5mxb
-.section s5mya
-.section s5myb
-.section s5mza
-.section s5mzb
-.section s5m1a
-.section s5m1b
-.section s5m2a
-.section s5m2b
-.section s5m3a
-.section s5m3b
-.section s5m4a
-.section s5m4b
-.section s5m5a
-.section s5m5b
-.section s5m6a
-.section s5m6b
-.section s5m7a
-.section s5m7b
-.section s5m8a
-.section s5m8b
-.section s5m9a
-.section s5m9b
-.section s5m0a
-.section s5m0b
-.section s5naa
-.section s5nab
-.section s5nba
-.section s5nbb
-.section s5nca
-.section s5ncb
-.section s5nda
-.section s5ndb
-.section s5nea
-.section s5neb
-.section s5nfa
-.section s5nfb
-.section s5nga
-.section s5ngb
-.section s5nha
-.section s5nhb
-.section s5nia
-.section s5nib
-.section s5nja
-.section s5njb
-.section s5nka
-.section s5nkb
-.section s5nla
-.section s5nlb
-.section s5nma
-.section s5nmb
-.section s5nna
-.section s5nnb
-.section s5noa
-.section s5nob
-.section s5npa
-.section s5npb
-.section s5nqa
-.section s5nqb
-.section s5nra
-.section s5nrb
-.section s5nsa
-.section s5nsb
-.section s5nta
-.section s5ntb
-.section s5nua
-.section s5nub
-.section s5nva
-.section s5nvb
-.section s5nwa
-.section s5nwb
-.section s5nxa
-.section s5nxb
-.section s5nya
-.section s5nyb
-.section s5nza
-.section s5nzb
-.section s5n1a
-.section s5n1b
-.section s5n2a
-.section s5n2b
-.section s5n3a
-.section s5n3b
-.section s5n4a
-.section s5n4b
-.section s5n5a
-.section s5n5b
-.section s5n6a
-.section s5n6b
-.section s5n7a
-.section s5n7b
-.section s5n8a
-.section s5n8b
-.section s5n9a
-.section s5n9b
-.section s5n0a
-.section s5n0b
-.section s5oaa
-.section s5oab
-.section s5oba
-.section s5obb
-.section s5oca
-.section s5ocb
-.section s5oda
-.section s5odb
-.section s5oea
-.section s5oeb
-.section s5ofa
-.section s5ofb
-.section s5oga
-.section s5ogb
-.section s5oha
-.section s5ohb
-.section s5oia
-.section s5oib
-.section s5oja
-.section s5ojb
-.section s5oka
-.section s5okb
-.section s5ola
-.section s5olb
-.section s5oma
-.section s5omb
-.section s5ona
-.section s5onb
-.section s5ooa
-.section s5oob
-.section s5opa
-.section s5opb
-.section s5oqa
-.section s5oqb
-.section s5ora
-.section s5orb
-.section s5osa
-.section s5osb
-.section s5ota
-.section s5otb
-.section s5oua
-.section s5oub
-.section s5ova
-.section s5ovb
-.section s5owa
-.section s5owb
-.section s5oxa
-.section s5oxb
-.section s5oya
-.section s5oyb
-.section s5oza
-.section s5ozb
-.section s5o1a
-.section s5o1b
-.section s5o2a
-.section s5o2b
-.section s5o3a
-.section s5o3b
-.section s5o4a
-.section s5o4b
-.section s5o5a
-.section s5o5b
-.section s5o6a
-.section s5o6b
-.section s5o7a
-.section s5o7b
-.section s5o8a
-.section s5o8b
-.section s5o9a
-.section s5o9b
-.section s5o0a
-.section s5o0b
-.section s5paa
-.section s5pab
-.section s5pba
-.section s5pbb
-.section s5pca
-.section s5pcb
-.section s5pda
-.section s5pdb
-.section s5pea
-.section s5peb
-.section s5pfa
-.section s5pfb
-.section s5pga
-.section s5pgb
-.section s5pha
-.section s5phb
-.section s5pia
-.section s5pib
-.section s5pja
-.section s5pjb
-.section s5pka
-.section s5pkb
-.section s5pla
-.section s5plb
-.section s5pma
-.section s5pmb
-.section s5pna
-.section s5pnb
-.section s5poa
-.section s5pob
-.section s5ppa
-.section s5ppb
-.section s5pqa
-.section s5pqb
-.section s5pra
-.section s5prb
-.section s5psa
-.section s5psb
-.section s5pta
-.section s5ptb
-.section s5pua
-.section s5pub
-.section s5pva
-.section s5pvb
-.section s5pwa
-.section s5pwb
-.section s5pxa
-.section s5pxb
-.section s5pya
-.section s5pyb
-.section s5pza
-.section s5pzb
-.section s5p1a
-.section s5p1b
-.section s5p2a
-.section s5p2b
-.section s5p3a
-.section s5p3b
-.section s5p4a
-.section s5p4b
-.section s5p5a
-.section s5p5b
-.section s5p6a
-.section s5p6b
-.section s5p7a
-.section s5p7b
-.section s5p8a
-.section s5p8b
-.section s5p9a
-.section s5p9b
-.section s5p0a
-.section s5p0b
-.section s5qaa
-.section s5qab
-.section s5qba
-.section s5qbb
-.section s5qca
-.section s5qcb
-.section s5qda
-.section s5qdb
-.section s5qea
-.section s5qeb
-.section s5qfa
-.section s5qfb
-.section s5qga
-.section s5qgb
-.section s5qha
-.section s5qhb
-.section s5qia
-.section s5qib
-.section s5qja
-.section s5qjb
-.section s5qka
-.section s5qkb
-.section s5qla
-.section s5qlb
-.section s5qma
-.section s5qmb
-.section s5qna
-.section s5qnb
-.section s5qoa
-.section s5qob
-.section s5qpa
-.section s5qpb
-.section s5qqa
-.section s5qqb
-.section s5qra
-.section s5qrb
-.section s5qsa
-.section s5qsb
-.section s5qta
-.section s5qtb
-.section s5qua
-.section s5qub
-.section s5qva
-.section s5qvb
-.section s5qwa
-.section s5qwb
-.section s5qxa
-.section s5qxb
-.section s5qya
-.section s5qyb
-.section s5qza
-.section s5qzb
-.section s5q1a
-.section s5q1b
-.section s5q2a
-.section s5q2b
-.section s5q3a
-.section s5q3b
-.section s5q4a
-.section s5q4b
-.section s5q5a
-.section s5q5b
-.section s5q6a
-.section s5q6b
-.section s5q7a
-.section s5q7b
-.section s5q8a
-.section s5q8b
-.section s5q9a
-.section s5q9b
-.section s5q0a
-.section s5q0b
-.section s5raa
-.section s5rab
-.section s5rba
-.section s5rbb
-.section s5rca
-.section s5rcb
-.section s5rda
-.section s5rdb
-.section s5rea
-.section s5reb
-.section s5rfa
-.section s5rfb
-.section s5rga
-.section s5rgb
-.section s5rha
-.section s5rhb
-.section s5ria
-.section s5rib
-.section s5rja
-.section s5rjb
-.section s5rka
-.section s5rkb
-.section s5rla
-.section s5rlb
-.section s5rma
-.section s5rmb
-.section s5rna
-.section s5rnb
-.section s5roa
-.section s5rob
-.section s5rpa
-.section s5rpb
-.section s5rqa
-.section s5rqb
-.section s5rra
-.section s5rrb
-.section s5rsa
-.section s5rsb
-.section s5rta
-.section s5rtb
-.section s5rua
-.section s5rub
-.section s5rva
-.section s5rvb
-.section s5rwa
-.section s5rwb
-.section s5rxa
-.section s5rxb
-.section s5rya
-.section s5ryb
-.section s5rza
-.section s5rzb
-.section s5r1a
-.section s5r1b
-.section s5r2a
-.section s5r2b
-.section s5r3a
-.section s5r3b
-.section s5r4a
-.section s5r4b
-.section s5r5a
-.section s5r5b
-.section s5r6a
-.section s5r6b
-.section s5r7a
-.section s5r7b
-.section s5r8a
-.section s5r8b
-.section s5r9a
-.section s5r9b
-.section s5r0a
-.section s5r0b
-.section s5saa
-.section s5sab
-.section s5sba
-.section s5sbb
-.section s5sca
-.section s5scb
-.section s5sda
-.section s5sdb
-.section s5sea
-.section s5seb
-.section s5sfa
-.section s5sfb
-.section s5sga
-.section s5sgb
-.section s5sha
-.section s5shb
-.section s5sia
-.section s5sib
-.section s5sja
-.section s5sjb
-.section s5ska
-.section s5skb
-.section s5sla
-.section s5slb
-.section s5sma
-.section s5smb
-.section s5sna
-.section s5snb
-.section s5soa
-.section s5sob
-.section s5spa
-.section s5spb
-.section s5sqa
-.section s5sqb
-.section s5sra
-.section s5srb
-.section s5ssa
-.section s5ssb
-.section s5sta
-.section s5stb
-.section s5sua
-.section s5sub
-.section s5sva
-.section s5svb
-.section s5swa
-.section s5swb
-.section s5sxa
-.section s5sxb
-.section s5sya
-.section s5syb
-.section s5sza
-.section s5szb
-.section s5s1a
-.section s5s1b
-.section s5s2a
-.section s5s2b
-.section s5s3a
-.section s5s3b
-.section s5s4a
-.section s5s4b
-.section s5s5a
-.section s5s5b
-.section s5s6a
-.section s5s6b
-.section s5s7a
-.section s5s7b
-.section s5s8a
-.section s5s8b
-.section s5s9a
-.section s5s9b
-.section s5s0a
-.section s5s0b
-.section s5taa
-.section s5tab
-.section s5tba
-.section s5tbb
-.section s5tca
-.section s5tcb
-.section s5tda
-.section s5tdb
-.section s5tea
-.section s5teb
-.section s5tfa
-.section s5tfb
-.section s5tga
-.section s5tgb
-.section s5tha
-.section s5thb
-.section s5tia
-.section s5tib
-.section s5tja
-.section s5tjb
-.section s5tka
-.section s5tkb
-.section s5tla
-.section s5tlb
-.section s5tma
-.section s5tmb
-.section s5tna
-.section s5tnb
-.section s5toa
-.section s5tob
-.section s5tpa
-.section s5tpb
-.section s5tqa
-.section s5tqb
-.section s5tra
-.section s5trb
-.section s5tsa
-.section s5tsb
-.section s5tta
-.section s5ttb
-.section s5tua
-.section s5tub
-.section s5tva
-.section s5tvb
-.section s5twa
-.section s5twb
-.section s5txa
-.section s5txb
-.section s5tya
-.section s5tyb
-.section s5tza
-.section s5tzb
-.section s5t1a
-.section s5t1b
-.section s5t2a
-.section s5t2b
-.section s5t3a
-.section s5t3b
-.section s5t4a
-.section s5t4b
-.section s5t5a
-.section s5t5b
-.section s5t6a
-.section s5t6b
-.section s5t7a
-.section s5t7b
-.section s5t8a
-.section s5t8b
-.section s5t9a
-.section s5t9b
-.section s5t0a
-.section s5t0b
-.section s5uaa
-.section s5uab
-.section s5uba
-.section s5ubb
-.section s5uca
-.section s5ucb
-.section s5uda
-.section s5udb
-.section s5uea
-.section s5ueb
-.section s5ufa
-.section s5ufb
-.section s5uga
-.section s5ugb
-.section s5uha
-.section s5uhb
-.section s5uia
-.section s5uib
-.section s5uja
-.section s5ujb
-.section s5uka
-.section s5ukb
-.section s5ula
-.section s5ulb
-.section s5uma
-.section s5umb
-.section s5una
-.section s5unb
-.section s5uoa
-.section s5uob
-.section s5upa
-.section s5upb
-.section s5uqa
-.section s5uqb
-.section s5ura
-.section s5urb
-.section s5usa
-.section s5usb
-.section s5uta
-.section s5utb
-.section s5uua
-.section s5uub
-.section s5uva
-.section s5uvb
-.section s5uwa
-.section s5uwb
-.section s5uxa
-.section s5uxb
-.section s5uya
-.section s5uyb
-.section s5uza
-.section s5uzb
-.section s5u1a
-.section s5u1b
-.section s5u2a
-.section s5u2b
-.section s5u3a
-.section s5u3b
-.section s5u4a
-.section s5u4b
-.section s5u5a
-.section s5u5b
-.section s5u6a
-.section s5u6b
-.section s5u7a
-.section s5u7b
-.section s5u8a
-.section s5u8b
-.section s5u9a
-.section s5u9b
-.section s5u0a
-.section s5u0b
-.section s5vaa
-.section s5vab
-.section s5vba
-.section s5vbb
-.section s5vca
-.section s5vcb
-.section s5vda
-.section s5vdb
-.section s5vea
-.section s5veb
-.section s5vfa
-.section s5vfb
-.section s5vga
-.section s5vgb
-.section s5vha
-.section s5vhb
-.section s5via
-.section s5vib
-.section s5vja
-.section s5vjb
-.section s5vka
-.section s5vkb
-.section s5vla
-.section s5vlb
-.section s5vma
-.section s5vmb
-.section s5vna
-.section s5vnb
-.section s5voa
-.section s5vob
-.section s5vpa
-.section s5vpb
-.section s5vqa
-.section s5vqb
-.section s5vra
-.section s5vrb
-.section s5vsa
-.section s5vsb
-.section s5vta
-.section s5vtb
-.section s5vua
-.section s5vub
-.section s5vva
-.section s5vvb
-.section s5vwa
-.section s5vwb
-.section s5vxa
-.section s5vxb
-.section s5vya
-.section s5vyb
-.section s5vza
-.section s5vzb
-.section s5v1a
-.section s5v1b
-.section s5v2a
-.section s5v2b
-.section s5v3a
-.section s5v3b
-.section s5v4a
-.section s5v4b
-.section s5v5a
-.section s5v5b
-.section s5v6a
-.section s5v6b
-.section s5v7a
-.section s5v7b
-.section s5v8a
-.section s5v8b
-.section s5v9a
-.section s5v9b
-.section s5v0a
-.section s5v0b
-.section s5waa
-.section s5wab
-.section s5wba
-.section s5wbb
-.section s5wca
-.section s5wcb
-.section s5wda
-.section s5wdb
-.section s5wea
-.section s5web
-.section s5wfa
-.section s5wfb
-.section s5wga
-.section s5wgb
-.section s5wha
-.section s5whb
-.section s5wia
-.section s5wib
-.section s5wja
-.section s5wjb
-.section s5wka
-.section s5wkb
-.section s5wla
-.section s5wlb
-.section s5wma
-.section s5wmb
-.section s5wna
-.section s5wnb
-.section s5woa
-.section s5wob
-.section s5wpa
-.section s5wpb
-.section s5wqa
-.section s5wqb
-.section s5wra
-.section s5wrb
-.section s5wsa
-.section s5wsb
-.section s5wta
-.section s5wtb
-.section s5wua
-.section s5wub
-.section s5wva
-.section s5wvb
-.section s5wwa
-.section s5wwb
-.section s5wxa
-.section s5wxb
-.section s5wya
-.section s5wyb
-.section s5wza
-.section s5wzb
-.section s5w1a
-.section s5w1b
-.section s5w2a
-.section s5w2b
-.section s5w3a
-.section s5w3b
-.section s5w4a
-.section s5w4b
-.section s5w5a
-.section s5w5b
-.section s5w6a
-.section s5w6b
-.section s5w7a
-.section s5w7b
-.section s5w8a
-.section s5w8b
-.section s5w9a
-.section s5w9b
-.section s5w0a
-.section s5w0b
-.section s5xaa
-.section s5xab
-.section s5xba
-.section s5xbb
-.section s5xca
-.section s5xcb
-.section s5xda
-.section s5xdb
-.section s5xea
-.section s5xeb
-.section s5xfa
-.section s5xfb
-.section s5xga
-.section s5xgb
-.section s5xha
-.section s5xhb
-.section s5xia
-.section s5xib
-.section s5xja
-.section s5xjb
-.section s5xka
-.section s5xkb
-.section s5xla
-.section s5xlb
-.section s5xma
-.section s5xmb
-.section s5xna
-.section s5xnb
-.section s5xoa
-.section s5xob
-.section s5xpa
-.section s5xpb
-.section s5xqa
-.section s5xqb
-.section s5xra
-.section s5xrb
-.section s5xsa
-.section s5xsb
-.section s5xta
-.section s5xtb
-.section s5xua
-.section s5xub
-.section s5xva
-.section s5xvb
-.section s5xwa
-.section s5xwb
-.section s5xxa
-.section s5xxb
-.section s5xya
-.section s5xyb
-.section s5xza
-.section s5xzb
-.section s5x1a
-.section s5x1b
-.section s5x2a
-.section s5x2b
-.section s5x3a
-.section s5x3b
-.section s5x4a
-.section s5x4b
-.section s5x5a
-.section s5x5b
-.section s5x6a
-.section s5x6b
-.section s5x7a
-.section s5x7b
-.section s5x8a
-.section s5x8b
-.section s5x9a
-.section s5x9b
-.section s5x0a
-.section s5x0b
-.section s5yaa
-.section s5yab
-.section s5yba
-.section s5ybb
-.section s5yca
-.section s5ycb
-.section s5yda
-.section s5ydb
-.section s5yea
-.section s5yeb
-.section s5yfa
-.section s5yfb
-.section s5yga
-.section s5ygb
-.section s5yha
-.section s5yhb
-.section s5yia
-.section s5yib
-.section s5yja
-.section s5yjb
-.section s5yka
-.section s5ykb
-.section s5yla
-.section s5ylb
-.section s5yma
-.section s5ymb
-.section s5yna
-.section s5ynb
-.section s5yoa
-.section s5yob
-.section s5ypa
-.section s5ypb
-.section s5yqa
-.section s5yqb
-.section s5yra
-.section s5yrb
-.section s5ysa
-.section s5ysb
-.section s5yta
-.section s5ytb
-.section s5yua
-.section s5yub
-.section s5yva
-.section s5yvb
-.section s5ywa
-.section s5ywb
-.section s5yxa
-.section s5yxb
-.section s5yya
-.section s5yyb
-.section s5yza
-.section s5yzb
-.section s5y1a
-.section s5y1b
-.section s5y2a
-.section s5y2b
-.section s5y3a
-.section s5y3b
-.section s5y4a
-.section s5y4b
-.section s5y5a
-.section s5y5b
-.section s5y6a
-.section s5y6b
-.section s5y7a
-.section s5y7b
-.section s5y8a
-.section s5y8b
-.section s5y9a
-.section s5y9b
-.section s5y0a
-.section s5y0b
-.section s5zaa
-.section s5zab
-.section s5zba
-.section s5zbb
-.section s5zca
-.section s5zcb
-.section s5zda
-.section s5zdb
-.section s5zea
-.section s5zeb
-.section s5zfa
-.section s5zfb
-.section s5zga
-.section s5zgb
-.section s5zha
-.section s5zhb
-.section s5zia
-.section s5zib
-.section s5zja
-.section s5zjb
-.section s5zka
-.section s5zkb
-.section s5zla
-.section s5zlb
-.section s5zma
-.section s5zmb
-.section s5zna
-.section s5znb
-.section s5zoa
-.section s5zob
-.section s5zpa
-.section s5zpb
-.section s5zqa
-.section s5zqb
-.section s5zra
-.section s5zrb
-.section s5zsa
-.section s5zsb
-.section s5zta
-.section s5ztb
-.section s5zua
-.section s5zub
-.section s5zva
-.section s5zvb
-.section s5zwa
-.section s5zwb
-.section s5zxa
-.section s5zxb
-.section s5zya
-.section s5zyb
-.section s5zza
-.section s5zzb
-.section s5z1a
-.section s5z1b
-.section s5z2a
-.section s5z2b
-.section s5z3a
-.section s5z3b
-.section s5z4a
-.section s5z4b
-.section s5z5a
-.section s5z5b
-.section s5z6a
-.section s5z6b
-.section s5z7a
-.section s5z7b
-.section s5z8a
-.section s5z8b
-.section s5z9a
-.section s5z9b
-.section s5z0a
-.section s5z0b
-.section s51aa
-.section s51ab
-.section s51ba
-.section s51bb
-.section s51ca
-.section s51cb
-.section s51da
-.section s51db
-.section s51ea
-.section s51eb
-.section s51fa
-.section s51fb
-.section s51ga
-.section s51gb
-.section s51ha
-.section s51hb
-.section s51ia
-.section s51ib
-.section s51ja
-.section s51jb
-.section s51ka
-.section s51kb
-.section s51la
-.section s51lb
-.section s51ma
-.section s51mb
-.section s51na
-.section s51nb
-.section s51oa
-.section s51ob
-.section s51pa
-.section s51pb
-.section s51qa
-.section s51qb
-.section s51ra
-.section s51rb
-.section s51sa
-.section s51sb
-.section s51ta
-.section s51tb
-.section s51ua
-.section s51ub
-.section s51va
-.section s51vb
-.section s51wa
-.section s51wb
-.section s51xa
-.section s51xb
-.section s51ya
-.section s51yb
-.section s51za
-.section s51zb
-.section s511a
-.section s511b
-.section s512a
-.section s512b
-.section s513a
-.section s513b
-.section s514a
-.section s514b
-.section s515a
-.section s515b
-.section s516a
-.section s516b
-.section s517a
-.section s517b
-.section s518a
-.section s518b
-.section s519a
-.section s519b
-.section s510a
-.section s510b
-.section s52aa
-.section s52ab
-.section s52ba
-.section s52bb
-.section s52ca
-.section s52cb
-.section s52da
-.section s52db
-.section s52ea
-.section s52eb
-.section s52fa
-.section s52fb
-.section s52ga
-.section s52gb
-.section s52ha
-.section s52hb
-.section s52ia
-.section s52ib
-.section s52ja
-.section s52jb
-.section s52ka
-.section s52kb
-.section s52la
-.section s52lb
-.section s52ma
-.section s52mb
-.section s52na
-.section s52nb
-.section s52oa
-.section s52ob
-.section s52pa
-.section s52pb
-.section s52qa
-.section s52qb
-.section s52ra
-.section s52rb
-.section s52sa
-.section s52sb
-.section s52ta
-.section s52tb
-.section s52ua
-.section s52ub
-.section s52va
-.section s52vb
-.section s52wa
-.section s52wb
-.section s52xa
-.section s52xb
-.section s52ya
-.section s52yb
-.section s52za
-.section s52zb
-.section s521a
-.section s521b
-.section s522a
-.section s522b
-.section s523a
-.section s523b
-.section s524a
-.section s524b
-.section s525a
-.section s525b
-.section s526a
-.section s526b
-.section s527a
-.section s527b
-.section s528a
-.section s528b
-.section s529a
-.section s529b
-.section s520a
-.section s520b
-.section s53aa
-.section s53ab
-.section s53ba
-.section s53bb
-.section s53ca
-.section s53cb
-.section s53da
-.section s53db
-.section s53ea
-.section s53eb
-.section s53fa
-.section s53fb
-.section s53ga
-.section s53gb
-.section s53ha
-.section s53hb
-.section s53ia
-.section s53ib
-.section s53ja
-.section s53jb
-.section s53ka
-.section s53kb
-.section s53la
-.section s53lb
-.section s53ma
-.section s53mb
-.section s53na
-.section s53nb
-.section s53oa
-.section s53ob
-.section s53pa
-.section s53pb
-.section s53qa
-.section s53qb
-.section s53ra
-.section s53rb
-.section s53sa
-.section s53sb
-.section s53ta
-.section s53tb
-.section s53ua
-.section s53ub
-.section s53va
-.section s53vb
-.section s53wa
-.section s53wb
-.section s53xa
-.section s53xb
-.section s53ya
-.section s53yb
-.section s53za
-.section s53zb
-.section s531a
-.section s531b
-.section s532a
-.section s532b
-.section s533a
-.section s533b
-.section s534a
-.section s534b
-.section s535a
-.section s535b
-.section s536a
-.section s536b
-.section s537a
-.section s537b
-.section s538a
-.section s538b
-.section s539a
-.section s539b
-.section s530a
-.section s530b
-.section s54aa
-.section s54ab
-.section s54ba
-.section s54bb
-.section s54ca
-.section s54cb
-.section s54da
-.section s54db
-.section s54ea
-.section s54eb
-.section s54fa
-.section s54fb
-.section s54ga
-.section s54gb
-.section s54ha
-.section s54hb
-.section s54ia
-.section s54ib
-.section s54ja
-.section s54jb
-.section s54ka
-.section s54kb
-.section s54la
-.section s54lb
-.section s54ma
-.section s54mb
-.section s54na
-.section s54nb
-.section s54oa
-.section s54ob
-.section s54pa
-.section s54pb
-.section s54qa
-.section s54qb
-.section s54ra
-.section s54rb
-.section s54sa
-.section s54sb
-.section s54ta
-.section s54tb
-.section s54ua
-.section s54ub
-.section s54va
-.section s54vb
-.section s54wa
-.section s54wb
-.section s54xa
-.section s54xb
-.section s54ya
-.section s54yb
-.section s54za
-.section s54zb
-.section s541a
-.section s541b
-.section s542a
-.section s542b
-.section s543a
-.section s543b
-.section s544a
-.section s544b
-.section s545a
-.section s545b
-.section s546a
-.section s546b
-.section s547a
-.section s547b
-.section s548a
-.section s548b
-.section s549a
-.section s549b
-.section s540a
-.section s540b
-.section s55aa
-.section s55ab
-.section s55ba
-.section s55bb
-.section s55ca
-.section s55cb
-.section s55da
-.section s55db
-.section s55ea
-.section s55eb
-.section s55fa
-.section s55fb
-.section s55ga
-.section s55gb
-.section s55ha
-.section s55hb
-.section s55ia
-.section s55ib
-.section s55ja
-.section s55jb
-.section s55ka
-.section s55kb
-.section s55la
-.section s55lb
-.section s55ma
-.section s55mb
-.section s55na
-.section s55nb
-.section s55oa
-.section s55ob
-.section s55pa
-.section s55pb
-.section s55qa
-.section s55qb
-.section s55ra
-.section s55rb
-.section s55sa
-.section s55sb
-.section s55ta
-.section s55tb
-.section s55ua
-.section s55ub
-.section s55va
-.section s55vb
-.section s55wa
-.section s55wb
-.section s55xa
-.section s55xb
-.section s55ya
-.section s55yb
-.section s55za
-.section s55zb
-.section s551a
-.section s551b
-.section s552a
-.section s552b
-.section s553a
-.section s553b
-.section s554a
-.section s554b
-.section s555a
-.section s555b
-.section s556a
-.section s556b
-.section s557a
-.section s557b
-.section s558a
-.section s558b
-.section s559a
-.section s559b
-.section s550a
-.section s550b
-.section s56aa
-.section s56ab
-.section s56ba
-.section s56bb
-.section s56ca
-.section s56cb
-.section s56da
-.section s56db
-.section s56ea
-.section s56eb
-.section s56fa
-.section s56fb
-.section s56ga
-.section s56gb
-.section s56ha
-.section s56hb
-.section s56ia
-.section s56ib
-.section s56ja
-.section s56jb
-.section s56ka
-.section s56kb
-.section s56la
-.section s56lb
-.section s56ma
-.section s56mb
-.section s56na
-.section s56nb
-.section s56oa
-.section s56ob
-.section s56pa
-.section s56pb
-.section s56qa
-.section s56qb
-.section s56ra
-.section s56rb
-.section s56sa
-.section s56sb
-.section s56ta
-.section s56tb
-.section s56ua
-.section s56ub
-.section s56va
-.section s56vb
-.section s56wa
-.section s56wb
-.section s56xa
-.section s56xb
-.section s56ya
-.section s56yb
-.section s56za
-.section s56zb
-.section s561a
-.section s561b
-.section s562a
-.section s562b
-.section s563a
-.section s563b
-.section s564a
-.section s564b
-.section s565a
-.section s565b
-.section s566a
-.section s566b
-.section s567a
-.section s567b
-.section s568a
-.section s568b
-.section s569a
-.section s569b
-.section s560a
-.section s560b
-.section s57aa
-.section s57ab
-.section s57ba
-.section s57bb
-.section s57ca
-.section s57cb
-.section s57da
-.section s57db
-.section s57ea
-.section s57eb
-.section s57fa
-.section s57fb
-.section s57ga
-.section s57gb
-.section s57ha
-.section s57hb
-.section s57ia
-.section s57ib
-.section s57ja
-.section s57jb
-.section s57ka
-.section s57kb
-.section s57la
-.section s57lb
-.section s57ma
-.section s57mb
-.section s57na
-.section s57nb
-.section s57oa
-.section s57ob
-.section s57pa
-.section s57pb
-.section s57qa
-.section s57qb
-.section s57ra
-.section s57rb
-.section s57sa
-.section s57sb
-.section s57ta
-.section s57tb
-.section s57ua
-.section s57ub
-.section s57va
-.section s57vb
-.section s57wa
-.section s57wb
-.section s57xa
-.section s57xb
-.section s57ya
-.section s57yb
-.section s57za
-.section s57zb
-.section s571a
-.section s571b
-.section s572a
-.section s572b
-.section s573a
-.section s573b
-.section s574a
-.section s574b
-.section s575a
-.section s575b
-.section s576a
-.section s576b
-.section s577a
-.section s577b
-.section s578a
-.section s578b
-.section s579a
-.section s579b
-.section s570a
-.section s570b
-.section s58aa
-.section s58ab
-.section s58ba
-.section s58bb
-.section s58ca
-.section s58cb
-.section s58da
-.section s58db
-.section s58ea
-.section s58eb
-.section s58fa
-.section s58fb
-.section s58ga
-.section s58gb
-.section s58ha
-.section s58hb
-.section s58ia
-.section s58ib
-.section s58ja
-.section s58jb
-.section s58ka
-.section s58kb
-.section s58la
-.section s58lb
-.section s58ma
-.section s58mb
-.section s58na
-.section s58nb
-.section s58oa
-.section s58ob
-.section s58pa
-.section s58pb
-.section s58qa
-.section s58qb
-.section s58ra
-.section s58rb
-.section s58sa
-.section s58sb
-.section s58ta
-.section s58tb
-.section s58ua
-.section s58ub
-.section s58va
-.section s58vb
-.section s58wa
-.section s58wb
-.section s58xa
-.section s58xb
-.section s58ya
-.section s58yb
-.section s58za
-.section s58zb
-.section s581a
-.section s581b
-.section s582a
-.section s582b
-.section s583a
-.section s583b
-.section s584a
-.section s584b
-.section s585a
-.section s585b
-.section s586a
-.section s586b
-.section s587a
-.section s587b
-.section s588a
-.section s588b
-.section s589a
-.section s589b
-.section s580a
-.section s580b
-.section s59aa
-.section s59ab
-.section s59ba
-.section s59bb
-.section s59ca
-.section s59cb
-.section s59da
-.section s59db
-.section s59ea
-.section s59eb
-.section s59fa
-.section s59fb
-.section s59ga
-.section s59gb
-.section s59ha
-.section s59hb
-.section s59ia
-.section s59ib
-.section s59ja
-.section s59jb
-.section s59ka
-.section s59kb
-.section s59la
-.section s59lb
-.section s59ma
-.section s59mb
-.section s59na
-.section s59nb
-.section s59oa
-.section s59ob
-.section s59pa
-.section s59pb
-.section s59qa
-.section s59qb
-.section s59ra
-.section s59rb
-.section s59sa
-.section s59sb
-.section s59ta
-.section s59tb
-.section s59ua
-.section s59ub
-.section s59va
-.section s59vb
-.section s59wa
-.section s59wb
-.section s59xa
-.section s59xb
-.section s59ya
-.section s59yb
-.section s59za
-.section s59zb
-.section s591a
-.section s591b
-.section s592a
-.section s592b
-.section s593a
-.section s593b
-.section s594a
-.section s594b
-.section s595a
-.section s595b
-.section s596a
-.section s596b
-.section s597a
-.section s597b
-.section s598a
-.section s598b
-.section s599a
-.section s599b
-.section s590a
-.section s590b
-.section s50aa
-.section s50ab
-.section s50ba
-.section s50bb
-.section s50ca
-.section s50cb
-.section s50da
-.section s50db
-.section s50ea
-.section s50eb
-.section s50fa
-.section s50fb
-.section s50ga
-.section s50gb
-.section s50ha
-.section s50hb
-.section s50ia
-.section s50ib
-.section s50ja
-.section s50jb
-.section s50ka
-.section s50kb
-.section s50la
-.section s50lb
-.section s50ma
-.section s50mb
-.section s50na
-.section s50nb
-.section s50oa
-.section s50ob
-.section s50pa
-.section s50pb
-.section s50qa
-.section s50qb
-.section s50ra
-.section s50rb
-.section s50sa
-.section s50sb
-.section s50ta
-.section s50tb
-.section s50ua
-.section s50ub
-.section s50va
-.section s50vb
-.section s50wa
-.section s50wb
-.section s50xa
-.section s50xb
-.section s50ya
-.section s50yb
-.section s50za
-.section s50zb
-.section s501a
-.section s501b
-.section s502a
-.section s502b
-.section s503a
-.section s503b
-.section s504a
-.section s504b
-.section s505a
-.section s505b
-.section s506a
-.section s506b
-.section s507a
-.section s507b
-.section s508a
-.section s508b
-.section s509a
-.section s509b
-.section s500a
-.section s500b
-.section s6aaa
-.section s6aab
-.section s6aba
-.section s6abb
-.section s6aca
-.section s6acb
-.section s6ada
-.section s6adb
-.section s6aea
-.section s6aeb
-.section s6afa
-.section s6afb
-.section s6aga
-.section s6agb
-.section s6aha
-.section s6ahb
-.section s6aia
-.section s6aib
-.section s6aja
-.section s6ajb
-.section s6aka
-.section s6akb
-.section s6ala
-.section s6alb
-.section s6ama
-.section s6amb
-.section s6ana
-.section s6anb
-.section s6aoa
-.section s6aob
-.section s6apa
-.section s6apb
-.section s6aqa
-.section s6aqb
-.section s6ara
-.section s6arb
-.section s6asa
-.section s6asb
-.section s6ata
-.section s6atb
-.section s6aua
-.section s6aub
-.section s6ava
-.section s6avb
-.section s6awa
-.section s6awb
-.section s6axa
-.section s6axb
-.section s6aya
-.section s6ayb
-.section s6aza
-.section s6azb
-.section s6a1a
-.section s6a1b
-.section s6a2a
-.section s6a2b
-.section s6a3a
-.section s6a3b
-.section s6a4a
-.section s6a4b
-.section s6a5a
-.section s6a5b
-.section s6a6a
-.section s6a6b
-.section s6a7a
-.section s6a7b
-.section s6a8a
-.section s6a8b
-.section s6a9a
-.section s6a9b
-.section s6a0a
-.section s6a0b
-.section s6baa
-.section s6bab
-.section s6bba
-.section s6bbb
-.section s6bca
-.section s6bcb
-.section s6bda
-.section s6bdb
-.section s6bea
-.section s6beb
-.section s6bfa
-.section s6bfb
-.section s6bga
-.section s6bgb
-.section s6bha
-.section s6bhb
-.section s6bia
-.section s6bib
-.section s6bja
-.section s6bjb
-.section s6bka
-.section s6bkb
-.section s6bla
-.section s6blb
-.section s6bma
-.section s6bmb
-.section s6bna
-.section s6bnb
-.section s6boa
-.section s6bob
-.section s6bpa
-.section s6bpb
-.section s6bqa
-.section s6bqb
-.section s6bra
-.section s6brb
-.section s6bsa
-.section s6bsb
-.section s6bta
-.section s6btb
-.section s6bua
-.section s6bub
-.section s6bva
-.section s6bvb
-.section s6bwa
-.section s6bwb
-.section s6bxa
-.section s6bxb
-.section s6bya
-.section s6byb
-.section s6bza
-.section s6bzb
-.section s6b1a
-.section s6b1b
-.section s6b2a
-.section s6b2b
-.section s6b3a
-.section s6b3b
-.section s6b4a
-.section s6b4b
-.section s6b5a
-.section s6b5b
-.section s6b6a
-.section s6b6b
-.section s6b7a
-.section s6b7b
-.section s6b8a
-.section s6b8b
-.section s6b9a
-.section s6b9b
-.section s6b0a
-.section s6b0b
-.section s6caa
-.section s6cab
-.section s6cba
-.section s6cbb
-.section s6cca
-.section s6ccb
-.section s6cda
-.section s6cdb
-.section s6cea
-.section s6ceb
-.section s6cfa
-.section s6cfb
-.section s6cga
-.section s6cgb
-.section s6cha
-.section s6chb
-.section s6cia
-.section s6cib
-.section s6cja
-.section s6cjb
-.section s6cka
-.section s6ckb
-.section s6cla
-.section s6clb
-.section s6cma
-.section s6cmb
-.section s6cna
-.section s6cnb
-.section s6coa
-.section s6cob
-.section s6cpa
-.section s6cpb
-.section s6cqa
-.section s6cqb
-.section s6cra
-.section s6crb
-.section s6csa
-.section s6csb
-.section s6cta
-.section s6ctb
-.section s6cua
-.section s6cub
-.section s6cva
-.section s6cvb
-.section s6cwa
-.section s6cwb
-.section s6cxa
-.section s6cxb
-.section s6cya
-.section s6cyb
-.section s6cza
-.section s6czb
-.section s6c1a
-.section s6c1b
-.section s6c2a
-.section s6c2b
-.section s6c3a
-.section s6c3b
-.section s6c4a
-.section s6c4b
-.section s6c5a
-.section s6c5b
-.section s6c6a
-.section s6c6b
-.section s6c7a
-.section s6c7b
-.section s6c8a
-.section s6c8b
-.section s6c9a
-.section s6c9b
-.section s6c0a
-.section s6c0b
-.section s6daa
-.section s6dab
-.section s6dba
-.section s6dbb
-.section s6dca
-.section s6dcb
-.section s6dda
-.section s6ddb
-.section s6dea
-.section s6deb
-.section s6dfa
-.section s6dfb
-.section s6dga
-.section s6dgb
-.section s6dha
-.section s6dhb
-.section s6dia
-.section s6dib
-.section s6dja
-.section s6djb
-.section s6dka
-.section s6dkb
-.section s6dla
-.section s6dlb
-.section s6dma
-.section s6dmb
-.section s6dna
-.section s6dnb
-.section s6doa
-.section s6dob
-.section s6dpa
-.section s6dpb
-.section s6dqa
-.section s6dqb
-.section s6dra
-.section s6drb
-.section s6dsa
-.section s6dsb
-.section s6dta
-.section s6dtb
-.section s6dua
-.section s6dub
-.section s6dva
-.section s6dvb
-.section s6dwa
-.section s6dwb
-.section s6dxa
-.section s6dxb
-.section s6dya
-.section s6dyb
-.section s6dza
-.section s6dzb
-.section s6d1a
-.section s6d1b
-.section s6d2a
-.section s6d2b
-.section s6d3a
-.section s6d3b
-.section s6d4a
-.section s6d4b
-.section s6d5a
-.section s6d5b
-.section s6d6a
-.section s6d6b
-.section s6d7a
-.section s6d7b
-.section s6d8a
-.section s6d8b
-.section s6d9a
-.section s6d9b
-.section s6d0a
-.section s6d0b
-.section s6eaa
-.section s6eab
-.section s6eba
-.section s6ebb
-.section s6eca
-.section s6ecb
-.section s6eda
-.section s6edb
-.section s6eea
-.section s6eeb
-.section s6efa
-.section s6efb
-.section s6ega
-.section s6egb
-.section s6eha
-.section s6ehb
-.section s6eia
-.section s6eib
-.section s6eja
-.section s6ejb
-.section s6eka
-.section s6ekb
-.section s6ela
-.section s6elb
-.section s6ema
-.section s6emb
-.section s6ena
-.section s6enb
-.section s6eoa
-.section s6eob
-.section s6epa
-.section s6epb
-.section s6eqa
-.section s6eqb
-.section s6era
-.section s6erb
-.section s6esa
-.section s6esb
-.section s6eta
-.section s6etb
-.section s6eua
-.section s6eub
-.section s6eva
-.section s6evb
-.section s6ewa
-.section s6ewb
-.section s6exa
-.section s6exb
-.section s6eya
-.section s6eyb
-.section s6eza
-.section s6ezb
-.section s6e1a
-.section s6e1b
-.section s6e2a
-.section s6e2b
-.section s6e3a
-.section s6e3b
-.section s6e4a
-.section s6e4b
-.section s6e5a
-.section s6e5b
-.section s6e6a
-.section s6e6b
-.section s6e7a
-.section s6e7b
-.section s6e8a
-.section s6e8b
-.section s6e9a
-.section s6e9b
-.section s6e0a
-.section s6e0b
-.section s6faa
-.section s6fab
-.section s6fba
-.section s6fbb
-.section s6fca
-.section s6fcb
-.section s6fda
-.section s6fdb
-.section s6fea
-.section s6feb
-.section s6ffa
-.section s6ffb
-.section s6fga
-.section s6fgb
-.section s6fha
-.section s6fhb
-.section s6fia
-.section s6fib
-.section s6fja
-.section s6fjb
-.section s6fka
-.section s6fkb
-.section s6fla
-.section s6flb
-.section s6fma
-.section s6fmb
-.section s6fna
-.section s6fnb
-.section s6foa
-.section s6fob
-.section s6fpa
-.section s6fpb
-.section s6fqa
-.section s6fqb
-.section s6fra
-.section s6frb
-.section s6fsa
-.section s6fsb
-.section s6fta
-.section s6ftb
-.section s6fua
-.section s6fub
-.section s6fva
-.section s6fvb
-.section s6fwa
-.section s6fwb
-.section s6fxa
-.section s6fxb
-.section s6fya
-.section s6fyb
-.section s6fza
-.section s6fzb
-.section s6f1a
-.section s6f1b
-.section s6f2a
-.section s6f2b
-.section s6f3a
-.section s6f3b
-.section s6f4a
-.section s6f4b
-.section s6f5a
-.section s6f5b
-.section s6f6a
-.section s6f6b
-.section s6f7a
-.section s6f7b
-.section s6f8a
-.section s6f8b
-.section s6f9a
-.section s6f9b
-.section s6f0a
-.section s6f0b
-.section s6gaa
-.section s6gab
-.section s6gba
-.section s6gbb
-.section s6gca
-.section s6gcb
-.section s6gda
-.section s6gdb
-.section s6gea
-.section s6geb
-.section s6gfa
-.section s6gfb
-.section s6gga
-.section s6ggb
-.section s6gha
-.section s6ghb
-.section s6gia
-.section s6gib
-.section s6gja
-.section s6gjb
-.section s6gka
-.section s6gkb
-.section s6gla
-.section s6glb
-.section s6gma
-.section s6gmb
-.section s6gna
-.section s6gnb
-.section s6goa
-.section s6gob
-.section s6gpa
-.section s6gpb
-.section s6gqa
-.section s6gqb
-.section s6gra
-.section s6grb
-.section s6gsa
-.section s6gsb
-.section s6gta
-.section s6gtb
-.section s6gua
-.section s6gub
-.section s6gva
-.section s6gvb
-.section s6gwa
-.section s6gwb
-.section s6gxa
-.section s6gxb
-.section s6gya
-.section s6gyb
-.section s6gza
-.section s6gzb
-.section s6g1a
-.section s6g1b
-.section s6g2a
-.section s6g2b
-.section s6g3a
-.section s6g3b
-.section s6g4a
-.section s6g4b
-.section s6g5a
-.section s6g5b
-.section s6g6a
-.section s6g6b
-.section s6g7a
-.section s6g7b
-.section s6g8a
-.section s6g8b
-.section s6g9a
-.section s6g9b
-.section s6g0a
-.section s6g0b
-.section s6haa
-.section s6hab
-.section s6hba
-.section s6hbb
-.section s6hca
-.section s6hcb
-.section s6hda
-.section s6hdb
-.section s6hea
-.section s6heb
-.section s6hfa
-.section s6hfb
-.section s6hga
-.section s6hgb
-.section s6hha
-.section s6hhb
-.section s6hia
-.section s6hib
-.section s6hja
-.section s6hjb
-.section s6hka
-.section s6hkb
-.section s6hla
-.section s6hlb
-.section s6hma
-.section s6hmb
-.section s6hna
-.section s6hnb
-.section s6hoa
-.section s6hob
-.section s6hpa
-.section s6hpb
-.section s6hqa
-.section s6hqb
-.section s6hra
-.section s6hrb
-.section s6hsa
-.section s6hsb
-.section s6hta
-.section s6htb
-.section s6hua
-.section s6hub
-.section s6hva
-.section s6hvb
-.section s6hwa
-.section s6hwb
-.section s6hxa
-.section s6hxb
-.section s6hya
-.section s6hyb
-.section s6hza
-.section s6hzb
-.section s6h1a
-.section s6h1b
-.section s6h2a
-.section s6h2b
-.section s6h3a
-.section s6h3b
-.section s6h4a
-.section s6h4b
-.section s6h5a
-.section s6h5b
-.section s6h6a
-.section s6h6b
-.section s6h7a
-.section s6h7b
-.section s6h8a
-.section s6h8b
-.section s6h9a
-.section s6h9b
-.section s6h0a
-.section s6h0b
-.section s6iaa
-.section s6iab
-.section s6iba
-.section s6ibb
-.section s6ica
-.section s6icb
-.section s6ida
-.section s6idb
-.section s6iea
-.section s6ieb
-.section s6ifa
-.section s6ifb
-.section s6iga
-.section s6igb
-.section s6iha
-.section s6ihb
-.section s6iia
-.section s6iib
-.section s6ija
-.section s6ijb
-.section s6ika
-.section s6ikb
-.section s6ila
-.section s6ilb
-.section s6ima
-.section s6imb
-.section s6ina
-.section s6inb
-.section s6ioa
-.section s6iob
-.section s6ipa
-.section s6ipb
-.section s6iqa
-.section s6iqb
-.section s6ira
-.section s6irb
-.section s6isa
-.section s6isb
-.section s6ita
-.section s6itb
-.section s6iua
-.section s6iub
-.section s6iva
-.section s6ivb
-.section s6iwa
-.section s6iwb
-.section s6ixa
-.section s6ixb
-.section s6iya
-.section s6iyb
-.section s6iza
-.section s6izb
-.section s6i1a
-.section s6i1b
-.section s6i2a
-.section s6i2b
-.section s6i3a
-.section s6i3b
-.section s6i4a
-.section s6i4b
-.section s6i5a
-.section s6i5b
-.section s6i6a
-.section s6i6b
-.section s6i7a
-.section s6i7b
-.section s6i8a
-.section s6i8b
-.section s6i9a
-.section s6i9b
-.section s6i0a
-.section s6i0b
-.section s6jaa
-.section s6jab
-.section s6jba
-.section s6jbb
-.section s6jca
-.section s6jcb
-.section s6jda
-.section s6jdb
-.section s6jea
-.section s6jeb
-.section s6jfa
-.section s6jfb
-.section s6jga
-.section s6jgb
-.section s6jha
-.section s6jhb
-.section s6jia
-.section s6jib
-.section s6jja
-.section s6jjb
-.section s6jka
-.section s6jkb
-.section s6jla
-.section s6jlb
-.section s6jma
-.section s6jmb
-.section s6jna
-.section s6jnb
-.section s6joa
-.section s6job
-.section s6jpa
-.section s6jpb
-.section s6jqa
-.section s6jqb
-.section s6jra
-.section s6jrb
-.section s6jsa
-.section s6jsb
-.section s6jta
-.section s6jtb
-.section s6jua
-.section s6jub
-.section s6jva
-.section s6jvb
-.section s6jwa
-.section s6jwb
-.section s6jxa
-.section s6jxb
-.section s6jya
-.section s6jyb
-.section s6jza
-.section s6jzb
-.section s6j1a
-.section s6j1b
-.section s6j2a
-.section s6j2b
-.section s6j3a
-.section s6j3b
-.section s6j4a
-.section s6j4b
-.section s6j5a
-.section s6j5b
-.section s6j6a
-.section s6j6b
-.section s6j7a
-.section s6j7b
-.section s6j8a
-.section s6j8b
-.section s6j9a
-.section s6j9b
-.section s6j0a
-.section s6j0b
-.section s6kaa
-.section s6kab
-.section s6kba
-.section s6kbb
-.section s6kca
-.section s6kcb
-.section s6kda
-.section s6kdb
-.section s6kea
-.section s6keb
-.section s6kfa
-.section s6kfb
-.section s6kga
-.section s6kgb
-.section s6kha
-.section s6khb
-.section s6kia
-.section s6kib
-.section s6kja
-.section s6kjb
-.section s6kka
-.section s6kkb
-.section s6kla
-.section s6klb
-.section s6kma
-.section s6kmb
-.section s6kna
-.section s6knb
-.section s6koa
-.section s6kob
-.section s6kpa
-.section s6kpb
-.section s6kqa
-.section s6kqb
-.section s6kra
-.section s6krb
-.section s6ksa
-.section s6ksb
-.section s6kta
-.section s6ktb
-.section s6kua
-.section s6kub
-.section s6kva
-.section s6kvb
-.section s6kwa
-.section s6kwb
-.section s6kxa
-.section s6kxb
-.section s6kya
-.section s6kyb
-.section s6kza
-.section s6kzb
-.section s6k1a
-.section s6k1b
-.section s6k2a
-.section s6k2b
-.section s6k3a
-.section s6k3b
-.section s6k4a
-.section s6k4b
-.section s6k5a
-.section s6k5b
-.section s6k6a
-.section s6k6b
-.section s6k7a
-.section s6k7b
-.section s6k8a
-.section s6k8b
-.section s6k9a
-.section s6k9b
-.section s6k0a
-.section s6k0b
-.section s6laa
-.section s6lab
-.section s6lba
-.section s6lbb
-.section s6lca
-.section s6lcb
-.section s6lda
-.section s6ldb
-.section s6lea
-.section s6leb
-.section s6lfa
-.section s6lfb
-.section s6lga
-.section s6lgb
-.section s6lha
-.section s6lhb
-.section s6lia
-.section s6lib
-.section s6lja
-.section s6ljb
-.section s6lka
-.section s6lkb
-.section s6lla
-.section s6llb
-.section s6lma
-.section s6lmb
-.section s6lna
-.section s6lnb
-.section s6loa
-.section s6lob
-.section s6lpa
-.section s6lpb
-.section s6lqa
-.section s6lqb
-.section s6lra
-.section s6lrb
-.section s6lsa
-.section s6lsb
-.section s6lta
-.section s6ltb
-.section s6lua
-.section s6lub
-.section s6lva
-.section s6lvb
-.section s6lwa
-.section s6lwb
-.section s6lxa
-.section s6lxb
-.section s6lya
-.section s6lyb
-.section s6lza
-.section s6lzb
-.section s6l1a
-.section s6l1b
-.section s6l2a
-.section s6l2b
-.section s6l3a
-.section s6l3b
-.section s6l4a
-.section s6l4b
-.section s6l5a
-.section s6l5b
-.section s6l6a
-.section s6l6b
-.section s6l7a
-.section s6l7b
-.section s6l8a
-.section s6l8b
-.section s6l9a
-.section s6l9b
-.section s6l0a
-.section s6l0b
-.section s6maa
-.section s6mab
-.section s6mba
-.section s6mbb
-.section s6mca
-.section s6mcb
-.section s6mda
-.section s6mdb
-.section s6mea
-.section s6meb
-.section s6mfa
-.section s6mfb
-.section s6mga
-.section s6mgb
-.section s6mha
-.section s6mhb
-.section s6mia
-.section s6mib
-.section s6mja
-.section s6mjb
-.section s6mka
-.section s6mkb
-.section s6mla
-.section s6mlb
-.section s6mma
-.section s6mmb
-.section s6mna
-.section s6mnb
-.section s6moa
-.section s6mob
-.section s6mpa
-.section s6mpb
-.section s6mqa
-.section s6mqb
-.section s6mra
-.section s6mrb
-.section s6msa
-.section s6msb
-.section s6mta
-.section s6mtb
-.section s6mua
-.section s6mub
-.section s6mva
-.section s6mvb
-.section s6mwa
-.section s6mwb
-.section s6mxa
-.section s6mxb
-.section s6mya
-.section s6myb
-.section s6mza
-.section s6mzb
-.section s6m1a
-.section s6m1b
-.section s6m2a
-.section s6m2b
-.section s6m3a
-.section s6m3b
-.section s6m4a
-.section s6m4b
-.section s6m5a
-.section s6m5b
-.section s6m6a
-.section s6m6b
-.section s6m7a
-.section s6m7b
-.section s6m8a
-.section s6m8b
-.section s6m9a
-.section s6m9b
-.section s6m0a
-.section s6m0b
-.section s6naa
-.section s6nab
-.section s6nba
-.section s6nbb
-.section s6nca
-.section s6ncb
-.section s6nda
-.section s6ndb
-.section s6nea
-.section s6neb
-.section s6nfa
-.section s6nfb
-.section s6nga
-.section s6ngb
-.section s6nha
-.section s6nhb
-.section s6nia
-.section s6nib
-.section s6nja
-.section s6njb
-.section s6nka
-.section s6nkb
-.section s6nla
-.section s6nlb
-.section s6nma
-.section s6nmb
-.section s6nna
-.section s6nnb
-.section s6noa
-.section s6nob
-.section s6npa
-.section s6npb
-.section s6nqa
-.section s6nqb
-.section s6nra
-.section s6nrb
-.section s6nsa
-.section s6nsb
-.section s6nta
-.section s6ntb
-.section s6nua
-.section s6nub
-.section s6nva
-.section s6nvb
-.section s6nwa
-.section s6nwb
-.section s6nxa
-.section s6nxb
-.section s6nya
-.section s6nyb
-.section s6nza
-.section s6nzb
-.section s6n1a
-.section s6n1b
-.section s6n2a
-.section s6n2b
-.section s6n3a
-.section s6n3b
-.section s6n4a
-.section s6n4b
-.section s6n5a
-.section s6n5b
-.section s6n6a
-.section s6n6b
-.section s6n7a
-.section s6n7b
-.section s6n8a
-.section s6n8b
-.section s6n9a
-.section s6n9b
-.section s6n0a
-.section s6n0b
-.section s6oaa
-.section s6oab
-.section s6oba
-.section s6obb
-.section s6oca
-.section s6ocb
-.section s6oda
-.section s6odb
-.section s6oea
-.section s6oeb
-.section s6ofa
-.section s6ofb
-.section s6oga
-.section s6ogb
-.section s6oha
-.section s6ohb
-.section s6oia
-.section s6oib
-.section s6oja
-.section s6ojb
-.section s6oka
-.section s6okb
-.section s6ola
-.section s6olb
-.section s6oma
-.section s6omb
-.section s6ona
-.section s6onb
-.section s6ooa
-.section s6oob
-.section s6opa
-.section s6opb
-.section s6oqa
-.section s6oqb
-.section s6ora
-.section s6orb
-.section s6osa
-.section s6osb
-.section s6ota
-.section s6otb
-.section s6oua
-.section s6oub
-.section s6ova
-.section s6ovb
-.section s6owa
-.section s6owb
-.section s6oxa
-.section s6oxb
-.section s6oya
-.section s6oyb
-.section s6oza
-.section s6ozb
-.section s6o1a
-.section s6o1b
-.section s6o2a
-.section s6o2b
-.section s6o3a
-.section s6o3b
-.section s6o4a
-.section s6o4b
-.section s6o5a
-.section s6o5b
-.section s6o6a
-.section s6o6b
-.section s6o7a
-.section s6o7b
-.section s6o8a
-.section s6o8b
-.section s6o9a
-.section s6o9b
-.section s6o0a
-.section s6o0b
-.section s6paa
-.section s6pab
-.section s6pba
-.section s6pbb
-.section s6pca
-.section s6pcb
-.section s6pda
-.section s6pdb
-.section s6pea
-.section s6peb
-.section s6pfa
-.section s6pfb
-.section s6pga
-.section s6pgb
-.section s6pha
-.section s6phb
-.section s6pia
-.section s6pib
-.section s6pja
-.section s6pjb
-.section s6pka
-.section s6pkb
-.section s6pla
-.section s6plb
-.section s6pma
-.section s6pmb
-.section s6pna
-.section s6pnb
-.section s6poa
-.section s6pob
-.section s6ppa
-.section s6ppb
-.section s6pqa
-.section s6pqb
-.section s6pra
-.section s6prb
-.section s6psa
-.section s6psb
-.section s6pta
-.section s6ptb
-.section s6pua
-.section s6pub
-.section s6pva
-.section s6pvb
-.section s6pwa
-.section s6pwb
-.section s6pxa
-.section s6pxb
-.section s6pya
-.section s6pyb
-.section s6pza
-.section s6pzb
-.section s6p1a
-.section s6p1b
-.section s6p2a
-.section s6p2b
-.section s6p3a
-.section s6p3b
-.section s6p4a
-.section s6p4b
-.section s6p5a
-.section s6p5b
-.section s6p6a
-.section s6p6b
-.section s6p7a
-.section s6p7b
-.section s6p8a
-.section s6p8b
-.section s6p9a
-.section s6p9b
-.section s6p0a
-.section s6p0b
-.section s6qaa
-.section s6qab
-.section s6qba
-.section s6qbb
-.section s6qca
-.section s6qcb
-.section s6qda
-.section s6qdb
-.section s6qea
-.section s6qeb
-.section s6qfa
-.section s6qfb
-.section s6qga
-.section s6qgb
-.section s6qha
-.section s6qhb
-.section s6qia
-.section s6qib
-.section s6qja
-.section s6qjb
-.section s6qka
-.section s6qkb
-.section s6qla
-.section s6qlb
-.section s6qma
-.section s6qmb
-.section s6qna
-.section s6qnb
-.section s6qoa
-.section s6qob
-.section s6qpa
-.section s6qpb
-.section s6qqa
-.section s6qqb
-.section s6qra
-.section s6qrb
-.section s6qsa
-.section s6qsb
-.section s6qta
-.section s6qtb
-.section s6qua
-.section s6qub
-.section s6qva
-.section s6qvb
-.section s6qwa
-.section s6qwb
-.section s6qxa
-.section s6qxb
-.section s6qya
-.section s6qyb
-.section s6qza
-.section s6qzb
-.section s6q1a
-.section s6q1b
-.section s6q2a
-.section s6q2b
-.section s6q3a
-.section s6q3b
-.section s6q4a
-.section s6q4b
-.section s6q5a
-.section s6q5b
-.section s6q6a
-.section s6q6b
-.section s6q7a
-.section s6q7b
-.section s6q8a
-.section s6q8b
-.section s6q9a
-.section s6q9b
-.section s6q0a
-.section s6q0b
-.section s6raa
-.section s6rab
-.section s6rba
-.section s6rbb
-.section s6rca
-.section s6rcb
-.section s6rda
-.section s6rdb
-.section s6rea
-.section s6reb
-.section s6rfa
-.section s6rfb
-.section s6rga
-.section s6rgb
-.section s6rha
-.section s6rhb
-.section s6ria
-.section s6rib
-.section s6rja
-.section s6rjb
-.section s6rka
-.section s6rkb
-.section s6rla
-.section s6rlb
-.section s6rma
-.section s6rmb
-.section s6rna
-.section s6rnb
-.section s6roa
-.section s6rob
-.section s6rpa
-.section s6rpb
-.section s6rqa
-.section s6rqb
-.section s6rra
-.section s6rrb
-.section s6rsa
-.section s6rsb
-.section s6rta
-.section s6rtb
-.section s6rua
-.section s6rub
-.section s6rva
-.section s6rvb
-.section s6rwa
-.section s6rwb
-.section s6rxa
-.section s6rxb
-.section s6rya
-.section s6ryb
-.section s6rza
-.section s6rzb
-.section s6r1a
-.section s6r1b
-.section s6r2a
-.section s6r2b
-.section s6r3a
-.section s6r3b
-.section s6r4a
-.section s6r4b
-.section s6r5a
-.section s6r5b
-.section s6r6a
-.section s6r6b
-.section s6r7a
-.section s6r7b
-.section s6r8a
-.section s6r8b
-.section s6r9a
-.section s6r9b
-.section s6r0a
-.section s6r0b
-.section s6saa
-.section s6sab
-.section s6sba
-.section s6sbb
-.section s6sca
-.section s6scb
-.section s6sda
-.section s6sdb
-.section s6sea
-.section s6seb
-.section s6sfa
-.section s6sfb
-.section s6sga
-.section s6sgb
-.section s6sha
-.section s6shb
-.section s6sia
-.section s6sib
-.section s6sja
-.section s6sjb
-.section s6ska
-.section s6skb
-.section s6sla
-.section s6slb
-.section s6sma
-.section s6smb
-.section s6sna
-.section s6snb
-.section s6soa
-.section s6sob
-.section s6spa
-.section s6spb
-.section s6sqa
-.section s6sqb
-.section s6sra
-.section s6srb
-.section s6ssa
-.section s6ssb
-.section s6sta
-.section s6stb
-.section s6sua
-.section s6sub
-.section s6sva
-.section s6svb
-.section s6swa
-.section s6swb
-.section s6sxa
-.section s6sxb
-.section s6sya
-.section s6syb
-.section s6sza
-.section s6szb
-.section s6s1a
-.section s6s1b
-.section s6s2a
-.section s6s2b
-.section s6s3a
-.section s6s3b
-.section s6s4a
-.section s6s4b
-.section s6s5a
-.section s6s5b
-.section s6s6a
-.section s6s6b
-.section s6s7a
-.section s6s7b
-.section s6s8a
-.section s6s8b
-.section s6s9a
-.section s6s9b
-.section s6s0a
-.section s6s0b
-.section s6taa
-.section s6tab
-.section s6tba
-.section s6tbb
-.section s6tca
-.section s6tcb
-.section s6tda
-.section s6tdb
-.section s6tea
-.section s6teb
-.section s6tfa
-.section s6tfb
-.section s6tga
-.section s6tgb
-.section s6tha
-.section s6thb
-.section s6tia
-.section s6tib
-.section s6tja
-.section s6tjb
-.section s6tka
-.section s6tkb
-.section s6tla
-.section s6tlb
-.section s6tma
-.section s6tmb
-.section s6tna
-.section s6tnb
-.section s6toa
-.section s6tob
-.section s6tpa
-.section s6tpb
-.section s6tqa
-.section s6tqb
-.section s6tra
-.section s6trb
-.section s6tsa
-.section s6tsb
-.section s6tta
-.section s6ttb
-.section s6tua
-.section s6tub
-.section s6tva
-.section s6tvb
-.section s6twa
-.section s6twb
-.section s6txa
-.section s6txb
-.section s6tya
-.section s6tyb
-.section s6tza
-.section s6tzb
-.section s6t1a
-.section s6t1b
-.section s6t2a
-.section s6t2b
-.section s6t3a
-.section s6t3b
-.section s6t4a
-.section s6t4b
-.section s6t5a
-.section s6t5b
-.section s6t6a
-.section s6t6b
-.section s6t7a
-.section s6t7b
-.section s6t8a
-.section s6t8b
-.section s6t9a
-.section s6t9b
-.section s6t0a
-.section s6t0b
-.section s6uaa
-.section s6uab
-.section s6uba
-.section s6ubb
-.section s6uca
-.section s6ucb
-.section s6uda
-.section s6udb
-.section s6uea
-.section s6ueb
-.section s6ufa
-.section s6ufb
-.section s6uga
-.section s6ugb
-.section s6uha
-.section s6uhb
-.section s6uia
-.section s6uib
-.section s6uja
-.section s6ujb
-.section s6uka
-.section s6ukb
-.section s6ula
-.section s6ulb
-.section s6uma
-.section s6umb
-.section s6una
-.section s6unb
-.section s6uoa
-.section s6uob
-.section s6upa
-.section s6upb
-.section s6uqa
-.section s6uqb
-.section s6ura
-.section s6urb
-.section s6usa
-.section s6usb
-.section s6uta
-.section s6utb
-.section s6uua
-.section s6uub
-.section s6uva
-.section s6uvb
-.section s6uwa
-.section s6uwb
-.section s6uxa
-.section s6uxb
-.section s6uya
-.section s6uyb
-.section s6uza
-.section s6uzb
-.section s6u1a
-.section s6u1b
-.section s6u2a
-.section s6u2b
-.section s6u3a
-.section s6u3b
-.section s6u4a
-.section s6u4b
-.section s6u5a
-.section s6u5b
-.section s6u6a
-.section s6u6b
-.section s6u7a
-.section s6u7b
-.section s6u8a
-.section s6u8b
-.section s6u9a
-.section s6u9b
-.section s6u0a
-.section s6u0b
-.section s6vaa
-.section s6vab
-.section s6vba
-.section s6vbb
-.section s6vca
-.section s6vcb
-.section s6vda
-.section s6vdb
-.section s6vea
-.section s6veb
-.section s6vfa
-.section s6vfb
-.section s6vga
-.section s6vgb
-.section s6vha
-.section s6vhb
-.section s6via
-.section s6vib
-.section s6vja
-.section s6vjb
-.section s6vka
-.section s6vkb
-.section s6vla
-.section s6vlb
-.section s6vma
-.section s6vmb
-.section s6vna
-.section s6vnb
-.section s6voa
-.section s6vob
-.section s6vpa
-.section s6vpb
-.section s6vqa
-.section s6vqb
-.section s6vra
-.section s6vrb
-.section s6vsa
-.section s6vsb
-.section s6vta
-.section s6vtb
-.section s6vua
-.section s6vub
-.section s6vva
-.section s6vvb
-.section s6vwa
-.section s6vwb
-.section s6vxa
-.section s6vxb
-.section s6vya
-.section s6vyb
-.section s6vza
-.section s6vzb
-.section s6v1a
-.section s6v1b
-.section s6v2a
-.section s6v2b
-.section s6v3a
-.section s6v3b
-.section s6v4a
-.section s6v4b
-.section s6v5a
-.section s6v5b
-.section s6v6a
-.section s6v6b
-.section s6v7a
-.section s6v7b
-.section s6v8a
-.section s6v8b
-.section s6v9a
-.section s6v9b
-.section s6v0a
-.section s6v0b
-.section s6waa
-.section s6wab
-.section s6wba
-.section s6wbb
-.section s6wca
-.section s6wcb
-.section s6wda
-.section s6wdb
-.section s6wea
-.section s6web
-.section s6wfa
-.section s6wfb
-.section s6wga
-.section s6wgb
-.section s6wha
-.section s6whb
-.section s6wia
-.section s6wib
-.section s6wja
-.section s6wjb
-.section s6wka
-.section s6wkb
-.section s6wla
-.section s6wlb
-.section s6wma
-.section s6wmb
-.section s6wna
-.section s6wnb
-.section s6woa
-.section s6wob
-.section s6wpa
-.section s6wpb
-.section s6wqa
-.section s6wqb
-.section s6wra
-.section s6wrb
-.section s6wsa
-.section s6wsb
-.section s6wta
-.section s6wtb
-.section s6wua
-.section s6wub
-.section s6wva
-.section s6wvb
-.section s6wwa
-.section s6wwb
-.section s6wxa
-.section s6wxb
-.section s6wya
-.section s6wyb
-.section s6wza
-.section s6wzb
-.section s6w1a
-.section s6w1b
-.section s6w2a
-.section s6w2b
-.section s6w3a
-.section s6w3b
-.section s6w4a
-.section s6w4b
-.section s6w5a
-.section s6w5b
-.section s6w6a
-.section s6w6b
-.section s6w7a
-.section s6w7b
-.section s6w8a
-.section s6w8b
-.section s6w9a
-.section s6w9b
-.section s6w0a
-.section s6w0b
-.section s6xaa
-.section s6xab
-.section s6xba
-.section s6xbb
-.section s6xca
-.section s6xcb
-.section s6xda
-.section s6xdb
-.section s6xea
-.section s6xeb
-.section s6xfa
-.section s6xfb
-.section s6xga
-.section s6xgb
-.section s6xha
-.section s6xhb
-.section s6xia
-.section s6xib
-.section s6xja
-.section s6xjb
-.section s6xka
-.section s6xkb
-.section s6xla
-.section s6xlb
-.section s6xma
-.section s6xmb
-.section s6xna
-.section s6xnb
-.section s6xoa
-.section s6xob
-.section s6xpa
-.section s6xpb
-.section s6xqa
-.section s6xqb
-.section s6xra
-.section s6xrb
-.section s6xsa
-.section s6xsb
-.section s6xta
-.section s6xtb
-.section s6xua
-.section s6xub
-.section s6xva
-.section s6xvb
-.section s6xwa
-.section s6xwb
-.section s6xxa
-.section s6xxb
-.section s6xya
-.section s6xyb
-.section s6xza
-.section s6xzb
-.section s6x1a
-.section s6x1b
-.section s6x2a
-.section s6x2b
-.section s6x3a
-.section s6x3b
-.section s6x4a
-.section s6x4b
-.section s6x5a
-.section s6x5b
-.section s6x6a
-.section s6x6b
-.section s6x7a
-.section s6x7b
-.section s6x8a
-.section s6x8b
-.section s6x9a
-.section s6x9b
-.section s6x0a
-.section s6x0b
-.section s6yaa
-.section s6yab
-.section s6yba
-.section s6ybb
-.section s6yca
-.section s6ycb
-.section s6yda
-.section s6ydb
-.section s6yea
-.section s6yeb
-.section s6yfa
-.section s6yfb
-.section s6yga
-.section s6ygb
-.section s6yha
-.section s6yhb
-.section s6yia
-.section s6yib
-.section s6yja
-.section s6yjb
-.section s6yka
-.section s6ykb
-.section s6yla
-.section s6ylb
-.section s6yma
-.section s6ymb
-.section s6yna
-.section s6ynb
-.section s6yoa
-.section s6yob
-.section s6ypa
-.section s6ypb
-.section s6yqa
-.section s6yqb
-.section s6yra
-.section s6yrb
-.section s6ysa
-.section s6ysb
-.section s6yta
-.section s6ytb
-.section s6yua
-.section s6yub
-.section s6yva
-.section s6yvb
-.section s6ywa
-.section s6ywb
-.section s6yxa
-.section s6yxb
-.section s6yya
-.section s6yyb
-.section s6yza
-.section s6yzb
-.section s6y1a
-.section s6y1b
-.section s6y2a
-.section s6y2b
-.section s6y3a
-.section s6y3b
-.section s6y4a
-.section s6y4b
-.section s6y5a
-.section s6y5b
-.section s6y6a
-.section s6y6b
-.section s6y7a
-.section s6y7b
-.section s6y8a
-.section s6y8b
-.section s6y9a
-.section s6y9b
-.section s6y0a
-.section s6y0b
-.section s6zaa
-.section s6zab
-.section s6zba
-.section s6zbb
-.section s6zca
-.section s6zcb
-.section s6zda
-.section s6zdb
-.section s6zea
-.section s6zeb
-.section s6zfa
-.section s6zfb
-.section s6zga
-.section s6zgb
-.section s6zha
-.section s6zhb
-.section s6zia
-.section s6zib
-.section s6zja
-.section s6zjb
-.section s6zka
-.section s6zkb
-.section s6zla
-.section s6zlb
-.section s6zma
-.section s6zmb
-.section s6zna
-.section s6znb
-.section s6zoa
-.section s6zob
-.section s6zpa
-.section s6zpb
-.section s6zqa
-.section s6zqb
-.section s6zra
-.section s6zrb
-.section s6zsa
-.section s6zsb
-.section s6zta
-.section s6ztb
-.section s6zua
-.section s6zub
-.section s6zva
-.section s6zvb
-.section s6zwa
-.section s6zwb
-.section s6zxa
-.section s6zxb
-.section s6zya
-.section s6zyb
-.section s6zza
-.section s6zzb
-.section s6z1a
-.section s6z1b
-.section s6z2a
-.section s6z2b
-.section s6z3a
-.section s6z3b
-.section s6z4a
-.section s6z4b
-.section s6z5a
-.section s6z5b
-.section s6z6a
-.section s6z6b
-.section s6z7a
-.section s6z7b
-.section s6z8a
-.section s6z8b
-.section s6z9a
-.section s6z9b
-.section s6z0a
-.section s6z0b
-.section s61aa
-.section s61ab
-.section s61ba
-.section s61bb
-.section s61ca
-.section s61cb
-.section s61da
-.section s61db
-.section s61ea
-.section s61eb
-.section s61fa
-.section s61fb
-.section s61ga
-.section s61gb
-.section s61ha
-.section s61hb
-.section s61ia
-.section s61ib
-.section s61ja
-.section s61jb
-.section s61ka
-.section s61kb
-.section s61la
-.section s61lb
-.section s61ma
-.section s61mb
-.section s61na
-.section s61nb
-.section s61oa
-.section s61ob
-.section s61pa
-.section s61pb
-.section s61qa
-.section s61qb
-.section s61ra
-.section s61rb
-.section s61sa
-.section s61sb
-.section s61ta
-.section s61tb
-.section s61ua
-.section s61ub
-.section s61va
-.section s61vb
-.section s61wa
-.section s61wb
-.section s61xa
-.section s61xb
-.section s61ya
-.section s61yb
-.section s61za
-.section s61zb
-.section s611a
-.section s611b
-.section s612a
-.section s612b
-.section s613a
-.section s613b
-.section s614a
-.section s614b
-.section s615a
-.section s615b
-.section s616a
-.section s616b
-.section s617a
-.section s617b
-.section s618a
-.section s618b
-.section s619a
-.section s619b
-.section s610a
-.section s610b
-.section s62aa
-.section s62ab
-.section s62ba
-.section s62bb
-.section s62ca
-.section s62cb
-.section s62da
-.section s62db
-.section s62ea
-.section s62eb
-.section s62fa
-.section s62fb
-.section s62ga
-.section s62gb
-.section s62ha
-.section s62hb
-.section s62ia
-.section s62ib
-.section s62ja
-.section s62jb
-.section s62ka
-.section s62kb
-.section s62la
-.section s62lb
-.section s62ma
-.section s62mb
-.section s62na
-.section s62nb
-.section s62oa
-.section s62ob
-.section s62pa
-.section s62pb
-.section s62qa
-.section s62qb
-.section s62ra
-.section s62rb
-.section s62sa
-.section s62sb
-.section s62ta
-.section s62tb
-.section s62ua
-.section s62ub
-.section s62va
-.section s62vb
-.section s62wa
-.section s62wb
-.section s62xa
-.section s62xb
-.section s62ya
-.section s62yb
-.section s62za
-.section s62zb
-.section s621a
-.section s621b
-.section s622a
-.section s622b
-.section s623a
-.section s623b
-.section s624a
-.section s624b
-.section s625a
-.section s625b
-.section s626a
-.section s626b
-.section s627a
-.section s627b
-.section s628a
-.section s628b
-.section s629a
-.section s629b
-.section s620a
-.section s620b
-.section s63aa
-.section s63ab
-.section s63ba
-.section s63bb
-.section s63ca
-.section s63cb
-.section s63da
-.section s63db
-.section s63ea
-.section s63eb
-.section s63fa
-.section s63fb
-.section s63ga
-.section s63gb
-.section s63ha
-.section s63hb
-.section s63ia
-.section s63ib
-.section s63ja
-.section s63jb
-.section s63ka
-.section s63kb
-.section s63la
-.section s63lb
-.section s63ma
-.section s63mb
-.section s63na
-.section s63nb
-.section s63oa
-.section s63ob
-.section s63pa
-.section s63pb
-.section s63qa
-.section s63qb
-.section s63ra
-.section s63rb
-.section s63sa
-.section s63sb
-.section s63ta
-.section s63tb
-.section s63ua
-.section s63ub
-.section s63va
-.section s63vb
-.section s63wa
-.section s63wb
-.section s63xa
-.section s63xb
-.section s63ya
-.section s63yb
-.section s63za
-.section s63zb
-.section s631a
-.section s631b
-.section s632a
-.section s632b
-.section s633a
-.section s633b
-.section s634a
-.section s634b
-.section s635a
-.section s635b
-.section s636a
-.section s636b
-.section s637a
-.section s637b
-.section s638a
-.section s638b
-.section s639a
-.section s639b
-.section s630a
-.section s630b
-.section s64aa
-.section s64ab
-.section s64ba
-.section s64bb
-.section s64ca
-.section s64cb
-.section s64da
-.section s64db
-.section s64ea
-.section s64eb
-.section s64fa
-.section s64fb
-.section s64ga
-.section s64gb
-.section s64ha
-.section s64hb
-.section s64ia
-.section s64ib
-.section s64ja
-.section s64jb
-.section s64ka
-.section s64kb
-.section s64la
-.section s64lb
-.section s64ma
-.section s64mb
-.section s64na
-.section s64nb
-.section s64oa
-.section s64ob
-.section s64pa
-.section s64pb
-.section s64qa
-.section s64qb
-.section s64ra
-.section s64rb
-.section s64sa
-.section s64sb
-.section s64ta
-.section s64tb
-.section s64ua
-.section s64ub
-.section s64va
-.section s64vb
-.section s64wa
-.section s64wb
-.section s64xa
-.section s64xb
-.section s64ya
-.section s64yb
-.section s64za
-.section s64zb
-.section s641a
-.section s641b
-.section s642a
-.section s642b
-.section s643a
-.section s643b
-.section s644a
-.section s644b
-.section s645a
-.section s645b
-.section s646a
-.section s646b
-.section s647a
-.section s647b
-.section s648a
-.section s648b
-.section s649a
-.section s649b
-.section s640a
-.section s640b
-.section s65aa
-.section s65ab
-.section s65ba
-.section s65bb
-.section s65ca
-.section s65cb
-.section s65da
-.section s65db
-.section s65ea
-.section s65eb
-.section s65fa
-.section s65fb
-.section s65ga
-.section s65gb
-.section s65ha
-.section s65hb
-.section s65ia
-.section s65ib
-.section s65ja
-.section s65jb
-.section s65ka
-.section s65kb
-.section s65la
-.section s65lb
-.section s65ma
-.section s65mb
-.section s65na
-.section s65nb
-.section s65oa
-.section s65ob
-.section s65pa
-.section s65pb
-.section s65qa
-.section s65qb
-.section s65ra
-.section s65rb
-.section s65sa
-.section s65sb
-.section s65ta
-.section s65tb
-.section s65ua
-.section s65ub
-.section s65va
-.section s65vb
-.section s65wa
-.section s65wb
-.section s65xa
-.section s65xb
-.section s65ya
-.section s65yb
-.section s65za
-.section s65zb
-.section s651a
-.section s651b
-.section s652a
-.section s652b
-.section s653a
-.section s653b
-.section s654a
-.section s654b
-.section s655a
-.section s655b
-.section s656a
-.section s656b
-.section s657a
-.section s657b
-.section s658a
-.section s658b
-.section s659a
-.section s659b
-.section s650a
-.section s650b
-.section s66aa
-.section s66ab
-.section s66ba
-.section s66bb
-.section s66ca
-.section s66cb
-.section s66da
-.section s66db
-.section s66ea
-.section s66eb
-.section s66fa
-.section s66fb
-.section s66ga
-.section s66gb
-.section s66ha
-.section s66hb
-.section s66ia
-.section s66ib
-.section s66ja
-.section s66jb
-.section s66ka
-.section s66kb
-.section s66la
-.section s66lb
-.section s66ma
-.section s66mb
-.section s66na
-.section s66nb
-.section s66oa
-.section s66ob
-.section s66pa
-.section s66pb
-.section s66qa
-.section s66qb
-.section s66ra
-.section s66rb
-.section s66sa
-.section s66sb
-.section s66ta
-.section s66tb
-.section s66ua
-.section s66ub
-.section s66va
-.section s66vb
-.section s66wa
-.section s66wb
-.section s66xa
-.section s66xb
-.section s66ya
-.section s66yb
-.section s66za
-.section s66zb
-.section s661a
-.section s661b
-.section s662a
-.section s662b
-.section s663a
-.section s663b
-.section s664a
-.section s664b
-.section s665a
-.section s665b
-.section s666a
-.section s666b
-.section s667a
-.section s667b
-.section s668a
-.section s668b
-.section s669a
-.section s669b
-.section s660a
-.section s660b
-.section s67aa
-.section s67ab
-.section s67ba
-.section s67bb
-.section s67ca
-.section s67cb
-.section s67da
-.section s67db
-.section s67ea
-.section s67eb
-.section s67fa
-.section s67fb
-.section s67ga
-.section s67gb
-.section s67ha
-.section s67hb
-.section s67ia
-.section s67ib
-.section s67ja
-.section s67jb
-.section s67ka
-.section s67kb
-.section s67la
-.section s67lb
-.section s67ma
-.section s67mb
-.section s67na
-.section s67nb
-.section s67oa
-.section s67ob
-.section s67pa
-.section s67pb
-.section s67qa
-.section s67qb
-.section s67ra
-.section s67rb
-.section s67sa
-.section s67sb
-.section s67ta
-.section s67tb
-.section s67ua
-.section s67ub
-.section s67va
-.section s67vb
-.section s67wa
-.section s67wb
-.section s67xa
-.section s67xb
-.section s67ya
-.section s67yb
-.section s67za
-.section s67zb
-.section s671a
-.section s671b
-.section s672a
-.section s672b
-.section s673a
-.section s673b
-.section s674a
-.section s674b
-.section s675a
-.section s675b
-.section s676a
-.section s676b
-.section s677a
-.section s677b
-.section s678a
-.section s678b
-.section s679a
-.section s679b
-.section s670a
-.section s670b
-.section s68aa
-.section s68ab
-.section s68ba
-.section s68bb
-.section s68ca
-.section s68cb
-.section s68da
-.section s68db
-.section s68ea
-.section s68eb
-.section s68fa
-.section s68fb
-.section s68ga
-.section s68gb
-.section s68ha
-.section s68hb
-.section s68ia
-.section s68ib
-.section s68ja
-.section s68jb
-.section s68ka
-.section s68kb
-.section s68la
-.section s68lb
-.section s68ma
-.section s68mb
-.section s68na
-.section s68nb
-.section s68oa
-.section s68ob
-.section s68pa
-.section s68pb
-.section s68qa
-.section s68qb
-.section s68ra
-.section s68rb
-.section s68sa
-.section s68sb
-.section s68ta
-.section s68tb
-.section s68ua
-.section s68ub
-.section s68va
-.section s68vb
-.section s68wa
-.section s68wb
-.section s68xa
-.section s68xb
-.section s68ya
-.section s68yb
-.section s68za
-.section s68zb
-.section s681a
-.section s681b
-.section s682a
-.section s682b
-.section s683a
-.section s683b
-.section s684a
-.section s684b
-.section s685a
-.section s685b
-.section s686a
-.section s686b
-.section s687a
-.section s687b
-.section s688a
-.section s688b
-.section s689a
-.section s689b
-.section s680a
-.section s680b
-.section s69aa
-.section s69ab
-.section s69ba
-.section s69bb
-.section s69ca
-.section s69cb
-.section s69da
-.section s69db
-.section s69ea
-.section s69eb
-.section s69fa
-.section s69fb
-.section s69ga
-.section s69gb
-.section s69ha
-.section s69hb
-.section s69ia
-.section s69ib
-.section s69ja
-.section s69jb
-.section s69ka
-.section s69kb
-.section s69la
-.section s69lb
-.section s69ma
-.section s69mb
-.section s69na
-.section s69nb
-.section s69oa
-.section s69ob
-.section s69pa
-.section s69pb
-.section s69qa
-.section s69qb
-.section s69ra
-.section s69rb
-.section s69sa
-.section s69sb
-.section s69ta
-.section s69tb
-.section s69ua
-.section s69ub
-.section s69va
-.section s69vb
-.section s69wa
-.section s69wb
-.section s69xa
-.section s69xb
-.section s69ya
-.section s69yb
-.section s69za
-.section s69zb
-.section s691a
-.section s691b
-.section s692a
-.section s692b
-.section s693a
-.section s693b
-.section s694a
-.section s694b
-.section s695a
-.section s695b
-.section s696a
-.section s696b
-.section s697a
-.section s697b
-.section s698a
-.section s698b
-.section s699a
-.section s699b
-.section s690a
-.section s690b
-.section s60aa
-.section s60ab
-.section s60ba
-.section s60bb
-.section s60ca
-.section s60cb
-.section s60da
-.section s60db
-.section s60ea
-.section s60eb
-.section s60fa
-.section s60fb
-.section s60ga
-.section s60gb
-.section s60ha
-.section s60hb
-.section s60ia
-.section s60ib
-.section s60ja
-.section s60jb
-.section s60ka
-.section s60kb
-.section s60la
-.section s60lb
-.section s60ma
-.section s60mb
-.section s60na
-.section s60nb
-.section s60oa
-.section s60ob
-.section s60pa
-.section s60pb
-.section s60qa
-.section s60qb
-.section s60ra
-.section s60rb
-.section s60sa
-.section s60sb
-.section s60ta
-.section s60tb
-.section s60ua
-.section s60ub
-.section s60va
-.section s60vb
-.section s60wa
-.section s60wb
-.section s60xa
-.section s60xb
-.section s60ya
-.section s60yb
-.section s60za
-.section s60zb
-.section s601a
-.section s601b
-.section s602a
-.section s602b
-.section s603a
-.section s603b
-.section s604a
-.section s604b
-.section s605a
-.section s605b
-.section s606a
-.section s606b
-.section s607a
-.section s607b
-.section s608a
-.section s608b
-.section s609a
-.section s609b
-.section s600a
-.section s600b
-.section s7aaa
-.section s7aab
-.section s7aba
-.section s7abb
-.section s7aca
-.section s7acb
-.section s7ada
-.section s7adb
-.section s7aea
-.section s7aeb
-.section s7afa
-.section s7afb
-.section s7aga
-.section s7agb
-.section s7aha
-.section s7ahb
-.section s7aia
-.section s7aib
-.section s7aja
-.section s7ajb
-.section s7aka
-.section s7akb
-.section s7ala
-.section s7alb
-.section s7ama
-.section s7amb
-.section s7ana
-.section s7anb
-.section s7aoa
-.section s7aob
-.section s7apa
-.section s7apb
-.section s7aqa
-.section s7aqb
-.section s7ara
-.section s7arb
-.section s7asa
-.section s7asb
-.section s7ata
-.section s7atb
-.section s7aua
-.section s7aub
-.section s7ava
-.section s7avb
-.section s7awa
-.section s7awb
-.section s7axa
-.section s7axb
-.section s7aya
-.section s7ayb
-.section s7aza
-.section s7azb
-.section s7a1a
-.section s7a1b
-.section s7a2a
-.section s7a2b
-.section s7a3a
-.section s7a3b
-.section s7a4a
-.section s7a4b
-.section s7a5a
-.section s7a5b
-.section s7a6a
-.section s7a6b
-.section s7a7a
-.section s7a7b
-.section s7a8a
-.section s7a8b
-.section s7a9a
-.section s7a9b
-.section s7a0a
-.section s7a0b
-.section s7baa
-.section s7bab
-.section s7bba
-.section s7bbb
-.section s7bca
-.section s7bcb
-.section s7bda
-.section s7bdb
-.section s7bea
-.section s7beb
-.section s7bfa
-.section s7bfb
-.section s7bga
-.section s7bgb
-.section s7bha
-.section s7bhb
-.section s7bia
-.section s7bib
-.section s7bja
-.section s7bjb
-.section s7bka
-.section s7bkb
-.section s7bla
-.section s7blb
-.section s7bma
-.section s7bmb
-.section s7bna
-.section s7bnb
-.section s7boa
-.section s7bob
-.section s7bpa
-.section s7bpb
-.section s7bqa
-.section s7bqb
-.section s7bra
-.section s7brb
-.section s7bsa
-.section s7bsb
-.section s7bta
-.section s7btb
-.section s7bua
-.section s7bub
-.section s7bva
-.section s7bvb
-.section s7bwa
-.section s7bwb
-.section s7bxa
-.section s7bxb
-.section s7bya
-.section s7byb
-.section s7bza
-.section s7bzb
-.section s7b1a
-.section s7b1b
-.section s7b2a
-.section s7b2b
-.section s7b3a
-.section s7b3b
-.section s7b4a
-.section s7b4b
-.section s7b5a
-.section s7b5b
-.section s7b6a
-.section s7b6b
-.section s7b7a
-.section s7b7b
-.section s7b8a
-.section s7b8b
-.section s7b9a
-.section s7b9b
-.section s7b0a
-.section s7b0b
-.section s7caa
-.section s7cab
-.section s7cba
-.section s7cbb
-.section s7cca
-.section s7ccb
-.section s7cda
-.section s7cdb
-.section s7cea
-.section s7ceb
-.section s7cfa
-.section s7cfb
-.section s7cga
-.section s7cgb
-.section s7cha
-.section s7chb
-.section s7cia
-.section s7cib
-.section s7cja
-.section s7cjb
-.section s7cka
-.section s7ckb
-.section s7cla
-.section s7clb
-.section s7cma
-.section s7cmb
-.section s7cna
-.section s7cnb
-.section s7coa
-.section s7cob
-.section s7cpa
-.section s7cpb
-.section s7cqa
-.section s7cqb
-.section s7cra
-.section s7crb
-.section s7csa
-.section s7csb
-.section s7cta
-.section s7ctb
-.section s7cua
-.section s7cub
-.section s7cva
-.section s7cvb
-.section s7cwa
-.section s7cwb
-.section s7cxa
-.section s7cxb
-.section s7cya
-.section s7cyb
-.section s7cza
-.section s7czb
-.section s7c1a
-.section s7c1b
-.section s7c2a
-.section s7c2b
-.section s7c3a
-.section s7c3b
-.section s7c4a
-.section s7c4b
-.section s7c5a
-.section s7c5b
-.section s7c6a
-.section s7c6b
-.section s7c7a
-.section s7c7b
-.section s7c8a
-.section s7c8b
-.section s7c9a
-.section s7c9b
-.section s7c0a
-.section s7c0b
-.section s7daa
-.section s7dab
-.section s7dba
-.section s7dbb
-.section s7dca
-.section s7dcb
-.section s7dda
-.section s7ddb
-.section s7dea
-.section s7deb
-.section s7dfa
-.section s7dfb
-.section s7dga
-.section s7dgb
-.section s7dha
-.section s7dhb
-.section s7dia
-.section s7dib
-.section s7dja
-.section s7djb
-.section s7dka
-.section s7dkb
-.section s7dla
-.section s7dlb
-.section s7dma
-.section s7dmb
-.section s7dna
-.section s7dnb
-.section s7doa
-.section s7dob
-.section s7dpa
-.section s7dpb
-.section s7dqa
-.section s7dqb
-.section s7dra
-.section s7drb
-.section s7dsa
-.section s7dsb
-.section s7dta
-.section s7dtb
-.section s7dua
-.section s7dub
-.section s7dva
-.section s7dvb
-.section s7dwa
-.section s7dwb
-.section s7dxa
-.section s7dxb
-.section s7dya
-.section s7dyb
-.section s7dza
-.section s7dzb
-.section s7d1a
-.section s7d1b
-.section s7d2a
-.section s7d2b
-.section s7d3a
-.section s7d3b
-.section s7d4a
-.section s7d4b
-.section s7d5a
-.section s7d5b
-.section s7d6a
-.section s7d6b
-.section s7d7a
-.section s7d7b
-.section s7d8a
-.section s7d8b
-.section s7d9a
-.section s7d9b
-.section s7d0a
-.section s7d0b
-.section s7eaa
-.section s7eab
-.section s7eba
-.section s7ebb
-.section s7eca
-.section s7ecb
-.section s7eda
-.section s7edb
-.section s7eea
-.section s7eeb
-.section s7efa
-.section s7efb
-.section s7ega
-.section s7egb
-.section s7eha
-.section s7ehb
-.section s7eia
-.section s7eib
-.section s7eja
-.section s7ejb
-.section s7eka
-.section s7ekb
-.section s7ela
-.section s7elb
-.section s7ema
-.section s7emb
-.section s7ena
-.section s7enb
-.section s7eoa
-.section s7eob
-.section s7epa
-.section s7epb
-.section s7eqa
-.section s7eqb
-.section s7era
-.section s7erb
-.section s7esa
-.section s7esb
-.section s7eta
-.section s7etb
-.section s7eua
-.section s7eub
-.section s7eva
-.section s7evb
-.section s7ewa
-.section s7ewb
-.section s7exa
-.section s7exb
-.section s7eya
-.section s7eyb
-.section s7eza
-.section s7ezb
-.section s7e1a
-.section s7e1b
-.section s7e2a
-.section s7e2b
-.section s7e3a
-.section s7e3b
-.section s7e4a
-.section s7e4b
-.section s7e5a
-.section s7e5b
-.section s7e6a
-.section s7e6b
-.section s7e7a
-.section s7e7b
-.section s7e8a
-.section s7e8b
-.section s7e9a
-.section s7e9b
-.section s7e0a
-.section s7e0b
-.section s7faa
-.section s7fab
-.section s7fba
-.section s7fbb
-.section s7fca
-.section s7fcb
-.section s7fda
-.section s7fdb
-.section s7fea
-.section s7feb
-.section s7ffa
-.section s7ffb
-.section s7fga
-.section s7fgb
-.section s7fha
-.section s7fhb
-.section s7fia
-.section s7fib
-.section s7fja
-.section s7fjb
-.section s7fka
-.section s7fkb
-.section s7fla
-.section s7flb
-.section s7fma
-.section s7fmb
-.section s7fna
-.section s7fnb
-.section s7foa
-.section s7fob
-.section s7fpa
-.section s7fpb
-.section s7fqa
-.section s7fqb
-.section s7fra
-.section s7frb
-.section s7fsa
-.section s7fsb
-.section s7fta
-.section s7ftb
-.section s7fua
-.section s7fub
-.section s7fva
-.section s7fvb
-.section s7fwa
-.section s7fwb
-.section s7fxa
-.section s7fxb
-.section s7fya
-.section s7fyb
-.section s7fza
-.section s7fzb
-.section s7f1a
-.section s7f1b
-.section s7f2a
-.section s7f2b
-.section s7f3a
-.section s7f3b
-.section s7f4a
-.section s7f4b
-.section s7f5a
-.section s7f5b
-.section s7f6a
-.section s7f6b
-.section s7f7a
-.section s7f7b
-.section s7f8a
-.section s7f8b
-.section s7f9a
-.section s7f9b
-.section s7f0a
-.section s7f0b
-.section s7gaa
-.section s7gab
-.section s7gba
-.section s7gbb
-.section s7gca
-.section s7gcb
-.section s7gda
-.section s7gdb
-.section s7gea
-.section s7geb
-.section s7gfa
-.section s7gfb
-.section s7gga
-.section s7ggb
-.section s7gha
-.section s7ghb
-.section s7gia
-.section s7gib
-.section s7gja
-.section s7gjb
-.section s7gka
-.section s7gkb
-.section s7gla
-.section s7glb
-.section s7gma
-.section s7gmb
-.section s7gna
-.section s7gnb
-.section s7goa
-.section s7gob
-.section s7gpa
-.section s7gpb
-.section s7gqa
-.section s7gqb
-.section s7gra
-.section s7grb
-.section s7gsa
-.section s7gsb
-.section s7gta
-.section s7gtb
-.section s7gua
-.section s7gub
-.section s7gva
-.section s7gvb
-.section s7gwa
-.section s7gwb
-.section s7gxa
-.section s7gxb
-.section s7gya
-.section s7gyb
-.section s7gza
-.section s7gzb
-.section s7g1a
-.section s7g1b
-.section s7g2a
-.section s7g2b
-.section s7g3a
-.section s7g3b
-.section s7g4a
-.section s7g4b
-.section s7g5a
-.section s7g5b
-.section s7g6a
-.section s7g6b
-.section s7g7a
-.section s7g7b
-.section s7g8a
-.section s7g8b
-.section s7g9a
-.section s7g9b
-.section s7g0a
-.section s7g0b
-.section s7haa
-.section s7hab
-.section s7hba
-.section s7hbb
-.section s7hca
-.section s7hcb
-.section s7hda
-.section s7hdb
-.section s7hea
-.section s7heb
-.section s7hfa
-.section s7hfb
-.section s7hga
-.section s7hgb
-.section s7hha
-.section s7hhb
-.section s7hia
-.section s7hib
-.section s7hja
-.section s7hjb
-.section s7hka
-.section s7hkb
-.section s7hla
-.section s7hlb
-.section s7hma
-.section s7hmb
-.section s7hna
-.section s7hnb
-.section s7hoa
-.section s7hob
-.section s7hpa
-.section s7hpb
-.section s7hqa
-.section s7hqb
-.section s7hra
-.section s7hrb
-.section s7hsa
-.section s7hsb
-.section s7hta
-.section s7htb
-.section s7hua
-.section s7hub
-.section s7hva
-.section s7hvb
-.section s7hwa
-.section s7hwb
-.section s7hxa
-.section s7hxb
-.section s7hya
-.section s7hyb
-.section s7hza
-.section s7hzb
-.section s7h1a
-.section s7h1b
-.section s7h2a
-.section s7h2b
-.section s7h3a
-.section s7h3b
-.section s7h4a
-.section s7h4b
-.section s7h5a
-.section s7h5b
-.section s7h6a
-.section s7h6b
-.section s7h7a
-.section s7h7b
-.section s7h8a
-.section s7h8b
-.section s7h9a
-.section s7h9b
-.section s7h0a
-.section s7h0b
-.section s7iaa
-.section s7iab
-.section s7iba
-.section s7ibb
-.section s7ica
-.section s7icb
-.section s7ida
-.section s7idb
-.section s7iea
-.section s7ieb
-.section s7ifa
-.section s7ifb
-.section s7iga
-.section s7igb
-.section s7iha
-.section s7ihb
-.section s7iia
-.section s7iib
-.section s7ija
-.section s7ijb
-.section s7ika
-.section s7ikb
-.section s7ila
-.section s7ilb
-.section s7ima
-.section s7imb
-.section s7ina
-.section s7inb
-.section s7ioa
-.section s7iob
-.section s7ipa
-.section s7ipb
-.section s7iqa
-.section s7iqb
-.section s7ira
-.section s7irb
-.section s7isa
-.section s7isb
-.section s7ita
-.section s7itb
-.section s7iua
-.section s7iub
-.section s7iva
-.section s7ivb
-.section s7iwa
-.section s7iwb
-.section s7ixa
-.section s7ixb
-.section s7iya
-.section s7iyb
-.section s7iza
-.section s7izb
-.section s7i1a
-.section s7i1b
-.section s7i2a
-.section s7i2b
-.section s7i3a
-.section s7i3b
-.section s7i4a
-.section s7i4b
-.section s7i5a
-.section s7i5b
-.section s7i6a
-.section s7i6b
-.section s7i7a
-.section s7i7b
-.section s7i8a
-.section s7i8b
-.section s7i9a
-.section s7i9b
-.section s7i0a
-.section s7i0b
-.section s7jaa
-.section s7jab
-.section s7jba
-.section s7jbb
-.section s7jca
-.section s7jcb
-.section s7jda
-.section s7jdb
-.section s7jea
-.section s7jeb
-.section s7jfa
-.section s7jfb
-.section s7jga
-.section s7jgb
-.section s7jha
-.section s7jhb
-.section s7jia
-.section s7jib
-.section s7jja
-.section s7jjb
-.section s7jka
-.section s7jkb
-.section s7jla
-.section s7jlb
-.section s7jma
-.section s7jmb
-.section s7jna
-.section s7jnb
-.section s7joa
-.section s7job
-.section s7jpa
-.section s7jpb
-.section s7jqa
-.section s7jqb
-.section s7jra
-.section s7jrb
-.section s7jsa
-.section s7jsb
-.section s7jta
-.section s7jtb
-.section s7jua
-.section s7jub
-.section s7jva
-.section s7jvb
-.section s7jwa
-.section s7jwb
-.section s7jxa
-.section s7jxb
-.section s7jya
-.section s7jyb
-.section s7jza
-.section s7jzb
-.section s7j1a
-.section s7j1b
-.section s7j2a
-.section s7j2b
-.section s7j3a
-.section s7j3b
-.section s7j4a
-.section s7j4b
-.section s7j5a
-.section s7j5b
-.section s7j6a
-.section s7j6b
-.section s7j7a
-.section s7j7b
-.section s7j8a
-.section s7j8b
-.section s7j9a
-.section s7j9b
-.section s7j0a
-.section s7j0b
-.section s7kaa
-.section s7kab
-.section s7kba
-.section s7kbb
-.section s7kca
-.section s7kcb
-.section s7kda
-.section s7kdb
-.section s7kea
-.section s7keb
-.section s7kfa
-.section s7kfb
-.section s7kga
-.section s7kgb
-.section s7kha
-.section s7khb
-.section s7kia
-.section s7kib
-.section s7kja
-.section s7kjb
-.section s7kka
-.section s7kkb
-.section s7kla
-.section s7klb
-.section s7kma
-.section s7kmb
-.section s7kna
-.section s7knb
-.section s7koa
-.section s7kob
-.section s7kpa
-.section s7kpb
-.section s7kqa
-.section s7kqb
-.section s7kra
-.section s7krb
-.section s7ksa
-.section s7ksb
-.section s7kta
-.section s7ktb
-.section s7kua
-.section s7kub
-.section s7kva
-.section s7kvb
-.section s7kwa
-.section s7kwb
-.section s7kxa
-.section s7kxb
-.section s7kya
-.section s7kyb
-.section s7kza
-.section s7kzb
-.section s7k1a
-.section s7k1b
-.section s7k2a
-.section s7k2b
-.section s7k3a
-.section s7k3b
-.section s7k4a
-.section s7k4b
-.section s7k5a
-.section s7k5b
-.section s7k6a
-.section s7k6b
-.section s7k7a
-.section s7k7b
-.section s7k8a
-.section s7k8b
-.section s7k9a
-.section s7k9b
-.section s7k0a
-.section s7k0b
-.section s7laa
-.section s7lab
-.section s7lba
-.section s7lbb
-.section s7lca
-.section s7lcb
-.section s7lda
-.section s7ldb
-.section s7lea
-.section s7leb
-.section s7lfa
-.section s7lfb
-.section s7lga
-.section s7lgb
-.section s7lha
-.section s7lhb
-.section s7lia
-.section s7lib
-.section s7lja
-.section s7ljb
-.section s7lka
-.section s7lkb
-.section s7lla
-.section s7llb
-.section s7lma
-.section s7lmb
-.section s7lna
-.section s7lnb
-.section s7loa
-.section s7lob
-.section s7lpa
-.section s7lpb
-.section s7lqa
-.section s7lqb
-.section s7lra
-.section s7lrb
-.section s7lsa
-.section s7lsb
-.section s7lta
-.section s7ltb
-.section s7lua
-.section s7lub
-.section s7lva
-.section s7lvb
-.section s7lwa
-.section s7lwb
-.section s7lxa
-.section s7lxb
-.section s7lya
-.section s7lyb
-.section s7lza
-.section s7lzb
-.section s7l1a
-.section s7l1b
-.section s7l2a
-.section s7l2b
-.section s7l3a
-.section s7l3b
-.section s7l4a
-.section s7l4b
-.section s7l5a
-.section s7l5b
-.section s7l6a
-.section s7l6b
-.section s7l7a
-.section s7l7b
-.section s7l8a
-.section s7l8b
-.section s7l9a
-.section s7l9b
-.section s7l0a
-.section s7l0b
-.section s7maa
-.section s7mab
-.section s7mba
-.section s7mbb
-.section s7mca
-.section s7mcb
-.section s7mda
-.section s7mdb
-.section s7mea
-.section s7meb
-.section s7mfa
-.section s7mfb
-.section s7mga
-.section s7mgb
-.section s7mha
-.section s7mhb
-.section s7mia
-.section s7mib
-.section s7mja
-.section s7mjb
-.section s7mka
-.section s7mkb
-.section s7mla
-.section s7mlb
-.section s7mma
-.section s7mmb
-.section s7mna
-.section s7mnb
-.section s7moa
-.section s7mob
-.section s7mpa
-.section s7mpb
-.section s7mqa
-.section s7mqb
-.section s7mra
-.section s7mrb
-.section s7msa
-.section s7msb
-.section s7mta
-.section s7mtb
-.section s7mua
-.section s7mub
-.section s7mva
-.section s7mvb
-.section s7mwa
-.section s7mwb
-.section s7mxa
-.section s7mxb
-.section s7mya
-.section s7myb
-.section s7mza
-.section s7mzb
-.section s7m1a
-.section s7m1b
-.section s7m2a
-.section s7m2b
-.section s7m3a
-.section s7m3b
-.section s7m4a
-.section s7m4b
-.section s7m5a
-.section s7m5b
-.section s7m6a
-.section s7m6b
-.section s7m7a
-.section s7m7b
-.section s7m8a
-.section s7m8b
-.section s7m9a
-.section s7m9b
-.section s7m0a
-.section s7m0b
-.section s7naa
-.section s7nab
-.section s7nba
-.section s7nbb
-.section s7nca
-.section s7ncb
-.section s7nda
-.section s7ndb
-.section s7nea
-.section s7neb
-.section s7nfa
-.section s7nfb
-.section s7nga
-.section s7ngb
-.section s7nha
-.section s7nhb
-.section s7nia
-.section s7nib
-.section s7nja
-.section s7njb
-.section s7nka
-.section s7nkb
-.section s7nla
-.section s7nlb
-.section s7nma
-.section s7nmb
-.section s7nna
-.section s7nnb
-.section s7noa
-.section s7nob
-.section s7npa
-.section s7npb
-.section s7nqa
-.section s7nqb
-.section s7nra
-.section s7nrb
-.section s7nsa
-.section s7nsb
-.section s7nta
-.section s7ntb
-.section s7nua
-.section s7nub
-.section s7nva
-.section s7nvb
-.section s7nwa
-.section s7nwb
-.section s7nxa
-.section s7nxb
-.section s7nya
-.section s7nyb
-.section s7nza
-.section s7nzb
-.section s7n1a
-.section s7n1b
-.section s7n2a
-.section s7n2b
-.section s7n3a
-.section s7n3b
-.section s7n4a
-.section s7n4b
-.section s7n5a
-.section s7n5b
-.section s7n6a
-.section s7n6b
-.section s7n7a
-.section s7n7b
-.section s7n8a
-.section s7n8b
-.section s7n9a
-.section s7n9b
-.section s7n0a
-.section s7n0b
-.section s7oaa
-.section s7oab
-.section s7oba
-.section s7obb
-.section s7oca
-.section s7ocb
-.section s7oda
-.section s7odb
-.section s7oea
-.section s7oeb
-.section s7ofa
-.section s7ofb
-.section s7oga
-.section s7ogb
-.section s7oha
-.section s7ohb
-.section s7oia
-.section s7oib
-.section s7oja
-.section s7ojb
-.section s7oka
-.section s7okb
-.section s7ola
-.section s7olb
-.section s7oma
-.section s7omb
-.section s7ona
-.section s7onb
-.section s7ooa
-.section s7oob
-.section s7opa
-.section s7opb
-.section s7oqa
-.section s7oqb
-.section s7ora
-.section s7orb
-.section s7osa
-.section s7osb
-.section s7ota
-.section s7otb
-.section s7oua
-.section s7oub
-.section s7ova
-.section s7ovb
-.section s7owa
-.section s7owb
-.section s7oxa
-.section s7oxb
-.section s7oya
-.section s7oyb
-.section s7oza
-.section s7ozb
-.section s7o1a
-.section s7o1b
-.section s7o2a
-.section s7o2b
-.section s7o3a
-.section s7o3b
-.section s7o4a
-.section s7o4b
-.section s7o5a
-.section s7o5b
-.section s7o6a
-.section s7o6b
-.section s7o7a
-.section s7o7b
-.section s7o8a
-.section s7o8b
-.section s7o9a
-.section s7o9b
-.section s7o0a
-.section s7o0b
-.section s7paa
-.section s7pab
-.section s7pba
-.section s7pbb
-.section s7pca
-.section s7pcb
-.section s7pda
-.section s7pdb
-.section s7pea
-.section s7peb
-.section s7pfa
-.section s7pfb
-.section s7pga
-.section s7pgb
-.section s7pha
-.section s7phb
-.section s7pia
-.section s7pib
-.section s7pja
-.section s7pjb
-.section s7pka
-.section s7pkb
-.section s7pla
-.section s7plb
-.section s7pma
-.section s7pmb
-.section s7pna
-.section s7pnb
-.section s7poa
-.section s7pob
-.section s7ppa
-.section s7ppb
-.section s7pqa
-.section s7pqb
-.section s7pra
-.section s7prb
-.section s7psa
-.section s7psb
-.section s7pta
-.section s7ptb
-.section s7pua
-.section s7pub
-.section s7pva
-.section s7pvb
-.section s7pwa
-.section s7pwb
-.section s7pxa
-.section s7pxb
-.section s7pya
-.section s7pyb
-.section s7pza
-.section s7pzb
-.section s7p1a
-.section s7p1b
-.section s7p2a
-.section s7p2b
-.section s7p3a
-.section s7p3b
-.section s7p4a
-.section s7p4b
-.section s7p5a
-.section s7p5b
-.section s7p6a
-.section s7p6b
-.section s7p7a
-.section s7p7b
-.section s7p8a
-.section s7p8b
-.section s7p9a
-.section s7p9b
-.section s7p0a
-.section s7p0b
-.section s7qaa
-.section s7qab
-.section s7qba
-.section s7qbb
-.section s7qca
-.section s7qcb
-.section s7qda
-.section s7qdb
-.section s7qea
-.section s7qeb
-.section s7qfa
-.section s7qfb
-.section s7qga
-.section s7qgb
-.section s7qha
-.section s7qhb
-.section s7qia
-.section s7qib
-.section s7qja
-.section s7qjb
-.section s7qka
-.section s7qkb
-.section s7qla
-.section s7qlb
-.section s7qma
-.section s7qmb
-.section s7qna
-.section s7qnb
-.section s7qoa
-.section s7qob
-.section s7qpa
-.section s7qpb
-.section s7qqa
-.section s7qqb
-.section s7qra
-.section s7qrb
-.section s7qsa
-.section s7qsb
-.section s7qta
-.section s7qtb
-.section s7qua
-.section s7qub
-.section s7qva
-.section s7qvb
-.section s7qwa
-.section s7qwb
-.section s7qxa
-.section s7qxb
-.section s7qya
-.section s7qyb
-.section s7qza
-.section s7qzb
-.section s7q1a
-.section s7q1b
-.section s7q2a
-.section s7q2b
-.section s7q3a
-.section s7q3b
-.section s7q4a
-.section s7q4b
-.section s7q5a
-.section s7q5b
-.section s7q6a
-.section s7q6b
-.section s7q7a
-.section s7q7b
-.section s7q8a
-.section s7q8b
-.section s7q9a
-.section s7q9b
-.section s7q0a
-.section s7q0b
-.section s7raa
-.section s7rab
-.section s7rba
-.section s7rbb
-.section s7rca
-.section s7rcb
-.section s7rda
-.section s7rdb
-.section s7rea
-.section s7reb
-.section s7rfa
-.section s7rfb
-.section s7rga
-.section s7rgb
-.section s7rha
-.section s7rhb
-.section s7ria
-.section s7rib
-.section s7rja
-.section s7rjb
-.section s7rka
-.section s7rkb
-.section s7rla
-.section s7rlb
-.section s7rma
-.section s7rmb
-.section s7rna
-.section s7rnb
-.section s7roa
-.section s7rob
-.section s7rpa
-.section s7rpb
-.section s7rqa
-.section s7rqb
-.section s7rra
-.section s7rrb
-.section s7rsa
-.section s7rsb
-.section s7rta
-.section s7rtb
-.section s7rua
-.section s7rub
-.section s7rva
-.section s7rvb
-.section s7rwa
-.section s7rwb
-.section s7rxa
-.section s7rxb
-.section s7rya
-.section s7ryb
-.section s7rza
-.section s7rzb
-.section s7r1a
-.section s7r1b
-.section s7r2a
-.section s7r2b
-.section s7r3a
-.section s7r3b
-.section s7r4a
-.section s7r4b
-.section s7r5a
-.section s7r5b
-.section s7r6a
-.section s7r6b
-.section s7r7a
-.section s7r7b
-.section s7r8a
-.section s7r8b
-.section s7r9a
-.section s7r9b
-.section s7r0a
-.section s7r0b
-.section s7saa
-.section s7sab
-.section s7sba
-.section s7sbb
-.section s7sca
-.section s7scb
-.section s7sda
-.section s7sdb
-.section s7sea
-.section s7seb
-.section s7sfa
-.section s7sfb
-.section s7sga
-.section s7sgb
-.section s7sha
-.section s7shb
-.section s7sia
-.section s7sib
-.section s7sja
-.section s7sjb
-.section s7ska
-.section s7skb
-.section s7sla
-.section s7slb
-.section s7sma
-.section s7smb
-.section s7sna
-.section s7snb
-.section s7soa
-.section s7sob
-.section s7spa
-.section s7spb
-.section s7sqa
-.section s7sqb
-.section s7sra
-.section s7srb
-.section s7ssa
-.section s7ssb
-.section s7sta
-.section s7stb
-.section s7sua
-.section s7sub
-.section s7sva
-.section s7svb
-.section s7swa
-.section s7swb
-.section s7sxa
-.section s7sxb
-.section s7sya
-.section s7syb
-.section s7sza
-.section s7szb
-.section s7s1a
-.section s7s1b
-.section s7s2a
-.section s7s2b
-.section s7s3a
-.section s7s3b
-.section s7s4a
-.section s7s4b
-.section s7s5a
-.section s7s5b
-.section s7s6a
-.section s7s6b
-.section s7s7a
-.section s7s7b
-.section s7s8a
-.section s7s8b
-.section s7s9a
-.section s7s9b
-.section s7s0a
-.section s7s0b
-.section s7taa
-.section s7tab
-.section s7tba
-.section s7tbb
-.section s7tca
-.section s7tcb
-.section s7tda
-.section s7tdb
-.section s7tea
-.section s7teb
-.section s7tfa
-.section s7tfb
-.section s7tga
-.section s7tgb
-.section s7tha
-.section s7thb
-.section s7tia
-.section s7tib
-.section s7tja
-.section s7tjb
-.section s7tka
-.section s7tkb
-.section s7tla
-.section s7tlb
-.section s7tma
-.section s7tmb
-.section s7tna
-.section s7tnb
-.section s7toa
-.section s7tob
-.section s7tpa
-.section s7tpb
-.section s7tqa
-.section s7tqb
-.section s7tra
-.section s7trb
-.section s7tsa
-.section s7tsb
-.section s7tta
-.section s7ttb
-.section s7tua
-.section s7tub
-.section s7tva
-.section s7tvb
-.section s7twa
-.section s7twb
-.section s7txa
-.section s7txb
-.section s7tya
-.section s7tyb
-.section s7tza
-.section s7tzb
-.section s7t1a
-.section s7t1b
-.section s7t2a
-.section s7t2b
-.section s7t3a
-.section s7t3b
-.section s7t4a
-.section s7t4b
-.section s7t5a
-.section s7t5b
-.section s7t6a
-.section s7t6b
-.section s7t7a
-.section s7t7b
-.section s7t8a
-.section s7t8b
-.section s7t9a
-.section s7t9b
-.section s7t0a
-.section s7t0b
-.section s7uaa
-.section s7uab
-.section s7uba
-.section s7ubb
-.section s7uca
-.section s7ucb
-.section s7uda
-.section s7udb
-.section s7uea
-.section s7ueb
-.section s7ufa
-.section s7ufb
-.section s7uga
-.section s7ugb
-.section s7uha
-.section s7uhb
-.section s7uia
-.section s7uib
-.section s7uja
-.section s7ujb
-.section s7uka
-.section s7ukb
-.section s7ula
-.section s7ulb
-.section s7uma
-.section s7umb
-.section s7una
-.section s7unb
-.section s7uoa
-.section s7uob
-.section s7upa
-.section s7upb
-.section s7uqa
-.section s7uqb
-.section s7ura
-.section s7urb
-.section s7usa
-.section s7usb
-.section s7uta
-.section s7utb
-.section s7uua
-.section s7uub
-.section s7uva
-.section s7uvb
-.section s7uwa
-.section s7uwb
-.section s7uxa
-.section s7uxb
-.section s7uya
-.section s7uyb
-.section s7uza
-.section s7uzb
-.section s7u1a
-.section s7u1b
-.section s7u2a
-.section s7u2b
-.section s7u3a
-.section s7u3b
-.section s7u4a
-.section s7u4b
-.section s7u5a
-.section s7u5b
-.section s7u6a
-.section s7u6b
-.section s7u7a
-.section s7u7b
-.section s7u8a
-.section s7u8b
-.section s7u9a
-.section s7u9b
-.section s7u0a
-.section s7u0b
-.section s7vaa
-.section s7vab
-.section s7vba
-.section s7vbb
-.section s7vca
-.section s7vcb
-.section s7vda
-.section s7vdb
-.section s7vea
-.section s7veb
-.section s7vfa
-.section s7vfb
-.section s7vga
-.section s7vgb
-.section s7vha
-.section s7vhb
-.section s7via
-.section s7vib
-.section s7vja
-.section s7vjb
-.section s7vka
-.section s7vkb
-.section s7vla
-.section s7vlb
-.section s7vma
-.section s7vmb
-.section s7vna
-.section s7vnb
-.section s7voa
-.section s7vob
-.section s7vpa
-.section s7vpb
-.section s7vqa
-.section s7vqb
-.section s7vra
-.section s7vrb
-.section s7vsa
-.section s7vsb
-.section s7vta
-.section s7vtb
-.section s7vua
-.section s7vub
-.section s7vva
-.section s7vvb
-.section s7vwa
-.section s7vwb
-.section s7vxa
-.section s7vxb
-.section s7vya
-.section s7vyb
-.section s7vza
-.section s7vzb
-.section s7v1a
-.section s7v1b
-.section s7v2a
-.section s7v2b
-.section s7v3a
-.section s7v3b
-.section s7v4a
-.section s7v4b
-.section s7v5a
-.section s7v5b
-.section s7v6a
-.section s7v6b
-.section s7v7a
-.section s7v7b
-.section s7v8a
-.section s7v8b
-.section s7v9a
-.section s7v9b
-.section s7v0a
-.section s7v0b
-.section s7waa
-.section s7wab
-.section s7wba
-.section s7wbb
-.section s7wca
-.section s7wcb
-.section s7wda
-.section s7wdb
-.section s7wea
-.section s7web
-.section s7wfa
-.section s7wfb
-.section s7wga
-.section s7wgb
-.section s7wha
-.section s7whb
-.section s7wia
-.section s7wib
-.section s7wja
-.section s7wjb
-.section s7wka
-.section s7wkb
-.section s7wla
-.section s7wlb
-.section s7wma
-.section s7wmb
-.section s7wna
-.section s7wnb
-.section s7woa
-.section s7wob
-.section s7wpa
-.section s7wpb
-.section s7wqa
-.section s7wqb
-.section s7wra
-.section s7wrb
-.section s7wsa
-.section s7wsb
-.section s7wta
-.section s7wtb
-.section s7wua
-.section s7wub
-.section s7wva
-.section s7wvb
-.section s7wwa
-.section s7wwb
-.section s7wxa
-.section s7wxb
-.section s7wya
-.section s7wyb
-.section s7wza
-.section s7wzb
-.section s7w1a
-.section s7w1b
-.section s7w2a
-.section s7w2b
-.section s7w3a
-.section s7w3b
-.section s7w4a
-.section s7w4b
-.section s7w5a
-.section s7w5b
-.section s7w6a
-.section s7w6b
-.section s7w7a
-.section s7w7b
-.section s7w8a
-.section s7w8b
-.section s7w9a
-.section s7w9b
-.section s7w0a
-.section s7w0b
-.section s7xaa
-.section s7xab
-.section s7xba
-.section s7xbb
-.section s7xca
-.section s7xcb
-.section s7xda
-.section s7xdb
-.section s7xea
-.section s7xeb
-.section s7xfa
-.section s7xfb
-.section s7xga
-.section s7xgb
-.section s7xha
-.section s7xhb
-.section s7xia
-.section s7xib
-.section s7xja
-.section s7xjb
-.section s7xka
-.section s7xkb
-.section s7xla
-.section s7xlb
-.section s7xma
-.section s7xmb
-.section s7xna
-.section s7xnb
-.section s7xoa
-.section s7xob
-.section s7xpa
-.section s7xpb
-.section s7xqa
-.section s7xqb
-.section s7xra
-.section s7xrb
-.section s7xsa
-.section s7xsb
-.section s7xta
-.section s7xtb
-.section s7xua
-.section s7xub
-.section s7xva
-.section s7xvb
-.section s7xwa
-.section s7xwb
-.section s7xxa
-.section s7xxb
-.section s7xya
-.section s7xyb
-.section s7xza
-.section s7xzb
-.section s7x1a
-.section s7x1b
-.section s7x2a
-.section s7x2b
-.section s7x3a
-.section s7x3b
-.section s7x4a
-.section s7x4b
-.section s7x5a
-.section s7x5b
-.section s7x6a
-.section s7x6b
-.section s7x7a
-.section s7x7b
-.section s7x8a
-.section s7x8b
-.section s7x9a
-.section s7x9b
-.section s7x0a
-.section s7x0b
-.section s7yaa
-.section s7yab
-.section s7yba
-.section s7ybb
-.section s7yca
-.section s7ycb
-.section s7yda
-.section s7ydb
-.section s7yea
-.section s7yeb
-.section s7yfa
-.section s7yfb
-.section s7yga
-.section s7ygb
-.section s7yha
-.section s7yhb
-.section s7yia
-.section s7yib
-.section s7yja
-.section s7yjb
-.section s7yka
-.section s7ykb
-.section s7yla
-.section s7ylb
-.section s7yma
-.section s7ymb
-.section s7yna
-.section s7ynb
-.section s7yoa
-.section s7yob
-.section s7ypa
-.section s7ypb
-.section s7yqa
-.section s7yqb
-.section s7yra
-.section s7yrb
-.section s7ysa
-.section s7ysb
-.section s7yta
-.section s7ytb
-.section s7yua
-.section s7yub
-.section s7yva
-.section s7yvb
-.section s7ywa
-.section s7ywb
-.section s7yxa
-.section s7yxb
-.section s7yya
-.section s7yyb
-.section s7yza
-.section s7yzb
-.section s7y1a
-.section s7y1b
-.section s7y2a
-.section s7y2b
-.section s7y3a
-.section s7y3b
-.section s7y4a
-.section s7y4b
-.section s7y5a
-.section s7y5b
-.section s7y6a
-.section s7y6b
-.section s7y7a
-.section s7y7b
-.section s7y8a
-.section s7y8b
-.section s7y9a
-.section s7y9b
-.section s7y0a
-.section s7y0b
-.section s7zaa
-.section s7zab
-.section s7zba
-.section s7zbb
-.section s7zca
-.section s7zcb
-.section s7zda
-.section s7zdb
-.section s7zea
-.section s7zeb
-.section s7zfa
-.section s7zfb
-.section s7zga
-.section s7zgb
-.section s7zha
-.section s7zhb
-.section s7zia
-.section s7zib
-.section s7zja
-.section s7zjb
-.section s7zka
-.section s7zkb
-.section s7zla
-.section s7zlb
-.section s7zma
-.section s7zmb
-.section s7zna
-.section s7znb
-.section s7zoa
-.section s7zob
-.section s7zpa
-.section s7zpb
-.section s7zqa
-.section s7zqb
-.section s7zra
-.section s7zrb
-.section s7zsa
-.section s7zsb
-.section s7zta
-.section s7ztb
-.section s7zua
-.section s7zub
-.section s7zva
-.section s7zvb
-.section s7zwa
-.section s7zwb
-.section s7zxa
-.section s7zxb
-.section s7zya
-.section s7zyb
-.section s7zza
-.section s7zzb
-.section s7z1a
-.section s7z1b
-.section s7z2a
-.section s7z2b
-.section s7z3a
-.section s7z3b
-.section s7z4a
-.section s7z4b
-.section s7z5a
-.section s7z5b
-.section s7z6a
-.section s7z6b
-.section s7z7a
-.section s7z7b
-.section s7z8a
-.section s7z8b
-.section s7z9a
-.section s7z9b
-.section s7z0a
-.section s7z0b
-.section s71aa
-.section s71ab
-.section s71ba
-.section s71bb
-.section s71ca
-.section s71cb
-.section s71da
-.section s71db
-.section s71ea
-.section s71eb
-.section s71fa
-.section s71fb
-.section s71ga
-.section s71gb
-.section s71ha
-.section s71hb
-.section s71ia
-.section s71ib
-.section s71ja
-.section s71jb
-.section s71ka
-.section s71kb
-.section s71la
-.section s71lb
-.section s71ma
-.section s71mb
-.section s71na
-.section s71nb
-.section s71oa
-.section s71ob
-.section s71pa
-.section s71pb
-.section s71qa
-.section s71qb
-.section s71ra
-.section s71rb
-.section s71sa
-.section s71sb
-.section s71ta
-.section s71tb
-.section s71ua
-.section s71ub
-.section s71va
-.section s71vb
-.section s71wa
-.section s71wb
-.section s71xa
-.section s71xb
-.section s71ya
-.section s71yb
-.section s71za
-.section s71zb
-.section s711a
-.section s711b
-.section s712a
-.section s712b
-.section s713a
-.section s713b
-.section s714a
-.section s714b
-.section s715a
-.section s715b
-.section s716a
-.section s716b
-.section s717a
-.section s717b
-.section s718a
-.section s718b
-.section s719a
-.section s719b
-.section s710a
-.section s710b
-.section s72aa
-.section s72ab
-.section s72ba
-.section s72bb
-.section s72ca
-.section s72cb
-.section s72da
-.section s72db
-.section s72ea
-.section s72eb
-.section s72fa
-.section s72fb
-.section s72ga
-.section s72gb
-.section s72ha
-.section s72hb
-.section s72ia
-.section s72ib
-.section s72ja
-.section s72jb
-.section s72ka
-.section s72kb
-.section s72la
-.section s72lb
-.section s72ma
-.section s72mb
-.section s72na
-.section s72nb
-.section s72oa
-.section s72ob
-.section s72pa
-.section s72pb
-.section s72qa
-.section s72qb
-.section s72ra
-.section s72rb
-.section s72sa
-.section s72sb
-.section s72ta
-.section s72tb
-.section s72ua
-.section s72ub
-.section s72va
-.section s72vb
-.section s72wa
-.section s72wb
-.section s72xa
-.section s72xb
-.section s72ya
-.section s72yb
-.section s72za
-.section s72zb
-.section s721a
-.section s721b
-.section s722a
-.section s722b
-.section s723a
-.section s723b
-.section s724a
-.section s724b
-.section s725a
-.section s725b
-.section s726a
-.section s726b
-.section s727a
-.section s727b
-.section s728a
-.section s728b
-.section s729a
-.section s729b
-.section s720a
-.section s720b
-.section s73aa
-.section s73ab
-.section s73ba
-.section s73bb
-.section s73ca
-.section s73cb
-.section s73da
-.section s73db
-.section s73ea
-.section s73eb
-.section s73fa
-.section s73fb
-.section s73ga
-.section s73gb
-.section s73ha
-.section s73hb
-.section s73ia
-.section s73ib
-.section s73ja
-.section s73jb
-.section s73ka
-.section s73kb
-.section s73la
-.section s73lb
-.section s73ma
-.section s73mb
-.section s73na
-.section s73nb
-.section s73oa
-.section s73ob
-.section s73pa
-.section s73pb
-.section s73qa
-.section s73qb
-.section s73ra
-.section s73rb
-.section s73sa
-.section s73sb
-.section s73ta
-.section s73tb
-.section s73ua
-.section s73ub
-.section s73va
-.section s73vb
-.section s73wa
-.section s73wb
-.section s73xa
-.section s73xb
-.section s73ya
-.section s73yb
-.section s73za
-.section s73zb
-.section s731a
-.section s731b
-.section s732a
-.section s732b
-.section s733a
-.section s733b
-.section s734a
-.section s734b
-.section s735a
-.section s735b
-.section s736a
-.section s736b
-.section s737a
-.section s737b
-.section s738a
-.section s738b
-.section s739a
-.section s739b
-.section s730a
-.section s730b
-.section s74aa
-.section s74ab
-.section s74ba
-.section s74bb
-.section s74ca
-.section s74cb
-.section s74da
-.section s74db
-.section s74ea
-.section s74eb
-.section s74fa
-.section s74fb
-.section s74ga
-.section s74gb
-.section s74ha
-.section s74hb
-.section s74ia
-.section s74ib
-.section s74ja
-.section s74jb
-.section s74ka
-.section s74kb
-.section s74la
-.section s74lb
-.section s74ma
-.section s74mb
-.section s74na
-.section s74nb
-.section s74oa
-.section s74ob
-.section s74pa
-.section s74pb
-.section s74qa
-.section s74qb
-.section s74ra
-.section s74rb
-.section s74sa
-.section s74sb
-.section s74ta
-.section s74tb
-.section s74ua
-.section s74ub
-.section s74va
-.section s74vb
-.section s74wa
-.section s74wb
-.section s74xa
-.section s74xb
-.section s74ya
-.section s74yb
-.section s74za
-.section s74zb
-.section s741a
-.section s741b
-.section s742a
-.section s742b
-.section s743a
-.section s743b
-.section s744a
-.section s744b
-.section s745a
-.section s745b
-.section s746a
-.section s746b
-.section s747a
-.section s747b
-.section s748a
-.section s748b
-.section s749a
-.section s749b
-.section s740a
-.section s740b
-.section s75aa
-.section s75ab
-.section s75ba
-.section s75bb
-.section s75ca
-.section s75cb
-.section s75da
-.section s75db
-.section s75ea
-.section s75eb
-.section s75fa
-.section s75fb
-.section s75ga
-.section s75gb
-.section s75ha
-.section s75hb
-.section s75ia
-.section s75ib
-.section s75ja
-.section s75jb
-.section s75ka
-.section s75kb
-.section s75la
-.section s75lb
-.section s75ma
-.section s75mb
-.section s75na
-.section s75nb
-.section s75oa
-.section s75ob
-.section s75pa
-.section s75pb
-.section s75qa
-.section s75qb
-.section s75ra
-.section s75rb
-.section s75sa
-.section s75sb
-.section s75ta
-.section s75tb
-.section s75ua
-.section s75ub
-.section s75va
-.section s75vb
-.section s75wa
-.section s75wb
-.section s75xa
-.section s75xb
-.section s75ya
-.section s75yb
-.section s75za
-.section s75zb
-.section s751a
-.section s751b
-.section s752a
-.section s752b
-.section s753a
-.section s753b
-.section s754a
-.section s754b
-.section s755a
-.section s755b
-.section s756a
-.section s756b
-.section s757a
-.section s757b
-.section s758a
-.section s758b
-.section s759a
-.section s759b
-.section s750a
-.section s750b
-.section s76aa
-.section s76ab
-.section s76ba
-.section s76bb
-.section s76ca
-.section s76cb
-.section s76da
-.section s76db
-.section s76ea
-.section s76eb
-.section s76fa
-.section s76fb
-.section s76ga
-.section s76gb
-.section s76ha
-.section s76hb
-.section s76ia
-.section s76ib
-.section s76ja
-.section s76jb
-.section s76ka
-.section s76kb
-.section s76la
-.section s76lb
-.section s76ma
-.section s76mb
-.section s76na
-.section s76nb
-.section s76oa
-.section s76ob
-.section s76pa
-.section s76pb
-.section s76qa
-.section s76qb
-.section s76ra
-.section s76rb
-.section s76sa
-.section s76sb
-.section s76ta
-.section s76tb
-.section s76ua
-.section s76ub
-.section s76va
-.section s76vb
-.section s76wa
-.section s76wb
-.section s76xa
-.section s76xb
-.section s76ya
-.section s76yb
-.section s76za
-.section s76zb
-.section s761a
-.section s761b
-.section s762a
-.section s762b
-.section s763a
-.section s763b
-.section s764a
-.section s764b
-.section s765a
-.section s765b
-.section s766a
-.section s766b
-.section s767a
-.section s767b
-.section s768a
-.section s768b
-.section s769a
-.section s769b
-.section s760a
-.section s760b
-.section s77aa
-.section s77ab
-.section s77ba
-.section s77bb
-.section s77ca
-.section s77cb
-.section s77da
-.section s77db
-.section s77ea
-.section s77eb
-.section s77fa
-.section s77fb
-.section s77ga
-.section s77gb
-.section s77ha
-.section s77hb
-.section s77ia
-.section s77ib
-.section s77ja
-.section s77jb
-.section s77ka
-.section s77kb
-.section s77la
-.section s77lb
-.section s77ma
-.section s77mb
-.section s77na
-.section s77nb
-.section s77oa
-.section s77ob
-.section s77pa
-.section s77pb
-.section s77qa
-.section s77qb
-.section s77ra
-.section s77rb
-.section s77sa
-.section s77sb
-.section s77ta
-.section s77tb
-.section s77ua
-.section s77ub
-.section s77va
-.section s77vb
-.section s77wa
-.section s77wb
-.section s77xa
-.section s77xb
-.section s77ya
-.section s77yb
-.section s77za
-.section s77zb
-.section s771a
-.section s771b
-.section s772a
-.section s772b
-.section s773a
-.section s773b
-.section s774a
-.section s774b
-.section s775a
-.section s775b
-.section s776a
-.section s776b
-.section s777a
-.section s777b
-.section s778a
-.section s778b
-.section s779a
-.section s779b
-.section s770a
-.section s770b
-.section s78aa
-.section s78ab
-.section s78ba
-.section s78bb
-.section s78ca
-.section s78cb
-.section s78da
-.section s78db
-.section s78ea
-.section s78eb
-.section s78fa
-.section s78fb
-.section s78ga
-.section s78gb
-.section s78ha
-.section s78hb
-.section s78ia
-.section s78ib
-.section s78ja
-.section s78jb
-.section s78ka
-.section s78kb
-.section s78la
-.section s78lb
-.section s78ma
-.section s78mb
-.section s78na
-.section s78nb
-.section s78oa
-.section s78ob
-.section s78pa
-.section s78pb
-.section s78qa
-.section s78qb
-.section s78ra
-.section s78rb
-.section s78sa
-.section s78sb
-.section s78ta
-.section s78tb
-.section s78ua
-.section s78ub
-.section s78va
-.section s78vb
-.section s78wa
-.section s78wb
-.section s78xa
-.section s78xb
-.section s78ya
-.section s78yb
-.section s78za
-.section s78zb
-.section s781a
-.section s781b
-.section s782a
-.section s782b
-.section s783a
-.section s783b
-.section s784a
-.section s784b
-.section s785a
-.section s785b
-.section s786a
-.section s786b
-.section s787a
-.section s787b
-.section s788a
-.section s788b
-.section s789a
-.section s789b
-.section s780a
-.section s780b
-.section s79aa
-.section s79ab
-.section s79ba
-.section s79bb
-.section s79ca
-.section s79cb
-.section s79da
-.section s79db
-.section s79ea
-.section s79eb
-.section s79fa
-.section s79fb
-.section s79ga
-.section s79gb
-.section s79ha
-.section s79hb
-.section s79ia
-.section s79ib
-.section s79ja
-.section s79jb
-.section s79ka
-.section s79kb
-.section s79la
-.section s79lb
-.section s79ma
-.section s79mb
-.section s79na
-.section s79nb
-.section s79oa
-.section s79ob
-.section s79pa
-.section s79pb
-.section s79qa
-.section s79qb
-.section s79ra
-.section s79rb
-.section s79sa
-.section s79sb
-.section s79ta
-.section s79tb
-.section s79ua
-.section s79ub
-.section s79va
-.section s79vb
-.section s79wa
-.section s79wb
-.section s79xa
-.section s79xb
-.section s79ya
-.section s79yb
-.section s79za
-.section s79zb
-.section s791a
-.section s791b
-.section s792a
-.section s792b
-.section s793a
-.section s793b
-.section s794a
-.section s794b
-.section s795a
-.section s795b
-.section s796a
-.section s796b
-.section s797a
-.section s797b
-.section s798a
-.section s798b
-.section s799a
-.section s799b
-.section s790a
-.section s790b
-.section s70aa
-.section s70ab
-.section s70ba
-.section s70bb
-.section s70ca
-.section s70cb
-.section s70da
-.section s70db
-.section s70ea
-.section s70eb
-.section s70fa
-.section s70fb
-.section s70ga
-.section s70gb
-.section s70ha
-.section s70hb
-.section s70ia
-.section s70ib
-.section s70ja
-.section s70jb
-.section s70ka
-.section s70kb
-.section s70la
-.section s70lb
-.section s70ma
-.section s70mb
-.section s70na
-.section s70nb
-.section s70oa
-.section s70ob
-.section s70pa
-.section s70pb
-.section s70qa
-.section s70qb
-.section s70ra
-.section s70rb
-.section s70sa
-.section s70sb
-.section s70ta
-.section s70tb
-.section s70ua
-.section s70ub
-.section s70va
-.section s70vb
-.section s70wa
-.section s70wb
-.section s70xa
-.section s70xb
-.section s70ya
-.section s70yb
-.section s70za
-.section s70zb
-.section s701a
-.section s701b
-.section s702a
-.section s702b
-.section s703a
-.section s703b
-.section s704a
-.section s704b
-.section s705a
-.section s705b
-.section s706a
-.section s706b
-.section s707a
-.section s707b
-.section s708a
-.section s708b
-.section s709a
-.section s709b
-.section s700a
-.section s700b
-.section s8aaa
-.section s8aab
-.section s8aba
-.section s8abb
-.section s8aca
-.section s8acb
-.section s8ada
-.section s8adb
-.section s8aea
-.section s8aeb
-.section s8afa
-.section s8afb
-.section s8aga
-.section s8agb
-.section s8aha
-.section s8ahb
-.section s8aia
-.section s8aib
-.section s8aja
-.section s8ajb
-.section s8aka
-.section s8akb
-.section s8ala
-.section s8alb
-.section s8ama
-.section s8amb
-.section s8ana
-.section s8anb
-.section s8aoa
-.section s8aob
-.section s8apa
-.section s8apb
-.section s8aqa
-.section s8aqb
-.section s8ara
-.section s8arb
-.section s8asa
-.section s8asb
-.section s8ata
-.section s8atb
-.section s8aua
-.section s8aub
-.section s8ava
-.section s8avb
-.section s8awa
-.section s8awb
-.section s8axa
-.section s8axb
-.section s8aya
-.section s8ayb
-.section s8aza
-.section s8azb
-.section s8a1a
-.section s8a1b
-.section s8a2a
-.section s8a2b
-.section s8a3a
-.section s8a3b
-.section s8a4a
-.section s8a4b
-.section s8a5a
-.section s8a5b
-.section s8a6a
-.section s8a6b
-.section s8a7a
-.section s8a7b
-.section s8a8a
-.section s8a8b
-.section s8a9a
-.section s8a9b
-.section s8a0a
-.section s8a0b
-.section s8baa
-.section s8bab
-.section s8bba
-.section s8bbb
-.section s8bca
-.section s8bcb
-.section s8bda
-.section s8bdb
-.section s8bea
-.section s8beb
-.section s8bfa
-.section s8bfb
-.section s8bga
-.section s8bgb
-.section s8bha
-.section s8bhb
-.section s8bia
-.section s8bib
-.section s8bja
-.section s8bjb
-.section s8bka
-.section s8bkb
-.section s8bla
-.section s8blb
-.section s8bma
-.section s8bmb
-.section s8bna
-.section s8bnb
-.section s8boa
-.section s8bob
-.section s8bpa
-.section s8bpb
-.section s8bqa
-.section s8bqb
-.section s8bra
-.section s8brb
-.section s8bsa
-.section s8bsb
-.section s8bta
-.section s8btb
-.section s8bua
-.section s8bub
-.section s8bva
-.section s8bvb
-.section s8bwa
-.section s8bwb
-.section s8bxa
-.section s8bxb
-.section s8bya
-.section s8byb
-.section s8bza
-.section s8bzb
-.section s8b1a
-.section s8b1b
-.section s8b2a
-.section s8b2b
-.section s8b3a
-.section s8b3b
-.section s8b4a
-.section s8b4b
-.section s8b5a
-.section s8b5b
-.section s8b6a
-.section s8b6b
-.section s8b7a
-.section s8b7b
-.section s8b8a
-.section s8b8b
-.section s8b9a
-.section s8b9b
-.section s8b0a
-.section s8b0b
-.section s8caa
-.section s8cab
-.section s8cba
-.section s8cbb
-.section s8cca
-.section s8ccb
-.section s8cda
-.section s8cdb
-.section s8cea
-.section s8ceb
-.section s8cfa
-.section s8cfb
-.section s8cga
-.section s8cgb
-.section s8cha
-.section s8chb
-.section s8cia
-.section s8cib
-.section s8cja
-.section s8cjb
-.section s8cka
-.section s8ckb
-.section s8cla
-.section s8clb
-.section s8cma
-.section s8cmb
-.section s8cna
-.section s8cnb
-.section s8coa
-.section s8cob
-.section s8cpa
-.section s8cpb
-.section s8cqa
-.section s8cqb
-.section s8cra
-.section s8crb
-.section s8csa
-.section s8csb
-.section s8cta
-.section s8ctb
-.section s8cua
-.section s8cub
-.section s8cva
-.section s8cvb
-.section s8cwa
-.section s8cwb
-.section s8cxa
-.section s8cxb
-.section s8cya
-.section s8cyb
-.section s8cza
-.section s8czb
-.section s8c1a
-.section s8c1b
-.section s8c2a
-.section s8c2b
-.section s8c3a
-.section s8c3b
-.section s8c4a
-.section s8c4b
-.section s8c5a
-.section s8c5b
-.section s8c6a
-.section s8c6b
-.section s8c7a
-.section s8c7b
-.section s8c8a
-.section s8c8b
-.section s8c9a
-.section s8c9b
-.section s8c0a
-.section s8c0b
-.section s8daa
-.section s8dab
-.section s8dba
-.section s8dbb
-.section s8dca
-.section s8dcb
-.section s8dda
-.section s8ddb
-.section s8dea
-.section s8deb
-.section s8dfa
-.section s8dfb
-.section s8dga
-.section s8dgb
-.section s8dha
-.section s8dhb
-.section s8dia
-.section s8dib
-.section s8dja
-.section s8djb
-.section s8dka
-.section s8dkb
-.section s8dla
-.section s8dlb
-.section s8dma
-.section s8dmb
-.section s8dna
-.section s8dnb
-.section s8doa
-.section s8dob
-.section s8dpa
-.section s8dpb
-.section s8dqa
-.section s8dqb
-.section s8dra
-.section s8drb
-.section s8dsa
-.section s8dsb
-.section s8dta
-.section s8dtb
-.section s8dua
-.section s8dub
-.section s8dva
-.section s8dvb
-.section s8dwa
-.section s8dwb
-.section s8dxa
-.section s8dxb
-.section s8dya
-.section s8dyb
-.section s8dza
-.section s8dzb
-.section s8d1a
-.section s8d1b
-.section s8d2a
-.section s8d2b
-.section s8d3a
-.section s8d3b
-.section s8d4a
-.section s8d4b
-.section s8d5a
-.section s8d5b
-.section s8d6a
-.section s8d6b
-.section s8d7a
-.section s8d7b
-.section s8d8a
-.section s8d8b
-.section s8d9a
-.section s8d9b
-.section s8d0a
-.section s8d0b
-.section s8eaa
-.section s8eab
-.section s8eba
-.section s8ebb
-.section s8eca
-.section s8ecb
-.section s8eda
-.section s8edb
-.section s8eea
-.section s8eeb
-.section s8efa
-.section s8efb
-.section s8ega
-.section s8egb
-.section s8eha
-.section s8ehb
-.section s8eia
-.section s8eib
-.section s8eja
-.section s8ejb
-.section s8eka
-.section s8ekb
-.section s8ela
-.section s8elb
-.section s8ema
-.section s8emb
-.section s8ena
-.section s8enb
-.section s8eoa
-.section s8eob
-.section s8epa
-.section s8epb
-.section s8eqa
-.section s8eqb
-.section s8era
-.section s8erb
-.section s8esa
-.section s8esb
-.section s8eta
-.section s8etb
-.section s8eua
-.section s8eub
-.section s8eva
-.section s8evb
-.section s8ewa
-.section s8ewb
-.section s8exa
-.section s8exb
-.section s8eya
-.section s8eyb
-.section s8eza
-.section s8ezb
-.section s8e1a
-.section s8e1b
-.section s8e2a
-.section s8e2b
-.section s8e3a
-.section s8e3b
-.section s8e4a
-.section s8e4b
-.section s8e5a
-.section s8e5b
-.section s8e6a
-.section s8e6b
-.section s8e7a
-.section s8e7b
-.section s8e8a
-.section s8e8b
-.section s8e9a
-.section s8e9b
-.section s8e0a
-.section s8e0b
-.section s8faa
-.section s8fab
-.section s8fba
-.section s8fbb
-.section s8fca
-.section s8fcb
-.section s8fda
-.section s8fdb
-.section s8fea
-.section s8feb
-.section s8ffa
-.section s8ffb
-.section s8fga
-.section s8fgb
-.section s8fha
-.section s8fhb
-.section s8fia
-.section s8fib
-.section s8fja
-.section s8fjb
-.section s8fka
-.section s8fkb
-.section s8fla
-.section s8flb
-.section s8fma
-.section s8fmb
-.section s8fna
-.section s8fnb
-.section s8foa
-.section s8fob
-.section s8fpa
-.section s8fpb
-.section s8fqa
-.section s8fqb
-.section s8fra
-.section s8frb
-.section s8fsa
-.section s8fsb
-.section s8fta
-.section s8ftb
-.section s8fua
-.section s8fub
-.section s8fva
-.section s8fvb
-.section s8fwa
-.section s8fwb
-.section s8fxa
-.section s8fxb
-.section s8fya
-.section s8fyb
-.section s8fza
-.section s8fzb
-.section s8f1a
-.section s8f1b
-.section s8f2a
-.section s8f2b
-.section s8f3a
-.section s8f3b
-.section s8f4a
-.section s8f4b
-.section s8f5a
-.section s8f5b
-.section s8f6a
-.section s8f6b
-.section s8f7a
-.section s8f7b
-.section s8f8a
-.section s8f8b
-.section s8f9a
-.section s8f9b
-.section s8f0a
-.section s8f0b
-.section s8gaa
-.section s8gab
-.section s8gba
-.section s8gbb
-.section s8gca
-.section s8gcb
-.section s8gda
-.section s8gdb
-.section s8gea
-.section s8geb
-.section s8gfa
-.section s8gfb
-.section s8gga
-.section s8ggb
-.section s8gha
-.section s8ghb
-.section s8gia
-.section s8gib
-.section s8gja
-.section s8gjb
-.section s8gka
-.section s8gkb
-.section s8gla
-.section s8glb
-.section s8gma
-.section s8gmb
-.section s8gna
-.section s8gnb
-.section s8goa
-.section s8gob
-.section s8gpa
-.section s8gpb
-.section s8gqa
-.section s8gqb
-.section s8gra
-.section s8grb
-.section s8gsa
-.section s8gsb
-.section s8gta
-.section s8gtb
-.section s8gua
-.section s8gub
-.section s8gva
-.section s8gvb
-.section s8gwa
-.section s8gwb
-.section s8gxa
-.section s8gxb
-.section s8gya
-.section s8gyb
-.section s8gza
-.section s8gzb
-.section s8g1a
-.section s8g1b
-.section s8g2a
-.section s8g2b
-.section s8g3a
-.section s8g3b
-.section s8g4a
-.section s8g4b
-.section s8g5a
-.section s8g5b
-.section s8g6a
-.section s8g6b
-.section s8g7a
-.section s8g7b
-.section s8g8a
-.section s8g8b
-.section s8g9a
-.section s8g9b
-.section s8g0a
-.section s8g0b
-.section s8haa
-.section s8hab
-.section s8hba
-.section s8hbb
-.section s8hca
-.section s8hcb
-.section s8hda
-.section s8hdb
-.section s8hea
-.section s8heb
-.section s8hfa
-.section s8hfb
-.section s8hga
-.section s8hgb
-.section s8hha
-.section s8hhb
-.section s8hia
-.section s8hib
-.section s8hja
-.section s8hjb
-.section s8hka
-.section s8hkb
-.section s8hla
-.section s8hlb
-.section s8hma
-.section s8hmb
-.section s8hna
-.section s8hnb
-.section s8hoa
-.section s8hob
-.section s8hpa
-.section s8hpb
-.section s8hqa
-.section s8hqb
-.section s8hra
-.section s8hrb
-.section s8hsa
-.section s8hsb
-.section s8hta
-.section s8htb
-.section s8hua
-.section s8hub
-.section s8hva
-.section s8hvb
-.section s8hwa
-.section s8hwb
-.section s8hxa
-.section s8hxb
-.section s8hya
-.section s8hyb
-.section s8hza
-.section s8hzb
-.section s8h1a
-.section s8h1b
-.section s8h2a
-.section s8h2b
-.section s8h3a
-.section s8h3b
-.section s8h4a
-.section s8h4b
-.section s8h5a
-.section s8h5b
-.section s8h6a
-.section s8h6b
-.section s8h7a
-.section s8h7b
-.section s8h8a
-.section s8h8b
-.section s8h9a
-.section s8h9b
-.section s8h0a
-.section s8h0b
-.section s8iaa
-.section s8iab
-.section s8iba
-.section s8ibb
-.section s8ica
-.section s8icb
-.section s8ida
-.section s8idb
-.section s8iea
-.section s8ieb
-.section s8ifa
-.section s8ifb
-.section s8iga
-.section s8igb
-.section s8iha
-.section s8ihb
-.section s8iia
-.section s8iib
-.section s8ija
-.section s8ijb
-.section s8ika
-.section s8ikb
-.section s8ila
-.section s8ilb
-.section s8ima
-.section s8imb
-.section s8ina
-.section s8inb
-.section s8ioa
-.section s8iob
-.section s8ipa
-.section s8ipb
-.section s8iqa
-.section s8iqb
-.section s8ira
-.section s8irb
-.section s8isa
-.section s8isb
-.section s8ita
-.section s8itb
-.section s8iua
-.section s8iub
-.section s8iva
-.section s8ivb
-.section s8iwa
-.section s8iwb
-.section s8ixa
-.section s8ixb
-.section s8iya
-.section s8iyb
-.section s8iza
-.section s8izb
-.section s8i1a
-.section s8i1b
-.section s8i2a
-.section s8i2b
-.section s8i3a
-.section s8i3b
-.section s8i4a
-.section s8i4b
-.section s8i5a
-.section s8i5b
-.section s8i6a
-.section s8i6b
-.section s8i7a
-.section s8i7b
-.section s8i8a
-.section s8i8b
-.section s8i9a
-.section s8i9b
-.section s8i0a
-.section s8i0b
-.section s8jaa
-.section s8jab
-.section s8jba
-.section s8jbb
-.section s8jca
-.section s8jcb
-.section s8jda
-.section s8jdb
-.section s8jea
-.section s8jeb
-.section s8jfa
-.section s8jfb
-.section s8jga
-.section s8jgb
-.section s8jha
-.section s8jhb
-.section s8jia
-.section s8jib
-.section s8jja
-.section s8jjb
-.section s8jka
-.section s8jkb
-.section s8jla
-.section s8jlb
-.section s8jma
-.section s8jmb
-.section s8jna
-.section s8jnb
-.section s8joa
-.section s8job
-.section s8jpa
-.section s8jpb
-.section s8jqa
-.section s8jqb
-.section s8jra
-.section s8jrb
-.section s8jsa
-.section s8jsb
-.section s8jta
-.section s8jtb
-.section s8jua
-.section s8jub
-.section s8jva
-.section s8jvb
-.section s8jwa
-.section s8jwb
-.section s8jxa
-.section s8jxb
-.section s8jya
-.section s8jyb
-.section s8jza
-.section s8jzb
-.section s8j1a
-.section s8j1b
-.section s8j2a
-.section s8j2b
-.section s8j3a
-.section s8j3b
-.section s8j4a
-.section s8j4b
-.section s8j5a
-.section s8j5b
-.section s8j6a
-.section s8j6b
-.section s8j7a
-.section s8j7b
-.section s8j8a
-.section s8j8b
-.section s8j9a
-.section s8j9b
-.section s8j0a
-.section s8j0b
-.section s8kaa
-.section s8kab
-.section s8kba
-.section s8kbb
-.section s8kca
-.section s8kcb
-.section s8kda
-.section s8kdb
-.section s8kea
-.section s8keb
-.section s8kfa
-.section s8kfb
-.section s8kga
-.section s8kgb
-.section s8kha
-.section s8khb
-.section s8kia
-.section s8kib
-.section s8kja
-.section s8kjb
-.section s8kka
-.section s8kkb
-.section s8kla
-.section s8klb
-.section s8kma
-.section s8kmb
-.section s8kna
-.section s8knb
-.section s8koa
-.section s8kob
-.section s8kpa
-.section s8kpb
-.section s8kqa
-.section s8kqb
-.section s8kra
-.section s8krb
-.section s8ksa
-.section s8ksb
-.section s8kta
-.section s8ktb
-.section s8kua
-.section s8kub
-.section s8kva
-.section s8kvb
-.section s8kwa
-.section s8kwb
-.section s8kxa
-.section s8kxb
-.section s8kya
-.section s8kyb
-.section s8kza
-.section s8kzb
-.section s8k1a
-.section s8k1b
-.section s8k2a
-.section s8k2b
-.section s8k3a
-.section s8k3b
-.section s8k4a
-.section s8k4b
-.section s8k5a
-.section s8k5b
-.section s8k6a
-.section s8k6b
-.section s8k7a
-.section s8k7b
-.section s8k8a
-.section s8k8b
-.section s8k9a
-.section s8k9b
-.section s8k0a
-.section s8k0b
-.section s8laa
-.section s8lab
-.section s8lba
-.section s8lbb
-.section s8lca
-.section s8lcb
-.section s8lda
-.section s8ldb
-.section s8lea
-.section s8leb
-.section s8lfa
-.section s8lfb
-.section s8lga
-.section s8lgb
-.section s8lha
-.section s8lhb
-.section s8lia
-.section s8lib
-.section s8lja
-.section s8ljb
-.section s8lka
-.section s8lkb
-.section s8lla
-.section s8llb
-.section s8lma
-.section s8lmb
-.section s8lna
-.section s8lnb
-.section s8loa
-.section s8lob
-.section s8lpa
-.section s8lpb
-.section s8lqa
-.section s8lqb
-.section s8lra
-.section s8lrb
-.section s8lsa
-.section s8lsb
-.section s8lta
-.section s8ltb
-.section s8lua
-.section s8lub
-.section s8lva
-.section s8lvb
-.section s8lwa
-.section s8lwb
-.section s8lxa
-.section s8lxb
-.section s8lya
-.section s8lyb
-.section s8lza
-.section s8lzb
-.section s8l1a
-.section s8l1b
-.section s8l2a
-.section s8l2b
-.section s8l3a
-.section s8l3b
-.section s8l4a
-.section s8l4b
-.section s8l5a
-.section s8l5b
-.section s8l6a
-.section s8l6b
-.section s8l7a
-.section s8l7b
-.section s8l8a
-.section s8l8b
-.section s8l9a
-.section s8l9b
-.section s8l0a
-.section s8l0b
-.section s8maa
-.section s8mab
-.section s8mba
-.section s8mbb
-.section s8mca
-.section s8mcb
-.section s8mda
-.section s8mdb
-.section s8mea
-.section s8meb
-.section s8mfa
-.section s8mfb
-.section s8mga
-.section s8mgb
-.section s8mha
-.section s8mhb
-.section s8mia
-.section s8mib
-.section s8mja
-.section s8mjb
-.section s8mka
-.section s8mkb
-.section s8mla
-.section s8mlb
-.section s8mma
-.section s8mmb
-.section s8mna
-.section s8mnb
-.section s8moa
-.section s8mob
-.section s8mpa
-.section s8mpb
-.section s8mqa
-.section s8mqb
-.section s8mra
-.section s8mrb
-.section s8msa
-.section s8msb
-.section s8mta
-.section s8mtb
-.section s8mua
-.section s8mub
-.section s8mva
-.section s8mvb
-.section s8mwa
-.section s8mwb
-.section s8mxa
-.section s8mxb
-.section s8mya
-.section s8myb
-.section s8mza
-.section s8mzb
-.section s8m1a
-.section s8m1b
-.section s8m2a
-.section s8m2b
-.section s8m3a
-.section s8m3b
-.section s8m4a
-.section s8m4b
-.section s8m5a
-.section s8m5b
-.section s8m6a
-.section s8m6b
-.section s8m7a
-.section s8m7b
-.section s8m8a
-.section s8m8b
-.section s8m9a
-.section s8m9b
-.section s8m0a
-.section s8m0b
-.section s8naa
-.section s8nab
-.section s8nba
-.section s8nbb
-.section s8nca
-.section s8ncb
-.section s8nda
-.section s8ndb
-.section s8nea
-.section s8neb
-.section s8nfa
-.section s8nfb
-.section s8nga
-.section s8ngb
-.section s8nha
-.section s8nhb
-.section s8nia
-.section s8nib
-.section s8nja
-.section s8njb
-.section s8nka
-.section s8nkb
-.section s8nla
-.section s8nlb
-.section s8nma
-.section s8nmb
-.section s8nna
-.section s8nnb
-.section s8noa
-.section s8nob
-.section s8npa
-.section s8npb
-.section s8nqa
-.section s8nqb
-.section s8nra
-.section s8nrb
-.section s8nsa
-.section s8nsb
-.section s8nta
-.section s8ntb
-.section s8nua
-.section s8nub
-.section s8nva
-.section s8nvb
-.section s8nwa
-.section s8nwb
-.section s8nxa
-.section s8nxb
-.section s8nya
-.section s8nyb
-.section s8nza
-.section s8nzb
-.section s8n1a
-.section s8n1b
-.section s8n2a
-.section s8n2b
-.section s8n3a
-.section s8n3b
-.section s8n4a
-.section s8n4b
-.section s8n5a
-.section s8n5b
-.section s8n6a
-.section s8n6b
-.section s8n7a
-.section s8n7b
-.section s8n8a
-.section s8n8b
-.section s8n9a
-.section s8n9b
-.section s8n0a
-.section s8n0b
-.section s8oaa
-.section s8oab
-.section s8oba
-.section s8obb
-.section s8oca
-.section s8ocb
-.section s8oda
-.section s8odb
-.section s8oea
-.section s8oeb
-.section s8ofa
-.section s8ofb
-.section s8oga
-.section s8ogb
-.section s8oha
-.section s8ohb
-.section s8oia
-.section s8oib
-.section s8oja
-.section s8ojb
-.section s8oka
-.section s8okb
-.section s8ola
-.section s8olb
-.section s8oma
-.section s8omb
-.section s8ona
-.section s8onb
-.section s8ooa
-.section s8oob
-.section s8opa
-.section s8opb
-.section s8oqa
-.section s8oqb
-.section s8ora
-.section s8orb
-.section s8osa
-.section s8osb
-.section s8ota
-.section s8otb
-.section s8oua
-.section s8oub
-.section s8ova
-.section s8ovb
-.section s8owa
-.section s8owb
-.section s8oxa
-.section s8oxb
-.section s8oya
-.section s8oyb
-.section s8oza
-.section s8ozb
-.section s8o1a
-.section s8o1b
-.section s8o2a
-.section s8o2b
-.section s8o3a
-.section s8o3b
-.section s8o4a
-.section s8o4b
-.section s8o5a
-.section s8o5b
-.section s8o6a
-.section s8o6b
-.section s8o7a
-.section s8o7b
-.section s8o8a
-.section s8o8b
-.section s8o9a
-.section s8o9b
-.section s8o0a
-.section s8o0b
-.section s8paa
-.section s8pab
-.section s8pba
-.section s8pbb
-.section s8pca
-.section s8pcb
-.section s8pda
-.section s8pdb
-.section s8pea
-.section s8peb
-.section s8pfa
-.section s8pfb
-.section s8pga
-.section s8pgb
-.section s8pha
-.section s8phb
-.section s8pia
-.section s8pib
-.section s8pja
-.section s8pjb
-.section s8pka
-.section s8pkb
-.section s8pla
-.section s8plb
-.section s8pma
-.section s8pmb
-.section s8pna
-.section s8pnb
-.section s8poa
-.section s8pob
-.section s8ppa
-.section s8ppb
-.section s8pqa
-.section s8pqb
-.section s8pra
-.section s8prb
-.section s8psa
-.section s8psb
-.section s8pta
-.section s8ptb
-.section s8pua
-.section s8pub
-.section s8pva
-.section s8pvb
-.section s8pwa
-.section s8pwb
-.section s8pxa
-.section s8pxb
-.section s8pya
-.section s8pyb
-.section s8pza
-.section s8pzb
-.section s8p1a
-.section s8p1b
-.section s8p2a
-.section s8p2b
-.section s8p3a
-.section s8p3b
-.section s8p4a
-.section s8p4b
-.section s8p5a
-.section s8p5b
-.section s8p6a
-.section s8p6b
-.section s8p7a
-.section s8p7b
-.section s8p8a
-.section s8p8b
-.section s8p9a
-.section s8p9b
-.section s8p0a
-.section s8p0b
-.section s8qaa
-.section s8qab
-.section s8qba
-.section s8qbb
-.section s8qca
-.section s8qcb
-.section s8qda
-.section s8qdb
-.section s8qea
-.section s8qeb
-.section s8qfa
-.section s8qfb
-.section s8qga
-.section s8qgb
-.section s8qha
-.section s8qhb
-.section s8qia
-.section s8qib
-.section s8qja
-.section s8qjb
-.section s8qka
-.section s8qkb
-.section s8qla
-.section s8qlb
-.section s8qma
-.section s8qmb
-.section s8qna
-.section s8qnb
-.section s8qoa
-.section s8qob
-.section s8qpa
-.section s8qpb
-.section s8qqa
-.section s8qqb
-.section s8qra
-.section s8qrb
-.section s8qsa
-.section s8qsb
-.section s8qta
-.section s8qtb
-.section s8qua
-.section s8qub
-.section s8qva
-.section s8qvb
-.section s8qwa
-.section s8qwb
-.section s8qxa
-.section s8qxb
-.section s8qya
-.section s8qyb
-.section s8qza
-.section s8qzb
-.section s8q1a
-.section s8q1b
-.section s8q2a
-.section s8q2b
-.section s8q3a
-.section s8q3b
-.section s8q4a
-.section s8q4b
-.section s8q5a
-.section s8q5b
-.section s8q6a
-.section s8q6b
-.section s8q7a
-.section s8q7b
-.section s8q8a
-.section s8q8b
-.section s8q9a
-.section s8q9b
-.section s8q0a
-.section s8q0b
-.section s8raa
-.section s8rab
-.section s8rba
-.section s8rbb
-.section s8rca
-.section s8rcb
-.section s8rda
-.section s8rdb
-.section s8rea
-.section s8reb
-.section s8rfa
-.section s8rfb
-.section s8rga
-.section s8rgb
-.section s8rha
-.section s8rhb
-.section s8ria
-.section s8rib
-.section s8rja
-.section s8rjb
-.section s8rka
-.section s8rkb
-.section s8rla
-.section s8rlb
-.section s8rma
-.section s8rmb
-.section s8rna
-.section s8rnb
-.section s8roa
-.section s8rob
-.section s8rpa
-.section s8rpb
-.section s8rqa
-.section s8rqb
-.section s8rra
-.section s8rrb
-.section s8rsa
-.section s8rsb
-.section s8rta
-.section s8rtb
-.section s8rua
-.section s8rub
-.section s8rva
-.section s8rvb
-.section s8rwa
-.section s8rwb
-.section s8rxa
-.section s8rxb
-.section s8rya
-.section s8ryb
-.section s8rza
-.section s8rzb
-.section s8r1a
-.section s8r1b
-.section s8r2a
-.section s8r2b
-.section s8r3a
-.section s8r3b
-.section s8r4a
-.section s8r4b
-.section s8r5a
-.section s8r5b
-.section s8r6a
-.section s8r6b
-.section s8r7a
-.section s8r7b
-.section s8r8a
-.section s8r8b
-.section s8r9a
-.section s8r9b
-.section s8r0a
-.section s8r0b
-.section s8saa
-.section s8sab
-.section s8sba
-.section s8sbb
-.section s8sca
-.section s8scb
-.section s8sda
-.section s8sdb
-.section s8sea
-.section s8seb
-.section s8sfa
-.section s8sfb
-.section s8sga
-.section s8sgb
-.section s8sha
-.section s8shb
-.section s8sia
-.section s8sib
-.section s8sja
-.section s8sjb
-.section s8ska
-.section s8skb
-.section s8sla
-.section s8slb
-.section s8sma
-.section s8smb
-.section s8sna
-.section s8snb
-.section s8soa
-.section s8sob
-.section s8spa
-.section s8spb
-.section s8sqa
-.section s8sqb
-.section s8sra
-.section s8srb
-.section s8ssa
-.section s8ssb
-.section s8sta
-.section s8stb
-.section s8sua
-.section s8sub
-.section s8sva
-.section s8svb
-.section s8swa
-.section s8swb
-.section s8sxa
-.section s8sxb
-.section s8sya
-.section s8syb
-.section s8sza
-.section s8szb
-.section s8s1a
-.section s8s1b
-.section s8s2a
-.section s8s2b
-.section s8s3a
-.section s8s3b
-.section s8s4a
-.section s8s4b
-.section s8s5a
-.section s8s5b
-.section s8s6a
-.section s8s6b
-.section s8s7a
-.section s8s7b
-.section s8s8a
-.section s8s8b
-.section s8s9a
-.section s8s9b
-.section s8s0a
-.section s8s0b
-.section s8taa
-.section s8tab
-.section s8tba
-.section s8tbb
-.section s8tca
-.section s8tcb
-.section s8tda
-.section s8tdb
-.section s8tea
-.section s8teb
-.section s8tfa
-.section s8tfb
-.section s8tga
-.section s8tgb
-.section s8tha
-.section s8thb
-.section s8tia
-.section s8tib
-.section s8tja
-.section s8tjb
-.section s8tka
-.section s8tkb
-.section s8tla
-.section s8tlb
-.section s8tma
-.section s8tmb
-.section s8tna
-.section s8tnb
-.section s8toa
-.section s8tob
-.section s8tpa
-.section s8tpb
-.section s8tqa
-.section s8tqb
-.section s8tra
-.section s8trb
-.section s8tsa
-.section s8tsb
-.section s8tta
-.section s8ttb
-.section s8tua
-.section s8tub
-.section s8tva
-.section s8tvb
-.section s8twa
-.section s8twb
-.section s8txa
-.section s8txb
-.section s8tya
-.section s8tyb
-.section s8tza
-.section s8tzb
-.section s8t1a
-.section s8t1b
-.section s8t2a
-.section s8t2b
-.section s8t3a
-.section s8t3b
-.section s8t4a
-.section s8t4b
-.section s8t5a
-.section s8t5b
-.section s8t6a
-.section s8t6b
-.section s8t7a
-.section s8t7b
-.section s8t8a
-.section s8t8b
-.section s8t9a
-.section s8t9b
-.section s8t0a
-.section s8t0b
-.section s8uaa
-.section s8uab
-.section s8uba
-.section s8ubb
-.section s8uca
-.section s8ucb
-.section s8uda
-.section s8udb
-.section s8uea
-.section s8ueb
-.section s8ufa
-.section s8ufb
-.section s8uga
-.section s8ugb
-.section s8uha
-.section s8uhb
-.section s8uia
-.section s8uib
-.section s8uja
-.section s8ujb
-.section s8uka
-.section s8ukb
-.section s8ula
-.section s8ulb
-.section s8uma
-.section s8umb
-.section s8una
-.section s8unb
-.section s8uoa
-.section s8uob
-.section s8upa
-.section s8upb
-.section s8uqa
-.section s8uqb
-.section s8ura
-.section s8urb
-.section s8usa
-.section s8usb
-.section s8uta
-.section s8utb
-.section s8uua
-.section s8uub
-.section s8uva
-.section s8uvb
-.section s8uwa
-.section s8uwb
-.section s8uxa
-.section s8uxb
-.section s8uya
-.section s8uyb
-.section s8uza
-.section s8uzb
-.section s8u1a
-.section s8u1b
-.section s8u2a
-.section s8u2b
-.section s8u3a
-.section s8u3b
-.section s8u4a
-.section s8u4b
-.section s8u5a
-.section s8u5b
-.section s8u6a
-.section s8u6b
-.section s8u7a
-.section s8u7b
-.section s8u8a
-.section s8u8b
-.section s8u9a
-.section s8u9b
-.section s8u0a
-.section s8u0b
-.section s8vaa
-.section s8vab
-.section s8vba
-.section s8vbb
-.section s8vca
-.section s8vcb
-.section s8vda
-.section s8vdb
-.section s8vea
-.section s8veb
-.section s8vfa
-.section s8vfb
-.section s8vga
-.section s8vgb
-.section s8vha
-.section s8vhb
-.section s8via
-.section s8vib
-.section s8vja
-.section s8vjb
-.section s8vka
-.section s8vkb
-.section s8vla
-.section s8vlb
-.section s8vma
-.section s8vmb
-.section s8vna
-.section s8vnb
-.section s8voa
-.section s8vob
-.section s8vpa
-.section s8vpb
-.section s8vqa
-.section s8vqb
-.section s8vra
-.section s8vrb
-.section s8vsa
-.section s8vsb
-.section s8vta
-.section s8vtb
-.section s8vua
-.section s8vub
-.section s8vva
-.section s8vvb
-.section s8vwa
-.section s8vwb
-.section s8vxa
-.section s8vxb
-.section s8vya
-.section s8vyb
-.section s8vza
-.section s8vzb
-.section s8v1a
-.section s8v1b
-.section s8v2a
-.section s8v2b
-.section s8v3a
-.section s8v3b
-.section s8v4a
-.section s8v4b
-.section s8v5a
-.section s8v5b
-.section s8v6a
-.section s8v6b
-.section s8v7a
-.section s8v7b
-.section s8v8a
-.section s8v8b
-.section s8v9a
-.section s8v9b
-.section s8v0a
-.section s8v0b
-.section s8waa
-.section s8wab
-.section s8wba
-.section s8wbb
-.section s8wca
-.section s8wcb
-.section s8wda
-.section s8wdb
-.section s8wea
-.section s8web
-.section s8wfa
-.section s8wfb
-.section s8wga
-.section s8wgb
-.section s8wha
-.section s8whb
-.section s8wia
-.section s8wib
-.section s8wja
-.section s8wjb
-.section s8wka
-.section s8wkb
-.section s8wla
-.section s8wlb
-.section s8wma
-.section s8wmb
-.section s8wna
-.section s8wnb
-.section s8woa
-.section s8wob
-.section s8wpa
-.section s8wpb
-.section s8wqa
-.section s8wqb
-.section s8wra
-.section s8wrb
-.section s8wsa
-.section s8wsb
-.section s8wta
-.section s8wtb
-.section s8wua
-.section s8wub
-.section s8wva
-.section s8wvb
-.section s8wwa
-.section s8wwb
-.section s8wxa
-.section s8wxb
-.section s8wya
-.section s8wyb
-.section s8wza
-.section s8wzb
-.section s8w1a
-.section s8w1b
-.section s8w2a
-.section s8w2b
-.section s8w3a
-.section s8w3b
-.section s8w4a
-.section s8w4b
-.section s8w5a
-.section s8w5b
-.section s8w6a
-.section s8w6b
-.section s8w7a
-.section s8w7b
-.section s8w8a
-.section s8w8b
-.section s8w9a
-.section s8w9b
-.section s8w0a
-.section s8w0b
-.section s8xaa
-.section s8xab
-.section s8xba
-.section s8xbb
-.section s8xca
-.section s8xcb
-.section s8xda
-.section s8xdb
-.section s8xea
-.section s8xeb
-.section s8xfa
-.section s8xfb
-.section s8xga
-.section s8xgb
-.section s8xha
-.section s8xhb
-.section s8xia
-.section s8xib
-.section s8xja
-.section s8xjb
-.section s8xka
-.section s8xkb
-.section s8xla
-.section s8xlb
-.section s8xma
-.section s8xmb
-.section s8xna
-.section s8xnb
-.section s8xoa
-.section s8xob
-.section s8xpa
-.section s8xpb
-.section s8xqa
-.section s8xqb
-.section s8xra
-.section s8xrb
-.section s8xsa
-.section s8xsb
-.section s8xta
-.section s8xtb
-.section s8xua
-.section s8xub
-.section s8xva
-.section s8xvb
-.section s8xwa
-.section s8xwb
-.section s8xxa
-.section s8xxb
-.section s8xya
-.section s8xyb
-.section s8xza
-.section s8xzb
-.section s8x1a
-.section s8x1b
-.section s8x2a
-.section s8x2b
-.section s8x3a
-.section s8x3b
-.section s8x4a
-.section s8x4b
-.section s8x5a
-.section s8x5b
-.section s8x6a
-.section s8x6b
-.section s8x7a
-.section s8x7b
-.section s8x8a
-.section s8x8b
-.section s8x9a
-.section s8x9b
-.section s8x0a
-.section s8x0b
-.section s8yaa
-.section s8yab
-.section s8yba
-.section s8ybb
-.section s8yca
-.section s8ycb
-.section s8yda
-.section s8ydb
-.section s8yea
-.section s8yeb
-.section s8yfa
-.section s8yfb
-.section s8yga
-.section s8ygb
-.section s8yha
-.section s8yhb
-.section s8yia
-.section s8yib
-.section s8yja
-.section s8yjb
-.section s8yka
-.section s8ykb
-.section s8yla
-.section s8ylb
-.section s8yma
-.section s8ymb
-.section s8yna
-.section s8ynb
-.section s8yoa
-.section s8yob
-.section s8ypa
-.section s8ypb
-.section s8yqa
-.section s8yqb
-.section s8yra
-.section s8yrb
-.section s8ysa
-.section s8ysb
-.section s8yta
-.section s8ytb
-.section s8yua
-.section s8yub
-.section s8yva
-.section s8yvb
-.section s8ywa
-.section s8ywb
-.section s8yxa
-.section s8yxb
-.section s8yya
-.section s8yyb
-.section s8yza
-.section s8yzb
-.section s8y1a
-.section s8y1b
-.section s8y2a
-.section s8y2b
-.section s8y3a
-.section s8y3b
-.section s8y4a
-.section s8y4b
-.section s8y5a
-.section s8y5b
-.section s8y6a
-.section s8y6b
-.section s8y7a
-.section s8y7b
-.section s8y8a
-.section s8y8b
-.section s8y9a
-.section s8y9b
-.section s8y0a
-.section s8y0b
-.section s8zaa
-.section s8zab
-.section s8zba
-.section s8zbb
-.section s8zca
-.section s8zcb
-.section s8zda
-.section s8zdb
-.section s8zea
-.section s8zeb
-.section s8zfa
-.section s8zfb
-.section s8zga
-.section s8zgb
-.section s8zha
-.section s8zhb
-.section s8zia
-.section s8zib
-.section s8zja
-.section s8zjb
-.section s8zka
-.section s8zkb
-.section s8zla
-.section s8zlb
-.section s8zma
-.section s8zmb
-.section s8zna
-.section s8znb
-.section s8zoa
-.section s8zob
-.section s8zpa
-.section s8zpb
-.section s8zqa
-.section s8zqb
-.section s8zra
-.section s8zrb
-.section s8zsa
-.section s8zsb
-.section s8zta
-.section s8ztb
-.section s8zua
-.section s8zub
-.section s8zva
-.section s8zvb
-.section s8zwa
-.section s8zwb
-.section s8zxa
-.section s8zxb
-.section s8zya
-.section s8zyb
-.section s8zza
-.section s8zzb
-.section s8z1a
-.section s8z1b
-.section s8z2a
-.section s8z2b
-.section s8z3a
-.section s8z3b
-.section s8z4a
-.section s8z4b
-.section s8z5a
-.section s8z5b
-.section s8z6a
-.section s8z6b
-.section s8z7a
-.section s8z7b
-.section s8z8a
-.section s8z8b
-.section s8z9a
-.section s8z9b
-.section s8z0a
-.section s8z0b
-.section s81aa
-.section s81ab
-.section s81ba
-.section s81bb
-.section s81ca
-.section s81cb
-.section s81da
-.section s81db
-.section s81ea
-.section s81eb
-.section s81fa
-.section s81fb
-.section s81ga
-.section s81gb
-.section s81ha
-.section s81hb
-.section s81ia
-.section s81ib
-.section s81ja
-.section s81jb
-.section s81ka
-.section s81kb
-.section s81la
-.section s81lb
-.section s81ma
-.section s81mb
-.section s81na
-.section s81nb
-.section s81oa
-.section s81ob
-.section s81pa
-.section s81pb
-.section s81qa
-.section s81qb
-.section s81ra
-.section s81rb
-.section s81sa
-.section s81sb
-.section s81ta
-.section s81tb
-.section s81ua
-.section s81ub
-.section s81va
-.section s81vb
-.section s81wa
-.section s81wb
-.section s81xa
-.section s81xb
-.section s81ya
-.section s81yb
-.section s81za
-.section s81zb
-.section s811a
-.section s811b
-.section s812a
-.section s812b
-.section s813a
-.section s813b
-.section s814a
-.section s814b
-.section s815a
-.section s815b
-.section s816a
-.section s816b
-.section s817a
-.section s817b
-.section s818a
-.section s818b
-.section s819a
-.section s819b
-.section s810a
-.section s810b
-.section s82aa
-.section s82ab
-.section s82ba
-.section s82bb
-.section s82ca
-.section s82cb
-.section s82da
-.section s82db
-.section s82ea
-.section s82eb
-.section s82fa
-.section s82fb
-.section s82ga
-.section s82gb
-.section s82ha
-.section s82hb
-.section s82ia
-.section s82ib
-.section s82ja
-.section s82jb
-.section s82ka
-.section s82kb
-.section s82la
-.section s82lb
-.section s82ma
-.section s82mb
-.section s82na
-.section s82nb
-.section s82oa
-.section s82ob
-.section s82pa
-.section s82pb
-.section s82qa
-.section s82qb
-.section s82ra
-.section s82rb
-.section s82sa
-.section s82sb
-.section s82ta
-.section s82tb
-.section s82ua
-.section s82ub
-.section s82va
-.section s82vb
-.section s82wa
-.section s82wb
-.section s82xa
-.section s82xb
-.section s82ya
-.section s82yb
-.section s82za
-.section s82zb
-.section s821a
-.section s821b
-.section s822a
-.section s822b
-.section s823a
-.section s823b
-.section s824a
-.section s824b
-.section s825a
-.section s825b
-.section s826a
-.section s826b
-.section s827a
-.section s827b
-.section s828a
-.section s828b
-.section s829a
-.section s829b
-.section s820a
-.section s820b
-.section s83aa
-.section s83ab
-.section s83ba
-.section s83bb
-.section s83ca
-.section s83cb
-.section s83da
-.section s83db
-.section s83ea
-.section s83eb
-.section s83fa
-.section s83fb
-.section s83ga
-.section s83gb
-.section s83ha
-.section s83hb
-.section s83ia
-.section s83ib
-.section s83ja
-.section s83jb
-.section s83ka
-.section s83kb
-.section s83la
-.section s83lb
-.section s83ma
-.section s83mb
-.section s83na
-.section s83nb
-.section s83oa
-.section s83ob
-.section s83pa
-.section s83pb
-.section s83qa
-.section s83qb
-.section s83ra
-.section s83rb
-.section s83sa
-.section s83sb
-.section s83ta
-.section s83tb
-.section s83ua
-.section s83ub
-.section s83va
-.section s83vb
-.section s83wa
-.section s83wb
-.section s83xa
-.section s83xb
-.section s83ya
-.section s83yb
-.section s83za
-.section s83zb
-.section s831a
-.section s831b
-.section s832a
-.section s832b
-.section s833a
-.section s833b
-.section s834a
-.section s834b
-.section s835a
-.section s835b
-.section s836a
-.section s836b
-.section s837a
-.section s837b
-.section s838a
-.section s838b
-.section s839a
-.section s839b
-.section s830a
-.section s830b
-.section s84aa
-.section s84ab
-.section s84ba
-.section s84bb
-.section s84ca
-.section s84cb
-.section s84da
-.section s84db
-.section s84ea
-.section s84eb
-.section s84fa
-.section s84fb
-.section s84ga
-.section s84gb
-.section s84ha
-.section s84hb
-.section s84ia
-.section s84ib
-.section s84ja
-.section s84jb
-.section s84ka
-.section s84kb
-.section s84la
-.section s84lb
-.section s84ma
-.section s84mb
-.section s84na
-.section s84nb
-.section s84oa
-.section s84ob
-.section s84pa
-.section s84pb
-.section s84qa
-.section s84qb
-.section s84ra
-.section s84rb
-.section s84sa
-.section s84sb
-.section s84ta
-.section s84tb
-.section s84ua
-.section s84ub
-.section s84va
-.section s84vb
-.section s84wa
-.section s84wb
-.section s84xa
-.section s84xb
-.section s84ya
-.section s84yb
-.section s84za
-.section s84zb
-.section s841a
-.section s841b
-.section s842a
-.section s842b
-.section s843a
-.section s843b
-.section s844a
-.section s844b
-.section s845a
-.section s845b
-.section s846a
-.section s846b
-.section s847a
-.section s847b
-.section s848a
-.section s848b
-.section s849a
-.section s849b
-.section s840a
-.section s840b
-.section s85aa
-.section s85ab
-.section s85ba
-.section s85bb
-.section s85ca
-.section s85cb
-.section s85da
-.section s85db
-.section s85ea
-.section s85eb
-.section s85fa
-.section s85fb
-.section s85ga
-.section s85gb
-.section s85ha
-.section s85hb
-.section s85ia
-.section s85ib
-.section s85ja
-.section s85jb
-.section s85ka
-.section s85kb
-.section s85la
-.section s85lb
-.section s85ma
-.section s85mb
-.section s85na
-.section s85nb
-.section s85oa
-.section s85ob
-.section s85pa
-.section s85pb
-.section s85qa
-.section s85qb
-.section s85ra
-.section s85rb
-.section s85sa
-.section s85sb
-.section s85ta
-.section s85tb
-.section s85ua
-.section s85ub
-.section s85va
-.section s85vb
-.section s85wa
-.section s85wb
-.section s85xa
-.section s85xb
-.section s85ya
-.section s85yb
-.section s85za
-.section s85zb
-.section s851a
-.section s851b
-.section s852a
-.section s852b
-.section s853a
-.section s853b
-.section s854a
-.section s854b
-.section s855a
-.section s855b
-.section s856a
-.section s856b
-.section s857a
-.section s857b
-.section s858a
-.section s858b
-.section s859a
-.section s859b
-.section s850a
-.section s850b
-.section s86aa
-.section s86ab
-.section s86ba
-.section s86bb
-.section s86ca
-.section s86cb
-.section s86da
-.section s86db
-.section s86ea
-.section s86eb
-.section s86fa
-.section s86fb
-.section s86ga
-.section s86gb
-.section s86ha
-.section s86hb
-.section s86ia
-.section s86ib
-.section s86ja
-.section s86jb
-.section s86ka
-.section s86kb
-.section s86la
-.section s86lb
-.section s86ma
-.section s86mb
-.section s86na
-.section s86nb
-.section s86oa
-.section s86ob
-.section s86pa
-.section s86pb
-.section s86qa
-.section s86qb
-.section s86ra
-.section s86rb
-.section s86sa
-.section s86sb
-.section s86ta
-.section s86tb
-.section s86ua
-.section s86ub
-.section s86va
-.section s86vb
-.section s86wa
-.section s86wb
-.section s86xa
-.section s86xb
-.section s86ya
-.section s86yb
-.section s86za
-.section s86zb
-.section s861a
-.section s861b
-.section s862a
-.section s862b
-.section s863a
-.section s863b
-.section s864a
-.section s864b
-.section s865a
-.section s865b
-.section s866a
-.section s866b
-.section s867a
-.section s867b
-.section s868a
-.section s868b
-.section s869a
-.section s869b
-.section s860a
-.section s860b
-.section s87aa
-.section s87ab
-.section s87ba
-.section s87bb
-.section s87ca
-.section s87cb
-.section s87da
-.section s87db
-.section s87ea
-.section s87eb
-.section s87fa
-.section s87fb
-.section s87ga
-.section s87gb
-.section s87ha
-.section s87hb
-.section s87ia
-.section s87ib
-.section s87ja
-.section s87jb
-.section s87ka
-.section s87kb
-.section s87la
-.section s87lb
-.section s87ma
-.section s87mb
-.section s87na
-.section s87nb
-.section s87oa
-.section s87ob
-.section s87pa
-.section s87pb
-.section s87qa
-.section s87qb
-.section s87ra
-.section s87rb
-.section s87sa
-.section s87sb
-.section s87ta
-.section s87tb
-.section s87ua
-.section s87ub
-.section s87va
-.section s87vb
-.section s87wa
-.section s87wb
-.section s87xa
-.section s87xb
-.section s87ya
-.section s87yb
-.section s87za
-.section s87zb
-.section s871a
-.section s871b
-.section s872a
-.section s872b
-.section s873a
-.section s873b
-.section s874a
-.section s874b
-.section s875a
-.section s875b
-.section s876a
-.section s876b
-.section s877a
-.section s877b
-.section s878a
-.section s878b
-.section s879a
-.section s879b
-.section s870a
-.section s870b
-.section s88aa
-.section s88ab
-.section s88ba
-.section s88bb
-.section s88ca
-.section s88cb
-.section s88da
-.section s88db
-.section s88ea
-.section s88eb
-.section s88fa
-.section s88fb
-.section s88ga
-.section s88gb
-.section s88ha
-.section s88hb
-.section s88ia
-.section s88ib
-.section s88ja
-.section s88jb
-.section s88ka
-.section s88kb
-.section s88la
-.section s88lb
-.section s88ma
-.section s88mb
-.section s88na
-.section s88nb
-.section s88oa
-.section s88ob
-.section s88pa
-.section s88pb
-.section s88qa
-.section s88qb
-.section s88ra
-.section s88rb
-.section s88sa
-.section s88sb
-.section s88ta
-.section s88tb
-.section s88ua
-.section s88ub
-.section s88va
-.section s88vb
-.section s88wa
-.section s88wb
-.section s88xa
-.section s88xb
-.section s88ya
-.section s88yb
-.section s88za
-.section s88zb
-.section s881a
-.section s881b
-.section s882a
-.section s882b
-.section s883a
-.section s883b
-.section s884a
-.section s884b
-.section s885a
-.section s885b
-.section s886a
-.section s886b
-.section s887a
-.section s887b
-.section s888a
-.section s888b
-.section s889a
-.section s889b
-.section s880a
-.section s880b
-.section s89aa
-.section s89ab
-.section s89ba
-.section s89bb
-.section s89ca
-.section s89cb
-.section s89da
-.section s89db
-.section s89ea
-.section s89eb
-.section s89fa
-.section s89fb
-.section s89ga
-.section s89gb
-.section s89ha
-.section s89hb
-.section s89ia
-.section s89ib
-.section s89ja
-.section s89jb
-.section s89ka
-.section s89kb
-.section s89la
-.section s89lb
-.section s89ma
-.section s89mb
-.section s89na
-.section s89nb
-.section s89oa
-.section s89ob
-.section s89pa
-.section s89pb
-.section s89qa
-.section s89qb
-.section s89ra
-.section s89rb
-.section s89sa
-.section s89sb
-.section s89ta
-.section s89tb
-.section s89ua
-.section s89ub
-.section s89va
-.section s89vb
-.section s89wa
-.section s89wb
-.section s89xa
-.section s89xb
-.section s89ya
-.section s89yb
-.section s89za
-.section s89zb
-.section s891a
-.section s891b
-.section s892a
-.section s892b
-.section s893a
-.section s893b
-.section s894a
-.section s894b
-.section s895a
-.section s895b
-.section s896a
-.section s896b
-.section s897a
-.section s897b
-.section s898a
-.section s898b
-.section s899a
-.section s899b
-.section s890a
-.section s890b
-.section s80aa
-.section s80ab
-.section s80ba
-.section s80bb
-.section s80ca
-.section s80cb
-.section s80da
-.section s80db
-.section s80ea
-.section s80eb
-.section s80fa
-.section s80fb
-.section s80ga
-.section s80gb
-.section s80ha
-.section s80hb
-.section s80ia
-.section s80ib
-.section s80ja
-.section s80jb
-.section s80ka
-.section s80kb
-.section s80la
-.section s80lb
-.section s80ma
-.section s80mb
-.section s80na
-.section s80nb
-.section s80oa
-.section s80ob
-.section s80pa
-.section s80pb
-.section s80qa
-.section s80qb
-.section s80ra
-.section s80rb
-.section s80sa
-.section s80sb
-.section s80ta
-.section s80tb
-.section s80ua
-.section s80ub
-.section s80va
-.section s80vb
-.section s80wa
-.section s80wb
-.section s80xa
-.section s80xb
-.section s80ya
-.section s80yb
-.section s80za
-.section s80zb
-.section s801a
-.section s801b
-.section s802a
-.section s802b
-.section s803a
-.section s803b
-.section s804a
-.section s804b
-.section s805a
-.section s805b
-.section s806a
-.section s806b
-.section s807a
-.section s807b
-.section s808a
-.section s808b
-.section s809a
-.section s809b
-.section s800a
-.section s800b
-.section s9aaa
-.section s9aab
-.section s9aba
-.section s9abb
-.section s9aca
-.section s9acb
-.section s9ada
-.section s9adb
-.section s9aea
-.section s9aeb
-.section s9afa
-.section s9afb
-.section s9aga
-.section s9agb
-.section s9aha
-.section s9ahb
-.section s9aia
-.section s9aib
-.section s9aja
-.section s9ajb
-.section s9aka
-.section s9akb
-.section s9ala
-.section s9alb
-.section s9ama
-.section s9amb
-.section s9ana
-.section s9anb
-.section s9aoa
-.section s9aob
-.section s9apa
-.section s9apb
-.section s9aqa
-.section s9aqb
-.section s9ara
-.section s9arb
-.section s9asa
-.section s9asb
-.section s9ata
-.section s9atb
-.section s9aua
-.section s9aub
-.section s9ava
-.section s9avb
-.section s9awa
-.section s9awb
-.section s9axa
-.section s9axb
-.section s9aya
-.section s9ayb
-.section s9aza
-.section s9azb
-.section s9a1a
-.section s9a1b
-.section s9a2a
-.section s9a2b
-.section s9a3a
-.section s9a3b
-.section s9a4a
-.section s9a4b
-.section s9a5a
-.section s9a5b
-.section s9a6a
-.section s9a6b
-.section s9a7a
-.section s9a7b
-.section s9a8a
-.section s9a8b
-.section s9a9a
-.section s9a9b
-.section s9a0a
-.section s9a0b
-.section s9baa
-.section s9bab
-.section s9bba
-.section s9bbb
-.section s9bca
-.section s9bcb
-.section s9bda
-.section s9bdb
-.section s9bea
-.section s9beb
-.section s9bfa
-.section s9bfb
-.section s9bga
-.section s9bgb
-.section s9bha
-.section s9bhb
-.section s9bia
-.section s9bib
-.section s9bja
-.section s9bjb
-.section s9bka
-.section s9bkb
-.section s9bla
-.section s9blb
-.section s9bma
-.section s9bmb
-.section s9bna
-.section s9bnb
-.section s9boa
-.section s9bob
-.section s9bpa
-.section s9bpb
-.section s9bqa
-.section s9bqb
-.section s9bra
-.section s9brb
-.section s9bsa
-.section s9bsb
-.section s9bta
-.section s9btb
-.section s9bua
-.section s9bub
-.section s9bva
-.section s9bvb
-.section s9bwa
-.section s9bwb
-.section s9bxa
-.section s9bxb
-.section s9bya
-.section s9byb
-.section s9bza
-.section s9bzb
-.section s9b1a
-.section s9b1b
-.section s9b2a
-.section s9b2b
-.section s9b3a
-.section s9b3b
-.section s9b4a
-.section s9b4b
-.section s9b5a
-.section s9b5b
-.section s9b6a
-.section s9b6b
-.section s9b7a
-.section s9b7b
-.section s9b8a
-.section s9b8b
-.section s9b9a
-.section s9b9b
-.section s9b0a
-.section s9b0b
-.section s9caa
-.section s9cab
-.section s9cba
-.section s9cbb
-.section s9cca
-.section s9ccb
-.section s9cda
-.section s9cdb
-.section s9cea
-.section s9ceb
-.section s9cfa
-.section s9cfb
-.section s9cga
-.section s9cgb
-.section s9cha
-.section s9chb
-.section s9cia
-.section s9cib
-.section s9cja
-.section s9cjb
-.section s9cka
-.section s9ckb
-.section s9cla
-.section s9clb
-.section s9cma
-.section s9cmb
-.section s9cna
-.section s9cnb
-.section s9coa
-.section s9cob
-.section s9cpa
-.section s9cpb
-.section s9cqa
-.section s9cqb
-.section s9cra
-.section s9crb
-.section s9csa
-.section s9csb
-.section s9cta
-.section s9ctb
-.section s9cua
-.section s9cub
-.section s9cva
-.section s9cvb
-.section s9cwa
-.section s9cwb
-.section s9cxa
-.section s9cxb
-.section s9cya
-.section s9cyb
-.section s9cza
-.section s9czb
-.section s9c1a
-.section s9c1b
-.section s9c2a
-.section s9c2b
-.section s9c3a
-.section s9c3b
-.section s9c4a
-.section s9c4b
-.section s9c5a
-.section s9c5b
-.section s9c6a
-.section s9c6b
-.section s9c7a
-.section s9c7b
-.section s9c8a
-.section s9c8b
-.section s9c9a
-.section s9c9b
-.section s9c0a
-.section s9c0b
-.section s9daa
-.section s9dab
-.section s9dba
-.section s9dbb
-.section s9dca
-.section s9dcb
-.section s9dda
-.section s9ddb
-.section s9dea
-.section s9deb
-.section s9dfa
-.section s9dfb
-.section s9dga
-.section s9dgb
-.section s9dha
-.section s9dhb
-.section s9dia
-.section s9dib
-.section s9dja
-.section s9djb
-.section s9dka
-.section s9dkb
-.section s9dla
-.section s9dlb
-.section s9dma
-.section s9dmb
-.section s9dna
-.section s9dnb
-.section s9doa
-.section s9dob
-.section s9dpa
-.section s9dpb
-.section s9dqa
-.section s9dqb
-.section s9dra
-.section s9drb
-.section s9dsa
-.section s9dsb
-.section s9dta
-.section s9dtb
-.section s9dua
-.section s9dub
-.section s9dva
-.section s9dvb
-.section s9dwa
-.section s9dwb
-.section s9dxa
-.section s9dxb
-.section s9dya
-.section s9dyb
-.section s9dza
-.section s9dzb
-.section s9d1a
-.section s9d1b
-.section s9d2a
-.section s9d2b
-.section s9d3a
-.section s9d3b
-.section s9d4a
-.section s9d4b
-.section s9d5a
-.section s9d5b
-.section s9d6a
-.section s9d6b
-.section s9d7a
-.section s9d7b
-.section s9d8a
-.section s9d8b
-.section s9d9a
-.section s9d9b
-.section s9d0a
-.section s9d0b
-.section s9eaa
-.section s9eab
-.section s9eba
-.section s9ebb
-.section s9eca
-.section s9ecb
-.section s9eda
-.section s9edb
-.section s9eea
-.section s9eeb
-.section s9efa
-.section s9efb
-.section s9ega
-.section s9egb
-.section s9eha
-.section s9ehb
-.section s9eia
-.section s9eib
-.section s9eja
-.section s9ejb
-.section s9eka
-.section s9ekb
-.section s9ela
-.section s9elb
-.section s9ema
-.section s9emb
-.section s9ena
-.section s9enb
-.section s9eoa
-.section s9eob
-.section s9epa
-.section s9epb
-.section s9eqa
-.section s9eqb
-.section s9era
-.section s9erb
-.section s9esa
-.section s9esb
-.section s9eta
-.section s9etb
-.section s9eua
-.section s9eub
-.section s9eva
-.section s9evb
-.section s9ewa
-.section s9ewb
-.section s9exa
-.section s9exb
-.section s9eya
-.section s9eyb
-.section s9eza
-.section s9ezb
-.section s9e1a
-.section s9e1b
-.section s9e2a
-.section s9e2b
-.section s9e3a
-.section s9e3b
-.section s9e4a
-.section s9e4b
-.section s9e5a
-.section s9e5b
-.section s9e6a
-.section s9e6b
-.section s9e7a
-.section s9e7b
-.section s9e8a
-.section s9e8b
-.section s9e9a
-.section s9e9b
-.section s9e0a
-.section s9e0b
-.section s9faa
-.section s9fab
-.section s9fba
-.section s9fbb
-.section s9fca
-.section s9fcb
-.section s9fda
-.section s9fdb
-.section s9fea
-.section s9feb
-.section s9ffa
-.section s9ffb
-.section s9fga
-.section s9fgb
-.section s9fha
-.section s9fhb
-.section s9fia
-.section s9fib
-.section s9fja
-.section s9fjb
-.section s9fka
-.section s9fkb
-.section s9fla
-.section s9flb
-.section s9fma
-.section s9fmb
-.section s9fna
-.section s9fnb
-.section s9foa
-.section s9fob
-.section s9fpa
-.section s9fpb
-.section s9fqa
-.section s9fqb
-.section s9fra
-.section s9frb
-.section s9fsa
-.section s9fsb
-.section s9fta
-.section s9ftb
-.section s9fua
-.section s9fub
-.section s9fva
-.section s9fvb
-.section s9fwa
-.section s9fwb
-.section s9fxa
-.section s9fxb
-.section s9fya
-.section s9fyb
-.section s9fza
-.section s9fzb
-.section s9f1a
-.section s9f1b
-.section s9f2a
-.section s9f2b
-.section s9f3a
-.section s9f3b
-.section s9f4a
-.section s9f4b
-.section s9f5a
-.section s9f5b
-.section s9f6a
-.section s9f6b
-.section s9f7a
-.section s9f7b
-.section s9f8a
-.section s9f8b
-.section s9f9a
-.section s9f9b
-.section s9f0a
-.section s9f0b
-.section s9gaa
-.section s9gab
-.section s9gba
-.section s9gbb
-.section s9gca
-.section s9gcb
-.section s9gda
-.section s9gdb
-.section s9gea
-.section s9geb
-.section s9gfa
-.section s9gfb
-.section s9gga
-.section s9ggb
-.section s9gha
-.section s9ghb
-.section s9gia
-.section s9gib
-.section s9gja
-.section s9gjb
-.section s9gka
-.section s9gkb
-.section s9gla
-.section s9glb
-.section s9gma
-.section s9gmb
-.section s9gna
-.section s9gnb
-.section s9goa
-.section s9gob
-.section s9gpa
-.section s9gpb
-.section s9gqa
-.section s9gqb
-.section s9gra
-.section s9grb
-.section s9gsa
-.section s9gsb
-.section s9gta
-.section s9gtb
-.section s9gua
-.section s9gub
-.section s9gva
-.section s9gvb
-.section s9gwa
-.section s9gwb
-.section s9gxa
-.section s9gxb
-.section s9gya
-.section s9gyb
-.section s9gza
-.section s9gzb
-.section s9g1a
-.section s9g1b
-.section s9g2a
-.section s9g2b
-.section s9g3a
-.section s9g3b
-.section s9g4a
-.section s9g4b
-.section s9g5a
-.section s9g5b
-.section s9g6a
-.section s9g6b
-.section s9g7a
-.section s9g7b
-.section s9g8a
-.section s9g8b
-.section s9g9a
-.section s9g9b
-.section s9g0a
-.section s9g0b
-.section s9haa
-.section s9hab
-.section s9hba
-.section s9hbb
-.section s9hca
-.section s9hcb
-.section s9hda
-.section s9hdb
-.section s9hea
-.section s9heb
-.section s9hfa
-.section s9hfb
-.section s9hga
-.section s9hgb
-.section s9hha
-.section s9hhb
-.section s9hia
-.section s9hib
-.section s9hja
-.section s9hjb
-.section s9hka
-.section s9hkb
-.section s9hla
-.section s9hlb
-.section s9hma
-.section s9hmb
-.section s9hna
-.section s9hnb
-.section s9hoa
-.section s9hob
-.section s9hpa
-.section s9hpb
-.section s9hqa
-.section s9hqb
-.section s9hra
-.section s9hrb
-.section s9hsa
-.section s9hsb
-.section s9hta
-.section s9htb
-.section s9hua
-.section s9hub
-.section s9hva
-.section s9hvb
-.section s9hwa
-.section s9hwb
-.section s9hxa
-.section s9hxb
-.section s9hya
-.section s9hyb
-.section s9hza
-.section s9hzb
-.section s9h1a
-.section s9h1b
-.section s9h2a
-.section s9h2b
-.section s9h3a
-.section s9h3b
-.section s9h4a
-.section s9h4b
-.section s9h5a
-.section s9h5b
-.section s9h6a
-.section s9h6b
-.section s9h7a
-.section s9h7b
-.section s9h8a
-.section s9h8b
-.section s9h9a
-.section s9h9b
-.section s9h0a
-.section s9h0b
-.section s9iaa
-.section s9iab
-.section s9iba
-.section s9ibb
-.section s9ica
-.section s9icb
-.section s9ida
-.section s9idb
-.section s9iea
-.section s9ieb
-.section s9ifa
-.section s9ifb
-.section s9iga
-.section s9igb
-.section s9iha
-.section s9ihb
-.section s9iia
-.section s9iib
-.section s9ija
-.section s9ijb
-.section s9ika
-.section s9ikb
-.section s9ila
-.section s9ilb
-.section s9ima
-.section s9imb
-.section s9ina
-.section s9inb
-.section s9ioa
-.section s9iob
-.section s9ipa
-.section s9ipb
-.section s9iqa
-.section s9iqb
-.section s9ira
-.section s9irb
-.section s9isa
-.section s9isb
-.section s9ita
-.section s9itb
-.section s9iua
-.section s9iub
-.section s9iva
-.section s9ivb
-.section s9iwa
-.section s9iwb
-.section s9ixa
-.section s9ixb
-.section s9iya
-.section s9iyb
-.section s9iza
-.section s9izb
-.section s9i1a
-.section s9i1b
-.section s9i2a
-.section s9i2b
-.section s9i3a
-.section s9i3b
-.section s9i4a
-.section s9i4b
-.section s9i5a
-.section s9i5b
-.section s9i6a
-.section s9i6b
-.section s9i7a
-.section s9i7b
-.section s9i8a
-.section s9i8b
-.section s9i9a
-.section s9i9b
-.section s9i0a
-.section s9i0b
-.section s9jaa
-.section s9jab
-.section s9jba
-.section s9jbb
-.section s9jca
-.section s9jcb
-.section s9jda
-.section s9jdb
-.section s9jea
-.section s9jeb
-.section s9jfa
-.section s9jfb
-.section s9jga
-.section s9jgb
-.section s9jha
-.section s9jhb
-.section s9jia
-.section s9jib
-.section s9jja
-.section s9jjb
-.section s9jka
-.section s9jkb
-.section s9jla
-.section s9jlb
-.section s9jma
-.section s9jmb
-.section s9jna
-.section s9jnb
-.section s9joa
-.section s9job
-.section s9jpa
-.section s9jpb
-.section s9jqa
-.section s9jqb
-.section s9jra
-.section s9jrb
-.section s9jsa
-.section s9jsb
-.section s9jta
-.section s9jtb
-.section s9jua
-.section s9jub
-.section s9jva
-.section s9jvb
-.section s9jwa
-.section s9jwb
-.section s9jxa
-.section s9jxb
-.section s9jya
-.section s9jyb
-.section s9jza
-.section s9jzb
-.section s9j1a
-.section s9j1b
-.section s9j2a
-.section s9j2b
-.section s9j3a
-.section s9j3b
-.section s9j4a
-.section s9j4b
-.section s9j5a
-.section s9j5b
-.section s9j6a
-.section s9j6b
-.section s9j7a
-.section s9j7b
-.section s9j8a
-.section s9j8b
-.section s9j9a
-.section s9j9b
-.section s9j0a
-.section s9j0b
-.section s9kaa
-.section s9kab
-.section s9kba
-.section s9kbb
-.section s9kca
-.section s9kcb
-.section s9kda
-.section s9kdb
-.section s9kea
-.section s9keb
-.section s9kfa
-.section s9kfb
-.section s9kga
-.section s9kgb
-.section s9kha
-.section s9khb
-.section s9kia
-.section s9kib
-.section s9kja
-.section s9kjb
-.section s9kka
-.section s9kkb
-.section s9kla
-.section s9klb
-.section s9kma
-.section s9kmb
-.section s9kna
-.section s9knb
-.section s9koa
-.section s9kob
-.section s9kpa
-.section s9kpb
-.section s9kqa
-.section s9kqb
-.section s9kra
-.section s9krb
-.section s9ksa
-.section s9ksb
-.section s9kta
-.section s9ktb
-.section s9kua
-.section s9kub
-.section s9kva
-.section s9kvb
-.section s9kwa
-.section s9kwb
-.section s9kxa
-.section s9kxb
-.section s9kya
-.section s9kyb
-.section s9kza
-.section s9kzb
-.section s9k1a
-.section s9k1b
-.section s9k2a
-.section s9k2b
-.section s9k3a
-.section s9k3b
-.section s9k4a
-.section s9k4b
-.section s9k5a
-.section s9k5b
-.section s9k6a
-.section s9k6b
-.section s9k7a
-.section s9k7b
-.section s9k8a
-.section s9k8b
-.section s9k9a
-.section s9k9b
-.section s9k0a
-.section s9k0b
-.section s9laa
-.section s9lab
-.section s9lba
-.section s9lbb
-.section s9lca
-.section s9lcb
-.section s9lda
-.section s9ldb
-.section s9lea
-.section s9leb
-.section s9lfa
-.section s9lfb
-.section s9lga
-.section s9lgb
-.section s9lha
-.section s9lhb
-.section s9lia
-.section s9lib
-.section s9lja
-.section s9ljb
-.section s9lka
-.section s9lkb
-.section s9lla
-.section s9llb
-.section s9lma
-.section s9lmb
-.section s9lna
-.section s9lnb
-.section s9loa
-.section s9lob
-.section s9lpa
-.section s9lpb
-.section s9lqa
-.section s9lqb
-.section s9lra
-.section s9lrb
-.section s9lsa
-.section s9lsb
-.section s9lta
-.section s9ltb
-.section s9lua
-.section s9lub
-.section s9lva
-.section s9lvb
-.section s9lwa
-.section s9lwb
-.section s9lxa
-.section s9lxb
-.section s9lya
-.section s9lyb
-.section s9lza
-.section s9lzb
-.section s9l1a
-.section s9l1b
-.section s9l2a
-.section s9l2b
-.section s9l3a
-.section s9l3b
-.section s9l4a
-.section s9l4b
-.section s9l5a
-.section s9l5b
-.section s9l6a
-.section s9l6b
-.section s9l7a
-.section s9l7b
-.section s9l8a
-.section s9l8b
-.section s9l9a
-.section s9l9b
-.section s9l0a
-.section s9l0b
-.section s9maa
-.section s9mab
-.section s9mba
-.section s9mbb
-.section s9mca
-.section s9mcb
-.section s9mda
-.section s9mdb
-.section s9mea
-.section s9meb
-.section s9mfa
-.section s9mfb
-.section s9mga
-.section s9mgb
-.section s9mha
-.section s9mhb
-.section s9mia
-.section s9mib
-.section s9mja
-.section s9mjb
-.section s9mka
-.section s9mkb
-.section s9mla
-.section s9mlb
-.section s9mma
-.section s9mmb
-.section s9mna
-.section s9mnb
-.section s9moa
-.section s9mob
-.section s9mpa
-.section s9mpb
-.section s9mqa
-.section s9mqb
-.section s9mra
-.section s9mrb
-.section s9msa
-.section s9msb
-.section s9mta
-.section s9mtb
-.section s9mua
-.section s9mub
-.section s9mva
-.section s9mvb
-.section s9mwa
-.section s9mwb
-.section s9mxa
-.section s9mxb
-.section s9mya
-.section s9myb
-.section s9mza
-.section s9mzb
-.section s9m1a
-.section s9m1b
-.section s9m2a
-.section s9m2b
-.section s9m3a
-.section s9m3b
-.section s9m4a
-.section s9m4b
-.section s9m5a
-.section s9m5b
-.section s9m6a
-.section s9m6b
-.section s9m7a
-.section s9m7b
-.section s9m8a
-.section s9m8b
-.section s9m9a
-.section s9m9b
-.section s9m0a
-.section s9m0b
-.section s9naa
-.section s9nab
-.section s9nba
-.section s9nbb
-.section s9nca
-.section s9ncb
-.section s9nda
-.section s9ndb
-.section s9nea
-.section s9neb
-.section s9nfa
-.section s9nfb
-.section s9nga
-.section s9ngb
-.section s9nha
-.section s9nhb
-.section s9nia
-.section s9nib
-.section s9nja
-.section s9njb
-.section s9nka
-.section s9nkb
-.section s9nla
-.section s9nlb
-.section s9nma
-.section s9nmb
-.section s9nna
-.section s9nnb
-.section s9noa
-.section s9nob
-.section s9npa
-.section s9npb
-.section s9nqa
-.section s9nqb
-.section s9nra
-.section s9nrb
-.section s9nsa
-.section s9nsb
-.section s9nta
-.section s9ntb
-.section s9nua
-.section s9nub
-.section s9nva
-.section s9nvb
-.section s9nwa
-.section s9nwb
-.section s9nxa
-.section s9nxb
-.section s9nya
-.section s9nyb
-.section s9nza
-.section s9nzb
-.section s9n1a
-.section s9n1b
-.section s9n2a
-.section s9n2b
-.section s9n3a
-.section s9n3b
-.section s9n4a
-.section s9n4b
-.section s9n5a
-.section s9n5b
-.section s9n6a
-.section s9n6b
-.section s9n7a
-.section s9n7b
-.section s9n8a
-.section s9n8b
-.section s9n9a
-.section s9n9b
-.section s9n0a
-.section s9n0b
-.section s9oaa
-.section s9oab
-.section s9oba
-.section s9obb
-.section s9oca
-.section s9ocb
-.section s9oda
-.section s9odb
-.section s9oea
-.section s9oeb
-.section s9ofa
-.section s9ofb
-.section s9oga
-.section s9ogb
-.section s9oha
-.section s9ohb
-.section s9oia
-.section s9oib
-.section s9oja
-.section s9ojb
-.section s9oka
-.section s9okb
-.section s9ola
-.section s9olb
-.section s9oma
-.section s9omb
-.section s9ona
-.section s9onb
-.section s9ooa
-.section s9oob
-.section s9opa
-.section s9opb
-.section s9oqa
-.section s9oqb
-.section s9ora
-.section s9orb
-.section s9osa
-.section s9osb
-.section s9ota
-.section s9otb
-.section s9oua
-.section s9oub
-.section s9ova
-.section s9ovb
-.section s9owa
-.section s9owb
-.section s9oxa
-.section s9oxb
-.section s9oya
-.section s9oyb
-.section s9oza
-.section s9ozb
-.section s9o1a
-.section s9o1b
-.section s9o2a
-.section s9o2b
-.section s9o3a
-.section s9o3b
-.section s9o4a
-.section s9o4b
-.section s9o5a
-.section s9o5b
-.section s9o6a
-.section s9o6b
-.section s9o7a
-.section s9o7b
-.section s9o8a
-.section s9o8b
-.section s9o9a
-.section s9o9b
-.section s9o0a
-.section s9o0b
-.section s9paa
-.section s9pab
-.section s9pba
-.section s9pbb
-.section s9pca
-.section s9pcb
-.section s9pda
-.section s9pdb
-.section s9pea
-.section s9peb
-.section s9pfa
-.section s9pfb
-.section s9pga
-.section s9pgb
-.section s9pha
-.section s9phb
-.section s9pia
-.section s9pib
-.section s9pja
-.section s9pjb
-.section s9pka
-.section s9pkb
-.section s9pla
-.section s9plb
-.section s9pma
-.section s9pmb
-.section s9pna
-.section s9pnb
-.section s9poa
-.section s9pob
-.section s9ppa
-.section s9ppb
-.section s9pqa
-.section s9pqb
-.section s9pra
-.section s9prb
-.section s9psa
-.section s9psb
-.section s9pta
-.section s9ptb
-.section s9pua
-.section s9pub
-.section s9pva
-.section s9pvb
-.section s9pwa
-.section s9pwb
-.section s9pxa
-.section s9pxb
-.section s9pya
-.section s9pyb
-.section s9pza
-.section s9pzb
-.section s9p1a
-.section s9p1b
-.section s9p2a
-.section s9p2b
-.section s9p3a
-.section s9p3b
-.section s9p4a
-.section s9p4b
-.section s9p5a
-.section s9p5b
-.section s9p6a
-.section s9p6b
-.section s9p7a
-.section s9p7b
-.section s9p8a
-.section s9p8b
-.section s9p9a
-.section s9p9b
-.section s9p0a
-.section s9p0b
-.section s9qaa
-.section s9qab
-.section s9qba
-.section s9qbb
-.section s9qca
-.section s9qcb
-.section s9qda
-.section s9qdb
-.section s9qea
-.section s9qeb
-.section s9qfa
-.section s9qfb
-.section s9qga
-.section s9qgb
-.section s9qha
-.section s9qhb
-.section s9qia
-.section s9qib
-.section s9qja
-.section s9qjb
-.section s9qka
-.section s9qkb
-.section s9qla
-.section s9qlb
-.section s9qma
-.section s9qmb
-.section s9qna
-.section s9qnb
-.section s9qoa
-.section s9qob
-.section s9qpa
-.section s9qpb
-.section s9qqa
-.section s9qqb
-.section s9qra
-.section s9qrb
-.section s9qsa
-.section s9qsb
-.section s9qta
-.section s9qtb
-.section s9qua
-.section s9qub
-.section s9qva
-.section s9qvb
-.section s9qwa
-.section s9qwb
-.section s9qxa
-.section s9qxb
-.section s9qya
-.section s9qyb
-.section s9qza
-.section s9qzb
-.section s9q1a
-.section s9q1b
-.section s9q2a
-.section s9q2b
-.section s9q3a
-.section s9q3b
-.section s9q4a
-.section s9q4b
-.section s9q5a
-.section s9q5b
-.section s9q6a
-.section s9q6b
-.section s9q7a
-.section s9q7b
-.section s9q8a
-.section s9q8b
-.section s9q9a
-.section s9q9b
-.section s9q0a
-.section s9q0b
-.section s9raa
-.section s9rab
-.section s9rba
-.section s9rbb
-.section s9rca
-.section s9rcb
-.section s9rda
-.section s9rdb
-.section s9rea
-.section s9reb
-.section s9rfa
-.section s9rfb
-.section s9rga
-.section s9rgb
-.section s9rha
-.section s9rhb
-.section s9ria
-.section s9rib
-.section s9rja
-.section s9rjb
-.section s9rka
-.section s9rkb
-.section s9rla
-.section s9rlb
-.section s9rma
-.section s9rmb
-.section s9rna
-.section s9rnb
-.section s9roa
-.section s9rob
-.section s9rpa
-.section s9rpb
-.section s9rqa
-.section s9rqb
-.section s9rra
-.section s9rrb
-.section s9rsa
-.section s9rsb
-.section s9rta
-.section s9rtb
-.section s9rua
-.section s9rub
-.section s9rva
-.section s9rvb
-.section s9rwa
-.section s9rwb
-.section s9rxa
-.section s9rxb
-.section s9rya
-.section s9ryb
-.section s9rza
-.section s9rzb
-.section s9r1a
-.section s9r1b
-.section s9r2a
-.section s9r2b
-.section s9r3a
-.section s9r3b
-.section s9r4a
-.section s9r4b
-.section s9r5a
-.section s9r5b
-.section s9r6a
-.section s9r6b
-.section s9r7a
-.section s9r7b
-.section s9r8a
-.section s9r8b
-.section s9r9a
-.section s9r9b
-.section s9r0a
-.section s9r0b
-.section s9saa
-.section s9sab
-.section s9sba
-.section s9sbb
-.section s9sca
-.section s9scb
-.section s9sda
-.section s9sdb
-.section s9sea
-.section s9seb
-.section s9sfa
-.section s9sfb
-.section s9sga
-.section s9sgb
-.section s9sha
-.section s9shb
-.section s9sia
-.section s9sib
-.section s9sja
-.section s9sjb
-.section s9ska
-.section s9skb
-.section s9sla
-.section s9slb
-.section s9sma
-.section s9smb
-.section s9sna
-.section s9snb
-.section s9soa
-.section s9sob
-.section s9spa
-.section s9spb
-.section s9sqa
-.section s9sqb
-.section s9sra
-.section s9srb
-.section s9ssa
-.section s9ssb
-.section s9sta
-.section s9stb
-.section s9sua
-.section s9sub
-.section s9sva
-.section s9svb
-.section s9swa
-.section s9swb
-.section s9sxa
-.section s9sxb
-.section s9sya
-.section s9syb
-.section s9sza
-.section s9szb
-.section s9s1a
-.section s9s1b
-.section s9s2a
-.section s9s2b
-.section s9s3a
-.section s9s3b
-.section s9s4a
-.section s9s4b
-.section s9s5a
-.section s9s5b
-.section s9s6a
-.section s9s6b
-.section s9s7a
-.section s9s7b
-.section s9s8a
-.section s9s8b
-.section s9s9a
-.section s9s9b
-.section s9s0a
-.section s9s0b
-.section s9taa
-.section s9tab
-.section s9tba
-.section s9tbb
-.section s9tca
-.section s9tcb
-.section s9tda
-.section s9tdb
-.section s9tea
-.section s9teb
-.section s9tfa
-.section s9tfb
-.section s9tga
-.section s9tgb
-.section s9tha
-.section s9thb
-.section s9tia
-.section s9tib
-.section s9tja
-.section s9tjb
-.section s9tka
-.section s9tkb
-.section s9tla
-.section s9tlb
-.section s9tma
-.section s9tmb
-.section s9tna
-.section s9tnb
-.section s9toa
-.section s9tob
-.section s9tpa
-.section s9tpb
-.section s9tqa
-.section s9tqb
-.section s9tra
-.section s9trb
-.section s9tsa
-.section s9tsb
-.section s9tta
-.section s9ttb
-.section s9tua
-.section s9tub
-.section s9tva
-.section s9tvb
-.section s9twa
-.section s9twb
-.section s9txa
-.section s9txb
-.section s9tya
-.section s9tyb
-.section s9tza
-.section s9tzb
-.section s9t1a
-.section s9t1b
-.section s9t2a
-.section s9t2b
-.section s9t3a
-.section s9t3b
-.section s9t4a
-.section s9t4b
-.section s9t5a
-.section s9t5b
-.section s9t6a
-.section s9t6b
-.section s9t7a
-.section s9t7b
-.section s9t8a
-.section s9t8b
-.section s9t9a
-.section s9t9b
-.section s9t0a
-.section s9t0b
-.section s9uaa
-.section s9uab
-.section s9uba
-.section s9ubb
-.section s9uca
-.section s9ucb
-.section s9uda
-.section s9udb
-.section s9uea
-.section s9ueb
-.section s9ufa
-.section s9ufb
-.section s9uga
-.section s9ugb
-.section s9uha
-.section s9uhb
-.section s9uia
-.section s9uib
-.section s9uja
-.section s9ujb
-.section s9uka
-.section s9ukb
-.section s9ula
-.section s9ulb
-.section s9uma
-.section s9umb
-.section s9una
-.section s9unb
-.section s9uoa
-.section s9uob
-.section s9upa
-.section s9upb
-.section s9uqa
-.section s9uqb
-.section s9ura
-.section s9urb
-.section s9usa
-.section s9usb
-.section s9uta
-.section s9utb
-.section s9uua
-.section s9uub
-.section s9uva
-.section s9uvb
-.section s9uwa
-.section s9uwb
-.section s9uxa
-.section s9uxb
-.section s9uya
-.section s9uyb
-.section s9uza
-.section s9uzb
-.section s9u1a
-.section s9u1b
-.section s9u2a
-.section s9u2b
-.section s9u3a
-.section s9u3b
-.section s9u4a
-.section s9u4b
-.section s9u5a
-.section s9u5b
-.section s9u6a
-.section s9u6b
-.section s9u7a
-.section s9u7b
-.section s9u8a
-.section s9u8b
-.section s9u9a
-.section s9u9b
-.section s9u0a
-.section s9u0b
-.section s9vaa
-.section s9vab
-.section s9vba
-.section s9vbb
-.section s9vca
-.section s9vcb
-.section s9vda
-.section s9vdb
-.section s9vea
-.section s9veb
-.section s9vfa
-.section s9vfb
-.section s9vga
-.section s9vgb
-.section s9vha
-.section s9vhb
-.section s9via
-.section s9vib
-.section s9vja
-.section s9vjb
-.section s9vka
-.section s9vkb
-.section s9vla
-.section s9vlb
-.section s9vma
-.section s9vmb
-.section s9vna
-.section s9vnb
-.section s9voa
-.section s9vob
-.section s9vpa
-.section s9vpb
-.section s9vqa
-.section s9vqb
-.section s9vra
-.section s9vrb
-.section s9vsa
-.section s9vsb
-.section s9vta
-.section s9vtb
-.section s9vua
-.section s9vub
-.section s9vva
-.section s9vvb
-.section s9vwa
-.section s9vwb
-.section s9vxa
-.section s9vxb
-.section s9vya
-.section s9vyb
-.section s9vza
-.section s9vzb
-.section s9v1a
-.section s9v1b
-.section s9v2a
-.section s9v2b
-.section s9v3a
-.section s9v3b
-.section s9v4a
-.section s9v4b
-.section s9v5a
-.section s9v5b
-.section s9v6a
-.section s9v6b
-.section s9v7a
-.section s9v7b
-.section s9v8a
-.section s9v8b
-.section s9v9a
-.section s9v9b
-.section s9v0a
-.section s9v0b
-.section s9waa
-.section s9wab
-.section s9wba
-.section s9wbb
-.section s9wca
-.section s9wcb
-.section s9wda
-.section s9wdb
-.section s9wea
-.section s9web
-.section s9wfa
-.section s9wfb
-.section s9wga
-.section s9wgb
-.section s9wha
-.section s9whb
-.section s9wia
-.section s9wib
-.section s9wja
-.section s9wjb
-.section s9wka
-.section s9wkb
-.section s9wla
-.section s9wlb
-.section s9wma
-.section s9wmb
-.section s9wna
-.section s9wnb
-.section s9woa
-.section s9wob
-.section s9wpa
-.section s9wpb
-.section s9wqa
-.section s9wqb
-.section s9wra
-.section s9wrb
-.section s9wsa
-.section s9wsb
-.section s9wta
-.section s9wtb
-.section s9wua
-.section s9wub
-.section s9wva
-.section s9wvb
-.section s9wwa
-.section s9wwb
-.section s9wxa
-.section s9wxb
-.section s9wya
-.section s9wyb
-.section s9wza
-.section s9wzb
-.section s9w1a
-.section s9w1b
-.section s9w2a
-.section s9w2b
-.section s9w3a
-.section s9w3b
-.section s9w4a
-.section s9w4b
-.section s9w5a
-.section s9w5b
-.section s9w6a
-.section s9w6b
-.section s9w7a
-.section s9w7b
-.section s9w8a
-.section s9w8b
-.section s9w9a
-.section s9w9b
-.section s9w0a
-.section s9w0b
-.section s9xaa
-.section s9xab
-.section s9xba
-.section s9xbb
-.section s9xca
-.section s9xcb
-.section s9xda
-.section s9xdb
-.section s9xea
-.section s9xeb
-.section s9xfa
-.section s9xfb
-.section s9xga
-.section s9xgb
-.section s9xha
-.section s9xhb
-.section s9xia
-.section s9xib
-.section s9xja
-.section s9xjb
-.section s9xka
-.section s9xkb
-.section s9xla
-.section s9xlb
-.section s9xma
-.section s9xmb
-.section s9xna
-.section s9xnb
-.section s9xoa
-.section s9xob
-.section s9xpa
-.section s9xpb
-.section s9xqa
-.section s9xqb
-.section s9xra
-.section s9xrb
-.section s9xsa
-.section s9xsb
-.section s9xta
-.section s9xtb
-.section s9xua
-.section s9xub
-.section s9xva
-.section s9xvb
-.section s9xwa
-.section s9xwb
-.section s9xxa
-.section s9xxb
-.section s9xya
-.section s9xyb
-.section s9xza
-.section s9xzb
-.section s9x1a
-.section s9x1b
-.section s9x2a
-.section s9x2b
-.section s9x3a
-.section s9x3b
-.section s9x4a
-.section s9x4b
-.section s9x5a
-.section s9x5b
-.section s9x6a
-.section s9x6b
-.section s9x7a
-.section s9x7b
-.section s9x8a
-.section s9x8b
-.section s9x9a
-.section s9x9b
-.section s9x0a
-.section s9x0b
-.section s9yaa
-.section s9yab
-.section s9yba
-.section s9ybb
-.section s9yca
-.section s9ycb
-.section s9yda
-.section s9ydb
-.section s9yea
-.section s9yeb
-.section s9yfa
-.section s9yfb
-.section s9yga
-.section s9ygb
-.section s9yha
-.section s9yhb
-.section s9yia
-.section s9yib
-.section s9yja
-.section s9yjb
-.section s9yka
-.section s9ykb
-.section s9yla
-.section s9ylb
-.section s9yma
-.section s9ymb
-.section s9yna
-.section s9ynb
-.section s9yoa
-.section s9yob
-.section s9ypa
-.section s9ypb
-.section s9yqa
-.section s9yqb
-.section s9yra
-.section s9yrb
-.section s9ysa
-.section s9ysb
-.section s9yta
-.section s9ytb
-.section s9yua
-.section s9yub
-.section s9yva
-.section s9yvb
-.section s9ywa
-.section s9ywb
-.section s9yxa
-.section s9yxb
-.section s9yya
-.section s9yyb
-.section s9yza
-.section s9yzb
-.section s9y1a
-.section s9y1b
-.section s9y2a
-.section s9y2b
-.section s9y3a
-.section s9y3b
-.section s9y4a
-.section s9y4b
-.section s9y5a
-.section s9y5b
-.section s9y6a
-.section s9y6b
-.section s9y7a
-.section s9y7b
-.section s9y8a
-.section s9y8b
-.section s9y9a
-.section s9y9b
-.section s9y0a
-.section s9y0b
-.section s9zaa
-.section s9zab
-.section s9zba
-.section s9zbb
-.section s9zca
-.section s9zcb
-.section s9zda
-.section s9zdb
-.section s9zea
-.section s9zeb
-.section s9zfa
-.section s9zfb
-.section s9zga
-.section s9zgb
-.section s9zha
-.section s9zhb
-.section s9zia
-.section s9zib
-.section s9zja
-.section s9zjb
-.section s9zka
-.section s9zkb
-.section s9zla
-.section s9zlb
-.section s9zma
-.section s9zmb
-.section s9zna
-.section s9znb
-.section s9zoa
-.section s9zob
-.section s9zpa
-.section s9zpb
-.section s9zqa
-.section s9zqb
-.section s9zra
-.section s9zrb
-.section s9zsa
-.section s9zsb
-.section s9zta
-.section s9ztb
-.section s9zua
-.section s9zub
-.section s9zva
-.section s9zvb
-.section s9zwa
-.section s9zwb
-.section s9zxa
-.section s9zxb
-.section s9zya
-.section s9zyb
-.section s9zza
-.section s9zzb
-.section s9z1a
-.section s9z1b
-.section s9z2a
-.section s9z2b
-.section s9z3a
-.section s9z3b
-.section s9z4a
-.section s9z4b
-.section s9z5a
-.section s9z5b
-.section s9z6a
-.section s9z6b
-.section s9z7a
-.section s9z7b
-.section s9z8a
-.section s9z8b
-.section s9z9a
-.section s9z9b
-.section s9z0a
-.section s9z0b
-.section s91aa
-.section s91ab
-.section s91ba
-.section s91bb
-.section s91ca
-.section s91cb
-.section s91da
-.section s91db
-.section s91ea
-.section s91eb
-.section s91fa
-.section s91fb
-.section s91ga
-.section s91gb
-.section s91ha
-.section s91hb
-.section s91ia
-.section s91ib
-.section s91ja
-.section s91jb
-.section s91ka
-.section s91kb
-.section s91la
-.section s91lb
-.section s91ma
-.section s91mb
-.section s91na
-.section s91nb
-.section s91oa
-.section s91ob
-.section s91pa
-.section s91pb
-.section s91qa
-.section s91qb
-.section s91ra
-.section s91rb
-.section s91sa
-.section s91sb
-.section s91ta
-.section s91tb
-.section s91ua
-.section s91ub
-.section s91va
-.section s91vb
-.section s91wa
-.section s91wb
-.section s91xa
-.section s91xb
-.section s91ya
-.section s91yb
-.section s91za
-.section s91zb
-.section s911a
-.section s911b
-.section s912a
-.section s912b
-.section s913a
-.section s913b
-.section s914a
-.section s914b
-.section s915a
-.section s915b
-.section s916a
-.section s916b
-.section s917a
-.section s917b
-.section s918a
-.section s918b
-.section s919a
-.section s919b
-.section s910a
-.section s910b
-.section s92aa
-.section s92ab
-.section s92ba
-.section s92bb
-.section s92ca
-.section s92cb
-.section s92da
-.section s92db
-.section s92ea
-.section s92eb
-.section s92fa
-.section s92fb
-.section s92ga
-.section s92gb
-.section s92ha
-.section s92hb
-.section s92ia
-.section s92ib
-.section s92ja
-.section s92jb
-.section s92ka
-.section s92kb
-.section s92la
-.section s92lb
-.section s92ma
-.section s92mb
-.section s92na
-.section s92nb
-.section s92oa
-.section s92ob
-.section s92pa
-.section s92pb
-.section s92qa
-.section s92qb
-.section s92ra
-.section s92rb
-.section s92sa
-.section s92sb
-.section s92ta
-.section s92tb
-.section s92ua
-.section s92ub
-.section s92va
-.section s92vb
-.section s92wa
-.section s92wb
-.section s92xa
-.section s92xb
-.section s92ya
-.section s92yb
-.section s92za
-.section s92zb
-.section s921a
-.section s921b
-.section s922a
-.section s922b
-.section s923a
-.section s923b
-.section s924a
-.section s924b
-.section s925a
-.section s925b
-.section s926a
-.section s926b
-.section s927a
-.section s927b
-.section s928a
-.section s928b
-.section s929a
-.section s929b
-.section s920a
-.section s920b
-.section s93aa
-.section s93ab
-.section s93ba
-.section s93bb
-.section s93ca
-.section s93cb
-.section s93da
-.section s93db
-.section s93ea
-.section s93eb
-.section s93fa
-.section s93fb
-.section s93ga
-.section s93gb
-.section s93ha
-.section s93hb
-.section s93ia
-.section s93ib
-.section s93ja
-.section s93jb
-.section s93ka
-.section s93kb
-.section s93la
-.section s93lb
-.section s93ma
-.section s93mb
-.section s93na
-.section s93nb
-.section s93oa
-.section s93ob
-.section s93pa
-.section s93pb
-.section s93qa
-.section s93qb
-.section s93ra
-.section s93rb
-.section s93sa
-.section s93sb
-.section s93ta
-.section s93tb
-.section s93ua
-.section s93ub
-.section s93va
-.section s93vb
-.section s93wa
-.section s93wb
-.section s93xa
-.section s93xb
-.section s93ya
-.section s93yb
-.section s93za
-.section s93zb
-.section s931a
-.section s931b
-.section s932a
-.section s932b
-.section s933a
-.section s933b
-.section s934a
-.section s934b
-.section s935a
-.section s935b
-.section s936a
-.section s936b
-.section s937a
-.section s937b
-.section s938a
-.section s938b
-.section s939a
-.section s939b
-.section s930a
-.section s930b
-.section s94aa
-.section s94ab
-.section s94ba
-.section s94bb
-.section s94ca
-.section s94cb
-.section s94da
-.section s94db
-.section s94ea
-.section s94eb
-.section s94fa
-.section s94fb
-.section s94ga
-.section s94gb
-.section s94ha
-.section s94hb
-.section s94ia
-.section s94ib
-.section s94ja
-.section s94jb
-.section s94ka
-.section s94kb
-.section s94la
-.section s94lb
-.section s94ma
-.section s94mb
-.section s94na
-.section s94nb
-.section s94oa
-.section s94ob
-.section s94pa
-.section s94pb
-.section s94qa
-.section s94qb
-.section s94ra
-.section s94rb
-.section s94sa
-.section s94sb
-.section s94ta
-.section s94tb
-.section s94ua
-.section s94ub
-.section s94va
-.section s94vb
-.section s94wa
-.section s94wb
-.section s94xa
-.section s94xb
-.section s94ya
-.section s94yb
-.section s94za
-.section s94zb
-.section s941a
-.section s941b
-.section s942a
-.section s942b
-.section s943a
-.section s943b
-.section s944a
-.section s944b
-.section s945a
-.section s945b
-.section s946a
-.section s946b
-.section s947a
-.section s947b
-.section s948a
-.section s948b
-.section s949a
-.section s949b
-.section s940a
-.section s940b
-.section s95aa
-.section s95ab
-.section s95ba
-.section s95bb
-.section s95ca
-.section s95cb
-.section s95da
-.section s95db
-.section s95ea
-.section s95eb
-.section s95fa
-.section s95fb
-.section s95ga
-.section s95gb
-.section s95ha
-.section s95hb
-.section s95ia
-.section s95ib
-.section s95ja
-.section s95jb
-.section s95ka
-.section s95kb
-.section s95la
-.section s95lb
-.section s95ma
-.section s95mb
-.section s95na
-.section s95nb
-.section s95oa
-.section s95ob
-.section s95pa
-.section s95pb
-.section s95qa
-.section s95qb
-.section s95ra
-.section s95rb
-.section s95sa
-.section s95sb
-.section s95ta
-.section s95tb
-.section s95ua
-.section s95ub
-.section s95va
-.section s95vb
-.section s95wa
-.section s95wb
-.section s95xa
-.section s95xb
-.section s95ya
-.section s95yb
-.section s95za
-.section s95zb
-.section s951a
-.section s951b
-.section s952a
-.section s952b
-.section s953a
-.section s953b
-.section s954a
-.section s954b
-.section s955a
-.section s955b
-.section s956a
-.section s956b
-.section s957a
-.section s957b
-.section s958a
-.section s958b
-.section s959a
-.section s959b
-.section s950a
-.section s950b
-.section s96aa
-.section s96ab
-.section s96ba
-.section s96bb
-.section s96ca
-.section s96cb
-.section s96da
-.section s96db
-.section s96ea
-.section s96eb
-.section s96fa
-.section s96fb
-.section s96ga
-.section s96gb
-.section s96ha
-.section s96hb
-.section s96ia
-.section s96ib
-.section s96ja
-.section s96jb
-.section s96ka
-.section s96kb
-.section s96la
-.section s96lb
-.section s96ma
-.section s96mb
-.section s96na
-.section s96nb
-.section s96oa
-.section s96ob
-.section s96pa
-.section s96pb
-.section s96qa
-.section s96qb
-.section s96ra
-.section s96rb
-.section s96sa
-.section s96sb
-.section s96ta
-.section s96tb
-.section s96ua
-.section s96ub
-.section s96va
-.section s96vb
-.section s96wa
-.section s96wb
-.section s96xa
-.section s96xb
-.section s96ya
-.section s96yb
-.section s96za
-.section s96zb
-.section s961a
-.section s961b
-.section s962a
-.section s962b
-.section s963a
-.section s963b
-.section s964a
-.section s964b
-.section s965a
-.section s965b
-.section s966a
-.section s966b
-.section s967a
-.section s967b
-.section s968a
-.section s968b
-.section s969a
-.section s969b
-.section s960a
-.section s960b
-.section s97aa
-.section s97ab
-.section s97ba
-.section s97bb
-.section s97ca
-.section s97cb
-.section s97da
-.section s97db
-.section s97ea
-.section s97eb
-.section s97fa
-.section s97fb
-.section s97ga
-.section s97gb
-.section s97ha
-.section s97hb
-.section s97ia
-.section s97ib
-.section s97ja
-.section s97jb
-.section s97ka
-.section s97kb
-.section s97la
-.section s97lb
-.section s97ma
-.section s97mb
-.section s97na
-.section s97nb
-.section s97oa
-.section s97ob
-.section s97pa
-.section s97pb
-.section s97qa
-.section s97qb
-.section s97ra
-.section s97rb
-.section s97sa
-.section s97sb
-.section s97ta
-.section s97tb
-.section s97ua
-.section s97ub
-.section s97va
-.section s97vb
-.section s97wa
-.section s97wb
-.section s97xa
-.section s97xb
-.section s97ya
-.section s97yb
-.section s97za
-.section s97zb
-.section s971a
-.section s971b
-.section s972a
-.section s972b
-.section s973a
-.section s973b
-.section s974a
-.section s974b
-.section s975a
-.section s975b
-.section s976a
-.section s976b
-.section s977a
-.section s977b
-.section s978a
-.section s978b
-.section s979a
-.section s979b
-.section s970a
-.section s970b
-.section s98aa
-.section s98ab
-.section s98ba
-.section s98bb
-.section s98ca
-.section s98cb
-.section s98da
-.section s98db
-.section s98ea
-.section s98eb
-.section s98fa
-.section s98fb
-.section s98ga
-.section s98gb
-.section s98ha
-.section s98hb
-.section s98ia
-.section s98ib
-.section s98ja
-.section s98jb
-.section s98ka
-.section s98kb
-.section s98la
-.section s98lb
-.section s98ma
-.section s98mb
-.section s98na
-.section s98nb
-.section s98oa
-.section s98ob
-.section s98pa
-.section s98pb
-.section s98qa
-.section s98qb
-.section s98ra
-.section s98rb
-.section s98sa
-.section s98sb
-.section s98ta
-.section s98tb
-.section s98ua
-.section s98ub
-.section s98va
-.section s98vb
-.section s98wa
-.section s98wb
-.section s98xa
-.section s98xb
-.section s98ya
-.section s98yb
-.section s98za
-.section s98zb
-.section s981a
-.section s981b
-.section s982a
-.section s982b
-.section s983a
-.section s983b
-.section s984a
-.section s984b
-.section s985a
-.section s985b
-.section s986a
-.section s986b
-.section s987a
-.section s987b
-.section s988a
-.section s988b
-.section s989a
-.section s989b
-.section s980a
-.section s980b
-.section s99aa
-.section s99ab
-.section s99ba
-.section s99bb
-.section s99ca
-.section s99cb
-.section s99da
-.section s99db
-.section s99ea
-.section s99eb
-.section s99fa
-.section s99fb
-.section s99ga
-.section s99gb
-.section s99ha
-.section s99hb
-.section s99ia
-.section s99ib
-.section s99ja
-.section s99jb
-.section s99ka
-.section s99kb
-.section s99la
-.section s99lb
-.section s99ma
-.section s99mb
-.section s99na
-.section s99nb
-.section s99oa
-.section s99ob
-.section s99pa
-.section s99pb
-.section s99qa
-.section s99qb
-.section s99ra
-.section s99rb
-.section s99sa
-.section s99sb
-.section s99ta
-.section s99tb
-.section s99ua
-.section s99ub
-.section s99va
-.section s99vb
-.section s99wa
-.section s99wb
-.section s99xa
-.section s99xb
-.section s99ya
-.section s99yb
-.section s99za
-.section s99zb
-.section s991a
-.section s991b
-.section s992a
-.section s992b
-.section s993a
-.section s993b
-.section s994a
-.section s994b
-.section s995a
-.section s995b
-.section s996a
-.section s996b
-.section s997a
-.section s997b
-.section s998a
-.section s998b
-.section s999a
-.section s999b
-.section s990a
-.section s990b
-.section s90aa
-.section s90ab
-.section s90ba
-.section s90bb
-.section s90ca
-.section s90cb
-.section s90da
-.section s90db
-.section s90ea
-.section s90eb
-.section s90fa
-.section s90fb
-.section s90ga
-.section s90gb
-.section s90ha
-.section s90hb
-.section s90ia
-.section s90ib
-.section s90ja
-.section s90jb
-.section s90ka
-.section s90kb
-.section s90la
-.section s90lb
-.section s90ma
-.section s90mb
-.section s90na
-.section s90nb
-.section s90oa
-.section s90ob
-.section s90pa
-.section s90pb
-.section s90qa
-.section s90qb
-.section s90ra
-.section s90rb
-.section s90sa
-.section s90sb
-.section s90ta
-.section s90tb
-.section s90ua
-.section s90ub
-.section s90va
-.section s90vb
-.section s90wa
-.section s90wb
-.section s90xa
-.section s90xb
-.section s90ya
-.section s90yb
-.section s90za
-.section s90zb
-.section s901a
-.section s901b
-.section s902a
-.section s902b
-.section s903a
-.section s903b
-.section s904a
-.section s904b
-.section s905a
-.section s905b
-.section s906a
-.section s906b
-.section s907a
-.section s907b
-.section s908a
-.section s908b
-.section s909a
-.section s909b
-.section s900a
-.section s900b
-.section s0aaa
-.section s0aab
-.section s0aba
-.section s0abb
-.section s0aca
-.section s0acb
-.section s0ada
-.section s0adb
-.section s0aea
-.section s0aeb
-.section s0afa
-.section s0afb
-.section s0aga
-.section s0agb
-.section s0aha
-.section s0ahb
-.section s0aia
-.section s0aib
-.section s0aja
-.section s0ajb
-.section s0aka
-.section s0akb
-.section s0ala
-.section s0alb
-.section s0ama
-.section s0amb
-.section s0ana
-.section s0anb
-.section s0aoa
-.section s0aob
-.section s0apa
-.section s0apb
-.section s0aqa
-.section s0aqb
-.section s0ara
-.section s0arb
-.section s0asa
-.section s0asb
-.section s0ata
-.section s0atb
-.section s0aua
-.section s0aub
-.section s0ava
-.section s0avb
-.section s0awa
-.section s0awb
-.section s0axa
-.section s0axb
-.section s0aya
-.section s0ayb
-.section s0aza
-.section s0azb
-.section s0a1a
-.section s0a1b
-.section s0a2a
-.section s0a2b
-.section s0a3a
-.section s0a3b
-.section s0a4a
-.section s0a4b
-.section s0a5a
-.section s0a5b
-.section s0a6a
-.section s0a6b
-.section s0a7a
-.section s0a7b
-.section s0a8a
-.section s0a8b
-.section s0a9a
-.section s0a9b
-.section s0a0a
-.section s0a0b
-.section s0baa
-.section s0bab
-.section s0bba
-.section s0bbb
-.section s0bca
-.section s0bcb
-.section s0bda
-.section s0bdb
-.section s0bea
-.section s0beb
-.section s0bfa
-.section s0bfb
-.section s0bga
-.section s0bgb
-.section s0bha
-.section s0bhb
-.section s0bia
-.section s0bib
-.section s0bja
-.section s0bjb
-.section s0bka
-.section s0bkb
-.section s0bla
-.section s0blb
-.section s0bma
-.section s0bmb
-.section s0bna
-.section s0bnb
-.section s0boa
-.section s0bob
-.section s0bpa
-.section s0bpb
-.section s0bqa
-.section s0bqb
-.section s0bra
-.section s0brb
-.section s0bsa
-.section s0bsb
-.section s0bta
-.section s0btb
-.section s0bua
-.section s0bub
-.section s0bva
-.section s0bvb
-.section s0bwa
-.section s0bwb
-.section s0bxa
-.section s0bxb
-.section s0bya
-.section s0byb
-.section s0bza
-.section s0bzb
-.section s0b1a
-.section s0b1b
-.section s0b2a
-.section s0b2b
-.section s0b3a
-.section s0b3b
-.section s0b4a
-.section s0b4b
-.section s0b5a
-.section s0b5b
-.section s0b6a
-.section s0b6b
-.section s0b7a
-.section s0b7b
-.section s0b8a
-.section s0b8b
-.section s0b9a
-.section s0b9b
-.section s0b0a
-.section s0b0b
-.section s0caa
-.section s0cab
-.section s0cba
-.section s0cbb
-.section s0cca
-.section s0ccb
-.section s0cda
-.section s0cdb
-.section s0cea
-.section s0ceb
-.section s0cfa
-.section s0cfb
-.section s0cga
-.section s0cgb
-.section s0cha
-.section s0chb
-.section s0cia
-.section s0cib
-.section s0cja
-.section s0cjb
-.section s0cka
-.section s0ckb
-.section s0cla
-.section s0clb
-.section s0cma
-.section s0cmb
-.section s0cna
-.section s0cnb
-.section s0coa
-.section s0cob
-.section s0cpa
-.section s0cpb
-.section s0cqa
-.section s0cqb
-.section s0cra
-.section s0crb
-.section s0csa
-.section s0csb
-.section s0cta
-.section s0ctb
-.section s0cua
-.section s0cub
-.section s0cva
-.section s0cvb
-.section s0cwa
-.section s0cwb
-.section s0cxa
-.section s0cxb
-.section s0cya
-.section s0cyb
-.section s0cza
-.section s0czb
-.section s0c1a
-.section s0c1b
-.section s0c2a
-.section s0c2b
-.section s0c3a
-.section s0c3b
-.section s0c4a
-.section s0c4b
-.section s0c5a
-.section s0c5b
-.section s0c6a
-.section s0c6b
-.section s0c7a
-.section s0c7b
-.section s0c8a
-.section s0c8b
-.section s0c9a
-.section s0c9b
-.section s0c0a
-.section s0c0b
-.section s0daa
-.section s0dab
-.section s0dba
-.section s0dbb
-.section s0dca
-.section s0dcb
-.section s0dda
-.section s0ddb
-.section s0dea
-.section s0deb
-.section s0dfa
-.section s0dfb
-.section s0dga
-.section s0dgb
-.section s0dha
-.section s0dhb
-.section s0dia
-.section s0dib
-.section s0dja
-.section s0djb
-.section s0dka
-.section s0dkb
-.section s0dla
-.section s0dlb
-.section s0dma
-.section s0dmb
-.section s0dna
-.section s0dnb
-.section s0doa
-.section s0dob
-.section s0dpa
-.section s0dpb
-.section s0dqa
-.section s0dqb
-.section s0dra
-.section s0drb
-.section s0dsa
-.section s0dsb
-.section s0dta
-.section s0dtb
-.section s0dua
-.section s0dub
-.section s0dva
-.section s0dvb
-.section s0dwa
-.section s0dwb
-.section s0dxa
-.section s0dxb
-.section s0dya
-.section s0dyb
-.section s0dza
-.section s0dzb
-.section s0d1a
-.section s0d1b
-.section s0d2a
-.section s0d2b
-.section s0d3a
-.section s0d3b
-.section s0d4a
-.section s0d4b
-.section s0d5a
-.section s0d5b
-.section s0d6a
-.section s0d6b
-.section s0d7a
-.section s0d7b
-.section s0d8a
-.section s0d8b
-.section s0d9a
-.section s0d9b
-.section s0d0a
-.section s0d0b
-.section s0eaa
-.section s0eab
-.section s0eba
-.section s0ebb
-.section s0eca
-.section s0ecb
-.section s0eda
-.section s0edb
-.section s0eea
-.section s0eeb
-.section s0efa
-.section s0efb
-.section s0ega
-.section s0egb
-.section s0eha
-.section s0ehb
-.section s0eia
-.section s0eib
-.section s0eja
-.section s0ejb
-.section s0eka
-.section s0ekb
-.section s0ela
-.section s0elb
-.section s0ema
-.section s0emb
-.section s0ena
-.section s0enb
-.section s0eoa
-.section s0eob
-.section s0epa
-.section s0epb
-.section s0eqa
-.section s0eqb
-.section s0era
-.section s0erb
-.section s0esa
-.section s0esb
-.section s0eta
-.section s0etb
-.section s0eua
-.section s0eub
-.section s0eva
-.section s0evb
-.section s0ewa
-.section s0ewb
-.section s0exa
-.section s0exb
-.section s0eya
-.section s0eyb
-.section s0eza
-.section s0ezb
-.section s0e1a
-.section s0e1b
-.section s0e2a
-.section s0e2b
-.section s0e3a
-.section s0e3b
-.section s0e4a
-.section s0e4b
-.section s0e5a
-.section s0e5b
-.section s0e6a
-.section s0e6b
-.section s0e7a
-.section s0e7b
-.section s0e8a
-.section s0e8b
-.section s0e9a
-.section s0e9b
-.section s0e0a
-.section s0e0b
-.section s0faa
-.section s0fab
-.section s0fba
-.section s0fbb
-.section s0fca
-.section s0fcb
-.section s0fda
-.section s0fdb
-.section s0fea
-.section s0feb
-.section s0ffa
-.section s0ffb
-.section s0fga
-.section s0fgb
-.section s0fha
-.section s0fhb
-.section s0fia
-.section s0fib
-.section s0fja
-.section s0fjb
-.section s0fka
-.section s0fkb
-.section s0fla
-.section s0flb
-.section s0fma
-.section s0fmb
-.section s0fna
-.section s0fnb
-.section s0foa
-.section s0fob
-.section s0fpa
-.section s0fpb
-.section s0fqa
-.section s0fqb
-.section s0fra
-.section s0frb
-.section s0fsa
-.section s0fsb
-.section s0fta
-.section s0ftb
-.section s0fua
-.section s0fub
-.section s0fva
-.section s0fvb
-.section s0fwa
-.section s0fwb
-.section s0fxa
-.section s0fxb
-.section s0fya
-.section s0fyb
-.section s0fza
-.section s0fzb
-.section s0f1a
-.section s0f1b
-.section s0f2a
-.section s0f2b
-.section s0f3a
-.section s0f3b
-.section s0f4a
-.section s0f4b
-.section s0f5a
-.section s0f5b
-.section s0f6a
-.section s0f6b
-.section s0f7a
-.section s0f7b
-.section s0f8a
-.section s0f8b
-.section s0f9a
-.section s0f9b
-.section s0f0a
-.section s0f0b
-.section s0gaa
-.section s0gab
-.section s0gba
-.section s0gbb
-.section s0gca
-.section s0gcb
-.section s0gda
-.section s0gdb
-.section s0gea
-.section s0geb
-.section s0gfa
-.section s0gfb
-.section s0gga
-.section s0ggb
-.section s0gha
-.section s0ghb
-.section s0gia
-.section s0gib
-.section s0gja
-.section s0gjb
-.section s0gka
-.section s0gkb
-.section s0gla
-.section s0glb
-.section s0gma
-.section s0gmb
-.section s0gna
-.section s0gnb
-.section s0goa
-.section s0gob
-.section s0gpa
-.section s0gpb
-.section s0gqa
-.section s0gqb
-.section s0gra
-.section s0grb
-.section s0gsa
-.section s0gsb
-.section s0gta
-.section s0gtb
-.section s0gua
-.section s0gub
-.section s0gva
-.section s0gvb
-.section s0gwa
-.section s0gwb
-.section s0gxa
-.section s0gxb
-.section s0gya
-.section s0gyb
-.section s0gza
-.section s0gzb
-.section s0g1a
-.section s0g1b
-.section s0g2a
-.section s0g2b
-.section s0g3a
-.section s0g3b
-.section s0g4a
-.section s0g4b
-.section s0g5a
-.section s0g5b
-.section s0g6a
-.section s0g6b
-.section s0g7a
-.section s0g7b
-.section s0g8a
-.section s0g8b
-.section s0g9a
-.section s0g9b
-.section s0g0a
-.section s0g0b
-.section s0haa
-.section s0hab
-.section s0hba
-.section s0hbb
-.section s0hca
-.section s0hcb
-.section s0hda
-.section s0hdb
-.section s0hea
-.section s0heb
-.section s0hfa
-.section s0hfb
-.section s0hga
-.section s0hgb
-.section s0hha
-.section s0hhb
-.section s0hia
-.section s0hib
-.section s0hja
-.section s0hjb
-.section s0hka
-.section s0hkb
-.section s0hla
-.section s0hlb
-.section s0hma
-.section s0hmb
-.section s0hna
-.section s0hnb
-.section s0hoa
-.section s0hob
-.section s0hpa
-.section s0hpb
-.section s0hqa
-.section s0hqb
-.section s0hra
-.section s0hrb
-.section s0hsa
-.section s0hsb
-.section s0hta
-.section s0htb
-.section s0hua
-.section s0hub
-.section s0hva
-.section s0hvb
-.section s0hwa
-.section s0hwb
-.section s0hxa
-.section s0hxb
-.section s0hya
-.section s0hyb
-.section s0hza
-.section s0hzb
-.section s0h1a
-.section s0h1b
-.section s0h2a
-.section s0h2b
-.section s0h3a
-.section s0h3b
-.section s0h4a
-.section s0h4b
-.section s0h5a
-.section s0h5b
-.section s0h6a
-.section s0h6b
-.section s0h7a
-.section s0h7b
-.section s0h8a
-.section s0h8b
-.section s0h9a
-.section s0h9b
-.section s0h0a
-.section s0h0b
-.section s0iaa
-.section s0iab
-.section s0iba
-.section s0ibb
-.section s0ica
-.section s0icb
-.section s0ida
-.section s0idb
-.section s0iea
-.section s0ieb
-.section s0ifa
-.section s0ifb
-.section s0iga
-.section s0igb
-.section s0iha
-.section s0ihb
-.section s0iia
-.section s0iib
-.section s0ija
-.section s0ijb
-.section s0ika
-.section s0ikb
-.section s0ila
-.section s0ilb
-.section s0ima
-.section s0imb
-.section s0ina
-.section s0inb
-.section s0ioa
-.section s0iob
-.section s0ipa
-.section s0ipb
-.section s0iqa
-.section s0iqb
-.section s0ira
-.section s0irb
-.section s0isa
-.section s0isb
-.section s0ita
-.section s0itb
-.section s0iua
-.section s0iub
-.section s0iva
-.section s0ivb
-.section s0iwa
-.section s0iwb
-.section s0ixa
-.section s0ixb
-.section s0iya
-.section s0iyb
-.section s0iza
-.section s0izb
-.section s0i1a
-.section s0i1b
-.section s0i2a
-.section s0i2b
-.section s0i3a
-.section s0i3b
-.section s0i4a
-.section s0i4b
-.section s0i5a
-.section s0i5b
-.section s0i6a
-.section s0i6b
-.section s0i7a
-.section s0i7b
-.section s0i8a
-.section s0i8b
-.section s0i9a
-.section s0i9b
-.section s0i0a
-.section s0i0b
-.section s0jaa
-.section s0jab
-.section s0jba
-.section s0jbb
-.section s0jca
-.section s0jcb
-.section s0jda
-.section s0jdb
-.section s0jea
-.section s0jeb
-.section s0jfa
-.section s0jfb
-.section s0jga
-.section s0jgb
-.section s0jha
-.section s0jhb
-.section s0jia
-.section s0jib
-.section s0jja
-.section s0jjb
-.section s0jka
-.section s0jkb
-.section s0jla
-.section s0jlb
-.section s0jma
-.section s0jmb
-.section s0jna
-.section s0jnb
-.section s0joa
-.section s0job
-.section s0jpa
-.section s0jpb
-.section s0jqa
-.section s0jqb
-.section s0jra
-.section s0jrb
-.section s0jsa
-.section s0jsb
-.section s0jta
-.section s0jtb
-.section s0jua
-.section s0jub
-.section s0jva
-.section s0jvb
-.section s0jwa
-.section s0jwb
-.section s0jxa
-.section s0jxb
-.section s0jya
-.section s0jyb
-.section s0jza
-.section s0jzb
-.section s0j1a
-.section s0j1b
-.section s0j2a
-.section s0j2b
-.section s0j3a
-.section s0j3b
-.section s0j4a
-.section s0j4b
-.section s0j5a
-.section s0j5b
-.section s0j6a
-.section s0j6b
-.section s0j7a
-.section s0j7b
-.section s0j8a
-.section s0j8b
-.section s0j9a
-.section s0j9b
-.section s0j0a
-.section s0j0b
-.section s0kaa
-.section s0kab
-.section s0kba
-.section s0kbb
-.section s0kca
-.section s0kcb
-.section s0kda
-.section s0kdb
-.section s0kea
-.section s0keb
-.section s0kfa
-.section s0kfb
-.section s0kga
-.section s0kgb
-.section s0kha
-.section s0khb
-.section s0kia
-.section s0kib
-.section s0kja
-.section s0kjb
-.section s0kka
-.section s0kkb
-.section s0kla
-.section s0klb
-.section s0kma
-.section s0kmb
-.section s0kna
-.section s0knb
-.section s0koa
-.section s0kob
-.section s0kpa
-.section s0kpb
-.section s0kqa
-.section s0kqb
-.section s0kra
-.section s0krb
-.section s0ksa
-.section s0ksb
-.section s0kta
-.section s0ktb
-.section s0kua
-.section s0kub
-.section s0kva
-.section s0kvb
-.section s0kwa
-.section s0kwb
-.section s0kxa
-.section s0kxb
-.section s0kya
-.section s0kyb
-.section s0kza
-.section s0kzb
-.section s0k1a
-.section s0k1b
-.section s0k2a
-.section s0k2b
-.section s0k3a
-.section s0k3b
-.section s0k4a
-.section s0k4b
-.section s0k5a
-.section s0k5b
-.section s0k6a
-.section s0k6b
-.section s0k7a
-.section s0k7b
-.section s0k8a
-.section s0k8b
-.section s0k9a
-.section s0k9b
-.section s0k0a
-.section s0k0b
-.section s0laa
-.section s0lab
-.section s0lba
-.section s0lbb
-.section s0lca
-.section s0lcb
-.section s0lda
-.section s0ldb
-.section s0lea
-.section s0leb
-.section s0lfa
-.section s0lfb
-.section s0lga
-.section s0lgb
-.section s0lha
-.section s0lhb
-.section s0lia
-.section s0lib
-.section s0lja
-.section s0ljb
-.section s0lka
-.section s0lkb
-.section s0lla
-.section s0llb
-.section s0lma
-.section s0lmb
-.section s0lna
-.section s0lnb
-.section s0loa
-.section s0lob
-.section s0lpa
-.section s0lpb
-.section s0lqa
-.section s0lqb
-.section s0lra
-.section s0lrb
-.section s0lsa
-.section s0lsb
-.section s0lta
-.section s0ltb
-.section s0lua
-.section s0lub
-.section s0lva
-.section s0lvb
-.section s0lwa
-.section s0lwb
-.section s0lxa
-.section s0lxb
-.section s0lya
-.section s0lyb
-.section s0lza
-.section s0lzb
-.section s0l1a
-.section s0l1b
-.section s0l2a
-.section s0l2b
-.section s0l3a
-.section s0l3b
-.section s0l4a
-.section s0l4b
-.section s0l5a
-.section s0l5b
-.section s0l6a
-.section s0l6b
-.section s0l7a
-.section s0l7b
-.section s0l8a
-.section s0l8b
-.section s0l9a
-.section s0l9b
-.section s0l0a
-.section s0l0b
-.section s0maa
-.section s0mab
-.section s0mba
-.section s0mbb
-.section s0mca
-.section s0mcb
-.section s0mda
-.section s0mdb
-.section s0mea
-.section s0meb
-.section s0mfa
-.section s0mfb
-.section s0mga
-.section s0mgb
-.section s0mha
-.section s0mhb
-.section s0mia
-.section s0mib
-.section s0mja
-.section s0mjb
-.section s0mka
-.section s0mkb
-.section s0mla
-.section s0mlb
-.section s0mma
-.section s0mmb
-.section s0mna
-.section s0mnb
-.section s0moa
-.section s0mob
-.section s0mpa
-.section s0mpb
-.section s0mqa
-.section s0mqb
-.section s0mra
-.section s0mrb
-.section s0msa
-.section s0msb
-.section s0mta
-.section s0mtb
-.section s0mua
-.section s0mub
-.section s0mva
-.section s0mvb
-.section s0mwa
-.section s0mwb
-.section s0mxa
-.section s0mxb
-.section s0mya
-.section s0myb
-.section s0mza
-.section s0mzb
-.section s0m1a
-.section s0m1b
-.section s0m2a
-.section s0m2b
-.section s0m3a
-.section s0m3b
-.section s0m4a
-.section s0m4b
-.section s0m5a
-.section s0m5b
-.section s0m6a
-.section s0m6b
-.section s0m7a
-.section s0m7b
-.section s0m8a
-.section s0m8b
-.section s0m9a
-.section s0m9b
-.section s0m0a
-.section s0m0b
-.section s0naa
-.section s0nab
-.section s0nba
-.section s0nbb
-.section s0nca
-.section s0ncb
-.section s0nda
-.section s0ndb
-.section s0nea
-.section s0neb
-.section s0nfa
-.section s0nfb
-.section s0nga
-.section s0ngb
-.section s0nha
-.section s0nhb
-.section s0nia
-.section s0nib
-.section s0nja
-.section s0njb
-.section s0nka
-.section s0nkb
-.section s0nla
-.section s0nlb
-.section s0nma
-.section s0nmb
-.section s0nna
-.section s0nnb
-.section s0noa
-.section s0nob
-.section s0npa
-.section s0npb
-.section s0nqa
-.section s0nqb
-.section s0nra
-.section s0nrb
-.section s0nsa
-.section s0nsb
-.section s0nta
-.section s0ntb
-.section s0nua
-.section s0nub
-.section s0nva
-.section s0nvb
-.section s0nwa
-.section s0nwb
-.section s0nxa
-.section s0nxb
-.section s0nya
-.section s0nyb
-.section s0nza
-.section s0nzb
-.section s0n1a
-.section s0n1b
-.section s0n2a
-.section s0n2b
-.section s0n3a
-.section s0n3b
-.section s0n4a
-.section s0n4b
-.section s0n5a
-.section s0n5b
-.section s0n6a
-.section s0n6b
-.section s0n7a
-.section s0n7b
-.section s0n8a
-.section s0n8b
-.section s0n9a
-.section s0n9b
-.section s0n0a
-.section s0n0b
-.section s0oaa
-.section s0oab
-.section s0oba
-.section s0obb
-.section s0oca
-.section s0ocb
-.section s0oda
-.section s0odb
-.section s0oea
-.section s0oeb
-.section s0ofa
-.section s0ofb
-.section s0oga
-.section s0ogb
-.section s0oha
-.section s0ohb
-.section s0oia
-.section s0oib
-.section s0oja
-.section s0ojb
-.section s0oka
-.section s0okb
-.section s0ola
-.section s0olb
-.section s0oma
-.section s0omb
-.section s0ona
-.section s0onb
-.section s0ooa
-.section s0oob
-.section s0opa
-.section s0opb
-.section s0oqa
-.section s0oqb
-.section s0ora
-.section s0orb
-.section s0osa
-.section s0osb
-.section s0ota
-.section s0otb
-.section s0oua
-.section s0oub
-.section s0ova
-.section s0ovb
-.section s0owa
-.section s0owb
-.section s0oxa
-.section s0oxb
-.section s0oya
-.section s0oyb
-.section s0oza
-.section s0ozb
-.section s0o1a
-.section s0o1b
-.section s0o2a
-.section s0o2b
-.section s0o3a
-.section s0o3b
-.section s0o4a
-.section s0o4b
-.section s0o5a
-.section s0o5b
-.section s0o6a
-.section s0o6b
-.section s0o7a
-.section s0o7b
-.section s0o8a
-.section s0o8b
-.section s0o9a
-.section s0o9b
-.section s0o0a
-.section s0o0b
-.section s0paa
-.section s0pab
-.section s0pba
-.section s0pbb
-.section s0pca
-.section s0pcb
-.section s0pda
-.section s0pdb
-.section s0pea
-.section s0peb
-.section s0pfa
-.section s0pfb
-.section s0pga
-.section s0pgb
-.section s0pha
-.section s0phb
-.section s0pia
-.section s0pib
-.section s0pja
-.section s0pjb
-.section s0pka
-.section s0pkb
-.section s0pla
-.section s0plb
-.section s0pma
-.section s0pmb
-.section s0pna
-.section s0pnb
-.section s0poa
-.section s0pob
-.section s0ppa
-.section s0ppb
-.section s0pqa
-.section s0pqb
-.section s0pra
-.section s0prb
-.section s0psa
-.section s0psb
-.section s0pta
-.section s0ptb
-.section s0pua
-.section s0pub
-.section s0pva
-.section s0pvb
-.section s0pwa
-.section s0pwb
-.section s0pxa
-.section s0pxb
-.section s0pya
-.section s0pyb
-.section s0pza
-.section s0pzb
-.section s0p1a
-.section s0p1b
-.section s0p2a
-.section s0p2b
-.section s0p3a
-.section s0p3b
-.section s0p4a
-.section s0p4b
-.section s0p5a
-.section s0p5b
-.section s0p6a
-.section s0p6b
-.section s0p7a
-.section s0p7b
-.section s0p8a
-.section s0p8b
-.section s0p9a
-.section s0p9b
-.section s0p0a
-.section s0p0b
-.section s0qaa
-.section s0qab
-.section s0qba
-.section s0qbb
-.section s0qca
-.section s0qcb
-.section s0qda
-.section s0qdb
-.section s0qea
-.section s0qeb
-.section s0qfa
-.section s0qfb
-.section s0qga
-.section s0qgb
-.section s0qha
-.section s0qhb
-.section s0qia
-.section s0qib
-.section s0qja
-.section s0qjb
-.section s0qka
-.section s0qkb
-.section s0qla
-.section s0qlb
-.section s0qma
-.section s0qmb
-.section s0qna
-.section s0qnb
-.section s0qoa
-.section s0qob
-.section s0qpa
-.section s0qpb
-.section s0qqa
-.section s0qqb
-.section s0qra
-.section s0qrb
-.section s0qsa
-.section s0qsb
-.section s0qta
-.section s0qtb
-.section s0qua
-.section s0qub
-.section s0qva
-.section s0qvb
-.section s0qwa
-.section s0qwb
-.section s0qxa
-.section s0qxb
-.section s0qya
-.section s0qyb
-.section s0qza
-.section s0qzb
-.section s0q1a
-.section s0q1b
-.section s0q2a
-.section s0q2b
-.section s0q3a
-.section s0q3b
-.section s0q4a
-.section s0q4b
-.section s0q5a
-.section s0q5b
-.section s0q6a
-.section s0q6b
-.section s0q7a
-.section s0q7b
-.section s0q8a
-.section s0q8b
-.section s0q9a
-.section s0q9b
-.section s0q0a
-.section s0q0b
-.section s0raa
-.section s0rab
-.section s0rba
-.section s0rbb
-.section s0rca
-.section s0rcb
-.section s0rda
-.section s0rdb
-.section s0rea
-.section s0reb
-.section s0rfa
-.section s0rfb
-.section s0rga
-.section s0rgb
-.section s0rha
-.section s0rhb
-.section s0ria
-.section s0rib
-.section s0rja
-.section s0rjb
-.section s0rka
-.section s0rkb
-.section s0rla
-.section s0rlb
-.section s0rma
-.section s0rmb
-.section s0rna
-.section s0rnb
-.section s0roa
-.section s0rob
-.section s0rpa
-.section s0rpb
-.section s0rqa
-.section s0rqb
-.section s0rra
-.section s0rrb
-.section s0rsa
-.section s0rsb
-.section s0rta
-.section s0rtb
-.section s0rua
-.section s0rub
-.section s0rva
-.section s0rvb
-.section s0rwa
-.section s0rwb
-.section s0rxa
-.section s0rxb
-.section s0rya
-.section s0ryb
-.section s0rza
-.section s0rzb
-.section s0r1a
-.section s0r1b
-.section s0r2a
-.section s0r2b
-.section s0r3a
-.section s0r3b
-.section s0r4a
-.section s0r4b
-.section s0r5a
-.section s0r5b
-.section s0r6a
-.section s0r6b
-.section s0r7a
-.section s0r7b
-.section s0r8a
-.section s0r8b
-.section s0r9a
-.section s0r9b
-.section s0r0a
-.section s0r0b
-.section s0saa
-.section s0sab
-.section s0sba
-.section s0sbb
-.section s0sca
-.section s0scb
-.section s0sda
-.section s0sdb
-.section s0sea
-.section s0seb
-.section s0sfa
-.section s0sfb
-.section s0sga
-.section s0sgb
-.section s0sha
-.section s0shb
-.section s0sia
-.section s0sib
-.section s0sja
-.section s0sjb
-.section s0ska
-.section s0skb
-.section s0sla
-.section s0slb
-.section s0sma
-.section s0smb
-.section s0sna
-.section s0snb
-.section s0soa
-.section s0sob
-.section s0spa
-.section s0spb
-.section s0sqa
-.section s0sqb
-.section s0sra
-.section s0srb
-.section s0ssa
-.section s0ssb
-.section s0sta
-.section s0stb
-.section s0sua
-.section s0sub
-.section s0sva
-.section s0svb
-.section s0swa
-.section s0swb
-.section s0sxa
-.section s0sxb
-.section s0sya
-.section s0syb
-.section s0sza
-.section s0szb
-.section s0s1a
-.section s0s1b
-.section s0s2a
-.section s0s2b
-.section s0s3a
-.section s0s3b
-.section s0s4a
-.section s0s4b
-.section s0s5a
-.section s0s5b
-.section s0s6a
-.section s0s6b
-.section s0s7a
-.section s0s7b
-.section s0s8a
-.section s0s8b
-.section s0s9a
-.section s0s9b
-.section s0s0a
-.section s0s0b
-.section s0taa
-.section s0tab
-.section s0tba
-.section s0tbb
-.section s0tca
-.section s0tcb
-.section s0tda
-.section s0tdb
-.section s0tea
-.section s0teb
-.section s0tfa
-.section s0tfb
-.section s0tga
-.section s0tgb
-.section s0tha
-.section s0thb
-.section s0tia
-.section s0tib
-.section s0tja
-.section s0tjb
-.section s0tka
-.section s0tkb
-.section s0tla
-.section s0tlb
-.section s0tma
-.section s0tmb
-.section s0tna
-.section s0tnb
-.section s0toa
-.section s0tob
-.section s0tpa
-.section s0tpb
-.section s0tqa
-.section s0tqb
-.section s0tra
-.section s0trb
-.section s0tsa
-.section s0tsb
-.section s0tta
-.section s0ttb
-.section s0tua
-.section s0tub
-.section s0tva
-.section s0tvb
-.section s0twa
-.section s0twb
-.section s0txa
-.section s0txb
-.section s0tya
-.section s0tyb
-.section s0tza
-.section s0tzb
-.section s0t1a
-.section s0t1b
-.section s0t2a
-.section s0t2b
-.section s0t3a
-.section s0t3b
-.section s0t4a
-.section s0t4b
-.section s0t5a
-.section s0t5b
-.section s0t6a
-.section s0t6b
-.section s0t7a
-.section s0t7b
-.section s0t8a
-.section s0t8b
-.section s0t9a
-.section s0t9b
-.section s0t0a
-.section s0t0b
-.section s0uaa
-.section s0uab
-.section s0uba
-.section s0ubb
-.section s0uca
-.section s0ucb
-.section s0uda
-.section s0udb
-.section s0uea
-.section s0ueb
-.section s0ufa
-.section s0ufb
-.section s0uga
-.section s0ugb
-.section s0uha
-.section s0uhb
-.section s0uia
-.section s0uib
-.section s0uja
-.section s0ujb
-.section s0uka
-.section s0ukb
-.section s0ula
-.section s0ulb
-.section s0uma
-.section s0umb
-.section s0una
-.section s0unb
-.section s0uoa
-.section s0uob
-.section s0upa
-.section s0upb
-.section s0uqa
-.section s0uqb
-.section s0ura
-.section s0urb
-.section s0usa
-.section s0usb
-.section s0uta
-.section s0utb
-.section s0uua
-.section s0uub
-.section s0uva
-.section s0uvb
-.section s0uwa
-.section s0uwb
-.section s0uxa
-.section s0uxb
-.section s0uya
-.section s0uyb
-.section s0uza
-.section s0uzb
-.section s0u1a
-.section s0u1b
-.section s0u2a
-.section s0u2b
-.section s0u3a
-.section s0u3b
-.section s0u4a
-.section s0u4b
-.section s0u5a
-.section s0u5b
-.section s0u6a
-.section s0u6b
-.section s0u7a
-.section s0u7b
-.section s0u8a
-.section s0u8b
-.section s0u9a
-.section s0u9b
-.section s0u0a
-.section s0u0b
-.section s0vaa
-.section s0vab
-.section s0vba
-.section s0vbb
-.section s0vca
-.section s0vcb
-.section s0vda
-.section s0vdb
-.section s0vea
-.section s0veb
-.section s0vfa
-.section s0vfb
-.section s0vga
-.section s0vgb
-.section s0vha
-.section s0vhb
-.section s0via
-.section s0vib
-.section s0vja
-.section s0vjb
-.section s0vka
-.section s0vkb
-.section s0vla
-.section s0vlb
-.section s0vma
-.section s0vmb
-.section s0vna
-.section s0vnb
-.section s0voa
-.section s0vob
-.section s0vpa
-.section s0vpb
-.section s0vqa
-.section s0vqb
-.section s0vra
-.section s0vrb
-.section s0vsa
-.section s0vsb
-.section s0vta
-.section s0vtb
-.section s0vua
-.section s0vub
-.section s0vva
-.section s0vvb
-.section s0vwa
-.section s0vwb
-.section s0vxa
-.section s0vxb
-.section s0vya
-.section s0vyb
-.section s0vza
-.section s0vzb
-.section s0v1a
-.section s0v1b
-.section s0v2a
-.section s0v2b
-.section s0v3a
-.section s0v3b
-.section s0v4a
-.section s0v4b
-.section s0v5a
-.section s0v5b
-.section s0v6a
-.section s0v6b
-.section s0v7a
-.section s0v7b
-.section s0v8a
-.section s0v8b
-.section s0v9a
-.section s0v9b
-.section s0v0a
-.section s0v0b
-.section s0waa
-.section s0wab
-.section s0wba
-.section s0wbb
-.section s0wca
-.section s0wcb
-.section s0wda
-.section s0wdb
-.section s0wea
-.section s0web
-.section s0wfa
-.section s0wfb
-.section s0wga
-.section s0wgb
-.section s0wha
-.section s0whb
-.section s0wia
-.section s0wib
-.section s0wja
-.section s0wjb
-.section s0wka
-.section s0wkb
-.section s0wla
-.section s0wlb
-.section s0wma
-.section s0wmb
-.section s0wna
-.section s0wnb
-.section s0woa
-.section s0wob
-.section s0wpa
-.section s0wpb
-.section s0wqa
-.section s0wqb
-.section s0wra
-.section s0wrb
-.section s0wsa
-.section s0wsb
-.section s0wta
-.section s0wtb
-.section s0wua
-.section s0wub
-.section s0wva
-.section s0wvb
-.section s0wwa
-.section s0wwb
-.section s0wxa
-.section s0wxb
-.section s0wya
-.section s0wyb
-.section s0wza
-.section s0wzb
-.section s0w1a
-.section s0w1b
-.section s0w2a
-.section s0w2b
-.section s0w3a
-.section s0w3b
-.section s0w4a
-.section s0w4b
-.section s0w5a
-.section s0w5b
-.section s0w6a
-.section s0w6b
-.section s0w7a
-.section s0w7b
-.section s0w8a
-.section s0w8b
-.section s0w9a
-.section s0w9b
-.section s0w0a
-.section s0w0b
-.section s0xaa
-.section s0xab
-.section s0xba
-.section s0xbb
-.section s0xca
-.section s0xcb
-.section s0xda
-.section s0xdb
-.section s0xea
-.section s0xeb
-.section s0xfa
-.section s0xfb
-.section s0xga
-.section s0xgb
-.section s0xha
-.section s0xhb
-.section s0xia
-.section s0xib
-.section s0xja
-.section s0xjb
-.section s0xka
-.section s0xkb
-.section s0xla
-.section s0xlb
-.section s0xma
-.section s0xmb
-.section s0xna
-.section s0xnb
-.section s0xoa
-.section s0xob
-.section s0xpa
-.section s0xpb
-.section s0xqa
-.section s0xqb
-.section s0xra
-.section s0xrb
-.section s0xsa
-.section s0xsb
-.section s0xta
-.section s0xtb
-.section s0xua
-.section s0xub
-.section s0xva
-.section s0xvb
-.section s0xwa
-.section s0xwb
-.section s0xxa
-.section s0xxb
-.section s0xya
-.section s0xyb
-.section s0xza
-.section s0xzb
-.section s0x1a
-.section s0x1b
-.section s0x2a
-.section s0x2b
-.section s0x3a
-.section s0x3b
-.section s0x4a
-.section s0x4b
-.section s0x5a
-.section s0x5b
-.section s0x6a
-.section s0x6b
-.section s0x7a
-.section s0x7b
-.section s0x8a
-.section s0x8b
-.section s0x9a
-.section s0x9b
-.section s0x0a
-.section s0x0b
-.section s0yaa
-.section s0yab
-.section s0yba
-.section s0ybb
-.section s0yca
-.section s0ycb
-.section s0yda
-.section s0ydb
-.section s0yea
-.section s0yeb
-.section s0yfa
-.section s0yfb
-.section s0yga
-.section s0ygb
-.section s0yha
-.section s0yhb
-.section s0yia
-.section s0yib
-.section s0yja
-.section s0yjb
-.section s0yka
-.section s0ykb
-.section s0yla
-.section s0ylb
-.section s0yma
-.section s0ymb
-.section s0yna
-.section s0ynb
-.section s0yoa
-.section s0yob
-.section s0ypa
-.section s0ypb
-.section s0yqa
-.section s0yqb
-.section s0yra
-.section s0yrb
-.section s0ysa
-.section s0ysb
-.section s0yta
-.section s0ytb
-.section s0yua
-.section s0yub
-.section s0yva
-.section s0yvb
-.section s0ywa
-.section s0ywb
-.section s0yxa
-.section s0yxb
-.section s0yya
-.section s0yyb
-.section s0yza
-.section s0yzb
-.section s0y1a
-.section s0y1b
-.section s0y2a
-.section s0y2b
-.section s0y3a
-.section s0y3b
-.section s0y4a
-.section s0y4b
-.section s0y5a
-.section s0y5b
-.section s0y6a
-.section s0y6b
-.section s0y7a
-.section s0y7b
-.section s0y8a
-.section s0y8b
-.section s0y9a
-.section s0y9b
-.section s0y0a
-.section s0y0b
-.section s0zaa
-.section s0zab
-.section s0zba
-.section s0zbb
-.section s0zca
-.section s0zcb
-.section s0zda
-.section s0zdb
-.section s0zea
-.section s0zeb
-.section s0zfa
-.section s0zfb
-.section s0zga
-.section s0zgb
-.section s0zha
-.section s0zhb
-.section s0zia
-.section s0zib
-.section s0zja
-.section s0zjb
-.section s0zka
-.section s0zkb
-.section s0zla
-.section s0zlb
-.section s0zma
-.section s0zmb
-.section s0zna
-.section s0znb
-.section s0zoa
-.section s0zob
-.section s0zpa
-.section s0zpb
-.section s0zqa
-.section s0zqb
-.section s0zra
-.section s0zrb
-.section s0zsa
-.section s0zsb
-.section s0zta
-.section s0ztb
-.section s0zua
-.section s0zub
-.section s0zva
-.section s0zvb
-.section s0zwa
-.section s0zwb
-.section s0zxa
-.section s0zxb
-.section s0zya
-.section s0zyb
-.section s0zza
-.section s0zzb
-.section s0z1a
-.section s0z1b
-.section s0z2a
-.section s0z2b
-.section s0z3a
-.section s0z3b
-.section s0z4a
-.section s0z4b
-.section s0z5a
-.section s0z5b
-.section s0z6a
-.section s0z6b
-.section s0z7a
-.section s0z7b
-.section s0z8a
-.section s0z8b
-.section s0z9a
-.section s0z9b
-.section s0z0a
-.section s0z0b
-.section s01aa
-.section s01ab
-.section s01ba
-.section s01bb
-.section s01ca
-.section s01cb
-.section s01da
-.section s01db
-.section s01ea
-.section s01eb
-.section s01fa
-.section s01fb
-.section s01ga
-.section s01gb
-.section s01ha
-.section s01hb
-.section s01ia
-.section s01ib
-.section s01ja
-.section s01jb
-.section s01ka
-.section s01kb
-.section s01la
-.section s01lb
-.section s01ma
-.section s01mb
-.section s01na
-.section s01nb
-.section s01oa
-.section s01ob
-.section s01pa
-.section s01pb
-.section s01qa
-.section s01qb
-.section s01ra
-.section s01rb
-.section s01sa
-.section s01sb
-.section s01ta
-.section s01tb
-.section s01ua
-.section s01ub
-.section s01va
-.section s01vb
-.section s01wa
-.section s01wb
-.section s01xa
-.section s01xb
-.section s01ya
-.section s01yb
-.section s01za
-.section s01zb
-.section s011a
-.section s011b
-.section s012a
-.section s012b
-.section s013a
-.section s013b
-.section s014a
-.section s014b
-.section s015a
-.section s015b
-.section s016a
-.section s016b
-.section s017a
-.section s017b
-.section s018a
-.section s018b
-.section s019a
-.section s019b
-.section s010a
-.section s010b
-.section s02aa
-.section s02ab
-.section s02ba
-.section s02bb
-.section s02ca
-.section s02cb
-.section s02da
-.section s02db
-.section s02ea
-.section s02eb
-.section s02fa
-.section s02fb
-.section s02ga
-.section s02gb
-.section s02ha
-.section s02hb
-.section s02ia
-.section s02ib
-.section s02ja
-.section s02jb
-.section s02ka
-.section s02kb
-.section s02la
-.section s02lb
-.section s02ma
-.section s02mb
-.section s02na
-.section s02nb
-.section s02oa
-.section s02ob
-.section s02pa
-.section s02pb
-.section s02qa
-.section s02qb
-.section s02ra
-.section s02rb
-.section s02sa
-.section s02sb
-.section s02ta
-.section s02tb
-.section s02ua
-.section s02ub
-.section s02va
-.section s02vb
-.section s02wa
-.section s02wb
-.section s02xa
-.section s02xb
-.section s02ya
-.section s02yb
-.section s02za
-.section s02zb
-.section s021a
-.section s021b
-.section s022a
-.section s022b
-.section s023a
-.section s023b
-.section s024a
-.section s024b
-.section s025a
-.section s025b
-.section s026a
-.section s026b
-.section s027a
-.section s027b
-.section s028a
-.section s028b
-.section s029a
-.section s029b
-.section s020a
-.section s020b
-.section s03aa
-.section s03ab
-.section s03ba
-.section s03bb
-.section s03ca
-.section s03cb
-.section s03da
-.section s03db
-.section s03ea
-.section s03eb
-.section s03fa
-.section s03fb
-.section s03ga
-.section s03gb
-.section s03ha
-.section s03hb
-.section s03ia
-.section s03ib
-.section s03ja
-.section s03jb
-.section s03ka
-.section s03kb
-.section s03la
-.section s03lb
-.section s03ma
-.section s03mb
-.section s03na
-.section s03nb
-.section s03oa
-.section s03ob
-.section s03pa
-.section s03pb
-.section s03qa
-.section s03qb
-.section s03ra
-.section s03rb
-.section s03sa
-.section s03sb
-.section s03ta
-.section s03tb
-.section s03ua
-.section s03ub
-.section s03va
-.section s03vb
-.section s03wa
-.section s03wb
-.section s03xa
-.section s03xb
-.section s03ya
-.section s03yb
-.section s03za
-.section s03zb
-.section s031a
-.section s031b
-.section s032a
-.section s032b
-.section s033a
-.section s033b
-.section s034a
-.section s034b
-.section s035a
-.section s035b
-.section s036a
-.section s036b
-.section s037a
-.section s037b
-.section s038a
-.section s038b
-.section s039a
-.section s039b
-.section s030a
-.section s030b
-.section s04aa
-.section s04ab
-.section s04ba
-.section s04bb
-.section s04ca
-.section s04cb
-.section s04da
-.section s04db
-.section s04ea
-.section s04eb
-.section s04fa
-.section s04fb
-.section s04ga
-.section s04gb
-.section s04ha
-.section s04hb
-.section s04ia
-.section s04ib
-.section s04ja
-.section s04jb
-.section s04ka
-.section s04kb
-.section s04la
-.section s04lb
-.section s04ma
-.section s04mb
-.section s04na
-.section s04nb
-.section s04oa
-.section s04ob
-.section s04pa
-.section s04pb
-.section s04qa
-.section s04qb
-.section s04ra
-.section s04rb
-.section s04sa
-.section s04sb
-.section s04ta
-.section s04tb
-.section s04ua
-.section s04ub
-.section s04va
-.section s04vb
-.section s04wa
-.section s04wb
-.section s04xa
-.section s04xb
-.section s04ya
-.section s04yb
-.section s04za
-.section s04zb
-.section s041a
-.section s041b
-.section s042a
-.section s042b
-.section s043a
-.section s043b
-.section s044a
-.section s044b
-.section s045a
-.section s045b
-.section s046a
-.section s046b
-.section s047a
-.section s047b
-.section s048a
-.section s048b
-.section s049a
-.section s049b
-.section s040a
-.section s040b
-.section s05aa
-.section s05ab
-.section s05ba
-.section s05bb
-.section s05ca
-.section s05cb
-.section s05da
-.section s05db
-.section s05ea
-.section s05eb
-.section s05fa
-.section s05fb
-.section s05ga
-.section s05gb
-.section s05ha
-.section s05hb
-.section s05ia
-.section s05ib
-.section s05ja
-.section s05jb
-.section s05ka
-.section s05kb
-.section s05la
-.section s05lb
-.section s05ma
-.section s05mb
-.section s05na
-.section s05nb
-.section s05oa
-.section s05ob
-.section s05pa
-.section s05pb
-.section s05qa
-.section s05qb
-.section s05ra
-.section s05rb
-.section s05sa
-.section s05sb
-.section s05ta
-.section s05tb
-.section s05ua
-.section s05ub
-.section s05va
-.section s05vb
-.section s05wa
-.section s05wb
-.section s05xa
-.section s05xb
-.section s05ya
-.section s05yb
-.section s05za
-.section s05zb
-.section s051a
-.section s051b
-.section s052a
-.section s052b
-.section s053a
-.section s053b
-.section s054a
-.section s054b
-.section s055a
-.section s055b
-.section s056a
-.section s056b
-.section s057a
-.section s057b
-.section s058a
-.section s058b
-.section s059a
-.section s059b
-.section s050a
-.section s050b
-.section s06aa
-.section s06ab
-.section s06ba
-.section s06bb
-.section s06ca
-.section s06cb
-.section s06da
-.section s06db
-.section s06ea
-.section s06eb
-.section s06fa
-.section s06fb
-.section s06ga
-.section s06gb
-.section s06ha
-.section s06hb
-.section s06ia
-.section s06ib
-.section s06ja
-.section s06jb
-.section s06ka
-.section s06kb
-.section s06la
-.section s06lb
-.section s06ma
-.section s06mb
-.section s06na
-.section s06nb
-.section s06oa
-.section s06ob
-.section s06pa
-.section s06pb
-.section s06qa
-.section s06qb
-.section s06ra
-.section s06rb
-.section s06sa
-.section s06sb
-.section s06ta
-.section s06tb
-.section s06ua
-.section s06ub
-.section s06va
-.section s06vb
-.section s06wa
-.section s06wb
-.section s06xa
-.section s06xb
-.section s06ya
-.section s06yb
-.section s06za
-.section s06zb
-.section s061a
-.section s061b
-.section s062a
-.section s062b
-.section s063a
-.section s063b
-.section s064a
-.section s064b
-.section s065a
-.section s065b
-.section s066a
-.section s066b
-.section s067a
-.section s067b
-.section s068a
-.section s068b
-.section s069a
-.section s069b
-.section s060a
-.section s060b
-.section s07aa
-.section s07ab
-.section s07ba
-.section s07bb
-.section s07ca
-.section s07cb
-.section s07da
-.section s07db
-.section s07ea
-.section s07eb
-.section s07fa
-.section s07fb
-.section s07ga
-.section s07gb
-.section s07ha
-.section s07hb
-.section s07ia
-.section s07ib
-.section s07ja
-.section s07jb
-.section s07ka
-.section s07kb
-.section s07la
-.section s07lb
-.section s07ma
-.section s07mb
-.section s07na
-.section s07nb
-.section s07oa
-.section s07ob
-.section s07pa
-.section s07pb
-.section s07qa
-.section s07qb
-.section s07ra
-.section s07rb
-.section s07sa
-.section s07sb
-.section s07ta
-.section s07tb
-.section s07ua
-.section s07ub
-.section s07va
-.section s07vb
-.section s07wa
-.section s07wb
-.section s07xa
-.section s07xb
-.section s07ya
-.section s07yb
-.section s07za
-.section s07zb
-.section s071a
-.section s071b
-.section s072a
-.section s072b
-.section s073a
-.section s073b
-.section s074a
-.section s074b
-.section s075a
-.section s075b
-.section s076a
-.section s076b
-.section s077a
-.section s077b
-.section s078a
-.section s078b
-.section s079a
-.section s079b
-.section s070a
-.section s070b
-.section s08aa
-.section s08ab
-.section s08ba
-.section s08bb
-.section s08ca
-.section s08cb
-.section s08da
-.section s08db
-.section s08ea
-.section s08eb
-.section s08fa
-.section s08fb
-.section s08ga
-.section s08gb
-.section s08ha
-.section s08hb
-.section s08ia
-.section s08ib
-.section s08ja
-.section s08jb
-.section s08ka
-.section s08kb
-.section s08la
-.section s08lb
-.section s08ma
-.section s08mb
-.section s08na
-.section s08nb
-.section s08oa
-.section s08ob
-.section s08pa
-.section s08pb
-.section s08qa
-.section s08qb
-.section s08ra
-.section s08rb
-.section s08sa
-.section s08sb
-.section s08ta
-.section s08tb
-.section s08ua
-.section s08ub
-.section s08va
-.section s08vb
-.section s08wa
-.section s08wb
-.section s08xa
-.section s08xb
-.section s08ya
-.section s08yb
-.section s08za
-.section s08zb
-.section s081a
-.section s081b
-.section s082a
-.section s082b
-.section s083a
-.section s083b
-.section s084a
-.section s084b
-.section s085a
-.section s085b
-.section s086a
-.section s086b
-.section s087a
-.section s087b
-.section s088a
-.section s088b
-.section s089a
-.section s089b
-.section s080a
-.section s080b
-.section s09aa
-.section s09ab
-.section s09ba
-.section s09bb
-.section s09ca
-.section s09cb
-.section s09da
-.section s09db
-.section s09ea
-.section s09eb
-.section s09fa
-.section s09fb
-.section s09ga
-.section s09gb
-.section s09ha
-.section s09hb
-.section s09ia
-.section s09ib
-.section s09ja
-.section s09jb
-.section s09ka
-.section s09kb
-.section s09la
-.section s09lb
-.section s09ma
-.section s09mb
-.section s09na
-.section s09nb
-.section s09oa
-.section s09ob
-.section s09pa
-.section s09pb
-.section s09qa
-.section s09qb
-.section s09ra
-.section s09rb
-.section s09sa
-.section s09sb
-.section s09ta
-.section s09tb
-.section s09ua
-.section s09ub
-.section s09va
-.section s09vb
-.section s09wa
-.section s09wb
-.section s09xa
-.section s09xb
-.section s09ya
-.section s09yb
-.section s09za
-.section s09zb
-.section s091a
-.section s091b
-.section s092a
-.section s092b
-.section s093a
-.section s093b
-.section s094a
-.section s094b
-.section s095a
-.section s095b
-.section s096a
-.section s096b
-.section s097a
-.section s097b
-.section s098a
-.section s098b
-.section s099a
-.section s099b
-.section s090a
-.section s090b
-.section s00aa
-.section s00ab
-.section s00ba
-.section s00bb
-.section s00ca
-.section s00cb
-.section s00da
-.section s00db
-.section s00ea
-.section s00eb
-.section s00fa
-.section s00fb
-.section s00ga
-.section s00gb
-.section s00ha
-.section s00hb
-.section s00ia
-.section s00ib
-.section s00ja
-.section s00jb
-.section s00ka
-.section s00kb
-.section s00la
-.section s00lb
-.section s00ma
-.section s00mb
-.section s00na
-.section s00nb
-.section s00oa
-.section s00ob
-.section s00pa
-.section s00pb
-.section s00qa
-.section s00qb
-.section s00ra
-.section s00rb
-.section s00sa
-.section s00sb
-.section s00ta
-.section s00tb
-.section s00ua
-.section s00ub
-.section s00va
-.section s00vb
-.section s00wa
-.section s00wb
-.section s00xa
-.section s00xb
-.section s00ya
-.section s00yb
-.section s00za
-.section s00zb
-.section s001a
-.section s001b
-.section s002a
-.section s002b
-.section s003a
-.section s003b
-.section s004a
-.section s004b
-.section s005a
-.section s005b
-.section s006a
-.section s006b
-.section s007a
-.section s007b
-.section s008a
-.section s008b
-.section s009a
-.section s009b
-.section s000a
-.section s000b
diff --git a/test/MC/ELF/many-sections-2.s b/test/MC/ELF/many-sections-2.s
index 789ebf378d8e..88a4822c3869 100644
--- a/test/MC/ELF/many-sections-2.s
+++ b/test/MC/ELF/many-sections-2.s
@@ -1,65281 +1,133 @@
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o %t
-// RUN: llvm-readobj -s %t | FileCheck %s
+// RUN: llvm-readobj -s %t | FileCheck --check-prefix=SECTIONS %s
+// RUN: llvm-readobj -t %t | FileCheck --check-prefix=SYMBOLS %s
-// CHECK: symtab_shndx
+// Test that we create a .symtab_shndx if a symbol points to a section
+// numbered SHN_LORESERVE (0xFF00) or higher.
-.section saaaa
-.section saaab
-.section saaba
-.section saabb
-.section saaca
-.section saacb
-.section saada
-.section saadb
-.section saaea
-.section saaeb
-.section saafa
-.section saafb
-.section saaga
-.section saagb
-.section saaha
-.section saahb
-.section saaia
-.section saaib
-.section saaja
-.section saajb
-.section saaka
-.section saakb
-.section saala
-.section saalb
-.section saama
-.section saamb
-.section saana
-.section saanb
-.section saaoa
-.section saaob
-.section saapa
-.section saapb
-.section saaqa
-.section saaqb
-.section saara
-.section saarb
-.section saasa
-.section saasb
-.section saata
-.section saatb
-.section saaua
-.section saaub
-.section saava
-.section saavb
-.section saawa
-.section saawb
-.section saaxa
-.section saaxb
-.section saaya
-.section saayb
-.section saaza
-.section saazb
-.section saa1a
-.section saa1b
-.section saa2a
-.section saa2b
-.section saa3a
-.section saa3b
-.section saa4a
-.section saa4b
-.section saa5a
-.section saa5b
-.section saa6a
-.section saa6b
-.section saa7a
-.section saa7b
-.section saa8a
-.section saa8b
-.section saa9a
-.section saa9b
-.section saa0a
-.section saa0b
-.section sabaa
-.section sabab
-.section sabba
-.section sabbb
-.section sabca
-.section sabcb
-.section sabda
-.section sabdb
-.section sabea
-.section sabeb
-.section sabfa
-.section sabfb
-.section sabga
-.section sabgb
-.section sabha
-.section sabhb
-.section sabia
-.section sabib
-.section sabja
-.section sabjb
-.section sabka
-.section sabkb
-.section sabla
-.section sablb
-.section sabma
-.section sabmb
-.section sabna
-.section sabnb
-.section saboa
-.section sabob
-.section sabpa
-.section sabpb
-.section sabqa
-.section sabqb
-.section sabra
-.section sabrb
-.section sabsa
-.section sabsb
-.section sabta
-.section sabtb
-.section sabua
-.section sabub
-.section sabva
-.section sabvb
-.section sabwa
-.section sabwb
-.section sabxa
-.section sabxb
-.section sabya
-.section sabyb
-.section sabza
-.section sabzb
-.section sab1a
-.section sab1b
-.section sab2a
-.section sab2b
-.section sab3a
-.section sab3b
-.section sab4a
-.section sab4b
-.section sab5a
-.section sab5b
-.section sab6a
-.section sab6b
-.section sab7a
-.section sab7b
-.section sab8a
-.section sab8b
-.section sab9a
-.section sab9b
-.section sab0a
-.section sab0b
-.section sacaa
-.section sacab
-.section sacba
-.section sacbb
-.section sacca
-.section saccb
-.section sacda
-.section sacdb
-.section sacea
-.section saceb
-.section sacfa
-.section sacfb
-.section sacga
-.section sacgb
-.section sacha
-.section sachb
-.section sacia
-.section sacib
-.section sacja
-.section sacjb
-.section sacka
-.section sackb
-.section sacla
-.section saclb
-.section sacma
-.section sacmb
-.section sacna
-.section sacnb
-.section sacoa
-.section sacob
-.section sacpa
-.section sacpb
-.section sacqa
-.section sacqb
-.section sacra
-.section sacrb
-.section sacsa
-.section sacsb
-.section sacta
-.section sactb
-.section sacua
-.section sacub
-.section sacva
-.section sacvb
-.section sacwa
-.section sacwb
-.section sacxa
-.section sacxb
-.section sacya
-.section sacyb
-.section sacza
-.section saczb
-.section sac1a
-.section sac1b
-.section sac2a
-.section sac2b
-.section sac3a
-.section sac3b
-.section sac4a
-.section sac4b
-.section sac5a
-.section sac5b
-.section sac6a
-.section sac6b
-.section sac7a
-.section sac7b
-.section sac8a
-.section sac8b
-.section sac9a
-.section sac9b
-.section sac0a
-.section sac0b
-.section sadaa
-.section sadab
-.section sadba
-.section sadbb
-.section sadca
-.section sadcb
-.section sadda
-.section saddb
-.section sadea
-.section sadeb
-.section sadfa
-.section sadfb
-.section sadga
-.section sadgb
-.section sadha
-.section sadhb
-.section sadia
-.section sadib
-.section sadja
-.section sadjb
-.section sadka
-.section sadkb
-.section sadla
-.section sadlb
-.section sadma
-.section sadmb
-.section sadna
-.section sadnb
-.section sadoa
-.section sadob
-.section sadpa
-.section sadpb
-.section sadqa
-.section sadqb
-.section sadra
-.section sadrb
-.section sadsa
-.section sadsb
-.section sadta
-.section sadtb
-.section sadua
-.section sadub
-.section sadva
-.section sadvb
-.section sadwa
-.section sadwb
-.section sadxa
-.section sadxb
-.section sadya
-.section sadyb
-.section sadza
-.section sadzb
-.section sad1a
-.section sad1b
-.section sad2a
-.section sad2b
-.section sad3a
-.section sad3b
-.section sad4a
-.section sad4b
-.section sad5a
-.section sad5b
-.section sad6a
-.section sad6b
-.section sad7a
-.section sad7b
-.section sad8a
-.section sad8b
-.section sad9a
-.section sad9b
-.section sad0a
-.section sad0b
-.section saeaa
-.section saeab
-.section saeba
-.section saebb
-.section saeca
-.section saecb
-.section saeda
-.section saedb
-.section saeea
-.section saeeb
-.section saefa
-.section saefb
-.section saega
-.section saegb
-.section saeha
-.section saehb
-.section saeia
-.section saeib
-.section saeja
-.section saejb
-.section saeka
-.section saekb
-.section saela
-.section saelb
-.section saema
-.section saemb
-.section saena
-.section saenb
-.section saeoa
-.section saeob
-.section saepa
-.section saepb
-.section saeqa
-.section saeqb
-.section saera
-.section saerb
-.section saesa
-.section saesb
-.section saeta
-.section saetb
-.section saeua
-.section saeub
-.section saeva
-.section saevb
-.section saewa
-.section saewb
-.section saexa
-.section saexb
-.section saeya
-.section saeyb
-.section saeza
-.section saezb
-.section sae1a
-.section sae1b
-.section sae2a
-.section sae2b
-.section sae3a
-.section sae3b
-.section sae4a
-.section sae4b
-.section sae5a
-.section sae5b
-.section sae6a
-.section sae6b
-.section sae7a
-.section sae7b
-.section sae8a
-.section sae8b
-.section sae9a
-.section sae9b
-.section sae0a
-.section sae0b
-.section safaa
-.section safab
-.section safba
-.section safbb
-.section safca
-.section safcb
-.section safda
-.section safdb
-.section safea
-.section safeb
-.section saffa
-.section saffb
-.section safga
-.section safgb
-.section safha
-.section safhb
-.section safia
-.section safib
-.section safja
-.section safjb
-.section safka
-.section safkb
-.section safla
-.section saflb
-.section safma
-.section safmb
-.section safna
-.section safnb
-.section safoa
-.section safob
-.section safpa
-.section safpb
-.section safqa
-.section safqb
-.section safra
-.section safrb
-.section safsa
-.section safsb
-.section safta
-.section saftb
-.section safua
-.section safub
-.section safva
-.section safvb
-.section safwa
-.section safwb
-.section safxa
-.section safxb
-.section safya
-.section safyb
-.section safza
-.section safzb
-.section saf1a
-.section saf1b
-.section saf2a
-.section saf2b
-.section saf3a
-.section saf3b
-.section saf4a
-.section saf4b
-.section saf5a
-.section saf5b
-.section saf6a
-.section saf6b
-.section saf7a
-.section saf7b
-.section saf8a
-.section saf8b
-.section saf9a
-.section saf9b
-.section saf0a
-.section saf0b
-.section sagaa
-.section sagab
-.section sagba
-.section sagbb
-.section sagca
-.section sagcb
-.section sagda
-.section sagdb
-.section sagea
-.section sageb
-.section sagfa
-.section sagfb
-.section sagga
-.section saggb
-.section sagha
-.section saghb
-.section sagia
-.section sagib
-.section sagja
-.section sagjb
-.section sagka
-.section sagkb
-.section sagla
-.section saglb
-.section sagma
-.section sagmb
-.section sagna
-.section sagnb
-.section sagoa
-.section sagob
-.section sagpa
-.section sagpb
-.section sagqa
-.section sagqb
-.section sagra
-.section sagrb
-.section sagsa
-.section sagsb
-.section sagta
-.section sagtb
-.section sagua
-.section sagub
-.section sagva
-.section sagvb
-.section sagwa
-.section sagwb
-.section sagxa
-.section sagxb
-.section sagya
-.section sagyb
-.section sagza
-.section sagzb
-.section sag1a
-.section sag1b
-.section sag2a
-.section sag2b
-.section sag3a
-.section sag3b
-.section sag4a
-.section sag4b
-.section sag5a
-.section sag5b
-.section sag6a
-.section sag6b
-.section sag7a
-.section sag7b
-.section sag8a
-.section sag8b
-.section sag9a
-.section sag9b
-.section sag0a
-.section sag0b
-.section sahaa
-.section sahab
-.section sahba
-.section sahbb
-.section sahca
-.section sahcb
-.section sahda
-.section sahdb
-.section sahea
-.section saheb
-.section sahfa
-.section sahfb
-.section sahga
-.section sahgb
-.section sahha
-.section sahhb
-.section sahia
-.section sahib
-.section sahja
-.section sahjb
-.section sahka
-.section sahkb
-.section sahla
-.section sahlb
-.section sahma
-.section sahmb
-.section sahna
-.section sahnb
-.section sahoa
-.section sahob
-.section sahpa
-.section sahpb
-.section sahqa
-.section sahqb
-.section sahra
-.section sahrb
-.section sahsa
-.section sahsb
-.section sahta
-.section sahtb
-.section sahua
-.section sahub
-.section sahva
-.section sahvb
-.section sahwa
-.section sahwb
-.section sahxa
-.section sahxb
-.section sahya
-.section sahyb
-.section sahza
-.section sahzb
-.section sah1a
-.section sah1b
-.section sah2a
-.section sah2b
-.section sah3a
-.section sah3b
-.section sah4a
-.section sah4b
-.section sah5a
-.section sah5b
-.section sah6a
-.section sah6b
-.section sah7a
-.section sah7b
-.section sah8a
-.section sah8b
-.section sah9a
-.section sah9b
-.section sah0a
-.section sah0b
-.section saiaa
-.section saiab
-.section saiba
-.section saibb
-.section saica
-.section saicb
-.section saida
-.section saidb
-.section saiea
-.section saieb
-.section saifa
-.section saifb
-.section saiga
-.section saigb
-.section saiha
-.section saihb
-.section saiia
-.section saiib
-.section saija
-.section saijb
-.section saika
-.section saikb
-.section saila
-.section sailb
-.section saima
-.section saimb
-.section saina
-.section sainb
-.section saioa
-.section saiob
-.section saipa
-.section saipb
-.section saiqa
-.section saiqb
-.section saira
-.section sairb
-.section saisa
-.section saisb
-.section saita
-.section saitb
-.section saiua
-.section saiub
-.section saiva
-.section saivb
-.section saiwa
-.section saiwb
-.section saixa
-.section saixb
-.section saiya
-.section saiyb
-.section saiza
-.section saizb
-.section sai1a
-.section sai1b
-.section sai2a
-.section sai2b
-.section sai3a
-.section sai3b
-.section sai4a
-.section sai4b
-.section sai5a
-.section sai5b
-.section sai6a
-.section sai6b
-.section sai7a
-.section sai7b
-.section sai8a
-.section sai8b
-.section sai9a
-.section sai9b
-.section sai0a
-.section sai0b
-.section sajaa
-.section sajab
-.section sajba
-.section sajbb
-.section sajca
-.section sajcb
-.section sajda
-.section sajdb
-.section sajea
-.section sajeb
-.section sajfa
-.section sajfb
-.section sajga
-.section sajgb
-.section sajha
-.section sajhb
-.section sajia
-.section sajib
-.section sajja
-.section sajjb
-.section sajka
-.section sajkb
-.section sajla
-.section sajlb
-.section sajma
-.section sajmb
-.section sajna
-.section sajnb
-.section sajoa
-.section sajob
-.section sajpa
-.section sajpb
-.section sajqa
-.section sajqb
-.section sajra
-.section sajrb
-.section sajsa
-.section sajsb
-.section sajta
-.section sajtb
-.section sajua
-.section sajub
-.section sajva
-.section sajvb
-.section sajwa
-.section sajwb
-.section sajxa
-.section sajxb
-.section sajya
-.section sajyb
-.section sajza
-.section sajzb
-.section saj1a
-.section saj1b
-.section saj2a
-.section saj2b
-.section saj3a
-.section saj3b
-.section saj4a
-.section saj4b
-.section saj5a
-.section saj5b
-.section saj6a
-.section saj6b
-.section saj7a
-.section saj7b
-.section saj8a
-.section saj8b
-.section saj9a
-.section saj9b
-.section saj0a
-.section saj0b
-.section sakaa
-.section sakab
-.section sakba
-.section sakbb
-.section sakca
-.section sakcb
-.section sakda
-.section sakdb
-.section sakea
-.section sakeb
-.section sakfa
-.section sakfb
-.section sakga
-.section sakgb
-.section sakha
-.section sakhb
-.section sakia
-.section sakib
-.section sakja
-.section sakjb
-.section sakka
-.section sakkb
-.section sakla
-.section saklb
-.section sakma
-.section sakmb
-.section sakna
-.section saknb
-.section sakoa
-.section sakob
-.section sakpa
-.section sakpb
-.section sakqa
-.section sakqb
-.section sakra
-.section sakrb
-.section saksa
-.section saksb
-.section sakta
-.section saktb
-.section sakua
-.section sakub
-.section sakva
-.section sakvb
-.section sakwa
-.section sakwb
-.section sakxa
-.section sakxb
-.section sakya
-.section sakyb
-.section sakza
-.section sakzb
-.section sak1a
-.section sak1b
-.section sak2a
-.section sak2b
-.section sak3a
-.section sak3b
-.section sak4a
-.section sak4b
-.section sak5a
-.section sak5b
-.section sak6a
-.section sak6b
-.section sak7a
-.section sak7b
-.section sak8a
-.section sak8b
-.section sak9a
-.section sak9b
-.section sak0a
-.section sak0b
-.section salaa
-.section salab
-.section salba
-.section salbb
-.section salca
-.section salcb
-.section salda
-.section saldb
-.section salea
-.section saleb
-.section salfa
-.section salfb
-.section salga
-.section salgb
-.section salha
-.section salhb
-.section salia
-.section salib
-.section salja
-.section saljb
-.section salka
-.section salkb
-.section salla
-.section sallb
-.section salma
-.section salmb
-.section salna
-.section salnb
-.section saloa
-.section salob
-.section salpa
-.section salpb
-.section salqa
-.section salqb
-.section salra
-.section salrb
-.section salsa
-.section salsb
-.section salta
-.section saltb
-.section salua
-.section salub
-.section salva
-.section salvb
-.section salwa
-.section salwb
-.section salxa
-.section salxb
-.section salya
-.section salyb
-.section salza
-.section salzb
-.section sal1a
-.section sal1b
-.section sal2a
-.section sal2b
-.section sal3a
-.section sal3b
-.section sal4a
-.section sal4b
-.section sal5a
-.section sal5b
-.section sal6a
-.section sal6b
-.section sal7a
-.section sal7b
-.section sal8a
-.section sal8b
-.section sal9a
-.section sal9b
-.section sal0a
-.section sal0b
-.section samaa
-.section samab
-.section samba
-.section sambb
-.section samca
-.section samcb
-.section samda
-.section samdb
-.section samea
-.section sameb
-.section samfa
-.section samfb
-.section samga
-.section samgb
-.section samha
-.section samhb
-.section samia
-.section samib
-.section samja
-.section samjb
-.section samka
-.section samkb
-.section samla
-.section samlb
-.section samma
-.section sammb
-.section samna
-.section samnb
-.section samoa
-.section samob
-.section sampa
-.section sampb
-.section samqa
-.section samqb
-.section samra
-.section samrb
-.section samsa
-.section samsb
-.section samta
-.section samtb
-.section samua
-.section samub
-.section samva
-.section samvb
-.section samwa
-.section samwb
-.section samxa
-.section samxb
-.section samya
-.section samyb
-.section samza
-.section samzb
-.section sam1a
-.section sam1b
-.section sam2a
-.section sam2b
-.section sam3a
-.section sam3b
-.section sam4a
-.section sam4b
-.section sam5a
-.section sam5b
-.section sam6a
-.section sam6b
-.section sam7a
-.section sam7b
-.section sam8a
-.section sam8b
-.section sam9a
-.section sam9b
-.section sam0a
-.section sam0b
-.section sanaa
-.section sanab
-.section sanba
-.section sanbb
-.section sanca
-.section sancb
-.section sanda
-.section sandb
-.section sanea
-.section saneb
-.section sanfa
-.section sanfb
-.section sanga
-.section sangb
-.section sanha
-.section sanhb
-.section sania
-.section sanib
-.section sanja
-.section sanjb
-.section sanka
-.section sankb
-.section sanla
-.section sanlb
-.section sanma
-.section sanmb
-.section sanna
-.section sannb
-.section sanoa
-.section sanob
-.section sanpa
-.section sanpb
-.section sanqa
-.section sanqb
-.section sanra
-.section sanrb
-.section sansa
-.section sansb
-.section santa
-.section santb
-.section sanua
-.section sanub
-.section sanva
-.section sanvb
-.section sanwa
-.section sanwb
-.section sanxa
-.section sanxb
-.section sanya
-.section sanyb
-.section sanza
-.section sanzb
-.section san1a
-.section san1b
-.section san2a
-.section san2b
-.section san3a
-.section san3b
-.section san4a
-.section san4b
-.section san5a
-.section san5b
-.section san6a
-.section san6b
-.section san7a
-.section san7b
-.section san8a
-.section san8b
-.section san9a
-.section san9b
-.section san0a
-.section san0b
-.section saoaa
-.section saoab
-.section saoba
-.section saobb
-.section saoca
-.section saocb
-.section saoda
-.section saodb
-.section saoea
-.section saoeb
-.section saofa
-.section saofb
-.section saoga
-.section saogb
-.section saoha
-.section saohb
-.section saoia
-.section saoib
-.section saoja
-.section saojb
-.section saoka
-.section saokb
-.section saola
-.section saolb
-.section saoma
-.section saomb
-.section saona
-.section saonb
-.section saooa
-.section saoob
-.section saopa
-.section saopb
-.section saoqa
-.section saoqb
-.section saora
-.section saorb
-.section saosa
-.section saosb
-.section saota
-.section saotb
-.section saoua
-.section saoub
-.section saova
-.section saovb
-.section saowa
-.section saowb
-.section saoxa
-.section saoxb
-.section saoya
-.section saoyb
-.section saoza
-.section saozb
-.section sao1a
-.section sao1b
-.section sao2a
-.section sao2b
-.section sao3a
-.section sao3b
-.section sao4a
-.section sao4b
-.section sao5a
-.section sao5b
-.section sao6a
-.section sao6b
-.section sao7a
-.section sao7b
-.section sao8a
-.section sao8b
-.section sao9a
-.section sao9b
-.section sao0a
-.section sao0b
-.section sapaa
-.section sapab
-.section sapba
-.section sapbb
-.section sapca
-.section sapcb
-.section sapda
-.section sapdb
-.section sapea
-.section sapeb
-.section sapfa
-.section sapfb
-.section sapga
-.section sapgb
-.section sapha
-.section saphb
-.section sapia
-.section sapib
-.section sapja
-.section sapjb
-.section sapka
-.section sapkb
-.section sapla
-.section saplb
-.section sapma
-.section sapmb
-.section sapna
-.section sapnb
-.section sapoa
-.section sapob
-.section sappa
-.section sappb
-.section sapqa
-.section sapqb
-.section sapra
-.section saprb
-.section sapsa
-.section sapsb
-.section sapta
-.section saptb
-.section sapua
-.section sapub
-.section sapva
-.section sapvb
-.section sapwa
-.section sapwb
-.section sapxa
-.section sapxb
-.section sapya
-.section sapyb
-.section sapza
-.section sapzb
-.section sap1a
-.section sap1b
-.section sap2a
-.section sap2b
-.section sap3a
-.section sap3b
-.section sap4a
-.section sap4b
-.section sap5a
-.section sap5b
-.section sap6a
-.section sap6b
-.section sap7a
-.section sap7b
-.section sap8a
-.section sap8b
-.section sap9a
-.section sap9b
-.section sap0a
-.section sap0b
-.section saqaa
-.section saqab
-.section saqba
-.section saqbb
-.section saqca
-.section saqcb
-.section saqda
-.section saqdb
-.section saqea
-.section saqeb
-.section saqfa
-.section saqfb
-.section saqga
-.section saqgb
-.section saqha
-.section saqhb
-.section saqia
-.section saqib
-.section saqja
-.section saqjb
-.section saqka
-.section saqkb
-.section saqla
-.section saqlb
-.section saqma
-.section saqmb
-.section saqna
-.section saqnb
-.section saqoa
-.section saqob
-.section saqpa
-.section saqpb
-.section saqqa
-.section saqqb
-.section saqra
-.section saqrb
-.section saqsa
-.section saqsb
-.section saqta
-.section saqtb
-.section saqua
-.section saqub
-.section saqva
-.section saqvb
-.section saqwa
-.section saqwb
-.section saqxa
-.section saqxb
-.section saqya
-.section saqyb
-.section saqza
-.section saqzb
-.section saq1a
-.section saq1b
-.section saq2a
-.section saq2b
-.section saq3a
-.section saq3b
-.section saq4a
-.section saq4b
-.section saq5a
-.section saq5b
-.section saq6a
-.section saq6b
-.section saq7a
-.section saq7b
-.section saq8a
-.section saq8b
-.section saq9a
-.section saq9b
-.section saq0a
-.section saq0b
-.section saraa
-.section sarab
-.section sarba
-.section sarbb
-.section sarca
-.section sarcb
-.section sarda
-.section sardb
-.section sarea
-.section sareb
-.section sarfa
-.section sarfb
-.section sarga
-.section sargb
-.section sarha
-.section sarhb
-.section saria
-.section sarib
-.section sarja
-.section sarjb
-.section sarka
-.section sarkb
-.section sarla
-.section sarlb
-.section sarma
-.section sarmb
-.section sarna
-.section sarnb
-.section saroa
-.section sarob
-.section sarpa
-.section sarpb
-.section sarqa
-.section sarqb
-.section sarra
-.section sarrb
-.section sarsa
-.section sarsb
-.section sarta
-.section sartb
-.section sarua
-.section sarub
-.section sarva
-.section sarvb
-.section sarwa
-.section sarwb
-.section sarxa
-.section sarxb
-.section sarya
-.section saryb
-.section sarza
-.section sarzb
-.section sar1a
-.section sar1b
-.section sar2a
-.section sar2b
-.section sar3a
-.section sar3b
-.section sar4a
-.section sar4b
-.section sar5a
-.section sar5b
-.section sar6a
-.section sar6b
-.section sar7a
-.section sar7b
-.section sar8a
-.section sar8b
-.section sar9a
-.section sar9b
-.section sar0a
-.section sar0b
-.section sasaa
-.section sasab
-.section sasba
-.section sasbb
-.section sasca
-.section sascb
-.section sasda
-.section sasdb
-.section sasea
-.section saseb
-.section sasfa
-.section sasfb
-.section sasga
-.section sasgb
-.section sasha
-.section sashb
-.section sasia
-.section sasib
-.section sasja
-.section sasjb
-.section saska
-.section saskb
-.section sasla
-.section saslb
-.section sasma
-.section sasmb
-.section sasna
-.section sasnb
-.section sasoa
-.section sasob
-.section saspa
-.section saspb
-.section sasqa
-.section sasqb
-.section sasra
-.section sasrb
-.section sassa
-.section sassb
-.section sasta
-.section sastb
-.section sasua
-.section sasub
-.section sasva
-.section sasvb
-.section saswa
-.section saswb
-.section sasxa
-.section sasxb
-.section sasya
-.section sasyb
-.section sasza
-.section saszb
-.section sas1a
-.section sas1b
-.section sas2a
-.section sas2b
-.section sas3a
-.section sas3b
-.section sas4a
-.section sas4b
-.section sas5a
-.section sas5b
-.section sas6a
-.section sas6b
-.section sas7a
-.section sas7b
-.section sas8a
-.section sas8b
-.section sas9a
-.section sas9b
-.section sas0a
-.section sas0b
-.section sataa
-.section satab
-.section satba
-.section satbb
-.section satca
-.section satcb
-.section satda
-.section satdb
-.section satea
-.section sateb
-.section satfa
-.section satfb
-.section satga
-.section satgb
-.section satha
-.section sathb
-.section satia
-.section satib
-.section satja
-.section satjb
-.section satka
-.section satkb
-.section satla
-.section satlb
-.section satma
-.section satmb
-.section satna
-.section satnb
-.section satoa
-.section satob
-.section satpa
-.section satpb
-.section satqa
-.section satqb
-.section satra
-.section satrb
-.section satsa
-.section satsb
-.section satta
-.section sattb
-.section satua
-.section satub
-.section satva
-.section satvb
-.section satwa
-.section satwb
-.section satxa
-.section satxb
-.section satya
-.section satyb
-.section satza
-.section satzb
-.section sat1a
-.section sat1b
-.section sat2a
-.section sat2b
-.section sat3a
-.section sat3b
-.section sat4a
-.section sat4b
-.section sat5a
-.section sat5b
-.section sat6a
-.section sat6b
-.section sat7a
-.section sat7b
-.section sat8a
-.section sat8b
-.section sat9a
-.section sat9b
-.section sat0a
-.section sat0b
-.section sauaa
-.section sauab
-.section sauba
-.section saubb
-.section sauca
-.section saucb
-.section sauda
-.section saudb
-.section sauea
-.section saueb
-.section saufa
-.section saufb
-.section sauga
-.section saugb
-.section sauha
-.section sauhb
-.section sauia
-.section sauib
-.section sauja
-.section saujb
-.section sauka
-.section saukb
-.section saula
-.section saulb
-.section sauma
-.section saumb
-.section sauna
-.section saunb
-.section sauoa
-.section sauob
-.section saupa
-.section saupb
-.section sauqa
-.section sauqb
-.section saura
-.section saurb
-.section sausa
-.section sausb
-.section sauta
-.section sautb
-.section sauua
-.section sauub
-.section sauva
-.section sauvb
-.section sauwa
-.section sauwb
-.section sauxa
-.section sauxb
-.section sauya
-.section sauyb
-.section sauza
-.section sauzb
-.section sau1a
-.section sau1b
-.section sau2a
-.section sau2b
-.section sau3a
-.section sau3b
-.section sau4a
-.section sau4b
-.section sau5a
-.section sau5b
-.section sau6a
-.section sau6b
-.section sau7a
-.section sau7b
-.section sau8a
-.section sau8b
-.section sau9a
-.section sau9b
-.section sau0a
-.section sau0b
-.section savaa
-.section savab
-.section savba
-.section savbb
-.section savca
-.section savcb
-.section savda
-.section savdb
-.section savea
-.section saveb
-.section savfa
-.section savfb
-.section savga
-.section savgb
-.section savha
-.section savhb
-.section savia
-.section savib
-.section savja
-.section savjb
-.section savka
-.section savkb
-.section savla
-.section savlb
-.section savma
-.section savmb
-.section savna
-.section savnb
-.section savoa
-.section savob
-.section savpa
-.section savpb
-.section savqa
-.section savqb
-.section savra
-.section savrb
-.section savsa
-.section savsb
-.section savta
-.section savtb
-.section savua
-.section savub
-.section savva
-.section savvb
-.section savwa
-.section savwb
-.section savxa
-.section savxb
-.section savya
-.section savyb
-.section savza
-.section savzb
-.section sav1a
-.section sav1b
-.section sav2a
-.section sav2b
-.section sav3a
-.section sav3b
-.section sav4a
-.section sav4b
-.section sav5a
-.section sav5b
-.section sav6a
-.section sav6b
-.section sav7a
-.section sav7b
-.section sav8a
-.section sav8b
-.section sav9a
-.section sav9b
-.section sav0a
-.section sav0b
-.section sawaa
-.section sawab
-.section sawba
-.section sawbb
-.section sawca
-.section sawcb
-.section sawda
-.section sawdb
-.section sawea
-.section saweb
-.section sawfa
-.section sawfb
-.section sawga
-.section sawgb
-.section sawha
-.section sawhb
-.section sawia
-.section sawib
-.section sawja
-.section sawjb
-.section sawka
-.section sawkb
-.section sawla
-.section sawlb
-.section sawma
-.section sawmb
-.section sawna
-.section sawnb
-.section sawoa
-.section sawob
-.section sawpa
-.section sawpb
-.section sawqa
-.section sawqb
-.section sawra
-.section sawrb
-.section sawsa
-.section sawsb
-.section sawta
-.section sawtb
-.section sawua
-.section sawub
-.section sawva
-.section sawvb
-.section sawwa
-.section sawwb
-.section sawxa
-.section sawxb
-.section sawya
-.section sawyb
-.section sawza
-.section sawzb
-.section saw1a
-.section saw1b
-.section saw2a
-.section saw2b
-.section saw3a
-.section saw3b
-.section saw4a
-.section saw4b
-.section saw5a
-.section saw5b
-.section saw6a
-.section saw6b
-.section saw7a
-.section saw7b
-.section saw8a
-.section saw8b
-.section saw9a
-.section saw9b
-.section saw0a
-.section saw0b
-.section saxaa
-.section saxab
-.section saxba
-.section saxbb
-.section saxca
-.section saxcb
-.section saxda
-.section saxdb
-.section saxea
-.section saxeb
-.section saxfa
-.section saxfb
-.section saxga
-.section saxgb
-.section saxha
-.section saxhb
-.section saxia
-.section saxib
-.section saxja
-.section saxjb
-.section saxka
-.section saxkb
-.section saxla
-.section saxlb
-.section saxma
-.section saxmb
-.section saxna
-.section saxnb
-.section saxoa
-.section saxob
-.section saxpa
-.section saxpb
-.section saxqa
-.section saxqb
-.section saxra
-.section saxrb
-.section saxsa
-.section saxsb
-.section saxta
-.section saxtb
-.section saxua
-.section saxub
-.section saxva
-.section saxvb
-.section saxwa
-.section saxwb
-.section saxxa
-.section saxxb
-.section saxya
-.section saxyb
-.section saxza
-.section saxzb
-.section sax1a
-.section sax1b
-.section sax2a
-.section sax2b
-.section sax3a
-.section sax3b
-.section sax4a
-.section sax4b
-.section sax5a
-.section sax5b
-.section sax6a
-.section sax6b
-.section sax7a
-.section sax7b
-.section sax8a
-.section sax8b
-.section sax9a
-.section sax9b
-.section sax0a
-.section sax0b
-.section sayaa
-.section sayab
-.section sayba
-.section saybb
-.section sayca
-.section saycb
-.section sayda
-.section saydb
-.section sayea
-.section sayeb
-.section sayfa
-.section sayfb
-.section sayga
-.section saygb
-.section sayha
-.section sayhb
-.section sayia
-.section sayib
-.section sayja
-.section sayjb
-.section sayka
-.section saykb
-.section sayla
-.section saylb
-.section sayma
-.section saymb
-.section sayna
-.section saynb
-.section sayoa
-.section sayob
-.section saypa
-.section saypb
-.section sayqa
-.section sayqb
-.section sayra
-.section sayrb
-.section saysa
-.section saysb
-.section sayta
-.section saytb
-.section sayua
-.section sayub
-.section sayva
-.section sayvb
-.section saywa
-.section saywb
-.section sayxa
-.section sayxb
-.section sayya
-.section sayyb
-.section sayza
-.section sayzb
-.section say1a
-.section say1b
-.section say2a
-.section say2b
-.section say3a
-.section say3b
-.section say4a
-.section say4b
-.section say5a
-.section say5b
-.section say6a
-.section say6b
-.section say7a
-.section say7b
-.section say8a
-.section say8b
-.section say9a
-.section say9b
-.section say0a
-.section say0b
-.section sazaa
-.section sazab
-.section sazba
-.section sazbb
-.section sazca
-.section sazcb
-.section sazda
-.section sazdb
-.section sazea
-.section sazeb
-.section sazfa
-.section sazfb
-.section sazga
-.section sazgb
-.section sazha
-.section sazhb
-.section sazia
-.section sazib
-.section sazja
-.section sazjb
-.section sazka
-.section sazkb
-.section sazla
-.section sazlb
-.section sazma
-.section sazmb
-.section sazna
-.section saznb
-.section sazoa
-.section sazob
-.section sazpa
-.section sazpb
-.section sazqa
-.section sazqb
-.section sazra
-.section sazrb
-.section sazsa
-.section sazsb
-.section sazta
-.section saztb
-.section sazua
-.section sazub
-.section sazva
-.section sazvb
-.section sazwa
-.section sazwb
-.section sazxa
-.section sazxb
-.section sazya
-.section sazyb
-.section sazza
-.section sazzb
-.section saz1a
-.section saz1b
-.section saz2a
-.section saz2b
-.section saz3a
-.section saz3b
-.section saz4a
-.section saz4b
-.section saz5a
-.section saz5b
-.section saz6a
-.section saz6b
-.section saz7a
-.section saz7b
-.section saz8a
-.section saz8b
-.section saz9a
-.section saz9b
-.section saz0a
-.section saz0b
-.section sa1aa
-.section sa1ab
-.section sa1ba
-.section sa1bb
-.section sa1ca
-.section sa1cb
-.section sa1da
-.section sa1db
-.section sa1ea
-.section sa1eb
-.section sa1fa
-.section sa1fb
-.section sa1ga
-.section sa1gb
-.section sa1ha
-.section sa1hb
-.section sa1ia
-.section sa1ib
-.section sa1ja
-.section sa1jb
-.section sa1ka
-.section sa1kb
-.section sa1la
-.section sa1lb
-.section sa1ma
-.section sa1mb
-.section sa1na
-.section sa1nb
-.section sa1oa
-.section sa1ob
-.section sa1pa
-.section sa1pb
-.section sa1qa
-.section sa1qb
-.section sa1ra
-.section sa1rb
-.section sa1sa
-.section sa1sb
-.section sa1ta
-.section sa1tb
-.section sa1ua
-.section sa1ub
-.section sa1va
-.section sa1vb
-.section sa1wa
-.section sa1wb
-.section sa1xa
-.section sa1xb
-.section sa1ya
-.section sa1yb
-.section sa1za
-.section sa1zb
-.section sa11a
-.section sa11b
-.section sa12a
-.section sa12b
-.section sa13a
-.section sa13b
-.section sa14a
-.section sa14b
-.section sa15a
-.section sa15b
-.section sa16a
-.section sa16b
-.section sa17a
-.section sa17b
-.section sa18a
-.section sa18b
-.section sa19a
-.section sa19b
-.section sa10a
-.section sa10b
-.section sa2aa
-.section sa2ab
-.section sa2ba
-.section sa2bb
-.section sa2ca
-.section sa2cb
-.section sa2da
-.section sa2db
-.section sa2ea
-.section sa2eb
-.section sa2fa
-.section sa2fb
-.section sa2ga
-.section sa2gb
-.section sa2ha
-.section sa2hb
-.section sa2ia
-.section sa2ib
-.section sa2ja
-.section sa2jb
-.section sa2ka
-.section sa2kb
-.section sa2la
-.section sa2lb
-.section sa2ma
-.section sa2mb
-.section sa2na
-.section sa2nb
-.section sa2oa
-.section sa2ob
-.section sa2pa
-.section sa2pb
-.section sa2qa
-.section sa2qb
-.section sa2ra
-.section sa2rb
-.section sa2sa
-.section sa2sb
-.section sa2ta
-.section sa2tb
-.section sa2ua
-.section sa2ub
-.section sa2va
-.section sa2vb
-.section sa2wa
-.section sa2wb
-.section sa2xa
-.section sa2xb
-.section sa2ya
-.section sa2yb
-.section sa2za
-.section sa2zb
-.section sa21a
-.section sa21b
-.section sa22a
-.section sa22b
-.section sa23a
-.section sa23b
-.section sa24a
-.section sa24b
-.section sa25a
-.section sa25b
-.section sa26a
-.section sa26b
-.section sa27a
-.section sa27b
-.section sa28a
-.section sa28b
-.section sa29a
-.section sa29b
-.section sa20a
-.section sa20b
-.section sa3aa
-.section sa3ab
-.section sa3ba
-.section sa3bb
-.section sa3ca
-.section sa3cb
-.section sa3da
-.section sa3db
-.section sa3ea
-.section sa3eb
-.section sa3fa
-.section sa3fb
-.section sa3ga
-.section sa3gb
-.section sa3ha
-.section sa3hb
-.section sa3ia
-.section sa3ib
-.section sa3ja
-.section sa3jb
-.section sa3ka
-.section sa3kb
-.section sa3la
-.section sa3lb
-.section sa3ma
-.section sa3mb
-.section sa3na
-.section sa3nb
-.section sa3oa
-.section sa3ob
-.section sa3pa
-.section sa3pb
-.section sa3qa
-.section sa3qb
-.section sa3ra
-.section sa3rb
-.section sa3sa
-.section sa3sb
-.section sa3ta
-.section sa3tb
-.section sa3ua
-.section sa3ub
-.section sa3va
-.section sa3vb
-.section sa3wa
-.section sa3wb
-.section sa3xa
-.section sa3xb
-.section sa3ya
-.section sa3yb
-.section sa3za
-.section sa3zb
-.section sa31a
-.section sa31b
-.section sa32a
-.section sa32b
-.section sa33a
-.section sa33b
-.section sa34a
-.section sa34b
-.section sa35a
-.section sa35b
-.section sa36a
-.section sa36b
-.section sa37a
-.section sa37b
-.section sa38a
-.section sa38b
-.section sa39a
-.section sa39b
-.section sa30a
-.section sa30b
-.section sa4aa
-.section sa4ab
-.section sa4ba
-.section sa4bb
-.section sa4ca
-.section sa4cb
-.section sa4da
-.section sa4db
-.section sa4ea
-.section sa4eb
-.section sa4fa
-.section sa4fb
-.section sa4ga
-.section sa4gb
-.section sa4ha
-.section sa4hb
-.section sa4ia
-.section sa4ib
-.section sa4ja
-.section sa4jb
-.section sa4ka
-.section sa4kb
-.section sa4la
-.section sa4lb
-.section sa4ma
-.section sa4mb
-.section sa4na
-.section sa4nb
-.section sa4oa
-.section sa4ob
-.section sa4pa
-.section sa4pb
-.section sa4qa
-.section sa4qb
-.section sa4ra
-.section sa4rb
-.section sa4sa
-.section sa4sb
-.section sa4ta
-.section sa4tb
-.section sa4ua
-.section sa4ub
-.section sa4va
-.section sa4vb
-.section sa4wa
-.section sa4wb
-.section sa4xa
-.section sa4xb
-.section sa4ya
-.section sa4yb
-.section sa4za
-.section sa4zb
-.section sa41a
-.section sa41b
-.section sa42a
-.section sa42b
-.section sa43a
-.section sa43b
-.section sa44a
-.section sa44b
-.section sa45a
-.section sa45b
-.section sa46a
-.section sa46b
-.section sa47a
-.section sa47b
-.section sa48a
-.section sa48b
-.section sa49a
-.section sa49b
-.section sa40a
-.section sa40b
-.section sa5aa
-.section sa5ab
-.section sa5ba
-.section sa5bb
-.section sa5ca
-.section sa5cb
-.section sa5da
-.section sa5db
-.section sa5ea
-.section sa5eb
-.section sa5fa
-.section sa5fb
-.section sa5ga
-.section sa5gb
-.section sa5ha
-.section sa5hb
-.section sa5ia
-.section sa5ib
-.section sa5ja
-.section sa5jb
-.section sa5ka
-.section sa5kb
-.section sa5la
-.section sa5lb
-.section sa5ma
-.section sa5mb
-.section sa5na
-.section sa5nb
-.section sa5oa
-.section sa5ob
-.section sa5pa
-.section sa5pb
-.section sa5qa
-.section sa5qb
-.section sa5ra
-.section sa5rb
-.section sa5sa
-.section sa5sb
-.section sa5ta
-.section sa5tb
-.section sa5ua
-.section sa5ub
-.section sa5va
-.section sa5vb
-.section sa5wa
-.section sa5wb
-.section sa5xa
-.section sa5xb
-.section sa5ya
-.section sa5yb
-.section sa5za
-.section sa5zb
-.section sa51a
-.section sa51b
-.section sa52a
-.section sa52b
-.section sa53a
-.section sa53b
-.section sa54a
-.section sa54b
-.section sa55a
-.section sa55b
-.section sa56a
-.section sa56b
-.section sa57a
-.section sa57b
-.section sa58a
-.section sa58b
-.section sa59a
-.section sa59b
-.section sa50a
-.section sa50b
-.section sa6aa
-.section sa6ab
-.section sa6ba
-.section sa6bb
-.section sa6ca
-.section sa6cb
-.section sa6da
-.section sa6db
-.section sa6ea
-.section sa6eb
-.section sa6fa
-.section sa6fb
-.section sa6ga
-.section sa6gb
-.section sa6ha
-.section sa6hb
-.section sa6ia
-.section sa6ib
-.section sa6ja
-.section sa6jb
-.section sa6ka
-.section sa6kb
-.section sa6la
-.section sa6lb
-.section sa6ma
-.section sa6mb
-.section sa6na
-.section sa6nb
-.section sa6oa
-.section sa6ob
-.section sa6pa
-.section sa6pb
-.section sa6qa
-.section sa6qb
-.section sa6ra
-.section sa6rb
-.section sa6sa
-.section sa6sb
-.section sa6ta
-.section sa6tb
-.section sa6ua
-.section sa6ub
-.section sa6va
-.section sa6vb
-.section sa6wa
-.section sa6wb
-.section sa6xa
-.section sa6xb
-.section sa6ya
-.section sa6yb
-.section sa6za
-.section sa6zb
-.section sa61a
-.section sa61b
-.section sa62a
-.section sa62b
-.section sa63a
-.section sa63b
-.section sa64a
-.section sa64b
-.section sa65a
-.section sa65b
-.section sa66a
-.section sa66b
-.section sa67a
-.section sa67b
-.section sa68a
-.section sa68b
-.section sa69a
-.section sa69b
-.section sa60a
-.section sa60b
-.section sa7aa
-.section sa7ab
-.section sa7ba
-.section sa7bb
-.section sa7ca
-.section sa7cb
-.section sa7da
-.section sa7db
-.section sa7ea
-.section sa7eb
-.section sa7fa
-.section sa7fb
-.section sa7ga
-.section sa7gb
-.section sa7ha
-.section sa7hb
-.section sa7ia
-.section sa7ib
-.section sa7ja
-.section sa7jb
-.section sa7ka
-.section sa7kb
-.section sa7la
-.section sa7lb
-.section sa7ma
-.section sa7mb
-.section sa7na
-.section sa7nb
-.section sa7oa
-.section sa7ob
-.section sa7pa
-.section sa7pb
-.section sa7qa
-.section sa7qb
-.section sa7ra
-.section sa7rb
-.section sa7sa
-.section sa7sb
-.section sa7ta
-.section sa7tb
-.section sa7ua
-.section sa7ub
-.section sa7va
-.section sa7vb
-.section sa7wa
-.section sa7wb
-.section sa7xa
-.section sa7xb
-.section sa7ya
-.section sa7yb
-.section sa7za
-.section sa7zb
-.section sa71a
-.section sa71b
-.section sa72a
-.section sa72b
-.section sa73a
-.section sa73b
-.section sa74a
-.section sa74b
-.section sa75a
-.section sa75b
-.section sa76a
-.section sa76b
-.section sa77a
-.section sa77b
-.section sa78a
-.section sa78b
-.section sa79a
-.section sa79b
-.section sa70a
-.section sa70b
-.section sa8aa
-.section sa8ab
-.section sa8ba
-.section sa8bb
-.section sa8ca
-.section sa8cb
-.section sa8da
-.section sa8db
-.section sa8ea
-.section sa8eb
-.section sa8fa
-.section sa8fb
-.section sa8ga
-.section sa8gb
-.section sa8ha
-.section sa8hb
-.section sa8ia
-.section sa8ib
-.section sa8ja
-.section sa8jb
-.section sa8ka
-.section sa8kb
-.section sa8la
-.section sa8lb
-.section sa8ma
-.section sa8mb
-.section sa8na
-.section sa8nb
-.section sa8oa
-.section sa8ob
-.section sa8pa
-.section sa8pb
-.section sa8qa
-.section sa8qb
-.section sa8ra
-.section sa8rb
-.section sa8sa
-.section sa8sb
-.section sa8ta
-.section sa8tb
-.section sa8ua
-.section sa8ub
-.section sa8va
-.section sa8vb
-.section sa8wa
-.section sa8wb
-.section sa8xa
-.section sa8xb
-.section sa8ya
-.section sa8yb
-.section sa8za
-.section sa8zb
-.section sa81a
-.section sa81b
-.section sa82a
-.section sa82b
-.section sa83a
-.section sa83b
-.section sa84a
-.section sa84b
-.section sa85a
-.section sa85b
-.section sa86a
-.section sa86b
-.section sa87a
-.section sa87b
-.section sa88a
-.section sa88b
-.section sa89a
-.section sa89b
-.section sa80a
-.section sa80b
-.section sa9aa
-.section sa9ab
-.section sa9ba
-.section sa9bb
-.section sa9ca
-.section sa9cb
-.section sa9da
-.section sa9db
-.section sa9ea
-.section sa9eb
-.section sa9fa
-.section sa9fb
-.section sa9ga
-.section sa9gb
-.section sa9ha
-.section sa9hb
-.section sa9ia
-.section sa9ib
-.section sa9ja
-.section sa9jb
-.section sa9ka
-.section sa9kb
-.section sa9la
-.section sa9lb
-.section sa9ma
-.section sa9mb
-.section sa9na
-.section sa9nb
-.section sa9oa
-.section sa9ob
-.section sa9pa
-.section sa9pb
-.section sa9qa
-.section sa9qb
-.section sa9ra
-.section sa9rb
-.section sa9sa
-.section sa9sb
-.section sa9ta
-.section sa9tb
-.section sa9ua
-.section sa9ub
-.section sa9va
-.section sa9vb
-.section sa9wa
-.section sa9wb
-.section sa9xa
-.section sa9xb
-.section sa9ya
-.section sa9yb
-.section sa9za
-.section sa9zb
-.section sa91a
-.section sa91b
-.section sa92a
-.section sa92b
-.section sa93a
-.section sa93b
-.section sa94a
-.section sa94b
-.section sa95a
-.section sa95b
-.section sa96a
-.section sa96b
-.section sa97a
-.section sa97b
-.section sa98a
-.section sa98b
-.section sa99a
-.section sa99b
-.section sa90a
-.section sa90b
-.section sa0aa
-.section sa0ab
-.section sa0ba
-.section sa0bb
-.section sa0ca
-.section sa0cb
-.section sa0da
-.section sa0db
-.section sa0ea
-.section sa0eb
-.section sa0fa
-.section sa0fb
-.section sa0ga
-.section sa0gb
-.section sa0ha
-.section sa0hb
-.section sa0ia
-.section sa0ib
-.section sa0ja
-.section sa0jb
-.section sa0ka
-.section sa0kb
-.section sa0la
-.section sa0lb
-.section sa0ma
-.section sa0mb
-.section sa0na
-.section sa0nb
-.section sa0oa
-.section sa0ob
-.section sa0pa
-.section sa0pb
-.section sa0qa
-.section sa0qb
-.section sa0ra
-.section sa0rb
-.section sa0sa
-.section sa0sb
-.section sa0ta
-.section sa0tb
-.section sa0ua
-.section sa0ub
-.section sa0va
-.section sa0vb
-.section sa0wa
-.section sa0wb
-.section sa0xa
-.section sa0xb
-.section sa0ya
-.section sa0yb
-.section sa0za
-.section sa0zb
-.section sa01a
-.section sa01b
-.section sa02a
-.section sa02b
-.section sa03a
-.section sa03b
-.section sa04a
-.section sa04b
-.section sa05a
-.section sa05b
-.section sa06a
-.section sa06b
-.section sa07a
-.section sa07b
-.section sa08a
-.section sa08b
-.section sa09a
-.section sa09b
-.section sa00a
-.section sa00b
-.section sbaaa
-.section sbaab
-.section sbaba
-.section sbabb
-.section sbaca
-.section sbacb
-.section sbada
-.section sbadb
-.section sbaea
-.section sbaeb
-.section sbafa
-.section sbafb
-.section sbaga
-.section sbagb
-.section sbaha
-.section sbahb
-.section sbaia
-.section sbaib
-.section sbaja
-.section sbajb
-.section sbaka
-.section sbakb
-.section sbala
-.section sbalb
-.section sbama
-.section sbamb
-.section sbana
-.section sbanb
-.section sbaoa
-.section sbaob
-.section sbapa
-.section sbapb
-.section sbaqa
-.section sbaqb
-.section sbara
-.section sbarb
-.section sbasa
-.section sbasb
-.section sbata
-.section sbatb
-.section sbaua
-.section sbaub
-.section sbava
-.section sbavb
-.section sbawa
-.section sbawb
-.section sbaxa
-.section sbaxb
-.section sbaya
-.section sbayb
-.section sbaza
-.section sbazb
-.section sba1a
-.section sba1b
-.section sba2a
-.section sba2b
-.section sba3a
-.section sba3b
-.section sba4a
-.section sba4b
-.section sba5a
-.section sba5b
-.section sba6a
-.section sba6b
-.section sba7a
-.section sba7b
-.section sba8a
-.section sba8b
-.section sba9a
-.section sba9b
-.section sba0a
-.section sba0b
-.section sbbaa
-.section sbbab
-.section sbbba
-.section sbbbb
-.section sbbca
-.section sbbcb
-.section sbbda
-.section sbbdb
-.section sbbea
-.section sbbeb
-.section sbbfa
-.section sbbfb
-.section sbbga
-.section sbbgb
-.section sbbha
-.section sbbhb
-.section sbbia
-.section sbbib
-.section sbbja
-.section sbbjb
-.section sbbka
-.section sbbkb
-.section sbbla
-.section sbblb
-.section sbbma
-.section sbbmb
-.section sbbna
-.section sbbnb
-.section sbboa
-.section sbbob
-.section sbbpa
-.section sbbpb
-.section sbbqa
-.section sbbqb
-.section sbbra
-.section sbbrb
-.section sbbsa
-.section sbbsb
-.section sbbta
-.section sbbtb
-.section sbbua
-.section sbbub
-.section sbbva
-.section sbbvb
-.section sbbwa
-.section sbbwb
-.section sbbxa
-.section sbbxb
-.section sbbya
-.section sbbyb
-.section sbbza
-.section sbbzb
-.section sbb1a
-.section sbb1b
-.section sbb2a
-.section sbb2b
-.section sbb3a
-.section sbb3b
-.section sbb4a
-.section sbb4b
-.section sbb5a
-.section sbb5b
-.section sbb6a
-.section sbb6b
-.section sbb7a
-.section sbb7b
-.section sbb8a
-.section sbb8b
-.section sbb9a
-.section sbb9b
-.section sbb0a
-.section sbb0b
-.section sbcaa
-.section sbcab
-.section sbcba
-.section sbcbb
-.section sbcca
-.section sbccb
-.section sbcda
-.section sbcdb
-.section sbcea
-.section sbceb
-.section sbcfa
-.section sbcfb
-.section sbcga
-.section sbcgb
-.section sbcha
-.section sbchb
-.section sbcia
-.section sbcib
-.section sbcja
-.section sbcjb
-.section sbcka
-.section sbckb
-.section sbcla
-.section sbclb
-.section sbcma
-.section sbcmb
-.section sbcna
-.section sbcnb
-.section sbcoa
-.section sbcob
-.section sbcpa
-.section sbcpb
-.section sbcqa
-.section sbcqb
-.section sbcra
-.section sbcrb
-.section sbcsa
-.section sbcsb
-.section sbcta
-.section sbctb
-.section sbcua
-.section sbcub
-.section sbcva
-.section sbcvb
-.section sbcwa
-.section sbcwb
-.section sbcxa
-.section sbcxb
-.section sbcya
-.section sbcyb
-.section sbcza
-.section sbczb
-.section sbc1a
-.section sbc1b
-.section sbc2a
-.section sbc2b
-.section sbc3a
-.section sbc3b
-.section sbc4a
-.section sbc4b
-.section sbc5a
-.section sbc5b
-.section sbc6a
-.section sbc6b
-.section sbc7a
-.section sbc7b
-.section sbc8a
-.section sbc8b
-.section sbc9a
-.section sbc9b
-.section sbc0a
-.section sbc0b
-.section sbdaa
-.section sbdab
-.section sbdba
-.section sbdbb
-.section sbdca
-.section sbdcb
-.section sbdda
-.section sbddb
-.section sbdea
-.section sbdeb
-.section sbdfa
-.section sbdfb
-.section sbdga
-.section sbdgb
-.section sbdha
-.section sbdhb
-.section sbdia
-.section sbdib
-.section sbdja
-.section sbdjb
-.section sbdka
-.section sbdkb
-.section sbdla
-.section sbdlb
-.section sbdma
-.section sbdmb
-.section sbdna
-.section sbdnb
-.section sbdoa
-.section sbdob
-.section sbdpa
-.section sbdpb
-.section sbdqa
-.section sbdqb
-.section sbdra
-.section sbdrb
-.section sbdsa
-.section sbdsb
-.section sbdta
-.section sbdtb
-.section sbdua
-.section sbdub
-.section sbdva
-.section sbdvb
-.section sbdwa
-.section sbdwb
-.section sbdxa
-.section sbdxb
-.section sbdya
-.section sbdyb
-.section sbdza
-.section sbdzb
-.section sbd1a
-.section sbd1b
-.section sbd2a
-.section sbd2b
-.section sbd3a
-.section sbd3b
-.section sbd4a
-.section sbd4b
-.section sbd5a
-.section sbd5b
-.section sbd6a
-.section sbd6b
-.section sbd7a
-.section sbd7b
-.section sbd8a
-.section sbd8b
-.section sbd9a
-.section sbd9b
-.section sbd0a
-.section sbd0b
-.section sbeaa
-.section sbeab
-.section sbeba
-.section sbebb
-.section sbeca
-.section sbecb
-.section sbeda
-.section sbedb
-.section sbeea
-.section sbeeb
-.section sbefa
-.section sbefb
-.section sbega
-.section sbegb
-.section sbeha
-.section sbehb
-.section sbeia
-.section sbeib
-.section sbeja
-.section sbejb
-.section sbeka
-.section sbekb
-.section sbela
-.section sbelb
-.section sbema
-.section sbemb
-.section sbena
-.section sbenb
-.section sbeoa
-.section sbeob
-.section sbepa
-.section sbepb
-.section sbeqa
-.section sbeqb
-.section sbera
-.section sberb
-.section sbesa
-.section sbesb
-.section sbeta
-.section sbetb
-.section sbeua
-.section sbeub
-.section sbeva
-.section sbevb
-.section sbewa
-.section sbewb
-.section sbexa
-.section sbexb
-.section sbeya
-.section sbeyb
-.section sbeza
-.section sbezb
-.section sbe1a
-.section sbe1b
-.section sbe2a
-.section sbe2b
-.section sbe3a
-.section sbe3b
-.section sbe4a
-.section sbe4b
-.section sbe5a
-.section sbe5b
-.section sbe6a
-.section sbe6b
-.section sbe7a
-.section sbe7b
-.section sbe8a
-.section sbe8b
-.section sbe9a
-.section sbe9b
-.section sbe0a
-.section sbe0b
-.section sbfaa
-.section sbfab
-.section sbfba
-.section sbfbb
-.section sbfca
-.section sbfcb
-.section sbfda
-.section sbfdb
-.section sbfea
-.section sbfeb
-.section sbffa
-.section sbffb
-.section sbfga
-.section sbfgb
-.section sbfha
-.section sbfhb
-.section sbfia
-.section sbfib
-.section sbfja
-.section sbfjb
-.section sbfka
-.section sbfkb
-.section sbfla
-.section sbflb
-.section sbfma
-.section sbfmb
-.section sbfna
-.section sbfnb
-.section sbfoa
-.section sbfob
-.section sbfpa
-.section sbfpb
-.section sbfqa
-.section sbfqb
-.section sbfra
-.section sbfrb
-.section sbfsa
-.section sbfsb
-.section sbfta
-.section sbftb
-.section sbfua
-.section sbfub
-.section sbfva
-.section sbfvb
-.section sbfwa
-.section sbfwb
-.section sbfxa
-.section sbfxb
-.section sbfya
-.section sbfyb
-.section sbfza
-.section sbfzb
-.section sbf1a
-.section sbf1b
-.section sbf2a
-.section sbf2b
-.section sbf3a
-.section sbf3b
-.section sbf4a
-.section sbf4b
-.section sbf5a
-.section sbf5b
-.section sbf6a
-.section sbf6b
-.section sbf7a
-.section sbf7b
-.section sbf8a
-.section sbf8b
-.section sbf9a
-.section sbf9b
-.section sbf0a
-.section sbf0b
-.section sbgaa
-.section sbgab
-.section sbgba
-.section sbgbb
-.section sbgca
-.section sbgcb
-.section sbgda
-.section sbgdb
-.section sbgea
-.section sbgeb
-.section sbgfa
-.section sbgfb
-.section sbgga
-.section sbggb
-.section sbgha
-.section sbghb
-.section sbgia
-.section sbgib
-.section sbgja
-.section sbgjb
-.section sbgka
-.section sbgkb
-.section sbgla
-.section sbglb
-.section sbgma
-.section sbgmb
-.section sbgna
-.section sbgnb
-.section sbgoa
-.section sbgob
-.section sbgpa
-.section sbgpb
-.section sbgqa
-.section sbgqb
-.section sbgra
-.section sbgrb
-.section sbgsa
-.section sbgsb
-.section sbgta
-.section sbgtb
-.section sbgua
-.section sbgub
-.section sbgva
-.section sbgvb
-.section sbgwa
-.section sbgwb
-.section sbgxa
-.section sbgxb
-.section sbgya
-.section sbgyb
-.section sbgza
-.section sbgzb
-.section sbg1a
-.section sbg1b
-.section sbg2a
-.section sbg2b
-.section sbg3a
-.section sbg3b
-.section sbg4a
-.section sbg4b
-.section sbg5a
-.section sbg5b
-.section sbg6a
-.section sbg6b
-.section sbg7a
-.section sbg7b
-.section sbg8a
-.section sbg8b
-.section sbg9a
-.section sbg9b
-.section sbg0a
-.section sbg0b
-.section sbhaa
-.section sbhab
-.section sbhba
-.section sbhbb
-.section sbhca
-.section sbhcb
-.section sbhda
-.section sbhdb
-.section sbhea
-.section sbheb
-.section sbhfa
-.section sbhfb
-.section sbhga
-.section sbhgb
-.section sbhha
-.section sbhhb
-.section sbhia
-.section sbhib
-.section sbhja
-.section sbhjb
-.section sbhka
-.section sbhkb
-.section sbhla
-.section sbhlb
-.section sbhma
-.section sbhmb
-.section sbhna
-.section sbhnb
-.section sbhoa
-.section sbhob
-.section sbhpa
-.section sbhpb
-.section sbhqa
-.section sbhqb
-.section sbhra
-.section sbhrb
-.section sbhsa
-.section sbhsb
-.section sbhta
-.section sbhtb
-.section sbhua
-.section sbhub
-.section sbhva
-.section sbhvb
-.section sbhwa
-.section sbhwb
-.section sbhxa
-.section sbhxb
-.section sbhya
-.section sbhyb
-.section sbhza
-.section sbhzb
-.section sbh1a
-.section sbh1b
-.section sbh2a
-.section sbh2b
-.section sbh3a
-.section sbh3b
-.section sbh4a
-.section sbh4b
-.section sbh5a
-.section sbh5b
-.section sbh6a
-.section sbh6b
-.section sbh7a
-.section sbh7b
-.section sbh8a
-.section sbh8b
-.section sbh9a
-.section sbh9b
-.section sbh0a
-.section sbh0b
-.section sbiaa
-.section sbiab
-.section sbiba
-.section sbibb
-.section sbica
-.section sbicb
-.section sbida
-.section sbidb
-.section sbiea
-.section sbieb
-.section sbifa
-.section sbifb
-.section sbiga
-.section sbigb
-.section sbiha
-.section sbihb
-.section sbiia
-.section sbiib
-.section sbija
-.section sbijb
-.section sbika
-.section sbikb
-.section sbila
-.section sbilb
-.section sbima
-.section sbimb
-.section sbina
-.section sbinb
-.section sbioa
-.section sbiob
-.section sbipa
-.section sbipb
-.section sbiqa
-.section sbiqb
-.section sbira
-.section sbirb
-.section sbisa
-.section sbisb
-.section sbita
-.section sbitb
-.section sbiua
-.section sbiub
-.section sbiva
-.section sbivb
-.section sbiwa
-.section sbiwb
-.section sbixa
-.section sbixb
-.section sbiya
-.section sbiyb
-.section sbiza
-.section sbizb
-.section sbi1a
-.section sbi1b
-.section sbi2a
-.section sbi2b
-.section sbi3a
-.section sbi3b
-.section sbi4a
-.section sbi4b
-.section sbi5a
-.section sbi5b
-.section sbi6a
-.section sbi6b
-.section sbi7a
-.section sbi7b
-.section sbi8a
-.section sbi8b
-.section sbi9a
-.section sbi9b
-.section sbi0a
-.section sbi0b
-.section sbjaa
-.section sbjab
-.section sbjba
-.section sbjbb
-.section sbjca
-.section sbjcb
-.section sbjda
-.section sbjdb
-.section sbjea
-.section sbjeb
-.section sbjfa
-.section sbjfb
-.section sbjga
-.section sbjgb
-.section sbjha
-.section sbjhb
-.section sbjia
-.section sbjib
-.section sbjja
-.section sbjjb
-.section sbjka
-.section sbjkb
-.section sbjla
-.section sbjlb
-.section sbjma
-.section sbjmb
-.section sbjna
-.section sbjnb
-.section sbjoa
-.section sbjob
-.section sbjpa
-.section sbjpb
-.section sbjqa
-.section sbjqb
-.section sbjra
-.section sbjrb
-.section sbjsa
-.section sbjsb
-.section sbjta
-.section sbjtb
-.section sbjua
-.section sbjub
-.section sbjva
-.section sbjvb
-.section sbjwa
-.section sbjwb
-.section sbjxa
-.section sbjxb
-.section sbjya
-.section sbjyb
-.section sbjza
-.section sbjzb
-.section sbj1a
-.section sbj1b
-.section sbj2a
-.section sbj2b
-.section sbj3a
-.section sbj3b
-.section sbj4a
-.section sbj4b
-.section sbj5a
-.section sbj5b
-.section sbj6a
-.section sbj6b
-.section sbj7a
-.section sbj7b
-.section sbj8a
-.section sbj8b
-.section sbj9a
-.section sbj9b
-.section sbj0a
-.section sbj0b
-.section sbkaa
-.section sbkab
-.section sbkba
-.section sbkbb
-.section sbkca
-.section sbkcb
-.section sbkda
-.section sbkdb
-.section sbkea
-.section sbkeb
-.section sbkfa
-.section sbkfb
-.section sbkga
-.section sbkgb
-.section sbkha
-.section sbkhb
-.section sbkia
-.section sbkib
-.section sbkja
-.section sbkjb
-.section sbkka
-.section sbkkb
-.section sbkla
-.section sbklb
-.section sbkma
-.section sbkmb
-.section sbkna
-.section sbknb
-.section sbkoa
-.section sbkob
-.section sbkpa
-.section sbkpb
-.section sbkqa
-.section sbkqb
-.section sbkra
-.section sbkrb
-.section sbksa
-.section sbksb
-.section sbkta
-.section sbktb
-.section sbkua
-.section sbkub
-.section sbkva
-.section sbkvb
-.section sbkwa
-.section sbkwb
-.section sbkxa
-.section sbkxb
-.section sbkya
-.section sbkyb
-.section sbkza
-.section sbkzb
-.section sbk1a
-.section sbk1b
-.section sbk2a
-.section sbk2b
-.section sbk3a
-.section sbk3b
-.section sbk4a
-.section sbk4b
-.section sbk5a
-.section sbk5b
-.section sbk6a
-.section sbk6b
-.section sbk7a
-.section sbk7b
-.section sbk8a
-.section sbk8b
-.section sbk9a
-.section sbk9b
-.section sbk0a
-.section sbk0b
-.section sblaa
-.section sblab
-.section sblba
-.section sblbb
-.section sblca
-.section sblcb
-.section sblda
-.section sbldb
-.section sblea
-.section sbleb
-.section sblfa
-.section sblfb
-.section sblga
-.section sblgb
-.section sblha
-.section sblhb
-.section sblia
-.section sblib
-.section sblja
-.section sbljb
-.section sblka
-.section sblkb
-.section sblla
-.section sbllb
-.section sblma
-.section sblmb
-.section sblna
-.section sblnb
-.section sbloa
-.section sblob
-.section sblpa
-.section sblpb
-.section sblqa
-.section sblqb
-.section sblra
-.section sblrb
-.section sblsa
-.section sblsb
-.section sblta
-.section sbltb
-.section sblua
-.section sblub
-.section sblva
-.section sblvb
-.section sblwa
-.section sblwb
-.section sblxa
-.section sblxb
-.section sblya
-.section sblyb
-.section sblza
-.section sblzb
-.section sbl1a
-.section sbl1b
-.section sbl2a
-.section sbl2b
-.section sbl3a
-.section sbl3b
-.section sbl4a
-.section sbl4b
-.section sbl5a
-.section sbl5b
-.section sbl6a
-.section sbl6b
-.section sbl7a
-.section sbl7b
-.section sbl8a
-.section sbl8b
-.section sbl9a
-.section sbl9b
-.section sbl0a
-.section sbl0b
-.section sbmaa
-.section sbmab
-.section sbmba
-.section sbmbb
-.section sbmca
-.section sbmcb
-.section sbmda
-.section sbmdb
-.section sbmea
-.section sbmeb
-.section sbmfa
-.section sbmfb
-.section sbmga
-.section sbmgb
-.section sbmha
-.section sbmhb
-.section sbmia
-.section sbmib
-.section sbmja
-.section sbmjb
-.section sbmka
-.section sbmkb
-.section sbmla
-.section sbmlb
-.section sbmma
-.section sbmmb
-.section sbmna
-.section sbmnb
-.section sbmoa
-.section sbmob
-.section sbmpa
-.section sbmpb
-.section sbmqa
-.section sbmqb
-.section sbmra
-.section sbmrb
-.section sbmsa
-.section sbmsb
-.section sbmta
-.section sbmtb
-.section sbmua
-.section sbmub
-.section sbmva
-.section sbmvb
-.section sbmwa
-.section sbmwb
-.section sbmxa
-.section sbmxb
-.section sbmya
-.section sbmyb
-.section sbmza
-.section sbmzb
-.section sbm1a
-.section sbm1b
-.section sbm2a
-.section sbm2b
-.section sbm3a
-.section sbm3b
-.section sbm4a
-.section sbm4b
-.section sbm5a
-.section sbm5b
-.section sbm6a
-.section sbm6b
-.section sbm7a
-.section sbm7b
-.section sbm8a
-.section sbm8b
-.section sbm9a
-.section sbm9b
-.section sbm0a
-.section sbm0b
-.section sbnaa
-.section sbnab
-.section sbnba
-.section sbnbb
-.section sbnca
-.section sbncb
-.section sbnda
-.section sbndb
-.section sbnea
-.section sbneb
-.section sbnfa
-.section sbnfb
-.section sbnga
-.section sbngb
-.section sbnha
-.section sbnhb
-.section sbnia
-.section sbnib
-.section sbnja
-.section sbnjb
-.section sbnka
-.section sbnkb
-.section sbnla
-.section sbnlb
-.section sbnma
-.section sbnmb
-.section sbnna
-.section sbnnb
-.section sbnoa
-.section sbnob
-.section sbnpa
-.section sbnpb
-.section sbnqa
-.section sbnqb
-.section sbnra
-.section sbnrb
-.section sbnsa
-.section sbnsb
-.section sbnta
-.section sbntb
-.section sbnua
-.section sbnub
-.section sbnva
-.section sbnvb
-.section sbnwa
-.section sbnwb
-.section sbnxa
-.section sbnxb
-.section sbnya
-.section sbnyb
-.section sbnza
-.section sbnzb
-.section sbn1a
-.section sbn1b
-.section sbn2a
-.section sbn2b
-.section sbn3a
-.section sbn3b
-.section sbn4a
-.section sbn4b
-.section sbn5a
-.section sbn5b
-.section sbn6a
-.section sbn6b
-.section sbn7a
-.section sbn7b
-.section sbn8a
-.section sbn8b
-.section sbn9a
-.section sbn9b
-.section sbn0a
-.section sbn0b
-.section sboaa
-.section sboab
-.section sboba
-.section sbobb
-.section sboca
-.section sbocb
-.section sboda
-.section sbodb
-.section sboea
-.section sboeb
-.section sbofa
-.section sbofb
-.section sboga
-.section sbogb
-.section sboha
-.section sbohb
-.section sboia
-.section sboib
-.section sboja
-.section sbojb
-.section sboka
-.section sbokb
-.section sbola
-.section sbolb
-.section sboma
-.section sbomb
-.section sbona
-.section sbonb
-.section sbooa
-.section sboob
-.section sbopa
-.section sbopb
-.section sboqa
-.section sboqb
-.section sbora
-.section sborb
-.section sbosa
-.section sbosb
-.section sbota
-.section sbotb
-.section sboua
-.section sboub
-.section sbova
-.section sbovb
-.section sbowa
-.section sbowb
-.section sboxa
-.section sboxb
-.section sboya
-.section sboyb
-.section sboza
-.section sbozb
-.section sbo1a
-.section sbo1b
-.section sbo2a
-.section sbo2b
-.section sbo3a
-.section sbo3b
-.section sbo4a
-.section sbo4b
-.section sbo5a
-.section sbo5b
-.section sbo6a
-.section sbo6b
-.section sbo7a
-.section sbo7b
-.section sbo8a
-.section sbo8b
-.section sbo9a
-.section sbo9b
-.section sbo0a
-.section sbo0b
-.section sbpaa
-.section sbpab
-.section sbpba
-.section sbpbb
-.section sbpca
-.section sbpcb
-.section sbpda
-.section sbpdb
-.section sbpea
-.section sbpeb
-.section sbpfa
-.section sbpfb
-.section sbpga
-.section sbpgb
-.section sbpha
-.section sbphb
-.section sbpia
-.section sbpib
-.section sbpja
-.section sbpjb
-.section sbpka
-.section sbpkb
-.section sbpla
-.section sbplb
-.section sbpma
-.section sbpmb
-.section sbpna
-.section sbpnb
-.section sbpoa
-.section sbpob
-.section sbppa
-.section sbppb
-.section sbpqa
-.section sbpqb
-.section sbpra
-.section sbprb
-.section sbpsa
-.section sbpsb
-.section sbpta
-.section sbptb
-.section sbpua
-.section sbpub
-.section sbpva
-.section sbpvb
-.section sbpwa
-.section sbpwb
-.section sbpxa
-.section sbpxb
-.section sbpya
-.section sbpyb
-.section sbpza
-.section sbpzb
-.section sbp1a
-.section sbp1b
-.section sbp2a
-.section sbp2b
-.section sbp3a
-.section sbp3b
-.section sbp4a
-.section sbp4b
-.section sbp5a
-.section sbp5b
-.section sbp6a
-.section sbp6b
-.section sbp7a
-.section sbp7b
-.section sbp8a
-.section sbp8b
-.section sbp9a
-.section sbp9b
-.section sbp0a
-.section sbp0b
-.section sbqaa
-.section sbqab
-.section sbqba
-.section sbqbb
-.section sbqca
-.section sbqcb
-.section sbqda
-.section sbqdb
-.section sbqea
-.section sbqeb
-.section sbqfa
-.section sbqfb
-.section sbqga
-.section sbqgb
-.section sbqha
-.section sbqhb
-.section sbqia
-.section sbqib
-.section sbqja
-.section sbqjb
-.section sbqka
-.section sbqkb
-.section sbqla
-.section sbqlb
-.section sbqma
-.section sbqmb
-.section sbqna
-.section sbqnb
-.section sbqoa
-.section sbqob
-.section sbqpa
-.section sbqpb
-.section sbqqa
-.section sbqqb
-.section sbqra
-.section sbqrb
-.section sbqsa
-.section sbqsb
-.section sbqta
-.section sbqtb
-.section sbqua
-.section sbqub
-.section sbqva
-.section sbqvb
-.section sbqwa
-.section sbqwb
-.section sbqxa
-.section sbqxb
-.section sbqya
-.section sbqyb
-.section sbqza
-.section sbqzb
-.section sbq1a
-.section sbq1b
-.section sbq2a
-.section sbq2b
-.section sbq3a
-.section sbq3b
-.section sbq4a
-.section sbq4b
-.section sbq5a
-.section sbq5b
-.section sbq6a
-.section sbq6b
-.section sbq7a
-.section sbq7b
-.section sbq8a
-.section sbq8b
-.section sbq9a
-.section sbq9b
-.section sbq0a
-.section sbq0b
-.section sbraa
-.section sbrab
-.section sbrba
-.section sbrbb
-.section sbrca
-.section sbrcb
-.section sbrda
-.section sbrdb
-.section sbrea
-.section sbreb
-.section sbrfa
-.section sbrfb
-.section sbrga
-.section sbrgb
-.section sbrha
-.section sbrhb
-.section sbria
-.section sbrib
-.section sbrja
-.section sbrjb
-.section sbrka
-.section sbrkb
-.section sbrla
-.section sbrlb
-.section sbrma
-.section sbrmb
-.section sbrna
-.section sbrnb
-.section sbroa
-.section sbrob
-.section sbrpa
-.section sbrpb
-.section sbrqa
-.section sbrqb
-.section sbrra
-.section sbrrb
-.section sbrsa
-.section sbrsb
-.section sbrta
-.section sbrtb
-.section sbrua
-.section sbrub
-.section sbrva
-.section sbrvb
-.section sbrwa
-.section sbrwb
-.section sbrxa
-.section sbrxb
-.section sbrya
-.section sbryb
-.section sbrza
-.section sbrzb
-.section sbr1a
-.section sbr1b
-.section sbr2a
-.section sbr2b
-.section sbr3a
-.section sbr3b
-.section sbr4a
-.section sbr4b
-.section sbr5a
-.section sbr5b
-.section sbr6a
-.section sbr6b
-.section sbr7a
-.section sbr7b
-.section sbr8a
-.section sbr8b
-.section sbr9a
-.section sbr9b
-.section sbr0a
-.section sbr0b
-.section sbsaa
-.section sbsab
-.section sbsba
-.section sbsbb
-.section sbsca
-.section sbscb
-.section sbsda
-.section sbsdb
-.section sbsea
-.section sbseb
-.section sbsfa
-.section sbsfb
-.section sbsga
-.section sbsgb
-.section sbsha
-.section sbshb
-.section sbsia
-.section sbsib
-.section sbsja
-.section sbsjb
-.section sbska
-.section sbskb
-.section sbsla
-.section sbslb
-.section sbsma
-.section sbsmb
-.section sbsna
-.section sbsnb
-.section sbsoa
-.section sbsob
-.section sbspa
-.section sbspb
-.section sbsqa
-.section sbsqb
-.section sbsra
-.section sbsrb
-.section sbssa
-.section sbssb
-.section sbsta
-.section sbstb
-.section sbsua
-.section sbsub
-.section sbsva
-.section sbsvb
-.section sbswa
-.section sbswb
-.section sbsxa
-.section sbsxb
-.section sbsya
-.section sbsyb
-.section sbsza
-.section sbszb
-.section sbs1a
-.section sbs1b
-.section sbs2a
-.section sbs2b
-.section sbs3a
-.section sbs3b
-.section sbs4a
-.section sbs4b
-.section sbs5a
-.section sbs5b
-.section sbs6a
-.section sbs6b
-.section sbs7a
-.section sbs7b
-.section sbs8a
-.section sbs8b
-.section sbs9a
-.section sbs9b
-.section sbs0a
-.section sbs0b
-.section sbtaa
-.section sbtab
-.section sbtba
-.section sbtbb
-.section sbtca
-.section sbtcb
-.section sbtda
-.section sbtdb
-.section sbtea
-.section sbteb
-.section sbtfa
-.section sbtfb
-.section sbtga
-.section sbtgb
-.section sbtha
-.section sbthb
-.section sbtia
-.section sbtib
-.section sbtja
-.section sbtjb
-.section sbtka
-.section sbtkb
-.section sbtla
-.section sbtlb
-.section sbtma
-.section sbtmb
-.section sbtna
-.section sbtnb
-.section sbtoa
-.section sbtob
-.section sbtpa
-.section sbtpb
-.section sbtqa
-.section sbtqb
-.section sbtra
-.section sbtrb
-.section sbtsa
-.section sbtsb
-.section sbtta
-.section sbttb
-.section sbtua
-.section sbtub
-.section sbtva
-.section sbtvb
-.section sbtwa
-.section sbtwb
-.section sbtxa
-.section sbtxb
-.section sbtya
-.section sbtyb
-.section sbtza
-.section sbtzb
-.section sbt1a
-.section sbt1b
-.section sbt2a
-.section sbt2b
-.section sbt3a
-.section sbt3b
-.section sbt4a
-.section sbt4b
-.section sbt5a
-.section sbt5b
-.section sbt6a
-.section sbt6b
-.section sbt7a
-.section sbt7b
-.section sbt8a
-.section sbt8b
-.section sbt9a
-.section sbt9b
-.section sbt0a
-.section sbt0b
-.section sbuaa
-.section sbuab
-.section sbuba
-.section sbubb
-.section sbuca
-.section sbucb
-.section sbuda
-.section sbudb
-.section sbuea
-.section sbueb
-.section sbufa
-.section sbufb
-.section sbuga
-.section sbugb
-.section sbuha
-.section sbuhb
-.section sbuia
-.section sbuib
-.section sbuja
-.section sbujb
-.section sbuka
-.section sbukb
-.section sbula
-.section sbulb
-.section sbuma
-.section sbumb
-.section sbuna
-.section sbunb
-.section sbuoa
-.section sbuob
-.section sbupa
-.section sbupb
-.section sbuqa
-.section sbuqb
-.section sbura
-.section sburb
-.section sbusa
-.section sbusb
-.section sbuta
-.section sbutb
-.section sbuua
-.section sbuub
-.section sbuva
-.section sbuvb
-.section sbuwa
-.section sbuwb
-.section sbuxa
-.section sbuxb
-.section sbuya
-.section sbuyb
-.section sbuza
-.section sbuzb
-.section sbu1a
-.section sbu1b
-.section sbu2a
-.section sbu2b
-.section sbu3a
-.section sbu3b
-.section sbu4a
-.section sbu4b
-.section sbu5a
-.section sbu5b
-.section sbu6a
-.section sbu6b
-.section sbu7a
-.section sbu7b
-.section sbu8a
-.section sbu8b
-.section sbu9a
-.section sbu9b
-.section sbu0a
-.section sbu0b
-.section sbvaa
-.section sbvab
-.section sbvba
-.section sbvbb
-.section sbvca
-.section sbvcb
-.section sbvda
-.section sbvdb
-.section sbvea
-.section sbveb
-.section sbvfa
-.section sbvfb
-.section sbvga
-.section sbvgb
-.section sbvha
-.section sbvhb
-.section sbvia
-.section sbvib
-.section sbvja
-.section sbvjb
-.section sbvka
-.section sbvkb
-.section sbvla
-.section sbvlb
-.section sbvma
-.section sbvmb
-.section sbvna
-.section sbvnb
-.section sbvoa
-.section sbvob
-.section sbvpa
-.section sbvpb
-.section sbvqa
-.section sbvqb
-.section sbvra
-.section sbvrb
-.section sbvsa
-.section sbvsb
-.section sbvta
-.section sbvtb
-.section sbvua
-.section sbvub
-.section sbvva
-.section sbvvb
-.section sbvwa
-.section sbvwb
-.section sbvxa
-.section sbvxb
-.section sbvya
-.section sbvyb
-.section sbvza
-.section sbvzb
-.section sbv1a
-.section sbv1b
-.section sbv2a
-.section sbv2b
-.section sbv3a
-.section sbv3b
-.section sbv4a
-.section sbv4b
-.section sbv5a
-.section sbv5b
-.section sbv6a
-.section sbv6b
-.section sbv7a
-.section sbv7b
-.section sbv8a
-.section sbv8b
-.section sbv9a
-.section sbv9b
-.section sbv0a
-.section sbv0b
-.section sbwaa
-.section sbwab
-.section sbwba
-.section sbwbb
-.section sbwca
-.section sbwcb
-.section sbwda
-.section sbwdb
-.section sbwea
-.section sbweb
-.section sbwfa
-.section sbwfb
-.section sbwga
-.section sbwgb
-.section sbwha
-.section sbwhb
-.section sbwia
-.section sbwib
-.section sbwja
-.section sbwjb
-.section sbwka
-.section sbwkb
-.section sbwla
-.section sbwlb
-.section sbwma
-.section sbwmb
-.section sbwna
-.section sbwnb
-.section sbwoa
-.section sbwob
-.section sbwpa
-.section sbwpb
-.section sbwqa
-.section sbwqb
-.section sbwra
-.section sbwrb
-.section sbwsa
-.section sbwsb
-.section sbwta
-.section sbwtb
-.section sbwua
-.section sbwub
-.section sbwva
-.section sbwvb
-.section sbwwa
-.section sbwwb
-.section sbwxa
-.section sbwxb
-.section sbwya
-.section sbwyb
-.section sbwza
-.section sbwzb
-.section sbw1a
-.section sbw1b
-.section sbw2a
-.section sbw2b
-.section sbw3a
-.section sbw3b
-.section sbw4a
-.section sbw4b
-.section sbw5a
-.section sbw5b
-.section sbw6a
-.section sbw6b
-.section sbw7a
-.section sbw7b
-.section sbw8a
-.section sbw8b
-.section sbw9a
-.section sbw9b
-.section sbw0a
-.section sbw0b
-.section sbxaa
-.section sbxab
-.section sbxba
-.section sbxbb
-.section sbxca
-.section sbxcb
-.section sbxda
-.section sbxdb
-.section sbxea
-.section sbxeb
-.section sbxfa
-.section sbxfb
-.section sbxga
-.section sbxgb
-.section sbxha
-.section sbxhb
-.section sbxia
-.section sbxib
-.section sbxja
-.section sbxjb
-.section sbxka
-.section sbxkb
-.section sbxla
-.section sbxlb
-.section sbxma
-.section sbxmb
-.section sbxna
-.section sbxnb
-.section sbxoa
-.section sbxob
-.section sbxpa
-.section sbxpb
-.section sbxqa
-.section sbxqb
-.section sbxra
-.section sbxrb
-.section sbxsa
-.section sbxsb
-.section sbxta
-.section sbxtb
-.section sbxua
-.section sbxub
-.section sbxva
-.section sbxvb
-.section sbxwa
-.section sbxwb
-.section sbxxa
-.section sbxxb
-.section sbxya
-.section sbxyb
-.section sbxza
-.section sbxzb
-.section sbx1a
-.section sbx1b
-.section sbx2a
-.section sbx2b
-.section sbx3a
-.section sbx3b
-.section sbx4a
-.section sbx4b
-.section sbx5a
-.section sbx5b
-.section sbx6a
-.section sbx6b
-.section sbx7a
-.section sbx7b
-.section sbx8a
-.section sbx8b
-.section sbx9a
-.section sbx9b
-.section sbx0a
-.section sbx0b
-.section sbyaa
-.section sbyab
-.section sbyba
-.section sbybb
-.section sbyca
-.section sbycb
-.section sbyda
-.section sbydb
-.section sbyea
-.section sbyeb
-.section sbyfa
-.section sbyfb
-.section sbyga
-.section sbygb
-.section sbyha
-.section sbyhb
-.section sbyia
-.section sbyib
-.section sbyja
-.section sbyjb
-.section sbyka
-.section sbykb
-.section sbyla
-.section sbylb
-.section sbyma
-.section sbymb
-.section sbyna
-.section sbynb
-.section sbyoa
-.section sbyob
-.section sbypa
-.section sbypb
-.section sbyqa
-.section sbyqb
-.section sbyra
-.section sbyrb
-.section sbysa
-.section sbysb
-.section sbyta
-.section sbytb
-.section sbyua
-.section sbyub
-.section sbyva
-.section sbyvb
-.section sbywa
-.section sbywb
-.section sbyxa
-.section sbyxb
-.section sbyya
-.section sbyyb
-.section sbyza
-.section sbyzb
-.section sby1a
-.section sby1b
-.section sby2a
-.section sby2b
-.section sby3a
-.section sby3b
-.section sby4a
-.section sby4b
-.section sby5a
-.section sby5b
-.section sby6a
-.section sby6b
-.section sby7a
-.section sby7b
-.section sby8a
-.section sby8b
-.section sby9a
-.section sby9b
-.section sby0a
-.section sby0b
-.section sbzaa
-.section sbzab
-.section sbzba
-.section sbzbb
-.section sbzca
-.section sbzcb
-.section sbzda
-.section sbzdb
-.section sbzea
-.section sbzeb
-.section sbzfa
-.section sbzfb
-.section sbzga
-.section sbzgb
-.section sbzha
-.section sbzhb
-.section sbzia
-.section sbzib
-.section sbzja
-.section sbzjb
-.section sbzka
-.section sbzkb
-.section sbzla
-.section sbzlb
-.section sbzma
-.section sbzmb
-.section sbzna
-.section sbznb
-.section sbzoa
-.section sbzob
-.section sbzpa
-.section sbzpb
-.section sbzqa
-.section sbzqb
-.section sbzra
-.section sbzrb
-.section sbzsa
-.section sbzsb
-.section sbzta
-.section sbztb
-.section sbzua
-.section sbzub
-.section sbzva
-.section sbzvb
-.section sbzwa
-.section sbzwb
-.section sbzxa
-.section sbzxb
-.section sbzya
-.section sbzyb
-.section sbzza
-.section sbzzb
-.section sbz1a
-.section sbz1b
-.section sbz2a
-.section sbz2b
-.section sbz3a
-.section sbz3b
-.section sbz4a
-.section sbz4b
-.section sbz5a
-.section sbz5b
-.section sbz6a
-.section sbz6b
-.section sbz7a
-.section sbz7b
-.section sbz8a
-.section sbz8b
-.section sbz9a
-.section sbz9b
-.section sbz0a
-.section sbz0b
-.section sb1aa
-.section sb1ab
-.section sb1ba
-.section sb1bb
-.section sb1ca
-.section sb1cb
-.section sb1da
-.section sb1db
-.section sb1ea
-.section sb1eb
-.section sb1fa
-.section sb1fb
-.section sb1ga
-.section sb1gb
-.section sb1ha
-.section sb1hb
-.section sb1ia
-.section sb1ib
-.section sb1ja
-.section sb1jb
-.section sb1ka
-.section sb1kb
-.section sb1la
-.section sb1lb
-.section sb1ma
-.section sb1mb
-.section sb1na
-.section sb1nb
-.section sb1oa
-.section sb1ob
-.section sb1pa
-.section sb1pb
-.section sb1qa
-.section sb1qb
-.section sb1ra
-.section sb1rb
-.section sb1sa
-.section sb1sb
-.section sb1ta
-.section sb1tb
-.section sb1ua
-.section sb1ub
-.section sb1va
-.section sb1vb
-.section sb1wa
-.section sb1wb
-.section sb1xa
-.section sb1xb
-.section sb1ya
-.section sb1yb
-.section sb1za
-.section sb1zb
-.section sb11a
-.section sb11b
-.section sb12a
-.section sb12b
-.section sb13a
-.section sb13b
-.section sb14a
-.section sb14b
-.section sb15a
-.section sb15b
-.section sb16a
-.section sb16b
-.section sb17a
-.section sb17b
-.section sb18a
-.section sb18b
-.section sb19a
-.section sb19b
-.section sb10a
-.section sb10b
-.section sb2aa
-.section sb2ab
-.section sb2ba
-.section sb2bb
-.section sb2ca
-.section sb2cb
-.section sb2da
-.section sb2db
-.section sb2ea
-.section sb2eb
-.section sb2fa
-.section sb2fb
-.section sb2ga
-.section sb2gb
-.section sb2ha
-.section sb2hb
-.section sb2ia
-.section sb2ib
-.section sb2ja
-.section sb2jb
-.section sb2ka
-.section sb2kb
-.section sb2la
-.section sb2lb
-.section sb2ma
-.section sb2mb
-.section sb2na
-.section sb2nb
-.section sb2oa
-.section sb2ob
-.section sb2pa
-.section sb2pb
-.section sb2qa
-.section sb2qb
-.section sb2ra
-.section sb2rb
-.section sb2sa
-.section sb2sb
-.section sb2ta
-.section sb2tb
-.section sb2ua
-.section sb2ub
-.section sb2va
-.section sb2vb
-.section sb2wa
-.section sb2wb
-.section sb2xa
-.section sb2xb
-.section sb2ya
-.section sb2yb
-.section sb2za
-.section sb2zb
-.section sb21a
-.section sb21b
-.section sb22a
-.section sb22b
-.section sb23a
-.section sb23b
-.section sb24a
-.section sb24b
-.section sb25a
-.section sb25b
-.section sb26a
-.section sb26b
-.section sb27a
-.section sb27b
-.section sb28a
-.section sb28b
-.section sb29a
-.section sb29b
-.section sb20a
-.section sb20b
-.section sb3aa
-.section sb3ab
-.section sb3ba
-.section sb3bb
-.section sb3ca
-.section sb3cb
-.section sb3da
-.section sb3db
-.section sb3ea
-.section sb3eb
-.section sb3fa
-.section sb3fb
-.section sb3ga
-.section sb3gb
-.section sb3ha
-.section sb3hb
-.section sb3ia
-.section sb3ib
-.section sb3ja
-.section sb3jb
-.section sb3ka
-.section sb3kb
-.section sb3la
-.section sb3lb
-.section sb3ma
-.section sb3mb
-.section sb3na
-.section sb3nb
-.section sb3oa
-.section sb3ob
-.section sb3pa
-.section sb3pb
-.section sb3qa
-.section sb3qb
-.section sb3ra
-.section sb3rb
-.section sb3sa
-.section sb3sb
-.section sb3ta
-.section sb3tb
-.section sb3ua
-.section sb3ub
-.section sb3va
-.section sb3vb
-.section sb3wa
-.section sb3wb
-.section sb3xa
-.section sb3xb
-.section sb3ya
-.section sb3yb
-.section sb3za
-.section sb3zb
-.section sb31a
-.section sb31b
-.section sb32a
-.section sb32b
-.section sb33a
-.section sb33b
-.section sb34a
-.section sb34b
-.section sb35a
-.section sb35b
-.section sb36a
-.section sb36b
-.section sb37a
-.section sb37b
-.section sb38a
-.section sb38b
-.section sb39a
-.section sb39b
-.section sb30a
-.section sb30b
-.section sb4aa
-.section sb4ab
-.section sb4ba
-.section sb4bb
-.section sb4ca
-.section sb4cb
-.section sb4da
-.section sb4db
-.section sb4ea
-.section sb4eb
-.section sb4fa
-.section sb4fb
-.section sb4ga
-.section sb4gb
-.section sb4ha
-.section sb4hb
-.section sb4ia
-.section sb4ib
-.section sb4ja
-.section sb4jb
-.section sb4ka
-.section sb4kb
-.section sb4la
-.section sb4lb
-.section sb4ma
-.section sb4mb
-.section sb4na
-.section sb4nb
-.section sb4oa
-.section sb4ob
-.section sb4pa
-.section sb4pb
-.section sb4qa
-.section sb4qb
-.section sb4ra
-.section sb4rb
-.section sb4sa
-.section sb4sb
-.section sb4ta
-.section sb4tb
-.section sb4ua
-.section sb4ub
-.section sb4va
-.section sb4vb
-.section sb4wa
-.section sb4wb
-.section sb4xa
-.section sb4xb
-.section sb4ya
-.section sb4yb
-.section sb4za
-.section sb4zb
-.section sb41a
-.section sb41b
-.section sb42a
-.section sb42b
-.section sb43a
-.section sb43b
-.section sb44a
-.section sb44b
-.section sb45a
-.section sb45b
-.section sb46a
-.section sb46b
-.section sb47a
-.section sb47b
-.section sb48a
-.section sb48b
-.section sb49a
-.section sb49b
-.section sb40a
-.section sb40b
-.section sb5aa
-.section sb5ab
-.section sb5ba
-.section sb5bb
-.section sb5ca
-.section sb5cb
-.section sb5da
-.section sb5db
-.section sb5ea
-.section sb5eb
-.section sb5fa
-.section sb5fb
-.section sb5ga
-.section sb5gb
-.section sb5ha
-.section sb5hb
-.section sb5ia
-.section sb5ib
-.section sb5ja
-.section sb5jb
-.section sb5ka
-.section sb5kb
-.section sb5la
-.section sb5lb
-.section sb5ma
-.section sb5mb
-.section sb5na
-.section sb5nb
-.section sb5oa
-.section sb5ob
-.section sb5pa
-.section sb5pb
-.section sb5qa
-.section sb5qb
-.section sb5ra
-.section sb5rb
-.section sb5sa
-.section sb5sb
-.section sb5ta
-.section sb5tb
-.section sb5ua
-.section sb5ub
-.section sb5va
-.section sb5vb
-.section sb5wa
-.section sb5wb
-.section sb5xa
-.section sb5xb
-.section sb5ya
-.section sb5yb
-.section sb5za
-.section sb5zb
-.section sb51a
-.section sb51b
-.section sb52a
-.section sb52b
-.section sb53a
-.section sb53b
-.section sb54a
-.section sb54b
-.section sb55a
-.section sb55b
-.section sb56a
-.section sb56b
-.section sb57a
-.section sb57b
-.section sb58a
-.section sb58b
-.section sb59a
-.section sb59b
-.section sb50a
-.section sb50b
-.section sb6aa
-.section sb6ab
-.section sb6ba
-.section sb6bb
-.section sb6ca
-.section sb6cb
-.section sb6da
-.section sb6db
-.section sb6ea
-.section sb6eb
-.section sb6fa
-.section sb6fb
-.section sb6ga
-.section sb6gb
-.section sb6ha
-.section sb6hb
-.section sb6ia
-.section sb6ib
-.section sb6ja
-.section sb6jb
-.section sb6ka
-.section sb6kb
-.section sb6la
-.section sb6lb
-.section sb6ma
-.section sb6mb
-.section sb6na
-.section sb6nb
-.section sb6oa
-.section sb6ob
-.section sb6pa
-.section sb6pb
-.section sb6qa
-.section sb6qb
-.section sb6ra
-.section sb6rb
-.section sb6sa
-.section sb6sb
-.section sb6ta
-.section sb6tb
-.section sb6ua
-.section sb6ub
-.section sb6va
-.section sb6vb
-.section sb6wa
-.section sb6wb
-.section sb6xa
-.section sb6xb
-.section sb6ya
-.section sb6yb
-.section sb6za
-.section sb6zb
-.section sb61a
-.section sb61b
-.section sb62a
-.section sb62b
-.section sb63a
-.section sb63b
-.section sb64a
-.section sb64b
-.section sb65a
-.section sb65b
-.section sb66a
-.section sb66b
-.section sb67a
-.section sb67b
-.section sb68a
-.section sb68b
-.section sb69a
-.section sb69b
-.section sb60a
-.section sb60b
-.section sb7aa
-.section sb7ab
-.section sb7ba
-.section sb7bb
-.section sb7ca
-.section sb7cb
-.section sb7da
-.section sb7db
-.section sb7ea
-.section sb7eb
-.section sb7fa
-.section sb7fb
-.section sb7ga
-.section sb7gb
-.section sb7ha
-.section sb7hb
-.section sb7ia
-.section sb7ib
-.section sb7ja
-.section sb7jb
-.section sb7ka
-.section sb7kb
-.section sb7la
-.section sb7lb
-.section sb7ma
-.section sb7mb
-.section sb7na
-.section sb7nb
-.section sb7oa
-.section sb7ob
-.section sb7pa
-.section sb7pb
-.section sb7qa
-.section sb7qb
-.section sb7ra
-.section sb7rb
-.section sb7sa
-.section sb7sb
-.section sb7ta
-.section sb7tb
-.section sb7ua
-.section sb7ub
-.section sb7va
-.section sb7vb
-.section sb7wa
-.section sb7wb
-.section sb7xa
-.section sb7xb
-.section sb7ya
-.section sb7yb
-.section sb7za
-.section sb7zb
-.section sb71a
-.section sb71b
-.section sb72a
-.section sb72b
-.section sb73a
-.section sb73b
-.section sb74a
-.section sb74b
-.section sb75a
-.section sb75b
-.section sb76a
-.section sb76b
-.section sb77a
-.section sb77b
-.section sb78a
-.section sb78b
-.section sb79a
-.section sb79b
-.section sb70a
-.section sb70b
-.section sb8aa
-.section sb8ab
-.section sb8ba
-.section sb8bb
-.section sb8ca
-.section sb8cb
-.section sb8da
-.section sb8db
-.section sb8ea
-.section sb8eb
-.section sb8fa
-.section sb8fb
-.section sb8ga
-.section sb8gb
-.section sb8ha
-.section sb8hb
-.section sb8ia
-.section sb8ib
-.section sb8ja
-.section sb8jb
-.section sb8ka
-.section sb8kb
-.section sb8la
-.section sb8lb
-.section sb8ma
-.section sb8mb
-.section sb8na
-.section sb8nb
-.section sb8oa
-.section sb8ob
-.section sb8pa
-.section sb8pb
-.section sb8qa
-.section sb8qb
-.section sb8ra
-.section sb8rb
-.section sb8sa
-.section sb8sb
-.section sb8ta
-.section sb8tb
-.section sb8ua
-.section sb8ub
-.section sb8va
-.section sb8vb
-.section sb8wa
-.section sb8wb
-.section sb8xa
-.section sb8xb
-.section sb8ya
-.section sb8yb
-.section sb8za
-.section sb8zb
-.section sb81a
-.section sb81b
-.section sb82a
-.section sb82b
-.section sb83a
-.section sb83b
-.section sb84a
-.section sb84b
-.section sb85a
-.section sb85b
-.section sb86a
-.section sb86b
-.section sb87a
-.section sb87b
-.section sb88a
-.section sb88b
-.section sb89a
-.section sb89b
-.section sb80a
-.section sb80b
-.section sb9aa
-.section sb9ab
-.section sb9ba
-.section sb9bb
-.section sb9ca
-.section sb9cb
-.section sb9da
-.section sb9db
-.section sb9ea
-.section sb9eb
-.section sb9fa
-.section sb9fb
-.section sb9ga
-.section sb9gb
-.section sb9ha
-.section sb9hb
-.section sb9ia
-.section sb9ib
-.section sb9ja
-.section sb9jb
-.section sb9ka
-.section sb9kb
-.section sb9la
-.section sb9lb
-.section sb9ma
-.section sb9mb
-.section sb9na
-.section sb9nb
-.section sb9oa
-.section sb9ob
-.section sb9pa
-.section sb9pb
-.section sb9qa
-.section sb9qb
-.section sb9ra
-.section sb9rb
-.section sb9sa
-.section sb9sb
-.section sb9ta
-.section sb9tb
-.section sb9ua
-.section sb9ub
-.section sb9va
-.section sb9vb
-.section sb9wa
-.section sb9wb
-.section sb9xa
-.section sb9xb
-.section sb9ya
-.section sb9yb
-.section sb9za
-.section sb9zb
-.section sb91a
-.section sb91b
-.section sb92a
-.section sb92b
-.section sb93a
-.section sb93b
-.section sb94a
-.section sb94b
-.section sb95a
-.section sb95b
-.section sb96a
-.section sb96b
-.section sb97a
-.section sb97b
-.section sb98a
-.section sb98b
-.section sb99a
-.section sb99b
-.section sb90a
-.section sb90b
-.section sb0aa
-.section sb0ab
-.section sb0ba
-.section sb0bb
-.section sb0ca
-.section sb0cb
-.section sb0da
-.section sb0db
-.section sb0ea
-.section sb0eb
-.section sb0fa
-.section sb0fb
-.section sb0ga
-.section sb0gb
-.section sb0ha
-.section sb0hb
-.section sb0ia
-.section sb0ib
-.section sb0ja
-.section sb0jb
-.section sb0ka
-.section sb0kb
-.section sb0la
-.section sb0lb
-.section sb0ma
-.section sb0mb
-.section sb0na
-.section sb0nb
-.section sb0oa
-.section sb0ob
-.section sb0pa
-.section sb0pb
-.section sb0qa
-.section sb0qb
-.section sb0ra
-.section sb0rb
-.section sb0sa
-.section sb0sb
-.section sb0ta
-.section sb0tb
-.section sb0ua
-.section sb0ub
-.section sb0va
-.section sb0vb
-.section sb0wa
-.section sb0wb
-.section sb0xa
-.section sb0xb
-.section sb0ya
-.section sb0yb
-.section sb0za
-.section sb0zb
-.section sb01a
-.section sb01b
-.section sb02a
-.section sb02b
-.section sb03a
-.section sb03b
-.section sb04a
-.section sb04b
-.section sb05a
-.section sb05b
-.section sb06a
-.section sb06b
-.section sb07a
-.section sb07b
-.section sb08a
-.section sb08b
-.section sb09a
-.section sb09b
-.section sb00a
-.section sb00b
-.section scaaa
-.section scaab
-.section scaba
-.section scabb
-.section scaca
-.section scacb
-.section scada
-.section scadb
-.section scaea
-.section scaeb
-.section scafa
-.section scafb
-.section scaga
-.section scagb
-.section scaha
-.section scahb
-.section scaia
-.section scaib
-.section scaja
-.section scajb
-.section scaka
-.section scakb
-.section scala
-.section scalb
-.section scama
-.section scamb
-.section scana
-.section scanb
-.section scaoa
-.section scaob
-.section scapa
-.section scapb
-.section scaqa
-.section scaqb
-.section scara
-.section scarb
-.section scasa
-.section scasb
-.section scata
-.section scatb
-.section scaua
-.section scaub
-.section scava
-.section scavb
-.section scawa
-.section scawb
-.section scaxa
-.section scaxb
-.section scaya
-.section scayb
-.section scaza
-.section scazb
-.section sca1a
-.section sca1b
-.section sca2a
-.section sca2b
-.section sca3a
-.section sca3b
-.section sca4a
-.section sca4b
-.section sca5a
-.section sca5b
-.section sca6a
-.section sca6b
-.section sca7a
-.section sca7b
-.section sca8a
-.section sca8b
-.section sca9a
-.section sca9b
-.section sca0a
-.section sca0b
-.section scbaa
-.section scbab
-.section scbba
-.section scbbb
-.section scbca
-.section scbcb
-.section scbda
-.section scbdb
-.section scbea
-.section scbeb
-.section scbfa
-.section scbfb
-.section scbga
-.section scbgb
-.section scbha
-.section scbhb
-.section scbia
-.section scbib
-.section scbja
-.section scbjb
-.section scbka
-.section scbkb
-.section scbla
-.section scblb
-.section scbma
-.section scbmb
-.section scbna
-.section scbnb
-.section scboa
-.section scbob
-.section scbpa
-.section scbpb
-.section scbqa
-.section scbqb
-.section scbra
-.section scbrb
-.section scbsa
-.section scbsb
-.section scbta
-.section scbtb
-.section scbua
-.section scbub
-.section scbva
-.section scbvb
-.section scbwa
-.section scbwb
-.section scbxa
-.section scbxb
-.section scbya
-.section scbyb
-.section scbza
-.section scbzb
-.section scb1a
-.section scb1b
-.section scb2a
-.section scb2b
-.section scb3a
-.section scb3b
-.section scb4a
-.section scb4b
-.section scb5a
-.section scb5b
-.section scb6a
-.section scb6b
-.section scb7a
-.section scb7b
-.section scb8a
-.section scb8b
-.section scb9a
-.section scb9b
-.section scb0a
-.section scb0b
-.section sccaa
-.section sccab
-.section sccba
-.section sccbb
-.section sccca
-.section scccb
-.section sccda
-.section sccdb
-.section sccea
-.section scceb
-.section sccfa
-.section sccfb
-.section sccga
-.section sccgb
-.section sccha
-.section scchb
-.section sccia
-.section sccib
-.section sccja
-.section sccjb
-.section sccka
-.section scckb
-.section sccla
-.section scclb
-.section sccma
-.section sccmb
-.section sccna
-.section sccnb
-.section sccoa
-.section sccob
-.section sccpa
-.section sccpb
-.section sccqa
-.section sccqb
-.section sccra
-.section sccrb
-.section sccsa
-.section sccsb
-.section sccta
-.section scctb
-.section sccua
-.section sccub
-.section sccva
-.section sccvb
-.section sccwa
-.section sccwb
-.section sccxa
-.section sccxb
-.section sccya
-.section sccyb
-.section sccza
-.section scczb
-.section scc1a
-.section scc1b
-.section scc2a
-.section scc2b
-.section scc3a
-.section scc3b
-.section scc4a
-.section scc4b
-.section scc5a
-.section scc5b
-.section scc6a
-.section scc6b
-.section scc7a
-.section scc7b
-.section scc8a
-.section scc8b
-.section scc9a
-.section scc9b
-.section scc0a
-.section scc0b
-.section scdaa
-.section scdab
-.section scdba
-.section scdbb
-.section scdca
-.section scdcb
-.section scdda
-.section scddb
-.section scdea
-.section scdeb
-.section scdfa
-.section scdfb
-.section scdga
-.section scdgb
-.section scdha
-.section scdhb
-.section scdia
-.section scdib
-.section scdja
-.section scdjb
-.section scdka
-.section scdkb
-.section scdla
-.section scdlb
-.section scdma
-.section scdmb
-.section scdna
-.section scdnb
-.section scdoa
-.section scdob
-.section scdpa
-.section scdpb
-.section scdqa
-.section scdqb
-.section scdra
-.section scdrb
-.section scdsa
-.section scdsb
-.section scdta
-.section scdtb
-.section scdua
-.section scdub
-.section scdva
-.section scdvb
-.section scdwa
-.section scdwb
-.section scdxa
-.section scdxb
-.section scdya
-.section scdyb
-.section scdza
-.section scdzb
-.section scd1a
-.section scd1b
-.section scd2a
-.section scd2b
-.section scd3a
-.section scd3b
-.section scd4a
-.section scd4b
-.section scd5a
-.section scd5b
-.section scd6a
-.section scd6b
-.section scd7a
-.section scd7b
-.section scd8a
-.section scd8b
-.section scd9a
-.section scd9b
-.section scd0a
-.section scd0b
-.section sceaa
-.section sceab
-.section sceba
-.section scebb
-.section sceca
-.section scecb
-.section sceda
-.section scedb
-.section sceea
-.section sceeb
-.section scefa
-.section scefb
-.section scega
-.section scegb
-.section sceha
-.section scehb
-.section sceia
-.section sceib
-.section sceja
-.section scejb
-.section sceka
-.section scekb
-.section scela
-.section scelb
-.section scema
-.section scemb
-.section scena
-.section scenb
-.section sceoa
-.section sceob
-.section scepa
-.section scepb
-.section sceqa
-.section sceqb
-.section scera
-.section scerb
-.section scesa
-.section scesb
-.section sceta
-.section scetb
-.section sceua
-.section sceub
-.section sceva
-.section scevb
-.section scewa
-.section scewb
-.section scexa
-.section scexb
-.section sceya
-.section sceyb
-.section sceza
-.section scezb
-.section sce1a
-.section sce1b
-.section sce2a
-.section sce2b
-.section sce3a
-.section sce3b
-.section sce4a
-.section sce4b
-.section sce5a
-.section sce5b
-.section sce6a
-.section sce6b
-.section sce7a
-.section sce7b
-.section sce8a
-.section sce8b
-.section sce9a
-.section sce9b
-.section sce0a
-.section sce0b
-.section scfaa
-.section scfab
-.section scfba
-.section scfbb
-.section scfca
-.section scfcb
-.section scfda
-.section scfdb
-.section scfea
-.section scfeb
-.section scffa
-.section scffb
-.section scfga
-.section scfgb
-.section scfha
-.section scfhb
-.section scfia
-.section scfib
-.section scfja
-.section scfjb
-.section scfka
-.section scfkb
-.section scfla
-.section scflb
-.section scfma
-.section scfmb
-.section scfna
-.section scfnb
-.section scfoa
-.section scfob
-.section scfpa
-.section scfpb
-.section scfqa
-.section scfqb
-.section scfra
-.section scfrb
-.section scfsa
-.section scfsb
-.section scfta
-.section scftb
-.section scfua
-.section scfub
-.section scfva
-.section scfvb
-.section scfwa
-.section scfwb
-.section scfxa
-.section scfxb
-.section scfya
-.section scfyb
-.section scfza
-.section scfzb
-.section scf1a
-.section scf1b
-.section scf2a
-.section scf2b
-.section scf3a
-.section scf3b
-.section scf4a
-.section scf4b
-.section scf5a
-.section scf5b
-.section scf6a
-.section scf6b
-.section scf7a
-.section scf7b
-.section scf8a
-.section scf8b
-.section scf9a
-.section scf9b
-.section scf0a
-.section scf0b
-.section scgaa
-.section scgab
-.section scgba
-.section scgbb
-.section scgca
-.section scgcb
-.section scgda
-.section scgdb
-.section scgea
-.section scgeb
-.section scgfa
-.section scgfb
-.section scgga
-.section scggb
-.section scgha
-.section scghb
-.section scgia
-.section scgib
-.section scgja
-.section scgjb
-.section scgka
-.section scgkb
-.section scgla
-.section scglb
-.section scgma
-.section scgmb
-.section scgna
-.section scgnb
-.section scgoa
-.section scgob
-.section scgpa
-.section scgpb
-.section scgqa
-.section scgqb
-.section scgra
-.section scgrb
-.section scgsa
-.section scgsb
-.section scgta
-.section scgtb
-.section scgua
-.section scgub
-.section scgva
-.section scgvb
-.section scgwa
-.section scgwb
-.section scgxa
-.section scgxb
-.section scgya
-.section scgyb
-.section scgza
-.section scgzb
-.section scg1a
-.section scg1b
-.section scg2a
-.section scg2b
-.section scg3a
-.section scg3b
-.section scg4a
-.section scg4b
-.section scg5a
-.section scg5b
-.section scg6a
-.section scg6b
-.section scg7a
-.section scg7b
-.section scg8a
-.section scg8b
-.section scg9a
-.section scg9b
-.section scg0a
-.section scg0b
-.section schaa
-.section schab
-.section schba
-.section schbb
-.section schca
-.section schcb
-.section schda
-.section schdb
-.section schea
-.section scheb
-.section schfa
-.section schfb
-.section schga
-.section schgb
-.section schha
-.section schhb
-.section schia
-.section schib
-.section schja
-.section schjb
-.section schka
-.section schkb
-.section schla
-.section schlb
-.section schma
-.section schmb
-.section schna
-.section schnb
-.section schoa
-.section schob
-.section schpa
-.section schpb
-.section schqa
-.section schqb
-.section schra
-.section schrb
-.section schsa
-.section schsb
-.section schta
-.section schtb
-.section schua
-.section schub
-.section schva
-.section schvb
-.section schwa
-.section schwb
-.section schxa
-.section schxb
-.section schya
-.section schyb
-.section schza
-.section schzb
-.section sch1a
-.section sch1b
-.section sch2a
-.section sch2b
-.section sch3a
-.section sch3b
-.section sch4a
-.section sch4b
-.section sch5a
-.section sch5b
-.section sch6a
-.section sch6b
-.section sch7a
-.section sch7b
-.section sch8a
-.section sch8b
-.section sch9a
-.section sch9b
-.section sch0a
-.section sch0b
-.section sciaa
-.section sciab
-.section sciba
-.section scibb
-.section scica
-.section scicb
-.section scida
-.section scidb
-.section sciea
-.section scieb
-.section scifa
-.section scifb
-.section sciga
-.section scigb
-.section sciha
-.section scihb
-.section sciia
-.section sciib
-.section scija
-.section scijb
-.section scika
-.section scikb
-.section scila
-.section scilb
-.section scima
-.section scimb
-.section scina
-.section scinb
-.section scioa
-.section sciob
-.section scipa
-.section scipb
-.section sciqa
-.section sciqb
-.section scira
-.section scirb
-.section scisa
-.section scisb
-.section scita
-.section scitb
-.section sciua
-.section sciub
-.section sciva
-.section scivb
-.section sciwa
-.section sciwb
-.section scixa
-.section scixb
-.section sciya
-.section sciyb
-.section sciza
-.section scizb
-.section sci1a
-.section sci1b
-.section sci2a
-.section sci2b
-.section sci3a
-.section sci3b
-.section sci4a
-.section sci4b
-.section sci5a
-.section sci5b
-.section sci6a
-.section sci6b
-.section sci7a
-.section sci7b
-.section sci8a
-.section sci8b
-.section sci9a
-.section sci9b
-.section sci0a
-.section sci0b
-.section scjaa
-.section scjab
-.section scjba
-.section scjbb
-.section scjca
-.section scjcb
-.section scjda
-.section scjdb
-.section scjea
-.section scjeb
-.section scjfa
-.section scjfb
-.section scjga
-.section scjgb
-.section scjha
-.section scjhb
-.section scjia
-.section scjib
-.section scjja
-.section scjjb
-.section scjka
-.section scjkb
-.section scjla
-.section scjlb
-.section scjma
-.section scjmb
-.section scjna
-.section scjnb
-.section scjoa
-.section scjob
-.section scjpa
-.section scjpb
-.section scjqa
-.section scjqb
-.section scjra
-.section scjrb
-.section scjsa
-.section scjsb
-.section scjta
-.section scjtb
-.section scjua
-.section scjub
-.section scjva
-.section scjvb
-.section scjwa
-.section scjwb
-.section scjxa
-.section scjxb
-.section scjya
-.section scjyb
-.section scjza
-.section scjzb
-.section scj1a
-.section scj1b
-.section scj2a
-.section scj2b
-.section scj3a
-.section scj3b
-.section scj4a
-.section scj4b
-.section scj5a
-.section scj5b
-.section scj6a
-.section scj6b
-.section scj7a
-.section scj7b
-.section scj8a
-.section scj8b
-.section scj9a
-.section scj9b
-.section scj0a
-.section scj0b
-.section sckaa
-.section sckab
-.section sckba
-.section sckbb
-.section sckca
-.section sckcb
-.section sckda
-.section sckdb
-.section sckea
-.section sckeb
-.section sckfa
-.section sckfb
-.section sckga
-.section sckgb
-.section sckha
-.section sckhb
-.section sckia
-.section sckib
-.section sckja
-.section sckjb
-.section sckka
-.section sckkb
-.section sckla
-.section scklb
-.section sckma
-.section sckmb
-.section sckna
-.section scknb
-.section sckoa
-.section sckob
-.section sckpa
-.section sckpb
-.section sckqa
-.section sckqb
-.section sckra
-.section sckrb
-.section scksa
-.section scksb
-.section sckta
-.section scktb
-.section sckua
-.section sckub
-.section sckva
-.section sckvb
-.section sckwa
-.section sckwb
-.section sckxa
-.section sckxb
-.section sckya
-.section sckyb
-.section sckza
-.section sckzb
-.section sck1a
-.section sck1b
-.section sck2a
-.section sck2b
-.section sck3a
-.section sck3b
-.section sck4a
-.section sck4b
-.section sck5a
-.section sck5b
-.section sck6a
-.section sck6b
-.section sck7a
-.section sck7b
-.section sck8a
-.section sck8b
-.section sck9a
-.section sck9b
-.section sck0a
-.section sck0b
-.section sclaa
-.section sclab
-.section sclba
-.section sclbb
-.section sclca
-.section sclcb
-.section sclda
-.section scldb
-.section sclea
-.section scleb
-.section sclfa
-.section sclfb
-.section sclga
-.section sclgb
-.section sclha
-.section sclhb
-.section sclia
-.section sclib
-.section sclja
-.section scljb
-.section sclka
-.section sclkb
-.section sclla
-.section scllb
-.section sclma
-.section sclmb
-.section sclna
-.section sclnb
-.section scloa
-.section sclob
-.section sclpa
-.section sclpb
-.section sclqa
-.section sclqb
-.section sclra
-.section sclrb
-.section sclsa
-.section sclsb
-.section sclta
-.section scltb
-.section sclua
-.section sclub
-.section sclva
-.section sclvb
-.section sclwa
-.section sclwb
-.section sclxa
-.section sclxb
-.section sclya
-.section sclyb
-.section sclza
-.section sclzb
-.section scl1a
-.section scl1b
-.section scl2a
-.section scl2b
-.section scl3a
-.section scl3b
-.section scl4a
-.section scl4b
-.section scl5a
-.section scl5b
-.section scl6a
-.section scl6b
-.section scl7a
-.section scl7b
-.section scl8a
-.section scl8b
-.section scl9a
-.section scl9b
-.section scl0a
-.section scl0b
-.section scmaa
-.section scmab
-.section scmba
-.section scmbb
-.section scmca
-.section scmcb
-.section scmda
-.section scmdb
-.section scmea
-.section scmeb
-.section scmfa
-.section scmfb
-.section scmga
-.section scmgb
-.section scmha
-.section scmhb
-.section scmia
-.section scmib
-.section scmja
-.section scmjb
-.section scmka
-.section scmkb
-.section scmla
-.section scmlb
-.section scmma
-.section scmmb
-.section scmna
-.section scmnb
-.section scmoa
-.section scmob
-.section scmpa
-.section scmpb
-.section scmqa
-.section scmqb
-.section scmra
-.section scmrb
-.section scmsa
-.section scmsb
-.section scmta
-.section scmtb
-.section scmua
-.section scmub
-.section scmva
-.section scmvb
-.section scmwa
-.section scmwb
-.section scmxa
-.section scmxb
-.section scmya
-.section scmyb
-.section scmza
-.section scmzb
-.section scm1a
-.section scm1b
-.section scm2a
-.section scm2b
-.section scm3a
-.section scm3b
-.section scm4a
-.section scm4b
-.section scm5a
-.section scm5b
-.section scm6a
-.section scm6b
-.section scm7a
-.section scm7b
-.section scm8a
-.section scm8b
-.section scm9a
-.section scm9b
-.section scm0a
-.section scm0b
-.section scnaa
-.section scnab
-.section scnba
-.section scnbb
-.section scnca
-.section scncb
-.section scnda
-.section scndb
-.section scnea
-.section scneb
-.section scnfa
-.section scnfb
-.section scnga
-.section scngb
-.section scnha
-.section scnhb
-.section scnia
-.section scnib
-.section scnja
-.section scnjb
-.section scnka
-.section scnkb
-.section scnla
-.section scnlb
-.section scnma
-.section scnmb
-.section scnna
-.section scnnb
-.section scnoa
-.section scnob
-.section scnpa
-.section scnpb
-.section scnqa
-.section scnqb
-.section scnra
-.section scnrb
-.section scnsa
-.section scnsb
-.section scnta
-.section scntb
-.section scnua
-.section scnub
-.section scnva
-.section scnvb
-.section scnwa
-.section scnwb
-.section scnxa
-.section scnxb
-.section scnya
-.section scnyb
-.section scnza
-.section scnzb
-.section scn1a
-.section scn1b
-.section scn2a
-.section scn2b
-.section scn3a
-.section scn3b
-.section scn4a
-.section scn4b
-.section scn5a
-.section scn5b
-.section scn6a
-.section scn6b
-.section scn7a
-.section scn7b
-.section scn8a
-.section scn8b
-.section scn9a
-.section scn9b
-.section scn0a
-.section scn0b
-.section scoaa
-.section scoab
-.section scoba
-.section scobb
-.section scoca
-.section scocb
-.section scoda
-.section scodb
-.section scoea
-.section scoeb
-.section scofa
-.section scofb
-.section scoga
-.section scogb
-.section scoha
-.section scohb
-.section scoia
-.section scoib
-.section scoja
-.section scojb
-.section scoka
-.section scokb
-.section scola
-.section scolb
-.section scoma
-.section scomb
-.section scona
-.section sconb
-.section scooa
-.section scoob
-.section scopa
-.section scopb
-.section scoqa
-.section scoqb
-.section scora
-.section scorb
-.section scosa
-.section scosb
-.section scota
-.section scotb
-.section scoua
-.section scoub
-.section scova
-.section scovb
-.section scowa
-.section scowb
-.section scoxa
-.section scoxb
-.section scoya
-.section scoyb
-.section scoza
-.section scozb
-.section sco1a
-.section sco1b
-.section sco2a
-.section sco2b
-.section sco3a
-.section sco3b
-.section sco4a
-.section sco4b
-.section sco5a
-.section sco5b
-.section sco6a
-.section sco6b
-.section sco7a
-.section sco7b
-.section sco8a
-.section sco8b
-.section sco9a
-.section sco9b
-.section sco0a
-.section sco0b
-.section scpaa
-.section scpab
-.section scpba
-.section scpbb
-.section scpca
-.section scpcb
-.section scpda
-.section scpdb
-.section scpea
-.section scpeb
-.section scpfa
-.section scpfb
-.section scpga
-.section scpgb
-.section scpha
-.section scphb
-.section scpia
-.section scpib
-.section scpja
-.section scpjb
-.section scpka
-.section scpkb
-.section scpla
-.section scplb
-.section scpma
-.section scpmb
-.section scpna
-.section scpnb
-.section scpoa
-.section scpob
-.section scppa
-.section scppb
-.section scpqa
-.section scpqb
-.section scpra
-.section scprb
-.section scpsa
-.section scpsb
-.section scpta
-.section scptb
-.section scpua
-.section scpub
-.section scpva
-.section scpvb
-.section scpwa
-.section scpwb
-.section scpxa
-.section scpxb
-.section scpya
-.section scpyb
-.section scpza
-.section scpzb
-.section scp1a
-.section scp1b
-.section scp2a
-.section scp2b
-.section scp3a
-.section scp3b
-.section scp4a
-.section scp4b
-.section scp5a
-.section scp5b
-.section scp6a
-.section scp6b
-.section scp7a
-.section scp7b
-.section scp8a
-.section scp8b
-.section scp9a
-.section scp9b
-.section scp0a
-.section scp0b
-.section scqaa
-.section scqab
-.section scqba
-.section scqbb
-.section scqca
-.section scqcb
-.section scqda
-.section scqdb
-.section scqea
-.section scqeb
-.section scqfa
-.section scqfb
-.section scqga
-.section scqgb
-.section scqha
-.section scqhb
-.section scqia
-.section scqib
-.section scqja
-.section scqjb
-.section scqka
-.section scqkb
-.section scqla
-.section scqlb
-.section scqma
-.section scqmb
-.section scqna
-.section scqnb
-.section scqoa
-.section scqob
-.section scqpa
-.section scqpb
-.section scqqa
-.section scqqb
-.section scqra
-.section scqrb
-.section scqsa
-.section scqsb
-.section scqta
-.section scqtb
-.section scqua
-.section scqub
-.section scqva
-.section scqvb
-.section scqwa
-.section scqwb
-.section scqxa
-.section scqxb
-.section scqya
-.section scqyb
-.section scqza
-.section scqzb
-.section scq1a
-.section scq1b
-.section scq2a
-.section scq2b
-.section scq3a
-.section scq3b
-.section scq4a
-.section scq4b
-.section scq5a
-.section scq5b
-.section scq6a
-.section scq6b
-.section scq7a
-.section scq7b
-.section scq8a
-.section scq8b
-.section scq9a
-.section scq9b
-.section scq0a
-.section scq0b
-.section scraa
-.section scrab
-.section scrba
-.section scrbb
-.section scrca
-.section scrcb
-.section scrda
-.section scrdb
-.section screa
-.section screb
-.section scrfa
-.section scrfb
-.section scrga
-.section scrgb
-.section scrha
-.section scrhb
-.section scria
-.section scrib
-.section scrja
-.section scrjb
-.section scrka
-.section scrkb
-.section scrla
-.section scrlb
-.section scrma
-.section scrmb
-.section scrna
-.section scrnb
-.section scroa
-.section scrob
-.section scrpa
-.section scrpb
-.section scrqa
-.section scrqb
-.section scrra
-.section scrrb
-.section scrsa
-.section scrsb
-.section scrta
-.section scrtb
-.section scrua
-.section scrub
-.section scrva
-.section scrvb
-.section scrwa
-.section scrwb
-.section scrxa
-.section scrxb
-.section scrya
-.section scryb
-.section scrza
-.section scrzb
-.section scr1a
-.section scr1b
-.section scr2a
-.section scr2b
-.section scr3a
-.section scr3b
-.section scr4a
-.section scr4b
-.section scr5a
-.section scr5b
-.section scr6a
-.section scr6b
-.section scr7a
-.section scr7b
-.section scr8a
-.section scr8b
-.section scr9a
-.section scr9b
-.section scr0a
-.section scr0b
-.section scsaa
-.section scsab
-.section scsba
-.section scsbb
-.section scsca
-.section scscb
-.section scsda
-.section scsdb
-.section scsea
-.section scseb
-.section scsfa
-.section scsfb
-.section scsga
-.section scsgb
-.section scsha
-.section scshb
-.section scsia
-.section scsib
-.section scsja
-.section scsjb
-.section scska
-.section scskb
-.section scsla
-.section scslb
-.section scsma
-.section scsmb
-.section scsna
-.section scsnb
-.section scsoa
-.section scsob
-.section scspa
-.section scspb
-.section scsqa
-.section scsqb
-.section scsra
-.section scsrb
-.section scssa
-.section scssb
-.section scsta
-.section scstb
-.section scsua
-.section scsub
-.section scsva
-.section scsvb
-.section scswa
-.section scswb
-.section scsxa
-.section scsxb
-.section scsya
-.section scsyb
-.section scsza
-.section scszb
-.section scs1a
-.section scs1b
-.section scs2a
-.section scs2b
-.section scs3a
-.section scs3b
-.section scs4a
-.section scs4b
-.section scs5a
-.section scs5b
-.section scs6a
-.section scs6b
-.section scs7a
-.section scs7b
-.section scs8a
-.section scs8b
-.section scs9a
-.section scs9b
-.section scs0a
-.section scs0b
-.section sctaa
-.section sctab
-.section sctba
-.section sctbb
-.section sctca
-.section sctcb
-.section sctda
-.section sctdb
-.section sctea
-.section scteb
-.section sctfa
-.section sctfb
-.section sctga
-.section sctgb
-.section sctha
-.section scthb
-.section sctia
-.section sctib
-.section sctja
-.section sctjb
-.section sctka
-.section sctkb
-.section sctla
-.section sctlb
-.section sctma
-.section sctmb
-.section sctna
-.section sctnb
-.section sctoa
-.section sctob
-.section sctpa
-.section sctpb
-.section sctqa
-.section sctqb
-.section sctra
-.section sctrb
-.section sctsa
-.section sctsb
-.section sctta
-.section scttb
-.section sctua
-.section sctub
-.section sctva
-.section sctvb
-.section sctwa
-.section sctwb
-.section sctxa
-.section sctxb
-.section sctya
-.section sctyb
-.section sctza
-.section sctzb
-.section sct1a
-.section sct1b
-.section sct2a
-.section sct2b
-.section sct3a
-.section sct3b
-.section sct4a
-.section sct4b
-.section sct5a
-.section sct5b
-.section sct6a
-.section sct6b
-.section sct7a
-.section sct7b
-.section sct8a
-.section sct8b
-.section sct9a
-.section sct9b
-.section sct0a
-.section sct0b
-.section scuaa
-.section scuab
-.section scuba
-.section scubb
-.section scuca
-.section scucb
-.section scuda
-.section scudb
-.section scuea
-.section scueb
-.section scufa
-.section scufb
-.section scuga
-.section scugb
-.section scuha
-.section scuhb
-.section scuia
-.section scuib
-.section scuja
-.section scujb
-.section scuka
-.section scukb
-.section scula
-.section sculb
-.section scuma
-.section scumb
-.section scuna
-.section scunb
-.section scuoa
-.section scuob
-.section scupa
-.section scupb
-.section scuqa
-.section scuqb
-.section scura
-.section scurb
-.section scusa
-.section scusb
-.section scuta
-.section scutb
-.section scuua
-.section scuub
-.section scuva
-.section scuvb
-.section scuwa
-.section scuwb
-.section scuxa
-.section scuxb
-.section scuya
-.section scuyb
-.section scuza
-.section scuzb
-.section scu1a
-.section scu1b
-.section scu2a
-.section scu2b
-.section scu3a
-.section scu3b
-.section scu4a
-.section scu4b
-.section scu5a
-.section scu5b
-.section scu6a
-.section scu6b
-.section scu7a
-.section scu7b
-.section scu8a
-.section scu8b
-.section scu9a
-.section scu9b
-.section scu0a
-.section scu0b
-.section scvaa
-.section scvab
-.section scvba
-.section scvbb
-.section scvca
-.section scvcb
-.section scvda
-.section scvdb
-.section scvea
-.section scveb
-.section scvfa
-.section scvfb
-.section scvga
-.section scvgb
-.section scvha
-.section scvhb
-.section scvia
-.section scvib
-.section scvja
-.section scvjb
-.section scvka
-.section scvkb
-.section scvla
-.section scvlb
-.section scvma
-.section scvmb
-.section scvna
-.section scvnb
-.section scvoa
-.section scvob
-.section scvpa
-.section scvpb
-.section scvqa
-.section scvqb
-.section scvra
-.section scvrb
-.section scvsa
-.section scvsb
-.section scvta
-.section scvtb
-.section scvua
-.section scvub
-.section scvva
-.section scvvb
-.section scvwa
-.section scvwb
-.section scvxa
-.section scvxb
-.section scvya
-.section scvyb
-.section scvza
-.section scvzb
-.section scv1a
-.section scv1b
-.section scv2a
-.section scv2b
-.section scv3a
-.section scv3b
-.section scv4a
-.section scv4b
-.section scv5a
-.section scv5b
-.section scv6a
-.section scv6b
-.section scv7a
-.section scv7b
-.section scv8a
-.section scv8b
-.section scv9a
-.section scv9b
-.section scv0a
-.section scv0b
-.section scwaa
-.section scwab
-.section scwba
-.section scwbb
-.section scwca
-.section scwcb
-.section scwda
-.section scwdb
-.section scwea
-.section scweb
-.section scwfa
-.section scwfb
-.section scwga
-.section scwgb
-.section scwha
-.section scwhb
-.section scwia
-.section scwib
-.section scwja
-.section scwjb
-.section scwka
-.section scwkb
-.section scwla
-.section scwlb
-.section scwma
-.section scwmb
-.section scwna
-.section scwnb
-.section scwoa
-.section scwob
-.section scwpa
-.section scwpb
-.section scwqa
-.section scwqb
-.section scwra
-.section scwrb
-.section scwsa
-.section scwsb
-.section scwta
-.section scwtb
-.section scwua
-.section scwub
-.section scwva
-.section scwvb
-.section scwwa
-.section scwwb
-.section scwxa
-.section scwxb
-.section scwya
-.section scwyb
-.section scwza
-.section scwzb
-.section scw1a
-.section scw1b
-.section scw2a
-.section scw2b
-.section scw3a
-.section scw3b
-.section scw4a
-.section scw4b
-.section scw5a
-.section scw5b
-.section scw6a
-.section scw6b
-.section scw7a
-.section scw7b
-.section scw8a
-.section scw8b
-.section scw9a
-.section scw9b
-.section scw0a
-.section scw0b
-.section scxaa
-.section scxab
-.section scxba
-.section scxbb
-.section scxca
-.section scxcb
-.section scxda
-.section scxdb
-.section scxea
-.section scxeb
-.section scxfa
-.section scxfb
-.section scxga
-.section scxgb
-.section scxha
-.section scxhb
-.section scxia
-.section scxib
-.section scxja
-.section scxjb
-.section scxka
-.section scxkb
-.section scxla
-.section scxlb
-.section scxma
-.section scxmb
-.section scxna
-.section scxnb
-.section scxoa
-.section scxob
-.section scxpa
-.section scxpb
-.section scxqa
-.section scxqb
-.section scxra
-.section scxrb
-.section scxsa
-.section scxsb
-.section scxta
-.section scxtb
-.section scxua
-.section scxub
-.section scxva
-.section scxvb
-.section scxwa
-.section scxwb
-.section scxxa
-.section scxxb
-.section scxya
-.section scxyb
-.section scxza
-.section scxzb
-.section scx1a
-.section scx1b
-.section scx2a
-.section scx2b
-.section scx3a
-.section scx3b
-.section scx4a
-.section scx4b
-.section scx5a
-.section scx5b
-.section scx6a
-.section scx6b
-.section scx7a
-.section scx7b
-.section scx8a
-.section scx8b
-.section scx9a
-.section scx9b
-.section scx0a
-.section scx0b
-.section scyaa
-.section scyab
-.section scyba
-.section scybb
-.section scyca
-.section scycb
-.section scyda
-.section scydb
-.section scyea
-.section scyeb
-.section scyfa
-.section scyfb
-.section scyga
-.section scygb
-.section scyha
-.section scyhb
-.section scyia
-.section scyib
-.section scyja
-.section scyjb
-.section scyka
-.section scykb
-.section scyla
-.section scylb
-.section scyma
-.section scymb
-.section scyna
-.section scynb
-.section scyoa
-.section scyob
-.section scypa
-.section scypb
-.section scyqa
-.section scyqb
-.section scyra
-.section scyrb
-.section scysa
-.section scysb
-.section scyta
-.section scytb
-.section scyua
-.section scyub
-.section scyva
-.section scyvb
-.section scywa
-.section scywb
-.section scyxa
-.section scyxb
-.section scyya
-.section scyyb
-.section scyza
-.section scyzb
-.section scy1a
-.section scy1b
-.section scy2a
-.section scy2b
-.section scy3a
-.section scy3b
-.section scy4a
-.section scy4b
-.section scy5a
-.section scy5b
-.section scy6a
-.section scy6b
-.section scy7a
-.section scy7b
-.section scy8a
-.section scy8b
-.section scy9a
-.section scy9b
-.section scy0a
-.section scy0b
-.section sczaa
-.section sczab
-.section sczba
-.section sczbb
-.section sczca
-.section sczcb
-.section sczda
-.section sczdb
-.section sczea
-.section sczeb
-.section sczfa
-.section sczfb
-.section sczga
-.section sczgb
-.section sczha
-.section sczhb
-.section sczia
-.section sczib
-.section sczja
-.section sczjb
-.section sczka
-.section sczkb
-.section sczla
-.section sczlb
-.section sczma
-.section sczmb
-.section sczna
-.section scznb
-.section sczoa
-.section sczob
-.section sczpa
-.section sczpb
-.section sczqa
-.section sczqb
-.section sczra
-.section sczrb
-.section sczsa
-.section sczsb
-.section sczta
-.section scztb
-.section sczua
-.section sczub
-.section sczva
-.section sczvb
-.section sczwa
-.section sczwb
-.section sczxa
-.section sczxb
-.section sczya
-.section sczyb
-.section sczza
-.section sczzb
-.section scz1a
-.section scz1b
-.section scz2a
-.section scz2b
-.section scz3a
-.section scz3b
-.section scz4a
-.section scz4b
-.section scz5a
-.section scz5b
-.section scz6a
-.section scz6b
-.section scz7a
-.section scz7b
-.section scz8a
-.section scz8b
-.section scz9a
-.section scz9b
-.section scz0a
-.section scz0b
-.section sc1aa
-.section sc1ab
-.section sc1ba
-.section sc1bb
-.section sc1ca
-.section sc1cb
-.section sc1da
-.section sc1db
-.section sc1ea
-.section sc1eb
-.section sc1fa
-.section sc1fb
-.section sc1ga
-.section sc1gb
-.section sc1ha
-.section sc1hb
-.section sc1ia
-.section sc1ib
-.section sc1ja
-.section sc1jb
-.section sc1ka
-.section sc1kb
-.section sc1la
-.section sc1lb
-.section sc1ma
-.section sc1mb
-.section sc1na
-.section sc1nb
-.section sc1oa
-.section sc1ob
-.section sc1pa
-.section sc1pb
-.section sc1qa
-.section sc1qb
-.section sc1ra
-.section sc1rb
-.section sc1sa
-.section sc1sb
-.section sc1ta
-.section sc1tb
-.section sc1ua
-.section sc1ub
-.section sc1va
-.section sc1vb
-.section sc1wa
-.section sc1wb
-.section sc1xa
-.section sc1xb
-.section sc1ya
-.section sc1yb
-.section sc1za
-.section sc1zb
-.section sc11a
-.section sc11b
-.section sc12a
-.section sc12b
-.section sc13a
-.section sc13b
-.section sc14a
-.section sc14b
-.section sc15a
-.section sc15b
-.section sc16a
-.section sc16b
-.section sc17a
-.section sc17b
-.section sc18a
-.section sc18b
-.section sc19a
-.section sc19b
-.section sc10a
-.section sc10b
-.section sc2aa
-.section sc2ab
-.section sc2ba
-.section sc2bb
-.section sc2ca
-.section sc2cb
-.section sc2da
-.section sc2db
-.section sc2ea
-.section sc2eb
-.section sc2fa
-.section sc2fb
-.section sc2ga
-.section sc2gb
-.section sc2ha
-.section sc2hb
-.section sc2ia
-.section sc2ib
-.section sc2ja
-.section sc2jb
-.section sc2ka
-.section sc2kb
-.section sc2la
-.section sc2lb
-.section sc2ma
-.section sc2mb
-.section sc2na
-.section sc2nb
-.section sc2oa
-.section sc2ob
-.section sc2pa
-.section sc2pb
-.section sc2qa
-.section sc2qb
-.section sc2ra
-.section sc2rb
-.section sc2sa
-.section sc2sb
-.section sc2ta
-.section sc2tb
-.section sc2ua
-.section sc2ub
-.section sc2va
-.section sc2vb
-.section sc2wa
-.section sc2wb
-.section sc2xa
-.section sc2xb
-.section sc2ya
-.section sc2yb
-.section sc2za
-.section sc2zb
-.section sc21a
-.section sc21b
-.section sc22a
-.section sc22b
-.section sc23a
-.section sc23b
-.section sc24a
-.section sc24b
-.section sc25a
-.section sc25b
-.section sc26a
-.section sc26b
-.section sc27a
-.section sc27b
-.section sc28a
-.section sc28b
-.section sc29a
-.section sc29b
-.section sc20a
-.section sc20b
-.section sc3aa
-.section sc3ab
-.section sc3ba
-.section sc3bb
-.section sc3ca
-.section sc3cb
-.section sc3da
-.section sc3db
-.section sc3ea
-.section sc3eb
-.section sc3fa
-.section sc3fb
-.section sc3ga
-.section sc3gb
-.section sc3ha
-.section sc3hb
-.section sc3ia
-.section sc3ib
-.section sc3ja
-.section sc3jb
-.section sc3ka
-.section sc3kb
-.section sc3la
-.section sc3lb
-.section sc3ma
-.section sc3mb
-.section sc3na
-.section sc3nb
-.section sc3oa
-.section sc3ob
-.section sc3pa
-.section sc3pb
-.section sc3qa
-.section sc3qb
-.section sc3ra
-.section sc3rb
-.section sc3sa
-.section sc3sb
-.section sc3ta
-.section sc3tb
-.section sc3ua
-.section sc3ub
-.section sc3va
-.section sc3vb
-.section sc3wa
-.section sc3wb
-.section sc3xa
-.section sc3xb
-.section sc3ya
-.section sc3yb
-.section sc3za
-.section sc3zb
-.section sc31a
-.section sc31b
-.section sc32a
-.section sc32b
-.section sc33a
-.section sc33b
-.section sc34a
-.section sc34b
-.section sc35a
-.section sc35b
-.section sc36a
-.section sc36b
-.section sc37a
-.section sc37b
-.section sc38a
-.section sc38b
-.section sc39a
-.section sc39b
-.section sc30a
-.section sc30b
-.section sc4aa
-.section sc4ab
-.section sc4ba
-.section sc4bb
-.section sc4ca
-.section sc4cb
-.section sc4da
-.section sc4db
-.section sc4ea
-.section sc4eb
-.section sc4fa
-.section sc4fb
-.section sc4ga
-.section sc4gb
-.section sc4ha
-.section sc4hb
-.section sc4ia
-.section sc4ib
-.section sc4ja
-.section sc4jb
-.section sc4ka
-.section sc4kb
-.section sc4la
-.section sc4lb
-.section sc4ma
-.section sc4mb
-.section sc4na
-.section sc4nb
-.section sc4oa
-.section sc4ob
-.section sc4pa
-.section sc4pb
-.section sc4qa
-.section sc4qb
-.section sc4ra
-.section sc4rb
-.section sc4sa
-.section sc4sb
-.section sc4ta
-.section sc4tb
-.section sc4ua
-.section sc4ub
-.section sc4va
-.section sc4vb
-.section sc4wa
-.section sc4wb
-.section sc4xa
-.section sc4xb
-.section sc4ya
-.section sc4yb
-.section sc4za
-.section sc4zb
-.section sc41a
-.section sc41b
-.section sc42a
-.section sc42b
-.section sc43a
-.section sc43b
-.section sc44a
-.section sc44b
-.section sc45a
-.section sc45b
-.section sc46a
-.section sc46b
-.section sc47a
-.section sc47b
-.section sc48a
-.section sc48b
-.section sc49a
-.section sc49b
-.section sc40a
-.section sc40b
-.section sc5aa
-.section sc5ab
-.section sc5ba
-.section sc5bb
-.section sc5ca
-.section sc5cb
-.section sc5da
-.section sc5db
-.section sc5ea
-.section sc5eb
-.section sc5fa
-.section sc5fb
-.section sc5ga
-.section sc5gb
-.section sc5ha
-.section sc5hb
-.section sc5ia
-.section sc5ib
-.section sc5ja
-.section sc5jb
-.section sc5ka
-.section sc5kb
-.section sc5la
-.section sc5lb
-.section sc5ma
-.section sc5mb
-.section sc5na
-.section sc5nb
-.section sc5oa
-.section sc5ob
-.section sc5pa
-.section sc5pb
-.section sc5qa
-.section sc5qb
-.section sc5ra
-.section sc5rb
-.section sc5sa
-.section sc5sb
-.section sc5ta
-.section sc5tb
-.section sc5ua
-.section sc5ub
-.section sc5va
-.section sc5vb
-.section sc5wa
-.section sc5wb
-.section sc5xa
-.section sc5xb
-.section sc5ya
-.section sc5yb
-.section sc5za
-.section sc5zb
-.section sc51a
-.section sc51b
-.section sc52a
-.section sc52b
-.section sc53a
-.section sc53b
-.section sc54a
-.section sc54b
-.section sc55a
-.section sc55b
-.section sc56a
-.section sc56b
-.section sc57a
-.section sc57b
-.section sc58a
-.section sc58b
-.section sc59a
-.section sc59b
-.section sc50a
-.section sc50b
-.section sc6aa
-.section sc6ab
-.section sc6ba
-.section sc6bb
-.section sc6ca
-.section sc6cb
-.section sc6da
-.section sc6db
-.section sc6ea
-.section sc6eb
-.section sc6fa
-.section sc6fb
-.section sc6ga
-.section sc6gb
-.section sc6ha
-.section sc6hb
-.section sc6ia
-.section sc6ib
-.section sc6ja
-.section sc6jb
-.section sc6ka
-.section sc6kb
-.section sc6la
-.section sc6lb
-.section sc6ma
-.section sc6mb
-.section sc6na
-.section sc6nb
-.section sc6oa
-.section sc6ob
-.section sc6pa
-.section sc6pb
-.section sc6qa
-.section sc6qb
-.section sc6ra
-.section sc6rb
-.section sc6sa
-.section sc6sb
-.section sc6ta
-.section sc6tb
-.section sc6ua
-.section sc6ub
-.section sc6va
-.section sc6vb
-.section sc6wa
-.section sc6wb
-.section sc6xa
-.section sc6xb
-.section sc6ya
-.section sc6yb
-.section sc6za
-.section sc6zb
-.section sc61a
-.section sc61b
-.section sc62a
-.section sc62b
-.section sc63a
-.section sc63b
-.section sc64a
-.section sc64b
-.section sc65a
-.section sc65b
-.section sc66a
-.section sc66b
-.section sc67a
-.section sc67b
-.section sc68a
-.section sc68b
-.section sc69a
-.section sc69b
-.section sc60a
-.section sc60b
-.section sc7aa
-.section sc7ab
-.section sc7ba
-.section sc7bb
-.section sc7ca
-.section sc7cb
-.section sc7da
-.section sc7db
-.section sc7ea
-.section sc7eb
-.section sc7fa
-.section sc7fb
-.section sc7ga
-.section sc7gb
-.section sc7ha
-.section sc7hb
-.section sc7ia
-.section sc7ib
-.section sc7ja
-.section sc7jb
-.section sc7ka
-.section sc7kb
-.section sc7la
-.section sc7lb
-.section sc7ma
-.section sc7mb
-.section sc7na
-.section sc7nb
-.section sc7oa
-.section sc7ob
-.section sc7pa
-.section sc7pb
-.section sc7qa
-.section sc7qb
-.section sc7ra
-.section sc7rb
-.section sc7sa
-.section sc7sb
-.section sc7ta
-.section sc7tb
-.section sc7ua
-.section sc7ub
-.section sc7va
-.section sc7vb
-.section sc7wa
-.section sc7wb
-.section sc7xa
-.section sc7xb
-.section sc7ya
-.section sc7yb
-.section sc7za
-.section sc7zb
-.section sc71a
-.section sc71b
-.section sc72a
-.section sc72b
-.section sc73a
-.section sc73b
-.section sc74a
-.section sc74b
-.section sc75a
-.section sc75b
-.section sc76a
-.section sc76b
-.section sc77a
-.section sc77b
-.section sc78a
-.section sc78b
-.section sc79a
-.section sc79b
-.section sc70a
-.section sc70b
-.section sc8aa
-.section sc8ab
-.section sc8ba
-.section sc8bb
-.section sc8ca
-.section sc8cb
-.section sc8da
-.section sc8db
-.section sc8ea
-.section sc8eb
-.section sc8fa
-.section sc8fb
-.section sc8ga
-.section sc8gb
-.section sc8ha
-.section sc8hb
-.section sc8ia
-.section sc8ib
-.section sc8ja
-.section sc8jb
-.section sc8ka
-.section sc8kb
-.section sc8la
-.section sc8lb
-.section sc8ma
-.section sc8mb
-.section sc8na
-.section sc8nb
-.section sc8oa
-.section sc8ob
-.section sc8pa
-.section sc8pb
-.section sc8qa
-.section sc8qb
-.section sc8ra
-.section sc8rb
-.section sc8sa
-.section sc8sb
-.section sc8ta
-.section sc8tb
-.section sc8ua
-.section sc8ub
-.section sc8va
-.section sc8vb
-.section sc8wa
-.section sc8wb
-.section sc8xa
-.section sc8xb
-.section sc8ya
-.section sc8yb
-.section sc8za
-.section sc8zb
-.section sc81a
-.section sc81b
-.section sc82a
-.section sc82b
-.section sc83a
-.section sc83b
-.section sc84a
-.section sc84b
-.section sc85a
-.section sc85b
-.section sc86a
-.section sc86b
-.section sc87a
-.section sc87b
-.section sc88a
-.section sc88b
-.section sc89a
-.section sc89b
-.section sc80a
-.section sc80b
-.section sc9aa
-.section sc9ab
-.section sc9ba
-.section sc9bb
-.section sc9ca
-.section sc9cb
-.section sc9da
-.section sc9db
-.section sc9ea
-.section sc9eb
-.section sc9fa
-.section sc9fb
-.section sc9ga
-.section sc9gb
-.section sc9ha
-.section sc9hb
-.section sc9ia
-.section sc9ib
-.section sc9ja
-.section sc9jb
-.section sc9ka
-.section sc9kb
-.section sc9la
-.section sc9lb
-.section sc9ma
-.section sc9mb
-.section sc9na
-.section sc9nb
-.section sc9oa
-.section sc9ob
-.section sc9pa
-.section sc9pb
-.section sc9qa
-.section sc9qb
-.section sc9ra
-.section sc9rb
-.section sc9sa
-.section sc9sb
-.section sc9ta
-.section sc9tb
-.section sc9ua
-.section sc9ub
-.section sc9va
-.section sc9vb
-.section sc9wa
-.section sc9wb
-.section sc9xa
-.section sc9xb
-.section sc9ya
-.section sc9yb
-.section sc9za
-.section sc9zb
-.section sc91a
-.section sc91b
-.section sc92a
-.section sc92b
-.section sc93a
-.section sc93b
-.section sc94a
-.section sc94b
-.section sc95a
-.section sc95b
-.section sc96a
-.section sc96b
-.section sc97a
-.section sc97b
-.section sc98a
-.section sc98b
-.section sc99a
-.section sc99b
-.section sc90a
-.section sc90b
-.section sc0aa
-.section sc0ab
-.section sc0ba
-.section sc0bb
-.section sc0ca
-.section sc0cb
-.section sc0da
-.section sc0db
-.section sc0ea
-.section sc0eb
-.section sc0fa
-.section sc0fb
-.section sc0ga
-.section sc0gb
-.section sc0ha
-.section sc0hb
-.section sc0ia
-.section sc0ib
-.section sc0ja
-.section sc0jb
-.section sc0ka
-.section sc0kb
-.section sc0la
-.section sc0lb
-.section sc0ma
-.section sc0mb
-.section sc0na
-.section sc0nb
-.section sc0oa
-.section sc0ob
-.section sc0pa
-.section sc0pb
-.section sc0qa
-.section sc0qb
-.section sc0ra
-.section sc0rb
-.section sc0sa
-.section sc0sb
-.section sc0ta
-.section sc0tb
-.section sc0ua
-.section sc0ub
-.section sc0va
-.section sc0vb
-.section sc0wa
-.section sc0wb
-.section sc0xa
-.section sc0xb
-.section sc0ya
-.section sc0yb
-.section sc0za
-.section sc0zb
-.section sc01a
-.section sc01b
-.section sc02a
-.section sc02b
-.section sc03a
-.section sc03b
-.section sc04a
-.section sc04b
-.section sc05a
-.section sc05b
-.section sc06a
-.section sc06b
-.section sc07a
-.section sc07b
-.section sc08a
-.section sc08b
-.section sc09a
-.section sc09b
-.section sc00a
-.section sc00b
-.section sdaaa
-.section sdaab
-.section sdaba
-.section sdabb
-.section sdaca
-.section sdacb
-.section sdada
-.section sdadb
-.section sdaea
-.section sdaeb
-.section sdafa
-.section sdafb
-.section sdaga
-.section sdagb
-.section sdaha
-.section sdahb
-.section sdaia
-.section sdaib
-.section sdaja
-.section sdajb
-.section sdaka
-.section sdakb
-.section sdala
-.section sdalb
-.section sdama
-.section sdamb
-.section sdana
-.section sdanb
-.section sdaoa
-.section sdaob
-.section sdapa
-.section sdapb
-.section sdaqa
-.section sdaqb
-.section sdara
-.section sdarb
-.section sdasa
-.section sdasb
-.section sdata
-.section sdatb
-.section sdaua
-.section sdaub
-.section sdava
-.section sdavb
-.section sdawa
-.section sdawb
-.section sdaxa
-.section sdaxb
-.section sdaya
-.section sdayb
-.section sdaza
-.section sdazb
-.section sda1a
-.section sda1b
-.section sda2a
-.section sda2b
-.section sda3a
-.section sda3b
-.section sda4a
-.section sda4b
-.section sda5a
-.section sda5b
-.section sda6a
-.section sda6b
-.section sda7a
-.section sda7b
-.section sda8a
-.section sda8b
-.section sda9a
-.section sda9b
-.section sda0a
-.section sda0b
-.section sdbaa
-.section sdbab
-.section sdbba
-.section sdbbb
-.section sdbca
-.section sdbcb
-.section sdbda
-.section sdbdb
-.section sdbea
-.section sdbeb
-.section sdbfa
-.section sdbfb
-.section sdbga
-.section sdbgb
-.section sdbha
-.section sdbhb
-.section sdbia
-.section sdbib
-.section sdbja
-.section sdbjb
-.section sdbka
-.section sdbkb
-.section sdbla
-.section sdblb
-.section sdbma
-.section sdbmb
-.section sdbna
-.section sdbnb
-.section sdboa
-.section sdbob
-.section sdbpa
-.section sdbpb
-.section sdbqa
-.section sdbqb
-.section sdbra
-.section sdbrb
-.section sdbsa
-.section sdbsb
-.section sdbta
-.section sdbtb
-.section sdbua
-.section sdbub
-.section sdbva
-.section sdbvb
-.section sdbwa
-.section sdbwb
-.section sdbxa
-.section sdbxb
-.section sdbya
-.section sdbyb
-.section sdbza
-.section sdbzb
-.section sdb1a
-.section sdb1b
-.section sdb2a
-.section sdb2b
-.section sdb3a
-.section sdb3b
-.section sdb4a
-.section sdb4b
-.section sdb5a
-.section sdb5b
-.section sdb6a
-.section sdb6b
-.section sdb7a
-.section sdb7b
-.section sdb8a
-.section sdb8b
-.section sdb9a
-.section sdb9b
-.section sdb0a
-.section sdb0b
-.section sdcaa
-.section sdcab
-.section sdcba
-.section sdcbb
-.section sdcca
-.section sdccb
-.section sdcda
-.section sdcdb
-.section sdcea
-.section sdceb
-.section sdcfa
-.section sdcfb
-.section sdcga
-.section sdcgb
-.section sdcha
-.section sdchb
-.section sdcia
-.section sdcib
-.section sdcja
-.section sdcjb
-.section sdcka
-.section sdckb
-.section sdcla
-.section sdclb
-.section sdcma
-.section sdcmb
-.section sdcna
-.section sdcnb
-.section sdcoa
-.section sdcob
-.section sdcpa
-.section sdcpb
-.section sdcqa
-.section sdcqb
-.section sdcra
-.section sdcrb
-.section sdcsa
-.section sdcsb
-.section sdcta
-.section sdctb
-.section sdcua
-.section sdcub
-.section sdcva
-.section sdcvb
-.section sdcwa
-.section sdcwb
-.section sdcxa
-.section sdcxb
-.section sdcya
-.section sdcyb
-.section sdcza
-.section sdczb
-.section sdc1a
-.section sdc1b
-.section sdc2a
-.section sdc2b
-.section sdc3a
-.section sdc3b
-.section sdc4a
-.section sdc4b
-.section sdc5a
-.section sdc5b
-.section sdc6a
-.section sdc6b
-.section sdc7a
-.section sdc7b
-.section sdc8a
-.section sdc8b
-.section sdc9a
-.section sdc9b
-.section sdc0a
-.section sdc0b
-.section sddaa
-.section sddab
-.section sddba
-.section sddbb
-.section sddca
-.section sddcb
-.section sddda
-.section sdddb
-.section sddea
-.section sddeb
-.section sddfa
-.section sddfb
-.section sddga
-.section sddgb
-.section sddha
-.section sddhb
-.section sddia
-.section sddib
-.section sddja
-.section sddjb
-.section sddka
-.section sddkb
-.section sddla
-.section sddlb
-.section sddma
-.section sddmb
-.section sddna
-.section sddnb
-.section sddoa
-.section sddob
-.section sddpa
-.section sddpb
-.section sddqa
-.section sddqb
-.section sddra
-.section sddrb
-.section sddsa
-.section sddsb
-.section sddta
-.section sddtb
-.section sddua
-.section sddub
-.section sddva
-.section sddvb
-.section sddwa
-.section sddwb
-.section sddxa
-.section sddxb
-.section sddya
-.section sddyb
-.section sddza
-.section sddzb
-.section sdd1a
-.section sdd1b
-.section sdd2a
-.section sdd2b
-.section sdd3a
-.section sdd3b
-.section sdd4a
-.section sdd4b
-.section sdd5a
-.section sdd5b
-.section sdd6a
-.section sdd6b
-.section sdd7a
-.section sdd7b
-.section sdd8a
-.section sdd8b
-.section sdd9a
-.section sdd9b
-.section sdd0a
-.section sdd0b
-.section sdeaa
-.section sdeab
-.section sdeba
-.section sdebb
-.section sdeca
-.section sdecb
-.section sdeda
-.section sdedb
-.section sdeea
-.section sdeeb
-.section sdefa
-.section sdefb
-.section sdega
-.section sdegb
-.section sdeha
-.section sdehb
-.section sdeia
-.section sdeib
-.section sdeja
-.section sdejb
-.section sdeka
-.section sdekb
-.section sdela
-.section sdelb
-.section sdema
-.section sdemb
-.section sdena
-.section sdenb
-.section sdeoa
-.section sdeob
-.section sdepa
-.section sdepb
-.section sdeqa
-.section sdeqb
-.section sdera
-.section sderb
-.section sdesa
-.section sdesb
-.section sdeta
-.section sdetb
-.section sdeua
-.section sdeub
-.section sdeva
-.section sdevb
-.section sdewa
-.section sdewb
-.section sdexa
-.section sdexb
-.section sdeya
-.section sdeyb
-.section sdeza
-.section sdezb
-.section sde1a
-.section sde1b
-.section sde2a
-.section sde2b
-.section sde3a
-.section sde3b
-.section sde4a
-.section sde4b
-.section sde5a
-.section sde5b
-.section sde6a
-.section sde6b
-.section sde7a
-.section sde7b
-.section sde8a
-.section sde8b
-.section sde9a
-.section sde9b
-.section sde0a
-.section sde0b
-.section sdfaa
-.section sdfab
-.section sdfba
-.section sdfbb
-.section sdfca
-.section sdfcb
-.section sdfda
-.section sdfdb
-.section sdfea
-.section sdfeb
-.section sdffa
-.section sdffb
-.section sdfga
-.section sdfgb
-.section sdfha
-.section sdfhb
-.section sdfia
-.section sdfib
-.section sdfja
-.section sdfjb
-.section sdfka
-.section sdfkb
-.section sdfla
-.section sdflb
-.section sdfma
-.section sdfmb
-.section sdfna
-.section sdfnb
-.section sdfoa
-.section sdfob
-.section sdfpa
-.section sdfpb
-.section sdfqa
-.section sdfqb
-.section sdfra
-.section sdfrb
-.section sdfsa
-.section sdfsb
-.section sdfta
-.section sdftb
-.section sdfua
-.section sdfub
-.section sdfva
-.section sdfvb
-.section sdfwa
-.section sdfwb
-.section sdfxa
-.section sdfxb
-.section sdfya
-.section sdfyb
-.section sdfza
-.section sdfzb
-.section sdf1a
-.section sdf1b
-.section sdf2a
-.section sdf2b
-.section sdf3a
-.section sdf3b
-.section sdf4a
-.section sdf4b
-.section sdf5a
-.section sdf5b
-.section sdf6a
-.section sdf6b
-.section sdf7a
-.section sdf7b
-.section sdf8a
-.section sdf8b
-.section sdf9a
-.section sdf9b
-.section sdf0a
-.section sdf0b
-.section sdgaa
-.section sdgab
-.section sdgba
-.section sdgbb
-.section sdgca
-.section sdgcb
-.section sdgda
-.section sdgdb
-.section sdgea
-.section sdgeb
-.section sdgfa
-.section sdgfb
-.section sdgga
-.section sdggb
-.section sdgha
-.section sdghb
-.section sdgia
-.section sdgib
-.section sdgja
-.section sdgjb
-.section sdgka
-.section sdgkb
-.section sdgla
-.section sdglb
-.section sdgma
-.section sdgmb
-.section sdgna
-.section sdgnb
-.section sdgoa
-.section sdgob
-.section sdgpa
-.section sdgpb
-.section sdgqa
-.section sdgqb
-.section sdgra
-.section sdgrb
-.section sdgsa
-.section sdgsb
-.section sdgta
-.section sdgtb
-.section sdgua
-.section sdgub
-.section sdgva
-.section sdgvb
-.section sdgwa
-.section sdgwb
-.section sdgxa
-.section sdgxb
-.section sdgya
-.section sdgyb
-.section sdgza
-.section sdgzb
-.section sdg1a
-.section sdg1b
-.section sdg2a
-.section sdg2b
-.section sdg3a
-.section sdg3b
-.section sdg4a
-.section sdg4b
-.section sdg5a
-.section sdg5b
-.section sdg6a
-.section sdg6b
-.section sdg7a
-.section sdg7b
-.section sdg8a
-.section sdg8b
-.section sdg9a
-.section sdg9b
-.section sdg0a
-.section sdg0b
-.section sdhaa
-.section sdhab
-.section sdhba
-.section sdhbb
-.section sdhca
-.section sdhcb
-.section sdhda
-.section sdhdb
-.section sdhea
-.section sdheb
-.section sdhfa
-.section sdhfb
-.section sdhga
-.section sdhgb
-.section sdhha
-.section sdhhb
-.section sdhia
-.section sdhib
-.section sdhja
-.section sdhjb
-.section sdhka
-.section sdhkb
-.section sdhla
-.section sdhlb
-.section sdhma
-.section sdhmb
-.section sdhna
-.section sdhnb
-.section sdhoa
-.section sdhob
-.section sdhpa
-.section sdhpb
-.section sdhqa
-.section sdhqb
-.section sdhra
-.section sdhrb
-.section sdhsa
-.section sdhsb
-.section sdhta
-.section sdhtb
-.section sdhua
-.section sdhub
-.section sdhva
-.section sdhvb
-.section sdhwa
-.section sdhwb
-.section sdhxa
-.section sdhxb
-.section sdhya
-.section sdhyb
-.section sdhza
-.section sdhzb
-.section sdh1a
-.section sdh1b
-.section sdh2a
-.section sdh2b
-.section sdh3a
-.section sdh3b
-.section sdh4a
-.section sdh4b
-.section sdh5a
-.section sdh5b
-.section sdh6a
-.section sdh6b
-.section sdh7a
-.section sdh7b
-.section sdh8a
-.section sdh8b
-.section sdh9a
-.section sdh9b
-.section sdh0a
-.section sdh0b
-.section sdiaa
-.section sdiab
-.section sdiba
-.section sdibb
-.section sdica
-.section sdicb
-.section sdida
-.section sdidb
-.section sdiea
-.section sdieb
-.section sdifa
-.section sdifb
-.section sdiga
-.section sdigb
-.section sdiha
-.section sdihb
-.section sdiia
-.section sdiib
-.section sdija
-.section sdijb
-.section sdika
-.section sdikb
-.section sdila
-.section sdilb
-.section sdima
-.section sdimb
-.section sdina
-.section sdinb
-.section sdioa
-.section sdiob
-.section sdipa
-.section sdipb
-.section sdiqa
-.section sdiqb
-.section sdira
-.section sdirb
-.section sdisa
-.section sdisb
-.section sdita
-.section sditb
-.section sdiua
-.section sdiub
-.section sdiva
-.section sdivb
-.section sdiwa
-.section sdiwb
-.section sdixa
-.section sdixb
-.section sdiya
-.section sdiyb
-.section sdiza
-.section sdizb
-.section sdi1a
-.section sdi1b
-.section sdi2a
-.section sdi2b
-.section sdi3a
-.section sdi3b
-.section sdi4a
-.section sdi4b
-.section sdi5a
-.section sdi5b
-.section sdi6a
-.section sdi6b
-.section sdi7a
-.section sdi7b
-.section sdi8a
-.section sdi8b
-.section sdi9a
-.section sdi9b
-.section sdi0a
-.section sdi0b
-.section sdjaa
-.section sdjab
-.section sdjba
-.section sdjbb
-.section sdjca
-.section sdjcb
-.section sdjda
-.section sdjdb
-.section sdjea
-.section sdjeb
-.section sdjfa
-.section sdjfb
-.section sdjga
-.section sdjgb
-.section sdjha
-.section sdjhb
-.section sdjia
-.section sdjib
-.section sdjja
-.section sdjjb
-.section sdjka
-.section sdjkb
-.section sdjla
-.section sdjlb
-.section sdjma
-.section sdjmb
-.section sdjna
-.section sdjnb
-.section sdjoa
-.section sdjob
-.section sdjpa
-.section sdjpb
-.section sdjqa
-.section sdjqb
-.section sdjra
-.section sdjrb
-.section sdjsa
-.section sdjsb
-.section sdjta
-.section sdjtb
-.section sdjua
-.section sdjub
-.section sdjva
-.section sdjvb
-.section sdjwa
-.section sdjwb
-.section sdjxa
-.section sdjxb
-.section sdjya
-.section sdjyb
-.section sdjza
-.section sdjzb
-.section sdj1a
-.section sdj1b
-.section sdj2a
-.section sdj2b
-.section sdj3a
-.section sdj3b
-.section sdj4a
-.section sdj4b
-.section sdj5a
-.section sdj5b
-.section sdj6a
-.section sdj6b
-.section sdj7a
-.section sdj7b
-.section sdj8a
-.section sdj8b
-.section sdj9a
-.section sdj9b
-.section sdj0a
-.section sdj0b
-.section sdkaa
-.section sdkab
-.section sdkba
-.section sdkbb
-.section sdkca
-.section sdkcb
-.section sdkda
-.section sdkdb
-.section sdkea
-.section sdkeb
-.section sdkfa
-.section sdkfb
-.section sdkga
-.section sdkgb
-.section sdkha
-.section sdkhb
-.section sdkia
-.section sdkib
-.section sdkja
-.section sdkjb
-.section sdkka
-.section sdkkb
-.section sdkla
-.section sdklb
-.section sdkma
-.section sdkmb
-.section sdkna
-.section sdknb
-.section sdkoa
-.section sdkob
-.section sdkpa
-.section sdkpb
-.section sdkqa
-.section sdkqb
-.section sdkra
-.section sdkrb
-.section sdksa
-.section sdksb
-.section sdkta
-.section sdktb
-.section sdkua
-.section sdkub
-.section sdkva
-.section sdkvb
-.section sdkwa
-.section sdkwb
-.section sdkxa
-.section sdkxb
-.section sdkya
-.section sdkyb
-.section sdkza
-.section sdkzb
-.section sdk1a
-.section sdk1b
-.section sdk2a
-.section sdk2b
-.section sdk3a
-.section sdk3b
-.section sdk4a
-.section sdk4b
-.section sdk5a
-.section sdk5b
-.section sdk6a
-.section sdk6b
-.section sdk7a
-.section sdk7b
-.section sdk8a
-.section sdk8b
-.section sdk9a
-.section sdk9b
-.section sdk0a
-.section sdk0b
-.section sdlaa
-.section sdlab
-.section sdlba
-.section sdlbb
-.section sdlca
-.section sdlcb
-.section sdlda
-.section sdldb
-.section sdlea
-.section sdleb
-.section sdlfa
-.section sdlfb
-.section sdlga
-.section sdlgb
-.section sdlha
-.section sdlhb
-.section sdlia
-.section sdlib
-.section sdlja
-.section sdljb
-.section sdlka
-.section sdlkb
-.section sdlla
-.section sdllb
-.section sdlma
-.section sdlmb
-.section sdlna
-.section sdlnb
-.section sdloa
-.section sdlob
-.section sdlpa
-.section sdlpb
-.section sdlqa
-.section sdlqb
-.section sdlra
-.section sdlrb
-.section sdlsa
-.section sdlsb
-.section sdlta
-.section sdltb
-.section sdlua
-.section sdlub
-.section sdlva
-.section sdlvb
-.section sdlwa
-.section sdlwb
-.section sdlxa
-.section sdlxb
-.section sdlya
-.section sdlyb
-.section sdlza
-.section sdlzb
-.section sdl1a
-.section sdl1b
-.section sdl2a
-.section sdl2b
-.section sdl3a
-.section sdl3b
-.section sdl4a
-.section sdl4b
-.section sdl5a
-.section sdl5b
-.section sdl6a
-.section sdl6b
-.section sdl7a
-.section sdl7b
-.section sdl8a
-.section sdl8b
-.section sdl9a
-.section sdl9b
-.section sdl0a
-.section sdl0b
-.section sdmaa
-.section sdmab
-.section sdmba
-.section sdmbb
-.section sdmca
-.section sdmcb
-.section sdmda
-.section sdmdb
-.section sdmea
-.section sdmeb
-.section sdmfa
-.section sdmfb
-.section sdmga
-.section sdmgb
-.section sdmha
-.section sdmhb
-.section sdmia
-.section sdmib
-.section sdmja
-.section sdmjb
-.section sdmka
-.section sdmkb
-.section sdmla
-.section sdmlb
-.section sdmma
-.section sdmmb
-.section sdmna
-.section sdmnb
-.section sdmoa
-.section sdmob
-.section sdmpa
-.section sdmpb
-.section sdmqa
-.section sdmqb
-.section sdmra
-.section sdmrb
-.section sdmsa
-.section sdmsb
-.section sdmta
-.section sdmtb
-.section sdmua
-.section sdmub
-.section sdmva
-.section sdmvb
-.section sdmwa
-.section sdmwb
-.section sdmxa
-.section sdmxb
-.section sdmya
-.section sdmyb
-.section sdmza
-.section sdmzb
-.section sdm1a
-.section sdm1b
-.section sdm2a
-.section sdm2b
-.section sdm3a
-.section sdm3b
-.section sdm4a
-.section sdm4b
-.section sdm5a
-.section sdm5b
-.section sdm6a
-.section sdm6b
-.section sdm7a
-.section sdm7b
-.section sdm8a
-.section sdm8b
-.section sdm9a
-.section sdm9b
-.section sdm0a
-.section sdm0b
-.section sdnaa
-.section sdnab
-.section sdnba
-.section sdnbb
-.section sdnca
-.section sdncb
-.section sdnda
-.section sdndb
-.section sdnea
-.section sdneb
-.section sdnfa
-.section sdnfb
-.section sdnga
-.section sdngb
-.section sdnha
-.section sdnhb
-.section sdnia
-.section sdnib
-.section sdnja
-.section sdnjb
-.section sdnka
-.section sdnkb
-.section sdnla
-.section sdnlb
-.section sdnma
-.section sdnmb
-.section sdnna
-.section sdnnb
-.section sdnoa
-.section sdnob
-.section sdnpa
-.section sdnpb
-.section sdnqa
-.section sdnqb
-.section sdnra
-.section sdnrb
-.section sdnsa
-.section sdnsb
-.section sdnta
-.section sdntb
-.section sdnua
-.section sdnub
-.section sdnva
-.section sdnvb
-.section sdnwa
-.section sdnwb
-.section sdnxa
-.section sdnxb
-.section sdnya
-.section sdnyb
-.section sdnza
-.section sdnzb
-.section sdn1a
-.section sdn1b
-.section sdn2a
-.section sdn2b
-.section sdn3a
-.section sdn3b
-.section sdn4a
-.section sdn4b
-.section sdn5a
-.section sdn5b
-.section sdn6a
-.section sdn6b
-.section sdn7a
-.section sdn7b
-.section sdn8a
-.section sdn8b
-.section sdn9a
-.section sdn9b
-.section sdn0a
-.section sdn0b
-.section sdoaa
-.section sdoab
-.section sdoba
-.section sdobb
-.section sdoca
-.section sdocb
-.section sdoda
-.section sdodb
-.section sdoea
-.section sdoeb
-.section sdofa
-.section sdofb
-.section sdoga
-.section sdogb
-.section sdoha
-.section sdohb
-.section sdoia
-.section sdoib
-.section sdoja
-.section sdojb
-.section sdoka
-.section sdokb
-.section sdola
-.section sdolb
-.section sdoma
-.section sdomb
-.section sdona
-.section sdonb
-.section sdooa
-.section sdoob
-.section sdopa
-.section sdopb
-.section sdoqa
-.section sdoqb
-.section sdora
-.section sdorb
-.section sdosa
-.section sdosb
-.section sdota
-.section sdotb
-.section sdoua
-.section sdoub
-.section sdova
-.section sdovb
-.section sdowa
-.section sdowb
-.section sdoxa
-.section sdoxb
-.section sdoya
-.section sdoyb
-.section sdoza
-.section sdozb
-.section sdo1a
-.section sdo1b
-.section sdo2a
-.section sdo2b
-.section sdo3a
-.section sdo3b
-.section sdo4a
-.section sdo4b
-.section sdo5a
-.section sdo5b
-.section sdo6a
-.section sdo6b
-.section sdo7a
-.section sdo7b
-.section sdo8a
-.section sdo8b
-.section sdo9a
-.section sdo9b
-.section sdo0a
-.section sdo0b
-.section sdpaa
-.section sdpab
-.section sdpba
-.section sdpbb
-.section sdpca
-.section sdpcb
-.section sdpda
-.section sdpdb
-.section sdpea
-.section sdpeb
-.section sdpfa
-.section sdpfb
-.section sdpga
-.section sdpgb
-.section sdpha
-.section sdphb
-.section sdpia
-.section sdpib
-.section sdpja
-.section sdpjb
-.section sdpka
-.section sdpkb
-.section sdpla
-.section sdplb
-.section sdpma
-.section sdpmb
-.section sdpna
-.section sdpnb
-.section sdpoa
-.section sdpob
-.section sdppa
-.section sdppb
-.section sdpqa
-.section sdpqb
-.section sdpra
-.section sdprb
-.section sdpsa
-.section sdpsb
-.section sdpta
-.section sdptb
-.section sdpua
-.section sdpub
-.section sdpva
-.section sdpvb
-.section sdpwa
-.section sdpwb
-.section sdpxa
-.section sdpxb
-.section sdpya
-.section sdpyb
-.section sdpza
-.section sdpzb
-.section sdp1a
-.section sdp1b
-.section sdp2a
-.section sdp2b
-.section sdp3a
-.section sdp3b
-.section sdp4a
-.section sdp4b
-.section sdp5a
-.section sdp5b
-.section sdp6a
-.section sdp6b
-.section sdp7a
-.section sdp7b
-.section sdp8a
-.section sdp8b
-.section sdp9a
-.section sdp9b
-.section sdp0a
-.section sdp0b
-.section sdqaa
-.section sdqab
-.section sdqba
-.section sdqbb
-.section sdqca
-.section sdqcb
-.section sdqda
-.section sdqdb
-.section sdqea
-.section sdqeb
-.section sdqfa
-.section sdqfb
-.section sdqga
-.section sdqgb
-.section sdqha
-.section sdqhb
-.section sdqia
-.section sdqib
-.section sdqja
-.section sdqjb
-.section sdqka
-.section sdqkb
-.section sdqla
-.section sdqlb
-.section sdqma
-.section sdqmb
-.section sdqna
-.section sdqnb
-.section sdqoa
-.section sdqob
-.section sdqpa
-.section sdqpb
-.section sdqqa
-.section sdqqb
-.section sdqra
-.section sdqrb
-.section sdqsa
-.section sdqsb
-.section sdqta
-.section sdqtb
-.section sdqua
-.section sdqub
-.section sdqva
-.section sdqvb
-.section sdqwa
-.section sdqwb
-.section sdqxa
-.section sdqxb
-.section sdqya
-.section sdqyb
-.section sdqza
-.section sdqzb
-.section sdq1a
-.section sdq1b
-.section sdq2a
-.section sdq2b
-.section sdq3a
-.section sdq3b
-.section sdq4a
-.section sdq4b
-.section sdq5a
-.section sdq5b
-.section sdq6a
-.section sdq6b
-.section sdq7a
-.section sdq7b
-.section sdq8a
-.section sdq8b
-.section sdq9a
-.section sdq9b
-.section sdq0a
-.section sdq0b
-.section sdraa
-.section sdrab
-.section sdrba
-.section sdrbb
-.section sdrca
-.section sdrcb
-.section sdrda
-.section sdrdb
-.section sdrea
-.section sdreb
-.section sdrfa
-.section sdrfb
-.section sdrga
-.section sdrgb
-.section sdrha
-.section sdrhb
-.section sdria
-.section sdrib
-.section sdrja
-.section sdrjb
-.section sdrka
-.section sdrkb
-.section sdrla
-.section sdrlb
-.section sdrma
-.section sdrmb
-.section sdrna
-.section sdrnb
-.section sdroa
-.section sdrob
-.section sdrpa
-.section sdrpb
-.section sdrqa
-.section sdrqb
-.section sdrra
-.section sdrrb
-.section sdrsa
-.section sdrsb
-.section sdrta
-.section sdrtb
-.section sdrua
-.section sdrub
-.section sdrva
-.section sdrvb
-.section sdrwa
-.section sdrwb
-.section sdrxa
-.section sdrxb
-.section sdrya
-.section sdryb
-.section sdrza
-.section sdrzb
-.section sdr1a
-.section sdr1b
-.section sdr2a
-.section sdr2b
-.section sdr3a
-.section sdr3b
-.section sdr4a
-.section sdr4b
-.section sdr5a
-.section sdr5b
-.section sdr6a
-.section sdr6b
-.section sdr7a
-.section sdr7b
-.section sdr8a
-.section sdr8b
-.section sdr9a
-.section sdr9b
-.section sdr0a
-.section sdr0b
-.section sdsaa
-.section sdsab
-.section sdsba
-.section sdsbb
-.section sdsca
-.section sdscb
-.section sdsda
-.section sdsdb
-.section sdsea
-.section sdseb
-.section sdsfa
-.section sdsfb
-.section sdsga
-.section sdsgb
-.section sdsha
-.section sdshb
-.section sdsia
-.section sdsib
-.section sdsja
-.section sdsjb
-.section sdska
-.section sdskb
-.section sdsla
-.section sdslb
-.section sdsma
-.section sdsmb
-.section sdsna
-.section sdsnb
-.section sdsoa
-.section sdsob
-.section sdspa
-.section sdspb
-.section sdsqa
-.section sdsqb
-.section sdsra
-.section sdsrb
-.section sdssa
-.section sdssb
-.section sdsta
-.section sdstb
-.section sdsua
-.section sdsub
-.section sdsva
-.section sdsvb
-.section sdswa
-.section sdswb
-.section sdsxa
-.section sdsxb
-.section sdsya
-.section sdsyb
-.section sdsza
-.section sdszb
-.section sds1a
-.section sds1b
-.section sds2a
-.section sds2b
-.section sds3a
-.section sds3b
-.section sds4a
-.section sds4b
-.section sds5a
-.section sds5b
-.section sds6a
-.section sds6b
-.section sds7a
-.section sds7b
-.section sds8a
-.section sds8b
-.section sds9a
-.section sds9b
-.section sds0a
-.section sds0b
-.section sdtaa
-.section sdtab
-.section sdtba
-.section sdtbb
-.section sdtca
-.section sdtcb
-.section sdtda
-.section sdtdb
-.section sdtea
-.section sdteb
-.section sdtfa
-.section sdtfb
-.section sdtga
-.section sdtgb
-.section sdtha
-.section sdthb
-.section sdtia
-.section sdtib
-.section sdtja
-.section sdtjb
-.section sdtka
-.section sdtkb
-.section sdtla
-.section sdtlb
-.section sdtma
-.section sdtmb
-.section sdtna
-.section sdtnb
-.section sdtoa
-.section sdtob
-.section sdtpa
-.section sdtpb
-.section sdtqa
-.section sdtqb
-.section sdtra
-.section sdtrb
-.section sdtsa
-.section sdtsb
-.section sdtta
-.section sdttb
-.section sdtua
-.section sdtub
-.section sdtva
-.section sdtvb
-.section sdtwa
-.section sdtwb
-.section sdtxa
-.section sdtxb
-.section sdtya
-.section sdtyb
-.section sdtza
-.section sdtzb
-.section sdt1a
-.section sdt1b
-.section sdt2a
-.section sdt2b
-.section sdt3a
-.section sdt3b
-.section sdt4a
-.section sdt4b
-.section sdt5a
-.section sdt5b
-.section sdt6a
-.section sdt6b
-.section sdt7a
-.section sdt7b
-.section sdt8a
-.section sdt8b
-.section sdt9a
-.section sdt9b
-.section sdt0a
-.section sdt0b
-.section sduaa
-.section sduab
-.section sduba
-.section sdubb
-.section sduca
-.section sducb
-.section sduda
-.section sdudb
-.section sduea
-.section sdueb
-.section sdufa
-.section sdufb
-.section sduga
-.section sdugb
-.section sduha
-.section sduhb
-.section sduia
-.section sduib
-.section sduja
-.section sdujb
-.section sduka
-.section sdukb
-.section sdula
-.section sdulb
-.section sduma
-.section sdumb
-.section sduna
-.section sdunb
-.section sduoa
-.section sduob
-.section sdupa
-.section sdupb
-.section sduqa
-.section sduqb
-.section sdura
-.section sdurb
-.section sdusa
-.section sdusb
-.section sduta
-.section sdutb
-.section sduua
-.section sduub
-.section sduva
-.section sduvb
-.section sduwa
-.section sduwb
-.section sduxa
-.section sduxb
-.section sduya
-.section sduyb
-.section sduza
-.section sduzb
-.section sdu1a
-.section sdu1b
-.section sdu2a
-.section sdu2b
-.section sdu3a
-.section sdu3b
-.section sdu4a
-.section sdu4b
-.section sdu5a
-.section sdu5b
-.section sdu6a
-.section sdu6b
-.section sdu7a
-.section sdu7b
-.section sdu8a
-.section sdu8b
-.section sdu9a
-.section sdu9b
-.section sdu0a
-.section sdu0b
-.section sdvaa
-.section sdvab
-.section sdvba
-.section sdvbb
-.section sdvca
-.section sdvcb
-.section sdvda
-.section sdvdb
-.section sdvea
-.section sdveb
-.section sdvfa
-.section sdvfb
-.section sdvga
-.section sdvgb
-.section sdvha
-.section sdvhb
-.section sdvia
-.section sdvib
-.section sdvja
-.section sdvjb
-.section sdvka
-.section sdvkb
-.section sdvla
-.section sdvlb
-.section sdvma
-.section sdvmb
-.section sdvna
-.section sdvnb
-.section sdvoa
-.section sdvob
-.section sdvpa
-.section sdvpb
-.section sdvqa
-.section sdvqb
-.section sdvra
-.section sdvrb
-.section sdvsa
-.section sdvsb
-.section sdvta
-.section sdvtb
-.section sdvua
-.section sdvub
-.section sdvva
-.section sdvvb
-.section sdvwa
-.section sdvwb
-.section sdvxa
-.section sdvxb
-.section sdvya
-.section sdvyb
-.section sdvza
-.section sdvzb
-.section sdv1a
-.section sdv1b
-.section sdv2a
-.section sdv2b
-.section sdv3a
-.section sdv3b
-.section sdv4a
-.section sdv4b
-.section sdv5a
-.section sdv5b
-.section sdv6a
-.section sdv6b
-.section sdv7a
-.section sdv7b
-.section sdv8a
-.section sdv8b
-.section sdv9a
-.section sdv9b
-.section sdv0a
-.section sdv0b
-.section sdwaa
-.section sdwab
-.section sdwba
-.section sdwbb
-.section sdwca
-.section sdwcb
-.section sdwda
-.section sdwdb
-.section sdwea
-.section sdweb
-.section sdwfa
-.section sdwfb
-.section sdwga
-.section sdwgb
-.section sdwha
-.section sdwhb
-.section sdwia
-.section sdwib
-.section sdwja
-.section sdwjb
-.section sdwka
-.section sdwkb
-.section sdwla
-.section sdwlb
-.section sdwma
-.section sdwmb
-.section sdwna
-.section sdwnb
-.section sdwoa
-.section sdwob
-.section sdwpa
-.section sdwpb
-.section sdwqa
-.section sdwqb
-.section sdwra
-.section sdwrb
-.section sdwsa
-.section sdwsb
-.section sdwta
-.section sdwtb
-.section sdwua
-.section sdwub
-.section sdwva
-.section sdwvb
-.section sdwwa
-.section sdwwb
-.section sdwxa
-.section sdwxb
-.section sdwya
-.section sdwyb
-.section sdwza
-.section sdwzb
-.section sdw1a
-.section sdw1b
-.section sdw2a
-.section sdw2b
-.section sdw3a
-.section sdw3b
-.section sdw4a
-.section sdw4b
-.section sdw5a
-.section sdw5b
-.section sdw6a
-.section sdw6b
-.section sdw7a
-.section sdw7b
-.section sdw8a
-.section sdw8b
-.section sdw9a
-.section sdw9b
-.section sdw0a
-.section sdw0b
-.section sdxaa
-.section sdxab
-.section sdxba
-.section sdxbb
-.section sdxca
-.section sdxcb
-.section sdxda
-.section sdxdb
-.section sdxea
-.section sdxeb
-.section sdxfa
-.section sdxfb
-.section sdxga
-.section sdxgb
-.section sdxha
-.section sdxhb
-.section sdxia
-.section sdxib
-.section sdxja
-.section sdxjb
-.section sdxka
-.section sdxkb
-.section sdxla
-.section sdxlb
-.section sdxma
-.section sdxmb
-.section sdxna
-.section sdxnb
-.section sdxoa
-.section sdxob
-.section sdxpa
-.section sdxpb
-.section sdxqa
-.section sdxqb
-.section sdxra
-.section sdxrb
-.section sdxsa
-.section sdxsb
-.section sdxta
-.section sdxtb
-.section sdxua
-.section sdxub
-.section sdxva
-.section sdxvb
-.section sdxwa
-.section sdxwb
-.section sdxxa
-.section sdxxb
-.section sdxya
-.section sdxyb
-.section sdxza
-.section sdxzb
-.section sdx1a
-.section sdx1b
-.section sdx2a
-.section sdx2b
-.section sdx3a
-.section sdx3b
-.section sdx4a
-.section sdx4b
-.section sdx5a
-.section sdx5b
-.section sdx6a
-.section sdx6b
-.section sdx7a
-.section sdx7b
-.section sdx8a
-.section sdx8b
-.section sdx9a
-.section sdx9b
-.section sdx0a
-.section sdx0b
-.section sdyaa
-.section sdyab
-.section sdyba
-.section sdybb
-.section sdyca
-.section sdycb
-.section sdyda
-.section sdydb
-.section sdyea
-.section sdyeb
-.section sdyfa
-.section sdyfb
-.section sdyga
-.section sdygb
-.section sdyha
-.section sdyhb
-.section sdyia
-.section sdyib
-.section sdyja
-.section sdyjb
-.section sdyka
-.section sdykb
-.section sdyla
-.section sdylb
-.section sdyma
-.section sdymb
-.section sdyna
-.section sdynb
-.section sdyoa
-.section sdyob
-.section sdypa
-.section sdypb
-.section sdyqa
-.section sdyqb
-.section sdyra
-.section sdyrb
-.section sdysa
-.section sdysb
-.section sdyta
-.section sdytb
-.section sdyua
-.section sdyub
-.section sdyva
-.section sdyvb
-.section sdywa
-.section sdywb
-.section sdyxa
-.section sdyxb
-.section sdyya
-.section sdyyb
-.section sdyza
-.section sdyzb
-.section sdy1a
-.section sdy1b
-.section sdy2a
-.section sdy2b
-.section sdy3a
-.section sdy3b
-.section sdy4a
-.section sdy4b
-.section sdy5a
-.section sdy5b
-.section sdy6a
-.section sdy6b
-.section sdy7a
-.section sdy7b
-.section sdy8a
-.section sdy8b
-.section sdy9a
-.section sdy9b
-.section sdy0a
-.section sdy0b
-.section sdzaa
-.section sdzab
-.section sdzba
-.section sdzbb
-.section sdzca
-.section sdzcb
-.section sdzda
-.section sdzdb
-.section sdzea
-.section sdzeb
-.section sdzfa
-.section sdzfb
-.section sdzga
-.section sdzgb
-.section sdzha
-.section sdzhb
-.section sdzia
-.section sdzib
-.section sdzja
-.section sdzjb
-.section sdzka
-.section sdzkb
-.section sdzla
-.section sdzlb
-.section sdzma
-.section sdzmb
-.section sdzna
-.section sdznb
-.section sdzoa
-.section sdzob
-.section sdzpa
-.section sdzpb
-.section sdzqa
-.section sdzqb
-.section sdzra
-.section sdzrb
-.section sdzsa
-.section sdzsb
-.section sdzta
-.section sdztb
-.section sdzua
-.section sdzub
-.section sdzva
-.section sdzvb
-.section sdzwa
-.section sdzwb
-.section sdzxa
-.section sdzxb
-.section sdzya
-.section sdzyb
-.section sdzza
-.section sdzzb
-.section sdz1a
-.section sdz1b
-.section sdz2a
-.section sdz2b
-.section sdz3a
-.section sdz3b
-.section sdz4a
-.section sdz4b
-.section sdz5a
-.section sdz5b
-.section sdz6a
-.section sdz6b
-.section sdz7a
-.section sdz7b
-.section sdz8a
-.section sdz8b
-.section sdz9a
-.section sdz9b
-.section sdz0a
-.section sdz0b
-.section sd1aa
-.section sd1ab
-.section sd1ba
-.section sd1bb
-.section sd1ca
-.section sd1cb
-.section sd1da
-.section sd1db
-.section sd1ea
-.section sd1eb
-.section sd1fa
-.section sd1fb
-.section sd1ga
-.section sd1gb
-.section sd1ha
-.section sd1hb
-.section sd1ia
-.section sd1ib
-.section sd1ja
-.section sd1jb
-.section sd1ka
-.section sd1kb
-.section sd1la
-.section sd1lb
-.section sd1ma
-.section sd1mb
-.section sd1na
-.section sd1nb
-.section sd1oa
-.section sd1ob
-.section sd1pa
-.section sd1pb
-.section sd1qa
-.section sd1qb
-.section sd1ra
-.section sd1rb
-.section sd1sa
-.section sd1sb
-.section sd1ta
-.section sd1tb
-.section sd1ua
-.section sd1ub
-.section sd1va
-.section sd1vb
-.section sd1wa
-.section sd1wb
-.section sd1xa
-.section sd1xb
-.section sd1ya
-.section sd1yb
-.section sd1za
-.section sd1zb
-.section sd11a
-.section sd11b
-.section sd12a
-.section sd12b
-.section sd13a
-.section sd13b
-.section sd14a
-.section sd14b
-.section sd15a
-.section sd15b
-.section sd16a
-.section sd16b
-.section sd17a
-.section sd17b
-.section sd18a
-.section sd18b
-.section sd19a
-.section sd19b
-.section sd10a
-.section sd10b
-.section sd2aa
-.section sd2ab
-.section sd2ba
-.section sd2bb
-.section sd2ca
-.section sd2cb
-.section sd2da
-.section sd2db
-.section sd2ea
-.section sd2eb
-.section sd2fa
-.section sd2fb
-.section sd2ga
-.section sd2gb
-.section sd2ha
-.section sd2hb
-.section sd2ia
-.section sd2ib
-.section sd2ja
-.section sd2jb
-.section sd2ka
-.section sd2kb
-.section sd2la
-.section sd2lb
-.section sd2ma
-.section sd2mb
-.section sd2na
-.section sd2nb
-.section sd2oa
-.section sd2ob
-.section sd2pa
-.section sd2pb
-.section sd2qa
-.section sd2qb
-.section sd2ra
-.section sd2rb
-.section sd2sa
-.section sd2sb
-.section sd2ta
-.section sd2tb
-.section sd2ua
-.section sd2ub
-.section sd2va
-.section sd2vb
-.section sd2wa
-.section sd2wb
-.section sd2xa
-.section sd2xb
-.section sd2ya
-.section sd2yb
-.section sd2za
-.section sd2zb
-.section sd21a
-.section sd21b
-.section sd22a
-.section sd22b
-.section sd23a
-.section sd23b
-.section sd24a
-.section sd24b
-.section sd25a
-.section sd25b
-.section sd26a
-.section sd26b
-.section sd27a
-.section sd27b
-.section sd28a
-.section sd28b
-.section sd29a
-.section sd29b
-.section sd20a
-.section sd20b
-.section sd3aa
-.section sd3ab
-.section sd3ba
-.section sd3bb
-.section sd3ca
-.section sd3cb
-.section sd3da
-.section sd3db
-.section sd3ea
-.section sd3eb
-.section sd3fa
-.section sd3fb
-.section sd3ga
-.section sd3gb
-.section sd3ha
-.section sd3hb
-.section sd3ia
-.section sd3ib
-.section sd3ja
-.section sd3jb
-.section sd3ka
-.section sd3kb
-.section sd3la
-.section sd3lb
-.section sd3ma
-.section sd3mb
-.section sd3na
-.section sd3nb
-.section sd3oa
-.section sd3ob
-.section sd3pa
-.section sd3pb
-.section sd3qa
-.section sd3qb
-.section sd3ra
-.section sd3rb
-.section sd3sa
-.section sd3sb
-.section sd3ta
-.section sd3tb
-.section sd3ua
-.section sd3ub
-.section sd3va
-.section sd3vb
-.section sd3wa
-.section sd3wb
-.section sd3xa
-.section sd3xb
-.section sd3ya
-.section sd3yb
-.section sd3za
-.section sd3zb
-.section sd31a
-.section sd31b
-.section sd32a
-.section sd32b
-.section sd33a
-.section sd33b
-.section sd34a
-.section sd34b
-.section sd35a
-.section sd35b
-.section sd36a
-.section sd36b
-.section sd37a
-.section sd37b
-.section sd38a
-.section sd38b
-.section sd39a
-.section sd39b
-.section sd30a
-.section sd30b
-.section sd4aa
-.section sd4ab
-.section sd4ba
-.section sd4bb
-.section sd4ca
-.section sd4cb
-.section sd4da
-.section sd4db
-.section sd4ea
-.section sd4eb
-.section sd4fa
-.section sd4fb
-.section sd4ga
-.section sd4gb
-.section sd4ha
-.section sd4hb
-.section sd4ia
-.section sd4ib
-.section sd4ja
-.section sd4jb
-.section sd4ka
-.section sd4kb
-.section sd4la
-.section sd4lb
-.section sd4ma
-.section sd4mb
-.section sd4na
-.section sd4nb
-.section sd4oa
-.section sd4ob
-.section sd4pa
-.section sd4pb
-.section sd4qa
-.section sd4qb
-.section sd4ra
-.section sd4rb
-.section sd4sa
-.section sd4sb
-.section sd4ta
-.section sd4tb
-.section sd4ua
-.section sd4ub
-.section sd4va
-.section sd4vb
-.section sd4wa
-.section sd4wb
-.section sd4xa
-.section sd4xb
-.section sd4ya
-.section sd4yb
-.section sd4za
-.section sd4zb
-.section sd41a
-.section sd41b
-.section sd42a
-.section sd42b
-.section sd43a
-.section sd43b
-.section sd44a
-.section sd44b
-.section sd45a
-.section sd45b
-.section sd46a
-.section sd46b
-.section sd47a
-.section sd47b
-.section sd48a
-.section sd48b
-.section sd49a
-.section sd49b
-.section sd40a
-.section sd40b
-.section sd5aa
-.section sd5ab
-.section sd5ba
-.section sd5bb
-.section sd5ca
-.section sd5cb
-.section sd5da
-.section sd5db
-.section sd5ea
-.section sd5eb
-.section sd5fa
-.section sd5fb
-.section sd5ga
-.section sd5gb
-.section sd5ha
-.section sd5hb
-.section sd5ia
-.section sd5ib
-.section sd5ja
-.section sd5jb
-.section sd5ka
-.section sd5kb
-.section sd5la
-.section sd5lb
-.section sd5ma
-.section sd5mb
-.section sd5na
-.section sd5nb
-.section sd5oa
-.section sd5ob
-.section sd5pa
-.section sd5pb
-.section sd5qa
-.section sd5qb
-.section sd5ra
-.section sd5rb
-.section sd5sa
-.section sd5sb
-.section sd5ta
-.section sd5tb
-.section sd5ua
-.section sd5ub
-.section sd5va
-.section sd5vb
-.section sd5wa
-.section sd5wb
-.section sd5xa
-.section sd5xb
-.section sd5ya
-.section sd5yb
-.section sd5za
-.section sd5zb
-.section sd51a
-.section sd51b
-.section sd52a
-.section sd52b
-.section sd53a
-.section sd53b
-.section sd54a
-.section sd54b
-.section sd55a
-.section sd55b
-.section sd56a
-.section sd56b
-.section sd57a
-.section sd57b
-.section sd58a
-.section sd58b
-.section sd59a
-.section sd59b
-.section sd50a
-.section sd50b
-.section sd6aa
-.section sd6ab
-.section sd6ba
-.section sd6bb
-.section sd6ca
-.section sd6cb
-.section sd6da
-.section sd6db
-.section sd6ea
-.section sd6eb
-.section sd6fa
-.section sd6fb
-.section sd6ga
-.section sd6gb
-.section sd6ha
-.section sd6hb
-.section sd6ia
-.section sd6ib
-.section sd6ja
-.section sd6jb
-.section sd6ka
-.section sd6kb
-.section sd6la
-.section sd6lb
-.section sd6ma
-.section sd6mb
-.section sd6na
-.section sd6nb
-.section sd6oa
-.section sd6ob
-.section sd6pa
-.section sd6pb
-.section sd6qa
-.section sd6qb
-.section sd6ra
-.section sd6rb
-.section sd6sa
-.section sd6sb
-.section sd6ta
-.section sd6tb
-.section sd6ua
-.section sd6ub
-.section sd6va
-.section sd6vb
-.section sd6wa
-.section sd6wb
-.section sd6xa
-.section sd6xb
-.section sd6ya
-.section sd6yb
-.section sd6za
-.section sd6zb
-.section sd61a
-.section sd61b
-.section sd62a
-.section sd62b
-.section sd63a
-.section sd63b
-.section sd64a
-.section sd64b
-.section sd65a
-.section sd65b
-.section sd66a
-.section sd66b
-.section sd67a
-.section sd67b
-.section sd68a
-.section sd68b
-.section sd69a
-.section sd69b
-.section sd60a
-.section sd60b
-.section sd7aa
-.section sd7ab
-.section sd7ba
-.section sd7bb
-.section sd7ca
-.section sd7cb
-.section sd7da
-.section sd7db
-.section sd7ea
-.section sd7eb
-.section sd7fa
-.section sd7fb
-.section sd7ga
-.section sd7gb
-.section sd7ha
-.section sd7hb
-.section sd7ia
-.section sd7ib
-.section sd7ja
-.section sd7jb
-.section sd7ka
-.section sd7kb
-.section sd7la
-.section sd7lb
-.section sd7ma
-.section sd7mb
-.section sd7na
-.section sd7nb
-.section sd7oa
-.section sd7ob
-.section sd7pa
-.section sd7pb
-.section sd7qa
-.section sd7qb
-.section sd7ra
-.section sd7rb
-.section sd7sa
-.section sd7sb
-.section sd7ta
-.section sd7tb
-.section sd7ua
-.section sd7ub
-.section sd7va
-.section sd7vb
-.section sd7wa
-.section sd7wb
-.section sd7xa
-.section sd7xb
-.section sd7ya
-.section sd7yb
-.section sd7za
-.section sd7zb
-.section sd71a
-.section sd71b
-.section sd72a
-.section sd72b
-.section sd73a
-.section sd73b
-.section sd74a
-.section sd74b
-.section sd75a
-.section sd75b
-.section sd76a
-.section sd76b
-.section sd77a
-.section sd77b
-.section sd78a
-.section sd78b
-.section sd79a
-.section sd79b
-.section sd70a
-.section sd70b
-.section sd8aa
-.section sd8ab
-.section sd8ba
-.section sd8bb
-.section sd8ca
-.section sd8cb
-.section sd8da
-.section sd8db
-.section sd8ea
-.section sd8eb
-.section sd8fa
-.section sd8fb
-.section sd8ga
-.section sd8gb
-.section sd8ha
-.section sd8hb
-.section sd8ia
-.section sd8ib
-.section sd8ja
-.section sd8jb
-.section sd8ka
-.section sd8kb
-.section sd8la
-.section sd8lb
-.section sd8ma
-.section sd8mb
-.section sd8na
-.section sd8nb
-.section sd8oa
-.section sd8ob
-.section sd8pa
-.section sd8pb
-.section sd8qa
-.section sd8qb
-.section sd8ra
-.section sd8rb
-.section sd8sa
-.section sd8sb
-.section sd8ta
-.section sd8tb
-.section sd8ua
-.section sd8ub
-.section sd8va
-.section sd8vb
-.section sd8wa
-.section sd8wb
-.section sd8xa
-.section sd8xb
-.section sd8ya
-.section sd8yb
-.section sd8za
-.section sd8zb
-.section sd81a
-.section sd81b
-.section sd82a
-.section sd82b
-.section sd83a
-.section sd83b
-.section sd84a
-.section sd84b
-.section sd85a
-.section sd85b
-.section sd86a
-.section sd86b
-.section sd87a
-.section sd87b
-.section sd88a
-.section sd88b
-.section sd89a
-.section sd89b
-.section sd80a
-.section sd80b
-.section sd9aa
-.section sd9ab
-.section sd9ba
-.section sd9bb
-.section sd9ca
-.section sd9cb
-.section sd9da
-.section sd9db
-.section sd9ea
-.section sd9eb
-.section sd9fa
-.section sd9fb
-.section sd9ga
-.section sd9gb
-.section sd9ha
-.section sd9hb
-.section sd9ia
-.section sd9ib
-.section sd9ja
-.section sd9jb
-.section sd9ka
-.section sd9kb
-.section sd9la
-.section sd9lb
-.section sd9ma
-.section sd9mb
-.section sd9na
-.section sd9nb
-.section sd9oa
-.section sd9ob
-.section sd9pa
-.section sd9pb
-.section sd9qa
-.section sd9qb
-.section sd9ra
-.section sd9rb
-.section sd9sa
-.section sd9sb
-.section sd9ta
-.section sd9tb
-.section sd9ua
-.section sd9ub
-.section sd9va
-.section sd9vb
-.section sd9wa
-.section sd9wb
-.section sd9xa
-.section sd9xb
-.section sd9ya
-.section sd9yb
-.section sd9za
-.section sd9zb
-.section sd91a
-.section sd91b
-.section sd92a
-.section sd92b
-.section sd93a
-.section sd93b
-.section sd94a
-.section sd94b
-.section sd95a
-.section sd95b
-.section sd96a
-.section sd96b
-.section sd97a
-.section sd97b
-.section sd98a
-.section sd98b
-.section sd99a
-.section sd99b
-.section sd90a
-.section sd90b
-.section sd0aa
-.section sd0ab
-.section sd0ba
-.section sd0bb
-.section sd0ca
-.section sd0cb
-.section sd0da
-.section sd0db
-.section sd0ea
-.section sd0eb
-.section sd0fa
-.section sd0fb
-.section sd0ga
-.section sd0gb
-.section sd0ha
-.section sd0hb
-.section sd0ia
-.section sd0ib
-.section sd0ja
-.section sd0jb
-.section sd0ka
-.section sd0kb
-.section sd0la
-.section sd0lb
-.section sd0ma
-.section sd0mb
-.section sd0na
-.section sd0nb
-.section sd0oa
-.section sd0ob
-.section sd0pa
-.section sd0pb
-.section sd0qa
-.section sd0qb
-.section sd0ra
-.section sd0rb
-.section sd0sa
-.section sd0sb
-.section sd0ta
-.section sd0tb
-.section sd0ua
-.section sd0ub
-.section sd0va
-.section sd0vb
-.section sd0wa
-.section sd0wb
-.section sd0xa
-.section sd0xb
-.section sd0ya
-.section sd0yb
-.section sd0za
-.section sd0zb
-.section sd01a
-.section sd01b
-.section sd02a
-.section sd02b
-.section sd03a
-.section sd03b
-.section sd04a
-.section sd04b
-.section sd05a
-.section sd05b
-.section sd06a
-.section sd06b
-.section sd07a
-.section sd07b
-.section sd08a
-.section sd08b
-.section sd09a
-.section sd09b
-.section sd00a
-.section sd00b
-.section seaaa
-.section seaab
-.section seaba
-.section seabb
-.section seaca
-.section seacb
-.section seada
-.section seadb
-.section seaea
-.section seaeb
-.section seafa
-.section seafb
-.section seaga
-.section seagb
-.section seaha
-.section seahb
-.section seaia
-.section seaib
-.section seaja
-.section seajb
-.section seaka
-.section seakb
-.section seala
-.section sealb
-.section seama
-.section seamb
-.section seana
-.section seanb
-.section seaoa
-.section seaob
-.section seapa
-.section seapb
-.section seaqa
-.section seaqb
-.section seara
-.section searb
-.section seasa
-.section seasb
-.section seata
-.section seatb
-.section seaua
-.section seaub
-.section seava
-.section seavb
-.section seawa
-.section seawb
-.section seaxa
-.section seaxb
-.section seaya
-.section seayb
-.section seaza
-.section seazb
-.section sea1a
-.section sea1b
-.section sea2a
-.section sea2b
-.section sea3a
-.section sea3b
-.section sea4a
-.section sea4b
-.section sea5a
-.section sea5b
-.section sea6a
-.section sea6b
-.section sea7a
-.section sea7b
-.section sea8a
-.section sea8b
-.section sea9a
-.section sea9b
-.section sea0a
-.section sea0b
-.section sebaa
-.section sebab
-.section sebba
-.section sebbb
-.section sebca
-.section sebcb
-.section sebda
-.section sebdb
-.section sebea
-.section sebeb
-.section sebfa
-.section sebfb
-.section sebga
-.section sebgb
-.section sebha
-.section sebhb
-.section sebia
-.section sebib
-.section sebja
-.section sebjb
-.section sebka
-.section sebkb
-.section sebla
-.section seblb
-.section sebma
-.section sebmb
-.section sebna
-.section sebnb
-.section seboa
-.section sebob
-.section sebpa
-.section sebpb
-.section sebqa
-.section sebqb
-.section sebra
-.section sebrb
-.section sebsa
-.section sebsb
-.section sebta
-.section sebtb
-.section sebua
-.section sebub
-.section sebva
-.section sebvb
-.section sebwa
-.section sebwb
-.section sebxa
-.section sebxb
-.section sebya
-.section sebyb
-.section sebza
-.section sebzb
-.section seb1a
-.section seb1b
-.section seb2a
-.section seb2b
-.section seb3a
-.section seb3b
-.section seb4a
-.section seb4b
-.section seb5a
-.section seb5b
-.section seb6a
-.section seb6b
-.section seb7a
-.section seb7b
-.section seb8a
-.section seb8b
-.section seb9a
-.section seb9b
-.section seb0a
-.section seb0b
-.section secaa
-.section secab
-.section secba
-.section secbb
-.section secca
-.section seccb
-.section secda
-.section secdb
-.section secea
-.section seceb
-.section secfa
-.section secfb
-.section secga
-.section secgb
-.section secha
-.section sechb
-.section secia
-.section secib
-.section secja
-.section secjb
-.section secka
-.section seckb
-.section secla
-.section seclb
-.section secma
-.section secmb
-.section secna
-.section secnb
-.section secoa
-.section secob
-.section secpa
-.section secpb
-.section secqa
-.section secqb
-.section secra
-.section secrb
-.section secsa
-.section secsb
-.section secta
-.section sectb
-.section secua
-.section secub
-.section secva
-.section secvb
-.section secwa
-.section secwb
-.section secxa
-.section secxb
-.section secya
-.section secyb
-.section secza
-.section seczb
-.section sec1a
-.section sec1b
-.section sec2a
-.section sec2b
-.section sec3a
-.section sec3b
-.section sec4a
-.section sec4b
-.section sec5a
-.section sec5b
-.section sec6a
-.section sec6b
-.section sec7a
-.section sec7b
-.section sec8a
-.section sec8b
-.section sec9a
-.section sec9b
-.section sec0a
-.section sec0b
-.section sedaa
-.section sedab
-.section sedba
-.section sedbb
-.section sedca
-.section sedcb
-.section sedda
-.section seddb
-.section sedea
-.section sedeb
-.section sedfa
-.section sedfb
-.section sedga
-.section sedgb
-.section sedha
-.section sedhb
-.section sedia
-.section sedib
-.section sedja
-.section sedjb
-.section sedka
-.section sedkb
-.section sedla
-.section sedlb
-.section sedma
-.section sedmb
-.section sedna
-.section sednb
-.section sedoa
-.section sedob
-.section sedpa
-.section sedpb
-.section sedqa
-.section sedqb
-.section sedra
-.section sedrb
-.section sedsa
-.section sedsb
-.section sedta
-.section sedtb
-.section sedua
-.section sedub
-.section sedva
-.section sedvb
-.section sedwa
-.section sedwb
-.section sedxa
-.section sedxb
-.section sedya
-.section sedyb
-.section sedza
-.section sedzb
-.section sed1a
-.section sed1b
-.section sed2a
-.section sed2b
-.section sed3a
-.section sed3b
-.section sed4a
-.section sed4b
-.section sed5a
-.section sed5b
-.section sed6a
-.section sed6b
-.section sed7a
-.section sed7b
-.section sed8a
-.section sed8b
-.section sed9a
-.section sed9b
-.section sed0a
-.section sed0b
-.section seeaa
-.section seeab
-.section seeba
-.section seebb
-.section seeca
-.section seecb
-.section seeda
-.section seedb
-.section seeea
-.section seeeb
-.section seefa
-.section seefb
-.section seega
-.section seegb
-.section seeha
-.section seehb
-.section seeia
-.section seeib
-.section seeja
-.section seejb
-.section seeka
-.section seekb
-.section seela
-.section seelb
-.section seema
-.section seemb
-.section seena
-.section seenb
-.section seeoa
-.section seeob
-.section seepa
-.section seepb
-.section seeqa
-.section seeqb
-.section seera
-.section seerb
-.section seesa
-.section seesb
-.section seeta
-.section seetb
-.section seeua
-.section seeub
-.section seeva
-.section seevb
-.section seewa
-.section seewb
-.section seexa
-.section seexb
-.section seeya
-.section seeyb
-.section seeza
-.section seezb
-.section see1a
-.section see1b
-.section see2a
-.section see2b
-.section see3a
-.section see3b
-.section see4a
-.section see4b
-.section see5a
-.section see5b
-.section see6a
-.section see6b
-.section see7a
-.section see7b
-.section see8a
-.section see8b
-.section see9a
-.section see9b
-.section see0a
-.section see0b
-.section sefaa
-.section sefab
-.section sefba
-.section sefbb
-.section sefca
-.section sefcb
-.section sefda
-.section sefdb
-.section sefea
-.section sefeb
-.section seffa
-.section seffb
-.section sefga
-.section sefgb
-.section sefha
-.section sefhb
-.section sefia
-.section sefib
-.section sefja
-.section sefjb
-.section sefka
-.section sefkb
-.section sefla
-.section seflb
-.section sefma
-.section sefmb
-.section sefna
-.section sefnb
-.section sefoa
-.section sefob
-.section sefpa
-.section sefpb
-.section sefqa
-.section sefqb
-.section sefra
-.section sefrb
-.section sefsa
-.section sefsb
-.section sefta
-.section seftb
-.section sefua
-.section sefub
-.section sefva
-.section sefvb
-.section sefwa
-.section sefwb
-.section sefxa
-.section sefxb
-.section sefya
-.section sefyb
-.section sefza
-.section sefzb
-.section sef1a
-.section sef1b
-.section sef2a
-.section sef2b
-.section sef3a
-.section sef3b
-.section sef4a
-.section sef4b
-.section sef5a
-.section sef5b
-.section sef6a
-.section sef6b
-.section sef7a
-.section sef7b
-.section sef8a
-.section sef8b
-.section sef9a
-.section sef9b
-.section sef0a
-.section sef0b
-.section segaa
-.section segab
-.section segba
-.section segbb
-.section segca
-.section segcb
-.section segda
-.section segdb
-.section segea
-.section segeb
-.section segfa
-.section segfb
-.section segga
-.section seggb
-.section segha
-.section seghb
-.section segia
-.section segib
-.section segja
-.section segjb
-.section segka
-.section segkb
-.section segla
-.section seglb
-.section segma
-.section segmb
-.section segna
-.section segnb
-.section segoa
-.section segob
-.section segpa
-.section segpb
-.section segqa
-.section segqb
-.section segra
-.section segrb
-.section segsa
-.section segsb
-.section segta
-.section segtb
-.section segua
-.section segub
-.section segva
-.section segvb
-.section segwa
-.section segwb
-.section segxa
-.section segxb
-.section segya
-.section segyb
-.section segza
-.section segzb
-.section seg1a
-.section seg1b
-.section seg2a
-.section seg2b
-.section seg3a
-.section seg3b
-.section seg4a
-.section seg4b
-.section seg5a
-.section seg5b
-.section seg6a
-.section seg6b
-.section seg7a
-.section seg7b
-.section seg8a
-.section seg8b
-.section seg9a
-.section seg9b
-.section seg0a
-.section seg0b
-.section sehaa
-.section sehab
-.section sehba
-.section sehbb
-.section sehca
-.section sehcb
-.section sehda
-.section sehdb
-.section sehea
-.section seheb
-.section sehfa
-.section sehfb
-.section sehga
-.section sehgb
-.section sehha
-.section sehhb
-.section sehia
-.section sehib
-.section sehja
-.section sehjb
-.section sehka
-.section sehkb
-.section sehla
-.section sehlb
-.section sehma
-.section sehmb
-.section sehna
-.section sehnb
-.section sehoa
-.section sehob
-.section sehpa
-.section sehpb
-.section sehqa
-.section sehqb
-.section sehra
-.section sehrb
-.section sehsa
-.section sehsb
-.section sehta
-.section sehtb
-.section sehua
-.section sehub
-.section sehva
-.section sehvb
-.section sehwa
-.section sehwb
-.section sehxa
-.section sehxb
-.section sehya
-.section sehyb
-.section sehza
-.section sehzb
-.section seh1a
-.section seh1b
-.section seh2a
-.section seh2b
-.section seh3a
-.section seh3b
-.section seh4a
-.section seh4b
-.section seh5a
-.section seh5b
-.section seh6a
-.section seh6b
-.section seh7a
-.section seh7b
-.section seh8a
-.section seh8b
-.section seh9a
-.section seh9b
-.section seh0a
-.section seh0b
-.section seiaa
-.section seiab
-.section seiba
-.section seibb
-.section seica
-.section seicb
-.section seida
-.section seidb
-.section seiea
-.section seieb
-.section seifa
-.section seifb
-.section seiga
-.section seigb
-.section seiha
-.section seihb
-.section seiia
-.section seiib
-.section seija
-.section seijb
-.section seika
-.section seikb
-.section seila
-.section seilb
-.section seima
-.section seimb
-.section seina
-.section seinb
-.section seioa
-.section seiob
-.section seipa
-.section seipb
-.section seiqa
-.section seiqb
-.section seira
-.section seirb
-.section seisa
-.section seisb
-.section seita
-.section seitb
-.section seiua
-.section seiub
-.section seiva
-.section seivb
-.section seiwa
-.section seiwb
-.section seixa
-.section seixb
-.section seiya
-.section seiyb
-.section seiza
-.section seizb
-.section sei1a
-.section sei1b
-.section sei2a
-.section sei2b
-.section sei3a
-.section sei3b
-.section sei4a
-.section sei4b
-.section sei5a
-.section sei5b
-.section sei6a
-.section sei6b
-.section sei7a
-.section sei7b
-.section sei8a
-.section sei8b
-.section sei9a
-.section sei9b
-.section sei0a
-.section sei0b
-.section sejaa
-.section sejab
-.section sejba
-.section sejbb
-.section sejca
-.section sejcb
-.section sejda
-.section sejdb
-.section sejea
-.section sejeb
-.section sejfa
-.section sejfb
-.section sejga
-.section sejgb
-.section sejha
-.section sejhb
-.section sejia
-.section sejib
-.section sejja
-.section sejjb
-.section sejka
-.section sejkb
-.section sejla
-.section sejlb
-.section sejma
-.section sejmb
-.section sejna
-.section sejnb
-.section sejoa
-.section sejob
-.section sejpa
-.section sejpb
-.section sejqa
-.section sejqb
-.section sejra
-.section sejrb
-.section sejsa
-.section sejsb
-.section sejta
-.section sejtb
-.section sejua
-.section sejub
-.section sejva
-.section sejvb
-.section sejwa
-.section sejwb
-.section sejxa
-.section sejxb
-.section sejya
-.section sejyb
-.section sejza
-.section sejzb
-.section sej1a
-.section sej1b
-.section sej2a
-.section sej2b
-.section sej3a
-.section sej3b
-.section sej4a
-.section sej4b
-.section sej5a
-.section sej5b
-.section sej6a
-.section sej6b
-.section sej7a
-.section sej7b
-.section sej8a
-.section sej8b
-.section sej9a
-.section sej9b
-.section sej0a
-.section sej0b
-.section sekaa
-.section sekab
-.section sekba
-.section sekbb
-.section sekca
-.section sekcb
-.section sekda
-.section sekdb
-.section sekea
-.section sekeb
-.section sekfa
-.section sekfb
-.section sekga
-.section sekgb
-.section sekha
-.section sekhb
-.section sekia
-.section sekib
-.section sekja
-.section sekjb
-.section sekka
-.section sekkb
-.section sekla
-.section seklb
-.section sekma
-.section sekmb
-.section sekna
-.section seknb
-.section sekoa
-.section sekob
-.section sekpa
-.section sekpb
-.section sekqa
-.section sekqb
-.section sekra
-.section sekrb
-.section seksa
-.section seksb
-.section sekta
-.section sektb
-.section sekua
-.section sekub
-.section sekva
-.section sekvb
-.section sekwa
-.section sekwb
-.section sekxa
-.section sekxb
-.section sekya
-.section sekyb
-.section sekza
-.section sekzb
-.section sek1a
-.section sek1b
-.section sek2a
-.section sek2b
-.section sek3a
-.section sek3b
-.section sek4a
-.section sek4b
-.section sek5a
-.section sek5b
-.section sek6a
-.section sek6b
-.section sek7a
-.section sek7b
-.section sek8a
-.section sek8b
-.section sek9a
-.section sek9b
-.section sek0a
-.section sek0b
-.section selaa
-.section selab
-.section selba
-.section selbb
-.section selca
-.section selcb
-.section selda
-.section seldb
-.section selea
-.section seleb
-.section selfa
-.section selfb
-.section selga
-.section selgb
-.section selha
-.section selhb
-.section selia
-.section selib
-.section selja
-.section seljb
-.section selka
-.section selkb
-.section sella
-.section sellb
-.section selma
-.section selmb
-.section selna
-.section selnb
-.section seloa
-.section selob
-.section selpa
-.section selpb
-.section selqa
-.section selqb
-.section selra
-.section selrb
-.section selsa
-.section selsb
-.section selta
-.section seltb
-.section selua
-.section selub
-.section selva
-.section selvb
-.section selwa
-.section selwb
-.section selxa
-.section selxb
-.section selya
-.section selyb
-.section selza
-.section selzb
-.section sel1a
-.section sel1b
-.section sel2a
-.section sel2b
-.section sel3a
-.section sel3b
-.section sel4a
-.section sel4b
-.section sel5a
-.section sel5b
-.section sel6a
-.section sel6b
-.section sel7a
-.section sel7b
-.section sel8a
-.section sel8b
-.section sel9a
-.section sel9b
-.section sel0a
-.section sel0b
-.section semaa
-.section semab
-.section semba
-.section sembb
-.section semca
-.section semcb
-.section semda
-.section semdb
-.section semea
-.section semeb
-.section semfa
-.section semfb
-.section semga
-.section semgb
-.section semha
-.section semhb
-.section semia
-.section semib
-.section semja
-.section semjb
-.section semka
-.section semkb
-.section semla
-.section semlb
-.section semma
-.section semmb
-.section semna
-.section semnb
-.section semoa
-.section semob
-.section sempa
-.section sempb
-.section semqa
-.section semqb
-.section semra
-.section semrb
-.section semsa
-.section semsb
-.section semta
-.section semtb
-.section semua
-.section semub
-.section semva
-.section semvb
-.section semwa
-.section semwb
-.section semxa
-.section semxb
-.section semya
-.section semyb
-.section semza
-.section semzb
-.section sem1a
-.section sem1b
-.section sem2a
-.section sem2b
-.section sem3a
-.section sem3b
-.section sem4a
-.section sem4b
-.section sem5a
-.section sem5b
-.section sem6a
-.section sem6b
-.section sem7a
-.section sem7b
-.section sem8a
-.section sem8b
-.section sem9a
-.section sem9b
-.section sem0a
-.section sem0b
-.section senaa
-.section senab
-.section senba
-.section senbb
-.section senca
-.section sencb
-.section senda
-.section sendb
-.section senea
-.section seneb
-.section senfa
-.section senfb
-.section senga
-.section sengb
-.section senha
-.section senhb
-.section senia
-.section senib
-.section senja
-.section senjb
-.section senka
-.section senkb
-.section senla
-.section senlb
-.section senma
-.section senmb
-.section senna
-.section sennb
-.section senoa
-.section senob
-.section senpa
-.section senpb
-.section senqa
-.section senqb
-.section senra
-.section senrb
-.section sensa
-.section sensb
-.section senta
-.section sentb
-.section senua
-.section senub
-.section senva
-.section senvb
-.section senwa
-.section senwb
-.section senxa
-.section senxb
-.section senya
-.section senyb
-.section senza
-.section senzb
-.section sen1a
-.section sen1b
-.section sen2a
-.section sen2b
-.section sen3a
-.section sen3b
-.section sen4a
-.section sen4b
-.section sen5a
-.section sen5b
-.section sen6a
-.section sen6b
-.section sen7a
-.section sen7b
-.section sen8a
-.section sen8b
-.section sen9a
-.section sen9b
-.section sen0a
-.section sen0b
-.section seoaa
-.section seoab
-.section seoba
-.section seobb
-.section seoca
-.section seocb
-.section seoda
-.section seodb
-.section seoea
-.section seoeb
-.section seofa
-.section seofb
-.section seoga
-.section seogb
-.section seoha
-.section seohb
-.section seoia
-.section seoib
-.section seoja
-.section seojb
-.section seoka
-.section seokb
-.section seola
-.section seolb
-.section seoma
-.section seomb
-.section seona
-.section seonb
-.section seooa
-.section seoob
-.section seopa
-.section seopb
-.section seoqa
-.section seoqb
-.section seora
-.section seorb
-.section seosa
-.section seosb
-.section seota
-.section seotb
-.section seoua
-.section seoub
-.section seova
-.section seovb
-.section seowa
-.section seowb
-.section seoxa
-.section seoxb
-.section seoya
-.section seoyb
-.section seoza
-.section seozb
-.section seo1a
-.section seo1b
-.section seo2a
-.section seo2b
-.section seo3a
-.section seo3b
-.section seo4a
-.section seo4b
-.section seo5a
-.section seo5b
-.section seo6a
-.section seo6b
-.section seo7a
-.section seo7b
-.section seo8a
-.section seo8b
-.section seo9a
-.section seo9b
-.section seo0a
-.section seo0b
-.section sepaa
-.section sepab
-.section sepba
-.section sepbb
-.section sepca
-.section sepcb
-.section sepda
-.section sepdb
-.section sepea
-.section sepeb
-.section sepfa
-.section sepfb
-.section sepga
-.section sepgb
-.section sepha
-.section sephb
-.section sepia
-.section sepib
-.section sepja
-.section sepjb
-.section sepka
-.section sepkb
-.section sepla
-.section seplb
-.section sepma
-.section sepmb
-.section sepna
-.section sepnb
-.section sepoa
-.section sepob
-.section seppa
-.section seppb
-.section sepqa
-.section sepqb
-.section sepra
-.section seprb
-.section sepsa
-.section sepsb
-.section septa
-.section septb
-.section sepua
-.section sepub
-.section sepva
-.section sepvb
-.section sepwa
-.section sepwb
-.section sepxa
-.section sepxb
-.section sepya
-.section sepyb
-.section sepza
-.section sepzb
-.section sep1a
-.section sep1b
-.section sep2a
-.section sep2b
-.section sep3a
-.section sep3b
-.section sep4a
-.section sep4b
-.section sep5a
-.section sep5b
-.section sep6a
-.section sep6b
-.section sep7a
-.section sep7b
-.section sep8a
-.section sep8b
-.section sep9a
-.section sep9b
-.section sep0a
-.section sep0b
-.section seqaa
-.section seqab
-.section seqba
-.section seqbb
-.section seqca
-.section seqcb
-.section seqda
-.section seqdb
-.section seqea
-.section seqeb
-.section seqfa
-.section seqfb
-.section seqga
-.section seqgb
-.section seqha
-.section seqhb
-.section seqia
-.section seqib
-.section seqja
-.section seqjb
-.section seqka
-.section seqkb
-.section seqla
-.section seqlb
-.section seqma
-.section seqmb
-.section seqna
-.section seqnb
-.section seqoa
-.section seqob
-.section seqpa
-.section seqpb
-.section seqqa
-.section seqqb
-.section seqra
-.section seqrb
-.section seqsa
-.section seqsb
-.section seqta
-.section seqtb
-.section sequa
-.section sequb
-.section seqva
-.section seqvb
-.section seqwa
-.section seqwb
-.section seqxa
-.section seqxb
-.section seqya
-.section seqyb
-.section seqza
-.section seqzb
-.section seq1a
-.section seq1b
-.section seq2a
-.section seq2b
-.section seq3a
-.section seq3b
-.section seq4a
-.section seq4b
-.section seq5a
-.section seq5b
-.section seq6a
-.section seq6b
-.section seq7a
-.section seq7b
-.section seq8a
-.section seq8b
-.section seq9a
-.section seq9b
-.section seq0a
-.section seq0b
-.section seraa
-.section serab
-.section serba
-.section serbb
-.section serca
-.section sercb
-.section serda
-.section serdb
-.section serea
-.section sereb
-.section serfa
-.section serfb
-.section serga
-.section sergb
-.section serha
-.section serhb
-.section seria
-.section serib
-.section serja
-.section serjb
-.section serka
-.section serkb
-.section serla
-.section serlb
-.section serma
-.section sermb
-.section serna
-.section sernb
-.section seroa
-.section serob
-.section serpa
-.section serpb
-.section serqa
-.section serqb
-.section serra
-.section serrb
-.section sersa
-.section sersb
-.section serta
-.section sertb
-.section serua
-.section serub
-.section serva
-.section servb
-.section serwa
-.section serwb
-.section serxa
-.section serxb
-.section serya
-.section seryb
-.section serza
-.section serzb
-.section ser1a
-.section ser1b
-.section ser2a
-.section ser2b
-.section ser3a
-.section ser3b
-.section ser4a
-.section ser4b
-.section ser5a
-.section ser5b
-.section ser6a
-.section ser6b
-.section ser7a
-.section ser7b
-.section ser8a
-.section ser8b
-.section ser9a
-.section ser9b
-.section ser0a
-.section ser0b
-.section sesaa
-.section sesab
-.section sesba
-.section sesbb
-.section sesca
-.section sescb
-.section sesda
-.section sesdb
-.section sesea
-.section seseb
-.section sesfa
-.section sesfb
-.section sesga
-.section sesgb
-.section sesha
-.section seshb
-.section sesia
-.section sesib
-.section sesja
-.section sesjb
-.section seska
-.section seskb
-.section sesla
-.section seslb
-.section sesma
-.section sesmb
-.section sesna
-.section sesnb
-.section sesoa
-.section sesob
-.section sespa
-.section sespb
-.section sesqa
-.section sesqb
-.section sesra
-.section sesrb
-.section sessa
-.section sessb
-.section sesta
-.section sestb
-.section sesua
-.section sesub
-.section sesva
-.section sesvb
-.section seswa
-.section seswb
-.section sesxa
-.section sesxb
-.section sesya
-.section sesyb
-.section sesza
-.section seszb
-.section ses1a
-.section ses1b
-.section ses2a
-.section ses2b
-.section ses3a
-.section ses3b
-.section ses4a
-.section ses4b
-.section ses5a
-.section ses5b
-.section ses6a
-.section ses6b
-.section ses7a
-.section ses7b
-.section ses8a
-.section ses8b
-.section ses9a
-.section ses9b
-.section ses0a
-.section ses0b
-.section setaa
-.section setab
-.section setba
-.section setbb
-.section setca
-.section setcb
-.section setda
-.section setdb
-.section setea
-.section seteb
-.section setfa
-.section setfb
-.section setga
-.section setgb
-.section setha
-.section sethb
-.section setia
-.section setib
-.section setja
-.section setjb
-.section setka
-.section setkb
-.section setla
-.section setlb
-.section setma
-.section setmb
-.section setna
-.section setnb
-.section setoa
-.section setob
-.section setpa
-.section setpb
-.section setqa
-.section setqb
-.section setra
-.section setrb
-.section setsa
-.section setsb
-.section setta
-.section settb
-.section setua
-.section setub
-.section setva
-.section setvb
-.section setwa
-.section setwb
-.section setxa
-.section setxb
-.section setya
-.section setyb
-.section setza
-.section setzb
-.section set1a
-.section set1b
-.section set2a
-.section set2b
-.section set3a
-.section set3b
-.section set4a
-.section set4b
-.section set5a
-.section set5b
-.section set6a
-.section set6b
-.section set7a
-.section set7b
-.section set8a
-.section set8b
-.section set9a
-.section set9b
-.section set0a
-.section set0b
-.section seuaa
-.section seuab
-.section seuba
-.section seubb
-.section seuca
-.section seucb
-.section seuda
-.section seudb
-.section seuea
-.section seueb
-.section seufa
-.section seufb
-.section seuga
-.section seugb
-.section seuha
-.section seuhb
-.section seuia
-.section seuib
-.section seuja
-.section seujb
-.section seuka
-.section seukb
-.section seula
-.section seulb
-.section seuma
-.section seumb
-.section seuna
-.section seunb
-.section seuoa
-.section seuob
-.section seupa
-.section seupb
-.section seuqa
-.section seuqb
-.section seura
-.section seurb
-.section seusa
-.section seusb
-.section seuta
-.section seutb
-.section seuua
-.section seuub
-.section seuva
-.section seuvb
-.section seuwa
-.section seuwb
-.section seuxa
-.section seuxb
-.section seuya
-.section seuyb
-.section seuza
-.section seuzb
-.section seu1a
-.section seu1b
-.section seu2a
-.section seu2b
-.section seu3a
-.section seu3b
-.section seu4a
-.section seu4b
-.section seu5a
-.section seu5b
-.section seu6a
-.section seu6b
-.section seu7a
-.section seu7b
-.section seu8a
-.section seu8b
-.section seu9a
-.section seu9b
-.section seu0a
-.section seu0b
-.section sevaa
-.section sevab
-.section sevba
-.section sevbb
-.section sevca
-.section sevcb
-.section sevda
-.section sevdb
-.section sevea
-.section seveb
-.section sevfa
-.section sevfb
-.section sevga
-.section sevgb
-.section sevha
-.section sevhb
-.section sevia
-.section sevib
-.section sevja
-.section sevjb
-.section sevka
-.section sevkb
-.section sevla
-.section sevlb
-.section sevma
-.section sevmb
-.section sevna
-.section sevnb
-.section sevoa
-.section sevob
-.section sevpa
-.section sevpb
-.section sevqa
-.section sevqb
-.section sevra
-.section sevrb
-.section sevsa
-.section sevsb
-.section sevta
-.section sevtb
-.section sevua
-.section sevub
-.section sevva
-.section sevvb
-.section sevwa
-.section sevwb
-.section sevxa
-.section sevxb
-.section sevya
-.section sevyb
-.section sevza
-.section sevzb
-.section sev1a
-.section sev1b
-.section sev2a
-.section sev2b
-.section sev3a
-.section sev3b
-.section sev4a
-.section sev4b
-.section sev5a
-.section sev5b
-.section sev6a
-.section sev6b
-.section sev7a
-.section sev7b
-.section sev8a
-.section sev8b
-.section sev9a
-.section sev9b
-.section sev0a
-.section sev0b
-.section sewaa
-.section sewab
-.section sewba
-.section sewbb
-.section sewca
-.section sewcb
-.section sewda
-.section sewdb
-.section sewea
-.section seweb
-.section sewfa
-.section sewfb
-.section sewga
-.section sewgb
-.section sewha
-.section sewhb
-.section sewia
-.section sewib
-.section sewja
-.section sewjb
-.section sewka
-.section sewkb
-.section sewla
-.section sewlb
-.section sewma
-.section sewmb
-.section sewna
-.section sewnb
-.section sewoa
-.section sewob
-.section sewpa
-.section sewpb
-.section sewqa
-.section sewqb
-.section sewra
-.section sewrb
-.section sewsa
-.section sewsb
-.section sewta
-.section sewtb
-.section sewua
-.section sewub
-.section sewva
-.section sewvb
-.section sewwa
-.section sewwb
-.section sewxa
-.section sewxb
-.section sewya
-.section sewyb
-.section sewza
-.section sewzb
-.section sew1a
-.section sew1b
-.section sew2a
-.section sew2b
-.section sew3a
-.section sew3b
-.section sew4a
-.section sew4b
-.section sew5a
-.section sew5b
-.section sew6a
-.section sew6b
-.section sew7a
-.section sew7b
-.section sew8a
-.section sew8b
-.section sew9a
-.section sew9b
-.section sew0a
-.section sew0b
-.section sexaa
-.section sexab
-.section sexba
-.section sexbb
-.section sexca
-.section sexcb
-.section sexda
-.section sexdb
-.section sexea
-.section sexeb
-.section sexfa
-.section sexfb
-.section sexga
-.section sexgb
-.section sexha
-.section sexhb
-.section sexia
-.section sexib
-.section sexja
-.section sexjb
-.section sexka
-.section sexkb
-.section sexla
-.section sexlb
-.section sexma
-.section sexmb
-.section sexna
-.section sexnb
-.section sexoa
-.section sexob
-.section sexpa
-.section sexpb
-.section sexqa
-.section sexqb
-.section sexra
-.section sexrb
-.section sexsa
-.section sexsb
-.section sexta
-.section sextb
-.section sexua
-.section sexub
-.section sexva
-.section sexvb
-.section sexwa
-.section sexwb
-.section sexxa
-.section sexxb
-.section sexya
-.section sexyb
-.section sexza
-.section sexzb
-.section sex1a
-.section sex1b
-.section sex2a
-.section sex2b
-.section sex3a
-.section sex3b
-.section sex4a
-.section sex4b
-.section sex5a
-.section sex5b
-.section sex6a
-.section sex6b
-.section sex7a
-.section sex7b
-.section sex8a
-.section sex8b
-.section sex9a
-.section sex9b
-.section sex0a
-.section sex0b
-.section seyaa
-.section seyab
-.section seyba
-.section seybb
-.section seyca
-.section seycb
-.section seyda
-.section seydb
-.section seyea
-.section seyeb
-.section seyfa
-.section seyfb
-.section seyga
-.section seygb
-.section seyha
-.section seyhb
-.section seyia
-.section seyib
-.section seyja
-.section seyjb
-.section seyka
-.section seykb
-.section seyla
-.section seylb
-.section seyma
-.section seymb
-.section seyna
-.section seynb
-.section seyoa
-.section seyob
-.section seypa
-.section seypb
-.section seyqa
-.section seyqb
-.section seyra
-.section seyrb
-.section seysa
-.section seysb
-.section seyta
-.section seytb
-.section seyua
-.section seyub
-.section seyva
-.section seyvb
-.section seywa
-.section seywb
-.section seyxa
-.section seyxb
-.section seyya
-.section seyyb
-.section seyza
-.section seyzb
-.section sey1a
-.section sey1b
-.section sey2a
-.section sey2b
-.section sey3a
-.section sey3b
-.section sey4a
-.section sey4b
-.section sey5a
-.section sey5b
-.section sey6a
-.section sey6b
-.section sey7a
-.section sey7b
-.section sey8a
-.section sey8b
-.section sey9a
-.section sey9b
-.section sey0a
-.section sey0b
-.section sezaa
-.section sezab
-.section sezba
-.section sezbb
-.section sezca
-.section sezcb
-.section sezda
-.section sezdb
-.section sezea
-.section sezeb
-.section sezfa
-.section sezfb
-.section sezga
-.section sezgb
-.section sezha
-.section sezhb
-.section sezia
-.section sezib
-.section sezja
-.section sezjb
-.section sezka
-.section sezkb
-.section sezla
-.section sezlb
-.section sezma
-.section sezmb
-.section sezna
-.section seznb
-.section sezoa
-.section sezob
-.section sezpa
-.section sezpb
-.section sezqa
-.section sezqb
-.section sezra
-.section sezrb
-.section sezsa
-.section sezsb
-.section sezta
-.section seztb
-.section sezua
-.section sezub
-.section sezva
-.section sezvb
-.section sezwa
-.section sezwb
-.section sezxa
-.section sezxb
-.section sezya
-.section sezyb
-.section sezza
-.section sezzb
-.section sez1a
-.section sez1b
-.section sez2a
-.section sez2b
-.section sez3a
-.section sez3b
-.section sez4a
-.section sez4b
-.section sez5a
-.section sez5b
-.section sez6a
-.section sez6b
-.section sez7a
-.section sez7b
-.section sez8a
-.section sez8b
-.section sez9a
-.section sez9b
-.section sez0a
-.section sez0b
-.section se1aa
-.section se1ab
-.section se1ba
-.section se1bb
-.section se1ca
-.section se1cb
-.section se1da
-.section se1db
-.section se1ea
-.section se1eb
-.section se1fa
-.section se1fb
-.section se1ga
-.section se1gb
-.section se1ha
-.section se1hb
-.section se1ia
-.section se1ib
-.section se1ja
-.section se1jb
-.section se1ka
-.section se1kb
-.section se1la
-.section se1lb
-.section se1ma
-.section se1mb
-.section se1na
-.section se1nb
-.section se1oa
-.section se1ob
-.section se1pa
-.section se1pb
-.section se1qa
-.section se1qb
-.section se1ra
-.section se1rb
-.section se1sa
-.section se1sb
-.section se1ta
-.section se1tb
-.section se1ua
-.section se1ub
-.section se1va
-.section se1vb
-.section se1wa
-.section se1wb
-.section se1xa
-.section se1xb
-.section se1ya
-.section se1yb
-.section se1za
-.section se1zb
-.section se11a
-.section se11b
-.section se12a
-.section se12b
-.section se13a
-.section se13b
-.section se14a
-.section se14b
-.section se15a
-.section se15b
-.section se16a
-.section se16b
-.section se17a
-.section se17b
-.section se18a
-.section se18b
-.section se19a
-.section se19b
-.section se10a
-.section se10b
-.section se2aa
-.section se2ab
-.section se2ba
-.section se2bb
-.section se2ca
-.section se2cb
-.section se2da
-.section se2db
-.section se2ea
-.section se2eb
-.section se2fa
-.section se2fb
-.section se2ga
-.section se2gb
-.section se2ha
-.section se2hb
-.section se2ia
-.section se2ib
-.section se2ja
-.section se2jb
-.section se2ka
-.section se2kb
-.section se2la
-.section se2lb
-.section se2ma
-.section se2mb
-.section se2na
-.section se2nb
-.section se2oa
-.section se2ob
-.section se2pa
-.section se2pb
-.section se2qa
-.section se2qb
-.section se2ra
-.section se2rb
-.section se2sa
-.section se2sb
-.section se2ta
-.section se2tb
-.section se2ua
-.section se2ub
-.section se2va
-.section se2vb
-.section se2wa
-.section se2wb
-.section se2xa
-.section se2xb
-.section se2ya
-.section se2yb
-.section se2za
-.section se2zb
-.section se21a
-.section se21b
-.section se22a
-.section se22b
-.section se23a
-.section se23b
-.section se24a
-.section se24b
-.section se25a
-.section se25b
-.section se26a
-.section se26b
-.section se27a
-.section se27b
-.section se28a
-.section se28b
-.section se29a
-.section se29b
-.section se20a
-.section se20b
-.section se3aa
-.section se3ab
-.section se3ba
-.section se3bb
-.section se3ca
-.section se3cb
-.section se3da
-.section se3db
-.section se3ea
-.section se3eb
-.section se3fa
-.section se3fb
-.section se3ga
-.section se3gb
-.section se3ha
-.section se3hb
-.section se3ia
-.section se3ib
-.section se3ja
-.section se3jb
-.section se3ka
-.section se3kb
-.section se3la
-.section se3lb
-.section se3ma
-.section se3mb
-.section se3na
-.section se3nb
-.section se3oa
-.section se3ob
-.section se3pa
-.section se3pb
-.section se3qa
-.section se3qb
-.section se3ra
-.section se3rb
-.section se3sa
-.section se3sb
-.section se3ta
-.section se3tb
-.section se3ua
-.section se3ub
-.section se3va
-.section se3vb
-.section se3wa
-.section se3wb
-.section se3xa
-.section se3xb
-.section se3ya
-.section se3yb
-.section se3za
-.section se3zb
-.section se31a
-.section se31b
-.section se32a
-.section se32b
-.section se33a
-.section se33b
-.section se34a
-.section se34b
-.section se35a
-.section se35b
-.section se36a
-.section se36b
-.section se37a
-.section se37b
-.section se38a
-.section se38b
-.section se39a
-.section se39b
-.section se30a
-.section se30b
-.section se4aa
-.section se4ab
-.section se4ba
-.section se4bb
-.section se4ca
-.section se4cb
-.section se4da
-.section se4db
-.section se4ea
-.section se4eb
-.section se4fa
-.section se4fb
-.section se4ga
-.section se4gb
-.section se4ha
-.section se4hb
-.section se4ia
-.section se4ib
-.section se4ja
-.section se4jb
-.section se4ka
-.section se4kb
-.section se4la
-.section se4lb
-.section se4ma
-.section se4mb
-.section se4na
-.section se4nb
-.section se4oa
-.section se4ob
-.section se4pa
-.section se4pb
-.section se4qa
-.section se4qb
-.section se4ra
-.section se4rb
-.section se4sa
-.section se4sb
-.section se4ta
-.section se4tb
-.section se4ua
-.section se4ub
-.section se4va
-.section se4vb
-.section se4wa
-.section se4wb
-.section se4xa
-.section se4xb
-.section se4ya
-.section se4yb
-.section se4za
-.section se4zb
-.section se41a
-.section se41b
-.section se42a
-.section se42b
-.section se43a
-.section se43b
-.section se44a
-.section se44b
-.section se45a
-.section se45b
-.section se46a
-.section se46b
-.section se47a
-.section se47b
-.section se48a
-.section se48b
-.section se49a
-.section se49b
-.section se40a
-.section se40b
-.section se5aa
-.section se5ab
-.section se5ba
-.section se5bb
-.section se5ca
-.section se5cb
-.section se5da
-.section se5db
-.section se5ea
-.section se5eb
-.section se5fa
-.section se5fb
-.section se5ga
-.section se5gb
-.section se5ha
-.section se5hb
-.section se5ia
-.section se5ib
-.section se5ja
-.section se5jb
-.section se5ka
-.section se5kb
-.section se5la
-.section se5lb
-.section se5ma
-.section se5mb
-.section se5na
-.section se5nb
-.section se5oa
-.section se5ob
-.section se5pa
-.section se5pb
-.section se5qa
-.section se5qb
-.section se5ra
-.section se5rb
-.section se5sa
-.section se5sb
-.section se5ta
-.section se5tb
-.section se5ua
-.section se5ub
-.section se5va
-.section se5vb
-.section se5wa
-.section se5wb
-.section se5xa
-.section se5xb
-.section se5ya
-.section se5yb
-.section se5za
-.section se5zb
-.section se51a
-.section se51b
-.section se52a
-.section se52b
-.section se53a
-.section se53b
-.section se54a
-.section se54b
-.section se55a
-.section se55b
-.section se56a
-.section se56b
-.section se57a
-.section se57b
-.section se58a
-.section se58b
-.section se59a
-.section se59b
-.section se50a
-.section se50b
-.section se6aa
-.section se6ab
-.section se6ba
-.section se6bb
-.section se6ca
-.section se6cb
-.section se6da
-.section se6db
-.section se6ea
-.section se6eb
-.section se6fa
-.section se6fb
-.section se6ga
-.section se6gb
-.section se6ha
-.section se6hb
-.section se6ia
-.section se6ib
-.section se6ja
-.section se6jb
-.section se6ka
-.section se6kb
-.section se6la
-.section se6lb
-.section se6ma
-.section se6mb
-.section se6na
-.section se6nb
-.section se6oa
-.section se6ob
-.section se6pa
-.section se6pb
-.section se6qa
-.section se6qb
-.section se6ra
-.section se6rb
-.section se6sa
-.section se6sb
-.section se6ta
-.section se6tb
-.section se6ua
-.section se6ub
-.section se6va
-.section se6vb
-.section se6wa
-.section se6wb
-.section se6xa
-.section se6xb
-.section se6ya
-.section se6yb
-.section se6za
-.section se6zb
-.section se61a
-.section se61b
-.section se62a
-.section se62b
-.section se63a
-.section se63b
-.section se64a
-.section se64b
-.section se65a
-.section se65b
-.section se66a
-.section se66b
-.section se67a
-.section se67b
-.section se68a
-.section se68b
-.section se69a
-.section se69b
-.section se60a
-.section se60b
-.section se7aa
-.section se7ab
-.section se7ba
-.section se7bb
-.section se7ca
-.section se7cb
-.section se7da
-.section se7db
-.section se7ea
-.section se7eb
-.section se7fa
-.section se7fb
-.section se7ga
-.section se7gb
-.section se7ha
-.section se7hb
-.section se7ia
-.section se7ib
-.section se7ja
-.section se7jb
-.section se7ka
-.section se7kb
-.section se7la
-.section se7lb
-.section se7ma
-.section se7mb
-.section se7na
-.section se7nb
-.section se7oa
-.section se7ob
-.section se7pa
-.section se7pb
-.section se7qa
-.section se7qb
-.section se7ra
-.section se7rb
-.section se7sa
-.section se7sb
-.section se7ta
-.section se7tb
-.section se7ua
-.section se7ub
-.section se7va
-.section se7vb
-.section se7wa
-.section se7wb
-.section se7xa
-.section se7xb
-.section se7ya
-.section se7yb
-.section se7za
-.section se7zb
-.section se71a
-.section se71b
-.section se72a
-.section se72b
-.section se73a
-.section se73b
-.section se74a
-.section se74b
-.section se75a
-.section se75b
-.section se76a
-.section se76b
-.section se77a
-.section se77b
-.section se78a
-.section se78b
-.section se79a
-.section se79b
-.section se70a
-.section se70b
-.section se8aa
-.section se8ab
-.section se8ba
-.section se8bb
-.section se8ca
-.section se8cb
-.section se8da
-.section se8db
-.section se8ea
-.section se8eb
-.section se8fa
-.section se8fb
-.section se8ga
-.section se8gb
-.section se8ha
-.section se8hb
-.section se8ia
-.section se8ib
-.section se8ja
-.section se8jb
-.section se8ka
-.section se8kb
-.section se8la
-.section se8lb
-.section se8ma
-.section se8mb
-.section se8na
-.section se8nb
-.section se8oa
-.section se8ob
-.section se8pa
-.section se8pb
-.section se8qa
-.section se8qb
-.section se8ra
-.section se8rb
-.section se8sa
-.section se8sb
-.section se8ta
-.section se8tb
-.section se8ua
-.section se8ub
-.section se8va
-.section se8vb
-.section se8wa
-.section se8wb
-.section se8xa
-.section se8xb
-.section se8ya
-.section se8yb
-.section se8za
-.section se8zb
-.section se81a
-.section se81b
-.section se82a
-.section se82b
-.section se83a
-.section se83b
-.section se84a
-.section se84b
-.section se85a
-.section se85b
-.section se86a
-.section se86b
-.section se87a
-.section se87b
-.section se88a
-.section se88b
-.section se89a
-.section se89b
-.section se80a
-.section se80b
-.section se9aa
-.section se9ab
-.section se9ba
-.section se9bb
-.section se9ca
-.section se9cb
-.section se9da
-.section se9db
-.section se9ea
-.section se9eb
-.section se9fa
-.section se9fb
-.section se9ga
-.section se9gb
-.section se9ha
-.section se9hb
-.section se9ia
-.section se9ib
-.section se9ja
-.section se9jb
-.section se9ka
-.section se9kb
-.section se9la
-.section se9lb
-.section se9ma
-.section se9mb
-.section se9na
-.section se9nb
-.section se9oa
-.section se9ob
-.section se9pa
-.section se9pb
-.section se9qa
-.section se9qb
-.section se9ra
-.section se9rb
-.section se9sa
-.section se9sb
-.section se9ta
-.section se9tb
-.section se9ua
-.section se9ub
-.section se9va
-.section se9vb
-.section se9wa
-.section se9wb
-.section se9xa
-.section se9xb
-.section se9ya
-.section se9yb
-.section se9za
-.section se9zb
-.section se91a
-.section se91b
-.section se92a
-.section se92b
-.section se93a
-.section se93b
-.section se94a
-.section se94b
-.section se95a
-.section se95b
-.section se96a
-.section se96b
-.section se97a
-.section se97b
-.section se98a
-.section se98b
-.section se99a
-.section se99b
-.section se90a
-.section se90b
-.section se0aa
-.section se0ab
-.section se0ba
-.section se0bb
-.section se0ca
-.section se0cb
-.section se0da
-.section se0db
-.section se0ea
-.section se0eb
-.section se0fa
-.section se0fb
-.section se0ga
-.section se0gb
-.section se0ha
-.section se0hb
-.section se0ia
-.section se0ib
-.section se0ja
-.section se0jb
-.section se0ka
-.section se0kb
-.section se0la
-.section se0lb
-.section se0ma
-.section se0mb
-.section se0na
-.section se0nb
-.section se0oa
-.section se0ob
-.section se0pa
-.section se0pb
-.section se0qa
-.section se0qb
-.section se0ra
-.section se0rb
-.section se0sa
-.section se0sb
-.section se0ta
-.section se0tb
-.section se0ua
-.section se0ub
-.section se0va
-.section se0vb
-.section se0wa
-.section se0wb
-.section se0xa
-.section se0xb
-.section se0ya
-.section se0yb
-.section se0za
-.section se0zb
-.section se01a
-.section se01b
-.section se02a
-.section se02b
-.section se03a
-.section se03b
-.section se04a
-.section se04b
-.section se05a
-.section se05b
-.section se06a
-.section se06b
-.section se07a
-.section se07b
-.section se08a
-.section se08b
-.section se09a
-.section se09b
-.section se00a
-.section se00b
-.section sfaaa
-.section sfaab
-.section sfaba
-.section sfabb
-.section sfaca
-.section sfacb
-.section sfada
-.section sfadb
-.section sfaea
-.section sfaeb
-.section sfafa
-.section sfafb
-.section sfaga
-.section sfagb
-.section sfaha
-.section sfahb
-.section sfaia
-.section sfaib
-.section sfaja
-.section sfajb
-.section sfaka
-.section sfakb
-.section sfala
-.section sfalb
-.section sfama
-.section sfamb
-.section sfana
-.section sfanb
-.section sfaoa
-.section sfaob
-.section sfapa
-.section sfapb
-.section sfaqa
-.section sfaqb
-.section sfara
-.section sfarb
-.section sfasa
-.section sfasb
-.section sfata
-.section sfatb
-.section sfaua
-.section sfaub
-.section sfava
-.section sfavb
-.section sfawa
-.section sfawb
-.section sfaxa
-.section sfaxb
-.section sfaya
-.section sfayb
-.section sfaza
-.section sfazb
-.section sfa1a
-.section sfa1b
-.section sfa2a
-.section sfa2b
-.section sfa3a
-.section sfa3b
-.section sfa4a
-.section sfa4b
-.section sfa5a
-.section sfa5b
-.section sfa6a
-.section sfa6b
-.section sfa7a
-.section sfa7b
-.section sfa8a
-.section sfa8b
-.section sfa9a
-.section sfa9b
-.section sfa0a
-.section sfa0b
-.section sfbaa
-.section sfbab
-.section sfbba
-.section sfbbb
-.section sfbca
-.section sfbcb
-.section sfbda
-.section sfbdb
-.section sfbea
-.section sfbeb
-.section sfbfa
-.section sfbfb
-.section sfbga
-.section sfbgb
-.section sfbha
-.section sfbhb
-.section sfbia
-.section sfbib
-.section sfbja
-.section sfbjb
-.section sfbka
-.section sfbkb
-.section sfbla
-.section sfblb
-.section sfbma
-.section sfbmb
-.section sfbna
-.section sfbnb
-.section sfboa
-.section sfbob
-.section sfbpa
-.section sfbpb
-.section sfbqa
-.section sfbqb
-.section sfbra
-.section sfbrb
-.section sfbsa
-.section sfbsb
-.section sfbta
-.section sfbtb
-.section sfbua
-.section sfbub
-.section sfbva
-.section sfbvb
-.section sfbwa
-.section sfbwb
-.section sfbxa
-.section sfbxb
-.section sfbya
-.section sfbyb
-.section sfbza
-.section sfbzb
-.section sfb1a
-.section sfb1b
-.section sfb2a
-.section sfb2b
-.section sfb3a
-.section sfb3b
-.section sfb4a
-.section sfb4b
-.section sfb5a
-.section sfb5b
-.section sfb6a
-.section sfb6b
-.section sfb7a
-.section sfb7b
-.section sfb8a
-.section sfb8b
-.section sfb9a
-.section sfb9b
-.section sfb0a
-.section sfb0b
-.section sfcaa
-.section sfcab
-.section sfcba
-.section sfcbb
-.section sfcca
-.section sfccb
-.section sfcda
-.section sfcdb
-.section sfcea
-.section sfceb
-.section sfcfa
-.section sfcfb
-.section sfcga
-.section sfcgb
-.section sfcha
-.section sfchb
-.section sfcia
-.section sfcib
-.section sfcja
-.section sfcjb
-.section sfcka
-.section sfckb
-.section sfcla
-.section sfclb
-.section sfcma
-.section sfcmb
-.section sfcna
-.section sfcnb
-.section sfcoa
-.section sfcob
-.section sfcpa
-.section sfcpb
-.section sfcqa
-.section sfcqb
-.section sfcra
-.section sfcrb
-.section sfcsa
-.section sfcsb
-.section sfcta
-.section sfctb
-.section sfcua
-.section sfcub
-.section sfcva
-.section sfcvb
-.section sfcwa
-.section sfcwb
-.section sfcxa
-.section sfcxb
-.section sfcya
-.section sfcyb
-.section sfcza
-.section sfczb
-.section sfc1a
-.section sfc1b
-.section sfc2a
-.section sfc2b
-.section sfc3a
-.section sfc3b
-.section sfc4a
-.section sfc4b
-.section sfc5a
-.section sfc5b
-.section sfc6a
-.section sfc6b
-.section sfc7a
-.section sfc7b
-.section sfc8a
-.section sfc8b
-.section sfc9a
-.section sfc9b
-.section sfc0a
-.section sfc0b
-.section sfdaa
-.section sfdab
-.section sfdba
-.section sfdbb
-.section sfdca
-.section sfdcb
-.section sfdda
-.section sfddb
-.section sfdea
-.section sfdeb
-.section sfdfa
-.section sfdfb
-.section sfdga
-.section sfdgb
-.section sfdha
-.section sfdhb
-.section sfdia
-.section sfdib
-.section sfdja
-.section sfdjb
-.section sfdka
-.section sfdkb
-.section sfdla
-.section sfdlb
-.section sfdma
-.section sfdmb
-.section sfdna
-.section sfdnb
-.section sfdoa
-.section sfdob
-.section sfdpa
-.section sfdpb
-.section sfdqa
-.section sfdqb
-.section sfdra
-.section sfdrb
-.section sfdsa
-.section sfdsb
-.section sfdta
-.section sfdtb
-.section sfdua
-.section sfdub
-.section sfdva
-.section sfdvb
-.section sfdwa
-.section sfdwb
-.section sfdxa
-.section sfdxb
-.section sfdya
-.section sfdyb
-.section sfdza
-.section sfdzb
-.section sfd1a
-.section sfd1b
-.section sfd2a
-.section sfd2b
-.section sfd3a
-.section sfd3b
-.section sfd4a
-.section sfd4b
-.section sfd5a
-.section sfd5b
-.section sfd6a
-.section sfd6b
-.section sfd7a
-.section sfd7b
-.section sfd8a
-.section sfd8b
-.section sfd9a
-.section sfd9b
-.section sfd0a
-.section sfd0b
-.section sfeaa
-.section sfeab
-.section sfeba
-.section sfebb
-.section sfeca
-.section sfecb
-.section sfeda
-.section sfedb
-.section sfeea
-.section sfeeb
-.section sfefa
-.section sfefb
-.section sfega
-.section sfegb
-.section sfeha
-.section sfehb
-.section sfeia
-.section sfeib
-.section sfeja
-.section sfejb
-.section sfeka
-.section sfekb
-.section sfela
-.section sfelb
-.section sfema
-.section sfemb
-.section sfena
-.section sfenb
-.section sfeoa
-.section sfeob
-.section sfepa
-.section sfepb
-.section sfeqa
-.section sfeqb
-.section sfera
-.section sferb
-.section sfesa
-.section sfesb
-.section sfeta
-.section sfetb
-.section sfeua
-.section sfeub
-.section sfeva
-.section sfevb
-.section sfewa
-.section sfewb
-.section sfexa
-.section sfexb
-.section sfeya
-.section sfeyb
-.section sfeza
-.section sfezb
-.section sfe1a
-.section sfe1b
-.section sfe2a
-.section sfe2b
-.section sfe3a
-.section sfe3b
-.section sfe4a
-.section sfe4b
-.section sfe5a
-.section sfe5b
-.section sfe6a
-.section sfe6b
-.section sfe7a
-.section sfe7b
-.section sfe8a
-.section sfe8b
-.section sfe9a
-.section sfe9b
-.section sfe0a
-.section sfe0b
-.section sffaa
-.section sffab
-.section sffba
-.section sffbb
-.section sffca
-.section sffcb
-.section sffda
-.section sffdb
-.section sffea
-.section sffeb
-.section sfffa
-.section sfffb
-.section sffga
-.section sffgb
-.section sffha
-.section sffhb
-.section sffia
-.section sffib
-.section sffja
-.section sffjb
-.section sffka
-.section sffkb
-.section sffla
-.section sfflb
-.section sffma
-.section sffmb
-.section sffna
-.section sffnb
-.section sffoa
-.section sffob
-.section sffpa
-.section sffpb
-.section sffqa
-.section sffqb
-.section sffra
-.section sffrb
-.section sffsa
-.section sffsb
-.section sffta
-.section sfftb
-.section sffua
-.section sffub
-.section sffva
-.section sffvb
-.section sffwa
-.section sffwb
-.section sffxa
-.section sffxb
-.section sffya
-.section sffyb
-.section sffza
-.section sffzb
-.section sff1a
-.section sff1b
-.section sff2a
-.section sff2b
-.section sff3a
-.section sff3b
-.section sff4a
-.section sff4b
-.section sff5a
-.section sff5b
-.section sff6a
-.section sff6b
-.section sff7a
-.section sff7b
-.section sff8a
-.section sff8b
-.section sff9a
-.section sff9b
-.section sff0a
-.section sff0b
-.section sfgaa
-.section sfgab
-.section sfgba
-.section sfgbb
-.section sfgca
-.section sfgcb
-.section sfgda
-.section sfgdb
-.section sfgea
-.section sfgeb
-.section sfgfa
-.section sfgfb
-.section sfgga
-.section sfggb
-.section sfgha
-.section sfghb
-.section sfgia
-.section sfgib
-.section sfgja
-.section sfgjb
-.section sfgka
-.section sfgkb
-.section sfgla
-.section sfglb
-.section sfgma
-.section sfgmb
-.section sfgna
-.section sfgnb
-.section sfgoa
-.section sfgob
-.section sfgpa
-.section sfgpb
-.section sfgqa
-.section sfgqb
-.section sfgra
-.section sfgrb
-.section sfgsa
-.section sfgsb
-.section sfgta
-.section sfgtb
-.section sfgua
-.section sfgub
-.section sfgva
-.section sfgvb
-.section sfgwa
-.section sfgwb
-.section sfgxa
-.section sfgxb
-.section sfgya
-.section sfgyb
-.section sfgza
-.section sfgzb
-.section sfg1a
-.section sfg1b
-.section sfg2a
-.section sfg2b
-.section sfg3a
-.section sfg3b
-.section sfg4a
-.section sfg4b
-.section sfg5a
-.section sfg5b
-.section sfg6a
-.section sfg6b
-.section sfg7a
-.section sfg7b
-.section sfg8a
-.section sfg8b
-.section sfg9a
-.section sfg9b
-.section sfg0a
-.section sfg0b
-.section sfhaa
-.section sfhab
-.section sfhba
-.section sfhbb
-.section sfhca
-.section sfhcb
-.section sfhda
-.section sfhdb
-.section sfhea
-.section sfheb
-.section sfhfa
-.section sfhfb
-.section sfhga
-.section sfhgb
-.section sfhha
-.section sfhhb
-.section sfhia
-.section sfhib
-.section sfhja
-.section sfhjb
-.section sfhka
-.section sfhkb
-.section sfhla
-.section sfhlb
-.section sfhma
-.section sfhmb
-.section sfhna
-.section sfhnb
-.section sfhoa
-.section sfhob
-.section sfhpa
-.section sfhpb
-.section sfhqa
-.section sfhqb
-.section sfhra
-.section sfhrb
-.section sfhsa
-.section sfhsb
-.section sfhta
-.section sfhtb
-.section sfhua
-.section sfhub
-.section sfhva
-.section sfhvb
-.section sfhwa
-.section sfhwb
-.section sfhxa
-.section sfhxb
-.section sfhya
-.section sfhyb
-.section sfhza
-.section sfhzb
-.section sfh1a
-.section sfh1b
-.section sfh2a
-.section sfh2b
-.section sfh3a
-.section sfh3b
-.section sfh4a
-.section sfh4b
-.section sfh5a
-.section sfh5b
-.section sfh6a
-.section sfh6b
-.section sfh7a
-.section sfh7b
-.section sfh8a
-.section sfh8b
-.section sfh9a
-.section sfh9b
-.section sfh0a
-.section sfh0b
-.section sfiaa
-.section sfiab
-.section sfiba
-.section sfibb
-.section sfica
-.section sficb
-.section sfida
-.section sfidb
-.section sfiea
-.section sfieb
-.section sfifa
-.section sfifb
-.section sfiga
-.section sfigb
-.section sfiha
-.section sfihb
-.section sfiia
-.section sfiib
-.section sfija
-.section sfijb
-.section sfika
-.section sfikb
-.section sfila
-.section sfilb
-.section sfima
-.section sfimb
-.section sfina
-.section sfinb
-.section sfioa
-.section sfiob
-.section sfipa
-.section sfipb
-.section sfiqa
-.section sfiqb
-.section sfira
-.section sfirb
-.section sfisa
-.section sfisb
-.section sfita
-.section sfitb
-.section sfiua
-.section sfiub
-.section sfiva
-.section sfivb
-.section sfiwa
-.section sfiwb
-.section sfixa
-.section sfixb
-.section sfiya
-.section sfiyb
-.section sfiza
-.section sfizb
-.section sfi1a
-.section sfi1b
-.section sfi2a
-.section sfi2b
-.section sfi3a
-.section sfi3b
-.section sfi4a
-.section sfi4b
-.section sfi5a
-.section sfi5b
-.section sfi6a
-.section sfi6b
-.section sfi7a
-.section sfi7b
-.section sfi8a
-.section sfi8b
-.section sfi9a
-.section sfi9b
-.section sfi0a
-.section sfi0b
-.section sfjaa
-.section sfjab
-.section sfjba
-.section sfjbb
-.section sfjca
-.section sfjcb
-.section sfjda
-.section sfjdb
-.section sfjea
-.section sfjeb
-.section sfjfa
-.section sfjfb
-.section sfjga
-.section sfjgb
-.section sfjha
-.section sfjhb
-.section sfjia
-.section sfjib
-.section sfjja
-.section sfjjb
-.section sfjka
-.section sfjkb
-.section sfjla
-.section sfjlb
-.section sfjma
-.section sfjmb
-.section sfjna
-.section sfjnb
-.section sfjoa
-.section sfjob
-.section sfjpa
-.section sfjpb
-.section sfjqa
-.section sfjqb
-.section sfjra
-.section sfjrb
-.section sfjsa
-.section sfjsb
-.section sfjta
-.section sfjtb
-.section sfjua
-.section sfjub
-.section sfjva
-.section sfjvb
-.section sfjwa
-.section sfjwb
-.section sfjxa
-.section sfjxb
-.section sfjya
-.section sfjyb
-.section sfjza
-.section sfjzb
-.section sfj1a
-.section sfj1b
-.section sfj2a
-.section sfj2b
-.section sfj3a
-.section sfj3b
-.section sfj4a
-.section sfj4b
-.section sfj5a
-.section sfj5b
-.section sfj6a
-.section sfj6b
-.section sfj7a
-.section sfj7b
-.section sfj8a
-.section sfj8b
-.section sfj9a
-.section sfj9b
-.section sfj0a
-.section sfj0b
-.section sfkaa
-.section sfkab
-.section sfkba
-.section sfkbb
-.section sfkca
-.section sfkcb
-.section sfkda
-.section sfkdb
-.section sfkea
-.section sfkeb
-.section sfkfa
-.section sfkfb
-.section sfkga
-.section sfkgb
-.section sfkha
-.section sfkhb
-.section sfkia
-.section sfkib
-.section sfkja
-.section sfkjb
-.section sfkka
-.section sfkkb
-.section sfkla
-.section sfklb
-.section sfkma
-.section sfkmb
-.section sfkna
-.section sfknb
-.section sfkoa
-.section sfkob
-.section sfkpa
-.section sfkpb
-.section sfkqa
-.section sfkqb
-.section sfkra
-.section sfkrb
-.section sfksa
-.section sfksb
-.section sfkta
-.section sfktb
-.section sfkua
-.section sfkub
-.section sfkva
-.section sfkvb
-.section sfkwa
-.section sfkwb
-.section sfkxa
-.section sfkxb
-.section sfkya
-.section sfkyb
-.section sfkza
-.section sfkzb
-.section sfk1a
-.section sfk1b
-.section sfk2a
-.section sfk2b
-.section sfk3a
-.section sfk3b
-.section sfk4a
-.section sfk4b
-.section sfk5a
-.section sfk5b
-.section sfk6a
-.section sfk6b
-.section sfk7a
-.section sfk7b
-.section sfk8a
-.section sfk8b
-.section sfk9a
-.section sfk9b
-.section sfk0a
-.section sfk0b
-.section sflaa
-.section sflab
-.section sflba
-.section sflbb
-.section sflca
-.section sflcb
-.section sflda
-.section sfldb
-.section sflea
-.section sfleb
-.section sflfa
-.section sflfb
-.section sflga
-.section sflgb
-.section sflha
-.section sflhb
-.section sflia
-.section sflib
-.section sflja
-.section sfljb
-.section sflka
-.section sflkb
-.section sflla
-.section sfllb
-.section sflma
-.section sflmb
-.section sflna
-.section sflnb
-.section sfloa
-.section sflob
-.section sflpa
-.section sflpb
-.section sflqa
-.section sflqb
-.section sflra
-.section sflrb
-.section sflsa
-.section sflsb
-.section sflta
-.section sfltb
-.section sflua
-.section sflub
-.section sflva
-.section sflvb
-.section sflwa
-.section sflwb
-.section sflxa
-.section sflxb
-.section sflya
-.section sflyb
-.section sflza
-.section sflzb
-.section sfl1a
-.section sfl1b
-.section sfl2a
-.section sfl2b
-.section sfl3a
-.section sfl3b
-.section sfl4a
-.section sfl4b
-.section sfl5a
-.section sfl5b
-.section sfl6a
-.section sfl6b
-.section sfl7a
-.section sfl7b
-.section sfl8a
-.section sfl8b
-.section sfl9a
-.section sfl9b
-.section sfl0a
-.section sfl0b
-.section sfmaa
-.section sfmab
-.section sfmba
-.section sfmbb
-.section sfmca
-.section sfmcb
-.section sfmda
-.section sfmdb
-.section sfmea
-.section sfmeb
-.section sfmfa
-.section sfmfb
-.section sfmga
-.section sfmgb
-.section sfmha
-.section sfmhb
-.section sfmia
-.section sfmib
-.section sfmja
-.section sfmjb
-.section sfmka
-.section sfmkb
-.section sfmla
-.section sfmlb
-.section sfmma
-.section sfmmb
-.section sfmna
-.section sfmnb
-.section sfmoa
-.section sfmob
-.section sfmpa
-.section sfmpb
-.section sfmqa
-.section sfmqb
-.section sfmra
-.section sfmrb
-.section sfmsa
-.section sfmsb
-.section sfmta
-.section sfmtb
-.section sfmua
-.section sfmub
-.section sfmva
-.section sfmvb
-.section sfmwa
-.section sfmwb
-.section sfmxa
-.section sfmxb
-.section sfmya
-.section sfmyb
-.section sfmza
-.section sfmzb
-.section sfm1a
-.section sfm1b
-.section sfm2a
-.section sfm2b
-.section sfm3a
-.section sfm3b
-.section sfm4a
-.section sfm4b
-.section sfm5a
-.section sfm5b
-.section sfm6a
-.section sfm6b
-.section sfm7a
-.section sfm7b
-.section sfm8a
-.section sfm8b
-.section sfm9a
-.section sfm9b
-.section sfm0a
-.section sfm0b
-.section sfnaa
-.section sfnab
-.section sfnba
-.section sfnbb
-.section sfnca
-.section sfncb
-.section sfnda
-.section sfndb
-.section sfnea
-.section sfneb
-.section sfnfa
-.section sfnfb
-.section sfnga
-.section sfngb
-.section sfnha
-.section sfnhb
-.section sfnia
-.section sfnib
-.section sfnja
-.section sfnjb
-.section sfnka
-.section sfnkb
-.section sfnla
-.section sfnlb
-.section sfnma
-.section sfnmb
-.section sfnna
-.section sfnnb
-.section sfnoa
-.section sfnob
-.section sfnpa
-.section sfnpb
-.section sfnqa
-.section sfnqb
-.section sfnra
-.section sfnrb
-.section sfnsa
-.section sfnsb
-.section sfnta
-.section sfntb
-.section sfnua
-.section sfnub
-.section sfnva
-.section sfnvb
-.section sfnwa
-.section sfnwb
-.section sfnxa
-.section sfnxb
-.section sfnya
-.section sfnyb
-.section sfnza
-.section sfnzb
-.section sfn1a
-.section sfn1b
-.section sfn2a
-.section sfn2b
-.section sfn3a
-.section sfn3b
-.section sfn4a
-.section sfn4b
-.section sfn5a
-.section sfn5b
-.section sfn6a
-.section sfn6b
-.section sfn7a
-.section sfn7b
-.section sfn8a
-.section sfn8b
-.section sfn9a
-.section sfn9b
-.section sfn0a
-.section sfn0b
-.section sfoaa
-.section sfoab
-.section sfoba
-.section sfobb
-.section sfoca
-.section sfocb
-.section sfoda
-.section sfodb
-.section sfoea
-.section sfoeb
-.section sfofa
-.section sfofb
-.section sfoga
-.section sfogb
-.section sfoha
-.section sfohb
-.section sfoia
-.section sfoib
-.section sfoja
-.section sfojb
-.section sfoka
-.section sfokb
-.section sfola
-.section sfolb
-.section sfoma
-.section sfomb
-.section sfona
-.section sfonb
-.section sfooa
-.section sfoob
-.section sfopa
-.section sfopb
-.section sfoqa
-.section sfoqb
-.section sfora
-.section sforb
-.section sfosa
-.section sfosb
-.section sfota
-.section sfotb
-.section sfoua
-.section sfoub
-.section sfova
-.section sfovb
-.section sfowa
-.section sfowb
-.section sfoxa
-.section sfoxb
-.section sfoya
-.section sfoyb
-.section sfoza
-.section sfozb
-.section sfo1a
-.section sfo1b
-.section sfo2a
-.section sfo2b
-.section sfo3a
-.section sfo3b
-.section sfo4a
-.section sfo4b
-.section sfo5a
-.section sfo5b
-.section sfo6a
-.section sfo6b
-.section sfo7a
-.section sfo7b
-.section sfo8a
-.section sfo8b
-.section sfo9a
-.section sfo9b
-.section sfo0a
-.section sfo0b
-.section sfpaa
-.section sfpab
-.section sfpba
-.section sfpbb
-.section sfpca
-.section sfpcb
-.section sfpda
-.section sfpdb
-.section sfpea
-.section sfpeb
-.section sfpfa
-.section sfpfb
-.section sfpga
-.section sfpgb
-.section sfpha
-.section sfphb
-.section sfpia
-.section sfpib
-.section sfpja
-.section sfpjb
-.section sfpka
-.section sfpkb
-.section sfpla
-.section sfplb
-.section sfpma
-.section sfpmb
-.section sfpna
-.section sfpnb
-.section sfpoa
-.section sfpob
-.section sfppa
-.section sfppb
-.section sfpqa
-.section sfpqb
-.section sfpra
-.section sfprb
-.section sfpsa
-.section sfpsb
-.section sfpta
-.section sfptb
-.section sfpua
-.section sfpub
-.section sfpva
-.section sfpvb
-.section sfpwa
-.section sfpwb
-.section sfpxa
-.section sfpxb
-.section sfpya
-.section sfpyb
-.section sfpza
-.section sfpzb
-.section sfp1a
-.section sfp1b
-.section sfp2a
-.section sfp2b
-.section sfp3a
-.section sfp3b
-.section sfp4a
-.section sfp4b
-.section sfp5a
-.section sfp5b
-.section sfp6a
-.section sfp6b
-.section sfp7a
-.section sfp7b
-.section sfp8a
-.section sfp8b
-.section sfp9a
-.section sfp9b
-.section sfp0a
-.section sfp0b
-.section sfqaa
-.section sfqab
-.section sfqba
-.section sfqbb
-.section sfqca
-.section sfqcb
-.section sfqda
-.section sfqdb
-.section sfqea
-.section sfqeb
-.section sfqfa
-.section sfqfb
-.section sfqga
-.section sfqgb
-.section sfqha
-.section sfqhb
-.section sfqia
-.section sfqib
-.section sfqja
-.section sfqjb
-.section sfqka
-.section sfqkb
-.section sfqla
-.section sfqlb
-.section sfqma
-.section sfqmb
-.section sfqna
-.section sfqnb
-.section sfqoa
-.section sfqob
-.section sfqpa
-.section sfqpb
-.section sfqqa
-.section sfqqb
-.section sfqra
-.section sfqrb
-.section sfqsa
-.section sfqsb
-.section sfqta
-.section sfqtb
-.section sfqua
-.section sfqub
-.section sfqva
-.section sfqvb
-.section sfqwa
-.section sfqwb
-.section sfqxa
-.section sfqxb
-.section sfqya
-.section sfqyb
-.section sfqza
-.section sfqzb
-.section sfq1a
-.section sfq1b
-.section sfq2a
-.section sfq2b
-.section sfq3a
-.section sfq3b
-.section sfq4a
-.section sfq4b
-.section sfq5a
-.section sfq5b
-.section sfq6a
-.section sfq6b
-.section sfq7a
-.section sfq7b
-.section sfq8a
-.section sfq8b
-.section sfq9a
-.section sfq9b
-.section sfq0a
-.section sfq0b
-.section sfraa
-.section sfrab
-.section sfrba
-.section sfrbb
-.section sfrca
-.section sfrcb
-.section sfrda
-.section sfrdb
-.section sfrea
-.section sfreb
-.section sfrfa
-.section sfrfb
-.section sfrga
-.section sfrgb
-.section sfrha
-.section sfrhb
-.section sfria
-.section sfrib
-.section sfrja
-.section sfrjb
-.section sfrka
-.section sfrkb
-.section sfrla
-.section sfrlb
-.section sfrma
-.section sfrmb
-.section sfrna
-.section sfrnb
-.section sfroa
-.section sfrob
-.section sfrpa
-.section sfrpb
-.section sfrqa
-.section sfrqb
-.section sfrra
-.section sfrrb
-.section sfrsa
-.section sfrsb
-.section sfrta
-.section sfrtb
-.section sfrua
-.section sfrub
-.section sfrva
-.section sfrvb
-.section sfrwa
-.section sfrwb
-.section sfrxa
-.section sfrxb
-.section sfrya
-.section sfryb
-.section sfrza
-.section sfrzb
-.section sfr1a
-.section sfr1b
-.section sfr2a
-.section sfr2b
-.section sfr3a
-.section sfr3b
-.section sfr4a
-.section sfr4b
-.section sfr5a
-.section sfr5b
-.section sfr6a
-.section sfr6b
-.section sfr7a
-.section sfr7b
-.section sfr8a
-.section sfr8b
-.section sfr9a
-.section sfr9b
-.section sfr0a
-.section sfr0b
-.section sfsaa
-.section sfsab
-.section sfsba
-.section sfsbb
-.section sfsca
-.section sfscb
-.section sfsda
-.section sfsdb
-.section sfsea
-.section sfseb
-.section sfsfa
-.section sfsfb
-.section sfsga
-.section sfsgb
-.section sfsha
-.section sfshb
-.section sfsia
-.section sfsib
-.section sfsja
-.section sfsjb
-.section sfska
-.section sfskb
-.section sfsla
-.section sfslb
-.section sfsma
-.section sfsmb
-.section sfsna
-.section sfsnb
-.section sfsoa
-.section sfsob
-.section sfspa
-.section sfspb
-.section sfsqa
-.section sfsqb
-.section sfsra
-.section sfsrb
-.section sfssa
-.section sfssb
-.section sfsta
-.section sfstb
-.section sfsua
-.section sfsub
-.section sfsva
-.section sfsvb
-.section sfswa
-.section sfswb
-.section sfsxa
-.section sfsxb
-.section sfsya
-.section sfsyb
-.section sfsza
-.section sfszb
-.section sfs1a
-.section sfs1b
-.section sfs2a
-.section sfs2b
-.section sfs3a
-.section sfs3b
-.section sfs4a
-.section sfs4b
-.section sfs5a
-.section sfs5b
-.section sfs6a
-.section sfs6b
-.section sfs7a
-.section sfs7b
-.section sfs8a
-.section sfs8b
-.section sfs9a
-.section sfs9b
-.section sfs0a
-.section sfs0b
-.section sftaa
-.section sftab
-.section sftba
-.section sftbb
-.section sftca
-.section sftcb
-.section sftda
-.section sftdb
-.section sftea
-.section sfteb
-.section sftfa
-.section sftfb
-.section sftga
-.section sftgb
-.section sftha
-.section sfthb
-.section sftia
-.section sftib
-.section sftja
-.section sftjb
-.section sftka
-.section sftkb
-.section sftla
-.section sftlb
-.section sftma
-.section sftmb
-.section sftna
-.section sftnb
-.section sftoa
-.section sftob
-.section sftpa
-.section sftpb
-.section sftqa
-.section sftqb
-.section sftra
-.section sftrb
-.section sftsa
-.section sftsb
-.section sftta
-.section sfttb
-.section sftua
-.section sftub
-.section sftva
-.section sftvb
-.section sftwa
-.section sftwb
-.section sftxa
-.section sftxb
-.section sftya
-.section sftyb
-.section sftza
-.section sftzb
-.section sft1a
-.section sft1b
-.section sft2a
-.section sft2b
-.section sft3a
-.section sft3b
-.section sft4a
-.section sft4b
-.section sft5a
-.section sft5b
-.section sft6a
-.section sft6b
-.section sft7a
-.section sft7b
-.section sft8a
-.section sft8b
-.section sft9a
-.section sft9b
-.section sft0a
-.section sft0b
-.section sfuaa
-.section sfuab
-.section sfuba
-.section sfubb
-.section sfuca
-.section sfucb
-.section sfuda
-.section sfudb
-.section sfuea
-.section sfueb
-.section sfufa
-.section sfufb
-.section sfuga
-.section sfugb
-.section sfuha
-.section sfuhb
-.section sfuia
-.section sfuib
-.section sfuja
-.section sfujb
-.section sfuka
-.section sfukb
-.section sfula
-.section sfulb
-.section sfuma
-.section sfumb
-.section sfuna
-.section sfunb
-.section sfuoa
-.section sfuob
-.section sfupa
-.section sfupb
-.section sfuqa
-.section sfuqb
-.section sfura
-.section sfurb
-.section sfusa
-.section sfusb
-.section sfuta
-.section sfutb
-.section sfuua
-.section sfuub
-.section sfuva
-.section sfuvb
-.section sfuwa
-.section sfuwb
-.section sfuxa
-.section sfuxb
-.section sfuya
-.section sfuyb
-.section sfuza
-.section sfuzb
-.section sfu1a
-.section sfu1b
-.section sfu2a
-.section sfu2b
-.section sfu3a
-.section sfu3b
-.section sfu4a
-.section sfu4b
-.section sfu5a
-.section sfu5b
-.section sfu6a
-.section sfu6b
-.section sfu7a
-.section sfu7b
-.section sfu8a
-.section sfu8b
-.section sfu9a
-.section sfu9b
-.section sfu0a
-.section sfu0b
-.section sfvaa
-.section sfvab
-.section sfvba
-.section sfvbb
-.section sfvca
-.section sfvcb
-.section sfvda
-.section sfvdb
-.section sfvea
-.section sfveb
-.section sfvfa
-.section sfvfb
-.section sfvga
-.section sfvgb
-.section sfvha
-.section sfvhb
-.section sfvia
-.section sfvib
-.section sfvja
-.section sfvjb
-.section sfvka
-.section sfvkb
-.section sfvla
-.section sfvlb
-.section sfvma
-.section sfvmb
-.section sfvna
-.section sfvnb
-.section sfvoa
-.section sfvob
-.section sfvpa
-.section sfvpb
-.section sfvqa
-.section sfvqb
-.section sfvra
-.section sfvrb
-.section sfvsa
-.section sfvsb
-.section sfvta
-.section sfvtb
-.section sfvua
-.section sfvub
-.section sfvva
-.section sfvvb
-.section sfvwa
-.section sfvwb
-.section sfvxa
-.section sfvxb
-.section sfvya
-.section sfvyb
-.section sfvza
-.section sfvzb
-.section sfv1a
-.section sfv1b
-.section sfv2a
-.section sfv2b
-.section sfv3a
-.section sfv3b
-.section sfv4a
-.section sfv4b
-.section sfv5a
-.section sfv5b
-.section sfv6a
-.section sfv6b
-.section sfv7a
-.section sfv7b
-.section sfv8a
-.section sfv8b
-.section sfv9a
-.section sfv9b
-.section sfv0a
-.section sfv0b
-.section sfwaa
-.section sfwab
-.section sfwba
-.section sfwbb
-.section sfwca
-.section sfwcb
-.section sfwda
-.section sfwdb
-.section sfwea
-.section sfweb
-.section sfwfa
-.section sfwfb
-.section sfwga
-.section sfwgb
-.section sfwha
-.section sfwhb
-.section sfwia
-.section sfwib
-.section sfwja
-.section sfwjb
-.section sfwka
-.section sfwkb
-.section sfwla
-.section sfwlb
-.section sfwma
-.section sfwmb
-.section sfwna
-.section sfwnb
-.section sfwoa
-.section sfwob
-.section sfwpa
-.section sfwpb
-.section sfwqa
-.section sfwqb
-.section sfwra
-.section sfwrb
-.section sfwsa
-.section sfwsb
-.section sfwta
-.section sfwtb
-.section sfwua
-.section sfwub
-.section sfwva
-.section sfwvb
-.section sfwwa
-.section sfwwb
-.section sfwxa
-.section sfwxb
-.section sfwya
-.section sfwyb
-.section sfwza
-.section sfwzb
-.section sfw1a
-.section sfw1b
-.section sfw2a
-.section sfw2b
-.section sfw3a
-.section sfw3b
-.section sfw4a
-.section sfw4b
-.section sfw5a
-.section sfw5b
-.section sfw6a
-.section sfw6b
-.section sfw7a
-.section sfw7b
-.section sfw8a
-.section sfw8b
-.section sfw9a
-.section sfw9b
-.section sfw0a
-.section sfw0b
-.section sfxaa
-.section sfxab
-.section sfxba
-.section sfxbb
-.section sfxca
-.section sfxcb
-.section sfxda
-.section sfxdb
-.section sfxea
-.section sfxeb
-.section sfxfa
-.section sfxfb
-.section sfxga
-.section sfxgb
-.section sfxha
-.section sfxhb
-.section sfxia
-.section sfxib
-.section sfxja
-.section sfxjb
-.section sfxka
-.section sfxkb
-.section sfxla
-.section sfxlb
-.section sfxma
-.section sfxmb
-.section sfxna
-.section sfxnb
-.section sfxoa
-.section sfxob
-.section sfxpa
-.section sfxpb
-.section sfxqa
-.section sfxqb
-.section sfxra
-.section sfxrb
-.section sfxsa
-.section sfxsb
-.section sfxta
-.section sfxtb
-.section sfxua
-.section sfxub
-.section sfxva
-.section sfxvb
-.section sfxwa
-.section sfxwb
-.section sfxxa
-.section sfxxb
-.section sfxya
-.section sfxyb
-.section sfxza
-.section sfxzb
-.section sfx1a
-.section sfx1b
-.section sfx2a
-.section sfx2b
-.section sfx3a
-.section sfx3b
-.section sfx4a
-.section sfx4b
-.section sfx5a
-.section sfx5b
-.section sfx6a
-.section sfx6b
-.section sfx7a
-.section sfx7b
-.section sfx8a
-.section sfx8b
-.section sfx9a
-.section sfx9b
-.section sfx0a
-.section sfx0b
-.section sfyaa
-.section sfyab
-.section sfyba
-.section sfybb
-.section sfyca
-.section sfycb
-.section sfyda
-.section sfydb
-.section sfyea
-.section sfyeb
-.section sfyfa
-.section sfyfb
-.section sfyga
-.section sfygb
-.section sfyha
-.section sfyhb
-.section sfyia
-.section sfyib
-.section sfyja
-.section sfyjb
-.section sfyka
-.section sfykb
-.section sfyla
-.section sfylb
-.section sfyma
-.section sfymb
-.section sfyna
-.section sfynb
-.section sfyoa
-.section sfyob
-.section sfypa
-.section sfypb
-.section sfyqa
-.section sfyqb
-.section sfyra
-.section sfyrb
-.section sfysa
-.section sfysb
-.section sfyta
-.section sfytb
-.section sfyua
-.section sfyub
-.section sfyva
-.section sfyvb
-.section sfywa
-.section sfywb
-.section sfyxa
-.section sfyxb
-.section sfyya
-.section sfyyb
-.section sfyza
-.section sfyzb
-.section sfy1a
-.section sfy1b
-.section sfy2a
-.section sfy2b
-.section sfy3a
-.section sfy3b
-.section sfy4a
-.section sfy4b
-.section sfy5a
-.section sfy5b
-.section sfy6a
-.section sfy6b
-.section sfy7a
-.section sfy7b
-.section sfy8a
-.section sfy8b
-.section sfy9a
-.section sfy9b
-.section sfy0a
-.section sfy0b
-.section sfzaa
-.section sfzab
-.section sfzba
-.section sfzbb
-.section sfzca
-.section sfzcb
-.section sfzda
-.section sfzdb
-.section sfzea
-.section sfzeb
-.section sfzfa
-.section sfzfb
-.section sfzga
-.section sfzgb
-.section sfzha
-.section sfzhb
-.section sfzia
-.section sfzib
-.section sfzja
-.section sfzjb
-.section sfzka
-.section sfzkb
-.section sfzla
-.section sfzlb
-.section sfzma
-.section sfzmb
-.section sfzna
-.section sfznb
-.section sfzoa
-.section sfzob
-.section sfzpa
-.section sfzpb
-.section sfzqa
-.section sfzqb
-.section sfzra
-.section sfzrb
-.section sfzsa
-.section sfzsb
-.section sfzta
-.section sfztb
-.section sfzua
-.section sfzub
-.section sfzva
-.section sfzvb
-.section sfzwa
-.section sfzwb
-.section sfzxa
-.section sfzxb
-.section sfzya
-.section sfzyb
-.section sfzza
-.section sfzzb
-.section sfz1a
-.section sfz1b
-.section sfz2a
-.section sfz2b
-.section sfz3a
-.section sfz3b
-.section sfz4a
-.section sfz4b
-.section sfz5a
-.section sfz5b
-.section sfz6a
-.section sfz6b
-.section sfz7a
-.section sfz7b
-.section sfz8a
-.section sfz8b
-.section sfz9a
-.section sfz9b
-.section sfz0a
-.section sfz0b
-.section sf1aa
-.section sf1ab
-.section sf1ba
-.section sf1bb
-.section sf1ca
-.section sf1cb
-.section sf1da
-.section sf1db
-.section sf1ea
-.section sf1eb
-.section sf1fa
-.section sf1fb
-.section sf1ga
-.section sf1gb
-.section sf1ha
-.section sf1hb
-.section sf1ia
-.section sf1ib
-.section sf1ja
-.section sf1jb
-.section sf1ka
-.section sf1kb
-.section sf1la
-.section sf1lb
-.section sf1ma
-.section sf1mb
-.section sf1na
-.section sf1nb
-.section sf1oa
-.section sf1ob
-.section sf1pa
-.section sf1pb
-.section sf1qa
-.section sf1qb
-.section sf1ra
-.section sf1rb
-.section sf1sa
-.section sf1sb
-.section sf1ta
-.section sf1tb
-.section sf1ua
-.section sf1ub
-.section sf1va
-.section sf1vb
-.section sf1wa
-.section sf1wb
-.section sf1xa
-.section sf1xb
-.section sf1ya
-.section sf1yb
-.section sf1za
-.section sf1zb
-.section sf11a
-.section sf11b
-.section sf12a
-.section sf12b
-.section sf13a
-.section sf13b
-.section sf14a
-.section sf14b
-.section sf15a
-.section sf15b
-.section sf16a
-.section sf16b
-.section sf17a
-.section sf17b
-.section sf18a
-.section sf18b
-.section sf19a
-.section sf19b
-.section sf10a
-.section sf10b
-.section sf2aa
-.section sf2ab
-.section sf2ba
-.section sf2bb
-.section sf2ca
-.section sf2cb
-.section sf2da
-.section sf2db
-.section sf2ea
-.section sf2eb
-.section sf2fa
-.section sf2fb
-.section sf2ga
-.section sf2gb
-.section sf2ha
-.section sf2hb
-.section sf2ia
-.section sf2ib
-.section sf2ja
-.section sf2jb
-.section sf2ka
-.section sf2kb
-.section sf2la
-.section sf2lb
-.section sf2ma
-.section sf2mb
-.section sf2na
-.section sf2nb
-.section sf2oa
-.section sf2ob
-.section sf2pa
-.section sf2pb
-.section sf2qa
-.section sf2qb
-.section sf2ra
-.section sf2rb
-.section sf2sa
-.section sf2sb
-.section sf2ta
-.section sf2tb
-.section sf2ua
-.section sf2ub
-.section sf2va
-.section sf2vb
-.section sf2wa
-.section sf2wb
-.section sf2xa
-.section sf2xb
-.section sf2ya
-.section sf2yb
-.section sf2za
-.section sf2zb
-.section sf21a
-.section sf21b
-.section sf22a
-.section sf22b
-.section sf23a
-.section sf23b
-.section sf24a
-.section sf24b
-.section sf25a
-.section sf25b
-.section sf26a
-.section sf26b
-.section sf27a
-.section sf27b
-.section sf28a
-.section sf28b
-.section sf29a
-.section sf29b
-.section sf20a
-.section sf20b
-.section sf3aa
-.section sf3ab
-.section sf3ba
-.section sf3bb
-.section sf3ca
-.section sf3cb
-.section sf3da
-.section sf3db
-.section sf3ea
-.section sf3eb
-.section sf3fa
-.section sf3fb
-.section sf3ga
-.section sf3gb
-.section sf3ha
-.section sf3hb
-.section sf3ia
-.section sf3ib
-.section sf3ja
-.section sf3jb
-.section sf3ka
-.section sf3kb
-.section sf3la
-.section sf3lb
-.section sf3ma
-.section sf3mb
-.section sf3na
-.section sf3nb
-.section sf3oa
-.section sf3ob
-.section sf3pa
-.section sf3pb
-.section sf3qa
-.section sf3qb
-.section sf3ra
-.section sf3rb
-.section sf3sa
-.section sf3sb
-.section sf3ta
-.section sf3tb
-.section sf3ua
-.section sf3ub
-.section sf3va
-.section sf3vb
-.section sf3wa
-.section sf3wb
-.section sf3xa
-.section sf3xb
-.section sf3ya
-.section sf3yb
-.section sf3za
-.section sf3zb
-.section sf31a
-.section sf31b
-.section sf32a
-.section sf32b
-.section sf33a
-.section sf33b
-.section sf34a
-.section sf34b
-.section sf35a
-.section sf35b
-.section sf36a
-.section sf36b
-.section sf37a
-.section sf37b
-.section sf38a
-.section sf38b
-.section sf39a
-.section sf39b
-.section sf30a
-.section sf30b
-.section sf4aa
-.section sf4ab
-.section sf4ba
-.section sf4bb
-.section sf4ca
-.section sf4cb
-.section sf4da
-.section sf4db
-.section sf4ea
-.section sf4eb
-.section sf4fa
-.section sf4fb
-.section sf4ga
-.section sf4gb
-.section sf4ha
-.section sf4hb
-.section sf4ia
-.section sf4ib
-.section sf4ja
-.section sf4jb
-.section sf4ka
-.section sf4kb
-.section sf4la
-.section sf4lb
-.section sf4ma
-.section sf4mb
-.section sf4na
-.section sf4nb
-.section sf4oa
-.section sf4ob
-.section sf4pa
-.section sf4pb
-.section sf4qa
-.section sf4qb
-.section sf4ra
-.section sf4rb
-.section sf4sa
-.section sf4sb
-.section sf4ta
-.section sf4tb
-.section sf4ua
-.section sf4ub
-.section sf4va
-.section sf4vb
-.section sf4wa
-.section sf4wb
-.section sf4xa
-.section sf4xb
-.section sf4ya
-.section sf4yb
-.section sf4za
-.section sf4zb
-.section sf41a
-.section sf41b
-.section sf42a
-.section sf42b
-.section sf43a
-.section sf43b
-.section sf44a
-.section sf44b
-.section sf45a
-.section sf45b
-.section sf46a
-.section sf46b
-.section sf47a
-.section sf47b
-.section sf48a
-.section sf48b
-.section sf49a
-.section sf49b
-.section sf40a
-.section sf40b
-.section sf5aa
-.section sf5ab
-.section sf5ba
-.section sf5bb
-.section sf5ca
-.section sf5cb
-.section sf5da
-.section sf5db
-.section sf5ea
-.section sf5eb
-.section sf5fa
-.section sf5fb
-.section sf5ga
-.section sf5gb
-.section sf5ha
-.section sf5hb
-.section sf5ia
-.section sf5ib
-.section sf5ja
-.section sf5jb
-.section sf5ka
-.section sf5kb
-.section sf5la
-.section sf5lb
-.section sf5ma
-.section sf5mb
-.section sf5na
-.section sf5nb
-.section sf5oa
-.section sf5ob
-.section sf5pa
-.section sf5pb
-.section sf5qa
-.section sf5qb
-.section sf5ra
-.section sf5rb
-.section sf5sa
-.section sf5sb
-.section sf5ta
-.section sf5tb
-.section sf5ua
-.section sf5ub
-.section sf5va
-.section sf5vb
-.section sf5wa
-.section sf5wb
-.section sf5xa
-.section sf5xb
-.section sf5ya
-.section sf5yb
-.section sf5za
-.section sf5zb
-.section sf51a
-.section sf51b
-.section sf52a
-.section sf52b
-.section sf53a
-.section sf53b
-.section sf54a
-.section sf54b
-.section sf55a
-.section sf55b
-.section sf56a
-.section sf56b
-.section sf57a
-.section sf57b
-.section sf58a
-.section sf58b
-.section sf59a
-.section sf59b
-.section sf50a
-.section sf50b
-.section sf6aa
-.section sf6ab
-.section sf6ba
-.section sf6bb
-.section sf6ca
-.section sf6cb
-.section sf6da
-.section sf6db
-.section sf6ea
-.section sf6eb
-.section sf6fa
-.section sf6fb
-.section sf6ga
-.section sf6gb
-.section sf6ha
-.section sf6hb
-.section sf6ia
-.section sf6ib
-.section sf6ja
-.section sf6jb
-.section sf6ka
-.section sf6kb
-.section sf6la
-.section sf6lb
-.section sf6ma
-.section sf6mb
-.section sf6na
-.section sf6nb
-.section sf6oa
-.section sf6ob
-.section sf6pa
-.section sf6pb
-.section sf6qa
-.section sf6qb
-.section sf6ra
-.section sf6rb
-.section sf6sa
-.section sf6sb
-.section sf6ta
-.section sf6tb
-.section sf6ua
-.section sf6ub
-.section sf6va
-.section sf6vb
-.section sf6wa
-.section sf6wb
-.section sf6xa
-.section sf6xb
-.section sf6ya
-.section sf6yb
-.section sf6za
-.section sf6zb
-.section sf61a
-.section sf61b
-.section sf62a
-.section sf62b
-.section sf63a
-.section sf63b
-.section sf64a
-.section sf64b
-.section sf65a
-.section sf65b
-.section sf66a
-.section sf66b
-.section sf67a
-.section sf67b
-.section sf68a
-.section sf68b
-.section sf69a
-.section sf69b
-.section sf60a
-.section sf60b
-.section sf7aa
-.section sf7ab
-.section sf7ba
-.section sf7bb
-.section sf7ca
-.section sf7cb
-.section sf7da
-.section sf7db
-.section sf7ea
-.section sf7eb
-.section sf7fa
-.section sf7fb
-.section sf7ga
-.section sf7gb
-.section sf7ha
-.section sf7hb
-.section sf7ia
-.section sf7ib
-.section sf7ja
-.section sf7jb
-.section sf7ka
-.section sf7kb
-.section sf7la
-.section sf7lb
-.section sf7ma
-.section sf7mb
-.section sf7na
-.section sf7nb
-.section sf7oa
-.section sf7ob
-.section sf7pa
-.section sf7pb
-.section sf7qa
-.section sf7qb
-.section sf7ra
-.section sf7rb
-.section sf7sa
-.section sf7sb
-.section sf7ta
-.section sf7tb
-.section sf7ua
-.section sf7ub
-.section sf7va
-.section sf7vb
-.section sf7wa
-.section sf7wb
-.section sf7xa
-.section sf7xb
-.section sf7ya
-.section sf7yb
-.section sf7za
-.section sf7zb
-.section sf71a
-.section sf71b
-.section sf72a
-.section sf72b
-.section sf73a
-.section sf73b
-.section sf74a
-.section sf74b
-.section sf75a
-.section sf75b
-.section sf76a
-.section sf76b
-.section sf77a
-.section sf77b
-.section sf78a
-.section sf78b
-.section sf79a
-.section sf79b
-.section sf70a
-.section sf70b
-.section sf8aa
-.section sf8ab
-.section sf8ba
-.section sf8bb
-.section sf8ca
-.section sf8cb
-.section sf8da
-.section sf8db
-.section sf8ea
-.section sf8eb
-.section sf8fa
-.section sf8fb
-.section sf8ga
-.section sf8gb
-.section sf8ha
-.section sf8hb
-.section sf8ia
-.section sf8ib
-.section sf8ja
-.section sf8jb
-.section sf8ka
-.section sf8kb
-.section sf8la
-.section sf8lb
-.section sf8ma
-.section sf8mb
-.section sf8na
-.section sf8nb
-.section sf8oa
-.section sf8ob
-.section sf8pa
-.section sf8pb
-.section sf8qa
-.section sf8qb
-.section sf8ra
-.section sf8rb
-.section sf8sa
-.section sf8sb
-.section sf8ta
-.section sf8tb
-.section sf8ua
-.section sf8ub
-.section sf8va
-.section sf8vb
-.section sf8wa
-.section sf8wb
-.section sf8xa
-.section sf8xb
-.section sf8ya
-.section sf8yb
-.section sf8za
-.section sf8zb
-.section sf81a
-.section sf81b
-.section sf82a
-.section sf82b
-.section sf83a
-.section sf83b
-.section sf84a
-.section sf84b
-.section sf85a
-.section sf85b
-.section sf86a
-.section sf86b
-.section sf87a
-.section sf87b
-.section sf88a
-.section sf88b
-.section sf89a
-.section sf89b
-.section sf80a
-.section sf80b
-.section sf9aa
-.section sf9ab
-.section sf9ba
-.section sf9bb
-.section sf9ca
-.section sf9cb
-.section sf9da
-.section sf9db
-.section sf9ea
-.section sf9eb
-.section sf9fa
-.section sf9fb
-.section sf9ga
-.section sf9gb
-.section sf9ha
-.section sf9hb
-.section sf9ia
-.section sf9ib
-.section sf9ja
-.section sf9jb
-.section sf9ka
-.section sf9kb
-.section sf9la
-.section sf9lb
-.section sf9ma
-.section sf9mb
-.section sf9na
-.section sf9nb
-.section sf9oa
-.section sf9ob
-.section sf9pa
-.section sf9pb
-.section sf9qa
-.section sf9qb
-.section sf9ra
-.section sf9rb
-.section sf9sa
-.section sf9sb
-.section sf9ta
-.section sf9tb
-.section sf9ua
-.section sf9ub
-.section sf9va
-.section sf9vb
-.section sf9wa
-.section sf9wb
-.section sf9xa
-.section sf9xb
-.section sf9ya
-.section sf9yb
-.section sf9za
-.section sf9zb
-.section sf91a
-.section sf91b
-.section sf92a
-.section sf92b
-.section sf93a
-.section sf93b
-.section sf94a
-.section sf94b
-.section sf95a
-.section sf95b
-.section sf96a
-.section sf96b
-.section sf97a
-.section sf97b
-.section sf98a
-.section sf98b
-.section sf99a
-.section sf99b
-.section sf90a
-.section sf90b
-.section sf0aa
-.section sf0ab
-.section sf0ba
-.section sf0bb
-.section sf0ca
-.section sf0cb
-.section sf0da
-.section sf0db
-.section sf0ea
-.section sf0eb
-.section sf0fa
-.section sf0fb
-.section sf0ga
-.section sf0gb
-.section sf0ha
-.section sf0hb
-.section sf0ia
-.section sf0ib
-.section sf0ja
-.section sf0jb
-.section sf0ka
-.section sf0kb
-.section sf0la
-.section sf0lb
-.section sf0ma
-.section sf0mb
-.section sf0na
-.section sf0nb
-.section sf0oa
-.section sf0ob
-.section sf0pa
-.section sf0pb
-.section sf0qa
-.section sf0qb
-.section sf0ra
-.section sf0rb
-.section sf0sa
-.section sf0sb
-.section sf0ta
-.section sf0tb
-.section sf0ua
-.section sf0ub
-.section sf0va
-.section sf0vb
-.section sf0wa
-.section sf0wb
-.section sf0xa
-.section sf0xb
-.section sf0ya
-.section sf0yb
-.section sf0za
-.section sf0zb
-.section sf01a
-.section sf01b
-.section sf02a
-.section sf02b
-.section sf03a
-.section sf03b
-.section sf04a
-.section sf04b
-.section sf05a
-.section sf05b
-.section sf06a
-.section sf06b
-.section sf07a
-.section sf07b
-.section sf08a
-.section sf08b
-.section sf09a
-.section sf09b
-.section sf00a
-.section sf00b
-.section sgaaa
-.section sgaab
-.section sgaba
-.section sgabb
-.section sgaca
-.section sgacb
-.section sgada
-.section sgadb
-.section sgaea
-.section sgaeb
-.section sgafa
-.section sgafb
-.section sgaga
-.section sgagb
-.section sgaha
-.section sgahb
-.section sgaia
-.section sgaib
-.section sgaja
-.section sgajb
-.section sgaka
-.section sgakb
-.section sgala
-.section sgalb
-.section sgama
-.section sgamb
-.section sgana
-.section sganb
-.section sgaoa
-.section sgaob
-.section sgapa
-.section sgapb
-.section sgaqa
-.section sgaqb
-.section sgara
-.section sgarb
-.section sgasa
-.section sgasb
-.section sgata
-.section sgatb
-.section sgaua
-.section sgaub
-.section sgava
-.section sgavb
-.section sgawa
-.section sgawb
-.section sgaxa
-.section sgaxb
-.section sgaya
-.section sgayb
-.section sgaza
-.section sgazb
-.section sga1a
-.section sga1b
-.section sga2a
-.section sga2b
-.section sga3a
-.section sga3b
-.section sga4a
-.section sga4b
-.section sga5a
-.section sga5b
-.section sga6a
-.section sga6b
-.section sga7a
-.section sga7b
-.section sga8a
-.section sga8b
-.section sga9a
-.section sga9b
-.section sga0a
-.section sga0b
-.section sgbaa
-.section sgbab
-.section sgbba
-.section sgbbb
-.section sgbca
-.section sgbcb
-.section sgbda
-.section sgbdb
-.section sgbea
-.section sgbeb
-.section sgbfa
-.section sgbfb
-.section sgbga
-.section sgbgb
-.section sgbha
-.section sgbhb
-.section sgbia
-.section sgbib
-.section sgbja
-.section sgbjb
-.section sgbka
-.section sgbkb
-.section sgbla
-.section sgblb
-.section sgbma
-.section sgbmb
-.section sgbna
-.section sgbnb
-.section sgboa
-.section sgbob
-.section sgbpa
-.section sgbpb
-.section sgbqa
-.section sgbqb
-.section sgbra
-.section sgbrb
-.section sgbsa
-.section sgbsb
-.section sgbta
-.section sgbtb
-.section sgbua
-.section sgbub
-.section sgbva
-.section sgbvb
-.section sgbwa
-.section sgbwb
-.section sgbxa
-.section sgbxb
-.section sgbya
-.section sgbyb
-.section sgbza
-.section sgbzb
-.section sgb1a
-.section sgb1b
-.section sgb2a
-.section sgb2b
-.section sgb3a
-.section sgb3b
-.section sgb4a
-.section sgb4b
-.section sgb5a
-.section sgb5b
-.section sgb6a
-.section sgb6b
-.section sgb7a
-.section sgb7b
-.section sgb8a
-.section sgb8b
-.section sgb9a
-.section sgb9b
-.section sgb0a
-.section sgb0b
-.section sgcaa
-.section sgcab
-.section sgcba
-.section sgcbb
-.section sgcca
-.section sgccb
-.section sgcda
-.section sgcdb
-.section sgcea
-.section sgceb
-.section sgcfa
-.section sgcfb
-.section sgcga
-.section sgcgb
-.section sgcha
-.section sgchb
-.section sgcia
-.section sgcib
-.section sgcja
-.section sgcjb
-.section sgcka
-.section sgckb
-.section sgcla
-.section sgclb
-.section sgcma
-.section sgcmb
-.section sgcna
-.section sgcnb
-.section sgcoa
-.section sgcob
-.section sgcpa
-.section sgcpb
-.section sgcqa
-.section sgcqb
-.section sgcra
-.section sgcrb
-.section sgcsa
-.section sgcsb
-.section sgcta
-.section sgctb
-.section sgcua
-.section sgcub
-.section sgcva
-.section sgcvb
-.section sgcwa
-.section sgcwb
-.section sgcxa
-.section sgcxb
-.section sgcya
-.section sgcyb
-.section sgcza
-.section sgczb
-.section sgc1a
-.section sgc1b
-.section sgc2a
-.section sgc2b
-.section sgc3a
-.section sgc3b
-.section sgc4a
-.section sgc4b
-.section sgc5a
-.section sgc5b
-.section sgc6a
-.section sgc6b
-.section sgc7a
-.section sgc7b
-.section sgc8a
-.section sgc8b
-.section sgc9a
-.section sgc9b
-.section sgc0a
-.section sgc0b
-.section sgdaa
-.section sgdab
-.section sgdba
-.section sgdbb
-.section sgdca
-.section sgdcb
-.section sgdda
-.section sgddb
-.section sgdea
-.section sgdeb
-.section sgdfa
-.section sgdfb
-.section sgdga
-.section sgdgb
-.section sgdha
-.section sgdhb
-.section sgdia
-.section sgdib
-.section sgdja
-.section sgdjb
-.section sgdka
-.section sgdkb
-.section sgdla
-.section sgdlb
-.section sgdma
-.section sgdmb
-.section sgdna
-.section sgdnb
-.section sgdoa
-.section sgdob
-.section sgdpa
-.section sgdpb
-.section sgdqa
-.section sgdqb
-.section sgdra
-.section sgdrb
-.section sgdsa
-.section sgdsb
-.section sgdta
-.section sgdtb
-.section sgdua
-.section sgdub
-.section sgdva
-.section sgdvb
-.section sgdwa
-.section sgdwb
-.section sgdxa
-.section sgdxb
-.section sgdya
-.section sgdyb
-.section sgdza
-.section sgdzb
-.section sgd1a
-.section sgd1b
-.section sgd2a
-.section sgd2b
-.section sgd3a
-.section sgd3b
-.section sgd4a
-.section sgd4b
-.section sgd5a
-.section sgd5b
-.section sgd6a
-.section sgd6b
-.section sgd7a
-.section sgd7b
-.section sgd8a
-.section sgd8b
-.section sgd9a
-.section sgd9b
-.section sgd0a
-.section sgd0b
-.section sgeaa
-.section sgeab
-.section sgeba
-.section sgebb
-.section sgeca
-.section sgecb
-.section sgeda
-.section sgedb
-.section sgeea
-.section sgeeb
-.section sgefa
-.section sgefb
-.section sgega
-.section sgegb
-.section sgeha
-.section sgehb
-.section sgeia
-.section sgeib
-.section sgeja
-.section sgejb
-.section sgeka
-.section sgekb
-.section sgela
-.section sgelb
-.section sgema
-.section sgemb
-.section sgena
-.section sgenb
-.section sgeoa
-.section sgeob
-.section sgepa
-.section sgepb
-.section sgeqa
-.section sgeqb
-.section sgera
-.section sgerb
-.section sgesa
-.section sgesb
-.section sgeta
-.section sgetb
-.section sgeua
-.section sgeub
-.section sgeva
-.section sgevb
-.section sgewa
-.section sgewb
-.section sgexa
-.section sgexb
-.section sgeya
-.section sgeyb
-.section sgeza
-.section sgezb
-.section sge1a
-.section sge1b
-.section sge2a
-.section sge2b
-.section sge3a
-.section sge3b
-.section sge4a
-.section sge4b
-.section sge5a
-.section sge5b
-.section sge6a
-.section sge6b
-.section sge7a
-.section sge7b
-.section sge8a
-.section sge8b
-.section sge9a
-.section sge9b
-.section sge0a
-.section sge0b
-.section sgfaa
-.section sgfab
-.section sgfba
-.section sgfbb
-.section sgfca
-.section sgfcb
-.section sgfda
-.section sgfdb
-.section sgfea
-.section sgfeb
-.section sgffa
-.section sgffb
-.section sgfga
-.section sgfgb
-.section sgfha
-.section sgfhb
-.section sgfia
-.section sgfib
-.section sgfja
-.section sgfjb
-.section sgfka
-.section sgfkb
-.section sgfla
-.section sgflb
-.section sgfma
-.section sgfmb
-.section sgfna
-.section sgfnb
-.section sgfoa
-.section sgfob
-.section sgfpa
-.section sgfpb
-.section sgfqa
-.section sgfqb
-.section sgfra
-.section sgfrb
-.section sgfsa
-.section sgfsb
-.section sgfta
-.section sgftb
-.section sgfua
-.section sgfub
-.section sgfva
-.section sgfvb
-.section sgfwa
-.section sgfwb
-.section sgfxa
-.section sgfxb
-.section sgfya
-.section sgfyb
-.section sgfza
-.section sgfzb
-.section sgf1a
-.section sgf1b
-.section sgf2a
-.section sgf2b
-.section sgf3a
-.section sgf3b
-.section sgf4a
-.section sgf4b
-.section sgf5a
-.section sgf5b
-.section sgf6a
-.section sgf6b
-.section sgf7a
-.section sgf7b
-.section sgf8a
-.section sgf8b
-.section sgf9a
-.section sgf9b
-.section sgf0a
-.section sgf0b
-.section sggaa
-.section sggab
-.section sggba
-.section sggbb
-.section sggca
-.section sggcb
-.section sggda
-.section sggdb
-.section sggea
-.section sggeb
-.section sggfa
-.section sggfb
-.section sggga
-.section sgggb
-.section sggha
-.section sgghb
-.section sggia
-.section sggib
-.section sggja
-.section sggjb
-.section sggka
-.section sggkb
-.section sggla
-.section sgglb
-.section sggma
-.section sggmb
-.section sggna
-.section sggnb
-.section sggoa
-.section sggob
-.section sggpa
-.section sggpb
-.section sggqa
-.section sggqb
-.section sggra
-.section sggrb
-.section sggsa
-.section sggsb
-.section sggta
-.section sggtb
-.section sggua
-.section sggub
-.section sggva
-.section sggvb
-.section sggwa
-.section sggwb
-.section sggxa
-.section sggxb
-.section sggya
-.section sggyb
-.section sggza
-.section sggzb
-.section sgg1a
-.section sgg1b
-.section sgg2a
-.section sgg2b
-.section sgg3a
-.section sgg3b
-.section sgg4a
-.section sgg4b
-.section sgg5a
-.section sgg5b
-.section sgg6a
-.section sgg6b
-.section sgg7a
-.section sgg7b
-.section sgg8a
-.section sgg8b
-.section sgg9a
-.section sgg9b
-.section sgg0a
-.section sgg0b
-.section sghaa
-.section sghab
-.section sghba
-.section sghbb
-.section sghca
-.section sghcb
-.section sghda
-.section sghdb
-.section sghea
-.section sgheb
-.section sghfa
-.section sghfb
-.section sghga
-.section sghgb
-.section sghha
-.section sghhb
-.section sghia
-.section sghib
-.section sghja
-.section sghjb
-.section sghka
-.section sghkb
-.section sghla
-.section sghlb
-.section sghma
-.section sghmb
-.section sghna
-.section sghnb
-.section sghoa
-.section sghob
-.section sghpa
-.section sghpb
-.section sghqa
-.section sghqb
-.section sghra
-.section sghrb
-.section sghsa
-.section sghsb
-.section sghta
-.section sghtb
-.section sghua
-.section sghub
-.section sghva
-.section sghvb
-.section sghwa
-.section sghwb
-.section sghxa
-.section sghxb
-.section sghya
-.section sghyb
-.section sghza
-.section sghzb
-.section sgh1a
-.section sgh1b
-.section sgh2a
-.section sgh2b
-.section sgh3a
-.section sgh3b
-.section sgh4a
-.section sgh4b
-.section sgh5a
-.section sgh5b
-.section sgh6a
-.section sgh6b
-.section sgh7a
-.section sgh7b
-.section sgh8a
-.section sgh8b
-.section sgh9a
-.section sgh9b
-.section sgh0a
-.section sgh0b
-.section sgiaa
-.section sgiab
-.section sgiba
-.section sgibb
-.section sgica
-.section sgicb
-.section sgida
-.section sgidb
-.section sgiea
-.section sgieb
-.section sgifa
-.section sgifb
-.section sgiga
-.section sgigb
-.section sgiha
-.section sgihb
-.section sgiia
-.section sgiib
-.section sgija
-.section sgijb
-.section sgika
-.section sgikb
-.section sgila
-.section sgilb
-.section sgima
-.section sgimb
-.section sgina
-.section sginb
-.section sgioa
-.section sgiob
-.section sgipa
-.section sgipb
-.section sgiqa
-.section sgiqb
-.section sgira
-.section sgirb
-.section sgisa
-.section sgisb
-.section sgita
-.section sgitb
-.section sgiua
-.section sgiub
-.section sgiva
-.section sgivb
-.section sgiwa
-.section sgiwb
-.section sgixa
-.section sgixb
-.section sgiya
-.section sgiyb
-.section sgiza
-.section sgizb
-.section sgi1a
-.section sgi1b
-.section sgi2a
-.section sgi2b
-.section sgi3a
-.section sgi3b
-.section sgi4a
-.section sgi4b
-.section sgi5a
-.section sgi5b
-.section sgi6a
-.section sgi6b
-.section sgi7a
-.section sgi7b
-.section sgi8a
-.section sgi8b
-.section sgi9a
-.section sgi9b
-.section sgi0a
-.section sgi0b
-.section sgjaa
-.section sgjab
-.section sgjba
-.section sgjbb
-.section sgjca
-.section sgjcb
-.section sgjda
-.section sgjdb
-.section sgjea
-.section sgjeb
-.section sgjfa
-.section sgjfb
-.section sgjga
-.section sgjgb
-.section sgjha
-.section sgjhb
-.section sgjia
-.section sgjib
-.section sgjja
-.section sgjjb
-.section sgjka
-.section sgjkb
-.section sgjla
-.section sgjlb
-.section sgjma
-.section sgjmb
-.section sgjna
-.section sgjnb
-.section sgjoa
-.section sgjob
-.section sgjpa
-.section sgjpb
-.section sgjqa
-.section sgjqb
-.section sgjra
-.section sgjrb
-.section sgjsa
-.section sgjsb
-.section sgjta
-.section sgjtb
-.section sgjua
-.section sgjub
-.section sgjva
-.section sgjvb
-.section sgjwa
-.section sgjwb
-.section sgjxa
-.section sgjxb
-.section sgjya
-.section sgjyb
-.section sgjza
-.section sgjzb
-.section sgj1a
-.section sgj1b
-.section sgj2a
-.section sgj2b
-.section sgj3a
-.section sgj3b
-.section sgj4a
-.section sgj4b
-.section sgj5a
-.section sgj5b
-.section sgj6a
-.section sgj6b
-.section sgj7a
-.section sgj7b
-.section sgj8a
-.section sgj8b
-.section sgj9a
-.section sgj9b
-.section sgj0a
-.section sgj0b
-.section sgkaa
-.section sgkab
-.section sgkba
-.section sgkbb
-.section sgkca
-.section sgkcb
-.section sgkda
-.section sgkdb
-.section sgkea
-.section sgkeb
-.section sgkfa
-.section sgkfb
-.section sgkga
-.section sgkgb
-.section sgkha
-.section sgkhb
-.section sgkia
-.section sgkib
-.section sgkja
-.section sgkjb
-.section sgkka
-.section sgkkb
-.section sgkla
-.section sgklb
-.section sgkma
-.section sgkmb
-.section sgkna
-.section sgknb
-.section sgkoa
-.section sgkob
-.section sgkpa
-.section sgkpb
-.section sgkqa
-.section sgkqb
-.section sgkra
-.section sgkrb
-.section sgksa
-.section sgksb
-.section sgkta
-.section sgktb
-.section sgkua
-.section sgkub
-.section sgkva
-.section sgkvb
-.section sgkwa
-.section sgkwb
-.section sgkxa
-.section sgkxb
-.section sgkya
-.section sgkyb
-.section sgkza
-.section sgkzb
-.section sgk1a
-.section sgk1b
-.section sgk2a
-.section sgk2b
-.section sgk3a
-.section sgk3b
-.section sgk4a
-.section sgk4b
-.section sgk5a
-.section sgk5b
-.section sgk6a
-.section sgk6b
-.section sgk7a
-.section sgk7b
-.section sgk8a
-.section sgk8b
-.section sgk9a
-.section sgk9b
-.section sgk0a
-.section sgk0b
-.section sglaa
-.section sglab
-.section sglba
-.section sglbb
-.section sglca
-.section sglcb
-.section sglda
-.section sgldb
-.section sglea
-.section sgleb
-.section sglfa
-.section sglfb
-.section sglga
-.section sglgb
-.section sglha
-.section sglhb
-.section sglia
-.section sglib
-.section sglja
-.section sgljb
-.section sglka
-.section sglkb
-.section sglla
-.section sgllb
-.section sglma
-.section sglmb
-.section sglna
-.section sglnb
-.section sgloa
-.section sglob
-.section sglpa
-.section sglpb
-.section sglqa
-.section sglqb
-.section sglra
-.section sglrb
-.section sglsa
-.section sglsb
-.section sglta
-.section sgltb
-.section sglua
-.section sglub
-.section sglva
-.section sglvb
-.section sglwa
-.section sglwb
-.section sglxa
-.section sglxb
-.section sglya
-.section sglyb
-.section sglza
-.section sglzb
-.section sgl1a
-.section sgl1b
-.section sgl2a
-.section sgl2b
-.section sgl3a
-.section sgl3b
-.section sgl4a
-.section sgl4b
-.section sgl5a
-.section sgl5b
-.section sgl6a
-.section sgl6b
-.section sgl7a
-.section sgl7b
-.section sgl8a
-.section sgl8b
-.section sgl9a
-.section sgl9b
-.section sgl0a
-.section sgl0b
-.section sgmaa
-.section sgmab
-.section sgmba
-.section sgmbb
-.section sgmca
-.section sgmcb
-.section sgmda
-.section sgmdb
-.section sgmea
-.section sgmeb
-.section sgmfa
-.section sgmfb
-.section sgmga
-.section sgmgb
-.section sgmha
-.section sgmhb
-.section sgmia
-.section sgmib
-.section sgmja
-.section sgmjb
-.section sgmka
-.section sgmkb
-.section sgmla
-.section sgmlb
-.section sgmma
-.section sgmmb
-.section sgmna
-.section sgmnb
-.section sgmoa
-.section sgmob
-.section sgmpa
-.section sgmpb
-.section sgmqa
-.section sgmqb
-.section sgmra
-.section sgmrb
-.section sgmsa
-.section sgmsb
-.section sgmta
-.section sgmtb
-.section sgmua
-.section sgmub
-.section sgmva
-.section sgmvb
-.section sgmwa
-.section sgmwb
-.section sgmxa
-.section sgmxb
-.section sgmya
-.section sgmyb
-.section sgmza
-.section sgmzb
-.section sgm1a
-.section sgm1b
-.section sgm2a
-.section sgm2b
-.section sgm3a
-.section sgm3b
-.section sgm4a
-.section sgm4b
-.section sgm5a
-.section sgm5b
-.section sgm6a
-.section sgm6b
-.section sgm7a
-.section sgm7b
-.section sgm8a
-.section sgm8b
-.section sgm9a
-.section sgm9b
-.section sgm0a
-.section sgm0b
-.section sgnaa
-.section sgnab
-.section sgnba
-.section sgnbb
-.section sgnca
-.section sgncb
-.section sgnda
-.section sgndb
-.section sgnea
-.section sgneb
-.section sgnfa
-.section sgnfb
-.section sgnga
-.section sgngb
-.section sgnha
-.section sgnhb
-.section sgnia
-.section sgnib
-.section sgnja
-.section sgnjb
-.section sgnka
-.section sgnkb
-.section sgnla
-.section sgnlb
-.section sgnma
-.section sgnmb
-.section sgnna
-.section sgnnb
-.section sgnoa
-.section sgnob
-.section sgnpa
-.section sgnpb
-.section sgnqa
-.section sgnqb
-.section sgnra
-.section sgnrb
-.section sgnsa
-.section sgnsb
-.section sgnta
-.section sgntb
-.section sgnua
-.section sgnub
-.section sgnva
-.section sgnvb
-.section sgnwa
-.section sgnwb
-.section sgnxa
-.section sgnxb
-.section sgnya
-.section sgnyb
-.section sgnza
-.section sgnzb
-.section sgn1a
-.section sgn1b
-.section sgn2a
-.section sgn2b
-.section sgn3a
-.section sgn3b
-.section sgn4a
-.section sgn4b
-.section sgn5a
-.section sgn5b
-.section sgn6a
-.section sgn6b
-.section sgn7a
-.section sgn7b
-.section sgn8a
-.section sgn8b
-.section sgn9a
-.section sgn9b
-.section sgn0a
-.section sgn0b
-.section sgoaa
-.section sgoab
-.section sgoba
-.section sgobb
-.section sgoca
-.section sgocb
-.section sgoda
-.section sgodb
-.section sgoea
-.section sgoeb
-.section sgofa
-.section sgofb
-.section sgoga
-.section sgogb
-.section sgoha
-.section sgohb
-.section sgoia
-.section sgoib
-.section sgoja
-.section sgojb
-.section sgoka
-.section sgokb
-.section sgola
-.section sgolb
-.section sgoma
-.section sgomb
-.section sgona
-.section sgonb
-.section sgooa
-.section sgoob
-.section sgopa
-.section sgopb
-.section sgoqa
-.section sgoqb
-.section sgora
-.section sgorb
-.section sgosa
-.section sgosb
-.section sgota
-.section sgotb
-.section sgoua
-.section sgoub
-.section sgova
-.section sgovb
-.section sgowa
-.section sgowb
-.section sgoxa
-.section sgoxb
-.section sgoya
-.section sgoyb
-.section sgoza
-.section sgozb
-.section sgo1a
-.section sgo1b
-.section sgo2a
-.section sgo2b
-.section sgo3a
-.section sgo3b
-.section sgo4a
-.section sgo4b
-.section sgo5a
-.section sgo5b
-.section sgo6a
-.section sgo6b
-.section sgo7a
-.section sgo7b
-.section sgo8a
-.section sgo8b
-.section sgo9a
-.section sgo9b
-.section sgo0a
-.section sgo0b
-.section sgpaa
-.section sgpab
-.section sgpba
-.section sgpbb
-.section sgpca
-.section sgpcb
-.section sgpda
-.section sgpdb
-.section sgpea
-.section sgpeb
-.section sgpfa
-.section sgpfb
-.section sgpga
-.section sgpgb
-.section sgpha
-.section sgphb
-.section sgpia
-.section sgpib
-.section sgpja
-.section sgpjb
-.section sgpka
-.section sgpkb
-.section sgpla
-.section sgplb
-.section sgpma
-.section sgpmb
-.section sgpna
-.section sgpnb
-.section sgpoa
-.section sgpob
-.section sgppa
-.section sgppb
-.section sgpqa
-.section sgpqb
-.section sgpra
-.section sgprb
-.section sgpsa
-.section sgpsb
-.section sgpta
-.section sgptb
-.section sgpua
-.section sgpub
-.section sgpva
-.section sgpvb
-.section sgpwa
-.section sgpwb
-.section sgpxa
-.section sgpxb
-.section sgpya
-.section sgpyb
-.section sgpza
-.section sgpzb
-.section sgp1a
-.section sgp1b
-.section sgp2a
-.section sgp2b
-.section sgp3a
-.section sgp3b
-.section sgp4a
-.section sgp4b
-.section sgp5a
-.section sgp5b
-.section sgp6a
-.section sgp6b
-.section sgp7a
-.section sgp7b
-.section sgp8a
-.section sgp8b
-.section sgp9a
-.section sgp9b
-.section sgp0a
-.section sgp0b
-.section sgqaa
-.section sgqab
-.section sgqba
-.section sgqbb
-.section sgqca
-.section sgqcb
-.section sgqda
-.section sgqdb
-.section sgqea
-.section sgqeb
-.section sgqfa
-.section sgqfb
-.section sgqga
-.section sgqgb
-.section sgqha
-.section sgqhb
-.section sgqia
-.section sgqib
-.section sgqja
-.section sgqjb
-.section sgqka
-.section sgqkb
-.section sgqla
-.section sgqlb
-.section sgqma
-.section sgqmb
-.section sgqna
-.section sgqnb
-.section sgqoa
-.section sgqob
-.section sgqpa
-.section sgqpb
-.section sgqqa
-.section sgqqb
-.section sgqra
-.section sgqrb
-.section sgqsa
-.section sgqsb
-.section sgqta
-.section sgqtb
-.section sgqua
-.section sgqub
-.section sgqva
-.section sgqvb
-.section sgqwa
-.section sgqwb
-.section sgqxa
-.section sgqxb
-.section sgqya
-.section sgqyb
-.section sgqza
-.section sgqzb
-.section sgq1a
-.section sgq1b
-.section sgq2a
-.section sgq2b
-.section sgq3a
-.section sgq3b
-.section sgq4a
-.section sgq4b
-.section sgq5a
-.section sgq5b
-.section sgq6a
-.section sgq6b
-.section sgq7a
-.section sgq7b
-.section sgq8a
-.section sgq8b
-.section sgq9a
-.section sgq9b
-.section sgq0a
-.section sgq0b
-.section sgraa
-.section sgrab
-.section sgrba
-.section sgrbb
-.section sgrca
-.section sgrcb
-.section sgrda
-.section sgrdb
-.section sgrea
-.section sgreb
-.section sgrfa
-.section sgrfb
-.section sgrga
-.section sgrgb
-.section sgrha
-.section sgrhb
-.section sgria
-.section sgrib
-.section sgrja
-.section sgrjb
-.section sgrka
-.section sgrkb
-.section sgrla
-.section sgrlb
-.section sgrma
-.section sgrmb
-.section sgrna
-.section sgrnb
-.section sgroa
-.section sgrob
-.section sgrpa
-.section sgrpb
-.section sgrqa
-.section sgrqb
-.section sgrra
-.section sgrrb
-.section sgrsa
-.section sgrsb
-.section sgrta
-.section sgrtb
-.section sgrua
-.section sgrub
-.section sgrva
-.section sgrvb
-.section sgrwa
-.section sgrwb
-.section sgrxa
-.section sgrxb
-.section sgrya
-.section sgryb
-.section sgrza
-.section sgrzb
-.section sgr1a
-.section sgr1b
-.section sgr2a
-.section sgr2b
-.section sgr3a
-.section sgr3b
-.section sgr4a
-.section sgr4b
-.section sgr5a
-.section sgr5b
-.section sgr6a
-.section sgr6b
-.section sgr7a
-.section sgr7b
-.section sgr8a
-.section sgr8b
-.section sgr9a
-.section sgr9b
-.section sgr0a
-.section sgr0b
-.section sgsaa
-.section sgsab
-.section sgsba
-.section sgsbb
-.section sgsca
-.section sgscb
-.section sgsda
-.section sgsdb
-.section sgsea
-.section sgseb
-.section sgsfa
-.section sgsfb
-.section sgsga
-.section sgsgb
-.section sgsha
-.section sgshb
-.section sgsia
-.section sgsib
-.section sgsja
-.section sgsjb
-.section sgska
-.section sgskb
-.section sgsla
-.section sgslb
-.section sgsma
-.section sgsmb
-.section sgsna
-.section sgsnb
-.section sgsoa
-.section sgsob
-.section sgspa
-.section sgspb
-.section sgsqa
-.section sgsqb
-.section sgsra
-.section sgsrb
-.section sgssa
-.section sgssb
-.section sgsta
-.section sgstb
-.section sgsua
-.section sgsub
-.section sgsva
-.section sgsvb
-.section sgswa
-.section sgswb
-.section sgsxa
-.section sgsxb
-.section sgsya
-.section sgsyb
-.section sgsza
-.section sgszb
-.section sgs1a
-.section sgs1b
-.section sgs2a
-.section sgs2b
-.section sgs3a
-.section sgs3b
-.section sgs4a
-.section sgs4b
-.section sgs5a
-.section sgs5b
-.section sgs6a
-.section sgs6b
-.section sgs7a
-.section sgs7b
-.section sgs8a
-.section sgs8b
-.section sgs9a
-.section sgs9b
-.section sgs0a
-.section sgs0b
-.section sgtaa
-.section sgtab
-.section sgtba
-.section sgtbb
-.section sgtca
-.section sgtcb
-.section sgtda
-.section sgtdb
-.section sgtea
-.section sgteb
-.section sgtfa
-.section sgtfb
-.section sgtga
-.section sgtgb
-.section sgtha
-.section sgthb
-.section sgtia
-.section sgtib
-.section sgtja
-.section sgtjb
-.section sgtka
-.section sgtkb
-.section sgtla
-.section sgtlb
-.section sgtma
-.section sgtmb
-.section sgtna
-.section sgtnb
-.section sgtoa
-.section sgtob
-.section sgtpa
-.section sgtpb
-.section sgtqa
-.section sgtqb
-.section sgtra
-.section sgtrb
-.section sgtsa
-.section sgtsb
-.section sgtta
-.section sgttb
-.section sgtua
-.section sgtub
-.section sgtva
-.section sgtvb
-.section sgtwa
-.section sgtwb
-.section sgtxa
-.section sgtxb
-.section sgtya
-.section sgtyb
-.section sgtza
-.section sgtzb
-.section sgt1a
-.section sgt1b
-.section sgt2a
-.section sgt2b
-.section sgt3a
-.section sgt3b
-.section sgt4a
-.section sgt4b
-.section sgt5a
-.section sgt5b
-.section sgt6a
-.section sgt6b
-.section sgt7a
-.section sgt7b
-.section sgt8a
-.section sgt8b
-.section sgt9a
-.section sgt9b
-.section sgt0a
-.section sgt0b
-.section sguaa
-.section sguab
-.section sguba
-.section sgubb
-.section sguca
-.section sgucb
-.section sguda
-.section sgudb
-.section sguea
-.section sgueb
-.section sgufa
-.section sgufb
-.section sguga
-.section sgugb
-.section sguha
-.section sguhb
-.section sguia
-.section sguib
-.section sguja
-.section sgujb
-.section sguka
-.section sgukb
-.section sgula
-.section sgulb
-.section sguma
-.section sgumb
-.section sguna
-.section sgunb
-.section sguoa
-.section sguob
-.section sgupa
-.section sgupb
-.section sguqa
-.section sguqb
-.section sgura
-.section sgurb
-.section sgusa
-.section sgusb
-.section sguta
-.section sgutb
-.section sguua
-.section sguub
-.section sguva
-.section sguvb
-.section sguwa
-.section sguwb
-.section sguxa
-.section sguxb
-.section sguya
-.section sguyb
-.section sguza
-.section sguzb
-.section sgu1a
-.section sgu1b
-.section sgu2a
-.section sgu2b
-.section sgu3a
-.section sgu3b
-.section sgu4a
-.section sgu4b
-.section sgu5a
-.section sgu5b
-.section sgu6a
-.section sgu6b
-.section sgu7a
-.section sgu7b
-.section sgu8a
-.section sgu8b
-.section sgu9a
-.section sgu9b
-.section sgu0a
-.section sgu0b
-.section sgvaa
-.section sgvab
-.section sgvba
-.section sgvbb
-.section sgvca
-.section sgvcb
-.section sgvda
-.section sgvdb
-.section sgvea
-.section sgveb
-.section sgvfa
-.section sgvfb
-.section sgvga
-.section sgvgb
-.section sgvha
-.section sgvhb
-.section sgvia
-.section sgvib
-.section sgvja
-.section sgvjb
-.section sgvka
-.section sgvkb
-.section sgvla
-.section sgvlb
-.section sgvma
-.section sgvmb
-.section sgvna
-.section sgvnb
-.section sgvoa
-.section sgvob
-.section sgvpa
-.section sgvpb
-.section sgvqa
-.section sgvqb
-.section sgvra
-.section sgvrb
-.section sgvsa
-.section sgvsb
-.section sgvta
-.section sgvtb
-.section sgvua
-.section sgvub
-.section sgvva
-.section sgvvb
-.section sgvwa
-.section sgvwb
-.section sgvxa
-.section sgvxb
-.section sgvya
-.section sgvyb
-.section sgvza
-.section sgvzb
-.section sgv1a
-.section sgv1b
-.section sgv2a
-.section sgv2b
-.section sgv3a
-.section sgv3b
-.section sgv4a
-.section sgv4b
-.section sgv5a
-.section sgv5b
-.section sgv6a
-.section sgv6b
-.section sgv7a
-.section sgv7b
-.section sgv8a
-.section sgv8b
-.section sgv9a
-.section sgv9b
-.section sgv0a
-.section sgv0b
-.section sgwaa
-.section sgwab
-.section sgwba
-.section sgwbb
-.section sgwca
-.section sgwcb
-.section sgwda
-.section sgwdb
-.section sgwea
-.section sgweb
-.section sgwfa
-.section sgwfb
-.section sgwga
-.section sgwgb
-.section sgwha
-.section sgwhb
-.section sgwia
-.section sgwib
-.section sgwja
-.section sgwjb
-.section sgwka
-.section sgwkb
-.section sgwla
-.section sgwlb
-.section sgwma
-.section sgwmb
-.section sgwna
-.section sgwnb
-.section sgwoa
-.section sgwob
-.section sgwpa
-.section sgwpb
-.section sgwqa
-.section sgwqb
-.section sgwra
-.section sgwrb
-.section sgwsa
-.section sgwsb
-.section sgwta
-.section sgwtb
-.section sgwua
-.section sgwub
-.section sgwva
-.section sgwvb
-.section sgwwa
-.section sgwwb
-.section sgwxa
-.section sgwxb
-.section sgwya
-.section sgwyb
-.section sgwza
-.section sgwzb
-.section sgw1a
-.section sgw1b
-.section sgw2a
-.section sgw2b
-.section sgw3a
-.section sgw3b
-.section sgw4a
-.section sgw4b
-.section sgw5a
-.section sgw5b
-.section sgw6a
-.section sgw6b
-.section sgw7a
-.section sgw7b
-.section sgw8a
-.section sgw8b
-.section sgw9a
-.section sgw9b
-.section sgw0a
-.section sgw0b
-.section sgxaa
-.section sgxab
-.section sgxba
-.section sgxbb
-.section sgxca
-.section sgxcb
-.section sgxda
-.section sgxdb
-.section sgxea
-.section sgxeb
-.section sgxfa
-.section sgxfb
-.section sgxga
-.section sgxgb
-.section sgxha
-.section sgxhb
-.section sgxia
-.section sgxib
-.section sgxja
-.section sgxjb
-.section sgxka
-.section sgxkb
-.section sgxla
-.section sgxlb
-.section sgxma
-.section sgxmb
-.section sgxna
-.section sgxnb
-.section sgxoa
-.section sgxob
-.section sgxpa
-.section sgxpb
-.section sgxqa
-.section sgxqb
-.section sgxra
-.section sgxrb
-.section sgxsa
-.section sgxsb
-.section sgxta
-.section sgxtb
-.section sgxua
-.section sgxub
-.section sgxva
-.section sgxvb
-.section sgxwa
-.section sgxwb
-.section sgxxa
-.section sgxxb
-.section sgxya
-.section sgxyb
-.section sgxza
-.section sgxzb
-.section sgx1a
-.section sgx1b
-.section sgx2a
-.section sgx2b
-.section sgx3a
-.section sgx3b
-.section sgx4a
-.section sgx4b
-.section sgx5a
-.section sgx5b
-.section sgx6a
-.section sgx6b
-.section sgx7a
-.section sgx7b
-.section sgx8a
-.section sgx8b
-.section sgx9a
-.section sgx9b
-.section sgx0a
-.section sgx0b
-.section sgyaa
-.section sgyab
-.section sgyba
-.section sgybb
-.section sgyca
-.section sgycb
-.section sgyda
-.section sgydb
-.section sgyea
-.section sgyeb
-.section sgyfa
-.section sgyfb
-.section sgyga
-.section sgygb
-.section sgyha
-.section sgyhb
-.section sgyia
-.section sgyib
-.section sgyja
-.section sgyjb
-.section sgyka
-.section sgykb
-.section sgyla
-.section sgylb
-.section sgyma
-.section sgymb
-.section sgyna
-.section sgynb
-.section sgyoa
-.section sgyob
-.section sgypa
-.section sgypb
-.section sgyqa
-.section sgyqb
-.section sgyra
-.section sgyrb
-.section sgysa
-.section sgysb
-.section sgyta
-.section sgytb
-.section sgyua
-.section sgyub
-.section sgyva
-.section sgyvb
-.section sgywa
-.section sgywb
-.section sgyxa
-.section sgyxb
-.section sgyya
-.section sgyyb
-.section sgyza
-.section sgyzb
-.section sgy1a
-.section sgy1b
-.section sgy2a
-.section sgy2b
-.section sgy3a
-.section sgy3b
-.section sgy4a
-.section sgy4b
-.section sgy5a
-.section sgy5b
-.section sgy6a
-.section sgy6b
-.section sgy7a
-.section sgy7b
-.section sgy8a
-.section sgy8b
-.section sgy9a
-.section sgy9b
-.section sgy0a
-.section sgy0b
-.section sgzaa
-.section sgzab
-.section sgzba
-.section sgzbb
-.section sgzca
-.section sgzcb
-.section sgzda
-.section sgzdb
-.section sgzea
-.section sgzeb
-.section sgzfa
-.section sgzfb
-.section sgzga
-.section sgzgb
-.section sgzha
-.section sgzhb
-.section sgzia
-.section sgzib
-.section sgzja
-.section sgzjb
-.section sgzka
-.section sgzkb
-.section sgzla
-.section sgzlb
-.section sgzma
-.section sgzmb
-.section sgzna
-.section sgznb
-.section sgzoa
-.section sgzob
-.section sgzpa
-.section sgzpb
-.section sgzqa
-.section sgzqb
-.section sgzra
-.section sgzrb
-.section sgzsa
-.section sgzsb
-.section sgzta
-.section sgztb
-.section sgzua
-.section sgzub
-.section sgzva
-.section sgzvb
-.section sgzwa
-.section sgzwb
-.section sgzxa
-.section sgzxb
-.section sgzya
-.section sgzyb
-.section sgzza
-.section sgzzb
-.section sgz1a
-.section sgz1b
-.section sgz2a
-.section sgz2b
-.section sgz3a
-.section sgz3b
-.section sgz4a
-.section sgz4b
-.section sgz5a
-.section sgz5b
-.section sgz6a
-.section sgz6b
-.section sgz7a
-.section sgz7b
-.section sgz8a
-.section sgz8b
-.section sgz9a
-.section sgz9b
-.section sgz0a
-.section sgz0b
-.section sg1aa
-.section sg1ab
-.section sg1ba
-.section sg1bb
-.section sg1ca
-.section sg1cb
-.section sg1da
-.section sg1db
-.section sg1ea
-.section sg1eb
-.section sg1fa
-.section sg1fb
-.section sg1ga
-.section sg1gb
-.section sg1ha
-.section sg1hb
-.section sg1ia
-.section sg1ib
-.section sg1ja
-.section sg1jb
-.section sg1ka
-.section sg1kb
-.section sg1la
-.section sg1lb
-.section sg1ma
-.section sg1mb
-.section sg1na
-.section sg1nb
-.section sg1oa
-.section sg1ob
-.section sg1pa
-.section sg1pb
-.section sg1qa
-.section sg1qb
-.section sg1ra
-.section sg1rb
-.section sg1sa
-.section sg1sb
-.section sg1ta
-.section sg1tb
-.section sg1ua
-.section sg1ub
-.section sg1va
-.section sg1vb
-.section sg1wa
-.section sg1wb
-.section sg1xa
-.section sg1xb
-.section sg1ya
-.section sg1yb
-.section sg1za
-.section sg1zb
-.section sg11a
-.section sg11b
-.section sg12a
-.section sg12b
-.section sg13a
-.section sg13b
-.section sg14a
-.section sg14b
-.section sg15a
-.section sg15b
-.section sg16a
-.section sg16b
-.section sg17a
-.section sg17b
-.section sg18a
-.section sg18b
-.section sg19a
-.section sg19b
-.section sg10a
-.section sg10b
-.section sg2aa
-.section sg2ab
-.section sg2ba
-.section sg2bb
-.section sg2ca
-.section sg2cb
-.section sg2da
-.section sg2db
-.section sg2ea
-.section sg2eb
-.section sg2fa
-.section sg2fb
-.section sg2ga
-.section sg2gb
-.section sg2ha
-.section sg2hb
-.section sg2ia
-.section sg2ib
-.section sg2ja
-.section sg2jb
-.section sg2ka
-.section sg2kb
-.section sg2la
-.section sg2lb
-.section sg2ma
-.section sg2mb
-.section sg2na
-.section sg2nb
-.section sg2oa
-.section sg2ob
-.section sg2pa
-.section sg2pb
-.section sg2qa
-.section sg2qb
-.section sg2ra
-.section sg2rb
-.section sg2sa
-.section sg2sb
-.section sg2ta
-.section sg2tb
-.section sg2ua
-.section sg2ub
-.section sg2va
-.section sg2vb
-.section sg2wa
-.section sg2wb
-.section sg2xa
-.section sg2xb
-.section sg2ya
-.section sg2yb
-.section sg2za
-.section sg2zb
-.section sg21a
-.section sg21b
-.section sg22a
-.section sg22b
-.section sg23a
-.section sg23b
-.section sg24a
-.section sg24b
-.section sg25a
-.section sg25b
-.section sg26a
-.section sg26b
-.section sg27a
-.section sg27b
-.section sg28a
-.section sg28b
-.section sg29a
-.section sg29b
-.section sg20a
-.section sg20b
-.section sg3aa
-.section sg3ab
-.section sg3ba
-.section sg3bb
-.section sg3ca
-.section sg3cb
-.section sg3da
-.section sg3db
-.section sg3ea
-.section sg3eb
-.section sg3fa
-.section sg3fb
-.section sg3ga
-.section sg3gb
-.section sg3ha
-.section sg3hb
-.section sg3ia
-.section sg3ib
-.section sg3ja
-.section sg3jb
-.section sg3ka
-.section sg3kb
-.section sg3la
-.section sg3lb
-.section sg3ma
-.section sg3mb
-.section sg3na
-.section sg3nb
-.section sg3oa
-.section sg3ob
-.section sg3pa
-.section sg3pb
-.section sg3qa
-.section sg3qb
-.section sg3ra
-.section sg3rb
-.section sg3sa
-.section sg3sb
-.section sg3ta
-.section sg3tb
-.section sg3ua
-.section sg3ub
-.section sg3va
-.section sg3vb
-.section sg3wa
-.section sg3wb
-.section sg3xa
-.section sg3xb
-.section sg3ya
-.section sg3yb
-.section sg3za
-.section sg3zb
-.section sg31a
-.section sg31b
-.section sg32a
-.section sg32b
-.section sg33a
-.section sg33b
-.section sg34a
-.section sg34b
-.section sg35a
-.section sg35b
-.section sg36a
-.section sg36b
-.section sg37a
-.section sg37b
-.section sg38a
-.section sg38b
-.section sg39a
-.section sg39b
-.section sg30a
-.section sg30b
-.section sg4aa
-.section sg4ab
-.section sg4ba
-.section sg4bb
-.section sg4ca
-.section sg4cb
-.section sg4da
-.section sg4db
-.section sg4ea
-.section sg4eb
-.section sg4fa
-.section sg4fb
-.section sg4ga
-.section sg4gb
-.section sg4ha
-.section sg4hb
-.section sg4ia
-.section sg4ib
-.section sg4ja
-.section sg4jb
-.section sg4ka
-.section sg4kb
-.section sg4la
-.section sg4lb
-.section sg4ma
-.section sg4mb
-.section sg4na
-.section sg4nb
-.section sg4oa
-.section sg4ob
-.section sg4pa
-.section sg4pb
-.section sg4qa
-.section sg4qb
-.section sg4ra
-.section sg4rb
-.section sg4sa
-.section sg4sb
-.section sg4ta
-.section sg4tb
-.section sg4ua
-.section sg4ub
-.section sg4va
-.section sg4vb
-.section sg4wa
-.section sg4wb
-.section sg4xa
-.section sg4xb
-.section sg4ya
-.section sg4yb
-.section sg4za
-.section sg4zb
-.section sg41a
-.section sg41b
-.section sg42a
-.section sg42b
-.section sg43a
-.section sg43b
-.section sg44a
-.section sg44b
-.section sg45a
-.section sg45b
-.section sg46a
-.section sg46b
-.section sg47a
-.section sg47b
-.section sg48a
-.section sg48b
-.section sg49a
-.section sg49b
-.section sg40a
-.section sg40b
-.section sg5aa
-.section sg5ab
-.section sg5ba
-.section sg5bb
-.section sg5ca
-.section sg5cb
-.section sg5da
-.section sg5db
-.section sg5ea
-.section sg5eb
-.section sg5fa
-.section sg5fb
-.section sg5ga
-.section sg5gb
-.section sg5ha
-.section sg5hb
-.section sg5ia
-.section sg5ib
-.section sg5ja
-.section sg5jb
-.section sg5ka
-.section sg5kb
-.section sg5la
-.section sg5lb
-.section sg5ma
-.section sg5mb
-.section sg5na
-.section sg5nb
-.section sg5oa
-.section sg5ob
-.section sg5pa
-.section sg5pb
-.section sg5qa
-.section sg5qb
-.section sg5ra
-.section sg5rb
-.section sg5sa
-.section sg5sb
-.section sg5ta
-.section sg5tb
-.section sg5ua
-.section sg5ub
-.section sg5va
-.section sg5vb
-.section sg5wa
-.section sg5wb
-.section sg5xa
-.section sg5xb
-.section sg5ya
-.section sg5yb
-.section sg5za
-.section sg5zb
-.section sg51a
-.section sg51b
-.section sg52a
-.section sg52b
-.section sg53a
-.section sg53b
-.section sg54a
-.section sg54b
-.section sg55a
-.section sg55b
-.section sg56a
-.section sg56b
-.section sg57a
-.section sg57b
-.section sg58a
-.section sg58b
-.section sg59a
-.section sg59b
-.section sg50a
-.section sg50b
-.section sg6aa
-.section sg6ab
-.section sg6ba
-.section sg6bb
-.section sg6ca
-.section sg6cb
-.section sg6da
-.section sg6db
-.section sg6ea
-.section sg6eb
-.section sg6fa
-.section sg6fb
-.section sg6ga
-.section sg6gb
-.section sg6ha
-.section sg6hb
-.section sg6ia
-.section sg6ib
-.section sg6ja
-.section sg6jb
-.section sg6ka
-.section sg6kb
-.section sg6la
-.section sg6lb
-.section sg6ma
-.section sg6mb
-.section sg6na
-.section sg6nb
-.section sg6oa
-.section sg6ob
-.section sg6pa
-.section sg6pb
-.section sg6qa
-.section sg6qb
-.section sg6ra
-.section sg6rb
-.section sg6sa
-.section sg6sb
-.section sg6ta
-.section sg6tb
-.section sg6ua
-.section sg6ub
-.section sg6va
-.section sg6vb
-.section sg6wa
-.section sg6wb
-.section sg6xa
-.section sg6xb
-.section sg6ya
-.section sg6yb
-.section sg6za
-.section sg6zb
-.section sg61a
-.section sg61b
-.section sg62a
-.section sg62b
-.section sg63a
-.section sg63b
-.section sg64a
-.section sg64b
-.section sg65a
-.section sg65b
-.section sg66a
-.section sg66b
-.section sg67a
-.section sg67b
-.section sg68a
-.section sg68b
-.section sg69a
-.section sg69b
-.section sg60a
-.section sg60b
-.section sg7aa
-.section sg7ab
-.section sg7ba
-.section sg7bb
-.section sg7ca
-.section sg7cb
-.section sg7da
-.section sg7db
-.section sg7ea
-.section sg7eb
-.section sg7fa
-.section sg7fb
-.section sg7ga
-.section sg7gb
-.section sg7ha
-.section sg7hb
-.section sg7ia
-.section sg7ib
-.section sg7ja
-.section sg7jb
-.section sg7ka
-.section sg7kb
-.section sg7la
-.section sg7lb
-.section sg7ma
-.section sg7mb
-.section sg7na
-.section sg7nb
-.section sg7oa
-.section sg7ob
-.section sg7pa
-.section sg7pb
-.section sg7qa
-.section sg7qb
-.section sg7ra
-.section sg7rb
-.section sg7sa
-.section sg7sb
-.section sg7ta
-.section sg7tb
-.section sg7ua
-.section sg7ub
-.section sg7va
-.section sg7vb
-.section sg7wa
-.section sg7wb
-.section sg7xa
-.section sg7xb
-.section sg7ya
-.section sg7yb
-.section sg7za
-.section sg7zb
-.section sg71a
-.section sg71b
-.section sg72a
-.section sg72b
-.section sg73a
-.section sg73b
-.section sg74a
-.section sg74b
-.section sg75a
-.section sg75b
-.section sg76a
-.section sg76b
-.section sg77a
-.section sg77b
-.section sg78a
-.section sg78b
-.section sg79a
-.section sg79b
-.section sg70a
-.section sg70b
-.section sg8aa
-.section sg8ab
-.section sg8ba
-.section sg8bb
-.section sg8ca
-.section sg8cb
-.section sg8da
-.section sg8db
-.section sg8ea
-.section sg8eb
-.section sg8fa
-.section sg8fb
-.section sg8ga
-.section sg8gb
-.section sg8ha
-.section sg8hb
-.section sg8ia
-.section sg8ib
-.section sg8ja
-.section sg8jb
-.section sg8ka
-.section sg8kb
-.section sg8la
-.section sg8lb
-.section sg8ma
-.section sg8mb
-.section sg8na
-.section sg8nb
-.section sg8oa
-.section sg8ob
-.section sg8pa
-.section sg8pb
-.section sg8qa
-.section sg8qb
-.section sg8ra
-.section sg8rb
-.section sg8sa
-.section sg8sb
-.section sg8ta
-.section sg8tb
-.section sg8ua
-.section sg8ub
-.section sg8va
-.section sg8vb
-.section sg8wa
-.section sg8wb
-.section sg8xa
-.section sg8xb
-.section sg8ya
-.section sg8yb
-.section sg8za
-.section sg8zb
-.section sg81a
-.section sg81b
-.section sg82a
-.section sg82b
-.section sg83a
-.section sg83b
-.section sg84a
-.section sg84b
-.section sg85a
-.section sg85b
-.section sg86a
-.section sg86b
-.section sg87a
-.section sg87b
-.section sg88a
-.section sg88b
-.section sg89a
-.section sg89b
-.section sg80a
-.section sg80b
-.section sg9aa
-.section sg9ab
-.section sg9ba
-.section sg9bb
-.section sg9ca
-.section sg9cb
-.section sg9da
-.section sg9db
-.section sg9ea
-.section sg9eb
-.section sg9fa
-.section sg9fb
-.section sg9ga
-.section sg9gb
-.section sg9ha
-.section sg9hb
-.section sg9ia
-.section sg9ib
-.section sg9ja
-.section sg9jb
-.section sg9ka
-.section sg9kb
-.section sg9la
-.section sg9lb
-.section sg9ma
-.section sg9mb
-.section sg9na
-.section sg9nb
-.section sg9oa
-.section sg9ob
-.section sg9pa
-.section sg9pb
-.section sg9qa
-.section sg9qb
-.section sg9ra
-.section sg9rb
-.section sg9sa
-.section sg9sb
-.section sg9ta
-.section sg9tb
-.section sg9ua
-.section sg9ub
-.section sg9va
-.section sg9vb
-.section sg9wa
-.section sg9wb
-.section sg9xa
-.section sg9xb
-.section sg9ya
-.section sg9yb
-.section sg9za
-.section sg9zb
-.section sg91a
-.section sg91b
-.section sg92a
-.section sg92b
-.section sg93a
-.section sg93b
-.section sg94a
-.section sg94b
-.section sg95a
-.section sg95b
-.section sg96a
-.section sg96b
-.section sg97a
-.section sg97b
-.section sg98a
-.section sg98b
-.section sg99a
-.section sg99b
-.section sg90a
-.section sg90b
-.section sg0aa
-.section sg0ab
-.section sg0ba
-.section sg0bb
-.section sg0ca
-.section sg0cb
-.section sg0da
-.section sg0db
-.section sg0ea
-.section sg0eb
-.section sg0fa
-.section sg0fb
-.section sg0ga
-.section sg0gb
-.section sg0ha
-.section sg0hb
-.section sg0ia
-.section sg0ib
-.section sg0ja
-.section sg0jb
-.section sg0ka
-.section sg0kb
-.section sg0la
-.section sg0lb
-.section sg0ma
-.section sg0mb
-.section sg0na
-.section sg0nb
-.section sg0oa
-.section sg0ob
-.section sg0pa
-.section sg0pb
-.section sg0qa
-.section sg0qb
-.section sg0ra
-.section sg0rb
-.section sg0sa
-.section sg0sb
-.section sg0ta
-.section sg0tb
-.section sg0ua
-.section sg0ub
-.section sg0va
-.section sg0vb
-.section sg0wa
-.section sg0wb
-.section sg0xa
-.section sg0xb
-.section sg0ya
-.section sg0yb
-.section sg0za
-.section sg0zb
-.section sg01a
-.section sg01b
-.section sg02a
-.section sg02b
-.section sg03a
-.section sg03b
-.section sg04a
-.section sg04b
-.section sg05a
-.section sg05b
-.section sg06a
-.section sg06b
-.section sg07a
-.section sg07b
-.section sg08a
-.section sg08b
-.section sg09a
-.section sg09b
-.section sg00a
-.section sg00b
-.section shaaa
-.section shaab
-.section shaba
-.section shabb
-.section shaca
-.section shacb
-.section shada
-.section shadb
-.section shaea
-.section shaeb
-.section shafa
-.section shafb
-.section shaga
-.section shagb
-.section shaha
-.section shahb
-.section shaia
-.section shaib
-.section shaja
-.section shajb
-.section shaka
-.section shakb
-.section shala
-.section shalb
-.section shama
-.section shamb
-.section shana
-.section shanb
-.section shaoa
-.section shaob
-.section shapa
-.section shapb
-.section shaqa
-.section shaqb
-.section shara
-.section sharb
-.section shasa
-.section shasb
-.section shata
-.section shatb
-.section shaua
-.section shaub
-.section shava
-.section shavb
-.section shawa
-.section shawb
-.section shaxa
-.section shaxb
-.section shaya
-.section shayb
-.section shaza
-.section shazb
-.section sha1a
-.section sha1b
-.section sha2a
-.section sha2b
-.section sha3a
-.section sha3b
-.section sha4a
-.section sha4b
-.section sha5a
-.section sha5b
-.section sha6a
-.section sha6b
-.section sha7a
-.section sha7b
-.section sha8a
-.section sha8b
-.section sha9a
-.section sha9b
-.section sha0a
-.section sha0b
-.section shbaa
-.section shbab
-.section shbba
-.section shbbb
-.section shbca
-.section shbcb
-.section shbda
-.section shbdb
-.section shbea
-.section shbeb
-.section shbfa
-.section shbfb
-.section shbga
-.section shbgb
-.section shbha
-.section shbhb
-.section shbia
-.section shbib
-.section shbja
-.section shbjb
-.section shbka
-.section shbkb
-.section shbla
-.section shblb
-.section shbma
-.section shbmb
-.section shbna
-.section shbnb
-.section shboa
-.section shbob
-.section shbpa
-.section shbpb
-.section shbqa
-.section shbqb
-.section shbra
-.section shbrb
-.section shbsa
-.section shbsb
-.section shbta
-.section shbtb
-.section shbua
-.section shbub
-.section shbva
-.section shbvb
-.section shbwa
-.section shbwb
-.section shbxa
-.section shbxb
-.section shbya
-.section shbyb
-.section shbza
-.section shbzb
-.section shb1a
-.section shb1b
-.section shb2a
-.section shb2b
-.section shb3a
-.section shb3b
-.section shb4a
-.section shb4b
-.section shb5a
-.section shb5b
-.section shb6a
-.section shb6b
-.section shb7a
-.section shb7b
-.section shb8a
-.section shb8b
-.section shb9a
-.section shb9b
-.section shb0a
-.section shb0b
-.section shcaa
-.section shcab
-.section shcba
-.section shcbb
-.section shcca
-.section shccb
-.section shcda
-.section shcdb
-.section shcea
-.section shceb
-.section shcfa
-.section shcfb
-.section shcga
-.section shcgb
-.section shcha
-.section shchb
-.section shcia
-.section shcib
-.section shcja
-.section shcjb
-.section shcka
-.section shckb
-.section shcla
-.section shclb
-.section shcma
-.section shcmb
-.section shcna
-.section shcnb
-.section shcoa
-.section shcob
-.section shcpa
-.section shcpb
-.section shcqa
-.section shcqb
-.section shcra
-.section shcrb
-.section shcsa
-.section shcsb
-.section shcta
-.section shctb
-.section shcua
-.section shcub
-.section shcva
-.section shcvb
-.section shcwa
-.section shcwb
-.section shcxa
-.section shcxb
-.section shcya
-.section shcyb
-.section shcza
-.section shczb
-.section shc1a
-.section shc1b
-.section shc2a
-.section shc2b
-.section shc3a
-.section shc3b
-.section shc4a
-.section shc4b
-.section shc5a
-.section shc5b
-.section shc6a
-.section shc6b
-.section shc7a
-.section shc7b
-.section shc8a
-.section shc8b
-.section shc9a
-.section shc9b
-.section shc0a
-.section shc0b
-.section shdaa
-.section shdab
-.section shdba
-.section shdbb
-.section shdca
-.section shdcb
-.section shdda
-.section shddb
-.section shdea
-.section shdeb
-.section shdfa
-.section shdfb
-.section shdga
-.section shdgb
-.section shdha
-.section shdhb
-.section shdia
-.section shdib
-.section shdja
-.section shdjb
-.section shdka
-.section shdkb
-.section shdla
-.section shdlb
-.section shdma
-.section shdmb
-.section shdna
-.section shdnb
-.section shdoa
-.section shdob
-.section shdpa
-.section shdpb
-.section shdqa
-.section shdqb
-.section shdra
-.section shdrb
-.section shdsa
-.section shdsb
-.section shdta
-.section shdtb
-.section shdua
-.section shdub
-.section shdva
-.section shdvb
-.section shdwa
-.section shdwb
-.section shdxa
-.section shdxb
-.section shdya
-.section shdyb
-.section shdza
-.section shdzb
-.section shd1a
-.section shd1b
-.section shd2a
-.section shd2b
-.section shd3a
-.section shd3b
-.section shd4a
-.section shd4b
-.section shd5a
-.section shd5b
-.section shd6a
-.section shd6b
-.section shd7a
-.section shd7b
-.section shd8a
-.section shd8b
-.section shd9a
-.section shd9b
-.section shd0a
-.section shd0b
-.section sheaa
-.section sheab
-.section sheba
-.section shebb
-.section sheca
-.section shecb
-.section sheda
-.section shedb
-.section sheea
-.section sheeb
-.section shefa
-.section shefb
-.section shega
-.section shegb
-.section sheha
-.section shehb
-.section sheia
-.section sheib
-.section sheja
-.section shejb
-.section sheka
-.section shekb
-.section shela
-.section shelb
-.section shema
-.section shemb
-.section shena
-.section shenb
-.section sheoa
-.section sheob
-.section shepa
-.section shepb
-.section sheqa
-.section sheqb
-.section shera
-.section sherb
-.section shesa
-.section shesb
-.section sheta
-.section shetb
-.section sheua
-.section sheub
-.section sheva
-.section shevb
-.section shewa
-.section shewb
-.section shexa
-.section shexb
-.section sheya
-.section sheyb
-.section sheza
-.section shezb
-.section she1a
-.section she1b
-.section she2a
-.section she2b
-.section she3a
-.section she3b
-.section she4a
-.section she4b
-.section she5a
-.section she5b
-.section she6a
-.section she6b
-.section she7a
-.section she7b
-.section she8a
-.section she8b
-.section she9a
-.section she9b
-.section she0a
-.section she0b
-.section shfaa
-.section shfab
-.section shfba
-.section shfbb
-.section shfca
-.section shfcb
-.section shfda
-.section shfdb
-.section shfea
-.section shfeb
-.section shffa
-.section shffb
-.section shfga
-.section shfgb
-.section shfha
-.section shfhb
-.section shfia
-.section shfib
-.section shfja
-.section shfjb
-.section shfka
-.section shfkb
-.section shfla
-.section shflb
-.section shfma
-.section shfmb
-.section shfna
-.section shfnb
-.section shfoa
-.section shfob
-.section shfpa
-.section shfpb
-.section shfqa
-.section shfqb
-.section shfra
-.section shfrb
-.section shfsa
-.section shfsb
-.section shfta
-.section shftb
-.section shfua
-.section shfub
-.section shfva
-.section shfvb
-.section shfwa
-.section shfwb
-.section shfxa
-.section shfxb
-.section shfya
-.section shfyb
-.section shfza
-.section shfzb
-.section shf1a
-.section shf1b
-.section shf2a
-.section shf2b
-.section shf3a
-.section shf3b
-.section shf4a
-.section shf4b
-.section shf5a
-.section shf5b
-.section shf6a
-.section shf6b
-.section shf7a
-.section shf7b
-.section shf8a
-.section shf8b
-.section shf9a
-.section shf9b
-.section shf0a
-.section shf0b
-.section shgaa
-.section shgab
-.section shgba
-.section shgbb
-.section shgca
-.section shgcb
-.section shgda
-.section shgdb
-.section shgea
-.section shgeb
-.section shgfa
-.section shgfb
-.section shgga
-.section shggb
-.section shgha
-.section shghb
-.section shgia
-.section shgib
-.section shgja
-.section shgjb
-.section shgka
-.section shgkb
-.section shgla
-.section shglb
-.section shgma
-.section shgmb
-.section shgna
-.section shgnb
-.section shgoa
-.section shgob
-.section shgpa
-.section shgpb
-.section shgqa
-.section shgqb
-.section shgra
-.section shgrb
-.section shgsa
-.section shgsb
-.section shgta
-.section shgtb
-.section shgua
-.section shgub
-.section shgva
-.section shgvb
-.section shgwa
-.section shgwb
-.section shgxa
-.section shgxb
-.section shgya
-.section shgyb
-.section shgza
-.section shgzb
-.section shg1a
-.section shg1b
-.section shg2a
-.section shg2b
-.section shg3a
-.section shg3b
-.section shg4a
-.section shg4b
-.section shg5a
-.section shg5b
-.section shg6a
-.section shg6b
-.section shg7a
-.section shg7b
-.section shg8a
-.section shg8b
-.section shg9a
-.section shg9b
-.section shg0a
-.section shg0b
-.section shhaa
-.section shhab
-.section shhba
-.section shhbb
-.section shhca
-.section shhcb
-.section shhda
-.section shhdb
-.section shhea
-.section shheb
-.section shhfa
-.section shhfb
-.section shhga
-.section shhgb
-.section shhha
-.section shhhb
-.section shhia
-.section shhib
-.section shhja
-.section shhjb
-.section shhka
-.section shhkb
-.section shhla
-.section shhlb
-.section shhma
-.section shhmb
-.section shhna
-.section shhnb
-.section shhoa
-.section shhob
-.section shhpa
-.section shhpb
-.section shhqa
-.section shhqb
-.section shhra
-.section shhrb
-.section shhsa
-.section shhsb
-.section shhta
-.section shhtb
-.section shhua
-.section shhub
-.section shhva
-.section shhvb
-.section shhwa
-.section shhwb
-.section shhxa
-.section shhxb
-.section shhya
-.section shhyb
-.section shhza
-.section shhzb
-.section shh1a
-.section shh1b
-.section shh2a
-.section shh2b
-.section shh3a
-.section shh3b
-.section shh4a
-.section shh4b
-.section shh5a
-.section shh5b
-.section shh6a
-.section shh6b
-.section shh7a
-.section shh7b
-.section shh8a
-.section shh8b
-.section shh9a
-.section shh9b
-.section shh0a
-.section shh0b
-.section shiaa
-.section shiab
-.section shiba
-.section shibb
-.section shica
-.section shicb
-.section shida
-.section shidb
-.section shiea
-.section shieb
-.section shifa
-.section shifb
-.section shiga
-.section shigb
-.section shiha
-.section shihb
-.section shiia
-.section shiib
-.section shija
-.section shijb
-.section shika
-.section shikb
-.section shila
-.section shilb
-.section shima
-.section shimb
-.section shina
-.section shinb
-.section shioa
-.section shiob
-.section shipa
-.section shipb
-.section shiqa
-.section shiqb
-.section shira
-.section shirb
-.section shisa
-.section shisb
-.section shita
-.section shitb
-.section shiua
-.section shiub
-.section shiva
-.section shivb
-.section shiwa
-.section shiwb
-.section shixa
-.section shixb
-.section shiya
-.section shiyb
-.section shiza
-.section shizb
-.section shi1a
-.section shi1b
-.section shi2a
-.section shi2b
-.section shi3a
-.section shi3b
-.section shi4a
-.section shi4b
-.section shi5a
-.section shi5b
-.section shi6a
-.section shi6b
-.section shi7a
-.section shi7b
-.section shi8a
-.section shi8b
-.section shi9a
-.section shi9b
-.section shi0a
-.section shi0b
-.section shjaa
-.section shjab
-.section shjba
-.section shjbb
-.section shjca
-.section shjcb
-.section shjda
-.section shjdb
-.section shjea
-.section shjeb
-.section shjfa
-.section shjfb
-.section shjga
-.section shjgb
-.section shjha
-.section shjhb
-.section shjia
-.section shjib
-.section shjja
-.section shjjb
-.section shjka
-.section shjkb
-.section shjla
-.section shjlb
-.section shjma
-.section shjmb
-.section shjna
-.section shjnb
-.section shjoa
-.section shjob
-.section shjpa
-.section shjpb
-.section shjqa
-.section shjqb
-.section shjra
-.section shjrb
-.section shjsa
-.section shjsb
-.section shjta
-.section shjtb
-.section shjua
-.section shjub
-.section shjva
-.section shjvb
-.section shjwa
-.section shjwb
-.section shjxa
-.section shjxb
-.section shjya
-.section shjyb
-.section shjza
-.section shjzb
-.section shj1a
-.section shj1b
-.section shj2a
-.section shj2b
-.section shj3a
-.section shj3b
-.section shj4a
-.section shj4b
-.section shj5a
-.section shj5b
-.section shj6a
-.section shj6b
-.section shj7a
-.section shj7b
-.section shj8a
-.section shj8b
-.section shj9a
-.section shj9b
-.section shj0a
-.section shj0b
-.section shkaa
-.section shkab
-.section shkba
-.section shkbb
-.section shkca
-.section shkcb
-.section shkda
-.section shkdb
-.section shkea
-.section shkeb
-.section shkfa
-.section shkfb
-.section shkga
-.section shkgb
-.section shkha
-.section shkhb
-.section shkia
-.section shkib
-.section shkja
-.section shkjb
-.section shkka
-.section shkkb
-.section shkla
-.section shklb
-.section shkma
-.section shkmb
-.section shkna
-.section shknb
-.section shkoa
-.section shkob
-.section shkpa
-.section shkpb
-.section shkqa
-.section shkqb
-.section shkra
-.section shkrb
-.section shksa
-.section shksb
-.section shkta
-.section shktb
-.section shkua
-.section shkub
-.section shkva
-.section shkvb
-.section shkwa
-.section shkwb
-.section shkxa
-.section shkxb
-.section shkya
-.section shkyb
-.section shkza
-.section shkzb
-.section shk1a
-.section shk1b
-.section shk2a
-.section shk2b
-.section shk3a
-.section shk3b
-.section shk4a
-.section shk4b
-.section shk5a
-.section shk5b
-.section shk6a
-.section shk6b
-.section shk7a
-.section shk7b
-.section shk8a
-.section shk8b
-.section shk9a
-.section shk9b
-.section shk0a
-.section shk0b
-.section shlaa
-.section shlab
-.section shlba
-.section shlbb
-.section shlca
-.section shlcb
-.section shlda
-.section shldb
-.section shlea
-.section shleb
-.section shlfa
-.section shlfb
-.section shlga
-.section shlgb
-.section shlha
-.section shlhb
-.section shlia
-.section shlib
-.section shlja
-.section shljb
-.section shlka
-.section shlkb
-.section shlla
-.section shllb
-.section shlma
-.section shlmb
-.section shlna
-.section shlnb
-.section shloa
-.section shlob
-.section shlpa
-.section shlpb
-.section shlqa
-.section shlqb
-.section shlra
-.section shlrb
-.section shlsa
-.section shlsb
-.section shlta
-.section shltb
-.section shlua
-.section shlub
-.section shlva
-.section shlvb
-.section shlwa
-.section shlwb
-.section shlxa
-.section shlxb
-.section shlya
-.section shlyb
-.section shlza
-.section shlzb
-.section shl1a
-.section shl1b
-.section shl2a
-.section shl2b
-.section shl3a
-.section shl3b
-.section shl4a
-.section shl4b
-.section shl5a
-.section shl5b
-.section shl6a
-.section shl6b
-.section shl7a
-.section shl7b
-.section shl8a
-.section shl8b
-.section shl9a
-.section shl9b
-.section shl0a
-.section shl0b
-.section shmaa
-.section shmab
-.section shmba
-.section shmbb
-.section shmca
-.section shmcb
-.section shmda
-.section shmdb
-.section shmea
-.section shmeb
-.section shmfa
-.section shmfb
-.section shmga
-.section shmgb
-.section shmha
-.section shmhb
-.section shmia
-.section shmib
-.section shmja
-.section shmjb
-.section shmka
-.section shmkb
-.section shmla
-.section shmlb
-.section shmma
-.section shmmb
-.section shmna
-.section shmnb
-.section shmoa
-.section shmob
-.section shmpa
-.section shmpb
-.section shmqa
-.section shmqb
-.section shmra
-.section shmrb
-.section shmsa
-.section shmsb
-.section shmta
-.section shmtb
-.section shmua
-.section shmub
-.section shmva
-.section shmvb
-.section shmwa
-.section shmwb
-.section shmxa
-.section shmxb
-.section shmya
-.section shmyb
-.section shmza
-.section shmzb
-.section shm1a
-.section shm1b
-.section shm2a
-.section shm2b
-.section shm3a
-.section shm3b
-.section shm4a
-.section shm4b
-.section shm5a
-.section shm5b
-.section shm6a
-.section shm6b
-.section shm7a
-.section shm7b
-.section shm8a
-.section shm8b
-.section shm9a
-.section shm9b
-.section shm0a
-.section shm0b
-.section shnaa
-.section shnab
-.section shnba
-.section shnbb
-.section shnca
-.section shncb
-.section shnda
-.section shndb
-.section shnea
-.section shneb
-.section shnfa
-.section shnfb
-.section shnga
-.section shngb
-.section shnha
-.section shnhb
-.section shnia
-.section shnib
-.section shnja
-.section shnjb
-.section shnka
-.section shnkb
-.section shnla
-.section shnlb
-.section shnma
-.section shnmb
-.section shnna
-.section shnnb
-.section shnoa
-.section shnob
-.section shnpa
-.section shnpb
-.section shnqa
-.section shnqb
-.section shnra
-.section shnrb
-.section shnsa
-.section shnsb
-.section shnta
-.section shntb
-.section shnua
-.section shnub
-.section shnva
-.section shnvb
-.section shnwa
-.section shnwb
-.section shnxa
-.section shnxb
-.section shnya
-.section shnyb
-.section shnza
-.section shnzb
-.section shn1a
-.section shn1b
-.section shn2a
-.section shn2b
-.section shn3a
-.section shn3b
-.section shn4a
-.section shn4b
-.section shn5a
-.section shn5b
-.section shn6a
-.section shn6b
-.section shn7a
-.section shn7b
-.section shn8a
-.section shn8b
-.section shn9a
-.section shn9b
-.section shn0a
-.section shn0b
-.section shoaa
-.section shoab
-.section shoba
-.section shobb
-.section shoca
-.section shocb
-.section shoda
-.section shodb
-.section shoea
-.section shoeb
-.section shofa
-.section shofb
-.section shoga
-.section shogb
-.section shoha
-.section shohb
-.section shoia
-.section shoib
-.section shoja
-.section shojb
-.section shoka
-.section shokb
-.section shola
-.section sholb
-.section shoma
-.section shomb
-.section shona
-.section shonb
-.section shooa
-.section shoob
-.section shopa
-.section shopb
-.section shoqa
-.section shoqb
-.section shora
-.section shorb
-.section shosa
-.section shosb
-.section shota
-.section shotb
-.section shoua
-.section shoub
-.section shova
-.section shovb
-.section showa
-.section showb
-.section shoxa
-.section shoxb
-.section shoya
-.section shoyb
-.section shoza
-.section shozb
-.section sho1a
-.section sho1b
-.section sho2a
-.section sho2b
-.section sho3a
-.section sho3b
-.section sho4a
-.section sho4b
-.section sho5a
-.section sho5b
-.section sho6a
-.section sho6b
-.section sho7a
-.section sho7b
-.section sho8a
-.section sho8b
-.section sho9a
-.section sho9b
-.section sho0a
-.section sho0b
-.section shpaa
-.section shpab
-.section shpba
-.section shpbb
-.section shpca
-.section shpcb
-.section shpda
-.section shpdb
-.section shpea
-.section shpeb
-.section shpfa
-.section shpfb
-.section shpga
-.section shpgb
-.section shpha
-.section shphb
-.section shpia
-.section shpib
-.section shpja
-.section shpjb
-.section shpka
-.section shpkb
-.section shpla
-.section shplb
-.section shpma
-.section shpmb
-.section shpna
-.section shpnb
-.section shpoa
-.section shpob
-.section shppa
-.section shppb
-.section shpqa
-.section shpqb
-.section shpra
-.section shprb
-.section shpsa
-.section shpsb
-.section shpta
-.section shptb
-.section shpua
-.section shpub
-.section shpva
-.section shpvb
-.section shpwa
-.section shpwb
-.section shpxa
-.section shpxb
-.section shpya
-.section shpyb
-.section shpza
-.section shpzb
-.section shp1a
-.section shp1b
-.section shp2a
-.section shp2b
-.section shp3a
-.section shp3b
-.section shp4a
-.section shp4b
-.section shp5a
-.section shp5b
-.section shp6a
-.section shp6b
-.section shp7a
-.section shp7b
-.section shp8a
-.section shp8b
-.section shp9a
-.section shp9b
-.section shp0a
-.section shp0b
-.section shqaa
-.section shqab
-.section shqba
-.section shqbb
-.section shqca
-.section shqcb
-.section shqda
-.section shqdb
-.section shqea
-.section shqeb
-.section shqfa
-.section shqfb
-.section shqga
-.section shqgb
-.section shqha
-.section shqhb
-.section shqia
-.section shqib
-.section shqja
-.section shqjb
-.section shqka
-.section shqkb
-.section shqla
-.section shqlb
-.section shqma
-.section shqmb
-.section shqna
-.section shqnb
-.section shqoa
-.section shqob
-.section shqpa
-.section shqpb
-.section shqqa
-.section shqqb
-.section shqra
-.section shqrb
-.section shqsa
-.section shqsb
-.section shqta
-.section shqtb
-.section shqua
-.section shqub
-.section shqva
-.section shqvb
-.section shqwa
-.section shqwb
-.section shqxa
-.section shqxb
-.section shqya
-.section shqyb
-.section shqza
-.section shqzb
-.section shq1a
-.section shq1b
-.section shq2a
-.section shq2b
-.section shq3a
-.section shq3b
-.section shq4a
-.section shq4b
-.section shq5a
-.section shq5b
-.section shq6a
-.section shq6b
-.section shq7a
-.section shq7b
-.section shq8a
-.section shq8b
-.section shq9a
-.section shq9b
-.section shq0a
-.section shq0b
-.section shraa
-.section shrab
-.section shrba
-.section shrbb
-.section shrca
-.section shrcb
-.section shrda
-.section shrdb
-.section shrea
-.section shreb
-.section shrfa
-.section shrfb
-.section shrga
-.section shrgb
-.section shrha
-.section shrhb
-.section shria
-.section shrib
-.section shrja
-.section shrjb
-.section shrka
-.section shrkb
-.section shrla
-.section shrlb
-.section shrma
-.section shrmb
-.section shrna
-.section shrnb
-.section shroa
-.section shrob
-.section shrpa
-.section shrpb
-.section shrqa
-.section shrqb
-.section shrra
-.section shrrb
-.section shrsa
-.section shrsb
-.section shrta
-.section shrtb
-.section shrua
-.section shrub
-.section shrva
-.section shrvb
-.section shrwa
-.section shrwb
-.section shrxa
-.section shrxb
-.section shrya
-.section shryb
-.section shrza
-.section shrzb
-.section shr1a
-.section shr1b
-.section shr2a
-.section shr2b
-.section shr3a
-.section shr3b
-.section shr4a
-.section shr4b
-.section shr5a
-.section shr5b
-.section shr6a
-.section shr6b
-.section shr7a
-.section shr7b
-.section shr8a
-.section shr8b
-.section shr9a
-.section shr9b
-.section shr0a
-.section shr0b
-.section shsaa
-.section shsab
-.section shsba
-.section shsbb
-.section shsca
-.section shscb
-.section shsda
-.section shsdb
-.section shsea
-.section shseb
-.section shsfa
-.section shsfb
-.section shsga
-.section shsgb
-.section shsha
-.section shshb
-.section shsia
-.section shsib
-.section shsja
-.section shsjb
-.section shska
-.section shskb
-.section shsla
-.section shslb
-.section shsma
-.section shsmb
-.section shsna
-.section shsnb
-.section shsoa
-.section shsob
-.section shspa
-.section shspb
-.section shsqa
-.section shsqb
-.section shsra
-.section shsrb
-.section shssa
-.section shssb
-.section shsta
-.section shstb
-.section shsua
-.section shsub
-.section shsva
-.section shsvb
-.section shswa
-.section shswb
-.section shsxa
-.section shsxb
-.section shsya
-.section shsyb
-.section shsza
-.section shszb
-.section shs1a
-.section shs1b
-.section shs2a
-.section shs2b
-.section shs3a
-.section shs3b
-.section shs4a
-.section shs4b
-.section shs5a
-.section shs5b
-.section shs6a
-.section shs6b
-.section shs7a
-.section shs7b
-.section shs8a
-.section shs8b
-.section shs9a
-.section shs9b
-.section shs0a
-.section shs0b
-.section shtaa
-.section shtab
-.section shtba
-.section shtbb
-.section shtca
-.section shtcb
-.section shtda
-.section shtdb
-.section shtea
-.section shteb
-.section shtfa
-.section shtfb
-.section shtga
-.section shtgb
-.section shtha
-.section shthb
-.section shtia
-.section shtib
-.section shtja
-.section shtjb
-.section shtka
-.section shtkb
-.section shtla
-.section shtlb
-.section shtma
-.section shtmb
-.section shtna
-.section shtnb
-.section shtoa
-.section shtob
-.section shtpa
-.section shtpb
-.section shtqa
-.section shtqb
-.section shtra
-.section shtrb
-.section shtsa
-.section shtsb
-.section shtta
-.section shttb
-.section shtua
-.section shtub
-.section shtva
-.section shtvb
-.section shtwa
-.section shtwb
-.section shtxa
-.section shtxb
-.section shtya
-.section shtyb
-.section shtza
-.section shtzb
-.section sht1a
-.section sht1b
-.section sht2a
-.section sht2b
-.section sht3a
-.section sht3b
-.section sht4a
-.section sht4b
-.section sht5a
-.section sht5b
-.section sht6a
-.section sht6b
-.section sht7a
-.section sht7b
-.section sht8a
-.section sht8b
-.section sht9a
-.section sht9b
-.section sht0a
-.section sht0b
-.section shuaa
-.section shuab
-.section shuba
-.section shubb
-.section shuca
-.section shucb
-.section shuda
-.section shudb
-.section shuea
-.section shueb
-.section shufa
-.section shufb
-.section shuga
-.section shugb
-.section shuha
-.section shuhb
-.section shuia
-.section shuib
-.section shuja
-.section shujb
-.section shuka
-.section shukb
-.section shula
-.section shulb
-.section shuma
-.section shumb
-.section shuna
-.section shunb
-.section shuoa
-.section shuob
-.section shupa
-.section shupb
-.section shuqa
-.section shuqb
-.section shura
-.section shurb
-.section shusa
-.section shusb
-.section shuta
-.section shutb
-.section shuua
-.section shuub
-.section shuva
-.section shuvb
-.section shuwa
-.section shuwb
-.section shuxa
-.section shuxb
-.section shuya
-.section shuyb
-.section shuza
-.section shuzb
-.section shu1a
-.section shu1b
-.section shu2a
-.section shu2b
-.section shu3a
-.section shu3b
-.section shu4a
-.section shu4b
-.section shu5a
-.section shu5b
-.section shu6a
-.section shu6b
-.section shu7a
-.section shu7b
-.section shu8a
-.section shu8b
-.section shu9a
-.section shu9b
-.section shu0a
-.section shu0b
-.section shvaa
-.section shvab
-.section shvba
-.section shvbb
-.section shvca
-.section shvcb
-.section shvda
-.section shvdb
-.section shvea
-.section shveb
-.section shvfa
-.section shvfb
-.section shvga
-.section shvgb
-.section shvha
-.section shvhb
-.section shvia
-.section shvib
-.section shvja
-.section shvjb
-.section shvka
-.section shvkb
-.section shvla
-.section shvlb
-.section shvma
-.section shvmb
-.section shvna
-.section shvnb
-.section shvoa
-.section shvob
-.section shvpa
-.section shvpb
-.section shvqa
-.section shvqb
-.section shvra
-.section shvrb
-.section shvsa
-.section shvsb
-.section shvta
-.section shvtb
-.section shvua
-.section shvub
-.section shvva
-.section shvvb
-.section shvwa
-.section shvwb
-.section shvxa
-.section shvxb
-.section shvya
-.section shvyb
-.section shvza
-.section shvzb
-.section shv1a
-.section shv1b
-.section shv2a
-.section shv2b
-.section shv3a
-.section shv3b
-.section shv4a
-.section shv4b
-.section shv5a
-.section shv5b
-.section shv6a
-.section shv6b
-.section shv7a
-.section shv7b
-.section shv8a
-.section shv8b
-.section shv9a
-.section shv9b
-.section shv0a
-.section shv0b
-.section shwaa
-.section shwab
-.section shwba
-.section shwbb
-.section shwca
-.section shwcb
-.section shwda
-.section shwdb
-.section shwea
-.section shweb
-.section shwfa
-.section shwfb
-.section shwga
-.section shwgb
-.section shwha
-.section shwhb
-.section shwia
-.section shwib
-.section shwja
-.section shwjb
-.section shwka
-.section shwkb
-.section shwla
-.section shwlb
-.section shwma
-.section shwmb
-.section shwna
-.section shwnb
-.section shwoa
-.section shwob
-.section shwpa
-.section shwpb
-.section shwqa
-.section shwqb
-.section shwra
-.section shwrb
-.section shwsa
-.section shwsb
-.section shwta
-.section shwtb
-.section shwua
-.section shwub
-.section shwva
-.section shwvb
-.section shwwa
-.section shwwb
-.section shwxa
-.section shwxb
-.section shwya
-.section shwyb
-.section shwza
-.section shwzb
-.section shw1a
-.section shw1b
-.section shw2a
-.section shw2b
-.section shw3a
-.section shw3b
-.section shw4a
-.section shw4b
-.section shw5a
-.section shw5b
-.section shw6a
-.section shw6b
-.section shw7a
-.section shw7b
-.section shw8a
-.section shw8b
-.section shw9a
-.section shw9b
-.section shw0a
-.section shw0b
-.section shxaa
-.section shxab
-.section shxba
-.section shxbb
-.section shxca
-.section shxcb
-.section shxda
-.section shxdb
-.section shxea
-.section shxeb
-.section shxfa
-.section shxfb
-.section shxga
-.section shxgb
-.section shxha
-.section shxhb
-.section shxia
-.section shxib
-.section shxja
-.section shxjb
-.section shxka
-.section shxkb
-.section shxla
-.section shxlb
-.section shxma
-.section shxmb
-.section shxna
-.section shxnb
-.section shxoa
-.section shxob
-.section shxpa
-.section shxpb
-.section shxqa
-.section shxqb
-.section shxra
-.section shxrb
-.section shxsa
-.section shxsb
-.section shxta
-.section shxtb
-.section shxua
-.section shxub
-.section shxva
-.section shxvb
-.section shxwa
-.section shxwb
-.section shxxa
-.section shxxb
-.section shxya
-.section shxyb
-.section shxza
-.section shxzb
-.section shx1a
-.section shx1b
-.section shx2a
-.section shx2b
-.section shx3a
-.section shx3b
-.section shx4a
-.section shx4b
-.section shx5a
-.section shx5b
-.section shx6a
-.section shx6b
-.section shx7a
-.section shx7b
-.section shx8a
-.section shx8b
-.section shx9a
-.section shx9b
-.section shx0a
-.section shx0b
-.section shyaa
-.section shyab
-.section shyba
-.section shybb
-.section shyca
-.section shycb
-.section shyda
-.section shydb
-.section shyea
-.section shyeb
-.section shyfa
-.section shyfb
-.section shyga
-.section shygb
-.section shyha
-.section shyhb
-.section shyia
-.section shyib
-.section shyja
-.section shyjb
-.section shyka
-.section shykb
-.section shyla
-.section shylb
-.section shyma
-.section shymb
-.section shyna
-.section shynb
-.section shyoa
-.section shyob
-.section shypa
-.section shypb
-.section shyqa
-.section shyqb
-.section shyra
-.section shyrb
-.section shysa
-.section shysb
-.section shyta
-.section shytb
-.section shyua
-.section shyub
-.section shyva
-.section shyvb
-.section shywa
-.section shywb
-.section shyxa
-.section shyxb
-.section shyya
-.section shyyb
-.section shyza
-.section shyzb
-.section shy1a
-.section shy1b
-.section shy2a
-.section shy2b
-.section shy3a
-.section shy3b
-.section shy4a
-.section shy4b
-.section shy5a
-.section shy5b
-.section shy6a
-.section shy6b
-.section shy7a
-.section shy7b
-.section shy8a
-.section shy8b
-.section shy9a
-.section shy9b
-.section shy0a
-.section shy0b
-.section shzaa
-.section shzab
-.section shzba
-.section shzbb
-.section shzca
-.section shzcb
-.section shzda
-.section shzdb
-.section shzea
-.section shzeb
-.section shzfa
-.section shzfb
-.section shzga
-.section shzgb
-.section shzha
-.section shzhb
-.section shzia
-.section shzib
-.section shzja
-.section shzjb
-.section shzka
-.section shzkb
-.section shzla
-.section shzlb
-.section shzma
-.section shzmb
-.section shzna
-.section shznb
-.section shzoa
-.section shzob
-.section shzpa
-.section shzpb
-.section shzqa
-.section shzqb
-.section shzra
-.section shzrb
-.section shzsa
-.section shzsb
-.section shzta
-.section shztb
-.section shzua
-.section shzub
-.section shzva
-.section shzvb
-.section shzwa
-.section shzwb
-.section shzxa
-.section shzxb
-.section shzya
-.section shzyb
-.section shzza
-.section shzzb
-.section shz1a
-.section shz1b
-.section shz2a
-.section shz2b
-.section shz3a
-.section shz3b
-.section shz4a
-.section shz4b
-.section shz5a
-.section shz5b
-.section shz6a
-.section shz6b
-.section shz7a
-.section shz7b
-.section shz8a
-.section shz8b
-.section shz9a
-.section shz9b
-.section shz0a
-.section shz0b
-.section sh1aa
-.section sh1ab
-.section sh1ba
-.section sh1bb
-.section sh1ca
-.section sh1cb
-.section sh1da
-.section sh1db
-.section sh1ea
-.section sh1eb
-.section sh1fa
-.section sh1fb
-.section sh1ga
-.section sh1gb
-.section sh1ha
-.section sh1hb
-.section sh1ia
-.section sh1ib
-.section sh1ja
-.section sh1jb
-.section sh1ka
-.section sh1kb
-.section sh1la
-.section sh1lb
-.section sh1ma
-.section sh1mb
-.section sh1na
-.section sh1nb
-.section sh1oa
-.section sh1ob
-.section sh1pa
-.section sh1pb
-.section sh1qa
-.section sh1qb
-.section sh1ra
-.section sh1rb
-.section sh1sa
-.section sh1sb
-.section sh1ta
-.section sh1tb
-.section sh1ua
-.section sh1ub
-.section sh1va
-.section sh1vb
-.section sh1wa
-.section sh1wb
-.section sh1xa
-.section sh1xb
-.section sh1ya
-.section sh1yb
-.section sh1za
-.section sh1zb
-.section sh11a
-.section sh11b
-.section sh12a
-.section sh12b
-.section sh13a
-.section sh13b
-.section sh14a
-.section sh14b
-.section sh15a
-.section sh15b
-.section sh16a
-.section sh16b
-.section sh17a
-.section sh17b
-.section sh18a
-.section sh18b
-.section sh19a
-.section sh19b
-.section sh10a
-.section sh10b
-.section sh2aa
-.section sh2ab
-.section sh2ba
-.section sh2bb
-.section sh2ca
-.section sh2cb
-.section sh2da
-.section sh2db
-.section sh2ea
-.section sh2eb
-.section sh2fa
-.section sh2fb
-.section sh2ga
-.section sh2gb
-.section sh2ha
-.section sh2hb
-.section sh2ia
-.section sh2ib
-.section sh2ja
-.section sh2jb
-.section sh2ka
-.section sh2kb
-.section sh2la
-.section sh2lb
-.section sh2ma
-.section sh2mb
-.section sh2na
-.section sh2nb
-.section sh2oa
-.section sh2ob
-.section sh2pa
-.section sh2pb
-.section sh2qa
-.section sh2qb
-.section sh2ra
-.section sh2rb
-.section sh2sa
-.section sh2sb
-.section sh2ta
-.section sh2tb
-.section sh2ua
-.section sh2ub
-.section sh2va
-.section sh2vb
-.section sh2wa
-.section sh2wb
-.section sh2xa
-.section sh2xb
-.section sh2ya
-.section sh2yb
-.section sh2za
-.section sh2zb
-.section sh21a
-.section sh21b
-.section sh22a
-.section sh22b
-.section sh23a
-.section sh23b
-.section sh24a
-.section sh24b
-.section sh25a
-.section sh25b
-.section sh26a
-.section sh26b
-.section sh27a
-.section sh27b
-.section sh28a
-.section sh28b
-.section sh29a
-.section sh29b
-.section sh20a
-.section sh20b
-.section sh3aa
-.section sh3ab
-.section sh3ba
-.section sh3bb
-.section sh3ca
-.section sh3cb
-.section sh3da
-.section sh3db
-.section sh3ea
-.section sh3eb
-.section sh3fa
-.section sh3fb
-.section sh3ga
-.section sh3gb
-.section sh3ha
-.section sh3hb
-.section sh3ia
-.section sh3ib
-.section sh3ja
-.section sh3jb
-.section sh3ka
-.section sh3kb
-.section sh3la
-.section sh3lb
-.section sh3ma
-.section sh3mb
-.section sh3na
-.section sh3nb
-.section sh3oa
-.section sh3ob
-.section sh3pa
-.section sh3pb
-.section sh3qa
-.section sh3qb
-.section sh3ra
-.section sh3rb
-.section sh3sa
-.section sh3sb
-.section sh3ta
-.section sh3tb
-.section sh3ua
-.section sh3ub
-.section sh3va
-.section sh3vb
-.section sh3wa
-.section sh3wb
-.section sh3xa
-.section sh3xb
-.section sh3ya
-.section sh3yb
-.section sh3za
-.section sh3zb
-.section sh31a
-.section sh31b
-.section sh32a
-.section sh32b
-.section sh33a
-.section sh33b
-.section sh34a
-.section sh34b
-.section sh35a
-.section sh35b
-.section sh36a
-.section sh36b
-.section sh37a
-.section sh37b
-.section sh38a
-.section sh38b
-.section sh39a
-.section sh39b
-.section sh30a
-.section sh30b
-.section sh4aa
-.section sh4ab
-.section sh4ba
-.section sh4bb
-.section sh4ca
-.section sh4cb
-.section sh4da
-.section sh4db
-.section sh4ea
-.section sh4eb
-.section sh4fa
-.section sh4fb
-.section sh4ga
-.section sh4gb
-.section sh4ha
-.section sh4hb
-.section sh4ia
-.section sh4ib
-.section sh4ja
-.section sh4jb
-.section sh4ka
-.section sh4kb
-.section sh4la
-.section sh4lb
-.section sh4ma
-.section sh4mb
-.section sh4na
-.section sh4nb
-.section sh4oa
-.section sh4ob
-.section sh4pa
-.section sh4pb
-.section sh4qa
-.section sh4qb
-.section sh4ra
-.section sh4rb
-.section sh4sa
-.section sh4sb
-.section sh4ta
-.section sh4tb
-.section sh4ua
-.section sh4ub
-.section sh4va
-.section sh4vb
-.section sh4wa
-.section sh4wb
-.section sh4xa
-.section sh4xb
-.section sh4ya
-.section sh4yb
-.section sh4za
-.section sh4zb
-.section sh41a
-.section sh41b
-.section sh42a
-.section sh42b
-.section sh43a
-.section sh43b
-.section sh44a
-.section sh44b
-.section sh45a
-.section sh45b
-.section sh46a
-.section sh46b
-.section sh47a
-.section sh47b
-.section sh48a
-.section sh48b
-.section sh49a
-.section sh49b
-.section sh40a
-.section sh40b
-.section sh5aa
-.section sh5ab
-.section sh5ba
-.section sh5bb
-.section sh5ca
-.section sh5cb
-.section sh5da
-.section sh5db
-.section sh5ea
-.section sh5eb
-.section sh5fa
-.section sh5fb
-.section sh5ga
-.section sh5gb
-.section sh5ha
-.section sh5hb
-.section sh5ia
-.section sh5ib
-.section sh5ja
-.section sh5jb
-.section sh5ka
-.section sh5kb
-.section sh5la
-.section sh5lb
-.section sh5ma
-.section sh5mb
-.section sh5na
-.section sh5nb
-.section sh5oa
-.section sh5ob
-.section sh5pa
-.section sh5pb
-.section sh5qa
-.section sh5qb
-.section sh5ra
-.section sh5rb
-.section sh5sa
-.section sh5sb
-.section sh5ta
-.section sh5tb
-.section sh5ua
-.section sh5ub
-.section sh5va
-.section sh5vb
-.section sh5wa
-.section sh5wb
-.section sh5xa
-.section sh5xb
-.section sh5ya
-.section sh5yb
-.section sh5za
-.section sh5zb
-.section sh51a
-.section sh51b
-.section sh52a
-.section sh52b
-.section sh53a
-.section sh53b
-.section sh54a
-.section sh54b
-.section sh55a
-.section sh55b
-.section sh56a
-.section sh56b
-.section sh57a
-.section sh57b
-.section sh58a
-.section sh58b
-.section sh59a
-.section sh59b
-.section sh50a
-.section sh50b
-.section sh6aa
-.section sh6ab
-.section sh6ba
-.section sh6bb
-.section sh6ca
-.section sh6cb
-.section sh6da
-.section sh6db
-.section sh6ea
-.section sh6eb
-.section sh6fa
-.section sh6fb
-.section sh6ga
-.section sh6gb
-.section sh6ha
-.section sh6hb
-.section sh6ia
-.section sh6ib
-.section sh6ja
-.section sh6jb
-.section sh6ka
-.section sh6kb
-.section sh6la
-.section sh6lb
-.section sh6ma
-.section sh6mb
-.section sh6na
-.section sh6nb
-.section sh6oa
-.section sh6ob
-.section sh6pa
-.section sh6pb
-.section sh6qa
-.section sh6qb
-.section sh6ra
-.section sh6rb
-.section sh6sa
-.section sh6sb
-.section sh6ta
-.section sh6tb
-.section sh6ua
-.section sh6ub
-.section sh6va
-.section sh6vb
-.section sh6wa
-.section sh6wb
-.section sh6xa
-.section sh6xb
-.section sh6ya
-.section sh6yb
-.section sh6za
-.section sh6zb
-.section sh61a
-.section sh61b
-.section sh62a
-.section sh62b
-.section sh63a
-.section sh63b
-.section sh64a
-.section sh64b
-.section sh65a
-.section sh65b
-.section sh66a
-.section sh66b
-.section sh67a
-.section sh67b
-.section sh68a
-.section sh68b
-.section sh69a
-.section sh69b
-.section sh60a
-.section sh60b
-.section sh7aa
-.section sh7ab
-.section sh7ba
-.section sh7bb
-.section sh7ca
-.section sh7cb
-.section sh7da
-.section sh7db
-.section sh7ea
-.section sh7eb
-.section sh7fa
-.section sh7fb
-.section sh7ga
-.section sh7gb
-.section sh7ha
-.section sh7hb
-.section sh7ia
-.section sh7ib
-.section sh7ja
-.section sh7jb
-.section sh7ka
-.section sh7kb
-.section sh7la
-.section sh7lb
-.section sh7ma
-.section sh7mb
-.section sh7na
-.section sh7nb
-.section sh7oa
-.section sh7ob
-.section sh7pa
-.section sh7pb
-.section sh7qa
-.section sh7qb
-.section sh7ra
-.section sh7rb
-.section sh7sa
-.section sh7sb
-.section sh7ta
-.section sh7tb
-.section sh7ua
-.section sh7ub
-.section sh7va
-.section sh7vb
-.section sh7wa
-.section sh7wb
-.section sh7xa
-.section sh7xb
-.section sh7ya
-.section sh7yb
-.section sh7za
-.section sh7zb
-.section sh71a
-.section sh71b
-.section sh72a
-.section sh72b
-.section sh73a
-.section sh73b
-.section sh74a
-.section sh74b
-.section sh75a
-.section sh75b
-.section sh76a
-.section sh76b
-.section sh77a
-.section sh77b
-.section sh78a
-.section sh78b
-.section sh79a
-.section sh79b
-.section sh70a
-.section sh70b
-.section sh8aa
-.section sh8ab
-.section sh8ba
-.section sh8bb
-.section sh8ca
-.section sh8cb
-.section sh8da
-.section sh8db
-.section sh8ea
-.section sh8eb
-.section sh8fa
-.section sh8fb
-.section sh8ga
-.section sh8gb
-.section sh8ha
-.section sh8hb
-.section sh8ia
-.section sh8ib
-.section sh8ja
-.section sh8jb
-.section sh8ka
-.section sh8kb
-.section sh8la
-.section sh8lb
-.section sh8ma
-.section sh8mb
-.section sh8na
-.section sh8nb
-.section sh8oa
-.section sh8ob
-.section sh8pa
-.section sh8pb
-.section sh8qa
-.section sh8qb
-.section sh8ra
-.section sh8rb
-.section sh8sa
-.section sh8sb
-.section sh8ta
-.section sh8tb
-.section sh8ua
-.section sh8ub
-.section sh8va
-.section sh8vb
-.section sh8wa
-.section sh8wb
-.section sh8xa
-.section sh8xb
-.section sh8ya
-.section sh8yb
-.section sh8za
-.section sh8zb
-.section sh81a
-.section sh81b
-.section sh82a
-.section sh82b
-.section sh83a
-.section sh83b
-.section sh84a
-.section sh84b
-.section sh85a
-.section sh85b
-.section sh86a
-.section sh86b
-.section sh87a
-.section sh87b
-.section sh88a
-.section sh88b
-.section sh89a
-.section sh89b
-.section sh80a
-.section sh80b
-.section sh9aa
-.section sh9ab
-.section sh9ba
-.section sh9bb
-.section sh9ca
-.section sh9cb
-.section sh9da
-.section sh9db
-.section sh9ea
-.section sh9eb
-.section sh9fa
-.section sh9fb
-.section sh9ga
-.section sh9gb
-.section sh9ha
-.section sh9hb
-.section sh9ia
-.section sh9ib
-.section sh9ja
-.section sh9jb
-.section sh9ka
-.section sh9kb
-.section sh9la
-.section sh9lb
-.section sh9ma
-.section sh9mb
-.section sh9na
-.section sh9nb
-.section sh9oa
-.section sh9ob
-.section sh9pa
-.section sh9pb
-.section sh9qa
-.section sh9qb
-.section sh9ra
-.section sh9rb
-.section sh9sa
-.section sh9sb
-.section sh9ta
-.section sh9tb
-.section sh9ua
-.section sh9ub
-.section sh9va
-.section sh9vb
-.section sh9wa
-.section sh9wb
-.section sh9xa
-.section sh9xb
-.section sh9ya
-.section sh9yb
-.section sh9za
-.section sh9zb
-.section sh91a
-.section sh91b
-.section sh92a
-.section sh92b
-.section sh93a
-.section sh93b
-.section sh94a
-.section sh94b
-.section sh95a
-.section sh95b
-.section sh96a
-.section sh96b
-.section sh97a
-.section sh97b
-.section sh98a
-.section sh98b
-.section sh99a
-.section sh99b
-.section sh90a
-.section sh90b
-.section sh0aa
-.section sh0ab
-.section sh0ba
-.section sh0bb
-.section sh0ca
-.section sh0cb
-.section sh0da
-.section sh0db
-.section sh0ea
-.section sh0eb
-.section sh0fa
-.section sh0fb
-.section sh0ga
-.section sh0gb
-.section sh0ha
-.section sh0hb
-.section sh0ia
-.section sh0ib
-.section sh0ja
-.section sh0jb
-.section sh0ka
-.section sh0kb
-.section sh0la
-.section sh0lb
-.section sh0ma
-.section sh0mb
-.section sh0na
-.section sh0nb
-.section sh0oa
-.section sh0ob
-.section sh0pa
-.section sh0pb
-.section sh0qa
-.section sh0qb
-.section sh0ra
-.section sh0rb
-.section sh0sa
-.section sh0sb
-.section sh0ta
-.section sh0tb
-.section sh0ua
-.section sh0ub
-.section sh0va
-.section sh0vb
-.section sh0wa
-.section sh0wb
-.section sh0xa
-.section sh0xb
-.section sh0ya
-.section sh0yb
-.section sh0za
-.section sh0zb
-.section sh01a
-.section sh01b
-.section sh02a
-.section sh02b
-.section sh03a
-.section sh03b
-.section sh04a
-.section sh04b
-.section sh05a
-.section sh05b
-.section sh06a
-.section sh06b
-.section sh07a
-.section sh07b
-.section sh08a
-.section sh08b
-.section sh09a
-.section sh09b
-.section sh00a
-.section sh00b
-.section siaaa
-.section siaab
-.section siaba
-.section siabb
-.section siaca
-.section siacb
-.section siada
-.section siadb
-.section siaea
-.section siaeb
-.section siafa
-.section siafb
-.section siaga
-.section siagb
-.section siaha
-.section siahb
-.section siaia
-.section siaib
-.section siaja
-.section siajb
-.section siaka
-.section siakb
-.section siala
-.section sialb
-.section siama
-.section siamb
-.section siana
-.section sianb
-.section siaoa
-.section siaob
-.section siapa
-.section siapb
-.section siaqa
-.section siaqb
-.section siara
-.section siarb
-.section siasa
-.section siasb
-.section siata
-.section siatb
-.section siaua
-.section siaub
-.section siava
-.section siavb
-.section siawa
-.section siawb
-.section siaxa
-.section siaxb
-.section siaya
-.section siayb
-.section siaza
-.section siazb
-.section sia1a
-.section sia1b
-.section sia2a
-.section sia2b
-.section sia3a
-.section sia3b
-.section sia4a
-.section sia4b
-.section sia5a
-.section sia5b
-.section sia6a
-.section sia6b
-.section sia7a
-.section sia7b
-.section sia8a
-.section sia8b
-.section sia9a
-.section sia9b
-.section sia0a
-.section sia0b
-.section sibaa
-.section sibab
-.section sibba
-.section sibbb
-.section sibca
-.section sibcb
-.section sibda
-.section sibdb
-.section sibea
-.section sibeb
-.section sibfa
-.section sibfb
-.section sibga
-.section sibgb
-.section sibha
-.section sibhb
-.section sibia
-.section sibib
-.section sibja
-.section sibjb
-.section sibka
-.section sibkb
-.section sibla
-.section siblb
-.section sibma
-.section sibmb
-.section sibna
-.section sibnb
-.section siboa
-.section sibob
-.section sibpa
-.section sibpb
-.section sibqa
-.section sibqb
-.section sibra
-.section sibrb
-.section sibsa
-.section sibsb
-.section sibta
-.section sibtb
-.section sibua
-.section sibub
-.section sibva
-.section sibvb
-.section sibwa
-.section sibwb
-.section sibxa
-.section sibxb
-.section sibya
-.section sibyb
-.section sibza
-.section sibzb
-.section sib1a
-.section sib1b
-.section sib2a
-.section sib2b
-.section sib3a
-.section sib3b
-.section sib4a
-.section sib4b
-.section sib5a
-.section sib5b
-.section sib6a
-.section sib6b
-.section sib7a
-.section sib7b
-.section sib8a
-.section sib8b
-.section sib9a
-.section sib9b
-.section sib0a
-.section sib0b
-.section sicaa
-.section sicab
-.section sicba
-.section sicbb
-.section sicca
-.section siccb
-.section sicda
-.section sicdb
-.section sicea
-.section siceb
-.section sicfa
-.section sicfb
-.section sicga
-.section sicgb
-.section sicha
-.section sichb
-.section sicia
-.section sicib
-.section sicja
-.section sicjb
-.section sicka
-.section sickb
-.section sicla
-.section siclb
-.section sicma
-.section sicmb
-.section sicna
-.section sicnb
-.section sicoa
-.section sicob
-.section sicpa
-.section sicpb
-.section sicqa
-.section sicqb
-.section sicra
-.section sicrb
-.section sicsa
-.section sicsb
-.section sicta
-.section sictb
-.section sicua
-.section sicub
-.section sicva
-.section sicvb
-.section sicwa
-.section sicwb
-.section sicxa
-.section sicxb
-.section sicya
-.section sicyb
-.section sicza
-.section siczb
-.section sic1a
-.section sic1b
-.section sic2a
-.section sic2b
-.section sic3a
-.section sic3b
-.section sic4a
-.section sic4b
-.section sic5a
-.section sic5b
-.section sic6a
-.section sic6b
-.section sic7a
-.section sic7b
-.section sic8a
-.section sic8b
-.section sic9a
-.section sic9b
-.section sic0a
-.section sic0b
-.section sidaa
-.section sidab
-.section sidba
-.section sidbb
-.section sidca
-.section sidcb
-.section sidda
-.section siddb
-.section sidea
-.section sideb
-.section sidfa
-.section sidfb
-.section sidga
-.section sidgb
-.section sidha
-.section sidhb
-.section sidia
-.section sidib
-.section sidja
-.section sidjb
-.section sidka
-.section sidkb
-.section sidla
-.section sidlb
-.section sidma
-.section sidmb
-.section sidna
-.section sidnb
-.section sidoa
-.section sidob
-.section sidpa
-.section sidpb
-.section sidqa
-.section sidqb
-.section sidra
-.section sidrb
-.section sidsa
-.section sidsb
-.section sidta
-.section sidtb
-.section sidua
-.section sidub
-.section sidva
-.section sidvb
-.section sidwa
-.section sidwb
-.section sidxa
-.section sidxb
-.section sidya
-.section sidyb
-.section sidza
-.section sidzb
-.section sid1a
-.section sid1b
-.section sid2a
-.section sid2b
-.section sid3a
-.section sid3b
-.section sid4a
-.section sid4b
-.section sid5a
-.section sid5b
-.section sid6a
-.section sid6b
-.section sid7a
-.section sid7b
-.section sid8a
-.section sid8b
-.section sid9a
-.section sid9b
-.section sid0a
-.section sid0b
-.section sieaa
-.section sieab
-.section sieba
-.section siebb
-.section sieca
-.section siecb
-.section sieda
-.section siedb
-.section sieea
-.section sieeb
-.section siefa
-.section siefb
-.section siega
-.section siegb
-.section sieha
-.section siehb
-.section sieia
-.section sieib
-.section sieja
-.section siejb
-.section sieka
-.section siekb
-.section siela
-.section sielb
-.section siema
-.section siemb
-.section siena
-.section sienb
-.section sieoa
-.section sieob
-.section siepa
-.section siepb
-.section sieqa
-.section sieqb
-.section siera
-.section sierb
-.section siesa
-.section siesb
-.section sieta
-.section sietb
-.section sieua
-.section sieub
-.section sieva
-.section sievb
-.section siewa
-.section siewb
-.section siexa
-.section siexb
-.section sieya
-.section sieyb
-.section sieza
-.section siezb
-.section sie1a
-.section sie1b
-.section sie2a
-.section sie2b
-.section sie3a
-.section sie3b
-.section sie4a
-.section sie4b
-.section sie5a
-.section sie5b
-.section sie6a
-.section sie6b
-.section sie7a
-.section sie7b
-.section sie8a
-.section sie8b
-.section sie9a
-.section sie9b
-.section sie0a
-.section sie0b
-.section sifaa
-.section sifab
-.section sifba
-.section sifbb
-.section sifca
-.section sifcb
-.section sifda
-.section sifdb
-.section sifea
-.section sifeb
-.section siffa
-.section siffb
-.section sifga
-.section sifgb
-.section sifha
-.section sifhb
-.section sifia
-.section sifib
-.section sifja
-.section sifjb
-.section sifka
-.section sifkb
-.section sifla
-.section siflb
-.section sifma
-.section sifmb
-.section sifna
-.section sifnb
-.section sifoa
-.section sifob
-.section sifpa
-.section sifpb
-.section sifqa
-.section sifqb
-.section sifra
-.section sifrb
-.section sifsa
-.section sifsb
-.section sifta
-.section siftb
-.section sifua
-.section sifub
-.section sifva
-.section sifvb
-.section sifwa
-.section sifwb
-.section sifxa
-.section sifxb
-.section sifya
-.section sifyb
-.section sifza
-.section sifzb
-.section sif1a
-.section sif1b
-.section sif2a
-.section sif2b
-.section sif3a
-.section sif3b
-.section sif4a
-.section sif4b
-.section sif5a
-.section sif5b
-.section sif6a
-.section sif6b
-.section sif7a
-.section sif7b
-.section sif8a
-.section sif8b
-.section sif9a
-.section sif9b
-.section sif0a
-.section sif0b
-.section sigaa
-.section sigab
-.section sigba
-.section sigbb
-.section sigca
-.section sigcb
-.section sigda
-.section sigdb
-.section sigea
-.section sigeb
-.section sigfa
-.section sigfb
-.section sigga
-.section siggb
-.section sigha
-.section sighb
-.section sigia
-.section sigib
-.section sigja
-.section sigjb
-.section sigka
-.section sigkb
-.section sigla
-.section siglb
-.section sigma
-.section sigmb
-.section signa
-.section signb
-.section sigoa
-.section sigob
-.section sigpa
-.section sigpb
-.section sigqa
-.section sigqb
-.section sigra
-.section sigrb
-.section sigsa
-.section sigsb
-.section sigta
-.section sigtb
-.section sigua
-.section sigub
-.section sigva
-.section sigvb
-.section sigwa
-.section sigwb
-.section sigxa
-.section sigxb
-.section sigya
-.section sigyb
-.section sigza
-.section sigzb
-.section sig1a
-.section sig1b
-.section sig2a
-.section sig2b
-.section sig3a
-.section sig3b
-.section sig4a
-.section sig4b
-.section sig5a
-.section sig5b
-.section sig6a
-.section sig6b
-.section sig7a
-.section sig7b
-.section sig8a
-.section sig8b
-.section sig9a
-.section sig9b
-.section sig0a
-.section sig0b
-.section sihaa
-.section sihab
-.section sihba
-.section sihbb
-.section sihca
-.section sihcb
-.section sihda
-.section sihdb
-.section sihea
-.section siheb
-.section sihfa
-.section sihfb
-.section sihga
-.section sihgb
-.section sihha
-.section sihhb
-.section sihia
-.section sihib
-.section sihja
-.section sihjb
-.section sihka
-.section sihkb
-.section sihla
-.section sihlb
-.section sihma
-.section sihmb
-.section sihna
-.section sihnb
-.section sihoa
-.section sihob
-.section sihpa
-.section sihpb
-.section sihqa
-.section sihqb
-.section sihra
-.section sihrb
-.section sihsa
-.section sihsb
-.section sihta
-.section sihtb
-.section sihua
-.section sihub
-.section sihva
-.section sihvb
-.section sihwa
-.section sihwb
-.section sihxa
-.section sihxb
-.section sihya
-.section sihyb
-.section sihza
-.section sihzb
-.section sih1a
-.section sih1b
-.section sih2a
-.section sih2b
-.section sih3a
-.section sih3b
-.section sih4a
-.section sih4b
-.section sih5a
-.section sih5b
-.section sih6a
-.section sih6b
-.section sih7a
-.section sih7b
-.section sih8a
-.section sih8b
-.section sih9a
-.section sih9b
-.section sih0a
-.section sih0b
-.section siiaa
-.section siiab
-.section siiba
-.section siibb
-.section siica
-.section siicb
-.section siida
-.section siidb
-.section siiea
-.section siieb
-.section siifa
-.section siifb
-.section siiga
-.section siigb
-.section siiha
-.section siihb
-.section siiia
-.section siiib
-.section siija
-.section siijb
-.section siika
-.section siikb
-.section siila
-.section siilb
-.section siima
-.section siimb
-.section siina
-.section siinb
-.section siioa
-.section siiob
-.section siipa
-.section siipb
-.section siiqa
-.section siiqb
-.section siira
-.section siirb
-.section siisa
-.section siisb
-.section siita
-.section siitb
-.section siiua
-.section siiub
-.section siiva
-.section siivb
-.section siiwa
-.section siiwb
-.section siixa
-.section siixb
-.section siiya
-.section siiyb
-.section siiza
-.section siizb
-.section sii1a
-.section sii1b
-.section sii2a
-.section sii2b
-.section sii3a
-.section sii3b
-.section sii4a
-.section sii4b
-.section sii5a
-.section sii5b
-.section sii6a
-.section sii6b
-.section sii7a
-.section sii7b
-.section sii8a
-.section sii8b
-.section sii9a
-.section sii9b
-.section sii0a
-.section sii0b
-.section sijaa
-.section sijab
-.section sijba
-.section sijbb
-.section sijca
-.section sijcb
-.section sijda
-.section sijdb
-.section sijea
-.section sijeb
-.section sijfa
-.section sijfb
-.section sijga
-.section sijgb
-.section sijha
-.section sijhb
-.section sijia
-.section sijib
-.section sijja
-.section sijjb
-.section sijka
-.section sijkb
-.section sijla
-.section sijlb
-.section sijma
-.section sijmb
-.section sijna
-.section sijnb
-.section sijoa
-.section sijob
-.section sijpa
-.section sijpb
-.section sijqa
-.section sijqb
-.section sijra
-.section sijrb
-.section sijsa
-.section sijsb
-.section sijta
-.section sijtb
-.section sijua
-.section sijub
-.section sijva
-.section sijvb
-.section sijwa
-.section sijwb
-.section sijxa
-.section sijxb
-.section sijya
-.section sijyb
-.section sijza
-.section sijzb
-.section sij1a
-.section sij1b
-.section sij2a
-.section sij2b
-.section sij3a
-.section sij3b
-.section sij4a
-.section sij4b
-.section sij5a
-.section sij5b
-.section sij6a
-.section sij6b
-.section sij7a
-.section sij7b
-.section sij8a
-.section sij8b
-.section sij9a
-.section sij9b
-.section sij0a
-.section sij0b
-.section sikaa
-.section sikab
-.section sikba
-.section sikbb
-.section sikca
-.section sikcb
-.section sikda
-.section sikdb
-.section sikea
-.section sikeb
-.section sikfa
-.section sikfb
-.section sikga
-.section sikgb
-.section sikha
-.section sikhb
-.section sikia
-.section sikib
-.section sikja
-.section sikjb
-.section sikka
-.section sikkb
-.section sikla
-.section siklb
-.section sikma
-.section sikmb
-.section sikna
-.section siknb
-.section sikoa
-.section sikob
-.section sikpa
-.section sikpb
-.section sikqa
-.section sikqb
-.section sikra
-.section sikrb
-.section siksa
-.section siksb
-.section sikta
-.section siktb
-.section sikua
-.section sikub
-.section sikva
-.section sikvb
-.section sikwa
-.section sikwb
-.section sikxa
-.section sikxb
-.section sikya
-.section sikyb
-.section sikza
-.section sikzb
-.section sik1a
-.section sik1b
-.section sik2a
-.section sik2b
-.section sik3a
-.section sik3b
-.section sik4a
-.section sik4b
-.section sik5a
-.section sik5b
-.section sik6a
-.section sik6b
-.section sik7a
-.section sik7b
-.section sik8a
-.section sik8b
-.section sik9a
-.section sik9b
-.section sik0a
-.section sik0b
-.section silaa
-.section silab
-.section silba
-.section silbb
-.section silca
-.section silcb
-.section silda
-.section sildb
-.section silea
-.section sileb
-.section silfa
-.section silfb
-.section silga
-.section silgb
-.section silha
-.section silhb
-.section silia
-.section silib
-.section silja
-.section siljb
-.section silka
-.section silkb
-.section silla
-.section sillb
-.section silma
-.section silmb
-.section silna
-.section silnb
-.section siloa
-.section silob
-.section silpa
-.section silpb
-.section silqa
-.section silqb
-.section silra
-.section silrb
-.section silsa
-.section silsb
-.section silta
-.section siltb
-.section silua
-.section silub
-.section silva
-.section silvb
-.section silwa
-.section silwb
-.section silxa
-.section silxb
-.section silya
-.section silyb
-.section silza
-.section silzb
-.section sil1a
-.section sil1b
-.section sil2a
-.section sil2b
-.section sil3a
-.section sil3b
-.section sil4a
-.section sil4b
-.section sil5a
-.section sil5b
-.section sil6a
-.section sil6b
-.section sil7a
-.section sil7b
-.section sil8a
-.section sil8b
-.section sil9a
-.section sil9b
-.section sil0a
-.section sil0b
-.section simaa
-.section simab
-.section simba
-.section simbb
-.section simca
-.section simcb
-.section simda
-.section simdb
-.section simea
-.section simeb
-.section simfa
-.section simfb
-.section simga
-.section simgb
-.section simha
-.section simhb
-.section simia
-.section simib
-.section simja
-.section simjb
-.section simka
-.section simkb
-.section simla
-.section simlb
-.section simma
-.section simmb
-.section simna
-.section simnb
-.section simoa
-.section simob
-.section simpa
-.section simpb
-.section simqa
-.section simqb
-.section simra
-.section simrb
-.section simsa
-.section simsb
-.section simta
-.section simtb
-.section simua
-.section simub
-.section simva
-.section simvb
-.section simwa
-.section simwb
-.section simxa
-.section simxb
-.section simya
-.section simyb
-.section simza
-.section simzb
-.section sim1a
-.section sim1b
-.section sim2a
-.section sim2b
-.section sim3a
-.section sim3b
-.section sim4a
-.section sim4b
-.section sim5a
-.section sim5b
-.section sim6a
-.section sim6b
-.section sim7a
-.section sim7b
-.section sim8a
-.section sim8b
-.section sim9a
-.section sim9b
-.section sim0a
-.section sim0b
-.section sinaa
-.section sinab
-.section sinba
-.section sinbb
-.section sinca
-.section sincb
-.section sinda
-.section sindb
-.section sinea
-.section sineb
-.section sinfa
-.section sinfb
-.section singa
-.section singb
-.section sinha
-.section sinhb
-.section sinia
-.section sinib
-.section sinja
-.section sinjb
-.section sinka
-.section sinkb
-.section sinla
-.section sinlb
-.section sinma
-.section sinmb
-.section sinna
-.section sinnb
-.section sinoa
-.section sinob
-.section sinpa
-.section sinpb
-.section sinqa
-.section sinqb
-.section sinra
-.section sinrb
-.section sinsa
-.section sinsb
-.section sinta
-.section sintb
-.section sinua
-.section sinub
-.section sinva
-.section sinvb
-.section sinwa
-.section sinwb
-.section sinxa
-.section sinxb
-.section sinya
-.section sinyb
-.section sinza
-.section sinzb
-.section sin1a
-.section sin1b
-.section sin2a
-.section sin2b
-.section sin3a
-.section sin3b
-.section sin4a
-.section sin4b
-.section sin5a
-.section sin5b
-.section sin6a
-.section sin6b
-.section sin7a
-.section sin7b
-.section sin8a
-.section sin8b
-.section sin9a
-.section sin9b
-.section sin0a
-.section sin0b
-.section sioaa
-.section sioab
-.section sioba
-.section siobb
-.section sioca
-.section siocb
-.section sioda
-.section siodb
-.section sioea
-.section sioeb
-.section siofa
-.section siofb
-.section sioga
-.section siogb
-.section sioha
-.section siohb
-.section sioia
-.section sioib
-.section sioja
-.section siojb
-.section sioka
-.section siokb
-.section siola
-.section siolb
-.section sioma
-.section siomb
-.section siona
-.section sionb
-.section siooa
-.section sioob
-.section siopa
-.section siopb
-.section sioqa
-.section sioqb
-.section siora
-.section siorb
-.section siosa
-.section siosb
-.section siota
-.section siotb
-.section sioua
-.section sioub
-.section siova
-.section siovb
-.section siowa
-.section siowb
-.section sioxa
-.section sioxb
-.section sioya
-.section sioyb
-.section sioza
-.section siozb
-.section sio1a
-.section sio1b
-.section sio2a
-.section sio2b
-.section sio3a
-.section sio3b
-.section sio4a
-.section sio4b
-.section sio5a
-.section sio5b
-.section sio6a
-.section sio6b
-.section sio7a
-.section sio7b
-.section sio8a
-.section sio8b
-.section sio9a
-.section sio9b
-.section sio0a
-.section sio0b
-.section sipaa
-.section sipab
-.section sipba
-.section sipbb
-.section sipca
-.section sipcb
-.section sipda
-.section sipdb
-.section sipea
-.section sipeb
-.section sipfa
-.section sipfb
-.section sipga
-.section sipgb
-.section sipha
-.section siphb
-.section sipia
-.section sipib
-.section sipja
-.section sipjb
-.section sipka
-.section sipkb
-.section sipla
-.section siplb
-.section sipma
-.section sipmb
-.section sipna
-.section sipnb
-.section sipoa
-.section sipob
-.section sippa
-.section sippb
-.section sipqa
-.section sipqb
-.section sipra
-.section siprb
-.section sipsa
-.section sipsb
-.section sipta
-.section siptb
-.section sipua
-.section sipub
-.section sipva
-.section sipvb
-.section sipwa
-.section sipwb
-.section sipxa
-.section sipxb
-.section sipya
-.section sipyb
-.section sipza
-.section sipzb
-.section sip1a
-.section sip1b
-.section sip2a
-.section sip2b
-.section sip3a
-.section sip3b
-.section sip4a
-.section sip4b
-.section sip5a
-.section sip5b
-.section sip6a
-.section sip6b
-.section sip7a
-.section sip7b
-.section sip8a
-.section sip8b
-.section sip9a
-.section sip9b
-.section sip0a
-.section sip0b
-.section siqaa
-.section siqab
-.section siqba
-.section siqbb
-.section siqca
-.section siqcb
-.section siqda
-.section siqdb
-.section siqea
-.section siqeb
-.section siqfa
-.section siqfb
-.section siqga
-.section siqgb
-.section siqha
-.section siqhb
-.section siqia
-.section siqib
-.section siqja
-.section siqjb
-.section siqka
-.section siqkb
-.section siqla
-.section siqlb
-.section siqma
-.section siqmb
-.section siqna
-.section siqnb
-.section siqoa
-.section siqob
-.section siqpa
-.section siqpb
-.section siqqa
-.section siqqb
-.section siqra
-.section siqrb
-.section siqsa
-.section siqsb
-.section siqta
-.section siqtb
-.section siqua
-.section siqub
-.section siqva
-.section siqvb
-.section siqwa
-.section siqwb
-.section siqxa
-.section siqxb
-.section siqya
-.section siqyb
-.section siqza
-.section siqzb
-.section siq1a
-.section siq1b
-.section siq2a
-.section siq2b
-.section siq3a
-.section siq3b
-.section siq4a
-.section siq4b
-.section siq5a
-.section siq5b
-.section siq6a
-.section siq6b
-.section siq7a
-.section siq7b
-.section siq8a
-.section siq8b
-.section siq9a
-.section siq9b
-.section siq0a
-.section siq0b
-.section siraa
-.section sirab
-.section sirba
-.section sirbb
-.section sirca
-.section sircb
-.section sirda
-.section sirdb
-.section sirea
-.section sireb
-.section sirfa
-.section sirfb
-.section sirga
-.section sirgb
-.section sirha
-.section sirhb
-.section siria
-.section sirib
-.section sirja
-.section sirjb
-.section sirka
-.section sirkb
-.section sirla
-.section sirlb
-.section sirma
-.section sirmb
-.section sirna
-.section sirnb
-.section siroa
-.section sirob
-.section sirpa
-.section sirpb
-.section sirqa
-.section sirqb
-.section sirra
-.section sirrb
-.section sirsa
-.section sirsb
-.section sirta
-.section sirtb
-.section sirua
-.section sirub
-.section sirva
-.section sirvb
-.section sirwa
-.section sirwb
-.section sirxa
-.section sirxb
-.section sirya
-.section siryb
-.section sirza
-.section sirzb
-.section sir1a
-.section sir1b
-.section sir2a
-.section sir2b
-.section sir3a
-.section sir3b
-.section sir4a
-.section sir4b
-.section sir5a
-.section sir5b
-.section sir6a
-.section sir6b
-.section sir7a
-.section sir7b
-.section sir8a
-.section sir8b
-.section sir9a
-.section sir9b
-.section sir0a
-.section sir0b
-.section sisaa
-.section sisab
-.section sisba
-.section sisbb
-.section sisca
-.section siscb
-.section sisda
-.section sisdb
-.section sisea
-.section siseb
-.section sisfa
-.section sisfb
-.section sisga
-.section sisgb
-.section sisha
-.section sishb
-.section sisia
-.section sisib
-.section sisja
-.section sisjb
-.section siska
-.section siskb
-.section sisla
-.section sislb
-.section sisma
-.section sismb
-.section sisna
-.section sisnb
-.section sisoa
-.section sisob
-.section sispa
-.section sispb
-.section sisqa
-.section sisqb
-.section sisra
-.section sisrb
-.section sissa
-.section sissb
-.section sista
-.section sistb
-.section sisua
-.section sisub
-.section sisva
-.section sisvb
-.section siswa
-.section siswb
-.section sisxa
-.section sisxb
-.section sisya
-.section sisyb
-.section sisza
-.section siszb
-.section sis1a
-.section sis1b
-.section sis2a
-.section sis2b
-.section sis3a
-.section sis3b
-.section sis4a
-.section sis4b
-.section sis5a
-.section sis5b
-.section sis6a
-.section sis6b
-.section sis7a
-.section sis7b
-.section sis8a
-.section sis8b
-.section sis9a
-.section sis9b
-.section sis0a
-.section sis0b
-.section sitaa
-.section sitab
-.section sitba
-.section sitbb
-.section sitca
-.section sitcb
-.section sitda
-.section sitdb
-.section sitea
-.section siteb
-.section sitfa
-.section sitfb
-.section sitga
-.section sitgb
-.section sitha
-.section sithb
-.section sitia
-.section sitib
-.section sitja
-.section sitjb
-.section sitka
-.section sitkb
-.section sitla
-.section sitlb
-.section sitma
-.section sitmb
-.section sitna
-.section sitnb
-.section sitoa
-.section sitob
-.section sitpa
-.section sitpb
-.section sitqa
-.section sitqb
-.section sitra
-.section sitrb
-.section sitsa
-.section sitsb
-.section sitta
-.section sittb
-.section situa
-.section situb
-.section sitva
-.section sitvb
-.section sitwa
-.section sitwb
-.section sitxa
-.section sitxb
-.section sitya
-.section sityb
-.section sitza
-.section sitzb
-.section sit1a
-.section sit1b
-.section sit2a
-.section sit2b
-.section sit3a
-.section sit3b
-.section sit4a
-.section sit4b
-.section sit5a
-.section sit5b
-.section sit6a
-.section sit6b
-.section sit7a
-.section sit7b
-.section sit8a
-.section sit8b
-.section sit9a
-.section sit9b
-.section sit0a
-.section sit0b
-.section siuaa
-.section siuab
-.section siuba
-.section siubb
-.section siuca
-.section siucb
-.section siuda
-.section siudb
-.section siuea
-.section siueb
-.section siufa
-.section siufb
-.section siuga
-.section siugb
-.section siuha
-.section siuhb
-.section siuia
-.section siuib
-.section siuja
-.section siujb
-.section siuka
-.section siukb
-.section siula
-.section siulb
-.section siuma
-.section siumb
-.section siuna
-.section siunb
-.section siuoa
-.section siuob
-.section siupa
-.section siupb
-.section siuqa
-.section siuqb
-.section siura
-.section siurb
-.section siusa
-.section siusb
-.section siuta
-.section siutb
-.section siuua
-.section siuub
-.section siuva
-.section siuvb
-.section siuwa
-.section siuwb
-.section siuxa
-.section siuxb
-.section siuya
-.section siuyb
-.section siuza
-.section siuzb
-.section siu1a
-.section siu1b
-.section siu2a
-.section siu2b
-.section siu3a
-.section siu3b
-.section siu4a
-.section siu4b
-.section siu5a
-.section siu5b
-.section siu6a
-.section siu6b
-.section siu7a
-.section siu7b
-.section siu8a
-.section siu8b
-.section siu9a
-.section siu9b
-.section siu0a
-.section siu0b
-.section sivaa
-.section sivab
-.section sivba
-.section sivbb
-.section sivca
-.section sivcb
-.section sivda
-.section sivdb
-.section sivea
-.section siveb
-.section sivfa
-.section sivfb
-.section sivga
-.section sivgb
-.section sivha
-.section sivhb
-.section sivia
-.section sivib
-.section sivja
-.section sivjb
-.section sivka
-.section sivkb
-.section sivla
-.section sivlb
-.section sivma
-.section sivmb
-.section sivna
-.section sivnb
-.section sivoa
-.section sivob
-.section sivpa
-.section sivpb
-.section sivqa
-.section sivqb
-.section sivra
-.section sivrb
-.section sivsa
-.section sivsb
-.section sivta
-.section sivtb
-.section sivua
-.section sivub
-.section sivva
-.section sivvb
-.section sivwa
-.section sivwb
-.section sivxa
-.section sivxb
-.section sivya
-.section sivyb
-.section sivza
-.section sivzb
-.section siv1a
-.section siv1b
-.section siv2a
-.section siv2b
-.section siv3a
-.section siv3b
-.section siv4a
-.section siv4b
-.section siv5a
-.section siv5b
-.section siv6a
-.section siv6b
-.section siv7a
-.section siv7b
-.section siv8a
-.section siv8b
-.section siv9a
-.section siv9b
-.section siv0a
-.section siv0b
-.section siwaa
-.section siwab
-.section siwba
-.section siwbb
-.section siwca
-.section siwcb
-.section siwda
-.section siwdb
-.section siwea
-.section siweb
-.section siwfa
-.section siwfb
-.section siwga
-.section siwgb
-.section siwha
-.section siwhb
-.section siwia
-.section siwib
-.section siwja
-.section siwjb
-.section siwka
-.section siwkb
-.section siwla
-.section siwlb
-.section siwma
-.section siwmb
-.section siwna
-.section siwnb
-.section siwoa
-.section siwob
-.section siwpa
-.section siwpb
-.section siwqa
-.section siwqb
-.section siwra
-.section siwrb
-.section siwsa
-.section siwsb
-.section siwta
-.section siwtb
-.section siwua
-.section siwub
-.section siwva
-.section siwvb
-.section siwwa
-.section siwwb
-.section siwxa
-.section siwxb
-.section siwya
-.section siwyb
-.section siwza
-.section siwzb
-.section siw1a
-.section siw1b
-.section siw2a
-.section siw2b
-.section siw3a
-.section siw3b
-.section siw4a
-.section siw4b
-.section siw5a
-.section siw5b
-.section siw6a
-.section siw6b
-.section siw7a
-.section siw7b
-.section siw8a
-.section siw8b
-.section siw9a
-.section siw9b
-.section siw0a
-.section siw0b
-.section sixaa
-.section sixab
-.section sixba
-.section sixbb
-.section sixca
-.section sixcb
-.section sixda
-.section sixdb
-.section sixea
-.section sixeb
-.section sixfa
-.section sixfb
-.section sixga
-.section sixgb
-.section sixha
-.section sixhb
-.section sixia
-.section sixib
-.section sixja
-.section sixjb
-.section sixka
-.section sixkb
-.section sixla
-.section sixlb
-.section sixma
-.section sixmb
-.section sixna
-.section sixnb
-.section sixoa
-.section sixob
-.section sixpa
-.section sixpb
-.section sixqa
-.section sixqb
-.section sixra
-.section sixrb
-.section sixsa
-.section sixsb
-.section sixta
-.section sixtb
-.section sixua
-.section sixub
-.section sixva
-.section sixvb
-.section sixwa
-.section sixwb
-.section sixxa
-.section sixxb
-.section sixya
-.section sixyb
-.section sixza
-.section sixzb
-.section six1a
-.section six1b
-.section six2a
-.section six2b
-.section six3a
-.section six3b
-.section six4a
-.section six4b
-.section six5a
-.section six5b
-.section six6a
-.section six6b
-.section six7a
-.section six7b
-.section six8a
-.section six8b
-.section six9a
-.section six9b
-.section six0a
-.section six0b
-.section siyaa
-.section siyab
-.section siyba
-.section siybb
-.section siyca
-.section siycb
-.section siyda
-.section siydb
-.section siyea
-.section siyeb
-.section siyfa
-.section siyfb
-.section siyga
-.section siygb
-.section siyha
-.section siyhb
-.section siyia
-.section siyib
-.section siyja
-.section siyjb
-.section siyka
-.section siykb
-.section siyla
-.section siylb
-.section siyma
-.section siymb
-.section siyna
-.section siynb
-.section siyoa
-.section siyob
-.section siypa
-.section siypb
-.section siyqa
-.section siyqb
-.section siyra
-.section siyrb
-.section siysa
-.section siysb
-.section siyta
-.section siytb
-.section siyua
-.section siyub
-.section siyva
-.section siyvb
-.section siywa
-.section siywb
-.section siyxa
-.section siyxb
-.section siyya
-.section siyyb
-.section siyza
-.section siyzb
-.section siy1a
-.section siy1b
-.section siy2a
-.section siy2b
-.section siy3a
-.section siy3b
-.section siy4a
-.section siy4b
-.section siy5a
-.section siy5b
-.section siy6a
-.section siy6b
-.section siy7a
-.section siy7b
-.section siy8a
-.section siy8b
-.section siy9a
-.section siy9b
-.section siy0a
-.section siy0b
-.section sizaa
-.section sizab
-.section sizba
-.section sizbb
-.section sizca
-.section sizcb
-.section sizda
-.section sizdb
-.section sizea
-.section sizeb
-.section sizfa
-.section sizfb
-.section sizga
-.section sizgb
-.section sizha
-.section sizhb
-.section sizia
-.section sizib
-.section sizja
-.section sizjb
-.section sizka
-.section sizkb
-.section sizla
-.section sizlb
-.section sizma
-.section sizmb
-.section sizna
-.section siznb
-.section sizoa
-.section sizob
-.section sizpa
-.section sizpb
-.section sizqa
-.section sizqb
-.section sizra
-.section sizrb
-.section sizsa
-.section sizsb
-.section sizta
-.section siztb
-.section sizua
-.section sizub
-.section sizva
-.section sizvb
-.section sizwa
-.section sizwb
-.section sizxa
-.section sizxb
-.section sizya
-.section sizyb
-.section sizza
-.section sizzb
-.section siz1a
-.section siz1b
-.section siz2a
-.section siz2b
-.section siz3a
-.section siz3b
-.section siz4a
-.section siz4b
-.section siz5a
-.section siz5b
-.section siz6a
-.section siz6b
-.section siz7a
-.section siz7b
-.section siz8a
-.section siz8b
-.section siz9a
-.section siz9b
-.section siz0a
-.section siz0b
-.section si1aa
-.section si1ab
-.section si1ba
-.section si1bb
-.section si1ca
-.section si1cb
-.section si1da
-.section si1db
-.section si1ea
-.section si1eb
-.section si1fa
-.section si1fb
-.section si1ga
-.section si1gb
-.section si1ha
-.section si1hb
-.section si1ia
-.section si1ib
-.section si1ja
-.section si1jb
-.section si1ka
-.section si1kb
-.section si1la
-.section si1lb
-.section si1ma
-.section si1mb
-.section si1na
-.section si1nb
-.section si1oa
-.section si1ob
-.section si1pa
-.section si1pb
-.section si1qa
-.section si1qb
-.section si1ra
-.section si1rb
-.section si1sa
-.section si1sb
-.section si1ta
-.section si1tb
-.section si1ua
-.section si1ub
-.section si1va
-.section si1vb
-.section si1wa
-.section si1wb
-.section si1xa
-.section si1xb
-.section si1ya
-.section si1yb
-.section si1za
-.section si1zb
-.section si11a
-.section si11b
-.section si12a
-.section si12b
-.section si13a
-.section si13b
-.section si14a
-.section si14b
-.section si15a
-.section si15b
-.section si16a
-.section si16b
-.section si17a
-.section si17b
-.section si18a
-.section si18b
-.section si19a
-.section si19b
-.section si10a
-.section si10b
-.section si2aa
-.section si2ab
-.section si2ba
-.section si2bb
-.section si2ca
-.section si2cb
-.section si2da
-.section si2db
-.section si2ea
-.section si2eb
-.section si2fa
-.section si2fb
-.section si2ga
-.section si2gb
-.section si2ha
-.section si2hb
-.section si2ia
-.section si2ib
-.section si2ja
-.section si2jb
-.section si2ka
-.section si2kb
-.section si2la
-.section si2lb
-.section si2ma
-.section si2mb
-.section si2na
-.section si2nb
-.section si2oa
-.section si2ob
-.section si2pa
-.section si2pb
-.section si2qa
-.section si2qb
-.section si2ra
-.section si2rb
-.section si2sa
-.section si2sb
-.section si2ta
-.section si2tb
-.section si2ua
-.section si2ub
-.section si2va
-.section si2vb
-.section si2wa
-.section si2wb
-.section si2xa
-.section si2xb
-.section si2ya
-.section si2yb
-.section si2za
-.section si2zb
-.section si21a
-.section si21b
-.section si22a
-.section si22b
-.section si23a
-.section si23b
-.section si24a
-.section si24b
-.section si25a
-.section si25b
-.section si26a
-.section si26b
-.section si27a
-.section si27b
-.section si28a
-.section si28b
-.section si29a
-.section si29b
-.section si20a
-.section si20b
-.section si3aa
-.section si3ab
-.section si3ba
-.section si3bb
-.section si3ca
-.section si3cb
-.section si3da
-.section si3db
-.section si3ea
-.section si3eb
-.section si3fa
-.section si3fb
-.section si3ga
-.section si3gb
-.section si3ha
-.section si3hb
-.section si3ia
-.section si3ib
-.section si3ja
-.section si3jb
-.section si3ka
-.section si3kb
-.section si3la
-.section si3lb
-.section si3ma
-.section si3mb
-.section si3na
-.section si3nb
-.section si3oa
-.section si3ob
-.section si3pa
-.section si3pb
-.section si3qa
-.section si3qb
-.section si3ra
-.section si3rb
-.section si3sa
-.section si3sb
-.section si3ta
-.section si3tb
-.section si3ua
-.section si3ub
-.section si3va
-.section si3vb
-.section si3wa
-.section si3wb
-.section si3xa
-.section si3xb
-.section si3ya
-.section si3yb
-.section si3za
-.section si3zb
-.section si31a
-.section si31b
-.section si32a
-.section si32b
-.section si33a
-.section si33b
-.section si34a
-.section si34b
-.section si35a
-.section si35b
-.section si36a
-.section si36b
-.section si37a
-.section si37b
-.section si38a
-.section si38b
-.section si39a
-.section si39b
-.section si30a
-.section si30b
-.section si4aa
-.section si4ab
-.section si4ba
-.section si4bb
-.section si4ca
-.section si4cb
-.section si4da
-.section si4db
-.section si4ea
-.section si4eb
-.section si4fa
-.section si4fb
-.section si4ga
-.section si4gb
-.section si4ha
-.section si4hb
-.section si4ia
-.section si4ib
-.section si4ja
-.section si4jb
-.section si4ka
-.section si4kb
-.section si4la
-.section si4lb
-.section si4ma
-.section si4mb
-.section si4na
-.section si4nb
-.section si4oa
-.section si4ob
-.section si4pa
-.section si4pb
-.section si4qa
-.section si4qb
-.section si4ra
-.section si4rb
-.section si4sa
-.section si4sb
-.section si4ta
-.section si4tb
-.section si4ua
-.section si4ub
-.section si4va
-.section si4vb
-.section si4wa
-.section si4wb
-.section si4xa
-.section si4xb
-.section si4ya
-.section si4yb
-.section si4za
-.section si4zb
-.section si41a
-.section si41b
-.section si42a
-.section si42b
-.section si43a
-.section si43b
-.section si44a
-.section si44b
-.section si45a
-.section si45b
-.section si46a
-.section si46b
-.section si47a
-.section si47b
-.section si48a
-.section si48b
-.section si49a
-.section si49b
-.section si40a
-.section si40b
-.section si5aa
-.section si5ab
-.section si5ba
-.section si5bb
-.section si5ca
-.section si5cb
-.section si5da
-.section si5db
-.section si5ea
-.section si5eb
-.section si5fa
-.section si5fb
-.section si5ga
-.section si5gb
-.section si5ha
-.section si5hb
-.section si5ia
-.section si5ib
-.section si5ja
-.section si5jb
-.section si5ka
-.section si5kb
-.section si5la
-.section si5lb
-.section si5ma
-.section si5mb
-.section si5na
-.section si5nb
-.section si5oa
-.section si5ob
-.section si5pa
-.section si5pb
-.section si5qa
-.section si5qb
-.section si5ra
-.section si5rb
-.section si5sa
-.section si5sb
-.section si5ta
-.section si5tb
-.section si5ua
-.section si5ub
-.section si5va
-.section si5vb
-.section si5wa
-.section si5wb
-.section si5xa
-.section si5xb
-.section si5ya
-.section si5yb
-.section si5za
-.section si5zb
-.section si51a
-.section si51b
-.section si52a
-.section si52b
-.section si53a
-.section si53b
-.section si54a
-.section si54b
-.section si55a
-.section si55b
-.section si56a
-.section si56b
-.section si57a
-.section si57b
-.section si58a
-.section si58b
-.section si59a
-.section si59b
-.section si50a
-.section si50b
-.section si6aa
-.section si6ab
-.section si6ba
-.section si6bb
-.section si6ca
-.section si6cb
-.section si6da
-.section si6db
-.section si6ea
-.section si6eb
-.section si6fa
-.section si6fb
-.section si6ga
-.section si6gb
-.section si6ha
-.section si6hb
-.section si6ia
-.section si6ib
-.section si6ja
-.section si6jb
-.section si6ka
-.section si6kb
-.section si6la
-.section si6lb
-.section si6ma
-.section si6mb
-.section si6na
-.section si6nb
-.section si6oa
-.section si6ob
-.section si6pa
-.section si6pb
-.section si6qa
-.section si6qb
-.section si6ra
-.section si6rb
-.section si6sa
-.section si6sb
-.section si6ta
-.section si6tb
-.section si6ua
-.section si6ub
-.section si6va
-.section si6vb
-.section si6wa
-.section si6wb
-.section si6xa
-.section si6xb
-.section si6ya
-.section si6yb
-.section si6za
-.section si6zb
-.section si61a
-.section si61b
-.section si62a
-.section si62b
-.section si63a
-.section si63b
-.section si64a
-.section si64b
-.section si65a
-.section si65b
-.section si66a
-.section si66b
-.section si67a
-.section si67b
-.section si68a
-.section si68b
-.section si69a
-.section si69b
-.section si60a
-.section si60b
-.section si7aa
-.section si7ab
-.section si7ba
-.section si7bb
-.section si7ca
-.section si7cb
-.section si7da
-.section si7db
-.section si7ea
-.section si7eb
-.section si7fa
-.section si7fb
-.section si7ga
-.section si7gb
-.section si7ha
-.section si7hb
-.section si7ia
-.section si7ib
-.section si7ja
-.section si7jb
-.section si7ka
-.section si7kb
-.section si7la
-.section si7lb
-.section si7ma
-.section si7mb
-.section si7na
-.section si7nb
-.section si7oa
-.section si7ob
-.section si7pa
-.section si7pb
-.section si7qa
-.section si7qb
-.section si7ra
-.section si7rb
-.section si7sa
-.section si7sb
-.section si7ta
-.section si7tb
-.section si7ua
-.section si7ub
-.section si7va
-.section si7vb
-.section si7wa
-.section si7wb
-.section si7xa
-.section si7xb
-.section si7ya
-.section si7yb
-.section si7za
-.section si7zb
-.section si71a
-.section si71b
-.section si72a
-.section si72b
-.section si73a
-.section si73b
-.section si74a
-.section si74b
-.section si75a
-.section si75b
-.section si76a
-.section si76b
-.section si77a
-.section si77b
-.section si78a
-.section si78b
-.section si79a
-.section si79b
-.section si70a
-.section si70b
-.section si8aa
-.section si8ab
-.section si8ba
-.section si8bb
-.section si8ca
-.section si8cb
-.section si8da
-.section si8db
-.section si8ea
-.section si8eb
-.section si8fa
-.section si8fb
-.section si8ga
-.section si8gb
-.section si8ha
-.section si8hb
-.section si8ia
-.section si8ib
-.section si8ja
-.section si8jb
-.section si8ka
-.section si8kb
-.section si8la
-.section si8lb
-.section si8ma
-.section si8mb
-.section si8na
-.section si8nb
-.section si8oa
-.section si8ob
-.section si8pa
-.section si8pb
-.section si8qa
-.section si8qb
-.section si8ra
-.section si8rb
-.section si8sa
-.section si8sb
-.section si8ta
-.section si8tb
-.section si8ua
-.section si8ub
-.section si8va
-.section si8vb
-.section si8wa
-.section si8wb
-.section si8xa
-.section si8xb
-.section si8ya
-.section si8yb
-.section si8za
-.section si8zb
-.section si81a
-.section si81b
-.section si82a
-.section si82b
-.section si83a
-.section si83b
-.section si84a
-.section si84b
-.section si85a
-.section si85b
-.section si86a
-.section si86b
-.section si87a
-.section si87b
-.section si88a
-.section si88b
-.section si89a
-.section si89b
-.section si80a
-.section si80b
-.section si9aa
-.section si9ab
-.section si9ba
-.section si9bb
-.section si9ca
-.section si9cb
-.section si9da
-.section si9db
-.section si9ea
-.section si9eb
-.section si9fa
-.section si9fb
-.section si9ga
-.section si9gb
-.section si9ha
-.section si9hb
-.section si9ia
-.section si9ib
-.section si9ja
-.section si9jb
-.section si9ka
-.section si9kb
-.section si9la
-.section si9lb
-.section si9ma
-.section si9mb
-.section si9na
-.section si9nb
-.section si9oa
-.section si9ob
-.section si9pa
-.section si9pb
-.section si9qa
-.section si9qb
-.section si9ra
-.section si9rb
-.section si9sa
-.section si9sb
-.section si9ta
-.section si9tb
-.section si9ua
-.section si9ub
-.section si9va
-.section si9vb
-.section si9wa
-.section si9wb
-.section si9xa
-.section si9xb
-.section si9ya
-.section si9yb
-.section si9za
-.section si9zb
-.section si91a
-.section si91b
-.section si92a
-.section si92b
-.section si93a
-.section si93b
-.section si94a
-.section si94b
-.section si95a
-.section si95b
-.section si96a
-.section si96b
-.section si97a
-.section si97b
-.section si98a
-.section si98b
-.section si99a
-.section si99b
-.section si90a
-.section si90b
-.section si0aa
-.section si0ab
-.section si0ba
-.section si0bb
-.section si0ca
-.section si0cb
-.section si0da
-.section si0db
-.section si0ea
-.section si0eb
-.section si0fa
-.section si0fb
-.section si0ga
-.section si0gb
-.section si0ha
-.section si0hb
-.section si0ia
-.section si0ib
-.section si0ja
-.section si0jb
-.section si0ka
-.section si0kb
-.section si0la
-.section si0lb
-.section si0ma
-.section si0mb
-.section si0na
-.section si0nb
-.section si0oa
-.section si0ob
-.section si0pa
-.section si0pb
-.section si0qa
-.section si0qb
-.section si0ra
-.section si0rb
-.section si0sa
-.section si0sb
-.section si0ta
-.section si0tb
-.section si0ua
-.section si0ub
-.section si0va
-.section si0vb
-.section si0wa
-.section si0wb
-.section si0xa
-.section si0xb
-.section si0ya
-.section si0yb
-.section si0za
-.section si0zb
-.section si01a
-.section si01b
-.section si02a
-.section si02b
-.section si03a
-.section si03b
-.section si04a
-.section si04b
-.section si05a
-.section si05b
-.section si06a
-.section si06b
-.section si07a
-.section si07b
-.section si08a
-.section si08b
-.section si09a
-.section si09b
-.section si00a
-.section si00b
-.section sjaaa
-.section sjaab
-.section sjaba
-.section sjabb
-.section sjaca
-.section sjacb
-.section sjada
-.section sjadb
-.section sjaea
-.section sjaeb
-.section sjafa
-.section sjafb
-.section sjaga
-.section sjagb
-.section sjaha
-.section sjahb
-.section sjaia
-.section sjaib
-.section sjaja
-.section sjajb
-.section sjaka
-.section sjakb
-.section sjala
-.section sjalb
-.section sjama
-.section sjamb
-.section sjana
-.section sjanb
-.section sjaoa
-.section sjaob
-.section sjapa
-.section sjapb
-.section sjaqa
-.section sjaqb
-.section sjara
-.section sjarb
-.section sjasa
-.section sjasb
-.section sjata
-.section sjatb
-.section sjaua
-.section sjaub
-.section sjava
-.section sjavb
-.section sjawa
-.section sjawb
-.section sjaxa
-.section sjaxb
-.section sjaya
-.section sjayb
-.section sjaza
-.section sjazb
-.section sja1a
-.section sja1b
-.section sja2a
-.section sja2b
-.section sja3a
-.section sja3b
-.section sja4a
-.section sja4b
-.section sja5a
-.section sja5b
-.section sja6a
-.section sja6b
-.section sja7a
-.section sja7b
-.section sja8a
-.section sja8b
-.section sja9a
-.section sja9b
-.section sja0a
-.section sja0b
-.section sjbaa
-.section sjbab
-.section sjbba
-.section sjbbb
-.section sjbca
-.section sjbcb
-.section sjbda
-.section sjbdb
-.section sjbea
-.section sjbeb
-.section sjbfa
-.section sjbfb
-.section sjbga
-.section sjbgb
-.section sjbha
-.section sjbhb
-.section sjbia
-.section sjbib
-.section sjbja
-.section sjbjb
-.section sjbka
-.section sjbkb
-.section sjbla
-.section sjblb
-.section sjbma
-.section sjbmb
-.section sjbna
-.section sjbnb
-.section sjboa
-.section sjbob
-.section sjbpa
-.section sjbpb
-.section sjbqa
-.section sjbqb
-.section sjbra
-.section sjbrb
-.section sjbsa
-.section sjbsb
-.section sjbta
-.section sjbtb
-.section sjbua
-.section sjbub
-.section sjbva
-.section sjbvb
-.section sjbwa
-.section sjbwb
-.section sjbxa
-.section sjbxb
-.section sjbya
-.section sjbyb
-.section sjbza
-.section sjbzb
-.section sjb1a
-.section sjb1b
-.section sjb2a
-.section sjb2b
-.section sjb3a
-.section sjb3b
-.section sjb4a
-.section sjb4b
-.section sjb5a
-.section sjb5b
-.section sjb6a
-.section sjb6b
-.section sjb7a
-.section sjb7b
-.section sjb8a
-.section sjb8b
-.section sjb9a
-.section sjb9b
-.section sjb0a
-.section sjb0b
-.section sjcaa
-.section sjcab
-.section sjcba
-.section sjcbb
-.section sjcca
-.section sjccb
-.section sjcda
-.section sjcdb
-.section sjcea
-.section sjceb
-.section sjcfa
-.section sjcfb
-.section sjcga
-.section sjcgb
-.section sjcha
-.section sjchb
-.section sjcia
-.section sjcib
-.section sjcja
-.section sjcjb
-.section sjcka
-.section sjckb
-.section sjcla
-.section sjclb
-.section sjcma
-.section sjcmb
-.section sjcna
-.section sjcnb
-.section sjcoa
-.section sjcob
-.section sjcpa
-.section sjcpb
-.section sjcqa
-.section sjcqb
-.section sjcra
-.section sjcrb
-.section sjcsa
-.section sjcsb
-.section sjcta
-.section sjctb
-.section sjcua
-.section sjcub
-.section sjcva
-.section sjcvb
-.section sjcwa
-.section sjcwb
-.section sjcxa
-.section sjcxb
-.section sjcya
-.section sjcyb
-.section sjcza
-.section sjczb
-.section sjc1a
-.section sjc1b
-.section sjc2a
-.section sjc2b
-.section sjc3a
-.section sjc3b
-.section sjc4a
-.section sjc4b
-.section sjc5a
-.section sjc5b
-.section sjc6a
-.section sjc6b
-.section sjc7a
-.section sjc7b
-.section sjc8a
-.section sjc8b
-.section sjc9a
-.section sjc9b
-.section sjc0a
-.section sjc0b
-.section sjdaa
-.section sjdab
-.section sjdba
-.section sjdbb
-.section sjdca
-.section sjdcb
-.section sjdda
-.section sjddb
-.section sjdea
-.section sjdeb
-.section sjdfa
-.section sjdfb
-.section sjdga
-.section sjdgb
-.section sjdha
-.section sjdhb
-.section sjdia
-.section sjdib
-.section sjdja
-.section sjdjb
-.section sjdka
-.section sjdkb
-.section sjdla
-.section sjdlb
-.section sjdma
-.section sjdmb
-.section sjdna
-.section sjdnb
-.section sjdoa
-.section sjdob
-.section sjdpa
-.section sjdpb
-.section sjdqa
-.section sjdqb
-.section sjdra
-.section sjdrb
-.section sjdsa
-.section sjdsb
-.section sjdta
-.section sjdtb
-.section sjdua
-.section sjdub
-.section sjdva
-.section sjdvb
-.section sjdwa
-.section sjdwb
-.section sjdxa
-.section sjdxb
-.section sjdya
-.section sjdyb
-.section sjdza
-.section sjdzb
-.section sjd1a
-.section sjd1b
-.section sjd2a
-.section sjd2b
-.section sjd3a
-.section sjd3b
-.section sjd4a
-.section sjd4b
-.section sjd5a
-.section sjd5b
-.section sjd6a
-.section sjd6b
-.section sjd7a
-.section sjd7b
-.section sjd8a
-.section sjd8b
-.section sjd9a
-.section sjd9b
-.section sjd0a
-.section sjd0b
-.section sjeaa
-.section sjeab
-.section sjeba
-.section sjebb
-.section sjeca
-.section sjecb
-.section sjeda
-.section sjedb
-.section sjeea
-.section sjeeb
-.section sjefa
-.section sjefb
-.section sjega
-.section sjegb
-.section sjeha
-.section sjehb
-.section sjeia
-.section sjeib
-.section sjeja
-.section sjejb
-.section sjeka
-.section sjekb
-.section sjela
-.section sjelb
-.section sjema
-.section sjemb
-.section sjena
-.section sjenb
-.section sjeoa
-.section sjeob
-.section sjepa
-.section sjepb
-.section sjeqa
-.section sjeqb
-.section sjera
-.section sjerb
-.section sjesa
-.section sjesb
-.section sjeta
-.section sjetb
-.section sjeua
-.section sjeub
-.section sjeva
-.section sjevb
-.section sjewa
-.section sjewb
-.section sjexa
-.section sjexb
-.section sjeya
-.section sjeyb
-.section sjeza
-.section sjezb
-.section sje1a
-.section sje1b
-.section sje2a
-.section sje2b
-.section sje3a
-.section sje3b
-.section sje4a
-.section sje4b
-.section sje5a
-.section sje5b
-.section sje6a
-.section sje6b
-.section sje7a
-.section sje7b
-.section sje8a
-.section sje8b
-.section sje9a
-.section sje9b
-.section sje0a
-.section sje0b
-.section sjfaa
-.section sjfab
-.section sjfba
-.section sjfbb
-.section sjfca
-.section sjfcb
-.section sjfda
-.section sjfdb
-.section sjfea
-.section sjfeb
-.section sjffa
-.section sjffb
-.section sjfga
-.section sjfgb
-.section sjfha
-.section sjfhb
-.section sjfia
-.section sjfib
-.section sjfja
-.section sjfjb
-.section sjfka
-.section sjfkb
-.section sjfla
-.section sjflb
-.section sjfma
-.section sjfmb
-.section sjfna
-.section sjfnb
-.section sjfoa
-.section sjfob
-.section sjfpa
-.section sjfpb
-.section sjfqa
-.section sjfqb
-.section sjfra
-.section sjfrb
-.section sjfsa
-.section sjfsb
-.section sjfta
-.section sjftb
-.section sjfua
-.section sjfub
-.section sjfva
-.section sjfvb
-.section sjfwa
-.section sjfwb
-.section sjfxa
-.section sjfxb
-.section sjfya
-.section sjfyb
-.section sjfza
-.section sjfzb
-.section sjf1a
-.section sjf1b
-.section sjf2a
-.section sjf2b
-.section sjf3a
-.section sjf3b
-.section sjf4a
-.section sjf4b
-.section sjf5a
-.section sjf5b
-.section sjf6a
-.section sjf6b
-.section sjf7a
-.section sjf7b
-.section sjf8a
-.section sjf8b
-.section sjf9a
-.section sjf9b
-.section sjf0a
-.section sjf0b
-.section sjgaa
-.section sjgab
-.section sjgba
-.section sjgbb
-.section sjgca
-.section sjgcb
-.section sjgda
-.section sjgdb
-.section sjgea
-.section sjgeb
-.section sjgfa
-.section sjgfb
-.section sjgga
-.section sjggb
-.section sjgha
-.section sjghb
-.section sjgia
-.section sjgib
-.section sjgja
-.section sjgjb
-.section sjgka
-.section sjgkb
-.section sjgla
-.section sjglb
-.section sjgma
-.section sjgmb
-.section sjgna
-.section sjgnb
-.section sjgoa
-.section sjgob
-.section sjgpa
-.section sjgpb
-.section sjgqa
-.section sjgqb
-.section sjgra
-.section sjgrb
-.section sjgsa
-.section sjgsb
-.section sjgta
-.section sjgtb
-.section sjgua
-.section sjgub
-.section sjgva
-.section sjgvb
-.section sjgwa
-.section sjgwb
-.section sjgxa
-.section sjgxb
-.section sjgya
-.section sjgyb
-.section sjgza
-.section sjgzb
-.section sjg1a
-.section sjg1b
-.section sjg2a
-.section sjg2b
-.section sjg3a
-.section sjg3b
-.section sjg4a
-.section sjg4b
-.section sjg5a
-.section sjg5b
-.section sjg6a
-.section sjg6b
-.section sjg7a
-.section sjg7b
-.section sjg8a
-.section sjg8b
-.section sjg9a
-.section sjg9b
-.section sjg0a
-.section sjg0b
-.section sjhaa
-.section sjhab
-.section sjhba
-.section sjhbb
-.section sjhca
-.section sjhcb
-.section sjhda
-.section sjhdb
-.section sjhea
-.section sjheb
-.section sjhfa
-.section sjhfb
-.section sjhga
-.section sjhgb
-.section sjhha
-.section sjhhb
-.section sjhia
-.section sjhib
-.section sjhja
-.section sjhjb
-.section sjhka
-.section sjhkb
-.section sjhla
-.section sjhlb
-.section sjhma
-.section sjhmb
-.section sjhna
-.section sjhnb
-.section sjhoa
-.section sjhob
-.section sjhpa
-.section sjhpb
-.section sjhqa
-.section sjhqb
-.section sjhra
-.section sjhrb
-.section sjhsa
-.section sjhsb
-.section sjhta
-.section sjhtb
-.section sjhua
-.section sjhub
-.section sjhva
-.section sjhvb
-.section sjhwa
-.section sjhwb
-.section sjhxa
-.section sjhxb
-.section sjhya
-.section sjhyb
-.section sjhza
-.section sjhzb
-.section sjh1a
-.section sjh1b
-.section sjh2a
-.section sjh2b
-.section sjh3a
-.section sjh3b
-.section sjh4a
-.section sjh4b
-.section sjh5a
-.section sjh5b
-.section sjh6a
-.section sjh6b
-.section sjh7a
-.section sjh7b
-.section sjh8a
-.section sjh8b
-.section sjh9a
-.section sjh9b
-.section sjh0a
-.section sjh0b
-.section sjiaa
-.section sjiab
-.section sjiba
-.section sjibb
-.section sjica
-.section sjicb
-.section sjida
-.section sjidb
-.section sjiea
-.section sjieb
-.section sjifa
-.section sjifb
-.section sjiga
-.section sjigb
-.section sjiha
-.section sjihb
-.section sjiia
-.section sjiib
-.section sjija
-.section sjijb
-.section sjika
-.section sjikb
-.section sjila
-.section sjilb
-.section sjima
-.section sjimb
-.section sjina
-.section sjinb
-.section sjioa
-.section sjiob
-.section sjipa
-.section sjipb
-.section sjiqa
-.section sjiqb
-.section sjira
-.section sjirb
-.section sjisa
-.section sjisb
-.section sjita
-.section sjitb
-.section sjiua
-.section sjiub
-.section sjiva
-.section sjivb
-.section sjiwa
-.section sjiwb
-.section sjixa
-.section sjixb
-.section sjiya
-.section sjiyb
-.section sjiza
-.section sjizb
-.section sji1a
-.section sji1b
-.section sji2a
-.section sji2b
-.section sji3a
-.section sji3b
-.section sji4a
-.section sji4b
-.section sji5a
-.section sji5b
-.section sji6a
-.section sji6b
-.section sji7a
-.section sji7b
-.section sji8a
-.section sji8b
-.section sji9a
-.section sji9b
-.section sji0a
-.section sji0b
-.section sjjaa
-.section sjjab
-.section sjjba
-.section sjjbb
-.section sjjca
-.section sjjcb
-.section sjjda
-.section sjjdb
-.section sjjea
-.section sjjeb
-.section sjjfa
-.section sjjfb
-.section sjjga
-.section sjjgb
-.section sjjha
-.section sjjhb
-.section sjjia
-.section sjjib
-.section sjjja
-.section sjjjb
-.section sjjka
-.section sjjkb
-.section sjjla
-.section sjjlb
-.section sjjma
-.section sjjmb
-.section sjjna
-.section sjjnb
-.section sjjoa
-.section sjjob
-.section sjjpa
-.section sjjpb
-.section sjjqa
-.section sjjqb
-.section sjjra
-.section sjjrb
-.section sjjsa
-.section sjjsb
-.section sjjta
-.section sjjtb
-.section sjjua
-.section sjjub
-.section sjjva
-.section sjjvb
-.section sjjwa
-.section sjjwb
-.section sjjxa
-.section sjjxb
-.section sjjya
-.section sjjyb
-.section sjjza
-.section sjjzb
-.section sjj1a
-.section sjj1b
-.section sjj2a
-.section sjj2b
-.section sjj3a
-.section sjj3b
-.section sjj4a
-.section sjj4b
-.section sjj5a
-.section sjj5b
-.section sjj6a
-.section sjj6b
-.section sjj7a
-.section sjj7b
-.section sjj8a
-.section sjj8b
-.section sjj9a
-.section sjj9b
-.section sjj0a
-.section sjj0b
-.section sjkaa
-.section sjkab
-.section sjkba
-.section sjkbb
-.section sjkca
-.section sjkcb
-.section sjkda
-.section sjkdb
-.section sjkea
-.section sjkeb
-.section sjkfa
-.section sjkfb
-.section sjkga
-.section sjkgb
-.section sjkha
-.section sjkhb
-.section sjkia
-.section sjkib
-.section sjkja
-.section sjkjb
-.section sjkka
-.section sjkkb
-.section sjkla
-.section sjklb
-.section sjkma
-.section sjkmb
-.section sjkna
-.section sjknb
-.section sjkoa
-.section sjkob
-.section sjkpa
-.section sjkpb
-.section sjkqa
-.section sjkqb
-.section sjkra
-.section sjkrb
-.section sjksa
-.section sjksb
-.section sjkta
-.section sjktb
-.section sjkua
-.section sjkub
-.section sjkva
-.section sjkvb
-.section sjkwa
-.section sjkwb
-.section sjkxa
-.section sjkxb
-.section sjkya
-.section sjkyb
-.section sjkza
-.section sjkzb
-.section sjk1a
-.section sjk1b
-.section sjk2a
-.section sjk2b
-.section sjk3a
-.section sjk3b
-.section sjk4a
-.section sjk4b
-.section sjk5a
-.section sjk5b
-.section sjk6a
-.section sjk6b
-.section sjk7a
-.section sjk7b
-.section sjk8a
-.section sjk8b
-.section sjk9a
-.section sjk9b
-.section sjk0a
-.section sjk0b
-.section sjlaa
-.section sjlab
-.section sjlba
-.section sjlbb
-.section sjlca
-.section sjlcb
-.section sjlda
-.section sjldb
-.section sjlea
-.section sjleb
-.section sjlfa
-.section sjlfb
-.section sjlga
-.section sjlgb
-.section sjlha
-.section sjlhb
-.section sjlia
-.section sjlib
-.section sjlja
-.section sjljb
-.section sjlka
-.section sjlkb
-.section sjlla
-.section sjllb
-.section sjlma
-.section sjlmb
-.section sjlna
-.section sjlnb
-.section sjloa
-.section sjlob
-.section sjlpa
-.section sjlpb
-.section sjlqa
-.section sjlqb
-.section sjlra
-.section sjlrb
-.section sjlsa
-.section sjlsb
-.section sjlta
-.section sjltb
-.section sjlua
-.section sjlub
-.section sjlva
-.section sjlvb
-.section sjlwa
-.section sjlwb
-.section sjlxa
-.section sjlxb
-.section sjlya
-.section sjlyb
-.section sjlza
-.section sjlzb
-.section sjl1a
-.section sjl1b
-.section sjl2a
-.section sjl2b
-.section sjl3a
-.section sjl3b
-.section sjl4a
-.section sjl4b
-.section sjl5a
-.section sjl5b
-.section sjl6a
-.section sjl6b
-.section sjl7a
-.section sjl7b
-.section sjl8a
-.section sjl8b
-.section sjl9a
-.section sjl9b
-.section sjl0a
-.section sjl0b
-.section sjmaa
-.section sjmab
-.section sjmba
-.section sjmbb
-.section sjmca
-.section sjmcb
-.section sjmda
-.section sjmdb
-.section sjmea
-.section sjmeb
-.section sjmfa
-.section sjmfb
-.section sjmga
-.section sjmgb
-.section sjmha
-.section sjmhb
-.section sjmia
-.section sjmib
-.section sjmja
-.section sjmjb
-.section sjmka
-.section sjmkb
-.section sjmla
-.section sjmlb
-.section sjmma
-.section sjmmb
-.section sjmna
-.section sjmnb
-.section sjmoa
-.section sjmob
-.section sjmpa
-.section sjmpb
-.section sjmqa
-.section sjmqb
-.section sjmra
-.section sjmrb
-.section sjmsa
-.section sjmsb
-.section sjmta
-.section sjmtb
-.section sjmua
-.section sjmub
-.section sjmva
-.section sjmvb
-.section sjmwa
-.section sjmwb
-.section sjmxa
-.section sjmxb
-.section sjmya
-.section sjmyb
-.section sjmza
-.section sjmzb
-.section sjm1a
-.section sjm1b
-.section sjm2a
-.section sjm2b
-.section sjm3a
-.section sjm3b
-.section sjm4a
-.section sjm4b
-.section sjm5a
-.section sjm5b
-.section sjm6a
-.section sjm6b
-.section sjm7a
-.section sjm7b
-.section sjm8a
-.section sjm8b
-.section sjm9a
-.section sjm9b
-.section sjm0a
-.section sjm0b
-.section sjnaa
-.section sjnab
-.section sjnba
-.section sjnbb
-.section sjnca
-.section sjncb
-.section sjnda
-.section sjndb
-.section sjnea
-.section sjneb
-.section sjnfa
-.section sjnfb
-.section sjnga
-.section sjngb
-.section sjnha
-.section sjnhb
-.section sjnia
-.section sjnib
-.section sjnja
-.section sjnjb
-.section sjnka
-.section sjnkb
-.section sjnla
-.section sjnlb
-.section sjnma
-.section sjnmb
-.section sjnna
-.section sjnnb
-.section sjnoa
-.section sjnob
-.section sjnpa
-.section sjnpb
-.section sjnqa
-.section sjnqb
-.section sjnra
-.section sjnrb
-.section sjnsa
-.section sjnsb
-.section sjnta
-.section sjntb
-.section sjnua
-.section sjnub
-.section sjnva
-.section sjnvb
-.section sjnwa
-.section sjnwb
-.section sjnxa
-.section sjnxb
-.section sjnya
-.section sjnyb
-.section sjnza
-.section sjnzb
-.section sjn1a
-.section sjn1b
-.section sjn2a
-.section sjn2b
-.section sjn3a
-.section sjn3b
-.section sjn4a
-.section sjn4b
-.section sjn5a
-.section sjn5b
-.section sjn6a
-.section sjn6b
-.section sjn7a
-.section sjn7b
-.section sjn8a
-.section sjn8b
-.section sjn9a
-.section sjn9b
-.section sjn0a
-.section sjn0b
-.section sjoaa
-.section sjoab
-.section sjoba
-.section sjobb
-.section sjoca
-.section sjocb
-.section sjoda
-.section sjodb
-.section sjoea
-.section sjoeb
-.section sjofa
-.section sjofb
-.section sjoga
-.section sjogb
-.section sjoha
-.section sjohb
-.section sjoia
-.section sjoib
-.section sjoja
-.section sjojb
-.section sjoka
-.section sjokb
-.section sjola
-.section sjolb
-.section sjoma
-.section sjomb
-.section sjona
-.section sjonb
-.section sjooa
-.section sjoob
-.section sjopa
-.section sjopb
-.section sjoqa
-.section sjoqb
-.section sjora
-.section sjorb
-.section sjosa
-.section sjosb
-.section sjota
-.section sjotb
-.section sjoua
-.section sjoub
-.section sjova
-.section sjovb
-.section sjowa
-.section sjowb
-.section sjoxa
-.section sjoxb
-.section sjoya
-.section sjoyb
-.section sjoza
-.section sjozb
-.section sjo1a
-.section sjo1b
-.section sjo2a
-.section sjo2b
-.section sjo3a
-.section sjo3b
-.section sjo4a
-.section sjo4b
-.section sjo5a
-.section sjo5b
-.section sjo6a
-.section sjo6b
-.section sjo7a
-.section sjo7b
-.section sjo8a
-.section sjo8b
-.section sjo9a
-.section sjo9b
-.section sjo0a
-.section sjo0b
-.section sjpaa
-.section sjpab
-.section sjpba
-.section sjpbb
-.section sjpca
-.section sjpcb
-.section sjpda
-.section sjpdb
-.section sjpea
-.section sjpeb
-.section sjpfa
-.section sjpfb
-.section sjpga
-.section sjpgb
-.section sjpha
-.section sjphb
-.section sjpia
-.section sjpib
-.section sjpja
-.section sjpjb
-.section sjpka
-.section sjpkb
-.section sjpla
-.section sjplb
-.section sjpma
-.section sjpmb
-.section sjpna
-.section sjpnb
-.section sjpoa
-.section sjpob
-.section sjppa
-.section sjppb
-.section sjpqa
-.section sjpqb
-.section sjpra
-.section sjprb
-.section sjpsa
-.section sjpsb
-.section sjpta
-.section sjptb
-.section sjpua
-.section sjpub
-.section sjpva
-.section sjpvb
-.section sjpwa
-.section sjpwb
-.section sjpxa
-.section sjpxb
-.section sjpya
-.section sjpyb
-.section sjpza
-.section sjpzb
-.section sjp1a
-.section sjp1b
-.section sjp2a
-.section sjp2b
-.section sjp3a
-.section sjp3b
-.section sjp4a
-.section sjp4b
-.section sjp5a
-.section sjp5b
-.section sjp6a
-.section sjp6b
-.section sjp7a
-.section sjp7b
-.section sjp8a
-.section sjp8b
-.section sjp9a
-.section sjp9b
-.section sjp0a
-.section sjp0b
-.section sjqaa
-.section sjqab
-.section sjqba
-.section sjqbb
-.section sjqca
-.section sjqcb
-.section sjqda
-.section sjqdb
-.section sjqea
-.section sjqeb
-.section sjqfa
-.section sjqfb
-.section sjqga
-.section sjqgb
-.section sjqha
-.section sjqhb
-.section sjqia
-.section sjqib
-.section sjqja
-.section sjqjb
-.section sjqka
-.section sjqkb
-.section sjqla
-.section sjqlb
-.section sjqma
-.section sjqmb
-.section sjqna
-.section sjqnb
-.section sjqoa
-.section sjqob
-.section sjqpa
-.section sjqpb
-.section sjqqa
-.section sjqqb
-.section sjqra
-.section sjqrb
-.section sjqsa
-.section sjqsb
-.section sjqta
-.section sjqtb
-.section sjqua
-.section sjqub
-.section sjqva
-.section sjqvb
-.section sjqwa
-.section sjqwb
-.section sjqxa
-.section sjqxb
-.section sjqya
-.section sjqyb
-.section sjqza
-.section sjqzb
-.section sjq1a
-.section sjq1b
-.section sjq2a
-.section sjq2b
-.section sjq3a
-.section sjq3b
-.section sjq4a
-.section sjq4b
-.section sjq5a
-.section sjq5b
-.section sjq6a
-.section sjq6b
-.section sjq7a
-.section sjq7b
-.section sjq8a
-.section sjq8b
-.section sjq9a
-.section sjq9b
-.section sjq0a
-.section sjq0b
-.section sjraa
-.section sjrab
-.section sjrba
-.section sjrbb
-.section sjrca
-.section sjrcb
-.section sjrda
-.section sjrdb
-.section sjrea
-.section sjreb
-.section sjrfa
-.section sjrfb
-.section sjrga
-.section sjrgb
-.section sjrha
-.section sjrhb
-.section sjria
-.section sjrib
-.section sjrja
-.section sjrjb
-.section sjrka
-.section sjrkb
-.section sjrla
-.section sjrlb
-.section sjrma
-.section sjrmb
-.section sjrna
-.section sjrnb
-.section sjroa
-.section sjrob
-.section sjrpa
-.section sjrpb
-.section sjrqa
-.section sjrqb
-.section sjrra
-.section sjrrb
-.section sjrsa
-.section sjrsb
-.section sjrta
-.section sjrtb
-.section sjrua
-.section sjrub
-.section sjrva
-.section sjrvb
-.section sjrwa
-.section sjrwb
-.section sjrxa
-.section sjrxb
-.section sjrya
-.section sjryb
-.section sjrza
-.section sjrzb
-.section sjr1a
-.section sjr1b
-.section sjr2a
-.section sjr2b
-.section sjr3a
-.section sjr3b
-.section sjr4a
-.section sjr4b
-.section sjr5a
-.section sjr5b
-.section sjr6a
-.section sjr6b
-.section sjr7a
-.section sjr7b
-.section sjr8a
-.section sjr8b
-.section sjr9a
-.section sjr9b
-.section sjr0a
-.section sjr0b
-.section sjsaa
-.section sjsab
-.section sjsba
-.section sjsbb
-.section sjsca
-.section sjscb
-.section sjsda
-.section sjsdb
-.section sjsea
-.section sjseb
-.section sjsfa
-.section sjsfb
-.section sjsga
-.section sjsgb
-.section sjsha
-.section sjshb
-.section sjsia
-.section sjsib
-.section sjsja
-.section sjsjb
-.section sjska
-.section sjskb
-.section sjsla
-.section sjslb
-.section sjsma
-.section sjsmb
-.section sjsna
-.section sjsnb
-.section sjsoa
-.section sjsob
-.section sjspa
-.section sjspb
-.section sjsqa
-.section sjsqb
-.section sjsra
-.section sjsrb
-.section sjssa
-.section sjssb
-.section sjsta
-.section sjstb
-.section sjsua
-.section sjsub
-.section sjsva
-.section sjsvb
-.section sjswa
-.section sjswb
-.section sjsxa
-.section sjsxb
-.section sjsya
-.section sjsyb
-.section sjsza
-.section sjszb
-.section sjs1a
-.section sjs1b
-.section sjs2a
-.section sjs2b
-.section sjs3a
-.section sjs3b
-.section sjs4a
-.section sjs4b
-.section sjs5a
-.section sjs5b
-.section sjs6a
-.section sjs6b
-.section sjs7a
-.section sjs7b
-.section sjs8a
-.section sjs8b
-.section sjs9a
-.section sjs9b
-.section sjs0a
-.section sjs0b
-.section sjtaa
-.section sjtab
-.section sjtba
-.section sjtbb
-.section sjtca
-.section sjtcb
-.section sjtda
-.section sjtdb
-.section sjtea
-.section sjteb
-.section sjtfa
-.section sjtfb
-.section sjtga
-.section sjtgb
-.section sjtha
-.section sjthb
-.section sjtia
-.section sjtib
-.section sjtja
-.section sjtjb
-.section sjtka
-.section sjtkb
-.section sjtla
-.section sjtlb
-.section sjtma
-.section sjtmb
-.section sjtna
-.section sjtnb
-.section sjtoa
-.section sjtob
-.section sjtpa
-.section sjtpb
-.section sjtqa
-.section sjtqb
-.section sjtra
-.section sjtrb
-.section sjtsa
-.section sjtsb
-.section sjtta
-.section sjttb
-.section sjtua
-.section sjtub
-.section sjtva
-.section sjtvb
-.section sjtwa
-.section sjtwb
-.section sjtxa
-.section sjtxb
-.section sjtya
-.section sjtyb
-.section sjtza
-.section sjtzb
-.section sjt1a
-.section sjt1b
-.section sjt2a
-.section sjt2b
-.section sjt3a
-.section sjt3b
-.section sjt4a
-.section sjt4b
-.section sjt5a
-.section sjt5b
-.section sjt6a
-.section sjt6b
-.section sjt7a
-.section sjt7b
-.section sjt8a
-.section sjt8b
-.section sjt9a
-.section sjt9b
-.section sjt0a
-.section sjt0b
-.section sjuaa
-.section sjuab
-.section sjuba
-.section sjubb
-.section sjuca
-.section sjucb
-.section sjuda
-.section sjudb
-.section sjuea
-.section sjueb
-.section sjufa
-.section sjufb
-.section sjuga
-.section sjugb
-.section sjuha
-.section sjuhb
-.section sjuia
-.section sjuib
-.section sjuja
-.section sjujb
-.section sjuka
-.section sjukb
-.section sjula
-.section sjulb
-.section sjuma
-.section sjumb
-.section sjuna
-.section sjunb
-.section sjuoa
-.section sjuob
-.section sjupa
-.section sjupb
-.section sjuqa
-.section sjuqb
-.section sjura
-.section sjurb
-.section sjusa
-.section sjusb
-.section sjuta
-.section sjutb
-.section sjuua
-.section sjuub
-.section sjuva
-.section sjuvb
-.section sjuwa
-.section sjuwb
-.section sjuxa
-.section sjuxb
-.section sjuya
-.section sjuyb
-.section sjuza
-.section sjuzb
-.section sju1a
-.section sju1b
-.section sju2a
-.section sju2b
-.section sju3a
-.section sju3b
-.section sju4a
-.section sju4b
-.section sju5a
-.section sju5b
-.section sju6a
-.section sju6b
-.section sju7a
-.section sju7b
-.section sju8a
-.section sju8b
-.section sju9a
-.section sju9b
-.section sju0a
-.section sju0b
-.section sjvaa
-.section sjvab
-.section sjvba
-.section sjvbb
-.section sjvca
-.section sjvcb
-.section sjvda
-.section sjvdb
-.section sjvea
-.section sjveb
-.section sjvfa
-.section sjvfb
-.section sjvga
-.section sjvgb
-.section sjvha
-.section sjvhb
-.section sjvia
-.section sjvib
-.section sjvja
-.section sjvjb
-.section sjvka
-.section sjvkb
-.section sjvla
-.section sjvlb
-.section sjvma
-.section sjvmb
-.section sjvna
-.section sjvnb
-.section sjvoa
-.section sjvob
-.section sjvpa
-.section sjvpb
-.section sjvqa
-.section sjvqb
-.section sjvra
-.section sjvrb
-.section sjvsa
-.section sjvsb
-.section sjvta
-.section sjvtb
-.section sjvua
-.section sjvub
-.section sjvva
-.section sjvvb
-.section sjvwa
-.section sjvwb
-.section sjvxa
-.section sjvxb
-.section sjvya
-.section sjvyb
-.section sjvza
-.section sjvzb
-.section sjv1a
-.section sjv1b
-.section sjv2a
-.section sjv2b
-.section sjv3a
-.section sjv3b
-.section sjv4a
-.section sjv4b
-.section sjv5a
-.section sjv5b
-.section sjv6a
-.section sjv6b
-.section sjv7a
-.section sjv7b
-.section sjv8a
-.section sjv8b
-.section sjv9a
-.section sjv9b
-.section sjv0a
-.section sjv0b
-.section sjwaa
-.section sjwab
-.section sjwba
-.section sjwbb
-.section sjwca
-.section sjwcb
-.section sjwda
-.section sjwdb
-.section sjwea
-.section sjweb
-.section sjwfa
-.section sjwfb
-.section sjwga
-.section sjwgb
-.section sjwha
-.section sjwhb
-.section sjwia
-.section sjwib
-.section sjwja
-.section sjwjb
-.section sjwka
-.section sjwkb
-.section sjwla
-.section sjwlb
-.section sjwma
-.section sjwmb
-.section sjwna
-.section sjwnb
-.section sjwoa
-.section sjwob
-.section sjwpa
-.section sjwpb
-.section sjwqa
-.section sjwqb
-.section sjwra
-.section sjwrb
-.section sjwsa
-.section sjwsb
-.section sjwta
-.section sjwtb
-.section sjwua
-.section sjwub
-.section sjwva
-.section sjwvb
-.section sjwwa
-.section sjwwb
-.section sjwxa
-.section sjwxb
-.section sjwya
-.section sjwyb
-.section sjwza
-.section sjwzb
-.section sjw1a
-.section sjw1b
-.section sjw2a
-.section sjw2b
-.section sjw3a
-.section sjw3b
-.section sjw4a
-.section sjw4b
-.section sjw5a
-.section sjw5b
-.section sjw6a
-.section sjw6b
-.section sjw7a
-.section sjw7b
-.section sjw8a
-.section sjw8b
-.section sjw9a
-.section sjw9b
-.section sjw0a
-.section sjw0b
-.section sjxaa
-.section sjxab
-.section sjxba
-.section sjxbb
-.section sjxca
-.section sjxcb
-.section sjxda
-.section sjxdb
-.section sjxea
-.section sjxeb
-.section sjxfa
-.section sjxfb
-.section sjxga
-.section sjxgb
-.section sjxha
-.section sjxhb
-.section sjxia
-.section sjxib
-.section sjxja
-.section sjxjb
-.section sjxka
-.section sjxkb
-.section sjxla
-.section sjxlb
-.section sjxma
-.section sjxmb
-.section sjxna
-.section sjxnb
-.section sjxoa
-.section sjxob
-.section sjxpa
-.section sjxpb
-.section sjxqa
-.section sjxqb
-.section sjxra
-.section sjxrb
-.section sjxsa
-.section sjxsb
-.section sjxta
-.section sjxtb
-.section sjxua
-.section sjxub
-.section sjxva
-.section sjxvb
-.section sjxwa
-.section sjxwb
-.section sjxxa
-.section sjxxb
-.section sjxya
-.section sjxyb
-.section sjxza
-.section sjxzb
-.section sjx1a
-.section sjx1b
-.section sjx2a
-.section sjx2b
-.section sjx3a
-.section sjx3b
-.section sjx4a
-.section sjx4b
-.section sjx5a
-.section sjx5b
-.section sjx6a
-.section sjx6b
-.section sjx7a
-.section sjx7b
-.section sjx8a
-.section sjx8b
-.section sjx9a
-.section sjx9b
-.section sjx0a
-.section sjx0b
-.section sjyaa
-.section sjyab
-.section sjyba
-.section sjybb
-.section sjyca
-.section sjycb
-.section sjyda
-.section sjydb
-.section sjyea
-.section sjyeb
-.section sjyfa
-.section sjyfb
-.section sjyga
-.section sjygb
-.section sjyha
-.section sjyhb
-.section sjyia
-.section sjyib
-.section sjyja
-.section sjyjb
-.section sjyka
-.section sjykb
-.section sjyla
-.section sjylb
-.section sjyma
-.section sjymb
-.section sjyna
-.section sjynb
-.section sjyoa
-.section sjyob
-.section sjypa
-.section sjypb
-.section sjyqa
-.section sjyqb
-.section sjyra
-.section sjyrb
-.section sjysa
-.section sjysb
-.section sjyta
-.section sjytb
-.section sjyua
-.section sjyub
-.section sjyva
-.section sjyvb
-.section sjywa
-.section sjywb
-.section sjyxa
-.section sjyxb
-.section sjyya
-.section sjyyb
-.section sjyza
-.section sjyzb
-.section sjy1a
-.section sjy1b
-.section sjy2a
-.section sjy2b
-.section sjy3a
-.section sjy3b
-.section sjy4a
-.section sjy4b
-.section sjy5a
-.section sjy5b
-.section sjy6a
-.section sjy6b
-.section sjy7a
-.section sjy7b
-.section sjy8a
-.section sjy8b
-.section sjy9a
-.section sjy9b
-.section sjy0a
-.section sjy0b
-.section sjzaa
-.section sjzab
-.section sjzba
-.section sjzbb
-.section sjzca
-.section sjzcb
-.section sjzda
-.section sjzdb
-.section sjzea
-.section sjzeb
-.section sjzfa
-.section sjzfb
-.section sjzga
-.section sjzgb
-.section sjzha
-.section sjzhb
-.section sjzia
-.section sjzib
-.section sjzja
-.section sjzjb
-.section sjzka
-.section sjzkb
-.section sjzla
-.section sjzlb
-.section sjzma
-.section sjzmb
-.section sjzna
-.section sjznb
-.section sjzoa
-.section sjzob
-.section sjzpa
-.section sjzpb
-.section sjzqa
-.section sjzqb
-.section sjzra
-.section sjzrb
-.section sjzsa
-.section sjzsb
-.section sjzta
-.section sjztb
-.section sjzua
-.section sjzub
-.section sjzva
-.section sjzvb
-.section sjzwa
-.section sjzwb
-.section sjzxa
-.section sjzxb
-.section sjzya
-.section sjzyb
-.section sjzza
-.section sjzzb
-.section sjz1a
-.section sjz1b
-.section sjz2a
-.section sjz2b
-.section sjz3a
-.section sjz3b
-.section sjz4a
-.section sjz4b
-.section sjz5a
-.section sjz5b
-.section sjz6a
-.section sjz6b
-.section sjz7a
-.section sjz7b
-.section sjz8a
-.section sjz8b
-.section sjz9a
-.section sjz9b
-.section sjz0a
-.section sjz0b
-.section sj1aa
-.section sj1ab
-.section sj1ba
-.section sj1bb
-.section sj1ca
-.section sj1cb
-.section sj1da
-.section sj1db
-.section sj1ea
-.section sj1eb
-.section sj1fa
-.section sj1fb
-.section sj1ga
-.section sj1gb
-.section sj1ha
-.section sj1hb
-.section sj1ia
-.section sj1ib
-.section sj1ja
-.section sj1jb
-.section sj1ka
-.section sj1kb
-.section sj1la
-.section sj1lb
-.section sj1ma
-.section sj1mb
-.section sj1na
-.section sj1nb
-.section sj1oa
-.section sj1ob
-.section sj1pa
-.section sj1pb
-.section sj1qa
-.section sj1qb
-.section sj1ra
-.section sj1rb
-.section sj1sa
-.section sj1sb
-.section sj1ta
-.section sj1tb
-.section sj1ua
-.section sj1ub
-.section sj1va
-.section sj1vb
-.section sj1wa
-.section sj1wb
-.section sj1xa
-.section sj1xb
-.section sj1ya
-.section sj1yb
-.section sj1za
-.section sj1zb
-.section sj11a
-.section sj11b
-.section sj12a
-.section sj12b
-.section sj13a
-.section sj13b
-.section sj14a
-.section sj14b
-.section sj15a
-.section sj15b
-.section sj16a
-.section sj16b
-.section sj17a
-.section sj17b
-.section sj18a
-.section sj18b
-.section sj19a
-.section sj19b
-.section sj10a
-.section sj10b
-.section sj2aa
-.section sj2ab
-.section sj2ba
-.section sj2bb
-.section sj2ca
-.section sj2cb
-.section sj2da
-.section sj2db
-.section sj2ea
-.section sj2eb
-.section sj2fa
-.section sj2fb
-.section sj2ga
-.section sj2gb
-.section sj2ha
-.section sj2hb
-.section sj2ia
-.section sj2ib
-.section sj2ja
-.section sj2jb
-.section sj2ka
-.section sj2kb
-.section sj2la
-.section sj2lb
-.section sj2ma
-.section sj2mb
-.section sj2na
-.section sj2nb
-.section sj2oa
-.section sj2ob
-.section sj2pa
-.section sj2pb
-.section sj2qa
-.section sj2qb
-.section sj2ra
-.section sj2rb
-.section sj2sa
-.section sj2sb
-.section sj2ta
-.section sj2tb
-.section sj2ua
-.section sj2ub
-.section sj2va
-.section sj2vb
-.section sj2wa
-.section sj2wb
-.section sj2xa
-.section sj2xb
-.section sj2ya
-.section sj2yb
-.section sj2za
-.section sj2zb
-.section sj21a
-.section sj21b
-.section sj22a
-.section sj22b
-.section sj23a
-.section sj23b
-.section sj24a
-.section sj24b
-.section sj25a
-.section sj25b
-.section sj26a
-.section sj26b
-.section sj27a
-.section sj27b
-.section sj28a
-.section sj28b
-.section sj29a
-.section sj29b
-.section sj20a
-.section sj20b
-.section sj3aa
-.section sj3ab
-.section sj3ba
-.section sj3bb
-.section sj3ca
-.section sj3cb
-.section sj3da
-.section sj3db
-.section sj3ea
-.section sj3eb
-.section sj3fa
-.section sj3fb
-.section sj3ga
-.section sj3gb
-.section sj3ha
-.section sj3hb
-.section sj3ia
-.section sj3ib
-.section sj3ja
-.section sj3jb
-.section sj3ka
-.section sj3kb
-.section sj3la
-.section sj3lb
-.section sj3ma
-.section sj3mb
-.section sj3na
-.section sj3nb
-.section sj3oa
-.section sj3ob
-.section sj3pa
-.section sj3pb
-.section sj3qa
-.section sj3qb
-.section sj3ra
-.section sj3rb
-.section sj3sa
-.section sj3sb
-.section sj3ta
-.section sj3tb
-.section sj3ua
-.section sj3ub
-.section sj3va
-.section sj3vb
-.section sj3wa
-.section sj3wb
-.section sj3xa
-.section sj3xb
-.section sj3ya
-.section sj3yb
-.section sj3za
-.section sj3zb
-.section sj31a
-.section sj31b
-.section sj32a
-.section sj32b
-.section sj33a
-.section sj33b
-.section sj34a
-.section sj34b
-.section sj35a
-.section sj35b
-.section sj36a
-.section sj36b
-.section sj37a
-.section sj37b
-.section sj38a
-.section sj38b
-.section sj39a
-.section sj39b
-.section sj30a
-.section sj30b
-.section sj4aa
-.section sj4ab
-.section sj4ba
-.section sj4bb
-.section sj4ca
-.section sj4cb
-.section sj4da
-.section sj4db
-.section sj4ea
-.section sj4eb
-.section sj4fa
-.section sj4fb
-.section sj4ga
-.section sj4gb
-.section sj4ha
-.section sj4hb
-.section sj4ia
-.section sj4ib
-.section sj4ja
-.section sj4jb
-.section sj4ka
-.section sj4kb
-.section sj4la
-.section sj4lb
-.section sj4ma
-.section sj4mb
-.section sj4na
-.section sj4nb
-.section sj4oa
-.section sj4ob
-.section sj4pa
-.section sj4pb
-.section sj4qa
-.section sj4qb
-.section sj4ra
-.section sj4rb
-.section sj4sa
-.section sj4sb
-.section sj4ta
-.section sj4tb
-.section sj4ua
-.section sj4ub
-.section sj4va
-.section sj4vb
-.section sj4wa
-.section sj4wb
-.section sj4xa
-.section sj4xb
-.section sj4ya
-.section sj4yb
-.section sj4za
-.section sj4zb
-.section sj41a
-.section sj41b
-.section sj42a
-.section sj42b
-.section sj43a
-.section sj43b
-.section sj44a
-.section sj44b
-.section sj45a
-.section sj45b
-.section sj46a
-.section sj46b
-.section sj47a
-.section sj47b
-.section sj48a
-.section sj48b
-.section sj49a
-.section sj49b
-.section sj40a
-.section sj40b
-.section sj5aa
-.section sj5ab
-.section sj5ba
-.section sj5bb
-.section sj5ca
-.section sj5cb
-.section sj5da
-.section sj5db
-.section sj5ea
-.section sj5eb
-.section sj5fa
-.section sj5fb
-.section sj5ga
-.section sj5gb
-.section sj5ha
-.section sj5hb
-.section sj5ia
-.section sj5ib
-.section sj5ja
-.section sj5jb
-.section sj5ka
-.section sj5kb
-.section sj5la
-.section sj5lb
-.section sj5ma
-.section sj5mb
-.section sj5na
-.section sj5nb
-.section sj5oa
-.section sj5ob
-.section sj5pa
-.section sj5pb
-.section sj5qa
-.section sj5qb
-.section sj5ra
-.section sj5rb
-.section sj5sa
-.section sj5sb
-.section sj5ta
-.section sj5tb
-.section sj5ua
-.section sj5ub
-.section sj5va
-.section sj5vb
-.section sj5wa
-.section sj5wb
-.section sj5xa
-.section sj5xb
-.section sj5ya
-.section sj5yb
-.section sj5za
-.section sj5zb
-.section sj51a
-.section sj51b
-.section sj52a
-.section sj52b
-.section sj53a
-.section sj53b
-.section sj54a
-.section sj54b
-.section sj55a
-.section sj55b
-.section sj56a
-.section sj56b
-.section sj57a
-.section sj57b
-.section sj58a
-.section sj58b
-.section sj59a
-.section sj59b
-.section sj50a
-.section sj50b
-.section sj6aa
-.section sj6ab
-.section sj6ba
-.section sj6bb
-.section sj6ca
-.section sj6cb
-.section sj6da
-.section sj6db
-.section sj6ea
-.section sj6eb
-.section sj6fa
-.section sj6fb
-.section sj6ga
-.section sj6gb
-.section sj6ha
-.section sj6hb
-.section sj6ia
-.section sj6ib
-.section sj6ja
-.section sj6jb
-.section sj6ka
-.section sj6kb
-.section sj6la
-.section sj6lb
-.section sj6ma
-.section sj6mb
-.section sj6na
-.section sj6nb
-.section sj6oa
-.section sj6ob
-.section sj6pa
-.section sj6pb
-.section sj6qa
-.section sj6qb
-.section sj6ra
-.section sj6rb
-.section sj6sa
-.section sj6sb
-.section sj6ta
-.section sj6tb
-.section sj6ua
-.section sj6ub
-.section sj6va
-.section sj6vb
-.section sj6wa
-.section sj6wb
-.section sj6xa
-.section sj6xb
-.section sj6ya
-.section sj6yb
-.section sj6za
-.section sj6zb
-.section sj61a
-.section sj61b
-.section sj62a
-.section sj62b
-.section sj63a
-.section sj63b
-.section sj64a
-.section sj64b
-.section sj65a
-.section sj65b
-.section sj66a
-.section sj66b
-.section sj67a
-.section sj67b
-.section sj68a
-.section sj68b
-.section sj69a
-.section sj69b
-.section sj60a
-.section sj60b
-.section sj7aa
-.section sj7ab
-.section sj7ba
-.section sj7bb
-.section sj7ca
-.section sj7cb
-.section sj7da
-.section sj7db
-.section sj7ea
-.section sj7eb
-.section sj7fa
-.section sj7fb
-.section sj7ga
-.section sj7gb
-.section sj7ha
-.section sj7hb
-.section sj7ia
-.section sj7ib
-.section sj7ja
-.section sj7jb
-.section sj7ka
-.section sj7kb
-.section sj7la
-.section sj7lb
-.section sj7ma
-.section sj7mb
-.section sj7na
-.section sj7nb
-.section sj7oa
-.section sj7ob
-.section sj7pa
-.section sj7pb
-.section sj7qa
-.section sj7qb
-.section sj7ra
-.section sj7rb
-.section sj7sa
-.section sj7sb
-.section sj7ta
-.section sj7tb
-.section sj7ua
-.section sj7ub
-.section sj7va
-.section sj7vb
-.section sj7wa
-.section sj7wb
-.section sj7xa
-.section sj7xb
-.section sj7ya
-.section sj7yb
-.section sj7za
-.section sj7zb
-.section sj71a
-.section sj71b
-.section sj72a
-.section sj72b
-.section sj73a
-.section sj73b
-.section sj74a
-.section sj74b
-.section sj75a
-.section sj75b
-.section sj76a
-.section sj76b
-.section sj77a
-.section sj77b
-.section sj78a
-.section sj78b
-.section sj79a
-.section sj79b
-.section sj70a
-.section sj70b
-.section sj8aa
-.section sj8ab
-.section sj8ba
-.section sj8bb
-.section sj8ca
-.section sj8cb
-.section sj8da
-.section sj8db
-.section sj8ea
-.section sj8eb
-.section sj8fa
-.section sj8fb
-.section sj8ga
-.section sj8gb
-.section sj8ha
-.section sj8hb
-.section sj8ia
-.section sj8ib
-.section sj8ja
-.section sj8jb
-.section sj8ka
-.section sj8kb
-.section sj8la
-.section sj8lb
-.section sj8ma
-.section sj8mb
-.section sj8na
-.section sj8nb
-.section sj8oa
-.section sj8ob
-.section sj8pa
-.section sj8pb
-.section sj8qa
-.section sj8qb
-.section sj8ra
-.section sj8rb
-.section sj8sa
-.section sj8sb
-.section sj8ta
-.section sj8tb
-.section sj8ua
-.section sj8ub
-.section sj8va
-.section sj8vb
-.section sj8wa
-.section sj8wb
-.section sj8xa
-.section sj8xb
-.section sj8ya
-.section sj8yb
-.section sj8za
-.section sj8zb
-.section sj81a
-.section sj81b
-.section sj82a
-.section sj82b
-.section sj83a
-.section sj83b
-.section sj84a
-.section sj84b
-.section sj85a
-.section sj85b
-.section sj86a
-.section sj86b
-.section sj87a
-.section sj87b
-.section sj88a
-.section sj88b
-.section sj89a
-.section sj89b
-.section sj80a
-.section sj80b
-.section sj9aa
-.section sj9ab
-.section sj9ba
-.section sj9bb
-.section sj9ca
-.section sj9cb
-.section sj9da
-.section sj9db
-.section sj9ea
-.section sj9eb
-.section sj9fa
-.section sj9fb
-.section sj9ga
-.section sj9gb
-.section sj9ha
-.section sj9hb
-.section sj9ia
-.section sj9ib
-.section sj9ja
-.section sj9jb
-.section sj9ka
-.section sj9kb
-.section sj9la
-.section sj9lb
-.section sj9ma
-.section sj9mb
-.section sj9na
-.section sj9nb
-.section sj9oa
-.section sj9ob
-.section sj9pa
-.section sj9pb
-.section sj9qa
-.section sj9qb
-.section sj9ra
-.section sj9rb
-.section sj9sa
-.section sj9sb
-.section sj9ta
-.section sj9tb
-.section sj9ua
-.section sj9ub
-.section sj9va
-.section sj9vb
-.section sj9wa
-.section sj9wb
-.section sj9xa
-.section sj9xb
-.section sj9ya
-.section sj9yb
-.section sj9za
-.section sj9zb
-.section sj91a
-.section sj91b
-.section sj92a
-.section sj92b
-.section sj93a
-.section sj93b
-.section sj94a
-.section sj94b
-.section sj95a
-.section sj95b
-.section sj96a
-.section sj96b
-.section sj97a
-.section sj97b
-.section sj98a
-.section sj98b
-.section sj99a
-.section sj99b
-.section sj90a
-.section sj90b
-.section sj0aa
-.section sj0ab
-.section sj0ba
-.section sj0bb
-.section sj0ca
-.section sj0cb
-.section sj0da
-.section sj0db
-.section sj0ea
-.section sj0eb
-.section sj0fa
-.section sj0fb
-.section sj0ga
-.section sj0gb
-.section sj0ha
-.section sj0hb
-.section sj0ia
-.section sj0ib
-.section sj0ja
-.section sj0jb
-.section sj0ka
-.section sj0kb
-.section sj0la
-.section sj0lb
-.section sj0ma
-.section sj0mb
-.section sj0na
-.section sj0nb
-.section sj0oa
-.section sj0ob
-.section sj0pa
-.section sj0pb
-.section sj0qa
-.section sj0qb
-.section sj0ra
-.section sj0rb
-.section sj0sa
-.section sj0sb
-.section sj0ta
-.section sj0tb
-.section sj0ua
-.section sj0ub
-.section sj0va
-.section sj0vb
-.section sj0wa
-.section sj0wb
-.section sj0xa
-.section sj0xb
-.section sj0ya
-.section sj0yb
-.section sj0za
-.section sj0zb
-.section sj01a
-.section sj01b
-.section sj02a
-.section sj02b
-.section sj03a
-.section sj03b
-.section sj04a
-.section sj04b
-.section sj05a
-.section sj05b
-.section sj06a
-.section sj06b
-.section sj07a
-.section sj07b
-.section sj08a
-.section sj08b
-.section sj09a
-.section sj09b
-.section sj00a
-.section sj00b
-.section skaaa
-.section skaab
-.section skaba
-.section skabb
-.section skaca
-.section skacb
-.section skada
-.section skadb
-.section skaea
-.section skaeb
-.section skafa
-.section skafb
-.section skaga
-.section skagb
-.section skaha
-.section skahb
-.section skaia
-.section skaib
-.section skaja
-.section skajb
-.section skaka
-.section skakb
-.section skala
-.section skalb
-.section skama
-.section skamb
-.section skana
-.section skanb
-.section skaoa
-.section skaob
-.section skapa
-.section skapb
-.section skaqa
-.section skaqb
-.section skara
-.section skarb
-.section skasa
-.section skasb
-.section skata
-.section skatb
-.section skaua
-.section skaub
-.section skava
-.section skavb
-.section skawa
-.section skawb
-.section skaxa
-.section skaxb
-.section skaya
-.section skayb
-.section skaza
-.section skazb
-.section ska1a
-.section ska1b
-.section ska2a
-.section ska2b
-.section ska3a
-.section ska3b
-.section ska4a
-.section ska4b
-.section ska5a
-.section ska5b
-.section ska6a
-.section ska6b
-.section ska7a
-.section ska7b
-.section ska8a
-.section ska8b
-.section ska9a
-.section ska9b
-.section ska0a
-.section ska0b
-.section skbaa
-.section skbab
-.section skbba
-.section skbbb
-.section skbca
-.section skbcb
-.section skbda
-.section skbdb
-.section skbea
-.section skbeb
-.section skbfa
-.section skbfb
-.section skbga
-.section skbgb
-.section skbha
-.section skbhb
-.section skbia
-.section skbib
-.section skbja
-.section skbjb
-.section skbka
-.section skbkb
-.section skbla
-.section skblb
-.section skbma
-.section skbmb
-.section skbna
-.section skbnb
-.section skboa
-.section skbob
-.section skbpa
-.section skbpb
-.section skbqa
-.section skbqb
-.section skbra
-.section skbrb
-.section skbsa
-.section skbsb
-.section skbta
-.section skbtb
-.section skbua
-.section skbub
-.section skbva
-.section skbvb
-.section skbwa
-.section skbwb
-.section skbxa
-.section skbxb
-.section skbya
-.section skbyb
-.section skbza
-.section skbzb
-.section skb1a
-.section skb1b
-.section skb2a
-.section skb2b
-.section skb3a
-.section skb3b
-.section skb4a
-.section skb4b
-.section skb5a
-.section skb5b
-.section skb6a
-.section skb6b
-.section skb7a
-.section skb7b
-.section skb8a
-.section skb8b
-.section skb9a
-.section skb9b
-.section skb0a
-.section skb0b
-.section skcaa
-.section skcab
-.section skcba
-.section skcbb
-.section skcca
-.section skccb
-.section skcda
-.section skcdb
-.section skcea
-.section skceb
-.section skcfa
-.section skcfb
-.section skcga
-.section skcgb
-.section skcha
-.section skchb
-.section skcia
-.section skcib
-.section skcja
-.section skcjb
-.section skcka
-.section skckb
-.section skcla
-.section skclb
-.section skcma
-.section skcmb
-.section skcna
-.section skcnb
-.section skcoa
-.section skcob
-.section skcpa
-.section skcpb
-.section skcqa
-.section skcqb
-.section skcra
-.section skcrb
-.section skcsa
-.section skcsb
-.section skcta
-.section skctb
-.section skcua
-.section skcub
-.section skcva
-.section skcvb
-.section skcwa
-.section skcwb
-.section skcxa
-.section skcxb
-.section skcya
-.section skcyb
-.section skcza
-.section skczb
-.section skc1a
-.section skc1b
-.section skc2a
-.section skc2b
-.section skc3a
-.section skc3b
-.section skc4a
-.section skc4b
-.section skc5a
-.section skc5b
-.section skc6a
-.section skc6b
-.section skc7a
-.section skc7b
-.section skc8a
-.section skc8b
-.section skc9a
-.section skc9b
-.section skc0a
-.section skc0b
-.section skdaa
-.section skdab
-.section skdba
-.section skdbb
-.section skdca
-.section skdcb
-.section skdda
-.section skddb
-.section skdea
-.section skdeb
-.section skdfa
-.section skdfb
-.section skdga
-.section skdgb
-.section skdha
-.section skdhb
-.section skdia
-.section skdib
-.section skdja
-.section skdjb
-.section skdka
-.section skdkb
-.section skdla
-.section skdlb
-.section skdma
-.section skdmb
-.section skdna
-.section skdnb
-.section skdoa
-.section skdob
-.section skdpa
-.section skdpb
-.section skdqa
-.section skdqb
-.section skdra
-.section skdrb
-.section skdsa
-.section skdsb
-.section skdta
-.section skdtb
-.section skdua
-.section skdub
-.section skdva
-.section skdvb
-.section skdwa
-.section skdwb
-.section skdxa
-.section skdxb
-.section skdya
-.section skdyb
-.section skdza
-.section skdzb
-.section skd1a
-.section skd1b
-.section skd2a
-.section skd2b
-.section skd3a
-.section skd3b
-.section skd4a
-.section skd4b
-.section skd5a
-.section skd5b
-.section skd6a
-.section skd6b
-.section skd7a
-.section skd7b
-.section skd8a
-.section skd8b
-.section skd9a
-.section skd9b
-.section skd0a
-.section skd0b
-.section skeaa
-.section skeab
-.section skeba
-.section skebb
-.section skeca
-.section skecb
-.section skeda
-.section skedb
-.section skeea
-.section skeeb
-.section skefa
-.section skefb
-.section skega
-.section skegb
-.section skeha
-.section skehb
-.section skeia
-.section skeib
-.section skeja
-.section skejb
-.section skeka
-.section skekb
-.section skela
-.section skelb
-.section skema
-.section skemb
-.section skena
-.section skenb
-.section skeoa
-.section skeob
-.section skepa
-.section skepb
-.section skeqa
-.section skeqb
-.section skera
-.section skerb
-.section skesa
-.section skesb
-.section sketa
-.section sketb
-.section skeua
-.section skeub
-.section skeva
-.section skevb
-.section skewa
-.section skewb
-.section skexa
-.section skexb
-.section skeya
-.section skeyb
-.section skeza
-.section skezb
-.section ske1a
-.section ske1b
-.section ske2a
-.section ske2b
-.section ske3a
-.section ske3b
-.section ske4a
-.section ske4b
-.section ske5a
-.section ske5b
-.section ske6a
-.section ske6b
-.section ske7a
-.section ske7b
-.section ske8a
-.section ske8b
-.section ske9a
-.section ske9b
-.section ske0a
-.section ske0b
-.section skfaa
-.section skfab
-.section skfba
-.section skfbb
-.section skfca
-.section skfcb
-.section skfda
-.section skfdb
-.section skfea
-.section skfeb
-.section skffa
-.section skffb
-.section skfga
-.section skfgb
-.section skfha
-.section skfhb
-.section skfia
-.section skfib
-.section skfja
-.section skfjb
-.section skfka
-.section skfkb
-.section skfla
-.section skflb
-.section skfma
-.section skfmb
-.section skfna
-.section skfnb
-.section skfoa
-.section skfob
-.section skfpa
-.section skfpb
-.section skfqa
-.section skfqb
-.section skfra
-.section skfrb
-.section skfsa
-.section skfsb
-.section skfta
-.section skftb
-.section skfua
-.section skfub
-.section skfva
-.section skfvb
-.section skfwa
-.section skfwb
-.section skfxa
-.section skfxb
-.section skfya
-.section skfyb
-.section skfza
-.section skfzb
-.section skf1a
-.section skf1b
-.section skf2a
-.section skf2b
-.section skf3a
-.section skf3b
-.section skf4a
-.section skf4b
-.section skf5a
-.section skf5b
-.section skf6a
-.section skf6b
-.section skf7a
-.section skf7b
-.section skf8a
-.section skf8b
-.section skf9a
-.section skf9b
-.section skf0a
-.section skf0b
-.section skgaa
-.section skgab
-.section skgba
-.section skgbb
-.section skgca
-.section skgcb
-.section skgda
-.section skgdb
-.section skgea
-.section skgeb
-.section skgfa
-.section skgfb
-.section skgga
-.section skggb
-.section skgha
-.section skghb
-.section skgia
-.section skgib
-.section skgja
-.section skgjb
-.section skgka
-.section skgkb
-.section skgla
-.section skglb
-.section skgma
-.section skgmb
-.section skgna
-.section skgnb
-.section skgoa
-.section skgob
-.section skgpa
-.section skgpb
-.section skgqa
-.section skgqb
-.section skgra
-.section skgrb
-.section skgsa
-.section skgsb
-.section skgta
-.section skgtb
-.section skgua
-.section skgub
-.section skgva
-.section skgvb
-.section skgwa
-.section skgwb
-.section skgxa
-.section skgxb
-.section skgya
-.section skgyb
-.section skgza
-.section skgzb
-.section skg1a
-.section skg1b
-.section skg2a
-.section skg2b
-.section skg3a
-.section skg3b
-.section skg4a
-.section skg4b
-.section skg5a
-.section skg5b
-.section skg6a
-.section skg6b
-.section skg7a
-.section skg7b
-.section skg8a
-.section skg8b
-.section skg9a
-.section skg9b
-.section skg0a
-.section skg0b
-.section skhaa
-.section skhab
-.section skhba
-.section skhbb
-.section skhca
-.section skhcb
-.section skhda
-.section skhdb
-.section skhea
-.section skheb
-.section skhfa
-.section skhfb
-.section skhga
-.section skhgb
-.section skhha
-.section skhhb
-.section skhia
-.section skhib
-.section skhja
-.section skhjb
-.section skhka
-.section skhkb
-.section skhla
-.section skhlb
-.section skhma
-.section skhmb
-.section skhna
-.section skhnb
-.section skhoa
-.section skhob
-.section skhpa
-.section skhpb
-.section skhqa
-.section skhqb
-.section skhra
-.section skhrb
-.section skhsa
-.section skhsb
-.section skhta
-.section skhtb
-.section skhua
-.section skhub
-.section skhva
-.section skhvb
-.section skhwa
-.section skhwb
-.section skhxa
-.section skhxb
-.section skhya
-.section skhyb
-.section skhza
-.section skhzb
-.section skh1a
-.section skh1b
-.section skh2a
-.section skh2b
-.section skh3a
-.section skh3b
-.section skh4a
-.section skh4b
-.section skh5a
-.section skh5b
-.section skh6a
-.section skh6b
-.section skh7a
-.section skh7b
-.section skh8a
-.section skh8b
-.section skh9a
-.section skh9b
-.section skh0a
-.section skh0b
-.section skiaa
-.section skiab
-.section skiba
-.section skibb
-.section skica
-.section skicb
-.section skida
-.section skidb
-.section skiea
-.section skieb
-.section skifa
-.section skifb
-.section skiga
-.section skigb
-.section skiha
-.section skihb
-.section skiia
-.section skiib
-.section skija
-.section skijb
-.section skika
-.section skikb
-.section skila
-.section skilb
-.section skima
-.section skimb
-.section skina
-.section skinb
-.section skioa
-.section skiob
-.section skipa
-.section skipb
-.section skiqa
-.section skiqb
-.section skira
-.section skirb
-.section skisa
-.section skisb
-.section skita
-.section skitb
-.section skiua
-.section skiub
-.section skiva
-.section skivb
-.section skiwa
-.section skiwb
-.section skixa
-.section skixb
-.section skiya
-.section skiyb
-.section skiza
-.section skizb
-.section ski1a
-.section ski1b
-.section ski2a
-.section ski2b
-.section ski3a
-.section ski3b
-.section ski4a
-.section ski4b
-.section ski5a
-.section ski5b
-.section ski6a
-.section ski6b
-.section ski7a
-.section ski7b
-.section ski8a
-.section ski8b
-.section ski9a
-.section ski9b
-.section ski0a
-.section ski0b
-.section skjaa
-.section skjab
-.section skjba
-.section skjbb
-.section skjca
-.section skjcb
-.section skjda
-.section skjdb
-.section skjea
-.section skjeb
-.section skjfa
-.section skjfb
-.section skjga
-.section skjgb
-.section skjha
-.section skjhb
-.section skjia
-.section skjib
-.section skjja
-.section skjjb
-.section skjka
-.section skjkb
-.section skjla
-.section skjlb
-.section skjma
-.section skjmb
-.section skjna
-.section skjnb
-.section skjoa
-.section skjob
-.section skjpa
-.section skjpb
-.section skjqa
-.section skjqb
-.section skjra
-.section skjrb
-.section skjsa
-.section skjsb
-.section skjta
-.section skjtb
-.section skjua
-.section skjub
-.section skjva
-.section skjvb
-.section skjwa
-.section skjwb
-.section skjxa
-.section skjxb
-.section skjya
-.section skjyb
-.section skjza
-.section skjzb
-.section skj1a
-.section skj1b
-.section skj2a
-.section skj2b
-.section skj3a
-.section skj3b
-.section skj4a
-.section skj4b
-.section skj5a
-.section skj5b
-.section skj6a
-.section skj6b
-.section skj7a
-.section skj7b
-.section skj8a
-.section skj8b
-.section skj9a
-.section skj9b
-.section skj0a
-.section skj0b
-.section skkaa
-.section skkab
-.section skkba
-.section skkbb
-.section skkca
-.section skkcb
-.section skkda
-.section skkdb
-.section skkea
-.section skkeb
-.section skkfa
-.section skkfb
-.section skkga
-.section skkgb
-.section skkha
-.section skkhb
-.section skkia
-.section skkib
-.section skkja
-.section skkjb
-.section skkka
-.section skkkb
-.section skkla
-.section skklb
-.section skkma
-.section skkmb
-.section skkna
-.section skknb
-.section skkoa
-.section skkob
-.section skkpa
-.section skkpb
-.section skkqa
-.section skkqb
-.section skkra
-.section skkrb
-.section skksa
-.section skksb
-.section skkta
-.section skktb
-.section skkua
-.section skkub
-.section skkva
-.section skkvb
-.section skkwa
-.section skkwb
-.section skkxa
-.section skkxb
-.section skkya
-.section skkyb
-.section skkza
-.section skkzb
-.section skk1a
-.section skk1b
-.section skk2a
-.section skk2b
-.section skk3a
-.section skk3b
-.section skk4a
-.section skk4b
-.section skk5a
-.section skk5b
-.section skk6a
-.section skk6b
-.section skk7a
-.section skk7b
-.section skk8a
-.section skk8b
-.section skk9a
-.section skk9b
-.section skk0a
-.section skk0b
-.section sklaa
-.section sklab
-.section sklba
-.section sklbb
-.section sklca
-.section sklcb
-.section sklda
-.section skldb
-.section sklea
-.section skleb
-.section sklfa
-.section sklfb
-.section sklga
-.section sklgb
-.section sklha
-.section sklhb
-.section sklia
-.section sklib
-.section sklja
-.section skljb
-.section sklka
-.section sklkb
-.section sklla
-.section skllb
-.section sklma
-.section sklmb
-.section sklna
-.section sklnb
-.section skloa
-.section sklob
-.section sklpa
-.section sklpb
-.section sklqa
-.section sklqb
-.section sklra
-.section sklrb
-.section sklsa
-.section sklsb
-.section sklta
-.section skltb
-.section sklua
-.section sklub
-.section sklva
-.section sklvb
-.section sklwa
-.section sklwb
-.section sklxa
-.section sklxb
-.section sklya
-.section sklyb
-.section sklza
-.section sklzb
-.section skl1a
-.section skl1b
-.section skl2a
-.section skl2b
-.section skl3a
-.section skl3b
-.section skl4a
-.section skl4b
-.section skl5a
-.section skl5b
-.section skl6a
-.section skl6b
-.section skl7a
-.section skl7b
-.section skl8a
-.section skl8b
-.section skl9a
-.section skl9b
-.section skl0a
-.section skl0b
-.section skmaa
-.section skmab
-.section skmba
-.section skmbb
-.section skmca
-.section skmcb
-.section skmda
-.section skmdb
-.section skmea
-.section skmeb
-.section skmfa
-.section skmfb
-.section skmga
-.section skmgb
-.section skmha
-.section skmhb
-.section skmia
-.section skmib
-.section skmja
-.section skmjb
-.section skmka
-.section skmkb
-.section skmla
-.section skmlb
-.section skmma
-.section skmmb
-.section skmna
-.section skmnb
-.section skmoa
-.section skmob
-.section skmpa
-.section skmpb
-.section skmqa
-.section skmqb
-.section skmra
-.section skmrb
-.section skmsa
-.section skmsb
-.section skmta
-.section skmtb
-.section skmua
-.section skmub
-.section skmva
-.section skmvb
-.section skmwa
-.section skmwb
-.section skmxa
-.section skmxb
-.section skmya
-.section skmyb
-.section skmza
-.section skmzb
-.section skm1a
-.section skm1b
-.section skm2a
-.section skm2b
-.section skm3a
-.section skm3b
-.section skm4a
-.section skm4b
-.section skm5a
-.section skm5b
-.section skm6a
-.section skm6b
-.section skm7a
-.section skm7b
-.section skm8a
-.section skm8b
-.section skm9a
-.section skm9b
-.section skm0a
-.section skm0b
-.section sknaa
-.section sknab
-.section sknba
-.section sknbb
-.section sknca
-.section skncb
-.section sknda
-.section skndb
-.section sknea
-.section skneb
-.section sknfa
-.section sknfb
-.section sknga
-.section skngb
-.section sknha
-.section sknhb
-.section sknia
-.section sknib
-.section sknja
-.section sknjb
-.section sknka
-.section sknkb
-.section sknla
-.section sknlb
-.section sknma
-.section sknmb
-.section sknna
-.section sknnb
-.section sknoa
-.section sknob
-.section sknpa
-.section sknpb
-.section sknqa
-.section sknqb
-.section sknra
-.section sknrb
-.section sknsa
-.section sknsb
-.section sknta
-.section skntb
-.section sknua
-.section sknub
-.section sknva
-.section sknvb
-.section sknwa
-.section sknwb
-.section sknxa
-.section sknxb
-.section sknya
-.section sknyb
-.section sknza
-.section sknzb
-.section skn1a
-.section skn1b
-.section skn2a
-.section skn2b
-.section skn3a
-.section skn3b
-.section skn4a
-.section skn4b
-.section skn5a
-.section skn5b
-.section skn6a
-.section skn6b
-.section skn7a
-.section skn7b
-.section skn8a
-.section skn8b
-.section skn9a
-.section skn9b
-.section skn0a
-.section skn0b
-.section skoaa
-.section skoab
-.section skoba
-.section skobb
-.section skoca
-.section skocb
-.section skoda
-.section skodb
-.section skoea
-.section skoeb
-.section skofa
-.section skofb
-.section skoga
-.section skogb
-.section skoha
-.section skohb
-.section skoia
-.section skoib
-.section skoja
-.section skojb
-.section skoka
-.section skokb
-.section skola
-.section skolb
-.section skoma
-.section skomb
-.section skona
-.section skonb
-.section skooa
-.section skoob
-.section skopa
-.section skopb
-.section skoqa
-.section skoqb
-.section skora
-.section skorb
-.section skosa
-.section skosb
-.section skota
-.section skotb
-.section skoua
-.section skoub
-.section skova
-.section skovb
-.section skowa
-.section skowb
-.section skoxa
-.section skoxb
-.section skoya
-.section skoyb
-.section skoza
-.section skozb
-.section sko1a
-.section sko1b
-.section sko2a
-.section sko2b
-.section sko3a
-.section sko3b
-.section sko4a
-.section sko4b
-.section sko5a
-.section sko5b
-.section sko6a
-.section sko6b
-.section sko7a
-.section sko7b
-.section sko8a
-.section sko8b
-.section sko9a
-.section sko9b
-.section sko0a
-.section sko0b
-.section skpaa
-.section skpab
-.section skpba
-.section skpbb
-.section skpca
-.section skpcb
-.section skpda
-.section skpdb
-.section skpea
-.section skpeb
-.section skpfa
-.section skpfb
-.section skpga
-.section skpgb
-.section skpha
-.section skphb
-.section skpia
-.section skpib
-.section skpja
-.section skpjb
-.section skpka
-.section skpkb
-.section skpla
-.section skplb
-.section skpma
-.section skpmb
-.section skpna
-.section skpnb
-.section skpoa
-.section skpob
-.section skppa
-.section skppb
-.section skpqa
-.section skpqb
-.section skpra
-.section skprb
-.section skpsa
-.section skpsb
-.section skpta
-.section skptb
-.section skpua
-.section skpub
-.section skpva
-.section skpvb
-.section skpwa
-.section skpwb
-.section skpxa
-.section skpxb
-.section skpya
-.section skpyb
-.section skpza
-.section skpzb
-.section skp1a
-.section skp1b
-.section skp2a
-.section skp2b
-.section skp3a
-.section skp3b
-.section skp4a
-.section skp4b
-.section skp5a
-.section skp5b
-.section skp6a
-.section skp6b
-.section skp7a
-.section skp7b
-.section skp8a
-.section skp8b
-.section skp9a
-.section skp9b
-.section skp0a
-.section skp0b
-.section skqaa
-.section skqab
-.section skqba
-.section skqbb
-.section skqca
-.section skqcb
-.section skqda
-.section skqdb
-.section skqea
-.section skqeb
-.section skqfa
-.section skqfb
-.section skqga
-.section skqgb
-.section skqha
-.section skqhb
-.section skqia
-.section skqib
-.section skqja
-.section skqjb
-.section skqka
-.section skqkb
-.section skqla
-.section skqlb
-.section skqma
-.section skqmb
-.section skqna
-.section skqnb
-.section skqoa
-.section skqob
-.section skqpa
-.section skqpb
-.section skqqa
-.section skqqb
-.section skqra
-.section skqrb
-.section skqsa
-.section skqsb
-.section skqta
-.section skqtb
-.section skqua
-.section skqub
-.section skqva
-.section skqvb
-.section skqwa
-.section skqwb
-.section skqxa
-.section skqxb
-.section skqya
-.section skqyb
-.section skqza
-.section skqzb
-.section skq1a
-.section skq1b
-.section skq2a
-.section skq2b
-.section skq3a
-.section skq3b
-.section skq4a
-.section skq4b
-.section skq5a
-.section skq5b
-.section skq6a
-.section skq6b
-.section skq7a
-.section skq7b
-.section skq8a
-.section skq8b
-.section skq9a
-.section skq9b
-.section skq0a
-.section skq0b
-.section skraa
-.section skrab
-.section skrba
-.section skrbb
-.section skrca
-.section skrcb
-.section skrda
-.section skrdb
-.section skrea
-.section skreb
-.section skrfa
-.section skrfb
-.section skrga
-.section skrgb
-.section skrha
-.section skrhb
-.section skria
-.section skrib
-.section skrja
-.section skrjb
-.section skrka
-.section skrkb
-.section skrla
-.section skrlb
-.section skrma
-.section skrmb
-.section skrna
-.section skrnb
-.section skroa
-.section skrob
-.section skrpa
-.section skrpb
-.section skrqa
-.section skrqb
-.section skrra
-.section skrrb
-.section skrsa
-.section skrsb
-.section skrta
-.section skrtb
-.section skrua
-.section skrub
-.section skrva
-.section skrvb
-.section skrwa
-.section skrwb
-.section skrxa
-.section skrxb
-.section skrya
-.section skryb
-.section skrza
-.section skrzb
-.section skr1a
-.section skr1b
-.section skr2a
-.section skr2b
-.section skr3a
-.section skr3b
-.section skr4a
-.section skr4b
-.section skr5a
-.section skr5b
-.section skr6a
-.section skr6b
-.section skr7a
-.section skr7b
-.section skr8a
-.section skr8b
-.section skr9a
-.section skr9b
-.section skr0a
-.section skr0b
-.section sksaa
-.section sksab
-.section sksba
-.section sksbb
-.section sksca
-.section skscb
-.section sksda
-.section sksdb
-.section sksea
-.section skseb
-.section sksfa
-.section sksfb
-.section sksga
-.section sksgb
-.section sksha
-.section skshb
-.section sksia
-.section sksib
-.section sksja
-.section sksjb
-.section skska
-.section skskb
-.section sksla
-.section skslb
-.section sksma
-.section sksmb
-.section sksna
-.section sksnb
-.section sksoa
-.section sksob
-.section skspa
-.section skspb
-.section sksqa
-.section sksqb
-.section sksra
-.section sksrb
-.section skssa
-.section skssb
-.section sksta
-.section skstb
-.section sksua
-.section sksub
-.section sksva
-.section sksvb
-.section skswa
-.section skswb
-.section sksxa
-.section sksxb
-.section sksya
-.section sksyb
-.section sksza
-.section skszb
-.section sks1a
-.section sks1b
-.section sks2a
-.section sks2b
-.section sks3a
-.section sks3b
-.section sks4a
-.section sks4b
-.section sks5a
-.section sks5b
-.section sks6a
-.section sks6b
-.section sks7a
-.section sks7b
-.section sks8a
-.section sks8b
-.section sks9a
-.section sks9b
-.section sks0a
-.section sks0b
-.section sktaa
-.section sktab
-.section sktba
-.section sktbb
-.section sktca
-.section sktcb
-.section sktda
-.section sktdb
-.section sktea
-.section skteb
-.section sktfa
-.section sktfb
-.section sktga
-.section sktgb
-.section sktha
-.section skthb
-.section sktia
-.section sktib
-.section sktja
-.section sktjb
-.section sktka
-.section sktkb
-.section sktla
-.section sktlb
-.section sktma
-.section sktmb
-.section sktna
-.section sktnb
-.section sktoa
-.section sktob
-.section sktpa
-.section sktpb
-.section sktqa
-.section sktqb
-.section sktra
-.section sktrb
-.section sktsa
-.section sktsb
-.section sktta
-.section skttb
-.section sktua
-.section sktub
-.section sktva
-.section sktvb
-.section sktwa
-.section sktwb
-.section sktxa
-.section sktxb
-.section sktya
-.section sktyb
-.section sktza
-.section sktzb
-.section skt1a
-.section skt1b
-.section skt2a
-.section skt2b
-.section skt3a
-.section skt3b
-.section skt4a
-.section skt4b
-.section skt5a
-.section skt5b
-.section skt6a
-.section skt6b
-.section skt7a
-.section skt7b
-.section skt8a
-.section skt8b
-.section skt9a
-.section skt9b
-.section skt0a
-.section skt0b
-.section skuaa
-.section skuab
-.section skuba
-.section skubb
-.section skuca
-.section skucb
-.section skuda
-.section skudb
-.section skuea
-.section skueb
-.section skufa
-.section skufb
-.section skuga
-.section skugb
-.section skuha
-.section skuhb
-.section skuia
-.section skuib
-.section skuja
-.section skujb
-.section skuka
-.section skukb
-.section skula
-.section skulb
-.section skuma
-.section skumb
-.section skuna
-.section skunb
-.section skuoa
-.section skuob
-.section skupa
-.section skupb
-.section skuqa
-.section skuqb
-.section skura
-.section skurb
-.section skusa
-.section skusb
-.section skuta
-.section skutb
-.section skuua
-.section skuub
-.section skuva
-.section skuvb
-.section skuwa
-.section skuwb
-.section skuxa
-.section skuxb
-.section skuya
-.section skuyb
-.section skuza
-.section skuzb
-.section sku1a
-.section sku1b
-.section sku2a
-.section sku2b
-.section sku3a
-.section sku3b
-.section sku4a
-.section sku4b
-.section sku5a
-.section sku5b
-.section sku6a
-.section sku6b
-.section sku7a
-.section sku7b
-.section sku8a
-.section sku8b
-.section sku9a
-.section sku9b
-.section sku0a
-.section sku0b
-.section skvaa
-.section skvab
-.section skvba
-.section skvbb
-.section skvca
-.section skvcb
-.section skvda
-.section skvdb
-.section skvea
-.section skveb
-.section skvfa
-.section skvfb
-.section skvga
-.section skvgb
-.section skvha
-.section skvhb
-.section skvia
-.section skvib
-.section skvja
-.section skvjb
-.section skvka
-.section skvkb
-.section skvla
-.section skvlb
-.section skvma
-.section skvmb
-.section skvna
-.section skvnb
-.section skvoa
-.section skvob
-.section skvpa
-.section skvpb
-.section skvqa
-.section skvqb
-.section skvra
-.section skvrb
-.section skvsa
-.section skvsb
-.section skvta
-.section skvtb
-.section skvua
-.section skvub
-.section skvva
-.section skvvb
-.section skvwa
-.section skvwb
-.section skvxa
-.section skvxb
-.section skvya
-.section skvyb
-.section skvza
-.section skvzb
-.section skv1a
-.section skv1b
-.section skv2a
-.section skv2b
-.section skv3a
-.section skv3b
-.section skv4a
-.section skv4b
-.section skv5a
-.section skv5b
-.section skv6a
-.section skv6b
-.section skv7a
-.section skv7b
-.section skv8a
-.section skv8b
-.section skv9a
-.section skv9b
-.section skv0a
-.section skv0b
-.section skwaa
-.section skwab
-.section skwba
-.section skwbb
-.section skwca
-.section skwcb
-.section skwda
-.section skwdb
-.section skwea
-.section skweb
-.section skwfa
-.section skwfb
-.section skwga
-.section skwgb
-.section skwha
-.section skwhb
-.section skwia
-.section skwib
-.section skwja
-.section skwjb
-.section skwka
-.section skwkb
-.section skwla
-.section skwlb
-.section skwma
-.section skwmb
-.section skwna
-.section skwnb
-.section skwoa
-.section skwob
-.section skwpa
-.section skwpb
-.section skwqa
-.section skwqb
-.section skwra
-.section skwrb
-.section skwsa
-.section skwsb
-.section skwta
-.section skwtb
-.section skwua
-.section skwub
-.section skwva
-.section skwvb
-.section skwwa
-.section skwwb
-.section skwxa
-.section skwxb
-.section skwya
-.section skwyb
-.section skwza
-.section skwzb
-.section skw1a
-.section skw1b
-.section skw2a
-.section skw2b
-.section skw3a
-.section skw3b
-.section skw4a
-.section skw4b
-.section skw5a
-.section skw5b
-.section skw6a
-.section skw6b
-.section skw7a
-.section skw7b
-.section skw8a
-.section skw8b
-.section skw9a
-.section skw9b
-.section skw0a
-.section skw0b
-.section skxaa
-.section skxab
-.section skxba
-.section skxbb
-.section skxca
-.section skxcb
-.section skxda
-.section skxdb
-.section skxea
-.section skxeb
-.section skxfa
-.section skxfb
-.section skxga
-.section skxgb
-.section skxha
-.section skxhb
-.section skxia
-.section skxib
-.section skxja
-.section skxjb
-.section skxka
-.section skxkb
-.section skxla
-.section skxlb
-.section skxma
-.section skxmb
-.section skxna
-.section skxnb
-.section skxoa
-.section skxob
-.section skxpa
-.section skxpb
-.section skxqa
-.section skxqb
-.section skxra
-.section skxrb
-.section skxsa
-.section skxsb
-.section skxta
-.section skxtb
-.section skxua
-.section skxub
-.section skxva
-.section skxvb
-.section skxwa
-.section skxwb
-.section skxxa
-.section skxxb
-.section skxya
-.section skxyb
-.section skxza
-.section skxzb
-.section skx1a
-.section skx1b
-.section skx2a
-.section skx2b
-.section skx3a
-.section skx3b
-.section skx4a
-.section skx4b
-.section skx5a
-.section skx5b
-.section skx6a
-.section skx6b
-.section skx7a
-.section skx7b
-.section skx8a
-.section skx8b
-.section skx9a
-.section skx9b
-.section skx0a
-.section skx0b
-.section skyaa
-.section skyab
-.section skyba
-.section skybb
-.section skyca
-.section skycb
-.section skyda
-.section skydb
-.section skyea
-.section skyeb
-.section skyfa
-.section skyfb
-.section skyga
-.section skygb
-.section skyha
-.section skyhb
-.section skyia
-.section skyib
-.section skyja
-.section skyjb
-.section skyka
-.section skykb
-.section skyla
-.section skylb
-.section skyma
-.section skymb
-.section skyna
-.section skynb
-.section skyoa
-.section skyob
-.section skypa
-.section skypb
-.section skyqa
-.section skyqb
-.section skyra
-.section skyrb
-.section skysa
-.section skysb
-.section skyta
-.section skytb
-.section skyua
-.section skyub
-.section skyva
-.section skyvb
-.section skywa
-.section skywb
-.section skyxa
-.section skyxb
-.section skyya
-.section skyyb
-.section skyza
-.section skyzb
-.section sky1a
-.section sky1b
-.section sky2a
-.section sky2b
-.section sky3a
-.section sky3b
-.section sky4a
-.section sky4b
-.section sky5a
-.section sky5b
-.section sky6a
-.section sky6b
-.section sky7a
-.section sky7b
-.section sky8a
-.section sky8b
-.section sky9a
-.section sky9b
-.section sky0a
-.section sky0b
-.section skzaa
-.section skzab
-.section skzba
-.section skzbb
-.section skzca
-.section skzcb
-.section skzda
-.section skzdb
-.section skzea
-.section skzeb
-.section skzfa
-.section skzfb
-.section skzga
-.section skzgb
-.section skzha
-.section skzhb
-.section skzia
-.section skzib
-.section skzja
-.section skzjb
-.section skzka
-.section skzkb
-.section skzla
-.section skzlb
-.section skzma
-.section skzmb
-.section skzna
-.section skznb
-.section skzoa
-.section skzob
-.section skzpa
-.section skzpb
-.section skzqa
-.section skzqb
-.section skzra
-.section skzrb
-.section skzsa
-.section skzsb
-.section skzta
-.section skztb
-.section skzua
-.section skzub
-.section skzva
-.section skzvb
-.section skzwa
-.section skzwb
-.section skzxa
-.section skzxb
-.section skzya
-.section skzyb
-.section skzza
-.section skzzb
-.section skz1a
-.section skz1b
-.section skz2a
-.section skz2b
-.section skz3a
-.section skz3b
-.section skz4a
-.section skz4b
-.section skz5a
-.section skz5b
-.section skz6a
-.section skz6b
-.section skz7a
-.section skz7b
-.section skz8a
-.section skz8b
-.section skz9a
-.section skz9b
-.section skz0a
-.section skz0b
-.section sk1aa
-.section sk1ab
-.section sk1ba
-.section sk1bb
-.section sk1ca
-.section sk1cb
-.section sk1da
-.section sk1db
-.section sk1ea
-.section sk1eb
-.section sk1fa
-.section sk1fb
-.section sk1ga
-.section sk1gb
-.section sk1ha
-.section sk1hb
-.section sk1ia
-.section sk1ib
-.section sk1ja
-.section sk1jb
-.section sk1ka
-.section sk1kb
-.section sk1la
-.section sk1lb
-.section sk1ma
-.section sk1mb
-.section sk1na
-.section sk1nb
-.section sk1oa
-.section sk1ob
-.section sk1pa
-.section sk1pb
-.section sk1qa
-.section sk1qb
-.section sk1ra
-.section sk1rb
-.section sk1sa
-.section sk1sb
-.section sk1ta
-.section sk1tb
-.section sk1ua
-.section sk1ub
-.section sk1va
-.section sk1vb
-.section sk1wa
-.section sk1wb
-.section sk1xa
-.section sk1xb
-.section sk1ya
-.section sk1yb
-.section sk1za
-.section sk1zb
-.section sk11a
-.section sk11b
-.section sk12a
-.section sk12b
-.section sk13a
-.section sk13b
-.section sk14a
-.section sk14b
-.section sk15a
-.section sk15b
-.section sk16a
-.section sk16b
-.section sk17a
-.section sk17b
-.section sk18a
-.section sk18b
-.section sk19a
-.section sk19b
-.section sk10a
-.section sk10b
-.section sk2aa
-.section sk2ab
-.section sk2ba
-.section sk2bb
-.section sk2ca
-.section sk2cb
-.section sk2da
-.section sk2db
-.section sk2ea
-.section sk2eb
-.section sk2fa
-.section sk2fb
-.section sk2ga
-.section sk2gb
-.section sk2ha
-.section sk2hb
-.section sk2ia
-.section sk2ib
-.section sk2ja
-.section sk2jb
-.section sk2ka
-.section sk2kb
-.section sk2la
-.section sk2lb
-.section sk2ma
-.section sk2mb
-.section sk2na
-.section sk2nb
-.section sk2oa
-.section sk2ob
-.section sk2pa
-.section sk2pb
-.section sk2qa
-.section sk2qb
-.section sk2ra
-.section sk2rb
-.section sk2sa
-.section sk2sb
-.section sk2ta
-.section sk2tb
-.section sk2ua
-.section sk2ub
-.section sk2va
-.section sk2vb
-.section sk2wa
-.section sk2wb
-.section sk2xa
-.section sk2xb
-.section sk2ya
-.section sk2yb
-.section sk2za
-.section sk2zb
-.section sk21a
-.section sk21b
-.section sk22a
-.section sk22b
-.section sk23a
-.section sk23b
-.section sk24a
-.section sk24b
-.section sk25a
-.section sk25b
-.section sk26a
-.section sk26b
-.section sk27a
-.section sk27b
-.section sk28a
-.section sk28b
-.section sk29a
-.section sk29b
-.section sk20a
-.section sk20b
-.section sk3aa
-.section sk3ab
-.section sk3ba
-.section sk3bb
-.section sk3ca
-.section sk3cb
-.section sk3da
-.section sk3db
-.section sk3ea
-.section sk3eb
-.section sk3fa
-.section sk3fb
-.section sk3ga
-.section sk3gb
-.section sk3ha
-.section sk3hb
-.section sk3ia
-.section sk3ib
-.section sk3ja
-.section sk3jb
-.section sk3ka
-.section sk3kb
-.section sk3la
-.section sk3lb
-.section sk3ma
-.section sk3mb
-.section sk3na
-.section sk3nb
-.section sk3oa
-.section sk3ob
-.section sk3pa
-.section sk3pb
-.section sk3qa
-.section sk3qb
-.section sk3ra
-.section sk3rb
-.section sk3sa
-.section sk3sb
-.section sk3ta
-.section sk3tb
-.section sk3ua
-.section sk3ub
-.section sk3va
-.section sk3vb
-.section sk3wa
-.section sk3wb
-.section sk3xa
-.section sk3xb
-.section sk3ya
-.section sk3yb
-.section sk3za
-.section sk3zb
-.section sk31a
-.section sk31b
-.section sk32a
-.section sk32b
-.section sk33a
-.section sk33b
-.section sk34a
-.section sk34b
-.section sk35a
-.section sk35b
-.section sk36a
-.section sk36b
-.section sk37a
-.section sk37b
-.section sk38a
-.section sk38b
-.section sk39a
-.section sk39b
-.section sk30a
-.section sk30b
-.section sk4aa
-.section sk4ab
-.section sk4ba
-.section sk4bb
-.section sk4ca
-.section sk4cb
-.section sk4da
-.section sk4db
-.section sk4ea
-.section sk4eb
-.section sk4fa
-.section sk4fb
-.section sk4ga
-.section sk4gb
-.section sk4ha
-.section sk4hb
-.section sk4ia
-.section sk4ib
-.section sk4ja
-.section sk4jb
-.section sk4ka
-.section sk4kb
-.section sk4la
-.section sk4lb
-.section sk4ma
-.section sk4mb
-.section sk4na
-.section sk4nb
-.section sk4oa
-.section sk4ob
-.section sk4pa
-.section sk4pb
-.section sk4qa
-.section sk4qb
-.section sk4ra
-.section sk4rb
-.section sk4sa
-.section sk4sb
-.section sk4ta
-.section sk4tb
-.section sk4ua
-.section sk4ub
-.section sk4va
-.section sk4vb
-.section sk4wa
-.section sk4wb
-.section sk4xa
-.section sk4xb
-.section sk4ya
-.section sk4yb
-.section sk4za
-.section sk4zb
-.section sk41a
-.section sk41b
-.section sk42a
-.section sk42b
-.section sk43a
-.section sk43b
-.section sk44a
-.section sk44b
-.section sk45a
-.section sk45b
-.section sk46a
-.section sk46b
-.section sk47a
-.section sk47b
-.section sk48a
-.section sk48b
-.section sk49a
-.section sk49b
-.section sk40a
-.section sk40b
-.section sk5aa
-.section sk5ab
-.section sk5ba
-.section sk5bb
-.section sk5ca
-.section sk5cb
-.section sk5da
-.section sk5db
-.section sk5ea
-.section sk5eb
-.section sk5fa
-.section sk5fb
-.section sk5ga
-.section sk5gb
-.section sk5ha
-.section sk5hb
-.section sk5ia
-.section sk5ib
-.section sk5ja
-.section sk5jb
-.section sk5ka
-.section sk5kb
-.section sk5la
-.section sk5lb
-.section sk5ma
-.section sk5mb
-.section sk5na
-.section sk5nb
-.section sk5oa
-.section sk5ob
-.section sk5pa
-.section sk5pb
-.section sk5qa
-.section sk5qb
-.section sk5ra
-.section sk5rb
-.section sk5sa
-.section sk5sb
-.section sk5ta
-.section sk5tb
-.section sk5ua
-.section sk5ub
-.section sk5va
-.section sk5vb
-.section sk5wa
-.section sk5wb
-.section sk5xa
-.section sk5xb
-.section sk5ya
-.section sk5yb
-.section sk5za
-.section sk5zb
-.section sk51a
-.section sk51b
-.section sk52a
-.section sk52b
-.section sk53a
-.section sk53b
-.section sk54a
-.section sk54b
-.section sk55a
-.section sk55b
-.section sk56a
-.section sk56b
-.section sk57a
-.section sk57b
-.section sk58a
-.section sk58b
-.section sk59a
-.section sk59b
-.section sk50a
-.section sk50b
-.section sk6aa
-.section sk6ab
-.section sk6ba
-.section sk6bb
-.section sk6ca
-.section sk6cb
-.section sk6da
-.section sk6db
-.section sk6ea
-.section sk6eb
-.section sk6fa
-.section sk6fb
-.section sk6ga
-.section sk6gb
-.section sk6ha
-.section sk6hb
-.section sk6ia
-.section sk6ib
-.section sk6ja
-.section sk6jb
-.section sk6ka
-.section sk6kb
-.section sk6la
-.section sk6lb
-.section sk6ma
-.section sk6mb
-.section sk6na
-.section sk6nb
-.section sk6oa
-.section sk6ob
-.section sk6pa
-.section sk6pb
-.section sk6qa
-.section sk6qb
-.section sk6ra
-.section sk6rb
-.section sk6sa
-.section sk6sb
-.section sk6ta
-.section sk6tb
-.section sk6ua
-.section sk6ub
-.section sk6va
-.section sk6vb
-.section sk6wa
-.section sk6wb
-.section sk6xa
-.section sk6xb
-.section sk6ya
-.section sk6yb
-.section sk6za
-.section sk6zb
-.section sk61a
-.section sk61b
-.section sk62a
-.section sk62b
-.section sk63a
-.section sk63b
-.section sk64a
-.section sk64b
-.section sk65a
-.section sk65b
-.section sk66a
-.section sk66b
-.section sk67a
-.section sk67b
-.section sk68a
-.section sk68b
-.section sk69a
-.section sk69b
-.section sk60a
-.section sk60b
-.section sk7aa
-.section sk7ab
-.section sk7ba
-.section sk7bb
-.section sk7ca
-.section sk7cb
-.section sk7da
-.section sk7db
-.section sk7ea
-.section sk7eb
-.section sk7fa
-.section sk7fb
-.section sk7ga
-.section sk7gb
-.section sk7ha
-.section sk7hb
-.section sk7ia
-.section sk7ib
-.section sk7ja
-.section sk7jb
-.section sk7ka
-.section sk7kb
-.section sk7la
-.section sk7lb
-.section sk7ma
-.section sk7mb
-.section sk7na
-.section sk7nb
-.section sk7oa
-.section sk7ob
-.section sk7pa
-.section sk7pb
-.section sk7qa
-.section sk7qb
-.section sk7ra
-.section sk7rb
-.section sk7sa
-.section sk7sb
-.section sk7ta
-.section sk7tb
-.section sk7ua
-.section sk7ub
-.section sk7va
-.section sk7vb
-.section sk7wa
-.section sk7wb
-.section sk7xa
-.section sk7xb
-.section sk7ya
-.section sk7yb
-.section sk7za
-.section sk7zb
-.section sk71a
-.section sk71b
-.section sk72a
-.section sk72b
-.section sk73a
-.section sk73b
-.section sk74a
-.section sk74b
-.section sk75a
-.section sk75b
-.section sk76a
-.section sk76b
-.section sk77a
-.section sk77b
-.section sk78a
-.section sk78b
-.section sk79a
-.section sk79b
-.section sk70a
-.section sk70b
-.section sk8aa
-.section sk8ab
-.section sk8ba
-.section sk8bb
-.section sk8ca
-.section sk8cb
-.section sk8da
-.section sk8db
-.section sk8ea
-.section sk8eb
-.section sk8fa
-.section sk8fb
-.section sk8ga
-.section sk8gb
-.section sk8ha
-.section sk8hb
-.section sk8ia
-.section sk8ib
-.section sk8ja
-.section sk8jb
-.section sk8ka
-.section sk8kb
-.section sk8la
-.section sk8lb
-.section sk8ma
-.section sk8mb
-.section sk8na
-.section sk8nb
-.section sk8oa
-.section sk8ob
-.section sk8pa
-.section sk8pb
-.section sk8qa
-.section sk8qb
-.section sk8ra
-.section sk8rb
-.section sk8sa
-.section sk8sb
-.section sk8ta
-.section sk8tb
-.section sk8ua
-.section sk8ub
-.section sk8va
-.section sk8vb
-.section sk8wa
-.section sk8wb
-.section sk8xa
-.section sk8xb
-.section sk8ya
-.section sk8yb
-.section sk8za
-.section sk8zb
-.section sk81a
-.section sk81b
-.section sk82a
-.section sk82b
-.section sk83a
-.section sk83b
-.section sk84a
-.section sk84b
-.section sk85a
-.section sk85b
-.section sk86a
-.section sk86b
-.section sk87a
-.section sk87b
-.section sk88a
-.section sk88b
-.section sk89a
-.section sk89b
-.section sk80a
-.section sk80b
-.section sk9aa
-.section sk9ab
-.section sk9ba
-.section sk9bb
-.section sk9ca
-.section sk9cb
-.section sk9da
-.section sk9db
-.section sk9ea
-.section sk9eb
-.section sk9fa
-.section sk9fb
-.section sk9ga
-.section sk9gb
-.section sk9ha
-.section sk9hb
-.section sk9ia
-.section sk9ib
-.section sk9ja
-.section sk9jb
-.section sk9ka
-.section sk9kb
-.section sk9la
-.section sk9lb
-.section sk9ma
-.section sk9mb
-.section sk9na
-.section sk9nb
-.section sk9oa
-.section sk9ob
-.section sk9pa
-.section sk9pb
-.section sk9qa
-.section sk9qb
-.section sk9ra
-.section sk9rb
-.section sk9sa
-.section sk9sb
-.section sk9ta
-.section sk9tb
-.section sk9ua
-.section sk9ub
-.section sk9va
-.section sk9vb
-.section sk9wa
-.section sk9wb
-.section sk9xa
-.section sk9xb
-.section sk9ya
-.section sk9yb
-.section sk9za
-.section sk9zb
-.section sk91a
-.section sk91b
-.section sk92a
-.section sk92b
-.section sk93a
-.section sk93b
-.section sk94a
-.section sk94b
-.section sk95a
-.section sk95b
-.section sk96a
-.section sk96b
-.section sk97a
-.section sk97b
-.section sk98a
-.section sk98b
-.section sk99a
-.section sk99b
-.section sk90a
-.section sk90b
-.section sk0aa
-.section sk0ab
-.section sk0ba
-.section sk0bb
-.section sk0ca
-.section sk0cb
-.section sk0da
-.section sk0db
-.section sk0ea
-.section sk0eb
-.section sk0fa
-.section sk0fb
-.section sk0ga
-.section sk0gb
-.section sk0ha
-.section sk0hb
-.section sk0ia
-.section sk0ib
-.section sk0ja
-.section sk0jb
-.section sk0ka
-.section sk0kb
-.section sk0la
-.section sk0lb
-.section sk0ma
-.section sk0mb
-.section sk0na
-.section sk0nb
-.section sk0oa
-.section sk0ob
-.section sk0pa
-.section sk0pb
-.section sk0qa
-.section sk0qb
-.section sk0ra
-.section sk0rb
-.section sk0sa
-.section sk0sb
-.section sk0ta
-.section sk0tb
-.section sk0ua
-.section sk0ub
-.section sk0va
-.section sk0vb
-.section sk0wa
-.section sk0wb
-.section sk0xa
-.section sk0xb
-.section sk0ya
-.section sk0yb
-.section sk0za
-.section sk0zb
-.section sk01a
-.section sk01b
-.section sk02a
-.section sk02b
-.section sk03a
-.section sk03b
-.section sk04a
-.section sk04b
-.section sk05a
-.section sk05b
-.section sk06a
-.section sk06b
-.section sk07a
-.section sk07b
-.section sk08a
-.section sk08b
-.section sk09a
-.section sk09b
-.section sk00a
-.section sk00b
-.section slaaa
-.section slaab
-.section slaba
-.section slabb
-.section slaca
-.section slacb
-.section slada
-.section sladb
-.section slaea
-.section slaeb
-.section slafa
-.section slafb
-.section slaga
-.section slagb
-.section slaha
-.section slahb
-.section slaia
-.section slaib
-.section slaja
-.section slajb
-.section slaka
-.section slakb
-.section slala
-.section slalb
-.section slama
-.section slamb
-.section slana
-.section slanb
-.section slaoa
-.section slaob
-.section slapa
-.section slapb
-.section slaqa
-.section slaqb
-.section slara
-.section slarb
-.section slasa
-.section slasb
-.section slata
-.section slatb
-.section slaua
-.section slaub
-.section slava
-.section slavb
-.section slawa
-.section slawb
-.section slaxa
-.section slaxb
-.section slaya
-.section slayb
-.section slaza
-.section slazb
-.section sla1a
-.section sla1b
-.section sla2a
-.section sla2b
-.section sla3a
-.section sla3b
-.section sla4a
-.section sla4b
-.section sla5a
-.section sla5b
-.section sla6a
-.section sla6b
-.section sla7a
-.section sla7b
-.section sla8a
-.section sla8b
-.section sla9a
-.section sla9b
-.section sla0a
-.section sla0b
-.section slbaa
-.section slbab
-.section slbba
-.section slbbb
-.section slbca
-.section slbcb
-.section slbda
-.section slbdb
-.section slbea
-.section slbeb
-.section slbfa
-.section slbfb
-.section slbga
-.section slbgb
-.section slbha
-.section slbhb
-.section slbia
-.section slbib
-.section slbja
-.section slbjb
-.section slbka
-.section slbkb
-.section slbla
-.section slblb
-.section slbma
-.section slbmb
-.section slbna
-.section slbnb
-.section slboa
-.section slbob
-.section slbpa
-.section slbpb
-.section slbqa
-.section slbqb
-.section slbra
-.section slbrb
-.section slbsa
-.section slbsb
-.section slbta
-.section slbtb
-.section slbua
-.section slbub
-.section slbva
-.section slbvb
-.section slbwa
-.section slbwb
-.section slbxa
-.section slbxb
-.section slbya
-.section slbyb
-.section slbza
-.section slbzb
-.section slb1a
-.section slb1b
-.section slb2a
-.section slb2b
-.section slb3a
-.section slb3b
-.section slb4a
-.section slb4b
-.section slb5a
-.section slb5b
-.section slb6a
-.section slb6b
-.section slb7a
-.section slb7b
-.section slb8a
-.section slb8b
-.section slb9a
-.section slb9b
-.section slb0a
-.section slb0b
-.section slcaa
-.section slcab
-.section slcba
-.section slcbb
-.section slcca
-.section slccb
-.section slcda
-.section slcdb
-.section slcea
-.section slceb
-.section slcfa
-.section slcfb
-.section slcga
-.section slcgb
-.section slcha
-.section slchb
-.section slcia
-.section slcib
-.section slcja
-.section slcjb
-.section slcka
-.section slckb
-.section slcla
-.section slclb
-.section slcma
-.section slcmb
-.section slcna
-.section slcnb
-.section slcoa
-.section slcob
-.section slcpa
-.section slcpb
-.section slcqa
-.section slcqb
-.section slcra
-.section slcrb
-.section slcsa
-.section slcsb
-.section slcta
-.section slctb
-.section slcua
-.section slcub
-.section slcva
-.section slcvb
-.section slcwa
-.section slcwb
-.section slcxa
-.section slcxb
-.section slcya
-.section slcyb
-.section slcza
-.section slczb
-.section slc1a
-.section slc1b
-.section slc2a
-.section slc2b
-.section slc3a
-.section slc3b
-.section slc4a
-.section slc4b
-.section slc5a
-.section slc5b
-.section slc6a
-.section slc6b
-.section slc7a
-.section slc7b
-.section slc8a
-.section slc8b
-.section slc9a
-.section slc9b
-.section slc0a
-.section slc0b
-.section sldaa
-.section sldab
-.section sldba
-.section sldbb
-.section sldca
-.section sldcb
-.section sldda
-.section slddb
-.section sldea
-.section sldeb
-.section sldfa
-.section sldfb
-.section sldga
-.section sldgb
-.section sldha
-.section sldhb
-.section sldia
-.section sldib
-.section sldja
-.section sldjb
-.section sldka
-.section sldkb
-.section sldla
-.section sldlb
-.section sldma
-.section sldmb
-.section sldna
-.section sldnb
-.section sldoa
-.section sldob
-.section sldpa
-.section sldpb
-.section sldqa
-.section sldqb
-.section sldra
-.section sldrb
-.section sldsa
-.section sldsb
-.section sldta
-.section sldtb
-.section sldua
-.section sldub
-.section sldva
-.section sldvb
-.section sldwa
-.section sldwb
-.section sldxa
-.section sldxb
-.section sldya
-.section sldyb
-.section sldza
-.section sldzb
-.section sld1a
-.section sld1b
-.section sld2a
-.section sld2b
-.section sld3a
-.section sld3b
-.section sld4a
-.section sld4b
-.section sld5a
-.section sld5b
-.section sld6a
-.section sld6b
-.section sld7a
-.section sld7b
-.section sld8a
-.section sld8b
-.section sld9a
-.section sld9b
-.section sld0a
-.section sld0b
-.section sleaa
-.section sleab
-.section sleba
-.section slebb
-.section sleca
-.section slecb
-.section sleda
-.section sledb
-.section sleea
-.section sleeb
-.section slefa
-.section slefb
-.section slega
-.section slegb
-.section sleha
-.section slehb
-.section sleia
-.section sleib
-.section sleja
-.section slejb
-.section sleka
-.section slekb
-.section slela
-.section slelb
-.section slema
-.section slemb
-.section slena
-.section slenb
-.section sleoa
-.section sleob
-.section slepa
-.section slepb
-.section sleqa
-.section sleqb
-.section slera
-.section slerb
-.section slesa
-.section slesb
-.section sleta
-.section sletb
-.section sleua
-.section sleub
-.section sleva
-.section slevb
-.section slewa
-.section slewb
-.section slexa
-.section slexb
-.section sleya
-.section sleyb
-.section sleza
-.section slezb
-.section sle1a
-.section sle1b
-.section sle2a
-.section sle2b
-.section sle3a
-.section sle3b
-.section sle4a
-.section sle4b
-.section sle5a
-.section sle5b
-.section sle6a
-.section sle6b
-.section sle7a
-.section sle7b
-.section sle8a
-.section sle8b
-.section sle9a
-.section sle9b
-.section sle0a
-.section sle0b
-.section slfaa
-.section slfab
-.section slfba
-.section slfbb
-.section slfca
-.section slfcb
-.section slfda
-.section slfdb
-.section slfea
-.section slfeb
-.section slffa
-.section slffb
-.section slfga
-.section slfgb
-.section slfha
-.section slfhb
-.section slfia
-.section slfib
-.section slfja
-.section slfjb
-.section slfka
-.section slfkb
-.section slfla
-.section slflb
-.section slfma
-.section slfmb
-.section slfna
-.section slfnb
-.section slfoa
-.section slfob
-.section slfpa
-.section slfpb
-.section slfqa
-.section slfqb
-.section slfra
-.section slfrb
-.section slfsa
-.section slfsb
-.section slfta
-.section slftb
-.section slfua
-.section slfub
-.section slfva
-.section slfvb
-.section slfwa
-.section slfwb
-.section slfxa
-.section slfxb
-.section slfya
-.section slfyb
-.section slfza
-.section slfzb
-.section slf1a
-.section slf1b
-.section slf2a
-.section slf2b
-.section slf3a
-.section slf3b
-.section slf4a
-.section slf4b
-.section slf5a
-.section slf5b
-.section slf6a
-.section slf6b
-.section slf7a
-.section slf7b
-.section slf8a
-.section slf8b
-.section slf9a
-.section slf9b
-.section slf0a
-.section slf0b
-.section slgaa
-.section slgab
-.section slgba
-.section slgbb
-.section slgca
-.section slgcb
-.section slgda
-.section slgdb
-.section slgea
-.section slgeb
-.section slgfa
-.section slgfb
-.section slgga
-.section slggb
-.section slgha
-.section slghb
-.section slgia
-.section slgib
-.section slgja
-.section slgjb
-.section slgka
-.section slgkb
-.section slgla
-.section slglb
-.section slgma
-.section slgmb
-.section slgna
-.section slgnb
-.section slgoa
-.section slgob
-.section slgpa
-.section slgpb
-.section slgqa
-.section slgqb
-.section slgra
-.section slgrb
-.section slgsa
-.section slgsb
-.section slgta
-.section slgtb
-.section slgua
-.section slgub
-.section slgva
-.section slgvb
-.section slgwa
-.section slgwb
-.section slgxa
-.section slgxb
-.section slgya
-.section slgyb
-.section slgza
-.section slgzb
-.section slg1a
-.section slg1b
-.section slg2a
-.section slg2b
-.section slg3a
-.section slg3b
-.section slg4a
-.section slg4b
-.section slg5a
-.section slg5b
-.section slg6a
-.section slg6b
-.section slg7a
-.section slg7b
-.section slg8a
-.section slg8b
-.section slg9a
-.section slg9b
-.section slg0a
-.section slg0b
-.section slhaa
-.section slhab
-.section slhba
-.section slhbb
-.section slhca
-.section slhcb
-.section slhda
-.section slhdb
-.section slhea
-.section slheb
-.section slhfa
-.section slhfb
-.section slhga
-.section slhgb
-.section slhha
-.section slhhb
-.section slhia
-.section slhib
-.section slhja
-.section slhjb
-.section slhka
-.section slhkb
-.section slhla
-.section slhlb
-.section slhma
-.section slhmb
-.section slhna
-.section slhnb
-.section slhoa
-.section slhob
-.section slhpa
-.section slhpb
-.section slhqa
-.section slhqb
-.section slhra
-.section slhrb
-.section slhsa
-.section slhsb
-.section slhta
-.section slhtb
-.section slhua
-.section slhub
-.section slhva
-.section slhvb
-.section slhwa
-.section slhwb
-.section slhxa
-.section slhxb
-.section slhya
-.section slhyb
-.section slhza
-.section slhzb
-.section slh1a
-.section slh1b
-.section slh2a
-.section slh2b
-.section slh3a
-.section slh3b
-.section slh4a
-.section slh4b
-.section slh5a
-.section slh5b
-.section slh6a
-.section slh6b
-.section slh7a
-.section slh7b
-.section slh8a
-.section slh8b
-.section slh9a
-.section slh9b
-.section slh0a
-.section slh0b
-.section sliaa
-.section sliab
-.section sliba
-.section slibb
-.section slica
-.section slicb
-.section slida
-.section slidb
-.section sliea
-.section slieb
-.section slifa
-.section slifb
-.section sliga
-.section sligb
-.section sliha
-.section slihb
-.section sliia
-.section sliib
-.section slija
-.section slijb
-.section slika
-.section slikb
-.section slila
-.section slilb
-.section slima
-.section slimb
-.section slina
-.section slinb
-.section slioa
-.section sliob
-.section slipa
-.section slipb
-.section sliqa
-.section sliqb
-.section slira
-.section slirb
-.section slisa
-.section slisb
-.section slita
-.section slitb
-.section sliua
-.section sliub
-.section sliva
-.section slivb
-.section sliwa
-.section sliwb
-.section slixa
-.section slixb
-.section sliya
-.section sliyb
-.section sliza
-.section slizb
-.section sli1a
-.section sli1b
-.section sli2a
-.section sli2b
-.section sli3a
-.section sli3b
-.section sli4a
-.section sli4b
-.section sli5a
-.section sli5b
-.section sli6a
-.section sli6b
-.section sli7a
-.section sli7b
-.section sli8a
-.section sli8b
-.section sli9a
-.section sli9b
-.section sli0a
-.section sli0b
-.section sljaa
-.section sljab
-.section sljba
-.section sljbb
-.section sljca
-.section sljcb
-.section sljda
-.section sljdb
-.section sljea
-.section sljeb
-.section sljfa
-.section sljfb
-.section sljga
-.section sljgb
-.section sljha
-.section sljhb
-.section sljia
-.section sljib
-.section sljja
-.section sljjb
-.section sljka
-.section sljkb
-.section sljla
-.section sljlb
-.section sljma
-.section sljmb
-.section sljna
-.section sljnb
-.section sljoa
-.section sljob
-.section sljpa
-.section sljpb
-.section sljqa
-.section sljqb
-.section sljra
-.section sljrb
-.section sljsa
-.section sljsb
-.section sljta
-.section sljtb
-.section sljua
-.section sljub
-.section sljva
-.section sljvb
-.section sljwa
-.section sljwb
-.section sljxa
-.section sljxb
-.section sljya
-.section sljyb
-.section sljza
-.section sljzb
-.section slj1a
-.section slj1b
-.section slj2a
-.section slj2b
-.section slj3a
-.section slj3b
-.section slj4a
-.section slj4b
-.section slj5a
-.section slj5b
-.section slj6a
-.section slj6b
-.section slj7a
-.section slj7b
-.section slj8a
-.section slj8b
-.section slj9a
-.section slj9b
-.section slj0a
-.section slj0b
-.section slkaa
-.section slkab
-.section slkba
-.section slkbb
-.section slkca
-.section slkcb
-.section slkda
-.section slkdb
-.section slkea
-.section slkeb
-.section slkfa
-.section slkfb
-.section slkga
-.section slkgb
-.section slkha
-.section slkhb
-.section slkia
-.section slkib
-.section slkja
-.section slkjb
-.section slkka
-.section slkkb
-.section slkla
-.section slklb
-.section slkma
-.section slkmb
-.section slkna
-.section slknb
-.section slkoa
-.section slkob
-.section slkpa
-.section slkpb
-.section slkqa
-.section slkqb
-.section slkra
-.section slkrb
-.section slksa
-.section slksb
-.section slkta
-.section slktb
-.section slkua
-.section slkub
-.section slkva
-.section slkvb
-.section slkwa
-.section slkwb
-.section slkxa
-.section slkxb
-.section slkya
-.section slkyb
-.section slkza
-.section slkzb
-.section slk1a
-.section slk1b
-.section slk2a
-.section slk2b
-.section slk3a
-.section slk3b
-.section slk4a
-.section slk4b
-.section slk5a
-.section slk5b
-.section slk6a
-.section slk6b
-.section slk7a
-.section slk7b
-.section slk8a
-.section slk8b
-.section slk9a
-.section slk9b
-.section slk0a
-.section slk0b
-.section sllaa
-.section sllab
-.section sllba
-.section sllbb
-.section sllca
-.section sllcb
-.section sllda
-.section slldb
-.section sllea
-.section slleb
-.section sllfa
-.section sllfb
-.section sllga
-.section sllgb
-.section sllha
-.section sllhb
-.section sllia
-.section sllib
-.section sllja
-.section slljb
-.section sllka
-.section sllkb
-.section sllla
-.section slllb
-.section sllma
-.section sllmb
-.section sllna
-.section sllnb
-.section slloa
-.section sllob
-.section sllpa
-.section sllpb
-.section sllqa
-.section sllqb
-.section sllra
-.section sllrb
-.section sllsa
-.section sllsb
-.section sllta
-.section slltb
-.section sllua
-.section sllub
-.section sllva
-.section sllvb
-.section sllwa
-.section sllwb
-.section sllxa
-.section sllxb
-.section sllya
-.section sllyb
-.section sllza
-.section sllzb
-.section sll1a
-.section sll1b
-.section sll2a
-.section sll2b
-.section sll3a
-.section sll3b
-.section sll4a
-.section sll4b
-.section sll5a
-.section sll5b
-.section sll6a
-.section sll6b
-.section sll7a
-.section sll7b
-.section sll8a
-.section sll8b
-.section sll9a
-.section sll9b
-.section sll0a
-.section sll0b
-.section slmaa
-.section slmab
-.section slmba
-.section slmbb
-.section slmca
-.section slmcb
-.section slmda
-.section slmdb
-.section slmea
-.section slmeb
-.section slmfa
-.section slmfb
-.section slmga
-.section slmgb
-.section slmha
-.section slmhb
-.section slmia
-.section slmib
-.section slmja
-.section slmjb
-.section slmka
-.section slmkb
-.section slmla
-.section slmlb
-.section slmma
-.section slmmb
-.section slmna
-.section slmnb
-.section slmoa
-.section slmob
-.section slmpa
-.section slmpb
-.section slmqa
-.section slmqb
-.section slmra
-.section slmrb
-.section slmsa
-.section slmsb
-.section slmta
-.section slmtb
-.section slmua
-.section slmub
-.section slmva
-.section slmvb
-.section slmwa
-.section slmwb
-.section slmxa
-.section slmxb
-.section slmya
-.section slmyb
-.section slmza
-.section slmzb
-.section slm1a
-.section slm1b
-.section slm2a
-.section slm2b
-.section slm3a
-.section slm3b
-.section slm4a
-.section slm4b
-.section slm5a
-.section slm5b
-.section slm6a
-.section slm6b
-.section slm7a
-.section slm7b
-.section slm8a
-.section slm8b
-.section slm9a
-.section slm9b
-.section slm0a
-.section slm0b
-.section slnaa
-.section slnab
-.section slnba
-.section slnbb
-.section slnca
-.section slncb
-.section slnda
-.section slndb
-.section slnea
-.section slneb
-.section slnfa
-.section slnfb
-.section slnga
-.section slngb
-.section slnha
-.section slnhb
-.section slnia
-.section slnib
-.section slnja
-.section slnjb
-.section slnka
-.section slnkb
-.section slnla
-.section slnlb
-.section slnma
-.section slnmb
-.section slnna
-.section slnnb
-.section slnoa
-.section slnob
-.section slnpa
-.section slnpb
-.section slnqa
-.section slnqb
-.section slnra
-.section slnrb
-.section slnsa
-.section slnsb
-.section slnta
-.section slntb
-.section slnua
-.section slnub
-.section slnva
-.section slnvb
-.section slnwa
-.section slnwb
-.section slnxa
-.section slnxb
-.section slnya
-.section slnyb
-.section slnza
-.section slnzb
-.section sln1a
-.section sln1b
-.section sln2a
-.section sln2b
-.section sln3a
-.section sln3b
-.section sln4a
-.section sln4b
-.section sln5a
-.section sln5b
-.section sln6a
-.section sln6b
-.section sln7a
-.section sln7b
-.section sln8a
-.section sln8b
-.section sln9a
-.section sln9b
-.section sln0a
-.section sln0b
-.section sloaa
-.section sloab
-.section sloba
-.section slobb
-.section sloca
-.section slocb
-.section sloda
-.section slodb
-.section sloea
-.section sloeb
-.section slofa
-.section slofb
-.section sloga
-.section slogb
-.section sloha
-.section slohb
-.section sloia
-.section sloib
-.section sloja
-.section slojb
-.section sloka
-.section slokb
-.section slola
-.section slolb
-.section sloma
-.section slomb
-.section slona
-.section slonb
-.section slooa
-.section sloob
-.section slopa
-.section slopb
-.section sloqa
-.section sloqb
-.section slora
-.section slorb
-.section slosa
-.section slosb
-.section slota
-.section slotb
-.section sloua
-.section sloub
-.section slova
-.section slovb
-.section slowa
-.section slowb
-.section sloxa
-.section sloxb
-.section sloya
-.section sloyb
-.section sloza
-.section slozb
-.section slo1a
-.section slo1b
-.section slo2a
-.section slo2b
-.section slo3a
-.section slo3b
-.section slo4a
-.section slo4b
-.section slo5a
-.section slo5b
-.section slo6a
-.section slo6b
-.section slo7a
-.section slo7b
-.section slo8a
-.section slo8b
-.section slo9a
-.section slo9b
-.section slo0a
-.section slo0b
-.section slpaa
-.section slpab
-.section slpba
-.section slpbb
-.section slpca
-.section slpcb
-.section slpda
-.section slpdb
-.section slpea
-.section slpeb
-.section slpfa
-.section slpfb
-.section slpga
-.section slpgb
-.section slpha
-.section slphb
-.section slpia
-.section slpib
-.section slpja
-.section slpjb
-.section slpka
-.section slpkb
-.section slpla
-.section slplb
-.section slpma
-.section slpmb
-.section slpna
-.section slpnb
-.section slpoa
-.section slpob
-.section slppa
-.section slppb
-.section slpqa
-.section slpqb
-.section slpra
-.section slprb
-.section slpsa
-.section slpsb
-.section slpta
-.section slptb
-.section slpua
-.section slpub
-.section slpva
-.section slpvb
-.section slpwa
-.section slpwb
-.section slpxa
-.section slpxb
-.section slpya
-.section slpyb
-.section slpza
-.section slpzb
-.section slp1a
-.section slp1b
-.section slp2a
-.section slp2b
-.section slp3a
-.section slp3b
-.section slp4a
-.section slp4b
-.section slp5a
-.section slp5b
-.section slp6a
-.section slp6b
-.section slp7a
-.section slp7b
-.section slp8a
-.section slp8b
-.section slp9a
-.section slp9b
-.section slp0a
-.section slp0b
-.section slqaa
-.section slqab
-.section slqba
-.section slqbb
-.section slqca
-.section slqcb
-.section slqda
-.section slqdb
-.section slqea
-.section slqeb
-.section slqfa
-.section slqfb
-.section slqga
-.section slqgb
-.section slqha
-.section slqhb
-.section slqia
-.section slqib
-.section slqja
-.section slqjb
-.section slqka
-.section slqkb
-.section slqla
-.section slqlb
-.section slqma
-.section slqmb
-.section slqna
-.section slqnb
-.section slqoa
-.section slqob
-.section slqpa
-.section slqpb
-.section slqqa
-.section slqqb
-.section slqra
-.section slqrb
-.section slqsa
-.section slqsb
-.section slqta
-.section slqtb
-.section slqua
-.section slqub
-.section slqva
-.section slqvb
-.section slqwa
-.section slqwb
-.section slqxa
-.section slqxb
-.section slqya
-.section slqyb
-.section slqza
-.section slqzb
-.section slq1a
-.section slq1b
-.section slq2a
-.section slq2b
-.section slq3a
-.section slq3b
-.section slq4a
-.section slq4b
-.section slq5a
-.section slq5b
-.section slq6a
-.section slq6b
-.section slq7a
-.section slq7b
-.section slq8a
-.section slq8b
-.section slq9a
-.section slq9b
-.section slq0a
-.section slq0b
-.section slraa
-.section slrab
-.section slrba
-.section slrbb
-.section slrca
-.section slrcb
-.section slrda
-.section slrdb
-.section slrea
-.section slreb
-.section slrfa
-.section slrfb
-.section slrga
-.section slrgb
-.section slrha
-.section slrhb
-.section slria
-.section slrib
-.section slrja
-.section slrjb
-.section slrka
-.section slrkb
-.section slrla
-.section slrlb
-.section slrma
-.section slrmb
-.section slrna
-.section slrnb
-.section slroa
-.section slrob
-.section slrpa
-.section slrpb
-.section slrqa
-.section slrqb
-.section slrra
-.section slrrb
-.section slrsa
-.section slrsb
-.section slrta
-.section slrtb
-.section slrua
-.section slrub
-.section slrva
-.section slrvb
-.section slrwa
-.section slrwb
-.section slrxa
-.section slrxb
-.section slrya
-.section slryb
-.section slrza
-.section slrzb
-.section slr1a
-.section slr1b
-.section slr2a
-.section slr2b
-.section slr3a
-.section slr3b
-.section slr4a
-.section slr4b
-.section slr5a
-.section slr5b
-.section slr6a
-.section slr6b
-.section slr7a
-.section slr7b
-.section slr8a
-.section slr8b
-.section slr9a
-.section slr9b
-.section slr0a
-.section slr0b
-.section slsaa
-.section slsab
-.section slsba
-.section slsbb
-.section slsca
-.section slscb
-.section slsda
-.section slsdb
-.section slsea
-.section slseb
-.section slsfa
-.section slsfb
-.section slsga
-.section slsgb
-.section slsha
-.section slshb
-.section slsia
-.section slsib
-.section slsja
-.section slsjb
-.section slska
-.section slskb
-.section slsla
-.section slslb
-.section slsma
-.section slsmb
-.section slsna
-.section slsnb
-.section slsoa
-.section slsob
-.section slspa
-.section slspb
-.section slsqa
-.section slsqb
-.section slsra
-.section slsrb
-.section slssa
-.section slssb
-.section slsta
-.section slstb
-.section slsua
-.section slsub
-.section slsva
-.section slsvb
-.section slswa
-.section slswb
-.section slsxa
-.section slsxb
-.section slsya
-.section slsyb
-.section slsza
-.section slszb
-.section sls1a
-.section sls1b
-.section sls2a
-.section sls2b
-.section sls3a
-.section sls3b
-.section sls4a
-.section sls4b
-.section sls5a
-.section sls5b
-.section sls6a
-.section sls6b
-.section sls7a
-.section sls7b
-.section sls8a
-.section sls8b
-.section sls9a
-.section sls9b
-.section sls0a
-.section sls0b
-.section sltaa
-.section sltab
-.section sltba
-.section sltbb
-.section sltca
-.section sltcb
-.section sltda
-.section sltdb
-.section sltea
-.section slteb
-.section sltfa
-.section sltfb
-.section sltga
-.section sltgb
-.section sltha
-.section slthb
-.section sltia
-.section sltib
-.section sltja
-.section sltjb
-.section sltka
-.section sltkb
-.section sltla
-.section sltlb
-.section sltma
-.section sltmb
-.section sltna
-.section sltnb
-.section sltoa
-.section sltob
-.section sltpa
-.section sltpb
-.section sltqa
-.section sltqb
-.section sltra
-.section sltrb
-.section sltsa
-.section sltsb
-.section sltta
-.section slttb
-.section sltua
-.section sltub
-.section sltva
-.section sltvb
-.section sltwa
-.section sltwb
-.section sltxa
-.section sltxb
-.section sltya
-.section sltyb
-.section sltza
-.section sltzb
-.section slt1a
-.section slt1b
-.section slt2a
-.section slt2b
-.section slt3a
-.section slt3b
-.section slt4a
-.section slt4b
-.section slt5a
-.section slt5b
-.section slt6a
-.section slt6b
-.section slt7a
-.section slt7b
-.section slt8a
-.section slt8b
-.section slt9a
-.section slt9b
-.section slt0a
-.section slt0b
-.section sluaa
-.section sluab
-.section sluba
-.section slubb
-.section sluca
-.section slucb
-.section sluda
-.section sludb
-.section sluea
-.section slueb
-.section slufa
-.section slufb
-.section sluga
-.section slugb
-.section sluha
-.section sluhb
-.section sluia
-.section sluib
-.section sluja
-.section slujb
-.section sluka
-.section slukb
-.section slula
-.section slulb
-.section sluma
-.section slumb
-.section sluna
-.section slunb
-.section sluoa
-.section sluob
-.section slupa
-.section slupb
-.section sluqa
-.section sluqb
-.section slura
-.section slurb
-.section slusa
-.section slusb
-.section sluta
-.section slutb
-.section sluua
-.section sluub
-.section sluva
-.section sluvb
-.section sluwa
-.section sluwb
-.section sluxa
-.section sluxb
-.section sluya
-.section sluyb
-.section sluza
-.section sluzb
-.section slu1a
-.section slu1b
-.section slu2a
-.section slu2b
-.section slu3a
-.section slu3b
-.section slu4a
-.section slu4b
-.section slu5a
-.section slu5b
-.section slu6a
-.section slu6b
-.section slu7a
-.section slu7b
-.section slu8a
-.section slu8b
-.section slu9a
-.section slu9b
-.section slu0a
-.section slu0b
-.section slvaa
-.section slvab
-.section slvba
-.section slvbb
-.section slvca
-.section slvcb
-.section slvda
-.section slvdb
-.section slvea
-.section slveb
-.section slvfa
-.section slvfb
-.section slvga
-.section slvgb
-.section slvha
-.section slvhb
-.section slvia
-.section slvib
-.section slvja
-.section slvjb
-.section slvka
-.section slvkb
-.section slvla
-.section slvlb
-.section slvma
-.section slvmb
-.section slvna
-.section slvnb
-.section slvoa
-.section slvob
-.section slvpa
-.section slvpb
-.section slvqa
-.section slvqb
-.section slvra
-.section slvrb
-.section slvsa
-.section slvsb
-.section slvta
-.section slvtb
-.section slvua
-.section slvub
-.section slvva
-.section slvvb
-.section slvwa
-.section slvwb
-.section slvxa
-.section slvxb
-.section slvya
-.section slvyb
-.section slvza
-.section slvzb
-.section slv1a
-.section slv1b
-.section slv2a
-.section slv2b
-.section slv3a
-.section slv3b
-.section slv4a
-.section slv4b
-.section slv5a
-.section slv5b
-.section slv6a
-.section slv6b
-.section slv7a
-.section slv7b
-.section slv8a
-.section slv8b
-.section slv9a
-.section slv9b
-.section slv0a
-.section slv0b
-.section slwaa
-.section slwab
-.section slwba
-.section slwbb
-.section slwca
-.section slwcb
-.section slwda
-.section slwdb
-.section slwea
-.section slweb
-.section slwfa
-.section slwfb
-.section slwga
-.section slwgb
-.section slwha
-.section slwhb
-.section slwia
-.section slwib
-.section slwja
-.section slwjb
-.section slwka
-.section slwkb
-.section slwla
-.section slwlb
-.section slwma
-.section slwmb
-.section slwna
-.section slwnb
-.section slwoa
-.section slwob
-.section slwpa
-.section slwpb
-.section slwqa
-.section slwqb
-.section slwra
-.section slwrb
-.section slwsa
-.section slwsb
-.section slwta
-.section slwtb
-.section slwua
-.section slwub
-.section slwva
-.section slwvb
-.section slwwa
-.section slwwb
-.section slwxa
-.section slwxb
-.section slwya
-.section slwyb
-.section slwza
-.section slwzb
-.section slw1a
-.section slw1b
-.section slw2a
-.section slw2b
-.section slw3a
-.section slw3b
-.section slw4a
-.section slw4b
-.section slw5a
-.section slw5b
-.section slw6a
-.section slw6b
-.section slw7a
-.section slw7b
-.section slw8a
-.section slw8b
-.section slw9a
-.section slw9b
-.section slw0a
-.section slw0b
-.section slxaa
-.section slxab
-.section slxba
-.section slxbb
-.section slxca
-.section slxcb
-.section slxda
-.section slxdb
-.section slxea
-.section slxeb
-.section slxfa
-.section slxfb
-.section slxga
-.section slxgb
-.section slxha
-.section slxhb
-.section slxia
-.section slxib
-.section slxja
-.section slxjb
-.section slxka
-.section slxkb
-.section slxla
-.section slxlb
-.section slxma
-.section slxmb
-.section slxna
-.section slxnb
-.section slxoa
-.section slxob
-.section slxpa
-.section slxpb
-.section slxqa
-.section slxqb
-.section slxra
-.section slxrb
-.section slxsa
-.section slxsb
-.section slxta
-.section slxtb
-.section slxua
-.section slxub
-.section slxva
-.section slxvb
-.section slxwa
-.section slxwb
-.section slxxa
-.section slxxb
-.section slxya
-.section slxyb
-.section slxza
-.section slxzb
-.section slx1a
-.section slx1b
-.section slx2a
-.section slx2b
-.section slx3a
-.section slx3b
-.section slx4a
-.section slx4b
-.section slx5a
-.section slx5b
-.section slx6a
-.section slx6b
-.section slx7a
-.section slx7b
-.section slx8a
-.section slx8b
-.section slx9a
-.section slx9b
-.section slx0a
-.section slx0b
-.section slyaa
-.section slyab
-.section slyba
-.section slybb
-.section slyca
-.section slycb
-.section slyda
-.section slydb
-.section slyea
-.section slyeb
-.section slyfa
-.section slyfb
-.section slyga
-.section slygb
-.section slyha
-.section slyhb
-.section slyia
-.section slyib
-.section slyja
-.section slyjb
-.section slyka
-.section slykb
-.section slyla
-.section slylb
-.section slyma
-.section slymb
-.section slyna
-.section slynb
-.section slyoa
-.section slyob
-.section slypa
-.section slypb
-.section slyqa
-.section slyqb
-.section slyra
-.section slyrb
-.section slysa
-.section slysb
-.section slyta
-.section slytb
-.section slyua
-.section slyub
-.section slyva
-.section slyvb
-.section slywa
-.section slywb
-.section slyxa
-.section slyxb
-.section slyya
-.section slyyb
-.section slyza
-.section slyzb
-.section sly1a
-.section sly1b
-.section sly2a
-.section sly2b
-.section sly3a
-.section sly3b
-.section sly4a
-.section sly4b
-.section sly5a
-.section sly5b
-.section sly6a
-.section sly6b
-.section sly7a
-.section sly7b
-.section sly8a
-.section sly8b
-.section sly9a
-.section sly9b
-.section sly0a
-.section sly0b
-.section slzaa
-.section slzab
-.section slzba
-.section slzbb
-.section slzca
-.section slzcb
-.section slzda
-.section slzdb
-.section slzea
-.section slzeb
-.section slzfa
-.section slzfb
-.section slzga
-.section slzgb
-.section slzha
-.section slzhb
-.section slzia
-.section slzib
-.section slzja
-.section slzjb
-.section slzka
-.section slzkb
-.section slzla
-.section slzlb
-.section slzma
-.section slzmb
-.section slzna
-.section slznb
-.section slzoa
-.section slzob
-.section slzpa
-.section slzpb
-.section slzqa
-.section slzqb
-.section slzra
-.section slzrb
-.section slzsa
-.section slzsb
-.section slzta
-.section slztb
-.section slzua
-.section slzub
-.section slzva
-.section slzvb
-.section slzwa
-.section slzwb
-.section slzxa
-.section slzxb
-.section slzya
-.section slzyb
-.section slzza
-.section slzzb
-.section slz1a
-.section slz1b
-.section slz2a
-.section slz2b
-.section slz3a
-.section slz3b
-.section slz4a
-.section slz4b
-.section slz5a
-.section slz5b
-.section slz6a
-.section slz6b
-.section slz7a
-.section slz7b
-.section slz8a
-.section slz8b
-.section slz9a
-.section slz9b
-.section slz0a
-.section slz0b
-.section sl1aa
-.section sl1ab
-.section sl1ba
-.section sl1bb
-.section sl1ca
-.section sl1cb
-.section sl1da
-.section sl1db
-.section sl1ea
-.section sl1eb
-.section sl1fa
-.section sl1fb
-.section sl1ga
-.section sl1gb
-.section sl1ha
-.section sl1hb
-.section sl1ia
-.section sl1ib
-.section sl1ja
-.section sl1jb
-.section sl1ka
-.section sl1kb
-.section sl1la
-.section sl1lb
-.section sl1ma
-.section sl1mb
-.section sl1na
-.section sl1nb
-.section sl1oa
-.section sl1ob
-.section sl1pa
-.section sl1pb
-.section sl1qa
-.section sl1qb
-.section sl1ra
-.section sl1rb
-.section sl1sa
-.section sl1sb
-.section sl1ta
-.section sl1tb
-.section sl1ua
-.section sl1ub
-.section sl1va
-.section sl1vb
-.section sl1wa
-.section sl1wb
-.section sl1xa
-.section sl1xb
-.section sl1ya
-.section sl1yb
-.section sl1za
-.section sl1zb
-.section sl11a
-.section sl11b
-.section sl12a
-.section sl12b
-.section sl13a
-.section sl13b
-.section sl14a
-.section sl14b
-.section sl15a
-.section sl15b
-.section sl16a
-.section sl16b
-.section sl17a
-.section sl17b
-.section sl18a
-.section sl18b
-.section sl19a
-.section sl19b
-.section sl10a
-.section sl10b
-.section sl2aa
-.section sl2ab
-.section sl2ba
-.section sl2bb
-.section sl2ca
-.section sl2cb
-.section sl2da
-.section sl2db
-.section sl2ea
-.section sl2eb
-.section sl2fa
-.section sl2fb
-.section sl2ga
-.section sl2gb
-.section sl2ha
-.section sl2hb
-.section sl2ia
-.section sl2ib
-.section sl2ja
-.section sl2jb
-.section sl2ka
-.section sl2kb
-.section sl2la
-.section sl2lb
-.section sl2ma
-.section sl2mb
-.section sl2na
-.section sl2nb
-.section sl2oa
-.section sl2ob
-.section sl2pa
-.section sl2pb
-.section sl2qa
-.section sl2qb
-.section sl2ra
-.section sl2rb
-.section sl2sa
-.section sl2sb
-.section sl2ta
-.section sl2tb
-.section sl2ua
-.section sl2ub
-.section sl2va
-.section sl2vb
-.section sl2wa
-.section sl2wb
-.section sl2xa
-.section sl2xb
-.section sl2ya
-.section sl2yb
-.section sl2za
-.section sl2zb
-.section sl21a
-.section sl21b
-.section sl22a
-.section sl22b
-.section sl23a
-.section sl23b
-.section sl24a
-.section sl24b
-.section sl25a
-.section sl25b
-.section sl26a
-.section sl26b
-.section sl27a
-.section sl27b
-.section sl28a
-.section sl28b
-.section sl29a
-.section sl29b
-.section sl20a
-.section sl20b
-.section sl3aa
-.section sl3ab
-.section sl3ba
-.section sl3bb
-.section sl3ca
-.section sl3cb
-.section sl3da
-.section sl3db
-.section sl3ea
-.section sl3eb
-.section sl3fa
-.section sl3fb
-.section sl3ga
-.section sl3gb
-.section sl3ha
-.section sl3hb
-.section sl3ia
-.section sl3ib
-.section sl3ja
-.section sl3jb
-.section sl3ka
-.section sl3kb
-.section sl3la
-.section sl3lb
-.section sl3ma
-.section sl3mb
-.section sl3na
-.section sl3nb
-.section sl3oa
-.section sl3ob
-.section sl3pa
-.section sl3pb
-.section sl3qa
-.section sl3qb
-.section sl3ra
-.section sl3rb
-.section sl3sa
-.section sl3sb
-.section sl3ta
-.section sl3tb
-.section sl3ua
-.section sl3ub
-.section sl3va
-.section sl3vb
-.section sl3wa
-.section sl3wb
-.section sl3xa
-.section sl3xb
-.section sl3ya
-.section sl3yb
-.section sl3za
-.section sl3zb
-.section sl31a
-.section sl31b
-.section sl32a
-.section sl32b
-.section sl33a
-.section sl33b
-.section sl34a
-.section sl34b
-.section sl35a
-.section sl35b
-.section sl36a
-.section sl36b
-.section sl37a
-.section sl37b
-.section sl38a
-.section sl38b
-.section sl39a
-.section sl39b
-.section sl30a
-.section sl30b
-.section sl4aa
-.section sl4ab
-.section sl4ba
-.section sl4bb
-.section sl4ca
-.section sl4cb
-.section sl4da
-.section sl4db
-.section sl4ea
-.section sl4eb
-.section sl4fa
-.section sl4fb
-.section sl4ga
-.section sl4gb
-.section sl4ha
-.section sl4hb
-.section sl4ia
-.section sl4ib
-.section sl4ja
-.section sl4jb
-.section sl4ka
-.section sl4kb
-.section sl4la
-.section sl4lb
-.section sl4ma
-.section sl4mb
-.section sl4na
-.section sl4nb
-.section sl4oa
-.section sl4ob
-.section sl4pa
-.section sl4pb
-.section sl4qa
-.section sl4qb
-.section sl4ra
-.section sl4rb
-.section sl4sa
-.section sl4sb
-.section sl4ta
-.section sl4tb
-.section sl4ua
-.section sl4ub
-.section sl4va
-.section sl4vb
-.section sl4wa
-.section sl4wb
-.section sl4xa
-.section sl4xb
-.section sl4ya
-.section sl4yb
-.section sl4za
-.section sl4zb
-.section sl41a
-.section sl41b
-.section sl42a
-.section sl42b
-.section sl43a
-.section sl43b
-.section sl44a
-.section sl44b
-.section sl45a
-.section sl45b
-.section sl46a
-.section sl46b
-.section sl47a
-.section sl47b
-.section sl48a
-.section sl48b
-.section sl49a
-.section sl49b
-.section sl40a
-.section sl40b
-.section sl5aa
-.section sl5ab
-.section sl5ba
-.section sl5bb
-.section sl5ca
-.section sl5cb
-.section sl5da
-.section sl5db
-.section sl5ea
-.section sl5eb
-.section sl5fa
-.section sl5fb
-.section sl5ga
-.section sl5gb
-.section sl5ha
-.section sl5hb
-.section sl5ia
-.section sl5ib
-.section sl5ja
-.section sl5jb
-.section sl5ka
-.section sl5kb
-.section sl5la
-.section sl5lb
-.section sl5ma
-.section sl5mb
-.section sl5na
-.section sl5nb
-.section sl5oa
-.section sl5ob
-.section sl5pa
-.section sl5pb
-.section sl5qa
-.section sl5qb
-.section sl5ra
-.section sl5rb
-.section sl5sa
-.section sl5sb
-.section sl5ta
-.section sl5tb
-.section sl5ua
-.section sl5ub
-.section sl5va
-.section sl5vb
-.section sl5wa
-.section sl5wb
-.section sl5xa
-.section sl5xb
-.section sl5ya
-.section sl5yb
-.section sl5za
-.section sl5zb
-.section sl51a
-.section sl51b
-.section sl52a
-.section sl52b
-.section sl53a
-.section sl53b
-.section sl54a
-.section sl54b
-.section sl55a
-.section sl55b
-.section sl56a
-.section sl56b
-.section sl57a
-.section sl57b
-.section sl58a
-.section sl58b
-.section sl59a
-.section sl59b
-.section sl50a
-.section sl50b
-.section sl6aa
-.section sl6ab
-.section sl6ba
-.section sl6bb
-.section sl6ca
-.section sl6cb
-.section sl6da
-.section sl6db
-.section sl6ea
-.section sl6eb
-.section sl6fa
-.section sl6fb
-.section sl6ga
-.section sl6gb
-.section sl6ha
-.section sl6hb
-.section sl6ia
-.section sl6ib
-.section sl6ja
-.section sl6jb
-.section sl6ka
-.section sl6kb
-.section sl6la
-.section sl6lb
-.section sl6ma
-.section sl6mb
-.section sl6na
-.section sl6nb
-.section sl6oa
-.section sl6ob
-.section sl6pa
-.section sl6pb
-.section sl6qa
-.section sl6qb
-.section sl6ra
-.section sl6rb
-.section sl6sa
-.section sl6sb
-.section sl6ta
-.section sl6tb
-.section sl6ua
-.section sl6ub
-.section sl6va
-.section sl6vb
-.section sl6wa
-.section sl6wb
-.section sl6xa
-.section sl6xb
-.section sl6ya
-.section sl6yb
-.section sl6za
-.section sl6zb
-.section sl61a
-.section sl61b
-.section sl62a
-.section sl62b
-.section sl63a
-.section sl63b
-.section sl64a
-.section sl64b
-.section sl65a
-.section sl65b
-.section sl66a
-.section sl66b
-.section sl67a
-.section sl67b
-.section sl68a
-.section sl68b
-.section sl69a
-.section sl69b
-.section sl60a
-.section sl60b
-.section sl7aa
-.section sl7ab
-.section sl7ba
-.section sl7bb
-.section sl7ca
-.section sl7cb
-.section sl7da
-.section sl7db
-.section sl7ea
-.section sl7eb
-.section sl7fa
-.section sl7fb
-.section sl7ga
-.section sl7gb
-.section sl7ha
-.section sl7hb
-.section sl7ia
-.section sl7ib
-.section sl7ja
-.section sl7jb
-.section sl7ka
-.section sl7kb
-.section sl7la
-.section sl7lb
-.section sl7ma
-.section sl7mb
-.section sl7na
-.section sl7nb
-.section sl7oa
-.section sl7ob
-.section sl7pa
-.section sl7pb
-.section sl7qa
-.section sl7qb
-.section sl7ra
-.section sl7rb
-.section sl7sa
-.section sl7sb
-.section sl7ta
-.section sl7tb
-.section sl7ua
-.section sl7ub
-.section sl7va
-.section sl7vb
-.section sl7wa
-.section sl7wb
-.section sl7xa
-.section sl7xb
-.section sl7ya
-.section sl7yb
-.section sl7za
-.section sl7zb
-.section sl71a
-.section sl71b
-.section sl72a
-.section sl72b
-.section sl73a
-.section sl73b
-.section sl74a
-.section sl74b
-.section sl75a
-.section sl75b
-.section sl76a
-.section sl76b
-.section sl77a
-.section sl77b
-.section sl78a
-.section sl78b
-.section sl79a
-.section sl79b
-.section sl70a
-.section sl70b
-.section sl8aa
-.section sl8ab
-.section sl8ba
-.section sl8bb
-.section sl8ca
-.section sl8cb
-.section sl8da
-.section sl8db
-.section sl8ea
-.section sl8eb
-.section sl8fa
-.section sl8fb
-.section sl8ga
-.section sl8gb
-.section sl8ha
-.section sl8hb
-.section sl8ia
-.section sl8ib
-.section sl8ja
-.section sl8jb
-.section sl8ka
-.section sl8kb
-.section sl8la
-.section sl8lb
-.section sl8ma
-.section sl8mb
-.section sl8na
-.section sl8nb
-.section sl8oa
-.section sl8ob
-.section sl8pa
-.section sl8pb
-.section sl8qa
-.section sl8qb
-.section sl8ra
-.section sl8rb
-.section sl8sa
-.section sl8sb
-.section sl8ta
-.section sl8tb
-.section sl8ua
-.section sl8ub
-.section sl8va
-.section sl8vb
-.section sl8wa
-.section sl8wb
-.section sl8xa
-.section sl8xb
-.section sl8ya
-.section sl8yb
-.section sl8za
-.section sl8zb
-.section sl81a
-.section sl81b
-.section sl82a
-.section sl82b
-.section sl83a
-.section sl83b
-.section sl84a
-.section sl84b
-.section sl85a
-.section sl85b
-.section sl86a
-.section sl86b
-.section sl87a
-.section sl87b
-.section sl88a
-.section sl88b
-.section sl89a
-.section sl89b
-.section sl80a
-.section sl80b
-.section sl9aa
-.section sl9ab
-.section sl9ba
-.section sl9bb
-.section sl9ca
-.section sl9cb
-.section sl9da
-.section sl9db
-.section sl9ea
-.section sl9eb
-.section sl9fa
-.section sl9fb
-.section sl9ga
-.section sl9gb
-.section sl9ha
-.section sl9hb
-.section sl9ia
-.section sl9ib
-.section sl9ja
-.section sl9jb
-.section sl9ka
-.section sl9kb
-.section sl9la
-.section sl9lb
-.section sl9ma
-.section sl9mb
-.section sl9na
-.section sl9nb
-.section sl9oa
-.section sl9ob
-.section sl9pa
-.section sl9pb
-.section sl9qa
-.section sl9qb
-.section sl9ra
-.section sl9rb
-.section sl9sa
-.section sl9sb
-.section sl9ta
-.section sl9tb
-.section sl9ua
-.section sl9ub
-.section sl9va
-.section sl9vb
-.section sl9wa
-.section sl9wb
-.section sl9xa
-.section sl9xb
-.section sl9ya
-.section sl9yb
-.section sl9za
-.section sl9zb
-.section sl91a
-.section sl91b
-.section sl92a
-.section sl92b
-.section sl93a
-.section sl93b
-.section sl94a
-.section sl94b
-.section sl95a
-.section sl95b
-.section sl96a
-.section sl96b
-.section sl97a
-.section sl97b
-.section sl98a
-.section sl98b
-.section sl99a
-.section sl99b
-.section sl90a
-.section sl90b
-.section sl0aa
-.section sl0ab
-.section sl0ba
-.section sl0bb
-.section sl0ca
-.section sl0cb
-.section sl0da
-.section sl0db
-.section sl0ea
-.section sl0eb
-.section sl0fa
-.section sl0fb
-.section sl0ga
-.section sl0gb
-.section sl0ha
-.section sl0hb
-.section sl0ia
-.section sl0ib
-.section sl0ja
-.section sl0jb
-.section sl0ka
-.section sl0kb
-.section sl0la
-.section sl0lb
-.section sl0ma
-.section sl0mb
-.section sl0na
-.section sl0nb
-.section sl0oa
-.section sl0ob
-.section sl0pa
-.section sl0pb
-.section sl0qa
-.section sl0qb
-.section sl0ra
-.section sl0rb
-.section sl0sa
-.section sl0sb
-.section sl0ta
-.section sl0tb
-.section sl0ua
-.section sl0ub
-.section sl0va
-.section sl0vb
-.section sl0wa
-.section sl0wb
-.section sl0xa
-.section sl0xb
-.section sl0ya
-.section sl0yb
-.section sl0za
-.section sl0zb
-.section sl01a
-.section sl01b
-.section sl02a
-.section sl02b
-.section sl03a
-.section sl03b
-.section sl04a
-.section sl04b
-.section sl05a
-.section sl05b
-.section sl06a
-.section sl06b
-.section sl07a
-.section sl07b
-.section sl08a
-.section sl08b
-.section sl09a
-.section sl09b
-.section sl00a
-.section sl00b
-.section smaaa
-.section smaab
-.section smaba
-.section smabb
-.section smaca
-.section smacb
-.section smada
-.section smadb
-.section smaea
-.section smaeb
-.section smafa
-.section smafb
-.section smaga
-.section smagb
-.section smaha
-.section smahb
-.section smaia
-.section smaib
-.section smaja
-.section smajb
-.section smaka
-.section smakb
-.section smala
-.section smalb
-.section smama
-.section smamb
-.section smana
-.section smanb
-.section smaoa
-.section smaob
-.section smapa
-.section smapb
-.section smaqa
-.section smaqb
-.section smara
-.section smarb
-.section smasa
-.section smasb
-.section smata
-.section smatb
-.section smaua
-.section smaub
-.section smava
-.section smavb
-.section smawa
-.section smawb
-.section smaxa
-.section smaxb
-.section smaya
-.section smayb
-.section smaza
-.section smazb
-.section sma1a
-.section sma1b
-.section sma2a
-.section sma2b
-.section sma3a
-.section sma3b
-.section sma4a
-.section sma4b
-.section sma5a
-.section sma5b
-.section sma6a
-.section sma6b
-.section sma7a
-.section sma7b
-.section sma8a
-.section sma8b
-.section sma9a
-.section sma9b
-.section sma0a
-.section sma0b
-.section smbaa
-.section smbab
-.section smbba
-.section smbbb
-.section smbca
-.section smbcb
-.section smbda
-.section smbdb
-.section smbea
-.section smbeb
-.section smbfa
-.section smbfb
-.section smbga
-.section smbgb
-.section smbha
-.section smbhb
-.section smbia
-.section smbib
-.section smbja
-.section smbjb
-.section smbka
-.section smbkb
-.section smbla
-.section smblb
-.section smbma
-.section smbmb
-.section smbna
-.section smbnb
-.section smboa
-.section smbob
-.section smbpa
-.section smbpb
-.section smbqa
-.section smbqb
-.section smbra
-.section smbrb
-.section smbsa
-.section smbsb
-.section smbta
-.section smbtb
-.section smbua
-.section smbub
-.section smbva
-.section smbvb
-.section smbwa
-.section smbwb
-.section smbxa
-.section smbxb
-.section smbya
-.section smbyb
-.section smbza
-.section smbzb
-.section smb1a
-.section smb1b
-.section smb2a
-.section smb2b
-.section smb3a
-.section smb3b
-.section smb4a
-.section smb4b
-.section smb5a
-.section smb5b
-.section smb6a
-.section smb6b
-.section smb7a
-.section smb7b
-.section smb8a
-.section smb8b
-.section smb9a
-.section smb9b
-.section smb0a
-.section smb0b
-.section smcaa
-.section smcab
-.section smcba
-.section smcbb
-.section smcca
-.section smccb
-.section smcda
-.section smcdb
-.section smcea
-.section smceb
-.section smcfa
-.section smcfb
-.section smcga
-.section smcgb
-.section smcha
-.section smchb
-.section smcia
-.section smcib
-.section smcja
-.section smcjb
-.section smcka
-.section smckb
-.section smcla
-.section smclb
-.section smcma
-.section smcmb
-.section smcna
-.section smcnb
-.section smcoa
-.section smcob
-.section smcpa
-.section smcpb
-.section smcqa
-.section smcqb
-.section smcra
-.section smcrb
-.section smcsa
-.section smcsb
-.section smcta
-.section smctb
-.section smcua
-.section smcub
-.section smcva
-.section smcvb
-.section smcwa
-.section smcwb
-.section smcxa
-.section smcxb
-.section smcya
-.section smcyb
-.section smcza
-.section smczb
-.section smc1a
-.section smc1b
-.section smc2a
-.section smc2b
-.section smc3a
-.section smc3b
-.section smc4a
-.section smc4b
-.section smc5a
-.section smc5b
-.section smc6a
-.section smc6b
-.section smc7a
-.section smc7b
-.section smc8a
-.section smc8b
-.section smc9a
-.section smc9b
-.section smc0a
-.section smc0b
-.section smdaa
-.section smdab
-.section smdba
-.section smdbb
-.section smdca
-.section smdcb
-.section smdda
-.section smddb
-.section smdea
-.section smdeb
-.section smdfa
-.section smdfb
-.section smdga
-.section smdgb
-.section smdha
-.section smdhb
-.section smdia
-.section smdib
-.section smdja
-.section smdjb
-.section smdka
-.section smdkb
-.section smdla
-.section smdlb
-.section smdma
-.section smdmb
-.section smdna
-.section smdnb
-.section smdoa
-.section smdob
-.section smdpa
-.section smdpb
-.section smdqa
-.section smdqb
-.section smdra
-.section smdrb
-.section smdsa
-.section smdsb
-.section smdta
-.section smdtb
-.section smdua
-.section smdub
-.section smdva
-.section smdvb
-.section smdwa
-.section smdwb
-.section smdxa
-.section smdxb
-.section smdya
-.section smdyb
-.section smdza
-.section smdzb
-.section smd1a
-.section smd1b
-.section smd2a
-.section smd2b
-.section smd3a
-.section smd3b
-.section smd4a
-.section smd4b
-.section smd5a
-.section smd5b
-.section smd6a
-.section smd6b
-.section smd7a
-.section smd7b
-.section smd8a
-.section smd8b
-.section smd9a
-.section smd9b
-.section smd0a
-.section smd0b
-.section smeaa
-.section smeab
-.section smeba
-.section smebb
-.section smeca
-.section smecb
-.section smeda
-.section smedb
-.section smeea
-.section smeeb
-.section smefa
-.section smefb
-.section smega
-.section smegb
-.section smeha
-.section smehb
-.section smeia
-.section smeib
-.section smeja
-.section smejb
-.section smeka
-.section smekb
-.section smela
-.section smelb
-.section smema
-.section smemb
-.section smena
-.section smenb
-.section smeoa
-.section smeob
-.section smepa
-.section smepb
-.section smeqa
-.section smeqb
-.section smera
-.section smerb
-.section smesa
-.section smesb
-.section smeta
-.section smetb
-.section smeua
-.section smeub
-.section smeva
-.section smevb
-.section smewa
-.section smewb
-.section smexa
-.section smexb
-.section smeya
-.section smeyb
-.section smeza
-.section smezb
-.section sme1a
-.section sme1b
-.section sme2a
-.section sme2b
-.section sme3a
-.section sme3b
-.section sme4a
-.section sme4b
-.section sme5a
-.section sme5b
-.section sme6a
-.section sme6b
-.section sme7a
-.section sme7b
-.section sme8a
-.section sme8b
-.section sme9a
-.section sme9b
-.section sme0a
-.section sme0b
-.section smfaa
-.section smfab
-.section smfba
-.section smfbb
-.section smfca
-.section smfcb
-.section smfda
-.section smfdb
-.section smfea
-.section smfeb
-.section smffa
-.section smffb
-.section smfga
-.section smfgb
-.section smfha
-.section smfhb
-.section smfia
-.section smfib
-.section smfja
-.section smfjb
-.section smfka
-.section smfkb
-.section smfla
-.section smflb
-.section smfma
-.section smfmb
-.section smfna
-.section smfnb
-.section smfoa
-.section smfob
-.section smfpa
-.section smfpb
-.section smfqa
-.section smfqb
-.section smfra
-.section smfrb
-.section smfsa
-.section smfsb
-.section smfta
-.section smftb
-.section smfua
-.section smfub
-.section smfva
-.section smfvb
-.section smfwa
-.section smfwb
-.section smfxa
-.section smfxb
-.section smfya
-.section smfyb
-.section smfza
-.section smfzb
-.section smf1a
-.section smf1b
-.section smf2a
-.section smf2b
-.section smf3a
-.section smf3b
-.section smf4a
-.section smf4b
-.section smf5a
-.section smf5b
-.section smf6a
-.section smf6b
-.section smf7a
-.section smf7b
-.section smf8a
-.section smf8b
-.section smf9a
-.section smf9b
-.section smf0a
-.section smf0b
-.section smgaa
-.section smgab
-.section smgba
-.section smgbb
-.section smgca
-.section smgcb
-.section smgda
-.section smgdb
-.section smgea
-.section smgeb
-.section smgfa
-.section smgfb
-.section smgga
-.section smggb
-.section smgha
-.section smghb
-.section smgia
-.section smgib
-.section smgja
-.section smgjb
-.section smgka
-.section smgkb
-.section smgla
-.section smglb
-.section smgma
-.section smgmb
-.section smgna
-.section smgnb
-.section smgoa
-.section smgob
-.section smgpa
-.section smgpb
-.section smgqa
-.section smgqb
-.section smgra
-.section smgrb
-.section smgsa
-.section smgsb
-.section smgta
-.section smgtb
-.section smgua
-.section smgub
-.section smgva
-.section smgvb
-.section smgwa
-.section smgwb
-.section smgxa
-.section smgxb
-.section smgya
-.section smgyb
-.section smgza
-.section smgzb
-.section smg1a
-.section smg1b
-.section smg2a
-.section smg2b
-.section smg3a
-.section smg3b
-.section smg4a
-.section smg4b
-.section smg5a
-.section smg5b
-.section smg6a
-.section smg6b
-.section smg7a
-.section smg7b
-.section smg8a
-.section smg8b
-.section smg9a
-.section smg9b
-.section smg0a
-.section smg0b
-.section smhaa
-.section smhab
-.section smhba
-.section smhbb
-.section smhca
-.section smhcb
-.section smhda
-.section smhdb
-.section smhea
-.section smheb
-.section smhfa
-.section smhfb
-.section smhga
-.section smhgb
-.section smhha
-.section smhhb
-.section smhia
-.section smhib
-.section smhja
-.section smhjb
-.section smhka
-.section smhkb
-.section smhla
-.section smhlb
-.section smhma
-.section smhmb
-.section smhna
-.section smhnb
-.section smhoa
-.section smhob
-.section smhpa
-.section smhpb
-.section smhqa
-.section smhqb
-.section smhra
-.section smhrb
-.section smhsa
-.section smhsb
-.section smhta
-.section smhtb
-.section smhua
-.section smhub
-.section smhva
-.section smhvb
-.section smhwa
-.section smhwb
-.section smhxa
-.section smhxb
-.section smhya
-.section smhyb
-.section smhza
-.section smhzb
-.section smh1a
-.section smh1b
-.section smh2a
-.section smh2b
-.section smh3a
-.section smh3b
-.section smh4a
-.section smh4b
-.section smh5a
-.section smh5b
-.section smh6a
-.section smh6b
-.section smh7a
-.section smh7b
-.section smh8a
-.section smh8b
-.section smh9a
-.section smh9b
-.section smh0a
-.section smh0b
-.section smiaa
-.section smiab
-.section smiba
-.section smibb
-.section smica
-.section smicb
-.section smida
-.section smidb
-.section smiea
-.section smieb
-.section smifa
-.section smifb
-.section smiga
-.section smigb
-.section smiha
-.section smihb
-.section smiia
-.section smiib
-.section smija
-.section smijb
-.section smika
-.section smikb
-.section smila
-.section smilb
-.section smima
-.section smimb
-.section smina
-.section sminb
-.section smioa
-.section smiob
-.section smipa
-.section smipb
-.section smiqa
-.section smiqb
-.section smira
-.section smirb
-.section smisa
-.section smisb
-.section smita
-.section smitb
-.section smiua
-.section smiub
-.section smiva
-.section smivb
-.section smiwa
-.section smiwb
-.section smixa
-.section smixb
-.section smiya
-.section smiyb
-.section smiza
-.section smizb
-.section smi1a
-.section smi1b
-.section smi2a
-.section smi2b
-.section smi3a
-.section smi3b
-.section smi4a
-.section smi4b
-.section smi5a
-.section smi5b
-.section smi6a
-.section smi6b
-.section smi7a
-.section smi7b
-.section smi8a
-.section smi8b
-.section smi9a
-.section smi9b
-.section smi0a
-.section smi0b
-.section smjaa
-.section smjab
-.section smjba
-.section smjbb
-.section smjca
-.section smjcb
-.section smjda
-.section smjdb
-.section smjea
-.section smjeb
-.section smjfa
-.section smjfb
-.section smjga
-.section smjgb
-.section smjha
-.section smjhb
-.section smjia
-.section smjib
-.section smjja
-.section smjjb
-.section smjka
-.section smjkb
-.section smjla
-.section smjlb
-.section smjma
-.section smjmb
-.section smjna
-.section smjnb
-.section smjoa
-.section smjob
-.section smjpa
-.section smjpb
-.section smjqa
-.section smjqb
-.section smjra
-.section smjrb
-.section smjsa
-.section smjsb
-.section smjta
-.section smjtb
-.section smjua
-.section smjub
-.section smjva
-.section smjvb
-.section smjwa
-.section smjwb
-.section smjxa
-.section smjxb
-.section smjya
-.section smjyb
-.section smjza
-.section smjzb
-.section smj1a
-.section smj1b
-.section smj2a
-.section smj2b
-.section smj3a
-.section smj3b
-.section smj4a
-.section smj4b
-.section smj5a
-.section smj5b
-.section smj6a
-.section smj6b
-.section smj7a
-.section smj7b
-.section smj8a
-.section smj8b
-.section smj9a
-.section smj9b
-.section smj0a
-.section smj0b
-.section smkaa
-.section smkab
-.section smkba
-.section smkbb
-.section smkca
-.section smkcb
-.section smkda
-.section smkdb
-.section smkea
-.section smkeb
-.section smkfa
-.section smkfb
-.section smkga
-.section smkgb
-.section smkha
-.section smkhb
-.section smkia
-.section smkib
-.section smkja
-.section smkjb
-.section smkka
-.section smkkb
-.section smkla
-.section smklb
-.section smkma
-.section smkmb
-.section smkna
-.section smknb
-.section smkoa
-.section smkob
-.section smkpa
-.section smkpb
-.section smkqa
-.section smkqb
-.section smkra
-.section smkrb
-.section smksa
-.section smksb
-.section smkta
-.section smktb
-.section smkua
-.section smkub
-.section smkva
-.section smkvb
-.section smkwa
-.section smkwb
-.section smkxa
-.section smkxb
-.section smkya
-.section smkyb
-.section smkza
-.section smkzb
-.section smk1a
-.section smk1b
-.section smk2a
-.section smk2b
-.section smk3a
-.section smk3b
-.section smk4a
-.section smk4b
-.section smk5a
-.section smk5b
-.section smk6a
-.section smk6b
-.section smk7a
-.section smk7b
-.section smk8a
-.section smk8b
-.section smk9a
-.section smk9b
-.section smk0a
-.section smk0b
-.section smlaa
-.section smlab
-.section smlba
-.section smlbb
-.section smlca
-.section smlcb
-.section smlda
-.section smldb
-.section smlea
-.section smleb
-.section smlfa
-.section smlfb
-.section smlga
-.section smlgb
-.section smlha
-.section smlhb
-.section smlia
-.section smlib
-.section smlja
-.section smljb
-.section smlka
-.section smlkb
-.section smlla
-.section smllb
-.section smlma
-.section smlmb
-.section smlna
-.section smlnb
-.section smloa
-.section smlob
-.section smlpa
-.section smlpb
-.section smlqa
-.section smlqb
-.section smlra
-.section smlrb
-.section smlsa
-.section smlsb
-.section smlta
-.section smltb
-.section smlua
-.section smlub
-.section smlva
-.section smlvb
-.section smlwa
-.section smlwb
-.section smlxa
-.section smlxb
-.section smlya
-.section smlyb
-.section smlza
-.section smlzb
-.section sml1a
-.section sml1b
-.section sml2a
-.section sml2b
-.section sml3a
-.section sml3b
-.section sml4a
-.section sml4b
-.section sml5a
-.section sml5b
-.section sml6a
-.section sml6b
-.section sml7a
-.section sml7b
-.section sml8a
-.section sml8b
-.section sml9a
-.section sml9b
-.section sml0a
-.section sml0b
-.section smmaa
-.section smmab
-.section smmba
-.section smmbb
-.section smmca
-.section smmcb
-.section smmda
-.section smmdb
-.section smmea
-.section smmeb
-.section smmfa
-.section smmfb
-.section smmga
-.section smmgb
-.section smmha
-.section smmhb
-.section smmia
-.section smmib
-.section smmja
-.section smmjb
-.section smmka
-.section smmkb
-.section smmla
-.section smmlb
-.section smmma
-.section smmmb
-.section smmna
-.section smmnb
-.section smmoa
-.section smmob
-.section smmpa
-.section smmpb
-.section smmqa
-.section smmqb
-.section smmra
-.section smmrb
-.section smmsa
-.section smmsb
-.section smmta
-.section smmtb
-.section smmua
-.section smmub
-.section smmva
-.section smmvb
-.section smmwa
-.section smmwb
-.section smmxa
-.section smmxb
-.section smmya
-.section smmyb
-.section smmza
-.section smmzb
-.section smm1a
-.section smm1b
-.section smm2a
-.section smm2b
-.section smm3a
-.section smm3b
-.section smm4a
-.section smm4b
-.section smm5a
-.section smm5b
-.section smm6a
-.section smm6b
-.section smm7a
-.section smm7b
-.section smm8a
-.section smm8b
-.section smm9a
-.section smm9b
-.section smm0a
-.section smm0b
-.section smnaa
-.section smnab
-.section smnba
-.section smnbb
-.section smnca
-.section smncb
-.section smnda
-.section smndb
-.section smnea
-.section smneb
-.section smnfa
-.section smnfb
-.section smnga
-.section smngb
-.section smnha
-.section smnhb
-.section smnia
-.section smnib
-.section smnja
-.section smnjb
-.section smnka
-.section smnkb
-.section smnla
-.section smnlb
-.section smnma
-.section smnmb
-.section smnna
-.section smnnb
-.section smnoa
-.section smnob
-.section smnpa
-.section smnpb
-.section smnqa
-.section smnqb
-.section smnra
-.section smnrb
-.section smnsa
-.section smnsb
-.section smnta
-.section smntb
-.section smnua
-.section smnub
-.section smnva
-.section smnvb
-.section smnwa
-.section smnwb
-.section smnxa
-.section smnxb
-.section smnya
-.section smnyb
-.section smnza
-.section smnzb
-.section smn1a
-.section smn1b
-.section smn2a
-.section smn2b
-.section smn3a
-.section smn3b
-.section smn4a
-.section smn4b
-.section smn5a
-.section smn5b
-.section smn6a
-.section smn6b
-.section smn7a
-.section smn7b
-.section smn8a
-.section smn8b
-.section smn9a
-.section smn9b
-.section smn0a
-.section smn0b
-.section smoaa
-.section smoab
-.section smoba
-.section smobb
-.section smoca
-.section smocb
-.section smoda
-.section smodb
-.section smoea
-.section smoeb
-.section smofa
-.section smofb
-.section smoga
-.section smogb
-.section smoha
-.section smohb
-.section smoia
-.section smoib
-.section smoja
-.section smojb
-.section smoka
-.section smokb
-.section smola
-.section smolb
-.section smoma
-.section smomb
-.section smona
-.section smonb
-.section smooa
-.section smoob
-.section smopa
-.section smopb
-.section smoqa
-.section smoqb
-.section smora
-.section smorb
-.section smosa
-.section smosb
-.section smota
-.section smotb
-.section smoua
-.section smoub
-.section smova
-.section smovb
-.section smowa
-.section smowb
-.section smoxa
-.section smoxb
-.section smoya
-.section smoyb
-.section smoza
-.section smozb
-.section smo1a
-.section smo1b
-.section smo2a
-.section smo2b
-.section smo3a
-.section smo3b
-.section smo4a
-.section smo4b
-.section smo5a
-.section smo5b
-.section smo6a
-.section smo6b
-.section smo7a
-.section smo7b
-.section smo8a
-.section smo8b
-.section smo9a
-.section smo9b
-.section smo0a
-.section smo0b
-.section smpaa
-.section smpab
-.section smpba
-.section smpbb
-.section smpca
-.section smpcb
-.section smpda
-.section smpdb
-.section smpea
-.section smpeb
-.section smpfa
-.section smpfb
-.section smpga
-.section smpgb
-.section smpha
-.section smphb
-.section smpia
-.section smpib
-.section smpja
-.section smpjb
-.section smpka
-.section smpkb
-.section smpla
-.section smplb
-.section smpma
-.section smpmb
-.section smpna
-.section smpnb
-.section smpoa
-.section smpob
-.section smppa
-.section smppb
-.section smpqa
-.section smpqb
-.section smpra
-.section smprb
-.section smpsa
-.section smpsb
-.section smpta
-.section smptb
-.section smpua
-.section smpub
-.section smpva
-.section smpvb
-.section smpwa
-.section smpwb
-.section smpxa
-.section smpxb
-.section smpya
-.section smpyb
-.section smpza
-.section smpzb
-.section smp1a
-.section smp1b
-.section smp2a
-.section smp2b
-.section smp3a
-.section smp3b
-.section smp4a
-.section smp4b
-.section smp5a
-.section smp5b
-.section smp6a
-.section smp6b
-.section smp7a
-.section smp7b
-.section smp8a
-.section smp8b
-.section smp9a
-.section smp9b
-.section smp0a
-.section smp0b
-.section smqaa
-.section smqab
-.section smqba
-.section smqbb
-.section smqca
-.section smqcb
-.section smqda
-.section smqdb
-.section smqea
-.section smqeb
-.section smqfa
-.section smqfb
-.section smqga
-.section smqgb
-.section smqha
-.section smqhb
-.section smqia
-.section smqib
-.section smqja
-.section smqjb
-.section smqka
-.section smqkb
-.section smqla
-.section smqlb
-.section smqma
-.section smqmb
-.section smqna
-.section smqnb
-.section smqoa
-.section smqob
-.section smqpa
-.section smqpb
-.section smqqa
-.section smqqb
-.section smqra
-.section smqrb
-.section smqsa
-.section smqsb
-.section smqta
-.section smqtb
-.section smqua
-.section smqub
-.section smqva
-.section smqvb
-.section smqwa
-.section smqwb
-.section smqxa
-.section smqxb
-.section smqya
-.section smqyb
-.section smqza
-.section smqzb
-.section smq1a
-.section smq1b
-.section smq2a
-.section smq2b
-.section smq3a
-.section smq3b
-.section smq4a
-.section smq4b
-.section smq5a
-.section smq5b
-.section smq6a
-.section smq6b
-.section smq7a
-.section smq7b
-.section smq8a
-.section smq8b
-.section smq9a
-.section smq9b
-.section smq0a
-.section smq0b
-.section smraa
-.section smrab
-.section smrba
-.section smrbb
-.section smrca
-.section smrcb
-.section smrda
-.section smrdb
-.section smrea
-.section smreb
-.section smrfa
-.section smrfb
-.section smrga
-.section smrgb
-.section smrha
-.section smrhb
-.section smria
-.section smrib
-.section smrja
-.section smrjb
-.section smrka
-.section smrkb
-.section smrla
-.section smrlb
-.section smrma
-.section smrmb
-.section smrna
-.section smrnb
-.section smroa
-.section smrob
-.section smrpa
-.section smrpb
-.section smrqa
-.section smrqb
-.section smrra
-.section smrrb
-.section smrsa
-.section smrsb
-.section smrta
-.section smrtb
-.section smrua
-.section smrub
-.section smrva
-.section smrvb
-.section smrwa
-.section smrwb
-.section smrxa
-.section smrxb
-.section smrya
-.section smryb
-.section smrza
-.section smrzb
-.section smr1a
-.section smr1b
-.section smr2a
-.section smr2b
-.section smr3a
-.section smr3b
-.section smr4a
-.section smr4b
-.section smr5a
-.section smr5b
-.section smr6a
-.section smr6b
-.section smr7a
-.section smr7b
-.section smr8a
-.section smr8b
-.section smr9a
-.section smr9b
-.section smr0a
-.section smr0b
-.section smsaa
-.section smsab
-.section smsba
-.section smsbb
-.section smsca
-.section smscb
-.section smsda
-.section smsdb
-.section smsea
-.section smseb
-.section smsfa
-.section smsfb
-.section smsga
-.section smsgb
-.section smsha
-.section smshb
-.section smsia
-.section smsib
-.section smsja
-.section smsjb
-.section smska
-.section smskb
-.section smsla
-.section smslb
-.section smsma
-.section smsmb
-.section smsna
-.section smsnb
-.section smsoa
-.section smsob
-.section smspa
-.section smspb
-.section smsqa
-.section smsqb
-.section smsra
-.section smsrb
-.section smssa
-.section smssb
-.section smsta
-.section smstb
-.section smsua
-.section smsub
-.section smsva
-.section smsvb
-.section smswa
-.section smswb
-.section smsxa
-.section smsxb
-.section smsya
-.section smsyb
-.section smsza
-.section smszb
-.section sms1a
-.section sms1b
-.section sms2a
-.section sms2b
-.section sms3a
-.section sms3b
-.section sms4a
-.section sms4b
-.section sms5a
-.section sms5b
-.section sms6a
-.section sms6b
-.section sms7a
-.section sms7b
-.section sms8a
-.section sms8b
-.section sms9a
-.section sms9b
-.section sms0a
-.section sms0b
-.section smtaa
-.section smtab
-.section smtba
-.section smtbb
-.section smtca
-.section smtcb
-.section smtda
-.section smtdb
-.section smtea
-.section smteb
-.section smtfa
-.section smtfb
-.section smtga
-.section smtgb
-.section smtha
-.section smthb
-.section smtia
-.section smtib
-.section smtja
-.section smtjb
-.section smtka
-.section smtkb
-.section smtla
-.section smtlb
-.section smtma
-.section smtmb
-.section smtna
-.section smtnb
-.section smtoa
-.section smtob
-.section smtpa
-.section smtpb
-.section smtqa
-.section smtqb
-.section smtra
-.section smtrb
-.section smtsa
-.section smtsb
-.section smtta
-.section smttb
-.section smtua
-.section smtub
-.section smtva
-.section smtvb
-.section smtwa
-.section smtwb
-.section smtxa
-.section smtxb
-.section smtya
-.section smtyb
-.section smtza
-.section smtzb
-.section smt1a
-.section smt1b
-.section smt2a
-.section smt2b
-.section smt3a
-.section smt3b
-.section smt4a
-.section smt4b
-.section smt5a
-.section smt5b
-.section smt6a
-.section smt6b
-.section smt7a
-.section smt7b
-.section smt8a
-.section smt8b
-.section smt9a
-.section smt9b
-.section smt0a
-.section smt0b
-.section smuaa
-.section smuab
-.section smuba
-.section smubb
-.section smuca
-.section smucb
-.section smuda
-.section smudb
-.section smuea
-.section smueb
-.section smufa
-.section smufb
-.section smuga
-.section smugb
-.section smuha
-.section smuhb
-.section smuia
-.section smuib
-.section smuja
-.section smujb
-.section smuka
-.section smukb
-.section smula
-.section smulb
-.section smuma
-.section smumb
-.section smuna
-.section smunb
-.section smuoa
-.section smuob
-.section smupa
-.section smupb
-.section smuqa
-.section smuqb
-.section smura
-.section smurb
-.section smusa
-.section smusb
-.section smuta
-.section smutb
-.section smuua
-.section smuub
-.section smuva
-.section smuvb
-.section smuwa
-.section smuwb
-.section smuxa
-.section smuxb
-.section smuya
-.section smuyb
-.section smuza
-.section smuzb
-.section smu1a
-.section smu1b
-.section smu2a
-.section smu2b
-.section smu3a
-.section smu3b
-.section smu4a
-.section smu4b
-.section smu5a
-.section smu5b
-.section smu6a
-.section smu6b
-.section smu7a
-.section smu7b
-.section smu8a
-.section smu8b
-.section smu9a
-.section smu9b
-.section smu0a
-.section smu0b
-.section smvaa
-.section smvab
-.section smvba
-.section smvbb
-.section smvca
-.section smvcb
-.section smvda
-.section smvdb
-.section smvea
-.section smveb
-.section smvfa
-.section smvfb
-.section smvga
-.section smvgb
-.section smvha
-.section smvhb
-.section smvia
-.section smvib
-.section smvja
-.section smvjb
-.section smvka
-.section smvkb
-.section smvla
-.section smvlb
-.section smvma
-.section smvmb
-.section smvna
-.section smvnb
-.section smvoa
-.section smvob
-.section smvpa
-.section smvpb
-.section smvqa
-.section smvqb
-.section smvra
-.section smvrb
-.section smvsa
-.section smvsb
-.section smvta
-.section smvtb
-.section smvua
-.section smvub
-.section smvva
-.section smvvb
-.section smvwa
-.section smvwb
-.section smvxa
-.section smvxb
-.section smvya
-.section smvyb
-.section smvza
-.section smvzb
-.section smv1a
-.section smv1b
-.section smv2a
-.section smv2b
-.section smv3a
-.section smv3b
-.section smv4a
-.section smv4b
-.section smv5a
-.section smv5b
-.section smv6a
-.section smv6b
-.section smv7a
-.section smv7b
-.section smv8a
-.section smv8b
-.section smv9a
-.section smv9b
-.section smv0a
-.section smv0b
-.section smwaa
-.section smwab
-.section smwba
-.section smwbb
-.section smwca
-.section smwcb
-.section smwda
-.section smwdb
-.section smwea
-.section smweb
-.section smwfa
-.section smwfb
-.section smwga
-.section smwgb
-.section smwha
-.section smwhb
-.section smwia
-.section smwib
-.section smwja
-.section smwjb
-.section smwka
-.section smwkb
-.section smwla
-.section smwlb
-.section smwma
-.section smwmb
-.section smwna
-.section smwnb
-.section smwoa
-.section smwob
-.section smwpa
-.section smwpb
-.section smwqa
-.section smwqb
-.section smwra
-.section smwrb
-.section smwsa
-.section smwsb
-.section smwta
-.section smwtb
-.section smwua
-.section smwub
-.section smwva
-.section smwvb
-.section smwwa
-.section smwwb
-.section smwxa
-.section smwxb
-.section smwya
-.section smwyb
-.section smwza
-.section smwzb
-.section smw1a
-.section smw1b
-.section smw2a
-.section smw2b
-.section smw3a
-.section smw3b
-.section smw4a
-.section smw4b
-.section smw5a
-.section smw5b
-.section smw6a
-.section smw6b
-.section smw7a
-.section smw7b
-.section smw8a
-.section smw8b
-.section smw9a
-.section smw9b
-.section smw0a
-.section smw0b
-.section smxaa
-.section smxab
-.section smxba
-.section smxbb
-.section smxca
-.section smxcb
-.section smxda
-.section smxdb
-.section smxea
-.section smxeb
-.section smxfa
-.section smxfb
-.section smxga
-.section smxgb
-.section smxha
-.section smxhb
-.section smxia
-.section smxib
-.section smxja
-.section smxjb
-.section smxka
-.section smxkb
-.section smxla
-.section smxlb
-.section smxma
-.section smxmb
-.section smxna
-.section smxnb
-.section smxoa
-.section smxob
-.section smxpa
-.section smxpb
-.section smxqa
-.section smxqb
-.section smxra
-.section smxrb
-.section smxsa
-.section smxsb
-.section smxta
-.section smxtb
-.section smxua
-.section smxub
-.section smxva
-.section smxvb
-.section smxwa
-.section smxwb
-.section smxxa
-.section smxxb
-.section smxya
-.section smxyb
-.section smxza
-.section smxzb
-.section smx1a
-.section smx1b
-.section smx2a
-.section smx2b
-.section smx3a
-.section smx3b
-.section smx4a
-.section smx4b
-.section smx5a
-.section smx5b
-.section smx6a
-.section smx6b
-.section smx7a
-.section smx7b
-.section smx8a
-.section smx8b
-.section smx9a
-.section smx9b
-.section smx0a
-.section smx0b
-.section smyaa
-.section smyab
-.section smyba
-.section smybb
-.section smyca
-.section smycb
-.section smyda
-.section smydb
-.section smyea
-.section smyeb
-.section smyfa
-.section smyfb
-.section smyga
-.section smygb
-.section smyha
-.section smyhb
-.section smyia
-.section smyib
-.section smyja
-.section smyjb
-.section smyka
-.section smykb
-.section smyla
-.section smylb
-.section smyma
-.section smymb
-.section smyna
-.section smynb
-.section smyoa
-.section smyob
-.section smypa
-.section smypb
-.section smyqa
-.section smyqb
-.section smyra
-.section smyrb
-.section smysa
-.section smysb
-.section smyta
-.section smytb
-.section smyua
-.section smyub
-.section smyva
-.section smyvb
-.section smywa
-.section smywb
-.section smyxa
-.section smyxb
-.section smyya
-.section smyyb
-.section smyza
-.section smyzb
-.section smy1a
-.section smy1b
-.section smy2a
-.section smy2b
-.section smy3a
-.section smy3b
-.section smy4a
-.section smy4b
-.section smy5a
-.section smy5b
-.section smy6a
-.section smy6b
-.section smy7a
-.section smy7b
-.section smy8a
-.section smy8b
-.section smy9a
-.section smy9b
-.section smy0a
-.section smy0b
-.section smzaa
-.section smzab
-.section smzba
-.section smzbb
-.section smzca
-.section smzcb
-.section smzda
-.section smzdb
-.section smzea
-.section smzeb
-.section smzfa
-.section smzfb
-.section smzga
-.section smzgb
-.section smzha
-.section smzhb
-.section smzia
-.section smzib
-.section smzja
-.section smzjb
-.section smzka
-.section smzkb
-.section smzla
-.section smzlb
-.section smzma
-.section smzmb
-.section smzna
-.section smznb
-.section smzoa
-.section smzob
-.section smzpa
-.section smzpb
-.section smzqa
-.section smzqb
-.section smzra
-.section smzrb
-.section smzsa
-.section smzsb
-.section smzta
-.section smztb
-.section smzua
-.section smzub
-.section smzva
-.section smzvb
-.section smzwa
-.section smzwb
-.section smzxa
-.section smzxb
-.section smzya
-.section smzyb
-.section smzza
-.section smzzb
-.section smz1a
-.section smz1b
-.section smz2a
-.section smz2b
-.section smz3a
-.section smz3b
-.section smz4a
-.section smz4b
-.section smz5a
-.section smz5b
-.section smz6a
-.section smz6b
-.section smz7a
-.section smz7b
-.section smz8a
-.section smz8b
-.section smz9a
-.section smz9b
-.section smz0a
-.section smz0b
-.section sm1aa
-.section sm1ab
-.section sm1ba
-.section sm1bb
-.section sm1ca
-.section sm1cb
-.section sm1da
-.section sm1db
-.section sm1ea
-.section sm1eb
-.section sm1fa
-.section sm1fb
-.section sm1ga
-.section sm1gb
-.section sm1ha
-.section sm1hb
-.section sm1ia
-.section sm1ib
-.section sm1ja
-.section sm1jb
-.section sm1ka
-.section sm1kb
-.section sm1la
-.section sm1lb
-.section sm1ma
-.section sm1mb
-.section sm1na
-.section sm1nb
-.section sm1oa
-.section sm1ob
-.section sm1pa
-.section sm1pb
-.section sm1qa
-.section sm1qb
-.section sm1ra
-.section sm1rb
-.section sm1sa
-.section sm1sb
-.section sm1ta
-.section sm1tb
-.section sm1ua
-.section sm1ub
-.section sm1va
-.section sm1vb
-.section sm1wa
-.section sm1wb
-.section sm1xa
-.section sm1xb
-.section sm1ya
-.section sm1yb
-.section sm1za
-.section sm1zb
-.section sm11a
-.section sm11b
-.section sm12a
-.section sm12b
-.section sm13a
-.section sm13b
-.section sm14a
-.section sm14b
-.section sm15a
-.section sm15b
-.section sm16a
-.section sm16b
-.section sm17a
-.section sm17b
-.section sm18a
-.section sm18b
-.section sm19a
-.section sm19b
-.section sm10a
-.section sm10b
-.section sm2aa
-.section sm2ab
-.section sm2ba
-.section sm2bb
-.section sm2ca
-.section sm2cb
-.section sm2da
-.section sm2db
-.section sm2ea
-.section sm2eb
-.section sm2fa
-.section sm2fb
-.section sm2ga
-.section sm2gb
-.section sm2ha
-.section sm2hb
-.section sm2ia
-.section sm2ib
-.section sm2ja
-.section sm2jb
-.section sm2ka
-.section sm2kb
-.section sm2la
-.section sm2lb
-.section sm2ma
-.section sm2mb
-.section sm2na
-.section sm2nb
-.section sm2oa
-.section sm2ob
-.section sm2pa
-.section sm2pb
-.section sm2qa
-.section sm2qb
-.section sm2ra
-.section sm2rb
-.section sm2sa
-.section sm2sb
-.section sm2ta
-.section sm2tb
-.section sm2ua
-.section sm2ub
-.section sm2va
-.section sm2vb
-.section sm2wa
-.section sm2wb
-.section sm2xa
-.section sm2xb
-.section sm2ya
-.section sm2yb
-.section sm2za
-.section sm2zb
-.section sm21a
-.section sm21b
-.section sm22a
-.section sm22b
-.section sm23a
-.section sm23b
-.section sm24a
-.section sm24b
-.section sm25a
-.section sm25b
-.section sm26a
-.section sm26b
-.section sm27a
-.section sm27b
-.section sm28a
-.section sm28b
-.section sm29a
-.section sm29b
-.section sm20a
-.section sm20b
-.section sm3aa
-.section sm3ab
-.section sm3ba
-.section sm3bb
-.section sm3ca
-.section sm3cb
-.section sm3da
-.section sm3db
-.section sm3ea
-.section sm3eb
-.section sm3fa
-.section sm3fb
-.section sm3ga
-.section sm3gb
-.section sm3ha
-.section sm3hb
-.section sm3ia
-.section sm3ib
-.section sm3ja
-.section sm3jb
-.section sm3ka
-.section sm3kb
-.section sm3la
-.section sm3lb
-.section sm3ma
-.section sm3mb
-.section sm3na
-.section sm3nb
-.section sm3oa
-.section sm3ob
-.section sm3pa
-.section sm3pb
-.section sm3qa
-.section sm3qb
-.section sm3ra
-.section sm3rb
-.section sm3sa
-.section sm3sb
-.section sm3ta
-.section sm3tb
-.section sm3ua
-.section sm3ub
-.section sm3va
-.section sm3vb
-.section sm3wa
-.section sm3wb
-.section sm3xa
-.section sm3xb
-.section sm3ya
-.section sm3yb
-.section sm3za
-.section sm3zb
-.section sm31a
-.section sm31b
-.section sm32a
-.section sm32b
-.section sm33a
-.section sm33b
-.section sm34a
-.section sm34b
-.section sm35a
-.section sm35b
-.section sm36a
-.section sm36b
-.section sm37a
-.section sm37b
-.section sm38a
-.section sm38b
-.section sm39a
-.section sm39b
-.section sm30a
-.section sm30b
-.section sm4aa
-.section sm4ab
-.section sm4ba
-.section sm4bb
-.section sm4ca
-.section sm4cb
-.section sm4da
-.section sm4db
-.section sm4ea
-.section sm4eb
-.section sm4fa
-.section sm4fb
-.section sm4ga
-.section sm4gb
-.section sm4ha
-.section sm4hb
-.section sm4ia
-.section sm4ib
-.section sm4ja
-.section sm4jb
-.section sm4ka
-.section sm4kb
-.section sm4la
-.section sm4lb
-.section sm4ma
-.section sm4mb
-.section sm4na
-.section sm4nb
-.section sm4oa
-.section sm4ob
-.section sm4pa
-.section sm4pb
-.section sm4qa
-.section sm4qb
-.section sm4ra
-.section sm4rb
-.section sm4sa
-.section sm4sb
-.section sm4ta
-.section sm4tb
-.section sm4ua
-.section sm4ub
-.section sm4va
-.section sm4vb
-.section sm4wa
-.section sm4wb
-.section sm4xa
-.section sm4xb
-.section sm4ya
-.section sm4yb
-.section sm4za
-.section sm4zb
-.section sm41a
-.section sm41b
-.section sm42a
-.section sm42b
-.section sm43a
-.section sm43b
-.section sm44a
-.section sm44b
-.section sm45a
-.section sm45b
-.section sm46a
-.section sm46b
-.section sm47a
-.section sm47b
-.section sm48a
-.section sm48b
-.section sm49a
-.section sm49b
-.section sm40a
-.section sm40b
-.section sm5aa
-.section sm5ab
-.section sm5ba
-.section sm5bb
-.section sm5ca
-.section sm5cb
-.section sm5da
-.section sm5db
-.section sm5ea
-.section sm5eb
-.section sm5fa
-.section sm5fb
-.section sm5ga
-.section sm5gb
-.section sm5ha
-.section sm5hb
-.section sm5ia
-.section sm5ib
-.section sm5ja
-.section sm5jb
-.section sm5ka
-.section sm5kb
-.section sm5la
-.section sm5lb
-.section sm5ma
-.section sm5mb
-.section sm5na
-.section sm5nb
-.section sm5oa
-.section sm5ob
-.section sm5pa
-.section sm5pb
-.section sm5qa
-.section sm5qb
-.section sm5ra
-.section sm5rb
-.section sm5sa
-.section sm5sb
-.section sm5ta
-.section sm5tb
-.section sm5ua
-.section sm5ub
-.section sm5va
-.section sm5vb
-.section sm5wa
-.section sm5wb
-.section sm5xa
-.section sm5xb
-.section sm5ya
-.section sm5yb
-.section sm5za
-.section sm5zb
-.section sm51a
-.section sm51b
-.section sm52a
-.section sm52b
-.section sm53a
-.section sm53b
-.section sm54a
-.section sm54b
-.section sm55a
-.section sm55b
-.section sm56a
-.section sm56b
-.section sm57a
-.section sm57b
-.section sm58a
-.section sm58b
-.section sm59a
-.section sm59b
-.section sm50a
-.section sm50b
-.section sm6aa
-.section sm6ab
-.section sm6ba
-.section sm6bb
-.section sm6ca
-.section sm6cb
-.section sm6da
-.section sm6db
-.section sm6ea
-.section sm6eb
-.section sm6fa
-.section sm6fb
-.section sm6ga
-.section sm6gb
-.section sm6ha
-.section sm6hb
-.section sm6ia
-.section sm6ib
-.section sm6ja
-.section sm6jb
-.section sm6ka
-.section sm6kb
-.section sm6la
-.section sm6lb
-.section sm6ma
-.section sm6mb
-.section sm6na
-.section sm6nb
-.section sm6oa
-.section sm6ob
-.section sm6pa
-.section sm6pb
-.section sm6qa
-.section sm6qb
-.section sm6ra
-.section sm6rb
-.section sm6sa
-.section sm6sb
-.section sm6ta
-.section sm6tb
-.section sm6ua
-.section sm6ub
-.section sm6va
-.section sm6vb
-.section sm6wa
-.section sm6wb
-.section sm6xa
-.section sm6xb
-.section sm6ya
-.section sm6yb
-.section sm6za
-.section sm6zb
-.section sm61a
-.section sm61b
-.section sm62a
-.section sm62b
-.section sm63a
-.section sm63b
-.section sm64a
-.section sm64b
-.section sm65a
-.section sm65b
-.section sm66a
-.section sm66b
-.section sm67a
-.section sm67b
-.section sm68a
-.section sm68b
-.section sm69a
-.section sm69b
-.section sm60a
-.section sm60b
-.section sm7aa
-.section sm7ab
-.section sm7ba
-.section sm7bb
-.section sm7ca
-.section sm7cb
-.section sm7da
-.section sm7db
-.section sm7ea
-.section sm7eb
-.section sm7fa
-.section sm7fb
-.section sm7ga
-.section sm7gb
-.section sm7ha
-.section sm7hb
-.section sm7ia
-.section sm7ib
-.section sm7ja
-.section sm7jb
-.section sm7ka
-.section sm7kb
-.section sm7la
-.section sm7lb
-.section sm7ma
-.section sm7mb
-.section sm7na
-.section sm7nb
-.section sm7oa
-.section sm7ob
-.section sm7pa
-.section sm7pb
-.section sm7qa
-.section sm7qb
-.section sm7ra
-.section sm7rb
-.section sm7sa
-.section sm7sb
-.section sm7ta
-.section sm7tb
-.section sm7ua
-.section sm7ub
-.section sm7va
-.section sm7vb
-.section sm7wa
-.section sm7wb
-.section sm7xa
-.section sm7xb
-.section sm7ya
-.section sm7yb
-.section sm7za
-.section sm7zb
-.section sm71a
-.section sm71b
-.section sm72a
-.section sm72b
-.section sm73a
-.section sm73b
-.section sm74a
-.section sm74b
-.section sm75a
-.section sm75b
-.section sm76a
-.section sm76b
-.section sm77a
-.section sm77b
-.section sm78a
-.section sm78b
-.section sm79a
-.section sm79b
-.section sm70a
-.section sm70b
-.section sm8aa
-.section sm8ab
-.section sm8ba
-.section sm8bb
-.section sm8ca
-.section sm8cb
-.section sm8da
-.section sm8db
-.section sm8ea
-.section sm8eb
-.section sm8fa
-.section sm8fb
-.section sm8ga
-.section sm8gb
-.section sm8ha
-.section sm8hb
-.section sm8ia
-.section sm8ib
-.section sm8ja
-.section sm8jb
-.section sm8ka
-.section sm8kb
-.section sm8la
-.section sm8lb
-.section sm8ma
-.section sm8mb
-.section sm8na
-.section sm8nb
-.section sm8oa
-.section sm8ob
-.section sm8pa
-.section sm8pb
-.section sm8qa
-.section sm8qb
-.section sm8ra
-.section sm8rb
-.section sm8sa
-.section sm8sb
-.section sm8ta
-.section sm8tb
-.section sm8ua
-.section sm8ub
-.section sm8va
-.section sm8vb
-.section sm8wa
-.section sm8wb
-.section sm8xa
-.section sm8xb
-.section sm8ya
-.section sm8yb
-.section sm8za
-.section sm8zb
-.section sm81a
-.section sm81b
-.section sm82a
-.section sm82b
-.section sm83a
-.section sm83b
-.section sm84a
-.section sm84b
-.section sm85a
-.section sm85b
-.section sm86a
-.section sm86b
-.section sm87a
-.section sm87b
-.section sm88a
-.section sm88b
-.section sm89a
-.section sm89b
-.section sm80a
-.section sm80b
-.section sm9aa
-.section sm9ab
-.section sm9ba
-.section sm9bb
-.section sm9ca
-.section sm9cb
-.section sm9da
-.section sm9db
-.section sm9ea
-.section sm9eb
-.section sm9fa
-.section sm9fb
-.section sm9ga
-.section sm9gb
-.section sm9ha
-.section sm9hb
-.section sm9ia
-.section sm9ib
-.section sm9ja
-.section sm9jb
-.section sm9ka
-.section sm9kb
-.section sm9la
-.section sm9lb
-.section sm9ma
-.section sm9mb
-.section sm9na
-.section sm9nb
-.section sm9oa
-.section sm9ob
-.section sm9pa
-.section sm9pb
-.section sm9qa
-.section sm9qb
-.section sm9ra
-.section sm9rb
-.section sm9sa
-.section sm9sb
-.section sm9ta
-.section sm9tb
-.section sm9ua
-.section sm9ub
-.section sm9va
-.section sm9vb
-.section sm9wa
-.section sm9wb
-.section sm9xa
-.section sm9xb
-.section sm9ya
-.section sm9yb
-.section sm9za
-.section sm9zb
-.section sm91a
-.section sm91b
-.section sm92a
-.section sm92b
-.section sm93a
-.section sm93b
-.section sm94a
-.section sm94b
-.section sm95a
-.section sm95b
-.section sm96a
-.section sm96b
-.section sm97a
-.section sm97b
-.section sm98a
-.section sm98b
-.section sm99a
-.section sm99b
-.section sm90a
-.section sm90b
-.section sm0aa
-.section sm0ab
-.section sm0ba
-.section sm0bb
-.section sm0ca
-.section sm0cb
-.section sm0da
-.section sm0db
-.section sm0ea
-.section sm0eb
-.section sm0fa
-.section sm0fb
-.section sm0ga
-.section sm0gb
-.section sm0ha
-.section sm0hb
-.section sm0ia
-.section sm0ib
-.section sm0ja
-.section sm0jb
-.section sm0ka
-.section sm0kb
-.section sm0la
-.section sm0lb
-.section sm0ma
-.section sm0mb
-.section sm0na
-.section sm0nb
-.section sm0oa
-.section sm0ob
-.section sm0pa
-.section sm0pb
-.section sm0qa
-.section sm0qb
-.section sm0ra
-.section sm0rb
-.section sm0sa
-.section sm0sb
-.section sm0ta
-.section sm0tb
-.section sm0ua
-.section sm0ub
-.section sm0va
-.section sm0vb
-.section sm0wa
-.section sm0wb
-.section sm0xa
-.section sm0xb
-.section sm0ya
-.section sm0yb
-.section sm0za
-.section sm0zb
-.section sm01a
-.section sm01b
-.section sm02a
-.section sm02b
-.section sm03a
-.section sm03b
-.section sm04a
-.section sm04b
-.section sm05a
-.section sm05b
-.section sm06a
-.section sm06b
-.section sm07a
-.section sm07b
-.section sm08a
-.section sm08b
-.section sm09a
-.section sm09b
-.section sm00a
-.section sm00b
-.section snaaa
-.section snaab
-.section snaba
-.section snabb
-.section snaca
-.section snacb
-.section snada
-.section snadb
-.section snaea
-.section snaeb
-.section snafa
-.section snafb
-.section snaga
-.section snagb
-.section snaha
-.section snahb
-.section snaia
-.section snaib
-.section snaja
-.section snajb
-.section snaka
-.section snakb
-.section snala
-.section snalb
-.section snama
-.section snamb
-.section snana
-.section snanb
-.section snaoa
-.section snaob
-.section snapa
-.section snapb
-.section snaqa
-.section snaqb
-.section snara
-.section snarb
-.section snasa
-.section snasb
-.section snata
-.section snatb
-.section snaua
-.section snaub
-.section snava
-.section snavb
-.section snawa
-.section snawb
-.section snaxa
-.section snaxb
-.section snaya
-.section snayb
-.section snaza
-.section snazb
-.section sna1a
-.section sna1b
-.section sna2a
-.section sna2b
-.section sna3a
-.section sna3b
-.section sna4a
-.section sna4b
-.section sna5a
-.section sna5b
-.section sna6a
-.section sna6b
-.section sna7a
-.section sna7b
-.section sna8a
-.section sna8b
-.section sna9a
-.section sna9b
-.section sna0a
-.section sna0b
-.section snbaa
-.section snbab
-.section snbba
-.section snbbb
-.section snbca
-.section snbcb
-.section snbda
-.section snbdb
-.section snbea
-.section snbeb
-.section snbfa
-.section snbfb
-.section snbga
-.section snbgb
-.section snbha
-.section snbhb
-.section snbia
-.section snbib
-.section snbja
-.section snbjb
-.section snbka
-.section snbkb
-.section snbla
-.section snblb
-.section snbma
-.section snbmb
-.section snbna
-.section snbnb
-.section snboa
-.section snbob
-.section snbpa
-.section snbpb
-.section snbqa
-.section snbqb
-.section snbra
-.section snbrb
-.section snbsa
-.section snbsb
-.section snbta
-.section snbtb
-.section snbua
-.section snbub
-.section snbva
-.section snbvb
-.section snbwa
-.section snbwb
-.section snbxa
-.section snbxb
-.section snbya
-.section snbyb
-.section snbza
-.section snbzb
-.section snb1a
-.section snb1b
-.section snb2a
-.section snb2b
-.section snb3a
-.section snb3b
-.section snb4a
-.section snb4b
-.section snb5a
-.section snb5b
-.section snb6a
-.section snb6b
-.section snb7a
-.section snb7b
-.section snb8a
-.section snb8b
-.section snb9a
-.section snb9b
-.section snb0a
-.section snb0b
-.section sncaa
-.section sncab
-.section sncba
-.section sncbb
-.section sncca
-.section snccb
-.section sncda
-.section sncdb
-.section sncea
-.section snceb
-.section sncfa
-.section sncfb
-.section sncga
-.section sncgb
-.section sncha
-.section snchb
-.section sncia
-.section sncib
-.section sncja
-.section sncjb
-.section sncka
-.section snckb
-.section sncla
-.section snclb
-.section sncma
-.section sncmb
-.section sncna
-.section sncnb
-.section sncoa
-.section sncob
-.section sncpa
-.section sncpb
-.section sncqa
-.section sncqb
-.section sncra
-.section sncrb
-.section sncsa
-.section sncsb
-.section sncta
-.section snctb
-.section sncua
-.section sncub
-.section sncva
-.section sncvb
-.section sncwa
-.section sncwb
-.section sncxa
-.section sncxb
-.section sncya
-.section sncyb
-.section sncza
-.section snczb
-.section snc1a
-.section snc1b
-.section snc2a
-.section snc2b
-.section snc3a
-.section snc3b
-.section snc4a
-.section snc4b
-.section snc5a
-.section snc5b
-.section snc6a
-.section snc6b
-.section snc7a
-.section snc7b
-.section snc8a
-.section snc8b
-.section snc9a
-.section snc9b
-.section snc0a
-.section snc0b
-.section sndaa
-.section sndab
-.section sndba
-.section sndbb
-.section sndca
-.section sndcb
-.section sndda
-.section snddb
-.section sndea
-.section sndeb
-.section sndfa
-.section sndfb
-.section sndga
-.section sndgb
-.section sndha
-.section sndhb
-.section sndia
-.section sndib
-.section sndja
-.section sndjb
-.section sndka
-.section sndkb
-.section sndla
-.section sndlb
-.section sndma
-.section sndmb
-.section sndna
-.section sndnb
-.section sndoa
-.section sndob
-.section sndpa
-.section sndpb
-.section sndqa
-.section sndqb
-.section sndra
-.section sndrb
-.section sndsa
-.section sndsb
-.section sndta
-.section sndtb
-.section sndua
-.section sndub
-.section sndva
-.section sndvb
-.section sndwa
-.section sndwb
-.section sndxa
-.section sndxb
-.section sndya
-.section sndyb
-.section sndza
-.section sndzb
-.section snd1a
-.section snd1b
-.section snd2a
-.section snd2b
-.section snd3a
-.section snd3b
-.section snd4a
-.section snd4b
-.section snd5a
-.section snd5b
-.section snd6a
-.section snd6b
-.section snd7a
-.section snd7b
-.section snd8a
-.section snd8b
-.section snd9a
-.section snd9b
-.section snd0a
-.section snd0b
-.section sneaa
-.section sneab
-.section sneba
-.section snebb
-.section sneca
-.section snecb
-.section sneda
-.section snedb
-.section sneea
-.section sneeb
-.section snefa
-.section snefb
-.section snega
-.section snegb
-.section sneha
-.section snehb
-.section sneia
-.section sneib
-.section sneja
-.section snejb
-.section sneka
-.section snekb
-.section snela
-.section snelb
-.section snema
-.section snemb
-.section snena
-.section snenb
-.section sneoa
-.section sneob
-.section snepa
-.section snepb
-.section sneqa
-.section sneqb
-.section snera
-.section snerb
-.section snesa
-.section snesb
-.section sneta
-.section snetb
-.section sneua
-.section sneub
-.section sneva
-.section snevb
-.section snewa
-.section snewb
-.section snexa
-.section snexb
-.section sneya
-.section sneyb
-.section sneza
-.section snezb
-.section sne1a
-.section sne1b
-.section sne2a
-.section sne2b
-.section sne3a
-.section sne3b
-.section sne4a
-.section sne4b
-.section sne5a
-.section sne5b
-.section sne6a
-.section sne6b
-.section sne7a
-.section sne7b
-.section sne8a
-.section sne8b
-.section sne9a
-.section sne9b
-.section sne0a
-.section sne0b
-.section snfaa
-.section snfab
-.section snfba
-.section snfbb
-.section snfca
-.section snfcb
-.section snfda
-.section snfdb
-.section snfea
-.section snfeb
-.section snffa
-.section snffb
-.section snfga
-.section snfgb
-.section snfha
-.section snfhb
-.section snfia
-.section snfib
-.section snfja
-.section snfjb
-.section snfka
-.section snfkb
-.section snfla
-.section snflb
-.section snfma
-.section snfmb
-.section snfna
-.section snfnb
-.section snfoa
-.section snfob
-.section snfpa
-.section snfpb
-.section snfqa
-.section snfqb
-.section snfra
-.section snfrb
-.section snfsa
-.section snfsb
-.section snfta
-.section snftb
-.section snfua
-.section snfub
-.section snfva
-.section snfvb
-.section snfwa
-.section snfwb
-.section snfxa
-.section snfxb
-.section snfya
-.section snfyb
-.section snfza
-.section snfzb
-.section snf1a
-.section snf1b
-.section snf2a
-.section snf2b
-.section snf3a
-.section snf3b
-.section snf4a
-.section snf4b
-.section snf5a
-.section snf5b
-.section snf6a
-.section snf6b
-.section snf7a
-.section snf7b
-.section snf8a
-.section snf8b
-.section snf9a
-.section snf9b
-.section snf0a
-.section snf0b
-.section sngaa
-.section sngab
-.section sngba
-.section sngbb
-.section sngca
-.section sngcb
-.section sngda
-.section sngdb
-.section sngea
-.section sngeb
-.section sngfa
-.section sngfb
-.section sngga
-.section snggb
-.section sngha
-.section snghb
-.section sngia
-.section sngib
-.section sngja
-.section sngjb
-.section sngka
-.section sngkb
-.section sngla
-.section snglb
-.section sngma
-.section sngmb
-.section sngna
-.section sngnb
-.section sngoa
-.section sngob
-.section sngpa
-.section sngpb
-.section sngqa
-.section sngqb
-.section sngra
-.section sngrb
-.section sngsa
-.section sngsb
-.section sngta
-.section sngtb
-.section sngua
-.section sngub
-.section sngva
-.section sngvb
-.section sngwa
-.section sngwb
-.section sngxa
-.section sngxb
-.section sngya
-.section sngyb
-.section sngza
-.section sngzb
-.section sng1a
-.section sng1b
-.section sng2a
-.section sng2b
-.section sng3a
-.section sng3b
-.section sng4a
-.section sng4b
-.section sng5a
-.section sng5b
-.section sng6a
-.section sng6b
-.section sng7a
-.section sng7b
-.section sng8a
-.section sng8b
-.section sng9a
-.section sng9b
-.section sng0a
-.section sng0b
-.section snhaa
-.section snhab
-.section snhba
-.section snhbb
-.section snhca
-.section snhcb
-.section snhda
-.section snhdb
-.section snhea
-.section snheb
-.section snhfa
-.section snhfb
-.section snhga
-.section snhgb
-.section snhha
-.section snhhb
-.section snhia
-.section snhib
-.section snhja
-.section snhjb
-.section snhka
-.section snhkb
-.section snhla
-.section snhlb
-.section snhma
-.section snhmb
-.section snhna
-.section snhnb
-.section snhoa
-.section snhob
-.section snhpa
-.section snhpb
-.section snhqa
-.section snhqb
-.section snhra
-.section snhrb
-.section snhsa
-.section snhsb
-.section snhta
-.section snhtb
-.section snhua
-.section snhub
-.section snhva
-.section snhvb
-.section snhwa
-.section snhwb
-.section snhxa
-.section snhxb
-.section snhya
-.section snhyb
-.section snhza
-.section snhzb
-.section snh1a
-.section snh1b
-.section snh2a
-.section snh2b
-.section snh3a
-.section snh3b
-.section snh4a
-.section snh4b
-.section snh5a
-.section snh5b
-.section snh6a
-.section snh6b
-.section snh7a
-.section snh7b
-.section snh8a
-.section snh8b
-.section snh9a
-.section snh9b
-.section snh0a
-.section snh0b
-.section sniaa
-.section sniab
-.section sniba
-.section snibb
-.section snica
-.section snicb
-.section snida
-.section snidb
-.section sniea
-.section snieb
-.section snifa
-.section snifb
-.section sniga
-.section snigb
-.section sniha
-.section snihb
-.section sniia
-.section sniib
-.section snija
-.section snijb
-.section snika
-.section snikb
-.section snila
-.section snilb
-.section snima
-.section snimb
-.section snina
-.section sninb
-.section snioa
-.section sniob
-.section snipa
-.section snipb
-.section sniqa
-.section sniqb
-.section snira
-.section snirb
-.section snisa
-.section snisb
-.section snita
-.section snitb
-.section sniua
-.section sniub
-.section sniva
-.section snivb
-.section sniwa
-.section sniwb
-.section snixa
-.section snixb
-.section sniya
-.section sniyb
-.section sniza
-.section snizb
-.section sni1a
-.section sni1b
-.section sni2a
-.section sni2b
-.section sni3a
-.section sni3b
-.section sni4a
-.section sni4b
-.section sni5a
-.section sni5b
-.section sni6a
-.section sni6b
-.section sni7a
-.section sni7b
-.section sni8a
-.section sni8b
-.section sni9a
-.section sni9b
-.section sni0a
-.section sni0b
-.section snjaa
-.section snjab
-.section snjba
-.section snjbb
-.section snjca
-.section snjcb
-.section snjda
-.section snjdb
-.section snjea
-.section snjeb
-.section snjfa
-.section snjfb
-.section snjga
-.section snjgb
-.section snjha
-.section snjhb
-.section snjia
-.section snjib
-.section snjja
-.section snjjb
-.section snjka
-.section snjkb
-.section snjla
-.section snjlb
-.section snjma
-.section snjmb
-.section snjna
-.section snjnb
-.section snjoa
-.section snjob
-.section snjpa
-.section snjpb
-.section snjqa
-.section snjqb
-.section snjra
-.section snjrb
-.section snjsa
-.section snjsb
-.section snjta
-.section snjtb
-.section snjua
-.section snjub
-.section snjva
-.section snjvb
-.section snjwa
-.section snjwb
-.section snjxa
-.section snjxb
-.section snjya
-.section snjyb
-.section snjza
-.section snjzb
-.section snj1a
-.section snj1b
-.section snj2a
-.section snj2b
-.section snj3a
-.section snj3b
-.section snj4a
-.section snj4b
-.section snj5a
-.section snj5b
-.section snj6a
-.section snj6b
-.section snj7a
-.section snj7b
-.section snj8a
-.section snj8b
-.section snj9a
-.section snj9b
-.section snj0a
-.section snj0b
-.section snkaa
-.section snkab
-.section snkba
-.section snkbb
-.section snkca
-.section snkcb
-.section snkda
-.section snkdb
-.section snkea
-.section snkeb
-.section snkfa
-.section snkfb
-.section snkga
-.section snkgb
-.section snkha
-.section snkhb
-.section snkia
-.section snkib
-.section snkja
-.section snkjb
-.section snkka
-.section snkkb
-.section snkla
-.section snklb
-.section snkma
-.section snkmb
-.section snkna
-.section snknb
-.section snkoa
-.section snkob
-.section snkpa
-.section snkpb
-.section snkqa
-.section snkqb
-.section snkra
-.section snkrb
-.section snksa
-.section snksb
-.section snkta
-.section snktb
-.section snkua
-.section snkub
-.section snkva
-.section snkvb
-.section snkwa
-.section snkwb
-.section snkxa
-.section snkxb
-.section snkya
-.section snkyb
-.section snkza
-.section snkzb
-.section snk1a
-.section snk1b
-.section snk2a
-.section snk2b
-.section snk3a
-.section snk3b
-.section snk4a
-.section snk4b
-.section snk5a
-.section snk5b
-.section snk6a
-.section snk6b
-.section snk7a
-.section snk7b
-.section snk8a
-.section snk8b
-.section snk9a
-.section snk9b
-.section snk0a
-.section snk0b
-.section snlaa
-.section snlab
-.section snlba
-.section snlbb
-.section snlca
-.section snlcb
-.section snlda
-.section snldb
-.section snlea
-.section snleb
-.section snlfa
-.section snlfb
-.section snlga
-.section snlgb
-.section snlha
-.section snlhb
-.section snlia
-.section snlib
-.section snlja
-.section snljb
-.section snlka
-.section snlkb
-.section snlla
-.section snllb
-.section snlma
-.section snlmb
-.section snlna
-.section snlnb
-.section snloa
-.section snlob
-.section snlpa
-.section snlpb
-.section snlqa
-.section snlqb
-.section snlra
-.section snlrb
-.section snlsa
-.section snlsb
-.section snlta
-.section snltb
-.section snlua
-.section snlub
-.section snlva
-.section snlvb
-.section snlwa
-.section snlwb
-.section snlxa
-.section snlxb
-.section snlya
-.section snlyb
-.section snlza
-.section snlzb
-.section snl1a
-.section snl1b
-.section snl2a
-.section snl2b
-.section snl3a
-.section snl3b
-.section snl4a
-.section snl4b
-.section snl5a
-.section snl5b
-.section snl6a
-.section snl6b
-.section snl7a
-.section snl7b
-.section snl8a
-.section snl8b
-.section snl9a
-.section snl9b
-.section snl0a
-.section snl0b
-.section snmaa
-.section snmab
-.section snmba
-.section snmbb
-.section snmca
-.section snmcb
-.section snmda
-.section snmdb
-.section snmea
-.section snmeb
-.section snmfa
-.section snmfb
-.section snmga
-.section snmgb
-.section snmha
-.section snmhb
-.section snmia
-.section snmib
-.section snmja
-.section snmjb
-.section snmka
-.section snmkb
-.section snmla
-.section snmlb
-.section snmma
-.section snmmb
-.section snmna
-.section snmnb
-.section snmoa
-.section snmob
-.section snmpa
-.section snmpb
-.section snmqa
-.section snmqb
-.section snmra
-.section snmrb
-.section snmsa
-.section snmsb
-.section snmta
-.section snmtb
-.section snmua
-.section snmub
-.section snmva
-.section snmvb
-.section snmwa
-.section snmwb
-.section snmxa
-.section snmxb
-.section snmya
-.section snmyb
-.section snmza
-.section snmzb
-.section snm1a
-.section snm1b
-.section snm2a
-.section snm2b
-.section snm3a
-.section snm3b
-.section snm4a
-.section snm4b
-.section snm5a
-.section snm5b
-.section snm6a
-.section snm6b
-.section snm7a
-.section snm7b
-.section snm8a
-.section snm8b
-.section snm9a
-.section snm9b
-.section snm0a
-.section snm0b
-.section snnaa
-.section snnab
-.section snnba
-.section snnbb
-.section snnca
-.section snncb
-.section snnda
-.section snndb
-.section snnea
-.section snneb
-.section snnfa
-.section snnfb
-.section snnga
-.section snngb
-.section snnha
-.section snnhb
-.section snnia
-.section snnib
-.section snnja
-.section snnjb
-.section snnka
-.section snnkb
-.section snnla
-.section snnlb
-.section snnma
-.section snnmb
-.section snnna
-.section snnnb
-.section snnoa
-.section snnob
-.section snnpa
-.section snnpb
-.section snnqa
-.section snnqb
-.section snnra
-.section snnrb
-.section snnsa
-.section snnsb
-.section snnta
-.section snntb
-.section snnua
-.section snnub
-.section snnva
-.section snnvb
-.section snnwa
-.section snnwb
-.section snnxa
-.section snnxb
-.section snnya
-.section snnyb
-.section snnza
-.section snnzb
-.section snn1a
-.section snn1b
-.section snn2a
-.section snn2b
-.section snn3a
-.section snn3b
-.section snn4a
-.section snn4b
-.section snn5a
-.section snn5b
-.section snn6a
-.section snn6b
-.section snn7a
-.section snn7b
-.section snn8a
-.section snn8b
-.section snn9a
-.section snn9b
-.section snn0a
-.section snn0b
-.section snoaa
-.section snoab
-.section snoba
-.section snobb
-.section snoca
-.section snocb
-.section snoda
-.section snodb
-.section snoea
-.section snoeb
-.section snofa
-.section snofb
-.section snoga
-.section snogb
-.section snoha
-.section snohb
-.section snoia
-.section snoib
-.section snoja
-.section snojb
-.section snoka
-.section snokb
-.section snola
-.section snolb
-.section snoma
-.section snomb
-.section snona
-.section snonb
-.section snooa
-.section snoob
-.section snopa
-.section snopb
-.section snoqa
-.section snoqb
-.section snora
-.section snorb
-.section snosa
-.section snosb
-.section snota
-.section snotb
-.section snoua
-.section snoub
-.section snova
-.section snovb
-.section snowa
-.section snowb
-.section snoxa
-.section snoxb
-.section snoya
-.section snoyb
-.section snoza
-.section snozb
-.section sno1a
-.section sno1b
-.section sno2a
-.section sno2b
-.section sno3a
-.section sno3b
-.section sno4a
-.section sno4b
-.section sno5a
-.section sno5b
-.section sno6a
-.section sno6b
-.section sno7a
-.section sno7b
-.section sno8a
-.section sno8b
-.section sno9a
-.section sno9b
-.section sno0a
-.section sno0b
-.section snpaa
-.section snpab
-.section snpba
-.section snpbb
-.section snpca
-.section snpcb
-.section snpda
-.section snpdb
-.section snpea
-.section snpeb
-.section snpfa
-.section snpfb
-.section snpga
-.section snpgb
-.section snpha
-.section snphb
-.section snpia
-.section snpib
-.section snpja
-.section snpjb
-.section snpka
-.section snpkb
-.section snpla
-.section snplb
-.section snpma
-.section snpmb
-.section snpna
-.section snpnb
-.section snpoa
-.section snpob
-.section snppa
-.section snppb
-.section snpqa
-.section snpqb
-.section snpra
-.section snprb
-.section snpsa
-.section snpsb
-.section snpta
-.section snptb
-.section snpua
-.section snpub
-.section snpva
-.section snpvb
-.section snpwa
-.section snpwb
-.section snpxa
-.section snpxb
-.section snpya
-.section snpyb
-.section snpza
-.section snpzb
-.section snp1a
-.section snp1b
-.section snp2a
-.section snp2b
-.section snp3a
-.section snp3b
-.section snp4a
-.section snp4b
-.section snp5a
-.section snp5b
-.section snp6a
-.section snp6b
-.section snp7a
-.section snp7b
-.section snp8a
-.section snp8b
-.section snp9a
-.section snp9b
-.section snp0a
-.section snp0b
-.section snqaa
-.section snqab
-.section snqba
-.section snqbb
-.section snqca
-.section snqcb
-.section snqda
-.section snqdb
-.section snqea
-.section snqeb
-.section snqfa
-.section snqfb
-.section snqga
-.section snqgb
-.section snqha
-.section snqhb
-.section snqia
-.section snqib
-.section snqja
-.section snqjb
-.section snqka
-.section snqkb
-.section snqla
-.section snqlb
-.section snqma
-.section snqmb
-.section snqna
-.section snqnb
-.section snqoa
-.section snqob
-.section snqpa
-.section snqpb
-.section snqqa
-.section snqqb
-.section snqra
-.section snqrb
-.section snqsa
-.section snqsb
-.section snqta
-.section snqtb
-.section snqua
-.section snqub
-.section snqva
-.section snqvb
-.section snqwa
-.section snqwb
-.section snqxa
-.section snqxb
-.section snqya
-.section snqyb
-.section snqza
-.section snqzb
-.section snq1a
-.section snq1b
-.section snq2a
-.section snq2b
-.section snq3a
-.section snq3b
-.section snq4a
-.section snq4b
-.section snq5a
-.section snq5b
-.section snq6a
-.section snq6b
-.section snq7a
-.section snq7b
-.section snq8a
-.section snq8b
-.section snq9a
-.section snq9b
-.section snq0a
-.section snq0b
-.section snraa
-.section snrab
-.section snrba
-.section snrbb
-.section snrca
-.section snrcb
-.section snrda
-.section snrdb
-.section snrea
-.section snreb
-.section snrfa
-.section snrfb
-.section snrga
-.section snrgb
-.section snrha
-.section snrhb
-.section snria
-.section snrib
-.section snrja
-.section snrjb
-.section snrka
-.section snrkb
-.section snrla
-.section snrlb
-.section snrma
-.section snrmb
-.section snrna
-.section snrnb
-.section snroa
-.section snrob
-.section snrpa
-.section snrpb
-.section snrqa
-.section snrqb
-.section snrra
-.section snrrb
-.section snrsa
-.section snrsb
-.section snrta
-.section snrtb
-.section snrua
-.section snrub
-.section snrva
-.section snrvb
-.section snrwa
-.section snrwb
-.section snrxa
-.section snrxb
-.section snrya
-.section snryb
-.section snrza
-.section snrzb
-.section snr1a
-.section snr1b
-.section snr2a
-.section snr2b
-.section snr3a
-.section snr3b
-.section snr4a
-.section snr4b
-.section snr5a
-.section snr5b
-.section snr6a
-.section snr6b
-.section snr7a
-.section snr7b
-.section snr8a
-.section snr8b
-.section snr9a
-.section snr9b
-.section snr0a
-.section snr0b
-.section snsaa
-.section snsab
-.section snsba
-.section snsbb
-.section snsca
-.section snscb
-.section snsda
-.section snsdb
-.section snsea
-.section snseb
-.section snsfa
-.section snsfb
-.section snsga
-.section snsgb
-.section snsha
-.section snshb
-.section snsia
-.section snsib
-.section snsja
-.section snsjb
-.section snska
-.section snskb
-.section snsla
-.section snslb
-.section snsma
-.section snsmb
-.section snsna
-.section snsnb
-.section snsoa
-.section snsob
-.section snspa
-.section snspb
-.section snsqa
-.section snsqb
-.section snsra
-.section snsrb
-.section snssa
-.section snssb
-.section snsta
-.section snstb
-.section snsua
-.section snsub
-.section snsva
-.section snsvb
-.section snswa
-.section snswb
-.section snsxa
-.section snsxb
-.section snsya
-.section snsyb
-.section snsza
-.section snszb
-.section sns1a
-.section sns1b
-.section sns2a
-.section sns2b
-.section sns3a
-.section sns3b
-.section sns4a
-.section sns4b
-.section sns5a
-.section sns5b
-.section sns6a
-.section sns6b
-.section sns7a
-.section sns7b
-.section sns8a
-.section sns8b
-.section sns9a
-.section sns9b
-.section sns0a
-.section sns0b
-.section sntaa
-.section sntab
-.section sntba
-.section sntbb
-.section sntca
-.section sntcb
-.section sntda
-.section sntdb
-.section sntea
-.section snteb
-.section sntfa
-.section sntfb
-.section sntga
-.section sntgb
-.section sntha
-.section snthb
-.section sntia
-.section sntib
-.section sntja
-.section sntjb
-.section sntka
-.section sntkb
-.section sntla
-.section sntlb
-.section sntma
-.section sntmb
-.section sntna
-.section sntnb
-.section sntoa
-.section sntob
-.section sntpa
-.section sntpb
-.section sntqa
-.section sntqb
-.section sntra
-.section sntrb
-.section sntsa
-.section sntsb
-.section sntta
-.section snttb
-.section sntua
-.section sntub
-.section sntva
-.section sntvb
-.section sntwa
-.section sntwb
-.section sntxa
-.section sntxb
-.section sntya
-.section sntyb
-.section sntza
-.section sntzb
-.section snt1a
-.section snt1b
-.section snt2a
-.section snt2b
-.section snt3a
-.section snt3b
-.section snt4a
-.section snt4b
-.section snt5a
-.section snt5b
-.section snt6a
-.section snt6b
-.section snt7a
-.section snt7b
-.section snt8a
-.section snt8b
-.section snt9a
-.section snt9b
-.section snt0a
-.section snt0b
-.section snuaa
-.section snuab
-.section snuba
-.section snubb
-.section snuca
-.section snucb
-.section snuda
-.section snudb
-.section snuea
-.section snueb
-.section snufa
-.section snufb
-.section snuga
-.section snugb
-.section snuha
-.section snuhb
-.section snuia
-.section snuib
-.section snuja
-.section snujb
-.section snuka
-.section snukb
-.section snula
-.section snulb
-.section snuma
-.section snumb
-.section snuna
-.section snunb
-.section snuoa
-.section snuob
-.section snupa
-.section snupb
-.section snuqa
-.section snuqb
-.section snura
-.section snurb
-.section snusa
-.section snusb
-.section snuta
-.section snutb
-.section snuua
-.section snuub
-.section snuva
-.section snuvb
-.section snuwa
-.section snuwb
-.section snuxa
-.section snuxb
-.section snuya
-.section snuyb
-.section snuza
-.section snuzb
-.section snu1a
-.section snu1b
-.section snu2a
-.section snu2b
-.section snu3a
-.section snu3b
-.section snu4a
-.section snu4b
-.section snu5a
-.section snu5b
-.section snu6a
-.section snu6b
-.section snu7a
-.section snu7b
-.section snu8a
-.section snu8b
-.section snu9a
-.section snu9b
-.section snu0a
-.section snu0b
-.section snvaa
-.section snvab
-.section snvba
-.section snvbb
-.section snvca
-.section snvcb
-.section snvda
-.section snvdb
-.section snvea
-.section snveb
-.section snvfa
-.section snvfb
-.section snvga
-.section snvgb
-.section snvha
-.section snvhb
-.section snvia
-.section snvib
-.section snvja
-.section snvjb
-.section snvka
-.section snvkb
-.section snvla
-.section snvlb
-.section snvma
-.section snvmb
-.section snvna
-.section snvnb
-.section snvoa
-.section snvob
-.section snvpa
-.section snvpb
-.section snvqa
-.section snvqb
-.section snvra
-.section snvrb
-.section snvsa
-.section snvsb
-.section snvta
-.section snvtb
-.section snvua
-.section snvub
-.section snvva
-.section snvvb
-.section snvwa
-.section snvwb
-.section snvxa
-.section snvxb
-.section snvya
-.section snvyb
-.section snvza
-.section snvzb
-.section snv1a
-.section snv1b
-.section snv2a
-.section snv2b
-.section snv3a
-.section snv3b
-.section snv4a
-.section snv4b
-.section snv5a
-.section snv5b
-.section snv6a
-.section snv6b
-.section snv7a
-.section snv7b
-.section snv8a
-.section snv8b
-.section snv9a
-.section snv9b
-.section snv0a
-.section snv0b
-.section snwaa
-.section snwab
-.section snwba
-.section snwbb
-.section snwca
-.section snwcb
-.section snwda
-.section snwdb
-.section snwea
-.section snweb
-.section snwfa
-.section snwfb
-.section snwga
-.section snwgb
-.section snwha
-.section snwhb
-.section snwia
-.section snwib
-.section snwja
-.section snwjb
-.section snwka
-.section snwkb
-.section snwla
-.section snwlb
-.section snwma
-.section snwmb
-.section snwna
-.section snwnb
-.section snwoa
-.section snwob
-.section snwpa
-.section snwpb
-.section snwqa
-.section snwqb
-.section snwra
-.section snwrb
-.section snwsa
-.section snwsb
-.section snwta
-.section snwtb
-.section snwua
-.section snwub
-.section snwva
-.section snwvb
-.section snwwa
-.section snwwb
-.section snwxa
-.section snwxb
-.section snwya
-.section snwyb
-.section snwza
-.section snwzb
-.section snw1a
-.section snw1b
-.section snw2a
-.section snw2b
-.section snw3a
-.section snw3b
-.section snw4a
-.section snw4b
-.section snw5a
-.section snw5b
-.section snw6a
-.section snw6b
-.section snw7a
-.section snw7b
-.section snw8a
-.section snw8b
-.section snw9a
-.section snw9b
-.section snw0a
-.section snw0b
-.section snxaa
-.section snxab
-.section snxba
-.section snxbb
-.section snxca
-.section snxcb
-.section snxda
-.section snxdb
-.section snxea
-.section snxeb
-.section snxfa
-.section snxfb
-.section snxga
-.section snxgb
-.section snxha
-.section snxhb
-.section snxia
-.section snxib
-.section snxja
-.section snxjb
-.section snxka
-.section snxkb
-.section snxla
-.section snxlb
-.section snxma
-.section snxmb
-.section snxna
-.section snxnb
-.section snxoa
-.section snxob
-.section snxpa
-.section snxpb
-.section snxqa
-.section snxqb
-.section snxra
-.section snxrb
-.section snxsa
-.section snxsb
-.section snxta
-.section snxtb
-.section snxua
-.section snxub
-.section snxva
-.section snxvb
-.section snxwa
-.section snxwb
-.section snxxa
-.section snxxb
-.section snxya
-.section snxyb
-.section snxza
-.section snxzb
-.section snx1a
-.section snx1b
-.section snx2a
-.section snx2b
-.section snx3a
-.section snx3b
-.section snx4a
-.section snx4b
-.section snx5a
-.section snx5b
-.section snx6a
-.section snx6b
-.section snx7a
-.section snx7b
-.section snx8a
-.section snx8b
-.section snx9a
-.section snx9b
-.section snx0a
-.section snx0b
-.section snyaa
-.section snyab
-.section snyba
-.section snybb
-.section snyca
-.section snycb
-.section snyda
-.section snydb
-.section snyea
-.section snyeb
-.section snyfa
-.section snyfb
-.section snyga
-.section snygb
-.section snyha
-.section snyhb
-.section snyia
-.section snyib
-.section snyja
-.section snyjb
-.section snyka
-.section snykb
-.section snyla
-.section snylb
-.section snyma
-.section snymb
-.section snyna
-.section snynb
-.section snyoa
-.section snyob
-.section snypa
-.section snypb
-.section snyqa
-.section snyqb
-.section snyra
-.section snyrb
-.section snysa
-.section snysb
-.section snyta
-.section snytb
-.section snyua
-.section snyub
-.section snyva
-.section snyvb
-.section snywa
-.section snywb
-.section snyxa
-.section snyxb
-.section snyya
-.section snyyb
-.section snyza
-.section snyzb
-.section sny1a
-.section sny1b
-.section sny2a
-.section sny2b
-.section sny3a
-.section sny3b
-.section sny4a
-.section sny4b
-.section sny5a
-.section sny5b
-.section sny6a
-.section sny6b
-.section sny7a
-.section sny7b
-.section sny8a
-.section sny8b
-.section sny9a
-.section sny9b
-.section sny0a
-.section sny0b
-.section snzaa
-.section snzab
-.section snzba
-.section snzbb
-.section snzca
-.section snzcb
-.section snzda
-.section snzdb
-.section snzea
-.section snzeb
-.section snzfa
-.section snzfb
-.section snzga
-.section snzgb
-.section snzha
-.section snzhb
-.section snzia
-.section snzib
-.section snzja
-.section snzjb
-.section snzka
-.section snzkb
-.section snzla
-.section snzlb
-.section snzma
-.section snzmb
-.section snzna
-.section snznb
-.section snzoa
-.section snzob
-.section snzpa
-.section snzpb
-.section snzqa
-.section snzqb
-.section snzra
-.section snzrb
-.section snzsa
-.section snzsb
-.section snzta
-.section snztb
-.section snzua
-.section snzub
-.section snzva
-.section snzvb
-.section snzwa
-.section snzwb
-.section snzxa
-.section snzxb
-.section snzya
-.section snzyb
-.section snzza
-.section snzzb
-.section snz1a
-.section snz1b
-.section snz2a
-.section snz2b
-.section snz3a
-.section snz3b
-.section snz4a
-.section snz4b
-.section snz5a
-.section snz5b
-.section snz6a
-.section snz6b
-.section snz7a
-.section snz7b
-.section snz8a
-.section snz8b
-.section snz9a
-.section snz9b
-.section snz0a
-.section snz0b
-.section sn1aa
-.section sn1ab
-.section sn1ba
-.section sn1bb
-.section sn1ca
-.section sn1cb
-.section sn1da
-.section sn1db
-.section sn1ea
-.section sn1eb
-.section sn1fa
-.section sn1fb
-.section sn1ga
-.section sn1gb
-.section sn1ha
-.section sn1hb
-.section sn1ia
-.section sn1ib
-.section sn1ja
-.section sn1jb
-.section sn1ka
-.section sn1kb
-.section sn1la
-.section sn1lb
-.section sn1ma
-.section sn1mb
-.section sn1na
-.section sn1nb
-.section sn1oa
-.section sn1ob
-.section sn1pa
-.section sn1pb
-.section sn1qa
-.section sn1qb
-.section sn1ra
-.section sn1rb
-.section sn1sa
-.section sn1sb
-.section sn1ta
-.section sn1tb
-.section sn1ua
-.section sn1ub
-.section sn1va
-.section sn1vb
-.section sn1wa
-.section sn1wb
-.section sn1xa
-.section sn1xb
-.section sn1ya
-.section sn1yb
-.section sn1za
-.section sn1zb
-.section sn11a
-.section sn11b
-.section sn12a
-.section sn12b
-.section sn13a
-.section sn13b
-.section sn14a
-.section sn14b
-.section sn15a
-.section sn15b
-.section sn16a
-.section sn16b
-.section sn17a
-.section sn17b
-.section sn18a
-.section sn18b
-.section sn19a
-.section sn19b
-.section sn10a
-.section sn10b
-.section sn2aa
-.section sn2ab
-.section sn2ba
-.section sn2bb
-.section sn2ca
-.section sn2cb
-.section sn2da
-.section sn2db
-.section sn2ea
-.section sn2eb
-.section sn2fa
-.section sn2fb
-.section sn2ga
-.section sn2gb
-.section sn2ha
-.section sn2hb
-.section sn2ia
-.section sn2ib
-.section sn2ja
-.section sn2jb
-.section sn2ka
-.section sn2kb
-.section sn2la
-.section sn2lb
-.section sn2ma
-.section sn2mb
-.section sn2na
-.section sn2nb
-.section sn2oa
-.section sn2ob
-.section sn2pa
-.section sn2pb
-.section sn2qa
-.section sn2qb
-.section sn2ra
-.section sn2rb
-.section sn2sa
-.section sn2sb
-.section sn2ta
-.section sn2tb
-.section sn2ua
-.section sn2ub
-.section sn2va
-.section sn2vb
-.section sn2wa
-.section sn2wb
-.section sn2xa
-.section sn2xb
-.section sn2ya
-.section sn2yb
-.section sn2za
-.section sn2zb
-.section sn21a
-.section sn21b
-.section sn22a
-.section sn22b
-.section sn23a
-.section sn23b
-.section sn24a
-.section sn24b
-.section sn25a
-.section sn25b
-.section sn26a
-.section sn26b
-.section sn27a
-.section sn27b
-.section sn28a
-.section sn28b
-.section sn29a
-.section sn29b
-.section sn20a
-.section sn20b
-.section sn3aa
-.section sn3ab
-.section sn3ba
-.section sn3bb
-.section sn3ca
-.section sn3cb
-.section sn3da
-.section sn3db
-.section sn3ea
-.section sn3eb
-.section sn3fa
-.section sn3fb
-.section sn3ga
-.section sn3gb
-.section sn3ha
-.section sn3hb
-.section sn3ia
-.section sn3ib
-.section sn3ja
-.section sn3jb
-.section sn3ka
-.section sn3kb
-.section sn3la
-.section sn3lb
-.section sn3ma
-.section sn3mb
-.section sn3na
-.section sn3nb
-.section sn3oa
-.section sn3ob
-.section sn3pa
-.section sn3pb
-.section sn3qa
-.section sn3qb
-.section sn3ra
-.section sn3rb
-.section sn3sa
-.section sn3sb
-.section sn3ta
-.section sn3tb
-.section sn3ua
-.section sn3ub
-.section sn3va
-.section sn3vb
-.section sn3wa
-.section sn3wb
-.section sn3xa
-.section sn3xb
-.section sn3ya
-.section sn3yb
-.section sn3za
-.section sn3zb
-.section sn31a
-.section sn31b
-.section sn32a
-.section sn32b
-.section sn33a
-.section sn33b
-.section sn34a
-.section sn34b
-.section sn35a
-.section sn35b
-.section sn36a
-.section sn36b
-.section sn37a
-.section sn37b
-.section sn38a
-.section sn38b
-.section sn39a
-.section sn39b
-.section sn30a
-.section sn30b
-.section sn4aa
-.section sn4ab
-.section sn4ba
-.section sn4bb
-.section sn4ca
-.section sn4cb
-.section sn4da
-.section sn4db
-.section sn4ea
-.section sn4eb
-.section sn4fa
-.section sn4fb
-.section sn4ga
-.section sn4gb
-.section sn4ha
-.section sn4hb
-.section sn4ia
-.section sn4ib
-.section sn4ja
-.section sn4jb
-.section sn4ka
-.section sn4kb
-.section sn4la
-.section sn4lb
-.section sn4ma
-.section sn4mb
-.section sn4na
-.section sn4nb
-.section sn4oa
-.section sn4ob
-.section sn4pa
-.section sn4pb
-.section sn4qa
-.section sn4qb
-.section sn4ra
-.section sn4rb
-.section sn4sa
-.section sn4sb
-.section sn4ta
-.section sn4tb
-.section sn4ua
-.section sn4ub
-.section sn4va
-.section sn4vb
-.section sn4wa
-.section sn4wb
-.section sn4xa
-.section sn4xb
-.section sn4ya
-.section sn4yb
-.section sn4za
-.section sn4zb
-.section sn41a
-.section sn41b
-.section sn42a
-.section sn42b
-.section sn43a
-.section sn43b
-.section sn44a
-.section sn44b
-.section sn45a
-.section sn45b
-.section sn46a
-.section sn46b
-.section sn47a
-.section sn47b
-.section sn48a
-.section sn48b
-.section sn49a
-.section sn49b
-.section sn40a
-.section sn40b
-.section sn5aa
-.section sn5ab
-.section sn5ba
-.section sn5bb
-.section sn5ca
-.section sn5cb
-.section sn5da
-.section sn5db
-.section sn5ea
-.section sn5eb
-.section sn5fa
-.section sn5fb
-.section sn5ga
-.section sn5gb
-.section sn5ha
-.section sn5hb
-.section sn5ia
-.section sn5ib
-.section sn5ja
-.section sn5jb
-.section sn5ka
-.section sn5kb
-.section sn5la
-.section sn5lb
-.section sn5ma
-.section sn5mb
-.section sn5na
-.section sn5nb
-.section sn5oa
-.section sn5ob
-.section sn5pa
-.section sn5pb
-.section sn5qa
-.section sn5qb
-.section sn5ra
-.section sn5rb
-.section sn5sa
-.section sn5sb
-.section sn5ta
-.section sn5tb
-.section sn5ua
-.section sn5ub
-.section sn5va
-.section sn5vb
-.section sn5wa
-.section sn5wb
-.section sn5xa
-.section sn5xb
-.section sn5ya
-.section sn5yb
-.section sn5za
-.section sn5zb
-.section sn51a
-.section sn51b
-.section sn52a
-.section sn52b
-.section sn53a
-.section sn53b
-.section sn54a
-.section sn54b
-.section sn55a
-.section sn55b
-.section sn56a
-.section sn56b
-.section sn57a
-.section sn57b
-.section sn58a
-.section sn58b
-.section sn59a
-.section sn59b
-.section sn50a
-.section sn50b
-.section sn6aa
-.section sn6ab
-.section sn6ba
-.section sn6bb
-.section sn6ca
-.section sn6cb
-.section sn6da
-.section sn6db
-.section sn6ea
-.section sn6eb
-.section sn6fa
-.section sn6fb
-.section sn6ga
-.section sn6gb
-.section sn6ha
-.section sn6hb
-.section sn6ia
-.section sn6ib
-.section sn6ja
-.section sn6jb
-.section sn6ka
-.section sn6kb
-.section sn6la
-.section sn6lb
-.section sn6ma
-.section sn6mb
-.section sn6na
-.section sn6nb
-.section sn6oa
-.section sn6ob
-.section sn6pa
-.section sn6pb
-.section sn6qa
-.section sn6qb
-.section sn6ra
-.section sn6rb
-.section sn6sa
-.section sn6sb
-.section sn6ta
-.section sn6tb
-.section sn6ua
-.section sn6ub
-.section sn6va
-.section sn6vb
-.section sn6wa
-.section sn6wb
-.section sn6xa
-.section sn6xb
-.section sn6ya
-.section sn6yb
-.section sn6za
-.section sn6zb
-.section sn61a
-.section sn61b
-.section sn62a
-.section sn62b
-.section sn63a
-.section sn63b
-.section sn64a
-.section sn64b
-.section sn65a
-.section sn65b
-.section sn66a
-.section sn66b
-.section sn67a
-.section sn67b
-.section sn68a
-.section sn68b
-.section sn69a
-.section sn69b
-.section sn60a
-.section sn60b
-.section sn7aa
-.section sn7ab
-.section sn7ba
-.section sn7bb
-.section sn7ca
-.section sn7cb
-.section sn7da
-.section sn7db
-.section sn7ea
-.section sn7eb
-.section sn7fa
-.section sn7fb
-.section sn7ga
-.section sn7gb
-.section sn7ha
-.section sn7hb
-.section sn7ia
-.section sn7ib
-.section sn7ja
-.section sn7jb
-.section sn7ka
-.section sn7kb
-.section sn7la
-.section sn7lb
-.section sn7ma
-.section sn7mb
-.section sn7na
-.section sn7nb
-.section sn7oa
-.section sn7ob
-.section sn7pa
-.section sn7pb
-.section sn7qa
-.section sn7qb
-.section sn7ra
-.section sn7rb
-.section sn7sa
-.section sn7sb
-.section sn7ta
-.section sn7tb
-.section sn7ua
-.section sn7ub
-.section sn7va
-.section sn7vb
-.section sn7wa
-.section sn7wb
-.section sn7xa
-.section sn7xb
-.section sn7ya
-.section sn7yb
-.section sn7za
-.section sn7zb
-.section sn71a
-.section sn71b
-.section sn72a
-.section sn72b
-.section sn73a
-.section sn73b
-.section sn74a
-.section sn74b
-.section sn75a
-.section sn75b
-.section sn76a
-.section sn76b
-.section sn77a
-.section sn77b
-.section sn78a
-.section sn78b
-.section sn79a
-.section sn79b
-.section sn70a
-.section sn70b
-.section sn8aa
-.section sn8ab
-.section sn8ba
-.section sn8bb
-.section sn8ca
-.section sn8cb
-.section sn8da
-.section sn8db
-.section sn8ea
-.section sn8eb
-.section sn8fa
-.section sn8fb
-.section sn8ga
-.section sn8gb
-.section sn8ha
-.section sn8hb
-.section sn8ia
-.section sn8ib
-.section sn8ja
-.section sn8jb
-.section sn8ka
-.section sn8kb
-.section sn8la
-.section sn8lb
-.section sn8ma
-.section sn8mb
-.section sn8na
-.section sn8nb
-.section sn8oa
-.section sn8ob
-.section sn8pa
-.section sn8pb
-.section sn8qa
-.section sn8qb
-.section sn8ra
-.section sn8rb
-.section sn8sa
-.section sn8sb
-.section sn8ta
-.section sn8tb
-.section sn8ua
-.section sn8ub
-.section sn8va
-.section sn8vb
-.section sn8wa
-.section sn8wb
-.section sn8xa
-.section sn8xb
-.section sn8ya
-.section sn8yb
-.section sn8za
-.section sn8zb
-.section sn81a
-.section sn81b
-.section sn82a
-.section sn82b
-.section sn83a
-.section sn83b
-.section sn84a
-.section sn84b
-.section sn85a
-.section sn85b
-.section sn86a
-.section sn86b
-.section sn87a
-.section sn87b
-.section sn88a
-.section sn88b
-.section sn89a
-.section sn89b
-.section sn80a
-.section sn80b
-.section sn9aa
-.section sn9ab
-.section sn9ba
-.section sn9bb
-.section sn9ca
-.section sn9cb
-.section sn9da
-.section sn9db
-.section sn9ea
-.section sn9eb
-.section sn9fa
-.section sn9fb
-.section sn9ga
-.section sn9gb
-.section sn9ha
-.section sn9hb
-.section sn9ia
-.section sn9ib
-.section sn9ja
-.section sn9jb
-.section sn9ka
-.section sn9kb
-.section sn9la
-.section sn9lb
-.section sn9ma
-.section sn9mb
-.section sn9na
-.section sn9nb
-.section sn9oa
-.section sn9ob
-.section sn9pa
-.section sn9pb
-.section sn9qa
-.section sn9qb
-.section sn9ra
-.section sn9rb
-.section sn9sa
-.section sn9sb
-.section sn9ta
-.section sn9tb
-.section sn9ua
-.section sn9ub
-.section sn9va
-.section sn9vb
-.section sn9wa
-.section sn9wb
-.section sn9xa
-.section sn9xb
-.section sn9ya
-.section sn9yb
-.section sn9za
-.section sn9zb
-.section sn91a
-.section sn91b
-.section sn92a
-.section sn92b
-.section sn93a
-.section sn93b
-.section sn94a
-.section sn94b
-.section sn95a
-.section sn95b
-.section sn96a
-.section sn96b
-.section sn97a
-.section sn97b
-.section sn98a
-.section sn98b
-.section sn99a
-.section sn99b
-.section sn90a
-.section sn90b
-.section sn0aa
-.section sn0ab
-.section sn0ba
-.section sn0bb
-.section sn0ca
-.section sn0cb
-.section sn0da
-.section sn0db
-.section sn0ea
-.section sn0eb
-.section sn0fa
-.section sn0fb
-.section sn0ga
-.section sn0gb
-.section sn0ha
-.section sn0hb
-.section sn0ia
-.section sn0ib
-.section sn0ja
-.section sn0jb
-.section sn0ka
-.section sn0kb
-.section sn0la
-.section sn0lb
-.section sn0ma
-.section sn0mb
-.section sn0na
-.section sn0nb
-.section sn0oa
-.section sn0ob
-.section sn0pa
-.section sn0pb
-.section sn0qa
-.section sn0qb
-.section sn0ra
-.section sn0rb
-.section sn0sa
-.section sn0sb
-.section sn0ta
-.section sn0tb
-.section sn0ua
-.section sn0ub
-.section sn0va
-.section sn0vb
-.section sn0wa
-.section sn0wb
-.section sn0xa
-.section sn0xb
-.section sn0ya
-.section sn0yb
-.section sn0za
-.section sn0zb
-.section sn01a
-.section sn01b
-.section sn02a
-.section sn02b
-.section sn03a
-.section sn03b
-.section sn04a
-.section sn04b
-.section sn05a
-.section sn05b
-.section sn06a
-.section sn06b
-.section sn07a
-.section sn07b
-.section sn08a
-.section sn08b
-.section sn09a
-.section sn09b
-.section sn00a
-.section sn00b
-.section soaaa
-.section soaab
-.section soaba
-.section soabb
-.section soaca
-.section soacb
-.section soada
-.section soadb
-.section soaea
-.section soaeb
-.section soafa
-.section soafb
-.section soaga
-.section soagb
-.section soaha
-.section soahb
-.section soaia
-.section soaib
-.section soaja
-.section soajb
-.section soaka
-.section soakb
-.section soala
-.section soalb
-.section soama
-.section soamb
-.section soana
-.section soanb
-.section soaoa
-.section soaob
-.section soapa
-.section soapb
-.section soaqa
-.section soaqb
-.section soara
-.section soarb
-.section soasa
-.section soasb
-.section soata
-.section soatb
-.section soaua
-.section soaub
-.section soava
-.section soavb
-.section soawa
-.section soawb
-.section soaxa
-.section soaxb
-.section soaya
-.section soayb
-.section soaza
-.section soazb
-.section soa1a
-.section soa1b
-.section soa2a
-.section soa2b
-.section soa3a
-.section soa3b
-.section soa4a
-.section soa4b
-.section soa5a
-.section soa5b
-.section soa6a
-.section soa6b
-.section soa7a
-.section soa7b
-.section soa8a
-.section soa8b
-.section soa9a
-.section soa9b
-.section soa0a
-.section soa0b
-.section sobaa
-.section sobab
-.section sobba
-.section sobbb
-.section sobca
-.section sobcb
-.section sobda
-.section sobdb
-.section sobea
-.section sobeb
-.section sobfa
-.section sobfb
-.section sobga
-.section sobgb
-.section sobha
-.section sobhb
-.section sobia
-.section sobib
-.section sobja
-.section sobjb
-.section sobka
-.section sobkb
-.section sobla
-.section soblb
-.section sobma
-.section sobmb
-.section sobna
-.section sobnb
-.section soboa
-.section sobob
-.section sobpa
-.section sobpb
-.section sobqa
-.section sobqb
-.section sobra
-.section sobrb
-.section sobsa
-.section sobsb
-.section sobta
-.section sobtb
-.section sobua
-.section sobub
-.section sobva
-.section sobvb
-.section sobwa
-.section sobwb
-.section sobxa
-.section sobxb
-.section sobya
-.section sobyb
-.section sobza
-.section sobzb
-.section sob1a
-.section sob1b
-.section sob2a
-.section sob2b
-.section sob3a
-.section sob3b
-.section sob4a
-.section sob4b
-.section sob5a
-.section sob5b
-.section sob6a
-.section sob6b
-.section sob7a
-.section sob7b
-.section sob8a
-.section sob8b
-.section sob9a
-.section sob9b
-.section sob0a
-.section sob0b
-.section socaa
-.section socab
-.section socba
-.section socbb
-.section socca
-.section soccb
-.section socda
-.section socdb
-.section socea
-.section soceb
-.section socfa
-.section socfb
-.section socga
-.section socgb
-.section socha
-.section sochb
-.section socia
-.section socib
-.section socja
-.section socjb
-.section socka
-.section sockb
-.section socla
-.section soclb
-.section socma
-.section socmb
-.section socna
-.section socnb
-.section socoa
-.section socob
-.section socpa
-.section socpb
-.section socqa
-.section socqb
-.section socra
-.section socrb
-.section socsa
-.section socsb
-.section socta
-.section soctb
-.section socua
-.section socub
-.section socva
-.section socvb
-.section socwa
-.section socwb
-.section socxa
-.section socxb
-.section socya
-.section socyb
-.section socza
-.section soczb
-.section soc1a
-.section soc1b
-.section soc2a
-.section soc2b
-.section soc3a
-.section soc3b
-.section soc4a
-.section soc4b
-.section soc5a
-.section soc5b
-.section soc6a
-.section soc6b
-.section soc7a
-.section soc7b
-.section soc8a
-.section soc8b
-.section soc9a
-.section soc9b
-.section soc0a
-.section soc0b
-.section sodaa
-.section sodab
-.section sodba
-.section sodbb
-.section sodca
-.section sodcb
-.section sodda
-.section soddb
-.section sodea
-.section sodeb
-.section sodfa
-.section sodfb
-.section sodga
-.section sodgb
-.section sodha
-.section sodhb
-.section sodia
-.section sodib
-.section sodja
-.section sodjb
-.section sodka
-.section sodkb
-.section sodla
-.section sodlb
-.section sodma
-.section sodmb
-.section sodna
-.section sodnb
-.section sodoa
-.section sodob
-.section sodpa
-.section sodpb
-.section sodqa
-.section sodqb
-.section sodra
-.section sodrb
-.section sodsa
-.section sodsb
-.section sodta
-.section sodtb
-.section sodua
-.section sodub
-.section sodva
-.section sodvb
-.section sodwa
-.section sodwb
-.section sodxa
-.section sodxb
-.section sodya
-.section sodyb
-.section sodza
-.section sodzb
-.section sod1a
-.section sod1b
-.section sod2a
-.section sod2b
-.section sod3a
-.section sod3b
-.section sod4a
-.section sod4b
-.section sod5a
-.section sod5b
-.section sod6a
-.section sod6b
-.section sod7a
-.section sod7b
-.section sod8a
-.section sod8b
-.section sod9a
-.section sod9b
-.section sod0a
-.section sod0b
-.section soeaa
-.section soeab
-.section soeba
-.section soebb
-.section soeca
-.section soecb
-.section soeda
-.section soedb
-.section soeea
-.section soeeb
-.section soefa
-.section soefb
-.section soega
-.section soegb
-.section soeha
-.section soehb
-.section soeia
-.section soeib
-.section soeja
-.section soejb
-.section soeka
-.section soekb
-.section soela
-.section soelb
-.section soema
-.section soemb
-.section soena
-.section soenb
-.section soeoa
-.section soeob
-.section soepa
-.section soepb
-.section soeqa
-.section soeqb
-.section soera
-.section soerb
-.section soesa
-.section soesb
-.section soeta
-.section soetb
-.section soeua
-.section soeub
-.section soeva
-.section soevb
-.section soewa
-.section soewb
-.section soexa
-.section soexb
-.section soeya
-.section soeyb
-.section soeza
-.section soezb
-.section soe1a
-.section soe1b
-.section soe2a
-.section soe2b
-.section soe3a
-.section soe3b
-.section soe4a
-.section soe4b
-.section soe5a
-.section soe5b
-.section soe6a
-.section soe6b
-.section soe7a
-.section soe7b
-.section soe8a
-.section soe8b
-.section soe9a
-.section soe9b
-.section soe0a
-.section soe0b
-.section sofaa
-.section sofab
-.section sofba
-.section sofbb
-.section sofca
-.section sofcb
-.section sofda
-.section sofdb
-.section sofea
-.section sofeb
-.section soffa
-.section soffb
-.section sofga
-.section sofgb
-.section sofha
-.section sofhb
-.section sofia
-.section sofib
-.section sofja
-.section sofjb
-.section sofka
-.section sofkb
-.section sofla
-.section soflb
-.section sofma
-.section sofmb
-.section sofna
-.section sofnb
-.section sofoa
-.section sofob
-.section sofpa
-.section sofpb
-.section sofqa
-.section sofqb
-.section sofra
-.section sofrb
-.section sofsa
-.section sofsb
-.section softa
-.section softb
-.section sofua
-.section sofub
-.section sofva
-.section sofvb
-.section sofwa
-.section sofwb
-.section sofxa
-.section sofxb
-.section sofya
-.section sofyb
-.section sofza
-.section sofzb
-.section sof1a
-.section sof1b
-.section sof2a
-.section sof2b
-.section sof3a
-.section sof3b
-.section sof4a
-.section sof4b
-.section sof5a
-.section sof5b
-.section sof6a
-.section sof6b
-.section sof7a
-.section sof7b
-.section sof8a
-.section sof8b
-.section sof9a
-.section sof9b
-.section sof0a
-.section sof0b
-.section sogaa
-.section sogab
-.section sogba
-.section sogbb
-.section sogca
-.section sogcb
-.section sogda
-.section sogdb
-.section sogea
-.section sogeb
-.section sogfa
-.section sogfb
-.section sogga
-.section soggb
-.section sogha
-.section soghb
-.section sogia
-.section sogib
-.section sogja
-.section sogjb
-.section sogka
-.section sogkb
-.section sogla
-.section soglb
-.section sogma
-.section sogmb
-.section sogna
-.section sognb
-.section sogoa
-.section sogob
-.section sogpa
-.section sogpb
-.section sogqa
-.section sogqb
-.section sogra
-.section sogrb
-.section sogsa
-.section sogsb
-.section sogta
-.section sogtb
-.section sogua
-.section sogub
-.section sogva
-.section sogvb
-.section sogwa
-.section sogwb
-.section sogxa
-.section sogxb
-.section sogya
-.section sogyb
-.section sogza
-.section sogzb
-.section sog1a
-.section sog1b
-.section sog2a
-.section sog2b
-.section sog3a
-.section sog3b
-.section sog4a
-.section sog4b
-.section sog5a
-.section sog5b
-.section sog6a
-.section sog6b
-.section sog7a
-.section sog7b
-.section sog8a
-.section sog8b
-.section sog9a
-.section sog9b
-.section sog0a
-.section sog0b
-.section sohaa
-.section sohab
-.section sohba
-.section sohbb
-.section sohca
-.section sohcb
-.section sohda
-.section sohdb
-.section sohea
-.section soheb
-.section sohfa
-.section sohfb
-.section sohga
-.section sohgb
-.section sohha
-.section sohhb
-.section sohia
-.section sohib
-.section sohja
-.section sohjb
-.section sohka
-.section sohkb
-.section sohla
-.section sohlb
-.section sohma
-.section sohmb
-.section sohna
-.section sohnb
-.section sohoa
-.section sohob
-.section sohpa
-.section sohpb
-.section sohqa
-.section sohqb
-.section sohra
-.section sohrb
-.section sohsa
-.section sohsb
-.section sohta
-.section sohtb
-.section sohua
-.section sohub
-.section sohva
-.section sohvb
-.section sohwa
-.section sohwb
-.section sohxa
-.section sohxb
-.section sohya
-.section sohyb
-.section sohza
-.section sohzb
-.section soh1a
-.section soh1b
-.section soh2a
-.section soh2b
-.section soh3a
-.section soh3b
-.section soh4a
-.section soh4b
-.section soh5a
-.section soh5b
-.section soh6a
-.section soh6b
-.section soh7a
-.section soh7b
-.section soh8a
-.section soh8b
-.section soh9a
-.section soh9b
-.section soh0a
-.section soh0b
-.section soiaa
-.section soiab
-.section soiba
-.section soibb
-.section soica
-.section soicb
-.section soida
-.section soidb
-.section soiea
-.section soieb
-.section soifa
-.section soifb
-.section soiga
-.section soigb
-.section soiha
-.section soihb
-.section soiia
-.section soiib
-.section soija
-.section soijb
-.section soika
-.section soikb
-.section soila
-.section soilb
-.section soima
-.section soimb
-.section soina
-.section soinb
-.section soioa
-.section soiob
-.section soipa
-.section soipb
-.section soiqa
-.section soiqb
-.section soira
-.section soirb
-.section soisa
-.section soisb
-.section soita
-.section soitb
-.section soiua
-.section soiub
-.section soiva
-.section soivb
-.section soiwa
-.section soiwb
-.section soixa
-.section soixb
-.section soiya
-.section soiyb
-.section soiza
-.section soizb
-.section soi1a
-.section soi1b
-.section soi2a
-.section soi2b
-.section soi3a
-.section soi3b
-.section soi4a
-.section soi4b
-.section soi5a
-.section soi5b
-.section soi6a
-.section soi6b
-.section soi7a
-.section soi7b
-.section soi8a
-.section soi8b
-.section soi9a
-.section soi9b
-.section soi0a
-.section soi0b
-.section sojaa
-.section sojab
-.section sojba
-.section sojbb
-.section sojca
-.section sojcb
-.section sojda
-.section sojdb
-.section sojea
-.section sojeb
-.section sojfa
-.section sojfb
-.section sojga
-.section sojgb
-.section sojha
-.section sojhb
-.section sojia
-.section sojib
-.section sojja
-.section sojjb
-.section sojka
-.section sojkb
-.section sojla
-.section sojlb
-.section sojma
-.section sojmb
-.section sojna
-.section sojnb
-.section sojoa
-.section sojob
-.section sojpa
-.section sojpb
-.section sojqa
-.section sojqb
-.section sojra
-.section sojrb
-.section sojsa
-.section sojsb
-.section sojta
-.section sojtb
-.section sojua
-.section sojub
-.section sojva
-.section sojvb
-.section sojwa
-.section sojwb
-.section sojxa
-.section sojxb
-.section sojya
-.section sojyb
-.section sojza
-.section sojzb
-.section soj1a
-.section soj1b
-.section soj2a
-.section soj2b
-.section soj3a
-.section soj3b
-.section soj4a
-.section soj4b
-.section soj5a
-.section soj5b
-.section soj6a
-.section soj6b
-.section soj7a
-.section soj7b
-.section soj8a
-.section soj8b
-.section soj9a
-.section soj9b
-.section soj0a
-.section soj0b
-.section sokaa
-.section sokab
-.section sokba
-.section sokbb
-.section sokca
-.section sokcb
-.section sokda
-.section sokdb
-.section sokea
-.section sokeb
-.section sokfa
-.section sokfb
-.section sokga
-.section sokgb
-.section sokha
-.section sokhb
-.section sokia
-.section sokib
-.section sokja
-.section sokjb
-.section sokka
-.section sokkb
-.section sokla
-.section soklb
-.section sokma
-.section sokmb
-.section sokna
-.section soknb
-.section sokoa
-.section sokob
-.section sokpa
-.section sokpb
-.section sokqa
-.section sokqb
-.section sokra
-.section sokrb
-.section soksa
-.section soksb
-.section sokta
-.section soktb
-.section sokua
-.section sokub
-.section sokva
-.section sokvb
-.section sokwa
-.section sokwb
-.section sokxa
-.section sokxb
-.section sokya
-.section sokyb
-.section sokza
-.section sokzb
-.section sok1a
-.section sok1b
-.section sok2a
-.section sok2b
-.section sok3a
-.section sok3b
-.section sok4a
-.section sok4b
-.section sok5a
-.section sok5b
-.section sok6a
-.section sok6b
-.section sok7a
-.section sok7b
-.section sok8a
-.section sok8b
-.section sok9a
-.section sok9b
-.section sok0a
-.section sok0b
-.section solaa
-.section solab
-.section solba
-.section solbb
-.section solca
-.section solcb
-.section solda
-.section soldb
-.section solea
-.section soleb
-.section solfa
-.section solfb
-.section solga
-.section solgb
-.section solha
-.section solhb
-.section solia
-.section solib
-.section solja
-.section soljb
-.section solka
-.section solkb
-.section solla
-.section sollb
-.section solma
-.section solmb
-.section solna
-.section solnb
-.section soloa
-.section solob
-.section solpa
-.section solpb
-.section solqa
-.section solqb
-.section solra
-.section solrb
-.section solsa
-.section solsb
-.section solta
-.section soltb
-.section solua
-.section solub
-.section solva
-.section solvb
-.section solwa
-.section solwb
-.section solxa
-.section solxb
-.section solya
-.section solyb
-.section solza
-.section solzb
-.section sol1a
-.section sol1b
-.section sol2a
-.section sol2b
-.section sol3a
-.section sol3b
-.section sol4a
-.section sol4b
-.section sol5a
-.section sol5b
-.section sol6a
-.section sol6b
-.section sol7a
-.section sol7b
-.section sol8a
-.section sol8b
-.section sol9a
-.section sol9b
-.section sol0a
-.section sol0b
-.section somaa
-.section somab
-.section somba
-.section sombb
-.section somca
-.section somcb
-.section somda
-.section somdb
-.section somea
-.section someb
-.section somfa
-.section somfb
-.section somga
-.section somgb
-.section somha
-.section somhb
-.section somia
-.section somib
-.section somja
-.section somjb
-.section somka
-.section somkb
-.section somla
-.section somlb
-.section somma
-.section sommb
-.section somna
-.section somnb
-.section somoa
-.section somob
-.section sompa
-.section sompb
-.section somqa
-.section somqb
-.section somra
-.section somrb
-.section somsa
-.section somsb
-.section somta
-.section somtb
-.section somua
-.section somub
-.section somva
-.section somvb
-.section somwa
-.section somwb
-.section somxa
-.section somxb
-.section somya
-.section somyb
-.section somza
-.section somzb
-.section som1a
-.section som1b
-.section som2a
-.section som2b
-.section som3a
-.section som3b
-.section som4a
-.section som4b
-.section som5a
-.section som5b
-.section som6a
-.section som6b
-.section som7a
-.section som7b
-.section som8a
-.section som8b
-.section som9a
-.section som9b
-.section som0a
-.section som0b
-.section sonaa
-.section sonab
-.section sonba
-.section sonbb
-.section sonca
-.section soncb
-.section sonda
-.section sondb
-.section sonea
-.section soneb
-.section sonfa
-.section sonfb
-.section songa
-.section songb
-.section sonha
-.section sonhb
-.section sonia
-.section sonib
-.section sonja
-.section sonjb
-.section sonka
-.section sonkb
-.section sonla
-.section sonlb
-.section sonma
-.section sonmb
-.section sonna
-.section sonnb
-.section sonoa
-.section sonob
-.section sonpa
-.section sonpb
-.section sonqa
-.section sonqb
-.section sonra
-.section sonrb
-.section sonsa
-.section sonsb
-.section sonta
-.section sontb
-.section sonua
-.section sonub
-.section sonva
-.section sonvb
-.section sonwa
-.section sonwb
-.section sonxa
-.section sonxb
-.section sonya
-.section sonyb
-.section sonza
-.section sonzb
-.section son1a
-.section son1b
-.section son2a
-.section son2b
-.section son3a
-.section son3b
-.section son4a
-.section son4b
-.section son5a
-.section son5b
-.section son6a
-.section son6b
-.section son7a
-.section son7b
-.section son8a
-.section son8b
-.section son9a
-.section son9b
-.section son0a
-.section son0b
-.section sooaa
-.section sooab
-.section sooba
-.section soobb
-.section sooca
-.section soocb
-.section sooda
-.section soodb
-.section sooea
-.section sooeb
-.section soofa
-.section soofb
-.section sooga
-.section soogb
-.section sooha
-.section soohb
-.section sooia
-.section sooib
-.section sooja
-.section soojb
-.section sooka
-.section sookb
-.section soola
-.section soolb
-.section sooma
-.section soomb
-.section soona
-.section soonb
-.section soooa
-.section sooob
-.section soopa
-.section soopb
-.section sooqa
-.section sooqb
-.section soora
-.section soorb
-.section soosa
-.section soosb
-.section soota
-.section sootb
-.section sooua
-.section sooub
-.section soova
-.section soovb
-.section soowa
-.section soowb
-.section sooxa
-.section sooxb
-.section sooya
-.section sooyb
-.section sooza
-.section soozb
-.section soo1a
-.section soo1b
-.section soo2a
-.section soo2b
-.section soo3a
-.section soo3b
-.section soo4a
-.section soo4b
-.section soo5a
-.section soo5b
-.section soo6a
-.section soo6b
-.section soo7a
-.section soo7b
-.section soo8a
-.section soo8b
-.section soo9a
-.section soo9b
-.section soo0a
-.section soo0b
-.section sopaa
-.section sopab
-.section sopba
-.section sopbb
-.section sopca
-.section sopcb
-.section sopda
-.section sopdb
-.section sopea
-.section sopeb
-.section sopfa
-.section sopfb
-.section sopga
-.section sopgb
-.section sopha
-.section sophb
-.section sopia
-.section sopib
-.section sopja
-.section sopjb
-.section sopka
-.section sopkb
-.section sopla
-.section soplb
-.section sopma
-.section sopmb
-.section sopna
-.section sopnb
-.section sopoa
-.section sopob
-.section soppa
-.section soppb
-.section sopqa
-.section sopqb
-.section sopra
-.section soprb
-.section sopsa
-.section sopsb
-.section sopta
-.section soptb
-.section sopua
-.section sopub
-.section sopva
-.section sopvb
-.section sopwa
-.section sopwb
-.section sopxa
-.section sopxb
-.section sopya
-.section sopyb
-.section sopza
-.section sopzb
-.section sop1a
-.section sop1b
-.section sop2a
-.section sop2b
-.section sop3a
-.section sop3b
-.section sop4a
-.section sop4b
-.section sop5a
-.section sop5b
-.section sop6a
-.section sop6b
-.section sop7a
-.section sop7b
-.section sop8a
-.section sop8b
-.section sop9a
-.section sop9b
-.section sop0a
-.section sop0b
-.section soqaa
-.section soqab
-.section soqba
-.section soqbb
-.section soqca
-.section soqcb
-.section soqda
-.section soqdb
-.section soqea
-.section soqeb
-.section soqfa
-.section soqfb
-.section soqga
-.section soqgb
-.section soqha
-.section soqhb
-.section soqia
-.section soqib
-.section soqja
-.section soqjb
-.section soqka
-.section soqkb
-.section soqla
-.section soqlb
-.section soqma
-.section soqmb
-.section soqna
-.section soqnb
-.section soqoa
-.section soqob
-.section soqpa
-.section soqpb
-.section soqqa
-.section soqqb
-.section soqra
-.section soqrb
-.section soqsa
-.section soqsb
-.section soqta
-.section soqtb
-.section soqua
-.section soqub
-.section soqva
-.section soqvb
-.section soqwa
-.section soqwb
-.section soqxa
-.section soqxb
-.section soqya
-.section soqyb
-.section soqza
-.section soqzb
-.section soq1a
-.section soq1b
-.section soq2a
-.section soq2b
-.section soq3a
-.section soq3b
-.section soq4a
-.section soq4b
-.section soq5a
-.section soq5b
-.section soq6a
-.section soq6b
-.section soq7a
-.section soq7b
-.section soq8a
-.section soq8b
-.section soq9a
-.section soq9b
-.section soq0a
-.section soq0b
-.section soraa
-.section sorab
-.section sorba
-.section sorbb
-.section sorca
-.section sorcb
-.section sorda
-.section sordb
-.section sorea
-.section soreb
-.section sorfa
-.section sorfb
-.section sorga
-.section sorgb
-.section sorha
-.section sorhb
-.section soria
-.section sorib
-.section sorja
-.section sorjb
-.section sorka
-.section sorkb
-.section sorla
-.section sorlb
-.section sorma
-.section sormb
-.section sorna
-.section sornb
-.section soroa
-.section sorob
-.section sorpa
-.section sorpb
-.section sorqa
-.section sorqb
-.section sorra
-.section sorrb
-.section sorsa
-.section sorsb
-.section sorta
-.section sortb
-.section sorua
-.section sorub
-.section sorva
-.section sorvb
-.section sorwa
-.section sorwb
-.section sorxa
-.section sorxb
-.section sorya
-.section soryb
-.section sorza
-.section sorzb
-.section sor1a
-.section sor1b
-.section sor2a
-.section sor2b
-.section sor3a
-.section sor3b
-.section sor4a
-.section sor4b
-.section sor5a
-.section sor5b
-.section sor6a
-.section sor6b
-.section sor7a
-.section sor7b
-.section sor8a
-.section sor8b
-.section sor9a
-.section sor9b
-.section sor0a
-.section sor0b
-.section sosaa
-.section sosab
-.section sosba
-.section sosbb
-.section sosca
-.section soscb
-.section sosda
-.section sosdb
-.section sosea
-.section soseb
-.section sosfa
-.section sosfb
-.section sosga
-.section sosgb
-.section sosha
-.section soshb
-.section sosia
-.section sosib
-.section sosja
-.section sosjb
-.section soska
-.section soskb
-.section sosla
-.section soslb
-.section sosma
-.section sosmb
-.section sosna
-.section sosnb
-.section sosoa
-.section sosob
-.section sospa
-.section sospb
-.section sosqa
-.section sosqb
-.section sosra
-.section sosrb
-.section sossa
-.section sossb
-.section sosta
-.section sostb
-.section sosua
-.section sosub
-.section sosva
-.section sosvb
-.section soswa
-.section soswb
-.section sosxa
-.section sosxb
-.section sosya
-.section sosyb
-.section sosza
-.section soszb
-.section sos1a
-.section sos1b
-.section sos2a
-.section sos2b
-.section sos3a
-.section sos3b
-.section sos4a
-.section sos4b
-.section sos5a
-.section sos5b
-.section sos6a
-.section sos6b
-.section sos7a
-.section sos7b
-.section sos8a
-.section sos8b
-.section sos9a
-.section sos9b
-.section sos0a
-.section sos0b
-.section sotaa
-.section sotab
-.section sotba
-.section sotbb
-.section sotca
-.section sotcb
-.section sotda
-.section sotdb
-.section sotea
-.section soteb
-.section sotfa
-.section sotfb
-.section sotga
-.section sotgb
-.section sotha
-.section sothb
-.section sotia
-.section sotib
-.section sotja
-.section sotjb
-.section sotka
-.section sotkb
-.section sotla
-.section sotlb
-.section sotma
-.section sotmb
-.section sotna
-.section sotnb
-.section sotoa
-.section sotob
-.section sotpa
-.section sotpb
-.section sotqa
-.section sotqb
-.section sotra
-.section sotrb
-.section sotsa
-.section sotsb
-.section sotta
-.section sottb
-.section sotua
-.section sotub
-.section sotva
-.section sotvb
-.section sotwa
-.section sotwb
-.section sotxa
-.section sotxb
-.section sotya
-.section sotyb
-.section sotza
-.section sotzb
-.section sot1a
-.section sot1b
-.section sot2a
-.section sot2b
-.section sot3a
-.section sot3b
-.section sot4a
-.section sot4b
-.section sot5a
-.section sot5b
-.section sot6a
-.section sot6b
-.section sot7a
-.section sot7b
-.section sot8a
-.section sot8b
-.section sot9a
-.section sot9b
-.section sot0a
-.section sot0b
-.section souaa
-.section souab
-.section souba
-.section soubb
-.section souca
-.section soucb
-.section souda
-.section soudb
-.section souea
-.section soueb
-.section soufa
-.section soufb
-.section souga
-.section sougb
-.section souha
-.section souhb
-.section souia
-.section souib
-.section souja
-.section soujb
-.section souka
-.section soukb
-.section soula
-.section soulb
-.section souma
-.section soumb
-.section souna
-.section sounb
-.section souoa
-.section souob
-.section soupa
-.section soupb
-.section souqa
-.section souqb
-.section soura
-.section sourb
-.section sousa
-.section sousb
-.section souta
-.section soutb
-.section souua
-.section souub
-.section souva
-.section souvb
-.section souwa
-.section souwb
-.section souxa
-.section souxb
-.section souya
-.section souyb
-.section souza
-.section souzb
-.section sou1a
-.section sou1b
-.section sou2a
-.section sou2b
-.section sou3a
-.section sou3b
-.section sou4a
-.section sou4b
-.section sou5a
-.section sou5b
-.section sou6a
-.section sou6b
-.section sou7a
-.section sou7b
-.section sou8a
-.section sou8b
-.section sou9a
-.section sou9b
-.section sou0a
-.section sou0b
-.section sovaa
-.section sovab
-.section sovba
-.section sovbb
-.section sovca
-.section sovcb
-.section sovda
-.section sovdb
-.section sovea
-.section soveb
-.section sovfa
-.section sovfb
-.section sovga
-.section sovgb
-.section sovha
-.section sovhb
-.section sovia
-.section sovib
-.section sovja
-.section sovjb
-.section sovka
-.section sovkb
-.section sovla
-.section sovlb
-.section sovma
-.section sovmb
-.section sovna
-.section sovnb
-.section sovoa
-.section sovob
-.section sovpa
-.section sovpb
-.section sovqa
-.section sovqb
-.section sovra
-.section sovrb
-.section sovsa
-.section sovsb
-.section sovta
-.section sovtb
-.section sovua
-.section sovub
-.section sovva
-.section sovvb
-.section sovwa
-.section sovwb
-.section sovxa
-.section sovxb
-.section sovya
-.section sovyb
-.section sovza
-.section sovzb
-.section sov1a
-.section sov1b
-.section sov2a
-.section sov2b
-.section sov3a
-.section sov3b
-.section sov4a
-.section sov4b
-.section sov5a
-.section sov5b
-.section sov6a
-.section sov6b
-.section sov7a
-.section sov7b
-.section sov8a
-.section sov8b
-.section sov9a
-.section sov9b
-.section sov0a
-.section sov0b
-.section sowaa
-.section sowab
-.section sowba
-.section sowbb
-.section sowca
-.section sowcb
-.section sowda
-.section sowdb
-.section sowea
-.section soweb
-.section sowfa
-.section sowfb
-.section sowga
-.section sowgb
-.section sowha
-.section sowhb
-.section sowia
-.section sowib
-.section sowja
-.section sowjb
-.section sowka
-.section sowkb
-.section sowla
-.section sowlb
-.section sowma
-.section sowmb
-.section sowna
-.section sownb
-.section sowoa
-.section sowob
-.section sowpa
-.section sowpb
-.section sowqa
-.section sowqb
-.section sowra
-.section sowrb
-.section sowsa
-.section sowsb
-.section sowta
-.section sowtb
-.section sowua
-.section sowub
-.section sowva
-.section sowvb
-.section sowwa
-.section sowwb
-.section sowxa
-.section sowxb
-.section sowya
-.section sowyb
-.section sowza
-.section sowzb
-.section sow1a
-.section sow1b
-.section sow2a
-.section sow2b
-.section sow3a
-.section sow3b
-.section sow4a
-.section sow4b
-.section sow5a
-.section sow5b
-.section sow6a
-.section sow6b
-.section sow7a
-.section sow7b
-.section sow8a
-.section sow8b
-.section sow9a
-.section sow9b
-.section sow0a
-.section sow0b
-.section soxaa
-.section soxab
-.section soxba
-.section soxbb
-.section soxca
-.section soxcb
-.section soxda
-.section soxdb
-.section soxea
-.section soxeb
-.section soxfa
-.section soxfb
-.section soxga
-.section soxgb
-.section soxha
-.section soxhb
-.section soxia
-.section soxib
-.section soxja
-.section soxjb
-.section soxka
-.section soxkb
-.section soxla
-.section soxlb
-.section soxma
-.section soxmb
-.section soxna
-.section soxnb
-.section soxoa
-.section soxob
-.section soxpa
-.section soxpb
-.section soxqa
-.section soxqb
-.section soxra
-.section soxrb
-.section soxsa
-.section soxsb
-.section soxta
-.section soxtb
-.section soxua
-.section soxub
-.section soxva
-.section soxvb
-.section soxwa
-.section soxwb
-.section soxxa
-.section soxxb
-.section soxya
-.section soxyb
-.section soxza
-.section soxzb
-.section sox1a
-.section sox1b
-.section sox2a
-.section sox2b
-.section sox3a
-.section sox3b
-.section sox4a
-.section sox4b
-.section sox5a
-.section sox5b
-.section sox6a
-.section sox6b
-.section sox7a
-.section sox7b
-.section sox8a
-.section sox8b
-.section sox9a
-.section sox9b
-.section sox0a
-.section sox0b
-.section soyaa
-.section soyab
-.section soyba
-.section soybb
-.section soyca
-.section soycb
-.section soyda
-.section soydb
-.section soyea
-.section soyeb
-.section soyfa
-.section soyfb
-.section soyga
-.section soygb
-.section soyha
-.section soyhb
-.section soyia
-.section soyib
-.section soyja
-.section soyjb
-.section soyka
-.section soykb
-.section soyla
-.section soylb
-.section soyma
-.section soymb
-.section soyna
-.section soynb
-.section soyoa
-.section soyob
-.section soypa
-.section soypb
-.section soyqa
-.section soyqb
-.section soyra
-.section soyrb
-.section soysa
-.section soysb
-.section soyta
-.section soytb
-.section soyua
-.section soyub
-.section soyva
-.section soyvb
-.section soywa
-.section soywb
-.section soyxa
-.section soyxb
-.section soyya
-.section soyyb
-.section soyza
-.section soyzb
-.section soy1a
-.section soy1b
-.section soy2a
-.section soy2b
-.section soy3a
-.section soy3b
-.section soy4a
-.section soy4b
-.section soy5a
-.section soy5b
-.section soy6a
-.section soy6b
-.section soy7a
-.section soy7b
-.section soy8a
-.section soy8b
-.section soy9a
-.section soy9b
-.section soy0a
-.section soy0b
-.section sozaa
-.section sozab
-.section sozba
-.section sozbb
-.section sozca
-.section sozcb
-.section sozda
-.section sozdb
-.section sozea
-.section sozeb
-.section sozfa
-.section sozfb
-.section sozga
-.section sozgb
-.section sozha
-.section sozhb
-.section sozia
-.section sozib
-.section sozja
-.section sozjb
-.section sozka
-.section sozkb
-.section sozla
-.section sozlb
-.section sozma
-.section sozmb
-.section sozna
-.section soznb
-.section sozoa
-.section sozob
-.section sozpa
-.section sozpb
-.section sozqa
-.section sozqb
-.section sozra
-.section sozrb
-.section sozsa
-.section sozsb
-.section sozta
-.section soztb
-.section sozua
-.section sozub
-.section sozva
-.section sozvb
-.section sozwa
-.section sozwb
-.section sozxa
-.section sozxb
-.section sozya
-.section sozyb
-.section sozza
-.section sozzb
-.section soz1a
-.section soz1b
-.section soz2a
-.section soz2b
-.section soz3a
-.section soz3b
-.section soz4a
-.section soz4b
-.section soz5a
-.section soz5b
-.section soz6a
-.section soz6b
-.section soz7a
-.section soz7b
-.section soz8a
-.section soz8b
-.section soz9a
-.section soz9b
-.section soz0a
-.section soz0b
-.section so1aa
-.section so1ab
-.section so1ba
-.section so1bb
-.section so1ca
-.section so1cb
-.section so1da
-.section so1db
-.section so1ea
-.section so1eb
-.section so1fa
-.section so1fb
-.section so1ga
-.section so1gb
-.section so1ha
-.section so1hb
-.section so1ia
-.section so1ib
-.section so1ja
-.section so1jb
-.section so1ka
-.section so1kb
-.section so1la
-.section so1lb
-.section so1ma
-.section so1mb
-.section so1na
-.section so1nb
-.section so1oa
-.section so1ob
-.section so1pa
-.section so1pb
-.section so1qa
-.section so1qb
-.section so1ra
-.section so1rb
-.section so1sa
-.section so1sb
-.section so1ta
-.section so1tb
-.section so1ua
-.section so1ub
-.section so1va
-.section so1vb
-.section so1wa
-.section so1wb
-.section so1xa
-.section so1xb
-.section so1ya
-.section so1yb
-.section so1za
-.section so1zb
-.section so11a
-.section so11b
-.section so12a
-.section so12b
-.section so13a
-.section so13b
-.section so14a
-.section so14b
-.section so15a
-.section so15b
-.section so16a
-.section so16b
-.section so17a
-.section so17b
-.section so18a
-.section so18b
-.section so19a
-.section so19b
-.section so10a
-.section so10b
-.section so2aa
-.section so2ab
-.section so2ba
-.section so2bb
-.section so2ca
-.section so2cb
-.section so2da
-.section so2db
-.section so2ea
-.section so2eb
-.section so2fa
-.section so2fb
-.section so2ga
-.section so2gb
-.section so2ha
-.section so2hb
-.section so2ia
-.section so2ib
-.section so2ja
-.section so2jb
-.section so2ka
-.section so2kb
-.section so2la
-.section so2lb
-.section so2ma
-.section so2mb
-.section so2na
-.section so2nb
-.section so2oa
-.section so2ob
-.section so2pa
-.section so2pb
-.section so2qa
-.section so2qb
-.section so2ra
-.section so2rb
-.section so2sa
-.section so2sb
-.section so2ta
-.section so2tb
-.section so2ua
-.section so2ub
-.section so2va
-.section so2vb
-.section so2wa
-.section so2wb
-.section so2xa
-.section so2xb
-.section so2ya
-.section so2yb
-.section so2za
-.section so2zb
-.section so21a
-.section so21b
-.section so22a
-.section so22b
-.section so23a
-.section so23b
-.section so24a
-.section so24b
-.section so25a
-.section so25b
-.section so26a
-.section so26b
-.section so27a
-.section so27b
-.section so28a
-.section so28b
-.section so29a
-.section so29b
-.section so20a
-.section so20b
-.section so3aa
-.section so3ab
-.section so3ba
-.section so3bb
-.section so3ca
-.section so3cb
-.section so3da
-.section so3db
-.section so3ea
-.section so3eb
-.section so3fa
-.section so3fb
-.section so3ga
-.section so3gb
-.section so3ha
-.section so3hb
-.section so3ia
-.section so3ib
-.section so3ja
-.section so3jb
-.section so3ka
-.section so3kb
-.section so3la
-.section so3lb
-.section so3ma
-.section so3mb
-.section so3na
-.section so3nb
-.section so3oa
-.section so3ob
-.section so3pa
-.section so3pb
-.section so3qa
-.section so3qb
-.section so3ra
-.section so3rb
-.section so3sa
-.section so3sb
-.section so3ta
-.section so3tb
-.section so3ua
-.section so3ub
-.section so3va
-.section so3vb
-.section so3wa
-.section so3wb
-.section so3xa
-.section so3xb
-.section so3ya
-.section so3yb
-.section so3za
-.section so3zb
-.section so31a
-.section so31b
-.section so32a
-.section so32b
-.section so33a
-.section so33b
-.section so34a
-.section so34b
-.section so35a
-.section so35b
-.section so36a
-.section so36b
-.section so37a
-.section so37b
-.section so38a
-.section so38b
-.section so39a
-.section so39b
-.section so30a
-.section so30b
-.section so4aa
-.section so4ab
-.section so4ba
-.section so4bb
-.section so4ca
-.section so4cb
-.section so4da
-.section so4db
-.section so4ea
-.section so4eb
-.section so4fa
-.section so4fb
-.section so4ga
-.section so4gb
-.section so4ha
-.section so4hb
-.section so4ia
-.section so4ib
-.section so4ja
-.section so4jb
-.section so4ka
-.section so4kb
-.section so4la
-.section so4lb
-.section so4ma
-.section so4mb
-.section so4na
-.section so4nb
-.section so4oa
-.section so4ob
-.section so4pa
-.section so4pb
-.section so4qa
-.section so4qb
-.section so4ra
-.section so4rb
-.section so4sa
-.section so4sb
-.section so4ta
-.section so4tb
-.section so4ua
-.section so4ub
-.section so4va
-.section so4vb
-.section so4wa
-.section so4wb
-.section so4xa
-.section so4xb
-.section so4ya
-.section so4yb
-.section so4za
-.section so4zb
-.section so41a
-.section so41b
-.section so42a
-.section so42b
-.section so43a
-.section so43b
-.section so44a
-.section so44b
-.section so45a
-.section so45b
-.section so46a
-.section so46b
-.section so47a
-.section so47b
-.section so48a
-.section so48b
-.section so49a
-.section so49b
-.section so40a
-.section so40b
-.section so5aa
-.section so5ab
-.section so5ba
-.section so5bb
-.section so5ca
-.section so5cb
-.section so5da
-.section so5db
-.section so5ea
-.section so5eb
-.section so5fa
-.section so5fb
-.section so5ga
-.section so5gb
-.section so5ha
-.section so5hb
-.section so5ia
-.section so5ib
-.section so5ja
-.section so5jb
-.section so5ka
-.section so5kb
-.section so5la
-.section so5lb
-.section so5ma
-.section so5mb
-.section so5na
-.section so5nb
-.section so5oa
-.section so5ob
-.section so5pa
-.section so5pb
-.section so5qa
-.section so5qb
-.section so5ra
-.section so5rb
-.section so5sa
-.section so5sb
-.section so5ta
-.section so5tb
-.section so5ua
-.section so5ub
-.section so5va
-.section so5vb
-.section so5wa
-.section so5wb
-.section so5xa
-.section so5xb
-.section so5ya
-.section so5yb
-.section so5za
-.section so5zb
-.section so51a
-.section so51b
-.section so52a
-.section so52b
-.section so53a
-.section so53b
-.section so54a
-.section so54b
-.section so55a
-.section so55b
-.section so56a
-.section so56b
-.section so57a
-.section so57b
-.section so58a
-.section so58b
-.section so59a
-.section so59b
-.section so50a
-.section so50b
-.section so6aa
-.section so6ab
-.section so6ba
-.section so6bb
-.section so6ca
-.section so6cb
-.section so6da
-.section so6db
-.section so6ea
-.section so6eb
-.section so6fa
-.section so6fb
-.section so6ga
-.section so6gb
-.section so6ha
-.section so6hb
-.section so6ia
-.section so6ib
-.section so6ja
-.section so6jb
-.section so6ka
-.section so6kb
-.section so6la
-.section so6lb
-.section so6ma
-.section so6mb
-.section so6na
-.section so6nb
-.section so6oa
-.section so6ob
-.section so6pa
-.section so6pb
-.section so6qa
-.section so6qb
-.section so6ra
-.section so6rb
-.section so6sa
-.section so6sb
-.section so6ta
-.section so6tb
-.section so6ua
-.section so6ub
-.section so6va
-.section so6vb
-.section so6wa
-.section so6wb
-.section so6xa
-.section so6xb
-.section so6ya
-.section so6yb
-.section so6za
-.section so6zb
-.section so61a
-.section so61b
-.section so62a
-.section so62b
-.section so63a
-.section so63b
-.section so64a
-.section so64b
-.section so65a
-.section so65b
-.section so66a
-.section so66b
-.section so67a
-.section so67b
-.section so68a
-.section so68b
-.section so69a
-.section so69b
-.section so60a
-.section so60b
-.section so7aa
-.section so7ab
-.section so7ba
-.section so7bb
-.section so7ca
-.section so7cb
-.section so7da
-.section so7db
-.section so7ea
-.section so7eb
-.section so7fa
-.section so7fb
-.section so7ga
-.section so7gb
-.section so7ha
-.section so7hb
-.section so7ia
-.section so7ib
-.section so7ja
-.section so7jb
-.section so7ka
-.section so7kb
-.section so7la
-.section so7lb
-.section so7ma
-.section so7mb
-.section so7na
-.section so7nb
-.section so7oa
-.section so7ob
-.section so7pa
-.section so7pb
-.section so7qa
-.section so7qb
-.section so7ra
-.section so7rb
-.section so7sa
-.section so7sb
-.section so7ta
-.section so7tb
-.section so7ua
-.section so7ub
-.section so7va
-.section so7vb
-.section so7wa
-.section so7wb
-.section so7xa
-.section so7xb
-.section so7ya
-.section so7yb
-.section so7za
-.section so7zb
-.section so71a
-.section so71b
-.section so72a
-.section so72b
-.section so73a
-.section so73b
-.section so74a
-.section so74b
-.section so75a
-.section so75b
-.section so76a
-.section so76b
-.section so77a
-.section so77b
-.section so78a
-.section so78b
-.section so79a
-.section so79b
-.section so70a
-.section so70b
-.section so8aa
-.section so8ab
-.section so8ba
-.section so8bb
-.section so8ca
-.section so8cb
-.section so8da
-.section so8db
-.section so8ea
-.section so8eb
-.section so8fa
-.section so8fb
-.section so8ga
-.section so8gb
-.section so8ha
-.section so8hb
-.section so8ia
-.section so8ib
-.section so8ja
-.section so8jb
-.section so8ka
-.section so8kb
-.section so8la
-.section so8lb
-.section so8ma
-.section so8mb
-.section so8na
-.section so8nb
-.section so8oa
-.section so8ob
-.section so8pa
-.section so8pb
-.section so8qa
-.section so8qb
-.section so8ra
-.section so8rb
-.section so8sa
-.section so8sb
-.section so8ta
-.section so8tb
-.section so8ua
-.section so8ub
-.section so8va
-.section so8vb
-.section so8wa
-.section so8wb
-.section so8xa
-.section so8xb
-.section so8ya
-.section so8yb
-.section so8za
-.section so8zb
-.section so81a
-.section so81b
-.section so82a
-.section so82b
-.section so83a
-.section so83b
-.section so84a
-.section so84b
-.section so85a
-.section so85b
-.section so86a
-.section so86b
-.section so87a
-.section so87b
-.section so88a
-.section so88b
-.section so89a
-.section so89b
-.section so80a
-.section so80b
-.section so9aa
-.section so9ab
-.section so9ba
-.section so9bb
-.section so9ca
-.section so9cb
-.section so9da
-.section so9db
-.section so9ea
-.section so9eb
-.section so9fa
-.section so9fb
-.section so9ga
-.section so9gb
-.section so9ha
-.section so9hb
-.section so9ia
-.section so9ib
-.section so9ja
-.section so9jb
-.section so9ka
-.section so9kb
-.section so9la
-.section so9lb
-.section so9ma
-.section so9mb
-.section so9na
-.section so9nb
-.section so9oa
-.section so9ob
-.section so9pa
-.section so9pb
-.section so9qa
-.section so9qb
-.section so9ra
-.section so9rb
-.section so9sa
-.section so9sb
-.section so9ta
-.section so9tb
-.section so9ua
-.section so9ub
-.section so9va
-.section so9vb
-.section so9wa
-.section so9wb
-.section so9xa
-.section so9xb
-.section so9ya
-.section so9yb
-.section so9za
-.section so9zb
-.section so91a
-.section so91b
-.section so92a
-.section so92b
-.section so93a
-.section so93b
-.section so94a
-.section so94b
-.section so95a
-.section so95b
-.section so96a
-.section so96b
-.section so97a
-.section so97b
-.section so98a
-.section so98b
-.section so99a
-.section so99b
-.section so90a
-.section so90b
-.section so0aa
-.section so0ab
-.section so0ba
-.section so0bb
-.section so0ca
-.section so0cb
-.section so0da
-.section so0db
-.section so0ea
-.section so0eb
-.section so0fa
-.section so0fb
-.section so0ga
-.section so0gb
-.section so0ha
-.section so0hb
-.section so0ia
-.section so0ib
-.section so0ja
-.section so0jb
-.section so0ka
-.section so0kb
-.section so0la
-.section so0lb
-.section so0ma
-.section so0mb
-.section so0na
-.section so0nb
-.section so0oa
-.section so0ob
-.section so0pa
-.section so0pb
-.section so0qa
-.section so0qb
-.section so0ra
-.section so0rb
-.section so0sa
-.section so0sb
-.section so0ta
-.section so0tb
-.section so0ua
-.section so0ub
-.section so0va
-.section so0vb
-.section so0wa
-.section so0wb
-.section so0xa
-.section so0xb
-.section so0ya
-.section so0yb
-.section so0za
-.section so0zb
-.section so01a
-.section so01b
-.section so02a
-.section so02b
-.section so03a
-.section so03b
-.section so04a
-.section so04b
-.section so05a
-.section so05b
-.section so06a
-.section so06b
-.section so07a
-.section so07b
-.section so08a
-.section so08b
-.section so09a
-.section so09b
-.section so00a
-.section so00b
-.section spaaa
-.section spaab
-.section spaba
-.section spabb
-.section spaca
-.section spacb
-.section spada
-.section spadb
-.section spaea
-.section spaeb
-.section spafa
-.section spafb
-.section spaga
-.section spagb
-.section spaha
-.section spahb
-.section spaia
-.section spaib
-.section spaja
-.section spajb
-.section spaka
-.section spakb
-.section spala
-.section spalb
-.section spama
-.section spamb
-.section spana
-.section spanb
-.section spaoa
-.section spaob
-.section spapa
-.section spapb
-.section spaqa
-.section spaqb
-.section spara
-.section sparb
-.section spasa
-.section spasb
-.section spata
-.section spatb
-.section spaua
-.section spaub
-.section spava
-.section spavb
-.section spawa
-.section spawb
-.section spaxa
-.section spaxb
-.section spaya
-.section spayb
-.section spaza
-.section spazb
-.section spa1a
-.section spa1b
-.section spa2a
-.section spa2b
-.section spa3a
-.section spa3b
-.section spa4a
-.section spa4b
-.section spa5a
-.section spa5b
-.section spa6a
-.section spa6b
-.section spa7a
-.section spa7b
-.section spa8a
-.section spa8b
-.section spa9a
-.section spa9b
-.section spa0a
-.section spa0b
-.section spbaa
-.section spbab
-.section spbba
-.section spbbb
-.section spbca
-.section spbcb
-.section spbda
-.section spbdb
-.section spbea
-.section spbeb
-.section spbfa
-.section spbfb
-.section spbga
-.section spbgb
-.section spbha
-.section spbhb
-.section spbia
-.section spbib
-.section spbja
-.section spbjb
-.section spbka
-.section spbkb
-.section spbla
-.section spblb
-.section spbma
-.section spbmb
-.section spbna
-.section spbnb
-.section spboa
-.section spbob
-.section spbpa
-.section spbpb
-.section spbqa
-.section spbqb
-.section spbra
-.section spbrb
-.section spbsa
-.section spbsb
-.section spbta
-.section spbtb
-.section spbua
-.section spbub
-.section spbva
-.section spbvb
-.section spbwa
-.section spbwb
-.section spbxa
-.section spbxb
-.section spbya
-.section spbyb
-.section spbza
-.section spbzb
-.section spb1a
-.section spb1b
-.section spb2a
-.section spb2b
-.section spb3a
-.section spb3b
-.section spb4a
-.section spb4b
-.section spb5a
-.section spb5b
-.section spb6a
-.section spb6b
-.section spb7a
-.section spb7b
-.section spb8a
-.section spb8b
-.section spb9a
-.section spb9b
-.section spb0a
-.section spb0b
-.section spcaa
-.section spcab
-.section spcba
-.section spcbb
-.section spcca
-.section spccb
-.section spcda
-.section spcdb
-.section spcea
-.section spceb
-.section spcfa
-.section spcfb
-.section spcga
-.section spcgb
-.section spcha
-.section spchb
-.section spcia
-.section spcib
-.section spcja
-.section spcjb
-.section spcka
-.section spckb
-.section spcla
-.section spclb
-.section spcma
-.section spcmb
-.section spcna
-.section spcnb
-.section spcoa
-.section spcob
-.section spcpa
-.section spcpb
-.section spcqa
-.section spcqb
-.section spcra
-.section spcrb
-.section spcsa
-.section spcsb
-.section spcta
-.section spctb
-.section spcua
-.section spcub
-.section spcva
-.section spcvb
-.section spcwa
-.section spcwb
-.section spcxa
-.section spcxb
-.section spcya
-.section spcyb
-.section spcza
-.section spczb
-.section spc1a
-.section spc1b
-.section spc2a
-.section spc2b
-.section spc3a
-.section spc3b
-.section spc4a
-.section spc4b
-.section spc5a
-.section spc5b
-.section spc6a
-.section spc6b
-.section spc7a
-.section spc7b
-.section spc8a
-.section spc8b
-.section spc9a
-.section spc9b
-.section spc0a
-.section spc0b
-.section spdaa
-.section spdab
-.section spdba
-.section spdbb
-.section spdca
-.section spdcb
-.section spdda
-.section spddb
-.section spdea
-.section spdeb
-.section spdfa
-.section spdfb
-.section spdga
-.section spdgb
-.section spdha
-.section spdhb
-.section spdia
-.section spdib
-.section spdja
-.section spdjb
-.section spdka
-.section spdkb
-.section spdla
-.section spdlb
-.section spdma
-.section spdmb
-.section spdna
-.section spdnb
-.section spdoa
-.section spdob
-.section spdpa
-.section spdpb
-.section spdqa
-.section spdqb
-.section spdra
-.section spdrb
-.section spdsa
-.section spdsb
-.section spdta
-.section spdtb
-.section spdua
-.section spdub
-.section spdva
-.section spdvb
-.section spdwa
-.section spdwb
-.section spdxa
-.section spdxb
-.section spdya
-.section spdyb
-.section spdza
-.section spdzb
-.section spd1a
-.section spd1b
-.section spd2a
-.section spd2b
-.section spd3a
-.section spd3b
-.section spd4a
-.section spd4b
-.section spd5a
-.section spd5b
-.section spd6a
-.section spd6b
-.section spd7a
-.section spd7b
-.section spd8a
-.section spd8b
-.section spd9a
-.section spd9b
-.section spd0a
-.section spd0b
-.section speaa
-.section speab
-.section speba
-.section spebb
-.section speca
-.section specb
-.section speda
-.section spedb
-.section speea
-.section speeb
-.section spefa
-.section spefb
-.section spega
-.section spegb
-.section speha
-.section spehb
-.section speia
-.section speib
-.section speja
-.section spejb
-.section speka
-.section spekb
-.section spela
-.section spelb
-.section spema
-.section spemb
-.section spena
-.section spenb
-.section speoa
-.section speob
-.section spepa
-.section spepb
-.section speqa
-.section speqb
-.section spera
-.section sperb
-.section spesa
-.section spesb
-.section speta
-.section spetb
-.section speua
-.section speub
-.section speva
-.section spevb
-.section spewa
-.section spewb
-.section spexa
-.section spexb
-.section speya
-.section speyb
-.section speza
-.section spezb
-.section spe1a
-.section spe1b
-.section spe2a
-.section spe2b
-.section spe3a
-.section spe3b
-.section spe4a
-.section spe4b
-.section spe5a
-.section spe5b
-.section spe6a
-.section spe6b
-.section spe7a
-.section spe7b
-.section spe8a
-.section spe8b
-.section spe9a
-.section spe9b
-.section spe0a
-.section spe0b
-.section spfaa
-.section spfab
-.section spfba
-.section spfbb
-.section spfca
-.section spfcb
-.section spfda
-.section spfdb
-.section spfea
-.section spfeb
-.section spffa
-.section spffb
-.section spfga
-.section spfgb
-.section spfha
-.section spfhb
-.section spfia
-.section spfib
-.section spfja
-.section spfjb
-.section spfka
-.section spfkb
-.section spfla
-.section spflb
-.section spfma
-.section spfmb
-.section spfna
-.section spfnb
-.section spfoa
-.section spfob
-.section spfpa
-.section spfpb
-.section spfqa
-.section spfqb
-.section spfra
-.section spfrb
-.section spfsa
-.section spfsb
-.section spfta
-.section spftb
-.section spfua
-.section spfub
-.section spfva
-.section spfvb
-.section spfwa
-.section spfwb
-.section spfxa
-.section spfxb
-.section spfya
-.section spfyb
-.section spfza
-.section spfzb
-.section spf1a
-.section spf1b
-.section spf2a
-.section spf2b
-.section spf3a
-.section spf3b
-.section spf4a
-.section spf4b
-.section spf5a
-.section spf5b
-.section spf6a
-.section spf6b
-.section spf7a
-.section spf7b
-.section spf8a
-.section spf8b
-.section spf9a
-.section spf9b
-.section spf0a
-.section spf0b
-.section spgaa
-.section spgab
-.section spgba
-.section spgbb
-.section spgca
-.section spgcb
-.section spgda
-.section spgdb
-.section spgea
-.section spgeb
-.section spgfa
-.section spgfb
-.section spgga
-.section spggb
-.section spgha
-.section spghb
-.section spgia
-.section spgib
-.section spgja
-.section spgjb
-.section spgka
-.section spgkb
-.section spgla
-.section spglb
-.section spgma
-.section spgmb
-.section spgna
-.section spgnb
-.section spgoa
-.section spgob
-.section spgpa
-.section spgpb
-.section spgqa
-.section spgqb
-.section spgra
-.section spgrb
-.section spgsa
-.section spgsb
-.section spgta
-.section spgtb
-.section spgua
-.section spgub
-.section spgva
-.section spgvb
-.section spgwa
-.section spgwb
-.section spgxa
-.section spgxb
-.section spgya
-.section spgyb
-.section spgza
-.section spgzb
-.section spg1a
-.section spg1b
-.section spg2a
-.section spg2b
-.section spg3a
-.section spg3b
-.section spg4a
-.section spg4b
-.section spg5a
-.section spg5b
-.section spg6a
-.section spg6b
-.section spg7a
-.section spg7b
-.section spg8a
-.section spg8b
-.section spg9a
-.section spg9b
-.section spg0a
-.section spg0b
-.section sphaa
-.section sphab
-.section sphba
-.section sphbb
-.section sphca
-.section sphcb
-.section sphda
-.section sphdb
-.section sphea
-.section spheb
-.section sphfa
-.section sphfb
-.section sphga
-.section sphgb
-.section sphha
-.section sphhb
-.section sphia
-.section sphib
-.section sphja
-.section sphjb
-.section sphka
-.section sphkb
-.section sphla
-.section sphlb
-.section sphma
-.section sphmb
-.section sphna
-.section sphnb
-.section sphoa
-.section sphob
-.section sphpa
-.section sphpb
-.section sphqa
-.section sphqb
-.section sphra
-.section sphrb
-.section sphsa
-.section sphsb
-.section sphta
-.section sphtb
-.section sphua
-.section sphub
-.section sphva
-.section sphvb
-.section sphwa
-.section sphwb
-.section sphxa
-.section sphxb
-.section sphya
-.section sphyb
-.section sphza
-.section sphzb
-.section sph1a
-.section sph1b
-.section sph2a
-.section sph2b
-.section sph3a
-.section sph3b
-.section sph4a
-.section sph4b
-.section sph5a
-.section sph5b
-.section sph6a
-.section sph6b
-.section sph7a
-.section sph7b
-.section sph8a
-.section sph8b
-.section sph9a
-.section sph9b
-.section sph0a
-.section sph0b
-.section spiaa
-.section spiab
-.section spiba
-.section spibb
-.section spica
-.section spicb
-.section spida
-.section spidb
-.section spiea
-.section spieb
-.section spifa
-.section spifb
-.section spiga
-.section spigb
-.section spiha
-.section spihb
-.section spiia
-.section spiib
-.section spija
-.section spijb
-.section spika
-.section spikb
-.section spila
-.section spilb
-.section spima
-.section spimb
-.section spina
-.section spinb
-.section spioa
-.section spiob
-.section spipa
-.section spipb
-.section spiqa
-.section spiqb
-.section spira
-.section spirb
-.section spisa
-.section spisb
-.section spita
-.section spitb
-.section spiua
-.section spiub
-.section spiva
-.section spivb
-.section spiwa
-.section spiwb
-.section spixa
-.section spixb
-.section spiya
-.section spiyb
-.section spiza
-.section spizb
-.section spi1a
-.section spi1b
-.section spi2a
-.section spi2b
-.section spi3a
-.section spi3b
-.section spi4a
-.section spi4b
-.section spi5a
-.section spi5b
-.section spi6a
-.section spi6b
-.section spi7a
-.section spi7b
-.section spi8a
-.section spi8b
-.section spi9a
-.section spi9b
-.section spi0a
-.section spi0b
-.section spjaa
-.section spjab
-.section spjba
-.section spjbb
-.section spjca
-.section spjcb
-.section spjda
-.section spjdb
-.section spjea
-.section spjeb
-.section spjfa
-.section spjfb
-.section spjga
-.section spjgb
-.section spjha
-.section spjhb
-.section spjia
-.section spjib
-.section spjja
-.section spjjb
-.section spjka
-.section spjkb
-.section spjla
-.section spjlb
-.section spjma
-.section spjmb
-.section spjna
-.section spjnb
-.section spjoa
-.section spjob
-.section spjpa
-.section spjpb
-.section spjqa
-.section spjqb
-.section spjra
-.section spjrb
-.section spjsa
-.section spjsb
-.section spjta
-.section spjtb
-.section spjua
-.section spjub
-.section spjva
-.section spjvb
-.section spjwa
-.section spjwb
-.section spjxa
-.section spjxb
-.section spjya
-.section spjyb
-.section spjza
-.section spjzb
-.section spj1a
-.section spj1b
-.section spj2a
-.section spj2b
-.section spj3a
-.section spj3b
-.section spj4a
-.section spj4b
-.section spj5a
-.section spj5b
-.section spj6a
-.section spj6b
-.section spj7a
-.section spj7b
-.section spj8a
-.section spj8b
-.section spj9a
-.section spj9b
-.section spj0a
-.section spj0b
-.section spkaa
-.section spkab
-.section spkba
-.section spkbb
-.section spkca
-.section spkcb
-.section spkda
-.section spkdb
-.section spkea
-.section spkeb
-.section spkfa
-.section spkfb
-.section spkga
-.section spkgb
-.section spkha
-.section spkhb
-.section spkia
-.section spkib
-.section spkja
-.section spkjb
-.section spkka
-.section spkkb
-.section spkla
-.section spklb
-.section spkma
-.section spkmb
-.section spkna
-.section spknb
-.section spkoa
-.section spkob
-.section spkpa
-.section spkpb
-.section spkqa
-.section spkqb
-.section spkra
-.section spkrb
-.section spksa
-.section spksb
-.section spkta
-.section spktb
-.section spkua
-.section spkub
-.section spkva
-.section spkvb
-.section spkwa
-.section spkwb
-.section spkxa
-.section spkxb
-.section spkya
-.section spkyb
-.section spkza
-.section spkzb
-.section spk1a
-.section spk1b
-.section spk2a
-.section spk2b
-.section spk3a
-.section spk3b
-.section spk4a
-.section spk4b
-.section spk5a
-.section spk5b
-.section spk6a
-.section spk6b
-.section spk7a
-.section spk7b
-.section spk8a
-.section spk8b
-.section spk9a
-.section spk9b
-.section spk0a
-.section spk0b
-.section splaa
-.section splab
-.section splba
-.section splbb
-.section splca
-.section splcb
-.section splda
-.section spldb
-.section splea
-.section spleb
-.section splfa
-.section splfb
-.section splga
-.section splgb
-.section splha
-.section splhb
-.section splia
-.section splib
-.section splja
-.section spljb
-.section splka
-.section splkb
-.section splla
-.section spllb
-.section splma
-.section splmb
-.section splna
-.section splnb
-.section sploa
-.section splob
-.section splpa
-.section splpb
-.section splqa
-.section splqb
-.section splra
-.section splrb
-.section splsa
-.section splsb
-.section splta
-.section spltb
-.section splua
-.section splub
-.section splva
-.section splvb
-.section splwa
-.section splwb
-.section splxa
-.section splxb
-.section splya
-.section splyb
-.section splza
-.section splzb
-.section spl1a
-.section spl1b
-.section spl2a
-.section spl2b
-.section spl3a
-.section spl3b
-.section spl4a
-.section spl4b
-.section spl5a
-.section spl5b
-.section spl6a
-.section spl6b
-.section spl7a
-.section spl7b
-.section spl8a
-.section spl8b
-.section spl9a
-.section spl9b
-.section spl0a
-.section spl0b
-.section spmaa
-.section spmab
-.section spmba
-.section spmbb
-.section spmca
-.section spmcb
-.section spmda
-.section spmdb
-.section spmea
-.section spmeb
-.section spmfa
-.section spmfb
-.section spmga
-.section spmgb
-.section spmha
-.section spmhb
-.section spmia
-.section spmib
-.section spmja
-.section spmjb
-.section spmka
-.section spmkb
-.section spmla
-.section spmlb
-.section spmma
-.section spmmb
-.section spmna
-.section spmnb
-.section spmoa
-.section spmob
-.section spmpa
-.section spmpb
-.section spmqa
-.section spmqb
-.section spmra
-.section spmrb
-.section spmsa
-.section spmsb
-.section spmta
-.section spmtb
-.section spmua
-.section spmub
-.section spmva
-.section spmvb
-.section spmwa
-.section spmwb
-.section spmxa
-.section spmxb
-.section spmya
-.section spmyb
-.section spmza
-.section spmzb
-.section spm1a
-.section spm1b
-.section spm2a
-.section spm2b
-.section spm3a
-.section spm3b
-.section spm4a
-.section spm4b
-.section spm5a
-.section spm5b
-.section spm6a
-.section spm6b
-.section spm7a
-.section spm7b
-.section spm8a
-.section spm8b
-.section spm9a
-.section spm9b
-.section spm0a
-.section spm0b
-.section spnaa
-.section spnab
-.section spnba
-.section spnbb
-.section spnca
-.section spncb
-.section spnda
-.section spndb
-.section spnea
-.section spneb
-.section spnfa
-.section spnfb
-.section spnga
-.section spngb
-.section spnha
-.section spnhb
-.section spnia
-.section spnib
-.section spnja
-.section spnjb
-.section spnka
-.section spnkb
-.section spnla
-.section spnlb
-.section spnma
-.section spnmb
-.section spnna
-.section spnnb
-.section spnoa
-.section spnob
-.section spnpa
-.section spnpb
-.section spnqa
-.section spnqb
-.section spnra
-.section spnrb
-.section spnsa
-.section spnsb
-.section spnta
-.section spntb
-.section spnua
-.section spnub
-.section spnva
-.section spnvb
-.section spnwa
-.section spnwb
-.section spnxa
-.section spnxb
-.section spnya
-.section spnyb
-.section spnza
-.section spnzb
-.section spn1a
-.section spn1b
-.section spn2a
-.section spn2b
-.section spn3a
-.section spn3b
-.section spn4a
-.section spn4b
-.section spn5a
-.section spn5b
-.section spn6a
-.section spn6b
-.section spn7a
-.section spn7b
-.section spn8a
-.section spn8b
-.section spn9a
-.section spn9b
-.section spn0a
-.section spn0b
-.section spoaa
-.section spoab
-.section spoba
-.section spobb
-.section spoca
-.section spocb
-.section spoda
-.section spodb
-.section spoea
-.section spoeb
-.section spofa
-.section spofb
-.section spoga
-.section spogb
-.section spoha
-.section spohb
-.section spoia
-.section spoib
-.section spoja
-.section spojb
-.section spoka
-.section spokb
-.section spola
-.section spolb
-.section spoma
-.section spomb
-.section spona
-.section sponb
-.section spooa
-.section spoob
-.section spopa
-.section spopb
-.section spoqa
-.section spoqb
-.section spora
-.section sporb
-.section sposa
-.section sposb
-.section spota
-.section spotb
-.section spoua
-.section spoub
-.section spova
-.section spovb
-.section spowa
-.section spowb
-.section spoxa
-.section spoxb
-.section spoya
-.section spoyb
-.section spoza
-.section spozb
-.section spo1a
-.section spo1b
-.section spo2a
-.section spo2b
-.section spo3a
-.section spo3b
-.section spo4a
-.section spo4b
-.section spo5a
-.section spo5b
-.section spo6a
-.section spo6b
-.section spo7a
-.section spo7b
-.section spo8a
-.section spo8b
-.section spo9a
-.section spo9b
-.section spo0a
-.section spo0b
-.section sppaa
-.section sppab
-.section sppba
-.section sppbb
-.section sppca
-.section sppcb
-.section sppda
-.section sppdb
-.section sppea
-.section sppeb
-.section sppfa
-.section sppfb
-.section sppga
-.section sppgb
-.section sppha
-.section spphb
-.section sppia
-.section sppib
-.section sppja
-.section sppjb
-.section sppka
-.section sppkb
-.section sppla
-.section spplb
-.section sppma
-.section sppmb
-.section sppna
-.section sppnb
-.section sppoa
-.section sppob
-.section spppa
-.section spppb
-.section sppqa
-.section sppqb
-.section sppra
-.section spprb
-.section sppsa
-.section sppsb
-.section sppta
-.section spptb
-.section sppua
-.section sppub
-.section sppva
-.section sppvb
-.section sppwa
-.section sppwb
-.section sppxa
-.section sppxb
-.section sppya
-.section sppyb
-.section sppza
-.section sppzb
-.section spp1a
-.section spp1b
-.section spp2a
-.section spp2b
-.section spp3a
-.section spp3b
-.section spp4a
-.section spp4b
-.section spp5a
-.section spp5b
-.section spp6a
-.section spp6b
-.section spp7a
-.section spp7b
-.section spp8a
-.section spp8b
-.section spp9a
-.section spp9b
-.section spp0a
-.section spp0b
-.section spqaa
-.section spqab
-.section spqba
-.section spqbb
-.section spqca
-.section spqcb
-.section spqda
-.section spqdb
-.section spqea
-.section spqeb
-.section spqfa
-.section spqfb
-.section spqga
-.section spqgb
-.section spqha
-.section spqhb
-.section spqia
-.section spqib
-.section spqja
-.section spqjb
-.section spqka
-.section spqkb
-.section spqla
-.section spqlb
-.section spqma
-.section spqmb
-.section spqna
-.section spqnb
-.section spqoa
-.section spqob
-.section spqpa
-.section spqpb
-.section spqqa
-.section spqqb
-.section spqra
-.section spqrb
-.section spqsa
-.section spqsb
-.section spqta
-.section spqtb
-.section spqua
-.section spqub
-.section spqva
-.section spqvb
-.section spqwa
-.section spqwb
-.section spqxa
-.section spqxb
-.section spqya
-.section spqyb
-.section spqza
-.section spqzb
-.section spq1a
-.section spq1b
-.section spq2a
-.section spq2b
-.section spq3a
-.section spq3b
-.section spq4a
-.section spq4b
-.section spq5a
-.section spq5b
-.section spq6a
-.section spq6b
-.section spq7a
-.section spq7b
-.section spq8a
-.section spq8b
-.section spq9a
-.section spq9b
-.section spq0a
-.section spq0b
-.section spraa
-.section sprab
-.section sprba
-.section sprbb
-.section sprca
-.section sprcb
-.section sprda
-.section sprdb
-.section sprea
-.section spreb
-.section sprfa
-.section sprfb
-.section sprga
-.section sprgb
-.section sprha
-.section sprhb
-.section spria
-.section sprib
-.section sprja
-.section sprjb
-.section sprka
-.section sprkb
-.section sprla
-.section sprlb
-.section sprma
-.section sprmb
-.section sprna
-.section sprnb
-.section sproa
-.section sprob
-.section sprpa
-.section sprpb
-.section sprqa
-.section sprqb
-.section sprra
-.section sprrb
-.section sprsa
-.section sprsb
-.section sprta
-.section sprtb
-.section sprua
-.section sprub
-.section sprva
-.section sprvb
-.section sprwa
-.section sprwb
-.section sprxa
-.section sprxb
-.section sprya
-.section spryb
-.section sprza
-.section sprzb
-.section spr1a
-.section spr1b
-.section spr2a
-.section spr2b
-.section spr3a
-.section spr3b
-.section spr4a
-.section spr4b
-.section spr5a
-.section spr5b
-.section spr6a
-.section spr6b
-.section spr7a
-.section spr7b
-.section spr8a
-.section spr8b
-.section spr9a
-.section spr9b
-.section spr0a
-.section spr0b
-.section spsaa
-.section spsab
-.section spsba
-.section spsbb
-.section spsca
-.section spscb
-.section spsda
-.section spsdb
-.section spsea
-.section spseb
-.section spsfa
-.section spsfb
-.section spsga
-.section spsgb
-.section spsha
-.section spshb
-.section spsia
-.section spsib
-.section spsja
-.section spsjb
-.section spska
-.section spskb
-.section spsla
-.section spslb
-.section spsma
-.section spsmb
-.section spsna
-.section spsnb
-.section spsoa
-.section spsob
-.section spspa
-.section spspb
-.section spsqa
-.section spsqb
-.section spsra
-.section spsrb
-.section spssa
-.section spssb
-.section spsta
-.section spstb
-.section spsua
-.section spsub
-.section spsva
-.section spsvb
-.section spswa
-.section spswb
-.section spsxa
-.section spsxb
-.section spsya
-.section spsyb
-.section spsza
-.section spszb
-.section sps1a
-.section sps1b
-.section sps2a
-.section sps2b
-.section sps3a
-.section sps3b
-.section sps4a
-.section sps4b
-.section sps5a
-.section sps5b
-.section sps6a
-.section sps6b
-.section sps7a
-.section sps7b
-.section sps8a
-.section sps8b
-.section sps9a
-.section sps9b
-.section sps0a
-.section sps0b
-.section sptaa
-.section sptab
-.section sptba
-.section sptbb
-.section sptca
-.section sptcb
-.section sptda
-.section sptdb
-.section sptea
-.section spteb
-.section sptfa
-.section sptfb
-.section sptga
-.section sptgb
-.section sptha
-.section spthb
-.section sptia
-.section sptib
-.section sptja
-.section sptjb
-.section sptka
-.section sptkb
-.section sptla
-.section sptlb
-.section sptma
-.section sptmb
-.section sptna
-.section sptnb
-.section sptoa
-.section sptob
-.section sptpa
-.section sptpb
-.section sptqa
-.section sptqb
-.section sptra
-.section sptrb
-.section sptsa
-.section sptsb
-.section sptta
-.section spttb
-.section sptua
-.section sptub
-.section sptva
-.section sptvb
-.section sptwa
-.section sptwb
-.section sptxa
-.section sptxb
-.section sptya
-.section sptyb
-.section sptza
-.section sptzb
-.section spt1a
-.section spt1b
-.section spt2a
-.section spt2b
-.section spt3a
-.section spt3b
-.section spt4a
-.section spt4b
-.section spt5a
-.section spt5b
-.section spt6a
-.section spt6b
-.section spt7a
-.section spt7b
-.section spt8a
-.section spt8b
-.section spt9a
-.section spt9b
-.section spt0a
-.section spt0b
-.section spuaa
-.section spuab
-.section spuba
-.section spubb
-.section spuca
-.section spucb
-.section spuda
-.section spudb
-.section spuea
-.section spueb
-.section spufa
-.section spufb
-.section spuga
-.section spugb
-.section spuha
-.section spuhb
-.section spuia
-.section spuib
-.section spuja
-.section spujb
-.section spuka
-.section spukb
-.section spula
-.section spulb
-.section spuma
-.section spumb
-.section spuna
-.section spunb
-.section spuoa
-.section spuob
-.section spupa
-.section spupb
-.section spuqa
-.section spuqb
-.section spura
-.section spurb
-.section spusa
-.section spusb
-.section sputa
-.section sputb
-.section spuua
-.section spuub
-.section spuva
-.section spuvb
-.section spuwa
-.section spuwb
-.section spuxa
-.section spuxb
-.section spuya
-.section spuyb
-.section spuza
-.section spuzb
-.section spu1a
-.section spu1b
-.section spu2a
-.section spu2b
-.section spu3a
-.section spu3b
-.section spu4a
-.section spu4b
-.section spu5a
-.section spu5b
-.section spu6a
-.section spu6b
-.section spu7a
-.section spu7b
-.section spu8a
-.section spu8b
-.section spu9a
-.section spu9b
-.section spu0a
-.section spu0b
-.section spvaa
-.section spvab
-.section spvba
-.section spvbb
-.section spvca
-.section spvcb
-.section spvda
-.section spvdb
-.section spvea
-.section spveb
-.section spvfa
-.section spvfb
-.section spvga
-.section spvgb
-.section spvha
-.section spvhb
-.section spvia
-.section spvib
-.section spvja
-.section spvjb
-.section spvka
-.section spvkb
-.section spvla
-.section spvlb
-.section spvma
-.section spvmb
-.section spvna
-.section spvnb
-.section spvoa
-.section spvob
-.section spvpa
-.section spvpb
-.section spvqa
-.section spvqb
-.section spvra
-.section spvrb
-.section spvsa
-.section spvsb
-.section spvta
-.section spvtb
-.section spvua
-.section spvub
-.section spvva
-.section spvvb
-.section spvwa
-.section spvwb
-.section spvxa
-.section spvxb
-.section spvya
-.section spvyb
-.section spvza
-.section spvzb
-.section spv1a
-.section spv1b
-.section spv2a
-.section spv2b
-.section spv3a
-.section spv3b
-.section spv4a
-.section spv4b
-.section spv5a
-.section spv5b
-.section spv6a
-.section spv6b
-.section spv7a
-.section spv7b
-.section spv8a
-.section spv8b
-.section spv9a
-.section spv9b
-.section spv0a
-.section spv0b
-.section spwaa
-.section spwab
-.section spwba
-.section spwbb
-.section spwca
-.section spwcb
-.section spwda
-.section spwdb
-.section spwea
-.section spweb
-.section spwfa
-.section spwfb
-.section spwga
-.section spwgb
-.section spwha
-.section spwhb
-.section spwia
-.section spwib
-.section spwja
-.section spwjb
-.section spwka
-.section spwkb
-.section spwla
-.section spwlb
-.section spwma
-.section spwmb
-.section spwna
-.section spwnb
-.section spwoa
-.section spwob
-.section spwpa
-.section spwpb
-.section spwqa
-.section spwqb
-.section spwra
-.section spwrb
-.section spwsa
-.section spwsb
-.section spwta
-.section spwtb
-.section spwua
-.section spwub
-.section spwva
-.section spwvb
-.section spwwa
-.section spwwb
-.section spwxa
-.section spwxb
-.section spwya
-.section spwyb
-.section spwza
-.section spwzb
-.section spw1a
-.section spw1b
-.section spw2a
-.section spw2b
-.section spw3a
-.section spw3b
-.section spw4a
-.section spw4b
-.section spw5a
-.section spw5b
-.section spw6a
-.section spw6b
-.section spw7a
-.section spw7b
-.section spw8a
-.section spw8b
-.section spw9a
-.section spw9b
-.section spw0a
-.section spw0b
-.section spxaa
-.section spxab
-.section spxba
-.section spxbb
-.section spxca
-.section spxcb
-.section spxda
-.section spxdb
-.section spxea
-.section spxeb
-.section spxfa
-.section spxfb
-.section spxga
-.section spxgb
-.section spxha
-.section spxhb
-.section spxia
-.section spxib
-.section spxja
-.section spxjb
-.section spxka
-.section spxkb
-.section spxla
-.section spxlb
-.section spxma
-.section spxmb
-.section spxna
-.section spxnb
-.section spxoa
-.section spxob
-.section spxpa
-.section spxpb
-.section spxqa
-.section spxqb
-.section spxra
-.section spxrb
-.section spxsa
-.section spxsb
-.section spxta
-.section spxtb
-.section spxua
-.section spxub
-.section spxva
-.section spxvb
-.section spxwa
-.section spxwb
-.section spxxa
-.section spxxb
-.section spxya
-.section spxyb
-.section spxza
-.section spxzb
-.section spx1a
-.section spx1b
-.section spx2a
-.section spx2b
-.section spx3a
-.section spx3b
-.section spx4a
-.section spx4b
-.section spx5a
-.section spx5b
-.section spx6a
-.section spx6b
-.section spx7a
-.section spx7b
-.section spx8a
-.section spx8b
-.section spx9a
-.section spx9b
-.section spx0a
-.section spx0b
-.section spyaa
-.section spyab
-.section spyba
-.section spybb
-.section spyca
-.section spycb
-.section spyda
-.section spydb
-.section spyea
-.section spyeb
-.section spyfa
-.section spyfb
-.section spyga
-.section spygb
-.section spyha
-.section spyhb
-.section spyia
-.section spyib
-.section spyja
-.section spyjb
-.section spyka
-.section spykb
-.section spyla
-.section spylb
-.section spyma
-.section spymb
-.section spyna
-.section spynb
-.section spyoa
-.section spyob
-.section spypa
-.section spypb
-.section spyqa
-.section spyqb
-.section spyra
-.section spyrb
-.section spysa
-.section spysb
-.section spyta
-.section spytb
-.section spyua
-.section spyub
-.section spyva
-.section spyvb
-.section spywa
-.section spywb
-.section spyxa
-.section spyxb
-.section spyya
-.section spyyb
-.section spyza
-.section spyzb
-.section spy1a
-.section spy1b
-.section spy2a
-.section spy2b
-.section spy3a
-.section spy3b
-.section spy4a
-.section spy4b
-.section spy5a
-.section spy5b
-.section spy6a
-.section spy6b
-.section spy7a
-.section spy7b
-.section spy8a
-.section spy8b
-.section spy9a
-.section spy9b
-.section spy0a
-.section spy0b
-.section spzaa
-.section spzab
-.section spzba
-.section spzbb
-.section spzca
-.section spzcb
-.section spzda
-.section spzdb
-.section spzea
-.section spzeb
-.section spzfa
-.section spzfb
-.section spzga
-.section spzgb
-.section spzha
-.section spzhb
-.section spzia
-.section spzib
-.section spzja
-.section spzjb
-.section spzka
-.section spzkb
-.section spzla
-.section spzlb
-.section spzma
-.section spzmb
-.section spzna
-.section spznb
-.section spzoa
-.section spzob
-.section spzpa
-.section spzpb
-.section spzqa
-.section spzqb
-.section spzra
-.section spzrb
-.section spzsa
-.section spzsb
-.section spzta
-.section spztb
-.section spzua
-.section spzub
-.section spzva
-.section spzvb
-.section spzwa
-.section spzwb
-.section spzxa
-.section spzxb
-.section spzya
-.section spzyb
-.section spzza
-.section spzzb
-.section spz1a
-.section spz1b
-.section spz2a
-.section spz2b
-.section spz3a
-.section spz3b
-.section spz4a
-.section spz4b
-.section spz5a
-.section spz5b
-.section spz6a
-.section spz6b
-.section spz7a
-.section spz7b
-.section spz8a
-.section spz8b
-.section spz9a
-.section spz9b
-.section spz0a
-.section spz0b
-.section sp1aa
-.section sp1ab
-.section sp1ba
-.section sp1bb
-.section sp1ca
-.section sp1cb
-.section sp1da
-.section sp1db
-.section sp1ea
-.section sp1eb
-.section sp1fa
-.section sp1fb
-.section sp1ga
-.section sp1gb
-.section sp1ha
-.section sp1hb
-.section sp1ia
-.section sp1ib
-.section sp1ja
-.section sp1jb
-.section sp1ka
-.section sp1kb
-.section sp1la
-.section sp1lb
-.section sp1ma
-.section sp1mb
-.section sp1na
-.section sp1nb
-.section sp1oa
-.section sp1ob
-.section sp1pa
-.section sp1pb
-.section sp1qa
-.section sp1qb
-.section sp1ra
-.section sp1rb
-.section sp1sa
-.section sp1sb
-.section sp1ta
-.section sp1tb
-.section sp1ua
-.section sp1ub
-.section sp1va
-.section sp1vb
-.section sp1wa
-.section sp1wb
-.section sp1xa
-.section sp1xb
-.section sp1ya
-.section sp1yb
-.section sp1za
-.section sp1zb
-.section sp11a
-.section sp11b
-.section sp12a
-.section sp12b
-.section sp13a
-.section sp13b
-.section sp14a
-.section sp14b
-.section sp15a
-.section sp15b
-.section sp16a
-.section sp16b
-.section sp17a
-.section sp17b
-.section sp18a
-.section sp18b
-.section sp19a
-.section sp19b
-.section sp10a
-.section sp10b
-.section sp2aa
-.section sp2ab
-.section sp2ba
-.section sp2bb
-.section sp2ca
-.section sp2cb
-.section sp2da
-.section sp2db
-.section sp2ea
-.section sp2eb
-.section sp2fa
-.section sp2fb
-.section sp2ga
-.section sp2gb
-.section sp2ha
-.section sp2hb
-.section sp2ia
-.section sp2ib
-.section sp2ja
-.section sp2jb
-.section sp2ka
-.section sp2kb
-.section sp2la
-.section sp2lb
-.section sp2ma
-.section sp2mb
-.section sp2na
-.section sp2nb
-.section sp2oa
-.section sp2ob
-.section sp2pa
-.section sp2pb
-.section sp2qa
-.section sp2qb
-.section sp2ra
-.section sp2rb
-.section sp2sa
-.section sp2sb
-.section sp2ta
-.section sp2tb
-.section sp2ua
-.section sp2ub
-.section sp2va
-.section sp2vb
-.section sp2wa
-.section sp2wb
-.section sp2xa
-.section sp2xb
-.section sp2ya
-.section sp2yb
-.section sp2za
-.section sp2zb
-.section sp21a
-.section sp21b
-.section sp22a
-.section sp22b
-.section sp23a
-.section sp23b
-.section sp24a
-.section sp24b
-.section sp25a
-.section sp25b
-.section sp26a
-.section sp26b
-.section sp27a
-.section sp27b
-.section sp28a
-.section sp28b
-.section sp29a
-.section sp29b
-.section sp20a
-.section sp20b
-.section sp3aa
-.section sp3ab
-.section sp3ba
-.section sp3bb
-.section sp3ca
-.section sp3cb
-.section sp3da
-.section sp3db
-.section sp3ea
-.section sp3eb
-.section sp3fa
-.section sp3fb
-.section sp3ga
-.section sp3gb
-.section sp3ha
-.section sp3hb
-.section sp3ia
-.section sp3ib
-.section sp3ja
-.section sp3jb
-.section sp3ka
-.section sp3kb
-.section sp3la
-.section sp3lb
-.section sp3ma
-.section sp3mb
-.section sp3na
-.section sp3nb
-.section sp3oa
-.section sp3ob
-.section sp3pa
-.section sp3pb
-.section sp3qa
-.section sp3qb
-.section sp3ra
-.section sp3rb
-.section sp3sa
-.section sp3sb
-.section sp3ta
-.section sp3tb
-.section sp3ua
-.section sp3ub
-.section sp3va
-.section sp3vb
-.section sp3wa
-.section sp3wb
-.section sp3xa
-.section sp3xb
-.section sp3ya
-.section sp3yb
-.section sp3za
-.section sp3zb
-.section sp31a
-.section sp31b
-.section sp32a
-.section sp32b
-.section sp33a
-.section sp33b
-.section sp34a
-.section sp34b
-.section sp35a
-.section sp35b
-.section sp36a
-.section sp36b
-.section sp37a
-.section sp37b
-.section sp38a
-.section sp38b
-.section sp39a
-.section sp39b
-.section sp30a
-.section sp30b
-.section sp4aa
-.section sp4ab
-.section sp4ba
-.section sp4bb
-.section sp4ca
-.section sp4cb
-.section sp4da
-.section sp4db
-.section sp4ea
-.section sp4eb
-.section sp4fa
-.section sp4fb
-.section sp4ga
-.section sp4gb
-.section sp4ha
-.section sp4hb
-.section sp4ia
-.section sp4ib
-.section sp4ja
-.section sp4jb
-.section sp4ka
-.section sp4kb
-.section sp4la
-.section sp4lb
-.section sp4ma
-.section sp4mb
-.section sp4na
-.section sp4nb
-.section sp4oa
-.section sp4ob
-.section sp4pa
-.section sp4pb
-.section sp4qa
-.section sp4qb
-.section sp4ra
-.section sp4rb
-.section sp4sa
-.section sp4sb
-.section sp4ta
-.section sp4tb
-.section sp4ua
-.section sp4ub
-.section sp4va
-.section sp4vb
-.section sp4wa
-.section sp4wb
-.section sp4xa
-.section sp4xb
-.section sp4ya
-.section sp4yb
-.section sp4za
-.section sp4zb
-.section sp41a
-.section sp41b
-.section sp42a
-.section sp42b
-.section sp43a
-.section sp43b
-.section sp44a
-.section sp44b
-.section sp45a
-.section sp45b
-.section sp46a
-.section sp46b
-.section sp47a
-.section sp47b
-.section sp48a
-.section sp48b
-.section sp49a
-.section sp49b
-.section sp40a
-.section sp40b
-.section sp5aa
-.section sp5ab
-.section sp5ba
-.section sp5bb
-.section sp5ca
-.section sp5cb
-.section sp5da
-.section sp5db
-.section sp5ea
-.section sp5eb
-.section sp5fa
-.section sp5fb
-.section sp5ga
-.section sp5gb
-.section sp5ha
-.section sp5hb
-.section sp5ia
-.section sp5ib
-.section sp5ja
-.section sp5jb
-.section sp5ka
-.section sp5kb
-.section sp5la
-.section sp5lb
-.section sp5ma
-.section sp5mb
-.section sp5na
-.section sp5nb
-.section sp5oa
-.section sp5ob
-.section sp5pa
-.section sp5pb
-.section sp5qa
-.section sp5qb
-.section sp5ra
-.section sp5rb
-.section sp5sa
-.section sp5sb
-.section sp5ta
-.section sp5tb
-.section sp5ua
-.section sp5ub
-.section sp5va
-.section sp5vb
-.section sp5wa
-.section sp5wb
-.section sp5xa
-.section sp5xb
-.section sp5ya
-.section sp5yb
-.section sp5za
-.section sp5zb
-.section sp51a
-.section sp51b
-.section sp52a
-.section sp52b
-.section sp53a
-.section sp53b
-.section sp54a
-.section sp54b
-.section sp55a
-.section sp55b
-.section sp56a
-.section sp56b
-.section sp57a
-.section sp57b
-.section sp58a
-.section sp58b
-.section sp59a
-.section sp59b
-.section sp50a
-.section sp50b
-.section sp6aa
-.section sp6ab
-.section sp6ba
-.section sp6bb
-.section sp6ca
-.section sp6cb
-.section sp6da
-.section sp6db
-.section sp6ea
-.section sp6eb
-.section sp6fa
-.section sp6fb
-.section sp6ga
-.section sp6gb
-.section sp6ha
-.section sp6hb
-.section sp6ia
-.section sp6ib
-.section sp6ja
-.section sp6jb
-.section sp6ka
-.section sp6kb
-.section sp6la
-.section sp6lb
-.section sp6ma
-.section sp6mb
-.section sp6na
-.section sp6nb
-.section sp6oa
-.section sp6ob
-.section sp6pa
-.section sp6pb
-.section sp6qa
-.section sp6qb
-.section sp6ra
-.section sp6rb
-.section sp6sa
-.section sp6sb
-.section sp6ta
-.section sp6tb
-.section sp6ua
-.section sp6ub
-.section sp6va
-.section sp6vb
-.section sp6wa
-.section sp6wb
-.section sp6xa
-.section sp6xb
-.section sp6ya
-.section sp6yb
-.section sp6za
-.section sp6zb
-.section sp61a
-.section sp61b
-.section sp62a
-.section sp62b
-.section sp63a
-.section sp63b
-.section sp64a
-.section sp64b
-.section sp65a
-.section sp65b
-.section sp66a
-.section sp66b
-.section sp67a
-.section sp67b
-.section sp68a
-.section sp68b
-.section sp69a
-.section sp69b
-.section sp60a
-.section sp60b
-.section sp7aa
-.section sp7ab
-.section sp7ba
-.section sp7bb
-.section sp7ca
-.section sp7cb
-.section sp7da
-.section sp7db
-.section sp7ea
-.section sp7eb
-.section sp7fa
-.section sp7fb
-.section sp7ga
-.section sp7gb
-.section sp7ha
-.section sp7hb
-.section sp7ia
-.section sp7ib
-.section sp7ja
-.section sp7jb
-.section sp7ka
-.section sp7kb
-.section sp7la
-.section sp7lb
-.section sp7ma
-.section sp7mb
-.section sp7na
-.section sp7nb
-.section sp7oa
-.section sp7ob
-.section sp7pa
-.section sp7pb
-.section sp7qa
-.section sp7qb
-.section sp7ra
-.section sp7rb
-.section sp7sa
-.section sp7sb
-.section sp7ta
-.section sp7tb
-.section sp7ua
-.section sp7ub
-.section sp7va
-.section sp7vb
-.section sp7wa
-.section sp7wb
-.section sp7xa
-.section sp7xb
-.section sp7ya
-.section sp7yb
-.section sp7za
-.section sp7zb
-.section sp71a
-.section sp71b
-.section sp72a
-.section sp72b
-.section sp73a
-.section sp73b
-.section sp74a
-.section sp74b
-.section sp75a
-.section sp75b
-.section sp76a
-.section sp76b
-.section sp77a
-.section sp77b
-.section sp78a
-.section sp78b
-.section sp79a
-.section sp79b
-.section sp70a
-.section sp70b
-.section sp8aa
-.section sp8ab
-.section sp8ba
-.section sp8bb
-.section sp8ca
-.section sp8cb
-.section sp8da
-.section sp8db
-.section sp8ea
-.section sp8eb
-.section sp8fa
-.section sp8fb
-.section sp8ga
-.section sp8gb
-.section sp8ha
-.section sp8hb
-.section sp8ia
-.section sp8ib
-.section sp8ja
-.section sp8jb
-.section sp8ka
-.section sp8kb
-.section sp8la
-.section sp8lb
-.section sp8ma
-.section sp8mb
-.section sp8na
-.section sp8nb
-.section sp8oa
-.section sp8ob
-.section sp8pa
-.section sp8pb
-.section sp8qa
-.section sp8qb
-.section sp8ra
-.section sp8rb
-.section sp8sa
-.section sp8sb
-.section sp8ta
-.section sp8tb
-.section sp8ua
-.section sp8ub
-.section sp8va
-.section sp8vb
-.section sp8wa
-.section sp8wb
-.section sp8xa
-.section sp8xb
-.section sp8ya
-.section sp8yb
-.section sp8za
-.section sp8zb
-.section sp81a
-.section sp81b
-.section sp82a
-.section sp82b
-.section sp83a
-.section sp83b
-.section sp84a
-.section sp84b
-.section sp85a
-.section sp85b
-.section sp86a
-.section sp86b
-.section sp87a
-.section sp87b
-.section sp88a
-.section sp88b
-.section sp89a
-.section sp89b
-.section sp80a
-.section sp80b
-.section sp9aa
-.section sp9ab
-.section sp9ba
-.section sp9bb
-.section sp9ca
-.section sp9cb
-.section sp9da
-.section sp9db
-.section sp9ea
-.section sp9eb
-.section sp9fa
-.section sp9fb
-.section sp9ga
-.section sp9gb
-.section sp9ha
-.section sp9hb
-.section sp9ia
-.section sp9ib
-.section sp9ja
-.section sp9jb
-.section sp9ka
-.section sp9kb
-.section sp9la
-.section sp9lb
-.section sp9ma
-.section sp9mb
-.section sp9na
-.section sp9nb
-.section sp9oa
-.section sp9ob
-.section sp9pa
-.section sp9pb
-.section sp9qa
-.section sp9qb
-.section sp9ra
-.section sp9rb
-.section sp9sa
-.section sp9sb
-.section sp9ta
-.section sp9tb
-.section sp9ua
-.section sp9ub
-.section sp9va
-.section sp9vb
-.section sp9wa
-.section sp9wb
-.section sp9xa
-.section sp9xb
-.section sp9ya
-.section sp9yb
-.section sp9za
-.section sp9zb
-.section sp91a
-.section sp91b
-.section sp92a
-.section sp92b
-.section sp93a
-.section sp93b
-.section sp94a
-.section sp94b
-.section sp95a
-.section sp95b
-.section sp96a
-.section sp96b
-.section sp97a
-.section sp97b
-.section sp98a
-.section sp98b
-.section sp99a
-.section sp99b
-.section sp90a
-.section sp90b
-.section sp0aa
-.section sp0ab
-.section sp0ba
-.section sp0bb
-.section sp0ca
-.section sp0cb
-.section sp0da
-.section sp0db
-.section sp0ea
-.section sp0eb
-.section sp0fa
-.section sp0fb
-.section sp0ga
-.section sp0gb
-.section sp0ha
-.section sp0hb
-.section sp0ia
-.section sp0ib
-.section sp0ja
-.section sp0jb
-.section sp0ka
-.section sp0kb
-.section sp0la
-.section sp0lb
-.section sp0ma
-.section sp0mb
-.section sp0na
-.section sp0nb
-.section sp0oa
-.section sp0ob
-.section sp0pa
-.section sp0pb
-.section sp0qa
-.section sp0qb
-.section sp0ra
-.section sp0rb
-.section sp0sa
-.section sp0sb
-.section sp0ta
-.section sp0tb
-.section sp0ua
-.section sp0ub
-.section sp0va
-.section sp0vb
-.section sp0wa
-.section sp0wb
-.section sp0xa
-.section sp0xb
-.section sp0ya
-.section sp0yb
-.section sp0za
-.section sp0zb
-.section sp01a
-.section sp01b
-.section sp02a
-.section sp02b
-.section sp03a
-.section sp03b
-.section sp04a
-.section sp04b
-.section sp05a
-.section sp05b
-.section sp06a
-.section sp06b
-.section sp07a
-.section sp07b
-.section sp08a
-.section sp08b
-.section sp09a
-.section sp09b
-.section sp00a
-.section sp00b
-.section sqaaa
-.section sqaab
-.section sqaba
-.section sqabb
-.section sqaca
-.section sqacb
-.section sqada
-.section sqadb
-.section sqaea
-.section sqaeb
-.section sqafa
-.section sqafb
-.section sqaga
-.section sqagb
-.section sqaha
-.section sqahb
-.section sqaia
-.section sqaib
-.section sqaja
-.section sqajb
-.section sqaka
-.section sqakb
-.section sqala
-.section sqalb
-.section sqama
-.section sqamb
-.section sqana
-.section sqanb
-.section sqaoa
-.section sqaob
-.section sqapa
-.section sqapb
-.section sqaqa
-.section sqaqb
-.section sqara
-.section sqarb
-.section sqasa
-.section sqasb
-.section sqata
-.section sqatb
-.section sqaua
-.section sqaub
-.section sqava
-.section sqavb
-.section sqawa
-.section sqawb
-.section sqaxa
-.section sqaxb
-.section sqaya
-.section sqayb
-.section sqaza
-.section sqazb
-.section sqa1a
-.section sqa1b
-.section sqa2a
-.section sqa2b
-.section sqa3a
-.section sqa3b
-.section sqa4a
-.section sqa4b
-.section sqa5a
-.section sqa5b
-.section sqa6a
-.section sqa6b
-.section sqa7a
-.section sqa7b
-.section sqa8a
-.section sqa8b
-.section sqa9a
-.section sqa9b
-.section sqa0a
-.section sqa0b
-.section sqbaa
-.section sqbab
-.section sqbba
-.section sqbbb
-.section sqbca
-.section sqbcb
-.section sqbda
-.section sqbdb
-.section sqbea
-.section sqbeb
-.section sqbfa
-.section sqbfb
-.section sqbga
-.section sqbgb
-.section sqbha
-.section sqbhb
-.section sqbia
-.section sqbib
-.section sqbja
-.section sqbjb
-.section sqbka
-.section sqbkb
-.section sqbla
-.section sqblb
-.section sqbma
-.section sqbmb
-.section sqbna
-.section sqbnb
-.section sqboa
-.section sqbob
-.section sqbpa
-.section sqbpb
-.section sqbqa
-.section sqbqb
-.section sqbra
-.section sqbrb
-.section sqbsa
-.section sqbsb
-.section sqbta
-.section sqbtb
-.section sqbua
-.section sqbub
-.section sqbva
-.section sqbvb
-.section sqbwa
-.section sqbwb
-.section sqbxa
-.section sqbxb
-.section sqbya
-.section sqbyb
-.section sqbza
-.section sqbzb
-.section sqb1a
-.section sqb1b
-.section sqb2a
-.section sqb2b
-.section sqb3a
-.section sqb3b
-.section sqb4a
-.section sqb4b
-.section sqb5a
-.section sqb5b
-.section sqb6a
-.section sqb6b
-.section sqb7a
-.section sqb7b
-.section sqb8a
-.section sqb8b
-.section sqb9a
-.section sqb9b
-.section sqb0a
-.section sqb0b
-.section sqcaa
-.section sqcab
-.section sqcba
-.section sqcbb
-.section sqcca
-.section sqccb
-.section sqcda
-.section sqcdb
-.section sqcea
-.section sqceb
-.section sqcfa
-.section sqcfb
-.section sqcga
-.section sqcgb
-.section sqcha
-.section sqchb
-.section sqcia
-.section sqcib
-.section sqcja
-.section sqcjb
-.section sqcka
-.section sqckb
-.section sqcla
-.section sqclb
-.section sqcma
-.section sqcmb
-.section sqcna
-.section sqcnb
-.section sqcoa
-.section sqcob
-.section sqcpa
-.section sqcpb
-.section sqcqa
-.section sqcqb
-.section sqcra
-.section sqcrb
-.section sqcsa
-.section sqcsb
-.section sqcta
-.section sqctb
-.section sqcua
-.section sqcub
-.section sqcva
-.section sqcvb
-.section sqcwa
-.section sqcwb
-.section sqcxa
-.section sqcxb
-.section sqcya
-.section sqcyb
-.section sqcza
-.section sqczb
-.section sqc1a
-.section sqc1b
-.section sqc2a
-.section sqc2b
-.section sqc3a
-.section sqc3b
-.section sqc4a
-.section sqc4b
-.section sqc5a
-.section sqc5b
-.section sqc6a
-.section sqc6b
-.section sqc7a
-.section sqc7b
-.section sqc8a
-.section sqc8b
-.section sqc9a
-.section sqc9b
-.section sqc0a
-.section sqc0b
-.section sqdaa
-.section sqdab
-.section sqdba
-.section sqdbb
-.section sqdca
-.section sqdcb
-.section sqdda
-.section sqddb
-.section sqdea
-.section sqdeb
-.section sqdfa
-.section sqdfb
-.section sqdga
-.section sqdgb
-.section sqdha
-.section sqdhb
-.section sqdia
-.section sqdib
-.section sqdja
-.section sqdjb
-.section sqdka
-.section sqdkb
-.section sqdla
-.section sqdlb
-.section sqdma
-.section sqdmb
-.section sqdna
-.section sqdnb
-.section sqdoa
-.section sqdob
-.section sqdpa
-.section sqdpb
-.section sqdqa
-.section sqdqb
-.section sqdra
-.section sqdrb
-.section sqdsa
-.section sqdsb
-.section sqdta
-.section sqdtb
-.section sqdua
-.section sqdub
-.section sqdva
-.section sqdvb
-.section sqdwa
-.section sqdwb
-.section sqdxa
-.section sqdxb
-.section sqdya
-.section sqdyb
-.section sqdza
-.section sqdzb
-.section sqd1a
-.section sqd1b
-.section sqd2a
-.section sqd2b
-.section sqd3a
-.section sqd3b
-.section sqd4a
-.section sqd4b
-.section sqd5a
-.section sqd5b
-.section sqd6a
-.section sqd6b
-.section sqd7a
-.section sqd7b
-.section sqd8a
-.section sqd8b
-.section sqd9a
-.section sqd9b
-.section sqd0a
-.section sqd0b
-.section sqeaa
-.section sqeab
-.section sqeba
-.section sqebb
-.section sqeca
-.section sqecb
-.section sqeda
-.section sqedb
-.section sqeea
-.section sqeeb
-.section sqefa
-.section sqefb
-.section sqega
-.section sqegb
-.section sqeha
-.section sqehb
-.section sqeia
-.section sqeib
-.section sqeja
-.section sqejb
-.section sqeka
-.section sqekb
-.section sqela
-.section sqelb
-.section sqema
-.section sqemb
-.section sqena
-.section sqenb
-.section sqeoa
-.section sqeob
-.section sqepa
-.section sqepb
-.section sqeqa
-.section sqeqb
-.section sqera
-.section sqerb
-.section sqesa
-.section sqesb
-.section sqeta
-.section sqetb
-.section sqeua
-.section sqeub
-.section sqeva
-.section sqevb
-.section sqewa
-.section sqewb
-.section sqexa
-.section sqexb
-.section sqeya
-.section sqeyb
-.section sqeza
-.section sqezb
-.section sqe1a
-.section sqe1b
-.section sqe2a
-.section sqe2b
-.section sqe3a
-.section sqe3b
-.section sqe4a
-.section sqe4b
-.section sqe5a
-.section sqe5b
-.section sqe6a
-.section sqe6b
-.section sqe7a
-.section sqe7b
-.section sqe8a
-.section sqe8b
-.section sqe9a
-.section sqe9b
-.section sqe0a
-.section sqe0b
-.section sqfaa
-.section sqfab
-.section sqfba
-.section sqfbb
-.section sqfca
-.section sqfcb
-.section sqfda
-.section sqfdb
-.section sqfea
-.section sqfeb
-.section sqffa
-.section sqffb
-.section sqfga
-.section sqfgb
-.section sqfha
-.section sqfhb
-.section sqfia
-.section sqfib
-.section sqfja
-.section sqfjb
-.section sqfka
-.section sqfkb
-.section sqfla
-.section sqflb
-.section sqfma
-.section sqfmb
-.section sqfna
-.section sqfnb
-.section sqfoa
-.section sqfob
-.section sqfpa
-.section sqfpb
-.section sqfqa
-.section sqfqb
-.section sqfra
-.section sqfrb
-.section sqfsa
-.section sqfsb
-.section sqfta
-.section sqftb
-.section sqfua
-.section sqfub
-.section sqfva
-.section sqfvb
-.section sqfwa
-.section sqfwb
-.section sqfxa
-.section sqfxb
-.section sqfya
-.section sqfyb
-.section sqfza
-.section sqfzb
-.section sqf1a
-.section sqf1b
-.section sqf2a
-.section sqf2b
-.section sqf3a
-.section sqf3b
-.section sqf4a
-.section sqf4b
-.section sqf5a
-.section sqf5b
-.section sqf6a
-.section sqf6b
-.section sqf7a
-.section sqf7b
-.section sqf8a
-.section sqf8b
-.section sqf9a
-.section sqf9b
-.section sqf0a
-.section sqf0b
-.section sqgaa
-.section sqgab
-.section sqgba
-.section sqgbb
-.section sqgca
-.section sqgcb
-.section sqgda
-.section sqgdb
-.section sqgea
-.section sqgeb
-.section sqgfa
-.section sqgfb
-.section sqgga
-.section sqggb
-.section sqgha
-.section sqghb
-.section sqgia
-.section sqgib
-.section sqgja
-.section sqgjb
-.section sqgka
-.section sqgkb
-.section sqgla
-.section sqglb
-.section sqgma
-.section sqgmb
-.section sqgna
-.section sqgnb
-.section sqgoa
-.section sqgob
-.section sqgpa
-.section sqgpb
-.section sqgqa
-.section sqgqb
-.section sqgra
-.section sqgrb
-.section sqgsa
-.section sqgsb
-.section sqgta
-.section sqgtb
-.section sqgua
-.section sqgub
-.section sqgva
-.section sqgvb
-.section sqgwa
-.section sqgwb
-.section sqgxa
-.section sqgxb
-.section sqgya
-.section sqgyb
-.section sqgza
-.section sqgzb
-.section sqg1a
-.section sqg1b
-.section sqg2a
-.section sqg2b
-.section sqg3a
-.section sqg3b
-.section sqg4a
-.section sqg4b
-.section sqg5a
-.section sqg5b
-.section sqg6a
-.section sqg6b
-.section sqg7a
-.section sqg7b
-.section sqg8a
-.section sqg8b
-.section sqg9a
-.section sqg9b
-.section sqg0a
-.section sqg0b
-.section sqhaa
-.section sqhab
-.section sqhba
-.section sqhbb
-.section sqhca
-.section sqhcb
-.section sqhda
-.section sqhdb
-.section sqhea
-.section sqheb
-.section sqhfa
-.section sqhfb
-.section sqhga
-.section sqhgb
-.section sqhha
-.section sqhhb
-.section sqhia
-.section sqhib
-.section sqhja
-.section sqhjb
-.section sqhka
-.section sqhkb
-.section sqhla
-.section sqhlb
-.section sqhma
-.section sqhmb
-.section sqhna
-.section sqhnb
-.section sqhoa
-.section sqhob
-.section sqhpa
-.section sqhpb
-.section sqhqa
-.section sqhqb
-.section sqhra
-.section sqhrb
-.section sqhsa
-.section sqhsb
-.section sqhta
-.section sqhtb
-.section sqhua
-.section sqhub
-.section sqhva
-.section sqhvb
-.section sqhwa
-.section sqhwb
-.section sqhxa
-.section sqhxb
-.section sqhya
-.section sqhyb
-.section sqhza
-.section sqhzb
-.section sqh1a
-.section sqh1b
-.section sqh2a
-.section sqh2b
-.section sqh3a
-.section sqh3b
-.section sqh4a
-.section sqh4b
-.section sqh5a
-.section sqh5b
-.section sqh6a
-.section sqh6b
-.section sqh7a
-.section sqh7b
-.section sqh8a
-.section sqh8b
-.section sqh9a
-.section sqh9b
-.section sqh0a
-.section sqh0b
-.section sqiaa
-.section sqiab
-.section sqiba
-.section sqibb
-.section sqica
-.section sqicb
-.section sqida
-.section sqidb
-.section sqiea
-.section sqieb
-.section sqifa
-.section sqifb
-.section sqiga
-.section sqigb
-.section sqiha
-.section sqihb
-.section sqiia
-.section sqiib
-.section sqija
-.section sqijb
-.section sqika
-.section sqikb
-.section sqila
-.section sqilb
-.section sqima
-.section sqimb
-.section sqina
-.section sqinb
-.section sqioa
-.section sqiob
-.section sqipa
-.section sqipb
-.section sqiqa
-.section sqiqb
-.section sqira
-.section sqirb
-.section sqisa
-.section sqisb
-.section sqita
-.section sqitb
-.section sqiua
-.section sqiub
-.section sqiva
-.section sqivb
-.section sqiwa
-.section sqiwb
-.section sqixa
-.section sqixb
-.section sqiya
-.section sqiyb
-.section sqiza
-.section sqizb
-.section sqi1a
-.section sqi1b
-.section sqi2a
-.section sqi2b
-.section sqi3a
-.section sqi3b
-.section sqi4a
-.section sqi4b
-.section sqi5a
-.section sqi5b
-.section sqi6a
-.section sqi6b
-.section sqi7a
-.section sqi7b
-.section sqi8a
-.section sqi8b
-.section sqi9a
-.section sqi9b
-.section sqi0a
-.section sqi0b
-.section sqjaa
-.section sqjab
-.section sqjba
-.section sqjbb
-.section sqjca
-.section sqjcb
-.section sqjda
-.section sqjdb
-.section sqjea
-.section sqjeb
-.section sqjfa
-.section sqjfb
-.section sqjga
-.section sqjgb
-.section sqjha
-.section sqjhb
-.section sqjia
-.section sqjib
-.section sqjja
-.section sqjjb
-.section sqjka
-.section sqjkb
-.section sqjla
-.section sqjlb
-.section sqjma
-.section sqjmb
-.section sqjna
-.section sqjnb
-.section sqjoa
-.section sqjob
-.section sqjpa
-.section sqjpb
-.section sqjqa
-.section sqjqb
-.section sqjra
-.section sqjrb
-.section sqjsa
-.section sqjsb
-.section sqjta
-.section sqjtb
-.section sqjua
-.section sqjub
-.section sqjva
-.section sqjvb
-.section sqjwa
-.section sqjwb
-.section sqjxa
-.section sqjxb
-.section sqjya
-.section sqjyb
-.section sqjza
-.section sqjzb
-.section sqj1a
-.section sqj1b
-.section sqj2a
-.section sqj2b
-.section sqj3a
-.section sqj3b
-.section sqj4a
-.section sqj4b
-.section sqj5a
-.section sqj5b
-.section sqj6a
-.section sqj6b
-.section sqj7a
-.section sqj7b
-.section sqj8a
-.section sqj8b
-.section sqj9a
-.section sqj9b
-.section sqj0a
-.section sqj0b
-.section sqkaa
-.section sqkab
-.section sqkba
-.section sqkbb
-.section sqkca
-.section sqkcb
-.section sqkda
-.section sqkdb
-.section sqkea
-.section sqkeb
-.section sqkfa
-.section sqkfb
-.section sqkga
-.section sqkgb
-.section sqkha
-.section sqkhb
-.section sqkia
-.section sqkib
-.section sqkja
-.section sqkjb
-.section sqkka
-.section sqkkb
-.section sqkla
-.section sqklb
-.section sqkma
-.section sqkmb
-.section sqkna
-.section sqknb
-.section sqkoa
-.section sqkob
-.section sqkpa
-.section sqkpb
-.section sqkqa
-.section sqkqb
-.section sqkra
-.section sqkrb
-.section sqksa
-.section sqksb
-.section sqkta
-.section sqktb
-.section sqkua
-.section sqkub
-.section sqkva
-.section sqkvb
-.section sqkwa
-.section sqkwb
-.section sqkxa
-.section sqkxb
-.section sqkya
-.section sqkyb
-.section sqkza
-.section sqkzb
-.section sqk1a
-.section sqk1b
-.section sqk2a
-.section sqk2b
-.section sqk3a
-.section sqk3b
-.section sqk4a
-.section sqk4b
-.section sqk5a
-.section sqk5b
-.section sqk6a
-.section sqk6b
-.section sqk7a
-.section sqk7b
-.section sqk8a
-.section sqk8b
-.section sqk9a
-.section sqk9b
-.section sqk0a
-.section sqk0b
-.section sqlaa
-.section sqlab
-.section sqlba
-.section sqlbb
-.section sqlca
-.section sqlcb
-.section sqlda
-.section sqldb
-.section sqlea
-.section sqleb
-.section sqlfa
-.section sqlfb
-.section sqlga
-.section sqlgb
-.section sqlha
-.section sqlhb
-.section sqlia
-.section sqlib
-.section sqlja
-.section sqljb
-.section sqlka
-.section sqlkb
-.section sqlla
-.section sqllb
-.section sqlma
-.section sqlmb
-.section sqlna
-.section sqlnb
-.section sqloa
-.section sqlob
-.section sqlpa
-.section sqlpb
-.section sqlqa
-.section sqlqb
-.section sqlra
-.section sqlrb
-.section sqlsa
-.section sqlsb
-.section sqlta
-.section sqltb
-.section sqlua
-.section sqlub
-.section sqlva
-.section sqlvb
-.section sqlwa
-.section sqlwb
-.section sqlxa
-.section sqlxb
-.section sqlya
-.section sqlyb
-.section sqlza
-.section sqlzb
-.section sql1a
-.section sql1b
-.section sql2a
-.section sql2b
-.section sql3a
-.section sql3b
-.section sql4a
-.section sql4b
-.section sql5a
-.section sql5b
-.section sql6a
-.section sql6b
-.section sql7a
-.section sql7b
-.section sql8a
-.section sql8b
-.section sql9a
-.section sql9b
-.section sql0a
-.section sql0b
-.section sqmaa
-.section sqmab
-.section sqmba
-.section sqmbb
-.section sqmca
-.section sqmcb
-.section sqmda
-.section sqmdb
-.section sqmea
-.section sqmeb
-.section sqmfa
-.section sqmfb
-.section sqmga
-.section sqmgb
-.section sqmha
-.section sqmhb
-.section sqmia
-.section sqmib
-.section sqmja
-.section sqmjb
-.section sqmka
-.section sqmkb
-.section sqmla
-.section sqmlb
-.section sqmma
-.section sqmmb
-.section sqmna
-.section sqmnb
-.section sqmoa
-.section sqmob
-.section sqmpa
-.section sqmpb
-.section sqmqa
-.section sqmqb
-.section sqmra
-.section sqmrb
-.section sqmsa
-.section sqmsb
-.section sqmta
-.section sqmtb
-.section sqmua
-.section sqmub
-.section sqmva
-.section sqmvb
-.section sqmwa
-.section sqmwb
-.section sqmxa
-.section sqmxb
-.section sqmya
-.section sqmyb
-.section sqmza
-.section sqmzb
-.section sqm1a
-.section sqm1b
-.section sqm2a
-.section sqm2b
-.section sqm3a
-.section sqm3b
-.section sqm4a
-.section sqm4b
-.section sqm5a
-.section sqm5b
-.section sqm6a
-.section sqm6b
-.section sqm7a
-.section sqm7b
-.section sqm8a
-.section sqm8b
-.section sqm9a
-.section sqm9b
-.section sqm0a
-.section sqm0b
-.section sqnaa
-.section sqnab
-.section sqnba
-.section sqnbb
-.section sqnca
-.section sqncb
-.section sqnda
-.section sqndb
-.section sqnea
-.section sqneb
-.section sqnfa
-.section sqnfb
-.section sqnga
-.section sqngb
-.section sqnha
-.section sqnhb
-.section sqnia
-.section sqnib
-.section sqnja
-.section sqnjb
-.section sqnka
-.section sqnkb
-.section sqnla
-.section sqnlb
-.section sqnma
-.section sqnmb
-.section sqnna
-.section sqnnb
-.section sqnoa
-.section sqnob
-.section sqnpa
-.section sqnpb
-.section sqnqa
-.section sqnqb
-.section sqnra
-.section sqnrb
-.section sqnsa
-.section sqnsb
-.section sqnta
-.section sqntb
-.section sqnua
-.section sqnub
-.section sqnva
-.section sqnvb
-.section sqnwa
-.section sqnwb
-.section sqnxa
-.section sqnxb
-.section sqnya
-.section sqnyb
-.section sqnza
-.section sqnzb
-.section sqn1a
-.section sqn1b
-.section sqn2a
-.section sqn2b
-.section sqn3a
-.section sqn3b
-.section sqn4a
-.section sqn4b
-.section sqn5a
-.section sqn5b
-.section sqn6a
-.section sqn6b
-.section sqn7a
-.section sqn7b
-.section sqn8a
-.section sqn8b
-.section sqn9a
-.section sqn9b
-.section sqn0a
-.section sqn0b
-.section sqoaa
-.section sqoab
-.section sqoba
-.section sqobb
-.section sqoca
-.section sqocb
-.section sqoda
-.section sqodb
-.section sqoea
-.section sqoeb
-.section sqofa
-.section sqofb
-.section sqoga
-.section sqogb
-.section sqoha
-.section sqohb
-.section sqoia
-.section sqoib
-.section sqoja
-.section sqojb
-.section sqoka
-.section sqokb
-.section sqola
-.section sqolb
-.section sqoma
-.section sqomb
-.section sqona
-.section sqonb
-.section sqooa
-.section sqoob
-.section sqopa
-.section sqopb
-.section sqoqa
-.section sqoqb
-.section sqora
-.section sqorb
-.section sqosa
-.section sqosb
-.section sqota
-.section sqotb
-.section sqoua
-.section sqoub
-.section sqova
-.section sqovb
-.section sqowa
-.section sqowb
-.section sqoxa
-.section sqoxb
-.section sqoya
-.section sqoyb
-.section sqoza
-.section sqozb
-.section sqo1a
-.section sqo1b
-.section sqo2a
-.section sqo2b
-.section sqo3a
-.section sqo3b
-.section sqo4a
-.section sqo4b
-.section sqo5a
-.section sqo5b
-.section sqo6a
-.section sqo6b
-.section sqo7a
-.section sqo7b
-.section sqo8a
-.section sqo8b
-.section sqo9a
-.section sqo9b
-.section sqo0a
-.section sqo0b
-.section sqpaa
-.section sqpab
-.section sqpba
-.section sqpbb
-.section sqpca
-.section sqpcb
-.section sqpda
-.section sqpdb
-.section sqpea
-.section sqpeb
-.section sqpfa
-.section sqpfb
-.section sqpga
-.section sqpgb
-.section sqpha
-.section sqphb
-.section sqpia
-.section sqpib
-.section sqpja
-.section sqpjb
-.section sqpka
-.section sqpkb
-.section sqpla
-.section sqplb
-.section sqpma
-.section sqpmb
-.section sqpna
-.section sqpnb
-.section sqpoa
-.section sqpob
-.section sqppa
-.section sqppb
-.section sqpqa
-.section sqpqb
-.section sqpra
-.section sqprb
-.section sqpsa
-.section sqpsb
-.section sqpta
-.section sqptb
-.section sqpua
-.section sqpub
-.section sqpva
-.section sqpvb
-.section sqpwa
-.section sqpwb
-.section sqpxa
-.section sqpxb
-.section sqpya
-.section sqpyb
-.section sqpza
-.section sqpzb
-.section sqp1a
-.section sqp1b
-.section sqp2a
-.section sqp2b
-.section sqp3a
-.section sqp3b
-.section sqp4a
-.section sqp4b
-.section sqp5a
-.section sqp5b
-.section sqp6a
-.section sqp6b
-.section sqp7a
-.section sqp7b
-.section sqp8a
-.section sqp8b
-.section sqp9a
-.section sqp9b
-.section sqp0a
-.section sqp0b
-.section sqqaa
-.section sqqab
-.section sqqba
-.section sqqbb
-.section sqqca
-.section sqqcb
-.section sqqda
-.section sqqdb
-.section sqqea
-.section sqqeb
-.section sqqfa
-.section sqqfb
-.section sqqga
-.section sqqgb
-.section sqqha
-.section sqqhb
-.section sqqia
-.section sqqib
-.section sqqja
-.section sqqjb
-.section sqqka
-.section sqqkb
-.section sqqla
-.section sqqlb
-.section sqqma
-.section sqqmb
-.section sqqna
-.section sqqnb
-.section sqqoa
-.section sqqob
-.section sqqpa
-.section sqqpb
-.section sqqqa
-.section sqqqb
-.section sqqra
-.section sqqrb
-.section sqqsa
-.section sqqsb
-.section sqqta
-.section sqqtb
-.section sqqua
-.section sqqub
-.section sqqva
-.section sqqvb
-.section sqqwa
-.section sqqwb
-.section sqqxa
-.section sqqxb
-.section sqqya
-.section sqqyb
-.section sqqza
-.section sqqzb
-.section sqq1a
-.section sqq1b
-.section sqq2a
-.section sqq2b
-.section sqq3a
-.section sqq3b
-.section sqq4a
-.section sqq4b
-.section sqq5a
-.section sqq5b
-.section sqq6a
-.section sqq6b
-.section sqq7a
-.section sqq7b
-.section sqq8a
-.section sqq8b
-.section sqq9a
-.section sqq9b
-.section sqq0a
-.section sqq0b
-.section sqraa
-.section sqrab
-.section sqrba
-.section sqrbb
-.section sqrca
-.section sqrcb
-.section sqrda
-.section sqrdb
-.section sqrea
-.section sqreb
-.section sqrfa
-.section sqrfb
-.section sqrga
-.section sqrgb
-.section sqrha
-.section sqrhb
-.section sqria
-.section sqrib
-.section sqrja
-.section sqrjb
-.section sqrka
-.section sqrkb
-.section sqrla
-.section sqrlb
-.section sqrma
-.section sqrmb
-.section sqrna
-.section sqrnb
-.section sqroa
-.section sqrob
-.section sqrpa
-.section sqrpb
-.section sqrqa
-.section sqrqb
-.section sqrra
-.section sqrrb
-.section sqrsa
-.section sqrsb
-.section sqrta
-.section sqrtb
-.section sqrua
-.section sqrub
-.section sqrva
-.section sqrvb
-.section sqrwa
-.section sqrwb
-.section sqrxa
-.section sqrxb
-.section sqrya
-.section sqryb
-.section sqrza
-.section sqrzb
-.section sqr1a
-.section sqr1b
-.section sqr2a
-.section sqr2b
-.section sqr3a
-.section sqr3b
-.section sqr4a
-.section sqr4b
-.section sqr5a
-.section sqr5b
-.section sqr6a
-.section sqr6b
-.section sqr7a
-.section sqr7b
-.section sqr8a
-.section sqr8b
-.section sqr9a
-.section sqr9b
-.section sqr0a
-.section sqr0b
-.section sqsaa
-.section sqsab
-.section sqsba
-.section sqsbb
-.section sqsca
-.section sqscb
-.section sqsda
-.section sqsdb
-.section sqsea
-.section sqseb
-.section sqsfa
-.section sqsfb
-.section sqsga
-.section sqsgb
-.section sqsha
-.section sqshb
-.section sqsia
-.section sqsib
-.section sqsja
-.section sqsjb
-.section sqska
-.section sqskb
-.section sqsla
-.section sqslb
-.section sqsma
-.section sqsmb
-.section sqsna
-.section sqsnb
-.section sqsoa
-.section sqsob
-.section sqspa
-.section sqspb
-.section sqsqa
-.section sqsqb
-.section sqsra
-.section sqsrb
-.section sqssa
-.section sqssb
-.section sqsta
-.section sqstb
-.section sqsua
-.section sqsub
-.section sqsva
-.section sqsvb
-.section sqswa
-.section sqswb
-.section sqsxa
-.section sqsxb
-.section sqsya
-.section sqsyb
-.section sqsza
-.section sqszb
-.section sqs1a
-.section sqs1b
-.section sqs2a
-.section sqs2b
-.section sqs3a
-.section sqs3b
-.section sqs4a
-.section sqs4b
-.section sqs5a
-.section sqs5b
-.section sqs6a
-.section sqs6b
-.section sqs7a
-.section sqs7b
-.section sqs8a
-.section sqs8b
-.section sqs9a
-.section sqs9b
-.section sqs0a
-.section sqs0b
-.section sqtaa
-.section sqtab
-.section sqtba
-.section sqtbb
-.section sqtca
-.section sqtcb
-.section sqtda
-.section sqtdb
-.section sqtea
-.section sqteb
-.section sqtfa
-.section sqtfb
-.section sqtga
-.section sqtgb
-.section sqtha
-.section sqthb
-.section sqtia
-.section sqtib
-.section sqtja
-.section sqtjb
-.section sqtka
-.section sqtkb
-.section sqtla
-.section sqtlb
-.section sqtma
-.section sqtmb
-.section sqtna
-.section sqtnb
-.section sqtoa
-.section sqtob
-.section sqtpa
-.section sqtpb
-.section sqtqa
-.section sqtqb
-.section sqtra
-.section sqtrb
-.section sqtsa
-.section sqtsb
-.section sqtta
-.section sqttb
-.section sqtua
-.section sqtub
-.section sqtva
-.section sqtvb
-.section sqtwa
-.section sqtwb
-.section sqtxa
-.section sqtxb
-.section sqtya
-.section sqtyb
-.section sqtza
-.section sqtzb
-.section sqt1a
-.section sqt1b
-.section sqt2a
-.section sqt2b
-.section sqt3a
-.section sqt3b
-.section sqt4a
-.section sqt4b
-.section sqt5a
-.section sqt5b
-.section sqt6a
-.section sqt6b
-.section sqt7a
-.section sqt7b
-.section sqt8a
-.section sqt8b
-.section sqt9a
-.section sqt9b
-.section sqt0a
-.section sqt0b
-.section squaa
-.section squab
-.section squba
-.section squbb
-.section squca
-.section squcb
-.section squda
-.section squdb
-.section squea
-.section squeb
-.section squfa
-.section squfb
-.section squga
-.section squgb
-.section squha
-.section squhb
-.section squia
-.section squib
-.section squja
-.section squjb
-.section squka
-.section squkb
-.section squla
-.section squlb
-.section squma
-.section squmb
-.section squna
-.section squnb
-.section squoa
-.section squob
-.section squpa
-.section squpb
-.section squqa
-.section squqb
-.section squra
-.section squrb
-.section squsa
-.section squsb
-.section squta
-.section squtb
-.section squua
-.section squub
-.section squva
-.section squvb
-.section squwa
-.section squwb
-.section squxa
-.section squxb
-.section squya
-.section squyb
-.section squza
-.section squzb
-.section squ1a
-.section squ1b
-.section squ2a
-.section squ2b
-.section squ3a
-.section squ3b
-.section squ4a
-.section squ4b
-.section squ5a
-.section squ5b
-.section squ6a
-.section squ6b
-.section squ7a
-.section squ7b
-.section squ8a
-.section squ8b
-.section squ9a
-.section squ9b
-.section squ0a
-.section squ0b
-.section sqvaa
-.section sqvab
-.section sqvba
-.section sqvbb
-.section sqvca
-.section sqvcb
-.section sqvda
-.section sqvdb
-.section sqvea
-.section sqveb
-.section sqvfa
-.section sqvfb
-.section sqvga
-.section sqvgb
-.section sqvha
-.section sqvhb
-.section sqvia
-.section sqvib
-.section sqvja
-.section sqvjb
-.section sqvka
-.section sqvkb
-.section sqvla
-.section sqvlb
-.section sqvma
-.section sqvmb
-.section sqvna
-.section sqvnb
-.section sqvoa
-.section sqvob
-.section sqvpa
-.section sqvpb
-.section sqvqa
-.section sqvqb
-.section sqvra
-.section sqvrb
-.section sqvsa
-.section sqvsb
-.section sqvta
-.section sqvtb
-.section sqvua
-.section sqvub
-.section sqvva
-.section sqvvb
-.section sqvwa
-.section sqvwb
-.section sqvxa
-.section sqvxb
-.section sqvya
-.section sqvyb
-.section sqvza
-.section sqvzb
-.section sqv1a
-.section sqv1b
-.section sqv2a
-.section sqv2b
-.section sqv3a
-.section sqv3b
-.section sqv4a
-.section sqv4b
-.section sqv5a
-.section sqv5b
-.section sqv6a
-.section sqv6b
-.section sqv7a
-.section sqv7b
-.section sqv8a
-.section sqv8b
-.section sqv9a
-.section sqv9b
-.section sqv0a
-.section sqv0b
-.section sqwaa
-.section sqwab
-.section sqwba
-.section sqwbb
-.section sqwca
-.section sqwcb
-.section sqwda
-.section sqwdb
-.section sqwea
-.section sqweb
-.section sqwfa
-.section sqwfb
-.section sqwga
-.section sqwgb
-.section sqwha
-.section sqwhb
-.section sqwia
-.section sqwib
-.section sqwja
-.section sqwjb
-.section sqwka
-.section sqwkb
-.section sqwla
-.section sqwlb
-.section sqwma
-.section sqwmb
-.section sqwna
-.section sqwnb
-.section sqwoa
-.section sqwob
-.section sqwpa
-.section sqwpb
-.section sqwqa
-.section sqwqb
-.section sqwra
-.section sqwrb
-.section sqwsa
-.section sqwsb
-.section sqwta
-.section sqwtb
-.section sqwua
-.section sqwub
-.section sqwva
-.section sqwvb
-.section sqwwa
-.section sqwwb
-.section sqwxa
-.section sqwxb
-.section sqwya
-.section sqwyb
-.section sqwza
-.section sqwzb
-.section sqw1a
-.section sqw1b
-.section sqw2a
-.section sqw2b
-.section sqw3a
-.section sqw3b
-.section sqw4a
-.section sqw4b
-.section sqw5a
-.section sqw5b
-.section sqw6a
-.section sqw6b
-.section sqw7a
-.section sqw7b
-.section sqw8a
-.section sqw8b
-.section sqw9a
-.section sqw9b
-.section sqw0a
-.section sqw0b
-.section sqxaa
-.section sqxab
-.section sqxba
-.section sqxbb
-.section sqxca
-.section sqxcb
-.section sqxda
-.section sqxdb
-.section sqxea
-.section sqxeb
-.section sqxfa
-.section sqxfb
-.section sqxga
-.section sqxgb
-.section sqxha
-.section sqxhb
-.section sqxia
-.section sqxib
-.section sqxja
-.section sqxjb
-.section sqxka
-.section sqxkb
-.section sqxla
-.section sqxlb
-.section sqxma
-.section sqxmb
-.section sqxna
-.section sqxnb
-.section sqxoa
-.section sqxob
-.section sqxpa
-.section sqxpb
-.section sqxqa
-.section sqxqb
-.section sqxra
-.section sqxrb
-.section sqxsa
-.section sqxsb
-.section sqxta
-.section sqxtb
-.section sqxua
-.section sqxub
-.section sqxva
-.section sqxvb
-.section sqxwa
-.section sqxwb
-.section sqxxa
-.section sqxxb
-.section sqxya
-.section sqxyb
-.section sqxza
-.section sqxzb
-.section sqx1a
-.section sqx1b
-.section sqx2a
-.section sqx2b
-.section sqx3a
-.section sqx3b
-.section sqx4a
-.section sqx4b
-.section sqx5a
-.section sqx5b
-.section sqx6a
-.section sqx6b
-.section sqx7a
-.section sqx7b
-.section sqx8a
-.section sqx8b
-.section sqx9a
-.section sqx9b
-.section sqx0a
-.section sqx0b
-.section sqyaa
-.section sqyab
-.section sqyba
-.section sqybb
-.section sqyca
-.section sqycb
-.section sqyda
-.section sqydb
-.section sqyea
-.section sqyeb
-.section sqyfa
-.section sqyfb
-.section sqyga
-.section sqygb
-.section sqyha
-.section sqyhb
-.section sqyia
-.section sqyib
-.section sqyja
-.section sqyjb
-.section sqyka
-.section sqykb
-.section sqyla
-.section sqylb
-.section sqyma
-.section sqymb
-.section sqyna
-.section sqynb
-.section sqyoa
-.section sqyob
-.section sqypa
-.section sqypb
-.section sqyqa
-.section sqyqb
-.section sqyra
-.section sqyrb
-.section sqysa
-.section sqysb
-.section sqyta
-.section sqytb
-.section sqyua
-.section sqyub
-.section sqyva
-.section sqyvb
-.section sqywa
-.section sqywb
-.section sqyxa
-.section sqyxb
-.section sqyya
-.section sqyyb
-.section sqyza
-.section sqyzb
-.section sqy1a
-.section sqy1b
-.section sqy2a
-.section sqy2b
-.section sqy3a
-.section sqy3b
-.section sqy4a
-.section sqy4b
-.section sqy5a
-.section sqy5b
-.section sqy6a
-.section sqy6b
-.section sqy7a
-.section sqy7b
-.section sqy8a
-.section sqy8b
-.section sqy9a
-.section sqy9b
-.section sqy0a
-.section sqy0b
-.section sqzaa
-.section sqzab
-.section sqzba
-.section sqzbb
-.section sqzca
-.section sqzcb
-.section sqzda
-.section sqzdb
-.section sqzea
-.section sqzeb
-.section sqzfa
-.section sqzfb
-.section sqzga
-.section sqzgb
-.section sqzha
-.section sqzhb
-.section sqzia
-.section sqzib
-.section sqzja
-.section sqzjb
-.section sqzka
-.section sqzkb
-.section sqzla
-.section sqzlb
-.section sqzma
-.section sqzmb
-.section sqzna
-.section sqznb
-.section sqzoa
-.section sqzob
-.section sqzpa
-.section sqzpb
-.section sqzqa
-.section sqzqb
-.section sqzra
-.section sqzrb
-.section sqzsa
-.section sqzsb
-.section sqzta
-.section sqztb
-.section sqzua
-.section sqzub
-.section sqzva
-.section sqzvb
-.section sqzwa
-.section sqzwb
-.section sqzxa
-.section sqzxb
-.section sqzya
-.section sqzyb
-.section sqzza
-.section sqzzb
-.section sqz1a
-.section sqz1b
-.section sqz2a
-.section sqz2b
-.section sqz3a
-.section sqz3b
-.section sqz4a
-.section sqz4b
-.section sqz5a
-.section sqz5b
-.section sqz6a
-.section sqz6b
-.section sqz7a
-.section sqz7b
-.section sqz8a
-.section sqz8b
-.section sqz9a
-.section sqz9b
-.section sqz0a
-.section sqz0b
-.section sq1aa
-.section sq1ab
-.section sq1ba
-.section sq1bb
-.section sq1ca
-.section sq1cb
-.section sq1da
-.section sq1db
-.section sq1ea
-.section sq1eb
-.section sq1fa
-.section sq1fb
-.section sq1ga
-.section sq1gb
-.section sq1ha
-.section sq1hb
-.section sq1ia
-.section sq1ib
-.section sq1ja
-.section sq1jb
-.section sq1ka
-.section sq1kb
-.section sq1la
-.section sq1lb
-.section sq1ma
-.section sq1mb
-.section sq1na
-.section sq1nb
-.section sq1oa
-.section sq1ob
-.section sq1pa
-.section sq1pb
-.section sq1qa
-.section sq1qb
-.section sq1ra
-.section sq1rb
-.section sq1sa
-.section sq1sb
-.section sq1ta
-.section sq1tb
-.section sq1ua
-.section sq1ub
-.section sq1va
-.section sq1vb
-.section sq1wa
-.section sq1wb
-.section sq1xa
-.section sq1xb
-.section sq1ya
-.section sq1yb
-.section sq1za
-.section sq1zb
-.section sq11a
-.section sq11b
-.section sq12a
-.section sq12b
-.section sq13a
-.section sq13b
-.section sq14a
-.section sq14b
-.section sq15a
-.section sq15b
-.section sq16a
-.section sq16b
-.section sq17a
-.section sq17b
-.section sq18a
-.section sq18b
-.section sq19a
-.section sq19b
-.section sq10a
-.section sq10b
-.section sq2aa
-.section sq2ab
-.section sq2ba
-.section sq2bb
-.section sq2ca
-.section sq2cb
-.section sq2da
-.section sq2db
-.section sq2ea
-.section sq2eb
-.section sq2fa
-.section sq2fb
-.section sq2ga
-.section sq2gb
-.section sq2ha
-.section sq2hb
-.section sq2ia
-.section sq2ib
-.section sq2ja
-.section sq2jb
-.section sq2ka
-.section sq2kb
-.section sq2la
-.section sq2lb
-.section sq2ma
-.section sq2mb
-.section sq2na
-.section sq2nb
-.section sq2oa
-.section sq2ob
-.section sq2pa
-.section sq2pb
-.section sq2qa
-.section sq2qb
-.section sq2ra
-.section sq2rb
-.section sq2sa
-.section sq2sb
-.section sq2ta
-.section sq2tb
-.section sq2ua
-.section sq2ub
-.section sq2va
-.section sq2vb
-.section sq2wa
-.section sq2wb
-.section sq2xa
-.section sq2xb
-.section sq2ya
-.section sq2yb
-.section sq2za
-.section sq2zb
-.section sq21a
-.section sq21b
-.section sq22a
-.section sq22b
-.section sq23a
-.section sq23b
-.section sq24a
-.section sq24b
-.section sq25a
-.section sq25b
-.section sq26a
-.section sq26b
-.section sq27a
-.section sq27b
-.section sq28a
-.section sq28b
-.section sq29a
-.section sq29b
-.section sq20a
-.section sq20b
-.section sq3aa
-.section sq3ab
-.section sq3ba
-.section sq3bb
-.section sq3ca
-.section sq3cb
-.section sq3da
-.section sq3db
-.section sq3ea
-.section sq3eb
-.section sq3fa
-.section sq3fb
-.section sq3ga
-.section sq3gb
-.section sq3ha
-.section sq3hb
-.section sq3ia
-.section sq3ib
-.section sq3ja
-.section sq3jb
-.section sq3ka
-.section sq3kb
-.section sq3la
-.section sq3lb
-.section sq3ma
-.section sq3mb
-.section sq3na
-.section sq3nb
-.section sq3oa
-.section sq3ob
-.section sq3pa
-.section sq3pb
-.section sq3qa
-.section sq3qb
-.section sq3ra
-.section sq3rb
-.section sq3sa
-.section sq3sb
-.section sq3ta
-.section sq3tb
-.section sq3ua
-.section sq3ub
-.section sq3va
-.section sq3vb
-.section sq3wa
-.section sq3wb
-.section sq3xa
-.section sq3xb
-.section sq3ya
-.section sq3yb
-.section sq3za
-.section sq3zb
-.section sq31a
-.section sq31b
-.section sq32a
-.section sq32b
-.section sq33a
-.section sq33b
-.section sq34a
-.section sq34b
-.section sq35a
-.section sq35b
-.section sq36a
-.section sq36b
-.section sq37a
-.section sq37b
-.section sq38a
-.section sq38b
-.section sq39a
-.section sq39b
-.section sq30a
-.section sq30b
-.section sq4aa
-.section sq4ab
-.section sq4ba
-.section sq4bb
-.section sq4ca
-.section sq4cb
-.section sq4da
-.section sq4db
-.section sq4ea
-.section sq4eb
-.section sq4fa
-.section sq4fb
-.section sq4ga
-.section sq4gb
-.section sq4ha
-.section sq4hb
-.section sq4ia
-.section sq4ib
-.section sq4ja
-.section sq4jb
-.section sq4ka
-.section sq4kb
-.section sq4la
-.section sq4lb
-.section sq4ma
-.section sq4mb
-.section sq4na
-.section sq4nb
-.section sq4oa
-.section sq4ob
-.section sq4pa
-.section sq4pb
-.section sq4qa
-.section sq4qb
-.section sq4ra
-.section sq4rb
-.section sq4sa
-.section sq4sb
-.section sq4ta
-.section sq4tb
-.section sq4ua
-.section sq4ub
-.section sq4va
-.section sq4vb
-.section sq4wa
-.section sq4wb
-.section sq4xa
-.section sq4xb
-.section sq4ya
-.section sq4yb
-.section sq4za
-.section sq4zb
-.section sq41a
-.section sq41b
-.section sq42a
-.section sq42b
-.section sq43a
-.section sq43b
-.section sq44a
-.section sq44b
-.section sq45a
-.section sq45b
-.section sq46a
-.section sq46b
-.section sq47a
-.section sq47b
-.section sq48a
-.section sq48b
-.section sq49a
-.section sq49b
-.section sq40a
-.section sq40b
-.section sq5aa
-.section sq5ab
-.section sq5ba
-.section sq5bb
-.section sq5ca
-.section sq5cb
-.section sq5da
-.section sq5db
-.section sq5ea
-.section sq5eb
-.section sq5fa
-.section sq5fb
-.section sq5ga
-.section sq5gb
-.section sq5ha
-.section sq5hb
-.section sq5ia
-.section sq5ib
-.section sq5ja
-.section sq5jb
-.section sq5ka
-.section sq5kb
-.section sq5la
-.section sq5lb
-.section sq5ma
-.section sq5mb
-.section sq5na
-.section sq5nb
-.section sq5oa
-.section sq5ob
-.section sq5pa
-.section sq5pb
-.section sq5qa
-.section sq5qb
-.section sq5ra
-.section sq5rb
-.section sq5sa
-.section sq5sb
-.section sq5ta
-.section sq5tb
-.section sq5ua
-.section sq5ub
-.section sq5va
-.section sq5vb
-.section sq5wa
-.section sq5wb
-.section sq5xa
-.section sq5xb
-.section sq5ya
-.section sq5yb
-.section sq5za
-.section sq5zb
-.section sq51a
-.section sq51b
-.section sq52a
-.section sq52b
-.section sq53a
-.section sq53b
-.section sq54a
-.section sq54b
-.section sq55a
-.section sq55b
-.section sq56a
-.section sq56b
-.section sq57a
-.section sq57b
-.section sq58a
-.section sq58b
-.section sq59a
-.section sq59b
-.section sq50a
-.section sq50b
-.section sq6aa
-.section sq6ab
-.section sq6ba
-.section sq6bb
-.section sq6ca
-.section sq6cb
-.section sq6da
-.section sq6db
-.section sq6ea
-.section sq6eb
-.section sq6fa
-.section sq6fb
-.section sq6ga
-.section sq6gb
-.section sq6ha
-.section sq6hb
-.section sq6ia
-.section sq6ib
-.section sq6ja
-.section sq6jb
-.section sq6ka
-.section sq6kb
-.section sq6la
-.section sq6lb
-.section sq6ma
-.section sq6mb
-.section sq6na
-.section sq6nb
-.section sq6oa
-.section sq6ob
-.section sq6pa
-.section sq6pb
-.section sq6qa
-.section sq6qb
-.section sq6ra
-.section sq6rb
-.section sq6sa
-.section sq6sb
-.section sq6ta
-.section sq6tb
-.section sq6ua
-.section sq6ub
-.section sq6va
-.section sq6vb
-.section sq6wa
-.section sq6wb
-.section sq6xa
-.section sq6xb
-.section sq6ya
-.section sq6yb
-.section sq6za
-.section sq6zb
-.section sq61a
-.section sq61b
-.section sq62a
-.section sq62b
-.section sq63a
-.section sq63b
-.section sq64a
-.section sq64b
-.section sq65a
-.section sq65b
-.section sq66a
-.section sq66b
-.section sq67a
-.section sq67b
-.section sq68a
-.section sq68b
-.section sq69a
-.section sq69b
-.section sq60a
-.section sq60b
-.section sq7aa
-.section sq7ab
-.section sq7ba
-.section sq7bb
-.section sq7ca
-.section sq7cb
-.section sq7da
-.section sq7db
-.section sq7ea
-.section sq7eb
-.section sq7fa
-.section sq7fb
-.section sq7ga
-.section sq7gb
-.section sq7ha
-.section sq7hb
-.section sq7ia
-.section sq7ib
-.section sq7ja
-.section sq7jb
-.section sq7ka
-.section sq7kb
-.section sq7la
-.section sq7lb
-.section sq7ma
-.section sq7mb
-.section sq7na
-.section sq7nb
-.section sq7oa
-.section sq7ob
-.section sq7pa
-.section sq7pb
-.section sq7qa
-.section sq7qb
-.section sq7ra
-.section sq7rb
-.section sq7sa
-.section sq7sb
-.section sq7ta
-.section sq7tb
-.section sq7ua
-.section sq7ub
-.section sq7va
-.section sq7vb
-.section sq7wa
-.section sq7wb
-.section sq7xa
-.section sq7xb
-.section sq7ya
-.section sq7yb
-.section sq7za
-.section sq7zb
-.section sq71a
-.section sq71b
-.section sq72a
-.section sq72b
-.section sq73a
-.section sq73b
-.section sq74a
-.section sq74b
-.section sq75a
-.section sq75b
-.section sq76a
-.section sq76b
-.section sq77a
-.section sq77b
-.section sq78a
-.section sq78b
-.section sq79a
-.section sq79b
-.section sq70a
-.section sq70b
-.section sq8aa
-.section sq8ab
-.section sq8ba
-.section sq8bb
-.section sq8ca
-.section sq8cb
-.section sq8da
-.section sq8db
-.section sq8ea
-.section sq8eb
-.section sq8fa
-.section sq8fb
-.section sq8ga
-.section sq8gb
-.section sq8ha
-.section sq8hb
-.section sq8ia
-.section sq8ib
-.section sq8ja
-.section sq8jb
-.section sq8ka
-.section sq8kb
-.section sq8la
-.section sq8lb
-.section sq8ma
-.section sq8mb
-.section sq8na
-.section sq8nb
-.section sq8oa
-.section sq8ob
-.section sq8pa
-.section sq8pb
-.section sq8qa
-.section sq8qb
-.section sq8ra
-.section sq8rb
-.section sq8sa
-.section sq8sb
-.section sq8ta
-.section sq8tb
-.section sq8ua
-.section sq8ub
-.section sq8va
-.section sq8vb
-.section sq8wa
-.section sq8wb
-.section sq8xa
-.section sq8xb
-.section sq8ya
-.section sq8yb
-.section sq8za
-.section sq8zb
-.section sq81a
-.section sq81b
-.section sq82a
-.section sq82b
-.section sq83a
-.section sq83b
-.section sq84a
-.section sq84b
-.section sq85a
-.section sq85b
-.section sq86a
-.section sq86b
-.section sq87a
-.section sq87b
-.section sq88a
-.section sq88b
-.section sq89a
-.section sq89b
-.section sq80a
-.section sq80b
-.section sq9aa
-.section sq9ab
-.section sq9ba
-.section sq9bb
-.section sq9ca
-.section sq9cb
-.section sq9da
-.section sq9db
-.section sq9ea
-.section sq9eb
-.section sq9fa
-.section sq9fb
-.section sq9ga
-.section sq9gb
-.section sq9ha
-.section sq9hb
-.section sq9ia
-.section sq9ib
-.section sq9ja
-.section sq9jb
-.section sq9ka
-.section sq9kb
-.section sq9la
-.section sq9lb
-.section sq9ma
-.section sq9mb
-.section sq9na
-.section sq9nb
-.section sq9oa
-.section sq9ob
-.section sq9pa
-.section sq9pb
-.section sq9qa
-.section sq9qb
-.section sq9ra
-.section sq9rb
-.section sq9sa
-.section sq9sb
-.section sq9ta
-.section sq9tb
-.section sq9ua
-.section sq9ub
-.section sq9va
-.section sq9vb
-.section sq9wa
-.section sq9wb
-.section sq9xa
-.section sq9xb
-.section sq9ya
-.section sq9yb
-.section sq9za
-.section sq9zb
-.section sq91a
-.section sq91b
-.section sq92a
-.section sq92b
-.section sq93a
-.section sq93b
-.section sq94a
-.section sq94b
-.section sq95a
-.section sq95b
-.section sq96a
-.section sq96b
-.section sq97a
-.section sq97b
-.section sq98a
-.section sq98b
-.section sq99a
-.section sq99b
-.section sq90a
-.section sq90b
-.section sq0aa
-.section sq0ab
-.section sq0ba
-.section sq0bb
-.section sq0ca
-.section sq0cb
-.section sq0da
-.section sq0db
-.section sq0ea
-.section sq0eb
-.section sq0fa
-.section sq0fb
-.section sq0ga
-.section sq0gb
-.section sq0ha
-.section sq0hb
-.section sq0ia
-.section sq0ib
-.section sq0ja
-.section sq0jb
-.section sq0ka
-.section sq0kb
-.section sq0la
-.section sq0lb
-.section sq0ma
-.section sq0mb
-.section sq0na
-.section sq0nb
-.section sq0oa
-.section sq0ob
-.section sq0pa
-.section sq0pb
-.section sq0qa
-.section sq0qb
-.section sq0ra
-.section sq0rb
-.section sq0sa
-.section sq0sb
-.section sq0ta
-.section sq0tb
-.section sq0ua
-.section sq0ub
-.section sq0va
-.section sq0vb
-.section sq0wa
-.section sq0wb
-.section sq0xa
-.section sq0xb
-.section sq0ya
-.section sq0yb
-.section sq0za
-.section sq0zb
-.section sq01a
-.section sq01b
-.section sq02a
-.section sq02b
-.section sq03a
-.section sq03b
-.section sq04a
-.section sq04b
-.section sq05a
-.section sq05b
-.section sq06a
-.section sq06b
-.section sq07a
-.section sq07b
-.section sq08a
-.section sq08b
-.section sq09a
-.section sq09b
-.section sq00a
-.section sq00b
-.section sraaa
-.section sraab
-.section sraba
-.section srabb
-.section sraca
-.section sracb
-.section srada
-.section sradb
-.section sraea
-.section sraeb
-.section srafa
-.section srafb
-.section sraga
-.section sragb
-.section sraha
-.section srahb
-.section sraia
-.section sraib
-.section sraja
-.section srajb
-.section sraka
-.section srakb
-.section srala
-.section sralb
-.section srama
-.section sramb
-.section srana
-.section sranb
-.section sraoa
-.section sraob
-.section srapa
-.section srapb
-.section sraqa
-.section sraqb
-.section srara
-.section srarb
-.section srasa
-.section srasb
-.section srata
-.section sratb
-.section sraua
-.section sraub
-.section srava
-.section sravb
-.section srawa
-.section srawb
-.section sraxa
-.section sraxb
-.section sraya
-.section srayb
-.section sraza
-.section srazb
-.section sra1a
-.section sra1b
-.section sra2a
-.section sra2b
-.section sra3a
-.section sra3b
-.section sra4a
-.section sra4b
-.section sra5a
-.section sra5b
-.section sra6a
-.section sra6b
-.section sra7a
-.section sra7b
-.section sra8a
-.section sra8b
-.section sra9a
-.section sra9b
-.section sra0a
-.section sra0b
-.section srbaa
-.section srbab
-.section srbba
-.section srbbb
-.section srbca
-.section srbcb
-.section srbda
-.section srbdb
-.section srbea
-.section srbeb
-.section srbfa
-.section srbfb
-.section srbga
-.section srbgb
-.section srbha
-.section srbhb
-.section srbia
-.section srbib
-.section srbja
-.section srbjb
-.section srbka
-.section srbkb
-.section srbla
-.section srblb
-.section srbma
-.section srbmb
-.section srbna
-.section srbnb
-.section srboa
-.section srbob
-.section srbpa
-.section srbpb
-.section srbqa
-.section srbqb
-.section srbra
-.section srbrb
-.section srbsa
-.section srbsb
-.section srbta
-.section srbtb
-.section srbua
-.section srbub
-.section srbva
-.section srbvb
-.section srbwa
-.section srbwb
-.section srbxa
-.section srbxb
-.section srbya
-.section srbyb
-.section srbza
-.section srbzb
-.section srb1a
-.section srb1b
-.section srb2a
-.section srb2b
-.section srb3a
-.section srb3b
-.section srb4a
-.section srb4b
-.section srb5a
-.section srb5b
-.section srb6a
-.section srb6b
-.section srb7a
-.section srb7b
-.section srb8a
-.section srb8b
-.section srb9a
-.section srb9b
-.section srb0a
-.section srb0b
-.section srcaa
-.section srcab
-.section srcba
-.section srcbb
-.section srcca
-.section srccb
-.section srcda
-.section srcdb
-.section srcea
-.section srceb
-.section srcfa
-.section srcfb
-.section srcga
-.section srcgb
-.section srcha
-.section srchb
-.section srcia
-.section srcib
-.section srcja
-.section srcjb
-.section srcka
-.section srckb
-.section srcla
-.section srclb
-.section srcma
-.section srcmb
-.section srcna
-.section srcnb
-.section srcoa
-.section srcob
-.section srcpa
-.section srcpb
-.section srcqa
-.section srcqb
-.section srcra
-.section srcrb
-.section srcsa
-.section srcsb
-.section srcta
-.section srctb
-.section srcua
-.section srcub
-.section srcva
-.section srcvb
-.section srcwa
-.section srcwb
-.section srcxa
-.section srcxb
-.section srcya
-.section srcyb
-.section srcza
-.section srczb
-.section src1a
-.section src1b
-.section src2a
-.section src2b
-.section src3a
-.section src3b
-.section src4a
-.section src4b
-.section src5a
-.section src5b
-.section src6a
-.section src6b
-.section src7a
-.section src7b
-.section src8a
-.section src8b
-.section src9a
-.section src9b
-.section src0a
-.section src0b
-.section srdaa
-.section srdab
-.section srdba
-.section srdbb
-.section srdca
-.section srdcb
-.section srdda
-.section srddb
-.section srdea
-.section srdeb
-.section srdfa
-.section srdfb
-.section srdga
-.section srdgb
-.section srdha
-.section srdhb
-.section srdia
-.section srdib
-.section srdja
-.section srdjb
-.section srdka
-.section srdkb
-.section srdla
-.section srdlb
-.section srdma
-.section srdmb
-.section srdna
-.section srdnb
-.section srdoa
-.section srdob
-.section srdpa
-.section srdpb
-.section srdqa
-.section srdqb
-.section srdra
-.section srdrb
-.section srdsa
-.section srdsb
-.section srdta
-.section srdtb
-.section srdua
-.section srdub
-.section srdva
-.section srdvb
-.section srdwa
-.section srdwb
-.section srdxa
-.section srdxb
-.section srdya
-.section srdyb
-.section srdza
-.section srdzb
-.section srd1a
-.section srd1b
-.section srd2a
-.section srd2b
-.section srd3a
-.section srd3b
-.section srd4a
-.section srd4b
-.section srd5a
-.section srd5b
-.section srd6a
-.section srd6b
-.section srd7a
-.section srd7b
-.section srd8a
-.section srd8b
-.section srd9a
-.section srd9b
-.section srd0a
-.section srd0b
-.section sreaa
-.section sreab
-.section sreba
-.section srebb
-.section sreca
-.section srecb
-.section sreda
-.section sredb
-.section sreea
-.section sreeb
-.section srefa
-.section srefb
-.section srega
-.section sregb
-.section sreha
-.section srehb
-.section sreia
-.section sreib
-.section sreja
-.section srejb
-.section sreka
-.section srekb
-.section srela
-.section srelb
-.section srema
-.section sremb
-.section srena
-.section srenb
-.section sreoa
-.section sreob
-.section srepa
-.section srepb
-.section sreqa
-.section sreqb
-.section srera
-.section srerb
-.section sresa
-.section sresb
-.section sreta
-.section sretb
-.section sreua
-.section sreub
-.section sreva
-.section srevb
-.section srewa
-.section srewb
-.section srexa
-.section srexb
-.section sreya
-.section sreyb
-.section sreza
-.section srezb
-.section sre1a
-.section sre1b
-.section sre2a
-.section sre2b
-.section sre3a
-.section sre3b
-.section sre4a
-.section sre4b
-.section sre5a
-.section sre5b
-.section sre6a
-.section sre6b
-.section sre7a
-.section sre7b
-.section sre8a
-.section sre8b
-.section sre9a
-.section sre9b
-.section sre0a
-.section sre0b
-.section srfaa
-.section srfab
-.section srfba
-.section srfbb
-.section srfca
-.section srfcb
-.section srfda
-.section srfdb
-.section srfea
-.section srfeb
-.section srffa
-.section srffb
-.section srfga
-.section srfgb
-.section srfha
-.section srfhb
-.section srfia
-.section srfib
-.section srfja
-.section srfjb
-.section srfka
-.section srfkb
-.section srfla
-.section srflb
-.section srfma
-.section srfmb
-.section srfna
-.section srfnb
-.section srfoa
-.section srfob
-.section srfpa
-.section srfpb
-.section srfqa
-.section srfqb
-.section srfra
-.section srfrb
-.section srfsa
-.section srfsb
-.section srfta
-.section srftb
-.section srfua
-.section srfub
-.section srfva
-.section srfvb
-.section srfwa
-.section srfwb
-.section srfxa
-.section srfxb
-.section srfya
-.section srfyb
-.section srfza
-.section srfzb
-.section srf1a
-.section srf1b
-.section srf2a
-.section srf2b
-.section srf3a
-.section srf3b
-.section srf4a
-.section srf4b
-.section srf5a
-.section srf5b
-.section srf6a
-.section srf6b
-.section srf7a
-.section srf7b
-.section srf8a
-.section srf8b
-.section srf9a
-.section srf9b
-.section srf0a
-.section srf0b
-.section srgaa
-.section srgab
-.section srgba
-.section srgbb
-.section srgca
-.section srgcb
-.section srgda
-.section srgdb
-.section srgea
-.section srgeb
-.section srgfa
-.section srgfb
-.section srgga
-.section srggb
-.section srgha
-.section srghb
-.section srgia
-.section srgib
-.section srgja
-.section srgjb
-.section srgka
-.section srgkb
-.section srgla
-.section srglb
-.section srgma
-.section srgmb
-.section srgna
-.section srgnb
-.section srgoa
-.section srgob
-.section srgpa
-.section srgpb
-.section srgqa
-.section srgqb
-.section srgra
-.section srgrb
-.section srgsa
-.section srgsb
-.section srgta
-.section srgtb
-.section srgua
-.section srgub
-.section srgva
-.section srgvb
-.section srgwa
-.section srgwb
-.section srgxa
-.section srgxb
-.section srgya
-.section srgyb
-.section srgza
-.section srgzb
-.section srg1a
-.section srg1b
-.section srg2a
-.section srg2b
-.section srg3a
-.section srg3b
-.section srg4a
-.section srg4b
-.section srg5a
-.section srg5b
-.section srg6a
-.section srg6b
-.section srg7a
-.section srg7b
-.section srg8a
-.section srg8b
-.section srg9a
-.section srg9b
-.section srg0a
-.section srg0b
-.section srhaa
-.section srhab
-.section srhba
-.section srhbb
-.section srhca
-.section srhcb
-.section srhda
-.section srhdb
-.section srhea
-.section srheb
-.section srhfa
-.section srhfb
-.section srhga
-.section srhgb
-.section srhha
-.section srhhb
-.section srhia
-.section srhib
-.section srhja
-.section srhjb
-.section srhka
-.section srhkb
-.section srhla
-.section srhlb
-.section srhma
-.section srhmb
-.section srhna
-.section srhnb
-.section srhoa
-.section srhob
-.section srhpa
-.section srhpb
-.section srhqa
-.section srhqb
-.section srhra
-.section srhrb
-.section srhsa
-.section srhsb
-.section srhta
-.section srhtb
-.section srhua
-.section srhub
-.section srhva
-.section srhvb
-.section srhwa
-.section srhwb
-.section srhxa
-.section srhxb
-.section srhya
-.section srhyb
-.section srhza
-.section srhzb
-.section srh1a
-.section srh1b
-.section srh2a
-.section srh2b
-.section srh3a
-.section srh3b
-.section srh4a
-.section srh4b
-.section srh5a
-.section srh5b
-.section srh6a
-.section srh6b
-.section srh7a
-.section srh7b
-.section srh8a
-.section srh8b
-.section srh9a
-.section srh9b
-.section srh0a
-.section srh0b
-.section sriaa
-.section sriab
-.section sriba
-.section sribb
-.section srica
-.section sricb
-.section srida
-.section sridb
-.section sriea
-.section srieb
-.section srifa
-.section srifb
-.section sriga
-.section srigb
-.section sriha
-.section srihb
-.section sriia
-.section sriib
-.section srija
-.section srijb
-.section srika
-.section srikb
-.section srila
-.section srilb
-.section srima
-.section srimb
-.section srina
-.section srinb
-.section srioa
-.section sriob
-.section sripa
-.section sripb
-.section sriqa
-.section sriqb
-.section srira
-.section srirb
-.section srisa
-.section srisb
-.section srita
-.section sritb
-.section sriua
-.section sriub
-.section sriva
-.section srivb
-.section sriwa
-.section sriwb
-.section srixa
-.section srixb
-.section sriya
-.section sriyb
-.section sriza
-.section srizb
-.section sri1a
-.section sri1b
-.section sri2a
-.section sri2b
-.section sri3a
-.section sri3b
-.section sri4a
-.section sri4b
-.section sri5a
-.section sri5b
-.section sri6a
-.section sri6b
-.section sri7a
-.section sri7b
-.section sri8a
-.section sri8b
-.section sri9a
-.section sri9b
-.section sri0a
-.section sri0b
-.section srjaa
-.section srjab
-.section srjba
-.section srjbb
-.section srjca
-.section srjcb
-.section srjda
-.section srjdb
-.section srjea
-.section srjeb
-.section srjfa
-.section srjfb
-.section srjga
-.section srjgb
-.section srjha
-.section srjhb
-.section srjia
-.section srjib
-.section srjja
-.section srjjb
-.section srjka
-.section srjkb
-.section srjla
-.section srjlb
-.section srjma
-.section srjmb
-.section srjna
-.section srjnb
-.section srjoa
-.section srjob
-.section srjpa
-.section srjpb
-.section srjqa
-.section srjqb
-.section srjra
-.section srjrb
-.section srjsa
-.section srjsb
-.section srjta
-.section srjtb
-.section srjua
-.section srjub
-.section srjva
-.section srjvb
-.section srjwa
-.section srjwb
-.section srjxa
-.section srjxb
-.section srjya
-.section srjyb
-.section srjza
-.section srjzb
-.section srj1a
-.section srj1b
-.section srj2a
-.section srj2b
-.section srj3a
-.section srj3b
-.section srj4a
-.section srj4b
-.section srj5a
-.section srj5b
-.section srj6a
-.section srj6b
-.section srj7a
-.section srj7b
-.section srj8a
-.section srj8b
-.section srj9a
-.section srj9b
-.section srj0a
-.section srj0b
-.section srkaa
-.section srkab
-.section srkba
-.section srkbb
-.section srkca
-.section srkcb
-.section srkda
-.section srkdb
-.section srkea
-.section srkeb
-.section srkfa
-.section srkfb
-.section srkga
-.section srkgb
-.section srkha
-.section srkhb
-.section srkia
-.section srkib
-.section srkja
-.section srkjb
-.section srkka
-.section srkkb
-.section srkla
-.section srklb
-.section srkma
-.section srkmb
-.section srkna
-.section srknb
-.section srkoa
-.section srkob
-.section srkpa
-.section srkpb
-.section srkqa
-.section srkqb
-.section srkra
-.section srkrb
-.section srksa
-.section srksb
-.section srkta
-.section srktb
-.section srkua
-.section srkub
-.section srkva
-.section srkvb
-.section srkwa
-.section srkwb
-.section srkxa
-.section srkxb
-.section srkya
-.section srkyb
-.section srkza
-.section srkzb
-.section srk1a
-.section srk1b
-.section srk2a
-.section srk2b
-.section srk3a
-.section srk3b
-.section srk4a
-.section srk4b
-.section srk5a
-.section srk5b
-.section srk6a
-.section srk6b
-.section srk7a
-.section srk7b
-.section srk8a
-.section srk8b
-.section srk9a
-.section srk9b
-.section srk0a
-.section srk0b
-.section srlaa
-.section srlab
-.section srlba
-.section srlbb
-.section srlca
-.section srlcb
-.section srlda
-.section srldb
-.section srlea
-.section srleb
-.section srlfa
-.section srlfb
-.section srlga
-.section srlgb
-.section srlha
-.section srlhb
-.section srlia
-.section srlib
-.section srlja
-.section srljb
-.section srlka
-.section srlkb
-.section srlla
-.section srllb
-.section srlma
-.section srlmb
-.section srlna
-.section srlnb
-.section srloa
-.section srlob
-.section srlpa
-.section srlpb
-.section srlqa
-.section srlqb
-.section srlra
-.section srlrb
-.section srlsa
-.section srlsb
-.section srlta
-.section srltb
-.section srlua
-.section srlub
-.section srlva
-.section srlvb
-.section srlwa
-.section srlwb
-.section srlxa
-.section srlxb
-.section srlya
-.section srlyb
-.section srlza
-.section srlzb
-.section srl1a
-.section srl1b
-.section srl2a
-.section srl2b
-.section srl3a
-.section srl3b
-.section srl4a
-.section srl4b
-.section srl5a
-.section srl5b
-.section srl6a
-.section srl6b
-.section srl7a
-.section srl7b
-.section srl8a
-.section srl8b
-.section srl9a
-.section srl9b
-.section srl0a
-.section srl0b
-.section srmaa
-.section srmab
-.section srmba
-.section srmbb
-.section srmca
-.section srmcb
-.section srmda
-.section srmdb
-.section srmea
-.section srmeb
-.section srmfa
-.section srmfb
-.section srmga
-.section srmgb
-.section srmha
-.section srmhb
-.section srmia
-.section srmib
-.section srmja
-.section srmjb
-.section srmka
-.section srmkb
-.section srmla
-.section srmlb
-.section srmma
-.section srmmb
-.section srmna
-.section srmnb
-.section srmoa
-.section srmob
-.section srmpa
-.section srmpb
-.section srmqa
-.section srmqb
-.section srmra
-.section srmrb
-.section srmsa
-.section srmsb
-.section srmta
-.section srmtb
-.section srmua
-.section srmub
-.section srmva
-.section srmvb
-.section srmwa
-.section srmwb
-.section srmxa
-.section srmxb
-.section srmya
-.section srmyb
-.section srmza
-.section srmzb
-.section srm1a
-.section srm1b
-.section srm2a
-.section srm2b
-.section srm3a
-.section srm3b
-.section srm4a
-.section srm4b
-.section srm5a
-.section srm5b
-.section srm6a
-.section srm6b
-.section srm7a
-.section srm7b
-.section srm8a
-.section srm8b
-.section srm9a
-.section srm9b
-.section srm0a
-.section srm0b
-.section srnaa
-.section srnab
-.section srnba
-.section srnbb
-.section srnca
-.section srncb
-.section srnda
-.section srndb
-.section srnea
-.section srneb
-.section srnfa
-.section srnfb
-.section srnga
-.section srngb
-.section srnha
-.section srnhb
-.section srnia
-.section srnib
-.section srnja
-.section srnjb
-.section srnka
-.section srnkb
-.section srnla
-.section srnlb
-.section srnma
-.section srnmb
-.section srnna
-.section srnnb
-.section srnoa
-.section srnob
-.section srnpa
-.section srnpb
-.section srnqa
-.section srnqb
-.section srnra
-.section srnrb
-.section srnsa
-.section srnsb
-.section srnta
-.section srntb
-.section srnua
-.section srnub
-.section srnva
-.section srnvb
-.section srnwa
-.section srnwb
-.section srnxa
-.section srnxb
-.section srnya
-.section srnyb
-.section srnza
-.section srnzb
-.section srn1a
-.section srn1b
-.section srn2a
-.section srn2b
-.section srn3a
-.section srn3b
-.section srn4a
-.section srn4b
-.section srn5a
-.section srn5b
-.section srn6a
-.section srn6b
-.section srn7a
-.section srn7b
-.section srn8a
-.section srn8b
-.section srn9a
-.section srn9b
-.section srn0a
-.section srn0b
-.section sroaa
-.section sroab
-.section sroba
-.section srobb
-.section sroca
-.section srocb
-.section sroda
-.section srodb
-.section sroea
-.section sroeb
-.section srofa
-.section srofb
-.section sroga
-.section srogb
-.section sroha
-.section srohb
-.section sroia
-.section sroib
-.section sroja
-.section srojb
-.section sroka
-.section srokb
-.section srola
-.section srolb
-.section sroma
-.section sromb
-.section srona
-.section sronb
-.section srooa
-.section sroob
-.section sropa
-.section sropb
-.section sroqa
-.section sroqb
-.section srora
-.section srorb
-.section srosa
-.section srosb
-.section srota
-.section srotb
-.section sroua
-.section sroub
-.section srova
-.section srovb
-.section srowa
-.section srowb
-.section sroxa
-.section sroxb
-.section sroya
-.section sroyb
-.section sroza
-.section srozb
-.section sro1a
-.section sro1b
-.section sro2a
-.section sro2b
-.section sro3a
-.section sro3b
-.section sro4a
-.section sro4b
-.section sro5a
-.section sro5b
-.section sro6a
-.section sro6b
-.section sro7a
-.section sro7b
-.section sro8a
-.section sro8b
-.section sro9a
-.section sro9b
-.section sro0a
-.section sro0b
-.section srpaa
-.section srpab
-.section srpba
-.section srpbb
-.section srpca
-.section srpcb
-.section srpda
-.section srpdb
-.section srpea
-.section srpeb
-.section srpfa
-.section srpfb
-.section srpga
-.section srpgb
-.section srpha
-.section srphb
-.section srpia
-.section srpib
-.section srpja
-.section srpjb
-.section srpka
-.section srpkb
-.section srpla
-.section srplb
-.section srpma
-.section srpmb
-.section srpna
-.section srpnb
-.section srpoa
-.section srpob
-.section srppa
-.section srppb
-.section srpqa
-.section srpqb
-.section srpra
-.section srprb
-.section srpsa
-.section srpsb
-.section srpta
-.section srptb
-.section srpua
-.section srpub
-.section srpva
-.section srpvb
-.section srpwa
-.section srpwb
-.section srpxa
-.section srpxb
-.section srpya
-.section srpyb
-.section srpza
-.section srpzb
-.section srp1a
-.section srp1b
-.section srp2a
-.section srp2b
-.section srp3a
-.section srp3b
-.section srp4a
-.section srp4b
-.section srp5a
-.section srp5b
-.section srp6a
-.section srp6b
-.section srp7a
-.section srp7b
-.section srp8a
-.section srp8b
-.section srp9a
-.section srp9b
-.section srp0a
-.section srp0b
-.section srqaa
-.section srqab
-.section srqba
-.section srqbb
-.section srqca
-.section srqcb
-.section srqda
-.section srqdb
-.section srqea
-.section srqeb
-.section srqfa
-.section srqfb
-.section srqga
-.section srqgb
-.section srqha
-.section srqhb
-.section srqia
-.section srqib
-.section srqja
-.section srqjb
-.section srqka
-.section srqkb
-.section srqla
-.section srqlb
-.section srqma
-.section srqmb
-.section srqna
-.section srqnb
-.section srqoa
-.section srqob
-.section srqpa
-.section srqpb
-.section srqqa
-.section srqqb
-.section srqra
-.section srqrb
-.section srqsa
-.section srqsb
-.section srqta
-.section srqtb
-.section srqua
-.section srqub
-.section srqva
-.section srqvb
-.section srqwa
-.section srqwb
-.section srqxa
-.section srqxb
-.section srqya
-.section srqyb
-.section srqza
-.section srqzb
-.section srq1a
-.section srq1b
-.section srq2a
-.section srq2b
-.section srq3a
-.section srq3b
-.section srq4a
-.section srq4b
-.section srq5a
-.section srq5b
-.section srq6a
-.section srq6b
-.section srq7a
-.section srq7b
-.section srq8a
-.section srq8b
-.section srq9a
-.section srq9b
-.section srq0a
-.section srq0b
-.section srraa
-.section srrab
-.section srrba
-.section srrbb
-.section srrca
-.section srrcb
-.section srrda
-.section srrdb
-.section srrea
-.section srreb
-.section srrfa
-.section srrfb
-.section srrga
-.section srrgb
-.section srrha
-.section srrhb
-.section srria
-.section srrib
-.section srrja
-.section srrjb
-.section srrka
-.section srrkb
-.section srrla
-.section srrlb
-.section srrma
-.section srrmb
-.section srrna
-.section srrnb
-.section srroa
-.section srrob
-.section srrpa
-.section srrpb
-.section srrqa
-.section srrqb
-.section srrra
-.section srrrb
-.section srrsa
-.section srrsb
-.section srrta
-.section srrtb
-.section srrua
-.section srrub
-.section srrva
-.section srrvb
-.section srrwa
-.section srrwb
-.section srrxa
-.section srrxb
-.section srrya
-.section srryb
-.section srrza
-.section srrzb
-.section srr1a
-.section srr1b
-.section srr2a
-.section srr2b
-.section srr3a
-.section srr3b
-.section srr4a
-.section srr4b
-.section srr5a
-.section srr5b
-.section srr6a
-.section srr6b
-.section srr7a
-.section srr7b
-.section srr8a
-.section srr8b
-.section srr9a
-.section srr9b
-.section srr0a
-.section srr0b
-.section srsaa
-.section srsab
-.section srsba
-.section srsbb
-.section srsca
-.section srscb
-.section srsda
-.section srsdb
-.section srsea
-.section srseb
-.section srsfa
-.section srsfb
-.section srsga
-.section srsgb
-.section srsha
-.section srshb
-.section srsia
-.section srsib
-.section srsja
-.section srsjb
-.section srska
-.section srskb
-.section srsla
-.section srslb
-.section srsma
-.section srsmb
-.section srsna
-.section srsnb
-.section srsoa
-.section srsob
-.section srspa
-.section srspb
-.section srsqa
-.section srsqb
-.section srsra
-.section srsrb
-.section srssa
-.section srssb
-.section srsta
-.section srstb
-.section srsua
-.section srsub
-.section srsva
-.section srsvb
-.section srswa
-.section srswb
-.section srsxa
-.section srsxb
-.section srsya
-.section srsyb
-.section srsza
-.section srszb
-.section srs1a
-.section srs1b
-.section srs2a
-.section srs2b
-.section srs3a
-.section srs3b
-.section srs4a
-.section srs4b
-.section srs5a
-.section srs5b
-.section srs6a
-.section srs6b
-.section srs7a
-.section srs7b
-.section srs8a
-.section srs8b
-.section srs9a
-.section srs9b
-.section srs0a
-.section srs0b
-.section srtaa
-.section srtab
-.section srtba
-.section srtbb
-.section srtca
-.section srtcb
-.section srtda
-.section srtdb
-.section srtea
-.section srteb
-.section srtfa
-.section srtfb
-.section srtga
-.section srtgb
-.section srtha
-.section srthb
-.section srtia
-.section srtib
-.section srtja
-.section srtjb
-.section srtka
-.section srtkb
-.section srtla
-.section srtlb
-.section srtma
-.section srtmb
-.section srtna
-.section srtnb
-.section srtoa
-.section srtob
-.section srtpa
-.section srtpb
-.section srtqa
-.section srtqb
-.section srtra
-.section srtrb
-.section srtsa
-.section srtsb
-.section srtta
-.section srttb
-.section srtua
-.section srtub
-.section srtva
-.section srtvb
-.section srtwa
-.section srtwb
-.section srtxa
-.section srtxb
-.section srtya
-.section srtyb
-.section srtza
-.section srtzb
-.section srt1a
-.section srt1b
-.section srt2a
-.section srt2b
-.section srt3a
-.section srt3b
-.section srt4a
-.section srt4b
-.section srt5a
-.section srt5b
-.section srt6a
-.section srt6b
-.section srt7a
-.section srt7b
-.section srt8a
-.section srt8b
-.section srt9a
-.section srt9b
-.section srt0a
-.section srt0b
-.section sruaa
-.section sruab
-.section sruba
-.section srubb
-.section sruca
-.section srucb
-.section sruda
-.section srudb
-.section sruea
-.section srueb
-.section srufa
-.section srufb
-.section sruga
-.section srugb
-.section sruha
-.section sruhb
-.section sruia
-.section sruib
-.section sruja
-.section srujb
-.section sruka
-.section srukb
-.section srula
-.section srulb
-.section sruma
-.section srumb
-.section sruna
-.section srunb
-.section sruoa
-.section sruob
-.section srupa
-.section srupb
-.section sruqa
-.section sruqb
-.section srura
-.section srurb
-.section srusa
-.section srusb
-.section sruta
-.section srutb
-.section sruua
-.section sruub
-.section sruva
-.section sruvb
-.section sruwa
-.section sruwb
-.section sruxa
-.section sruxb
-.section sruya
-.section sruyb
-.section sruza
-.section sruzb
-.section sru1a
-.section sru1b
-.section sru2a
-.section sru2b
-.section sru3a
-.section sru3b
-.section sru4a
-.section sru4b
-.section sru5a
-.section sru5b
-.section sru6a
-.section sru6b
-.section sru7a
-.section sru7b
-.section sru8a
-.section sru8b
-.section sru9a
-.section sru9b
-.section sru0a
-.section sru0b
-.section srvaa
-.section srvab
-.section srvba
-.section srvbb
-.section srvca
-.section srvcb
-.section srvda
-.section srvdb
-.section srvea
-.section srveb
-.section srvfa
-.section srvfb
-.section srvga
-.section srvgb
-.section srvha
-.section srvhb
-.section srvia
-.section srvib
-.section srvja
-.section srvjb
-.section srvka
-.section srvkb
-.section srvla
-.section srvlb
-.section srvma
-.section srvmb
-.section srvna
-.section srvnb
-.section srvoa
-.section srvob
-.section srvpa
-.section srvpb
-.section srvqa
-.section srvqb
-.section srvra
-.section srvrb
-.section srvsa
-.section srvsb
-.section srvta
-.section srvtb
-.section srvua
-.section srvub
-.section srvva
-.section srvvb
-.section srvwa
-.section srvwb
-.section srvxa
-.section srvxb
-.section srvya
-.section srvyb
-.section srvza
-.section srvzb
-.section srv1a
-.section srv1b
-.section srv2a
-.section srv2b
-.section srv3a
-.section srv3b
-.section srv4a
-.section srv4b
-.section srv5a
-.section srv5b
-.section srv6a
-.section srv6b
-.section srv7a
-.section srv7b
-.section srv8a
-.section srv8b
-.section srv9a
-.section srv9b
-.section srv0a
-.section srv0b
-.section srwaa
-.section srwab
-.section srwba
-.section srwbb
-.section srwca
-.section srwcb
-.section srwda
-.section srwdb
-.section srwea
-.section srweb
-.section srwfa
-.section srwfb
-.section srwga
-.section srwgb
-.section srwha
-.section srwhb
-.section srwia
-.section srwib
-.section srwja
-.section srwjb
-.section srwka
-.section srwkb
-.section srwla
-.section srwlb
-.section srwma
-.section srwmb
-.section srwna
-.section srwnb
-.section srwoa
-.section srwob
-.section srwpa
-.section srwpb
-.section srwqa
-.section srwqb
-.section srwra
-.section srwrb
-.section srwsa
-.section srwsb
-.section srwta
-.section srwtb
-.section srwua
-.section srwub
-.section srwva
-.section srwvb
-.section srwwa
-.section srwwb
-.section srwxa
-.section srwxb
-.section srwya
-.section srwyb
-.section srwza
-.section srwzb
-.section srw1a
-.section srw1b
-.section srw2a
-.section srw2b
-.section srw3a
-.section srw3b
-.section srw4a
-.section srw4b
-.section srw5a
-.section srw5b
-.section srw6a
-.section srw6b
-.section srw7a
-.section srw7b
-.section srw8a
-.section srw8b
-.section srw9a
-.section srw9b
-.section srw0a
-.section srw0b
-.section srxaa
-.section srxab
-.section srxba
-.section srxbb
-.section srxca
-.section srxcb
-.section srxda
-.section srxdb
-.section srxea
-.section srxeb
-.section srxfa
-.section srxfb
-.section srxga
-.section srxgb
-.section srxha
-.section srxhb
-.section srxia
-.section srxib
-.section srxja
-.section srxjb
-.section srxka
-.section srxkb
-.section srxla
-.section srxlb
-.section srxma
-.section srxmb
-.section srxna
-.section srxnb
-.section srxoa
-.section srxob
-.section srxpa
-.section srxpb
-.section srxqa
-.section srxqb
-.section srxra
-.section srxrb
-.section srxsa
-.section srxsb
-.section srxta
-.section srxtb
-.section srxua
-.section srxub
-.section srxva
-.section srxvb
-.section srxwa
-.section srxwb
-.section srxxa
-.section srxxb
-.section srxya
-.section srxyb
-.section srxza
-.section srxzb
-.section srx1a
-.section srx1b
-.section srx2a
-.section srx2b
-.section srx3a
-.section srx3b
-.section srx4a
-.section srx4b
-.section srx5a
-.section srx5b
-.section srx6a
-.section srx6b
-.section srx7a
-.section srx7b
-.section srx8a
-.section srx8b
-.section srx9a
-.section srx9b
-.section srx0a
-.section srx0b
-.section sryaa
-.section sryab
-.section sryba
-.section srybb
-.section sryca
-.section srycb
-.section sryda
-.section srydb
-.section sryea
-.section sryeb
-.section sryfa
-.section sryfb
-.section sryga
-.section srygb
-.section sryha
-.section sryhb
-.section sryia
-.section sryib
-.section sryja
-.section sryjb
-.section sryka
-.section srykb
-.section sryla
-.section srylb
-.section sryma
-.section srymb
-.section sryna
-.section srynb
-.section sryoa
-.section sryob
-.section srypa
-.section srypb
-.section sryqa
-.section sryqb
-.section sryra
-.section sryrb
-.section srysa
-.section srysb
-.section sryta
-.section srytb
-.section sryua
-.section sryub
-.section sryva
-.section sryvb
-.section srywa
-.section srywb
-.section sryxa
-.section sryxb
-.section sryya
-.section sryyb
-.section sryza
-.section sryzb
-.section sry1a
-.section sry1b
-.section sry2a
-.section sry2b
-.section sry3a
-.section sry3b
-.section sry4a
-.section sry4b
-.section sry5a
-.section sry5b
-.section sry6a
-.section sry6b
-.section sry7a
-.section sry7b
-.section sry8a
-.section sry8b
-.section sry9a
-.section sry9b
-.section sry0a
-.section sry0b
-.section srzaa
-.section srzab
-.section srzba
-.section srzbb
-.section srzca
-.section srzcb
-.section srzda
-.section srzdb
-.section srzea
-.section srzeb
-.section srzfa
-.section srzfb
-.section srzga
-.section srzgb
-.section srzha
-.section srzhb
-.section srzia
-.section srzib
-.section srzja
-.section srzjb
-.section srzka
-.section srzkb
-.section srzla
-.section srzlb
-.section srzma
-.section srzmb
-.section srzna
-.section srznb
-.section srzoa
-.section srzob
-.section srzpa
-.section srzpb
-.section srzqa
-.section srzqb
-.section srzra
-.section srzrb
-.section srzsa
-.section srzsb
-.section srzta
-.section srztb
-.section srzua
-.section srzub
-.section srzva
-.section srzvb
-.section srzwa
-.section srzwb
-.section srzxa
-.section srzxb
-.section srzya
-.section srzyb
-.section srzza
-.section srzzb
-.section srz1a
-.section srz1b
-.section srz2a
-.section srz2b
-.section srz3a
-.section srz3b
-.section srz4a
-.section srz4b
-.section srz5a
-.section srz5b
-.section srz6a
-.section srz6b
-.section srz7a
-.section srz7b
-.section srz8a
-.section srz8b
-.section srz9a
-.section srz9b
-.section srz0a
-.section srz0b
-.section sr1aa
-.section sr1ab
-.section sr1ba
-.section sr1bb
-.section sr1ca
-.section sr1cb
-.section sr1da
-.section sr1db
-.section sr1ea
-.section sr1eb
-.section sr1fa
-.section sr1fb
-.section sr1ga
-.section sr1gb
-.section sr1ha
-.section sr1hb
-.section sr1ia
-.section sr1ib
-.section sr1ja
-.section sr1jb
-.section sr1ka
-.section sr1kb
-.section sr1la
-.section sr1lb
-.section sr1ma
-.section sr1mb
-.section sr1na
-.section sr1nb
-.section sr1oa
-.section sr1ob
-.section sr1pa
-.section sr1pb
-.section sr1qa
-.section sr1qb
-.section sr1ra
-.section sr1rb
-.section sr1sa
-.section sr1sb
-.section sr1ta
-.section sr1tb
-.section sr1ua
-.section sr1ub
-.section sr1va
-.section sr1vb
-.section sr1wa
-.section sr1wb
-.section sr1xa
-.section sr1xb
-.section sr1ya
-.section sr1yb
-.section sr1za
-.section sr1zb
-.section sr11a
-.section sr11b
-.section sr12a
-.section sr12b
-.section sr13a
-.section sr13b
-.section sr14a
-.section sr14b
-.section sr15a
-.section sr15b
-.section sr16a
-.section sr16b
-.section sr17a
-.section sr17b
-.section sr18a
-.section sr18b
-.section sr19a
-.section sr19b
-.section sr10a
-.section sr10b
-.section sr2aa
-.section sr2ab
-.section sr2ba
-.section sr2bb
-.section sr2ca
-.section sr2cb
-.section sr2da
-.section sr2db
-.section sr2ea
-.section sr2eb
-.section sr2fa
-.section sr2fb
-.section sr2ga
-.section sr2gb
-.section sr2ha
-.section sr2hb
-.section sr2ia
-.section sr2ib
-.section sr2ja
-.section sr2jb
-.section sr2ka
-.section sr2kb
-.section sr2la
-.section sr2lb
-.section sr2ma
-.section sr2mb
-.section sr2na
-.section sr2nb
-.section sr2oa
-.section sr2ob
-.section sr2pa
-.section sr2pb
-.section sr2qa
-.section sr2qb
-.section sr2ra
-.section sr2rb
-.section sr2sa
-.section sr2sb
-.section sr2ta
-.section sr2tb
-.section sr2ua
-.section sr2ub
-.section sr2va
-.section sr2vb
-.section sr2wa
-.section sr2wb
-.section sr2xa
-.section sr2xb
-.section sr2ya
-.section sr2yb
-.section sr2za
-.section sr2zb
-.section sr21a
-.section sr21b
-.section sr22a
-.section sr22b
-.section sr23a
-.section sr23b
-.section sr24a
-.section sr24b
-.section sr25a
-.section sr25b
-.section sr26a
-.section sr26b
-.section sr27a
-.section sr27b
-.section sr28a
-.section sr28b
-.section sr29a
-.section sr29b
-.section sr20a
-.section sr20b
-.section sr3aa
-.section sr3ab
-.section sr3ba
-.section sr3bb
-.section sr3ca
-.section sr3cb
-.section sr3da
-.section sr3db
-.section sr3ea
-.section sr3eb
-.section sr3fa
-.section sr3fb
-.section sr3ga
-.section sr3gb
-.section sr3ha
-.section sr3hb
-.section sr3ia
-.section sr3ib
-.section sr3ja
-.section sr3jb
-.section sr3ka
-.section sr3kb
-.section sr3la
-.section sr3lb
-.section sr3ma
-.section sr3mb
-.section sr3na
-.section sr3nb
-.section sr3oa
-.section sr3ob
-.section sr3pa
-.section sr3pb
-.section sr3qa
-.section sr3qb
-.section sr3ra
-.section sr3rb
-.section sr3sa
-.section sr3sb
-.section sr3ta
-.section sr3tb
-.section sr3ua
-.section sr3ub
-.section sr3va
-.section sr3vb
-.section sr3wa
-.section sr3wb
-.section sr3xa
-.section sr3xb
-.section sr3ya
-.section sr3yb
-.section sr3za
-.section sr3zb
-.section sr31a
-.section sr31b
-.section sr32a
-.section sr32b
-.section sr33a
-.section sr33b
-.section sr34a
-.section sr34b
-.section sr35a
-.section sr35b
-.section sr36a
-.section sr36b
-.section sr37a
-.section sr37b
-.section sr38a
-.section sr38b
-.section sr39a
-.section sr39b
-.section sr30a
-.section sr30b
-.section sr4aa
-.section sr4ab
-.section sr4ba
-.section sr4bb
-.section sr4ca
-.section sr4cb
-.section sr4da
-.section sr4db
-.section sr4ea
-.section sr4eb
-.section sr4fa
-.section sr4fb
-.section sr4ga
-.section sr4gb
-.section sr4ha
-.section sr4hb
-.section sr4ia
-.section sr4ib
-.section sr4ja
-.section sr4jb
-.section sr4ka
-.section sr4kb
-.section sr4la
-.section sr4lb
-.section sr4ma
-.section sr4mb
-.section sr4na
-.section sr4nb
-.section sr4oa
-.section sr4ob
-.section sr4pa
-.section sr4pb
-.section sr4qa
-.section sr4qb
-.section sr4ra
-.section sr4rb
-.section sr4sa
-.section sr4sb
-.section sr4ta
-.section sr4tb
-.section sr4ua
-.section sr4ub
-.section sr4va
-.section sr4vb
-.section sr4wa
-.section sr4wb
-.section sr4xa
-.section sr4xb
-.section sr4ya
-.section sr4yb
-.section sr4za
-.section sr4zb
-.section sr41a
-.section sr41b
-.section sr42a
-.section sr42b
-.section sr43a
-.section sr43b
-.section sr44a
-.section sr44b
-.section sr45a
-.section sr45b
-.section sr46a
-.section sr46b
-.section sr47a
-.section sr47b
-.section sr48a
-.section sr48b
-.section sr49a
-.section sr49b
-.section sr40a
-.section sr40b
-.section sr5aa
-.section sr5ab
-.section sr5ba
-.section sr5bb
-.section sr5ca
-.section sr5cb
-.section sr5da
-.section sr5db
-.section sr5ea
-.section sr5eb
-.section sr5fa
-.section sr5fb
-.section sr5ga
-.section sr5gb
-.section sr5ha
-.section sr5hb
-.section sr5ia
-.section sr5ib
-.section sr5ja
-.section sr5jb
-.section sr5ka
-.section sr5kb
-.section sr5la
-.section sr5lb
-.section sr5ma
-.section sr5mb
-.section sr5na
-.section sr5nb
-.section sr5oa
-.section sr5ob
-.section sr5pa
-.section sr5pb
-.section sr5qa
-.section sr5qb
-.section sr5ra
-.section sr5rb
-.section sr5sa
-.section sr5sb
-.section sr5ta
-.section sr5tb
-.section sr5ua
-.section sr5ub
-.section sr5va
-.section sr5vb
-.section sr5wa
-.section sr5wb
-.section sr5xa
-.section sr5xb
-.section sr5ya
-.section sr5yb
-.section sr5za
-.section sr5zb
-.section sr51a
-.section sr51b
-.section sr52a
-.section sr52b
-.section sr53a
-.section sr53b
-.section sr54a
-.section sr54b
-.section sr55a
-.section sr55b
-.section sr56a
-.section sr56b
-.section sr57a
-.section sr57b
-.section sr58a
-.section sr58b
-.section sr59a
-.section sr59b
-.section sr50a
-.section sr50b
-.section sr6aa
-.section sr6ab
-.section sr6ba
-.section sr6bb
-.section sr6ca
-.section sr6cb
-.section sr6da
-.section sr6db
-.section sr6ea
-.section sr6eb
-.section sr6fa
-.section sr6fb
-.section sr6ga
-.section sr6gb
-.section sr6ha
-.section sr6hb
-.section sr6ia
-.section sr6ib
-.section sr6ja
-.section sr6jb
-.section sr6ka
-.section sr6kb
-.section sr6la
-.section sr6lb
-.section sr6ma
-.section sr6mb
-.section sr6na
-.section sr6nb
-.section sr6oa
-.section sr6ob
-.section sr6pa
-.section sr6pb
-.section sr6qa
-.section sr6qb
-.section sr6ra
-.section sr6rb
-.section sr6sa
-.section sr6sb
-.section sr6ta
-.section sr6tb
-.section sr6ua
-.section sr6ub
-.section sr6va
-.section sr6vb
-.section sr6wa
-.section sr6wb
-.section sr6xa
-.section sr6xb
-.section sr6ya
-.section sr6yb
-.section sr6za
-.section sr6zb
-.section sr61a
-.section sr61b
-.section sr62a
-.section sr62b
-.section sr63a
-.section sr63b
-.section sr64a
-.section sr64b
-.section sr65a
-.section sr65b
-.section sr66a
-.section sr66b
-.section sr67a
-.section sr67b
-.section sr68a
-.section sr68b
-.section sr69a
-.section sr69b
-.section sr60a
-.section sr60b
-.section sr7aa
-.section sr7ab
-.section sr7ba
-.section sr7bb
-.section sr7ca
-.section sr7cb
-.section sr7da
-.section sr7db
-.section sr7ea
-.section sr7eb
-.section sr7fa
-.section sr7fb
-.section sr7ga
-.section sr7gb
-.section sr7ha
-.section sr7hb
-.section sr7ia
-.section sr7ib
-.section sr7ja
-.section sr7jb
-.section sr7ka
-.section sr7kb
-.section sr7la
-.section sr7lb
-.section sr7ma
-.section sr7mb
-.section sr7na
-.section sr7nb
-.section sr7oa
-.section sr7ob
-.section sr7pa
-.section sr7pb
-.section sr7qa
-.section sr7qb
-.section sr7ra
-.section sr7rb
-.section sr7sa
-.section sr7sb
-.section sr7ta
-.section sr7tb
-.section sr7ua
-.section sr7ub
-.section sr7va
-.section sr7vb
-.section sr7wa
-.section sr7wb
-.section sr7xa
-.section sr7xb
-.section sr7ya
-.section sr7yb
-.section sr7za
-.section sr7zb
-.section sr71a
-.section sr71b
-.section sr72a
-.section sr72b
-.section sr73a
-.section sr73b
-.section sr74a
-.section sr74b
-.section sr75a
-.section sr75b
-.section sr76a
-.section sr76b
-.section sr77a
-.section sr77b
-.section sr78a
-.section sr78b
-.section sr79a
-.section sr79b
-.section sr70a
-.section sr70b
-.section sr8aa
-.section sr8ab
-.section sr8ba
-.section sr8bb
-.section sr8ca
-.section sr8cb
-.section sr8da
-.section sr8db
-.section sr8ea
-.section sr8eb
-.section sr8fa
-.section sr8fb
-.section sr8ga
-.section sr8gb
-.section sr8ha
-.section sr8hb
-.section sr8ia
-.section sr8ib
-.section sr8ja
-.section sr8jb
-.section sr8ka
-.section sr8kb
-.section sr8la
-.section sr8lb
-.section sr8ma
-.section sr8mb
-.section sr8na
-.section sr8nb
-.section sr8oa
-.section sr8ob
-.section sr8pa
-.section sr8pb
-.section sr8qa
-.section sr8qb
-.section sr8ra
-.section sr8rb
-.section sr8sa
-.section sr8sb
-.section sr8ta
-.section sr8tb
-.section sr8ua
-.section sr8ub
-.section sr8va
-.section sr8vb
-.section sr8wa
-.section sr8wb
-.section sr8xa
-.section sr8xb
-.section sr8ya
-.section sr8yb
-.section sr8za
-.section sr8zb
-.section sr81a
-.section sr81b
-.section sr82a
-.section sr82b
-.section sr83a
-.section sr83b
-.section sr84a
-.section sr84b
-.section sr85a
-.section sr85b
-.section sr86a
-.section sr86b
-.section sr87a
-.section sr87b
-.section sr88a
-.section sr88b
-.section sr89a
-.section sr89b
-.section sr80a
-.section sr80b
-.section sr9aa
-.section sr9ab
-.section sr9ba
-.section sr9bb
-.section sr9ca
-.section sr9cb
-.section sr9da
-.section sr9db
-.section sr9ea
-.section sr9eb
-.section sr9fa
-.section sr9fb
-.section sr9ga
-.section sr9gb
-.section sr9ha
-.section sr9hb
-.section sr9ia
-.section sr9ib
-.section sr9ja
-.section sr9jb
-.section sr9ka
-.section sr9kb
-.section sr9la
-.section sr9lb
-.section sr9ma
-.section sr9mb
-.section sr9na
-.section sr9nb
-.section sr9oa
-.section sr9ob
-.section sr9pa
-.section sr9pb
-.section sr9qa
-.section sr9qb
-.section sr9ra
-.section sr9rb
-.section sr9sa
-.section sr9sb
-.section sr9ta
-.section sr9tb
-.section sr9ua
-.section sr9ub
-.section sr9va
-.section sr9vb
-.section sr9wa
-.section sr9wb
-.section sr9xa
-.section sr9xb
-.section sr9ya
-.section sr9yb
-.section sr9za
-.section sr9zb
-.section sr91a
-.section sr91b
-.section sr92a
-.section sr92b
-.section sr93a
-.section sr93b
-.section sr94a
-.section sr94b
-.section sr95a
-.section sr95b
-.section sr96a
-.section sr96b
-.section sr97a
-.section sr97b
-.section sr98a
-.section sr98b
-.section sr99a
-.section sr99b
-.section sr90a
-.section sr90b
-.section sr0aa
-.section sr0ab
-.section sr0ba
-.section sr0bb
-.section sr0ca
-.section sr0cb
-.section sr0da
-.section sr0db
-.section sr0ea
-.section sr0eb
-.section sr0fa
-.section sr0fb
-.section sr0ga
-.section sr0gb
-.section sr0ha
-.section sr0hb
-.section sr0ia
-.section sr0ib
-.section sr0ja
-.section sr0jb
-.section sr0ka
-.section sr0kb
-.section sr0la
-.section sr0lb
-.section sr0ma
-.section sr0mb
-.section sr0na
-.section sr0nb
-.section sr0oa
-.section sr0ob
-.section sr0pa
-.section sr0pb
-.section sr0qa
-.section sr0qb
-.section sr0ra
-.section sr0rb
-.section sr0sa
-.section sr0sb
-.section sr0ta
-.section sr0tb
-.section sr0ua
-.section sr0ub
-.section sr0va
-.section sr0vb
-.section sr0wa
-.section sr0wb
-.section sr0xa
-.section sr0xb
-.section sr0ya
-.section sr0yb
-.section sr0za
-.section sr0zb
-.section sr01a
-.section sr01b
-.section sr02a
-.section sr02b
-.section sr03a
-.section sr03b
-.section sr04a
-.section sr04b
-.section sr05a
-.section sr05b
-.section sr06a
-.section sr06b
-.section sr07a
-.section sr07b
-.section sr08a
-.section sr08b
-.section sr09a
-.section sr09b
-.section sr00a
-.section sr00b
-.section ssaaa
-.section ssaab
-.section ssaba
-.section ssabb
-.section ssaca
-.section ssacb
-.section ssada
-.section ssadb
-.section ssaea
-.section ssaeb
-.section ssafa
-.section ssafb
-.section ssaga
-.section ssagb
-.section ssaha
-.section ssahb
-.section ssaia
-.section ssaib
-.section ssaja
-.section ssajb
-.section ssaka
-.section ssakb
-.section ssala
-.section ssalb
-.section ssama
-.section ssamb
-.section ssana
-.section ssanb
-.section ssaoa
-.section ssaob
-.section ssapa
-.section ssapb
-.section ssaqa
-.section ssaqb
-.section ssara
-.section ssarb
-.section ssasa
-.section ssasb
-.section ssata
-.section ssatb
-.section ssaua
-.section ssaub
-.section ssava
-.section ssavb
-.section ssawa
-.section ssawb
-.section ssaxa
-.section ssaxb
-.section ssaya
-.section ssayb
-.section ssaza
-.section ssazb
-.section ssa1a
-.section ssa1b
-.section ssa2a
-.section ssa2b
-.section ssa3a
-.section ssa3b
-.section ssa4a
-.section ssa4b
-.section ssa5a
-.section ssa5b
-.section ssa6a
-.section ssa6b
-.section ssa7a
-.section ssa7b
-.section ssa8a
-.section ssa8b
-.section ssa9a
-.section ssa9b
-.section ssa0a
-.section ssa0b
-.section ssbaa
-.section ssbab
-.section ssbba
-.section ssbbb
-.section ssbca
-.section ssbcb
-.section ssbda
-.section ssbdb
-.section ssbea
-.section ssbeb
-.section ssbfa
-.section ssbfb
-.section ssbga
-.section ssbgb
-.section ssbha
-.section ssbhb
-.section ssbia
-.section ssbib
-.section ssbja
-.section ssbjb
-.section ssbka
-.section ssbkb
-.section ssbla
-.section ssblb
-.section ssbma
-.section ssbmb
-.section ssbna
-.section ssbnb
-.section ssboa
-.section ssbob
-.section ssbpa
-.section ssbpb
-.section ssbqa
-.section ssbqb
-.section ssbra
-.section ssbrb
-.section ssbsa
-.section ssbsb
-.section ssbta
-.section ssbtb
-.section ssbua
-.section ssbub
-.section ssbva
-.section ssbvb
-.section ssbwa
-.section ssbwb
-.section ssbxa
-.section ssbxb
-.section ssbya
-.section ssbyb
-.section ssbza
-.section ssbzb
-.section ssb1a
-.section ssb1b
-.section ssb2a
-.section ssb2b
-.section ssb3a
-.section ssb3b
-.section ssb4a
-.section ssb4b
-.section ssb5a
-.section ssb5b
-.section ssb6a
-.section ssb6b
-.section ssb7a
-.section ssb7b
-.section ssb8a
-.section ssb8b
-.section ssb9a
-.section ssb9b
-.section ssb0a
-.section ssb0b
-.section sscaa
-.section sscab
-.section sscba
-.section sscbb
-.section sscca
-.section ssccb
-.section sscda
-.section sscdb
-.section sscea
-.section ssceb
-.section sscfa
-.section sscfb
-.section sscga
-.section sscgb
-.section sscha
-.section sschb
-.section sscia
-.section sscib
-.section sscja
-.section sscjb
-.section sscka
-.section ssckb
-.section sscla
-.section ssclb
-.section sscma
-.section sscmb
-.section sscna
-.section sscnb
-.section sscoa
-.section sscob
-.section sscpa
-.section sscpb
-.section sscqa
-.section sscqb
-.section sscra
-.section sscrb
-.section sscsa
-.section sscsb
-.section sscta
-.section ssctb
-.section sscua
-.section sscub
-.section sscva
-.section sscvb
-.section sscwa
-.section sscwb
-.section sscxa
-.section sscxb
-.section sscya
-.section sscyb
-.section sscza
-.section ssczb
-.section ssc1a
-.section ssc1b
-.section ssc2a
-.section ssc2b
-.section ssc3a
-.section ssc3b
-.section ssc4a
-.section ssc4b
-.section ssc5a
-.section ssc5b
-.section ssc6a
-.section ssc6b
-.section ssc7a
-.section ssc7b
-.section ssc8a
-.section ssc8b
-.section ssc9a
-.section ssc9b
-.section ssc0a
-.section ssc0b
-.section ssdaa
-.section ssdab
-.section ssdba
-.section ssdbb
-.section ssdca
-.section ssdcb
-.section ssdda
-.section ssddb
-.section ssdea
-.section ssdeb
-.section ssdfa
-.section ssdfb
-.section ssdga
-.section ssdgb
-.section ssdha
-.section ssdhb
-.section ssdia
-.section ssdib
-.section ssdja
-.section ssdjb
-.section ssdka
-.section ssdkb
-.section ssdla
-.section ssdlb
-.section ssdma
-.section ssdmb
-.section ssdna
-.section ssdnb
-.section ssdoa
-.section ssdob
-.section ssdpa
-.section ssdpb
-.section ssdqa
-.section ssdqb
-.section ssdra
-.section ssdrb
-.section ssdsa
-.section ssdsb
-.section ssdta
-.section ssdtb
-.section ssdua
-.section ssdub
-.section ssdva
-.section ssdvb
-.section ssdwa
-.section ssdwb
-.section ssdxa
-.section ssdxb
-.section ssdya
-.section ssdyb
-.section ssdza
-.section ssdzb
-.section ssd1a
-.section ssd1b
-.section ssd2a
-.section ssd2b
-.section ssd3a
-.section ssd3b
-.section ssd4a
-.section ssd4b
-.section ssd5a
-.section ssd5b
-.section ssd6a
-.section ssd6b
-.section ssd7a
-.section ssd7b
-.section ssd8a
-.section ssd8b
-.section ssd9a
-.section ssd9b
-.section ssd0a
-.section ssd0b
-.section sseaa
-.section sseab
-.section sseba
-.section ssebb
-.section sseca
-.section ssecb
-.section sseda
-.section ssedb
-.section sseea
-.section sseeb
-.section ssefa
-.section ssefb
-.section ssega
-.section ssegb
-.section sseha
-.section ssehb
-.section sseia
-.section sseib
-.section sseja
-.section ssejb
-.section sseka
-.section ssekb
-.section ssela
-.section sselb
-.section ssema
-.section ssemb
-.section ssena
-.section ssenb
-.section sseoa
-.section sseob
-.section ssepa
-.section ssepb
-.section sseqa
-.section sseqb
-.section ssera
-.section sserb
-.section ssesa
-.section ssesb
-.section sseta
-.section ssetb
-.section sseua
-.section sseub
-.section sseva
-.section ssevb
-.section ssewa
-.section ssewb
-.section ssexa
-.section ssexb
-.section sseya
-.section sseyb
-.section sseza
-.section ssezb
-.section sse1a
-.section sse1b
-.section sse2a
-.section sse2b
-.section sse3a
-.section sse3b
-.section sse4a
-.section sse4b
-.section sse5a
-.section sse5b
-.section sse6a
-.section sse6b
-.section sse7a
-.section sse7b
-.section sse8a
-.section sse8b
-.section sse9a
-.section sse9b
-.section sse0a
-.section sse0b
-.section ssfaa
-.section ssfab
-.section ssfba
-.section ssfbb
-.section ssfca
-.section ssfcb
-.section ssfda
-.section ssfdb
-.section ssfea
-.section ssfeb
-.section ssffa
-.section ssffb
-.section ssfga
-.section ssfgb
-.section ssfha
-.section ssfhb
-.section ssfia
-.section ssfib
-.section ssfja
-.section ssfjb
-.section ssfka
-.section ssfkb
-.section ssfla
-.section ssflb
-.section ssfma
-.section ssfmb
-.section ssfna
-.section ssfnb
-.section ssfoa
-.section ssfob
-.section ssfpa
-.section ssfpb
-.section ssfqa
-.section ssfqb
-.section ssfra
-.section ssfrb
-.section ssfsa
-.section ssfsb
-.section ssfta
-.section ssftb
-.section ssfua
-.section ssfub
-.section ssfva
-.section ssfvb
-.section ssfwa
-.section ssfwb
-.section ssfxa
-.section ssfxb
-.section ssfya
-.section ssfyb
-.section ssfza
-.section ssfzb
-.section ssf1a
-.section ssf1b
-.section ssf2a
-.section ssf2b
-.section ssf3a
-.section ssf3b
-.section ssf4a
-.section ssf4b
-.section ssf5a
-.section ssf5b
-.section ssf6a
-.section ssf6b
-.section ssf7a
-.section ssf7b
-.section ssf8a
-.section ssf8b
-.section ssf9a
-.section ssf9b
-.section ssf0a
-.section ssf0b
-.section ssgaa
-.section ssgab
-.section ssgba
-.section ssgbb
-.section ssgca
-.section ssgcb
-.section ssgda
-.section ssgdb
-.section ssgea
-.section ssgeb
-.section ssgfa
-.section ssgfb
-.section ssgga
-.section ssggb
-.section ssgha
-.section ssghb
-.section ssgia
-.section ssgib
-.section ssgja
-.section ssgjb
-.section ssgka
-.section ssgkb
-.section ssgla
-.section ssglb
-.section ssgma
-.section ssgmb
-.section ssgna
-.section ssgnb
-.section ssgoa
-.section ssgob
-.section ssgpa
-.section ssgpb
-.section ssgqa
-.section ssgqb
-.section ssgra
-.section ssgrb
-.section ssgsa
-.section ssgsb
-.section ssgta
-.section ssgtb
-.section ssgua
-.section ssgub
-.section ssgva
-.section ssgvb
-.section ssgwa
-.section ssgwb
-.section ssgxa
-.section ssgxb
-.section ssgya
-.section ssgyb
-.section ssgza
-.section ssgzb
-.section ssg1a
-.section ssg1b
-.section ssg2a
-.section ssg2b
-.section ssg3a
-.section ssg3b
-.section ssg4a
-.section ssg4b
-.section ssg5a
-.section ssg5b
-.section ssg6a
-.section ssg6b
-.section ssg7a
-.section ssg7b
-.section ssg8a
-.section ssg8b
-.section ssg9a
-.section ssg9b
-.section ssg0a
-.section ssg0b
-.section sshaa
-.section sshab
-.section sshba
-.section sshbb
-.section sshca
-.section sshcb
-.section sshda
-.section sshdb
-.section sshea
-.section ssheb
-.section sshfa
-.section sshfb
-.section sshga
-.section sshgb
-.section sshha
-.section sshhb
-.section sshia
-.section sshib
-.section sshja
-.section sshjb
-.section sshka
-.section sshkb
-.section sshla
-.section sshlb
-.section sshma
-.section sshmb
-.section sshna
-.section sshnb
-.section sshoa
-.section sshob
-.section sshpa
-.section sshpb
-.section sshqa
-.section sshqb
-.section sshra
-.section sshrb
-.section sshsa
-.section sshsb
-.section sshta
-.section sshtb
-.section sshua
-.section sshub
-.section sshva
-.section sshvb
-.section sshwa
-.section sshwb
-.section sshxa
-.section sshxb
-.section sshya
-.section sshyb
-.section sshza
-.section sshzb
-.section ssh1a
-.section ssh1b
-.section ssh2a
-.section ssh2b
-.section ssh3a
-.section ssh3b
-.section ssh4a
-.section ssh4b
-.section ssh5a
-.section ssh5b
-.section ssh6a
-.section ssh6b
-.section ssh7a
-.section ssh7b
-.section ssh8a
-.section ssh8b
-.section ssh9a
-.section ssh9b
-.section ssh0a
-.section ssh0b
-.section ssiaa
-.section ssiab
-.section ssiba
-.section ssibb
-.section ssica
-.section ssicb
-.section ssida
-.section ssidb
-.section ssiea
-.section ssieb
-.section ssifa
-.section ssifb
-.section ssiga
-.section ssigb
-.section ssiha
-.section ssihb
-.section ssiia
-.section ssiib
-.section ssija
-.section ssijb
-.section ssika
-.section ssikb
-.section ssila
-.section ssilb
-.section ssima
-.section ssimb
-.section ssina
-.section ssinb
-.section ssioa
-.section ssiob
-.section ssipa
-.section ssipb
-.section ssiqa
-.section ssiqb
-.section ssira
-.section ssirb
-.section ssisa
-.section ssisb
-.section ssita
-.section ssitb
-.section ssiua
-.section ssiub
-.section ssiva
-.section ssivb
-.section ssiwa
-.section ssiwb
-.section ssixa
-.section ssixb
-.section ssiya
-.section ssiyb
-.section ssiza
-.section ssizb
-.section ssi1a
-.section ssi1b
-.section ssi2a
-.section ssi2b
-.section ssi3a
-.section ssi3b
-.section ssi4a
-.section ssi4b
-.section ssi5a
-.section ssi5b
-.section ssi6a
-.section ssi6b
-.section ssi7a
-.section ssi7b
-.section ssi8a
-.section ssi8b
-.section ssi9a
-.section ssi9b
-.section ssi0a
-.section ssi0b
-.section ssjaa
-.section ssjab
-.section ssjba
-.section ssjbb
-.section ssjca
-.section ssjcb
-.section ssjda
-.section ssjdb
-.section ssjea
-.section ssjeb
-.section ssjfa
-.section ssjfb
-.section ssjga
-.section ssjgb
-.section ssjha
-.section ssjhb
-.section ssjia
-.section ssjib
-.section ssjja
-.section ssjjb
-.section ssjka
-.section ssjkb
-.section ssjla
-.section ssjlb
-.section ssjma
-.section ssjmb
-.section ssjna
-.section ssjnb
-.section ssjoa
-.section ssjob
-.section ssjpa
-.section ssjpb
-.section ssjqa
-.section ssjqb
-.section ssjra
-.section ssjrb
-.section ssjsa
-.section ssjsb
-.section ssjta
-.section ssjtb
-.section ssjua
-.section ssjub
-.section ssjva
-.section ssjvb
-.section ssjwa
-.section ssjwb
-.section ssjxa
-.section ssjxb
-.section ssjya
-.section ssjyb
-.section ssjza
-.section ssjzb
-.section ssj1a
-.section ssj1b
-.section ssj2a
-.section ssj2b
-.section ssj3a
-.section ssj3b
-.section ssj4a
-.section ssj4b
-.section ssj5a
-.section ssj5b
-.section ssj6a
-.section ssj6b
-.section ssj7a
-.section ssj7b
-.section ssj8a
-.section ssj8b
-.section ssj9a
-.section ssj9b
-.section ssj0a
-.section ssj0b
-.section sskaa
-.section sskab
-.section sskba
-.section sskbb
-.section sskca
-.section sskcb
-.section sskda
-.section sskdb
-.section sskea
-.section sskeb
-.section sskfa
-.section sskfb
-.section sskga
-.section sskgb
-.section sskha
-.section sskhb
-.section sskia
-.section sskib
-.section sskja
-.section sskjb
-.section sskka
-.section sskkb
-.section sskla
-.section ssklb
-.section sskma
-.section sskmb
-.section sskna
-.section ssknb
-.section sskoa
-.section sskob
-.section sskpa
-.section sskpb
-.section sskqa
-.section sskqb
-.section sskra
-.section sskrb
-.section ssksa
-.section ssksb
-.section sskta
-.section ssktb
-.section sskua
-.section sskub
-.section sskva
-.section sskvb
-.section sskwa
-.section sskwb
-.section sskxa
-.section sskxb
-.section sskya
-.section sskyb
-.section sskza
-.section sskzb
-.section ssk1a
-.section ssk1b
-.section ssk2a
-.section ssk2b
-.section ssk3a
-.section ssk3b
-.section ssk4a
-.section ssk4b
-.section ssk5a
-.section ssk5b
-.section ssk6a
-.section ssk6b
-.section ssk7a
-.section ssk7b
-.section ssk8a
-.section ssk8b
-.section ssk9a
-.section ssk9b
-.section ssk0a
-.section ssk0b
-.section sslaa
-.section sslab
-.section sslba
-.section sslbb
-.section sslca
-.section sslcb
-.section sslda
-.section ssldb
-.section sslea
-.section ssleb
-.section sslfa
-.section sslfb
-.section sslga
-.section sslgb
-.section sslha
-.section sslhb
-.section sslia
-.section sslib
-.section sslja
-.section ssljb
-.section sslka
-.section sslkb
-.section sslla
-.section ssllb
-.section sslma
-.section sslmb
-.section sslna
-.section sslnb
-.section ssloa
-.section sslob
-.section sslpa
-.section sslpb
-.section sslqa
-.section sslqb
-.section sslra
-.section sslrb
-.section sslsa
-.section sslsb
-.section sslta
-.section ssltb
-.section sslua
-.section sslub
-.section sslva
-.section sslvb
-.section sslwa
-.section sslwb
-.section sslxa
-.section sslxb
-.section sslya
-.section sslyb
-.section sslza
-.section sslzb
-.section ssl1a
-.section ssl1b
-.section ssl2a
-.section ssl2b
-.section ssl3a
-.section ssl3b
-.section ssl4a
-.section ssl4b
-.section ssl5a
-.section ssl5b
-.section ssl6a
-.section ssl6b
-.section ssl7a
-.section ssl7b
-.section ssl8a
-.section ssl8b
-.section ssl9a
-.section ssl9b
-.section ssl0a
-.section ssl0b
-.section ssmaa
-.section ssmab
-.section ssmba
-.section ssmbb
-.section ssmca
-.section ssmcb
-.section ssmda
-.section ssmdb
-.section ssmea
-.section ssmeb
-.section ssmfa
-.section ssmfb
-.section ssmga
-.section ssmgb
-.section ssmha
-.section ssmhb
-.section ssmia
-.section ssmib
-.section ssmja
-.section ssmjb
-.section ssmka
-.section ssmkb
-.section ssmla
-.section ssmlb
-.section ssmma
-.section ssmmb
-.section ssmna
-.section ssmnb
-.section ssmoa
-.section ssmob
-.section ssmpa
-.section ssmpb
-.section ssmqa
-.section ssmqb
-.section ssmra
-.section ssmrb
-.section ssmsa
-.section ssmsb
-.section ssmta
-.section ssmtb
-.section ssmua
-.section ssmub
-.section ssmva
-.section ssmvb
-.section ssmwa
-.section ssmwb
-.section ssmxa
-.section ssmxb
-.section ssmya
-.section ssmyb
-.section ssmza
-.section ssmzb
-.section ssm1a
-.section ssm1b
-.section ssm2a
-.section ssm2b
-.section ssm3a
-.section ssm3b
-.section ssm4a
-.section ssm4b
-.section ssm5a
-.section ssm5b
-.section ssm6a
-.section ssm6b
-.section ssm7a
-.section ssm7b
-.section ssm8a
-.section ssm8b
-.section ssm9a
-.section ssm9b
-.section ssm0a
-.section ssm0b
-.section ssnaa
-.section ssnab
-.section ssnba
-.section ssnbb
-.section ssnca
-.section ssncb
-.section ssnda
-.section ssndb
-.section ssnea
-.section ssneb
-.section ssnfa
-.section ssnfb
-.section ssnga
-.section ssngb
-.section ssnha
-.section ssnhb
-.section ssnia
-.section ssnib
-.section ssnja
-.section ssnjb
-.section ssnka
-.section ssnkb
-.section ssnla
-.section ssnlb
-.section ssnma
-.section ssnmb
-.section ssnna
-.section ssnnb
-.section ssnoa
-.section ssnob
-.section ssnpa
-.section ssnpb
-.section ssnqa
-.section ssnqb
-.section ssnra
-.section ssnrb
-.section ssnsa
-.section ssnsb
-.section ssnta
-.section ssntb
-.section ssnua
-.section ssnub
-.section ssnva
-.section ssnvb
-.section ssnwa
-.section ssnwb
-.section ssnxa
-.section ssnxb
-.section ssnya
-.section ssnyb
-.section ssnza
-.section ssnzb
-.section ssn1a
-.section ssn1b
-.section ssn2a
-.section ssn2b
-.section ssn3a
-.section ssn3b
-.section ssn4a
-.section ssn4b
-.section ssn5a
-.section ssn5b
-.section ssn6a
-.section ssn6b
-.section ssn7a
-.section ssn7b
-.section ssn8a
-.section ssn8b
-.section ssn9a
-.section ssn9b
-.section ssn0a
-.section ssn0b
-.section ssoaa
-.section ssoab
-.section ssoba
-.section ssobb
-.section ssoca
-.section ssocb
-.section ssoda
-.section ssodb
-.section ssoea
-.section ssoeb
-.section ssofa
-.section ssofb
-.section ssoga
-.section ssogb
-.section ssoha
-.section ssohb
-.section ssoia
-.section ssoib
-.section ssoja
-.section ssojb
-.section ssoka
-.section ssokb
-.section ssola
-.section ssolb
-.section ssoma
-.section ssomb
-.section ssona
-.section ssonb
-.section ssooa
-.section ssoob
-.section ssopa
-.section ssopb
-.section ssoqa
-.section ssoqb
-.section ssora
-.section ssorb
-.section ssosa
-.section ssosb
-.section ssota
-.section ssotb
-.section ssoua
-.section ssoub
-.section ssova
-.section ssovb
-.section ssowa
-.section ssowb
-.section ssoxa
-.section ssoxb
-.section ssoya
-.section ssoyb
-.section ssoza
-.section ssozb
-.section sso1a
-.section sso1b
-.section sso2a
-.section sso2b
-.section sso3a
-.section sso3b
-.section sso4a
-.section sso4b
-.section sso5a
-.section sso5b
-.section sso6a
-.section sso6b
-.section sso7a
-.section sso7b
-.section sso8a
-.section sso8b
-.section sso9a
-.section sso9b
-.section sso0a
-.section sso0b
-.section sspaa
-.section sspab
-.section sspba
-.section sspbb
-.section sspca
-.section sspcb
-.section sspda
-.section sspdb
-.section sspea
-.section sspeb
-.section sspfa
-.section sspfb
-.section sspga
-.section sspgb
-.section sspha
-.section ssphb
-.section sspia
-.section sspib
-.section sspja
-.section sspjb
-.section sspka
-.section sspkb
-.section sspla
-.section ssplb
-.section sspma
-.section sspmb
-.section sspna
-.section sspnb
-.section sspoa
-.section sspob
-.section ssppa
-.section ssppb
-.section sspqa
-.section sspqb
-.section sspra
-.section ssprb
-.section sspsa
-.section sspsb
-.section sspta
-.section ssptb
-.section sspua
-.section sspub
-.section sspva
-.section sspvb
-.section sspwa
-.section sspwb
-.section sspxa
-.section sspxb
-.section sspya
-.section sspyb
-.section sspza
-.section sspzb
-.section ssp1a
-.section ssp1b
-.section ssp2a
-.section ssp2b
-.section ssp3a
-.section ssp3b
-.section ssp4a
-.section ssp4b
-.section ssp5a
-.section ssp5b
-.section ssp6a
-.section ssp6b
-.section ssp7a
-.section ssp7b
-.section ssp8a
-.section ssp8b
-.section ssp9a
-.section ssp9b
-.section ssp0a
-.section ssp0b
-.section ssqaa
-.section ssqab
-.section ssqba
-.section ssqbb
-.section ssqca
-.section ssqcb
-.section ssqda
-.section ssqdb
-.section ssqea
-.section ssqeb
-.section ssqfa
-.section ssqfb
-.section ssqga
-.section ssqgb
-.section ssqha
-.section ssqhb
-.section ssqia
-.section ssqib
-.section ssqja
-.section ssqjb
-.section ssqka
-.section ssqkb
-.section ssqla
-.section ssqlb
-.section ssqma
-.section ssqmb
-.section ssqna
-.section ssqnb
-.section ssqoa
-.section ssqob
-.section ssqpa
-.section ssqpb
-.section ssqqa
-.section ssqqb
-.section ssqra
-.section ssqrb
-.section ssqsa
-.section ssqsb
-.section ssqta
-.section ssqtb
-.section ssqua
-.section ssqub
-.section ssqva
-.section ssqvb
-.section ssqwa
-.section ssqwb
-.section ssqxa
-.section ssqxb
-.section ssqya
-.section ssqyb
-.section ssqza
-.section ssqzb
-.section ssq1a
-.section ssq1b
-.section ssq2a
-.section ssq2b
-.section ssq3a
-.section ssq3b
-.section ssq4a
-.section ssq4b
-.section ssq5a
-.section ssq5b
-.section ssq6a
-.section ssq6b
-.section ssq7a
-.section ssq7b
-.section ssq8a
-.section ssq8b
-.section ssq9a
-.section ssq9b
-.section ssq0a
-.section ssq0b
-.section ssraa
-.section ssrab
-.section ssrba
-.section ssrbb
-.section ssrca
-.section ssrcb
-.section ssrda
-.section ssrdb
-.section ssrea
-.section ssreb
-.section ssrfa
-.section ssrfb
-.section ssrga
-.section ssrgb
-.section ssrha
-.section ssrhb
-.section ssria
-.section ssrib
-.section ssrja
-.section ssrjb
-.section ssrka
-.section ssrkb
-.section ssrla
-.section ssrlb
-.section ssrma
-.section ssrmb
-.section ssrna
-.section ssrnb
-.section ssroa
-.section ssrob
-.section ssrpa
-.section ssrpb
-.section ssrqa
-.section ssrqb
-.section ssrra
-.section ssrrb
-.section ssrsa
-.section ssrsb
-.section ssrta
-.section ssrtb
-.section ssrua
-.section ssrub
-.section ssrva
-.section ssrvb
-.section ssrwa
-.section ssrwb
-.section ssrxa
-.section ssrxb
-.section ssrya
-.section ssryb
-.section ssrza
-.section ssrzb
-.section ssr1a
-.section ssr1b
-.section ssr2a
-.section ssr2b
-.section ssr3a
-.section ssr3b
-.section ssr4a
-.section ssr4b
-.section ssr5a
-.section ssr5b
-.section ssr6a
-.section ssr6b
-.section ssr7a
-.section ssr7b
-.section ssr8a
-.section ssr8b
-.section ssr9a
-.section ssr9b
-.section ssr0a
-.section ssr0b
-.section sssaa
-.section sssab
-.section sssba
-.section sssbb
-.section sssca
-.section ssscb
-.section sssda
-.section sssdb
-.section sssea
-.section ssseb
-.section sssfa
-.section sssfb
-.section sssga
-.section sssgb
-.section sssha
-.section ssshb
-.section sssia
-.section sssib
-.section sssja
-.section sssjb
-.section ssska
-.section ssskb
-.section sssla
-.section ssslb
-.section sssma
-.section sssmb
-.section sssna
-.section sssnb
-.section sssoa
-.section sssob
-.section ssspa
-.section ssspb
-.section sssqa
-.section sssqb
-.section sssra
-.section sssrb
-.section ssssa
-.section ssssb
-.section sssta
-.section ssstb
-.section sssua
-.section sssub
-.section sssva
-.section sssvb
-.section ssswa
-.section ssswb
-.section sssxa
-.section sssxb
-.section sssya
-.section sssyb
-.section sssza
-.section ssszb
-.section sss1a
-.section sss1b
-.section sss2a
-.section sss2b
-.section sss3a
-.section sss3b
-.section sss4a
-.section sss4b
-.section sss5a
-.section sss5b
-.section sss6a
-.section sss6b
-.section sss7a
-.section sss7b
-.section sss8a
-.section sss8b
-.section sss9a
-.section sss9b
-.section sss0a
-.section sss0b
-.section sstaa
-.section sstab
-.section sstba
-.section sstbb
-.section sstca
-.section sstcb
-.section sstda
-.section sstdb
-.section sstea
-.section ssteb
-.section sstfa
-.section sstfb
-.section sstga
-.section sstgb
-.section sstha
-.section ssthb
-.section sstia
-.section sstib
-.section sstja
-.section sstjb
-.section sstka
-.section sstkb
-.section sstla
-.section sstlb
-.section sstma
-.section sstmb
-.section sstna
-.section sstnb
-.section sstoa
-.section sstob
-.section sstpa
-.section sstpb
-.section sstqa
-.section sstqb
-.section sstra
-.section sstrb
-.section sstsa
-.section sstsb
-.section sstta
-.section ssttb
-.section sstua
-.section sstub
-.section sstva
-.section sstvb
-.section sstwa
-.section sstwb
-.section sstxa
-.section sstxb
-.section sstya
-.section sstyb
-.section sstza
-.section sstzb
-.section sst1a
-.section sst1b
-.section sst2a
-.section sst2b
-.section sst3a
-.section sst3b
-.section sst4a
-.section sst4b
-.section sst5a
-.section sst5b
-.section sst6a
-.section sst6b
-.section sst7a
-.section sst7b
-.section sst8a
-.section sst8b
-.section sst9a
-.section sst9b
-.section sst0a
-.section sst0b
-.section ssuaa
-.section ssuab
-.section ssuba
-.section ssubb
-.section ssuca
-.section ssucb
-.section ssuda
-.section ssudb
-.section ssuea
-.section ssueb
-.section ssufa
-.section ssufb
-.section ssuga
-.section ssugb
-.section ssuha
-.section ssuhb
-.section ssuia
-.section ssuib
-.section ssuja
-.section ssujb
-.section ssuka
-.section ssukb
-.section ssula
-.section ssulb
-.section ssuma
-.section ssumb
-.section ssuna
-.section ssunb
-.section ssuoa
-.section ssuob
-.section ssupa
-.section ssupb
-.section ssuqa
-.section ssuqb
-.section ssura
-.section ssurb
-.section ssusa
-.section ssusb
-.section ssuta
-.section ssutb
-.section ssuua
-.section ssuub
-.section ssuva
-.section ssuvb
-.section ssuwa
-.section ssuwb
-.section ssuxa
-.section ssuxb
-.section ssuya
-.section ssuyb
-.section ssuza
-.section ssuzb
-.section ssu1a
-.section ssu1b
-.section ssu2a
-.section ssu2b
-.section ssu3a
-.section ssu3b
-.section ssu4a
-.section ssu4b
-.section ssu5a
-.section ssu5b
-.section ssu6a
-.section ssu6b
-.section ssu7a
-.section ssu7b
-.section ssu8a
-.section ssu8b
-.section ssu9a
-.section ssu9b
-.section ssu0a
-.section ssu0b
-.section ssvaa
-.section ssvab
-.section ssvba
-.section ssvbb
-.section ssvca
-.section ssvcb
-.section ssvda
-.section ssvdb
-.section ssvea
-.section ssveb
-.section ssvfa
-.section ssvfb
-.section ssvga
-.section ssvgb
-.section ssvha
-.section ssvhb
-.section ssvia
-.section ssvib
-.section ssvja
-.section ssvjb
-.section ssvka
-.section ssvkb
-.section ssvla
-.section ssvlb
-.section ssvma
-.section ssvmb
-.section ssvna
-.section ssvnb
-.section ssvoa
-.section ssvob
-.section ssvpa
-.section ssvpb
-.section ssvqa
-.section ssvqb
-.section ssvra
-.section ssvrb
-.section ssvsa
-.section ssvsb
-.section ssvta
-.section ssvtb
-.section ssvua
-.section ssvub
-.section ssvva
-.section ssvvb
-.section ssvwa
-.section ssvwb
-.section ssvxa
-.section ssvxb
-.section ssvya
-.section ssvyb
-.section ssvza
-.section ssvzb
-.section ssv1a
-.section ssv1b
-.section ssv2a
-.section ssv2b
-.section ssv3a
-.section ssv3b
-.section ssv4a
-.section ssv4b
-.section ssv5a
-.section ssv5b
-.section ssv6a
-.section ssv6b
-.section ssv7a
-.section ssv7b
-.section ssv8a
-.section ssv8b
-.section ssv9a
-.section ssv9b
-.section ssv0a
-.section ssv0b
-.section sswaa
-.section sswab
-.section sswba
-.section sswbb
-.section sswca
-.section sswcb
-.section sswda
-.section sswdb
-.section sswea
-.section ssweb
-.section sswfa
-.section sswfb
-.section sswga
-.section sswgb
-.section sswha
-.section sswhb
-.section sswia
-.section sswib
-.section sswja
-.section sswjb
-.section sswka
-.section sswkb
-.section sswla
-.section sswlb
-.section sswma
-.section sswmb
-.section sswna
-.section sswnb
-.section sswoa
-.section sswob
-.section sswpa
-.section sswpb
-.section sswqa
-.section sswqb
-.section sswra
-.section sswrb
-.section sswsa
-.section sswsb
-.section sswta
-.section sswtb
-.section sswua
-.section sswub
-.section sswva
-.section sswvb
-.section sswwa
-.section sswwb
-.section sswxa
-.section sswxb
-.section sswya
-.section sswyb
-.section sswza
-.section sswzb
-.section ssw1a
-.section ssw1b
-.section ssw2a
-.section ssw2b
-.section ssw3a
-.section ssw3b
-.section ssw4a
-.section ssw4b
-.section ssw5a
-.section ssw5b
-.section ssw6a
-.section ssw6b
-.section ssw7a
-.section ssw7b
-.section ssw8a
-.section ssw8b
-.section ssw9a
-.section ssw9b
-.section ssw0a
-.section ssw0b
-.section ssxaa
-.section ssxab
-.section ssxba
-.section ssxbb
-.section ssxca
-.section ssxcb
-.section ssxda
-.section ssxdb
-.section ssxea
-.section ssxeb
-.section ssxfa
-.section ssxfb
-.section ssxga
-.section ssxgb
-.section ssxha
-.section ssxhb
-.section ssxia
-.section ssxib
-.section ssxja
-.section ssxjb
-.section ssxka
-.section ssxkb
-.section ssxla
-.section ssxlb
-.section ssxma
-.section ssxmb
-.section ssxna
-.section ssxnb
-.section ssxoa
-.section ssxob
-.section ssxpa
-.section ssxpb
-.section ssxqa
-.section ssxqb
-.section ssxra
-.section ssxrb
-.section ssxsa
-.section ssxsb
-.section ssxta
-.section ssxtb
-.section ssxua
-.section ssxub
-.section ssxva
-.section ssxvb
-.section ssxwa
-.section ssxwb
-.section ssxxa
-.section ssxxb
-.section ssxya
-.section ssxyb
-.section ssxza
-.section ssxzb
-.section ssx1a
-.section ssx1b
-.section ssx2a
-.section ssx2b
-.section ssx3a
-.section ssx3b
-.section ssx4a
-.section ssx4b
-.section ssx5a
-.section ssx5b
-.section ssx6a
-.section ssx6b
-.section ssx7a
-.section ssx7b
-.section ssx8a
-.section ssx8b
-.section ssx9a
-.section ssx9b
-.section ssx0a
-.section ssx0b
-.section ssyaa
-.section ssyab
-.section ssyba
-.section ssybb
-.section ssyca
-.section ssycb
-.section ssyda
-.section ssydb
-.section ssyea
-.section ssyeb
-.section ssyfa
-.section ssyfb
-.section ssyga
-.section ssygb
-.section ssyha
-.section ssyhb
-.section ssyia
-.section ssyib
-.section ssyja
-.section ssyjb
-.section ssyka
-.section ssykb
-.section ssyla
-.section ssylb
-.section ssyma
-.section ssymb
-.section ssyna
-.section ssynb
-.section ssyoa
-.section ssyob
-.section ssypa
-.section ssypb
-.section ssyqa
-.section ssyqb
-.section ssyra
-.section ssyrb
-.section ssysa
-.section ssysb
-.section ssyta
-.section ssytb
-.section ssyua
-.section ssyub
-.section ssyva
-.section ssyvb
-.section ssywa
-.section ssywb
-.section ssyxa
-.section ssyxb
-.section ssyya
-.section ssyyb
-.section ssyza
-.section ssyzb
-.section ssy1a
-.section ssy1b
-.section ssy2a
-.section ssy2b
-.section ssy3a
-.section ssy3b
-.section ssy4a
-.section ssy4b
-.section ssy5a
-.section ssy5b
-.section ssy6a
-.section ssy6b
-.section ssy7a
-.section ssy7b
-.section ssy8a
-.section ssy8b
-.section ssy9a
-.section ssy9b
-.section ssy0a
-.section ssy0b
-.section sszaa
-.section sszab
-.section sszba
-.section sszbb
-.section sszca
-.section sszcb
-.section sszda
-.section sszdb
-.section sszea
-.section sszeb
-.section sszfa
-.section sszfb
-.section sszga
-.section sszgb
-.section sszha
-.section sszhb
-.section sszia
-.section sszib
-.section sszja
-.section sszjb
-.section sszka
-.section sszkb
-.section sszla
-.section sszlb
-.section sszma
-.section sszmb
-.section sszna
-.section ssznb
-.section sszoa
-.section sszob
-.section sszpa
-.section sszpb
-.section sszqa
-.section sszqb
-.section sszra
-.section sszrb
-.section sszsa
-.section sszsb
-.section sszta
-.section ssztb
-.section sszua
-.section sszub
-.section sszva
-.section sszvb
-.section sszwa
-.section sszwb
-.section sszxa
-.section sszxb
-.section sszya
-.section sszyb
-.section sszza
-.section sszzb
-.section ssz1a
-.section ssz1b
-.section ssz2a
-.section ssz2b
-.section ssz3a
-.section ssz3b
-.section ssz4a
-.section ssz4b
-.section ssz5a
-.section ssz5b
-.section ssz6a
-.section ssz6b
-.section ssz7a
-.section ssz7b
-.section ssz8a
-.section ssz8b
-.section ssz9a
-.section ssz9b
-.section ssz0a
-.section ssz0b
-.section ss1aa
-.section ss1ab
-.section ss1ba
-.section ss1bb
-.section ss1ca
-.section ss1cb
-.section ss1da
-.section ss1db
-.section ss1ea
-.section ss1eb
-.section ss1fa
-.section ss1fb
-.section ss1ga
-.section ss1gb
-.section ss1ha
-.section ss1hb
-.section ss1ia
-.section ss1ib
-.section ss1ja
-.section ss1jb
-.section ss1ka
-.section ss1kb
-.section ss1la
-.section ss1lb
-.section ss1ma
-.section ss1mb
-.section ss1na
-.section ss1nb
-.section ss1oa
-.section ss1ob
-.section ss1pa
-.section ss1pb
-.section ss1qa
-.section ss1qb
-.section ss1ra
-.section ss1rb
-.section ss1sa
-.section ss1sb
-.section ss1ta
-.section ss1tb
-.section ss1ua
-.section ss1ub
-.section ss1va
-.section ss1vb
-.section ss1wa
-.section ss1wb
-.section ss1xa
-.section ss1xb
-.section ss1ya
-.section ss1yb
-.section ss1za
-.section ss1zb
-.section ss11a
-.section ss11b
-.section ss12a
-.section ss12b
-.section ss13a
-.section ss13b
-.section ss14a
-.section ss14b
-.section ss15a
-.section ss15b
-.section ss16a
-.section ss16b
-.section ss17a
-.section ss17b
-.section ss18a
-.section ss18b
-.section ss19a
-.section ss19b
-.section ss10a
-.section ss10b
-.section ss2aa
-.section ss2ab
-.section ss2ba
-.section ss2bb
-.section ss2ca
-.section ss2cb
-.section ss2da
-.section ss2db
-.section ss2ea
-.section ss2eb
-.section ss2fa
-.section ss2fb
-.section ss2ga
-.section ss2gb
-.section ss2ha
-.section ss2hb
-.section ss2ia
-.section ss2ib
-.section ss2ja
-.section ss2jb
-.section ss2ka
-.section ss2kb
-.section ss2la
-.section ss2lb
-.section ss2ma
-.section ss2mb
-.section ss2na
-.section ss2nb
-.section ss2oa
-.section ss2ob
-.section ss2pa
-.section ss2pb
-.section ss2qa
-.section ss2qb
-.section ss2ra
-.section ss2rb
-.section ss2sa
-.section ss2sb
-.section ss2ta
-.section ss2tb
-.section ss2ua
-.section ss2ub
-.section ss2va
-.section ss2vb
-.section ss2wa
-.section ss2wb
-.section ss2xa
-.section ss2xb
-.section ss2ya
-.section ss2yb
-.section ss2za
-.section ss2zb
-.section ss21a
-.section ss21b
-.section ss22a
-.section ss22b
-.section ss23a
-.section ss23b
-.section ss24a
-.section ss24b
-.section ss25a
-.section ss25b
-.section ss26a
-.section ss26b
-.section ss27a
-.section ss27b
-.section ss28a
-.section ss28b
-.section ss29a
-.section ss29b
-.section ss20a
-.section ss20b
-.section ss3aa
-.section ss3ab
-.section ss3ba
-.section ss3bb
-.section ss3ca
-.section ss3cb
-.section ss3da
-.section ss3db
-.section ss3ea
-.section ss3eb
-.section ss3fa
-.section ss3fb
-.section ss3ga
-.section ss3gb
-.section ss3ha
-.section ss3hb
-.section ss3ia
-.section ss3ib
-.section ss3ja
-.section ss3jb
-.section ss3ka
-.section ss3kb
-.section ss3la
-.section ss3lb
-.section ss3ma
-.section ss3mb
-.section ss3na
-.section ss3nb
-.section ss3oa
-.section ss3ob
-.section ss3pa
-.section ss3pb
-.section ss3qa
-.section ss3qb
-.section ss3ra
-.section ss3rb
-.section ss3sa
-.section ss3sb
-.section ss3ta
-.section ss3tb
-.section ss3ua
-.section ss3ub
-.section ss3va
-.section ss3vb
-.section ss3wa
-.section ss3wb
-.section ss3xa
-.section ss3xb
-.section ss3ya
-.section ss3yb
-.section ss3za
-.section ss3zb
-.section ss31a
-.section ss31b
-.section ss32a
-.section ss32b
-.section ss33a
-.section ss33b
-.section ss34a
-.section ss34b
-.section ss35a
-.section ss35b
-.section ss36a
-.section ss36b
-.section ss37a
-.section ss37b
-.section ss38a
-.section ss38b
-.section ss39a
-.section ss39b
-.section ss30a
-.section ss30b
-.section ss4aa
-.section ss4ab
-.section ss4ba
-.section ss4bb
-.section ss4ca
-.section ss4cb
-.section ss4da
-.section ss4db
-.section ss4ea
-.section ss4eb
-.section ss4fa
-.section ss4fb
-.section ss4ga
-.section ss4gb
-.section ss4ha
-.section ss4hb
-.section ss4ia
-.section ss4ib
-.section ss4ja
-.section ss4jb
-.section ss4ka
-.section ss4kb
-.section ss4la
-.section ss4lb
-.section ss4ma
-.section ss4mb
-.section ss4na
-.section ss4nb
-.section ss4oa
-.section ss4ob
-.section ss4pa
-.section ss4pb
-.section ss4qa
-.section ss4qb
-.section ss4ra
-.section ss4rb
-.section ss4sa
-.section ss4sb
-.section ss4ta
-.section ss4tb
-.section ss4ua
-.section ss4ub
-.section ss4va
-.section ss4vb
-.section ss4wa
-.section ss4wb
-.section ss4xa
-.section ss4xb
-.section ss4ya
-.section ss4yb
-.section ss4za
-.section ss4zb
-.section ss41a
-.section ss41b
-.section ss42a
-.section ss42b
-.section ss43a
-.section ss43b
-.section ss44a
-.section ss44b
-.section ss45a
-.section ss45b
-.section ss46a
-.section ss46b
-.section ss47a
-.section ss47b
-.section ss48a
-.section ss48b
-.section ss49a
-.section ss49b
-.section ss40a
-.section ss40b
-.section ss5aa
-.section ss5ab
-.section ss5ba
-.section ss5bb
-.section ss5ca
-.section ss5cb
-.section ss5da
-.section ss5db
-.section ss5ea
-.section ss5eb
-.section ss5fa
-.section ss5fb
-.section ss5ga
-.section ss5gb
-.section ss5ha
-.section ss5hb
-.section ss5ia
-.section ss5ib
-.section ss5ja
-.section ss5jb
-.section ss5ka
-.section ss5kb
-.section ss5la
-.section ss5lb
-.section ss5ma
-.section ss5mb
-.section ss5na
-.section ss5nb
-.section ss5oa
-.section ss5ob
-.section ss5pa
-.section ss5pb
-.section ss5qa
-.section ss5qb
-.section ss5ra
-.section ss5rb
-.section ss5sa
-.section ss5sb
-.section ss5ta
-.section ss5tb
-.section ss5ua
-.section ss5ub
-.section ss5va
-.section ss5vb
-.section ss5wa
-.section ss5wb
-.section ss5xa
-.section ss5xb
-.section ss5ya
-.section ss5yb
-.section ss5za
-.section ss5zb
-.section ss51a
-.section ss51b
-.section ss52a
-.section ss52b
-.section ss53a
-.section ss53b
-.section ss54a
-.section ss54b
-.section ss55a
-.section ss55b
-.section ss56a
-.section ss56b
-.section ss57a
-.section ss57b
-.section ss58a
-.section ss58b
-.section ss59a
-.section ss59b
-.section ss50a
-.section ss50b
-.section ss6aa
-.section ss6ab
-.section ss6ba
-.section ss6bb
-.section ss6ca
-.section ss6cb
-.section ss6da
-.section ss6db
-.section ss6ea
-.section ss6eb
-.section ss6fa
-.section ss6fb
-.section ss6ga
-.section ss6gb
-.section ss6ha
-.section ss6hb
-.section ss6ia
-.section ss6ib
-.section ss6ja
-.section ss6jb
-.section ss6ka
-.section ss6kb
-.section ss6la
-.section ss6lb
-.section ss6ma
-.section ss6mb
-.section ss6na
-.section ss6nb
-.section ss6oa
-.section ss6ob
-.section ss6pa
-.section ss6pb
-.section ss6qa
-.section ss6qb
-.section ss6ra
-.section ss6rb
-.section ss6sa
-.section ss6sb
-.section ss6ta
-.section ss6tb
-.section ss6ua
-.section ss6ub
-.section ss6va
-.section ss6vb
-.section ss6wa
-.section ss6wb
-.section ss6xa
-.section ss6xb
-.section ss6ya
-.section ss6yb
-.section ss6za
-.section ss6zb
-.section ss61a
-.section ss61b
-.section ss62a
-.section ss62b
-.section ss63a
-.section ss63b
-.section ss64a
-.section ss64b
-.section ss65a
-.section ss65b
-.section ss66a
-.section ss66b
-.section ss67a
-.section ss67b
-.section ss68a
-.section ss68b
-.section ss69a
-.section ss69b
-.section ss60a
-.section ss60b
-.section ss7aa
-.section ss7ab
-.section ss7ba
-.section ss7bb
-.section ss7ca
-.section ss7cb
-.section ss7da
-.section ss7db
-.section ss7ea
-.section ss7eb
-.section ss7fa
-.section ss7fb
-.section ss7ga
-.section ss7gb
-.section ss7ha
-.section ss7hb
-.section ss7ia
-.section ss7ib
-.section ss7ja
-.section ss7jb
-.section ss7ka
-.section ss7kb
-.section ss7la
-.section ss7lb
-.section ss7ma
-.section ss7mb
-.section ss7na
-.section ss7nb
-.section ss7oa
-.section ss7ob
-.section ss7pa
-.section ss7pb
-.section ss7qa
-.section ss7qb
-.section ss7ra
-.section ss7rb
-.section ss7sa
-.section ss7sb
-.section ss7ta
-.section ss7tb
-.section ss7ua
-.section ss7ub
-.section ss7va
-.section ss7vb
-.section ss7wa
-.section ss7wb
-.section ss7xa
-.section ss7xb
-.section ss7ya
-.section ss7yb
-.section ss7za
-.section ss7zb
-.section ss71a
-.section ss71b
-.section ss72a
-.section ss72b
-.section ss73a
-.section ss73b
-.section ss74a
-.section ss74b
-.section ss75a
-.section ss75b
-.section ss76a
-.section ss76b
-.section ss77a
-.section ss77b
-.section ss78a
-.section ss78b
-.section ss79a
-.section ss79b
-.section ss70a
-.section ss70b
-.section ss8aa
-.section ss8ab
-.section ss8ba
-.section ss8bb
-.section ss8ca
-.section ss8cb
-.section ss8da
-.section ss8db
-.section ss8ea
-.section ss8eb
-.section ss8fa
-.section ss8fb
-.section ss8ga
-.section ss8gb
-.section ss8ha
-.section ss8hb
-.section ss8ia
-.section ss8ib
-.section ss8ja
-.section ss8jb
-.section ss8ka
-.section ss8kb
-.section ss8la
-.section ss8lb
-.section ss8ma
-.section ss8mb
-.section ss8na
-.section ss8nb
-.section ss8oa
-.section ss8ob
-.section ss8pa
-.section ss8pb
-.section ss8qa
-.section ss8qb
-.section ss8ra
-.section ss8rb
-.section ss8sa
-.section ss8sb
-.section ss8ta
-.section ss8tb
-.section ss8ua
-.section ss8ub
-.section ss8va
-.section ss8vb
-.section ss8wa
-.section ss8wb
-.section ss8xa
-.section ss8xb
-.section ss8ya
-.section ss8yb
-.section ss8za
-.section ss8zb
-.section ss81a
-.section ss81b
-.section ss82a
-.section ss82b
-.section ss83a
-.section ss83b
-.section ss84a
-.section ss84b
-.section ss85a
-.section ss85b
-.section ss86a
-.section ss86b
-.section ss87a
-.section ss87b
-.section ss88a
-.section ss88b
-.section ss89a
-.section ss89b
-.section ss80a
-.section ss80b
-.section ss9aa
-.section ss9ab
-.section ss9ba
-.section ss9bb
-.section ss9ca
-.section ss9cb
-.section ss9da
-.section ss9db
-.section ss9ea
-.section ss9eb
-.section ss9fa
-.section ss9fb
-.section ss9ga
-.section ss9gb
-.section ss9ha
-.section ss9hb
-.section ss9ia
-.section ss9ib
-.section ss9ja
-.section ss9jb
-.section ss9ka
-.section ss9kb
-.section ss9la
-.section ss9lb
-.section ss9ma
-.section ss9mb
-.section ss9na
-.section ss9nb
-.section ss9oa
-.section ss9ob
-.section ss9pa
-.section ss9pb
-.section ss9qa
-.section ss9qb
-.section ss9ra
-.section ss9rb
-.section ss9sa
-.section ss9sb
-.section ss9ta
-.section ss9tb
-.section ss9ua
-.section ss9ub
-.section ss9va
-.section ss9vb
-.section ss9wa
-.section ss9wb
-.section ss9xa
-.section ss9xb
-.section ss9ya
-.section ss9yb
-.section ss9za
-.section ss9zb
-.section ss91a
-.section ss91b
-.section ss92a
-.section ss92b
-.section ss93a
-.section ss93b
-.section ss94a
-.section ss94b
-.section ss95a
-.section ss95b
-.section ss96a
-.section ss96b
-.section ss97a
-.section ss97b
-.section ss98a
-.section ss98b
-.section ss99a
-.section ss99b
-.section ss90a
-.section ss90b
-.section ss0aa
-.section ss0ab
-.section ss0ba
-.section ss0bb
-.section ss0ca
-.section ss0cb
-.section ss0da
-.section ss0db
-.section ss0ea
-.section ss0eb
-.section ss0fa
-.section ss0fb
-.section ss0ga
-.section ss0gb
-.section ss0ha
-.section ss0hb
-.section ss0ia
-.section ss0ib
-.section ss0ja
-.section ss0jb
-.section ss0ka
-.section ss0kb
-.section ss0la
-.section ss0lb
-.section ss0ma
-.section ss0mb
-.section ss0na
-.section ss0nb
-.section ss0oa
-.section ss0ob
-.section ss0pa
-.section ss0pb
-.section ss0qa
-.section ss0qb
-.section ss0ra
-.section ss0rb
-.section ss0sa
-.section ss0sb
-.section ss0ta
-.section ss0tb
-.section ss0ua
-.section ss0ub
-.section ss0va
-.section ss0vb
-.section ss0wa
-.section ss0wb
-.section ss0xa
-.section ss0xb
-.section ss0ya
-.section ss0yb
-.section ss0za
-.section ss0zb
-.section ss01a
-.section ss01b
-.section ss02a
-.section ss02b
-.section ss03a
-.section ss03b
-.section ss04a
-.section ss04b
-.section ss05a
-.section ss05b
-.section ss06a
-.section ss06b
-.section ss07a
-.section ss07b
-.section ss08a
-.section ss08b
-.section ss09a
-.section ss09b
-.section ss00a
-.section ss00b
-.section staaa
-.section staab
-.section staba
-.section stabb
-.section staca
-.section stacb
-.section stada
-.section stadb
-.section staea
-.section staeb
-.section stafa
-.section stafb
-.section staga
-.section stagb
-.section staha
-.section stahb
-.section staia
-.section staib
-.section staja
-.section stajb
-.section staka
-.section stakb
-.section stala
-.section stalb
-.section stama
-.section stamb
-.section stana
-.section stanb
-.section staoa
-.section staob
-.section stapa
-.section stapb
-.section staqa
-.section staqb
-.section stara
-.section starb
-.section stasa
-.section stasb
-.section stata
-.section statb
-.section staua
-.section staub
-.section stava
-.section stavb
-.section stawa
-.section stawb
-.section staxa
-.section staxb
-.section staya
-.section stayb
-.section staza
-.section stazb
-.section sta1a
-.section sta1b
-.section sta2a
-.section sta2b
-.section sta3a
-.section sta3b
-.section sta4a
-.section sta4b
-.section sta5a
-.section sta5b
-.section sta6a
-.section sta6b
-.section sta7a
-.section sta7b
-.section sta8a
-.section sta8b
-.section sta9a
-.section sta9b
-.section sta0a
-.section sta0b
-.section stbaa
-.section stbab
-.section stbba
-.section stbbb
-.section stbca
-.section stbcb
-.section stbda
-.section stbdb
-.section stbea
-.section stbeb
-.section stbfa
-.section stbfb
-.section stbga
-.section stbgb
-.section stbha
-.section stbhb
-.section stbia
-.section stbib
-.section stbja
-.section stbjb
-.section stbka
-.section stbkb
-.section stbla
-.section stblb
-.section stbma
-.section stbmb
-.section stbna
-.section stbnb
-.section stboa
-.section stbob
-.section stbpa
-.section stbpb
-.section stbqa
-.section stbqb
-.section stbra
-.section stbrb
-.section stbsa
-.section stbsb
-.section stbta
-.section stbtb
-.section stbua
-.section stbub
-.section stbva
-.section stbvb
-.section stbwa
-.section stbwb
-.section stbxa
-.section stbxb
-.section stbya
-.section stbyb
-.section stbza
-.section stbzb
-.section stb1a
-.section stb1b
-.section stb2a
-.section stb2b
-.section stb3a
-.section stb3b
-.section stb4a
-.section stb4b
-.section stb5a
-.section stb5b
-.section stb6a
-.section stb6b
-.section stb7a
-.section stb7b
-.section stb8a
-.section stb8b
-.section stb9a
-.section stb9b
-.section stb0a
-.section stb0b
-.section stcaa
-.section stcab
-.section stcba
-.section stcbb
-.section stcca
-.section stccb
-.section stcda
-.section stcdb
-.section stcea
-.section stceb
-.section stcfa
-.section stcfb
-.section stcga
-.section stcgb
-.section stcha
-.section stchb
-.section stcia
-.section stcib
-.section stcja
-.section stcjb
-.section stcka
-.section stckb
-.section stcla
-.section stclb
-.section stcma
-.section stcmb
-.section stcna
-.section stcnb
-.section stcoa
-.section stcob
-.section stcpa
-.section stcpb
-.section stcqa
-.section stcqb
-.section stcra
-.section stcrb
-.section stcsa
-.section stcsb
-.section stcta
-.section stctb
-.section stcua
-.section stcub
-.section stcva
-.section stcvb
-.section stcwa
-.section stcwb
-.section stcxa
-.section stcxb
-.section stcya
-.section stcyb
-.section stcza
-.section stczb
-.section stc1a
-.section stc1b
-.section stc2a
-.section stc2b
-.section stc3a
-.section stc3b
-.section stc4a
-.section stc4b
-.section stc5a
-.section stc5b
-.section stc6a
-.section stc6b
-.section stc7a
-.section stc7b
-.section stc8a
-.section stc8b
-.section stc9a
-.section stc9b
-.section stc0a
-.section stc0b
-.section stdaa
-.section stdab
-.section stdba
-.section stdbb
-.section stdca
-.section stdcb
-.section stdda
-.section stddb
-.section stdea
-.section stdeb
-.section stdfa
-.section stdfb
-.section stdga
-.section stdgb
-.section stdha
-.section stdhb
-.section stdia
-.section stdib
-.section stdja
-.section stdjb
-.section stdka
-.section stdkb
-.section stdla
-.section stdlb
-.section stdma
-.section stdmb
-.section stdna
-.section stdnb
-.section stdoa
-.section stdob
-.section stdpa
-.section stdpb
-.section stdqa
-.section stdqb
-.section stdra
-.section stdrb
-.section stdsa
-.section stdsb
-.section stdta
-.section stdtb
-.section stdua
-.section stdub
-.section stdva
-.section stdvb
-.section stdwa
-.section stdwb
-.section stdxa
-.section stdxb
-.section stdya
-.section stdyb
-.section stdza
-.section stdzb
-.section std1a
-.section std1b
-.section std2a
-.section std2b
-.section std3a
-.section std3b
-.section std4a
-.section std4b
-.section std5a
-.section std5b
-.section std6a
-.section std6b
-.section std7a
-.section std7b
-.section std8a
-.section std8b
-.section std9a
-.section std9b
-.section std0a
-.section std0b
-.section steaa
-.section steab
-.section steba
-.section stebb
-.section steca
-.section stecb
-.section steda
-.section stedb
-.section steea
-.section steeb
-.section stefa
-.section stefb
-.section stega
-.section stegb
-.section steha
-.section stehb
-.section steia
-.section steib
-.section steja
-.section stejb
-.section steka
-.section stekb
-.section stela
-.section stelb
-.section stema
-.section stemb
-.section stena
-.section stenb
-.section steoa
-.section steob
-.section stepa
-.section stepb
-.section steqa
-.section steqb
-.section stera
-.section sterb
-.section stesa
-.section stesb
-.section steta
-.section stetb
-.section steua
-.section steub
-.section steva
-.section stevb
-.section stewa
-.section stewb
-.section stexa
-.section stexb
-.section steya
-.section steyb
-.section steza
-.section stezb
-.section ste1a
-.section ste1b
-.section ste2a
-.section ste2b
-.section ste3a
-.section ste3b
-.section ste4a
-.section ste4b
-.section ste5a
-.section ste5b
-.section ste6a
-.section ste6b
-.section ste7a
-.section ste7b
-.section ste8a
-.section ste8b
-.section ste9a
-.section ste9b
-.section ste0a
-.section ste0b
-.section stfaa
-.section stfab
-.section stfba
-.section stfbb
-.section stfca
-.section stfcb
-.section stfda
-.section stfdb
-.section stfea
-.section stfeb
-.section stffa
-.section stffb
-.section stfga
-.section stfgb
-.section stfha
-.section stfhb
-.section stfia
-.section stfib
-.section stfja
-.section stfjb
-.section stfka
-.section stfkb
-.section stfla
-.section stflb
-.section stfma
-.section stfmb
-.section stfna
-.section stfnb
-.section stfoa
-.section stfob
-.section stfpa
-.section stfpb
-.section stfqa
-.section stfqb
-.section stfra
-.section stfrb
-.section stfsa
-.section stfsb
-.section stfta
-.section stftb
-.section stfua
-.section stfub
-.section stfva
-.section stfvb
-.section stfwa
-.section stfwb
-.section stfxa
-.section stfxb
-.section stfya
-.section stfyb
-.section stfza
-.section stfzb
-.section stf1a
-.section stf1b
-.section stf2a
-.section stf2b
-.section stf3a
-.section stf3b
-.section stf4a
-.section stf4b
-.section stf5a
-.section stf5b
-.section stf6a
-.section stf6b
-.section stf7a
-.section stf7b
-.section stf8a
-.section stf8b
-.section stf9a
-.section stf9b
-.section stf0a
-.section stf0b
-.section stgaa
-.section stgab
-.section stgba
-.section stgbb
-.section stgca
-.section stgcb
-.section stgda
-.section stgdb
-.section stgea
-.section stgeb
-.section stgfa
-.section stgfb
-.section stgga
-.section stggb
-.section stgha
-.section stghb
-.section stgia
-.section stgib
-.section stgja
-.section stgjb
-.section stgka
-.section stgkb
-.section stgla
-.section stglb
-.section stgma
-.section stgmb
-.section stgna
-.section stgnb
-.section stgoa
-.section stgob
-.section stgpa
-.section stgpb
-.section stgqa
-.section stgqb
-.section stgra
-.section stgrb
-.section stgsa
-.section stgsb
-.section stgta
-.section stgtb
-.section stgua
-.section stgub
-.section stgva
-.section stgvb
-.section stgwa
-.section stgwb
-.section stgxa
-.section stgxb
-.section stgya
-.section stgyb
-.section stgza
-.section stgzb
-.section stg1a
-.section stg1b
-.section stg2a
-.section stg2b
-.section stg3a
-.section stg3b
-.section stg4a
-.section stg4b
-.section stg5a
-.section stg5b
-.section stg6a
-.section stg6b
-.section stg7a
-.section stg7b
-.section stg8a
-.section stg8b
-.section stg9a
-.section stg9b
-.section stg0a
-.section stg0b
-.section sthaa
-.section sthab
-.section sthba
-.section sthbb
-.section sthca
-.section sthcb
-.section sthda
-.section sthdb
-.section sthea
-.section stheb
-.section sthfa
-.section sthfb
-.section sthga
-.section sthgb
-.section sthha
-.section sthhb
-.section sthia
-.section sthib
-.section sthja
-.section sthjb
-.section sthka
-.section sthkb
-.section sthla
-.section sthlb
-.section sthma
-.section sthmb
-.section sthna
-.section sthnb
-.section sthoa
-.section sthob
-.section sthpa
-.section sthpb
-.section sthqa
-.section sthqb
-.section sthra
-.section sthrb
-.section sthsa
-.section sthsb
-.section sthta
-.section sthtb
-.section sthua
-.section sthub
-.section sthva
-.section sthvb
-.section sthwa
-.section sthwb
-.section sthxa
-.section sthxb
-.section sthya
-.section sthyb
-.section sthza
-.section sthzb
-.section sth1a
-.section sth1b
-.section sth2a
-.section sth2b
-.section sth3a
-.section sth3b
-.section sth4a
-.section sth4b
-.section sth5a
-.section sth5b
-.section sth6a
-.section sth6b
-.section sth7a
-.section sth7b
-.section sth8a
-.section sth8b
-.section sth9a
-.section sth9b
-.section sth0a
-.section sth0b
-.section stiaa
-.section stiab
-.section stiba
-.section stibb
-.section stica
-.section sticb
-.section stida
-.section stidb
-.section stiea
-.section stieb
-.section stifa
-.section stifb
-.section stiga
-.section stigb
-.section stiha
-.section stihb
-.section stiia
-.section stiib
-.section stija
-.section stijb
-.section stika
-.section stikb
-.section stila
-.section stilb
-.section stima
-.section stimb
-.section stina
-.section stinb
-.section stioa
-.section stiob
-.section stipa
-.section stipb
-.section stiqa
-.section stiqb
-.section stira
-.section stirb
-.section stisa
-.section stisb
-.section stita
-.section stitb
-.section stiua
-.section stiub
-.section stiva
-.section stivb
-.section stiwa
-.section stiwb
-.section stixa
-.section stixb
-.section stiya
-.section stiyb
-.section stiza
-.section stizb
-.section sti1a
-.section sti1b
-.section sti2a
-.section sti2b
-.section sti3a
-.section sti3b
-.section sti4a
-.section sti4b
-.section sti5a
-.section sti5b
-.section sti6a
-.section sti6b
-.section sti7a
-.section sti7b
-.section sti8a
-.section sti8b
-.section sti9a
-.section sti9b
-.section sti0a
-.section sti0b
-.section stjaa
-.section stjab
-.section stjba
-.section stjbb
-.section stjca
-.section stjcb
-.section stjda
-.section stjdb
-.section stjea
-.section stjeb
-.section stjfa
-.section stjfb
-.section stjga
-.section stjgb
-.section stjha
-.section stjhb
-.section stjia
-.section stjib
-.section stjja
-.section stjjb
-.section stjka
-.section stjkb
-.section stjla
-.section stjlb
-.section stjma
-.section stjmb
-.section stjna
-.section stjnb
-.section stjoa
-.section stjob
-.section stjpa
-.section stjpb
-.section stjqa
-.section stjqb
-.section stjra
-.section stjrb
-.section stjsa
-.section stjsb
-.section stjta
-.section stjtb
-.section stjua
-.section stjub
-.section stjva
-.section stjvb
-.section stjwa
-.section stjwb
-.section stjxa
-.section stjxb
-.section stjya
-.section stjyb
-.section stjza
-.section stjzb
-.section stj1a
-.section stj1b
-.section stj2a
-.section stj2b
-.section stj3a
-.section stj3b
-.section stj4a
-.section stj4b
-.section stj5a
-.section stj5b
-.section stj6a
-.section stj6b
-.section stj7a
-.section stj7b
-.section stj8a
-.section stj8b
-.section stj9a
-.section stj9b
-.section stj0a
-.section stj0b
-.section stkaa
-.section stkab
-.section stkba
-.section stkbb
-.section stkca
-.section stkcb
-.section stkda
-.section stkdb
-.section stkea
-.section stkeb
-.section stkfa
-.section stkfb
-.section stkga
-.section stkgb
-.section stkha
-.section stkhb
-.section stkia
-.section stkib
-.section stkja
-.section stkjb
-.section stkka
-.section stkkb
-.section stkla
-.section stklb
-.section stkma
-.section stkmb
-.section stkna
-.section stknb
-.section stkoa
-.section stkob
-.section stkpa
-.section stkpb
-.section stkqa
-.section stkqb
-.section stkra
-.section stkrb
-.section stksa
-.section stksb
-.section stkta
-.section stktb
-.section stkua
-.section stkub
-.section stkva
-.section stkvb
-.section stkwa
-.section stkwb
-.section stkxa
-.section stkxb
-.section stkya
-.section stkyb
-.section stkza
-.section stkzb
-.section stk1a
-.section stk1b
-.section stk2a
-.section stk2b
-.section stk3a
-.section stk3b
-.section stk4a
-.section stk4b
-.section stk5a
-.section stk5b
-.section stk6a
-.section stk6b
-.section stk7a
-.section stk7b
-.section stk8a
-.section stk8b
-.section stk9a
-.section stk9b
-.section stk0a
-.section stk0b
-.section stlaa
-.section stlab
-.section stlba
-.section stlbb
-.section stlca
-.section stlcb
-.section stlda
-.section stldb
-.section stlea
-.section stleb
-.section stlfa
-.section stlfb
-.section stlga
-.section stlgb
-.section stlha
-.section stlhb
-.section stlia
-.section stlib
-.section stlja
-.section stljb
-.section stlka
-.section stlkb
-.section stlla
-.section stllb
-.section stlma
-.section stlmb
-.section stlna
-.section stlnb
-.section stloa
-.section stlob
-.section stlpa
-.section stlpb
-.section stlqa
-.section stlqb
-.section stlra
-.section stlrb
-.section stlsa
-.section stlsb
-.section stlta
-.section stltb
-.section stlua
-.section stlub
-.section stlva
-.section stlvb
-.section stlwa
-.section stlwb
-.section stlxa
-.section stlxb
-.section stlya
-.section stlyb
-.section stlza
-.section stlzb
-.section stl1a
-.section stl1b
-.section stl2a
-.section stl2b
-.section stl3a
-.section stl3b
-.section stl4a
-.section stl4b
-.section stl5a
-.section stl5b
-.section stl6a
-.section stl6b
-.section stl7a
-.section stl7b
-.section stl8a
-.section stl8b
-.section stl9a
-.section stl9b
-.section stl0a
-.section stl0b
-.section stmaa
-.section stmab
-.section stmba
-.section stmbb
-.section stmca
-.section stmcb
-.section stmda
-.section stmdb
-.section stmea
-.section stmeb
-.section stmfa
-.section stmfb
-.section stmga
-.section stmgb
-.section stmha
-.section stmhb
-.section stmia
-.section stmib
-.section stmja
-.section stmjb
-.section stmka
-.section stmkb
-.section stmla
-.section stmlb
-.section stmma
-.section stmmb
-.section stmna
-.section stmnb
-.section stmoa
-.section stmob
-.section stmpa
-.section stmpb
-.section stmqa
-.section stmqb
-.section stmra
-.section stmrb
-.section stmsa
-.section stmsb
-.section stmta
-.section stmtb
-.section stmua
-.section stmub
-.section stmva
-.section stmvb
-.section stmwa
-.section stmwb
-.section stmxa
-.section stmxb
-.section stmya
-.section stmyb
-.section stmza
-.section stmzb
-.section stm1a
-.section stm1b
-.section stm2a
-.section stm2b
-.section stm3a
-.section stm3b
-.section stm4a
-.section stm4b
-.section stm5a
-.section stm5b
-.section stm6a
-.section stm6b
-.section stm7a
-.section stm7b
-.section stm8a
-.section stm8b
-.section stm9a
-.section stm9b
-.section stm0a
-.section stm0b
-.section stnaa
-.section stnab
-.section stnba
-.section stnbb
-.section stnca
-.section stncb
-.section stnda
-.section stndb
-.section stnea
-.section stneb
-.section stnfa
-.section stnfb
-.section stnga
-.section stngb
-.section stnha
-.section stnhb
-.section stnia
-.section stnib
-.section stnja
-.section stnjb
-.section stnka
-.section stnkb
-.section stnla
-.section stnlb
-.section stnma
-.section stnmb
-.section stnna
-.section stnnb
-.section stnoa
-.section stnob
-.section stnpa
-.section stnpb
-.section stnqa
-.section stnqb
-.section stnra
-.section stnrb
-.section stnsa
-.section stnsb
-.section stnta
-.section stntb
-.section stnua
-.section stnub
-.section stnva
-.section stnvb
-.section stnwa
-.section stnwb
-.section stnxa
-.section stnxb
-.section stnya
-.section stnyb
-.section stnza
-.section stnzb
-.section stn1a
-.section stn1b
-.section stn2a
-.section stn2b
-.section stn3a
-.section stn3b
-.section stn4a
-.section stn4b
-.section stn5a
-.section stn5b
-.section stn6a
-.section stn6b
-.section stn7a
-.section stn7b
-.section stn8a
-.section stn8b
-.section stn9a
-.section stn9b
-.section stn0a
-.section stn0b
-.section stoaa
-.section stoab
-.section stoba
-.section stobb
-.section stoca
-.section stocb
-.section stoda
-.section stodb
-.section stoea
-.section stoeb
-.section stofa
-.section stofb
-.section stoga
-.section stogb
-.section stoha
-.section stohb
-.section stoia
-.section stoib
-.section stoja
-.section stojb
-.section stoka
-.section stokb
-.section stola
-.section stolb
-.section stoma
-.section stomb
-.section stona
-.section stonb
-.section stooa
-.section stoob
-.section stopa
-.section stopb
-.section stoqa
-.section stoqb
-.section stora
-.section storb
-.section stosa
-.section stosb
-.section stota
-.section stotb
-.section stoua
-.section stoub
-.section stova
-.section stovb
-.section stowa
-.section stowb
-.section stoxa
-.section stoxb
-.section stoya
-.section stoyb
-.section stoza
-.section stozb
-.section sto1a
-.section sto1b
-.section sto2a
-.section sto2b
-.section sto3a
-.section sto3b
-.section sto4a
-.section sto4b
-.section sto5a
-.section sto5b
-.section sto6a
-.section sto6b
-.section sto7a
-.section sto7b
-.section sto8a
-.section sto8b
-.section sto9a
-.section sto9b
-.section sto0a
-.section sto0b
-.section stpaa
-.section stpab
-.section stpba
-.section stpbb
-.section stpca
-.section stpcb
-.section stpda
-.section stpdb
-.section stpea
-.section stpeb
-.section stpfa
-.section stpfb
-.section stpga
-.section stpgb
-.section stpha
-.section stphb
-.section stpia
-.section stpib
-.section stpja
-.section stpjb
-.section stpka
-.section stpkb
-.section stpla
-.section stplb
-.section stpma
-.section stpmb
-.section stpna
-.section stpnb
-.section stpoa
-.section stpob
-.section stppa
-.section stppb
-.section stpqa
-.section stpqb
-.section stpra
-.section stprb
-.section stpsa
-.section stpsb
-.section stpta
-.section stptb
-.section stpua
-.section stpub
-.section stpva
-.section stpvb
-.section stpwa
-.section stpwb
-.section stpxa
-.section stpxb
-.section stpya
-.section stpyb
-.section stpza
-.section stpzb
-.section stp1a
-.section stp1b
-.section stp2a
-.section stp2b
-.section stp3a
-.section stp3b
-.section stp4a
-.section stp4b
-.section stp5a
-.section stp5b
-.section stp6a
-.section stp6b
-.section stp7a
-.section stp7b
-.section stp8a
-.section stp8b
-.section stp9a
-.section stp9b
-.section stp0a
-.section stp0b
-.section stqaa
-.section stqab
-.section stqba
-.section stqbb
-.section stqca
-.section stqcb
-.section stqda
-.section stqdb
-.section stqea
-.section stqeb
-.section stqfa
-.section stqfb
-.section stqga
-.section stqgb
-.section stqha
-.section stqhb
-.section stqia
-.section stqib
-.section stqja
-.section stqjb
-.section stqka
-.section stqkb
-.section stqla
-.section stqlb
-.section stqma
-.section stqmb
-.section stqna
-.section stqnb
-.section stqoa
-.section stqob
-.section stqpa
-.section stqpb
-.section stqqa
-.section stqqb
-.section stqra
-.section stqrb
-.section stqsa
-.section stqsb
-.section stqta
-.section stqtb
-.section stqua
-.section stqub
-.section stqva
-.section stqvb
-.section stqwa
-.section stqwb
-.section stqxa
-.section stqxb
-.section stqya
-.section stqyb
-.section stqza
-.section stqzb
-.section stq1a
-.section stq1b
-.section stq2a
-.section stq2b
-.section stq3a
-.section stq3b
-.section stq4a
-.section stq4b
-.section stq5a
-.section stq5b
-.section stq6a
-.section stq6b
-.section stq7a
-.section stq7b
-.section stq8a
-.section stq8b
-.section stq9a
-.section stq9b
-.section stq0a
-.section stq0b
-.section straa
-.section strab
-.section strba
-.section strbb
-.section strca
-.section strcb
-.section strda
-.section strdb
-.section strea
-.section streb
-.section strfa
-.section strfb
-.section strga
-.section strgb
-.section strha
-.section strhb
-.section stria
-.section strib
-.section strja
-.section strjb
-.section strka
-.section strkb
-.section strla
-.section strlb
-.section strma
-.section strmb
-.section strna
-.section strnb
-.section stroa
-.section strob
-.section strpa
-.section strpb
-.section strqa
-.section strqb
-.section strra
-.section strrb
-.section strsa
-.section strsb
-.section strta
-.section strtb
-.section strua
-.section strub
-.section strva
-.section strvb
-.section strwa
-.section strwb
-.section strxa
-.section strxb
-.section strya
-.section stryb
-.section strza
-.section strzb
-.section str1a
-.section str1b
-.section str2a
-.section str2b
-.section str3a
-.section str3b
-.section str4a
-.section str4b
-.section str5a
-.section str5b
-.section str6a
-.section str6b
-.section str7a
-.section str7b
-.section str8a
-.section str8b
-.section str9a
-.section str9b
-.section str0a
-.section str0b
-.section stsaa
-.section stsab
-.section stsba
-.section stsbb
-.section stsca
-.section stscb
-.section stsda
-.section stsdb
-.section stsea
-.section stseb
-.section stsfa
-.section stsfb
-.section stsga
-.section stsgb
-.section stsha
-.section stshb
-.section stsia
-.section stsib
-.section stsja
-.section stsjb
-.section stska
-.section stskb
-.section stsla
-.section stslb
-.section stsma
-.section stsmb
-.section stsna
-.section stsnb
-.section stsoa
-.section stsob
-.section stspa
-.section stspb
-.section stsqa
-.section stsqb
-.section stsra
-.section stsrb
-.section stssa
-.section stssb
-.section ststa
-.section ststb
-.section stsua
-.section stsub
-.section stsva
-.section stsvb
-.section stswa
-.section stswb
-.section stsxa
-.section stsxb
-.section stsya
-.section stsyb
-.section stsza
-.section stszb
-.section sts1a
-.section sts1b
-.section sts2a
-.section sts2b
-.section sts3a
-.section sts3b
-.section sts4a
-.section sts4b
-.section sts5a
-.section sts5b
-.section sts6a
-.section sts6b
-.section sts7a
-.section sts7b
-.section sts8a
-.section sts8b
-.section sts9a
-.section sts9b
-.section sts0a
-.section sts0b
-.section sttaa
-.section sttab
-.section sttba
-.section sttbb
-.section sttca
-.section sttcb
-.section sttda
-.section sttdb
-.section sttea
-.section stteb
-.section sttfa
-.section sttfb
-.section sttga
-.section sttgb
-.section sttha
-.section stthb
-.section sttia
-.section sttib
-.section sttja
-.section sttjb
-.section sttka
-.section sttkb
-.section sttla
-.section sttlb
-.section sttma
-.section sttmb
-.section sttna
-.section sttnb
-.section sttoa
-.section sttob
-.section sttpa
-.section sttpb
-.section sttqa
-.section sttqb
-.section sttra
-.section sttrb
-.section sttsa
-.section sttsb
-.section sttta
-.section stttb
-.section sttua
-.section sttub
-.section sttva
-.section sttvb
-.section sttwa
-.section sttwb
-.section sttxa
-.section sttxb
-.section sttya
-.section sttyb
-.section sttza
-.section sttzb
-.section stt1a
-.section stt1b
-.section stt2a
-.section stt2b
-.section stt3a
-.section stt3b
-.section stt4a
-.section stt4b
-.section stt5a
-.section stt5b
-.section stt6a
-.section stt6b
-.section stt7a
-.section stt7b
-.section stt8a
-.section stt8b
-.section stt9a
-.section stt9b
-.section stt0a
-.section stt0b
-.section stuaa
-.section stuab
-.section stuba
-.section stubb
-.section stuca
-.section stucb
-.section studa
-.section studb
-.section stuea
-.section stueb
-.section stufa
-.section stufb
-.section stuga
-.section stugb
-.section stuha
-.section stuhb
-.section stuia
-.section stuib
-.section stuja
-.section stujb
-.section stuka
-.section stukb
-.section stula
-.section stulb
-.section stuma
-.section stumb
-.section stuna
-.section stunb
-.section stuoa
-.section stuob
-.section stupa
-.section stupb
-.section stuqa
-.section stuqb
-.section stura
-.section sturb
-.section stusa
-.section stusb
-.section stuta
-.section stutb
-.section stuua
-.section stuub
-.section stuva
-.section stuvb
-.section stuwa
-.section stuwb
-.section stuxa
-.section stuxb
-.section stuya
-.section stuyb
-.section stuza
-.section stuzb
-.section stu1a
-.section stu1b
-.section stu2a
-.section stu2b
-.section stu3a
-.section stu3b
-.section stu4a
-.section stu4b
-.section stu5a
-.section stu5b
-.section stu6a
-.section stu6b
-.section stu7a
-.section stu7b
-.section stu8a
-.section stu8b
-.section stu9a
-.section stu9b
-.section stu0a
-.section stu0b
-.section stvaa
-.section stvab
-.section stvba
-.section stvbb
-.section stvca
-.section stvcb
-.section stvda
-.section stvdb
-.section stvea
-.section stveb
-.section stvfa
-.section stvfb
-.section stvga
-.section stvgb
-.section stvha
-.section stvhb
-.section stvia
-.section stvib
-.section stvja
-.section stvjb
-.section stvka
-.section stvkb
-.section stvla
-.section stvlb
-.section stvma
-.section stvmb
-.section stvna
-.section stvnb
-.section stvoa
-.section stvob
-.section stvpa
-.section stvpb
-.section stvqa
-.section stvqb
-.section stvra
-.section stvrb
-.section stvsa
-.section stvsb
-.section stvta
-.section stvtb
-.section stvua
-.section stvub
-.section stvva
-.section stvvb
-.section stvwa
-.section stvwb
-.section stvxa
-.section stvxb
-.section stvya
-.section stvyb
-.section stvza
-.section stvzb
-.section stv1a
-.section stv1b
-.section stv2a
-.section stv2b
-.section stv3a
-.section stv3b
-.section stv4a
-.section stv4b
-.section stv5a
-.section stv5b
-.section stv6a
-.section stv6b
-.section stv7a
-.section stv7b
-.section stv8a
-.section stv8b
-.section stv9a
-.section stv9b
-.section stv0a
-.section stv0b
-.section stwaa
-.section stwab
-.section stwba
-.section stwbb
-.section stwca
-.section stwcb
-.section stwda
-.section stwdb
-.section stwea
-.section stweb
-.section stwfa
-.section stwfb
-.section stwga
-.section stwgb
-.section stwha
-.section stwhb
-.section stwia
-.section stwib
-.section stwja
-.section stwjb
-.section stwka
-.section stwkb
-.section stwla
-.section stwlb
-.section stwma
-.section stwmb
-.section stwna
-.section stwnb
-.section stwoa
-.section stwob
-.section stwpa
-.section stwpb
-.section stwqa
-.section stwqb
-.section stwra
-.section stwrb
-.section stwsa
-.section stwsb
-.section stwta
-.section stwtb
-.section stwua
-.section stwub
-.section stwva
-.section stwvb
-.section stwwa
-.section stwwb
-.section stwxa
-.section stwxb
-.section stwya
-.section stwyb
-.section stwza
-.section stwzb
-.section stw1a
-.section stw1b
-.section stw2a
-.section stw2b
-.section stw3a
-.section stw3b
-.section stw4a
-.section stw4b
-.section stw5a
-.section stw5b
-.section stw6a
-.section stw6b
-.section stw7a
-.section stw7b
-.section stw8a
-.section stw8b
-.section stw9a
-.section stw9b
-.section stw0a
-.section stw0b
-.section stxaa
-.section stxab
-.section stxba
-.section stxbb
-.section stxca
-.section stxcb
-.section stxda
-.section stxdb
-.section stxea
-.section stxeb
-.section stxfa
-.section stxfb
-.section stxga
-.section stxgb
-.section stxha
-.section stxhb
-.section stxia
-.section stxib
-.section stxja
-.section stxjb
-.section stxka
-.section stxkb
-.section stxla
-.section stxlb
-.section stxma
-.section stxmb
-.section stxna
-.section stxnb
-.section stxoa
-.section stxob
-.section stxpa
-.section stxpb
-.section stxqa
-.section stxqb
-.section stxra
-.section stxrb
-.section stxsa
-.section stxsb
-.section stxta
-.section stxtb
-.section stxua
-.section stxub
-.section stxva
-.section stxvb
-.section stxwa
-.section stxwb
-.section stxxa
-.section stxxb
-.section stxya
-.section stxyb
-.section stxza
-.section stxzb
-.section stx1a
-.section stx1b
-.section stx2a
-.section stx2b
-.section stx3a
-.section stx3b
-.section stx4a
-.section stx4b
-.section stx5a
-.section stx5b
-.section stx6a
-.section stx6b
-.section stx7a
-.section stx7b
-.section stx8a
-.section stx8b
-.section stx9a
-.section stx9b
-.section stx0a
-.section stx0b
-.section styaa
-.section styab
-.section styba
-.section stybb
-.section styca
-.section stycb
-.section styda
-.section stydb
-.section styea
-.section styeb
-.section styfa
-.section styfb
-.section styga
-.section stygb
-.section styha
-.section styhb
-.section styia
-.section styib
-.section styja
-.section styjb
-.section styka
-.section stykb
-.section styla
-.section stylb
-.section styma
-.section stymb
-.section styna
-.section stynb
-.section styoa
-.section styob
-.section stypa
-.section stypb
-.section styqa
-.section styqb
-.section styra
-.section styrb
-.section stysa
-.section stysb
-.section styta
-.section stytb
-.section styua
-.section styub
-.section styva
-.section styvb
-.section stywa
-.section stywb
-.section styxa
-.section styxb
-.section styya
-.section styyb
-.section styza
-.section styzb
-.section sty1a
-.section sty1b
-.section sty2a
-.section sty2b
-.section sty3a
-.section sty3b
-.section sty4a
-.section sty4b
-.section sty5a
-.section sty5b
-.section sty6a
-.section sty6b
-.section sty7a
-.section sty7b
-.section sty8a
-.section sty8b
-.section sty9a
-.section sty9b
-.section sty0a
-.section sty0b
-.section stzaa
-.section stzab
-.section stzba
-.section stzbb
-.section stzca
-.section stzcb
-.section stzda
-.section stzdb
-.section stzea
-.section stzeb
-.section stzfa
-.section stzfb
-.section stzga
-.section stzgb
-.section stzha
-.section stzhb
-.section stzia
-.section stzib
-.section stzja
-.section stzjb
-.section stzka
-.section stzkb
-.section stzla
-.section stzlb
-.section stzma
-.section stzmb
-.section stzna
-.section stznb
-.section stzoa
-.section stzob
-.section stzpa
-.section stzpb
-.section stzqa
-.section stzqb
-.section stzra
-.section stzrb
-.section stzsa
-.section stzsb
-.section stzta
-.section stztb
-.section stzua
-.section stzub
-.section stzva
-.section stzvb
-.section stzwa
-.section stzwb
-.section stzxa
-.section stzxb
-.section stzya
-.section stzyb
-.section stzza
-.section stzzb
-.section stz1a
-.section stz1b
-.section stz2a
-.section stz2b
-.section stz3a
-.section stz3b
-.section stz4a
-.section stz4b
-.section stz5a
-.section stz5b
-.section stz6a
-.section stz6b
-.section stz7a
-.section stz7b
-.section stz8a
-.section stz8b
-.section stz9a
-.section stz9b
-.section stz0a
-.section stz0b
-.section st1aa
-.section st1ab
-.section st1ba
-.section st1bb
-.section st1ca
-.section st1cb
-.section st1da
-.section st1db
-.section st1ea
-.section st1eb
-.section st1fa
-.section st1fb
-.section st1ga
-.section st1gb
-.section st1ha
-.section st1hb
-.section st1ia
-.section st1ib
-.section st1ja
-.section st1jb
-.section st1ka
-.section st1kb
-.section st1la
-.section st1lb
-.section st1ma
-.section st1mb
-.section st1na
-.section st1nb
-.section st1oa
-.section st1ob
-.section st1pa
-.section st1pb
-.section st1qa
-.section st1qb
-.section st1ra
-.section st1rb
-.section st1sa
-.section st1sb
-.section st1ta
-.section st1tb
-.section st1ua
-.section st1ub
-.section st1va
-.section st1vb
-.section st1wa
-.section st1wb
-.section st1xa
-.section st1xb
-.section st1ya
-.section st1yb
-.section st1za
-.section st1zb
-.section st11a
-.section st11b
-.section st12a
-.section st12b
-.section st13a
-.section st13b
-.section st14a
-.section st14b
-.section st15a
-.section st15b
-.section st16a
-.section st16b
-.section st17a
-.section st17b
-.section st18a
-.section st18b
-.section st19a
-.section st19b
-.section st10a
-.section st10b
-.section st2aa
-.section st2ab
-.section st2ba
-.section st2bb
-.section st2ca
-.section st2cb
-.section st2da
-.section st2db
-.section st2ea
-.section st2eb
-.section st2fa
-.section st2fb
-.section st2ga
-.section st2gb
-.section st2ha
-.section st2hb
-.section st2ia
-.section st2ib
-.section st2ja
-.section st2jb
-.section st2ka
-.section st2kb
-.section st2la
-.section st2lb
-.section st2ma
-.section st2mb
-.section st2na
-.section st2nb
-.section st2oa
-.section st2ob
-.section st2pa
-.section st2pb
-.section st2qa
-.section st2qb
-.section st2ra
-.section st2rb
-.section st2sa
-.section st2sb
-.section st2ta
-.section st2tb
-.section st2ua
-.section st2ub
-.section st2va
-.section st2vb
-.section st2wa
-.section st2wb
-.section st2xa
-.section st2xb
-.section st2ya
-.section st2yb
-.section st2za
-.section st2zb
-.section st21a
-.section st21b
-.section st22a
-.section st22b
-.section st23a
-.section st23b
-.section st24a
-.section st24b
-.section st25a
-.section st25b
-.section st26a
-.section st26b
-.section st27a
-.section st27b
-.section st28a
-.section st28b
-.section st29a
-.section st29b
-.section st20a
-.section st20b
-.section st3aa
-.section st3ab
-.section st3ba
-.section st3bb
-.section st3ca
-.section st3cb
-.section st3da
-.section st3db
-.section st3ea
-.section st3eb
-.section st3fa
-.section st3fb
-.section st3ga
-.section st3gb
-.section st3ha
-.section st3hb
-.section st3ia
-.section st3ib
-.section st3ja
-.section st3jb
-.section st3ka
-.section st3kb
-.section st3la
-.section st3lb
-.section st3ma
-.section st3mb
-.section st3na
-.section st3nb
-.section st3oa
-.section st3ob
-.section st3pa
-.section st3pb
-.section st3qa
-.section st3qb
-.section st3ra
-.section st3rb
-.section st3sa
-.section st3sb
-.section st3ta
-.section st3tb
-.section st3ua
-.section st3ub
-.section st3va
-.section st3vb
-.section st3wa
-.section st3wb
-.section st3xa
-.section st3xb
-.section st3ya
-.section st3yb
-.section st3za
-.section st3zb
-.section st31a
-.section st31b
-.section st32a
-.section st32b
-.section st33a
-.section st33b
-.section st34a
-.section st34b
-.section st35a
-.section st35b
-.section st36a
-.section st36b
-.section st37a
-.section st37b
-.section st38a
-.section st38b
-.section st39a
-.section st39b
-.section st30a
-.section st30b
-.section st4aa
-.section st4ab
-.section st4ba
-.section st4bb
-.section st4ca
-.section st4cb
-.section st4da
-.section st4db
-.section st4ea
-.section st4eb
-.section st4fa
-.section st4fb
-.section st4ga
-.section st4gb
-.section st4ha
-.section st4hb
-.section st4ia
-.section st4ib
-.section st4ja
-.section st4jb
-.section st4ka
-.section st4kb
-.section st4la
-.section st4lb
-.section st4ma
-.section st4mb
-.section st4na
-.section st4nb
-.section st4oa
-.section st4ob
-.section st4pa
-.section st4pb
-.section st4qa
-.section st4qb
-.section st4ra
-.section st4rb
-.section st4sa
-.section st4sb
-.section st4ta
-.section st4tb
-.section st4ua
-.section st4ub
-.section st4va
-.section st4vb
-.section st4wa
-.section st4wb
-.section st4xa
-.section st4xb
-.section st4ya
-.section st4yb
-.section st4za
-.section st4zb
-.section st41a
-.section st41b
-.section st42a
-.section st42b
-.section st43a
-.section st43b
-.section st44a
-.section st44b
-.section st45a
-.section st45b
-.section st46a
-.section st46b
-.section st47a
-.section st47b
-.section st48a
-.section st48b
-.section st49a
-.section st49b
-.section st40a
-.section st40b
-.section st5aa
-.section st5ab
-.section st5ba
-.section st5bb
-.section st5ca
-.section st5cb
-.section st5da
-.section st5db
-.section st5ea
-.section st5eb
-.section st5fa
-.section st5fb
-.section st5ga
-.section st5gb
-.section st5ha
-.section st5hb
-.section st5ia
-.section st5ib
-.section st5ja
-.section st5jb
-.section st5ka
-.section st5kb
-.section st5la
-.section st5lb
-.section st5ma
-.section st5mb
-.section st5na
-.section st5nb
-.section st5oa
-.section st5ob
-.section st5pa
-.section st5pb
-.section st5qa
-.section st5qb
-.section st5ra
-.section st5rb
-.section st5sa
-.section st5sb
-.section st5ta
-.section st5tb
-.section st5ua
-.section st5ub
-.section st5va
-.section st5vb
-.section st5wa
-.section st5wb
-.section st5xa
-.section st5xb
-.section st5ya
-.section st5yb
-.section st5za
-.section st5zb
-.section st51a
-.section st51b
-.section st52a
-.section st52b
-.section st53a
-.section st53b
-.section st54a
-.section st54b
-.section st55a
-.section st55b
-.section st56a
-.section st56b
-.section st57a
-.section st57b
-.section st58a
-.section st58b
-.section st59a
-.section st59b
-.section st50a
-.section st50b
-.section st6aa
-.section st6ab
-.section st6ba
-.section st6bb
-.section st6ca
-.section st6cb
-.section st6da
-.section st6db
-.section st6ea
-.section st6eb
-.section st6fa
-.section st6fb
-.section st6ga
-.section st6gb
-.section st6ha
-.section st6hb
-.section st6ia
-.section st6ib
-.section st6ja
-.section st6jb
-.section st6ka
-.section st6kb
-.section st6la
-.section st6lb
-.section st6ma
-.section st6mb
-.section st6na
-.section st6nb
-.section st6oa
-.section st6ob
-.section st6pa
-.section st6pb
-.section st6qa
-.section st6qb
-.section st6ra
-.section st6rb
-.section st6sa
-.section st6sb
-.section st6ta
-.section st6tb
-.section st6ua
-.section st6ub
-.section st6va
-.section st6vb
-.section st6wa
-.section st6wb
-.section st6xa
-.section st6xb
-.section st6ya
-.section st6yb
-.section st6za
-.section st6zb
-.section st61a
-.section st61b
-.section st62a
-.section st62b
-.section st63a
-.section st63b
-.section st64a
-.section st64b
-.section st65a
-.section st65b
-.section st66a
-.section st66b
-.section st67a
-.section st67b
-.section st68a
-.section st68b
-.section st69a
-.section st69b
-.section st60a
-.section st60b
-.section st7aa
-.section st7ab
-.section st7ba
-.section st7bb
-.section st7ca
-.section st7cb
-.section st7da
-.section st7db
-.section st7ea
-.section st7eb
-.section st7fa
-.section st7fb
-.section st7ga
-.section st7gb
-.section st7ha
-.section st7hb
-.section st7ia
-.section st7ib
-.section st7ja
-.section st7jb
-.section st7ka
-.section st7kb
-.section st7la
-.section st7lb
-.section st7ma
-.section st7mb
-.section st7na
-.section st7nb
-.section st7oa
-.section st7ob
-.section st7pa
-.section st7pb
-.section st7qa
-.section st7qb
-.section st7ra
-.section st7rb
-.section st7sa
-.section st7sb
-.section st7ta
-.section st7tb
-.section st7ua
-.section st7ub
-.section st7va
-.section st7vb
-.section st7wa
-.section st7wb
-.section st7xa
-.section st7xb
-.section st7ya
-.section st7yb
-.section st7za
-.section st7zb
-.section st71a
-.section st71b
-.section st72a
-.section st72b
-.section st73a
-.section st73b
-.section st74a
-.section st74b
-.section st75a
-.section st75b
-.section st76a
-.section st76b
-.section st77a
-.section st77b
-.section st78a
-.section st78b
-.section st79a
-.section st79b
-.section st70a
-.section st70b
-.section st8aa
-.section st8ab
-.section st8ba
-.section st8bb
-.section st8ca
-.section st8cb
-.section st8da
-.section st8db
-.section st8ea
-.section st8eb
-.section st8fa
-.section st8fb
-.section st8ga
-.section st8gb
-.section st8ha
-.section st8hb
-.section st8ia
-.section st8ib
-.section st8ja
-.section st8jb
-.section st8ka
-.section st8kb
-.section st8la
-.section st8lb
-.section st8ma
-.section st8mb
-.section st8na
-.section st8nb
-.section st8oa
-.section st8ob
-.section st8pa
-.section st8pb
-.section st8qa
-.section st8qb
-.section st8ra
-.section st8rb
-.section st8sa
-.section st8sb
-.section st8ta
-.section st8tb
-.section st8ua
-.section st8ub
-.section st8va
-.section st8vb
-.section st8wa
-.section st8wb
-.section st8xa
-.section st8xb
-.section st8ya
-.section st8yb
-.section st8za
-.section st8zb
-.section st81a
-.section st81b
-.section st82a
-.section st82b
-.section st83a
-.section st83b
-.section st84a
-.section st84b
-.section st85a
-.section st85b
-.section st86a
-.section st86b
-.section st87a
-.section st87b
-.section st88a
-.section st88b
-.section st89a
-.section st89b
-.section st80a
-.section st80b
-.section st9aa
-.section st9ab
-.section st9ba
-.section st9bb
-.section st9ca
-.section st9cb
-.section st9da
-.section st9db
-.section st9ea
-.section st9eb
-.section st9fa
-.section st9fb
-.section st9ga
-.section st9gb
-.section st9ha
-.section st9hb
-.section st9ia
-.section st9ib
-.section st9ja
-.section st9jb
-.section st9ka
-.section st9kb
-.section st9la
-.section st9lb
-.section st9ma
-.section st9mb
-.section st9na
-.section st9nb
-.section st9oa
-.section st9ob
-.section st9pa
-.section st9pb
-.section st9qa
-.section st9qb
-.section st9ra
-.section st9rb
-.section st9sa
-.section st9sb
-.section st9ta
-.section st9tb
-.section st9ua
-.section st9ub
-.section st9va
-.section st9vb
-.section st9wa
-.section st9wb
-.section st9xa
-.section st9xb
-.section st9ya
-.section st9yb
-.section st9za
-.section st9zb
-.section st91a
-.section st91b
-.section st92a
-.section st92b
-.section st93a
-.section st93b
-.section st94a
-.section st94b
-.section st95a
-.section st95b
-.section st96a
-.section st96b
-.section st97a
-.section st97b
-.section st98a
-.section st98b
-.section st99a
-.section st99b
-.section st90a
-.section st90b
-.section st0aa
-.section st0ab
-.section st0ba
-.section st0bb
-.section st0ca
-.section st0cb
-.section st0da
-.section st0db
-.section st0ea
-.section st0eb
-.section st0fa
-.section st0fb
-.section st0ga
-.section st0gb
-.section st0ha
-.section st0hb
-.section st0ia
-.section st0ib
-.section st0ja
-.section st0jb
-.section st0ka
-.section st0kb
-.section st0la
-.section st0lb
-.section st0ma
-.section st0mb
-.section st0na
-.section st0nb
-.section st0oa
-.section st0ob
-.section st0pa
-.section st0pb
-.section st0qa
-.section st0qb
-.section st0ra
-.section st0rb
-.section st0sa
-.section st0sb
-.section st0ta
-.section st0tb
-.section st0ua
-.section st0ub
-.section st0va
-.section st0vb
-.section st0wa
-.section st0wb
-.section st0xa
-.section st0xb
-.section st0ya
-.section st0yb
-.section st0za
-.section st0zb
-.section st01a
-.section st01b
-.section st02a
-.section st02b
-.section st03a
-.section st03b
-.section st04a
-.section st04b
-.section st05a
-.section st05b
-.section st06a
-.section st06b
-.section st07a
-.section st07b
-.section st08a
-.section st08b
-.section st09a
-.section st09b
-.section st00a
-.section st00b
-.section suaaa
-.section suaab
-.section suaba
-.section suabb
-.section suaca
-.section suacb
-.section suada
-.section suadb
-.section suaea
-.section suaeb
-.section suafa
-.section suafb
-.section suaga
-.section suagb
-.section suaha
-.section suahb
-.section suaia
-.section suaib
-.section suaja
-.section suajb
-.section suaka
-.section suakb
-.section suala
-.section sualb
-.section suama
-.section suamb
-.section suana
-.section suanb
-.section suaoa
-.section suaob
-.section suapa
-.section suapb
-.section suaqa
-.section suaqb
-.section suara
-.section suarb
-.section suasa
-.section suasb
-.section suata
-.section suatb
-.section suaua
-.section suaub
-.section suava
-.section suavb
-.section suawa
-.section suawb
-.section suaxa
-.section suaxb
-.section suaya
-.section suayb
-.section suaza
-.section suazb
-.section sua1a
-.section sua1b
-.section sua2a
-.section sua2b
-.section sua3a
-.section sua3b
-.section sua4a
-.section sua4b
-.section sua5a
-.section sua5b
-.section sua6a
-.section sua6b
-.section sua7a
-.section sua7b
-.section sua8a
-.section sua8b
-.section sua9a
-.section sua9b
-.section sua0a
-.section sua0b
-.section subaa
-.section subab
-.section subba
-.section subbb
-.section subca
-.section subcb
-.section subda
-.section subdb
-.section subea
-.section subeb
-.section subfa
-.section subfb
-.section subga
-.section subgb
-.section subha
-.section subhb
-.section subia
-.section subib
-.section subja
-.section subjb
-.section subka
-.section subkb
-.section subla
-.section sublb
-.section subma
-.section submb
-.section subna
-.section subnb
-.section suboa
-.section subob
-.section subpa
-.section subpb
-.section subqa
-.section subqb
-.section subra
-.section subrb
-.section subsa
-.section subsb
-.section subta
-.section subtb
-.section subua
-.section subub
-.section subva
-.section subvb
-.section subwa
-.section subwb
-.section subxa
-.section subxb
-.section subya
-.section subyb
-.section subza
-.section subzb
-.section sub1a
-.section sub1b
-.section sub2a
-.section sub2b
-.section sub3a
-.section sub3b
-.section sub4a
-.section sub4b
-.section sub5a
-.section sub5b
-.section sub6a
-.section sub6b
-.section sub7a
-.section sub7b
-.section sub8a
-.section sub8b
-.section sub9a
-.section sub9b
-.section sub0a
-.section sub0b
-.section sucaa
-.section sucab
-.section sucba
-.section sucbb
-.section succa
-.section succb
-.section sucda
-.section sucdb
-.section sucea
-.section suceb
-.section sucfa
-.section sucfb
-.section sucga
-.section sucgb
-.section sucha
-.section suchb
-.section sucia
-.section sucib
-.section sucja
-.section sucjb
-.section sucka
-.section suckb
-.section sucla
-.section suclb
-.section sucma
-.section sucmb
-.section sucna
-.section sucnb
-.section sucoa
-.section sucob
-.section sucpa
-.section sucpb
-.section sucqa
-.section sucqb
-.section sucra
-.section sucrb
-.section sucsa
-.section sucsb
-.section sucta
-.section suctb
-.section sucua
-.section sucub
-.section sucva
-.section sucvb
-.section sucwa
-.section sucwb
-.section sucxa
-.section sucxb
-.section sucya
-.section sucyb
-.section sucza
-.section suczb
-.section suc1a
-.section suc1b
-.section suc2a
-.section suc2b
-.section suc3a
-.section suc3b
-.section suc4a
-.section suc4b
-.section suc5a
-.section suc5b
-.section suc6a
-.section suc6b
-.section suc7a
-.section suc7b
-.section suc8a
-.section suc8b
-.section suc9a
-.section suc9b
-.section suc0a
-.section suc0b
-.section sudaa
-.section sudab
-.section sudba
-.section sudbb
-.section sudca
-.section sudcb
-.section sudda
-.section suddb
-.section sudea
-.section sudeb
-.section sudfa
-.section sudfb
-.section sudga
-.section sudgb
-.section sudha
-.section sudhb
-.section sudia
-.section sudib
-.section sudja
-.section sudjb
-.section sudka
-.section sudkb
-.section sudla
-.section sudlb
-.section sudma
-.section sudmb
-.section sudna
-.section sudnb
-.section sudoa
-.section sudob
-.section sudpa
-.section sudpb
-.section sudqa
-.section sudqb
-.section sudra
-.section sudrb
-.section sudsa
-.section sudsb
-.section sudta
-.section sudtb
-.section sudua
-.section sudub
-.section sudva
-.section sudvb
-.section sudwa
-.section sudwb
-.section sudxa
-.section sudxb
-.section sudya
-.section sudyb
-.section sudza
-.section sudzb
-.section sud1a
-.section sud1b
-.section sud2a
-.section sud2b
-.section sud3a
-.section sud3b
-.section sud4a
-.section sud4b
-.section sud5a
-.section sud5b
-.section sud6a
-.section sud6b
-.section sud7a
-.section sud7b
-.section sud8a
-.section sud8b
-.section sud9a
-.section sud9b
-.section sud0a
-.section sud0b
-.section sueaa
-.section sueab
-.section sueba
-.section suebb
-.section sueca
-.section suecb
-.section sueda
-.section suedb
-.section sueea
-.section sueeb
-.section suefa
-.section suefb
-.section suega
-.section suegb
-.section sueha
-.section suehb
-.section sueia
-.section sueib
-.section sueja
-.section suejb
-.section sueka
-.section suekb
-.section suela
-.section suelb
-.section suema
-.section suemb
-.section suena
-.section suenb
-.section sueoa
-.section sueob
-.section suepa
-.section suepb
-.section sueqa
-.section sueqb
-.section suera
-.section suerb
-.section suesa
-.section suesb
-.section sueta
-.section suetb
-.section sueua
-.section sueub
-.section sueva
-.section suevb
-.section suewa
-.section suewb
-.section suexa
-.section suexb
-.section sueya
-.section sueyb
-.section sueza
-.section suezb
-.section sue1a
-.section sue1b
-.section sue2a
-.section sue2b
-.section sue3a
-.section sue3b
-.section sue4a
-.section sue4b
-.section sue5a
-.section sue5b
-.section sue6a
-.section sue6b
-.section sue7a
-.section sue7b
-.section sue8a
-.section sue8b
-.section sue9a
-.section sue9b
-.section sue0a
-.section sue0b
-.section sufaa
-.section sufab
-.section sufba
-.section sufbb
-.section sufca
-.section sufcb
-.section sufda
-.section sufdb
-.section sufea
-.section sufeb
-.section suffa
-.section suffb
-.section sufga
-.section sufgb
-.section sufha
-.section sufhb
-.section sufia
-.section sufib
-.section sufja
-.section sufjb
-.section sufka
-.section sufkb
-.section sufla
-.section suflb
-.section sufma
-.section sufmb
-.section sufna
-.section sufnb
-.section sufoa
-.section sufob
-.section sufpa
-.section sufpb
-.section sufqa
-.section sufqb
-.section sufra
-.section sufrb
-.section sufsa
-.section sufsb
-.section sufta
-.section suftb
-.section sufua
-.section sufub
-.section sufva
-.section sufvb
-.section sufwa
-.section sufwb
-.section sufxa
-.section sufxb
-.section sufya
-.section sufyb
-.section sufza
-.section sufzb
-.section suf1a
-.section suf1b
-.section suf2a
-.section suf2b
-.section suf3a
-.section suf3b
-.section suf4a
-.section suf4b
-.section suf5a
-.section suf5b
-.section suf6a
-.section suf6b
-.section suf7a
-.section suf7b
-.section suf8a
-.section suf8b
-.section suf9a
-.section suf9b
-.section suf0a
-.section suf0b
-.section sugaa
-.section sugab
-.section sugba
-.section sugbb
-.section sugca
-.section sugcb
-.section sugda
-.section sugdb
-.section sugea
-.section sugeb
-.section sugfa
-.section sugfb
-.section sugga
-.section suggb
-.section sugha
-.section sughb
-.section sugia
-.section sugib
-.section sugja
-.section sugjb
-.section sugka
-.section sugkb
-.section sugla
-.section suglb
-.section sugma
-.section sugmb
-.section sugna
-.section sugnb
-.section sugoa
-.section sugob
-.section sugpa
-.section sugpb
-.section sugqa
-.section sugqb
-.section sugra
-.section sugrb
-.section sugsa
-.section sugsb
-.section sugta
-.section sugtb
-.section sugua
-.section sugub
-.section sugva
-.section sugvb
-.section sugwa
-.section sugwb
-.section sugxa
-.section sugxb
-.section sugya
-.section sugyb
-.section sugza
-.section sugzb
-.section sug1a
-.section sug1b
-.section sug2a
-.section sug2b
-.section sug3a
-.section sug3b
-.section sug4a
-.section sug4b
-.section sug5a
-.section sug5b
-.section sug6a
-.section sug6b
-.section sug7a
-.section sug7b
-.section sug8a
-.section sug8b
-.section sug9a
-.section sug9b
-.section sug0a
-.section sug0b
-.section suhaa
-.section suhab
-.section suhba
-.section suhbb
-.section suhca
-.section suhcb
-.section suhda
-.section suhdb
-.section suhea
-.section suheb
-.section suhfa
-.section suhfb
-.section suhga
-.section suhgb
-.section suhha
-.section suhhb
-.section suhia
-.section suhib
-.section suhja
-.section suhjb
-.section suhka
-.section suhkb
-.section suhla
-.section suhlb
-.section suhma
-.section suhmb
-.section suhna
-.section suhnb
-.section suhoa
-.section suhob
-.section suhpa
-.section suhpb
-.section suhqa
-.section suhqb
-.section suhra
-.section suhrb
-.section suhsa
-.section suhsb
-.section suhta
-.section suhtb
-.section suhua
-.section suhub
-.section suhva
-.section suhvb
-.section suhwa
-.section suhwb
-.section suhxa
-.section suhxb
-.section suhya
-.section suhyb
-.section suhza
-.section suhzb
-.section suh1a
-.section suh1b
-.section suh2a
-.section suh2b
-.section suh3a
-.section suh3b
-.section suh4a
-.section suh4b
-.section suh5a
-.section suh5b
-.section suh6a
-.section suh6b
-.section suh7a
-.section suh7b
-.section suh8a
-.section suh8b
-.section suh9a
-.section suh9b
-.section suh0a
-.section suh0b
-.section suiaa
-.section suiab
-.section suiba
-.section suibb
-.section suica
-.section suicb
-.section suida
-.section suidb
-.section suiea
-.section suieb
-.section suifa
-.section suifb
-.section suiga
-.section suigb
-.section suiha
-.section suihb
-.section suiia
-.section suiib
-.section suija
-.section suijb
-.section suika
-.section suikb
-.section suila
-.section suilb
-.section suima
-.section suimb
-.section suina
-.section suinb
-.section suioa
-.section suiob
-.section suipa
-.section suipb
-.section suiqa
-.section suiqb
-.section suira
-.section suirb
-.section suisa
-.section suisb
-.section suita
-.section suitb
-.section suiua
-.section suiub
-.section suiva
-.section suivb
-.section suiwa
-.section suiwb
-.section suixa
-.section suixb
-.section suiya
-.section suiyb
-.section suiza
-.section suizb
-.section sui1a
-.section sui1b
-.section sui2a
-.section sui2b
-.section sui3a
-.section sui3b
-.section sui4a
-.section sui4b
-.section sui5a
-.section sui5b
-.section sui6a
-.section sui6b
-.section sui7a
-.section sui7b
-.section sui8a
-.section sui8b
-.section sui9a
-.section sui9b
-.section sui0a
-.section sui0b
-.section sujaa
-.section sujab
-.section sujba
-.section sujbb
-.section sujca
-.section sujcb
-.section sujda
-.section sujdb
-.section sujea
-.section sujeb
-.section sujfa
-.section sujfb
-.section sujga
-.section sujgb
-.section sujha
-.section sujhb
-.section sujia
-.section sujib
-.section sujja
-.section sujjb
-.section sujka
-.section sujkb
-.section sujla
-.section sujlb
-.section sujma
-.section sujmb
-.section sujna
-.section sujnb
-.section sujoa
-.section sujob
-.section sujpa
-.section sujpb
-.section sujqa
-.section sujqb
-.section sujra
-.section sujrb
-.section sujsa
-.section sujsb
-.section sujta
-.section sujtb
-.section sujua
-.section sujub
-.section sujva
-.section sujvb
-.section sujwa
-.section sujwb
-.section sujxa
-.section sujxb
-.section sujya
-.section sujyb
-.section sujza
-.section sujzb
-.section suj1a
-.section suj1b
-.section suj2a
-.section suj2b
-.section suj3a
-.section suj3b
-.section suj4a
-.section suj4b
-.section suj5a
-.section suj5b
-.section suj6a
-.section suj6b
-.section suj7a
-.section suj7b
-.section suj8a
-.section suj8b
-.section suj9a
-.section suj9b
-.section suj0a
-.section suj0b
-.section sukaa
-.section sukab
-.section sukba
-.section sukbb
-.section sukca
-.section sukcb
-.section sukda
-.section sukdb
-.section sukea
-.section sukeb
-.section sukfa
-.section sukfb
-.section sukga
-.section sukgb
-.section sukha
-.section sukhb
-.section sukia
-.section sukib
-.section sukja
-.section sukjb
-.section sukka
-.section sukkb
-.section sukla
-.section suklb
-.section sukma
-.section sukmb
-.section sukna
-.section suknb
-.section sukoa
-.section sukob
-.section sukpa
-.section sukpb
-.section sukqa
-.section sukqb
-.section sukra
-.section sukrb
-.section suksa
-.section suksb
-.section sukta
-.section suktb
-.section sukua
-.section sukub
-.section sukva
-.section sukvb
-.section sukwa
-.section sukwb
-.section sukxa
-.section sukxb
-.section sukya
-.section sukyb
-.section sukza
-.section sukzb
-.section suk1a
-.section suk1b
-.section suk2a
-.section suk2b
-.section suk3a
-.section suk3b
-.section suk4a
-.section suk4b
-.section suk5a
-.section suk5b
-.section suk6a
-.section suk6b
-.section suk7a
-.section suk7b
-.section suk8a
-.section suk8b
-.section suk9a
-.section suk9b
-.section suk0a
-.section suk0b
-.section sulaa
-.section sulab
-.section sulba
-.section sulbb
-.section sulca
-.section sulcb
-.section sulda
-.section suldb
-.section sulea
-.section suleb
-.section sulfa
-.section sulfb
-.section sulga
-.section sulgb
-.section sulha
-.section sulhb
-.section sulia
-.section sulib
-.section sulja
-.section suljb
-.section sulka
-.section sulkb
-.section sulla
-.section sullb
-.section sulma
-.section sulmb
-.section sulna
-.section sulnb
-.section suloa
-.section sulob
-.section sulpa
-.section sulpb
-.section sulqa
-.section sulqb
-.section sulra
-.section sulrb
-.section sulsa
-.section sulsb
-.section sulta
-.section sultb
-.section sulua
-.section sulub
-.section sulva
-.section sulvb
-.section sulwa
-.section sulwb
-.section sulxa
-.section sulxb
-.section sulya
-.section sulyb
-.section sulza
-.section sulzb
-.section sul1a
-.section sul1b
-.section sul2a
-.section sul2b
-.section sul3a
-.section sul3b
-.section sul4a
-.section sul4b
-.section sul5a
-.section sul5b
-.section sul6a
-.section sul6b
-.section sul7a
-.section sul7b
-.section sul8a
-.section sul8b
-.section sul9a
-.section sul9b
-.section sul0a
-.section sul0b
-.section sumaa
-.section sumab
-.section sumba
-.section sumbb
-.section sumca
-.section sumcb
-.section sumda
-.section sumdb
-.section sumea
-.section sumeb
-.section sumfa
-.section sumfb
-.section sumga
-.section sumgb
-.section sumha
-.section sumhb
-.section sumia
-.section sumib
-.section sumja
-.section sumjb
-.section sumka
-.section sumkb
-.section sumla
-.section sumlb
-.section summa
-.section summb
-.section sumna
-.section sumnb
-.section sumoa
-.section sumob
-.section sumpa
-.section sumpb
-.section sumqa
-.section sumqb
-.section sumra
-.section sumrb
-.section sumsa
-.section sumsb
-.section sumta
-.section sumtb
-.section sumua
-.section sumub
-.section sumva
-.section sumvb
-.section sumwa
-.section sumwb
-.section sumxa
-.section sumxb
-.section sumya
-.section sumyb
-.section sumza
-.section sumzb
-.section sum1a
-.section sum1b
-.section sum2a
-.section sum2b
-.section sum3a
-.section sum3b
-.section sum4a
-.section sum4b
-.section sum5a
-.section sum5b
-.section sum6a
-.section sum6b
-.section sum7a
-.section sum7b
-.section sum8a
-.section sum8b
-.section sum9a
-.section sum9b
-.section sum0a
-.section sum0b
-.section sunaa
-.section sunab
-.section sunba
-.section sunbb
-.section sunca
-.section suncb
-.section sunda
-.section sundb
-.section sunea
-.section suneb
-.section sunfa
-.section sunfb
-.section sunga
-.section sungb
-.section sunha
-.section sunhb
-.section sunia
-.section sunib
-.section sunja
-.section sunjb
-.section sunka
-.section sunkb
-.section sunla
-.section sunlb
-.section sunma
-.section sunmb
-.section sunna
-.section sunnb
-.section sunoa
-.section sunob
-.section sunpa
-.section sunpb
-.section sunqa
-.section sunqb
-.section sunra
-.section sunrb
-.section sunsa
-.section sunsb
-.section sunta
-.section suntb
-.section sunua
-.section sunub
-.section sunva
-.section sunvb
-.section sunwa
-.section sunwb
-.section sunxa
-.section sunxb
-.section sunya
-.section sunyb
-.section sunza
-.section sunzb
-.section sun1a
-.section sun1b
-.section sun2a
-.section sun2b
-.section sun3a
-.section sun3b
-.section sun4a
-.section sun4b
-.section sun5a
-.section sun5b
-.section sun6a
-.section sun6b
-.section sun7a
-.section sun7b
-.section sun8a
-.section sun8b
-.section sun9a
-.section sun9b
-.section sun0a
-.section sun0b
-.section suoaa
-.section suoab
-.section suoba
-.section suobb
-.section suoca
-.section suocb
-.section suoda
-.section suodb
-.section suoea
-.section suoeb
-.section suofa
-.section suofb
-.section suoga
-.section suogb
-.section suoha
-.section suohb
-.section suoia
-.section suoib
-.section suoja
-.section suojb
-.section suoka
-.section suokb
-.section suola
-.section suolb
-.section suoma
-.section suomb
-.section suona
-.section suonb
-.section suooa
-.section suoob
-.section suopa
-.section suopb
-.section suoqa
-.section suoqb
-.section suora
-.section suorb
-.section suosa
-.section suosb
-.section suota
-.section suotb
-.section suoua
-.section suoub
-.section suova
-.section suovb
-.section suowa
-.section suowb
-.section suoxa
-.section suoxb
-.section suoya
-.section suoyb
-.section suoza
-.section suozb
-.section suo1a
-.section suo1b
-.section suo2a
-.section suo2b
-.section suo3a
-.section suo3b
-.section suo4a
-.section suo4b
-.section suo5a
-.section suo5b
-.section suo6a
-.section suo6b
-.section suo7a
-.section suo7b
-.section suo8a
-.section suo8b
-.section suo9a
-.section suo9b
-.section suo0a
-.section suo0b
-.section supaa
-.section supab
-.section supba
-.section supbb
-.section supca
-.section supcb
-.section supda
-.section supdb
-.section supea
-.section supeb
-.section supfa
-.section supfb
-.section supga
-.section supgb
-.section supha
-.section suphb
-.section supia
-.section supib
-.section supja
-.section supjb
-.section supka
-.section supkb
-.section supla
-.section suplb
-.section supma
-.section supmb
-.section supna
-.section supnb
-.section supoa
-.section supob
-.section suppa
-.section suppb
-.section supqa
-.section supqb
-.section supra
-.section suprb
-.section supsa
-.section supsb
-.section supta
-.section suptb
-.section supua
-.section supub
-.section supva
-.section supvb
-.section supwa
-.section supwb
-.section supxa
-.section supxb
-.section supya
-.section supyb
-.section supza
-.section supzb
-.section sup1a
-.section sup1b
-.section sup2a
-.section sup2b
-.section sup3a
-.section sup3b
-.section sup4a
-.section sup4b
-.section sup5a
-.section sup5b
-.section sup6a
-.section sup6b
-.section sup7a
-.section sup7b
-.section sup8a
-.section sup8b
-.section sup9a
-.section sup9b
-.section sup0a
-.section sup0b
-.section suqaa
-.section suqab
-.section suqba
-.section suqbb
-.section suqca
-.section suqcb
-.section suqda
-.section suqdb
-.section suqea
-.section suqeb
-.section suqfa
-.section suqfb
-.section suqga
-.section suqgb
-.section suqha
-.section suqhb
-.section suqia
-.section suqib
-.section suqja
-.section suqjb
-.section suqka
-.section suqkb
-.section suqla
-.section suqlb
-.section suqma
-.section suqmb
-.section suqna
-.section suqnb
-.section suqoa
-.section suqob
-.section suqpa
-.section suqpb
-.section suqqa
-.section suqqb
-.section suqra
-.section suqrb
-.section suqsa
-.section suqsb
-.section suqta
-.section suqtb
-.section suqua
-.section suqub
-.section suqva
-.section suqvb
-.section suqwa
-.section suqwb
-.section suqxa
-.section suqxb
-.section suqya
-.section suqyb
-.section suqza
-.section suqzb
-.section suq1a
-.section suq1b
-.section suq2a
-.section suq2b
-.section suq3a
-.section suq3b
-.section suq4a
-.section suq4b
-.section suq5a
-.section suq5b
-.section suq6a
-.section suq6b
-.section suq7a
-.section suq7b
-.section suq8a
-.section suq8b
-.section suq9a
-.section suq9b
-.section suq0a
-.section suq0b
-.section suraa
-.section surab
-.section surba
-.section surbb
-.section surca
-.section surcb
-.section surda
-.section surdb
-.section surea
-.section sureb
-.section surfa
-.section surfb
-.section surga
-.section surgb
-.section surha
-.section surhb
-.section suria
-.section surib
-.section surja
-.section surjb
-.section surka
-.section surkb
-.section surla
-.section surlb
-.section surma
-.section surmb
-.section surna
-.section surnb
-.section suroa
-.section surob
-.section surpa
-.section surpb
-.section surqa
-.section surqb
-.section surra
-.section surrb
-.section sursa
-.section sursb
-.section surta
-.section surtb
-.section surua
-.section surub
-.section surva
-.section survb
-.section surwa
-.section surwb
-.section surxa
-.section surxb
-.section surya
-.section suryb
-.section surza
-.section surzb
-.section sur1a
-.section sur1b
-.section sur2a
-.section sur2b
-.section sur3a
-.section sur3b
-.section sur4a
-.section sur4b
-.section sur5a
-.section sur5b
-.section sur6a
-.section sur6b
-.section sur7a
-.section sur7b
-.section sur8a
-.section sur8b
-.section sur9a
-.section sur9b
-.section sur0a
-.section sur0b
-.section susaa
-.section susab
-.section susba
-.section susbb
-.section susca
-.section suscb
-.section susda
-.section susdb
-.section susea
-.section suseb
-.section susfa
-.section susfb
-.section susga
-.section susgb
-.section susha
-.section sushb
-.section susia
-.section susib
-.section susja
-.section susjb
-.section suska
-.section suskb
-.section susla
-.section suslb
-.section susma
-.section susmb
-.section susna
-.section susnb
-.section susoa
-.section susob
-.section suspa
-.section suspb
-.section susqa
-.section susqb
-.section susra
-.section susrb
-.section sussa
-.section sussb
-.section susta
-.section sustb
-.section susua
-.section susub
-.section susva
-.section susvb
-.section suswa
-.section suswb
-.section susxa
-.section susxb
-.section susya
-.section susyb
-.section susza
-.section suszb
-.section sus1a
-.section sus1b
-.section sus2a
-.section sus2b
-.section sus3a
-.section sus3b
-.section sus4a
-.section sus4b
-.section sus5a
-.section sus5b
-.section sus6a
-.section sus6b
-.section sus7a
-.section sus7b
-.section sus8a
-.section sus8b
-.section sus9a
-.section sus9b
-.section sus0a
-.section sus0b
-.section sutaa
-.section sutab
-.section sutba
-.section sutbb
-.section sutca
-.section sutcb
-.section sutda
-.section sutdb
-.section sutea
-.section suteb
-.section sutfa
-.section sutfb
-.section sutga
-.section sutgb
-.section sutha
-.section suthb
-.section sutia
-.section sutib
-.section sutja
-.section sutjb
-.section sutka
-.section sutkb
-.section sutla
-.section sutlb
-.section sutma
-.section sutmb
-.section sutna
-.section sutnb
-.section sutoa
-.section sutob
-.section sutpa
-.section sutpb
-.section sutqa
-.section sutqb
-.section sutra
-.section sutrb
-.section sutsa
-.section sutsb
-.section sutta
-.section suttb
-.section sutua
-.section sutub
-.section sutva
-.section sutvb
-.section sutwa
-.section sutwb
-.section sutxa
-.section sutxb
-.section sutya
-.section sutyb
-.section sutza
-.section sutzb
-.section sut1a
-.section sut1b
-.section sut2a
-.section sut2b
-.section sut3a
-.section sut3b
-.section sut4a
-.section sut4b
-.section sut5a
-.section sut5b
-.section sut6a
-.section sut6b
-.section sut7a
-.section sut7b
-.section sut8a
-.section sut8b
-.section sut9a
-.section sut9b
-.section sut0a
-.section sut0b
-.section suuaa
-.section suuab
-.section suuba
-.section suubb
-.section suuca
-.section suucb
-.section suuda
-.section suudb
-.section suuea
-.section suueb
-.section suufa
-.section suufb
-.section suuga
-.section suugb
-.section suuha
-.section suuhb
-.section suuia
-.section suuib
-.section suuja
-.section suujb
-.section suuka
-.section suukb
-.section suula
-.section suulb
-.section suuma
-.section suumb
-.section suuna
-.section suunb
-.section suuoa
-.section suuob
-.section suupa
-.section suupb
-.section suuqa
-.section suuqb
-.section suura
-.section suurb
-.section suusa
-.section suusb
-.section suuta
-.section suutb
-.section suuua
-.section suuub
-.section suuva
-.section suuvb
-.section suuwa
-.section suuwb
-.section suuxa
-.section suuxb
-.section suuya
-.section suuyb
-.section suuza
-.section suuzb
-.section suu1a
-.section suu1b
-.section suu2a
-.section suu2b
-.section suu3a
-.section suu3b
-.section suu4a
-.section suu4b
-.section suu5a
-.section suu5b
-.section suu6a
-.section suu6b
-.section suu7a
-.section suu7b
-.section suu8a
-.section suu8b
-.section suu9a
-.section suu9b
-.section suu0a
-.section suu0b
-.section suvaa
-.section suvab
-.section suvba
-.section suvbb
-.section suvca
-.section suvcb
-.section suvda
-.section suvdb
-.section suvea
-.section suveb
-.section suvfa
-.section suvfb
-.section suvga
-.section suvgb
-.section suvha
-.section suvhb
-.section suvia
-.section suvib
-.section suvja
-.section suvjb
-.section suvka
-.section suvkb
-.section suvla
-.section suvlb
-.section suvma
-.section suvmb
-.section suvna
-.section suvnb
-.section suvoa
-.section suvob
-.section suvpa
-.section suvpb
-.section suvqa
-.section suvqb
-.section suvra
-.section suvrb
-.section suvsa
-.section suvsb
-.section suvta
-.section suvtb
-.section suvua
-.section suvub
-.section suvva
-.section suvvb
-.section suvwa
-.section suvwb
-.section suvxa
-.section suvxb
-.section suvya
-.section suvyb
-.section suvza
-.section suvzb
-.section suv1a
-.section suv1b
-.section suv2a
-.section suv2b
-.section suv3a
-.section suv3b
-.section suv4a
-.section suv4b
-.section suv5a
-.section suv5b
-.section suv6a
-.section suv6b
-.section suv7a
-.section suv7b
-.section suv8a
-.section suv8b
-.section suv9a
-.section suv9b
-.section suv0a
-.section suv0b
-.section suwaa
-.section suwab
-.section suwba
-.section suwbb
-.section suwca
-.section suwcb
-.section suwda
-.section suwdb
-.section suwea
-.section suweb
-.section suwfa
-.section suwfb
-.section suwga
-.section suwgb
-.section suwha
-.section suwhb
-.section suwia
-.section suwib
-.section suwja
-.section suwjb
-.section suwka
-.section suwkb
-.section suwla
-.section suwlb
-.section suwma
-.section suwmb
-.section suwna
-.section suwnb
-.section suwoa
-.section suwob
-.section suwpa
-.section suwpb
-.section suwqa
-.section suwqb
-.section suwra
-.section suwrb
-.section suwsa
-.section suwsb
-.section suwta
-.section suwtb
-.section suwua
-.section suwub
-.section suwva
-.section suwvb
-.section suwwa
-.section suwwb
-.section suwxa
-.section suwxb
-.section suwya
-.section suwyb
-.section suwza
-.section suwzb
-.section suw1a
-.section suw1b
-.section suw2a
-.section suw2b
-.section suw3a
-.section suw3b
-.section suw4a
-.section suw4b
-.section suw5a
-.section suw5b
-.section suw6a
-.section suw6b
-.section suw7a
-.section suw7b
-.section suw8a
-.section suw8b
-.section suw9a
-.section suw9b
-.section suw0a
-.section suw0b
-.section suxaa
-.section suxab
-.section suxba
-.section suxbb
-.section suxca
-.section suxcb
-.section suxda
-.section suxdb
-.section suxea
-.section suxeb
-.section suxfa
-.section suxfb
-.section suxga
-.section suxgb
-.section suxha
-.section suxhb
-.section suxia
-.section suxib
-.section suxja
-.section suxjb
-.section suxka
-.section suxkb
-.section suxla
-.section suxlb
-.section suxma
-.section suxmb
-.section suxna
-.section suxnb
-.section suxoa
-.section suxob
-.section suxpa
-.section suxpb
-.section suxqa
-.section suxqb
-.section suxra
-.section suxrb
-.section suxsa
-.section suxsb
-.section suxta
-.section suxtb
-.section suxua
-.section suxub
-.section suxva
-.section suxvb
-.section suxwa
-.section suxwb
-.section suxxa
-.section suxxb
-.section suxya
-.section suxyb
-.section suxza
-.section suxzb
-.section sux1a
-.section sux1b
-.section sux2a
-.section sux2b
-.section sux3a
-.section sux3b
-.section sux4a
-.section sux4b
-.section sux5a
-.section sux5b
-.section sux6a
-.section sux6b
-.section sux7a
-.section sux7b
-.section sux8a
-.section sux8b
-.section sux9a
-.section sux9b
-.section sux0a
-.section sux0b
-.section suyaa
-.section suyab
-.section suyba
-.section suybb
-.section suyca
-.section suycb
-.section suyda
-.section suydb
-.section suyea
-.section suyeb
-.section suyfa
-.section suyfb
-.section suyga
-.section suygb
-.section suyha
-.section suyhb
-.section suyia
-.section suyib
-.section suyja
-.section suyjb
-.section suyka
-.section suykb
-.section suyla
-.section suylb
-.section suyma
-.section suymb
-.section suyna
-.section suynb
-.section suyoa
-.section suyob
-.section suypa
-.section suypb
-.section suyqa
-.section suyqb
-.section suyra
-.section suyrb
-.section suysa
-.section suysb
-.section suyta
-.section suytb
-.section suyua
-.section suyub
-.section suyva
-.section suyvb
-.section suywa
-.section suywb
-.section suyxa
-.section suyxb
-.section suyya
-.section suyyb
-.section suyza
-.section suyzb
-.section suy1a
-.section suy1b
-.section suy2a
-.section suy2b
-.section suy3a
-.section suy3b
-.section suy4a
-.section suy4b
-.section suy5a
-.section suy5b
-.section suy6a
-.section suy6b
-.section suy7a
-.section suy7b
-.section suy8a
-.section suy8b
-.section suy9a
-.section suy9b
-.section suy0a
-.section suy0b
-.section suzaa
-.section suzab
-.section suzba
-.section suzbb
-.section suzca
-.section suzcb
-.section suzda
-.section suzdb
-.section suzea
-.section suzeb
-.section suzfa
-.section suzfb
-.section suzga
-.section suzgb
-.section suzha
-.section suzhb
-.section suzia
-.section suzib
-.section suzja
-.section suzjb
-.section suzka
-.section suzkb
-.section suzla
-.section suzlb
-.section suzma
-.section suzmb
-.section suzna
-.section suznb
-.section suzoa
-.section suzob
-.section suzpa
-.section suzpb
-.section suzqa
-.section suzqb
-.section suzra
-.section suzrb
-.section suzsa
-.section suzsb
-.section suzta
-.section suztb
-.section suzua
-.section suzub
-.section suzva
-.section suzvb
-.section suzwa
-.section suzwb
-.section suzxa
-.section suzxb
-.section suzya
-.section suzyb
-.section suzza
-.section suzzb
-.section suz1a
-.section suz1b
-.section suz2a
-.section suz2b
-.section suz3a
-.section suz3b
-.section suz4a
-.section suz4b
-.section suz5a
-.section suz5b
-.section suz6a
-.section suz6b
-.section suz7a
-.section suz7b
-.section suz8a
-.section suz8b
-.section suz9a
-.section suz9b
-.section suz0a
-.section suz0b
-.section su1aa
-.section su1ab
-.section su1ba
-.section su1bb
-.section su1ca
-.section su1cb
-.section su1da
-.section su1db
-.section su1ea
-.section su1eb
-.section su1fa
-.section su1fb
-.section su1ga
-.section su1gb
-.section su1ha
-.section su1hb
-.section su1ia
-.section su1ib
-.section su1ja
-.section su1jb
-.section su1ka
-.section su1kb
-.section su1la
-.section su1lb
-.section su1ma
-.section su1mb
-.section su1na
-.section su1nb
-.section su1oa
-.section su1ob
-.section su1pa
-.section su1pb
-.section su1qa
-.section su1qb
-.section su1ra
-.section su1rb
-.section su1sa
-.section su1sb
-.section su1ta
-.section su1tb
-.section su1ua
-.section su1ub
-.section su1va
-.section su1vb
-.section su1wa
-.section su1wb
-.section su1xa
-.section su1xb
-.section su1ya
-.section su1yb
-.section su1za
-.section su1zb
-.section su11a
-.section su11b
-.section su12a
-.section su12b
-.section su13a
-.section su13b
-.section su14a
-.section su14b
-.section su15a
-.section su15b
-.section su16a
-.section su16b
-.section su17a
-.section su17b
-.section su18a
-.section su18b
-.section su19a
-.section su19b
-.section su10a
-.section su10b
-.section su2aa
-.section su2ab
-.section su2ba
-.section su2bb
-.section su2ca
-.section su2cb
-.section su2da
-.section su2db
-.section su2ea
-.section su2eb
-.section su2fa
-.section su2fb
-.section su2ga
-.section su2gb
-.section su2ha
-.section su2hb
-.section su2ia
-.section su2ib
-.section su2ja
-.section su2jb
-.section su2ka
-.section su2kb
-.section su2la
-.section su2lb
-.section su2ma
-.section su2mb
-.section su2na
-.section su2nb
-.section su2oa
-.section su2ob
-.section su2pa
-.section su2pb
-.section su2qa
-.section su2qb
-.section su2ra
-.section su2rb
-.section su2sa
-.section su2sb
-.section su2ta
-.section su2tb
-.section su2ua
-.section su2ub
-.section su2va
-.section su2vb
-.section su2wa
-.section su2wb
-.section su2xa
-.section su2xb
-.section su2ya
-.section su2yb
-.section su2za
-.section su2zb
-.section su21a
-.section su21b
-.section su22a
-.section su22b
-.section su23a
-.section su23b
-.section su24a
-.section su24b
-.section su25a
-.section su25b
-.section su26a
-.section su26b
-.section su27a
-.section su27b
-.section su28a
-.section su28b
-.section su29a
-.section su29b
-.section su20a
-.section su20b
-.section su3aa
-.section su3ab
-.section su3ba
-.section su3bb
-.section su3ca
-.section su3cb
-.section su3da
-.section su3db
-.section su3ea
-.section su3eb
-.section su3fa
-.section su3fb
-.section su3ga
-.section su3gb
-.section su3ha
-.section su3hb
-.section su3ia
-.section su3ib
-.section su3ja
-.section su3jb
-.section su3ka
-.section su3kb
-.section su3la
-.section su3lb
-.section su3ma
-.section su3mb
-.section su3na
-.section su3nb
-.section su3oa
-.section su3ob
-.section su3pa
-.section su3pb
-.section su3qa
-.section su3qb
-.section su3ra
-.section su3rb
-.section su3sa
-.section su3sb
-.section su3ta
-.section su3tb
-.section su3ua
-.section su3ub
-.section su3va
-.section su3vb
-.section su3wa
-.section su3wb
-.section su3xa
-.section su3xb
-.section su3ya
-.section su3yb
-.section su3za
-.section su3zb
-.section su31a
-.section su31b
-.section su32a
-.section su32b
-.section su33a
-.section su33b
-.section su34a
-.section su34b
-.section su35a
-.section su35b
-.section su36a
-.section su36b
-.section su37a
-.section su37b
-.section su38a
-.section su38b
-.section su39a
-.section su39b
-.section su30a
-.section su30b
-.section su4aa
-.section su4ab
-.section su4ba
-.section su4bb
-.section su4ca
-.section su4cb
-.section su4da
-.section su4db
-.section su4ea
-.section su4eb
-.section su4fa
-.section su4fb
-.section su4ga
-.section su4gb
-.section su4ha
-.section su4hb
-.section su4ia
-.section su4ib
-.section su4ja
-.section su4jb
-.section su4ka
-.section su4kb
-.section su4la
-.section su4lb
-.section su4ma
-.section su4mb
-.section su4na
-.section su4nb
-.section su4oa
-.section su4ob
-.section su4pa
-.section su4pb
-.section su4qa
-.section su4qb
-.section su4ra
-.section su4rb
-.section su4sa
-.section su4sb
-.section su4ta
-.section su4tb
-.section su4ua
-.section su4ub
-.section su4va
-.section su4vb
-.section su4wa
-.section su4wb
-.section su4xa
-.section su4xb
-.section su4ya
-.section su4yb
-.section su4za
-.section su4zb
-.section su41a
-.section su41b
-.section su42a
-.section su42b
-.section su43a
-.section su43b
-.section su44a
-.section su44b
-.section su45a
-.section su45b
-.section su46a
-.section su46b
-.section su47a
-.section su47b
-.section su48a
-.section su48b
-.section su49a
-.section su49b
-.section su40a
-.section su40b
-.section su5aa
-.section su5ab
-.section su5ba
-.section su5bb
-.section su5ca
-.section su5cb
-.section su5da
-.section su5db
-.section su5ea
-.section su5eb
-.section su5fa
-.section su5fb
-.section su5ga
-.section su5gb
-.section su5ha
-.section su5hb
-.section su5ia
-.section su5ib
-.section su5ja
-.section su5jb
-.section su5ka
-.section su5kb
-.section su5la
-.section su5lb
-.section su5ma
-.section su5mb
-.section su5na
-.section su5nb
-.section su5oa
-.section su5ob
-.section su5pa
-.section su5pb
-.section su5qa
-.section su5qb
-.section su5ra
-.section su5rb
-.section su5sa
-.section su5sb
-.section su5ta
-.section su5tb
-.section su5ua
-.section su5ub
-.section su5va
-.section su5vb
-.section su5wa
-.section su5wb
-.section su5xa
-.section su5xb
-.section su5ya
-.section su5yb
-.section su5za
-.section su5zb
-.section su51a
-.section su51b
-.section su52a
-.section su52b
-.section su53a
-.section su53b
-.section su54a
-.section su54b
-.section su55a
-.section su55b
-.section su56a
-.section su56b
-.section su57a
-.section su57b
-.section su58a
-.section su58b
-.section su59a
-.section su59b
-.section su50a
-.section su50b
-.section su6aa
-.section su6ab
-.section su6ba
-.section su6bb
-.section su6ca
-.section su6cb
-.section su6da
-.section su6db
-.section su6ea
-.section su6eb
-.section su6fa
-.section su6fb
-.section su6ga
-.section su6gb
-.section su6ha
-.section su6hb
-.section su6ia
-.section su6ib
-.section su6ja
-.section su6jb
-.section su6ka
-.section su6kb
-.section su6la
-.section su6lb
-.section su6ma
-.section su6mb
-.section su6na
-.section su6nb
-.section su6oa
-.section su6ob
-.section su6pa
-.section su6pb
-.section su6qa
-.section su6qb
-.section su6ra
-.section su6rb
-.section su6sa
-.section su6sb
-.section su6ta
-.section su6tb
-.section su6ua
-.section su6ub
-.section su6va
-.section su6vb
-.section su6wa
-.section su6wb
-.section su6xa
-.section su6xb
-.section su6ya
-.section su6yb
-.section su6za
-.section su6zb
-.section su61a
-.section su61b
-.section su62a
-.section su62b
-.section su63a
-.section su63b
-.section su64a
-.section su64b
-.section su65a
-.section su65b
-.section su66a
-.section su66b
-.section su67a
-.section su67b
-.section su68a
-.section su68b
-.section su69a
-.section su69b
-.section su60a
-.section su60b
-.section su7aa
-.section su7ab
-.section su7ba
-.section su7bb
-.section su7ca
-.section su7cb
-.section su7da
-.section su7db
-.section su7ea
-.section su7eb
-.section su7fa
-.section su7fb
-.section su7ga
-.section su7gb
-.section su7ha
-.section su7hb
-.section su7ia
-.section su7ib
-.section su7ja
-.section su7jb
-.section su7ka
-.section su7kb
-.section su7la
-.section su7lb
-.section su7ma
-.section su7mb
-.section su7na
-.section su7nb
-.section su7oa
-.section su7ob
-.section su7pa
-.section su7pb
-.section su7qa
-.section su7qb
-.section su7ra
-.section su7rb
-.section su7sa
-.section su7sb
-.section su7ta
-.section su7tb
-.section su7ua
-.section su7ub
-.section su7va
-.section su7vb
-.section su7wa
-.section su7wb
-.section su7xa
-.section su7xb
-.section su7ya
-.section su7yb
-.section su7za
-.section su7zb
-.section su71a
-.section su71b
-.section su72a
-.section su72b
-.section su73a
-.section su73b
-.section su74a
-.section su74b
-.section su75a
-.section su75b
-.section su76a
-.section su76b
-.section su77a
-.section su77b
-.section su78a
-.section su78b
-.section su79a
-.section su79b
-.section su70a
-.section su70b
-.section su8aa
-.section su8ab
-.section su8ba
-.section su8bb
-.section su8ca
-.section su8cb
-.section su8da
-.section su8db
-.section su8ea
-.section su8eb
-.section su8fa
-.section su8fb
-.section su8ga
-.section su8gb
-.section su8ha
-.section su8hb
-.section su8ia
-.section su8ib
-.section su8ja
-.section su8jb
-.section su8ka
-.section su8kb
-.section su8la
-.section su8lb
-.section su8ma
-.section su8mb
-.section su8na
-.section su8nb
-.section su8oa
-.section su8ob
-.section su8pa
-.section su8pb
-.section su8qa
-.section su8qb
-.section su8ra
-.section su8rb
-.section su8sa
-.section su8sb
-.section su8ta
-.section su8tb
-.section su8ua
-.section su8ub
-.section su8va
-.section su8vb
-.section su8wa
-.section su8wb
-.section su8xa
-.section su8xb
-.section su8ya
-.section su8yb
-.section su8za
-.section su8zb
-.section su81a
-.section su81b
-.section su82a
-.section su82b
-.section su83a
-.section su83b
-.section su84a
-.section su84b
-.section su85a
-.section su85b
-.section su86a
-.section su86b
-.section su87a
-.section su87b
-.section su88a
-.section su88b
-.section su89a
-.section su89b
-.section su80a
-.section su80b
-.section su9aa
-.section su9ab
-.section su9ba
-.section su9bb
-.section su9ca
-.section su9cb
-.section su9da
-.section su9db
-.section su9ea
-.section su9eb
-.section su9fa
-.section su9fb
-.section su9ga
-.section su9gb
-.section su9ha
-.section su9hb
-.section su9ia
-.section su9ib
-.section su9ja
-.section su9jb
-.section su9ka
-.section su9kb
-.section su9la
-.section su9lb
-.section su9ma
-.section su9mb
-.section su9na
-.section su9nb
-.section su9oa
-.section su9ob
-.section su9pa
-.section su9pb
-.section su9qa
-.section su9qb
-.section su9ra
-.section su9rb
-.section su9sa
-.section su9sb
-.section su9ta
-.section su9tb
-.section su9ua
-.section su9ub
-.section su9va
-.section su9vb
-.section su9wa
-.section su9wb
-.section su9xa
-.section su9xb
-.section su9ya
-.section su9yb
-.section su9za
-.section su9zb
-.section su91a
-.section su91b
-.section su92a
-.section su92b
-.section su93a
-.section su93b
-.section su94a
-.section su94b
-.section su95a
-.section su95b
-.section su96a
-.section su96b
-.section su97a
-.section su97b
-.section su98a
-.section su98b
-.section su99a
-.section su99b
-.section su90a
-.section su90b
-.section su0aa
-.section su0ab
-.section su0ba
-.section su0bb
-.section su0ca
-.section su0cb
-.section su0da
-.section su0db
-.section su0ea
-.section su0eb
-.section su0fa
-.section su0fb
-.section su0ga
-.section su0gb
-.section su0ha
-.section su0hb
-.section su0ia
-.section su0ib
-.section su0ja
-.section su0jb
-.section su0ka
-.section su0kb
-.section su0la
-.section su0lb
-.section su0ma
-.section su0mb
-.section su0na
-.section su0nb
-.section su0oa
-.section su0ob
-.section su0pa
-.section su0pb
-.section su0qa
-.section su0qb
-.section su0ra
-.section su0rb
-.section su0sa
-.section su0sb
-.section su0ta
-.section su0tb
-.section su0ua
-.section su0ub
-.section su0va
-.section su0vb
-.section su0wa
-.section su0wb
-.section su0xa
-.section su0xb
-.section su0ya
-.section su0yb
-.section su0za
-.section su0zb
-.section su01a
-.section su01b
-.section su02a
-.section su02b
-.section su03a
-.section su03b
-.section su04a
-.section su04b
-.section su05a
-.section su05b
-.section su06a
-.section su06b
-.section su07a
-.section su07b
-.section su08a
-.section su08b
-.section su09a
-.section su09b
-.section su00a
-.section su00b
-.section svaaa
-.section svaab
-.section svaba
-.section svabb
-.section svaca
-.section svacb
-.section svada
-.section svadb
-.section svaea
-.section svaeb
-.section svafa
-.section svafb
-.section svaga
-.section svagb
-.section svaha
-.section svahb
-.section svaia
-.section svaib
-.section svaja
-.section svajb
-.section svaka
-.section svakb
-.section svala
-.section svalb
-.section svama
-.section svamb
-.section svana
-.section svanb
-.section svaoa
-.section svaob
-.section svapa
-.section svapb
-.section svaqa
-.section svaqb
-.section svara
-.section svarb
-.section svasa
-.section svasb
-.section svata
-.section svatb
-.section svaua
-.section svaub
-.section svava
-.section svavb
-.section svawa
-.section svawb
-.section svaxa
-.section svaxb
-.section svaya
-.section svayb
-.section svaza
-.section svazb
-.section sva1a
-.section sva1b
-.section sva2a
-.section sva2b
-.section sva3a
-.section sva3b
-.section sva4a
-.section sva4b
-.section sva5a
-.section sva5b
-.section sva6a
-.section sva6b
-.section sva7a
-.section sva7b
-.section sva8a
-.section sva8b
-.section sva9a
-.section sva9b
-.section sva0a
-.section sva0b
-.section svbaa
-.section svbab
-.section svbba
-.section svbbb
-.section svbca
-.section svbcb
-.section svbda
-.section svbdb
-.section svbea
-.section svbeb
-.section svbfa
-.section svbfb
-.section svbga
-.section svbgb
-.section svbha
-.section svbhb
-.section svbia
-.section svbib
-.section svbja
-.section svbjb
-.section svbka
-.section svbkb
-.section svbla
-.section svblb
-.section svbma
-.section svbmb
-.section svbna
-.section svbnb
-.section svboa
-.section svbob
-.section svbpa
-.section svbpb
-.section svbqa
-.section svbqb
-.section svbra
-.section svbrb
-.section svbsa
-.section svbsb
-.section svbta
-.section svbtb
-.section svbua
-.section svbub
-.section svbva
-.section svbvb
-.section svbwa
-.section svbwb
-.section svbxa
-.section svbxb
-.section svbya
-.section svbyb
-.section svbza
-.section svbzb
-.section svb1a
-.section svb1b
-.section svb2a
-.section svb2b
-.section svb3a
-.section svb3b
-.section svb4a
-.section svb4b
-.section svb5a
-.section svb5b
-.section svb6a
-.section svb6b
-.section svb7a
-.section svb7b
-.section svb8a
-.section svb8b
-.section svb9a
-.section svb9b
-.section svb0a
-.section svb0b
-.section svcaa
-.section svcab
-.section svcba
-.section svcbb
-.section svcca
-.section svccb
-.section svcda
-.section svcdb
-.section svcea
-.section svceb
-.section svcfa
-.section svcfb
-.section svcga
-.section svcgb
-.section svcha
-.section svchb
-.section svcia
-.section svcib
-.section svcja
-.section svcjb
-.section svcka
-.section svckb
-.section svcla
-.section svclb
-.section svcma
-.section svcmb
-.section svcna
-.section svcnb
-.section svcoa
-.section svcob
-.section svcpa
-.section svcpb
-.section svcqa
-.section svcqb
-.section svcra
-.section svcrb
-.section svcsa
-.section svcsb
-.section svcta
-.section svctb
-.section svcua
-.section svcub
-.section svcva
-.section svcvb
-.section svcwa
-.section svcwb
-.section svcxa
-.section svcxb
-.section svcya
-.section svcyb
-.section svcza
-.section svczb
-.section svc1a
-.section svc1b
-.section svc2a
-.section svc2b
-.section svc3a
-.section svc3b
-.section svc4a
-.section svc4b
-.section svc5a
-.section svc5b
-.section svc6a
-.section svc6b
-.section svc7a
-.section svc7b
-.section svc8a
-.section svc8b
-.section svc9a
-.section svc9b
-.section svc0a
-.section svc0b
-.section svdaa
-.section svdab
-.section svdba
-.section svdbb
-.section svdca
-.section svdcb
-.section svdda
-.section svddb
-.section svdea
-.section svdeb
-.section svdfa
-.section svdfb
-.section svdga
-.section svdgb
-.section svdha
-.section svdhb
-.section svdia
-.section svdib
-.section svdja
-.section svdjb
-.section svdka
-.section svdkb
-.section svdla
-.section svdlb
-.section svdma
-.section svdmb
-.section svdna
-.section svdnb
-.section svdoa
-.section svdob
-.section svdpa
-.section svdpb
-.section svdqa
-.section svdqb
-.section svdra
-.section svdrb
-.section svdsa
-.section svdsb
-.section svdta
-.section svdtb
-.section svdua
-.section svdub
-.section svdva
-.section svdvb
-.section svdwa
-.section svdwb
-.section svdxa
-.section svdxb
-.section svdya
-.section svdyb
-.section svdza
-.section svdzb
-.section svd1a
-.section svd1b
-.section svd2a
-.section svd2b
-.section svd3a
-.section svd3b
-.section svd4a
-.section svd4b
-.section svd5a
-.section svd5b
-.section svd6a
-.section svd6b
-.section svd7a
-.section svd7b
-.section svd8a
-.section svd8b
-.section svd9a
-.section svd9b
-.section svd0a
-.section svd0b
-.section sveaa
-.section sveab
-.section sveba
-.section svebb
-.section sveca
-.section svecb
-.section sveda
-.section svedb
-.section sveea
-.section sveeb
-.section svefa
-.section svefb
-.section svega
-.section svegb
-.section sveha
-.section svehb
-.section sveia
-.section sveib
-.section sveja
-.section svejb
-.section sveka
-.section svekb
-.section svela
-.section svelb
-.section svema
-.section svemb
-.section svena
-.section svenb
-.section sveoa
-.section sveob
-.section svepa
-.section svepb
-.section sveqa
-.section sveqb
-.section svera
-.section sverb
-.section svesa
-.section svesb
-.section sveta
-.section svetb
-.section sveua
-.section sveub
-.section sveva
-.section svevb
-.section svewa
-.section svewb
-.section svexa
-.section svexb
-.section sveya
-.section sveyb
-.section sveza
-.section svezb
-.section sve1a
-.section sve1b
-.section sve2a
-.section sve2b
-.section sve3a
-.section sve3b
-.section sve4a
-.section sve4b
-.section sve5a
-.section sve5b
-.section sve6a
-.section sve6b
-.section sve7a
-.section sve7b
-.section sve8a
-.section sve8b
-.section sve9a
-.section sve9b
-.section sve0a
-.section sve0b
-.section svfaa
-.section svfab
-.section svfba
-.section svfbb
-.section svfca
-.section svfcb
-.section svfda
-.section svfdb
-.section svfea
-.section svfeb
-.section svffa
-.section svffb
-.section svfga
-.section svfgb
-.section svfha
-.section svfhb
-.section svfia
-.section svfib
-.section svfja
-.section svfjb
-.section svfka
-.section svfkb
-.section svfla
-.section svflb
-.section svfma
-.section svfmb
-.section svfna
-.section svfnb
-.section svfoa
-.section svfob
-.section svfpa
-.section svfpb
-.section svfqa
-.section svfqb
-.section svfra
-.section svfrb
-.section svfsa
-.section svfsb
-.section svfta
-.section svftb
-.section svfua
-.section svfub
-.section svfva
-.section svfvb
-.section svfwa
-.section svfwb
-.section svfxa
-.section svfxb
-.section svfya
-.section svfyb
-.section svfza
-.section svfzb
-.section svf1a
-.section svf1b
-.section svf2a
-.section svf2b
-.section svf3a
-.section svf3b
-.section svf4a
-.section svf4b
-.section svf5a
-.section svf5b
-.section svf6a
-.section svf6b
-.section svf7a
-.section svf7b
-.section svf8a
-.section svf8b
-.section svf9a
-.section svf9b
-.section svf0a
-.section svf0b
-.section svgaa
-.section svgab
-.section svgba
-.section svgbb
-.section svgca
-.section svgcb
-.section svgda
-.section svgdb
-.section svgea
-.section svgeb
-.section svgfa
-.section svgfb
-.section svgga
-.section svggb
-.section svgha
-.section svghb
-.section svgia
-.section svgib
-.section svgja
-.section svgjb
-.section svgka
-.section svgkb
-.section svgla
-.section svglb
-.section svgma
-.section svgmb
-.section svgna
-.section svgnb
-.section svgoa
-.section svgob
-.section svgpa
-.section svgpb
-.section svgqa
-.section svgqb
-.section svgra
-.section svgrb
-.section svgsa
-.section svgsb
-.section svgta
-.section svgtb
-.section svgua
-.section svgub
-.section svgva
-.section svgvb
-.section svgwa
-.section svgwb
-.section svgxa
-.section svgxb
-.section svgya
-.section svgyb
-.section svgza
-.section svgzb
-.section svg1a
-.section svg1b
-.section svg2a
-.section svg2b
-.section svg3a
-.section svg3b
-.section svg4a
-.section svg4b
-.section svg5a
-.section svg5b
-.section svg6a
-.section svg6b
-.section svg7a
-.section svg7b
-.section svg8a
-.section svg8b
-.section svg9a
-.section svg9b
-.section svg0a
-.section svg0b
-.section svhaa
-.section svhab
-.section svhba
-.section svhbb
-.section svhca
-.section svhcb
-.section svhda
-.section svhdb
-.section svhea
-.section svheb
-.section svhfa
-.section svhfb
-.section svhga
-.section svhgb
-.section svhha
-.section svhhb
-.section svhia
-.section svhib
-.section svhja
-.section svhjb
-.section svhka
-.section svhkb
-.section svhla
-.section svhlb
-.section svhma
-.section svhmb
-.section svhna
-.section svhnb
-.section svhoa
-.section svhob
-.section svhpa
-.section svhpb
-.section svhqa
-.section svhqb
-.section svhra
-.section svhrb
-.section svhsa
-.section svhsb
-.section svhta
-.section svhtb
-.section svhua
-.section svhub
-.section svhva
-.section svhvb
-.section svhwa
-.section svhwb
-.section svhxa
-.section svhxb
-.section svhya
-.section svhyb
-.section svhza
-.section svhzb
-.section svh1a
-.section svh1b
-.section svh2a
-.section svh2b
-.section svh3a
-.section svh3b
-.section svh4a
-.section svh4b
-.section svh5a
-.section svh5b
-.section svh6a
-.section svh6b
-.section svh7a
-.section svh7b
-.section svh8a
-.section svh8b
-.section svh9a
-.section svh9b
-.section svh0a
-.section svh0b
-.section sviaa
-.section sviab
-.section sviba
-.section svibb
-.section svica
-.section svicb
-.section svida
-.section svidb
-.section sviea
-.section svieb
-.section svifa
-.section svifb
-.section sviga
-.section svigb
-.section sviha
-.section svihb
-.section sviia
-.section sviib
-.section svija
-.section svijb
-.section svika
-.section svikb
-.section svila
-.section svilb
-.section svima
-.section svimb
-.section svina
-.section svinb
-.section svioa
-.section sviob
-.section svipa
-.section svipb
-.section sviqa
-.section sviqb
-.section svira
-.section svirb
-.section svisa
-.section svisb
-.section svita
-.section svitb
-.section sviua
-.section sviub
-.section sviva
-.section svivb
-.section sviwa
-.section sviwb
-.section svixa
-.section svixb
-.section sviya
-.section sviyb
-.section sviza
-.section svizb
-.section svi1a
-.section svi1b
-.section svi2a
-.section svi2b
-.section svi3a
-.section svi3b
-.section svi4a
-.section svi4b
-.section svi5a
-.section svi5b
-.section svi6a
-.section svi6b
-.section svi7a
-.section svi7b
-.section svi8a
-.section svi8b
-.section svi9a
-.section svi9b
-.section svi0a
-.section svi0b
-.section svjaa
-.section svjab
-.section svjba
-.section svjbb
-.section svjca
-.section svjcb
-.section svjda
-.section svjdb
-.section svjea
-.section svjeb
-.section svjfa
-.section svjfb
-.section svjga
-.section svjgb
-.section svjha
-.section svjhb
-.section svjia
-.section svjib
-.section svjja
-.section svjjb
-.section svjka
-.section svjkb
-.section svjla
-.section svjlb
-.section svjma
-.section svjmb
-.section svjna
-.section svjnb
-.section svjoa
-.section svjob
-.section svjpa
-.section svjpb
-.section svjqa
-.section svjqb
-.section svjra
-.section svjrb
-.section svjsa
-.section svjsb
-.section svjta
-.section svjtb
-.section svjua
-.section svjub
-.section svjva
-.section svjvb
-.section svjwa
-.section svjwb
-.section svjxa
-.section svjxb
-.section svjya
-.section svjyb
-.section svjza
-.section svjzb
-.section svj1a
-.section svj1b
-.section svj2a
-.section svj2b
-.section svj3a
-.section svj3b
-.section svj4a
-.section svj4b
-.section svj5a
-.section svj5b
-.section svj6a
-.section svj6b
-.section svj7a
-.section svj7b
-.section svj8a
-.section svj8b
-.section svj9a
-.section svj9b
-.section svj0a
-.section svj0b
-.section svkaa
-.section svkab
-.section svkba
-.section svkbb
-.section svkca
-.section svkcb
-.section svkda
-.section svkdb
-.section svkea
-.section svkeb
-.section svkfa
-.section svkfb
-.section svkga
-.section svkgb
-.section svkha
-.section svkhb
-.section svkia
-.section svkib
-.section svkja
-.section svkjb
-.section svkka
-.section svkkb
-.section svkla
-.section svklb
-.section svkma
-.section svkmb
-.section svkna
-.section svknb
-.section svkoa
-.section svkob
-.section svkpa
-.section svkpb
-.section svkqa
-.section svkqb
-.section svkra
-.section svkrb
-.section svksa
-.section svksb
-.section svkta
-.section svktb
-.section svkua
-.section svkub
-.section svkva
-.section svkvb
-.section svkwa
-.section svkwb
-.section svkxa
-.section svkxb
-.section svkya
-.section svkyb
-.section svkza
-.section svkzb
-.section svk1a
-.section svk1b
-.section svk2a
-.section svk2b
-.section svk3a
-.section svk3b
-.section svk4a
-.section svk4b
-.section svk5a
-.section svk5b
-.section svk6a
-.section svk6b
-.section svk7a
-.section svk7b
-.section svk8a
-.section svk8b
-.section svk9a
-.section svk9b
-.section svk0a
-.section svk0b
-.section svlaa
-.section svlab
-.section svlba
-.section svlbb
-.section svlca
-.section svlcb
-.section svlda
-.section svldb
-.section svlea
-.section svleb
-.section svlfa
-.section svlfb
-.section svlga
-.section svlgb
-.section svlha
-.section svlhb
-.section svlia
-.section svlib
-.section svlja
-.section svljb
-.section svlka
-.section svlkb
-.section svlla
-.section svllb
-.section svlma
-.section svlmb
-.section svlna
-.section svlnb
-.section svloa
-.section svlob
-.section svlpa
-.section svlpb
-.section svlqa
-.section svlqb
-.section svlra
-.section svlrb
-.section svlsa
-.section svlsb
-.section svlta
-.section svltb
-.section svlua
-.section svlub
-.section svlva
-.section svlvb
-.section svlwa
-.section svlwb
-.section svlxa
-.section svlxb
-.section svlya
-.section svlyb
-.section svlza
-.section svlzb
-.section svl1a
-.section svl1b
-.section svl2a
-.section svl2b
-.section svl3a
-.section svl3b
-.section svl4a
-.section svl4b
-.section svl5a
-.section svl5b
-.section svl6a
-.section svl6b
-.section svl7a
-.section svl7b
-.section svl8a
-.section svl8b
-.section svl9a
-.section svl9b
-.section svl0a
-.section svl0b
-.section svmaa
-.section svmab
-.section svmba
-.section svmbb
-.section svmca
-.section svmcb
-.section svmda
-.section svmdb
-.section svmea
-.section svmeb
-.section svmfa
-.section svmfb
-.section svmga
-.section svmgb
-.section svmha
-.section svmhb
-.section svmia
-.section svmib
-.section svmja
-.section svmjb
-.section svmka
-.section svmkb
-.section svmla
-.section svmlb
-.section svmma
-.section svmmb
-.section svmna
-.section svmnb
-.section svmoa
-.section svmob
-.section svmpa
-.section svmpb
-.section svmqa
-.section svmqb
-.section svmra
-.section svmrb
-.section svmsa
-.section svmsb
-.section svmta
-.section svmtb
-.section svmua
-.section svmub
-.section svmva
-.section svmvb
-.section svmwa
-.section svmwb
-.section svmxa
-.section svmxb
-.section svmya
-.section svmyb
-.section svmza
-.section svmzb
-.section svm1a
-.section svm1b
-.section svm2a
-.section svm2b
-.section svm3a
-.section svm3b
-.section svm4a
-.section svm4b
-.section svm5a
-.section svm5b
-.section svm6a
-.section svm6b
-.section svm7a
-.section svm7b
-.section svm8a
-.section svm8b
-.section svm9a
-.section svm9b
-.section svm0a
-.section svm0b
-.section svnaa
-.section svnab
-.section svnba
-.section svnbb
-.section svnca
-.section svncb
-.section svnda
-.section svndb
-.section svnea
-.section svneb
-.section svnfa
-.section svnfb
-.section svnga
-.section svngb
-.section svnha
-.section svnhb
-.section svnia
-.section svnib
-.section svnja
-.section svnjb
-.section svnka
-.section svnkb
-.section svnla
-.section svnlb
-.section svnma
-.section svnmb
-.section svnna
-.section svnnb
-.section svnoa
-.section svnob
-.section svnpa
-.section svnpb
-.section svnqa
-.section svnqb
-.section svnra
-.section svnrb
-.section svnsa
-.section svnsb
-.section svnta
-.section svntb
-.section svnua
-.section svnub
-.section svnva
-.section svnvb
-.section svnwa
-.section svnwb
-.section svnxa
-.section svnxb
-.section svnya
-.section svnyb
-.section svnza
-.section svnzb
-.section svn1a
-.section svn1b
-.section svn2a
-.section svn2b
-.section svn3a
-.section svn3b
-.section svn4a
-.section svn4b
-.section svn5a
-.section svn5b
-.section svn6a
-.section svn6b
-.section svn7a
-.section svn7b
-.section svn8a
-.section svn8b
-.section svn9a
-.section svn9b
-.section svn0a
-.section svn0b
-.section svoaa
-.section svoab
-.section svoba
-.section svobb
-.section svoca
-.section svocb
-.section svoda
-.section svodb
-.section svoea
-.section svoeb
-.section svofa
-.section svofb
-.section svoga
-.section svogb
-.section svoha
-.section svohb
-.section svoia
-.section svoib
-.section svoja
-.section svojb
-.section svoka
-.section svokb
-.section svola
-.section svolb
-.section svoma
-.section svomb
-.section svona
-.section svonb
-.section svooa
-.section svoob
-.section svopa
-.section svopb
-.section svoqa
-.section svoqb
-.section svora
-.section svorb
-.section svosa
-.section svosb
-.section svota
-.section svotb
-.section svoua
-.section svoub
-.section svova
-.section svovb
-.section svowa
-.section svowb
-.section svoxa
-.section svoxb
-.section svoya
-.section svoyb
-.section svoza
-.section svozb
-.section svo1a
-.section svo1b
-.section svo2a
-.section svo2b
-.section svo3a
-.section svo3b
-.section svo4a
-.section svo4b
-.section svo5a
-.section svo5b
-.section svo6a
-.section svo6b
-.section svo7a
-.section svo7b
-.section svo8a
-.section svo8b
-.section svo9a
-.section svo9b
-.section svo0a
-.section svo0b
-.section svpaa
-.section svpab
-.section svpba
-.section svpbb
-.section svpca
-.section svpcb
-.section svpda
-.section svpdb
-.section svpea
-.section svpeb
-.section svpfa
-.section svpfb
-.section svpga
-.section svpgb
-.section svpha
-.section svphb
-.section svpia
-.section svpib
-.section svpja
-.section svpjb
-.section svpka
-.section svpkb
-.section svpla
-.section svplb
-.section svpma
-.section svpmb
-.section svpna
-.section svpnb
-.section svpoa
-.section svpob
-.section svppa
-.section svppb
-.section svpqa
-.section svpqb
-.section svpra
-.section svprb
-.section svpsa
-.section svpsb
-.section svpta
-.section svptb
-.section svpua
-.section svpub
-.section svpva
-.section svpvb
-.section svpwa
-.section svpwb
-.section svpxa
-.section svpxb
-.section svpya
-.section svpyb
-.section svpza
-.section svpzb
-.section svp1a
-.section svp1b
-.section svp2a
-.section svp2b
-.section svp3a
-.section svp3b
-.section svp4a
-.section svp4b
-.section svp5a
-.section svp5b
-.section svp6a
-.section svp6b
-.section svp7a
-.section svp7b
-.section svp8a
-.section svp8b
-.section svp9a
-.section svp9b
-.section svp0a
-.section svp0b
-.section svqaa
-.section svqab
-.section svqba
-.section svqbb
-.section svqca
-.section svqcb
-.section svqda
-.section svqdb
-.section svqea
-.section svqeb
-.section svqfa
-.section svqfb
-.section svqga
-.section svqgb
-.section svqha
-.section svqhb
-.section svqia
-.section svqib
-.section svqja
-.section svqjb
-.section svqka
-.section svqkb
-.section svqla
-.section svqlb
-.section svqma
-.section svqmb
-.section svqna
-.section svqnb
-.section svqoa
-.section svqob
-.section svqpa
-.section svqpb
-.section svqqa
-.section svqqb
-.section svqra
-.section svqrb
-.section svqsa
-.section svqsb
-.section svqta
-.section svqtb
-.section svqua
-.section svqub
-.section svqva
-.section svqvb
-.section svqwa
-.section svqwb
-.section svqxa
-.section svqxb
-.section svqya
-.section svqyb
-.section svqza
-.section svqzb
-.section svq1a
-.section svq1b
-.section svq2a
-.section svq2b
-.section svq3a
-.section svq3b
-.section svq4a
-.section svq4b
-.section svq5a
-.section svq5b
-.section svq6a
-.section svq6b
-.section svq7a
-.section svq7b
-.section svq8a
-.section svq8b
-.section svq9a
-.section svq9b
-.section svq0a
-.section svq0b
-.section svraa
-.section svrab
-.section svrba
-.section svrbb
-.section svrca
-.section svrcb
-.section svrda
-.section svrdb
-.section svrea
-.section svreb
-.section svrfa
-.section svrfb
-.section svrga
-.section svrgb
-.section svrha
-.section svrhb
-.section svria
-.section svrib
-.section svrja
-.section svrjb
-.section svrka
-.section svrkb
-.section svrla
-.section svrlb
-.section svrma
-.section svrmb
-.section svrna
-.section svrnb
-.section svroa
-.section svrob
-.section svrpa
-.section svrpb
-.section svrqa
-.section svrqb
-.section svrra
-.section svrrb
-.section svrsa
-.section svrsb
-.section svrta
-.section svrtb
-.section svrua
-.section svrub
-.section svrva
-.section svrvb
-.section svrwa
-.section svrwb
-.section svrxa
-.section svrxb
-.section svrya
-.section svryb
-.section svrza
-.section svrzb
-.section svr1a
-.section svr1b
-.section svr2a
-.section svr2b
-.section svr3a
-.section svr3b
-.section svr4a
-.section svr4b
-.section svr5a
-.section svr5b
-.section svr6a
-.section svr6b
-.section svr7a
-.section svr7b
-.section svr8a
-.section svr8b
-.section svr9a
-.section svr9b
-.section svr0a
-.section svr0b
-.section svsaa
-.section svsab
-.section svsba
-.section svsbb
-.section svsca
-.section svscb
-.section svsda
-.section svsdb
-.section svsea
-.section svseb
-.section svsfa
-.section svsfb
-.section svsga
-.section svsgb
-.section svsha
-.section svshb
-.section svsia
-.section svsib
-.section svsja
-.section svsjb
-.section svska
-.section svskb
-.section svsla
-.section svslb
-.section svsma
-.section svsmb
-.section svsna
-.section svsnb
-.section svsoa
-.section svsob
-.section svspa
-.section svspb
-.section svsqa
-.section svsqb
-.section svsra
-.section svsrb
-.section svssa
-.section svssb
-.section svsta
-.section svstb
-.section svsua
-.section svsub
-.section svsva
-.section svsvb
-.section svswa
-.section svswb
-.section svsxa
-.section svsxb
-.section svsya
-.section svsyb
-.section svsza
-.section svszb
-.section svs1a
-.section svs1b
-.section svs2a
-.section svs2b
-.section svs3a
-.section svs3b
-.section svs4a
-.section svs4b
-.section svs5a
-.section svs5b
-.section svs6a
-.section svs6b
-.section svs7a
-.section svs7b
-.section svs8a
-.section svs8b
-.section svs9a
-.section svs9b
-.section svs0a
-.section svs0b
-.section svtaa
-.section svtab
-.section svtba
-.section svtbb
-.section svtca
-.section svtcb
-.section svtda
-.section svtdb
-.section svtea
-.section svteb
-.section svtfa
-.section svtfb
-.section svtga
-.section svtgb
-.section svtha
-.section svthb
-.section svtia
-.section svtib
-.section svtja
-.section svtjb
-.section svtka
-.section svtkb
-.section svtla
-.section svtlb
-.section svtma
-.section svtmb
-.section svtna
-.section svtnb
-.section svtoa
-.section svtob
-.section svtpa
-.section svtpb
-.section svtqa
-.section svtqb
-.section svtra
-.section svtrb
-.section svtsa
-.section svtsb
-.section svtta
-.section svttb
-.section svtua
-.section svtub
-.section svtva
-.section svtvb
-.section svtwa
-.section svtwb
-.section svtxa
-.section svtxb
-.section svtya
-.section svtyb
-.section svtza
-.section svtzb
-.section svt1a
-.section svt1b
-.section svt2a
-.section svt2b
-.section svt3a
-.section svt3b
-.section svt4a
-.section svt4b
-.section svt5a
-.section svt5b
-.section svt6a
-.section svt6b
-.section svt7a
-.section svt7b
-.section svt8a
-.section svt8b
-.section svt9a
-.section svt9b
-.section svt0a
-.section svt0b
-.section svuaa
-.section svuab
-.section svuba
-.section svubb
-.section svuca
-.section svucb
-.section svuda
-.section svudb
-.section svuea
-.section svueb
-.section svufa
-.section svufb
-.section svuga
-.section svugb
-.section svuha
-.section svuhb
-.section svuia
-.section svuib
-.section svuja
-.section svujb
-.section svuka
-.section svukb
-.section svula
-.section svulb
-.section svuma
-.section svumb
-.section svuna
-.section svunb
-.section svuoa
-.section svuob
-.section svupa
-.section svupb
-.section svuqa
-.section svuqb
-.section svura
-.section svurb
-.section svusa
-.section svusb
-.section svuta
-.section svutb
-.section svuua
-.section svuub
-.section svuva
-.section svuvb
-.section svuwa
-.section svuwb
-.section svuxa
-.section svuxb
-.section svuya
-.section svuyb
-.section svuza
-.section svuzb
-.section svu1a
-.section svu1b
-.section svu2a
-.section svu2b
-.section svu3a
-.section svu3b
-.section svu4a
-.section svu4b
-.section svu5a
-.section svu5b
-.section svu6a
-.section svu6b
-.section svu7a
-.section svu7b
-.section svu8a
-.section svu8b
-.section svu9a
-.section svu9b
-.section svu0a
-.section svu0b
-.section svvaa
-.section svvab
-.section svvba
-.section svvbb
-.section svvca
-.section svvcb
-.section svvda
-.section svvdb
-.section svvea
-.section svveb
-.section svvfa
-.section svvfb
-.section svvga
-.section svvgb
-.section svvha
-.section svvhb
-.section svvia
-.section svvib
-.section svvja
-.section svvjb
-.section svvka
-.section svvkb
-.section svvla
-.section svvlb
-.section svvma
-.section svvmb
-.section svvna
-.section svvnb
-.section svvoa
-.section svvob
-.section svvpa
-.section svvpb
-.section svvqa
-.section svvqb
-.section svvra
-.section svvrb
-.section svvsa
-.section svvsb
-.section svvta
-.section svvtb
-.section svvua
-.section svvub
-.section svvva
-.section svvvb
-.section svvwa
-.section svvwb
-.section svvxa
-.section svvxb
-.section svvya
-.section svvyb
-.section svvza
-.section svvzb
-.section svv1a
-.section svv1b
-.section svv2a
-.section svv2b
-.section svv3a
-.section svv3b
-.section svv4a
-.section svv4b
-.section svv5a
-.section svv5b
-.section svv6a
-.section svv6b
-.section svv7a
-.section svv7b
-.section svv8a
-.section svv8b
-.section svv9a
-.section svv9b
-.section svv0a
-.section svv0b
-.section svwaa
-.section svwab
-.section svwba
-.section svwbb
-.section svwca
-.section svwcb
-.section svwda
-.section svwdb
-.section svwea
-.section svweb
-.section svwfa
-.section svwfb
-.section svwga
-.section svwgb
-.section svwha
-.section svwhb
-.section svwia
-.section svwib
-.section svwja
-.section svwjb
-.section svwka
-.section svwkb
-.section svwla
-.section svwlb
-.section svwma
-.section svwmb
-.section svwna
-.section svwnb
-.section svwoa
-.section svwob
-.section svwpa
-.section svwpb
-.section svwqa
-.section svwqb
-.section svwra
-.section svwrb
-.section svwsa
-.section svwsb
-.section svwta
-.section svwtb
-.section svwua
-.section svwub
-.section svwva
-.section svwvb
-.section svwwa
-.section svwwb
-.section svwxa
-.section svwxb
-.section svwya
-.section svwyb
-.section svwza
-.section svwzb
-.section svw1a
-.section svw1b
-.section svw2a
-.section svw2b
-.section svw3a
-.section svw3b
-.section svw4a
-.section svw4b
-.section svw5a
-.section svw5b
-.section svw6a
-.section svw6b
-.section svw7a
-.section svw7b
-.section svw8a
-.section svw8b
-.section svw9a
-.section svw9b
-.section svw0a
-.section svw0b
-.section svxaa
-.section svxab
-.section svxba
-.section svxbb
-.section svxca
-.section svxcb
-.section svxda
-.section svxdb
-.section svxea
-.section svxeb
-.section svxfa
-.section svxfb
-.section svxga
-.section svxgb
-.section svxha
-.section svxhb
-.section svxia
-.section svxib
-.section svxja
-.section svxjb
-.section svxka
-.section svxkb
-.section svxla
-.section svxlb
-.section svxma
-.section svxmb
-.section svxna
-.section svxnb
-.section svxoa
-.section svxob
-.section svxpa
-.section svxpb
-.section svxqa
-.section svxqb
-.section svxra
-.section svxrb
-.section svxsa
-.section svxsb
-.section svxta
-.section svxtb
-.section svxua
-.section svxub
-.section svxva
-.section svxvb
-.section svxwa
-.section svxwb
-.section svxxa
-.section svxxb
-.section svxya
-.section svxyb
-.section svxza
-.section svxzb
-.section svx1a
-.section svx1b
-.section svx2a
-.section svx2b
-.section svx3a
-.section svx3b
-.section svx4a
-.section svx4b
-.section svx5a
-.section svx5b
-.section svx6a
-.section svx6b
-.section svx7a
-.section svx7b
-.section svx8a
-.section svx8b
-.section svx9a
-.section svx9b
-.section svx0a
-.section svx0b
-.section svyaa
-.section svyab
-.section svyba
-.section svybb
-.section svyca
-.section svycb
-.section svyda
-.section svydb
-.section svyea
-.section svyeb
-.section svyfa
-.section svyfb
-.section svyga
-.section svygb
-.section svyha
-.section svyhb
-.section svyia
-.section svyib
-.section svyja
-.section svyjb
-.section svyka
-.section svykb
-.section svyla
-.section svylb
-.section svyma
-.section svymb
-.section svyna
-.section svynb
-.section svyoa
-.section svyob
-.section svypa
-.section svypb
-.section svyqa
-.section svyqb
-.section svyra
-.section svyrb
-.section svysa
-.section svysb
-.section svyta
-.section svytb
-.section svyua
-.section svyub
-.section svyva
-.section svyvb
-.section svywa
-.section svywb
-.section svyxa
-.section svyxb
-.section svyya
-.section svyyb
-.section svyza
-.section svyzb
-.section svy1a
-.section svy1b
-.section svy2a
-.section svy2b
-.section svy3a
-.section svy3b
-.section svy4a
-.section svy4b
-.section svy5a
-.section svy5b
-.section svy6a
-.section svy6b
-.section svy7a
-.section svy7b
-.section svy8a
-.section svy8b
-.section svy9a
-.section svy9b
-.section svy0a
-.section svy0b
-.section svzaa
-.section svzab
-.section svzba
-.section svzbb
-.section svzca
-.section svzcb
-.section svzda
-.section svzdb
-.section svzea
-.section svzeb
-.section svzfa
-.section svzfb
-.section svzga
-.section svzgb
-.section svzha
-.section svzhb
-.section svzia
-.section svzib
-.section svzja
-.section svzjb
-.section svzka
-.section svzkb
-.section svzla
-.section svzlb
-.section svzma
-.section svzmb
-.section svzna
-.section svznb
-.section svzoa
-.section svzob
-.section svzpa
-.section svzpb
-.section svzqa
-.section svzqb
-.section svzra
-.section svzrb
-.section svzsa
-.section svzsb
-.section svzta
-.section svztb
-.section svzua
-.section svzub
-.section svzva
-.section svzvb
-.section svzwa
-.section svzwb
-.section svzxa
-.section svzxb
-.section svzya
-.section svzyb
-.section svzza
-.section svzzb
-.section svz1a
-.section svz1b
-.section svz2a
-.section svz2b
-.section svz3a
-.section svz3b
-.section svz4a
-.section svz4b
-.section svz5a
-.section svz5b
-.section svz6a
-.section svz6b
-.section svz7a
-.section svz7b
-.section svz8a
-.section svz8b
-.section svz9a
-.section svz9b
-.section svz0a
-.section svz0b
-.section sv1aa
-.section sv1ab
-.section sv1ba
-.section sv1bb
-.section sv1ca
-.section sv1cb
-.section sv1da
-.section sv1db
-.section sv1ea
-.section sv1eb
-.section sv1fa
-.section sv1fb
-.section sv1ga
-.section sv1gb
-.section sv1ha
-.section sv1hb
-.section sv1ia
-.section sv1ib
-.section sv1ja
-.section sv1jb
-.section sv1ka
-.section sv1kb
-.section sv1la
-.section sv1lb
-.section sv1ma
-.section sv1mb
-.section sv1na
-.section sv1nb
-.section sv1oa
-.section sv1ob
-.section sv1pa
-.section sv1pb
-.section sv1qa
-.section sv1qb
-.section sv1ra
-.section sv1rb
-.section sv1sa
-.section sv1sb
-.section sv1ta
-.section sv1tb
-.section sv1ua
-.section sv1ub
-.section sv1va
-.section sv1vb
-.section sv1wa
-.section sv1wb
-.section sv1xa
-.section sv1xb
-.section sv1ya
-.section sv1yb
-.section sv1za
-.section sv1zb
-.section sv11a
-.section sv11b
-.section sv12a
-.section sv12b
-.section sv13a
-.section sv13b
-.section sv14a
-.section sv14b
-.section sv15a
-.section sv15b
-.section sv16a
-.section sv16b
-.section sv17a
-.section sv17b
-.section sv18a
-.section sv18b
-.section sv19a
-.section sv19b
-.section sv10a
-.section sv10b
-.section sv2aa
-.section sv2ab
-.section sv2ba
-.section sv2bb
-.section sv2ca
-.section sv2cb
-.section sv2da
-.section sv2db
-.section sv2ea
-.section sv2eb
-.section sv2fa
-.section sv2fb
-.section sv2ga
-.section sv2gb
-.section sv2ha
-.section sv2hb
-.section sv2ia
-.section sv2ib
-.section sv2ja
-.section sv2jb
-.section sv2ka
-.section sv2kb
-.section sv2la
-.section sv2lb
-.section sv2ma
-.section sv2mb
-.section sv2na
-.section sv2nb
-.section sv2oa
-.section sv2ob
-.section sv2pa
-.section sv2pb
-.section sv2qa
-.section sv2qb
-.section sv2ra
-.section sv2rb
-.section sv2sa
-.section sv2sb
-.section sv2ta
-.section sv2tb
-.section sv2ua
-.section sv2ub
-.section sv2va
-.section sv2vb
-.section sv2wa
-.section sv2wb
-.section sv2xa
-.section sv2xb
-.section sv2ya
-.section sv2yb
-.section sv2za
-.section sv2zb
-.section sv21a
-.section sv21b
-.section sv22a
-.section sv22b
-.section sv23a
-.section sv23b
-.section sv24a
-.section sv24b
-.section sv25a
-.section sv25b
-.section sv26a
-.section sv26b
-.section sv27a
-.section sv27b
-.section sv28a
-.section sv28b
-.section sv29a
-.section sv29b
-.section sv20a
-.section sv20b
-.section sv3aa
-.section sv3ab
-.section sv3ba
-.section sv3bb
-.section sv3ca
-.section sv3cb
-.section sv3da
-.section sv3db
-.section sv3ea
-.section sv3eb
-.section sv3fa
-.section sv3fb
-.section sv3ga
-.section sv3gb
-.section sv3ha
-.section sv3hb
-.section sv3ia
-.section sv3ib
-.section sv3ja
-.section sv3jb
-.section sv3ka
-.section sv3kb
-.section sv3la
-.section sv3lb
-.section sv3ma
-.section sv3mb
-.section sv3na
-.section sv3nb
-.section sv3oa
-.section sv3ob
-.section sv3pa
-.section sv3pb
-.section sv3qa
-.section sv3qb
-.section sv3ra
-.section sv3rb
-.section sv3sa
-.section sv3sb
-.section sv3ta
-.section sv3tb
-.section sv3ua
-.section sv3ub
-.section sv3va
-.section sv3vb
-.section sv3wa
-.section sv3wb
-.section sv3xa
-.section sv3xb
-.section sv3ya
-.section sv3yb
-.section sv3za
-.section sv3zb
-.section sv31a
-.section sv31b
-.section sv32a
-.section sv32b
-.section sv33a
-.section sv33b
-.section sv34a
-.section sv34b
-.section sv35a
-.section sv35b
-.section sv36a
-.section sv36b
-.section sv37a
-.section sv37b
-.section sv38a
-.section sv38b
-.section sv39a
-.section sv39b
-.section sv30a
-.section sv30b
-.section sv4aa
-.section sv4ab
-.section sv4ba
-.section sv4bb
-.section sv4ca
-.section sv4cb
-.section sv4da
-.section sv4db
-.section sv4ea
-.section sv4eb
-.section sv4fa
-.section sv4fb
-.section sv4ga
-.section sv4gb
-.section sv4ha
-.section sv4hb
-.section sv4ia
-.section sv4ib
-.section sv4ja
-.section sv4jb
-.section sv4ka
-.section sv4kb
-.section sv4la
-.section sv4lb
-.section sv4ma
-.section sv4mb
-.section sv4na
-.section sv4nb
-.section sv4oa
-.section sv4ob
-.section sv4pa
-.section sv4pb
-.section sv4qa
-.section sv4qb
-.section sv4ra
-.section sv4rb
-.section sv4sa
-.section sv4sb
-.section sv4ta
-.section sv4tb
-.section sv4ua
-.section sv4ub
-.section sv4va
-.section sv4vb
-.section sv4wa
-.section sv4wb
-.section sv4xa
-.section sv4xb
-.section sv4ya
-.section sv4yb
-.section sv4za
-.section sv4zb
-.section sv41a
-.section sv41b
-.section sv42a
-.section sv42b
-.section sv43a
-.section sv43b
-.section sv44a
-.section sv44b
-.section sv45a
-.section sv45b
-.section sv46a
-.section sv46b
-.section sv47a
-.section sv47b
-.section sv48a
-.section sv48b
-.section sv49a
-.section sv49b
-.section sv40a
-.section sv40b
-.section sv5aa
-.section sv5ab
-.section sv5ba
-.section sv5bb
-.section sv5ca
-.section sv5cb
-.section sv5da
-.section sv5db
-.section sv5ea
-.section sv5eb
-.section sv5fa
-.section sv5fb
-.section sv5ga
-.section sv5gb
-.section sv5ha
-.section sv5hb
-.section sv5ia
-.section sv5ib
-.section sv5ja
-.section sv5jb
-.section sv5ka
-.section sv5kb
-.section sv5la
-.section sv5lb
-.section sv5ma
-.section sv5mb
-.section sv5na
-.section sv5nb
-.section sv5oa
-.section sv5ob
-.section sv5pa
-.section sv5pb
-.section sv5qa
-.section sv5qb
-.section sv5ra
-.section sv5rb
-.section sv5sa
-.section sv5sb
-.section sv5ta
-.section sv5tb
-.section sv5ua
-.section sv5ub
-.section sv5va
-.section sv5vb
-.section sv5wa
-.section sv5wb
-.section sv5xa
-.section sv5xb
-.section sv5ya
-.section sv5yb
-.section sv5za
-.section sv5zb
-.section sv51a
-.section sv51b
-.section sv52a
-.section sv52b
-.section sv53a
-.section sv53b
-.section sv54a
-.section sv54b
-.section sv55a
-.section sv55b
-.section sv56a
-.section sv56b
-.section sv57a
-.section sv57b
-.section sv58a
-.section sv58b
-.section sv59a
-.section sv59b
-.section sv50a
-.section sv50b
-.section sv6aa
-.section sv6ab
-.section sv6ba
-.section sv6bb
-.section sv6ca
-.section sv6cb
-.section sv6da
-.section sv6db
-.section sv6ea
-.section sv6eb
-.section sv6fa
-.section sv6fb
-.section sv6ga
-.section sv6gb
-.section sv6ha
-.section sv6hb
-.section sv6ia
-.section sv6ib
-.section sv6ja
-.section sv6jb
-.section sv6ka
-.section sv6kb
-.section sv6la
-.section sv6lb
-.section sv6ma
-.section sv6mb
-.section sv6na
-.section sv6nb
-.section sv6oa
-.section sv6ob
-.section sv6pa
-.section sv6pb
-.section sv6qa
-.section sv6qb
-.section sv6ra
-.section sv6rb
-.section sv6sa
-.section sv6sb
-.section sv6ta
-.section sv6tb
-.section sv6ua
-.section sv6ub
-.section sv6va
-.section sv6vb
-.section sv6wa
-.section sv6wb
-.section sv6xa
-.section sv6xb
-.section sv6ya
-.section sv6yb
-.section sv6za
-.section sv6zb
-.section sv61a
-.section sv61b
-.section sv62a
-.section sv62b
-.section sv63a
-.section sv63b
-.section sv64a
-.section sv64b
-.section sv65a
-.section sv65b
-.section sv66a
-.section sv66b
-.section sv67a
-.section sv67b
-.section sv68a
-.section sv68b
-.section sv69a
-.section sv69b
-.section sv60a
-.section sv60b
-.section sv7aa
-.section sv7ab
-.section sv7ba
-.section sv7bb
-.section sv7ca
-.section sv7cb
-.section sv7da
-.section sv7db
-.section sv7ea
-.section sv7eb
-.section sv7fa
-.section sv7fb
-.section sv7ga
-.section sv7gb
-.section sv7ha
-.section sv7hb
-.section sv7ia
-.section sv7ib
-.section sv7ja
-.section sv7jb
-.section sv7ka
-.section sv7kb
-.section sv7la
-.section sv7lb
-.section sv7ma
-.section sv7mb
-.section sv7na
-.section sv7nb
-.section sv7oa
-.section sv7ob
-.section sv7pa
-.section sv7pb
-.section sv7qa
-.section sv7qb
-.section sv7ra
-.section sv7rb
-.section sv7sa
-.section sv7sb
-.section sv7ta
-.section sv7tb
-.section sv7ua
-.section sv7ub
-.section sv7va
-.section sv7vb
-.section sv7wa
-.section sv7wb
-.section sv7xa
-.section sv7xb
-.section sv7ya
-.section sv7yb
-.section sv7za
-.section sv7zb
-.section sv71a
-.section sv71b
-.section sv72a
-.section sv72b
-.section sv73a
-.section sv73b
-.section sv74a
-.section sv74b
-.section sv75a
-.section sv75b
-.section sv76a
-.section sv76b
-.section sv77a
-.section sv77b
-.section sv78a
-.section sv78b
-.section sv79a
-.section sv79b
-.section sv70a
-.section sv70b
-.section sv8aa
-.section sv8ab
-.section sv8ba
-.section sv8bb
-.section sv8ca
-.section sv8cb
-.section sv8da
-.section sv8db
-.section sv8ea
-.section sv8eb
-.section sv8fa
-.section sv8fb
-.section sv8ga
-.section sv8gb
-.section sv8ha
-.section sv8hb
-.section sv8ia
-.section sv8ib
-.section sv8ja
-.section sv8jb
-.section sv8ka
-.section sv8kb
-.section sv8la
-.section sv8lb
-.section sv8ma
-.section sv8mb
-.section sv8na
-.section sv8nb
-.section sv8oa
-.section sv8ob
-.section sv8pa
-.section sv8pb
-.section sv8qa
-.section sv8qb
-.section sv8ra
-.section sv8rb
-.section sv8sa
-.section sv8sb
-.section sv8ta
-.section sv8tb
-.section sv8ua
-.section sv8ub
-.section sv8va
-.section sv8vb
-.section sv8wa
-.section sv8wb
-.section sv8xa
-.section sv8xb
-.section sv8ya
-.section sv8yb
-.section sv8za
-.section sv8zb
-.section sv81a
-.section sv81b
-.section sv82a
-.section sv82b
-.section sv83a
-.section sv83b
-.section sv84a
-.section sv84b
-.section sv85a
-.section sv85b
-.section sv86a
-.section sv86b
-.section sv87a
-.section sv87b
-.section sv88a
-.section sv88b
-.section sv89a
-.section sv89b
-.section sv80a
-.section sv80b
-.section sv9aa
-.section sv9ab
-.section sv9ba
-.section sv9bb
-.section sv9ca
-.section sv9cb
-.section sv9da
-.section sv9db
-.section sv9ea
-.section sv9eb
-.section sv9fa
-.section sv9fb
-.section sv9ga
-.section sv9gb
-.section sv9ha
-.section sv9hb
-.section sv9ia
-.section sv9ib
-.section sv9ja
-.section sv9jb
-.section sv9ka
-.section sv9kb
-.section sv9la
-.section sv9lb
-.section sv9ma
-.section sv9mb
-.section sv9na
-.section sv9nb
-.section sv9oa
-.section sv9ob
-.section sv9pa
-.section sv9pb
-.section sv9qa
-.section sv9qb
-.section sv9ra
-.section sv9rb
-.section sv9sa
-.section sv9sb
-.section sv9ta
-.section sv9tb
-.section sv9ua
-.section sv9ub
-.section sv9va
-.section sv9vb
-.section sv9wa
-.section sv9wb
-.section sv9xa
-.section sv9xb
-.section sv9ya
-.section sv9yb
-.section sv9za
-.section sv9zb
-.section sv91a
-.section sv91b
-.section sv92a
-.section sv92b
-.section sv93a
-.section sv93b
-.section sv94a
-.section sv94b
-.section sv95a
-.section sv95b
-.section sv96a
-.section sv96b
-.section sv97a
-.section sv97b
-.section sv98a
-.section sv98b
-.section sv99a
-.section sv99b
-.section sv90a
-.section sv90b
-.section sv0aa
-.section sv0ab
-.section sv0ba
-.section sv0bb
-.section sv0ca
-.section sv0cb
-.section sv0da
-.section sv0db
-.section sv0ea
-.section sv0eb
-.section sv0fa
-.section sv0fb
-.section sv0ga
-.section sv0gb
-.section sv0ha
-.section sv0hb
-.section sv0ia
-.section sv0ib
-.section sv0ja
-.section sv0jb
-.section sv0ka
-.section sv0kb
-.section sv0la
-.section sv0lb
-.section sv0ma
-.section sv0mb
-.section sv0na
-.section sv0nb
-.section sv0oa
-.section sv0ob
-.section sv0pa
-.section sv0pb
-.section sv0qa
-.section sv0qb
-.section sv0ra
-.section sv0rb
-.section sv0sa
-.section sv0sb
-.section sv0ta
-.section sv0tb
-.section sv0ua
-.section sv0ub
-.section sv0va
-.section sv0vb
-.section sv0wa
-.section sv0wb
-.section sv0xa
-.section sv0xb
-.section sv0ya
-.section sv0yb
-.section sv0za
-.section sv0zb
-.section sv01a
-.section sv01b
-.section sv02a
-.section sv02b
-.section sv03a
-.section sv03b
-.section sv04a
-.section sv04b
-.section sv05a
-.section sv05b
-.section sv06a
-.section sv06b
-.section sv07a
-.section sv07b
-.section sv08a
-.section sv08b
-.section sv09a
-.section sv09b
-.section sv00a
-.section sv00b
-.section swaaa
-.section swaab
-.section swaba
-.section swabb
-.section swaca
-.section swacb
-.section swada
-.section swadb
-.section swaea
-.section swaeb
-.section swafa
-.section swafb
-.section swaga
-.section swagb
-.section swaha
-.section swahb
-.section swaia
-.section swaib
-.section swaja
-.section swajb
-.section swaka
-.section swakb
-.section swala
-.section swalb
-.section swama
-.section swamb
-.section swana
-.section swanb
-.section swaoa
-.section swaob
-.section swapa
-.section swapb
-.section swaqa
-.section swaqb
-.section swara
-.section swarb
-.section swasa
-.section swasb
-.section swata
-.section swatb
-.section swaua
-.section swaub
-.section swava
-.section swavb
-.section swawa
-.section swawb
-.section swaxa
-.section swaxb
-.section swaya
-.section swayb
-.section swaza
-.section swazb
-.section swa1a
-.section swa1b
-.section swa2a
-.section swa2b
-.section swa3a
-.section swa3b
-.section swa4a
-.section swa4b
-.section swa5a
-.section swa5b
-.section swa6a
-.section swa6b
-.section swa7a
-.section swa7b
-.section swa8a
-.section swa8b
-.section swa9a
-.section swa9b
-.section swa0a
-.section swa0b
-.section swbaa
-.section swbab
-.section swbba
-.section swbbb
-.section swbca
-.section swbcb
-.section swbda
-.section swbdb
-.section swbea
-.section swbeb
-.section swbfa
-.section swbfb
-.section swbga
-.section swbgb
-.section swbha
-.section swbhb
-.section swbia
-.section swbib
-.section swbja
-.section swbjb
-.section swbka
-.section swbkb
-.section swbla
-.section swblb
-.section swbma
-.section swbmb
-.section swbna
-.section swbnb
-.section swboa
-.section swbob
-.section swbpa
-.section swbpb
-.section swbqa
-.section swbqb
-.section swbra
-.section swbrb
-.section swbsa
-.section swbsb
-.section swbta
-.section swbtb
-.section swbua
-.section swbub
-.section swbva
-.section swbvb
-.section swbwa
-.section swbwb
-.section swbxa
-.section swbxb
-.section swbya
-.section swbyb
-.section swbza
-.section swbzb
-.section swb1a
-.section swb1b
-.section swb2a
-.section swb2b
-.section swb3a
-.section swb3b
-.section swb4a
-.section swb4b
-.section swb5a
-.section swb5b
-.section swb6a
-.section swb6b
-.section swb7a
-.section swb7b
-.section swb8a
-.section swb8b
-.section swb9a
-.section swb9b
-.section swb0a
-.section swb0b
-.section swcaa
-.section swcab
-.section swcba
-.section swcbb
-.section swcca
-.section swccb
-.section swcda
-.section swcdb
-.section swcea
-.section swceb
-.section swcfa
-.section swcfb
-.section swcga
-.section swcgb
-.section swcha
-.section swchb
-.section swcia
-.section swcib
-.section swcja
-.section swcjb
-.section swcka
-.section swckb
-.section swcla
-.section swclb
-.section swcma
-.section swcmb
-.section swcna
-.section swcnb
-.section swcoa
-.section swcob
-.section swcpa
-.section swcpb
-.section swcqa
-.section swcqb
-.section swcra
-.section swcrb
-.section swcsa
-.section swcsb
-.section swcta
-.section swctb
-.section swcua
-.section swcub
-.section swcva
-.section swcvb
-.section swcwa
-.section swcwb
-.section swcxa
-.section swcxb
-.section swcya
-.section swcyb
-.section swcza
-.section swczb
-.section swc1a
-.section swc1b
-.section swc2a
-.section swc2b
-.section swc3a
-.section swc3b
-.section swc4a
-.section swc4b
-.section swc5a
-.section swc5b
-.section swc6a
-.section swc6b
-.section swc7a
-.section swc7b
-.section swc8a
-.section swc8b
-.section swc9a
-.section swc9b
-.section swc0a
-.section swc0b
-.section swdaa
-.section swdab
-.section swdba
-.section swdbb
-.section swdca
-.section swdcb
-.section swdda
-.section swddb
-.section swdea
-.section swdeb
-.section swdfa
-.section swdfb
-.section swdga
-.section swdgb
-.section swdha
-.section swdhb
-.section swdia
-.section swdib
-.section swdja
-.section swdjb
-.section swdka
-.section swdkb
-.section swdla
-.section swdlb
-.section swdma
-.section swdmb
-.section swdna
-.section swdnb
-.section swdoa
-.section swdob
-.section swdpa
-.section swdpb
-.section swdqa
-.section swdqb
-.section swdra
-.section swdrb
-.section swdsa
-.section swdsb
-.section swdta
-.section swdtb
-.section swdua
-.section swdub
-.section swdva
-.section swdvb
-.section swdwa
-.section swdwb
-.section swdxa
-.section swdxb
-.section swdya
-.section swdyb
-.section swdza
-.section swdzb
-.section swd1a
-.section swd1b
-.section swd2a
-.section swd2b
-.section swd3a
-.section swd3b
-.section swd4a
-.section swd4b
-.section swd5a
-.section swd5b
-.section swd6a
-.section swd6b
-.section swd7a
-.section swd7b
-.section swd8a
-.section swd8b
-.section swd9a
-.section swd9b
-.section swd0a
-.section swd0b
-.section sweaa
-.section sweab
-.section sweba
-.section swebb
-.section sweca
-.section swecb
-.section sweda
-.section swedb
-.section sweea
-.section sweeb
-.section swefa
-.section swefb
-.section swega
-.section swegb
-.section sweha
-.section swehb
-.section sweia
-.section sweib
-.section sweja
-.section swejb
-.section sweka
-.section swekb
-.section swela
-.section swelb
-.section swema
-.section swemb
-.section swena
-.section swenb
-.section sweoa
-.section sweob
-.section swepa
-.section swepb
-.section sweqa
-.section sweqb
-.section swera
-.section swerb
-.section swesa
-.section swesb
-.section sweta
-.section swetb
-.section sweua
-.section sweub
-.section sweva
-.section swevb
-.section swewa
-.section swewb
-.section swexa
-.section swexb
-.section sweya
-.section sweyb
-.section sweza
-.section swezb
-.section swe1a
-.section swe1b
-.section swe2a
-.section swe2b
-.section swe3a
-.section swe3b
-.section swe4a
-.section swe4b
-.section swe5a
-.section swe5b
-.section swe6a
-.section swe6b
-.section swe7a
-.section swe7b
-.section swe8a
-.section swe8b
-.section swe9a
-.section swe9b
-.section swe0a
-.section swe0b
-.section swfaa
-.section swfab
-.section swfba
-.section swfbb
-.section swfca
-.section swfcb
-.section swfda
-.section swfdb
-.section swfea
-.section swfeb
-.section swffa
-.section swffb
-.section swfga
-.section swfgb
-.section swfha
-.section swfhb
-.section swfia
-.section swfib
-.section swfja
-.section swfjb
-.section swfka
-.section swfkb
-.section swfla
-.section swflb
-.section swfma
-.section swfmb
-.section swfna
-.section swfnb
-.section swfoa
-.section swfob
-.section swfpa
-.section swfpb
-.section swfqa
-.section swfqb
-.section swfra
-.section swfrb
-.section swfsa
-.section swfsb
-.section swfta
-.section swftb
-.section swfua
-.section swfub
-.section swfva
-.section swfvb
-.section swfwa
-.section swfwb
-.section swfxa
-.section swfxb
-.section swfya
-.section swfyb
-.section swfza
-.section swfzb
-.section swf1a
-.section swf1b
-.section swf2a
-.section swf2b
-.section swf3a
-.section swf3b
-.section swf4a
-.section swf4b
-.section swf5a
-.section swf5b
-.section swf6a
-.section swf6b
-.section swf7a
-.section swf7b
-.section swf8a
-.section swf8b
-.section swf9a
-.section swf9b
-.section swf0a
-.section swf0b
-.section swgaa
-.section swgab
-.section swgba
-.section swgbb
-.section swgca
-.section swgcb
-.section swgda
-.section swgdb
-.section swgea
-.section swgeb
-.section swgfa
-.section swgfb
-.section swgga
-.section swggb
-.section swgha
-.section swghb
-.section swgia
-.section swgib
-.section swgja
-.section swgjb
-.section swgka
-.section swgkb
-.section swgla
-.section swglb
-.section swgma
-.section swgmb
-.section swgna
-.section swgnb
-.section swgoa
-.section swgob
-.section swgpa
-.section swgpb
-.section swgqa
-.section swgqb
-.section swgra
-.section swgrb
-.section swgsa
-.section swgsb
-.section swgta
-.section swgtb
-.section swgua
-.section swgub
-.section swgva
-.section swgvb
-.section swgwa
-.section swgwb
-.section swgxa
-.section swgxb
-.section swgya
-.section swgyb
-.section swgza
-.section swgzb
-.section swg1a
-.section swg1b
-.section swg2a
-.section swg2b
-.section swg3a
-.section swg3b
-.section swg4a
-.section swg4b
-.section swg5a
-.section swg5b
-.section swg6a
-.section swg6b
-.section swg7a
-.section swg7b
-.section swg8a
-.section swg8b
-.section swg9a
-.section swg9b
-.section swg0a
-.section swg0b
-.section swhaa
-.section swhab
-.section swhba
-.section swhbb
-.section swhca
-.section swhcb
-.section swhda
-.section swhdb
-.section swhea
-.section swheb
-.section swhfa
-.section swhfb
-.section swhga
-.section swhgb
-.section swhha
-.section swhhb
-.section swhia
-.section swhib
-.section swhja
-.section swhjb
-.section swhka
-.section swhkb
-.section swhla
-.section swhlb
-.section swhma
-.section swhmb
-.section swhna
-.section swhnb
-.section swhoa
-.section swhob
-.section swhpa
-.section swhpb
-.section swhqa
-.section swhqb
-.section swhra
-.section swhrb
-.section swhsa
-.section swhsb
-.section swhta
-.section swhtb
-.section swhua
-.section swhub
-.section swhva
-.section swhvb
-.section swhwa
-.section swhwb
-.section swhxa
-.section swhxb
-.section swhya
-.section swhyb
-.section swhza
-.section swhzb
-.section swh1a
-.section swh1b
-.section swh2a
-.section swh2b
-.section swh3a
-.section swh3b
-.section swh4a
-.section swh4b
-.section swh5a
-.section swh5b
-.section swh6a
-.section swh6b
-.section swh7a
-.section swh7b
-.section swh8a
-.section swh8b
-.section swh9a
-.section swh9b
-.section swh0a
-.section swh0b
-.section swiaa
-.section swiab
-.section swiba
-.section swibb
-.section swica
-.section swicb
-.section swida
-.section swidb
-.section swiea
-.section swieb
-.section swifa
-.section swifb
-.section swiga
-.section swigb
-.section swiha
-.section swihb
-.section swiia
-.section swiib
-.section swija
-.section swijb
-.section swika
-.section swikb
-.section swila
-.section swilb
-.section swima
-.section swimb
-.section swina
-.section swinb
-.section swioa
-.section swiob
-.section swipa
-.section swipb
-.section swiqa
-.section swiqb
-.section swira
-.section swirb
-.section swisa
-.section swisb
-.section swita
-.section switb
-.section swiua
-.section swiub
-.section swiva
-.section swivb
-.section swiwa
-.section swiwb
-.section swixa
-.section swixb
-.section swiya
-.section swiyb
-.section swiza
-.section swizb
-.section swi1a
-.section swi1b
-.section swi2a
-.section swi2b
-.section swi3a
-.section swi3b
-.section swi4a
-.section swi4b
-.section swi5a
-.section swi5b
-.section swi6a
-.section swi6b
-.section swi7a
-.section swi7b
-.section swi8a
-.section swi8b
-.section swi9a
-.section swi9b
-.section swi0a
-.section swi0b
-.section swjaa
-.section swjab
-.section swjba
-.section swjbb
-.section swjca
-.section swjcb
-.section swjda
-.section swjdb
-.section swjea
-.section swjeb
-.section swjfa
-.section swjfb
-.section swjga
-.section swjgb
-.section swjha
-.section swjhb
-.section swjia
-.section swjib
-.section swjja
-.section swjjb
-.section swjka
-.section swjkb
-.section swjla
-.section swjlb
-.section swjma
-.section swjmb
-.section swjna
-.section swjnb
-.section swjoa
-.section swjob
-.section swjpa
-.section swjpb
-.section swjqa
-.section swjqb
-.section swjra
-.section swjrb
-.section swjsa
-.section swjsb
-.section swjta
-.section swjtb
-.section swjua
-.section swjub
-.section swjva
-.section swjvb
-.section swjwa
-.section swjwb
-.section swjxa
-.section swjxb
-.section swjya
-.section swjyb
-.section swjza
-.section swjzb
-.section swj1a
-.section swj1b
-.section swj2a
-.section swj2b
-.section swj3a
-.section swj3b
-.section swj4a
-.section swj4b
-.section swj5a
-.section swj5b
-.section swj6a
-.section swj6b
-.section swj7a
-.section swj7b
-.section swj8a
-.section swj8b
-.section swj9a
-.section swj9b
-.section swj0a
-.section swj0b
-.section swkaa
-.section swkab
-.section swkba
-.section swkbb
-.section swkca
-.section swkcb
-.section swkda
-.section swkdb
-.section swkea
-.section swkeb
-.section swkfa
-.section swkfb
-.section swkga
-.section swkgb
-.section swkha
-.section swkhb
-.section swkia
-.section swkib
-.section swkja
-.section swkjb
-.section swkka
-.section swkkb
-.section swkla
-.section swklb
-.section swkma
-.section swkmb
-.section swkna
-.section swknb
-.section swkoa
-.section swkob
-.section swkpa
-.section swkpb
-.section swkqa
-.section swkqb
-.section swkra
-.section swkrb
-.section swksa
-.section swksb
-.section swkta
-.section swktb
-.section swkua
-.section swkub
-.section swkva
-.section swkvb
-.section swkwa
-.section swkwb
-.section swkxa
-.section swkxb
-.section swkya
-.section swkyb
-.section swkza
-.section swkzb
-.section swk1a
-.section swk1b
-.section swk2a
-.section swk2b
-.section swk3a
-.section swk3b
-.section swk4a
-.section swk4b
-.section swk5a
-.section swk5b
-.section swk6a
-.section swk6b
-.section swk7a
-.section swk7b
-.section swk8a
-.section swk8b
-.section swk9a
-.section swk9b
-.section swk0a
-.section swk0b
-.section swlaa
-.section swlab
-.section swlba
-.section swlbb
-.section swlca
-.section swlcb
-.section swlda
-.section swldb
-.section swlea
-.section swleb
-.section swlfa
-.section swlfb
-.section swlga
-.section swlgb
-.section swlha
-.section swlhb
-.section swlia
-.section swlib
-.section swlja
-.section swljb
-.section swlka
-.section swlkb
-.section swlla
-.section swllb
-.section swlma
-.section swlmb
-.section swlna
-.section swlnb
-.section swloa
-.section swlob
-.section swlpa
-.section swlpb
-.section swlqa
-.section swlqb
-.section swlra
-.section swlrb
-.section swlsa
-.section swlsb
-.section swlta
-.section swltb
-.section swlua
-.section swlub
-.section swlva
-.section swlvb
-.section swlwa
-.section swlwb
-.section swlxa
-.section swlxb
-.section swlya
-.section swlyb
-.section swlza
-.section swlzb
-.section swl1a
-.section swl1b
-.section swl2a
-.section swl2b
-.section swl3a
-.section swl3b
-.section swl4a
-.section swl4b
-.section swl5a
-.section swl5b
-.section swl6a
-.section swl6b
-.section swl7a
-.section swl7b
-.section swl8a
-.section swl8b
-.section swl9a
-.section swl9b
-.section swl0a
-.section swl0b
-.section swmaa
-.section swmab
-.section swmba
-.section swmbb
-.section swmca
-.section swmcb
-.section swmda
-.section swmdb
-.section swmea
-.section swmeb
-.section swmfa
-.section swmfb
-.section swmga
-.section swmgb
-.section swmha
-.section swmhb
-.section swmia
-.section swmib
-.section swmja
-.section swmjb
-.section swmka
-.section swmkb
-.section swmla
-.section swmlb
-.section swmma
-.section swmmb
-.section swmna
-.section swmnb
-.section swmoa
-.section swmob
-.section swmpa
-.section swmpb
-.section swmqa
-.section swmqb
-.section swmra
-.section swmrb
-.section swmsa
-.section swmsb
-.section swmta
-.section swmtb
-.section swmua
-.section swmub
-.section swmva
-.section swmvb
-.section swmwa
-.section swmwb
-.section swmxa
-.section swmxb
-.section swmya
-.section swmyb
-.section swmza
-.section swmzb
-.section swm1a
-.section swm1b
-.section swm2a
-.section swm2b
-.section swm3a
-.section swm3b
-.section swm4a
-.section swm4b
-.section swm5a
-.section swm5b
-.section swm6a
-.section swm6b
-.section swm7a
-.section swm7b
-.section swm8a
-.section swm8b
-.section swm9a
-.section swm9b
-.section swm0a
-.section swm0b
-.section swnaa
-.section swnab
-.section swnba
-.section swnbb
-.section swnca
-.section swncb
-.section swnda
-.section swndb
-.section swnea
-.section swneb
-.section swnfa
-.section swnfb
-.section swnga
-.section swngb
-.section swnha
-.section swnhb
-.section swnia
-.section swnib
-.section swnja
-.section swnjb
-.section swnka
-.section swnkb
-.section swnla
-.section swnlb
-.section swnma
-.section swnmb
-.section swnna
-.section swnnb
-.section swnoa
-.section swnob
-.section swnpa
-.section swnpb
-.section swnqa
-.section swnqb
-.section swnra
-.section swnrb
-.section swnsa
-.section swnsb
-.section swnta
-.section swntb
-.section swnua
-.section swnub
-.section swnva
-.section swnvb
-.section swnwa
-.section swnwb
-.section swnxa
-.section swnxb
-.section swnya
-.section swnyb
-.section swnza
-.section swnzb
-.section swn1a
-.section swn1b
-.section swn2a
-.section swn2b
-.section swn3a
-.section swn3b
-.section swn4a
-.section swn4b
-.section swn5a
-.section swn5b
-.section swn6a
-.section swn6b
-.section swn7a
-.section swn7b
-.section swn8a
-.section swn8b
-.section swn9a
-.section swn9b
-.section swn0a
-.section swn0b
-.section swoaa
-.section swoab
-.section swoba
-.section swobb
-.section swoca
-.section swocb
-.section swoda
-.section swodb
-.section swoea
-.section swoeb
-.section swofa
-.section swofb
-.section swoga
-.section swogb
-.section swoha
-.section swohb
-.section swoia
-.section swoib
-.section swoja
-.section swojb
-.section swoka
-.section swokb
-.section swola
-.section swolb
-.section swoma
-.section swomb
-.section swona
-.section swonb
-.section swooa
-.section swoob
-.section swopa
-.section swopb
-.section swoqa
-.section swoqb
-.section swora
-.section sworb
-.section swosa
-.section swosb
-.section swota
-.section swotb
-.section swoua
-.section swoub
-.section swova
-.section swovb
-.section swowa
-.section swowb
-.section swoxa
-.section swoxb
-.section swoya
-.section swoyb
-.section swoza
-.section swozb
-.section swo1a
-.section swo1b
-.section swo2a
-.section swo2b
-.section swo3a
-.section swo3b
-.section swo4a
-.section swo4b
-.section swo5a
-.section swo5b
-.section swo6a
-.section swo6b
-.section swo7a
-.section swo7b
-.section swo8a
-.section swo8b
-.section swo9a
-.section swo9b
-.section swo0a
-.section swo0b
-.section swpaa
-.section swpab
-.section swpba
-.section swpbb
-.section swpca
-.section swpcb
-.section swpda
-.section swpdb
-.section swpea
-.section swpeb
-.section swpfa
-.section swpfb
-.section swpga
-.section swpgb
-.section swpha
-.section swphb
-.section swpia
-.section swpib
-.section swpja
-.section swpjb
-.section swpka
-.section swpkb
-.section swpla
-.section swplb
-.section swpma
-.section swpmb
-.section swpna
-.section swpnb
-.section swpoa
-.section swpob
-.section swppa
-.section swppb
-.section swpqa
-.section swpqb
-.section swpra
-.section swprb
-.section swpsa
-.section swpsb
-.section swpta
-.section swptb
-.section swpua
-.section swpub
-.section swpva
-.section swpvb
-.section swpwa
-.section swpwb
-.section swpxa
-.section swpxb
-.section swpya
-.section swpyb
-.section swpza
-.section swpzb
-.section swp1a
-.section swp1b
-.section swp2a
-.section swp2b
-.section swp3a
-.section swp3b
-.section swp4a
-.section swp4b
-.section swp5a
-.section swp5b
-.section swp6a
-.section swp6b
-.section swp7a
-.section swp7b
-.section swp8a
-.section swp8b
-.section swp9a
-.section swp9b
-.section swp0a
-.section swp0b
-.section swqaa
-.section swqab
-.section swqba
-.section swqbb
-.section swqca
-.section swqcb
-.section swqda
-.section swqdb
-.section swqea
-.section swqeb
-.section swqfa
-.section swqfb
-.section swqga
-.section swqgb
-.section swqha
-.section swqhb
-.section swqia
-.section swqib
-.section swqja
-.section swqjb
-.section swqka
-.section swqkb
-.section swqla
-.section swqlb
-.section swqma
-.section swqmb
-.section swqna
-.section swqnb
-.section swqoa
-.section swqob
-.section swqpa
-.section swqpb
-.section swqqa
-.section swqqb
-.section swqra
-.section swqrb
-.section swqsa
-.section swqsb
-.section swqta
-.section swqtb
-.section swqua
-.section swqub
-.section swqva
-.section swqvb
-.section swqwa
-.section swqwb
-.section swqxa
-.section swqxb
-.section swqya
-.section swqyb
-.section swqza
-.section swqzb
-.section swq1a
-.section swq1b
-.section swq2a
-.section swq2b
-.section swq3a
-.section swq3b
-.section swq4a
-.section swq4b
-.section swq5a
-.section swq5b
-.section swq6a
-.section swq6b
-.section swq7a
-.section swq7b
-.section swq8a
-.section swq8b
-.section swq9a
-.section swq9b
-.section swq0a
-.section swq0b
-.section swraa
-.section swrab
-.section swrba
-.section swrbb
-.section swrca
-.section swrcb
-.section swrda
-.section swrdb
-.section swrea
-.section swreb
-.section swrfa
-.section swrfb
-.section swrga
-.section swrgb
-.section swrha
-.section swrhb
-.section swria
-.section swrib
-.section swrja
-.section swrjb
-.section swrka
-.section swrkb
-.section swrla
-.section swrlb
-.section swrma
-.section swrmb
-.section swrna
-.section swrnb
-.section swroa
-.section swrob
-.section swrpa
-.section swrpb
-.section swrqa
-.section swrqb
-.section swrra
-.section swrrb
-.section swrsa
-.section swrsb
-.section swrta
-.section swrtb
-.section swrua
-.section swrub
-.section swrva
-.section swrvb
-.section swrwa
-.section swrwb
-.section swrxa
-.section swrxb
-.section swrya
-.section swryb
-.section swrza
-.section swrzb
-.section swr1a
-.section swr1b
-.section swr2a
-.section swr2b
-.section swr3a
-.section swr3b
-.section swr4a
-.section swr4b
-.section swr5a
-.section swr5b
-.section swr6a
-.section swr6b
-.section swr7a
-.section swr7b
-.section swr8a
-.section swr8b
-.section swr9a
-.section swr9b
-.section swr0a
-.section swr0b
-.section swsaa
-.section swsab
-.section swsba
-.section swsbb
-.section swsca
-.section swscb
-.section swsda
-.section swsdb
-.section swsea
-.section swseb
-.section swsfa
-.section swsfb
-.section swsga
-.section swsgb
-.section swsha
-.section swshb
-.section swsia
-.section swsib
-.section swsja
-.section swsjb
-.section swska
-.section swskb
-.section swsla
-.section swslb
-.section swsma
-.section swsmb
-.section swsna
-.section swsnb
-.section swsoa
-.section swsob
-.section swspa
-.section swspb
-.section swsqa
-.section swsqb
-.section swsra
-.section swsrb
-.section swssa
-.section swssb
-.section swsta
-.section swstb
-.section swsua
-.section swsub
-.section swsva
-.section swsvb
-.section swswa
-.section swswb
-.section swsxa
-.section swsxb
-.section swsya
-.section swsyb
-.section swsza
-.section swszb
-.section sws1a
-.section sws1b
-.section sws2a
-.section sws2b
-.section sws3a
-.section sws3b
-.section sws4a
-.section sws4b
-.section sws5a
-.section sws5b
-.section sws6a
-.section sws6b
-.section sws7a
-.section sws7b
-.section sws8a
-.section sws8b
-.section sws9a
-.section sws9b
-.section sws0a
-.section sws0b
-.section swtaa
-.section swtab
-.section swtba
-.section swtbb
-.section swtca
-.section swtcb
-.section swtda
-.section swtdb
-.section swtea
-.section swteb
-.section swtfa
-.section swtfb
-.section swtga
-.section swtgb
-.section swtha
-.section swthb
-.section swtia
-.section swtib
-.section swtja
-.section swtjb
-.section swtka
-.section swtkb
-.section swtla
-.section swtlb
-.section swtma
-.section swtmb
-.section swtna
-.section swtnb
-.section swtoa
-.section swtob
-.section swtpa
-.section swtpb
-.section swtqa
-.section swtqb
-.section swtra
-.section swtrb
-.section swtsa
-.section swtsb
-.section swtta
-.section swttb
-.section swtua
-.section swtub
-.section swtva
-.section swtvb
-.section swtwa
-.section swtwb
-.section swtxa
-.section swtxb
-.section swtya
-.section swtyb
-.section swtza
-.section swtzb
-.section swt1a
-.section swt1b
-.section swt2a
-.section swt2b
-.section swt3a
-.section swt3b
-.section swt4a
-.section swt4b
-.section swt5a
-.section swt5b
-.section swt6a
-.section swt6b
-.section swt7a
-.section swt7b
-.section swt8a
-.section swt8b
-.section swt9a
-.section swt9b
-.section swt0a
-.section swt0b
-.section swuaa
-.section swuab
-.section swuba
-.section swubb
-.section swuca
-.section swucb
-.section swuda
-.section swudb
-.section swuea
-.section swueb
-.section swufa
-.section swufb
-.section swuga
-.section swugb
-.section swuha
-.section swuhb
-.section swuia
-.section swuib
-.section swuja
-.section swujb
-.section swuka
-.section swukb
-.section swula
-.section swulb
-.section swuma
-.section swumb
-.section swuna
-.section swunb
-.section swuoa
-.section swuob
-.section swupa
-.section swupb
-.section swuqa
-.section swuqb
-.section swura
-.section swurb
-.section swusa
-.section swusb
-.section swuta
-.section swutb
-.section swuua
-.section swuub
-.section swuva
-.section swuvb
-.section swuwa
-.section swuwb
-.section swuxa
-.section swuxb
-.section swuya
-.section swuyb
-.section swuza
-.section swuzb
-.section swu1a
-.section swu1b
-.section swu2a
-.section swu2b
-.section swu3a
-.section swu3b
-.section swu4a
-.section swu4b
-.section swu5a
-.section swu5b
-.section swu6a
-.section swu6b
-.section swu7a
-.section swu7b
-.section swu8a
-.section swu8b
-.section swu9a
-.section swu9b
-.section swu0a
-.section swu0b
-.section swvaa
-.section swvab
-.section swvba
-.section swvbb
-.section swvca
-.section swvcb
-.section swvda
-.section swvdb
-.section swvea
-.section swveb
-.section swvfa
-.section swvfb
-.section swvga
-.section swvgb
-.section swvha
-.section swvhb
-.section swvia
-.section swvib
-.section swvja
-.section swvjb
-.section swvka
-.section swvkb
-.section swvla
-.section swvlb
-.section swvma
-.section swvmb
-.section swvna
-.section swvnb
-.section swvoa
-.section swvob
-.section swvpa
-.section swvpb
-.section swvqa
-.section swvqb
-.section swvra
-.section swvrb
-.section swvsa
-.section swvsb
-.section swvta
-.section swvtb
-.section swvua
-.section swvub
-.section swvva
-.section swvvb
-.section swvwa
-.section swvwb
-.section swvxa
-.section swvxb
-.section swvya
-.section swvyb
-.section swvza
-.section swvzb
-.section swv1a
-.section swv1b
-.section swv2a
-.section swv2b
-.section swv3a
-.section swv3b
-.section swv4a
-.section swv4b
-.section swv5a
-.section swv5b
-.section swv6a
-.section swv6b
-.section swv7a
-.section swv7b
-.section swv8a
-.section swv8b
-.section swv9a
-.section swv9b
-.section swv0a
-.section swv0b
-.section swwaa
-.section swwab
-.section swwba
-.section swwbb
-.section swwca
-.section swwcb
-.section swwda
-.section swwdb
-.section swwea
-.section swweb
-.section swwfa
-.section swwfb
-.section swwga
-.section swwgb
-.section swwha
-.section swwhb
-.section swwia
-.section swwib
-.section swwja
-.section swwjb
-.section swwka
-.section swwkb
-.section swwla
-.section swwlb
-.section swwma
-.section swwmb
-.section swwna
-.section swwnb
-.section swwoa
-.section swwob
-.section swwpa
-.section swwpb
-.section swwqa
-.section swwqb
-.section swwra
-.section swwrb
-.section swwsa
-.section swwsb
-.section swwta
-.section swwtb
-.section swwua
-.section swwub
-.section swwva
-.section swwvb
-.section swwwa
-.section swwwb
-.section swwxa
-.section swwxb
-.section swwya
-.section swwyb
-.section swwza
-.section swwzb
-.section sww1a
-.section sww1b
-.section sww2a
-.section sww2b
-.section sww3a
-.section sww3b
-.section sww4a
-.section sww4b
-.section sww5a
-.section sww5b
-.section sww6a
-.section sww6b
-.section sww7a
-.section sww7b
-.section sww8a
-.section sww8b
-.section sww9a
-.section sww9b
-.section sww0a
-.section sww0b
-.section swxaa
-.section swxab
-.section swxba
-.section swxbb
-.section swxca
-.section swxcb
-.section swxda
-.section swxdb
-.section swxea
-.section swxeb
-.section swxfa
-.section swxfb
-.section swxga
-.section swxgb
-.section swxha
-.section swxhb
-.section swxia
-.section swxib
-.section swxja
-.section swxjb
-.section swxka
-.section swxkb
-.section swxla
-.section swxlb
-.section swxma
-.section swxmb
-.section swxna
-.section swxnb
-.section swxoa
-.section swxob
-.section swxpa
-.section swxpb
-.section swxqa
-.section swxqb
-.section swxra
-.section swxrb
-.section swxsa
-.section swxsb
-.section swxta
-.section swxtb
-.section swxua
-.section swxub
-.section swxva
-.section swxvb
-.section swxwa
-.section swxwb
-.section swxxa
-.section swxxb
-.section swxya
-.section swxyb
-.section swxza
-.section swxzb
-.section swx1a
-.section swx1b
-.section swx2a
-.section swx2b
-.section swx3a
-.section swx3b
-.section swx4a
-.section swx4b
-.section swx5a
-.section swx5b
-.section swx6a
-.section swx6b
-.section swx7a
-.section swx7b
-.section swx8a
-.section swx8b
-.section swx9a
-.section swx9b
-.section swx0a
-.section swx0b
-.section swyaa
-.section swyab
-.section swyba
-.section swybb
-.section swyca
-.section swycb
-.section swyda
-.section swydb
-.section swyea
-.section swyeb
-.section swyfa
-.section swyfb
-.section swyga
-.section swygb
-.section swyha
-.section swyhb
-.section swyia
-.section swyib
-.section swyja
-.section swyjb
-.section swyka
-.section swykb
-.section swyla
-.section swylb
-.section swyma
-.section swymb
-.section swyna
-.section swynb
-.section swyoa
-.section swyob
-.section swypa
-.section swypb
-.section swyqa
-.section swyqb
-.section swyra
-.section swyrb
-.section swysa
-.section swysb
-.section swyta
-.section swytb
-.section swyua
-.section swyub
-.section swyva
-.section swyvb
-.section swywa
-.section swywb
-.section swyxa
-.section swyxb
-.section swyya
-.section swyyb
-.section swyza
-.section swyzb
-.section swy1a
-.section swy1b
-.section swy2a
-.section swy2b
-.section swy3a
-.section swy3b
-.section swy4a
-.section swy4b
-.section swy5a
-.section swy5b
-.section swy6a
-.section swy6b
-.section swy7a
-.section swy7b
-.section swy8a
-.section swy8b
-.section swy9a
-.section swy9b
-.section swy0a
-.section swy0b
-.section swzaa
-.section swzab
-.section swzba
-.section swzbb
-.section swzca
-.section swzcb
-.section swzda
-.section swzdb
-.section swzea
-.section swzeb
-.section swzfa
-.section swzfb
-.section swzga
-.section swzgb
-.section swzha
-.section swzhb
-.section swzia
-.section swzib
-.section swzja
-.section swzjb
-.section swzka
-.section swzkb
-.section swzla
-.section swzlb
-.section swzma
-.section swzmb
-.section swzna
-.section swznb
-.section swzoa
-.section swzob
-.section swzpa
-.section swzpb
-.section swzqa
-.section swzqb
-.section swzra
-.section swzrb
-.section swzsa
-.section swzsb
-.section swzta
-.section swztb
-.section swzua
-.section swzub
-.section swzva
-.section swzvb
-.section swzwa
-.section swzwb
-.section swzxa
-.section swzxb
-.section swzya
-.section swzyb
-.section swzza
-.section swzzb
-.section swz1a
-.section swz1b
-.section swz2a
-.section swz2b
-.section swz3a
-.section swz3b
-.section swz4a
-.section swz4b
-.section swz5a
-.section swz5b
-.section swz6a
-.section swz6b
-.section swz7a
-.section swz7b
-.section swz8a
-.section swz8b
-.section swz9a
-.section swz9b
-.section swz0a
-.section swz0b
-.section sw1aa
-.section sw1ab
-.section sw1ba
-.section sw1bb
-.section sw1ca
-.section sw1cb
-.section sw1da
-.section sw1db
-.section sw1ea
-.section sw1eb
-.section sw1fa
-.section sw1fb
-.section sw1ga
-.section sw1gb
-.section sw1ha
-.section sw1hb
-.section sw1ia
-.section sw1ib
-.section sw1ja
-.section sw1jb
-.section sw1ka
-.section sw1kb
-.section sw1la
-.section sw1lb
-.section sw1ma
-.section sw1mb
-.section sw1na
-.section sw1nb
-.section sw1oa
-.section sw1ob
-.section sw1pa
-.section sw1pb
-.section sw1qa
-.section sw1qb
-.section sw1ra
-.section sw1rb
-.section sw1sa
-.section sw1sb
-.section sw1ta
-.section sw1tb
-.section sw1ua
-.section sw1ub
-.section sw1va
-.section sw1vb
-.section sw1wa
-.section sw1wb
-.section sw1xa
-.section sw1xb
-.section sw1ya
-.section sw1yb
-.section sw1za
-.section sw1zb
-.section sw11a
-.section sw11b
-.section sw12a
-.section sw12b
-.section sw13a
-.section sw13b
-.section sw14a
-.section sw14b
-.section sw15a
-.section sw15b
-.section sw16a
-.section sw16b
-.section sw17a
-.section sw17b
-.section sw18a
-.section sw18b
-.section sw19a
-.section sw19b
-.section sw10a
-.section sw10b
-.section sw2aa
-.section sw2ab
-.section sw2ba
-.section sw2bb
-.section sw2ca
-.section sw2cb
-.section sw2da
-.section sw2db
-.section sw2ea
-.section sw2eb
-.section sw2fa
-.section sw2fb
-.section sw2ga
-.section sw2gb
-.section sw2ha
-.section sw2hb
-.section sw2ia
-.section sw2ib
-.section sw2ja
-.section sw2jb
-.section sw2ka
-.section sw2kb
-.section sw2la
-.section sw2lb
-.section sw2ma
-.section sw2mb
-.section sw2na
-.section sw2nb
-.section sw2oa
-.section sw2ob
-.section sw2pa
-.section sw2pb
-.section sw2qa
-.section sw2qb
-.section sw2ra
-.section sw2rb
-.section sw2sa
-.section sw2sb
-.section sw2ta
-.section sw2tb
-.section sw2ua
-.section sw2ub
-.section sw2va
-.section sw2vb
-.section sw2wa
-.section sw2wb
-.section sw2xa
-.section sw2xb
-.section sw2ya
-.section sw2yb
-.section sw2za
-.section sw2zb
-.section sw21a
-.section sw21b
-.section sw22a
-.section sw22b
-.section sw23a
-.section sw23b
-.section sw24a
-.section sw24b
-.section sw25a
-.section sw25b
-.section sw26a
-.section sw26b
-.section sw27a
-.section sw27b
-.section sw28a
-.section sw28b
-.section sw29a
-.section sw29b
-.section sw20a
-.section sw20b
-.section sw3aa
-.section sw3ab
-.section sw3ba
-.section sw3bb
-.section sw3ca
-.section sw3cb
-.section sw3da
-.section sw3db
-.section sw3ea
-.section sw3eb
-.section sw3fa
-.section sw3fb
-.section sw3ga
-.section sw3gb
-.section sw3ha
-.section sw3hb
-.section sw3ia
-.section sw3ib
-.section sw3ja
-.section sw3jb
-.section sw3ka
-.section sw3kb
-.section sw3la
-.section sw3lb
-.section sw3ma
-.section sw3mb
-.section sw3na
-.section sw3nb
-.section sw3oa
-.section sw3ob
-.section sw3pa
-.section sw3pb
-.section sw3qa
-.section sw3qb
-.section sw3ra
-.section sw3rb
-.section sw3sa
-.section sw3sb
-.section sw3ta
-.section sw3tb
-.section sw3ua
-.section sw3ub
-.section sw3va
-.section sw3vb
-.section sw3wa
-.section sw3wb
-.section sw3xa
-.section sw3xb
-.section sw3ya
-.section sw3yb
-.section sw3za
-.section sw3zb
-.section sw31a
-.section sw31b
-.section sw32a
-.section sw32b
-.section sw33a
-.section sw33b
-.section sw34a
-.section sw34b
-.section sw35a
-.section sw35b
-.section sw36a
-.section sw36b
-.section sw37a
-.section sw37b
-.section sw38a
-.section sw38b
-.section sw39a
-.section sw39b
-.section sw30a
-.section sw30b
-.section sw4aa
-.section sw4ab
-.section sw4ba
-.section sw4bb
-.section sw4ca
-.section sw4cb
-.section sw4da
-.section sw4db
-.section sw4ea
-.section sw4eb
-.section sw4fa
-.section sw4fb
-.section sw4ga
-.section sw4gb
-.section sw4ha
-.section sw4hb
-.section sw4ia
-.section sw4ib
-.section sw4ja
-.section sw4jb
-.section sw4ka
-.section sw4kb
-.section sw4la
-.section sw4lb
-.section sw4ma
-.section sw4mb
-.section sw4na
-.section sw4nb
-.section sw4oa
-.section sw4ob
-.section sw4pa
-.section sw4pb
-.section sw4qa
-.section sw4qb
-.section sw4ra
-.section sw4rb
-.section sw4sa
-.section sw4sb
-.section sw4ta
-.section sw4tb
-.section sw4ua
-.section sw4ub
-.section sw4va
-.section sw4vb
-.section sw4wa
-.section sw4wb
-.section sw4xa
-.section sw4xb
-.section sw4ya
-.section sw4yb
-.section sw4za
-.section sw4zb
-.section sw41a
-.section sw41b
-.section sw42a
-.section sw42b
-.section sw43a
-.section sw43b
-.section sw44a
-.section sw44b
-.section sw45a
-.section sw45b
-.section sw46a
-.section sw46b
-.section sw47a
-.section sw47b
-.section sw48a
-.section sw48b
-.section sw49a
-.section sw49b
-.section sw40a
-.section sw40b
-.section sw5aa
-.section sw5ab
-.section sw5ba
-.section sw5bb
-.section sw5ca
-.section sw5cb
-.section sw5da
-.section sw5db
-.section sw5ea
-.section sw5eb
-.section sw5fa
-.section sw5fb
-.section sw5ga
-.section sw5gb
-.section sw5ha
-.section sw5hb
-.section sw5ia
-.section sw5ib
-.section sw5ja
-.section sw5jb
-.section sw5ka
-.section sw5kb
-.section sw5la
-.section sw5lb
-.section sw5ma
-.section sw5mb
-.section sw5na
-.section sw5nb
-.section sw5oa
-.section sw5ob
-.section sw5pa
-.section sw5pb
-.section sw5qa
-.section sw5qb
-.section sw5ra
-.section sw5rb
-.section sw5sa
-.section sw5sb
-.section sw5ta
-.section sw5tb
-.section sw5ua
-.section sw5ub
-.section sw5va
-.section sw5vb
-.section sw5wa
-.section sw5wb
-.section sw5xa
-.section sw5xb
-.section sw5ya
-.section sw5yb
-.section sw5za
-.section sw5zb
-.section sw51a
-.section sw51b
-.section sw52a
-.section sw52b
-.section sw53a
-.section sw53b
-.section sw54a
-.section sw54b
-.section sw55a
-.section sw55b
-.section sw56a
-.section sw56b
-.section sw57a
-.section sw57b
-.section sw58a
-.section sw58b
-.section sw59a
-.section sw59b
-.section sw50a
-.section sw50b
-.section sw6aa
-.section sw6ab
-.section sw6ba
-.section sw6bb
-.section sw6ca
-.section sw6cb
-.section sw6da
-.section sw6db
-.section sw6ea
-.section sw6eb
-.section sw6fa
-.section sw6fb
-.section sw6ga
-.section sw6gb
-.section sw6ha
-.section sw6hb
-.section sw6ia
-.section sw6ib
-.section sw6ja
-.section sw6jb
-.section sw6ka
-.section sw6kb
-.section sw6la
-.section sw6lb
-.section sw6ma
-.section sw6mb
-.section sw6na
-.section sw6nb
-.section sw6oa
-.section sw6ob
-.section sw6pa
-.section sw6pb
-.section sw6qa
-.section sw6qb
-.section sw6ra
-.section sw6rb
-.section sw6sa
-.section sw6sb
-.section sw6ta
-.section sw6tb
-.section sw6ua
-.section sw6ub
-.section sw6va
-.section sw6vb
-.section sw6wa
-.section sw6wb
-.section sw6xa
-.section sw6xb
-.section sw6ya
-.section sw6yb
-.section sw6za
-.section sw6zb
-.section sw61a
-.section sw61b
-.section sw62a
-.section sw62b
-.section sw63a
-.section sw63b
-.section sw64a
-.section sw64b
-.section sw65a
-.section sw65b
-.section sw66a
-.section sw66b
-.section sw67a
-.section sw67b
-.section sw68a
-.section sw68b
-.section sw69a
-.section sw69b
-.section sw60a
-.section sw60b
-.section sw7aa
-.section sw7ab
-.section sw7ba
-.section sw7bb
-.section sw7ca
-.section sw7cb
-.section sw7da
-.section sw7db
-.section sw7ea
-.section sw7eb
-.section sw7fa
-.section sw7fb
-.section sw7ga
-.section sw7gb
-.section sw7ha
-.section sw7hb
-.section sw7ia
-.section sw7ib
-.section sw7ja
-.section sw7jb
-.section sw7ka
-.section sw7kb
-.section sw7la
-.section sw7lb
-.section sw7ma
-.section sw7mb
-.section sw7na
-.section sw7nb
-.section sw7oa
-.section sw7ob
-.section sw7pa
-.section sw7pb
-.section sw7qa
-.section sw7qb
-.section sw7ra
-.section sw7rb
-.section sw7sa
-.section sw7sb
-.section sw7ta
-.section sw7tb
-.section sw7ua
-.section sw7ub
-.section sw7va
-.section sw7vb
-.section sw7wa
-.section sw7wb
-.section sw7xa
-.section sw7xb
-.section sw7ya
-.section sw7yb
-.section sw7za
-.section sw7zb
-.section sw71a
-.section sw71b
-.section sw72a
-.section sw72b
-.section sw73a
-.section sw73b
-.section sw74a
-.section sw74b
-.section sw75a
-.section sw75b
-.section sw76a
-.section sw76b
-.section sw77a
-.section sw77b
-.section sw78a
-.section sw78b
-.section sw79a
-.section sw79b
-.section sw70a
-.section sw70b
-.section sw8aa
-.section sw8ab
-.section sw8ba
-.section sw8bb
-.section sw8ca
-.section sw8cb
-.section sw8da
-.section sw8db
-.section sw8ea
-.section sw8eb
-.section sw8fa
-.section sw8fb
-.section sw8ga
-.section sw8gb
-.section sw8ha
-.section sw8hb
-.section sw8ia
-.section sw8ib
-.section sw8ja
-.section sw8jb
-.section sw8ka
-.section sw8kb
-.section sw8la
-.section sw8lb
-.section sw8ma
-.section sw8mb
-.section sw8na
-.section sw8nb
-.section sw8oa
-.section sw8ob
-.section sw8pa
-.section sw8pb
-.section sw8qa
-.section sw8qb
-.section sw8ra
-.section sw8rb
-.section sw8sa
-.section sw8sb
-.section sw8ta
-.section sw8tb
-.section sw8ua
-.section sw8ub
-.section sw8va
-.section sw8vb
-.section sw8wa
-.section sw8wb
-.section sw8xa
-.section sw8xb
-.section sw8ya
-.section sw8yb
-.section sw8za
-.section sw8zb
-.section sw81a
-.section sw81b
-.section sw82a
-.section sw82b
-.section sw83a
-.section sw83b
-.section sw84a
-.section sw84b
-.section sw85a
-.section sw85b
-.section sw86a
-.section sw86b
-.section sw87a
-.section sw87b
-.section sw88a
-.section sw88b
-.section sw89a
-.section sw89b
-.section sw80a
-.section sw80b
-.section sw9aa
-.section sw9ab
-.section sw9ba
-.section sw9bb
-.section sw9ca
-.section sw9cb
-.section sw9da
-.section sw9db
-.section sw9ea
-.section sw9eb
-.section sw9fa
-.section sw9fb
-.section sw9ga
-.section sw9gb
-.section sw9ha
-.section sw9hb
-.section sw9ia
-.section sw9ib
-.section sw9ja
-.section sw9jb
-.section sw9ka
-.section sw9kb
-.section sw9la
-.section sw9lb
-.section sw9ma
-.section sw9mb
-.section sw9na
-.section sw9nb
-.section sw9oa
-.section sw9ob
-.section sw9pa
-.section sw9pb
-.section sw9qa
-.section sw9qb
-.section sw9ra
-.section sw9rb
-.section sw9sa
-.section sw9sb
-.section sw9ta
-.section sw9tb
-.section sw9ua
-.section sw9ub
-.section sw9va
-.section sw9vb
-.section sw9wa
-.section sw9wb
-.section sw9xa
-.section sw9xb
-.section sw9ya
-.section sw9yb
-.section sw9za
-.section sw9zb
-.section sw91a
-.section sw91b
-.section sw92a
-.section sw92b
-.section sw93a
-.section sw93b
-.section sw94a
-.section sw94b
-.section sw95a
-.section sw95b
-.section sw96a
-.section sw96b
-.section sw97a
-.section sw97b
-.section sw98a
-.section sw98b
-.section sw99a
-.section sw99b
-.section sw90a
-.section sw90b
-.section sw0aa
-.section sw0ab
-.section sw0ba
-.section sw0bb
-.section sw0ca
-.section sw0cb
-.section sw0da
-.section sw0db
-.section sw0ea
-.section sw0eb
-.section sw0fa
-.section sw0fb
-.section sw0ga
-.section sw0gb
-.section sw0ha
-.section sw0hb
-.section sw0ia
-.section sw0ib
-.section sw0ja
-.section sw0jb
-.section sw0ka
-.section sw0kb
-.section sw0la
-.section sw0lb
-.section sw0ma
-.section sw0mb
-.section sw0na
-.section sw0nb
-.section sw0oa
-.section sw0ob
-.section sw0pa
-.section sw0pb
-.section sw0qa
-.section sw0qb
-.section sw0ra
-.section sw0rb
-.section sw0sa
-.section sw0sb
-.section sw0ta
-.section sw0tb
-.section sw0ua
-.section sw0ub
-.section sw0va
-.section sw0vb
-.section sw0wa
-.section sw0wb
-.section sw0xa
-.section sw0xb
-.section sw0ya
-.section sw0yb
-.section sw0za
-.section sw0zb
-.section sw01a
-.section sw01b
-.section sw02a
-.section sw02b
-.section sw03a
-.section sw03b
-.section sw04a
-.section sw04b
-.section sw05a
-.section sw05b
-.section sw06a
-.section sw06b
-.section sw07a
-.section sw07b
-.section sw08a
-.section sw08b
-.section sw09a
-.section sw09b
-.section sw00a
-.section sw00b
-.section sxaaa
-.section sxaab
-.section sxaba
-.section sxabb
-.section sxaca
-.section sxacb
-.section sxada
-.section sxadb
-.section sxaea
-.section sxaeb
-.section sxafa
-.section sxafb
-.section sxaga
-.section sxagb
-.section sxaha
-.section sxahb
-.section sxaia
-.section sxaib
-.section sxaja
-.section sxajb
-.section sxaka
-.section sxakb
-.section sxala
-.section sxalb
-.section sxama
-.section sxamb
-.section sxana
-.section sxanb
-.section sxaoa
-.section sxaob
-.section sxapa
-.section sxapb
-.section sxaqa
-.section sxaqb
-.section sxara
-.section sxarb
-.section sxasa
-.section sxasb
-.section sxata
-.section sxatb
-.section sxaua
-.section sxaub
-.section sxava
-.section sxavb
-.section sxawa
-.section sxawb
-.section sxaxa
-.section sxaxb
-.section sxaya
-.section sxayb
-.section sxaza
-.section sxazb
-.section sxa1a
-.section sxa1b
-.section sxa2a
-.section sxa2b
-.section sxa3a
-.section sxa3b
-.section sxa4a
-.section sxa4b
-.section sxa5a
-.section sxa5b
-.section sxa6a
-.section sxa6b
-.section sxa7a
-.section sxa7b
-.section sxa8a
-.section sxa8b
-.section sxa9a
-.section sxa9b
-.section sxa0a
-.section sxa0b
-.section sxbaa
-.section sxbab
-.section sxbba
-.section sxbbb
-.section sxbca
-.section sxbcb
-.section sxbda
-.section sxbdb
-.section sxbea
-.section sxbeb
-.section sxbfa
-.section sxbfb
-.section sxbga
-.section sxbgb
-.section sxbha
-.section sxbhb
-.section sxbia
-.section sxbib
-.section sxbja
-.section sxbjb
-.section sxbka
-.section sxbkb
-.section sxbla
-.section sxblb
-.section sxbma
-.section sxbmb
-.section sxbna
-.section sxbnb
-.section sxboa
-.section sxbob
-.section sxbpa
-.section sxbpb
-.section sxbqa
-.section sxbqb
-.section sxbra
-.section sxbrb
-.section sxbsa
-.section sxbsb
-.section sxbta
-.section sxbtb
-.section sxbua
-.section sxbub
-.section sxbva
-.section sxbvb
-.section sxbwa
-.section sxbwb
-.section sxbxa
-.section sxbxb
-.section sxbya
-.section sxbyb
-.section sxbza
-.section sxbzb
-.section sxb1a
-.section sxb1b
-.section sxb2a
-.section sxb2b
-.section sxb3a
-.section sxb3b
-.section sxb4a
-.section sxb4b
-.section sxb5a
-.section sxb5b
-.section sxb6a
-.section sxb6b
-.section sxb7a
-.section sxb7b
-.section sxb8a
-.section sxb8b
-.section sxb9a
-.section sxb9b
-.section sxb0a
-.section sxb0b
-.section sxcaa
-.section sxcab
-.section sxcba
-.section sxcbb
-.section sxcca
-.section sxccb
-.section sxcda
-.section sxcdb
-.section sxcea
-.section sxceb
-.section sxcfa
-.section sxcfb
-.section sxcga
-.section sxcgb
-.section sxcha
-.section sxchb
-.section sxcia
-.section sxcib
-.section sxcja
-.section sxcjb
-.section sxcka
-.section sxckb
-.section sxcla
-.section sxclb
-.section sxcma
-.section sxcmb
-.section sxcna
-.section sxcnb
-.section sxcoa
-.section sxcob
-.section sxcpa
-.section sxcpb
-.section sxcqa
-.section sxcqb
-.section sxcra
-.section sxcrb
-.section sxcsa
-.section sxcsb
-.section sxcta
-.section sxctb
-.section sxcua
-.section sxcub
-.section sxcva
-.section sxcvb
-.section sxcwa
-.section sxcwb
-.section sxcxa
-.section sxcxb
-.section sxcya
-.section sxcyb
-.section sxcza
-.section sxczb
-.section sxc1a
-.section sxc1b
-.section sxc2a
-.section sxc2b
-.section sxc3a
-.section sxc3b
-.section sxc4a
-.section sxc4b
-.section sxc5a
-.section sxc5b
-.section sxc6a
-.section sxc6b
-.section sxc7a
-.section sxc7b
-.section sxc8a
-.section sxc8b
-.section sxc9a
-.section sxc9b
-.section sxc0a
-.section sxc0b
-.section sxdaa
-.section sxdab
-.section sxdba
-.section sxdbb
-.section sxdca
-.section sxdcb
-.section sxdda
-.section sxddb
-.section sxdea
-.section sxdeb
-.section sxdfa
-.section sxdfb
-.section sxdga
-.section sxdgb
-.section sxdha
-.section sxdhb
-.section sxdia
-.section sxdib
-.section sxdja
-.section sxdjb
-.section sxdka
-.section sxdkb
-.section sxdla
-.section sxdlb
-.section sxdma
-.section sxdmb
-.section sxdna
-.section sxdnb
-.section sxdoa
-.section sxdob
-.section sxdpa
-.section sxdpb
-.section sxdqa
-.section sxdqb
-.section sxdra
-.section sxdrb
-.section sxdsa
-.section sxdsb
-.section sxdta
-.section sxdtb
-.section sxdua
-.section sxdub
-.section sxdva
-.section sxdvb
-.section sxdwa
-.section sxdwb
-.section sxdxa
-.section sxdxb
-.section sxdya
-.section sxdyb
-.section sxdza
-.section sxdzb
-.section sxd1a
-.section sxd1b
-.section sxd2a
-.section sxd2b
-.section sxd3a
-.section sxd3b
-.section sxd4a
-.section sxd4b
-.section sxd5a
-.section sxd5b
-.section sxd6a
-.section sxd6b
-.section sxd7a
-.section sxd7b
-.section sxd8a
-.section sxd8b
-.section sxd9a
-.section sxd9b
-.section sxd0a
-.section sxd0b
-.section sxeaa
-.section sxeab
-.section sxeba
-.section sxebb
-.section sxeca
-.section sxecb
-.section sxeda
-.section sxedb
-.section sxeea
-.section sxeeb
-.section sxefa
-.section sxefb
-.section sxega
-.section sxegb
-.section sxeha
-.section sxehb
-.section sxeia
-.section sxeib
-.section sxeja
-.section sxejb
-.section sxeka
-.section sxekb
-.section sxela
-.section sxelb
-.section sxema
-.section sxemb
-.section sxena
-.section sxenb
-.section sxeoa
-.section sxeob
-.section sxepa
-.section sxepb
-.section sxeqa
-.section sxeqb
-.section sxera
-.section sxerb
-.section sxesa
-.section sxesb
-.section sxeta
-.section sxetb
-.section sxeua
-.section sxeub
-.section sxeva
-.section sxevb
-.section sxewa
-.section sxewb
-.section sxexa
-.section sxexb
-.section sxeya
-.section sxeyb
-.section sxeza
-.section sxezb
-.section sxe1a
-.section sxe1b
-.section sxe2a
-.section sxe2b
-.section sxe3a
-.section sxe3b
-.section sxe4a
-.section sxe4b
-.section sxe5a
-.section sxe5b
-.section sxe6a
-.section sxe6b
-.section sxe7a
-.section sxe7b
-.section sxe8a
-.section sxe8b
-.section sxe9a
-.section sxe9b
-.section sxe0a
-.section sxe0b
-.section sxfaa
-.section sxfab
-.section sxfba
-.section sxfbb
-.section sxfca
-.section sxfcb
-.section sxfda
-.section sxfdb
-.section sxfea
-.section sxfeb
-.section sxffa
-.section sxffb
-.section sxfga
-.section sxfgb
-.section sxfha
-.section sxfhb
-.section sxfia
-.section sxfib
-.section sxfja
-.section sxfjb
-.section sxfka
-.section sxfkb
-.section sxfla
-.section sxflb
-.section sxfma
-.section sxfmb
-.section sxfna
-.section sxfnb
-.section sxfoa
-.section sxfob
-.section sxfpa
-.section sxfpb
-.section sxfqa
-.section sxfqb
-.section sxfra
-.section sxfrb
-.section sxfsa
-.section sxfsb
-.section sxfta
-.section sxftb
-.section sxfua
-.section sxfub
-.section sxfva
-.section sxfvb
-.section sxfwa
-.section sxfwb
-.section sxfxa
-.section sxfxb
-.section sxfya
-.section sxfyb
-.section sxfza
-.section sxfzb
-.section sxf1a
-.section sxf1b
-.section sxf2a
-.section sxf2b
-.section sxf3a
-.section sxf3b
-.section sxf4a
-.section sxf4b
-.section sxf5a
-.section sxf5b
-.section sxf6a
-.section sxf6b
-.section sxf7a
-.section sxf7b
-.section sxf8a
-.section sxf8b
-.section sxf9a
-.section sxf9b
-.section sxf0a
-.section sxf0b
-.section sxgaa
-.section sxgab
-.section sxgba
-.section sxgbb
-.section sxgca
-.section sxgcb
-.section sxgda
-.section sxgdb
-.section sxgea
-.section sxgeb
-.section sxgfa
-.section sxgfb
-.section sxgga
-.section sxggb
-.section sxgha
-.section sxghb
-.section sxgia
-.section sxgib
-.section sxgja
-.section sxgjb
-.section sxgka
-.section sxgkb
-.section sxgla
-.section sxglb
-.section sxgma
-.section sxgmb
-.section sxgna
-.section sxgnb
-.section sxgoa
-.section sxgob
-.section sxgpa
-.section sxgpb
-.section sxgqa
-.section sxgqb
-.section sxgra
-.section sxgrb
-.section sxgsa
-.section sxgsb
-.section sxgta
-.section sxgtb
-.section sxgua
-.section sxgub
-.section sxgva
-.section sxgvb
-.section sxgwa
-.section sxgwb
-.section sxgxa
-.section sxgxb
-.section sxgya
-.section sxgyb
-.section sxgza
-.section sxgzb
-.section sxg1a
-.section sxg1b
-.section sxg2a
-.section sxg2b
-.section sxg3a
-.section sxg3b
-.section sxg4a
-.section sxg4b
-.section sxg5a
-.section sxg5b
-.section sxg6a
-.section sxg6b
-.section sxg7a
-.section sxg7b
-.section sxg8a
-.section sxg8b
-.section sxg9a
-.section sxg9b
-.section sxg0a
-.section sxg0b
-.section sxhaa
-.section sxhab
-.section sxhba
-.section sxhbb
-.section sxhca
-.section sxhcb
-.section sxhda
-.section sxhdb
-.section sxhea
-.section sxheb
-.section sxhfa
-.section sxhfb
-.section sxhga
-.section sxhgb
-.section sxhha
-.section sxhhb
-.section sxhia
-.section sxhib
-.section sxhja
-.section sxhjb
-.section sxhka
-.section sxhkb
-.section sxhla
-.section sxhlb
-.section sxhma
-.section sxhmb
-.section sxhna
-.section sxhnb
-.section sxhoa
-.section sxhob
-.section sxhpa
-.section sxhpb
-.section sxhqa
-.section sxhqb
-.section sxhra
-.section sxhrb
-.section sxhsa
-.section sxhsb
-.section sxhta
-.section sxhtb
-.section sxhua
-.section sxhub
-.section sxhva
-.section sxhvb
-.section sxhwa
-.section sxhwb
-.section sxhxa
-.section sxhxb
-.section sxhya
-.section sxhyb
-.section sxhza
-.section sxhzb
-.section sxh1a
-.section sxh1b
-.section sxh2a
-.section sxh2b
-.section sxh3a
-.section sxh3b
-.section sxh4a
-.section sxh4b
-.section sxh5a
-.section sxh5b
-.section sxh6a
-.section sxh6b
-.section sxh7a
-.section sxh7b
-.section sxh8a
-.section sxh8b
-.section sxh9a
-.section sxh9b
-.section sxh0a
-.section sxh0b
-.section sxiaa
-.section sxiab
-.section sxiba
-.section sxibb
-.section sxica
-.section sxicb
-.section sxida
-.section sxidb
-.section sxiea
-.section sxieb
-.section sxifa
-.section sxifb
-.section sxiga
-.section sxigb
-.section sxiha
-.section sxihb
-.section sxiia
-.section sxiib
-.section sxija
-.section sxijb
-.section sxika
-.section sxikb
-.section sxila
-.section sxilb
-.section sxima
-.section sximb
-.section sxina
-.section sxinb
-.section sxioa
-.section sxiob
-.section sxipa
-.section sxipb
-.section sxiqa
-.section sxiqb
-.section sxira
-.section sxirb
-.section sxisa
-.section sxisb
-.section sxita
-.section sxitb
-.section sxiua
-.section sxiub
-.section sxiva
-.section sxivb
-.section sxiwa
-.section sxiwb
-.section sxixa
-.section sxixb
-.section sxiya
-.section sxiyb
-.section sxiza
-.section sxizb
-.section sxi1a
-.section sxi1b
-.section sxi2a
-.section sxi2b
-.section sxi3a
-.section sxi3b
-.section sxi4a
-.section sxi4b
-.section sxi5a
-.section sxi5b
-.section sxi6a
-.section sxi6b
-.section sxi7a
-.section sxi7b
-.section sxi8a
-.section sxi8b
-.section sxi9a
-.section sxi9b
-.section sxi0a
-.section sxi0b
-.section sxjaa
-.section sxjab
-.section sxjba
-.section sxjbb
-.section sxjca
-.section sxjcb
-.section sxjda
-.section sxjdb
-.section sxjea
-.section sxjeb
-.section sxjfa
-.section sxjfb
-.section sxjga
-.section sxjgb
-.section sxjha
-.section sxjhb
-.section sxjia
-.section sxjib
-.section sxjja
-.section sxjjb
-.section sxjka
-.section sxjkb
-.section sxjla
-.section sxjlb
-.section sxjma
-.section sxjmb
-.section sxjna
-.section sxjnb
-.section sxjoa
-.section sxjob
-.section sxjpa
-.section sxjpb
-.section sxjqa
-.section sxjqb
-.section sxjra
-.section sxjrb
-.section sxjsa
-.section sxjsb
-.section sxjta
-.section sxjtb
-.section sxjua
-.section sxjub
-.section sxjva
-.section sxjvb
-.section sxjwa
-.section sxjwb
-.section sxjxa
-.section sxjxb
-.section sxjya
-.section sxjyb
-.section sxjza
-.section sxjzb
-.section sxj1a
-.section sxj1b
-.section sxj2a
-.section sxj2b
-.section sxj3a
-.section sxj3b
-.section sxj4a
-.section sxj4b
-.section sxj5a
-.section sxj5b
-.section sxj6a
-.section sxj6b
-.section sxj7a
-.section sxj7b
-.section sxj8a
-.section sxj8b
-.section sxj9a
-.section sxj9b
-.section sxj0a
-.section sxj0b
-.section sxkaa
-.section sxkab
-.section sxkba
-.section sxkbb
-.section sxkca
-.section sxkcb
-.section sxkda
-.section sxkdb
-.section sxkea
-.section sxkeb
-.section sxkfa
-.section sxkfb
-.section sxkga
-.section sxkgb
-.section sxkha
-.section sxkhb
-.section sxkia
-.section sxkib
-.section sxkja
-.section sxkjb
-.section sxkka
-.section sxkkb
-.section sxkla
-.section sxklb
-.section sxkma
-.section sxkmb
-.section sxkna
-.section sxknb
-.section sxkoa
-.section sxkob
-.section sxkpa
-.section sxkpb
-.section sxkqa
-.section sxkqb
-.section sxkra
-.section sxkrb
-.section sxksa
-.section sxksb
-.section sxkta
-.section sxktb
-.section sxkua
-.section sxkub
-.section sxkva
-.section sxkvb
-.section sxkwa
-.section sxkwb
-.section sxkxa
-.section sxkxb
-.section sxkya
-.section sxkyb
-.section sxkza
-.section sxkzb
-.section sxk1a
-.section sxk1b
-.section sxk2a
-.section sxk2b
-.section sxk3a
-.section sxk3b
-.section sxk4a
-.section sxk4b
-.section sxk5a
-.section sxk5b
-.section sxk6a
-.section sxk6b
-.section sxk7a
-.section sxk7b
-.section sxk8a
-.section sxk8b
-.section sxk9a
-.section sxk9b
-.section sxk0a
-.section sxk0b
-.section sxlaa
-.section sxlab
-.section sxlba
-.section sxlbb
-.section sxlca
-.section sxlcb
-.section sxlda
-.section sxldb
-.section sxlea
-.section sxleb
-.section sxlfa
-.section sxlfb
-.section sxlga
-.section sxlgb
-.section sxlha
-.section sxlhb
-.section sxlia
-.section sxlib
-.section sxlja
-.section sxljb
-.section sxlka
-.section sxlkb
-.section sxlla
-.section sxllb
-.section sxlma
-.section sxlmb
-.section sxlna
-.section sxlnb
-.section sxloa
-.section sxlob
-.section sxlpa
-.section sxlpb
-.section sxlqa
-.section sxlqb
-.section sxlra
-.section sxlrb
-.section sxlsa
-.section sxlsb
-.section sxlta
-.section sxltb
-.section sxlua
-.section sxlub
-.section sxlva
-.section sxlvb
-.section sxlwa
-.section sxlwb
-.section sxlxa
-.section sxlxb
-.section sxlya
-.section sxlyb
-.section sxlza
-.section sxlzb
-.section sxl1a
-.section sxl1b
-.section sxl2a
-.section sxl2b
-.section sxl3a
-.section sxl3b
-.section sxl4a
-.section sxl4b
-.section sxl5a
-.section sxl5b
-.section sxl6a
-.section sxl6b
-.section sxl7a
-.section sxl7b
-.section sxl8a
-.section sxl8b
-.section sxl9a
-.section sxl9b
-.section sxl0a
-.section sxl0b
-.section sxmaa
-.section sxmab
-.section sxmba
-.section sxmbb
-.section sxmca
-.section sxmcb
-.section sxmda
-.section sxmdb
-.section sxmea
-.section sxmeb
-.section sxmfa
-.section sxmfb
-.section sxmga
-.section sxmgb
-.section sxmha
-.section sxmhb
-.section sxmia
-.section sxmib
-.section sxmja
-.section sxmjb
-.section sxmka
-.section sxmkb
-.section sxmla
-.section sxmlb
-.section sxmma
-.section sxmmb
-.section sxmna
-.section sxmnb
-.section sxmoa
-.section sxmob
-.section sxmpa
-.section sxmpb
-.section sxmqa
-.section sxmqb
-.section sxmra
-.section sxmrb
-.section sxmsa
-.section sxmsb
-.section sxmta
-.section sxmtb
-.section sxmua
-.section sxmub
-.section sxmva
-.section sxmvb
-.section sxmwa
-.section sxmwb
-.section sxmxa
-.section sxmxb
-.section sxmya
-.section sxmyb
-.section sxmza
-.section sxmzb
-.section sxm1a
-.section sxm1b
-.section sxm2a
-.section sxm2b
-.section sxm3a
-.section sxm3b
-.section sxm4a
-.section sxm4b
-.section sxm5a
-.section sxm5b
-.section sxm6a
-.section sxm6b
-.section sxm7a
-.section sxm7b
-.section sxm8a
-.section sxm8b
-.section sxm9a
-.section sxm9b
-.section sxm0a
-.section sxm0b
-.section sxnaa
-.section sxnab
-.section sxnba
-.section sxnbb
-.section sxnca
-.section sxncb
-.section sxnda
-.section sxndb
-.section sxnea
-.section sxneb
-.section sxnfa
-.section sxnfb
-.section sxnga
-.section sxngb
-.section sxnha
-.section sxnhb
-.section sxnia
-.section sxnib
-.section sxnja
-.section sxnjb
-.section sxnka
-.section sxnkb
-.section sxnla
-.section sxnlb
-.section sxnma
-.section sxnmb
-.section sxnna
-.section sxnnb
-.section sxnoa
-.section sxnob
-.section sxnpa
-.section sxnpb
-.section sxnqa
-.section sxnqb
-.section sxnra
-.section sxnrb
-.section sxnsa
-.section sxnsb
-.section sxnta
-.section sxntb
-.section sxnua
-.section sxnub
-.section sxnva
-.section sxnvb
-.section sxnwa
-.section sxnwb
-.section sxnxa
-.section sxnxb
-.section sxnya
-.section sxnyb
-.section sxnza
-.section sxnzb
-.section sxn1a
-.section sxn1b
-.section sxn2a
-.section sxn2b
-.section sxn3a
-.section sxn3b
-.section sxn4a
-.section sxn4b
-.section sxn5a
-.section sxn5b
-.section sxn6a
-.section sxn6b
-.section sxn7a
-.section sxn7b
-.section sxn8a
-.section sxn8b
-.section sxn9a
-.section sxn9b
-.section sxn0a
-.section sxn0b
-.section sxoaa
-.section sxoab
-.section sxoba
-.section sxobb
-.section sxoca
-.section sxocb
-.section sxoda
-.section sxodb
-.section sxoea
-.section sxoeb
-.section sxofa
-.section sxofb
-.section sxoga
-.section sxogb
-.section sxoha
-.section sxohb
-.section sxoia
-.section sxoib
-.section sxoja
-.section sxojb
-.section sxoka
-.section sxokb
-.section sxola
-.section sxolb
-.section sxoma
-.section sxomb
-.section sxona
-.section sxonb
-.section sxooa
-.section sxoob
-.section sxopa
-.section sxopb
-.section sxoqa
-.section sxoqb
-.section sxora
-.section sxorb
-.section sxosa
-.section sxosb
-.section sxota
-.section sxotb
-.section sxoua
-.section sxoub
-.section sxova
-.section sxovb
-.section sxowa
-.section sxowb
-.section sxoxa
-.section sxoxb
-.section sxoya
-.section sxoyb
-.section sxoza
-.section sxozb
-.section sxo1a
-.section sxo1b
-.section sxo2a
-.section sxo2b
-.section sxo3a
-.section sxo3b
-.section sxo4a
-.section sxo4b
-.section sxo5a
-.section sxo5b
-.section sxo6a
-.section sxo6b
-.section sxo7a
-.section sxo7b
-.section sxo8a
-.section sxo8b
-.section sxo9a
-.section sxo9b
-.section sxo0a
-.section sxo0b
-.section sxpaa
-.section sxpab
-.section sxpba
-.section sxpbb
-.section sxpca
-.section sxpcb
-.section sxpda
-.section sxpdb
-.section sxpea
-.section sxpeb
-.section sxpfa
-.section sxpfb
-.section sxpga
-.section sxpgb
-.section sxpha
-.section sxphb
-.section sxpia
-.section sxpib
-.section sxpja
-.section sxpjb
-.section sxpka
-.section sxpkb
-.section sxpla
-.section sxplb
-.section sxpma
-.section sxpmb
-.section sxpna
-.section sxpnb
-.section sxpoa
-.section sxpob
-.section sxppa
-.section sxppb
-.section sxpqa
-.section sxpqb
-.section sxpra
-.section sxprb
-.section sxpsa
-.section sxpsb
-.section sxpta
-.section sxptb
-.section sxpua
-.section sxpub
-.section sxpva
-.section sxpvb
-.section sxpwa
-.section sxpwb
-.section sxpxa
-.section sxpxb
-.section sxpya
-.section sxpyb
-.section sxpza
-.section sxpzb
-.section sxp1a
-.section sxp1b
-.section sxp2a
-.section sxp2b
-.section sxp3a
-.section sxp3b
-.section sxp4a
-.section sxp4b
-.section sxp5a
-.section sxp5b
-.section sxp6a
-.section sxp6b
-.section sxp7a
-.section sxp7b
-.section sxp8a
-.section sxp8b
-.section sxp9a
-.section sxp9b
-.section sxp0a
-.section sxp0b
-.section sxqaa
-.section sxqab
-.section sxqba
-.section sxqbb
-.section sxqca
-.section sxqcb
-.section sxqda
-.section sxqdb
-.section sxqea
-.section sxqeb
-.section sxqfa
-.section sxqfb
-.section sxqga
-.section sxqgb
-.section sxqha
-.section sxqhb
-.section sxqia
-.section sxqib
-.section sxqja
-.section sxqjb
-.section sxqka
-.section sxqkb
-.section sxqla
-.section sxqlb
-.section sxqma
-.section sxqmb
-.section sxqna
-.section sxqnb
-.section sxqoa
-.section sxqob
-.section sxqpa
-.section sxqpb
-.section sxqqa
-.section sxqqb
-.section sxqra
-.section sxqrb
-.section sxqsa
-.section sxqsb
-.section sxqta
-.section sxqtb
-.section sxqua
-.section sxqub
-.section sxqva
-.section sxqvb
-.section sxqwa
-.section sxqwb
-.section sxqxa
-.section sxqxb
-.section sxqya
-.section sxqyb
-.section sxqza
-.section sxqzb
-.section sxq1a
-.section sxq1b
-.section sxq2a
-.section sxq2b
-.section sxq3a
-.section sxq3b
-.section sxq4a
-.section sxq4b
-.section sxq5a
-.section sxq5b
-.section sxq6a
-.section sxq6b
-.section sxq7a
-.section sxq7b
-.section sxq8a
-.section sxq8b
-.section sxq9a
-.section sxq9b
-.section sxq0a
-.section sxq0b
-.section sxraa
-.section sxrab
-.section sxrba
-.section sxrbb
-.section sxrca
-.section sxrcb
-.section sxrda
-.section sxrdb
-.section sxrea
-.section sxreb
-.section sxrfa
-.section sxrfb
-.section sxrga
-.section sxrgb
-.section sxrha
-.section sxrhb
-.section sxria
-.section sxrib
-.section sxrja
-.section sxrjb
-.section sxrka
-.section sxrkb
-.section sxrla
-.section sxrlb
-.section sxrma
-.section sxrmb
-.section sxrna
-.section sxrnb
-.section sxroa
-.section sxrob
-.section sxrpa
-.section sxrpb
-.section sxrqa
-.section sxrqb
-.section sxrra
-.section sxrrb
-.section sxrsa
-.section sxrsb
-.section sxrta
-.section sxrtb
-.section sxrua
-.section sxrub
-.section sxrva
-.section sxrvb
-.section sxrwa
-.section sxrwb
-.section sxrxa
-.section sxrxb
-.section sxrya
-.section sxryb
-.section sxrza
-.section sxrzb
-.section sxr1a
-.section sxr1b
-.section sxr2a
-.section sxr2b
-.section sxr3a
-.section sxr3b
-.section sxr4a
-.section sxr4b
-.section sxr5a
-.section sxr5b
-.section sxr6a
-.section sxr6b
-.section sxr7a
-.section sxr7b
-.section sxr8a
-.section sxr8b
-.section sxr9a
-.section sxr9b
-.section sxr0a
-.section sxr0b
-.section sxsaa
-.section sxsab
-.section sxsba
-.section sxsbb
-.section sxsca
-.section sxscb
-.section sxsda
-.section sxsdb
-.section sxsea
-.section sxseb
-.section sxsfa
-.section sxsfb
-.section sxsga
-.section sxsgb
-.section sxsha
-.section sxshb
-.section sxsia
-.section sxsib
-.section sxsja
-.section sxsjb
-.section sxska
-.section sxskb
-.section sxsla
-.section sxslb
-.section sxsma
-.section sxsmb
-.section sxsna
-.section sxsnb
-.section sxsoa
-.section sxsob
-.section sxspa
-.section sxspb
-.section sxsqa
-.section sxsqb
-.section sxsra
-.section sxsrb
-.section sxssa
-.section sxssb
-.section sxsta
-.section sxstb
-.section sxsua
-.section sxsub
-.section sxsva
-.section sxsvb
-.section sxswa
-.section sxswb
-.section sxsxa
-.section sxsxb
-.section sxsya
-.section sxsyb
-.section sxsza
-.section sxszb
-.section sxs1a
-.section sxs1b
-.section sxs2a
-.section sxs2b
-.section sxs3a
-.section sxs3b
-.section sxs4a
-.section sxs4b
-.section sxs5a
-.section sxs5b
-.section sxs6a
-.section sxs6b
-.section sxs7a
-.section sxs7b
-.section sxs8a
-.section sxs8b
-.section sxs9a
-.section sxs9b
-.section sxs0a
-.section sxs0b
-.section sxtaa
-.section sxtab
-.section sxtba
-.section sxtbb
-.section sxtca
-.section sxtcb
-.section sxtda
-.section sxtdb
-.section sxtea
-.section sxteb
-.section sxtfa
-.section sxtfb
-.section sxtga
-.section sxtgb
-.section sxtha
-.section sxthb
-.section sxtia
-.section sxtib
-.section sxtja
-.section sxtjb
-.section sxtka
-.section sxtkb
-.section sxtla
-.section sxtlb
-.section sxtma
-.section sxtmb
-.section sxtna
-.section sxtnb
-.section sxtoa
-.section sxtob
-.section sxtpa
-.section sxtpb
-.section sxtqa
-.section sxtqb
-.section sxtra
-.section sxtrb
-.section sxtsa
-.section sxtsb
-.section sxtta
-.section sxttb
-.section sxtua
-.section sxtub
-.section sxtva
-.section sxtvb
-.section sxtwa
-.section sxtwb
-.section sxtxa
-.section sxtxb
-.section sxtya
-.section sxtyb
-.section sxtza
-.section sxtzb
-.section sxt1a
-.section sxt1b
-.section sxt2a
-.section sxt2b
-.section sxt3a
-.section sxt3b
-.section sxt4a
-.section sxt4b
-.section sxt5a
-.section sxt5b
-.section sxt6a
-.section sxt6b
-.section sxt7a
-.section sxt7b
-.section sxt8a
-.section sxt8b
-.section sxt9a
-.section sxt9b
-.section sxt0a
-.section sxt0b
-.section sxuaa
-.section sxuab
-.section sxuba
-.section sxubb
-.section sxuca
-.section sxucb
-.section sxuda
-.section sxudb
-.section sxuea
-.section sxueb
-.section sxufa
-.section sxufb
-.section sxuga
-.section sxugb
-.section sxuha
-.section sxuhb
-.section sxuia
-.section sxuib
-.section sxuja
-.section sxujb
-.section sxuka
-.section sxukb
-.section sxula
-.section sxulb
-.section sxuma
-.section sxumb
-.section sxuna
-.section sxunb
-.section sxuoa
-.section sxuob
-.section sxupa
-.section sxupb
-.section sxuqa
-.section sxuqb
-.section sxura
-.section sxurb
-.section sxusa
-.section sxusb
-.section sxuta
-.section sxutb
-.section sxuua
-.section sxuub
-.section sxuva
-.section sxuvb
-.section sxuwa
-.section sxuwb
-.section sxuxa
-.section sxuxb
-.section sxuya
-.section sxuyb
-.section sxuza
-.section sxuzb
-.section sxu1a
-.section sxu1b
-.section sxu2a
-.section sxu2b
-.section sxu3a
-.section sxu3b
-.section sxu4a
-.section sxu4b
-.section sxu5a
-.section sxu5b
-.section sxu6a
-.section sxu6b
-.section sxu7a
-.section sxu7b
-.section sxu8a
-.section sxu8b
-.section sxu9a
-.section sxu9b
-.section sxu0a
-.section sxu0b
-.section sxvaa
-.section sxvab
-.section sxvba
-.section sxvbb
-.section sxvca
-.section sxvcb
-.section sxvda
-.section sxvdb
-.section sxvea
-.section sxveb
-.section sxvfa
-.section sxvfb
-.section sxvga
-.section sxvgb
-.section sxvha
-.section sxvhb
-.section sxvia
-.section sxvib
-.section sxvja
-.section sxvjb
-.section sxvka
-.section sxvkb
-.section sxvla
-.section sxvlb
-.section sxvma
-.section sxvmb
-.section sxvna
-.section sxvnb
-.section sxvoa
-.section sxvob
-.section sxvpa
-.section sxvpb
-.section sxvqa
-.section sxvqb
-.section sxvra
-.section sxvrb
-.section sxvsa
-.section sxvsb
-.section sxvta
-.section sxvtb
-.section sxvua
-.section sxvub
-.section sxvva
-.section sxvvb
-.section sxvwa
-.section sxvwb
-.section sxvxa
-.section sxvxb
-.section sxvya
-.section sxvyb
-.section sxvza
-.section sxvzb
-.section sxv1a
-.section sxv1b
-.section sxv2a
-.section sxv2b
-.section sxv3a
-.section sxv3b
-.section sxv4a
-.section sxv4b
-.section sxv5a
-.section sxv5b
-.section sxv6a
-.section sxv6b
-.section sxv7a
-.section sxv7b
-.section sxv8a
-.section sxv8b
-.section sxv9a
-.section sxv9b
-.section sxv0a
-.section sxv0b
-.section sxwaa
-.section sxwab
-.section sxwba
-.section sxwbb
-.section sxwca
-.section sxwcb
-.section sxwda
-.section sxwdb
-.section sxwea
-.section sxweb
-.section sxwfa
-.section sxwfb
-.section sxwga
-.section sxwgb
-.section sxwha
-.section sxwhb
-.section sxwia
-.section sxwib
-.section sxwja
-.section sxwjb
-.section sxwka
-.section sxwkb
-.section sxwla
-.section sxwlb
-.section sxwma
-.section sxwmb
-.section sxwna
-.section sxwnb
-.section sxwoa
-.section sxwob
-.section sxwpa
-.section sxwpb
-.section sxwqa
-.section sxwqb
-.section sxwra
-.section sxwrb
-.section sxwsa
-.section sxwsb
-.section sxwta
-.section sxwtb
-.section sxwua
-.section sxwub
-.section sxwva
-.section sxwvb
-.section sxwwa
-.section sxwwb
-.section sxwxa
-.section sxwxb
-.section sxwya
-.section sxwyb
-.section sxwza
-.section sxwzb
-.section sxw1a
-.section sxw1b
-.section sxw2a
-.section sxw2b
-.section sxw3a
-.section sxw3b
-.section sxw4a
-.section sxw4b
-.section sxw5a
-.section sxw5b
-.section sxw6a
-.section sxw6b
-.section sxw7a
-.section sxw7b
-.section sxw8a
-.section sxw8b
-.section sxw9a
-.section sxw9b
-.section sxw0a
-.section sxw0b
-.section sxxaa
-.section sxxab
-.section sxxba
-.section sxxbb
-.section sxxca
-.section sxxcb
-.section sxxda
-.section sxxdb
-.section sxxea
-.section sxxeb
-.section sxxfa
-.section sxxfb
-.section sxxga
-.section sxxgb
-.section sxxha
-.section sxxhb
-.section sxxia
-.section sxxib
-.section sxxja
-.section sxxjb
-.section sxxka
-.section sxxkb
-.section sxxla
-.section sxxlb
-.section sxxma
-.section sxxmb
-.section sxxna
-.section sxxnb
-.section sxxoa
-.section sxxob
-.section sxxpa
-.section sxxpb
-.section sxxqa
-.section sxxqb
-.section sxxra
-.section sxxrb
-.section sxxsa
-.section sxxsb
-.section sxxta
-.section sxxtb
-.section sxxua
-.section sxxub
-.section sxxva
-.section sxxvb
-.section sxxwa
-.section sxxwb
-.section sxxxa
-.section sxxxb
-.section sxxya
-.section sxxyb
-.section sxxza
-.section sxxzb
-.section sxx1a
-.section sxx1b
-.section sxx2a
-.section sxx2b
-.section sxx3a
-.section sxx3b
-.section sxx4a
-.section sxx4b
-.section sxx5a
-.section sxx5b
-.section sxx6a
-.section sxx6b
-.section sxx7a
-.section sxx7b
-.section sxx8a
-.section sxx8b
-.section sxx9a
-.section sxx9b
-.section sxx0a
-.section sxx0b
-.section sxyaa
-.section sxyab
-.section sxyba
-.section sxybb
-.section sxyca
-.section sxycb
-.section sxyda
-.section sxydb
-.section sxyea
-.section sxyeb
-.section sxyfa
-.section sxyfb
-.section sxyga
-.section sxygb
-.section sxyha
-.section sxyhb
-.section sxyia
-.section sxyib
-.section sxyja
-.section sxyjb
-.section sxyka
-.section sxykb
-.section sxyla
-.section sxylb
-.section sxyma
-.section sxymb
-.section sxyna
-.section sxynb
-.section sxyoa
-.section sxyob
-.section sxypa
-.section sxypb
-.section sxyqa
-.section sxyqb
-.section sxyra
-.section sxyrb
-.section sxysa
-.section sxysb
-.section sxyta
-.section sxytb
-.section sxyua
-.section sxyub
-.section sxyva
-.section sxyvb
-.section sxywa
-.section sxywb
-.section sxyxa
-.section sxyxb
-.section sxyya
-.section sxyyb
-.section sxyza
-.section sxyzb
-.section sxy1a
-.section sxy1b
-.section sxy2a
-.section sxy2b
-.section sxy3a
-.section sxy3b
-.section sxy4a
-.section sxy4b
-.section sxy5a
-.section sxy5b
-.section sxy6a
-.section sxy6b
-.section sxy7a
-.section sxy7b
-.section sxy8a
-.section sxy8b
-.section sxy9a
-.section sxy9b
-.section sxy0a
-.section sxy0b
-.section sxzaa
-.section sxzab
-.section sxzba
-.section sxzbb
-.section sxzca
-.section sxzcb
-.section sxzda
-.section sxzdb
-.section sxzea
-.section sxzeb
-.section sxzfa
-.section sxzfb
-.section sxzga
-.section sxzgb
-.section sxzha
-.section sxzhb
-.section sxzia
-.section sxzib
-.section sxzja
-.section sxzjb
-.section sxzka
-.section sxzkb
-.section sxzla
-.section sxzlb
-.section sxzma
-.section sxzmb
-.section sxzna
-.section sxznb
-.section sxzoa
-.section sxzob
-.section sxzpa
-.section sxzpb
-.section sxzqa
-.section sxzqb
-.section sxzra
-.section sxzrb
-.section sxzsa
-.section sxzsb
-.section sxzta
-.section sxztb
-.section sxzua
-.section sxzub
-.section sxzva
-.section sxzvb
-.section sxzwa
-.section sxzwb
-.section sxzxa
-.section sxzxb
-.section sxzya
-.section sxzyb
-.section sxzza
-.section sxzzb
-.section sxz1a
-.section sxz1b
-.section sxz2a
-.section sxz2b
-.section sxz3a
-.section sxz3b
-.section sxz4a
-.section sxz4b
-.section sxz5a
-.section sxz5b
-.section sxz6a
-.section sxz6b
-.section sxz7a
-.section sxz7b
-.section sxz8a
-.section sxz8b
-.section sxz9a
-.section sxz9b
-.section sxz0a
-.section sxz0b
-.section sx1aa
-.section sx1ab
-.section sx1ba
-.section sx1bb
-.section sx1ca
-.section sx1cb
-.section sx1da
-.section sx1db
-.section sx1ea
-.section sx1eb
-.section sx1fa
-.section sx1fb
-.section sx1ga
-.section sx1gb
-.section sx1ha
-.section sx1hb
-.section sx1ia
-.section sx1ib
-.section sx1ja
-.section sx1jb
-.section sx1ka
-.section sx1kb
-.section sx1la
-.section sx1lb
-.section sx1ma
-.section sx1mb
-.section sx1na
-.section sx1nb
-.section sx1oa
-.section sx1ob
-.section sx1pa
-.section sx1pb
-.section sx1qa
-.section sx1qb
-.section sx1ra
-.section sx1rb
-.section sx1sa
-.section sx1sb
-.section sx1ta
-.section sx1tb
-.section sx1ua
-.section sx1ub
-.section sx1va
-.section sx1vb
-.section sx1wa
-.section sx1wb
-.section sx1xa
-.section sx1xb
-.section sx1ya
-.section sx1yb
-.section sx1za
-.section sx1zb
-.section sx11a
-.section sx11b
-.section sx12a
-.section sx12b
-.section sx13a
-.section sx13b
-.section sx14a
-.section sx14b
-.section sx15a
-.section sx15b
-.section sx16a
-.section sx16b
-.section sx17a
-.section sx17b
-.section sx18a
-.section sx18b
-.section sx19a
-.section sx19b
-.section sx10a
-.section sx10b
-.section sx2aa
-.section sx2ab
-.section sx2ba
-.section sx2bb
-.section sx2ca
-.section sx2cb
-.section sx2da
-.section sx2db
-.section sx2ea
-.section sx2eb
-.section sx2fa
-.section sx2fb
-.section sx2ga
-.section sx2gb
-.section sx2ha
-.section sx2hb
-.section sx2ia
-.section sx2ib
-.section sx2ja
-.section sx2jb
-.section sx2ka
-.section sx2kb
-.section sx2la
-.section sx2lb
-.section sx2ma
-.section sx2mb
-.section sx2na
-.section sx2nb
-.section sx2oa
-.section sx2ob
-.section sx2pa
-.section sx2pb
-.section sx2qa
-.section sx2qb
-.section sx2ra
-.section sx2rb
-.section sx2sa
-.section sx2sb
-.section sx2ta
-.section sx2tb
-.section sx2ua
-.section sx2ub
-.section sx2va
-.section sx2vb
-.section sx2wa
-.section sx2wb
-.section sx2xa
-.section sx2xb
-.section sx2ya
-.section sx2yb
-.section sx2za
-.section sx2zb
-.section sx21a
-.section sx21b
-.section sx22a
-.section sx22b
-.section sx23a
-.section sx23b
-.section sx24a
-.section sx24b
-.section sx25a
-.section sx25b
-.section sx26a
-.section sx26b
-.section sx27a
-.section sx27b
-.section sx28a
-.section sx28b
-.section sx29a
-.section sx29b
-.section sx20a
-.section sx20b
-.section sx3aa
-.section sx3ab
-.section sx3ba
-.section sx3bb
-.section sx3ca
-.section sx3cb
-.section sx3da
-.section sx3db
-.section sx3ea
-.section sx3eb
-.section sx3fa
-.section sx3fb
-.section sx3ga
-.section sx3gb
-.section sx3ha
-.section sx3hb
-.section sx3ia
-.section sx3ib
-.section sx3ja
-.section sx3jb
-.section sx3ka
-.section sx3kb
-.section sx3la
-.section sx3lb
-.section sx3ma
-.section sx3mb
-.section sx3na
-.section sx3nb
-.section sx3oa
-.section sx3ob
-.section sx3pa
-.section sx3pb
-.section sx3qa
-.section sx3qb
-.section sx3ra
-.section sx3rb
-.section sx3sa
-.section sx3sb
-.section sx3ta
-.section sx3tb
-.section sx3ua
-.section sx3ub
-.section sx3va
-.section sx3vb
-.section sx3wa
-.section sx3wb
-.section sx3xa
-.section sx3xb
-.section sx3ya
-.section sx3yb
-.section sx3za
-.section sx3zb
-.section sx31a
-.section sx31b
-.section sx32a
-.section sx32b
-.section sx33a
-.section sx33b
-.section sx34a
-.section sx34b
-.section sx35a
-.section sx35b
-.section sx36a
-.section sx36b
-.section sx37a
-.section sx37b
-.section sx38a
-.section sx38b
-.section sx39a
-.section sx39b
-.section sx30a
-.section sx30b
-.section sx4aa
-.section sx4ab
-.section sx4ba
-.section sx4bb
-.section sx4ca
-.section sx4cb
-.section sx4da
-.section sx4db
-.section sx4ea
-.section sx4eb
-.section sx4fa
-.section sx4fb
-.section sx4ga
-.section sx4gb
-.section sx4ha
-.section sx4hb
-.section sx4ia
-.section sx4ib
-.section sx4ja
-.section sx4jb
-.section sx4ka
-.section sx4kb
-.section sx4la
-.section sx4lb
-.section sx4ma
-.section sx4mb
-.section sx4na
-.section sx4nb
-.section sx4oa
-.section sx4ob
-.section sx4pa
-.section sx4pb
-.section sx4qa
-.section sx4qb
-.section sx4ra
-.section sx4rb
-.section sx4sa
-.section sx4sb
-.section sx4ta
-.section sx4tb
-.section sx4ua
-.section sx4ub
-.section sx4va
-.section sx4vb
-.section sx4wa
-.section sx4wb
-.section sx4xa
-.section sx4xb
-.section sx4ya
-.section sx4yb
-.section sx4za
-.section sx4zb
-.section sx41a
-.section sx41b
-.section sx42a
-.section sx42b
-.section sx43a
-.section sx43b
-.section sx44a
-.section sx44b
-.section sx45a
-.section sx45b
-.section sx46a
-.section sx46b
-.section sx47a
-.section sx47b
-.section sx48a
-.section sx48b
-.section sx49a
-.section sx49b
-.section sx40a
-.section sx40b
-.section sx5aa
-.section sx5ab
-.section sx5ba
-.section sx5bb
-.section sx5ca
-.section sx5cb
-.section sx5da
-.section sx5db
-.section sx5ea
-.section sx5eb
-.section sx5fa
-.section sx5fb
-.section sx5ga
-.section sx5gb
-.section sx5ha
-.section sx5hb
-.section sx5ia
-.section sx5ib
-.section sx5ja
-.section sx5jb
-.section sx5ka
-.section sx5kb
-.section sx5la
-.section sx5lb
-.section sx5ma
-.section sx5mb
-.section sx5na
-.section sx5nb
-.section sx5oa
-.section sx5ob
-.section sx5pa
-.section sx5pb
-.section sx5qa
-.section sx5qb
-.section sx5ra
-.section sx5rb
-.section sx5sa
-.section sx5sb
-.section sx5ta
-.section sx5tb
-.section sx5ua
-.section sx5ub
-.section sx5va
-.section sx5vb
-.section sx5wa
-.section sx5wb
-.section sx5xa
-.section sx5xb
-.section sx5ya
-.section sx5yb
-.section sx5za
-.section sx5zb
-.section sx51a
-.section sx51b
-.section sx52a
-.section sx52b
-.section sx53a
-.section sx53b
-.section sx54a
-.section sx54b
-.section sx55a
-.section sx55b
-.section sx56a
-.section sx56b
-.section sx57a
-.section sx57b
-.section sx58a
-.section sx58b
-.section sx59a
-.section sx59b
-.section sx50a
-.section sx50b
-.section sx6aa
-.section sx6ab
-.section sx6ba
-.section sx6bb
-.section sx6ca
-.section sx6cb
-.section sx6da
-.section sx6db
-.section sx6ea
-.section sx6eb
-.section sx6fa
-.section sx6fb
-.section sx6ga
-.section sx6gb
-.section sx6ha
-.section sx6hb
-.section sx6ia
-.section sx6ib
-.section sx6ja
-.section sx6jb
-.section sx6ka
-.section sx6kb
-.section sx6la
-.section sx6lb
-.section sx6ma
-.section sx6mb
-.section sx6na
-.section sx6nb
-.section sx6oa
-.section sx6ob
-.section sx6pa
-.section sx6pb
-.section sx6qa
-.section sx6qb
-.section sx6ra
-.section sx6rb
-.section sx6sa
-.section sx6sb
-.section sx6ta
-.section sx6tb
-.section sx6ua
-.section sx6ub
-.section sx6va
-.section sx6vb
-.section sx6wa
-.section sx6wb
-.section sx6xa
-.section sx6xb
-.section sx6ya
-.section sx6yb
-.section sx6za
-.section sx6zb
-.section sx61a
-.section sx61b
-.section sx62a
-.section sx62b
-.section sx63a
-.section sx63b
-.section sx64a
-.section sx64b
-.section sx65a
-.section sx65b
-.section sx66a
-.section sx66b
-.section sx67a
-.section sx67b
-.section sx68a
-.section sx68b
-.section sx69a
-.section sx69b
-.section sx60a
-.section sx60b
-.section sx7aa
-.section sx7ab
-.section sx7ba
-.section sx7bb
-.section sx7ca
-.section sx7cb
-.section sx7da
-.section sx7db
-.section sx7ea
-.section sx7eb
-.section sx7fa
-.section sx7fb
-.section sx7ga
-.section sx7gb
-.section sx7ha
-.section sx7hb
-.section sx7ia
-.section sx7ib
-.section sx7ja
-.section sx7jb
-.section sx7ka
-.section sx7kb
-.section sx7la
-.section sx7lb
-.section sx7ma
-.section sx7mb
-.section sx7na
-.section sx7nb
-.section sx7oa
-.section sx7ob
-.section sx7pa
-.section sx7pb
-.section sx7qa
-.section sx7qb
-.section sx7ra
-.section sx7rb
-.section sx7sa
-.section sx7sb
-.section sx7ta
-.section sx7tb
-.section sx7ua
-.section sx7ub
-.section sx7va
-.section sx7vb
-.section sx7wa
-.section sx7wb
-.section sx7xa
-.section sx7xb
-.section sx7ya
-.section sx7yb
-.section sx7za
-.section sx7zb
-.section sx71a
-.section sx71b
-.section sx72a
-.section sx72b
-.section sx73a
-.section sx73b
-.section sx74a
-.section sx74b
-.section sx75a
-.section sx75b
-.section sx76a
-.section sx76b
-.section sx77a
-.section sx77b
-.section sx78a
-.section sx78b
-.section sx79a
-.section sx79b
-.section sx70a
-.section sx70b
-.section sx8aa
-.section sx8ab
-.section sx8ba
-.section sx8bb
-.section sx8ca
-.section sx8cb
-.section sx8da
-.section sx8db
-.section sx8ea
-.section sx8eb
-.section sx8fa
-.section sx8fb
-.section sx8ga
-.section sx8gb
-.section sx8ha
-.section sx8hb
-.section sx8ia
-.section sx8ib
-.section sx8ja
-.section sx8jb
-.section sx8ka
-.section sx8kb
-.section sx8la
-.section sx8lb
-.section sx8ma
-.section sx8mb
-.section sx8na
-.section sx8nb
-.section sx8oa
-.section sx8ob
-.section sx8pa
-.section sx8pb
-.section sx8qa
-.section sx8qb
-.section sx8ra
-.section sx8rb
-.section sx8sa
-.section sx8sb
-.section sx8ta
-.section sx8tb
-.section sx8ua
-.section sx8ub
-.section sx8va
-.section sx8vb
-.section sx8wa
-.section sx8wb
-.section sx8xa
-.section sx8xb
-.section sx8ya
-.section sx8yb
-.section sx8za
-.section sx8zb
-.section sx81a
-.section sx81b
-.section sx82a
-.section sx82b
-.section sx83a
-.section sx83b
-.section sx84a
-.section sx84b
-.section sx85a
-.section sx85b
-.section sx86a
-.section sx86b
-.section sx87a
-.section sx87b
-.section sx88a
-.section sx88b
-.section sx89a
-.section sx89b
-.section sx80a
-.section sx80b
-.section sx9aa
-.section sx9ab
-.section sx9ba
-.section sx9bb
-.section sx9ca
-.section sx9cb
-.section sx9da
-.section sx9db
-.section sx9ea
-.section sx9eb
-.section sx9fa
-.section sx9fb
-.section sx9ga
-.section sx9gb
-.section sx9ha
-.section sx9hb
-.section sx9ia
-.section sx9ib
-.section sx9ja
-.section sx9jb
-.section sx9ka
-.section sx9kb
-.section sx9la
-.section sx9lb
-.section sx9ma
-.section sx9mb
-.section sx9na
-.section sx9nb
-.section sx9oa
-.section sx9ob
-.section sx9pa
-.section sx9pb
-.section sx9qa
-.section sx9qb
-.section sx9ra
-.section sx9rb
-.section sx9sa
-.section sx9sb
-.section sx9ta
-.section sx9tb
-.section sx9ua
-.section sx9ub
-.section sx9va
-.section sx9vb
-.section sx9wa
-.section sx9wb
-.section sx9xa
-.section sx9xb
-.section sx9ya
-.section sx9yb
-.section sx9za
-.section sx9zb
-.section sx91a
-.section sx91b
-.section sx92a
-.section sx92b
-.section sx93a
-.section sx93b
-.section sx94a
-.section sx94b
-.section sx95a
-.section sx95b
-.section sx96a
-.section sx96b
-.section sx97a
-.section sx97b
-.section sx98a
-.section sx98b
-.section sx99a
-.section sx99b
-.section sx90a
-.section sx90b
-.section sx0aa
-.section sx0ab
-.section sx0ba
-.section sx0bb
-.section sx0ca
-.section sx0cb
-.section sx0da
-.section sx0db
-.section sx0ea
-.section sx0eb
-.section sx0fa
-.section sx0fb
-.section sx0ga
-.section sx0gb
-.section sx0ha
-.section sx0hb
-.section sx0ia
-.section sx0ib
-.section sx0ja
-.section sx0jb
-.section sx0ka
-.section sx0kb
-.section sx0la
-.section sx0lb
-.section sx0ma
-.section sx0mb
-.section sx0na
-.section sx0nb
-.section sx0oa
-.section sx0ob
-.section sx0pa
-.section sx0pb
-.section sx0qa
-.section sx0qb
-.section sx0ra
-.section sx0rb
-.section sx0sa
-.section sx0sb
-.section sx0ta
-.section sx0tb
-.section sx0ua
-.section sx0ub
-.section sx0va
-.section sx0vb
-.section sx0wa
-.section sx0wb
-.section sx0xa
-.section sx0xb
-.section sx0ya
-.section sx0yb
-.section sx0za
-.section sx0zb
-.section sx01a
-.section sx01b
-.section sx02a
-.section sx02b
-.section sx03a
-.section sx03b
-.section sx04a
-.section sx04b
-.section sx05a
-.section sx05b
-.section sx06a
-.section sx06b
-.section sx07a
-.section sx07b
-.section sx08a
-.section sx08b
-.section sx09a
-.section sx09b
-.section sx00a
-.section sx00b
-.section syaaa
-.section syaab
-.section syaba
-.section syabb
-.section syaca
-.section syacb
-.section syada
-.section syadb
-.section syaea
-.section syaeb
-.section syafa
-.section syafb
-.section syaga
-.section syagb
-.section syaha
-.section syahb
-.section syaia
-.section syaib
-.section syaja
-.section syajb
-.section syaka
-.section syakb
-.section syala
-.section syalb
-.section syama
-.section syamb
-.section syana
-.section syanb
-.section syaoa
-.section syaob
-.section syapa
-.section syapb
-.section syaqa
-.section syaqb
-.section syara
-.section syarb
-.section syasa
-.section syasb
-.section syata
-.section syatb
-.section syaua
-.section syaub
-.section syava
-.section syavb
-.section syawa
-.section syawb
-.section syaxa
-.section syaxb
-.section syaya
-.section syayb
-.section syaza
-.section syazb
-.section sya1a
-.section sya1b
-.section sya2a
-.section sya2b
-.section sya3a
-.section sya3b
-.section sya4a
-.section sya4b
-.section sya5a
-.section sya5b
-.section sya6a
-.section sya6b
-.section sya7a
-.section sya7b
-.section sya8a
-.section sya8b
-.section sya9a
-.section sya9b
-.section sya0a
-.section sya0b
-.section sybaa
-.section sybab
-.section sybba
-.section sybbb
-.section sybca
-.section sybcb
-.section sybda
-.section sybdb
-.section sybea
-.section sybeb
-.section sybfa
-.section sybfb
-.section sybga
-.section sybgb
-.section sybha
-.section sybhb
-.section sybia
-.section sybib
-.section sybja
-.section sybjb
-.section sybka
-.section sybkb
-.section sybla
-.section syblb
-.section sybma
-.section sybmb
-.section sybna
-.section sybnb
-.section syboa
-.section sybob
-.section sybpa
-.section sybpb
-.section sybqa
-.section sybqb
-.section sybra
-.section sybrb
-.section sybsa
-.section sybsb
-.section sybta
-.section sybtb
-.section sybua
-.section sybub
-.section sybva
-.section sybvb
-.section sybwa
-.section sybwb
-.section sybxa
-.section sybxb
-.section sybya
-.section sybyb
-.section sybza
-.section sybzb
-.section syb1a
-.section syb1b
-.section syb2a
-.section syb2b
-.section syb3a
-.section syb3b
-.section syb4a
-.section syb4b
-.section syb5a
-.section syb5b
-.section syb6a
-.section syb6b
-.section syb7a
-.section syb7b
-.section syb8a
-.section syb8b
-.section syb9a
-.section syb9b
-.section syb0a
-.section syb0b
-.section sycaa
-.section sycab
-.section sycba
-.section sycbb
-.section sycca
-.section syccb
-.section sycda
-.section sycdb
-.section sycea
-.section syceb
-.section sycfa
-.section sycfb
-.section sycga
-.section sycgb
-.section sycha
-.section sychb
-.section sycia
-.section sycib
-.section sycja
-.section sycjb
-.section sycka
-.section syckb
-.section sycla
-.section syclb
-.section sycma
-.section sycmb
-.section sycna
-.section sycnb
-.section sycoa
-.section sycob
-.section sycpa
-.section sycpb
-.section sycqa
-.section sycqb
-.section sycra
-.section sycrb
-.section sycsa
-.section sycsb
-.section sycta
-.section syctb
-.section sycua
-.section sycub
-.section sycva
-.section sycvb
-.section sycwa
-.section sycwb
-.section sycxa
-.section sycxb
-.section sycya
-.section sycyb
-.section sycza
-.section syczb
-.section syc1a
-.section syc1b
-.section syc2a
-.section syc2b
-.section syc3a
-.section syc3b
-.section syc4a
-.section syc4b
-.section syc5a
-.section syc5b
-.section syc6a
-.section syc6b
-.section syc7a
-.section syc7b
-.section syc8a
-.section syc8b
-.section syc9a
-.section syc9b
-.section syc0a
-.section syc0b
-.section sydaa
-.section sydab
-.section sydba
-.section sydbb
-.section sydca
-.section sydcb
-.section sydda
-.section syddb
-.section sydea
-.section sydeb
-.section sydfa
-.section sydfb
-.section sydga
-.section sydgb
-.section sydha
-.section sydhb
-.section sydia
-.section sydib
-.section sydja
-.section sydjb
-.section sydka
-.section sydkb
-.section sydla
-.section sydlb
-.section sydma
-.section sydmb
-.section sydna
-.section sydnb
-.section sydoa
-.section sydob
-.section sydpa
-.section sydpb
-.section sydqa
-.section sydqb
-.section sydra
-.section sydrb
-.section sydsa
-.section sydsb
-.section sydta
-.section sydtb
-.section sydua
-.section sydub
-.section sydva
-.section sydvb
-.section sydwa
-.section sydwb
-.section sydxa
-.section sydxb
-.section sydya
-.section sydyb
-.section sydza
-.section sydzb
-.section syd1a
-.section syd1b
-.section syd2a
-.section syd2b
-.section syd3a
-.section syd3b
-.section syd4a
-.section syd4b
-.section syd5a
-.section syd5b
-.section syd6a
-.section syd6b
-.section syd7a
-.section syd7b
-.section syd8a
-.section syd8b
-.section syd9a
-.section syd9b
-.section syd0a
-.section syd0b
-.section syeaa
-.section syeab
-.section syeba
-.section syebb
-.section syeca
-.section syecb
-.section syeda
-.section syedb
-.section syeea
-.section syeeb
-.section syefa
-.section syefb
-.section syega
-.section syegb
-.section syeha
-.section syehb
-.section syeia
-.section syeib
-.section syeja
-.section syejb
-.section syeka
-.section syekb
-.section syela
-.section syelb
-.section syema
-.section syemb
-.section syena
-.section syenb
-.section syeoa
-.section syeob
-.section syepa
-.section syepb
-.section syeqa
-.section syeqb
-.section syera
-.section syerb
-.section syesa
-.section syesb
-.section syeta
-.section syetb
-.section syeua
-.section syeub
-.section syeva
-.section syevb
-.section syewa
-.section syewb
-.section syexa
-.section syexb
-.section syeya
-.section syeyb
-.section syeza
-.section syezb
-.section sye1a
-.section sye1b
-.section sye2a
-.section sye2b
-.section sye3a
-.section sye3b
-.section sye4a
-.section sye4b
-.section sye5a
-.section sye5b
-.section sye6a
-.section sye6b
-.section sye7a
-.section sye7b
-.section sye8a
-.section sye8b
-.section sye9a
-.section sye9b
-.section sye0a
-.section sye0b
-.section syfaa
-.section syfab
-.section syfba
-.section syfbb
-.section syfca
-.section syfcb
-.section syfda
-.section syfdb
-.section syfea
-.section syfeb
-.section syffa
-.section syffb
-.section syfga
-.section syfgb
-.section syfha
-.section syfhb
-.section syfia
-.section syfib
-.section syfja
-.section syfjb
-.section syfka
-.section syfkb
-.section syfla
-.section syflb
-.section syfma
-.section syfmb
-.section syfna
-.section syfnb
-.section syfoa
-.section syfob
-.section syfpa
-.section syfpb
-.section syfqa
-.section syfqb
-.section syfra
-.section syfrb
-.section syfsa
-.section syfsb
-.section syfta
-.section syftb
-.section syfua
-.section syfub
-.section syfva
-.section syfvb
-.section syfwa
-.section syfwb
-.section syfxa
-.section syfxb
-.section syfya
-.section syfyb
-.section syfza
-.section syfzb
-.section syf1a
-.section syf1b
-.section syf2a
-.section syf2b
-.section syf3a
-.section syf3b
-.section syf4a
-.section syf4b
-.section syf5a
-.section syf5b
-.section syf6a
-.section syf6b
-.section syf7a
-.section syf7b
-.section syf8a
-.section syf8b
-.section syf9a
-.section syf9b
-.section syf0a
-.section syf0b
-.section sygaa
-.section sygab
-.section sygba
-.section sygbb
-.section sygca
-.section sygcb
-.section sygda
-.section sygdb
-.section sygea
-.section sygeb
-.section sygfa
-.section sygfb
-.section sygga
-.section syggb
-.section sygha
-.section syghb
-.section sygia
-.section sygib
-.section sygja
-.section sygjb
-.section sygka
-.section sygkb
-.section sygla
-.section syglb
-.section sygma
-.section sygmb
-.section sygna
-.section sygnb
-.section sygoa
-.section sygob
-.section sygpa
-.section sygpb
-.section sygqa
-.section sygqb
-.section sygra
-.section sygrb
-.section sygsa
-.section sygsb
-.section sygta
-.section sygtb
-.section sygua
-.section sygub
-.section sygva
-.section sygvb
-.section sygwa
-.section sygwb
-.section sygxa
-.section sygxb
-.section sygya
-.section sygyb
-.section sygza
-.section sygzb
-.section syg1a
-.section syg1b
-.section syg2a
-.section syg2b
-.section syg3a
-.section syg3b
-.section syg4a
-.section syg4b
-.section syg5a
-.section syg5b
-.section syg6a
-.section syg6b
-.section syg7a
-.section syg7b
-.section syg8a
-.section syg8b
-.section syg9a
-.section syg9b
-.section syg0a
-.section syg0b
-.section syhaa
-.section syhab
-.section syhba
-.section syhbb
-.section syhca
-.section syhcb
-.section syhda
-.section syhdb
-.section syhea
-.section syheb
-.section syhfa
-.section syhfb
-.section syhga
-.section syhgb
-.section syhha
-.section syhhb
-.section syhia
-.section syhib
-.section syhja
-.section syhjb
-.section syhka
-.section syhkb
-.section syhla
-.section syhlb
-.section syhma
-.section syhmb
-.section syhna
-.section syhnb
-.section syhoa
-.section syhob
-.section syhpa
-.section syhpb
-.section syhqa
-.section syhqb
-.section syhra
-.section syhrb
-.section syhsa
-.section syhsb
-.section syhta
-.section syhtb
-.section syhua
-.section syhub
-.section syhva
-.section syhvb
-.section syhwa
-.section syhwb
-.section syhxa
-.section syhxb
-.section syhya
-.section syhyb
-.section syhza
-.section syhzb
-.section syh1a
-.section syh1b
-.section syh2a
-.section syh2b
-.section syh3a
-.section syh3b
-.section syh4a
-.section syh4b
-.section syh5a
-.section syh5b
-.section syh6a
-.section syh6b
-.section syh7a
-.section syh7b
-.section syh8a
-.section syh8b
-.section syh9a
-.section syh9b
-.section syh0a
-.section syh0b
-.section syiaa
-.section syiab
-.section syiba
-.section syibb
-.section syica
-.section syicb
-.section syida
-.section syidb
-.section syiea
-.section syieb
-.section syifa
-.section syifb
-.section syiga
-.section syigb
-.section syiha
-.section syihb
-.section syiia
-.section syiib
-.section syija
-.section syijb
-.section syika
-.section syikb
-.section syila
-.section syilb
-.section syima
-.section syimb
-.section syina
-.section syinb
-.section syioa
-.section syiob
-.section syipa
-.section syipb
-.section syiqa
-.section syiqb
-.section syira
-.section syirb
-.section syisa
-.section syisb
-.section syita
-.section syitb
-.section syiua
-.section syiub
-.section syiva
-.section syivb
-.section syiwa
-.section syiwb
-.section syixa
-.section syixb
-.section syiya
-.section syiyb
-.section syiza
-.section syizb
-.section syi1a
-.section syi1b
-.section syi2a
-.section syi2b
-.section syi3a
-.section syi3b
-.section syi4a
-.section syi4b
-.section syi5a
-.section syi5b
-.section syi6a
-.section syi6b
-.section syi7a
-.section syi7b
-.section syi8a
-.section syi8b
-.section syi9a
-.section syi9b
-.section syi0a
-.section syi0b
-.section syjaa
-.section syjab
-.section syjba
-.section syjbb
-.section syjca
-.section syjcb
-.section syjda
-.section syjdb
-.section syjea
-.section syjeb
-.section syjfa
-.section syjfb
-.section syjga
-.section syjgb
-.section syjha
-.section syjhb
-.section syjia
-.section syjib
-.section syjja
-.section syjjb
-.section syjka
-.section syjkb
-.section syjla
-.section syjlb
-.section syjma
-.section syjmb
-.section syjna
-.section syjnb
-.section syjoa
-.section syjob
-.section syjpa
-.section syjpb
-.section syjqa
-.section syjqb
-.section syjra
-.section syjrb
-.section syjsa
-.section syjsb
-.section syjta
-.section syjtb
-.section syjua
-.section syjub
-.section syjva
-.section syjvb
-.section syjwa
-.section syjwb
-.section syjxa
-.section syjxb
-.section syjya
-.section syjyb
-.section syjza
-.section syjzb
-.section syj1a
-.section syj1b
-.section syj2a
-.section syj2b
-.section syj3a
-.section syj3b
-.section syj4a
-.section syj4b
-.section syj5a
-.section syj5b
-.section syj6a
-.section syj6b
-.section syj7a
-.section syj7b
-.section syj8a
-.section syj8b
-.section syj9a
-.section syj9b
-.section syj0a
-.section syj0b
-.section sykaa
-.section sykab
-.section sykba
-.section sykbb
-.section sykca
-.section sykcb
-.section sykda
-.section sykdb
-.section sykea
-.section sykeb
-.section sykfa
-.section sykfb
-.section sykga
-.section sykgb
-.section sykha
-.section sykhb
-.section sykia
-.section sykib
-.section sykja
-.section sykjb
-.section sykka
-.section sykkb
-.section sykla
-.section syklb
-.section sykma
-.section sykmb
-.section sykna
-.section syknb
-.section sykoa
-.section sykob
-.section sykpa
-.section sykpb
-.section sykqa
-.section sykqb
-.section sykra
-.section sykrb
-.section syksa
-.section syksb
-.section sykta
-.section syktb
-.section sykua
-.section sykub
-.section sykva
-.section sykvb
-.section sykwa
-.section sykwb
-.section sykxa
-.section sykxb
-.section sykya
-.section sykyb
-.section sykza
-.section sykzb
-.section syk1a
-.section syk1b
-.section syk2a
-.section syk2b
-.section syk3a
-.section syk3b
-.section syk4a
-.section syk4b
-.section syk5a
-.section syk5b
-.section syk6a
-.section syk6b
-.section syk7a
-.section syk7b
-.section syk8a
-.section syk8b
-.section syk9a
-.section syk9b
-.section syk0a
-.section syk0b
-.section sylaa
-.section sylab
-.section sylba
-.section sylbb
-.section sylca
-.section sylcb
-.section sylda
-.section syldb
-.section sylea
-.section syleb
-.section sylfa
-.section sylfb
-.section sylga
-.section sylgb
-.section sylha
-.section sylhb
-.section sylia
-.section sylib
-.section sylja
-.section syljb
-.section sylka
-.section sylkb
-.section sylla
-.section syllb
-.section sylma
-.section sylmb
-.section sylna
-.section sylnb
-.section syloa
-.section sylob
-.section sylpa
-.section sylpb
-.section sylqa
-.section sylqb
-.section sylra
-.section sylrb
-.section sylsa
-.section sylsb
-.section sylta
-.section syltb
-.section sylua
-.section sylub
-.section sylva
-.section sylvb
-.section sylwa
-.section sylwb
-.section sylxa
-.section sylxb
-.section sylya
-.section sylyb
-.section sylza
-.section sylzb
-.section syl1a
-.section syl1b
-.section syl2a
-.section syl2b
-.section syl3a
-.section syl3b
-.section syl4a
-.section syl4b
-.section syl5a
-.section syl5b
-.section syl6a
-.section syl6b
-.section syl7a
-.section syl7b
-.section syl8a
-.section syl8b
-.section syl9a
-.section syl9b
-.section syl0a
-.section syl0b
-.section symaa
-.section symab
-.section symba
-.section symbb
-.section symca
-.section symcb
-.section symda
-.section symdb
-.section symea
-.section symeb
-.section symfa
-.section symfb
-.section symga
-.section symgb
-.section symha
-.section symhb
-.section symia
-.section symib
-.section symja
-.section symjb
-.section symka
-.section symkb
-.section symla
-.section symlb
-.section symma
-.section symmb
-.section symna
-.section symnb
-.section symoa
-.section symob
-.section sympa
-.section sympb
-.section symqa
-.section symqb
-.section symra
-.section symrb
-.section symsa
-.section symsb
-.section symta
-.section symtb
-.section symua
-.section symub
-.section symva
-.section symvb
-.section symwa
-.section symwb
-.section symxa
-.section symxb
-.section symya
-.section symyb
-.section symza
-.section symzb
-.section sym1a
-.section sym1b
-.section sym2a
-.section sym2b
-.section sym3a
-.section sym3b
-.section sym4a
-.section sym4b
-.section sym5a
-.section sym5b
-.section sym6a
-.section sym6b
-.section sym7a
-.section sym7b
-.section sym8a
-.section sym8b
-.section sym9a
-.section sym9b
-.section sym0a
-.section sym0b
-.section synaa
-.section synab
-.section synba
-.section synbb
-.section synca
-.section syncb
-.section synda
-.section syndb
-.section synea
-.section syneb
-.section synfa
-.section synfb
-.section synga
-.section syngb
-.section synha
-.section synhb
-.section synia
-.section synib
-.section synja
-.section synjb
-.section synka
-.section synkb
-.section synla
-.section synlb
-.section synma
-.section synmb
-.section synna
-.section synnb
-.section synoa
-.section synob
-.section synpa
-.section synpb
-.section synqa
-.section synqb
-.section synra
-.section synrb
-.section synsa
-.section synsb
-.section synta
-.section syntb
-.section synua
-.section synub
-.section synva
-.section synvb
-.section synwa
-.section synwb
-.section synxa
-.section synxb
-.section synya
-.section synyb
-.section synza
-.section synzb
-.section syn1a
-.section syn1b
-.section syn2a
-.section syn2b
-.section syn3a
-.section syn3b
-.section syn4a
-.section syn4b
-.section syn5a
-.section syn5b
-.section syn6a
-.section syn6b
-.section syn7a
-.section syn7b
-.section syn8a
-.section syn8b
-.section syn9a
-.section syn9b
-.section syn0a
-.section syn0b
-.section syoaa
-.section syoab
-.section syoba
-.section syobb
-.section syoca
-.section syocb
-.section syoda
-.section syodb
-.section syoea
-.section syoeb
-.section syofa
-.section syofb
-.section syoga
-.section syogb
-.section syoha
-.section syohb
-.section syoia
-.section syoib
-.section syoja
-.section syojb
-.section syoka
-.section syokb
-.section syola
-.section syolb
-.section syoma
-.section syomb
-.section syona
-.section syonb
-.section syooa
-.section syoob
-.section syopa
-.section syopb
-.section syoqa
-.section syoqb
-.section syora
-.section syorb
-.section syosa
-.section syosb
-.section syota
-.section syotb
-.section syoua
-.section syoub
-.section syova
-.section syovb
-.section syowa
-.section syowb
-.section syoxa
-.section syoxb
-.section syoya
-.section syoyb
-.section syoza
-.section syozb
-.section syo1a
-.section syo1b
-.section syo2a
-.section syo2b
-.section syo3a
-.section syo3b
-.section syo4a
-.section syo4b
-.section syo5a
-.section syo5b
-.section syo6a
-.section syo6b
-.section syo7a
-.section syo7b
-.section syo8a
-.section syo8b
-.section syo9a
-.section syo9b
-.section syo0a
-.section syo0b
-.section sypaa
-.section sypab
-.section sypba
-.section sypbb
-.section sypca
-.section sypcb
-.section sypda
-.section sypdb
-.section sypea
-.section sypeb
-.section sypfa
-.section sypfb
-.section sypga
-.section sypgb
-.section sypha
-.section syphb
-.section sypia
-.section sypib
-.section sypja
-.section sypjb
-.section sypka
-.section sypkb
-.section sypla
-.section syplb
-.section sypma
-.section sypmb
-.section sypna
-.section sypnb
-.section sypoa
-.section sypob
-.section syppa
-.section syppb
-.section sypqa
-.section sypqb
-.section sypra
-.section syprb
-.section sypsa
-.section sypsb
-.section sypta
-.section syptb
-.section sypua
-.section sypub
-.section sypva
-.section sypvb
-.section sypwa
-.section sypwb
-.section sypxa
-.section sypxb
-.section sypya
-.section sypyb
-.section sypza
-.section sypzb
-.section syp1a
-.section syp1b
-.section syp2a
-.section syp2b
-.section syp3a
-.section syp3b
-.section syp4a
-.section syp4b
-.section syp5a
-.section syp5b
-.section syp6a
-.section syp6b
-.section syp7a
-.section syp7b
-.section syp8a
-.section syp8b
-.section syp9a
-.section syp9b
-.section syp0a
-.section syp0b
-.section syqaa
-.section syqab
-.section syqba
-.section syqbb
-.section syqca
-.section syqcb
-.section syqda
-.section syqdb
-.section syqea
-.section syqeb
-.section syqfa
-.section syqfb
-.section syqga
-.section syqgb
-.section syqha
-.section syqhb
-.section syqia
-.section syqib
-.section syqja
-.section syqjb
-.section syqka
-.section syqkb
-.section syqla
-.section syqlb
-.section syqma
-.section syqmb
-.section syqna
-.section syqnb
-.section syqoa
-.section syqob
-.section syqpa
-.section syqpb
-.section syqqa
-.section syqqb
-.section syqra
-.section syqrb
-.section syqsa
-.section syqsb
-.section syqta
-.section syqtb
-.section syqua
-.section syqub
-.section syqva
-.section syqvb
-.section syqwa
-.section syqwb
-.section syqxa
-.section syqxb
-.section syqya
-.section syqyb
-.section syqza
-.section syqzb
-.section syq1a
-.section syq1b
-.section syq2a
-.section syq2b
-.section syq3a
-.section syq3b
-.section syq4a
-.section syq4b
-.section syq5a
-.section syq5b
-.section syq6a
-.section syq6b
-.section syq7a
-.section syq7b
-.section syq8a
-.section syq8b
-.section syq9a
-.section syq9b
-.section syq0a
-.section syq0b
-.section syraa
-.section syrab
-.section syrba
-.section syrbb
-.section syrca
-.section syrcb
-.section syrda
-.section syrdb
-.section syrea
-.section syreb
-.section syrfa
-.section syrfb
-.section syrga
-.section syrgb
-.section syrha
-.section syrhb
-.section syria
-.section syrib
-.section syrja
-.section syrjb
-.section syrka
-.section syrkb
-.section syrla
-.section syrlb
-.section syrma
-.section syrmb
-.section syrna
-.section syrnb
-.section syroa
-.section syrob
-.section syrpa
-.section syrpb
-.section syrqa
-.section syrqb
-.section syrra
-.section syrrb
-.section syrsa
-.section syrsb
-.section syrta
-.section syrtb
-.section syrua
-.section syrub
-.section syrva
-.section syrvb
-.section syrwa
-.section syrwb
-.section syrxa
-.section syrxb
-.section syrya
-.section syryb
-.section syrza
-.section syrzb
-.section syr1a
-.section syr1b
-.section syr2a
-.section syr2b
-.section syr3a
-.section syr3b
-.section syr4a
-.section syr4b
-.section syr5a
-.section syr5b
-.section syr6a
-.section syr6b
-.section syr7a
-.section syr7b
-.section syr8a
-.section syr8b
-.section syr9a
-.section syr9b
-.section syr0a
-.section syr0b
-.section sysaa
-.section sysab
-.section sysba
-.section sysbb
-.section sysca
-.section syscb
-.section sysda
-.section sysdb
-.section sysea
-.section syseb
-.section sysfa
-.section sysfb
-.section sysga
-.section sysgb
-.section sysha
-.section syshb
-.section sysia
-.section sysib
-.section sysja
-.section sysjb
-.section syska
-.section syskb
-.section sysla
-.section syslb
-.section sysma
-.section sysmb
-.section sysna
-.section sysnb
-.section sysoa
-.section sysob
-.section syspa
-.section syspb
-.section sysqa
-.section sysqb
-.section sysra
-.section sysrb
-.section syssa
-.section syssb
-.section systa
-.section systb
-.section sysua
-.section sysub
-.section sysva
-.section sysvb
-.section syswa
-.section syswb
-.section sysxa
-.section sysxb
-.section sysya
-.section sysyb
-.section sysza
-.section syszb
-.section sys1a
-.section sys1b
-.section sys2a
-.section sys2b
-.section sys3a
-.section sys3b
-.section sys4a
-.section sys4b
-.section sys5a
-.section sys5b
-.section sys6a
-.section sys6b
-.section sys7a
-.section sys7b
-.section sys8a
-.section sys8b
-.section sys9a
-.section sys9b
-.section sys0a
-.section sys0b
-.section sytaa
-.section sytab
-.section sytba
-.section sytbb
-.section sytca
-.section sytcb
-.section sytda
-.section sytdb
-.section sytea
-.section syteb
-.section sytfa
-.section sytfb
-.section sytga
-.section sytgb
-.section sytha
-.section sythb
-.section sytia
-.section sytib
-.section sytja
-.section sytjb
-.section sytka
-.section sytkb
-.section sytla
-.section sytlb
-.section sytma
-.section sytmb
-.section sytna
-.section sytnb
-.section sytoa
-.section sytob
-.section sytpa
-.section sytpb
-.section sytqa
-.section sytqb
-.section sytra
-.section sytrb
-.section sytsa
-.section sytsb
-.section sytta
-.section syttb
-.section sytua
-.section sytub
-.section sytva
-.section sytvb
-.section sytwa
-.section sytwb
-.section sytxa
-.section sytxb
-.section sytya
-.section sytyb
-.section sytza
-.section sytzb
-.section syt1a
-.section syt1b
-.section syt2a
-.section syt2b
-.section syt3a
-.section syt3b
-.section syt4a
-.section syt4b
-.section syt5a
-.section syt5b
-.section syt6a
-.section syt6b
-.section syt7a
-.section syt7b
-.section syt8a
-.section syt8b
-.section syt9a
-.section syt9b
-.section syt0a
-.section syt0b
-.section syuaa
-.section syuab
-.section syuba
-.section syubb
-.section syuca
-.section syucb
-.section syuda
-.section syudb
-.section syuea
-.section syueb
-.section syufa
-.section syufb
-.section syuga
-.section syugb
-.section syuha
-.section syuhb
-.section syuia
-.section syuib
-.section syuja
-.section syujb
-.section syuka
-.section syukb
-.section syula
-.section syulb
-.section syuma
-.section syumb
-.section syuna
-.section syunb
-.section syuoa
-.section syuob
-.section syupa
-.section syupb
-.section syuqa
-.section syuqb
-.section syura
-.section syurb
-.section syusa
-.section syusb
-.section syuta
-.section syutb
-.section syuua
-.section syuub
-.section syuva
-.section syuvb
-.section syuwa
-.section syuwb
-.section syuxa
-.section syuxb
-.section syuya
-.section syuyb
-.section syuza
-.section syuzb
-.section syu1a
-.section syu1b
-.section syu2a
-.section syu2b
-.section syu3a
-.section syu3b
-.section syu4a
-.section syu4b
-.section syu5a
-.section syu5b
-.section syu6a
-.section syu6b
-.section syu7a
-.section syu7b
-.section syu8a
-.section syu8b
-.section syu9a
-.section syu9b
-.section syu0a
-.section syu0b
-.section syvaa
-.section syvab
-.section syvba
-.section syvbb
-.section syvca
-.section syvcb
-.section syvda
-.section syvdb
-.section syvea
-.section syveb
-.section syvfa
-.section syvfb
-.section syvga
-.section syvgb
-.section syvha
-.section syvhb
-.section syvia
-.section syvib
-.section syvja
-.section syvjb
-.section syvka
-.section syvkb
-.section syvla
-.section syvlb
-.section syvma
-.section syvmb
-.section syvna
-.section syvnb
-.section syvoa
-.section syvob
-.section syvpa
-.section syvpb
-.section syvqa
-.section syvqb
-.section syvra
-.section syvrb
-.section syvsa
-.section syvsb
-.section syvta
-.section syvtb
-.section syvua
-.section syvub
-.section syvva
-.section syvvb
-.section syvwa
-.section syvwb
-.section syvxa
-.section syvxb
-.section syvya
-.section syvyb
-.section syvza
-.section syvzb
-.section syv1a
-.section syv1b
-.section syv2a
-.section syv2b
-.section syv3a
-.section syv3b
-.section syv4a
-.section syv4b
-.section syv5a
-.section syv5b
-.section syv6a
-.section syv6b
-.section syv7a
-.section syv7b
-.section syv8a
-.section syv8b
-.section syv9a
-.section syv9b
-.section syv0a
-.section syv0b
-.section sywaa
-.section sywab
-.section sywba
-.section sywbb
-.section sywca
-.section sywcb
-.section sywda
-.section sywdb
-.section sywea
-.section syweb
-.section sywfa
-.section sywfb
-.section sywga
-.section sywgb
-.section sywha
-.section sywhb
-.section sywia
-.section sywib
-.section sywja
-.section sywjb
-.section sywka
-.section sywkb
-.section sywla
-.section sywlb
-.section sywma
-.section sywmb
-.section sywna
-.section sywnb
-.section sywoa
-.section sywob
-.section sywpa
-.section sywpb
-.section sywqa
-.section sywqb
-.section sywra
-.section sywrb
-.section sywsa
-.section sywsb
-.section sywta
-.section sywtb
-.section sywua
-.section sywub
-.section sywva
-.section sywvb
-.section sywwa
-.section sywwb
-.section sywxa
-.section sywxb
-.section sywya
-.section sywyb
-.section sywza
-.section sywzb
-.section syw1a
-.section syw1b
-.section syw2a
-.section syw2b
-.section syw3a
-.section syw3b
-.section syw4a
-.section syw4b
-.section syw5a
-.section syw5b
-.section syw6a
-.section syw6b
-.section syw7a
-.section syw7b
-.section syw8a
-.section syw8b
-.section syw9a
-.section syw9b
-.section syw0a
-.section syw0b
-.section syxaa
-.section syxab
-.section syxba
-.section syxbb
-.section syxca
-.section syxcb
-.section syxda
-.section syxdb
-.section syxea
-.section syxeb
-.section syxfa
-.section syxfb
-.section syxga
-.section syxgb
-.section syxha
-.section syxhb
-.section syxia
-.section syxib
-.section syxja
-.section syxjb
-.section syxka
-.section syxkb
-.section syxla
-.section syxlb
-.section syxma
-.section syxmb
-.section syxna
-.section syxnb
-.section syxoa
-.section syxob
-.section syxpa
-.section syxpb
-.section syxqa
-.section syxqb
-.section syxra
-.section syxrb
-.section syxsa
-.section syxsb
-.section syxta
-.section syxtb
-.section syxua
-.section syxub
-.section syxva
-.section syxvb
-.section syxwa
-.section syxwb
-.section syxxa
-.section syxxb
-.section syxya
-.section syxyb
-.section syxza
-.section syxzb
-.section syx1a
-.section syx1b
-.section syx2a
-.section syx2b
-.section syx3a
-.section syx3b
-.section syx4a
-.section syx4b
-.section syx5a
-.section syx5b
-.section syx6a
-.section syx6b
-.section syx7a
-.section syx7b
-.section syx8a
-.section syx8b
-.section syx9a
-.section syx9b
-.section syx0a
-.section syx0b
-.section syyaa
-.section syyab
-.section syyba
-.section syybb
-.section syyca
-.section syycb
-.section syyda
-.section syydb
-.section syyea
-.section syyeb
-.section syyfa
-.section syyfb
-.section syyga
-.section syygb
-.section syyha
-.section syyhb
-.section syyia
-.section syyib
-.section syyja
-.section syyjb
-.section syyka
-.section syykb
-.section syyla
-.section syylb
-.section syyma
-.section syymb
-.section syyna
-.section syynb
-.section syyoa
-.section syyob
-.section syypa
-.section syypb
-.section syyqa
-.section syyqb
-.section syyra
-.section syyrb
-.section syysa
-.section syysb
-.section syyta
-.section syytb
-.section syyua
-.section syyub
-.section syyva
-.section syyvb
-.section syywa
-.section syywb
-.section syyxa
-.section syyxb
-.section syyya
-.section syyyb
-.section syyza
-.section syyzb
-.section syy1a
-.section syy1b
-.section syy2a
-.section syy2b
-.section syy3a
-.section syy3b
-.section syy4a
-.section syy4b
-.section syy5a
-.section syy5b
-.section syy6a
-.section syy6b
-.section syy7a
-.section syy7b
-.section syy8a
-.section syy8b
-.section syy9a
-.section syy9b
-.section syy0a
-.section syy0b
-.section syzaa
-.section syzab
-.section syzba
-.section syzbb
-.section syzca
-.section syzcb
-.section syzda
-.section syzdb
-.section syzea
-.section syzeb
-.section syzfa
-.section syzfb
-.section syzga
-.section syzgb
-.section syzha
-.section syzhb
-.section syzia
-.section syzib
-.section syzja
-.section syzjb
-.section syzka
-.section syzkb
-.section syzla
-.section syzlb
-.section syzma
-.section syzmb
-.section syzna
-.section syznb
-.section syzoa
-.section syzob
-.section syzpa
-.section syzpb
-.section syzqa
-.section syzqb
-.section syzra
-.section syzrb
-.section syzsa
-.section syzsb
-.section syzta
-.section syztb
-.section syzua
-.section syzub
-.section syzva
-.section syzvb
-.section syzwa
-.section syzwb
-.section syzxa
-.section syzxb
-.section syzya
-.section syzyb
-.section syzza
-.section syzzb
-.section syz1a
-.section syz1b
-.section syz2a
-.section syz2b
-.section syz3a
-.section syz3b
-.section syz4a
-.section syz4b
-.section syz5a
-.section syz5b
-.section syz6a
-.section syz6b
-.section syz7a
-.section syz7b
-.section syz8a
-.section syz8b
-.section syz9a
-.section syz9b
-.section syz0a
-.section syz0b
-.section sy1aa
-.section sy1ab
-.section sy1ba
-.section sy1bb
-.section sy1ca
-.section sy1cb
-.section sy1da
-.section sy1db
-.section sy1ea
-.section sy1eb
-.section sy1fa
-.section sy1fb
-.section sy1ga
-.section sy1gb
-.section sy1ha
-.section sy1hb
-.section sy1ia
-.section sy1ib
-.section sy1ja
-.section sy1jb
-.section sy1ka
-.section sy1kb
-.section sy1la
-.section sy1lb
-.section sy1ma
-.section sy1mb
-.section sy1na
-.section sy1nb
-.section sy1oa
-.section sy1ob
-.section sy1pa
-.section sy1pb
-.section sy1qa
-.section sy1qb
-.section sy1ra
-.section sy1rb
-.section sy1sa
-.section sy1sb
-.section sy1ta
-.section sy1tb
-.section sy1ua
-.section sy1ub
-.section sy1va
-.section sy1vb
-.section sy1wa
-.section sy1wb
-.section sy1xa
-.section sy1xb
-.section sy1ya
-.section sy1yb
-.section sy1za
-.section sy1zb
-.section sy11a
-.section sy11b
-.section sy12a
-.section sy12b
-.section sy13a
-.section sy13b
-.section sy14a
-.section sy14b
-.section sy15a
-.section sy15b
-.section sy16a
-.section sy16b
-.section sy17a
-.section sy17b
-.section sy18a
-.section sy18b
-.section sy19a
-.section sy19b
-.section sy10a
-.section sy10b
-.section sy2aa
-.section sy2ab
-.section sy2ba
-.section sy2bb
-.section sy2ca
-.section sy2cb
-.section sy2da
-.section sy2db
-.section sy2ea
-.section sy2eb
-.section sy2fa
-.section sy2fb
-.section sy2ga
-.section sy2gb
-.section sy2ha
-.section sy2hb
-.section sy2ia
-.section sy2ib
-.section sy2ja
-.section sy2jb
-.section sy2ka
-.section sy2kb
-.section sy2la
-.section sy2lb
-.section sy2ma
-.section sy2mb
-.section sy2na
-.section sy2nb
-.section sy2oa
-.section sy2ob
-.section sy2pa
-.section sy2pb
-.section sy2qa
-.section sy2qb
-.section sy2ra
-.section sy2rb
-.section sy2sa
-.section sy2sb
-.section sy2ta
-.section sy2tb
-.section sy2ua
-.section sy2ub
-.section sy2va
-.section sy2vb
-.section sy2wa
-.section sy2wb
-.section sy2xa
-.section sy2xb
-.section sy2ya
-.section sy2yb
-.section sy2za
-.section sy2zb
-.section sy21a
-.section sy21b
-.section sy22a
-.section sy22b
-.section sy23a
-.section sy23b
-.section sy24a
-.section sy24b
-.section sy25a
-.section sy25b
-.section sy26a
-.section sy26b
-.section sy27a
-.section sy27b
-.section sy28a
-.section sy28b
-.section sy29a
-.section sy29b
-.section sy20a
-.section sy20b
-.section sy3aa
-.section sy3ab
-.section sy3ba
-.section sy3bb
-.section sy3ca
-.section sy3cb
-.section sy3da
-.section sy3db
-.section sy3ea
-.section sy3eb
-.section sy3fa
-.section sy3fb
-.section sy3ga
-.section sy3gb
-.section sy3ha
-.section sy3hb
-.section sy3ia
-.section sy3ib
-.section sy3ja
-.section sy3jb
-.section sy3ka
-.section sy3kb
-.section sy3la
-.section sy3lb
-.section sy3ma
-.section sy3mb
-.section sy3na
-.section sy3nb
-.section sy3oa
-.section sy3ob
-.section sy3pa
-.section sy3pb
-.section sy3qa
-.section sy3qb
-.section sy3ra
-.section sy3rb
-.section sy3sa
-.section sy3sb
-.section sy3ta
-.section sy3tb
-.section sy3ua
-.section sy3ub
-.section sy3va
-.section sy3vb
-.section sy3wa
-.section sy3wb
-.section sy3xa
-.section sy3xb
-.section sy3ya
-.section sy3yb
-.section sy3za
-.section sy3zb
-.section sy31a
-.section sy31b
-.section sy32a
-.section sy32b
-.section sy33a
-.section sy33b
-.section sy34a
-.section sy34b
-.section sy35a
-.section sy35b
-.section sy36a
-.section sy36b
-.section sy37a
-.section sy37b
-.section sy38a
-.section sy38b
-.section sy39a
-.section sy39b
-.section sy30a
-.section sy30b
-.section sy4aa
-.section sy4ab
-.section sy4ba
-.section sy4bb
-.section sy4ca
-.section sy4cb
-.section sy4da
-.section sy4db
-.section sy4ea
-.section sy4eb
-.section sy4fa
-.section sy4fb
-.section sy4ga
-.section sy4gb
-.section sy4ha
-.section sy4hb
-.section sy4ia
-.section sy4ib
-.section sy4ja
-.section sy4jb
-.section sy4ka
-.section sy4kb
-.section sy4la
-.section sy4lb
-.section sy4ma
-.section sy4mb
-.section sy4na
-.section sy4nb
-.section sy4oa
-.section sy4ob
-.section sy4pa
-.section sy4pb
-.section sy4qa
-.section sy4qb
-.section sy4ra
-.section sy4rb
-.section sy4sa
-.section sy4sb
-.section sy4ta
-.section sy4tb
-.section sy4ua
-.section sy4ub
-.section sy4va
-.section sy4vb
-.section sy4wa
-.section sy4wb
-.section sy4xa
-.section sy4xb
-.section sy4ya
-.section sy4yb
-.section sy4za
-.section sy4zb
-.section sy41a
-.section sy41b
-.section sy42a
-.section sy42b
-.section sy43a
-.section sy43b
-.section sy44a
-.section sy44b
-.section sy45a
-.section sy45b
-.section sy46a
-.section sy46b
-.section sy47a
-.section sy47b
-.section sy48a
-.section sy48b
-.section sy49a
-.section sy49b
-.section sy40a
-.section sy40b
-.section sy5aa
-.section sy5ab
-.section sy5ba
-.section sy5bb
-.section sy5ca
-.section sy5cb
-.section sy5da
-.section sy5db
-.section sy5ea
-.section sy5eb
-.section sy5fa
-.section sy5fb
-.section sy5ga
-.section sy5gb
-.section sy5ha
-.section sy5hb
-.section sy5ia
-.section sy5ib
-.section sy5ja
-.section sy5jb
-.section sy5ka
-.section sy5kb
-.section sy5la
-.section sy5lb
-.section sy5ma
-.section sy5mb
-.section sy5na
-.section sy5nb
-.section sy5oa
-.section sy5ob
-.section sy5pa
-.section sy5pb
-.section sy5qa
-.section sy5qb
-.section sy5ra
-.section sy5rb
-.section sy5sa
-.section sy5sb
-.section sy5ta
-.section sy5tb
-.section sy5ua
-.section sy5ub
-.section sy5va
-.section sy5vb
-.section sy5wa
-.section sy5wb
-.section sy5xa
-.section sy5xb
-.section sy5ya
-.section sy5yb
-.section sy5za
-.section sy5zb
-.section sy51a
-.section sy51b
-.section sy52a
-.section sy52b
-.section sy53a
-.section sy53b
-.section sy54a
-.section sy54b
-.section sy55a
-.section sy55b
-.section sy56a
-.section sy56b
-.section sy57a
-.section sy57b
-.section sy58a
-.section sy58b
-.section sy59a
-.section sy59b
-.section sy50a
-.section sy50b
-.section sy6aa
-.section sy6ab
-.section sy6ba
-.section sy6bb
-.section sy6ca
-.section sy6cb
-.section sy6da
-.section sy6db
-.section sy6ea
-.section sy6eb
-.section sy6fa
-.section sy6fb
-.section sy6ga
-.section sy6gb
-.section sy6ha
-.section sy6hb
-.section sy6ia
-.section sy6ib
-.section sy6ja
-.section sy6jb
-.section sy6ka
-.section sy6kb
-.section sy6la
-.section sy6lb
-.section sy6ma
-.section sy6mb
-.section sy6na
-.section sy6nb
-.section sy6oa
-.section sy6ob
-.section sy6pa
-.section sy6pb
-.section sy6qa
-.section sy6qb
-.section sy6ra
-.section sy6rb
-.section sy6sa
-.section sy6sb
-.section sy6ta
-.section sy6tb
-.section sy6ua
-.section sy6ub
-.section sy6va
-.section sy6vb
-.section sy6wa
-.section sy6wb
-.section sy6xa
-.section sy6xb
-.section sy6ya
-.section sy6yb
-.section sy6za
-.section sy6zb
-.section sy61a
-.section sy61b
-.section sy62a
-.section sy62b
-.section sy63a
-.section sy63b
-.section sy64a
-.section sy64b
-.section sy65a
-.section sy65b
-.section sy66a
-.section sy66b
-.section sy67a
-.section sy67b
-.section sy68a
-.section sy68b
-.section sy69a
-.section sy69b
-.section sy60a
-.section sy60b
-.section sy7aa
-.section sy7ab
-.section sy7ba
-.section sy7bb
-.section sy7ca
-.section sy7cb
-.section sy7da
-.section sy7db
-.section sy7ea
-.section sy7eb
-.section sy7fa
-.section sy7fb
-.section sy7ga
-.section sy7gb
-.section sy7ha
-.section sy7hb
-.section sy7ia
-.section sy7ib
-.section sy7ja
-.section sy7jb
-.section sy7ka
-.section sy7kb
-.section sy7la
-.section sy7lb
-.section sy7ma
-.section sy7mb
-.section sy7na
-.section sy7nb
-.section sy7oa
-.section sy7ob
-.section sy7pa
-.section sy7pb
-.section sy7qa
-.section sy7qb
-.section sy7ra
-.section sy7rb
-.section sy7sa
-.section sy7sb
-.section sy7ta
-.section sy7tb
-.section sy7ua
-.section sy7ub
-.section sy7va
-.section sy7vb
-.section sy7wa
-.section sy7wb
-.section sy7xa
-.section sy7xb
-.section sy7ya
-.section sy7yb
-.section sy7za
-.section sy7zb
-.section sy71a
-.section sy71b
-.section sy72a
-.section sy72b
-.section sy73a
-.section sy73b
-.section sy74a
-.section sy74b
-.section sy75a
-.section sy75b
-.section sy76a
-.section sy76b
-.section sy77a
-.section sy77b
-.section sy78a
-.section sy78b
-.section sy79a
-.section sy79b
-.section sy70a
-.section sy70b
-.section sy8aa
-.section sy8ab
-.section sy8ba
-.section sy8bb
-.section sy8ca
-.section sy8cb
-.section sy8da
-.section sy8db
-.section sy8ea
-.section sy8eb
-.section sy8fa
-.section sy8fb
-.section sy8ga
-.section sy8gb
-.section sy8ha
-.section sy8hb
-.section sy8ia
-.section sy8ib
-.section sy8ja
-.section sy8jb
-.section sy8ka
-.section sy8kb
-.section sy8la
-.section sy8lb
-.section sy8ma
-.section sy8mb
-.section sy8na
-.section sy8nb
-.section sy8oa
-.section sy8ob
-.section sy8pa
-.section sy8pb
-.section sy8qa
-.section sy8qb
-.section sy8ra
-.section sy8rb
-.section sy8sa
-.section sy8sb
-.section sy8ta
-.section sy8tb
-.section sy8ua
-.section sy8ub
-.section sy8va
-.section sy8vb
-.section sy8wa
-.section sy8wb
-.section sy8xa
-.section sy8xb
-.section sy8ya
-.section sy8yb
-.section sy8za
-.section sy8zb
-.section sy81a
-.section sy81b
-.section sy82a
-.section sy82b
-.section sy83a
-.section sy83b
-.section sy84a
-.section sy84b
-.section sy85a
-.section sy85b
-.section sy86a
-.section sy86b
-.section sy87a
-.section sy87b
-.section sy88a
-.section sy88b
-.section sy89a
-.section sy89b
-.section sy80a
-.section sy80b
-.section sy9aa
-.section sy9ab
-.section sy9ba
-.section sy9bb
-.section sy9ca
-.section sy9cb
-.section sy9da
-.section sy9db
-.section sy9ea
-.section sy9eb
-.section sy9fa
-.section sy9fb
-.section sy9ga
-.section sy9gb
-.section sy9ha
-.section sy9hb
-.section sy9ia
-.section sy9ib
-.section sy9ja
-.section sy9jb
-.section sy9ka
-.section sy9kb
-.section sy9la
-.section sy9lb
-.section sy9ma
-.section sy9mb
-.section sy9na
-.section sy9nb
-.section sy9oa
-.section sy9ob
-.section sy9pa
-.section sy9pb
-.section sy9qa
-.section sy9qb
-.section sy9ra
-.section sy9rb
-.section sy9sa
-.section sy9sb
-.section sy9ta
-.section sy9tb
-.section sy9ua
-.section sy9ub
-.section sy9va
-.section sy9vb
-.section sy9wa
-.section sy9wb
-.section sy9xa
-.section sy9xb
-.section sy9ya
-.section sy9yb
-.section sy9za
-.section sy9zb
-.section sy91a
-.section sy91b
-.section sy92a
-.section sy92b
-.section sy93a
-.section sy93b
-.section sy94a
-.section sy94b
-.section sy95a
-.section sy95b
-.section sy96a
-.section sy96b
-.section sy97a
-.section sy97b
-.section sy98a
-.section sy98b
-.section sy99a
-.section sy99b
-.section sy90a
-.section sy90b
-.section sy0aa
-.section sy0ab
-.section sy0ba
-.section sy0bb
-.section sy0ca
-.section sy0cb
-.section sy0da
-.section sy0db
-.section sy0ea
-.section sy0eb
-.section sy0fa
-.section sy0fb
-.section sy0ga
-.section sy0gb
-.section sy0ha
-.section sy0hb
-.section sy0ia
-.section sy0ib
-.section sy0ja
-.section sy0jb
-.section sy0ka
-.section sy0kb
-.section sy0la
-.section sy0lb
-.section sy0ma
-.section sy0mb
-.section sy0na
-.section sy0nb
-.section sy0oa
-.section sy0ob
-.section sy0pa
-.section sy0pb
-.section sy0qa
-.section sy0qb
-.section sy0ra
-.section sy0rb
-.section sy0sa
-.section sy0sb
-.section sy0ta
-.section sy0tb
-.section sy0ua
-.section sy0ub
-.section sy0va
-.section sy0vb
-.section sy0wa
-.section sy0wb
-.section sy0xa
-.section sy0xb
-.section sy0ya
-.section sy0yb
-.section sy0za
-.section sy0zb
-.section sy01a
-.section sy01b
-.section sy02a
-.section sy02b
-.section sy03a
-.section sy03b
-.section sy04a
-.section sy04b
-.section sy05a
-.section sy05b
-.section sy06a
-.section sy06b
-.section sy07a
-.section sy07b
-.section sy08a
-.section sy08b
-.section sy09a
-.section sy09b
-.section sy00a
-.section sy00b
-.section szaaa
-.section szaab
-.section szaba
-.section szabb
-.section szaca
-.section szacb
-.section szada
-.section szadb
-.section szaea
-.section szaeb
-.section szafa
-.section szafb
-.section szaga
-.section szagb
-.section szaha
-.section szahb
-.section szaia
-.section szaib
-.section szaja
-.section szajb
-.section szaka
-.section szakb
-.section szala
-.section szalb
-.section szama
-.section szamb
-.section szana
-.section szanb
-.section szaoa
-.section szaob
-.section szapa
-.section szapb
-.section szaqa
-.section szaqb
-.section szara
-.section szarb
-.section szasa
-.section szasb
-.section szata
-.section szatb
-.section szaua
-.section szaub
-.section szava
-.section szavb
-.section szawa
-.section szawb
-.section szaxa
-.section szaxb
-.section szaya
-.section szayb
-.section szaza
-.section szazb
-.section sza1a
-.section sza1b
-.section sza2a
-.section sza2b
-.section sza3a
-.section sza3b
-.section sza4a
-.section sza4b
-.section sza5a
-.section sza5b
-.section sza6a
-.section sza6b
-.section sza7a
-.section sza7b
-.section sza8a
-.section sza8b
-.section sza9a
-.section sza9b
-.section sza0a
-.section sza0b
-.section szbaa
-.section szbab
-.section szbba
-.section szbbb
-.section szbca
-.section szbcb
-.section szbda
-.section szbdb
-.section szbea
-.section szbeb
-.section szbfa
-.section szbfb
-.section szbga
-.section szbgb
-.section szbha
-.section szbhb
-.section szbia
-.section szbib
-.section szbja
-.section szbjb
-.section szbka
-.section szbkb
-.section szbla
-.section szblb
-.section szbma
-.section szbmb
-.section szbna
-.section szbnb
-.section szboa
-.section szbob
-.section szbpa
-.section szbpb
-.section szbqa
-.section szbqb
-.section szbra
-.section szbrb
-.section szbsa
-.section szbsb
-.section szbta
-.section szbtb
-.section szbua
-.section szbub
-.section szbva
-.section szbvb
-.section szbwa
-.section szbwb
-.section szbxa
-.section szbxb
-.section szbya
-.section szbyb
-.section szbza
-.section szbzb
-.section szb1a
-.section szb1b
-.section szb2a
-.section szb2b
-.section szb3a
-.section szb3b
-.section szb4a
-.section szb4b
-.section szb5a
-.section szb5b
-.section szb6a
-.section szb6b
-.section szb7a
-.section szb7b
-.section szb8a
-.section szb8b
-.section szb9a
-.section szb9b
-.section szb0a
-.section szb0b
-.section szcaa
-.section szcab
-.section szcba
-.section szcbb
-.section szcca
-.section szccb
-.section szcda
-.section szcdb
-.section szcea
-.section szceb
-.section szcfa
-.section szcfb
-.section szcga
-.section szcgb
-.section szcha
-.section szchb
-.section szcia
-.section szcib
-.section szcja
-.section szcjb
-.section szcka
-.section szckb
-.section szcla
-.section szclb
-.section szcma
-.section szcmb
-.section szcna
-.section szcnb
-.section szcoa
-.section szcob
-.section szcpa
-.section szcpb
-.section szcqa
-.section szcqb
-.section szcra
-.section szcrb
-.section szcsa
-.section szcsb
-.section szcta
-.section szctb
-.section szcua
-.section szcub
-.section szcva
-.section szcvb
-.section szcwa
-.section szcwb
-.section szcxa
-.section szcxb
-.section szcya
-.section szcyb
-.section szcza
-.section szczb
-.section szc1a
-.section szc1b
-.section szc2a
-.section szc2b
-.section szc3a
-.section szc3b
-.section szc4a
-.section szc4b
-.section szc5a
-.section szc5b
-.section szc6a
-.section szc6b
-.section szc7a
-.section szc7b
-.section szc8a
-.section szc8b
-.section szc9a
-.section szc9b
-.section szc0a
-.section szc0b
-.section szdaa
-.section szdab
-.section szdba
-.section szdbb
-.section szdca
-.section szdcb
-.section szdda
-.section szddb
-.section szdea
-.section szdeb
-.section szdfa
-.section szdfb
-.section szdga
-.section szdgb
-.section szdha
-.section szdhb
-.section szdia
-.section szdib
-.section szdja
-.section szdjb
-.section szdka
-.section szdkb
-.section szdla
-.section szdlb
-.section szdma
-.section szdmb
-.section szdna
-.section szdnb
-.section szdoa
-.section szdob
-.section szdpa
-.section szdpb
-.section szdqa
-.section szdqb
-.section szdra
-.section szdrb
-.section szdsa
-.section szdsb
-.section szdta
-.section szdtb
-.section szdua
-.section szdub
-.section szdva
-.section szdvb
-.section szdwa
-.section szdwb
-.section szdxa
-.section szdxb
-.section szdya
-.section szdyb
-.section szdza
-.section szdzb
-.section szd1a
-.section szd1b
-.section szd2a
-.section szd2b
-.section szd3a
-.section szd3b
-.section szd4a
-.section szd4b
-.section szd5a
-.section szd5b
-.section szd6a
-.section szd6b
-.section szd7a
-.section szd7b
-.section szd8a
-.section szd8b
-.section szd9a
-.section szd9b
-.section szd0a
-.section szd0b
-.section szeaa
-.section szeab
-.section szeba
-.section szebb
-.section szeca
-.section szecb
-.section szeda
-.section szedb
-.section szeea
-.section szeeb
-.section szefa
-.section szefb
-.section szega
-.section szegb
-.section szeha
-.section szehb
-.section szeia
-.section szeib
-.section szeja
-.section szejb
-.section szeka
-.section szekb
-.section szela
-.section szelb
-.section szema
-.section szemb
-.section szena
-.section szenb
-.section szeoa
-.section szeob
-.section szepa
-.section szepb
-.section szeqa
-.section szeqb
-.section szera
-.section szerb
-.section szesa
-.section szesb
-.section szeta
-.section szetb
-.section szeua
-.section szeub
-.section szeva
-.section szevb
-.section szewa
-.section szewb
-.section szexa
-.section szexb
-.section szeya
-.section szeyb
-.section szeza
-.section szezb
-.section sze1a
-.section sze1b
-.section sze2a
-.section sze2b
-.section sze3a
-.section sze3b
-.section sze4a
-.section sze4b
-.section sze5a
-.section sze5b
-.section sze6a
-.section sze6b
-.section sze7a
-.section sze7b
-.section sze8a
-.section sze8b
-.section sze9a
-.section sze9b
-.section sze0a
-.section sze0b
-.section szfaa
-.section szfab
-.section szfba
-.section szfbb
-.section szfca
-.section szfcb
-.section szfda
-.section szfdb
-.section szfea
-.section szfeb
-.section szffa
-.section szffb
-.section szfga
-.section szfgb
-.section szfha
-.section szfhb
-.section szfia
-.section szfib
-.section szfja
-.section szfjb
-.section szfka
-.section szfkb
-.section szfla
-.section szflb
-.section szfma
-.section szfmb
-.section szfna
-.section szfnb
-.section szfoa
-.section szfob
-.section szfpa
-.section szfpb
-.section szfqa
-.section szfqb
-.section szfra
-.section szfrb
-.section szfsa
-.section szfsb
-.section szfta
-.section szftb
-.section szfua
-.section szfub
-.section szfva
-.section szfvb
-.section szfwa
-.section szfwb
-.section szfxa
-.section szfxb
-.section szfya
-.section szfyb
-.section szfza
-.section szfzb
-.section szf1a
-.section szf1b
-.section szf2a
-.section szf2b
-.section szf3a
-.section szf3b
-.section szf4a
-.section szf4b
-.section szf5a
-.section szf5b
-.section szf6a
-.section szf6b
-.section szf7a
-.section szf7b
-.section szf8a
-.section szf8b
-.section szf9a
-.section szf9b
-.section szf0a
-.section szf0b
-.section szgaa
-.section szgab
-.section szgba
-.section szgbb
-.section szgca
-.section szgcb
-.section szgda
-.section szgdb
-.section szgea
-.section szgeb
-.section szgfa
-.section szgfb
-.section szgga
-.section szggb
-.section szgha
-.section szghb
-.section szgia
-.section szgib
-.section szgja
-.section szgjb
-.section szgka
-.section szgkb
-.section szgla
-.section szglb
-.section szgma
-.section szgmb
-.section szgna
-.section szgnb
-.section szgoa
-.section szgob
-.section szgpa
-.section szgpb
-.section szgqa
-.section szgqb
-.section szgra
-.section szgrb
-.section szgsa
-.section szgsb
-.section szgta
-.section szgtb
-.section szgua
-.section szgub
-.section szgva
-.section szgvb
+// SECTIONS: Name: .symtab_shndx
+
+// Test that we don't create a symbol for the symtab_shndx section.
+// SYMBOLS-NOT: symtab_shndx
+
+
+// Test that both a and b show up in the correct section.
+// SYMBOLS: Name: a
+// SYMBOLS-NEXT: Value: 0x0
+// SYMBOLS-NEXT: Size: 0
+// SYMBOLS-NEXT: Binding: Local (0x0)
+// SYMBOLS-NEXT: Type: None (0x0)
+// SYMBOLS-NEXT: Other: 0
+// SYMBOLS-NEXT: Section: last (0xFF00)
+// SYMBOLS-NEXT: }
+// SYMBOLS-NEXT: Symbol {
+// SYMBOLS-NEXT: Name: b
+// SYMBOLS-NEXT: Value: 0x1
+// SYMBOLS-NEXT: Size: 0
+// SYMBOLS-NEXT: Binding: Local (0x0)
+// SYMBOLS-NEXT: Type: None (0x0)
+// SYMBOLS-NEXT: Other: 0
+// SYMBOLS-NEXT: Section: last (0xFF00)
+// SYMBOLS-NEXT: }
+
+
+// Test that this file has one section too many.
+// SYMBOLS: Name: last
+// SYMBOLS-NEXT: Value: 0x0
+// SYMBOLS-NEXT: Size: 0
+// SYMBOLS-NEXT: Binding: Local (0x0)
+// SYMBOLS-NEXT: Type: Section (0x3)
+// SYMBOLS-NEXT: Other: 0
+// SYMBOLS-NEXT: Section: last (0xFF00)
+// SYMBOLS-NEXT: }
+// SYMBOLS-NEXT:]
+
+.macro gen_sections4 x
+ .section a\x
+ .section b\x
+ .section c\x
+ .section d\x
+.endm
+
+.macro gen_sections8 x
+ gen_sections4 a\x
+ gen_sections4 b\x
+.endm
+
+.macro gen_sections16 x
+ gen_sections8 a\x
+ gen_sections8 b\x
+.endm
+
+.macro gen_sections32 x
+ gen_sections16 a\x
+ gen_sections16 b\x
+.endm
+
+.macro gen_sections64 x
+ gen_sections32 a\x
+ gen_sections32 b\x
+.endm
+
+.macro gen_sections128 x
+ gen_sections64 a\x
+ gen_sections64 b\x
+.endm
+
+.macro gen_sections256 x
+ gen_sections128 a\x
+ gen_sections128 b\x
+.endm
+
+.macro gen_sections512 x
+ gen_sections256 a\x
+ gen_sections256 b\x
+.endm
+
+.macro gen_sections1024 x
+ gen_sections512 a\x
+ gen_sections512 b\x
+.endm
+
+.macro gen_sections2048 x
+ gen_sections1024 a\x
+ gen_sections1024 b\x
+.endm
+
+.macro gen_sections4096 x
+ gen_sections2048 a\x
+ gen_sections2048 b\x
+.endm
+
+.macro gen_sections8192 x
+ gen_sections4096 a\x
+ gen_sections4096 b\x
+.endm
+
+.macro gen_sections16384 x
+ gen_sections8192 a\x
+ gen_sections8192 b\x
+.endm
+
+.macro gen_sections32768 x
+ gen_sections16384 a\x
+ gen_sections16384 b\x
+.endm
+
+gen_sections32768 a
+gen_sections16384 b
+gen_sections8192 c
+gen_sections4096 d
+gen_sections2048 e
+gen_sections1024 f
+gen_sections512 g
+gen_sections128 h
+gen_sections64 i
+gen_sections32 j
+gen_sections16 k
+gen_sections8 l
+gen_sections4 m
+
+.section last
+a:
+b = a + 1
diff --git a/test/MC/ELF/many-sections.s b/test/MC/ELF/many-sections.s
new file mode 100644
index 000000000000..93ea8e7821e2
--- /dev/null
+++ b/test/MC/ELF/many-sections.s
@@ -0,0 +1,106 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o %t
+// RUN: llvm-readobj -s %t | FileCheck --check-prefix=SECTIONS %s
+// RUN: llvm-readobj -t %t | FileCheck --check-prefix=SYMBOLS %s
+
+// Test that we don't create a .symtab_shndx since we are one section short of
+// SHN_LORESERVE (0xFF00).
+
+// SECTIONS-NOT: Name: .symtab_shndx
+
+// Check the last referenced section.
+
+// SYMBOLS: Name: dm (0)
+// SYMBOLS-NEXT: Value: 0x0
+// SYMBOLS-NEXT: Size: 0
+// SYMBOLS-NEXT: Binding: Local (0x0)
+// SYMBOLS-NEXT: Type: Section (0x3)
+// SYMBOLS-NEXT: Other: 0
+// SYMBOLS-NEXT: Section: dm (0xFEFF)
+// SYMBOLS-NEXT: }
+// SYMBOLS-NEXT:]
+
+.macro gen_sections4 x
+ .section a\x
+ .section b\x
+ .section c\x
+ .section d\x
+.endm
+
+.macro gen_sections8 x
+ gen_sections4 a\x
+ gen_sections4 b\x
+.endm
+
+.macro gen_sections16 x
+ gen_sections8 a\x
+ gen_sections8 b\x
+.endm
+
+.macro gen_sections32 x
+ gen_sections16 a\x
+ gen_sections16 b\x
+.endm
+
+.macro gen_sections64 x
+ gen_sections32 a\x
+ gen_sections32 b\x
+.endm
+
+.macro gen_sections128 x
+ gen_sections64 a\x
+ gen_sections64 b\x
+.endm
+
+.macro gen_sections256 x
+ gen_sections128 a\x
+ gen_sections128 b\x
+.endm
+
+.macro gen_sections512 x
+ gen_sections256 a\x
+ gen_sections256 b\x
+.endm
+
+.macro gen_sections1024 x
+ gen_sections512 a\x
+ gen_sections512 b\x
+.endm
+
+.macro gen_sections2048 x
+ gen_sections1024 a\x
+ gen_sections1024 b\x
+.endm
+
+.macro gen_sections4096 x
+ gen_sections2048 a\x
+ gen_sections2048 b\x
+.endm
+
+.macro gen_sections8192 x
+ gen_sections4096 a\x
+ gen_sections4096 b\x
+.endm
+
+.macro gen_sections16384 x
+ gen_sections8192 a\x
+ gen_sections8192 b\x
+.endm
+
+.macro gen_sections32768 x
+ gen_sections16384 a\x
+ gen_sections16384 b\x
+.endm
+
+gen_sections32768 a
+gen_sections16384 b
+gen_sections8192 c
+gen_sections4096 d
+gen_sections2048 e
+gen_sections1024 f
+gen_sections512 g
+gen_sections128 h
+gen_sections64 i
+gen_sections32 j
+gen_sections16 k
+gen_sections8 l
+gen_sections4 m
diff --git a/test/MC/ELF/merge.s b/test/MC/ELF/merge.s
index 0e92583192d4..d6e0b7c4b7dc 100644
--- a/test/MC/ELF/merge.s
+++ b/test/MC/ELF/merge.s
@@ -1,10 +1,7 @@
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -r | FileCheck %s
-// Test that PIC relocations with local symbols in a mergeable section are done
-// with a reference to the symbol. Not sure if this is a linker limitation,
-// but this matches the behavior of gas.
-
-// Non-PIC relocations with 0 offset don't use the symbol.
+// Test that relocations with local symbols in a mergeable section are done
+// with a reference to the symbol if the offset is non-zero.
movsd .Lfoo(%rip), %xmm1
@@ -13,6 +10,7 @@
jmp foo@PLT
movq foo@GOTPCREL, %rax
movq zed, %rax
+ movsd .Lfoo+4(%rip), %xmm1
.section .sec1,"aM",@progbits,16
.Lfoo:
@@ -30,5 +28,6 @@ foo:
// CHECK-NEXT: 0x{{[^ ]+}} R_X86_64_PLT32 foo 0x{{[^ ]+}}
// CHECK-NEXT: 0x{{[^ ]+}} R_X86_64_GOTPCREL foo 0x{{[^ ]+}}
// CHECK-NEXT: 0x{{[^ ]+}} R_X86_64_32S zed 0x{{[^ ]+}}
+// CHECK-NEXT: 0x{{[^ ]+}} R_X86_64_PC32 .sec1 0x{{[^ ]+}}
// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/test/MC/ELF/no-reloc.s b/test/MC/ELF/no-reloc.s
new file mode 100644
index 000000000000..78f1b88cebbe
--- /dev/null
+++ b/test/MC/ELF/no-reloc.s
@@ -0,0 +1,19 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -r | FileCheck %s
+
+// CHECK: Relocations [
+// CHECK-NEXT: ]
+
+ .section .test1_foo
+.Ltest1_1:
+.Ltest1_2 = .Ltest1_1
+ .section .test1_bar
+ .long .Ltest1_1-.Ltest1_2
+
+
+ .section test2
+
+.Ltest2_a:
+.Ltest2_b = .Ltest2_a
+.Ltest2_c:
+.Ltest2_d = .Ltest2_c-.Ltest2_b
+ .long .Ltest2_d
diff --git a/test/MC/ELF/nocompression.s b/test/MC/ELF/nocompression.s
new file mode 100644
index 000000000000..e7b01f7ad479
--- /dev/null
+++ b/test/MC/ELF/nocompression.s
@@ -0,0 +1,5 @@
+// RUN: not llvm-mc -filetype=obj -compress-debug-sections -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s
+
+// REQUIRES: nozlib
+
+// CHECK: llvm-mc{{[^:]*}}: build tools with zlib to enable -compress-debug-sections
diff --git a/test/MC/ELF/noexec.s b/test/MC/ELF/noexec.s
index 33cb8ae3452b..28f50cb7f692 100644
--- a/test/MC/ELF/noexec.s
+++ b/test/MC/ELF/noexec.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -mc-no-exec-stack -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s -t | FileCheck %s
+// RUN: llvm-mc -no-exec-stack -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -s -t | FileCheck %s
// CHECK: Section {
// CHECK: Index: 4
diff --git a/test/MC/ELF/offset.s b/test/MC/ELF/offset.s
new file mode 100644
index 000000000000..f44833299490
--- /dev/null
+++ b/test/MC/ELF/offset.s
@@ -0,0 +1,132 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -t - | FileCheck %s
+
+// Test that a variable declared with "var = other_var + cst" is in the same
+// section as other_var and its value is the value of other_var + cst.
+
+ .data
+ .globl sym_a
+ .size sym_a, 42
+ .byte 42
+ .type sym_a, @object
+sym_a:
+
+// CHECK: Symbol {
+// CHECK: Name: sym_a
+// CHECK-NEXT: Value: 0x1
+// CHECK-NEXT: Size: 42
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: Object
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+
+ .long 42
+ .globl sym_b
+sym_b:
+ .globl sym_c
+sym_c = sym_a
+// CHECK: Symbol {
+// CHECK: Name: sym_c
+// CHECK-NEXT: Value: 0x1
+// CHECK-NEXT: Size: 42
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: Object
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+
+ .globl sym_d
+sym_d = sym_a + 1
+// CHECK: Symbol {
+// CHECK: Name: sym_d
+// CHECK-NEXT: Value: 0x2
+// CHECK-NEXT: Size: 42
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: Object
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+
+ .globl sym_e
+sym_e = sym_a + (sym_b - sym_a) * 3
+// CHECK: Symbol {
+// CHECK: Name: sym_e
+// CHECK-NEXT: Value: 0xD
+// CHECK-NEXT: Size: 42
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: Object
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+
+
+ .globl sym_f
+sym_f = sym_a + (1 - 1)
+// CHECK: Symbol {
+// CHECK: Name: sym_f
+// CHECK-NEXT: Value: 0x1
+// CHECK-NEXT: Size: 42
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: Object
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+
+
+ .globl test2_a
+ .globl test2_b
+ .globl test2_c
+ .globl test2_d
+ .globl test2_e
+test2_a:
+ .long 0
+test2_b = test2_a
+test2_c:
+ .long 0
+test2_d = test2_c
+test2_e = test2_d - test2_b
+// CHECK: Symbol {
+// CHECK: Name: test2_a
+// CHECK-NEXT: Value: 0x5
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: test2_b
+// CHECK-NEXT: Value: 0x5
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: test2_c
+// CHECK-NEXT: Value: 0x9
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: test2_d
+// CHECK-NEXT: Value: 0x9
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: test2_e
+// CHECK-NEXT: Value: 0x4
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Absolute
+// CHECK-NEXT: }
diff --git a/test/MC/ELF/pic-diff.s b/test/MC/ELF/pic-diff.s
index cffa0dd368eb..5f0b1459ba26 100644
--- a/test/MC/ELF/pic-diff.s
+++ b/test/MC/ELF/pic-diff.s
@@ -7,13 +7,13 @@
// CHECK-NEXT: ]
// CHECK: Symbol {
-// CHECK: Name: baz (5)
+// CHECK: Name: baz
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
.zero 4
diff --git a/test/MC/ELF/pr19430.s b/test/MC/ELF/pr19430.s
new file mode 100644
index 000000000000..a1e524662a96
--- /dev/null
+++ b/test/MC/ELF/pr19430.s
@@ -0,0 +1,14 @@
+// RUN: llvm-mc -triple x86_64-pc-linux-gnu %s -filetype=obj -o - | llvm-readobj -r | FileCheck %s
+
+// Test that we can use .cfi_startproc without a global symbol.
+
+.text
+.space 1000
+.cfi_startproc
+ .cfi_endproc
+
+// CHECK: Relocations [
+// CHECK-NEXT: Section (5) .rela.eh_frame {
+// CHECK-NEXT: 0x20 R_X86_64_PC32 .text 0x3E8
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
diff --git a/test/MC/ELF/pr9292.s b/test/MC/ELF/pr9292.s
index a6e78dc992a8..1e01194c701c 100644
--- a/test/MC/ELF/pr9292.s
+++ b/test/MC/ELF/pr9292.s
@@ -8,20 +8,20 @@ mov %eax,bar
// CHECK: Symbol {
-// CHECK: Name: bar (5)
+// CHECK: Name: bar
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo (1)
+// CHECK-NEXT: Name: foo
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/relocation-386.s b/test/MC/ELF/relocation-386.s
index 9bc831056ae3..ba12df0d3b63 100644
--- a/test/MC/ELF/relocation-386.s
+++ b/test/MC/ELF/relocation-386.s
@@ -58,12 +58,17 @@
// CHECK-NEXT: 0x94 R_386_GOTPC _GLOBAL_OFFSET_TABLE_ 0x0
// Relocation 26 (und_symbol-bar2) is of type R_386_PC32
// CHECK-NEXT: 0x9A R_386_PC32 und_symbol 0x0
+// Relocation 27 (und_symbol-bar2) is of type R_386_PC16
+// CHECK-NEXT: 0x9E R_386_PC16 und_symbol 0x0
+// Relocation 28 (und_symbol-bar2) is of type R_386_PC8
+// CHECK-NEXT: 0xA0 R_386_PC8 und_symbol 0x0
+// CHECK-NEXT: 0xA3 R_386_GOTOFF und_symbol 0x0
// CHECK-NEXT: }
// CHECK-NEXT: ]
// Symbol 4 is zed
// CHECK: Symbol {
-// CHECK: Name: zed (53)
+// CHECK: Name: zed
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -120,6 +125,10 @@ bar2:
addl foo@GOTTPOFF(%edx), %eax
subl _GLOBAL_OFFSET_TABLE_-bar2, %ebx
leal und_symbol-bar2(%edx),%ecx
+ .word und_symbol-bar2
+ .byte und_symbol-bar2
+
+ leal 1 + und_symbol@GOTOFF, %edi
.section zedsec,"awT",@progbits
zed:
diff --git a/test/MC/ELF/relocation-pc.s b/test/MC/ELF/relocation-pc.s
index fc7420c39898..0ce32010cf0e 100644
--- a/test/MC/ELF/relocation-pc.s
+++ b/test/MC/ELF/relocation-pc.s
@@ -26,7 +26,7 @@
// CHECK-NEXT: AddressAlignment: 8
// CHECK-NEXT: EntrySize: 24
// CHECK-NEXT: Relocations [
-// CHECK-NEXT: 0x1 R_X86_64_PC8 - 0x0
-// CHECK-NEXT: 0x3 R_X86_64_PC32 - 0x0
+// CHECK-NEXT: 0x1 R_X86_64_PC8 - 0xFFFFFFFFFFFFFFFF
+// CHECK-NEXT: 0x3 R_X86_64_PC32 - 0xFFFFFFFFFFFFFEFC
// CHECK-NEXT: ]
// CHECK-NEXT: }
diff --git a/test/MC/ELF/relocation.s b/test/MC/ELF/relocation.s
index 682307501d66..c0e6007dc4e7 100644
--- a/test/MC/ELF/relocation.s
+++ b/test/MC/ELF/relocation.s
@@ -22,6 +22,17 @@ bar:
addq $bar,%rax # R_X86_64_32S
.quad foo@DTPOFF
movabsq $baz@TPOFF, %rax
+ .word foo-bar
+ .byte foo-bar
+
+ # this should probably be an error...
+ zed = foo +2
+ call zed@PLT
+
+ leaq -1+foo(%rip), %r11
+
+ movl $_GLOBAL_OFFSET_TABLE_, %eax
+ movabs $_GLOBAL_OFFSET_TABLE_, %rax
// CHECK: Section {
// CHECK: Name: .rela.text
@@ -45,6 +56,12 @@ bar:
// CHECK-NEXT: 0x77 R_X86_64_32S .text 0x0
// CHECK-NEXT: 0x7B R_X86_64_DTPOFF64 foo 0x0
// CHECK-NEXT: 0x85 R_X86_64_TPOFF64 baz 0x0
+// CHECK-NEXT: 0x8D R_X86_64_PC16 foo 0x8D
+// CHECK-NEXT: 0x8F R_X86_64_PC8 foo 0x8F
+// CHECK-NEXT: 0x91 R_X86_64_PLT32 zed 0xFFFFFFFFFFFFFFFC
+// CHECK-NEXT: 0x98 R_X86_64_PC32 foo 0xFFFFFFFFFFFFFFFB
+// CHECK-NEXT: 0x9D R_X86_64_GOTPC32 _GLOBAL_OFFSET_TABLE_ 0x1
+// CHECK-NEXT: 0xA3 R_X86_64_GOTPC64 _GLOBAL_OFFSET_TABLE_ 0x2
// CHECK-NEXT: ]
// CHECK-NEXT: }
diff --git a/test/MC/ELF/set.s b/test/MC/ELF/set.s
index f6965a583a9f..b4f77f5a0901 100644
--- a/test/MC/ELF/set.s
+++ b/test/MC/ELF/set.s
@@ -5,13 +5,13 @@
.set kernbase,0xffffffff80000000
// CHECK: Symbol {
-// CHECK: Name: kernbase (1)
+// CHECK: Name: kernbase
// CHECK-NEXT: Value: 0xFFFFFFFF80000000
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0xFFF1)
+// CHECK-NEXT: Section: Absolute (0xFFF1)
// CHECK-NEXT: }
// Test that we accept .set of a symbol after it has been used in a statement.
@@ -26,11 +26,11 @@
// Test that there is an undefined reference to bar
// CHECK: Symbol {
-// CHECK: Name: bar (10)
+// CHECK: Name: bar
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/strtab-suffix-opt.s b/test/MC/ELF/strtab-suffix-opt.s
new file mode 100644
index 000000000000..eb5da8a01552
--- /dev/null
+++ b/test/MC/ELF/strtab-suffix-opt.s
@@ -0,0 +1,21 @@
+// RUN: llvm-mc -filetype=obj -triple i686-pc-linux-gnu %s -o - | llvm-readobj -symbols | FileCheck %s
+
+ .text
+ .globl foobar
+ .align 16, 0x90
+ .type foobar,@function
+foobar:
+ pushl %ebp
+ movl %esp, %ebp
+ subl $8, %esp
+ calll foo
+ calll bar
+ addl $8, %esp
+ popl %ebp
+ retl
+.Ltmp3:
+ .size foobar, .Ltmp3-foobar
+
+// CHECK: Name: foobar (1)
+// CHECK: Name: bar (4)
+// CHECK: Name: foo (8)
diff --git a/test/MC/ELF/subtraction-error.s b/test/MC/ELF/subtraction-error.s
new file mode 100644
index 000000000000..6b93d3aee5bd
--- /dev/null
+++ b/test/MC/ELF/subtraction-error.s
@@ -0,0 +1,8 @@
+// RUN: not llvm-mc -filetype=obj -triple x86_64-pc-linux < %s 2>&1 | FileCheck %s
+
+a:
+ .section foo
+b:
+c = b - a
+
+; CHECK: symbol 'a' could not be evaluated in a subtraction expression
diff --git a/test/MC/ELF/symref.s b/test/MC/ELF/symref.s
deleted file mode 100644
index c8015b96a316..000000000000
--- a/test/MC/ELF/symref.s
+++ /dev/null
@@ -1,142 +0,0 @@
-// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -r -t | FileCheck %s
-
-defined1:
-defined2:
-defined3:
- .symver defined1, bar1@zed
- .symver undefined1, bar2@zed
-
- .symver defined2, bar3@@zed
-
- .symver defined3, bar5@@@zed
- .symver undefined3, bar6@@@zed
-
- .long defined1
- .long undefined1
- .long defined2
- .long defined3
- .long undefined3
-
- .global global1
- .symver global1, g1@@zed
-global1:
-
-// CHECK: Relocations [
-// CHECK-NEXT: Section (2) .rela.text {
-// CHECK-NEXT: 0x0 R_X86_64_32 .text 0x0
-// CHECK-NEXT: 0x4 R_X86_64_32 bar2@zed 0x0
-// CHECK-NEXT: 0x8 R_X86_64_32 .text 0x0
-// CHECK-NEXT: 0xC R_X86_64_32 .text 0x0
-// CHECK-NEXT: 0x10 R_X86_64_32 bar6@zed 0x0
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
-
-// CHECK: Symbol {
-// CHECK: Name: bar1@zed (19)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar3@@zed (37)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar5@@zed (47)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: defined1 (1)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: defined2 (10)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .text (0)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .data (0)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .data (0x3)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .bss (0)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: Section
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .bss (0x4)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: g1@@zed (74)
-// CHECK-NEXT: Value: 0x14
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: global1 (66)
-// CHECK-NEXT: Value: 0x14
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text (0x1)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar2@zed (28)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar6@zed (57)
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
diff --git a/test/MC/ELF/symver.s b/test/MC/ELF/symver.s
new file mode 100644
index 000000000000..6e5825f24313
--- /dev/null
+++ b/test/MC/ELF/symver.s
@@ -0,0 +1,142 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -r -t | FileCheck %s
+
+defined1:
+defined2:
+defined3:
+ .symver defined1, bar1@zed
+ .symver undefined1, bar2@zed
+
+ .symver defined2, bar3@@zed
+
+ .symver defined3, bar5@@@zed
+ .symver undefined3, bar6@@@zed
+
+ .long defined1
+ .long undefined1
+ .long defined2
+ .long defined3
+ .long undefined3
+
+ .global global1
+ .symver global1, g1@@zed
+global1:
+
+// CHECK: Relocations [
+// CHECK-NEXT: Section (2) .rela.text {
+// CHECK-NEXT: 0x0 R_X86_64_32 .text 0x0
+// CHECK-NEXT: 0x4 R_X86_64_32 bar2@zed 0x0
+// CHECK-NEXT: 0x8 R_X86_64_32 .text 0x0
+// CHECK-NEXT: 0xC R_X86_64_32 .text 0x0
+// CHECK-NEXT: 0x10 R_X86_64_32 bar6@zed 0x0
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+
+// CHECK: Symbol {
+// CHECK: Name: bar1@zed
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: bar3@@zed
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: bar5@@zed
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: defined1
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: defined2
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .text
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .data
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .bss
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .bss
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: g1@@zed
+// CHECK-NEXT: Value: 0x14
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: global1
+// CHECK-NEXT: Value: 0x14
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: bar2@zed
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Undefined
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: bar6@zed
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Undefined
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
diff --git a/test/MC/ELF/tls-i386.s b/test/MC/ELF/tls-i386.s
index 267046ef5bfb..5ee36681e2d8 100644
--- a/test/MC/ELF/tls-i386.s
+++ b/test/MC/ELF/tls-i386.s
@@ -18,128 +18,128 @@
.long fooE@INDNTPOFF
// CHECK: Symbol {
-// CHECK: Name: foo1 (1)
+// CHECK: Name: foo1
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo2 (6)
+// CHECK-NEXT: Name: foo2
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo3 (11)
+// CHECK-NEXT: Name: foo3
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo4 (16)
+// CHECK-NEXT: Name: foo4
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo5 (21)
+// CHECK-NEXT: Name: foo5
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo6 (26)
+// CHECK-NEXT: Name: foo6
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo7 (31)
+// CHECK-NEXT: Name: foo7
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo8 (36)
+// CHECK-NEXT: Name: foo8
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo9 (41)
+// CHECK-NEXT: Name: foo9
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: fooA (46)
+// CHECK-NEXT: Name: fooA
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: fooB (51)
+// CHECK-NEXT: Name: fooB
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: fooC (56)
+// CHECK-NEXT: Name: fooC
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: fooD (61)
+// CHECK-NEXT: Name: fooD
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: fooE (66)
+// CHECK-NEXT: Name: fooE
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/tls.s b/test/MC/ELF/tls.s
index c71e3962bb49..79865cd17be1 100644
--- a/test/MC/ELF/tls.s
+++ b/test/MC/ELF/tls.s
@@ -13,7 +13,7 @@ foobar:
.long 43
// CHECK: Symbol {
-// CHECK: Name: foobar (31)
+// CHECK: Name: foobar
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -23,56 +23,56 @@ foobar:
// CHECK-NEXT: }
// CHECK: Symbol {
-// CHECK: Name: foo1 (1)
+// CHECK: Name: foo1
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo2 (6)
+// CHECK-NEXT: Name: foo2
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo3 (11)
+// CHECK-NEXT: Name: foo3
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo4 (16)
+// CHECK-NEXT: Name: foo4
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo5 (21)
+// CHECK-NEXT: Name: foo5
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: foo6 (26)
+// CHECK-NEXT: Name: foo6
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: TLS
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/type-propagate.s b/test/MC/ELF/type-propagate.s
new file mode 100644
index 000000000000..15d05af7f2fa
--- /dev/null
+++ b/test/MC/ELF/type-propagate.s
@@ -0,0 +1,151 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o - | llvm-readobj -t - | FileCheck %s
+
+// This tests that types are propagated from symbols to their aliases. Our
+// behavior is a bit different from gas. If the type of a symbol changes,
+// gas will update the type of the aliases only if those aliases were declared
+// at a point in the file where the aliased symbol was already defined.
+
+// The lines marked with GAS illustrate this difference.
+
+
+ .type sym01, @object
+sym01:
+ .type sym02, @function
+sym02:
+
+ sym03 = sym01
+ sym04 = sym03
+.type sym03, @function
+ sym05 = sym03
+ sym06 = sym01 - sym02
+ sym07 = sym02 - sym01
+
+ sym08 = sym10
+ sym09 = sym10 + 1
+ .type sym10, @object
+sym10:
+
+ sym11 = sym10
+ sym12 = sym10 + 1
+ .type sym10, @function
+
+// CHECK: Symbol {
+// CHECK: Name: sym01
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: Object (0x1)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym02
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: Function (0x2)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym03
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: Function (0x2)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym04
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: Object (0x1)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym05
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+
+// GAS: Type: Function (0x2)
+// CHECK-NEXT: Type: Object (0x1)
+
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym06
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: None (0x0)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Absolute (0xFFF1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym07
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: None (0x0)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Absolute (0xFFF1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym08
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: Function (0x2)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym09
+// CHECK-NEXT: Value: 0x1
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+
+// GAS: Type: None (0x0)
+// CHECK-NEXT: Type: Function (0x2)
+
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym10
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+// CHECK-NEXT: Type: Function (0x2)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym11
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+
+// GAS: Type: Object (0x1)
+// CHECK-NEXT: Type: Function (0x2)
+
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym12
+// CHECK-NEXT: Value: 0x1
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local (0x0)
+
+// GAS: Type: Object (0x1)
+// CHECK-NEXT: Type: Function (0x2)
+
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
diff --git a/test/MC/ELF/type.s b/test/MC/ELF/type.s
index c2f3631ec432..c82d3006cfe0 100644
--- a/test/MC/ELF/type.s
+++ b/test/MC/ELF/type.s
@@ -41,6 +41,95 @@ tls_upper_case:
.global tls_upper_case
.type tls_upper_case,STT_TLS
+// Test that .set doesn't downgrade the type:
+// IFUNC > FUNC > OBJECT > NOTYPE
+// TLS_OBJECT > OBJECT > NOTYPE
+// also TLS_OBJECT is incompatible with IFUNC and FUNC
+
+ .global sym1
+ .type sym1, @gnu_indirect_function
+alias1:
+ .global alias1
+ .type alias1, @function
+ .set sym1, alias1
+
+ .global sym2
+ .type sym2, @gnu_indirect_function
+alias2:
+ .global alias2
+ .type alias2, @object
+ .set sym2, alias2
+
+ .global sym3
+ .type sym3, @gnu_indirect_function
+alias3:
+ .global alias3
+ .type alias3, @notype
+ .set sym3, alias3
+
+ .global sym4
+ .type sym4, @function
+alias4:
+ .global alias4
+ .type alias4, @object
+ .set sym4, alias4
+
+ .global sym5
+ .type sym5, @function
+alias5:
+ .global alias5
+ .type alias5, @notype
+ .set sym5, alias5
+
+ .global sym6
+ .type sym6, @object
+alias6:
+ .global alias6
+ .type alias6, @notype
+ .set sym6, alias6
+
+ .global sym7
+ .type sym7, @gnu_indirect_function
+alias7:
+ .global alias7
+ .type alias7, @tls_object
+ .set sym7, alias7
+
+ .global sym8
+ .type sym8, @function
+ .global alias8
+alias8:
+ .type alias8, @tls_object
+ .set sym8, alias8
+
+ .global sym9
+ .type sym9, @tls_object
+alias9:
+ .global alias9
+ .type alias9, @object
+ .set sym9, alias9
+
+ .global sym10
+ .type sym10, @tls_object
+alias10:
+ .global alias10
+ .type alias10, @notype
+ .set sym10, alias10
+
+ .global sym11
+ .type sym11, @tls_object
+alias11:
+ .global alias11
+ .type alias11, @gnu_indirect_function
+ .set sym11, alias11
+
+ .global sym12
+ .type sym12, @tls_object
+alias12:
+ .global alias12
+ .type alias12, @function
+ .set sym12, alias12
+
// CHECK: Symbol {
// CHECK: Name: bar
// CHECK-NEXT: Value: 0x0
@@ -86,6 +175,114 @@ tls_upper_case:
// CHECK-NEXT: Other: 0
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym1
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: GNU_IFunc (0xA)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym10
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: TLS (0x6)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym11
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: TLS (0x6)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym12
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: TLS (0x6)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym2
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: GNU_IFunc (0xA)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym3
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: GNU_IFunc (0xA)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym4
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: Function (0x2)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym5
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: Function (0x2)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym6
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: Object (0x1)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym7
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: GNU_IFunc (0xA)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym8
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: Function (0x2)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym9
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global (0x1)
+// CHECK-NEXT: Type: TLS (0x6)
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text (0x1)
+// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: tls
// CHECK-NEXT: Value: 0x0
diff --git a/test/MC/ELF/undef.s b/test/MC/ELF/undef.s
index 0d89fb129361..245b56328248 100644
--- a/test/MC/ELF/undef.s
+++ b/test/MC/ELF/undef.s
@@ -19,21 +19,80 @@
.text
movsd .Lsym8(%rip), %xmm1
-// CHECK: Symbols [
-
-// CHECK: Symbol {
-// CHECK: Name: .Lsym8
-
-// CHECK: Symbol {
-// CHECK: Name: .Lsym1
+test2_a = undef
+test2_b = undef + 1
-// CHECK: Symbol {
-// CHECK: Name: sym6
+// CHECK: Symbols [
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: (0)
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Undefined
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .Lsym8
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .rodata.str1.1
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .text
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .text
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .data
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .data
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .bss
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .bss
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .rodata.str1.1
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Local
+// CHECK-NEXT: Type: Section
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: .rodata.str1.1
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: .Lsym1
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: Size: 0
+// CHECK-NEXT: Binding: Global
+// CHECK-NEXT: Type: None
+// CHECK-NEXT: Other: 0
+// CHECK-NEXT: Section: Undefined
+// CHECK-NEXT: }
+// CHECK-NEXT: Symbol {
+// CHECK-NEXT: Name: sym6
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: Object
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined
// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/test/MC/ELF/weak.s b/test/MC/ELF/weak.s
index 2ed3eb7b2bd4..99d427333d14 100644
--- a/test/MC/ELF/weak.s
+++ b/test/MC/ELF/weak.s
@@ -5,7 +5,7 @@
.weak foo
.long foo
-// And that bar is after all local symbols and has non zero value.
+// And that bar is after all local symbols and has non-zero value.
.weak bar
bar:
@@ -25,6 +25,6 @@ bar:
// CHECK-NEXT: Binding: Weak
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/test/MC/ELF/weakref-reloc.s b/test/MC/ELF/weakref-reloc.s
index 48bda8748fde..582c6946c609 100644
--- a/test/MC/ELF/weakref-reloc.s
+++ b/test/MC/ELF/weakref-reloc.s
@@ -22,7 +22,7 @@
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: foo (1)
@@ -31,7 +31,7 @@
// CHECK-NEXT: Binding: Weak
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
// CHECK-NEXT: Name: zed (5)
@@ -40,5 +40,5 @@
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
diff --git a/test/MC/ELF/weakref.s b/test/MC/ELF/weakref.s
index 6c2d33397c85..2288264bd6c7 100644
--- a/test/MC/ELF/weakref.s
+++ b/test/MC/ELF/weakref.s
@@ -77,10 +77,10 @@ bar15:
// CHECK-NEXT: Binding: Local (0x0)
// CHECK-NEXT: Type: None (0x0)
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar6 (21)
+// CHECK-NEXT: Name: bar6
// CHECK-NEXT: Value: 0x18
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -89,7 +89,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar7 (26)
+// CHECK-NEXT: Name: bar7
// CHECK-NEXT: Value: 0x18
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -98,7 +98,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar8 (31)
+// CHECK-NEXT: Name: bar8
// CHECK-NEXT: Value: 0x1C
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -107,7 +107,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar9 (36)
+// CHECK-NEXT: Name: bar9
// CHECK-NEXT: Value: 0x20
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -116,7 +116,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .text (0)
+// CHECK-NEXT: Name: .text
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -125,7 +125,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .data (0)
+// CHECK-NEXT: Name: .data
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -134,7 +134,7 @@ bar15:
// CHECK-NEXT: Section: .data (0x3)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: .bss (0)
+// CHECK-NEXT: Name: .bss
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Local
@@ -143,7 +143,7 @@ bar15:
// CHECK-NEXT: Section: .bss (0x4)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar10 (41)
+// CHECK-NEXT: Name: bar10
// CHECK-NEXT: Value: 0x28
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
@@ -152,7 +152,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar11 (47)
+// CHECK-NEXT: Name: bar11
// CHECK-NEXT: Value: 0x30
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
@@ -161,7 +161,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar12 (53)
+// CHECK-NEXT: Name: bar12
// CHECK-NEXT: Value: 0x30
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
@@ -170,7 +170,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar13 (59)
+// CHECK-NEXT: Name: bar13
// CHECK-NEXT: Value: 0x34
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
@@ -179,7 +179,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar14 (65)
+// CHECK-NEXT: Name: bar14
// CHECK-NEXT: Value: 0x38
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
@@ -188,7 +188,7 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar15 (71)
+// CHECK-NEXT: Name: bar15
// CHECK-NEXT: Value: 0x40
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
@@ -197,39 +197,39 @@ bar15:
// CHECK-NEXT: Section: .text (0x1)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar2 (1)
+// CHECK-NEXT: Name: bar2
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar3 (6)
+// CHECK-NEXT: Name: bar3
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Weak
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar4 (11)
+// CHECK-NEXT: Name: bar4
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: bar5 (16)
+// CHECK-NEXT: Name: bar5
// CHECK-NEXT: Value: 0x0
// CHECK-NEXT: Size: 0
// CHECK-NEXT: Binding: Global
// CHECK-NEXT: Type: None
// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: Section: Undefined (0x0)
// CHECK-NEXT: }
// CHECK-NEXT: ]
diff --git a/test/MC/MachO/AArch64/darwin-ARM64-local-label-diff.s b/test/MC/MachO/AArch64/darwin-ARM64-local-label-diff.s
new file mode 100644
index 000000000000..d98c257c8586
--- /dev/null
+++ b/test/MC/MachO/AArch64/darwin-ARM64-local-label-diff.s
@@ -0,0 +1,21 @@
+; RUN: llvm-mc -triple arm64-apple-darwin -filetype=obj -o - < %s | macho-dump -dump-section-data | FileCheck %s
+; rdar://13028719
+
+ .globl context_save0
+ .align 6
+Lcontext_save0:
+context_save0:
+ .fill 2, 8, 5
+Lcontext_save0_end:
+Lcontext_save0_size: .quad (Lcontext_save0_end - Lcontext_save0)
+
+ .align 6
+Lcontext_save1:
+ .fill 2, 8, 0
+Lcontext_save1_end:
+Lcontext_save1_size: .quad (Lcontext_save1_end - Lcontext_save1)
+
+Llockup_release:
+ .quad 0
+
+; CHECK: ('_section_data', '05000000 00000000 05000000 00000000 10000000 00000000 1f2003d5 1f2003d5 1f2003d5 1f2003d5 1f2003d5 1f2003d5 1f2003d5 1f2003d5 1f2003d5 1f2003d5 00000000 00000000 00000000 00000000 10000000 00000000 00000000 00000000')
diff --git a/test/MC/MachO/AArch64/darwin-ARM64-reloc.s b/test/MC/MachO/AArch64/darwin-ARM64-reloc.s
new file mode 100644
index 000000000000..7f586aedd636
--- /dev/null
+++ b/test/MC/MachO/AArch64/darwin-ARM64-reloc.s
@@ -0,0 +1,157 @@
+; RUN: llvm-mc -n -triple arm64-apple-darwin10 %s -filetype=obj -o - | macho-dump --dump-section-data | FileCheck %s
+
+ .text
+_fred:
+ bl _func
+ bl _func + 20
+
+ adrp x3, _data@page
+ ldr w2, [x3, _data@pageoff]
+
+ add x3, x3, _data@pageoff + 4
+
+ adrp x3, _data@page+1
+ ldr w2, [x3, _data@pageoff + 4]
+
+ adrp x3, _data_ext@gotpage
+ ldr w2, [x3, _data_ext@gotpageoff]
+
+ .data
+_data:
+ .quad _foo
+ .quad _foo + 4
+ .quad _foo - _bar
+ .quad _foo - _bar + 4
+
+ .long _foo - _bar
+
+ .quad _foo@got
+ .long _foo@got - .
+
+
+; CHECK: ('cputype', 16777228)
+; CHECK: ('cpusubtype', 0)
+; CHECK: ('filetype', 1)
+; CHECK: ('num_load_commands', 3)
+; CHECK: ('load_commands_size', 336)
+; CHECK: ('flag', 0)
+; CHECK: ('reserved', 0)
+; CHECK: ('load_commands', [
+; CHECK: # Load Command 0
+; CHECK: (('command', 25)
+; CHECK: ('size', 232)
+; CHECK: ('segment_name', '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
+; CHECK: ('vm_addr', 0)
+; CHECK: ('vm_size', 84)
+; CHECK: ('file_offset', 368)
+; CHECK: ('file_size', 84)
+; CHECK: ('maxprot', 7)
+; CHECK: ('initprot', 7)
+; CHECK: ('num_sections', 2)
+; CHECK: ('flags', 0)
+; CHECK: ('sections', [
+; CHECK: # Section 0
+; CHECK: (('section_name', '__text\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
+; CHECK: ('segment_name', '__TEXT\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
+; CHECK: ('address', 0)
+; CHECK: ('size', 36)
+; CHECK: ('offset', 368)
+; CHECK: ('alignment', 0)
+; CHECK: ('reloc_offset', 452)
+; CHECK: ('num_reloc', 13)
+; CHECK: ('flags', 0x80000400)
+; CHECK: ('reserved1', 0)
+; CHECK: ('reserved2', 0)
+; CHECK: ('reserved3', 0)
+; CHECK: ),
+; CHECK: ('_relocations', [
+; CHECK: # Relocation 0
+; CHECK: (('word-0', 0x20),
+; CHECK: ('word-1', 0x6c000005)),
+; CHECK: # Relocation 1
+; CHECK: (('word-0', 0x1c),
+; CHECK: ('word-1', 0x5d000005)),
+; CHECK: # Relocation 2
+; CHECK: (('word-0', 0x18),
+; CHECK: ('word-1', 0xa4000004)),
+; CHECK: # Relocation 3
+; CHECK: (('word-0', 0x18),
+; CHECK: ('word-1', 0x4c000002)),
+; CHECK: # Relocation 4
+; CHECK: (('word-0', 0x14),
+; CHECK: ('word-1', 0xa4000001)),
+; CHECK: # Relocation 5
+; CHECK: (('word-0', 0x14),
+; CHECK: ('word-1', 0x3d000002)),
+; CHECK: # Relocation 6
+; CHECK: (('word-0', 0x10),
+; CHECK: ('word-1', 0xa4000004)),
+; CHECK: # Relocation 7
+; CHECK: (('word-0', 0x10),
+; CHECK: ('word-1', 0x4c000002)),
+; CHECK: # Relocation 8
+; CHECK: (('word-0', 0xc),
+; CHECK: ('word-1', 0x4c000002)),
+; CHECK: # Relocation 9
+; CHECK: (('word-0', 0x8),
+; CHECK: ('word-1', 0x3d000002)),
+; CHECK: # Relocation 10
+; CHECK: (('word-0', 0x4),
+; CHECK: ('word-1', 0xa4000014)),
+; CHECK: # Relocation 11
+; CHECK: (('word-0', 0x4),
+; CHECK: ('word-1', 0x2d000007)),
+; CHECK: # Relocation 12
+; CHECK: (('word-0', 0x0),
+; CHECK: ('word-1', 0x2d000007)),
+; CHECK: ])
+; CHECK: ('_section_data', '00000094 00000094 03000090 620040b9 63000091 03000090 620040b9 03000090 620040b9')
+; CHECK: # Section 1
+; CHECK: (('section_name', '__data\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
+; CHECK: ('segment_name', '__DATA\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
+; CHECK: ('address', 36)
+; CHECK: ('size', 48)
+; CHECK: ('offset', 404)
+; CHECK: ('alignment', 0)
+; CHECK: ('reloc_offset', 556)
+; CHECK: ('num_reloc', 10)
+; CHECK: ('flags', 0x0)
+; CHECK: ('reserved1', 0)
+; CHECK: ('reserved2', 0)
+; CHECK: ('reserved3', 0)
+; CHECK: ),
+; CHECK: ('_relocations', [
+; CHECK: # Relocation 0
+; CHECK: (('word-0', 0x2c),
+; CHECK: ('word-1', 0x7d000006)),
+; CHECK: # Relocation 1
+; CHECK: (('word-0', 0x24),
+; CHECK: ('word-1', 0x7e000006)),
+; CHECK: # Relocation 2
+; CHECK: (('word-0', 0x20),
+; CHECK: ('word-1', 0x1c000004)),
+; CHECK: # Relocation 3
+; CHECK: (('word-0', 0x20),
+; CHECK: ('word-1', 0xc000006)),
+; CHECK: # Relocation 4
+; CHECK: (('word-0', 0x18),
+; CHECK: ('word-1', 0x1e000004)),
+; CHECK: # Relocation 5
+; CHECK: (('word-0', 0x18),
+; CHECK: ('word-1', 0xe000006)),
+; CHECK: # Relocation 6
+; CHECK: (('word-0', 0x10),
+; CHECK: ('word-1', 0x1e000004)),
+; CHECK: # Relocation 7
+; CHECK: (('word-0', 0x10),
+; CHECK: ('word-1', 0xe000006)),
+; CHECK: # Relocation 8
+; CHECK: (('word-0', 0x8),
+; CHECK: ('word-1', 0xe000006)),
+; CHECK: # Relocation 9
+; CHECK: (('word-0', 0x0),
+; CHECK: ('word-1', 0xe000006)),
+; CHECK: ])
+; CHECK: ('_section_data', '00000000 00000000 04000000 00000000 00000000 00000000 04000000 00000000 00000000 00000000 00000000 d4ffffff')
+; CHECK: ])
+; CHECK: ),
diff --git a/test/MC/MachO/AArch64/lit.local.cfg b/test/MC/MachO/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..cec29af5bbe4
--- /dev/null
+++ b/test/MC/MachO/AArch64/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/MC/MachO/ARM/aliased-symbols.s b/test/MC/MachO/ARM/aliased-symbols.s
new file mode 100644
index 000000000000..0b4463d055aa
--- /dev/null
+++ b/test/MC/MachO/ARM/aliased-symbols.s
@@ -0,0 +1,115 @@
+// RUN: llvm-mc -triple thumbv7m-apple-darwin-eabi %s -filetype=obj -o %t
+// RUN: llvm-readobj -symbols %t | FileCheck %s
+
+ .data
+ var1 = var2
+ .long var1
+ .long var2
+ .long var2 + 4
+defined_early:
+ .long 0
+
+ alias_to_early = defined_early
+ alias_to_late = defined_late
+
+defined_late:
+ .long 0
+
+ .global extern_test
+ extern_test = var2
+
+ alias_to_local = Ltmp0
+Ltmp0:
+
+// CHECK: Symbols [
+
+ // defined_early was defined. Actually has value 0xc.
+// CHECK: Symbol {
+// CHECK-NEXT: Name: defined_early
+// CHECK-NEXT: Type: Section (0xE)
+// CHECK-NEXT: Section: __data (0x2)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x[[DEFINED_EARLY:[0-9A-F]+]]
+// CHECK-NEXT: }
+
+ // alias_to_early was an alias to defined_early. But we can resolve it.
+// CHECK: Symbol {
+// CHECK-NEXT: Name: alias_to_early
+// CHECK-NEXT: Type: Section (0xE)
+// CHECK-NEXT: Section: __data (0x2)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x[[DEFINED_EARLY]]
+// CHECK-NEXT: }
+
+ // defined_late was defined. Just after defined_early.
+// CHECK: Symbol {
+// CHECK-NEXT: Name: defined_late
+// CHECK-NEXT: Type: Section (0xE)
+// CHECK-NEXT: Section: __data (0x2)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x[[DEFINED_LATE:[0-9A-F]+]]
+// CHECK-NEXT: }
+
+ // alias_to_late was an alias to defined_late. But we can resolve it.
+// CHECK: Symbol {
+// CHECK-NEXT: Name: alias_to_late
+// CHECK-NEXT: Type: Section (0xE)
+// CHECK-NEXT: Section: __data (0x2)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x[[DEFINED_LATE]]
+// CHECK-NEXT: }
+
+ // alias_to_local is an alias, but what it points to has no
+ // MachO representation. We must resolve it.
+// CHECK: Symbol {
+// CHECK-NEXT: Name: alias_to_local (37)
+// CHECK-NEXT: Type: Section (0xE)
+// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x14
+// CHECK-NEXT: }
+
+ // extern_test was a pure alias to the unknown "var2".
+ // N_INDR and Extern.
+// CHECK: Name: extern_test
+// CHECK-NEXT: Extern
+// CHECK-NEXT: Type: Indirect (0xA)
+// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x[[VAR2_STRINGINDEX:[0-9a-f]+]]
+// CHECK-NEXT: }
+
+ // var1 was another alias to an unknown variable. Not extern this time.
+// CHECK: Symbol {
+// CHECK-NEXT: Name: var1 (1)
+// CHECK-NEXT: Type: Indirect (0xA)
+// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x[[VAR2_STRINGINDEX]]
+// CHECK-NEXT: }
+
+ // var2 was a normal undefined (extern) symbol.
+// CHECK: Symbol {
+// CHECK-NEXT: Name: var2
+// CHECK-NEXT: Extern
+// CHECK-NEXT: Type: Undef (0x0)
+// CHECK-NEXT: Section: (0x0)
+// CHECK-NEXT: RefType: UndefinedNonLazy (0x0)
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Value: 0x0
+// CHECK-NEXT: }
diff --git a/test/MC/MachO/ARM/bad-darwin-ARM-reloc.s b/test/MC/MachO/ARM/bad-darwin-ARM-reloc.s
new file mode 100644
index 000000000000..7ad91df3ce0d
--- /dev/null
+++ b/test/MC/MachO/ARM/bad-darwin-ARM-reloc.s
@@ -0,0 +1,9 @@
+@ RUN: not llvm-mc -n -triple armv7-apple-darwin10 %s -filetype=obj -o - 2> %t.err > %t
+@ RUN: FileCheck --check-prefix=CHECK-ERROR < %t.err %s
+@ rdar://15586725
+.text
+ ldr r3, L___fcommon
+.section myseg, mysect
+L___fcommon:
+ .word 0
+@ CHECK-ERROR: unsupported relocation on symbol
diff --git a/test/MC/MachO/ARM/bad-darwin-directives.s b/test/MC/MachO/ARM/bad-darwin-directives.s
new file mode 100644
index 000000000000..7ac0f6f7f0f4
--- /dev/null
+++ b/test/MC/MachO/ARM/bad-darwin-directives.s
@@ -0,0 +1,29 @@
+@ RUN: not llvm-mc -n -triple armv7-apple-darwin10 %s -filetype asm -o /dev/null 2>&1 \
+@ RUN: | FileCheck --check-prefix CHECK-ERROR %s
+
+@ RUN: not llvm-mc -n -triple armv7-apple-darwin10 %s -filetype obj -o /dev/null 2>&1 \
+@ RUN: | FileCheck --check-prefix CHECK-ERROR %s
+
+@ rdar://16335232
+
+.eabi_attribute 8, 1
+@ CHECK-ERROR: error: unknown directive
+
+.cpu
+@ CHECK-ERROR: error: unknown directive
+
+.fpu neon
+@ CHECK-ERROR: error: unknown directive
+
+.arch armv7
+@ CHECK-ERROR: error: unknown directive
+
+.fnstart
+@ CHECK-ERROR: error: unknown directive
+
+.tlsdescseq
+@ CHECK-ERROR: error: unknown directive
+
+.object_arch armv7
+@ CHECK-ERROR: error: unknown directive
+
diff --git a/test/MC/MachO/ARM/ios-version-min-load-command.s b/test/MC/MachO/ARM/ios-version-min-load-command.s
new file mode 100644
index 000000000000..e065d147be76
--- /dev/null
+++ b/test/MC/MachO/ARM/ios-version-min-load-command.s
@@ -0,0 +1,10 @@
+// RUN: llvm-mc -triple armv7-apple-ios %s -filetype=obj -o - | macho-dump | FileCheck %s
+
+// Test the formation of the version-min load command in the MachO file.
+// Use a nonsense but well-formed version.
+.ios_version_min 99,8,7
+// CHECK: (('command', 37)
+// CHECK: ('size', 16)
+// CHECK: ('version, 6490119)
+// CHECK: ('reserved, 0)
+// CHECK: ),
diff --git a/test/MC/MachO/ARM/lit.local.cfg b/test/MC/MachO/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/MC/MachO/ARM/lit.local.cfg
+++ b/test/MC/MachO/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/MachO/ARM/version-min-diagnostics.s b/test/MC/MachO/ARM/version-min-diagnostics.s
new file mode 100644
index 000000000000..15d44d31661a
--- /dev/null
+++ b/test/MC/MachO/ARM/version-min-diagnostics.s
@@ -0,0 +1,49 @@
+// RUN: not llvm-mc -triple i386-apple-darwin %s 2> %t
+// RUN: FileCheck %s < %t
+// RUN: not llvm-mc -triple x86_64-apple-darwin %s 2> %t
+// RUN: FileCheck %s < %t
+// RUN: not llvm-mc -triple armv7-apple-ios %s 2> %t
+// RUN: FileCheck %s < %t
+
+.ios_version_min 5,2,257
+.ios_version_min 5,256,1
+.ios_version_min 5,-1,1
+.ios_version_min 0,1,1
+.ios_version_min 70000,1
+.macosx_version_min 99,2,257
+.macosx_version_min 50,256,1
+.macosx_version_min 10,-1,1
+.macosx_version_min 0,1,1
+.macosx_version_min 70000,1
+
+
+// CHECK: error: invalid OS update number
+// CHECK: .ios_version_min 5,2,257
+// CHECK: ^
+// CHECK: error: invalid OS minor version number
+// CHECK: .ios_version_min 5,256,1
+// CHECK: ^
+// CHECK: error: invalid OS minor version number
+// CHECK: .ios_version_min 5,-1,1
+// CHECK: ^
+// CHECK: error: invalid OS major version number
+// CHECK: .ios_version_min 0,1,1
+// CHECK: ^
+// CHECK: error: invalid OS major version number
+// CHECK: .ios_version_min 70000,1
+// CHECK: ^
+// CHECK: error: invalid OS update number
+// CHECK: .macosx_version_min 99,2,257
+// CHECK: ^
+// CHECK: error: invalid OS minor version number
+// CHECK: .macosx_version_min 50,256,1
+// CHECK: ^
+// CHECK: error: invalid OS minor version number
+// CHECK: .macosx_version_min 10,-1,1
+// CHECK: ^
+// CHECK: error: invalid OS major version number
+// CHECK: .macosx_version_min 0,1,1
+// CHECK: ^
+// CHECK: error: invalid OS major version number
+// CHECK: .macosx_version_min 70000,1
+// CHECK: ^
diff --git a/test/MC/MachO/ARM/version-min.s b/test/MC/MachO/ARM/version-min.s
new file mode 100644
index 000000000000..0a40338ed5e0
--- /dev/null
+++ b/test/MC/MachO/ARM/version-min.s
@@ -0,0 +1,21 @@
+// RUN: llvm-mc -triple i386-apple-darwin %s | FileCheck %s
+// RUN: llvm-mc -triple x86_64-apple-darwin %s | FileCheck %s
+// RUN: llvm-mc -triple armv7s-apple-ios %s | FileCheck %s
+
+// Test the parsing of well-formed version-min directives.
+
+.ios_version_min 5,2,0
+.ios_version_min 3,2,1
+.ios_version_min 5,0
+
+// CHECK: .ios_version_min 5, 2
+// CHECK: .ios_version_min 3, 2, 1
+// CHECK: .ios_version_min 5, 0
+
+.macosx_version_min 10,2,0
+.macosx_version_min 10,8,1
+.macosx_version_min 2,0
+
+// CHECK: .macosx_version_min 10, 2
+// CHECK: .macosx_version_min 10, 8, 1
+// CHECK: .macosx_version_min 2, 0
diff --git a/test/MC/MachO/bad-darwin-x86_64-reloc-expr.s b/test/MC/MachO/bad-darwin-x86_64-reloc-expr.s
new file mode 100644
index 000000000000..2b4271f349dc
--- /dev/null
+++ b/test/MC/MachO/bad-darwin-x86_64-reloc-expr.s
@@ -0,0 +1,6 @@
+// RUN: not llvm-mc -triple x86_64-apple-darwin10 %s -filetype=obj -o - 2> %t.err > %t
+// RUN: FileCheck --check-prefix=CHECK-ERROR < %t.err %s
+
+.quad (0x1234 + (4 * SOME_VALUE))
+// CHECK-ERROR: error: expected relocatable expression
+// CHECK-ERROR: ^
diff --git a/test/MC/MachO/bss.s b/test/MC/MachO/bss.s
index 15d490ad5eba..c5afe107782b 100644
--- a/test/MC/MachO/bss.s
+++ b/test/MC/MachO/bss.s
@@ -1,4 +1,4 @@
-// The purpose of this test is to verify that bss sections are emited correctly.
+// The purpose of this test is to verify that bss sections are emitted correctly.
// RUN: llvm-mc -filetype=obj -triple i686-apple-darwin9 %s | llvm-readobj -s | FileCheck %s
// RUN: llvm-mc -filetype=obj -triple x86_64-apple-darwin9 %s | llvm-readobj -s | FileCheck %s
diff --git a/test/MC/MachO/debug_frame.s b/test/MC/MachO/debug_frame.s
index 20bfd8dde2e7..247347d252a7 100644
--- a/test/MC/MachO/debug_frame.s
+++ b/test/MC/MachO/debug_frame.s
@@ -3,6 +3,7 @@
// Make sure MC can handle file level .cfi_startproc and .cfi_endproc that creates
// an empty frame.
// rdar://10017184
+_proc:
.cfi_startproc
.cfi_endproc
diff --git a/test/MC/MachO/eh-frame-reloc.s b/test/MC/MachO/eh-frame-reloc.s
new file mode 100644
index 000000000000..c39ce8479ccc
--- /dev/null
+++ b/test/MC/MachO/eh-frame-reloc.s
@@ -0,0 +1,16 @@
+// RUN: llvm-mc < %s -triple=x86_64-apple-macosx10.7 -filetype=obj | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc < %s -triple=x86_64-apple-macosx10.6 -filetype=obj | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc < %s -triple=x86_64-apple-ios7.0.0 -filetype=obj | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc < %s -triple=x86_64-apple-macosx10.5 -filetype=obj | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc < %s -triple=i686-apple-macosx10.6 -filetype=obj | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc < %s -triple=i686-apple-macosx10.5 -filetype=obj | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc < %s -triple=i686-apple-macosx10.4 -filetype=obj | llvm-readobj -r | FileCheck %s
+
+ .globl _bar
+ .align 4, 0x90
+_bar:
+ .cfi_startproc
+ .cfi_endproc
+
+// CHECK: Relocations [
+// CHECK-NEXT: ]
diff --git a/test/MC/MachO/eh_symbol.s b/test/MC/MachO/eh_symbol.s
new file mode 100644
index 000000000000..738e2b67d0de
--- /dev/null
+++ b/test/MC/MachO/eh_symbol.s
@@ -0,0 +1,14 @@
+// RUN: llvm-mc -triple i386-apple-darwin9 %s -filetype=obj -o - | llvm-nm - | FileCheck %s
+
+// Test that we don't produce foo.eh symbols in a debug_frame section.
+// CHECK-NOT: _f.eh
+// CHECK: T _f
+// CHECK-NOT: _f.eh
+
+ .globl _f
+_f:
+ .cfi_startproc
+ retl
+ .cfi_endproc
+
+ .cfi_sections .debug_frame
diff --git a/test/MC/MachO/gen-dwarf-cpp.s b/test/MC/MachO/gen-dwarf-cpp.s
index e42a63a191b1..04a9508aae22 100644
--- a/test/MC/MachO/gen-dwarf-cpp.s
+++ b/test/MC/MachO/gen-dwarf-cpp.s
@@ -5,18 +5,25 @@
.globl _bar
_bar:
movl $0, %eax
+# 3 "inc/g.s"
+ movl $0, %eax
L1: leave
+# 42 "t.s"
ret
// rdar://9275556
// We check that the source name "t.s" is picked up
+// CHECK: include_directories[ 1] = '{{.*[/\\]}}test{{[/\\]}}MC{{[/\\]}}MachO'
+// CHECK: include_directories[ 2] = 'inc'
// CHECK: Dir Mod Time File Len File Name
// CHECK: ---- ---------- ---------- ---------------------------
// CHECK: file_names[ 1] 1 0x00000000 0x00000000 gen-dwarf-cpp.s
// CHECK: file_names[ 2] 0 0x00000000 0x00000000 t.s
+// CHECK: file_names[ 3] 2 0x00000000 0x00000000 g.s
+// CHECK-NOT: file_names
// We check that the source line number 100 is picked up before the "movl"
-// CHECK: Address Line Column File ISA Flags
-// CHECK: ------------------ ------ ------ ------ --- -------------
-// CHECK: 0x0000000000000000 102 0 2 0 is_stmt
+// CHECK: Address Line Column File ISA Discriminator Flags
+// CHECK: ------------------ ------ ------ ------ --- ------------- -------------
+// CHECK: 0x0000000000000000 102 0 2 0 0 is_stmt
diff --git a/test/MC/MachO/gen-dwarf.s b/test/MC/MachO/gen-dwarf.s
index d763dd120ab2..997c83498ef1 100644
--- a/test/MC/MachO/gen-dwarf.s
+++ b/test/MC/MachO/gen-dwarf.s
@@ -113,10 +113,10 @@ _x: .long 1
// CHECK: ---- ---------- ---------- ---------------------------
// CHECK: file_names[ 1] 1 0x00000000 0x00000000 gen-dwarf.s
-// CHECK: Address Line Column File ISA Flags
-// CHECK: ------------------ ------ ------ ------ --- -------------
-// CHECK: 0x0000000000000000 6 0 1 0 is_stmt
-// CHECK: 0x0000000000000005 7 0 1 0 is_stmt
-// CHECK: 0x0000000000000006 8 0 1 0 is_stmt
-// CHECK: 0x0000000000000007 11 0 1 0 is_stmt
-// CHECK: 0x0000000000000008 11 0 1 0 is_stmt end_sequence
+// CHECK: Address Line Column File ISA Discriminator Flags
+// CHECK: ------------------ ------ ------ ------ --- ------------- -------------
+// CHECK: 0x0000000000000000 6 0 1 0 0 is_stmt
+// CHECK: 0x0000000000000005 7 0 1 0 0 is_stmt
+// CHECK: 0x0000000000000006 8 0 1 0 0 is_stmt
+// CHECK: 0x0000000000000007 11 0 1 0 0 is_stmt
+// CHECK: 0x0000000000000008 11 0 1 0 0 is_stmt end_sequence
diff --git a/test/MC/MachO/lit.local.cfg b/test/MC/MachO/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/MC/MachO/lit.local.cfg
+++ b/test/MC/MachO/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/MachO/osx-version-min-load-command.s b/test/MC/MachO/osx-version-min-load-command.s
new file mode 100644
index 000000000000..2a73609dc012
--- /dev/null
+++ b/test/MC/MachO/osx-version-min-load-command.s
@@ -0,0 +1,10 @@
+// RUN: llvm-mc -triple x86_64-apple-darwin %s -filetype=obj -o - | macho-dump | FileCheck %s
+
+// Test the formation of the version-min load command in the MachO file.
+// Use a nonsense but well-formed version.
+.macosx_version_min 25,3,1
+// CHECK: (('command', 36)
+// CHECK: ('size', 16)
+// CHECK: ('version, 1639169)
+// CHECK: ('reserved, 0)
+// CHECK: ),
diff --git a/test/MC/MachO/pr19185.s b/test/MC/MachO/pr19185.s
new file mode 100644
index 000000000000..fb21e51b70eb
--- /dev/null
+++ b/test/MC/MachO/pr19185.s
@@ -0,0 +1,6 @@
+// RUN: llvm-mc -triple x86_64-apple-darwin %s -filetype=obj -o %t.o
+f:
+ .cfi_startproc
+ .cfi_endproc
+
+EH_frame0:
diff --git a/test/MC/MachO/temp-labels.s b/test/MC/MachO/temp-labels.s
index b7382b7d2c82..ac0f6203aef1 100644
--- a/test/MC/MachO/temp-labels.s
+++ b/test/MC/MachO/temp-labels.s
@@ -1,4 +1,4 @@
-// RUN: llvm-mc -triple x86_64-apple-darwin10 %s -filetype=obj -L -o - | macho-dump --dump-section-data | FileCheck %s
+// RUN: llvm-mc -triple x86_64-apple-darwin10 %s -filetype=obj -save-temp-labels -o - | macho-dump --dump-section-data | FileCheck %s
// CHECK: # Load Command 1
// CHECK: (('command', 2)
diff --git a/test/MC/MachO/variable-exprs.s b/test/MC/MachO/variable-exprs.s
index 8eeb82f0faf7..a7fa45d571a3 100644
--- a/test/MC/MachO/variable-exprs.s
+++ b/test/MC/MachO/variable-exprs.s
@@ -202,10 +202,10 @@ Lt0_x = Lt0_a - Lt0_b
// CHECK-I386: ),
// CHECK-I386: # Symbol 8
// CHECK-I386: (('n_strx', 1)
-// CHECK-I386: ('n_type', 0x1)
+// CHECK-I386: ('n_type', 0xb)
// CHECK-I386: ('n_sect', 0)
// CHECK-I386: ('n_desc', 0)
-// CHECK-I386: ('n_value', 0)
+// CHECK-I386: ('n_value', 4)
// CHECK-I386: ('_string', 'd2')
// CHECK-I386: ),
// CHECK-I386: # Symbol 9
@@ -403,10 +403,10 @@ Lt0_x = Lt0_a - Lt0_b
// CHECK-X86_64: ),
// CHECK-X86_64: # Symbol 8
// CHECK-X86_64: (('n_strx', 1)
-// CHECK-X86_64: ('n_type', 0x1)
+// CHECK-X86_64: ('n_type', 0xb)
// CHECK-X86_64: ('n_sect', 0)
// CHECK-X86_64: ('n_desc', 0)
-// CHECK-X86_64: ('n_value', 0)
+// CHECK-X86_64: ('n_value', 4)
// CHECK-X86_64: ('_string', 'd2')
// CHECK-X86_64: ),
// CHECK-X86_64: # Symbol 9
diff --git a/test/MC/MachO/x86_32-scattered-reloc-fallback.s b/test/MC/MachO/x86_32-scattered-reloc-fallback.s
new file mode 100644
index 000000000000..3de52b4228d0
--- /dev/null
+++ b/test/MC/MachO/x86_32-scattered-reloc-fallback.s
@@ -0,0 +1,27 @@
+// RUN: llvm-mc -triple i386-apple-darwin9 %s -filetype=obj -o - | macho-dump --dump-section-data | FileCheck %s
+
+// rdar://15526046
+
+.text
+.globl _main
+_main:
+ .space 0x01020f55, 0x90
+bug:
+ movl $0, _key64b_9+4
+.section __TEXT, __padding
+ .space 0x515b91, 0
+.data
+ .space 0xa70, 0
+.globl _key64b_9
+_key64b_9:
+ .long 1
+ .long 2
+
+// The movl instruction above should produce this encoding where the address
+// of _key64b_9 is at 0x01537560. This is testing falling back from using a
+// scattered relocation to a normal relocation because the offset from the
+// start of the section is more than 24 bits. But the value of the item being
+// relocated, in this case _key64b_9+4, must still be correct in the instruction.
+// 01020f55 c7056475530100000000 movl $0x0, 0x1537564
+
+// CHECK: 90c70564 75530100 000000')
diff --git a/test/MC/Mips/abicalls.ll b/test/MC/Mips/abicalls.ll
deleted file mode 100644
index 7b98b02d05ad..000000000000
--- a/test/MC/Mips/abicalls.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-;
-; When the assembler is ready a .s file for it will
-; be created.
-
-; Note that EF_MIPS_CPIC is set by -mabicalls which is the default on Linux
-; TODO need to support -mno-abicalls
-
-; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-STATIC %s
-; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | FileCheck -check-prefix=CHECK-PIC %s
-; RUN: llc -filetype=asm -mtriple mips64el-unknown-linux -mcpu=mips64 -relocation-model=static %s -o - | FileCheck -check-prefix=CHECK-PIC %s
-
-; CHECK-STATIC: .abicalls
-; CHECK-STATIC-NEXT: pic0
-; CHECK-PIC: .abicalls
-; CHECK-PIC-NOT: pic0
diff --git a/test/MC/Mips/cfi.s b/test/MC/Mips/cfi.s
new file mode 100644
index 000000000000..a3247b5479a0
--- /dev/null
+++ b/test/MC/Mips/cfi.s
@@ -0,0 +1,13 @@
+# RUN: llvm-mc %s -triple=mips-unknown-unknown -show-encoding -mcpu=mips32 | \
+# RUN: FileCheck %s
+# RUN: llvm-mc %s -triple=mips64-unknown-unknown -show-encoding -mcpu=mips64 | \
+# RUN: FileCheck %s
+
+# Check that we can accept register names in CFI directives and that they are
+# canonicalised to their DWARF register numbers.
+
+ .cfi_startproc # CHECK: .cfi_startproc
+ .cfi_register $6, $5 # CHECK: .cfi_register 6, 5
+ .cfi_def_cfa $fp, 8 # CHECK: .cfi_def_cfa 30, 8
+ .cfi_def_cfa $2, 16 # CHECK: .cfi_def_cfa 2, 16
+ .cfi_endproc # CHECK: .cfi_endproc
diff --git a/test/MC/Mips/cpload-bad.s b/test/MC/Mips/cpload-bad.s
new file mode 100644
index 000000000000..7d186f66f728
--- /dev/null
+++ b/test/MC/Mips/cpload-bad.s
@@ -0,0 +1,15 @@
+# RUN: not llvm-mc %s -arch=mips -mcpu=mips32r2 2>%t1
+# RUN: FileCheck %s < %t1 -check-prefix=ASM
+
+ .text
+ .option pic2
+ .set reorder
+ .cpload $25
+# ASM: :[[@LINE-1]]:9: warning: .cpload in reorder section
+ .set noreorder
+ .cpload $32
+# ASM: :[[@LINE-1]]:17: error: invalid register
+ .cpload $foo
+# ASM: :[[@LINE-1]]:17: error: expected register containing function address
+ .cpload bar
+# ASM: :[[@LINE-1]]:17: error: expected register containing function address
diff --git a/test/MC/Mips/cpload.s b/test/MC/Mips/cpload.s
new file mode 100644
index 000000000000..bc5e79787ba3
--- /dev/null
+++ b/test/MC/Mips/cpload.s
@@ -0,0 +1,33 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -filetype=obj -o -| \
+# RUN: llvm-objdump -d -r -arch=mips - | \
+# RUN: FileCheck %s -check-prefix=OBJ
+
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64r2 -filetype=obj -o -| \
+# RUN: llvm-objdump -d -r -arch=mips - | \
+# RUN: FileCheck %s -check-prefix=OBJ64
+
+# ASM: .text
+# ASM: .option pic2
+# ASM: .set noreorder
+# ASM: .cpload $25
+# ASM: .set reorder
+
+# OBJ: .text
+# OBJ: lui $gp, 0
+# OBJ: R_MIPS_HI16 _gp_disp
+# OBJ: addiu $gp, $gp, 0
+# OBJ: R_MIPS_LO16 _gp_disp
+# OBJ: addu $gp, $gp, $25
+
+# OBJ64: .text
+# OBJ64-NOT: lui $gp, 0
+# OBJ64-NOT: addiu $gp, $gp, 0
+# OBJ64-NOT: addu $gp, $gp, $25
+
+ .text
+ .option pic2
+ .set noreorder
+ .cpload $25
+ .set reorder
diff --git a/test/MC/Mips/cpsetup-bad.s b/test/MC/Mips/cpsetup-bad.s
new file mode 100644
index 000000000000..09252a1310ed
--- /dev/null
+++ b/test/MC/Mips/cpsetup-bad.s
@@ -0,0 +1,14 @@
+# RUN: not llvm-mc %s -triple mips64-unknown-unknown 2>%t1
+# RUN: FileCheck %s < %t1 -check-prefix=ASM
+
+ .text
+ .option pic2
+t1:
+ .cpsetup $bar, 8, __cerror
+# ASM: :[[@LINE-1]]:18: error: expected register containing function address
+ .cpsetup $33, 8, __cerror
+# ASM: :[[@LINE-1]]:18: error: invalid register
+ .cpsetup $31, foo, __cerror
+# ASM: :[[@LINE-1]]:23: error: expected save register or stack offset
+ .cpsetup $31, $32, __cerror
+# ASM: :[[@LINE-1]]:23: error: invalid register
diff --git a/test/MC/Mips/cpsetup.s b/test/MC/Mips/cpsetup.s
new file mode 100644
index 000000000000..a21a1e3b2a3e
--- /dev/null
+++ b/test/MC/Mips/cpsetup.s
@@ -0,0 +1,78 @@
+# RUN: llvm-mc -triple mips64-unknown-unknown -mattr=-n64,+o32 -filetype=obj -o - %s | \
+# RUN: llvm-objdump -d -r -arch=mips64 - | \
+# RUN: FileCheck -check-prefix=O32 %s
+
+# RUN: llvm-mc -triple mips64-unknown-unknown -mattr=-n64,+o32 %s | \
+# RUN: FileCheck -check-prefix=ASM %s
+
+# RUN: llvm-mc -triple mips64-unknown-unknown -mattr=-n64,+n32 -filetype=obj -o - %s | \
+# RUN: llvm-objdump -d -r -arch=mips64 - | \
+# RUN: FileCheck -check-prefix=NXX -check-prefix=N32 %s
+
+# RUN: llvm-mc -triple mips64-unknown-unknown -mattr=-n64,+n32 %s | \
+# RUN: FileCheck -check-prefix=ASM %s
+
+# RUN: llvm-mc -triple mips64-unknown-unknown %s -filetype=obj -o - | \
+# RUN: llvm-objdump -d -r -arch=mips64 - | \
+# RUN: FileCheck -check-prefix=NXX -check-prefix=N64 %s
+
+# RUN: llvm-mc -triple mips64-unknown-unknown %s | \
+# RUN: FileCheck -check-prefix=ASM %s
+
+ .text
+ .option pic2
+t1:
+ .cpsetup $25, 8, __cerror
+
+
+# O32-NOT: __cerror
+
+# FIXME: Direct object emission for N32 is still under development.
+# N32 doesn't allow 3 operations to be specified in the same relocation
+# record like N64 does.
+
+# NXX: sd $gp, 8($sp)
+# NXX: lui $gp, 0
+# NXX: R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_HI16 __cerror
+# NXX: addiu $gp, $gp, 0
+# NXX: R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_LO16 __cerror
+# N32: addu $gp, $gp, $25
+# N64: daddu $gp, $gp, $25
+
+# ASM: .cpsetup $25, 8, __cerror
+
+t2:
+
+ .cpsetup $25, $2, __cerror
+
+# O32-NOT: __cerror
+
+# FIXME: Direct object emission for N32 is still under development.
+# N32 doesn't allow 3 operations to be specified in the same relocation
+# record like N64 does.
+
+# NXX: move $2, $gp
+# NXX: lui $gp, 0
+# NXX: R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_HI16 __cerror
+# NXX: addiu $gp, $gp, 0
+# NXX: R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_LO16 __cerror
+# N32: addu $gp, $gp, $25
+# N64: daddu $gp, $gp, $25
+
+# ASM: .cpsetup $25, $2, __cerror
+
+t3:
+ .option pic0
+ nop
+ .cpsetup $25, 8, __cerror
+ nop
+
+# Testing that .cpsetup expands to nothing in this case
+# by checking that the next instruction after the first
+# nop is also a 'nop'.
+# NXX: nop
+# NXX-NEXT: nop
+
+# ASM: nop
+# ASM: .cpsetup $25, 8, __cerror
+# ASM: nop
diff --git a/test/MC/Mips/do_switch.ll b/test/MC/Mips/do_switch.ll
deleted file mode 100644
index 7eda1b41d18c..000000000000
--- a/test/MC/Mips/do_switch.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; This test case will cause an internal EK_GPRel64BlockAddress to be
-; produced. This was not handled for direct object and an assertion
-; to occur. This is a variation on test case test/CodeGen/Mips/do_switch.ll
-
-; RUN: llc < %s -filetype=obj -march=mips -relocation-model=static
-
-; RUN: llc < %s -filetype=obj -march=mips -relocation-model=pic
-
-; RUN: llc < %s -filetype=obj -march=mips64 -relocation-model=pic -mcpu=mips64 -mattr=n64
-
-define i32 @main() nounwind readnone {
-entry:
- %x = alloca i32, align 4 ; <i32*> [#uses=2]
- store volatile i32 2, i32* %x, align 4
- %0 = load volatile i32* %x, align 4 ; <i32> [#uses=1]
-
- switch i32 %0, label %bb4 [
- i32 0, label %bb5
- i32 1, label %bb1
- i32 2, label %bb2
- i32 3, label %bb3
- ]
-
-bb1: ; preds = %entry
- ret i32 2
-
-bb2: ; preds = %entry
- ret i32 0
-
-bb3: ; preds = %entry
- ret i32 3
-
-bb4: ; preds = %entry
- ret i32 4
-
-bb5: ; preds = %entry
- ret i32 1
-}
-
diff --git a/test/MC/Mips/do_switch1.s b/test/MC/Mips/do_switch1.s
new file mode 100644
index 000000000000..331a4e25a829
--- /dev/null
+++ b/test/MC/Mips/do_switch1.s
@@ -0,0 +1,75 @@
+// This test case will cause an internal EK_GPRel64BlockAddress to be
+// produced. This was not handled for direct object emission and caused an
+// assertion to occur. This is a variation on test case test/CodeGen/Mips/do_switch.ll
+
+// RUN: llvm-mc < %s -filetype=obj -triple=mips-pc-linux -relocation-model=static
+
+ .text
+ .abicalls
+ .option pic0
+ .section .mdebug.abi32,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/do_switch.ll"
+ .text
+ .globl main
+ .align 2
+ .type main,@function
+ .set nomips16
+ .ent main
+main: # @main
+ .frame $sp,8,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ addiu $sp, $sp, -8
+ addiu $1, $zero, 2
+ sw $1, 4($sp)
+ lw $2, 4($sp)
+ sltiu $1, $2, 4
+ bnez $1, $BB0_2
+ nop
+$BB0_1: # %bb4
+ addiu $2, $zero, 4
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_2: # %entry
+ sll $1, $2, 2
+ lui $2, %hi($JTI0_0)
+ addu $1, $1, $2
+ lw $1, %lo($JTI0_0)($1)
+ jr $1
+ nop
+$BB0_3: # %bb5
+ addiu $2, $zero, 1
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_4: # %bb1
+ addiu $2, $zero, 2
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_5: # %bb2
+ addiu $2, $zero, 0
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_6: # %bb3
+ addiu $2, $zero, 3
+ jr $ra
+ addiu $sp, $sp, 8
+ .set at
+ .set macro
+ .set reorder
+ .end main
+$tmp0:
+ .size main, ($tmp0)-main
+ .section .rodata,"a",@progbits
+ .align 2
+$JTI0_0:
+ .4byte ($BB0_3)
+ .4byte ($BB0_4)
+ .4byte ($BB0_5)
+ .4byte ($BB0_6)
+
+
+ .text
diff --git a/test/MC/Mips/do_switch2.s b/test/MC/Mips/do_switch2.s
new file mode 100644
index 000000000000..824054f7c874
--- /dev/null
+++ b/test/MC/Mips/do_switch2.s
@@ -0,0 +1,77 @@
+// This test case will cause an internal EK_GPRel64BlockAddress to be
+// produced. This was not handled for direct object emission and caused an
+// assertion to occur. This is a variation on test case test/CodeGen/Mips/do_switch.ll
+
+// RUN: llvm-mc < %s -filetype=obj -triple=mips-pc-linux -relocation-model=pic
+
+ .text
+ .abicalls
+ .section .mdebug.abi32,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/do_switch.ll"
+ .text
+ .globl main
+ .align 2
+ .type main,@function
+ .set nomips16
+ .ent main
+main: # @main
+ .frame $sp,8,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ lui $2, %hi(_gp_disp)
+ addiu $2, $2, %lo(_gp_disp)
+ addiu $sp, $sp, -8
+ addiu $1, $zero, 2
+ sw $1, 4($sp)
+ lw $3, 4($sp)
+ sltiu $1, $3, 4
+ bnez $1, $BB0_2
+ addu $2, $2, $25
+$BB0_1: # %bb4
+ addiu $2, $zero, 4
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_2: # %entry
+ sll $1, $3, 2
+ lw $3, %got($JTI0_0)($2)
+ addu $1, $1, $3
+ lw $1, %lo($JTI0_0)($1)
+ addu $1, $1, $2
+ jr $1
+ nop
+$BB0_3: # %bb5
+ addiu $2, $zero, 1
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_4: # %bb1
+ addiu $2, $zero, 2
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_5: # %bb2
+ addiu $2, $zero, 0
+ jr $ra
+ addiu $sp, $sp, 8
+$BB0_6: # %bb3
+ addiu $2, $zero, 3
+ jr $ra
+ addiu $sp, $sp, 8
+ .set at
+ .set macro
+ .set reorder
+ .end main
+$tmp0:
+ .size main, ($tmp0)-main
+ .section .rodata,"a",@progbits
+ .align 2
+$JTI0_0:
+ .gpword ($BB0_3)
+ .gpword ($BB0_4)
+ .gpword ($BB0_5)
+ .gpword ($BB0_6)
+
+
+ .text
diff --git a/test/MC/Mips/do_switch3.s b/test/MC/Mips/do_switch3.s
new file mode 100644
index 000000000000..02ad08714639
--- /dev/null
+++ b/test/MC/Mips/do_switch3.s
@@ -0,0 +1,82 @@
+// This test case will cause an internal EK_GPRel64BlockAddress to be
+// produced. This was not handled for direct object emission and caused an
+// assertion to occur. This is a variation on test case test/CodeGen/Mips/do_switch.ll
+
+// RUN: llvm-mc < %s -filetype=obj -triple=mips64-pc-linux -relocation-model=pic -mcpu=mips64 -mattr=n64
+
+ .text
+ .abicalls
+ .section .mdebug.abi64,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/do_switch.ll"
+ .text
+ .globl main
+ .align 3
+ .type main,@function
+ .set nomips16
+ .ent main
+main: # @main
+ .frame $sp,16,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ daddiu $sp, $sp, -16
+ lui $1, %hi(%neg(%gp_rel(main)))
+ daddu $2, $1, $25
+ addiu $1, $zero, 2
+ sw $1, 12($sp)
+ lw $1, 12($sp)
+ sltiu $4, $1, 4
+ dsll $3, $1, 32
+ bnez $4, $BB0_2
+ nop
+$BB0_1: # %bb4
+ addiu $2, $zero, 4
+ jr $ra
+ daddiu $sp, $sp, 16
+$BB0_2: # %entry
+ daddiu $1, $2, %lo(%neg(%gp_rel(main)))
+ dsrl $2, $3, 32
+ daddiu $3, $zero, 8
+ dmult $2, $3
+ mflo $2
+ ld $3, %got_page($JTI0_0)($1)
+ daddu $2, $2, $3
+ ld $2, %got_ofst($JTI0_0)($2)
+ daddu $1, $2, $1
+ jr $1
+ nop
+$BB0_3: # %bb5
+ addiu $2, $zero, 1
+ jr $ra
+ daddiu $sp, $sp, 16
+$BB0_4: # %bb1
+ addiu $2, $zero, 2
+ jr $ra
+ daddiu $sp, $sp, 16
+$BB0_5: # %bb2
+ addiu $2, $zero, 0
+ jr $ra
+ daddiu $sp, $sp, 16
+$BB0_6: # %bb3
+ addiu $2, $zero, 3
+ jr $ra
+ daddiu $sp, $sp, 16
+ .set at
+ .set macro
+ .set reorder
+ .end main
+$tmp0:
+ .size main, ($tmp0)-main
+ .section .rodata,"a",@progbits
+ .align 3
+$JTI0_0:
+ .gpdword ($BB0_3)
+ .gpdword ($BB0_4)
+ .gpdword ($BB0_5)
+ .gpdword ($BB0_6)
+
+
+ .text
diff --git a/test/MC/Mips/eh-frame.s b/test/MC/Mips/eh-frame.s
index 167159885d72..d6b9cf0a5405 100644
--- a/test/MC/Mips/eh-frame.s
+++ b/test/MC/Mips/eh-frame.s
@@ -31,7 +31,7 @@ func:
// MIPS32: 00000000
// Version
-// MIPS32: 01
+// MIPS32: 03
// Augmentation String
// MIPS32: 7a5200
@@ -67,7 +67,7 @@ func:
// MIPS32EL: 00000000
// Version
-// MIPS32EL: 01
+// MIPS32EL: 03
// Augmentation String
// MIPS32EL: 7a5200
@@ -103,7 +103,7 @@ func:
// MIPS64: 00000000
// Version
-// MIPS64: 01
+// MIPS64: 03
// Augmentation String
// MIPS64: 7a5200
@@ -141,7 +141,7 @@ func:
// MIPS64EL: 00000000
// Version
-// MIPS64EL: 01
+// MIPS64EL: 03
// Augmentation String
// MIPS64EL: 7a5200
diff --git a/test/MC/Mips/elf-N64.ll b/test/MC/Mips/elf-N64.ll
deleted file mode 100644
index a1ea34a80a71..000000000000
--- a/test/MC/Mips/elf-N64.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc -filetype=obj -march=mips64el -mcpu=mips64 -disable-mips-delay-filler %s -o - | llvm-readobj -r | FileCheck %s
-
-; Check for N64 relocation production.
-;
-; ModuleID = '../hello.c'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v64:64:64-n32"
-target triple = "mips64el-unknown-linux"
-
-@str = private unnamed_addr constant [12 x i8] c"hello world\00"
-
-define i32 @main() nounwind {
-entry:
-; Check that the appropriate relocations were created.
-
-; CHECK: Relocations [
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_HI16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_LO16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_PAGE/R_MIPS_NONE/R_MIPS_NONE
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_OFST/R_MIPS_NONE/R_MIPS_NONE
-; CHECK: ]
-
- %puts = tail call i32 @puts(i8* getelementptr inbounds ([12 x i8]* @str, i64 0, i64 0))
- ret i32 0
-
-}
-declare i32 @puts(i8* nocapture) nounwind
diff --git a/test/MC/Mips/elf-N64.s b/test/MC/Mips/elf-N64.s
new file mode 100644
index 000000000000..bf6ebd730913
--- /dev/null
+++ b/test/MC/Mips/elf-N64.s
@@ -0,0 +1,65 @@
+// RUN: llvm-mc -filetype=obj -triple=mips64el-pc-linux -mcpu=mips64 %s -o - | llvm-readobj -r | FileCheck %s
+// RUN: llvm-mc -filetype=obj -triple=mips64-pc-linux -mcpu=mips64 %s -o - | llvm-readobj -r | FileCheck %s
+
+// Check for N64 relocation production.
+// Check that the appropriate relocations were created.
+
+// CHECK: Relocations [
+// CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_HI16
+// CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_LO16
+// CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_PAGE/R_MIPS_NONE/R_MIPS_NONE
+// CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_OFST/R_MIPS_NONE/R_MIPS_NONE
+// CHECK: ]
+
+
+ .text
+ .abicalls
+ .section .mdebug.abi64,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/elf-N64.ll"
+ .text
+ .globl main
+ .align 3
+ .type main,@function
+ .set nomips16
+ .ent main
+main: # @main
+ .frame $sp,16,$ra
+ .mask 0x00000000,0
+ .fmask 0x90000000,-4
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ daddiu $sp, $sp, -16
+ sd $ra, 8($sp) # 8-byte Folded Spill
+ sd $gp, 0($sp) # 8-byte Folded Spill
+ lui $1, %hi(%neg(%gp_rel(main)))
+ daddu $1, $1, $25
+ daddiu $gp, $1, %lo(%neg(%gp_rel(main)))
+ ld $1, %got_page($str)($gp)
+ daddiu $4, $1, %got_ofst($str)
+ ld $25, %call16(puts)($gp)
+ jalr $25
+ nop
+ addiu $2, $zero, 0
+ ld $gp, 0($sp) # 8-byte Folded Reload
+ ld $ra, 8($sp) # 8-byte Folded Reload
+ daddiu $sp, $sp, 16
+ jr $ra
+ nop
+ .set at
+ .set macro
+ .set reorder
+ .end main
+$tmp0:
+ .size main, ($tmp0)-main
+
+ .type $str,@object # @str
+ .section .rodata.str1.4,"aMS",@progbits,1
+ .align 2
+$str:
+ .asciz "hello world"
+ .size $str, 12
+
+
+ .text
diff --git a/test/MC/Mips/elf-gprel-32-64.ll b/test/MC/Mips/elf-gprel-32-64.ll
deleted file mode 100644
index 4057eb823a5a..000000000000
--- a/test/MC/Mips/elf-gprel-32-64.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc -filetype=obj -march=mips64el -mcpu=mips64 %s -o - \
-; RUN: | llvm-readobj -r \
-; RUN: | FileCheck %s
-
-define i32 @test(i32 %c) nounwind {
-entry:
- switch i32 %c, label %sw.default [
- i32 0, label %sw.bb
- i32 1, label %sw.bb2
- i32 2, label %sw.bb5
- i32 3, label %sw.bb8
- ]
-
-sw.bb:
- br label %return
-sw.bb2:
- br label %return
-sw.bb5:
- br label %return
-sw.bb8:
- br label %return
-sw.default:
- br label %return
-
-return:
- %retval.0 = phi i32 [ -1, %sw.default ], [ 7, %sw.bb8 ], [ 2, %sw.bb5 ], [ 3, %sw.bb2 ], [ 1, %sw.bb ]
- ret i32 %retval.0
-}
-
-; Check that the appropriate relocations were created.
-
-; R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
-; CHECK: Relocations [
-; CHECK: Section ({{[a-z0-9]+}}) .rela.rodata {
-; CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
-; CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
-; CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
-; CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
-; CHECK-NEXT: }
-; CHECK-NEXT: ]
diff --git a/test/MC/Mips/elf-gprel-32-64.s b/test/MC/Mips/elf-gprel-32-64.s
new file mode 100644
index 000000000000..2f5ac6652a33
--- /dev/null
+++ b/test/MC/Mips/elf-gprel-32-64.s
@@ -0,0 +1,86 @@
+// RUN: llvm-mc -filetype=obj -triple=mips64el-pc-linux -mcpu=mips64 %s -o - \
+// RUN: | llvm-readobj -r \
+// RUN: | FileCheck %s
+// RUN: llvm-mc -filetype=obj -triple=mips64-pc-linux -mcpu=mips64 %s -o - \
+// RUN: | llvm-readobj -r \
+// RUN: | FileCheck %s
+
+// Check that the appropriate relocations were created.
+
+// R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
+// CHECK: Relocations [
+// CHECK: Section ({{[a-z0-9]+}}) .rela.rodata {
+// CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
+// CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
+// CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
+// CHECK-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_GPREL32/R_MIPS_64/R_MIPS_NONE
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+
+ .text
+ .abicalls
+ .section .mdebug.abi64,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/elf-gprel-32-64.ll"
+ .text
+ .globl test
+ .align 3
+ .type test,@function
+ .set nomips16
+ .ent test
+test: # @test
+ .frame $sp,0,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ lui $1, %hi(%neg(%gp_rel(test)))
+ daddu $2, $1, $25
+ sltiu $1, $4, 4
+ dsll $3, $4, 32
+ bnez $1, $BB0_2
+ nop
+$BB0_1: # %sw.default
+ b $BB0_3
+ addiu $2, $zero, -1
+$BB0_2: # %entry
+ daddiu $1, $2, %lo(%neg(%gp_rel(test)))
+ dsrl $3, $3, 32
+ daddiu $4, $zero, 8
+ dmult $3, $4
+ mflo $3
+ ld $4, %got_page($JTI0_0)($1)
+ daddu $3, $3, $4
+ ld $3, %got_ofst($JTI0_0)($3)
+ daddu $1, $3, $1
+ jr $1
+ addiu $2, $zero, 1
+$BB0_3: # %return
+ jr $ra
+ nop
+$BB0_4: # %sw.bb2
+ jr $ra
+ addiu $2, $zero, 3
+$BB0_5: # %sw.bb5
+ jr $ra
+ addiu $2, $zero, 2
+$BB0_6: # %sw.bb8
+ jr $ra
+ addiu $2, $zero, 7
+ .set at
+ .set macro
+ .set reorder
+ .end test
+$tmp0:
+ .size test, ($tmp0)-test
+ .section .rodata,"a",@progbits
+ .align 3
+$JTI0_0:
+ .gpdword ($BB0_3)
+ .gpdword ($BB0_4)
+ .gpdword ($BB0_5)
+ .gpdword ($BB0_6)
+
+
+ .text
diff --git a/test/MC/Mips/elf-reginfo.ll b/test/MC/Mips/elf-reginfo.ll
deleted file mode 100644
index a255af931d83..000000000000
--- a/test/MC/Mips/elf-reginfo.ll
+++ /dev/null
@@ -1,34 +0,0 @@
- ; RUN: llc -filetype=obj -march=mips64el -mcpu=mips64 %s -o - \
- ; RUN: | llvm-readobj -s | FileCheck --check-prefix=CHECK_64 %s
- ; RUN: llc -filetype=obj -march=mipsel -mcpu=mips32 %s -o - \
- ; RUN: | llvm-readobj -s | FileCheck --check-prefix=CHECK_32 %s
-
-; Check for register information sections.
-;
-
-@str = private unnamed_addr constant [12 x i8] c"hello world\00"
-
-define i32 @main() nounwind {
-entry:
-; Check that the appropriate relocations were created.
-
-; check for .MIPS.options
-; CHECK_64: Sections [
-; CHECK_64: Section {
-; CHECK_64: Name: .MIPS.options
-; CHECK_64-NEXT: Type: SHT_MIPS_OPTIONS
-; CHECK_64-NEXT: Flags [ (0x8000002)
-
-; check for .reginfo
-; CHECK_32: Sections [
-; CHECK_32: Section {
-; CHECK_32: Name: .reginfo
-; CHECK_32-NEXT: Type: SHT_MIPS_REGINFO
-; CHECK_32-NEXT: Flags [ (0x2)
-
-
- %puts = tail call i32 @puts(i8* getelementptr inbounds ([12 x i8]* @str, i64 0, i64 0))
- ret i32 0
-
-}
-declare i32 @puts(i8* nocapture) nounwind
diff --git a/test/MC/Mips/elf-relsym.ll b/test/MC/Mips/elf-relsym.ll
deleted file mode 100644
index 6da926273ab7..000000000000
--- a/test/MC/Mips/elf-relsym.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc -filetype=obj -mtriple mipsel-unknown-linux %s -o - | llvm-readobj -t | FileCheck %s
-
-; Check that the appropriate symbols were created.
-
-; CHECK: Symbols [
-; CHECK: Symbol {
-; CHECK: Name: $.str
-; CHECK: }
-; CHECK: Symbol {
-; CHECK: Name: $.str1
-; CHECK: }
-; CHECK: Symbol {
-; CHECK: Name: $CPI0_0
-; CHECK: }
-; CHECK: Symbol {
-; CHECK: Name: $CPI0_1
-; CHECK: }
-; CHECK: ]
-
-@.str = private unnamed_addr constant [6 x i8] c"abcde\00", align 1
-@gc1 = external global i8*
-@.str1 = private unnamed_addr constant [5 x i8] c"fghi\00", align 1
-@gc2 = external global i8*
-@gd1 = external global double
-@gd2 = external global double
-
-define void @foo1() nounwind {
-entry:
- store i8* getelementptr inbounds ([6 x i8]* @.str, i32 0, i32 0), i8** @gc1, align 4
- store i8* getelementptr inbounds ([5 x i8]* @.str1, i32 0, i32 0), i8** @gc2, align 4
- %0 = load double* @gd1, align 8
- %add = fadd double %0, 2.500000e+00
- store double %add, double* @gd1, align 8
- %1 = load double* @gd2, align 8
- %add1 = fadd double %1, 4.500000e+00
- store double %add1, double* @gd2, align 8
- ret void
-}
-
diff --git a/test/MC/Mips/elf-relsym.s b/test/MC/Mips/elf-relsym.s
new file mode 100644
index 000000000000..d19065e0cd70
--- /dev/null
+++ b/test/MC/Mips/elf-relsym.s
@@ -0,0 +1,87 @@
+// RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux %s -o - | llvm-readobj -t | FileCheck %s
+
+// Check that the appropriate symbols were created.
+
+// CHECK: Symbols [
+// CHECK: Symbol {
+// CHECK: Name: $.str
+// CHECK: }
+// CHECK: Symbol {
+// CHECK: Name: $.str1
+// CHECK: }
+// CHECK: Symbol {
+// CHECK: Name: $CPI0_0
+// CHECK: }
+// CHECK: Symbol {
+// CHECK: Name: $CPI0_1
+// CHECK: }
+// CHECK: ]
+
+ .text
+ .abicalls
+ .section .mdebug.abi32,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/elf-relsym.ll"
+ .section .rodata.cst8,"aM",@progbits,8
+ .align 3
+$CPI0_0:
+ .8byte 4612811918334230528 # double 2.5
+$CPI0_1:
+ .8byte 4616752568008179712 # double 4.5
+ .text
+ .globl foo1
+ .align 2
+ .type foo1,@function
+ .set nomips16
+ .ent foo1
+foo1: # @foo1
+ .frame $sp,0,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ lui $2, %hi(_gp_disp)
+ addiu $2, $2, %lo(_gp_disp)
+ addu $1, $2, $25
+ lw $2, %got($.str)($1)
+ addiu $2, $2, %lo($.str)
+ lw $3, %got(gc1)($1)
+ sw $2, 0($3)
+ lw $2, %got($.str1)($1)
+ addiu $2, $2, %lo($.str1)
+ lw $3, %got(gc2)($1)
+ sw $2, 0($3)
+ lw $2, %got($CPI0_0)($1)
+ ldc1 $f0, %lo($CPI0_0)($2)
+ lw $2, %got(gd1)($1)
+ ldc1 $f2, 0($2)
+ lw $3, %got($CPI0_1)($1)
+ ldc1 $f4, %lo($CPI0_1)($3)
+ lw $1, %got(gd2)($1)
+ add.d $f0, $f2, $f0
+ sdc1 $f0, 0($2)
+ ldc1 $f0, 0($1)
+ add.d $f0, $f0, $f4
+ jr $ra
+ sdc1 $f0, 0($1)
+ .set at
+ .set macro
+ .set reorder
+ .end foo1
+$tmp0:
+ .size foo1, ($tmp0)-foo1
+
+ .type $.str,@object # @.str
+ .section .rodata.str1.1,"aMS",@progbits,1
+$.str:
+ .asciz "abcde"
+ .size $.str, 6
+
+ .type $.str1,@object # @.str1
+$.str1:
+ .asciz "fghi"
+ .size $.str1, 5
+
+
+ .text
diff --git a/test/MC/Mips/elf-tls.ll b/test/MC/Mips/elf-tls.ll
deleted file mode 100644
index bcce3d515e7d..000000000000
--- a/test/MC/Mips/elf-tls.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc -filetype=obj -mtriple mipsel-unknown-linux %s -o - | llvm-readobj -r | FileCheck %s
-
-; Check that the appropriate relocations were created.
-
-; CHECK: Relocations [
-; CHECK: Section (2) .rel.text {
-; CHECK: R_MIPS_TLS_LDM
-; CHECK: R_MIPS_TLS_DTPREL_HI16
-; CHECK: R_MIPS_TLS_DTPREL_LO16
-; CHECK: }
-; CHECK: ]
-
-@t1 = thread_local global i32 0, align 4
-
-define i32 @f1() nounwind {
-entry:
- %tmp = load i32* @t1, align 4
- ret i32 %tmp
-
-}
-
-
-@t2 = external thread_local global i32
-
-define i32 @f2() nounwind {
-entry:
- %tmp = load i32* @t2, align 4
- ret i32 %tmp
-
-}
-
-@f3.i = internal thread_local unnamed_addr global i32 1, align 4
-
-define i32 @f3() nounwind {
-entry:
- %0 = load i32* @f3.i, align 4
- %inc = add nsw i32 %0, 1
- store i32 %inc, i32* @f3.i, align 4
- ret i32 %inc
-}
diff --git a/test/MC/Mips/elf-tls.s b/test/MC/Mips/elf-tls.s
new file mode 100644
index 000000000000..d32a699fb08f
--- /dev/null
+++ b/test/MC/Mips/elf-tls.s
@@ -0,0 +1,134 @@
+// RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux %s -o - | llvm-readobj -r | FileCheck %s
+
+// Check that the appropriate relocations were created.
+
+// CHECK: Relocations [
+// CHECK: Section (2) .rel.text {
+// CHECK: R_MIPS_TLS_LDM
+// CHECK: R_MIPS_TLS_DTPREL_HI16
+// CHECK: R_MIPS_TLS_DTPREL_LO16
+// CHECK: }
+// CHECK: ]
+
+ .text
+ .abicalls
+ .section .mdebug.abi32,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/elf-tls.ll"
+ .text
+ .globl f1
+ .align 2
+ .type f1,@function
+ .set nomips16
+ .ent f1
+f1: # @f1
+ .frame $sp,24,$ra
+ .mask 0x80000000,-4
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ lui $2, %hi(_gp_disp)
+ addiu $2, $2, %lo(_gp_disp)
+ addiu $sp, $sp, -24
+ sw $ra, 20($sp) # 4-byte Folded Spill
+ addu $gp, $2, $25
+ lw $25, %call16(__tls_get_addr)($gp)
+ jalr $25
+ addiu $4, $gp, %tlsgd(t1)
+ lw $2, 0($2)
+ lw $ra, 20($sp) # 4-byte Folded Reload
+ jr $ra
+ addiu $sp, $sp, 24
+ .set at
+ .set macro
+ .set reorder
+ .end f1
+$tmp0:
+ .size f1, ($tmp0)-f1
+
+ .globl f2
+ .align 2
+ .type f2,@function
+ .set nomips16
+ .ent f2
+f2: # @f2
+ .frame $sp,24,$ra
+ .mask 0x80000000,-4
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ lui $2, %hi(_gp_disp)
+ addiu $2, $2, %lo(_gp_disp)
+ addiu $sp, $sp, -24
+ sw $ra, 20($sp) # 4-byte Folded Spill
+ addu $gp, $2, $25
+ lw $25, %call16(__tls_get_addr)($gp)
+ jalr $25
+ addiu $4, $gp, %tlsgd(t2)
+ lw $2, 0($2)
+ lw $ra, 20($sp) # 4-byte Folded Reload
+ jr $ra
+ addiu $sp, $sp, 24
+ .set at
+ .set macro
+ .set reorder
+ .end f2
+$tmp1:
+ .size f2, ($tmp1)-f2
+
+ .globl f3
+ .align 2
+ .type f3,@function
+ .set nomips16
+ .ent f3
+f3: # @f3
+ .frame $sp,24,$ra
+ .mask 0x80000000,-4
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ lui $2, %hi(_gp_disp)
+ addiu $2, $2, %lo(_gp_disp)
+ addiu $sp, $sp, -24
+ sw $ra, 20($sp) # 4-byte Folded Spill
+ addu $gp, $2, $25
+ lw $25, %call16(__tls_get_addr)($gp)
+ jalr $25
+ addiu $4, $gp, %tlsldm(f3.i)
+ lui $1, %dtprel_hi(f3.i)
+ addu $1, $1, $2
+ lw $2, %dtprel_lo(f3.i)($1)
+ addiu $2, $2, 1
+ sw $2, %dtprel_lo(f3.i)($1)
+ lw $ra, 20($sp) # 4-byte Folded Reload
+ jr $ra
+ addiu $sp, $sp, 24
+ .set at
+ .set macro
+ .set reorder
+ .end f3
+$tmp2:
+ .size f3, ($tmp2)-f3
+
+ .type t1,@object # @t1
+ .section .tbss,"awT",@nobits
+ .globl t1
+ .align 2
+t1:
+ .4byte 0 # 0x0
+ .size t1, 4
+
+ .type f3.i,@object # @f3.i
+ .section .tdata,"awT",@progbits
+ .align 2
+f3.i:
+ .4byte 1 # 0x1
+ .size f3.i, 4
+
+
+ .text
diff --git a/test/MC/Mips/elf_eflags.ll b/test/MC/Mips/elf_eflags.ll
deleted file mode 100644
index 9432dcf59c32..000000000000
--- a/test/MC/Mips/elf_eflags.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; This tests ELF EFLAGS setting with direct object.
-; When the assembler is ready a .s file for it will
-; be created.
-
-; Non-shared (static) is the absence of pic and or cpic.
-
-; EF_MIPS_NOREORDER (0x00000001) is always on by default currently
-; EF_MIPS_PIC (0x00000002)
-; EF_MIPS_CPIC (0x00000004) - See note below
-; EF_MIPS_ABI2 (0x00000020) - n32 not tested yet
-; EF_MIPS_ARCH_32 (0x50000000)
-; EF_MIPS_ARCH_64 (0x60000000)
-; EF_MIPS_ARCH_32R2 (0x70000000)
-; EF_MIPS_ARCH_64R2 (0x80000000)
-
-; Note that EF_MIPS_CPIC is set by -mabicalls which is the default on Linux
-; TODO need to support -mno-abicalls
-
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32 -relocation-model=static %s -print-hack-directives -o - | FileCheck -check-prefix=CHECK-BE32 %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32 -print-hack-directives %s -o - | FileCheck -check-prefix=CHECK-BE32_PIC %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -relocation-model=static %s -print-hack-directives -o - | FileCheck -check-prefix=CHECK-BE32R2 %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -print-hack-directives %s -o - | FileCheck -check-prefix=CHECK-BE32R2_PIC %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+micromips -relocation-model=static -print-hack-directives %s -o - | FileCheck -check-prefix=CHECK-BE32R2-MICROMIPS %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+micromips -print-hack-directives %s -o - | FileCheck -check-prefix=CHECK-BE32R2-MICROMIPS_PIC %s
-
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64 -relocation-model=static %s -print-hack-directives -o - | FileCheck -check-prefix=CHECK-BE64 %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64 %s -print-hack-directives -o - | FileCheck -check-prefix=CHECK-BE64_PIC %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64r2 -relocation-model=static -print-hack-directives %s -o - | FileCheck -check-prefix=CHECK-BE64R2 %s
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips64r2 -print-hack-directives %s -o - | FileCheck -check-prefix=CHECK-BE64R2_PIC %s
-
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+mips16 -relocation-model=pic -print-hack-directives %s -o - | FileCheck -check-prefix=CHECK-LE32R2-MIPS16 %s
-
-; 32(R1) bit with NO_REORDER and static
-; CHECK-BE32: .mips_hack_elf_flags 0x50001005
-;
-; 32(R1) bit with NO_REORDER and PIC
-; CHECK-BE32_PIC: .mips_hack_elf_flags 0x50001007
-;
-; 32R2 bit with NO_REORDER and static
-; CHECK-BE32R2: .mips_hack_elf_flags 0x70001005
-;
-; 32R2 bit with NO_REORDER and PIC
-; CHECK-BE32R2_PIC: .mips_hack_elf_flags 0x70001007
-;
-; 32R2 bit MICROMIPS with NO_REORDER and static
-; CHECK-BE32R2-MICROMIPS: .mips_hack_elf_flags 0x72001005
-;
-; 32R2 bit MICROMIPS with NO_REORDER and PIC
-; CHECK-BE32R2-MICROMIPS_PIC: .mips_hack_elf_flags 0x72001007
-;
-; 64(R1) bit with NO_REORDER and static
-; CHECK-BE64: .mips_hack_elf_flags 0x60000005
-;
-; 64(R1) bit with NO_REORDER and PIC
-; CHECK-BE64_PIC: .mips_hack_elf_flags 0x60000007
-;
-; 64R2 bit with NO_REORDER and static
-; CHECK-BE64R2: .mips_hack_elf_flags 0x80000005
-;
-; 64R2 bit with NO_REORDER and PIC
-; CHECK-BE64R2_PIC: .mips_hack_elf_flags 0x80000007
-;
-; 32R2 bit MIPS16 with PIC
-; CHECK-LE32R2-MIPS16: .mips_hack_elf_flags 0x74001006
-
-define i32 @main() nounwind {
-entry:
- ret i32 0
-}
diff --git a/test/MC/Mips/elf_eflags.s b/test/MC/Mips/elf_eflags.s
index c56596444aea..1f28ee0ff89c 100644
--- a/test/MC/Mips/elf_eflags.s
+++ b/test/MC/Mips/elf_eflags.s
@@ -1,5 +1,119 @@
-// RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux %s -o -| llvm-readobj -h | FileCheck %s
+# These *MUST* match the output of 'gcc -c' compiled with the same triple and
+# corresponding options (-mcpu=mips32 -> -mips32 for example).
- .mips_hack_elf_flags 0x50001005
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r6 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R6 %s
+# MIPSEL-MIPS64R6: Flags [ (0xA0000406)
-// CHECK: Flags [ (0x50001005)
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r6 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R6-NAN2008 %s
+# MIPSEL-MIPS64R6-NAN2008: Flags [ (0xA0000406)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r2 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2 %s
+# MIPSEL-MIPS64R2: Flags [ (0x80000006)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r2 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64R2-NAN2008 %s
+# MIPSEL-MIPS64R2-NAN2008: Flags [ (0x80000406)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64 %s
+# MIPSEL-MIPS64: Flags [ (0x60000006)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS64-NAN2008 %s
+# MIPSEL-MIPS64-NAN2008: Flags [ (0x60000406)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32r6 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS32R6 %s
+# MIPSEL-MIPS32R6: Flags [ (0x90001404)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32r6 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS32R6-NAN2008 %s
+# MIPSEL-MIPS32R6-NAN2008: Flags [ (0x90001404)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32r2 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS32R2 %s
+# MIPSEL-MIPS32R2: Flags [ (0x70001004)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS32R2-NAN2008 %s
+# MIPSEL-MIPS32R2-NAN2008: Flags [ (0x70001404)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS32 %s
+# MIPSEL-MIPS32: Flags [ (0x50001004)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS32-NAN2008 %s
+# MIPSEL-MIPS32-NAN2008: Flags [ (0x50001404)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips64r2 -mattr=-n64,n32 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N32 %s
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=-n64,n32 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N32 %s
+# MIPS64EL-MIPS64R2-N32: Flags [ (0x80000024)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=-n64,n32,+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N32-NAN2008 %s
+# MIPS64EL-MIPS64R2-N32-NAN2008: Flags [ (0x80000424)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 -mattr=-n64,n32 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N32 %s
+# MIPS64EL-MIPS64-N32: Flags [ (0x60000024)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 -mattr=-n64,n32,+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N32-NAN2008 %s
+# MIPS64EL-MIPS64-N32-NAN2008: Flags [ (0x60000424)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=n64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N64 %s
+# MIPS64EL-MIPS64R2-N64: Flags [ (0x80000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=n64,+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-N64-NAN2008 %s
+# MIPS64EL-MIPS64R2-N64-NAN2008: Flags [ (0x80000406)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -mattr=n64 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N64 %s
+# MIPS64EL-MIPS64-N64: Flags [ (0x60000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -mattr=n64,+nan2008 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-N64-NAN2008 %s
+# MIPS64EL-MIPS64-N64-NAN2008: Flags [ (0x60000406)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=-n64,o32 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-O32 %s
+# MIPS64EL-MIPS64R2-O32: Flags [ (0x80001104)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=-n64,o32,+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-O32-NAN2008 %s
+# MIPS64EL-MIPS64R2-O32-NAN2008: Flags [ (0x80001504)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips5 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS5 %s
+# MIPS5: Flags [ (0x40000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips5 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS5-NAN2008 %s
+# MIPS5-NAN2008: Flags [ (0x40000406)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips4 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS4 %s
+# MIPS4: Flags [ (0x30000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips4 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS4-NAN2008 %s
+# MIPS4-NAN2008: Flags [ (0x30000406)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips3 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS3 %s
+# MIPS3: Flags [ (0x20000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64-unknown-linux -mcpu=mips3 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS3-NAN2008 %s
+# MIPS3-NAN2008: Flags [ (0x20000406)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips2 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS2 %s
+# MIPSEL-MIPS2: Flags [ (0x10001004)
+
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips2 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPSEL-MIPS2-NAN2008 %s
+# MIPSEL-MIPS2-NAN2008: Flags [ (0x10001404)
+
+# RUN: llvm-mc -filetype=obj -triple mips-unknown-linux -mcpu=mips1 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS1 %s
+# MIPS1: Flags [ (0x1004)
+
+# RUN: llvm-mc -filetype=obj -triple mips-unknown-linux -mcpu=mips1 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS1-NAN2008 %s
+# MIPS1-NAN2008: Flags [ (0x1404)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -mattr=-n64,o32 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-O32 %s
+# MIPS64EL-MIPS64-O32: Flags [ (0x60001104)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -mattr=-n64,o32,+nan2008 -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-O32-NAN2008 %s
+# MIPS64EL-MIPS64-O32-NAN2008: Flags [ (0x60001504)
+
+# Default ABI for MIPS64 is N64 as opposed to GCC/GAS (N32)
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2 %s
+# MIPS64EL-MIPS64R2: Flags [ (0x80000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64r2 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64R2-NAN2008 %s
+# MIPS64EL-MIPS64R2-NAN2008: Flags [ (0x80000406)
+
+# Default ABI for MIPS64 is N64 as opposed to GCC/GAS (N32)
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64 %s
+# MIPS64EL-MIPS64: Flags [ (0x60000006)
+
+# RUN: llvm-mc -filetype=obj -triple mips64el-unknown-linux -mcpu=mips64 -mattr=+nan2008 %s -o -| llvm-readobj -h | FileCheck --check-prefix=MIPS64EL-MIPS64-NAN2008 %s
+# MIPS64EL-MIPS64-NAN2008: Flags [ (0x60000406)
diff --git a/test/MC/Mips/elf_eflags_abicalls.s b/test/MC/Mips/elf_eflags_abicalls.s
new file mode 100644
index 000000000000..9e9c013fb3f2
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_abicalls.s
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| llvm-readobj -h | FileCheck %s
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK: Flags [ (0x50001006)
+
+.abicalls
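+
+# A worked decode of the value above, assuming the standard EF_MIPS_* flag
+# encodings (illustrative only):
+#   0x50001006 = EF_MIPS_ARCH_32 (0x50000000) | EF_MIPS_ABI_O32 (0x00001000)
+#              | EF_MIPS_CPIC (0x00000004) | EF_MIPS_PIC (0x00000002)
+# .abicalls adds EF_MIPS_PIC on top of the default EF_MIPS_CPIC; -mattr=+nan2008
+# would additionally set EF_MIPS_NAN2008 (0x00000400), as in elf_eflags.s above.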
diff --git a/test/MC/Mips/elf_eflags_micromips.s b/test/MC/Mips/elf_eflags_micromips.s
new file mode 100644
index 000000000000..9b7de12d6e79
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_micromips.s
@@ -0,0 +1,8 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| llvm-readobj -h | FileCheck %s
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK: Flags [ (0x52001004)
+
+ .set micromips
+f:
+ nop
diff --git a/test/MC/Mips/elf_eflags_mips16.s b/test/MC/Mips/elf_eflags_mips16.s
new file mode 100644
index 000000000000..5143d36df25e
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_mips16.s
@@ -0,0 +1,8 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| llvm-readobj -h | FileCheck %s
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK: Flags [ (0x54001004)
+
+ .set mips16
+f:
+ nop
diff --git a/test/MC/Mips/elf_eflags_nan2008.s b/test/MC/Mips/elf_eflags_nan2008.s
new file mode 100644
index 000000000000..f6903429c0df
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_nan2008.s
@@ -0,0 +1,12 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o - | \
+# RUN: llvm-readobj -h | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+# RUN: llvm-mc -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK-OBJ: Flags [ (0x50001404)
+
+# CHECK-ASM: .nan 2008
+
+.nan 2008
diff --git a/test/MC/Mips/elf_eflags_nanlegacy.s b/test/MC/Mips/elf_eflags_nanlegacy.s
new file mode 100644
index 000000000000..0fa07879d374
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_nanlegacy.s
@@ -0,0 +1,15 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o - | \
+# RUN: llvm-readobj -h | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+# RUN: llvm-mc -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK-OBJ: Flags [ (0x50001004)
+
+# CHECK-ASM: .nan 2008
+# CHECK-ASM: .nan legacy
+
+.nan 2008
+# Let's override the previous directive!
+.nan legacy
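+
+# Since the last .nan directive wins, the EF_MIPS_NAN2008 bit (0x00000400) is
+# absent from the 0x50001004 value above; compare the 0x50001404 value checked
+# in elf_eflags_nan2008.s.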
diff --git a/test/MC/Mips/elf_eflags_noreorder.s b/test/MC/Mips/elf_eflags_noreorder.s
new file mode 100644
index 000000000000..fe46b41ae2f5
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_noreorder.s
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| llvm-readobj -h | FileCheck %s
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK: Flags [ (0x50001005)
+
+.set noreorder
diff --git a/test/MC/Mips/elf_eflags_pic0.s b/test/MC/Mips/elf_eflags_pic0.s
new file mode 100644
index 000000000000..04115fad1a0a
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_pic0.s
@@ -0,0 +1,7 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| llvm-readobj -h | FileCheck %s
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK: Flags [ (0x50001004)
+
+.abicalls
+.option pic0
diff --git a/test/MC/Mips/elf_eflags_pic2.s b/test/MC/Mips/elf_eflags_pic2.s
new file mode 100644
index 000000000000..692c478d0bca
--- /dev/null
+++ b/test/MC/Mips/elf_eflags_pic2.s
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o -| llvm-readobj -h | FileCheck %s
+
+# This *MUST* match the output of 'gcc -c' compiled with the same triple.
+# CHECK: Flags [ (0x50001006)
+
+.option pic2
diff --git a/test/MC/Mips/elf_reginfo.s b/test/MC/Mips/elf_reginfo.s
new file mode 100644
index 000000000000..ba4788a39d9b
--- /dev/null
+++ b/test/MC/Mips/elf_reginfo.s
@@ -0,0 +1,32 @@
+# These *MUST* match the output of gas compiled with the same triple and
+# corresponding options (-mabi=64 -> -mattr=+n64 for example).
+
+# RUN: llvm-mc -filetype=obj -triple=mips64el-linux -mattr=-n64,+n64 %s -o - \
+# RUN: | llvm-readobj -s | FileCheck --check-prefix=CHECK_64 %s
+# RUN: llvm-mc -filetype=obj -triple=mipsel %s -mattr=-o32,+n32 -o - \
+# RUN: | llvm-readobj -s | FileCheck --check-prefix=CHECK_32 %s
+
+# Check for the register information sections.
+
+# check for .MIPS.options
+# CHECK_64: Sections [
+# CHECK_64: Section {
+# CHECK_64-LABEL: Name: .MIPS.options
+# CHECK_64-NEXT: Type: SHT_MIPS_OPTIONS
+# CHECK_64-NEXT: Flags [ (0x8000002)
+# CHECK_64: AddressAlignment: 8
+# CHECK_64: EntrySize: 1
+# CHECK_64-LABEL: }
+
+# check for .reginfo
+# CHECK_32: Sections [
+# CHECK_32: Section {
+# CHECK_32-LABEL: Name: .reginfo
+# CHECK_32-NEXT: Type: SHT_MIPS_REGINFO
+# CHECK_32-NEXT: Flags [ (0x2)
+# CHECK_32: AddressAlignment: 8
+# CHECK_32: EntrySize: 24
+# CHECK_32-LABEL: }
diff --git a/test/MC/Mips/elf_st_other.ll b/test/MC/Mips/elf_st_other.ll
deleted file mode 100644
index 31294c88f87d..000000000000
--- a/test/MC/Mips/elf_st_other.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; This tests value of ELF st_other field for function symbol table entries.
-; For microMIPS value should be equal to STO_MIPS_MICROMIPS.
-
-; RUN: llc -mtriple mipsel-unknown-linux -mcpu=mips32r2 -mattr=+micromips -print-hack-directives %s -o - | FileCheck %s
-
-define i32 @main() nounwind {
-entry:
- ret i32 0
-}
-
-; CHECK: .mips_hack_stocg main, 128
diff --git a/test/MC/Mips/elf_st_other.s b/test/MC/Mips/elf_st_other.s
index 2d632887799a..579707b6be38 100644
--- a/test/MC/Mips/elf_st_other.s
+++ b/test/MC/Mips/elf_st_other.s
@@ -1,13 +1,26 @@
// RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux %s -o -| llvm-readobj -t | FileCheck %s
- .text
- .globl main
- .align 2
- .type main,@function
- .set nomips16 # @main
- .ent main
- .mips_hack_stocg main, 128
-main:
-
-// CHECK: Name: main
-// CHECK: Other: 128
+
+.globl f1
+.type f1, @function
+.set micromips
+f1:
+ nop
+
+.globl d1
+.type d1, @object
+d1:
+.word 42
+
+.globl f2
+.type f2, @function
+.set nomicromips
+f2:
+ nop
+
+// CHECK-LABEL: Name: d1
+// CHECK: Other: 0
+// CHECK-LABEL: Name: f1
+// CHECK: Other: 128
+// CHECK-LABEL: Name: f2
+// CHECK: Other: 0
diff --git a/test/MC/Mips/higher-highest-addressing.s b/test/MC/Mips/higher-highest-addressing.s
new file mode 100644
index 000000000000..2973a6400ae2
--- /dev/null
+++ b/test/MC/Mips/higher-highest-addressing.s
@@ -0,0 +1,54 @@
+# RUN: llvm-mc -filetype=obj -triple=mips64el-unknown-linux -mcpu=mips64r2 %s \
+# RUN: | llvm-objdump -disassemble -triple mips64el - | FileCheck %s
+
+# RUN: llvm-mc -filetype=obj -triple=mips64el-unknown-linux -mcpu=mips64r2 %s \
+# RUN: | llvm-readobj -r | FileCheck %s -check-prefix=CHECK-REL
+
+
+# Test that R_MIPS_HIGHER and R_MIPS_HIGHEST relocations are created. By using
+# NEXT we also check that none of the expressions in test2 generates
+# relocations.
+
+test1:
+# CHECK-LABEL: test1:
+
+ lui $5, %highest(func)
+ daddiu $5, $5, %higher(func)
+
+# CHECK-REL: Relocations [
+# CHECK-REL-NEXT: {
+# CHECK-REL-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_HIGHEST
+# CHECK-REL-NEXT: 0x{{[0-9,A-F]+}} R_MIPS_HIGHER
+# CHECK-REL-NEXT: }
+
+
+# Test the calculation of %higher and %highest:
+# ((x + 0x80008000) >> 32) & 0xffff (higher)
+# ((x + 0x800080008000) >> 48) & 0xffff (highest).
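+#
+# Worked example for the first pair of instructions below (illustrative,
+# assuming $L2-$L1 = 8, i.e. two 4-byte instructions):
+#   x = 0x300047FFF7FF7 + 8 = 0x300047FFF7FFF
+#   %higher(x)  = ((x + 0x80008000) >> 32) & 0xffff = 4
+#   %highest(x) = ((x + 0x800080008000) >> 48) & 0xffff = 3
+# matching the "lui $6, 3" / "daddiu $6, $6, 4" checks; one byte more and the
+# low 32 bits would carry, bumping %higher to 5 (the $7 case further down).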
+
+test2:
+# CHECK-LABEL: test2:
+
+# Check the case where the values are not modified by adding +1. The constant
+# is chosen so that it is just below the value that triggers the addition of +1
+# to %higher.
+$L1:
+ lui $6, %highest($L2-$L1+0x300047FFF7FF7)
+ daddiu $6, $6, %higher($L2-$L1+0x300047FFF7FF7)
+$L2:
+# CHECK: lui $6, 3
+# CHECK: daddiu $6, $6, 4
+
+
+# Check the case where %higher is modified by adding +1.
+ lui $7, %highest($L2-$L1+0x300047FFF7FF8)
+ ld $7, %higher ($L2-$L1+0x300047FFF7FF8)($7)
+# CHECK: lui $7, 3
+# CHECK: ld $7, 5($7)
+
+
+# Check the case where both %higher and %highest are modified by adding +1.
+ lui $8, %highest(0x37FFF7FFF8000)
+ ld $8, %higher (0x37FFF7FFF8000)($8)
+# CHECK: lui $8, 4
+# CHECK: ld $8, -32768($8)
diff --git a/test/MC/Mips/higher_highest.ll b/test/MC/Mips/higher_highest.ll
deleted file mode 100644
index 6c3d71f6a4b1..000000000000
--- a/test/MC/Mips/higher_highest.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; DISABLE: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch -filetype=obj < %s -o - | llvm-readobj -r | FileCheck %s
-; RUN: false
-; XFAIL: *
-; Disabled because currently we don't have a way to generate these relocations.
-;
-; Check that the R_MIPS_HIGHER and R_MIPS_HIGHEST relocations were created.
-
-; CHECK: Relocations [
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_HIGHEST
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_HIGHEST
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_HIGHER
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_HIGHER
-; CHECK: ]
-
-@g0 = external global i32
-
-define void @foo1(i32 %s) nounwind {
-entry:
-
- %tobool = icmp eq i32 %s, 0
- br i1 %tobool, label %if.end, label %if.then
-
-if.then: ; preds = %entry
- %0 = load i32* @g0, align 4
- %add = add nsw i32 %0, 12
- store i32 %add, i32* @g0, align 4
- br label %if.end
-
-if.end: ; preds = %entry, %if.then
- ret void
-}
-
diff --git a/test/MC/Mips/hilo-addressing.s b/test/MC/Mips/hilo-addressing.s
index 28459c206728..720c7e256bb1 100644
--- a/test/MC/Mips/hilo-addressing.s
+++ b/test/MC/Mips/hilo-addressing.s
@@ -1,11 +1,42 @@
-# RUN: llvm-mc -show-encoding -triple mips-unknown-unknown %s | FileCheck %s
-
- .ent hilo_test
- .equ addr, 0xdeadbeef
-# CHECK: # encoding: [0x3c,0x04,0xde,0xae]
- lui $4,%hi(addr)
-# CHECK: # encoding: [0x03,0xe0,0x00,0x08]
- jr $31
-# CHECK: # encoding: [0x80,0x82,0xbe,0xef]
- lb $2,%lo(addr)($4)
- .end hilo_test
+# RUN: llvm-mc -show-encoding -triple mips-unknown-unknown %s \
+# RUN: | FileCheck %s -check-prefix=CHECK-ENC
+
+# RUN: llvm-mc -filetype=obj -triple=mipsel-unknown-linux %s \
+# RUN: | llvm-objdump -disassemble - | FileCheck %s -check-prefix=CHECK-INSTR
+
+# RUN: llvm-mc -filetype=obj -triple=mipsel-unknown-linux %s \
+# RUN: | llvm-readobj -r | FileCheck %s -check-prefix=CHECK-REL
+
+
+# Check that 1 is added to the high 16 bits if bit 15 of the low part is 1.
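+#
+# Worked example (illustrative): for addr = 0xdeadbeef the low half 0xbeef has
+# bit 15 set, so %hi(addr) = 0xdead + 1 = 0xdeae and %lo(addr) = 0xbeef
+# (sign-extended to -16657); lui/lb below then reconstruct
+# 0xdeae0000 + (-16657) = 0xdeadbeef.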
+
+ .equ addr, 0xdeadbeef
+ lui $4, %hi(addr)
+ lb $2, %lo(addr)($4)
+# CHECK-ENC: # encoding: [0x3c,0x04,0xde,0xae]
+# CHECK-ENC: # encoding: [0x80,0x82,0xbe,0xef]
+
+
+# Check that the assembler can handle %hi(label1 - label2) and %lo(label1 - label2)
+# expressions.
+
+$L1:
+ # Emit zeros so that difference between $L3 and $L1 is 0x30124 bytes.
+ .fill 0x30124-8
+$L2:
+ lui $4, %hi($L3-$L1)
+ addiu $4, $4, %lo($L3-$L1)
+# CHECK-INSTR: lui $4, 3
+# CHECK-INSTR: addiu $4, $4, 292
+
+$L3:
+ lui $5, %hi($L2-$L3)
+ lw $5, %lo($L2-$L3)($5)
+# CHECK-INSTR: lui $5, 0
+# CHECK-INSTR: lw $5, -8($5)
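+# Illustrative arithmetic for the two pairs above: $L3-$L1 = 0x30124, whose low
+# half 0x0124 (= 292) has bit 15 clear, so %hi = 3 and %lo = 292; $L2-$L3 = -8
+# (0xfffffff8), so %lo = -8 and %hi = ((0xfffffff8 + 0x8000) >> 16) & 0xffff = 0.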
+
+
+# Check that no relocations are emitted for %hi(label1 - label2) and
+# %lo(label1 - label2) expressions.
+
+# CHECK-REL-NOT: R_MIPS
diff --git a/test/MC/Mips/lea_64.ll b/test/MC/Mips/lea_64.ll
deleted file mode 100644
index 2e7a37befc84..000000000000
--- a/test/MC/Mips/lea_64.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 %s -o - \
-; RUN: | llvm-objdump -disassemble -triple mips64el - \
-; RUN: | FileCheck %s
-
-@p = external global i32*
-
-define void @f1() nounwind {
-entry:
-; CHECK: .text:
-; CHECK-NOT: addiu {{[0-9,a-f]+}}, {{[0-9,a-f]+}}, {{[0-9]+}}
-
- %a = alloca [10 x i32], align 4
- %arraydecay = getelementptr inbounds [10 x i32]* %a, i64 0, i64 0
- store i32* %arraydecay, i32** @p, align 8
- ret void
-
-; CHECK: jr $ra
-}
diff --git a/test/MC/Mips/lit.local.cfg b/test/MC/Mips/lit.local.cfg
index 1fa54b428cd9..a3183a25afaa 100644
--- a/test/MC/Mips/lit.local.cfg
+++ b/test/MC/Mips/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Mips' in targets:
+if not 'Mips' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/Mips/llvm-mc-fixup-endianness.s b/test/MC/Mips/llvm-mc-fixup-endianness.s
new file mode 100644
index 000000000000..bc6a5d96632c
--- /dev/null
+++ b/test/MC/Mips/llvm-mc-fixup-endianness.s
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -show-encoding -mcpu=mips32 -triple mips-unknown-unknown %s | FileCheck -check-prefix=BE %s
+# RUN: llvm-mc -show-encoding -mcpu=mips32 -triple mipsel-unknown-unknown %s | FileCheck -check-prefix=LE %s
+#
+ .text
+ b foo # BE: b foo # encoding: [0x10,0x00,A,A]
+ # LE: b foo # encoding: [A,A,0x00,0x10]
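+# The 'A' bytes above mark the 16-bit branch fixup to be applied later; the
+# opcode halfword 0x1000 occupies the high-order bytes, which are emitted first
+# on the big-endian triple and last on the little-endian one.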
diff --git a/test/MC/Mips/micromips-16-bit-instructions.s b/test/MC/Mips/micromips-16-bit-instructions.s
new file mode 100644
index 000000000000..31bddcc58d7f
--- /dev/null
+++ b/test/MC/Mips/micromips-16-bit-instructions.s
@@ -0,0 +1,27 @@
+# RUN: llvm-mc %s -triple=mipsel -show-encoding -mattr=micromips | \
+# RUN: FileCheck -check-prefix=CHECK-EL %s
+# RUN: llvm-mc %s -triple=mips -show-encoding -mattr=micromips | \
+# RUN: FileCheck -check-prefix=CHECK-EB %s
+# Check that the assembler can handle the documented syntax
+# for microMIPS 16-bit instructions.
+#------------------------------------------------------------------------------
+# MicroMIPS 16-bit Instructions
+#------------------------------------------------------------------------------
+# Little endian
+#------------------------------------------------------------------------------
+# CHECK-EL: mfhi $9 # encoding: [0x09,0x46]
+# CHECK-EL: mflo $9 # encoding: [0x49,0x46]
+# CHECK-EL: move $25, $1 # encoding: [0x21,0x0f]
+# CHECK-EL: jalr $9 # encoding: [0xc9,0x45]
+#------------------------------------------------------------------------------
+# Big endian
+#------------------------------------------------------------------------------
+# CHECK-EB: mfhi $9 # encoding: [0x46,0x09]
+# CHECK-EB: mflo $9 # encoding: [0x46,0x49]
+# CHECK-EB: move $25, $1 # encoding: [0x0f,0x21]
+# CHECK-EB: jalr $9 # encoding: [0x45,0xc9]
+
+ mfhi $9
+ mflo $9
+ move $25, $1
+ jalr $9
diff --git a/test/MC/Mips/micromips-alias.s b/test/MC/Mips/micromips-alias.s
new file mode 100644
index 000000000000..c0bf4b3a8e33
--- /dev/null
+++ b/test/MC/Mips/micromips-alias.s
@@ -0,0 +1,16 @@
+# RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux -mcpu=mips32 %s -o - \
+# RUN: | llvm-readobj -t | FileCheck %s
+
+# Symbol bar must be marked as micromips.
+# CHECK: Name: bar
+# CHECK: Other: 128
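+# (128 = 0x80 = STO_MIPS_MICROMIPS in the symbol's st_other field; the alias
+# "bar = f" inherits it from the microMIPS function f.)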
+ .align 2
+ .type f,@function
+ .set nomips16
+ .set micromips
+f:
+ nop
+ .set nomicromips
+ nop
+ .globl bar
+bar = f
diff --git a/test/MC/Mips/micromips-alu-instructions.s b/test/MC/Mips/micromips-alu-instructions.s
index 276a83e82c03..1131d1f3eae9 100644
--- a/test/MC/Mips/micromips-alu-instructions.s
+++ b/test/MC/Mips/micromips-alu-instructions.s
@@ -17,7 +17,6 @@
# CHECK-EL: subu $4, $3, $5 # encoding: [0xa3,0x00,0xd0,0x21]
# CHECK-EL: neg $6, $7 # encoding: [0xe0,0x00,0x90,0x31]
# CHECK-EL: negu $6, $7 # encoding: [0xe0,0x00,0xd0,0x31]
-# CHECK-EL: move $7, $8 # encoding: [0x08,0x00,0x50,0x39]
# CHECK-EL: slt $3, $3, $5 # encoding: [0xa3,0x00,0x50,0x1b]
# CHECK-EL: slti $3, $3, 103 # encoding: [0x63,0x90,0x67,0x00]
# CHECK-EL: slti $3, $3, 103 # encoding: [0x63,0x90,0x67,0x00]
@@ -52,7 +51,6 @@
# CHECK-EB: subu $4, $3, $5 # encoding: [0x00,0xa3,0x21,0xd0]
# CHECK-EB: neg $6, $7 # encoding: [0x00,0xe0,0x31,0x90]
# CHECK-EB: negu $6, $7 # encoding: [0x00,0xe0,0x31,0xd0]
-# CHECK-EB: move $7, $8 # encoding: [0x00,0x08,0x39,0x50]
# CHECK-EB: slt $3, $3, $5 # encoding: [0x00,0xa3,0x1b,0x50]
# CHECK-EB: slti $3, $3, 103 # encoding: [0x90,0x63,0x00,0x67]
# CHECK-EB: slti $3, $3, 103 # encoding: [0x90,0x63,0x00,0x67]
diff --git a/test/MC/Mips/micromips-bad-branches.s b/test/MC/Mips/micromips-bad-branches.s
new file mode 100644
index 000000000000..573605e18d6b
--- /dev/null
+++ b/test/MC/Mips/micromips-bad-branches.s
@@ -0,0 +1,225 @@
+# RUN: not llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -mattr=+micromips 2>&1 | FileCheck %s
+#
+# CHECK: error: branch to misaligned address
+# CHECK: b -65535
+# CHECK: error: branch target out of range
+# CHECK: b -65537
+# CHECK: error: branch to misaligned address
+# CHECK: b 65535
+# CHECK: error: branch target out of range
+# CHECK: b 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: beq $1, $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: beq $1, $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: bne $1, $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: bne $1, $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bal -65535
+# CHECK: error: branch target out of range
+# CHECK: bal -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bal 65535
+# CHECK: error: branch target out of range
+# CHECK: bal 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: bgez $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: bgez $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: bgtz $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: bgtz $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: blez $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: blez $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: bltz $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: bltz $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: bgezal $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: bgezal $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, -65535
+# CHECK: error: branch target out of range
+# CHECK: bltzal $1, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, 65535
+# CHECK: error: branch target out of range
+# CHECK: bltzal $1, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f -65535
+# CHECK: error: branch target out of range
+# CHECK: bc1f -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f 65535
+# CHECK: error: branch target out of range
+# CHECK: bc1f 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, -65535
+# CHECK: error: branch target out of range
+# CHECK: bc1f $fcc0, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, 65535
+# CHECK: error: branch target out of range
+# CHECK: bc1f $fcc0, 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t -65535
+# CHECK: error: branch target out of range
+# CHECK: bc1t -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t 65535
+# CHECK: error: branch target out of range
+# CHECK: bc1t 65536
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, -65535
+# CHECK: error: branch target out of range
+# CHECK: bc1t $fcc0, -65537
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, 65535
+# CHECK: error: branch target out of range
+# CHECK: bc1t $fcc0, 65536
+
+ b -65535
+ b -65536
+ b -65537
+ b 65534
+ b 65535
+ b 65536
+
+ beq $1, $1, -65535
+ beq $1, $1, -65536
+ beq $1, $1, -65537
+ beq $1, $1, 65534
+ beq $1, $1, 65535
+ beq $1, $1, 65536
+
+ bne $1, $1, -65535
+ bne $1, $1, -65536
+ bne $1, $1, -65537
+ bne $1, $1, 65534
+ bne $1, $1, 65535
+ bne $1, $1, 65536
+
+ bal -65535
+ bal -65536
+ bal -65537
+ bal 65534
+ bal 65535
+ bal 65536
+
+ bgez $1, -65535
+ bgez $1, -65536
+ bgez $1, -65537
+ bgez $1, 65534
+ bgez $1, 65535
+ bgez $1, 65536
+
+ bgtz $1, -65535
+ bgtz $1, -65536
+ bgtz $1, -65537
+ bgtz $1, 65534
+ bgtz $1, 65535
+ bgtz $1, 65536
+
+ blez $1, -65535
+ blez $1, -65536
+ blez $1, -65537
+ blez $1, 65534
+ blez $1, 65535
+ blez $1, 65536
+
+ bltz $1, -65535
+ bltz $1, -65536
+ bltz $1, -65537
+ bltz $1, 65534
+ bltz $1, 65535
+ bltz $1, 65536
+
+ bgezal $1, -65535
+ bgezal $1, -65536
+ bgezal $1, -65537
+ bgezal $1, 65534
+ bgezal $1, 65535
+ bgezal $1, 65536
+
+ bltzal $1, -65535
+ bltzal $1, -65536
+ bltzal $1, -65537
+ bltzal $1, 65534
+ bltzal $1, 65535
+ bltzal $1, 65536
+
+ bc1f -65535
+ bc1f -65536
+ bc1f -65537
+ bc1f 65534
+ bc1f 65535
+ bc1f 65536
+
+ bc1f $fcc0, -65535
+ bc1f $fcc0, -65536
+ bc1f $fcc0, -65537
+ bc1f $fcc0, 65534
+ bc1f $fcc0, 65535
+ bc1f $fcc0, 65536
+
+ bc1t -65535
+ bc1t -65536
+ bc1t -65537
+ bc1t 65534
+ bc1t 65535
+ bc1t 65536
+
+ bc1t $fcc0, -65535
+ bc1t $fcc0, -65536
+ bc1t $fcc0, -65537
+ bc1t $fcc0, 65534
+ bc1t $fcc0, 65535
+ bc1t $fcc0, 65536
diff --git a/test/MC/Mips/micromips-control-instructions.s b/test/MC/Mips/micromips-control-instructions.s
new file mode 100644
index 000000000000..aff84c245941
--- /dev/null
+++ b/test/MC/Mips/micromips-control-instructions.s
@@ -0,0 +1,60 @@
+# RUN: llvm-mc %s -triple=mipsel -show-encoding -mcpu=mips32r2 -mattr=micromips \
+# RUN: | FileCheck -check-prefix=CHECK-EL %s
+# RUN: llvm-mc %s -triple=mips -show-encoding -mcpu=mips32r2 -mattr=micromips \
+# RUN: | FileCheck -check-prefix=CHECK-EB %s
+# Check that the assembler can handle the documented syntax
+# for control instructions.
+#------------------------------------------------------------------------------
+# microMIPS Control Instructions
+#------------------------------------------------------------------------------
+# Little endian
+#------------------------------------------------------------------------------
+# CHECK-EL: break # encoding: [0x00,0x00,0x07,0x00]
+# CHECK-EL: break 7 # encoding: [0x07,0x00,0x07,0x00]
+# CHECK-EL: break 7, 5 # encoding: [0x07,0x00,0x47,0x01]
+# CHECK-EL: syscall # encoding: [0x00,0x00,0x7c,0x8b]
+# CHECK-EL: syscall 396 # encoding: [0x8c,0x01,0x7c,0x8b]
+# CHECK-EL: eret # encoding: [0x00,0x00,0x7c,0xf3]
+# CHECK-EL: deret # encoding: [0x00,0x00,0x7c,0xe3]
+# CHECK-EL: di # encoding: [0x00,0x00,0x7c,0x47]
+# CHECK-EL: di # encoding: [0x00,0x00,0x7c,0x47]
+# CHECK-EL: di $10 # encoding: [0x0a,0x00,0x7c,0x47]
+# CHECK-EL: ei # encoding: [0x00,0x00,0x7c,0x57]
+# CHECK-EL: ei # encoding: [0x00,0x00,0x7c,0x57]
+# CHECK-EL: ei $10 # encoding: [0x0a,0x00,0x7c,0x57]
+# CHECK-EL: wait # encoding: [0x00,0x00,0x7c,0x93]
+# CHECK-EL: wait 17 # encoding: [0x11,0x00,0x7c,0x93]
+#------------------------------------------------------------------------------
+# Big endian
+#------------------------------------------------------------------------------
+# CHECK-EB: break # encoding: [0x00,0x00,0x00,0x07]
+# CHECK-EB: break 7 # encoding: [0x00,0x07,0x00,0x07]
+# CHECK-EB: break 7, 5 # encoding: [0x00,0x07,0x01,0x47]
+# CHECK-EB: syscall # encoding: [0x00,0x00,0x8b,0x7c]
+# CHECK-EB: syscall 396 # encoding: [0x01,0x8c,0x8b,0x7c]
+# CHECK-EB: eret # encoding: [0x00,0x00,0xf3,0x7c]
+# CHECK-EB: deret # encoding: [0x00,0x00,0xe3,0x7c]
+# CHECK-EB: di # encoding: [0x00,0x00,0x47,0x7c]
+# CHECK-EB: di # encoding: [0x00,0x00,0x47,0x7c]
+# CHECK-EB: di $10 # encoding: [0x00,0x0a,0x47,0x7c]
+# CHECK-EB: ei # encoding: [0x00,0x00,0x57,0x7c]
+# CHECK-EB: ei # encoding: [0x00,0x00,0x57,0x7c]
+# CHECK-EB: ei $10 # encoding: [0x00,0x0a,0x57,0x7c]
+# CHECK-EB: wait # encoding: [0x00,0x00,0x93,0x7c]
+# CHECK-EB: wait 17 # encoding: [0x00,0x11,0x93,0x7c]
+
+ break
+ break 7
+ break 7,5
+ syscall
+ syscall 0x18c
+ eret
+ deret
+ di
+ di $0
+ di $10
+ ei
+ ei $0
+ ei $10
+ wait
+ wait 17
diff --git a/test/MC/Mips/micromips-diagnostic-fixup.s b/test/MC/Mips/micromips-diagnostic-fixup.s
new file mode 100644
index 000000000000..f8fe447c283c
--- /dev/null
+++ b/test/MC/Mips/micromips-diagnostic-fixup.s
@@ -0,0 +1,10 @@
+# RUN: not llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -arch=mips -mattr=+micromips 2>&1 -filetype=obj | FileCheck %s
+#
+# CHECK: LLVM ERROR: out of range PC16 fixup
+
+.text
+ b foo
+ .space 65536 - 8, 1 # -8 = size of b instr plus size of automatically inserted nop
+ nop # This instr makes the branch too long to fit into a 17-bit offset
+foo:
+ add $0,$0,$0
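+
+# Illustrative layout arithmetic (assuming 4-byte b and nop encodings here):
+# b sits at offset 0, its delay-slot nop at 4, the .space fills bytes 8..65535,
+# the extra nop lands at 65536 and foo at 65540; measured from the delay slot
+# that is 65536 bytes, one halfword past the +65534 limit of the halfword-scaled
+# PC16 fixup, hence the error above.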
diff --git a/test/MC/Mips/micromips-el-fixup-data.s b/test/MC/Mips/micromips-el-fixup-data.s
new file mode 100644
index 000000000000..47538356bf37
--- /dev/null
+++ b/test/MC/Mips/micromips-el-fixup-data.s
@@ -0,0 +1,25 @@
+# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 \
+# RUN: -mattr=+micromips 2>&1 -filetype=obj > %t.o
+# RUN: llvm-objdump %t.o -triple mipsel -mattr=+micromips -d | FileCheck %s
+
+# Check that fixup data is written in the microMIPS-specific little-endian
+# byte order.
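+# A microMIPS instruction is stored as two little-endian 16-bit halfwords with
+# the most significant halfword first, so the fixed-up offset halfword lands in
+# the last two bytes (09 b4 04 00 below) rather than in the first two as a
+# plain 32-bit little-endian word would place it.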
+
+ .text
+ .globl main
+ .align 2
+ .type main,@function
+ .set micromips
+ .set nomips16
+ .ent main
+main:
+ addiu $sp, $sp, -16
+ bnez $9, lab1
+
+# CHECK: 09 b4 04 00 bne $9, $zero, 8
+
+ addu $zero, $zero, $zero
+lab1:
+ jr $ra
+ addiu $sp, $sp, 16
+ .end main
diff --git a/test/MC/Mips/micromips-fpu-instructions.s b/test/MC/Mips/micromips-fpu-instructions.s
new file mode 100644
index 000000000000..5af4f98670e7
--- /dev/null
+++ b/test/MC/Mips/micromips-fpu-instructions.s
@@ -0,0 +1,193 @@
+# RUN: llvm-mc %s -triple=mipsel -show-encoding -mattr=micromips \
+# RUN: -mcpu=mips32r2 | FileCheck -check-prefix=CHECK-EL %s
+# RUN: llvm-mc %s -triple=mips -show-encoding -mattr=micromips \
+# RUN: -mcpu=mips32r2 | FileCheck -check-prefix=CHECK-EB %s
+# Check that the assembler can handle the documented syntax
+# for FPU instructions.
+#------------------------------------------------------------------------------
+# FPU Instructions
+#------------------------------------------------------------------------------
+# Little endian
+#------------------------------------------------------------------------------
+# CHECK-EL: add.s $f4, $f6, $f8 # encoding: [0x06,0x55,0x30,0x20]
+# CHECK-EL: add.d $f4, $f6, $f8 # encoding: [0x06,0x55,0x30,0x21]
+# CHECK-EL: div.s $f4, $f6, $f8 # encoding: [0x06,0x55,0xf0,0x20]
+# CHECK-EL: div.d $f4, $f6, $f8 # encoding: [0x06,0x55,0xf0,0x21]
+# CHECK-EL: mul.s $f4, $f6, $f8 # encoding: [0x06,0x55,0xb0,0x20]
+# CHECK-EL: mul.d $f4, $f6, $f8 # encoding: [0x06,0x55,0xb0,0x21]
+# CHECK-EL: sub.s $f4, $f6, $f8 # encoding: [0x06,0x55,0x70,0x20]
+# CHECK-EL: sub.d $f4, $f6, $f8 # encoding: [0x06,0x55,0x70,0x21]
+# CHECK-EL: lwc1 $f2, 4($6) # encoding: [0x46,0x9c,0x04,0x00]
+# CHECK-EL: ldc1 $f2, 4($6) # encoding: [0x46,0xbc,0x04,0x00]
+# CHECK-EL: swc1 $f2, 4($6) # encoding: [0x46,0x98,0x04,0x00]
+# CHECK-EL: sdc1 $f2, 4($6) # encoding: [0x46,0xb8,0x04,0x00]
+# CHECK-EL: bc1f 1332 # encoding: [0x80,0x43,0x9a,0x02]
+# CHECK-EL: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL: bc1t 1332 # encoding: [0xa0,0x43,0x9a,0x02]
+# CHECK-EL: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EL: luxc1 $f2, $4($6) # encoding: [0x86,0x54,0x48,0x11]
+# CHECK-EL: suxc1 $f2, $4($6) # encoding: [0x86,0x54,0x88,0x11]
+# CHECK-EL: ceil.w.s $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x1b]
+# CHECK-EL: ceil.w.d $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x5b]
+# CHECK-EL: cvt.w.s $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x09]
+# CHECK-EL: cvt.w.d $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x49]
+# CHECK-EL: floor.w.s $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x0b]
+# CHECK-EL: floor.w.d $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x4b]
+# CHECK-EL: round.w.s $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x3b]
+# CHECK-EL: round.w.d $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x7b]
+# CHECK-EL: sqrt.s $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x0a]
+# CHECK-EL: sqrt.d $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x4a]
+# CHECK-EL: trunc.w.s $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x2b]
+# CHECK-EL: trunc.w.d $f6, $f8 # encoding: [0xc8,0x54,0x3b,0x6b]
+# CHECK-EL: abs.s $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x03]
+# CHECK-EL: abs.d $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x23]
+# CHECK-EL: mov.s $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x00]
+# CHECK-EL: mov.d $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x20]
+# CHECK-EL: neg.s $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x0b]
+# CHECK-EL: neg.d $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x2b]
+# CHECK-EL: cvt.d.s $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x13]
+# CHECK-EL: cvt.d.w $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x33]
+# CHECK-EL: cvt.s.d $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x1b]
+# CHECK-EL: cvt.s.w $f6, $f8 # encoding: [0xc8,0x54,0x7b,0x3b]
+# CHECK-EL: cfc1 $6, $0 # encoding: [0xc0,0x54,0x3b,0x10]
+# CHECK-EL: ctc1 $6, $0 # encoding: [0xc0,0x54,0x3b,0x18]
+# CHECK-EL: mfc1 $6, $f8 # encoding: [0xc8,0x54,0x3b,0x20]
+# CHECK-EL: mtc1 $6, $f8 # encoding: [0xc8,0x54,0x3b,0x28]
+# CHECK-EL: movz.s $f4, $f6, $7 # encoding: [0xe6,0x54,0x78,0x20]
+# CHECK-EL: movz.d $f4, $f6, $7 # encoding: [0xe6,0x54,0x78,0x21]
+# CHECK-EL: movn.s $f4, $f6, $7 # encoding: [0xe6,0x54,0x38,0x20]
+# CHECK-EL: movn.d $f4, $f6, $7 # encoding: [0xe6,0x54,0x38,0x21]
+# CHECK-EL: movt.s $f4, $f6, $fcc0 # encoding: [0x86,0x54,0x60,0x00]
+# CHECK-EL: movt.d $f4, $f6, $fcc0 # encoding: [0x86,0x54,0x60,0x02]
+# CHECK-EL: movf.s $f4, $f6, $fcc0 # encoding: [0x86,0x54,0x20,0x00]
+# CHECK-EL: movf.d $f4, $f6, $fcc0 # encoding: [0x86,0x54,0x20,0x02]
+# CHECK-EL: madd.s $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x01,0x11]
+# CHECK-EL: madd.d $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x09,0x11]
+# CHECK-EL: msub.s $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x21,0x11]
+# CHECK-EL: msub.d $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x29,0x11]
+# CHECK-EL: nmadd.s $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x02,0x11]
+# CHECK-EL: nmadd.d $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x0a,0x11]
+# CHECK-EL: nmsub.s $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x22,0x11]
+# CHECK-EL: nmsub.d $f2, $f4, $f6, $f8 # encoding: [0x06,0x55,0x2a,0x11]
+#------------------------------------------------------------------------------
+# Big endian
+#------------------------------------------------------------------------------
+# CHECK-EB: add.s $f4, $f6, $f8 # encoding: [0x55,0x06,0x20,0x30]
+# CHECK-EB: add.d $f4, $f6, $f8 # encoding: [0x55,0x06,0x21,0x30]
+# CHECK-EB: div.s $f4, $f6, $f8 # encoding: [0x55,0x06,0x20,0xf0]
+# CHECK-EB: div.d $f4, $f6, $f8 # encoding: [0x55,0x06,0x21,0xf0]
+# CHECK-EB: mul.s $f4, $f6, $f8 # encoding: [0x55,0x06,0x20,0xb0]
+# CHECK-EB: mul.d $f4, $f6, $f8 # encoding: [0x55,0x06,0x21,0xb0]
+# CHECK-EB: sub.s $f4, $f6, $f8 # encoding: [0x55,0x06,0x20,0x70]
+# CHECK-EB: sub.d $f4, $f6, $f8 # encoding: [0x55,0x06,0x21,0x70]
+# CHECK-EB: lwc1 $f2, 4($6) # encoding: [0x9c,0x46,0x00,0x04]
+# CHECK-EB: ldc1 $f2, 4($6) # encoding: [0xbc,0x46,0x00,0x04]
+# CHECK-EB: swc1 $f2, 4($6) # encoding: [0x98,0x46,0x00,0x04]
+# CHECK-EB: sdc1 $f2, 4($6) # encoding: [0xb8,0x46,0x00,0x04]
+# CHECK-EB: bc1f 1332 # encoding: [0x43,0x80,0x02,0x9a]
+# CHECK-EB: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB: bc1t 1332 # encoding: [0x43,0xa0,0x02,0x9a]
+# CHECK-EB: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK-EB: luxc1 $f2, $4($6) # encoding: [0x54,0x86,0x11,0x48]
+# CHECK-EB: suxc1 $f2, $4($6) # encoding: [0x54,0x86,0x11,0x88]
+# CHECK-EB: ceil.w.s $f6, $f8 # encoding: [0x54,0xc8,0x1b,0x3b]
+# CHECK-EB: ceil.w.d $f6, $f8 # encoding: [0x54,0xc8,0x5b,0x3b]
+# CHECK-EB: cvt.w.s $f6, $f8 # encoding: [0x54,0xc8,0x09,0x3b]
+# CHECK-EB: cvt.w.d $f6, $f8 # encoding: [0x54,0xc8,0x49,0x3b]
+# CHECK-EB: floor.w.s $f6, $f8 # encoding: [0x54,0xc8,0x0b,0x3b]
+# CHECK-EB: floor.w.d $f6, $f8 # encoding: [0x54,0xc8,0x4b,0x3b]
+# CHECK-EB: round.w.s $f6, $f8 # encoding: [0x54,0xc8,0x3b,0x3b]
+# CHECK-EB: round.w.d $f6, $f8 # encoding: [0x54,0xc8,0x7b,0x3b]
+# CHECK-EB: sqrt.s $f6, $f8 # encoding: [0x54,0xc8,0x0a,0x3b]
+# CHECK-EB: sqrt.d $f6, $f8 # encoding: [0x54,0xc8,0x4a,0x3b]
+# CHECK-EB: trunc.w.s $f6, $f8 # encoding: [0x54,0xc8,0x2b,0x3b]
+# CHECK-EB: trunc.w.d $f6, $f8 # encoding: [0x54,0xc8,0x6b,0x3b]
+# CHECK-EB: abs.s $f6, $f8 # encoding: [0x54,0xc8,0x03,0x7b]
+# CHECK-EB: abs.d $f6, $f8 # encoding: [0x54,0xc8,0x23,0x7b]
+# CHECK-EB: mov.s $f6, $f8 # encoding: [0x54,0xc8,0x00,0x7b]
+# CHECK-EB: mov.d $f6, $f8 # encoding: [0x54,0xc8,0x20,0x7b]
+# CHECK-EB: neg.s $f6, $f8 # encoding: [0x54,0xc8,0x0b,0x7b]
+# CHECK-EB: neg.d $f6, $f8 # encoding: [0x54,0xc8,0x2b,0x7b]
+# CHECK-EB: cvt.d.s $f6, $f8 # encoding: [0x54,0xc8,0x13,0x7b]
+# CHECK-EB: cvt.d.w $f6, $f8 # encoding: [0x54,0xc8,0x33,0x7b]
+# CHECK-EB: cvt.s.d $f6, $f8 # encoding: [0x54,0xc8,0x1b,0x7b]
+# CHECK-EB: cvt.s.w $f6, $f8 # encoding: [0x54,0xc8,0x3b,0x7b]
+# CHECK-EB: cfc1 $6, $0 # encoding: [0x54,0xc0,0x10,0x3b]
+# CHECK-EB: ctc1 $6, $0 # encoding: [0x54,0xc0,0x18,0x3b]
+# CHECK-EB: mfc1 $6, $f8 # encoding: [0x54,0xc8,0x20,0x3b]
+# CHECK-EB: mtc1 $6, $f8 # encoding: [0x54,0xc8,0x28,0x3b]
+# CHECK-EB: movz.s $f4, $f6, $7 # encoding: [0x54,0xe6,0x20,0x78]
+# CHECK-EB: movz.d $f4, $f6, $7 # encoding: [0x54,0xe6,0x21,0x78]
+# CHECK-EB: movn.s $f4, $f6, $7 # encoding: [0x54,0xe6,0x20,0x38]
+# CHECK-EB: movn.d $f4, $f6, $7 # encoding: [0x54,0xe6,0x21,0x38]
+# CHECK-EB: movt.s $f4, $f6, $fcc0 # encoding: [0x54,0x86,0x00,0x60]
+# CHECK-EB: movt.d $f4, $f6, $fcc0 # encoding: [0x54,0x86,0x02,0x60]
+# CHECK-EB: movf.s $f4, $f6, $fcc0 # encoding: [0x54,0x86,0x00,0x20]
+# CHECK-EB: movf.d $f4, $f6, $fcc0 # encoding: [0x54,0x86,0x02,0x20]
+# CHECK-EB: madd.s $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x01]
+# CHECK-EB: madd.d $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x09]
+# CHECK-EB: msub.s $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x21]
+# CHECK-EB: msub.d $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x29]
+# CHECK-EB: nmadd.s $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x02]
+# CHECK-EB: nmadd.d $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x0a]
+# CHECK-EB: nmsub.s $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x22]
+# CHECK-EB: nmsub.d $f2, $f4, $f6, $f8 # encoding: [0x55,0x06,0x11,0x2a]
+
+ add.s $f4, $f6, $f8
+ add.d $f4, $f6, $f8
+ div.s $f4, $f6, $f8
+ div.d $f4, $f6, $f8
+ mul.s $f4, $f6, $f8
+ mul.d $f4, $f6, $f8
+ sub.s $f4, $f6, $f8
+ sub.d $f4, $f6, $f8
+ lwc1 $f2, 4($6)
+ ldc1 $f2, 4($6)
+ swc1 $f2, 4($6)
+ sdc1 $f2, 4($6)
+ bc1f 1332
+ bc1t 1332
+ luxc1 $f2, $4($6)
+ suxc1 $f2, $4($6)
+ ceil.w.s $f6, $f8
+ ceil.w.d $f6, $f8
+ cvt.w.s $f6, $f8
+ cvt.w.d $f6, $f8
+ floor.w.s $f6, $f8
+ floor.w.d $f6, $f8
+ round.w.s $f6, $f8
+ round.w.d $f6, $f8
+ sqrt.s $f6, $f8
+ sqrt.d $f6, $f8
+ trunc.w.s $f6, $f8
+ trunc.w.d $f6, $f8
+ abs.s $f6, $f8
+ abs.d $f6, $f8
+ mov.s $f6, $f8
+ mov.d $f6, $f8
+ neg.s $f6, $f8
+ neg.d $f6, $f8
+ cvt.d.s $f6, $f8
+ cvt.d.w $f6, $f8
+ cvt.s.d $f6, $f8
+ cvt.s.w $f6, $f8
+ cfc1 $6, $0
+ ctc1 $6, $0
+ mfc1 $6, $f8
+ mtc1 $6, $f8
+ movz.s $f4, $f6, $7
+ movz.d $f4, $f6, $7
+ movn.s $f4, $f6, $7
+ movn.d $f4, $f6, $7
+ movt.s $f4, $f6, $fcc0
+ movt.d $f4, $f6, $fcc0
+ movf.s $f4, $f6, $fcc0
+ movf.d $f4, $f6, $fcc0
+ madd.s $f2, $f4, $f6, $f8
+ madd.d $f2, $f4, $f6, $f8
+ msub.s $f2, $f4, $f6, $f8
+ msub.d $f2, $f4, $f6, $f8
+ nmadd.s $f2, $f4, $f6, $f8
+ nmadd.d $f2, $f4, $f6, $f8
+ nmsub.s $f2, $f4, $f6, $f8
+ nmsub.d $f2, $f4, $f6, $f8
diff --git a/test/MC/Mips/micromips-jump-instructions.s b/test/MC/Mips/micromips-jump-instructions.s
index 6f571b687911..a6c7676f8093 100644
--- a/test/MC/Mips/micromips-jump-instructions.s
+++ b/test/MC/Mips/micromips-jump-instructions.s
@@ -13,7 +13,7 @@
# CHECK-EL: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK-EL: jal 1328 # encoding: [0x00,0xf4,0x98,0x02]
# CHECK-EL: nop # encoding: [0x00,0x00,0x00,0x00]
-# CHECK-EL: jalr $6 # encoding: [0xe6,0x03,0x3c,0x0f]
+# CHECK-EL: jalr $ra, $6 # encoding: [0xe6,0x03,0x3c,0x0f]
# CHECK-EL: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK-EL: jr $7 # encoding: [0x07,0x00,0x3c,0x0f]
# CHECK-EL: nop # encoding: [0x00,0x00,0x00,0x00]
@@ -26,7 +26,7 @@
# CHECK-EB: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK-EB: jal 1328 # encoding: [0xf4,0x00,0x02,0x98]
# CHECK-EB: nop # encoding: [0x00,0x00,0x00,0x00]
-# CHECK-EB: jalr $6 # encoding: [0x03,0xe6,0x0f,0x3c]
+# CHECK-EB: jalr $ra, $6 # encoding: [0x03,0xe6,0x0f,0x3c]
# CHECK-EB: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK-EB: jr $7 # encoding: [0x00,0x07,0x0f,0x3c]
# CHECK-EB: nop # encoding: [0x00,0x00,0x00,0x00]
@@ -35,6 +35,6 @@
j 1328
jal 1328
- jalr $6
+ jalr $ra, $6
jr $7
j $7
diff --git a/test/MC/Mips/micromips-loadstore-instructions.s b/test/MC/Mips/micromips-loadstore-instructions.s
index cc7514b3231a..8a1b93babdd8 100644
--- a/test/MC/Mips/micromips-loadstore-instructions.s
+++ b/test/MC/Mips/micromips-loadstore-instructions.s
@@ -1,5 +1,7 @@
-# RUN: llvm-mc %s -triple=mipsel -show-encoding -mattr=micromips | FileCheck -check-prefix=CHECK-EL %s
-# RUN: llvm-mc %s -triple=mips -show-encoding -mattr=micromips | FileCheck -check-prefix=CHECK-EB %s
+# RUN: llvm-mc %s -triple=mipsel -show-encoding -mattr=micromips \
+# RUN: | FileCheck -check-prefix=CHECK-EL %s
+# RUN: llvm-mc %s -triple=mips -show-encoding -mattr=micromips \
+# RUN: | FileCheck -check-prefix=CHECK-EB %s
# Check that the assembler can handle the documented syntax
# for load and store instructions.
#------------------------------------------------------------------------------
@@ -15,6 +17,9 @@
# CHECK-EL: sb $5, 8($4) # encoding: [0xa4,0x18,0x08,0x00]
# CHECK-EL: sh $2, 8($4) # encoding: [0x44,0x38,0x08,0x00]
# CHECK-EL: sw $5, 4($6) # encoding: [0xa6,0xf8,0x04,0x00]
+# CHECK-EL: ll $2, 8($4) # encoding: [0x44,0x60,0x08,0x30]
+# CHECK-EL: sc $2, 8($4) # encoding: [0x44,0x60,0x08,0xb0]
+# CHECK-EL: lwu $2, 8($4) # encoding: [0x44,0x60,0x08,0xe0]
#------------------------------------------------------------------------------
# Big endian
#------------------------------------------------------------------------------
@@ -26,6 +31,9 @@
# CHECK-EB: sb $5, 8($4) # encoding: [0x18,0xa4,0x00,0x08]
# CHECK-EB: sh $2, 8($4) # encoding: [0x38,0x44,0x00,0x08]
# CHECK-EB: sw $5, 4($6) # encoding: [0xf8,0xa6,0x00,0x04]
+# CHECK-EB: ll $2, 8($4) # encoding: [0x60,0x44,0x30,0x08]
+# CHECK-EB: sc $2, 8($4) # encoding: [0x60,0x44,0xb0,0x08]
+# CHECK-EB: lwu $2, 8($4) # encoding: [0x60,0x44,0xe0,0x08]
lb $5, 8($4)
lbu $6, 8($4)
lh $2, 8($4)
@@ -34,3 +42,6 @@
sb $5, 8($4)
sh $2, 8($4)
sw $5, 4($6)
+ ll $2, 8($4)
+ sc $2, 8($4)
+ lwu $2, 8($4)
diff --git a/test/MC/Mips/micromips-long-branch.ll b/test/MC/Mips/micromips-long-branch.ll
deleted file mode 100644
index 3267f4a729ac..000000000000
--- a/test/MC/Mips/micromips-long-branch.ll
+++ /dev/null
@@ -1,16437 +0,0 @@
-; RUN: llc %s -march=mipsel -mcpu=mips32r2 -mattr=micromips -filetype=asm \
-; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
-
-@a = common global [10 x i32] zeroinitializer, align 16
-
-; Function Attrs: nounwind uwtable
-define i32 @main() #0 {
-entry:
- %retval = alloca i32, align 4
- %i = alloca i32, align 4
- store i32 0, i32* %retval
- store i32 0, i32* %i, align 4
- br label %for.cond
-
-for.cond:
- %0 = load i32* %i, align 4
- %cmp = icmp slt i32 %0, 10
- br i1 %cmp, label %for.body, label %for.end
-
-; CHECK: addiu $sp, $sp, -8
-; CHECK: sw $ra, 0($sp)
-; CHECK: lui $[[REG1:[0-9]+]], 2
-; CHECK: addiu $[[REG1]], $[[REG1]], 0
-; CHECK: addu $[[REG1]], $ra, $[[REG1]]
-; CHECK: lw $ra, 0($sp)
-; CHECK: jr $[[REG1]]
-; CHECK: addiu $sp, $sp, 8
-
-for.body:
- %1 = load i32* %i, align 4
- %2 = load i32* %i, align 4
- %idxprom = sext i32 %2 to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @a, i32 0, i64 %idxprom
- store i32 %1, i32* %arrayidx, align 4
- %nop0 = alloca i1, i1 0
- %nop1 = alloca i1, i1 0
- %nop2 = alloca i1, i1 0
- %nop3 = alloca i1, i1 0
- %nop4 = alloca i1, i1 0
- %nop5 = alloca i1, i1 0
- %nop6 = alloca i1, i1 0
- %nop7 = alloca i1, i1 0
- %nop8 = alloca i1, i1 0
- %nop9 = alloca i1, i1 0
- %nop10 = alloca i1, i1 0
- %nop11 = alloca i1, i1 0
- %nop12 = alloca i1, i1 0
- %nop13 = alloca i1, i1 0
- %nop14 = alloca i1, i1 0
- %nop15 = alloca i1, i1 0
- %nop16 = alloca i1, i1 0
- %nop17 = alloca i1, i1 0
- %nop18 = alloca i1, i1 0
- %nop19 = alloca i1, i1 0
- %nop20 = alloca i1, i1 0
- %nop21 = alloca i1, i1 0
- %nop22 = alloca i1, i1 0
- %nop23 = alloca i1, i1 0
- %nop24 = alloca i1, i1 0
- %nop25 = alloca i1, i1 0
- %nop26 = alloca i1, i1 0
- %nop27 = alloca i1, i1 0
- %nop28 = alloca i1, i1 0
- %nop29 = alloca i1, i1 0
- %nop30 = alloca i1, i1 0
- %nop31 = alloca i1, i1 0
- %nop32 = alloca i1, i1 0
- %nop33 = alloca i1, i1 0
- %nop34 = alloca i1, i1 0
- %nop35 = alloca i1, i1 0
- %nop36 = alloca i1, i1 0
- %nop37 = alloca i1, i1 0
- %nop38 = alloca i1, i1 0
- %nop39 = alloca i1, i1 0
- %nop40 = alloca i1, i1 0
- %nop41 = alloca i1, i1 0
- %nop42 = alloca i1, i1 0
- %nop43 = alloca i1, i1 0
- %nop44 = alloca i1, i1 0
- %nop45 = alloca i1, i1 0
- %nop46 = alloca i1, i1 0
- %nop47 = alloca i1, i1 0
- %nop48 = alloca i1, i1 0
- %nop49 = alloca i1, i1 0
- %nop50 = alloca i1, i1 0
- %nop51 = alloca i1, i1 0
- %nop52 = alloca i1, i1 0
- %nop53 = alloca i1, i1 0
- %nop54 = alloca i1, i1 0
- %nop55 = alloca i1, i1 0
- %nop56 = alloca i1, i1 0
- %nop57 = alloca i1, i1 0
- %nop58 = alloca i1, i1 0
- %nop59 = alloca i1, i1 0
- %nop60 = alloca i1, i1 0
- %nop61 = alloca i1, i1 0
- %nop62 = alloca i1, i1 0
- %nop63 = alloca i1, i1 0
- %nop64 = alloca i1, i1 0
- %nop65 = alloca i1, i1 0
- %nop66 = alloca i1, i1 0
- %nop67 = alloca i1, i1 0
- %nop68 = alloca i1, i1 0
- %nop69 = alloca i1, i1 0
- %nop70 = alloca i1, i1 0
- %nop71 = alloca i1, i1 0
- %nop72 = alloca i1, i1 0
- %nop73 = alloca i1, i1 0
- %nop74 = alloca i1, i1 0
- %nop75 = alloca i1, i1 0
- %nop76 = alloca i1, i1 0
- %nop77 = alloca i1, i1 0
- %nop78 = alloca i1, i1 0
- %nop79 = alloca i1, i1 0
- %nop80 = alloca i1, i1 0
- %nop81 = alloca i1, i1 0
- %nop82 = alloca i1, i1 0
- %nop83 = alloca i1, i1 0
- %nop84 = alloca i1, i1 0
- %nop85 = alloca i1, i1 0
- %nop86 = alloca i1, i1 0
- %nop87 = alloca i1, i1 0
- %nop88 = alloca i1, i1 0
- %nop89 = alloca i1, i1 0
- %nop90 = alloca i1, i1 0
- %nop91 = alloca i1, i1 0
- %nop92 = alloca i1, i1 0
- %nop93 = alloca i1, i1 0
- %nop94 = alloca i1, i1 0
- %nop95 = alloca i1, i1 0
- %nop96 = alloca i1, i1 0
- %nop97 = alloca i1, i1 0
- %nop98 = alloca i1, i1 0
- %nop99 = alloca i1, i1 0
- %nop100 = alloca i1, i1 0
- %nop101 = alloca i1, i1 0
- %nop102 = alloca i1, i1 0
- %nop103 = alloca i1, i1 0
- %nop104 = alloca i1, i1 0
- %nop105 = alloca i1, i1 0
- %nop106 = alloca i1, i1 0
- %nop107 = alloca i1, i1 0
- %nop108 = alloca i1, i1 0
- %nop109 = alloca i1, i1 0
- %nop110 = alloca i1, i1 0
- %nop111 = alloca i1, i1 0
- %nop112 = alloca i1, i1 0
- %nop113 = alloca i1, i1 0
- %nop114 = alloca i1, i1 0
- %nop115 = alloca i1, i1 0
- %nop116 = alloca i1, i1 0
- %nop117 = alloca i1, i1 0
- %nop118 = alloca i1, i1 0
- %nop119 = alloca i1, i1 0
- %nop120 = alloca i1, i1 0
- %nop121 = alloca i1, i1 0
- %nop122 = alloca i1, i1 0
- %nop123 = alloca i1, i1 0
- %nop124 = alloca i1, i1 0
- %nop125 = alloca i1, i1 0
- %nop126 = alloca i1, i1 0
- %nop127 = alloca i1, i1 0
- %nop128 = alloca i1, i1 0
- %nop129 = alloca i1, i1 0
- %nop130 = alloca i1, i1 0
- %nop131 = alloca i1, i1 0
- %nop132 = alloca i1, i1 0
- %nop133 = alloca i1, i1 0
- %nop134 = alloca i1, i1 0
- %nop135 = alloca i1, i1 0
- %nop136 = alloca i1, i1 0
- %nop137 = alloca i1, i1 0
- %nop138 = alloca i1, i1 0
- %nop139 = alloca i1, i1 0
- %nop140 = alloca i1, i1 0
- %nop141 = alloca i1, i1 0
- %nop142 = alloca i1, i1 0
- %nop143 = alloca i1, i1 0
- %nop144 = alloca i1, i1 0
- %nop145 = alloca i1, i1 0
- %nop146 = alloca i1, i1 0
- %nop147 = alloca i1, i1 0
- %nop148 = alloca i1, i1 0
- %nop149 = alloca i1, i1 0
- %nop150 = alloca i1, i1 0
- %nop151 = alloca i1, i1 0
- %nop152 = alloca i1, i1 0
- %nop153 = alloca i1, i1 0
- %nop154 = alloca i1, i1 0
- %nop155 = alloca i1, i1 0
- %nop156 = alloca i1, i1 0
- %nop157 = alloca i1, i1 0
- %nop158 = alloca i1, i1 0
- %nop159 = alloca i1, i1 0
- %nop160 = alloca i1, i1 0
- %nop161 = alloca i1, i1 0
- %nop162 = alloca i1, i1 0
- %nop163 = alloca i1, i1 0
- %nop164 = alloca i1, i1 0
- %nop165 = alloca i1, i1 0
- %nop166 = alloca i1, i1 0
- %nop167 = alloca i1, i1 0
- %nop168 = alloca i1, i1 0
- %nop169 = alloca i1, i1 0
- %nop170 = alloca i1, i1 0
- %nop171 = alloca i1, i1 0
- %nop172 = alloca i1, i1 0
- %nop173 = alloca i1, i1 0
- %nop174 = alloca i1, i1 0
- %nop175 = alloca i1, i1 0
- %nop176 = alloca i1, i1 0
- %nop177 = alloca i1, i1 0
- %nop178 = alloca i1, i1 0
- %nop179 = alloca i1, i1 0
- %nop180 = alloca i1, i1 0
- %nop181 = alloca i1, i1 0
- %nop182 = alloca i1, i1 0
- %nop183 = alloca i1, i1 0
- %nop184 = alloca i1, i1 0
- %nop185 = alloca i1, i1 0
- %nop186 = alloca i1, i1 0
- %nop187 = alloca i1, i1 0
- %nop188 = alloca i1, i1 0
- %nop189 = alloca i1, i1 0
- %nop190 = alloca i1, i1 0
- %nop191 = alloca i1, i1 0
- %nop192 = alloca i1, i1 0
- %nop193 = alloca i1, i1 0
- %nop194 = alloca i1, i1 0
- %nop195 = alloca i1, i1 0
- %nop196 = alloca i1, i1 0
- %nop197 = alloca i1, i1 0
- %nop198 = alloca i1, i1 0
- %nop199 = alloca i1, i1 0
- %nop200 = alloca i1, i1 0
- %nop201 = alloca i1, i1 0
- %nop202 = alloca i1, i1 0
- %nop203 = alloca i1, i1 0
- %nop204 = alloca i1, i1 0
- %nop205 = alloca i1, i1 0
- %nop206 = alloca i1, i1 0
- %nop207 = alloca i1, i1 0
- %nop208 = alloca i1, i1 0
- %nop209 = alloca i1, i1 0
- %nop210 = alloca i1, i1 0
- %nop211 = alloca i1, i1 0
- %nop212 = alloca i1, i1 0
- %nop213 = alloca i1, i1 0
- %nop214 = alloca i1, i1 0
- %nop215 = alloca i1, i1 0
- %nop216 = alloca i1, i1 0
- %nop217 = alloca i1, i1 0
- %nop218 = alloca i1, i1 0
- %nop219 = alloca i1, i1 0
- %nop220 = alloca i1, i1 0
- %nop221 = alloca i1, i1 0
- %nop222 = alloca i1, i1 0
- %nop223 = alloca i1, i1 0
- %nop224 = alloca i1, i1 0
- %nop225 = alloca i1, i1 0
- %nop226 = alloca i1, i1 0
- %nop227 = alloca i1, i1 0
- %nop228 = alloca i1, i1 0
- %nop229 = alloca i1, i1 0
- %nop230 = alloca i1, i1 0
- %nop231 = alloca i1, i1 0
- %nop232 = alloca i1, i1 0
- %nop233 = alloca i1, i1 0
- %nop234 = alloca i1, i1 0
- %nop235 = alloca i1, i1 0
- %nop236 = alloca i1, i1 0
- %nop237 = alloca i1, i1 0
- %nop238 = alloca i1, i1 0
- %nop239 = alloca i1, i1 0
- %nop240 = alloca i1, i1 0
- %nop241 = alloca i1, i1 0
- %nop242 = alloca i1, i1 0
- %nop243 = alloca i1, i1 0
- %nop244 = alloca i1, i1 0
- %nop245 = alloca i1, i1 0
- %nop246 = alloca i1, i1 0
- %nop247 = alloca i1, i1 0
- %nop248 = alloca i1, i1 0
- %nop249 = alloca i1, i1 0
- %nop250 = alloca i1, i1 0
- %nop251 = alloca i1, i1 0
- %nop252 = alloca i1, i1 0
- %nop253 = alloca i1, i1 0
- %nop254 = alloca i1, i1 0
- %nop255 = alloca i1, i1 0
- %nop256 = alloca i1, i1 0
- %nop257 = alloca i1, i1 0
- %nop258 = alloca i1, i1 0
- %nop259 = alloca i1, i1 0
- %nop260 = alloca i1, i1 0
- %nop261 = alloca i1, i1 0
- %nop262 = alloca i1, i1 0
- %nop263 = alloca i1, i1 0
- %nop264 = alloca i1, i1 0
- %nop265 = alloca i1, i1 0
- %nop266 = alloca i1, i1 0
- %nop267 = alloca i1, i1 0
- %nop268 = alloca i1, i1 0
- %nop269 = alloca i1, i1 0
- %nop270 = alloca i1, i1 0
- %nop271 = alloca i1, i1 0
- %nop272 = alloca i1, i1 0
- %nop273 = alloca i1, i1 0
- %nop274 = alloca i1, i1 0
- %nop275 = alloca i1, i1 0
- %nop276 = alloca i1, i1 0
- %nop277 = alloca i1, i1 0
- %nop278 = alloca i1, i1 0
- %nop279 = alloca i1, i1 0
- %nop280 = alloca i1, i1 0
- %nop281 = alloca i1, i1 0
- %nop282 = alloca i1, i1 0
- %nop283 = alloca i1, i1 0
- %nop284 = alloca i1, i1 0
- %nop285 = alloca i1, i1 0
- %nop286 = alloca i1, i1 0
- %nop287 = alloca i1, i1 0
- %nop288 = alloca i1, i1 0
- %nop289 = alloca i1, i1 0
- %nop290 = alloca i1, i1 0
- %nop291 = alloca i1, i1 0
- %nop292 = alloca i1, i1 0
- %nop293 = alloca i1, i1 0
- %nop294 = alloca i1, i1 0
- %nop295 = alloca i1, i1 0
- %nop296 = alloca i1, i1 0
- %nop297 = alloca i1, i1 0
- %nop298 = alloca i1, i1 0
- %nop299 = alloca i1, i1 0
- %nop300 = alloca i1, i1 0
- %nop301 = alloca i1, i1 0
- %nop302 = alloca i1, i1 0
- %nop303 = alloca i1, i1 0
- %nop304 = alloca i1, i1 0
- %nop305 = alloca i1, i1 0
- %nop306 = alloca i1, i1 0
- %nop307 = alloca i1, i1 0
- %nop308 = alloca i1, i1 0
- %nop309 = alloca i1, i1 0
- %nop310 = alloca i1, i1 0
- %nop311 = alloca i1, i1 0
- %nop312 = alloca i1, i1 0
- %nop313 = alloca i1, i1 0
- %nop314 = alloca i1, i1 0
- %nop315 = alloca i1, i1 0
- %nop316 = alloca i1, i1 0
- %nop317 = alloca i1, i1 0
- %nop318 = alloca i1, i1 0
- %nop319 = alloca i1, i1 0
- %nop320 = alloca i1, i1 0
- %nop321 = alloca i1, i1 0
- %nop322 = alloca i1, i1 0
- %nop323 = alloca i1, i1 0
- %nop324 = alloca i1, i1 0
- %nop325 = alloca i1, i1 0
- %nop326 = alloca i1, i1 0
- %nop327 = alloca i1, i1 0
- %nop328 = alloca i1, i1 0
- %nop329 = alloca i1, i1 0
- %nop330 = alloca i1, i1 0
- %nop331 = alloca i1, i1 0
- %nop332 = alloca i1, i1 0
- %nop333 = alloca i1, i1 0
- %nop334 = alloca i1, i1 0
- %nop335 = alloca i1, i1 0
- %nop336 = alloca i1, i1 0
- %nop337 = alloca i1, i1 0
- %nop338 = alloca i1, i1 0
- %nop339 = alloca i1, i1 0
- %nop340 = alloca i1, i1 0
- %nop341 = alloca i1, i1 0
- %nop342 = alloca i1, i1 0
- %nop343 = alloca i1, i1 0
- %nop344 = alloca i1, i1 0
- %nop345 = alloca i1, i1 0
- %nop346 = alloca i1, i1 0
- %nop347 = alloca i1, i1 0
- %nop348 = alloca i1, i1 0
- %nop349 = alloca i1, i1 0
- %nop350 = alloca i1, i1 0
- %nop351 = alloca i1, i1 0
- %nop352 = alloca i1, i1 0
- %nop353 = alloca i1, i1 0
- %nop354 = alloca i1, i1 0
- %nop355 = alloca i1, i1 0
- %nop356 = alloca i1, i1 0
- %nop357 = alloca i1, i1 0
- %nop358 = alloca i1, i1 0
- %nop359 = alloca i1, i1 0
- %nop360 = alloca i1, i1 0
- %nop361 = alloca i1, i1 0
- %nop362 = alloca i1, i1 0
- %nop363 = alloca i1, i1 0
- %nop364 = alloca i1, i1 0
- %nop365 = alloca i1, i1 0
- %nop366 = alloca i1, i1 0
- %nop367 = alloca i1, i1 0
- %nop368 = alloca i1, i1 0
- %nop369 = alloca i1, i1 0
- %nop370 = alloca i1, i1 0
- %nop371 = alloca i1, i1 0
- %nop372 = alloca i1, i1 0
- %nop373 = alloca i1, i1 0
- %nop374 = alloca i1, i1 0
- %nop375 = alloca i1, i1 0
- %nop376 = alloca i1, i1 0
- %nop377 = alloca i1, i1 0
- %nop378 = alloca i1, i1 0
- %nop379 = alloca i1, i1 0
- %nop380 = alloca i1, i1 0
- %nop381 = alloca i1, i1 0
- %nop382 = alloca i1, i1 0
- %nop383 = alloca i1, i1 0
- %nop384 = alloca i1, i1 0
- %nop385 = alloca i1, i1 0
- %nop386 = alloca i1, i1 0
- %nop387 = alloca i1, i1 0
- %nop388 = alloca i1, i1 0
- %nop389 = alloca i1, i1 0
- %nop390 = alloca i1, i1 0
- %nop391 = alloca i1, i1 0
- %nop392 = alloca i1, i1 0
- %nop393 = alloca i1, i1 0
- %nop394 = alloca i1, i1 0
- %nop395 = alloca i1, i1 0
- %nop396 = alloca i1, i1 0
- %nop397 = alloca i1, i1 0
- %nop398 = alloca i1, i1 0
- %nop399 = alloca i1, i1 0
- %nop400 = alloca i1, i1 0
- %nop401 = alloca i1, i1 0
- %nop402 = alloca i1, i1 0
- %nop403 = alloca i1, i1 0
- %nop404 = alloca i1, i1 0
- %nop405 = alloca i1, i1 0
- %nop406 = alloca i1, i1 0
- %nop407 = alloca i1, i1 0
- %nop408 = alloca i1, i1 0
- %nop409 = alloca i1, i1 0
- %nop410 = alloca i1, i1 0
- %nop411 = alloca i1, i1 0
- %nop412 = alloca i1, i1 0
- %nop413 = alloca i1, i1 0
- %nop414 = alloca i1, i1 0
- %nop415 = alloca i1, i1 0
- %nop416 = alloca i1, i1 0
- %nop417 = alloca i1, i1 0
- %nop418 = alloca i1, i1 0
- %nop419 = alloca i1, i1 0
- %nop420 = alloca i1, i1 0
- %nop421 = alloca i1, i1 0
- %nop422 = alloca i1, i1 0
- %nop423 = alloca i1, i1 0
- %nop424 = alloca i1, i1 0
- %nop425 = alloca i1, i1 0
- %nop426 = alloca i1, i1 0
- %nop427 = alloca i1, i1 0
- %nop428 = alloca i1, i1 0
- %nop429 = alloca i1, i1 0
- %nop430 = alloca i1, i1 0
- %nop431 = alloca i1, i1 0
- %nop432 = alloca i1, i1 0
- %nop433 = alloca i1, i1 0
- %nop434 = alloca i1, i1 0
- %nop435 = alloca i1, i1 0
- %nop436 = alloca i1, i1 0
- %nop437 = alloca i1, i1 0
- %nop438 = alloca i1, i1 0
- %nop439 = alloca i1, i1 0
- %nop440 = alloca i1, i1 0
- %nop441 = alloca i1, i1 0
- %nop442 = alloca i1, i1 0
- %nop443 = alloca i1, i1 0
- %nop444 = alloca i1, i1 0
- %nop445 = alloca i1, i1 0
- %nop446 = alloca i1, i1 0
- %nop447 = alloca i1, i1 0
- %nop448 = alloca i1, i1 0
- %nop449 = alloca i1, i1 0
- %nop450 = alloca i1, i1 0
- %nop451 = alloca i1, i1 0
- %nop452 = alloca i1, i1 0
- %nop453 = alloca i1, i1 0
- %nop454 = alloca i1, i1 0
- %nop455 = alloca i1, i1 0
- %nop456 = alloca i1, i1 0
- %nop457 = alloca i1, i1 0
- %nop458 = alloca i1, i1 0
- %nop459 = alloca i1, i1 0
- %nop460 = alloca i1, i1 0
- %nop461 = alloca i1, i1 0
- %nop462 = alloca i1, i1 0
- %nop463 = alloca i1, i1 0
- %nop464 = alloca i1, i1 0
- %nop465 = alloca i1, i1 0
- %nop466 = alloca i1, i1 0
- %nop467 = alloca i1, i1 0
- %nop468 = alloca i1, i1 0
- %nop469 = alloca i1, i1 0
- %nop470 = alloca i1, i1 0
- %nop471 = alloca i1, i1 0
- %nop472 = alloca i1, i1 0
- %nop473 = alloca i1, i1 0
- %nop474 = alloca i1, i1 0
- %nop475 = alloca i1, i1 0
- %nop476 = alloca i1, i1 0
- %nop477 = alloca i1, i1 0
- %nop478 = alloca i1, i1 0
- %nop479 = alloca i1, i1 0
- %nop480 = alloca i1, i1 0
- %nop481 = alloca i1, i1 0
- %nop482 = alloca i1, i1 0
- %nop483 = alloca i1, i1 0
- %nop484 = alloca i1, i1 0
- %nop485 = alloca i1, i1 0
- %nop486 = alloca i1, i1 0
- %nop487 = alloca i1, i1 0
- %nop488 = alloca i1, i1 0
- %nop489 = alloca i1, i1 0
- %nop490 = alloca i1, i1 0
- %nop491 = alloca i1, i1 0
- %nop492 = alloca i1, i1 0
- %nop493 = alloca i1, i1 0
- %nop494 = alloca i1, i1 0
- %nop495 = alloca i1, i1 0
- %nop496 = alloca i1, i1 0
- %nop497 = alloca i1, i1 0
- %nop498 = alloca i1, i1 0
- %nop499 = alloca i1, i1 0
- %nop500 = alloca i1, i1 0
- %nop501 = alloca i1, i1 0
- %nop502 = alloca i1, i1 0
- %nop503 = alloca i1, i1 0
- %nop504 = alloca i1, i1 0
- %nop505 = alloca i1, i1 0
- %nop506 = alloca i1, i1 0
- %nop507 = alloca i1, i1 0
- %nop508 = alloca i1, i1 0
- %nop509 = alloca i1, i1 0
- %nop510 = alloca i1, i1 0
- %nop511 = alloca i1, i1 0
- %nop512 = alloca i1, i1 0
- %nop513 = alloca i1, i1 0
- %nop514 = alloca i1, i1 0
- %nop515 = alloca i1, i1 0
- %nop516 = alloca i1, i1 0
- %nop517 = alloca i1, i1 0
- %nop518 = alloca i1, i1 0
- %nop519 = alloca i1, i1 0
- %nop520 = alloca i1, i1 0
- %nop521 = alloca i1, i1 0
- %nop522 = alloca i1, i1 0
- %nop523 = alloca i1, i1 0
- %nop524 = alloca i1, i1 0
- %nop525 = alloca i1, i1 0
- %nop526 = alloca i1, i1 0
- %nop527 = alloca i1, i1 0
- %nop528 = alloca i1, i1 0
- %nop529 = alloca i1, i1 0
- %nop530 = alloca i1, i1 0
- %nop531 = alloca i1, i1 0
- %nop532 = alloca i1, i1 0
- %nop533 = alloca i1, i1 0
- %nop534 = alloca i1, i1 0
- %nop535 = alloca i1, i1 0
- %nop536 = alloca i1, i1 0
- %nop537 = alloca i1, i1 0
- %nop538 = alloca i1, i1 0
- %nop539 = alloca i1, i1 0
- %nop540 = alloca i1, i1 0
- %nop541 = alloca i1, i1 0
- %nop542 = alloca i1, i1 0
- %nop543 = alloca i1, i1 0
- %nop544 = alloca i1, i1 0
- %nop545 = alloca i1, i1 0
- %nop546 = alloca i1, i1 0
- %nop547 = alloca i1, i1 0
- %nop548 = alloca i1, i1 0
- %nop549 = alloca i1, i1 0
- %nop550 = alloca i1, i1 0
- %nop551 = alloca i1, i1 0
- %nop552 = alloca i1, i1 0
- %nop553 = alloca i1, i1 0
- %nop554 = alloca i1, i1 0
- %nop555 = alloca i1, i1 0
- %nop556 = alloca i1, i1 0
- %nop557 = alloca i1, i1 0
- %nop558 = alloca i1, i1 0
- %nop559 = alloca i1, i1 0
- %nop560 = alloca i1, i1 0
- %nop561 = alloca i1, i1 0
- %nop562 = alloca i1, i1 0
- %nop563 = alloca i1, i1 0
- %nop564 = alloca i1, i1 0
- %nop565 = alloca i1, i1 0
- %nop566 = alloca i1, i1 0
- %nop567 = alloca i1, i1 0
- %nop568 = alloca i1, i1 0
- %nop569 = alloca i1, i1 0
- %nop570 = alloca i1, i1 0
- %nop571 = alloca i1, i1 0
- %nop572 = alloca i1, i1 0
- %nop573 = alloca i1, i1 0
- %nop574 = alloca i1, i1 0
- %nop575 = alloca i1, i1 0
- %nop576 = alloca i1, i1 0
- %nop577 = alloca i1, i1 0
- %nop578 = alloca i1, i1 0
- %nop579 = alloca i1, i1 0
- %nop580 = alloca i1, i1 0
- %nop581 = alloca i1, i1 0
- %nop582 = alloca i1, i1 0
- %nop583 = alloca i1, i1 0
- %nop584 = alloca i1, i1 0
- %nop585 = alloca i1, i1 0
- %nop586 = alloca i1, i1 0
- %nop587 = alloca i1, i1 0
- %nop588 = alloca i1, i1 0
- %nop589 = alloca i1, i1 0
- %nop590 = alloca i1, i1 0
- %nop591 = alloca i1, i1 0
- %nop592 = alloca i1, i1 0
- %nop593 = alloca i1, i1 0
- %nop594 = alloca i1, i1 0
- %nop595 = alloca i1, i1 0
- %nop596 = alloca i1, i1 0
- %nop597 = alloca i1, i1 0
- %nop598 = alloca i1, i1 0
- %nop599 = alloca i1, i1 0
- %nop600 = alloca i1, i1 0
- %nop601 = alloca i1, i1 0
- %nop602 = alloca i1, i1 0
- %nop603 = alloca i1, i1 0
- %nop604 = alloca i1, i1 0
- %nop605 = alloca i1, i1 0
- %nop606 = alloca i1, i1 0
- %nop607 = alloca i1, i1 0
- %nop608 = alloca i1, i1 0
- %nop609 = alloca i1, i1 0
- %nop610 = alloca i1, i1 0
- %nop611 = alloca i1, i1 0
- %nop612 = alloca i1, i1 0
- %nop613 = alloca i1, i1 0
- %nop614 = alloca i1, i1 0
- %nop615 = alloca i1, i1 0
- %nop616 = alloca i1, i1 0
- %nop617 = alloca i1, i1 0
- %nop618 = alloca i1, i1 0
- %nop619 = alloca i1, i1 0
- %nop620 = alloca i1, i1 0
- %nop621 = alloca i1, i1 0
- %nop622 = alloca i1, i1 0
- %nop623 = alloca i1, i1 0
- %nop624 = alloca i1, i1 0
- %nop625 = alloca i1, i1 0
- %nop626 = alloca i1, i1 0
- %nop627 = alloca i1, i1 0
- %nop628 = alloca i1, i1 0
- %nop629 = alloca i1, i1 0
- %nop630 = alloca i1, i1 0
- %nop631 = alloca i1, i1 0
- %nop632 = alloca i1, i1 0
- %nop633 = alloca i1, i1 0
- %nop634 = alloca i1, i1 0
- %nop635 = alloca i1, i1 0
- %nop636 = alloca i1, i1 0
- %nop637 = alloca i1, i1 0
- %nop638 = alloca i1, i1 0
- %nop639 = alloca i1, i1 0
- %nop640 = alloca i1, i1 0
- %nop641 = alloca i1, i1 0
- %nop642 = alloca i1, i1 0
- %nop643 = alloca i1, i1 0
- %nop644 = alloca i1, i1 0
- %nop645 = alloca i1, i1 0
- %nop646 = alloca i1, i1 0
- %nop647 = alloca i1, i1 0
- %nop648 = alloca i1, i1 0
- %nop649 = alloca i1, i1 0
- %nop650 = alloca i1, i1 0
- %nop651 = alloca i1, i1 0
- %nop652 = alloca i1, i1 0
- %nop653 = alloca i1, i1 0
- %nop654 = alloca i1, i1 0
- %nop655 = alloca i1, i1 0
- %nop656 = alloca i1, i1 0
- %nop657 = alloca i1, i1 0
- %nop658 = alloca i1, i1 0
- %nop659 = alloca i1, i1 0
- %nop660 = alloca i1, i1 0
- %nop661 = alloca i1, i1 0
- %nop662 = alloca i1, i1 0
- %nop663 = alloca i1, i1 0
- %nop664 = alloca i1, i1 0
- %nop665 = alloca i1, i1 0
- %nop666 = alloca i1, i1 0
- %nop667 = alloca i1, i1 0
- %nop668 = alloca i1, i1 0
- %nop669 = alloca i1, i1 0
- %nop670 = alloca i1, i1 0
- %nop671 = alloca i1, i1 0
- %nop672 = alloca i1, i1 0
- %nop673 = alloca i1, i1 0
- %nop674 = alloca i1, i1 0
- %nop675 = alloca i1, i1 0
- %nop676 = alloca i1, i1 0
- %nop677 = alloca i1, i1 0
- %nop678 = alloca i1, i1 0
- %nop679 = alloca i1, i1 0
- %nop680 = alloca i1, i1 0
- %nop681 = alloca i1, i1 0
- %nop682 = alloca i1, i1 0
- %nop683 = alloca i1, i1 0
- %nop684 = alloca i1, i1 0
- %nop685 = alloca i1, i1 0
- %nop686 = alloca i1, i1 0
- %nop687 = alloca i1, i1 0
- %nop688 = alloca i1, i1 0
- %nop689 = alloca i1, i1 0
- %nop690 = alloca i1, i1 0
- %nop691 = alloca i1, i1 0
- %nop692 = alloca i1, i1 0
- %nop693 = alloca i1, i1 0
- %nop694 = alloca i1, i1 0
- %nop695 = alloca i1, i1 0
- %nop696 = alloca i1, i1 0
- %nop697 = alloca i1, i1 0
- %nop698 = alloca i1, i1 0
- %nop699 = alloca i1, i1 0
- %nop700 = alloca i1, i1 0
- %nop701 = alloca i1, i1 0
- %nop702 = alloca i1, i1 0
- %nop703 = alloca i1, i1 0
- %nop704 = alloca i1, i1 0
- %nop705 = alloca i1, i1 0
- %nop706 = alloca i1, i1 0
- %nop707 = alloca i1, i1 0
- %nop708 = alloca i1, i1 0
- %nop709 = alloca i1, i1 0
- %nop710 = alloca i1, i1 0
- %nop711 = alloca i1, i1 0
- %nop712 = alloca i1, i1 0
- %nop713 = alloca i1, i1 0
- %nop714 = alloca i1, i1 0
- %nop715 = alloca i1, i1 0
- %nop716 = alloca i1, i1 0
- %nop717 = alloca i1, i1 0
- %nop718 = alloca i1, i1 0
- %nop719 = alloca i1, i1 0
- %nop720 = alloca i1, i1 0
- %nop721 = alloca i1, i1 0
- %nop722 = alloca i1, i1 0
- %nop723 = alloca i1, i1 0
- %nop724 = alloca i1, i1 0
- %nop725 = alloca i1, i1 0
- %nop726 = alloca i1, i1 0
- %nop727 = alloca i1, i1 0
- %nop728 = alloca i1, i1 0
- %nop729 = alloca i1, i1 0
- %nop730 = alloca i1, i1 0
- %nop731 = alloca i1, i1 0
- %nop732 = alloca i1, i1 0
- %nop733 = alloca i1, i1 0
- %nop734 = alloca i1, i1 0
- %nop735 = alloca i1, i1 0
- %nop736 = alloca i1, i1 0
- %nop737 = alloca i1, i1 0
- %nop738 = alloca i1, i1 0
- %nop739 = alloca i1, i1 0
- %nop740 = alloca i1, i1 0
- %nop741 = alloca i1, i1 0
- %nop742 = alloca i1, i1 0
- %nop743 = alloca i1, i1 0
- %nop744 = alloca i1, i1 0
- %nop745 = alloca i1, i1 0
- %nop746 = alloca i1, i1 0
- %nop747 = alloca i1, i1 0
- %nop748 = alloca i1, i1 0
- %nop749 = alloca i1, i1 0
- %nop750 = alloca i1, i1 0
- %nop751 = alloca i1, i1 0
- %nop752 = alloca i1, i1 0
- %nop753 = alloca i1, i1 0
- %nop754 = alloca i1, i1 0
- %nop755 = alloca i1, i1 0
- %nop756 = alloca i1, i1 0
- %nop757 = alloca i1, i1 0
- %nop758 = alloca i1, i1 0
- %nop759 = alloca i1, i1 0
- %nop760 = alloca i1, i1 0
- %nop761 = alloca i1, i1 0
- %nop762 = alloca i1, i1 0
- %nop763 = alloca i1, i1 0
- %nop764 = alloca i1, i1 0
- %nop765 = alloca i1, i1 0
- %nop766 = alloca i1, i1 0
- %nop767 = alloca i1, i1 0
- %nop768 = alloca i1, i1 0
- %nop769 = alloca i1, i1 0
- %nop770 = alloca i1, i1 0
- %nop771 = alloca i1, i1 0
- %nop772 = alloca i1, i1 0
- %nop773 = alloca i1, i1 0
- %nop774 = alloca i1, i1 0
- %nop775 = alloca i1, i1 0
- %nop776 = alloca i1, i1 0
- %nop777 = alloca i1, i1 0
- %nop778 = alloca i1, i1 0
- %nop779 = alloca i1, i1 0
- %nop780 = alloca i1, i1 0
- %nop781 = alloca i1, i1 0
- %nop782 = alloca i1, i1 0
- %nop783 = alloca i1, i1 0
- %nop784 = alloca i1, i1 0
- %nop785 = alloca i1, i1 0
- %nop786 = alloca i1, i1 0
- %nop787 = alloca i1, i1 0
- %nop788 = alloca i1, i1 0
- %nop789 = alloca i1, i1 0
- %nop790 = alloca i1, i1 0
- %nop791 = alloca i1, i1 0
- %nop792 = alloca i1, i1 0
- %nop793 = alloca i1, i1 0
- %nop794 = alloca i1, i1 0
- %nop795 = alloca i1, i1 0
- %nop796 = alloca i1, i1 0
- %nop797 = alloca i1, i1 0
- %nop798 = alloca i1, i1 0
- %nop799 = alloca i1, i1 0
- %nop800 = alloca i1, i1 0
- %nop801 = alloca i1, i1 0
- %nop802 = alloca i1, i1 0
- %nop803 = alloca i1, i1 0
- %nop804 = alloca i1, i1 0
- %nop805 = alloca i1, i1 0
- %nop806 = alloca i1, i1 0
- %nop807 = alloca i1, i1 0
- %nop808 = alloca i1, i1 0
- %nop809 = alloca i1, i1 0
- %nop810 = alloca i1, i1 0
- %nop811 = alloca i1, i1 0
- %nop812 = alloca i1, i1 0
- %nop813 = alloca i1, i1 0
- %nop814 = alloca i1, i1 0
- %nop815 = alloca i1, i1 0
- %nop816 = alloca i1, i1 0
- %nop817 = alloca i1, i1 0
- %nop818 = alloca i1, i1 0
- %nop819 = alloca i1, i1 0
- %nop820 = alloca i1, i1 0
- %nop821 = alloca i1, i1 0
- %nop822 = alloca i1, i1 0
- %nop823 = alloca i1, i1 0
- %nop824 = alloca i1, i1 0
- %nop825 = alloca i1, i1 0
- %nop826 = alloca i1, i1 0
- %nop827 = alloca i1, i1 0
- %nop828 = alloca i1, i1 0
- %nop829 = alloca i1, i1 0
- %nop830 = alloca i1, i1 0
- %nop831 = alloca i1, i1 0
- %nop832 = alloca i1, i1 0
- %nop833 = alloca i1, i1 0
- %nop834 = alloca i1, i1 0
- %nop835 = alloca i1, i1 0
- %nop836 = alloca i1, i1 0
- %nop837 = alloca i1, i1 0
- %nop838 = alloca i1, i1 0
- %nop839 = alloca i1, i1 0
- %nop840 = alloca i1, i1 0
- %nop841 = alloca i1, i1 0
- %nop842 = alloca i1, i1 0
- %nop843 = alloca i1, i1 0
- %nop844 = alloca i1, i1 0
- %nop845 = alloca i1, i1 0
- %nop846 = alloca i1, i1 0
- %nop847 = alloca i1, i1 0
- %nop848 = alloca i1, i1 0
- %nop849 = alloca i1, i1 0
- %nop850 = alloca i1, i1 0
- %nop851 = alloca i1, i1 0
- %nop852 = alloca i1, i1 0
- %nop853 = alloca i1, i1 0
- %nop854 = alloca i1, i1 0
- %nop855 = alloca i1, i1 0
- %nop856 = alloca i1, i1 0
- %nop857 = alloca i1, i1 0
- %nop858 = alloca i1, i1 0
- %nop859 = alloca i1, i1 0
- %nop860 = alloca i1, i1 0
- %nop861 = alloca i1, i1 0
- %nop862 = alloca i1, i1 0
- %nop863 = alloca i1, i1 0
- %nop864 = alloca i1, i1 0
- %nop865 = alloca i1, i1 0
- %nop866 = alloca i1, i1 0
- %nop867 = alloca i1, i1 0
- %nop868 = alloca i1, i1 0
- %nop869 = alloca i1, i1 0
- %nop870 = alloca i1, i1 0
- %nop871 = alloca i1, i1 0
- %nop872 = alloca i1, i1 0
- %nop873 = alloca i1, i1 0
- %nop874 = alloca i1, i1 0
- %nop875 = alloca i1, i1 0
- %nop876 = alloca i1, i1 0
- %nop877 = alloca i1, i1 0
- %nop878 = alloca i1, i1 0
- %nop879 = alloca i1, i1 0
- %nop880 = alloca i1, i1 0
- %nop881 = alloca i1, i1 0
- %nop882 = alloca i1, i1 0
- %nop883 = alloca i1, i1 0
- %nop884 = alloca i1, i1 0
- %nop885 = alloca i1, i1 0
- %nop886 = alloca i1, i1 0
- %nop887 = alloca i1, i1 0
- %nop888 = alloca i1, i1 0
- %nop889 = alloca i1, i1 0
- %nop890 = alloca i1, i1 0
- %nop891 = alloca i1, i1 0
- %nop892 = alloca i1, i1 0
- %nop893 = alloca i1, i1 0
- %nop894 = alloca i1, i1 0
- %nop895 = alloca i1, i1 0
- %nop896 = alloca i1, i1 0
- %nop897 = alloca i1, i1 0
- %nop898 = alloca i1, i1 0
- %nop899 = alloca i1, i1 0
- %nop900 = alloca i1, i1 0
- %nop901 = alloca i1, i1 0
- %nop902 = alloca i1, i1 0
- %nop903 = alloca i1, i1 0
- %nop904 = alloca i1, i1 0
- %nop905 = alloca i1, i1 0
- %nop906 = alloca i1, i1 0
- %nop907 = alloca i1, i1 0
- %nop908 = alloca i1, i1 0
- %nop909 = alloca i1, i1 0
- %nop910 = alloca i1, i1 0
- %nop911 = alloca i1, i1 0
- %nop912 = alloca i1, i1 0
- %nop913 = alloca i1, i1 0
- %nop914 = alloca i1, i1 0
- %nop915 = alloca i1, i1 0
- %nop916 = alloca i1, i1 0
- %nop917 = alloca i1, i1 0
- %nop918 = alloca i1, i1 0
- %nop919 = alloca i1, i1 0
- %nop920 = alloca i1, i1 0
- %nop921 = alloca i1, i1 0
- %nop922 = alloca i1, i1 0
- %nop923 = alloca i1, i1 0
- %nop924 = alloca i1, i1 0
- %nop925 = alloca i1, i1 0
- %nop926 = alloca i1, i1 0
- %nop927 = alloca i1, i1 0
- %nop928 = alloca i1, i1 0
- %nop929 = alloca i1, i1 0
- %nop930 = alloca i1, i1 0
- %nop931 = alloca i1, i1 0
- %nop932 = alloca i1, i1 0
- %nop933 = alloca i1, i1 0
- %nop934 = alloca i1, i1 0
- %nop935 = alloca i1, i1 0
- %nop936 = alloca i1, i1 0
- %nop937 = alloca i1, i1 0
- %nop938 = alloca i1, i1 0
- %nop939 = alloca i1, i1 0
- %nop940 = alloca i1, i1 0
- %nop941 = alloca i1, i1 0
- %nop942 = alloca i1, i1 0
- %nop943 = alloca i1, i1 0
- %nop944 = alloca i1, i1 0
- %nop945 = alloca i1, i1 0
- %nop946 = alloca i1, i1 0
- %nop947 = alloca i1, i1 0
- %nop948 = alloca i1, i1 0
- %nop949 = alloca i1, i1 0
- %nop950 = alloca i1, i1 0
- %nop951 = alloca i1, i1 0
- %nop952 = alloca i1, i1 0
- %nop953 = alloca i1, i1 0
- %nop954 = alloca i1, i1 0
- %nop955 = alloca i1, i1 0
- %nop956 = alloca i1, i1 0
- %nop957 = alloca i1, i1 0
- %nop958 = alloca i1, i1 0
- %nop959 = alloca i1, i1 0
- %nop960 = alloca i1, i1 0
- %nop961 = alloca i1, i1 0
- %nop962 = alloca i1, i1 0
- %nop963 = alloca i1, i1 0
- %nop964 = alloca i1, i1 0
- %nop965 = alloca i1, i1 0
- %nop966 = alloca i1, i1 0
- %nop967 = alloca i1, i1 0
- %nop968 = alloca i1, i1 0
- %nop969 = alloca i1, i1 0
- %nop970 = alloca i1, i1 0
- %nop971 = alloca i1, i1 0
- %nop972 = alloca i1, i1 0
- %nop973 = alloca i1, i1 0
- %nop974 = alloca i1, i1 0
- %nop975 = alloca i1, i1 0
- %nop976 = alloca i1, i1 0
- %nop977 = alloca i1, i1 0
- %nop978 = alloca i1, i1 0
- %nop979 = alloca i1, i1 0
- %nop980 = alloca i1, i1 0
- %nop981 = alloca i1, i1 0
- %nop982 = alloca i1, i1 0
- %nop983 = alloca i1, i1 0
- %nop984 = alloca i1, i1 0
- %nop985 = alloca i1, i1 0
- %nop986 = alloca i1, i1 0
- %nop987 = alloca i1, i1 0
- %nop988 = alloca i1, i1 0
- %nop989 = alloca i1, i1 0
- %nop990 = alloca i1, i1 0
- %nop991 = alloca i1, i1 0
- %nop992 = alloca i1, i1 0
- %nop993 = alloca i1, i1 0
- %nop994 = alloca i1, i1 0
- %nop995 = alloca i1, i1 0
- %nop996 = alloca i1, i1 0
- %nop997 = alloca i1, i1 0
- %nop998 = alloca i1, i1 0
- %nop999 = alloca i1, i1 0
- %nop1000 = alloca i1, i1 0
- %nop1001 = alloca i1, i1 0
- %nop1002 = alloca i1, i1 0
- %nop1003 = alloca i1, i1 0
- %nop1004 = alloca i1, i1 0
- %nop1005 = alloca i1, i1 0
- %nop1006 = alloca i1, i1 0
- %nop1007 = alloca i1, i1 0
- %nop1008 = alloca i1, i1 0
- %nop1009 = alloca i1, i1 0
- %nop1010 = alloca i1, i1 0
- %nop1011 = alloca i1, i1 0
- %nop1012 = alloca i1, i1 0
- %nop1013 = alloca i1, i1 0
- %nop1014 = alloca i1, i1 0
- %nop1015 = alloca i1, i1 0
- %nop1016 = alloca i1, i1 0
- %nop1017 = alloca i1, i1 0
- %nop1018 = alloca i1, i1 0
- %nop1019 = alloca i1, i1 0
- %nop1020 = alloca i1, i1 0
- %nop1021 = alloca i1, i1 0
- %nop1022 = alloca i1, i1 0
- %nop1023 = alloca i1, i1 0
- %nop1024 = alloca i1, i1 0
- %nop1025 = alloca i1, i1 0
- %nop1026 = alloca i1, i1 0
- %nop1027 = alloca i1, i1 0
- %nop1028 = alloca i1, i1 0
- %nop1029 = alloca i1, i1 0
- %nop1030 = alloca i1, i1 0
- %nop1031 = alloca i1, i1 0
- %nop1032 = alloca i1, i1 0
- %nop1033 = alloca i1, i1 0
- %nop1034 = alloca i1, i1 0
- %nop1035 = alloca i1, i1 0
- %nop1036 = alloca i1, i1 0
- %nop1037 = alloca i1, i1 0
- %nop1038 = alloca i1, i1 0
- %nop1039 = alloca i1, i1 0
- %nop1040 = alloca i1, i1 0
- %nop1041 = alloca i1, i1 0
- %nop1042 = alloca i1, i1 0
- %nop1043 = alloca i1, i1 0
- %nop1044 = alloca i1, i1 0
- %nop1045 = alloca i1, i1 0
- %nop1046 = alloca i1, i1 0
- %nop1047 = alloca i1, i1 0
- %nop1048 = alloca i1, i1 0
- %nop1049 = alloca i1, i1 0
- %nop1050 = alloca i1, i1 0
- %nop1051 = alloca i1, i1 0
- %nop1052 = alloca i1, i1 0
- %nop1053 = alloca i1, i1 0
- %nop1054 = alloca i1, i1 0
- %nop1055 = alloca i1, i1 0
- %nop1056 = alloca i1, i1 0
- %nop1057 = alloca i1, i1 0
- %nop1058 = alloca i1, i1 0
- %nop1059 = alloca i1, i1 0
- %nop1060 = alloca i1, i1 0
- %nop1061 = alloca i1, i1 0
- %nop1062 = alloca i1, i1 0
- %nop1063 = alloca i1, i1 0
- %nop1064 = alloca i1, i1 0
- %nop1065 = alloca i1, i1 0
- %nop1066 = alloca i1, i1 0
- %nop1067 = alloca i1, i1 0
- %nop1068 = alloca i1, i1 0
- %nop1069 = alloca i1, i1 0
- %nop1070 = alloca i1, i1 0
- %nop1071 = alloca i1, i1 0
- %nop1072 = alloca i1, i1 0
- %nop1073 = alloca i1, i1 0
- %nop1074 = alloca i1, i1 0
- %nop1075 = alloca i1, i1 0
- %nop1076 = alloca i1, i1 0
- %nop1077 = alloca i1, i1 0
- %nop1078 = alloca i1, i1 0
- %nop1079 = alloca i1, i1 0
- %nop1080 = alloca i1, i1 0
- %nop1081 = alloca i1, i1 0
- %nop1082 = alloca i1, i1 0
- %nop1083 = alloca i1, i1 0
- %nop1084 = alloca i1, i1 0
- %nop1085 = alloca i1, i1 0
- %nop1086 = alloca i1, i1 0
- %nop1087 = alloca i1, i1 0
- %nop1088 = alloca i1, i1 0
- %nop1089 = alloca i1, i1 0
- %nop1090 = alloca i1, i1 0
- %nop1091 = alloca i1, i1 0
- %nop1092 = alloca i1, i1 0
- %nop1093 = alloca i1, i1 0
- %nop1094 = alloca i1, i1 0
- %nop1095 = alloca i1, i1 0
- %nop1096 = alloca i1, i1 0
- %nop1097 = alloca i1, i1 0
- %nop1098 = alloca i1, i1 0
- %nop1099 = alloca i1, i1 0
- %nop1100 = alloca i1, i1 0
- %nop1101 = alloca i1, i1 0
- %nop1102 = alloca i1, i1 0
- %nop1103 = alloca i1, i1 0
- %nop1104 = alloca i1, i1 0
- %nop1105 = alloca i1, i1 0
- %nop1106 = alloca i1, i1 0
- %nop1107 = alloca i1, i1 0
- %nop1108 = alloca i1, i1 0
- %nop1109 = alloca i1, i1 0
- %nop1110 = alloca i1, i1 0
- %nop1111 = alloca i1, i1 0
- %nop1112 = alloca i1, i1 0
- %nop1113 = alloca i1, i1 0
- %nop1114 = alloca i1, i1 0
- %nop1115 = alloca i1, i1 0
- %nop1116 = alloca i1, i1 0
- %nop1117 = alloca i1, i1 0
- %nop1118 = alloca i1, i1 0
- %nop1119 = alloca i1, i1 0
- %nop1120 = alloca i1, i1 0
- %nop1121 = alloca i1, i1 0
- %nop1122 = alloca i1, i1 0
- %nop1123 = alloca i1, i1 0
- %nop1124 = alloca i1, i1 0
- %nop1125 = alloca i1, i1 0
- %nop1126 = alloca i1, i1 0
- %nop1127 = alloca i1, i1 0
- %nop1128 = alloca i1, i1 0
- %nop1129 = alloca i1, i1 0
- %nop1130 = alloca i1, i1 0
- %nop1131 = alloca i1, i1 0
- %nop1132 = alloca i1, i1 0
- %nop1133 = alloca i1, i1 0
- %nop1134 = alloca i1, i1 0
- %nop1135 = alloca i1, i1 0
- %nop1136 = alloca i1, i1 0
- %nop1137 = alloca i1, i1 0
- %nop1138 = alloca i1, i1 0
- %nop1139 = alloca i1, i1 0
- %nop1140 = alloca i1, i1 0
- %nop1141 = alloca i1, i1 0
- %nop1142 = alloca i1, i1 0
- %nop1143 = alloca i1, i1 0
- %nop1144 = alloca i1, i1 0
- %nop1145 = alloca i1, i1 0
- %nop1146 = alloca i1, i1 0
- %nop1147 = alloca i1, i1 0
- %nop1148 = alloca i1, i1 0
- %nop1149 = alloca i1, i1 0
- %nop1150 = alloca i1, i1 0
- %nop1151 = alloca i1, i1 0
- %nop1152 = alloca i1, i1 0
- %nop1153 = alloca i1, i1 0
- %nop1154 = alloca i1, i1 0
- %nop1155 = alloca i1, i1 0
- %nop1156 = alloca i1, i1 0
- %nop1157 = alloca i1, i1 0
- %nop1158 = alloca i1, i1 0
- %nop1159 = alloca i1, i1 0
- %nop1160 = alloca i1, i1 0
- %nop1161 = alloca i1, i1 0
- %nop1162 = alloca i1, i1 0
- %nop1163 = alloca i1, i1 0
- %nop1164 = alloca i1, i1 0
- %nop1165 = alloca i1, i1 0
- %nop1166 = alloca i1, i1 0
- %nop1167 = alloca i1, i1 0
- %nop1168 = alloca i1, i1 0
- %nop1169 = alloca i1, i1 0
- %nop1170 = alloca i1, i1 0
- %nop1171 = alloca i1, i1 0
- %nop1172 = alloca i1, i1 0
- %nop1173 = alloca i1, i1 0
- %nop1174 = alloca i1, i1 0
- %nop1175 = alloca i1, i1 0
- %nop1176 = alloca i1, i1 0
- %nop1177 = alloca i1, i1 0
- %nop1178 = alloca i1, i1 0
- %nop1179 = alloca i1, i1 0
- %nop1180 = alloca i1, i1 0
- %nop1181 = alloca i1, i1 0
- %nop1182 = alloca i1, i1 0
- %nop1183 = alloca i1, i1 0
- %nop1184 = alloca i1, i1 0
- %nop1185 = alloca i1, i1 0
- %nop1186 = alloca i1, i1 0
- %nop1187 = alloca i1, i1 0
- %nop1188 = alloca i1, i1 0
- %nop1189 = alloca i1, i1 0
- %nop1190 = alloca i1, i1 0
- %nop1191 = alloca i1, i1 0
- %nop1192 = alloca i1, i1 0
- %nop1193 = alloca i1, i1 0
- %nop1194 = alloca i1, i1 0
- %nop1195 = alloca i1, i1 0
- %nop1196 = alloca i1, i1 0
- %nop1197 = alloca i1, i1 0
- %nop1198 = alloca i1, i1 0
- %nop1199 = alloca i1, i1 0
- %nop1200 = alloca i1, i1 0
- %nop1201 = alloca i1, i1 0
- %nop1202 = alloca i1, i1 0
- %nop1203 = alloca i1, i1 0
- %nop1204 = alloca i1, i1 0
- %nop1205 = alloca i1, i1 0
- %nop1206 = alloca i1, i1 0
- %nop1207 = alloca i1, i1 0
- %nop1208 = alloca i1, i1 0
- %nop1209 = alloca i1, i1 0
- %nop1210 = alloca i1, i1 0
- %nop1211 = alloca i1, i1 0
- %nop1212 = alloca i1, i1 0
- %nop1213 = alloca i1, i1 0
- %nop1214 = alloca i1, i1 0
- %nop1215 = alloca i1, i1 0
- %nop1216 = alloca i1, i1 0
- %nop1217 = alloca i1, i1 0
- %nop1218 = alloca i1, i1 0
- %nop1219 = alloca i1, i1 0
- %nop1220 = alloca i1, i1 0
- %nop1221 = alloca i1, i1 0
- %nop1222 = alloca i1, i1 0
- %nop1223 = alloca i1, i1 0
- %nop1224 = alloca i1, i1 0
- %nop1225 = alloca i1, i1 0
- %nop1226 = alloca i1, i1 0
- %nop1227 = alloca i1, i1 0
- %nop1228 = alloca i1, i1 0
- %nop1229 = alloca i1, i1 0
- %nop1230 = alloca i1, i1 0
- %nop1231 = alloca i1, i1 0
- %nop1232 = alloca i1, i1 0
- %nop1233 = alloca i1, i1 0
- %nop1234 = alloca i1, i1 0
- %nop1235 = alloca i1, i1 0
- %nop1236 = alloca i1, i1 0
- %nop1237 = alloca i1, i1 0
- %nop1238 = alloca i1, i1 0
- %nop1239 = alloca i1, i1 0
- %nop1240 = alloca i1, i1 0
- %nop1241 = alloca i1, i1 0
- %nop1242 = alloca i1, i1 0
- %nop1243 = alloca i1, i1 0
- %nop1244 = alloca i1, i1 0
- %nop1245 = alloca i1, i1 0
- %nop1246 = alloca i1, i1 0
- %nop1247 = alloca i1, i1 0
- %nop1248 = alloca i1, i1 0
- %nop1249 = alloca i1, i1 0
- %nop1250 = alloca i1, i1 0
- %nop1251 = alloca i1, i1 0
- %nop1252 = alloca i1, i1 0
- %nop1253 = alloca i1, i1 0
- %nop1254 = alloca i1, i1 0
- %nop1255 = alloca i1, i1 0
- %nop1256 = alloca i1, i1 0
- %nop1257 = alloca i1, i1 0
- %nop1258 = alloca i1, i1 0
- %nop1259 = alloca i1, i1 0
- %nop1260 = alloca i1, i1 0
- %nop1261 = alloca i1, i1 0
- %nop1262 = alloca i1, i1 0
- %nop1263 = alloca i1, i1 0
- %nop1264 = alloca i1, i1 0
- %nop1265 = alloca i1, i1 0
- %nop1266 = alloca i1, i1 0
- %nop1267 = alloca i1, i1 0
- %nop1268 = alloca i1, i1 0
- %nop1269 = alloca i1, i1 0
- %nop1270 = alloca i1, i1 0
- %nop1271 = alloca i1, i1 0
- %nop1272 = alloca i1, i1 0
- %nop1273 = alloca i1, i1 0
- %nop1274 = alloca i1, i1 0
- %nop1275 = alloca i1, i1 0
- %nop1276 = alloca i1, i1 0
- %nop1277 = alloca i1, i1 0
- %nop1278 = alloca i1, i1 0
- %nop1279 = alloca i1, i1 0
- %nop1280 = alloca i1, i1 0
- %nop1281 = alloca i1, i1 0
- %nop1282 = alloca i1, i1 0
- %nop1283 = alloca i1, i1 0
- %nop1284 = alloca i1, i1 0
- %nop1285 = alloca i1, i1 0
- %nop1286 = alloca i1, i1 0
- %nop1287 = alloca i1, i1 0
- %nop1288 = alloca i1, i1 0
- %nop1289 = alloca i1, i1 0
- %nop1290 = alloca i1, i1 0
- %nop1291 = alloca i1, i1 0
- %nop1292 = alloca i1, i1 0
- %nop1293 = alloca i1, i1 0
- %nop1294 = alloca i1, i1 0
- %nop1295 = alloca i1, i1 0
- %nop1296 = alloca i1, i1 0
- %nop1297 = alloca i1, i1 0
- %nop1298 = alloca i1, i1 0
- %nop1299 = alloca i1, i1 0
- %nop1300 = alloca i1, i1 0
- %nop1301 = alloca i1, i1 0
- %nop1302 = alloca i1, i1 0
- %nop1303 = alloca i1, i1 0
- %nop1304 = alloca i1, i1 0
- %nop1305 = alloca i1, i1 0
- %nop1306 = alloca i1, i1 0
- %nop1307 = alloca i1, i1 0
- %nop1308 = alloca i1, i1 0
- %nop1309 = alloca i1, i1 0
- %nop1310 = alloca i1, i1 0
- %nop1311 = alloca i1, i1 0
- %nop1312 = alloca i1, i1 0
- %nop1313 = alloca i1, i1 0
- %nop1314 = alloca i1, i1 0
- %nop1315 = alloca i1, i1 0
- %nop1316 = alloca i1, i1 0
- %nop1317 = alloca i1, i1 0
- %nop1318 = alloca i1, i1 0
- %nop1319 = alloca i1, i1 0
- %nop1320 = alloca i1, i1 0
- %nop1321 = alloca i1, i1 0
- %nop1322 = alloca i1, i1 0
- %nop1323 = alloca i1, i1 0
- %nop1324 = alloca i1, i1 0
- %nop1325 = alloca i1, i1 0
- %nop1326 = alloca i1, i1 0
- %nop1327 = alloca i1, i1 0
- %nop1328 = alloca i1, i1 0
- %nop1329 = alloca i1, i1 0
- %nop1330 = alloca i1, i1 0
- %nop1331 = alloca i1, i1 0
- %nop1332 = alloca i1, i1 0
- %nop1333 = alloca i1, i1 0
- %nop1334 = alloca i1, i1 0
- %nop1335 = alloca i1, i1 0
- %nop1336 = alloca i1, i1 0
- %nop1337 = alloca i1, i1 0
- %nop1338 = alloca i1, i1 0
- %nop1339 = alloca i1, i1 0
- %nop1340 = alloca i1, i1 0
- %nop1341 = alloca i1, i1 0
- %nop1342 = alloca i1, i1 0
- %nop1343 = alloca i1, i1 0
- %nop1344 = alloca i1, i1 0
- %nop1345 = alloca i1, i1 0
- %nop1346 = alloca i1, i1 0
- %nop1347 = alloca i1, i1 0
- %nop1348 = alloca i1, i1 0
- %nop1349 = alloca i1, i1 0
- %nop1350 = alloca i1, i1 0
- %nop1351 = alloca i1, i1 0
- %nop1352 = alloca i1, i1 0
- %nop1353 = alloca i1, i1 0
- %nop1354 = alloca i1, i1 0
- %nop1355 = alloca i1, i1 0
- %nop1356 = alloca i1, i1 0
- %nop1357 = alloca i1, i1 0
- %nop1358 = alloca i1, i1 0
- %nop1359 = alloca i1, i1 0
- %nop1360 = alloca i1, i1 0
- %nop1361 = alloca i1, i1 0
- %nop1362 = alloca i1, i1 0
- %nop1363 = alloca i1, i1 0
- %nop1364 = alloca i1, i1 0
- %nop1365 = alloca i1, i1 0
- %nop1366 = alloca i1, i1 0
- %nop1367 = alloca i1, i1 0
- %nop1368 = alloca i1, i1 0
- %nop1369 = alloca i1, i1 0
- %nop1370 = alloca i1, i1 0
- %nop1371 = alloca i1, i1 0
- %nop1372 = alloca i1, i1 0
- %nop1373 = alloca i1, i1 0
- %nop1374 = alloca i1, i1 0
- %nop1375 = alloca i1, i1 0
- %nop1376 = alloca i1, i1 0
- %nop1377 = alloca i1, i1 0
- %nop1378 = alloca i1, i1 0
- %nop1379 = alloca i1, i1 0
- %nop1380 = alloca i1, i1 0
- %nop1381 = alloca i1, i1 0
- %nop1382 = alloca i1, i1 0
- %nop1383 = alloca i1, i1 0
- %nop1384 = alloca i1, i1 0
- %nop1385 = alloca i1, i1 0
- %nop1386 = alloca i1, i1 0
- %nop1387 = alloca i1, i1 0
- %nop1388 = alloca i1, i1 0
- %nop1389 = alloca i1, i1 0
- %nop1390 = alloca i1, i1 0
- %nop1391 = alloca i1, i1 0
- %nop1392 = alloca i1, i1 0
- %nop1393 = alloca i1, i1 0
- %nop1394 = alloca i1, i1 0
- %nop1395 = alloca i1, i1 0
- %nop1396 = alloca i1, i1 0
- %nop1397 = alloca i1, i1 0
- %nop1398 = alloca i1, i1 0
- %nop1399 = alloca i1, i1 0
- %nop1400 = alloca i1, i1 0
- %nop1401 = alloca i1, i1 0
- %nop1402 = alloca i1, i1 0
- %nop1403 = alloca i1, i1 0
- %nop1404 = alloca i1, i1 0
- %nop1405 = alloca i1, i1 0
- %nop1406 = alloca i1, i1 0
- %nop1407 = alloca i1, i1 0
- %nop1408 = alloca i1, i1 0
- %nop1409 = alloca i1, i1 0
- %nop1410 = alloca i1, i1 0
- %nop1411 = alloca i1, i1 0
- %nop1412 = alloca i1, i1 0
- %nop1413 = alloca i1, i1 0
- %nop1414 = alloca i1, i1 0
- %nop1415 = alloca i1, i1 0
- %nop1416 = alloca i1, i1 0
- %nop1417 = alloca i1, i1 0
- %nop1418 = alloca i1, i1 0
- %nop1419 = alloca i1, i1 0
- %nop1420 = alloca i1, i1 0
- %nop1421 = alloca i1, i1 0
- %nop1422 = alloca i1, i1 0
- %nop1423 = alloca i1, i1 0
- %nop1424 = alloca i1, i1 0
- %nop1425 = alloca i1, i1 0
- %nop1426 = alloca i1, i1 0
- %nop1427 = alloca i1, i1 0
- %nop1428 = alloca i1, i1 0
- %nop1429 = alloca i1, i1 0
- %nop1430 = alloca i1, i1 0
- %nop1431 = alloca i1, i1 0
- %nop1432 = alloca i1, i1 0
- %nop1433 = alloca i1, i1 0
- %nop1434 = alloca i1, i1 0
- %nop1435 = alloca i1, i1 0
- %nop1436 = alloca i1, i1 0
- %nop1437 = alloca i1, i1 0
- %nop1438 = alloca i1, i1 0
- %nop1439 = alloca i1, i1 0
- %nop1440 = alloca i1, i1 0
- %nop1441 = alloca i1, i1 0
- %nop1442 = alloca i1, i1 0
- %nop1443 = alloca i1, i1 0
- %nop1444 = alloca i1, i1 0
- %nop1445 = alloca i1, i1 0
- %nop1446 = alloca i1, i1 0
- %nop1447 = alloca i1, i1 0
- %nop1448 = alloca i1, i1 0
- %nop1449 = alloca i1, i1 0
- %nop1450 = alloca i1, i1 0
- %nop1451 = alloca i1, i1 0
- %nop1452 = alloca i1, i1 0
- %nop1453 = alloca i1, i1 0
- %nop1454 = alloca i1, i1 0
- %nop1455 = alloca i1, i1 0
- %nop1456 = alloca i1, i1 0
- %nop1457 = alloca i1, i1 0
- %nop1458 = alloca i1, i1 0
- %nop1459 = alloca i1, i1 0
- %nop1460 = alloca i1, i1 0
- %nop1461 = alloca i1, i1 0
- %nop1462 = alloca i1, i1 0
- %nop1463 = alloca i1, i1 0
- %nop1464 = alloca i1, i1 0
- %nop1465 = alloca i1, i1 0
- %nop1466 = alloca i1, i1 0
- %nop1467 = alloca i1, i1 0
- %nop1468 = alloca i1, i1 0
- %nop1469 = alloca i1, i1 0
- %nop1470 = alloca i1, i1 0
- %nop1471 = alloca i1, i1 0
- %nop1472 = alloca i1, i1 0
- %nop1473 = alloca i1, i1 0
- %nop1474 = alloca i1, i1 0
- %nop1475 = alloca i1, i1 0
- %nop1476 = alloca i1, i1 0
- %nop1477 = alloca i1, i1 0
- %nop1478 = alloca i1, i1 0
- %nop1479 = alloca i1, i1 0
- %nop1480 = alloca i1, i1 0
- %nop1481 = alloca i1, i1 0
- %nop1482 = alloca i1, i1 0
- %nop1483 = alloca i1, i1 0
- %nop1484 = alloca i1, i1 0
- %nop1485 = alloca i1, i1 0
- %nop1486 = alloca i1, i1 0
- %nop1487 = alloca i1, i1 0
- %nop1488 = alloca i1, i1 0
- %nop1489 = alloca i1, i1 0
- %nop1490 = alloca i1, i1 0
- %nop1491 = alloca i1, i1 0
- %nop1492 = alloca i1, i1 0
- %nop1493 = alloca i1, i1 0
- %nop1494 = alloca i1, i1 0
- %nop1495 = alloca i1, i1 0
- %nop1496 = alloca i1, i1 0
- %nop1497 = alloca i1, i1 0
- %nop1498 = alloca i1, i1 0
- %nop1499 = alloca i1, i1 0
- %nop1500 = alloca i1, i1 0
- %nop1501 = alloca i1, i1 0
- %nop1502 = alloca i1, i1 0
- %nop1503 = alloca i1, i1 0
- %nop1504 = alloca i1, i1 0
- %nop1505 = alloca i1, i1 0
- %nop1506 = alloca i1, i1 0
- %nop1507 = alloca i1, i1 0
- %nop1508 = alloca i1, i1 0
- %nop1509 = alloca i1, i1 0
- %nop1510 = alloca i1, i1 0
- %nop1511 = alloca i1, i1 0
- %nop1512 = alloca i1, i1 0
- %nop1513 = alloca i1, i1 0
- %nop1514 = alloca i1, i1 0
- %nop1515 = alloca i1, i1 0
- %nop1516 = alloca i1, i1 0
- %nop1517 = alloca i1, i1 0
- %nop1518 = alloca i1, i1 0
- %nop1519 = alloca i1, i1 0
- %nop1520 = alloca i1, i1 0
- %nop1521 = alloca i1, i1 0
- %nop1522 = alloca i1, i1 0
- %nop1523 = alloca i1, i1 0
- %nop1524 = alloca i1, i1 0
- %nop1525 = alloca i1, i1 0
- %nop1526 = alloca i1, i1 0
- %nop1527 = alloca i1, i1 0
- %nop1528 = alloca i1, i1 0
- %nop1529 = alloca i1, i1 0
- %nop1530 = alloca i1, i1 0
- %nop1531 = alloca i1, i1 0
- %nop1532 = alloca i1, i1 0
- %nop1533 = alloca i1, i1 0
- %nop1534 = alloca i1, i1 0
- %nop1535 = alloca i1, i1 0
- %nop1536 = alloca i1, i1 0
- %nop1537 = alloca i1, i1 0
- %nop1538 = alloca i1, i1 0
- %nop1539 = alloca i1, i1 0
- %nop1540 = alloca i1, i1 0
- %nop1541 = alloca i1, i1 0
- %nop1542 = alloca i1, i1 0
- %nop1543 = alloca i1, i1 0
- %nop1544 = alloca i1, i1 0
- %nop1545 = alloca i1, i1 0
- %nop1546 = alloca i1, i1 0
- %nop1547 = alloca i1, i1 0
- %nop1548 = alloca i1, i1 0
- %nop1549 = alloca i1, i1 0
- %nop1550 = alloca i1, i1 0
- %nop1551 = alloca i1, i1 0
- %nop1552 = alloca i1, i1 0
- %nop1553 = alloca i1, i1 0
- %nop1554 = alloca i1, i1 0
- %nop1555 = alloca i1, i1 0
- %nop1556 = alloca i1, i1 0
- %nop1557 = alloca i1, i1 0
- %nop1558 = alloca i1, i1 0
- %nop1559 = alloca i1, i1 0
- %nop1560 = alloca i1, i1 0
- %nop1561 = alloca i1, i1 0
- %nop1562 = alloca i1, i1 0
- %nop1563 = alloca i1, i1 0
- %nop1564 = alloca i1, i1 0
- %nop1565 = alloca i1, i1 0
- %nop1566 = alloca i1, i1 0
- %nop1567 = alloca i1, i1 0
- %nop1568 = alloca i1, i1 0
- %nop1569 = alloca i1, i1 0
- %nop1570 = alloca i1, i1 0
- %nop1571 = alloca i1, i1 0
- %nop1572 = alloca i1, i1 0
- %nop1573 = alloca i1, i1 0
- %nop1574 = alloca i1, i1 0
- %nop1575 = alloca i1, i1 0
- %nop1576 = alloca i1, i1 0
- %nop1577 = alloca i1, i1 0
- %nop1578 = alloca i1, i1 0
- %nop1579 = alloca i1, i1 0
- %nop1580 = alloca i1, i1 0
- %nop1581 = alloca i1, i1 0
- %nop1582 = alloca i1, i1 0
- %nop1583 = alloca i1, i1 0
- %nop1584 = alloca i1, i1 0
- %nop1585 = alloca i1, i1 0
- %nop1586 = alloca i1, i1 0
- %nop1587 = alloca i1, i1 0
- %nop1588 = alloca i1, i1 0
- %nop1589 = alloca i1, i1 0
- %nop1590 = alloca i1, i1 0
- %nop1591 = alloca i1, i1 0
- %nop1592 = alloca i1, i1 0
- %nop1593 = alloca i1, i1 0
- %nop1594 = alloca i1, i1 0
- %nop1595 = alloca i1, i1 0
- %nop1596 = alloca i1, i1 0
- %nop1597 = alloca i1, i1 0
- %nop1598 = alloca i1, i1 0
- %nop1599 = alloca i1, i1 0
- %nop1600 = alloca i1, i1 0
- %nop1601 = alloca i1, i1 0
- %nop1602 = alloca i1, i1 0
- %nop1603 = alloca i1, i1 0
- %nop1604 = alloca i1, i1 0
- %nop1605 = alloca i1, i1 0
- %nop1606 = alloca i1, i1 0
- %nop1607 = alloca i1, i1 0
- %nop1608 = alloca i1, i1 0
- %nop1609 = alloca i1, i1 0
- %nop1610 = alloca i1, i1 0
- %nop1611 = alloca i1, i1 0
- %nop1612 = alloca i1, i1 0
- %nop1613 = alloca i1, i1 0
- %nop1614 = alloca i1, i1 0
- %nop1615 = alloca i1, i1 0
- %nop1616 = alloca i1, i1 0
- %nop1617 = alloca i1, i1 0
- %nop1618 = alloca i1, i1 0
- %nop1619 = alloca i1, i1 0
- %nop1620 = alloca i1, i1 0
- %nop1621 = alloca i1, i1 0
- %nop1622 = alloca i1, i1 0
- %nop1623 = alloca i1, i1 0
- %nop1624 = alloca i1, i1 0
- %nop1625 = alloca i1, i1 0
- %nop1626 = alloca i1, i1 0
- %nop1627 = alloca i1, i1 0
- %nop1628 = alloca i1, i1 0
- %nop1629 = alloca i1, i1 0
- %nop1630 = alloca i1, i1 0
- %nop1631 = alloca i1, i1 0
- %nop1632 = alloca i1, i1 0
- %nop1633 = alloca i1, i1 0
- %nop1634 = alloca i1, i1 0
- %nop1635 = alloca i1, i1 0
- %nop1636 = alloca i1, i1 0
- %nop1637 = alloca i1, i1 0
- %nop1638 = alloca i1, i1 0
- %nop1639 = alloca i1, i1 0
- %nop1640 = alloca i1, i1 0
- %nop1641 = alloca i1, i1 0
- %nop1642 = alloca i1, i1 0
- %nop1643 = alloca i1, i1 0
- %nop1644 = alloca i1, i1 0
- %nop1645 = alloca i1, i1 0
- %nop1646 = alloca i1, i1 0
- %nop1647 = alloca i1, i1 0
- %nop1648 = alloca i1, i1 0
- %nop1649 = alloca i1, i1 0
- %nop1650 = alloca i1, i1 0
- %nop1651 = alloca i1, i1 0
- %nop1652 = alloca i1, i1 0
- %nop1653 = alloca i1, i1 0
- %nop1654 = alloca i1, i1 0
- %nop1655 = alloca i1, i1 0
- %nop1656 = alloca i1, i1 0
- %nop1657 = alloca i1, i1 0
- %nop1658 = alloca i1, i1 0
- %nop1659 = alloca i1, i1 0
- %nop1660 = alloca i1, i1 0
- %nop1661 = alloca i1, i1 0
- %nop1662 = alloca i1, i1 0
- %nop1663 = alloca i1, i1 0
- %nop1664 = alloca i1, i1 0
- %nop1665 = alloca i1, i1 0
- %nop1666 = alloca i1, i1 0
- %nop1667 = alloca i1, i1 0
- %nop1668 = alloca i1, i1 0
- %nop1669 = alloca i1, i1 0
- %nop1670 = alloca i1, i1 0
- %nop1671 = alloca i1, i1 0
- %nop1672 = alloca i1, i1 0
- %nop1673 = alloca i1, i1 0
- %nop1674 = alloca i1, i1 0
- %nop1675 = alloca i1, i1 0
- %nop1676 = alloca i1, i1 0
- %nop1677 = alloca i1, i1 0
- %nop1678 = alloca i1, i1 0
- %nop1679 = alloca i1, i1 0
- %nop1680 = alloca i1, i1 0
- %nop1681 = alloca i1, i1 0
- %nop1682 = alloca i1, i1 0
- %nop1683 = alloca i1, i1 0
- %nop1684 = alloca i1, i1 0
- %nop1685 = alloca i1, i1 0
- %nop1686 = alloca i1, i1 0
- %nop1687 = alloca i1, i1 0
- %nop1688 = alloca i1, i1 0
- %nop1689 = alloca i1, i1 0
- %nop1690 = alloca i1, i1 0
- %nop1691 = alloca i1, i1 0
- %nop1692 = alloca i1, i1 0
- %nop1693 = alloca i1, i1 0
- %nop1694 = alloca i1, i1 0
- %nop1695 = alloca i1, i1 0
- %nop1696 = alloca i1, i1 0
- %nop1697 = alloca i1, i1 0
- %nop1698 = alloca i1, i1 0
- %nop1699 = alloca i1, i1 0
- %nop1700 = alloca i1, i1 0
- %nop1701 = alloca i1, i1 0
- %nop1702 = alloca i1, i1 0
- %nop1703 = alloca i1, i1 0
- %nop1704 = alloca i1, i1 0
- %nop1705 = alloca i1, i1 0
- %nop1706 = alloca i1, i1 0
- %nop1707 = alloca i1, i1 0
- %nop1708 = alloca i1, i1 0
- %nop1709 = alloca i1, i1 0
- %nop1710 = alloca i1, i1 0
- %nop1711 = alloca i1, i1 0
- %nop1712 = alloca i1, i1 0
- %nop1713 = alloca i1, i1 0
- %nop1714 = alloca i1, i1 0
- %nop1715 = alloca i1, i1 0
- %nop1716 = alloca i1, i1 0
- %nop1717 = alloca i1, i1 0
- %nop1718 = alloca i1, i1 0
- %nop1719 = alloca i1, i1 0
- %nop1720 = alloca i1, i1 0
- %nop1721 = alloca i1, i1 0
- %nop1722 = alloca i1, i1 0
- %nop1723 = alloca i1, i1 0
- %nop1724 = alloca i1, i1 0
- %nop1725 = alloca i1, i1 0
- %nop1726 = alloca i1, i1 0
- %nop1727 = alloca i1, i1 0
- %nop1728 = alloca i1, i1 0
- %nop1729 = alloca i1, i1 0
- %nop1730 = alloca i1, i1 0
- %nop1731 = alloca i1, i1 0
- %nop1732 = alloca i1, i1 0
- %nop1733 = alloca i1, i1 0
- %nop1734 = alloca i1, i1 0
- %nop1735 = alloca i1, i1 0
- %nop1736 = alloca i1, i1 0
- %nop1737 = alloca i1, i1 0
- %nop1738 = alloca i1, i1 0
- %nop1739 = alloca i1, i1 0
- %nop1740 = alloca i1, i1 0
- %nop1741 = alloca i1, i1 0
- %nop1742 = alloca i1, i1 0
- %nop1743 = alloca i1, i1 0
- %nop1744 = alloca i1, i1 0
- %nop1745 = alloca i1, i1 0
- %nop1746 = alloca i1, i1 0
- %nop1747 = alloca i1, i1 0
- %nop1748 = alloca i1, i1 0
- %nop1749 = alloca i1, i1 0
- %nop1750 = alloca i1, i1 0
- %nop1751 = alloca i1, i1 0
- %nop1752 = alloca i1, i1 0
- %nop1753 = alloca i1, i1 0
- %nop1754 = alloca i1, i1 0
- %nop1755 = alloca i1, i1 0
- %nop1756 = alloca i1, i1 0
- %nop1757 = alloca i1, i1 0
- %nop1758 = alloca i1, i1 0
- %nop1759 = alloca i1, i1 0
- %nop1760 = alloca i1, i1 0
- %nop1761 = alloca i1, i1 0
- %nop1762 = alloca i1, i1 0
- %nop1763 = alloca i1, i1 0
- %nop1764 = alloca i1, i1 0
- %nop1765 = alloca i1, i1 0
- %nop1766 = alloca i1, i1 0
- %nop1767 = alloca i1, i1 0
- %nop1768 = alloca i1, i1 0
- %nop1769 = alloca i1, i1 0
- %nop1770 = alloca i1, i1 0
- %nop1771 = alloca i1, i1 0
- %nop1772 = alloca i1, i1 0
- %nop1773 = alloca i1, i1 0
- %nop1774 = alloca i1, i1 0
- %nop1775 = alloca i1, i1 0
- %nop1776 = alloca i1, i1 0
- %nop1777 = alloca i1, i1 0
- %nop1778 = alloca i1, i1 0
- %nop1779 = alloca i1, i1 0
- %nop1780 = alloca i1, i1 0
- %nop1781 = alloca i1, i1 0
- %nop1782 = alloca i1, i1 0
- %nop1783 = alloca i1, i1 0
- %nop1784 = alloca i1, i1 0
- %nop1785 = alloca i1, i1 0
- %nop1786 = alloca i1, i1 0
- %nop1787 = alloca i1, i1 0
- %nop1788 = alloca i1, i1 0
- %nop1789 = alloca i1, i1 0
- %nop1790 = alloca i1, i1 0
- %nop1791 = alloca i1, i1 0
- %nop1792 = alloca i1, i1 0
- %nop1793 = alloca i1, i1 0
- %nop1794 = alloca i1, i1 0
- %nop1795 = alloca i1, i1 0
- %nop1796 = alloca i1, i1 0
- %nop1797 = alloca i1, i1 0
- %nop1798 = alloca i1, i1 0
- %nop1799 = alloca i1, i1 0
- %nop1800 = alloca i1, i1 0
- %nop1801 = alloca i1, i1 0
- %nop1802 = alloca i1, i1 0
- %nop1803 = alloca i1, i1 0
- %nop1804 = alloca i1, i1 0
- %nop1805 = alloca i1, i1 0
- %nop1806 = alloca i1, i1 0
- %nop1807 = alloca i1, i1 0
- %nop1808 = alloca i1, i1 0
- %nop1809 = alloca i1, i1 0
- %nop1810 = alloca i1, i1 0
- %nop1811 = alloca i1, i1 0
- %nop1812 = alloca i1, i1 0
- %nop1813 = alloca i1, i1 0
- %nop1814 = alloca i1, i1 0
- %nop1815 = alloca i1, i1 0
- %nop1816 = alloca i1, i1 0
- %nop1817 = alloca i1, i1 0
- %nop1818 = alloca i1, i1 0
- %nop1819 = alloca i1, i1 0
- %nop1820 = alloca i1, i1 0
- %nop1821 = alloca i1, i1 0
- %nop1822 = alloca i1, i1 0
- %nop1823 = alloca i1, i1 0
- %nop1824 = alloca i1, i1 0
- %nop1825 = alloca i1, i1 0
- %nop1826 = alloca i1, i1 0
- %nop1827 = alloca i1, i1 0
- %nop1828 = alloca i1, i1 0
- %nop1829 = alloca i1, i1 0
- %nop1830 = alloca i1, i1 0
- %nop1831 = alloca i1, i1 0
- %nop1832 = alloca i1, i1 0
- %nop1833 = alloca i1, i1 0
- %nop1834 = alloca i1, i1 0
- %nop1835 = alloca i1, i1 0
- %nop1836 = alloca i1, i1 0
- %nop1837 = alloca i1, i1 0
- %nop1838 = alloca i1, i1 0
- %nop1839 = alloca i1, i1 0
- %nop1840 = alloca i1, i1 0
- %nop1841 = alloca i1, i1 0
- %nop1842 = alloca i1, i1 0
- %nop1843 = alloca i1, i1 0
- %nop1844 = alloca i1, i1 0
- %nop1845 = alloca i1, i1 0
- %nop1846 = alloca i1, i1 0
- %nop1847 = alloca i1, i1 0
- %nop1848 = alloca i1, i1 0
- %nop1849 = alloca i1, i1 0
- %nop1850 = alloca i1, i1 0
- %nop1851 = alloca i1, i1 0
- %nop1852 = alloca i1, i1 0
- %nop1853 = alloca i1, i1 0
- %nop1854 = alloca i1, i1 0
- %nop1855 = alloca i1, i1 0
- %nop1856 = alloca i1, i1 0
- %nop1857 = alloca i1, i1 0
- %nop1858 = alloca i1, i1 0
- %nop1859 = alloca i1, i1 0
- %nop1860 = alloca i1, i1 0
- %nop1861 = alloca i1, i1 0
- %nop1862 = alloca i1, i1 0
- %nop1863 = alloca i1, i1 0
- %nop1864 = alloca i1, i1 0
- %nop1865 = alloca i1, i1 0
- %nop1866 = alloca i1, i1 0
- %nop1867 = alloca i1, i1 0
- %nop1868 = alloca i1, i1 0
- %nop1869 = alloca i1, i1 0
- %nop1870 = alloca i1, i1 0
- %nop1871 = alloca i1, i1 0
- %nop1872 = alloca i1, i1 0
- %nop1873 = alloca i1, i1 0
- %nop1874 = alloca i1, i1 0
- %nop1875 = alloca i1, i1 0
- %nop1876 = alloca i1, i1 0
- %nop1877 = alloca i1, i1 0
- %nop1878 = alloca i1, i1 0
- %nop1879 = alloca i1, i1 0
- %nop1880 = alloca i1, i1 0
- %nop1881 = alloca i1, i1 0
- %nop1882 = alloca i1, i1 0
- %nop1883 = alloca i1, i1 0
- %nop1884 = alloca i1, i1 0
- %nop1885 = alloca i1, i1 0
- %nop1886 = alloca i1, i1 0
- %nop1887 = alloca i1, i1 0
- %nop1888 = alloca i1, i1 0
- %nop1889 = alloca i1, i1 0
- %nop1890 = alloca i1, i1 0
- %nop1891 = alloca i1, i1 0
- %nop1892 = alloca i1, i1 0
- %nop1893 = alloca i1, i1 0
- %nop1894 = alloca i1, i1 0
- %nop1895 = alloca i1, i1 0
- %nop1896 = alloca i1, i1 0
- %nop1897 = alloca i1, i1 0
- %nop1898 = alloca i1, i1 0
- %nop1899 = alloca i1, i1 0
- %nop1900 = alloca i1, i1 0
- %nop1901 = alloca i1, i1 0
- %nop1902 = alloca i1, i1 0
- %nop1903 = alloca i1, i1 0
- %nop1904 = alloca i1, i1 0
- %nop1905 = alloca i1, i1 0
- %nop1906 = alloca i1, i1 0
- %nop1907 = alloca i1, i1 0
- %nop1908 = alloca i1, i1 0
- %nop1909 = alloca i1, i1 0
- %nop1910 = alloca i1, i1 0
- %nop1911 = alloca i1, i1 0
- %nop1912 = alloca i1, i1 0
- %nop1913 = alloca i1, i1 0
- %nop1914 = alloca i1, i1 0
- %nop1915 = alloca i1, i1 0
- %nop1916 = alloca i1, i1 0
- %nop1917 = alloca i1, i1 0
- %nop1918 = alloca i1, i1 0
- %nop1919 = alloca i1, i1 0
- %nop1920 = alloca i1, i1 0
- %nop1921 = alloca i1, i1 0
- %nop1922 = alloca i1, i1 0
- %nop1923 = alloca i1, i1 0
- %nop1924 = alloca i1, i1 0
- %nop1925 = alloca i1, i1 0
- %nop1926 = alloca i1, i1 0
- %nop1927 = alloca i1, i1 0
- %nop1928 = alloca i1, i1 0
- %nop1929 = alloca i1, i1 0
- %nop1930 = alloca i1, i1 0
- %nop1931 = alloca i1, i1 0
- %nop1932 = alloca i1, i1 0
- %nop1933 = alloca i1, i1 0
- %nop1934 = alloca i1, i1 0
- %nop1935 = alloca i1, i1 0
- %nop1936 = alloca i1, i1 0
- %nop1937 = alloca i1, i1 0
- %nop1938 = alloca i1, i1 0
- %nop1939 = alloca i1, i1 0
- %nop1940 = alloca i1, i1 0
- %nop1941 = alloca i1, i1 0
- %nop1942 = alloca i1, i1 0
- %nop1943 = alloca i1, i1 0
- %nop1944 = alloca i1, i1 0
- %nop1945 = alloca i1, i1 0
- %nop1946 = alloca i1, i1 0
- %nop1947 = alloca i1, i1 0
- %nop1948 = alloca i1, i1 0
- %nop1949 = alloca i1, i1 0
- %nop1950 = alloca i1, i1 0
- %nop1951 = alloca i1, i1 0
- %nop1952 = alloca i1, i1 0
- %nop1953 = alloca i1, i1 0
- %nop1954 = alloca i1, i1 0
- %nop1955 = alloca i1, i1 0
- %nop1956 = alloca i1, i1 0
- %nop1957 = alloca i1, i1 0
- %nop1958 = alloca i1, i1 0
- %nop1959 = alloca i1, i1 0
- %nop1960 = alloca i1, i1 0
- %nop1961 = alloca i1, i1 0
- %nop1962 = alloca i1, i1 0
- %nop1963 = alloca i1, i1 0
- %nop1964 = alloca i1, i1 0
- %nop1965 = alloca i1, i1 0
- %nop1966 = alloca i1, i1 0
- %nop1967 = alloca i1, i1 0
- %nop1968 = alloca i1, i1 0
- %nop1969 = alloca i1, i1 0
- %nop1970 = alloca i1, i1 0
- %nop1971 = alloca i1, i1 0
- %nop1972 = alloca i1, i1 0
- %nop1973 = alloca i1, i1 0
- %nop1974 = alloca i1, i1 0
- %nop1975 = alloca i1, i1 0
- %nop1976 = alloca i1, i1 0
- %nop1977 = alloca i1, i1 0
- %nop1978 = alloca i1, i1 0
- %nop1979 = alloca i1, i1 0
- %nop1980 = alloca i1, i1 0
- %nop1981 = alloca i1, i1 0
- %nop1982 = alloca i1, i1 0
- %nop1983 = alloca i1, i1 0
- %nop1984 = alloca i1, i1 0
- %nop1985 = alloca i1, i1 0
- %nop1986 = alloca i1, i1 0
- %nop1987 = alloca i1, i1 0
- %nop1988 = alloca i1, i1 0
- %nop1989 = alloca i1, i1 0
- %nop1990 = alloca i1, i1 0
- %nop1991 = alloca i1, i1 0
- %nop1992 = alloca i1, i1 0
- %nop1993 = alloca i1, i1 0
- %nop1994 = alloca i1, i1 0
- %nop1995 = alloca i1, i1 0
- %nop1996 = alloca i1, i1 0
- %nop1997 = alloca i1, i1 0
- %nop1998 = alloca i1, i1 0
- %nop1999 = alloca i1, i1 0
- %nop2000 = alloca i1, i1 0
- %nop2001 = alloca i1, i1 0
- %nop2002 = alloca i1, i1 0
- %nop2003 = alloca i1, i1 0
- %nop2004 = alloca i1, i1 0
- %nop2005 = alloca i1, i1 0
- %nop2006 = alloca i1, i1 0
- %nop2007 = alloca i1, i1 0
- %nop2008 = alloca i1, i1 0
- %nop2009 = alloca i1, i1 0
- %nop2010 = alloca i1, i1 0
- %nop2011 = alloca i1, i1 0
- %nop2012 = alloca i1, i1 0
- %nop2013 = alloca i1, i1 0
- %nop2014 = alloca i1, i1 0
- %nop2015 = alloca i1, i1 0
- %nop2016 = alloca i1, i1 0
- %nop2017 = alloca i1, i1 0
- %nop2018 = alloca i1, i1 0
- %nop2019 = alloca i1, i1 0
- %nop2020 = alloca i1, i1 0
- %nop2021 = alloca i1, i1 0
- %nop2022 = alloca i1, i1 0
- %nop2023 = alloca i1, i1 0
- %nop2024 = alloca i1, i1 0
- %nop2025 = alloca i1, i1 0
- %nop2026 = alloca i1, i1 0
- %nop2027 = alloca i1, i1 0
- %nop2028 = alloca i1, i1 0
- %nop2029 = alloca i1, i1 0
- %nop2030 = alloca i1, i1 0
- %nop2031 = alloca i1, i1 0
- %nop2032 = alloca i1, i1 0
- %nop2033 = alloca i1, i1 0
- %nop2034 = alloca i1, i1 0
- %nop2035 = alloca i1, i1 0
- %nop2036 = alloca i1, i1 0
- %nop2037 = alloca i1, i1 0
- %nop2038 = alloca i1, i1 0
- %nop2039 = alloca i1, i1 0
- %nop2040 = alloca i1, i1 0
- %nop2041 = alloca i1, i1 0
- %nop2042 = alloca i1, i1 0
- %nop2043 = alloca i1, i1 0
- %nop2044 = alloca i1, i1 0
- %nop2045 = alloca i1, i1 0
- %nop2046 = alloca i1, i1 0
- %nop2047 = alloca i1, i1 0
- %nop2048 = alloca i1, i1 0
- %nop2049 = alloca i1, i1 0
- %nop2050 = alloca i1, i1 0
- %nop2051 = alloca i1, i1 0
- %nop2052 = alloca i1, i1 0
- %nop2053 = alloca i1, i1 0
- %nop2054 = alloca i1, i1 0
- %nop2055 = alloca i1, i1 0
- %nop2056 = alloca i1, i1 0
- %nop2057 = alloca i1, i1 0
- %nop2058 = alloca i1, i1 0
- %nop2059 = alloca i1, i1 0
- %nop2060 = alloca i1, i1 0
- %nop2061 = alloca i1, i1 0
- %nop2062 = alloca i1, i1 0
- %nop2063 = alloca i1, i1 0
- %nop2064 = alloca i1, i1 0
- %nop2065 = alloca i1, i1 0
- %nop2066 = alloca i1, i1 0
- %nop2067 = alloca i1, i1 0
- %nop2068 = alloca i1, i1 0
- %nop2069 = alloca i1, i1 0
- %nop2070 = alloca i1, i1 0
- %nop2071 = alloca i1, i1 0
- %nop2072 = alloca i1, i1 0
- %nop2073 = alloca i1, i1 0
- %nop2074 = alloca i1, i1 0
- %nop2075 = alloca i1, i1 0
- %nop2076 = alloca i1, i1 0
- %nop2077 = alloca i1, i1 0
- %nop2078 = alloca i1, i1 0
- %nop2079 = alloca i1, i1 0
- %nop2080 = alloca i1, i1 0
- %nop2081 = alloca i1, i1 0
- %nop2082 = alloca i1, i1 0
- %nop2083 = alloca i1, i1 0
- %nop2084 = alloca i1, i1 0
- %nop2085 = alloca i1, i1 0
- %nop2086 = alloca i1, i1 0
- %nop2087 = alloca i1, i1 0
- %nop2088 = alloca i1, i1 0
- %nop2089 = alloca i1, i1 0
- %nop2090 = alloca i1, i1 0
- %nop2091 = alloca i1, i1 0
- %nop2092 = alloca i1, i1 0
- %nop2093 = alloca i1, i1 0
- %nop2094 = alloca i1, i1 0
- %nop2095 = alloca i1, i1 0
- %nop2096 = alloca i1, i1 0
- %nop2097 = alloca i1, i1 0
- %nop2098 = alloca i1, i1 0
- %nop2099 = alloca i1, i1 0
- %nop2100 = alloca i1, i1 0
- %nop2101 = alloca i1, i1 0
- %nop2102 = alloca i1, i1 0
- %nop2103 = alloca i1, i1 0
- %nop2104 = alloca i1, i1 0
- %nop2105 = alloca i1, i1 0
- %nop2106 = alloca i1, i1 0
- %nop2107 = alloca i1, i1 0
- %nop2108 = alloca i1, i1 0
- %nop2109 = alloca i1, i1 0
- %nop2110 = alloca i1, i1 0
- %nop2111 = alloca i1, i1 0
- %nop2112 = alloca i1, i1 0
- %nop2113 = alloca i1, i1 0
- %nop2114 = alloca i1, i1 0
- %nop2115 = alloca i1, i1 0
- %nop2116 = alloca i1, i1 0
- %nop2117 = alloca i1, i1 0
- %nop2118 = alloca i1, i1 0
- %nop2119 = alloca i1, i1 0
- %nop2120 = alloca i1, i1 0
- %nop2121 = alloca i1, i1 0
- %nop2122 = alloca i1, i1 0
- %nop2123 = alloca i1, i1 0
- %nop2124 = alloca i1, i1 0
- %nop2125 = alloca i1, i1 0
- %nop2126 = alloca i1, i1 0
- %nop2127 = alloca i1, i1 0
- %nop2128 = alloca i1, i1 0
- %nop2129 = alloca i1, i1 0
- %nop2130 = alloca i1, i1 0
- %nop2131 = alloca i1, i1 0
- %nop2132 = alloca i1, i1 0
- %nop2133 = alloca i1, i1 0
- %nop2134 = alloca i1, i1 0
- %nop2135 = alloca i1, i1 0
- %nop2136 = alloca i1, i1 0
- %nop2137 = alloca i1, i1 0
- %nop2138 = alloca i1, i1 0
- %nop2139 = alloca i1, i1 0
- %nop2140 = alloca i1, i1 0
- %nop2141 = alloca i1, i1 0
- %nop2142 = alloca i1, i1 0
- %nop2143 = alloca i1, i1 0
- %nop2144 = alloca i1, i1 0
- %nop2145 = alloca i1, i1 0
- %nop2146 = alloca i1, i1 0
- %nop2147 = alloca i1, i1 0
- %nop2148 = alloca i1, i1 0
- %nop2149 = alloca i1, i1 0
- %nop2150 = alloca i1, i1 0
- %nop2151 = alloca i1, i1 0
- %nop2152 = alloca i1, i1 0
- %nop2153 = alloca i1, i1 0
- %nop2154 = alloca i1, i1 0
- %nop2155 = alloca i1, i1 0
- %nop2156 = alloca i1, i1 0
- %nop2157 = alloca i1, i1 0
- %nop2158 = alloca i1, i1 0
- %nop2159 = alloca i1, i1 0
- %nop2160 = alloca i1, i1 0
- %nop2161 = alloca i1, i1 0
- %nop2162 = alloca i1, i1 0
- %nop2163 = alloca i1, i1 0
- %nop2164 = alloca i1, i1 0
- %nop2165 = alloca i1, i1 0
- %nop2166 = alloca i1, i1 0
- %nop2167 = alloca i1, i1 0
- %nop2168 = alloca i1, i1 0
- %nop2169 = alloca i1, i1 0
- %nop2170 = alloca i1, i1 0
- %nop2171 = alloca i1, i1 0
- %nop2172 = alloca i1, i1 0
- %nop2173 = alloca i1, i1 0
- %nop2174 = alloca i1, i1 0
- %nop2175 = alloca i1, i1 0
- %nop2176 = alloca i1, i1 0
- %nop2177 = alloca i1, i1 0
- %nop2178 = alloca i1, i1 0
- %nop2179 = alloca i1, i1 0
- %nop2180 = alloca i1, i1 0
- %nop2181 = alloca i1, i1 0
- %nop2182 = alloca i1, i1 0
- %nop2183 = alloca i1, i1 0
- %nop2184 = alloca i1, i1 0
- %nop2185 = alloca i1, i1 0
- %nop2186 = alloca i1, i1 0
- %nop2187 = alloca i1, i1 0
- %nop2188 = alloca i1, i1 0
- %nop2189 = alloca i1, i1 0
- %nop2190 = alloca i1, i1 0
- %nop2191 = alloca i1, i1 0
- %nop2192 = alloca i1, i1 0
- %nop2193 = alloca i1, i1 0
- %nop2194 = alloca i1, i1 0
- %nop2195 = alloca i1, i1 0
- %nop2196 = alloca i1, i1 0
- %nop2197 = alloca i1, i1 0
- %nop2198 = alloca i1, i1 0
- %nop2199 = alloca i1, i1 0
- %nop2200 = alloca i1, i1 0
- %nop2201 = alloca i1, i1 0
- %nop2202 = alloca i1, i1 0
- %nop2203 = alloca i1, i1 0
- %nop2204 = alloca i1, i1 0
- %nop2205 = alloca i1, i1 0
- %nop2206 = alloca i1, i1 0
- %nop2207 = alloca i1, i1 0
- %nop2208 = alloca i1, i1 0
- %nop2209 = alloca i1, i1 0
- %nop2210 = alloca i1, i1 0
- %nop2211 = alloca i1, i1 0
- %nop2212 = alloca i1, i1 0
- %nop2213 = alloca i1, i1 0
- %nop2214 = alloca i1, i1 0
- %nop2215 = alloca i1, i1 0
- %nop2216 = alloca i1, i1 0
- %nop2217 = alloca i1, i1 0
- %nop2218 = alloca i1, i1 0
- %nop2219 = alloca i1, i1 0
- %nop2220 = alloca i1, i1 0
- %nop2221 = alloca i1, i1 0
- %nop2222 = alloca i1, i1 0
- %nop2223 = alloca i1, i1 0
- %nop2224 = alloca i1, i1 0
- %nop2225 = alloca i1, i1 0
- %nop2226 = alloca i1, i1 0
- %nop2227 = alloca i1, i1 0
- %nop2228 = alloca i1, i1 0
- %nop2229 = alloca i1, i1 0
- %nop2230 = alloca i1, i1 0
- %nop2231 = alloca i1, i1 0
- %nop2232 = alloca i1, i1 0
- %nop2233 = alloca i1, i1 0
- %nop2234 = alloca i1, i1 0
- %nop2235 = alloca i1, i1 0
- %nop2236 = alloca i1, i1 0
- %nop2237 = alloca i1, i1 0
- %nop2238 = alloca i1, i1 0
- %nop2239 = alloca i1, i1 0
- %nop2240 = alloca i1, i1 0
- %nop2241 = alloca i1, i1 0
- %nop2242 = alloca i1, i1 0
- %nop2243 = alloca i1, i1 0
- %nop2244 = alloca i1, i1 0
- %nop2245 = alloca i1, i1 0
- %nop2246 = alloca i1, i1 0
- %nop2247 = alloca i1, i1 0
- %nop2248 = alloca i1, i1 0
- %nop2249 = alloca i1, i1 0
- %nop2250 = alloca i1, i1 0
- %nop2251 = alloca i1, i1 0
- %nop2252 = alloca i1, i1 0
- %nop2253 = alloca i1, i1 0
- %nop2254 = alloca i1, i1 0
- %nop2255 = alloca i1, i1 0
- %nop2256 = alloca i1, i1 0
- %nop2257 = alloca i1, i1 0
- %nop2258 = alloca i1, i1 0
- %nop2259 = alloca i1, i1 0
- %nop2260 = alloca i1, i1 0
- %nop2261 = alloca i1, i1 0
- %nop2262 = alloca i1, i1 0
- %nop2263 = alloca i1, i1 0
- %nop2264 = alloca i1, i1 0
- %nop2265 = alloca i1, i1 0
- %nop2266 = alloca i1, i1 0
- %nop2267 = alloca i1, i1 0
- %nop2268 = alloca i1, i1 0
- %nop2269 = alloca i1, i1 0
- %nop2270 = alloca i1, i1 0
- %nop2271 = alloca i1, i1 0
- %nop2272 = alloca i1, i1 0
- %nop2273 = alloca i1, i1 0
- %nop2274 = alloca i1, i1 0
- %nop2275 = alloca i1, i1 0
- %nop2276 = alloca i1, i1 0
- %nop2277 = alloca i1, i1 0
- %nop2278 = alloca i1, i1 0
- %nop2279 = alloca i1, i1 0
- %nop2280 = alloca i1, i1 0
- %nop2281 = alloca i1, i1 0
- %nop2282 = alloca i1, i1 0
- %nop2283 = alloca i1, i1 0
- %nop2284 = alloca i1, i1 0
- %nop2285 = alloca i1, i1 0
- %nop2286 = alloca i1, i1 0
- %nop2287 = alloca i1, i1 0
- %nop2288 = alloca i1, i1 0
- %nop2289 = alloca i1, i1 0
- %nop2290 = alloca i1, i1 0
- %nop2291 = alloca i1, i1 0
- %nop2292 = alloca i1, i1 0
- %nop2293 = alloca i1, i1 0
- %nop2294 = alloca i1, i1 0
- %nop2295 = alloca i1, i1 0
- %nop2296 = alloca i1, i1 0
- %nop2297 = alloca i1, i1 0
- %nop2298 = alloca i1, i1 0
- %nop2299 = alloca i1, i1 0
- %nop2300 = alloca i1, i1 0
- %nop2301 = alloca i1, i1 0
- %nop2302 = alloca i1, i1 0
- %nop2303 = alloca i1, i1 0
- %nop2304 = alloca i1, i1 0
- %nop2305 = alloca i1, i1 0
- %nop2306 = alloca i1, i1 0
- %nop2307 = alloca i1, i1 0
- %nop2308 = alloca i1, i1 0
- %nop2309 = alloca i1, i1 0
- %nop2310 = alloca i1, i1 0
- %nop2311 = alloca i1, i1 0
- %nop2312 = alloca i1, i1 0
- %nop2313 = alloca i1, i1 0
- %nop2314 = alloca i1, i1 0
- %nop2315 = alloca i1, i1 0
- %nop2316 = alloca i1, i1 0
- %nop2317 = alloca i1, i1 0
- %nop2318 = alloca i1, i1 0
- %nop2319 = alloca i1, i1 0
- %nop2320 = alloca i1, i1 0
- %nop2321 = alloca i1, i1 0
- %nop2322 = alloca i1, i1 0
- %nop2323 = alloca i1, i1 0
- %nop2324 = alloca i1, i1 0
- %nop2325 = alloca i1, i1 0
- %nop2326 = alloca i1, i1 0
- %nop2327 = alloca i1, i1 0
- %nop2328 = alloca i1, i1 0
- %nop2329 = alloca i1, i1 0
- %nop2330 = alloca i1, i1 0
- %nop2331 = alloca i1, i1 0
- %nop2332 = alloca i1, i1 0
- %nop2333 = alloca i1, i1 0
- %nop2334 = alloca i1, i1 0
- %nop2335 = alloca i1, i1 0
- %nop2336 = alloca i1, i1 0
- %nop2337 = alloca i1, i1 0
- %nop2338 = alloca i1, i1 0
- %nop2339 = alloca i1, i1 0
- %nop2340 = alloca i1, i1 0
- %nop2341 = alloca i1, i1 0
- %nop2342 = alloca i1, i1 0
- %nop2343 = alloca i1, i1 0
- %nop2344 = alloca i1, i1 0
- %nop2345 = alloca i1, i1 0
- %nop2346 = alloca i1, i1 0
- %nop2347 = alloca i1, i1 0
- %nop2348 = alloca i1, i1 0
- %nop2349 = alloca i1, i1 0
- %nop2350 = alloca i1, i1 0
- %nop2351 = alloca i1, i1 0
- %nop2352 = alloca i1, i1 0
- %nop2353 = alloca i1, i1 0
- %nop2354 = alloca i1, i1 0
- %nop2355 = alloca i1, i1 0
- %nop2356 = alloca i1, i1 0
- %nop2357 = alloca i1, i1 0
- %nop2358 = alloca i1, i1 0
- %nop2359 = alloca i1, i1 0
- %nop2360 = alloca i1, i1 0
- %nop2361 = alloca i1, i1 0
- %nop2362 = alloca i1, i1 0
- %nop2363 = alloca i1, i1 0
- %nop2364 = alloca i1, i1 0
- %nop2365 = alloca i1, i1 0
- %nop2366 = alloca i1, i1 0
- %nop2367 = alloca i1, i1 0
- %nop2368 = alloca i1, i1 0
- %nop2369 = alloca i1, i1 0
- %nop2370 = alloca i1, i1 0
- %nop2371 = alloca i1, i1 0
- %nop2372 = alloca i1, i1 0
- %nop2373 = alloca i1, i1 0
- %nop2374 = alloca i1, i1 0
- %nop2375 = alloca i1, i1 0
- %nop2376 = alloca i1, i1 0
- %nop2377 = alloca i1, i1 0
- %nop2378 = alloca i1, i1 0
- %nop2379 = alloca i1, i1 0
- %nop2380 = alloca i1, i1 0
- %nop2381 = alloca i1, i1 0
- %nop2382 = alloca i1, i1 0
- %nop2383 = alloca i1, i1 0
- %nop2384 = alloca i1, i1 0
- %nop2385 = alloca i1, i1 0
- %nop2386 = alloca i1, i1 0
- %nop2387 = alloca i1, i1 0
- %nop2388 = alloca i1, i1 0
- %nop2389 = alloca i1, i1 0
- %nop2390 = alloca i1, i1 0
- %nop2391 = alloca i1, i1 0
- %nop2392 = alloca i1, i1 0
- %nop2393 = alloca i1, i1 0
- %nop2394 = alloca i1, i1 0
- %nop2395 = alloca i1, i1 0
- %nop2396 = alloca i1, i1 0
- %nop2397 = alloca i1, i1 0
- %nop2398 = alloca i1, i1 0
- %nop2399 = alloca i1, i1 0
- %nop2400 = alloca i1, i1 0
- %nop2401 = alloca i1, i1 0
- %nop2402 = alloca i1, i1 0
- %nop2403 = alloca i1, i1 0
- %nop2404 = alloca i1, i1 0
- %nop2405 = alloca i1, i1 0
- %nop2406 = alloca i1, i1 0
- %nop2407 = alloca i1, i1 0
- %nop2408 = alloca i1, i1 0
- %nop2409 = alloca i1, i1 0
- %nop2410 = alloca i1, i1 0
- %nop2411 = alloca i1, i1 0
- %nop2412 = alloca i1, i1 0
- %nop2413 = alloca i1, i1 0
- %nop2414 = alloca i1, i1 0
- %nop2415 = alloca i1, i1 0
- %nop2416 = alloca i1, i1 0
- %nop2417 = alloca i1, i1 0
- %nop2418 = alloca i1, i1 0
- %nop2419 = alloca i1, i1 0
- %nop2420 = alloca i1, i1 0
- %nop2421 = alloca i1, i1 0
- %nop2422 = alloca i1, i1 0
- %nop2423 = alloca i1, i1 0
- %nop2424 = alloca i1, i1 0
- %nop2425 = alloca i1, i1 0
- %nop2426 = alloca i1, i1 0
- %nop2427 = alloca i1, i1 0
- %nop2428 = alloca i1, i1 0
- %nop2429 = alloca i1, i1 0
- %nop2430 = alloca i1, i1 0
- %nop2431 = alloca i1, i1 0
- %nop2432 = alloca i1, i1 0
- %nop2433 = alloca i1, i1 0
- %nop2434 = alloca i1, i1 0
- %nop2435 = alloca i1, i1 0
- %nop2436 = alloca i1, i1 0
- %nop2437 = alloca i1, i1 0
- %nop2438 = alloca i1, i1 0
- %nop2439 = alloca i1, i1 0
- %nop2440 = alloca i1, i1 0
- %nop2441 = alloca i1, i1 0
- %nop2442 = alloca i1, i1 0
- %nop2443 = alloca i1, i1 0
- %nop2444 = alloca i1, i1 0
- %nop2445 = alloca i1, i1 0
- %nop2446 = alloca i1, i1 0
- %nop2447 = alloca i1, i1 0
- %nop2448 = alloca i1, i1 0
- %nop2449 = alloca i1, i1 0
- %nop2450 = alloca i1, i1 0
- %nop2451 = alloca i1, i1 0
- %nop2452 = alloca i1, i1 0
- %nop2453 = alloca i1, i1 0
- %nop2454 = alloca i1, i1 0
- %nop2455 = alloca i1, i1 0
- %nop2456 = alloca i1, i1 0
- %nop2457 = alloca i1, i1 0
- %nop2458 = alloca i1, i1 0
- %nop2459 = alloca i1, i1 0
- %nop2460 = alloca i1, i1 0
- %nop2461 = alloca i1, i1 0
- %nop2462 = alloca i1, i1 0
- %nop2463 = alloca i1, i1 0
- %nop2464 = alloca i1, i1 0
- %nop2465 = alloca i1, i1 0
- %nop2466 = alloca i1, i1 0
- %nop2467 = alloca i1, i1 0
- %nop2468 = alloca i1, i1 0
- %nop2469 = alloca i1, i1 0
- %nop2470 = alloca i1, i1 0
- %nop2471 = alloca i1, i1 0
- %nop2472 = alloca i1, i1 0
- %nop2473 = alloca i1, i1 0
- %nop2474 = alloca i1, i1 0
- %nop2475 = alloca i1, i1 0
- %nop2476 = alloca i1, i1 0
- %nop2477 = alloca i1, i1 0
- %nop2478 = alloca i1, i1 0
- %nop2479 = alloca i1, i1 0
- %nop2480 = alloca i1, i1 0
- %nop2481 = alloca i1, i1 0
- %nop2482 = alloca i1, i1 0
- %nop2483 = alloca i1, i1 0
- %nop2484 = alloca i1, i1 0
- %nop2485 = alloca i1, i1 0
- %nop2486 = alloca i1, i1 0
- %nop2487 = alloca i1, i1 0
- %nop2488 = alloca i1, i1 0
- %nop2489 = alloca i1, i1 0
- %nop2490 = alloca i1, i1 0
- %nop2491 = alloca i1, i1 0
- %nop2492 = alloca i1, i1 0
- %nop2493 = alloca i1, i1 0
- %nop2494 = alloca i1, i1 0
- %nop2495 = alloca i1, i1 0
- %nop2496 = alloca i1, i1 0
- %nop2497 = alloca i1, i1 0
- %nop2498 = alloca i1, i1 0
- %nop2499 = alloca i1, i1 0
- %nop2500 = alloca i1, i1 0
- %nop2501 = alloca i1, i1 0
- %nop2502 = alloca i1, i1 0
- %nop2503 = alloca i1, i1 0
- %nop2504 = alloca i1, i1 0
- %nop2505 = alloca i1, i1 0
- %nop2506 = alloca i1, i1 0
- %nop2507 = alloca i1, i1 0
- %nop2508 = alloca i1, i1 0
- %nop2509 = alloca i1, i1 0
- %nop2510 = alloca i1, i1 0
- %nop2511 = alloca i1, i1 0
- %nop2512 = alloca i1, i1 0
- %nop2513 = alloca i1, i1 0
- %nop2514 = alloca i1, i1 0
- %nop2515 = alloca i1, i1 0
- %nop2516 = alloca i1, i1 0
- %nop2517 = alloca i1, i1 0
- %nop2518 = alloca i1, i1 0
- %nop2519 = alloca i1, i1 0
- %nop2520 = alloca i1, i1 0
- %nop2521 = alloca i1, i1 0
- %nop2522 = alloca i1, i1 0
- %nop2523 = alloca i1, i1 0
- %nop2524 = alloca i1, i1 0
- %nop2525 = alloca i1, i1 0
- %nop2526 = alloca i1, i1 0
- %nop2527 = alloca i1, i1 0
- %nop2528 = alloca i1, i1 0
- %nop2529 = alloca i1, i1 0
- %nop2530 = alloca i1, i1 0
- %nop2531 = alloca i1, i1 0
- %nop2532 = alloca i1, i1 0
- %nop2533 = alloca i1, i1 0
- %nop2534 = alloca i1, i1 0
- %nop2535 = alloca i1, i1 0
- %nop2536 = alloca i1, i1 0
- %nop2537 = alloca i1, i1 0
- %nop2538 = alloca i1, i1 0
- %nop2539 = alloca i1, i1 0
- %nop2540 = alloca i1, i1 0
- %nop2541 = alloca i1, i1 0
- %nop2542 = alloca i1, i1 0
- %nop2543 = alloca i1, i1 0
- %nop2544 = alloca i1, i1 0
- %nop2545 = alloca i1, i1 0
- %nop2546 = alloca i1, i1 0
- %nop2547 = alloca i1, i1 0
- %nop2548 = alloca i1, i1 0
- %nop2549 = alloca i1, i1 0
- %nop2550 = alloca i1, i1 0
- %nop2551 = alloca i1, i1 0
- %nop2552 = alloca i1, i1 0
- %nop2553 = alloca i1, i1 0
- %nop2554 = alloca i1, i1 0
- %nop2555 = alloca i1, i1 0
- %nop2556 = alloca i1, i1 0
- %nop2557 = alloca i1, i1 0
- %nop2558 = alloca i1, i1 0
- %nop2559 = alloca i1, i1 0
- %nop2560 = alloca i1, i1 0
- %nop2561 = alloca i1, i1 0
- %nop2562 = alloca i1, i1 0
- %nop2563 = alloca i1, i1 0
- %nop2564 = alloca i1, i1 0
- %nop2565 = alloca i1, i1 0
- %nop2566 = alloca i1, i1 0
- %nop2567 = alloca i1, i1 0
- %nop2568 = alloca i1, i1 0
- %nop2569 = alloca i1, i1 0
- %nop2570 = alloca i1, i1 0
- %nop2571 = alloca i1, i1 0
- %nop2572 = alloca i1, i1 0
- %nop2573 = alloca i1, i1 0
- %nop2574 = alloca i1, i1 0
- %nop2575 = alloca i1, i1 0
- %nop2576 = alloca i1, i1 0
- %nop2577 = alloca i1, i1 0
- %nop2578 = alloca i1, i1 0
- %nop2579 = alloca i1, i1 0
- %nop2580 = alloca i1, i1 0
- %nop2581 = alloca i1, i1 0
- %nop2582 = alloca i1, i1 0
- %nop2583 = alloca i1, i1 0
- %nop2584 = alloca i1, i1 0
- %nop2585 = alloca i1, i1 0
- %nop2586 = alloca i1, i1 0
- %nop2587 = alloca i1, i1 0
- %nop2588 = alloca i1, i1 0
- %nop2589 = alloca i1, i1 0
- %nop2590 = alloca i1, i1 0
- %nop2591 = alloca i1, i1 0
- %nop2592 = alloca i1, i1 0
- %nop2593 = alloca i1, i1 0
- %nop2594 = alloca i1, i1 0
- %nop2595 = alloca i1, i1 0
- %nop2596 = alloca i1, i1 0
- %nop2597 = alloca i1, i1 0
- %nop2598 = alloca i1, i1 0
- %nop2599 = alloca i1, i1 0
- %nop2600 = alloca i1, i1 0
- %nop2601 = alloca i1, i1 0
- %nop2602 = alloca i1, i1 0
- %nop2603 = alloca i1, i1 0
- %nop2604 = alloca i1, i1 0
- %nop2605 = alloca i1, i1 0
- %nop2606 = alloca i1, i1 0
- %nop2607 = alloca i1, i1 0
- %nop2608 = alloca i1, i1 0
- %nop2609 = alloca i1, i1 0
- %nop2610 = alloca i1, i1 0
- %nop2611 = alloca i1, i1 0
- %nop2612 = alloca i1, i1 0
- %nop2613 = alloca i1, i1 0
- %nop2614 = alloca i1, i1 0
- %nop2615 = alloca i1, i1 0
- %nop2616 = alloca i1, i1 0
- %nop2617 = alloca i1, i1 0
- %nop2618 = alloca i1, i1 0
- %nop2619 = alloca i1, i1 0
- %nop2620 = alloca i1, i1 0
- %nop2621 = alloca i1, i1 0
- %nop2622 = alloca i1, i1 0
- %nop2623 = alloca i1, i1 0
- %nop2624 = alloca i1, i1 0
- %nop2625 = alloca i1, i1 0
- %nop2626 = alloca i1, i1 0
- %nop2627 = alloca i1, i1 0
- %nop2628 = alloca i1, i1 0
- %nop2629 = alloca i1, i1 0
- %nop2630 = alloca i1, i1 0
- %nop2631 = alloca i1, i1 0
- %nop2632 = alloca i1, i1 0
- %nop2633 = alloca i1, i1 0
- %nop2634 = alloca i1, i1 0
- %nop2635 = alloca i1, i1 0
- %nop2636 = alloca i1, i1 0
- %nop2637 = alloca i1, i1 0
- %nop2638 = alloca i1, i1 0
- %nop2639 = alloca i1, i1 0
- %nop2640 = alloca i1, i1 0
- %nop2641 = alloca i1, i1 0
- %nop2642 = alloca i1, i1 0
- %nop2643 = alloca i1, i1 0
- %nop2644 = alloca i1, i1 0
- %nop2645 = alloca i1, i1 0
- %nop2646 = alloca i1, i1 0
- %nop2647 = alloca i1, i1 0
- %nop2648 = alloca i1, i1 0
- %nop2649 = alloca i1, i1 0
- %nop2650 = alloca i1, i1 0
- %nop2651 = alloca i1, i1 0
- %nop2652 = alloca i1, i1 0
- %nop2653 = alloca i1, i1 0
- %nop2654 = alloca i1, i1 0
- %nop2655 = alloca i1, i1 0
- %nop2656 = alloca i1, i1 0
- %nop2657 = alloca i1, i1 0
- %nop2658 = alloca i1, i1 0
- %nop2659 = alloca i1, i1 0
- %nop2660 = alloca i1, i1 0
- %nop2661 = alloca i1, i1 0
- %nop2662 = alloca i1, i1 0
- %nop2663 = alloca i1, i1 0
- %nop2664 = alloca i1, i1 0
- %nop2665 = alloca i1, i1 0
- %nop2666 = alloca i1, i1 0
- %nop2667 = alloca i1, i1 0
- %nop2668 = alloca i1, i1 0
- %nop2669 = alloca i1, i1 0
- %nop2670 = alloca i1, i1 0
- %nop2671 = alloca i1, i1 0
- %nop2672 = alloca i1, i1 0
- %nop2673 = alloca i1, i1 0
- %nop2674 = alloca i1, i1 0
- %nop2675 = alloca i1, i1 0
- %nop2676 = alloca i1, i1 0
- %nop2677 = alloca i1, i1 0
- %nop2678 = alloca i1, i1 0
- %nop2679 = alloca i1, i1 0
- %nop2680 = alloca i1, i1 0
- %nop2681 = alloca i1, i1 0
- %nop2682 = alloca i1, i1 0
- %nop2683 = alloca i1, i1 0
- %nop2684 = alloca i1, i1 0
- %nop2685 = alloca i1, i1 0
- %nop2686 = alloca i1, i1 0
- %nop2687 = alloca i1, i1 0
- %nop2688 = alloca i1, i1 0
- %nop2689 = alloca i1, i1 0
- %nop2690 = alloca i1, i1 0
- %nop2691 = alloca i1, i1 0
- %nop2692 = alloca i1, i1 0
- %nop2693 = alloca i1, i1 0
- %nop2694 = alloca i1, i1 0
- %nop2695 = alloca i1, i1 0
- %nop2696 = alloca i1, i1 0
- %nop2697 = alloca i1, i1 0
- %nop2698 = alloca i1, i1 0
- %nop2699 = alloca i1, i1 0
- %nop2700 = alloca i1, i1 0
- %nop2701 = alloca i1, i1 0
- %nop2702 = alloca i1, i1 0
- %nop2703 = alloca i1, i1 0
- %nop2704 = alloca i1, i1 0
- %nop2705 = alloca i1, i1 0
- %nop2706 = alloca i1, i1 0
- %nop2707 = alloca i1, i1 0
- %nop2708 = alloca i1, i1 0
- %nop2709 = alloca i1, i1 0
- %nop2710 = alloca i1, i1 0
- %nop2711 = alloca i1, i1 0
- %nop2712 = alloca i1, i1 0
- %nop2713 = alloca i1, i1 0
- %nop2714 = alloca i1, i1 0
- %nop2715 = alloca i1, i1 0
- %nop2716 = alloca i1, i1 0
- %nop2717 = alloca i1, i1 0
- %nop2718 = alloca i1, i1 0
- %nop2719 = alloca i1, i1 0
- %nop2720 = alloca i1, i1 0
- %nop2721 = alloca i1, i1 0
- %nop2722 = alloca i1, i1 0
- %nop2723 = alloca i1, i1 0
- %nop2724 = alloca i1, i1 0
- %nop2725 = alloca i1, i1 0
- %nop2726 = alloca i1, i1 0
- %nop2727 = alloca i1, i1 0
- %nop2728 = alloca i1, i1 0
- %nop2729 = alloca i1, i1 0
- %nop2730 = alloca i1, i1 0
- %nop2731 = alloca i1, i1 0
- %nop2732 = alloca i1, i1 0
- %nop2733 = alloca i1, i1 0
- %nop2734 = alloca i1, i1 0
- %nop2735 = alloca i1, i1 0
- %nop2736 = alloca i1, i1 0
- %nop2737 = alloca i1, i1 0
- %nop2738 = alloca i1, i1 0
- %nop2739 = alloca i1, i1 0
- %nop2740 = alloca i1, i1 0
- %nop2741 = alloca i1, i1 0
- %nop2742 = alloca i1, i1 0
- %nop2743 = alloca i1, i1 0
- %nop2744 = alloca i1, i1 0
- %nop2745 = alloca i1, i1 0
- %nop2746 = alloca i1, i1 0
- %nop2747 = alloca i1, i1 0
- %nop2748 = alloca i1, i1 0
- %nop2749 = alloca i1, i1 0
- %nop2750 = alloca i1, i1 0
- %nop2751 = alloca i1, i1 0
- %nop2752 = alloca i1, i1 0
- %nop2753 = alloca i1, i1 0
- %nop2754 = alloca i1, i1 0
- %nop2755 = alloca i1, i1 0
- %nop2756 = alloca i1, i1 0
- %nop2757 = alloca i1, i1 0
- %nop2758 = alloca i1, i1 0
- %nop2759 = alloca i1, i1 0
- %nop2760 = alloca i1, i1 0
- %nop2761 = alloca i1, i1 0
- %nop2762 = alloca i1, i1 0
- %nop2763 = alloca i1, i1 0
- %nop2764 = alloca i1, i1 0
- %nop2765 = alloca i1, i1 0
- %nop2766 = alloca i1, i1 0
- %nop2767 = alloca i1, i1 0
- %nop2768 = alloca i1, i1 0
- %nop2769 = alloca i1, i1 0
- %nop2770 = alloca i1, i1 0
- %nop2771 = alloca i1, i1 0
- %nop2772 = alloca i1, i1 0
- %nop2773 = alloca i1, i1 0
- %nop2774 = alloca i1, i1 0
- %nop2775 = alloca i1, i1 0
- %nop2776 = alloca i1, i1 0
- %nop2777 = alloca i1, i1 0
- %nop2778 = alloca i1, i1 0
- %nop2779 = alloca i1, i1 0
- %nop2780 = alloca i1, i1 0
- %nop2781 = alloca i1, i1 0
- %nop2782 = alloca i1, i1 0
- %nop2783 = alloca i1, i1 0
- %nop2784 = alloca i1, i1 0
- %nop2785 = alloca i1, i1 0
- %nop2786 = alloca i1, i1 0
- %nop2787 = alloca i1, i1 0
- %nop2788 = alloca i1, i1 0
- %nop2789 = alloca i1, i1 0
- %nop2790 = alloca i1, i1 0
- %nop2791 = alloca i1, i1 0
- %nop2792 = alloca i1, i1 0
- %nop2793 = alloca i1, i1 0
- %nop2794 = alloca i1, i1 0
- %nop2795 = alloca i1, i1 0
- %nop2796 = alloca i1, i1 0
- %nop2797 = alloca i1, i1 0
- %nop2798 = alloca i1, i1 0
- %nop2799 = alloca i1, i1 0
- %nop2800 = alloca i1, i1 0
- %nop2801 = alloca i1, i1 0
- %nop2802 = alloca i1, i1 0
- %nop2803 = alloca i1, i1 0
- %nop2804 = alloca i1, i1 0
- %nop2805 = alloca i1, i1 0
- %nop2806 = alloca i1, i1 0
- %nop2807 = alloca i1, i1 0
- %nop2808 = alloca i1, i1 0
- %nop2809 = alloca i1, i1 0
- %nop2810 = alloca i1, i1 0
- %nop2811 = alloca i1, i1 0
- %nop2812 = alloca i1, i1 0
- %nop2813 = alloca i1, i1 0
- %nop2814 = alloca i1, i1 0
- %nop2815 = alloca i1, i1 0
- %nop2816 = alloca i1, i1 0
- %nop2817 = alloca i1, i1 0
- %nop2818 = alloca i1, i1 0
- %nop2819 = alloca i1, i1 0
- %nop2820 = alloca i1, i1 0
- %nop2821 = alloca i1, i1 0
- %nop2822 = alloca i1, i1 0
- %nop2823 = alloca i1, i1 0
- %nop2824 = alloca i1, i1 0
- %nop2825 = alloca i1, i1 0
- %nop2826 = alloca i1, i1 0
- %nop2827 = alloca i1, i1 0
- %nop2828 = alloca i1, i1 0
- %nop2829 = alloca i1, i1 0
- %nop2830 = alloca i1, i1 0
- %nop2831 = alloca i1, i1 0
- %nop2832 = alloca i1, i1 0
- %nop2833 = alloca i1, i1 0
- %nop2834 = alloca i1, i1 0
- %nop2835 = alloca i1, i1 0
- %nop2836 = alloca i1, i1 0
- %nop2837 = alloca i1, i1 0
- %nop2838 = alloca i1, i1 0
- %nop2839 = alloca i1, i1 0
- %nop2840 = alloca i1, i1 0
- %nop2841 = alloca i1, i1 0
- %nop2842 = alloca i1, i1 0
- %nop2843 = alloca i1, i1 0
- %nop2844 = alloca i1, i1 0
- %nop2845 = alloca i1, i1 0
- %nop2846 = alloca i1, i1 0
- %nop2847 = alloca i1, i1 0
- %nop2848 = alloca i1, i1 0
- %nop2849 = alloca i1, i1 0
- %nop2850 = alloca i1, i1 0
- %nop2851 = alloca i1, i1 0
- %nop2852 = alloca i1, i1 0
- %nop2853 = alloca i1, i1 0
- %nop2854 = alloca i1, i1 0
- %nop2855 = alloca i1, i1 0
- %nop2856 = alloca i1, i1 0
- %nop2857 = alloca i1, i1 0
- %nop2858 = alloca i1, i1 0
- %nop2859 = alloca i1, i1 0
- %nop2860 = alloca i1, i1 0
- %nop2861 = alloca i1, i1 0
- %nop2862 = alloca i1, i1 0
- %nop2863 = alloca i1, i1 0
- %nop2864 = alloca i1, i1 0
- %nop2865 = alloca i1, i1 0
- %nop2866 = alloca i1, i1 0
- %nop2867 = alloca i1, i1 0
- %nop2868 = alloca i1, i1 0
- %nop2869 = alloca i1, i1 0
- %nop2870 = alloca i1, i1 0
- %nop2871 = alloca i1, i1 0
- %nop2872 = alloca i1, i1 0
- %nop2873 = alloca i1, i1 0
- %nop2874 = alloca i1, i1 0
- %nop2875 = alloca i1, i1 0
- %nop2876 = alloca i1, i1 0
- %nop2877 = alloca i1, i1 0
- %nop2878 = alloca i1, i1 0
- %nop2879 = alloca i1, i1 0
- %nop2880 = alloca i1, i1 0
- %nop2881 = alloca i1, i1 0
- %nop2882 = alloca i1, i1 0
- %nop2883 = alloca i1, i1 0
- %nop2884 = alloca i1, i1 0
- %nop2885 = alloca i1, i1 0
- %nop2886 = alloca i1, i1 0
- %nop2887 = alloca i1, i1 0
- %nop2888 = alloca i1, i1 0
- %nop2889 = alloca i1, i1 0
- %nop2890 = alloca i1, i1 0
- %nop2891 = alloca i1, i1 0
- %nop2892 = alloca i1, i1 0
- %nop2893 = alloca i1, i1 0
- %nop2894 = alloca i1, i1 0
- %nop2895 = alloca i1, i1 0
- %nop2896 = alloca i1, i1 0
- %nop2897 = alloca i1, i1 0
- %nop2898 = alloca i1, i1 0
- %nop2899 = alloca i1, i1 0
- %nop2900 = alloca i1, i1 0
- %nop2901 = alloca i1, i1 0
- %nop2902 = alloca i1, i1 0
- %nop2903 = alloca i1, i1 0
- %nop2904 = alloca i1, i1 0
- %nop2905 = alloca i1, i1 0
- %nop2906 = alloca i1, i1 0
- %nop2907 = alloca i1, i1 0
- %nop2908 = alloca i1, i1 0
- %nop2909 = alloca i1, i1 0
- %nop2910 = alloca i1, i1 0
- %nop2911 = alloca i1, i1 0
- %nop2912 = alloca i1, i1 0
- %nop2913 = alloca i1, i1 0
- %nop2914 = alloca i1, i1 0
- %nop2915 = alloca i1, i1 0
- %nop2916 = alloca i1, i1 0
- %nop2917 = alloca i1, i1 0
- %nop2918 = alloca i1, i1 0
- %nop2919 = alloca i1, i1 0
- %nop2920 = alloca i1, i1 0
- %nop2921 = alloca i1, i1 0
- %nop2922 = alloca i1, i1 0
- %nop2923 = alloca i1, i1 0
- %nop2924 = alloca i1, i1 0
- %nop2925 = alloca i1, i1 0
- %nop2926 = alloca i1, i1 0
- %nop2927 = alloca i1, i1 0
- %nop2928 = alloca i1, i1 0
- %nop2929 = alloca i1, i1 0
- %nop2930 = alloca i1, i1 0
- %nop2931 = alloca i1, i1 0
- %nop2932 = alloca i1, i1 0
- %nop2933 = alloca i1, i1 0
- %nop2934 = alloca i1, i1 0
- %nop2935 = alloca i1, i1 0
- %nop2936 = alloca i1, i1 0
- %nop2937 = alloca i1, i1 0
- %nop2938 = alloca i1, i1 0
- %nop2939 = alloca i1, i1 0
- %nop2940 = alloca i1, i1 0
- %nop2941 = alloca i1, i1 0
- %nop2942 = alloca i1, i1 0
- %nop2943 = alloca i1, i1 0
- %nop2944 = alloca i1, i1 0
- %nop2945 = alloca i1, i1 0
- %nop2946 = alloca i1, i1 0
- %nop2947 = alloca i1, i1 0
- %nop2948 = alloca i1, i1 0
- %nop2949 = alloca i1, i1 0
- %nop2950 = alloca i1, i1 0
- %nop2951 = alloca i1, i1 0
- %nop2952 = alloca i1, i1 0
- %nop2953 = alloca i1, i1 0
- %nop2954 = alloca i1, i1 0
- %nop2955 = alloca i1, i1 0
- %nop2956 = alloca i1, i1 0
- %nop2957 = alloca i1, i1 0
- %nop2958 = alloca i1, i1 0
- %nop2959 = alloca i1, i1 0
- %nop2960 = alloca i1, i1 0
- %nop2961 = alloca i1, i1 0
- %nop2962 = alloca i1, i1 0
- %nop2963 = alloca i1, i1 0
- %nop2964 = alloca i1, i1 0
- %nop2965 = alloca i1, i1 0
- %nop2966 = alloca i1, i1 0
- %nop2967 = alloca i1, i1 0
- %nop2968 = alloca i1, i1 0
- %nop2969 = alloca i1, i1 0
- %nop2970 = alloca i1, i1 0
- %nop2971 = alloca i1, i1 0
- %nop2972 = alloca i1, i1 0
- %nop2973 = alloca i1, i1 0
- %nop2974 = alloca i1, i1 0
- %nop2975 = alloca i1, i1 0
- %nop2976 = alloca i1, i1 0
- %nop2977 = alloca i1, i1 0
- %nop2978 = alloca i1, i1 0
- %nop2979 = alloca i1, i1 0
- %nop2980 = alloca i1, i1 0
- %nop2981 = alloca i1, i1 0
- %nop2982 = alloca i1, i1 0
- %nop2983 = alloca i1, i1 0
- %nop2984 = alloca i1, i1 0
- %nop2985 = alloca i1, i1 0
- %nop2986 = alloca i1, i1 0
- %nop2987 = alloca i1, i1 0
- %nop2988 = alloca i1, i1 0
- %nop2989 = alloca i1, i1 0
- %nop2990 = alloca i1, i1 0
- %nop2991 = alloca i1, i1 0
- %nop2992 = alloca i1, i1 0
- %nop2993 = alloca i1, i1 0
- %nop2994 = alloca i1, i1 0
- %nop2995 = alloca i1, i1 0
- %nop2996 = alloca i1, i1 0
- %nop2997 = alloca i1, i1 0
- %nop2998 = alloca i1, i1 0
- %nop2999 = alloca i1, i1 0
- %nop3000 = alloca i1, i1 0
- %nop3001 = alloca i1, i1 0
- %nop3002 = alloca i1, i1 0
- %nop3003 = alloca i1, i1 0
- %nop3004 = alloca i1, i1 0
- %nop3005 = alloca i1, i1 0
- %nop3006 = alloca i1, i1 0
- %nop3007 = alloca i1, i1 0
- %nop3008 = alloca i1, i1 0
- %nop3009 = alloca i1, i1 0
- %nop3010 = alloca i1, i1 0
- %nop3011 = alloca i1, i1 0
- %nop3012 = alloca i1, i1 0
- %nop3013 = alloca i1, i1 0
- %nop3014 = alloca i1, i1 0
- %nop3015 = alloca i1, i1 0
- %nop3016 = alloca i1, i1 0
- %nop3017 = alloca i1, i1 0
- %nop3018 = alloca i1, i1 0
- %nop3019 = alloca i1, i1 0
- %nop3020 = alloca i1, i1 0
- %nop3021 = alloca i1, i1 0
- %nop3022 = alloca i1, i1 0
- %nop3023 = alloca i1, i1 0
- %nop3024 = alloca i1, i1 0
- %nop3025 = alloca i1, i1 0
- %nop3026 = alloca i1, i1 0
- %nop3027 = alloca i1, i1 0
- %nop3028 = alloca i1, i1 0
- %nop3029 = alloca i1, i1 0
- %nop3030 = alloca i1, i1 0
- %nop3031 = alloca i1, i1 0
- %nop3032 = alloca i1, i1 0
- %nop3033 = alloca i1, i1 0
- %nop3034 = alloca i1, i1 0
- %nop3035 = alloca i1, i1 0
- %nop3036 = alloca i1, i1 0
- %nop3037 = alloca i1, i1 0
- %nop3038 = alloca i1, i1 0
- %nop3039 = alloca i1, i1 0
- %nop3040 = alloca i1, i1 0
- %nop3041 = alloca i1, i1 0
- %nop3042 = alloca i1, i1 0
- %nop3043 = alloca i1, i1 0
- %nop3044 = alloca i1, i1 0
- %nop3045 = alloca i1, i1 0
- %nop3046 = alloca i1, i1 0
- %nop3047 = alloca i1, i1 0
- %nop3048 = alloca i1, i1 0
- %nop3049 = alloca i1, i1 0
- %nop3050 = alloca i1, i1 0
- %nop3051 = alloca i1, i1 0
- %nop3052 = alloca i1, i1 0
- %nop3053 = alloca i1, i1 0
- %nop3054 = alloca i1, i1 0
- %nop3055 = alloca i1, i1 0
- %nop3056 = alloca i1, i1 0
- %nop3057 = alloca i1, i1 0
- %nop3058 = alloca i1, i1 0
- %nop3059 = alloca i1, i1 0
- %nop3060 = alloca i1, i1 0
- %nop3061 = alloca i1, i1 0
- %nop3062 = alloca i1, i1 0
- %nop3063 = alloca i1, i1 0
- %nop3064 = alloca i1, i1 0
- %nop3065 = alloca i1, i1 0
- %nop3066 = alloca i1, i1 0
- %nop3067 = alloca i1, i1 0
- %nop3068 = alloca i1, i1 0
- %nop3069 = alloca i1, i1 0
- %nop3070 = alloca i1, i1 0
- %nop3071 = alloca i1, i1 0
- %nop3072 = alloca i1, i1 0
- %nop3073 = alloca i1, i1 0
- %nop3074 = alloca i1, i1 0
- %nop3075 = alloca i1, i1 0
- %nop3076 = alloca i1, i1 0
- %nop3077 = alloca i1, i1 0
- %nop3078 = alloca i1, i1 0
- %nop3079 = alloca i1, i1 0
- %nop3080 = alloca i1, i1 0
- %nop3081 = alloca i1, i1 0
- %nop3082 = alloca i1, i1 0
- %nop3083 = alloca i1, i1 0
- %nop3084 = alloca i1, i1 0
- %nop3085 = alloca i1, i1 0
- %nop3086 = alloca i1, i1 0
- %nop3087 = alloca i1, i1 0
- %nop3088 = alloca i1, i1 0
- %nop3089 = alloca i1, i1 0
- %nop3090 = alloca i1, i1 0
- %nop3091 = alloca i1, i1 0
- %nop3092 = alloca i1, i1 0
- %nop3093 = alloca i1, i1 0
- %nop3094 = alloca i1, i1 0
- %nop3095 = alloca i1, i1 0
- %nop3096 = alloca i1, i1 0
- %nop3097 = alloca i1, i1 0
- %nop3098 = alloca i1, i1 0
- %nop3099 = alloca i1, i1 0
- %nop3100 = alloca i1, i1 0
- %nop3101 = alloca i1, i1 0
- %nop3102 = alloca i1, i1 0
- %nop3103 = alloca i1, i1 0
- %nop3104 = alloca i1, i1 0
- %nop3105 = alloca i1, i1 0
- %nop3106 = alloca i1, i1 0
- %nop3107 = alloca i1, i1 0
- %nop3108 = alloca i1, i1 0
- %nop3109 = alloca i1, i1 0
- %nop3110 = alloca i1, i1 0
- %nop3111 = alloca i1, i1 0
- %nop3112 = alloca i1, i1 0
- %nop3113 = alloca i1, i1 0
- %nop3114 = alloca i1, i1 0
- %nop3115 = alloca i1, i1 0
- %nop3116 = alloca i1, i1 0
- %nop3117 = alloca i1, i1 0
- %nop3118 = alloca i1, i1 0
- %nop3119 = alloca i1, i1 0
- %nop3120 = alloca i1, i1 0
- %nop3121 = alloca i1, i1 0
- %nop3122 = alloca i1, i1 0
- %nop3123 = alloca i1, i1 0
- %nop3124 = alloca i1, i1 0
- %nop3125 = alloca i1, i1 0
- %nop3126 = alloca i1, i1 0
- %nop3127 = alloca i1, i1 0
- %nop3128 = alloca i1, i1 0
- %nop3129 = alloca i1, i1 0
- %nop3130 = alloca i1, i1 0
- %nop3131 = alloca i1, i1 0
- %nop3132 = alloca i1, i1 0
- %nop3133 = alloca i1, i1 0
- %nop3134 = alloca i1, i1 0
- %nop3135 = alloca i1, i1 0
- %nop3136 = alloca i1, i1 0
- %nop3137 = alloca i1, i1 0
- %nop3138 = alloca i1, i1 0
- %nop3139 = alloca i1, i1 0
- %nop3140 = alloca i1, i1 0
- %nop3141 = alloca i1, i1 0
- %nop3142 = alloca i1, i1 0
- %nop3143 = alloca i1, i1 0
- %nop3144 = alloca i1, i1 0
- %nop3145 = alloca i1, i1 0
- %nop3146 = alloca i1, i1 0
- %nop3147 = alloca i1, i1 0
- %nop3148 = alloca i1, i1 0
- %nop3149 = alloca i1, i1 0
- %nop3150 = alloca i1, i1 0
- %nop3151 = alloca i1, i1 0
- %nop3152 = alloca i1, i1 0
- %nop3153 = alloca i1, i1 0
- %nop3154 = alloca i1, i1 0
- %nop3155 = alloca i1, i1 0
- %nop3156 = alloca i1, i1 0
- %nop3157 = alloca i1, i1 0
- %nop3158 = alloca i1, i1 0
- %nop3159 = alloca i1, i1 0
- %nop3160 = alloca i1, i1 0
- %nop3161 = alloca i1, i1 0
- %nop3162 = alloca i1, i1 0
- %nop3163 = alloca i1, i1 0
- %nop3164 = alloca i1, i1 0
- %nop3165 = alloca i1, i1 0
- %nop3166 = alloca i1, i1 0
- %nop3167 = alloca i1, i1 0
- %nop3168 = alloca i1, i1 0
- %nop3169 = alloca i1, i1 0
- %nop3170 = alloca i1, i1 0
- %nop3171 = alloca i1, i1 0
- %nop3172 = alloca i1, i1 0
- %nop3173 = alloca i1, i1 0
- %nop3174 = alloca i1, i1 0
- %nop3175 = alloca i1, i1 0
- %nop3176 = alloca i1, i1 0
- %nop3177 = alloca i1, i1 0
- %nop3178 = alloca i1, i1 0
- %nop3179 = alloca i1, i1 0
- %nop3180 = alloca i1, i1 0
- %nop3181 = alloca i1, i1 0
- %nop3182 = alloca i1, i1 0
- %nop3183 = alloca i1, i1 0
- %nop3184 = alloca i1, i1 0
- %nop3185 = alloca i1, i1 0
- %nop3186 = alloca i1, i1 0
- %nop3187 = alloca i1, i1 0
- %nop3188 = alloca i1, i1 0
- %nop3189 = alloca i1, i1 0
- %nop3190 = alloca i1, i1 0
- %nop3191 = alloca i1, i1 0
- %nop3192 = alloca i1, i1 0
- %nop3193 = alloca i1, i1 0
- %nop3194 = alloca i1, i1 0
- %nop3195 = alloca i1, i1 0
- %nop3196 = alloca i1, i1 0
- %nop3197 = alloca i1, i1 0
- %nop3198 = alloca i1, i1 0
- %nop3199 = alloca i1, i1 0
- %nop3200 = alloca i1, i1 0
- %nop3201 = alloca i1, i1 0
- %nop3202 = alloca i1, i1 0
- %nop3203 = alloca i1, i1 0
- %nop3204 = alloca i1, i1 0
- %nop3205 = alloca i1, i1 0
- %nop3206 = alloca i1, i1 0
- %nop3207 = alloca i1, i1 0
- %nop3208 = alloca i1, i1 0
- %nop3209 = alloca i1, i1 0
- %nop3210 = alloca i1, i1 0
- %nop3211 = alloca i1, i1 0
- %nop3212 = alloca i1, i1 0
- %nop3213 = alloca i1, i1 0
- %nop3214 = alloca i1, i1 0
- %nop3215 = alloca i1, i1 0
- %nop3216 = alloca i1, i1 0
- %nop3217 = alloca i1, i1 0
- %nop3218 = alloca i1, i1 0
- %nop3219 = alloca i1, i1 0
- %nop3220 = alloca i1, i1 0
- %nop3221 = alloca i1, i1 0
- %nop3222 = alloca i1, i1 0
- %nop3223 = alloca i1, i1 0
- %nop3224 = alloca i1, i1 0
- %nop3225 = alloca i1, i1 0
- %nop3226 = alloca i1, i1 0
- %nop3227 = alloca i1, i1 0
- %nop3228 = alloca i1, i1 0
- %nop3229 = alloca i1, i1 0
- %nop3230 = alloca i1, i1 0
- %nop3231 = alloca i1, i1 0
- %nop3232 = alloca i1, i1 0
- %nop3233 = alloca i1, i1 0
- %nop3234 = alloca i1, i1 0
- %nop3235 = alloca i1, i1 0
- %nop3236 = alloca i1, i1 0
- %nop3237 = alloca i1, i1 0
- %nop3238 = alloca i1, i1 0
- %nop3239 = alloca i1, i1 0
- %nop3240 = alloca i1, i1 0
- %nop3241 = alloca i1, i1 0
- %nop3242 = alloca i1, i1 0
- %nop3243 = alloca i1, i1 0
- %nop3244 = alloca i1, i1 0
- %nop3245 = alloca i1, i1 0
- %nop3246 = alloca i1, i1 0
- %nop3247 = alloca i1, i1 0
- %nop3248 = alloca i1, i1 0
- %nop3249 = alloca i1, i1 0
- %nop3250 = alloca i1, i1 0
- %nop3251 = alloca i1, i1 0
- %nop3252 = alloca i1, i1 0
- %nop3253 = alloca i1, i1 0
- %nop3254 = alloca i1, i1 0
- %nop3255 = alloca i1, i1 0
- %nop3256 = alloca i1, i1 0
- %nop3257 = alloca i1, i1 0
- %nop3258 = alloca i1, i1 0
- %nop3259 = alloca i1, i1 0
- %nop3260 = alloca i1, i1 0
- %nop3261 = alloca i1, i1 0
- %nop3262 = alloca i1, i1 0
- %nop3263 = alloca i1, i1 0
- %nop3264 = alloca i1, i1 0
- %nop3265 = alloca i1, i1 0
- %nop3266 = alloca i1, i1 0
- %nop3267 = alloca i1, i1 0
- %nop3268 = alloca i1, i1 0
- %nop3269 = alloca i1, i1 0
- %nop3270 = alloca i1, i1 0
- %nop3271 = alloca i1, i1 0
- %nop3272 = alloca i1, i1 0
- %nop3273 = alloca i1, i1 0
- %nop3274 = alloca i1, i1 0
- %nop3275 = alloca i1, i1 0
- %nop3276 = alloca i1, i1 0
- %nop3277 = alloca i1, i1 0
- %nop3278 = alloca i1, i1 0
- %nop3279 = alloca i1, i1 0
- %nop3280 = alloca i1, i1 0
- %nop3281 = alloca i1, i1 0
- %nop3282 = alloca i1, i1 0
- %nop3283 = alloca i1, i1 0
- %nop3284 = alloca i1, i1 0
- %nop3285 = alloca i1, i1 0
- %nop3286 = alloca i1, i1 0
- %nop3287 = alloca i1, i1 0
- %nop3288 = alloca i1, i1 0
- %nop3289 = alloca i1, i1 0
- %nop3290 = alloca i1, i1 0
- %nop3291 = alloca i1, i1 0
- %nop3292 = alloca i1, i1 0
- %nop3293 = alloca i1, i1 0
- %nop3294 = alloca i1, i1 0
- %nop3295 = alloca i1, i1 0
- %nop3296 = alloca i1, i1 0
- %nop3297 = alloca i1, i1 0
- %nop3298 = alloca i1, i1 0
- %nop3299 = alloca i1, i1 0
- %nop3300 = alloca i1, i1 0
- %nop3301 = alloca i1, i1 0
- %nop3302 = alloca i1, i1 0
- %nop3303 = alloca i1, i1 0
- %nop3304 = alloca i1, i1 0
- %nop3305 = alloca i1, i1 0
- %nop3306 = alloca i1, i1 0
- %nop3307 = alloca i1, i1 0
- %nop3308 = alloca i1, i1 0
- %nop3309 = alloca i1, i1 0
- %nop3310 = alloca i1, i1 0
- %nop3311 = alloca i1, i1 0
- %nop3312 = alloca i1, i1 0
- %nop3313 = alloca i1, i1 0
- %nop3314 = alloca i1, i1 0
- %nop3315 = alloca i1, i1 0
- %nop3316 = alloca i1, i1 0
- %nop3317 = alloca i1, i1 0
- %nop3318 = alloca i1, i1 0
- %nop3319 = alloca i1, i1 0
- %nop3320 = alloca i1, i1 0
- %nop3321 = alloca i1, i1 0
- %nop3322 = alloca i1, i1 0
- %nop3323 = alloca i1, i1 0
- %nop3324 = alloca i1, i1 0
- %nop3325 = alloca i1, i1 0
- %nop3326 = alloca i1, i1 0
- %nop3327 = alloca i1, i1 0
- %nop3328 = alloca i1, i1 0
- %nop3329 = alloca i1, i1 0
- %nop3330 = alloca i1, i1 0
- %nop3331 = alloca i1, i1 0
- %nop3332 = alloca i1, i1 0
- %nop3333 = alloca i1, i1 0
- %nop3334 = alloca i1, i1 0
- %nop3335 = alloca i1, i1 0
- %nop3336 = alloca i1, i1 0
- %nop3337 = alloca i1, i1 0
- %nop3338 = alloca i1, i1 0
- %nop3339 = alloca i1, i1 0
- %nop3340 = alloca i1, i1 0
- %nop3341 = alloca i1, i1 0
- %nop3342 = alloca i1, i1 0
- %nop3343 = alloca i1, i1 0
- %nop3344 = alloca i1, i1 0
- %nop3345 = alloca i1, i1 0
- %nop3346 = alloca i1, i1 0
- %nop3347 = alloca i1, i1 0
- %nop3348 = alloca i1, i1 0
- %nop3349 = alloca i1, i1 0
- %nop3350 = alloca i1, i1 0
- %nop3351 = alloca i1, i1 0
- %nop3352 = alloca i1, i1 0
- %nop3353 = alloca i1, i1 0
- %nop3354 = alloca i1, i1 0
- %nop3355 = alloca i1, i1 0
- %nop3356 = alloca i1, i1 0
- %nop3357 = alloca i1, i1 0
- %nop3358 = alloca i1, i1 0
- %nop3359 = alloca i1, i1 0
- %nop3360 = alloca i1, i1 0
- %nop3361 = alloca i1, i1 0
- %nop3362 = alloca i1, i1 0
- %nop3363 = alloca i1, i1 0
- %nop3364 = alloca i1, i1 0
- %nop3365 = alloca i1, i1 0
- %nop3366 = alloca i1, i1 0
- %nop3367 = alloca i1, i1 0
- %nop3368 = alloca i1, i1 0
- %nop3369 = alloca i1, i1 0
- %nop3370 = alloca i1, i1 0
- %nop3371 = alloca i1, i1 0
- %nop3372 = alloca i1, i1 0
- %nop3373 = alloca i1, i1 0
- %nop3374 = alloca i1, i1 0
- %nop3375 = alloca i1, i1 0
- %nop3376 = alloca i1, i1 0
- %nop3377 = alloca i1, i1 0
- %nop3378 = alloca i1, i1 0
- %nop3379 = alloca i1, i1 0
- %nop3380 = alloca i1, i1 0
- %nop3381 = alloca i1, i1 0
- %nop3382 = alloca i1, i1 0
- %nop3383 = alloca i1, i1 0
- %nop3384 = alloca i1, i1 0
- %nop3385 = alloca i1, i1 0
- %nop3386 = alloca i1, i1 0
- %nop3387 = alloca i1, i1 0
- %nop3388 = alloca i1, i1 0
- %nop3389 = alloca i1, i1 0
- %nop3390 = alloca i1, i1 0
- %nop3391 = alloca i1, i1 0
- %nop3392 = alloca i1, i1 0
- %nop3393 = alloca i1, i1 0
- %nop3394 = alloca i1, i1 0
- %nop3395 = alloca i1, i1 0
- %nop3396 = alloca i1, i1 0
- %nop3397 = alloca i1, i1 0
- %nop3398 = alloca i1, i1 0
- %nop3399 = alloca i1, i1 0
- %nop3400 = alloca i1, i1 0
- %nop3401 = alloca i1, i1 0
- %nop3402 = alloca i1, i1 0
- %nop3403 = alloca i1, i1 0
- %nop3404 = alloca i1, i1 0
- %nop3405 = alloca i1, i1 0
- %nop3406 = alloca i1, i1 0
- %nop3407 = alloca i1, i1 0
- %nop3408 = alloca i1, i1 0
- %nop3409 = alloca i1, i1 0
- %nop3410 = alloca i1, i1 0
- %nop3411 = alloca i1, i1 0
- %nop3412 = alloca i1, i1 0
- %nop3413 = alloca i1, i1 0
- %nop3414 = alloca i1, i1 0
- %nop3415 = alloca i1, i1 0
- %nop3416 = alloca i1, i1 0
- %nop3417 = alloca i1, i1 0
- %nop3418 = alloca i1, i1 0
- %nop3419 = alloca i1, i1 0
- %nop3420 = alloca i1, i1 0
- %nop3421 = alloca i1, i1 0
- %nop3422 = alloca i1, i1 0
- %nop3423 = alloca i1, i1 0
- %nop3424 = alloca i1, i1 0
- %nop3425 = alloca i1, i1 0
- %nop3426 = alloca i1, i1 0
- %nop3427 = alloca i1, i1 0
- %nop3428 = alloca i1, i1 0
- %nop3429 = alloca i1, i1 0
- %nop3430 = alloca i1, i1 0
- %nop3431 = alloca i1, i1 0
- %nop3432 = alloca i1, i1 0
- %nop3433 = alloca i1, i1 0
- %nop3434 = alloca i1, i1 0
- %nop3435 = alloca i1, i1 0
- %nop3436 = alloca i1, i1 0
- %nop3437 = alloca i1, i1 0
- %nop3438 = alloca i1, i1 0
- %nop3439 = alloca i1, i1 0
- %nop3440 = alloca i1, i1 0
- %nop3441 = alloca i1, i1 0
- %nop3442 = alloca i1, i1 0
- %nop3443 = alloca i1, i1 0
- %nop3444 = alloca i1, i1 0
- %nop3445 = alloca i1, i1 0
- %nop3446 = alloca i1, i1 0
- %nop3447 = alloca i1, i1 0
- %nop3448 = alloca i1, i1 0
- %nop3449 = alloca i1, i1 0
- %nop3450 = alloca i1, i1 0
- %nop3451 = alloca i1, i1 0
- %nop3452 = alloca i1, i1 0
- %nop3453 = alloca i1, i1 0
- %nop3454 = alloca i1, i1 0
- %nop3455 = alloca i1, i1 0
- %nop3456 = alloca i1, i1 0
- %nop3457 = alloca i1, i1 0
- %nop3458 = alloca i1, i1 0
- %nop3459 = alloca i1, i1 0
- %nop3460 = alloca i1, i1 0
- %nop3461 = alloca i1, i1 0
- %nop3462 = alloca i1, i1 0
- %nop3463 = alloca i1, i1 0
- %nop3464 = alloca i1, i1 0
- %nop3465 = alloca i1, i1 0
- %nop3466 = alloca i1, i1 0
- %nop3467 = alloca i1, i1 0
- %nop3468 = alloca i1, i1 0
- %nop3469 = alloca i1, i1 0
- %nop3470 = alloca i1, i1 0
- %nop3471 = alloca i1, i1 0
- %nop3472 = alloca i1, i1 0
- %nop3473 = alloca i1, i1 0
- %nop3474 = alloca i1, i1 0
- %nop3475 = alloca i1, i1 0
- %nop3476 = alloca i1, i1 0
- %nop3477 = alloca i1, i1 0
- %nop3478 = alloca i1, i1 0
- %nop3479 = alloca i1, i1 0
- %nop3480 = alloca i1, i1 0
- %nop3481 = alloca i1, i1 0
- %nop3482 = alloca i1, i1 0
- %nop3483 = alloca i1, i1 0
- %nop3484 = alloca i1, i1 0
- %nop3485 = alloca i1, i1 0
- %nop3486 = alloca i1, i1 0
- %nop3487 = alloca i1, i1 0
- %nop3488 = alloca i1, i1 0
- %nop3489 = alloca i1, i1 0
- %nop3490 = alloca i1, i1 0
- %nop3491 = alloca i1, i1 0
- %nop3492 = alloca i1, i1 0
- %nop3493 = alloca i1, i1 0
- %nop3494 = alloca i1, i1 0
- %nop3495 = alloca i1, i1 0
- %nop3496 = alloca i1, i1 0
- %nop3497 = alloca i1, i1 0
- %nop3498 = alloca i1, i1 0
- %nop3499 = alloca i1, i1 0
- %nop3500 = alloca i1, i1 0
- %nop3501 = alloca i1, i1 0
- %nop3502 = alloca i1, i1 0
- %nop3503 = alloca i1, i1 0
- %nop3504 = alloca i1, i1 0
- %nop3505 = alloca i1, i1 0
- %nop3506 = alloca i1, i1 0
- %nop3507 = alloca i1, i1 0
- %nop3508 = alloca i1, i1 0
- %nop3509 = alloca i1, i1 0
- %nop3510 = alloca i1, i1 0
- %nop3511 = alloca i1, i1 0
- %nop3512 = alloca i1, i1 0
- %nop3513 = alloca i1, i1 0
- %nop3514 = alloca i1, i1 0
- %nop3515 = alloca i1, i1 0
- %nop3516 = alloca i1, i1 0
- %nop3517 = alloca i1, i1 0
- %nop3518 = alloca i1, i1 0
- %nop3519 = alloca i1, i1 0
- %nop3520 = alloca i1, i1 0
- %nop3521 = alloca i1, i1 0
- %nop3522 = alloca i1, i1 0
- %nop3523 = alloca i1, i1 0
- %nop3524 = alloca i1, i1 0
- %nop3525 = alloca i1, i1 0
- %nop3526 = alloca i1, i1 0
- %nop3527 = alloca i1, i1 0
- %nop3528 = alloca i1, i1 0
- %nop3529 = alloca i1, i1 0
- %nop3530 = alloca i1, i1 0
- %nop3531 = alloca i1, i1 0
- %nop3532 = alloca i1, i1 0
- %nop3533 = alloca i1, i1 0
- %nop3534 = alloca i1, i1 0
- %nop3535 = alloca i1, i1 0
- %nop3536 = alloca i1, i1 0
- %nop3537 = alloca i1, i1 0
- %nop3538 = alloca i1, i1 0
- %nop3539 = alloca i1, i1 0
- %nop3540 = alloca i1, i1 0
- %nop3541 = alloca i1, i1 0
- %nop3542 = alloca i1, i1 0
- %nop3543 = alloca i1, i1 0
- %nop3544 = alloca i1, i1 0
- %nop3545 = alloca i1, i1 0
- %nop3546 = alloca i1, i1 0
- %nop3547 = alloca i1, i1 0
- %nop3548 = alloca i1, i1 0
- %nop3549 = alloca i1, i1 0
- %nop3550 = alloca i1, i1 0
- %nop3551 = alloca i1, i1 0
- %nop3552 = alloca i1, i1 0
- %nop3553 = alloca i1, i1 0
- %nop3554 = alloca i1, i1 0
- %nop3555 = alloca i1, i1 0
- %nop3556 = alloca i1, i1 0
- %nop3557 = alloca i1, i1 0
- %nop3558 = alloca i1, i1 0
- %nop3559 = alloca i1, i1 0
- %nop3560 = alloca i1, i1 0
- %nop3561 = alloca i1, i1 0
- %nop3562 = alloca i1, i1 0
- %nop3563 = alloca i1, i1 0
- %nop3564 = alloca i1, i1 0
- %nop3565 = alloca i1, i1 0
- %nop3566 = alloca i1, i1 0
- %nop3567 = alloca i1, i1 0
- %nop3568 = alloca i1, i1 0
- %nop3569 = alloca i1, i1 0
- %nop3570 = alloca i1, i1 0
- %nop3571 = alloca i1, i1 0
- %nop3572 = alloca i1, i1 0
- %nop3573 = alloca i1, i1 0
- %nop3574 = alloca i1, i1 0
- %nop3575 = alloca i1, i1 0
- %nop3576 = alloca i1, i1 0
- %nop3577 = alloca i1, i1 0
- %nop3578 = alloca i1, i1 0
- %nop3579 = alloca i1, i1 0
- %nop3580 = alloca i1, i1 0
- %nop3581 = alloca i1, i1 0
- %nop3582 = alloca i1, i1 0
- %nop3583 = alloca i1, i1 0
- %nop3584 = alloca i1, i1 0
- %nop3585 = alloca i1, i1 0
- %nop3586 = alloca i1, i1 0
- %nop3587 = alloca i1, i1 0
- %nop3588 = alloca i1, i1 0
- %nop3589 = alloca i1, i1 0
- %nop3590 = alloca i1, i1 0
- %nop3591 = alloca i1, i1 0
- %nop3592 = alloca i1, i1 0
- %nop3593 = alloca i1, i1 0
- %nop3594 = alloca i1, i1 0
- %nop3595 = alloca i1, i1 0
- %nop3596 = alloca i1, i1 0
- %nop3597 = alloca i1, i1 0
- %nop3598 = alloca i1, i1 0
- %nop3599 = alloca i1, i1 0
- %nop3600 = alloca i1, i1 0
- %nop3601 = alloca i1, i1 0
- %nop3602 = alloca i1, i1 0
- %nop3603 = alloca i1, i1 0
- %nop3604 = alloca i1, i1 0
- %nop3605 = alloca i1, i1 0
- %nop3606 = alloca i1, i1 0
- %nop3607 = alloca i1, i1 0
- %nop3608 = alloca i1, i1 0
- %nop3609 = alloca i1, i1 0
- %nop3610 = alloca i1, i1 0
- %nop3611 = alloca i1, i1 0
- %nop3612 = alloca i1, i1 0
- %nop3613 = alloca i1, i1 0
- %nop3614 = alloca i1, i1 0
- %nop3615 = alloca i1, i1 0
- %nop3616 = alloca i1, i1 0
- %nop3617 = alloca i1, i1 0
- %nop3618 = alloca i1, i1 0
- %nop3619 = alloca i1, i1 0
- %nop3620 = alloca i1, i1 0
- %nop3621 = alloca i1, i1 0
- %nop3622 = alloca i1, i1 0
- %nop3623 = alloca i1, i1 0
- %nop3624 = alloca i1, i1 0
- %nop3625 = alloca i1, i1 0
- %nop3626 = alloca i1, i1 0
- %nop3627 = alloca i1, i1 0
- %nop3628 = alloca i1, i1 0
- %nop3629 = alloca i1, i1 0
- %nop3630 = alloca i1, i1 0
- %nop3631 = alloca i1, i1 0
- %nop3632 = alloca i1, i1 0
- %nop3633 = alloca i1, i1 0
- %nop3634 = alloca i1, i1 0
- %nop3635 = alloca i1, i1 0
- %nop3636 = alloca i1, i1 0
- %nop3637 = alloca i1, i1 0
- %nop3638 = alloca i1, i1 0
- %nop3639 = alloca i1, i1 0
- %nop3640 = alloca i1, i1 0
- %nop3641 = alloca i1, i1 0
- %nop3642 = alloca i1, i1 0
- %nop3643 = alloca i1, i1 0
- %nop3644 = alloca i1, i1 0
- %nop3645 = alloca i1, i1 0
- %nop3646 = alloca i1, i1 0
- %nop3647 = alloca i1, i1 0
- %nop3648 = alloca i1, i1 0
- %nop3649 = alloca i1, i1 0
- %nop3650 = alloca i1, i1 0
- %nop3651 = alloca i1, i1 0
- %nop3652 = alloca i1, i1 0
- %nop3653 = alloca i1, i1 0
- %nop3654 = alloca i1, i1 0
- %nop3655 = alloca i1, i1 0
- %nop3656 = alloca i1, i1 0
- %nop3657 = alloca i1, i1 0
- %nop3658 = alloca i1, i1 0
- %nop3659 = alloca i1, i1 0
- %nop3660 = alloca i1, i1 0
- %nop3661 = alloca i1, i1 0
- %nop3662 = alloca i1, i1 0
- %nop3663 = alloca i1, i1 0
- %nop3664 = alloca i1, i1 0
- %nop3665 = alloca i1, i1 0
- %nop3666 = alloca i1, i1 0
- %nop3667 = alloca i1, i1 0
- %nop3668 = alloca i1, i1 0
- %nop3669 = alloca i1, i1 0
- %nop3670 = alloca i1, i1 0
- %nop3671 = alloca i1, i1 0
- %nop3672 = alloca i1, i1 0
- %nop3673 = alloca i1, i1 0
- %nop3674 = alloca i1, i1 0
- %nop3675 = alloca i1, i1 0
- %nop3676 = alloca i1, i1 0
- %nop3677 = alloca i1, i1 0
- %nop3678 = alloca i1, i1 0
- %nop3679 = alloca i1, i1 0
- %nop3680 = alloca i1, i1 0
- %nop3681 = alloca i1, i1 0
- %nop3682 = alloca i1, i1 0
- %nop3683 = alloca i1, i1 0
- %nop3684 = alloca i1, i1 0
- %nop3685 = alloca i1, i1 0
- %nop3686 = alloca i1, i1 0
- %nop3687 = alloca i1, i1 0
- %nop3688 = alloca i1, i1 0
- %nop3689 = alloca i1, i1 0
- %nop3690 = alloca i1, i1 0
- %nop3691 = alloca i1, i1 0
- %nop3692 = alloca i1, i1 0
- %nop3693 = alloca i1, i1 0
- %nop3694 = alloca i1, i1 0
- %nop3695 = alloca i1, i1 0
- %nop3696 = alloca i1, i1 0
- %nop3697 = alloca i1, i1 0
- %nop3698 = alloca i1, i1 0
- %nop3699 = alloca i1, i1 0
- %nop3700 = alloca i1, i1 0
- %nop3701 = alloca i1, i1 0
- %nop3702 = alloca i1, i1 0
- %nop3703 = alloca i1, i1 0
- %nop3704 = alloca i1, i1 0
- %nop3705 = alloca i1, i1 0
- %nop3706 = alloca i1, i1 0
- %nop3707 = alloca i1, i1 0
- %nop3708 = alloca i1, i1 0
- %nop3709 = alloca i1, i1 0
- %nop3710 = alloca i1, i1 0
- %nop3711 = alloca i1, i1 0
- %nop3712 = alloca i1, i1 0
- %nop3713 = alloca i1, i1 0
- %nop3714 = alloca i1, i1 0
- %nop3715 = alloca i1, i1 0
- %nop3716 = alloca i1, i1 0
- %nop3717 = alloca i1, i1 0
- %nop3718 = alloca i1, i1 0
- %nop3719 = alloca i1, i1 0
- %nop3720 = alloca i1, i1 0
- %nop3721 = alloca i1, i1 0
- %nop3722 = alloca i1, i1 0
- %nop3723 = alloca i1, i1 0
- %nop3724 = alloca i1, i1 0
- %nop3725 = alloca i1, i1 0
- %nop3726 = alloca i1, i1 0
- %nop3727 = alloca i1, i1 0
- %nop3728 = alloca i1, i1 0
- %nop3729 = alloca i1, i1 0
- %nop3730 = alloca i1, i1 0
- %nop3731 = alloca i1, i1 0
- %nop3732 = alloca i1, i1 0
- %nop3733 = alloca i1, i1 0
- %nop3734 = alloca i1, i1 0
- %nop3735 = alloca i1, i1 0
- %nop3736 = alloca i1, i1 0
- %nop3737 = alloca i1, i1 0
- %nop3738 = alloca i1, i1 0
- %nop3739 = alloca i1, i1 0
- %nop3740 = alloca i1, i1 0
- %nop3741 = alloca i1, i1 0
- %nop3742 = alloca i1, i1 0
- %nop3743 = alloca i1, i1 0
- %nop3744 = alloca i1, i1 0
- %nop3745 = alloca i1, i1 0
- %nop3746 = alloca i1, i1 0
- %nop3747 = alloca i1, i1 0
- %nop3748 = alloca i1, i1 0
- %nop3749 = alloca i1, i1 0
- %nop3750 = alloca i1, i1 0
- %nop3751 = alloca i1, i1 0
- %nop3752 = alloca i1, i1 0
- %nop3753 = alloca i1, i1 0
- %nop3754 = alloca i1, i1 0
- %nop3755 = alloca i1, i1 0
- %nop3756 = alloca i1, i1 0
- %nop3757 = alloca i1, i1 0
- %nop3758 = alloca i1, i1 0
- %nop3759 = alloca i1, i1 0
- %nop3760 = alloca i1, i1 0
- %nop3761 = alloca i1, i1 0
- %nop3762 = alloca i1, i1 0
- %nop3763 = alloca i1, i1 0
- %nop3764 = alloca i1, i1 0
- %nop3765 = alloca i1, i1 0
- %nop3766 = alloca i1, i1 0
- %nop3767 = alloca i1, i1 0
- %nop3768 = alloca i1, i1 0
- %nop3769 = alloca i1, i1 0
- %nop3770 = alloca i1, i1 0
- %nop3771 = alloca i1, i1 0
- %nop3772 = alloca i1, i1 0
- %nop3773 = alloca i1, i1 0
- %nop3774 = alloca i1, i1 0
- %nop3775 = alloca i1, i1 0
- %nop3776 = alloca i1, i1 0
- %nop3777 = alloca i1, i1 0
- %nop3778 = alloca i1, i1 0
- %nop3779 = alloca i1, i1 0
- %nop3780 = alloca i1, i1 0
- %nop3781 = alloca i1, i1 0
- %nop3782 = alloca i1, i1 0
- %nop3783 = alloca i1, i1 0
- %nop3784 = alloca i1, i1 0
- %nop3785 = alloca i1, i1 0
- %nop3786 = alloca i1, i1 0
- %nop3787 = alloca i1, i1 0
- %nop3788 = alloca i1, i1 0
- %nop3789 = alloca i1, i1 0
- %nop3790 = alloca i1, i1 0
- %nop3791 = alloca i1, i1 0
- %nop3792 = alloca i1, i1 0
- %nop3793 = alloca i1, i1 0
- %nop3794 = alloca i1, i1 0
- %nop3795 = alloca i1, i1 0
- %nop3796 = alloca i1, i1 0
- %nop3797 = alloca i1, i1 0
- %nop3798 = alloca i1, i1 0
- %nop3799 = alloca i1, i1 0
- %nop3800 = alloca i1, i1 0
- %nop3801 = alloca i1, i1 0
- %nop3802 = alloca i1, i1 0
- %nop3803 = alloca i1, i1 0
- %nop3804 = alloca i1, i1 0
- %nop3805 = alloca i1, i1 0
- %nop3806 = alloca i1, i1 0
- %nop3807 = alloca i1, i1 0
- %nop3808 = alloca i1, i1 0
- %nop3809 = alloca i1, i1 0
- %nop3810 = alloca i1, i1 0
- %nop3811 = alloca i1, i1 0
- %nop3812 = alloca i1, i1 0
- %nop3813 = alloca i1, i1 0
- %nop3814 = alloca i1, i1 0
- %nop3815 = alloca i1, i1 0
- %nop3816 = alloca i1, i1 0
- %nop3817 = alloca i1, i1 0
- %nop3818 = alloca i1, i1 0
- %nop3819 = alloca i1, i1 0
- %nop3820 = alloca i1, i1 0
- %nop3821 = alloca i1, i1 0
- %nop3822 = alloca i1, i1 0
- %nop3823 = alloca i1, i1 0
- %nop3824 = alloca i1, i1 0
- %nop3825 = alloca i1, i1 0
- %nop3826 = alloca i1, i1 0
- %nop3827 = alloca i1, i1 0
- %nop3828 = alloca i1, i1 0
- %nop3829 = alloca i1, i1 0
- %nop3830 = alloca i1, i1 0
- %nop3831 = alloca i1, i1 0
- %nop3832 = alloca i1, i1 0
- %nop3833 = alloca i1, i1 0
- %nop3834 = alloca i1, i1 0
- %nop3835 = alloca i1, i1 0
- %nop3836 = alloca i1, i1 0
- %nop3837 = alloca i1, i1 0
- %nop3838 = alloca i1, i1 0
- %nop3839 = alloca i1, i1 0
- %nop3840 = alloca i1, i1 0
- %nop3841 = alloca i1, i1 0
- %nop3842 = alloca i1, i1 0
- %nop3843 = alloca i1, i1 0
- %nop3844 = alloca i1, i1 0
- %nop3845 = alloca i1, i1 0
- %nop3846 = alloca i1, i1 0
- %nop3847 = alloca i1, i1 0
- %nop3848 = alloca i1, i1 0
- %nop3849 = alloca i1, i1 0
- %nop3850 = alloca i1, i1 0
- %nop3851 = alloca i1, i1 0
- %nop3852 = alloca i1, i1 0
- %nop3853 = alloca i1, i1 0
- %nop3854 = alloca i1, i1 0
- %nop3855 = alloca i1, i1 0
- %nop3856 = alloca i1, i1 0
- %nop3857 = alloca i1, i1 0
- %nop3858 = alloca i1, i1 0
- %nop3859 = alloca i1, i1 0
- %nop3860 = alloca i1, i1 0
- %nop3861 = alloca i1, i1 0
- %nop3862 = alloca i1, i1 0
- %nop3863 = alloca i1, i1 0
- %nop3864 = alloca i1, i1 0
- %nop3865 = alloca i1, i1 0
- %nop3866 = alloca i1, i1 0
- %nop3867 = alloca i1, i1 0
- %nop3868 = alloca i1, i1 0
- %nop3869 = alloca i1, i1 0
- %nop3870 = alloca i1, i1 0
- %nop3871 = alloca i1, i1 0
- %nop3872 = alloca i1, i1 0
- %nop3873 = alloca i1, i1 0
- %nop3874 = alloca i1, i1 0
- %nop3875 = alloca i1, i1 0
- %nop3876 = alloca i1, i1 0
- %nop3877 = alloca i1, i1 0
- %nop3878 = alloca i1, i1 0
- %nop3879 = alloca i1, i1 0
- %nop3880 = alloca i1, i1 0
- %nop3881 = alloca i1, i1 0
- %nop3882 = alloca i1, i1 0
- %nop3883 = alloca i1, i1 0
- %nop3884 = alloca i1, i1 0
- %nop3885 = alloca i1, i1 0
- %nop3886 = alloca i1, i1 0
- %nop3887 = alloca i1, i1 0
- %nop3888 = alloca i1, i1 0
- %nop3889 = alloca i1, i1 0
- %nop3890 = alloca i1, i1 0
- %nop3891 = alloca i1, i1 0
- %nop3892 = alloca i1, i1 0
- %nop3893 = alloca i1, i1 0
- %nop3894 = alloca i1, i1 0
- %nop3895 = alloca i1, i1 0
- %nop3896 = alloca i1, i1 0
- %nop3897 = alloca i1, i1 0
- %nop3898 = alloca i1, i1 0
- %nop3899 = alloca i1, i1 0
- %nop3900 = alloca i1, i1 0
- %nop3901 = alloca i1, i1 0
- %nop3902 = alloca i1, i1 0
- %nop3903 = alloca i1, i1 0
- %nop3904 = alloca i1, i1 0
- %nop3905 = alloca i1, i1 0
- %nop3906 = alloca i1, i1 0
- %nop3907 = alloca i1, i1 0
- %nop3908 = alloca i1, i1 0
- %nop3909 = alloca i1, i1 0
- %nop3910 = alloca i1, i1 0
- %nop3911 = alloca i1, i1 0
- %nop3912 = alloca i1, i1 0
- %nop3913 = alloca i1, i1 0
- %nop3914 = alloca i1, i1 0
- %nop3915 = alloca i1, i1 0
- %nop3916 = alloca i1, i1 0
- %nop3917 = alloca i1, i1 0
- %nop3918 = alloca i1, i1 0
- %nop3919 = alloca i1, i1 0
- %nop3920 = alloca i1, i1 0
- %nop3921 = alloca i1, i1 0
- %nop3922 = alloca i1, i1 0
- %nop3923 = alloca i1, i1 0
- %nop3924 = alloca i1, i1 0
- %nop3925 = alloca i1, i1 0
- %nop3926 = alloca i1, i1 0
- %nop3927 = alloca i1, i1 0
- %nop3928 = alloca i1, i1 0
- %nop3929 = alloca i1, i1 0
- %nop3930 = alloca i1, i1 0
- %nop3931 = alloca i1, i1 0
- %nop3932 = alloca i1, i1 0
- %nop3933 = alloca i1, i1 0
- %nop3934 = alloca i1, i1 0
- %nop3935 = alloca i1, i1 0
- %nop3936 = alloca i1, i1 0
- %nop3937 = alloca i1, i1 0
- %nop3938 = alloca i1, i1 0
- %nop3939 = alloca i1, i1 0
- %nop3940 = alloca i1, i1 0
- %nop3941 = alloca i1, i1 0
- %nop3942 = alloca i1, i1 0
- %nop3943 = alloca i1, i1 0
- %nop3944 = alloca i1, i1 0
- %nop3945 = alloca i1, i1 0
- %nop3946 = alloca i1, i1 0
- %nop3947 = alloca i1, i1 0
- %nop3948 = alloca i1, i1 0
- %nop3949 = alloca i1, i1 0
- %nop3950 = alloca i1, i1 0
- %nop3951 = alloca i1, i1 0
- %nop3952 = alloca i1, i1 0
- %nop3953 = alloca i1, i1 0
- %nop3954 = alloca i1, i1 0
- %nop3955 = alloca i1, i1 0
- %nop3956 = alloca i1, i1 0
- %nop3957 = alloca i1, i1 0
- %nop3958 = alloca i1, i1 0
- %nop3959 = alloca i1, i1 0
- %nop3960 = alloca i1, i1 0
- %nop3961 = alloca i1, i1 0
- %nop3962 = alloca i1, i1 0
- %nop3963 = alloca i1, i1 0
- %nop3964 = alloca i1, i1 0
- %nop3965 = alloca i1, i1 0
- %nop3966 = alloca i1, i1 0
- %nop3967 = alloca i1, i1 0
- %nop3968 = alloca i1, i1 0
- %nop3969 = alloca i1, i1 0
- %nop3970 = alloca i1, i1 0
- %nop3971 = alloca i1, i1 0
- %nop3972 = alloca i1, i1 0
- %nop3973 = alloca i1, i1 0
- %nop3974 = alloca i1, i1 0
- %nop3975 = alloca i1, i1 0
- %nop3976 = alloca i1, i1 0
- %nop3977 = alloca i1, i1 0
- %nop3978 = alloca i1, i1 0
- %nop3979 = alloca i1, i1 0
- %nop3980 = alloca i1, i1 0
- %nop3981 = alloca i1, i1 0
- %nop3982 = alloca i1, i1 0
- %nop3983 = alloca i1, i1 0
- %nop3984 = alloca i1, i1 0
- %nop3985 = alloca i1, i1 0
- %nop3986 = alloca i1, i1 0
- %nop3987 = alloca i1, i1 0
- %nop3988 = alloca i1, i1 0
- %nop3989 = alloca i1, i1 0
- %nop3990 = alloca i1, i1 0
- %nop3991 = alloca i1, i1 0
- %nop3992 = alloca i1, i1 0
- %nop3993 = alloca i1, i1 0
- %nop3994 = alloca i1, i1 0
- %nop3995 = alloca i1, i1 0
- %nop3996 = alloca i1, i1 0
- %nop3997 = alloca i1, i1 0
- %nop3998 = alloca i1, i1 0
- %nop3999 = alloca i1, i1 0
- %nop4000 = alloca i1, i1 0
- %nop4001 = alloca i1, i1 0
- %nop4002 = alloca i1, i1 0
- %nop4003 = alloca i1, i1 0
- %nop4004 = alloca i1, i1 0
- %nop4005 = alloca i1, i1 0
- %nop4006 = alloca i1, i1 0
- %nop4007 = alloca i1, i1 0
- %nop4008 = alloca i1, i1 0
- %nop4009 = alloca i1, i1 0
- %nop4010 = alloca i1, i1 0
- %nop4011 = alloca i1, i1 0
- %nop4012 = alloca i1, i1 0
- %nop4013 = alloca i1, i1 0
- %nop4014 = alloca i1, i1 0
- %nop4015 = alloca i1, i1 0
- %nop4016 = alloca i1, i1 0
- %nop4017 = alloca i1, i1 0
- %nop4018 = alloca i1, i1 0
- %nop4019 = alloca i1, i1 0
- %nop4020 = alloca i1, i1 0
- %nop4021 = alloca i1, i1 0
- %nop4022 = alloca i1, i1 0
- %nop4023 = alloca i1, i1 0
- %nop4024 = alloca i1, i1 0
- %nop4025 = alloca i1, i1 0
- %nop4026 = alloca i1, i1 0
- %nop4027 = alloca i1, i1 0
- %nop4028 = alloca i1, i1 0
- %nop4029 = alloca i1, i1 0
- %nop4030 = alloca i1, i1 0
- %nop4031 = alloca i1, i1 0
- %nop4032 = alloca i1, i1 0
- %nop4033 = alloca i1, i1 0
- %nop4034 = alloca i1, i1 0
- %nop4035 = alloca i1, i1 0
- %nop4036 = alloca i1, i1 0
- %nop4037 = alloca i1, i1 0
- %nop4038 = alloca i1, i1 0
- %nop4039 = alloca i1, i1 0
- %nop4040 = alloca i1, i1 0
- %nop4041 = alloca i1, i1 0
- %nop4042 = alloca i1, i1 0
- %nop4043 = alloca i1, i1 0
- %nop4044 = alloca i1, i1 0
- %nop4045 = alloca i1, i1 0
- %nop4046 = alloca i1, i1 0
- %nop4047 = alloca i1, i1 0
- %nop4048 = alloca i1, i1 0
- %nop4049 = alloca i1, i1 0
- %nop4050 = alloca i1, i1 0
- %nop4051 = alloca i1, i1 0
- %nop4052 = alloca i1, i1 0
- %nop4053 = alloca i1, i1 0
- %nop4054 = alloca i1, i1 0
- %nop4055 = alloca i1, i1 0
- %nop4056 = alloca i1, i1 0
- %nop4057 = alloca i1, i1 0
- %nop4058 = alloca i1, i1 0
- %nop4059 = alloca i1, i1 0
- %nop4060 = alloca i1, i1 0
- %nop4061 = alloca i1, i1 0
- %nop4062 = alloca i1, i1 0
- %nop4063 = alloca i1, i1 0
- %nop4064 = alloca i1, i1 0
- %nop4065 = alloca i1, i1 0
- %nop4066 = alloca i1, i1 0
- %nop4067 = alloca i1, i1 0
- %nop4068 = alloca i1, i1 0
- %nop4069 = alloca i1, i1 0
- %nop4070 = alloca i1, i1 0
- %nop4071 = alloca i1, i1 0
- %nop4072 = alloca i1, i1 0
- %nop4073 = alloca i1, i1 0
- %nop4074 = alloca i1, i1 0
- %nop4075 = alloca i1, i1 0
- %nop4076 = alloca i1, i1 0
- %nop4077 = alloca i1, i1 0
- %nop4078 = alloca i1, i1 0
- %nop4079 = alloca i1, i1 0
- %nop4080 = alloca i1, i1 0
- %nop4081 = alloca i1, i1 0
- %nop4082 = alloca i1, i1 0
- %nop4083 = alloca i1, i1 0
- %nop4084 = alloca i1, i1 0
- %nop4085 = alloca i1, i1 0
- %nop4086 = alloca i1, i1 0
- %nop4087 = alloca i1, i1 0
- %nop4088 = alloca i1, i1 0
- %nop4089 = alloca i1, i1 0
- %nop4090 = alloca i1, i1 0
- %nop4091 = alloca i1, i1 0
- %nop4092 = alloca i1, i1 0
- %nop4093 = alloca i1, i1 0
- %nop4094 = alloca i1, i1 0
- %nop4095 = alloca i1, i1 0
- %nop4096 = alloca i1, i1 0
- %nop4097 = alloca i1, i1 0
- %nop4098 = alloca i1, i1 0
- %nop4099 = alloca i1, i1 0
- %nop4100 = alloca i1, i1 0
- %nop4101 = alloca i1, i1 0
- %nop4102 = alloca i1, i1 0
- %nop4103 = alloca i1, i1 0
- %nop4104 = alloca i1, i1 0
- %nop4105 = alloca i1, i1 0
- %nop4106 = alloca i1, i1 0
- %nop4107 = alloca i1, i1 0
- %nop4108 = alloca i1, i1 0
- %nop4109 = alloca i1, i1 0
- %nop4110 = alloca i1, i1 0
- %nop4111 = alloca i1, i1 0
- %nop4112 = alloca i1, i1 0
- %nop4113 = alloca i1, i1 0
- %nop4114 = alloca i1, i1 0
- %nop4115 = alloca i1, i1 0
- %nop4116 = alloca i1, i1 0
- %nop4117 = alloca i1, i1 0
- %nop4118 = alloca i1, i1 0
- %nop4119 = alloca i1, i1 0
- %nop4120 = alloca i1, i1 0
- %nop4121 = alloca i1, i1 0
- %nop4122 = alloca i1, i1 0
- %nop4123 = alloca i1, i1 0
- %nop4124 = alloca i1, i1 0
- %nop4125 = alloca i1, i1 0
- %nop4126 = alloca i1, i1 0
- %nop4127 = alloca i1, i1 0
- %nop4128 = alloca i1, i1 0
- %nop4129 = alloca i1, i1 0
- %nop4130 = alloca i1, i1 0
- %nop4131 = alloca i1, i1 0
- %nop4132 = alloca i1, i1 0
- %nop4133 = alloca i1, i1 0
- %nop4134 = alloca i1, i1 0
- %nop4135 = alloca i1, i1 0
- %nop4136 = alloca i1, i1 0
- %nop4137 = alloca i1, i1 0
- %nop4138 = alloca i1, i1 0
- %nop4139 = alloca i1, i1 0
- %nop4140 = alloca i1, i1 0
- %nop4141 = alloca i1, i1 0
- %nop4142 = alloca i1, i1 0
- %nop4143 = alloca i1, i1 0
- %nop4144 = alloca i1, i1 0
- %nop4145 = alloca i1, i1 0
- %nop4146 = alloca i1, i1 0
- %nop4147 = alloca i1, i1 0
- %nop4148 = alloca i1, i1 0
- %nop4149 = alloca i1, i1 0
- %nop4150 = alloca i1, i1 0
- %nop4151 = alloca i1, i1 0
- %nop4152 = alloca i1, i1 0
- %nop4153 = alloca i1, i1 0
- %nop4154 = alloca i1, i1 0
- %nop4155 = alloca i1, i1 0
- %nop4156 = alloca i1, i1 0
- %nop4157 = alloca i1, i1 0
- %nop4158 = alloca i1, i1 0
- %nop4159 = alloca i1, i1 0
- %nop4160 = alloca i1, i1 0
- %nop4161 = alloca i1, i1 0
- %nop4162 = alloca i1, i1 0
- %nop4163 = alloca i1, i1 0
- %nop4164 = alloca i1, i1 0
- %nop4165 = alloca i1, i1 0
- %nop4166 = alloca i1, i1 0
- %nop4167 = alloca i1, i1 0
- %nop4168 = alloca i1, i1 0
- %nop4169 = alloca i1, i1 0
- %nop4170 = alloca i1, i1 0
- %nop4171 = alloca i1, i1 0
- %nop4172 = alloca i1, i1 0
- %nop4173 = alloca i1, i1 0
- %nop4174 = alloca i1, i1 0
- %nop4175 = alloca i1, i1 0
- %nop4176 = alloca i1, i1 0
- %nop4177 = alloca i1, i1 0
- %nop4178 = alloca i1, i1 0
- %nop4179 = alloca i1, i1 0
- %nop4180 = alloca i1, i1 0
- %nop4181 = alloca i1, i1 0
- %nop4182 = alloca i1, i1 0
- %nop4183 = alloca i1, i1 0
- %nop4184 = alloca i1, i1 0
- %nop4185 = alloca i1, i1 0
- %nop4186 = alloca i1, i1 0
- %nop4187 = alloca i1, i1 0
- %nop4188 = alloca i1, i1 0
- %nop4189 = alloca i1, i1 0
- %nop4190 = alloca i1, i1 0
- %nop4191 = alloca i1, i1 0
- %nop4192 = alloca i1, i1 0
- %nop4193 = alloca i1, i1 0
- %nop4194 = alloca i1, i1 0
- %nop4195 = alloca i1, i1 0
- %nop4196 = alloca i1, i1 0
- %nop4197 = alloca i1, i1 0
- %nop4198 = alloca i1, i1 0
- %nop4199 = alloca i1, i1 0
- %nop4200 = alloca i1, i1 0
- %nop4201 = alloca i1, i1 0
- %nop4202 = alloca i1, i1 0
- %nop4203 = alloca i1, i1 0
- %nop4204 = alloca i1, i1 0
- %nop4205 = alloca i1, i1 0
- %nop4206 = alloca i1, i1 0
- %nop4207 = alloca i1, i1 0
- %nop4208 = alloca i1, i1 0
- %nop4209 = alloca i1, i1 0
- %nop4210 = alloca i1, i1 0
- %nop4211 = alloca i1, i1 0
- %nop4212 = alloca i1, i1 0
- %nop4213 = alloca i1, i1 0
- %nop4214 = alloca i1, i1 0
- %nop4215 = alloca i1, i1 0
- %nop4216 = alloca i1, i1 0
- %nop4217 = alloca i1, i1 0
- %nop4218 = alloca i1, i1 0
- %nop4219 = alloca i1, i1 0
- %nop4220 = alloca i1, i1 0
- %nop4221 = alloca i1, i1 0
- %nop4222 = alloca i1, i1 0
- %nop4223 = alloca i1, i1 0
- %nop4224 = alloca i1, i1 0
- %nop4225 = alloca i1, i1 0
- %nop4226 = alloca i1, i1 0
- %nop4227 = alloca i1, i1 0
- %nop4228 = alloca i1, i1 0
- %nop4229 = alloca i1, i1 0
- %nop4230 = alloca i1, i1 0
- %nop4231 = alloca i1, i1 0
- %nop4232 = alloca i1, i1 0
- %nop4233 = alloca i1, i1 0
- %nop4234 = alloca i1, i1 0
- %nop4235 = alloca i1, i1 0
- %nop4236 = alloca i1, i1 0
- %nop4237 = alloca i1, i1 0
- %nop4238 = alloca i1, i1 0
- %nop4239 = alloca i1, i1 0
- %nop4240 = alloca i1, i1 0
- %nop4241 = alloca i1, i1 0
- %nop4242 = alloca i1, i1 0
- %nop4243 = alloca i1, i1 0
- %nop4244 = alloca i1, i1 0
- %nop4245 = alloca i1, i1 0
- %nop4246 = alloca i1, i1 0
- %nop4247 = alloca i1, i1 0
- %nop4248 = alloca i1, i1 0
- %nop4249 = alloca i1, i1 0
- %nop4250 = alloca i1, i1 0
- %nop4251 = alloca i1, i1 0
- %nop4252 = alloca i1, i1 0
- %nop4253 = alloca i1, i1 0
- %nop4254 = alloca i1, i1 0
- %nop4255 = alloca i1, i1 0
- %nop4256 = alloca i1, i1 0
- %nop4257 = alloca i1, i1 0
- %nop4258 = alloca i1, i1 0
- %nop4259 = alloca i1, i1 0
- %nop4260 = alloca i1, i1 0
- %nop4261 = alloca i1, i1 0
- %nop4262 = alloca i1, i1 0
- %nop4263 = alloca i1, i1 0
- %nop4264 = alloca i1, i1 0
- %nop4265 = alloca i1, i1 0
- %nop4266 = alloca i1, i1 0
- %nop4267 = alloca i1, i1 0
- %nop4268 = alloca i1, i1 0
- %nop4269 = alloca i1, i1 0
- %nop4270 = alloca i1, i1 0
- %nop4271 = alloca i1, i1 0
- %nop4272 = alloca i1, i1 0
- %nop4273 = alloca i1, i1 0
- %nop4274 = alloca i1, i1 0
- %nop4275 = alloca i1, i1 0
- %nop4276 = alloca i1, i1 0
- %nop4277 = alloca i1, i1 0
- %nop4278 = alloca i1, i1 0
- %nop4279 = alloca i1, i1 0
- %nop4280 = alloca i1, i1 0
- %nop4281 = alloca i1, i1 0
- %nop4282 = alloca i1, i1 0
- %nop4283 = alloca i1, i1 0
- %nop4284 = alloca i1, i1 0
- %nop4285 = alloca i1, i1 0
- %nop4286 = alloca i1, i1 0
- %nop4287 = alloca i1, i1 0
- %nop4288 = alloca i1, i1 0
- %nop4289 = alloca i1, i1 0
- %nop4290 = alloca i1, i1 0
- %nop4291 = alloca i1, i1 0
- %nop4292 = alloca i1, i1 0
- %nop4293 = alloca i1, i1 0
- %nop4294 = alloca i1, i1 0
- %nop4295 = alloca i1, i1 0
- %nop4296 = alloca i1, i1 0
- %nop4297 = alloca i1, i1 0
- %nop4298 = alloca i1, i1 0
- %nop4299 = alloca i1, i1 0
- %nop4300 = alloca i1, i1 0
- %nop4301 = alloca i1, i1 0
- %nop4302 = alloca i1, i1 0
- %nop4303 = alloca i1, i1 0
- %nop4304 = alloca i1, i1 0
- %nop4305 = alloca i1, i1 0
- %nop4306 = alloca i1, i1 0
- %nop4307 = alloca i1, i1 0
- %nop4308 = alloca i1, i1 0
- %nop4309 = alloca i1, i1 0
- %nop4310 = alloca i1, i1 0
- %nop4311 = alloca i1, i1 0
- %nop4312 = alloca i1, i1 0
- %nop4313 = alloca i1, i1 0
- %nop4314 = alloca i1, i1 0
- %nop4315 = alloca i1, i1 0
- %nop4316 = alloca i1, i1 0
- %nop4317 = alloca i1, i1 0
- %nop4318 = alloca i1, i1 0
- %nop4319 = alloca i1, i1 0
- %nop4320 = alloca i1, i1 0
- %nop4321 = alloca i1, i1 0
- %nop4322 = alloca i1, i1 0
- %nop4323 = alloca i1, i1 0
- %nop4324 = alloca i1, i1 0
- %nop4325 = alloca i1, i1 0
- %nop4326 = alloca i1, i1 0
- %nop4327 = alloca i1, i1 0
- %nop4328 = alloca i1, i1 0
- %nop4329 = alloca i1, i1 0
- %nop4330 = alloca i1, i1 0
- %nop4331 = alloca i1, i1 0
- %nop4332 = alloca i1, i1 0
- %nop4333 = alloca i1, i1 0
- %nop4334 = alloca i1, i1 0
- %nop4335 = alloca i1, i1 0
- %nop4336 = alloca i1, i1 0
- %nop4337 = alloca i1, i1 0
- %nop4338 = alloca i1, i1 0
- %nop4339 = alloca i1, i1 0
- %nop4340 = alloca i1, i1 0
- %nop4341 = alloca i1, i1 0
- %nop4342 = alloca i1, i1 0
- %nop4343 = alloca i1, i1 0
- %nop4344 = alloca i1, i1 0
- %nop4345 = alloca i1, i1 0
- %nop4346 = alloca i1, i1 0
- %nop4347 = alloca i1, i1 0
- %nop4348 = alloca i1, i1 0
- %nop4349 = alloca i1, i1 0
- %nop4350 = alloca i1, i1 0
- %nop4351 = alloca i1, i1 0
- %nop4352 = alloca i1, i1 0
- %nop4353 = alloca i1, i1 0
- %nop4354 = alloca i1, i1 0
- %nop4355 = alloca i1, i1 0
- %nop4356 = alloca i1, i1 0
- %nop4357 = alloca i1, i1 0
- %nop4358 = alloca i1, i1 0
- %nop4359 = alloca i1, i1 0
- %nop4360 = alloca i1, i1 0
- %nop4361 = alloca i1, i1 0
- %nop4362 = alloca i1, i1 0
- %nop4363 = alloca i1, i1 0
- %nop4364 = alloca i1, i1 0
- %nop4365 = alloca i1, i1 0
- %nop4366 = alloca i1, i1 0
- %nop4367 = alloca i1, i1 0
- %nop4368 = alloca i1, i1 0
- %nop4369 = alloca i1, i1 0
- %nop4370 = alloca i1, i1 0
- %nop4371 = alloca i1, i1 0
- %nop4372 = alloca i1, i1 0
- %nop4373 = alloca i1, i1 0
- %nop4374 = alloca i1, i1 0
- %nop4375 = alloca i1, i1 0
- %nop4376 = alloca i1, i1 0
- %nop4377 = alloca i1, i1 0
- %nop4378 = alloca i1, i1 0
- %nop4379 = alloca i1, i1 0
- %nop4380 = alloca i1, i1 0
- %nop4381 = alloca i1, i1 0
- %nop4382 = alloca i1, i1 0
- %nop4383 = alloca i1, i1 0
- %nop4384 = alloca i1, i1 0
- %nop4385 = alloca i1, i1 0
- %nop4386 = alloca i1, i1 0
- %nop4387 = alloca i1, i1 0
- %nop4388 = alloca i1, i1 0
- %nop4389 = alloca i1, i1 0
- %nop4390 = alloca i1, i1 0
- %nop4391 = alloca i1, i1 0
- %nop4392 = alloca i1, i1 0
- %nop4393 = alloca i1, i1 0
- %nop4394 = alloca i1, i1 0
- %nop4395 = alloca i1, i1 0
- %nop4396 = alloca i1, i1 0
- %nop4397 = alloca i1, i1 0
- %nop4398 = alloca i1, i1 0
- %nop4399 = alloca i1, i1 0
- %nop4400 = alloca i1, i1 0
- %nop4401 = alloca i1, i1 0
- %nop4402 = alloca i1, i1 0
- %nop4403 = alloca i1, i1 0
- %nop4404 = alloca i1, i1 0
- %nop4405 = alloca i1, i1 0
- %nop4406 = alloca i1, i1 0
- %nop4407 = alloca i1, i1 0
- %nop4408 = alloca i1, i1 0
- %nop4409 = alloca i1, i1 0
- %nop4410 = alloca i1, i1 0
- %nop4411 = alloca i1, i1 0
- %nop4412 = alloca i1, i1 0
- %nop4413 = alloca i1, i1 0
- %nop4414 = alloca i1, i1 0
- %nop4415 = alloca i1, i1 0
- %nop4416 = alloca i1, i1 0
- %nop4417 = alloca i1, i1 0
- %nop4418 = alloca i1, i1 0
- %nop4419 = alloca i1, i1 0
- %nop4420 = alloca i1, i1 0
- %nop4421 = alloca i1, i1 0
- %nop4422 = alloca i1, i1 0
- %nop4423 = alloca i1, i1 0
- %nop4424 = alloca i1, i1 0
- %nop4425 = alloca i1, i1 0
- %nop4426 = alloca i1, i1 0
- %nop4427 = alloca i1, i1 0
- %nop4428 = alloca i1, i1 0
- %nop4429 = alloca i1, i1 0
- %nop4430 = alloca i1, i1 0
- %nop4431 = alloca i1, i1 0
- %nop4432 = alloca i1, i1 0
- %nop4433 = alloca i1, i1 0
- %nop4434 = alloca i1, i1 0
- %nop4435 = alloca i1, i1 0
- %nop4436 = alloca i1, i1 0
- %nop4437 = alloca i1, i1 0
- %nop4438 = alloca i1, i1 0
- %nop4439 = alloca i1, i1 0
- %nop4440 = alloca i1, i1 0
- %nop4441 = alloca i1, i1 0
- %nop4442 = alloca i1, i1 0
- %nop4443 = alloca i1, i1 0
- %nop4444 = alloca i1, i1 0
- %nop4445 = alloca i1, i1 0
- %nop4446 = alloca i1, i1 0
- %nop4447 = alloca i1, i1 0
- %nop4448 = alloca i1, i1 0
- %nop4449 = alloca i1, i1 0
- %nop4450 = alloca i1, i1 0
- %nop4451 = alloca i1, i1 0
- %nop4452 = alloca i1, i1 0
- %nop4453 = alloca i1, i1 0
- %nop4454 = alloca i1, i1 0
- %nop4455 = alloca i1, i1 0
- %nop4456 = alloca i1, i1 0
- %nop4457 = alloca i1, i1 0
- %nop4458 = alloca i1, i1 0
- %nop4459 = alloca i1, i1 0
- %nop4460 = alloca i1, i1 0
- %nop4461 = alloca i1, i1 0
- %nop4462 = alloca i1, i1 0
- %nop4463 = alloca i1, i1 0
- %nop4464 = alloca i1, i1 0
- %nop4465 = alloca i1, i1 0
- %nop4466 = alloca i1, i1 0
- %nop4467 = alloca i1, i1 0
- %nop4468 = alloca i1, i1 0
- %nop4469 = alloca i1, i1 0
- %nop4470 = alloca i1, i1 0
- %nop4471 = alloca i1, i1 0
- %nop4472 = alloca i1, i1 0
- %nop4473 = alloca i1, i1 0
- %nop4474 = alloca i1, i1 0
- %nop4475 = alloca i1, i1 0
- %nop4476 = alloca i1, i1 0
- %nop4477 = alloca i1, i1 0
- %nop4478 = alloca i1, i1 0
- %nop4479 = alloca i1, i1 0
- %nop4480 = alloca i1, i1 0
- %nop4481 = alloca i1, i1 0
- %nop4482 = alloca i1, i1 0
- %nop4483 = alloca i1, i1 0
- %nop4484 = alloca i1, i1 0
- %nop4485 = alloca i1, i1 0
- %nop4486 = alloca i1, i1 0
- %nop4487 = alloca i1, i1 0
- %nop4488 = alloca i1, i1 0
- %nop4489 = alloca i1, i1 0
- %nop4490 = alloca i1, i1 0
- %nop4491 = alloca i1, i1 0
- %nop4492 = alloca i1, i1 0
- %nop4493 = alloca i1, i1 0
- %nop4494 = alloca i1, i1 0
- %nop4495 = alloca i1, i1 0
- %nop4496 = alloca i1, i1 0
- %nop4497 = alloca i1, i1 0
- %nop4498 = alloca i1, i1 0
- %nop4499 = alloca i1, i1 0
- %nop4500 = alloca i1, i1 0
- %nop4501 = alloca i1, i1 0
- %nop4502 = alloca i1, i1 0
- %nop4503 = alloca i1, i1 0
- %nop4504 = alloca i1, i1 0
- %nop4505 = alloca i1, i1 0
- %nop4506 = alloca i1, i1 0
- %nop4507 = alloca i1, i1 0
- %nop4508 = alloca i1, i1 0
- %nop4509 = alloca i1, i1 0
- %nop4510 = alloca i1, i1 0
- %nop4511 = alloca i1, i1 0
- %nop4512 = alloca i1, i1 0
- %nop4513 = alloca i1, i1 0
- %nop4514 = alloca i1, i1 0
- %nop4515 = alloca i1, i1 0
- %nop4516 = alloca i1, i1 0
- %nop4517 = alloca i1, i1 0
- %nop4518 = alloca i1, i1 0
- %nop4519 = alloca i1, i1 0
- %nop4520 = alloca i1, i1 0
- %nop4521 = alloca i1, i1 0
- %nop4522 = alloca i1, i1 0
- %nop4523 = alloca i1, i1 0
- %nop4524 = alloca i1, i1 0
- %nop4525 = alloca i1, i1 0
- %nop4526 = alloca i1, i1 0
- %nop4527 = alloca i1, i1 0
- %nop4528 = alloca i1, i1 0
- %nop4529 = alloca i1, i1 0
- %nop4530 = alloca i1, i1 0
- %nop4531 = alloca i1, i1 0
- %nop4532 = alloca i1, i1 0
- %nop4533 = alloca i1, i1 0
- %nop4534 = alloca i1, i1 0
- %nop4535 = alloca i1, i1 0
- %nop4536 = alloca i1, i1 0
- %nop4537 = alloca i1, i1 0
- %nop4538 = alloca i1, i1 0
- %nop4539 = alloca i1, i1 0
- %nop4540 = alloca i1, i1 0
- %nop4541 = alloca i1, i1 0
- %nop4542 = alloca i1, i1 0
- %nop4543 = alloca i1, i1 0
- %nop4544 = alloca i1, i1 0
- %nop4545 = alloca i1, i1 0
- %nop4546 = alloca i1, i1 0
- %nop4547 = alloca i1, i1 0
- %nop4548 = alloca i1, i1 0
- %nop4549 = alloca i1, i1 0
- %nop4550 = alloca i1, i1 0
- %nop4551 = alloca i1, i1 0
- %nop4552 = alloca i1, i1 0
- %nop4553 = alloca i1, i1 0
- %nop4554 = alloca i1, i1 0
- %nop4555 = alloca i1, i1 0
- %nop4556 = alloca i1, i1 0
- %nop4557 = alloca i1, i1 0
- %nop4558 = alloca i1, i1 0
- %nop4559 = alloca i1, i1 0
- %nop4560 = alloca i1, i1 0
- %nop4561 = alloca i1, i1 0
- %nop4562 = alloca i1, i1 0
- %nop4563 = alloca i1, i1 0
- %nop4564 = alloca i1, i1 0
- %nop4565 = alloca i1, i1 0
- %nop4566 = alloca i1, i1 0
- %nop4567 = alloca i1, i1 0
- %nop4568 = alloca i1, i1 0
- %nop4569 = alloca i1, i1 0
- %nop4570 = alloca i1, i1 0
- %nop4571 = alloca i1, i1 0
- %nop4572 = alloca i1, i1 0
- %nop4573 = alloca i1, i1 0
- %nop4574 = alloca i1, i1 0
- %nop4575 = alloca i1, i1 0
- %nop4576 = alloca i1, i1 0
- %nop4577 = alloca i1, i1 0
- %nop4578 = alloca i1, i1 0
- %nop4579 = alloca i1, i1 0
- %nop4580 = alloca i1, i1 0
- %nop4581 = alloca i1, i1 0
- %nop4582 = alloca i1, i1 0
- %nop4583 = alloca i1, i1 0
- %nop4584 = alloca i1, i1 0
- %nop4585 = alloca i1, i1 0
- %nop4586 = alloca i1, i1 0
- %nop4587 = alloca i1, i1 0
- %nop4588 = alloca i1, i1 0
- %nop4589 = alloca i1, i1 0
- %nop4590 = alloca i1, i1 0
- %nop4591 = alloca i1, i1 0
- %nop4592 = alloca i1, i1 0
- %nop4593 = alloca i1, i1 0
- %nop4594 = alloca i1, i1 0
- %nop4595 = alloca i1, i1 0
- %nop4596 = alloca i1, i1 0
- %nop4597 = alloca i1, i1 0
- %nop4598 = alloca i1, i1 0
- %nop4599 = alloca i1, i1 0
- %nop4600 = alloca i1, i1 0
- %nop4601 = alloca i1, i1 0
- %nop4602 = alloca i1, i1 0
- %nop4603 = alloca i1, i1 0
- %nop4604 = alloca i1, i1 0
- %nop4605 = alloca i1, i1 0
- %nop4606 = alloca i1, i1 0
- %nop4607 = alloca i1, i1 0
- %nop4608 = alloca i1, i1 0
- %nop4609 = alloca i1, i1 0
- %nop4610 = alloca i1, i1 0
- %nop4611 = alloca i1, i1 0
- %nop4612 = alloca i1, i1 0
- %nop4613 = alloca i1, i1 0
- %nop4614 = alloca i1, i1 0
- %nop4615 = alloca i1, i1 0
- %nop4616 = alloca i1, i1 0
- %nop4617 = alloca i1, i1 0
- %nop4618 = alloca i1, i1 0
- %nop4619 = alloca i1, i1 0
- %nop4620 = alloca i1, i1 0
- %nop4621 = alloca i1, i1 0
- %nop4622 = alloca i1, i1 0
- %nop4623 = alloca i1, i1 0
- %nop4624 = alloca i1, i1 0
- %nop4625 = alloca i1, i1 0
- %nop4626 = alloca i1, i1 0
- %nop4627 = alloca i1, i1 0
- %nop4628 = alloca i1, i1 0
- %nop4629 = alloca i1, i1 0
- %nop4630 = alloca i1, i1 0
- %nop4631 = alloca i1, i1 0
- %nop4632 = alloca i1, i1 0
- %nop4633 = alloca i1, i1 0
- %nop4634 = alloca i1, i1 0
- %nop4635 = alloca i1, i1 0
- %nop4636 = alloca i1, i1 0
- %nop4637 = alloca i1, i1 0
- %nop4638 = alloca i1, i1 0
- %nop4639 = alloca i1, i1 0
- %nop4640 = alloca i1, i1 0
- %nop4641 = alloca i1, i1 0
- %nop4642 = alloca i1, i1 0
- %nop4643 = alloca i1, i1 0
- %nop4644 = alloca i1, i1 0
- %nop4645 = alloca i1, i1 0
- %nop4646 = alloca i1, i1 0
- %nop4647 = alloca i1, i1 0
- %nop4648 = alloca i1, i1 0
- %nop4649 = alloca i1, i1 0
- %nop4650 = alloca i1, i1 0
- %nop4651 = alloca i1, i1 0
- %nop4652 = alloca i1, i1 0
- %nop4653 = alloca i1, i1 0
- %nop4654 = alloca i1, i1 0
- %nop4655 = alloca i1, i1 0
- %nop4656 = alloca i1, i1 0
- %nop4657 = alloca i1, i1 0
- %nop4658 = alloca i1, i1 0
- %nop4659 = alloca i1, i1 0
- %nop4660 = alloca i1, i1 0
- %nop4661 = alloca i1, i1 0
- %nop4662 = alloca i1, i1 0
- %nop4663 = alloca i1, i1 0
- %nop4664 = alloca i1, i1 0
- %nop4665 = alloca i1, i1 0
- %nop4666 = alloca i1, i1 0
- %nop4667 = alloca i1, i1 0
- %nop4668 = alloca i1, i1 0
- %nop4669 = alloca i1, i1 0
- %nop4670 = alloca i1, i1 0
- %nop4671 = alloca i1, i1 0
- %nop4672 = alloca i1, i1 0
- %nop4673 = alloca i1, i1 0
- %nop4674 = alloca i1, i1 0
- %nop4675 = alloca i1, i1 0
- %nop4676 = alloca i1, i1 0
- %nop4677 = alloca i1, i1 0
- %nop4678 = alloca i1, i1 0
- %nop4679 = alloca i1, i1 0
- %nop4680 = alloca i1, i1 0
- %nop4681 = alloca i1, i1 0
- %nop4682 = alloca i1, i1 0
- %nop4683 = alloca i1, i1 0
- %nop4684 = alloca i1, i1 0
- %nop4685 = alloca i1, i1 0
- %nop4686 = alloca i1, i1 0
- %nop4687 = alloca i1, i1 0
- %nop4688 = alloca i1, i1 0
- %nop4689 = alloca i1, i1 0
- %nop4690 = alloca i1, i1 0
- %nop4691 = alloca i1, i1 0
- %nop4692 = alloca i1, i1 0
- %nop4693 = alloca i1, i1 0
- %nop4694 = alloca i1, i1 0
- %nop4695 = alloca i1, i1 0
- %nop4696 = alloca i1, i1 0
- %nop4697 = alloca i1, i1 0
- %nop4698 = alloca i1, i1 0
- %nop4699 = alloca i1, i1 0
- %nop4700 = alloca i1, i1 0
- %nop4701 = alloca i1, i1 0
- %nop4702 = alloca i1, i1 0
- %nop4703 = alloca i1, i1 0
- %nop4704 = alloca i1, i1 0
- %nop4705 = alloca i1, i1 0
- %nop4706 = alloca i1, i1 0
- %nop4707 = alloca i1, i1 0
- %nop4708 = alloca i1, i1 0
- %nop4709 = alloca i1, i1 0
- %nop4710 = alloca i1, i1 0
- %nop4711 = alloca i1, i1 0
- %nop4712 = alloca i1, i1 0
- %nop4713 = alloca i1, i1 0
- %nop4714 = alloca i1, i1 0
- %nop4715 = alloca i1, i1 0
- %nop4716 = alloca i1, i1 0
- %nop4717 = alloca i1, i1 0
- %nop4718 = alloca i1, i1 0
- %nop4719 = alloca i1, i1 0
- %nop4720 = alloca i1, i1 0
- %nop4721 = alloca i1, i1 0
- %nop4722 = alloca i1, i1 0
- %nop4723 = alloca i1, i1 0
- %nop4724 = alloca i1, i1 0
- %nop4725 = alloca i1, i1 0
- %nop4726 = alloca i1, i1 0
- %nop4727 = alloca i1, i1 0
- %nop4728 = alloca i1, i1 0
- %nop4729 = alloca i1, i1 0
- %nop4730 = alloca i1, i1 0
- %nop4731 = alloca i1, i1 0
- %nop4732 = alloca i1, i1 0
- %nop4733 = alloca i1, i1 0
- %nop4734 = alloca i1, i1 0
- %nop4735 = alloca i1, i1 0
- %nop4736 = alloca i1, i1 0
- %nop4737 = alloca i1, i1 0
- %nop4738 = alloca i1, i1 0
- %nop4739 = alloca i1, i1 0
- %nop4740 = alloca i1, i1 0
- %nop4741 = alloca i1, i1 0
- %nop4742 = alloca i1, i1 0
- %nop4743 = alloca i1, i1 0
- %nop4744 = alloca i1, i1 0
- %nop4745 = alloca i1, i1 0
- %nop4746 = alloca i1, i1 0
- %nop4747 = alloca i1, i1 0
- %nop4748 = alloca i1, i1 0
- %nop4749 = alloca i1, i1 0
- %nop4750 = alloca i1, i1 0
- %nop4751 = alloca i1, i1 0
- %nop4752 = alloca i1, i1 0
- %nop4753 = alloca i1, i1 0
- %nop4754 = alloca i1, i1 0
- %nop4755 = alloca i1, i1 0
- %nop4756 = alloca i1, i1 0
- %nop4757 = alloca i1, i1 0
- %nop4758 = alloca i1, i1 0
- %nop4759 = alloca i1, i1 0
- %nop4760 = alloca i1, i1 0
- %nop4761 = alloca i1, i1 0
- %nop4762 = alloca i1, i1 0
- %nop4763 = alloca i1, i1 0
- %nop4764 = alloca i1, i1 0
- %nop4765 = alloca i1, i1 0
- %nop4766 = alloca i1, i1 0
- %nop4767 = alloca i1, i1 0
- %nop4768 = alloca i1, i1 0
- %nop4769 = alloca i1, i1 0
- %nop4770 = alloca i1, i1 0
- %nop4771 = alloca i1, i1 0
- %nop4772 = alloca i1, i1 0
- %nop4773 = alloca i1, i1 0
- %nop4774 = alloca i1, i1 0
- %nop4775 = alloca i1, i1 0
- %nop4776 = alloca i1, i1 0
- %nop4777 = alloca i1, i1 0
- %nop4778 = alloca i1, i1 0
- %nop4779 = alloca i1, i1 0
- %nop4780 = alloca i1, i1 0
- %nop4781 = alloca i1, i1 0
- %nop4782 = alloca i1, i1 0
- %nop4783 = alloca i1, i1 0
- %nop4784 = alloca i1, i1 0
- %nop4785 = alloca i1, i1 0
- %nop4786 = alloca i1, i1 0
- %nop4787 = alloca i1, i1 0
- %nop4788 = alloca i1, i1 0
- %nop4789 = alloca i1, i1 0
- %nop4790 = alloca i1, i1 0
- %nop4791 = alloca i1, i1 0
- %nop4792 = alloca i1, i1 0
- %nop4793 = alloca i1, i1 0
- %nop4794 = alloca i1, i1 0
- %nop4795 = alloca i1, i1 0
- %nop4796 = alloca i1, i1 0
- %nop4797 = alloca i1, i1 0
- %nop4798 = alloca i1, i1 0
- %nop4799 = alloca i1, i1 0
- %nop4800 = alloca i1, i1 0
- %nop4801 = alloca i1, i1 0
- %nop4802 = alloca i1, i1 0
- %nop4803 = alloca i1, i1 0
- %nop4804 = alloca i1, i1 0
- %nop4805 = alloca i1, i1 0
- %nop4806 = alloca i1, i1 0
- %nop4807 = alloca i1, i1 0
- %nop4808 = alloca i1, i1 0
- %nop4809 = alloca i1, i1 0
- %nop4810 = alloca i1, i1 0
- %nop4811 = alloca i1, i1 0
- %nop4812 = alloca i1, i1 0
- %nop4813 = alloca i1, i1 0
- %nop4814 = alloca i1, i1 0
- %nop4815 = alloca i1, i1 0
- %nop4816 = alloca i1, i1 0
- %nop4817 = alloca i1, i1 0
- %nop4818 = alloca i1, i1 0
- %nop4819 = alloca i1, i1 0
- %nop4820 = alloca i1, i1 0
- %nop4821 = alloca i1, i1 0
- %nop4822 = alloca i1, i1 0
- %nop4823 = alloca i1, i1 0
- %nop4824 = alloca i1, i1 0
- %nop4825 = alloca i1, i1 0
- %nop4826 = alloca i1, i1 0
- %nop4827 = alloca i1, i1 0
- %nop4828 = alloca i1, i1 0
- %nop4829 = alloca i1, i1 0
- %nop4830 = alloca i1, i1 0
- %nop4831 = alloca i1, i1 0
- %nop4832 = alloca i1, i1 0
- %nop4833 = alloca i1, i1 0
- %nop4834 = alloca i1, i1 0
- %nop4835 = alloca i1, i1 0
- %nop4836 = alloca i1, i1 0
- %nop4837 = alloca i1, i1 0
- %nop4838 = alloca i1, i1 0
- %nop4839 = alloca i1, i1 0
- %nop4840 = alloca i1, i1 0
- %nop4841 = alloca i1, i1 0
- %nop4842 = alloca i1, i1 0
- %nop4843 = alloca i1, i1 0
- %nop4844 = alloca i1, i1 0
- %nop4845 = alloca i1, i1 0
- %nop4846 = alloca i1, i1 0
- %nop4847 = alloca i1, i1 0
- %nop4848 = alloca i1, i1 0
- %nop4849 = alloca i1, i1 0
- %nop4850 = alloca i1, i1 0
- %nop4851 = alloca i1, i1 0
- %nop4852 = alloca i1, i1 0
- %nop4853 = alloca i1, i1 0
- %nop4854 = alloca i1, i1 0
- %nop4855 = alloca i1, i1 0
- %nop4856 = alloca i1, i1 0
- %nop4857 = alloca i1, i1 0
- %nop4858 = alloca i1, i1 0
- %nop4859 = alloca i1, i1 0
- %nop4860 = alloca i1, i1 0
- %nop4861 = alloca i1, i1 0
- %nop4862 = alloca i1, i1 0
- %nop4863 = alloca i1, i1 0
- %nop4864 = alloca i1, i1 0
- %nop4865 = alloca i1, i1 0
- %nop4866 = alloca i1, i1 0
- %nop4867 = alloca i1, i1 0
- %nop4868 = alloca i1, i1 0
- %nop4869 = alloca i1, i1 0
- %nop4870 = alloca i1, i1 0
- %nop4871 = alloca i1, i1 0
- %nop4872 = alloca i1, i1 0
- %nop4873 = alloca i1, i1 0
- %nop4874 = alloca i1, i1 0
- %nop4875 = alloca i1, i1 0
- %nop4876 = alloca i1, i1 0
- %nop4877 = alloca i1, i1 0
- %nop4878 = alloca i1, i1 0
- %nop4879 = alloca i1, i1 0
- %nop4880 = alloca i1, i1 0
- %nop4881 = alloca i1, i1 0
- %nop4882 = alloca i1, i1 0
- %nop4883 = alloca i1, i1 0
- %nop4884 = alloca i1, i1 0
- %nop4885 = alloca i1, i1 0
- %nop4886 = alloca i1, i1 0
- %nop4887 = alloca i1, i1 0
- %nop4888 = alloca i1, i1 0
- %nop4889 = alloca i1, i1 0
- %nop4890 = alloca i1, i1 0
- %nop4891 = alloca i1, i1 0
- %nop4892 = alloca i1, i1 0
- %nop4893 = alloca i1, i1 0
- %nop4894 = alloca i1, i1 0
- %nop4895 = alloca i1, i1 0
- %nop4896 = alloca i1, i1 0
- %nop4897 = alloca i1, i1 0
- %nop4898 = alloca i1, i1 0
- %nop4899 = alloca i1, i1 0
- %nop4900 = alloca i1, i1 0
- %nop4901 = alloca i1, i1 0
- %nop4902 = alloca i1, i1 0
- %nop4903 = alloca i1, i1 0
- %nop4904 = alloca i1, i1 0
- %nop4905 = alloca i1, i1 0
- %nop4906 = alloca i1, i1 0
- %nop4907 = alloca i1, i1 0
- %nop4908 = alloca i1, i1 0
- %nop4909 = alloca i1, i1 0
- %nop4910 = alloca i1, i1 0
- %nop4911 = alloca i1, i1 0
- %nop4912 = alloca i1, i1 0
- %nop4913 = alloca i1, i1 0
- %nop4914 = alloca i1, i1 0
- %nop4915 = alloca i1, i1 0
- %nop4916 = alloca i1, i1 0
- %nop4917 = alloca i1, i1 0
- %nop4918 = alloca i1, i1 0
- %nop4919 = alloca i1, i1 0
- %nop4920 = alloca i1, i1 0
- %nop4921 = alloca i1, i1 0
- %nop4922 = alloca i1, i1 0
- %nop4923 = alloca i1, i1 0
- %nop4924 = alloca i1, i1 0
- %nop4925 = alloca i1, i1 0
- %nop4926 = alloca i1, i1 0
- %nop4927 = alloca i1, i1 0
- %nop4928 = alloca i1, i1 0
- %nop4929 = alloca i1, i1 0
- %nop4930 = alloca i1, i1 0
- %nop4931 = alloca i1, i1 0
- %nop4932 = alloca i1, i1 0
- %nop4933 = alloca i1, i1 0
- %nop4934 = alloca i1, i1 0
- %nop4935 = alloca i1, i1 0
- %nop4936 = alloca i1, i1 0
- %nop4937 = alloca i1, i1 0
- %nop4938 = alloca i1, i1 0
- %nop4939 = alloca i1, i1 0
- %nop4940 = alloca i1, i1 0
- %nop4941 = alloca i1, i1 0
- %nop4942 = alloca i1, i1 0
- %nop4943 = alloca i1, i1 0
- %nop4944 = alloca i1, i1 0
- %nop4945 = alloca i1, i1 0
- %nop4946 = alloca i1, i1 0
- %nop4947 = alloca i1, i1 0
- %nop4948 = alloca i1, i1 0
- %nop4949 = alloca i1, i1 0
- %nop4950 = alloca i1, i1 0
- %nop4951 = alloca i1, i1 0
- %nop4952 = alloca i1, i1 0
- %nop4953 = alloca i1, i1 0
- %nop4954 = alloca i1, i1 0
- %nop4955 = alloca i1, i1 0
- %nop4956 = alloca i1, i1 0
- %nop4957 = alloca i1, i1 0
- %nop4958 = alloca i1, i1 0
- %nop4959 = alloca i1, i1 0
- %nop4960 = alloca i1, i1 0
- %nop4961 = alloca i1, i1 0
- %nop4962 = alloca i1, i1 0
- %nop4963 = alloca i1, i1 0
- %nop4964 = alloca i1, i1 0
- %nop4965 = alloca i1, i1 0
- %nop4966 = alloca i1, i1 0
- %nop4967 = alloca i1, i1 0
- %nop4968 = alloca i1, i1 0
- %nop4969 = alloca i1, i1 0
- %nop4970 = alloca i1, i1 0
- %nop4971 = alloca i1, i1 0
- %nop4972 = alloca i1, i1 0
- %nop4973 = alloca i1, i1 0
- %nop4974 = alloca i1, i1 0
- %nop4975 = alloca i1, i1 0
- %nop4976 = alloca i1, i1 0
- %nop4977 = alloca i1, i1 0
- %nop4978 = alloca i1, i1 0
- %nop4979 = alloca i1, i1 0
- %nop4980 = alloca i1, i1 0
- %nop4981 = alloca i1, i1 0
- %nop4982 = alloca i1, i1 0
- %nop4983 = alloca i1, i1 0
- %nop4984 = alloca i1, i1 0
- %nop4985 = alloca i1, i1 0
- %nop4986 = alloca i1, i1 0
- %nop4987 = alloca i1, i1 0
- %nop4988 = alloca i1, i1 0
- %nop4989 = alloca i1, i1 0
- %nop4990 = alloca i1, i1 0
- %nop4991 = alloca i1, i1 0
- %nop4992 = alloca i1, i1 0
- %nop4993 = alloca i1, i1 0
- %nop4994 = alloca i1, i1 0
- %nop4995 = alloca i1, i1 0
- %nop4996 = alloca i1, i1 0
- %nop4997 = alloca i1, i1 0
- %nop4998 = alloca i1, i1 0
- %nop4999 = alloca i1, i1 0
- %nop5000 = alloca i1, i1 0
- %nop5001 = alloca i1, i1 0
- %nop5002 = alloca i1, i1 0
- %nop5003 = alloca i1, i1 0
- %nop5004 = alloca i1, i1 0
- %nop5005 = alloca i1, i1 0
- %nop5006 = alloca i1, i1 0
- %nop5007 = alloca i1, i1 0
- %nop5008 = alloca i1, i1 0
- %nop5009 = alloca i1, i1 0
- %nop5010 = alloca i1, i1 0
- %nop5011 = alloca i1, i1 0
- %nop5012 = alloca i1, i1 0
- %nop5013 = alloca i1, i1 0
- %nop5014 = alloca i1, i1 0
- %nop5015 = alloca i1, i1 0
- %nop5016 = alloca i1, i1 0
- %nop5017 = alloca i1, i1 0
- %nop5018 = alloca i1, i1 0
- %nop5019 = alloca i1, i1 0
- %nop5020 = alloca i1, i1 0
- %nop5021 = alloca i1, i1 0
- %nop5022 = alloca i1, i1 0
- %nop5023 = alloca i1, i1 0
- %nop5024 = alloca i1, i1 0
- %nop5025 = alloca i1, i1 0
- %nop5026 = alloca i1, i1 0
- %nop5027 = alloca i1, i1 0
- %nop5028 = alloca i1, i1 0
- %nop5029 = alloca i1, i1 0
- %nop5030 = alloca i1, i1 0
- %nop5031 = alloca i1, i1 0
- %nop5032 = alloca i1, i1 0
- %nop5033 = alloca i1, i1 0
- %nop5034 = alloca i1, i1 0
- %nop5035 = alloca i1, i1 0
- %nop5036 = alloca i1, i1 0
- %nop5037 = alloca i1, i1 0
- %nop5038 = alloca i1, i1 0
- %nop5039 = alloca i1, i1 0
- %nop5040 = alloca i1, i1 0
- %nop5041 = alloca i1, i1 0
- %nop5042 = alloca i1, i1 0
- %nop5043 = alloca i1, i1 0
- %nop5044 = alloca i1, i1 0
- %nop5045 = alloca i1, i1 0
- %nop5046 = alloca i1, i1 0
- %nop5047 = alloca i1, i1 0
- %nop5048 = alloca i1, i1 0
- %nop5049 = alloca i1, i1 0
- %nop5050 = alloca i1, i1 0
- %nop5051 = alloca i1, i1 0
- %nop5052 = alloca i1, i1 0
- %nop5053 = alloca i1, i1 0
- %nop5054 = alloca i1, i1 0
- %nop5055 = alloca i1, i1 0
- %nop5056 = alloca i1, i1 0
- %nop5057 = alloca i1, i1 0
- %nop5058 = alloca i1, i1 0
- %nop5059 = alloca i1, i1 0
- %nop5060 = alloca i1, i1 0
- %nop5061 = alloca i1, i1 0
- %nop5062 = alloca i1, i1 0
- %nop5063 = alloca i1, i1 0
- %nop5064 = alloca i1, i1 0
- %nop5065 = alloca i1, i1 0
- %nop5066 = alloca i1, i1 0
- %nop5067 = alloca i1, i1 0
- %nop5068 = alloca i1, i1 0
- %nop5069 = alloca i1, i1 0
- %nop5070 = alloca i1, i1 0
- %nop5071 = alloca i1, i1 0
- %nop5072 = alloca i1, i1 0
- %nop5073 = alloca i1, i1 0
- %nop5074 = alloca i1, i1 0
- %nop5075 = alloca i1, i1 0
- %nop5076 = alloca i1, i1 0
- %nop5077 = alloca i1, i1 0
- %nop5078 = alloca i1, i1 0
- %nop5079 = alloca i1, i1 0
- %nop5080 = alloca i1, i1 0
- %nop5081 = alloca i1, i1 0
- %nop5082 = alloca i1, i1 0
- %nop5083 = alloca i1, i1 0
- %nop5084 = alloca i1, i1 0
- %nop5085 = alloca i1, i1 0
- %nop5086 = alloca i1, i1 0
- %nop5087 = alloca i1, i1 0
- %nop5088 = alloca i1, i1 0
- %nop5089 = alloca i1, i1 0
- %nop5090 = alloca i1, i1 0
- %nop5091 = alloca i1, i1 0
- %nop5092 = alloca i1, i1 0
- %nop5093 = alloca i1, i1 0
- %nop5094 = alloca i1, i1 0
- %nop5095 = alloca i1, i1 0
- %nop5096 = alloca i1, i1 0
- %nop5097 = alloca i1, i1 0
- %nop5098 = alloca i1, i1 0
- %nop5099 = alloca i1, i1 0
- %nop5100 = alloca i1, i1 0
- %nop5101 = alloca i1, i1 0
- %nop5102 = alloca i1, i1 0
- %nop5103 = alloca i1, i1 0
- %nop5104 = alloca i1, i1 0
- %nop5105 = alloca i1, i1 0
- %nop5106 = alloca i1, i1 0
- %nop5107 = alloca i1, i1 0
- %nop5108 = alloca i1, i1 0
- %nop5109 = alloca i1, i1 0
- %nop5110 = alloca i1, i1 0
- %nop5111 = alloca i1, i1 0
- %nop5112 = alloca i1, i1 0
- %nop5113 = alloca i1, i1 0
- %nop5114 = alloca i1, i1 0
- %nop5115 = alloca i1, i1 0
- %nop5116 = alloca i1, i1 0
- %nop5117 = alloca i1, i1 0
- %nop5118 = alloca i1, i1 0
- %nop5119 = alloca i1, i1 0
- %nop5120 = alloca i1, i1 0
- %nop5121 = alloca i1, i1 0
- %nop5122 = alloca i1, i1 0
- %nop5123 = alloca i1, i1 0
- %nop5124 = alloca i1, i1 0
- %nop5125 = alloca i1, i1 0
- %nop5126 = alloca i1, i1 0
- %nop5127 = alloca i1, i1 0
- %nop5128 = alloca i1, i1 0
- %nop5129 = alloca i1, i1 0
- %nop5130 = alloca i1, i1 0
- %nop5131 = alloca i1, i1 0
- %nop5132 = alloca i1, i1 0
- %nop5133 = alloca i1, i1 0
- %nop5134 = alloca i1, i1 0
- %nop5135 = alloca i1, i1 0
- %nop5136 = alloca i1, i1 0
- %nop5137 = alloca i1, i1 0
- %nop5138 = alloca i1, i1 0
- %nop5139 = alloca i1, i1 0
- %nop5140 = alloca i1, i1 0
- %nop5141 = alloca i1, i1 0
- %nop5142 = alloca i1, i1 0
- %nop5143 = alloca i1, i1 0
- %nop5144 = alloca i1, i1 0
- %nop5145 = alloca i1, i1 0
- %nop5146 = alloca i1, i1 0
- %nop5147 = alloca i1, i1 0
- %nop5148 = alloca i1, i1 0
- %nop5149 = alloca i1, i1 0
- %nop5150 = alloca i1, i1 0
- %nop5151 = alloca i1, i1 0
- %nop5152 = alloca i1, i1 0
- %nop5153 = alloca i1, i1 0
- %nop5154 = alloca i1, i1 0
- %nop5155 = alloca i1, i1 0
- %nop5156 = alloca i1, i1 0
- %nop5157 = alloca i1, i1 0
- %nop5158 = alloca i1, i1 0
- %nop5159 = alloca i1, i1 0
- %nop5160 = alloca i1, i1 0
- %nop5161 = alloca i1, i1 0
- %nop5162 = alloca i1, i1 0
- %nop5163 = alloca i1, i1 0
- %nop5164 = alloca i1, i1 0
- %nop5165 = alloca i1, i1 0
- %nop5166 = alloca i1, i1 0
- %nop5167 = alloca i1, i1 0
- %nop5168 = alloca i1, i1 0
- %nop5169 = alloca i1, i1 0
- %nop5170 = alloca i1, i1 0
- %nop5171 = alloca i1, i1 0
- %nop5172 = alloca i1, i1 0
- %nop5173 = alloca i1, i1 0
- %nop5174 = alloca i1, i1 0
- %nop5175 = alloca i1, i1 0
- %nop5176 = alloca i1, i1 0
- %nop5177 = alloca i1, i1 0
- %nop5178 = alloca i1, i1 0
- %nop5179 = alloca i1, i1 0
- %nop5180 = alloca i1, i1 0
- %nop5181 = alloca i1, i1 0
- %nop5182 = alloca i1, i1 0
- %nop5183 = alloca i1, i1 0
- %nop5184 = alloca i1, i1 0
- %nop5185 = alloca i1, i1 0
- %nop5186 = alloca i1, i1 0
- %nop5187 = alloca i1, i1 0
- %nop5188 = alloca i1, i1 0
- %nop5189 = alloca i1, i1 0
- %nop5190 = alloca i1, i1 0
- %nop5191 = alloca i1, i1 0
- %nop5192 = alloca i1, i1 0
- %nop5193 = alloca i1, i1 0
- %nop5194 = alloca i1, i1 0
- %nop5195 = alloca i1, i1 0
- %nop5196 = alloca i1, i1 0
- %nop5197 = alloca i1, i1 0
- %nop5198 = alloca i1, i1 0
- %nop5199 = alloca i1, i1 0
- %nop5200 = alloca i1, i1 0
- %nop5201 = alloca i1, i1 0
- %nop5202 = alloca i1, i1 0
- %nop5203 = alloca i1, i1 0
- %nop5204 = alloca i1, i1 0
- %nop5205 = alloca i1, i1 0
- %nop5206 = alloca i1, i1 0
- %nop5207 = alloca i1, i1 0
- %nop5208 = alloca i1, i1 0
- %nop5209 = alloca i1, i1 0
- %nop5210 = alloca i1, i1 0
- %nop5211 = alloca i1, i1 0
- %nop5212 = alloca i1, i1 0
- %nop5213 = alloca i1, i1 0
- %nop5214 = alloca i1, i1 0
- %nop5215 = alloca i1, i1 0
- %nop5216 = alloca i1, i1 0
- %nop5217 = alloca i1, i1 0
- %nop5218 = alloca i1, i1 0
- %nop5219 = alloca i1, i1 0
- %nop5220 = alloca i1, i1 0
- %nop5221 = alloca i1, i1 0
- %nop5222 = alloca i1, i1 0
- %nop5223 = alloca i1, i1 0
- %nop5224 = alloca i1, i1 0
- %nop5225 = alloca i1, i1 0
- %nop5226 = alloca i1, i1 0
- %nop5227 = alloca i1, i1 0
- %nop5228 = alloca i1, i1 0
- %nop5229 = alloca i1, i1 0
- %nop5230 = alloca i1, i1 0
- %nop5231 = alloca i1, i1 0
- %nop5232 = alloca i1, i1 0
- %nop5233 = alloca i1, i1 0
- %nop5234 = alloca i1, i1 0
- %nop5235 = alloca i1, i1 0
- %nop5236 = alloca i1, i1 0
- %nop5237 = alloca i1, i1 0
- %nop5238 = alloca i1, i1 0
- %nop5239 = alloca i1, i1 0
- %nop5240 = alloca i1, i1 0
- %nop5241 = alloca i1, i1 0
- %nop5242 = alloca i1, i1 0
- %nop5243 = alloca i1, i1 0
- %nop5244 = alloca i1, i1 0
- %nop5245 = alloca i1, i1 0
- %nop5246 = alloca i1, i1 0
- %nop5247 = alloca i1, i1 0
- %nop5248 = alloca i1, i1 0
- %nop5249 = alloca i1, i1 0
- %nop5250 = alloca i1, i1 0
- %nop5251 = alloca i1, i1 0
- %nop5252 = alloca i1, i1 0
- %nop5253 = alloca i1, i1 0
- %nop5254 = alloca i1, i1 0
- %nop5255 = alloca i1, i1 0
- %nop5256 = alloca i1, i1 0
- %nop5257 = alloca i1, i1 0
- %nop5258 = alloca i1, i1 0
- %nop5259 = alloca i1, i1 0
- %nop5260 = alloca i1, i1 0
- %nop5261 = alloca i1, i1 0
- %nop5262 = alloca i1, i1 0
- %nop5263 = alloca i1, i1 0
- %nop5264 = alloca i1, i1 0
- %nop5265 = alloca i1, i1 0
- %nop5266 = alloca i1, i1 0
- %nop5267 = alloca i1, i1 0
- %nop5268 = alloca i1, i1 0
- %nop5269 = alloca i1, i1 0
- %nop5270 = alloca i1, i1 0
- %nop5271 = alloca i1, i1 0
- %nop5272 = alloca i1, i1 0
- %nop5273 = alloca i1, i1 0
- %nop5274 = alloca i1, i1 0
- %nop5275 = alloca i1, i1 0
- %nop5276 = alloca i1, i1 0
- %nop5277 = alloca i1, i1 0
- %nop5278 = alloca i1, i1 0
- %nop5279 = alloca i1, i1 0
- %nop5280 = alloca i1, i1 0
- %nop5281 = alloca i1, i1 0
- %nop5282 = alloca i1, i1 0
- %nop5283 = alloca i1, i1 0
- %nop5284 = alloca i1, i1 0
- %nop5285 = alloca i1, i1 0
- %nop5286 = alloca i1, i1 0
- %nop5287 = alloca i1, i1 0
- %nop5288 = alloca i1, i1 0
- %nop5289 = alloca i1, i1 0
- %nop5290 = alloca i1, i1 0
- %nop5291 = alloca i1, i1 0
- %nop5292 = alloca i1, i1 0
- %nop5293 = alloca i1, i1 0
- %nop5294 = alloca i1, i1 0
- %nop5295 = alloca i1, i1 0
- %nop5296 = alloca i1, i1 0
- %nop5297 = alloca i1, i1 0
- %nop5298 = alloca i1, i1 0
- %nop5299 = alloca i1, i1 0
- %nop5300 = alloca i1, i1 0
- %nop5301 = alloca i1, i1 0
- %nop5302 = alloca i1, i1 0
- %nop5303 = alloca i1, i1 0
- %nop5304 = alloca i1, i1 0
- %nop5305 = alloca i1, i1 0
- %nop5306 = alloca i1, i1 0
- %nop5307 = alloca i1, i1 0
- %nop5308 = alloca i1, i1 0
- %nop5309 = alloca i1, i1 0
- %nop5310 = alloca i1, i1 0
- %nop5311 = alloca i1, i1 0
- %nop5312 = alloca i1, i1 0
- %nop5313 = alloca i1, i1 0
- %nop5314 = alloca i1, i1 0
- %nop5315 = alloca i1, i1 0
- %nop5316 = alloca i1, i1 0
- %nop5317 = alloca i1, i1 0
- %nop5318 = alloca i1, i1 0
- %nop5319 = alloca i1, i1 0
- %nop5320 = alloca i1, i1 0
- %nop5321 = alloca i1, i1 0
- %nop5322 = alloca i1, i1 0
- %nop5323 = alloca i1, i1 0
- %nop5324 = alloca i1, i1 0
- %nop5325 = alloca i1, i1 0
- %nop5326 = alloca i1, i1 0
- %nop5327 = alloca i1, i1 0
- %nop5328 = alloca i1, i1 0
- %nop5329 = alloca i1, i1 0
- %nop5330 = alloca i1, i1 0
- %nop5331 = alloca i1, i1 0
- %nop5332 = alloca i1, i1 0
- %nop5333 = alloca i1, i1 0
- %nop5334 = alloca i1, i1 0
- %nop5335 = alloca i1, i1 0
- %nop5336 = alloca i1, i1 0
- %nop5337 = alloca i1, i1 0
- %nop5338 = alloca i1, i1 0
- %nop5339 = alloca i1, i1 0
- %nop5340 = alloca i1, i1 0
- %nop5341 = alloca i1, i1 0
- %nop5342 = alloca i1, i1 0
- %nop5343 = alloca i1, i1 0
- %nop5344 = alloca i1, i1 0
- %nop5345 = alloca i1, i1 0
- %nop5346 = alloca i1, i1 0
- %nop5347 = alloca i1, i1 0
- %nop5348 = alloca i1, i1 0
- %nop5349 = alloca i1, i1 0
- %nop5350 = alloca i1, i1 0
- %nop5351 = alloca i1, i1 0
- %nop5352 = alloca i1, i1 0
- %nop5353 = alloca i1, i1 0
- %nop5354 = alloca i1, i1 0
- %nop5355 = alloca i1, i1 0
- %nop5356 = alloca i1, i1 0
- %nop5357 = alloca i1, i1 0
- %nop5358 = alloca i1, i1 0
- %nop5359 = alloca i1, i1 0
- %nop5360 = alloca i1, i1 0
- %nop5361 = alloca i1, i1 0
- %nop5362 = alloca i1, i1 0
- %nop5363 = alloca i1, i1 0
- %nop5364 = alloca i1, i1 0
- %nop5365 = alloca i1, i1 0
- %nop5366 = alloca i1, i1 0
- %nop5367 = alloca i1, i1 0
- %nop5368 = alloca i1, i1 0
- %nop5369 = alloca i1, i1 0
- %nop5370 = alloca i1, i1 0
- %nop5371 = alloca i1, i1 0
- %nop5372 = alloca i1, i1 0
- %nop5373 = alloca i1, i1 0
- %nop5374 = alloca i1, i1 0
- %nop5375 = alloca i1, i1 0
- %nop5376 = alloca i1, i1 0
- %nop5377 = alloca i1, i1 0
- %nop5378 = alloca i1, i1 0
- %nop5379 = alloca i1, i1 0
- %nop5380 = alloca i1, i1 0
- %nop5381 = alloca i1, i1 0
- %nop5382 = alloca i1, i1 0
- %nop5383 = alloca i1, i1 0
- %nop5384 = alloca i1, i1 0
- %nop5385 = alloca i1, i1 0
- %nop5386 = alloca i1, i1 0
- %nop5387 = alloca i1, i1 0
- %nop5388 = alloca i1, i1 0
- %nop5389 = alloca i1, i1 0
- %nop5390 = alloca i1, i1 0
- %nop5391 = alloca i1, i1 0
- %nop5392 = alloca i1, i1 0
- %nop5393 = alloca i1, i1 0
- %nop5394 = alloca i1, i1 0
- %nop5395 = alloca i1, i1 0
- %nop5396 = alloca i1, i1 0
- %nop5397 = alloca i1, i1 0
- %nop5398 = alloca i1, i1 0
- %nop5399 = alloca i1, i1 0
- %nop5400 = alloca i1, i1 0
- %nop5401 = alloca i1, i1 0
- %nop5402 = alloca i1, i1 0
- %nop5403 = alloca i1, i1 0
- %nop5404 = alloca i1, i1 0
- %nop5405 = alloca i1, i1 0
- %nop5406 = alloca i1, i1 0
- %nop5407 = alloca i1, i1 0
- %nop5408 = alloca i1, i1 0
- %nop5409 = alloca i1, i1 0
- %nop5410 = alloca i1, i1 0
- %nop5411 = alloca i1, i1 0
- %nop5412 = alloca i1, i1 0
- %nop5413 = alloca i1, i1 0
- %nop5414 = alloca i1, i1 0
- %nop5415 = alloca i1, i1 0
- %nop5416 = alloca i1, i1 0
- %nop5417 = alloca i1, i1 0
- %nop5418 = alloca i1, i1 0
- %nop5419 = alloca i1, i1 0
- %nop5420 = alloca i1, i1 0
- %nop5421 = alloca i1, i1 0
- %nop5422 = alloca i1, i1 0
- %nop5423 = alloca i1, i1 0
- %nop5424 = alloca i1, i1 0
- %nop5425 = alloca i1, i1 0
- %nop5426 = alloca i1, i1 0
- %nop5427 = alloca i1, i1 0
- %nop5428 = alloca i1, i1 0
- %nop5429 = alloca i1, i1 0
- %nop5430 = alloca i1, i1 0
- %nop5431 = alloca i1, i1 0
- %nop5432 = alloca i1, i1 0
- %nop5433 = alloca i1, i1 0
- %nop5434 = alloca i1, i1 0
- %nop5435 = alloca i1, i1 0
- %nop5436 = alloca i1, i1 0
- %nop5437 = alloca i1, i1 0
- %nop5438 = alloca i1, i1 0
- %nop5439 = alloca i1, i1 0
- %nop5440 = alloca i1, i1 0
- %nop5441 = alloca i1, i1 0
- %nop5442 = alloca i1, i1 0
- %nop5443 = alloca i1, i1 0
- %nop5444 = alloca i1, i1 0
- %nop5445 = alloca i1, i1 0
- %nop5446 = alloca i1, i1 0
- %nop5447 = alloca i1, i1 0
- %nop5448 = alloca i1, i1 0
- %nop5449 = alloca i1, i1 0
- %nop5450 = alloca i1, i1 0
- %nop5451 = alloca i1, i1 0
- %nop5452 = alloca i1, i1 0
- %nop5453 = alloca i1, i1 0
- %nop5454 = alloca i1, i1 0
- %nop5455 = alloca i1, i1 0
- %nop5456 = alloca i1, i1 0
- %nop5457 = alloca i1, i1 0
- %nop5458 = alloca i1, i1 0
- %nop5459 = alloca i1, i1 0
- %nop5460 = alloca i1, i1 0
- %nop5461 = alloca i1, i1 0
- %nop5462 = alloca i1, i1 0
- %nop5463 = alloca i1, i1 0
- %nop5464 = alloca i1, i1 0
- %nop5465 = alloca i1, i1 0
- %nop5466 = alloca i1, i1 0
- %nop5467 = alloca i1, i1 0
- %nop5468 = alloca i1, i1 0
- %nop5469 = alloca i1, i1 0
- %nop5470 = alloca i1, i1 0
- %nop5471 = alloca i1, i1 0
- %nop5472 = alloca i1, i1 0
- %nop5473 = alloca i1, i1 0
- %nop5474 = alloca i1, i1 0
- %nop5475 = alloca i1, i1 0
- %nop5476 = alloca i1, i1 0
- %nop5477 = alloca i1, i1 0
- %nop5478 = alloca i1, i1 0
- %nop5479 = alloca i1, i1 0
- %nop5480 = alloca i1, i1 0
- %nop5481 = alloca i1, i1 0
- %nop5482 = alloca i1, i1 0
- %nop5483 = alloca i1, i1 0
- %nop5484 = alloca i1, i1 0
- %nop5485 = alloca i1, i1 0
- %nop5486 = alloca i1, i1 0
- %nop5487 = alloca i1, i1 0
- %nop5488 = alloca i1, i1 0
- %nop5489 = alloca i1, i1 0
- %nop5490 = alloca i1, i1 0
- %nop5491 = alloca i1, i1 0
- %nop5492 = alloca i1, i1 0
- %nop5493 = alloca i1, i1 0
- %nop5494 = alloca i1, i1 0
- %nop5495 = alloca i1, i1 0
- %nop5496 = alloca i1, i1 0
- %nop5497 = alloca i1, i1 0
- %nop5498 = alloca i1, i1 0
- %nop5499 = alloca i1, i1 0
- %nop5500 = alloca i1, i1 0
- %nop5501 = alloca i1, i1 0
- %nop5502 = alloca i1, i1 0
- %nop5503 = alloca i1, i1 0
- %nop5504 = alloca i1, i1 0
- %nop5505 = alloca i1, i1 0
- %nop5506 = alloca i1, i1 0
- %nop5507 = alloca i1, i1 0
- %nop5508 = alloca i1, i1 0
- %nop5509 = alloca i1, i1 0
- %nop5510 = alloca i1, i1 0
- %nop5511 = alloca i1, i1 0
- %nop5512 = alloca i1, i1 0
- %nop5513 = alloca i1, i1 0
- %nop5514 = alloca i1, i1 0
- %nop5515 = alloca i1, i1 0
- %nop5516 = alloca i1, i1 0
- %nop5517 = alloca i1, i1 0
- %nop5518 = alloca i1, i1 0
- %nop5519 = alloca i1, i1 0
- %nop5520 = alloca i1, i1 0
- %nop5521 = alloca i1, i1 0
- %nop5522 = alloca i1, i1 0
- %nop5523 = alloca i1, i1 0
- %nop5524 = alloca i1, i1 0
- %nop5525 = alloca i1, i1 0
- %nop5526 = alloca i1, i1 0
- %nop5527 = alloca i1, i1 0
- %nop5528 = alloca i1, i1 0
- %nop5529 = alloca i1, i1 0
- %nop5530 = alloca i1, i1 0
- %nop5531 = alloca i1, i1 0
- %nop5532 = alloca i1, i1 0
- %nop5533 = alloca i1, i1 0
- %nop5534 = alloca i1, i1 0
- %nop5535 = alloca i1, i1 0
- %nop5536 = alloca i1, i1 0
- %nop5537 = alloca i1, i1 0
- %nop5538 = alloca i1, i1 0
- %nop5539 = alloca i1, i1 0
- %nop5540 = alloca i1, i1 0
- %nop5541 = alloca i1, i1 0
- %nop5542 = alloca i1, i1 0
- %nop5543 = alloca i1, i1 0
- %nop5544 = alloca i1, i1 0
- %nop5545 = alloca i1, i1 0
- %nop5546 = alloca i1, i1 0
- %nop5547 = alloca i1, i1 0
- %nop5548 = alloca i1, i1 0
- %nop5549 = alloca i1, i1 0
- %nop5550 = alloca i1, i1 0
- %nop5551 = alloca i1, i1 0
- %nop5552 = alloca i1, i1 0
- %nop5553 = alloca i1, i1 0
- %nop5554 = alloca i1, i1 0
- %nop5555 = alloca i1, i1 0
- %nop5556 = alloca i1, i1 0
- %nop5557 = alloca i1, i1 0
- %nop5558 = alloca i1, i1 0
- %nop5559 = alloca i1, i1 0
- %nop5560 = alloca i1, i1 0
- %nop5561 = alloca i1, i1 0
- %nop5562 = alloca i1, i1 0
- %nop5563 = alloca i1, i1 0
- %nop5564 = alloca i1, i1 0
- %nop5565 = alloca i1, i1 0
- %nop5566 = alloca i1, i1 0
- %nop5567 = alloca i1, i1 0
- %nop5568 = alloca i1, i1 0
- %nop5569 = alloca i1, i1 0
- %nop5570 = alloca i1, i1 0
- %nop5571 = alloca i1, i1 0
- %nop5572 = alloca i1, i1 0
- %nop5573 = alloca i1, i1 0
- %nop5574 = alloca i1, i1 0
- %nop5575 = alloca i1, i1 0
- %nop5576 = alloca i1, i1 0
- %nop5577 = alloca i1, i1 0
- %nop5578 = alloca i1, i1 0
- %nop5579 = alloca i1, i1 0
- %nop5580 = alloca i1, i1 0
- %nop5581 = alloca i1, i1 0
- %nop5582 = alloca i1, i1 0
- %nop5583 = alloca i1, i1 0
- %nop5584 = alloca i1, i1 0
- %nop5585 = alloca i1, i1 0
- %nop5586 = alloca i1, i1 0
- %nop5587 = alloca i1, i1 0
- %nop5588 = alloca i1, i1 0
- %nop5589 = alloca i1, i1 0
- %nop5590 = alloca i1, i1 0
- %nop5591 = alloca i1, i1 0
- %nop5592 = alloca i1, i1 0
- %nop5593 = alloca i1, i1 0
- %nop5594 = alloca i1, i1 0
- %nop5595 = alloca i1, i1 0
- %nop5596 = alloca i1, i1 0
- %nop5597 = alloca i1, i1 0
- %nop5598 = alloca i1, i1 0
- %nop5599 = alloca i1, i1 0
- %nop5600 = alloca i1, i1 0
- %nop5601 = alloca i1, i1 0
- %nop5602 = alloca i1, i1 0
- %nop5603 = alloca i1, i1 0
- %nop5604 = alloca i1, i1 0
- %nop5605 = alloca i1, i1 0
- %nop5606 = alloca i1, i1 0
- %nop5607 = alloca i1, i1 0
- %nop5608 = alloca i1, i1 0
- %nop5609 = alloca i1, i1 0
- %nop5610 = alloca i1, i1 0
- %nop5611 = alloca i1, i1 0
- %nop5612 = alloca i1, i1 0
- %nop5613 = alloca i1, i1 0
- %nop5614 = alloca i1, i1 0
- %nop5615 = alloca i1, i1 0
- %nop5616 = alloca i1, i1 0
- %nop5617 = alloca i1, i1 0
- %nop5618 = alloca i1, i1 0
- %nop5619 = alloca i1, i1 0
- %nop5620 = alloca i1, i1 0
- %nop5621 = alloca i1, i1 0
- %nop5622 = alloca i1, i1 0
- %nop5623 = alloca i1, i1 0
- %nop5624 = alloca i1, i1 0
- %nop5625 = alloca i1, i1 0
- %nop5626 = alloca i1, i1 0
- %nop5627 = alloca i1, i1 0
- %nop5628 = alloca i1, i1 0
- %nop5629 = alloca i1, i1 0
- %nop5630 = alloca i1, i1 0
- %nop5631 = alloca i1, i1 0
- %nop5632 = alloca i1, i1 0
- %nop5633 = alloca i1, i1 0
- %nop5634 = alloca i1, i1 0
- %nop5635 = alloca i1, i1 0
- %nop5636 = alloca i1, i1 0
- %nop5637 = alloca i1, i1 0
- %nop5638 = alloca i1, i1 0
- %nop5639 = alloca i1, i1 0
- %nop5640 = alloca i1, i1 0
- %nop5641 = alloca i1, i1 0
- %nop5642 = alloca i1, i1 0
- %nop5643 = alloca i1, i1 0
- %nop5644 = alloca i1, i1 0
- %nop5645 = alloca i1, i1 0
- %nop5646 = alloca i1, i1 0
- %nop5647 = alloca i1, i1 0
- %nop5648 = alloca i1, i1 0
- %nop5649 = alloca i1, i1 0
- %nop5650 = alloca i1, i1 0
- %nop5651 = alloca i1, i1 0
- %nop5652 = alloca i1, i1 0
- %nop5653 = alloca i1, i1 0
- %nop5654 = alloca i1, i1 0
- %nop5655 = alloca i1, i1 0
- %nop5656 = alloca i1, i1 0
- %nop5657 = alloca i1, i1 0
- %nop5658 = alloca i1, i1 0
- %nop5659 = alloca i1, i1 0
- %nop5660 = alloca i1, i1 0
- %nop5661 = alloca i1, i1 0
- %nop5662 = alloca i1, i1 0
- %nop5663 = alloca i1, i1 0
- %nop5664 = alloca i1, i1 0
- %nop5665 = alloca i1, i1 0
- %nop5666 = alloca i1, i1 0
- %nop5667 = alloca i1, i1 0
- %nop5668 = alloca i1, i1 0
- %nop5669 = alloca i1, i1 0
- %nop5670 = alloca i1, i1 0
- %nop5671 = alloca i1, i1 0
- %nop5672 = alloca i1, i1 0
- %nop5673 = alloca i1, i1 0
- %nop5674 = alloca i1, i1 0
- %nop5675 = alloca i1, i1 0
- %nop5676 = alloca i1, i1 0
- %nop5677 = alloca i1, i1 0
- %nop5678 = alloca i1, i1 0
- %nop5679 = alloca i1, i1 0
- %nop5680 = alloca i1, i1 0
- %nop5681 = alloca i1, i1 0
- %nop5682 = alloca i1, i1 0
- %nop5683 = alloca i1, i1 0
- %nop5684 = alloca i1, i1 0
- %nop5685 = alloca i1, i1 0
- %nop5686 = alloca i1, i1 0
- %nop5687 = alloca i1, i1 0
- %nop5688 = alloca i1, i1 0
- %nop5689 = alloca i1, i1 0
- %nop5690 = alloca i1, i1 0
- %nop5691 = alloca i1, i1 0
- %nop5692 = alloca i1, i1 0
- %nop5693 = alloca i1, i1 0
- %nop5694 = alloca i1, i1 0
- %nop5695 = alloca i1, i1 0
- %nop5696 = alloca i1, i1 0
- %nop5697 = alloca i1, i1 0
- %nop5698 = alloca i1, i1 0
- %nop5699 = alloca i1, i1 0
- %nop5700 = alloca i1, i1 0
- %nop5701 = alloca i1, i1 0
- %nop5702 = alloca i1, i1 0
- %nop5703 = alloca i1, i1 0
- %nop5704 = alloca i1, i1 0
- %nop5705 = alloca i1, i1 0
- %nop5706 = alloca i1, i1 0
- %nop5707 = alloca i1, i1 0
- %nop5708 = alloca i1, i1 0
- %nop5709 = alloca i1, i1 0
- %nop5710 = alloca i1, i1 0
- %nop5711 = alloca i1, i1 0
- %nop5712 = alloca i1, i1 0
- %nop5713 = alloca i1, i1 0
- %nop5714 = alloca i1, i1 0
- %nop5715 = alloca i1, i1 0
- %nop5716 = alloca i1, i1 0
- %nop5717 = alloca i1, i1 0
- %nop5718 = alloca i1, i1 0
- %nop5719 = alloca i1, i1 0
- %nop5720 = alloca i1, i1 0
- %nop5721 = alloca i1, i1 0
- %nop5722 = alloca i1, i1 0
- %nop5723 = alloca i1, i1 0
- %nop5724 = alloca i1, i1 0
- %nop5725 = alloca i1, i1 0
- %nop5726 = alloca i1, i1 0
- %nop5727 = alloca i1, i1 0
- %nop5728 = alloca i1, i1 0
- %nop5729 = alloca i1, i1 0
- %nop5730 = alloca i1, i1 0
- %nop5731 = alloca i1, i1 0
- %nop5732 = alloca i1, i1 0
- %nop5733 = alloca i1, i1 0
- %nop5734 = alloca i1, i1 0
- %nop5735 = alloca i1, i1 0
- %nop5736 = alloca i1, i1 0
- %nop5737 = alloca i1, i1 0
- %nop5738 = alloca i1, i1 0
- %nop5739 = alloca i1, i1 0
- %nop5740 = alloca i1, i1 0
- %nop5741 = alloca i1, i1 0
- %nop5742 = alloca i1, i1 0
- %nop5743 = alloca i1, i1 0
- %nop5744 = alloca i1, i1 0
- %nop5745 = alloca i1, i1 0
- %nop5746 = alloca i1, i1 0
- %nop5747 = alloca i1, i1 0
- %nop5748 = alloca i1, i1 0
- %nop5749 = alloca i1, i1 0
- %nop5750 = alloca i1, i1 0
- %nop5751 = alloca i1, i1 0
- %nop5752 = alloca i1, i1 0
- %nop5753 = alloca i1, i1 0
- %nop5754 = alloca i1, i1 0
- %nop5755 = alloca i1, i1 0
- %nop5756 = alloca i1, i1 0
- %nop5757 = alloca i1, i1 0
- %nop5758 = alloca i1, i1 0
- %nop5759 = alloca i1, i1 0
- %nop5760 = alloca i1, i1 0
- %nop5761 = alloca i1, i1 0
- %nop5762 = alloca i1, i1 0
- %nop5763 = alloca i1, i1 0
- %nop5764 = alloca i1, i1 0
- %nop5765 = alloca i1, i1 0
- %nop5766 = alloca i1, i1 0
- %nop5767 = alloca i1, i1 0
- %nop5768 = alloca i1, i1 0
- %nop5769 = alloca i1, i1 0
- %nop5770 = alloca i1, i1 0
- %nop5771 = alloca i1, i1 0
- %nop5772 = alloca i1, i1 0
- %nop5773 = alloca i1, i1 0
- %nop5774 = alloca i1, i1 0
- %nop5775 = alloca i1, i1 0
- %nop5776 = alloca i1, i1 0
- %nop5777 = alloca i1, i1 0
- %nop5778 = alloca i1, i1 0
- %nop5779 = alloca i1, i1 0
- %nop5780 = alloca i1, i1 0
- %nop5781 = alloca i1, i1 0
- %nop5782 = alloca i1, i1 0
- %nop5783 = alloca i1, i1 0
- %nop5784 = alloca i1, i1 0
- %nop5785 = alloca i1, i1 0
- %nop5786 = alloca i1, i1 0
- %nop5787 = alloca i1, i1 0
- %nop5788 = alloca i1, i1 0
- %nop5789 = alloca i1, i1 0
- %nop5790 = alloca i1, i1 0
- %nop5791 = alloca i1, i1 0
- %nop5792 = alloca i1, i1 0
- %nop5793 = alloca i1, i1 0
- %nop5794 = alloca i1, i1 0
- %nop5795 = alloca i1, i1 0
- %nop5796 = alloca i1, i1 0
- %nop5797 = alloca i1, i1 0
- %nop5798 = alloca i1, i1 0
- %nop5799 = alloca i1, i1 0
- %nop5800 = alloca i1, i1 0
- %nop5801 = alloca i1, i1 0
- %nop5802 = alloca i1, i1 0
- %nop5803 = alloca i1, i1 0
- %nop5804 = alloca i1, i1 0
- %nop5805 = alloca i1, i1 0
- %nop5806 = alloca i1, i1 0
- %nop5807 = alloca i1, i1 0
- %nop5808 = alloca i1, i1 0
- %nop5809 = alloca i1, i1 0
- %nop5810 = alloca i1, i1 0
- %nop5811 = alloca i1, i1 0
- %nop5812 = alloca i1, i1 0
- %nop5813 = alloca i1, i1 0
- %nop5814 = alloca i1, i1 0
- %nop5815 = alloca i1, i1 0
- %nop5816 = alloca i1, i1 0
- %nop5817 = alloca i1, i1 0
- %nop5818 = alloca i1, i1 0
- %nop5819 = alloca i1, i1 0
- %nop5820 = alloca i1, i1 0
- %nop5821 = alloca i1, i1 0
- %nop5822 = alloca i1, i1 0
- %nop5823 = alloca i1, i1 0
- %nop5824 = alloca i1, i1 0
- %nop5825 = alloca i1, i1 0
- %nop5826 = alloca i1, i1 0
- %nop5827 = alloca i1, i1 0
- %nop5828 = alloca i1, i1 0
- %nop5829 = alloca i1, i1 0
- %nop5830 = alloca i1, i1 0
- %nop5831 = alloca i1, i1 0
- %nop5832 = alloca i1, i1 0
- %nop5833 = alloca i1, i1 0
- %nop5834 = alloca i1, i1 0
- %nop5835 = alloca i1, i1 0
- %nop5836 = alloca i1, i1 0
- %nop5837 = alloca i1, i1 0
- %nop5838 = alloca i1, i1 0
- %nop5839 = alloca i1, i1 0
- %nop5840 = alloca i1, i1 0
- %nop5841 = alloca i1, i1 0
- %nop5842 = alloca i1, i1 0
- %nop5843 = alloca i1, i1 0
- %nop5844 = alloca i1, i1 0
- %nop5845 = alloca i1, i1 0
- %nop5846 = alloca i1, i1 0
- %nop5847 = alloca i1, i1 0
- %nop5848 = alloca i1, i1 0
- %nop5849 = alloca i1, i1 0
- %nop5850 = alloca i1, i1 0
- %nop5851 = alloca i1, i1 0
- %nop5852 = alloca i1, i1 0
- %nop5853 = alloca i1, i1 0
- %nop5854 = alloca i1, i1 0
- %nop5855 = alloca i1, i1 0
- %nop5856 = alloca i1, i1 0
- %nop5857 = alloca i1, i1 0
- %nop5858 = alloca i1, i1 0
- %nop5859 = alloca i1, i1 0
- %nop5860 = alloca i1, i1 0
- %nop5861 = alloca i1, i1 0
- %nop5862 = alloca i1, i1 0
- %nop5863 = alloca i1, i1 0
- %nop5864 = alloca i1, i1 0
- %nop5865 = alloca i1, i1 0
- %nop5866 = alloca i1, i1 0
- %nop5867 = alloca i1, i1 0
- %nop5868 = alloca i1, i1 0
- %nop5869 = alloca i1, i1 0
- %nop5870 = alloca i1, i1 0
- %nop5871 = alloca i1, i1 0
- %nop5872 = alloca i1, i1 0
- %nop5873 = alloca i1, i1 0
- %nop5874 = alloca i1, i1 0
- %nop5875 = alloca i1, i1 0
- %nop5876 = alloca i1, i1 0
- %nop5877 = alloca i1, i1 0
- %nop5878 = alloca i1, i1 0
- %nop5879 = alloca i1, i1 0
- %nop5880 = alloca i1, i1 0
- %nop5881 = alloca i1, i1 0
- %nop5882 = alloca i1, i1 0
- %nop5883 = alloca i1, i1 0
- %nop5884 = alloca i1, i1 0
- %nop5885 = alloca i1, i1 0
- %nop5886 = alloca i1, i1 0
- %nop5887 = alloca i1, i1 0
- %nop5888 = alloca i1, i1 0
- %nop5889 = alloca i1, i1 0
- %nop5890 = alloca i1, i1 0
- %nop5891 = alloca i1, i1 0
- %nop5892 = alloca i1, i1 0
- %nop5893 = alloca i1, i1 0
- %nop5894 = alloca i1, i1 0
- %nop5895 = alloca i1, i1 0
- %nop5896 = alloca i1, i1 0
- %nop5897 = alloca i1, i1 0
- %nop5898 = alloca i1, i1 0
- %nop5899 = alloca i1, i1 0
- %nop5900 = alloca i1, i1 0
- %nop5901 = alloca i1, i1 0
- %nop5902 = alloca i1, i1 0
- %nop5903 = alloca i1, i1 0
- %nop5904 = alloca i1, i1 0
- %nop5905 = alloca i1, i1 0
- %nop5906 = alloca i1, i1 0
- %nop5907 = alloca i1, i1 0
- %nop5908 = alloca i1, i1 0
- %nop5909 = alloca i1, i1 0
- %nop5910 = alloca i1, i1 0
- %nop5911 = alloca i1, i1 0
- %nop5912 = alloca i1, i1 0
- %nop5913 = alloca i1, i1 0
- %nop5914 = alloca i1, i1 0
- %nop5915 = alloca i1, i1 0
- %nop5916 = alloca i1, i1 0
- %nop5917 = alloca i1, i1 0
- %nop5918 = alloca i1, i1 0
- %nop5919 = alloca i1, i1 0
- %nop5920 = alloca i1, i1 0
- %nop5921 = alloca i1, i1 0
- %nop5922 = alloca i1, i1 0
- %nop5923 = alloca i1, i1 0
- %nop5924 = alloca i1, i1 0
- %nop5925 = alloca i1, i1 0
- %nop5926 = alloca i1, i1 0
- %nop5927 = alloca i1, i1 0
- %nop5928 = alloca i1, i1 0
- %nop5929 = alloca i1, i1 0
- %nop5930 = alloca i1, i1 0
- %nop5931 = alloca i1, i1 0
- %nop5932 = alloca i1, i1 0
- %nop5933 = alloca i1, i1 0
- %nop5934 = alloca i1, i1 0
- %nop5935 = alloca i1, i1 0
- %nop5936 = alloca i1, i1 0
- %nop5937 = alloca i1, i1 0
- %nop5938 = alloca i1, i1 0
- %nop5939 = alloca i1, i1 0
- %nop5940 = alloca i1, i1 0
- %nop5941 = alloca i1, i1 0
- %nop5942 = alloca i1, i1 0
- %nop5943 = alloca i1, i1 0
- %nop5944 = alloca i1, i1 0
- %nop5945 = alloca i1, i1 0
- %nop5946 = alloca i1, i1 0
- %nop5947 = alloca i1, i1 0
- %nop5948 = alloca i1, i1 0
- %nop5949 = alloca i1, i1 0
- %nop5950 = alloca i1, i1 0
- %nop5951 = alloca i1, i1 0
- %nop5952 = alloca i1, i1 0
- %nop5953 = alloca i1, i1 0
- %nop5954 = alloca i1, i1 0
- %nop5955 = alloca i1, i1 0
- %nop5956 = alloca i1, i1 0
- %nop5957 = alloca i1, i1 0
- %nop5958 = alloca i1, i1 0
- %nop5959 = alloca i1, i1 0
- %nop5960 = alloca i1, i1 0
- %nop5961 = alloca i1, i1 0
- %nop5962 = alloca i1, i1 0
- %nop5963 = alloca i1, i1 0
- %nop5964 = alloca i1, i1 0
- %nop5965 = alloca i1, i1 0
- %nop5966 = alloca i1, i1 0
- %nop5967 = alloca i1, i1 0
- %nop5968 = alloca i1, i1 0
- %nop5969 = alloca i1, i1 0
- %nop5970 = alloca i1, i1 0
- %nop5971 = alloca i1, i1 0
- %nop5972 = alloca i1, i1 0
- %nop5973 = alloca i1, i1 0
- %nop5974 = alloca i1, i1 0
- %nop5975 = alloca i1, i1 0
- %nop5976 = alloca i1, i1 0
- %nop5977 = alloca i1, i1 0
- %nop5978 = alloca i1, i1 0
- %nop5979 = alloca i1, i1 0
- %nop5980 = alloca i1, i1 0
- %nop5981 = alloca i1, i1 0
- %nop5982 = alloca i1, i1 0
- %nop5983 = alloca i1, i1 0
- %nop5984 = alloca i1, i1 0
- %nop5985 = alloca i1, i1 0
- %nop5986 = alloca i1, i1 0
- %nop5987 = alloca i1, i1 0
- %nop5988 = alloca i1, i1 0
- %nop5989 = alloca i1, i1 0
- %nop5990 = alloca i1, i1 0
- %nop5991 = alloca i1, i1 0
- %nop5992 = alloca i1, i1 0
- %nop5993 = alloca i1, i1 0
- %nop5994 = alloca i1, i1 0
- %nop5995 = alloca i1, i1 0
- %nop5996 = alloca i1, i1 0
- %nop5997 = alloca i1, i1 0
- %nop5998 = alloca i1, i1 0
- %nop5999 = alloca i1, i1 0
- %nop6000 = alloca i1, i1 0
- %nop6001 = alloca i1, i1 0
- %nop6002 = alloca i1, i1 0
- %nop6003 = alloca i1, i1 0
- %nop6004 = alloca i1, i1 0
- %nop6005 = alloca i1, i1 0
- %nop6006 = alloca i1, i1 0
- %nop6007 = alloca i1, i1 0
- %nop6008 = alloca i1, i1 0
- %nop6009 = alloca i1, i1 0
- %nop6010 = alloca i1, i1 0
- %nop6011 = alloca i1, i1 0
- %nop6012 = alloca i1, i1 0
- %nop6013 = alloca i1, i1 0
- %nop6014 = alloca i1, i1 0
- %nop6015 = alloca i1, i1 0
- %nop6016 = alloca i1, i1 0
- %nop6017 = alloca i1, i1 0
- %nop6018 = alloca i1, i1 0
- %nop6019 = alloca i1, i1 0
- %nop6020 = alloca i1, i1 0
- %nop6021 = alloca i1, i1 0
- %nop6022 = alloca i1, i1 0
- %nop6023 = alloca i1, i1 0
- %nop6024 = alloca i1, i1 0
- %nop6025 = alloca i1, i1 0
- %nop6026 = alloca i1, i1 0
- %nop6027 = alloca i1, i1 0
- %nop6028 = alloca i1, i1 0
- %nop6029 = alloca i1, i1 0
- %nop6030 = alloca i1, i1 0
- %nop6031 = alloca i1, i1 0
- %nop6032 = alloca i1, i1 0
- %nop6033 = alloca i1, i1 0
- %nop6034 = alloca i1, i1 0
- %nop6035 = alloca i1, i1 0
- %nop6036 = alloca i1, i1 0
- %nop6037 = alloca i1, i1 0
- %nop6038 = alloca i1, i1 0
- %nop6039 = alloca i1, i1 0
- %nop6040 = alloca i1, i1 0
- %nop6041 = alloca i1, i1 0
- %nop6042 = alloca i1, i1 0
- %nop6043 = alloca i1, i1 0
- %nop6044 = alloca i1, i1 0
- %nop6045 = alloca i1, i1 0
- %nop6046 = alloca i1, i1 0
- %nop6047 = alloca i1, i1 0
- %nop6048 = alloca i1, i1 0
- %nop6049 = alloca i1, i1 0
- %nop6050 = alloca i1, i1 0
- %nop6051 = alloca i1, i1 0
- %nop6052 = alloca i1, i1 0
- %nop6053 = alloca i1, i1 0
- %nop6054 = alloca i1, i1 0
- %nop6055 = alloca i1, i1 0
- %nop6056 = alloca i1, i1 0
- %nop6057 = alloca i1, i1 0
- %nop6058 = alloca i1, i1 0
- %nop6059 = alloca i1, i1 0
- %nop6060 = alloca i1, i1 0
- %nop6061 = alloca i1, i1 0
- %nop6062 = alloca i1, i1 0
- %nop6063 = alloca i1, i1 0
- %nop6064 = alloca i1, i1 0
- %nop6065 = alloca i1, i1 0
- %nop6066 = alloca i1, i1 0
- %nop6067 = alloca i1, i1 0
- %nop6068 = alloca i1, i1 0
- %nop6069 = alloca i1, i1 0
- %nop6070 = alloca i1, i1 0
- %nop6071 = alloca i1, i1 0
- %nop6072 = alloca i1, i1 0
- %nop6073 = alloca i1, i1 0
- %nop6074 = alloca i1, i1 0
- %nop6075 = alloca i1, i1 0
- %nop6076 = alloca i1, i1 0
- %nop6077 = alloca i1, i1 0
- %nop6078 = alloca i1, i1 0
- %nop6079 = alloca i1, i1 0
- %nop6080 = alloca i1, i1 0
- %nop6081 = alloca i1, i1 0
- %nop6082 = alloca i1, i1 0
- %nop6083 = alloca i1, i1 0
- %nop6084 = alloca i1, i1 0
- %nop6085 = alloca i1, i1 0
- %nop6086 = alloca i1, i1 0
- %nop6087 = alloca i1, i1 0
- %nop6088 = alloca i1, i1 0
- %nop6089 = alloca i1, i1 0
- %nop6090 = alloca i1, i1 0
- %nop6091 = alloca i1, i1 0
- %nop6092 = alloca i1, i1 0
- %nop6093 = alloca i1, i1 0
- %nop6094 = alloca i1, i1 0
- %nop6095 = alloca i1, i1 0
- %nop6096 = alloca i1, i1 0
- %nop6097 = alloca i1, i1 0
- %nop6098 = alloca i1, i1 0
- %nop6099 = alloca i1, i1 0
- %nop6100 = alloca i1, i1 0
- %nop6101 = alloca i1, i1 0
- %nop6102 = alloca i1, i1 0
- %nop6103 = alloca i1, i1 0
- %nop6104 = alloca i1, i1 0
- %nop6105 = alloca i1, i1 0
- %nop6106 = alloca i1, i1 0
- %nop6107 = alloca i1, i1 0
- %nop6108 = alloca i1, i1 0
- %nop6109 = alloca i1, i1 0
- %nop6110 = alloca i1, i1 0
- %nop6111 = alloca i1, i1 0
- %nop6112 = alloca i1, i1 0
- %nop6113 = alloca i1, i1 0
- %nop6114 = alloca i1, i1 0
- %nop6115 = alloca i1, i1 0
- %nop6116 = alloca i1, i1 0
- %nop6117 = alloca i1, i1 0
- %nop6118 = alloca i1, i1 0
- %nop6119 = alloca i1, i1 0
- %nop6120 = alloca i1, i1 0
- %nop6121 = alloca i1, i1 0
- %nop6122 = alloca i1, i1 0
- %nop6123 = alloca i1, i1 0
- %nop6124 = alloca i1, i1 0
- %nop6125 = alloca i1, i1 0
- %nop6126 = alloca i1, i1 0
- %nop6127 = alloca i1, i1 0
- %nop6128 = alloca i1, i1 0
- %nop6129 = alloca i1, i1 0
- %nop6130 = alloca i1, i1 0
- %nop6131 = alloca i1, i1 0
- %nop6132 = alloca i1, i1 0
- %nop6133 = alloca i1, i1 0
- %nop6134 = alloca i1, i1 0
- %nop6135 = alloca i1, i1 0
- %nop6136 = alloca i1, i1 0
- %nop6137 = alloca i1, i1 0
- %nop6138 = alloca i1, i1 0
- %nop6139 = alloca i1, i1 0
- %nop6140 = alloca i1, i1 0
- %nop6141 = alloca i1, i1 0
- %nop6142 = alloca i1, i1 0
- %nop6143 = alloca i1, i1 0
- %nop6144 = alloca i1, i1 0
- %nop6145 = alloca i1, i1 0
- %nop6146 = alloca i1, i1 0
- %nop6147 = alloca i1, i1 0
- %nop6148 = alloca i1, i1 0
- %nop6149 = alloca i1, i1 0
- %nop6150 = alloca i1, i1 0
- %nop6151 = alloca i1, i1 0
- %nop6152 = alloca i1, i1 0
- %nop6153 = alloca i1, i1 0
- %nop6154 = alloca i1, i1 0
- %nop6155 = alloca i1, i1 0
- %nop6156 = alloca i1, i1 0
- %nop6157 = alloca i1, i1 0
- %nop6158 = alloca i1, i1 0
- %nop6159 = alloca i1, i1 0
- %nop6160 = alloca i1, i1 0
- %nop6161 = alloca i1, i1 0
- %nop6162 = alloca i1, i1 0
- %nop6163 = alloca i1, i1 0
- %nop6164 = alloca i1, i1 0
- %nop6165 = alloca i1, i1 0
- %nop6166 = alloca i1, i1 0
- %nop6167 = alloca i1, i1 0
- %nop6168 = alloca i1, i1 0
- %nop6169 = alloca i1, i1 0
- %nop6170 = alloca i1, i1 0
- %nop6171 = alloca i1, i1 0
- %nop6172 = alloca i1, i1 0
- %nop6173 = alloca i1, i1 0
- %nop6174 = alloca i1, i1 0
- %nop6175 = alloca i1, i1 0
- %nop6176 = alloca i1, i1 0
- %nop6177 = alloca i1, i1 0
- %nop6178 = alloca i1, i1 0
- %nop6179 = alloca i1, i1 0
- %nop6180 = alloca i1, i1 0
- %nop6181 = alloca i1, i1 0
- %nop6182 = alloca i1, i1 0
- %nop6183 = alloca i1, i1 0
- %nop6184 = alloca i1, i1 0
- %nop6185 = alloca i1, i1 0
- %nop6186 = alloca i1, i1 0
- %nop6187 = alloca i1, i1 0
- %nop6188 = alloca i1, i1 0
- %nop6189 = alloca i1, i1 0
- %nop6190 = alloca i1, i1 0
- %nop6191 = alloca i1, i1 0
- %nop6192 = alloca i1, i1 0
- %nop6193 = alloca i1, i1 0
- %nop6194 = alloca i1, i1 0
- %nop6195 = alloca i1, i1 0
- %nop6196 = alloca i1, i1 0
- %nop6197 = alloca i1, i1 0
- %nop6198 = alloca i1, i1 0
- %nop6199 = alloca i1, i1 0
- %nop6200 = alloca i1, i1 0
- %nop6201 = alloca i1, i1 0
- %nop6202 = alloca i1, i1 0
- %nop6203 = alloca i1, i1 0
- %nop6204 = alloca i1, i1 0
- %nop6205 = alloca i1, i1 0
- %nop6206 = alloca i1, i1 0
- %nop6207 = alloca i1, i1 0
- %nop6208 = alloca i1, i1 0
- %nop6209 = alloca i1, i1 0
- %nop6210 = alloca i1, i1 0
- %nop6211 = alloca i1, i1 0
- %nop6212 = alloca i1, i1 0
- %nop6213 = alloca i1, i1 0
- %nop6214 = alloca i1, i1 0
- %nop6215 = alloca i1, i1 0
- %nop6216 = alloca i1, i1 0
- %nop6217 = alloca i1, i1 0
- %nop6218 = alloca i1, i1 0
- %nop6219 = alloca i1, i1 0
- %nop6220 = alloca i1, i1 0
- %nop6221 = alloca i1, i1 0
- %nop6222 = alloca i1, i1 0
- %nop6223 = alloca i1, i1 0
- %nop6224 = alloca i1, i1 0
- %nop6225 = alloca i1, i1 0
- %nop6226 = alloca i1, i1 0
- %nop6227 = alloca i1, i1 0
- %nop6228 = alloca i1, i1 0
- %nop6229 = alloca i1, i1 0
- %nop6230 = alloca i1, i1 0
- %nop6231 = alloca i1, i1 0
- %nop6232 = alloca i1, i1 0
- %nop6233 = alloca i1, i1 0
- %nop6234 = alloca i1, i1 0
- %nop6235 = alloca i1, i1 0
- %nop6236 = alloca i1, i1 0
- %nop6237 = alloca i1, i1 0
- %nop6238 = alloca i1, i1 0
- %nop6239 = alloca i1, i1 0
- %nop6240 = alloca i1, i1 0
- %nop6241 = alloca i1, i1 0
- %nop6242 = alloca i1, i1 0
- %nop6243 = alloca i1, i1 0
- %nop6244 = alloca i1, i1 0
- %nop6245 = alloca i1, i1 0
- %nop6246 = alloca i1, i1 0
- %nop6247 = alloca i1, i1 0
- %nop6248 = alloca i1, i1 0
- %nop6249 = alloca i1, i1 0
- %nop6250 = alloca i1, i1 0
- %nop6251 = alloca i1, i1 0
- %nop6252 = alloca i1, i1 0
- %nop6253 = alloca i1, i1 0
- %nop6254 = alloca i1, i1 0
- %nop6255 = alloca i1, i1 0
- %nop6256 = alloca i1, i1 0
- %nop6257 = alloca i1, i1 0
- %nop6258 = alloca i1, i1 0
- %nop6259 = alloca i1, i1 0
- %nop6260 = alloca i1, i1 0
- %nop6261 = alloca i1, i1 0
- %nop6262 = alloca i1, i1 0
- %nop6263 = alloca i1, i1 0
- %nop6264 = alloca i1, i1 0
- %nop6265 = alloca i1, i1 0
- %nop6266 = alloca i1, i1 0
- %nop6267 = alloca i1, i1 0
- %nop6268 = alloca i1, i1 0
- %nop6269 = alloca i1, i1 0
- %nop6270 = alloca i1, i1 0
- %nop6271 = alloca i1, i1 0
- %nop6272 = alloca i1, i1 0
- %nop6273 = alloca i1, i1 0
- %nop6274 = alloca i1, i1 0
- %nop6275 = alloca i1, i1 0
- %nop6276 = alloca i1, i1 0
- %nop6277 = alloca i1, i1 0
- %nop6278 = alloca i1, i1 0
- %nop6279 = alloca i1, i1 0
- %nop6280 = alloca i1, i1 0
- %nop6281 = alloca i1, i1 0
- %nop6282 = alloca i1, i1 0
- %nop6283 = alloca i1, i1 0
- %nop6284 = alloca i1, i1 0
- %nop6285 = alloca i1, i1 0
- %nop6286 = alloca i1, i1 0
- %nop6287 = alloca i1, i1 0
- %nop6288 = alloca i1, i1 0
- %nop6289 = alloca i1, i1 0
- %nop6290 = alloca i1, i1 0
- %nop6291 = alloca i1, i1 0
- %nop6292 = alloca i1, i1 0
- %nop6293 = alloca i1, i1 0
- %nop6294 = alloca i1, i1 0
- %nop6295 = alloca i1, i1 0
- %nop6296 = alloca i1, i1 0
- %nop6297 = alloca i1, i1 0
- %nop6298 = alloca i1, i1 0
- %nop6299 = alloca i1, i1 0
- %nop6300 = alloca i1, i1 0
- %nop6301 = alloca i1, i1 0
- %nop6302 = alloca i1, i1 0
- %nop6303 = alloca i1, i1 0
- %nop6304 = alloca i1, i1 0
- %nop6305 = alloca i1, i1 0
- %nop6306 = alloca i1, i1 0
- %nop6307 = alloca i1, i1 0
- %nop6308 = alloca i1, i1 0
- %nop6309 = alloca i1, i1 0
- %nop6310 = alloca i1, i1 0
- %nop6311 = alloca i1, i1 0
- %nop6312 = alloca i1, i1 0
- %nop6313 = alloca i1, i1 0
- %nop6314 = alloca i1, i1 0
- %nop6315 = alloca i1, i1 0
- %nop6316 = alloca i1, i1 0
- %nop6317 = alloca i1, i1 0
- %nop6318 = alloca i1, i1 0
- %nop6319 = alloca i1, i1 0
- %nop6320 = alloca i1, i1 0
- %nop6321 = alloca i1, i1 0
- %nop6322 = alloca i1, i1 0
- %nop6323 = alloca i1, i1 0
- %nop6324 = alloca i1, i1 0
- %nop6325 = alloca i1, i1 0
- %nop6326 = alloca i1, i1 0
- %nop6327 = alloca i1, i1 0
- %nop6328 = alloca i1, i1 0
- %nop6329 = alloca i1, i1 0
- %nop6330 = alloca i1, i1 0
- %nop6331 = alloca i1, i1 0
- %nop6332 = alloca i1, i1 0
- %nop6333 = alloca i1, i1 0
- %nop6334 = alloca i1, i1 0
- %nop6335 = alloca i1, i1 0
- %nop6336 = alloca i1, i1 0
- %nop6337 = alloca i1, i1 0
- %nop6338 = alloca i1, i1 0
- %nop6339 = alloca i1, i1 0
- %nop6340 = alloca i1, i1 0
- %nop6341 = alloca i1, i1 0
- %nop6342 = alloca i1, i1 0
- %nop6343 = alloca i1, i1 0
- %nop6344 = alloca i1, i1 0
- %nop6345 = alloca i1, i1 0
- %nop6346 = alloca i1, i1 0
- %nop6347 = alloca i1, i1 0
- %nop6348 = alloca i1, i1 0
- %nop6349 = alloca i1, i1 0
- %nop6350 = alloca i1, i1 0
- %nop6351 = alloca i1, i1 0
- %nop6352 = alloca i1, i1 0
- %nop6353 = alloca i1, i1 0
- %nop6354 = alloca i1, i1 0
- %nop6355 = alloca i1, i1 0
- %nop6356 = alloca i1, i1 0
- %nop6357 = alloca i1, i1 0
- %nop6358 = alloca i1, i1 0
- %nop6359 = alloca i1, i1 0
- %nop6360 = alloca i1, i1 0
- %nop6361 = alloca i1, i1 0
- %nop6362 = alloca i1, i1 0
- %nop6363 = alloca i1, i1 0
- %nop6364 = alloca i1, i1 0
- %nop6365 = alloca i1, i1 0
- %nop6366 = alloca i1, i1 0
- %nop6367 = alloca i1, i1 0
- %nop6368 = alloca i1, i1 0
- %nop6369 = alloca i1, i1 0
- %nop6370 = alloca i1, i1 0
- %nop6371 = alloca i1, i1 0
- %nop6372 = alloca i1, i1 0
- %nop6373 = alloca i1, i1 0
- %nop6374 = alloca i1, i1 0
- %nop6375 = alloca i1, i1 0
- %nop6376 = alloca i1, i1 0
- %nop6377 = alloca i1, i1 0
- %nop6378 = alloca i1, i1 0
- %nop6379 = alloca i1, i1 0
- %nop6380 = alloca i1, i1 0
- %nop6381 = alloca i1, i1 0
- %nop6382 = alloca i1, i1 0
- %nop6383 = alloca i1, i1 0
- %nop6384 = alloca i1, i1 0
- %nop6385 = alloca i1, i1 0
- %nop6386 = alloca i1, i1 0
- %nop6387 = alloca i1, i1 0
- %nop6388 = alloca i1, i1 0
- %nop6389 = alloca i1, i1 0
- %nop6390 = alloca i1, i1 0
- %nop6391 = alloca i1, i1 0
- %nop6392 = alloca i1, i1 0
- %nop6393 = alloca i1, i1 0
- %nop6394 = alloca i1, i1 0
- %nop6395 = alloca i1, i1 0
- %nop6396 = alloca i1, i1 0
- %nop6397 = alloca i1, i1 0
- %nop6398 = alloca i1, i1 0
- %nop6399 = alloca i1, i1 0
- %nop6400 = alloca i1, i1 0
- %nop6401 = alloca i1, i1 0
- %nop6402 = alloca i1, i1 0
- %nop6403 = alloca i1, i1 0
- %nop6404 = alloca i1, i1 0
- %nop6405 = alloca i1, i1 0
- %nop6406 = alloca i1, i1 0
- %nop6407 = alloca i1, i1 0
- %nop6408 = alloca i1, i1 0
- %nop6409 = alloca i1, i1 0
- %nop6410 = alloca i1, i1 0
- %nop6411 = alloca i1, i1 0
- %nop6412 = alloca i1, i1 0
- %nop6413 = alloca i1, i1 0
- %nop6414 = alloca i1, i1 0
- %nop6415 = alloca i1, i1 0
- %nop6416 = alloca i1, i1 0
- %nop6417 = alloca i1, i1 0
- %nop6418 = alloca i1, i1 0
- %nop6419 = alloca i1, i1 0
- %nop6420 = alloca i1, i1 0
- %nop6421 = alloca i1, i1 0
- %nop6422 = alloca i1, i1 0
- %nop6423 = alloca i1, i1 0
- %nop6424 = alloca i1, i1 0
- %nop6425 = alloca i1, i1 0
- %nop6426 = alloca i1, i1 0
- %nop6427 = alloca i1, i1 0
- %nop6428 = alloca i1, i1 0
- %nop6429 = alloca i1, i1 0
- %nop6430 = alloca i1, i1 0
- %nop6431 = alloca i1, i1 0
- %nop6432 = alloca i1, i1 0
- %nop6433 = alloca i1, i1 0
- %nop6434 = alloca i1, i1 0
- %nop6435 = alloca i1, i1 0
- %nop6436 = alloca i1, i1 0
- %nop6437 = alloca i1, i1 0
- %nop6438 = alloca i1, i1 0
- %nop6439 = alloca i1, i1 0
- %nop6440 = alloca i1, i1 0
- %nop6441 = alloca i1, i1 0
- %nop6442 = alloca i1, i1 0
- %nop6443 = alloca i1, i1 0
- %nop6444 = alloca i1, i1 0
- %nop6445 = alloca i1, i1 0
- %nop6446 = alloca i1, i1 0
- %nop6447 = alloca i1, i1 0
- %nop6448 = alloca i1, i1 0
- %nop6449 = alloca i1, i1 0
- %nop6450 = alloca i1, i1 0
- %nop6451 = alloca i1, i1 0
- %nop6452 = alloca i1, i1 0
- %nop6453 = alloca i1, i1 0
- %nop6454 = alloca i1, i1 0
- %nop6455 = alloca i1, i1 0
- %nop6456 = alloca i1, i1 0
- %nop6457 = alloca i1, i1 0
- %nop6458 = alloca i1, i1 0
- %nop6459 = alloca i1, i1 0
- %nop6460 = alloca i1, i1 0
- %nop6461 = alloca i1, i1 0
- %nop6462 = alloca i1, i1 0
- %nop6463 = alloca i1, i1 0
- %nop6464 = alloca i1, i1 0
- %nop6465 = alloca i1, i1 0
- %nop6466 = alloca i1, i1 0
- %nop6467 = alloca i1, i1 0
- %nop6468 = alloca i1, i1 0
- %nop6469 = alloca i1, i1 0
- %nop6470 = alloca i1, i1 0
- %nop6471 = alloca i1, i1 0
- %nop6472 = alloca i1, i1 0
- %nop6473 = alloca i1, i1 0
- %nop6474 = alloca i1, i1 0
- %nop6475 = alloca i1, i1 0
- %nop6476 = alloca i1, i1 0
- %nop6477 = alloca i1, i1 0
- %nop6478 = alloca i1, i1 0
- %nop6479 = alloca i1, i1 0
- %nop6480 = alloca i1, i1 0
- %nop6481 = alloca i1, i1 0
- %nop6482 = alloca i1, i1 0
- %nop6483 = alloca i1, i1 0
- %nop6484 = alloca i1, i1 0
- %nop6485 = alloca i1, i1 0
- %nop6486 = alloca i1, i1 0
- %nop6487 = alloca i1, i1 0
- %nop6488 = alloca i1, i1 0
- %nop6489 = alloca i1, i1 0
- %nop6490 = alloca i1, i1 0
- %nop6491 = alloca i1, i1 0
- %nop6492 = alloca i1, i1 0
- %nop6493 = alloca i1, i1 0
- %nop6494 = alloca i1, i1 0
- %nop6495 = alloca i1, i1 0
- %nop6496 = alloca i1, i1 0
- %nop6497 = alloca i1, i1 0
- %nop6498 = alloca i1, i1 0
- %nop6499 = alloca i1, i1 0
- %nop6500 = alloca i1, i1 0
- %nop6501 = alloca i1, i1 0
- %nop6502 = alloca i1, i1 0
- %nop6503 = alloca i1, i1 0
- %nop6504 = alloca i1, i1 0
- %nop6505 = alloca i1, i1 0
- %nop6506 = alloca i1, i1 0
- %nop6507 = alloca i1, i1 0
- %nop6508 = alloca i1, i1 0
- %nop6509 = alloca i1, i1 0
- %nop6510 = alloca i1, i1 0
- %nop6511 = alloca i1, i1 0
- %nop6512 = alloca i1, i1 0
- %nop6513 = alloca i1, i1 0
- %nop6514 = alloca i1, i1 0
- %nop6515 = alloca i1, i1 0
- %nop6516 = alloca i1, i1 0
- %nop6517 = alloca i1, i1 0
- %nop6518 = alloca i1, i1 0
- %nop6519 = alloca i1, i1 0
- %nop6520 = alloca i1, i1 0
- %nop6521 = alloca i1, i1 0
- %nop6522 = alloca i1, i1 0
- %nop6523 = alloca i1, i1 0
- %nop6524 = alloca i1, i1 0
- %nop6525 = alloca i1, i1 0
- %nop6526 = alloca i1, i1 0
- %nop6527 = alloca i1, i1 0
- %nop6528 = alloca i1, i1 0
- %nop6529 = alloca i1, i1 0
- %nop6530 = alloca i1, i1 0
- %nop6531 = alloca i1, i1 0
- %nop6532 = alloca i1, i1 0
- %nop6533 = alloca i1, i1 0
- %nop6534 = alloca i1, i1 0
- %nop6535 = alloca i1, i1 0
- %nop6536 = alloca i1, i1 0
- %nop6537 = alloca i1, i1 0
- %nop6538 = alloca i1, i1 0
- %nop6539 = alloca i1, i1 0
- %nop6540 = alloca i1, i1 0
- %nop6541 = alloca i1, i1 0
- %nop6542 = alloca i1, i1 0
- %nop6543 = alloca i1, i1 0
- %nop6544 = alloca i1, i1 0
- %nop6545 = alloca i1, i1 0
- %nop6546 = alloca i1, i1 0
- %nop6547 = alloca i1, i1 0
- %nop6548 = alloca i1, i1 0
- %nop6549 = alloca i1, i1 0
- %nop6550 = alloca i1, i1 0
- %nop6551 = alloca i1, i1 0
- %nop6552 = alloca i1, i1 0
- %nop6553 = alloca i1, i1 0
- %nop6554 = alloca i1, i1 0
- %nop6555 = alloca i1, i1 0
- %nop6556 = alloca i1, i1 0
- %nop6557 = alloca i1, i1 0
- %nop6558 = alloca i1, i1 0
- %nop6559 = alloca i1, i1 0
- %nop6560 = alloca i1, i1 0
- %nop6561 = alloca i1, i1 0
- %nop6562 = alloca i1, i1 0
- %nop6563 = alloca i1, i1 0
- %nop6564 = alloca i1, i1 0
- %nop6565 = alloca i1, i1 0
- %nop6566 = alloca i1, i1 0
- %nop6567 = alloca i1, i1 0
- %nop6568 = alloca i1, i1 0
- %nop6569 = alloca i1, i1 0
- %nop6570 = alloca i1, i1 0
- %nop6571 = alloca i1, i1 0
- %nop6572 = alloca i1, i1 0
- %nop6573 = alloca i1, i1 0
- %nop6574 = alloca i1, i1 0
- %nop6575 = alloca i1, i1 0
- %nop6576 = alloca i1, i1 0
- %nop6577 = alloca i1, i1 0
- %nop6578 = alloca i1, i1 0
- %nop6579 = alloca i1, i1 0
- %nop6580 = alloca i1, i1 0
- %nop6581 = alloca i1, i1 0
- %nop6582 = alloca i1, i1 0
- %nop6583 = alloca i1, i1 0
- %nop6584 = alloca i1, i1 0
- %nop6585 = alloca i1, i1 0
- %nop6586 = alloca i1, i1 0
- %nop6587 = alloca i1, i1 0
- %nop6588 = alloca i1, i1 0
- %nop6589 = alloca i1, i1 0
- %nop6590 = alloca i1, i1 0
- %nop6591 = alloca i1, i1 0
- %nop6592 = alloca i1, i1 0
- %nop6593 = alloca i1, i1 0
- %nop6594 = alloca i1, i1 0
- %nop6595 = alloca i1, i1 0
- %nop6596 = alloca i1, i1 0
- %nop6597 = alloca i1, i1 0
- %nop6598 = alloca i1, i1 0
- %nop6599 = alloca i1, i1 0
- %nop6600 = alloca i1, i1 0
- %nop6601 = alloca i1, i1 0
- %nop6602 = alloca i1, i1 0
- %nop6603 = alloca i1, i1 0
- %nop6604 = alloca i1, i1 0
- %nop6605 = alloca i1, i1 0
- %nop6606 = alloca i1, i1 0
- %nop6607 = alloca i1, i1 0
- %nop6608 = alloca i1, i1 0
- %nop6609 = alloca i1, i1 0
- %nop6610 = alloca i1, i1 0
- %nop6611 = alloca i1, i1 0
- %nop6612 = alloca i1, i1 0
- %nop6613 = alloca i1, i1 0
- %nop6614 = alloca i1, i1 0
- %nop6615 = alloca i1, i1 0
- %nop6616 = alloca i1, i1 0
- %nop6617 = alloca i1, i1 0
- %nop6618 = alloca i1, i1 0
- %nop6619 = alloca i1, i1 0
- %nop6620 = alloca i1, i1 0
- %nop6621 = alloca i1, i1 0
- %nop6622 = alloca i1, i1 0
- %nop6623 = alloca i1, i1 0
- %nop6624 = alloca i1, i1 0
- %nop6625 = alloca i1, i1 0
- %nop6626 = alloca i1, i1 0
- %nop6627 = alloca i1, i1 0
- %nop6628 = alloca i1, i1 0
- %nop6629 = alloca i1, i1 0
- %nop6630 = alloca i1, i1 0
- %nop6631 = alloca i1, i1 0
- %nop6632 = alloca i1, i1 0
- %nop6633 = alloca i1, i1 0
- %nop6634 = alloca i1, i1 0
- %nop6635 = alloca i1, i1 0
- %nop6636 = alloca i1, i1 0
- %nop6637 = alloca i1, i1 0
- %nop6638 = alloca i1, i1 0
- %nop6639 = alloca i1, i1 0
- %nop6640 = alloca i1, i1 0
- %nop6641 = alloca i1, i1 0
- %nop6642 = alloca i1, i1 0
- %nop6643 = alloca i1, i1 0
- %nop6644 = alloca i1, i1 0
- %nop6645 = alloca i1, i1 0
- %nop6646 = alloca i1, i1 0
- %nop6647 = alloca i1, i1 0
- %nop6648 = alloca i1, i1 0
- %nop6649 = alloca i1, i1 0
- %nop6650 = alloca i1, i1 0
- %nop6651 = alloca i1, i1 0
- %nop6652 = alloca i1, i1 0
- %nop6653 = alloca i1, i1 0
- %nop6654 = alloca i1, i1 0
- %nop6655 = alloca i1, i1 0
- %nop6656 = alloca i1, i1 0
- %nop6657 = alloca i1, i1 0
- %nop6658 = alloca i1, i1 0
- %nop6659 = alloca i1, i1 0
- %nop6660 = alloca i1, i1 0
- %nop6661 = alloca i1, i1 0
- %nop6662 = alloca i1, i1 0
- %nop6663 = alloca i1, i1 0
- %nop6664 = alloca i1, i1 0
- %nop6665 = alloca i1, i1 0
- %nop6666 = alloca i1, i1 0
- %nop6667 = alloca i1, i1 0
- %nop6668 = alloca i1, i1 0
- %nop6669 = alloca i1, i1 0
- %nop6670 = alloca i1, i1 0
- %nop6671 = alloca i1, i1 0
- %nop6672 = alloca i1, i1 0
- %nop6673 = alloca i1, i1 0
- %nop6674 = alloca i1, i1 0
- %nop6675 = alloca i1, i1 0
- %nop6676 = alloca i1, i1 0
- %nop6677 = alloca i1, i1 0
- %nop6678 = alloca i1, i1 0
- %nop6679 = alloca i1, i1 0
- %nop6680 = alloca i1, i1 0
- %nop6681 = alloca i1, i1 0
- %nop6682 = alloca i1, i1 0
- %nop6683 = alloca i1, i1 0
- %nop6684 = alloca i1, i1 0
- %nop6685 = alloca i1, i1 0
- %nop6686 = alloca i1, i1 0
- %nop6687 = alloca i1, i1 0
- %nop6688 = alloca i1, i1 0
- %nop6689 = alloca i1, i1 0
- %nop6690 = alloca i1, i1 0
- %nop6691 = alloca i1, i1 0
- %nop6692 = alloca i1, i1 0
- %nop6693 = alloca i1, i1 0
- %nop6694 = alloca i1, i1 0
- %nop6695 = alloca i1, i1 0
- %nop6696 = alloca i1, i1 0
- %nop6697 = alloca i1, i1 0
- %nop6698 = alloca i1, i1 0
- %nop6699 = alloca i1, i1 0
- %nop6700 = alloca i1, i1 0
- %nop6701 = alloca i1, i1 0
- %nop6702 = alloca i1, i1 0
- %nop6703 = alloca i1, i1 0
- %nop6704 = alloca i1, i1 0
- %nop6705 = alloca i1, i1 0
- %nop6706 = alloca i1, i1 0
- %nop6707 = alloca i1, i1 0
- %nop6708 = alloca i1, i1 0
- %nop6709 = alloca i1, i1 0
- %nop6710 = alloca i1, i1 0
- %nop6711 = alloca i1, i1 0
- %nop6712 = alloca i1, i1 0
- %nop6713 = alloca i1, i1 0
- %nop6714 = alloca i1, i1 0
- %nop6715 = alloca i1, i1 0
- %nop6716 = alloca i1, i1 0
- %nop6717 = alloca i1, i1 0
- %nop6718 = alloca i1, i1 0
- %nop6719 = alloca i1, i1 0
- %nop6720 = alloca i1, i1 0
- %nop6721 = alloca i1, i1 0
- %nop6722 = alloca i1, i1 0
- %nop6723 = alloca i1, i1 0
- %nop6724 = alloca i1, i1 0
- %nop6725 = alloca i1, i1 0
- %nop6726 = alloca i1, i1 0
- %nop6727 = alloca i1, i1 0
- %nop6728 = alloca i1, i1 0
- %nop6729 = alloca i1, i1 0
- %nop6730 = alloca i1, i1 0
- %nop6731 = alloca i1, i1 0
- %nop6732 = alloca i1, i1 0
- %nop6733 = alloca i1, i1 0
- %nop6734 = alloca i1, i1 0
- %nop6735 = alloca i1, i1 0
- %nop6736 = alloca i1, i1 0
- %nop6737 = alloca i1, i1 0
- %nop6738 = alloca i1, i1 0
- %nop6739 = alloca i1, i1 0
- %nop6740 = alloca i1, i1 0
- %nop6741 = alloca i1, i1 0
- %nop6742 = alloca i1, i1 0
- %nop6743 = alloca i1, i1 0
- %nop6744 = alloca i1, i1 0
- %nop6745 = alloca i1, i1 0
- %nop6746 = alloca i1, i1 0
- %nop6747 = alloca i1, i1 0
- %nop6748 = alloca i1, i1 0
- %nop6749 = alloca i1, i1 0
- %nop6750 = alloca i1, i1 0
- %nop6751 = alloca i1, i1 0
- %nop6752 = alloca i1, i1 0
- %nop6753 = alloca i1, i1 0
- %nop6754 = alloca i1, i1 0
- %nop6755 = alloca i1, i1 0
- %nop6756 = alloca i1, i1 0
- %nop6757 = alloca i1, i1 0
- %nop6758 = alloca i1, i1 0
- %nop6759 = alloca i1, i1 0
- %nop6760 = alloca i1, i1 0
- %nop6761 = alloca i1, i1 0
- %nop6762 = alloca i1, i1 0
- %nop6763 = alloca i1, i1 0
- %nop6764 = alloca i1, i1 0
- %nop6765 = alloca i1, i1 0
- %nop6766 = alloca i1, i1 0
- %nop6767 = alloca i1, i1 0
- %nop6768 = alloca i1, i1 0
- %nop6769 = alloca i1, i1 0
- %nop6770 = alloca i1, i1 0
- %nop6771 = alloca i1, i1 0
- %nop6772 = alloca i1, i1 0
- %nop6773 = alloca i1, i1 0
- %nop6774 = alloca i1, i1 0
- %nop6775 = alloca i1, i1 0
- %nop6776 = alloca i1, i1 0
- %nop6777 = alloca i1, i1 0
- %nop6778 = alloca i1, i1 0
- %nop6779 = alloca i1, i1 0
- %nop6780 = alloca i1, i1 0
- %nop6781 = alloca i1, i1 0
- %nop6782 = alloca i1, i1 0
- %nop6783 = alloca i1, i1 0
- %nop6784 = alloca i1, i1 0
- %nop6785 = alloca i1, i1 0
- %nop6786 = alloca i1, i1 0
- %nop6787 = alloca i1, i1 0
- %nop6788 = alloca i1, i1 0
- %nop6789 = alloca i1, i1 0
- %nop6790 = alloca i1, i1 0
- %nop6791 = alloca i1, i1 0
- %nop6792 = alloca i1, i1 0
- %nop6793 = alloca i1, i1 0
- %nop6794 = alloca i1, i1 0
- %nop6795 = alloca i1, i1 0
- %nop6796 = alloca i1, i1 0
- %nop6797 = alloca i1, i1 0
- %nop6798 = alloca i1, i1 0
- %nop6799 = alloca i1, i1 0
- %nop6800 = alloca i1, i1 0
- %nop6801 = alloca i1, i1 0
- %nop6802 = alloca i1, i1 0
- %nop6803 = alloca i1, i1 0
- %nop6804 = alloca i1, i1 0
- %nop6805 = alloca i1, i1 0
- %nop6806 = alloca i1, i1 0
- %nop6807 = alloca i1, i1 0
- %nop6808 = alloca i1, i1 0
- %nop6809 = alloca i1, i1 0
- %nop6810 = alloca i1, i1 0
- %nop6811 = alloca i1, i1 0
- %nop6812 = alloca i1, i1 0
- %nop6813 = alloca i1, i1 0
- %nop6814 = alloca i1, i1 0
- %nop6815 = alloca i1, i1 0
- %nop6816 = alloca i1, i1 0
- %nop6817 = alloca i1, i1 0
- %nop6818 = alloca i1, i1 0
- %nop6819 = alloca i1, i1 0
- %nop6820 = alloca i1, i1 0
- %nop6821 = alloca i1, i1 0
- %nop6822 = alloca i1, i1 0
- %nop6823 = alloca i1, i1 0
- %nop6824 = alloca i1, i1 0
- %nop6825 = alloca i1, i1 0
- %nop6826 = alloca i1, i1 0
- %nop6827 = alloca i1, i1 0
- %nop6828 = alloca i1, i1 0
- %nop6829 = alloca i1, i1 0
- %nop6830 = alloca i1, i1 0
- %nop6831 = alloca i1, i1 0
- %nop6832 = alloca i1, i1 0
- %nop6833 = alloca i1, i1 0
- %nop6834 = alloca i1, i1 0
- %nop6835 = alloca i1, i1 0
- %nop6836 = alloca i1, i1 0
- %nop6837 = alloca i1, i1 0
- %nop6838 = alloca i1, i1 0
- %nop6839 = alloca i1, i1 0
- %nop6840 = alloca i1, i1 0
- %nop6841 = alloca i1, i1 0
- %nop6842 = alloca i1, i1 0
- %nop6843 = alloca i1, i1 0
- %nop6844 = alloca i1, i1 0
- %nop6845 = alloca i1, i1 0
- %nop6846 = alloca i1, i1 0
- %nop6847 = alloca i1, i1 0
- %nop6848 = alloca i1, i1 0
- %nop6849 = alloca i1, i1 0
- %nop6850 = alloca i1, i1 0
- %nop6851 = alloca i1, i1 0
- %nop6852 = alloca i1, i1 0
- %nop6853 = alloca i1, i1 0
- %nop6854 = alloca i1, i1 0
- %nop6855 = alloca i1, i1 0
- %nop6856 = alloca i1, i1 0
- %nop6857 = alloca i1, i1 0
- %nop6858 = alloca i1, i1 0
- %nop6859 = alloca i1, i1 0
- %nop6860 = alloca i1, i1 0
- %nop6861 = alloca i1, i1 0
- %nop6862 = alloca i1, i1 0
- %nop6863 = alloca i1, i1 0
- %nop6864 = alloca i1, i1 0
- %nop6865 = alloca i1, i1 0
- %nop6866 = alloca i1, i1 0
- %nop6867 = alloca i1, i1 0
- %nop6868 = alloca i1, i1 0
- %nop6869 = alloca i1, i1 0
- %nop6870 = alloca i1, i1 0
- %nop6871 = alloca i1, i1 0
- %nop6872 = alloca i1, i1 0
- %nop6873 = alloca i1, i1 0
- %nop6874 = alloca i1, i1 0
- %nop6875 = alloca i1, i1 0
- %nop6876 = alloca i1, i1 0
- %nop6877 = alloca i1, i1 0
- %nop6878 = alloca i1, i1 0
- %nop6879 = alloca i1, i1 0
- %nop6880 = alloca i1, i1 0
- %nop6881 = alloca i1, i1 0
- %nop6882 = alloca i1, i1 0
- %nop6883 = alloca i1, i1 0
- %nop6884 = alloca i1, i1 0
- %nop6885 = alloca i1, i1 0
- %nop6886 = alloca i1, i1 0
- %nop6887 = alloca i1, i1 0
- %nop6888 = alloca i1, i1 0
- %nop6889 = alloca i1, i1 0
- %nop6890 = alloca i1, i1 0
- %nop6891 = alloca i1, i1 0
- %nop6892 = alloca i1, i1 0
- %nop6893 = alloca i1, i1 0
- %nop6894 = alloca i1, i1 0
- %nop6895 = alloca i1, i1 0
- %nop6896 = alloca i1, i1 0
- %nop6897 = alloca i1, i1 0
- %nop6898 = alloca i1, i1 0
- %nop6899 = alloca i1, i1 0
- %nop6900 = alloca i1, i1 0
- %nop6901 = alloca i1, i1 0
- %nop6902 = alloca i1, i1 0
- %nop6903 = alloca i1, i1 0
- %nop6904 = alloca i1, i1 0
- %nop6905 = alloca i1, i1 0
- %nop6906 = alloca i1, i1 0
- %nop6907 = alloca i1, i1 0
- %nop6908 = alloca i1, i1 0
- %nop6909 = alloca i1, i1 0
- %nop6910 = alloca i1, i1 0
- %nop6911 = alloca i1, i1 0
- %nop6912 = alloca i1, i1 0
- %nop6913 = alloca i1, i1 0
- %nop6914 = alloca i1, i1 0
- %nop6915 = alloca i1, i1 0
- %nop6916 = alloca i1, i1 0
- %nop6917 = alloca i1, i1 0
- %nop6918 = alloca i1, i1 0
- %nop6919 = alloca i1, i1 0
- %nop6920 = alloca i1, i1 0
- %nop6921 = alloca i1, i1 0
- %nop6922 = alloca i1, i1 0
- %nop6923 = alloca i1, i1 0
- %nop6924 = alloca i1, i1 0
- %nop6925 = alloca i1, i1 0
- %nop6926 = alloca i1, i1 0
- %nop6927 = alloca i1, i1 0
- %nop6928 = alloca i1, i1 0
- %nop6929 = alloca i1, i1 0
- %nop6930 = alloca i1, i1 0
- %nop6931 = alloca i1, i1 0
- %nop6932 = alloca i1, i1 0
- %nop6933 = alloca i1, i1 0
- %nop6934 = alloca i1, i1 0
- %nop6935 = alloca i1, i1 0
- %nop6936 = alloca i1, i1 0
- %nop6937 = alloca i1, i1 0
- %nop6938 = alloca i1, i1 0
- %nop6939 = alloca i1, i1 0
- %nop6940 = alloca i1, i1 0
- %nop6941 = alloca i1, i1 0
- %nop6942 = alloca i1, i1 0
- %nop6943 = alloca i1, i1 0
- %nop6944 = alloca i1, i1 0
- %nop6945 = alloca i1, i1 0
- %nop6946 = alloca i1, i1 0
- %nop6947 = alloca i1, i1 0
- %nop6948 = alloca i1, i1 0
- %nop6949 = alloca i1, i1 0
- %nop6950 = alloca i1, i1 0
- %nop6951 = alloca i1, i1 0
- %nop6952 = alloca i1, i1 0
- %nop6953 = alloca i1, i1 0
- %nop6954 = alloca i1, i1 0
- %nop6955 = alloca i1, i1 0
- %nop6956 = alloca i1, i1 0
- %nop6957 = alloca i1, i1 0
- %nop6958 = alloca i1, i1 0
- %nop6959 = alloca i1, i1 0
- %nop6960 = alloca i1, i1 0
- %nop6961 = alloca i1, i1 0
- %nop6962 = alloca i1, i1 0
- %nop6963 = alloca i1, i1 0
- %nop6964 = alloca i1, i1 0
- %nop6965 = alloca i1, i1 0
- %nop6966 = alloca i1, i1 0
- %nop6967 = alloca i1, i1 0
- %nop6968 = alloca i1, i1 0
- %nop6969 = alloca i1, i1 0
- %nop6970 = alloca i1, i1 0
- %nop6971 = alloca i1, i1 0
- %nop6972 = alloca i1, i1 0
- %nop6973 = alloca i1, i1 0
- %nop6974 = alloca i1, i1 0
- %nop6975 = alloca i1, i1 0
- %nop6976 = alloca i1, i1 0
- %nop6977 = alloca i1, i1 0
- %nop6978 = alloca i1, i1 0
- %nop6979 = alloca i1, i1 0
- %nop6980 = alloca i1, i1 0
- %nop6981 = alloca i1, i1 0
- %nop6982 = alloca i1, i1 0
- %nop6983 = alloca i1, i1 0
- %nop6984 = alloca i1, i1 0
- %nop6985 = alloca i1, i1 0
- %nop6986 = alloca i1, i1 0
- %nop6987 = alloca i1, i1 0
- %nop6988 = alloca i1, i1 0
- %nop6989 = alloca i1, i1 0
- %nop6990 = alloca i1, i1 0
- %nop6991 = alloca i1, i1 0
- %nop6992 = alloca i1, i1 0
- %nop6993 = alloca i1, i1 0
- %nop6994 = alloca i1, i1 0
- %nop6995 = alloca i1, i1 0
- %nop6996 = alloca i1, i1 0
- %nop6997 = alloca i1, i1 0
- %nop6998 = alloca i1, i1 0
- %nop6999 = alloca i1, i1 0
- %nop7000 = alloca i1, i1 0
- %nop7001 = alloca i1, i1 0
- %nop7002 = alloca i1, i1 0
- %nop7003 = alloca i1, i1 0
- %nop7004 = alloca i1, i1 0
- %nop7005 = alloca i1, i1 0
- %nop7006 = alloca i1, i1 0
- %nop7007 = alloca i1, i1 0
- %nop7008 = alloca i1, i1 0
- %nop7009 = alloca i1, i1 0
- %nop7010 = alloca i1, i1 0
- %nop7011 = alloca i1, i1 0
- %nop7012 = alloca i1, i1 0
- %nop7013 = alloca i1, i1 0
- %nop7014 = alloca i1, i1 0
- %nop7015 = alloca i1, i1 0
- %nop7016 = alloca i1, i1 0
- %nop7017 = alloca i1, i1 0
- %nop7018 = alloca i1, i1 0
- %nop7019 = alloca i1, i1 0
- %nop7020 = alloca i1, i1 0
- %nop7021 = alloca i1, i1 0
- %nop7022 = alloca i1, i1 0
- %nop7023 = alloca i1, i1 0
- %nop7024 = alloca i1, i1 0
- %nop7025 = alloca i1, i1 0
- %nop7026 = alloca i1, i1 0
- %nop7027 = alloca i1, i1 0
- %nop7028 = alloca i1, i1 0
- %nop7029 = alloca i1, i1 0
- %nop7030 = alloca i1, i1 0
- %nop7031 = alloca i1, i1 0
- %nop7032 = alloca i1, i1 0
- %nop7033 = alloca i1, i1 0
- %nop7034 = alloca i1, i1 0
- %nop7035 = alloca i1, i1 0
- %nop7036 = alloca i1, i1 0
- %nop7037 = alloca i1, i1 0
- %nop7038 = alloca i1, i1 0
- %nop7039 = alloca i1, i1 0
- %nop7040 = alloca i1, i1 0
- %nop7041 = alloca i1, i1 0
- %nop7042 = alloca i1, i1 0
- %nop7043 = alloca i1, i1 0
- %nop7044 = alloca i1, i1 0
- %nop7045 = alloca i1, i1 0
- %nop7046 = alloca i1, i1 0
- %nop7047 = alloca i1, i1 0
- %nop7048 = alloca i1, i1 0
- %nop7049 = alloca i1, i1 0
- %nop7050 = alloca i1, i1 0
- %nop7051 = alloca i1, i1 0
- %nop7052 = alloca i1, i1 0
- %nop7053 = alloca i1, i1 0
- %nop7054 = alloca i1, i1 0
- %nop7055 = alloca i1, i1 0
- %nop7056 = alloca i1, i1 0
- %nop7057 = alloca i1, i1 0
- %nop7058 = alloca i1, i1 0
- %nop7059 = alloca i1, i1 0
- %nop7060 = alloca i1, i1 0
- %nop7061 = alloca i1, i1 0
- %nop7062 = alloca i1, i1 0
- %nop7063 = alloca i1, i1 0
- %nop7064 = alloca i1, i1 0
- %nop7065 = alloca i1, i1 0
- %nop7066 = alloca i1, i1 0
- %nop7067 = alloca i1, i1 0
- %nop7068 = alloca i1, i1 0
- %nop7069 = alloca i1, i1 0
- %nop7070 = alloca i1, i1 0
- %nop7071 = alloca i1, i1 0
- %nop7072 = alloca i1, i1 0
- %nop7073 = alloca i1, i1 0
- %nop7074 = alloca i1, i1 0
- %nop7075 = alloca i1, i1 0
- %nop7076 = alloca i1, i1 0
- %nop7077 = alloca i1, i1 0
- %nop7078 = alloca i1, i1 0
- %nop7079 = alloca i1, i1 0
- %nop7080 = alloca i1, i1 0
- %nop7081 = alloca i1, i1 0
- %nop7082 = alloca i1, i1 0
- %nop7083 = alloca i1, i1 0
- %nop7084 = alloca i1, i1 0
- %nop7085 = alloca i1, i1 0
- %nop7086 = alloca i1, i1 0
- %nop7087 = alloca i1, i1 0
- %nop7088 = alloca i1, i1 0
- %nop7089 = alloca i1, i1 0
- %nop7090 = alloca i1, i1 0
- %nop7091 = alloca i1, i1 0
- %nop7092 = alloca i1, i1 0
- %nop7093 = alloca i1, i1 0
- %nop7094 = alloca i1, i1 0
- %nop7095 = alloca i1, i1 0
- %nop7096 = alloca i1, i1 0
- %nop7097 = alloca i1, i1 0
- %nop7098 = alloca i1, i1 0
- %nop7099 = alloca i1, i1 0
- %nop7100 = alloca i1, i1 0
- %nop7101 = alloca i1, i1 0
- %nop7102 = alloca i1, i1 0
- %nop7103 = alloca i1, i1 0
- %nop7104 = alloca i1, i1 0
- %nop7105 = alloca i1, i1 0
- %nop7106 = alloca i1, i1 0
- %nop7107 = alloca i1, i1 0
- %nop7108 = alloca i1, i1 0
- %nop7109 = alloca i1, i1 0
- %nop7110 = alloca i1, i1 0
- %nop7111 = alloca i1, i1 0
- %nop7112 = alloca i1, i1 0
- %nop7113 = alloca i1, i1 0
- %nop7114 = alloca i1, i1 0
- %nop7115 = alloca i1, i1 0
- %nop7116 = alloca i1, i1 0
- %nop7117 = alloca i1, i1 0
- %nop7118 = alloca i1, i1 0
- %nop7119 = alloca i1, i1 0
- %nop7120 = alloca i1, i1 0
- %nop7121 = alloca i1, i1 0
- %nop7122 = alloca i1, i1 0
- %nop7123 = alloca i1, i1 0
- %nop7124 = alloca i1, i1 0
- %nop7125 = alloca i1, i1 0
- %nop7126 = alloca i1, i1 0
- %nop7127 = alloca i1, i1 0
- %nop7128 = alloca i1, i1 0
- %nop7129 = alloca i1, i1 0
- %nop7130 = alloca i1, i1 0
- %nop7131 = alloca i1, i1 0
- %nop7132 = alloca i1, i1 0
- %nop7133 = alloca i1, i1 0
- %nop7134 = alloca i1, i1 0
- %nop7135 = alloca i1, i1 0
- %nop7136 = alloca i1, i1 0
- %nop7137 = alloca i1, i1 0
- %nop7138 = alloca i1, i1 0
- %nop7139 = alloca i1, i1 0
- %nop7140 = alloca i1, i1 0
- %nop7141 = alloca i1, i1 0
- %nop7142 = alloca i1, i1 0
- %nop7143 = alloca i1, i1 0
- %nop7144 = alloca i1, i1 0
- %nop7145 = alloca i1, i1 0
- %nop7146 = alloca i1, i1 0
- %nop7147 = alloca i1, i1 0
- %nop7148 = alloca i1, i1 0
- %nop7149 = alloca i1, i1 0
- %nop7150 = alloca i1, i1 0
- %nop7151 = alloca i1, i1 0
- %nop7152 = alloca i1, i1 0
- %nop7153 = alloca i1, i1 0
- %nop7154 = alloca i1, i1 0
- %nop7155 = alloca i1, i1 0
- %nop7156 = alloca i1, i1 0
- %nop7157 = alloca i1, i1 0
- %nop7158 = alloca i1, i1 0
- %nop7159 = alloca i1, i1 0
- %nop7160 = alloca i1, i1 0
- %nop7161 = alloca i1, i1 0
- %nop7162 = alloca i1, i1 0
- %nop7163 = alloca i1, i1 0
- %nop7164 = alloca i1, i1 0
- %nop7165 = alloca i1, i1 0
- %nop7166 = alloca i1, i1 0
- %nop7167 = alloca i1, i1 0
- %nop7168 = alloca i1, i1 0
- %nop7169 = alloca i1, i1 0
- %nop7170 = alloca i1, i1 0
- %nop7171 = alloca i1, i1 0
- %nop7172 = alloca i1, i1 0
- %nop7173 = alloca i1, i1 0
- %nop7174 = alloca i1, i1 0
- %nop7175 = alloca i1, i1 0
- %nop7176 = alloca i1, i1 0
- %nop7177 = alloca i1, i1 0
- %nop7178 = alloca i1, i1 0
- %nop7179 = alloca i1, i1 0
- %nop7180 = alloca i1, i1 0
- %nop7181 = alloca i1, i1 0
- %nop7182 = alloca i1, i1 0
- %nop7183 = alloca i1, i1 0
- %nop7184 = alloca i1, i1 0
- %nop7185 = alloca i1, i1 0
- %nop7186 = alloca i1, i1 0
- %nop7187 = alloca i1, i1 0
- %nop7188 = alloca i1, i1 0
- %nop7189 = alloca i1, i1 0
- %nop7190 = alloca i1, i1 0
- %nop7191 = alloca i1, i1 0
- %nop7192 = alloca i1, i1 0
- %nop7193 = alloca i1, i1 0
- %nop7194 = alloca i1, i1 0
- %nop7195 = alloca i1, i1 0
- %nop7196 = alloca i1, i1 0
- %nop7197 = alloca i1, i1 0
- %nop7198 = alloca i1, i1 0
- %nop7199 = alloca i1, i1 0
- %nop7200 = alloca i1, i1 0
- %nop7201 = alloca i1, i1 0
- %nop7202 = alloca i1, i1 0
- %nop7203 = alloca i1, i1 0
- %nop7204 = alloca i1, i1 0
- %nop7205 = alloca i1, i1 0
- %nop7206 = alloca i1, i1 0
- %nop7207 = alloca i1, i1 0
- %nop7208 = alloca i1, i1 0
- %nop7209 = alloca i1, i1 0
- %nop7210 = alloca i1, i1 0
- %nop7211 = alloca i1, i1 0
- %nop7212 = alloca i1, i1 0
- %nop7213 = alloca i1, i1 0
- %nop7214 = alloca i1, i1 0
- %nop7215 = alloca i1, i1 0
- %nop7216 = alloca i1, i1 0
- %nop7217 = alloca i1, i1 0
- %nop7218 = alloca i1, i1 0
- %nop7219 = alloca i1, i1 0
- %nop7220 = alloca i1, i1 0
- %nop7221 = alloca i1, i1 0
- %nop7222 = alloca i1, i1 0
- %nop7223 = alloca i1, i1 0
- %nop7224 = alloca i1, i1 0
- %nop7225 = alloca i1, i1 0
- %nop7226 = alloca i1, i1 0
- %nop7227 = alloca i1, i1 0
- %nop7228 = alloca i1, i1 0
- %nop7229 = alloca i1, i1 0
- %nop7230 = alloca i1, i1 0
- %nop7231 = alloca i1, i1 0
- %nop7232 = alloca i1, i1 0
- %nop7233 = alloca i1, i1 0
- %nop7234 = alloca i1, i1 0
- %nop7235 = alloca i1, i1 0
- %nop7236 = alloca i1, i1 0
- %nop7237 = alloca i1, i1 0
- %nop7238 = alloca i1, i1 0
- %nop7239 = alloca i1, i1 0
- %nop7240 = alloca i1, i1 0
- %nop7241 = alloca i1, i1 0
- %nop7242 = alloca i1, i1 0
- %nop7243 = alloca i1, i1 0
- %nop7244 = alloca i1, i1 0
- %nop7245 = alloca i1, i1 0
- %nop7246 = alloca i1, i1 0
- %nop7247 = alloca i1, i1 0
- %nop7248 = alloca i1, i1 0
- %nop7249 = alloca i1, i1 0
- %nop7250 = alloca i1, i1 0
- %nop7251 = alloca i1, i1 0
- %nop7252 = alloca i1, i1 0
- %nop7253 = alloca i1, i1 0
- %nop7254 = alloca i1, i1 0
- %nop7255 = alloca i1, i1 0
- %nop7256 = alloca i1, i1 0
- %nop7257 = alloca i1, i1 0
- %nop7258 = alloca i1, i1 0
- %nop7259 = alloca i1, i1 0
- %nop7260 = alloca i1, i1 0
- %nop7261 = alloca i1, i1 0
- %nop7262 = alloca i1, i1 0
- %nop7263 = alloca i1, i1 0
- %nop7264 = alloca i1, i1 0
- %nop7265 = alloca i1, i1 0
- %nop7266 = alloca i1, i1 0
- %nop7267 = alloca i1, i1 0
- %nop7268 = alloca i1, i1 0
- %nop7269 = alloca i1, i1 0
- %nop7270 = alloca i1, i1 0
- %nop7271 = alloca i1, i1 0
- %nop7272 = alloca i1, i1 0
- %nop7273 = alloca i1, i1 0
- %nop7274 = alloca i1, i1 0
- %nop7275 = alloca i1, i1 0
- %nop7276 = alloca i1, i1 0
- %nop7277 = alloca i1, i1 0
- %nop7278 = alloca i1, i1 0
- %nop7279 = alloca i1, i1 0
- %nop7280 = alloca i1, i1 0
- %nop7281 = alloca i1, i1 0
- %nop7282 = alloca i1, i1 0
- %nop7283 = alloca i1, i1 0
- %nop7284 = alloca i1, i1 0
- %nop7285 = alloca i1, i1 0
- %nop7286 = alloca i1, i1 0
- %nop7287 = alloca i1, i1 0
- %nop7288 = alloca i1, i1 0
- %nop7289 = alloca i1, i1 0
- %nop7290 = alloca i1, i1 0
- %nop7291 = alloca i1, i1 0
- %nop7292 = alloca i1, i1 0
- %nop7293 = alloca i1, i1 0
- %nop7294 = alloca i1, i1 0
- %nop7295 = alloca i1, i1 0
- %nop7296 = alloca i1, i1 0
- %nop7297 = alloca i1, i1 0
- %nop7298 = alloca i1, i1 0
- %nop7299 = alloca i1, i1 0
- %nop7300 = alloca i1, i1 0
- %nop7301 = alloca i1, i1 0
- %nop7302 = alloca i1, i1 0
- %nop7303 = alloca i1, i1 0
- %nop7304 = alloca i1, i1 0
- %nop7305 = alloca i1, i1 0
- %nop7306 = alloca i1, i1 0
- %nop7307 = alloca i1, i1 0
- %nop7308 = alloca i1, i1 0
- %nop7309 = alloca i1, i1 0
- %nop7310 = alloca i1, i1 0
- %nop7311 = alloca i1, i1 0
- %nop7312 = alloca i1, i1 0
- %nop7313 = alloca i1, i1 0
- %nop7314 = alloca i1, i1 0
- %nop7315 = alloca i1, i1 0
- %nop7316 = alloca i1, i1 0
- %nop7317 = alloca i1, i1 0
- %nop7318 = alloca i1, i1 0
- %nop7319 = alloca i1, i1 0
- %nop7320 = alloca i1, i1 0
- %nop7321 = alloca i1, i1 0
- %nop7322 = alloca i1, i1 0
- %nop7323 = alloca i1, i1 0
- %nop7324 = alloca i1, i1 0
- %nop7325 = alloca i1, i1 0
- %nop7326 = alloca i1, i1 0
- %nop7327 = alloca i1, i1 0
- %nop7328 = alloca i1, i1 0
- %nop7329 = alloca i1, i1 0
- %nop7330 = alloca i1, i1 0
- %nop7331 = alloca i1, i1 0
- %nop7332 = alloca i1, i1 0
- %nop7333 = alloca i1, i1 0
- %nop7334 = alloca i1, i1 0
- %nop7335 = alloca i1, i1 0
- %nop7336 = alloca i1, i1 0
- %nop7337 = alloca i1, i1 0
- %nop7338 = alloca i1, i1 0
- %nop7339 = alloca i1, i1 0
- %nop7340 = alloca i1, i1 0
- %nop7341 = alloca i1, i1 0
- %nop7342 = alloca i1, i1 0
- %nop7343 = alloca i1, i1 0
- %nop7344 = alloca i1, i1 0
- %nop7345 = alloca i1, i1 0
- %nop7346 = alloca i1, i1 0
- %nop7347 = alloca i1, i1 0
- %nop7348 = alloca i1, i1 0
- %nop7349 = alloca i1, i1 0
- %nop7350 = alloca i1, i1 0
- %nop7351 = alloca i1, i1 0
- %nop7352 = alloca i1, i1 0
- %nop7353 = alloca i1, i1 0
- %nop7354 = alloca i1, i1 0
- %nop7355 = alloca i1, i1 0
- %nop7356 = alloca i1, i1 0
- %nop7357 = alloca i1, i1 0
- %nop7358 = alloca i1, i1 0
- %nop7359 = alloca i1, i1 0
- %nop7360 = alloca i1, i1 0
- %nop7361 = alloca i1, i1 0
- %nop7362 = alloca i1, i1 0
- %nop7363 = alloca i1, i1 0
- %nop7364 = alloca i1, i1 0
- %nop7365 = alloca i1, i1 0
- %nop7366 = alloca i1, i1 0
- %nop7367 = alloca i1, i1 0
- %nop7368 = alloca i1, i1 0
- %nop7369 = alloca i1, i1 0
- %nop7370 = alloca i1, i1 0
- %nop7371 = alloca i1, i1 0
- %nop7372 = alloca i1, i1 0
- %nop7373 = alloca i1, i1 0
- %nop7374 = alloca i1, i1 0
- %nop7375 = alloca i1, i1 0
- %nop7376 = alloca i1, i1 0
- %nop7377 = alloca i1, i1 0
- %nop7378 = alloca i1, i1 0
- %nop7379 = alloca i1, i1 0
- %nop7380 = alloca i1, i1 0
- %nop7381 = alloca i1, i1 0
- %nop7382 = alloca i1, i1 0
- %nop7383 = alloca i1, i1 0
- %nop7384 = alloca i1, i1 0
- %nop7385 = alloca i1, i1 0
- %nop7386 = alloca i1, i1 0
- %nop7387 = alloca i1, i1 0
- %nop7388 = alloca i1, i1 0
- %nop7389 = alloca i1, i1 0
- %nop7390 = alloca i1, i1 0
- %nop7391 = alloca i1, i1 0
- %nop7392 = alloca i1, i1 0
- %nop7393 = alloca i1, i1 0
- %nop7394 = alloca i1, i1 0
- %nop7395 = alloca i1, i1 0
- %nop7396 = alloca i1, i1 0
- %nop7397 = alloca i1, i1 0
- %nop7398 = alloca i1, i1 0
- %nop7399 = alloca i1, i1 0
- %nop7400 = alloca i1, i1 0
- %nop7401 = alloca i1, i1 0
- %nop7402 = alloca i1, i1 0
- %nop7403 = alloca i1, i1 0
- %nop7404 = alloca i1, i1 0
- %nop7405 = alloca i1, i1 0
- %nop7406 = alloca i1, i1 0
- %nop7407 = alloca i1, i1 0
- %nop7408 = alloca i1, i1 0
- %nop7409 = alloca i1, i1 0
- %nop7410 = alloca i1, i1 0
- %nop7411 = alloca i1, i1 0
- %nop7412 = alloca i1, i1 0
- %nop7413 = alloca i1, i1 0
- %nop7414 = alloca i1, i1 0
- %nop7415 = alloca i1, i1 0
- %nop7416 = alloca i1, i1 0
- %nop7417 = alloca i1, i1 0
- %nop7418 = alloca i1, i1 0
- %nop7419 = alloca i1, i1 0
- %nop7420 = alloca i1, i1 0
- %nop7421 = alloca i1, i1 0
- %nop7422 = alloca i1, i1 0
- %nop7423 = alloca i1, i1 0
- %nop7424 = alloca i1, i1 0
- %nop7425 = alloca i1, i1 0
- %nop7426 = alloca i1, i1 0
- %nop7427 = alloca i1, i1 0
- %nop7428 = alloca i1, i1 0
- %nop7429 = alloca i1, i1 0
- %nop7430 = alloca i1, i1 0
- %nop7431 = alloca i1, i1 0
- %nop7432 = alloca i1, i1 0
- %nop7433 = alloca i1, i1 0
- %nop7434 = alloca i1, i1 0
- %nop7435 = alloca i1, i1 0
- %nop7436 = alloca i1, i1 0
- %nop7437 = alloca i1, i1 0
- %nop7438 = alloca i1, i1 0
- %nop7439 = alloca i1, i1 0
- %nop7440 = alloca i1, i1 0
- %nop7441 = alloca i1, i1 0
- %nop7442 = alloca i1, i1 0
- %nop7443 = alloca i1, i1 0
- %nop7444 = alloca i1, i1 0
- %nop7445 = alloca i1, i1 0
- %nop7446 = alloca i1, i1 0
- %nop7447 = alloca i1, i1 0
- %nop7448 = alloca i1, i1 0
- %nop7449 = alloca i1, i1 0
- %nop7450 = alloca i1, i1 0
- %nop7451 = alloca i1, i1 0
- %nop7452 = alloca i1, i1 0
- %nop7453 = alloca i1, i1 0
- %nop7454 = alloca i1, i1 0
- %nop7455 = alloca i1, i1 0
- %nop7456 = alloca i1, i1 0
- %nop7457 = alloca i1, i1 0
- %nop7458 = alloca i1, i1 0
- %nop7459 = alloca i1, i1 0
- %nop7460 = alloca i1, i1 0
- %nop7461 = alloca i1, i1 0
- %nop7462 = alloca i1, i1 0
- %nop7463 = alloca i1, i1 0
- %nop7464 = alloca i1, i1 0
- %nop7465 = alloca i1, i1 0
- %nop7466 = alloca i1, i1 0
- %nop7467 = alloca i1, i1 0
- %nop7468 = alloca i1, i1 0
- %nop7469 = alloca i1, i1 0
- %nop7470 = alloca i1, i1 0
- %nop7471 = alloca i1, i1 0
- %nop7472 = alloca i1, i1 0
- %nop7473 = alloca i1, i1 0
- %nop7474 = alloca i1, i1 0
- %nop7475 = alloca i1, i1 0
- %nop7476 = alloca i1, i1 0
- %nop7477 = alloca i1, i1 0
- %nop7478 = alloca i1, i1 0
- %nop7479 = alloca i1, i1 0
- %nop7480 = alloca i1, i1 0
- %nop7481 = alloca i1, i1 0
- %nop7482 = alloca i1, i1 0
- %nop7483 = alloca i1, i1 0
- %nop7484 = alloca i1, i1 0
- %nop7485 = alloca i1, i1 0
- %nop7486 = alloca i1, i1 0
- %nop7487 = alloca i1, i1 0
- %nop7488 = alloca i1, i1 0
- %nop7489 = alloca i1, i1 0
- %nop7490 = alloca i1, i1 0
- %nop7491 = alloca i1, i1 0
- %nop7492 = alloca i1, i1 0
- %nop7493 = alloca i1, i1 0
- %nop7494 = alloca i1, i1 0
- %nop7495 = alloca i1, i1 0
- %nop7496 = alloca i1, i1 0
- %nop7497 = alloca i1, i1 0
- %nop7498 = alloca i1, i1 0
- %nop7499 = alloca i1, i1 0
- %nop7500 = alloca i1, i1 0
- %nop7501 = alloca i1, i1 0
- %nop7502 = alloca i1, i1 0
- %nop7503 = alloca i1, i1 0
- %nop7504 = alloca i1, i1 0
- %nop7505 = alloca i1, i1 0
- %nop7506 = alloca i1, i1 0
- %nop7507 = alloca i1, i1 0
- %nop7508 = alloca i1, i1 0
- %nop7509 = alloca i1, i1 0
- %nop7510 = alloca i1, i1 0
- %nop7511 = alloca i1, i1 0
- %nop7512 = alloca i1, i1 0
- %nop7513 = alloca i1, i1 0
- %nop7514 = alloca i1, i1 0
- %nop7515 = alloca i1, i1 0
- %nop7516 = alloca i1, i1 0
- %nop7517 = alloca i1, i1 0
- %nop7518 = alloca i1, i1 0
- %nop7519 = alloca i1, i1 0
- %nop7520 = alloca i1, i1 0
- %nop7521 = alloca i1, i1 0
- %nop7522 = alloca i1, i1 0
- %nop7523 = alloca i1, i1 0
- %nop7524 = alloca i1, i1 0
- %nop7525 = alloca i1, i1 0
- %nop7526 = alloca i1, i1 0
- %nop7527 = alloca i1, i1 0
- %nop7528 = alloca i1, i1 0
- %nop7529 = alloca i1, i1 0
- %nop7530 = alloca i1, i1 0
- %nop7531 = alloca i1, i1 0
- %nop7532 = alloca i1, i1 0
- %nop7533 = alloca i1, i1 0
- %nop7534 = alloca i1, i1 0
- %nop7535 = alloca i1, i1 0
- %nop7536 = alloca i1, i1 0
- %nop7537 = alloca i1, i1 0
- %nop7538 = alloca i1, i1 0
- %nop7539 = alloca i1, i1 0
- %nop7540 = alloca i1, i1 0
- %nop7541 = alloca i1, i1 0
- %nop7542 = alloca i1, i1 0
- %nop7543 = alloca i1, i1 0
- %nop7544 = alloca i1, i1 0
- %nop7545 = alloca i1, i1 0
- %nop7546 = alloca i1, i1 0
- %nop7547 = alloca i1, i1 0
- %nop7548 = alloca i1, i1 0
- %nop7549 = alloca i1, i1 0
- %nop7550 = alloca i1, i1 0
- %nop7551 = alloca i1, i1 0
- %nop7552 = alloca i1, i1 0
- %nop7553 = alloca i1, i1 0
- %nop7554 = alloca i1, i1 0
- %nop7555 = alloca i1, i1 0
- %nop7556 = alloca i1, i1 0
- %nop7557 = alloca i1, i1 0
- %nop7558 = alloca i1, i1 0
- %nop7559 = alloca i1, i1 0
- %nop7560 = alloca i1, i1 0
- %nop7561 = alloca i1, i1 0
- %nop7562 = alloca i1, i1 0
- %nop7563 = alloca i1, i1 0
- %nop7564 = alloca i1, i1 0
- %nop7565 = alloca i1, i1 0
- %nop7566 = alloca i1, i1 0
- %nop7567 = alloca i1, i1 0
- %nop7568 = alloca i1, i1 0
- %nop7569 = alloca i1, i1 0
- %nop7570 = alloca i1, i1 0
- %nop7571 = alloca i1, i1 0
- %nop7572 = alloca i1, i1 0
- %nop7573 = alloca i1, i1 0
- %nop7574 = alloca i1, i1 0
- %nop7575 = alloca i1, i1 0
- %nop7576 = alloca i1, i1 0
- %nop7577 = alloca i1, i1 0
- %nop7578 = alloca i1, i1 0
- %nop7579 = alloca i1, i1 0
- %nop7580 = alloca i1, i1 0
- %nop7581 = alloca i1, i1 0
- %nop7582 = alloca i1, i1 0
- %nop7583 = alloca i1, i1 0
- %nop7584 = alloca i1, i1 0
- %nop7585 = alloca i1, i1 0
- %nop7586 = alloca i1, i1 0
- %nop7587 = alloca i1, i1 0
- %nop7588 = alloca i1, i1 0
- %nop7589 = alloca i1, i1 0
- %nop7590 = alloca i1, i1 0
- %nop7591 = alloca i1, i1 0
- %nop7592 = alloca i1, i1 0
- %nop7593 = alloca i1, i1 0
- %nop7594 = alloca i1, i1 0
- %nop7595 = alloca i1, i1 0
- %nop7596 = alloca i1, i1 0
- %nop7597 = alloca i1, i1 0
- %nop7598 = alloca i1, i1 0
- %nop7599 = alloca i1, i1 0
- %nop7600 = alloca i1, i1 0
- %nop7601 = alloca i1, i1 0
- %nop7602 = alloca i1, i1 0
- %nop7603 = alloca i1, i1 0
- %nop7604 = alloca i1, i1 0
- %nop7605 = alloca i1, i1 0
- %nop7606 = alloca i1, i1 0
- %nop7607 = alloca i1, i1 0
- %nop7608 = alloca i1, i1 0
- %nop7609 = alloca i1, i1 0
- %nop7610 = alloca i1, i1 0
- %nop7611 = alloca i1, i1 0
- %nop7612 = alloca i1, i1 0
- %nop7613 = alloca i1, i1 0
- %nop7614 = alloca i1, i1 0
- %nop7615 = alloca i1, i1 0
- %nop7616 = alloca i1, i1 0
- %nop7617 = alloca i1, i1 0
- %nop7618 = alloca i1, i1 0
- %nop7619 = alloca i1, i1 0
- %nop7620 = alloca i1, i1 0
- %nop7621 = alloca i1, i1 0
- %nop7622 = alloca i1, i1 0
- %nop7623 = alloca i1, i1 0
- %nop7624 = alloca i1, i1 0
- %nop7625 = alloca i1, i1 0
- %nop7626 = alloca i1, i1 0
- %nop7627 = alloca i1, i1 0
- %nop7628 = alloca i1, i1 0
- %nop7629 = alloca i1, i1 0
- %nop7630 = alloca i1, i1 0
- %nop7631 = alloca i1, i1 0
- %nop7632 = alloca i1, i1 0
- %nop7633 = alloca i1, i1 0
- %nop7634 = alloca i1, i1 0
- %nop7635 = alloca i1, i1 0
- %nop7636 = alloca i1, i1 0
- %nop7637 = alloca i1, i1 0
- %nop7638 = alloca i1, i1 0
- %nop7639 = alloca i1, i1 0
- %nop7640 = alloca i1, i1 0
- %nop7641 = alloca i1, i1 0
- %nop7642 = alloca i1, i1 0
- %nop7643 = alloca i1, i1 0
- %nop7644 = alloca i1, i1 0
- %nop7645 = alloca i1, i1 0
- %nop7646 = alloca i1, i1 0
- %nop7647 = alloca i1, i1 0
- %nop7648 = alloca i1, i1 0
- %nop7649 = alloca i1, i1 0
- %nop7650 = alloca i1, i1 0
- %nop7651 = alloca i1, i1 0
- %nop7652 = alloca i1, i1 0
- %nop7653 = alloca i1, i1 0
- %nop7654 = alloca i1, i1 0
- %nop7655 = alloca i1, i1 0
- %nop7656 = alloca i1, i1 0
- %nop7657 = alloca i1, i1 0
- %nop7658 = alloca i1, i1 0
- %nop7659 = alloca i1, i1 0
- %nop7660 = alloca i1, i1 0
- %nop7661 = alloca i1, i1 0
- %nop7662 = alloca i1, i1 0
- %nop7663 = alloca i1, i1 0
- %nop7664 = alloca i1, i1 0
- %nop7665 = alloca i1, i1 0
- %nop7666 = alloca i1, i1 0
- %nop7667 = alloca i1, i1 0
- %nop7668 = alloca i1, i1 0
- %nop7669 = alloca i1, i1 0
- %nop7670 = alloca i1, i1 0
- %nop7671 = alloca i1, i1 0
- %nop7672 = alloca i1, i1 0
- %nop7673 = alloca i1, i1 0
- %nop7674 = alloca i1, i1 0
- %nop7675 = alloca i1, i1 0
- %nop7676 = alloca i1, i1 0
- %nop7677 = alloca i1, i1 0
- %nop7678 = alloca i1, i1 0
- %nop7679 = alloca i1, i1 0
- %nop7680 = alloca i1, i1 0
- %nop7681 = alloca i1, i1 0
- %nop7682 = alloca i1, i1 0
- %nop7683 = alloca i1, i1 0
- %nop7684 = alloca i1, i1 0
- %nop7685 = alloca i1, i1 0
- %nop7686 = alloca i1, i1 0
- %nop7687 = alloca i1, i1 0
- %nop7688 = alloca i1, i1 0
- %nop7689 = alloca i1, i1 0
- %nop7690 = alloca i1, i1 0
- %nop7691 = alloca i1, i1 0
- %nop7692 = alloca i1, i1 0
- %nop7693 = alloca i1, i1 0
- %nop7694 = alloca i1, i1 0
- %nop7695 = alloca i1, i1 0
- %nop7696 = alloca i1, i1 0
- %nop7697 = alloca i1, i1 0
- %nop7698 = alloca i1, i1 0
- %nop7699 = alloca i1, i1 0
- %nop7700 = alloca i1, i1 0
- %nop7701 = alloca i1, i1 0
- %nop7702 = alloca i1, i1 0
- %nop7703 = alloca i1, i1 0
- %nop7704 = alloca i1, i1 0
- %nop7705 = alloca i1, i1 0
- %nop7706 = alloca i1, i1 0
- %nop7707 = alloca i1, i1 0
- %nop7708 = alloca i1, i1 0
- %nop7709 = alloca i1, i1 0
- %nop7710 = alloca i1, i1 0
- %nop7711 = alloca i1, i1 0
- %nop7712 = alloca i1, i1 0
- %nop7713 = alloca i1, i1 0
- %nop7714 = alloca i1, i1 0
- %nop7715 = alloca i1, i1 0
- %nop7716 = alloca i1, i1 0
- %nop7717 = alloca i1, i1 0
- %nop7718 = alloca i1, i1 0
- %nop7719 = alloca i1, i1 0
- %nop7720 = alloca i1, i1 0
- %nop7721 = alloca i1, i1 0
- %nop7722 = alloca i1, i1 0
- %nop7723 = alloca i1, i1 0
- %nop7724 = alloca i1, i1 0
- %nop7725 = alloca i1, i1 0
- %nop7726 = alloca i1, i1 0
- %nop7727 = alloca i1, i1 0
- %nop7728 = alloca i1, i1 0
- %nop7729 = alloca i1, i1 0
- %nop7730 = alloca i1, i1 0
- %nop7731 = alloca i1, i1 0
- %nop7732 = alloca i1, i1 0
- %nop7733 = alloca i1, i1 0
- %nop7734 = alloca i1, i1 0
- %nop7735 = alloca i1, i1 0
- %nop7736 = alloca i1, i1 0
- %nop7737 = alloca i1, i1 0
- %nop7738 = alloca i1, i1 0
- %nop7739 = alloca i1, i1 0
- %nop7740 = alloca i1, i1 0
- %nop7741 = alloca i1, i1 0
- %nop7742 = alloca i1, i1 0
- %nop7743 = alloca i1, i1 0
- %nop7744 = alloca i1, i1 0
- %nop7745 = alloca i1, i1 0
- %nop7746 = alloca i1, i1 0
- %nop7747 = alloca i1, i1 0
- %nop7748 = alloca i1, i1 0
- %nop7749 = alloca i1, i1 0
- %nop7750 = alloca i1, i1 0
- %nop7751 = alloca i1, i1 0
- %nop7752 = alloca i1, i1 0
- %nop7753 = alloca i1, i1 0
- %nop7754 = alloca i1, i1 0
- %nop7755 = alloca i1, i1 0
- %nop7756 = alloca i1, i1 0
- %nop7757 = alloca i1, i1 0
- %nop7758 = alloca i1, i1 0
- %nop7759 = alloca i1, i1 0
- %nop7760 = alloca i1, i1 0
- %nop7761 = alloca i1, i1 0
- %nop7762 = alloca i1, i1 0
- %nop7763 = alloca i1, i1 0
- %nop7764 = alloca i1, i1 0
- %nop7765 = alloca i1, i1 0
- %nop7766 = alloca i1, i1 0
- %nop7767 = alloca i1, i1 0
- %nop7768 = alloca i1, i1 0
- %nop7769 = alloca i1, i1 0
- %nop7770 = alloca i1, i1 0
- %nop7771 = alloca i1, i1 0
- %nop7772 = alloca i1, i1 0
- %nop7773 = alloca i1, i1 0
- %nop7774 = alloca i1, i1 0
- %nop7775 = alloca i1, i1 0
- %nop7776 = alloca i1, i1 0
- %nop7777 = alloca i1, i1 0
- %nop7778 = alloca i1, i1 0
- %nop7779 = alloca i1, i1 0
- %nop7780 = alloca i1, i1 0
- %nop7781 = alloca i1, i1 0
- %nop7782 = alloca i1, i1 0
- %nop7783 = alloca i1, i1 0
- %nop7784 = alloca i1, i1 0
- %nop7785 = alloca i1, i1 0
- %nop7786 = alloca i1, i1 0
- %nop7787 = alloca i1, i1 0
- %nop7788 = alloca i1, i1 0
- %nop7789 = alloca i1, i1 0
- %nop7790 = alloca i1, i1 0
- %nop7791 = alloca i1, i1 0
- %nop7792 = alloca i1, i1 0
- %nop7793 = alloca i1, i1 0
- %nop7794 = alloca i1, i1 0
- %nop7795 = alloca i1, i1 0
- %nop7796 = alloca i1, i1 0
- %nop7797 = alloca i1, i1 0
- %nop7798 = alloca i1, i1 0
- %nop7799 = alloca i1, i1 0
- %nop7800 = alloca i1, i1 0
- %nop7801 = alloca i1, i1 0
- %nop7802 = alloca i1, i1 0
- %nop7803 = alloca i1, i1 0
- %nop7804 = alloca i1, i1 0
- %nop7805 = alloca i1, i1 0
- %nop7806 = alloca i1, i1 0
- %nop7807 = alloca i1, i1 0
- %nop7808 = alloca i1, i1 0
- %nop7809 = alloca i1, i1 0
- %nop7810 = alloca i1, i1 0
- %nop7811 = alloca i1, i1 0
- %nop7812 = alloca i1, i1 0
- %nop7813 = alloca i1, i1 0
- %nop7814 = alloca i1, i1 0
- %nop7815 = alloca i1, i1 0
- %nop7816 = alloca i1, i1 0
- %nop7817 = alloca i1, i1 0
- %nop7818 = alloca i1, i1 0
- %nop7819 = alloca i1, i1 0
- %nop7820 = alloca i1, i1 0
- %nop7821 = alloca i1, i1 0
- %nop7822 = alloca i1, i1 0
- %nop7823 = alloca i1, i1 0
- %nop7824 = alloca i1, i1 0
- %nop7825 = alloca i1, i1 0
- %nop7826 = alloca i1, i1 0
- %nop7827 = alloca i1, i1 0
- %nop7828 = alloca i1, i1 0
- %nop7829 = alloca i1, i1 0
- %nop7830 = alloca i1, i1 0
- %nop7831 = alloca i1, i1 0
- %nop7832 = alloca i1, i1 0
- %nop7833 = alloca i1, i1 0
- %nop7834 = alloca i1, i1 0
- %nop7835 = alloca i1, i1 0
- %nop7836 = alloca i1, i1 0
- %nop7837 = alloca i1, i1 0
- %nop7838 = alloca i1, i1 0
- %nop7839 = alloca i1, i1 0
- %nop7840 = alloca i1, i1 0
- %nop7841 = alloca i1, i1 0
- %nop7842 = alloca i1, i1 0
- %nop7843 = alloca i1, i1 0
- %nop7844 = alloca i1, i1 0
- %nop7845 = alloca i1, i1 0
- %nop7846 = alloca i1, i1 0
- %nop7847 = alloca i1, i1 0
- %nop7848 = alloca i1, i1 0
- %nop7849 = alloca i1, i1 0
- %nop7850 = alloca i1, i1 0
- %nop7851 = alloca i1, i1 0
- %nop7852 = alloca i1, i1 0
- %nop7853 = alloca i1, i1 0
- %nop7854 = alloca i1, i1 0
- %nop7855 = alloca i1, i1 0
- %nop7856 = alloca i1, i1 0
- %nop7857 = alloca i1, i1 0
- %nop7858 = alloca i1, i1 0
- %nop7859 = alloca i1, i1 0
- %nop7860 = alloca i1, i1 0
- %nop7861 = alloca i1, i1 0
- %nop7862 = alloca i1, i1 0
- %nop7863 = alloca i1, i1 0
- %nop7864 = alloca i1, i1 0
- %nop7865 = alloca i1, i1 0
- %nop7866 = alloca i1, i1 0
- %nop7867 = alloca i1, i1 0
- %nop7868 = alloca i1, i1 0
- %nop7869 = alloca i1, i1 0
- %nop7870 = alloca i1, i1 0
- %nop7871 = alloca i1, i1 0
- %nop7872 = alloca i1, i1 0
- %nop7873 = alloca i1, i1 0
- %nop7874 = alloca i1, i1 0
- %nop7875 = alloca i1, i1 0
- %nop7876 = alloca i1, i1 0
- %nop7877 = alloca i1, i1 0
- %nop7878 = alloca i1, i1 0
- %nop7879 = alloca i1, i1 0
- %nop7880 = alloca i1, i1 0
- %nop7881 = alloca i1, i1 0
- %nop7882 = alloca i1, i1 0
- %nop7883 = alloca i1, i1 0
- %nop7884 = alloca i1, i1 0
- %nop7885 = alloca i1, i1 0
- %nop7886 = alloca i1, i1 0
- %nop7887 = alloca i1, i1 0
- %nop7888 = alloca i1, i1 0
- %nop7889 = alloca i1, i1 0
- %nop7890 = alloca i1, i1 0
- %nop7891 = alloca i1, i1 0
- %nop7892 = alloca i1, i1 0
- %nop7893 = alloca i1, i1 0
- %nop7894 = alloca i1, i1 0
- %nop7895 = alloca i1, i1 0
- %nop7896 = alloca i1, i1 0
- %nop7897 = alloca i1, i1 0
- %nop7898 = alloca i1, i1 0
- %nop7899 = alloca i1, i1 0
- %nop7900 = alloca i1, i1 0
- %nop7901 = alloca i1, i1 0
- %nop7902 = alloca i1, i1 0
- %nop7903 = alloca i1, i1 0
- %nop7904 = alloca i1, i1 0
- %nop7905 = alloca i1, i1 0
- %nop7906 = alloca i1, i1 0
- %nop7907 = alloca i1, i1 0
- %nop7908 = alloca i1, i1 0
- %nop7909 = alloca i1, i1 0
- %nop7910 = alloca i1, i1 0
- %nop7911 = alloca i1, i1 0
- %nop7912 = alloca i1, i1 0
- %nop7913 = alloca i1, i1 0
- %nop7914 = alloca i1, i1 0
- %nop7915 = alloca i1, i1 0
- %nop7916 = alloca i1, i1 0
- %nop7917 = alloca i1, i1 0
- %nop7918 = alloca i1, i1 0
- %nop7919 = alloca i1, i1 0
- %nop7920 = alloca i1, i1 0
- %nop7921 = alloca i1, i1 0
- %nop7922 = alloca i1, i1 0
- %nop7923 = alloca i1, i1 0
- %nop7924 = alloca i1, i1 0
- %nop7925 = alloca i1, i1 0
- %nop7926 = alloca i1, i1 0
- %nop7927 = alloca i1, i1 0
- %nop7928 = alloca i1, i1 0
- %nop7929 = alloca i1, i1 0
- %nop7930 = alloca i1, i1 0
- %nop7931 = alloca i1, i1 0
- %nop7932 = alloca i1, i1 0
- %nop7933 = alloca i1, i1 0
- %nop7934 = alloca i1, i1 0
- %nop7935 = alloca i1, i1 0
- %nop7936 = alloca i1, i1 0
- %nop7937 = alloca i1, i1 0
- %nop7938 = alloca i1, i1 0
- %nop7939 = alloca i1, i1 0
- %nop7940 = alloca i1, i1 0
- %nop7941 = alloca i1, i1 0
- %nop7942 = alloca i1, i1 0
- %nop7943 = alloca i1, i1 0
- %nop7944 = alloca i1, i1 0
- %nop7945 = alloca i1, i1 0
- %nop7946 = alloca i1, i1 0
- %nop7947 = alloca i1, i1 0
- %nop7948 = alloca i1, i1 0
- %nop7949 = alloca i1, i1 0
- %nop7950 = alloca i1, i1 0
- %nop7951 = alloca i1, i1 0
- %nop7952 = alloca i1, i1 0
- %nop7953 = alloca i1, i1 0
- %nop7954 = alloca i1, i1 0
- %nop7955 = alloca i1, i1 0
- %nop7956 = alloca i1, i1 0
- %nop7957 = alloca i1, i1 0
- %nop7958 = alloca i1, i1 0
- %nop7959 = alloca i1, i1 0
- %nop7960 = alloca i1, i1 0
- %nop7961 = alloca i1, i1 0
- %nop7962 = alloca i1, i1 0
- %nop7963 = alloca i1, i1 0
- %nop7964 = alloca i1, i1 0
- %nop7965 = alloca i1, i1 0
- %nop7966 = alloca i1, i1 0
- %nop7967 = alloca i1, i1 0
- %nop7968 = alloca i1, i1 0
- %nop7969 = alloca i1, i1 0
- %nop7970 = alloca i1, i1 0
- %nop7971 = alloca i1, i1 0
- %nop7972 = alloca i1, i1 0
- %nop7973 = alloca i1, i1 0
- %nop7974 = alloca i1, i1 0
- %nop7975 = alloca i1, i1 0
- %nop7976 = alloca i1, i1 0
- %nop7977 = alloca i1, i1 0
- %nop7978 = alloca i1, i1 0
- %nop7979 = alloca i1, i1 0
- %nop7980 = alloca i1, i1 0
- %nop7981 = alloca i1, i1 0
- %nop7982 = alloca i1, i1 0
- %nop7983 = alloca i1, i1 0
- %nop7984 = alloca i1, i1 0
- %nop7985 = alloca i1, i1 0
- %nop7986 = alloca i1, i1 0
- %nop7987 = alloca i1, i1 0
- %nop7988 = alloca i1, i1 0
- %nop7989 = alloca i1, i1 0
- %nop7990 = alloca i1, i1 0
- %nop7991 = alloca i1, i1 0
- %nop7992 = alloca i1, i1 0
- %nop7993 = alloca i1, i1 0
- %nop7994 = alloca i1, i1 0
- %nop7995 = alloca i1, i1 0
- %nop7996 = alloca i1, i1 0
- %nop7997 = alloca i1, i1 0
- %nop7998 = alloca i1, i1 0
- %nop7999 = alloca i1, i1 0
- %nop8000 = alloca i1, i1 0
- %nop8001 = alloca i1, i1 0
- %nop8002 = alloca i1, i1 0
- %nop8003 = alloca i1, i1 0
- %nop8004 = alloca i1, i1 0
- %nop8005 = alloca i1, i1 0
- %nop8006 = alloca i1, i1 0
- %nop8007 = alloca i1, i1 0
- %nop8008 = alloca i1, i1 0
- %nop8009 = alloca i1, i1 0
- %nop8010 = alloca i1, i1 0
- %nop8011 = alloca i1, i1 0
- %nop8012 = alloca i1, i1 0
- %nop8013 = alloca i1, i1 0
- %nop8014 = alloca i1, i1 0
- %nop8015 = alloca i1, i1 0
- %nop8016 = alloca i1, i1 0
- %nop8017 = alloca i1, i1 0
- %nop8018 = alloca i1, i1 0
- %nop8019 = alloca i1, i1 0
- %nop8020 = alloca i1, i1 0
- %nop8021 = alloca i1, i1 0
- %nop8022 = alloca i1, i1 0
- %nop8023 = alloca i1, i1 0
- %nop8024 = alloca i1, i1 0
- %nop8025 = alloca i1, i1 0
- %nop8026 = alloca i1, i1 0
- %nop8027 = alloca i1, i1 0
- %nop8028 = alloca i1, i1 0
- %nop8029 = alloca i1, i1 0
- %nop8030 = alloca i1, i1 0
- %nop8031 = alloca i1, i1 0
- %nop8032 = alloca i1, i1 0
- %nop8033 = alloca i1, i1 0
- %nop8034 = alloca i1, i1 0
- %nop8035 = alloca i1, i1 0
- %nop8036 = alloca i1, i1 0
- %nop8037 = alloca i1, i1 0
- %nop8038 = alloca i1, i1 0
- %nop8039 = alloca i1, i1 0
- %nop8040 = alloca i1, i1 0
- %nop8041 = alloca i1, i1 0
- %nop8042 = alloca i1, i1 0
- %nop8043 = alloca i1, i1 0
- %nop8044 = alloca i1, i1 0
- %nop8045 = alloca i1, i1 0
- %nop8046 = alloca i1, i1 0
- %nop8047 = alloca i1, i1 0
- %nop8048 = alloca i1, i1 0
- %nop8049 = alloca i1, i1 0
- %nop8050 = alloca i1, i1 0
- %nop8051 = alloca i1, i1 0
- %nop8052 = alloca i1, i1 0
- %nop8053 = alloca i1, i1 0
- %nop8054 = alloca i1, i1 0
- %nop8055 = alloca i1, i1 0
- %nop8056 = alloca i1, i1 0
- %nop8057 = alloca i1, i1 0
- %nop8058 = alloca i1, i1 0
- %nop8059 = alloca i1, i1 0
- %nop8060 = alloca i1, i1 0
- %nop8061 = alloca i1, i1 0
- %nop8062 = alloca i1, i1 0
- %nop8063 = alloca i1, i1 0
- %nop8064 = alloca i1, i1 0
- %nop8065 = alloca i1, i1 0
- %nop8066 = alloca i1, i1 0
- %nop8067 = alloca i1, i1 0
- %nop8068 = alloca i1, i1 0
- %nop8069 = alloca i1, i1 0
- %nop8070 = alloca i1, i1 0
- %nop8071 = alloca i1, i1 0
- %nop8072 = alloca i1, i1 0
- %nop8073 = alloca i1, i1 0
- %nop8074 = alloca i1, i1 0
- %nop8075 = alloca i1, i1 0
- %nop8076 = alloca i1, i1 0
- %nop8077 = alloca i1, i1 0
- %nop8078 = alloca i1, i1 0
- %nop8079 = alloca i1, i1 0
- %nop8080 = alloca i1, i1 0
- %nop8081 = alloca i1, i1 0
- %nop8082 = alloca i1, i1 0
- %nop8083 = alloca i1, i1 0
- %nop8084 = alloca i1, i1 0
- %nop8085 = alloca i1, i1 0
- %nop8086 = alloca i1, i1 0
- %nop8087 = alloca i1, i1 0
- %nop8088 = alloca i1, i1 0
- %nop8089 = alloca i1, i1 0
- %nop8090 = alloca i1, i1 0
- %nop8091 = alloca i1, i1 0
- %nop8092 = alloca i1, i1 0
- %nop8093 = alloca i1, i1 0
- %nop8094 = alloca i1, i1 0
- %nop8095 = alloca i1, i1 0
- %nop8096 = alloca i1, i1 0
- %nop8097 = alloca i1, i1 0
- %nop8098 = alloca i1, i1 0
- %nop8099 = alloca i1, i1 0
- %nop8100 = alloca i1, i1 0
- %nop8101 = alloca i1, i1 0
- %nop8102 = alloca i1, i1 0
- %nop8103 = alloca i1, i1 0
- %nop8104 = alloca i1, i1 0
- %nop8105 = alloca i1, i1 0
- %nop8106 = alloca i1, i1 0
- %nop8107 = alloca i1, i1 0
- %nop8108 = alloca i1, i1 0
- %nop8109 = alloca i1, i1 0
- %nop8110 = alloca i1, i1 0
- %nop8111 = alloca i1, i1 0
- %nop8112 = alloca i1, i1 0
- %nop8113 = alloca i1, i1 0
- %nop8114 = alloca i1, i1 0
- %nop8115 = alloca i1, i1 0
- %nop8116 = alloca i1, i1 0
- %nop8117 = alloca i1, i1 0
- %nop8118 = alloca i1, i1 0
- %nop8119 = alloca i1, i1 0
- %nop8120 = alloca i1, i1 0
- %nop8121 = alloca i1, i1 0
- %nop8122 = alloca i1, i1 0
- %nop8123 = alloca i1, i1 0
- %nop8124 = alloca i1, i1 0
- %nop8125 = alloca i1, i1 0
- %nop8126 = alloca i1, i1 0
- %nop8127 = alloca i1, i1 0
- %nop8128 = alloca i1, i1 0
- %nop8129 = alloca i1, i1 0
- %nop8130 = alloca i1, i1 0
- %nop8131 = alloca i1, i1 0
- %nop8132 = alloca i1, i1 0
- %nop8133 = alloca i1, i1 0
- %nop8134 = alloca i1, i1 0
- %nop8135 = alloca i1, i1 0
- %nop8136 = alloca i1, i1 0
- %nop8137 = alloca i1, i1 0
- %nop8138 = alloca i1, i1 0
- %nop8139 = alloca i1, i1 0
- %nop8140 = alloca i1, i1 0
- %nop8141 = alloca i1, i1 0
- %nop8142 = alloca i1, i1 0
- %nop8143 = alloca i1, i1 0
- %nop8144 = alloca i1, i1 0
- %nop8145 = alloca i1, i1 0
- %nop8146 = alloca i1, i1 0
- %nop8147 = alloca i1, i1 0
- %nop8148 = alloca i1, i1 0
- %nop8149 = alloca i1, i1 0
- %nop8150 = alloca i1, i1 0
- %nop8151 = alloca i1, i1 0
- %nop8152 = alloca i1, i1 0
- %nop8153 = alloca i1, i1 0
- %nop8154 = alloca i1, i1 0
- %nop8155 = alloca i1, i1 0
- %nop8156 = alloca i1, i1 0
- %nop8157 = alloca i1, i1 0
- %nop8158 = alloca i1, i1 0
- %nop8159 = alloca i1, i1 0
- %nop8160 = alloca i1, i1 0
- %nop8161 = alloca i1, i1 0
- %nop8162 = alloca i1, i1 0
- %nop8163 = alloca i1, i1 0
- %nop8164 = alloca i1, i1 0
- %nop8165 = alloca i1, i1 0
- %nop8166 = alloca i1, i1 0
- %nop8167 = alloca i1, i1 0
- %nop8168 = alloca i1, i1 0
- %nop8169 = alloca i1, i1 0
- %nop8170 = alloca i1, i1 0
- %nop8171 = alloca i1, i1 0
- %nop8172 = alloca i1, i1 0
- %nop8173 = alloca i1, i1 0
- %nop8174 = alloca i1, i1 0
- %nop8175 = alloca i1, i1 0
- %nop8176 = alloca i1, i1 0
- %nop8177 = alloca i1, i1 0
- %nop8178 = alloca i1, i1 0
- %nop8179 = alloca i1, i1 0
- %nop8180 = alloca i1, i1 0
- %nop8181 = alloca i1, i1 0
- %nop8182 = alloca i1, i1 0
- %nop8183 = alloca i1, i1 0
- %nop8184 = alloca i1, i1 0
- %nop8185 = alloca i1, i1 0
- %nop8186 = alloca i1, i1 0
- %nop8187 = alloca i1, i1 0
- %nop8188 = alloca i1, i1 0
- %nop8189 = alloca i1, i1 0
- %nop8190 = alloca i1, i1 0
- %nop8191 = alloca i1, i1 0
- %nop8192 = alloca i1, i1 0
- %nop8193 = alloca i1, i1 0
- %nop8194 = alloca i1, i1 0
- %nop8195 = alloca i1, i1 0
- %nop8196 = alloca i1, i1 0
- %nop8197 = alloca i1, i1 0
- %nop8198 = alloca i1, i1 0
- %nop8199 = alloca i1, i1 0
- %nop8200 = alloca i1, i1 0
- %nop8201 = alloca i1, i1 0
- %nop8202 = alloca i1, i1 0
- %nop8203 = alloca i1, i1 0
- %nop8204 = alloca i1, i1 0
- %nop8205 = alloca i1, i1 0
- %nop8206 = alloca i1, i1 0
- %nop8207 = alloca i1, i1 0
- %nop8208 = alloca i1, i1 0
- %nop8209 = alloca i1, i1 0
- %nop8210 = alloca i1, i1 0
- %nop8211 = alloca i1, i1 0
- %nop8212 = alloca i1, i1 0
- %nop8213 = alloca i1, i1 0
- %nop8214 = alloca i1, i1 0
- %nop8215 = alloca i1, i1 0
- %nop8216 = alloca i1, i1 0
- %nop8217 = alloca i1, i1 0
- %nop8218 = alloca i1, i1 0
- %nop8219 = alloca i1, i1 0
- %nop8220 = alloca i1, i1 0
- %nop8221 = alloca i1, i1 0
- %nop8222 = alloca i1, i1 0
- %nop8223 = alloca i1, i1 0
- %nop8224 = alloca i1, i1 0
- %nop8225 = alloca i1, i1 0
- %nop8226 = alloca i1, i1 0
- %nop8227 = alloca i1, i1 0
- %nop8228 = alloca i1, i1 0
- %nop8229 = alloca i1, i1 0
- %nop8230 = alloca i1, i1 0
- %nop8231 = alloca i1, i1 0
- %nop8232 = alloca i1, i1 0
- %nop8233 = alloca i1, i1 0
- %nop8234 = alloca i1, i1 0
- %nop8235 = alloca i1, i1 0
- %nop8236 = alloca i1, i1 0
- %nop8237 = alloca i1, i1 0
- %nop8238 = alloca i1, i1 0
- %nop8239 = alloca i1, i1 0
- %nop8240 = alloca i1, i1 0
- %nop8241 = alloca i1, i1 0
- %nop8242 = alloca i1, i1 0
- %nop8243 = alloca i1, i1 0
- %nop8244 = alloca i1, i1 0
- %nop8245 = alloca i1, i1 0
- %nop8246 = alloca i1, i1 0
- %nop8247 = alloca i1, i1 0
- %nop8248 = alloca i1, i1 0
- %nop8249 = alloca i1, i1 0
- %nop8250 = alloca i1, i1 0
- %nop8251 = alloca i1, i1 0
- %nop8252 = alloca i1, i1 0
- %nop8253 = alloca i1, i1 0
- %nop8254 = alloca i1, i1 0
- %nop8255 = alloca i1, i1 0
- %nop8256 = alloca i1, i1 0
- %nop8257 = alloca i1, i1 0
- %nop8258 = alloca i1, i1 0
- %nop8259 = alloca i1, i1 0
- %nop8260 = alloca i1, i1 0
- %nop8261 = alloca i1, i1 0
- %nop8262 = alloca i1, i1 0
- %nop8263 = alloca i1, i1 0
- %nop8264 = alloca i1, i1 0
- %nop8265 = alloca i1, i1 0
- %nop8266 = alloca i1, i1 0
- %nop8267 = alloca i1, i1 0
- %nop8268 = alloca i1, i1 0
- %nop8269 = alloca i1, i1 0
- %nop8270 = alloca i1, i1 0
- %nop8271 = alloca i1, i1 0
- %nop8272 = alloca i1, i1 0
- %nop8273 = alloca i1, i1 0
- %nop8274 = alloca i1, i1 0
- %nop8275 = alloca i1, i1 0
- %nop8276 = alloca i1, i1 0
- %nop8277 = alloca i1, i1 0
- %nop8278 = alloca i1, i1 0
- %nop8279 = alloca i1, i1 0
- %nop8280 = alloca i1, i1 0
- %nop8281 = alloca i1, i1 0
- %nop8282 = alloca i1, i1 0
- %nop8283 = alloca i1, i1 0
- %nop8284 = alloca i1, i1 0
- %nop8285 = alloca i1, i1 0
- %nop8286 = alloca i1, i1 0
- %nop8287 = alloca i1, i1 0
- %nop8288 = alloca i1, i1 0
- %nop8289 = alloca i1, i1 0
- %nop8290 = alloca i1, i1 0
- %nop8291 = alloca i1, i1 0
- %nop8292 = alloca i1, i1 0
- %nop8293 = alloca i1, i1 0
- %nop8294 = alloca i1, i1 0
- %nop8295 = alloca i1, i1 0
- %nop8296 = alloca i1, i1 0
- %nop8297 = alloca i1, i1 0
- %nop8298 = alloca i1, i1 0
- %nop8299 = alloca i1, i1 0
- %nop8300 = alloca i1, i1 0
- %nop8301 = alloca i1, i1 0
- %nop8302 = alloca i1, i1 0
- %nop8303 = alloca i1, i1 0
- %nop8304 = alloca i1, i1 0
- %nop8305 = alloca i1, i1 0
- %nop8306 = alloca i1, i1 0
- %nop8307 = alloca i1, i1 0
- %nop8308 = alloca i1, i1 0
- %nop8309 = alloca i1, i1 0
- %nop8310 = alloca i1, i1 0
- %nop8311 = alloca i1, i1 0
- %nop8312 = alloca i1, i1 0
- %nop8313 = alloca i1, i1 0
- %nop8314 = alloca i1, i1 0
- %nop8315 = alloca i1, i1 0
- %nop8316 = alloca i1, i1 0
- %nop8317 = alloca i1, i1 0
- %nop8318 = alloca i1, i1 0
- %nop8319 = alloca i1, i1 0
- %nop8320 = alloca i1, i1 0
- %nop8321 = alloca i1, i1 0
- %nop8322 = alloca i1, i1 0
- %nop8323 = alloca i1, i1 0
- %nop8324 = alloca i1, i1 0
- %nop8325 = alloca i1, i1 0
- %nop8326 = alloca i1, i1 0
- %nop8327 = alloca i1, i1 0
- %nop8328 = alloca i1, i1 0
- %nop8329 = alloca i1, i1 0
- %nop8330 = alloca i1, i1 0
- %nop8331 = alloca i1, i1 0
- %nop8332 = alloca i1, i1 0
- %nop8333 = alloca i1, i1 0
- %nop8334 = alloca i1, i1 0
- %nop8335 = alloca i1, i1 0
- %nop8336 = alloca i1, i1 0
- %nop8337 = alloca i1, i1 0
- %nop8338 = alloca i1, i1 0
- %nop8339 = alloca i1, i1 0
- %nop8340 = alloca i1, i1 0
- %nop8341 = alloca i1, i1 0
- %nop8342 = alloca i1, i1 0
- %nop8343 = alloca i1, i1 0
- %nop8344 = alloca i1, i1 0
- %nop8345 = alloca i1, i1 0
- %nop8346 = alloca i1, i1 0
- %nop8347 = alloca i1, i1 0
- %nop8348 = alloca i1, i1 0
- %nop8349 = alloca i1, i1 0
- %nop8350 = alloca i1, i1 0
- %nop8351 = alloca i1, i1 0
- %nop8352 = alloca i1, i1 0
- %nop8353 = alloca i1, i1 0
- %nop8354 = alloca i1, i1 0
- %nop8355 = alloca i1, i1 0
- %nop8356 = alloca i1, i1 0
- %nop8357 = alloca i1, i1 0
- %nop8358 = alloca i1, i1 0
- %nop8359 = alloca i1, i1 0
- %nop8360 = alloca i1, i1 0
- %nop8361 = alloca i1, i1 0
- %nop8362 = alloca i1, i1 0
- %nop8363 = alloca i1, i1 0
- %nop8364 = alloca i1, i1 0
- %nop8365 = alloca i1, i1 0
- %nop8366 = alloca i1, i1 0
- %nop8367 = alloca i1, i1 0
- %nop8368 = alloca i1, i1 0
- %nop8369 = alloca i1, i1 0
- %nop8370 = alloca i1, i1 0
- %nop8371 = alloca i1, i1 0
- %nop8372 = alloca i1, i1 0
- %nop8373 = alloca i1, i1 0
- %nop8374 = alloca i1, i1 0
- %nop8375 = alloca i1, i1 0
- %nop8376 = alloca i1, i1 0
- %nop8377 = alloca i1, i1 0
- %nop8378 = alloca i1, i1 0
- %nop8379 = alloca i1, i1 0
- %nop8380 = alloca i1, i1 0
- %nop8381 = alloca i1, i1 0
- %nop8382 = alloca i1, i1 0
- %nop8383 = alloca i1, i1 0
- %nop8384 = alloca i1, i1 0
- %nop8385 = alloca i1, i1 0
- %nop8386 = alloca i1, i1 0
- %nop8387 = alloca i1, i1 0
- %nop8388 = alloca i1, i1 0
- %nop8389 = alloca i1, i1 0
- %nop8390 = alloca i1, i1 0
- %nop8391 = alloca i1, i1 0
- %nop8392 = alloca i1, i1 0
- %nop8393 = alloca i1, i1 0
- %nop8394 = alloca i1, i1 0
- %nop8395 = alloca i1, i1 0
- %nop8396 = alloca i1, i1 0
- %nop8397 = alloca i1, i1 0
- %nop8398 = alloca i1, i1 0
- %nop8399 = alloca i1, i1 0
- %nop8400 = alloca i1, i1 0
- %nop8401 = alloca i1, i1 0
- %nop8402 = alloca i1, i1 0
- %nop8403 = alloca i1, i1 0
- %nop8404 = alloca i1, i1 0
- %nop8405 = alloca i1, i1 0
- %nop8406 = alloca i1, i1 0
- %nop8407 = alloca i1, i1 0
- %nop8408 = alloca i1, i1 0
- %nop8409 = alloca i1, i1 0
- %nop8410 = alloca i1, i1 0
- %nop8411 = alloca i1, i1 0
- %nop8412 = alloca i1, i1 0
- %nop8413 = alloca i1, i1 0
- %nop8414 = alloca i1, i1 0
- %nop8415 = alloca i1, i1 0
- %nop8416 = alloca i1, i1 0
- %nop8417 = alloca i1, i1 0
- %nop8418 = alloca i1, i1 0
- %nop8419 = alloca i1, i1 0
- %nop8420 = alloca i1, i1 0
- %nop8421 = alloca i1, i1 0
- %nop8422 = alloca i1, i1 0
- %nop8423 = alloca i1, i1 0
- %nop8424 = alloca i1, i1 0
- %nop8425 = alloca i1, i1 0
- %nop8426 = alloca i1, i1 0
- %nop8427 = alloca i1, i1 0
- %nop8428 = alloca i1, i1 0
- %nop8429 = alloca i1, i1 0
- %nop8430 = alloca i1, i1 0
- %nop8431 = alloca i1, i1 0
- %nop8432 = alloca i1, i1 0
- %nop8433 = alloca i1, i1 0
- %nop8434 = alloca i1, i1 0
- %nop8435 = alloca i1, i1 0
- %nop8436 = alloca i1, i1 0
- %nop8437 = alloca i1, i1 0
- %nop8438 = alloca i1, i1 0
- %nop8439 = alloca i1, i1 0
- %nop8440 = alloca i1, i1 0
- %nop8441 = alloca i1, i1 0
- %nop8442 = alloca i1, i1 0
- %nop8443 = alloca i1, i1 0
- %nop8444 = alloca i1, i1 0
- %nop8445 = alloca i1, i1 0
- %nop8446 = alloca i1, i1 0
- %nop8447 = alloca i1, i1 0
- %nop8448 = alloca i1, i1 0
- %nop8449 = alloca i1, i1 0
- %nop8450 = alloca i1, i1 0
- %nop8451 = alloca i1, i1 0
- %nop8452 = alloca i1, i1 0
- %nop8453 = alloca i1, i1 0
- %nop8454 = alloca i1, i1 0
- %nop8455 = alloca i1, i1 0
- %nop8456 = alloca i1, i1 0
- %nop8457 = alloca i1, i1 0
- %nop8458 = alloca i1, i1 0
- %nop8459 = alloca i1, i1 0
- %nop8460 = alloca i1, i1 0
- %nop8461 = alloca i1, i1 0
- %nop8462 = alloca i1, i1 0
- %nop8463 = alloca i1, i1 0
- %nop8464 = alloca i1, i1 0
- %nop8465 = alloca i1, i1 0
- %nop8466 = alloca i1, i1 0
- %nop8467 = alloca i1, i1 0
- %nop8468 = alloca i1, i1 0
- %nop8469 = alloca i1, i1 0
- %nop8470 = alloca i1, i1 0
- %nop8471 = alloca i1, i1 0
- %nop8472 = alloca i1, i1 0
- %nop8473 = alloca i1, i1 0
- %nop8474 = alloca i1, i1 0
- %nop8475 = alloca i1, i1 0
- %nop8476 = alloca i1, i1 0
- %nop8477 = alloca i1, i1 0
- %nop8478 = alloca i1, i1 0
- %nop8479 = alloca i1, i1 0
- %nop8480 = alloca i1, i1 0
- %nop8481 = alloca i1, i1 0
- %nop8482 = alloca i1, i1 0
- %nop8483 = alloca i1, i1 0
- %nop8484 = alloca i1, i1 0
- %nop8485 = alloca i1, i1 0
- %nop8486 = alloca i1, i1 0
- %nop8487 = alloca i1, i1 0
- %nop8488 = alloca i1, i1 0
- %nop8489 = alloca i1, i1 0
- %nop8490 = alloca i1, i1 0
- %nop8491 = alloca i1, i1 0
- %nop8492 = alloca i1, i1 0
- %nop8493 = alloca i1, i1 0
- %nop8494 = alloca i1, i1 0
- %nop8495 = alloca i1, i1 0
- %nop8496 = alloca i1, i1 0
- %nop8497 = alloca i1, i1 0
- %nop8498 = alloca i1, i1 0
- %nop8499 = alloca i1, i1 0
- %nop8500 = alloca i1, i1 0
- %nop8501 = alloca i1, i1 0
- %nop8502 = alloca i1, i1 0
- %nop8503 = alloca i1, i1 0
- %nop8504 = alloca i1, i1 0
- %nop8505 = alloca i1, i1 0
- %nop8506 = alloca i1, i1 0
- %nop8507 = alloca i1, i1 0
- %nop8508 = alloca i1, i1 0
- %nop8509 = alloca i1, i1 0
- %nop8510 = alloca i1, i1 0
- %nop8511 = alloca i1, i1 0
- %nop8512 = alloca i1, i1 0
- %nop8513 = alloca i1, i1 0
- %nop8514 = alloca i1, i1 0
- %nop8515 = alloca i1, i1 0
- %nop8516 = alloca i1, i1 0
- %nop8517 = alloca i1, i1 0
- %nop8518 = alloca i1, i1 0
- %nop8519 = alloca i1, i1 0
- %nop8520 = alloca i1, i1 0
- %nop8521 = alloca i1, i1 0
- %nop8522 = alloca i1, i1 0
- %nop8523 = alloca i1, i1 0
- %nop8524 = alloca i1, i1 0
- %nop8525 = alloca i1, i1 0
- %nop8526 = alloca i1, i1 0
- %nop8527 = alloca i1, i1 0
- %nop8528 = alloca i1, i1 0
- %nop8529 = alloca i1, i1 0
- %nop8530 = alloca i1, i1 0
- %nop8531 = alloca i1, i1 0
- %nop8532 = alloca i1, i1 0
- %nop8533 = alloca i1, i1 0
- %nop8534 = alloca i1, i1 0
- %nop8535 = alloca i1, i1 0
- %nop8536 = alloca i1, i1 0
- %nop8537 = alloca i1, i1 0
- %nop8538 = alloca i1, i1 0
- %nop8539 = alloca i1, i1 0
- %nop8540 = alloca i1, i1 0
- %nop8541 = alloca i1, i1 0
- %nop8542 = alloca i1, i1 0
- %nop8543 = alloca i1, i1 0
- %nop8544 = alloca i1, i1 0
- %nop8545 = alloca i1, i1 0
- %nop8546 = alloca i1, i1 0
- %nop8547 = alloca i1, i1 0
- %nop8548 = alloca i1, i1 0
- %nop8549 = alloca i1, i1 0
- %nop8550 = alloca i1, i1 0
- %nop8551 = alloca i1, i1 0
- %nop8552 = alloca i1, i1 0
- %nop8553 = alloca i1, i1 0
- %nop8554 = alloca i1, i1 0
- %nop8555 = alloca i1, i1 0
- %nop8556 = alloca i1, i1 0
- %nop8557 = alloca i1, i1 0
- %nop8558 = alloca i1, i1 0
- %nop8559 = alloca i1, i1 0
- %nop8560 = alloca i1, i1 0
- %nop8561 = alloca i1, i1 0
- %nop8562 = alloca i1, i1 0
- %nop8563 = alloca i1, i1 0
- %nop8564 = alloca i1, i1 0
- %nop8565 = alloca i1, i1 0
- %nop8566 = alloca i1, i1 0
- %nop8567 = alloca i1, i1 0
- %nop8568 = alloca i1, i1 0
- %nop8569 = alloca i1, i1 0
- %nop8570 = alloca i1, i1 0
- %nop8571 = alloca i1, i1 0
- %nop8572 = alloca i1, i1 0
- %nop8573 = alloca i1, i1 0
- %nop8574 = alloca i1, i1 0
- %nop8575 = alloca i1, i1 0
- %nop8576 = alloca i1, i1 0
- %nop8577 = alloca i1, i1 0
- %nop8578 = alloca i1, i1 0
- %nop8579 = alloca i1, i1 0
- %nop8580 = alloca i1, i1 0
- %nop8581 = alloca i1, i1 0
- %nop8582 = alloca i1, i1 0
- %nop8583 = alloca i1, i1 0
- %nop8584 = alloca i1, i1 0
- %nop8585 = alloca i1, i1 0
- %nop8586 = alloca i1, i1 0
- %nop8587 = alloca i1, i1 0
- %nop8588 = alloca i1, i1 0
- %nop8589 = alloca i1, i1 0
- %nop8590 = alloca i1, i1 0
- %nop8591 = alloca i1, i1 0
- %nop8592 = alloca i1, i1 0
- %nop8593 = alloca i1, i1 0
- %nop8594 = alloca i1, i1 0
- %nop8595 = alloca i1, i1 0
- %nop8596 = alloca i1, i1 0
- %nop8597 = alloca i1, i1 0
- %nop8598 = alloca i1, i1 0
- %nop8599 = alloca i1, i1 0
- %nop8600 = alloca i1, i1 0
- %nop8601 = alloca i1, i1 0
- %nop8602 = alloca i1, i1 0
- %nop8603 = alloca i1, i1 0
- %nop8604 = alloca i1, i1 0
- %nop8605 = alloca i1, i1 0
- %nop8606 = alloca i1, i1 0
- %nop8607 = alloca i1, i1 0
- %nop8608 = alloca i1, i1 0
- %nop8609 = alloca i1, i1 0
- %nop8610 = alloca i1, i1 0
- %nop8611 = alloca i1, i1 0
- %nop8612 = alloca i1, i1 0
- %nop8613 = alloca i1, i1 0
- %nop8614 = alloca i1, i1 0
- %nop8615 = alloca i1, i1 0
- %nop8616 = alloca i1, i1 0
- %nop8617 = alloca i1, i1 0
- %nop8618 = alloca i1, i1 0
- %nop8619 = alloca i1, i1 0
- %nop8620 = alloca i1, i1 0
- %nop8621 = alloca i1, i1 0
- %nop8622 = alloca i1, i1 0
- %nop8623 = alloca i1, i1 0
- %nop8624 = alloca i1, i1 0
- %nop8625 = alloca i1, i1 0
- %nop8626 = alloca i1, i1 0
- %nop8627 = alloca i1, i1 0
- %nop8628 = alloca i1, i1 0
- %nop8629 = alloca i1, i1 0
- %nop8630 = alloca i1, i1 0
- %nop8631 = alloca i1, i1 0
- %nop8632 = alloca i1, i1 0
- %nop8633 = alloca i1, i1 0
- %nop8634 = alloca i1, i1 0
- %nop8635 = alloca i1, i1 0
- %nop8636 = alloca i1, i1 0
- %nop8637 = alloca i1, i1 0
- %nop8638 = alloca i1, i1 0
- %nop8639 = alloca i1, i1 0
- %nop8640 = alloca i1, i1 0
- %nop8641 = alloca i1, i1 0
- %nop8642 = alloca i1, i1 0
- %nop8643 = alloca i1, i1 0
- %nop8644 = alloca i1, i1 0
- %nop8645 = alloca i1, i1 0
- %nop8646 = alloca i1, i1 0
- %nop8647 = alloca i1, i1 0
- %nop8648 = alloca i1, i1 0
- %nop8649 = alloca i1, i1 0
- %nop8650 = alloca i1, i1 0
- %nop8651 = alloca i1, i1 0
- %nop8652 = alloca i1, i1 0
- %nop8653 = alloca i1, i1 0
- %nop8654 = alloca i1, i1 0
- %nop8655 = alloca i1, i1 0
- %nop8656 = alloca i1, i1 0
- %nop8657 = alloca i1, i1 0
- %nop8658 = alloca i1, i1 0
- %nop8659 = alloca i1, i1 0
- %nop8660 = alloca i1, i1 0
- %nop8661 = alloca i1, i1 0
- %nop8662 = alloca i1, i1 0
- %nop8663 = alloca i1, i1 0
- %nop8664 = alloca i1, i1 0
- %nop8665 = alloca i1, i1 0
- %nop8666 = alloca i1, i1 0
- %nop8667 = alloca i1, i1 0
- %nop8668 = alloca i1, i1 0
- %nop8669 = alloca i1, i1 0
- %nop8670 = alloca i1, i1 0
- %nop8671 = alloca i1, i1 0
- %nop8672 = alloca i1, i1 0
- %nop8673 = alloca i1, i1 0
- %nop8674 = alloca i1, i1 0
- %nop8675 = alloca i1, i1 0
- %nop8676 = alloca i1, i1 0
- %nop8677 = alloca i1, i1 0
- %nop8678 = alloca i1, i1 0
- %nop8679 = alloca i1, i1 0
- %nop8680 = alloca i1, i1 0
- %nop8681 = alloca i1, i1 0
- %nop8682 = alloca i1, i1 0
- %nop8683 = alloca i1, i1 0
- %nop8684 = alloca i1, i1 0
- %nop8685 = alloca i1, i1 0
- %nop8686 = alloca i1, i1 0
- %nop8687 = alloca i1, i1 0
- %nop8688 = alloca i1, i1 0
- %nop8689 = alloca i1, i1 0
- %nop8690 = alloca i1, i1 0
- %nop8691 = alloca i1, i1 0
- %nop8692 = alloca i1, i1 0
- %nop8693 = alloca i1, i1 0
- %nop8694 = alloca i1, i1 0
- %nop8695 = alloca i1, i1 0
- %nop8696 = alloca i1, i1 0
- %nop8697 = alloca i1, i1 0
- %nop8698 = alloca i1, i1 0
- %nop8699 = alloca i1, i1 0
- %nop8700 = alloca i1, i1 0
- %nop8701 = alloca i1, i1 0
- %nop8702 = alloca i1, i1 0
- %nop8703 = alloca i1, i1 0
- %nop8704 = alloca i1, i1 0
- %nop8705 = alloca i1, i1 0
- %nop8706 = alloca i1, i1 0
- %nop8707 = alloca i1, i1 0
- %nop8708 = alloca i1, i1 0
- %nop8709 = alloca i1, i1 0
- %nop8710 = alloca i1, i1 0
- %nop8711 = alloca i1, i1 0
- %nop8712 = alloca i1, i1 0
- %nop8713 = alloca i1, i1 0
- %nop8714 = alloca i1, i1 0
- %nop8715 = alloca i1, i1 0
- %nop8716 = alloca i1, i1 0
- %nop8717 = alloca i1, i1 0
- %nop8718 = alloca i1, i1 0
- %nop8719 = alloca i1, i1 0
- %nop8720 = alloca i1, i1 0
- %nop8721 = alloca i1, i1 0
- %nop8722 = alloca i1, i1 0
- %nop8723 = alloca i1, i1 0
- %nop8724 = alloca i1, i1 0
- %nop8725 = alloca i1, i1 0
- %nop8726 = alloca i1, i1 0
- %nop8727 = alloca i1, i1 0
- %nop8728 = alloca i1, i1 0
- %nop8729 = alloca i1, i1 0
- %nop8730 = alloca i1, i1 0
- %nop8731 = alloca i1, i1 0
- %nop8732 = alloca i1, i1 0
- %nop8733 = alloca i1, i1 0
- %nop8734 = alloca i1, i1 0
- %nop8735 = alloca i1, i1 0
- %nop8736 = alloca i1, i1 0
- %nop8737 = alloca i1, i1 0
- %nop8738 = alloca i1, i1 0
- %nop8739 = alloca i1, i1 0
- %nop8740 = alloca i1, i1 0
- %nop8741 = alloca i1, i1 0
- %nop8742 = alloca i1, i1 0
- %nop8743 = alloca i1, i1 0
- %nop8744 = alloca i1, i1 0
- %nop8745 = alloca i1, i1 0
- %nop8746 = alloca i1, i1 0
- %nop8747 = alloca i1, i1 0
- %nop8748 = alloca i1, i1 0
- %nop8749 = alloca i1, i1 0
- %nop8750 = alloca i1, i1 0
- %nop8751 = alloca i1, i1 0
- %nop8752 = alloca i1, i1 0
- %nop8753 = alloca i1, i1 0
- %nop8754 = alloca i1, i1 0
- %nop8755 = alloca i1, i1 0
- %nop8756 = alloca i1, i1 0
- %nop8757 = alloca i1, i1 0
- %nop8758 = alloca i1, i1 0
- %nop8759 = alloca i1, i1 0
- %nop8760 = alloca i1, i1 0
- %nop8761 = alloca i1, i1 0
- %nop8762 = alloca i1, i1 0
- %nop8763 = alloca i1, i1 0
- %nop8764 = alloca i1, i1 0
- %nop8765 = alloca i1, i1 0
- %nop8766 = alloca i1, i1 0
- %nop8767 = alloca i1, i1 0
- %nop8768 = alloca i1, i1 0
- %nop8769 = alloca i1, i1 0
- %nop8770 = alloca i1, i1 0
- %nop8771 = alloca i1, i1 0
- %nop8772 = alloca i1, i1 0
- %nop8773 = alloca i1, i1 0
- %nop8774 = alloca i1, i1 0
- %nop8775 = alloca i1, i1 0
- %nop8776 = alloca i1, i1 0
- %nop8777 = alloca i1, i1 0
- %nop8778 = alloca i1, i1 0
- %nop8779 = alloca i1, i1 0
- %nop8780 = alloca i1, i1 0
- %nop8781 = alloca i1, i1 0
- %nop8782 = alloca i1, i1 0
- %nop8783 = alloca i1, i1 0
- %nop8784 = alloca i1, i1 0
- %nop8785 = alloca i1, i1 0
- %nop8786 = alloca i1, i1 0
- %nop8787 = alloca i1, i1 0
- %nop8788 = alloca i1, i1 0
- %nop8789 = alloca i1, i1 0
- %nop8790 = alloca i1, i1 0
- %nop8791 = alloca i1, i1 0
- %nop8792 = alloca i1, i1 0
- %nop8793 = alloca i1, i1 0
- %nop8794 = alloca i1, i1 0
- %nop8795 = alloca i1, i1 0
- %nop8796 = alloca i1, i1 0
- %nop8797 = alloca i1, i1 0
- %nop8798 = alloca i1, i1 0
- %nop8799 = alloca i1, i1 0
- %nop8800 = alloca i1, i1 0
- %nop8801 = alloca i1, i1 0
- %nop8802 = alloca i1, i1 0
- %nop8803 = alloca i1, i1 0
- %nop8804 = alloca i1, i1 0
- %nop8805 = alloca i1, i1 0
- %nop8806 = alloca i1, i1 0
- %nop8807 = alloca i1, i1 0
- %nop8808 = alloca i1, i1 0
- %nop8809 = alloca i1, i1 0
- %nop8810 = alloca i1, i1 0
- %nop8811 = alloca i1, i1 0
- %nop8812 = alloca i1, i1 0
- %nop8813 = alloca i1, i1 0
- %nop8814 = alloca i1, i1 0
- %nop8815 = alloca i1, i1 0
- %nop8816 = alloca i1, i1 0
- %nop8817 = alloca i1, i1 0
- %nop8818 = alloca i1, i1 0
- %nop8819 = alloca i1, i1 0
- %nop8820 = alloca i1, i1 0
- %nop8821 = alloca i1, i1 0
- %nop8822 = alloca i1, i1 0
- %nop8823 = alloca i1, i1 0
- %nop8824 = alloca i1, i1 0
- %nop8825 = alloca i1, i1 0
- %nop8826 = alloca i1, i1 0
- %nop8827 = alloca i1, i1 0
- %nop8828 = alloca i1, i1 0
- %nop8829 = alloca i1, i1 0
- %nop8830 = alloca i1, i1 0
- %nop8831 = alloca i1, i1 0
- %nop8832 = alloca i1, i1 0
- %nop8833 = alloca i1, i1 0
- %nop8834 = alloca i1, i1 0
- %nop8835 = alloca i1, i1 0
- %nop8836 = alloca i1, i1 0
- %nop8837 = alloca i1, i1 0
- %nop8838 = alloca i1, i1 0
- %nop8839 = alloca i1, i1 0
- %nop8840 = alloca i1, i1 0
- %nop8841 = alloca i1, i1 0
- %nop8842 = alloca i1, i1 0
- %nop8843 = alloca i1, i1 0
- %nop8844 = alloca i1, i1 0
- %nop8845 = alloca i1, i1 0
- %nop8846 = alloca i1, i1 0
- %nop8847 = alloca i1, i1 0
- %nop8848 = alloca i1, i1 0
- %nop8849 = alloca i1, i1 0
- %nop8850 = alloca i1, i1 0
- %nop8851 = alloca i1, i1 0
- %nop8852 = alloca i1, i1 0
- %nop8853 = alloca i1, i1 0
- %nop8854 = alloca i1, i1 0
- %nop8855 = alloca i1, i1 0
- %nop8856 = alloca i1, i1 0
- %nop8857 = alloca i1, i1 0
- %nop8858 = alloca i1, i1 0
- %nop8859 = alloca i1, i1 0
- %nop8860 = alloca i1, i1 0
- %nop8861 = alloca i1, i1 0
- %nop8862 = alloca i1, i1 0
- %nop8863 = alloca i1, i1 0
- %nop8864 = alloca i1, i1 0
- %nop8865 = alloca i1, i1 0
- %nop8866 = alloca i1, i1 0
- %nop8867 = alloca i1, i1 0
- %nop8868 = alloca i1, i1 0
- %nop8869 = alloca i1, i1 0
- %nop8870 = alloca i1, i1 0
- %nop8871 = alloca i1, i1 0
- %nop8872 = alloca i1, i1 0
- %nop8873 = alloca i1, i1 0
- %nop8874 = alloca i1, i1 0
- %nop8875 = alloca i1, i1 0
- %nop8876 = alloca i1, i1 0
- %nop8877 = alloca i1, i1 0
- %nop8878 = alloca i1, i1 0
- %nop8879 = alloca i1, i1 0
- %nop8880 = alloca i1, i1 0
- %nop8881 = alloca i1, i1 0
- %nop8882 = alloca i1, i1 0
- %nop8883 = alloca i1, i1 0
- %nop8884 = alloca i1, i1 0
- %nop8885 = alloca i1, i1 0
- %nop8886 = alloca i1, i1 0
- %nop8887 = alloca i1, i1 0
- %nop8888 = alloca i1, i1 0
- %nop8889 = alloca i1, i1 0
- %nop8890 = alloca i1, i1 0
- %nop8891 = alloca i1, i1 0
- %nop8892 = alloca i1, i1 0
- %nop8893 = alloca i1, i1 0
- %nop8894 = alloca i1, i1 0
- %nop8895 = alloca i1, i1 0
- %nop8896 = alloca i1, i1 0
- %nop8897 = alloca i1, i1 0
- %nop8898 = alloca i1, i1 0
- %nop8899 = alloca i1, i1 0
- %nop8900 = alloca i1, i1 0
- %nop8901 = alloca i1, i1 0
- %nop8902 = alloca i1, i1 0
- %nop8903 = alloca i1, i1 0
- %nop8904 = alloca i1, i1 0
- %nop8905 = alloca i1, i1 0
- %nop8906 = alloca i1, i1 0
- %nop8907 = alloca i1, i1 0
- %nop8908 = alloca i1, i1 0
- %nop8909 = alloca i1, i1 0
- %nop8910 = alloca i1, i1 0
- %nop8911 = alloca i1, i1 0
- %nop8912 = alloca i1, i1 0
- %nop8913 = alloca i1, i1 0
- %nop8914 = alloca i1, i1 0
- %nop8915 = alloca i1, i1 0
- %nop8916 = alloca i1, i1 0
- %nop8917 = alloca i1, i1 0
- %nop8918 = alloca i1, i1 0
- %nop8919 = alloca i1, i1 0
- %nop8920 = alloca i1, i1 0
- %nop8921 = alloca i1, i1 0
- %nop8922 = alloca i1, i1 0
- %nop8923 = alloca i1, i1 0
- %nop8924 = alloca i1, i1 0
- %nop8925 = alloca i1, i1 0
- %nop8926 = alloca i1, i1 0
- %nop8927 = alloca i1, i1 0
- %nop8928 = alloca i1, i1 0
- %nop8929 = alloca i1, i1 0
- %nop8930 = alloca i1, i1 0
- %nop8931 = alloca i1, i1 0
- %nop8932 = alloca i1, i1 0
- %nop8933 = alloca i1, i1 0
- %nop8934 = alloca i1, i1 0
- %nop8935 = alloca i1, i1 0
- %nop8936 = alloca i1, i1 0
- %nop8937 = alloca i1, i1 0
- %nop8938 = alloca i1, i1 0
- %nop8939 = alloca i1, i1 0
- %nop8940 = alloca i1, i1 0
- %nop8941 = alloca i1, i1 0
- %nop8942 = alloca i1, i1 0
- %nop8943 = alloca i1, i1 0
- %nop8944 = alloca i1, i1 0
- %nop8945 = alloca i1, i1 0
- %nop8946 = alloca i1, i1 0
- %nop8947 = alloca i1, i1 0
- %nop8948 = alloca i1, i1 0
- %nop8949 = alloca i1, i1 0
- %nop8950 = alloca i1, i1 0
- %nop8951 = alloca i1, i1 0
- %nop8952 = alloca i1, i1 0
- %nop8953 = alloca i1, i1 0
- %nop8954 = alloca i1, i1 0
- %nop8955 = alloca i1, i1 0
- %nop8956 = alloca i1, i1 0
- %nop8957 = alloca i1, i1 0
- %nop8958 = alloca i1, i1 0
- %nop8959 = alloca i1, i1 0
- %nop8960 = alloca i1, i1 0
- %nop8961 = alloca i1, i1 0
- %nop8962 = alloca i1, i1 0
- %nop8963 = alloca i1, i1 0
- %nop8964 = alloca i1, i1 0
- %nop8965 = alloca i1, i1 0
- %nop8966 = alloca i1, i1 0
- %nop8967 = alloca i1, i1 0
- %nop8968 = alloca i1, i1 0
- %nop8969 = alloca i1, i1 0
- %nop8970 = alloca i1, i1 0
- %nop8971 = alloca i1, i1 0
- %nop8972 = alloca i1, i1 0
- %nop8973 = alloca i1, i1 0
- %nop8974 = alloca i1, i1 0
- %nop8975 = alloca i1, i1 0
- %nop8976 = alloca i1, i1 0
- %nop8977 = alloca i1, i1 0
- %nop8978 = alloca i1, i1 0
- %nop8979 = alloca i1, i1 0
- %nop8980 = alloca i1, i1 0
- %nop8981 = alloca i1, i1 0
- %nop8982 = alloca i1, i1 0
- %nop8983 = alloca i1, i1 0
- %nop8984 = alloca i1, i1 0
- %nop8985 = alloca i1, i1 0
- %nop8986 = alloca i1, i1 0
- %nop8987 = alloca i1, i1 0
- %nop8988 = alloca i1, i1 0
- %nop8989 = alloca i1, i1 0
- %nop8990 = alloca i1, i1 0
- %nop8991 = alloca i1, i1 0
- %nop8992 = alloca i1, i1 0
- %nop8993 = alloca i1, i1 0
- %nop8994 = alloca i1, i1 0
- %nop8995 = alloca i1, i1 0
- %nop8996 = alloca i1, i1 0
- %nop8997 = alloca i1, i1 0
- %nop8998 = alloca i1, i1 0
- %nop8999 = alloca i1, i1 0
- %nop9000 = alloca i1, i1 0
- %nop9001 = alloca i1, i1 0
- %nop9002 = alloca i1, i1 0
- %nop9003 = alloca i1, i1 0
- %nop9004 = alloca i1, i1 0
- %nop9005 = alloca i1, i1 0
- %nop9006 = alloca i1, i1 0
- %nop9007 = alloca i1, i1 0
- %nop9008 = alloca i1, i1 0
- %nop9009 = alloca i1, i1 0
- %nop9010 = alloca i1, i1 0
- %nop9011 = alloca i1, i1 0
- %nop9012 = alloca i1, i1 0
- %nop9013 = alloca i1, i1 0
- %nop9014 = alloca i1, i1 0
- %nop9015 = alloca i1, i1 0
- %nop9016 = alloca i1, i1 0
- %nop9017 = alloca i1, i1 0
- %nop9018 = alloca i1, i1 0
- %nop9019 = alloca i1, i1 0
- %nop9020 = alloca i1, i1 0
- %nop9021 = alloca i1, i1 0
- %nop9022 = alloca i1, i1 0
- %nop9023 = alloca i1, i1 0
- %nop9024 = alloca i1, i1 0
- %nop9025 = alloca i1, i1 0
- %nop9026 = alloca i1, i1 0
- %nop9027 = alloca i1, i1 0
- %nop9028 = alloca i1, i1 0
- %nop9029 = alloca i1, i1 0
- %nop9030 = alloca i1, i1 0
- %nop9031 = alloca i1, i1 0
- %nop9032 = alloca i1, i1 0
- %nop9033 = alloca i1, i1 0
- %nop9034 = alloca i1, i1 0
- %nop9035 = alloca i1, i1 0
- %nop9036 = alloca i1, i1 0
- %nop9037 = alloca i1, i1 0
- %nop9038 = alloca i1, i1 0
- %nop9039 = alloca i1, i1 0
- %nop9040 = alloca i1, i1 0
- %nop9041 = alloca i1, i1 0
- %nop9042 = alloca i1, i1 0
- %nop9043 = alloca i1, i1 0
- %nop9044 = alloca i1, i1 0
- %nop9045 = alloca i1, i1 0
- %nop9046 = alloca i1, i1 0
- %nop9047 = alloca i1, i1 0
- %nop9048 = alloca i1, i1 0
- %nop9049 = alloca i1, i1 0
- %nop9050 = alloca i1, i1 0
- %nop9051 = alloca i1, i1 0
- %nop9052 = alloca i1, i1 0
- %nop9053 = alloca i1, i1 0
- %nop9054 = alloca i1, i1 0
- %nop9055 = alloca i1, i1 0
- %nop9056 = alloca i1, i1 0
- %nop9057 = alloca i1, i1 0
- %nop9058 = alloca i1, i1 0
- %nop9059 = alloca i1, i1 0
- %nop9060 = alloca i1, i1 0
- %nop9061 = alloca i1, i1 0
- %nop9062 = alloca i1, i1 0
- %nop9063 = alloca i1, i1 0
- %nop9064 = alloca i1, i1 0
- %nop9065 = alloca i1, i1 0
- %nop9066 = alloca i1, i1 0
- %nop9067 = alloca i1, i1 0
- %nop9068 = alloca i1, i1 0
- %nop9069 = alloca i1, i1 0
- %nop9070 = alloca i1, i1 0
- %nop9071 = alloca i1, i1 0
- %nop9072 = alloca i1, i1 0
- %nop9073 = alloca i1, i1 0
- %nop9074 = alloca i1, i1 0
- %nop9075 = alloca i1, i1 0
- %nop9076 = alloca i1, i1 0
- %nop9077 = alloca i1, i1 0
- %nop9078 = alloca i1, i1 0
- %nop9079 = alloca i1, i1 0
- %nop9080 = alloca i1, i1 0
- %nop9081 = alloca i1, i1 0
- %nop9082 = alloca i1, i1 0
- %nop9083 = alloca i1, i1 0
- %nop9084 = alloca i1, i1 0
- %nop9085 = alloca i1, i1 0
- %nop9086 = alloca i1, i1 0
- %nop9087 = alloca i1, i1 0
- %nop9088 = alloca i1, i1 0
- %nop9089 = alloca i1, i1 0
- %nop9090 = alloca i1, i1 0
- %nop9091 = alloca i1, i1 0
- %nop9092 = alloca i1, i1 0
- %nop9093 = alloca i1, i1 0
- %nop9094 = alloca i1, i1 0
- %nop9095 = alloca i1, i1 0
- %nop9096 = alloca i1, i1 0
- %nop9097 = alloca i1, i1 0
- %nop9098 = alloca i1, i1 0
- %nop9099 = alloca i1, i1 0
- %nop9100 = alloca i1, i1 0
- %nop9101 = alloca i1, i1 0
- %nop9102 = alloca i1, i1 0
- %nop9103 = alloca i1, i1 0
- %nop9104 = alloca i1, i1 0
- %nop9105 = alloca i1, i1 0
- %nop9106 = alloca i1, i1 0
- %nop9107 = alloca i1, i1 0
- %nop9108 = alloca i1, i1 0
- %nop9109 = alloca i1, i1 0
- %nop9110 = alloca i1, i1 0
- %nop9111 = alloca i1, i1 0
- %nop9112 = alloca i1, i1 0
- %nop9113 = alloca i1, i1 0
- %nop9114 = alloca i1, i1 0
- %nop9115 = alloca i1, i1 0
- %nop9116 = alloca i1, i1 0
- %nop9117 = alloca i1, i1 0
- %nop9118 = alloca i1, i1 0
- %nop9119 = alloca i1, i1 0
- %nop9120 = alloca i1, i1 0
- %nop9121 = alloca i1, i1 0
- %nop9122 = alloca i1, i1 0
- %nop9123 = alloca i1, i1 0
- %nop9124 = alloca i1, i1 0
- %nop9125 = alloca i1, i1 0
- %nop9126 = alloca i1, i1 0
- %nop9127 = alloca i1, i1 0
- %nop9128 = alloca i1, i1 0
- %nop9129 = alloca i1, i1 0
- %nop9130 = alloca i1, i1 0
- %nop9131 = alloca i1, i1 0
- %nop9132 = alloca i1, i1 0
- %nop9133 = alloca i1, i1 0
- %nop9134 = alloca i1, i1 0
- %nop9135 = alloca i1, i1 0
- %nop9136 = alloca i1, i1 0
- %nop9137 = alloca i1, i1 0
- %nop9138 = alloca i1, i1 0
- %nop9139 = alloca i1, i1 0
- %nop9140 = alloca i1, i1 0
- %nop9141 = alloca i1, i1 0
- %nop9142 = alloca i1, i1 0
- %nop9143 = alloca i1, i1 0
- %nop9144 = alloca i1, i1 0
- %nop9145 = alloca i1, i1 0
- %nop9146 = alloca i1, i1 0
- %nop9147 = alloca i1, i1 0
- %nop9148 = alloca i1, i1 0
- %nop9149 = alloca i1, i1 0
- %nop9150 = alloca i1, i1 0
- %nop9151 = alloca i1, i1 0
- %nop9152 = alloca i1, i1 0
- %nop9153 = alloca i1, i1 0
- %nop9154 = alloca i1, i1 0
- %nop9155 = alloca i1, i1 0
- %nop9156 = alloca i1, i1 0
- %nop9157 = alloca i1, i1 0
- %nop9158 = alloca i1, i1 0
- %nop9159 = alloca i1, i1 0
- %nop9160 = alloca i1, i1 0
- %nop9161 = alloca i1, i1 0
- %nop9162 = alloca i1, i1 0
- %nop9163 = alloca i1, i1 0
- %nop9164 = alloca i1, i1 0
- %nop9165 = alloca i1, i1 0
- %nop9166 = alloca i1, i1 0
- %nop9167 = alloca i1, i1 0
- %nop9168 = alloca i1, i1 0
- %nop9169 = alloca i1, i1 0
- %nop9170 = alloca i1, i1 0
- %nop9171 = alloca i1, i1 0
- %nop9172 = alloca i1, i1 0
- %nop9173 = alloca i1, i1 0
- %nop9174 = alloca i1, i1 0
- %nop9175 = alloca i1, i1 0
- %nop9176 = alloca i1, i1 0
- %nop9177 = alloca i1, i1 0
- %nop9178 = alloca i1, i1 0
- %nop9179 = alloca i1, i1 0
- %nop9180 = alloca i1, i1 0
- %nop9181 = alloca i1, i1 0
- %nop9182 = alloca i1, i1 0
- %nop9183 = alloca i1, i1 0
- %nop9184 = alloca i1, i1 0
- %nop9185 = alloca i1, i1 0
- %nop9186 = alloca i1, i1 0
- %nop9187 = alloca i1, i1 0
- %nop9188 = alloca i1, i1 0
- %nop9189 = alloca i1, i1 0
- %nop9190 = alloca i1, i1 0
- %nop9191 = alloca i1, i1 0
- %nop9192 = alloca i1, i1 0
- %nop9193 = alloca i1, i1 0
- %nop9194 = alloca i1, i1 0
- %nop9195 = alloca i1, i1 0
- %nop9196 = alloca i1, i1 0
- %nop9197 = alloca i1, i1 0
- %nop9198 = alloca i1, i1 0
- %nop9199 = alloca i1, i1 0
- %nop9200 = alloca i1, i1 0
- %nop9201 = alloca i1, i1 0
- %nop9202 = alloca i1, i1 0
- %nop9203 = alloca i1, i1 0
- %nop9204 = alloca i1, i1 0
- %nop9205 = alloca i1, i1 0
- %nop9206 = alloca i1, i1 0
- %nop9207 = alloca i1, i1 0
- %nop9208 = alloca i1, i1 0
- %nop9209 = alloca i1, i1 0
- %nop9210 = alloca i1, i1 0
- %nop9211 = alloca i1, i1 0
- %nop9212 = alloca i1, i1 0
- %nop9213 = alloca i1, i1 0
- %nop9214 = alloca i1, i1 0
- %nop9215 = alloca i1, i1 0
- %nop9216 = alloca i1, i1 0
- %nop9217 = alloca i1, i1 0
- %nop9218 = alloca i1, i1 0
- %nop9219 = alloca i1, i1 0
- %nop9220 = alloca i1, i1 0
- %nop9221 = alloca i1, i1 0
- %nop9222 = alloca i1, i1 0
- %nop9223 = alloca i1, i1 0
- %nop9224 = alloca i1, i1 0
- %nop9225 = alloca i1, i1 0
- %nop9226 = alloca i1, i1 0
- %nop9227 = alloca i1, i1 0
- %nop9228 = alloca i1, i1 0
- %nop9229 = alloca i1, i1 0
- %nop9230 = alloca i1, i1 0
- %nop9231 = alloca i1, i1 0
- %nop9232 = alloca i1, i1 0
- %nop9233 = alloca i1, i1 0
- %nop9234 = alloca i1, i1 0
- %nop9235 = alloca i1, i1 0
- %nop9236 = alloca i1, i1 0
- %nop9237 = alloca i1, i1 0
- %nop9238 = alloca i1, i1 0
- %nop9239 = alloca i1, i1 0
- %nop9240 = alloca i1, i1 0
- %nop9241 = alloca i1, i1 0
- %nop9242 = alloca i1, i1 0
- %nop9243 = alloca i1, i1 0
- %nop9244 = alloca i1, i1 0
- %nop9245 = alloca i1, i1 0
- %nop9246 = alloca i1, i1 0
- %nop9247 = alloca i1, i1 0
- %nop9248 = alloca i1, i1 0
- %nop9249 = alloca i1, i1 0
- %nop9250 = alloca i1, i1 0
- %nop9251 = alloca i1, i1 0
- %nop9252 = alloca i1, i1 0
- %nop9253 = alloca i1, i1 0
- %nop9254 = alloca i1, i1 0
- %nop9255 = alloca i1, i1 0
- %nop9256 = alloca i1, i1 0
- %nop9257 = alloca i1, i1 0
- %nop9258 = alloca i1, i1 0
- %nop9259 = alloca i1, i1 0
- %nop9260 = alloca i1, i1 0
- %nop9261 = alloca i1, i1 0
- %nop9262 = alloca i1, i1 0
- %nop9263 = alloca i1, i1 0
- %nop9264 = alloca i1, i1 0
- %nop9265 = alloca i1, i1 0
- %nop9266 = alloca i1, i1 0
- %nop9267 = alloca i1, i1 0
- %nop9268 = alloca i1, i1 0
- %nop9269 = alloca i1, i1 0
- %nop9270 = alloca i1, i1 0
- %nop9271 = alloca i1, i1 0
- %nop9272 = alloca i1, i1 0
- %nop9273 = alloca i1, i1 0
- %nop9274 = alloca i1, i1 0
- %nop9275 = alloca i1, i1 0
- %nop9276 = alloca i1, i1 0
- %nop9277 = alloca i1, i1 0
- %nop9278 = alloca i1, i1 0
- %nop9279 = alloca i1, i1 0
- %nop9280 = alloca i1, i1 0
- %nop9281 = alloca i1, i1 0
- %nop9282 = alloca i1, i1 0
- %nop9283 = alloca i1, i1 0
- %nop9284 = alloca i1, i1 0
- %nop9285 = alloca i1, i1 0
- %nop9286 = alloca i1, i1 0
- %nop9287 = alloca i1, i1 0
- %nop9288 = alloca i1, i1 0
- %nop9289 = alloca i1, i1 0
- %nop9290 = alloca i1, i1 0
- %nop9291 = alloca i1, i1 0
- %nop9292 = alloca i1, i1 0
- %nop9293 = alloca i1, i1 0
- %nop9294 = alloca i1, i1 0
- %nop9295 = alloca i1, i1 0
- %nop9296 = alloca i1, i1 0
- %nop9297 = alloca i1, i1 0
- %nop9298 = alloca i1, i1 0
- %nop9299 = alloca i1, i1 0
- %nop9300 = alloca i1, i1 0
- %nop9301 = alloca i1, i1 0
- %nop9302 = alloca i1, i1 0
- %nop9303 = alloca i1, i1 0
- %nop9304 = alloca i1, i1 0
- %nop9305 = alloca i1, i1 0
- %nop9306 = alloca i1, i1 0
- %nop9307 = alloca i1, i1 0
- %nop9308 = alloca i1, i1 0
- %nop9309 = alloca i1, i1 0
- %nop9310 = alloca i1, i1 0
- %nop9311 = alloca i1, i1 0
- %nop9312 = alloca i1, i1 0
- %nop9313 = alloca i1, i1 0
- %nop9314 = alloca i1, i1 0
- %nop9315 = alloca i1, i1 0
- %nop9316 = alloca i1, i1 0
- %nop9317 = alloca i1, i1 0
- %nop9318 = alloca i1, i1 0
- %nop9319 = alloca i1, i1 0
- %nop9320 = alloca i1, i1 0
- %nop9321 = alloca i1, i1 0
- %nop9322 = alloca i1, i1 0
- %nop9323 = alloca i1, i1 0
- %nop9324 = alloca i1, i1 0
- %nop9325 = alloca i1, i1 0
- %nop9326 = alloca i1, i1 0
- %nop9327 = alloca i1, i1 0
- %nop9328 = alloca i1, i1 0
- %nop9329 = alloca i1, i1 0
- %nop9330 = alloca i1, i1 0
- %nop9331 = alloca i1, i1 0
- %nop9332 = alloca i1, i1 0
- %nop9333 = alloca i1, i1 0
- %nop9334 = alloca i1, i1 0
- %nop9335 = alloca i1, i1 0
- %nop9336 = alloca i1, i1 0
- %nop9337 = alloca i1, i1 0
- %nop9338 = alloca i1, i1 0
- %nop9339 = alloca i1, i1 0
- %nop9340 = alloca i1, i1 0
- %nop9341 = alloca i1, i1 0
- %nop9342 = alloca i1, i1 0
- %nop9343 = alloca i1, i1 0
- %nop9344 = alloca i1, i1 0
- %nop9345 = alloca i1, i1 0
- %nop9346 = alloca i1, i1 0
- %nop9347 = alloca i1, i1 0
- %nop9348 = alloca i1, i1 0
- %nop9349 = alloca i1, i1 0
- %nop9350 = alloca i1, i1 0
- %nop9351 = alloca i1, i1 0
- %nop9352 = alloca i1, i1 0
- %nop9353 = alloca i1, i1 0
- %nop9354 = alloca i1, i1 0
- %nop9355 = alloca i1, i1 0
- %nop9356 = alloca i1, i1 0
- %nop9357 = alloca i1, i1 0
- %nop9358 = alloca i1, i1 0
- %nop9359 = alloca i1, i1 0
- %nop9360 = alloca i1, i1 0
- %nop9361 = alloca i1, i1 0
- %nop9362 = alloca i1, i1 0
- %nop9363 = alloca i1, i1 0
- %nop9364 = alloca i1, i1 0
- %nop9365 = alloca i1, i1 0
- %nop9366 = alloca i1, i1 0
- %nop9367 = alloca i1, i1 0
- %nop9368 = alloca i1, i1 0
- %nop9369 = alloca i1, i1 0
- %nop9370 = alloca i1, i1 0
- %nop9371 = alloca i1, i1 0
- %nop9372 = alloca i1, i1 0
- %nop9373 = alloca i1, i1 0
- %nop9374 = alloca i1, i1 0
- %nop9375 = alloca i1, i1 0
- %nop9376 = alloca i1, i1 0
- %nop9377 = alloca i1, i1 0
- %nop9378 = alloca i1, i1 0
- %nop9379 = alloca i1, i1 0
- %nop9380 = alloca i1, i1 0
- %nop9381 = alloca i1, i1 0
- %nop9382 = alloca i1, i1 0
- %nop9383 = alloca i1, i1 0
- %nop9384 = alloca i1, i1 0
- %nop9385 = alloca i1, i1 0
- %nop9386 = alloca i1, i1 0
- %nop9387 = alloca i1, i1 0
- %nop9388 = alloca i1, i1 0
- %nop9389 = alloca i1, i1 0
- %nop9390 = alloca i1, i1 0
- %nop9391 = alloca i1, i1 0
- %nop9392 = alloca i1, i1 0
- %nop9393 = alloca i1, i1 0
- %nop9394 = alloca i1, i1 0
- %nop9395 = alloca i1, i1 0
- %nop9396 = alloca i1, i1 0
- %nop9397 = alloca i1, i1 0
- %nop9398 = alloca i1, i1 0
- %nop9399 = alloca i1, i1 0
- %nop9400 = alloca i1, i1 0
- %nop9401 = alloca i1, i1 0
- %nop9402 = alloca i1, i1 0
- %nop9403 = alloca i1, i1 0
- %nop9404 = alloca i1, i1 0
- %nop9405 = alloca i1, i1 0
- %nop9406 = alloca i1, i1 0
- %nop9407 = alloca i1, i1 0
- %nop9408 = alloca i1, i1 0
- %nop9409 = alloca i1, i1 0
- %nop9410 = alloca i1, i1 0
- %nop9411 = alloca i1, i1 0
- %nop9412 = alloca i1, i1 0
- %nop9413 = alloca i1, i1 0
- %nop9414 = alloca i1, i1 0
- %nop9415 = alloca i1, i1 0
- %nop9416 = alloca i1, i1 0
- %nop9417 = alloca i1, i1 0
- %nop9418 = alloca i1, i1 0
- %nop9419 = alloca i1, i1 0
- %nop9420 = alloca i1, i1 0
- %nop9421 = alloca i1, i1 0
- %nop9422 = alloca i1, i1 0
- %nop9423 = alloca i1, i1 0
- %nop9424 = alloca i1, i1 0
- %nop9425 = alloca i1, i1 0
- %nop9426 = alloca i1, i1 0
- %nop9427 = alloca i1, i1 0
- %nop9428 = alloca i1, i1 0
- %nop9429 = alloca i1, i1 0
- %nop9430 = alloca i1, i1 0
- %nop9431 = alloca i1, i1 0
- %nop9432 = alloca i1, i1 0
- %nop9433 = alloca i1, i1 0
- %nop9434 = alloca i1, i1 0
- %nop9435 = alloca i1, i1 0
- %nop9436 = alloca i1, i1 0
- %nop9437 = alloca i1, i1 0
- %nop9438 = alloca i1, i1 0
- %nop9439 = alloca i1, i1 0
- %nop9440 = alloca i1, i1 0
- %nop9441 = alloca i1, i1 0
- %nop9442 = alloca i1, i1 0
- %nop9443 = alloca i1, i1 0
- %nop9444 = alloca i1, i1 0
- %nop9445 = alloca i1, i1 0
- %nop9446 = alloca i1, i1 0
- %nop9447 = alloca i1, i1 0
- %nop9448 = alloca i1, i1 0
- %nop9449 = alloca i1, i1 0
- %nop9450 = alloca i1, i1 0
- %nop9451 = alloca i1, i1 0
- %nop9452 = alloca i1, i1 0
- %nop9453 = alloca i1, i1 0
- %nop9454 = alloca i1, i1 0
- %nop9455 = alloca i1, i1 0
- %nop9456 = alloca i1, i1 0
- %nop9457 = alloca i1, i1 0
- %nop9458 = alloca i1, i1 0
- %nop9459 = alloca i1, i1 0
- %nop9460 = alloca i1, i1 0
- %nop9461 = alloca i1, i1 0
- %nop9462 = alloca i1, i1 0
- %nop9463 = alloca i1, i1 0
- %nop9464 = alloca i1, i1 0
- %nop9465 = alloca i1, i1 0
- %nop9466 = alloca i1, i1 0
- %nop9467 = alloca i1, i1 0
- %nop9468 = alloca i1, i1 0
- %nop9469 = alloca i1, i1 0
- %nop9470 = alloca i1, i1 0
- %nop9471 = alloca i1, i1 0
- %nop9472 = alloca i1, i1 0
- %nop9473 = alloca i1, i1 0
- %nop9474 = alloca i1, i1 0
- %nop9475 = alloca i1, i1 0
- %nop9476 = alloca i1, i1 0
- %nop9477 = alloca i1, i1 0
- %nop9478 = alloca i1, i1 0
- %nop9479 = alloca i1, i1 0
- %nop9480 = alloca i1, i1 0
- %nop9481 = alloca i1, i1 0
- %nop9482 = alloca i1, i1 0
- %nop9483 = alloca i1, i1 0
- %nop9484 = alloca i1, i1 0
- %nop9485 = alloca i1, i1 0
- %nop9486 = alloca i1, i1 0
- %nop9487 = alloca i1, i1 0
- %nop9488 = alloca i1, i1 0
- %nop9489 = alloca i1, i1 0
- %nop9490 = alloca i1, i1 0
- %nop9491 = alloca i1, i1 0
- %nop9492 = alloca i1, i1 0
- %nop9493 = alloca i1, i1 0
- %nop9494 = alloca i1, i1 0
- %nop9495 = alloca i1, i1 0
- %nop9496 = alloca i1, i1 0
- %nop9497 = alloca i1, i1 0
- %nop9498 = alloca i1, i1 0
- %nop9499 = alloca i1, i1 0
- %nop9500 = alloca i1, i1 0
- %nop9501 = alloca i1, i1 0
- %nop9502 = alloca i1, i1 0
- %nop9503 = alloca i1, i1 0
- %nop9504 = alloca i1, i1 0
- %nop9505 = alloca i1, i1 0
- %nop9506 = alloca i1, i1 0
- %nop9507 = alloca i1, i1 0
- %nop9508 = alloca i1, i1 0
- %nop9509 = alloca i1, i1 0
- %nop9510 = alloca i1, i1 0
- %nop9511 = alloca i1, i1 0
- %nop9512 = alloca i1, i1 0
- %nop9513 = alloca i1, i1 0
- %nop9514 = alloca i1, i1 0
- %nop9515 = alloca i1, i1 0
- %nop9516 = alloca i1, i1 0
- %nop9517 = alloca i1, i1 0
- %nop9518 = alloca i1, i1 0
- %nop9519 = alloca i1, i1 0
- %nop9520 = alloca i1, i1 0
- %nop9521 = alloca i1, i1 0
- %nop9522 = alloca i1, i1 0
- %nop9523 = alloca i1, i1 0
- %nop9524 = alloca i1, i1 0
- %nop9525 = alloca i1, i1 0
- %nop9526 = alloca i1, i1 0
- %nop9527 = alloca i1, i1 0
- %nop9528 = alloca i1, i1 0
- %nop9529 = alloca i1, i1 0
- %nop9530 = alloca i1, i1 0
- %nop9531 = alloca i1, i1 0
- %nop9532 = alloca i1, i1 0
- %nop9533 = alloca i1, i1 0
- %nop9534 = alloca i1, i1 0
- %nop9535 = alloca i1, i1 0
- %nop9536 = alloca i1, i1 0
- %nop9537 = alloca i1, i1 0
- %nop9538 = alloca i1, i1 0
- %nop9539 = alloca i1, i1 0
- %nop9540 = alloca i1, i1 0
- %nop9541 = alloca i1, i1 0
- %nop9542 = alloca i1, i1 0
- %nop9543 = alloca i1, i1 0
- %nop9544 = alloca i1, i1 0
- %nop9545 = alloca i1, i1 0
- %nop9546 = alloca i1, i1 0
- %nop9547 = alloca i1, i1 0
- %nop9548 = alloca i1, i1 0
- %nop9549 = alloca i1, i1 0
- %nop9550 = alloca i1, i1 0
- %nop9551 = alloca i1, i1 0
- %nop9552 = alloca i1, i1 0
- %nop9553 = alloca i1, i1 0
- %nop9554 = alloca i1, i1 0
- %nop9555 = alloca i1, i1 0
- %nop9556 = alloca i1, i1 0
- %nop9557 = alloca i1, i1 0
- %nop9558 = alloca i1, i1 0
- %nop9559 = alloca i1, i1 0
- %nop9560 = alloca i1, i1 0
- %nop9561 = alloca i1, i1 0
- %nop9562 = alloca i1, i1 0
- %nop9563 = alloca i1, i1 0
- %nop9564 = alloca i1, i1 0
- %nop9565 = alloca i1, i1 0
- %nop9566 = alloca i1, i1 0
- %nop9567 = alloca i1, i1 0
- %nop9568 = alloca i1, i1 0
- %nop9569 = alloca i1, i1 0
- %nop9570 = alloca i1, i1 0
- %nop9571 = alloca i1, i1 0
- %nop9572 = alloca i1, i1 0
- %nop9573 = alloca i1, i1 0
- %nop9574 = alloca i1, i1 0
- %nop9575 = alloca i1, i1 0
- %nop9576 = alloca i1, i1 0
- %nop9577 = alloca i1, i1 0
- %nop9578 = alloca i1, i1 0
- %nop9579 = alloca i1, i1 0
- %nop9580 = alloca i1, i1 0
- %nop9581 = alloca i1, i1 0
- %nop9582 = alloca i1, i1 0
- %nop9583 = alloca i1, i1 0
- %nop9584 = alloca i1, i1 0
- %nop9585 = alloca i1, i1 0
- %nop9586 = alloca i1, i1 0
- %nop9587 = alloca i1, i1 0
- %nop9588 = alloca i1, i1 0
- %nop9589 = alloca i1, i1 0
- %nop9590 = alloca i1, i1 0
- %nop9591 = alloca i1, i1 0
- %nop9592 = alloca i1, i1 0
- %nop9593 = alloca i1, i1 0
- %nop9594 = alloca i1, i1 0
- %nop9595 = alloca i1, i1 0
- %nop9596 = alloca i1, i1 0
- %nop9597 = alloca i1, i1 0
- %nop9598 = alloca i1, i1 0
- %nop9599 = alloca i1, i1 0
- %nop9600 = alloca i1, i1 0
- %nop9601 = alloca i1, i1 0
- %nop9602 = alloca i1, i1 0
- %nop9603 = alloca i1, i1 0
- %nop9604 = alloca i1, i1 0
- %nop9605 = alloca i1, i1 0
- %nop9606 = alloca i1, i1 0
- %nop9607 = alloca i1, i1 0
- %nop9608 = alloca i1, i1 0
- %nop9609 = alloca i1, i1 0
- %nop9610 = alloca i1, i1 0
- %nop9611 = alloca i1, i1 0
- %nop9612 = alloca i1, i1 0
- %nop9613 = alloca i1, i1 0
- %nop9614 = alloca i1, i1 0
- %nop9615 = alloca i1, i1 0
- %nop9616 = alloca i1, i1 0
- %nop9617 = alloca i1, i1 0
- %nop9618 = alloca i1, i1 0
- %nop9619 = alloca i1, i1 0
- %nop9620 = alloca i1, i1 0
- %nop9621 = alloca i1, i1 0
- %nop9622 = alloca i1, i1 0
- %nop9623 = alloca i1, i1 0
- %nop9624 = alloca i1, i1 0
- %nop9625 = alloca i1, i1 0
- %nop9626 = alloca i1, i1 0
- %nop9627 = alloca i1, i1 0
- %nop9628 = alloca i1, i1 0
- %nop9629 = alloca i1, i1 0
- %nop9630 = alloca i1, i1 0
- %nop9631 = alloca i1, i1 0
- %nop9632 = alloca i1, i1 0
- %nop9633 = alloca i1, i1 0
- %nop9634 = alloca i1, i1 0
- %nop9635 = alloca i1, i1 0
- %nop9636 = alloca i1, i1 0
- %nop9637 = alloca i1, i1 0
- %nop9638 = alloca i1, i1 0
- %nop9639 = alloca i1, i1 0
- %nop9640 = alloca i1, i1 0
- %nop9641 = alloca i1, i1 0
- %nop9642 = alloca i1, i1 0
- %nop9643 = alloca i1, i1 0
- %nop9644 = alloca i1, i1 0
- %nop9645 = alloca i1, i1 0
- %nop9646 = alloca i1, i1 0
- %nop9647 = alloca i1, i1 0
- %nop9648 = alloca i1, i1 0
- %nop9649 = alloca i1, i1 0
- %nop9650 = alloca i1, i1 0
- %nop9651 = alloca i1, i1 0
- %nop9652 = alloca i1, i1 0
- %nop9653 = alloca i1, i1 0
- %nop9654 = alloca i1, i1 0
- %nop9655 = alloca i1, i1 0
- %nop9656 = alloca i1, i1 0
- %nop9657 = alloca i1, i1 0
- %nop9658 = alloca i1, i1 0
- %nop9659 = alloca i1, i1 0
- %nop9660 = alloca i1, i1 0
- %nop9661 = alloca i1, i1 0
- %nop9662 = alloca i1, i1 0
- %nop9663 = alloca i1, i1 0
- %nop9664 = alloca i1, i1 0
- %nop9665 = alloca i1, i1 0
- %nop9666 = alloca i1, i1 0
- %nop9667 = alloca i1, i1 0
- %nop9668 = alloca i1, i1 0
- %nop9669 = alloca i1, i1 0
- %nop9670 = alloca i1, i1 0
- %nop9671 = alloca i1, i1 0
- %nop9672 = alloca i1, i1 0
- %nop9673 = alloca i1, i1 0
- %nop9674 = alloca i1, i1 0
- %nop9675 = alloca i1, i1 0
- %nop9676 = alloca i1, i1 0
- %nop9677 = alloca i1, i1 0
- %nop9678 = alloca i1, i1 0
- %nop9679 = alloca i1, i1 0
- %nop9680 = alloca i1, i1 0
- %nop9681 = alloca i1, i1 0
- %nop9682 = alloca i1, i1 0
- %nop9683 = alloca i1, i1 0
- %nop9684 = alloca i1, i1 0
- %nop9685 = alloca i1, i1 0
- %nop9686 = alloca i1, i1 0
- %nop9687 = alloca i1, i1 0
- %nop9688 = alloca i1, i1 0
- %nop9689 = alloca i1, i1 0
- %nop9690 = alloca i1, i1 0
- %nop9691 = alloca i1, i1 0
- %nop9692 = alloca i1, i1 0
- %nop9693 = alloca i1, i1 0
- %nop9694 = alloca i1, i1 0
- %nop9695 = alloca i1, i1 0
- %nop9696 = alloca i1, i1 0
- %nop9697 = alloca i1, i1 0
- %nop9698 = alloca i1, i1 0
- %nop9699 = alloca i1, i1 0
- %nop9700 = alloca i1, i1 0
- %nop9701 = alloca i1, i1 0
- %nop9702 = alloca i1, i1 0
- %nop9703 = alloca i1, i1 0
- %nop9704 = alloca i1, i1 0
- %nop9705 = alloca i1, i1 0
- %nop9706 = alloca i1, i1 0
- %nop9707 = alloca i1, i1 0
- %nop9708 = alloca i1, i1 0
- %nop9709 = alloca i1, i1 0
- %nop9710 = alloca i1, i1 0
- %nop9711 = alloca i1, i1 0
- %nop9712 = alloca i1, i1 0
- %nop9713 = alloca i1, i1 0
- %nop9714 = alloca i1, i1 0
- %nop9715 = alloca i1, i1 0
- %nop9716 = alloca i1, i1 0
- %nop9717 = alloca i1, i1 0
- %nop9718 = alloca i1, i1 0
- %nop9719 = alloca i1, i1 0
- %nop9720 = alloca i1, i1 0
- %nop9721 = alloca i1, i1 0
- %nop9722 = alloca i1, i1 0
- %nop9723 = alloca i1, i1 0
- %nop9724 = alloca i1, i1 0
- %nop9725 = alloca i1, i1 0
- %nop9726 = alloca i1, i1 0
- %nop9727 = alloca i1, i1 0
- %nop9728 = alloca i1, i1 0
- %nop9729 = alloca i1, i1 0
- %nop9730 = alloca i1, i1 0
- %nop9731 = alloca i1, i1 0
- %nop9732 = alloca i1, i1 0
- %nop9733 = alloca i1, i1 0
- %nop9734 = alloca i1, i1 0
- %nop9735 = alloca i1, i1 0
- %nop9736 = alloca i1, i1 0
- %nop9737 = alloca i1, i1 0
- %nop9738 = alloca i1, i1 0
- %nop9739 = alloca i1, i1 0
- %nop9740 = alloca i1, i1 0
- %nop9741 = alloca i1, i1 0
- %nop9742 = alloca i1, i1 0
- %nop9743 = alloca i1, i1 0
- %nop9744 = alloca i1, i1 0
- %nop9745 = alloca i1, i1 0
- %nop9746 = alloca i1, i1 0
- %nop9747 = alloca i1, i1 0
- %nop9748 = alloca i1, i1 0
- %nop9749 = alloca i1, i1 0
- %nop9750 = alloca i1, i1 0
- %nop9751 = alloca i1, i1 0
- %nop9752 = alloca i1, i1 0
- %nop9753 = alloca i1, i1 0
- %nop9754 = alloca i1, i1 0
- %nop9755 = alloca i1, i1 0
- %nop9756 = alloca i1, i1 0
- %nop9757 = alloca i1, i1 0
- %nop9758 = alloca i1, i1 0
- %nop9759 = alloca i1, i1 0
- %nop9760 = alloca i1, i1 0
- %nop9761 = alloca i1, i1 0
- %nop9762 = alloca i1, i1 0
- %nop9763 = alloca i1, i1 0
- %nop9764 = alloca i1, i1 0
- %nop9765 = alloca i1, i1 0
- %nop9766 = alloca i1, i1 0
- %nop9767 = alloca i1, i1 0
- %nop9768 = alloca i1, i1 0
- %nop9769 = alloca i1, i1 0
- %nop9770 = alloca i1, i1 0
- %nop9771 = alloca i1, i1 0
- %nop9772 = alloca i1, i1 0
- %nop9773 = alloca i1, i1 0
- %nop9774 = alloca i1, i1 0
- %nop9775 = alloca i1, i1 0
- %nop9776 = alloca i1, i1 0
- %nop9777 = alloca i1, i1 0
- %nop9778 = alloca i1, i1 0
- %nop9779 = alloca i1, i1 0
- %nop9780 = alloca i1, i1 0
- %nop9781 = alloca i1, i1 0
- %nop9782 = alloca i1, i1 0
- %nop9783 = alloca i1, i1 0
- %nop9784 = alloca i1, i1 0
- %nop9785 = alloca i1, i1 0
- %nop9786 = alloca i1, i1 0
- %nop9787 = alloca i1, i1 0
- %nop9788 = alloca i1, i1 0
- %nop9789 = alloca i1, i1 0
- %nop9790 = alloca i1, i1 0
- %nop9791 = alloca i1, i1 0
- %nop9792 = alloca i1, i1 0
- %nop9793 = alloca i1, i1 0
- %nop9794 = alloca i1, i1 0
- %nop9795 = alloca i1, i1 0
- %nop9796 = alloca i1, i1 0
- %nop9797 = alloca i1, i1 0
- %nop9798 = alloca i1, i1 0
- %nop9799 = alloca i1, i1 0
- %nop9800 = alloca i1, i1 0
- %nop9801 = alloca i1, i1 0
- %nop9802 = alloca i1, i1 0
- %nop9803 = alloca i1, i1 0
- %nop9804 = alloca i1, i1 0
- %nop9805 = alloca i1, i1 0
- %nop9806 = alloca i1, i1 0
- %nop9807 = alloca i1, i1 0
- %nop9808 = alloca i1, i1 0
- %nop9809 = alloca i1, i1 0
- %nop9810 = alloca i1, i1 0
- %nop9811 = alloca i1, i1 0
- %nop9812 = alloca i1, i1 0
- %nop9813 = alloca i1, i1 0
- %nop9814 = alloca i1, i1 0
- %nop9815 = alloca i1, i1 0
- %nop9816 = alloca i1, i1 0
- %nop9817 = alloca i1, i1 0
- %nop9818 = alloca i1, i1 0
- %nop9819 = alloca i1, i1 0
- %nop9820 = alloca i1, i1 0
- %nop9821 = alloca i1, i1 0
- %nop9822 = alloca i1, i1 0
- %nop9823 = alloca i1, i1 0
- %nop9824 = alloca i1, i1 0
- %nop9825 = alloca i1, i1 0
- %nop9826 = alloca i1, i1 0
- %nop9827 = alloca i1, i1 0
- %nop9828 = alloca i1, i1 0
- %nop9829 = alloca i1, i1 0
- %nop9830 = alloca i1, i1 0
- %nop9831 = alloca i1, i1 0
- %nop9832 = alloca i1, i1 0
- %nop9833 = alloca i1, i1 0
- %nop9834 = alloca i1, i1 0
- %nop9835 = alloca i1, i1 0
- %nop9836 = alloca i1, i1 0
- %nop9837 = alloca i1, i1 0
- %nop9838 = alloca i1, i1 0
- %nop9839 = alloca i1, i1 0
- %nop9840 = alloca i1, i1 0
- %nop9841 = alloca i1, i1 0
- %nop9842 = alloca i1, i1 0
- %nop9843 = alloca i1, i1 0
- %nop9844 = alloca i1, i1 0
- %nop9845 = alloca i1, i1 0
- %nop9846 = alloca i1, i1 0
- %nop9847 = alloca i1, i1 0
- %nop9848 = alloca i1, i1 0
- %nop9849 = alloca i1, i1 0
- %nop9850 = alloca i1, i1 0
- %nop9851 = alloca i1, i1 0
- %nop9852 = alloca i1, i1 0
- %nop9853 = alloca i1, i1 0
- %nop9854 = alloca i1, i1 0
- %nop9855 = alloca i1, i1 0
- %nop9856 = alloca i1, i1 0
- %nop9857 = alloca i1, i1 0
- %nop9858 = alloca i1, i1 0
- %nop9859 = alloca i1, i1 0
- %nop9860 = alloca i1, i1 0
- %nop9861 = alloca i1, i1 0
- %nop9862 = alloca i1, i1 0
- %nop9863 = alloca i1, i1 0
- %nop9864 = alloca i1, i1 0
- %nop9865 = alloca i1, i1 0
- %nop9866 = alloca i1, i1 0
- %nop9867 = alloca i1, i1 0
- %nop9868 = alloca i1, i1 0
- %nop9869 = alloca i1, i1 0
- %nop9870 = alloca i1, i1 0
- %nop9871 = alloca i1, i1 0
- %nop9872 = alloca i1, i1 0
- %nop9873 = alloca i1, i1 0
- %nop9874 = alloca i1, i1 0
- %nop9875 = alloca i1, i1 0
- %nop9876 = alloca i1, i1 0
- %nop9877 = alloca i1, i1 0
- %nop9878 = alloca i1, i1 0
- %nop9879 = alloca i1, i1 0
- %nop9880 = alloca i1, i1 0
- %nop9881 = alloca i1, i1 0
- %nop9882 = alloca i1, i1 0
- %nop9883 = alloca i1, i1 0
- %nop9884 = alloca i1, i1 0
- %nop9885 = alloca i1, i1 0
- %nop9886 = alloca i1, i1 0
- %nop9887 = alloca i1, i1 0
- %nop9888 = alloca i1, i1 0
- %nop9889 = alloca i1, i1 0
- %nop9890 = alloca i1, i1 0
- %nop9891 = alloca i1, i1 0
- %nop9892 = alloca i1, i1 0
- %nop9893 = alloca i1, i1 0
- %nop9894 = alloca i1, i1 0
- %nop9895 = alloca i1, i1 0
- %nop9896 = alloca i1, i1 0
- %nop9897 = alloca i1, i1 0
- %nop9898 = alloca i1, i1 0
- %nop9899 = alloca i1, i1 0
- %nop9900 = alloca i1, i1 0
- %nop9901 = alloca i1, i1 0
- %nop9902 = alloca i1, i1 0
- %nop9903 = alloca i1, i1 0
- %nop9904 = alloca i1, i1 0
- %nop9905 = alloca i1, i1 0
- %nop9906 = alloca i1, i1 0
- %nop9907 = alloca i1, i1 0
- %nop9908 = alloca i1, i1 0
- %nop9909 = alloca i1, i1 0
- %nop9910 = alloca i1, i1 0
- %nop9911 = alloca i1, i1 0
- %nop9912 = alloca i1, i1 0
- %nop9913 = alloca i1, i1 0
- %nop9914 = alloca i1, i1 0
- %nop9915 = alloca i1, i1 0
- %nop9916 = alloca i1, i1 0
- %nop9917 = alloca i1, i1 0
- %nop9918 = alloca i1, i1 0
- %nop9919 = alloca i1, i1 0
- %nop9920 = alloca i1, i1 0
- %nop9921 = alloca i1, i1 0
- %nop9922 = alloca i1, i1 0
- %nop9923 = alloca i1, i1 0
- %nop9924 = alloca i1, i1 0
- %nop9925 = alloca i1, i1 0
- %nop9926 = alloca i1, i1 0
- %nop9927 = alloca i1, i1 0
- %nop9928 = alloca i1, i1 0
- %nop9929 = alloca i1, i1 0
- %nop9930 = alloca i1, i1 0
- %nop9931 = alloca i1, i1 0
- %nop9932 = alloca i1, i1 0
- %nop9933 = alloca i1, i1 0
- %nop9934 = alloca i1, i1 0
- %nop9935 = alloca i1, i1 0
- %nop9936 = alloca i1, i1 0
- %nop9937 = alloca i1, i1 0
- %nop9938 = alloca i1, i1 0
- %nop9939 = alloca i1, i1 0
- %nop9940 = alloca i1, i1 0
- %nop9941 = alloca i1, i1 0
- %nop9942 = alloca i1, i1 0
- %nop9943 = alloca i1, i1 0
- %nop9944 = alloca i1, i1 0
- %nop9945 = alloca i1, i1 0
- %nop9946 = alloca i1, i1 0
- %nop9947 = alloca i1, i1 0
- %nop9948 = alloca i1, i1 0
- %nop9949 = alloca i1, i1 0
- %nop9950 = alloca i1, i1 0
- %nop9951 = alloca i1, i1 0
- %nop9952 = alloca i1, i1 0
- %nop9953 = alloca i1, i1 0
- %nop9954 = alloca i1, i1 0
- %nop9955 = alloca i1, i1 0
- %nop9956 = alloca i1, i1 0
- %nop9957 = alloca i1, i1 0
- %nop9958 = alloca i1, i1 0
- %nop9959 = alloca i1, i1 0
- %nop9960 = alloca i1, i1 0
- %nop9961 = alloca i1, i1 0
- %nop9962 = alloca i1, i1 0
- %nop9963 = alloca i1, i1 0
- %nop9964 = alloca i1, i1 0
- %nop9965 = alloca i1, i1 0
- %nop9966 = alloca i1, i1 0
- %nop9967 = alloca i1, i1 0
- %nop9968 = alloca i1, i1 0
- %nop9969 = alloca i1, i1 0
- %nop9970 = alloca i1, i1 0
- %nop9971 = alloca i1, i1 0
- %nop9972 = alloca i1, i1 0
- %nop9973 = alloca i1, i1 0
- %nop9974 = alloca i1, i1 0
- %nop9975 = alloca i1, i1 0
- %nop9976 = alloca i1, i1 0
- %nop9977 = alloca i1, i1 0
- %nop9978 = alloca i1, i1 0
- %nop9979 = alloca i1, i1 0
- %nop9980 = alloca i1, i1 0
- %nop9981 = alloca i1, i1 0
- %nop9982 = alloca i1, i1 0
- %nop9983 = alloca i1, i1 0
- %nop9984 = alloca i1, i1 0
- %nop9985 = alloca i1, i1 0
- %nop9986 = alloca i1, i1 0
- %nop9987 = alloca i1, i1 0
- %nop9988 = alloca i1, i1 0
- %nop9989 = alloca i1, i1 0
- %nop9990 = alloca i1, i1 0
- %nop9991 = alloca i1, i1 0
- %nop9992 = alloca i1, i1 0
- %nop9993 = alloca i1, i1 0
- %nop9994 = alloca i1, i1 0
- %nop9995 = alloca i1, i1 0
- %nop9996 = alloca i1, i1 0
- %nop9997 = alloca i1, i1 0
- %nop9998 = alloca i1, i1 0
- %nop9999 = alloca i1, i1 0
- %nop10000 = alloca i1, i1 0
- %nop10001 = alloca i1, i1 0
- %nop10002 = alloca i1, i1 0
- %nop10003 = alloca i1, i1 0
- %nop10004 = alloca i1, i1 0
- %nop10005 = alloca i1, i1 0
- %nop10006 = alloca i1, i1 0
- %nop10007 = alloca i1, i1 0
- %nop10008 = alloca i1, i1 0
- %nop10009 = alloca i1, i1 0
- %nop10010 = alloca i1, i1 0
- %nop10011 = alloca i1, i1 0
- %nop10012 = alloca i1, i1 0
- %nop10013 = alloca i1, i1 0
- %nop10014 = alloca i1, i1 0
- %nop10015 = alloca i1, i1 0
- %nop10016 = alloca i1, i1 0
- %nop10017 = alloca i1, i1 0
- %nop10018 = alloca i1, i1 0
- %nop10019 = alloca i1, i1 0
- %nop10020 = alloca i1, i1 0
- %nop10021 = alloca i1, i1 0
- %nop10022 = alloca i1, i1 0
- %nop10023 = alloca i1, i1 0
- %nop10024 = alloca i1, i1 0
- %nop10025 = alloca i1, i1 0
- %nop10026 = alloca i1, i1 0
- %nop10027 = alloca i1, i1 0
- %nop10028 = alloca i1, i1 0
- %nop10029 = alloca i1, i1 0
- %nop10030 = alloca i1, i1 0
- %nop10031 = alloca i1, i1 0
- %nop10032 = alloca i1, i1 0
- %nop10033 = alloca i1, i1 0
- %nop10034 = alloca i1, i1 0
- %nop10035 = alloca i1, i1 0
- %nop10036 = alloca i1, i1 0
- %nop10037 = alloca i1, i1 0
- %nop10038 = alloca i1, i1 0
- %nop10039 = alloca i1, i1 0
- %nop10040 = alloca i1, i1 0
- %nop10041 = alloca i1, i1 0
- %nop10042 = alloca i1, i1 0
- %nop10043 = alloca i1, i1 0
- %nop10044 = alloca i1, i1 0
- %nop10045 = alloca i1, i1 0
- %nop10046 = alloca i1, i1 0
- %nop10047 = alloca i1, i1 0
- %nop10048 = alloca i1, i1 0
- %nop10049 = alloca i1, i1 0
- %nop10050 = alloca i1, i1 0
- %nop10051 = alloca i1, i1 0
- %nop10052 = alloca i1, i1 0
- %nop10053 = alloca i1, i1 0
- %nop10054 = alloca i1, i1 0
- %nop10055 = alloca i1, i1 0
- %nop10056 = alloca i1, i1 0
- %nop10057 = alloca i1, i1 0
- %nop10058 = alloca i1, i1 0
- %nop10059 = alloca i1, i1 0
- %nop10060 = alloca i1, i1 0
- %nop10061 = alloca i1, i1 0
- %nop10062 = alloca i1, i1 0
- %nop10063 = alloca i1, i1 0
- %nop10064 = alloca i1, i1 0
- %nop10065 = alloca i1, i1 0
- %nop10066 = alloca i1, i1 0
- %nop10067 = alloca i1, i1 0
- %nop10068 = alloca i1, i1 0
- %nop10069 = alloca i1, i1 0
- %nop10070 = alloca i1, i1 0
- %nop10071 = alloca i1, i1 0
- %nop10072 = alloca i1, i1 0
- %nop10073 = alloca i1, i1 0
- %nop10074 = alloca i1, i1 0
- %nop10075 = alloca i1, i1 0
- %nop10076 = alloca i1, i1 0
- %nop10077 = alloca i1, i1 0
- %nop10078 = alloca i1, i1 0
- %nop10079 = alloca i1, i1 0
- %nop10080 = alloca i1, i1 0
- %nop10081 = alloca i1, i1 0
- %nop10082 = alloca i1, i1 0
- %nop10083 = alloca i1, i1 0
- %nop10084 = alloca i1, i1 0
- %nop10085 = alloca i1, i1 0
- %nop10086 = alloca i1, i1 0
- %nop10087 = alloca i1, i1 0
- %nop10088 = alloca i1, i1 0
- %nop10089 = alloca i1, i1 0
- %nop10090 = alloca i1, i1 0
- %nop10091 = alloca i1, i1 0
- %nop10092 = alloca i1, i1 0
- %nop10093 = alloca i1, i1 0
- %nop10094 = alloca i1, i1 0
- %nop10095 = alloca i1, i1 0
- %nop10096 = alloca i1, i1 0
- %nop10097 = alloca i1, i1 0
- %nop10098 = alloca i1, i1 0
- %nop10099 = alloca i1, i1 0
- %nop10100 = alloca i1, i1 0
- %nop10101 = alloca i1, i1 0
- %nop10102 = alloca i1, i1 0
- %nop10103 = alloca i1, i1 0
- %nop10104 = alloca i1, i1 0
- %nop10105 = alloca i1, i1 0
- %nop10106 = alloca i1, i1 0
- %nop10107 = alloca i1, i1 0
- %nop10108 = alloca i1, i1 0
- %nop10109 = alloca i1, i1 0
- %nop10110 = alloca i1, i1 0
- %nop10111 = alloca i1, i1 0
- %nop10112 = alloca i1, i1 0
- %nop10113 = alloca i1, i1 0
- %nop10114 = alloca i1, i1 0
- %nop10115 = alloca i1, i1 0
- %nop10116 = alloca i1, i1 0
- %nop10117 = alloca i1, i1 0
- %nop10118 = alloca i1, i1 0
- %nop10119 = alloca i1, i1 0
- %nop10120 = alloca i1, i1 0
- %nop10121 = alloca i1, i1 0
- %nop10122 = alloca i1, i1 0
- %nop10123 = alloca i1, i1 0
- %nop10124 = alloca i1, i1 0
- %nop10125 = alloca i1, i1 0
- %nop10126 = alloca i1, i1 0
- %nop10127 = alloca i1, i1 0
- %nop10128 = alloca i1, i1 0
- %nop10129 = alloca i1, i1 0
- %nop10130 = alloca i1, i1 0
- %nop10131 = alloca i1, i1 0
- %nop10132 = alloca i1, i1 0
- %nop10133 = alloca i1, i1 0
- %nop10134 = alloca i1, i1 0
- %nop10135 = alloca i1, i1 0
- %nop10136 = alloca i1, i1 0
- %nop10137 = alloca i1, i1 0
- %nop10138 = alloca i1, i1 0
- %nop10139 = alloca i1, i1 0
- %nop10140 = alloca i1, i1 0
- %nop10141 = alloca i1, i1 0
- %nop10142 = alloca i1, i1 0
- %nop10143 = alloca i1, i1 0
- %nop10144 = alloca i1, i1 0
- %nop10145 = alloca i1, i1 0
- %nop10146 = alloca i1, i1 0
- %nop10147 = alloca i1, i1 0
- %nop10148 = alloca i1, i1 0
- %nop10149 = alloca i1, i1 0
- %nop10150 = alloca i1, i1 0
- %nop10151 = alloca i1, i1 0
- %nop10152 = alloca i1, i1 0
- %nop10153 = alloca i1, i1 0
- %nop10154 = alloca i1, i1 0
- %nop10155 = alloca i1, i1 0
- %nop10156 = alloca i1, i1 0
- %nop10157 = alloca i1, i1 0
- %nop10158 = alloca i1, i1 0
- %nop10159 = alloca i1, i1 0
- %nop10160 = alloca i1, i1 0
- %nop10161 = alloca i1, i1 0
- %nop10162 = alloca i1, i1 0
- %nop10163 = alloca i1, i1 0
- %nop10164 = alloca i1, i1 0
- %nop10165 = alloca i1, i1 0
- %nop10166 = alloca i1, i1 0
- %nop10167 = alloca i1, i1 0
- %nop10168 = alloca i1, i1 0
- %nop10169 = alloca i1, i1 0
- %nop10170 = alloca i1, i1 0
- %nop10171 = alloca i1, i1 0
- %nop10172 = alloca i1, i1 0
- %nop10173 = alloca i1, i1 0
- %nop10174 = alloca i1, i1 0
- %nop10175 = alloca i1, i1 0
- %nop10176 = alloca i1, i1 0
- %nop10177 = alloca i1, i1 0
- %nop10178 = alloca i1, i1 0
- %nop10179 = alloca i1, i1 0
- %nop10180 = alloca i1, i1 0
- %nop10181 = alloca i1, i1 0
- %nop10182 = alloca i1, i1 0
- %nop10183 = alloca i1, i1 0
- %nop10184 = alloca i1, i1 0
- %nop10185 = alloca i1, i1 0
- %nop10186 = alloca i1, i1 0
- %nop10187 = alloca i1, i1 0
- %nop10188 = alloca i1, i1 0
- %nop10189 = alloca i1, i1 0
- %nop10190 = alloca i1, i1 0
- %nop10191 = alloca i1, i1 0
- %nop10192 = alloca i1, i1 0
- %nop10193 = alloca i1, i1 0
- %nop10194 = alloca i1, i1 0
- %nop10195 = alloca i1, i1 0
- %nop10196 = alloca i1, i1 0
- %nop10197 = alloca i1, i1 0
- %nop10198 = alloca i1, i1 0
- %nop10199 = alloca i1, i1 0
- %nop10200 = alloca i1, i1 0
- %nop10201 = alloca i1, i1 0
- %nop10202 = alloca i1, i1 0
- %nop10203 = alloca i1, i1 0
- %nop10204 = alloca i1, i1 0
- %nop10205 = alloca i1, i1 0
- %nop10206 = alloca i1, i1 0
- %nop10207 = alloca i1, i1 0
- %nop10208 = alloca i1, i1 0
- %nop10209 = alloca i1, i1 0
- %nop10210 = alloca i1, i1 0
- %nop10211 = alloca i1, i1 0
- %nop10212 = alloca i1, i1 0
- %nop10213 = alloca i1, i1 0
- %nop10214 = alloca i1, i1 0
- %nop10215 = alloca i1, i1 0
- %nop10216 = alloca i1, i1 0
- %nop10217 = alloca i1, i1 0
- %nop10218 = alloca i1, i1 0
- %nop10219 = alloca i1, i1 0
- %nop10220 = alloca i1, i1 0
- %nop10221 = alloca i1, i1 0
- %nop10222 = alloca i1, i1 0
- %nop10223 = alloca i1, i1 0
- %nop10224 = alloca i1, i1 0
- %nop10225 = alloca i1, i1 0
- %nop10226 = alloca i1, i1 0
- %nop10227 = alloca i1, i1 0
- %nop10228 = alloca i1, i1 0
- %nop10229 = alloca i1, i1 0
- %nop10230 = alloca i1, i1 0
- %nop10231 = alloca i1, i1 0
- %nop10232 = alloca i1, i1 0
- %nop10233 = alloca i1, i1 0
- %nop10234 = alloca i1, i1 0
- %nop10235 = alloca i1, i1 0
- %nop10236 = alloca i1, i1 0
- %nop10237 = alloca i1, i1 0
- %nop10238 = alloca i1, i1 0
- %nop10239 = alloca i1, i1 0
- %nop10240 = alloca i1, i1 0
- %nop10241 = alloca i1, i1 0
- %nop10242 = alloca i1, i1 0
- %nop10243 = alloca i1, i1 0
- %nop10244 = alloca i1, i1 0
- %nop10245 = alloca i1, i1 0
- %nop10246 = alloca i1, i1 0
- %nop10247 = alloca i1, i1 0
- %nop10248 = alloca i1, i1 0
- %nop10249 = alloca i1, i1 0
- %nop10250 = alloca i1, i1 0
- %nop10251 = alloca i1, i1 0
- %nop10252 = alloca i1, i1 0
- %nop10253 = alloca i1, i1 0
- %nop10254 = alloca i1, i1 0
- %nop10255 = alloca i1, i1 0
- %nop10256 = alloca i1, i1 0
- %nop10257 = alloca i1, i1 0
- %nop10258 = alloca i1, i1 0
- %nop10259 = alloca i1, i1 0
- %nop10260 = alloca i1, i1 0
- %nop10261 = alloca i1, i1 0
- %nop10262 = alloca i1, i1 0
- %nop10263 = alloca i1, i1 0
- %nop10264 = alloca i1, i1 0
- %nop10265 = alloca i1, i1 0
- %nop10266 = alloca i1, i1 0
- %nop10267 = alloca i1, i1 0
- %nop10268 = alloca i1, i1 0
- %nop10269 = alloca i1, i1 0
- %nop10270 = alloca i1, i1 0
- %nop10271 = alloca i1, i1 0
- %nop10272 = alloca i1, i1 0
- %nop10273 = alloca i1, i1 0
- %nop10274 = alloca i1, i1 0
- %nop10275 = alloca i1, i1 0
- %nop10276 = alloca i1, i1 0
- %nop10277 = alloca i1, i1 0
- %nop10278 = alloca i1, i1 0
- %nop10279 = alloca i1, i1 0
- %nop10280 = alloca i1, i1 0
- %nop10281 = alloca i1, i1 0
- %nop10282 = alloca i1, i1 0
- %nop10283 = alloca i1, i1 0
- %nop10284 = alloca i1, i1 0
- %nop10285 = alloca i1, i1 0
- %nop10286 = alloca i1, i1 0
- %nop10287 = alloca i1, i1 0
- %nop10288 = alloca i1, i1 0
- %nop10289 = alloca i1, i1 0
- %nop10290 = alloca i1, i1 0
- %nop10291 = alloca i1, i1 0
- %nop10292 = alloca i1, i1 0
- %nop10293 = alloca i1, i1 0
- %nop10294 = alloca i1, i1 0
- %nop10295 = alloca i1, i1 0
- %nop10296 = alloca i1, i1 0
- %nop10297 = alloca i1, i1 0
- %nop10298 = alloca i1, i1 0
- %nop10299 = alloca i1, i1 0
- %nop10300 = alloca i1, i1 0
- %nop10301 = alloca i1, i1 0
- %nop10302 = alloca i1, i1 0
- %nop10303 = alloca i1, i1 0
- %nop10304 = alloca i1, i1 0
- %nop10305 = alloca i1, i1 0
- %nop10306 = alloca i1, i1 0
- %nop10307 = alloca i1, i1 0
- %nop10308 = alloca i1, i1 0
- %nop10309 = alloca i1, i1 0
- %nop10310 = alloca i1, i1 0
- %nop10311 = alloca i1, i1 0
- %nop10312 = alloca i1, i1 0
- %nop10313 = alloca i1, i1 0
- %nop10314 = alloca i1, i1 0
- %nop10315 = alloca i1, i1 0
- %nop10316 = alloca i1, i1 0
- %nop10317 = alloca i1, i1 0
- %nop10318 = alloca i1, i1 0
- %nop10319 = alloca i1, i1 0
- %nop10320 = alloca i1, i1 0
- %nop10321 = alloca i1, i1 0
- %nop10322 = alloca i1, i1 0
- %nop10323 = alloca i1, i1 0
- %nop10324 = alloca i1, i1 0
- %nop10325 = alloca i1, i1 0
- %nop10326 = alloca i1, i1 0
- %nop10327 = alloca i1, i1 0
- %nop10328 = alloca i1, i1 0
- %nop10329 = alloca i1, i1 0
- %nop10330 = alloca i1, i1 0
- %nop10331 = alloca i1, i1 0
- %nop10332 = alloca i1, i1 0
- %nop10333 = alloca i1, i1 0
- %nop10334 = alloca i1, i1 0
- %nop10335 = alloca i1, i1 0
- %nop10336 = alloca i1, i1 0
- %nop10337 = alloca i1, i1 0
- %nop10338 = alloca i1, i1 0
- %nop10339 = alloca i1, i1 0
- %nop10340 = alloca i1, i1 0
- %nop10341 = alloca i1, i1 0
- %nop10342 = alloca i1, i1 0
- %nop10343 = alloca i1, i1 0
- %nop10344 = alloca i1, i1 0
- %nop10345 = alloca i1, i1 0
- %nop10346 = alloca i1, i1 0
- %nop10347 = alloca i1, i1 0
- %nop10348 = alloca i1, i1 0
- %nop10349 = alloca i1, i1 0
- %nop10350 = alloca i1, i1 0
- %nop10351 = alloca i1, i1 0
- %nop10352 = alloca i1, i1 0
- %nop10353 = alloca i1, i1 0
- %nop10354 = alloca i1, i1 0
- %nop10355 = alloca i1, i1 0
- %nop10356 = alloca i1, i1 0
- %nop10357 = alloca i1, i1 0
- %nop10358 = alloca i1, i1 0
- %nop10359 = alloca i1, i1 0
- %nop10360 = alloca i1, i1 0
- %nop10361 = alloca i1, i1 0
- %nop10362 = alloca i1, i1 0
- %nop10363 = alloca i1, i1 0
- %nop10364 = alloca i1, i1 0
- %nop10365 = alloca i1, i1 0
- %nop10366 = alloca i1, i1 0
- %nop10367 = alloca i1, i1 0
- %nop10368 = alloca i1, i1 0
- %nop10369 = alloca i1, i1 0
- %nop10370 = alloca i1, i1 0
- %nop10371 = alloca i1, i1 0
- %nop10372 = alloca i1, i1 0
- %nop10373 = alloca i1, i1 0
- %nop10374 = alloca i1, i1 0
- %nop10375 = alloca i1, i1 0
- %nop10376 = alloca i1, i1 0
- %nop10377 = alloca i1, i1 0
- %nop10378 = alloca i1, i1 0
- %nop10379 = alloca i1, i1 0
- %nop10380 = alloca i1, i1 0
- %nop10381 = alloca i1, i1 0
- %nop10382 = alloca i1, i1 0
- %nop10383 = alloca i1, i1 0
- %nop10384 = alloca i1, i1 0
- %nop10385 = alloca i1, i1 0
- %nop10386 = alloca i1, i1 0
- %nop10387 = alloca i1, i1 0
- %nop10388 = alloca i1, i1 0
- %nop10389 = alloca i1, i1 0
- %nop10390 = alloca i1, i1 0
- %nop10391 = alloca i1, i1 0
- %nop10392 = alloca i1, i1 0
- %nop10393 = alloca i1, i1 0
- %nop10394 = alloca i1, i1 0
- %nop10395 = alloca i1, i1 0
- %nop10396 = alloca i1, i1 0
- %nop10397 = alloca i1, i1 0
- %nop10398 = alloca i1, i1 0
- %nop10399 = alloca i1, i1 0
- %nop10400 = alloca i1, i1 0
- %nop10401 = alloca i1, i1 0
- %nop10402 = alloca i1, i1 0
- %nop10403 = alloca i1, i1 0
- %nop10404 = alloca i1, i1 0
- %nop10405 = alloca i1, i1 0
- %nop10406 = alloca i1, i1 0
- %nop10407 = alloca i1, i1 0
- %nop10408 = alloca i1, i1 0
- %nop10409 = alloca i1, i1 0
- %nop10410 = alloca i1, i1 0
- %nop10411 = alloca i1, i1 0
- %nop10412 = alloca i1, i1 0
- %nop10413 = alloca i1, i1 0
- %nop10414 = alloca i1, i1 0
- %nop10415 = alloca i1, i1 0
- %nop10416 = alloca i1, i1 0
- %nop10417 = alloca i1, i1 0
- %nop10418 = alloca i1, i1 0
- %nop10419 = alloca i1, i1 0
- %nop10420 = alloca i1, i1 0
- %nop10421 = alloca i1, i1 0
- %nop10422 = alloca i1, i1 0
- %nop10423 = alloca i1, i1 0
- %nop10424 = alloca i1, i1 0
- %nop10425 = alloca i1, i1 0
- %nop10426 = alloca i1, i1 0
- %nop10427 = alloca i1, i1 0
- %nop10428 = alloca i1, i1 0
- %nop10429 = alloca i1, i1 0
- %nop10430 = alloca i1, i1 0
- %nop10431 = alloca i1, i1 0
- %nop10432 = alloca i1, i1 0
- %nop10433 = alloca i1, i1 0
- %nop10434 = alloca i1, i1 0
- %nop10435 = alloca i1, i1 0
- %nop10436 = alloca i1, i1 0
- %nop10437 = alloca i1, i1 0
- %nop10438 = alloca i1, i1 0
- %nop10439 = alloca i1, i1 0
- %nop10440 = alloca i1, i1 0
- %nop10441 = alloca i1, i1 0
- %nop10442 = alloca i1, i1 0
- %nop10443 = alloca i1, i1 0
- %nop10444 = alloca i1, i1 0
- %nop10445 = alloca i1, i1 0
- %nop10446 = alloca i1, i1 0
- %nop10447 = alloca i1, i1 0
- %nop10448 = alloca i1, i1 0
- %nop10449 = alloca i1, i1 0
- %nop10450 = alloca i1, i1 0
- %nop10451 = alloca i1, i1 0
- %nop10452 = alloca i1, i1 0
- %nop10453 = alloca i1, i1 0
- %nop10454 = alloca i1, i1 0
- %nop10455 = alloca i1, i1 0
- %nop10456 = alloca i1, i1 0
- %nop10457 = alloca i1, i1 0
- %nop10458 = alloca i1, i1 0
- %nop10459 = alloca i1, i1 0
- %nop10460 = alloca i1, i1 0
- %nop10461 = alloca i1, i1 0
- %nop10462 = alloca i1, i1 0
- %nop10463 = alloca i1, i1 0
- %nop10464 = alloca i1, i1 0
- %nop10465 = alloca i1, i1 0
- %nop10466 = alloca i1, i1 0
- %nop10467 = alloca i1, i1 0
- %nop10468 = alloca i1, i1 0
- %nop10469 = alloca i1, i1 0
- %nop10470 = alloca i1, i1 0
- %nop10471 = alloca i1, i1 0
- %nop10472 = alloca i1, i1 0
- %nop10473 = alloca i1, i1 0
- %nop10474 = alloca i1, i1 0
- %nop10475 = alloca i1, i1 0
- %nop10476 = alloca i1, i1 0
- %nop10477 = alloca i1, i1 0
- %nop10478 = alloca i1, i1 0
- %nop10479 = alloca i1, i1 0
- %nop10480 = alloca i1, i1 0
- %nop10481 = alloca i1, i1 0
- %nop10482 = alloca i1, i1 0
- %nop10483 = alloca i1, i1 0
- %nop10484 = alloca i1, i1 0
- %nop10485 = alloca i1, i1 0
- %nop10486 = alloca i1, i1 0
- %nop10487 = alloca i1, i1 0
- %nop10488 = alloca i1, i1 0
- %nop10489 = alloca i1, i1 0
- %nop10490 = alloca i1, i1 0
- %nop10491 = alloca i1, i1 0
- %nop10492 = alloca i1, i1 0
- %nop10493 = alloca i1, i1 0
- %nop10494 = alloca i1, i1 0
- %nop10495 = alloca i1, i1 0
- %nop10496 = alloca i1, i1 0
- %nop10497 = alloca i1, i1 0
- %nop10498 = alloca i1, i1 0
- %nop10499 = alloca i1, i1 0
- %nop10500 = alloca i1, i1 0
- %nop10501 = alloca i1, i1 0
- %nop10502 = alloca i1, i1 0
- %nop10503 = alloca i1, i1 0
- %nop10504 = alloca i1, i1 0
- %nop10505 = alloca i1, i1 0
- %nop10506 = alloca i1, i1 0
- %nop10507 = alloca i1, i1 0
- %nop10508 = alloca i1, i1 0
- %nop10509 = alloca i1, i1 0
- %nop10510 = alloca i1, i1 0
- %nop10511 = alloca i1, i1 0
- %nop10512 = alloca i1, i1 0
- %nop10513 = alloca i1, i1 0
- %nop10514 = alloca i1, i1 0
- %nop10515 = alloca i1, i1 0
- %nop10516 = alloca i1, i1 0
- %nop10517 = alloca i1, i1 0
- %nop10518 = alloca i1, i1 0
- %nop10519 = alloca i1, i1 0
- %nop10520 = alloca i1, i1 0
- %nop10521 = alloca i1, i1 0
- %nop10522 = alloca i1, i1 0
- %nop10523 = alloca i1, i1 0
- %nop10524 = alloca i1, i1 0
- %nop10525 = alloca i1, i1 0
- %nop10526 = alloca i1, i1 0
- %nop10527 = alloca i1, i1 0
- %nop10528 = alloca i1, i1 0
- %nop10529 = alloca i1, i1 0
- %nop10530 = alloca i1, i1 0
- %nop10531 = alloca i1, i1 0
- %nop10532 = alloca i1, i1 0
- %nop10533 = alloca i1, i1 0
- %nop10534 = alloca i1, i1 0
- %nop10535 = alloca i1, i1 0
- %nop10536 = alloca i1, i1 0
- %nop10537 = alloca i1, i1 0
- %nop10538 = alloca i1, i1 0
- %nop10539 = alloca i1, i1 0
- %nop10540 = alloca i1, i1 0
- %nop10541 = alloca i1, i1 0
- %nop10542 = alloca i1, i1 0
- %nop10543 = alloca i1, i1 0
- %nop10544 = alloca i1, i1 0
- %nop10545 = alloca i1, i1 0
- %nop10546 = alloca i1, i1 0
- %nop10547 = alloca i1, i1 0
- %nop10548 = alloca i1, i1 0
- %nop10549 = alloca i1, i1 0
- %nop10550 = alloca i1, i1 0
- %nop10551 = alloca i1, i1 0
- %nop10552 = alloca i1, i1 0
- %nop10553 = alloca i1, i1 0
- %nop10554 = alloca i1, i1 0
- %nop10555 = alloca i1, i1 0
- %nop10556 = alloca i1, i1 0
- %nop10557 = alloca i1, i1 0
- %nop10558 = alloca i1, i1 0
- %nop10559 = alloca i1, i1 0
- %nop10560 = alloca i1, i1 0
- %nop10561 = alloca i1, i1 0
- %nop10562 = alloca i1, i1 0
- %nop10563 = alloca i1, i1 0
- %nop10564 = alloca i1, i1 0
- %nop10565 = alloca i1, i1 0
- %nop10566 = alloca i1, i1 0
- %nop10567 = alloca i1, i1 0
- %nop10568 = alloca i1, i1 0
- %nop10569 = alloca i1, i1 0
- %nop10570 = alloca i1, i1 0
- %nop10571 = alloca i1, i1 0
- %nop10572 = alloca i1, i1 0
- %nop10573 = alloca i1, i1 0
- %nop10574 = alloca i1, i1 0
- %nop10575 = alloca i1, i1 0
- %nop10576 = alloca i1, i1 0
- %nop10577 = alloca i1, i1 0
- %nop10578 = alloca i1, i1 0
- %nop10579 = alloca i1, i1 0
- %nop10580 = alloca i1, i1 0
- %nop10581 = alloca i1, i1 0
- %nop10582 = alloca i1, i1 0
- %nop10583 = alloca i1, i1 0
- %nop10584 = alloca i1, i1 0
- %nop10585 = alloca i1, i1 0
- %nop10586 = alloca i1, i1 0
- %nop10587 = alloca i1, i1 0
- %nop10588 = alloca i1, i1 0
- %nop10589 = alloca i1, i1 0
- %nop10590 = alloca i1, i1 0
- %nop10591 = alloca i1, i1 0
- %nop10592 = alloca i1, i1 0
- %nop10593 = alloca i1, i1 0
- %nop10594 = alloca i1, i1 0
- %nop10595 = alloca i1, i1 0
- %nop10596 = alloca i1, i1 0
- %nop10597 = alloca i1, i1 0
- %nop10598 = alloca i1, i1 0
- %nop10599 = alloca i1, i1 0
- %nop10600 = alloca i1, i1 0
- %nop10601 = alloca i1, i1 0
- %nop10602 = alloca i1, i1 0
- %nop10603 = alloca i1, i1 0
- %nop10604 = alloca i1, i1 0
- %nop10605 = alloca i1, i1 0
- %nop10606 = alloca i1, i1 0
- %nop10607 = alloca i1, i1 0
- %nop10608 = alloca i1, i1 0
- %nop10609 = alloca i1, i1 0
- %nop10610 = alloca i1, i1 0
- %nop10611 = alloca i1, i1 0
- %nop10612 = alloca i1, i1 0
- %nop10613 = alloca i1, i1 0
- %nop10614 = alloca i1, i1 0
- %nop10615 = alloca i1, i1 0
- %nop10616 = alloca i1, i1 0
- %nop10617 = alloca i1, i1 0
- %nop10618 = alloca i1, i1 0
- %nop10619 = alloca i1, i1 0
- %nop10620 = alloca i1, i1 0
- %nop10621 = alloca i1, i1 0
- %nop10622 = alloca i1, i1 0
- %nop10623 = alloca i1, i1 0
- %nop10624 = alloca i1, i1 0
- %nop10625 = alloca i1, i1 0
- %nop10626 = alloca i1, i1 0
- %nop10627 = alloca i1, i1 0
- %nop10628 = alloca i1, i1 0
- %nop10629 = alloca i1, i1 0
- %nop10630 = alloca i1, i1 0
- %nop10631 = alloca i1, i1 0
- %nop10632 = alloca i1, i1 0
- %nop10633 = alloca i1, i1 0
- %nop10634 = alloca i1, i1 0
- %nop10635 = alloca i1, i1 0
- %nop10636 = alloca i1, i1 0
- %nop10637 = alloca i1, i1 0
- %nop10638 = alloca i1, i1 0
- %nop10639 = alloca i1, i1 0
- %nop10640 = alloca i1, i1 0
- %nop10641 = alloca i1, i1 0
- %nop10642 = alloca i1, i1 0
- %nop10643 = alloca i1, i1 0
- %nop10644 = alloca i1, i1 0
- %nop10645 = alloca i1, i1 0
- %nop10646 = alloca i1, i1 0
- %nop10647 = alloca i1, i1 0
- %nop10648 = alloca i1, i1 0
- %nop10649 = alloca i1, i1 0
- %nop10650 = alloca i1, i1 0
- %nop10651 = alloca i1, i1 0
- %nop10652 = alloca i1, i1 0
- %nop10653 = alloca i1, i1 0
- %nop10654 = alloca i1, i1 0
- %nop10655 = alloca i1, i1 0
- %nop10656 = alloca i1, i1 0
- %nop10657 = alloca i1, i1 0
- %nop10658 = alloca i1, i1 0
- %nop10659 = alloca i1, i1 0
- %nop10660 = alloca i1, i1 0
- %nop10661 = alloca i1, i1 0
- %nop10662 = alloca i1, i1 0
- %nop10663 = alloca i1, i1 0
- %nop10664 = alloca i1, i1 0
- %nop10665 = alloca i1, i1 0
- %nop10666 = alloca i1, i1 0
- %nop10667 = alloca i1, i1 0
- %nop10668 = alloca i1, i1 0
- %nop10669 = alloca i1, i1 0
- %nop10670 = alloca i1, i1 0
- %nop10671 = alloca i1, i1 0
- %nop10672 = alloca i1, i1 0
- %nop10673 = alloca i1, i1 0
- %nop10674 = alloca i1, i1 0
- %nop10675 = alloca i1, i1 0
- %nop10676 = alloca i1, i1 0
- %nop10677 = alloca i1, i1 0
- %nop10678 = alloca i1, i1 0
- %nop10679 = alloca i1, i1 0
- %nop10680 = alloca i1, i1 0
- %nop10681 = alloca i1, i1 0
- %nop10682 = alloca i1, i1 0
- %nop10683 = alloca i1, i1 0
- %nop10684 = alloca i1, i1 0
- %nop10685 = alloca i1, i1 0
- %nop10686 = alloca i1, i1 0
- %nop10687 = alloca i1, i1 0
- %nop10688 = alloca i1, i1 0
- %nop10689 = alloca i1, i1 0
- %nop10690 = alloca i1, i1 0
- %nop10691 = alloca i1, i1 0
- %nop10692 = alloca i1, i1 0
- %nop10693 = alloca i1, i1 0
- %nop10694 = alloca i1, i1 0
- %nop10695 = alloca i1, i1 0
- %nop10696 = alloca i1, i1 0
- %nop10697 = alloca i1, i1 0
- %nop10698 = alloca i1, i1 0
- %nop10699 = alloca i1, i1 0
- %nop10700 = alloca i1, i1 0
- %nop10701 = alloca i1, i1 0
- %nop10702 = alloca i1, i1 0
- %nop10703 = alloca i1, i1 0
- %nop10704 = alloca i1, i1 0
- %nop10705 = alloca i1, i1 0
- %nop10706 = alloca i1, i1 0
- %nop10707 = alloca i1, i1 0
- %nop10708 = alloca i1, i1 0
- %nop10709 = alloca i1, i1 0
- %nop10710 = alloca i1, i1 0
- %nop10711 = alloca i1, i1 0
- %nop10712 = alloca i1, i1 0
- %nop10713 = alloca i1, i1 0
- %nop10714 = alloca i1, i1 0
- %nop10715 = alloca i1, i1 0
- %nop10716 = alloca i1, i1 0
- %nop10717 = alloca i1, i1 0
- %nop10718 = alloca i1, i1 0
- %nop10719 = alloca i1, i1 0
- %nop10720 = alloca i1, i1 0
- %nop10721 = alloca i1, i1 0
- %nop10722 = alloca i1, i1 0
- %nop10723 = alloca i1, i1 0
- %nop10724 = alloca i1, i1 0
- %nop10725 = alloca i1, i1 0
- %nop10726 = alloca i1, i1 0
- %nop10727 = alloca i1, i1 0
- %nop10728 = alloca i1, i1 0
- %nop10729 = alloca i1, i1 0
- %nop10730 = alloca i1, i1 0
- %nop10731 = alloca i1, i1 0
- %nop10732 = alloca i1, i1 0
- %nop10733 = alloca i1, i1 0
- %nop10734 = alloca i1, i1 0
- %nop10735 = alloca i1, i1 0
- %nop10736 = alloca i1, i1 0
- %nop10737 = alloca i1, i1 0
- %nop10738 = alloca i1, i1 0
- %nop10739 = alloca i1, i1 0
- %nop10740 = alloca i1, i1 0
- %nop10741 = alloca i1, i1 0
- %nop10742 = alloca i1, i1 0
- %nop10743 = alloca i1, i1 0
- %nop10744 = alloca i1, i1 0
- %nop10745 = alloca i1, i1 0
- %nop10746 = alloca i1, i1 0
- %nop10747 = alloca i1, i1 0
- %nop10748 = alloca i1, i1 0
- %nop10749 = alloca i1, i1 0
- %nop10750 = alloca i1, i1 0
- %nop10751 = alloca i1, i1 0
- %nop10752 = alloca i1, i1 0
- %nop10753 = alloca i1, i1 0
- %nop10754 = alloca i1, i1 0
- %nop10755 = alloca i1, i1 0
- %nop10756 = alloca i1, i1 0
- %nop10757 = alloca i1, i1 0
- %nop10758 = alloca i1, i1 0
- %nop10759 = alloca i1, i1 0
- %nop10760 = alloca i1, i1 0
- %nop10761 = alloca i1, i1 0
- %nop10762 = alloca i1, i1 0
- %nop10763 = alloca i1, i1 0
- %nop10764 = alloca i1, i1 0
- %nop10765 = alloca i1, i1 0
- %nop10766 = alloca i1, i1 0
- %nop10767 = alloca i1, i1 0
- %nop10768 = alloca i1, i1 0
- %nop10769 = alloca i1, i1 0
- %nop10770 = alloca i1, i1 0
- %nop10771 = alloca i1, i1 0
- %nop10772 = alloca i1, i1 0
- %nop10773 = alloca i1, i1 0
- %nop10774 = alloca i1, i1 0
- %nop10775 = alloca i1, i1 0
- %nop10776 = alloca i1, i1 0
- %nop10777 = alloca i1, i1 0
- %nop10778 = alloca i1, i1 0
- %nop10779 = alloca i1, i1 0
- %nop10780 = alloca i1, i1 0
- %nop10781 = alloca i1, i1 0
- %nop10782 = alloca i1, i1 0
- %nop10783 = alloca i1, i1 0
- %nop10784 = alloca i1, i1 0
- %nop10785 = alloca i1, i1 0
- %nop10786 = alloca i1, i1 0
- %nop10787 = alloca i1, i1 0
- %nop10788 = alloca i1, i1 0
- %nop10789 = alloca i1, i1 0
- %nop10790 = alloca i1, i1 0
- %nop10791 = alloca i1, i1 0
- %nop10792 = alloca i1, i1 0
- %nop10793 = alloca i1, i1 0
- %nop10794 = alloca i1, i1 0
- %nop10795 = alloca i1, i1 0
- %nop10796 = alloca i1, i1 0
- %nop10797 = alloca i1, i1 0
- %nop10798 = alloca i1, i1 0
- %nop10799 = alloca i1, i1 0
- %nop10800 = alloca i1, i1 0
- %nop10801 = alloca i1, i1 0
- %nop10802 = alloca i1, i1 0
- %nop10803 = alloca i1, i1 0
- %nop10804 = alloca i1, i1 0
- %nop10805 = alloca i1, i1 0
- %nop10806 = alloca i1, i1 0
- %nop10807 = alloca i1, i1 0
- %nop10808 = alloca i1, i1 0
- %nop10809 = alloca i1, i1 0
- %nop10810 = alloca i1, i1 0
- %nop10811 = alloca i1, i1 0
- %nop10812 = alloca i1, i1 0
- %nop10813 = alloca i1, i1 0
- %nop10814 = alloca i1, i1 0
- %nop10815 = alloca i1, i1 0
- %nop10816 = alloca i1, i1 0
- %nop10817 = alloca i1, i1 0
- %nop10818 = alloca i1, i1 0
- %nop10819 = alloca i1, i1 0
- %nop10820 = alloca i1, i1 0
- %nop10821 = alloca i1, i1 0
- %nop10822 = alloca i1, i1 0
- %nop10823 = alloca i1, i1 0
- %nop10824 = alloca i1, i1 0
- %nop10825 = alloca i1, i1 0
- %nop10826 = alloca i1, i1 0
- %nop10827 = alloca i1, i1 0
- %nop10828 = alloca i1, i1 0
- %nop10829 = alloca i1, i1 0
- %nop10830 = alloca i1, i1 0
- %nop10831 = alloca i1, i1 0
- %nop10832 = alloca i1, i1 0
- %nop10833 = alloca i1, i1 0
- %nop10834 = alloca i1, i1 0
- %nop10835 = alloca i1, i1 0
- %nop10836 = alloca i1, i1 0
- %nop10837 = alloca i1, i1 0
- %nop10838 = alloca i1, i1 0
- %nop10839 = alloca i1, i1 0
- %nop10840 = alloca i1, i1 0
- %nop10841 = alloca i1, i1 0
- %nop10842 = alloca i1, i1 0
- %nop10843 = alloca i1, i1 0
- %nop10844 = alloca i1, i1 0
- %nop10845 = alloca i1, i1 0
- %nop10846 = alloca i1, i1 0
- %nop10847 = alloca i1, i1 0
- %nop10848 = alloca i1, i1 0
- %nop10849 = alloca i1, i1 0
- %nop10850 = alloca i1, i1 0
- %nop10851 = alloca i1, i1 0
- %nop10852 = alloca i1, i1 0
- %nop10853 = alloca i1, i1 0
- %nop10854 = alloca i1, i1 0
- %nop10855 = alloca i1, i1 0
- %nop10856 = alloca i1, i1 0
- %nop10857 = alloca i1, i1 0
- %nop10858 = alloca i1, i1 0
- %nop10859 = alloca i1, i1 0
- %nop10860 = alloca i1, i1 0
- %nop10861 = alloca i1, i1 0
- %nop10862 = alloca i1, i1 0
- %nop10863 = alloca i1, i1 0
- %nop10864 = alloca i1, i1 0
- %nop10865 = alloca i1, i1 0
- %nop10866 = alloca i1, i1 0
- %nop10867 = alloca i1, i1 0
- %nop10868 = alloca i1, i1 0
- %nop10869 = alloca i1, i1 0
- %nop10870 = alloca i1, i1 0
- %nop10871 = alloca i1, i1 0
- %nop10872 = alloca i1, i1 0
- %nop10873 = alloca i1, i1 0
- %nop10874 = alloca i1, i1 0
- %nop10875 = alloca i1, i1 0
- %nop10876 = alloca i1, i1 0
- %nop10877 = alloca i1, i1 0
- %nop10878 = alloca i1, i1 0
- %nop10879 = alloca i1, i1 0
- %nop10880 = alloca i1, i1 0
- %nop10881 = alloca i1, i1 0
- %nop10882 = alloca i1, i1 0
- %nop10883 = alloca i1, i1 0
- %nop10884 = alloca i1, i1 0
- %nop10885 = alloca i1, i1 0
- %nop10886 = alloca i1, i1 0
- %nop10887 = alloca i1, i1 0
- %nop10888 = alloca i1, i1 0
- %nop10889 = alloca i1, i1 0
- %nop10890 = alloca i1, i1 0
- %nop10891 = alloca i1, i1 0
- %nop10892 = alloca i1, i1 0
- %nop10893 = alloca i1, i1 0
- %nop10894 = alloca i1, i1 0
- %nop10895 = alloca i1, i1 0
- %nop10896 = alloca i1, i1 0
- %nop10897 = alloca i1, i1 0
- %nop10898 = alloca i1, i1 0
- %nop10899 = alloca i1, i1 0
- %nop10900 = alloca i1, i1 0
- %nop10901 = alloca i1, i1 0
- %nop10902 = alloca i1, i1 0
- %nop10903 = alloca i1, i1 0
- %nop10904 = alloca i1, i1 0
- %nop10905 = alloca i1, i1 0
- %nop10906 = alloca i1, i1 0
- %nop10907 = alloca i1, i1 0
- %nop10908 = alloca i1, i1 0
- %nop10909 = alloca i1, i1 0
- %nop10910 = alloca i1, i1 0
- %nop10911 = alloca i1, i1 0
- %nop10912 = alloca i1, i1 0
- %nop10913 = alloca i1, i1 0
- %nop10914 = alloca i1, i1 0
- %nop10915 = alloca i1, i1 0
- %nop10916 = alloca i1, i1 0
- %nop10917 = alloca i1, i1 0
- %nop10918 = alloca i1, i1 0
- %nop10919 = alloca i1, i1 0
- %nop10920 = alloca i1, i1 0
- %nop10921 = alloca i1, i1 0
- %nop10922 = alloca i1, i1 0
- %nop10923 = alloca i1, i1 0
- %nop10924 = alloca i1, i1 0
- %nop10925 = alloca i1, i1 0
- %nop10926 = alloca i1, i1 0
- %nop10927 = alloca i1, i1 0
- %nop10928 = alloca i1, i1 0
- %nop10929 = alloca i1, i1 0
- %nop10930 = alloca i1, i1 0
- %nop10931 = alloca i1, i1 0
- %nop10932 = alloca i1, i1 0
- %nop10933 = alloca i1, i1 0
- %nop10934 = alloca i1, i1 0
- %nop10935 = alloca i1, i1 0
- %nop10936 = alloca i1, i1 0
- %nop10937 = alloca i1, i1 0
- %nop10938 = alloca i1, i1 0
- %nop10939 = alloca i1, i1 0
- %nop10940 = alloca i1, i1 0
- %nop10941 = alloca i1, i1 0
- %nop10942 = alloca i1, i1 0
- %nop10943 = alloca i1, i1 0
- %nop10944 = alloca i1, i1 0
- %nop10945 = alloca i1, i1 0
- %nop10946 = alloca i1, i1 0
- %nop10947 = alloca i1, i1 0
- %nop10948 = alloca i1, i1 0
- %nop10949 = alloca i1, i1 0
- %nop10950 = alloca i1, i1 0
- %nop10951 = alloca i1, i1 0
- %nop10952 = alloca i1, i1 0
- %nop10953 = alloca i1, i1 0
- %nop10954 = alloca i1, i1 0
- %nop10955 = alloca i1, i1 0
- %nop10956 = alloca i1, i1 0
- %nop10957 = alloca i1, i1 0
- %nop10958 = alloca i1, i1 0
- %nop10959 = alloca i1, i1 0
- %nop10960 = alloca i1, i1 0
- %nop10961 = alloca i1, i1 0
- %nop10962 = alloca i1, i1 0
- %nop10963 = alloca i1, i1 0
- %nop10964 = alloca i1, i1 0
- %nop10965 = alloca i1, i1 0
- %nop10966 = alloca i1, i1 0
- %nop10967 = alloca i1, i1 0
- %nop10968 = alloca i1, i1 0
- %nop10969 = alloca i1, i1 0
- %nop10970 = alloca i1, i1 0
- %nop10971 = alloca i1, i1 0
- %nop10972 = alloca i1, i1 0
- %nop10973 = alloca i1, i1 0
- %nop10974 = alloca i1, i1 0
- %nop10975 = alloca i1, i1 0
- %nop10976 = alloca i1, i1 0
- %nop10977 = alloca i1, i1 0
- %nop10978 = alloca i1, i1 0
- %nop10979 = alloca i1, i1 0
- %nop10980 = alloca i1, i1 0
- %nop10981 = alloca i1, i1 0
- %nop10982 = alloca i1, i1 0
- %nop10983 = alloca i1, i1 0
- %nop10984 = alloca i1, i1 0
- %nop10985 = alloca i1, i1 0
- %nop10986 = alloca i1, i1 0
- %nop10987 = alloca i1, i1 0
- %nop10988 = alloca i1, i1 0
- %nop10989 = alloca i1, i1 0
- %nop10990 = alloca i1, i1 0
- %nop10991 = alloca i1, i1 0
- %nop10992 = alloca i1, i1 0
- %nop10993 = alloca i1, i1 0
- %nop10994 = alloca i1, i1 0
- %nop10995 = alloca i1, i1 0
- %nop10996 = alloca i1, i1 0
- %nop10997 = alloca i1, i1 0
- %nop10998 = alloca i1, i1 0
- %nop10999 = alloca i1, i1 0
- %nop11000 = alloca i1, i1 0
- %nop11001 = alloca i1, i1 0
- %nop11002 = alloca i1, i1 0
- %nop11003 = alloca i1, i1 0
- %nop11004 = alloca i1, i1 0
- %nop11005 = alloca i1, i1 0
- %nop11006 = alloca i1, i1 0
- %nop11007 = alloca i1, i1 0
- %nop11008 = alloca i1, i1 0
- %nop11009 = alloca i1, i1 0
- %nop11010 = alloca i1, i1 0
- %nop11011 = alloca i1, i1 0
- %nop11012 = alloca i1, i1 0
- %nop11013 = alloca i1, i1 0
- %nop11014 = alloca i1, i1 0
- %nop11015 = alloca i1, i1 0
- %nop11016 = alloca i1, i1 0
- %nop11017 = alloca i1, i1 0
- %nop11018 = alloca i1, i1 0
- %nop11019 = alloca i1, i1 0
- %nop11020 = alloca i1, i1 0
- %nop11021 = alloca i1, i1 0
- %nop11022 = alloca i1, i1 0
- %nop11023 = alloca i1, i1 0
- %nop11024 = alloca i1, i1 0
- %nop11025 = alloca i1, i1 0
- %nop11026 = alloca i1, i1 0
- %nop11027 = alloca i1, i1 0
- %nop11028 = alloca i1, i1 0
- %nop11029 = alloca i1, i1 0
- %nop11030 = alloca i1, i1 0
- %nop11031 = alloca i1, i1 0
- %nop11032 = alloca i1, i1 0
- %nop11033 = alloca i1, i1 0
- %nop11034 = alloca i1, i1 0
- %nop11035 = alloca i1, i1 0
- %nop11036 = alloca i1, i1 0
- %nop11037 = alloca i1, i1 0
- %nop11038 = alloca i1, i1 0
- %nop11039 = alloca i1, i1 0
- %nop11040 = alloca i1, i1 0
- %nop11041 = alloca i1, i1 0
- %nop11042 = alloca i1, i1 0
- %nop11043 = alloca i1, i1 0
- %nop11044 = alloca i1, i1 0
- %nop11045 = alloca i1, i1 0
- %nop11046 = alloca i1, i1 0
- %nop11047 = alloca i1, i1 0
- %nop11048 = alloca i1, i1 0
- %nop11049 = alloca i1, i1 0
- %nop11050 = alloca i1, i1 0
- %nop11051 = alloca i1, i1 0
- %nop11052 = alloca i1, i1 0
- %nop11053 = alloca i1, i1 0
- %nop11054 = alloca i1, i1 0
- %nop11055 = alloca i1, i1 0
- %nop11056 = alloca i1, i1 0
- %nop11057 = alloca i1, i1 0
- %nop11058 = alloca i1, i1 0
- %nop11059 = alloca i1, i1 0
- %nop11060 = alloca i1, i1 0
- %nop11061 = alloca i1, i1 0
- %nop11062 = alloca i1, i1 0
- %nop11063 = alloca i1, i1 0
- %nop11064 = alloca i1, i1 0
- %nop11065 = alloca i1, i1 0
- %nop11066 = alloca i1, i1 0
- %nop11067 = alloca i1, i1 0
- %nop11068 = alloca i1, i1 0
- %nop11069 = alloca i1, i1 0
- %nop11070 = alloca i1, i1 0
- %nop11071 = alloca i1, i1 0
- %nop11072 = alloca i1, i1 0
- %nop11073 = alloca i1, i1 0
- %nop11074 = alloca i1, i1 0
- %nop11075 = alloca i1, i1 0
- %nop11076 = alloca i1, i1 0
- %nop11077 = alloca i1, i1 0
- %nop11078 = alloca i1, i1 0
- %nop11079 = alloca i1, i1 0
- %nop11080 = alloca i1, i1 0
- %nop11081 = alloca i1, i1 0
- %nop11082 = alloca i1, i1 0
- %nop11083 = alloca i1, i1 0
- %nop11084 = alloca i1, i1 0
- %nop11085 = alloca i1, i1 0
- %nop11086 = alloca i1, i1 0
- %nop11087 = alloca i1, i1 0
- %nop11088 = alloca i1, i1 0
- %nop11089 = alloca i1, i1 0
- %nop11090 = alloca i1, i1 0
- %nop11091 = alloca i1, i1 0
- %nop11092 = alloca i1, i1 0
- %nop11093 = alloca i1, i1 0
- %nop11094 = alloca i1, i1 0
- %nop11095 = alloca i1, i1 0
- %nop11096 = alloca i1, i1 0
- %nop11097 = alloca i1, i1 0
- %nop11098 = alloca i1, i1 0
- %nop11099 = alloca i1, i1 0
- %nop11100 = alloca i1, i1 0
- %nop11101 = alloca i1, i1 0
- %nop11102 = alloca i1, i1 0
- %nop11103 = alloca i1, i1 0
- %nop11104 = alloca i1, i1 0
- %nop11105 = alloca i1, i1 0
- %nop11106 = alloca i1, i1 0
- %nop11107 = alloca i1, i1 0
- %nop11108 = alloca i1, i1 0
- %nop11109 = alloca i1, i1 0
- %nop11110 = alloca i1, i1 0
- %nop11111 = alloca i1, i1 0
- %nop11112 = alloca i1, i1 0
- %nop11113 = alloca i1, i1 0
- %nop11114 = alloca i1, i1 0
- %nop11115 = alloca i1, i1 0
- %nop11116 = alloca i1, i1 0
- %nop11117 = alloca i1, i1 0
- %nop11118 = alloca i1, i1 0
- %nop11119 = alloca i1, i1 0
- %nop11120 = alloca i1, i1 0
- %nop11121 = alloca i1, i1 0
- %nop11122 = alloca i1, i1 0
- %nop11123 = alloca i1, i1 0
- %nop11124 = alloca i1, i1 0
- %nop11125 = alloca i1, i1 0
- %nop11126 = alloca i1, i1 0
- %nop11127 = alloca i1, i1 0
- %nop11128 = alloca i1, i1 0
- %nop11129 = alloca i1, i1 0
- %nop11130 = alloca i1, i1 0
- %nop11131 = alloca i1, i1 0
- %nop11132 = alloca i1, i1 0
- %nop11133 = alloca i1, i1 0
- %nop11134 = alloca i1, i1 0
- %nop11135 = alloca i1, i1 0
- %nop11136 = alloca i1, i1 0
- %nop11137 = alloca i1, i1 0
- %nop11138 = alloca i1, i1 0
- %nop11139 = alloca i1, i1 0
- %nop11140 = alloca i1, i1 0
- %nop11141 = alloca i1, i1 0
- %nop11142 = alloca i1, i1 0
- %nop11143 = alloca i1, i1 0
- %nop11144 = alloca i1, i1 0
- %nop11145 = alloca i1, i1 0
- %nop11146 = alloca i1, i1 0
- %nop11147 = alloca i1, i1 0
- %nop11148 = alloca i1, i1 0
- %nop11149 = alloca i1, i1 0
- %nop11150 = alloca i1, i1 0
- %nop11151 = alloca i1, i1 0
- %nop11152 = alloca i1, i1 0
- %nop11153 = alloca i1, i1 0
- %nop11154 = alloca i1, i1 0
- %nop11155 = alloca i1, i1 0
- %nop11156 = alloca i1, i1 0
- %nop11157 = alloca i1, i1 0
- %nop11158 = alloca i1, i1 0
- %nop11159 = alloca i1, i1 0
- %nop11160 = alloca i1, i1 0
- %nop11161 = alloca i1, i1 0
- %nop11162 = alloca i1, i1 0
- %nop11163 = alloca i1, i1 0
- %nop11164 = alloca i1, i1 0
- %nop11165 = alloca i1, i1 0
- %nop11166 = alloca i1, i1 0
- %nop11167 = alloca i1, i1 0
- %nop11168 = alloca i1, i1 0
- %nop11169 = alloca i1, i1 0
- %nop11170 = alloca i1, i1 0
- %nop11171 = alloca i1, i1 0
- %nop11172 = alloca i1, i1 0
- %nop11173 = alloca i1, i1 0
- %nop11174 = alloca i1, i1 0
- %nop11175 = alloca i1, i1 0
- %nop11176 = alloca i1, i1 0
- %nop11177 = alloca i1, i1 0
- %nop11178 = alloca i1, i1 0
- %nop11179 = alloca i1, i1 0
- %nop11180 = alloca i1, i1 0
- %nop11181 = alloca i1, i1 0
- %nop11182 = alloca i1, i1 0
- %nop11183 = alloca i1, i1 0
- %nop11184 = alloca i1, i1 0
- %nop11185 = alloca i1, i1 0
- %nop11186 = alloca i1, i1 0
- %nop11187 = alloca i1, i1 0
- %nop11188 = alloca i1, i1 0
- %nop11189 = alloca i1, i1 0
- %nop11190 = alloca i1, i1 0
- %nop11191 = alloca i1, i1 0
- %nop11192 = alloca i1, i1 0
- %nop11193 = alloca i1, i1 0
- %nop11194 = alloca i1, i1 0
- %nop11195 = alloca i1, i1 0
- %nop11196 = alloca i1, i1 0
- %nop11197 = alloca i1, i1 0
- %nop11198 = alloca i1, i1 0
- %nop11199 = alloca i1, i1 0
- %nop11200 = alloca i1, i1 0
- %nop11201 = alloca i1, i1 0
- %nop11202 = alloca i1, i1 0
- %nop11203 = alloca i1, i1 0
- %nop11204 = alloca i1, i1 0
- %nop11205 = alloca i1, i1 0
- %nop11206 = alloca i1, i1 0
- %nop11207 = alloca i1, i1 0
- %nop11208 = alloca i1, i1 0
- %nop11209 = alloca i1, i1 0
- %nop11210 = alloca i1, i1 0
- %nop11211 = alloca i1, i1 0
- %nop11212 = alloca i1, i1 0
- %nop11213 = alloca i1, i1 0
- %nop11214 = alloca i1, i1 0
- %nop11215 = alloca i1, i1 0
- %nop11216 = alloca i1, i1 0
- %nop11217 = alloca i1, i1 0
- %nop11218 = alloca i1, i1 0
- %nop11219 = alloca i1, i1 0
- %nop11220 = alloca i1, i1 0
- %nop11221 = alloca i1, i1 0
- %nop11222 = alloca i1, i1 0
- %nop11223 = alloca i1, i1 0
- %nop11224 = alloca i1, i1 0
- %nop11225 = alloca i1, i1 0
- %nop11226 = alloca i1, i1 0
- %nop11227 = alloca i1, i1 0
- %nop11228 = alloca i1, i1 0
- %nop11229 = alloca i1, i1 0
- %nop11230 = alloca i1, i1 0
- %nop11231 = alloca i1, i1 0
- %nop11232 = alloca i1, i1 0
- %nop11233 = alloca i1, i1 0
- %nop11234 = alloca i1, i1 0
- %nop11235 = alloca i1, i1 0
- %nop11236 = alloca i1, i1 0
- %nop11237 = alloca i1, i1 0
- %nop11238 = alloca i1, i1 0
- %nop11239 = alloca i1, i1 0
- %nop11240 = alloca i1, i1 0
- %nop11241 = alloca i1, i1 0
- %nop11242 = alloca i1, i1 0
- %nop11243 = alloca i1, i1 0
- %nop11244 = alloca i1, i1 0
- %nop11245 = alloca i1, i1 0
- %nop11246 = alloca i1, i1 0
- %nop11247 = alloca i1, i1 0
- %nop11248 = alloca i1, i1 0
- %nop11249 = alloca i1, i1 0
- %nop11250 = alloca i1, i1 0
- %nop11251 = alloca i1, i1 0
- %nop11252 = alloca i1, i1 0
- %nop11253 = alloca i1, i1 0
- %nop11254 = alloca i1, i1 0
- %nop11255 = alloca i1, i1 0
- %nop11256 = alloca i1, i1 0
- %nop11257 = alloca i1, i1 0
- %nop11258 = alloca i1, i1 0
- %nop11259 = alloca i1, i1 0
- %nop11260 = alloca i1, i1 0
- %nop11261 = alloca i1, i1 0
- %nop11262 = alloca i1, i1 0
- %nop11263 = alloca i1, i1 0
- %nop11264 = alloca i1, i1 0
- %nop11265 = alloca i1, i1 0
- %nop11266 = alloca i1, i1 0
- %nop11267 = alloca i1, i1 0
- %nop11268 = alloca i1, i1 0
- %nop11269 = alloca i1, i1 0
- %nop11270 = alloca i1, i1 0
- %nop11271 = alloca i1, i1 0
- %nop11272 = alloca i1, i1 0
- %nop11273 = alloca i1, i1 0
- %nop11274 = alloca i1, i1 0
- %nop11275 = alloca i1, i1 0
- %nop11276 = alloca i1, i1 0
- %nop11277 = alloca i1, i1 0
- %nop11278 = alloca i1, i1 0
- %nop11279 = alloca i1, i1 0
- %nop11280 = alloca i1, i1 0
- %nop11281 = alloca i1, i1 0
- %nop11282 = alloca i1, i1 0
- %nop11283 = alloca i1, i1 0
- %nop11284 = alloca i1, i1 0
- %nop11285 = alloca i1, i1 0
- %nop11286 = alloca i1, i1 0
- %nop11287 = alloca i1, i1 0
- %nop11288 = alloca i1, i1 0
- %nop11289 = alloca i1, i1 0
- %nop11290 = alloca i1, i1 0
- %nop11291 = alloca i1, i1 0
- %nop11292 = alloca i1, i1 0
- %nop11293 = alloca i1, i1 0
- %nop11294 = alloca i1, i1 0
- %nop11295 = alloca i1, i1 0
- %nop11296 = alloca i1, i1 0
- %nop11297 = alloca i1, i1 0
- %nop11298 = alloca i1, i1 0
- %nop11299 = alloca i1, i1 0
- %nop11300 = alloca i1, i1 0
- %nop11301 = alloca i1, i1 0
- %nop11302 = alloca i1, i1 0
- %nop11303 = alloca i1, i1 0
- %nop11304 = alloca i1, i1 0
- %nop11305 = alloca i1, i1 0
- %nop11306 = alloca i1, i1 0
- %nop11307 = alloca i1, i1 0
- %nop11308 = alloca i1, i1 0
- %nop11309 = alloca i1, i1 0
- %nop11310 = alloca i1, i1 0
- %nop11311 = alloca i1, i1 0
- %nop11312 = alloca i1, i1 0
- %nop11313 = alloca i1, i1 0
- %nop11314 = alloca i1, i1 0
- %nop11315 = alloca i1, i1 0
- %nop11316 = alloca i1, i1 0
- %nop11317 = alloca i1, i1 0
- %nop11318 = alloca i1, i1 0
- %nop11319 = alloca i1, i1 0
- %nop11320 = alloca i1, i1 0
- %nop11321 = alloca i1, i1 0
- %nop11322 = alloca i1, i1 0
- %nop11323 = alloca i1, i1 0
- %nop11324 = alloca i1, i1 0
- %nop11325 = alloca i1, i1 0
- %nop11326 = alloca i1, i1 0
- %nop11327 = alloca i1, i1 0
- %nop11328 = alloca i1, i1 0
- %nop11329 = alloca i1, i1 0
- %nop11330 = alloca i1, i1 0
- %nop11331 = alloca i1, i1 0
- %nop11332 = alloca i1, i1 0
- %nop11333 = alloca i1, i1 0
- %nop11334 = alloca i1, i1 0
- %nop11335 = alloca i1, i1 0
- %nop11336 = alloca i1, i1 0
- %nop11337 = alloca i1, i1 0
- %nop11338 = alloca i1, i1 0
- %nop11339 = alloca i1, i1 0
- %nop11340 = alloca i1, i1 0
- %nop11341 = alloca i1, i1 0
- %nop11342 = alloca i1, i1 0
- %nop11343 = alloca i1, i1 0
- %nop11344 = alloca i1, i1 0
- %nop11345 = alloca i1, i1 0
- %nop11346 = alloca i1, i1 0
- %nop11347 = alloca i1, i1 0
- %nop11348 = alloca i1, i1 0
- %nop11349 = alloca i1, i1 0
- %nop11350 = alloca i1, i1 0
- %nop11351 = alloca i1, i1 0
- %nop11352 = alloca i1, i1 0
- %nop11353 = alloca i1, i1 0
- %nop11354 = alloca i1, i1 0
- %nop11355 = alloca i1, i1 0
- %nop11356 = alloca i1, i1 0
- %nop11357 = alloca i1, i1 0
- %nop11358 = alloca i1, i1 0
- %nop11359 = alloca i1, i1 0
- %nop11360 = alloca i1, i1 0
- %nop11361 = alloca i1, i1 0
- %nop11362 = alloca i1, i1 0
- %nop11363 = alloca i1, i1 0
- %nop11364 = alloca i1, i1 0
- %nop11365 = alloca i1, i1 0
- %nop11366 = alloca i1, i1 0
- %nop11367 = alloca i1, i1 0
- %nop11368 = alloca i1, i1 0
- %nop11369 = alloca i1, i1 0
- %nop11370 = alloca i1, i1 0
- %nop11371 = alloca i1, i1 0
- %nop11372 = alloca i1, i1 0
- %nop11373 = alloca i1, i1 0
- %nop11374 = alloca i1, i1 0
- %nop11375 = alloca i1, i1 0
- %nop11376 = alloca i1, i1 0
- %nop11377 = alloca i1, i1 0
- %nop11378 = alloca i1, i1 0
- %nop11379 = alloca i1, i1 0
- %nop11380 = alloca i1, i1 0
- %nop11381 = alloca i1, i1 0
- %nop11382 = alloca i1, i1 0
- %nop11383 = alloca i1, i1 0
- %nop11384 = alloca i1, i1 0
- %nop11385 = alloca i1, i1 0
- %nop11386 = alloca i1, i1 0
- %nop11387 = alloca i1, i1 0
- %nop11388 = alloca i1, i1 0
- %nop11389 = alloca i1, i1 0
- %nop11390 = alloca i1, i1 0
- %nop11391 = alloca i1, i1 0
- %nop11392 = alloca i1, i1 0
- %nop11393 = alloca i1, i1 0
- %nop11394 = alloca i1, i1 0
- %nop11395 = alloca i1, i1 0
- %nop11396 = alloca i1, i1 0
- %nop11397 = alloca i1, i1 0
- %nop11398 = alloca i1, i1 0
- %nop11399 = alloca i1, i1 0
- %nop11400 = alloca i1, i1 0
- %nop11401 = alloca i1, i1 0
- %nop11402 = alloca i1, i1 0
- %nop11403 = alloca i1, i1 0
- %nop11404 = alloca i1, i1 0
- %nop11405 = alloca i1, i1 0
- %nop11406 = alloca i1, i1 0
- %nop11407 = alloca i1, i1 0
- %nop11408 = alloca i1, i1 0
- %nop11409 = alloca i1, i1 0
- %nop11410 = alloca i1, i1 0
- %nop11411 = alloca i1, i1 0
- %nop11412 = alloca i1, i1 0
- %nop11413 = alloca i1, i1 0
- %nop11414 = alloca i1, i1 0
- %nop11415 = alloca i1, i1 0
- %nop11416 = alloca i1, i1 0
- %nop11417 = alloca i1, i1 0
- %nop11418 = alloca i1, i1 0
- %nop11419 = alloca i1, i1 0
- %nop11420 = alloca i1, i1 0
- %nop11421 = alloca i1, i1 0
- %nop11422 = alloca i1, i1 0
- %nop11423 = alloca i1, i1 0
- %nop11424 = alloca i1, i1 0
- %nop11425 = alloca i1, i1 0
- %nop11426 = alloca i1, i1 0
- %nop11427 = alloca i1, i1 0
- %nop11428 = alloca i1, i1 0
- %nop11429 = alloca i1, i1 0
- %nop11430 = alloca i1, i1 0
- %nop11431 = alloca i1, i1 0
- %nop11432 = alloca i1, i1 0
- %nop11433 = alloca i1, i1 0
- %nop11434 = alloca i1, i1 0
- %nop11435 = alloca i1, i1 0
- %nop11436 = alloca i1, i1 0
- %nop11437 = alloca i1, i1 0
- %nop11438 = alloca i1, i1 0
- %nop11439 = alloca i1, i1 0
- %nop11440 = alloca i1, i1 0
- %nop11441 = alloca i1, i1 0
- %nop11442 = alloca i1, i1 0
- %nop11443 = alloca i1, i1 0
- %nop11444 = alloca i1, i1 0
- %nop11445 = alloca i1, i1 0
- %nop11446 = alloca i1, i1 0
- %nop11447 = alloca i1, i1 0
- %nop11448 = alloca i1, i1 0
- %nop11449 = alloca i1, i1 0
- %nop11450 = alloca i1, i1 0
- %nop11451 = alloca i1, i1 0
- %nop11452 = alloca i1, i1 0
- %nop11453 = alloca i1, i1 0
- %nop11454 = alloca i1, i1 0
- %nop11455 = alloca i1, i1 0
- %nop11456 = alloca i1, i1 0
- %nop11457 = alloca i1, i1 0
- %nop11458 = alloca i1, i1 0
- %nop11459 = alloca i1, i1 0
- %nop11460 = alloca i1, i1 0
- %nop11461 = alloca i1, i1 0
- %nop11462 = alloca i1, i1 0
- %nop11463 = alloca i1, i1 0
- %nop11464 = alloca i1, i1 0
- %nop11465 = alloca i1, i1 0
- %nop11466 = alloca i1, i1 0
- %nop11467 = alloca i1, i1 0
- %nop11468 = alloca i1, i1 0
- %nop11469 = alloca i1, i1 0
- %nop11470 = alloca i1, i1 0
- %nop11471 = alloca i1, i1 0
- %nop11472 = alloca i1, i1 0
- %nop11473 = alloca i1, i1 0
- %nop11474 = alloca i1, i1 0
- %nop11475 = alloca i1, i1 0
- %nop11476 = alloca i1, i1 0
- %nop11477 = alloca i1, i1 0
- %nop11478 = alloca i1, i1 0
- %nop11479 = alloca i1, i1 0
- %nop11480 = alloca i1, i1 0
- %nop11481 = alloca i1, i1 0
- %nop11482 = alloca i1, i1 0
- %nop11483 = alloca i1, i1 0
- %nop11484 = alloca i1, i1 0
- %nop11485 = alloca i1, i1 0
- %nop11486 = alloca i1, i1 0
- %nop11487 = alloca i1, i1 0
- %nop11488 = alloca i1, i1 0
- %nop11489 = alloca i1, i1 0
- %nop11490 = alloca i1, i1 0
- %nop11491 = alloca i1, i1 0
- %nop11492 = alloca i1, i1 0
- %nop11493 = alloca i1, i1 0
- %nop11494 = alloca i1, i1 0
- %nop11495 = alloca i1, i1 0
- %nop11496 = alloca i1, i1 0
- %nop11497 = alloca i1, i1 0
- %nop11498 = alloca i1, i1 0
- %nop11499 = alloca i1, i1 0
- %nop11500 = alloca i1, i1 0
- %nop11501 = alloca i1, i1 0
- %nop11502 = alloca i1, i1 0
- %nop11503 = alloca i1, i1 0
- %nop11504 = alloca i1, i1 0
- %nop11505 = alloca i1, i1 0
- %nop11506 = alloca i1, i1 0
- %nop11507 = alloca i1, i1 0
- %nop11508 = alloca i1, i1 0
- %nop11509 = alloca i1, i1 0
- %nop11510 = alloca i1, i1 0
- %nop11511 = alloca i1, i1 0
- %nop11512 = alloca i1, i1 0
- %nop11513 = alloca i1, i1 0
- %nop11514 = alloca i1, i1 0
- %nop11515 = alloca i1, i1 0
- %nop11516 = alloca i1, i1 0
- %nop11517 = alloca i1, i1 0
- %nop11518 = alloca i1, i1 0
- %nop11519 = alloca i1, i1 0
- %nop11520 = alloca i1, i1 0
- %nop11521 = alloca i1, i1 0
- %nop11522 = alloca i1, i1 0
- %nop11523 = alloca i1, i1 0
- %nop11524 = alloca i1, i1 0
- %nop11525 = alloca i1, i1 0
- %nop11526 = alloca i1, i1 0
- %nop11527 = alloca i1, i1 0
- %nop11528 = alloca i1, i1 0
- %nop11529 = alloca i1, i1 0
- %nop11530 = alloca i1, i1 0
- %nop11531 = alloca i1, i1 0
- %nop11532 = alloca i1, i1 0
- %nop11533 = alloca i1, i1 0
- %nop11534 = alloca i1, i1 0
- %nop11535 = alloca i1, i1 0
- %nop11536 = alloca i1, i1 0
- %nop11537 = alloca i1, i1 0
- %nop11538 = alloca i1, i1 0
- %nop11539 = alloca i1, i1 0
- %nop11540 = alloca i1, i1 0
- %nop11541 = alloca i1, i1 0
- %nop11542 = alloca i1, i1 0
- %nop11543 = alloca i1, i1 0
- %nop11544 = alloca i1, i1 0
- %nop11545 = alloca i1, i1 0
- %nop11546 = alloca i1, i1 0
- %nop11547 = alloca i1, i1 0
- %nop11548 = alloca i1, i1 0
- %nop11549 = alloca i1, i1 0
- %nop11550 = alloca i1, i1 0
- %nop11551 = alloca i1, i1 0
- %nop11552 = alloca i1, i1 0
- %nop11553 = alloca i1, i1 0
- %nop11554 = alloca i1, i1 0
- %nop11555 = alloca i1, i1 0
- %nop11556 = alloca i1, i1 0
- %nop11557 = alloca i1, i1 0
- %nop11558 = alloca i1, i1 0
- %nop11559 = alloca i1, i1 0
- %nop11560 = alloca i1, i1 0
- %nop11561 = alloca i1, i1 0
- %nop11562 = alloca i1, i1 0
- %nop11563 = alloca i1, i1 0
- %nop11564 = alloca i1, i1 0
- %nop11565 = alloca i1, i1 0
- %nop11566 = alloca i1, i1 0
- %nop11567 = alloca i1, i1 0
- %nop11568 = alloca i1, i1 0
- %nop11569 = alloca i1, i1 0
- %nop11570 = alloca i1, i1 0
- %nop11571 = alloca i1, i1 0
- %nop11572 = alloca i1, i1 0
- %nop11573 = alloca i1, i1 0
- %nop11574 = alloca i1, i1 0
- %nop11575 = alloca i1, i1 0
- %nop11576 = alloca i1, i1 0
- %nop11577 = alloca i1, i1 0
- %nop11578 = alloca i1, i1 0
- %nop11579 = alloca i1, i1 0
- %nop11580 = alloca i1, i1 0
- %nop11581 = alloca i1, i1 0
- %nop11582 = alloca i1, i1 0
- %nop11583 = alloca i1, i1 0
- %nop11584 = alloca i1, i1 0
- %nop11585 = alloca i1, i1 0
- %nop11586 = alloca i1, i1 0
- %nop11587 = alloca i1, i1 0
- %nop11588 = alloca i1, i1 0
- %nop11589 = alloca i1, i1 0
- %nop11590 = alloca i1, i1 0
- %nop11591 = alloca i1, i1 0
- %nop11592 = alloca i1, i1 0
- %nop11593 = alloca i1, i1 0
- %nop11594 = alloca i1, i1 0
- %nop11595 = alloca i1, i1 0
- %nop11596 = alloca i1, i1 0
- %nop11597 = alloca i1, i1 0
- %nop11598 = alloca i1, i1 0
- %nop11599 = alloca i1, i1 0
- %nop11600 = alloca i1, i1 0
- %nop11601 = alloca i1, i1 0
- %nop11602 = alloca i1, i1 0
- %nop11603 = alloca i1, i1 0
- %nop11604 = alloca i1, i1 0
- %nop11605 = alloca i1, i1 0
- %nop11606 = alloca i1, i1 0
- %nop11607 = alloca i1, i1 0
- %nop11608 = alloca i1, i1 0
- %nop11609 = alloca i1, i1 0
- %nop11610 = alloca i1, i1 0
- %nop11611 = alloca i1, i1 0
- %nop11612 = alloca i1, i1 0
- %nop11613 = alloca i1, i1 0
- %nop11614 = alloca i1, i1 0
- %nop11615 = alloca i1, i1 0
- %nop11616 = alloca i1, i1 0
- %nop11617 = alloca i1, i1 0
- %nop11618 = alloca i1, i1 0
- %nop11619 = alloca i1, i1 0
- %nop11620 = alloca i1, i1 0
- %nop11621 = alloca i1, i1 0
- %nop11622 = alloca i1, i1 0
- %nop11623 = alloca i1, i1 0
- %nop11624 = alloca i1, i1 0
- %nop11625 = alloca i1, i1 0
- %nop11626 = alloca i1, i1 0
- %nop11627 = alloca i1, i1 0
- %nop11628 = alloca i1, i1 0
- %nop11629 = alloca i1, i1 0
- %nop11630 = alloca i1, i1 0
- %nop11631 = alloca i1, i1 0
- %nop11632 = alloca i1, i1 0
- %nop11633 = alloca i1, i1 0
- %nop11634 = alloca i1, i1 0
- %nop11635 = alloca i1, i1 0
- %nop11636 = alloca i1, i1 0
- %nop11637 = alloca i1, i1 0
- %nop11638 = alloca i1, i1 0
- %nop11639 = alloca i1, i1 0
- %nop11640 = alloca i1, i1 0
- %nop11641 = alloca i1, i1 0
- %nop11642 = alloca i1, i1 0
- %nop11643 = alloca i1, i1 0
- %nop11644 = alloca i1, i1 0
- %nop11645 = alloca i1, i1 0
- %nop11646 = alloca i1, i1 0
- %nop11647 = alloca i1, i1 0
- %nop11648 = alloca i1, i1 0
- %nop11649 = alloca i1, i1 0
- %nop11650 = alloca i1, i1 0
- %nop11651 = alloca i1, i1 0
- %nop11652 = alloca i1, i1 0
- %nop11653 = alloca i1, i1 0
- %nop11654 = alloca i1, i1 0
- %nop11655 = alloca i1, i1 0
- %nop11656 = alloca i1, i1 0
- %nop11657 = alloca i1, i1 0
- %nop11658 = alloca i1, i1 0
- %nop11659 = alloca i1, i1 0
- %nop11660 = alloca i1, i1 0
- %nop11661 = alloca i1, i1 0
- %nop11662 = alloca i1, i1 0
- %nop11663 = alloca i1, i1 0
- %nop11664 = alloca i1, i1 0
- %nop11665 = alloca i1, i1 0
- %nop11666 = alloca i1, i1 0
- %nop11667 = alloca i1, i1 0
- %nop11668 = alloca i1, i1 0
- %nop11669 = alloca i1, i1 0
- %nop11670 = alloca i1, i1 0
- %nop11671 = alloca i1, i1 0
- %nop11672 = alloca i1, i1 0
- %nop11673 = alloca i1, i1 0
- %nop11674 = alloca i1, i1 0
- %nop11675 = alloca i1, i1 0
- %nop11676 = alloca i1, i1 0
- %nop11677 = alloca i1, i1 0
- %nop11678 = alloca i1, i1 0
- %nop11679 = alloca i1, i1 0
- %nop11680 = alloca i1, i1 0
- %nop11681 = alloca i1, i1 0
- %nop11682 = alloca i1, i1 0
- %nop11683 = alloca i1, i1 0
- %nop11684 = alloca i1, i1 0
- %nop11685 = alloca i1, i1 0
- %nop11686 = alloca i1, i1 0
- %nop11687 = alloca i1, i1 0
- %nop11688 = alloca i1, i1 0
- %nop11689 = alloca i1, i1 0
- %nop11690 = alloca i1, i1 0
- %nop11691 = alloca i1, i1 0
- %nop11692 = alloca i1, i1 0
- %nop11693 = alloca i1, i1 0
- %nop11694 = alloca i1, i1 0
- %nop11695 = alloca i1, i1 0
- %nop11696 = alloca i1, i1 0
- %nop11697 = alloca i1, i1 0
- %nop11698 = alloca i1, i1 0
- %nop11699 = alloca i1, i1 0
- %nop11700 = alloca i1, i1 0
- %nop11701 = alloca i1, i1 0
- %nop11702 = alloca i1, i1 0
- %nop11703 = alloca i1, i1 0
- %nop11704 = alloca i1, i1 0
- %nop11705 = alloca i1, i1 0
- %nop11706 = alloca i1, i1 0
- %nop11707 = alloca i1, i1 0
- %nop11708 = alloca i1, i1 0
- %nop11709 = alloca i1, i1 0
- %nop11710 = alloca i1, i1 0
- %nop11711 = alloca i1, i1 0
- %nop11712 = alloca i1, i1 0
- %nop11713 = alloca i1, i1 0
- %nop11714 = alloca i1, i1 0
- %nop11715 = alloca i1, i1 0
- %nop11716 = alloca i1, i1 0
- %nop11717 = alloca i1, i1 0
- %nop11718 = alloca i1, i1 0
- %nop11719 = alloca i1, i1 0
- %nop11720 = alloca i1, i1 0
- %nop11721 = alloca i1, i1 0
- %nop11722 = alloca i1, i1 0
- %nop11723 = alloca i1, i1 0
- %nop11724 = alloca i1, i1 0
- %nop11725 = alloca i1, i1 0
- %nop11726 = alloca i1, i1 0
- %nop11727 = alloca i1, i1 0
- %nop11728 = alloca i1, i1 0
- %nop11729 = alloca i1, i1 0
- %nop11730 = alloca i1, i1 0
- %nop11731 = alloca i1, i1 0
- %nop11732 = alloca i1, i1 0
- %nop11733 = alloca i1, i1 0
- %nop11734 = alloca i1, i1 0
- %nop11735 = alloca i1, i1 0
- %nop11736 = alloca i1, i1 0
- %nop11737 = alloca i1, i1 0
- %nop11738 = alloca i1, i1 0
- %nop11739 = alloca i1, i1 0
- %nop11740 = alloca i1, i1 0
- %nop11741 = alloca i1, i1 0
- %nop11742 = alloca i1, i1 0
- %nop11743 = alloca i1, i1 0
- %nop11744 = alloca i1, i1 0
- %nop11745 = alloca i1, i1 0
- %nop11746 = alloca i1, i1 0
- %nop11747 = alloca i1, i1 0
- %nop11748 = alloca i1, i1 0
- %nop11749 = alloca i1, i1 0
- %nop11750 = alloca i1, i1 0
- %nop11751 = alloca i1, i1 0
- %nop11752 = alloca i1, i1 0
- %nop11753 = alloca i1, i1 0
- %nop11754 = alloca i1, i1 0
- %nop11755 = alloca i1, i1 0
- %nop11756 = alloca i1, i1 0
- %nop11757 = alloca i1, i1 0
- %nop11758 = alloca i1, i1 0
- %nop11759 = alloca i1, i1 0
- %nop11760 = alloca i1, i1 0
- %nop11761 = alloca i1, i1 0
- %nop11762 = alloca i1, i1 0
- %nop11763 = alloca i1, i1 0
- %nop11764 = alloca i1, i1 0
- %nop11765 = alloca i1, i1 0
- %nop11766 = alloca i1, i1 0
- %nop11767 = alloca i1, i1 0
- %nop11768 = alloca i1, i1 0
- %nop11769 = alloca i1, i1 0
- %nop11770 = alloca i1, i1 0
- %nop11771 = alloca i1, i1 0
- %nop11772 = alloca i1, i1 0
- %nop11773 = alloca i1, i1 0
- %nop11774 = alloca i1, i1 0
- %nop11775 = alloca i1, i1 0
- %nop11776 = alloca i1, i1 0
- %nop11777 = alloca i1, i1 0
- %nop11778 = alloca i1, i1 0
- %nop11779 = alloca i1, i1 0
- %nop11780 = alloca i1, i1 0
- %nop11781 = alloca i1, i1 0
- %nop11782 = alloca i1, i1 0
- %nop11783 = alloca i1, i1 0
- %nop11784 = alloca i1, i1 0
- %nop11785 = alloca i1, i1 0
- %nop11786 = alloca i1, i1 0
- %nop11787 = alloca i1, i1 0
- %nop11788 = alloca i1, i1 0
- %nop11789 = alloca i1, i1 0
- %nop11790 = alloca i1, i1 0
- %nop11791 = alloca i1, i1 0
- %nop11792 = alloca i1, i1 0
- %nop11793 = alloca i1, i1 0
- %nop11794 = alloca i1, i1 0
- %nop11795 = alloca i1, i1 0
- %nop11796 = alloca i1, i1 0
- %nop11797 = alloca i1, i1 0
- %nop11798 = alloca i1, i1 0
- %nop11799 = alloca i1, i1 0
- %nop11800 = alloca i1, i1 0
- %nop11801 = alloca i1, i1 0
- %nop11802 = alloca i1, i1 0
- %nop11803 = alloca i1, i1 0
- %nop11804 = alloca i1, i1 0
- %nop11805 = alloca i1, i1 0
- %nop11806 = alloca i1, i1 0
- %nop11807 = alloca i1, i1 0
- %nop11808 = alloca i1, i1 0
- %nop11809 = alloca i1, i1 0
- %nop11810 = alloca i1, i1 0
- %nop11811 = alloca i1, i1 0
- %nop11812 = alloca i1, i1 0
- %nop11813 = alloca i1, i1 0
- %nop11814 = alloca i1, i1 0
- %nop11815 = alloca i1, i1 0
- %nop11816 = alloca i1, i1 0
- %nop11817 = alloca i1, i1 0
- %nop11818 = alloca i1, i1 0
- %nop11819 = alloca i1, i1 0
- %nop11820 = alloca i1, i1 0
- %nop11821 = alloca i1, i1 0
- %nop11822 = alloca i1, i1 0
- %nop11823 = alloca i1, i1 0
- %nop11824 = alloca i1, i1 0
- %nop11825 = alloca i1, i1 0
- %nop11826 = alloca i1, i1 0
- %nop11827 = alloca i1, i1 0
- %nop11828 = alloca i1, i1 0
- %nop11829 = alloca i1, i1 0
- %nop11830 = alloca i1, i1 0
- %nop11831 = alloca i1, i1 0
- %nop11832 = alloca i1, i1 0
- %nop11833 = alloca i1, i1 0
- %nop11834 = alloca i1, i1 0
- %nop11835 = alloca i1, i1 0
- %nop11836 = alloca i1, i1 0
- %nop11837 = alloca i1, i1 0
- %nop11838 = alloca i1, i1 0
- %nop11839 = alloca i1, i1 0
- %nop11840 = alloca i1, i1 0
- %nop11841 = alloca i1, i1 0
- %nop11842 = alloca i1, i1 0
- %nop11843 = alloca i1, i1 0
- %nop11844 = alloca i1, i1 0
- %nop11845 = alloca i1, i1 0
- %nop11846 = alloca i1, i1 0
- %nop11847 = alloca i1, i1 0
- %nop11848 = alloca i1, i1 0
- %nop11849 = alloca i1, i1 0
- %nop11850 = alloca i1, i1 0
- %nop11851 = alloca i1, i1 0
- %nop11852 = alloca i1, i1 0
- %nop11853 = alloca i1, i1 0
- %nop11854 = alloca i1, i1 0
- %nop11855 = alloca i1, i1 0
- %nop11856 = alloca i1, i1 0
- %nop11857 = alloca i1, i1 0
- %nop11858 = alloca i1, i1 0
- %nop11859 = alloca i1, i1 0
- %nop11860 = alloca i1, i1 0
- %nop11861 = alloca i1, i1 0
- %nop11862 = alloca i1, i1 0
- %nop11863 = alloca i1, i1 0
- %nop11864 = alloca i1, i1 0
- %nop11865 = alloca i1, i1 0
- %nop11866 = alloca i1, i1 0
- %nop11867 = alloca i1, i1 0
- %nop11868 = alloca i1, i1 0
- %nop11869 = alloca i1, i1 0
- %nop11870 = alloca i1, i1 0
- %nop11871 = alloca i1, i1 0
- %nop11872 = alloca i1, i1 0
- %nop11873 = alloca i1, i1 0
- %nop11874 = alloca i1, i1 0
- %nop11875 = alloca i1, i1 0
- %nop11876 = alloca i1, i1 0
- %nop11877 = alloca i1, i1 0
- %nop11878 = alloca i1, i1 0
- %nop11879 = alloca i1, i1 0
- %nop11880 = alloca i1, i1 0
- %nop11881 = alloca i1, i1 0
- %nop11882 = alloca i1, i1 0
- %nop11883 = alloca i1, i1 0
- %nop11884 = alloca i1, i1 0
- %nop11885 = alloca i1, i1 0
- %nop11886 = alloca i1, i1 0
- %nop11887 = alloca i1, i1 0
- %nop11888 = alloca i1, i1 0
- %nop11889 = alloca i1, i1 0
- %nop11890 = alloca i1, i1 0
- %nop11891 = alloca i1, i1 0
- %nop11892 = alloca i1, i1 0
- %nop11893 = alloca i1, i1 0
- %nop11894 = alloca i1, i1 0
- %nop11895 = alloca i1, i1 0
- %nop11896 = alloca i1, i1 0
- %nop11897 = alloca i1, i1 0
- %nop11898 = alloca i1, i1 0
- %nop11899 = alloca i1, i1 0
- %nop11900 = alloca i1, i1 0
- %nop11901 = alloca i1, i1 0
- %nop11902 = alloca i1, i1 0
- %nop11903 = alloca i1, i1 0
- %nop11904 = alloca i1, i1 0
- %nop11905 = alloca i1, i1 0
- %nop11906 = alloca i1, i1 0
- %nop11907 = alloca i1, i1 0
- %nop11908 = alloca i1, i1 0
- %nop11909 = alloca i1, i1 0
- %nop11910 = alloca i1, i1 0
- %nop11911 = alloca i1, i1 0
- %nop11912 = alloca i1, i1 0
- %nop11913 = alloca i1, i1 0
- %nop11914 = alloca i1, i1 0
- %nop11915 = alloca i1, i1 0
- %nop11916 = alloca i1, i1 0
- %nop11917 = alloca i1, i1 0
- %nop11918 = alloca i1, i1 0
- %nop11919 = alloca i1, i1 0
- %nop11920 = alloca i1, i1 0
- %nop11921 = alloca i1, i1 0
- %nop11922 = alloca i1, i1 0
- %nop11923 = alloca i1, i1 0
- %nop11924 = alloca i1, i1 0
- %nop11925 = alloca i1, i1 0
- %nop11926 = alloca i1, i1 0
- %nop11927 = alloca i1, i1 0
- %nop11928 = alloca i1, i1 0
- %nop11929 = alloca i1, i1 0
- %nop11930 = alloca i1, i1 0
- %nop11931 = alloca i1, i1 0
- %nop11932 = alloca i1, i1 0
- %nop11933 = alloca i1, i1 0
- %nop11934 = alloca i1, i1 0
- %nop11935 = alloca i1, i1 0
- %nop11936 = alloca i1, i1 0
- %nop11937 = alloca i1, i1 0
- %nop11938 = alloca i1, i1 0
- %nop11939 = alloca i1, i1 0
- %nop11940 = alloca i1, i1 0
- %nop11941 = alloca i1, i1 0
- %nop11942 = alloca i1, i1 0
- %nop11943 = alloca i1, i1 0
- %nop11944 = alloca i1, i1 0
- %nop11945 = alloca i1, i1 0
- %nop11946 = alloca i1, i1 0
- %nop11947 = alloca i1, i1 0
- %nop11948 = alloca i1, i1 0
- %nop11949 = alloca i1, i1 0
- %nop11950 = alloca i1, i1 0
- %nop11951 = alloca i1, i1 0
- %nop11952 = alloca i1, i1 0
- %nop11953 = alloca i1, i1 0
- %nop11954 = alloca i1, i1 0
- %nop11955 = alloca i1, i1 0
- %nop11956 = alloca i1, i1 0
- %nop11957 = alloca i1, i1 0
- %nop11958 = alloca i1, i1 0
- %nop11959 = alloca i1, i1 0
- %nop11960 = alloca i1, i1 0
- %nop11961 = alloca i1, i1 0
- %nop11962 = alloca i1, i1 0
- %nop11963 = alloca i1, i1 0
- %nop11964 = alloca i1, i1 0
- %nop11965 = alloca i1, i1 0
- %nop11966 = alloca i1, i1 0
- %nop11967 = alloca i1, i1 0
- %nop11968 = alloca i1, i1 0
- %nop11969 = alloca i1, i1 0
- %nop11970 = alloca i1, i1 0
- %nop11971 = alloca i1, i1 0
- %nop11972 = alloca i1, i1 0
- %nop11973 = alloca i1, i1 0
- %nop11974 = alloca i1, i1 0
- %nop11975 = alloca i1, i1 0
- %nop11976 = alloca i1, i1 0
- %nop11977 = alloca i1, i1 0
- %nop11978 = alloca i1, i1 0
- %nop11979 = alloca i1, i1 0
- %nop11980 = alloca i1, i1 0
- %nop11981 = alloca i1, i1 0
- %nop11982 = alloca i1, i1 0
- %nop11983 = alloca i1, i1 0
- %nop11984 = alloca i1, i1 0
- %nop11985 = alloca i1, i1 0
- %nop11986 = alloca i1, i1 0
- %nop11987 = alloca i1, i1 0
- %nop11988 = alloca i1, i1 0
- %nop11989 = alloca i1, i1 0
- %nop11990 = alloca i1, i1 0
- %nop11991 = alloca i1, i1 0
- %nop11992 = alloca i1, i1 0
- %nop11993 = alloca i1, i1 0
- %nop11994 = alloca i1, i1 0
- %nop11995 = alloca i1, i1 0
- %nop11996 = alloca i1, i1 0
- %nop11997 = alloca i1, i1 0
- %nop11998 = alloca i1, i1 0
- %nop11999 = alloca i1, i1 0
- %nop12000 = alloca i1, i1 0
- %nop12001 = alloca i1, i1 0
- %nop12002 = alloca i1, i1 0
- %nop12003 = alloca i1, i1 0
- %nop12004 = alloca i1, i1 0
- %nop12005 = alloca i1, i1 0
- %nop12006 = alloca i1, i1 0
- %nop12007 = alloca i1, i1 0
- %nop12008 = alloca i1, i1 0
- %nop12009 = alloca i1, i1 0
- %nop12010 = alloca i1, i1 0
- %nop12011 = alloca i1, i1 0
- %nop12012 = alloca i1, i1 0
- %nop12013 = alloca i1, i1 0
- %nop12014 = alloca i1, i1 0
- %nop12015 = alloca i1, i1 0
- %nop12016 = alloca i1, i1 0
- %nop12017 = alloca i1, i1 0
- %nop12018 = alloca i1, i1 0
- %nop12019 = alloca i1, i1 0
- %nop12020 = alloca i1, i1 0
- %nop12021 = alloca i1, i1 0
- %nop12022 = alloca i1, i1 0
- %nop12023 = alloca i1, i1 0
- %nop12024 = alloca i1, i1 0
- %nop12025 = alloca i1, i1 0
- %nop12026 = alloca i1, i1 0
- %nop12027 = alloca i1, i1 0
- %nop12028 = alloca i1, i1 0
- %nop12029 = alloca i1, i1 0
- %nop12030 = alloca i1, i1 0
- %nop12031 = alloca i1, i1 0
- %nop12032 = alloca i1, i1 0
- %nop12033 = alloca i1, i1 0
- %nop12034 = alloca i1, i1 0
- %nop12035 = alloca i1, i1 0
- %nop12036 = alloca i1, i1 0
- %nop12037 = alloca i1, i1 0
- %nop12038 = alloca i1, i1 0
- %nop12039 = alloca i1, i1 0
- %nop12040 = alloca i1, i1 0
- %nop12041 = alloca i1, i1 0
- %nop12042 = alloca i1, i1 0
- %nop12043 = alloca i1, i1 0
- %nop12044 = alloca i1, i1 0
- %nop12045 = alloca i1, i1 0
- %nop12046 = alloca i1, i1 0
- %nop12047 = alloca i1, i1 0
- %nop12048 = alloca i1, i1 0
- %nop12049 = alloca i1, i1 0
- %nop12050 = alloca i1, i1 0
- %nop12051 = alloca i1, i1 0
- %nop12052 = alloca i1, i1 0
- %nop12053 = alloca i1, i1 0
- %nop12054 = alloca i1, i1 0
- %nop12055 = alloca i1, i1 0
- %nop12056 = alloca i1, i1 0
- %nop12057 = alloca i1, i1 0
- %nop12058 = alloca i1, i1 0
- %nop12059 = alloca i1, i1 0
- %nop12060 = alloca i1, i1 0
- %nop12061 = alloca i1, i1 0
- %nop12062 = alloca i1, i1 0
- %nop12063 = alloca i1, i1 0
- %nop12064 = alloca i1, i1 0
- %nop12065 = alloca i1, i1 0
- %nop12066 = alloca i1, i1 0
- %nop12067 = alloca i1, i1 0
- %nop12068 = alloca i1, i1 0
- %nop12069 = alloca i1, i1 0
- %nop12070 = alloca i1, i1 0
- %nop12071 = alloca i1, i1 0
- %nop12072 = alloca i1, i1 0
- %nop12073 = alloca i1, i1 0
- %nop12074 = alloca i1, i1 0
- %nop12075 = alloca i1, i1 0
- %nop12076 = alloca i1, i1 0
- %nop12077 = alloca i1, i1 0
- %nop12078 = alloca i1, i1 0
- %nop12079 = alloca i1, i1 0
- %nop12080 = alloca i1, i1 0
- %nop12081 = alloca i1, i1 0
- %nop12082 = alloca i1, i1 0
- %nop12083 = alloca i1, i1 0
- %nop12084 = alloca i1, i1 0
- %nop12085 = alloca i1, i1 0
- %nop12086 = alloca i1, i1 0
- %nop12087 = alloca i1, i1 0
- %nop12088 = alloca i1, i1 0
- %nop12089 = alloca i1, i1 0
- %nop12090 = alloca i1, i1 0
- %nop12091 = alloca i1, i1 0
- %nop12092 = alloca i1, i1 0
- %nop12093 = alloca i1, i1 0
- %nop12094 = alloca i1, i1 0
- %nop12095 = alloca i1, i1 0
- %nop12096 = alloca i1, i1 0
- %nop12097 = alloca i1, i1 0
- %nop12098 = alloca i1, i1 0
- %nop12099 = alloca i1, i1 0
- %nop12100 = alloca i1, i1 0
- %nop12101 = alloca i1, i1 0
- %nop12102 = alloca i1, i1 0
- %nop12103 = alloca i1, i1 0
- %nop12104 = alloca i1, i1 0
- %nop12105 = alloca i1, i1 0
- %nop12106 = alloca i1, i1 0
- %nop12107 = alloca i1, i1 0
- %nop12108 = alloca i1, i1 0
- %nop12109 = alloca i1, i1 0
- %nop12110 = alloca i1, i1 0
- %nop12111 = alloca i1, i1 0
- %nop12112 = alloca i1, i1 0
- %nop12113 = alloca i1, i1 0
- %nop12114 = alloca i1, i1 0
- %nop12115 = alloca i1, i1 0
- %nop12116 = alloca i1, i1 0
- %nop12117 = alloca i1, i1 0
- %nop12118 = alloca i1, i1 0
- %nop12119 = alloca i1, i1 0
- %nop12120 = alloca i1, i1 0
- %nop12121 = alloca i1, i1 0
- %nop12122 = alloca i1, i1 0
- %nop12123 = alloca i1, i1 0
- %nop12124 = alloca i1, i1 0
- %nop12125 = alloca i1, i1 0
- %nop12126 = alloca i1, i1 0
- %nop12127 = alloca i1, i1 0
- %nop12128 = alloca i1, i1 0
- %nop12129 = alloca i1, i1 0
- %nop12130 = alloca i1, i1 0
- %nop12131 = alloca i1, i1 0
- %nop12132 = alloca i1, i1 0
- %nop12133 = alloca i1, i1 0
- %nop12134 = alloca i1, i1 0
- %nop12135 = alloca i1, i1 0
- %nop12136 = alloca i1, i1 0
- %nop12137 = alloca i1, i1 0
- %nop12138 = alloca i1, i1 0
- %nop12139 = alloca i1, i1 0
- %nop12140 = alloca i1, i1 0
- %nop12141 = alloca i1, i1 0
- %nop12142 = alloca i1, i1 0
- %nop12143 = alloca i1, i1 0
- %nop12144 = alloca i1, i1 0
- %nop12145 = alloca i1, i1 0
- %nop12146 = alloca i1, i1 0
- %nop12147 = alloca i1, i1 0
- %nop12148 = alloca i1, i1 0
- %nop12149 = alloca i1, i1 0
- %nop12150 = alloca i1, i1 0
- %nop12151 = alloca i1, i1 0
- %nop12152 = alloca i1, i1 0
- %nop12153 = alloca i1, i1 0
- %nop12154 = alloca i1, i1 0
- %nop12155 = alloca i1, i1 0
- %nop12156 = alloca i1, i1 0
- %nop12157 = alloca i1, i1 0
- %nop12158 = alloca i1, i1 0
- %nop12159 = alloca i1, i1 0
- %nop12160 = alloca i1, i1 0
- %nop12161 = alloca i1, i1 0
- %nop12162 = alloca i1, i1 0
- %nop12163 = alloca i1, i1 0
- %nop12164 = alloca i1, i1 0
- %nop12165 = alloca i1, i1 0
- %nop12166 = alloca i1, i1 0
- %nop12167 = alloca i1, i1 0
- %nop12168 = alloca i1, i1 0
- %nop12169 = alloca i1, i1 0
- %nop12170 = alloca i1, i1 0
- %nop12171 = alloca i1, i1 0
- %nop12172 = alloca i1, i1 0
- %nop12173 = alloca i1, i1 0
- %nop12174 = alloca i1, i1 0
- %nop12175 = alloca i1, i1 0
- %nop12176 = alloca i1, i1 0
- %nop12177 = alloca i1, i1 0
- %nop12178 = alloca i1, i1 0
- %nop12179 = alloca i1, i1 0
- %nop12180 = alloca i1, i1 0
- %nop12181 = alloca i1, i1 0
- %nop12182 = alloca i1, i1 0
- %nop12183 = alloca i1, i1 0
- %nop12184 = alloca i1, i1 0
- %nop12185 = alloca i1, i1 0
- %nop12186 = alloca i1, i1 0
- %nop12187 = alloca i1, i1 0
- %nop12188 = alloca i1, i1 0
- %nop12189 = alloca i1, i1 0
- %nop12190 = alloca i1, i1 0
- %nop12191 = alloca i1, i1 0
- %nop12192 = alloca i1, i1 0
- %nop12193 = alloca i1, i1 0
- %nop12194 = alloca i1, i1 0
- %nop12195 = alloca i1, i1 0
- %nop12196 = alloca i1, i1 0
- %nop12197 = alloca i1, i1 0
- %nop12198 = alloca i1, i1 0
- %nop12199 = alloca i1, i1 0
- %nop12200 = alloca i1, i1 0
- %nop12201 = alloca i1, i1 0
- %nop12202 = alloca i1, i1 0
- %nop12203 = alloca i1, i1 0
- %nop12204 = alloca i1, i1 0
- %nop12205 = alloca i1, i1 0
- %nop12206 = alloca i1, i1 0
- %nop12207 = alloca i1, i1 0
- %nop12208 = alloca i1, i1 0
- %nop12209 = alloca i1, i1 0
- %nop12210 = alloca i1, i1 0
- %nop12211 = alloca i1, i1 0
- %nop12212 = alloca i1, i1 0
- %nop12213 = alloca i1, i1 0
- %nop12214 = alloca i1, i1 0
- %nop12215 = alloca i1, i1 0
- %nop12216 = alloca i1, i1 0
- %nop12217 = alloca i1, i1 0
- %nop12218 = alloca i1, i1 0
- %nop12219 = alloca i1, i1 0
- %nop12220 = alloca i1, i1 0
- %nop12221 = alloca i1, i1 0
- %nop12222 = alloca i1, i1 0
- %nop12223 = alloca i1, i1 0
- %nop12224 = alloca i1, i1 0
- %nop12225 = alloca i1, i1 0
- %nop12226 = alloca i1, i1 0
- %nop12227 = alloca i1, i1 0
- %nop12228 = alloca i1, i1 0
- %nop12229 = alloca i1, i1 0
- %nop12230 = alloca i1, i1 0
- %nop12231 = alloca i1, i1 0
- %nop12232 = alloca i1, i1 0
- %nop12233 = alloca i1, i1 0
- %nop12234 = alloca i1, i1 0
- %nop12235 = alloca i1, i1 0
- %nop12236 = alloca i1, i1 0
- %nop12237 = alloca i1, i1 0
- %nop12238 = alloca i1, i1 0
- %nop12239 = alloca i1, i1 0
- %nop12240 = alloca i1, i1 0
- %nop12241 = alloca i1, i1 0
- %nop12242 = alloca i1, i1 0
- %nop12243 = alloca i1, i1 0
- %nop12244 = alloca i1, i1 0
- %nop12245 = alloca i1, i1 0
- %nop12246 = alloca i1, i1 0
- %nop12247 = alloca i1, i1 0
- %nop12248 = alloca i1, i1 0
- %nop12249 = alloca i1, i1 0
- %nop12250 = alloca i1, i1 0
- %nop12251 = alloca i1, i1 0
- %nop12252 = alloca i1, i1 0
- %nop12253 = alloca i1, i1 0
- %nop12254 = alloca i1, i1 0
- %nop12255 = alloca i1, i1 0
- %nop12256 = alloca i1, i1 0
- %nop12257 = alloca i1, i1 0
- %nop12258 = alloca i1, i1 0
- %nop12259 = alloca i1, i1 0
- %nop12260 = alloca i1, i1 0
- %nop12261 = alloca i1, i1 0
- %nop12262 = alloca i1, i1 0
- %nop12263 = alloca i1, i1 0
- %nop12264 = alloca i1, i1 0
- %nop12265 = alloca i1, i1 0
- %nop12266 = alloca i1, i1 0
- %nop12267 = alloca i1, i1 0
- %nop12268 = alloca i1, i1 0
- %nop12269 = alloca i1, i1 0
- %nop12270 = alloca i1, i1 0
- %nop12271 = alloca i1, i1 0
- %nop12272 = alloca i1, i1 0
- %nop12273 = alloca i1, i1 0
- %nop12274 = alloca i1, i1 0
- %nop12275 = alloca i1, i1 0
- %nop12276 = alloca i1, i1 0
- %nop12277 = alloca i1, i1 0
- %nop12278 = alloca i1, i1 0
- %nop12279 = alloca i1, i1 0
- %nop12280 = alloca i1, i1 0
- %nop12281 = alloca i1, i1 0
- %nop12282 = alloca i1, i1 0
- %nop12283 = alloca i1, i1 0
- %nop12284 = alloca i1, i1 0
- %nop12285 = alloca i1, i1 0
- %nop12286 = alloca i1, i1 0
- %nop12287 = alloca i1, i1 0
- %nop12288 = alloca i1, i1 0
- %nop12289 = alloca i1, i1 0
- %nop12290 = alloca i1, i1 0
- %nop12291 = alloca i1, i1 0
- %nop12292 = alloca i1, i1 0
- %nop12293 = alloca i1, i1 0
- %nop12294 = alloca i1, i1 0
- %nop12295 = alloca i1, i1 0
- %nop12296 = alloca i1, i1 0
- %nop12297 = alloca i1, i1 0
- %nop12298 = alloca i1, i1 0
- %nop12299 = alloca i1, i1 0
- %nop12300 = alloca i1, i1 0
- %nop12301 = alloca i1, i1 0
- %nop12302 = alloca i1, i1 0
- %nop12303 = alloca i1, i1 0
- %nop12304 = alloca i1, i1 0
- %nop12305 = alloca i1, i1 0
- %nop12306 = alloca i1, i1 0
- %nop12307 = alloca i1, i1 0
- %nop12308 = alloca i1, i1 0
- %nop12309 = alloca i1, i1 0
- %nop12310 = alloca i1, i1 0
- %nop12311 = alloca i1, i1 0
- %nop12312 = alloca i1, i1 0
- %nop12313 = alloca i1, i1 0
- %nop12314 = alloca i1, i1 0
- %nop12315 = alloca i1, i1 0
- %nop12316 = alloca i1, i1 0
- %nop12317 = alloca i1, i1 0
- %nop12318 = alloca i1, i1 0
- %nop12319 = alloca i1, i1 0
- %nop12320 = alloca i1, i1 0
- %nop12321 = alloca i1, i1 0
- %nop12322 = alloca i1, i1 0
- %nop12323 = alloca i1, i1 0
- %nop12324 = alloca i1, i1 0
- %nop12325 = alloca i1, i1 0
- %nop12326 = alloca i1, i1 0
- %nop12327 = alloca i1, i1 0
- %nop12328 = alloca i1, i1 0
- %nop12329 = alloca i1, i1 0
- %nop12330 = alloca i1, i1 0
- %nop12331 = alloca i1, i1 0
- %nop12332 = alloca i1, i1 0
- %nop12333 = alloca i1, i1 0
- %nop12334 = alloca i1, i1 0
- %nop12335 = alloca i1, i1 0
- %nop12336 = alloca i1, i1 0
- %nop12337 = alloca i1, i1 0
- %nop12338 = alloca i1, i1 0
- %nop12339 = alloca i1, i1 0
- %nop12340 = alloca i1, i1 0
- %nop12341 = alloca i1, i1 0
- %nop12342 = alloca i1, i1 0
- %nop12343 = alloca i1, i1 0
- %nop12344 = alloca i1, i1 0
- %nop12345 = alloca i1, i1 0
- %nop12346 = alloca i1, i1 0
- %nop12347 = alloca i1, i1 0
- %nop12348 = alloca i1, i1 0
- %nop12349 = alloca i1, i1 0
- %nop12350 = alloca i1, i1 0
- %nop12351 = alloca i1, i1 0
- %nop12352 = alloca i1, i1 0
- %nop12353 = alloca i1, i1 0
- %nop12354 = alloca i1, i1 0
- %nop12355 = alloca i1, i1 0
- %nop12356 = alloca i1, i1 0
- %nop12357 = alloca i1, i1 0
- %nop12358 = alloca i1, i1 0
- %nop12359 = alloca i1, i1 0
- %nop12360 = alloca i1, i1 0
- %nop12361 = alloca i1, i1 0
- %nop12362 = alloca i1, i1 0
- %nop12363 = alloca i1, i1 0
- %nop12364 = alloca i1, i1 0
- %nop12365 = alloca i1, i1 0
- %nop12366 = alloca i1, i1 0
- %nop12367 = alloca i1, i1 0
- %nop12368 = alloca i1, i1 0
- %nop12369 = alloca i1, i1 0
- %nop12370 = alloca i1, i1 0
- %nop12371 = alloca i1, i1 0
- %nop12372 = alloca i1, i1 0
- %nop12373 = alloca i1, i1 0
- %nop12374 = alloca i1, i1 0
- %nop12375 = alloca i1, i1 0
- %nop12376 = alloca i1, i1 0
- %nop12377 = alloca i1, i1 0
- %nop12378 = alloca i1, i1 0
- %nop12379 = alloca i1, i1 0
- %nop12380 = alloca i1, i1 0
- %nop12381 = alloca i1, i1 0
- %nop12382 = alloca i1, i1 0
- %nop12383 = alloca i1, i1 0
- %nop12384 = alloca i1, i1 0
- %nop12385 = alloca i1, i1 0
- %nop12386 = alloca i1, i1 0
- %nop12387 = alloca i1, i1 0
- %nop12388 = alloca i1, i1 0
- %nop12389 = alloca i1, i1 0
- %nop12390 = alloca i1, i1 0
- %nop12391 = alloca i1, i1 0
- %nop12392 = alloca i1, i1 0
- %nop12393 = alloca i1, i1 0
- %nop12394 = alloca i1, i1 0
- %nop12395 = alloca i1, i1 0
- %nop12396 = alloca i1, i1 0
- %nop12397 = alloca i1, i1 0
- %nop12398 = alloca i1, i1 0
- %nop12399 = alloca i1, i1 0
- %nop12400 = alloca i1, i1 0
- %nop12401 = alloca i1, i1 0
- %nop12402 = alloca i1, i1 0
- %nop12403 = alloca i1, i1 0
- %nop12404 = alloca i1, i1 0
- %nop12405 = alloca i1, i1 0
- %nop12406 = alloca i1, i1 0
- %nop12407 = alloca i1, i1 0
- %nop12408 = alloca i1, i1 0
- %nop12409 = alloca i1, i1 0
- %nop12410 = alloca i1, i1 0
- %nop12411 = alloca i1, i1 0
- %nop12412 = alloca i1, i1 0
- %nop12413 = alloca i1, i1 0
- %nop12414 = alloca i1, i1 0
- %nop12415 = alloca i1, i1 0
- %nop12416 = alloca i1, i1 0
- %nop12417 = alloca i1, i1 0
- %nop12418 = alloca i1, i1 0
- %nop12419 = alloca i1, i1 0
- %nop12420 = alloca i1, i1 0
- %nop12421 = alloca i1, i1 0
- %nop12422 = alloca i1, i1 0
- %nop12423 = alloca i1, i1 0
- %nop12424 = alloca i1, i1 0
- %nop12425 = alloca i1, i1 0
- %nop12426 = alloca i1, i1 0
- %nop12427 = alloca i1, i1 0
- %nop12428 = alloca i1, i1 0
- %nop12429 = alloca i1, i1 0
- %nop12430 = alloca i1, i1 0
- %nop12431 = alloca i1, i1 0
- %nop12432 = alloca i1, i1 0
- %nop12433 = alloca i1, i1 0
- %nop12434 = alloca i1, i1 0
- %nop12435 = alloca i1, i1 0
- %nop12436 = alloca i1, i1 0
- %nop12437 = alloca i1, i1 0
- %nop12438 = alloca i1, i1 0
- %nop12439 = alloca i1, i1 0
- %nop12440 = alloca i1, i1 0
- %nop12441 = alloca i1, i1 0
- %nop12442 = alloca i1, i1 0
- %nop12443 = alloca i1, i1 0
- %nop12444 = alloca i1, i1 0
- %nop12445 = alloca i1, i1 0
- %nop12446 = alloca i1, i1 0
- %nop12447 = alloca i1, i1 0
- %nop12448 = alloca i1, i1 0
- %nop12449 = alloca i1, i1 0
- %nop12450 = alloca i1, i1 0
- %nop12451 = alloca i1, i1 0
- %nop12452 = alloca i1, i1 0
- %nop12453 = alloca i1, i1 0
- %nop12454 = alloca i1, i1 0
- %nop12455 = alloca i1, i1 0
- %nop12456 = alloca i1, i1 0
- %nop12457 = alloca i1, i1 0
- %nop12458 = alloca i1, i1 0
- %nop12459 = alloca i1, i1 0
- %nop12460 = alloca i1, i1 0
- %nop12461 = alloca i1, i1 0
- %nop12462 = alloca i1, i1 0
- %nop12463 = alloca i1, i1 0
- %nop12464 = alloca i1, i1 0
- %nop12465 = alloca i1, i1 0
- %nop12466 = alloca i1, i1 0
- %nop12467 = alloca i1, i1 0
- %nop12468 = alloca i1, i1 0
- %nop12469 = alloca i1, i1 0
- %nop12470 = alloca i1, i1 0
- %nop12471 = alloca i1, i1 0
- %nop12472 = alloca i1, i1 0
- %nop12473 = alloca i1, i1 0
- %nop12474 = alloca i1, i1 0
- %nop12475 = alloca i1, i1 0
- %nop12476 = alloca i1, i1 0
- %nop12477 = alloca i1, i1 0
- %nop12478 = alloca i1, i1 0
- %nop12479 = alloca i1, i1 0
- %nop12480 = alloca i1, i1 0
- %nop12481 = alloca i1, i1 0
- %nop12482 = alloca i1, i1 0
- %nop12483 = alloca i1, i1 0
- %nop12484 = alloca i1, i1 0
- %nop12485 = alloca i1, i1 0
- %nop12486 = alloca i1, i1 0
- %nop12487 = alloca i1, i1 0
- %nop12488 = alloca i1, i1 0
- %nop12489 = alloca i1, i1 0
- %nop12490 = alloca i1, i1 0
- %nop12491 = alloca i1, i1 0
- %nop12492 = alloca i1, i1 0
- %nop12493 = alloca i1, i1 0
- %nop12494 = alloca i1, i1 0
- %nop12495 = alloca i1, i1 0
- %nop12496 = alloca i1, i1 0
- %nop12497 = alloca i1, i1 0
- %nop12498 = alloca i1, i1 0
- %nop12499 = alloca i1, i1 0
- %nop12500 = alloca i1, i1 0
- %nop12501 = alloca i1, i1 0
- %nop12502 = alloca i1, i1 0
- %nop12503 = alloca i1, i1 0
- %nop12504 = alloca i1, i1 0
- %nop12505 = alloca i1, i1 0
- %nop12506 = alloca i1, i1 0
- %nop12507 = alloca i1, i1 0
- %nop12508 = alloca i1, i1 0
- %nop12509 = alloca i1, i1 0
- %nop12510 = alloca i1, i1 0
- %nop12511 = alloca i1, i1 0
- %nop12512 = alloca i1, i1 0
- %nop12513 = alloca i1, i1 0
- %nop12514 = alloca i1, i1 0
- %nop12515 = alloca i1, i1 0
- %nop12516 = alloca i1, i1 0
- %nop12517 = alloca i1, i1 0
- %nop12518 = alloca i1, i1 0
- %nop12519 = alloca i1, i1 0
- %nop12520 = alloca i1, i1 0
- %nop12521 = alloca i1, i1 0
- %nop12522 = alloca i1, i1 0
- %nop12523 = alloca i1, i1 0
- %nop12524 = alloca i1, i1 0
- %nop12525 = alloca i1, i1 0
- %nop12526 = alloca i1, i1 0
- %nop12527 = alloca i1, i1 0
- %nop12528 = alloca i1, i1 0
- %nop12529 = alloca i1, i1 0
- %nop12530 = alloca i1, i1 0
- %nop12531 = alloca i1, i1 0
- %nop12532 = alloca i1, i1 0
- %nop12533 = alloca i1, i1 0
- %nop12534 = alloca i1, i1 0
- %nop12535 = alloca i1, i1 0
- %nop12536 = alloca i1, i1 0
- %nop12537 = alloca i1, i1 0
- %nop12538 = alloca i1, i1 0
- %nop12539 = alloca i1, i1 0
- %nop12540 = alloca i1, i1 0
- %nop12541 = alloca i1, i1 0
- %nop12542 = alloca i1, i1 0
- %nop12543 = alloca i1, i1 0
- %nop12544 = alloca i1, i1 0
- %nop12545 = alloca i1, i1 0
- %nop12546 = alloca i1, i1 0
- %nop12547 = alloca i1, i1 0
- %nop12548 = alloca i1, i1 0
- %nop12549 = alloca i1, i1 0
- %nop12550 = alloca i1, i1 0
- %nop12551 = alloca i1, i1 0
- %nop12552 = alloca i1, i1 0
- %nop12553 = alloca i1, i1 0
- %nop12554 = alloca i1, i1 0
- %nop12555 = alloca i1, i1 0
- %nop12556 = alloca i1, i1 0
- %nop12557 = alloca i1, i1 0
- %nop12558 = alloca i1, i1 0
- %nop12559 = alloca i1, i1 0
- %nop12560 = alloca i1, i1 0
- %nop12561 = alloca i1, i1 0
- %nop12562 = alloca i1, i1 0
- %nop12563 = alloca i1, i1 0
- %nop12564 = alloca i1, i1 0
- %nop12565 = alloca i1, i1 0
- %nop12566 = alloca i1, i1 0
- %nop12567 = alloca i1, i1 0
- %nop12568 = alloca i1, i1 0
- %nop12569 = alloca i1, i1 0
- %nop12570 = alloca i1, i1 0
- %nop12571 = alloca i1, i1 0
- %nop12572 = alloca i1, i1 0
- %nop12573 = alloca i1, i1 0
- %nop12574 = alloca i1, i1 0
- %nop12575 = alloca i1, i1 0
- %nop12576 = alloca i1, i1 0
- %nop12577 = alloca i1, i1 0
- %nop12578 = alloca i1, i1 0
- %nop12579 = alloca i1, i1 0
- %nop12580 = alloca i1, i1 0
- %nop12581 = alloca i1, i1 0
- %nop12582 = alloca i1, i1 0
- %nop12583 = alloca i1, i1 0
- %nop12584 = alloca i1, i1 0
- %nop12585 = alloca i1, i1 0
- %nop12586 = alloca i1, i1 0
- %nop12587 = alloca i1, i1 0
- %nop12588 = alloca i1, i1 0
- %nop12589 = alloca i1, i1 0
- %nop12590 = alloca i1, i1 0
- %nop12591 = alloca i1, i1 0
- %nop12592 = alloca i1, i1 0
- %nop12593 = alloca i1, i1 0
- %nop12594 = alloca i1, i1 0
- %nop12595 = alloca i1, i1 0
- %nop12596 = alloca i1, i1 0
- %nop12597 = alloca i1, i1 0
- %nop12598 = alloca i1, i1 0
- %nop12599 = alloca i1, i1 0
- %nop12600 = alloca i1, i1 0
- %nop12601 = alloca i1, i1 0
- %nop12602 = alloca i1, i1 0
- %nop12603 = alloca i1, i1 0
- %nop12604 = alloca i1, i1 0
- %nop12605 = alloca i1, i1 0
- %nop12606 = alloca i1, i1 0
- %nop12607 = alloca i1, i1 0
- %nop12608 = alloca i1, i1 0
- %nop12609 = alloca i1, i1 0
- %nop12610 = alloca i1, i1 0
- %nop12611 = alloca i1, i1 0
- %nop12612 = alloca i1, i1 0
- %nop12613 = alloca i1, i1 0
- %nop12614 = alloca i1, i1 0
- %nop12615 = alloca i1, i1 0
- %nop12616 = alloca i1, i1 0
- %nop12617 = alloca i1, i1 0
- %nop12618 = alloca i1, i1 0
- %nop12619 = alloca i1, i1 0
- %nop12620 = alloca i1, i1 0
- %nop12621 = alloca i1, i1 0
- %nop12622 = alloca i1, i1 0
- %nop12623 = alloca i1, i1 0
- %nop12624 = alloca i1, i1 0
- %nop12625 = alloca i1, i1 0
- %nop12626 = alloca i1, i1 0
- %nop12627 = alloca i1, i1 0
- %nop12628 = alloca i1, i1 0
- %nop12629 = alloca i1, i1 0
- %nop12630 = alloca i1, i1 0
- %nop12631 = alloca i1, i1 0
- %nop12632 = alloca i1, i1 0
- %nop12633 = alloca i1, i1 0
- %nop12634 = alloca i1, i1 0
- %nop12635 = alloca i1, i1 0
- %nop12636 = alloca i1, i1 0
- %nop12637 = alloca i1, i1 0
- %nop12638 = alloca i1, i1 0
- %nop12639 = alloca i1, i1 0
- %nop12640 = alloca i1, i1 0
- %nop12641 = alloca i1, i1 0
- %nop12642 = alloca i1, i1 0
- %nop12643 = alloca i1, i1 0
- %nop12644 = alloca i1, i1 0
- %nop12645 = alloca i1, i1 0
- %nop12646 = alloca i1, i1 0
- %nop12647 = alloca i1, i1 0
- %nop12648 = alloca i1, i1 0
- %nop12649 = alloca i1, i1 0
- %nop12650 = alloca i1, i1 0
- %nop12651 = alloca i1, i1 0
- %nop12652 = alloca i1, i1 0
- %nop12653 = alloca i1, i1 0
- %nop12654 = alloca i1, i1 0
- %nop12655 = alloca i1, i1 0
- %nop12656 = alloca i1, i1 0
- %nop12657 = alloca i1, i1 0
- %nop12658 = alloca i1, i1 0
- %nop12659 = alloca i1, i1 0
- %nop12660 = alloca i1, i1 0
- %nop12661 = alloca i1, i1 0
- %nop12662 = alloca i1, i1 0
- %nop12663 = alloca i1, i1 0
- %nop12664 = alloca i1, i1 0
- %nop12665 = alloca i1, i1 0
- %nop12666 = alloca i1, i1 0
- %nop12667 = alloca i1, i1 0
- %nop12668 = alloca i1, i1 0
- %nop12669 = alloca i1, i1 0
- %nop12670 = alloca i1, i1 0
- %nop12671 = alloca i1, i1 0
- %nop12672 = alloca i1, i1 0
- %nop12673 = alloca i1, i1 0
- %nop12674 = alloca i1, i1 0
- %nop12675 = alloca i1, i1 0
- %nop12676 = alloca i1, i1 0
- %nop12677 = alloca i1, i1 0
- %nop12678 = alloca i1, i1 0
- %nop12679 = alloca i1, i1 0
- %nop12680 = alloca i1, i1 0
- %nop12681 = alloca i1, i1 0
- %nop12682 = alloca i1, i1 0
- %nop12683 = alloca i1, i1 0
- %nop12684 = alloca i1, i1 0
- %nop12685 = alloca i1, i1 0
- %nop12686 = alloca i1, i1 0
- %nop12687 = alloca i1, i1 0
- %nop12688 = alloca i1, i1 0
- %nop12689 = alloca i1, i1 0
- %nop12690 = alloca i1, i1 0
- %nop12691 = alloca i1, i1 0
- %nop12692 = alloca i1, i1 0
- %nop12693 = alloca i1, i1 0
- %nop12694 = alloca i1, i1 0
- %nop12695 = alloca i1, i1 0
- %nop12696 = alloca i1, i1 0
- %nop12697 = alloca i1, i1 0
- %nop12698 = alloca i1, i1 0
- %nop12699 = alloca i1, i1 0
- %nop12700 = alloca i1, i1 0
- %nop12701 = alloca i1, i1 0
- %nop12702 = alloca i1, i1 0
- %nop12703 = alloca i1, i1 0
- %nop12704 = alloca i1, i1 0
- %nop12705 = alloca i1, i1 0
- %nop12706 = alloca i1, i1 0
- %nop12707 = alloca i1, i1 0
- %nop12708 = alloca i1, i1 0
- %nop12709 = alloca i1, i1 0
- %nop12710 = alloca i1, i1 0
- %nop12711 = alloca i1, i1 0
- %nop12712 = alloca i1, i1 0
- %nop12713 = alloca i1, i1 0
- %nop12714 = alloca i1, i1 0
- %nop12715 = alloca i1, i1 0
- %nop12716 = alloca i1, i1 0
- %nop12717 = alloca i1, i1 0
- %nop12718 = alloca i1, i1 0
- %nop12719 = alloca i1, i1 0
- %nop12720 = alloca i1, i1 0
- %nop12721 = alloca i1, i1 0
- %nop12722 = alloca i1, i1 0
- %nop12723 = alloca i1, i1 0
- %nop12724 = alloca i1, i1 0
- %nop12725 = alloca i1, i1 0
- %nop12726 = alloca i1, i1 0
- %nop12727 = alloca i1, i1 0
- %nop12728 = alloca i1, i1 0
- %nop12729 = alloca i1, i1 0
- %nop12730 = alloca i1, i1 0
- %nop12731 = alloca i1, i1 0
- %nop12732 = alloca i1, i1 0
- %nop12733 = alloca i1, i1 0
- %nop12734 = alloca i1, i1 0
- %nop12735 = alloca i1, i1 0
- %nop12736 = alloca i1, i1 0
- %nop12737 = alloca i1, i1 0
- %nop12738 = alloca i1, i1 0
- %nop12739 = alloca i1, i1 0
- %nop12740 = alloca i1, i1 0
- %nop12741 = alloca i1, i1 0
- %nop12742 = alloca i1, i1 0
- %nop12743 = alloca i1, i1 0
- %nop12744 = alloca i1, i1 0
- %nop12745 = alloca i1, i1 0
- %nop12746 = alloca i1, i1 0
- %nop12747 = alloca i1, i1 0
- %nop12748 = alloca i1, i1 0
- %nop12749 = alloca i1, i1 0
- %nop12750 = alloca i1, i1 0
- %nop12751 = alloca i1, i1 0
- %nop12752 = alloca i1, i1 0
- %nop12753 = alloca i1, i1 0
- %nop12754 = alloca i1, i1 0
- %nop12755 = alloca i1, i1 0
- %nop12756 = alloca i1, i1 0
- %nop12757 = alloca i1, i1 0
- %nop12758 = alloca i1, i1 0
- %nop12759 = alloca i1, i1 0
- %nop12760 = alloca i1, i1 0
- %nop12761 = alloca i1, i1 0
- %nop12762 = alloca i1, i1 0
- %nop12763 = alloca i1, i1 0
- %nop12764 = alloca i1, i1 0
- %nop12765 = alloca i1, i1 0
- %nop12766 = alloca i1, i1 0
- %nop12767 = alloca i1, i1 0
- %nop12768 = alloca i1, i1 0
- %nop12769 = alloca i1, i1 0
- %nop12770 = alloca i1, i1 0
- %nop12771 = alloca i1, i1 0
- %nop12772 = alloca i1, i1 0
- %nop12773 = alloca i1, i1 0
- %nop12774 = alloca i1, i1 0
- %nop12775 = alloca i1, i1 0
- %nop12776 = alloca i1, i1 0
- %nop12777 = alloca i1, i1 0
- %nop12778 = alloca i1, i1 0
- %nop12779 = alloca i1, i1 0
- %nop12780 = alloca i1, i1 0
- %nop12781 = alloca i1, i1 0
- %nop12782 = alloca i1, i1 0
- %nop12783 = alloca i1, i1 0
- %nop12784 = alloca i1, i1 0
- %nop12785 = alloca i1, i1 0
- %nop12786 = alloca i1, i1 0
- %nop12787 = alloca i1, i1 0
- %nop12788 = alloca i1, i1 0
- %nop12789 = alloca i1, i1 0
- %nop12790 = alloca i1, i1 0
- %nop12791 = alloca i1, i1 0
- %nop12792 = alloca i1, i1 0
- %nop12793 = alloca i1, i1 0
- %nop12794 = alloca i1, i1 0
- %nop12795 = alloca i1, i1 0
- %nop12796 = alloca i1, i1 0
- %nop12797 = alloca i1, i1 0
- %nop12798 = alloca i1, i1 0
- %nop12799 = alloca i1, i1 0
- %nop12800 = alloca i1, i1 0
- %nop12801 = alloca i1, i1 0
- %nop12802 = alloca i1, i1 0
- %nop12803 = alloca i1, i1 0
- %nop12804 = alloca i1, i1 0
- %nop12805 = alloca i1, i1 0
- %nop12806 = alloca i1, i1 0
- %nop12807 = alloca i1, i1 0
- %nop12808 = alloca i1, i1 0
- %nop12809 = alloca i1, i1 0
- %nop12810 = alloca i1, i1 0
- %nop12811 = alloca i1, i1 0
- %nop12812 = alloca i1, i1 0
- %nop12813 = alloca i1, i1 0
- %nop12814 = alloca i1, i1 0
- %nop12815 = alloca i1, i1 0
- %nop12816 = alloca i1, i1 0
- %nop12817 = alloca i1, i1 0
- %nop12818 = alloca i1, i1 0
- %nop12819 = alloca i1, i1 0
- %nop12820 = alloca i1, i1 0
- %nop12821 = alloca i1, i1 0
- %nop12822 = alloca i1, i1 0
- %nop12823 = alloca i1, i1 0
- %nop12824 = alloca i1, i1 0
- %nop12825 = alloca i1, i1 0
- %nop12826 = alloca i1, i1 0
- %nop12827 = alloca i1, i1 0
- %nop12828 = alloca i1, i1 0
- %nop12829 = alloca i1, i1 0
- %nop12830 = alloca i1, i1 0
- %nop12831 = alloca i1, i1 0
- %nop12832 = alloca i1, i1 0
- %nop12833 = alloca i1, i1 0
- %nop12834 = alloca i1, i1 0
- %nop12835 = alloca i1, i1 0
- %nop12836 = alloca i1, i1 0
- %nop12837 = alloca i1, i1 0
- %nop12838 = alloca i1, i1 0
- %nop12839 = alloca i1, i1 0
- %nop12840 = alloca i1, i1 0
- %nop12841 = alloca i1, i1 0
- %nop12842 = alloca i1, i1 0
- %nop12843 = alloca i1, i1 0
- %nop12844 = alloca i1, i1 0
- %nop12845 = alloca i1, i1 0
- %nop12846 = alloca i1, i1 0
- %nop12847 = alloca i1, i1 0
- %nop12848 = alloca i1, i1 0
- %nop12849 = alloca i1, i1 0
- %nop12850 = alloca i1, i1 0
- %nop12851 = alloca i1, i1 0
- %nop12852 = alloca i1, i1 0
- %nop12853 = alloca i1, i1 0
- %nop12854 = alloca i1, i1 0
- %nop12855 = alloca i1, i1 0
- %nop12856 = alloca i1, i1 0
- %nop12857 = alloca i1, i1 0
- %nop12858 = alloca i1, i1 0
- %nop12859 = alloca i1, i1 0
- %nop12860 = alloca i1, i1 0
- %nop12861 = alloca i1, i1 0
- %nop12862 = alloca i1, i1 0
- %nop12863 = alloca i1, i1 0
- %nop12864 = alloca i1, i1 0
- %nop12865 = alloca i1, i1 0
- %nop12866 = alloca i1, i1 0
- %nop12867 = alloca i1, i1 0
- %nop12868 = alloca i1, i1 0
- %nop12869 = alloca i1, i1 0
- %nop12870 = alloca i1, i1 0
- %nop12871 = alloca i1, i1 0
- %nop12872 = alloca i1, i1 0
- %nop12873 = alloca i1, i1 0
- %nop12874 = alloca i1, i1 0
- %nop12875 = alloca i1, i1 0
- %nop12876 = alloca i1, i1 0
- %nop12877 = alloca i1, i1 0
- %nop12878 = alloca i1, i1 0
- %nop12879 = alloca i1, i1 0
- %nop12880 = alloca i1, i1 0
- %nop12881 = alloca i1, i1 0
- %nop12882 = alloca i1, i1 0
- %nop12883 = alloca i1, i1 0
- %nop12884 = alloca i1, i1 0
- %nop12885 = alloca i1, i1 0
- %nop12886 = alloca i1, i1 0
- %nop12887 = alloca i1, i1 0
- %nop12888 = alloca i1, i1 0
- %nop12889 = alloca i1, i1 0
- %nop12890 = alloca i1, i1 0
- %nop12891 = alloca i1, i1 0
- %nop12892 = alloca i1, i1 0
- %nop12893 = alloca i1, i1 0
- %nop12894 = alloca i1, i1 0
- %nop12895 = alloca i1, i1 0
- %nop12896 = alloca i1, i1 0
- %nop12897 = alloca i1, i1 0
- %nop12898 = alloca i1, i1 0
- %nop12899 = alloca i1, i1 0
- %nop12900 = alloca i1, i1 0
- %nop12901 = alloca i1, i1 0
- %nop12902 = alloca i1, i1 0
- %nop12903 = alloca i1, i1 0
- %nop12904 = alloca i1, i1 0
- %nop12905 = alloca i1, i1 0
- %nop12906 = alloca i1, i1 0
- %nop12907 = alloca i1, i1 0
- %nop12908 = alloca i1, i1 0
- %nop12909 = alloca i1, i1 0
- %nop12910 = alloca i1, i1 0
- %nop12911 = alloca i1, i1 0
- %nop12912 = alloca i1, i1 0
- %nop12913 = alloca i1, i1 0
- %nop12914 = alloca i1, i1 0
- %nop12915 = alloca i1, i1 0
- %nop12916 = alloca i1, i1 0
- %nop12917 = alloca i1, i1 0
- %nop12918 = alloca i1, i1 0
- %nop12919 = alloca i1, i1 0
- %nop12920 = alloca i1, i1 0
- %nop12921 = alloca i1, i1 0
- %nop12922 = alloca i1, i1 0
- %nop12923 = alloca i1, i1 0
- %nop12924 = alloca i1, i1 0
- %nop12925 = alloca i1, i1 0
- %nop12926 = alloca i1, i1 0
- %nop12927 = alloca i1, i1 0
- %nop12928 = alloca i1, i1 0
- %nop12929 = alloca i1, i1 0
- %nop12930 = alloca i1, i1 0
- %nop12931 = alloca i1, i1 0
- %nop12932 = alloca i1, i1 0
- %nop12933 = alloca i1, i1 0
- %nop12934 = alloca i1, i1 0
- %nop12935 = alloca i1, i1 0
- %nop12936 = alloca i1, i1 0
- %nop12937 = alloca i1, i1 0
- %nop12938 = alloca i1, i1 0
- %nop12939 = alloca i1, i1 0
- %nop12940 = alloca i1, i1 0
- %nop12941 = alloca i1, i1 0
- %nop12942 = alloca i1, i1 0
- %nop12943 = alloca i1, i1 0
- %nop12944 = alloca i1, i1 0
- %nop12945 = alloca i1, i1 0
- %nop12946 = alloca i1, i1 0
- %nop12947 = alloca i1, i1 0
- %nop12948 = alloca i1, i1 0
- %nop12949 = alloca i1, i1 0
- %nop12950 = alloca i1, i1 0
- %nop12951 = alloca i1, i1 0
- %nop12952 = alloca i1, i1 0
- %nop12953 = alloca i1, i1 0
- %nop12954 = alloca i1, i1 0
- %nop12955 = alloca i1, i1 0
- %nop12956 = alloca i1, i1 0
- %nop12957 = alloca i1, i1 0
- %nop12958 = alloca i1, i1 0
- %nop12959 = alloca i1, i1 0
- %nop12960 = alloca i1, i1 0
- %nop12961 = alloca i1, i1 0
- %nop12962 = alloca i1, i1 0
- %nop12963 = alloca i1, i1 0
- %nop12964 = alloca i1, i1 0
- %nop12965 = alloca i1, i1 0
- %nop12966 = alloca i1, i1 0
- %nop12967 = alloca i1, i1 0
- %nop12968 = alloca i1, i1 0
- %nop12969 = alloca i1, i1 0
- %nop12970 = alloca i1, i1 0
- %nop12971 = alloca i1, i1 0
- %nop12972 = alloca i1, i1 0
- %nop12973 = alloca i1, i1 0
- %nop12974 = alloca i1, i1 0
- %nop12975 = alloca i1, i1 0
- %nop12976 = alloca i1, i1 0
- %nop12977 = alloca i1, i1 0
- %nop12978 = alloca i1, i1 0
- %nop12979 = alloca i1, i1 0
- %nop12980 = alloca i1, i1 0
- %nop12981 = alloca i1, i1 0
- %nop12982 = alloca i1, i1 0
- %nop12983 = alloca i1, i1 0
- %nop12984 = alloca i1, i1 0
- %nop12985 = alloca i1, i1 0
- %nop12986 = alloca i1, i1 0
- %nop12987 = alloca i1, i1 0
- %nop12988 = alloca i1, i1 0
- %nop12989 = alloca i1, i1 0
- %nop12990 = alloca i1, i1 0
- %nop12991 = alloca i1, i1 0
- %nop12992 = alloca i1, i1 0
- %nop12993 = alloca i1, i1 0
- %nop12994 = alloca i1, i1 0
- %nop12995 = alloca i1, i1 0
- %nop12996 = alloca i1, i1 0
- %nop12997 = alloca i1, i1 0
- %nop12998 = alloca i1, i1 0
- %nop12999 = alloca i1, i1 0
- %nop13000 = alloca i1, i1 0
- %nop13001 = alloca i1, i1 0
- %nop13002 = alloca i1, i1 0
- %nop13003 = alloca i1, i1 0
- %nop13004 = alloca i1, i1 0
- %nop13005 = alloca i1, i1 0
- %nop13006 = alloca i1, i1 0
- %nop13007 = alloca i1, i1 0
- %nop13008 = alloca i1, i1 0
- %nop13009 = alloca i1, i1 0
- %nop13010 = alloca i1, i1 0
- %nop13011 = alloca i1, i1 0
- %nop13012 = alloca i1, i1 0
- %nop13013 = alloca i1, i1 0
- %nop13014 = alloca i1, i1 0
- %nop13015 = alloca i1, i1 0
- %nop13016 = alloca i1, i1 0
- %nop13017 = alloca i1, i1 0
- %nop13018 = alloca i1, i1 0
- %nop13019 = alloca i1, i1 0
- %nop13020 = alloca i1, i1 0
- %nop13021 = alloca i1, i1 0
- %nop13022 = alloca i1, i1 0
- %nop13023 = alloca i1, i1 0
- %nop13024 = alloca i1, i1 0
- %nop13025 = alloca i1, i1 0
- %nop13026 = alloca i1, i1 0
- %nop13027 = alloca i1, i1 0
- %nop13028 = alloca i1, i1 0
- %nop13029 = alloca i1, i1 0
- %nop13030 = alloca i1, i1 0
- %nop13031 = alloca i1, i1 0
- %nop13032 = alloca i1, i1 0
- %nop13033 = alloca i1, i1 0
- %nop13034 = alloca i1, i1 0
- %nop13035 = alloca i1, i1 0
- %nop13036 = alloca i1, i1 0
- %nop13037 = alloca i1, i1 0
- %nop13038 = alloca i1, i1 0
- %nop13039 = alloca i1, i1 0
- %nop13040 = alloca i1, i1 0
- %nop13041 = alloca i1, i1 0
- %nop13042 = alloca i1, i1 0
- %nop13043 = alloca i1, i1 0
- %nop13044 = alloca i1, i1 0
- %nop13045 = alloca i1, i1 0
- %nop13046 = alloca i1, i1 0
- %nop13047 = alloca i1, i1 0
- %nop13048 = alloca i1, i1 0
- %nop13049 = alloca i1, i1 0
- %nop13050 = alloca i1, i1 0
- %nop13051 = alloca i1, i1 0
- %nop13052 = alloca i1, i1 0
- %nop13053 = alloca i1, i1 0
- %nop13054 = alloca i1, i1 0
- %nop13055 = alloca i1, i1 0
- %nop13056 = alloca i1, i1 0
- %nop13057 = alloca i1, i1 0
- %nop13058 = alloca i1, i1 0
- %nop13059 = alloca i1, i1 0
- %nop13060 = alloca i1, i1 0
- %nop13061 = alloca i1, i1 0
- %nop13062 = alloca i1, i1 0
- %nop13063 = alloca i1, i1 0
- %nop13064 = alloca i1, i1 0
- %nop13065 = alloca i1, i1 0
- %nop13066 = alloca i1, i1 0
- %nop13067 = alloca i1, i1 0
- %nop13068 = alloca i1, i1 0
- %nop13069 = alloca i1, i1 0
- %nop13070 = alloca i1, i1 0
- %nop13071 = alloca i1, i1 0
- %nop13072 = alloca i1, i1 0
- %nop13073 = alloca i1, i1 0
- %nop13074 = alloca i1, i1 0
- %nop13075 = alloca i1, i1 0
- %nop13076 = alloca i1, i1 0
- %nop13077 = alloca i1, i1 0
- %nop13078 = alloca i1, i1 0
- %nop13079 = alloca i1, i1 0
- %nop13080 = alloca i1, i1 0
- %nop13081 = alloca i1, i1 0
- %nop13082 = alloca i1, i1 0
- %nop13083 = alloca i1, i1 0
- %nop13084 = alloca i1, i1 0
- %nop13085 = alloca i1, i1 0
- %nop13086 = alloca i1, i1 0
- %nop13087 = alloca i1, i1 0
- %nop13088 = alloca i1, i1 0
- %nop13089 = alloca i1, i1 0
- %nop13090 = alloca i1, i1 0
- %nop13091 = alloca i1, i1 0
- %nop13092 = alloca i1, i1 0
- %nop13093 = alloca i1, i1 0
- %nop13094 = alloca i1, i1 0
- %nop13095 = alloca i1, i1 0
- %nop13096 = alloca i1, i1 0
- %nop13097 = alloca i1, i1 0
- %nop13098 = alloca i1, i1 0
- %nop13099 = alloca i1, i1 0
- %nop13100 = alloca i1, i1 0
- %nop13101 = alloca i1, i1 0
- %nop13102 = alloca i1, i1 0
- %nop13103 = alloca i1, i1 0
- %nop13104 = alloca i1, i1 0
- %nop13105 = alloca i1, i1 0
- %nop13106 = alloca i1, i1 0
- %nop13107 = alloca i1, i1 0
- %nop13108 = alloca i1, i1 0
- %nop13109 = alloca i1, i1 0
- %nop13110 = alloca i1, i1 0
- %nop13111 = alloca i1, i1 0
- %nop13112 = alloca i1, i1 0
- %nop13113 = alloca i1, i1 0
- %nop13114 = alloca i1, i1 0
- %nop13115 = alloca i1, i1 0
- %nop13116 = alloca i1, i1 0
- %nop13117 = alloca i1, i1 0
- %nop13118 = alloca i1, i1 0
- %nop13119 = alloca i1, i1 0
- %nop13120 = alloca i1, i1 0
- %nop13121 = alloca i1, i1 0
- %nop13122 = alloca i1, i1 0
- %nop13123 = alloca i1, i1 0
- %nop13124 = alloca i1, i1 0
- %nop13125 = alloca i1, i1 0
- %nop13126 = alloca i1, i1 0
- %nop13127 = alloca i1, i1 0
- %nop13128 = alloca i1, i1 0
- %nop13129 = alloca i1, i1 0
- %nop13130 = alloca i1, i1 0
- %nop13131 = alloca i1, i1 0
- %nop13132 = alloca i1, i1 0
- %nop13133 = alloca i1, i1 0
- %nop13134 = alloca i1, i1 0
- %nop13135 = alloca i1, i1 0
- %nop13136 = alloca i1, i1 0
- %nop13137 = alloca i1, i1 0
- %nop13138 = alloca i1, i1 0
- %nop13139 = alloca i1, i1 0
- %nop13140 = alloca i1, i1 0
- %nop13141 = alloca i1, i1 0
- %nop13142 = alloca i1, i1 0
- %nop13143 = alloca i1, i1 0
- %nop13144 = alloca i1, i1 0
- %nop13145 = alloca i1, i1 0
- %nop13146 = alloca i1, i1 0
- %nop13147 = alloca i1, i1 0
- %nop13148 = alloca i1, i1 0
- %nop13149 = alloca i1, i1 0
- %nop13150 = alloca i1, i1 0
- %nop13151 = alloca i1, i1 0
- %nop13152 = alloca i1, i1 0
- %nop13153 = alloca i1, i1 0
- %nop13154 = alloca i1, i1 0
- %nop13155 = alloca i1, i1 0
- %nop13156 = alloca i1, i1 0
- %nop13157 = alloca i1, i1 0
- %nop13158 = alloca i1, i1 0
- %nop13159 = alloca i1, i1 0
- %nop13160 = alloca i1, i1 0
- %nop13161 = alloca i1, i1 0
- %nop13162 = alloca i1, i1 0
- %nop13163 = alloca i1, i1 0
- %nop13164 = alloca i1, i1 0
- %nop13165 = alloca i1, i1 0
- %nop13166 = alloca i1, i1 0
- %nop13167 = alloca i1, i1 0
- %nop13168 = alloca i1, i1 0
- %nop13169 = alloca i1, i1 0
- %nop13170 = alloca i1, i1 0
- %nop13171 = alloca i1, i1 0
- %nop13172 = alloca i1, i1 0
- %nop13173 = alloca i1, i1 0
- %nop13174 = alloca i1, i1 0
- %nop13175 = alloca i1, i1 0
- %nop13176 = alloca i1, i1 0
- %nop13177 = alloca i1, i1 0
- %nop13178 = alloca i1, i1 0
- %nop13179 = alloca i1, i1 0
- %nop13180 = alloca i1, i1 0
- %nop13181 = alloca i1, i1 0
- %nop13182 = alloca i1, i1 0
- %nop13183 = alloca i1, i1 0
- %nop13184 = alloca i1, i1 0
- %nop13185 = alloca i1, i1 0
- %nop13186 = alloca i1, i1 0
- %nop13187 = alloca i1, i1 0
- %nop13188 = alloca i1, i1 0
- %nop13189 = alloca i1, i1 0
- %nop13190 = alloca i1, i1 0
- %nop13191 = alloca i1, i1 0
- %nop13192 = alloca i1, i1 0
- %nop13193 = alloca i1, i1 0
- %nop13194 = alloca i1, i1 0
- %nop13195 = alloca i1, i1 0
- %nop13196 = alloca i1, i1 0
- %nop13197 = alloca i1, i1 0
- %nop13198 = alloca i1, i1 0
- %nop13199 = alloca i1, i1 0
- %nop13200 = alloca i1, i1 0
- %nop13201 = alloca i1, i1 0
- %nop13202 = alloca i1, i1 0
- %nop13203 = alloca i1, i1 0
- %nop13204 = alloca i1, i1 0
- %nop13205 = alloca i1, i1 0
- %nop13206 = alloca i1, i1 0
- %nop13207 = alloca i1, i1 0
- %nop13208 = alloca i1, i1 0
- %nop13209 = alloca i1, i1 0
- %nop13210 = alloca i1, i1 0
- %nop13211 = alloca i1, i1 0
- %nop13212 = alloca i1, i1 0
- %nop13213 = alloca i1, i1 0
- %nop13214 = alloca i1, i1 0
- %nop13215 = alloca i1, i1 0
- %nop13216 = alloca i1, i1 0
- %nop13217 = alloca i1, i1 0
- %nop13218 = alloca i1, i1 0
- %nop13219 = alloca i1, i1 0
- %nop13220 = alloca i1, i1 0
- %nop13221 = alloca i1, i1 0
- %nop13222 = alloca i1, i1 0
- %nop13223 = alloca i1, i1 0
- %nop13224 = alloca i1, i1 0
- %nop13225 = alloca i1, i1 0
- %nop13226 = alloca i1, i1 0
- %nop13227 = alloca i1, i1 0
- %nop13228 = alloca i1, i1 0
- %nop13229 = alloca i1, i1 0
- %nop13230 = alloca i1, i1 0
- %nop13231 = alloca i1, i1 0
- %nop13232 = alloca i1, i1 0
- %nop13233 = alloca i1, i1 0
- %nop13234 = alloca i1, i1 0
- %nop13235 = alloca i1, i1 0
- %nop13236 = alloca i1, i1 0
- %nop13237 = alloca i1, i1 0
- %nop13238 = alloca i1, i1 0
- %nop13239 = alloca i1, i1 0
- %nop13240 = alloca i1, i1 0
- %nop13241 = alloca i1, i1 0
- %nop13242 = alloca i1, i1 0
- %nop13243 = alloca i1, i1 0
- %nop13244 = alloca i1, i1 0
- %nop13245 = alloca i1, i1 0
- %nop13246 = alloca i1, i1 0
- %nop13247 = alloca i1, i1 0
- %nop13248 = alloca i1, i1 0
- %nop13249 = alloca i1, i1 0
- %nop13250 = alloca i1, i1 0
- %nop13251 = alloca i1, i1 0
- %nop13252 = alloca i1, i1 0
- %nop13253 = alloca i1, i1 0
- %nop13254 = alloca i1, i1 0
- %nop13255 = alloca i1, i1 0
- %nop13256 = alloca i1, i1 0
- %nop13257 = alloca i1, i1 0
- %nop13258 = alloca i1, i1 0
- %nop13259 = alloca i1, i1 0
- %nop13260 = alloca i1, i1 0
- %nop13261 = alloca i1, i1 0
- %nop13262 = alloca i1, i1 0
- %nop13263 = alloca i1, i1 0
- %nop13264 = alloca i1, i1 0
- %nop13265 = alloca i1, i1 0
- %nop13266 = alloca i1, i1 0
- %nop13267 = alloca i1, i1 0
- %nop13268 = alloca i1, i1 0
- %nop13269 = alloca i1, i1 0
- %nop13270 = alloca i1, i1 0
- %nop13271 = alloca i1, i1 0
- %nop13272 = alloca i1, i1 0
- %nop13273 = alloca i1, i1 0
- %nop13274 = alloca i1, i1 0
- %nop13275 = alloca i1, i1 0
- %nop13276 = alloca i1, i1 0
- %nop13277 = alloca i1, i1 0
- %nop13278 = alloca i1, i1 0
- %nop13279 = alloca i1, i1 0
- %nop13280 = alloca i1, i1 0
- %nop13281 = alloca i1, i1 0
- %nop13282 = alloca i1, i1 0
- %nop13283 = alloca i1, i1 0
- %nop13284 = alloca i1, i1 0
- %nop13285 = alloca i1, i1 0
- %nop13286 = alloca i1, i1 0
- %nop13287 = alloca i1, i1 0
- %nop13288 = alloca i1, i1 0
- %nop13289 = alloca i1, i1 0
- %nop13290 = alloca i1, i1 0
- %nop13291 = alloca i1, i1 0
- %nop13292 = alloca i1, i1 0
- %nop13293 = alloca i1, i1 0
- %nop13294 = alloca i1, i1 0
- %nop13295 = alloca i1, i1 0
- %nop13296 = alloca i1, i1 0
- %nop13297 = alloca i1, i1 0
- %nop13298 = alloca i1, i1 0
- %nop13299 = alloca i1, i1 0
- %nop13300 = alloca i1, i1 0
- %nop13301 = alloca i1, i1 0
- %nop13302 = alloca i1, i1 0
- %nop13303 = alloca i1, i1 0
- %nop13304 = alloca i1, i1 0
- %nop13305 = alloca i1, i1 0
- %nop13306 = alloca i1, i1 0
- %nop13307 = alloca i1, i1 0
- %nop13308 = alloca i1, i1 0
- %nop13309 = alloca i1, i1 0
- %nop13310 = alloca i1, i1 0
- %nop13311 = alloca i1, i1 0
- %nop13312 = alloca i1, i1 0
- %nop13313 = alloca i1, i1 0
- %nop13314 = alloca i1, i1 0
- %nop13315 = alloca i1, i1 0
- %nop13316 = alloca i1, i1 0
- %nop13317 = alloca i1, i1 0
- %nop13318 = alloca i1, i1 0
- %nop13319 = alloca i1, i1 0
- %nop13320 = alloca i1, i1 0
- %nop13321 = alloca i1, i1 0
- %nop13322 = alloca i1, i1 0
- %nop13323 = alloca i1, i1 0
- %nop13324 = alloca i1, i1 0
- %nop13325 = alloca i1, i1 0
- %nop13326 = alloca i1, i1 0
- %nop13327 = alloca i1, i1 0
- %nop13328 = alloca i1, i1 0
- %nop13329 = alloca i1, i1 0
- %nop13330 = alloca i1, i1 0
- %nop13331 = alloca i1, i1 0
- %nop13332 = alloca i1, i1 0
- %nop13333 = alloca i1, i1 0
- %nop13334 = alloca i1, i1 0
- %nop13335 = alloca i1, i1 0
- %nop13336 = alloca i1, i1 0
- %nop13337 = alloca i1, i1 0
- %nop13338 = alloca i1, i1 0
- %nop13339 = alloca i1, i1 0
- %nop13340 = alloca i1, i1 0
- %nop13341 = alloca i1, i1 0
- %nop13342 = alloca i1, i1 0
- %nop13343 = alloca i1, i1 0
- %nop13344 = alloca i1, i1 0
- %nop13345 = alloca i1, i1 0
- %nop13346 = alloca i1, i1 0
- %nop13347 = alloca i1, i1 0
- %nop13348 = alloca i1, i1 0
- %nop13349 = alloca i1, i1 0
- %nop13350 = alloca i1, i1 0
- %nop13351 = alloca i1, i1 0
- %nop13352 = alloca i1, i1 0
- %nop13353 = alloca i1, i1 0
- %nop13354 = alloca i1, i1 0
- %nop13355 = alloca i1, i1 0
- %nop13356 = alloca i1, i1 0
- %nop13357 = alloca i1, i1 0
- %nop13358 = alloca i1, i1 0
- %nop13359 = alloca i1, i1 0
- %nop13360 = alloca i1, i1 0
- %nop13361 = alloca i1, i1 0
- %nop13362 = alloca i1, i1 0
- %nop13363 = alloca i1, i1 0
- %nop13364 = alloca i1, i1 0
- %nop13365 = alloca i1, i1 0
- %nop13366 = alloca i1, i1 0
- %nop13367 = alloca i1, i1 0
- %nop13368 = alloca i1, i1 0
- %nop13369 = alloca i1, i1 0
- %nop13370 = alloca i1, i1 0
- %nop13371 = alloca i1, i1 0
- %nop13372 = alloca i1, i1 0
- %nop13373 = alloca i1, i1 0
- %nop13374 = alloca i1, i1 0
- %nop13375 = alloca i1, i1 0
- %nop13376 = alloca i1, i1 0
- %nop13377 = alloca i1, i1 0
- %nop13378 = alloca i1, i1 0
- %nop13379 = alloca i1, i1 0
- %nop13380 = alloca i1, i1 0
- %nop13381 = alloca i1, i1 0
- %nop13382 = alloca i1, i1 0
- %nop13383 = alloca i1, i1 0
- %nop13384 = alloca i1, i1 0
- %nop13385 = alloca i1, i1 0
- %nop13386 = alloca i1, i1 0
- %nop13387 = alloca i1, i1 0
- %nop13388 = alloca i1, i1 0
- %nop13389 = alloca i1, i1 0
- %nop13390 = alloca i1, i1 0
- %nop13391 = alloca i1, i1 0
- %nop13392 = alloca i1, i1 0
- %nop13393 = alloca i1, i1 0
- %nop13394 = alloca i1, i1 0
- %nop13395 = alloca i1, i1 0
- %nop13396 = alloca i1, i1 0
- %nop13397 = alloca i1, i1 0
- %nop13398 = alloca i1, i1 0
- %nop13399 = alloca i1, i1 0
- %nop13400 = alloca i1, i1 0
- %nop13401 = alloca i1, i1 0
- %nop13402 = alloca i1, i1 0
- %nop13403 = alloca i1, i1 0
- %nop13404 = alloca i1, i1 0
- %nop13405 = alloca i1, i1 0
- %nop13406 = alloca i1, i1 0
- %nop13407 = alloca i1, i1 0
- %nop13408 = alloca i1, i1 0
- %nop13409 = alloca i1, i1 0
- %nop13410 = alloca i1, i1 0
- %nop13411 = alloca i1, i1 0
- %nop13412 = alloca i1, i1 0
- %nop13413 = alloca i1, i1 0
- %nop13414 = alloca i1, i1 0
- %nop13415 = alloca i1, i1 0
- %nop13416 = alloca i1, i1 0
- %nop13417 = alloca i1, i1 0
- %nop13418 = alloca i1, i1 0
- %nop13419 = alloca i1, i1 0
- %nop13420 = alloca i1, i1 0
- %nop13421 = alloca i1, i1 0
- %nop13422 = alloca i1, i1 0
- %nop13423 = alloca i1, i1 0
- %nop13424 = alloca i1, i1 0
- %nop13425 = alloca i1, i1 0
- %nop13426 = alloca i1, i1 0
- %nop13427 = alloca i1, i1 0
- %nop13428 = alloca i1, i1 0
- %nop13429 = alloca i1, i1 0
- %nop13430 = alloca i1, i1 0
- %nop13431 = alloca i1, i1 0
- %nop13432 = alloca i1, i1 0
- %nop13433 = alloca i1, i1 0
- %nop13434 = alloca i1, i1 0
- %nop13435 = alloca i1, i1 0
- %nop13436 = alloca i1, i1 0
- %nop13437 = alloca i1, i1 0
- %nop13438 = alloca i1, i1 0
- %nop13439 = alloca i1, i1 0
- %nop13440 = alloca i1, i1 0
- %nop13441 = alloca i1, i1 0
- %nop13442 = alloca i1, i1 0
- %nop13443 = alloca i1, i1 0
- %nop13444 = alloca i1, i1 0
- %nop13445 = alloca i1, i1 0
- %nop13446 = alloca i1, i1 0
- %nop13447 = alloca i1, i1 0
- %nop13448 = alloca i1, i1 0
- %nop13449 = alloca i1, i1 0
- %nop13450 = alloca i1, i1 0
- %nop13451 = alloca i1, i1 0
- %nop13452 = alloca i1, i1 0
- %nop13453 = alloca i1, i1 0
- %nop13454 = alloca i1, i1 0
- %nop13455 = alloca i1, i1 0
- %nop13456 = alloca i1, i1 0
- %nop13457 = alloca i1, i1 0
- %nop13458 = alloca i1, i1 0
- %nop13459 = alloca i1, i1 0
- %nop13460 = alloca i1, i1 0
- %nop13461 = alloca i1, i1 0
- %nop13462 = alloca i1, i1 0
- %nop13463 = alloca i1, i1 0
- %nop13464 = alloca i1, i1 0
- %nop13465 = alloca i1, i1 0
- %nop13466 = alloca i1, i1 0
- %nop13467 = alloca i1, i1 0
- %nop13468 = alloca i1, i1 0
- %nop13469 = alloca i1, i1 0
- %nop13470 = alloca i1, i1 0
- %nop13471 = alloca i1, i1 0
- %nop13472 = alloca i1, i1 0
- %nop13473 = alloca i1, i1 0
- %nop13474 = alloca i1, i1 0
- %nop13475 = alloca i1, i1 0
- %nop13476 = alloca i1, i1 0
- %nop13477 = alloca i1, i1 0
- %nop13478 = alloca i1, i1 0
- %nop13479 = alloca i1, i1 0
- %nop13480 = alloca i1, i1 0
- %nop13481 = alloca i1, i1 0
- %nop13482 = alloca i1, i1 0
- %nop13483 = alloca i1, i1 0
- %nop13484 = alloca i1, i1 0
- %nop13485 = alloca i1, i1 0
- %nop13486 = alloca i1, i1 0
- %nop13487 = alloca i1, i1 0
- %nop13488 = alloca i1, i1 0
- %nop13489 = alloca i1, i1 0
- %nop13490 = alloca i1, i1 0
- %nop13491 = alloca i1, i1 0
- %nop13492 = alloca i1, i1 0
- %nop13493 = alloca i1, i1 0
- %nop13494 = alloca i1, i1 0
- %nop13495 = alloca i1, i1 0
- %nop13496 = alloca i1, i1 0
- %nop13497 = alloca i1, i1 0
- %nop13498 = alloca i1, i1 0
- %nop13499 = alloca i1, i1 0
- %nop13500 = alloca i1, i1 0
- %nop13501 = alloca i1, i1 0
- %nop13502 = alloca i1, i1 0
- %nop13503 = alloca i1, i1 0
- %nop13504 = alloca i1, i1 0
- %nop13505 = alloca i1, i1 0
- %nop13506 = alloca i1, i1 0
- %nop13507 = alloca i1, i1 0
- %nop13508 = alloca i1, i1 0
- %nop13509 = alloca i1, i1 0
- %nop13510 = alloca i1, i1 0
- %nop13511 = alloca i1, i1 0
- %nop13512 = alloca i1, i1 0
- %nop13513 = alloca i1, i1 0
- %nop13514 = alloca i1, i1 0
- %nop13515 = alloca i1, i1 0
- %nop13516 = alloca i1, i1 0
- %nop13517 = alloca i1, i1 0
- %nop13518 = alloca i1, i1 0
- %nop13519 = alloca i1, i1 0
- %nop13520 = alloca i1, i1 0
- %nop13521 = alloca i1, i1 0
- %nop13522 = alloca i1, i1 0
- %nop13523 = alloca i1, i1 0
- %nop13524 = alloca i1, i1 0
- %nop13525 = alloca i1, i1 0
- %nop13526 = alloca i1, i1 0
- %nop13527 = alloca i1, i1 0
- %nop13528 = alloca i1, i1 0
- %nop13529 = alloca i1, i1 0
- %nop13530 = alloca i1, i1 0
- %nop13531 = alloca i1, i1 0
- %nop13532 = alloca i1, i1 0
- %nop13533 = alloca i1, i1 0
- %nop13534 = alloca i1, i1 0
- %nop13535 = alloca i1, i1 0
- %nop13536 = alloca i1, i1 0
- %nop13537 = alloca i1, i1 0
- %nop13538 = alloca i1, i1 0
- %nop13539 = alloca i1, i1 0
- %nop13540 = alloca i1, i1 0
- %nop13541 = alloca i1, i1 0
- %nop13542 = alloca i1, i1 0
- %nop13543 = alloca i1, i1 0
- %nop13544 = alloca i1, i1 0
- %nop13545 = alloca i1, i1 0
- %nop13546 = alloca i1, i1 0
- %nop13547 = alloca i1, i1 0
- %nop13548 = alloca i1, i1 0
- %nop13549 = alloca i1, i1 0
- %nop13550 = alloca i1, i1 0
- %nop13551 = alloca i1, i1 0
- %nop13552 = alloca i1, i1 0
- %nop13553 = alloca i1, i1 0
- %nop13554 = alloca i1, i1 0
- %nop13555 = alloca i1, i1 0
- %nop13556 = alloca i1, i1 0
- %nop13557 = alloca i1, i1 0
- %nop13558 = alloca i1, i1 0
- %nop13559 = alloca i1, i1 0
- %nop13560 = alloca i1, i1 0
- %nop13561 = alloca i1, i1 0
- %nop13562 = alloca i1, i1 0
- %nop13563 = alloca i1, i1 0
- %nop13564 = alloca i1, i1 0
- %nop13565 = alloca i1, i1 0
- %nop13566 = alloca i1, i1 0
- %nop13567 = alloca i1, i1 0
- %nop13568 = alloca i1, i1 0
- %nop13569 = alloca i1, i1 0
- %nop13570 = alloca i1, i1 0
- %nop13571 = alloca i1, i1 0
- %nop13572 = alloca i1, i1 0
- %nop13573 = alloca i1, i1 0
- %nop13574 = alloca i1, i1 0
- %nop13575 = alloca i1, i1 0
- %nop13576 = alloca i1, i1 0
- %nop13577 = alloca i1, i1 0
- %nop13578 = alloca i1, i1 0
- %nop13579 = alloca i1, i1 0
- %nop13580 = alloca i1, i1 0
- %nop13581 = alloca i1, i1 0
- %nop13582 = alloca i1, i1 0
- %nop13583 = alloca i1, i1 0
- %nop13584 = alloca i1, i1 0
- %nop13585 = alloca i1, i1 0
- %nop13586 = alloca i1, i1 0
- %nop13587 = alloca i1, i1 0
- %nop13588 = alloca i1, i1 0
- %nop13589 = alloca i1, i1 0
- %nop13590 = alloca i1, i1 0
- %nop13591 = alloca i1, i1 0
- %nop13592 = alloca i1, i1 0
- %nop13593 = alloca i1, i1 0
- %nop13594 = alloca i1, i1 0
- %nop13595 = alloca i1, i1 0
- %nop13596 = alloca i1, i1 0
- %nop13597 = alloca i1, i1 0
- %nop13598 = alloca i1, i1 0
- %nop13599 = alloca i1, i1 0
- %nop13600 = alloca i1, i1 0
- %nop13601 = alloca i1, i1 0
- %nop13602 = alloca i1, i1 0
- %nop13603 = alloca i1, i1 0
- %nop13604 = alloca i1, i1 0
- %nop13605 = alloca i1, i1 0
- %nop13606 = alloca i1, i1 0
- %nop13607 = alloca i1, i1 0
- %nop13608 = alloca i1, i1 0
- %nop13609 = alloca i1, i1 0
- %nop13610 = alloca i1, i1 0
- %nop13611 = alloca i1, i1 0
- %nop13612 = alloca i1, i1 0
- %nop13613 = alloca i1, i1 0
- %nop13614 = alloca i1, i1 0
- %nop13615 = alloca i1, i1 0
- %nop13616 = alloca i1, i1 0
- %nop13617 = alloca i1, i1 0
- %nop13618 = alloca i1, i1 0
- %nop13619 = alloca i1, i1 0
- %nop13620 = alloca i1, i1 0
- %nop13621 = alloca i1, i1 0
- %nop13622 = alloca i1, i1 0
- %nop13623 = alloca i1, i1 0
- %nop13624 = alloca i1, i1 0
- %nop13625 = alloca i1, i1 0
- %nop13626 = alloca i1, i1 0
- %nop13627 = alloca i1, i1 0
- %nop13628 = alloca i1, i1 0
- %nop13629 = alloca i1, i1 0
- %nop13630 = alloca i1, i1 0
- %nop13631 = alloca i1, i1 0
- %nop13632 = alloca i1, i1 0
- %nop13633 = alloca i1, i1 0
- %nop13634 = alloca i1, i1 0
- %nop13635 = alloca i1, i1 0
- %nop13636 = alloca i1, i1 0
- %nop13637 = alloca i1, i1 0
- %nop13638 = alloca i1, i1 0
- %nop13639 = alloca i1, i1 0
- %nop13640 = alloca i1, i1 0
- %nop13641 = alloca i1, i1 0
- %nop13642 = alloca i1, i1 0
- %nop13643 = alloca i1, i1 0
- %nop13644 = alloca i1, i1 0
- %nop13645 = alloca i1, i1 0
- %nop13646 = alloca i1, i1 0
- %nop13647 = alloca i1, i1 0
- %nop13648 = alloca i1, i1 0
- %nop13649 = alloca i1, i1 0
- %nop13650 = alloca i1, i1 0
- %nop13651 = alloca i1, i1 0
- %nop13652 = alloca i1, i1 0
- %nop13653 = alloca i1, i1 0
- %nop13654 = alloca i1, i1 0
- %nop13655 = alloca i1, i1 0
- %nop13656 = alloca i1, i1 0
- %nop13657 = alloca i1, i1 0
- %nop13658 = alloca i1, i1 0
- %nop13659 = alloca i1, i1 0
- %nop13660 = alloca i1, i1 0
- %nop13661 = alloca i1, i1 0
- %nop13662 = alloca i1, i1 0
- %nop13663 = alloca i1, i1 0
- %nop13664 = alloca i1, i1 0
- %nop13665 = alloca i1, i1 0
- %nop13666 = alloca i1, i1 0
- %nop13667 = alloca i1, i1 0
- %nop13668 = alloca i1, i1 0
- %nop13669 = alloca i1, i1 0
- %nop13670 = alloca i1, i1 0
- %nop13671 = alloca i1, i1 0
- %nop13672 = alloca i1, i1 0
- %nop13673 = alloca i1, i1 0
- %nop13674 = alloca i1, i1 0
- %nop13675 = alloca i1, i1 0
- %nop13676 = alloca i1, i1 0
- %nop13677 = alloca i1, i1 0
- %nop13678 = alloca i1, i1 0
- %nop13679 = alloca i1, i1 0
- %nop13680 = alloca i1, i1 0
- %nop13681 = alloca i1, i1 0
- %nop13682 = alloca i1, i1 0
- %nop13683 = alloca i1, i1 0
- %nop13684 = alloca i1, i1 0
- %nop13685 = alloca i1, i1 0
- %nop13686 = alloca i1, i1 0
- %nop13687 = alloca i1, i1 0
- %nop13688 = alloca i1, i1 0
- %nop13689 = alloca i1, i1 0
- %nop13690 = alloca i1, i1 0
- %nop13691 = alloca i1, i1 0
- %nop13692 = alloca i1, i1 0
- %nop13693 = alloca i1, i1 0
- %nop13694 = alloca i1, i1 0
- %nop13695 = alloca i1, i1 0
- %nop13696 = alloca i1, i1 0
- %nop13697 = alloca i1, i1 0
- %nop13698 = alloca i1, i1 0
- %nop13699 = alloca i1, i1 0
- %nop13700 = alloca i1, i1 0
- %nop13701 = alloca i1, i1 0
- %nop13702 = alloca i1, i1 0
- %nop13703 = alloca i1, i1 0
- %nop13704 = alloca i1, i1 0
- %nop13705 = alloca i1, i1 0
- %nop13706 = alloca i1, i1 0
- %nop13707 = alloca i1, i1 0
- %nop13708 = alloca i1, i1 0
- %nop13709 = alloca i1, i1 0
- %nop13710 = alloca i1, i1 0
- %nop13711 = alloca i1, i1 0
- %nop13712 = alloca i1, i1 0
- %nop13713 = alloca i1, i1 0
- %nop13714 = alloca i1, i1 0
- %nop13715 = alloca i1, i1 0
- %nop13716 = alloca i1, i1 0
- %nop13717 = alloca i1, i1 0
- %nop13718 = alloca i1, i1 0
- %nop13719 = alloca i1, i1 0
- %nop13720 = alloca i1, i1 0
- %nop13721 = alloca i1, i1 0
- %nop13722 = alloca i1, i1 0
- %nop13723 = alloca i1, i1 0
- %nop13724 = alloca i1, i1 0
- %nop13725 = alloca i1, i1 0
- %nop13726 = alloca i1, i1 0
- %nop13727 = alloca i1, i1 0
- %nop13728 = alloca i1, i1 0
- %nop13729 = alloca i1, i1 0
- %nop13730 = alloca i1, i1 0
- %nop13731 = alloca i1, i1 0
- %nop13732 = alloca i1, i1 0
- %nop13733 = alloca i1, i1 0
- %nop13734 = alloca i1, i1 0
- %nop13735 = alloca i1, i1 0
- %nop13736 = alloca i1, i1 0
- %nop13737 = alloca i1, i1 0
- %nop13738 = alloca i1, i1 0
- %nop13739 = alloca i1, i1 0
- %nop13740 = alloca i1, i1 0
- %nop13741 = alloca i1, i1 0
- %nop13742 = alloca i1, i1 0
- %nop13743 = alloca i1, i1 0
- %nop13744 = alloca i1, i1 0
- %nop13745 = alloca i1, i1 0
- %nop13746 = alloca i1, i1 0
- %nop13747 = alloca i1, i1 0
- %nop13748 = alloca i1, i1 0
- %nop13749 = alloca i1, i1 0
- %nop13750 = alloca i1, i1 0
- %nop13751 = alloca i1, i1 0
- %nop13752 = alloca i1, i1 0
- %nop13753 = alloca i1, i1 0
- %nop13754 = alloca i1, i1 0
- %nop13755 = alloca i1, i1 0
- %nop13756 = alloca i1, i1 0
- %nop13757 = alloca i1, i1 0
- %nop13758 = alloca i1, i1 0
- %nop13759 = alloca i1, i1 0
- %nop13760 = alloca i1, i1 0
- %nop13761 = alloca i1, i1 0
- %nop13762 = alloca i1, i1 0
- %nop13763 = alloca i1, i1 0
- %nop13764 = alloca i1, i1 0
- %nop13765 = alloca i1, i1 0
- %nop13766 = alloca i1, i1 0
- %nop13767 = alloca i1, i1 0
- %nop13768 = alloca i1, i1 0
- %nop13769 = alloca i1, i1 0
- %nop13770 = alloca i1, i1 0
- %nop13771 = alloca i1, i1 0
- %nop13772 = alloca i1, i1 0
- %nop13773 = alloca i1, i1 0
- %nop13774 = alloca i1, i1 0
- %nop13775 = alloca i1, i1 0
- %nop13776 = alloca i1, i1 0
- %nop13777 = alloca i1, i1 0
- %nop13778 = alloca i1, i1 0
- %nop13779 = alloca i1, i1 0
- %nop13780 = alloca i1, i1 0
- %nop13781 = alloca i1, i1 0
- %nop13782 = alloca i1, i1 0
- %nop13783 = alloca i1, i1 0
- %nop13784 = alloca i1, i1 0
- %nop13785 = alloca i1, i1 0
- %nop13786 = alloca i1, i1 0
- %nop13787 = alloca i1, i1 0
- %nop13788 = alloca i1, i1 0
- %nop13789 = alloca i1, i1 0
- %nop13790 = alloca i1, i1 0
- %nop13791 = alloca i1, i1 0
- %nop13792 = alloca i1, i1 0
- %nop13793 = alloca i1, i1 0
- %nop13794 = alloca i1, i1 0
- %nop13795 = alloca i1, i1 0
- %nop13796 = alloca i1, i1 0
- %nop13797 = alloca i1, i1 0
- %nop13798 = alloca i1, i1 0
- %nop13799 = alloca i1, i1 0
- %nop13800 = alloca i1, i1 0
- %nop13801 = alloca i1, i1 0
- %nop13802 = alloca i1, i1 0
- %nop13803 = alloca i1, i1 0
- %nop13804 = alloca i1, i1 0
- %nop13805 = alloca i1, i1 0
- %nop13806 = alloca i1, i1 0
- %nop13807 = alloca i1, i1 0
- %nop13808 = alloca i1, i1 0
- %nop13809 = alloca i1, i1 0
- %nop13810 = alloca i1, i1 0
- %nop13811 = alloca i1, i1 0
- %nop13812 = alloca i1, i1 0
- %nop13813 = alloca i1, i1 0
- %nop13814 = alloca i1, i1 0
- %nop13815 = alloca i1, i1 0
- %nop13816 = alloca i1, i1 0
- %nop13817 = alloca i1, i1 0
- %nop13818 = alloca i1, i1 0
- %nop13819 = alloca i1, i1 0
- %nop13820 = alloca i1, i1 0
- %nop13821 = alloca i1, i1 0
- %nop13822 = alloca i1, i1 0
- %nop13823 = alloca i1, i1 0
- %nop13824 = alloca i1, i1 0
- %nop13825 = alloca i1, i1 0
- %nop13826 = alloca i1, i1 0
- %nop13827 = alloca i1, i1 0
- %nop13828 = alloca i1, i1 0
- %nop13829 = alloca i1, i1 0
- %nop13830 = alloca i1, i1 0
- %nop13831 = alloca i1, i1 0
- %nop13832 = alloca i1, i1 0
- %nop13833 = alloca i1, i1 0
- %nop13834 = alloca i1, i1 0
- %nop13835 = alloca i1, i1 0
- %nop13836 = alloca i1, i1 0
- %nop13837 = alloca i1, i1 0
- %nop13838 = alloca i1, i1 0
- %nop13839 = alloca i1, i1 0
- %nop13840 = alloca i1, i1 0
- %nop13841 = alloca i1, i1 0
- %nop13842 = alloca i1, i1 0
- %nop13843 = alloca i1, i1 0
- %nop13844 = alloca i1, i1 0
- %nop13845 = alloca i1, i1 0
- %nop13846 = alloca i1, i1 0
- %nop13847 = alloca i1, i1 0
- %nop13848 = alloca i1, i1 0
- %nop13849 = alloca i1, i1 0
- %nop13850 = alloca i1, i1 0
- %nop13851 = alloca i1, i1 0
- %nop13852 = alloca i1, i1 0
- %nop13853 = alloca i1, i1 0
- %nop13854 = alloca i1, i1 0
- %nop13855 = alloca i1, i1 0
- %nop13856 = alloca i1, i1 0
- %nop13857 = alloca i1, i1 0
- %nop13858 = alloca i1, i1 0
- %nop13859 = alloca i1, i1 0
- %nop13860 = alloca i1, i1 0
- %nop13861 = alloca i1, i1 0
- %nop13862 = alloca i1, i1 0
- %nop13863 = alloca i1, i1 0
- %nop13864 = alloca i1, i1 0
- %nop13865 = alloca i1, i1 0
- %nop13866 = alloca i1, i1 0
- %nop13867 = alloca i1, i1 0
- %nop13868 = alloca i1, i1 0
- %nop13869 = alloca i1, i1 0
- %nop13870 = alloca i1, i1 0
- %nop13871 = alloca i1, i1 0
- %nop13872 = alloca i1, i1 0
- %nop13873 = alloca i1, i1 0
- %nop13874 = alloca i1, i1 0
- %nop13875 = alloca i1, i1 0
- %nop13876 = alloca i1, i1 0
- %nop13877 = alloca i1, i1 0
- %nop13878 = alloca i1, i1 0
- %nop13879 = alloca i1, i1 0
- %nop13880 = alloca i1, i1 0
- %nop13881 = alloca i1, i1 0
- %nop13882 = alloca i1, i1 0
- %nop13883 = alloca i1, i1 0
- %nop13884 = alloca i1, i1 0
- %nop13885 = alloca i1, i1 0
- %nop13886 = alloca i1, i1 0
- %nop13887 = alloca i1, i1 0
- %nop13888 = alloca i1, i1 0
- %nop13889 = alloca i1, i1 0
- %nop13890 = alloca i1, i1 0
- %nop13891 = alloca i1, i1 0
- %nop13892 = alloca i1, i1 0
- %nop13893 = alloca i1, i1 0
- %nop13894 = alloca i1, i1 0
- %nop13895 = alloca i1, i1 0
- %nop13896 = alloca i1, i1 0
- %nop13897 = alloca i1, i1 0
- %nop13898 = alloca i1, i1 0
- %nop13899 = alloca i1, i1 0
- %nop13900 = alloca i1, i1 0
- %nop13901 = alloca i1, i1 0
- %nop13902 = alloca i1, i1 0
- %nop13903 = alloca i1, i1 0
- %nop13904 = alloca i1, i1 0
- %nop13905 = alloca i1, i1 0
- %nop13906 = alloca i1, i1 0
- %nop13907 = alloca i1, i1 0
- %nop13908 = alloca i1, i1 0
- %nop13909 = alloca i1, i1 0
- %nop13910 = alloca i1, i1 0
- %nop13911 = alloca i1, i1 0
- %nop13912 = alloca i1, i1 0
- %nop13913 = alloca i1, i1 0
- %nop13914 = alloca i1, i1 0
- %nop13915 = alloca i1, i1 0
- %nop13916 = alloca i1, i1 0
- %nop13917 = alloca i1, i1 0
- %nop13918 = alloca i1, i1 0
- %nop13919 = alloca i1, i1 0
- %nop13920 = alloca i1, i1 0
- %nop13921 = alloca i1, i1 0
- %nop13922 = alloca i1, i1 0
- %nop13923 = alloca i1, i1 0
- %nop13924 = alloca i1, i1 0
- %nop13925 = alloca i1, i1 0
- %nop13926 = alloca i1, i1 0
- %nop13927 = alloca i1, i1 0
- %nop13928 = alloca i1, i1 0
- %nop13929 = alloca i1, i1 0
- %nop13930 = alloca i1, i1 0
- %nop13931 = alloca i1, i1 0
- %nop13932 = alloca i1, i1 0
- %nop13933 = alloca i1, i1 0
- %nop13934 = alloca i1, i1 0
- %nop13935 = alloca i1, i1 0
- %nop13936 = alloca i1, i1 0
- %nop13937 = alloca i1, i1 0
- %nop13938 = alloca i1, i1 0
- %nop13939 = alloca i1, i1 0
- %nop13940 = alloca i1, i1 0
- %nop13941 = alloca i1, i1 0
- %nop13942 = alloca i1, i1 0
- %nop13943 = alloca i1, i1 0
- %nop13944 = alloca i1, i1 0
- %nop13945 = alloca i1, i1 0
- %nop13946 = alloca i1, i1 0
- %nop13947 = alloca i1, i1 0
- %nop13948 = alloca i1, i1 0
- %nop13949 = alloca i1, i1 0
- %nop13950 = alloca i1, i1 0
- %nop13951 = alloca i1, i1 0
- %nop13952 = alloca i1, i1 0
- %nop13953 = alloca i1, i1 0
- %nop13954 = alloca i1, i1 0
- %nop13955 = alloca i1, i1 0
- %nop13956 = alloca i1, i1 0
- %nop13957 = alloca i1, i1 0
- %nop13958 = alloca i1, i1 0
- %nop13959 = alloca i1, i1 0
- %nop13960 = alloca i1, i1 0
- %nop13961 = alloca i1, i1 0
- %nop13962 = alloca i1, i1 0
- %nop13963 = alloca i1, i1 0
- %nop13964 = alloca i1, i1 0
- %nop13965 = alloca i1, i1 0
- %nop13966 = alloca i1, i1 0
- %nop13967 = alloca i1, i1 0
- %nop13968 = alloca i1, i1 0
- %nop13969 = alloca i1, i1 0
- %nop13970 = alloca i1, i1 0
- %nop13971 = alloca i1, i1 0
- %nop13972 = alloca i1, i1 0
- %nop13973 = alloca i1, i1 0
- %nop13974 = alloca i1, i1 0
- %nop13975 = alloca i1, i1 0
- %nop13976 = alloca i1, i1 0
- %nop13977 = alloca i1, i1 0
- %nop13978 = alloca i1, i1 0
- %nop13979 = alloca i1, i1 0
- %nop13980 = alloca i1, i1 0
- %nop13981 = alloca i1, i1 0
- %nop13982 = alloca i1, i1 0
- %nop13983 = alloca i1, i1 0
- %nop13984 = alloca i1, i1 0
- %nop13985 = alloca i1, i1 0
- %nop13986 = alloca i1, i1 0
- %nop13987 = alloca i1, i1 0
- %nop13988 = alloca i1, i1 0
- %nop13989 = alloca i1, i1 0
- %nop13990 = alloca i1, i1 0
- %nop13991 = alloca i1, i1 0
- %nop13992 = alloca i1, i1 0
- %nop13993 = alloca i1, i1 0
- %nop13994 = alloca i1, i1 0
- %nop13995 = alloca i1, i1 0
- %nop13996 = alloca i1, i1 0
- %nop13997 = alloca i1, i1 0
- %nop13998 = alloca i1, i1 0
- %nop13999 = alloca i1, i1 0
- %nop14000 = alloca i1, i1 0
- %nop14001 = alloca i1, i1 0
- %nop14002 = alloca i1, i1 0
- %nop14003 = alloca i1, i1 0
- %nop14004 = alloca i1, i1 0
- %nop14005 = alloca i1, i1 0
- %nop14006 = alloca i1, i1 0
- %nop14007 = alloca i1, i1 0
- %nop14008 = alloca i1, i1 0
- %nop14009 = alloca i1, i1 0
- %nop14010 = alloca i1, i1 0
- %nop14011 = alloca i1, i1 0
- %nop14012 = alloca i1, i1 0
- %nop14013 = alloca i1, i1 0
- %nop14014 = alloca i1, i1 0
- %nop14015 = alloca i1, i1 0
- %nop14016 = alloca i1, i1 0
- %nop14017 = alloca i1, i1 0
- %nop14018 = alloca i1, i1 0
- %nop14019 = alloca i1, i1 0
- %nop14020 = alloca i1, i1 0
- %nop14021 = alloca i1, i1 0
- %nop14022 = alloca i1, i1 0
- %nop14023 = alloca i1, i1 0
- %nop14024 = alloca i1, i1 0
- %nop14025 = alloca i1, i1 0
- %nop14026 = alloca i1, i1 0
- %nop14027 = alloca i1, i1 0
- %nop14028 = alloca i1, i1 0
- %nop14029 = alloca i1, i1 0
- %nop14030 = alloca i1, i1 0
- %nop14031 = alloca i1, i1 0
- %nop14032 = alloca i1, i1 0
- %nop14033 = alloca i1, i1 0
- %nop14034 = alloca i1, i1 0
- %nop14035 = alloca i1, i1 0
- %nop14036 = alloca i1, i1 0
- %nop14037 = alloca i1, i1 0
- %nop14038 = alloca i1, i1 0
- %nop14039 = alloca i1, i1 0
- %nop14040 = alloca i1, i1 0
- %nop14041 = alloca i1, i1 0
- %nop14042 = alloca i1, i1 0
- %nop14043 = alloca i1, i1 0
- %nop14044 = alloca i1, i1 0
- %nop14045 = alloca i1, i1 0
- %nop14046 = alloca i1, i1 0
- %nop14047 = alloca i1, i1 0
- %nop14048 = alloca i1, i1 0
- %nop14049 = alloca i1, i1 0
- %nop14050 = alloca i1, i1 0
- %nop14051 = alloca i1, i1 0
- %nop14052 = alloca i1, i1 0
- %nop14053 = alloca i1, i1 0
- %nop14054 = alloca i1, i1 0
- %nop14055 = alloca i1, i1 0
- %nop14056 = alloca i1, i1 0
- %nop14057 = alloca i1, i1 0
- %nop14058 = alloca i1, i1 0
- %nop14059 = alloca i1, i1 0
- %nop14060 = alloca i1, i1 0
- %nop14061 = alloca i1, i1 0
- %nop14062 = alloca i1, i1 0
- %nop14063 = alloca i1, i1 0
- %nop14064 = alloca i1, i1 0
- %nop14065 = alloca i1, i1 0
- %nop14066 = alloca i1, i1 0
- %nop14067 = alloca i1, i1 0
- %nop14068 = alloca i1, i1 0
- %nop14069 = alloca i1, i1 0
- %nop14070 = alloca i1, i1 0
- %nop14071 = alloca i1, i1 0
- %nop14072 = alloca i1, i1 0
- %nop14073 = alloca i1, i1 0
- %nop14074 = alloca i1, i1 0
- %nop14075 = alloca i1, i1 0
- %nop14076 = alloca i1, i1 0
- %nop14077 = alloca i1, i1 0
- %nop14078 = alloca i1, i1 0
- %nop14079 = alloca i1, i1 0
- %nop14080 = alloca i1, i1 0
- %nop14081 = alloca i1, i1 0
- %nop14082 = alloca i1, i1 0
- %nop14083 = alloca i1, i1 0
- %nop14084 = alloca i1, i1 0
- %nop14085 = alloca i1, i1 0
- %nop14086 = alloca i1, i1 0
- %nop14087 = alloca i1, i1 0
- %nop14088 = alloca i1, i1 0
- %nop14089 = alloca i1, i1 0
- %nop14090 = alloca i1, i1 0
- %nop14091 = alloca i1, i1 0
- %nop14092 = alloca i1, i1 0
- %nop14093 = alloca i1, i1 0
- %nop14094 = alloca i1, i1 0
- %nop14095 = alloca i1, i1 0
- %nop14096 = alloca i1, i1 0
- %nop14097 = alloca i1, i1 0
- %nop14098 = alloca i1, i1 0
- %nop14099 = alloca i1, i1 0
- %nop14100 = alloca i1, i1 0
- %nop14101 = alloca i1, i1 0
- %nop14102 = alloca i1, i1 0
- %nop14103 = alloca i1, i1 0
- %nop14104 = alloca i1, i1 0
- %nop14105 = alloca i1, i1 0
- %nop14106 = alloca i1, i1 0
- %nop14107 = alloca i1, i1 0
- %nop14108 = alloca i1, i1 0
- %nop14109 = alloca i1, i1 0
- %nop14110 = alloca i1, i1 0
- %nop14111 = alloca i1, i1 0
- %nop14112 = alloca i1, i1 0
- %nop14113 = alloca i1, i1 0
- %nop14114 = alloca i1, i1 0
- %nop14115 = alloca i1, i1 0
- %nop14116 = alloca i1, i1 0
- %nop14117 = alloca i1, i1 0
- %nop14118 = alloca i1, i1 0
- %nop14119 = alloca i1, i1 0
- %nop14120 = alloca i1, i1 0
- %nop14121 = alloca i1, i1 0
- %nop14122 = alloca i1, i1 0
- %nop14123 = alloca i1, i1 0
- %nop14124 = alloca i1, i1 0
- %nop14125 = alloca i1, i1 0
- %nop14126 = alloca i1, i1 0
- %nop14127 = alloca i1, i1 0
- %nop14128 = alloca i1, i1 0
- %nop14129 = alloca i1, i1 0
- %nop14130 = alloca i1, i1 0
- %nop14131 = alloca i1, i1 0
- %nop14132 = alloca i1, i1 0
- %nop14133 = alloca i1, i1 0
- %nop14134 = alloca i1, i1 0
- %nop14135 = alloca i1, i1 0
- %nop14136 = alloca i1, i1 0
- %nop14137 = alloca i1, i1 0
- %nop14138 = alloca i1, i1 0
- %nop14139 = alloca i1, i1 0
- %nop14140 = alloca i1, i1 0
- %nop14141 = alloca i1, i1 0
- %nop14142 = alloca i1, i1 0
- %nop14143 = alloca i1, i1 0
- %nop14144 = alloca i1, i1 0
- %nop14145 = alloca i1, i1 0
- %nop14146 = alloca i1, i1 0
- %nop14147 = alloca i1, i1 0
- %nop14148 = alloca i1, i1 0
- %nop14149 = alloca i1, i1 0
- %nop14150 = alloca i1, i1 0
- %nop14151 = alloca i1, i1 0
- %nop14152 = alloca i1, i1 0
- %nop14153 = alloca i1, i1 0
- %nop14154 = alloca i1, i1 0
- %nop14155 = alloca i1, i1 0
- %nop14156 = alloca i1, i1 0
- %nop14157 = alloca i1, i1 0
- %nop14158 = alloca i1, i1 0
- %nop14159 = alloca i1, i1 0
- %nop14160 = alloca i1, i1 0
- %nop14161 = alloca i1, i1 0
- %nop14162 = alloca i1, i1 0
- %nop14163 = alloca i1, i1 0
- %nop14164 = alloca i1, i1 0
- %nop14165 = alloca i1, i1 0
- %nop14166 = alloca i1, i1 0
- %nop14167 = alloca i1, i1 0
- %nop14168 = alloca i1, i1 0
- %nop14169 = alloca i1, i1 0
- %nop14170 = alloca i1, i1 0
- %nop14171 = alloca i1, i1 0
- %nop14172 = alloca i1, i1 0
- %nop14173 = alloca i1, i1 0
- %nop14174 = alloca i1, i1 0
- %nop14175 = alloca i1, i1 0
- %nop14176 = alloca i1, i1 0
- %nop14177 = alloca i1, i1 0
- %nop14178 = alloca i1, i1 0
- %nop14179 = alloca i1, i1 0
- %nop14180 = alloca i1, i1 0
- %nop14181 = alloca i1, i1 0
- %nop14182 = alloca i1, i1 0
- %nop14183 = alloca i1, i1 0
- %nop14184 = alloca i1, i1 0
- %nop14185 = alloca i1, i1 0
- %nop14186 = alloca i1, i1 0
- %nop14187 = alloca i1, i1 0
- %nop14188 = alloca i1, i1 0
- %nop14189 = alloca i1, i1 0
- %nop14190 = alloca i1, i1 0
- %nop14191 = alloca i1, i1 0
- %nop14192 = alloca i1, i1 0
- %nop14193 = alloca i1, i1 0
- %nop14194 = alloca i1, i1 0
- %nop14195 = alloca i1, i1 0
- %nop14196 = alloca i1, i1 0
- %nop14197 = alloca i1, i1 0
- %nop14198 = alloca i1, i1 0
- %nop14199 = alloca i1, i1 0
- %nop14200 = alloca i1, i1 0
- %nop14201 = alloca i1, i1 0
- %nop14202 = alloca i1, i1 0
- %nop14203 = alloca i1, i1 0
- %nop14204 = alloca i1, i1 0
- %nop14205 = alloca i1, i1 0
- %nop14206 = alloca i1, i1 0
- %nop14207 = alloca i1, i1 0
- %nop14208 = alloca i1, i1 0
- %nop14209 = alloca i1, i1 0
- %nop14210 = alloca i1, i1 0
- %nop14211 = alloca i1, i1 0
- %nop14212 = alloca i1, i1 0
- %nop14213 = alloca i1, i1 0
- %nop14214 = alloca i1, i1 0
- %nop14215 = alloca i1, i1 0
- %nop14216 = alloca i1, i1 0
- %nop14217 = alloca i1, i1 0
- %nop14218 = alloca i1, i1 0
- %nop14219 = alloca i1, i1 0
- %nop14220 = alloca i1, i1 0
- %nop14221 = alloca i1, i1 0
- %nop14222 = alloca i1, i1 0
- %nop14223 = alloca i1, i1 0
- %nop14224 = alloca i1, i1 0
- %nop14225 = alloca i1, i1 0
- %nop14226 = alloca i1, i1 0
- %nop14227 = alloca i1, i1 0
- %nop14228 = alloca i1, i1 0
- %nop14229 = alloca i1, i1 0
- %nop14230 = alloca i1, i1 0
- %nop14231 = alloca i1, i1 0
- %nop14232 = alloca i1, i1 0
- %nop14233 = alloca i1, i1 0
- %nop14234 = alloca i1, i1 0
- %nop14235 = alloca i1, i1 0
- %nop14236 = alloca i1, i1 0
- %nop14237 = alloca i1, i1 0
- %nop14238 = alloca i1, i1 0
- %nop14239 = alloca i1, i1 0
- %nop14240 = alloca i1, i1 0
- %nop14241 = alloca i1, i1 0
- %nop14242 = alloca i1, i1 0
- %nop14243 = alloca i1, i1 0
- %nop14244 = alloca i1, i1 0
- %nop14245 = alloca i1, i1 0
- %nop14246 = alloca i1, i1 0
- %nop14247 = alloca i1, i1 0
- %nop14248 = alloca i1, i1 0
- %nop14249 = alloca i1, i1 0
- %nop14250 = alloca i1, i1 0
- %nop14251 = alloca i1, i1 0
- %nop14252 = alloca i1, i1 0
- %nop14253 = alloca i1, i1 0
- %nop14254 = alloca i1, i1 0
- %nop14255 = alloca i1, i1 0
- %nop14256 = alloca i1, i1 0
- %nop14257 = alloca i1, i1 0
- %nop14258 = alloca i1, i1 0
- %nop14259 = alloca i1, i1 0
- %nop14260 = alloca i1, i1 0
- %nop14261 = alloca i1, i1 0
- %nop14262 = alloca i1, i1 0
- %nop14263 = alloca i1, i1 0
- %nop14264 = alloca i1, i1 0
- %nop14265 = alloca i1, i1 0
- %nop14266 = alloca i1, i1 0
- %nop14267 = alloca i1, i1 0
- %nop14268 = alloca i1, i1 0
- %nop14269 = alloca i1, i1 0
- %nop14270 = alloca i1, i1 0
- %nop14271 = alloca i1, i1 0
- %nop14272 = alloca i1, i1 0
- %nop14273 = alloca i1, i1 0
- %nop14274 = alloca i1, i1 0
- %nop14275 = alloca i1, i1 0
- %nop14276 = alloca i1, i1 0
- %nop14277 = alloca i1, i1 0
- %nop14278 = alloca i1, i1 0
- %nop14279 = alloca i1, i1 0
- %nop14280 = alloca i1, i1 0
- %nop14281 = alloca i1, i1 0
- %nop14282 = alloca i1, i1 0
- %nop14283 = alloca i1, i1 0
- %nop14284 = alloca i1, i1 0
- %nop14285 = alloca i1, i1 0
- %nop14286 = alloca i1, i1 0
- %nop14287 = alloca i1, i1 0
- %nop14288 = alloca i1, i1 0
- %nop14289 = alloca i1, i1 0
- %nop14290 = alloca i1, i1 0
- %nop14291 = alloca i1, i1 0
- %nop14292 = alloca i1, i1 0
- %nop14293 = alloca i1, i1 0
- %nop14294 = alloca i1, i1 0
- %nop14295 = alloca i1, i1 0
- %nop14296 = alloca i1, i1 0
- %nop14297 = alloca i1, i1 0
- %nop14298 = alloca i1, i1 0
- %nop14299 = alloca i1, i1 0
- %nop14300 = alloca i1, i1 0
- %nop14301 = alloca i1, i1 0
- %nop14302 = alloca i1, i1 0
- %nop14303 = alloca i1, i1 0
- %nop14304 = alloca i1, i1 0
- %nop14305 = alloca i1, i1 0
- %nop14306 = alloca i1, i1 0
- %nop14307 = alloca i1, i1 0
- %nop14308 = alloca i1, i1 0
- %nop14309 = alloca i1, i1 0
- %nop14310 = alloca i1, i1 0
- %nop14311 = alloca i1, i1 0
- %nop14312 = alloca i1, i1 0
- %nop14313 = alloca i1, i1 0
- %nop14314 = alloca i1, i1 0
- %nop14315 = alloca i1, i1 0
- %nop14316 = alloca i1, i1 0
- %nop14317 = alloca i1, i1 0
- %nop14318 = alloca i1, i1 0
- %nop14319 = alloca i1, i1 0
- %nop14320 = alloca i1, i1 0
- %nop14321 = alloca i1, i1 0
- %nop14322 = alloca i1, i1 0
- %nop14323 = alloca i1, i1 0
- %nop14324 = alloca i1, i1 0
- %nop14325 = alloca i1, i1 0
- %nop14326 = alloca i1, i1 0
- %nop14327 = alloca i1, i1 0
- %nop14328 = alloca i1, i1 0
- %nop14329 = alloca i1, i1 0
- %nop14330 = alloca i1, i1 0
- %nop14331 = alloca i1, i1 0
- %nop14332 = alloca i1, i1 0
- %nop14333 = alloca i1, i1 0
- %nop14334 = alloca i1, i1 0
- %nop14335 = alloca i1, i1 0
- %nop14336 = alloca i1, i1 0
- %nop14337 = alloca i1, i1 0
- %nop14338 = alloca i1, i1 0
- %nop14339 = alloca i1, i1 0
- %nop14340 = alloca i1, i1 0
- %nop14341 = alloca i1, i1 0
- %nop14342 = alloca i1, i1 0
- %nop14343 = alloca i1, i1 0
- %nop14344 = alloca i1, i1 0
- %nop14345 = alloca i1, i1 0
- %nop14346 = alloca i1, i1 0
- %nop14347 = alloca i1, i1 0
- %nop14348 = alloca i1, i1 0
- %nop14349 = alloca i1, i1 0
- %nop14350 = alloca i1, i1 0
- %nop14351 = alloca i1, i1 0
- %nop14352 = alloca i1, i1 0
- %nop14353 = alloca i1, i1 0
- %nop14354 = alloca i1, i1 0
- %nop14355 = alloca i1, i1 0
- %nop14356 = alloca i1, i1 0
- %nop14357 = alloca i1, i1 0
- %nop14358 = alloca i1, i1 0
- %nop14359 = alloca i1, i1 0
- %nop14360 = alloca i1, i1 0
- %nop14361 = alloca i1, i1 0
- %nop14362 = alloca i1, i1 0
- %nop14363 = alloca i1, i1 0
- %nop14364 = alloca i1, i1 0
- %nop14365 = alloca i1, i1 0
- %nop14366 = alloca i1, i1 0
- %nop14367 = alloca i1, i1 0
- %nop14368 = alloca i1, i1 0
- %nop14369 = alloca i1, i1 0
- %nop14370 = alloca i1, i1 0
- %nop14371 = alloca i1, i1 0
- %nop14372 = alloca i1, i1 0
- %nop14373 = alloca i1, i1 0
- %nop14374 = alloca i1, i1 0
- %nop14375 = alloca i1, i1 0
- %nop14376 = alloca i1, i1 0
- %nop14377 = alloca i1, i1 0
- %nop14378 = alloca i1, i1 0
- %nop14379 = alloca i1, i1 0
- %nop14380 = alloca i1, i1 0
- %nop14381 = alloca i1, i1 0
- %nop14382 = alloca i1, i1 0
- %nop14383 = alloca i1, i1 0
- %nop14384 = alloca i1, i1 0
- %nop14385 = alloca i1, i1 0
- %nop14386 = alloca i1, i1 0
- %nop14387 = alloca i1, i1 0
- %nop14388 = alloca i1, i1 0
- %nop14389 = alloca i1, i1 0
- %nop14390 = alloca i1, i1 0
- %nop14391 = alloca i1, i1 0
- %nop14392 = alloca i1, i1 0
- %nop14393 = alloca i1, i1 0
- %nop14394 = alloca i1, i1 0
- %nop14395 = alloca i1, i1 0
- %nop14396 = alloca i1, i1 0
- %nop14397 = alloca i1, i1 0
- %nop14398 = alloca i1, i1 0
- %nop14399 = alloca i1, i1 0
- %nop14400 = alloca i1, i1 0
- %nop14401 = alloca i1, i1 0
- %nop14402 = alloca i1, i1 0
- %nop14403 = alloca i1, i1 0
- %nop14404 = alloca i1, i1 0
- %nop14405 = alloca i1, i1 0
- %nop14406 = alloca i1, i1 0
- %nop14407 = alloca i1, i1 0
- %nop14408 = alloca i1, i1 0
- %nop14409 = alloca i1, i1 0
- %nop14410 = alloca i1, i1 0
- %nop14411 = alloca i1, i1 0
- %nop14412 = alloca i1, i1 0
- %nop14413 = alloca i1, i1 0
- %nop14414 = alloca i1, i1 0
- %nop14415 = alloca i1, i1 0
- %nop14416 = alloca i1, i1 0
- %nop14417 = alloca i1, i1 0
- %nop14418 = alloca i1, i1 0
- %nop14419 = alloca i1, i1 0
- %nop14420 = alloca i1, i1 0
- %nop14421 = alloca i1, i1 0
- %nop14422 = alloca i1, i1 0
- %nop14423 = alloca i1, i1 0
- %nop14424 = alloca i1, i1 0
- %nop14425 = alloca i1, i1 0
- %nop14426 = alloca i1, i1 0
- %nop14427 = alloca i1, i1 0
- %nop14428 = alloca i1, i1 0
- %nop14429 = alloca i1, i1 0
- %nop14430 = alloca i1, i1 0
- %nop14431 = alloca i1, i1 0
- %nop14432 = alloca i1, i1 0
- %nop14433 = alloca i1, i1 0
- %nop14434 = alloca i1, i1 0
- %nop14435 = alloca i1, i1 0
- %nop14436 = alloca i1, i1 0
- %nop14437 = alloca i1, i1 0
- %nop14438 = alloca i1, i1 0
- %nop14439 = alloca i1, i1 0
- %nop14440 = alloca i1, i1 0
- %nop14441 = alloca i1, i1 0
- %nop14442 = alloca i1, i1 0
- %nop14443 = alloca i1, i1 0
- %nop14444 = alloca i1, i1 0
- %nop14445 = alloca i1, i1 0
- %nop14446 = alloca i1, i1 0
- %nop14447 = alloca i1, i1 0
- %nop14448 = alloca i1, i1 0
- %nop14449 = alloca i1, i1 0
- %nop14450 = alloca i1, i1 0
- %nop14451 = alloca i1, i1 0
- %nop14452 = alloca i1, i1 0
- %nop14453 = alloca i1, i1 0
- %nop14454 = alloca i1, i1 0
- %nop14455 = alloca i1, i1 0
- %nop14456 = alloca i1, i1 0
- %nop14457 = alloca i1, i1 0
- %nop14458 = alloca i1, i1 0
- %nop14459 = alloca i1, i1 0
- %nop14460 = alloca i1, i1 0
- %nop14461 = alloca i1, i1 0
- %nop14462 = alloca i1, i1 0
- %nop14463 = alloca i1, i1 0
- %nop14464 = alloca i1, i1 0
- %nop14465 = alloca i1, i1 0
- %nop14466 = alloca i1, i1 0
- %nop14467 = alloca i1, i1 0
- %nop14468 = alloca i1, i1 0
- %nop14469 = alloca i1, i1 0
- %nop14470 = alloca i1, i1 0
- %nop14471 = alloca i1, i1 0
- %nop14472 = alloca i1, i1 0
- %nop14473 = alloca i1, i1 0
- %nop14474 = alloca i1, i1 0
- %nop14475 = alloca i1, i1 0
- %nop14476 = alloca i1, i1 0
- %nop14477 = alloca i1, i1 0
- %nop14478 = alloca i1, i1 0
- %nop14479 = alloca i1, i1 0
- %nop14480 = alloca i1, i1 0
- %nop14481 = alloca i1, i1 0
- %nop14482 = alloca i1, i1 0
- %nop14483 = alloca i1, i1 0
- %nop14484 = alloca i1, i1 0
- %nop14485 = alloca i1, i1 0
- %nop14486 = alloca i1, i1 0
- %nop14487 = alloca i1, i1 0
- %nop14488 = alloca i1, i1 0
- %nop14489 = alloca i1, i1 0
- %nop14490 = alloca i1, i1 0
- %nop14491 = alloca i1, i1 0
- %nop14492 = alloca i1, i1 0
- %nop14493 = alloca i1, i1 0
- %nop14494 = alloca i1, i1 0
- %nop14495 = alloca i1, i1 0
- %nop14496 = alloca i1, i1 0
- %nop14497 = alloca i1, i1 0
- %nop14498 = alloca i1, i1 0
- %nop14499 = alloca i1, i1 0
- %nop14500 = alloca i1, i1 0
- %nop14501 = alloca i1, i1 0
- %nop14502 = alloca i1, i1 0
- %nop14503 = alloca i1, i1 0
- %nop14504 = alloca i1, i1 0
- %nop14505 = alloca i1, i1 0
- %nop14506 = alloca i1, i1 0
- %nop14507 = alloca i1, i1 0
- %nop14508 = alloca i1, i1 0
- %nop14509 = alloca i1, i1 0
- %nop14510 = alloca i1, i1 0
- %nop14511 = alloca i1, i1 0
- %nop14512 = alloca i1, i1 0
- %nop14513 = alloca i1, i1 0
- %nop14514 = alloca i1, i1 0
- %nop14515 = alloca i1, i1 0
- %nop14516 = alloca i1, i1 0
- %nop14517 = alloca i1, i1 0
- %nop14518 = alloca i1, i1 0
- %nop14519 = alloca i1, i1 0
- %nop14520 = alloca i1, i1 0
- %nop14521 = alloca i1, i1 0
- %nop14522 = alloca i1, i1 0
- %nop14523 = alloca i1, i1 0
- %nop14524 = alloca i1, i1 0
- %nop14525 = alloca i1, i1 0
- %nop14526 = alloca i1, i1 0
- %nop14527 = alloca i1, i1 0
- %nop14528 = alloca i1, i1 0
- %nop14529 = alloca i1, i1 0
- %nop14530 = alloca i1, i1 0
- %nop14531 = alloca i1, i1 0
- %nop14532 = alloca i1, i1 0
- %nop14533 = alloca i1, i1 0
- %nop14534 = alloca i1, i1 0
- %nop14535 = alloca i1, i1 0
- %nop14536 = alloca i1, i1 0
- %nop14537 = alloca i1, i1 0
- %nop14538 = alloca i1, i1 0
- %nop14539 = alloca i1, i1 0
- %nop14540 = alloca i1, i1 0
- %nop14541 = alloca i1, i1 0
- %nop14542 = alloca i1, i1 0
- %nop14543 = alloca i1, i1 0
- %nop14544 = alloca i1, i1 0
- %nop14545 = alloca i1, i1 0
- %nop14546 = alloca i1, i1 0
- %nop14547 = alloca i1, i1 0
- %nop14548 = alloca i1, i1 0
- %nop14549 = alloca i1, i1 0
- %nop14550 = alloca i1, i1 0
- %nop14551 = alloca i1, i1 0
- %nop14552 = alloca i1, i1 0
- %nop14553 = alloca i1, i1 0
- %nop14554 = alloca i1, i1 0
- %nop14555 = alloca i1, i1 0
- %nop14556 = alloca i1, i1 0
- %nop14557 = alloca i1, i1 0
- %nop14558 = alloca i1, i1 0
- %nop14559 = alloca i1, i1 0
- %nop14560 = alloca i1, i1 0
- %nop14561 = alloca i1, i1 0
- %nop14562 = alloca i1, i1 0
- %nop14563 = alloca i1, i1 0
- %nop14564 = alloca i1, i1 0
- %nop14565 = alloca i1, i1 0
- %nop14566 = alloca i1, i1 0
- %nop14567 = alloca i1, i1 0
- %nop14568 = alloca i1, i1 0
- %nop14569 = alloca i1, i1 0
- %nop14570 = alloca i1, i1 0
- %nop14571 = alloca i1, i1 0
- %nop14572 = alloca i1, i1 0
- %nop14573 = alloca i1, i1 0
- %nop14574 = alloca i1, i1 0
- %nop14575 = alloca i1, i1 0
- %nop14576 = alloca i1, i1 0
- %nop14577 = alloca i1, i1 0
- %nop14578 = alloca i1, i1 0
- %nop14579 = alloca i1, i1 0
- %nop14580 = alloca i1, i1 0
- %nop14581 = alloca i1, i1 0
- %nop14582 = alloca i1, i1 0
- %nop14583 = alloca i1, i1 0
- %nop14584 = alloca i1, i1 0
- %nop14585 = alloca i1, i1 0
- %nop14586 = alloca i1, i1 0
- %nop14587 = alloca i1, i1 0
- %nop14588 = alloca i1, i1 0
- %nop14589 = alloca i1, i1 0
- %nop14590 = alloca i1, i1 0
- %nop14591 = alloca i1, i1 0
- %nop14592 = alloca i1, i1 0
- %nop14593 = alloca i1, i1 0
- %nop14594 = alloca i1, i1 0
- %nop14595 = alloca i1, i1 0
- %nop14596 = alloca i1, i1 0
- %nop14597 = alloca i1, i1 0
- %nop14598 = alloca i1, i1 0
- %nop14599 = alloca i1, i1 0
- %nop14600 = alloca i1, i1 0
- %nop14601 = alloca i1, i1 0
- %nop14602 = alloca i1, i1 0
- %nop14603 = alloca i1, i1 0
- %nop14604 = alloca i1, i1 0
- %nop14605 = alloca i1, i1 0
- %nop14606 = alloca i1, i1 0
- %nop14607 = alloca i1, i1 0
- %nop14608 = alloca i1, i1 0
- %nop14609 = alloca i1, i1 0
- %nop14610 = alloca i1, i1 0
- %nop14611 = alloca i1, i1 0
- %nop14612 = alloca i1, i1 0
- %nop14613 = alloca i1, i1 0
- %nop14614 = alloca i1, i1 0
- %nop14615 = alloca i1, i1 0
- %nop14616 = alloca i1, i1 0
- %nop14617 = alloca i1, i1 0
- %nop14618 = alloca i1, i1 0
- %nop14619 = alloca i1, i1 0
- %nop14620 = alloca i1, i1 0
- %nop14621 = alloca i1, i1 0
- %nop14622 = alloca i1, i1 0
- %nop14623 = alloca i1, i1 0
- %nop14624 = alloca i1, i1 0
- %nop14625 = alloca i1, i1 0
- %nop14626 = alloca i1, i1 0
- %nop14627 = alloca i1, i1 0
- %nop14628 = alloca i1, i1 0
- %nop14629 = alloca i1, i1 0
- %nop14630 = alloca i1, i1 0
- %nop14631 = alloca i1, i1 0
- %nop14632 = alloca i1, i1 0
- %nop14633 = alloca i1, i1 0
- %nop14634 = alloca i1, i1 0
- %nop14635 = alloca i1, i1 0
- %nop14636 = alloca i1, i1 0
- %nop14637 = alloca i1, i1 0
- %nop14638 = alloca i1, i1 0
- %nop14639 = alloca i1, i1 0
- %nop14640 = alloca i1, i1 0
- %nop14641 = alloca i1, i1 0
- %nop14642 = alloca i1, i1 0
- %nop14643 = alloca i1, i1 0
- %nop14644 = alloca i1, i1 0
- %nop14645 = alloca i1, i1 0
- %nop14646 = alloca i1, i1 0
- %nop14647 = alloca i1, i1 0
- %nop14648 = alloca i1, i1 0
- %nop14649 = alloca i1, i1 0
- %nop14650 = alloca i1, i1 0
- %nop14651 = alloca i1, i1 0
- %nop14652 = alloca i1, i1 0
- %nop14653 = alloca i1, i1 0
- %nop14654 = alloca i1, i1 0
- %nop14655 = alloca i1, i1 0
- %nop14656 = alloca i1, i1 0
- %nop14657 = alloca i1, i1 0
- %nop14658 = alloca i1, i1 0
- %nop14659 = alloca i1, i1 0
- %nop14660 = alloca i1, i1 0
- %nop14661 = alloca i1, i1 0
- %nop14662 = alloca i1, i1 0
- %nop14663 = alloca i1, i1 0
- %nop14664 = alloca i1, i1 0
- %nop14665 = alloca i1, i1 0
- %nop14666 = alloca i1, i1 0
- %nop14667 = alloca i1, i1 0
- %nop14668 = alloca i1, i1 0
- %nop14669 = alloca i1, i1 0
- %nop14670 = alloca i1, i1 0
- %nop14671 = alloca i1, i1 0
- %nop14672 = alloca i1, i1 0
- %nop14673 = alloca i1, i1 0
- %nop14674 = alloca i1, i1 0
- %nop14675 = alloca i1, i1 0
- %nop14676 = alloca i1, i1 0
- %nop14677 = alloca i1, i1 0
- %nop14678 = alloca i1, i1 0
- %nop14679 = alloca i1, i1 0
- %nop14680 = alloca i1, i1 0
- %nop14681 = alloca i1, i1 0
- %nop14682 = alloca i1, i1 0
- %nop14683 = alloca i1, i1 0
- %nop14684 = alloca i1, i1 0
- %nop14685 = alloca i1, i1 0
- %nop14686 = alloca i1, i1 0
- %nop14687 = alloca i1, i1 0
- %nop14688 = alloca i1, i1 0
- %nop14689 = alloca i1, i1 0
- %nop14690 = alloca i1, i1 0
- %nop14691 = alloca i1, i1 0
- %nop14692 = alloca i1, i1 0
- %nop14693 = alloca i1, i1 0
- %nop14694 = alloca i1, i1 0
- %nop14695 = alloca i1, i1 0
- %nop14696 = alloca i1, i1 0
- %nop14697 = alloca i1, i1 0
- %nop14698 = alloca i1, i1 0
- %nop14699 = alloca i1, i1 0
- %nop14700 = alloca i1, i1 0
- %nop14701 = alloca i1, i1 0
- %nop14702 = alloca i1, i1 0
- %nop14703 = alloca i1, i1 0
- %nop14704 = alloca i1, i1 0
- %nop14705 = alloca i1, i1 0
- %nop14706 = alloca i1, i1 0
- %nop14707 = alloca i1, i1 0
- %nop14708 = alloca i1, i1 0
- %nop14709 = alloca i1, i1 0
- %nop14710 = alloca i1, i1 0
- %nop14711 = alloca i1, i1 0
- %nop14712 = alloca i1, i1 0
- %nop14713 = alloca i1, i1 0
- %nop14714 = alloca i1, i1 0
- %nop14715 = alloca i1, i1 0
- %nop14716 = alloca i1, i1 0
- %nop14717 = alloca i1, i1 0
- %nop14718 = alloca i1, i1 0
- %nop14719 = alloca i1, i1 0
- %nop14720 = alloca i1, i1 0
- %nop14721 = alloca i1, i1 0
- %nop14722 = alloca i1, i1 0
- %nop14723 = alloca i1, i1 0
- %nop14724 = alloca i1, i1 0
- %nop14725 = alloca i1, i1 0
- %nop14726 = alloca i1, i1 0
- %nop14727 = alloca i1, i1 0
- %nop14728 = alloca i1, i1 0
- %nop14729 = alloca i1, i1 0
- %nop14730 = alloca i1, i1 0
- %nop14731 = alloca i1, i1 0
- %nop14732 = alloca i1, i1 0
- %nop14733 = alloca i1, i1 0
- %nop14734 = alloca i1, i1 0
- %nop14735 = alloca i1, i1 0
- %nop14736 = alloca i1, i1 0
- %nop14737 = alloca i1, i1 0
- %nop14738 = alloca i1, i1 0
- %nop14739 = alloca i1, i1 0
- %nop14740 = alloca i1, i1 0
- %nop14741 = alloca i1, i1 0
- %nop14742 = alloca i1, i1 0
- %nop14743 = alloca i1, i1 0
- %nop14744 = alloca i1, i1 0
- %nop14745 = alloca i1, i1 0
- %nop14746 = alloca i1, i1 0
- %nop14747 = alloca i1, i1 0
- %nop14748 = alloca i1, i1 0
- %nop14749 = alloca i1, i1 0
- %nop14750 = alloca i1, i1 0
- %nop14751 = alloca i1, i1 0
- %nop14752 = alloca i1, i1 0
- %nop14753 = alloca i1, i1 0
- %nop14754 = alloca i1, i1 0
- %nop14755 = alloca i1, i1 0
- %nop14756 = alloca i1, i1 0
- %nop14757 = alloca i1, i1 0
- %nop14758 = alloca i1, i1 0
- %nop14759 = alloca i1, i1 0
- %nop14760 = alloca i1, i1 0
- %nop14761 = alloca i1, i1 0
- %nop14762 = alloca i1, i1 0
- %nop14763 = alloca i1, i1 0
- %nop14764 = alloca i1, i1 0
- %nop14765 = alloca i1, i1 0
- %nop14766 = alloca i1, i1 0
- %nop14767 = alloca i1, i1 0
- %nop14768 = alloca i1, i1 0
- %nop14769 = alloca i1, i1 0
- %nop14770 = alloca i1, i1 0
- %nop14771 = alloca i1, i1 0
- %nop14772 = alloca i1, i1 0
- %nop14773 = alloca i1, i1 0
- %nop14774 = alloca i1, i1 0
- %nop14775 = alloca i1, i1 0
- %nop14776 = alloca i1, i1 0
- %nop14777 = alloca i1, i1 0
- %nop14778 = alloca i1, i1 0
- %nop14779 = alloca i1, i1 0
- %nop14780 = alloca i1, i1 0
- %nop14781 = alloca i1, i1 0
- %nop14782 = alloca i1, i1 0
- %nop14783 = alloca i1, i1 0
- %nop14784 = alloca i1, i1 0
- %nop14785 = alloca i1, i1 0
- %nop14786 = alloca i1, i1 0
- %nop14787 = alloca i1, i1 0
- %nop14788 = alloca i1, i1 0
- %nop14789 = alloca i1, i1 0
- %nop14790 = alloca i1, i1 0
- %nop14791 = alloca i1, i1 0
- %nop14792 = alloca i1, i1 0
- %nop14793 = alloca i1, i1 0
- %nop14794 = alloca i1, i1 0
- %nop14795 = alloca i1, i1 0
- %nop14796 = alloca i1, i1 0
- %nop14797 = alloca i1, i1 0
- %nop14798 = alloca i1, i1 0
- %nop14799 = alloca i1, i1 0
- %nop14800 = alloca i1, i1 0
- %nop14801 = alloca i1, i1 0
- %nop14802 = alloca i1, i1 0
- %nop14803 = alloca i1, i1 0
- %nop14804 = alloca i1, i1 0
- %nop14805 = alloca i1, i1 0
- %nop14806 = alloca i1, i1 0
- %nop14807 = alloca i1, i1 0
- %nop14808 = alloca i1, i1 0
- %nop14809 = alloca i1, i1 0
- %nop14810 = alloca i1, i1 0
- %nop14811 = alloca i1, i1 0
- %nop14812 = alloca i1, i1 0
- %nop14813 = alloca i1, i1 0
- %nop14814 = alloca i1, i1 0
- %nop14815 = alloca i1, i1 0
- %nop14816 = alloca i1, i1 0
- %nop14817 = alloca i1, i1 0
- %nop14818 = alloca i1, i1 0
- %nop14819 = alloca i1, i1 0
- %nop14820 = alloca i1, i1 0
- %nop14821 = alloca i1, i1 0
- %nop14822 = alloca i1, i1 0
- %nop14823 = alloca i1, i1 0
- %nop14824 = alloca i1, i1 0
- %nop14825 = alloca i1, i1 0
- %nop14826 = alloca i1, i1 0
- %nop14827 = alloca i1, i1 0
- %nop14828 = alloca i1, i1 0
- %nop14829 = alloca i1, i1 0
- %nop14830 = alloca i1, i1 0
- %nop14831 = alloca i1, i1 0
- %nop14832 = alloca i1, i1 0
- %nop14833 = alloca i1, i1 0
- %nop14834 = alloca i1, i1 0
- %nop14835 = alloca i1, i1 0
- %nop14836 = alloca i1, i1 0
- %nop14837 = alloca i1, i1 0
- %nop14838 = alloca i1, i1 0
- %nop14839 = alloca i1, i1 0
- %nop14840 = alloca i1, i1 0
- %nop14841 = alloca i1, i1 0
- %nop14842 = alloca i1, i1 0
- %nop14843 = alloca i1, i1 0
- %nop14844 = alloca i1, i1 0
- %nop14845 = alloca i1, i1 0
- %nop14846 = alloca i1, i1 0
- %nop14847 = alloca i1, i1 0
- %nop14848 = alloca i1, i1 0
- %nop14849 = alloca i1, i1 0
- %nop14850 = alloca i1, i1 0
- %nop14851 = alloca i1, i1 0
- %nop14852 = alloca i1, i1 0
- %nop14853 = alloca i1, i1 0
- %nop14854 = alloca i1, i1 0
- %nop14855 = alloca i1, i1 0
- %nop14856 = alloca i1, i1 0
- %nop14857 = alloca i1, i1 0
- %nop14858 = alloca i1, i1 0
- %nop14859 = alloca i1, i1 0
- %nop14860 = alloca i1, i1 0
- %nop14861 = alloca i1, i1 0
- %nop14862 = alloca i1, i1 0
- %nop14863 = alloca i1, i1 0
- %nop14864 = alloca i1, i1 0
- %nop14865 = alloca i1, i1 0
- %nop14866 = alloca i1, i1 0
- %nop14867 = alloca i1, i1 0
- %nop14868 = alloca i1, i1 0
- %nop14869 = alloca i1, i1 0
- %nop14870 = alloca i1, i1 0
- %nop14871 = alloca i1, i1 0
- %nop14872 = alloca i1, i1 0
- %nop14873 = alloca i1, i1 0
- %nop14874 = alloca i1, i1 0
- %nop14875 = alloca i1, i1 0
- %nop14876 = alloca i1, i1 0
- %nop14877 = alloca i1, i1 0
- %nop14878 = alloca i1, i1 0
- %nop14879 = alloca i1, i1 0
- %nop14880 = alloca i1, i1 0
- %nop14881 = alloca i1, i1 0
- %nop14882 = alloca i1, i1 0
- %nop14883 = alloca i1, i1 0
- %nop14884 = alloca i1, i1 0
- %nop14885 = alloca i1, i1 0
- %nop14886 = alloca i1, i1 0
- %nop14887 = alloca i1, i1 0
- %nop14888 = alloca i1, i1 0
- %nop14889 = alloca i1, i1 0
- %nop14890 = alloca i1, i1 0
- %nop14891 = alloca i1, i1 0
- %nop14892 = alloca i1, i1 0
- %nop14893 = alloca i1, i1 0
- %nop14894 = alloca i1, i1 0
- %nop14895 = alloca i1, i1 0
- %nop14896 = alloca i1, i1 0
- %nop14897 = alloca i1, i1 0
- %nop14898 = alloca i1, i1 0
- %nop14899 = alloca i1, i1 0
- %nop14900 = alloca i1, i1 0
- %nop14901 = alloca i1, i1 0
- %nop14902 = alloca i1, i1 0
- %nop14903 = alloca i1, i1 0
- %nop14904 = alloca i1, i1 0
- %nop14905 = alloca i1, i1 0
- %nop14906 = alloca i1, i1 0
- %nop14907 = alloca i1, i1 0
- %nop14908 = alloca i1, i1 0
- %nop14909 = alloca i1, i1 0
- %nop14910 = alloca i1, i1 0
- %nop14911 = alloca i1, i1 0
- %nop14912 = alloca i1, i1 0
- %nop14913 = alloca i1, i1 0
- %nop14914 = alloca i1, i1 0
- %nop14915 = alloca i1, i1 0
- %nop14916 = alloca i1, i1 0
- %nop14917 = alloca i1, i1 0
- %nop14918 = alloca i1, i1 0
- %nop14919 = alloca i1, i1 0
- %nop14920 = alloca i1, i1 0
- %nop14921 = alloca i1, i1 0
- %nop14922 = alloca i1, i1 0
- %nop14923 = alloca i1, i1 0
- %nop14924 = alloca i1, i1 0
- %nop14925 = alloca i1, i1 0
- %nop14926 = alloca i1, i1 0
- %nop14927 = alloca i1, i1 0
- %nop14928 = alloca i1, i1 0
- %nop14929 = alloca i1, i1 0
- %nop14930 = alloca i1, i1 0
- %nop14931 = alloca i1, i1 0
- %nop14932 = alloca i1, i1 0
- %nop14933 = alloca i1, i1 0
- %nop14934 = alloca i1, i1 0
- %nop14935 = alloca i1, i1 0
- %nop14936 = alloca i1, i1 0
- %nop14937 = alloca i1, i1 0
- %nop14938 = alloca i1, i1 0
- %nop14939 = alloca i1, i1 0
- %nop14940 = alloca i1, i1 0
- %nop14941 = alloca i1, i1 0
- %nop14942 = alloca i1, i1 0
- %nop14943 = alloca i1, i1 0
- %nop14944 = alloca i1, i1 0
- %nop14945 = alloca i1, i1 0
- %nop14946 = alloca i1, i1 0
- %nop14947 = alloca i1, i1 0
- %nop14948 = alloca i1, i1 0
- %nop14949 = alloca i1, i1 0
- %nop14950 = alloca i1, i1 0
- %nop14951 = alloca i1, i1 0
- %nop14952 = alloca i1, i1 0
- %nop14953 = alloca i1, i1 0
- %nop14954 = alloca i1, i1 0
- %nop14955 = alloca i1, i1 0
- %nop14956 = alloca i1, i1 0
- %nop14957 = alloca i1, i1 0
- %nop14958 = alloca i1, i1 0
- %nop14959 = alloca i1, i1 0
- %nop14960 = alloca i1, i1 0
- %nop14961 = alloca i1, i1 0
- %nop14962 = alloca i1, i1 0
- %nop14963 = alloca i1, i1 0
- %nop14964 = alloca i1, i1 0
- %nop14965 = alloca i1, i1 0
- %nop14966 = alloca i1, i1 0
- %nop14967 = alloca i1, i1 0
- %nop14968 = alloca i1, i1 0
- %nop14969 = alloca i1, i1 0
- %nop14970 = alloca i1, i1 0
- %nop14971 = alloca i1, i1 0
- %nop14972 = alloca i1, i1 0
- %nop14973 = alloca i1, i1 0
- %nop14974 = alloca i1, i1 0
- %nop14975 = alloca i1, i1 0
- %nop14976 = alloca i1, i1 0
- %nop14977 = alloca i1, i1 0
- %nop14978 = alloca i1, i1 0
- %nop14979 = alloca i1, i1 0
- %nop14980 = alloca i1, i1 0
- %nop14981 = alloca i1, i1 0
- %nop14982 = alloca i1, i1 0
- %nop14983 = alloca i1, i1 0
- %nop14984 = alloca i1, i1 0
- %nop14985 = alloca i1, i1 0
- %nop14986 = alloca i1, i1 0
- %nop14987 = alloca i1, i1 0
- %nop14988 = alloca i1, i1 0
- %nop14989 = alloca i1, i1 0
- %nop14990 = alloca i1, i1 0
- %nop14991 = alloca i1, i1 0
- %nop14992 = alloca i1, i1 0
- %nop14993 = alloca i1, i1 0
- %nop14994 = alloca i1, i1 0
- %nop14995 = alloca i1, i1 0
- %nop14996 = alloca i1, i1 0
- %nop14997 = alloca i1, i1 0
- %nop14998 = alloca i1, i1 0
- %nop14999 = alloca i1, i1 0
- %nop15000 = alloca i1, i1 0
- %nop15001 = alloca i1, i1 0
- %nop15002 = alloca i1, i1 0
- %nop15003 = alloca i1, i1 0
- %nop15004 = alloca i1, i1 0
- %nop15005 = alloca i1, i1 0
- %nop15006 = alloca i1, i1 0
- %nop15007 = alloca i1, i1 0
- %nop15008 = alloca i1, i1 0
- %nop15009 = alloca i1, i1 0
- %nop15010 = alloca i1, i1 0
- %nop15011 = alloca i1, i1 0
- %nop15012 = alloca i1, i1 0
- %nop15013 = alloca i1, i1 0
- %nop15014 = alloca i1, i1 0
- %nop15015 = alloca i1, i1 0
- %nop15016 = alloca i1, i1 0
- %nop15017 = alloca i1, i1 0
- %nop15018 = alloca i1, i1 0
- %nop15019 = alloca i1, i1 0
- %nop15020 = alloca i1, i1 0
- %nop15021 = alloca i1, i1 0
- %nop15022 = alloca i1, i1 0
- %nop15023 = alloca i1, i1 0
- %nop15024 = alloca i1, i1 0
- %nop15025 = alloca i1, i1 0
- %nop15026 = alloca i1, i1 0
- %nop15027 = alloca i1, i1 0
- %nop15028 = alloca i1, i1 0
- %nop15029 = alloca i1, i1 0
- %nop15030 = alloca i1, i1 0
- %nop15031 = alloca i1, i1 0
- %nop15032 = alloca i1, i1 0
- %nop15033 = alloca i1, i1 0
- %nop15034 = alloca i1, i1 0
- %nop15035 = alloca i1, i1 0
- %nop15036 = alloca i1, i1 0
- %nop15037 = alloca i1, i1 0
- %nop15038 = alloca i1, i1 0
- %nop15039 = alloca i1, i1 0
- %nop15040 = alloca i1, i1 0
- %nop15041 = alloca i1, i1 0
- %nop15042 = alloca i1, i1 0
- %nop15043 = alloca i1, i1 0
- %nop15044 = alloca i1, i1 0
- %nop15045 = alloca i1, i1 0
- %nop15046 = alloca i1, i1 0
- %nop15047 = alloca i1, i1 0
- %nop15048 = alloca i1, i1 0
- %nop15049 = alloca i1, i1 0
- %nop15050 = alloca i1, i1 0
- %nop15051 = alloca i1, i1 0
- %nop15052 = alloca i1, i1 0
- %nop15053 = alloca i1, i1 0
- %nop15054 = alloca i1, i1 0
- %nop15055 = alloca i1, i1 0
- %nop15056 = alloca i1, i1 0
- %nop15057 = alloca i1, i1 0
- %nop15058 = alloca i1, i1 0
- %nop15059 = alloca i1, i1 0
- %nop15060 = alloca i1, i1 0
- %nop15061 = alloca i1, i1 0
- %nop15062 = alloca i1, i1 0
- %nop15063 = alloca i1, i1 0
- %nop15064 = alloca i1, i1 0
- %nop15065 = alloca i1, i1 0
- %nop15066 = alloca i1, i1 0
- %nop15067 = alloca i1, i1 0
- %nop15068 = alloca i1, i1 0
- %nop15069 = alloca i1, i1 0
- %nop15070 = alloca i1, i1 0
- %nop15071 = alloca i1, i1 0
- %nop15072 = alloca i1, i1 0
- %nop15073 = alloca i1, i1 0
- %nop15074 = alloca i1, i1 0
- %nop15075 = alloca i1, i1 0
- %nop15076 = alloca i1, i1 0
- %nop15077 = alloca i1, i1 0
- %nop15078 = alloca i1, i1 0
- %nop15079 = alloca i1, i1 0
- %nop15080 = alloca i1, i1 0
- %nop15081 = alloca i1, i1 0
- %nop15082 = alloca i1, i1 0
- %nop15083 = alloca i1, i1 0
- %nop15084 = alloca i1, i1 0
- %nop15085 = alloca i1, i1 0
- %nop15086 = alloca i1, i1 0
- %nop15087 = alloca i1, i1 0
- %nop15088 = alloca i1, i1 0
- %nop15089 = alloca i1, i1 0
- %nop15090 = alloca i1, i1 0
- %nop15091 = alloca i1, i1 0
- %nop15092 = alloca i1, i1 0
- %nop15093 = alloca i1, i1 0
- %nop15094 = alloca i1, i1 0
- %nop15095 = alloca i1, i1 0
- %nop15096 = alloca i1, i1 0
- %nop15097 = alloca i1, i1 0
- %nop15098 = alloca i1, i1 0
- %nop15099 = alloca i1, i1 0
- %nop15100 = alloca i1, i1 0
- %nop15101 = alloca i1, i1 0
- %nop15102 = alloca i1, i1 0
- %nop15103 = alloca i1, i1 0
- %nop15104 = alloca i1, i1 0
- %nop15105 = alloca i1, i1 0
- %nop15106 = alloca i1, i1 0
- %nop15107 = alloca i1, i1 0
- %nop15108 = alloca i1, i1 0
- %nop15109 = alloca i1, i1 0
- %nop15110 = alloca i1, i1 0
- %nop15111 = alloca i1, i1 0
- %nop15112 = alloca i1, i1 0
- %nop15113 = alloca i1, i1 0
- %nop15114 = alloca i1, i1 0
- %nop15115 = alloca i1, i1 0
- %nop15116 = alloca i1, i1 0
- %nop15117 = alloca i1, i1 0
- %nop15118 = alloca i1, i1 0
- %nop15119 = alloca i1, i1 0
- %nop15120 = alloca i1, i1 0
- %nop15121 = alloca i1, i1 0
- %nop15122 = alloca i1, i1 0
- %nop15123 = alloca i1, i1 0
- %nop15124 = alloca i1, i1 0
- %nop15125 = alloca i1, i1 0
- %nop15126 = alloca i1, i1 0
- %nop15127 = alloca i1, i1 0
- %nop15128 = alloca i1, i1 0
- %nop15129 = alloca i1, i1 0
- %nop15130 = alloca i1, i1 0
- %nop15131 = alloca i1, i1 0
- %nop15132 = alloca i1, i1 0
- %nop15133 = alloca i1, i1 0
- %nop15134 = alloca i1, i1 0
- %nop15135 = alloca i1, i1 0
- %nop15136 = alloca i1, i1 0
- %nop15137 = alloca i1, i1 0
- %nop15138 = alloca i1, i1 0
- %nop15139 = alloca i1, i1 0
- %nop15140 = alloca i1, i1 0
- %nop15141 = alloca i1, i1 0
- %nop15142 = alloca i1, i1 0
- %nop15143 = alloca i1, i1 0
- %nop15144 = alloca i1, i1 0
- %nop15145 = alloca i1, i1 0
- %nop15146 = alloca i1, i1 0
- %nop15147 = alloca i1, i1 0
- %nop15148 = alloca i1, i1 0
- %nop15149 = alloca i1, i1 0
- %nop15150 = alloca i1, i1 0
- %nop15151 = alloca i1, i1 0
- %nop15152 = alloca i1, i1 0
- %nop15153 = alloca i1, i1 0
- %nop15154 = alloca i1, i1 0
- %nop15155 = alloca i1, i1 0
- %nop15156 = alloca i1, i1 0
- %nop15157 = alloca i1, i1 0
- %nop15158 = alloca i1, i1 0
- %nop15159 = alloca i1, i1 0
- %nop15160 = alloca i1, i1 0
- %nop15161 = alloca i1, i1 0
- %nop15162 = alloca i1, i1 0
- %nop15163 = alloca i1, i1 0
- %nop15164 = alloca i1, i1 0
- %nop15165 = alloca i1, i1 0
- %nop15166 = alloca i1, i1 0
- %nop15167 = alloca i1, i1 0
- %nop15168 = alloca i1, i1 0
- %nop15169 = alloca i1, i1 0
- %nop15170 = alloca i1, i1 0
- %nop15171 = alloca i1, i1 0
- %nop15172 = alloca i1, i1 0
- %nop15173 = alloca i1, i1 0
- %nop15174 = alloca i1, i1 0
- %nop15175 = alloca i1, i1 0
- %nop15176 = alloca i1, i1 0
- %nop15177 = alloca i1, i1 0
- %nop15178 = alloca i1, i1 0
- %nop15179 = alloca i1, i1 0
- %nop15180 = alloca i1, i1 0
- %nop15181 = alloca i1, i1 0
- %nop15182 = alloca i1, i1 0
- %nop15183 = alloca i1, i1 0
- %nop15184 = alloca i1, i1 0
- %nop15185 = alloca i1, i1 0
- %nop15186 = alloca i1, i1 0
- %nop15187 = alloca i1, i1 0
- %nop15188 = alloca i1, i1 0
- %nop15189 = alloca i1, i1 0
- %nop15190 = alloca i1, i1 0
- %nop15191 = alloca i1, i1 0
- %nop15192 = alloca i1, i1 0
- %nop15193 = alloca i1, i1 0
- %nop15194 = alloca i1, i1 0
- %nop15195 = alloca i1, i1 0
- %nop15196 = alloca i1, i1 0
- %nop15197 = alloca i1, i1 0
- %nop15198 = alloca i1, i1 0
- %nop15199 = alloca i1, i1 0
- %nop15200 = alloca i1, i1 0
- %nop15201 = alloca i1, i1 0
- %nop15202 = alloca i1, i1 0
- %nop15203 = alloca i1, i1 0
- %nop15204 = alloca i1, i1 0
- %nop15205 = alloca i1, i1 0
- %nop15206 = alloca i1, i1 0
- %nop15207 = alloca i1, i1 0
- %nop15208 = alloca i1, i1 0
- %nop15209 = alloca i1, i1 0
- %nop15210 = alloca i1, i1 0
- %nop15211 = alloca i1, i1 0
- %nop15212 = alloca i1, i1 0
- %nop15213 = alloca i1, i1 0
- %nop15214 = alloca i1, i1 0
- %nop15215 = alloca i1, i1 0
- %nop15216 = alloca i1, i1 0
- %nop15217 = alloca i1, i1 0
- %nop15218 = alloca i1, i1 0
- %nop15219 = alloca i1, i1 0
- %nop15220 = alloca i1, i1 0
- %nop15221 = alloca i1, i1 0
- %nop15222 = alloca i1, i1 0
- %nop15223 = alloca i1, i1 0
- %nop15224 = alloca i1, i1 0
- %nop15225 = alloca i1, i1 0
- %nop15226 = alloca i1, i1 0
- %nop15227 = alloca i1, i1 0
- %nop15228 = alloca i1, i1 0
- %nop15229 = alloca i1, i1 0
- %nop15230 = alloca i1, i1 0
- %nop15231 = alloca i1, i1 0
- %nop15232 = alloca i1, i1 0
- %nop15233 = alloca i1, i1 0
- %nop15234 = alloca i1, i1 0
- %nop15235 = alloca i1, i1 0
- %nop15236 = alloca i1, i1 0
- %nop15237 = alloca i1, i1 0
- %nop15238 = alloca i1, i1 0
- %nop15239 = alloca i1, i1 0
- %nop15240 = alloca i1, i1 0
- %nop15241 = alloca i1, i1 0
- %nop15242 = alloca i1, i1 0
- %nop15243 = alloca i1, i1 0
- %nop15244 = alloca i1, i1 0
- %nop15245 = alloca i1, i1 0
- %nop15246 = alloca i1, i1 0
- %nop15247 = alloca i1, i1 0
- %nop15248 = alloca i1, i1 0
- %nop15249 = alloca i1, i1 0
- %nop15250 = alloca i1, i1 0
- %nop15251 = alloca i1, i1 0
- %nop15252 = alloca i1, i1 0
- %nop15253 = alloca i1, i1 0
- %nop15254 = alloca i1, i1 0
- %nop15255 = alloca i1, i1 0
- %nop15256 = alloca i1, i1 0
- %nop15257 = alloca i1, i1 0
- %nop15258 = alloca i1, i1 0
- %nop15259 = alloca i1, i1 0
- %nop15260 = alloca i1, i1 0
- %nop15261 = alloca i1, i1 0
- %nop15262 = alloca i1, i1 0
- %nop15263 = alloca i1, i1 0
- %nop15264 = alloca i1, i1 0
- %nop15265 = alloca i1, i1 0
- %nop15266 = alloca i1, i1 0
- %nop15267 = alloca i1, i1 0
- %nop15268 = alloca i1, i1 0
- %nop15269 = alloca i1, i1 0
- %nop15270 = alloca i1, i1 0
- %nop15271 = alloca i1, i1 0
- %nop15272 = alloca i1, i1 0
- %nop15273 = alloca i1, i1 0
- %nop15274 = alloca i1, i1 0
- %nop15275 = alloca i1, i1 0
- %nop15276 = alloca i1, i1 0
- %nop15277 = alloca i1, i1 0
- %nop15278 = alloca i1, i1 0
- %nop15279 = alloca i1, i1 0
- %nop15280 = alloca i1, i1 0
- %nop15281 = alloca i1, i1 0
- %nop15282 = alloca i1, i1 0
- %nop15283 = alloca i1, i1 0
- %nop15284 = alloca i1, i1 0
- %nop15285 = alloca i1, i1 0
- %nop15286 = alloca i1, i1 0
- %nop15287 = alloca i1, i1 0
- %nop15288 = alloca i1, i1 0
- %nop15289 = alloca i1, i1 0
- %nop15290 = alloca i1, i1 0
- %nop15291 = alloca i1, i1 0
- %nop15292 = alloca i1, i1 0
- %nop15293 = alloca i1, i1 0
- %nop15294 = alloca i1, i1 0
- %nop15295 = alloca i1, i1 0
- %nop15296 = alloca i1, i1 0
- %nop15297 = alloca i1, i1 0
- %nop15298 = alloca i1, i1 0
- %nop15299 = alloca i1, i1 0
- %nop15300 = alloca i1, i1 0
- %nop15301 = alloca i1, i1 0
- %nop15302 = alloca i1, i1 0
- %nop15303 = alloca i1, i1 0
- %nop15304 = alloca i1, i1 0
- %nop15305 = alloca i1, i1 0
- %nop15306 = alloca i1, i1 0
- %nop15307 = alloca i1, i1 0
- %nop15308 = alloca i1, i1 0
- %nop15309 = alloca i1, i1 0
- %nop15310 = alloca i1, i1 0
- %nop15311 = alloca i1, i1 0
- %nop15312 = alloca i1, i1 0
- %nop15313 = alloca i1, i1 0
- %nop15314 = alloca i1, i1 0
- %nop15315 = alloca i1, i1 0
- %nop15316 = alloca i1, i1 0
- %nop15317 = alloca i1, i1 0
- %nop15318 = alloca i1, i1 0
- %nop15319 = alloca i1, i1 0
- %nop15320 = alloca i1, i1 0
- %nop15321 = alloca i1, i1 0
- %nop15322 = alloca i1, i1 0
- %nop15323 = alloca i1, i1 0
- %nop15324 = alloca i1, i1 0
- %nop15325 = alloca i1, i1 0
- %nop15326 = alloca i1, i1 0
- %nop15327 = alloca i1, i1 0
- %nop15328 = alloca i1, i1 0
- %nop15329 = alloca i1, i1 0
- %nop15330 = alloca i1, i1 0
- %nop15331 = alloca i1, i1 0
- %nop15332 = alloca i1, i1 0
- %nop15333 = alloca i1, i1 0
- %nop15334 = alloca i1, i1 0
- %nop15335 = alloca i1, i1 0
- %nop15336 = alloca i1, i1 0
- %nop15337 = alloca i1, i1 0
- %nop15338 = alloca i1, i1 0
- %nop15339 = alloca i1, i1 0
- %nop15340 = alloca i1, i1 0
- %nop15341 = alloca i1, i1 0
- %nop15342 = alloca i1, i1 0
- %nop15343 = alloca i1, i1 0
- %nop15344 = alloca i1, i1 0
- %nop15345 = alloca i1, i1 0
- %nop15346 = alloca i1, i1 0
- %nop15347 = alloca i1, i1 0
- %nop15348 = alloca i1, i1 0
- %nop15349 = alloca i1, i1 0
- %nop15350 = alloca i1, i1 0
- %nop15351 = alloca i1, i1 0
- %nop15352 = alloca i1, i1 0
- %nop15353 = alloca i1, i1 0
- %nop15354 = alloca i1, i1 0
- %nop15355 = alloca i1, i1 0
- %nop15356 = alloca i1, i1 0
- %nop15357 = alloca i1, i1 0
- %nop15358 = alloca i1, i1 0
- %nop15359 = alloca i1, i1 0
- %nop15360 = alloca i1, i1 0
- %nop15361 = alloca i1, i1 0
- %nop15362 = alloca i1, i1 0
- %nop15363 = alloca i1, i1 0
- %nop15364 = alloca i1, i1 0
- %nop15365 = alloca i1, i1 0
- %nop15366 = alloca i1, i1 0
- %nop15367 = alloca i1, i1 0
- %nop15368 = alloca i1, i1 0
- %nop15369 = alloca i1, i1 0
- %nop15370 = alloca i1, i1 0
- %nop15371 = alloca i1, i1 0
- %nop15372 = alloca i1, i1 0
- %nop15373 = alloca i1, i1 0
- %nop15374 = alloca i1, i1 0
- %nop15375 = alloca i1, i1 0
- %nop15376 = alloca i1, i1 0
- %nop15377 = alloca i1, i1 0
- %nop15378 = alloca i1, i1 0
- %nop15379 = alloca i1, i1 0
- %nop15380 = alloca i1, i1 0
- %nop15381 = alloca i1, i1 0
- %nop15382 = alloca i1, i1 0
- %nop15383 = alloca i1, i1 0
- %nop15384 = alloca i1, i1 0
- %nop15385 = alloca i1, i1 0
- %nop15386 = alloca i1, i1 0
- %nop15387 = alloca i1, i1 0
- %nop15388 = alloca i1, i1 0
- %nop15389 = alloca i1, i1 0
- %nop15390 = alloca i1, i1 0
- %nop15391 = alloca i1, i1 0
- %nop15392 = alloca i1, i1 0
- %nop15393 = alloca i1, i1 0
- %nop15394 = alloca i1, i1 0
- %nop15395 = alloca i1, i1 0
- %nop15396 = alloca i1, i1 0
- %nop15397 = alloca i1, i1 0
- %nop15398 = alloca i1, i1 0
- %nop15399 = alloca i1, i1 0
- %nop15400 = alloca i1, i1 0
- %nop15401 = alloca i1, i1 0
- %nop15402 = alloca i1, i1 0
- %nop15403 = alloca i1, i1 0
- %nop15404 = alloca i1, i1 0
- %nop15405 = alloca i1, i1 0
- %nop15406 = alloca i1, i1 0
- %nop15407 = alloca i1, i1 0
- %nop15408 = alloca i1, i1 0
- %nop15409 = alloca i1, i1 0
- %nop15410 = alloca i1, i1 0
- %nop15411 = alloca i1, i1 0
- %nop15412 = alloca i1, i1 0
- %nop15413 = alloca i1, i1 0
- %nop15414 = alloca i1, i1 0
- %nop15415 = alloca i1, i1 0
- %nop15416 = alloca i1, i1 0
- %nop15417 = alloca i1, i1 0
- %nop15418 = alloca i1, i1 0
- %nop15419 = alloca i1, i1 0
- %nop15420 = alloca i1, i1 0
- %nop15421 = alloca i1, i1 0
- %nop15422 = alloca i1, i1 0
- %nop15423 = alloca i1, i1 0
- %nop15424 = alloca i1, i1 0
- %nop15425 = alloca i1, i1 0
- %nop15426 = alloca i1, i1 0
- %nop15427 = alloca i1, i1 0
- %nop15428 = alloca i1, i1 0
- %nop15429 = alloca i1, i1 0
- %nop15430 = alloca i1, i1 0
- %nop15431 = alloca i1, i1 0
- %nop15432 = alloca i1, i1 0
- %nop15433 = alloca i1, i1 0
- %nop15434 = alloca i1, i1 0
- %nop15435 = alloca i1, i1 0
- %nop15436 = alloca i1, i1 0
- %nop15437 = alloca i1, i1 0
- %nop15438 = alloca i1, i1 0
- %nop15439 = alloca i1, i1 0
- %nop15440 = alloca i1, i1 0
- %nop15441 = alloca i1, i1 0
- %nop15442 = alloca i1, i1 0
- %nop15443 = alloca i1, i1 0
- %nop15444 = alloca i1, i1 0
- %nop15445 = alloca i1, i1 0
- %nop15446 = alloca i1, i1 0
- %nop15447 = alloca i1, i1 0
- %nop15448 = alloca i1, i1 0
- %nop15449 = alloca i1, i1 0
- %nop15450 = alloca i1, i1 0
- %nop15451 = alloca i1, i1 0
- %nop15452 = alloca i1, i1 0
- %nop15453 = alloca i1, i1 0
- %nop15454 = alloca i1, i1 0
- %nop15455 = alloca i1, i1 0
- %nop15456 = alloca i1, i1 0
- %nop15457 = alloca i1, i1 0
- %nop15458 = alloca i1, i1 0
- %nop15459 = alloca i1, i1 0
- %nop15460 = alloca i1, i1 0
- %nop15461 = alloca i1, i1 0
- %nop15462 = alloca i1, i1 0
- %nop15463 = alloca i1, i1 0
- %nop15464 = alloca i1, i1 0
- %nop15465 = alloca i1, i1 0
- %nop15466 = alloca i1, i1 0
- %nop15467 = alloca i1, i1 0
- %nop15468 = alloca i1, i1 0
- %nop15469 = alloca i1, i1 0
- %nop15470 = alloca i1, i1 0
- %nop15471 = alloca i1, i1 0
- %nop15472 = alloca i1, i1 0
- %nop15473 = alloca i1, i1 0
- %nop15474 = alloca i1, i1 0
- %nop15475 = alloca i1, i1 0
- %nop15476 = alloca i1, i1 0
- %nop15477 = alloca i1, i1 0
- %nop15478 = alloca i1, i1 0
- %nop15479 = alloca i1, i1 0
- %nop15480 = alloca i1, i1 0
- %nop15481 = alloca i1, i1 0
- %nop15482 = alloca i1, i1 0
- %nop15483 = alloca i1, i1 0
- %nop15484 = alloca i1, i1 0
- %nop15485 = alloca i1, i1 0
- %nop15486 = alloca i1, i1 0
- %nop15487 = alloca i1, i1 0
- %nop15488 = alloca i1, i1 0
- %nop15489 = alloca i1, i1 0
- %nop15490 = alloca i1, i1 0
- %nop15491 = alloca i1, i1 0
- %nop15492 = alloca i1, i1 0
- %nop15493 = alloca i1, i1 0
- %nop15494 = alloca i1, i1 0
- %nop15495 = alloca i1, i1 0
- %nop15496 = alloca i1, i1 0
- %nop15497 = alloca i1, i1 0
- %nop15498 = alloca i1, i1 0
- %nop15499 = alloca i1, i1 0
- %nop15500 = alloca i1, i1 0
- %nop15501 = alloca i1, i1 0
- %nop15502 = alloca i1, i1 0
- %nop15503 = alloca i1, i1 0
- %nop15504 = alloca i1, i1 0
- %nop15505 = alloca i1, i1 0
- %nop15506 = alloca i1, i1 0
- %nop15507 = alloca i1, i1 0
- %nop15508 = alloca i1, i1 0
- %nop15509 = alloca i1, i1 0
- %nop15510 = alloca i1, i1 0
- %nop15511 = alloca i1, i1 0
- %nop15512 = alloca i1, i1 0
- %nop15513 = alloca i1, i1 0
- %nop15514 = alloca i1, i1 0
- %nop15515 = alloca i1, i1 0
- %nop15516 = alloca i1, i1 0
- %nop15517 = alloca i1, i1 0
- %nop15518 = alloca i1, i1 0
- %nop15519 = alloca i1, i1 0
- %nop15520 = alloca i1, i1 0
- %nop15521 = alloca i1, i1 0
- %nop15522 = alloca i1, i1 0
- %nop15523 = alloca i1, i1 0
- %nop15524 = alloca i1, i1 0
- %nop15525 = alloca i1, i1 0
- %nop15526 = alloca i1, i1 0
- %nop15527 = alloca i1, i1 0
- %nop15528 = alloca i1, i1 0
- %nop15529 = alloca i1, i1 0
- %nop15530 = alloca i1, i1 0
- %nop15531 = alloca i1, i1 0
- %nop15532 = alloca i1, i1 0
- %nop15533 = alloca i1, i1 0
- %nop15534 = alloca i1, i1 0
- %nop15535 = alloca i1, i1 0
- %nop15536 = alloca i1, i1 0
- %nop15537 = alloca i1, i1 0
- %nop15538 = alloca i1, i1 0
- %nop15539 = alloca i1, i1 0
- %nop15540 = alloca i1, i1 0
- %nop15541 = alloca i1, i1 0
- %nop15542 = alloca i1, i1 0
- %nop15543 = alloca i1, i1 0
- %nop15544 = alloca i1, i1 0
- %nop15545 = alloca i1, i1 0
- %nop15546 = alloca i1, i1 0
- %nop15547 = alloca i1, i1 0
- %nop15548 = alloca i1, i1 0
- %nop15549 = alloca i1, i1 0
- %nop15550 = alloca i1, i1 0
- %nop15551 = alloca i1, i1 0
- %nop15552 = alloca i1, i1 0
- %nop15553 = alloca i1, i1 0
- %nop15554 = alloca i1, i1 0
- %nop15555 = alloca i1, i1 0
- %nop15556 = alloca i1, i1 0
- %nop15557 = alloca i1, i1 0
- %nop15558 = alloca i1, i1 0
- %nop15559 = alloca i1, i1 0
- %nop15560 = alloca i1, i1 0
- %nop15561 = alloca i1, i1 0
- %nop15562 = alloca i1, i1 0
- %nop15563 = alloca i1, i1 0
- %nop15564 = alloca i1, i1 0
- %nop15565 = alloca i1, i1 0
- %nop15566 = alloca i1, i1 0
- %nop15567 = alloca i1, i1 0
- %nop15568 = alloca i1, i1 0
- %nop15569 = alloca i1, i1 0
- %nop15570 = alloca i1, i1 0
- %nop15571 = alloca i1, i1 0
- %nop15572 = alloca i1, i1 0
- %nop15573 = alloca i1, i1 0
- %nop15574 = alloca i1, i1 0
- %nop15575 = alloca i1, i1 0
- %nop15576 = alloca i1, i1 0
- %nop15577 = alloca i1, i1 0
- %nop15578 = alloca i1, i1 0
- %nop15579 = alloca i1, i1 0
- %nop15580 = alloca i1, i1 0
- %nop15581 = alloca i1, i1 0
- %nop15582 = alloca i1, i1 0
- %nop15583 = alloca i1, i1 0
- %nop15584 = alloca i1, i1 0
- %nop15585 = alloca i1, i1 0
- %nop15586 = alloca i1, i1 0
- %nop15587 = alloca i1, i1 0
- %nop15588 = alloca i1, i1 0
- %nop15589 = alloca i1, i1 0
- %nop15590 = alloca i1, i1 0
- %nop15591 = alloca i1, i1 0
- %nop15592 = alloca i1, i1 0
- %nop15593 = alloca i1, i1 0
- %nop15594 = alloca i1, i1 0
- %nop15595 = alloca i1, i1 0
- %nop15596 = alloca i1, i1 0
- %nop15597 = alloca i1, i1 0
- %nop15598 = alloca i1, i1 0
- %nop15599 = alloca i1, i1 0
- %nop15600 = alloca i1, i1 0
- %nop15601 = alloca i1, i1 0
- %nop15602 = alloca i1, i1 0
- %nop15603 = alloca i1, i1 0
- %nop15604 = alloca i1, i1 0
- %nop15605 = alloca i1, i1 0
- %nop15606 = alloca i1, i1 0
- %nop15607 = alloca i1, i1 0
- %nop15608 = alloca i1, i1 0
- %nop15609 = alloca i1, i1 0
- %nop15610 = alloca i1, i1 0
- %nop15611 = alloca i1, i1 0
- %nop15612 = alloca i1, i1 0
- %nop15613 = alloca i1, i1 0
- %nop15614 = alloca i1, i1 0
- %nop15615 = alloca i1, i1 0
- %nop15616 = alloca i1, i1 0
- %nop15617 = alloca i1, i1 0
- %nop15618 = alloca i1, i1 0
- %nop15619 = alloca i1, i1 0
- %nop15620 = alloca i1, i1 0
- %nop15621 = alloca i1, i1 0
- %nop15622 = alloca i1, i1 0
- %nop15623 = alloca i1, i1 0
- %nop15624 = alloca i1, i1 0
- %nop15625 = alloca i1, i1 0
- %nop15626 = alloca i1, i1 0
- %nop15627 = alloca i1, i1 0
- %nop15628 = alloca i1, i1 0
- %nop15629 = alloca i1, i1 0
- %nop15630 = alloca i1, i1 0
- %nop15631 = alloca i1, i1 0
- %nop15632 = alloca i1, i1 0
- %nop15633 = alloca i1, i1 0
- %nop15634 = alloca i1, i1 0
- %nop15635 = alloca i1, i1 0
- %nop15636 = alloca i1, i1 0
- %nop15637 = alloca i1, i1 0
- %nop15638 = alloca i1, i1 0
- %nop15639 = alloca i1, i1 0
- %nop15640 = alloca i1, i1 0
- %nop15641 = alloca i1, i1 0
- %nop15642 = alloca i1, i1 0
- %nop15643 = alloca i1, i1 0
- %nop15644 = alloca i1, i1 0
- %nop15645 = alloca i1, i1 0
- %nop15646 = alloca i1, i1 0
- %nop15647 = alloca i1, i1 0
- %nop15648 = alloca i1, i1 0
- %nop15649 = alloca i1, i1 0
- %nop15650 = alloca i1, i1 0
- %nop15651 = alloca i1, i1 0
- %nop15652 = alloca i1, i1 0
- %nop15653 = alloca i1, i1 0
- %nop15654 = alloca i1, i1 0
- %nop15655 = alloca i1, i1 0
- %nop15656 = alloca i1, i1 0
- %nop15657 = alloca i1, i1 0
- %nop15658 = alloca i1, i1 0
- %nop15659 = alloca i1, i1 0
- %nop15660 = alloca i1, i1 0
- %nop15661 = alloca i1, i1 0
- %nop15662 = alloca i1, i1 0
- %nop15663 = alloca i1, i1 0
- %nop15664 = alloca i1, i1 0
- %nop15665 = alloca i1, i1 0
- %nop15666 = alloca i1, i1 0
- %nop15667 = alloca i1, i1 0
- %nop15668 = alloca i1, i1 0
- %nop15669 = alloca i1, i1 0
- %nop15670 = alloca i1, i1 0
- %nop15671 = alloca i1, i1 0
- %nop15672 = alloca i1, i1 0
- %nop15673 = alloca i1, i1 0
- %nop15674 = alloca i1, i1 0
- %nop15675 = alloca i1, i1 0
- %nop15676 = alloca i1, i1 0
- %nop15677 = alloca i1, i1 0
- %nop15678 = alloca i1, i1 0
- %nop15679 = alloca i1, i1 0
- %nop15680 = alloca i1, i1 0
- %nop15681 = alloca i1, i1 0
- %nop15682 = alloca i1, i1 0
- %nop15683 = alloca i1, i1 0
- %nop15684 = alloca i1, i1 0
- %nop15685 = alloca i1, i1 0
- %nop15686 = alloca i1, i1 0
- %nop15687 = alloca i1, i1 0
- %nop15688 = alloca i1, i1 0
- %nop15689 = alloca i1, i1 0
- %nop15690 = alloca i1, i1 0
- %nop15691 = alloca i1, i1 0
- %nop15692 = alloca i1, i1 0
- %nop15693 = alloca i1, i1 0
- %nop15694 = alloca i1, i1 0
- %nop15695 = alloca i1, i1 0
- %nop15696 = alloca i1, i1 0
- %nop15697 = alloca i1, i1 0
- %nop15698 = alloca i1, i1 0
- %nop15699 = alloca i1, i1 0
- %nop15700 = alloca i1, i1 0
- %nop15701 = alloca i1, i1 0
- %nop15702 = alloca i1, i1 0
- %nop15703 = alloca i1, i1 0
- %nop15704 = alloca i1, i1 0
- %nop15705 = alloca i1, i1 0
- %nop15706 = alloca i1, i1 0
- %nop15707 = alloca i1, i1 0
- %nop15708 = alloca i1, i1 0
- %nop15709 = alloca i1, i1 0
- %nop15710 = alloca i1, i1 0
- %nop15711 = alloca i1, i1 0
- %nop15712 = alloca i1, i1 0
- %nop15713 = alloca i1, i1 0
- %nop15714 = alloca i1, i1 0
- %nop15715 = alloca i1, i1 0
- %nop15716 = alloca i1, i1 0
- %nop15717 = alloca i1, i1 0
- %nop15718 = alloca i1, i1 0
- %nop15719 = alloca i1, i1 0
- %nop15720 = alloca i1, i1 0
- %nop15721 = alloca i1, i1 0
- %nop15722 = alloca i1, i1 0
- %nop15723 = alloca i1, i1 0
- %nop15724 = alloca i1, i1 0
- %nop15725 = alloca i1, i1 0
- %nop15726 = alloca i1, i1 0
- %nop15727 = alloca i1, i1 0
- %nop15728 = alloca i1, i1 0
- %nop15729 = alloca i1, i1 0
- %nop15730 = alloca i1, i1 0
- %nop15731 = alloca i1, i1 0
- %nop15732 = alloca i1, i1 0
- %nop15733 = alloca i1, i1 0
- %nop15734 = alloca i1, i1 0
- %nop15735 = alloca i1, i1 0
- %nop15736 = alloca i1, i1 0
- %nop15737 = alloca i1, i1 0
- %nop15738 = alloca i1, i1 0
- %nop15739 = alloca i1, i1 0
- %nop15740 = alloca i1, i1 0
- %nop15741 = alloca i1, i1 0
- %nop15742 = alloca i1, i1 0
- %nop15743 = alloca i1, i1 0
- %nop15744 = alloca i1, i1 0
- %nop15745 = alloca i1, i1 0
- %nop15746 = alloca i1, i1 0
- %nop15747 = alloca i1, i1 0
- %nop15748 = alloca i1, i1 0
- %nop15749 = alloca i1, i1 0
- %nop15750 = alloca i1, i1 0
- %nop15751 = alloca i1, i1 0
- %nop15752 = alloca i1, i1 0
- %nop15753 = alloca i1, i1 0
- %nop15754 = alloca i1, i1 0
- %nop15755 = alloca i1, i1 0
- %nop15756 = alloca i1, i1 0
- %nop15757 = alloca i1, i1 0
- %nop15758 = alloca i1, i1 0
- %nop15759 = alloca i1, i1 0
- %nop15760 = alloca i1, i1 0
- %nop15761 = alloca i1, i1 0
- %nop15762 = alloca i1, i1 0
- %nop15763 = alloca i1, i1 0
- %nop15764 = alloca i1, i1 0
- %nop15765 = alloca i1, i1 0
- %nop15766 = alloca i1, i1 0
- %nop15767 = alloca i1, i1 0
- %nop15768 = alloca i1, i1 0
- %nop15769 = alloca i1, i1 0
- %nop15770 = alloca i1, i1 0
- %nop15771 = alloca i1, i1 0
- %nop15772 = alloca i1, i1 0
- %nop15773 = alloca i1, i1 0
- %nop15774 = alloca i1, i1 0
- %nop15775 = alloca i1, i1 0
- %nop15776 = alloca i1, i1 0
- %nop15777 = alloca i1, i1 0
- %nop15778 = alloca i1, i1 0
- %nop15779 = alloca i1, i1 0
- %nop15780 = alloca i1, i1 0
- %nop15781 = alloca i1, i1 0
- %nop15782 = alloca i1, i1 0
- %nop15783 = alloca i1, i1 0
- %nop15784 = alloca i1, i1 0
- %nop15785 = alloca i1, i1 0
- %nop15786 = alloca i1, i1 0
- %nop15787 = alloca i1, i1 0
- %nop15788 = alloca i1, i1 0
- %nop15789 = alloca i1, i1 0
- %nop15790 = alloca i1, i1 0
- %nop15791 = alloca i1, i1 0
- %nop15792 = alloca i1, i1 0
- %nop15793 = alloca i1, i1 0
- %nop15794 = alloca i1, i1 0
- %nop15795 = alloca i1, i1 0
- %nop15796 = alloca i1, i1 0
- %nop15797 = alloca i1, i1 0
- %nop15798 = alloca i1, i1 0
- %nop15799 = alloca i1, i1 0
- %nop15800 = alloca i1, i1 0
- %nop15801 = alloca i1, i1 0
- %nop15802 = alloca i1, i1 0
- %nop15803 = alloca i1, i1 0
- %nop15804 = alloca i1, i1 0
- %nop15805 = alloca i1, i1 0
- %nop15806 = alloca i1, i1 0
- %nop15807 = alloca i1, i1 0
- %nop15808 = alloca i1, i1 0
- %nop15809 = alloca i1, i1 0
- %nop15810 = alloca i1, i1 0
- %nop15811 = alloca i1, i1 0
- %nop15812 = alloca i1, i1 0
- %nop15813 = alloca i1, i1 0
- %nop15814 = alloca i1, i1 0
- %nop15815 = alloca i1, i1 0
- %nop15816 = alloca i1, i1 0
- %nop15817 = alloca i1, i1 0
- %nop15818 = alloca i1, i1 0
- %nop15819 = alloca i1, i1 0
- %nop15820 = alloca i1, i1 0
- %nop15821 = alloca i1, i1 0
- %nop15822 = alloca i1, i1 0
- %nop15823 = alloca i1, i1 0
- %nop15824 = alloca i1, i1 0
- %nop15825 = alloca i1, i1 0
- %nop15826 = alloca i1, i1 0
- %nop15827 = alloca i1, i1 0
- %nop15828 = alloca i1, i1 0
- %nop15829 = alloca i1, i1 0
- %nop15830 = alloca i1, i1 0
- %nop15831 = alloca i1, i1 0
- %nop15832 = alloca i1, i1 0
- %nop15833 = alloca i1, i1 0
- %nop15834 = alloca i1, i1 0
- %nop15835 = alloca i1, i1 0
- %nop15836 = alloca i1, i1 0
- %nop15837 = alloca i1, i1 0
- %nop15838 = alloca i1, i1 0
- %nop15839 = alloca i1, i1 0
- %nop15840 = alloca i1, i1 0
- %nop15841 = alloca i1, i1 0
- %nop15842 = alloca i1, i1 0
- %nop15843 = alloca i1, i1 0
- %nop15844 = alloca i1, i1 0
- %nop15845 = alloca i1, i1 0
- %nop15846 = alloca i1, i1 0
- %nop15847 = alloca i1, i1 0
- %nop15848 = alloca i1, i1 0
- %nop15849 = alloca i1, i1 0
- %nop15850 = alloca i1, i1 0
- %nop15851 = alloca i1, i1 0
- %nop15852 = alloca i1, i1 0
- %nop15853 = alloca i1, i1 0
- %nop15854 = alloca i1, i1 0
- %nop15855 = alloca i1, i1 0
- %nop15856 = alloca i1, i1 0
- %nop15857 = alloca i1, i1 0
- %nop15858 = alloca i1, i1 0
- %nop15859 = alloca i1, i1 0
- %nop15860 = alloca i1, i1 0
- %nop15861 = alloca i1, i1 0
- %nop15862 = alloca i1, i1 0
- %nop15863 = alloca i1, i1 0
- %nop15864 = alloca i1, i1 0
- %nop15865 = alloca i1, i1 0
- %nop15866 = alloca i1, i1 0
- %nop15867 = alloca i1, i1 0
- %nop15868 = alloca i1, i1 0
- %nop15869 = alloca i1, i1 0
- %nop15870 = alloca i1, i1 0
- %nop15871 = alloca i1, i1 0
- %nop15872 = alloca i1, i1 0
- %nop15873 = alloca i1, i1 0
- %nop15874 = alloca i1, i1 0
- %nop15875 = alloca i1, i1 0
- %nop15876 = alloca i1, i1 0
- %nop15877 = alloca i1, i1 0
- %nop15878 = alloca i1, i1 0
- %nop15879 = alloca i1, i1 0
- %nop15880 = alloca i1, i1 0
- %nop15881 = alloca i1, i1 0
- %nop15882 = alloca i1, i1 0
- %nop15883 = alloca i1, i1 0
- %nop15884 = alloca i1, i1 0
- %nop15885 = alloca i1, i1 0
- %nop15886 = alloca i1, i1 0
- %nop15887 = alloca i1, i1 0
- %nop15888 = alloca i1, i1 0
- %nop15889 = alloca i1, i1 0
- %nop15890 = alloca i1, i1 0
- %nop15891 = alloca i1, i1 0
- %nop15892 = alloca i1, i1 0
- %nop15893 = alloca i1, i1 0
- %nop15894 = alloca i1, i1 0
- %nop15895 = alloca i1, i1 0
- %nop15896 = alloca i1, i1 0
- %nop15897 = alloca i1, i1 0
- %nop15898 = alloca i1, i1 0
- %nop15899 = alloca i1, i1 0
- %nop15900 = alloca i1, i1 0
- %nop15901 = alloca i1, i1 0
- %nop15902 = alloca i1, i1 0
- %nop15903 = alloca i1, i1 0
- %nop15904 = alloca i1, i1 0
- %nop15905 = alloca i1, i1 0
- %nop15906 = alloca i1, i1 0
- %nop15907 = alloca i1, i1 0
- %nop15908 = alloca i1, i1 0
- %nop15909 = alloca i1, i1 0
- %nop15910 = alloca i1, i1 0
- %nop15911 = alloca i1, i1 0
- %nop15912 = alloca i1, i1 0
- %nop15913 = alloca i1, i1 0
- %nop15914 = alloca i1, i1 0
- %nop15915 = alloca i1, i1 0
- %nop15916 = alloca i1, i1 0
- %nop15917 = alloca i1, i1 0
- %nop15918 = alloca i1, i1 0
- %nop15919 = alloca i1, i1 0
- %nop15920 = alloca i1, i1 0
- %nop15921 = alloca i1, i1 0
- %nop15922 = alloca i1, i1 0
- %nop15923 = alloca i1, i1 0
- %nop15924 = alloca i1, i1 0
- %nop15925 = alloca i1, i1 0
- %nop15926 = alloca i1, i1 0
- %nop15927 = alloca i1, i1 0
- %nop15928 = alloca i1, i1 0
- %nop15929 = alloca i1, i1 0
- %nop15930 = alloca i1, i1 0
- %nop15931 = alloca i1, i1 0
- %nop15932 = alloca i1, i1 0
- %nop15933 = alloca i1, i1 0
- %nop15934 = alloca i1, i1 0
- %nop15935 = alloca i1, i1 0
- %nop15936 = alloca i1, i1 0
- %nop15937 = alloca i1, i1 0
- %nop15938 = alloca i1, i1 0
- %nop15939 = alloca i1, i1 0
- %nop15940 = alloca i1, i1 0
- %nop15941 = alloca i1, i1 0
- %nop15942 = alloca i1, i1 0
- %nop15943 = alloca i1, i1 0
- %nop15944 = alloca i1, i1 0
- %nop15945 = alloca i1, i1 0
- %nop15946 = alloca i1, i1 0
- %nop15947 = alloca i1, i1 0
- %nop15948 = alloca i1, i1 0
- %nop15949 = alloca i1, i1 0
- %nop15950 = alloca i1, i1 0
- %nop15951 = alloca i1, i1 0
- %nop15952 = alloca i1, i1 0
- %nop15953 = alloca i1, i1 0
- %nop15954 = alloca i1, i1 0
- %nop15955 = alloca i1, i1 0
- %nop15956 = alloca i1, i1 0
- %nop15957 = alloca i1, i1 0
- %nop15958 = alloca i1, i1 0
- %nop15959 = alloca i1, i1 0
- %nop15960 = alloca i1, i1 0
- %nop15961 = alloca i1, i1 0
- %nop15962 = alloca i1, i1 0
- %nop15963 = alloca i1, i1 0
- %nop15964 = alloca i1, i1 0
- %nop15965 = alloca i1, i1 0
- %nop15966 = alloca i1, i1 0
- %nop15967 = alloca i1, i1 0
- %nop15968 = alloca i1, i1 0
- %nop15969 = alloca i1, i1 0
- %nop15970 = alloca i1, i1 0
- %nop15971 = alloca i1, i1 0
- %nop15972 = alloca i1, i1 0
- %nop15973 = alloca i1, i1 0
- %nop15974 = alloca i1, i1 0
- %nop15975 = alloca i1, i1 0
- %nop15976 = alloca i1, i1 0
- %nop15977 = alloca i1, i1 0
- %nop15978 = alloca i1, i1 0
- %nop15979 = alloca i1, i1 0
- %nop15980 = alloca i1, i1 0
- %nop15981 = alloca i1, i1 0
- %nop15982 = alloca i1, i1 0
- %nop15983 = alloca i1, i1 0
- %nop15984 = alloca i1, i1 0
- %nop15985 = alloca i1, i1 0
- %nop15986 = alloca i1, i1 0
- %nop15987 = alloca i1, i1 0
- %nop15988 = alloca i1, i1 0
- %nop15989 = alloca i1, i1 0
- %nop15990 = alloca i1, i1 0
- %nop15991 = alloca i1, i1 0
- %nop15992 = alloca i1, i1 0
- %nop15993 = alloca i1, i1 0
- %nop15994 = alloca i1, i1 0
- %nop15995 = alloca i1, i1 0
- %nop15996 = alloca i1, i1 0
- %nop15997 = alloca i1, i1 0
- %nop15998 = alloca i1, i1 0
- %nop15999 = alloca i1, i1 0
- %nop16000 = alloca i1, i1 0
- %nop16001 = alloca i1, i1 0
- %nop16002 = alloca i1, i1 0
- %nop16003 = alloca i1, i1 0
- %nop16004 = alloca i1, i1 0
- %nop16005 = alloca i1, i1 0
- %nop16006 = alloca i1, i1 0
- %nop16007 = alloca i1, i1 0
- %nop16008 = alloca i1, i1 0
- %nop16009 = alloca i1, i1 0
- %nop16010 = alloca i1, i1 0
- %nop16011 = alloca i1, i1 0
- %nop16012 = alloca i1, i1 0
- %nop16013 = alloca i1, i1 0
- %nop16014 = alloca i1, i1 0
- %nop16015 = alloca i1, i1 0
- %nop16016 = alloca i1, i1 0
- %nop16017 = alloca i1, i1 0
- %nop16018 = alloca i1, i1 0
- %nop16019 = alloca i1, i1 0
- %nop16020 = alloca i1, i1 0
- %nop16021 = alloca i1, i1 0
- %nop16022 = alloca i1, i1 0
- %nop16023 = alloca i1, i1 0
- %nop16024 = alloca i1, i1 0
- %nop16025 = alloca i1, i1 0
- %nop16026 = alloca i1, i1 0
- %nop16027 = alloca i1, i1 0
- %nop16028 = alloca i1, i1 0
- %nop16029 = alloca i1, i1 0
- %nop16030 = alloca i1, i1 0
- %nop16031 = alloca i1, i1 0
- %nop16032 = alloca i1, i1 0
- %nop16033 = alloca i1, i1 0
- %nop16034 = alloca i1, i1 0
- %nop16035 = alloca i1, i1 0
- %nop16036 = alloca i1, i1 0
- %nop16037 = alloca i1, i1 0
- %nop16038 = alloca i1, i1 0
- %nop16039 = alloca i1, i1 0
- %nop16040 = alloca i1, i1 0
- %nop16041 = alloca i1, i1 0
- %nop16042 = alloca i1, i1 0
- %nop16043 = alloca i1, i1 0
- %nop16044 = alloca i1, i1 0
- %nop16045 = alloca i1, i1 0
- %nop16046 = alloca i1, i1 0
- %nop16047 = alloca i1, i1 0
- %nop16048 = alloca i1, i1 0
- %nop16049 = alloca i1, i1 0
- %nop16050 = alloca i1, i1 0
- %nop16051 = alloca i1, i1 0
- %nop16052 = alloca i1, i1 0
- %nop16053 = alloca i1, i1 0
- %nop16054 = alloca i1, i1 0
- %nop16055 = alloca i1, i1 0
- %nop16056 = alloca i1, i1 0
- %nop16057 = alloca i1, i1 0
- %nop16058 = alloca i1, i1 0
- %nop16059 = alloca i1, i1 0
- %nop16060 = alloca i1, i1 0
- %nop16061 = alloca i1, i1 0
- %nop16062 = alloca i1, i1 0
- %nop16063 = alloca i1, i1 0
- %nop16064 = alloca i1, i1 0
- %nop16065 = alloca i1, i1 0
- %nop16066 = alloca i1, i1 0
- %nop16067 = alloca i1, i1 0
- %nop16068 = alloca i1, i1 0
- %nop16069 = alloca i1, i1 0
- %nop16070 = alloca i1, i1 0
- %nop16071 = alloca i1, i1 0
- %nop16072 = alloca i1, i1 0
- %nop16073 = alloca i1, i1 0
- %nop16074 = alloca i1, i1 0
- %nop16075 = alloca i1, i1 0
- %nop16076 = alloca i1, i1 0
- %nop16077 = alloca i1, i1 0
- %nop16078 = alloca i1, i1 0
- %nop16079 = alloca i1, i1 0
- %nop16080 = alloca i1, i1 0
- %nop16081 = alloca i1, i1 0
- %nop16082 = alloca i1, i1 0
- %nop16083 = alloca i1, i1 0
- %nop16084 = alloca i1, i1 0
- %nop16085 = alloca i1, i1 0
- %nop16086 = alloca i1, i1 0
- %nop16087 = alloca i1, i1 0
- %nop16088 = alloca i1, i1 0
- %nop16089 = alloca i1, i1 0
- %nop16090 = alloca i1, i1 0
- %nop16091 = alloca i1, i1 0
- %nop16092 = alloca i1, i1 0
- %nop16093 = alloca i1, i1 0
- %nop16094 = alloca i1, i1 0
- %nop16095 = alloca i1, i1 0
- %nop16096 = alloca i1, i1 0
- %nop16097 = alloca i1, i1 0
- %nop16098 = alloca i1, i1 0
- %nop16099 = alloca i1, i1 0
- %nop16100 = alloca i1, i1 0
- %nop16101 = alloca i1, i1 0
- %nop16102 = alloca i1, i1 0
- %nop16103 = alloca i1, i1 0
- %nop16104 = alloca i1, i1 0
- %nop16105 = alloca i1, i1 0
- %nop16106 = alloca i1, i1 0
- %nop16107 = alloca i1, i1 0
- %nop16108 = alloca i1, i1 0
- %nop16109 = alloca i1, i1 0
- %nop16110 = alloca i1, i1 0
- %nop16111 = alloca i1, i1 0
- %nop16112 = alloca i1, i1 0
- %nop16113 = alloca i1, i1 0
- %nop16114 = alloca i1, i1 0
- %nop16115 = alloca i1, i1 0
- %nop16116 = alloca i1, i1 0
- %nop16117 = alloca i1, i1 0
- %nop16118 = alloca i1, i1 0
- %nop16119 = alloca i1, i1 0
- %nop16120 = alloca i1, i1 0
- %nop16121 = alloca i1, i1 0
- %nop16122 = alloca i1, i1 0
- %nop16123 = alloca i1, i1 0
- %nop16124 = alloca i1, i1 0
- %nop16125 = alloca i1, i1 0
- %nop16126 = alloca i1, i1 0
- %nop16127 = alloca i1, i1 0
- %nop16128 = alloca i1, i1 0
- %nop16129 = alloca i1, i1 0
- %nop16130 = alloca i1, i1 0
- %nop16131 = alloca i1, i1 0
- %nop16132 = alloca i1, i1 0
- %nop16133 = alloca i1, i1 0
- %nop16134 = alloca i1, i1 0
- %nop16135 = alloca i1, i1 0
- %nop16136 = alloca i1, i1 0
- %nop16137 = alloca i1, i1 0
- %nop16138 = alloca i1, i1 0
- %nop16139 = alloca i1, i1 0
- %nop16140 = alloca i1, i1 0
- %nop16141 = alloca i1, i1 0
- %nop16142 = alloca i1, i1 0
- %nop16143 = alloca i1, i1 0
- %nop16144 = alloca i1, i1 0
- %nop16145 = alloca i1, i1 0
- %nop16146 = alloca i1, i1 0
- %nop16147 = alloca i1, i1 0
- %nop16148 = alloca i1, i1 0
- %nop16149 = alloca i1, i1 0
- %nop16150 = alloca i1, i1 0
- %nop16151 = alloca i1, i1 0
- %nop16152 = alloca i1, i1 0
- %nop16153 = alloca i1, i1 0
- %nop16154 = alloca i1, i1 0
- %nop16155 = alloca i1, i1 0
- %nop16156 = alloca i1, i1 0
- %nop16157 = alloca i1, i1 0
- %nop16158 = alloca i1, i1 0
- %nop16159 = alloca i1, i1 0
- %nop16160 = alloca i1, i1 0
- %nop16161 = alloca i1, i1 0
- %nop16162 = alloca i1, i1 0
- %nop16163 = alloca i1, i1 0
- %nop16164 = alloca i1, i1 0
- %nop16165 = alloca i1, i1 0
- %nop16166 = alloca i1, i1 0
- %nop16167 = alloca i1, i1 0
- %nop16168 = alloca i1, i1 0
- %nop16169 = alloca i1, i1 0
- %nop16170 = alloca i1, i1 0
- %nop16171 = alloca i1, i1 0
- %nop16172 = alloca i1, i1 0
- %nop16173 = alloca i1, i1 0
- %nop16174 = alloca i1, i1 0
- %nop16175 = alloca i1, i1 0
- %nop16176 = alloca i1, i1 0
- %nop16177 = alloca i1, i1 0
- %nop16178 = alloca i1, i1 0
- %nop16179 = alloca i1, i1 0
- %nop16180 = alloca i1, i1 0
- %nop16181 = alloca i1, i1 0
- %nop16182 = alloca i1, i1 0
- %nop16183 = alloca i1, i1 0
- %nop16184 = alloca i1, i1 0
- %nop16185 = alloca i1, i1 0
- %nop16186 = alloca i1, i1 0
- %nop16187 = alloca i1, i1 0
- %nop16188 = alloca i1, i1 0
- %nop16189 = alloca i1, i1 0
- %nop16190 = alloca i1, i1 0
- %nop16191 = alloca i1, i1 0
- %nop16192 = alloca i1, i1 0
- %nop16193 = alloca i1, i1 0
- %nop16194 = alloca i1, i1 0
- %nop16195 = alloca i1, i1 0
- %nop16196 = alloca i1, i1 0
- %nop16197 = alloca i1, i1 0
- %nop16198 = alloca i1, i1 0
- %nop16199 = alloca i1, i1 0
- %nop16200 = alloca i1, i1 0
- %nop16201 = alloca i1, i1 0
- %nop16202 = alloca i1, i1 0
- %nop16203 = alloca i1, i1 0
- %nop16204 = alloca i1, i1 0
- %nop16205 = alloca i1, i1 0
- %nop16206 = alloca i1, i1 0
- %nop16207 = alloca i1, i1 0
- %nop16208 = alloca i1, i1 0
- %nop16209 = alloca i1, i1 0
- %nop16210 = alloca i1, i1 0
- %nop16211 = alloca i1, i1 0
- %nop16212 = alloca i1, i1 0
- %nop16213 = alloca i1, i1 0
- %nop16214 = alloca i1, i1 0
- %nop16215 = alloca i1, i1 0
- %nop16216 = alloca i1, i1 0
- %nop16217 = alloca i1, i1 0
- %nop16218 = alloca i1, i1 0
- %nop16219 = alloca i1, i1 0
- %nop16220 = alloca i1, i1 0
- %nop16221 = alloca i1, i1 0
- %nop16222 = alloca i1, i1 0
- %nop16223 = alloca i1, i1 0
- %nop16224 = alloca i1, i1 0
- %nop16225 = alloca i1, i1 0
- %nop16226 = alloca i1, i1 0
- %nop16227 = alloca i1, i1 0
- %nop16228 = alloca i1, i1 0
- %nop16229 = alloca i1, i1 0
- %nop16230 = alloca i1, i1 0
- %nop16231 = alloca i1, i1 0
- %nop16232 = alloca i1, i1 0
- %nop16233 = alloca i1, i1 0
- %nop16234 = alloca i1, i1 0
- %nop16235 = alloca i1, i1 0
- %nop16236 = alloca i1, i1 0
- %nop16237 = alloca i1, i1 0
- %nop16238 = alloca i1, i1 0
- %nop16239 = alloca i1, i1 0
- %nop16240 = alloca i1, i1 0
- %nop16241 = alloca i1, i1 0
- %nop16242 = alloca i1, i1 0
- %nop16243 = alloca i1, i1 0
- %nop16244 = alloca i1, i1 0
- %nop16245 = alloca i1, i1 0
- %nop16246 = alloca i1, i1 0
- %nop16247 = alloca i1, i1 0
- %nop16248 = alloca i1, i1 0
- %nop16249 = alloca i1, i1 0
- %nop16250 = alloca i1, i1 0
- %nop16251 = alloca i1, i1 0
- %nop16252 = alloca i1, i1 0
- %nop16253 = alloca i1, i1 0
- %nop16254 = alloca i1, i1 0
- %nop16255 = alloca i1, i1 0
- %nop16256 = alloca i1, i1 0
- %nop16257 = alloca i1, i1 0
- %nop16258 = alloca i1, i1 0
- %nop16259 = alloca i1, i1 0
- %nop16260 = alloca i1, i1 0
- %nop16261 = alloca i1, i1 0
- %nop16262 = alloca i1, i1 0
- %nop16263 = alloca i1, i1 0
- %nop16264 = alloca i1, i1 0
- %nop16265 = alloca i1, i1 0
- %nop16266 = alloca i1, i1 0
- %nop16267 = alloca i1, i1 0
- %nop16268 = alloca i1, i1 0
- %nop16269 = alloca i1, i1 0
- %nop16270 = alloca i1, i1 0
- %nop16271 = alloca i1, i1 0
- %nop16272 = alloca i1, i1 0
- %nop16273 = alloca i1, i1 0
- %nop16274 = alloca i1, i1 0
- %nop16275 = alloca i1, i1 0
- %nop16276 = alloca i1, i1 0
- %nop16277 = alloca i1, i1 0
- %nop16278 = alloca i1, i1 0
- %nop16279 = alloca i1, i1 0
- %nop16280 = alloca i1, i1 0
- %nop16281 = alloca i1, i1 0
- %nop16282 = alloca i1, i1 0
- %nop16283 = alloca i1, i1 0
- %nop16284 = alloca i1, i1 0
- %nop16285 = alloca i1, i1 0
- %nop16286 = alloca i1, i1 0
- %nop16287 = alloca i1, i1 0
- %nop16288 = alloca i1, i1 0
- %nop16289 = alloca i1, i1 0
- %nop16290 = alloca i1, i1 0
- %nop16291 = alloca i1, i1 0
- %nop16292 = alloca i1, i1 0
- %nop16293 = alloca i1, i1 0
- %nop16294 = alloca i1, i1 0
- %nop16295 = alloca i1, i1 0
- %nop16296 = alloca i1, i1 0
- %nop16297 = alloca i1, i1 0
- %nop16298 = alloca i1, i1 0
- %nop16299 = alloca i1, i1 0
- %nop16300 = alloca i1, i1 0
- %nop16301 = alloca i1, i1 0
- %nop16302 = alloca i1, i1 0
- %nop16303 = alloca i1, i1 0
- %nop16304 = alloca i1, i1 0
- %nop16305 = alloca i1, i1 0
- %nop16306 = alloca i1, i1 0
- %nop16307 = alloca i1, i1 0
- %nop16308 = alloca i1, i1 0
- %nop16309 = alloca i1, i1 0
- %nop16310 = alloca i1, i1 0
- %nop16311 = alloca i1, i1 0
- %nop16312 = alloca i1, i1 0
- %nop16313 = alloca i1, i1 0
- %nop16314 = alloca i1, i1 0
- %nop16315 = alloca i1, i1 0
- %nop16316 = alloca i1, i1 0
- %nop16317 = alloca i1, i1 0
- %nop16318 = alloca i1, i1 0
- %nop16319 = alloca i1, i1 0
- %nop16320 = alloca i1, i1 0
- %nop16321 = alloca i1, i1 0
- %nop16322 = alloca i1, i1 0
- %nop16323 = alloca i1, i1 0
- %nop16324 = alloca i1, i1 0
- %nop16325 = alloca i1, i1 0
- %nop16326 = alloca i1, i1 0
- %nop16327 = alloca i1, i1 0
- %nop16328 = alloca i1, i1 0
- %nop16329 = alloca i1, i1 0
- %nop16330 = alloca i1, i1 0
- %nop16331 = alloca i1, i1 0
- %nop16332 = alloca i1, i1 0
- %nop16333 = alloca i1, i1 0
- %nop16334 = alloca i1, i1 0
- %nop16335 = alloca i1, i1 0
- %nop16336 = alloca i1, i1 0
- %nop16337 = alloca i1, i1 0
- %nop16338 = alloca i1, i1 0
- %nop16339 = alloca i1, i1 0
- %nop16340 = alloca i1, i1 0
- %nop16341 = alloca i1, i1 0
- %nop16342 = alloca i1, i1 0
- %nop16343 = alloca i1, i1 0
- %nop16344 = alloca i1, i1 0
- %nop16345 = alloca i1, i1 0
- %nop16346 = alloca i1, i1 0
- %nop16347 = alloca i1, i1 0
- %nop16348 = alloca i1, i1 0
- %nop16349 = alloca i1, i1 0
- %nop16350 = alloca i1, i1 0
- %nop16351 = alloca i1, i1 0
- %nop16352 = alloca i1, i1 0
- %nop16353 = alloca i1, i1 0
- %nop16354 = alloca i1, i1 0
- %nop16355 = alloca i1, i1 0
- %nop16356 = alloca i1, i1 0
- %nop16357 = alloca i1, i1 0
- %nop16358 = alloca i1, i1 0
- %nop16359 = alloca i1, i1 0
- %nop16360 = alloca i1, i1 0
- %nop16361 = alloca i1, i1 0
- %nop16362 = alloca i1, i1 0
- %nop16363 = alloca i1, i1 0
- %nop16364 = alloca i1, i1 0
- %nop16365 = alloca i1, i1 0
- %nop16366 = alloca i1, i1 0
- %nop16367 = alloca i1, i1 0
- %nop16368 = alloca i1, i1 0
- %nop16369 = alloca i1, i1 0
- %nop16370 = alloca i1, i1 0
- %nop16371 = alloca i1, i1 0
- %nop16372 = alloca i1, i1 0
- %nop16373 = alloca i1, i1 0
- %nop16374 = alloca i1, i1 0
- %nop16375 = alloca i1, i1 0
- %nop16376 = alloca i1, i1 0
- %nop16377 = alloca i1, i1 0
- br label %for.inc
-
-for.inc:
- %3 = load i32* %i, align 4
- %inc = add nsw i32 %3, 1
- store i32 %inc, i32* %i, align 4
- br label %for.cond
-
-; CHECK: addiu $sp, $sp, -8
-; CHECK: sw $ra, 0($sp)
-; CHECK: lui $[[REG1:[0-9]+]], 65534
-; CHECK: addiu $[[REG1]], $[[REG1]], -12
-; CHECK: addu $[[REG1]], $ra, $[[REG1]]
-; CHECK: lw $ra, 0($sp)
-; CHECK: jr $[[REG1]]
-; CHECK: addiu $sp, $sp, 8
-
-for.end:
- ret i32 0
-}
-
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false"
- "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"
- "no-infs-fp-math"="false" "no-nans-fp-math"="false"
- "stack-protector-buffer-size"="8" "unsafe-fp-math"="false"
- "use-soft-float"="false" }
diff --git a/test/MC/Mips/micromips-pc16-fixup.s b/test/MC/Mips/micromips-pc16-fixup.s
new file mode 100644
index 000000000000..146a1550b499
--- /dev/null
+++ b/test/MC/Mips/micromips-pc16-fixup.s
@@ -0,0 +1,10 @@
+# RUN: llvm-mc %s -triple=mips-unknown-linux -mcpu=mips32r2 -arch=mips -mattr=+micromips 2>&1 -filetype=obj | FileCheck %s
+#
+# CHECK-NOT: LLVM ERROR: out of range PC16 fixup
+
+.text
+ b foo
+ .space 65536 - 8, 1 # -8 = size of b instr plus size of automatically inserted nop
+foo:
+ add $0,$0,$0
+
diff --git a/test/MC/Mips/micromips-relocations.s b/test/MC/Mips/micromips-relocations.s
index 804dd2f595f7..1633845444f2 100644
--- a/test/MC/Mips/micromips-relocations.s
+++ b/test/MC/Mips/micromips-relocations.s
@@ -67,6 +67,14 @@
# CHECK-FIXUP: # fixup A - offset: 0,
# CHECK-FIXUP: value: _gp_disp@TPREL_LO,
# CHECK-FIXUP: kind: fixup_MICROMIPS_TLS_TPREL_LO16
+# CHECK-FIXUP: addiu $4, $gp, %tlsgd(a)
+# CHECK-FIXUP: # encoding: [0x9c'A',0x30'A',0x00,0x00]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: a@TLSGD, kind: fixup_MICROMIPS_TLS_GD
+# CHECK-FIXUP: addiu $4, $gp, %tlsldm(f.i)
+# CHECK-FIXUP: # encoding: [0x9c'A',0x30'A',0x00,0x00]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: f.i@TLSLDM, kind: fixup_MICROMIPS_TLS_LDM
#------------------------------------------------------------------------------
# Check that the appropriate relocations were created.
#------------------------------------------------------------------------------
@@ -83,6 +91,8 @@
# CHECK-ELF: 0x{{[0-9,A-F]+}} R_MICROMIPS_GOT_OFST
# CHECK-ELF: 0x{{[0-9,A-F]+}} R_MICROMIPS_TLS_TPREL_HI16
# CHECK-ELF: 0x{{[0-9,A-F]+}} R_MICROMIPS_TLS_TPREL_LO16
+# CHECK-ELF: 0x{{[0-9,A-F]+}} R_MICROMIPS_TLS_GD
+# CHECK-ELF: 0x{{[0-9,A-F]+}} R_MICROMIPS_TLS_LDM
# CHECK-ELF: ]
lui $2, %hi(_gp_disp)
@@ -97,3 +107,5 @@
lw $6, %got_ofst(loop_4)($5)
lui $2, %tprel_hi(_gp_disp)
addiu $2, $2, %tprel_lo(_gp_disp)
+ addiu $4, $gp, %tlsgd(a)
+ addiu $4, $gp, %tlsldm(f.i)
diff --git a/test/MC/Mips/mips-abi-bad.s b/test/MC/Mips/mips-abi-bad.s
new file mode 100644
index 000000000000..c4653cfee642
--- /dev/null
+++ b/test/MC/Mips/mips-abi-bad.s
@@ -0,0 +1,20 @@
+# Error checking for malformed ABI-related directives
+# RUN: not llvm-mc -triple mips-unknown-unknown %s 2>&1 | FileCheck %s
+# CHECK: .text
+ .module fp=3
+# CHECK : mips-abi-bad.s:4:16: error: unsupported option
+# CHECK-NEXT : .module fp=3
+# CHECK-NEXT : ^
+
+ .set fp=xx,6
+# CHECK :mips-abi-bad.s:5:15: error: unexpected token in statement
+# CHECK-NEXT : .set fp=xx,6
+# CHECK-NEXT : ^
+
+# CHECK :.set mips16
+ .set mips16
+ .module fp=32
+
+# CHECK :mips-abi-bad.s:14:13: error: .module directive must come before any code
+# CHECK-NEXT : .module fp=32
+# CHECK-NEXT : ^
diff --git a/test/MC/Mips/mips-alu-instructions.s b/test/MC/Mips/mips-alu-instructions.s
index 68a8da07c2b1..b25394b39a67 100644
--- a/test/MC/Mips/mips-alu-instructions.s
+++ b/test/MC/Mips/mips-alu-instructions.s
@@ -118,3 +118,32 @@
negu $6,$7
move $7,$8
rdhwr $5, $29
+
+#------------------------------------------------------------------------------
+# Shortcuts for arithmetic instructions
+#------------------------------------------------------------------------------
+
+# CHECK: add $9, $9, $3 # encoding: [0x20,0x48,0x23,0x01]
+# CHECK: addu $9, $9, $3 # encoding: [0x21,0x48,0x23,0x01]
+# CHECK: addi $9, $9, 10 # encoding: [0x0a,0x00,0x29,0x21]
+# CHECK: addiu $9, $9, 10 # encoding: [0x0a,0x00,0x29,0x25]
+# CHECK: and $5, $5, $6 # encoding: [0x24,0x28,0xa6,0x00]
+# CHECK: mul $9, $9, $3 # encoding: [0x02,0x48,0x23,0x71]
+# CHECK: or $2, $2, $4 # encoding: [0x25,0x10,0x44,0x00]
+# CHECK: sub $9, $9, $3 # encoding: [0x22,0x48,0x23,0x01]
+# CHECK: subu $9, $9, $3 # encoding: [0x23,0x48,0x23,0x01]
+# CHECK: addi $9, $9, -10 # encoding: [0xf6,0xff,0x29,0x21]
+# CHECK: addiu $9, $9, -10 # encoding: [0xf6,0xff,0x29,0x25]
+# CHECK: xor $9, $9, $10 # encoding: [0x26,0x48,0x2a,0x01]
+ add $9, $3
+ addu $9, $3
+ add $9, 10
+ addu $9, 10
+ and $5, $6
+ mul $9, $3
+ or $2, $4
+ sub $9, $3
+ subu $9, $3
+ sub $9, 10
+ subu $9, 10
+ xor $9, $10
diff --git a/test/MC/Mips/mips-bad-branches.s b/test/MC/Mips/mips-bad-branches.s
new file mode 100644
index 000000000000..321b3c45f873
--- /dev/null
+++ b/test/MC/Mips/mips-bad-branches.s
@@ -0,0 +1,409 @@
+# RUN: not llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -arch=mips 2>&1 | FileCheck %s
+#
+# CHECK: error: branch to misaligned address
+# CHECK: b -131069
+# CHECK: error: branch to misaligned address
+# CHECK: b -131070
+# CHECK: error: branch to misaligned address
+# CHECK: b -131071
+# CHECK: error: branch target out of range
+# CHECK: b -131073
+# CHECK: error: branch to misaligned address
+# CHECK: b 131069
+# CHECK: error: branch to misaligned address
+# CHECK: b 131070
+# CHECK: error: branch to misaligned address
+# CHECK: b 131071
+# CHECK: error: branch target out of range
+# CHECK: b 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: beq $1, $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: beq $1, $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: beq $1, $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: bne $1, $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bne $1, $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: bne $1, $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bal -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bal -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bal -131071
+# CHECK: error: branch target out of range
+# CHECK: bal -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bal 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bal 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bal 131071
+# CHECK: error: branch target out of range
+# CHECK: bal 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: bgez $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bgez $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: bgez $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: bgtz $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bgtz $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: bgtz $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: blez $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: blez $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: blez $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: bltz $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bltz $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: bltz $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: bgezal $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bgezal $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: bgezal $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, -131071
+# CHECK: error: branch target out of range
+# CHECK: bltzal $1, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bltzal $1, 131071
+# CHECK: error: branch target out of range
+# CHECK: bltzal $1, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f -131071
+# CHECK: error: branch target out of range
+# CHECK: bc1f -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f 131071
+# CHECK: error: branch target out of range
+# CHECK: bc1f 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, -131071
+# CHECK: error: branch target out of range
+# CHECK: bc1f $fcc0, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1f $fcc0, 131071
+# CHECK: error: branch target out of range
+# CHECK: bc1f $fcc0, 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t -131071
+# CHECK: error: branch target out of range
+# CHECK: bc1t -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t 131071
+# CHECK: error: branch target out of range
+# CHECK: bc1t 131072
+
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, -131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, -131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, -131071
+# CHECK: error: branch target out of range
+# CHECK: bc1t $fcc0, -131073
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, 131069
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, 131070
+# CHECK: error: branch to misaligned address
+# CHECK: bc1t $fcc0, 131071
+# CHECK: error: branch target out of range
+# CHECK: bc1t $fcc0, 131072
+
+.text
+.set noat
+ b -131068
+ b -131069
+ b -131070
+ b -131071
+ b -131072
+ b -131073
+ b 131068
+ b 131069
+ b 131070
+ b 131071
+ b 131072
+
+ beq $1, $1, -131068
+ beq $1, $1, -131069
+ beq $1, $1, -131070
+ beq $1, $1, -131071
+ beq $1, $1, -131072
+ beq $1, $1, -131073
+ beq $1, $1, 131068
+ beq $1, $1, 131069
+ beq $1, $1, 131070
+ beq $1, $1, 131071
+ beq $1, $1, 131072
+
+ bne $1, $1, -131068
+ bne $1, $1, -131069
+ bne $1, $1, -131070
+ bne $1, $1, -131071
+ bne $1, $1, -131072
+ bne $1, $1, -131073
+ bne $1, $1, 131068
+ bne $1, $1, 131069
+ bne $1, $1, 131070
+ bne $1, $1, 131071
+ bne $1, $1, 131072
+
+ bal -131068
+ bal -131069
+ bal -131070
+ bal -131071
+ bal -131072
+ bal -131073
+ bal 131068
+ bal 131069
+ bal 131070
+ bal 131071
+ bal 131072
+
+ bgez $1, -131068
+ bgez $1, -131069
+ bgez $1, -131070
+ bgez $1, -131071
+ bgez $1, -131072
+ bgez $1, -131073
+ bgez $1, 131068
+ bgez $1, 131069
+ bgez $1, 131070
+ bgez $1, 131071
+ bgez $1, 131072
+
+ bgtz $1, -131068
+ bgtz $1, -131069
+ bgtz $1, -131070
+ bgtz $1, -131071
+ bgtz $1, -131072
+ bgtz $1, -131073
+ bgtz $1, 131068
+ bgtz $1, 131069
+ bgtz $1, 131070
+ bgtz $1, 131071
+ bgtz $1, 131072
+
+ blez $1, -131068
+ blez $1, -131069
+ blez $1, -131070
+ blez $1, -131071
+ blez $1, -131072
+ blez $1, -131073
+ blez $1, 131068
+ blez $1, 131069
+ blez $1, 131070
+ blez $1, 131071
+ blez $1, 131072
+
+ bltz $1, -131068
+ bltz $1, -131069
+ bltz $1, -131070
+ bltz $1, -131071
+ bltz $1, -131072
+ bltz $1, -131073
+ bltz $1, 131068
+ bltz $1, 131069
+ bltz $1, 131070
+ bltz $1, 131071
+ bltz $1, 131072
+
+ bgezal $1, -131068
+ bgezal $1, -131069
+ bgezal $1, -131070
+ bgezal $1, -131071
+ bgezal $1, -131072
+ bgezal $1, -131073
+ bgezal $1, 131068
+ bgezal $1, 131069
+ bgezal $1, 131070
+ bgezal $1, 131071
+ bgezal $1, 131072
+
+ bltzal $1, -131068
+ bltzal $1, -131069
+ bltzal $1, -131070
+ bltzal $1, -131071
+ bltzal $1, -131072
+ bltzal $1, -131073
+ bltzal $1, 131068
+ bltzal $1, 131069
+ bltzal $1, 131070
+ bltzal $1, 131071
+ bltzal $1, 131072
+
+ bc1f -131068
+ bc1f -131069
+ bc1f -131070
+ bc1f -131071
+ bc1f -131072
+ bc1f -131073
+ bc1f 131068
+ bc1f 131069
+ bc1f 131070
+ bc1f 131071
+ bc1f 131072
+
+ bc1f $fcc0, -131068
+ bc1f $fcc0, -131069
+ bc1f $fcc0, -131070
+ bc1f $fcc0, -131071
+ bc1f $fcc0, -131072
+ bc1f $fcc0, -131073
+ bc1f $fcc0, 131068
+ bc1f $fcc0, 131069
+ bc1f $fcc0, 131070
+ bc1f $fcc0, 131071
+ bc1f $fcc0, 131072
+
+ bc1t -131068
+ bc1t -131069
+ bc1t -131070
+ bc1t -131071
+ bc1t -131072
+ bc1t -131073
+ bc1t 131068
+ bc1t 131069
+ bc1t 131070
+ bc1t 131071
+ bc1t 131072
+
+ bc1t $fcc0, -131068
+ bc1t $fcc0, -131069
+ bc1t $fcc0, -131070
+ bc1t $fcc0, -131071
+ bc1t $fcc0, -131072
+ bc1t $fcc0, -131073
+ bc1t $fcc0, 131068
+ bc1t $fcc0, 131069
+ bc1t $fcc0, 131070
+ bc1t $fcc0, 131071
+ bc1t $fcc0, 131072
diff --git a/test/MC/Mips/mips-control-instructions.s b/test/MC/Mips/mips-control-instructions.s
index 4a16c535637a..47da8ccca3c4 100644
--- a/test/MC/Mips/mips-control-instructions.s
+++ b/test/MC/Mips/mips-control-instructions.s
@@ -4,7 +4,7 @@
# RUN: | FileCheck -check-prefix=CHECK64 %s
# CHECK32: break # encoding: [0x00,0x00,0x00,0x0d]
-# CHECK32: break 7, 0 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK32: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK32: break 7, 5 # encoding: [0x00,0x07,0x01,0x4d]
# CHECK32: syscall # encoding: [0x00,0x00,0x00,0x0c]
# CHECK32: syscall 13396 # encoding: [0x00,0x0d,0x15,0x0c]
@@ -37,7 +37,7 @@
# CHECK32: tnei $3, 1023 # encoding: [0x04,0x6e,0x03,0xff]
# CHECK64: break # encoding: [0x00,0x00,0x00,0x0d]
-# CHECK64: break 7, 0 # encoding: [0x00,0x07,0x00,0x0d]
+# CHECK64: break 7 # encoding: [0x00,0x07,0x00,0x0d]
# CHECK64: break 7, 5 # encoding: [0x00,0x07,0x01,0x4d]
# CHECK64: syscall # encoding: [0x00,0x00,0x00,0x0c]
# CHECK64: syscall 13396 # encoding: [0x00,0x0d,0x15,0x0c]
diff --git a/test/MC/Mips/mips-data-directives.s b/test/MC/Mips/mips-data-directives.s
new file mode 100644
index 000000000000..8b3e0b3aa19a
--- /dev/null
+++ b/test/MC/Mips/mips-data-directives.s
@@ -0,0 +1,36 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# CHECK-ASM: .4byte 3735929054
+# CHECK-ASM: .8byte -2401050962867405073
+# CHECK-ASM: .4byte label
+# CHECK-ASM: .8byte label
+
+# Check that the data and relocations were correctly emitted
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Name: .data (66)
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: DEADC0DE DEADC0DE DEADBEEF 00000000
+# CHECK-OBJ: 0010: 00000000 00000000
+# CHECK-OBJ: )
+# CHECK-OBJ: }
+
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Name: .rel.data (62)
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: 0xC R_MIPS_32 .data 0x0
+# CHECK-OBJ: 0x10 R_MIPS_64 .data 0x0
+# CHECK-OBJ: ]
+# CHECK-OBJ: }
+
+.data
+label:
+ .word 0xdeadc0de
+ .dword 0xdeadc0dedeadbeef
+
+ .word label
+ .dword label
diff --git a/test/MC/Mips/mips-diagnostic-fixup.s b/test/MC/Mips/mips-diagnostic-fixup.s
new file mode 100644
index 000000000000..864d7397271d
--- /dev/null
+++ b/test/MC/Mips/mips-diagnostic-fixup.s
@@ -0,0 +1,10 @@
+# RUN: not llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -arch=mips 2>&1 -filetype=obj | FileCheck %s
+#
+# CHECK: LLVM ERROR: out of range PC16 fixup
+
+.text
+ b foo
+ .space 131072 - 8, 1 # -8 = size of b instr plus size of automatically inserted nop
+ nop # This instr makes the branch too long to fit into an 18-bit offset
+foo:
+ add $0,$0,$0
diff --git a/test/MC/Mips/mips-expansions-bad.s b/test/MC/Mips/mips-expansions-bad.s
new file mode 100644
index 000000000000..a137deb8d172
--- /dev/null
+++ b/test/MC/Mips/mips-expansions-bad.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc %s -arch=mips -mcpu=mips32r2 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .text
+ li $5, 0x100000000 # CHECK: :[[@LINE]]:9: error: instruction requires a CPU feature not currently enabled
+ dli $5, 1 # CHECK: :[[@LINE]]:9: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips-expansions.s b/test/MC/Mips/mips-expansions.s
index 1622965a4139..f0a04a5a2515 100644
--- a/test/MC/Mips/mips-expansions.s
+++ b/test/MC/Mips/mips-expansions.s
@@ -8,6 +8,8 @@
# CHECK: addiu $6, $zero, -2345 # encoding: [0xd7,0xf6,0x06,0x24]
# CHECK: lui $7, 1 # encoding: [0x01,0x00,0x07,0x3c]
# CHECK: ori $7, $7, 2 # encoding: [0x02,0x00,0xe7,0x34]
+# CHECK: addiu $8, $zero, -8 # encoding: [0xf8,0xff,0x08,0x24]
+
# CHECK: addiu $4, $zero, 20 # encoding: [0x14,0x00,0x04,0x24]
# CHECK: lui $7, 1 # encoding: [0x01,0x00,0x07,0x3c]
# CHECK: ori $7, $7, 2 # encoding: [0x02,0x00,0xe7,0x34]
@@ -32,17 +34,28 @@
# CHECK: addu $1, $1, $9 # encoding: [0x21,0x08,0x29,0x00]
# CHECK: sw $10, 57920($1) # encoding: [0x40,0xe2,0x2a,0xac]
+# CHECK: lui $1, %hi(symbol)
+# CHECK: ldc1 $f0, %lo(symbol)($1)
+# CHECK: lui $1, %hi(symbol)
+# CHECK: sdc1 $f0, %lo(symbol)($1)
+
li $5,123
li $6,-2345
li $7,65538
+ li $8, ~7
la $a0, 20
la $7,65538
la $a0, 20($a1)
la $7,65538($8)
+ .set noat
lw $t2, symbol($a0)
+ .set at
sw $t2, symbol($t1)
lw $t2, 655483($a0)
sw $t2, 123456($t1)
+
+ ldc1 $f0, symbol
+ sdc1 $f0, symbol
diff --git a/test/MC/Mips/mips-jump-instructions.s b/test/MC/Mips/mips-jump-instructions.s
index 989826a1a2b3..596c8a2f7633 100644
--- a/test/MC/Mips/mips-jump-instructions.s
+++ b/test/MC/Mips/mips-jump-instructions.s
@@ -101,12 +101,16 @@ end_of_code:
# CHECK32: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK32: jr $7 # encoding: [0x08,0x00,0xe0,0x00]
# CHECK32: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK32:lab:
# CHECK32: jr $7 # encoding: [0x08,0x00,0xe0,0x00]
# CHECK32: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK32: jalr $25 # encoding: [0x09,0xf8,0x20,0x03]
# CHECK32: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK32: jalr $4, $25 # encoding: [0x09,0x20,0x20,0x03]
# CHECK32: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK32: jalx lab # encoding: [A,A,A,0b011101AA]
+# CHECK32: # fixup A - offset: 0, value: lab, kind: fixup_Mips_26
+# CHECK32: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK64: j 1328 # encoding: [0x4c,0x01,0x00,0x08]
# CHECK64: nop # encoding: [0x00,0x00,0x00,0x00]
@@ -120,13 +124,16 @@ end_of_code:
# CHECK64: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK64: jr $7 # encoding: [0x08,0x00,0xe0,0x00]
# CHECK64: nop # encoding: [0x00,0x00,0x00,0x00]
+# CHECK64:lab:
# CHECK64: jr $7 # encoding: [0x08,0x00,0xe0,0x00]
# CHECK64: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK64: jalr $25 # encoding: [0x09,0xf8,0x20,0x03]
# CHECK64: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK64: jalr $4, $25 # encoding: [0x09,0x20,0x20,0x03]
# CHECK64: nop # encoding: [0x00,0x00,0x00,0x00]
-
+# CHECK64: jalx lab # encoding: [A,A,A,0b011101AA]
+# CHECK64: # fixup A - offset: 0, value: lab, kind: fixup_Mips_26
+# CHECK64: nop # encoding: [0x00,0x00,0x00,0x00]
j 1328
nop
@@ -140,9 +147,12 @@ end_of_code:
nop
jr $7
nop
+lab:
j $7
nop
jal $25
nop
jal $4,$25
nop
+ jalx lab
+ nop
diff --git a/test/MC/Mips/mips-noat.s b/test/MC/Mips/mips-noat.s
new file mode 100644
index 000000000000..07db251b0506
--- /dev/null
+++ b/test/MC/Mips/mips-noat.s
@@ -0,0 +1,29 @@
+# RUN: not llvm-mc %s -triple=mips-unknown-linux 2>%t0 | FileCheck %s
+# RUN: FileCheck -check-prefix=ERROR %s < %t0
+# Check that using the assembler temporary when .set noat is in effect is an error.
+
+# We start with the assembler temporary enabled
+# CHECK-LABEL: test1:
+# CHECK: lui $1, 1
+# CHECK: addu $1, $1, $2
+# CHECK: lw $2, 0($1)
+test1:
+ lw $2, 65536($2)
+
+test2:
+ .set noat
+ lw $2, 65536($2) # ERROR: mips-noat.s:[[@LINE]]:9: error: Pseudo instruction requires $at, which is not available
+
+
+# Can we switch it back on successfully?
+# CHECK-LABEL: test3:
+# CHECK: lui $1, 1
+# CHECK: addu $1, $1, $2
+# CHECK: lw $2, 0($1)
+test3:
+ .set at
+ lw $2, 65536($2)
+
+test4:
+ .set at=$0
+ lw $2, 65536($2) # ERROR: mips-noat.s:[[@LINE]]:9: error: Pseudo instruction requires $at, which is not available
diff --git a/test/MC/Mips/mips-pc16-fixup.s b/test/MC/Mips/mips-pc16-fixup.s
new file mode 100644
index 000000000000..5443532d6125
--- /dev/null
+++ b/test/MC/Mips/mips-pc16-fixup.s
@@ -0,0 +1,10 @@
+# RUN: llvm-mc %s -triple=mips-unknown-linux -mcpu=mips32r2 -arch=mips 2>&1 -filetype=obj | FileCheck %s
+#
+# CHECK-NOT: LLVM ERROR: out of range PC16 fixup
+
+.text
+ b foo
+ .space 131072 - 8, 1 # -8 = size of b instr plus size of automatically inserted nop
+foo:
+ add $0,$0,$0
+
diff --git a/test/MC/Mips/mips-reginfo-fp32.s b/test/MC/Mips/mips-reginfo-fp32.s
new file mode 100644
index 000000000000..5b31884ec16f
--- /dev/null
+++ b/test/MC/Mips/mips-reginfo-fp32.s
@@ -0,0 +1,34 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -filetype=obj -o - | \
+# RUN: llvm-readobj -s -section-data | \
+# RUN: FileCheck %s
+
+# CHECK: Section {
+# CHECK: Index:
+# CHECK: Name: .reginfo
+# CHECK: Type: SHT_MIPS_REGINFO (0x70000006)
+# CHECK: Flags [ (0x2)
+# CHECK: SHF_ALLOC (0x2)
+# CHECK: ]
+# CHECK: Size: 24
+# CHECK: SectionData (
+# CHECK: 0000: 01010101 00000000 C0007535 00000000
+# CHECK: 0010: 00000000 00000000
+# CHECK: )
+# CHECK: }
+
+.text
+ add $0,$0,$0
+ add $8,$0,$0
+ add $16,$0,$0
+ add $24,$0,$0
+
+# abs.s - Reads and writes from/to $f0.
+ abs.s $f0,$f0
+# round.w.d - Reads $f4 and $f5 and writes to $f2.
+ round.w.d $f2,$f4
+# ceil.w.s - Reads $f8 and writes to $f10.
+ ceil.w.s $f10, $f8
+# cvt.s.d - Reads from $f12 and $f13 and writes to $f14
+ cvt.s.d $f14, $f12
+# abs.d - Reads from $f30 and $f31 and writes to $f30 and $f31.
+ abs.d $f30,$f30
diff --git a/test/MC/Mips/mips-reginfo-fp64.s b/test/MC/Mips/mips-reginfo-fp64.s
new file mode 100644
index 000000000000..b60e54ea405c
--- /dev/null
+++ b/test/MC/Mips/mips-reginfo-fp64.s
@@ -0,0 +1,60 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa,+fp64 -filetype=obj -o - | \
+# RUN: llvm-readobj -s -section-data | \
+# RUN: FileCheck %s -check-prefix=ELF32
+
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64,-n64,+n32 -filetype=obj -o - | \
+# RUN: llvm-readobj -s -section-data | \
+# RUN: FileCheck %s -check-prefix=ELF32
+
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64r2 -mattr=+msa,+fp64,+n64 -filetype=obj -o - | \
+# RUN: llvm-readobj -s -section-data | \
+# RUN: FileCheck %s -check-prefix=ELF64
+
+# ELF32: Section {
+# ELF32: Name: .reginfo
+# ELF32: Type: SHT_MIPS_REGINFO (0x70000006)
+# ELF32: Flags [ (0x2)
+# ELF32: SHF_ALLOC (0x2)
+# ELF32: ]
+# ELF32: Size: 24
+# ELF32: SectionData (
+# ELF32: 0000: 01010101 00000000 4C005515 00000000
+# ELF32: 0010: 00000000 00000000
+# ELF32: )
+# ELF32: }
+
+# ELF64: Section {
+# ELF64: Name: .MIPS.options
+# ELF64: Type: SHT_MIPS_OPTIONS (0x7000000D)
+# ELF64: Flags [ (0x8000002)
+# ELF64: SHF_ALLOC (0x2)
+# ELF64: SHF_MIPS_NOSTRIP (0x8000000)
+# ELF64: ]
+# ELF64: Size: 40
+# ELF64: SectionData (
+# ELF64: 0000: 01280000 00000000 01010101 00000000
+# ELF64: 0010: 00000000 4C005515 00000000 00000000
+# ELF64: 0020: 00000000 00000000
+# ELF64: )
+# ELF64: }
+
+.text
+ add $0,$0,$0
+ add $8,$0,$0
+ add $16,$0,$0
+ add $24,$0,$0
+
+# abs.s - Reads and writes from/to $f0.
+ abs.s $f0,$f0
+# round.w.d - Reads $f4 and writes to $f2.
+ round.w.d $f2,$f4
+# ceil.w.s - Reads $f8 and writes to $f10.
+ ceil.w.s $f10, $f8
+# cvt.s.d - Reads from $f12 and writes to $f14.
+ cvt.s.d $f14, $f12
+# abs.d - Reads from $f30 and writes to $f30.
+ abs.d $f30,$f30
+
+# Read and write from/to $f26 and $f27
+ add_a.b $w26,$w26,$w26
+ add_a.b $w27,$w27,$w27
diff --git a/test/MC/Mips/mips-register-names-invalid.s b/test/MC/Mips/mips-register-names-invalid.s
new file mode 100644
index 000000000000..e6f8416a41eb
--- /dev/null
+++ b/test/MC/Mips/mips-register-names-invalid.s
@@ -0,0 +1,8 @@
+# RUN: not llvm-mc %s -triple=mips-unknown-freebsd -show-encoding 2>%t0
+# RUN: FileCheck %s < %t0
+
+# $32 used to trigger an assertion instead of the usual error message due to
+# an off-by-one bug.
+
+# CHECK: :[[@LINE+1]]:17: error: invalid operand for instruction
+ add $32, $0, $0
diff --git a/test/MC/Mips/mips-register-names-o32.s b/test/MC/Mips/mips-register-names-o32.s
new file mode 100644
index 000000000000..c1e30240389a
--- /dev/null
+++ b/test/MC/Mips/mips-register-names-o32.s
@@ -0,0 +1,40 @@
+# RUN: llvm-mc %s -triple=mips-unknown-freebsd -show-encoding | FileCheck %s
+
+# Check that the register names are mapped to their correct numbers for o32
+# Second byte of addiu with $zero at rt contains the number of the source
+# register.
+
+.set noat
+addiu $zero, $zero, 0 # CHECK: encoding: [0x24,0x00,0x00,0x00]
+addiu $at, $zero, 0 # CHECK: encoding: [0x24,0x01,0x00,0x00]
+addiu $v0, $zero, 0 # CHECK: encoding: [0x24,0x02,0x00,0x00]
+addiu $v1, $zero, 0 # CHECK: encoding: [0x24,0x03,0x00,0x00]
+addiu $a0, $zero, 0 # CHECK: encoding: [0x24,0x04,0x00,0x00]
+addiu $a1, $zero, 0 # CHECK: encoding: [0x24,0x05,0x00,0x00]
+addiu $a2, $zero, 0 # CHECK: encoding: [0x24,0x06,0x00,0x00]
+addiu $a3, $zero, 0 # CHECK: encoding: [0x24,0x07,0x00,0x00]
+addiu $t0, $zero, 0 # CHECK: encoding: [0x24,0x08,0x00,0x00]
+addiu $t1, $zero, 0 # CHECK: encoding: [0x24,0x09,0x00,0x00]
+addiu $t2, $zero, 0 # CHECK: encoding: [0x24,0x0a,0x00,0x00]
+addiu $t3, $zero, 0 # CHECK: encoding: [0x24,0x0b,0x00,0x00]
+addiu $t4, $zero, 0 # CHECK: encoding: [0x24,0x0c,0x00,0x00]
+addiu $t5, $zero, 0 # CHECK: encoding: [0x24,0x0d,0x00,0x00]
+addiu $t6, $zero, 0 # CHECK: encoding: [0x24,0x0e,0x00,0x00]
+addiu $t7, $zero, 0 # CHECK: encoding: [0x24,0x0f,0x00,0x00]
+addiu $s0, $zero, 0 # CHECK: encoding: [0x24,0x10,0x00,0x00]
+addiu $s1, $zero, 0 # CHECK: encoding: [0x24,0x11,0x00,0x00]
+addiu $s2, $zero, 0 # CHECK: encoding: [0x24,0x12,0x00,0x00]
+addiu $s3, $zero, 0 # CHECK: encoding: [0x24,0x13,0x00,0x00]
+addiu $s4, $zero, 0 # CHECK: encoding: [0x24,0x14,0x00,0x00]
+addiu $s5, $zero, 0 # CHECK: encoding: [0x24,0x15,0x00,0x00]
+addiu $s6, $zero, 0 # CHECK: encoding: [0x24,0x16,0x00,0x00]
+addiu $s7, $zero, 0 # CHECK: encoding: [0x24,0x17,0x00,0x00]
+addiu $t8, $zero, 0 # CHECK: encoding: [0x24,0x18,0x00,0x00]
+addiu $t9, $zero, 0 # CHECK: encoding: [0x24,0x19,0x00,0x00]
+addiu $k0, $zero, 0 # CHECK: encoding: [0x24,0x1a,0x00,0x00]
+addiu $k1, $zero, 0 # CHECK: encoding: [0x24,0x1b,0x00,0x00]
+addiu $gp, $zero, 0 # CHECK: encoding: [0x24,0x1c,0x00,0x00]
+addiu $sp, $zero, 0 # CHECK: encoding: [0x24,0x1d,0x00,0x00]
+addiu $fp, $zero, 0 # CHECK: encoding: [0x24,0x1e,0x00,0x00]
+addiu $s8, $zero, 0 # CHECK: encoding: [0x24,0x1e,0x00,0x00]
+addiu $ra, $zero, 0 # CHECK: encoding: [0x24,0x1f,0x00,0x00]
diff --git a/test/MC/Mips/mips-register-names.s b/test/MC/Mips/mips-register-names.s
deleted file mode 100644
index 26187ce58875..000000000000
--- a/test/MC/Mips/mips-register-names.s
+++ /dev/null
@@ -1,71 +0,0 @@
-# RUN: llvm-mc %s -triple=mips-unknown-freebsd -show-encoding | FileCheck %s
-
-# Check that the register names are mapped to their correct numbers for o32
-# Second byte of addiu with $zero at rt contains the number of the source
-# register.
-
-# CHECK: encoding: [0x24,0x00,0x00,0x00]
-# CHECK: encoding: [0x24,0x01,0x00,0x00]
-# CHECK: encoding: [0x24,0x02,0x00,0x00]
-# CHECK: encoding: [0x24,0x03,0x00,0x00]
-# CHECK: encoding: [0x24,0x04,0x00,0x00]
-# CHECK: encoding: [0x24,0x05,0x00,0x00]
-# CHECK: encoding: [0x24,0x06,0x00,0x00]
-# CHECK: encoding: [0x24,0x07,0x00,0x00]
-# CHECK: encoding: [0x24,0x08,0x00,0x00]
-# CHECK: encoding: [0x24,0x09,0x00,0x00]
-# CHECK: encoding: [0x24,0x0a,0x00,0x00]
-# CHECK: encoding: [0x24,0x0b,0x00,0x00]
-# CHECK: encoding: [0x24,0x0c,0x00,0x00]
-# CHECK: encoding: [0x24,0x0d,0x00,0x00]
-# CHECK: encoding: [0x24,0x0e,0x00,0x00]
-# CHECK: encoding: [0x24,0x0f,0x00,0x00]
-# CHECK: encoding: [0x24,0x10,0x00,0x00]
-# CHECK: encoding: [0x24,0x11,0x00,0x00]
-# CHECK: encoding: [0x24,0x12,0x00,0x00]
-# CHECK: encoding: [0x24,0x13,0x00,0x00]
-# CHECK: encoding: [0x24,0x14,0x00,0x00]
-# CHECK: encoding: [0x24,0x15,0x00,0x00]
-# CHECK: encoding: [0x24,0x16,0x00,0x00]
-# CHECK: encoding: [0x24,0x17,0x00,0x00]
-# CHECK: encoding: [0x24,0x18,0x00,0x00]
-# CHECK: encoding: [0x24,0x19,0x00,0x00]
-# CHECK: encoding: [0x24,0x1a,0x00,0x00]
-# CHECK: encoding: [0x24,0x1b,0x00,0x00]
-# CHECK: encoding: [0x24,0x1c,0x00,0x00]
-# CHECK: encoding: [0x24,0x1d,0x00,0x00]
-# CHECK: encoding: [0x24,0x1e,0x00,0x00]
-# CHECK: encoding: [0x24,0x1f,0x00,0x00]
-addiu $zero, $zero, 0
-addiu $at, $zero, 0
-addiu $v0, $zero, 0
-addiu $v1, $zero, 0
-addiu $a0, $zero, 0
-addiu $a1, $zero, 0
-addiu $a2, $zero, 0
-addiu $a3, $zero, 0
-addiu $t0, $zero, 0
-addiu $t1, $zero, 0
-addiu $t2, $zero, 0
-addiu $t3, $zero, 0
-addiu $t4, $zero, 0
-addiu $t5, $zero, 0
-addiu $t6, $zero, 0
-addiu $t7, $zero, 0
-addiu $s0, $zero, 0
-addiu $s1, $zero, 0
-addiu $s2, $zero, 0
-addiu $s3, $zero, 0
-addiu $s4, $zero, 0
-addiu $s5, $zero, 0
-addiu $s6, $zero, 0
-addiu $s7, $zero, 0
-addiu $t8, $zero, 0
-addiu $t9, $zero, 0
-addiu $k0, $zero, 0
-addiu $k1, $zero, 0
-addiu $gp, $zero, 0
-addiu $sp, $zero, 0
-addiu $fp, $zero, 0
-addiu $sp, $zero, 0
-addiu $ra, $zero, 0
diff --git a/test/MC/Mips/mips1/invalid-mips2-wrong-error.s b/test/MC/Mips/mips1/invalid-mips2-wrong-error.s
new file mode 100644
index 000000000000..8e878fef1eab
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips2-wrong-error.s
@@ -0,0 +1,16 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ldc1 $f11,16391($s0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldc2 $8,-21181($at) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldc3 $29,-28645($s1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ll $v0,-7321($s2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sc $t7,18904($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdc1 $f31,30574($t5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdc2 $20,23157($s2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdc3 $12,5835($t2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/test/MC/Mips/mips1/invalid-mips2.s b/test/MC/Mips/mips1/invalid-mips2.s
new file mode 100644
index 000000000000..7db261d42c98
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips2.s
@@ -0,0 +1,24 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ceil.w.d $f11,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.w.s $f6,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.d $f14,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.s $f8,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.d $f6,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.s $f27,$f28 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.d $f17,$f22 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.s $f0,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $t6,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $t4,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.d $f22,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.s $f28,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sync # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips1/invalid-mips3-wrong-error.s b/test/MC/Mips/mips1/invalid-mips3-wrong-error.s
new file mode 100644
index 000000000000..2016e701b0f9
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips3-wrong-error.s
@@ -0,0 +1,23 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ld $sp,-28645($s1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldc1 $f11,16391($s0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldc2 $8,-21181($at) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldl $24,-4167($24) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldr $14,-30358($s4) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ll $v0,-7321($s2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lld $zero,-14736($ra) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwu $s3,-24086($v1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sc $15,18904($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ scd $15,-8243($sp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sd $12,5835($10) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdc1 $f31,30574($13) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdc2 $20,23157($s2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdl $a3,-20961($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdr $11,-20423($12) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/test/MC/Mips/mips1/invalid-mips3.s b/test/MC/Mips/mips1/invalid-mips3.s
new file mode 100644
index 000000000000..d1b0eec17a88
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips3.s
@@ -0,0 +1,65 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ dmult $s7,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsub $a3,$s6,$8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.l.d $f1,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.l.s $f18,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.w.d $f11,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.w.s $f6,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.d.l $f4,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.s.l $f15,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddiu $k0,$s6,-4586 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddu $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddiv $zero,$k0,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddivu $zero,$s0,$s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmfc1 $12,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmtc1 $s0,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,$zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsllv $zero,$s4,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrav $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrlv $s3,$14,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubu $a1,$a1,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.d $f26,$f7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.s $f12,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.d $f14,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.s $f8,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.d $f12,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.s $f25,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.d $f6,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.s $f27,$f28 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.d $f17,$f22 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.s $f0,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $14,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $12,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.d $f23,$f23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.s $f28,$f31 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.d $f22,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.s $f28,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips1/invalid-mips32.s b/test/MC/Mips/mips1/invalid-mips32.s
new file mode 100644
index 000000000000..4ad8d63eb2c8
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips32.s
@@ -0,0 +1,10 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+
+ sync 0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sync 1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
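The new invalid-instruction tests above all rely on the same FileCheck idiom: [[@LINE]] expands to the line number of the CHECK directive itself, and {{[0-9]+}} matches whatever column the assembler reports, so each expected diagnostic is pinned to its exact source line without hard-coding positions. A minimal sketch of that idiom, reusing an instruction and diagnostic that appear verbatim in the hunks above (piping stderr through 2>&1 is simply an equivalent way of handing it to FileCheck instead of going through a temporary file):

  # RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 2>&1 | FileCheck %s

          dsll    $zero,$s4,18    # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled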
diff --git a/test/MC/Mips/mips1/invalid-mips4-wrong-error.s b/test/MC/Mips/mips1/invalid-mips4-wrong-error.s
new file mode 100644
index 000000000000..2016e701b0f9
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips4-wrong-error.s
@@ -0,0 +1,23 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ld $sp,-28645($s1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldc1 $f11,16391($s0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldc2 $8,-21181($at) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldl $24,-4167($24) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldr $14,-30358($s4) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ll $v0,-7321($s2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lld $zero,-14736($ra) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwu $s3,-24086($v1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sc $15,18904($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ scd $15,-8243($sp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sd $12,5835($10) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdc1 $f31,30574($13) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdc2 $20,23157($s2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdl $a3,-20961($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdr $11,-20423($12) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
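The -wrong-error variants above deliberately record today's imperfect diagnostics: the 64-bit loads and stores are rejected on mips1, but with "invalid operand for instruction" rather than the CPU-feature message that the sibling invalid-mips*.s files expect. If the assembler's diagnostic is later corrected, these CHECK lines are the ones that would change; a hypothetical post-fix form of the first entry (not current llvm-mc output, shown only to illustrate the expected direction) would read:

          ld $sp,-28645($s1)      # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled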
diff --git a/test/MC/Mips/mips1/invalid-mips4.s b/test/MC/Mips/mips1/invalid-mips4.s
new file mode 100644
index 000000000000..9f246bc16bd0
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips4.s
@@ -0,0 +1,89 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ceil.l.d $f1,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.l.s $f18,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.w.d $f11,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.w.s $f6,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.d.l $f4,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.s.l $f15,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddiu $k0,$s6,-4586 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddu $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddiv $zero,$k0,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddivu $zero,$s0,$s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmfc1 $12,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmtc1 $s0,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmult $s7,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,$zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsllv $zero,$s4,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrav $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrlv $s3,$14,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsub $a3,$s6,$8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubu $a1,$a1,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ eret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.d $f26,$f7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.s $f12,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.d $f14,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.s $f8,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ldxc1 $f8,$s7($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f10,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f10,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f26,$f20,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.d $f12,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.s $f25,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.d $f6,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.s $f27,$f28 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$10($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.d $f17,$f22 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.s $f0,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$12($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $14,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $12,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.d $f23,$f23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.s $f28,$f31 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.d $f22,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.s $f28,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips1/invalid-mips5-wrong-error.s b/test/MC/Mips/mips1/invalid-mips5-wrong-error.s
new file mode 100644
index 000000000000..74473a3bf343
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips5-wrong-error.s
@@ -0,0 +1,46 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ abs.ps $f22,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ add.ps $f25,$f27,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ alnv.ps $f12,$f18,$f30,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.eq.ps $fcc5,$f0,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.f.ps $fcc6,$f11,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.le.ps $fcc1,$f7,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.lt.ps $f19,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.nge.ps $f1,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngl.ps $f21,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngle.ps $fcc7,$f12,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngt.ps $fcc5,$f30,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ole.ps $fcc7,$f21,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.olt.ps $fcc3,$f7,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.seq.ps $fcc6,$f31,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.sf.ps $fcc6,$f4,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ueq.ps $fcc1,$f5,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ule.ps $fcc6,$f17,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ult.ps $fcc7,$f14,$f0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.un.ps $fcc4,$f2,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.ps.s $f3,$f18,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pl $f30,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pu $f14,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ madd.ps $f22,$f3,$f14,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mov.ps $f22,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movf.ps $f10,$f28,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movn.ps $f31,$f31,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movt.ps $f20,$f25,$fcc2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movz.ps $f18,$f17,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ msub.ps $f12,$f14,$f29,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mul.ps $f14,$f0,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ neg.ps $f19,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmadd.ps $f27,$f4,$f9,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmsub.ps $f6,$f12,$f14,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pll.ps $f25,$f9,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ plu.ps $f1,$f26,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pul.ps $f9,$f30,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ puu.ps $f24,$f9,$f2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ sub.ps $f5,$f14,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips1/invalid-mips5.s b/test/MC/Mips/mips1/invalid-mips5.s
new file mode 100644
index 000000000000..af5b278c4098
--- /dev/null
+++ b/test/MC/Mips/mips1/invalid-mips5.s
@@ -0,0 +1,90 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ceil.l.d $f1,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.l.s $f18,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.w.d $f11,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.w.s $f6,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.d.l $f4,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.s.l $f15,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddiu $k0,$s6,-4586 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddu $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddiv $zero,$k0,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddivu $zero,$s0,$s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmfc1 $t0,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmtc1 $s0,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,$zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsllv $zero,$s4,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrav $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrlv $s3,$t2,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubu $a1,$a1,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ eret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.d $f26,$f7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.s $f12,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.d $f14,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.w.s $f8,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ldxc1 $f8,$s7($t3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f10,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f10,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$a3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$a3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.d $f12,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.s $f25,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.d $f6,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.w.s $f27,$f28 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.d $f17,$f22 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sqrt.s $f0,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$t0($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $t2,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $t0,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.d $f23,$f23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.s $f28,$f31 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.d $f22,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.w.s $f28,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$a2($t2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$t0($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips1/valid-xfail.s b/test/MC/Mips/mips1/valid-xfail.s
new file mode 100644
index 000000000000..7696c9eb4cd2
--- /dev/null
+++ b/test/MC/Mips/mips1/valid-xfail.s
@@ -0,0 +1,11 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ lwc0 c0_entrylo,-7321($s2)
+ swc0 c0_prid,18904($s3)
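valid-xfail.s works in the opposite direction from the invalid tests: the whole file is marked XFAIL, and CHECK-NOT: encoding only holds while llvm-mc emits no encoding for the listed coprocessor-0 loads and stores. Per the file's own comment, the test XPASSes as soon as any of them starts producing an encoding, which is the cue to promote that entry into valid.s. A hypothetical sketch of such a promotion (the encoding bytes are left as a wildcard, since lwc0 is not implemented yet):

          lwc0 c0_entrylo,-7321($s2)      # CHECK: encoding: [{{.*}}]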
diff --git a/test/MC/Mips/mips1/valid.s b/test/MC/Mips/mips1/valid.s
new file mode 100644
index 000000000000..66e11ba2fe52
--- /dev/null
+++ b/test/MC/Mips/mips1/valid.s
@@ -0,0 +1,113 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips1 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ cfc1 $s1,$21
+ ctc1 $a2,$26
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.s.d $f26,$f8
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwc3 $10,-32265($k0)
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ mfc1 $a3,$f27
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $s8,$a0
+ move $25,$a2
+ mtc1 $s8,$f9
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ sb $s6,-19857($14)
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swc3 $10,-32265($k0)
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ xor $s2,$a0,$s8
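valid.s is the positive counterpart: every instruction must assemble under -mcpu=mips1, and the lines carrying an explicit CHECK also pin the canonical spelling and the encoding bytes that -show-encoding prints. As a rough manual sanity check (a sketch, assuming llvm-mc reads the instruction from stdin when no input file is given), feeding one of the checked aliases to llvm-mc by hand should print the same bytes its CHECK line expects; unrelated output such as the leading .text directive is omitted here:

  $ echo 'negu $2,$3' | llvm-mc -triple=mips-unknown-linux -show-encoding -mcpu=mips1
          negu    $2, $3          # encoding: [0x00,0x03,0x10,0x23]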
diff --git a/test/MC/Mips/mips2/invalid-mips3-wrong-error.s b/test/MC/Mips/mips2/invalid-mips3-wrong-error.s
new file mode 100644
index 000000000000..3eb4ef3afb94
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips3-wrong-error.s
@@ -0,0 +1,18 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ dmult $s7,$a5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ld $sp,-28645($s1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldl $t8,-4167($t8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldr $t2,-30358($s4) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lld $zero,-14736($ra) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwu $s3,-24086($v1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ scd $t3,-8243($sp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sd $t0,5835($a6) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdl $a3,-20961($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdr $a7,-20423($t0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/test/MC/Mips/mips2/invalid-mips3.s b/test/MC/Mips/mips2/invalid-mips3.s
new file mode 100644
index 000000000000..458c416c0e9e
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips3.s
@@ -0,0 +1,49 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ceil.l.d $f1,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.l.s $f18,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.d.l $f4,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.s.l $f15,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddiu $k0,$s6,-4586 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddu $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddiv $zero,$k0,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddivu $zero,$s0,$s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmfc1 $t0,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmtc1 $s0,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,$zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsllv $zero,$s4,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrav $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrlv $s3,$t2,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsub $a3,$s6,$a4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubu $a1,$a1,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ eret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.d $f26,$f7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.s $f12,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.d $f12,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.s $f25,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.d $f23,$f23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.s $f28,$f31 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips2/invalid-mips32.s b/test/MC/Mips/mips2/invalid-mips32.s
new file mode 100644
index 000000000000..43ea345441c5
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips32.s
@@ -0,0 +1,44 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ clo $11,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ clz $sp,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ deret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ eret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ jr.hb $4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ jalr.hb $4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ jalr.hb $4, $5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $s6,$13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $zero,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $s3,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $24,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfc0 $a2,$14,1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f11,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub $s7,$k1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msubu $15,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtc0 $9,$29,3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mul $s0,$s4,$at # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sync 0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sync 1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips2/invalid-mips32r2-xfail.s b/test/MC/Mips/mips2/invalid-mips32r2-xfail.s
new file mode 100644
index 000000000000..073f7777b33f
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips32r2-xfail.s
@@ -0,0 +1,11 @@
+# Instructions that are supposed to be invalid but currently aren't
+# This test will XPASS if any insn stops assembling.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2> %t1
+# RUN: not FileCheck %s < %t1
+# XFAIL: *
+
+# CHECK-NOT: error
+ .set noat
+ rdhwr $sp,$11
diff --git a/test/MC/Mips/mips2/invalid-mips32r2.s b/test/MC/Mips/mips2/invalid-mips32r2.s
new file mode 100644
index 000000000000..72a570aa69ab
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips32r2.s
@@ -0,0 +1,66 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ clo $t3,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ clz $sp,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ deret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ di $s8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ei $t6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ eret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ldxc1 $f8,$s7($t7) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $s6,$t5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $zero,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.d $f18,$f19,$f26,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.s $f1,$f31,$f19,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $s3,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $t8,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfc0 $a2,$14,1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhc1 $s8,$f24 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f11,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub $s7,$k1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.d $f10,$f1,$f31,$f18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.s $f12,$f19,$f10,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msubu $t7,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtc0 $t1,$29,3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthc1 $zero,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mul $s0,$s4,$at # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.d $f18,$f9,$f14,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.s $f0,$f5,$f25,$f12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.d $f30,$f8,$f16,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.s $f1,$f24,$f19,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ pause # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotrv $1,$14,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$t2($t6) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seb $t9,$t7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seh $v1,$t4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$t4($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ wsbh $k1,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips2/invalid-mips4-wrong-error.s b/test/MC/Mips/mips2/invalid-mips4-wrong-error.s
new file mode 100644
index 000000000000..193f6d796a13
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips4-wrong-error.s
@@ -0,0 +1,14 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ld $sp,-28645($s1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwu $s3,-24086($v1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ scd $15,-8243($sp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sd $12,5835($10) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdl $a3,-20961($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdr $11,-20423($12) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/test/MC/Mips/mips2/invalid-mips4.s b/test/MC/Mips/mips2/invalid-mips4.s
new file mode 100644
index 000000000000..13923f01df86
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips4.s
@@ -0,0 +1,72 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ceil.l.d $f1,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.l.s $f18,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.d.l $f4,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.s.l $f15,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddiu $k0,$s6,-4586 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddu $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddiv $zero,$k0,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddivu $zero,$s0,$s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmfc1 $12,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmtc1 $s0,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmult $s7,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,$zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsllv $zero,$s4,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrav $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrlv $s3,$14,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsub $a3,$s6,$8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubu $a1,$a1,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ eret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.d $f26,$f7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.s $f12,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ldxc1 $f8,$s7($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f11,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.d $f12,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.s $f25,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$10($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.d $f23,$f23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.s $f28,$f31 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips2/invalid-mips5-wrong-error.s b/test/MC/Mips/mips2/invalid-mips5-wrong-error.s
new file mode 100644
index 000000000000..0c58c6c960eb
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips5-wrong-error.s
@@ -0,0 +1,46 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ abs.ps $f22,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ add.ps $f25,$f27,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ alnv.ps $f12,$f18,$f30,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.eq.ps $fcc5,$f0,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.f.ps $fcc6,$f11,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.le.ps $fcc1,$f7,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.lt.ps $f19,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.nge.ps $f1,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngl.ps $f21,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngle.ps $fcc7,$f12,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngt.ps $fcc5,$f30,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ole.ps $fcc7,$f21,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.olt.ps $fcc3,$f7,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.seq.ps $fcc6,$f31,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.sf.ps $fcc6,$f4,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ueq.ps $fcc1,$f5,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ule.ps $fcc6,$f17,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ult.ps $fcc7,$f14,$f0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.un.ps $fcc4,$f2,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.ps.s $f3,$f18,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pl $f30,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pu $f14,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ madd.ps $f22,$f3,$f14,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mov.ps $f22,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movf.ps $f10,$f28,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movn.ps $f31,$f31,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movt.ps $f20,$f25,$fcc2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movz.ps $f18,$f17,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ msub.ps $f12,$f14,$f29,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mul.ps $f14,$f0,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ neg.ps $f19,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmadd.ps $f27,$f4,$f9,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmsub.ps $f6,$f12,$f14,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pll.ps $f25,$f9,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ plu.ps $f1,$f26,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pul.ps $f9,$f30,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ puu.ps $f24,$f9,$f2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ sub.ps $f5,$f14,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips2/invalid-mips5.s b/test/MC/Mips/mips2/invalid-mips5.s
new file mode 100644
index 000000000000..8f460c7b2732
--- /dev/null
+++ b/test/MC/Mips/mips2/invalid-mips5.s
@@ -0,0 +1,73 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ceil.l.d $f1,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ceil.l.s $f18,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.d.l $f4,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.s.l $f15,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddiu $k0,$s6,-4586 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddu $s3,$at,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddiv $zero,$k0,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ddivu $zero,$s0,$s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmfc1 $t0,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmtc1 $s0,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll $zero,$s4,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsll32 $zero,$zero,18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsllv $zero,$s4,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsra32 $gp,$s2,10 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrav $gp,$s2,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl $s3,$6,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrl32 $s3,$6,23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsrlv $s3,$t2,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubu $a1,$a1,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ eret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.d $f26,$f7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ floor.l.s $f12,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ldxc1 $f8,$s7($t3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$a0,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$a0,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f11,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.d $f12,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ round.l.s $f25,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.d $f23,$f23 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ trunc.l.s $f28,$f31 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$a2($t2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$t0($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips2/valid.s b/test/MC/Mips/mips2/valid.s
new file mode 100644
index 000000000000..9c3706ee3ff5
--- /dev/null
+++ b/test/MC/Mips/mips2/valid.s
@@ -0,0 +1,138 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips2 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ ctc1 $a2,$26
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.s.d $f26,$f8
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ ldc3 $29,-28645($s1)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwc3 $10,-32265($k0)
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ mfc1 $a3,$f27
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $s8,$a0
+ move $25,$a2
+ mtc1 $s8,$f9
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sdc3 $12,5835($10)
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swc3 $10,-32265($k0)
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ xor $s2,$a0,$s8
diff --git a/test/MC/Mips/mips3/invalid-mips32.s b/test/MC/Mips/mips3/invalid-mips32.s
new file mode 100644
index 000000000000..3acd7651e629
--- /dev/null
+++ b/test/MC/Mips/mips3/invalid-mips32.s
@@ -0,0 +1,10 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips3 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+
+ sync 0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sync 1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips3/invalid-mips4.s b/test/MC/Mips/mips3/invalid-mips4.s
new file mode 100644
index 000000000000..9cd92d39e315
--- /dev/null
+++ b/test/MC/Mips/mips3/invalid-mips4.s
@@ -0,0 +1,30 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips3 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldxc1 $f8,$s7($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f11,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$10($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$12($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips3/invalid-mips5-wrong-error.s b/test/MC/Mips/mips3/invalid-mips5-wrong-error.s
new file mode 100644
index 000000000000..2c0246a746b5
--- /dev/null
+++ b/test/MC/Mips/mips3/invalid-mips5-wrong-error.s
@@ -0,0 +1,46 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips3 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ abs.ps $f22,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ add.ps $f25,$f27,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ alnv.ps $f12,$f18,$f30,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.eq.ps $fcc5,$f0,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.f.ps $fcc6,$f11,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.le.ps $fcc1,$f7,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.lt.ps $f19,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.nge.ps $f1,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngl.ps $f21,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngle.ps $fcc7,$f12,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngt.ps $fcc5,$f30,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ole.ps $fcc7,$f21,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.olt.ps $fcc3,$f7,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.seq.ps $fcc6,$f31,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.sf.ps $fcc6,$f4,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ueq.ps $fcc1,$f5,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ule.ps $fcc6,$f17,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ult.ps $fcc7,$f14,$f0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.un.ps $fcc4,$f2,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.ps.s $f3,$f18,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pl $f30,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pu $f14,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ madd.ps $f22,$f3,$f14,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mov.ps $f22,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movf.ps $f10,$f28,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movn.ps $f31,$f31,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movt.ps $f20,$f25,$fcc2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movz.ps $f18,$f17,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ msub.ps $f12,$f14,$f29,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mul.ps $f14,$f0,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ neg.ps $f19,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmadd.ps $f27,$f4,$f9,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmsub.ps $f6,$f12,$f14,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pll.ps $f25,$f9,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ plu.ps $f1,$f26,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pul.ps $f9,$f30,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ puu.ps $f24,$f9,$f2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ sub.ps $f5,$f14,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips3/invalid-mips5.s b/test/MC/Mips/mips3/invalid-mips5.s
new file mode 100644
index 000000000000..307eee82075c
--- /dev/null
+++ b/test/MC/Mips/mips3/invalid-mips5.s
@@ -0,0 +1,32 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips3 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1f $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ bc1t $fcc1, 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldxc1 $f8,$s7($t3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.d $f6,$f11,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movf.s $f23,$f5,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ movz $a1,$s6,$a5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$a5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$a6($t2) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$t0($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips3/valid.s b/test/MC/Mips/mips3/valid.s
new file mode 100644
index 000000000000..cb209fdb208f
--- /dev/null
+++ b/test/MC/Mips/mips3/valid.s
@@ -0,0 +1,197 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips3 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0xbc,0xa1,0x00,0x08]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.l.d $f1,$f3
+ ceil.l.s $f18,$f13
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ ctc1 $a2,$26
+ cvt.d.l $f4,$f16
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.l.d $f24,$f15
+ cvt.l.s $f11,$f29
+ cvt.s.d $f26,$f8
+ cvt.s.l $f15,$f30
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ dadd $s3,$at,$ra
+ dadd $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ dadd $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddi $sp,$s4,-27705
+ daddi $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ daddi $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddiu $k0,$s6,-4586
+ daddu $s3,$at,$ra
+ ddiv $zero,$k0,$s3
+ ddivu $zero,$s0,$s1
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ dmfc1 $12,$f13
+ dmtc1 $s0,$f14
+ dmult $s7,$9
+ dmultu $a1,$a2
+ dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
+ dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
+ dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsra $gp,10 # CHECK: dsra $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbb]
+ dsra $gp,$s2,10 # CHECK: dsra $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbb]
+ dsra $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsra32 $gp,10 # CHECK: dsra32 $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbf]
+ dsra32 $gp,$s2,10 # CHECK: dsra32 $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbf]
+ dsrav $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
+ dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
+ dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
+ dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
+ dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsub $a3,$s6,$8
+ dsub $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsub $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubi $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsubi $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubu $a1,$a1,$k0
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ eret
+ floor.l.d $f26,$f7
+ floor.l.s $f12,$f5
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ld $sp,-28645($s1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ ldl $24,-4167($24)
+ ldr $14,-30358($s4)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ lld $zero,-14736($ra) # CHECK: lld $zero, -14736($ra) # encoding: [0xd3,0xe0,0xc6,0x70]
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ lwu $s3,-24086($v1)
+ mfc1 $a3,$f27
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $a0,$a3
+ move $s5,$a0
+ move $s8,$a0
+ move $25,$a2
+ mtc1 $s8,$f9
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ round.l.d $f12,$f1
+ round.l.s $f25,$f5
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ scd $15,-8243($sp) # CHECK: scd $15, -8243($sp) # encoding: [0xf3,0xaf,0xdf,0xcd]
+ sd $12,5835($10)
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sdl $a3,-20961($s8)
+ sdr $11,-20423($12)
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.l.d $f23,$f23
+ trunc.l.s $f28,$f31
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ xor $s2,$a0,$s8
diff --git a/test/MC/Mips/mips32/abiflags.s b/test/MC/Mips/mips32/abiflags.s
new file mode 100644
index 000000000000..dd772c0ba5a7
--- /dev/null
+++ b/test/MC/Mips/mips32/abiflags.s
@@ -0,0 +1,36 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# CHECK-ASM: .module fp=32
+
+# Checking if the Mips.abiflags were correctly emitted.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags (12)
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00002001 01010001 00000000 00000000 |.. .............|
+# CHECK-OBJ: 0010: 00000001 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+ .module fp=32
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/mips32/invalid-mips32r2-xfail.s b/test/MC/Mips/mips32/invalid-mips32r2-xfail.s
new file mode 100644
index 000000000000..604ddbf97439
--- /dev/null
+++ b/test/MC/Mips/mips32/invalid-mips32r2-xfail.s
@@ -0,0 +1,11 @@
+# Instructions that are supposed to be invalid but currently aren't
+# This test will XPASS if any insn stops assembling.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32 \
+# RUN: 2> %t1
+# RUN: not FileCheck %s < %t1
+# XFAIL: *
+
+# CHECK-NOT: error
+ .set noat
+ rdhwr $sp,$11
diff --git a/test/MC/Mips/mips32/invalid-mips32r2.s b/test/MC/Mips/mips32/invalid-mips32r2.s
new file mode 100644
index 000000000000..fa6fe326e163
--- /dev/null
+++ b/test/MC/Mips/mips32/invalid-mips32r2.s
@@ -0,0 +1,34 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ cvt.l.d $f24,$f15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ cvt.l.s $f11,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ di $s8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ei $t6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ldxc1 $f8,$s7($t7) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.d $f18,$f19,$f26,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.s $f1,$f31,$f19,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhc1 $s8,$f24 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.d $f10,$f1,$f31,$f18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.s $f12,$f19,$f10,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthc1 $zero,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.d $f18,$f9,$f14,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.s $f0,$f5,$f25,$f12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.d $f30,$f8,$f16,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.s $f1,$f24,$f19,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ pause # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotrv $1,$14,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$t2($t6) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seb $t9,$t7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seh $v1,$t4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$t4($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ wsbh $k1,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips32/invalid-mips64.s b/test/MC/Mips/mips32/invalid-mips64.s
new file mode 100644
index 000000000000..41040edb7db1
--- /dev/null
+++ b/test/MC/Mips/mips32/invalid-mips64.s
@@ -0,0 +1,9 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips32 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ dclo $s2,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclz $s0,$t9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips32/valid-xfail.s b/test/MC/Mips/mips32/valid-xfail.s
new file mode 100644
index 000000000000..d680740babf8
--- /dev/null
+++ b/test/MC/Mips/mips32/valid-xfail.s
@@ -0,0 +1,38 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ c.eq.d $fcc1,$f15,$f15
+ c.eq.s $fcc5,$f24,$f17
+ c.f.d $fcc4,$f11,$f21
+ c.f.s $fcc4,$f30,$f7
+ c.le.d $fcc4,$f18,$f1
+ c.le.s $fcc6,$f24,$f4
+ c.lt.d $fcc3,$f9,$f3
+ c.lt.s $fcc2,$f17,$f14
+ c.nge.d $fcc5,$f21,$f16
+ c.nge.s $fcc3,$f11,$f8
+ c.ngl.s $fcc2,$f31,$f23
+ c.ngle.s $fcc2,$f18,$f23
+ c.ngt.d $fcc4,$f24,$f7
+ c.ngt.s $fcc5,$f8,$f13
+ c.ole.d $fcc2,$f16,$f31
+ c.ole.s $fcc3,$f7,$f20
+ c.olt.d $fcc4,$f19,$f28
+ c.olt.s $fcc6,$f20,$f7
+ c.seq.d $fcc4,$f31,$f7
+ c.seq.s $fcc7,$f1,$f25
+ c.ueq.d $fcc4,$f13,$f25
+ c.ueq.s $fcc6,$f3,$f30
+ c.ule.d $fcc7,$f25,$f18
+ c.ule.s $fcc7,$f21,$f30
+ c.ult.d $fcc6,$f6,$f17
+ c.ult.s $fcc7,$f24,$f10
+ c.un.d $fcc6,$f23,$f24
+ c.un.s $fcc1,$f30,$f4
+ rorv $13,$a3,$s5
diff --git a/test/MC/Mips/mips32/valid.s b/test/MC/Mips/mips32/valid.s
new file mode 100644
index 000000000000..d330905ae29a
--- /dev/null
+++ b/test/MC/Mips/mips32/valid.s
@@ -0,0 +1,166 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f $fcc1, 4 # CHECK: bc1f $fcc1, 4 # encoding: [0x45,0x04,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t $fcc1, 4 # CHECK: bc1t $fcc1, 4 # encoding: [0x45,0x05,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0xbc,0xa1,0x00,0x08]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ clo $11,$a1 # CHECK: clo $11, $5 # encoding: [0x70,0xab,0x58,0x21]
+ clz $sp,$gp # CHECK: clz $sp, $gp # encoding: [0x73,0x9d,0xe8,0x20]
+ ctc1 $a2,$26
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.s.d $f26,$f8
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ deret
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ eret
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ madd $s6,$13
+ madd $zero,$9
+ maddu $s3,$gp
+ maddu $24,$s2
+ mfc0 $a2,$14,1
+ mfc1 $a3,$f27
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $s8,$a0
+ move $25,$a2
+ movf $gp,$8,$fcc7
+ movf.d $f6,$f11,$fcc5
+ movf.s $f23,$f5,$fcc6
+ movn $v1,$s1,$s0
+ movn.d $f27,$f21,$k0
+ movn.s $f12,$f0,$s7
+ movt $zero,$s4,$fcc5
+ movt.d $f0,$f2,$fcc0
+ movt.s $f30,$f2,$fcc1
+ movz $a1,$s6,$9
+ movz.d $f12,$f29,$9
+ movz.s $f25,$f7,$v1
+ msub $s7,$k1
+ msubu $15,$a1
+ mtc0 $9,$29,3
+ mtc1 $s8,$f9
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul $s0,$s4,$at
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0xcc,0xa1,0x00,0x08]
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ sdbbp # CHECK: sdbbp # encoding: [0x70,0x00,0x00,0x3f]
+ sdbbp 34 # CHECK: sdbbp 34 # encoding: [0x70,0x00,0x08,0xbf]
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ sync 1 # CHECK: sync 1 # encoding: [0x00,0x00,0x00,0x4f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ xor $s2,$a0,$s8
diff --git a/test/MC/Mips/mips32r2/abiflags.s b/test/MC/Mips/mips32r2/abiflags.s
new file mode 100644
index 000000000000..e3bb15bdddee
--- /dev/null
+++ b/test/MC/Mips/mips32r2/abiflags.s
@@ -0,0 +1,37 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# CHECK-ASM: .module fp=32
+# CHECK-ASM: .set fp=64
+
+# Checking if the Mips.abiflags were correctly emitted.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags (12)
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00002002 01010001 00000000 00000000 |.. .............|
+# CHECK-OBJ: 0010: 00000001 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+ .module fp=32
+ .set fp=64
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/mips32r2/invalid-mips64r2.s b/test/MC/Mips/mips32r2/invalid-mips64r2.s
new file mode 100644
index 000000000000..293e58eaed35
--- /dev/null
+++ b/test/MC/Mips/mips32r2/invalid-mips64r2.s
@@ -0,0 +1,10 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding \
+# RUN: -mcpu=mips32r2 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ dsbh $v1,$t6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dshd $v0,$sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+
diff --git a/test/MC/Mips/mips32r2/invalid.s b/test/MC/Mips/mips32r2/invalid.s
new file mode 100644
index 000000000000..ebccc43834e0
--- /dev/null
+++ b/test/MC/Mips/mips32r2/invalid.s
@@ -0,0 +1,10 @@
+# Instructions that are valid for the current ISA but should be rejected by the assembler (e.g.
+# invalid set of operands or operand's restrictions not met).
+
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -mcpu=mips32r2 2>%t1
+# RUN: FileCheck %s < %t1 -check-prefix=ASM
+
+ .text
+ .set noreorder
+ jalr.hb $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
+ jalr.hb $31, $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
diff --git a/test/MC/Mips/mips32r2/valid-xfail.s b/test/MC/Mips/mips32r2/valid-xfail.s
new file mode 100644
index 000000000000..ef02d51b53fd
--- /dev/null
+++ b/test/MC/Mips/mips32r2/valid-xfail.s
@@ -0,0 +1,309 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r2 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ abs.ps $f22,$f8
+ absq_s.ph $8,$a0
+ absq_s.qb $15,$s1
+ absq_s.w $s3,$ra
+ add.ps $f25,$f27,$f13
+ addq.ph $s1,$15,$at
+ addq_s.ph $s3,$s6,$s2
+ addq_s.w $a2,$8,$at
+ addqh.ph $s4,$14,$s1
+ addqh.w $s7,$s7,$k1
+ addqh_r.ph $sp,$25,$s8
+ addqh_r.w $8,$v1,$zero
+ addsc $s8,$15,$12
+ addu.ph $a2,$14,$s3
+ addu.qb $s6,$v1,$v1
+ addu_s.ph $a3,$s3,$gp
+ addu_s.qb $s4,$s8,$s1
+ adduh.qb $a1,$a1,$at
+ adduh_r.qb $a0,$9,$12
+ addwc $k0,$s6,$s7
+ alnv.ps $f12,$f18,$f30,$12
+ and.v $w10,$w25,$w29
+ bitrev $14,$at
+ bmnz.v $w15,$w2,$w28
+ bmz.v $w13,$w11,$w21
+ bsel.v $w28,$w7,$w0
+ c.eq.d $fcc1,$f15,$f15
+ c.eq.ps $fcc5,$f0,$f9
+ c.eq.s $fcc5,$f24,$f17
+ c.f.d $fcc4,$f11,$f21
+ c.f.ps $fcc6,$f11,$f11
+ c.f.s $fcc4,$f30,$f7
+ c.le.d $fcc4,$f18,$f1
+ c.le.ps $fcc1,$f7,$f20
+ c.le.s $fcc6,$f24,$f4
+ c.lt.d $fcc3,$f9,$f3
+ c.lt.ps $f19,$f5
+ c.lt.s $fcc2,$f17,$f14
+ c.nge.d $fcc5,$f21,$f16
+ c.nge.ps $f1,$f26
+ c.nge.s $fcc3,$f11,$f8
+ c.ngl.ps $f21,$f30
+ c.ngl.s $fcc2,$f31,$f23
+ c.ngle.ps $fcc7,$f12,$f20
+ c.ngle.s $fcc2,$f18,$f23
+ c.ngt.d $fcc4,$f24,$f7
+ c.ngt.ps $fcc5,$f30,$f6
+ c.ngt.s $fcc5,$f8,$f13
+ c.ole.d $fcc2,$f16,$f31
+ c.ole.ps $fcc7,$f21,$f8
+ c.ole.s $fcc3,$f7,$f20
+ c.olt.d $fcc4,$f19,$f28
+ c.olt.ps $fcc3,$f7,$f16
+ c.olt.s $fcc6,$f20,$f7
+ c.seq.d $fcc4,$f31,$f7
+ c.seq.ps $fcc6,$f31,$f14
+ c.seq.s $fcc7,$f1,$f25
+ c.sf.ps $fcc6,$f4,$f6
+ c.ueq.d $fcc4,$f13,$f25
+ c.ueq.ps $fcc1,$f5,$f29
+ c.ueq.s $fcc6,$f3,$f30
+ c.ule.d $fcc7,$f25,$f18
+ c.ule.ps $fcc6,$f17,$f3
+ c.ule.s $fcc7,$f21,$f30
+ c.ult.d $fcc6,$f6,$f17
+ c.ult.ps $fcc7,$f14,$f0
+ c.ult.s $fcc7,$f24,$f10
+ c.un.d $fcc6,$f23,$f24
+ c.un.ps $fcc4,$f2,$f26
+ c.un.s $fcc1,$f30,$f4
+ ceil.l.d $f1,$f3
+ ceil.l.s $f18,$f13
+ cfcmsa $s6,$19
+ cmp.eq.ph $s7,$14
+ cmp.le.ph $8,$14
+ cmp.lt.ph $k0,$sp
+ cmpgdu.eq.qb $s3,$zero,$k0
+ cmpgdu.le.qb $v1,$15,$s2
+ cmpgdu.lt.qb $s0,$gp,$sp
+ cmpgu.eq.qb $14,$s6,$s8
+ cmpgu.le.qb $9,$a3,$s4
+ cmpgu.lt.qb $sp,$at,$8
+ cmpu.eq.qb $v0,$24
+ cmpu.le.qb $s1,$a1
+ cmpu.lt.qb $at,$a3
+ ctcmsa $31,$s7
+ cvt.d.l $f4,$f16
+ cvt.ps.s $f3,$f18,$f19
+ cvt.s.l $f15,$f30
+ cvt.s.pl $f30,$f1
+ cvt.s.pu $f14,$f25
+ dmt $k0
+ dpa.w.ph $ac1,$s7,$k0
+ dpaq_s.w.ph $ac2,$a0,$13
+ dpaq_sa.l.w $ac0,$a2,$14
+ dpaqx_s.w.ph $ac3,$a0,$24
+ dpaqx_sa.w.ph $ac1,$zero,$s5
+ dpau.h.qbl $ac1,$10,$24
+ dpau.h.qbr $ac1,$s7,$s6
+ dpax.w.ph $ac3,$a0,$k0
+ dps.w.ph $ac1,$a3,$a1
+ dpsq_s.w.ph $ac0,$gp,$k0
+ dpsq_sa.l.w $ac0,$a3,$15
+ dpsqx_s.w.ph $ac3,$13,$a3
+ dpsqx_sa.w.ph $ac3,$sp,$s2
+ dpsu.h.qbl $ac2,$14,$10
+ dpsu.h.qbr $ac2,$a1,$s6
+ dpsx.w.ph $ac0,$s7,$gp
+ dvpe $s6
+ emt $8
+ evpe $v0
+ extpdpv $s6,$ac0,$s8
+ extpv $13,$ac0,$14
+ extrv.w $8,$ac3,$at
+ extrv_r.w $8,$ac1,$s6
+ extrv_rs.w $gp,$ac1,$s6
+ extrv_s.h $s2,$ac1,$14
+ fclass.d $w14,$w27
+ fclass.w $w19,$w28
+ fexupl.d $w10,$w29
+ fexupl.w $w12,$w27
+ fexupr.d $w31,$w15
+ fexupr.w $w29,$w12
+ ffint_s.d $w1,$w30
+ ffint_s.w $w16,$w14
+ ffint_u.d $w23,$w18
+ ffint_u.w $w19,$w12
+ ffql.d $w2,$w3
+ ffql.w $w9,$w0
+ ffqr.d $w25,$w24
+ ffqr.w $w10,$w6
+ fill.b $w9,$v1
+ fill.h $w9,$8
+ fill.w $w31,$15
+ flog2.d $w12,$w16
+ flog2.w $w19,$w23
+ floor.l.d $f26,$f7
+ floor.l.s $f12,$f5
+ fork $s2,$8,$a0
+ frcp.d $w12,$w4
+ frcp.w $w30,$w8
+ frint.d $w20,$w8
+ frint.w $w11,$w29
+ frsqrt.d $w29,$w2
+ frsqrt.w $w9,$w8
+ fsqrt.d $w3,$w1
+ fsqrt.w $w5,$w15
+ ftint_s.d $w31,$w26
+ ftint_s.w $w27,$w14
+ ftint_u.d $w5,$w31
+ ftint_u.w $w12,$w29
+ ftrunc_s.d $w4,$w22
+ ftrunc_s.w $w24,$w7
+ ftrunc_u.d $w20,$w25
+ ftrunc_u.w $w7,$w26
+ insv $s2,$at
+ iret
+ lbe $14,122($9)
+ lbue $11,-108($10)
+ lbux $9,$14($v0)
+ lhe $s6,219($v1)
+ lhue $gp,118($11)
+ lhx $sp,$k0($15)
+ lle $gp,-237($ra)
+ lwe $ra,-145($14)
+ lwle $11,-42($11)
+ lwre $sp,-152($24)
+ lwx $12,$12($s4)
+ madd.ps $f22,$f3,$f14,$f3
+ maq_s.w.phl $ac2,$25,$11
+ maq_s.w.phr $ac0,$10,$25
+ maq_sa.w.phl $ac3,$a1,$v1
+ maq_sa.w.phr $ac1,$at,$10
+ mfgc0 $s6,c0_datahi1
+ mflo $9,$ac2
+ modsub $a3,$12,$a3
+ mov.ps $f22,$f17
+ move.v $w8,$w17
+ movf.ps $f10,$f28,$fcc6
+ movn.ps $f31,$f31,$s3
+ movt.ps $f20,$f25,$fcc2
+ movz.ps $f18,$f17,$ra
+ msub $ac2,$sp,$14
+ msub.ps $f12,$f14,$f29,$f17
+ msubu $ac2,$a1,$24
+ mtc0 $9,c0_datahi1
+ mtgc0 $s4,$21,7
+ mthi $v0,$ac1
+ mthlip $a3,$ac0
+ mul.ph $s4,$24,$s0
+ mul.ps $f14,$f0,$f16
+ mul_s.ph $10,$14,$15
+ muleq_s.w.phl $11,$s4,$s4
+ muleq_s.w.phr $s6,$a0,$s8
+ muleu_s.ph.qbl $a2,$14,$8
+ muleu_s.ph.qbr $a1,$ra,$9
+ mulq_rs.ph $s2,$14,$15
+ mulq_rs.w $at,$s4,$25
+ mulq_s.ph $s0,$k1,$15
+ mulq_s.w $9,$a3,$s0
+ mulsa.w.ph $ac1,$s4,$s6
+ mulsaq_s.w.ph $ac0,$ra,$s2
+ neg.ps $f19,$f13
+ nloc.b $w12,$w30
+ nloc.d $w16,$w7
+ nloc.h $w21,$w17
+ nloc.w $w17,$w16
+ nlzc.b $w12,$w7
+ nlzc.d $w14,$w14
+ nlzc.h $w24,$w24
+ nlzc.w $w10,$w4
+ nmadd.ps $f27,$f4,$f9,$f25
+ nmsub.ps $f6,$f12,$f14,$f17
+ nor.v $w20,$w20,$w15
+ or.v $w13,$w23,$w12
+ packrl.ph $ra,$24,$14
+ pcnt.b $w30,$w15
+ pcnt.d $w5,$w16
+ pcnt.h $w20,$w24
+ pcnt.w $w22,$w20
+ pick.ph $ra,$a2,$gp
+ pick.qb $11,$a0,$gp
+ pll.ps $f25,$f9,$f30
+ plu.ps $f1,$f26,$f29
+ preceq.w.phl $s8,$gp
+ preceq.w.phr $s5,$15
+ precequ.ph.qbl $s7,$ra
+ precequ.ph.qbla $a0,$9
+ precequ.ph.qbr $ra,$s3
+ precequ.ph.qbra $24,$8
+ preceu.ph.qbl $sp,$8
+ preceu.ph.qbla $s6,$11
+ preceu.ph.qbr $gp,$s1
+ preceu.ph.qbra $k1,$s0
+ precr.qb.ph $v0,$12,$s8
+ precrq.ph.w $14,$s8,$24
+ precrq.qb.ph $a2,$12,$12
+ precrq_rs.ph.w $a1,$k0,$a3
+ precrqu_s.qb.ph $zero,$gp,$s5
+ pul.ps $f9,$f30,$f26
+ puu.ps $f24,$f9,$f2
+ raddu.w.qb $25,$s3
+ rdpgpr $s3,$9
+ recip.d $f19,$f6
+ recip.s $f3,$f30
+ repl.ph $at,-307
+ replv.ph $v1,$s7
+ replv.qb $25,$12
+ rorv $13,$a3,$s5
+ round.l.d $f12,$f1
+ round.l.s $f25,$f5
+ rsqrt.d $f3,$f28
+ rsqrt.s $f4,$f8
+ sbe $s7,33($s1)
+ sce $sp,189($10)
+ she $24,105($v0)
+ shilo $ac1,26
+ shilov $ac2,$10
+ shllv.ph $10,$s0,$s0
+ shllv.qb $gp,$v1,$zero
+ shllv_s.ph $k1,$at,$13
+ shllv_s.w $s1,$ra,$k0
+ shrav.ph $25,$s2,$s1
+ shrav.qb $zero,$24,$11
+ shrav_r.ph $s3,$11,$25
+ shrav_r.qb $a0,$sp,$s5
+ shrav_r.w $s7,$s4,$s6
+ shrlv.ph $14,$10,$9
+ shrlv.qb $a2,$s2,$11
+ sub.ps $f5,$f14,$f26
+ subq.ph $ra,$9,$s8
+ subq_s.ph $13,$s8,$s5
+ subq_s.w $k1,$a2,$a3
+ subqh.ph $10,$at,$9
+ subqh.w $v0,$a2,$zero
+ subqh_r.ph $a0,$12,$s6
+ subqh_r.w $10,$a2,$gp
+ subu.ph $9,$s6,$s4
+ subu.qb $s6,$a2,$s6
+ subu_s.ph $v1,$a1,$s3
+ subu_s.qb $s1,$at,$ra
+ subuh.qb $zero,$gp,$gp
+ subuh_r.qb $s4,$s8,$s6
+ swe $24,94($k0)
+ swle $v1,-209($gp)
+ swre $k0,-202($s2)
+ synci 20023($s0)
+ tlbginv
+ tlbginvf
+ tlbgp
+ tlbgr
+ tlbgwi
+ tlbgwr
+ tlbinv
+ tlbinvf
+ trunc.l.d $f23,$f23
+ trunc.l.s $f28,$f31
+ wrpgpr $zero,$13
+ xor.v $w20,$w21,$w30
+ yield $v1,$s0
diff --git a/test/MC/Mips/mips32r2/valid.s b/test/MC/Mips/mips32r2/valid.s
new file mode 100644
index 000000000000..631c691ed711
--- /dev/null
+++ b/test/MC/Mips/mips32r2/valid.s
@@ -0,0 +1,197 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r2 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f $fcc1, 4 # CHECK: bc1f $fcc1, 4 # encoding: [0x45,0x04,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t $fcc1, 4 # CHECK: bc1t $fcc1, 4 # encoding: [0x45,0x05,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0xbc,0xa1,0x00,0x08]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ clo $11,$a1 # CHECK: clo $11, $5 # encoding: [0x70,0xab,0x58,0x21]
+ clz $sp,$gp # CHECK: clz $sp, $gp # encoding: [0x73,0x9d,0xe8,0x20]
+ ctc1 $a2,$26
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.l.d $f24,$f15
+ cvt.l.s $f11,$f29
+ cvt.s.d $f26,$f8
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ deret
+ di $s8
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ ei $14
+ eret
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ jr.hb $4 # CHECK: jr.hb $4 # encoding: [0x00,0x80,0x04,0x08]
+ jalr.hb $4 # CHECK: jalr.hb $4 # encoding: [0x00,0x80,0xfc,0x09]
+ jalr.hb $4, $5 # CHECK: jalr.hb $4, $5 # encoding: [0x00,0xa0,0x24,0x09]
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ ldxc1 $f8,$s7($15)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ luxc1 $f19,$s6($s5)
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ lwxc1 $f12,$s1($s8)
+ madd $s6,$13
+ madd $zero,$9
+ madd.d $f18,$f19,$f26,$f20
+ madd.s $f1,$f31,$f19,$f25
+ maddu $s3,$gp
+ maddu $24,$s2
+ mfc0 $a2,$14,1
+ mfc1 $a3,$f27
+ mfhc1 $s8,$f24
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $s8,$a0
+ move $25,$a2
+ movf $gp,$8,$fcc7
+ movf.d $f6,$f11,$fcc5
+ movf.s $f23,$f5,$fcc6
+ movn $v1,$s1,$s0
+ movn.d $f27,$f21,$k0
+ movn.s $f12,$f0,$s7
+ movt $zero,$s4,$fcc5
+ movt.d $f0,$f2,$fcc0
+ movt.s $f30,$f2,$fcc1
+ movz $a1,$s6,$9
+ movz.d $f12,$f29,$9
+ movz.s $f25,$f7,$v1
+ msub $s7,$k1
+ msub.d $f10,$f1,$f31,$f18
+ msub.s $f12,$f19,$f10,$f16
+ msubu $15,$a1
+ mtc0 $9,$29,3
+ mtc1 $s8,$f9
+ mthc1 $zero,$f16
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul $s0,$s4,$at
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nmadd.d $f18,$f9,$f14,$f19
+ nmadd.s $f0,$f5,$f25,$f12
+ nmsub.d $f30,$f8,$f16,$f30
+ nmsub.s $f1,$f24,$f19,$f4
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ pause # CHECK: pause # encoding: [0x00,0x00,0x01,0x40]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0xcc,0xa1,0x00,0x08]
+ rdhwr $sp,$11
+ rotr $1,15 # CHECK: rotr $1, $1, 15 # encoding: [0x00,0x21,0x0b,0xc2]
+ rotr $1,$14,15 # CHECK: rotr $1, $14, 15 # encoding: [0x00,0x2e,0x0b,0xc2]
+ rotrv $1,$14,$15 # CHECK: rotrv $1, $14, $15 # encoding: [0x01,0xee,0x08,0x46]
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ sdbbp # CHECK: sdbbp # encoding: [0x70,0x00,0x00,0x3f]
+ sdbbp 34 # CHECK: sdbbp 34 # encoding: [0x70,0x00,0x08,0xbf]
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sdxc1 $f11,$10($14)
+ seb $25,$15
+ seh $v1,$12
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ suxc1 $f12,$k1($13)
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ swxc1 $f19,$12($k0)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ sync 1 # CHECK: sync 1 # encoding: [0x00,0x00,0x00,0x4f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ wsbh $k1,$9
+ xor $s2,$a0,$s8
diff --git a/test/MC/Mips/mips32r6/invalid-mips1-wrong-error.s b/test/MC/Mips/mips32r6/invalid-mips1-wrong-error.s
new file mode 100644
index 000000000000..52fa5f52b8db
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips1-wrong-error.s
@@ -0,0 +1,17 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc2f 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2t 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ lwl $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwr $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ swl $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ swr $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwle $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ lwre $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ swle $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ swre $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips32r6/invalid-mips1.s b/test/MC/Mips/mips32r6/invalid-mips1.s
new file mode 100644
index 000000000000..44d4fbb86623
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips1.s
@@ -0,0 +1,24 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ addi $13,$9,26322 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.ngl.d $f29,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.ngle.d $f0,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.sf.d $f30,$f0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.sf.s $f14,$f22 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mflo $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthi $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$v0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $9,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $gp,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+# div has been re-encoded. See valid.s
+# divu has been re-encoded. See valid.s
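The "re-encoded" notes just above refer to MIPS32r6 redefining div and divu as three-register instructions. Before R6, a division writes its results to the HI/LO accumulator pair and the quotient is read back with mflo, which is also why mfhi/mflo/mthi/mtlo are rejected above: R6 dropped the accumulator. A minimal sketch of the two forms, for orientation only (the pre-R6 line is the one from the mips32r2 valid.s earlier in this diff, the R6 lines are from mips32r6/valid.s further below):

        div     $zero,$25,$11   # pre-R6: quotient -> LO, remainder -> HI
        mflo    $2              # pre-R6: read the quotient back from LO
        div     $2,$3,$4        # MIPS32r6: quotient written straight to $2
        mod     $2,$3,$4        # MIPS32r6: remainder as a separate instruction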
diff --git a/test/MC/Mips/mips32r6/invalid-mips2-wrong-error.s b/test/MC/Mips/mips32r6/invalid-mips2-wrong-error.s
new file mode 100644
index 000000000000..b799c8e3fcd7
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips2-wrong-error.s
@@ -0,0 +1,20 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ beql $1,$2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgezall $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgezl $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgtzl $4,16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ blezl $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bltzall $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bltzl $4,16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bnel $1,$2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips32r6/invalid-mips2.s b/test/MC/Mips/mips32r6/invalid-mips2.s
new file mode 100644
index 000000000000..bfa2c4c3ee74
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips2.s
@@ -0,0 +1,26 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ addi $13,$9,26322 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mflo $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthi $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$v0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $9,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $gp,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $14,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $12,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+# div has been re-encoded. See valid.s
+# divu has been re-encoded. See valid.s
diff --git a/test/MC/Mips/mips32r6/invalid-mips32-wrong-error.s b/test/MC/Mips/mips32r6/invalid-mips32-wrong-error.s
new file mode 100644
index 000000000000..e63bdd4e7078
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips32-wrong-error.s
@@ -0,0 +1,20 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1tl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1fl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2f 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2f $fcc0,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2t 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2t $fcc0,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2tl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2fl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips32r6/invalid-mips32.s b/test/MC/Mips/mips32r6/invalid-mips32.s
new file mode 100644
index 000000000000..e0889ea07bba
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips32.s
@@ -0,0 +1,25 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ madd $s6,$13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $zero,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $s3,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $24,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz $a1,$s6,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub $s7,$k1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msubu $15,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips32r6/invalid-mips32r2.s b/test/MC/Mips/mips32r6/invalid-mips32r2.s
new file mode 100644
index 000000000000..25694e330486
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips32r2.s
@@ -0,0 +1,15 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ madd.d $f18,$f19,$f26,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.s $f1,$f31,$f19,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.d $f10,$f1,$f31,$f18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.s $f12,$f19,$f10,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.d $f18,$f9,$f14,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.s $f0,$f5,$f25,$f12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.d $f30,$f8,$f16,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.s $f1,$f24,$f19,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips32r6/invalid-mips4-wrong-error.s b/test/MC/Mips/mips32r6/invalid-mips4-wrong-error.s
new file mode 100644
index 000000000000..f3131a947367
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips4-wrong-error.s
@@ -0,0 +1,21 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ beql $1,$2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgezall $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgezl $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgtzl $4,16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ blezl $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bltzall $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bltzl $4,16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bnel $1,$2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ prefx 0,$2($31) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips32r6/invalid-mips4.s b/test/MC/Mips/mips32r6/invalid-mips4.s
new file mode 100644
index 000000000000..8ba2ed88ad6e
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips4.s
@@ -0,0 +1,11 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ldxc1 $f8,$s7($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$10($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$12($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips32r6/invalid-mips5-wrong-error.s b/test/MC/Mips/mips32r6/invalid-mips5-wrong-error.s
new file mode 100644
index 000000000000..99d10c327a4b
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips5-wrong-error.s
@@ -0,0 +1,11 @@
+# Instructions that are invalid but currently emit the wrong error message.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1any2f $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1any2t $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1any4f $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1any4t $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips32r6/invalid-mips5.s b/test/MC/Mips/mips32r6/invalid-mips5.s
new file mode 100644
index 000000000000..63f1ccaef8f3
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid-mips5.s
@@ -0,0 +1,9 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($13) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips32r6/invalid.s b/test/MC/Mips/mips32r6/invalid.s
new file mode 100644
index 000000000000..82cb5ab49430
--- /dev/null
+++ b/test/MC/Mips/mips32r6/invalid.s
@@ -0,0 +1,14 @@
+# Instructions that are available for the current ISA but should be rejected by
+# the assembler (e.g. an invalid set of operands, or operand restrictions that are not met).
+
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -mcpu=mips32r6 2>%t1
+# RUN: FileCheck %s < %t1 -check-prefix=ASM
+
+ .text
+ .set noreorder
+ .set noat
+ jalr.hb $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
+ jalr.hb $31, $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
+ ldc2 $8,-21181($at) # ASM: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdc2 $20,23157($s2) # ASM: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swc2 $25,24880($s0) # ASM: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips32r6/relocations.s b/test/MC/Mips/mips32r6/relocations.s
new file mode 100644
index 000000000000..13b3387e5910
--- /dev/null
+++ b/test/MC/Mips/mips32r6/relocations.s
@@ -0,0 +1,70 @@
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: | FileCheck %s -check-prefix=CHECK-FIXUP
+# RUN: llvm-mc %s -filetype=obj -triple=mips-unknown-linux -mcpu=mips32r6 \
+# RUN: | llvm-readobj -r | FileCheck %s -check-prefix=CHECK-ELF
+#------------------------------------------------------------------------------
+# Check that the assembler can handle the documented syntax for fixups.
+#------------------------------------------------------------------------------
+# CHECK-FIXUP: addiupc $2, bar # encoding: [0xec,0b01000AAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC19_S2
+# CHECK-FIXUP: beqc $5, $6, bar # encoding: [0x20,0xa6,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_Mips_PC16
+# CHECK-FIXUP: bnec $5, $6, bar # encoding: [0x60,0xa6,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_Mips_PC16
+# CHECK-FIXUP: beqzc $9, bar # encoding: [0xd9,0b001AAAAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC21_S2
+# CHECK-FIXUP: bnezc $9, bar # encoding: [0xf9,0b001AAAAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC21_S2
+# CHECK-FIXUP: balc bar # encoding: [0b111010AA,A,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC26_S2
+# CHECK-FIXUP: bc bar # encoding: [0b110010AA,A,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC26_S2
+# CHECK-FIXUP: aluipc $2, %pcrel_hi(bar) # encoding: [0xec,0x5f,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar@PCREL_HI16,
+# CHECK-FIXUP: kind: fixup_MIPS_PCHI16
+# CHECK-FIXUP: addiu $2, $2, %pcrel_lo(bar) # encoding: [0x24,0x42,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar@PCREL_LO16,
+# CHECK-FIXUP: kind: fixup_MIPS_PCLO16
+# CHECK-FIXUP: lwpc $2, bar # encoding: [0xec,0b01001AAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC19_S2
+# CHECK-FIXUP: lwupc $2, bar # encoding: [0xec,0b01010AAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC19_S2
+#------------------------------------------------------------------------------
+# Check that the appropriate relocations were created.
+#------------------------------------------------------------------------------
+# CHECK-ELF: Relocations [
+# CHECK-ELF: 0x0 R_MIPS_PC19_S2 bar 0x0
+# CHECK-ELF: 0x4 R_MIPS_PC16 bar 0x0
+# CHECK-ELF: 0x8 R_MIPS_PC16 bar 0x0
+# CHECK-ELF: 0xC R_MIPS_PC21_S2 bar 0x0
+# CHECK-ELF: 0x10 R_MIPS_PC21_S2 bar 0x0
+# CHECK-ELF: 0x14 R_MIPS_PC26_S2 bar 0x0
+# CHECK-ELF: 0x18 R_MIPS_PC26_S2 bar 0x0
+# CHECK-ELF: 0x1C R_MIPS_PCHI16 bar 0x0
+# CHECK-ELF: 0x20 R_MIPS_PCLO16 bar 0x0
+# CHECK-ELF: 0x24 R_MIPS_PC19_S2 bar 0x0
+# CHECK-ELF: 0x28 R_MIPS_PC19_S2 bar 0x0
+# CHECK-ELF: ]
+
+ addiupc $2,bar
+ beqc $5, $6, bar
+ bnec $5, $6, bar
+ beqzc $9, bar
+ bnezc $9, bar
+ balc bar
+ bc bar
+ aluipc $2, %pcrel_hi(bar)
+ addiu $2, $2, %pcrel_lo(bar)
+ lwpc $2,bar
+ lwupc $2,bar
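The relocations test just above checks the same code twice: once textually (the CHECK-FIXUP prefix inspects the fixup kinds that -show-encoding prints) and once on the object file (the CHECK-ELF prefix inspects llvm-readobj -r output). Outside the lit harness the two RUN lines can be reproduced roughly as follows, assuming the file is saved as relocations.s and the LLVM tools are on PATH (lit normally substitutes %s with the test file path):

        llvm-mc relocations.s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 | FileCheck relocations.s -check-prefix=CHECK-FIXUP
        llvm-mc relocations.s -filetype=obj -triple=mips-unknown-linux -mcpu=mips32r6 | llvm-readobj -r | FileCheck relocations.s -check-prefix=CHECK-ELF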
diff --git a/test/MC/Mips/mips32r6/valid-xfail.s b/test/MC/Mips/mips32r6/valid-xfail.s
new file mode 100644
index 000000000000..0c911d71f47e
--- /dev/null
+++ b/test/MC/Mips/mips32r6/valid-xfail.s
@@ -0,0 +1,19 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ bovc $0, $2, 4 # TODO: bovc $0, $2, 4 # encoding: [0x20,0x40,0x00,0x01]
+ bovc $2, $4, 4 # TODO: bovc $2, $4, 4 # encoding: [0x20,0x82,0x00,0x01]
+ bnvc $0, $2, 4 # TODO: bnvc $0, $2, 4 # encoding: [0x60,0x40,0x00,0x01]
+ bnvc $2, $4, 4 # TODO: bnvc $2, $4, 4 # encoding: [0x60,0x82,0x00,0x01]
+ beqc $0, $6, 256 # TODO: beqc $6, $zero, 256 # encoding: [0x20,0xc0,0x00,0x40]
+ beqc $5, $0, 256 # TODO: beqc $5, $zero, 256 # encoding: [0x20,0xa0,0x00,0x40]
+ beqc $6, $5, 256 # TODO: beqc $5, $6, 256 # encoding: [0x20,0xa6,0x00,0x40]
+ bnec $0, $6, 256 # TODO: bnec $6, $zero, 256 # encoding: [0x60,0xc0,0x00,0x40]
+ bnec $5, $0, 256 # TODO: bnec $5, $zero, 256 # encoding: [0x60,0xa0,0x00,0x40]
+ bnec $6, $5, 256 # TODO: bnec $5, $6, 256 # encoding: [0x60,0xa6,0x00,0x40]
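A note on the XFAIL wiring in the valid-xfail.s file above: the leading not is there to absorb llvm-mc's expected non-zero exit (it still errors on these not-yet-implemented instructions), and CHECK-NOT: encoding passes only while no instruction assembles, so the trailing not makes the RUN line fail and XFAIL: * records that failure as expected. Roughly, with lit's %s substituted (file name illustrative):

        not llvm-mc valid-xfail.s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 | not FileCheck valid-xfail.s
        # today: no "# encoding:" output, CHECK-NOT passes, the trailing "not" fails the line -> expected failure (XFAIL)
        # once any instruction assembles: CHECK-NOT matches, FileCheck fails, the trailing "not" succeeds -> XPASS,
        # signalling that the instruction should be moved to valid.s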
diff --git a/test/MC/Mips/mips32r6/valid.s b/test/MC/Mips/mips32r6/valid.s
new file mode 100644
index 000000000000..f23dbd7302f9
--- /dev/null
+++ b/test/MC/Mips/mips32r6/valid.s
@@ -0,0 +1,154 @@
+# Instructions that are valid
+#
+# Branches have some unusual encoding rules in MIPS32r6 so we need to test:
+# rs == 0
+# rs != 0
+# rt == 0
+# rt != 0
+# rs < rt
+# rs == rt
+# rs > rt
+# appropriately for each branch instruction
+#
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 2> %t0 | FileCheck %s
+# RUN: FileCheck %s -check-prefix=WARNING < %t0
+
+ .set noat
+ # FIXME: Add the instructions carried forward from older ISA's
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ addiupc $4, 100 # CHECK: addiupc $4, 100 # encoding: [0xec,0x80,0x00,0x19]
+ align $4, $2, $3, 2 # CHECK: align $4, $2, $3, 2 # encoding: [0x7c,0x43,0x22,0xa0]
+ aluipc $3, 56 # CHECK: aluipc $3, 56 # encoding: [0xec,0x7f,0x00,0x38]
+ aui $3,$2,-23 # CHECK: aui $3, $2, -23 # encoding: [0x3c,0x62,0xff,0xe9]
+ auipc $3, -1 # CHECK: auipc $3, -1 # encoding: [0xec,0x7e,0xff,0xff]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ balc 14572256 # CHECK: balc 14572256 # encoding: [0xe8,0x37,0x96,0xb8]
+ bc 14572256 # CHECK: bc 14572256 # encoding: [0xc8,0x37,0x96,0xb8]
+ bc1eqz $f0,4 # CHECK: bc1eqz $f0, 4 # encoding: [0x45,0x20,0x00,0x01]
+ bc1eqz $f31,4 # CHECK: bc1eqz $f31, 4 # encoding: [0x45,0x3f,0x00,0x01]
+ bc1nez $f0,4 # CHECK: bc1nez $f0, 4 # encoding: [0x45,0xa0,0x00,0x01]
+ bc1nez $f31,4 # CHECK: bc1nez $f31, 4 # encoding: [0x45,0xbf,0x00,0x01]
+ bc2eqz $0,8 # CHECK: bc2eqz $0, 8 # encoding: [0x49,0x20,0x00,0x02]
+ bc2eqz $31,8 # CHECK: bc2eqz $31, 8 # encoding: [0x49,0x3f,0x00,0x02]
+ bc2nez $0,8 # CHECK: bc2nez $0, 8 # encoding: [0x49,0xa0,0x00,0x02]
+ bc2nez $31,8 # CHECK: bc2nez $31, 8 # encoding: [0x49,0xbf,0x00,0x02]
+ # beqc requires rs < rt && rs != 0 but we also accept when this is not true. See also bovc
+ # FIXME: Testcases are in valid-xfail.s at the moment
+ beqc $5, $6, 256 # CHECK: beqc $5, $6, 256 # encoding: [0x20,0xa6,0x00,0x40]
+ beqzalc $2, 1332 # CHECK: beqzalc $2, 1332 # encoding: [0x20,0x02,0x01,0x4d]
+ # bnec requires rs < rt && rs != 0 but we accept when this is not true. See also bnvc
+ # FIXME: Testcases are in valid-xfail.s at the moment
+ bnec $5, $6, 256 # CHECK: bnec $5, $6, 256 # encoding: [0x60,0xa6,0x00,0x40]
+ bnezalc $2, 1332 # CHECK: bnezalc $2, 1332 # encoding: [0x60,0x02,0x01,0x4d]
+ beqzc $5, 72256 # CHECK: beqzc $5, 72256 # encoding: [0xd8,0xa0,0x46,0x90]
+ bgec $2, $3, 256 # CHECK: bgec $2, $3, 256 # encoding: [0x58,0x43,0x00,0x40]
+ bgeuc $2, $3, 256 # CHECK: bgeuc $2, $3, 256 # encoding: [0x18,0x43,0x00,0x40]
+ bgezalc $2, 1332 # CHECK: bgezalc $2, 1332 # encoding: [0x18,0x42,0x01,0x4d]
+ bnezc $5, 72256 # CHECK: bnezc $5, 72256 # encoding: [0xf8,0xa0,0x46,0x90]
+ bltzc $5, 256 # CHECK: bltzc $5, 256 # encoding: [0x5c,0xa5,0x00,0x40]
+ bgezc $5, 256 # CHECK: bgezc $5, 256 # encoding: [0x58,0xa5,0x00,0x40]
+ bgtzalc $2, 1332 # CHECK: bgtzalc $2, 1332 # encoding: [0x1c,0x02,0x01,0x4d]
+ blezc $5, 256 # CHECK: blezc $5, 256 # encoding: [0x58,0x05,0x00,0x40]
+ bltzalc $2, 1332 # CHECK: bltzalc $2, 1332 # encoding: [0x1c,0x42,0x01,0x4d]
+ bgtzc $5, 256 # CHECK: bgtzc $5, 256 # encoding: [0x5c,0x05,0x00,0x40]
+ bitswap $4, $2 # CHECK: bitswap $4, $2 # encoding: [0x7c,0x02,0x20,0x20]
+ blezalc $2, 1332 # CHECK: blezalc $2, 1332 # encoding: [0x18,0x02,0x01,0x4d]
+ bltc $5, $6, 256 # CHECK: bltc $5, $6, 256 # encoding: [0x5c,0xa6,0x00,0x40]
+ bltuc $5, $6, 256 # CHECK: bltuc $5, $6, 256 # encoding: [0x1c,0xa6,0x00,0x40]
+ # bnvc requires that rs >= rt but we accept both. See also bnec
+ bnvc $0, $0, 4 # CHECK: bnvc $zero, $zero, 4 # encoding: [0x60,0x00,0x00,0x01]
+ bnvc $2, $0, 4 # CHECK: bnvc $2, $zero, 4 # encoding: [0x60,0x40,0x00,0x01]
+ bnvc $4, $2, 4 # CHECK: bnvc $4, $2, 4 # encoding: [0x60,0x82,0x00,0x01]
+ # bovc requires that rs >= rt but we accept both. See also beqc
+ bovc $0, $0, 4 # CHECK: bovc $zero, $zero, 4 # encoding: [0x20,0x00,0x00,0x01]
+ bovc $2, $0, 4 # CHECK: bovc $2, $zero, 4 # encoding: [0x20,0x40,0x00,0x01]
+ bovc $4, $2, 4 # CHECK: bovc $4, $2, 4 # encoding: [0x20,0x82,0x00,0x01]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0x7c,0xa1,0x04,0x25]
+ cmp.af.s $f2,$f3,$f4 # CHECK: cmp.af.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x80]
+ cmp.af.d $f2,$f3,$f4 # CHECK: cmp.af.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x80]
+ cmp.un.s $f2,$f3,$f4 # CHECK: cmp.un.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x81]
+ cmp.un.d $f2,$f3,$f4 # CHECK: cmp.un.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x81]
+ cmp.eq.s $f2,$f3,$f4 # CHECK: cmp.eq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x82]
+ cmp.eq.d $f2,$f3,$f4 # CHECK: cmp.eq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x82]
+ cmp.ueq.s $f2,$f3,$f4 # CHECK: cmp.ueq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x83]
+ cmp.ueq.d $f2,$f3,$f4 # CHECK: cmp.ueq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x83]
+ cmp.lt.s $f2,$f3,$f4 # CHECK: cmp.lt.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x84]
+ cmp.lt.d $f2,$f3,$f4 # CHECK: cmp.lt.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x84]
+ cmp.ult.s $f2,$f3,$f4 # CHECK: cmp.ult.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x85]
+ cmp.ult.d $f2,$f3,$f4 # CHECK: cmp.ult.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x85]
+ cmp.le.s $f2,$f3,$f4 # CHECK: cmp.le.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x86]
+ cmp.le.d $f2,$f3,$f4 # CHECK: cmp.le.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x86]
+ cmp.ule.s $f2,$f3,$f4 # CHECK: cmp.ule.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x87]
+ cmp.ule.d $f2,$f3,$f4 # CHECK: cmp.ule.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x87]
+ cmp.saf.s $f2,$f3,$f4 # CHECK: cmp.saf.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x88]
+ cmp.saf.d $f2,$f3,$f4 # CHECK: cmp.saf.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x88]
+ cmp.sun.s $f2,$f3,$f4 # CHECK: cmp.sun.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x89]
+ cmp.sun.d $f2,$f3,$f4 # CHECK: cmp.sun.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x89]
+ cmp.seq.s $f2,$f3,$f4 # CHECK: cmp.seq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8a]
+ cmp.seq.d $f2,$f3,$f4 # CHECK: cmp.seq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8a]
+ cmp.sueq.s $f2,$f3,$f4 # CHECK: cmp.sueq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8b]
+ cmp.sueq.d $f2,$f3,$f4 # CHECK: cmp.sueq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8b]
+ cmp.slt.s $f2,$f3,$f4 # CHECK: cmp.slt.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8c]
+ cmp.slt.d $f2,$f3,$f4 # CHECK: cmp.slt.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8c]
+ cmp.sult.s $f2,$f3,$f4 # CHECK: cmp.sult.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8d]
+ cmp.sult.d $f2,$f3,$f4 # CHECK: cmp.sult.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8d]
+ cmp.sle.s $f2,$f3,$f4 # CHECK: cmp.sle.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8e]
+ cmp.sle.d $f2,$f3,$f4 # CHECK: cmp.sle.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8e]
+ cmp.sule.s $f2,$f3,$f4 # CHECK: cmp.sule.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8f]
+ cmp.sule.d $f2,$f3,$f4 # CHECK: cmp.sule.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8f]
+ div $2,$3,$4 # CHECK: div $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9a]
+ divu $2,$3,$4 # CHECK: divu $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9b]
+ jialc $5, 256 # CHECK: jialc $5, 256 # encoding: [0xf8,0x05,0x01,0x00]
+ jic $5, 256 # CHECK: jic $5, 256 # encoding: [0xd8,0x05,0x01,0x00]
+ lsa $2, $3, $4, 3 # CHECK: lsa $2, $3, $4, 3 # encoding: [0x00,0x64,0x10,0xc5]
+ lwpc $2,268 # CHECK: lwpc $2, 268 # encoding: [0xec,0x48,0x00,0x43]
+ lwupc $2,268 # CHECK: lwupc $2, 268 # encoding: [0xec,0x50,0x00,0x43]
+ mod $2,$3,$4 # CHECK: mod $2, $3, $4 # encoding: [0x00,0x64,0x10,0xda]
+ modu $2,$3,$4 # CHECK: modu $2, $3, $4 # encoding: [0x00,0x64,0x10,0xdb]
+ mul $2,$3,$4 # CHECK: mul $2, $3, $4 # encoding: [0x00,0x64,0x10,0x98]
+ muh $2,$3,$4 # CHECK: muh $2, $3, $4 # encoding: [0x00,0x64,0x10,0xd8]
+ mulu $2,$3,$4 # CHECK: mulu $2, $3, $4 # encoding: [0x00,0x64,0x10,0x99]
+ muhu $2,$3,$4 # CHECK: muhu $2, $3, $4 # encoding: [0x00,0x64,0x10,0xd9]
+ maddf.s $f2,$f3,$f4 # CHECK: maddf.s $f2, $f3, $f4 # encoding: [0x46,0x04,0x18,0x98]
+ maddf.d $f2,$f3,$f4 # CHECK: maddf.d $f2, $f3, $f4 # encoding: [0x46,0x24,0x18,0x98]
+ msubf.s $f2,$f3,$f4 # CHECK: msubf.s $f2, $f3, $f4 # encoding: [0x46,0x04,0x18,0x99]
+ msubf.d $f2,$f3,$f4 # CHECK: msubf.d $f2, $f3, $f4 # encoding: [0x46,0x24,0x18,0x99]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0x7c,0xa1,0x04,0x35]
+ sel.d $f0,$f1,$f2 # CHECK: sel.d $f0, $f1, $f2 # encoding: [0x46,0x22,0x08,0x10]
+ sel.s $f0,$f1,$f2 # CHECK: sel.s $f0, $f1, $f2 # encoding: [0x46,0x02,0x08,0x10]
+ seleqz $2,$3,$4 # CHECK: seleqz $2, $3, $4 # encoding: [0x00,0x64,0x10,0x35]
+ selnez $2,$3,$4 # CHECK: selnez $2, $3, $4 # encoding: [0x00,0x64,0x10,0x37]
+ max.s $f0, $f2, $f4 # CHECK: max.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1d]
+ max.d $f0, $f2, $f4 # CHECK: max.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1d]
+ min.s $f0, $f2, $f4 # CHECK: min.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1c]
+ min.d $f0, $f2, $f4 # CHECK: min.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1c]
+ maxa.s $f0, $f2, $f4 # CHECK: maxa.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1f]
+ maxa.d $f0, $f2, $f4 # CHECK: maxa.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1f]
+ mina.s $f0, $f2, $f4 # CHECK: mina.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1e]
+ mina.d $f0, $f2, $f4 # CHECK: mina.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1e]
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ seleqz.s $f0, $f2, $f4 # CHECK: seleqz.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x14]
+ seleqz.d $f0, $f2, $f4 # CHECK: seleqz.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x14]
+ selnez.s $f0, $f2, $f4 # CHECK: selnez.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x17]
+ selnez.d $f0, $f2, $f4 # CHECK: selnez.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x17]
+ rint.s $f2, $f4 # CHECK: rint.s $f2, $f4 # encoding: [0x46,0x00,0x20,0x9a]
+ rint.d $f2, $f4 # CHECK: rint.d $f2, $f4 # encoding: [0x46,0x20,0x20,0x9a]
+ class.s $f2, $f4 # CHECK: class.s $f2, $f4 # encoding: [0x46,0x00,0x20,0x9b]
+ class.d $f2, $f4 # CHECK: class.d $f2, $f4 # encoding: [0x46,0x20,0x20,0x9b]
+ jr.hb $4 # CHECK: jr.hb $4 # encoding: [0x00,0x80,0x04,0x09]
+ jalr.hb $4 # CHECK: jalr.hb $4 # encoding: [0x00,0x80,0xfc,0x09]
+ jalr.hb $4, $5 # CHECK: jalr.hb $4, $5 # encoding: [0x00,0xa0,0x24,0x09]
+ ldc2 $8, -701($at) # CHECK: ldc2 $8, -701($1) # encoding: [0x49,0xc8,0x0d,0x43]
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0x49,0x52,0x34,0xb7]
+ sdc2 $20,629($s2) # CHECK: sdc2 $20, 629($18) # encoding: [0x49,0xf4,0x92,0x75]
+ swc2 $25,304($s0) # CHECK: swc2 $25, 304($16) # encoding: [0x49,0x79,0x81,0x30]
+ ll $v0,-153($s2) # CHECK: ll $2, -153($18) # encoding: [0x7e,0x42,0xb3,0xb6]
+ sc $15,-40($s3) # CHECK: sc $15, -40($19) # encoding: [0x7e,0x6f,0xec,0x26]
+ clo $11,$a1 # CHECK: clo $11, $5 # encoding: [0x00,0xa0,0x58,0x51]
+ clz $sp,$gp # CHECK: clz $sp, $gp # encoding: [0x03,0x80,0xe8,0x50]
+ ssnop # WARNING: [[@LINE]]:9: warning: ssnop is deprecated for MIPS32r6 and is equivalent to a nop instruction
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sdbbp # CHECK: sdbbp # encoding: [0x00,0x00,0x00,0x0e]
+ sdbbp 34 # CHECK: sdbbp 34 # encoding: [0x00,0x00,0x08,0x8e]
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ sync 1 # CHECK: sync 1 # encoding: [0x00,0x00,0x00,0x4f]
diff --git a/test/MC/Mips/mips4/invalid-mips32.s b/test/MC/Mips/mips4/invalid-mips32.s
new file mode 100644
index 000000000000..52dea02d10c6
--- /dev/null
+++ b/test/MC/Mips/mips4/invalid-mips32.s
@@ -0,0 +1,10 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+
+ sync 0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sync 1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips4/invalid-mips5-wrong-error.s b/test/MC/Mips/mips4/invalid-mips5-wrong-error.s
new file mode 100644
index 000000000000..c6c8968d2554
--- /dev/null
+++ b/test/MC/Mips/mips4/invalid-mips5-wrong-error.s
@@ -0,0 +1,46 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ abs.ps $f22,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ add.ps $f25,$f27,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ alnv.ps $f12,$f18,$f30,$t0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.eq.ps $fcc5,$f0,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.f.ps $fcc6,$f11,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.le.ps $fcc1,$f7,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.lt.ps $f19,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.nge.ps $f1,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngl.ps $f21,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngle.ps $fcc7,$f12,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngt.ps $fcc5,$f30,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ole.ps $fcc7,$f21,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.olt.ps $fcc3,$f7,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.seq.ps $fcc6,$f31,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.sf.ps $fcc6,$f4,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ueq.ps $fcc1,$f5,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ule.ps $fcc6,$f17,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ult.ps $fcc7,$f14,$f0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.un.ps $fcc4,$f2,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.ps.s $f3,$f18,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pl $f30,$f1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.s.pu $f14,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ madd.ps $f22,$f3,$f14,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mov.ps $f22,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movf.ps $f10,$f28,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movn.ps $f31,$f31,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movt.ps $f20,$f25,$fcc2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movz.ps $f18,$f17,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ msub.ps $f12,$f14,$f29,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mul.ps $f14,$f0,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ neg.ps $f19,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmadd.ps $f27,$f4,$f9,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmsub.ps $f6,$f12,$f14,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pll.ps $f25,$f9,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ plu.ps $f1,$f26,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pul.ps $f9,$f30,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ puu.ps $f24,$f9,$f2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ sub.ps $f5,$f14,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips4/invalid-mips5.s b/test/MC/Mips/mips4/invalid-mips5.s
new file mode 100644
index 000000000000..8c0db00b88c5
--- /dev/null
+++ b/test/MC/Mips/mips4/invalid-mips5.s
@@ -0,0 +1,9 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t1) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips4/invalid-mips64.s b/test/MC/Mips/mips4/invalid-mips64.s
new file mode 100644
index 000000000000..c6245ccd1067
--- /dev/null
+++ b/test/MC/Mips/mips4/invalid-mips64.s
@@ -0,0 +1,24 @@
+# Instructions that are invalid
+#
+# FIXME: This test should be moved to the mips5 directory when mips5 is supported
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ clo $t3,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ clz $sp,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclo $s2,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclz $s0,$t9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ deret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $s6,$t5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $zero,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $s3,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $t8,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfc0 $a2,$14,1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub $s7,$k1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msubu $t7,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtc0 $t1,$29,3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mul $s0,$s4,$at # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips4/invalid-mips64r2-xfail.s b/test/MC/Mips/mips4/invalid-mips64r2-xfail.s
new file mode 100644
index 000000000000..a5581fd44fb2
--- /dev/null
+++ b/test/MC/Mips/mips4/invalid-mips64r2-xfail.s
@@ -0,0 +1,11 @@
+# Instructions that are supposed to be invalid but currently aren't
+# This test will XPASS if any insn stops assembling.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2> %t1
+# RUN: not FileCheck %s < %t1
+# XFAIL: *
+
+# CHECK-NOT: error
+ .set noat
+ rdhwr $sp,$11
diff --git a/test/MC/Mips/mips4/invalid-mips64r2.s b/test/MC/Mips/mips4/invalid-mips64r2.s
new file mode 100644
index 000000000000..b259706265a2
--- /dev/null
+++ b/test/MC/Mips/mips4/invalid-mips64r2.s
@@ -0,0 +1,37 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ clo $t3,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ clz $sp,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclo $s2,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclz $s0,$t9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ deret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ di $s8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsbh $v1,$t6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dshd $v0,$sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ei $t6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $s6,$t5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $zero,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.s $f1,$f31,$f19,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $s3,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $t8,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfc0 $a2,$14,1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhc1 $s8,$f24 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub $s7,$k1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.s $f12,$f19,$f10,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msubu $t7,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtc0 $t1,$29,3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthc1 $zero,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mul $s0,$s4,$at # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.s $f0,$f5,$f25,$f12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.s $f1,$f24,$f19,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ pause # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seb $t9,$t7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seh $v1,$t4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($t5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ wsbh $k1,$t1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips4/valid-xfail.s b/test/MC/Mips/mips4/valid-xfail.s
new file mode 100644
index 000000000000..ff6f457ca838
--- /dev/null
+++ b/test/MC/Mips/mips4/valid-xfail.s
@@ -0,0 +1,49 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ c.eq.d $fcc1,$f15,$f15
+ c.eq.s $fcc5,$f24,$f17
+ c.f.d $fcc4,$f11,$f21
+ c.f.s $fcc4,$f30,$f7
+ c.le.d $fcc4,$f18,$f1
+ c.le.s $fcc6,$f24,$f4
+ c.lt.d $fcc3,$f9,$f3
+ c.lt.s $fcc2,$f17,$f14
+ c.nge.d $fcc5,$f21,$f16
+ c.nge.s $fcc3,$f11,$f8
+ c.ngl.s $fcc2,$f31,$f23
+ c.ngle.s $fcc2,$f18,$f23
+ c.ngt.d $fcc4,$f24,$f7
+ c.ngt.s $fcc5,$f8,$f13
+ c.ole.d $fcc2,$f16,$f31
+ c.ole.s $fcc3,$f7,$f20
+ c.olt.d $fcc4,$f19,$f28
+ c.olt.s $fcc6,$f20,$f7
+ c.seq.d $fcc4,$f31,$f7
+ c.seq.s $fcc7,$f1,$f25
+ c.ueq.d $fcc4,$f13,$f25
+ c.ueq.s $fcc6,$f3,$f30
+ c.ule.d $fcc7,$f25,$f18
+ c.ule.s $fcc7,$f21,$f30
+ c.ult.d $fcc6,$f6,$f17
+ c.ult.s $fcc7,$f24,$f10
+ c.un.d $fcc6,$f23,$f24
+ c.un.s $fcc1,$f30,$f4
+ madd.d $f18,$f19,$f26,$f20
+ madd.s $f1,$f31,$f19,$f25
+ msub.d $f10,$f1,$f31,$f18
+ msub.s $f12,$f19,$f10,$f16
+ nmadd.d $f18,$f9,$f14,$f19
+ nmadd.s $f0,$f5,$f25,$f12
+ nmsub.d $f30,$f8,$f16,$f30
+ nmsub.s $f1,$f24,$f19,$f4
+ recip.d $f19,$f6
+ recip.s $f3,$f30
+ rsqrt.d $f3,$f28
+ rsqrt.s $f4,$f8
diff --git a/test/MC/Mips/mips4/valid.s b/test/MC/Mips/mips4/valid.s
new file mode 100644
index 000000000000..949b91da922c
--- /dev/null
+++ b/test/MC/Mips/mips4/valid.s
@@ -0,0 +1,216 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f $fcc1, 4 # CHECK: bc1f $fcc1, 4 # encoding: [0x45,0x04,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t $fcc1, 4 # CHECK: bc1t $fcc1, 4 # encoding: [0x45,0x05,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0xbc,0xa1,0x00,0x08]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.l.d $f1,$f3
+ ceil.l.s $f18,$f13
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ ctc1 $a2,$26
+ cvt.d.l $f4,$f16
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.l.d $f24,$f15
+ cvt.l.s $f11,$f29
+ cvt.s.d $f26,$f8
+ cvt.s.l $f15,$f30
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ dadd $s3,$at,$ra
+ dadd $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ dadd $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddi $sp,$s4,-27705
+ daddi $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ daddi $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddiu $k0,$s6,-4586
+ daddu $s3,$at,$ra
+ ddiv $zero,$k0,$s3
+ ddivu $zero,$s0,$s1
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ dmfc1 $12,$f13
+ dmtc1 $s0,$f14
+ dmult $s7,$9
+ dmultu $a1,$a2
+ dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
+ dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
+ dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsra $gp,10 # CHECK: dsra $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbb]
+ dsra $gp,$s2,10 # CHECK: dsra $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbb]
+ dsra $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsra32 $gp,10 # CHECK: dsra32 $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbf]
+ dsra32 $gp,$s2,10 # CHECK: dsra32 $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbf]
+ dsrav $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
+ dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
+ dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
+ dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
+ dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsub $a3,$s6,$8
+ dsub $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsub $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubi $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsubi $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubu $a1,$a1,$k0
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ eret
+ floor.l.d $f26,$f7
+ floor.l.s $f12,$f5
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ld $sp,-28645($s1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ ldl $24,-4167($24)
+ ldr $14,-30358($s4)
+ ldxc1 $f8,$s7($15)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ lld $zero,-14736($ra) # CHECK: lld $zero, -14736($ra) # encoding: [0xd3,0xe0,0xc6,0x70]
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ lwu $s3,-24086($v1)
+ lwxc1 $f12,$s1($s8)
+ mfc1 $a3,$f27
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $a0,$a3
+ move $s5,$a0
+ move $s8,$a0
+ move $25,$a2
+ movf $gp,$8,$fcc7
+ movf.d $f6,$f11,$fcc5
+ movf.s $f23,$f5,$fcc6
+ movn $v1,$s1,$s0
+ movn.d $f27,$f21,$k0
+ movn.s $f12,$f0,$s7
+ movt $zero,$s4,$fcc5
+ movt.d $f0,$f2,$fcc0
+ movt.s $f30,$f2,$fcc1
+ movz $a1,$s6,$9
+ movz.d $f12,$f29,$9
+ movz.s $f25,$f7,$v1
+ mtc1 $s8,$f9
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0xcc,0xa1,0x00,0x08]
+ round.l.d $f12,$f1
+ round.l.s $f25,$f5
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ scd $15,-8243($sp) # CHECK: scd $15, -8243($sp) # encoding: [0xf3,0xaf,0xdf,0xcd]
+ sd $12,5835($10)
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sdl $a3,-20961($s8)
+ sdr $11,-20423($12)
+ sdxc1 $f11,$10($14)
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ swxc1 $f19,$12($k0)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.l.d $f23,$f23
+ trunc.l.s $f28,$f31
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ xor $s2,$a0,$s8
diff --git a/test/MC/Mips/mips5/invalid-mips32.s b/test/MC/Mips/mips5/invalid-mips32.s
new file mode 100644
index 000000000000..2e2c8da462df
--- /dev/null
+++ b/test/MC/Mips/mips5/invalid-mips32.s
@@ -0,0 +1,10 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips5 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+
+ sync 0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sync 1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips5/invalid-mips64.s b/test/MC/Mips/mips5/invalid-mips64.s
new file mode 100644
index 000000000000..0a15da823676
--- /dev/null
+++ b/test/MC/Mips/mips5/invalid-mips64.s
@@ -0,0 +1,24 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips5 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ clo $11,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ clz $sp,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclo $s2,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclz $s0,$25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ deret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ jr.hb $4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ jalr.hb $4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ jalr.hb $4, $5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $s6,$13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $zero,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $s3,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $24,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfc0 $a2,$14,1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub $s7,$k1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msubu $15,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtc0 $9,$29,3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mul $s0,$s4,$at # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips5/invalid-mips64r2-xfail.s b/test/MC/Mips/mips5/invalid-mips64r2-xfail.s
new file mode 100644
index 000000000000..b2b612d8e5ea
--- /dev/null
+++ b/test/MC/Mips/mips5/invalid-mips64r2-xfail.s
@@ -0,0 +1,11 @@
+# Instructions that are supposed to be invalid but currently aren't
+# This test will XPASS if any insn stops assembling.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2> %t1
+# RUN: not FileCheck %s < %t1
+# XFAIL: *
+
+# CHECK-NOT: error
+ .set noat
+ rdhwr $sp,$11
diff --git a/test/MC/Mips/mips5/invalid-mips64r2.s b/test/MC/Mips/mips5/invalid-mips64r2.s
new file mode 100644
index 000000000000..b91e5205d80c
--- /dev/null
+++ b/test/MC/Mips/mips5/invalid-mips64r2.s
@@ -0,0 +1,43 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips5 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ clo $11,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ clz $sp,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclo $s2,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dclz $s0,$25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ deret # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ di $s8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr32 $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr32 $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotrv $1,$14,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsbh $v1,$14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dshd $v0,$sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ei $14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $s6,$13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd $zero,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.s $f1,$f31,$f19,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $s3,$gp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ maddu $24,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfc0 $a2,$14,1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhc1 $s8,$f24 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub $s7,$k1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.s $f12,$f19,$f10,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msubu $15,$a1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtc0 $9,$29,3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthc1 $zero,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mul $s0,$s4,$at # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.s $f0,$f5,$f25,$f12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.s $f1,$f24,$f19,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ pause # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotrv $1,$14,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seb $25,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seh $v1,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ wsbh $k1,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips5/valid-xfail.s b/test/MC/Mips/mips5/valid-xfail.s
new file mode 100644
index 000000000000..8d1d0d78d0e4
--- /dev/null
+++ b/test/MC/Mips/mips5/valid-xfail.s
@@ -0,0 +1,87 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips5 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ abs.ps $f22,$f8
+ add.ps $f25,$f27,$f13
+ alnv.ps $f12,$f18,$f30,$12
+ c.eq.d $fcc1,$f15,$f15
+ c.eq.ps $fcc5,$f0,$f9
+ c.eq.s $fcc5,$f24,$f17
+ c.f.d $fcc4,$f11,$f21
+ c.f.ps $fcc6,$f11,$f11
+ c.f.s $fcc4,$f30,$f7
+ c.le.d $fcc4,$f18,$f1
+ c.le.ps $fcc1,$f7,$f20
+ c.le.s $fcc6,$f24,$f4
+ c.lt.d $fcc3,$f9,$f3
+ c.lt.ps $f19,$f5
+ c.lt.s $fcc2,$f17,$f14
+ c.nge.d $fcc5,$f21,$f16
+ c.nge.ps $f1,$f26
+ c.nge.s $fcc3,$f11,$f8
+ c.ngl.ps $f21,$f30
+ c.ngl.s $fcc2,$f31,$f23
+ c.ngle.ps $fcc7,$f12,$f20
+ c.ngle.s $fcc2,$f18,$f23
+ c.ngt.d $fcc4,$f24,$f7
+ c.ngt.ps $fcc5,$f30,$f6
+ c.ngt.s $fcc5,$f8,$f13
+ c.ole.d $fcc2,$f16,$f31
+ c.ole.ps $fcc7,$f21,$f8
+ c.ole.s $fcc3,$f7,$f20
+ c.olt.d $fcc4,$f19,$f28
+ c.olt.ps $fcc3,$f7,$f16
+ c.olt.s $fcc6,$f20,$f7
+ c.seq.d $fcc4,$f31,$f7
+ c.seq.ps $fcc6,$f31,$f14
+ c.seq.s $fcc7,$f1,$f25
+ c.sf.ps $fcc6,$f4,$f6
+ c.ueq.d $fcc4,$f13,$f25
+ c.ueq.ps $fcc1,$f5,$f29
+ c.ueq.s $fcc6,$f3,$f30
+ c.ule.d $fcc7,$f25,$f18
+ c.ule.ps $fcc6,$f17,$f3
+ c.ule.s $fcc7,$f21,$f30
+ c.ult.d $fcc6,$f6,$f17
+ c.ult.ps $fcc7,$f14,$f0
+ c.ult.s $fcc7,$f24,$f10
+ c.un.d $fcc6,$f23,$f24
+ c.un.ps $fcc4,$f2,$f26
+ c.un.s $fcc1,$f30,$f4
+ cvt.ps.s $f3,$f18,$f19
+ cvt.s.pl $f30,$f1
+ cvt.s.pu $f14,$f25
+ madd.d $f18,$f19,$f26,$f20
+ madd.ps $f22,$f3,$f14,$f3
+ madd.s $f1,$f31,$f19,$f25
+ mov.ps $f22,$f17
+ movf.ps $f10,$f28,$fcc6
+ movn.ps $f31,$f31,$s3
+ movt.ps $f20,$f25,$fcc2
+ movz.ps $f18,$f17,$ra
+ msub.d $f10,$f1,$f31,$f18
+ msub.ps $f12,$f14,$f29,$f17
+ msub.s $f12,$f19,$f10,$f16
+ mul.ps $f14,$f0,$f16
+ neg.ps $f19,$f13
+ nmadd.d $f18,$f9,$f14,$f19
+ nmadd.ps $f27,$f4,$f9,$f25
+ nmadd.s $f0,$f5,$f25,$f12
+ nmsub.d $f30,$f8,$f16,$f30
+ nmsub.ps $f6,$f12,$f14,$f17
+ nmsub.s $f1,$f24,$f19,$f4
+ pll.ps $f25,$f9,$f30
+ plu.ps $f1,$f26,$f29
+ pul.ps $f9,$f30,$f26
+ puu.ps $f24,$f9,$f2
+ recip.d $f19,$f6
+ recip.s $f3,$f30
+ rsqrt.d $f3,$f28
+ rsqrt.s $f4,$f8
+ sub.ps $f5,$f14,$f26
diff --git a/test/MC/Mips/mips5/valid.s b/test/MC/Mips/mips5/valid.s
new file mode 100644
index 000000000000..3afdee1887c1
--- /dev/null
+++ b/test/MC/Mips/mips5/valid.s
@@ -0,0 +1,218 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips5 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f $fcc1, 4 # CHECK: bc1f $fcc1, 4 # encoding: [0x45,0x04,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t $fcc1, 4 # CHECK: bc1t $fcc1, 4 # encoding: [0x45,0x05,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0xbc,0xa1,0x00,0x08]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.l.d $f1,$f3
+ ceil.l.s $f18,$f13
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ ctc1 $a2,$26
+ cvt.d.l $f4,$f16
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.l.d $f24,$f15
+ cvt.l.s $f11,$f29
+ cvt.s.d $f26,$f8
+ cvt.s.l $f15,$f30
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ dadd $s3,$at,$ra
+ dadd $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ dadd $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddi $sp,$s4,-27705
+ daddi $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ daddi $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddiu $k0,$s6,-4586
+ daddu $s3,$at,$ra
+ ddiv $zero,$k0,$s3
+ ddivu $zero,$s0,$s1
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ dmfc1 $12,$f13
+ dmtc1 $s0,$f14
+ dmult $s7,$9
+ dmultu $a1,$a2
+ dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
+ dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
+ dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsra $gp,10 # CHECK: dsra $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbb]
+ dsra $gp,$s2,10 # CHECK: dsra $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbb]
+ dsra $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsra32 $gp,10 # CHECK: dsra32 $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbf]
+ dsra32 $gp,$s2,10 # CHECK: dsra32 $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbf]
+ dsrav $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
+ dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
+ dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
+ dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
+ dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsub $a3,$s6,$8
+ dsub $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsub $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubi $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsubi $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubu $a1,$a1,$k0
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ eret
+ floor.l.d $f26,$f7
+ floor.l.s $f12,$f5
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ld $sp,-28645($s1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ ldl $24,-4167($24)
+ ldr $14,-30358($s4)
+ ldxc1 $f8,$s7($15)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ lld $zero,-14736($ra) # CHECK: lld $zero, -14736($ra) # encoding: [0xd3,0xe0,0xc6,0x70]
+ luxc1 $f19,$s6($s5)
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ lwu $s3,-24086($v1)
+ lwxc1 $f12,$s1($s8)
+ mfc1 $a3,$f27
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $a0,$a3
+ move $s5,$a0
+ move $s8,$a0
+ move $25,$a2
+ movf $gp,$8,$fcc7
+ movf.d $f6,$f11,$fcc5
+ movf.s $f23,$f5,$fcc6
+ movn $v1,$s1,$s0
+ movn.d $f27,$f21,$k0
+ movn.s $f12,$f0,$s7
+ movt $zero,$s4,$fcc5
+ movt.d $f0,$f2,$fcc0
+ movt.s $f30,$f2,$fcc1
+ movz $a1,$s6,$9
+ movz.d $f12,$f29,$9
+ movz.s $f25,$f7,$v1
+ mtc1 $s8,$f9
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0xcc,0xa1,0x00,0x08]
+ round.l.d $f12,$f1
+ round.l.s $f25,$f5
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ scd $15,-8243($sp) # CHECK: scd $15, -8243($sp) # encoding: [0xf3,0xaf,0xdf,0xcd]
+ sd $12,5835($10)
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sdl $a3,-20961($s8)
+ sdr $11,-20423($12)
+ sdxc1 $f11,$10($14)
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ suxc1 $f12,$k1($13)
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ swxc1 $f19,$12($k0)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.l.d $f23,$f23
+ trunc.l.s $f28,$f31
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ xor $s2,$a0,$s8
diff --git a/test/MC/Mips/mips64-alu-instructions.s b/test/MC/Mips/mips64-alu-instructions.s
index 8262a46ee4f3..19ed1ffad1ca 100644
--- a/test/MC/Mips/mips64-alu-instructions.s
+++ b/test/MC/Mips/mips64-alu-instructions.s
@@ -81,7 +81,9 @@
# CHECK: msubu $6, $7 # encoding: [0x05,0x00,0xc7,0x70]
# CHECK: mult $3, $5 # encoding: [0x18,0x00,0x65,0x00]
# CHECK: multu $3, $5 # encoding: [0x19,0x00,0x65,0x00]
+# CHECK: dsub $9, $6, $7 # encoding: [0x2e,0x48,0xc7,0x00]
# CHECK: dsubu $4, $3, $5 # encoding: [0x2f,0x20,0x65,0x00]
+# CHECK: daddiu $9, $6, -17767 # encoding: [0x99,0xba,0xc9,0x64]
# CHECK: move $7, $8 # encoding: [0x2d,0x38,0x00,0x01]
# CHECK: .set push
# CHECK: .set mips32r2
@@ -104,6 +106,47 @@
msubu $6,$7
mult $3,$5
multu $3,$5
+ dsub $9,$6,$7
dsubu $4,$3,$5
+ dsubu $9,$6,17767
move $7,$8
rdhwr $5, $29
+
+#------------------------------------------------------------------------------
+# Shortcuts for arithmetic instructions
+#------------------------------------------------------------------------------
+
+# CHECK: and $9, $9, $3 # encoding: [0x24,0x48,0x23,0x01]
+# CHECK: dadd $9, $9, $3 # encoding: [0x2c,0x48,0x23,0x01]
+# CHECK: daddu $9, $9, $3 # encoding: [0x2d,0x48,0x23,0x01]
+# CHECK: daddi $9, $9, 10 # encoding: [0x0a,0x00,0x29,0x61]
+# CHECK: daddiu $9, $9, 10 # encoding: [0x0a,0x00,0x29,0x65]
+# CHECK: dsub $9, $9, $3 # encoding: [0x2e,0x48,0x23,0x01]
+# CHECK: dsubu $9, $9, $3 # encoding: [0x2f,0x48,0x23,0x01]
+# CHECK: daddi $9, $9, -10 # encoding: [0xf6,0xff,0x29,0x61]
+# CHECK: daddiu $9, $9, -10 # encoding: [0xf6,0xff,0x29,0x65]
+# CHECK: or $9, $9, $3 # encoding: [0x25,0x48,0x23,0x01]
+# CHECK: xor $9, $9, $3 # encoding: [0x26,0x48,0x23,0x01]
+ and $9, $3
+ dadd $9, $3
+ daddu $9, $3
+ dadd $9, 10
+ daddu $9, 10
+ dsub $9, $3
+ dsubu $9, $3
+ dsub $9, 10
+ dsubu $9, 10
+ or $9, $3
+ xor $9, $3
+
+#------------------------------------------------------------------------------
+# Did you know that GAS supports complex arithmetic expressions in assembly?
+#------------------------------------------------------------------------------
+# CHECK: daddiu $9, $3, 32 # encoding: [0x20,0x00,0x69,0x64]
+# CHECK: daddiu $9, $3, 32 # encoding: [0x20,0x00,0x69,0x64]
+# CHECK: daddiu $9, $3, -32 # encoding: [0xe0,0xff,0x69,0x64]
+# CHECK: daddiu $9, $3, -32 # encoding: [0xe0,0xff,0x69,0x64]
+ daddiu $9, $3, 8 * 4
+ daddiu $9, $3, (8 * 4)
+ dsubu $9, $3, 8 * 4
+ dsubu $9, $3, (8 * 4)
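
The block added above exercises two assembler conveniences at once: two-operand shortcuts such as 'dsub $9, $3', and constant folding of immediate expressions such as '8 * 4'. As a cross-check of the expected byte sequences, the following is a minimal illustrative Python sketch, not part of the patch; the daddiu opcode value 0x19 and the little-endian byte order are assumptions taken from the MIPS64 reference and from the CHECK lines themselves.

    # Sketch: pack a MIPS I-type instruction (opcode, rs, rt, simm16) and show
    # the bytes in the little-endian order used by the CHECK lines above.
    import struct

    def itype(opcode, rs, rt, imm):
        word = (opcode << 26) | (rs << 21) | (rt << 16) | (imm & 0xFFFF)
        return [f"0x{b:02x}" for b in struct.pack("<I", word)]

    # daddiu $9, $3, 8 * 4 -- the assembler folds 8 * 4 to 32 before encoding
    print(itype(0x19, 3, 9, 8 * 4))      # ['0x20', '0x00', '0x69', '0x64']
    # dsubu $9, $3, 8 * 4 -- expands to daddiu $9, $3, -32
    print(itype(0x19, 3, 9, -(8 * 4)))   # ['0xe0', '0xff', '0x69', '0x64']
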
diff --git a/test/MC/Mips/mips64-expansions.s b/test/MC/Mips/mips64-expansions.s
new file mode 100644
index 000000000000..0efdd2fa5c81
--- /dev/null
+++ b/test/MC/Mips/mips64-expansions.s
@@ -0,0 +1,209 @@
+# RUN: llvm-mc %s -triple=mips64el-unknown-linux -show-encoding -mcpu=mips64r2 | FileCheck %s
+#
+# The GNU assembler implements 'dli' and 'dla', double-word variants of 'li'
+# and 'la'. Test not only that they are present, but also that they appear to
+# handle 64-bit values correctly.
+#
+# XXXRW: Does using powers of ten make me a bad person?
+#
+# CHECK: ori $12, $zero, 1 # encoding: [0x01,0x00,0x0c,0x34]
+# CHECK: ori $12, $zero, 10 # encoding: [0x0a,0x00,0x0c,0x34]
+# CHECK: ori $12, $zero, 100 # encoding: [0x64,0x00,0x0c,0x34]
+# CHECK: ori $12, $zero, 1000 # encoding: [0xe8,0x03,0x0c,0x34]
+# CHECK: ori $12, $zero, 10000 # encoding: [0x10,0x27,0x0c,0x34]
+# CHECK: lui $12, 1 # encoding: [0x01,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 34464 # encoding: [0xa0,0x86,0x8c,0x35]
+# CHECK: lui $12, 15 # encoding: [0x0f,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 16960 # encoding: [0x40,0x42,0x8c,0x35]
+# CHECK: lui $12, 152 # encoding: [0x98,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 38528 # encoding: [0x80,0x96,0x8c,0x35]
+# CHECK: lui $12, 1525 # encoding: [0xf5,0x05,0x0c,0x3c]
+# CHECK: ori $12, $12, 57600 # encoding: [0x00,0xe1,0x8c,0x35]
+# CHECK: lui $12, 15258 # encoding: [0x9a,0x3b,0x0c,0x3c]
+# CHECK: ori $12, $12, 51712 # encoding: [0x00,0xca,0x8c,0x35]
+# CHECK: lui $12, 2 # encoding: [0x02,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 21515 # encoding: [0x0b,0x54,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 58368 # encoding: [0x00,0xe4,0x8c,0x35]
+# CHECK: lui $12, 23 # encoding: [0x17,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 18550 # encoding: [0x76,0x48,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 59392 # encoding: [0x00,0xe8,0x8c,0x35]
+# CHECK: lui $12, 232 # encoding: [0xe8,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 54437 # encoding: [0xa5,0xd4,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 4096 # encoding: [0x00,0x10,0x8c,0x35]
+# CHECK: lui $12, 2328 # encoding: [0x18,0x09,0x0c,0x3c]
+# CHECK: ori $12, $12, 20082 # encoding: [0x72,0x4e,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 40960 # encoding: [0x00,0xa0,0x8c,0x35]
+# CHECK: lui $12, 23283 # encoding: [0xf3,0x5a,0x0c,0x3c]
+# CHECK: ori $12, $12, 4218 # encoding: [0x7a,0x10,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 16384 # encoding: [0x00,0x40,0x8c,0x35]
+# CHECK: lui $12, 3 # encoding: [0x03,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 36222 # encoding: [0x7e,0x8d,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 42182 # encoding: [0xc6,0xa4,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 32768 # encoding: [0x00,0x80,0x8c,0x35]
+# CHECK: lui $12, 35 # encoding: [0x23,0x00,0x0c,0x3c]
+# CHECK: ori $12, $12, 34546 # encoding: [0xf2,0x86,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 28609 # encoding: [0xc1,0x6f,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+# CHECK: lui $12, 355 # encoding: [0x63,0x01,0x0c,0x3c]
+# CHECK: ori $12, $12, 17784 # encoding: [0x78,0x45,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 23946 # encoding: [0x8a,0x5d,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+# CHECK: lui $12, 3552 # encoding: [0xe0,0x0d,0x0c,0x3c]
+# CHECK: ori $12, $12, 46771 # encoding: [0xb3,0xb6,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 42852 # encoding: [0x64,0xa7,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+# CHECK: lui $12, 35527 # encoding: [0xc7,0x8a,0x0c,0x3c]
+# CHECK: ori $12, $12, 8964 # encoding: [0x04,0x23,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 35304 # encoding: [0xe8,0x89,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+# CHECK: addiu $12, $zero, -1 # encoding: [0xff,0xff,0x0c,0x24]
+# CHECK: addiu $12, $zero, -10 # encoding: [0xf6,0xff,0x0c,0x24]
+# CHECK: addiu $12, $zero, -100 # encoding: [0x9c,0xff,0x0c,0x24]
+# CHECK: addiu $12, $zero, -1000 # encoding: [0x18,0xfc,0x0c,0x24]
+# CHECK: addiu $12, $zero, -10000 # encoding: [0xf0,0xd8,0x0c,0x24]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65535 # encoding: [0xff,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 65534 # encoding: [0xfe,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 31072 # encoding: [0x60,0x79,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65535 # encoding: [0xff,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 65520 # encoding: [0xf0,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 48576 # encoding: [0xc0,0xbd,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65535 # encoding: [0xff,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 65383 # encoding: [0x67,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 27008 # encoding: [0x80,0x69,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65535 # encoding: [0xff,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 64010 # encoding: [0x0a,0xfa,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 7936 # encoding: [0x00,0x1f,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65535 # encoding: [0xff,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 50277 # encoding: [0x65,0xc4,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 13824 # encoding: [0x00,0x36,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65533 # encoding: [0xfd,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 44020 # encoding: [0xf4,0xab,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 7168 # encoding: [0x00,0x1c,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65512 # encoding: [0xe8,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 46985 # encoding: [0x89,0xb7,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 6144 # encoding: [0x00,0x18,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 65303 # encoding: [0x17,0xff,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 11098 # encoding: [0x5a,0x2b,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 61440 # encoding: [0x00,0xf0,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 63207 # encoding: [0xe7,0xf6,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 45453 # encoding: [0x8d,0xb1,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 24576 # encoding: [0x00,0x60,0x8c,0x35]
+# CHECK: lui $12, 65535 # encoding: [0xff,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 42252 # encoding: [0x0c,0xa5,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 61317 # encoding: [0x85,0xef,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 49152 # encoding: [0x00,0xc0,0x8c,0x35]
+# CHECK: lui $12, 65532 # encoding: [0xfc,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 29313 # encoding: [0x81,0x72,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 23353 # encoding: [0x39,0x5b,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 32768 # encoding: [0x00,0x80,0x8c,0x35]
+# CHECK: lui $12, 65500 # encoding: [0xdc,0xff,0x0c,0x3c]
+# CHECK: ori $12, $12, 30989 # encoding: [0x0d,0x79,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 36927 # encoding: [0x3f,0x90,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+# CHECK: lui $12, 65180 # encoding: [0x9c,0xfe,0x0c,0x3c]
+# CHECK: ori $12, $12, 47751 # encoding: [0x87,0xba,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 41590 # encoding: [0x76,0xa2,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+# CHECK: lui $12, 61983 # encoding: [0x1f,0xf2,0x0c,0x3c]
+# CHECK: ori $12, $12, 18764 # encoding: [0x4c,0x49,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 22684 # encoding: [0x9c,0x58,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+# CHECK: lui $12, 30008 # encoding: [0x38,0x75,0x0c,0x3c]
+# CHECK: ori $12, $12, 56571 # encoding: [0xfb,0xdc,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 30232 # encoding: [0x18,0x76,0x8c,0x35]
+# CHECK: dsll $12, $12, 16 # encoding: [0x38,0x64,0x0c,0x00]
+# CHECK: ori $12, $12, 0 # encoding: [0x00,0x00,0x8c,0x35]
+
+ dli $t0, 1
+ dli $t0, 10
+ dli $t0, 100
+ dli $t0, 1000
+ dli $t0, 10000
+ dli $t0, 100000
+ dli $t0, 1000000
+ dli $t0, 10000000
+ dli $t0, 100000000
+ dli $t0, 1000000000
+ dli $t0, 10000000000
+ dli $t0, 100000000000
+ dli $t0, 1000000000000
+ dli $t0, 10000000000000
+ dli $t0, 100000000000000
+ dli $t0, 1000000000000000
+ dli $t0, 10000000000000000
+ dli $t0, 100000000000000000
+ dli $t0, 1000000000000000000
+ dli $t0, 10000000000000000000
+ dli $t0, -1
+ dli $t0, -10
+ dli $t0, -100
+ dli $t0, -1000
+ dli $t0, -10000
+ dli $t0, -100000
+ dli $t0, -1000000
+ dli $t0, -10000000
+ dli $t0, -100000000
+ dli $t0, -1000000000
+ dli $t0, -10000000000
+ dli $t0, -100000000000
+ dli $t0, -1000000000000
+ dli $t0, -10000000000000
+ dli $t0, -100000000000000
+ dli $t0, -1000000000000000
+ dli $t0, -10000000000000000
+ dli $t0, -100000000000000000
+ dli $t0, -1000000000000000000
+ dli $t0, -10000000000000000000
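
The CHECK lines in this new test spell out the expansion strategy for 'dli': the constant is materialised sixteen bits at a time with lui/ori, with a dsll by 16 between chunks. Below is a minimal Python sketch of that chunking arithmetic for a positive constant that needs more than 32 but at most 48 bits (smaller values get shorter sequences and larger or negative values get longer ones, as the other CHECK lines show). It is illustrative only and not part of the patch; the use of register $12 ($t0 under the n64 register numbering) is taken from the CHECK lines.

    # Sketch of the lui/ori/dsll materialisation pattern expected above for a
    # 'dli' of a positive constant needing between 33 and 48 bits.
    def expand_dli_48bit(value):
        assert (1 << 32) <= value < (1 << 48)
        hi  = (value >> 32) & 0xFFFF
        mid = (value >> 16) & 0xFFFF
        lo  =  value        & 0xFFFF
        return ["lui $12, %d" % hi,
                "ori $12, $12, %d" % mid,
                "dsll $12, $12, 16",
                "ori $12, $12, %d" % lo]

    # Reproduces the sequence checked above for 'dli $t0, 10000000000':
    #   lui $12, 2 / ori $12, $12, 21515 / dsll $12, $12, 16 / ori $12, $12, 58368
    print("\n".join(expand_dli_48bit(10000000000)))
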
diff --git a/test/MC/Mips/mips64-register-names-n32-n64.s b/test/MC/Mips/mips64-register-names-n32-n64.s
new file mode 100644
index 000000000000..ee6f88fe6ab4
--- /dev/null
+++ b/test/MC/Mips/mips64-register-names-n32-n64.s
@@ -0,0 +1,49 @@
+# RUN: llvm-mc %s -triple=mips64-unknown-freebsd -show-encoding | FileCheck %s
+# RUN: llvm-mc %s -triple=mips64-unknown-freebsd -show-encoding \
+# RUN: -mattr=-n64,+n32 | FileCheck %s
+
+# Check that the register names are mapped to their correct numbers for n32/n64
+# The second byte of each daddiu encoding below (with $zero as rs and a zero
+# immediate) contains the number of the register under test, in the rt field.
+
+.set noat
+daddiu $zero, $zero, 0 # CHECK: encoding: [0x64,0x00,0x00,0x00]
+daddiu $at, $zero, 0 # CHECK: encoding: [0x64,0x01,0x00,0x00]
+daddiu $v0, $zero, 0 # CHECK: encoding: [0x64,0x02,0x00,0x00]
+daddiu $v1, $zero, 0 # CHECK: encoding: [0x64,0x03,0x00,0x00]
+daddiu $a0, $zero, 0 # CHECK: encoding: [0x64,0x04,0x00,0x00]
+daddiu $a1, $zero, 0 # CHECK: encoding: [0x64,0x05,0x00,0x00]
+daddiu $a2, $zero, 0 # CHECK: encoding: [0x64,0x06,0x00,0x00]
+daddiu $a3, $zero, 0 # CHECK: encoding: [0x64,0x07,0x00,0x00]
+daddiu $a4, $zero, 0 # CHECK: encoding: [0x64,0x08,0x00,0x00]
+daddiu $a5, $zero, 0 # CHECK: encoding: [0x64,0x09,0x00,0x00]
+daddiu $a6, $zero, 0 # CHECK: encoding: [0x64,0x0a,0x00,0x00]
+daddiu $a7, $zero, 0 # CHECK: encoding: [0x64,0x0b,0x00,0x00]
+daddiu $t0, $zero, 0 # [*] # CHECK: encoding: [0x64,0x0c,0x00,0x00]
+daddiu $t1, $zero, 0 # [*] # CHECK: encoding: [0x64,0x0d,0x00,0x00]
+daddiu $t2, $zero, 0 # [*] # CHECK: encoding: [0x64,0x0e,0x00,0x00]
+daddiu $t3, $zero, 0 # [*] # CHECK: encoding: [0x64,0x0f,0x00,0x00]
+daddiu $t4, $zero, 0 # CHECK: encoding: [0x64,0x0c,0x00,0x00]
+daddiu $t5, $zero, 0 # CHECK: encoding: [0x64,0x0d,0x00,0x00]
+daddiu $t6, $zero, 0 # CHECK: encoding: [0x64,0x0e,0x00,0x00]
+daddiu $t7, $zero, 0 # CHECK: encoding: [0x64,0x0f,0x00,0x00]
+daddiu $s0, $zero, 0 # CHECK: encoding: [0x64,0x10,0x00,0x00]
+daddiu $s1, $zero, 0 # CHECK: encoding: [0x64,0x11,0x00,0x00]
+daddiu $s2, $zero, 0 # CHECK: encoding: [0x64,0x12,0x00,0x00]
+daddiu $s3, $zero, 0 # CHECK: encoding: [0x64,0x13,0x00,0x00]
+daddiu $s4, $zero, 0 # CHECK: encoding: [0x64,0x14,0x00,0x00]
+daddiu $s5, $zero, 0 # CHECK: encoding: [0x64,0x15,0x00,0x00]
+daddiu $s6, $zero, 0 # CHECK: encoding: [0x64,0x16,0x00,0x00]
+daddiu $s7, $zero, 0 # CHECK: encoding: [0x64,0x17,0x00,0x00]
+daddiu $t8, $zero, 0 # CHECK: encoding: [0x64,0x18,0x00,0x00]
+daddiu $t9, $zero, 0 # CHECK: encoding: [0x64,0x19,0x00,0x00]
+daddiu $kt0, $zero, 0 # CHECK: encoding: [0x64,0x1a,0x00,0x00]
+daddiu $kt1, $zero, 0 # CHECK: encoding: [0x64,0x1b,0x00,0x00]
+daddiu $gp, $zero, 0 # CHECK: encoding: [0x64,0x1c,0x00,0x00]
+daddiu $sp, $zero, 0 # CHECK: encoding: [0x64,0x1d,0x00,0x00]
+daddiu $s8, $zero, 0 # CHECK: encoding: [0x64,0x1e,0x00,0x00]
+daddiu $fp, $zero, 0 # CHECK: encoding: [0x64,0x1e,0x00,0x00]
+daddiu $ra, $zero, 0 # CHECK: encoding: [0x64,0x1f,0x00,0x00]
+
+# [*] - t0-t3 are aliases of t4-t7 for compatibility with both the original
+# ABI documentation (which uses t4-t7) and GNU as (which uses t0-t3).
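
In both of the new register-name tests, the register under test sits in the rt field of a daddiu with $zero as rs and a zero immediate, so its number appears verbatim in the second byte of the big-endian encoding. The small Python sketch below reproduces that property for two of the CHECK lines above; it is illustrative only and not part of the patch, and the daddiu opcode value 0x19 is an assumption taken from the MIPS64 reference.

    # Sketch: for 'daddiu $<reg>, $zero, 0' the second big-endian byte is the
    # register number, which is what the CHECK lines above key on.
    import struct

    DADDIU_OPCODE = 0x19  # assumed MIPS64 opcode for daddiu

    def daddiu_zero(reg_num):
        word = (DADDIU_OPCODE << 26) | (0 << 21) | (reg_num << 16) | 0
        return [f"0x{b:02x}" for b in struct.pack(">I", word)]  # big-endian

    print(daddiu_zero(8))   # ['0x64', '0x08', '0x00', '0x00']  ($a4 / o32 $t0)
    print(daddiu_zero(30))  # ['0x64', '0x1e', '0x00', '0x00']  ($s8, alias $fp)
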
diff --git a/test/MC/Mips/mips64-register-names-o32.s b/test/MC/Mips/mips64-register-names-o32.s
new file mode 100644
index 000000000000..c17057816ed3
--- /dev/null
+++ b/test/MC/Mips/mips64-register-names-o32.s
@@ -0,0 +1,41 @@
+# RUN: llvm-mc %s -triple=mips64-unknown-freebsd -show-encoding \
+# RUN: -mattr=-n64,+o32 | FileCheck %s
+
+# Check that the register names are mapped to their correct numbers for o32
+# The second byte of each daddiu encoding below (with $zero as rs and a zero
+# immediate) contains the number of the register under test, in the rt field.
+
+.set noat
+daddiu $zero, $zero, 0 # CHECK: encoding: [0x64,0x00,0x00,0x00]
+daddiu $at, $zero, 0 # CHECK: encoding: [0x64,0x01,0x00,0x00]
+daddiu $v0, $zero, 0 # CHECK: encoding: [0x64,0x02,0x00,0x00]
+daddiu $v1, $zero, 0 # CHECK: encoding: [0x64,0x03,0x00,0x00]
+daddiu $a0, $zero, 0 # CHECK: encoding: [0x64,0x04,0x00,0x00]
+daddiu $a1, $zero, 0 # CHECK: encoding: [0x64,0x05,0x00,0x00]
+daddiu $a2, $zero, 0 # CHECK: encoding: [0x64,0x06,0x00,0x00]
+daddiu $a3, $zero, 0 # CHECK: encoding: [0x64,0x07,0x00,0x00]
+daddiu $t0, $zero, 0 # CHECK: encoding: [0x64,0x08,0x00,0x00]
+daddiu $t1, $zero, 0 # CHECK: encoding: [0x64,0x09,0x00,0x00]
+daddiu $t2, $zero, 0 # CHECK: encoding: [0x64,0x0a,0x00,0x00]
+daddiu $t3, $zero, 0 # CHECK: encoding: [0x64,0x0b,0x00,0x00]
+daddiu $t4, $zero, 0 # CHECK: encoding: [0x64,0x0c,0x00,0x00]
+daddiu $t5, $zero, 0 # CHECK: encoding: [0x64,0x0d,0x00,0x00]
+daddiu $t6, $zero, 0 # CHECK: encoding: [0x64,0x0e,0x00,0x00]
+daddiu $t7, $zero, 0 # CHECK: encoding: [0x64,0x0f,0x00,0x00]
+daddiu $s0, $zero, 0 # CHECK: encoding: [0x64,0x10,0x00,0x00]
+daddiu $s1, $zero, 0 # CHECK: encoding: [0x64,0x11,0x00,0x00]
+daddiu $s2, $zero, 0 # CHECK: encoding: [0x64,0x12,0x00,0x00]
+daddiu $s3, $zero, 0 # CHECK: encoding: [0x64,0x13,0x00,0x00]
+daddiu $s4, $zero, 0 # CHECK: encoding: [0x64,0x14,0x00,0x00]
+daddiu $s5, $zero, 0 # CHECK: encoding: [0x64,0x15,0x00,0x00]
+daddiu $s6, $zero, 0 # CHECK: encoding: [0x64,0x16,0x00,0x00]
+daddiu $s7, $zero, 0 # CHECK: encoding: [0x64,0x17,0x00,0x00]
+daddiu $t8, $zero, 0 # CHECK: encoding: [0x64,0x18,0x00,0x00]
+daddiu $t9, $zero, 0 # CHECK: encoding: [0x64,0x19,0x00,0x00]
+daddiu $k0, $zero, 0 # CHECK: encoding: [0x64,0x1a,0x00,0x00]
+daddiu $k1, $zero, 0 # CHECK: encoding: [0x64,0x1b,0x00,0x00]
+daddiu $gp, $zero, 0 # CHECK: encoding: [0x64,0x1c,0x00,0x00]
+daddiu $sp, $zero, 0 # CHECK: encoding: [0x64,0x1d,0x00,0x00]
+daddiu $fp, $zero, 0 # CHECK: encoding: [0x64,0x1e,0x00,0x00]
+daddiu $s8, $zero, 0 # CHECK: encoding: [0x64,0x1e,0x00,0x00]
+daddiu $ra, $zero, 0 # CHECK: encoding: [0x64,0x1f,0x00,0x00]
diff --git a/test/MC/Mips/mips64-register-names.s b/test/MC/Mips/mips64-register-names.s
deleted file mode 100644
index 16783ee1a68c..000000000000
--- a/test/MC/Mips/mips64-register-names.s
+++ /dev/null
@@ -1,70 +0,0 @@
-# RUN: llvm-mc %s -triple=mips64-unknown-freebsd -show-encoding | FileCheck %s
-
-# Check that the register names are mapped to their correct numbers for n64
-# Second byte of addiu with $zero at rt contains the number of the source
-# register.
-
-# CHECK: encoding: [0x64,0x00,0x00,0x00]
-# CHECK: encoding: [0x64,0x01,0x00,0x00]
-# CHECK: encoding: [0x64,0x02,0x00,0x00]
-# CHECK: encoding: [0x64,0x03,0x00,0x00]
-# CHECK: encoding: [0x64,0x04,0x00,0x00]
-# CHECK: encoding: [0x64,0x05,0x00,0x00]
-# CHECK: encoding: [0x64,0x06,0x00,0x00]
-# CHECK: encoding: [0x64,0x07,0x00,0x00]
-# CHECK: encoding: [0x64,0x08,0x00,0x00]
-# CHECK: encoding: [0x64,0x09,0x00,0x00]
-# CHECK: encoding: [0x64,0x0a,0x00,0x00]
-# CHECK: encoding: [0x64,0x0b,0x00,0x00]
-# CHECK: encoding: [0x64,0x0c,0x00,0x00]
-# CHECK: encoding: [0x64,0x0d,0x00,0x00]
-# CHECK: encoding: [0x64,0x0e,0x00,0x00]
-# CHECK: encoding: [0x64,0x0f,0x00,0x00]
-# CHECK: encoding: [0x64,0x10,0x00,0x00]
-# CHECK: encoding: [0x64,0x11,0x00,0x00]
-# CHECK: encoding: [0x64,0x12,0x00,0x00]
-# CHECK: encoding: [0x64,0x13,0x00,0x00]
-# CHECK: encoding: [0x64,0x14,0x00,0x00]
-# CHECK: encoding: [0x64,0x15,0x00,0x00]
-# CHECK: encoding: [0x64,0x16,0x00,0x00]
-# CHECK: encoding: [0x64,0x17,0x00,0x00]
-# CHECK: encoding: [0x64,0x18,0x00,0x00]
-# CHECK: encoding: [0x64,0x19,0x00,0x00]
-# CHECK: encoding: [0x64,0x1a,0x00,0x00]
-# CHECK: encoding: [0x64,0x1b,0x00,0x00]
-# CHECK: encoding: [0x64,0x1c,0x00,0x00]
-# CHECK: encoding: [0x64,0x1d,0x00,0x00]
-# CHECK: encoding: [0x64,0x1e,0x00,0x00]
-# CHECK: encoding: [0x64,0x1f,0x00,0x00]
-daddiu $zero, $zero, 0
-daddiu $at, $zero, 0
-daddiu $v0, $zero, 0
-daddiu $v1, $zero, 0
-daddiu $a0, $zero, 0
-daddiu $a1, $zero, 0
-daddiu $a2, $zero, 0
-daddiu $a3, $zero, 0
-daddiu $a4, $zero, 0
-daddiu $a5, $zero, 0
-daddiu $a6, $zero, 0
-daddiu $a7, $zero, 0
-daddiu $t4, $zero, 0
-daddiu $t5, $zero, 0
-daddiu $t6, $zero, 0
-daddiu $t7, $zero, 0
-daddiu $s0, $zero, 0
-daddiu $s1, $zero, 0
-daddiu $s2, $zero, 0
-daddiu $s3, $zero, 0
-daddiu $s4, $zero, 0
-daddiu $s5, $zero, 0
-daddiu $s6, $zero, 0
-daddiu $s7, $zero, 0
-daddiu $t8, $zero, 0
-daddiu $t9, $zero, 0
-daddiu $kt0, $zero, 0
-daddiu $kt1, $zero, 0
-daddiu $gp, $zero, 0
-daddiu $sp, $zero, 0
-daddiu $s8, $zero, 0
-daddiu $ra, $zero, 0
diff --git a/test/MC/Mips/mips64/abiflags.s b/test/MC/Mips/mips64/abiflags.s
new file mode 100644
index 000000000000..ecaffcc3465c
--- /dev/null
+++ b/test/MC/Mips/mips64/abiflags.s
@@ -0,0 +1,36 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips64 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips64 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# CHECK-ASM: .module fp=64
+
+# Check that the .MIPS.abiflags section was emitted correctly.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00004001 02020001 00000000 00000000 |..@.............|
+# CHECK-OBJ: 0010: 00000001 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+ .module fp=64
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
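
The SectionData block in the CHECK-OBJ lines above is the raw 24-byte .MIPS.abiflags payload produced for '.module fp=64'. The sketch below decodes it assuming the standard MIPS ABI Flags structure layout (version, isa_level, isa_rev, gpr_size, cpr1_size, cpr2_size, fp_abi, isa_ext, ases, flags1, flags2); that layout is an assumption taken from the ABI Flags specification, not something stated in the patch, and the code is illustrative only.

    # Sketch: decode the 24-byte .MIPS.abiflags payload shown in the CHECK-OBJ
    # SectionData above (big-endian; field layout assumed from the MIPS ABI
    # Flags specification).
    import struct

    blob = bytes.fromhex("00004001" "02020001" "00000000" "00000000"
                         "00000001" "00000000")
    names = ("version", "isa_level", "isa_rev", "gpr_size", "cpr1_size",
             "cpr2_size", "fp_abi", "isa_ext", "ases", "flags1", "flags2")
    fields = struct.unpack(">HBBBBBBIIII", blob)
    print(dict(zip(names, fields)))
    # Decodes to isa_level 64, isa_rev 1, gpr_size 2, fp_abi 1, flags1 1,
    # with the remaining fields zero.
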
diff --git a/test/MC/Mips/mips64/invalid-mips64r2-xfail.s b/test/MC/Mips/mips64/invalid-mips64r2-xfail.s
new file mode 100644
index 000000000000..b2b612d8e5ea
--- /dev/null
+++ b/test/MC/Mips/mips64/invalid-mips64r2-xfail.s
@@ -0,0 +1,11 @@
+# Instructions that are supposed to be invalid but currently aren't
+# This test will XPASS if any insn stops assembling.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips4 \
+# RUN: 2> %t1
+# RUN: not FileCheck %s < %t1
+# XFAIL: *
+
+# CHECK-NOT: error
+ .set noat
+ rdhwr $sp,$11
diff --git a/test/MC/Mips/mips64/invalid-mips64r2.s b/test/MC/Mips/mips64/invalid-mips64r2.s
new file mode 100644
index 000000000000..1a5abb645596
--- /dev/null
+++ b/test/MC/Mips/mips64/invalid-mips64r2.s
@@ -0,0 +1,29 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ di $s8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr32 $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotr32 $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ drotrv $1,$14,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsbh $v1,$14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dshd $v0,$sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ei $14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ madd.s $f1,$f31,$f19,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhc1 $s8,$f24 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ msub.s $f12,$f19,$f10,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthc1 $zero,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmadd.s $f0,$f5,$f25,$f12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ nmsub.s $f1,$f24,$f19,$f4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ pause # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotr $1,$14,15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ rotrv $1,$14,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seb $25,$15 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ seh $v1,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ wsbh $k1,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips64/valid-xfail.s b/test/MC/Mips/mips64/valid-xfail.s
new file mode 100644
index 000000000000..e5455f5d03df
--- /dev/null
+++ b/test/MC/Mips/mips64/valid-xfail.s
@@ -0,0 +1,94 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ abs.ps $f22,$f8
+ add.ps $f25,$f27,$f13
+ alnv.ob $v22,$v19,$v30,$v1
+ alnv.ob $v31,$v23,$v30,$at
+ alnv.ob $v8,$v17,$v30,$a1
+ alnv.ps $f12,$f18,$f30,$12
+ c.eq.d $fcc1,$f15,$f15
+ c.eq.ps $fcc5,$f0,$f9
+ c.eq.s $fcc5,$f24,$f17
+ c.f.d $fcc4,$f11,$f21
+ c.f.ps $fcc6,$f11,$f11
+ c.f.s $fcc4,$f30,$f7
+ c.le.d $fcc4,$f18,$f1
+ c.le.ps $fcc1,$f7,$f20
+ c.le.s $fcc6,$f24,$f4
+ c.lt.d $fcc3,$f9,$f3
+ c.lt.ps $f19,$f5
+ c.lt.s $fcc2,$f17,$f14
+ c.nge.d $fcc5,$f21,$f16
+ c.nge.ps $f1,$f26
+ c.nge.s $fcc3,$f11,$f8
+ c.ngl.ps $f21,$f30
+ c.ngl.s $fcc2,$f31,$f23
+ c.ngle.ps $fcc7,$f12,$f20
+ c.ngle.s $fcc2,$f18,$f23
+ c.ngt.d $fcc4,$f24,$f7
+ c.ngt.ps $fcc5,$f30,$f6
+ c.ngt.s $fcc5,$f8,$f13
+ c.ole.d $fcc2,$f16,$f31
+ c.ole.ps $fcc7,$f21,$f8
+ c.ole.s $fcc3,$f7,$f20
+ c.olt.d $fcc4,$f19,$f28
+ c.olt.ps $fcc3,$f7,$f16
+ c.olt.s $fcc6,$f20,$f7
+ c.seq.d $fcc4,$f31,$f7
+ c.seq.ps $fcc6,$f31,$f14
+ c.seq.s $fcc7,$f1,$f25
+ c.sf.ps $fcc6,$f4,$f6
+ c.ueq.d $fcc4,$f13,$f25
+ c.ueq.ps $fcc1,$f5,$f29
+ c.ueq.s $fcc6,$f3,$f30
+ c.ule.d $fcc7,$f25,$f18
+ c.ule.ps $fcc6,$f17,$f3
+ c.ule.s $fcc7,$f21,$f30
+ c.ult.d $fcc6,$f6,$f17
+ c.ult.ps $fcc7,$f14,$f0
+ c.ult.s $fcc7,$f24,$f10
+ c.un.d $fcc6,$f23,$f24
+ c.un.ps $fcc4,$f2,$f26
+ c.un.s $fcc1,$f30,$f4
+ cvt.ps.s $f3,$f18,$f19
+ cvt.s.pl $f30,$f1
+ cvt.s.pu $f14,$f25
+ dmfc0 $10,c0_watchhi,2
+ dmtc0 $15,c0_datalo
+ madd.d $f18,$f19,$f26,$f20
+ madd.ps $f22,$f3,$f14,$f3
+ madd.s $f1,$f31,$f19,$f25
+ mov.ps $f22,$f17
+ movf.ps $f10,$f28,$fcc6
+ movn.ps $f31,$f31,$s3
+ movt.ps $f20,$f25,$fcc2
+ movz.ps $f18,$f17,$ra
+ msgn.qh $v0,$v24,$v20
+ msgn.qh $v12,$v21,$v0[1]
+ msub.d $f10,$f1,$f31,$f18
+ msub.ps $f12,$f14,$f29,$f17
+ msub.s $f12,$f19,$f10,$f16
+ mul.ps $f14,$f0,$f16
+ neg.ps $f19,$f13
+ nmadd.d $f18,$f9,$f14,$f19
+ nmadd.ps $f27,$f4,$f9,$f25
+ nmadd.s $f0,$f5,$f25,$f12
+ nmsub.d $f30,$f8,$f16,$f30
+ nmsub.ps $f6,$f12,$f14,$f17
+ nmsub.s $f1,$f24,$f19,$f4
+ pll.ps $f25,$f9,$f30
+ plu.ps $f1,$f26,$f29
+ pul.ps $f9,$f30,$f26
+ puu.ps $f24,$f9,$f2
+ recip.d $f19,$f6
+ recip.s $f3,$f30
+ rsqrt.d $f3,$f28
+ rsqrt.s $f4,$f8
+ sub.ps $f5,$f14,$f26
diff --git a/test/MC/Mips/mips64/valid.s b/test/MC/Mips/mips64/valid.s
new file mode 100644
index 000000000000..1bd057d757df
--- /dev/null
+++ b/test/MC/Mips/mips64/valid.s
@@ -0,0 +1,235 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f $fcc1, 4 # CHECK: bc1f $fcc1, 4 # encoding: [0x45,0x04,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t $fcc1, 4 # CHECK: bc1t $fcc1, 4 # encoding: [0x45,0x05,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0xbc,0xa1,0x00,0x08]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.l.d $f1,$f3
+ ceil.l.s $f18,$f13
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ clo $11,$a1 # CHECK: clo $11, $5 # encoding: [0x70,0xab,0x58,0x21]
+ clz $sp,$gp # CHECK: clz $sp, $gp # encoding: [0x73,0x9d,0xe8,0x20]
+ ctc1 $a2,$26
+ cvt.d.l $f4,$f16
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.l.d $f24,$f15
+ cvt.l.s $f11,$f29
+ cvt.s.d $f26,$f8
+ cvt.s.l $f15,$f30
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ dadd $s3,$at,$ra
+ dadd $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ dadd $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddi $sp,$s4,-27705
+ daddi $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ daddi $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddiu $k0,$s6,-4586
+ daddu $s3,$at,$ra
+ dclo $s2,$a2 # CHECK: dclo $18, $6 # encoding: [0x70,0xd2,0x90,0x25]
+ dclz $s0,$25 # CHECK: dclz $16, $25 # encoding: [0x73,0x30,0x80,0x24]
+ deret
+ ddiv $zero,$k0,$s3
+ ddivu $zero,$s0,$s1
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ dmfc1 $12,$f13
+ dmtc1 $s0,$f14
+ dmult $s7,$9
+ dmultu $a1,$a2
+ dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
+ dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
+ dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsra $gp,10 # CHECK: dsra $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbb]
+ dsra $gp,$s2,10 # CHECK: dsra $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbb]
+ dsra $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsra32 $gp,10 # CHECK: dsra32 $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbf]
+ dsra32 $gp,$s2,10 # CHECK: dsra32 $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbf]
+ dsrav $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
+ dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
+ dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
+ dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
+ dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsub $a3,$s6,$8
+ dsub $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsub $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubi $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsubi $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubu $a1,$a1,$k0
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ eret
+ floor.l.d $f26,$f7
+ floor.l.s $f12,$f5
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ld $sp,-28645($s1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ ldl $24,-4167($24)
+ ldr $14,-30358($s4)
+ ldxc1 $f8,$s7($15)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ lld $zero,-14736($ra) # CHECK: lld $zero, -14736($ra) # encoding: [0xd3,0xe0,0xc6,0x70]
+ luxc1 $f19,$s6($s5)
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ lwu $s3,-24086($v1)
+ lwxc1 $f12,$s1($s8)
+ madd $s6,$13
+ madd $zero,$9
+ maddu $s3,$gp
+ maddu $24,$s2
+ mfc0 $a2,$14,1
+ mfc1 $a3,$f27
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $a0,$a3
+ move $s5,$a0
+ move $s8,$a0
+ move $25,$a2
+ movf $gp,$8,$fcc7
+ movf.d $f6,$f11,$fcc5
+ movf.s $f23,$f5,$fcc6
+ movn $v1,$s1,$s0
+ movn.d $f27,$f21,$k0
+ movn.s $f12,$f0,$s7
+ movt $zero,$s4,$fcc5
+ movt.d $f0,$f2,$fcc0
+ movt.s $f30,$f2,$fcc1
+ movz $a1,$s6,$9
+ movz.d $f12,$f29,$9
+ movz.s $f25,$f7,$v1
+ msub $s7,$k1
+ msubu $15,$a1
+ mtc0 $9,$29,3
+ mtc1 $s8,$f9
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul $s0,$s4,$at
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0xcc,0xa1,0x00,0x08]
+ round.l.d $f12,$f1
+ round.l.s $f25,$f5
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ scd $15,-8243($sp) # CHECK: scd $15, -8243($sp) # encoding: [0xf3,0xaf,0xdf,0xcd]
+ sdbbp # CHECK: sdbbp # encoding: [0x70,0x00,0x00,0x3f]
+ sdbbp 34 # CHECK: sdbbp 34 # encoding: [0x70,0x00,0x08,0xbf]
+ sd $12,5835($10)
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sdl $a3,-20961($s8)
+ sdr $11,-20423($12)
+ sdxc1 $f11,$10($14)
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ suxc1 $f12,$k1($13)
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ swxc1 $f19,$12($k0)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ sync 1 # CHECK: sync 1 # encoding: [0x00,0x00,0x00,0x4f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.l.d $f23,$f23
+ trunc.l.s $f28,$f31
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ xor $s2,$a0,$s8
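Several CHECK lines in valid.s above document macro expansions performed by the assembler rather than single machine instructions; the immediate forms of dadd and dsub are a representative pair. A short annotated excerpt, taken from the listing above with the arithmetic spelled out:

    dadd  $sp,$s4,-27705   # expands to: daddi $sp, $20, -27705   (immediate passed through unchanged)
    dsub  $sp,$s4,-27705   # expands to: daddi $sp, $20, 27705    (subtracting -27705 becomes adding +27705)
    dsubi $sp,-27705       # expands to: daddi $sp, $sp, 27705    (two-operand form reuses $sp as the source)

The encodings in the CHECK comments confirm this: the low halfword is 0x93c7 (-27705) for the dadd forms and 0x6c39 (+27705) for the dsub forms, so only the sign of the immediate changes.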
diff --git a/test/MC/Mips/mips64eb-fixups.s b/test/MC/Mips/mips64eb-fixups.s
new file mode 100644
index 000000000000..1ca8b0ae5294
--- /dev/null
+++ b/test/MC/Mips/mips64eb-fixups.s
@@ -0,0 +1,43 @@
+# RUN: llvm-mc -filetype=obj -triple mips64eb-unknown-freebsd %s -o - | llvm-readobj -s -sd | FileCheck %s
+
+ .section .fixups,"",@progbits
+ .byte 0xff
+$diff0 = ($loc1)-($loc0)
+ .2byte ($diff0)
+
+ .byte 0xff
+$diff1 = ($loc2)-($loc0)
+ .4byte ($diff1)
+
+ .byte 0xff
+$diff2 = ($loc3)-($loc0)
+ .8byte ($diff2)
+ .byte 0xff
+
+$loc0:
+ .byte 0xee
+$loc1:
+ .byte 0xdd
+$loc2:
+ .byte 0xcc
+$loc3:
+
+# CHECK: AddressSize: 64bit
+# CHECK: Section {
+# CHECK: Name: .fixups (12)
+# CHECK-NEXT: Type: SHT_PROGBITS (0x1)
+# CHECK-NEXT: Flags [ (0x0)
+# CHECK-NEXT: ]
+# CHECK-NEXT: Address: 0x0
+# CHECK-NEXT: Offset: 0x40
+# CHECK-NEXT: Size: 21
+# CHECK-NEXT: Link: 0
+# CHECK-NEXT: Info: 0
+# CHECK-NEXT: AddressAlignment: 1
+# CHECK-NEXT: EntrySize: 0
+# CHECK-NEXT: SectionData (
+# CHECK-NEXT: 0000: FF0001FF 00000002 FF000000 00000000 |................|
+# CHECK-NEXT: 0010: 03FFEEDD CC |.....|
+# CHECK-NEXT: )
+# CHECK-NEXT: }
+# CHECK: ]
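The expected SectionData in mips64eb-fixups.s follows directly from the label arithmetic: each $diffN is a difference of local labels in the same section, so the assembler resolves it at assembly time (no relocations are emitted) and writes the value big-endian because the triple is mips64eb. Laying the directives next to the bytes they produce:

    .byte  0xff            ->  ff
    .2byte ($diff0)        ->  00 01                     # $loc1 - $loc0 = 1 (one byte, 0xee, in between)
    .byte  0xff            ->  ff
    .4byte ($diff1)        ->  00 00 00 02               # $loc2 - $loc0 = 2
    .byte  0xff            ->  ff
    .8byte ($diff2)        ->  00 00 00 00 00 00 00 03   # $loc3 - $loc0 = 3
    .byte  0xff            ->  ff
    .byte  0xee/0xdd/0xcc  ->  ee dd cc                  # the bytes that define $loc0..$loc2

That is 1 + 2 + 1 + 4 + 1 + 8 + 1 + 3 = 21 bytes, matching Size: 21 and the two SectionData rows checked above.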
diff --git a/test/MC/Mips/mips64r2/abi-bad.s b/test/MC/Mips/mips64r2/abi-bad.s
new file mode 100644
index 000000000000..31d13abc909f
--- /dev/null
+++ b/test/MC/Mips/mips64r2/abi-bad.s
@@ -0,0 +1,9 @@
+# RUN: not llvm-mc %s -triple mips-unknown-unknown -mcpu=mips64r2 2>&1 | FileCheck %s
+# CHECK: .text
+
+
+
+ .set fp=xx
+# CHECK : error: 'set fp=xx'option requires O32 ABI
+# CHECK : .set fp=xx
+# CHECK : ^
diff --git a/test/MC/Mips/mips64r2/abiflags.s b/test/MC/Mips/mips64r2/abiflags.s
new file mode 100644
index 000000000000..dc4a1e946e3e
--- /dev/null
+++ b/test/MC/Mips/mips64r2/abiflags.s
@@ -0,0 +1,36 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips64r2 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips64r2 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# CHECK-ASM: .module fp=64
+
+# Check that the .MIPS.abiflags section is emitted correctly.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00004002 02020001 00000000 00000000 |..@.............|
+# CHECK-OBJ: 0010: 00000001 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+ .module fp=64
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
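The CHECK-OBJ SectionData in abiflags.s can also be read field by field. The sketch below assumes the standard 24-byte .MIPS.abiflags (version 0) record layout; that layout is taken from the MIPS ABI Flags specification, not from anything stated in this diff:

    00 00       # version   = 0
    40          # isa_level = 64   (mips64r2)
    02          # isa_rev   = 2
    02 02 00    # gpr_size, cpr1_size, cpr2_size
    01          # fp_abi
    00000000    # isa_ext
    00000000    # ases
    00000001    # flags1
    00000000    # flags2

One halfword, six single bytes, and four 32-bit words add up to 24 bytes, which is why the test checks Size: 24 and EntrySize: 24 with an 8-byte AddressAlignment.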
diff --git a/test/MC/Mips/mips64r2/invalid.s b/test/MC/Mips/mips64r2/invalid.s
new file mode 100644
index 000000000000..f53cfff40438
--- /dev/null
+++ b/test/MC/Mips/mips64r2/invalid.s
@@ -0,0 +1,10 @@
+# Instructions that are valid for the current ISA but should be rejected by the assembler
+# (e.g. an invalid set of operands or operand restrictions that are not met).
+
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -mcpu=mips64r2 2>%t1
+# RUN: FileCheck %s < %t1 -check-prefix=ASM
+
+ .text
+ .set noreorder
+ jalr.hb $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
+ jalr.hb $31, $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
diff --git a/test/MC/Mips/mips64r2/valid-xfail.s b/test/MC/Mips/mips64r2/valid-xfail.s
new file mode 100644
index 000000000000..9ac47f631546
--- /dev/null
+++ b/test/MC/Mips/mips64r2/valid-xfail.s
@@ -0,0 +1,311 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r2 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ abs.ps $f22,$f8
+ absq_s.ph $8,$a0
+ absq_s.qb $15,$s1
+ absq_s.w $s3,$ra
+ add.ps $f25,$f27,$f13
+ addq.ph $s1,$15,$at
+ addq_s.ph $s3,$s6,$s2
+ addq_s.w $a2,$8,$at
+ addqh.ph $s4,$14,$s1
+ addqh.w $s7,$s7,$k1
+ addqh_r.ph $sp,$25,$s8
+ addqh_r.w $8,$v1,$zero
+ addsc $s8,$15,$12
+ addu.ph $a2,$14,$s3
+ addu.qb $s6,$v1,$v1
+ addu_s.ph $a3,$s3,$gp
+ addu_s.qb $s4,$s8,$s1
+ adduh.qb $a1,$a1,$at
+ adduh_r.qb $a0,$9,$12
+ addwc $k0,$s6,$s7
+ alnv.ob $v22,$v19,$v30,$v1
+ alnv.ob $v31,$v23,$v30,$at
+ alnv.ob $v8,$v17,$v30,$a1
+ alnv.ps $f12,$f18,$f30,$12
+ and.v $w10,$w25,$w29
+ bitrev $14,$at
+ bmnz.v $w15,$w2,$w28
+ bmz.v $w13,$w11,$w21
+ bsel.v $w28,$w7,$w0
+ c.eq.d $fcc1,$f15,$f15
+ c.eq.ps $fcc5,$f0,$f9
+ c.eq.s $fcc5,$f24,$f17
+ c.f.d $fcc4,$f11,$f21
+ c.f.ps $fcc6,$f11,$f11
+ c.f.s $fcc4,$f30,$f7
+ c.le.d $fcc4,$f18,$f1
+ c.le.ps $fcc1,$f7,$f20
+ c.le.s $fcc6,$f24,$f4
+ c.lt.d $fcc3,$f9,$f3
+ c.lt.ps $f19,$f5
+ c.lt.s $fcc2,$f17,$f14
+ c.nge.d $fcc5,$f21,$f16
+ c.nge.ps $f1,$f26
+ c.nge.s $fcc3,$f11,$f8
+ c.ngl.ps $f21,$f30
+ c.ngl.s $fcc2,$f31,$f23
+ c.ngle.ps $fcc7,$f12,$f20
+ c.ngle.s $fcc2,$f18,$f23
+ c.ngt.d $fcc4,$f24,$f7
+ c.ngt.ps $fcc5,$f30,$f6
+ c.ngt.s $fcc5,$f8,$f13
+ c.ole.d $fcc2,$f16,$f31
+ c.ole.ps $fcc7,$f21,$f8
+ c.ole.s $fcc3,$f7,$f20
+ c.olt.d $fcc4,$f19,$f28
+ c.olt.ps $fcc3,$f7,$f16
+ c.olt.s $fcc6,$f20,$f7
+ c.seq.d $fcc4,$f31,$f7
+ c.seq.ps $fcc6,$f31,$f14
+ c.seq.s $fcc7,$f1,$f25
+ c.sf.ps $fcc6,$f4,$f6
+ c.ueq.d $fcc4,$f13,$f25
+ c.ueq.ps $fcc1,$f5,$f29
+ c.ueq.s $fcc6,$f3,$f30
+ c.ule.d $fcc7,$f25,$f18
+ c.ule.ps $fcc6,$f17,$f3
+ c.ule.s $fcc7,$f21,$f30
+ c.ult.d $fcc6,$f6,$f17
+ c.ult.ps $fcc7,$f14,$f0
+ c.ult.s $fcc7,$f24,$f10
+ c.un.d $fcc6,$f23,$f24
+ c.un.ps $fcc4,$f2,$f26
+ c.un.s $fcc1,$f30,$f4
+ cvt.ps.s $f3,$f18,$f19
+ cmp.eq.ph $s7,$14
+ cmp.le.ph $8,$14
+ cmp.lt.ph $k0,$sp
+ cmpgdu.eq.qb $s3,$zero,$k0
+ cmpgdu.le.qb $v1,$15,$s2
+ cmpgdu.lt.qb $s0,$gp,$sp
+ cmpgu.eq.qb $14,$s6,$s8
+ cmpgu.le.qb $9,$a3,$s4
+ cmpgu.lt.qb $sp,$at,$8
+ cmpu.eq.qb $v0,$24
+ cmpu.le.qb $s1,$a1
+ cmpu.lt.qb $at,$a3
+ cvt.s.pl $f30,$f1
+ cvt.s.pu $f14,$f25
+ dmfc0 $10,c0_watchhi,2
+ dmfgc0 $gp,c0_perfcnt,6
+ dmt $k0
+ dmtc0 $15,c0_datalo
+ dmtgc0 $a2,c0_watchlo,2
+ dpa.w.ph $ac1,$s7,$k0
+ dpaq_s.w.ph $ac2,$a0,$13
+ dpaq_sa.l.w $ac0,$a2,$14
+ dpaqx_s.w.ph $ac3,$a0,$24
+ dpaqx_sa.w.ph $ac1,$zero,$s5
+ dpau.h.qbl $ac1,$10,$24
+ dpau.h.qbr $ac1,$s7,$s6
+ dpax.w.ph $ac3,$a0,$k0
+ dps.w.ph $ac1,$a3,$a1
+ dpsq_s.w.ph $ac0,$gp,$k0
+ dpsq_sa.l.w $ac0,$a3,$15
+ dpsqx_s.w.ph $ac3,$13,$a3
+ dpsqx_sa.w.ph $ac3,$sp,$s2
+ dpsu.h.qbl $ac2,$14,$10
+ dpsu.h.qbr $ac2,$a1,$s6
+ dpsx.w.ph $ac0,$s7,$gp
+ drorv $at,$a1,$s7
+ dvpe $s6
+ emt $8
+ evpe $v0
+ extpdpv $s6,$ac0,$s8
+ extpv $13,$ac0,$14
+ extrv.w $8,$ac3,$at
+ extrv_r.w $8,$ac1,$s6
+ extrv_rs.w $gp,$ac1,$s6
+ extrv_s.h $s2,$ac1,$14
+ fclass.d $w14,$w27
+ fclass.w $w19,$w28
+ fexupl.d $w10,$w29
+ fexupl.w $w12,$w27
+ fexupr.d $w31,$w15
+ fexupr.w $w29,$w12
+ ffint_s.d $w1,$w30
+ ffint_s.w $w16,$w14
+ ffint_u.d $w23,$w18
+ ffint_u.w $w19,$w12
+ ffql.d $w2,$w3
+ ffql.w $w9,$w0
+ ffqr.d $w25,$w24
+ ffqr.w $w10,$w6
+ fill.b $w9,$v1
+ fill.d $w28,$8
+ fill.h $w9,$8
+ fill.w $w31,$15
+ flog2.d $w12,$w16
+ flog2.w $w19,$w23
+ fork $s2,$8,$a0
+ frcp.d $w12,$w4
+ frcp.w $w30,$w8
+ frint.d $w20,$w8
+ frint.w $w11,$w29
+ frsqrt.d $w29,$w2
+ frsqrt.w $w9,$w8
+ fsqrt.d $w3,$w1
+ fsqrt.w $w5,$w15
+ ftint_s.d $w31,$w26
+ ftint_s.w $w27,$w14
+ ftint_u.d $w5,$w31
+ ftint_u.w $w12,$w29
+ ftrunc_s.d $w4,$w22
+ ftrunc_s.w $w24,$w7
+ ftrunc_u.d $w20,$w25
+ ftrunc_u.w $w7,$w26
+ insv $s2,$at
+ iret
+ lbe $14,122($9)
+ lbue $11,-108($10)
+ lbux $9,$14($v0)
+ lhe $s6,219($v1)
+ lhue $gp,118($11)
+ lhx $sp,$k0($15)
+ lle $gp,-237($ra)
+ lwe $ra,-145($14)
+ lwle $11,-42($11)
+ lwre $sp,-152($24)
+ lwx $12,$12($s4)
+ madd.d $f18,$f19,$f26,$f20
+ madd.ps $f22,$f3,$f14,$f3
+ maq_s.w.phl $ac2,$25,$11
+ maq_s.w.phr $ac0,$10,$25
+ maq_sa.w.phl $ac3,$a1,$v1
+ maq_sa.w.phr $ac1,$at,$10
+ mfgc0 $s6,c0_datahi1
+ mflo $9,$ac2
+ modsub $a3,$12,$a3
+ mov.ps $f22,$f17
+ movf.ps $f10,$f28,$fcc6
+ movn.ps $f31,$f31,$s3
+ movt.ps $f20,$f25,$fcc2
+ movz.ps $f18,$f17,$ra
+ msgn.qh $v0,$v24,$v20
+ msgn.qh $v12,$v21,$v0[1]
+ msub $ac2,$sp,$14
+ msub.d $f10,$f1,$f31,$f18
+ msub.ps $f12,$f14,$f29,$f17
+ msubu $ac2,$a1,$24
+ mtc0 $9,c0_datahi1
+ mtgc0 $s4,$21,7
+ mthi $v0,$ac1
+ mthlip $a3,$ac0
+ mul.ph $s4,$24,$s0
+ mul.ps $f14,$f0,$f16
+ mul_s.ph $10,$14,$15
+ muleq_s.w.phl $11,$s4,$s4
+ muleq_s.w.phr $s6,$a0,$s8
+ muleu_s.ph.qbl $a2,$14,$8
+ muleu_s.ph.qbr $a1,$ra,$9
+ mulq_rs.ph $s2,$14,$15
+ mulq_rs.w $at,$s4,$25
+ mulq_s.ph $s0,$k1,$15
+ mulq_s.w $9,$a3,$s0
+ mulsa.w.ph $ac1,$s4,$s6
+ mulsaq_s.w.ph $ac0,$ra,$s2
+ neg.ps $f19,$f13
+ nloc.b $w12,$w30
+ nloc.d $w16,$w7
+ nloc.h $w21,$w17
+ nloc.w $w17,$w16
+ nlzc.b $w12,$w7
+ nlzc.d $w14,$w14
+ nlzc.h $w24,$w24
+ nlzc.w $w10,$w4
+ nmadd.d $f18,$f9,$f14,$f19
+ nmadd.ps $f27,$f4,$f9,$f25
+ nmsub.d $f30,$f8,$f16,$f30
+ nmsub.ps $f6,$f12,$f14,$f17
+ nor.v $w20,$w20,$w15
+ or.v $w13,$w23,$w12
+ packrl.ph $ra,$24,$14
+ pcnt.b $w30,$w15
+ pcnt.d $w5,$w16
+ pcnt.h $w20,$w24
+ pcnt.w $w22,$w20
+ pick.ph $ra,$a2,$gp
+ pick.qb $11,$a0,$gp
+ pll.ps $f25,$f9,$f30
+ plu.ps $f1,$f26,$f29
+ preceq.w.phl $s8,$gp
+ preceq.w.phr $s5,$15
+ precequ.ph.qbl $s7,$ra
+ precequ.ph.qbla $a0,$9
+ precequ.ph.qbr $ra,$s3
+ precequ.ph.qbra $24,$8
+ preceu.ph.qbl $sp,$8
+ preceu.ph.qbla $s6,$11
+ preceu.ph.qbr $gp,$s1
+ preceu.ph.qbra $k1,$s0
+ precr.qb.ph $v0,$12,$s8
+ precrq.ph.w $14,$s8,$24
+ precrq.qb.ph $a2,$12,$12
+ precrq_rs.ph.w $a1,$k0,$a3
+ precrqu_s.qb.ph $zero,$gp,$s5
+ pul.ps $f9,$f30,$f26
+ puu.ps $f24,$f9,$f2
+ raddu.w.qb $25,$s3
+ rdpgpr $s3,$9
+ recip.d $f19,$f6
+ recip.s $f3,$f30
+ repl.ph $at,-307
+ replv.ph $v1,$s7
+ replv.qb $25,$12
+ rorv $13,$a3,$s5
+ rsqrt.d $f3,$f28
+ rsqrt.s $f4,$f8
+ sbe $s7,33($s1)
+ sce $sp,189($10)
+ she $24,105($v0)
+ shilo $ac1,26
+ shilov $ac2,$10
+ shllv.ph $10,$s0,$s0
+ shllv.qb $gp,$v1,$zero
+ shllv_s.ph $k1,$at,$13
+ shllv_s.w $s1,$ra,$k0
+ shrav.ph $25,$s2,$s1
+ shrav.qb $zero,$24,$11
+ shrav_r.ph $s3,$11,$25
+ shrav_r.qb $a0,$sp,$s5
+ shrav_r.w $s7,$s4,$s6
+ shrlv.ph $14,$10,$9
+ shrlv.qb $a2,$s2,$11
+ sub.ps $f5,$f14,$f26
+ subq.ph $ra,$9,$s8
+ subq_s.ph $13,$s8,$s5
+ subq_s.w $k1,$a2,$a3
+ subqh.ph $10,$at,$9
+ subqh.w $v0,$a2,$zero
+ subqh_r.ph $a0,$12,$s6
+ subqh_r.w $10,$a2,$gp
+ subu.ph $9,$s6,$s4
+ subu.qb $s6,$a2,$s6
+ subu_s.ph $v1,$a1,$s3
+ subu_s.qb $s1,$at,$ra
+ subuh.qb $zero,$gp,$gp
+ subuh_r.qb $s4,$s8,$s6
+ swe $24,94($k0)
+ swle $v1,-209($gp)
+ swre $k0,-202($s2)
+ synci 20023($s0)
+ tlbginv
+ tlbginvf
+ tlbgp
+ tlbgr
+ tlbgwi
+ tlbgwr
+ tlbinv
+ tlbinvf
+ wrpgpr $zero,$13
+ xor.v $w20,$w21,$w30
+ yield $v1,$s0
diff --git a/test/MC/Mips/mips64r2/valid.s b/test/MC/Mips/mips64r2/valid.s
new file mode 100644
index 000000000000..7a2244a66420
--- /dev/null
+++ b/test/MC/Mips/mips64r2/valid.s
@@ -0,0 +1,263 @@
+# Instructions that are valid
+#
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r2 | FileCheck %s
+
+ .set noat
+ abs.d $f7,$f25 # CHECK: encoding:
+ abs.s $f9,$f16
+ add $s7,$s2,$a1
+ add.d $f1,$f7,$f29
+ add.s $f8,$f21,$f24
+ addi $13,$9,26322
+ addi $8,$8,~1 # CHECK: addi $8, $8, -2 # encoding: [0x21,0x08,0xff,0xfe]
+ addu $9,$a0,$a2
+ and $s7,$v0,$12
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ bc1f $fcc0, 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1f $fcc1, 4 # CHECK: bc1f $fcc1, 4 # encoding: [0x45,0x04,0x00,0x01]
+ bc1f 4 # CHECK: bc1f 4 # encoding: [0x45,0x00,0x00,0x01]
+ bc1t $fcc0, 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bc1t $fcc1, 4 # CHECK: bc1t $fcc1, 4 # encoding: [0x45,0x05,0x00,0x01]
+ bc1t 4 # CHECK: bc1t 4 # encoding: [0x45,0x01,0x00,0x01]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $0, 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ bgezal $6, 21100 # CHECK: bgezal $6, 21100 # encoding: [0x04,0xd1,0x14,0x9b]
+ bltzal $6, 21100 # CHECK: bltzal $6, 21100 # encoding: [0x04,0xd0,0x14,0x9b]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0xbc,0xa1,0x00,0x08]
+ c.ngl.d $f29,$f29
+ c.ngle.d $f0,$f16
+ c.sf.d $f30,$f0
+ c.sf.s $f14,$f22
+ ceil.l.d $f1,$f3
+ ceil.l.s $f18,$f13
+ ceil.w.d $f11,$f25
+ ceil.w.s $f6,$f20
+ cfc1 $s1,$21
+ clo $11,$a1 # CHECK: clo $11, $5 # encoding: [0x70,0xab,0x58,0x21]
+ clz $sp,$gp # CHECK: clz $sp, $gp # encoding: [0x73,0x9d,0xe8,0x20]
+ ctc1 $a2,$26
+ cvt.d.l $f4,$f16
+ cvt.d.s $f22,$f28
+ cvt.d.w $f26,$f11
+ cvt.l.d $f24,$f15
+ cvt.l.s $f11,$f29
+ cvt.s.d $f26,$f8
+ cvt.s.l $f15,$f30
+ cvt.s.w $f22,$f15
+ cvt.w.d $f20,$f14
+ cvt.w.s $f20,$f24
+ dadd $s3,$at,$ra
+ dadd $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ dadd $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddi $sp,$s4,-27705
+ daddi $sp,$s4,-27705 # CHECK: daddi $sp, $20, -27705 # encoding: [0x62,0x9d,0x93,0xc7]
+ daddi $sp,-27705 # CHECK: daddi $sp, $sp, -27705 # encoding: [0x63,0xbd,0x93,0xc7]
+ daddiu $k0,$s6,-4586
+ daddu $s3,$at,$ra
+ dclo $s2,$a2 # CHECK: dclo $18, $6 # encoding: [0x70,0xd2,0x90,0x25]
+ dclz $s0,$25 # CHECK: dclz $16, $25 # encoding: [0x73,0x30,0x80,0x24]
+ deret
+ di $s8
+ ddiv $zero,$k0,$s3
+ ddivu $zero,$s0,$s1
+ div $zero,$25,$11
+ div.d $f29,$f20,$f27
+ div.s $f4,$f5,$f15
+ divu $zero,$25,$15
+ dmfc1 $12,$f13
+ dmtc1 $s0,$f14
+ dmult $s7,$9
+ dmultu $a1,$a2
+ drotr $1,15 # CHECK: drotr $1, $1, 15 # encoding: [0x00,0x21,0x0b,0xfa]
+ drotr $1,$14,15 # CHECK: drotr $1, $14, 15 # encoding: [0x00,0x2e,0x0b,0xfa]
+ drotr32 $1,15 # CHECK: drotr32 $1, $1, 15 # encoding: [0x00,0x21,0x0b,0xfe]
+ drotr32 $1,$14,15 # CHECK: drotr32 $1, $14, 15 # encoding: [0x00,0x2e,0x0b,0xfe]
+ drotrv $1,$14,$15 # CHECK: drotrv $1, $14, $15 # encoding: [0x01,0xee,0x08,0x56]
+ dsbh $v1,$14
+ dshd $v0,$sp
+ dsll $zero,18 # CHECK: dsll $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xb8]
+ dsll $zero,$s4,18 # CHECK: dsll $zero, $20, 18 # encoding: [0x00,0x14,0x04,0xb8]
+ dsll $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsll32 $zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsll32 $zero,$zero,18 # CHECK: dsll32 $zero, $zero, 18 # encoding: [0x00,0x00,0x04,0xbc]
+ dsllv $zero,$s4,$12 # CHECK: dsllv $zero, $20, $12 # encoding: [0x01,0x94,0x00,0x14]
+ dsra $gp,10 # CHECK: dsra $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbb]
+ dsra $gp,$s2,10 # CHECK: dsra $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbb]
+ dsra $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsra32 $gp,10 # CHECK: dsra32 $gp, $gp, 10 # encoding: [0x00,0x1c,0xe2,0xbf]
+ dsra32 $gp,$s2,10 # CHECK: dsra32 $gp, $18, 10 # encoding: [0x00,0x12,0xe2,0xbf]
+ dsrav $gp,$s2,$s3 # CHECK: dsrav $gp, $18, $19 # encoding: [0x02,0x72,0xe0,0x17]
+ dsrl $s3,23 # CHECK: dsrl $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfa]
+ dsrl $s3,$6,23 # CHECK: dsrl $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfa]
+ dsrl $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsrl32 $s3,23 # CHECK: dsrl32 $19, $19, 23 # encoding: [0x00,0x13,0x9d,0xfe]
+ dsrl32 $s3,$6,23 # CHECK: dsrl32 $19, $6, 23 # encoding: [0x00,0x06,0x9d,0xfe]
+ dsrlv $s3,$6,$s4 # CHECK: dsrlv $19, $6, $20 # encoding: [0x02,0x86,0x98,0x16]
+ dsub $a3,$s6,$8
+ dsub $a3,$s6,$8
+ dsub $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsub $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubi $sp,$s4,-27705 # CHECK: daddi $sp, $20, 27705 # encoding: [0x62,0x9d,0x6c,0x39]
+ dsubi $sp,-27705 # CHECK: daddi $sp, $sp, 27705 # encoding: [0x63,0xbd,0x6c,0x39]
+ dsubu $a1,$a1,$k0
+ dsubu $a1,$a1,$k0
+ ehb # CHECK: ehb # encoding: [0x00,0x00,0x00,0xc0]
+ ei $14
+ eret
+ floor.l.d $f26,$f7
+ floor.l.s $f12,$f5
+ floor.w.d $f14,$f11
+ floor.w.s $f8,$f9
+ jr.hb $4 # CHECK: jr.hb $4 # encoding: [0x00,0x80,0x04,0x08]
+ jalr.hb $4 # CHECK: jalr.hb $4 # encoding: [0x00,0x80,0xfc,0x09]
+ jalr.hb $4, $5 # CHECK: jalr.hb $4, $5 # encoding: [0x00,0xa0,0x24,0x09]
+ lb $24,-14515($10)
+ lbu $8,30195($v1)
+ ld $sp,-28645($s1)
+ ldc1 $f11,16391($s0)
+ ldc2 $8,-21181($at) # CHECK: ldc2 $8, -21181($1) # encoding: [0xd8,0x28,0xad,0x43]
+ ldl $24,-4167($24)
+ ldr $14,-30358($s4)
+ ldxc1 $f8,$s7($15)
+ lh $11,-8556($s5)
+ lhu $s3,-22851($v0)
+ li $at,-29773
+ li $zero,-29889
+ ll $v0,-7321($s2) # CHECK: ll $2, -7321($18) # encoding: [0xc2,0x42,0xe3,0x67]
+ lld $zero,-14736($ra) # CHECK: lld $zero, -14736($ra) # encoding: [0xd3,0xe0,0xc6,0x70]
+ luxc1 $f19,$s6($s5)
+ lw $8,5674($a1)
+ lwc1 $f16,10225($k0)
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0xc8,0xd2,0xfc,0xb7]
+ lwl $s4,-4231($15)
+ lwr $zero,-19147($gp)
+ lwu $s3,-24086($v1)
+ lwxc1 $f12,$s1($s8)
+ madd $s6,$13
+ madd $zero,$9
+ madd.s $f1,$f31,$f19,$f25
+ maddu $s3,$gp
+ maddu $24,$s2
+ mfc0 $a2,$14,1
+ mfc1 $a3,$f27
+ mfhc1 $s8,$f24
+ mfhi $s3
+ mfhi $sp
+ mflo $s1
+ mov.d $f20,$f14
+ mov.s $f2,$f27
+ move $a0,$a3
+ move $s5,$a0
+ move $s8,$a0
+ move $25,$a2
+ movf $gp,$8,$fcc7
+ movf.d $f6,$f11,$fcc5
+ movf.s $f23,$f5,$fcc6
+ movn $v1,$s1,$s0
+ movn.d $f27,$f21,$k0
+ movn.s $f12,$f0,$s7
+ movt $zero,$s4,$fcc5
+ movt.d $f0,$f2,$fcc0
+ movt.s $f30,$f2,$fcc1
+ movz $a1,$s6,$9
+ movz.d $f12,$f29,$9
+ movz.s $f25,$f7,$v1
+ msub $s7,$k1
+ msub.s $f12,$f19,$f10,$f16
+ msubu $15,$a1
+ mtc0 $9,$29,3
+ mtc1 $s8,$f9
+ mthc1 $zero,$f16
+ mthi $s1
+ mtlo $sp
+ mtlo $25
+ mul $s0,$s4,$at
+ mul.d $f20,$f20,$f16
+ mul.s $f30,$f10,$f2
+ mult $sp,$s4
+ mult $sp,$v0
+ multu $gp,$k0
+ multu $9,$s2
+ negu $2 # CHECK: negu $2, $2 # encoding: [0x00,0x02,0x10,0x23]
+ negu $2,$3 # CHECK: negu $2, $3 # encoding: [0x00,0x03,0x10,0x23]
+ neg.d $f27,$f18
+ neg.s $f1,$f15
+ nmadd.s $f0,$f5,$f25,$f12
+ nmsub.s $f1,$f24,$f19,$f4
+ nop
+ nor $a3,$zero,$a3
+ or $12,$s0,$sp
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ pause # CHECK: pause # encoding: [0x00,0x00,0x01,0x40]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0xcc,0xa1,0x00,0x08]
+ rdhwr $sp,$11
+ rotr $1,15 # CHECK: rotr $1, $1, 15 # encoding: [0x00,0x21,0x0b,0xc2]
+ rotr $1,$14,15 # CHECK: rotr $1, $14, 15 # encoding: [0x00,0x2e,0x0b,0xc2]
+ rotrv $1,$14,$15 # CHECK: rotrv $1, $14, $15 # encoding: [0x01,0xee,0x08,0x46]
+ round.l.d $f12,$f1
+ round.l.s $f25,$f5
+ round.w.d $f6,$f4
+ round.w.s $f27,$f28
+ sb $s6,-19857($14)
+ sc $15,18904($s3) # CHECK: sc $15, 18904($19) # encoding: [0xe2,0x6f,0x49,0xd8]
+ scd $15,-8243($sp) # CHECK: scd $15, -8243($sp) # encoding: [0xf3,0xaf,0xdf,0xcd]
+ sdbbp # CHECK: sdbbp # encoding: [0x70,0x00,0x00,0x3f]
+ sdbbp 34 # CHECK: sdbbp 34 # encoding: [0x70,0x00,0x08,0xbf]
+ sd $12,5835($10)
+ sdc1 $f31,30574($13)
+ sdc2 $20,23157($s2) # CHECK: sdc2 $20, 23157($18) # encoding: [0xfa,0x54,0x5a,0x75]
+ sdl $a3,-20961($s8)
+ sdr $11,-20423($12)
+ sdxc1 $f11,$10($14)
+ seb $25,$15
+ seh $v1,$12
+ sh $14,-6704($15)
+ sll $a3,18 # CHECK: sll $7, $7, 18 # encoding: [0x00,0x07,0x3c,0x80]
+ sll $a3,$zero,18 # CHECK: sll $7, $zero, 18 # encoding: [0x00,0x00,0x3c,0x80]
+ sll $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ sllv $a3,$zero,$9 # CHECK: sllv $7, $zero, $9 # encoding: [0x01,0x20,0x38,0x04]
+ slt $s7,$11,$k1 # CHECK: slt $23, $11, $27 # encoding: [0x01,0x7b,0xb8,0x2a]
+ slti $s1,$10,9489 # CHECK: slti $17, $10, 9489 # encoding: [0x29,0x51,0x25,0x11]
+ sltiu $25,$25,-15531 # CHECK: sltiu $25, $25, -15531 # encoding: [0x2f,0x39,0xc3,0x55]
+ sltu $s4,$s5,$11 # CHECK: sltu $20, $21, $11 # encoding: [0x02,0xab,0xa0,0x2b]
+ sltu $24,$25,-15531 # CHECK: sltiu $24, $25, -15531 # encoding: [0x2f,0x38,0xc3,0x55]
+ sqrt.d $f17,$f22
+ sqrt.s $f0,$f1
+ sra $s1,15 # CHECK: sra $17, $17, 15 # encoding: [0x00,0x11,0x8b,0xc3]
+ sra $s1,$s7,15 # CHECK: sra $17, $23, 15 # encoding: [0x00,0x17,0x8b,0xc3]
+ sra $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srav $s1,$s7,$sp # CHECK: srav $17, $23, $sp # encoding: [0x03,0xb7,0x88,0x07]
+ srl $2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $2,$2,7 # CHECK: srl $2, $2, 7 # encoding: [0x00,0x02,0x11,0xc2]
+ srl $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ srlv $25,$s4,$a0 # CHECK: srlv $25, $20, $4 # encoding: [0x00,0x94,0xc8,0x06]
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sub $s6,$s3,$12
+ sub.d $f18,$f3,$f17
+ sub.s $f23,$f22,$f22
+ subu $sp,$s6,$s6
+ suxc1 $f12,$k1($13)
+ sw $ra,-10160($sp)
+ swc1 $f6,-8465($24)
+ swc2 $25,24880($s0) # CHECK: swc2 $25, 24880($16) # encoding: [0xea,0x19,0x61,0x30]
+ swl $15,13694($s3)
+ swr $s1,-26590($14)
+ swxc1 $f19,$12($k0)
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ sync 1 # CHECK: sync 1 # encoding: [0x00,0x00,0x00,0x4f]
+ teqi $s5,-17504
+ tgei $s1,5025
+ tgeiu $sp,-28621
+ tlbp # CHECK: tlbp # encoding: [0x42,0x00,0x00,0x08]
+ tlbr # CHECK: tlbr # encoding: [0x42,0x00,0x00,0x01]
+ tlbwi # CHECK: tlbwi # encoding: [0x42,0x00,0x00,0x02]
+ tlbwr # CHECK: tlbwr # encoding: [0x42,0x00,0x00,0x06]
+ tlti $14,-21059
+ tltiu $ra,-5076
+ tnei $12,-29647
+ trunc.l.d $f23,$f23
+ trunc.l.s $f28,$f31
+ trunc.w.d $f22,$f15
+ trunc.w.s $f28,$f30
+ xor $s2,$a0,$s8
+ wsbh $k1,$9
diff --git a/test/MC/Mips/mips64r6/invalid-mips1-wrong-error.s b/test/MC/Mips/mips64r6/invalid-mips1-wrong-error.s
new file mode 100644
index 000000000000..e914c899f66b
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips1-wrong-error.s
@@ -0,0 +1,17 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc2f 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2t 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ lwl $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwr $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ swl $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ swr $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwle $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ lwre $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ swle $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ swre $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips64r6/invalid-mips1.s b/test/MC/Mips/mips64r6/invalid-mips1.s
new file mode 100644
index 000000000000..6efd8f4cf601
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips1.s
@@ -0,0 +1,27 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ addi $13,$9,26322 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $0, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bltzal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.ngl.d $f29,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.ngle.d $f0,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.sf.d $f30,$f0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ c.sf.s $f14,$f22 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mflo $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthi $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$v0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $9,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $gp,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+# div has been re-encoded. See valid.s
+# divu has been re-encoded. See valid.s
diff --git a/test/MC/Mips/mips64r6/invalid-mips2.s b/test/MC/Mips/mips64r6/invalid-mips2.s
new file mode 100644
index 000000000000..8a5c50ca3582
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips2.s
@@ -0,0 +1,29 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ addi $13,$9,26322 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $0, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bltzal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mflo $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthi $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$v0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $9,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $gp,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $14,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $12,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+# div has been re-encoded. See valid.s
+# divu has been re-encoded. See valid.s
diff --git a/test/MC/Mips/mips64r6/invalid-mips3-wrong-error.s b/test/MC/Mips/mips64r6/invalid-mips3-wrong-error.s
new file mode 100644
index 000000000000..7424f493bf56
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips3-wrong-error.s
@@ -0,0 +1,23 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ ldl $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldr $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdl $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ sdr $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ ldle $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ ldre $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ sdle $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ sdre $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ lwl $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwr $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ swl $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ swr $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: invalid operand for instruction
+ lwle $s4,-4231($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ lwre $zero,-19147($gp) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ swle $15,13694($s3) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ swre $s1,-26590($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips64r6/invalid-mips3.s b/test/MC/Mips/mips64r6/invalid-mips3.s
new file mode 100644
index 000000000000..322dabd97829
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips3.s
@@ -0,0 +1,33 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ addi $13,$9,26322 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $0, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bltzal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmult $s7,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mflo $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthi $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$v0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $9,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $gp,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $14,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $12,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+# ddiv has been re-encoded. See valid.s
+# ddivu has been re-encoded. See valid.s
+# div has been re-encoded. See valid.s
+# divu has been re-encoded. See valid.s
diff --git a/test/MC/Mips/mips64r6/invalid-mips32-wrong-error.s b/test/MC/Mips/mips64r6/invalid-mips32-wrong-error.s
new file mode 100644
index 000000000000..cc85f1885635
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips32-wrong-error.s
@@ -0,0 +1,20 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bc1fl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1tl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2f $fcc0,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2f 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2fl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2t $fcc0,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2t 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2tl $fcc1,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips64r6/invalid-mips4-wrong-error.s b/test/MC/Mips/mips64r6/invalid-mips4-wrong-error.s
new file mode 100644
index 000000000000..f3131a947367
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips4-wrong-error.s
@@ -0,0 +1,21 @@
+# Instructions that are invalid and are correctly rejected but use the wrong
+# error message at the moment.
+#
+# RUN: not llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips32r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ beql $1,$2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgezall $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgezl $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bgtzl $4,16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ blezl $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bltzall $3,8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bltzl $4,16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bnel $1,$2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2tl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc2fl 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ prefx 0,$2($31) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips64r6/invalid-mips4.s b/test/MC/Mips/mips64r6/invalid-mips4.s
new file mode 100644
index 000000000000..706db27835ef
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips4.s
@@ -0,0 +1,14 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bgezal $0, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bltzal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ ldxc1 $f8,$s7($15) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ lwxc1 $f12,$s1($s8) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ sdxc1 $f11,$10($14) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ swxc1 $f19,$12($k0) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips64r6/invalid-mips5-wrong-error.s b/test/MC/Mips/mips64r6/invalid-mips5-wrong-error.s
new file mode 100644
index 000000000000..4fc94e26eb10
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips5-wrong-error.s
@@ -0,0 +1,48 @@
+# Instructions that are invalid but currently emit the wrong error message.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ abs.ps $f22,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ add.ps $f25,$f27,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ alnv.ps $f12,$f18,$f30,$12 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1any2f $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1any2t $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1any4f $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ bc1any4t $fcc2,4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.eq.ps $fcc5,$f0,$f9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.f.ps $fcc6,$f11,$f11 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.le.ps $fcc1,$f7,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.lt.ps $f19,$f5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.nge.ps $f1,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngl.ps $f21,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngle.ps $fcc7,$f12,$f20 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ngt.ps $fcc5,$f30,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ole.ps $fcc7,$f21,$f8 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.olt.ps $fcc3,$f7,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.seq.ps $fcc6,$f31,$f14 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.sf.ps $fcc6,$f4,$f6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ueq.ps $fcc1,$f5,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ule.ps $fcc6,$f17,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.ult.ps $fcc7,$f14,$f0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ c.un.ps $fcc4,$f2,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.ps.s $f3,$f18,$f19 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ cvt.ps.pw $f3,$f18 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ madd.ps $f22,$f3,$f14,$f3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mov.ps $f22,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movf.ps $f10,$f28,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movn.ps $f31,$f31,$s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movt.ps $f20,$f25,$fcc2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ movz.ps $f18,$f17,$ra # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ msub.ps $f12,$f14,$f29,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ mul.ps $f14,$f0,$f16 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ neg.ps $f19,$f13 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmadd.ps $f27,$f4,$f9,$f25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ nmsub.ps $f6,$f12,$f14,$f17 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pll.ps $f25,$f9,$f30 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ plu.ps $f1,$f26,$f29 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ pul.ps $f9,$f30,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ puu.ps $f24,$f9,$f2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
+ sub.ps $f5,$f14,$f26 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: Unknown instruction
diff --git a/test/MC/Mips/mips64r6/invalid-mips5.s b/test/MC/Mips/mips64r6/invalid-mips5.s
new file mode 100644
index 000000000000..e7fd99a6b05b
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips5.s
@@ -0,0 +1,12 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ bgezal $0, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bltzal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ luxc1 $f19,$s6($s5) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ suxc1 $f12,$k1($13) # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips64r6/invalid-mips64.s b/test/MC/Mips/mips64r6/invalid-mips64.s
new file mode 100644
index 000000000000..51e57083b7f9
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid-mips64.s
@@ -0,0 +1,54 @@
+# Instructions that are invalid
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: 2>%t1
+# RUN: FileCheck %s < %t1
+
+ .set noat
+ addi $13,$9,26322 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $0, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bgezal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ bltzal $6, 21100 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ daddi $sp,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dadd $sp,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmult $s7,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dmultu $a1,$a2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubi $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsubi $sp,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsub $sp,$s4,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ dsub $sp,-27705 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ jalx 4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $s3 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mfhi $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mflo $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf $gp,$8,$fcc7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.d $f6,$f11,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movf.s $f23,$f5,$fcc6 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn $v1,$s1,$s0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.d $f27,$f21,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movn.s $f12,$f0,$s7 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt $zero,$s4,$fcc5 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.d $f0,$f2,$fcc0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movt.s $f30,$f2,$fcc1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz $a1,$s6,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.d $f12,$f29,$9 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ movz.s $f25,$f7,$v1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mthi $s1 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $25 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mtlo $sp # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$s4 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ mult $sp,$v0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $9,$s2 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ multu $gp,$k0 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ teqi $s5,-17504 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgei $s1,5025 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tgeiu $sp,-28621 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tlti $14,-21059 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tltiu $ra,-5076 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+ tnei $12,-29647 # CHECK: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
+# ddiv has been re-encoded. See valid.s
+# ddivu has been re-encoded. See valid.s
+# div has been re-encoded. See valid.s
+# divu has been re-encoded. See valid.s
diff --git a/test/MC/Mips/mips64r6/invalid.s b/test/MC/Mips/mips64r6/invalid.s
new file mode 100644
index 000000000000..1b01827368a5
--- /dev/null
+++ b/test/MC/Mips/mips64r6/invalid.s
@@ -0,0 +1,12 @@
+# Instructions that are available for the current ISA but should be rejected by
+# the assembler (e.g. an invalid set of operands or operand restrictions that are not met).
+
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -mcpu=mips64r6 2>%t1
+# RUN: FileCheck %s < %t1 -check-prefix=ASM
+
+ .text
+ .set noreorder
+ .set noat
+ jalr.hb $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
+ jalr.hb $31, $31 # ASM: :[[@LINE]]:9: error: source and destination must be different
+ ldc2 $8,-21181($at) # ASM: :[[@LINE]]:{{[0-9]+}}: error: instruction requires a CPU feature not currently enabled
diff --git a/test/MC/Mips/mips64r6/relocations.s b/test/MC/Mips/mips64r6/relocations.s
new file mode 100644
index 000000000000..651ebfb6c4c1
--- /dev/null
+++ b/test/MC/Mips/mips64r6/relocations.s
@@ -0,0 +1,76 @@
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips64r6 \
+# RUN: | FileCheck %s -check-prefix=CHECK-FIXUP
+# RUN: llvm-mc %s -filetype=obj -triple=mips-unknown-linux -mcpu=mips64r6 \
+# RUN: | llvm-readobj -r | FileCheck %s -check-prefix=CHECK-ELF
+#------------------------------------------------------------------------------
+# Check that the assembler can handle the documented syntax for fixups.
+#------------------------------------------------------------------------------
+# CHECK-FIXUP: addiupc $2, bar # encoding: [0xec,0b01000AAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC19_S2
+# CHECK-FIXUP: beqc $5, $6, bar # encoding: [0x20,0xa6,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_Mips_PC16
+# CHECK-FIXUP: bnec $5, $6, bar # encoding: [0x60,0xa6,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_Mips_PC16
+# CHECK-FIXUP: beqzc $9, bar # encoding: [0xd9,0b001AAAAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC21_S2
+# CHECK-FIXUP: bnezc $9, bar # encoding: [0xf9,0b001AAAAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC21_S2
+# CHECK-FIXUP: balc bar # encoding: [0b111010AA,A,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC26_S2
+# CHECK-FIXUP: bc bar # encoding: [0b110010AA,A,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC26_S2
+# CHECK-FIXUP: aluipc $2, %pcrel_hi(bar) # encoding: [0xec,0x5f,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar@PCREL_HI16,
+# CHECK-FIXUP: kind: fixup_MIPS_PCHI16
+# CHECK-FIXUP: addiu $2, $2, %pcrel_lo(bar) # encoding: [0x24,0x42,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar@PCREL_LO16,
+# CHECK-FIXUP: kind: fixup_MIPS_PCLO16
+# CHECK-FIXUP: ldpc $2, bar # encoding: [0xec,0b010110AA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar,
+# CHECK-FIXUP: kind: fixup_Mips_PC18_S3
+# CHECK-FIXUP: lwpc $2, bar # encoding: [0xec,0b01001AAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC19_S2
+# CHECK-FIXUP: lwupc $2, bar # encoding: [0xec,0b01010AAA,A,A]
+# CHECK-FIXUP: # fixup A - offset: 0,
+# CHECK-FIXUP: value: bar, kind: fixup_MIPS_PC19_S2
+#------------------------------------------------------------------------------
+# Check that the appropriate relocations were created.
+#------------------------------------------------------------------------------
+# CHECK-ELF: Relocations [
+# CHECK-ELF: 0x0 R_MIPS_PC19_S2 bar 0x0
+# CHECK-ELF: 0x4 R_MIPS_PC16 bar 0x0
+# CHECK-ELF: 0x8 R_MIPS_PC16 bar 0x0
+# CHECK-ELF: 0xC R_MIPS_PC21_S2 bar 0x0
+# CHECK-ELF: 0x10 R_MIPS_PC21_S2 bar 0x0
+# CHECK-ELF: 0x14 R_MIPS_PC26_S2 bar 0x0
+# CHECK-ELF: 0x18 R_MIPS_PC26_S2 bar 0x0
+# CHECK-ELF: 0x1C R_MIPS_PCHI16 bar 0x0
+# CHECK-ELF: 0x20 R_MIPS_PCLO16 bar 0x0
+# CHECK-ELF: 0x24 R_MIPS_PC18_S3 bar 0x0
+# CHECK-ELF: 0x28 R_MIPS_PC19_S2 bar 0x0
+# CHECK-ELF: 0x2C R_MIPS_PC19_S2 bar 0x0
+# CHECK-ELF: ]
+
+ addiupc $2,bar
+ beqc $5, $6, bar
+ bnec $5, $6, bar
+ beqzc $9, bar
+ bnezc $9, bar
+ balc bar
+ bc bar
+ aluipc $2, %pcrel_hi(bar)
+ addiu $2, $2, %pcrel_lo(bar)
+ ldpc $2,bar
+ lwpc $2,bar
+ lwupc $2,bar
diff --git a/test/MC/Mips/mips64r6/valid-xfail.s b/test/MC/Mips/mips64r6/valid-xfail.s
new file mode 100644
index 000000000000..a75122571d93
--- /dev/null
+++ b/test/MC/Mips/mips64r6/valid-xfail.s
@@ -0,0 +1,19 @@
+# Instructions that should be valid but currently fail for known reasons (e.g.
+# they aren't implemented yet).
+# This test is set up to XPASS if any instruction generates an encoding.
+#
+# RUN: not llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=mips64r6 | not FileCheck %s
+# CHECK-NOT: encoding
+# XFAIL: *
+
+ .set noat
+ bovc $0, $2, 4 # TODO: bovc $0, $2, 4 # encoding: [0x20,0x40,0x00,0x01]
+ bovc $2, $4, 4 # TODO: bovc $2, $4, 4 # encoding: [0x20,0x82,0x00,0x01]
+ bnvc $0, $2, 4 # TODO: bnvc $0, $2, 4 # encoding: [0x60,0x40,0x00,0x01]
+ bnvc $2, $4, 4 # TODO: bnvc $2, $4, 4 # encoding: [0x60,0x82,0x00,0x01]
+ beqc $0, $6, 256 # TODO: beqc $6, $zero, 256 # encoding: [0x20,0xc0,0x00,0x40]
+ beqc $5, $0, 256 # TODO: beqc $5, $zero, 256 # encoding: [0x20,0xa0,0x00,0x40]
+ beqc $6, $5, 256 # TODO: beqc $5, $6, 256 # encoding: [0x20,0xa6,0x00,0x40]
+ bnec $0, $6, 256 # TODO: bnec $6, $zero, 256 # encoding: [0x60,0xc0,0x00,0x40]
+ bnec $5, $0, 256 # TODO: bnec $5, $zero, 256 # encoding: [0x60,0xa0,0x00,0x40]
+ bnec $6, $5, 256 # TODO: bnec $5, $6, 256 # encoding: [0x60,0xa6,0x00,0x40]
diff --git a/test/MC/Mips/mips64r6/valid.s b/test/MC/Mips/mips64r6/valid.s
new file mode 100644
index 000000000000..34c1dac5be55
--- /dev/null
+++ b/test/MC/Mips/mips64r6/valid.s
@@ -0,0 +1,173 @@
+# Instructions that are valid
+#
+# Branches have some unusual encoding rules in MIPS32r6 and MIPS64r6, so we need to test:
+# rs == 0
+# rs != 0
+# rt == 0
+# rt != 0
+# rs < rt
+# rs == rt
+# rs > rt
+# appropriately for each branch instruction
+#
+# RUN: llvm-mc %s -triple=mips-unknown-linux -show-encoding -mcpu=mips64r6 2> %t0 | FileCheck %s
+# RUN: FileCheck %s -check-prefix=WARNING < %t0
+
+ .set noat
+	# FIXME: Add the instructions carried forward from older ISAs
+ and $2,4 # CHECK: andi $2, $2, 4 # encoding: [0x30,0x42,0x00,0x04]
+ addiupc $4, 100 # CHECK: addiupc $4, 100 # encoding: [0xec,0x80,0x00,0x19]
+ align $4, $2, $3, 2 # CHECK: align $4, $2, $3, 2 # encoding: [0x7c,0x43,0x22,0xa0]
+ aluipc $3, 56 # CHECK: aluipc $3, 56 # encoding: [0xec,0x7f,0x00,0x38]
+ aui $3,$2,-23 # CHECK: aui $3, $2, -23 # encoding: [0x3c,0x62,0xff,0xe9]
+ auipc $3, -1 # CHECK: auipc $3, -1 # encoding: [0xec,0x7e,0xff,0xff]
+ bal 21100 # CHECK: bal 21100 # encoding: [0x04,0x11,0x14,0x9b]
+ balc 14572256 # CHECK: balc 14572256 # encoding: [0xe8,0x37,0x96,0xb8]
+ bc 14572256 # CHECK: bc 14572256 # encoding: [0xc8,0x37,0x96,0xb8]
+ bc1eqz $f0,4 # CHECK: bc1eqz $f0, 4 # encoding: [0x45,0x20,0x00,0x01]
+ bc1eqz $f31,4 # CHECK: bc1eqz $f31, 4 # encoding: [0x45,0x3f,0x00,0x01]
+ bc1nez $f0,4 # CHECK: bc1nez $f0, 4 # encoding: [0x45,0xa0,0x00,0x01]
+ bc1nez $f31,4 # CHECK: bc1nez $f31, 4 # encoding: [0x45,0xbf,0x00,0x01]
+ bc2eqz $0,8 # CHECK: bc2eqz $0, 8 # encoding: [0x49,0x20,0x00,0x02]
+ bc2eqz $31,8 # CHECK: bc2eqz $31, 8 # encoding: [0x49,0x3f,0x00,0x02]
+ bc2nez $0,8 # CHECK: bc2nez $0, 8 # encoding: [0x49,0xa0,0x00,0x02]
+ bc2nez $31,8 # CHECK: bc2nez $31, 8 # encoding: [0x49,0xbf,0x00,0x02]
+	# beqc requires rs < rt && rs != 0, but we also accept cases where this is not true. See also bovc
+ # FIXME: Testcases are in valid-xfail.s at the moment
+ beqc $5, $6, 256 # CHECK: beqc $5, $6, 256 # encoding: [0x20,0xa6,0x00,0x40]
+ beqzalc $2, 1332 # CHECK: beqzalc $2, 1332 # encoding: [0x20,0x02,0x01,0x4d]
+	# bnec requires rs < rt && rs != 0, but we accept cases where this is not true. See also bnvc
+ # FIXME: Testcases are in valid-xfail.s at the moment
+ bnec $5, $6, 256 # CHECK: bnec $5, $6, 256 # encoding: [0x60,0xa6,0x00,0x40]
+ bnezalc $2, 1332 # CHECK: bnezalc $2, 1332 # encoding: [0x60,0x02,0x01,0x4d]
+ beqzc $5, 72256 # CHECK: beqzc $5, 72256 # encoding: [0xd8,0xa0,0x46,0x90]
+ bgec $2, $3, 256 # CHECK: bgec $2, $3, 256 # encoding: [0x58,0x43,0x00,0x40]
+ bgeuc $2, $3, 256 # CHECK: bgeuc $2, $3, 256 # encoding: [0x18,0x43,0x00,0x40]
+ bgezalc $2, 1332 # CHECK: bgezalc $2, 1332 # encoding: [0x18,0x42,0x01,0x4d]
+ bnezc $5, 72256 # CHECK: bnezc $5, 72256 # encoding: [0xf8,0xa0,0x46,0x90]
+ bltzc $5, 256 # CHECK: bltzc $5, 256 # encoding: [0x5c,0xa5,0x00,0x40]
+ bgezc $5, 256 # CHECK: bgezc $5, 256 # encoding: [0x58,0xa5,0x00,0x40]
+ bgtzalc $2, 1332 # CHECK: bgtzalc $2, 1332 # encoding: [0x1c,0x02,0x01,0x4d]
+ blezc $5, 256 # CHECK: blezc $5, 256 # encoding: [0x58,0x05,0x00,0x40]
+ bltzalc $2, 1332 # CHECK: bltzalc $2, 1332 # encoding: [0x1c,0x42,0x01,0x4d]
+ bgtzc $5, 256 # CHECK: bgtzc $5, 256 # encoding: [0x5c,0x05,0x00,0x40]
+ bitswap $4, $2 # CHECK: bitswap $4, $2 # encoding: [0x7c,0x02,0x20,0x20]
+ blezalc $2, 1332 # CHECK: blezalc $2, 1332 # encoding: [0x18,0x02,0x01,0x4d]
+ bltc $5, $6, 256 # CHECK: bltc $5, $6, 256 # encoding: [0x5c,0xa6,0x00,0x40]
+ bltuc $5, $6, 256 # CHECK: bltuc $5, $6, 256 # encoding: [0x1c,0xa6,0x00,0x40]
+	# bnvc requires that rs >= rt, but we accept both orderings. See also bnec
+ bnvc $0, $0, 4 # CHECK: bnvc $zero, $zero, 4 # encoding: [0x60,0x00,0x00,0x01]
+ bnvc $2, $0, 4 # CHECK: bnvc $2, $zero, 4 # encoding: [0x60,0x40,0x00,0x01]
+ bnvc $4, $2, 4 # CHECK: bnvc $4, $2, 4 # encoding: [0x60,0x82,0x00,0x01]
+	# bovc requires that rs >= rt, but we accept both orderings. See also beqc
+ bovc $0, $0, 4 # CHECK: bovc $zero, $zero, 4 # encoding: [0x20,0x00,0x00,0x01]
+ bovc $2, $0, 4 # CHECK: bovc $2, $zero, 4 # encoding: [0x20,0x40,0x00,0x01]
+ bovc $4, $2, 4 # CHECK: bovc $4, $2, 4 # encoding: [0x20,0x82,0x00,0x01]
+ cache 1, 8($5) # CHECK: cache 1, 8($5) # encoding: [0x7c,0xa1,0x04,0x25]
+ cmp.af.s $f2,$f3,$f4 # CHECK: cmp.af.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x80]
+ cmp.af.d $f2,$f3,$f4 # CHECK: cmp.af.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x80]
+ cmp.un.s $f2,$f3,$f4 # CHECK: cmp.un.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x81]
+ cmp.un.d $f2,$f3,$f4 # CHECK: cmp.un.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x81]
+ cmp.eq.s $f2,$f3,$f4 # CHECK: cmp.eq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x82]
+ cmp.eq.d $f2,$f3,$f4 # CHECK: cmp.eq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x82]
+ cmp.ueq.s $f2,$f3,$f4 # CHECK: cmp.ueq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x83]
+ cmp.ueq.d $f2,$f3,$f4 # CHECK: cmp.ueq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x83]
+ cmp.lt.s $f2,$f3,$f4 # CHECK: cmp.lt.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x84]
+ cmp.lt.d $f2,$f3,$f4 # CHECK: cmp.lt.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x84]
+ cmp.ult.s $f2,$f3,$f4 # CHECK: cmp.ult.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x85]
+ cmp.ult.d $f2,$f3,$f4 # CHECK: cmp.ult.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x85]
+ cmp.le.s $f2,$f3,$f4 # CHECK: cmp.le.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x86]
+ cmp.le.d $f2,$f3,$f4 # CHECK: cmp.le.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x86]
+ cmp.ule.s $f2,$f3,$f4 # CHECK: cmp.ule.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x87]
+ cmp.ule.d $f2,$f3,$f4 # CHECK: cmp.ule.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x87]
+ cmp.saf.s $f2,$f3,$f4 # CHECK: cmp.saf.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x88]
+ cmp.saf.d $f2,$f3,$f4 # CHECK: cmp.saf.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x88]
+ cmp.sun.s $f2,$f3,$f4 # CHECK: cmp.sun.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x89]
+ cmp.sun.d $f2,$f3,$f4 # CHECK: cmp.sun.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x89]
+ cmp.seq.s $f2,$f3,$f4 # CHECK: cmp.seq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8a]
+ cmp.seq.d $f2,$f3,$f4 # CHECK: cmp.seq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8a]
+ cmp.sueq.s $f2,$f3,$f4 # CHECK: cmp.sueq.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8b]
+ cmp.sueq.d $f2,$f3,$f4 # CHECK: cmp.sueq.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8b]
+ cmp.slt.s $f2,$f3,$f4 # CHECK: cmp.slt.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8c]
+ cmp.slt.d $f2,$f3,$f4 # CHECK: cmp.slt.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8c]
+ cmp.sult.s $f2,$f3,$f4 # CHECK: cmp.sult.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8d]
+ cmp.sult.d $f2,$f3,$f4 # CHECK: cmp.sult.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8d]
+ cmp.sle.s $f2,$f3,$f4 # CHECK: cmp.sle.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8e]
+ cmp.sle.d $f2,$f3,$f4 # CHECK: cmp.sle.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8e]
+ cmp.sule.s $f2,$f3,$f4 # CHECK: cmp.sule.s $f2, $f3, $f4 # encoding: [0x46,0x84,0x18,0x8f]
+ cmp.sule.d $f2,$f3,$f4 # CHECK: cmp.sule.d $f2, $f3, $f4 # encoding: [0x46,0xa4,0x18,0x8f]
+ dalign $4,$2,$3,5 # CHECK: dalign $4, $2, $3, 5 # encoding: [0x7c,0x43,0x23,0x64]
+ daui $3,$2,0x1234 # CHECK: daui $3, $2, 4660 # encoding: [0x74,0x62,0x12,0x34]
+ dahi $3,0x5678 # CHECK: dahi $3, 22136 # encoding: [0x04,0x66,0x56,0x78]
+ dati $3,0xabcd # CHECK: dati $3, 43981 # encoding: [0x04,0x7e,0xab,0xcd]
+ dbitswap $4, $2 # CHECK: dbitswap $4, $2 # encoding: [0x7c,0x02,0x20,0x24]
+ div $2,$3,$4 # CHECK: div $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9a]
+ divu $2,$3,$4 # CHECK: divu $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9b]
+ jialc $5, 256 # CHECK: jialc $5, 256 # encoding: [0xf8,0x05,0x01,0x00]
+ jic $5, 256 # CHECK: jic $5, 256 # encoding: [0xd8,0x05,0x01,0x00]
+ mod $2,$3,$4 # CHECK: mod $2, $3, $4 # encoding: [0x00,0x64,0x10,0xda]
+ modu $2,$3,$4 # CHECK: modu $2, $3, $4 # encoding: [0x00,0x64,0x10,0xdb]
+ ddiv $2,$3,$4 # CHECK: ddiv $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9e]
+ ddivu $2,$3,$4 # CHECK: ddivu $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9f]
+ dmod $2,$3,$4 # CHECK: dmod $2, $3, $4 # encoding: [0x00,0x64,0x10,0xde]
+ dmodu $2,$3,$4 # CHECK: dmodu $2, $3, $4 # encoding: [0x00,0x64,0x10,0xdf]
+ lsa $2, $3, $4, 3 # CHECK: lsa $2, $3, $4, 3 # encoding: [0x00,0x64,0x10,0xc5]
+ dlsa $2, $3, $4, 3 # CHECK: dlsa $2, $3, $4, 3 # encoding: [0x00,0x64,0x10,0xd5]
+ ldpc $2,123456 # CHECK: ldpc $2, 123456 # encoding: [0xec,0x58,0x3c,0x48]
+ lwpc $2,268 # CHECK: lwpc $2, 268 # encoding: [0xec,0x48,0x00,0x43]
+ lwupc $2,268 # CHECK: lwupc $2, 268 # encoding: [0xec,0x50,0x00,0x43]
+ mul $2,$3,$4 # CHECK: mul $2, $3, $4 # encoding: [0x00,0x64,0x10,0x98]
+ muh $2,$3,$4 # CHECK: muh $2, $3, $4 # encoding: [0x00,0x64,0x10,0xd8]
+ mulu $2,$3,$4 # CHECK: mulu $2, $3, $4 # encoding: [0x00,0x64,0x10,0x99]
+ muhu $2,$3,$4 # CHECK: muhu $2, $3, $4 # encoding: [0x00,0x64,0x10,0xd9]
+ dmul $2,$3,$4 # CHECK: dmul $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9c]
+ dmuh $2,$3,$4 # CHECK: dmuh $2, $3, $4 # encoding: [0x00,0x64,0x10,0xdc]
+ dmulu $2,$3,$4 # CHECK: dmulu $2, $3, $4 # encoding: [0x00,0x64,0x10,0x9d]
+ dmuhu $2,$3,$4 # CHECK: dmuhu $2, $3, $4 # encoding: [0x00,0x64,0x10,0xdd]
+ maddf.s $f2,$f3,$f4 # CHECK: maddf.s $f2, $f3, $f4 # encoding: [0x46,0x04,0x18,0x98]
+ maddf.d $f2,$f3,$f4 # CHECK: maddf.d $f2, $f3, $f4 # encoding: [0x46,0x24,0x18,0x98]
+ msubf.s $f2,$f3,$f4 # CHECK: msubf.s $f2, $f3, $f4 # encoding: [0x46,0x04,0x18,0x99]
+ msubf.d $f2,$f3,$f4 # CHECK: msubf.d $f2, $f3, $f4 # encoding: [0x46,0x24,0x18,0x99]
+ pref 1, 8($5) # CHECK: pref 1, 8($5) # encoding: [0x7c,0xa1,0x04,0x35]
+ sel.d $f0,$f1,$f2 # CHECK: sel.d $f0, $f1, $f2 # encoding: [0x46,0x22,0x08,0x10]
+ sel.s $f0,$f1,$f2 # CHECK: sel.s $f0, $f1, $f2 # encoding: [0x46,0x02,0x08,0x10]
+ seleqz $2,$3,$4 # CHECK: seleqz $2, $3, $4 # encoding: [0x00,0x64,0x10,0x35]
+ selnez $2,$3,$4 # CHECK: selnez $2, $3, $4 # encoding: [0x00,0x64,0x10,0x37]
+ max.s $f0, $f2, $f4 # CHECK: max.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1d]
+ max.d $f0, $f2, $f4 # CHECK: max.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1d]
+ min.s $f0, $f2, $f4 # CHECK: min.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1c]
+ min.d $f0, $f2, $f4 # CHECK: min.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1c]
+ maxa.s $f0, $f2, $f4 # CHECK: maxa.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1f]
+ maxa.d $f0, $f2, $f4 # CHECK: maxa.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1f]
+ mina.s $f0, $f2, $f4 # CHECK: mina.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x1e]
+ mina.d $f0, $f2, $f4 # CHECK: mina.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x1e]
+ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04]
+ seleqz.s $f0, $f2, $f4 # CHECK: seleqz.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x14]
+ seleqz.d $f0, $f2, $f4 # CHECK: seleqz.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x14]
+ selnez.s $f0, $f2, $f4 # CHECK: selnez.s $f0, $f2, $f4 # encoding: [0x46,0x04,0x10,0x17]
+ selnez.d $f0, $f2, $f4 # CHECK: selnez.d $f0, $f2, $f4 # encoding: [0x46,0x24,0x10,0x17]
+ rint.s $f2, $f4 # CHECK: rint.s $f2, $f4 # encoding: [0x46,0x00,0x20,0x9a]
+ rint.d $f2, $f4 # CHECK: rint.d $f2, $f4 # encoding: [0x46,0x20,0x20,0x9a]
+ class.s $f2, $f4 # CHECK: class.s $f2, $f4 # encoding: [0x46,0x00,0x20,0x9b]
+ class.d $f2, $f4 # CHECK: class.d $f2, $f4 # encoding: [0x46,0x20,0x20,0x9b]
+ jr.hb $4 # CHECK: jr.hb $4 # encoding: [0x00,0x80,0x04,0x09]
+ jalr.hb $4 # CHECK: jalr.hb $4 # encoding: [0x00,0x80,0xfc,0x09]
+ jalr.hb $4, $5 # CHECK: jalr.hb $4, $5 # encoding: [0x00,0xa0,0x24,0x09]
+ ldc2 $8, -701($at) # CHECK: ldc2 $8, -701($1) # encoding: [0x49,0xc8,0x0d,0x43]
+ lwc2 $18,-841($a2) # CHECK: lwc2 $18, -841($6) # encoding: [0x49,0x52,0x34,0xb7]
+ sdc2 $20,629($s2) # CHECK: sdc2 $20, 629($18) # encoding: [0x49,0xf4,0x92,0x75]
+ swc2 $25,304($s0) # CHECK: swc2 $25, 304($16) # encoding: [0x49,0x79,0x81,0x30]
+ ll $v0,-153($s2) # CHECK: ll $2, -153($18) # encoding: [0x7e,0x42,0xb3,0xb6]
+ lld $zero,112($ra) # CHECK: lld $zero, 112($ra) # encoding: [0x7f,0xe0,0x38,0x37]
+ sc $15,-40($s3) # CHECK: sc $15, -40($19) # encoding: [0x7e,0x6f,0xec,0x26]
+ scd $15,-51($sp) # CHECK: scd $15, -51($sp) # encoding: [0x7f,0xaf,0xe6,0xa7]
+ clo $11,$a1 # CHECK: clo $11, $5 # encoding: [0x00,0xa0,0x58,0x51]
+ clz $sp,$gp # CHECK: clz $sp, $gp # encoding: [0x03,0x80,0xe8,0x50]
+ dclo $s2,$a2 # CHECK: dclo $18, $6 # encoding: [0x00,0xc0,0x90,0x53]
+ dclz $s0,$25 # CHECK: dclz $16, $25 # encoding: [0x03,0x20,0x80,0x52]
+ ssnop # WARNING: [[@LINE]]:9: warning: ssnop is deprecated for MIPS64r6 and is equivalent to a nop instruction
+ ssnop # CHECK: ssnop # encoding: [0x00,0x00,0x00,0x40]
+ sdbbp # CHECK: sdbbp # encoding: [0x00,0x00,0x00,0x0e]
+ sdbbp 34 # CHECK: sdbbp 34 # encoding: [0x00,0x00,0x08,0x8e]
+ sync # CHECK: sync # encoding: [0x00,0x00,0x00,0x0f]
+ sync 1 # CHECK: sync 1 # encoding: [0x00,0x00,0x00,0x4f]
diff --git a/test/MC/Mips/mips_abi_flags_xx.s b/test/MC/Mips/mips_abi_flags_xx.s
new file mode 100644
index 000000000000..cd6c9de4fac4
--- /dev/null
+++ b/test/MC/Mips/mips_abi_flags_xx.s
@@ -0,0 +1,45 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ -check-prefix=CHECK-OBJ-R1
+
+# RUN: llvm-mc /dev/null -arch=mips -mcpu=mips32 -mattr=fpxx -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ -check-prefix=CHECK-OBJ-R1
+
+# RUN: llvm-mc /dev/null -arch=mips -mcpu=mips32r6 -mattr=fpxx -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ -check-prefix=CHECK-OBJ-R6
+
+# CHECK-ASM: .module fp=xx
+
+# Check that the .MIPS.abiflags section was emitted correctly.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags (12)
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ-R1: 0000: 00002001 01010005 00000000 00000000 |.. .............|
+# CHECK-OBJ-R6: 0000: 00002006 01010005 00000000 00000000 |.. .............|
+# CHECK-OBJ: 0010: 00000001 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+ .module fp=xx
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/mips_abi_flags_xx_set.s b/test/MC/Mips/mips_abi_flags_xx_set.s
new file mode 100644
index 000000000000..a548972db0d6
--- /dev/null
+++ b/test/MC/Mips/mips_abi_flags_xx_set.s
@@ -0,0 +1,37 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# CHECK-ASM: .module fp=xx
+# CHECK-ASM: .set fp=64
+
+# Check that the .MIPS.abiflags section was emitted correctly.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags (12)
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00002001 01010005 00000000 00000000 |.. .............|
+# CHECK-OBJ: 0010: 00000001 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+ .module fp=xx
+ .set fp=64
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/mips_directives.s b/test/MC/Mips/mips_directives.s
index 44e707c89452..1a7d61f3ad4f 100644
--- a/test/MC/Mips/mips_directives.s
+++ b/test/MC/Mips/mips_directives.s
@@ -1,15 +1,22 @@
-# RUN: llvm-mc -show-encoding -triple mips-unknown-unknown %s | FileCheck %s
+# RUN: llvm-mc -show-encoding -mcpu=mips32 -triple mips-unknown-unknown %s | FileCheck %s
#
# CHECK: .text
# CHECK: $BB0_2:
+# CHECK: .abicalls
$BB0_2:
.ent directives_test
+ .abicalls
.frame $sp,0,$ra
.mask 0x00000000,0
.fmask 0x00000000,0
+
+# CHECK: .set noreorder
# CHECK: b 1332 # encoding: [0x10,0x00,0x01,0x4d]
+# CHECK-NOT: nop
# CHECK: j 1328 # encoding: [0x08,0x00,0x01,0x4c]
+# CHECK-NOT: nop
# CHECK: jal 1328 # encoding: [0x0c,0x00,0x01,0x4c]
+# CHECK-NOT: nop
.set noreorder
b 1332
@@ -26,6 +33,7 @@ $JTI0_0:
# CHECK: .4byte 2013265916
.set at=$12
.set macro
+# CHECK: .set reorder
# CHECK: b 1332 # encoding: [0x10,0x00,0x01,0x4d]
# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
# CHECK: j 1328 # encoding: [0x08,0x00,0x01,0x4c]
@@ -33,15 +41,43 @@ $JTI0_0:
# CHECK: jal 1328 # encoding: [0x0c,0x00,0x01,0x4c]
# CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
.set reorder
+$BB0_4:
b 1332
j 1328
jal 1328
.set at=$a0
.set STORE_MASK,$t7
.set FPU_MASK,$f7
- .set r3,$3
+ .set $tmp7, $BB0_4-$BB0_2
.set f6,$f6
# CHECK: abs.s $f6, $f7 # encoding: [0x46,0x00,0x39,0x85]
-# CHECK: and $3, $15, $15 # encoding: [0x01,0xef,0x18,0x24]
+# CHECK: lui $1, %hi($tmp7) # encoding: [0x3c,0x01,A,A]
+# CHECK: # fixup A - offset: 0, value: ($tmp7)@ABS_HI, kind: fixup_Mips_HI16
abs.s f6,FPU_MASK
- and r3,$t7,STORE_MASK
+ lui $1, %hi($tmp7)
+
+# CHECK: .set mips32r2
+# CHECK: ldxc1 $f0, $zero($5) # encoding: [0x4c,0xa0,0x00,0x01]
+# CHECK: luxc1 $f0, $6($5) # encoding: [0x4c,0xa6,0x00,0x05]
+# CHECK: lwxc1 $f6, $2($5) # encoding: [0x4c,0xa2,0x01,0x80]
+ .set mips32r2
+ ldxc1 $f0, $zero($5)
+ luxc1 $f0, $6($5)
+ lwxc1 $f6, $2($5)
+
+# CHECK: .set mips64
+# CHECK: dadd $3, $3, $3
+ .set mips64
+ dadd $3, $3, $3 # encoding: [0x00,0x62,0x18,0x2c]
+
+# CHECK: .set mips64r2
+# CHECK: drotr $9, $6, 30 # encoding: [0x00,0x26,0x4f,0xba]
+ .set mips64r2
+ drotr $9, $6, 30
+
+# CHECK: .set dsp
+# CHECK: lbux $7, $10($11) # encoding: [0x7d,0x6a,0x39,0x8a]
+# CHECK: lhx $5, $6($7) # encoding: [0x7c,0xe6,0x29,0x0a]
+ .set dsp
+ lbux $7, $10($11)
+ lhx $5, $6($7)
diff --git a/test/MC/Mips/mips_directives_bad.s b/test/MC/Mips/mips_directives_bad.s
new file mode 100644
index 000000000000..c823cacf0cb8
--- /dev/null
+++ b/test/MC/Mips/mips_directives_bad.s
@@ -0,0 +1,59 @@
+# Error checking for malformed directives
+# RUN: not llvm-mc -triple mips-unknown-unknown %s 2>&1 | FileCheck %s
+
+ .abicalls should have no operands
+# CHECK: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in directive
+# CHECK-NEXT: .abicalls should have no operands
+# CHECK-NEXT: ^
+
+# We don't know yet how to represent a list of options.
+# pic2 will eventually be legal, so we will probably want
+# to change it to something silly.
+
+# Blank option operand
+ .option
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option directive
+# CHECK-NEXT: .option
+# CHECK-NEXT: ^
+
+# Numeric option operand
+ .option 2
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option directive
+# CHECK-NEXT: .option 2
+# CHECK-NEXT: ^
+
+# Register option operand
+ .option $2
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option directive
+# CHECK-NEXT: .option $2
+# CHECK-NEXT: ^
+
+ .option WithBadOption
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: warning: unknown option in .option directive
+# CHECK-NEXT: .option WithBadOption
+# CHECK-NEXT: ^
+
+ .option pic0,
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option pic0 directive
+# CHECK-NEXT: .option pic0,
+# CHECK-NEXT: ^
+
+ .option pic0,pic2
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option pic0 directive
+# CHECK-NEXT: .option pic0,pic2
+# CHECK-NEXT: ^
+
+ .option pic0 pic2
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option pic0 directive
+# CHECK-NEXT: .option pic0 pic2
+# CHECK-NEXT: ^
+
+ .option pic2,
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option pic2 directive
+# CHECK-NEXT: .option pic2,
+# CHECK-NEXT: ^
+
+ .option pic2 pic3
+# CHECK-NEXT: :{{[0-9]+}}:{{[0-9]+}}: error: unexpected token in .option pic2 directive
+# CHECK-NEXT: .option pic2 pic3
+# CHECK-NEXT: ^
diff --git a/test/MC/Mips/mips_gprel16.ll b/test/MC/Mips/mips_gprel16.ll
deleted file mode 100644
index b5a282de560b..000000000000
--- a/test/MC/Mips/mips_gprel16.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; This addresses bug 14456. We were not writing
-; out the addend to the gprel16 relocation. The
-; addend is stored in the instruction immediate
-; field.
-;llc gprel16.ll -o gprel16.o -mcpu=mips32r2 -march=mipsel -filetype=obj -relocation-model=static
-
-; RUN: llc -mcpu=mips32r2 -march=mipsel -filetype=obj -relocation-model=static %s -o - \
-; RUN: | llvm-objdump -disassemble -mattr +mips32r2 - \
-; RUN: | FileCheck %s
-
-target triple = "mipsel-sde--elf-gcc"
-
-@var1 = internal global i32 0, align 4
-@var2 = internal global i32 0, align 4
-
-define i32 @testvar1() nounwind {
-entry:
-; CHECK: lw ${{[0-9]+}}, 0($gp)
- %0 = load i32* @var1, align 4
- %tobool = icmp ne i32 %0, 0
- %cond = select i1 %tobool, i32 1, i32 0
- ret i32 %cond
-}
-
-define i32 @testvar2() nounwind {
-entry:
-; CHECK: lw ${{[0-9]+}}, 4($gp)
- %0 = load i32* @var2, align 4
- %tobool = icmp ne i32 %0, 0
- %cond = select i1 %tobool, i32 1, i32 0
- ret i32 %cond
-}
-
diff --git a/test/MC/Mips/mips_gprel16.s b/test/MC/Mips/mips_gprel16.s
new file mode 100644
index 000000000000..9dd3fa3281c2
--- /dev/null
+++ b/test/MC/Mips/mips_gprel16.s
@@ -0,0 +1,73 @@
+// This addresses bug 14456. We were not writing
+// out the addend to the gprel16 relocation. The
+// addend is stored in the instruction immediate
+// field.
+
+// RUN: llvm-mc -mcpu=mips32r2 -triple=mipsel-pc-linux -filetype=obj -relocation-model=static %s -o - \
+// RUN: | llvm-objdump -disassemble -mattr +mips32r2 - \
+// RUN: | FileCheck %s
+// RUN: llvm-mc -mcpu=mips32r2 -triple=mips-pc-linux -filetype=obj -relocation-model=static %s -o - \
+// RUN: | llvm-objdump -disassemble -mattr +mips32r2 - \
+// RUN: | FileCheck %s
+
+ .text
+ .abicalls
+ .option pic0
+ .section .mdebug.abi32,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/mips_gprel16.ll"
+ .text
+ .globl testvar1
+ .align 2
+ .type testvar1,@function
+ .set nomips16
+ .ent testvar1
+testvar1: # @testvar1
+ .frame $sp,0,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+// CHECK: lw ${{[0-9]+}}, 0($gp)
+ lw $1, %gp_rel(var1)($gp)
+ jr $ra
+ sltu $2, $zero, $1
+ .set at
+ .set macro
+ .set reorder
+ .end testvar1
+$tmp0:
+ .size testvar1, ($tmp0)-testvar1
+
+ .globl testvar2
+ .align 2
+ .type testvar2,@function
+ .set nomips16
+ .ent testvar2
+testvar2: # @testvar2
+ .frame $sp,0,$ra
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+// CHECK: lw ${{[0-9]+}}, 4($gp)
+ lw $1, %gp_rel(var2)($gp)
+ jr $ra
+ sltu $2, $zero, $1
+ .set at
+ .set macro
+ .set reorder
+ .end testvar2
+$tmp1:
+ .size testvar2, ($tmp1)-testvar2
+
+ .type var1,@object # @var1
+ .local var1
+ .comm var1,4,4
+ .type var2,@object # @var2
+ .local var2
+ .comm var2,4,4
+
diff --git a/test/MC/Mips/msa/abiflags.s b/test/MC/Mips/msa/abiflags.s
new file mode 100644
index 000000000000..136c035593e4
--- /dev/null
+++ b/test/MC/Mips/msa/abiflags.s
@@ -0,0 +1,37 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# CHECK-ASM: .module fp=32
+# CHECK-ASM: .set fp=64
+
+# Check that the .MIPS.abiflags section was emitted correctly.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags (12)
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00002002 01030001 00000000 00000200 |.. .............|
+# CHECK-OBJ: 0010: 00000001 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+ .module fp=32
+ .set fp=64
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/msa/test_2r.s b/test/MC/Mips/msa/test_2r.s
index 67a2b6f0164e..01bea645e1a5 100644
--- a/test/MC/Mips/msa/test_2r.s
+++ b/test/MC/Mips/msa/test_2r.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: fill.b $w30, $9 # encoding: [0x7b,0x00,0x4f,0x9e]
# CHECK: fill.h $w31, $23 # encoding: [0x7b,0x01,0xbf,0xde]
@@ -18,22 +16,6 @@
# CHECK: pcnt.w $w23, $w9 # encoding: [0x7b,0x06,0x4d,0xde]
# CHECK: pcnt.d $w21, $w24 # encoding: [0x7b,0x07,0xc5,0x5e]
-# CHECKOBJDUMP: fill.b $w30, $9
-# CHECKOBJDUMP: fill.h $w31, $23
-# CHECKOBJDUMP: fill.w $w16, $24
-# CHECKOBJDUMP: nloc.b $w21, $w0
-# CHECKOBJDUMP: nloc.h $w18, $w31
-# CHECKOBJDUMP: nloc.w $w2, $w23
-# CHECKOBJDUMP: nloc.d $w4, $w10
-# CHECKOBJDUMP: nlzc.b $w31, $w2
-# CHECKOBJDUMP: nlzc.h $w27, $w22
-# CHECKOBJDUMP: nlzc.w $w10, $w29
-# CHECKOBJDUMP: nlzc.d $w25, $w9
-# CHECKOBJDUMP: pcnt.b $w20, $w18
-# CHECKOBJDUMP: pcnt.h $w0, $w8
-# CHECKOBJDUMP: pcnt.w $w23, $w9
-# CHECKOBJDUMP: pcnt.d $w21, $w24
-
fill.b $w30, $9
fill.h $w31, $23
fill.w $w16, $24
diff --git a/test/MC/Mips/msa/test_2r_msa64.s b/test/MC/Mips/msa/test_2r_msa64.s
new file mode 100644
index 000000000000..f6e35c461469
--- /dev/null
+++ b/test/MC/Mips/msa/test_2r_msa64.s
@@ -0,0 +1,5 @@
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64r2 -mattr=+msa -show-encoding | FileCheck %s
+#
+# CHECK: fill.d $w27, $9 # encoding: [0x7b,0x03,0x4e,0xde]
+
+ fill.d $w27, $9
diff --git a/test/MC/Mips/msa/test_2rf.s b/test/MC/Mips/msa/test_2rf.s
index 64025a41e1bf..5d41545c3376 100644
--- a/test/MC/Mips/msa/test_2rf.s
+++ b/test/MC/Mips/msa/test_2rf.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: fclass.w $w26, $w12 # encoding: [0x7b,0x20,0x66,0x9e]
# CHECK: fclass.d $w24, $w17 # encoding: [0x7b,0x21,0x8e,0x1e]
@@ -35,39 +33,6 @@
# CHECK: ftrunc_u.w $w17, $w15 # encoding: [0x7b,0x24,0x7c,0x5e]
# CHECK: ftrunc_u.d $w5, $w27 # encoding: [0x7b,0x25,0xd9,0x5e]
-# CHECKOBJDUMP: fclass.w $w26, $w12
-# CHECKOBJDUMP: fclass.d $w24, $w17
-# CHECKOBJDUMP: fexupl.w $w8, $w0
-# CHECKOBJDUMP: fexupl.d $w17, $w29
-# CHECKOBJDUMP: fexupr.w $w13, $w4
-# CHECKOBJDUMP: fexupr.d $w5, $w2
-# CHECKOBJDUMP: ffint_s.w $w20, $w29
-# CHECKOBJDUMP: ffint_s.d $w12, $w15
-# CHECKOBJDUMP: ffint_u.w $w7, $w27
-# CHECKOBJDUMP: ffint_u.d $w19, $w16
-# CHECKOBJDUMP: ffql.w $w31, $w13
-# CHECKOBJDUMP: ffql.d $w12, $w13
-# CHECKOBJDUMP: ffqr.w $w27, $w30
-# CHECKOBJDUMP: ffqr.d $w30, $w15
-# CHECKOBJDUMP: flog2.w $w25, $w31
-# CHECKOBJDUMP: flog2.d $w18, $w10
-# CHECKOBJDUMP: frint.w $w7, $w15
-# CHECKOBJDUMP: frint.d $w21, $w22
-# CHECKOBJDUMP: frcp.w $w19, $w0
-# CHECKOBJDUMP: frcp.d $w4, $w14
-# CHECKOBJDUMP: frsqrt.w $w12, $w17
-# CHECKOBJDUMP: frsqrt.d $w23, $w11
-# CHECKOBJDUMP: fsqrt.w $w0, $w11
-# CHECKOBJDUMP: fsqrt.d $w15, $w12
-# CHECKOBJDUMP: ftint_s.w $w30, $w5
-# CHECKOBJDUMP: ftint_s.d $w5, $w23
-# CHECKOBJDUMP: ftint_u.w $w20, $w14
-# CHECKOBJDUMP: ftint_u.d $w23, $w21
-# CHECKOBJDUMP: ftrunc_s.w $w29, $w17
-# CHECKOBJDUMP: ftrunc_s.d $w12, $w27
-# CHECKOBJDUMP: ftrunc_u.w $w17, $w15
-# CHECKOBJDUMP: ftrunc_u.d $w5, $w27
-
fclass.w $w26, $w12
fclass.d $w24, $w17
fexupl.w $w8, $w0
diff --git a/test/MC/Mips/msa/test_3r.s b/test/MC/Mips/msa/test_3r.s
index 3047ecb7aa8d..df2e1e112606 100644
--- a/test/MC/Mips/msa/test_3r.s
+++ b/test/MC/Mips/msa/test_3r.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: add_a.b $w26, $w9, $w4 # encoding: [0x78,0x04,0x4e,0x90]
# CHECK: add_a.h $w23, $w27, $w31 # encoding: [0x78,0x3f,0xdd,0xd0]
@@ -245,249 +243,6 @@
# CHECK: vshf.w $w16, $w30, $w25 # encoding: [0x78,0x59,0xf4,0x15]
# CHECK: vshf.d $w19, $w11, $w15 # encoding: [0x78,0x6f,0x5c,0xd5]
-# CHECKOBJDUMP: add_a.b $w26, $w9, $w4
-# CHECKOBJDUMP: add_a.h $w23, $w27, $w31
-# CHECKOBJDUMP: add_a.w $w11, $w6, $w22
-# CHECKOBJDUMP: add_a.d $w6, $w10, $w0
-# CHECKOBJDUMP: adds_a.b $w19, $w24, $w19
-# CHECKOBJDUMP: adds_a.h $w25, $w6, $w4
-# CHECKOBJDUMP: adds_a.w $w25, $w17, $w27
-# CHECKOBJDUMP: adds_a.d $w15, $w18, $w26
-# CHECKOBJDUMP: adds_s.b $w29, $w11, $w19
-# CHECKOBJDUMP: adds_s.h $w5, $w23, $w26
-# CHECKOBJDUMP: adds_s.w $w16, $w14, $w13
-# CHECKOBJDUMP: adds_s.d $w2, $w14, $w28
-# CHECKOBJDUMP: adds_u.b $w3, $w17, $w14
-# CHECKOBJDUMP: adds_u.h $w10, $w30, $w4
-# CHECKOBJDUMP: adds_u.w $w15, $w18, $w20
-# CHECKOBJDUMP: adds_u.d $w30, $w10, $w9
-# CHECKOBJDUMP: addv.b $w24, $w20, $w21
-# CHECKOBJDUMP: addv.h $w4, $w13, $w27
-# CHECKOBJDUMP: addv.w $w19, $w11, $w14
-# CHECKOBJDUMP: addv.d $w2, $w21, $w31
-# CHECKOBJDUMP: asub_s.b $w23, $w16, $w3
-# CHECKOBJDUMP: asub_s.h $w22, $w17, $w25
-# CHECKOBJDUMP: asub_s.w $w24, $w1, $w9
-# CHECKOBJDUMP: asub_s.d $w13, $w12, $w12
-# CHECKOBJDUMP: asub_u.b $w10, $w29, $w11
-# CHECKOBJDUMP: asub_u.h $w18, $w9, $w15
-# CHECKOBJDUMP: asub_u.w $w10, $w19, $w31
-# CHECKOBJDUMP: asub_u.d $w17, $w10, $w0
-# CHECKOBJDUMP: ave_s.b $w2, $w5, $w1
-# CHECKOBJDUMP: ave_s.h $w16, $w19, $w9
-# CHECKOBJDUMP: ave_s.w $w17, $w31, $w5
-# CHECKOBJDUMP: ave_s.d $w27, $w25, $w10
-# CHECKOBJDUMP: ave_u.b $w16, $w19, $w9
-# CHECKOBJDUMP: ave_u.h $w28, $w28, $w11
-# CHECKOBJDUMP: ave_u.w $w11, $w12, $w11
-# CHECKOBJDUMP: ave_u.d $w30, $w19, $w28
-# CHECKOBJDUMP: aver_s.b $w26, $w16, $w2
-# CHECKOBJDUMP: aver_s.h $w31, $w27, $w27
-# CHECKOBJDUMP: aver_s.w $w28, $w18, $w25
-# CHECKOBJDUMP: aver_s.d $w29, $w21, $w27
-# CHECKOBJDUMP: aver_u.b $w29, $w26, $w3
-# CHECKOBJDUMP: aver_u.h $w18, $w18, $w9
-# CHECKOBJDUMP: aver_u.w $w17, $w25, $w29
-# CHECKOBJDUMP: aver_u.d $w22, $w22, $w19
-# CHECKOBJDUMP: bclr.b $w2, $w15, $w29
-# CHECKOBJDUMP: bclr.h $w16, $w21, $w28
-# CHECKOBJDUMP: bclr.w $w19, $w2, $w9
-# CHECKOBJDUMP: bclr.d $w27, $w31, $w4
-# CHECKOBJDUMP: binsl.b $w5, $w16, $w24
-# CHECKOBJDUMP: binsl.h $w30, $w5, $w10
-# CHECKOBJDUMP: binsl.w $w14, $w15, $w13
-# CHECKOBJDUMP: binsl.d $w23, $w20, $w12
-# CHECKOBJDUMP: binsr.b $w22, $w11, $w2
-# CHECKOBJDUMP: binsr.h $w0, $w26, $w6
-# CHECKOBJDUMP: binsr.w $w26, $w3, $w28
-# CHECKOBJDUMP: binsr.d $w0, $w0, $w21
-# CHECKOBJDUMP: bneg.b $w0, $w11, $w24
-# CHECKOBJDUMP: bneg.h $w28, $w16, $w4
-# CHECKOBJDUMP: bneg.w $w3, $w26, $w19
-# CHECKOBJDUMP: bneg.d $w13, $w29, $w15
-# CHECKOBJDUMP: bset.b $w31, $w5, $w31
-# CHECKOBJDUMP: bset.h $w14, $w12, $w6
-# CHECKOBJDUMP: bset.w $w31, $w9, $w12
-# CHECKOBJDUMP: bset.d $w5, $w22, $w5
-# CHECKOBJDUMP: ceq.b $w31, $w31, $w18
-# CHECKOBJDUMP: ceq.h $w10, $w27, $w9
-# CHECKOBJDUMP: ceq.w $w9, $w5, $w14
-# CHECKOBJDUMP: ceq.d $w5, $w17, $w0
-# CHECKOBJDUMP: cle_s.b $w23, $w4, $w9
-# CHECKOBJDUMP: cle_s.h $w22, $w27, $w19
-# CHECKOBJDUMP: cle_s.w $w30, $w26, $w10
-# CHECKOBJDUMP: cle_s.d $w18, $w5, $w10
-# CHECKOBJDUMP: cle_u.b $w1, $w25, $w0
-# CHECKOBJDUMP: cle_u.h $w7, $w0, $w29
-# CHECKOBJDUMP: cle_u.w $w25, $w18, $w1
-# CHECKOBJDUMP: cle_u.d $w6, $w0, $w30
-# CHECKOBJDUMP: clt_s.b $w25, $w2, $w21
-# CHECKOBJDUMP: clt_s.h $w2, $w19, $w9
-# CHECKOBJDUMP: clt_s.w $w23, $w8, $w16
-# CHECKOBJDUMP: clt_s.d $w7, $w30, $w12
-# CHECKOBJDUMP: clt_u.b $w2, $w31, $w13
-# CHECKOBJDUMP: clt_u.h $w16, $w31, $w23
-# CHECKOBJDUMP: clt_u.w $w3, $w24, $w9
-# CHECKOBJDUMP: clt_u.d $w7, $w0, $w1
-# CHECKOBJDUMP: div_s.b $w29, $w3, $w18
-# CHECKOBJDUMP: div_s.h $w17, $w16, $w13
-# CHECKOBJDUMP: div_s.w $w4, $w25, $w30
-# CHECKOBJDUMP: div_s.d $w31, $w9, $w20
-# CHECKOBJDUMP: div_u.b $w6, $w29, $w10
-# CHECKOBJDUMP: div_u.h $w24, $w21, $w14
-# CHECKOBJDUMP: div_u.w $w29, $w14, $w25
-# CHECKOBJDUMP: div_u.d $w31, $w1, $w21
-# CHECKOBJDUMP: dotp_s.h $w23, $w22, $w25
-# CHECKOBJDUMP: dotp_s.w $w20, $w14, $w5
-# CHECKOBJDUMP: dotp_s.d $w17, $w2, $w22
-# CHECKOBJDUMP: dotp_u.h $w13, $w2, $w6
-# CHECKOBJDUMP: dotp_u.w $w15, $w22, $w21
-# CHECKOBJDUMP: dotp_u.d $w4, $w16, $w26
-# CHECKOBJDUMP: dpadd_s.h $w1, $w28, $w22
-# CHECKOBJDUMP: dpadd_s.w $w10, $w1, $w12
-# CHECKOBJDUMP: dpadd_s.d $w3, $w21, $w27
-# CHECKOBJDUMP: dpadd_u.h $w17, $w5, $w20
-# CHECKOBJDUMP: dpadd_u.w $w24, $w8, $w16
-# CHECKOBJDUMP: dpadd_u.d $w15, $w29, $w16
-# CHECKOBJDUMP: dpsub_s.h $w4, $w11, $w12
-# CHECKOBJDUMP: dpsub_s.w $w4, $w7, $w6
-# CHECKOBJDUMP: dpsub_s.d $w31, $w12, $w28
-# CHECKOBJDUMP: dpsub_u.h $w4, $w25, $w17
-# CHECKOBJDUMP: dpsub_u.w $w19, $w25, $w16
-# CHECKOBJDUMP: dpsub_u.d $w7, $w10, $w26
-# CHECKOBJDUMP: hadd_s.h $w28, $w24, $w2
-# CHECKOBJDUMP: hadd_s.w $w24, $w17, $w11
-# CHECKOBJDUMP: hadd_s.d $w17, $w15, $w20
-# CHECKOBJDUMP: hadd_u.h $w12, $w29, $w17
-# CHECKOBJDUMP: hadd_u.w $w9, $w5, $w6
-# CHECKOBJDUMP: hadd_u.d $w1, $w20, $w6
-# CHECKOBJDUMP: hsub_s.h $w16, $w14, $w29
-# CHECKOBJDUMP: hsub_s.w $w9, $w13, $w11
-# CHECKOBJDUMP: hsub_s.d $w30, $w18, $w14
-# CHECKOBJDUMP: hsub_u.h $w7, $w12, $w14
-# CHECKOBJDUMP: hsub_u.w $w21, $w5, $w5
-# CHECKOBJDUMP: hsub_u.d $w11, $w12, $w31
-# CHECKOBJDUMP: ilvev.b $w18, $w16, $w30
-# CHECKOBJDUMP: ilvev.h $w14, $w0, $w13
-# CHECKOBJDUMP: ilvev.w $w12, $w25, $w22
-# CHECKOBJDUMP: ilvev.d $w30, $w27, $w3
-# CHECKOBJDUMP: ilvl.b $w29, $w3, $w21
-# CHECKOBJDUMP: ilvl.h $w27, $w10, $w17
-# CHECKOBJDUMP: ilvl.w $w6, $w1, $w0
-# CHECKOBJDUMP: ilvl.d $w3, $w16, $w24
-# CHECKOBJDUMP: ilvod.b $w11, $w5, $w20
-# CHECKOBJDUMP: ilvod.h $w18, $w13, $w31
-# CHECKOBJDUMP: ilvod.w $w29, $w16, $w24
-# CHECKOBJDUMP: ilvod.d $w22, $w12, $w29
-# CHECKOBJDUMP: ilvr.b $w4, $w30, $w6
-# CHECKOBJDUMP: ilvr.h $w28, $w19, $w29
-# CHECKOBJDUMP: ilvr.w $w18, $w20, $w21
-# CHECKOBJDUMP: ilvr.d $w23, $w30, $w12
-# CHECKOBJDUMP: maddv.b $w17, $w31, $w29
-# CHECKOBJDUMP: maddv.h $w7, $w24, $w9
-# CHECKOBJDUMP: maddv.w $w22, $w22, $w20
-# CHECKOBJDUMP: maddv.d $w30, $w26, $w20
-# CHECKOBJDUMP: max_a.b $w23, $w11, $w23
-# CHECKOBJDUMP: max_a.h $w20, $w5, $w30
-# CHECKOBJDUMP: max_a.w $w7, $w18, $w30
-# CHECKOBJDUMP: max_a.d $w8, $w8, $w31
-# CHECKOBJDUMP: max_s.b $w10, $w1, $w19
-# CHECKOBJDUMP: max_s.h $w15, $w29, $w17
-# CHECKOBJDUMP: max_s.w $w15, $w29, $w14
-# CHECKOBJDUMP: max_s.d $w25, $w24, $w3
-# CHECKOBJDUMP: max_u.b $w12, $w24, $w5
-# CHECKOBJDUMP: max_u.h $w5, $w6, $w7
-# CHECKOBJDUMP: max_u.w $w16, $w4, $w7
-# CHECKOBJDUMP: max_u.d $w26, $w12, $w24
-# CHECKOBJDUMP: min_a.b $w4, $w26, $w1
-# CHECKOBJDUMP: min_a.h $w12, $w13, $w31
-# CHECKOBJDUMP: min_a.w $w28, $w20, $w0
-# CHECKOBJDUMP: min_a.d $w12, $w20, $w19
-# CHECKOBJDUMP: min_s.b $w19, $w3, $w14
-# CHECKOBJDUMP: min_s.h $w27, $w21, $w8
-# CHECKOBJDUMP: min_s.w $w0, $w14, $w30
-# CHECKOBJDUMP: min_s.d $w6, $w8, $w21
-# CHECKOBJDUMP: min_u.b $w22, $w26, $w8
-# CHECKOBJDUMP: min_u.h $w7, $w27, $w12
-# CHECKOBJDUMP: min_u.w $w8, $w20, $w14
-# CHECKOBJDUMP: min_u.d $w26, $w14, $w15
-# CHECKOBJDUMP: mod_s.b $w18, $w1, $w26
-# CHECKOBJDUMP: mod_s.h $w31, $w30, $w28
-# CHECKOBJDUMP: mod_s.w $w2, $w6, $w13
-# CHECKOBJDUMP: mod_s.d $w21, $w27, $w22
-# CHECKOBJDUMP: mod_u.b $w16, $w7, $w13
-# CHECKOBJDUMP: mod_u.h $w24, $w8, $w7
-# CHECKOBJDUMP: mod_u.w $w30, $w2, $w17
-# CHECKOBJDUMP: mod_u.d $w31, $w2, $w25
-# CHECKOBJDUMP: msubv.b $w14, $w5, $w12
-# CHECKOBJDUMP: msubv.h $w6, $w7, $w30
-# CHECKOBJDUMP: msubv.w $w13, $w2, $w21
-# CHECKOBJDUMP: msubv.d $w16, $w14, $w27
-# CHECKOBJDUMP: mulv.b $w20, $w3, $w13
-# CHECKOBJDUMP: mulv.h $w27, $w26, $w14
-# CHECKOBJDUMP: mulv.w $w10, $w29, $w3
-# CHECKOBJDUMP: mulv.d $w7, $w19, $w29
-# CHECKOBJDUMP: pckev.b $w5, $w27, $w7
-# CHECKOBJDUMP: pckev.h $w1, $w4, $w27
-# CHECKOBJDUMP: pckev.w $w30, $w20, $w0
-# CHECKOBJDUMP: pckev.d $w6, $w1, $w15
-# CHECKOBJDUMP: pckod.b $w18, $w28, $w30
-# CHECKOBJDUMP: pckod.h $w26, $w5, $w8
-# CHECKOBJDUMP: pckod.w $w9, $w4, $w2
-# CHECKOBJDUMP: pckod.d $w30, $w22, $w20
-# CHECKOBJDUMP: sld.b $w5, $w23[$12]
-# CHECKOBJDUMP: sld.h $w1, $w23[$3]
-# CHECKOBJDUMP: sld.w $w20, $w8[$9]
-# CHECKOBJDUMP: sld.d $w7, $w23[$fp]
-# CHECKOBJDUMP: sll.b $w3, $w0, $w17
-# CHECKOBJDUMP: sll.h $w17, $w27, $w3
-# CHECKOBJDUMP: sll.w $w16, $w7, $w6
-# CHECKOBJDUMP: sll.d $w9, $w0, $w26
-# CHECKOBJDUMP: splat.b $w28, $w1[$1]
-# CHECKOBJDUMP: splat.h $w2, $w11[$11]
-# CHECKOBJDUMP: splat.w $w22, $w0[$11]
-# CHECKOBJDUMP: splat.d $w0, $w0[$2]
-# CHECKOBJDUMP: sra.b $w28, $w4, $w17
-# CHECKOBJDUMP: sra.h $w13, $w9, $w3
-# CHECKOBJDUMP: sra.w $w27, $w21, $w19
-# CHECKOBJDUMP: sra.d $w30, $w8, $w23
-# CHECKOBJDUMP: srar.b $w19, $w18, $w18
-# CHECKOBJDUMP: srar.h $w7, $w23, $w8
-# CHECKOBJDUMP: srar.w $w1, $w12, $w2
-# CHECKOBJDUMP: srar.d $w21, $w7, $w14
-# CHECKOBJDUMP: srl.b $w12, $w3, $w19
-# CHECKOBJDUMP: srl.h $w23, $w31, $w20
-# CHECKOBJDUMP: srl.w $w18, $w27, $w11
-# CHECKOBJDUMP: srl.d $w3, $w12, $w26
-# CHECKOBJDUMP: srlr.b $w15, $w21, $w11
-# CHECKOBJDUMP: srlr.h $w21, $w13, $w19
-# CHECKOBJDUMP: srlr.w $w6, $w30, $w3
-# CHECKOBJDUMP: srlr.d $w1, $w2, $w14
-# CHECKOBJDUMP: subs_s.b $w25, $w15, $w1
-# CHECKOBJDUMP: subs_s.h $w28, $w25, $w22
-# CHECKOBJDUMP: subs_s.w $w10, $w12, $w21
-# CHECKOBJDUMP: subs_s.d $w4, $w20, $w18
-# CHECKOBJDUMP: subs_u.b $w21, $w6, $w25
-# CHECKOBJDUMP: subs_u.h $w3, $w10, $w7
-# CHECKOBJDUMP: subs_u.w $w9, $w15, $w10
-# CHECKOBJDUMP: subs_u.d $w7, $w19, $w10
-# CHECKOBJDUMP: subsus_u.b $w6, $w7, $w12
-# CHECKOBJDUMP: subsus_u.h $w6, $w29, $w19
-# CHECKOBJDUMP: subsus_u.w $w7, $w15, $w7
-# CHECKOBJDUMP: subsus_u.d $w9, $w3, $w15
-# CHECKOBJDUMP: subsuu_s.b $w22, $w3, $w31
-# CHECKOBJDUMP: subsuu_s.h $w19, $w23, $w22
-# CHECKOBJDUMP: subsuu_s.w $w9, $w10, $w13
-# CHECKOBJDUMP: subsuu_s.d $w5, $w6, $w0
-# CHECKOBJDUMP: subv.b $w6, $w13, $w19
-# CHECKOBJDUMP: subv.h $w4, $w25, $w12
-# CHECKOBJDUMP: subv.w $w27, $w27, $w11
-# CHECKOBJDUMP: subv.d $w9, $w24, $w10
-# CHECKOBJDUMP: vshf.b $w3, $w16, $w5
-# CHECKOBJDUMP: vshf.h $w20, $w19, $w8
-# CHECKOBJDUMP: vshf.w $w16, $w30, $w25
-# CHECKOBJDUMP: vshf.d $w19, $w11, $w15
-
add_a.b $w26, $w9, $w4
add_a.h $w23, $w27, $w31
add_a.w $w11, $w6, $w22
diff --git a/test/MC/Mips/msa/test_3rf.s b/test/MC/Mips/msa/test_3rf.s
index f45557ee0ec8..c5896d78c5b9 100644
--- a/test/MC/Mips/msa/test_3rf.s
+++ b/test/MC/Mips/msa/test_3rf.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: fadd.w $w28, $w19, $w28 # encoding: [0x78,0x1c,0x9f,0x1b]
# CHECK: fadd.d $w13, $w2, $w29 # encoding: [0x78,0x3d,0x13,0x5b]
@@ -85,89 +83,6 @@
# CHECK: mulr_q.h $w6, $w20, $w19 # encoding: [0x7b,0x13,0xa1,0x9c]
# CHECK: mulr_q.w $w27, $w1, $w20 # encoding: [0x7b,0x34,0x0e,0xdc]
-# CHECKOBJDUMP: fadd.w $w28, $w19, $w28
-# CHECKOBJDUMP: fadd.d $w13, $w2, $w29
-# CHECKOBJDUMP: fcaf.w $w14, $w11, $w25
-# CHECKOBJDUMP: fcaf.d $w1, $w1, $w19
-# CHECKOBJDUMP: fceq.w $w1, $w23, $w16
-# CHECKOBJDUMP: fceq.d $w0, $w8, $w16
-# CHECKOBJDUMP: fcle.w $w16, $w9, $w24
-# CHECKOBJDUMP: fcle.d $w27, $w14, $w1
-# CHECKOBJDUMP: fclt.w $w28, $w8, $w8
-# CHECKOBJDUMP: fclt.d $w30, $w25, $w11
-# CHECKOBJDUMP: fcne.w $w2, $w18, $w23
-# CHECKOBJDUMP: fcne.d $w14, $w20, $w15
-# CHECKOBJDUMP: fcor.w $w10, $w18, $w25
-# CHECKOBJDUMP: fcor.d $w17, $w25, $w11
-# CHECKOBJDUMP: fcueq.w $w14, $w2, $w21
-# CHECKOBJDUMP: fcueq.d $w29, $w3, $w7
-# CHECKOBJDUMP: fcule.w $w17, $w5, $w3
-# CHECKOBJDUMP: fcule.d $w31, $w1, $w30
-# CHECKOBJDUMP: fcult.w $w6, $w25, $w9
-# CHECKOBJDUMP: fcult.d $w27, $w8, $w17
-# CHECKOBJDUMP: fcun.w $w4, $w20, $w8
-# CHECKOBJDUMP: fcun.d $w29, $w11, $w3
-# CHECKOBJDUMP: fcune.w $w13, $w18, $w19
-# CHECKOBJDUMP: fcune.d $w16, $w26, $w21
-# CHECKOBJDUMP: fdiv.w $w13, $w24, $w2
-# CHECKOBJDUMP: fdiv.d $w19, $w4, $w25
-# CHECKOBJDUMP: fexdo.h $w8, $w0, $w16
-# CHECKOBJDUMP: fexdo.w $w0, $w13, $w27
-# CHECKOBJDUMP: fexp2.w $w17, $w0, $w3
-# CHECKOBJDUMP: fexp2.d $w22, $w0, $w10
-# CHECKOBJDUMP: fmadd.w $w29, $w6, $w23
-# CHECKOBJDUMP: fmadd.d $w11, $w28, $w21
-# CHECKOBJDUMP: fmax.w $w0, $w23, $w13
-# CHECKOBJDUMP: fmax.d $w26, $w18, $w8
-# CHECKOBJDUMP: fmax_a.w $w10, $w16, $w10
-# CHECKOBJDUMP: fmax_a.d $w30, $w9, $w22
-# CHECKOBJDUMP: fmin.w $w24, $w1, $w30
-# CHECKOBJDUMP: fmin.d $w27, $w27, $w10
-# CHECKOBJDUMP: fmin_a.w $w10, $w29, $w20
-# CHECKOBJDUMP: fmin_a.d $w13, $w30, $w24
-# CHECKOBJDUMP: fmsub.w $w17, $w25, $w0
-# CHECKOBJDUMP: fmsub.d $w8, $w18, $w16
-# CHECKOBJDUMP: fmul.w $w3, $w15, $w15
-# CHECKOBJDUMP: fmul.d $w9, $w30, $w10
-# CHECKOBJDUMP: fsaf.w $w25, $w5, $w10
-# CHECKOBJDUMP: fsaf.d $w25, $w3, $w29
-# CHECKOBJDUMP: fseq.w $w11, $w17, $w13
-# CHECKOBJDUMP: fseq.d $w29, $w0, $w31
-# CHECKOBJDUMP: fsle.w $w30, $w31, $w31
-# CHECKOBJDUMP: fsle.d $w18, $w23, $w24
-# CHECKOBJDUMP: fslt.w $w12, $w5, $w6
-# CHECKOBJDUMP: fslt.d $w16, $w26, $w21
-# CHECKOBJDUMP: fsne.w $w30, $w1, $w12
-# CHECKOBJDUMP: fsne.d $w14, $w13, $w23
-# CHECKOBJDUMP: fsor.w $w27, $w13, $w27
-# CHECKOBJDUMP: fsor.d $w12, $w24, $w11
-# CHECKOBJDUMP: fsub.w $w31, $w26, $w1
-# CHECKOBJDUMP: fsub.d $w19, $w17, $w27
-# CHECKOBJDUMP: fsueq.w $w16, $w24, $w25
-# CHECKOBJDUMP: fsueq.d $w18, $w14, $w14
-# CHECKOBJDUMP: fsule.w $w23, $w30, $w13
-# CHECKOBJDUMP: fsule.d $w2, $w11, $w26
-# CHECKOBJDUMP: fsult.w $w11, $w26, $w22
-# CHECKOBJDUMP: fsult.d $w6, $w23, $w30
-# CHECKOBJDUMP: fsun.w $w3, $w18, $w28
-# CHECKOBJDUMP: fsun.d $w18, $w11, $w19
-# CHECKOBJDUMP: fsune.w $w16, $w31, $w2
-# CHECKOBJDUMP: fsune.d $w3, $w26, $w17
-# CHECKOBJDUMP: ftq.h $w16, $w4, $w24
-# CHECKOBJDUMP: ftq.w $w5, $w5, $w25
-# CHECKOBJDUMP: madd_q.h $w16, $w20, $w10
-# CHECKOBJDUMP: madd_q.w $w28, $w2, $w9
-# CHECKOBJDUMP: maddr_q.h $w8, $w18, $w9
-# CHECKOBJDUMP: maddr_q.w $w29, $w12, $w16
-# CHECKOBJDUMP: msub_q.h $w24, $w26, $w10
-# CHECKOBJDUMP: msub_q.w $w13, $w30, $w28
-# CHECKOBJDUMP: msubr_q.h $w12, $w21, $w11
-# CHECKOBJDUMP: msubr_q.w $w1, $w14, $w20
-# CHECKOBJDUMP: mul_q.h $w6, $w16, $w30
-# CHECKOBJDUMP: mul_q.w $w16, $w1, $w4
-# CHECKOBJDUMP: mulr_q.h $w6, $w20, $w19
-# CHECKOBJDUMP: mulr_q.w $w27, $w1, $w20
-
fadd.w $w28, $w19, $w28
fadd.d $w13, $w2, $w29
fcaf.w $w14, $w11, $w25
diff --git a/test/MC/Mips/msa/test_bit.s b/test/MC/Mips/msa/test_bit.s
index 7c2313116c3e..85ebe54fea47 100644
--- a/test/MC/Mips/msa/test_bit.s
+++ b/test/MC/Mips/msa/test_bit.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: bclri.b $w21, $w30, 2 # encoding: [0x79,0xf2,0xf5,0x49]
# CHECK: bclri.h $w24, $w21, 0 # encoding: [0x79,0xe0,0xae,0x09]
@@ -51,55 +49,6 @@
# CHECK: srlri.w $w11, $w22, 2 # encoding: [0x79,0xc2,0xb2,0xca]
# CHECK: srlri.d $w24, $w10, 6 # encoding: [0x79,0x86,0x56,0x0a]
-# CHECKOBJDUMP: bclri.b $w21, $w30, 2
-# CHECKOBJDUMP: bclri.h $w24, $w21, 0
-# CHECKOBJDUMP: bclri.w $w23, $w30, 3
-# CHECKOBJDUMP: bclri.d $w9, $w11, 0
-# CHECKOBJDUMP: binsli.b $w25, $w12, 1
-# CHECKOBJDUMP: binsli.h $w21, $w22, 0
-# CHECKOBJDUMP: binsli.w $w22, $w4, 0
-# CHECKOBJDUMP: binsli.d $w6, $w2, 6
-# CHECKOBJDUMP: binsri.b $w15, $w19, 0
-# CHECKOBJDUMP: binsri.h $w8, $w30, 1
-# CHECKOBJDUMP: binsri.w $w2, $w19, 5
-# CHECKOBJDUMP: binsri.d $w18, $w20, 1
-# CHECKOBJDUMP: bnegi.b $w24, $w19, 0
-# CHECKOBJDUMP: bnegi.h $w28, $w11, 3
-# CHECKOBJDUMP: bnegi.w $w1, $w27, 5
-# CHECKOBJDUMP: bnegi.d $w4, $w21, 1
-# CHECKOBJDUMP: bseti.b $w18, $w8, 0
-# CHECKOBJDUMP: bseti.h $w24, $w14, 2
-# CHECKOBJDUMP: bseti.w $w9, $w18, 4
-# CHECKOBJDUMP: bseti.d $w7, $w15, 1
-# CHECKOBJDUMP: sat_s.b $w31, $w31, 2
-# CHECKOBJDUMP: sat_s.h $w19, $w19, 0
-# CHECKOBJDUMP: sat_s.w $w19, $w29, 0
-# CHECKOBJDUMP: sat_s.d $w11, $w22, 0
-# CHECKOBJDUMP: sat_u.b $w1, $w13, 3
-# CHECKOBJDUMP: sat_u.h $w30, $w24, 4
-# CHECKOBJDUMP: sat_u.w $w31, $w13, 0
-# CHECKOBJDUMP: sat_u.d $w29, $w16, 5
-# CHECKOBJDUMP: slli.b $w23, $w10, 1
-# CHECKOBJDUMP: slli.h $w9, $w18, 1
-# CHECKOBJDUMP: slli.w $w11, $w29, 4
-# CHECKOBJDUMP: slli.d $w25, $w20, 1
-# CHECKOBJDUMP: srai.b $w24, $w29, 1
-# CHECKOBJDUMP: srai.h $w1, $w6, 0
-# CHECKOBJDUMP: srai.w $w7, $w26, 1
-# CHECKOBJDUMP: srai.d $w20, $w25, 3
-# CHECKOBJDUMP: srari.b $w5, $w25, 0
-# CHECKOBJDUMP: srari.h $w7, $w6, 4
-# CHECKOBJDUMP: srari.w $w17, $w11, 5
-# CHECKOBJDUMP: srari.d $w21, $w25, 5
-# CHECKOBJDUMP: srli.b $w2, $w0, 2
-# CHECKOBJDUMP: srli.h $w31, $w31, 2
-# CHECKOBJDUMP: srli.w $w5, $w9, 4
-# CHECKOBJDUMP: srli.d $w27, $w26, 5
-# CHECKOBJDUMP: srlri.b $w18, $w3, 0
-# CHECKOBJDUMP: srlri.h $w1, $w2, 3
-# CHECKOBJDUMP: srlri.w $w11, $w22, 2
-# CHECKOBJDUMP: srlri.d $w24, $w10, 6
-
bclri.b $w21, $w30, 2
bclri.h $w24, $w21, 0
bclri.w $w23, $w30, 3
diff --git a/test/MC/Mips/msa/test_cbranch.s b/test/MC/Mips/msa/test_cbranch.s
index 2fc65afc1c9f..aa6779b1b46e 100644
--- a/test/MC/Mips/msa/test_cbranch.s
+++ b/test/MC/Mips/msa/test_cbranch.s
@@ -1,4 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
#CHECK: bnz.b $w0, 4 # encoding: [0x47,0x80,0x00,0x01]
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
@@ -7,22 +7,22 @@
#CHECK: bnz.w $w2, 128 # encoding: [0x47,0xc2,0x00,0x20]
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
#CHECK: bnz.d $w3, -128 # encoding: [0x47,0xe3,0xff,0xe0]
-#CHECK: bnz.b $w0, SYMBOL0 # encoding: [0x47'A',0x80'A',0x00,0x00]
+#CHECK: bnz.b $w0, SYMBOL0 # encoding: [0x47,0x80,A,A]
# fixup A - offset: 0, value: SYMBOL0, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bnz.h $w1, SYMBOL1 # encoding: [0x47'A',0xa1'A',0x00,0x00]
+#CHECK: bnz.h $w1, SYMBOL1 # encoding: [0x47,0xa1,A,A]
# fixup A - offset: 0, value: SYMBOL1, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bnz.w $w2, SYMBOL2 # encoding: [0x47'A',0xc2'A',0x00,0x00]
+#CHECK: bnz.w $w2, SYMBOL2 # encoding: [0x47,0xc2,A,A]
# fixup A - offset: 0, value: SYMBOL2, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bnz.d $w3, SYMBOL3 # encoding: [0x47'A',0xe3'A',0x00,0x00]
+#CHECK: bnz.d $w3, SYMBOL3 # encoding: [0x47,0xe3,A,A]
# fixup A - offset: 0, value: SYMBOL3, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
#CHECK: bnz.v $w0, 4 # encoding: [0x45,0xe0,0x00,0x01]
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bnz.v $w0, SYMBOL0 # encoding: [0x45'A',0xe0'A',0x00,0x00]
+#CHECK: bnz.v $w0, SYMBOL0 # encoding: [0x45,0xe0,A,A]
# fixup A - offset: 0, value: SYMBOL0, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
@@ -34,22 +34,22 @@
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
#CHECK: bz.d $w3, -1024 # encoding: [0x47,0x63,0xff,0x00]
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bz.b $w0, SYMBOL0 # encoding: [0x47'A',A,0x00,0x00]
+#CHECK: bz.b $w0, SYMBOL0 # encoding: [0x47,0x00,A,A]
# fixup A - offset: 0, value: SYMBOL0, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bz.h $w1, SYMBOL1 # encoding: [0x47'A',0x21'A',0x00,0x00]
+#CHECK: bz.h $w1, SYMBOL1 # encoding: [0x47,0x21,A,A]
# fixup A - offset: 0, value: SYMBOL1, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bz.w $w2, SYMBOL2 # encoding: [0x47'A',0x42'A',0x00,0x00]
+#CHECK: bz.w $w2, SYMBOL2 # encoding: [0x47,0x42,A,A]
# fixup A - offset: 0, value: SYMBOL2, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bz.d $w3, SYMBOL3 # encoding: [0x47'A',0x63'A',0x00,0x00]
+#CHECK: bz.d $w3, SYMBOL3 # encoding: [0x47,0x63,A,A]
# fixup A - offset: 0, value: SYMBOL3, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
#CHECK: bz.v $w0, 4 # encoding: [0x45,0x60,0x00,0x01]
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
-#CHECK: bz.v $w0, SYMBOL0 # encoding: [0x45'A',0x60'A',0x00,0x00]
+#CHECK: bz.v $w0, SYMBOL0 # encoding: [0x45,0x60,A,A]
# fixup A - offset: 0, value: SYMBOL0, kind: fixup_Mips_PC16
#CHECK: nop # encoding: [0x00,0x00,0x00,0x00]
diff --git a/test/MC/Mips/msa/test_ctrlregs.s b/test/MC/Mips/msa/test_ctrlregs.s
index f8f4f9eafd26..3329072b310d 100644
--- a/test/MC/Mips/msa/test_ctrlregs.s
+++ b/test/MC/Mips/msa/test_ctrlregs.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
#CHECK: cfcmsa $1, $0 # encoding: [0x78,0x7e,0x00,0x59]
#CHECK: cfcmsa $1, $0 # encoding: [0x78,0x7e,0x00,0x59]
@@ -36,40 +34,6 @@
#CHECK: ctcmsa $7, $8 # encoding: [0x78,0x3e,0x41,0xd9]
#CHECK: ctcmsa $7, $8 # encoding: [0x78,0x3e,0x41,0xd9]
-#CHECKOBJDUMP: cfcmsa $1, $0
-#CHECKOBJDUMP: cfcmsa $1, $0
-#CHECKOBJDUMP: cfcmsa $2, $1
-#CHECKOBJDUMP: cfcmsa $2, $1
-#CHECKOBJDUMP: cfcmsa $3, $2
-#CHECKOBJDUMP: cfcmsa $3, $2
-#CHECKOBJDUMP: cfcmsa $4, $3
-#CHECKOBJDUMP: cfcmsa $4, $3
-#CHECKOBJDUMP: cfcmsa $5, $4
-#CHECKOBJDUMP: cfcmsa $5, $4
-#CHECKOBJDUMP: cfcmsa $6, $5
-#CHECKOBJDUMP: cfcmsa $6, $5
-#CHECKOBJDUMP: cfcmsa $7, $6
-#CHECKOBJDUMP: cfcmsa $7, $6
-#CHECKOBJDUMP: cfcmsa $8, $7
-#CHECKOBJDUMP: cfcmsa $8, $7
-
-#CHECKOBJDUMP: ctcmsa $0, $1
-#CHECKOBJDUMP: ctcmsa $0, $1
-#CHECKOBJDUMP: ctcmsa $1, $2
-#CHECKOBJDUMP: ctcmsa $1, $2
-#CHECKOBJDUMP: ctcmsa $2, $3
-#CHECKOBJDUMP: ctcmsa $2, $3
-#CHECKOBJDUMP: ctcmsa $3, $4
-#CHECKOBJDUMP: ctcmsa $3, $4
-#CHECKOBJDUMP: ctcmsa $4, $5
-#CHECKOBJDUMP: ctcmsa $4, $5
-#CHECKOBJDUMP: ctcmsa $5, $6
-#CHECKOBJDUMP: ctcmsa $5, $6
-#CHECKOBJDUMP: ctcmsa $6, $7
-#CHECKOBJDUMP: ctcmsa $6, $7
-#CHECKOBJDUMP: ctcmsa $7, $8
-#CHECKOBJDUMP: ctcmsa $7, $8
-
cfcmsa $1, $msair
cfcmsa $1, $0
cfcmsa $2, $msacsr
diff --git a/test/MC/Mips/msa/test_dlsa.s b/test/MC/Mips/msa/test_dlsa.s
new file mode 100644
index 000000000000..5e14571c84e7
--- /dev/null
+++ b/test/MC/Mips/msa/test_dlsa.s
@@ -0,0 +1,12 @@
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64r2 -mattr=+msa -show-encoding | \
+# RUN: FileCheck %s
+#
+# CHECK: dlsa $8, $9, $10, 1 # encoding: [0x01,0x2a,0x40,0x15]
+# CHECK: dlsa $8, $9, $10, 2 # encoding: [0x01,0x2a,0x40,0x55]
+# CHECK: dlsa $8, $9, $10, 3 # encoding: [0x01,0x2a,0x40,0x95]
+# CHECK: dlsa $8, $9, $10, 4 # encoding: [0x01,0x2a,0x40,0xd5]
+
+ dlsa $8, $9, $10, 1
+ dlsa $8, $9, $10, 2
+ dlsa $8, $9, $10, 3
+ dlsa $8, $9, $10, 4
diff --git a/test/MC/Mips/msa/test_elm.s b/test/MC/Mips/msa/test_elm.s
index 1d0483826a97..dbe6d5c700b5 100644
--- a/test/MC/Mips/msa/test_elm.s
+++ b/test/MC/Mips/msa/test_elm.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: copy_s.b $13, $w8[2] # encoding: [0x78,0x82,0x43,0x59]
# CHECK: copy_s.h $1, $w25[0] # encoding: [0x78,0xa0,0xc8,0x59]
@@ -18,22 +16,6 @@
# CHECK: splati.d $w28, $w1[0] # encoding: [0x78,0x78,0x0f,0x19]
# CHECK: move.v $w23, $w24 # encoding: [0x78,0xbe,0xc5,0xd9]
-# CHECKOBJDUMP: copy_s.b $13, $w8[2]
-# CHECKOBJDUMP: copy_s.h $1, $w25[0]
-# CHECKOBJDUMP: copy_s.w $22, $w5[1]
-# CHECKOBJDUMP: copy_u.b $22, $w20[4]
-# CHECKOBJDUMP: copy_u.h $20, $w4[0]
-# CHECKOBJDUMP: copy_u.w $fp, $w13[2]
-# CHECKOBJDUMP: sldi.b $w0, $w29[4]
-# CHECKOBJDUMP: sldi.h $w8, $w17[0]
-# CHECKOBJDUMP: sldi.w $w20, $w27[2]
-# CHECKOBJDUMP: sldi.d $w4, $w12[0]
-# CHECKOBJDUMP: splati.b $w25, $w3[2]
-# CHECKOBJDUMP: splati.h $w24, $w28[1]
-# CHECKOBJDUMP: splati.w $w13, $w18[0]
-# CHECKOBJDUMP: splati.d $w28, $w1[0]
-# CHECKOBJDUMP: move.v $w23, $w24
-
copy_s.b $13, $w8[2]
copy_s.h $1, $w25[0]
copy_s.w $22, $w5[1]
diff --git a/test/MC/Mips/msa/test_elm_insert.s b/test/MC/Mips/msa/test_elm_insert.s
index 5fc55f3ef0c8..d58a4e0b5e73 100644
--- a/test/MC/Mips/msa/test_elm_insert.s
+++ b/test/MC/Mips/msa/test_elm_insert.s
@@ -1,15 +1,9 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: insert.b $w23[3], $sp # encoding: [0x79,0x03,0xed,0xd9]
# CHECK: insert.h $w20[2], $5 # encoding: [0x79,0x22,0x2d,0x19]
# CHECK: insert.w $w8[2], $15 # encoding: [0x79,0x32,0x7a,0x19]
-# CHECKOBJDUMP: insert.b $w23[3], $sp
-# CHECKOBJDUMP: insert.h $w20[2], $5
-# CHECKOBJDUMP: insert.w $w8[2], $15
-
insert.b $w23[3], $sp
insert.h $w20[2], $5
insert.w $w8[2], $15
diff --git a/test/MC/Mips/msa/test_elm_insert_msa64.s b/test/MC/Mips/msa/test_elm_insert_msa64.s
new file mode 100644
index 000000000000..4e99bdb4e9e5
--- /dev/null
+++ b/test/MC/Mips/msa/test_elm_insert_msa64.s
@@ -0,0 +1,5 @@
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64r2 -mattr=+msa -show-encoding | FileCheck %s
+#
+# CHECK: insert.d $w1[1], $sp # encoding: [0x79,0x39,0xe8,0x59]
+
+ insert.d $w1[1], $sp
diff --git a/test/MC/Mips/msa/test_elm_insve.s b/test/MC/Mips/msa/test_elm_insve.s
index d63d687ddfe6..0053322dc7c9 100644
--- a/test/MC/Mips/msa/test_elm_insve.s
+++ b/test/MC/Mips/msa/test_elm_insve.s
@@ -1,17 +1,10 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: insve.b $w25[3], $w9[0] # encoding: [0x79,0x43,0x4e,0x59]
# CHECK: insve.h $w24[2], $w2[0] # encoding: [0x79,0x62,0x16,0x19]
# CHECK: insve.w $w0[2], $w13[0] # encoding: [0x79,0x72,0x68,0x19]
# CHECK: insve.d $w3[0], $w18[0] # encoding: [0x79,0x78,0x90,0xd9]
-# CHECKOBJDUMP: insve.b $w25[3], $w9[0]
-# CHECKOBJDUMP: insve.h $w24[2], $w2[0]
-# CHECKOBJDUMP: insve.w $w0[2], $w13[0]
-# CHECKOBJDUMP: insve.d $w3[0], $w18[0]
-
insve.b $w25[3], $w9[0]
insve.h $w24[2], $w2[0]
insve.w $w0[2], $w13[0]
diff --git a/test/MC/Mips/msa/test_elm_msa64.s b/test/MC/Mips/msa/test_elm_msa64.s
new file mode 100644
index 000000000000..5cc9147df77a
--- /dev/null
+++ b/test/MC/Mips/msa/test_elm_msa64.s
@@ -0,0 +1,7 @@
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64r2 -mattr=+msa -show-encoding | FileCheck %s
+#
+# CHECK: copy_s.d $19, $w31[0] # encoding: [0x78,0xb8,0xfc,0xd9]
+# CHECK: copy_u.d $18, $w29[1] # encoding: [0x78,0xf9,0xec,0x99]
+
+ copy_s.d $19, $w31[0]
+ copy_u.d $18, $w29[1]
diff --git a/test/MC/Mips/msa/test_i10.s b/test/MC/Mips/msa/test_i10.s
index 828ebb539875..d89218ae105a 100644
--- a/test/MC/Mips/msa/test_i10.s
+++ b/test/MC/Mips/msa/test_i10.s
@@ -1,18 +1,10 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
-#
-
# CHECK: ldi.b $w8, 198 # encoding: [0x7b,0x06,0x32,0x07]
# CHECK: ldi.h $w20, 313 # encoding: [0x7b,0x29,0xcd,0x07]
# CHECK: ldi.w $w24, 492 # encoding: [0x7b,0x4f,0x66,0x07]
# CHECK: ldi.d $w27, -180 # encoding: [0x7b,0x7a,0x66,0xc7]
-# CHECKOBJDUMP: ldi.b $w8, 198
-# CHECKOBJDUMP: ldi.h $w20, 313
-# CHECKOBJDUMP: ldi.w $w24, 492
-# CHECKOBJDUMP: ldi.d $w27, 844
-
ldi.b $w8, 198
ldi.h $w20, 313
ldi.w $w24, 492
diff --git a/test/MC/Mips/msa/test_i5.s b/test/MC/Mips/msa/test_i5.s
index 992bfe1a2a39..d923787550e8 100644
--- a/test/MC/Mips/msa/test_i5.s
+++ b/test/MC/Mips/msa/test_i5.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: addvi.b $w3, $w31, 30 # encoding: [0x78,0x1e,0xf8,0xc6]
# CHECK: addvi.h $w24, $w13, 26 # encoding: [0x78,0x3a,0x6e,0x06]
@@ -47,51 +45,6 @@
# CHECK: subvi.w $w12, $w10, 11 # encoding: [0x78,0xcb,0x53,0x06]
# CHECK: subvi.d $w19, $w16, 7 # encoding: [0x78,0xe7,0x84,0xc6]
-# CHECKOBJDUMP: addvi.b $w3, $w31, 30
-# CHECKOBJDUMP: addvi.h $w24, $w13, 26
-# CHECKOBJDUMP: addvi.w $w26, $w20, 26
-# CHECKOBJDUMP: addvi.d $w16, $w1, 21
-# CHECKOBJDUMP: ceqi.b $w24, $w21, 24
-# CHECKOBJDUMP: ceqi.h $w31, $w15, 2
-# CHECKOBJDUMP: ceqi.w $w12, $w1, 31
-# CHECKOBJDUMP: ceqi.d $w24, $w22, 7
-# CHECKOBJDUMP: clei_s.b $w12, $w16, 1
-# CHECKOBJDUMP: clei_s.h $w2, $w10, 23
-# CHECKOBJDUMP: clei_s.w $w4, $w11, 22
-# CHECKOBJDUMP: clei_s.d $w0, $w29, 22
-# CHECKOBJDUMP: clei_u.b $w21, $w17, 3
-# CHECKOBJDUMP: clei_u.h $w29, $w7, 17
-# CHECKOBJDUMP: clei_u.w $w1, $w1, 2
-# CHECKOBJDUMP: clei_u.d $w27, $w27, 29
-# CHECKOBJDUMP: clti_s.b $w19, $w13, 25
-# CHECKOBJDUMP: clti_s.h $w15, $w10, 20
-# CHECKOBJDUMP: clti_s.w $w12, $w12, 11
-# CHECKOBJDUMP: clti_s.d $w29, $w20, 17
-# CHECKOBJDUMP: clti_u.b $w14, $w9, 29
-# CHECKOBJDUMP: clti_u.h $w24, $w25, 25
-# CHECKOBJDUMP: clti_u.w $w1, $w1, 22
-# CHECKOBJDUMP: clti_u.d $w21, $w25, 1
-# CHECKOBJDUMP: maxi_s.b $w22, $w21, 1
-# CHECKOBJDUMP: maxi_s.h $w29, $w5, 24
-# CHECKOBJDUMP: maxi_s.w $w1, $w10, 20
-# CHECKOBJDUMP: maxi_s.d $w13, $w29, 16
-# CHECKOBJDUMP: maxi_u.b $w20, $w0, 12
-# CHECKOBJDUMP: maxi_u.h $w1, $w14, 3
-# CHECKOBJDUMP: maxi_u.w $w27, $w22, 11
-# CHECKOBJDUMP: maxi_u.d $w26, $w6, 4
-# CHECKOBJDUMP: mini_s.b $w4, $w1, 1
-# CHECKOBJDUMP: mini_s.h $w27, $w27, 23
-# CHECKOBJDUMP: mini_s.w $w28, $w11, 9
-# CHECKOBJDUMP: mini_s.d $w11, $w10, 10
-# CHECKOBJDUMP: mini_u.b $w18, $w23, 27
-# CHECKOBJDUMP: mini_u.h $w7, $w26, 18
-# CHECKOBJDUMP: mini_u.w $w11, $w12, 26
-# CHECKOBJDUMP: mini_u.d $w11, $w15, 2
-# CHECKOBJDUMP: subvi.b $w24, $w20, 19
-# CHECKOBJDUMP: subvi.h $w11, $w19, 4
-# CHECKOBJDUMP: subvi.w $w12, $w10, 11
-# CHECKOBJDUMP: subvi.d $w19, $w16, 7
-
addvi.b $w3, $w31, 30
addvi.h $w24, $w13, 26
addvi.w $w26, $w20, 26
diff --git a/test/MC/Mips/msa/test_i8.s b/test/MC/Mips/msa/test_i8.s
index 2604be0bb6d6..b520bb4452e7 100644
--- a/test/MC/Mips/msa/test_i8.s
+++ b/test/MC/Mips/msa/test_i8.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: andi.b $w2, $w29, 48 # encoding: [0x78,0x30,0xe8,0x80]
# CHECK: bmnzi.b $w6, $w22, 126 # encoding: [0x78,0x7e,0xb1,0x81]
@@ -13,17 +11,6 @@
# CHECK: shf.w $w14, $w3, 93 # encoding: [0x7a,0x5d,0x1b,0x82]
# CHECK: xori.b $w16, $w10, 20 # encoding: [0x7b,0x14,0x54,0x00]
-# CHECKOBJDUMP: andi.b $w2, $w29, 48
-# CHECKOBJDUMP: bmnzi.b $w6, $w22, 126
-# CHECKOBJDUMP: bmzi.b $w27, $w1, 88
-# CHECKOBJDUMP: bseli.b $w29, $w3, 189
-# CHECKOBJDUMP: nori.b $w1, $w17, 56
-# CHECKOBJDUMP: ori.b $w26, $w20, 135
-# CHECKOBJDUMP: shf.b $w19, $w30, 105
-# CHECKOBJDUMP: shf.h $w17, $w8, 76
-# CHECKOBJDUMP: shf.w $w14, $w3, 93
-# CHECKOBJDUMP: xori.b $w16, $w10, 20
-
andi.b $w2, $w29, 48
bmnzi.b $w6, $w22, 126
bmzi.b $w27, $w1, 88
diff --git a/test/MC/Mips/msa/test_lsa.s b/test/MC/Mips/msa/test_lsa.s
index 6d1d868fc861..22fd0b3039ed 100644
--- a/test/MC/Mips/msa/test_lsa.s
+++ b/test/MC/Mips/msa/test_lsa.s
@@ -1,17 +1,10 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: lsa $8, $9, $10, 1 # encoding: [0x01,0x2a,0x40,0x05]
# CHECK: lsa $8, $9, $10, 2 # encoding: [0x01,0x2a,0x40,0x45]
# CHECK: lsa $8, $9, $10, 3 # encoding: [0x01,0x2a,0x40,0x85]
# CHECK: lsa $8, $9, $10, 4 # encoding: [0x01,0x2a,0x40,0xc5]
-# CHECKOBJDUMP: lsa $8, $9, $10, 1
-# CHECKOBJDUMP: lsa $8, $9, $10, 2
-# CHECKOBJDUMP: lsa $8, $9, $10, 3
-# CHECKOBJDUMP: lsa $8, $9, $10, 4
-
lsa $8, $9, $10, 1
lsa $8, $9, $10, 2
lsa $8, $9, $10, 3
diff --git a/test/MC/Mips/msa/test_mi10.s b/test/MC/Mips/msa/test_mi10.s
index 80257cda8516..7269960531ca 100644
--- a/test/MC/Mips/msa/test_mi10.s
+++ b/test/MC/Mips/msa/test_mi10.s
@@ -1,30 +1,55 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
-#
-# CHECK: ld.b $w2, 1($7) # encoding: [0x78,0x01,0x38,0xa0]
-# CHECK: ld.h $w16, -9($zero) # encoding: [0x7b,0xf7,0x04,0x21]
-# CHECK: ld.w $w13, -6($4) # encoding: [0x7b,0xfa,0x23,0x62]
-# CHECK: ld.d $w1, -5($16) # encoding: [0x7b,0xfb,0x80,0x63]
-# CHECK: st.b $w29, 1($14) # encoding: [0x78,0x01,0x77,0x64]
-# CHECK: st.h $w6, -1($8) # encoding: [0x7b,0xff,0x41,0xa5]
-# CHECK: st.w $w18, 8($15) # encoding: [0x78,0x08,0x7c,0xa6]
-# CHECK: st.d $w3, -14($18) # encoding: [0x7b,0xf2,0x90,0xe7]
+# CHECK: ld.b $w0, -512($1) # encoding: [0x7a,0x00,0x08,0x20]
+# CHECK: ld.b $w1, 0($2) # encoding: [0x78,0x00,0x10,0x60]
+# CHECK: ld.b $w2, 511($3) # encoding: [0x79,0xff,0x18,0xa0]
+
+# CHECK: ld.h $w3, -1024($4) # encoding: [0x7a,0x00,0x20,0xe1]
+# CHECK: ld.h $w4, -512($5) # encoding: [0x7b,0x00,0x29,0x21]
+# CHECK: ld.h $w5, 0($6) # encoding: [0x78,0x00,0x31,0x61]
+# CHECK: ld.h $w6, 512($7) # encoding: [0x79,0x00,0x39,0xa1]
+# CHECK: ld.h $w7, 1022($8) # encoding: [0x79,0xff,0x41,0xe1]
+
+# CHECK: ld.w $w8, -2048($9) # encoding: [0x7a,0x00,0x4a,0x22]
+# CHECK: ld.w $w9, -1024($10) # encoding: [0x7b,0x00,0x52,0x62]
+# CHECK: ld.w $w10, -512($11) # encoding: [0x7b,0x80,0x5a,0xa2]
+# CHECK: ld.w $w11, 512($12) # encoding: [0x78,0x80,0x62,0xe2]
+# CHECK: ld.w $w12, 1024($13) # encoding: [0x79,0x00,0x6b,0x22]
+# CHECK: ld.w $w13, 2044($14) # encoding: [0x79,0xff,0x73,0x62]
+
+# CHECK: ld.d $w14, -4096($15) # encoding: [0x7a,0x00,0x7b,0xa3]
+# CHECK: ld.d $w15, -2048($16) # encoding: [0x7b,0x00,0x83,0xe3]
+# CHECK: ld.d $w16, -1024($17) # encoding: [0x7b,0x80,0x8c,0x23]
+# CHECK: ld.d $w17, -512($18) # encoding: [0x7b,0xc0,0x94,0x63]
+# CHECK: ld.d $w18, 0($19) # encoding: [0x78,0x00,0x9c,0xa3]
+# CHECK: ld.d $w19, 512($20) # encoding: [0x78,0x40,0xa4,0xe3]
+# CHECK: ld.d $w20, 1024($21) # encoding: [0x78,0x80,0xad,0x23]
+# CHECK: ld.d $w21, 2048($22) # encoding: [0x79,0x00,0xb5,0x63]
+# CHECK: ld.d $w22, 4088($23) # encoding: [0x79,0xff,0xbd,0xa3]
+
+ ld.b $w0, -512($1)
+ ld.b $w1, 0($2)
+ ld.b $w2, 511($3)
+
+ ld.h $w3, -1024($4)
+ ld.h $w4, -512($5)
+ ld.h $w5, 0($6)
+ ld.h $w6, 512($7)
+ ld.h $w7, 1022($8)
-# CHECKOBJDUMP: ld.b $w2, 1($7)
-# CHECKOBJDUMP: ld.h $w16, -9($zero)
-# CHECKOBJDUMP: ld.w $w13, -6($4)
-# CHECKOBJDUMP: ld.d $w1, -5($16)
-# CHECKOBJDUMP: st.b $w29, 1($14)
-# CHECKOBJDUMP: st.h $w6, -1($8)
-# CHECKOBJDUMP: st.w $w18, 8($15)
-# CHECKOBJDUMP: st.d $w3, -14($18)
+ ld.w $w8, -2048($9)
+ ld.w $w9, -1024($10)
+ ld.w $w10, -512($11)
+ ld.w $w11, 512($12)
+ ld.w $w12, 1024($13)
+ ld.w $w13, 2044($14)
- ld.b $w2, 1($7)
- ld.h $w16, -9($zero)
- ld.w $w13, -6($4)
- ld.d $w1, -5($16)
- st.b $w29, 1($14)
- st.h $w6, -1($8)
- st.w $w18, 8($15)
- st.d $w3, -14($18)
+ ld.d $w14, -4096($15)
+ ld.d $w15, -2048($16)
+ ld.d $w16, -1024($17)
+ ld.d $w17, -512($18)
+ ld.d $w18, 0($19)
+ ld.d $w19, 512($20)
+ ld.d $w20, 1024($21)
+ ld.d $w21, 2048($22)
+ ld.d $w22, 4088($23)
diff --git a/test/MC/Mips/msa/test_vec.s b/test/MC/Mips/msa/test_vec.s
index 9294f3703cb7..3f989d3a5946 100644
--- a/test/MC/Mips/msa/test_vec.s
+++ b/test/MC/Mips/msa/test_vec.s
@@ -1,6 +1,4 @@
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 -mattr=+msa -arch=mips | FileCheck %s
-#
-# RUN: llvm-mc %s -triple=mipsel-unknown-linux -mcpu=mips32r2 -mattr=+msa -arch=mips -filetype=obj -o - | llvm-objdump -d -triple=mipsel-unknown-linux -mattr=+msa -arch=mips - | FileCheck %s -check-prefix=CHECKOBJDUMP
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32r2 -mattr=+msa -show-encoding | FileCheck %s
#
# CHECK: and.v $w25, $w20, $w27 # encoding: [0x78,0x1b,0xa6,0x5e]
# CHECK: bmnz.v $w17, $w6, $w7 # encoding: [0x78,0x87,0x34,0x5e]
@@ -10,14 +8,6 @@
# CHECK: or.v $w24, $w26, $w30 # encoding: [0x78,0x3e,0xd6,0x1e]
# CHECK: xor.v $w7, $w27, $w15 # encoding: [0x78,0x6f,0xd9,0xde]
-# CHECKOBJDUMP: and.v $w25, $w20, $w27
-# CHECKOBJDUMP: bmnz.v $w17, $w6, $w7
-# CHECKOBJDUMP: bmz.v $w3, $w17, $w9
-# CHECKOBJDUMP: bsel.v $w8, $w0, $w14
-# CHECKOBJDUMP: nor.v $w7, $w31, $w0
-# CHECKOBJDUMP: or.v $w24, $w26, $w30
-# CHECKOBJDUMP: xor.v $w7, $w27, $w15
-
and.v $w25, $w20, $w27
bmnz.v $w17, $w6, $w7
bmz.v $w3, $w17, $w9
diff --git a/test/MC/Mips/nabi-regs.s b/test/MC/Mips/nabi-regs.s
index 050fb8134880..d79df4e28463 100644
--- a/test/MC/Mips/nabi-regs.s
+++ b/test/MC/Mips/nabi-regs.s
@@ -3,10 +3,14 @@
# for 4 more register parameters (A registers) offsetting
# the T registers.
#
-# For now just check N64
# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding \
-# RUN: -mcpu=mips64r2 -arch=mips64 | \
-# RUN: FileCheck %s
+# RUN: -mcpu=mips64r2 -arch=mips64 | FileCheck %s
+#
+# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding \
+# RUN: -mcpu=mips64r2 -arch=mips64 -mattr=-n64,+n32 | FileCheck %s
+#
+# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding \
+# RUN: -mcpu=mips64r2 -arch=mips64 -mattr=-n64,+n64 | FileCheck %s
.text
foo:
diff --git a/test/MC/Mips/nacl-mask.s b/test/MC/Mips/nacl-mask.s
new file mode 100644
index 000000000000..22286ac7dbbc
--- /dev/null
+++ b/test/MC/Mips/nacl-mask.s
@@ -0,0 +1,319 @@
+# RUN: llvm-mc -filetype=obj -triple=mipsel-unknown-nacl %s \
+# RUN: | llvm-objdump -triple mipsel -disassemble -no-show-raw-insn - \
+# RUN: | FileCheck %s
+
+# Test that address-masking sandboxing is inserted when the assembler is given
+# assembly input.
+
+
+# Test that address-masking sandboxing is added before indirect branches and
+# returns.
+
+ .align 4
+test1:
+ .set noreorder
+
+ jr $a0
+ nop
+ jr $ra
+ nop
+
+# CHECK-LABEL: test1:
+
+# CHECK: and $4, $4, $14
+# CHECK-NEXT: jr $4
+
+# Check that an additional nop is inserted to align the mask and the jr to the
+# next bundle.
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+
+# CHECK: and $ra, $ra, $14
+# CHECK-NEXT: jr $ra
+
+
+
+# Test that address-masking sandboxing is added before load instructions.
+
+ .align 4
+test2:
+ .set noreorder
+
+ lb $4, 0($1)
+ nop
+ lbu $4, 0($2)
+ lh $4, 0($3)
+ lhu $1, 0($4)
+ lw $4, 0($5)
+ lwc1 $f0, 0($6)
+ ldc1 $f2, 0($7)
+ ll $4, 0($8)
+ lwl $4, 0($9)
+ lwr $4, 0($10)
+
+ lw $4, 0($sp)
+ lw $4, 0($t8)
+
+# CHECK-LABEL: test2:
+
+# CHECK: and $1, $1, $15
+# CHECK-NEXT: lb $4, 0($1)
+
+# Check that an additional nop is inserted to align the mask and the load to
+# the next bundle.
+
+# CHECK: nop
+# CHECK: nop
+
+# CHECK: and $2, $2, $15
+# CHECK-NEXT: lbu $4, 0($2)
+
+# CHECK: and $3, $3, $15
+# CHECK-NEXT: lh $4, 0($3)
+
+# CHECK: and $4, $4, $15
+# CHECK-NEXT: lhu $1, 0($4)
+
+# CHECK: and $5, $5, $15
+# CHECK-NEXT: lw $4, 0($5)
+
+# CHECK: and $6, $6, $15
+# CHECK-NEXT: lwc1 $f0, 0($6)
+
+# CHECK: and $7, $7, $15
+# CHECK-NEXT: ldc1 $f2, 0($7)
+
+# CHECK: and $8, $8, $15
+# CHECK-NEXT: ll $4, 0($8)
+
+# CHECK: and $9, $9, $15
+# CHECK-NEXT: lwl $4, 0($9)
+
+# CHECK: and $10, $10, $15
+# CHECK-NEXT: lwr $4, 0($10)
+
+
+# Check that loads whose base register is $sp or $t8 (the thread pointer
+# register) are not masked.
+
+# CHECK-NOT: and
+# CHECK: lw $4, 0($sp)
+# CHECK-NOT: and
+# CHECK: lw $4, 0($24)
+
+
+
+# Test that address-masking sandboxing is added before store instructions.
+
+ .align 4
+test3:
+ .set noreorder
+
+ sb $4, 0($1)
+ nop
+ sh $4, 0($2)
+ sw $4, 0($3)
+ swc1 $f0, 0($4)
+ sdc1 $f2, 0($5)
+ swl $4, 0($6)
+ swr $4, 0($7)
+ sc $4, 0($8)
+
+ sw $4, 0($sp)
+ sw $4, 0($t8)
+
+# CHECK-LABEL: test3:
+
+# CHECK: and $1, $1, $15
+# CHECK-NEXT: sb $4, 0($1)
+
+# Check that an additional nop is inserted to align the mask and the store to
+# the next bundle.
+
+# CHECK: nop
+# CHECK: nop
+
+# CHECK: and $2, $2, $15
+# CHECK-NEXT: sh $4, 0($2)
+
+# CHECK: and $3, $3, $15
+# CHECK-NEXT: sw $4, 0($3)
+
+# CHECK: and $4, $4, $15
+# CHECK-NEXT: swc1 $f0, 0($4)
+
+# CHECK: and $5, $5, $15
+# CHECK-NEXT: sdc1 $f2, 0($5)
+
+# CHECK: and $6, $6, $15
+# CHECK-NEXT: swl $4, 0($6)
+
+# CHECK: and $7, $7, $15
+# CHECK-NEXT: swr $4, 0($7)
+
+# CHECK: and $8, $8, $15
+# CHECK-NEXT: sc $4, 0($8)
+
+
+# Check that stores whose base register is $sp or $t8 (the thread pointer
+# register) are not masked.
+
+# CHECK-NOT: and
+# CHECK: sw $4, 0($sp)
+# CHECK-NOT: and
+# CHECK: sw $4, 0($24)
+
+
+
+# Test that address-masking sandboxing is added after instructions that change
+# the stack pointer.
+
+ .align 4
+test4:
+ .set noreorder
+
+ addiu $sp, $sp, 24
+ nop
+ addu $sp, $sp, $1
+ lw $sp, 0($2)
+ lw $sp, 123($sp)
+ sw $sp, 123($sp)
+
+# CHECK-LABEL: test4:
+
+# CHECK: addiu $sp, $sp, 24
+# CHECK-NEXT: and $sp, $sp, $15
+
+# Check that an additional nop is inserted to align the instruction and the
+# mask to the next bundle.
+
+# CHECK: nop
+# CHECK: nop
+
+# CHECK: addu $sp, $sp, $1
+# CHECK-NEXT: and $sp, $sp, $15
+
+# Since the sandboxing sequence we check next consists of 3 instructions, check
+# that 2 additional nops are inserted to align it to the next bundle.
+
+# CHECK: nop
+# CHECK: nop
+
+
+# Check that for instructions that both change the stack pointer and load from
+# memory, masks are added before and after the instruction.
+
+# CHECK: and $2, $2, $15
+# CHECK-NEXT: lw $sp, 0($2)
+# CHECK-NEXT: and $sp, $sp, $15
+
+# For loads where $sp is both the destination and the base, check that the mask
+# is added after the load but not before it.
+
+# CHECK-NOT: and
+# CHECK: lw $sp, 123($sp)
+# CHECK-NEXT: and $sp, $sp, $15
+
+# For stores where $sp is both the destination and the base, check that no mask
+# is added either before or after.
+
+# CHECK-NOT: and
+# CHECK: sw $sp, 123($sp)
+# CHECK-NOT: and
+
+
+
+# Test that a call and its branch-delay slot are aligned to the end of a bundle.
+# Test that a mask is added before indirect calls.
+
+ .align 4
+test5:
+ .set noreorder
+
+ jal func1
+ addiu $4, $zero, 1
+
+ nop
+ bal func2
+ addiu $4, $zero, 2
+
+ nop
+ nop
+ bltzal $t1, func3
+ addiu $4, $zero, 3
+
+ nop
+ nop
+ nop
+ bgezal $t2, func4
+ addiu $4, $zero, 4
+
+ jalr $t9
+ addiu $4, $zero, 5
+
+# CHECK-LABEL: test5:
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: jal
+# CHECK-NEXT: addiu $4, $zero, 1
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: bal
+# CHECK-NEXT: addiu $4, $zero, 2
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: bltzal
+# CHECK-NEXT: addiu $4, $zero, 3
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: bgezal
+# CHECK-NEXT: addiu $4, $zero, 4
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: and $25, $25, $14
+# CHECK-NEXT: jalr $25
+# CHECK-NEXT: addiu $4, $zero, 5
+
+
+
+# Test that we can put non-dangerous loads and stores in the branch delay slot.
+
+ .align 4
+test6:
+ .set noreorder
+
+ jal func1
+ sw $4, 0($sp)
+
+ bal func2
+ lw $5, 0($t8)
+
+ jalr $t9
+ sw $sp, 0($sp)
+
+# CHECK-LABEL: test6:
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: jal
+# CHECK-NEXT: sw $4, 0($sp)
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: nop
+# CHECK-NEXT: bal
+# CHECK-NEXT: lw $5, 0($24)
+
+# CHECK-NEXT: nop
+# CHECK-NEXT: and $25, $25, $14
+# CHECK-NEXT: jalr
+# CHECK-NEXT: sw $sp, 0($sp)
diff --git a/test/MC/Mips/nooddspreg-cmdarg.s b/test/MC/Mips/nooddspreg-cmdarg.s
new file mode 100644
index 000000000000..52b040e96f12
--- /dev/null
+++ b/test/MC/Mips/nooddspreg-cmdarg.s
@@ -0,0 +1,42 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -mattr=+fp64,+nooddspreg | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -mattr=+fp64,+nooddspreg -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# RUN: not llvm-mc %s -arch=mips -mcpu=mips64 -mattr=-n64,+n32,+nooddspreg 2> %t0
+# RUN: FileCheck %s -check-prefix=INVALID < %t0
+#
+# RUN: not llvm-mc %s -arch=mips -mcpu=mips64 -mattr=+nooddspreg 2> %t0
+# RUN: FileCheck %s -check-prefix=INVALID < %t0
+#
+# CHECK-ASM-NOT: .module nooddspreg
+
+# Check that the .MIPS.abiflags section was emitted correctly.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags (12)
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00002001 01020007 00000000 00000000 |.. .............|
+# CHECK-OBJ: 0010: 00000000 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+# INVALID: ERROR: -mno-odd-spreg requires the O32 ABI
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/nooddspreg-error.s b/test/MC/Mips/nooddspreg-error.s
new file mode 100644
index 000000000000..b4aabbef7280
--- /dev/null
+++ b/test/MC/Mips/nooddspreg-error.s
@@ -0,0 +1,14 @@
+# RUN: not llvm-mc %s -arch=mips -mcpu=mips32 -mattr=+fp64 2> %t0 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+# RUN: FileCheck %s -check-prefix=CHECK-ERROR < %t0
+#
+ .module nooddspreg
+# CHECK-ASM: .module nooddspreg
+
+ add.s $f1, $f2, $f5
+# CHECK-ERROR: :[[@LINE-1]]:15: error: -mno-odd-spreg prohibits the use of odd FPU registers
+# CHECK-ERROR: :[[@LINE-2]]:25: error: -mno-odd-spreg prohibits the use of odd FPU registers
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/nooddspreg.s b/test/MC/Mips/nooddspreg.s
new file mode 100644
index 000000000000..f268ef48297d
--- /dev/null
+++ b/test/MC/Mips/nooddspreg.s
@@ -0,0 +1,44 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -mattr=+fp64 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -mattr=+fp64 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ
+
+# RUN: not llvm-mc %s -arch=mips -mcpu=mips64 -mattr=-n64,n32 2> %t1
+# RUN: FileCheck %s -check-prefix=INVALID < %t1
+#
+# RUN: not llvm-mc %s -arch=mips -mcpu=mips64 2> %t2
+# RUN: FileCheck %s -check-prefix=INVALID < %t2
+#
+# CHECK-ASM: .module nooddspreg
+
+# Check that the .MIPS.abiflags section was emitted correctly.
+# CHECK-OBJ: Section {
+# CHECK-OBJ: Index: 5
+# CHECK-OBJ-LABEL: Name: .MIPS.abiflags (12)
+# CHECK-OBJ: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ: Flags [ (0x2)
+# CHECK-OBJ: SHF_ALLOC (0x2)
+# CHECK-OBJ: ]
+# CHECK-OBJ: Address: 0x0
+# CHECK-OBJ: Size: 24
+# CHECK-OBJ: Link: 0
+# CHECK-OBJ: Info: 0
+# CHECK-OBJ: AddressAlignment: 8
+# CHECK-OBJ: EntrySize: 24
+# CHECK-OBJ: Relocations [
+# CHECK-OBJ: ]
+# CHECK-OBJ: SectionData (
+# CHECK-OBJ: 0000: 00002001 01020007 00000000 00000000 |.. .............|
+# CHECK-OBJ: 0010: 00000000 00000000 |........|
+# CHECK-OBJ: )
+# CHECK-OBJ-LABEL: }
+
+# INVALID: '.module nooddspreg' requires the O32 ABI
+
+ .module nooddspreg
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/octeon-instructions.s b/test/MC/Mips/octeon-instructions.s
new file mode 100644
index 000000000000..b7c89b47f871
--- /dev/null
+++ b/test/MC/Mips/octeon-instructions.s
@@ -0,0 +1,85 @@
+# RUN: llvm-mc %s -triple=mips64-unknown-linux -show-encoding -mcpu=octeon | FileCheck %s
+
+# CHECK: baddu $9, $6, $7 # encoding: [0x70,0xc7,0x48,0x28]
+# CHECK: baddu $17, $18, $19 # encoding: [0x72,0x53,0x88,0x28]
+# CHECK: baddu $2, $2, $3 # encoding: [0x70,0x43,0x10,0x28]
+# CHECK: cins $25, $10, 22, 2 # encoding: [0x71,0x59,0x15,0xb2]
+# CHECK: cins $9, $9, 17, 29 # encoding: [0x71,0x29,0xec,0x72]
+# CHECK: cins32 $15, $2, 18, 8 # encoding: [0x70,0x4f,0x44,0xb3]
+# CHECK: cins32 $22, $22, 9, 22 # encoding: [0x72,0xd6,0xb2,0x73]
+# CHECK: dmul $9, $6, $7 # encoding: [0x70,0xc7,0x48,0x03]
+# CHECK: dmul $19, $24, $25 # encoding: [0x73,0x19,0x98,0x03]
+# CHECK: dmul $9, $9, $6 # encoding: [0x71,0x26,0x48,0x03]
+# CHECK: dmul $21, $21, $25 # encoding: [0x72,0xb9,0xa8,0x03]
+# CHECK: dpop $9, $6 # encoding: [0x70,0xc0,0x48,0x2d]
+# CHECK: dpop $15, $22 # encoding: [0x72,0xc0,0x78,0x2d]
+# CHECK: dpop $12, $12 # encoding: [0x71,0x80,0x60,0x2d]
+# CHECK: exts $4, $25, 27, 15 # encoding: [0x73,0x24,0x7e,0xfa]
+# CHECK: exts $15, $15, 17, 6 # encoding: [0x71,0xef,0x34,0x7a]
+# CHECK: exts32 $4, $13, 10, 8 # encoding: [0x71,0xa4,0x42,0xbb]
+# CHECK: exts32 $15, $15, 11, 20 # encoding: [0x71,0xef,0xa2,0xfb]
+# CHECK: mtm0 $15 # encoding: [0x71,0xe0,0x00,0x08]
+# CHECK: mtm1 $16 # encoding: [0x72,0x00,0x00,0x0c]
+# CHECK: mtm2 $17 # encoding: [0x72,0x20,0x00,0x0d]
+# CHECK: mtp0 $18 # encoding: [0x72,0x40,0x00,0x09]
+# CHECK: mtp1 $19 # encoding: [0x72,0x60,0x00,0x0a]
+# CHECK: mtp2 $20 # encoding: [0x72,0x80,0x00,0x0b]
+# CHECK: pop $9, $6 # encoding: [0x70,0xc0,0x48,0x2c]
+# CHECK: pop $8, $19 # encoding: [0x72,0x60,0x40,0x2c]
+# CHECK: pop $2, $2 # encoding: [0x70,0x40,0x10,0x2c]
+# CHECK: seq $25, $23, $24 # encoding: [0x72,0xf8,0xc8,0x2a]
+# CHECK: seq $6, $6, $24 # encoding: [0x70,0xd8,0x30,0x2a]
+# CHECK: seqi $17, $15, -512 # encoding: [0x71,0xf1,0x80,0x2e]
+# CHECK: seqi $16, $16, 38 # encoding: [0x72,0x10,0x09,0xae]
+# CHECK: sne $25, $23, $24 # encoding: [0x72,0xf8,0xc8,0x2b]
+# CHECK: sne $23, $23, $20 # encoding: [0x72,0xf4,0xb8,0x2b]
+# CHECK: snei $4, $16, -313 # encoding: [0x72,0x04,0xb1,0xef]
+# CHECK: snei $26, $26, 511 # encoding: [0x73,0x5a,0x7f,0xef]
+# CHECK: v3mulu $21, $10, $21 # encoding: [0x71,0x55,0xa8,0x11]
+# CHECK: v3mulu $20, $20, $10 # encoding: [0x72,0x8a,0xa0,0x11]
+# CHECK: vmm0 $3, $19, $16 # encoding: [0x72,0x70,0x18,0x10]
+# CHECK: vmm0 $ra, $ra, $9 # encoding: [0x73,0xe9,0xf8,0x10]
+# CHECK: vmulu $sp, $10, $17 # encoding: [0x71,0x51,0xe8,0x0f]
+# CHECK: vmulu $27, $27, $6 # encoding: [0x73,0x66,0xd8,0x0f]
+
+ baddu $9, $6, $7
+ baddu $17, $18, $19
+ baddu $2, $3
+ cins $25, $10, 22, 2
+ cins $9, 17, 29
+ cins32 $15, $2, 18, 8
+ cins32 $22, 9, 22
+ dmul $9, $6, $7
+ dmul $19, $24, $25
+ dmul $9, $6
+ dmul $21, $25
+ dpop $9, $6
+ dpop $15, $22
+ dpop $12
+ exts $4, $25, 27, 15
+ exts $15, 17, 6
+ exts32 $4, $13, 10, 8
+ exts32 $15, 11, 20
+ mtm0 $15
+ mtm1 $16
+ mtm2 $17
+ mtp0 $18
+ mtp1 $19
+ mtp2 $20
+ pop $9, $6
+ pop $8, $19
+ pop $2
+ seq $25, $23, $24
+ seq $6, $24
+ seqi $17, $15, -512
+ seqi $16, 38
+ sne $25, $23, $24
+ sne $23, $20
+ snei $4, $16, -313
+ snei $26, 511
+ v3mulu $21, $10, $21
+ v3mulu $20, $10
+ vmm0 $3, $19, $16
+ vmm0 $31, $9
+ vmulu $29, $10, $17
+ vmulu $27, $6
diff --git a/test/MC/Mips/oddspreg.s b/test/MC/Mips/oddspreg.s
new file mode 100644
index 000000000000..32ba9e0a3f93
--- /dev/null
+++ b/test/MC/Mips/oddspreg.s
@@ -0,0 +1,69 @@
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -mattr=+fp64 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips -mcpu=mips32 -mattr=+fp64 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ-ALL -check-prefix=CHECK-OBJ-O32
+#
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64 -mattr=-n64,+n32 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64 -mattr=-n64,+n32 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ-ALL -check-prefix=CHECK-OBJ-N32
+
+# RUN: llvm-mc %s -arch=mips64 -mcpu=mips64 | \
+# RUN: FileCheck %s -check-prefix=CHECK-ASM
+#
+# Repeat the -filetype=obj tests but this time use an empty assembly file. The
+# output should be unchanged.
+# RUN: llvm-mc /dev/null -arch=mips64 -mcpu=mips64 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ-ALL -check-prefix=CHECK-OBJ-N64
+
+# RUN: llvm-mc /dev/null -arch=mips -mcpu=mips32 -mattr=+fp64 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ-ALL -check-prefix=CHECK-OBJ-O32
+#
+# RUN: llvm-mc /dev/null -arch=mips64 -mcpu=mips64 -mattr=-n64,+n32 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ-ALL -check-prefix=CHECK-OBJ-N32
+
+# RUN: llvm-mc /dev/null -arch=mips64 -mcpu=mips64 -filetype=obj -o - | \
+# RUN: llvm-readobj -sections -section-data -section-relocations - | \
+# RUN: FileCheck %s -check-prefix=CHECK-OBJ-ALL -check-prefix=CHECK-OBJ-N64
+
+# CHECK-ASM: .module oddspreg
+
+# Check that the .MIPS.abiflags section was emitted correctly.
+# CHECK-OBJ-ALL: Section {
+# CHECK-OBJ-ALL: Index: 5
+# CHECK-OBJ-ALL-LABEL: Name: .MIPS.abiflags ({{[0-9]+}})
+# CHECK-OBJ-ALL: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+# CHECK-OBJ-ALL: Flags [ (0x2)
+# CHECK-OBJ-ALL: SHF_ALLOC (0x2)
+# CHECK-OBJ-ALL: ]
+# CHECK-OBJ-ALL: Address: 0x0
+# CHECK-OBJ-ALL: Size: 24
+# CHECK-OBJ-ALL: Link: 0
+# CHECK-OBJ-ALL: Info: 0
+# CHECK-OBJ-ALL: AddressAlignment: 8
+# CHECK-OBJ-ALL: EntrySize: 24
+# CHECK-OBJ-ALL: Relocations [
+# CHECK-OBJ-ALL: ]
+# CHECK-OBJ-ALL: SectionData (
+# CHECK-OBJ-O32: 0000: 00002001 01020006 00000000 00000000 |.. .............|
+# CHECK-OBJ-O32: 0010: 00000001 00000000 |........|
+# CHECK-OBJ-N32: 0000: 00004001 02020001 00000000 00000000 |..@.............|
+# CHECK-OBJ-N32: 0010: 00000001 00000000 |........|
+# CHECK-OBJ-N64: 0000: 00004001 02020001 00000000 00000000 |..@.............|
+# CHECK-OBJ-N64: 0010: 00000001 00000000 |........|
+# CHECK-OBJ-ALL: )
+# CHECK-OBJ-ALL-LABEL: }
+
+ .module oddspreg
+ add.s $f3, $f1, $f5
+
+# FIXME: Test should include gnu_attributes directive when implemented.
+# An explicit .gnu_attribute must be checked against the effective
+# command line options and any inconsistencies reported via a warning.
diff --git a/test/MC/Mips/r-mips-got-disp.ll b/test/MC/Mips/r-mips-got-disp.ll
deleted file mode 100644
index 7e78a46649d8..000000000000
--- a/test/MC/Mips/r-mips-got-disp.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc -march=mips64el -filetype=obj -mcpu=mips64r2 < %s -o - | llvm-readobj -r | FileCheck %s
-
-; Check that the R_MIPS_GOT_DISP relocations were created.
-
-; CHECK: Relocations [
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_DISP
-
-@shl = global i64 1, align 8
-@.str = private unnamed_addr constant [8 x i8] c"0x%llx\0A\00", align 1
-
-define i32 @main() nounwind {
-entry:
- %0 = load i64* @shl, align 8
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i64 0, i64 0), i64 %0) nounwind
- ret i32 0
-}
-
-declare i32 @printf(i8* nocapture, ...) nounwind
-
diff --git a/test/MC/Mips/r-mips-got-disp.s b/test/MC/Mips/r-mips-got-disp.s
new file mode 100644
index 000000000000..3cadc2284223
--- /dev/null
+++ b/test/MC/Mips/r-mips-got-disp.s
@@ -0,0 +1,65 @@
+// RUN: llvm-mc -triple=mips64el-pc-linux -filetype=obj -mcpu=mips64r2 < %s -o - | llvm-readobj -r | FileCheck %s
+
+// Check that the R_MIPS_GOT_DISP relocations were created.
+
+// CHECK: Relocations [
+// CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_DISP
+
+ .text
+ .abicalls
+ .section .mdebug.abi64,"",@progbits
+ .file "<stdin>"
+ .text
+ .globl main
+ .align 3
+ .type main,@function
+ .set nomips16
+ .ent main
+main: # @main
+ .frame $sp,16,$ra
+ .mask 0x00000000,0
+ .fmask 0x90000000,-4
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ daddiu $sp, $sp, -16
+ sd $ra, 8($sp) # 8-byte Folded Spill
+ sd $gp, 0($sp) # 8-byte Folded Spill
+ lui $1, %hi(%neg(%gp_rel(main)))
+ daddu $1, $1, $25
+ daddiu $gp, $1, %lo(%neg(%gp_rel(main)))
+ ld $1, %got_disp(shl)($gp)
+ ld $5, 0($1)
+ ld $1, %got_page($.str)($gp)
+ ld $25, %call16(printf)($gp)
+ jalr $25
+ daddiu $4, $1, %got_ofst($.str)
+ addiu $2, $zero, 0
+ ld $gp, 0($sp) # 8-byte Folded Reload
+ ld $ra, 8($sp) # 8-byte Folded Reload
+ jr $ra
+ daddiu $sp, $sp, 16
+ .set at
+ .set macro
+ .set reorder
+ .end main
+$tmp0:
+ .size main, ($tmp0)-main
+
+ .type shl,@object # @shl
+ .data
+ .globl shl
+ .align 3
+shl:
+ .8byte 1 # 0x1
+ .size shl, 8
+
+ .type $.str,@object # @.str
+ .section .rodata.str1.1,"aMS",@progbits,1
+$.str:
+ .asciz "0x%llx\n"
+ .size $.str, 8
+
+
+ .text
diff --git a/test/MC/Mips/set-at-directive-explicit-at.s b/test/MC/Mips/set-at-directive-explicit-at.s
new file mode 100644
index 000000000000..1bd26ffa855d
--- /dev/null
+++ b/test/MC/Mips/set-at-directive-explicit-at.s
@@ -0,0 +1,42 @@
+# RUN: llvm-mc %s -triple=mipsel-unknown-linux -show-encoding -mcpu=mips32r2 \
+# RUN: 2>%t1 | FileCheck %s
+# RUN: FileCheck -check-prefix=WARNINGS %s < %t1
+# Check that the assembler can handle the documented syntax
+# for ".set at" and set the correct value. The correct value for $at is always
+# $1 when written by the user.
+ .text
+foo:
+# CHECK: jr $1 # encoding: [0x08,0x00,0x20,0x00]
+# WARNINGS: :[[@LINE+2]]:11: warning: Used $at without ".set noat"
+ .set at=$1
+ jr $at
+
+# CHECK: jr $1 # encoding: [0x08,0x00,0x20,0x00]
+# WARNINGS: :[[@LINE+2]]:11: warning: Used $at without ".set noat"
+ .set at=$1
+ jr $1
+# WARNINGS-NOT: warning: Used $at without ".set noat"
+
+# CHECK: jr $1 # encoding: [0x08,0x00,0x20,0x00]
+ .set at=$2
+ jr $at
+# CHECK: jr $1 # encoding: [0x08,0x00,0x20,0x00]
+ .set at=$3
+ jr $at
+# CHECK: jr $1 # encoding: [0x08,0x00,0x20,0x00]
+ .set noat
+ jr $at
+# CHECK: jr $1 # encoding: [0x08,0x00,0x20,0x00]
+ .set at=$0
+ jr $at
+
+# CHECK: jr $16 # encoding: [0x08,0x00,0x00,0x02]
+# WARNINGS: :[[@LINE+2]]:11: warning: Used $16 with ".set at=$16"
+ .set at=$16
+ jr $s0
+
+# CHECK: jr $16 # encoding: [0x08,0x00,0x00,0x02]
+# WARNINGS: :[[@LINE+2]]:11: warning: Used $16 with ".set at=$16"
+ .set at=$16
+ jr $16
+# WARNINGS-NOT: warning
diff --git a/test/MC/Mips/set-at-directive.s b/test/MC/Mips/set-at-directive.s
index 828175a223a7..7e93f765ac79 100644
--- a/test/MC/Mips/set-at-directive.s
+++ b/test/MC/Mips/set-at-directive.s
@@ -2,130 +2,160 @@
# RUN: FileCheck %s
# Check that the assembler can handle the documented syntax
# for ".set at" and set the correct value.
-
.text
foo:
-# CHECK: jr $1 # encoding: [0x08,0x00,0x20,0x00]
+# CHECK: lui $1, 1
+# CHECK: addu $1, $1, $2
+# CHECK: lw $2, 0($1)
.set at=$1
- jr $at
- nop
-# CHECK: jr $2 # encoding: [0x08,0x00,0x40,0x00]
+ lw $2, 65536($2)
+# CHECK: lui $2, 1
+# CHECK: addu $2, $2, $1
+# CHECK: lw $1, 0($2)
.set at=$2
- jr $at
- nop
-# CHECK: jr $3 # encoding: [0x08,0x00,0x60,0x00]
+ lw $1, 65536($1)
+# CHECK: lui $3, 1
+# CHECK: addu $3, $3, $1
+# CHECK: lw $1, 0($3)
.set at=$3
- jr $at
- nop
-# CHECK: jr $4 # encoding: [0x08,0x00,0x80,0x00]
+ lw $1, 65536($1)
+# CHECK: lui $4, 1
+# CHECK: addu $4, $4, $1
+# CHECK: lw $1, 0($4)
.set at=$a0
- jr $at
- nop
-# CHECK: jr $5 # encoding: [0x08,0x00,0xa0,0x00]
+ lw $1, 65536($1)
+# CHECK: lui $5, 1
+# CHECK: addu $5, $5, $1
+# CHECK: lw $1, 0($5)
.set at=$a1
- jr $at
- nop
-# CHECK: jr $6 # encoding: [0x08,0x00,0xc0,0x00]
+ lw $1, 65536($1)
+# CHECK: lui $6, 1
+# CHECK: addu $6, $6, $1
+# CHECK: lw $1, 0($6)
.set at=$a2
- jr $at
- nop
-# CHECK: jr $7 # encoding: [0x08,0x00,0xe0,0x00]
+ lw $1, 65536($1)
+# CHECK: lui $7, 1
+# CHECK: addu $7, $7, $1
+# CHECK: lw $1, 0($7)
.set at=$a3
- jr $at
- nop
-# CHECK: jr $8 # encoding: [0x08,0x00,0x00,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $8, 1
+# CHECK: addu $8, $8, $1
+# CHECK: lw $1, 0($8)
.set at=$8
- jr $at
- nop
-# CHECK: jr $9 # encoding: [0x08,0x00,0x20,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $9, 1
+# CHECK: addu $9, $9, $1
+# CHECK: lw $1, 0($9)
.set at=$9
- jr $at
- nop
-# CHECK: jr $10 # encoding: [0x08,0x00,0x40,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $10, 1
+# CHECK: addu $10, $10, $1
+# CHECK: lw $1, 0($10)
.set at=$10
- jr $at
- nop
-# CHECK: jr $11 # encoding: [0x08,0x00,0x60,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $11, 1
+# CHECK: addu $11, $11, $1
+# CHECK: lw $1, 0($11)
.set at=$11
- jr $at
- nop
-# CHECK: jr $12 # encoding: [0x08,0x00,0x80,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $12, 1
+# CHECK: addu $12, $12, $1
+# CHECK: lw $1, 0($12)
.set at=$12
- jr $at
- nop
-# CHECK: jr $13 # encoding: [0x08,0x00,0xa0,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $13, 1
+# CHECK: addu $13, $13, $1
+# CHECK: lw $1, 0($13)
.set at=$13
- jr $at
- nop
-# CHECK: jr $14 # encoding: [0x08,0x00,0xc0,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $14, 1
+# CHECK: addu $14, $14, $1
+# CHECK: lw $1, 0($14)
.set at=$14
- jr $at
- nop
-# CHECK: jr $15 # encoding: [0x08,0x00,0xe0,0x01]
+ lw $1, 65536($1)
+# CHECK: lui $15, 1
+# CHECK: addu $15, $15, $1
+# CHECK: lw $1, 0($15)
.set at=$15
- jr $at
- nop
-# CHECK: jr $16 # encoding: [0x08,0x00,0x00,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $16, 1
+# CHECK: addu $16, $16, $1
+# CHECK: lw $1, 0($16)
.set at=$s0
- jr $at
- nop
-# CHECK: jr $17 # encoding: [0x08,0x00,0x20,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $17, 1
+# CHECK: addu $17, $17, $1
+# CHECK: lw $1, 0($17)
.set at=$s1
- jr $at
- nop
-# CHECK: jr $18 # encoding: [0x08,0x00,0x40,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $18, 1
+# CHECK: addu $18, $18, $1
+# CHECK: lw $1, 0($18)
.set at=$s2
- jr $at
- nop
-# CHECK: jr $19 # encoding: [0x08,0x00,0x60,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $19, 1
+# CHECK: addu $19, $19, $1
+# CHECK: lw $1, 0($19)
.set at=$s3
- jr $at
- nop
-# CHECK: jr $20 # encoding: [0x08,0x00,0x80,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $20, 1
+# CHECK: addu $20, $20, $1
+# CHECK: lw $1, 0($20)
.set at=$s4
- jr $at
- nop
-# CHECK: jr $21 # encoding: [0x08,0x00,0xa0,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $21, 1
+# CHECK: addu $21, $21, $1
+# CHECK: lw $1, 0($21)
.set at=$s5
- jr $at
- nop
-# CHECK: jr $22 # encoding: [0x08,0x00,0xc0,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $22, 1
+# CHECK: addu $22, $22, $1
+# CHECK: lw $1, 0($22)
.set at=$s6
- jr $at
- nop
-# CHECK: jr $23 # encoding: [0x08,0x00,0xe0,0x02]
+ lw $1, 65536($1)
+# CHECK: lui $23, 1
+# CHECK: addu $23, $23, $1
+# CHECK: lw $1, 0($23)
.set at=$s7
- jr $at
- nop
-# CHECK: jr $24 # encoding: [0x08,0x00,0x00,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $24, 1
+# CHECK: addu $24, $24, $1
+# CHECK: lw $1, 0($24)
.set at=$24
- jr $at
- nop
-# CHECK: jr $25 # encoding: [0x08,0x00,0x20,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $25, 1
+# CHECK: addu $25, $25, $1
+# CHECK: lw $1, 0($25)
.set at=$25
- jr $at
- nop
-# CHECK: jr $26 # encoding: [0x08,0x00,0x40,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $26, 1
+# CHECK: addu $26, $26, $1
+# CHECK: lw $1, 0($26)
.set at=$26
- jr $at
- nop
-# CHECK: jr $27 # encoding: [0x08,0x00,0x60,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $27, 1
+# CHECK: addu $27, $27, $1
+# CHECK: lw $1, 0($27)
.set at=$27
- jr $at
- nop
-# CHECK: jr $gp # encoding: [0x08,0x00,0x80,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $gp, 1
+# CHECK: addu $gp, $gp, $1
+# CHECK: lw $1, 0($gp)
.set at=$gp
- jr $at
- nop
-# CHECK: jr $fp # encoding: [0x08,0x00,0xc0,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $fp, 1
+# CHECK: addu $fp, $fp, $1
+# CHECK: lw $1, 0($fp)
.set at=$fp
- jr $at
- nop
-# CHECK: jr $sp # encoding: [0x08,0x00,0xa0,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $sp, 1
+# CHECK: addu $sp, $sp, $1
+# CHECK: lw $1, 0($sp)
.set at=$sp
- jr $at
- nop
-# CHECK: jr $ra # encoding: [0x08,0x00,0xe0,0x03]
+ lw $1, 65536($1)
+# CHECK: lui $ra, 1
+# CHECK: addu $ra, $ra, $1
+# CHECK: lw $1, 0($ra)
.set at=$ra
- jr $at
- nop
+ lw $1, 65536($1)
diff --git a/test/MC/Mips/sym-expr.s b/test/MC/Mips/sym-expr.s
new file mode 100644
index 000000000000..efefb1d5c4a9
--- /dev/null
+++ b/test/MC/Mips/sym-expr.s
@@ -0,0 +1,14 @@
+# Check parsing of symbol expressions.
+
+# RUN: llvm-mc -triple=mipsel -show-inst-operands %s 2> %t0
+# RUN: FileCheck %s < %t0
+
+ .global __start
+ .ent __start
+__start:
+ nop
+loc:
+ jal __start + 0x4 # CHECK: instruction: [jal, Imm<__start+4>]
+ jal __start + (-0x10) # CHECK: instruction: [jal, Imm<__start-16>]
+ jal (__start + (-0x10)) # CHECK: instruction: [jal, Imm<__start-16>]
+ .end __start
diff --git a/test/MC/Mips/xgot.ll b/test/MC/Mips/xgot.ll
deleted file mode 100644
index cc336788aa85..000000000000
--- a/test/MC/Mips/xgot.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc -filetype=obj -mtriple mipsel-unknown-linux -mxgot %s -o - | llvm-readobj -r | FileCheck %s
-
-@.str = private unnamed_addr constant [16 x i8] c"ext_1=%d, i=%d\0A\00", align 1
-@ext_1 = external global i32
-
-define void @fill() nounwind {
-entry:
-
-; Check that the appropriate relocations were created.
-; For the xgot case we want to see R_MIPS_[GOT|CALL]_[HI|LO]16.
-
-; CHECK: Relocations [
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_HI16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_LO16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_HI16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT_LO16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_CALL_HI16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_CALL_LO16
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_GOT
-; CHECK: 0x{{[0-9,A-F]+}} R_MIPS_LO16
-; CHECK: ]
-
- %0 = load i32* @ext_1, align 4
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([16 x i8]* @.str, i32 0, i32 0), i32 %0) nounwind
- ret void
-}
-
-declare i32 @printf(i8* nocapture, ...) nounwind
-
diff --git a/test/MC/Mips/xgot.s b/test/MC/Mips/xgot.s
new file mode 100644
index 000000000000..30848066ce3a
--- /dev/null
+++ b/test/MC/Mips/xgot.s
@@ -0,0 +1,67 @@
+// RUN: llvm-mc -filetype=obj -triple mipsel-unknown-linux %s -o - | llvm-readobj -r | FileCheck %s
+
+// Check that the appropriate relocations were created.
+// For the xgot case we want to see R_MIPS_[GOT|CALL]_[HI|LO]16.
+
+// CHECK: Relocations [
+// CHECK: 0x0 R_MIPS_HI16 _gp_disp
+// CHECK: 0x4 R_MIPS_LO16 _gp_disp
+// CHECK: 0x14 R_MIPS_GOT_HI16 ext_1
+// CHECK: 0x1C R_MIPS_GOT_LO16 ext_1
+// CHECK: 0x24 R_MIPS_CALL_HI16 printf
+// CHECK: 0x2C R_MIPS_GOT16 $.str
+// CHECK: 0x30 R_MIPS_CALL_LO16 printf
+// CHECK: 0x38 R_MIPS_LO16 $.str
+// CHECK: ]
+
+ .text
+ .abicalls
+ .section .mdebug.abi32,"",@progbits
+ .file "/home/espindola/llvm/llvm/test/MC/Mips/xgot.ll"
+ .text
+ .globl fill
+ .align 2
+ .type fill,@function
+ .set nomips16
+ .ent fill
+fill: # @fill
+ .frame $sp,24,$ra
+ .mask 0x80000000,-4
+ .fmask 0x00000000,0
+ .set noreorder
+ .set nomacro
+ .set noat
+# BB#0: # %entry
+ lui $2, %hi(_gp_disp)
+ addiu $2, $2, %lo(_gp_disp)
+ addiu $sp, $sp, -24
+ sw $ra, 20($sp) # 4-byte Folded Spill
+ addu $gp, $2, $25
+ lui $1, %got_hi(ext_1)
+ addu $1, $1, $gp
+ lw $1, %got_lo(ext_1)($1)
+ lw $5, 0($1)
+ lui $1, %call_hi(printf)
+ addu $1, $1, $gp
+ lw $2, %got($.str)($gp)
+ lw $25, %call_lo(printf)($1)
+ jalr $25
+ addiu $4, $2, %lo($.str)
+ lw $ra, 20($sp) # 4-byte Folded Reload
+ jr $ra
+ addiu $sp, $sp, 24
+ .set at
+ .set macro
+ .set reorder
+ .end fill
+$tmp0:
+ .size fill, ($tmp0)-fill
+
+ .type $.str,@object # @.str
+ .section .rodata.str1.1,"aMS",@progbits,1
+$.str:
+ .asciz "ext_1=%d, i=%d\n"
+ .size $.str, 16
+
+
+ .text
diff --git a/test/MC/PowerPC/deprecated-p7.s b/test/MC/PowerPC/deprecated-p7.s
index ded992356004..21ef6d25a4ec 100644
--- a/test/MC/PowerPC/deprecated-p7.s
+++ b/test/MC/PowerPC/deprecated-p7.s
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple powerpc64-unknown-linux-gnu -mcpu=pwr7 -show-encoding < %s 2>&1 | FileCheck %s
+# RUN: llvm-mc -triple powerpc64le-unknown-linux-gnu -mcpu=pwr7 -show-encoding < %s 2>&1 | FileCheck %s
# RUN: llvm-mc -triple powerpc-unknown-linux-gnu -mcpu=601 -show-encoding < %s 2>&1 | FileCheck -check-prefix=CHECK-OLD %s
mftb 3
diff --git a/test/MC/PowerPC/lit.local.cfg b/test/MC/PowerPC/lit.local.cfg
index 193ebebcd50e..091332439b18 100644
--- a/test/MC/PowerPC/lit.local.cfg
+++ b/test/MC/PowerPC/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'PowerPC' in targets:
+if not 'PowerPC' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/PowerPC/ppc-llong.s b/test/MC/PowerPC/ppc-llong.s
index 0838e424fba6..5d92fe319679 100644
--- a/test/MC/PowerPC/ppc-llong.s
+++ b/test/MC/PowerPC/ppc-llong.s
@@ -3,6 +3,8 @@
# RUN: llvm-readobj -s -sd | FileCheck %s
# RUN: llvm-mc -triple powerpc64-unknown-unknown -filetype=obj %s | \
# RUN: llvm-readobj -s -sd | FileCheck %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -s -sd | FileCheck %s
.data
.llong 0
diff --git a/test/MC/PowerPC/ppc-machine.s b/test/MC/PowerPC/ppc-machine.s
index b8a7e3f88013..6b3bf3146cc7 100644
--- a/test/MC/PowerPC/ppc-machine.s
+++ b/test/MC/PowerPC/ppc-machine.s
@@ -1,5 +1,6 @@
# RUN: llvm-mc -triple powerpc-unknown-unknown %s
# RUN: llvm-mc -triple powerpc64-unknown-unknown %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown %s
# For now, the only thing we check is that the .machine directive
# is accepted without syntax error.
diff --git a/test/MC/PowerPC/ppc-nop.s b/test/MC/PowerPC/ppc-nop.s
index 50afae23b715..29fd8e4f7489 100644
--- a/test/MC/PowerPC/ppc-nop.s
+++ b/test/MC/PowerPC/ppc-nop.s
@@ -1,5 +1,6 @@
-# RUN: llvm-mc -filetype=obj -triple=powerpc-unknown-linux-gnu %s | llvm-readobj -s -sd - | FileCheck %s
-# RUN: llvm-mc -filetype=obj -triple=powerpc64-unknown-linux-gnu %s | llvm-readobj -s -sd - | FileCheck %s
+# RUN: llvm-mc -filetype=obj -triple=powerpc-unknown-linux-gnu %s | llvm-readobj -s -sd - | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -filetype=obj -triple=powerpc64-unknown-linux-gnu %s | llvm-readobj -s -sd - | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -filetype=obj -triple=powerpc64le-unknown-linux-gnu %s | llvm-readobj -s -sd - | FileCheck -check-prefix=CHECK-LE %s
blr
.p2align 3
@@ -8,5 +9,6 @@ blr
.byte 0x42
.p2align 2
-# CHECK: 0000: 4E800020 60000000 4E800020 42000000
+# CHECK-BE: 0000: 4E800020 60000000 4E800020 42000000
+# CHECK-LE: 0000: 2000804E 00000060 2000804E 42000000
diff --git a/test/MC/PowerPC/ppc-reloc.s b/test/MC/PowerPC/ppc-reloc.s
new file mode 100644
index 000000000000..19dd2a3cf4e8
--- /dev/null
+++ b/test/MC/PowerPC/ppc-reloc.s
@@ -0,0 +1,17 @@
+# RUN: llvm-mc -triple=powerpc-unknown-linux-gnu -filetype=obj %s | \
+# RUN: llvm-readobj -r | FileCheck %s
+ .section .text
+
+ .globl foo
+ .type foo,@function
+ .align 2
+foo:
+ bl printf@plt
+.LC1:
+ .size foo, . - foo
+
+# CHECK: Relocations [
+# CHECK-NEXT: Section (2) .rela.text {
+# CHECK-NEXT: 0x0 R_PPC_PLTREL24 printf 0x0
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
diff --git a/test/MC/PowerPC/ppc-word.s b/test/MC/PowerPC/ppc-word.s
index 773fa14bc41d..e69de54bca08 100644
--- a/test/MC/PowerPC/ppc-word.s
+++ b/test/MC/PowerPC/ppc-word.s
@@ -3,6 +3,8 @@
# RUN: llvm-readobj -s -sd | FileCheck %s
# RUN: llvm-mc -triple powerpc64-unknown-unknown -filetype=obj %s | \
# RUN: llvm-readobj -s -sd | FileCheck %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -s -sd | FileCheck %s
.data
.word 0
diff --git a/test/MC/PowerPC/ppc64-abiversion.s b/test/MC/PowerPC/ppc64-abiversion.s
new file mode 100644
index 000000000000..d2970f8c9059
--- /dev/null
+++ b/test/MC/PowerPC/ppc64-abiversion.s
@@ -0,0 +1,9 @@
+
+# RUN: llvm-mc -triple powerpc64-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -h | FileCheck %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -h | FileCheck %s
+
+ .abiversion 2
+# CHECK: Flags [ (0x2)
+
diff --git a/test/MC/PowerPC/ppc64-encoding-bookII.s b/test/MC/PowerPC/ppc64-encoding-bookII.s
index 9e68a4b3c254..99796ca61002 100644
--- a/test/MC/PowerPC/ppc64-encoding-bookII.s
+++ b/test/MC/PowerPC/ppc64-encoding-bookII.s
@@ -1,82 +1,107 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# Cache management instruction
-# CHECK: icbi 2, 3 # encoding: [0x7c,0x02,0x1f,0xac]
- icbi 2, 3
-
-# FIXME: dcbt 2, 3, 10
-# CHECK: dcbt 2, 3 # encoding: [0x7c,0x02,0x1a,0x2c]
- dcbt 2, 3
-# FIXME: dcbtst 2, 3, 10
-# CHECK: dcbtst 2, 3 # encoding: [0x7c,0x02,0x19,0xec]
- dcbtst 2, 3
-# CHECK: dcbz 2, 3 # encoding: [0x7c,0x02,0x1f,0xec]
- dcbz 2, 3
-# CHECK: dcbst 2, 3 # encoding: [0x7c,0x02,0x18,0x6c]
- dcbst 2, 3
-# FIXME: dcbf 2, 3, 1
+# CHECK-BE: icbi 2, 3 # encoding: [0x7c,0x02,0x1f,0xac]
+# CHECK-LE: icbi 2, 3 # encoding: [0xac,0x1f,0x02,0x7c]
+ icbi 2, 3
+
+# FIXME: dcbt 2, 3, 10
+# CHECK-BE: dcbt 2, 3 # encoding: [0x7c,0x02,0x1a,0x2c]
+# CHECK-LE: dcbt 2, 3 # encoding: [0x2c,0x1a,0x02,0x7c]
+ dcbt 2, 3
+# FIXME: dcbtst 2, 3, 10
+# CHECK-BE: dcbtst 2, 3 # encoding: [0x7c,0x02,0x19,0xec]
+# CHECK-LE: dcbtst 2, 3 # encoding: [0xec,0x19,0x02,0x7c]
+ dcbtst 2, 3
+# CHECK-BE: dcbz 2, 3 # encoding: [0x7c,0x02,0x1f,0xec]
+# CHECK-LE: dcbz 2, 3 # encoding: [0xec,0x1f,0x02,0x7c]
+ dcbz 2, 3
+# CHECK-BE: dcbst 2, 3 # encoding: [0x7c,0x02,0x18,0x6c]
+# CHECK-LE: dcbst 2, 3 # encoding: [0x6c,0x18,0x02,0x7c]
+ dcbst 2, 3
+# FIXME: dcbf 2, 3, 1
# Synchronization instructions
-# CHECK: isync # encoding: [0x4c,0x00,0x01,0x2c]
- isync
-
-# FIXME: lbarx 2, 3, 4, 1
-# FIXME: lharx 2, 3, 4, 1
-# FIXME: lwarx 2, 3, 4, 1
-# FIXME: ldarx 2, 3, 4, 1
-
-# FIXME: stbcx. 2, 3, 4
-# FIXME: sthcx. 2, 3, 4
-# CHECK: stwcx. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x2d]
- stwcx. 2, 3, 4
-# CHECK: stdcx. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xad]
- stdcx. 2, 3, 4
-
-# CHECK: sync 2 # encoding: [0x7c,0x40,0x04,0xac]
- sync 2
-# CHECK: eieio # encoding: [0x7c,0x00,0x06,0xac]
- eieio
-# CHECK: wait 2 # encoding: [0x7c,0x40,0x00,0x7c]
- wait 2
+# CHECK-BE: isync # encoding: [0x4c,0x00,0x01,0x2c]
+# CHECK-LE: isync # encoding: [0x2c,0x01,0x00,0x4c]
+ isync
+
+# FIXME: lbarx 2, 3, 4, 1
+# FIXME: lharx 2, 3, 4, 1
+# FIXME: lwarx 2, 3, 4, 1
+# FIXME: ldarx 2, 3, 4, 1
+
+# FIXME: stbcx. 2, 3, 4
+# FIXME: sthcx. 2, 3, 4
+# CHECK-BE: stwcx. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x2d]
+# CHECK-LE: stwcx. 2, 3, 4 # encoding: [0x2d,0x21,0x43,0x7c]
+ stwcx. 2, 3, 4
+# CHECK-BE: stdcx. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xad]
+# CHECK-LE: stdcx. 2, 3, 4 # encoding: [0xad,0x21,0x43,0x7c]
+ stdcx. 2, 3, 4
+
+# CHECK-BE: sync 2 # encoding: [0x7c,0x40,0x04,0xac]
+# CHECK-LE: sync 2 # encoding: [0xac,0x04,0x40,0x7c]
+ sync 2
+# CHECK-BE: eieio # encoding: [0x7c,0x00,0x06,0xac]
+# CHECK-LE: eieio # encoding: [0xac,0x06,0x00,0x7c]
+ eieio
+# CHECK-BE: wait 2 # encoding: [0x7c,0x40,0x00,0x7c]
+# CHECK-LE: wait 2 # encoding: [0x7c,0x00,0x40,0x7c]
+ wait 2
# Extended mnemonics
-# CHECK: dcbf 2, 3 # encoding: [0x7c,0x02,0x18,0xac]
- dcbf 2, 3
-# FIXME: dcbfl 2, 3
-
-# FIXME: lbarx 2, 3, 4
-# FIXME: lharx 2, 3, 4
-# CHECK: lwarx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x28]
- lwarx 2, 3, 4
-# CHECK: ldarx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xa8]
- ldarx 2, 3, 4
-
-# CHECK: sync 0 # encoding: [0x7c,0x00,0x04,0xac]
- sync
-# CHECK: sync 0 # encoding: [0x7c,0x00,0x04,0xac]
- msync
-# CHECK: sync 1 # encoding: [0x7c,0x20,0x04,0xac]
- lwsync
-# CHECK: sync 2 # encoding: [0x7c,0x40,0x04,0xac]
- ptesync
-
-# CHECK: wait 0 # encoding: [0x7c,0x00,0x00,0x7c]
- wait
-# CHECK: wait 1 # encoding: [0x7c,0x20,0x00,0x7c]
- waitrsv
-# CHECK: wait 2 # encoding: [0x7c,0x40,0x00,0x7c]
- waitimpl
+# CHECK-BE: dcbf 2, 3 # encoding: [0x7c,0x02,0x18,0xac]
+# CHECK-LE: dcbf 2, 3 # encoding: [0xac,0x18,0x02,0x7c]
+ dcbf 2, 3
+# FIXME: dcbfl 2, 3
+
+# FIXME: lbarx 2, 3, 4
+# FIXME: lharx 2, 3, 4
+# CHECK-BE: lwarx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x28]
+# CHECK-LE: lwarx 2, 3, 4 # encoding: [0x28,0x20,0x43,0x7c]
+ lwarx 2, 3, 4
+# CHECK-BE: ldarx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xa8]
+# CHECK-LE: ldarx 2, 3, 4 # encoding: [0xa8,0x20,0x43,0x7c]
+ ldarx 2, 3, 4
+
+# CHECK-BE: sync 0 # encoding: [0x7c,0x00,0x04,0xac]
+# CHECK-LE: sync 0 # encoding: [0xac,0x04,0x00,0x7c]
+ sync
+# CHECK-BE: sync 0 # encoding: [0x7c,0x00,0x04,0xac]
+# CHECK-LE: sync 0 # encoding: [0xac,0x04,0x00,0x7c]
+ msync
+# CHECK-BE: sync 1 # encoding: [0x7c,0x20,0x04,0xac]
+# CHECK-LE: sync 1 # encoding: [0xac,0x04,0x20,0x7c]
+ lwsync
+# CHECK-BE: sync 2 # encoding: [0x7c,0x40,0x04,0xac]
+# CHECK-LE: sync 2 # encoding: [0xac,0x04,0x40,0x7c]
+ ptesync
+
+# CHECK-BE: wait 0 # encoding: [0x7c,0x00,0x00,0x7c]
+# CHECK-LE: wait 0 # encoding: [0x7c,0x00,0x00,0x7c]
+ wait
+# CHECK-BE: wait 1 # encoding: [0x7c,0x20,0x00,0x7c]
+# CHECK-LE: wait 1 # encoding: [0x7c,0x00,0x20,0x7c]
+ waitrsv
+# CHECK-BE: wait 2 # encoding: [0x7c,0x40,0x00,0x7c]
+# CHECK-LE: wait 2 # encoding: [0x7c,0x00,0x40,0x7c]
+ waitimpl
# Time base instructions
-# CHECK: mftb 2, 123 # encoding: [0x7c,0x5b,0x1a,0xe6]
- mftb 2, 123
-# CHECK: mftb 2, 268 # encoding: [0x7c,0x4c,0x42,0xe6]
- mftb 2
-# CHECK: mftb 2, 269 # encoding: [0x7c,0x4d,0x42,0xe6]
- mftbu 2
+# CHECK-BE: mftb 2, 123 # encoding: [0x7c,0x5b,0x1a,0xe6]
+# CHECK-LE: mftb 2, 123 # encoding: [0xe6,0x1a,0x5b,0x7c]
+ mftb 2, 123
+# CHECK-BE: mftb 2, 268 # encoding: [0x7c,0x4c,0x42,0xe6]
+# CHECK-LE: mftb 2, 268 # encoding: [0xe6,0x42,0x4c,0x7c]
+ mftb 2
+# CHECK-BE: mftb 2, 269 # encoding: [0x7c,0x4d,0x42,0xe6]
+# CHECK-LE: mftb 2, 269 # encoding: [0xe6,0x42,0x4d,0x7c]
+ mftbu 2
diff --git a/test/MC/PowerPC/ppc64-encoding-bookIII.s b/test/MC/PowerPC/ppc64-encoding-bookIII.s
index 318c30b04d4a..dfce39536e39 100644
--- a/test/MC/PowerPC/ppc64-encoding-bookIII.s
+++ b/test/MC/PowerPC/ppc64-encoding-bookIII.s
@@ -1,107 +1,143 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
-# CHECK: mtmsr 4, 0 # encoding: [0x7c,0x80,0x01,0x24]
- mtmsr %r4
+# CHECK-BE: mtmsr 4, 0 # encoding: [0x7c,0x80,0x01,0x24]
+# CHECK-LE: mtmsr 4, 0 # encoding: [0x24,0x01,0x80,0x7c]
+ mtmsr %r4
-# CHECK: mtmsr 4, 1 # encoding: [0x7c,0x81,0x01,0x24]
- mtmsr %r4, 1
+# CHECK-BE: mtmsr 4, 1 # encoding: [0x7c,0x81,0x01,0x24]
+# CHECK-LE: mtmsr 4, 1 # encoding: [0x24,0x01,0x81,0x7c]
+ mtmsr %r4, 1
-# CHECK: mfmsr 4 # encoding: [0x7c,0x80,0x00,0xa6]
- mfmsr %r4
+# CHECK-BE: mfmsr 4 # encoding: [0x7c,0x80,0x00,0xa6]
+# CHECK-LE: mfmsr 4 # encoding: [0xa6,0x00,0x80,0x7c]
+ mfmsr %r4
-# CHECK: mtmsrd 4, 0 # encoding: [0x7c,0x80,0x01,0x64]
- mtmsrd %r4
+# CHECK-BE: mtmsrd 4, 0 # encoding: [0x7c,0x80,0x01,0x64]
+# CHECK-LE: mtmsrd 4, 0 # encoding: [0x64,0x01,0x80,0x7c]
+ mtmsrd %r4
-# CHECK: mtmsrd 4, 1 # encoding: [0x7c,0x81,0x01,0x64]
- mtmsrd %r4, 1
+# CHECK-BE: mtmsrd 4, 1 # encoding: [0x7c,0x81,0x01,0x64]
+# CHECK-LE: mtmsrd 4, 1 # encoding: [0x64,0x01,0x81,0x7c]
+ mtmsrd %r4, 1
-# CHECK: mfspr 4, 272 # encoding: [0x7c,0x90,0x42,0xa6]
- mfsprg %r4, 0
+# CHECK-BE: mfspr 4, 272 # encoding: [0x7c,0x90,0x42,0xa6]
+# CHECK-LE: mfspr 4, 272 # encoding: [0xa6,0x42,0x90,0x7c]
+ mfsprg %r4, 0
-# CHECK: mfspr 4, 273 # encoding: [0x7c,0x91,0x42,0xa6]
- mfsprg %r4, 1
+# CHECK-BE: mfspr 4, 273 # encoding: [0x7c,0x91,0x42,0xa6]
+# CHECK-LE: mfspr 4, 273 # encoding: [0xa6,0x42,0x91,0x7c]
+ mfsprg %r4, 1
-# CHECK: mfspr 4, 274 # encoding: [0x7c,0x92,0x42,0xa6]
- mfsprg %r4, 2
+# CHECK-BE: mfspr 4, 274 # encoding: [0x7c,0x92,0x42,0xa6]
+# CHECK-LE: mfspr 4, 274 # encoding: [0xa6,0x42,0x92,0x7c]
+ mfsprg %r4, 2
-# CHECK: mfspr 4, 275 # encoding: [0x7c,0x93,0x42,0xa6]
- mfsprg %r4, 3
+# CHECK-BE: mfspr 4, 275 # encoding: [0x7c,0x93,0x42,0xa6]
+# CHECK-LE: mfspr 4, 275 # encoding: [0xa6,0x42,0x93,0x7c]
+ mfsprg %r4, 3
-# CHECK: mtspr 272, 4 # encoding: [0x7c,0x90,0x43,0xa6]
- mtsprg 0, %r4
+# CHECK-BE: mtspr 272, 4 # encoding: [0x7c,0x90,0x43,0xa6]
+# CHECK-LE: mtspr 272, 4 # encoding: [0xa6,0x43,0x90,0x7c]
+ mtsprg 0, %r4
-# CHECK: mtspr 273, 4 # encoding: [0x7c,0x91,0x43,0xa6]
- mtsprg 1, %r4
+# CHECK-BE: mtspr 273, 4 # encoding: [0x7c,0x91,0x43,0xa6]
+# CHECK-LE: mtspr 273, 4 # encoding: [0xa6,0x43,0x91,0x7c]
+ mtsprg 1, %r4
-# CHECK: mtspr 274, 4 # encoding: [0x7c,0x92,0x43,0xa6]
- mtsprg 2, %r4
+# CHECK-BE: mtspr 274, 4 # encoding: [0x7c,0x92,0x43,0xa6]
+# CHECK-LE: mtspr 274, 4 # encoding: [0xa6,0x43,0x92,0x7c]
+ mtsprg 2, %r4
-# CHECK: mtspr 275, 4 # encoding: [0x7c,0x93,0x43,0xa6]
- mtsprg 3, %r4
+# CHECK-BE: mtspr 275, 4 # encoding: [0x7c,0x93,0x43,0xa6]
+# CHECK-LE: mtspr 275, 4 # encoding: [0xa6,0x43,0x93,0x7c]
+ mtsprg 3, %r4
-# CHECK: mtspr 272, 4 # encoding: [0x7c,0x90,0x43,0xa6]
- mtsprg0 %r4
+# CHECK-BE: mtspr 272, 4 # encoding: [0x7c,0x90,0x43,0xa6]
+# CHECK-LE: mtspr 272, 4 # encoding: [0xa6,0x43,0x90,0x7c]
+ mtsprg0 %r4
-# CHECK: mtspr 273, 4 # encoding: [0x7c,0x91,0x43,0xa6]
- mtsprg1 %r4
+# CHECK-BE: mtspr 273, 4 # encoding: [0x7c,0x91,0x43,0xa6]
+# CHECK-LE: mtspr 273, 4 # encoding: [0xa6,0x43,0x91,0x7c]
+ mtsprg1 %r4
-# CHECK: mtspr 274, 4 # encoding: [0x7c,0x92,0x43,0xa6]
- mtsprg2 %r4
+# CHECK-BE: mtspr 274, 4 # encoding: [0x7c,0x92,0x43,0xa6]
+# CHECK-LE: mtspr 274, 4 # encoding: [0xa6,0x43,0x92,0x7c]
+ mtsprg2 %r4
-# CHECK: mtspr 275, 4 # encoding: [0x7c,0x93,0x43,0xa6]
- mtsprg3 %r4
+# CHECK-BE: mtspr 275, 4 # encoding: [0x7c,0x93,0x43,0xa6]
+# CHECK-LE: mtspr 275, 4 # encoding: [0xa6,0x43,0x93,0x7c]
+ mtsprg3 %r4
-# CHECK: mtspr 280, 4 # encoding: [0x7c,0x98,0x43,0xa6]
- mtasr %r4
+# CHECK-BE: mtspr 280, 4 # encoding: [0x7c,0x98,0x43,0xa6]
+# CHECK-LE: mtspr 280, 4 # encoding: [0xa6,0x43,0x98,0x7c]
+ mtasr %r4
-# CHECK: mfspr 4, 22 # encoding: [0x7c,0x96,0x02,0xa6]
- mfdec %r4
+# CHECK-BE: mfspr 4, 22 # encoding: [0x7c,0x96,0x02,0xa6]
+# CHECK-LE: mfspr 4, 22 # encoding: [0xa6,0x02,0x96,0x7c]
+ mfdec %r4
-# CHECK: mtspr 22, 4 # encoding: [0x7c,0x96,0x03,0xa6]
- mtdec %r4
+# CHECK-BE: mtspr 22, 4 # encoding: [0x7c,0x96,0x03,0xa6]
+# CHECK-LE: mtspr 22, 4 # encoding: [0xa6,0x03,0x96,0x7c]
+ mtdec %r4
-# CHECK: mfspr 4, 287 # encoding: [0x7c,0x9f,0x42,0xa6]
- mfpvr %r4
+# CHECK-BE: mfspr 4, 287 # encoding: [0x7c,0x9f,0x42,0xa6]
+# CHECK-LE: mfspr 4, 287 # encoding: [0xa6,0x42,0x9f,0x7c]
+ mfpvr %r4
-# CHECK: mfspr 4, 25 # encoding: [0x7c,0x99,0x02,0xa6]
- mfsdr1 %r4
+# CHECK-BE: mfspr 4, 25 # encoding: [0x7c,0x99,0x02,0xa6]
+# CHECK-LE: mfspr 4, 25 # encoding: [0xa6,0x02,0x99,0x7c]
+ mfsdr1 %r4
-# CHECK: mtspr 25, 4 # encoding: [0x7c,0x99,0x03,0xa6]
- mtsdr1 %r4
+# CHECK-BE: mtspr 25, 4 # encoding: [0x7c,0x99,0x03,0xa6]
+# CHECK-LE: mtspr 25, 4 # encoding: [0xa6,0x03,0x99,0x7c]
+ mtsdr1 %r4
-# CHECK: mfspr 4, 26 # encoding: [0x7c,0x9a,0x02,0xa6]
- mfsrr0 %r4
+# CHECK-BE: mfspr 4, 26 # encoding: [0x7c,0x9a,0x02,0xa6]
+# CHECK-LE: mfspr 4, 26 # encoding: [0xa6,0x02,0x9a,0x7c]
+ mfsrr0 %r4
-# CHECK: mtspr 26, 4 # encoding: [0x7c,0x9a,0x03,0xa6]
- mtsrr0 %r4
+# CHECK-BE: mtspr 26, 4 # encoding: [0x7c,0x9a,0x03,0xa6]
+# CHECK-LE: mtspr 26, 4 # encoding: [0xa6,0x03,0x9a,0x7c]
+ mtsrr0 %r4
-# CHECK: mfspr 4, 27 # encoding: [0x7c,0x9b,0x02,0xa6]
- mfsrr1 %r4
+# CHECK-BE: mfspr 4, 27 # encoding: [0x7c,0x9b,0x02,0xa6]
+# CHECK-LE: mfspr 4, 27 # encoding: [0xa6,0x02,0x9b,0x7c]
+ mfsrr1 %r4
-# CHECK: mtspr 27, 4 # encoding: [0x7c,0x9b,0x03,0xa6]
- mtsrr1 %r4
+# CHECK-BE: mtspr 27, 4 # encoding: [0x7c,0x9b,0x03,0xa6]
+# CHECK-LE: mtspr 27, 4 # encoding: [0xa6,0x03,0x9b,0x7c]
+ mtsrr1 %r4
-# CHECK: slbie 4 # encoding: [0x7c,0x00,0x23,0x64]
- slbie %r4
+# CHECK-BE: slbie 4 # encoding: [0x7c,0x00,0x23,0x64]
+# CHECK-LE: slbie 4 # encoding: [0x64,0x23,0x00,0x7c]
+ slbie %r4
-# CHECK: slbmte 4, 5 # encoding: [0x7c,0x80,0x2b,0x24]
- slbmte %r4, %r5
+# CHECK-BE: slbmte 4, 5 # encoding: [0x7c,0x80,0x2b,0x24]
+# CHECK-LE: slbmte 4, 5 # encoding: [0x24,0x2b,0x80,0x7c]
+ slbmte %r4, %r5
-# CHECK: slbmfee 4, 5 # encoding: [0x7c,0x80,0x2f,0x26]
- slbmfee %r4, %r5
+# CHECK-BE: slbmfee 4, 5 # encoding: [0x7c,0x80,0x2f,0x26]
+# CHECK-LE: slbmfee 4, 5 # encoding: [0x26,0x2f,0x80,0x7c]
+ slbmfee %r4, %r5
-# CHECK: slbia # encoding: [0x7c,0x00,0x03,0xe4]
- slbia
+# CHECK-BE: slbia # encoding: [0x7c,0x00,0x03,0xe4]
+# CHECK-LE: slbia # encoding: [0xe4,0x03,0x00,0x7c]
+ slbia
-# CHECK: tlbsync # encoding: [0x7c,0x00,0x04,0x6c]
- tlbsync
+# CHECK-BE: tlbsync # encoding: [0x7c,0x00,0x04,0x6c]
+# CHECK-LE: tlbsync # encoding: [0x6c,0x04,0x00,0x7c]
+ tlbsync
-# CHECK: tlbiel 4 # encoding: [0x7c,0x00,0x22,0x24]
- tlbiel %r4
+# CHECK-BE: tlbiel 4 # encoding: [0x7c,0x00,0x22,0x24]
+# CHECK-LE: tlbiel 4 # encoding: [0x24,0x22,0x00,0x7c]
+ tlbiel %r4
-# CHECK: tlbie 4,0 # encoding: [0x7c,0x00,0x22,0x64]
- tlbie %r4, 0
+# CHECK-BE: tlbie 4,0 # encoding: [0x7c,0x00,0x22,0x64]
+# CHECK-LE: tlbie 4,0 # encoding: [0x64,0x22,0x00,0x7c]
+ tlbie %r4, 0
-# CHECK: tlbie 4,0 # encoding: [0x7c,0x00,0x22,0x64]
- tlbie %r4
+# CHECK-BE: tlbie 4,0 # encoding: [0x7c,0x00,0x22,0x64]
+# CHECK-LE: tlbie 4,0 # encoding: [0x64,0x22,0x00,0x7c]
+ tlbie %r4
diff --git a/test/MC/PowerPC/ppc64-encoding-ext.s b/test/MC/PowerPC/ppc64-encoding-ext.s
index a9c313a0322b..2374675fae16 100644
--- a/test/MC/PowerPC/ppc64-encoding-ext.s
+++ b/test/MC/PowerPC/ppc64-encoding-ext.s
@@ -1,2227 +1,3467 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# Condition register bit symbols
-# CHECK: beqlr 0 # encoding: [0x4d,0x82,0x00,0x20]
- beqlr cr0
-# CHECK: beqlr 1 # encoding: [0x4d,0x86,0x00,0x20]
- beqlr cr1
-# CHECK: beqlr 2 # encoding: [0x4d,0x8a,0x00,0x20]
- beqlr cr2
-# CHECK: beqlr 3 # encoding: [0x4d,0x8e,0x00,0x20]
- beqlr cr3
-# CHECK: beqlr 4 # encoding: [0x4d,0x92,0x00,0x20]
- beqlr cr4
-# CHECK: beqlr 5 # encoding: [0x4d,0x96,0x00,0x20]
- beqlr cr5
-# CHECK: beqlr 6 # encoding: [0x4d,0x9a,0x00,0x20]
- beqlr cr6
-# CHECK: beqlr 7 # encoding: [0x4d,0x9e,0x00,0x20]
- beqlr cr7
-
-# CHECK: bclr 12, 0, 0 # encoding: [0x4d,0x80,0x00,0x20]
- btlr 4*cr0+lt
-# CHECK: bclr 12, 1, 0 # encoding: [0x4d,0x81,0x00,0x20]
- btlr 4*cr0+gt
-# CHECK: bclr 12, 2, 0 # encoding: [0x4d,0x82,0x00,0x20]
- btlr 4*cr0+eq
-# CHECK: bclr 12, 3, 0 # encoding: [0x4d,0x83,0x00,0x20]
- btlr 4*cr0+so
-# CHECK: bclr 12, 3, 0 # encoding: [0x4d,0x83,0x00,0x20]
- btlr 4*cr0+un
-# CHECK: bclr 12, 4, 0 # encoding: [0x4d,0x84,0x00,0x20]
- btlr 4*cr1+lt
-# CHECK: bclr 12, 5, 0 # encoding: [0x4d,0x85,0x00,0x20]
- btlr 4*cr1+gt
-# CHECK: bclr 12, 6, 0 # encoding: [0x4d,0x86,0x00,0x20]
- btlr 4*cr1+eq
-# CHECK: bclr 12, 7, 0 # encoding: [0x4d,0x87,0x00,0x20]
- btlr 4*cr1+so
-# CHECK: bclr 12, 7, 0 # encoding: [0x4d,0x87,0x00,0x20]
- btlr 4*cr1+un
-# CHECK: bclr 12, 8, 0 # encoding: [0x4d,0x88,0x00,0x20]
- btlr 4*cr2+lt
-# CHECK: bclr 12, 9, 0 # encoding: [0x4d,0x89,0x00,0x20]
- btlr 4*cr2+gt
-# CHECK: bclr 12, 10, 0 # encoding: [0x4d,0x8a,0x00,0x20]
- btlr 4*cr2+eq
-# CHECK: bclr 12, 11, 0 # encoding: [0x4d,0x8b,0x00,0x20]
- btlr 4*cr2+so
-# CHECK: bclr 12, 11, 0 # encoding: [0x4d,0x8b,0x00,0x20]
- btlr 4*cr2+un
-# CHECK: bclr 12, 12, 0 # encoding: [0x4d,0x8c,0x00,0x20]
- btlr 4*cr3+lt
-# CHECK: bclr 12, 13, 0 # encoding: [0x4d,0x8d,0x00,0x20]
- btlr 4*cr3+gt
-# CHECK: bclr 12, 14, 0 # encoding: [0x4d,0x8e,0x00,0x20]
- btlr 4*cr3+eq
-# CHECK: bclr 12, 15, 0 # encoding: [0x4d,0x8f,0x00,0x20]
- btlr 4*cr3+so
-# CHECK: bclr 12, 15, 0 # encoding: [0x4d,0x8f,0x00,0x20]
- btlr 4*cr3+un
-# CHECK: bclr 12, 16, 0 # encoding: [0x4d,0x90,0x00,0x20]
- btlr 4*cr4+lt
-# CHECK: bclr 12, 17, 0 # encoding: [0x4d,0x91,0x00,0x20]
- btlr 4*cr4+gt
-# CHECK: bclr 12, 18, 0 # encoding: [0x4d,0x92,0x00,0x20]
- btlr 4*cr4+eq
-# CHECK: bclr 12, 19, 0 # encoding: [0x4d,0x93,0x00,0x20]
- btlr 4*cr4+so
-# CHECK: bclr 12, 19, 0 # encoding: [0x4d,0x93,0x00,0x20]
- btlr 4*cr4+un
-# CHECK: bclr 12, 20, 0 # encoding: [0x4d,0x94,0x00,0x20]
- btlr 4*cr5+lt
-# CHECK: bclr 12, 21, 0 # encoding: [0x4d,0x95,0x00,0x20]
- btlr 4*cr5+gt
-# CHECK: bclr 12, 22, 0 # encoding: [0x4d,0x96,0x00,0x20]
- btlr 4*cr5+eq
-# CHECK: bclr 12, 23, 0 # encoding: [0x4d,0x97,0x00,0x20]
- btlr 4*cr5+so
-# CHECK: bclr 12, 23, 0 # encoding: [0x4d,0x97,0x00,0x20]
- btlr 4*cr5+un
-# CHECK: bclr 12, 24, 0 # encoding: [0x4d,0x98,0x00,0x20]
- btlr 4*cr6+lt
-# CHECK: bclr 12, 25, 0 # encoding: [0x4d,0x99,0x00,0x20]
- btlr 4*cr6+gt
-# CHECK: bclr 12, 26, 0 # encoding: [0x4d,0x9a,0x00,0x20]
- btlr 4*cr6+eq
-# CHECK: bclr 12, 27, 0 # encoding: [0x4d,0x9b,0x00,0x20]
- btlr 4*cr6+so
-# CHECK: bclr 12, 27, 0 # encoding: [0x4d,0x9b,0x00,0x20]
- btlr 4*cr6+un
-# CHECK: bclr 12, 28, 0 # encoding: [0x4d,0x9c,0x00,0x20]
- btlr 4*cr7+lt
-# CHECK: bclr 12, 29, 0 # encoding: [0x4d,0x9d,0x00,0x20]
- btlr 4*cr7+gt
-# CHECK: bclr 12, 30, 0 # encoding: [0x4d,0x9e,0x00,0x20]
- btlr 4*cr7+eq
-# CHECK: bclr 12, 31, 0 # encoding: [0x4d,0x9f,0x00,0x20]
- btlr 4*cr7+so
-# CHECK: bclr 12, 31, 0 # encoding: [0x4d,0x9f,0x00,0x20]
- btlr 4*cr7+un
+# CHECK-BE: beqlr 0 # encoding: [0x4d,0x82,0x00,0x20]
+# CHECK-LE: beqlr 0 # encoding: [0x20,0x00,0x82,0x4d]
+ beqlr cr0
+# CHECK-BE: beqlr 1 # encoding: [0x4d,0x86,0x00,0x20]
+# CHECK-LE: beqlr 1 # encoding: [0x20,0x00,0x86,0x4d]
+ beqlr cr1
+# CHECK-BE: beqlr 2 # encoding: [0x4d,0x8a,0x00,0x20]
+# CHECK-LE: beqlr 2 # encoding: [0x20,0x00,0x8a,0x4d]
+ beqlr cr2
+# CHECK-BE: beqlr 3 # encoding: [0x4d,0x8e,0x00,0x20]
+# CHECK-LE: beqlr 3 # encoding: [0x20,0x00,0x8e,0x4d]
+ beqlr cr3
+# CHECK-BE: beqlr 4 # encoding: [0x4d,0x92,0x00,0x20]
+# CHECK-LE: beqlr 4 # encoding: [0x20,0x00,0x92,0x4d]
+ beqlr cr4
+# CHECK-BE: beqlr 5 # encoding: [0x4d,0x96,0x00,0x20]
+# CHECK-LE: beqlr 5 # encoding: [0x20,0x00,0x96,0x4d]
+ beqlr cr5
+# CHECK-BE: beqlr 6 # encoding: [0x4d,0x9a,0x00,0x20]
+# CHECK-LE: beqlr 6 # encoding: [0x20,0x00,0x9a,0x4d]
+ beqlr cr6
+# CHECK-BE: beqlr 7 # encoding: [0x4d,0x9e,0x00,0x20]
+# CHECK-LE: beqlr 7 # encoding: [0x20,0x00,0x9e,0x4d]
+ beqlr cr7
+
+# CHECK-BE: bclr 12, 0, 0 # encoding: [0x4d,0x80,0x00,0x20]
+# CHECK-LE: bclr 12, 0, 0 # encoding: [0x20,0x00,0x80,0x4d]
+ btlr 4*cr0+lt
+# CHECK-BE: bclr 12, 1, 0 # encoding: [0x4d,0x81,0x00,0x20]
+# CHECK-LE: bclr 12, 1, 0 # encoding: [0x20,0x00,0x81,0x4d]
+ btlr 4*cr0+gt
+# CHECK-BE: bclr 12, 2, 0 # encoding: [0x4d,0x82,0x00,0x20]
+# CHECK-LE: bclr 12, 2, 0 # encoding: [0x20,0x00,0x82,0x4d]
+ btlr 4*cr0+eq
+# CHECK-BE: bclr 12, 3, 0 # encoding: [0x4d,0x83,0x00,0x20]
+# CHECK-LE: bclr 12, 3, 0 # encoding: [0x20,0x00,0x83,0x4d]
+ btlr 4*cr0+so
+# CHECK-BE: bclr 12, 3, 0 # encoding: [0x4d,0x83,0x00,0x20]
+# CHECK-LE: bclr 12, 3, 0 # encoding: [0x20,0x00,0x83,0x4d]
+ btlr 4*cr0+un
+# CHECK-BE: bclr 12, 4, 0 # encoding: [0x4d,0x84,0x00,0x20]
+# CHECK-LE: bclr 12, 4, 0 # encoding: [0x20,0x00,0x84,0x4d]
+ btlr 4*cr1+lt
+# CHECK-BE: bclr 12, 5, 0 # encoding: [0x4d,0x85,0x00,0x20]
+# CHECK-LE: bclr 12, 5, 0 # encoding: [0x20,0x00,0x85,0x4d]
+ btlr 4*cr1+gt
+# CHECK-BE: bclr 12, 6, 0 # encoding: [0x4d,0x86,0x00,0x20]
+# CHECK-LE: bclr 12, 6, 0 # encoding: [0x20,0x00,0x86,0x4d]
+ btlr 4*cr1+eq
+# CHECK-BE: bclr 12, 7, 0 # encoding: [0x4d,0x87,0x00,0x20]
+# CHECK-LE: bclr 12, 7, 0 # encoding: [0x20,0x00,0x87,0x4d]
+ btlr 4*cr1+so
+# CHECK-BE: bclr 12, 7, 0 # encoding: [0x4d,0x87,0x00,0x20]
+# CHECK-LE: bclr 12, 7, 0 # encoding: [0x20,0x00,0x87,0x4d]
+ btlr 4*cr1+un
+# CHECK-BE: bclr 12, 8, 0 # encoding: [0x4d,0x88,0x00,0x20]
+# CHECK-LE: bclr 12, 8, 0 # encoding: [0x20,0x00,0x88,0x4d]
+ btlr 4*cr2+lt
+# CHECK-BE: bclr 12, 9, 0 # encoding: [0x4d,0x89,0x00,0x20]
+# CHECK-LE: bclr 12, 9, 0 # encoding: [0x20,0x00,0x89,0x4d]
+ btlr 4*cr2+gt
+# CHECK-BE: bclr 12, 10, 0 # encoding: [0x4d,0x8a,0x00,0x20]
+# CHECK-LE: bclr 12, 10, 0 # encoding: [0x20,0x00,0x8a,0x4d]
+ btlr 4*cr2+eq
+# CHECK-BE: bclr 12, 11, 0 # encoding: [0x4d,0x8b,0x00,0x20]
+# CHECK-LE: bclr 12, 11, 0 # encoding: [0x20,0x00,0x8b,0x4d]
+ btlr 4*cr2+so
+# CHECK-BE: bclr 12, 11, 0 # encoding: [0x4d,0x8b,0x00,0x20]
+# CHECK-LE: bclr 12, 11, 0 # encoding: [0x20,0x00,0x8b,0x4d]
+ btlr 4*cr2+un
+# CHECK-BE: bclr 12, 12, 0 # encoding: [0x4d,0x8c,0x00,0x20]
+# CHECK-LE: bclr 12, 12, 0 # encoding: [0x20,0x00,0x8c,0x4d]
+ btlr 4*cr3+lt
+# CHECK-BE: bclr 12, 13, 0 # encoding: [0x4d,0x8d,0x00,0x20]
+# CHECK-LE: bclr 12, 13, 0 # encoding: [0x20,0x00,0x8d,0x4d]
+ btlr 4*cr3+gt
+# CHECK-BE: bclr 12, 14, 0 # encoding: [0x4d,0x8e,0x00,0x20]
+# CHECK-LE: bclr 12, 14, 0 # encoding: [0x20,0x00,0x8e,0x4d]
+ btlr 4*cr3+eq
+# CHECK-BE: bclr 12, 15, 0 # encoding: [0x4d,0x8f,0x00,0x20]
+# CHECK-LE: bclr 12, 15, 0 # encoding: [0x20,0x00,0x8f,0x4d]
+ btlr 4*cr3+so
+# CHECK-BE: bclr 12, 15, 0 # encoding: [0x4d,0x8f,0x00,0x20]
+# CHECK-LE: bclr 12, 15, 0 # encoding: [0x20,0x00,0x8f,0x4d]
+ btlr 4*cr3+un
+# CHECK-BE: bclr 12, 16, 0 # encoding: [0x4d,0x90,0x00,0x20]
+# CHECK-LE: bclr 12, 16, 0 # encoding: [0x20,0x00,0x90,0x4d]
+ btlr 4*cr4+lt
+# CHECK-BE: bclr 12, 17, 0 # encoding: [0x4d,0x91,0x00,0x20]
+# CHECK-LE: bclr 12, 17, 0 # encoding: [0x20,0x00,0x91,0x4d]
+ btlr 4*cr4+gt
+# CHECK-BE: bclr 12, 18, 0 # encoding: [0x4d,0x92,0x00,0x20]
+# CHECK-LE: bclr 12, 18, 0 # encoding: [0x20,0x00,0x92,0x4d]
+ btlr 4*cr4+eq
+# CHECK-BE: bclr 12, 19, 0 # encoding: [0x4d,0x93,0x00,0x20]
+# CHECK-LE: bclr 12, 19, 0 # encoding: [0x20,0x00,0x93,0x4d]
+ btlr 4*cr4+so
+# CHECK-BE: bclr 12, 19, 0 # encoding: [0x4d,0x93,0x00,0x20]
+# CHECK-LE: bclr 12, 19, 0 # encoding: [0x20,0x00,0x93,0x4d]
+ btlr 4*cr4+un
+# CHECK-BE: bclr 12, 20, 0 # encoding: [0x4d,0x94,0x00,0x20]
+# CHECK-LE: bclr 12, 20, 0 # encoding: [0x20,0x00,0x94,0x4d]
+ btlr 4*cr5+lt
+# CHECK-BE: bclr 12, 21, 0 # encoding: [0x4d,0x95,0x00,0x20]
+# CHECK-LE: bclr 12, 21, 0 # encoding: [0x20,0x00,0x95,0x4d]
+ btlr 4*cr5+gt
+# CHECK-BE: bclr 12, 22, 0 # encoding: [0x4d,0x96,0x00,0x20]
+# CHECK-LE: bclr 12, 22, 0 # encoding: [0x20,0x00,0x96,0x4d]
+ btlr 4*cr5+eq
+# CHECK-BE: bclr 12, 23, 0 # encoding: [0x4d,0x97,0x00,0x20]
+# CHECK-LE: bclr 12, 23, 0 # encoding: [0x20,0x00,0x97,0x4d]
+ btlr 4*cr5+so
+# CHECK-BE: bclr 12, 23, 0 # encoding: [0x4d,0x97,0x00,0x20]
+# CHECK-LE: bclr 12, 23, 0 # encoding: [0x20,0x00,0x97,0x4d]
+ btlr 4*cr5+un
+# CHECK-BE: bclr 12, 24, 0 # encoding: [0x4d,0x98,0x00,0x20]
+# CHECK-LE: bclr 12, 24, 0 # encoding: [0x20,0x00,0x98,0x4d]
+ btlr 4*cr6+lt
+# CHECK-BE: bclr 12, 25, 0 # encoding: [0x4d,0x99,0x00,0x20]
+# CHECK-LE: bclr 12, 25, 0 # encoding: [0x20,0x00,0x99,0x4d]
+ btlr 4*cr6+gt
+# CHECK-BE: bclr 12, 26, 0 # encoding: [0x4d,0x9a,0x00,0x20]
+# CHECK-LE: bclr 12, 26, 0 # encoding: [0x20,0x00,0x9a,0x4d]
+ btlr 4*cr6+eq
+# CHECK-BE: bclr 12, 27, 0 # encoding: [0x4d,0x9b,0x00,0x20]
+# CHECK-LE: bclr 12, 27, 0 # encoding: [0x20,0x00,0x9b,0x4d]
+ btlr 4*cr6+so
+# CHECK-BE: bclr 12, 27, 0 # encoding: [0x4d,0x9b,0x00,0x20]
+# CHECK-LE: bclr 12, 27, 0 # encoding: [0x20,0x00,0x9b,0x4d]
+ btlr 4*cr6+un
+# CHECK-BE: bclr 12, 28, 0 # encoding: [0x4d,0x9c,0x00,0x20]
+# CHECK-LE: bclr 12, 28, 0 # encoding: [0x20,0x00,0x9c,0x4d]
+ btlr 4*cr7+lt
+# CHECK-BE: bclr 12, 29, 0 # encoding: [0x4d,0x9d,0x00,0x20]
+# CHECK-LE: bclr 12, 29, 0 # encoding: [0x20,0x00,0x9d,0x4d]
+ btlr 4*cr7+gt
+# CHECK-BE: bclr 12, 30, 0 # encoding: [0x4d,0x9e,0x00,0x20]
+# CHECK-LE: bclr 12, 30, 0 # encoding: [0x20,0x00,0x9e,0x4d]
+ btlr 4*cr7+eq
+# CHECK-BE: bclr 12, 31, 0 # encoding: [0x4d,0x9f,0x00,0x20]
+# CHECK-LE: bclr 12, 31, 0 # encoding: [0x20,0x00,0x9f,0x4d]
+ btlr 4*cr7+so
+# CHECK-BE: bclr 12, 31, 0 # encoding: [0x4d,0x9f,0x00,0x20]
+# CHECK-LE: bclr 12, 31, 0 # encoding: [0x20,0x00,0x9f,0x4d]
+ btlr 4*cr7+un
# Branch mnemonics
-# CHECK: blr # encoding: [0x4e,0x80,0x00,0x20]
- blr
-# CHECK: bctr # encoding: [0x4e,0x80,0x04,0x20]
- bctr
-# CHECK: blrl # encoding: [0x4e,0x80,0x00,0x21]
- blrl
-# CHECK: bctrl # encoding: [0x4e,0x80,0x04,0x21]
- bctrl
-
-# CHECK: bc 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bt 2, target
-# CHECK: bca 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bta 2, target
-# CHECK: bclr 12, 2, 0 # encoding: [0x4d,0x82,0x00,0x20]
- btlr 2
-# CHECK: bcctr 12, 2, 0 # encoding: [0x4d,0x82,0x04,0x20]
- btctr 2
-# CHECK: bcl 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- btl 2, target
-# CHECK: bcla 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- btla 2, target
-# CHECK: bclrl 12, 2, 0 # encoding: [0x4d,0x82,0x00,0x21]
- btlrl 2
-# CHECK: bcctrl 12, 2, 0 # encoding: [0x4d,0x82,0x04,0x21]
- btctrl 2
-
-# CHECK: bc 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bt+ 2, target
-# CHECK: bca 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bta+ 2, target
-# CHECK: bclr 15, 2, 0 # encoding: [0x4d,0xe2,0x00,0x20]
- btlr+ 2
-# CHECK: bcctr 15, 2, 0 # encoding: [0x4d,0xe2,0x04,0x20]
- btctr+ 2
-# CHECK: bcl 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- btl+ 2, target
-# CHECK: bcla 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- btla+ 2, target
-# CHECK: bclrl 15, 2, 0 # encoding: [0x4d,0xe2,0x00,0x21]
- btlrl+ 2
-# CHECK: bcctrl 15, 2, 0 # encoding: [0x4d,0xe2,0x04,0x21]
- btctrl+ 2
-
-# CHECK: bc 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bt- 2, target
-# CHECK: bca 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bta- 2, target
-# CHECK: bclr 14, 2, 0 # encoding: [0x4d,0xc2,0x00,0x20]
- btlr- 2
-# CHECK: bcctr 14, 2, 0 # encoding: [0x4d,0xc2,0x04,0x20]
- btctr- 2
-# CHECK: bcl 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- btl- 2, target
-# CHECK: bcla 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- btla- 2, target
-# CHECK: bclrl 14, 2, 0 # encoding: [0x4d,0xc2,0x00,0x21]
- btlrl- 2
-# CHECK: bcctrl 14, 2, 0 # encoding: [0x4d,0xc2,0x04,0x21]
- btctrl- 2
-
-# CHECK: bc 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bf 2, target
-# CHECK: bca 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bfa 2, target
-# CHECK: bclr 4, 2, 0 # encoding: [0x4c,0x82,0x00,0x20]
- bflr 2
-# CHECK: bcctr 4, 2, 0 # encoding: [0x4c,0x82,0x04,0x20]
- bfctr 2
-# CHECK: bcl 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bfl 2, target
-# CHECK: bcla 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bfla 2, target
-# CHECK: bclrl 4, 2, 0 # encoding: [0x4c,0x82,0x00,0x21]
- bflrl 2
-# CHECK: bcctrl 4, 2, 0 # encoding: [0x4c,0x82,0x04,0x21]
- bfctrl 2
-
-# CHECK: bc 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bf+ 2, target
-# CHECK: bca 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bfa+ 2, target
-# CHECK: bclr 7, 2, 0 # encoding: [0x4c,0xe2,0x00,0x20]
- bflr+ 2
-# CHECK: bcctr 7, 2, 0 # encoding: [0x4c,0xe2,0x04,0x20]
- bfctr+ 2
-# CHECK: bcl 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bfl+ 2, target
-# CHECK: bcla 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bfla+ 2, target
-# CHECK: bclrl 7, 2, 0 # encoding: [0x4c,0xe2,0x00,0x21]
- bflrl+ 2
-# CHECK: bcctrl 7, 2, 0 # encoding: [0x4c,0xe2,0x04,0x21]
- bfctrl+ 2
-
-# CHECK: bc 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bf- 2, target
-# CHECK: bca 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bfa- 2, target
-# CHECK: bclr 6, 2, 0 # encoding: [0x4c,0xc2,0x00,0x20]
- bflr- 2
-# CHECK: bcctr 6, 2, 0 # encoding: [0x4c,0xc2,0x04,0x20]
- bfctr- 2
-# CHECK: bcl 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bfl- 2, target
-# CHECK: bcla 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bfla- 2, target
-# CHECK: bclrl 6, 2, 0 # encoding: [0x4c,0xc2,0x00,0x21]
- bflrl- 2
-# CHECK: bcctrl 6, 2, 0 # encoding: [0x4c,0xc2,0x04,0x21]
- bfctrl- 2
-
-# CHECK: bdnz target # encoding: [0x42,0x00,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnz target
-# CHECK: bdnza target # encoding: [0x42,0x00,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnza target
-# CHECK: bdnzlr # encoding: [0x4e,0x00,0x00,0x20]
- bdnzlr
-# CHECK: bdnzl target # encoding: [0x42,0x00,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnzl target
-# CHECK: bdnzla target # encoding: [0x42,0x00,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnzla target
-# CHECK: bdnzlrl # encoding: [0x4e,0x00,0x00,0x21]
- bdnzlrl
-
-# CHECK: bdnz+ target # encoding: [0x43,0x20,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnz+ target
-# CHECK: bdnza+ target # encoding: [0x43,0x20,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnza+ target
-# CHECK: bdnzlr+ # encoding: [0x4f,0x20,0x00,0x20]
- bdnzlr+
-# CHECK: bdnzl+ target # encoding: [0x43,0x20,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnzl+ target
-# CHECK: bdnzla+ target # encoding: [0x43,0x20,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnzla+ target
-# CHECK: bdnzlrl+ # encoding: [0x4f,0x20,0x00,0x21]
- bdnzlrl+
-
-# CHECK: bdnz- target # encoding: [0x43,0x00,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnz- target
-# CHECK: bdnza- target # encoding: [0x43,0x00,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnza- target
-# CHECK: bdnzlr- # encoding: [0x4f,0x00,0x00,0x20]
- bdnzlr-
-# CHECK: bdnzl- target # encoding: [0x43,0x00,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnzl- target
-# CHECK: bdnzla- target # encoding: [0x43,0x00,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnzla- target
-# CHECK: bdnzlrl- # encoding: [0x4f,0x00,0x00,0x21]
- bdnzlrl-
-
-# CHECK: bc 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnzt 2, target
-# CHECK: bca 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnzta 2, target
-# CHECK: bclr 8, 2, 0 # encoding: [0x4d,0x02,0x00,0x20]
- bdnztlr 2
-# CHECK: bcl 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnztl 2, target
-# CHECK: bcla 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnztla 2, target
-# CHECK: bclrl 8, 2, 0 # encoding: [0x4d,0x02,0x00,0x21]
- bdnztlrl 2
-
-# CHECK: bc 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnzf 2, target
-# CHECK: bca 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnzfa 2, target
-# CHECK: bclr 0, 2, 0 # encoding: [0x4c,0x02,0x00,0x20]
- bdnzflr 2
-# CHECK: bcl 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdnzfl 2, target
-# CHECK: bcla 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdnzfla 2, target
-# CHECK: bclrl 0, 2, 0 # encoding: [0x4c,0x02,0x00,0x21]
- bdnzflrl 2
-
-# CHECK: bdz target # encoding: [0x42,0x40,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdz target
-# CHECK: bdza target # encoding: [0x42,0x40,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdza target
-# CHECK: bdzlr # encoding: [0x4e,0x40,0x00,0x20]
- bdzlr
-# CHECK: bdzl target # encoding: [0x42,0x40,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdzl target
-# CHECK: bdzla target # encoding: [0x42,0x40,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdzla target
-# CHECK: bdzlrl # encoding: [0x4e,0x40,0x00,0x21]
- bdzlrl
-
-# CHECK: bdz+ target # encoding: [0x43,0x60,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdz+ target
-# CHECK: bdza+ target # encoding: [0x43,0x60,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdza+ target
-# CHECK: bdzlr+ # encoding: [0x4f,0x60,0x00,0x20]
- bdzlr+
-# CHECK: bdzl+ target # encoding: [0x43,0x60,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdzl+ target
-# CHECK: bdzla+ target # encoding: [0x43,0x60,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdzla+ target
-# CHECK: bdzlrl+ # encoding: [0x4f,0x60,0x00,0x21]
- bdzlrl+
-
-# CHECK: bdz- target # encoding: [0x43,0x40,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdz- target
-# CHECK: bdza- target # encoding: [0x43,0x40,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdza- target
-# CHECK: bdzlr- # encoding: [0x4f,0x40,0x00,0x20]
- bdzlr-
-# CHECK: bdzl- target # encoding: [0x43,0x40,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdzl- target
-# CHECK: bdzla- target # encoding: [0x43,0x40,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdzla- target
-# CHECK: bdzlrl- # encoding: [0x4f,0x40,0x00,0x21]
- bdzlrl-
-
-# CHECK: bc 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdzt 2, target
-# CHECK: bca 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdzta 2, target
-# CHECK: bclr 10, 2, 0 # encoding: [0x4d,0x42,0x00,0x20]
- bdztlr 2
-# CHECK: bcl 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdztl 2, target
-# CHECK: bcla 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdztla 2, target
-# CHECK: bclrl 10, 2, 0 # encoding: [0x4d,0x42,0x00,0x21]
- bdztlrl 2
-
-# CHECK: bc 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdzf 2, target
-# CHECK: bca 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdzfa 2, target
-# CHECK: bclr 2, 2, 0 # encoding: [0x4c,0x42,0x00,0x20]
- bdzflr 2
-# CHECK: bcl 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bdzfl 2, target
-# CHECK: bcla 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bdzfla 2, target
-# CHECK: bclrl 2, 2, 0 # encoding: [0x4c,0x42,0x00,0x21]
- bdzflrl 2
-
-# CHECK: blt 2, target # encoding: [0x41,0x88,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blt 2, target
-# CHECK: blt 0, target # encoding: [0x41,0x80,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blt target
-# CHECK: blta 2, target # encoding: [0x41,0x88,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blta 2, target
-# CHECK: blta 0, target # encoding: [0x41,0x80,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blta target
-# CHECK: bltlr 2 # encoding: [0x4d,0x88,0x00,0x20]
- bltlr 2
-# CHECK: bltlr 0 # encoding: [0x4d,0x80,0x00,0x20]
- bltlr
-# CHECK: bltctr 2 # encoding: [0x4d,0x88,0x04,0x20]
- bltctr 2
-# CHECK: bltctr 0 # encoding: [0x4d,0x80,0x04,0x20]
- bltctr
-# CHECK: bltl 2, target # encoding: [0x41,0x88,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bltl 2, target
-# CHECK: bltl 0, target # encoding: [0x41,0x80,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bltl target
-# CHECK: bltla 2, target # encoding: [0x41,0x88,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bltla 2, target
-# CHECK: bltla 0, target # encoding: [0x41,0x80,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bltla target
-# CHECK: bltlrl 2 # encoding: [0x4d,0x88,0x00,0x21]
- bltlrl 2
-# CHECK: bltlrl 0 # encoding: [0x4d,0x80,0x00,0x21]
- bltlrl
-# CHECK: bltctrl 2 # encoding: [0x4d,0x88,0x04,0x21]
- bltctrl 2
-# CHECK: bltctrl 0 # encoding: [0x4d,0x80,0x04,0x21]
- bltctrl
-
-# CHECK: blt+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blt+ 2, target
-# CHECK: blt+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blt+ target
-# CHECK: blta+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blta+ 2, target
-# CHECK: blta+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blta+ target
-# CHECK: bltlr+ 2 # encoding: [0x4d,0xe8,0x00,0x20]
- bltlr+ 2
-# CHECK: bltlr+ 0 # encoding: [0x4d,0xe0,0x00,0x20]
- bltlr+
-# CHECK: bltctr+ 2 # encoding: [0x4d,0xe8,0x04,0x20]
- bltctr+ 2
-# CHECK: bltctr+ 0 # encoding: [0x4d,0xe0,0x04,0x20]
- bltctr+
-# CHECK: bltl+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bltl+ 2, target
-# CHECK: bltl+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bltl+ target
-# CHECK: bltla+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bltla+ 2, target
-# CHECK: bltla+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bltla+ target
-# CHECK: bltlrl+ 2 # encoding: [0x4d,0xe8,0x00,0x21]
- bltlrl+ 2
-# CHECK: bltlrl+ 0 # encoding: [0x4d,0xe0,0x00,0x21]
- bltlrl+
-# CHECK: bltctrl+ 2 # encoding: [0x4d,0xe8,0x04,0x21]
- bltctrl+ 2
-# CHECK: bltctrl+ 0 # encoding: [0x4d,0xe0,0x04,0x21]
- bltctrl+
-
-# CHECK: blt- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blt- 2, target
-# CHECK: blt- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blt- target
-# CHECK: blta- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blta- 2, target
-# CHECK: blta- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blta- target
-# CHECK: bltlr- 2 # encoding: [0x4d,0xc8,0x00,0x20]
- bltlr- 2
-# CHECK: bltlr- 0 # encoding: [0x4d,0xc0,0x00,0x20]
- bltlr-
-# CHECK: bltctr- 2 # encoding: [0x4d,0xc8,0x04,0x20]
- bltctr- 2
-# CHECK: bltctr- 0 # encoding: [0x4d,0xc0,0x04,0x20]
- bltctr-
-# CHECK: bltl- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bltl- 2, target
-# CHECK: bltl- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bltl- target
-# CHECK: bltla- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bltla- 2, target
-# CHECK: bltla- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bltla- target
-# CHECK: bltlrl- 2 # encoding: [0x4d,0xc8,0x00,0x21]
- bltlrl- 2
-# CHECK: bltlrl- 0 # encoding: [0x4d,0xc0,0x00,0x21]
- bltlrl-
-# CHECK: bltctrl- 2 # encoding: [0x4d,0xc8,0x04,0x21]
- bltctrl- 2
-# CHECK: bltctrl- 0 # encoding: [0x4d,0xc0,0x04,0x21]
- bltctrl-
-
-# CHECK: ble 2, target # encoding: [0x40,0x89,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- ble 2, target
-# CHECK: ble 0, target # encoding: [0x40,0x81,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- ble target
-# CHECK: blea 2, target # encoding: [0x40,0x89,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blea 2, target
-# CHECK: blea 0, target # encoding: [0x40,0x81,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blea target
-# CHECK: blelr 2 # encoding: [0x4c,0x89,0x00,0x20]
- blelr 2
-# CHECK: blelr 0 # encoding: [0x4c,0x81,0x00,0x20]
- blelr
-# CHECK: blectr 2 # encoding: [0x4c,0x89,0x04,0x20]
- blectr 2
-# CHECK: blectr 0 # encoding: [0x4c,0x81,0x04,0x20]
- blectr
-# CHECK: blel 2, target # encoding: [0x40,0x89,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blel 2, target
-# CHECK: blel 0, target # encoding: [0x40,0x81,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blel target
-# CHECK: blela 2, target # encoding: [0x40,0x89,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blela 2, target
-# CHECK: blela 0, target # encoding: [0x40,0x81,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blela target
-# CHECK: blelrl 2 # encoding: [0x4c,0x89,0x00,0x21]
- blelrl 2
-# CHECK: blelrl 0 # encoding: [0x4c,0x81,0x00,0x21]
- blelrl
-# CHECK: blectrl 2 # encoding: [0x4c,0x89,0x04,0x21]
- blectrl 2
-# CHECK: blectrl 0 # encoding: [0x4c,0x81,0x04,0x21]
- blectrl
-
-# CHECK: ble+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- ble+ 2, target
-# CHECK: ble+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- ble+ target
-# CHECK: blea+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blea+ 2, target
-# CHECK: blea+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blea+ target
-# CHECK: blelr+ 2 # encoding: [0x4c,0xe9,0x00,0x20]
- blelr+ 2
-# CHECK: blelr+ 0 # encoding: [0x4c,0xe1,0x00,0x20]
- blelr+
-# CHECK: blectr+ 2 # encoding: [0x4c,0xe9,0x04,0x20]
- blectr+ 2
-# CHECK: blectr+ 0 # encoding: [0x4c,0xe1,0x04,0x20]
- blectr+
-# CHECK: blel+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blel+ 2, target
-# CHECK: blel+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blel+ target
-# CHECK: blela+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blela+ 2, target
-# CHECK: blela+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blela+ target
-# CHECK: blelrl+ 2 # encoding: [0x4c,0xe9,0x00,0x21]
- blelrl+ 2
-# CHECK: blelrl+ 0 # encoding: [0x4c,0xe1,0x00,0x21]
- blelrl+
-# CHECK: blectrl+ 2 # encoding: [0x4c,0xe9,0x04,0x21]
- blectrl+ 2
-# CHECK: blectrl+ 0 # encoding: [0x4c,0xe1,0x04,0x21]
- blectrl+
-
-# CHECK: ble- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- ble- 2, target
-# CHECK: ble- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- ble- target
-# CHECK: blea- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blea- 2, target
-# CHECK: blea- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blea- target
-# CHECK: blelr- 2 # encoding: [0x4c,0xc9,0x00,0x20]
- blelr- 2
-# CHECK: blelr- 0 # encoding: [0x4c,0xc1,0x00,0x20]
- blelr-
-# CHECK: blectr- 2 # encoding: [0x4c,0xc9,0x04,0x20]
- blectr- 2
-# CHECK: blectr- 0 # encoding: [0x4c,0xc1,0x04,0x20]
- blectr-
-# CHECK: blel- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blel- 2, target
-# CHECK: blel- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- blel- target
-# CHECK: blela- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blela- 2, target
-# CHECK: blela- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- blela- target
-# CHECK: blelrl- 2 # encoding: [0x4c,0xc9,0x00,0x21]
- blelrl- 2
-# CHECK: blelrl- 0 # encoding: [0x4c,0xc1,0x00,0x21]
- blelrl-
-# CHECK: blectrl- 2 # encoding: [0x4c,0xc9,0x04,0x21]
- blectrl- 2
-# CHECK: blectrl- 0 # encoding: [0x4c,0xc1,0x04,0x21]
- blectrl-
-
-# CHECK: beq 2, target # encoding: [0x41,0x8a,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beq 2, target
-# CHECK: beq 0, target # encoding: [0x41,0x82,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beq target
-# CHECK: beqa 2, target # encoding: [0x41,0x8a,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqa 2, target
-# CHECK: beqa 0, target # encoding: [0x41,0x82,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqa target
-# CHECK: beqlr 2 # encoding: [0x4d,0x8a,0x00,0x20]
- beqlr 2
-# CHECK: beqlr 0 # encoding: [0x4d,0x82,0x00,0x20]
- beqlr
-# CHECK: beqctr 2 # encoding: [0x4d,0x8a,0x04,0x20]
- beqctr 2
-# CHECK: beqctr 0 # encoding: [0x4d,0x82,0x04,0x20]
- beqctr
-# CHECK: beql 2, target # encoding: [0x41,0x8a,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beql 2, target
-# CHECK: beql 0, target # encoding: [0x41,0x82,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beql target
-# CHECK: beqla 2, target # encoding: [0x41,0x8a,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqla 2, target
-# CHECK: beqla 0, target # encoding: [0x41,0x82,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqla target
-# CHECK: beqlrl 2 # encoding: [0x4d,0x8a,0x00,0x21]
- beqlrl 2
-# CHECK: beqlrl 0 # encoding: [0x4d,0x82,0x00,0x21]
- beqlrl
-# CHECK: beqctrl 2 # encoding: [0x4d,0x8a,0x04,0x21]
- beqctrl 2
-# CHECK: beqctrl 0 # encoding: [0x4d,0x82,0x04,0x21]
- beqctrl
-
-# CHECK: beq+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beq+ 2, target
-# CHECK: beq+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beq+ target
-# CHECK: beqa+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqa+ 2, target
-# CHECK: beqa+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqa+ target
-# CHECK: beqlr+ 2 # encoding: [0x4d,0xea,0x00,0x20]
- beqlr+ 2
-# CHECK: beqlr+ 0 # encoding: [0x4d,0xe2,0x00,0x20]
- beqlr+
-# CHECK: beqctr+ 2 # encoding: [0x4d,0xea,0x04,0x20]
- beqctr+ 2
-# CHECK: beqctr+ 0 # encoding: [0x4d,0xe2,0x04,0x20]
- beqctr+
-# CHECK: beql+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beql+ 2, target
-# CHECK: beql+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beql+ target
-# CHECK: beqla+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqla+ 2, target
-# CHECK: beqla+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqla+ target
-# CHECK: beqlrl+ 2 # encoding: [0x4d,0xea,0x00,0x21]
- beqlrl+ 2
-# CHECK: beqlrl+ 0 # encoding: [0x4d,0xe2,0x00,0x21]
- beqlrl+
-# CHECK: beqctrl+ 2 # encoding: [0x4d,0xea,0x04,0x21]
- beqctrl+ 2
-# CHECK: beqctrl+ 0 # encoding: [0x4d,0xe2,0x04,0x21]
- beqctrl+
-
-# CHECK: beq- 2, target # encoding: [0x41,0xca,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beq- 2, target
-# CHECK: beq- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beq- target
-# CHECK: beqa- 2, target # encoding: [0x41,0xca,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqa- 2, target
-# CHECK: beqa- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqa- target
-# CHECK: beqlr- 2 # encoding: [0x4d,0xca,0x00,0x20]
- beqlr- 2
-# CHECK: beqlr- 0 # encoding: [0x4d,0xc2,0x00,0x20]
- beqlr-
-# CHECK: beqctr- 2 # encoding: [0x4d,0xca,0x04,0x20]
- beqctr- 2
-# CHECK: beqctr- 0 # encoding: [0x4d,0xc2,0x04,0x20]
- beqctr-
-# CHECK: beql- 2, target # encoding: [0x41,0xca,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beql- 2, target
-# CHECK: beql- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- beql- target
-# CHECK: beqla- 2, target # encoding: [0x41,0xca,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqla- 2, target
-# CHECK: beqla- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- beqla- target
-# CHECK: beqlrl- 2 # encoding: [0x4d,0xca,0x00,0x21]
- beqlrl- 2
-# CHECK: beqlrl- 0 # encoding: [0x4d,0xc2,0x00,0x21]
- beqlrl-
-# CHECK: beqctrl- 2 # encoding: [0x4d,0xca,0x04,0x21]
- beqctrl- 2
-# CHECK: beqctrl- 0 # encoding: [0x4d,0xc2,0x04,0x21]
- beqctrl-
-
-# CHECK: bge 2, target # encoding: [0x40,0x88,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bge 2, target
-# CHECK: bge 0, target # encoding: [0x40,0x80,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bge target
-# CHECK: bgea 2, target # encoding: [0x40,0x88,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgea 2, target
-# CHECK: bgea 0, target # encoding: [0x40,0x80,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgea target
-# CHECK: bgelr 2 # encoding: [0x4c,0x88,0x00,0x20]
- bgelr 2
-# CHECK: bgelr 0 # encoding: [0x4c,0x80,0x00,0x20]
- bgelr
-# CHECK: bgectr 2 # encoding: [0x4c,0x88,0x04,0x20]
- bgectr 2
-# CHECK: bgectr 0 # encoding: [0x4c,0x80,0x04,0x20]
- bgectr
-# CHECK: bgel 2, target # encoding: [0x40,0x88,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgel 2, target
-# CHECK: bgel 0, target # encoding: [0x40,0x80,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgel target
-# CHECK: bgela 2, target # encoding: [0x40,0x88,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgela 2, target
-# CHECK: bgela 0, target # encoding: [0x40,0x80,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgela target
-# CHECK: bgelrl 2 # encoding: [0x4c,0x88,0x00,0x21]
- bgelrl 2
-# CHECK: bgelrl 0 # encoding: [0x4c,0x80,0x00,0x21]
- bgelrl
-# CHECK: bgectrl 2 # encoding: [0x4c,0x88,0x04,0x21]
- bgectrl 2
-# CHECK: bgectrl 0 # encoding: [0x4c,0x80,0x04,0x21]
- bgectrl
-
-# CHECK: bge+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bge+ 2, target
-# CHECK: bge+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bge+ target
-# CHECK: bgea+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgea+ 2, target
-# CHECK: bgea+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgea+ target
-# CHECK: bgelr+ 2 # encoding: [0x4c,0xe8,0x00,0x20]
- bgelr+ 2
-# CHECK: bgelr+ 0 # encoding: [0x4c,0xe0,0x00,0x20]
- bgelr+
-# CHECK: bgectr+ 2 # encoding: [0x4c,0xe8,0x04,0x20]
- bgectr+ 2
-# CHECK: bgectr+ 0 # encoding: [0x4c,0xe0,0x04,0x20]
- bgectr+
-# CHECK: bgel+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgel+ 2, target
-# CHECK: bgel+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgel+ target
-# CHECK: bgela+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgela+ 2, target
-# CHECK: bgela+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgela+ target
-# CHECK: bgelrl+ 2 # encoding: [0x4c,0xe8,0x00,0x21]
- bgelrl+ 2
-# CHECK: bgelrl+ 0 # encoding: [0x4c,0xe0,0x00,0x21]
- bgelrl+
-# CHECK: bgectrl+ 2 # encoding: [0x4c,0xe8,0x04,0x21]
- bgectrl+ 2
-# CHECK: bgectrl+ 0 # encoding: [0x4c,0xe0,0x04,0x21]
- bgectrl+
-
-# CHECK: bge- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bge- 2, target
-# CHECK: bge- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bge- target
-# CHECK: bgea- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgea- 2, target
-# CHECK: bgea- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgea- target
-# CHECK: bgelr- 2 # encoding: [0x4c,0xc8,0x00,0x20]
- bgelr- 2
-# CHECK: bgelr- 0 # encoding: [0x4c,0xc0,0x00,0x20]
- bgelr-
-# CHECK: bgectr- 2 # encoding: [0x4c,0xc8,0x04,0x20]
- bgectr- 2
-# CHECK: bgectr- 0 # encoding: [0x4c,0xc0,0x04,0x20]
- bgectr-
-# CHECK: bgel- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgel- 2, target
-# CHECK: bgel- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgel- target
-# CHECK: bgela- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgela- 2, target
-# CHECK: bgela- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgela- target
-# CHECK: bgelrl- 2 # encoding: [0x4c,0xc8,0x00,0x21]
- bgelrl- 2
-# CHECK: bgelrl- 0 # encoding: [0x4c,0xc0,0x00,0x21]
- bgelrl-
-# CHECK: bgectrl- 2 # encoding: [0x4c,0xc8,0x04,0x21]
- bgectrl- 2
-# CHECK: bgectrl- 0 # encoding: [0x4c,0xc0,0x04,0x21]
- bgectrl-
-
-# CHECK: bgt 2, target # encoding: [0x41,0x89,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgt 2, target
-# CHECK: bgt 0, target # encoding: [0x41,0x81,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgt target
-# CHECK: bgta 2, target # encoding: [0x41,0x89,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgta 2, target
-# CHECK: bgta 0, target # encoding: [0x41,0x81,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgta target
-# CHECK: bgtlr 2 # encoding: [0x4d,0x89,0x00,0x20]
- bgtlr 2
-# CHECK: bgtlr 0 # encoding: [0x4d,0x81,0x00,0x20]
- bgtlr
-# CHECK: bgtctr 2 # encoding: [0x4d,0x89,0x04,0x20]
- bgtctr 2
-# CHECK: bgtctr 0 # encoding: [0x4d,0x81,0x04,0x20]
- bgtctr
-# CHECK: bgtl 2, target # encoding: [0x41,0x89,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgtl 2, target
-# CHECK: bgtl 0, target # encoding: [0x41,0x81,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgtl target
-# CHECK: bgtla 2, target # encoding: [0x41,0x89,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgtla 2, target
-# CHECK: bgtla 0, target # encoding: [0x41,0x81,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgtla target
-# CHECK: bgtlrl 2 # encoding: [0x4d,0x89,0x00,0x21]
- bgtlrl 2
-# CHECK: bgtlrl 0 # encoding: [0x4d,0x81,0x00,0x21]
- bgtlrl
-# CHECK: bgtctrl 2 # encoding: [0x4d,0x89,0x04,0x21]
- bgtctrl 2
-# CHECK: bgtctrl 0 # encoding: [0x4d,0x81,0x04,0x21]
- bgtctrl
-
-# CHECK: bgt+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgt+ 2, target
-# CHECK: bgt+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgt+ target
-# CHECK: bgta+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgta+ 2, target
-# CHECK: bgta+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgta+ target
-# CHECK: bgtlr+ 2 # encoding: [0x4d,0xe9,0x00,0x20]
- bgtlr+ 2
-# CHECK: bgtlr+ 0 # encoding: [0x4d,0xe1,0x00,0x20]
- bgtlr+
-# CHECK: bgtctr+ 2 # encoding: [0x4d,0xe9,0x04,0x20]
- bgtctr+ 2
-# CHECK: bgtctr+ 0 # encoding: [0x4d,0xe1,0x04,0x20]
- bgtctr+
-# CHECK: bgtl+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgtl+ 2, target
-# CHECK: bgtl+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgtl+ target
-# CHECK: bgtla+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgtla+ 2, target
-# CHECK: bgtla+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgtla+ target
-# CHECK: bgtlrl+ 2 # encoding: [0x4d,0xe9,0x00,0x21]
- bgtlrl+ 2
-# CHECK: bgtlrl+ 0 # encoding: [0x4d,0xe1,0x00,0x21]
- bgtlrl+
-# CHECK: bgtctrl+ 2 # encoding: [0x4d,0xe9,0x04,0x21]
- bgtctrl+ 2
-# CHECK: bgtctrl+ 0 # encoding: [0x4d,0xe1,0x04,0x21]
- bgtctrl+
-
-# CHECK: bgt- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgt- 2, target
-# CHECK: bgt- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgt- target
-# CHECK: bgta- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgta- 2, target
-# CHECK: bgta- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgta- target
-# CHECK: bgtlr- 2 # encoding: [0x4d,0xc9,0x00,0x20]
- bgtlr- 2
-# CHECK: bgtlr- 0 # encoding: [0x4d,0xc1,0x00,0x20]
- bgtlr-
-# CHECK: bgtctr- 2 # encoding: [0x4d,0xc9,0x04,0x20]
- bgtctr- 2
-# CHECK: bgtctr- 0 # encoding: [0x4d,0xc1,0x04,0x20]
- bgtctr-
-# CHECK: bgtl- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgtl- 2, target
-# CHECK: bgtl- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bgtl- target
-# CHECK: bgtla- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgtla- 2, target
-# CHECK: bgtla- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bgtla- target
-# CHECK: bgtlrl- 2 # encoding: [0x4d,0xc9,0x00,0x21]
- bgtlrl- 2
-# CHECK: bgtlrl- 0 # encoding: [0x4d,0xc1,0x00,0x21]
- bgtlrl-
-# CHECK: bgtctrl- 2 # encoding: [0x4d,0xc9,0x04,0x21]
- bgtctrl- 2
-# CHECK: bgtctrl- 0 # encoding: [0x4d,0xc1,0x04,0x21]
- bgtctrl-
-
-# CHECK: bge 2, target # encoding: [0x40,0x88,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnl 2, target
-# CHECK: bge 0, target # encoding: [0x40,0x80,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnl target
-# CHECK: bgea 2, target # encoding: [0x40,0x88,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnla 2, target
-# CHECK: bgea 0, target # encoding: [0x40,0x80,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnla target
-# CHECK: bgelr 2 # encoding: [0x4c,0x88,0x00,0x20]
- bnllr 2
-# CHECK: bgelr 0 # encoding: [0x4c,0x80,0x00,0x20]
- bnllr
-# CHECK: bgectr 2 # encoding: [0x4c,0x88,0x04,0x20]
- bnlctr 2
-# CHECK: bgectr 0 # encoding: [0x4c,0x80,0x04,0x20]
- bnlctr
-# CHECK: bgel 2, target # encoding: [0x40,0x88,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnll 2, target
-# CHECK: bgel 0, target # encoding: [0x40,0x80,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnll target
-# CHECK: bgela 2, target # encoding: [0x40,0x88,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnlla 2, target
-# CHECK: bgela 0, target # encoding: [0x40,0x80,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnlla target
-# CHECK: bgelrl 2 # encoding: [0x4c,0x88,0x00,0x21]
- bnllrl 2
-# CHECK: bgelrl 0 # encoding: [0x4c,0x80,0x00,0x21]
- bnllrl
-# CHECK: bgectrl 2 # encoding: [0x4c,0x88,0x04,0x21]
- bnlctrl 2
-# CHECK: bgectrl 0 # encoding: [0x4c,0x80,0x04,0x21]
- bnlctrl
-
-# CHECK: bge+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnl+ 2, target
-# CHECK: bge+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnl+ target
-# CHECK: bgea+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnla+ 2, target
-# CHECK: bgea+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnla+ target
-# CHECK: bgelr+ 2 # encoding: [0x4c,0xe8,0x00,0x20]
- bnllr+ 2
-# CHECK: bgelr+ 0 # encoding: [0x4c,0xe0,0x00,0x20]
- bnllr+
-# CHECK: bgectr+ 2 # encoding: [0x4c,0xe8,0x04,0x20]
- bnlctr+ 2
-# CHECK: bgectr+ 0 # encoding: [0x4c,0xe0,0x04,0x20]
- bnlctr+
-# CHECK: bgel+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnll+ 2, target
-# CHECK: bgel+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnll+ target
-# CHECK: bgela+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnlla+ 2, target
-# CHECK: bgela+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnlla+ target
-# CHECK: bgelrl+ 2 # encoding: [0x4c,0xe8,0x00,0x21]
- bnllrl+ 2
-# CHECK: bgelrl+ 0 # encoding: [0x4c,0xe0,0x00,0x21]
- bnllrl+
-# CHECK: bgectrl+ 2 # encoding: [0x4c,0xe8,0x04,0x21]
- bnlctrl+ 2
-# CHECK: bgectrl+ 0 # encoding: [0x4c,0xe0,0x04,0x21]
- bnlctrl+
-
-# CHECK: bge- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnl- 2, target
-# CHECK: bge- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnl- target
-# CHECK: bgea- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnla- 2, target
-# CHECK: bgea- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnla- target
-# CHECK: bgelr- 2 # encoding: [0x4c,0xc8,0x00,0x20]
- bnllr- 2
-# CHECK: bgelr- 0 # encoding: [0x4c,0xc0,0x00,0x20]
- bnllr-
-# CHECK: bgectr- 2 # encoding: [0x4c,0xc8,0x04,0x20]
- bnlctr- 2
-# CHECK: bgectr- 0 # encoding: [0x4c,0xc0,0x04,0x20]
- bnlctr-
-# CHECK: bgel- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnll- 2, target
-# CHECK: bgel- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnll- target
-# CHECK: bgela- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnlla- 2, target
-# CHECK: bgela- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnlla- target
-# CHECK: bgelrl- 2 # encoding: [0x4c,0xc8,0x00,0x21]
- bnllrl- 2
-# CHECK: bgelrl- 0 # encoding: [0x4c,0xc0,0x00,0x21]
- bnllrl-
-# CHECK: bgectrl- 2 # encoding: [0x4c,0xc8,0x04,0x21]
- bnlctrl- 2
-# CHECK: bgectrl- 0 # encoding: [0x4c,0xc0,0x04,0x21]
- bnlctrl-
-
-# CHECK: bne 2, target # encoding: [0x40,0x8a,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bne 2, target
-# CHECK: bne 0, target # encoding: [0x40,0x82,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bne target
-# CHECK: bnea 2, target # encoding: [0x40,0x8a,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnea 2, target
-# CHECK: bnea 0, target # encoding: [0x40,0x82,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnea target
-# CHECK: bnelr 2 # encoding: [0x4c,0x8a,0x00,0x20]
- bnelr 2
-# CHECK: bnelr 0 # encoding: [0x4c,0x82,0x00,0x20]
- bnelr
-# CHECK: bnectr 2 # encoding: [0x4c,0x8a,0x04,0x20]
- bnectr 2
-# CHECK: bnectr 0 # encoding: [0x4c,0x82,0x04,0x20]
- bnectr
-# CHECK: bnel 2, target # encoding: [0x40,0x8a,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnel 2, target
-# CHECK: bnel 0, target # encoding: [0x40,0x82,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnel target
-# CHECK: bnela 2, target # encoding: [0x40,0x8a,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnela 2, target
-# CHECK: bnela 0, target # encoding: [0x40,0x82,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnela target
-# CHECK: bnelrl 2 # encoding: [0x4c,0x8a,0x00,0x21]
- bnelrl 2
-# CHECK: bnelrl 0 # encoding: [0x4c,0x82,0x00,0x21]
- bnelrl
-# CHECK: bnectrl 2 # encoding: [0x4c,0x8a,0x04,0x21]
- bnectrl 2
-# CHECK: bnectrl 0 # encoding: [0x4c,0x82,0x04,0x21]
- bnectrl
-
-# CHECK: bne+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bne+ 2, target
-# CHECK: bne+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bne+ target
-# CHECK: bnea+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnea+ 2, target
-# CHECK: bnea+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnea+ target
-# CHECK: bnelr+ 2 # encoding: [0x4c,0xea,0x00,0x20]
- bnelr+ 2
-# CHECK: bnelr+ 0 # encoding: [0x4c,0xe2,0x00,0x20]
- bnelr+
-# CHECK: bnectr+ 2 # encoding: [0x4c,0xea,0x04,0x20]
- bnectr+ 2
-# CHECK: bnectr+ 0 # encoding: [0x4c,0xe2,0x04,0x20]
- bnectr+
-# CHECK: bnel+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnel+ 2, target
-# CHECK: bnel+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnel+ target
-# CHECK: bnela+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnela+ 2, target
-# CHECK: bnela+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnela+ target
-# CHECK: bnelrl+ 2 # encoding: [0x4c,0xea,0x00,0x21]
- bnelrl+ 2
-# CHECK: bnelrl+ 0 # encoding: [0x4c,0xe2,0x00,0x21]
- bnelrl+
-# CHECK: bnectrl+ 2 # encoding: [0x4c,0xea,0x04,0x21]
- bnectrl+ 2
-# CHECK: bnectrl+ 0 # encoding: [0x4c,0xe2,0x04,0x21]
- bnectrl+
-
-# CHECK: bne- 2, target # encoding: [0x40,0xca,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bne- 2, target
-# CHECK: bne- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bne- target
-# CHECK: bnea- 2, target # encoding: [0x40,0xca,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnea- 2, target
-# CHECK: bnea- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnea- target
-# CHECK: bnelr- 2 # encoding: [0x4c,0xca,0x00,0x20]
- bnelr- 2
-# CHECK: bnelr- 0 # encoding: [0x4c,0xc2,0x00,0x20]
- bnelr-
-# CHECK: bnectr- 2 # encoding: [0x4c,0xca,0x04,0x20]
- bnectr- 2
-# CHECK: bnectr- 0 # encoding: [0x4c,0xc2,0x04,0x20]
- bnectr-
-# CHECK: bnel- 2, target # encoding: [0x40,0xca,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnel- 2, target
-# CHECK: bnel- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnel- target
-# CHECK: bnela- 2, target # encoding: [0x40,0xca,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnela- 2, target
-# CHECK: bnela- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnela- target
-# CHECK: bnelrl- 2 # encoding: [0x4c,0xca,0x00,0x21]
- bnelrl- 2
-# CHECK: bnelrl- 0 # encoding: [0x4c,0xc2,0x00,0x21]
- bnelrl-
-# CHECK: bnectrl- 2 # encoding: [0x4c,0xca,0x04,0x21]
- bnectrl- 2
-# CHECK: bnectrl- 0 # encoding: [0x4c,0xc2,0x04,0x21]
- bnectrl-
-
-# CHECK: ble 2, target # encoding: [0x40,0x89,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bng 2, target
-# CHECK: ble 0, target # encoding: [0x40,0x81,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bng target
-# CHECK: blea 2, target # encoding: [0x40,0x89,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnga 2, target
-# CHECK: blea 0, target # encoding: [0x40,0x81,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnga target
-# CHECK: blelr 2 # encoding: [0x4c,0x89,0x00,0x20]
- bnglr 2
-# CHECK: blelr 0 # encoding: [0x4c,0x81,0x00,0x20]
- bnglr
-# CHECK: blectr 2 # encoding: [0x4c,0x89,0x04,0x20]
- bngctr 2
-# CHECK: blectr 0 # encoding: [0x4c,0x81,0x04,0x20]
- bngctr
-# CHECK: blel 2, target # encoding: [0x40,0x89,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bngl 2, target
-# CHECK: blel 0, target # encoding: [0x40,0x81,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bngl target
-# CHECK: blela 2, target # encoding: [0x40,0x89,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bngla 2, target
-# CHECK: blela 0, target # encoding: [0x40,0x81,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bngla target
-# CHECK: blelrl 2 # encoding: [0x4c,0x89,0x00,0x21]
- bnglrl 2
-# CHECK: blelrl 0 # encoding: [0x4c,0x81,0x00,0x21]
- bnglrl
-# CHECK: blectrl 2 # encoding: [0x4c,0x89,0x04,0x21]
- bngctrl 2
-# CHECK: blectrl 0 # encoding: [0x4c,0x81,0x04,0x21]
- bngctrl
-
-# CHECK: ble+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bng+ 2, target
-# CHECK: ble+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bng+ target
-# CHECK: blea+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnga+ 2, target
-# CHECK: blea+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnga+ target
-# CHECK: blelr+ 2 # encoding: [0x4c,0xe9,0x00,0x20]
- bnglr+ 2
-# CHECK: blelr+ 0 # encoding: [0x4c,0xe1,0x00,0x20]
- bnglr+
-# CHECK: blectr+ 2 # encoding: [0x4c,0xe9,0x04,0x20]
- bngctr+ 2
-# CHECK: blectr+ 0 # encoding: [0x4c,0xe1,0x04,0x20]
- bngctr+
-# CHECK: blel+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bngl+ 2, target
-# CHECK: blel+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bngl+ target
-# CHECK: blela+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bngla+ 2, target
-# CHECK: blela+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bngla+ target
-# CHECK: blelrl+ 2 # encoding: [0x4c,0xe9,0x00,0x21]
- bnglrl+ 2
-# CHECK: blelrl+ 0 # encoding: [0x4c,0xe1,0x00,0x21]
- bnglrl+
-# CHECK: blectrl+ 2 # encoding: [0x4c,0xe9,0x04,0x21]
- bngctrl+ 2
-# CHECK: blectrl+ 0 # encoding: [0x4c,0xe1,0x04,0x21]
- bngctrl+
-
-# CHECK: ble- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bng- 2, target
-# CHECK: ble- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bng- target
-# CHECK: blea- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnga- 2, target
-# CHECK: blea- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnga- target
-# CHECK: blelr- 2 # encoding: [0x4c,0xc9,0x00,0x20]
- bnglr- 2
-# CHECK: blelr- 0 # encoding: [0x4c,0xc1,0x00,0x20]
- bnglr-
-# CHECK: blectr- 2 # encoding: [0x4c,0xc9,0x04,0x20]
- bngctr- 2
-# CHECK: blectr- 0 # encoding: [0x4c,0xc1,0x04,0x20]
- bngctr-
-# CHECK: blel- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bngl- 2, target
-# CHECK: blel- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bngl- target
-# CHECK: blela- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bngla- 2, target
-# CHECK: blela- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bngla- target
-# CHECK: blelrl- 2 # encoding: [0x4c,0xc9,0x00,0x21]
- bnglrl- 2
-# CHECK: blelrl- 0 # encoding: [0x4c,0xc1,0x00,0x21]
- bnglrl-
-# CHECK: blectrl- 2 # encoding: [0x4c,0xc9,0x04,0x21]
- bngctrl- 2
-# CHECK: blectrl- 0 # encoding: [0x4c,0xc1,0x04,0x21]
- bngctrl-
-
-# CHECK: bun 2, target # encoding: [0x41,0x8b,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bso 2, target
-# CHECK: bun 0, target # encoding: [0x41,0x83,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bso target
-# CHECK: buna 2, target # encoding: [0x41,0x8b,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsoa 2, target
-# CHECK: buna 0, target # encoding: [0x41,0x83,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsoa target
-# CHECK: bunlr 2 # encoding: [0x4d,0x8b,0x00,0x20]
- bsolr 2
-# CHECK: bunlr 0 # encoding: [0x4d,0x83,0x00,0x20]
- bsolr
-# CHECK: bunctr 2 # encoding: [0x4d,0x8b,0x04,0x20]
- bsoctr 2
-# CHECK: bunctr 0 # encoding: [0x4d,0x83,0x04,0x20]
- bsoctr
-# CHECK: bunl 2, target # encoding: [0x41,0x8b,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bsol 2, target
-# CHECK: bunl 0, target # encoding: [0x41,0x83,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bsol target
-# CHECK: bunla 2, target # encoding: [0x41,0x8b,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsola 2, target
-# CHECK: bunla 0, target # encoding: [0x41,0x83,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsola target
-# CHECK: bunlrl 2 # encoding: [0x4d,0x8b,0x00,0x21]
- bsolrl 2
-# CHECK: bunlrl 0 # encoding: [0x4d,0x83,0x00,0x21]
- bsolrl
-# CHECK: bunctrl 2 # encoding: [0x4d,0x8b,0x04,0x21]
- bsoctrl 2
-# CHECK: bunctrl 0 # encoding: [0x4d,0x83,0x04,0x21]
- bsoctrl
-
-# CHECK: bun+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bso+ 2, target
-# CHECK: bun+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bso+ target
-# CHECK: buna+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsoa+ 2, target
-# CHECK: buna+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsoa+ target
-# CHECK: bunlr+ 2 # encoding: [0x4d,0xeb,0x00,0x20]
- bsolr+ 2
-# CHECK: bunlr+ 0 # encoding: [0x4d,0xe3,0x00,0x20]
- bsolr+
-# CHECK: bunctr+ 2 # encoding: [0x4d,0xeb,0x04,0x20]
- bsoctr+ 2
-# CHECK: bunctr+ 0 # encoding: [0x4d,0xe3,0x04,0x20]
- bsoctr+
-# CHECK: bunl+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bsol+ 2, target
-# CHECK: bunl+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bsol+ target
-# CHECK: bunla+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsola+ 2, target
-# CHECK: bunla+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsola+ target
-# CHECK: bunlrl+ 2 # encoding: [0x4d,0xeb,0x00,0x21]
- bsolrl+ 2
-# CHECK: bunlrl+ 0 # encoding: [0x4d,0xe3,0x00,0x21]
- bsolrl+
-# CHECK: bunctrl+ 2 # encoding: [0x4d,0xeb,0x04,0x21]
- bsoctrl+ 2
-# CHECK: bunctrl+ 0 # encoding: [0x4d,0xe3,0x04,0x21]
- bsoctrl+
-
-# CHECK: bun- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bso- 2, target
-# CHECK: bun- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bso- target
-# CHECK: buna- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsoa- 2, target
-# CHECK: buna- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsoa- target
-# CHECK: bunlr- 2 # encoding: [0x4d,0xcb,0x00,0x20]
- bsolr- 2
-# CHECK: bunlr- 0 # encoding: [0x4d,0xc3,0x00,0x20]
- bsolr-
-# CHECK: bunctr- 2 # encoding: [0x4d,0xcb,0x04,0x20]
- bsoctr- 2
-# CHECK: bunctr- 0 # encoding: [0x4d,0xc3,0x04,0x20]
- bsoctr-
-# CHECK: bunl- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bsol- 2, target
-# CHECK: bunl- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bsol- target
-# CHECK: bunla- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsola- 2, target
-# CHECK: bunla- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bsola- target
-# CHECK: bunlrl- 2 # encoding: [0x4d,0xcb,0x00,0x21]
- bsolrl- 2
-# CHECK: bunlrl- 0 # encoding: [0x4d,0xc3,0x00,0x21]
- bsolrl-
-# CHECK: bunctrl- 2 # encoding: [0x4d,0xcb,0x04,0x21]
- bsoctrl- 2
-# CHECK: bunctrl- 0 # encoding: [0x4d,0xc3,0x04,0x21]
- bsoctrl-
-
-# CHECK: bnu 2, target # encoding: [0x40,0x8b,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bns 2, target
-# CHECK: bnu 0, target # encoding: [0x40,0x83,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bns target
-# CHECK: bnua 2, target # encoding: [0x40,0x8b,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsa 2, target
-# CHECK: bnua 0, target # encoding: [0x40,0x83,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsa target
-# CHECK: bnulr 2 # encoding: [0x4c,0x8b,0x00,0x20]
- bnslr 2
-# CHECK: bnulr 0 # encoding: [0x4c,0x83,0x00,0x20]
- bnslr
-# CHECK: bnuctr 2 # encoding: [0x4c,0x8b,0x04,0x20]
- bnsctr 2
-# CHECK: bnuctr 0 # encoding: [0x4c,0x83,0x04,0x20]
- bnsctr
-# CHECK: bnul 2, target # encoding: [0x40,0x8b,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnsl 2, target
-# CHECK: bnul 0, target # encoding: [0x40,0x83,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnsl target
-# CHECK: bnula 2, target # encoding: [0x40,0x8b,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsla 2, target
-# CHECK: bnula 0, target # encoding: [0x40,0x83,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsla target
-# CHECK: bnulrl 2 # encoding: [0x4c,0x8b,0x00,0x21]
- bnslrl 2
-# CHECK: bnulrl 0 # encoding: [0x4c,0x83,0x00,0x21]
- bnslrl
-# CHECK: bnuctrl 2 # encoding: [0x4c,0x8b,0x04,0x21]
- bnsctrl 2
-# CHECK: bnuctrl 0 # encoding: [0x4c,0x83,0x04,0x21]
- bnsctrl
-
-# CHECK: bnu+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bns+ 2, target
-# CHECK: bnu+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bns+ target
-# CHECK: bnua+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsa+ 2, target
-# CHECK: bnua+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsa+ target
-# CHECK: bnulr+ 2 # encoding: [0x4c,0xeb,0x00,0x20]
- bnslr+ 2
-# CHECK: bnulr+ 0 # encoding: [0x4c,0xe3,0x00,0x20]
- bnslr+
-# CHECK: bnuctr+ 2 # encoding: [0x4c,0xeb,0x04,0x20]
- bnsctr+ 2
-# CHECK: bnuctr+ 0 # encoding: [0x4c,0xe3,0x04,0x20]
- bnsctr+
-# CHECK: bnul+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnsl+ 2, target
-# CHECK: bnul+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnsl+ target
-# CHECK: bnula+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsla+ 2, target
-# CHECK: bnula+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsla+ target
-# CHECK: bnulrl+ 2 # encoding: [0x4c,0xeb,0x00,0x21]
- bnslrl+ 2
-# CHECK: bnulrl+ 0 # encoding: [0x4c,0xe3,0x00,0x21]
- bnslrl+
-# CHECK: bnuctrl+ 2 # encoding: [0x4c,0xeb,0x04,0x21]
- bnsctrl+ 2
-# CHECK: bnuctrl+ 0 # encoding: [0x4c,0xe3,0x04,0x21]
- bnsctrl+
-
-# CHECK: bnu- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bns- 2, target
-# CHECK: bnu- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bns- target
-# CHECK: bnua- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsa- 2, target
-# CHECK: bnua- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsa- target
-# CHECK: bnulr- 2 # encoding: [0x4c,0xcb,0x00,0x20]
- bnslr- 2
-# CHECK: bnulr- 0 # encoding: [0x4c,0xc3,0x00,0x20]
- bnslr-
-# CHECK: bnuctr- 2 # encoding: [0x4c,0xcb,0x04,0x20]
- bnsctr- 2
-# CHECK: bnuctr- 0 # encoding: [0x4c,0xc3,0x04,0x20]
- bnsctr-
-# CHECK: bnul- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnsl- 2, target
-# CHECK: bnul- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnsl- target
-# CHECK: bnula- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsla- 2, target
-# CHECK: bnula- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnsla- target
-# CHECK: bnulrl- 2 # encoding: [0x4c,0xcb,0x00,0x21]
- bnslrl- 2
-# CHECK: bnulrl- 0 # encoding: [0x4c,0xc3,0x00,0x21]
- bnslrl-
-# CHECK: bnuctrl- 2 # encoding: [0x4c,0xcb,0x04,0x21]
- bnsctrl- 2
-# CHECK: bnuctrl- 0 # encoding: [0x4c,0xc3,0x04,0x21]
- bnsctrl-
-
-# CHECK: bun 2, target # encoding: [0x41,0x8b,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bun 2, target
-# CHECK: bun 0, target # encoding: [0x41,0x83,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bun target
-# CHECK: buna 2, target # encoding: [0x41,0x8b,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- buna 2, target
-# CHECK: buna 0, target # encoding: [0x41,0x83,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- buna target
-# CHECK: bunlr 2 # encoding: [0x4d,0x8b,0x00,0x20]
- bunlr 2
-# CHECK: bunlr 0 # encoding: [0x4d,0x83,0x00,0x20]
- bunlr
-# CHECK: bunctr 2 # encoding: [0x4d,0x8b,0x04,0x20]
- bunctr 2
-# CHECK: bunctr 0 # encoding: [0x4d,0x83,0x04,0x20]
- bunctr
-# CHECK: bunl 2, target # encoding: [0x41,0x8b,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bunl 2, target
-# CHECK: bunl 0, target # encoding: [0x41,0x83,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bunl target
-# CHECK: bunla 2, target # encoding: [0x41,0x8b,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bunla 2, target
-# CHECK: bunla 0, target # encoding: [0x41,0x83,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bunla target
-# CHECK: bunlrl 2 # encoding: [0x4d,0x8b,0x00,0x21]
- bunlrl 2
-# CHECK: bunlrl 0 # encoding: [0x4d,0x83,0x00,0x21]
- bunlrl
-# CHECK: bunctrl 2 # encoding: [0x4d,0x8b,0x04,0x21]
- bunctrl 2
-# CHECK: bunctrl 0 # encoding: [0x4d,0x83,0x04,0x21]
- bunctrl
-
-# CHECK: bun+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bun+ 2, target
-# CHECK: bun+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bun+ target
-# CHECK: buna+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- buna+ 2, target
-# CHECK: buna+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- buna+ target
-# CHECK: bunlr+ 2 # encoding: [0x4d,0xeb,0x00,0x20]
- bunlr+ 2
-# CHECK: bunlr+ 0 # encoding: [0x4d,0xe3,0x00,0x20]
- bunlr+
-# CHECK: bunctr+ 2 # encoding: [0x4d,0xeb,0x04,0x20]
- bunctr+ 2
-# CHECK: bunctr+ 0 # encoding: [0x4d,0xe3,0x04,0x20]
- bunctr+
-# CHECK: bunl+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bunl+ 2, target
-# CHECK: bunl+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bunl+ target
-# CHECK: bunla+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bunla+ 2, target
-# CHECK: bunla+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bunla+ target
-# CHECK: bunlrl+ 2 # encoding: [0x4d,0xeb,0x00,0x21]
- bunlrl+ 2
-# CHECK: bunlrl+ 0 # encoding: [0x4d,0xe3,0x00,0x21]
- bunlrl+
-# CHECK: bunctrl+ 2 # encoding: [0x4d,0xeb,0x04,0x21]
- bunctrl+ 2
-# CHECK: bunctrl+ 0 # encoding: [0x4d,0xe3,0x04,0x21]
- bunctrl+
-
-# CHECK: bun- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bun- 2, target
-# CHECK: bun- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bun- target
-# CHECK: buna- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- buna- 2, target
-# CHECK: buna- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- buna- target
-# CHECK: bunlr- 2 # encoding: [0x4d,0xcb,0x00,0x20]
- bunlr- 2
-# CHECK: bunlr- 0 # encoding: [0x4d,0xc3,0x00,0x20]
- bunlr-
-# CHECK: bunctr- 2 # encoding: [0x4d,0xcb,0x04,0x20]
- bunctr- 2
-# CHECK: bunctr- 0 # encoding: [0x4d,0xc3,0x04,0x20]
- bunctr-
-# CHECK: bunl- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bunl- 2, target
-# CHECK: bunl- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bunl- target
-# CHECK: bunla- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bunla- 2, target
-# CHECK: bunla- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bunla- target
-# CHECK: bunlrl- 2 # encoding: [0x4d,0xcb,0x00,0x21]
- bunlrl- 2
-# CHECK: bunlrl- 0 # encoding: [0x4d,0xc3,0x00,0x21]
- bunlrl-
-# CHECK: bunctrl- 2 # encoding: [0x4d,0xcb,0x04,0x21]
- bunctrl- 2
-# CHECK: bunctrl- 0 # encoding: [0x4d,0xc3,0x04,0x21]
- bunctrl-
-
-# CHECK: bnu 2, target # encoding: [0x40,0x8b,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnu 2, target
-# CHECK: bnu 0, target # encoding: [0x40,0x83,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnu target
-# CHECK: bnua 2, target # encoding: [0x40,0x8b,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnua 2, target
-# CHECK: bnua 0, target # encoding: [0x40,0x83,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnua target
-# CHECK: bnulr 2 # encoding: [0x4c,0x8b,0x00,0x20]
- bnulr 2
-# CHECK: bnulr 0 # encoding: [0x4c,0x83,0x00,0x20]
- bnulr
-# CHECK: bnuctr 2 # encoding: [0x4c,0x8b,0x04,0x20]
- bnuctr 2
-# CHECK: bnuctr 0 # encoding: [0x4c,0x83,0x04,0x20]
- bnuctr
-# CHECK: bnul 2, target # encoding: [0x40,0x8b,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnul 2, target
-# CHECK: bnul 0, target # encoding: [0x40,0x83,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnul target
-# CHECK: bnula 2, target # encoding: [0x40,0x8b,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnula 2, target
-# CHECK: bnula 0, target # encoding: [0x40,0x83,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnula target
-# CHECK: bnulrl 2 # encoding: [0x4c,0x8b,0x00,0x21]
- bnulrl 2
-# CHECK: bnulrl 0 # encoding: [0x4c,0x83,0x00,0x21]
- bnulrl
-# CHECK: bnuctrl 2 # encoding: [0x4c,0x8b,0x04,0x21]
- bnuctrl 2
-# CHECK: bnuctrl 0 # encoding: [0x4c,0x83,0x04,0x21]
- bnuctrl
-
-# CHECK: bnu+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnu+ 2, target
-# CHECK: bnu+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnu+ target
-# CHECK: bnua+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnua+ 2, target
-# CHECK: bnua+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnua+ target
-# CHECK: bnulr+ 2 # encoding: [0x4c,0xeb,0x00,0x20]
- bnulr+ 2
-# CHECK: bnulr+ 0 # encoding: [0x4c,0xe3,0x00,0x20]
- bnulr+
-# CHECK: bnuctr+ 2 # encoding: [0x4c,0xeb,0x04,0x20]
- bnuctr+ 2
-# CHECK: bnuctr+ 0 # encoding: [0x4c,0xe3,0x04,0x20]
- bnuctr+
-# CHECK: bnul+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnul+ 2, target
-# CHECK: bnul+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnul+ target
-# CHECK: bnula+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnula+ 2, target
-# CHECK: bnula+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnula+ target
-# CHECK: bnulrl+ 2 # encoding: [0x4c,0xeb,0x00,0x21]
- bnulrl+ 2
-# CHECK: bnulrl+ 0 # encoding: [0x4c,0xe3,0x00,0x21]
- bnulrl+
-# CHECK: bnuctrl+ 2 # encoding: [0x4c,0xeb,0x04,0x21]
- bnuctrl+ 2
-# CHECK: bnuctrl+ 0 # encoding: [0x4c,0xe3,0x04,0x21]
- bnuctrl+
-
-# CHECK: bnu- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnu- 2, target
-# CHECK: bnu- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnu- target
-# CHECK: bnua- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnua- 2, target
-# CHECK: bnua- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnua- target
-# CHECK: bnulr- 2 # encoding: [0x4c,0xcb,0x00,0x20]
- bnulr- 2
-# CHECK: bnulr- 0 # encoding: [0x4c,0xc3,0x00,0x20]
- bnulr-
-# CHECK: bnuctr- 2 # encoding: [0x4c,0xcb,0x04,0x20]
- bnuctr- 2
-# CHECK: bnuctr- 0 # encoding: [0x4c,0xc3,0x04,0x20]
- bnuctr-
-# CHECK: bnul- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnul- 2, target
-# CHECK: bnul- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bnul- target
-# CHECK: bnula- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnula- 2, target
-# CHECK: bnula- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bnula- target
-# CHECK: bnulrl- 2 # encoding: [0x4c,0xcb,0x00,0x21]
- bnulrl- 2
-# CHECK: bnulrl- 0 # encoding: [0x4c,0xc3,0x00,0x21]
- bnulrl-
-# CHECK: bnuctrl- 2 # encoding: [0x4c,0xcb,0x04,0x21]
- bnuctrl- 2
-# CHECK: bnuctrl- 0 # encoding: [0x4c,0xc3,0x04,0x21]
- bnuctrl-
+# CHECK-BE: blr # encoding: [0x4e,0x80,0x00,0x20]
+# CHECK-LE: blr # encoding: [0x20,0x00,0x80,0x4e]
+ blr
+# CHECK-BE: bctr # encoding: [0x4e,0x80,0x04,0x20]
+# CHECK-LE: bctr # encoding: [0x20,0x04,0x80,0x4e]
+ bctr
+# CHECK-BE: blrl # encoding: [0x4e,0x80,0x00,0x21]
+# CHECK-LE: blrl # encoding: [0x21,0x00,0x80,0x4e]
+ blrl
+# CHECK-BE: bctrl # encoding: [0x4e,0x80,0x04,0x21]
+# CHECK-LE: bctrl # encoding: [0x21,0x04,0x80,0x4e]
+ bctrl
+
+# CHECK-BE: bc 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA00]
+# CHECK-LE: bc 12, 2, target # encoding: [0bAAAAAA00,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bt 2, target
+# CHECK-BE: bca 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA10]
+# CHECK-LE: bca 12, 2, target # encoding: [0bAAAAAA10,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bta 2, target
+# CHECK-BE: bclr 12, 2, 0 # encoding: [0x4d,0x82,0x00,0x20]
+# CHECK-LE: bclr 12, 2, 0 # encoding: [0x20,0x00,0x82,0x4d]
+ btlr 2
+# CHECK-BE: bcctr 12, 2, 0 # encoding: [0x4d,0x82,0x04,0x20]
+# CHECK-LE: bcctr 12, 2, 0 # encoding: [0x20,0x04,0x82,0x4d]
+ btctr 2
+# CHECK-BE: bcl 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA01]
+# CHECK-LE: bcl 12, 2, target # encoding: [0bAAAAAA01,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ btl 2, target
+# CHECK-BE: bcla 12, 2, target # encoding: [0x41,0x82,A,0bAAAAAA11]
+# CHECK-LE: bcla 12, 2, target # encoding: [0bAAAAAA11,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ btla 2, target
+# CHECK-BE: bclrl 12, 2, 0 # encoding: [0x4d,0x82,0x00,0x21]
+# CHECK-LE: bclrl 12, 2, 0 # encoding: [0x21,0x00,0x82,0x4d]
+ btlrl 2
+# CHECK-BE: bcctrl 12, 2, 0 # encoding: [0x4d,0x82,0x04,0x21]
+# CHECK-LE: bcctrl 12, 2, 0 # encoding: [0x21,0x04,0x82,0x4d]
+ btctrl 2
+
+# CHECK-BE: bc 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA00]
+# CHECK-LE: bc 15, 2, target # encoding: [0bAAAAAA00,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bt+ 2, target
+# CHECK-BE: bca 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA10]
+# CHECK-LE: bca 15, 2, target # encoding: [0bAAAAAA10,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bta+ 2, target
+# CHECK-BE: bclr 15, 2, 0 # encoding: [0x4d,0xe2,0x00,0x20]
+# CHECK-LE: bclr 15, 2, 0 # encoding: [0x20,0x00,0xe2,0x4d]
+ btlr+ 2
+# CHECK-BE: bcctr 15, 2, 0 # encoding: [0x4d,0xe2,0x04,0x20]
+# CHECK-LE: bcctr 15, 2, 0 # encoding: [0x20,0x04,0xe2,0x4d]
+ btctr+ 2
+# CHECK-BE: bcl 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA01]
+# CHECK-LE: bcl 15, 2, target # encoding: [0bAAAAAA01,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ btl+ 2, target
+# CHECK-BE: bcla 15, 2, target # encoding: [0x41,0xe2,A,0bAAAAAA11]
+# CHECK-LE: bcla 15, 2, target # encoding: [0bAAAAAA11,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ btla+ 2, target
+# CHECK-BE: bclrl 15, 2, 0 # encoding: [0x4d,0xe2,0x00,0x21]
+# CHECK-LE: bclrl 15, 2, 0 # encoding: [0x21,0x00,0xe2,0x4d]
+ btlrl+ 2
+# CHECK-BE: bcctrl 15, 2, 0 # encoding: [0x4d,0xe2,0x04,0x21]
+# CHECK-LE: bcctrl 15, 2, 0 # encoding: [0x21,0x04,0xe2,0x4d]
+ btctrl+ 2
+
+# CHECK-BE: bc 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA00]
+# CHECK-LE: bc 14, 2, target # encoding: [0bAAAAAA00,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bt- 2, target
+# CHECK-BE: bca 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA10]
+# CHECK-LE: bca 14, 2, target # encoding: [0bAAAAAA10,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bta- 2, target
+# CHECK-BE: bclr 14, 2, 0 # encoding: [0x4d,0xc2,0x00,0x20]
+# CHECK-LE: bclr 14, 2, 0 # encoding: [0x20,0x00,0xc2,0x4d]
+ btlr- 2
+# CHECK-BE: bcctr 14, 2, 0 # encoding: [0x4d,0xc2,0x04,0x20]
+# CHECK-LE: bcctr 14, 2, 0 # encoding: [0x20,0x04,0xc2,0x4d]
+ btctr- 2
+# CHECK-BE: bcl 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA01]
+# CHECK-LE: bcl 14, 2, target # encoding: [0bAAAAAA01,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ btl- 2, target
+# CHECK-BE: bcla 14, 2, target # encoding: [0x41,0xc2,A,0bAAAAAA11]
+# CHECK-LE: bcla 14, 2, target # encoding: [0bAAAAAA11,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ btla- 2, target
+# CHECK-BE: bclrl 14, 2, 0 # encoding: [0x4d,0xc2,0x00,0x21]
+# CHECK-LE: bclrl 14, 2, 0 # encoding: [0x21,0x00,0xc2,0x4d]
+ btlrl- 2
+# CHECK-BE: bcctrl 14, 2, 0 # encoding: [0x4d,0xc2,0x04,0x21]
+# CHECK-LE: bcctrl 14, 2, 0 # encoding: [0x21,0x04,0xc2,0x4d]
+ btctrl- 2
+
+# CHECK-BE: bc 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA00]
+# CHECK-LE: bc 4, 2, target # encoding: [0bAAAAAA00,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bf 2, target
+# CHECK-BE: bca 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA10]
+# CHECK-LE: bca 4, 2, target # encoding: [0bAAAAAA10,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bfa 2, target
+# CHECK-BE: bclr 4, 2, 0 # encoding: [0x4c,0x82,0x00,0x20]
+# CHECK-LE: bclr 4, 2, 0 # encoding: [0x20,0x00,0x82,0x4c]
+ bflr 2
+# CHECK-BE: bcctr 4, 2, 0 # encoding: [0x4c,0x82,0x04,0x20]
+# CHECK-LE: bcctr 4, 2, 0 # encoding: [0x20,0x04,0x82,0x4c]
+ bfctr 2
+# CHECK-BE: bcl 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA01]
+# CHECK-LE: bcl 4, 2, target # encoding: [0bAAAAAA01,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bfl 2, target
+# CHECK-BE: bcla 4, 2, target # encoding: [0x40,0x82,A,0bAAAAAA11]
+# CHECK-LE: bcla 4, 2, target # encoding: [0bAAAAAA11,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bfla 2, target
+# CHECK-BE: bclrl 4, 2, 0 # encoding: [0x4c,0x82,0x00,0x21]
+# CHECK-LE: bclrl 4, 2, 0 # encoding: [0x21,0x00,0x82,0x4c]
+ bflrl 2
+# CHECK-BE: bcctrl 4, 2, 0 # encoding: [0x4c,0x82,0x04,0x21]
+# CHECK-LE: bcctrl 4, 2, 0 # encoding: [0x21,0x04,0x82,0x4c]
+ bfctrl 2
+
+# CHECK-BE: bc 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA00]
+# CHECK-LE: bc 7, 2, target # encoding: [0bAAAAAA00,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bf+ 2, target
+# CHECK-BE: bca 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA10]
+# CHECK-LE: bca 7, 2, target # encoding: [0bAAAAAA10,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bfa+ 2, target
+# CHECK-BE: bclr 7, 2, 0 # encoding: [0x4c,0xe2,0x00,0x20]
+# CHECK-LE: bclr 7, 2, 0 # encoding: [0x20,0x00,0xe2,0x4c]
+ bflr+ 2
+# CHECK-BE: bcctr 7, 2, 0 # encoding: [0x4c,0xe2,0x04,0x20]
+# CHECK-LE: bcctr 7, 2, 0 # encoding: [0x20,0x04,0xe2,0x4c]
+ bfctr+ 2
+# CHECK-BE: bcl 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA01]
+# CHECK-LE: bcl 7, 2, target # encoding: [0bAAAAAA01,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bfl+ 2, target
+# CHECK-BE: bcla 7, 2, target # encoding: [0x40,0xe2,A,0bAAAAAA11]
+# CHECK-LE: bcla 7, 2, target # encoding: [0bAAAAAA11,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bfla+ 2, target
+# CHECK-BE: bclrl 7, 2, 0 # encoding: [0x4c,0xe2,0x00,0x21]
+# CHECK-LE: bclrl 7, 2, 0 # encoding: [0x21,0x00,0xe2,0x4c]
+ bflrl+ 2
+# CHECK-BE: bcctrl 7, 2, 0 # encoding: [0x4c,0xe2,0x04,0x21]
+# CHECK-LE: bcctrl 7, 2, 0 # encoding: [0x21,0x04,0xe2,0x4c]
+ bfctrl+ 2
+
+# CHECK-BE: bc 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA00]
+# CHECK-LE: bc 6, 2, target # encoding: [0bAAAAAA00,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bf- 2, target
+# CHECK-BE: bca 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA10]
+# CHECK-LE: bca 6, 2, target # encoding: [0bAAAAAA10,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bfa- 2, target
+# CHECK-BE: bclr 6, 2, 0 # encoding: [0x4c,0xc2,0x00,0x20]
+# CHECK-LE: bclr 6, 2, 0 # encoding: [0x20,0x00,0xc2,0x4c]
+ bflr- 2
+# CHECK-BE: bcctr 6, 2, 0 # encoding: [0x4c,0xc2,0x04,0x20]
+# CHECK-LE: bcctr 6, 2, 0 # encoding: [0x20,0x04,0xc2,0x4c]
+ bfctr- 2
+# CHECK-BE: bcl 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA01]
+# CHECK-LE: bcl 6, 2, target # encoding: [0bAAAAAA01,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bfl- 2, target
+# CHECK-BE: bcla 6, 2, target # encoding: [0x40,0xc2,A,0bAAAAAA11]
+# CHECK-LE: bcla 6, 2, target # encoding: [0bAAAAAA11,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bfla- 2, target
+# CHECK-BE: bclrl 6, 2, 0 # encoding: [0x4c,0xc2,0x00,0x21]
+# CHECK-LE: bclrl 6, 2, 0 # encoding: [0x21,0x00,0xc2,0x4c]
+ bflrl- 2
+# CHECK-BE: bcctrl 6, 2, 0 # encoding: [0x4c,0xc2,0x04,0x21]
+# CHECK-LE: bcctrl 6, 2, 0 # encoding: [0x21,0x04,0xc2,0x4c]
+ bfctrl- 2
+
+# CHECK-BE: bdnz target # encoding: [0x42,0x00,A,0bAAAAAA00]
+# CHECK-LE: bdnz target # encoding: [0bAAAAAA00,A,0x00,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnz target
+# CHECK-BE: bdnza target # encoding: [0x42,0x00,A,0bAAAAAA10]
+# CHECK-LE: bdnza target # encoding: [0bAAAAAA10,A,0x00,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnza target
+# CHECK-BE: bdnzlr # encoding: [0x4e,0x00,0x00,0x20]
+# CHECK-LE: bdnzlr # encoding: [0x20,0x00,0x00,0x4e]
+ bdnzlr
+# CHECK-BE: bdnzl target # encoding: [0x42,0x00,A,0bAAAAAA01]
+# CHECK-LE: bdnzl target # encoding: [0bAAAAAA01,A,0x00,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnzl target
+# CHECK-BE: bdnzla target # encoding: [0x42,0x00,A,0bAAAAAA11]
+# CHECK-LE: bdnzla target # encoding: [0bAAAAAA11,A,0x00,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnzla target
+# CHECK-BE: bdnzlrl # encoding: [0x4e,0x00,0x00,0x21]
+# CHECK-LE: bdnzlrl # encoding: [0x21,0x00,0x00,0x4e]
+ bdnzlrl
+
+# CHECK-BE: bdnz+ target # encoding: [0x43,0x20,A,0bAAAAAA00]
+# CHECK-LE: bdnz+ target # encoding: [0bAAAAAA00,A,0x20,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnz+ target
+# CHECK-BE: bdnza+ target # encoding: [0x43,0x20,A,0bAAAAAA10]
+# CHECK-LE: bdnza+ target # encoding: [0bAAAAAA10,A,0x20,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnza+ target
+# CHECK-BE: bdnzlr+ # encoding: [0x4f,0x20,0x00,0x20]
+# CHECK-LE: bdnzlr+ # encoding: [0x20,0x00,0x20,0x4f]
+ bdnzlr+
+# CHECK-BE: bdnzl+ target # encoding: [0x43,0x20,A,0bAAAAAA01]
+# CHECK-LE: bdnzl+ target # encoding: [0bAAAAAA01,A,0x20,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnzl+ target
+# CHECK-BE: bdnzla+ target # encoding: [0x43,0x20,A,0bAAAAAA11]
+# CHECK-LE: bdnzla+ target # encoding: [0bAAAAAA11,A,0x20,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnzla+ target
+# CHECK-BE: bdnzlrl+ # encoding: [0x4f,0x20,0x00,0x21]
+# CHECK-LE: bdnzlrl+ # encoding: [0x21,0x00,0x20,0x4f]
+ bdnzlrl+
+
+# CHECK-BE: bdnz- target # encoding: [0x43,0x00,A,0bAAAAAA00]
+# CHECK-LE: bdnz- target # encoding: [0bAAAAAA00,A,0x00,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnz- target
+# CHECK-BE: bdnza- target # encoding: [0x43,0x00,A,0bAAAAAA10]
+# CHECK-LE: bdnza- target # encoding: [0bAAAAAA10,A,0x00,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnza- target
+# CHECK-BE: bdnzlr- # encoding: [0x4f,0x00,0x00,0x20]
+# CHECK-LE: bdnzlr- # encoding: [0x20,0x00,0x00,0x4f]
+ bdnzlr-
+# CHECK-BE: bdnzl- target # encoding: [0x43,0x00,A,0bAAAAAA01]
+# CHECK-LE: bdnzl- target # encoding: [0bAAAAAA01,A,0x00,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnzl- target
+# CHECK-BE: bdnzla- target # encoding: [0x43,0x00,A,0bAAAAAA11]
+# CHECK-LE: bdnzla- target # encoding: [0bAAAAAA11,A,0x00,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnzla- target
+# CHECK-BE: bdnzlrl- # encoding: [0x4f,0x00,0x00,0x21]
+# CHECK-LE: bdnzlrl- # encoding: [0x21,0x00,0x00,0x4f]
+ bdnzlrl-
+
+# CHECK-BE: bc 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA00]
+# CHECK-LE: bc 8, 2, target # encoding: [0bAAAAAA00,A,0x02,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnzt 2, target
+# CHECK-BE: bca 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA10]
+# CHECK-LE: bca 8, 2, target # encoding: [0bAAAAAA10,A,0x02,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnzta 2, target
+# CHECK-BE: bclr 8, 2, 0 # encoding: [0x4d,0x02,0x00,0x20]
+# CHECK-LE: bclr 8, 2, 0 # encoding: [0x20,0x00,0x02,0x4d]
+ bdnztlr 2
+# CHECK-BE: bcl 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA01]
+# CHECK-LE: bcl 8, 2, target # encoding: [0bAAAAAA01,A,0x02,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnztl 2, target
+# CHECK-BE: bcla 8, 2, target # encoding: [0x41,0x02,A,0bAAAAAA11]
+# CHECK-LE: bcla 8, 2, target # encoding: [0bAAAAAA11,A,0x02,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnztla 2, target
+# CHECK-BE: bclrl 8, 2, 0 # encoding: [0x4d,0x02,0x00,0x21]
+# CHECK-LE: bclrl 8, 2, 0 # encoding: [0x21,0x00,0x02,0x4d]
+ bdnztlrl 2
+
+# CHECK-BE: bc 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA00]
+# CHECK-LE: bc 0, 2, target # encoding: [0bAAAAAA00,A,0x02,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnzf 2, target
+# CHECK-BE: bca 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA10]
+# CHECK-LE: bca 0, 2, target # encoding: [0bAAAAAA10,A,0x02,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnzfa 2, target
+# CHECK-BE: bclr 0, 2, 0 # encoding: [0x4c,0x02,0x00,0x20]
+# CHECK-LE: bclr 0, 2, 0 # encoding: [0x20,0x00,0x02,0x4c]
+ bdnzflr 2
+# CHECK-BE: bcl 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA01]
+# CHECK-LE: bcl 0, 2, target # encoding: [0bAAAAAA01,A,0x02,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdnzfl 2, target
+# CHECK-BE: bcla 0, 2, target # encoding: [0x40,0x02,A,0bAAAAAA11]
+# CHECK-LE: bcla 0, 2, target # encoding: [0bAAAAAA11,A,0x02,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdnzfla 2, target
+# CHECK-BE: bclrl 0, 2, 0 # encoding: [0x4c,0x02,0x00,0x21]
+# CHECK-LE: bclrl 0, 2, 0 # encoding: [0x21,0x00,0x02,0x4c]
+ bdnzflrl 2
+
+# CHECK-BE: bdz target # encoding: [0x42,0x40,A,0bAAAAAA00]
+# CHECK-LE: bdz target # encoding: [0bAAAAAA00,A,0x40,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdz target
+# CHECK-BE: bdza target # encoding: [0x42,0x40,A,0bAAAAAA10]
+# CHECK-LE: bdza target # encoding: [0bAAAAAA10,A,0x40,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdza target
+# CHECK-BE: bdzlr # encoding: [0x4e,0x40,0x00,0x20]
+# CHECK-LE: bdzlr # encoding: [0x20,0x00,0x40,0x4e]
+ bdzlr
+# CHECK-BE: bdzl target # encoding: [0x42,0x40,A,0bAAAAAA01]
+# CHECK-LE: bdzl target # encoding: [0bAAAAAA01,A,0x40,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdzl target
+# CHECK-BE: bdzla target # encoding: [0x42,0x40,A,0bAAAAAA11]
+# CHECK-LE: bdzla target # encoding: [0bAAAAAA11,A,0x40,0x42]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdzla target
+# CHECK-BE: bdzlrl # encoding: [0x4e,0x40,0x00,0x21]
+# CHECK-LE: bdzlrl # encoding: [0x21,0x00,0x40,0x4e]
+ bdzlrl
+
+# CHECK-BE: bdz+ target # encoding: [0x43,0x60,A,0bAAAAAA00]
+# CHECK-LE: bdz+ target # encoding: [0bAAAAAA00,A,0x60,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdz+ target
+# CHECK-BE: bdza+ target # encoding: [0x43,0x60,A,0bAAAAAA10]
+# CHECK-LE: bdza+ target # encoding: [0bAAAAAA10,A,0x60,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdza+ target
+# CHECK-BE: bdzlr+ # encoding: [0x4f,0x60,0x00,0x20]
+# CHECK-LE: bdzlr+ # encoding: [0x20,0x00,0x60,0x4f]
+ bdzlr+
+# CHECK-BE: bdzl+ target # encoding: [0x43,0x60,A,0bAAAAAA01]
+# CHECK-LE: bdzl+ target # encoding: [0bAAAAAA01,A,0x60,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdzl+ target
+# CHECK-BE: bdzla+ target # encoding: [0x43,0x60,A,0bAAAAAA11]
+# CHECK-LE: bdzla+ target # encoding: [0bAAAAAA11,A,0x60,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdzla+ target
+# CHECK-BE: bdzlrl+ # encoding: [0x4f,0x60,0x00,0x21]
+# CHECK-LE: bdzlrl+ # encoding: [0x21,0x00,0x60,0x4f]
+ bdzlrl+
+
+# CHECK-BE: bdz- target # encoding: [0x43,0x40,A,0bAAAAAA00]
+# CHECK-LE: bdz- target # encoding: [0bAAAAAA00,A,0x40,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdz- target
+# CHECK-BE: bdza- target # encoding: [0x43,0x40,A,0bAAAAAA10]
+# CHECK-LE: bdza- target # encoding: [0bAAAAAA10,A,0x40,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdza- target
+# CHECK-BE: bdzlr- # encoding: [0x4f,0x40,0x00,0x20]
+# CHECK-LE: bdzlr- # encoding: [0x20,0x00,0x40,0x4f]
+ bdzlr-
+# CHECK-BE: bdzl- target # encoding: [0x43,0x40,A,0bAAAAAA01]
+# CHECK-LE: bdzl- target # encoding: [0bAAAAAA01,A,0x40,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdzl- target
+# CHECK-BE: bdzla- target # encoding: [0x43,0x40,A,0bAAAAAA11]
+# CHECK-LE: bdzla- target # encoding: [0bAAAAAA11,A,0x40,0x43]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdzla- target
+# CHECK-BE: bdzlrl- # encoding: [0x4f,0x40,0x00,0x21]
+# CHECK-LE: bdzlrl- # encoding: [0x21,0x00,0x40,0x4f]
+ bdzlrl-
+
+# CHECK-BE: bc 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA00]
+# CHECK-LE: bc 10, 2, target # encoding: [0bAAAAAA00,A,0x42,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdzt 2, target
+# CHECK-BE: bca 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA10]
+# CHECK-LE: bca 10, 2, target # encoding: [0bAAAAAA10,A,0x42,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdzta 2, target
+# CHECK-BE: bclr 10, 2, 0 # encoding: [0x4d,0x42,0x00,0x20]
+# CHECK-LE: bclr 10, 2, 0 # encoding: [0x20,0x00,0x42,0x4d]
+ bdztlr 2
+# CHECK-BE: bcl 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA01]
+# CHECK-LE: bcl 10, 2, target # encoding: [0bAAAAAA01,A,0x42,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdztl 2, target
+# CHECK-BE: bcla 10, 2, target # encoding: [0x41,0x42,A,0bAAAAAA11]
+# CHECK-LE: bcla 10, 2, target # encoding: [0bAAAAAA11,A,0x42,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdztla 2, target
+# CHECK-BE: bclrl 10, 2, 0 # encoding: [0x4d,0x42,0x00,0x21]
+# CHECK-LE: bclrl 10, 2, 0 # encoding: [0x21,0x00,0x42,0x4d]
+ bdztlrl 2
+
+# CHECK-BE: bc 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA00]
+# CHECK-LE: bc 2, 2, target # encoding: [0bAAAAAA00,A,0x42,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdzf 2, target
+# CHECK-BE: bca 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA10]
+# CHECK-LE: bca 2, 2, target # encoding: [0bAAAAAA10,A,0x42,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdzfa 2, target
+# CHECK-BE: bclr 2, 2, 0 # encoding: [0x4c,0x42,0x00,0x20]
+# CHECK-LE: bclr 2, 2, 0 # encoding: [0x20,0x00,0x42,0x4c]
+ bdzflr 2
+# CHECK-BE: bcl 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA01]
+# CHECK-LE: bcl 2, 2, target # encoding: [0bAAAAAA01,A,0x42,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bdzfl 2, target
+# CHECK-BE: bcla 2, 2, target # encoding: [0x40,0x42,A,0bAAAAAA11]
+# CHECK-LE: bcla 2, 2, target # encoding: [0bAAAAAA11,A,0x42,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bdzfla 2, target
+# CHECK-BE: bclrl 2, 2, 0 # encoding: [0x4c,0x42,0x00,0x21]
+# CHECK-LE: bclrl 2, 2, 0 # encoding: [0x21,0x00,0x42,0x4c]
+ bdzflrl 2
+
+# CHECK-BE: blt 2, target # encoding: [0x41,0x88,A,0bAAAAAA00]
+# CHECK-LE: blt 2, target # encoding: [0bAAAAAA00,A,0x88,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blt 2, target
+# CHECK-BE: blt 0, target # encoding: [0x41,0x80,A,0bAAAAAA00]
+# CHECK-LE: blt 0, target # encoding: [0bAAAAAA00,A,0x80,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blt target
+# CHECK-BE: blta 2, target # encoding: [0x41,0x88,A,0bAAAAAA10]
+# CHECK-LE: blta 2, target # encoding: [0bAAAAAA10,A,0x88,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blta 2, target
+# CHECK-BE: blta 0, target # encoding: [0x41,0x80,A,0bAAAAAA10]
+# CHECK-LE: blta 0, target # encoding: [0bAAAAAA10,A,0x80,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blta target
+# CHECK-BE: bltlr 2 # encoding: [0x4d,0x88,0x00,0x20]
+# CHECK-LE: bltlr 2 # encoding: [0x20,0x00,0x88,0x4d]
+ bltlr 2
+# CHECK-BE: bltlr 0 # encoding: [0x4d,0x80,0x00,0x20]
+# CHECK-LE: bltlr 0 # encoding: [0x20,0x00,0x80,0x4d]
+ bltlr
+# CHECK-BE: bltctr 2 # encoding: [0x4d,0x88,0x04,0x20]
+# CHECK-LE: bltctr 2 # encoding: [0x20,0x04,0x88,0x4d]
+ bltctr 2
+# CHECK-BE: bltctr 0 # encoding: [0x4d,0x80,0x04,0x20]
+# CHECK-LE: bltctr 0 # encoding: [0x20,0x04,0x80,0x4d]
+ bltctr
+# CHECK-BE: bltl 2, target # encoding: [0x41,0x88,A,0bAAAAAA01]
+# CHECK-LE: bltl 2, target # encoding: [0bAAAAAA01,A,0x88,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bltl 2, target
+# CHECK-BE: bltl 0, target # encoding: [0x41,0x80,A,0bAAAAAA01]
+# CHECK-LE: bltl 0, target # encoding: [0bAAAAAA01,A,0x80,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bltl target
+# CHECK-BE: bltla 2, target # encoding: [0x41,0x88,A,0bAAAAAA11]
+# CHECK-LE: bltla 2, target # encoding: [0bAAAAAA11,A,0x88,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bltla 2, target
+# CHECK-BE: bltla 0, target # encoding: [0x41,0x80,A,0bAAAAAA11]
+# CHECK-LE: bltla 0, target # encoding: [0bAAAAAA11,A,0x80,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bltla target
+# CHECK-BE: bltlrl 2 # encoding: [0x4d,0x88,0x00,0x21]
+# CHECK-LE: bltlrl 2 # encoding: [0x21,0x00,0x88,0x4d]
+ bltlrl 2
+# CHECK-BE: bltlrl 0 # encoding: [0x4d,0x80,0x00,0x21]
+# CHECK-LE: bltlrl 0 # encoding: [0x21,0x00,0x80,0x4d]
+ bltlrl
+# CHECK-BE: bltctrl 2 # encoding: [0x4d,0x88,0x04,0x21]
+# CHECK-LE: bltctrl 2 # encoding: [0x21,0x04,0x88,0x4d]
+ bltctrl 2
+# CHECK-BE: bltctrl 0 # encoding: [0x4d,0x80,0x04,0x21]
+# CHECK-LE: bltctrl 0 # encoding: [0x21,0x04,0x80,0x4d]
+ bltctrl
+
+# CHECK-BE: blt+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA00]
+# CHECK-LE: blt+ 2, target # encoding: [0bAAAAAA00,A,0xe8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blt+ 2, target
+# CHECK-BE: blt+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA00]
+# CHECK-LE: blt+ 0, target # encoding: [0bAAAAAA00,A,0xe0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blt+ target
+# CHECK-BE: blta+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA10]
+# CHECK-LE: blta+ 2, target # encoding: [0bAAAAAA10,A,0xe8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blta+ 2, target
+# CHECK-BE: blta+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA10]
+# CHECK-LE: blta+ 0, target # encoding: [0bAAAAAA10,A,0xe0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blta+ target
+# CHECK-BE: bltlr+ 2 # encoding: [0x4d,0xe8,0x00,0x20]
+# CHECK-LE: bltlr+ 2 # encoding: [0x20,0x00,0xe8,0x4d]
+ bltlr+ 2
+# CHECK-BE: bltlr+ 0 # encoding: [0x4d,0xe0,0x00,0x20]
+# CHECK-LE: bltlr+ 0 # encoding: [0x20,0x00,0xe0,0x4d]
+ bltlr+
+# CHECK-BE: bltctr+ 2 # encoding: [0x4d,0xe8,0x04,0x20]
+# CHECK-LE: bltctr+ 2 # encoding: [0x20,0x04,0xe8,0x4d]
+ bltctr+ 2
+# CHECK-BE: bltctr+ 0 # encoding: [0x4d,0xe0,0x04,0x20]
+# CHECK-LE: bltctr+ 0 # encoding: [0x20,0x04,0xe0,0x4d]
+ bltctr+
+# CHECK-BE: bltl+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA01]
+# CHECK-LE: bltl+ 2, target # encoding: [0bAAAAAA01,A,0xe8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bltl+ 2, target
+# CHECK-BE: bltl+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA01]
+# CHECK-LE: bltl+ 0, target # encoding: [0bAAAAAA01,A,0xe0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bltl+ target
+# CHECK-BE: bltla+ 2, target # encoding: [0x41,0xe8,A,0bAAAAAA11]
+# CHECK-LE: bltla+ 2, target # encoding: [0bAAAAAA11,A,0xe8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bltla+ 2, target
+# CHECK-BE: bltla+ 0, target # encoding: [0x41,0xe0,A,0bAAAAAA11]
+# CHECK-LE: bltla+ 0, target # encoding: [0bAAAAAA11,A,0xe0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bltla+ target
+# CHECK-BE: bltlrl+ 2 # encoding: [0x4d,0xe8,0x00,0x21]
+# CHECK-LE: bltlrl+ 2 # encoding: [0x21,0x00,0xe8,0x4d]
+ bltlrl+ 2
+# CHECK-BE: bltlrl+ 0 # encoding: [0x4d,0xe0,0x00,0x21]
+# CHECK-LE: bltlrl+ 0 # encoding: [0x21,0x00,0xe0,0x4d]
+ bltlrl+
+# CHECK-BE: bltctrl+ 2 # encoding: [0x4d,0xe8,0x04,0x21]
+# CHECK-LE: bltctrl+ 2 # encoding: [0x21,0x04,0xe8,0x4d]
+ bltctrl+ 2
+# CHECK-BE: bltctrl+ 0 # encoding: [0x4d,0xe0,0x04,0x21]
+# CHECK-LE: bltctrl+ 0 # encoding: [0x21,0x04,0xe0,0x4d]
+ bltctrl+
+
+# CHECK-BE: blt- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA00]
+# CHECK-LE: blt- 2, target # encoding: [0bAAAAAA00,A,0xc8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blt- 2, target
+# CHECK-BE: blt- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA00]
+# CHECK-LE: blt- 0, target # encoding: [0bAAAAAA00,A,0xc0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blt- target
+# CHECK-BE: blta- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA10]
+# CHECK-LE: blta- 2, target # encoding: [0bAAAAAA10,A,0xc8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blta- 2, target
+# CHECK-BE: blta- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA10]
+# CHECK-LE: blta- 0, target # encoding: [0bAAAAAA10,A,0xc0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blta- target
+# CHECK-BE: bltlr- 2 # encoding: [0x4d,0xc8,0x00,0x20]
+# CHECK-LE: bltlr- 2 # encoding: [0x20,0x00,0xc8,0x4d]
+ bltlr- 2
+# CHECK-BE: bltlr- 0 # encoding: [0x4d,0xc0,0x00,0x20]
+# CHECK-LE: bltlr- 0 # encoding: [0x20,0x00,0xc0,0x4d]
+ bltlr-
+# CHECK-BE: bltctr- 2 # encoding: [0x4d,0xc8,0x04,0x20]
+# CHECK-LE: bltctr- 2 # encoding: [0x20,0x04,0xc8,0x4d]
+ bltctr- 2
+# CHECK-BE: bltctr- 0 # encoding: [0x4d,0xc0,0x04,0x20]
+# CHECK-LE: bltctr- 0 # encoding: [0x20,0x04,0xc0,0x4d]
+ bltctr-
+# CHECK-BE: bltl- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA01]
+# CHECK-LE: bltl- 2, target # encoding: [0bAAAAAA01,A,0xc8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bltl- 2, target
+# CHECK-BE: bltl- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA01]
+# CHECK-LE: bltl- 0, target # encoding: [0bAAAAAA01,A,0xc0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bltl- target
+# CHECK-BE: bltla- 2, target # encoding: [0x41,0xc8,A,0bAAAAAA11]
+# CHECK-LE: bltla- 2, target # encoding: [0bAAAAAA11,A,0xc8,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bltla- 2, target
+# CHECK-BE: bltla- 0, target # encoding: [0x41,0xc0,A,0bAAAAAA11]
+# CHECK-LE: bltla- 0, target # encoding: [0bAAAAAA11,A,0xc0,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bltla- target
+# CHECK-BE: bltlrl- 2 # encoding: [0x4d,0xc8,0x00,0x21]
+# CHECK-LE: bltlrl- 2 # encoding: [0x21,0x00,0xc8,0x4d]
+ bltlrl- 2
+# CHECK-BE: bltlrl- 0 # encoding: [0x4d,0xc0,0x00,0x21]
+# CHECK-LE: bltlrl- 0 # encoding: [0x21,0x00,0xc0,0x4d]
+ bltlrl-
+# CHECK-BE: bltctrl- 2 # encoding: [0x4d,0xc8,0x04,0x21]
+# CHECK-LE: bltctrl- 2 # encoding: [0x21,0x04,0xc8,0x4d]
+ bltctrl- 2
+# CHECK-BE: bltctrl- 0 # encoding: [0x4d,0xc0,0x04,0x21]
+# CHECK-LE: bltctrl- 0 # encoding: [0x21,0x04,0xc0,0x4d]
+ bltctrl-
+
+# CHECK-BE: ble 2, target # encoding: [0x40,0x89,A,0bAAAAAA00]
+# CHECK-LE: ble 2, target # encoding: [0bAAAAAA00,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ ble 2, target
+# CHECK-BE: ble 0, target # encoding: [0x40,0x81,A,0bAAAAAA00]
+# CHECK-LE: ble 0, target # encoding: [0bAAAAAA00,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ ble target
+# CHECK-BE: blea 2, target # encoding: [0x40,0x89,A,0bAAAAAA10]
+# CHECK-LE: blea 2, target # encoding: [0bAAAAAA10,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blea 2, target
+# CHECK-BE: blea 0, target # encoding: [0x40,0x81,A,0bAAAAAA10]
+# CHECK-LE: blea 0, target # encoding: [0bAAAAAA10,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blea target
+# CHECK-BE: blelr 2 # encoding: [0x4c,0x89,0x00,0x20]
+# CHECK-LE: blelr 2 # encoding: [0x20,0x00,0x89,0x4c]
+ blelr 2
+# CHECK-BE: blelr 0 # encoding: [0x4c,0x81,0x00,0x20]
+# CHECK-LE: blelr 0 # encoding: [0x20,0x00,0x81,0x4c]
+ blelr
+# CHECK-BE: blectr 2 # encoding: [0x4c,0x89,0x04,0x20]
+# CHECK-LE: blectr 2 # encoding: [0x20,0x04,0x89,0x4c]
+ blectr 2
+# CHECK-BE: blectr 0 # encoding: [0x4c,0x81,0x04,0x20]
+# CHECK-LE: blectr 0 # encoding: [0x20,0x04,0x81,0x4c]
+ blectr
+# CHECK-BE: blel 2, target # encoding: [0x40,0x89,A,0bAAAAAA01]
+# CHECK-LE: blel 2, target # encoding: [0bAAAAAA01,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blel 2, target
+# CHECK-BE: blel 0, target # encoding: [0x40,0x81,A,0bAAAAAA01]
+# CHECK-LE: blel 0, target # encoding: [0bAAAAAA01,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blel target
+# CHECK-BE: blela 2, target # encoding: [0x40,0x89,A,0bAAAAAA11]
+# CHECK-LE: blela 2, target # encoding: [0bAAAAAA11,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blela 2, target
+# CHECK-BE: blela 0, target # encoding: [0x40,0x81,A,0bAAAAAA11]
+# CHECK-LE: blela 0, target # encoding: [0bAAAAAA11,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blela target
+# CHECK-BE: blelrl 2 # encoding: [0x4c,0x89,0x00,0x21]
+# CHECK-LE: blelrl 2 # encoding: [0x21,0x00,0x89,0x4c]
+ blelrl 2
+# CHECK-BE: blelrl 0 # encoding: [0x4c,0x81,0x00,0x21]
+# CHECK-LE: blelrl 0 # encoding: [0x21,0x00,0x81,0x4c]
+ blelrl
+# CHECK-BE: blectrl 2 # encoding: [0x4c,0x89,0x04,0x21]
+# CHECK-LE: blectrl 2 # encoding: [0x21,0x04,0x89,0x4c]
+ blectrl 2
+# CHECK-BE: blectrl 0 # encoding: [0x4c,0x81,0x04,0x21]
+# CHECK-LE: blectrl 0 # encoding: [0x21,0x04,0x81,0x4c]
+ blectrl
+
+# CHECK-BE: ble+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA00]
+# CHECK-LE: ble+ 2, target # encoding: [0bAAAAAA00,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ ble+ 2, target
+# CHECK-BE: ble+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA00]
+# CHECK-LE: ble+ 0, target # encoding: [0bAAAAAA00,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ ble+ target
+# CHECK-BE: blea+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA10]
+# CHECK-LE: blea+ 2, target # encoding: [0bAAAAAA10,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blea+ 2, target
+# CHECK-BE: blea+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA10]
+# CHECK-LE: blea+ 0, target # encoding: [0bAAAAAA10,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blea+ target
+# CHECK-BE: blelr+ 2 # encoding: [0x4c,0xe9,0x00,0x20]
+# CHECK-LE: blelr+ 2 # encoding: [0x20,0x00,0xe9,0x4c]
+ blelr+ 2
+# CHECK-BE: blelr+ 0 # encoding: [0x4c,0xe1,0x00,0x20]
+# CHECK-LE: blelr+ 0 # encoding: [0x20,0x00,0xe1,0x4c]
+ blelr+
+# CHECK-BE: blectr+ 2 # encoding: [0x4c,0xe9,0x04,0x20]
+# CHECK-LE: blectr+ 2 # encoding: [0x20,0x04,0xe9,0x4c]
+ blectr+ 2
+# CHECK-BE: blectr+ 0 # encoding: [0x4c,0xe1,0x04,0x20]
+# CHECK-LE: blectr+ 0 # encoding: [0x20,0x04,0xe1,0x4c]
+ blectr+
+# CHECK-BE: blel+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA01]
+# CHECK-LE: blel+ 2, target # encoding: [0bAAAAAA01,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blel+ 2, target
+# CHECK-BE: blel+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA01]
+# CHECK-LE: blel+ 0, target # encoding: [0bAAAAAA01,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blel+ target
+# CHECK-BE: blela+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA11]
+# CHECK-LE: blela+ 2, target # encoding: [0bAAAAAA11,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blela+ 2, target
+# CHECK-BE: blela+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA11]
+# CHECK-LE: blela+ 0, target # encoding: [0bAAAAAA11,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blela+ target
+# CHECK-BE: blelrl+ 2 # encoding: [0x4c,0xe9,0x00,0x21]
+# CHECK-LE: blelrl+ 2 # encoding: [0x21,0x00,0xe9,0x4c]
+ blelrl+ 2
+# CHECK-BE: blelrl+ 0 # encoding: [0x4c,0xe1,0x00,0x21]
+# CHECK-LE: blelrl+ 0 # encoding: [0x21,0x00,0xe1,0x4c]
+ blelrl+
+# CHECK-BE: blectrl+ 2 # encoding: [0x4c,0xe9,0x04,0x21]
+# CHECK-LE: blectrl+ 2 # encoding: [0x21,0x04,0xe9,0x4c]
+ blectrl+ 2
+# CHECK-BE: blectrl+ 0 # encoding: [0x4c,0xe1,0x04,0x21]
+# CHECK-LE: blectrl+ 0 # encoding: [0x21,0x04,0xe1,0x4c]
+ blectrl+
+
+# CHECK-BE: ble- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA00]
+# CHECK-LE: ble- 2, target # encoding: [0bAAAAAA00,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ ble- 2, target
+# CHECK-BE: ble- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA00]
+# CHECK-LE: ble- 0, target # encoding: [0bAAAAAA00,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ ble- target
+# CHECK-BE: blea- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA10]
+# CHECK-LE: blea- 2, target # encoding: [0bAAAAAA10,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blea- 2, target
+# CHECK-BE: blea- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA10]
+# CHECK-LE: blea- 0, target # encoding: [0bAAAAAA10,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blea- target
+# CHECK-BE: blelr- 2 # encoding: [0x4c,0xc9,0x00,0x20]
+# CHECK-LE: blelr- 2 # encoding: [0x20,0x00,0xc9,0x4c]
+ blelr- 2
+# CHECK-BE: blelr- 0 # encoding: [0x4c,0xc1,0x00,0x20]
+# CHECK-LE: blelr- 0 # encoding: [0x20,0x00,0xc1,0x4c]
+ blelr-
+# CHECK-BE: blectr- 2 # encoding: [0x4c,0xc9,0x04,0x20]
+# CHECK-LE: blectr- 2 # encoding: [0x20,0x04,0xc9,0x4c]
+ blectr- 2
+# CHECK-BE: blectr- 0 # encoding: [0x4c,0xc1,0x04,0x20]
+# CHECK-LE: blectr- 0 # encoding: [0x20,0x04,0xc1,0x4c]
+ blectr-
+# CHECK-BE: blel- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA01]
+# CHECK-LE: blel- 2, target # encoding: [0bAAAAAA01,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blel- 2, target
+# CHECK-BE: blel- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA01]
+# CHECK-LE: blel- 0, target # encoding: [0bAAAAAA01,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ blel- target
+# CHECK-BE: blela- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA11]
+# CHECK-LE: blela- 2, target # encoding: [0bAAAAAA11,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blela- 2, target
+# CHECK-BE: blela- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA11]
+# CHECK-LE: blela- 0, target # encoding: [0bAAAAAA11,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ blela- target
+# CHECK-BE: blelrl- 2 # encoding: [0x4c,0xc9,0x00,0x21]
+# CHECK-LE: blelrl- 2 # encoding: [0x21,0x00,0xc9,0x4c]
+ blelrl- 2
+# CHECK-BE: blelrl- 0 # encoding: [0x4c,0xc1,0x00,0x21]
+# CHECK-LE: blelrl- 0 # encoding: [0x21,0x00,0xc1,0x4c]
+ blelrl-
+# CHECK-BE: blectrl- 2 # encoding: [0x4c,0xc9,0x04,0x21]
+# CHECK-LE: blectrl- 2 # encoding: [0x21,0x04,0xc9,0x4c]
+ blectrl- 2
+# CHECK-BE: blectrl- 0 # encoding: [0x4c,0xc1,0x04,0x21]
+# CHECK-LE: blectrl- 0 # encoding: [0x21,0x04,0xc1,0x4c]
+ blectrl-
+
+# CHECK-BE: beq 2, target # encoding: [0x41,0x8a,A,0bAAAAAA00]
+# CHECK-LE: beq 2, target # encoding: [0bAAAAAA00,A,0x8a,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beq 2, target
+# CHECK-BE: beq 0, target # encoding: [0x41,0x82,A,0bAAAAAA00]
+# CHECK-LE: beq 0, target # encoding: [0bAAAAAA00,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beq target
+# CHECK-BE: beqa 2, target # encoding: [0x41,0x8a,A,0bAAAAAA10]
+# CHECK-LE: beqa 2, target # encoding: [0bAAAAAA10,A,0x8a,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqa 2, target
+# CHECK-BE: beqa 0, target # encoding: [0x41,0x82,A,0bAAAAAA10]
+# CHECK-LE: beqa 0, target # encoding: [0bAAAAAA10,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqa target
+# CHECK-BE: beqlr 2 # encoding: [0x4d,0x8a,0x00,0x20]
+# CHECK-LE: beqlr 2 # encoding: [0x20,0x00,0x8a,0x4d]
+ beqlr 2
+# CHECK-BE: beqlr 0 # encoding: [0x4d,0x82,0x00,0x20]
+# CHECK-LE: beqlr 0 # encoding: [0x20,0x00,0x82,0x4d]
+ beqlr
+# CHECK-BE: beqctr 2 # encoding: [0x4d,0x8a,0x04,0x20]
+# CHECK-LE: beqctr 2 # encoding: [0x20,0x04,0x8a,0x4d]
+ beqctr 2
+# CHECK-BE: beqctr 0 # encoding: [0x4d,0x82,0x04,0x20]
+# CHECK-LE: beqctr 0 # encoding: [0x20,0x04,0x82,0x4d]
+ beqctr
+# CHECK-BE: beql 2, target # encoding: [0x41,0x8a,A,0bAAAAAA01]
+# CHECK-LE: beql 2, target # encoding: [0bAAAAAA01,A,0x8a,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beql 2, target
+# CHECK-BE: beql 0, target # encoding: [0x41,0x82,A,0bAAAAAA01]
+# CHECK-LE: beql 0, target # encoding: [0bAAAAAA01,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beql target
+# CHECK-BE: beqla 2, target # encoding: [0x41,0x8a,A,0bAAAAAA11]
+# CHECK-LE: beqla 2, target # encoding: [0bAAAAAA11,A,0x8a,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqla 2, target
+# CHECK-BE: beqla 0, target # encoding: [0x41,0x82,A,0bAAAAAA11]
+# CHECK-LE: beqla 0, target # encoding: [0bAAAAAA11,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqla target
+# CHECK-BE: beqlrl 2 # encoding: [0x4d,0x8a,0x00,0x21]
+# CHECK-LE: beqlrl 2 # encoding: [0x21,0x00,0x8a,0x4d]
+ beqlrl 2
+# CHECK-BE: beqlrl 0 # encoding: [0x4d,0x82,0x00,0x21]
+# CHECK-LE: beqlrl 0 # encoding: [0x21,0x00,0x82,0x4d]
+ beqlrl
+# CHECK-BE: beqctrl 2 # encoding: [0x4d,0x8a,0x04,0x21]
+# CHECK-LE: beqctrl 2 # encoding: [0x21,0x04,0x8a,0x4d]
+ beqctrl 2
+# CHECK-BE: beqctrl 0 # encoding: [0x4d,0x82,0x04,0x21]
+# CHECK-LE: beqctrl 0 # encoding: [0x21,0x04,0x82,0x4d]
+ beqctrl
+
+# CHECK-BE: beq+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA00]
+# CHECK-LE: beq+ 2, target # encoding: [0bAAAAAA00,A,0xea,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beq+ 2, target
+# CHECK-BE: beq+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA00]
+# CHECK-LE: beq+ 0, target # encoding: [0bAAAAAA00,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beq+ target
+# CHECK-BE: beqa+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA10]
+# CHECK-LE: beqa+ 2, target # encoding: [0bAAAAAA10,A,0xea,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqa+ 2, target
+# CHECK-BE: beqa+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA10]
+# CHECK-LE: beqa+ 0, target # encoding: [0bAAAAAA10,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqa+ target
+# CHECK-BE: beqlr+ 2 # encoding: [0x4d,0xea,0x00,0x20]
+# CHECK-LE: beqlr+ 2 # encoding: [0x20,0x00,0xea,0x4d]
+ beqlr+ 2
+# CHECK-BE: beqlr+ 0 # encoding: [0x4d,0xe2,0x00,0x20]
+# CHECK-LE: beqlr+ 0 # encoding: [0x20,0x00,0xe2,0x4d]
+ beqlr+
+# CHECK-BE: beqctr+ 2 # encoding: [0x4d,0xea,0x04,0x20]
+# CHECK-LE: beqctr+ 2 # encoding: [0x20,0x04,0xea,0x4d]
+ beqctr+ 2
+# CHECK-BE: beqctr+ 0 # encoding: [0x4d,0xe2,0x04,0x20]
+# CHECK-LE: beqctr+ 0 # encoding: [0x20,0x04,0xe2,0x4d]
+ beqctr+
+# CHECK-BE: beql+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA01]
+# CHECK-LE: beql+ 2, target # encoding: [0bAAAAAA01,A,0xea,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beql+ 2, target
+# CHECK-BE: beql+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA01]
+# CHECK-LE: beql+ 0, target # encoding: [0bAAAAAA01,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beql+ target
+# CHECK-BE: beqla+ 2, target # encoding: [0x41,0xea,A,0bAAAAAA11]
+# CHECK-LE: beqla+ 2, target # encoding: [0bAAAAAA11,A,0xea,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqla+ 2, target
+# CHECK-BE: beqla+ 0, target # encoding: [0x41,0xe2,A,0bAAAAAA11]
+# CHECK-LE: beqla+ 0, target # encoding: [0bAAAAAA11,A,0xe2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqla+ target
+# CHECK-BE: beqlrl+ 2 # encoding: [0x4d,0xea,0x00,0x21]
+# CHECK-LE: beqlrl+ 2 # encoding: [0x21,0x00,0xea,0x4d]
+ beqlrl+ 2
+# CHECK-BE: beqlrl+ 0 # encoding: [0x4d,0xe2,0x00,0x21]
+# CHECK-LE: beqlrl+ 0 # encoding: [0x21,0x00,0xe2,0x4d]
+ beqlrl+
+# CHECK-BE: beqctrl+ 2 # encoding: [0x4d,0xea,0x04,0x21]
+# CHECK-LE: beqctrl+ 2 # encoding: [0x21,0x04,0xea,0x4d]
+ beqctrl+ 2
+# CHECK-BE: beqctrl+ 0 # encoding: [0x4d,0xe2,0x04,0x21]
+# CHECK-LE: beqctrl+ 0 # encoding: [0x21,0x04,0xe2,0x4d]
+ beqctrl+
+
+# CHECK-BE: beq- 2, target # encoding: [0x41,0xca,A,0bAAAAAA00]
+# CHECK-LE: beq- 2, target # encoding: [0bAAAAAA00,A,0xca,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beq- 2, target
+# CHECK-BE: beq- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA00]
+# CHECK-LE: beq- 0, target # encoding: [0bAAAAAA00,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beq- target
+# CHECK-BE: beqa- 2, target # encoding: [0x41,0xca,A,0bAAAAAA10]
+# CHECK-LE: beqa- 2, target # encoding: [0bAAAAAA10,A,0xca,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqa- 2, target
+# CHECK-BE: beqa- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA10]
+# CHECK-LE: beqa- 0, target # encoding: [0bAAAAAA10,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqa- target
+# CHECK-BE: beqlr- 2 # encoding: [0x4d,0xca,0x00,0x20]
+# CHECK-LE: beqlr- 2 # encoding: [0x20,0x00,0xca,0x4d]
+ beqlr- 2
+# CHECK-BE: beqlr- 0 # encoding: [0x4d,0xc2,0x00,0x20]
+# CHECK-LE: beqlr- 0 # encoding: [0x20,0x00,0xc2,0x4d]
+ beqlr-
+# CHECK-BE: beqctr- 2 # encoding: [0x4d,0xca,0x04,0x20]
+# CHECK-LE: beqctr- 2 # encoding: [0x20,0x04,0xca,0x4d]
+ beqctr- 2
+# CHECK-BE: beqctr- 0 # encoding: [0x4d,0xc2,0x04,0x20]
+# CHECK-LE: beqctr- 0 # encoding: [0x20,0x04,0xc2,0x4d]
+ beqctr-
+# CHECK-BE: beql- 2, target # encoding: [0x41,0xca,A,0bAAAAAA01]
+# CHECK-LE: beql- 2, target # encoding: [0bAAAAAA01,A,0xca,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beql- 2, target
+# CHECK-BE: beql- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA01]
+# CHECK-LE: beql- 0, target # encoding: [0bAAAAAA01,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ beql- target
+# CHECK-BE: beqla- 2, target # encoding: [0x41,0xca,A,0bAAAAAA11]
+# CHECK-LE: beqla- 2, target # encoding: [0bAAAAAA11,A,0xca,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqla- 2, target
+# CHECK-BE: beqla- 0, target # encoding: [0x41,0xc2,A,0bAAAAAA11]
+# CHECK-LE: beqla- 0, target # encoding: [0bAAAAAA11,A,0xc2,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ beqla- target
+# CHECK-BE: beqlrl- 2 # encoding: [0x4d,0xca,0x00,0x21]
+# CHECK-LE: beqlrl- 2 # encoding: [0x21,0x00,0xca,0x4d]
+ beqlrl- 2
+# CHECK-BE: beqlrl- 0 # encoding: [0x4d,0xc2,0x00,0x21]
+# CHECK-LE: beqlrl- 0 # encoding: [0x21,0x00,0xc2,0x4d]
+ beqlrl-
+# CHECK-BE: beqctrl- 2 # encoding: [0x4d,0xca,0x04,0x21]
+# CHECK-LE: beqctrl- 2 # encoding: [0x21,0x04,0xca,0x4d]
+ beqctrl- 2
+# CHECK-BE: beqctrl- 0 # encoding: [0x4d,0xc2,0x04,0x21]
+# CHECK-LE: beqctrl- 0 # encoding: [0x21,0x04,0xc2,0x4d]
+ beqctrl-
+
+# CHECK-BE: bge 2, target # encoding: [0x40,0x88,A,0bAAAAAA00]
+# CHECK-LE: bge 2, target # encoding: [0bAAAAAA00,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bge 2, target
+# CHECK-BE: bge 0, target # encoding: [0x40,0x80,A,0bAAAAAA00]
+# CHECK-LE: bge 0, target # encoding: [0bAAAAAA00,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bge target
+# CHECK-BE: bgea 2, target # encoding: [0x40,0x88,A,0bAAAAAA10]
+# CHECK-LE: bgea 2, target # encoding: [0bAAAAAA10,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgea 2, target
+# CHECK-BE: bgea 0, target # encoding: [0x40,0x80,A,0bAAAAAA10]
+# CHECK-LE: bgea 0, target # encoding: [0bAAAAAA10,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgea target
+# CHECK-BE: bgelr 2 # encoding: [0x4c,0x88,0x00,0x20]
+# CHECK-LE: bgelr 2 # encoding: [0x20,0x00,0x88,0x4c]
+ bgelr 2
+# CHECK-BE: bgelr 0 # encoding: [0x4c,0x80,0x00,0x20]
+# CHECK-LE: bgelr 0 # encoding: [0x20,0x00,0x80,0x4c]
+ bgelr
+# CHECK-BE: bgectr 2 # encoding: [0x4c,0x88,0x04,0x20]
+# CHECK-LE: bgectr 2 # encoding: [0x20,0x04,0x88,0x4c]
+ bgectr 2
+# CHECK-BE: bgectr 0 # encoding: [0x4c,0x80,0x04,0x20]
+# CHECK-LE: bgectr 0 # encoding: [0x20,0x04,0x80,0x4c]
+ bgectr
+# CHECK-BE: bgel 2, target # encoding: [0x40,0x88,A,0bAAAAAA01]
+# CHECK-LE: bgel 2, target # encoding: [0bAAAAAA01,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgel 2, target
+# CHECK-BE: bgel 0, target # encoding: [0x40,0x80,A,0bAAAAAA01]
+# CHECK-LE: bgel 0, target # encoding: [0bAAAAAA01,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgel target
+# CHECK-BE: bgela 2, target # encoding: [0x40,0x88,A,0bAAAAAA11]
+# CHECK-LE: bgela 2, target # encoding: [0bAAAAAA11,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgela 2, target
+# CHECK-BE: bgela 0, target # encoding: [0x40,0x80,A,0bAAAAAA11]
+# CHECK-LE: bgela 0, target # encoding: [0bAAAAAA11,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgela target
+# CHECK-BE: bgelrl 2 # encoding: [0x4c,0x88,0x00,0x21]
+# CHECK-LE: bgelrl 2 # encoding: [0x21,0x00,0x88,0x4c]
+ bgelrl 2
+# CHECK-BE: bgelrl 0 # encoding: [0x4c,0x80,0x00,0x21]
+# CHECK-LE: bgelrl 0 # encoding: [0x21,0x00,0x80,0x4c]
+ bgelrl
+# CHECK-BE: bgectrl 2 # encoding: [0x4c,0x88,0x04,0x21]
+# CHECK-LE: bgectrl 2 # encoding: [0x21,0x04,0x88,0x4c]
+ bgectrl 2
+# CHECK-BE: bgectrl 0 # encoding: [0x4c,0x80,0x04,0x21]
+# CHECK-LE: bgectrl 0 # encoding: [0x21,0x04,0x80,0x4c]
+ bgectrl
+
+# CHECK-BE: bge+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA00]
+# CHECK-LE: bge+ 2, target # encoding: [0bAAAAAA00,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bge+ 2, target
+# CHECK-BE: bge+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA00]
+# CHECK-LE: bge+ 0, target # encoding: [0bAAAAAA00,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bge+ target
+# CHECK-BE: bgea+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA10]
+# CHECK-LE: bgea+ 2, target # encoding: [0bAAAAAA10,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgea+ 2, target
+# CHECK-BE: bgea+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA10]
+# CHECK-LE: bgea+ 0, target # encoding: [0bAAAAAA10,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgea+ target
+# CHECK-BE: bgelr+ 2 # encoding: [0x4c,0xe8,0x00,0x20]
+# CHECK-LE: bgelr+ 2 # encoding: [0x20,0x00,0xe8,0x4c]
+ bgelr+ 2
+# CHECK-BE: bgelr+ 0 # encoding: [0x4c,0xe0,0x00,0x20]
+# CHECK-LE: bgelr+ 0 # encoding: [0x20,0x00,0xe0,0x4c]
+ bgelr+
+# CHECK-BE: bgectr+ 2 # encoding: [0x4c,0xe8,0x04,0x20]
+# CHECK-LE: bgectr+ 2 # encoding: [0x20,0x04,0xe8,0x4c]
+ bgectr+ 2
+# CHECK-BE: bgectr+ 0 # encoding: [0x4c,0xe0,0x04,0x20]
+# CHECK-LE: bgectr+ 0 # encoding: [0x20,0x04,0xe0,0x4c]
+ bgectr+
+# CHECK-BE: bgel+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA01]
+# CHECK-LE: bgel+ 2, target # encoding: [0bAAAAAA01,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgel+ 2, target
+# CHECK-BE: bgel+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA01]
+# CHECK-LE: bgel+ 0, target # encoding: [0bAAAAAA01,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgel+ target
+# CHECK-BE: bgela+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA11]
+# CHECK-LE: bgela+ 2, target # encoding: [0bAAAAAA11,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgela+ 2, target
+# CHECK-BE: bgela+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA11]
+# CHECK-LE: bgela+ 0, target # encoding: [0bAAAAAA11,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgela+ target
+# CHECK-BE: bgelrl+ 2 # encoding: [0x4c,0xe8,0x00,0x21]
+# CHECK-LE: bgelrl+ 2 # encoding: [0x21,0x00,0xe8,0x4c]
+ bgelrl+ 2
+# CHECK-BE: bgelrl+ 0 # encoding: [0x4c,0xe0,0x00,0x21]
+# CHECK-LE: bgelrl+ 0 # encoding: [0x21,0x00,0xe0,0x4c]
+ bgelrl+
+# CHECK-BE: bgectrl+ 2 # encoding: [0x4c,0xe8,0x04,0x21]
+# CHECK-LE: bgectrl+ 2 # encoding: [0x21,0x04,0xe8,0x4c]
+ bgectrl+ 2
+# CHECK-BE: bgectrl+ 0 # encoding: [0x4c,0xe0,0x04,0x21]
+# CHECK-LE: bgectrl+ 0 # encoding: [0x21,0x04,0xe0,0x4c]
+ bgectrl+
+
+# CHECK-BE: bge- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA00]
+# CHECK-LE: bge- 2, target # encoding: [0bAAAAAA00,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bge- 2, target
+# CHECK-BE: bge- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA00]
+# CHECK-LE: bge- 0, target # encoding: [0bAAAAAA00,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bge- target
+# CHECK-BE: bgea- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA10]
+# CHECK-LE: bgea- 2, target # encoding: [0bAAAAAA10,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgea- 2, target
+# CHECK-BE: bgea- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA10]
+# CHECK-LE: bgea- 0, target # encoding: [0bAAAAAA10,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgea- target
+# CHECK-BE: bgelr- 2 # encoding: [0x4c,0xc8,0x00,0x20]
+# CHECK-LE: bgelr- 2 # encoding: [0x20,0x00,0xc8,0x4c]
+ bgelr- 2
+# CHECK-BE: bgelr- 0 # encoding: [0x4c,0xc0,0x00,0x20]
+# CHECK-LE: bgelr- 0 # encoding: [0x20,0x00,0xc0,0x4c]
+ bgelr-
+# CHECK-BE: bgectr- 2 # encoding: [0x4c,0xc8,0x04,0x20]
+# CHECK-LE: bgectr- 2 # encoding: [0x20,0x04,0xc8,0x4c]
+ bgectr- 2
+# CHECK-BE: bgectr- 0 # encoding: [0x4c,0xc0,0x04,0x20]
+# CHECK-LE: bgectr- 0 # encoding: [0x20,0x04,0xc0,0x4c]
+ bgectr-
+# CHECK-BE: bgel- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA01]
+# CHECK-LE: bgel- 2, target # encoding: [0bAAAAAA01,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgel- 2, target
+# CHECK-BE: bgel- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA01]
+# CHECK-LE: bgel- 0, target # encoding: [0bAAAAAA01,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgel- target
+# CHECK-BE: bgela- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA11]
+# CHECK-LE: bgela- 2, target # encoding: [0bAAAAAA11,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgela- 2, target
+# CHECK-BE: bgela- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA11]
+# CHECK-LE: bgela- 0, target # encoding: [0bAAAAAA11,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgela- target
+# CHECK-BE: bgelrl- 2 # encoding: [0x4c,0xc8,0x00,0x21]
+# CHECK-LE: bgelrl- 2 # encoding: [0x21,0x00,0xc8,0x4c]
+ bgelrl- 2
+# CHECK-BE: bgelrl- 0 # encoding: [0x4c,0xc0,0x00,0x21]
+# CHECK-LE: bgelrl- 0 # encoding: [0x21,0x00,0xc0,0x4c]
+ bgelrl-
+# CHECK-BE: bgectrl- 2 # encoding: [0x4c,0xc8,0x04,0x21]
+# CHECK-LE: bgectrl- 2 # encoding: [0x21,0x04,0xc8,0x4c]
+ bgectrl- 2
+# CHECK-BE: bgectrl- 0 # encoding: [0x4c,0xc0,0x04,0x21]
+# CHECK-LE: bgectrl- 0 # encoding: [0x21,0x04,0xc0,0x4c]
+ bgectrl-
+
+# CHECK-BE: bgt 2, target # encoding: [0x41,0x89,A,0bAAAAAA00]
+# CHECK-LE: bgt 2, target # encoding: [0bAAAAAA00,A,0x89,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgt 2, target
+# CHECK-BE: bgt 0, target # encoding: [0x41,0x81,A,0bAAAAAA00]
+# CHECK-LE: bgt 0, target # encoding: [0bAAAAAA00,A,0x81,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgt target
+# CHECK-BE: bgta 2, target # encoding: [0x41,0x89,A,0bAAAAAA10]
+# CHECK-LE: bgta 2, target # encoding: [0bAAAAAA10,A,0x89,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgta 2, target
+# CHECK-BE: bgta 0, target # encoding: [0x41,0x81,A,0bAAAAAA10]
+# CHECK-LE: bgta 0, target # encoding: [0bAAAAAA10,A,0x81,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgta target
+# CHECK-BE: bgtlr 2 # encoding: [0x4d,0x89,0x00,0x20]
+# CHECK-LE: bgtlr 2 # encoding: [0x20,0x00,0x89,0x4d]
+ bgtlr 2
+# CHECK-BE: bgtlr 0 # encoding: [0x4d,0x81,0x00,0x20]
+# CHECK-LE: bgtlr 0 # encoding: [0x20,0x00,0x81,0x4d]
+ bgtlr
+# CHECK-BE: bgtctr 2 # encoding: [0x4d,0x89,0x04,0x20]
+# CHECK-LE: bgtctr 2 # encoding: [0x20,0x04,0x89,0x4d]
+ bgtctr 2
+# CHECK-BE: bgtctr 0 # encoding: [0x4d,0x81,0x04,0x20]
+# CHECK-LE: bgtctr 0 # encoding: [0x20,0x04,0x81,0x4d]
+ bgtctr
+# CHECK-BE: bgtl 2, target # encoding: [0x41,0x89,A,0bAAAAAA01]
+# CHECK-LE: bgtl 2, target # encoding: [0bAAAAAA01,A,0x89,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgtl 2, target
+# CHECK-BE: bgtl 0, target # encoding: [0x41,0x81,A,0bAAAAAA01]
+# CHECK-LE: bgtl 0, target # encoding: [0bAAAAAA01,A,0x81,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgtl target
+# CHECK-BE: bgtla 2, target # encoding: [0x41,0x89,A,0bAAAAAA11]
+# CHECK-LE: bgtla 2, target # encoding: [0bAAAAAA11,A,0x89,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgtla 2, target
+# CHECK-BE: bgtla 0, target # encoding: [0x41,0x81,A,0bAAAAAA11]
+# CHECK-LE: bgtla 0, target # encoding: [0bAAAAAA11,A,0x81,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgtla target
+# CHECK-BE: bgtlrl 2 # encoding: [0x4d,0x89,0x00,0x21]
+# CHECK-LE: bgtlrl 2 # encoding: [0x21,0x00,0x89,0x4d]
+ bgtlrl 2
+# CHECK-BE: bgtlrl 0 # encoding: [0x4d,0x81,0x00,0x21]
+# CHECK-LE: bgtlrl 0 # encoding: [0x21,0x00,0x81,0x4d]
+ bgtlrl
+# CHECK-BE: bgtctrl 2 # encoding: [0x4d,0x89,0x04,0x21]
+# CHECK-LE: bgtctrl 2 # encoding: [0x21,0x04,0x89,0x4d]
+ bgtctrl 2
+# CHECK-BE: bgtctrl 0 # encoding: [0x4d,0x81,0x04,0x21]
+# CHECK-LE: bgtctrl 0 # encoding: [0x21,0x04,0x81,0x4d]
+ bgtctrl
+
+# CHECK-BE: bgt+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA00]
+# CHECK-LE: bgt+ 2, target # encoding: [0bAAAAAA00,A,0xe9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgt+ 2, target
+# CHECK-BE: bgt+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA00]
+# CHECK-LE: bgt+ 0, target # encoding: [0bAAAAAA00,A,0xe1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgt+ target
+# CHECK-BE: bgta+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA10]
+# CHECK-LE: bgta+ 2, target # encoding: [0bAAAAAA10,A,0xe9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgta+ 2, target
+# CHECK-BE: bgta+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA10]
+# CHECK-LE: bgta+ 0, target # encoding: [0bAAAAAA10,A,0xe1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgta+ target
+# CHECK-BE: bgtlr+ 2 # encoding: [0x4d,0xe9,0x00,0x20]
+# CHECK-LE: bgtlr+ 2 # encoding: [0x20,0x00,0xe9,0x4d]
+ bgtlr+ 2
+# CHECK-BE: bgtlr+ 0 # encoding: [0x4d,0xe1,0x00,0x20]
+# CHECK-LE: bgtlr+ 0 # encoding: [0x20,0x00,0xe1,0x4d]
+ bgtlr+
+# CHECK-BE: bgtctr+ 2 # encoding: [0x4d,0xe9,0x04,0x20]
+# CHECK-LE: bgtctr+ 2 # encoding: [0x20,0x04,0xe9,0x4d]
+ bgtctr+ 2
+# CHECK-BE: bgtctr+ 0 # encoding: [0x4d,0xe1,0x04,0x20]
+# CHECK-LE: bgtctr+ 0 # encoding: [0x20,0x04,0xe1,0x4d]
+ bgtctr+
+# CHECK-BE: bgtl+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA01]
+# CHECK-LE: bgtl+ 2, target # encoding: [0bAAAAAA01,A,0xe9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgtl+ 2, target
+# CHECK-BE: bgtl+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA01]
+# CHECK-LE: bgtl+ 0, target # encoding: [0bAAAAAA01,A,0xe1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgtl+ target
+# CHECK-BE: bgtla+ 2, target # encoding: [0x41,0xe9,A,0bAAAAAA11]
+# CHECK-LE: bgtla+ 2, target # encoding: [0bAAAAAA11,A,0xe9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgtla+ 2, target
+# CHECK-BE: bgtla+ 0, target # encoding: [0x41,0xe1,A,0bAAAAAA11]
+# CHECK-LE: bgtla+ 0, target # encoding: [0bAAAAAA11,A,0xe1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgtla+ target
+# CHECK-BE: bgtlrl+ 2 # encoding: [0x4d,0xe9,0x00,0x21]
+# CHECK-LE: bgtlrl+ 2 # encoding: [0x21,0x00,0xe9,0x4d]
+ bgtlrl+ 2
+# CHECK-BE: bgtlrl+ 0 # encoding: [0x4d,0xe1,0x00,0x21]
+# CHECK-LE: bgtlrl+ 0 # encoding: [0x21,0x00,0xe1,0x4d]
+ bgtlrl+
+# CHECK-BE: bgtctrl+ 2 # encoding: [0x4d,0xe9,0x04,0x21]
+# CHECK-LE: bgtctrl+ 2 # encoding: [0x21,0x04,0xe9,0x4d]
+ bgtctrl+ 2
+# CHECK-BE: bgtctrl+ 0 # encoding: [0x4d,0xe1,0x04,0x21]
+# CHECK-LE: bgtctrl+ 0 # encoding: [0x21,0x04,0xe1,0x4d]
+ bgtctrl+
+
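+# The bgt family with the '-' suffix, setting the prediction hint to "likely not taken".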
+# CHECK-BE: bgt- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA00]
+# CHECK-LE: bgt- 2, target # encoding: [0bAAAAAA00,A,0xc9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgt- 2, target
+# CHECK-BE: bgt- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA00]
+# CHECK-LE: bgt- 0, target # encoding: [0bAAAAAA00,A,0xc1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgt- target
+# CHECK-BE: bgta- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA10]
+# CHECK-LE: bgta- 2, target # encoding: [0bAAAAAA10,A,0xc9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgta- 2, target
+# CHECK-BE: bgta- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA10]
+# CHECK-LE: bgta- 0, target # encoding: [0bAAAAAA10,A,0xc1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgta- target
+# CHECK-BE: bgtlr- 2 # encoding: [0x4d,0xc9,0x00,0x20]
+# CHECK-LE: bgtlr- 2 # encoding: [0x20,0x00,0xc9,0x4d]
+ bgtlr- 2
+# CHECK-BE: bgtlr- 0 # encoding: [0x4d,0xc1,0x00,0x20]
+# CHECK-LE: bgtlr- 0 # encoding: [0x20,0x00,0xc1,0x4d]
+ bgtlr-
+# CHECK-BE: bgtctr- 2 # encoding: [0x4d,0xc9,0x04,0x20]
+# CHECK-LE: bgtctr- 2 # encoding: [0x20,0x04,0xc9,0x4d]
+ bgtctr- 2
+# CHECK-BE: bgtctr- 0 # encoding: [0x4d,0xc1,0x04,0x20]
+# CHECK-LE: bgtctr- 0 # encoding: [0x20,0x04,0xc1,0x4d]
+ bgtctr-
+# CHECK-BE: bgtl- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA01]
+# CHECK-LE: bgtl- 2, target # encoding: [0bAAAAAA01,A,0xc9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgtl- 2, target
+# CHECK-BE: bgtl- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA01]
+# CHECK-LE: bgtl- 0, target # encoding: [0bAAAAAA01,A,0xc1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bgtl- target
+# CHECK-BE: bgtla- 2, target # encoding: [0x41,0xc9,A,0bAAAAAA11]
+# CHECK-LE: bgtla- 2, target # encoding: [0bAAAAAA11,A,0xc9,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgtla- 2, target
+# CHECK-BE: bgtla- 0, target # encoding: [0x41,0xc1,A,0bAAAAAA11]
+# CHECK-LE: bgtla- 0, target # encoding: [0bAAAAAA11,A,0xc1,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bgtla- target
+# CHECK-BE: bgtlrl- 2 # encoding: [0x4d,0xc9,0x00,0x21]
+# CHECK-LE: bgtlrl- 2 # encoding: [0x21,0x00,0xc9,0x4d]
+ bgtlrl- 2
+# CHECK-BE: bgtlrl- 0 # encoding: [0x4d,0xc1,0x00,0x21]
+# CHECK-LE: bgtlrl- 0 # encoding: [0x21,0x00,0xc1,0x4d]
+ bgtlrl-
+# CHECK-BE: bgtctrl- 2 # encoding: [0x4d,0xc9,0x04,0x21]
+# CHECK-LE: bgtctrl- 2 # encoding: [0x21,0x04,0xc9,0x4d]
+ bgtctrl- 2
+# CHECK-BE: bgtctrl- 0 # encoding: [0x4d,0xc1,0x04,0x21]
+# CHECK-LE: bgtctrl- 0 # encoding: [0x21,0x04,0xc1,0x4d]
+ bgtctrl-
+
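+# bnl ("branch if not less than") is an extended mnemonic for bge; the CR field defaults to 0 when omitted.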
+# CHECK-BE: bge 2, target # encoding: [0x40,0x88,A,0bAAAAAA00]
+# CHECK-LE: bge 2, target # encoding: [0bAAAAAA00,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnl 2, target
+# CHECK-BE: bge 0, target # encoding: [0x40,0x80,A,0bAAAAAA00]
+# CHECK-LE: bge 0, target # encoding: [0bAAAAAA00,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnl target
+# CHECK-BE: bgea 2, target # encoding: [0x40,0x88,A,0bAAAAAA10]
+# CHECK-LE: bgea 2, target # encoding: [0bAAAAAA10,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnla 2, target
+# CHECK-BE: bgea 0, target # encoding: [0x40,0x80,A,0bAAAAAA10]
+# CHECK-LE: bgea 0, target # encoding: [0bAAAAAA10,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnla target
+# CHECK-BE: bgelr 2 # encoding: [0x4c,0x88,0x00,0x20]
+# CHECK-LE: bgelr 2 # encoding: [0x20,0x00,0x88,0x4c]
+ bnllr 2
+# CHECK-BE: bgelr 0 # encoding: [0x4c,0x80,0x00,0x20]
+# CHECK-LE: bgelr 0 # encoding: [0x20,0x00,0x80,0x4c]
+ bnllr
+# CHECK-BE: bgectr 2 # encoding: [0x4c,0x88,0x04,0x20]
+# CHECK-LE: bgectr 2 # encoding: [0x20,0x04,0x88,0x4c]
+ bnlctr 2
+# CHECK-BE: bgectr 0 # encoding: [0x4c,0x80,0x04,0x20]
+# CHECK-LE: bgectr 0 # encoding: [0x20,0x04,0x80,0x4c]
+ bnlctr
+# CHECK-BE: bgel 2, target # encoding: [0x40,0x88,A,0bAAAAAA01]
+# CHECK-LE: bgel 2, target # encoding: [0bAAAAAA01,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnll 2, target
+# CHECK-BE: bgel 0, target # encoding: [0x40,0x80,A,0bAAAAAA01]
+# CHECK-LE: bgel 0, target # encoding: [0bAAAAAA01,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnll target
+# CHECK-BE: bgela 2, target # encoding: [0x40,0x88,A,0bAAAAAA11]
+# CHECK-LE: bgela 2, target # encoding: [0bAAAAAA11,A,0x88,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnlla 2, target
+# CHECK-BE: bgela 0, target # encoding: [0x40,0x80,A,0bAAAAAA11]
+# CHECK-LE: bgela 0, target # encoding: [0bAAAAAA11,A,0x80,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnlla target
+# CHECK-BE: bgelrl 2 # encoding: [0x4c,0x88,0x00,0x21]
+# CHECK-LE: bgelrl 2 # encoding: [0x21,0x00,0x88,0x4c]
+ bnllrl 2
+# CHECK-BE: bgelrl 0 # encoding: [0x4c,0x80,0x00,0x21]
+# CHECK-LE: bgelrl 0 # encoding: [0x21,0x00,0x80,0x4c]
+ bnllrl
+# CHECK-BE: bgectrl 2 # encoding: [0x4c,0x88,0x04,0x21]
+# CHECK-LE: bgectrl 2 # encoding: [0x21,0x04,0x88,0x4c]
+ bnlctrl 2
+# CHECK-BE: bgectrl 0 # encoding: [0x4c,0x80,0x04,0x21]
+# CHECK-LE: bgectrl 0 # encoding: [0x21,0x04,0x80,0x4c]
+ bnlctrl
+
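+# bnl with the '+' (predicted taken) hint, printed back as bge+.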
+# CHECK-BE: bge+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA00]
+# CHECK-LE: bge+ 2, target # encoding: [0bAAAAAA00,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnl+ 2, target
+# CHECK-BE: bge+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA00]
+# CHECK-LE: bge+ 0, target # encoding: [0bAAAAAA00,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnl+ target
+# CHECK-BE: bgea+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA10]
+# CHECK-LE: bgea+ 2, target # encoding: [0bAAAAAA10,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnla+ 2, target
+# CHECK-BE: bgea+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA10]
+# CHECK-LE: bgea+ 0, target # encoding: [0bAAAAAA10,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnla+ target
+# CHECK-BE: bgelr+ 2 # encoding: [0x4c,0xe8,0x00,0x20]
+# CHECK-LE: bgelr+ 2 # encoding: [0x20,0x00,0xe8,0x4c]
+ bnllr+ 2
+# CHECK-BE: bgelr+ 0 # encoding: [0x4c,0xe0,0x00,0x20]
+# CHECK-LE: bgelr+ 0 # encoding: [0x20,0x00,0xe0,0x4c]
+ bnllr+
+# CHECK-BE: bgectr+ 2 # encoding: [0x4c,0xe8,0x04,0x20]
+# CHECK-LE: bgectr+ 2 # encoding: [0x20,0x04,0xe8,0x4c]
+ bnlctr+ 2
+# CHECK-BE: bgectr+ 0 # encoding: [0x4c,0xe0,0x04,0x20]
+# CHECK-LE: bgectr+ 0 # encoding: [0x20,0x04,0xe0,0x4c]
+ bnlctr+
+# CHECK-BE: bgel+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA01]
+# CHECK-LE: bgel+ 2, target # encoding: [0bAAAAAA01,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnll+ 2, target
+# CHECK-BE: bgel+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA01]
+# CHECK-LE: bgel+ 0, target # encoding: [0bAAAAAA01,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnll+ target
+# CHECK-BE: bgela+ 2, target # encoding: [0x40,0xe8,A,0bAAAAAA11]
+# CHECK-LE: bgela+ 2, target # encoding: [0bAAAAAA11,A,0xe8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnlla+ 2, target
+# CHECK-BE: bgela+ 0, target # encoding: [0x40,0xe0,A,0bAAAAAA11]
+# CHECK-LE: bgela+ 0, target # encoding: [0bAAAAAA11,A,0xe0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnlla+ target
+# CHECK-BE: bgelrl+ 2 # encoding: [0x4c,0xe8,0x00,0x21]
+# CHECK-LE: bgelrl+ 2 # encoding: [0x21,0x00,0xe8,0x4c]
+ bnllrl+ 2
+# CHECK-BE: bgelrl+ 0 # encoding: [0x4c,0xe0,0x00,0x21]
+# CHECK-LE: bgelrl+ 0 # encoding: [0x21,0x00,0xe0,0x4c]
+ bnllrl+
+# CHECK-BE: bgectrl+ 2 # encoding: [0x4c,0xe8,0x04,0x21]
+# CHECK-LE: bgectrl+ 2 # encoding: [0x21,0x04,0xe8,0x4c]
+ bnlctrl+ 2
+# CHECK-BE: bgectrl+ 0 # encoding: [0x4c,0xe0,0x04,0x21]
+# CHECK-LE: bgectrl+ 0 # encoding: [0x21,0x04,0xe0,0x4c]
+ bnlctrl+
+
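+# bnl with the '-' (predicted not taken) hint, printed back as bge-.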
+# CHECK-BE: bge- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA00]
+# CHECK-LE: bge- 2, target # encoding: [0bAAAAAA00,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnl- 2, target
+# CHECK-BE: bge- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA00]
+# CHECK-LE: bge- 0, target # encoding: [0bAAAAAA00,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnl- target
+# CHECK-BE: bgea- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA10]
+# CHECK-LE: bgea- 2, target # encoding: [0bAAAAAA10,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnla- 2, target
+# CHECK-BE: bgea- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA10]
+# CHECK-LE: bgea- 0, target # encoding: [0bAAAAAA10,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnla- target
+# CHECK-BE: bgelr- 2 # encoding: [0x4c,0xc8,0x00,0x20]
+# CHECK-LE: bgelr- 2 # encoding: [0x20,0x00,0xc8,0x4c]
+ bnllr- 2
+# CHECK-BE: bgelr- 0 # encoding: [0x4c,0xc0,0x00,0x20]
+# CHECK-LE: bgelr- 0 # encoding: [0x20,0x00,0xc0,0x4c]
+ bnllr-
+# CHECK-BE: bgectr- 2 # encoding: [0x4c,0xc8,0x04,0x20]
+# CHECK-LE: bgectr- 2 # encoding: [0x20,0x04,0xc8,0x4c]
+ bnlctr- 2
+# CHECK-BE: bgectr- 0 # encoding: [0x4c,0xc0,0x04,0x20]
+# CHECK-LE: bgectr- 0 # encoding: [0x20,0x04,0xc0,0x4c]
+ bnlctr-
+# CHECK-BE: bgel- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA01]
+# CHECK-LE: bgel- 2, target # encoding: [0bAAAAAA01,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnll- 2, target
+# CHECK-BE: bgel- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA01]
+# CHECK-LE: bgel- 0, target # encoding: [0bAAAAAA01,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnll- target
+# CHECK-BE: bgela- 2, target # encoding: [0x40,0xc8,A,0bAAAAAA11]
+# CHECK-LE: bgela- 2, target # encoding: [0bAAAAAA11,A,0xc8,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnlla- 2, target
+# CHECK-BE: bgela- 0, target # encoding: [0x40,0xc0,A,0bAAAAAA11]
+# CHECK-LE: bgela- 0, target # encoding: [0bAAAAAA11,A,0xc0,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnlla- target
+# CHECK-BE: bgelrl- 2 # encoding: [0x4c,0xc8,0x00,0x21]
+# CHECK-LE: bgelrl- 2 # encoding: [0x21,0x00,0xc8,0x4c]
+ bnllrl- 2
+# CHECK-BE: bgelrl- 0 # encoding: [0x4c,0xc0,0x00,0x21]
+# CHECK-LE: bgelrl- 0 # encoding: [0x21,0x00,0xc0,0x4c]
+ bnllrl-
+# CHECK-BE: bgectrl- 2 # encoding: [0x4c,0xc8,0x04,0x21]
+# CHECK-LE: bgectrl- 2 # encoding: [0x21,0x04,0xc8,0x4c]
+ bnlctrl- 2
+# CHECK-BE: bgectrl- 0 # encoding: [0x4c,0xc0,0x04,0x21]
+# CHECK-LE: bgectrl- 0 # encoding: [0x21,0x04,0xc0,0x4c]
+ bnlctrl-
+
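+# bne ("branch if not equal") and its lr/ctr, link (l), and absolute (a) forms.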
+# CHECK-BE: bne 2, target # encoding: [0x40,0x8a,A,0bAAAAAA00]
+# CHECK-LE: bne 2, target # encoding: [0bAAAAAA00,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bne 2, target
+# CHECK-BE: bne 0, target # encoding: [0x40,0x82,A,0bAAAAAA00]
+# CHECK-LE: bne 0, target # encoding: [0bAAAAAA00,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bne target
+# CHECK-BE: bnea 2, target # encoding: [0x40,0x8a,A,0bAAAAAA10]
+# CHECK-LE: bnea 2, target # encoding: [0bAAAAAA10,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnea 2, target
+# CHECK-BE: bnea 0, target # encoding: [0x40,0x82,A,0bAAAAAA10]
+# CHECK-LE: bnea 0, target # encoding: [0bAAAAAA10,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnea target
+# CHECK-BE: bnelr 2 # encoding: [0x4c,0x8a,0x00,0x20]
+# CHECK-LE: bnelr 2 # encoding: [0x20,0x00,0x8a,0x4c]
+ bnelr 2
+# CHECK-BE: bnelr 0 # encoding: [0x4c,0x82,0x00,0x20]
+# CHECK-LE: bnelr 0 # encoding: [0x20,0x00,0x82,0x4c]
+ bnelr
+# CHECK-BE: bnectr 2 # encoding: [0x4c,0x8a,0x04,0x20]
+# CHECK-LE: bnectr 2 # encoding: [0x20,0x04,0x8a,0x4c]
+ bnectr 2
+# CHECK-BE: bnectr 0 # encoding: [0x4c,0x82,0x04,0x20]
+# CHECK-LE: bnectr 0 # encoding: [0x20,0x04,0x82,0x4c]
+ bnectr
+# CHECK-BE: bnel 2, target # encoding: [0x40,0x8a,A,0bAAAAAA01]
+# CHECK-LE: bnel 2, target # encoding: [0bAAAAAA01,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnel 2, target
+# CHECK-BE: bnel 0, target # encoding: [0x40,0x82,A,0bAAAAAA01]
+# CHECK-LE: bnel 0, target # encoding: [0bAAAAAA01,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnel target
+# CHECK-BE: bnela 2, target # encoding: [0x40,0x8a,A,0bAAAAAA11]
+# CHECK-LE: bnela 2, target # encoding: [0bAAAAAA11,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnela 2, target
+# CHECK-BE: bnela 0, target # encoding: [0x40,0x82,A,0bAAAAAA11]
+# CHECK-LE: bnela 0, target # encoding: [0bAAAAAA11,A,0x82,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnela target
+# CHECK-BE: bnelrl 2 # encoding: [0x4c,0x8a,0x00,0x21]
+# CHECK-LE: bnelrl 2 # encoding: [0x21,0x00,0x8a,0x4c]
+ bnelrl 2
+# CHECK-BE: bnelrl 0 # encoding: [0x4c,0x82,0x00,0x21]
+# CHECK-LE: bnelrl 0 # encoding: [0x21,0x00,0x82,0x4c]
+ bnelrl
+# CHECK-BE: bnectrl 2 # encoding: [0x4c,0x8a,0x04,0x21]
+# CHECK-LE: bnectrl 2 # encoding: [0x21,0x04,0x8a,0x4c]
+ bnectrl 2
+# CHECK-BE: bnectrl 0 # encoding: [0x4c,0x82,0x04,0x21]
+# CHECK-LE: bnectrl 0 # encoding: [0x21,0x04,0x82,0x4c]
+ bnectrl
+
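+# bne with the '+' prediction hint.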
+# CHECK-BE: bne+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA00]
+# CHECK-LE: bne+ 2, target # encoding: [0bAAAAAA00,A,0xea,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bne+ 2, target
+# CHECK-BE: bne+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA00]
+# CHECK-LE: bne+ 0, target # encoding: [0bAAAAAA00,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bne+ target
+# CHECK-BE: bnea+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA10]
+# CHECK-LE: bnea+ 2, target # encoding: [0bAAAAAA10,A,0xea,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnea+ 2, target
+# CHECK-BE: bnea+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA10]
+# CHECK-LE: bnea+ 0, target # encoding: [0bAAAAAA10,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnea+ target
+# CHECK-BE: bnelr+ 2 # encoding: [0x4c,0xea,0x00,0x20]
+# CHECK-LE: bnelr+ 2 # encoding: [0x20,0x00,0xea,0x4c]
+ bnelr+ 2
+# CHECK-BE: bnelr+ 0 # encoding: [0x4c,0xe2,0x00,0x20]
+# CHECK-LE: bnelr+ 0 # encoding: [0x20,0x00,0xe2,0x4c]
+ bnelr+
+# CHECK-BE: bnectr+ 2 # encoding: [0x4c,0xea,0x04,0x20]
+# CHECK-LE: bnectr+ 2 # encoding: [0x20,0x04,0xea,0x4c]
+ bnectr+ 2
+# CHECK-BE: bnectr+ 0 # encoding: [0x4c,0xe2,0x04,0x20]
+# CHECK-LE: bnectr+ 0 # encoding: [0x20,0x04,0xe2,0x4c]
+ bnectr+
+# CHECK-BE: bnel+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA01]
+# CHECK-LE: bnel+ 2, target # encoding: [0bAAAAAA01,A,0xea,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnel+ 2, target
+# CHECK-BE: bnel+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA01]
+# CHECK-LE: bnel+ 0, target # encoding: [0bAAAAAA01,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnel+ target
+# CHECK-BE: bnela+ 2, target # encoding: [0x40,0xea,A,0bAAAAAA11]
+# CHECK-LE: bnela+ 2, target # encoding: [0bAAAAAA11,A,0xea,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnela+ 2, target
+# CHECK-BE: bnela+ 0, target # encoding: [0x40,0xe2,A,0bAAAAAA11]
+# CHECK-LE: bnela+ 0, target # encoding: [0bAAAAAA11,A,0xe2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnela+ target
+# CHECK-BE: bnelrl+ 2 # encoding: [0x4c,0xea,0x00,0x21]
+# CHECK-LE: bnelrl+ 2 # encoding: [0x21,0x00,0xea,0x4c]
+ bnelrl+ 2
+# CHECK-BE: bnelrl+ 0 # encoding: [0x4c,0xe2,0x00,0x21]
+# CHECK-LE: bnelrl+ 0 # encoding: [0x21,0x00,0xe2,0x4c]
+ bnelrl+
+# CHECK-BE: bnectrl+ 2 # encoding: [0x4c,0xea,0x04,0x21]
+# CHECK-LE: bnectrl+ 2 # encoding: [0x21,0x04,0xea,0x4c]
+ bnectrl+ 2
+# CHECK-BE: bnectrl+ 0 # encoding: [0x4c,0xe2,0x04,0x21]
+# CHECK-LE: bnectrl+ 0 # encoding: [0x21,0x04,0xe2,0x4c]
+ bnectrl+
+
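+# bne with the '-' prediction hint.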
+# CHECK-BE: bne- 2, target # encoding: [0x40,0xca,A,0bAAAAAA00]
+# CHECK-LE: bne- 2, target # encoding: [0bAAAAAA00,A,0xca,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bne- 2, target
+# CHECK-BE: bne- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA00]
+# CHECK-LE: bne- 0, target # encoding: [0bAAAAAA00,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bne- target
+# CHECK-BE: bnea- 2, target # encoding: [0x40,0xca,A,0bAAAAAA10]
+# CHECK-LE: bnea- 2, target # encoding: [0bAAAAAA10,A,0xca,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnea- 2, target
+# CHECK-BE: bnea- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA10]
+# CHECK-LE: bnea- 0, target # encoding: [0bAAAAAA10,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnea- target
+# CHECK-BE: bnelr- 2 # encoding: [0x4c,0xca,0x00,0x20]
+# CHECK-LE: bnelr- 2 # encoding: [0x20,0x00,0xca,0x4c]
+ bnelr- 2
+# CHECK-BE: bnelr- 0 # encoding: [0x4c,0xc2,0x00,0x20]
+# CHECK-LE: bnelr- 0 # encoding: [0x20,0x00,0xc2,0x4c]
+ bnelr-
+# CHECK-BE: bnectr- 2 # encoding: [0x4c,0xca,0x04,0x20]
+# CHECK-LE: bnectr- 2 # encoding: [0x20,0x04,0xca,0x4c]
+ bnectr- 2
+# CHECK-BE: bnectr- 0 # encoding: [0x4c,0xc2,0x04,0x20]
+# CHECK-LE: bnectr- 0 # encoding: [0x20,0x04,0xc2,0x4c]
+ bnectr-
+# CHECK-BE: bnel- 2, target # encoding: [0x40,0xca,A,0bAAAAAA01]
+# CHECK-LE: bnel- 2, target # encoding: [0bAAAAAA01,A,0xca,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnel- 2, target
+# CHECK-BE: bnel- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA01]
+# CHECK-LE: bnel- 0, target # encoding: [0bAAAAAA01,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnel- target
+# CHECK-BE: bnela- 2, target # encoding: [0x40,0xca,A,0bAAAAAA11]
+# CHECK-LE: bnela- 2, target # encoding: [0bAAAAAA11,A,0xca,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnela- 2, target
+# CHECK-BE: bnela- 0, target # encoding: [0x40,0xc2,A,0bAAAAAA11]
+# CHECK-LE: bnela- 0, target # encoding: [0bAAAAAA11,A,0xc2,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnela- target
+# CHECK-BE: bnelrl- 2 # encoding: [0x4c,0xca,0x00,0x21]
+# CHECK-LE: bnelrl- 2 # encoding: [0x21,0x00,0xca,0x4c]
+ bnelrl- 2
+# CHECK-BE: bnelrl- 0 # encoding: [0x4c,0xc2,0x00,0x21]
+# CHECK-LE: bnelrl- 0 # encoding: [0x21,0x00,0xc2,0x4c]
+ bnelrl-
+# CHECK-BE: bnectrl- 2 # encoding: [0x4c,0xca,0x04,0x21]
+# CHECK-LE: bnectrl- 2 # encoding: [0x21,0x04,0xca,0x4c]
+ bnectrl- 2
+# CHECK-BE: bnectrl- 0 # encoding: [0x4c,0xc2,0x04,0x21]
+# CHECK-LE: bnectrl- 0 # encoding: [0x21,0x04,0xc2,0x4c]
+ bnectrl-
+
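+# bng ("branch if not greater than") is an extended mnemonic for ble.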
+# CHECK-BE: ble 2, target # encoding: [0x40,0x89,A,0bAAAAAA00]
+# CHECK-LE: ble 2, target # encoding: [0bAAAAAA00,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bng 2, target
+# CHECK-BE: ble 0, target # encoding: [0x40,0x81,A,0bAAAAAA00]
+# CHECK-LE: ble 0, target # encoding: [0bAAAAAA00,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bng target
+# CHECK-BE: blea 2, target # encoding: [0x40,0x89,A,0bAAAAAA10]
+# CHECK-LE: blea 2, target # encoding: [0bAAAAAA10,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnga 2, target
+# CHECK-BE: blea 0, target # encoding: [0x40,0x81,A,0bAAAAAA10]
+# CHECK-LE: blea 0, target # encoding: [0bAAAAAA10,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnga target
+# CHECK-BE: blelr 2 # encoding: [0x4c,0x89,0x00,0x20]
+# CHECK-LE: blelr 2 # encoding: [0x20,0x00,0x89,0x4c]
+ bnglr 2
+# CHECK-BE: blelr 0 # encoding: [0x4c,0x81,0x00,0x20]
+# CHECK-LE: blelr 0 # encoding: [0x20,0x00,0x81,0x4c]
+ bnglr
+# CHECK-BE: blectr 2 # encoding: [0x4c,0x89,0x04,0x20]
+# CHECK-LE: blectr 2 # encoding: [0x20,0x04,0x89,0x4c]
+ bngctr 2
+# CHECK-BE: blectr 0 # encoding: [0x4c,0x81,0x04,0x20]
+# CHECK-LE: blectr 0 # encoding: [0x20,0x04,0x81,0x4c]
+ bngctr
+# CHECK-BE: blel 2, target # encoding: [0x40,0x89,A,0bAAAAAA01]
+# CHECK-LE: blel 2, target # encoding: [0bAAAAAA01,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bngl 2, target
+# CHECK-BE: blel 0, target # encoding: [0x40,0x81,A,0bAAAAAA01]
+# CHECK-LE: blel 0, target # encoding: [0bAAAAAA01,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bngl target
+# CHECK-BE: blela 2, target # encoding: [0x40,0x89,A,0bAAAAAA11]
+# CHECK-LE: blela 2, target # encoding: [0bAAAAAA11,A,0x89,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bngla 2, target
+# CHECK-BE: blela 0, target # encoding: [0x40,0x81,A,0bAAAAAA11]
+# CHECK-LE: blela 0, target # encoding: [0bAAAAAA11,A,0x81,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bngla target
+# CHECK-BE: blelrl 2 # encoding: [0x4c,0x89,0x00,0x21]
+# CHECK-LE: blelrl 2 # encoding: [0x21,0x00,0x89,0x4c]
+ bnglrl 2
+# CHECK-BE: blelrl 0 # encoding: [0x4c,0x81,0x00,0x21]
+# CHECK-LE: blelrl 0 # encoding: [0x21,0x00,0x81,0x4c]
+ bnglrl
+# CHECK-BE: blectrl 2 # encoding: [0x4c,0x89,0x04,0x21]
+# CHECK-LE: blectrl 2 # encoding: [0x21,0x04,0x89,0x4c]
+ bngctrl 2
+# CHECK-BE: blectrl 0 # encoding: [0x4c,0x81,0x04,0x21]
+# CHECK-LE: blectrl 0 # encoding: [0x21,0x04,0x81,0x4c]
+ bngctrl
+
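+# bng with the '+' prediction hint, printed back as ble+.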
+# CHECK-BE: ble+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA00]
+# CHECK-LE: ble+ 2, target # encoding: [0bAAAAAA00,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bng+ 2, target
+# CHECK-BE: ble+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA00]
+# CHECK-LE: ble+ 0, target # encoding: [0bAAAAAA00,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bng+ target
+# CHECK-BE: blea+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA10]
+# CHECK-LE: blea+ 2, target # encoding: [0bAAAAAA10,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnga+ 2, target
+# CHECK-BE: blea+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA10]
+# CHECK-LE: blea+ 0, target # encoding: [0bAAAAAA10,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnga+ target
+# CHECK-BE: blelr+ 2 # encoding: [0x4c,0xe9,0x00,0x20]
+# CHECK-LE: blelr+ 2 # encoding: [0x20,0x00,0xe9,0x4c]
+ bnglr+ 2
+# CHECK-BE: blelr+ 0 # encoding: [0x4c,0xe1,0x00,0x20]
+# CHECK-LE: blelr+ 0 # encoding: [0x20,0x00,0xe1,0x4c]
+ bnglr+
+# CHECK-BE: blectr+ 2 # encoding: [0x4c,0xe9,0x04,0x20]
+# CHECK-LE: blectr+ 2 # encoding: [0x20,0x04,0xe9,0x4c]
+ bngctr+ 2
+# CHECK-BE: blectr+ 0 # encoding: [0x4c,0xe1,0x04,0x20]
+# CHECK-LE: blectr+ 0 # encoding: [0x20,0x04,0xe1,0x4c]
+ bngctr+
+# CHECK-BE: blel+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA01]
+# CHECK-LE: blel+ 2, target # encoding: [0bAAAAAA01,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bngl+ 2, target
+# CHECK-BE: blel+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA01]
+# CHECK-LE: blel+ 0, target # encoding: [0bAAAAAA01,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bngl+ target
+# CHECK-BE: blela+ 2, target # encoding: [0x40,0xe9,A,0bAAAAAA11]
+# CHECK-LE: blela+ 2, target # encoding: [0bAAAAAA11,A,0xe9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bngla+ 2, target
+# CHECK-BE: blela+ 0, target # encoding: [0x40,0xe1,A,0bAAAAAA11]
+# CHECK-LE: blela+ 0, target # encoding: [0bAAAAAA11,A,0xe1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bngla+ target
+# CHECK-BE: blelrl+ 2 # encoding: [0x4c,0xe9,0x00,0x21]
+# CHECK-LE: blelrl+ 2 # encoding: [0x21,0x00,0xe9,0x4c]
+ bnglrl+ 2
+# CHECK-BE: blelrl+ 0 # encoding: [0x4c,0xe1,0x00,0x21]
+# CHECK-LE: blelrl+ 0 # encoding: [0x21,0x00,0xe1,0x4c]
+ bnglrl+
+# CHECK-BE: blectrl+ 2 # encoding: [0x4c,0xe9,0x04,0x21]
+# CHECK-LE: blectrl+ 2 # encoding: [0x21,0x04,0xe9,0x4c]
+ bngctrl+ 2
+# CHECK-BE: blectrl+ 0 # encoding: [0x4c,0xe1,0x04,0x21]
+# CHECK-LE: blectrl+ 0 # encoding: [0x21,0x04,0xe1,0x4c]
+ bngctrl+
+
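+# bng with the '-' prediction hint, printed back as ble-.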
+# CHECK-BE: ble- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA00]
+# CHECK-LE: ble- 2, target # encoding: [0bAAAAAA00,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bng- 2, target
+# CHECK-BE: ble- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA00]
+# CHECK-LE: ble- 0, target # encoding: [0bAAAAAA00,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bng- target
+# CHECK-BE: blea- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA10]
+# CHECK-LE: blea- 2, target # encoding: [0bAAAAAA10,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnga- 2, target
+# CHECK-BE: blea- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA10]
+# CHECK-LE: blea- 0, target # encoding: [0bAAAAAA10,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnga- target
+# CHECK-BE: blelr- 2 # encoding: [0x4c,0xc9,0x00,0x20]
+# CHECK-LE: blelr- 2 # encoding: [0x20,0x00,0xc9,0x4c]
+ bnglr- 2
+# CHECK-BE: blelr- 0 # encoding: [0x4c,0xc1,0x00,0x20]
+# CHECK-LE: blelr- 0 # encoding: [0x20,0x00,0xc1,0x4c]
+ bnglr-
+# CHECK-BE: blectr- 2 # encoding: [0x4c,0xc9,0x04,0x20]
+# CHECK-LE: blectr- 2 # encoding: [0x20,0x04,0xc9,0x4c]
+ bngctr- 2
+# CHECK-BE: blectr- 0 # encoding: [0x4c,0xc1,0x04,0x20]
+# CHECK-LE: blectr- 0 # encoding: [0x20,0x04,0xc1,0x4c]
+ bngctr-
+# CHECK-BE: blel- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA01]
+# CHECK-LE: blel- 2, target # encoding: [0bAAAAAA01,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bngl- 2, target
+# CHECK-BE: blel- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA01]
+# CHECK-LE: blel- 0, target # encoding: [0bAAAAAA01,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bngl- target
+# CHECK-BE: blela- 2, target # encoding: [0x40,0xc9,A,0bAAAAAA11]
+# CHECK-LE: blela- 2, target # encoding: [0bAAAAAA11,A,0xc9,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bngla- 2, target
+# CHECK-BE: blela- 0, target # encoding: [0x40,0xc1,A,0bAAAAAA11]
+# CHECK-LE: blela- 0, target # encoding: [0bAAAAAA11,A,0xc1,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bngla- target
+# CHECK-BE: blelrl- 2 # encoding: [0x4c,0xc9,0x00,0x21]
+# CHECK-LE: blelrl- 2 # encoding: [0x21,0x00,0xc9,0x4c]
+ bnglrl- 2
+# CHECK-BE: blelrl- 0 # encoding: [0x4c,0xc1,0x00,0x21]
+# CHECK-LE: blelrl- 0 # encoding: [0x21,0x00,0xc1,0x4c]
+ bnglrl-
+# CHECK-BE: blectrl- 2 # encoding: [0x4c,0xc9,0x04,0x21]
+# CHECK-LE: blectrl- 2 # encoding: [0x21,0x04,0xc9,0x4c]
+ bngctrl- 2
+# CHECK-BE: blectrl- 0 # encoding: [0x4c,0xc1,0x04,0x21]
+# CHECK-LE: blectrl- 0 # encoding: [0x21,0x04,0xc1,0x4c]
+ bngctrl-
+
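+# bso ("branch if summary overflow") tests bit 3 of the CR field and is printed back as bun.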
+# CHECK-BE: bun 2, target # encoding: [0x41,0x8b,A,0bAAAAAA00]
+# CHECK-LE: bun 2, target # encoding: [0bAAAAAA00,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bso 2, target
+# CHECK-BE: bun 0, target # encoding: [0x41,0x83,A,0bAAAAAA00]
+# CHECK-LE: bun 0, target # encoding: [0bAAAAAA00,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bso target
+# CHECK-BE: buna 2, target # encoding: [0x41,0x8b,A,0bAAAAAA10]
+# CHECK-LE: buna 2, target # encoding: [0bAAAAAA10,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsoa 2, target
+# CHECK-BE: buna 0, target # encoding: [0x41,0x83,A,0bAAAAAA10]
+# CHECK-LE: buna 0, target # encoding: [0bAAAAAA10,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsoa target
+# CHECK-BE: bunlr 2 # encoding: [0x4d,0x8b,0x00,0x20]
+# CHECK-LE: bunlr 2 # encoding: [0x20,0x00,0x8b,0x4d]
+ bsolr 2
+# CHECK-BE: bunlr 0 # encoding: [0x4d,0x83,0x00,0x20]
+# CHECK-LE: bunlr 0 # encoding: [0x20,0x00,0x83,0x4d]
+ bsolr
+# CHECK-BE: bunctr 2 # encoding: [0x4d,0x8b,0x04,0x20]
+# CHECK-LE: bunctr 2 # encoding: [0x20,0x04,0x8b,0x4d]
+ bsoctr 2
+# CHECK-BE: bunctr 0 # encoding: [0x4d,0x83,0x04,0x20]
+# CHECK-LE: bunctr 0 # encoding: [0x20,0x04,0x83,0x4d]
+ bsoctr
+# CHECK-BE: bunl 2, target # encoding: [0x41,0x8b,A,0bAAAAAA01]
+# CHECK-LE: bunl 2, target # encoding: [0bAAAAAA01,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bsol 2, target
+# CHECK-BE: bunl 0, target # encoding: [0x41,0x83,A,0bAAAAAA01]
+# CHECK-LE: bunl 0, target # encoding: [0bAAAAAA01,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bsol target
+# CHECK-BE: bunla 2, target # encoding: [0x41,0x8b,A,0bAAAAAA11]
+# CHECK-LE: bunla 2, target # encoding: [0bAAAAAA11,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsola 2, target
+# CHECK-BE: bunla 0, target # encoding: [0x41,0x83,A,0bAAAAAA11]
+# CHECK-LE: bunla 0, target # encoding: [0bAAAAAA11,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsola target
+# CHECK-BE: bunlrl 2 # encoding: [0x4d,0x8b,0x00,0x21]
+# CHECK-LE: bunlrl 2 # encoding: [0x21,0x00,0x8b,0x4d]
+ bsolrl 2
+# CHECK-BE: bunlrl 0 # encoding: [0x4d,0x83,0x00,0x21]
+# CHECK-LE: bunlrl 0 # encoding: [0x21,0x00,0x83,0x4d]
+ bsolrl
+# CHECK-BE: bunctrl 2 # encoding: [0x4d,0x8b,0x04,0x21]
+# CHECK-LE: bunctrl 2 # encoding: [0x21,0x04,0x8b,0x4d]
+ bsoctrl 2
+# CHECK-BE: bunctrl 0 # encoding: [0x4d,0x83,0x04,0x21]
+# CHECK-LE: bunctrl 0 # encoding: [0x21,0x04,0x83,0x4d]
+ bsoctrl
+
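+# bso with the '+' prediction hint, printed back as bun+.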
+# CHECK-BE: bun+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA00]
+# CHECK-LE: bun+ 2, target # encoding: [0bAAAAAA00,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bso+ 2, target
+# CHECK-BE: bun+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA00]
+# CHECK-LE: bun+ 0, target # encoding: [0bAAAAAA00,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bso+ target
+# CHECK-BE: buna+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA10]
+# CHECK-LE: buna+ 2, target # encoding: [0bAAAAAA10,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsoa+ 2, target
+# CHECK-BE: buna+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA10]
+# CHECK-LE: buna+ 0, target # encoding: [0bAAAAAA10,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsoa+ target
+# CHECK-BE: bunlr+ 2 # encoding: [0x4d,0xeb,0x00,0x20]
+# CHECK-LE: bunlr+ 2 # encoding: [0x20,0x00,0xeb,0x4d]
+ bsolr+ 2
+# CHECK-BE: bunlr+ 0 # encoding: [0x4d,0xe3,0x00,0x20]
+# CHECK-LE: bunlr+ 0 # encoding: [0x20,0x00,0xe3,0x4d]
+ bsolr+
+# CHECK-BE: bunctr+ 2 # encoding: [0x4d,0xeb,0x04,0x20]
+# CHECK-LE: bunctr+ 2 # encoding: [0x20,0x04,0xeb,0x4d]
+ bsoctr+ 2
+# CHECK-BE: bunctr+ 0 # encoding: [0x4d,0xe3,0x04,0x20]
+# CHECK-LE: bunctr+ 0 # encoding: [0x20,0x04,0xe3,0x4d]
+ bsoctr+
+# CHECK-BE: bunl+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA01]
+# CHECK-LE: bunl+ 2, target # encoding: [0bAAAAAA01,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bsol+ 2, target
+# CHECK-BE: bunl+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA01]
+# CHECK-LE: bunl+ 0, target # encoding: [0bAAAAAA01,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bsol+ target
+# CHECK-BE: bunla+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA11]
+# CHECK-LE: bunla+ 2, target # encoding: [0bAAAAAA11,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsola+ 2, target
+# CHECK-BE: bunla+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA11]
+# CHECK-LE: bunla+ 0, target # encoding: [0bAAAAAA11,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsola+ target
+# CHECK-BE: bunlrl+ 2 # encoding: [0x4d,0xeb,0x00,0x21]
+# CHECK-LE: bunlrl+ 2 # encoding: [0x21,0x00,0xeb,0x4d]
+ bsolrl+ 2
+# CHECK-BE: bunlrl+ 0 # encoding: [0x4d,0xe3,0x00,0x21]
+# CHECK-LE: bunlrl+ 0 # encoding: [0x21,0x00,0xe3,0x4d]
+ bsolrl+
+# CHECK-BE: bunctrl+ 2 # encoding: [0x4d,0xeb,0x04,0x21]
+# CHECK-LE: bunctrl+ 2 # encoding: [0x21,0x04,0xeb,0x4d]
+ bsoctrl+ 2
+# CHECK-BE: bunctrl+ 0 # encoding: [0x4d,0xe3,0x04,0x21]
+# CHECK-LE: bunctrl+ 0 # encoding: [0x21,0x04,0xe3,0x4d]
+ bsoctrl+
+
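+# bso with the '-' prediction hint, printed back as bun-.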
+# CHECK-BE: bun- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA00]
+# CHECK-LE: bun- 2, target # encoding: [0bAAAAAA00,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bso- 2, target
+# CHECK-BE: bun- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA00]
+# CHECK-LE: bun- 0, target # encoding: [0bAAAAAA00,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bso- target
+# CHECK-BE: buna- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA10]
+# CHECK-LE: buna- 2, target # encoding: [0bAAAAAA10,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsoa- 2, target
+# CHECK-BE: buna- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA10]
+# CHECK-LE: buna- 0, target # encoding: [0bAAAAAA10,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsoa- target
+# CHECK-BE: bunlr- 2 # encoding: [0x4d,0xcb,0x00,0x20]
+# CHECK-LE: bunlr- 2 # encoding: [0x20,0x00,0xcb,0x4d]
+ bsolr- 2
+# CHECK-BE: bunlr- 0 # encoding: [0x4d,0xc3,0x00,0x20]
+# CHECK-LE: bunlr- 0 # encoding: [0x20,0x00,0xc3,0x4d]
+ bsolr-
+# CHECK-BE: bunctr- 2 # encoding: [0x4d,0xcb,0x04,0x20]
+# CHECK-LE: bunctr- 2 # encoding: [0x20,0x04,0xcb,0x4d]
+ bsoctr- 2
+# CHECK-BE: bunctr- 0 # encoding: [0x4d,0xc3,0x04,0x20]
+# CHECK-LE: bunctr- 0 # encoding: [0x20,0x04,0xc3,0x4d]
+ bsoctr-
+# CHECK-BE: bunl- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA01]
+# CHECK-LE: bunl- 2, target # encoding: [0bAAAAAA01,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bsol- 2, target
+# CHECK-BE: bunl- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA01]
+# CHECK-LE: bunl- 0, target # encoding: [0bAAAAAA01,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bsol- target
+# CHECK-BE: bunla- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA11]
+# CHECK-LE: bunla- 2, target # encoding: [0bAAAAAA11,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsola- 2, target
+# CHECK-BE: bunla- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA11]
+# CHECK-LE: bunla- 0, target # encoding: [0bAAAAAA11,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bsola- target
+# CHECK-BE: bunlrl- 2 # encoding: [0x4d,0xcb,0x00,0x21]
+# CHECK-LE: bunlrl- 2 # encoding: [0x21,0x00,0xcb,0x4d]
+ bsolrl- 2
+# CHECK-BE: bunlrl- 0 # encoding: [0x4d,0xc3,0x00,0x21]
+# CHECK-LE: bunlrl- 0 # encoding: [0x21,0x00,0xc3,0x4d]
+ bsolrl-
+# CHECK-BE: bunctrl- 2 # encoding: [0x4d,0xcb,0x04,0x21]
+# CHECK-LE: bunctrl- 2 # encoding: [0x21,0x04,0xcb,0x4d]
+ bsoctrl- 2
+# CHECK-BE: bunctrl- 0 # encoding: [0x4d,0xc3,0x04,0x21]
+# CHECK-LE: bunctrl- 0 # encoding: [0x21,0x04,0xc3,0x4d]
+ bsoctrl-
+
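+# bns ("branch if not summary overflow") is printed back as bnu ("branch if not unordered").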
+# CHECK-BE: bnu 2, target # encoding: [0x40,0x8b,A,0bAAAAAA00]
+# CHECK-LE: bnu 2, target # encoding: [0bAAAAAA00,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bns 2, target
+# CHECK-BE: bnu 0, target # encoding: [0x40,0x83,A,0bAAAAAA00]
+# CHECK-LE: bnu 0, target # encoding: [0bAAAAAA00,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bns target
+# CHECK-BE: bnua 2, target # encoding: [0x40,0x8b,A,0bAAAAAA10]
+# CHECK-LE: bnua 2, target # encoding: [0bAAAAAA10,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsa 2, target
+# CHECK-BE: bnua 0, target # encoding: [0x40,0x83,A,0bAAAAAA10]
+# CHECK-LE: bnua 0, target # encoding: [0bAAAAAA10,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsa target
+# CHECK-BE: bnulr 2 # encoding: [0x4c,0x8b,0x00,0x20]
+# CHECK-LE: bnulr 2 # encoding: [0x20,0x00,0x8b,0x4c]
+ bnslr 2
+# CHECK-BE: bnulr 0 # encoding: [0x4c,0x83,0x00,0x20]
+# CHECK-LE: bnulr 0 # encoding: [0x20,0x00,0x83,0x4c]
+ bnslr
+# CHECK-BE: bnuctr 2 # encoding: [0x4c,0x8b,0x04,0x20]
+# CHECK-LE: bnuctr 2 # encoding: [0x20,0x04,0x8b,0x4c]
+ bnsctr 2
+# CHECK-BE: bnuctr 0 # encoding: [0x4c,0x83,0x04,0x20]
+# CHECK-LE: bnuctr 0 # encoding: [0x20,0x04,0x83,0x4c]
+ bnsctr
+# CHECK-BE: bnul 2, target # encoding: [0x40,0x8b,A,0bAAAAAA01]
+# CHECK-LE: bnul 2, target # encoding: [0bAAAAAA01,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnsl 2, target
+# CHECK-BE: bnul 0, target # encoding: [0x40,0x83,A,0bAAAAAA01]
+# CHECK-LE: bnul 0, target # encoding: [0bAAAAAA01,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnsl target
+# CHECK-BE: bnula 2, target # encoding: [0x40,0x8b,A,0bAAAAAA11]
+# CHECK-LE: bnula 2, target # encoding: [0bAAAAAA11,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsla 2, target
+# CHECK-BE: bnula 0, target # encoding: [0x40,0x83,A,0bAAAAAA11]
+# CHECK-LE: bnula 0, target # encoding: [0bAAAAAA11,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsla target
+# CHECK-BE: bnulrl 2 # encoding: [0x4c,0x8b,0x00,0x21]
+# CHECK-LE: bnulrl 2 # encoding: [0x21,0x00,0x8b,0x4c]
+ bnslrl 2
+# CHECK-BE: bnulrl 0 # encoding: [0x4c,0x83,0x00,0x21]
+# CHECK-LE: bnulrl 0 # encoding: [0x21,0x00,0x83,0x4c]
+ bnslrl
+# CHECK-BE: bnuctrl 2 # encoding: [0x4c,0x8b,0x04,0x21]
+# CHECK-LE: bnuctrl 2 # encoding: [0x21,0x04,0x8b,0x4c]
+ bnsctrl 2
+# CHECK-BE: bnuctrl 0 # encoding: [0x4c,0x83,0x04,0x21]
+# CHECK-LE: bnuctrl 0 # encoding: [0x21,0x04,0x83,0x4c]
+ bnsctrl
+
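+# bns with the '+' prediction hint, printed back as bnu+.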
+# CHECK-BE: bnu+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA00]
+# CHECK-LE: bnu+ 2, target # encoding: [0bAAAAAA00,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bns+ 2, target
+# CHECK-BE: bnu+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA00]
+# CHECK-LE: bnu+ 0, target # encoding: [0bAAAAAA00,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bns+ target
+# CHECK-BE: bnua+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA10]
+# CHECK-LE: bnua+ 2, target # encoding: [0bAAAAAA10,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsa+ 2, target
+# CHECK-BE: bnua+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA10]
+# CHECK-LE: bnua+ 0, target # encoding: [0bAAAAAA10,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsa+ target
+# CHECK-BE: bnulr+ 2 # encoding: [0x4c,0xeb,0x00,0x20]
+# CHECK-LE: bnulr+ 2 # encoding: [0x20,0x00,0xeb,0x4c]
+ bnslr+ 2
+# CHECK-BE: bnulr+ 0 # encoding: [0x4c,0xe3,0x00,0x20]
+# CHECK-LE: bnulr+ 0 # encoding: [0x20,0x00,0xe3,0x4c]
+ bnslr+
+# CHECK-BE: bnuctr+ 2 # encoding: [0x4c,0xeb,0x04,0x20]
+# CHECK-LE: bnuctr+ 2 # encoding: [0x20,0x04,0xeb,0x4c]
+ bnsctr+ 2
+# CHECK-BE: bnuctr+ 0 # encoding: [0x4c,0xe3,0x04,0x20]
+# CHECK-LE: bnuctr+ 0 # encoding: [0x20,0x04,0xe3,0x4c]
+ bnsctr+
+# CHECK-BE: bnul+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA01]
+# CHECK-LE: bnul+ 2, target # encoding: [0bAAAAAA01,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnsl+ 2, target
+# CHECK-BE: bnul+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA01]
+# CHECK-LE: bnul+ 0, target # encoding: [0bAAAAAA01,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnsl+ target
+# CHECK-BE: bnula+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA11]
+# CHECK-LE: bnula+ 2, target # encoding: [0bAAAAAA11,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsla+ 2, target
+# CHECK-BE: bnula+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA11]
+# CHECK-LE: bnula+ 0, target # encoding: [0bAAAAAA11,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsla+ target
+# CHECK-BE: bnulrl+ 2 # encoding: [0x4c,0xeb,0x00,0x21]
+# CHECK-LE: bnulrl+ 2 # encoding: [0x21,0x00,0xeb,0x4c]
+ bnslrl+ 2
+# CHECK-BE: bnulrl+ 0 # encoding: [0x4c,0xe3,0x00,0x21]
+# CHECK-LE: bnulrl+ 0 # encoding: [0x21,0x00,0xe3,0x4c]
+ bnslrl+
+# CHECK-BE: bnuctrl+ 2 # encoding: [0x4c,0xeb,0x04,0x21]
+# CHECK-LE: bnuctrl+ 2 # encoding: [0x21,0x04,0xeb,0x4c]
+ bnsctrl+ 2
+# CHECK-BE: bnuctrl+ 0 # encoding: [0x4c,0xe3,0x04,0x21]
+# CHECK-LE: bnuctrl+ 0 # encoding: [0x21,0x04,0xe3,0x4c]
+ bnsctrl+
+
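+# bns with the '-' prediction hint, printed back as bnu-.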
+# CHECK-BE: bnu- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA00]
+# CHECK-LE: bnu- 2, target # encoding: [0bAAAAAA00,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bns- 2, target
+# CHECK-BE: bnu- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA00]
+# CHECK-LE: bnu- 0, target # encoding: [0bAAAAAA00,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bns- target
+# CHECK-BE: bnua- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA10]
+# CHECK-LE: bnua- 2, target # encoding: [0bAAAAAA10,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsa- 2, target
+# CHECK-BE: bnua- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA10]
+# CHECK-LE: bnua- 0, target # encoding: [0bAAAAAA10,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsa- target
+# CHECK-BE: bnulr- 2 # encoding: [0x4c,0xcb,0x00,0x20]
+# CHECK-LE: bnulr- 2 # encoding: [0x20,0x00,0xcb,0x4c]
+ bnslr- 2
+# CHECK-BE: bnulr- 0 # encoding: [0x4c,0xc3,0x00,0x20]
+# CHECK-LE: bnulr- 0 # encoding: [0x20,0x00,0xc3,0x4c]
+ bnslr-
+# CHECK-BE: bnuctr- 2 # encoding: [0x4c,0xcb,0x04,0x20]
+# CHECK-LE: bnuctr- 2 # encoding: [0x20,0x04,0xcb,0x4c]
+ bnsctr- 2
+# CHECK-BE: bnuctr- 0 # encoding: [0x4c,0xc3,0x04,0x20]
+# CHECK-LE: bnuctr- 0 # encoding: [0x20,0x04,0xc3,0x4c]
+ bnsctr-
+# CHECK-BE: bnul- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA01]
+# CHECK-LE: bnul- 2, target # encoding: [0bAAAAAA01,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnsl- 2, target
+# CHECK-BE: bnul- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA01]
+# CHECK-LE: bnul- 0, target # encoding: [0bAAAAAA01,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnsl- target
+# CHECK-BE: bnula- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA11]
+# CHECK-LE: bnula- 2, target # encoding: [0bAAAAAA11,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsla- 2, target
+# CHECK-BE: bnula- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA11]
+# CHECK-LE: bnula- 0, target # encoding: [0bAAAAAA11,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnsla- target
+# CHECK-BE: bnulrl- 2 # encoding: [0x4c,0xcb,0x00,0x21]
+# CHECK-LE: bnulrl- 2 # encoding: [0x21,0x00,0xcb,0x4c]
+ bnslrl- 2
+# CHECK-BE: bnulrl- 0 # encoding: [0x4c,0xc3,0x00,0x21]
+# CHECK-LE: bnulrl- 0 # encoding: [0x21,0x00,0xc3,0x4c]
+ bnslrl-
+# CHECK-BE: bnuctrl- 2 # encoding: [0x4c,0xcb,0x04,0x21]
+# CHECK-LE: bnuctrl- 2 # encoding: [0x21,0x04,0xcb,0x4c]
+ bnsctrl- 2
+# CHECK-BE: bnuctrl- 0 # encoding: [0x4c,0xc3,0x04,0x21]
+# CHECK-LE: bnuctrl- 0 # encoding: [0x21,0x04,0xc3,0x4c]
+ bnsctrl-
+
+# CHECK-BE: bun 2, target # encoding: [0x41,0x8b,A,0bAAAAAA00]
+# CHECK-LE: bun 2, target # encoding: [0bAAAAAA00,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bun 2, target
+# CHECK-BE: bun 0, target # encoding: [0x41,0x83,A,0bAAAAAA00]
+# CHECK-LE: bun 0, target # encoding: [0bAAAAAA00,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bun target
+# CHECK-BE: buna 2, target # encoding: [0x41,0x8b,A,0bAAAAAA10]
+# CHECK-LE: buna 2, target # encoding: [0bAAAAAA10,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ buna 2, target
+# CHECK-BE: buna 0, target # encoding: [0x41,0x83,A,0bAAAAAA10]
+# CHECK-LE: buna 0, target # encoding: [0bAAAAAA10,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ buna target
+# CHECK-BE: bunlr 2 # encoding: [0x4d,0x8b,0x00,0x20]
+# CHECK-LE: bunlr 2 # encoding: [0x20,0x00,0x8b,0x4d]
+ bunlr 2
+# CHECK-BE: bunlr 0 # encoding: [0x4d,0x83,0x00,0x20]
+# CHECK-LE: bunlr 0 # encoding: [0x20,0x00,0x83,0x4d]
+ bunlr
+# CHECK-BE: bunctr 2 # encoding: [0x4d,0x8b,0x04,0x20]
+# CHECK-LE: bunctr 2 # encoding: [0x20,0x04,0x8b,0x4d]
+ bunctr 2
+# CHECK-BE: bunctr 0 # encoding: [0x4d,0x83,0x04,0x20]
+# CHECK-LE: bunctr 0 # encoding: [0x20,0x04,0x83,0x4d]
+ bunctr
+# CHECK-BE: bunl 2, target # encoding: [0x41,0x8b,A,0bAAAAAA01]
+# CHECK-LE: bunl 2, target # encoding: [0bAAAAAA01,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bunl 2, target
+# CHECK-BE: bunl 0, target # encoding: [0x41,0x83,A,0bAAAAAA01]
+# CHECK-LE: bunl 0, target # encoding: [0bAAAAAA01,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bunl target
+# CHECK-BE: bunla 2, target # encoding: [0x41,0x8b,A,0bAAAAAA11]
+# CHECK-LE: bunla 2, target # encoding: [0bAAAAAA11,A,0x8b,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bunla 2, target
+# CHECK-BE: bunla 0, target # encoding: [0x41,0x83,A,0bAAAAAA11]
+# CHECK-LE: bunla 0, target # encoding: [0bAAAAAA11,A,0x83,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bunla target
+# CHECK-BE: bunlrl 2 # encoding: [0x4d,0x8b,0x00,0x21]
+# CHECK-LE: bunlrl 2 # encoding: [0x21,0x00,0x8b,0x4d]
+ bunlrl 2
+# CHECK-BE: bunlrl 0 # encoding: [0x4d,0x83,0x00,0x21]
+# CHECK-LE: bunlrl 0 # encoding: [0x21,0x00,0x83,0x4d]
+ bunlrl
+# CHECK-BE: bunctrl 2 # encoding: [0x4d,0x8b,0x04,0x21]
+# CHECK-LE: bunctrl 2 # encoding: [0x21,0x04,0x8b,0x4d]
+ bunctrl 2
+# CHECK-BE: bunctrl 0 # encoding: [0x4d,0x83,0x04,0x21]
+# CHECK-LE: bunctrl 0 # encoding: [0x21,0x04,0x83,0x4d]
+ bunctrl
+
+# CHECK-BE: bun+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA00]
+# CHECK-LE: bun+ 2, target # encoding: [0bAAAAAA00,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bun+ 2, target
+# CHECK-BE: bun+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA00]
+# CHECK-LE: bun+ 0, target # encoding: [0bAAAAAA00,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bun+ target
+# CHECK-BE: buna+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA10]
+# CHECK-LE: buna+ 2, target # encoding: [0bAAAAAA10,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ buna+ 2, target
+# CHECK-BE: buna+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA10]
+# CHECK-LE: buna+ 0, target # encoding: [0bAAAAAA10,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ buna+ target
+# CHECK-BE: bunlr+ 2 # encoding: [0x4d,0xeb,0x00,0x20]
+# CHECK-LE: bunlr+ 2 # encoding: [0x20,0x00,0xeb,0x4d]
+ bunlr+ 2
+# CHECK-BE: bunlr+ 0 # encoding: [0x4d,0xe3,0x00,0x20]
+# CHECK-LE: bunlr+ 0 # encoding: [0x20,0x00,0xe3,0x4d]
+ bunlr+
+# CHECK-BE: bunctr+ 2 # encoding: [0x4d,0xeb,0x04,0x20]
+# CHECK-LE: bunctr+ 2 # encoding: [0x20,0x04,0xeb,0x4d]
+ bunctr+ 2
+# CHECK-BE: bunctr+ 0 # encoding: [0x4d,0xe3,0x04,0x20]
+# CHECK-LE: bunctr+ 0 # encoding: [0x20,0x04,0xe3,0x4d]
+ bunctr+
+# CHECK-BE: bunl+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA01]
+# CHECK-LE: bunl+ 2, target # encoding: [0bAAAAAA01,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bunl+ 2, target
+# CHECK-BE: bunl+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA01]
+# CHECK-LE: bunl+ 0, target # encoding: [0bAAAAAA01,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bunl+ target
+# CHECK-BE: bunla+ 2, target # encoding: [0x41,0xeb,A,0bAAAAAA11]
+# CHECK-LE: bunla+ 2, target # encoding: [0bAAAAAA11,A,0xeb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bunla+ 2, target
+# CHECK-BE: bunla+ 0, target # encoding: [0x41,0xe3,A,0bAAAAAA11]
+# CHECK-LE: bunla+ 0, target # encoding: [0bAAAAAA11,A,0xe3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bunla+ target
+# CHECK-BE: bunlrl+ 2 # encoding: [0x4d,0xeb,0x00,0x21]
+# CHECK-LE: bunlrl+ 2 # encoding: [0x21,0x00,0xeb,0x4d]
+ bunlrl+ 2
+# CHECK-BE: bunlrl+ 0 # encoding: [0x4d,0xe3,0x00,0x21]
+# CHECK-LE: bunlrl+ 0 # encoding: [0x21,0x00,0xe3,0x4d]
+ bunlrl+
+# CHECK-BE: bunctrl+ 2 # encoding: [0x4d,0xeb,0x04,0x21]
+# CHECK-LE: bunctrl+ 2 # encoding: [0x21,0x04,0xeb,0x4d]
+ bunctrl+ 2
+# CHECK-BE: bunctrl+ 0 # encoding: [0x4d,0xe3,0x04,0x21]
+# CHECK-LE: bunctrl+ 0 # encoding: [0x21,0x04,0xe3,0x4d]
+ bunctrl+
+
+# CHECK-BE: bun- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA00]
+# CHECK-LE: bun- 2, target # encoding: [0bAAAAAA00,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bun- 2, target
+# CHECK-BE: bun- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA00]
+# CHECK-LE: bun- 0, target # encoding: [0bAAAAAA00,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bun- target
+# CHECK-BE: buna- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA10]
+# CHECK-LE: buna- 2, target # encoding: [0bAAAAAA10,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ buna- 2, target
+# CHECK-BE: buna- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA10]
+# CHECK-LE: buna- 0, target # encoding: [0bAAAAAA10,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ buna- target
+# CHECK-BE: bunlr- 2 # encoding: [0x4d,0xcb,0x00,0x20]
+# CHECK-LE: bunlr- 2 # encoding: [0x20,0x00,0xcb,0x4d]
+ bunlr- 2
+# CHECK-BE: bunlr- 0 # encoding: [0x4d,0xc3,0x00,0x20]
+# CHECK-LE: bunlr- 0 # encoding: [0x20,0x00,0xc3,0x4d]
+ bunlr-
+# CHECK-BE: bunctr- 2 # encoding: [0x4d,0xcb,0x04,0x20]
+# CHECK-LE: bunctr- 2 # encoding: [0x20,0x04,0xcb,0x4d]
+ bunctr- 2
+# CHECK-BE: bunctr- 0 # encoding: [0x4d,0xc3,0x04,0x20]
+# CHECK-LE: bunctr- 0 # encoding: [0x20,0x04,0xc3,0x4d]
+ bunctr-
+# CHECK-BE: bunl- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA01]
+# CHECK-LE: bunl- 2, target # encoding: [0bAAAAAA01,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bunl- 2, target
+# CHECK-BE: bunl- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA01]
+# CHECK-LE: bunl- 0, target # encoding: [0bAAAAAA01,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bunl- target
+# CHECK-BE: bunla- 2, target # encoding: [0x41,0xcb,A,0bAAAAAA11]
+# CHECK-LE: bunla- 2, target # encoding: [0bAAAAAA11,A,0xcb,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bunla- 2, target
+# CHECK-BE: bunla- 0, target # encoding: [0x41,0xc3,A,0bAAAAAA11]
+# CHECK-LE: bunla- 0, target # encoding: [0bAAAAAA11,A,0xc3,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bunla- target
+# CHECK-BE: bunlrl- 2 # encoding: [0x4d,0xcb,0x00,0x21]
+# CHECK-LE: bunlrl- 2 # encoding: [0x21,0x00,0xcb,0x4d]
+ bunlrl- 2
+# CHECK-BE: bunlrl- 0 # encoding: [0x4d,0xc3,0x00,0x21]
+# CHECK-LE: bunlrl- 0 # encoding: [0x21,0x00,0xc3,0x4d]
+ bunlrl-
+# CHECK-BE: bunctrl- 2 # encoding: [0x4d,0xcb,0x04,0x21]
+# CHECK-LE: bunctrl- 2 # encoding: [0x21,0x04,0xcb,0x4d]
+ bunctrl- 2
+# CHECK-BE: bunctrl- 0 # encoding: [0x4d,0xc3,0x04,0x21]
+# CHECK-LE: bunctrl- 0 # encoding: [0x21,0x04,0xc3,0x4d]
+ bunctrl-
+
+# CHECK-BE: bnu 2, target # encoding: [0x40,0x8b,A,0bAAAAAA00]
+# CHECK-LE: bnu 2, target # encoding: [0bAAAAAA00,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnu 2, target
+# CHECK-BE: bnu 0, target # encoding: [0x40,0x83,A,0bAAAAAA00]
+# CHECK-LE: bnu 0, target # encoding: [0bAAAAAA00,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnu target
+# CHECK-BE: bnua 2, target # encoding: [0x40,0x8b,A,0bAAAAAA10]
+# CHECK-LE: bnua 2, target # encoding: [0bAAAAAA10,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnua 2, target
+# CHECK-BE: bnua 0, target # encoding: [0x40,0x83,A,0bAAAAAA10]
+# CHECK-LE: bnua 0, target # encoding: [0bAAAAAA10,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnua target
+# CHECK-BE: bnulr 2 # encoding: [0x4c,0x8b,0x00,0x20]
+# CHECK-LE: bnulr 2 # encoding: [0x20,0x00,0x8b,0x4c]
+ bnulr 2
+# CHECK-BE: bnulr 0 # encoding: [0x4c,0x83,0x00,0x20]
+# CHECK-LE: bnulr 0 # encoding: [0x20,0x00,0x83,0x4c]
+ bnulr
+# CHECK-BE: bnuctr 2 # encoding: [0x4c,0x8b,0x04,0x20]
+# CHECK-LE: bnuctr 2 # encoding: [0x20,0x04,0x8b,0x4c]
+ bnuctr 2
+# CHECK-BE: bnuctr 0 # encoding: [0x4c,0x83,0x04,0x20]
+# CHECK-LE: bnuctr 0 # encoding: [0x20,0x04,0x83,0x4c]
+ bnuctr
+# CHECK-BE: bnul 2, target # encoding: [0x40,0x8b,A,0bAAAAAA01]
+# CHECK-LE: bnul 2, target # encoding: [0bAAAAAA01,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnul 2, target
+# CHECK-BE: bnul 0, target # encoding: [0x40,0x83,A,0bAAAAAA01]
+# CHECK-LE: bnul 0, target # encoding: [0bAAAAAA01,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnul target
+# CHECK-BE: bnula 2, target # encoding: [0x40,0x8b,A,0bAAAAAA11]
+# CHECK-LE: bnula 2, target # encoding: [0bAAAAAA11,A,0x8b,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnula 2, target
+# CHECK-BE: bnula 0, target # encoding: [0x40,0x83,A,0bAAAAAA11]
+# CHECK-LE: bnula 0, target # encoding: [0bAAAAAA11,A,0x83,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnula target
+# CHECK-BE: bnulrl 2 # encoding: [0x4c,0x8b,0x00,0x21]
+# CHECK-LE: bnulrl 2 # encoding: [0x21,0x00,0x8b,0x4c]
+ bnulrl 2
+# CHECK-BE: bnulrl 0 # encoding: [0x4c,0x83,0x00,0x21]
+# CHECK-LE: bnulrl 0 # encoding: [0x21,0x00,0x83,0x4c]
+ bnulrl
+# CHECK-BE: bnuctrl 2 # encoding: [0x4c,0x8b,0x04,0x21]
+# CHECK-LE: bnuctrl 2 # encoding: [0x21,0x04,0x8b,0x4c]
+ bnuctrl 2
+# CHECK-BE: bnuctrl 0 # encoding: [0x4c,0x83,0x04,0x21]
+# CHECK-LE: bnuctrl 0 # encoding: [0x21,0x04,0x83,0x4c]
+ bnuctrl
+
+# CHECK-BE: bnu+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA00]
+# CHECK-LE: bnu+ 2, target # encoding: [0bAAAAAA00,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnu+ 2, target
+# CHECK-BE: bnu+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA00]
+# CHECK-LE: bnu+ 0, target # encoding: [0bAAAAAA00,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnu+ target
+# CHECK-BE: bnua+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA10]
+# CHECK-LE: bnua+ 2, target # encoding: [0bAAAAAA10,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnua+ 2, target
+# CHECK-BE: bnua+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA10]
+# CHECK-LE: bnua+ 0, target # encoding: [0bAAAAAA10,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnua+ target
+# CHECK-BE: bnulr+ 2 # encoding: [0x4c,0xeb,0x00,0x20]
+# CHECK-LE: bnulr+ 2 # encoding: [0x20,0x00,0xeb,0x4c]
+ bnulr+ 2
+# CHECK-BE: bnulr+ 0 # encoding: [0x4c,0xe3,0x00,0x20]
+# CHECK-LE: bnulr+ 0 # encoding: [0x20,0x00,0xe3,0x4c]
+ bnulr+
+# CHECK-BE: bnuctr+ 2 # encoding: [0x4c,0xeb,0x04,0x20]
+# CHECK-LE: bnuctr+ 2 # encoding: [0x20,0x04,0xeb,0x4c]
+ bnuctr+ 2
+# CHECK-BE: bnuctr+ 0 # encoding: [0x4c,0xe3,0x04,0x20]
+# CHECK-LE: bnuctr+ 0 # encoding: [0x20,0x04,0xe3,0x4c]
+ bnuctr+
+# CHECK-BE: bnul+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA01]
+# CHECK-LE: bnul+ 2, target # encoding: [0bAAAAAA01,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnul+ 2, target
+# CHECK-BE: bnul+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA01]
+# CHECK-LE: bnul+ 0, target # encoding: [0bAAAAAA01,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnul+ target
+# CHECK-BE: bnula+ 2, target # encoding: [0x40,0xeb,A,0bAAAAAA11]
+# CHECK-LE: bnula+ 2, target # encoding: [0bAAAAAA11,A,0xeb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnula+ 2, target
+# CHECK-BE: bnula+ 0, target # encoding: [0x40,0xe3,A,0bAAAAAA11]
+# CHECK-LE: bnula+ 0, target # encoding: [0bAAAAAA11,A,0xe3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnula+ target
+# CHECK-BE: bnulrl+ 2 # encoding: [0x4c,0xeb,0x00,0x21]
+# CHECK-LE: bnulrl+ 2 # encoding: [0x21,0x00,0xeb,0x4c]
+ bnulrl+ 2
+# CHECK-BE: bnulrl+ 0 # encoding: [0x4c,0xe3,0x00,0x21]
+# CHECK-LE: bnulrl+ 0 # encoding: [0x21,0x00,0xe3,0x4c]
+ bnulrl+
+# CHECK-BE: bnuctrl+ 2 # encoding: [0x4c,0xeb,0x04,0x21]
+# CHECK-LE: bnuctrl+ 2 # encoding: [0x21,0x04,0xeb,0x4c]
+ bnuctrl+ 2
+# CHECK-BE: bnuctrl+ 0 # encoding: [0x4c,0xe3,0x04,0x21]
+# CHECK-LE: bnuctrl+ 0 # encoding: [0x21,0x04,0xe3,0x4c]
+ bnuctrl+
+
+# CHECK-BE: bnu- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA00]
+# CHECK-LE: bnu- 2, target # encoding: [0bAAAAAA00,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnu- 2, target
+# CHECK-BE: bnu- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA00]
+# CHECK-LE: bnu- 0, target # encoding: [0bAAAAAA00,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnu- target
+# CHECK-BE: bnua- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA10]
+# CHECK-LE: bnua- 2, target # encoding: [0bAAAAAA10,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnua- 2, target
+# CHECK-BE: bnua- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA10]
+# CHECK-LE: bnua- 0, target # encoding: [0bAAAAAA10,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnua- target
+# CHECK-BE: bnulr- 2 # encoding: [0x4c,0xcb,0x00,0x20]
+# CHECK-LE: bnulr- 2 # encoding: [0x20,0x00,0xcb,0x4c]
+ bnulr- 2
+# CHECK-BE: bnulr- 0 # encoding: [0x4c,0xc3,0x00,0x20]
+# CHECK-LE: bnulr- 0 # encoding: [0x20,0x00,0xc3,0x4c]
+ bnulr-
+# CHECK-BE: bnuctr- 2 # encoding: [0x4c,0xcb,0x04,0x20]
+# CHECK-LE: bnuctr- 2 # encoding: [0x20,0x04,0xcb,0x4c]
+ bnuctr- 2
+# CHECK-BE: bnuctr- 0 # encoding: [0x4c,0xc3,0x04,0x20]
+# CHECK-LE: bnuctr- 0 # encoding: [0x20,0x04,0xc3,0x4c]
+ bnuctr-
+# CHECK-BE: bnul- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA01]
+# CHECK-LE: bnul- 2, target # encoding: [0bAAAAAA01,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnul- 2, target
+# CHECK-BE: bnul- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA01]
+# CHECK-LE: bnul- 0, target # encoding: [0bAAAAAA01,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bnul- target
+# CHECK-BE: bnula- 2, target # encoding: [0x40,0xcb,A,0bAAAAAA11]
+# CHECK-LE: bnula- 2, target # encoding: [0bAAAAAA11,A,0xcb,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnula- 2, target
+# CHECK-BE: bnula- 0, target # encoding: [0x40,0xc3,A,0bAAAAAA11]
+# CHECK-LE: bnula- 0, target # encoding: [0bAAAAAA11,A,0xc3,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bnula- target
+# CHECK-BE: bnulrl- 2 # encoding: [0x4c,0xcb,0x00,0x21]
+# CHECK-LE: bnulrl- 2 # encoding: [0x21,0x00,0xcb,0x4c]
+ bnulrl- 2
+# CHECK-BE: bnulrl- 0 # encoding: [0x4c,0xc3,0x00,0x21]
+# CHECK-LE: bnulrl- 0 # encoding: [0x21,0x00,0xc3,0x4c]
+ bnulrl-
+# CHECK-BE: bnuctrl- 2 # encoding: [0x4c,0xcb,0x04,0x21]
+# CHECK-LE: bnuctrl- 2 # encoding: [0x21,0x04,0xcb,0x4c]
+ bnuctrl- 2
+# CHECK-BE: bnuctrl- 0 # encoding: [0x4c,0xc3,0x04,0x21]
+# CHECK-LE: bnuctrl- 0 # encoding: [0x21,0x04,0xc3,0x4c]
+ bnuctrl-
# Condition register logical mnemonics
-# CHECK: creqv 2, 2, 2 # encoding: [0x4c,0x42,0x12,0x42]
- crset 2
-# CHECK: crxor 2, 2, 2 # encoding: [0x4c,0x42,0x11,0x82]
- crclr 2
-# CHECK: cror 2, 3, 3 # encoding: [0x4c,0x43,0x1b,0x82]
- crmove 2, 3
-# CHECK: crnor 2, 3, 3 # encoding: [0x4c,0x43,0x18,0x42]
- crnot 2, 3
+# CHECK-BE: creqv 2, 2, 2 # encoding: [0x4c,0x42,0x12,0x42]
+# CHECK-LE: creqv 2, 2, 2 # encoding: [0x42,0x12,0x42,0x4c]
+ crset 2
+# CHECK-BE: crxor 2, 2, 2 # encoding: [0x4c,0x42,0x11,0x82]
+# CHECK-LE: crxor 2, 2, 2 # encoding: [0x82,0x11,0x42,0x4c]
+ crclr 2
+# CHECK-BE: cror 2, 3, 3 # encoding: [0x4c,0x43,0x1b,0x82]
+# CHECK-LE: cror 2, 3, 3 # encoding: [0x82,0x1b,0x43,0x4c]
+ crmove 2, 3
+# CHECK-BE: crnor 2, 3, 3 # encoding: [0x4c,0x43,0x18,0x42]
+# CHECK-LE: crnor 2, 3, 3 # encoding: [0x42,0x18,0x43,0x4c]
+ crnot 2, 3
# Subtract mnemonics
-# CHECK: addi 2, 3, -128 # encoding: [0x38,0x43,0xff,0x80]
- subi 2, 3, 128
-# CHECK: addis 2, 3, -128 # encoding: [0x3c,0x43,0xff,0x80]
- subis 2, 3, 128
-# CHECK: addic 2, 3, -128 # encoding: [0x30,0x43,0xff,0x80]
- subic 2, 3, 128
-# CHECK: addic. 2, 3, -128 # encoding: [0x34,0x43,0xff,0x80]
- subic. 2, 3, 128
-
-# CHECK: subf 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x50]
- sub 2, 3, 4
-# CHECK: subf. 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x51]
- sub. 2, 3, 4
-# CHECK: subfc 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x10]
- subc 2, 3, 4
-# CHECK: subfc. 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x11]
- subc. 2, 3, 4
+# CHECK-BE: addi 2, 3, -128 # encoding: [0x38,0x43,0xff,0x80]
+# CHECK-LE: addi 2, 3, -128 # encoding: [0x80,0xff,0x43,0x38]
+ subi 2, 3, 128
+# CHECK-BE: addis 2, 3, -128 # encoding: [0x3c,0x43,0xff,0x80]
+# CHECK-LE: addis 2, 3, -128 # encoding: [0x80,0xff,0x43,0x3c]
+ subis 2, 3, 128
+# CHECK-BE: addic 2, 3, -128 # encoding: [0x30,0x43,0xff,0x80]
+# CHECK-LE: addic 2, 3, -128 # encoding: [0x80,0xff,0x43,0x30]
+ subic 2, 3, 128
+# CHECK-BE: addic. 2, 3, -128 # encoding: [0x34,0x43,0xff,0x80]
+# CHECK-LE: addic. 2, 3, -128 # encoding: [0x80,0xff,0x43,0x34]
+ subic. 2, 3, 128
+
+# CHECK-BE: subf 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x50]
+# CHECK-LE: subf 2, 4, 3 # encoding: [0x50,0x18,0x44,0x7c]
+ sub 2, 3, 4
+# CHECK-BE: subf. 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x51]
+# CHECK-LE: subf. 2, 4, 3 # encoding: [0x51,0x18,0x44,0x7c]
+ sub. 2, 3, 4
+# CHECK-BE: subfc 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x10]
+# CHECK-LE: subfc 2, 4, 3 # encoding: [0x10,0x18,0x44,0x7c]
+ subc 2, 3, 4
+# CHECK-BE: subfc. 2, 4, 3 # encoding: [0x7c,0x44,0x18,0x11]
+# CHECK-LE: subfc. 2, 4, 3 # encoding: [0x11,0x18,0x44,0x7c]
+ subc. 2, 3, 4
# Compare mnemonics
-# CHECK: cmpdi 2, 3, 128 # encoding: [0x2d,0x23,0x00,0x80]
- cmpdi 2, 3, 128
-# CHECK: cmpdi 0, 3, 128 # encoding: [0x2c,0x23,0x00,0x80]
- cmpdi 3, 128
-# CHECK: cmpd 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x00]
- cmpd 2, 3, 4
-# CHECK: cmpd 0, 3, 4 # encoding: [0x7c,0x23,0x20,0x00]
- cmpd 3, 4
-# CHECK: cmpldi 2, 3, 128 # encoding: [0x29,0x23,0x00,0x80]
- cmpldi 2, 3, 128
-# CHECK: cmpldi 0, 3, 128 # encoding: [0x28,0x23,0x00,0x80]
- cmpldi 3, 128
-# CHECK: cmpld 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x40]
- cmpld 2, 3, 4
-# CHECK: cmpld 0, 3, 4 # encoding: [0x7c,0x23,0x20,0x40]
- cmpld 3, 4
-
-# CHECK: cmpwi 2, 3, 128 # encoding: [0x2d,0x03,0x00,0x80]
- cmpwi 2, 3, 128
-# CHECK: cmpwi 0, 3, 128 # encoding: [0x2c,0x03,0x00,0x80]
- cmpwi 3, 128
-# CHECK: cmpw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x00]
- cmpw 2, 3, 4
-# CHECK: cmpw 0, 3, 4 # encoding: [0x7c,0x03,0x20,0x00]
- cmpw 3, 4
-# CHECK: cmplwi 2, 3, 128 # encoding: [0x29,0x03,0x00,0x80]
- cmplwi 2, 3, 128
-# CHECK: cmplwi 0, 3, 128 # encoding: [0x28,0x03,0x00,0x80]
- cmplwi 3, 128
-# CHECK: cmplw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x40]
- cmplw 2, 3, 4
-# CHECK: cmplw 0, 3, 4 # encoding: [0x7c,0x03,0x20,0x40]
- cmplw 3, 4
+# CHECK-BE: cmpdi 2, 3, 128 # encoding: [0x2d,0x23,0x00,0x80]
+# CHECK-LE: cmpdi 2, 3, 128 # encoding: [0x80,0x00,0x23,0x2d]
+ cmpdi 2, 3, 128
+# CHECK-BE: cmpdi 0, 3, 128 # encoding: [0x2c,0x23,0x00,0x80]
+# CHECK-LE: cmpdi 0, 3, 128 # encoding: [0x80,0x00,0x23,0x2c]
+ cmpdi 3, 128
+# CHECK-BE: cmpd 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x00]
+# CHECK-LE: cmpd 2, 3, 4 # encoding: [0x00,0x20,0x23,0x7d]
+ cmpd 2, 3, 4
+# CHECK-BE: cmpd 0, 3, 4 # encoding: [0x7c,0x23,0x20,0x00]
+# CHECK-LE: cmpd 0, 3, 4 # encoding: [0x00,0x20,0x23,0x7c]
+ cmpd 3, 4
+# CHECK-BE: cmpldi 2, 3, 128 # encoding: [0x29,0x23,0x00,0x80]
+# CHECK-LE: cmpldi 2, 3, 128 # encoding: [0x80,0x00,0x23,0x29]
+ cmpldi 2, 3, 128
+# CHECK-BE: cmpldi 0, 3, 128 # encoding: [0x28,0x23,0x00,0x80]
+# CHECK-LE: cmpldi 0, 3, 128 # encoding: [0x80,0x00,0x23,0x28]
+ cmpldi 3, 128
+# CHECK-BE: cmpld 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x40]
+# CHECK-LE: cmpld 2, 3, 4 # encoding: [0x40,0x20,0x23,0x7d]
+ cmpld 2, 3, 4
+# CHECK-BE: cmpld 0, 3, 4 # encoding: [0x7c,0x23,0x20,0x40]
+# CHECK-LE: cmpld 0, 3, 4 # encoding: [0x40,0x20,0x23,0x7c]
+ cmpld 3, 4
+
+# CHECK-BE: cmpwi 2, 3, 128 # encoding: [0x2d,0x03,0x00,0x80]
+# CHECK-LE: cmpwi 2, 3, 128 # encoding: [0x80,0x00,0x03,0x2d]
+ cmpwi 2, 3, 128
+# CHECK-BE: cmpwi 0, 3, 128 # encoding: [0x2c,0x03,0x00,0x80]
+# CHECK-LE: cmpwi 0, 3, 128 # encoding: [0x80,0x00,0x03,0x2c]
+ cmpwi 3, 128
+# CHECK-BE: cmpw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x00]
+# CHECK-LE: cmpw 2, 3, 4 # encoding: [0x00,0x20,0x03,0x7d]
+ cmpw 2, 3, 4
+# CHECK-BE: cmpw 0, 3, 4 # encoding: [0x7c,0x03,0x20,0x00]
+# CHECK-LE: cmpw 0, 3, 4 # encoding: [0x00,0x20,0x03,0x7c]
+ cmpw 3, 4
+# CHECK-BE: cmplwi 2, 3, 128 # encoding: [0x29,0x03,0x00,0x80]
+# CHECK-LE: cmplwi 2, 3, 128 # encoding: [0x80,0x00,0x03,0x29]
+ cmplwi 2, 3, 128
+# CHECK-BE: cmplwi 0, 3, 128 # encoding: [0x28,0x03,0x00,0x80]
+# CHECK-LE: cmplwi 0, 3, 128 # encoding: [0x80,0x00,0x03,0x28]
+ cmplwi 3, 128
+# CHECK-BE: cmplw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x40]
+# CHECK-LE: cmplw 2, 3, 4 # encoding: [0x40,0x20,0x03,0x7d]
+ cmplw 2, 3, 4
+# CHECK-BE: cmplw 0, 3, 4 # encoding: [0x7c,0x03,0x20,0x40]
+# CHECK-LE: cmplw 0, 3, 4 # encoding: [0x40,0x20,0x03,0x7c]
+ cmplw 3, 4
# Trap mnemonics
-# CHECK: twi 16, 3, 4 # encoding: [0x0e,0x03,0x00,0x04]
- twlti 3, 4
-# CHECK: tw 16, 3, 4 # encoding: [0x7e,0x03,0x20,0x08]
- twlt 3, 4
-# CHECK: tdi 16, 3, 4 # encoding: [0x0a,0x03,0x00,0x04]
- tdlti 3, 4
-# CHECK: td 16, 3, 4 # encoding: [0x7e,0x03,0x20,0x88]
- tdlt 3, 4
-
-# CHECK: twi 20, 3, 4 # encoding: [0x0e,0x83,0x00,0x04]
- twlei 3, 4
-# CHECK: tw 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x08]
- twle 3, 4
-# CHECK: tdi 20, 3, 4 # encoding: [0x0a,0x83,0x00,0x04]
- tdlei 3, 4
-# CHECK: td 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x88]
- tdle 3, 4
-
-# CHECK: twi 4, 3, 4 # encoding: [0x0c,0x83,0x00,0x04]
- tweqi 3, 4
-# CHECK: tw 4, 3, 4 # encoding: [0x7c,0x83,0x20,0x08]
- tweq 3, 4
-# CHECK: tdi 4, 3, 4 # encoding: [0x08,0x83,0x00,0x04]
- tdeqi 3, 4
-# CHECK: td 4, 3, 4 # encoding: [0x7c,0x83,0x20,0x88]
- tdeq 3, 4
-
-# CHECK: twi 12, 3, 4 # encoding: [0x0d,0x83,0x00,0x04]
- twgei 3, 4
-# CHECK: tw 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x08]
- twge 3, 4
-# CHECK: tdi 12, 3, 4 # encoding: [0x09,0x83,0x00,0x04]
- tdgei 3, 4
-# CHECK: td 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x88]
- tdge 3, 4
-
-# CHECK: twi 8, 3, 4 # encoding: [0x0d,0x03,0x00,0x04]
- twgti 3, 4
-# CHECK: tw 8, 3, 4 # encoding: [0x7d,0x03,0x20,0x08]
- twgt 3, 4
-# CHECK: tdi 8, 3, 4 # encoding: [0x09,0x03,0x00,0x04]
- tdgti 3, 4
-# CHECK: td 8, 3, 4 # encoding: [0x7d,0x03,0x20,0x88]
- tdgt 3, 4
-
-# CHECK: twi 12, 3, 4 # encoding: [0x0d,0x83,0x00,0x04]
- twnli 3, 4
-# CHECK: tw 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x08]
- twnl 3, 4
-# CHECK: tdi 12, 3, 4 # encoding: [0x09,0x83,0x00,0x04]
- tdnli 3, 4
-# CHECK: td 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x88]
- tdnl 3, 4
-
-# CHECK: twi 24, 3, 4 # encoding: [0x0f,0x03,0x00,0x04]
- twnei 3, 4
-# CHECK: tw 24, 3, 4 # encoding: [0x7f,0x03,0x20,0x08]
- twne 3, 4
-# CHECK: tdi 24, 3, 4 # encoding: [0x0b,0x03,0x00,0x04]
- tdnei 3, 4
-# CHECK: td 24, 3, 4 # encoding: [0x7f,0x03,0x20,0x88]
- tdne 3, 4
-
-# CHECK: twi 20, 3, 4 # encoding: [0x0e,0x83,0x00,0x04]
- twngi 3, 4
-# CHECK: tw 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x08]
- twng 3, 4
-# CHECK: tdi 20, 3, 4 # encoding: [0x0a,0x83,0x00,0x04]
- tdngi 3, 4
-# CHECK: td 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x88]
- tdng 3, 4
-
-# CHECK: twi 2, 3, 4 # encoding: [0x0c,0x43,0x00,0x04]
- twllti 3, 4
-# CHECK: tw 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x08]
- twllt 3, 4
-# CHECK: tdi 2, 3, 4 # encoding: [0x08,0x43,0x00,0x04]
- tdllti 3, 4
-# CHECK: td 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x88]
- tdllt 3, 4
-
-# CHECK: twi 6, 3, 4 # encoding: [0x0c,0xc3,0x00,0x04]
- twllei 3, 4
-# CHECK: tw 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x08]
- twlle 3, 4
-# CHECK: tdi 6, 3, 4 # encoding: [0x08,0xc3,0x00,0x04]
- tdllei 3, 4
-# CHECK: td 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x88]
- tdlle 3, 4
-
-# CHECK: twi 5, 3, 4 # encoding: [0x0c,0xa3,0x00,0x04]
- twlgei 3, 4
-# CHECK: tw 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x08]
- twlge 3, 4
-# CHECK: tdi 5, 3, 4 # encoding: [0x08,0xa3,0x00,0x04]
- tdlgei 3, 4
-# CHECK: td 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x88]
- tdlge 3, 4
-
-# CHECK: twi 1, 3, 4 # encoding: [0x0c,0x23,0x00,0x04]
- twlgti 3, 4
-# CHECK: tw 1, 3, 4 # encoding: [0x7c,0x23,0x20,0x08]
- twlgt 3, 4
-# CHECK: tdi 1, 3, 4 # encoding: [0x08,0x23,0x00,0x04]
- tdlgti 3, 4
-# CHECK: td 1, 3, 4 # encoding: [0x7c,0x23,0x20,0x88]
- tdlgt 3, 4
-
-# CHECK: twi 5, 3, 4 # encoding: [0x0c,0xa3,0x00,0x04]
- twlnli 3, 4
-# CHECK: tw 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x08]
- twlnl 3, 4
-# CHECK: tdi 5, 3, 4 # encoding: [0x08,0xa3,0x00,0x04]
- tdlnli 3, 4
-# CHECK: td 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x88]
- tdlnl 3, 4
-
-# CHECK: twi 6, 3, 4 # encoding: [0x0c,0xc3,0x00,0x04]
- twlngi 3, 4
-# CHECK: tw 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x08]
- twlng 3, 4
-# CHECK: tdi 6, 3, 4 # encoding: [0x08,0xc3,0x00,0x04]
- tdlngi 3, 4
-# CHECK: td 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x88]
- tdlng 3, 4
-
-# CHECK: twi 31, 3, 4 # encoding: [0x0f,0xe3,0x00,0x04]
- twui 3, 4
-# CHECK: tw 31, 3, 4 # encoding: [0x7f,0xe3,0x20,0x08]
- twu 3, 4
-# CHECK: tdi 31, 3, 4 # encoding: [0x0b,0xe3,0x00,0x04]
- tdui 3, 4
-# CHECK: td 31, 3, 4 # encoding: [0x7f,0xe3,0x20,0x88]
- tdu 3, 4
-
-# CHECK: trap # encoding: [0x7f,0xe0,0x00,0x08]
- trap
+# CHECK-BE: twi 16, 3, 4 # encoding: [0x0e,0x03,0x00,0x04]
+# CHECK-LE: twi 16, 3, 4 # encoding: [0x04,0x00,0x03,0x0e]
+ twlti 3, 4
+# CHECK-BE: tw 16, 3, 4 # encoding: [0x7e,0x03,0x20,0x08]
+# CHECK-LE: tw 16, 3, 4 # encoding: [0x08,0x20,0x03,0x7e]
+ twlt 3, 4
+# CHECK-BE: tdi 16, 3, 4 # encoding: [0x0a,0x03,0x00,0x04]
+# CHECK-LE: tdi 16, 3, 4 # encoding: [0x04,0x00,0x03,0x0a]
+ tdlti 3, 4
+# CHECK-BE: td 16, 3, 4 # encoding: [0x7e,0x03,0x20,0x88]
+# CHECK-LE: td 16, 3, 4 # encoding: [0x88,0x20,0x03,0x7e]
+ tdlt 3, 4
+
+# CHECK-BE: twi 20, 3, 4 # encoding: [0x0e,0x83,0x00,0x04]
+# CHECK-LE: twi 20, 3, 4 # encoding: [0x04,0x00,0x83,0x0e]
+ twlei 3, 4
+# CHECK-BE: tw 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x08]
+# CHECK-LE: tw 20, 3, 4 # encoding: [0x08,0x20,0x83,0x7e]
+ twle 3, 4
+# CHECK-BE: tdi 20, 3, 4 # encoding: [0x0a,0x83,0x00,0x04]
+# CHECK-LE: tdi 20, 3, 4 # encoding: [0x04,0x00,0x83,0x0a]
+ tdlei 3, 4
+# CHECK-BE: td 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x88]
+# CHECK-LE: td 20, 3, 4 # encoding: [0x88,0x20,0x83,0x7e]
+ tdle 3, 4
+
+# CHECK-BE: twi 4, 3, 4 # encoding: [0x0c,0x83,0x00,0x04]
+# CHECK-LE: twi 4, 3, 4 # encoding: [0x04,0x00,0x83,0x0c]
+ tweqi 3, 4
+# CHECK-BE: tw 4, 3, 4 # encoding: [0x7c,0x83,0x20,0x08]
+# CHECK-LE: tw 4, 3, 4 # encoding: [0x08,0x20,0x83,0x7c]
+ tweq 3, 4
+# CHECK-BE: tdi 4, 3, 4 # encoding: [0x08,0x83,0x00,0x04]
+# CHECK-LE: tdi 4, 3, 4 # encoding: [0x04,0x00,0x83,0x08]
+ tdeqi 3, 4
+# CHECK-BE: td 4, 3, 4 # encoding: [0x7c,0x83,0x20,0x88]
+# CHECK-LE: td 4, 3, 4 # encoding: [0x88,0x20,0x83,0x7c]
+ tdeq 3, 4
+
+# CHECK-BE: twi 12, 3, 4 # encoding: [0x0d,0x83,0x00,0x04]
+# CHECK-LE: twi 12, 3, 4 # encoding: [0x04,0x00,0x83,0x0d]
+ twgei 3, 4
+# CHECK-BE: tw 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x08]
+# CHECK-LE: tw 12, 3, 4 # encoding: [0x08,0x20,0x83,0x7d]
+ twge 3, 4
+# CHECK-BE: tdi 12, 3, 4 # encoding: [0x09,0x83,0x00,0x04]
+# CHECK-LE: tdi 12, 3, 4 # encoding: [0x04,0x00,0x83,0x09]
+ tdgei 3, 4
+# CHECK-BE: td 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x88]
+# CHECK-LE: td 12, 3, 4 # encoding: [0x88,0x20,0x83,0x7d]
+ tdge 3, 4
+
+# CHECK-BE: twi 8, 3, 4 # encoding: [0x0d,0x03,0x00,0x04]
+# CHECK-LE: twi 8, 3, 4 # encoding: [0x04,0x00,0x03,0x0d]
+ twgti 3, 4
+# CHECK-BE: tw 8, 3, 4 # encoding: [0x7d,0x03,0x20,0x08]
+# CHECK-LE: tw 8, 3, 4 # encoding: [0x08,0x20,0x03,0x7d]
+ twgt 3, 4
+# CHECK-BE: tdi 8, 3, 4 # encoding: [0x09,0x03,0x00,0x04]
+# CHECK-LE: tdi 8, 3, 4 # encoding: [0x04,0x00,0x03,0x09]
+ tdgti 3, 4
+# CHECK-BE: td 8, 3, 4 # encoding: [0x7d,0x03,0x20,0x88]
+# CHECK-LE: td 8, 3, 4 # encoding: [0x88,0x20,0x03,0x7d]
+ tdgt 3, 4
+
+# CHECK-BE: twi 12, 3, 4 # encoding: [0x0d,0x83,0x00,0x04]
+# CHECK-LE: twi 12, 3, 4 # encoding: [0x04,0x00,0x83,0x0d]
+ twnli 3, 4
+# CHECK-BE: tw 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x08]
+# CHECK-LE: tw 12, 3, 4 # encoding: [0x08,0x20,0x83,0x7d]
+ twnl 3, 4
+# CHECK-BE: tdi 12, 3, 4 # encoding: [0x09,0x83,0x00,0x04]
+# CHECK-LE: tdi 12, 3, 4 # encoding: [0x04,0x00,0x83,0x09]
+ tdnli 3, 4
+# CHECK-BE: td 12, 3, 4 # encoding: [0x7d,0x83,0x20,0x88]
+# CHECK-LE: td 12, 3, 4 # encoding: [0x88,0x20,0x83,0x7d]
+ tdnl 3, 4
+
+# CHECK-BE: twi 24, 3, 4 # encoding: [0x0f,0x03,0x00,0x04]
+# CHECK-LE: twi 24, 3, 4 # encoding: [0x04,0x00,0x03,0x0f]
+ twnei 3, 4
+# CHECK-BE: tw 24, 3, 4 # encoding: [0x7f,0x03,0x20,0x08]
+# CHECK-LE: tw 24, 3, 4 # encoding: [0x08,0x20,0x03,0x7f]
+ twne 3, 4
+# CHECK-BE: tdi 24, 3, 4 # encoding: [0x0b,0x03,0x00,0x04]
+# CHECK-LE: tdi 24, 3, 4 # encoding: [0x04,0x00,0x03,0x0b]
+ tdnei 3, 4
+# CHECK-BE: td 24, 3, 4 # encoding: [0x7f,0x03,0x20,0x88]
+# CHECK-LE: td 24, 3, 4 # encoding: [0x88,0x20,0x03,0x7f]
+ tdne 3, 4
+
+# CHECK-BE: twi 20, 3, 4 # encoding: [0x0e,0x83,0x00,0x04]
+# CHECK-LE: twi 20, 3, 4 # encoding: [0x04,0x00,0x83,0x0e]
+ twngi 3, 4
+# CHECK-BE: tw 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x08]
+# CHECK-LE: tw 20, 3, 4 # encoding: [0x08,0x20,0x83,0x7e]
+ twng 3, 4
+# CHECK-BE: tdi 20, 3, 4 # encoding: [0x0a,0x83,0x00,0x04]
+# CHECK-LE: tdi 20, 3, 4 # encoding: [0x04,0x00,0x83,0x0a]
+ tdngi 3, 4
+# CHECK-BE: td 20, 3, 4 # encoding: [0x7e,0x83,0x20,0x88]
+# CHECK-LE: td 20, 3, 4 # encoding: [0x88,0x20,0x83,0x7e]
+ tdng 3, 4
+
+# CHECK-BE: twi 2, 3, 4 # encoding: [0x0c,0x43,0x00,0x04]
+# CHECK-LE: twi 2, 3, 4 # encoding: [0x04,0x00,0x43,0x0c]
+ twllti 3, 4
+# CHECK-BE: tw 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x08]
+# CHECK-LE: tw 2, 3, 4 # encoding: [0x08,0x20,0x43,0x7c]
+ twllt 3, 4
+# CHECK-BE: tdi 2, 3, 4 # encoding: [0x08,0x43,0x00,0x04]
+# CHECK-LE: tdi 2, 3, 4 # encoding: [0x04,0x00,0x43,0x08]
+ tdllti 3, 4
+# CHECK-BE: td 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x88]
+# CHECK-LE: td 2, 3, 4 # encoding: [0x88,0x20,0x43,0x7c]
+ tdllt 3, 4
+
+# CHECK-BE: twi 6, 3, 4 # encoding: [0x0c,0xc3,0x00,0x04]
+# CHECK-LE: twi 6, 3, 4 # encoding: [0x04,0x00,0xc3,0x0c]
+ twllei 3, 4
+# CHECK-BE: tw 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x08]
+# CHECK-LE: tw 6, 3, 4 # encoding: [0x08,0x20,0xc3,0x7c]
+ twlle 3, 4
+# CHECK-BE: tdi 6, 3, 4 # encoding: [0x08,0xc3,0x00,0x04]
+# CHECK-LE: tdi 6, 3, 4 # encoding: [0x04,0x00,0xc3,0x08]
+ tdllei 3, 4
+# CHECK-BE: td 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x88]
+# CHECK-LE: td 6, 3, 4 # encoding: [0x88,0x20,0xc3,0x7c]
+ tdlle 3, 4
+
+# CHECK-BE: twi 5, 3, 4 # encoding: [0x0c,0xa3,0x00,0x04]
+# CHECK-LE: twi 5, 3, 4 # encoding: [0x04,0x00,0xa3,0x0c]
+ twlgei 3, 4
+# CHECK-BE: tw 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x08]
+# CHECK-LE: tw 5, 3, 4 # encoding: [0x08,0x20,0xa3,0x7c]
+ twlge 3, 4
+# CHECK-BE: tdi 5, 3, 4 # encoding: [0x08,0xa3,0x00,0x04]
+# CHECK-LE: tdi 5, 3, 4 # encoding: [0x04,0x00,0xa3,0x08]
+ tdlgei 3, 4
+# CHECK-BE: td 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x88]
+# CHECK-LE: td 5, 3, 4 # encoding: [0x88,0x20,0xa3,0x7c]
+ tdlge 3, 4
+
+# CHECK-BE: twi 1, 3, 4 # encoding: [0x0c,0x23,0x00,0x04]
+# CHECK-LE: twi 1, 3, 4 # encoding: [0x04,0x00,0x23,0x0c]
+ twlgti 3, 4
+# CHECK-BE: tw 1, 3, 4 # encoding: [0x7c,0x23,0x20,0x08]
+# CHECK-LE: tw 1, 3, 4 # encoding: [0x08,0x20,0x23,0x7c]
+ twlgt 3, 4
+# CHECK-BE: tdi 1, 3, 4 # encoding: [0x08,0x23,0x00,0x04]
+# CHECK-LE: tdi 1, 3, 4 # encoding: [0x04,0x00,0x23,0x08]
+ tdlgti 3, 4
+# CHECK-BE: td 1, 3, 4 # encoding: [0x7c,0x23,0x20,0x88]
+# CHECK-LE: td 1, 3, 4 # encoding: [0x88,0x20,0x23,0x7c]
+ tdlgt 3, 4
+
+# CHECK-BE: twi 5, 3, 4 # encoding: [0x0c,0xa3,0x00,0x04]
+# CHECK-LE: twi 5, 3, 4 # encoding: [0x04,0x00,0xa3,0x0c]
+ twlnli 3, 4
+# CHECK-BE: tw 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x08]
+# CHECK-LE: tw 5, 3, 4 # encoding: [0x08,0x20,0xa3,0x7c]
+ twlnl 3, 4
+# CHECK-BE: tdi 5, 3, 4 # encoding: [0x08,0xa3,0x00,0x04]
+# CHECK-LE: tdi 5, 3, 4 # encoding: [0x04,0x00,0xa3,0x08]
+ tdlnli 3, 4
+# CHECK-BE: td 5, 3, 4 # encoding: [0x7c,0xa3,0x20,0x88]
+# CHECK-LE: td 5, 3, 4 # encoding: [0x88,0x20,0xa3,0x7c]
+ tdlnl 3, 4
+
+# CHECK-BE: twi 6, 3, 4 # encoding: [0x0c,0xc3,0x00,0x04]
+# CHECK-LE: twi 6, 3, 4 # encoding: [0x04,0x00,0xc3,0x0c]
+ twlngi 3, 4
+# CHECK-BE: tw 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x08]
+# CHECK-LE: tw 6, 3, 4 # encoding: [0x08,0x20,0xc3,0x7c]
+ twlng 3, 4
+# CHECK-BE: tdi 6, 3, 4 # encoding: [0x08,0xc3,0x00,0x04]
+# CHECK-LE: tdi 6, 3, 4 # encoding: [0x04,0x00,0xc3,0x08]
+ tdlngi 3, 4
+# CHECK-BE: td 6, 3, 4 # encoding: [0x7c,0xc3,0x20,0x88]
+# CHECK-LE: td 6, 3, 4 # encoding: [0x88,0x20,0xc3,0x7c]
+ tdlng 3, 4
+
+# CHECK-BE: twi 31, 3, 4 # encoding: [0x0f,0xe3,0x00,0x04]
+# CHECK-LE: twi 31, 3, 4 # encoding: [0x04,0x00,0xe3,0x0f]
+ twui 3, 4
+# CHECK-BE: tw 31, 3, 4 # encoding: [0x7f,0xe3,0x20,0x08]
+# CHECK-LE: tw 31, 3, 4 # encoding: [0x08,0x20,0xe3,0x7f]
+ twu 3, 4
+# CHECK-BE: tdi 31, 3, 4 # encoding: [0x0b,0xe3,0x00,0x04]
+# CHECK-LE: tdi 31, 3, 4 # encoding: [0x04,0x00,0xe3,0x0b]
+ tdui 3, 4
+# CHECK-BE: td 31, 3, 4 # encoding: [0x7f,0xe3,0x20,0x88]
+# CHECK-LE: td 31, 3, 4 # encoding: [0x88,0x20,0xe3,0x7f]
+ tdu 3, 4
+
+# CHECK-BE: trap # encoding: [0x7f,0xe0,0x00,0x08]
+# CHECK-LE: trap # encoding: [0x08,0x00,0xe0,0x7f]
+ trap
# Rotate and shift mnemonics
-# CHECK: rldicr 2, 3, 5, 3 # encoding: [0x78,0x62,0x28,0xc4]
- extldi 2, 3, 4, 5
-# CHECK: rldicr. 2, 3, 5, 3 # encoding: [0x78,0x62,0x28,0xc5]
- extldi. 2, 3, 4, 5
-# CHECK: rldicl 2, 3, 9, 60 # encoding: [0x78,0x62,0x4f,0x20]
- extrdi 2, 3, 4, 5
-# CHECK: rldicl. 2, 3, 9, 60 # encoding: [0x78,0x62,0x4f,0x21]
- extrdi. 2, 3, 4, 5
-# CHECK: rldimi 2, 3, 55, 5 # encoding: [0x78,0x62,0xb9,0x4e]
- insrdi 2, 3, 4, 5
-# CHECK: rldimi. 2, 3, 55, 5 # encoding: [0x78,0x62,0xb9,0x4f]
- insrdi. 2, 3, 4, 5
-# CHECK: rldicl 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x00]
- rotldi 2, 3, 4
-# CHECK: rldicl. 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x01]
- rotldi. 2, 3, 4
-# CHECK: rldicl 2, 3, 60, 0 # encoding: [0x78,0x62,0xe0,0x02]
- rotrdi 2, 3, 4
-# CHECK: rldicl. 2, 3, 60, 0 # encoding: [0x78,0x62,0xe0,0x03]
- rotrdi. 2, 3, 4
-# CHECK: rldcl 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x10]
- rotld 2, 3, 4
-# CHECK: rldcl. 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x11]
- rotld. 2, 3, 4
-# CHECK: sldi 2, 3, 4 # encoding: [0x78,0x62,0x26,0xe4]
- sldi 2, 3, 4
-# CHECK: rldicr. 2, 3, 4, 59 # encoding: [0x78,0x62,0x26,0xe5]
- sldi. 2, 3, 4
-# CHECK: rldicl 2, 3, 60, 4 # encoding: [0x78,0x62,0xe1,0x02]
- srdi 2, 3, 4
-# CHECK: rldicl. 2, 3, 60, 4 # encoding: [0x78,0x62,0xe1,0x03]
- srdi. 2, 3, 4
-# CHECK: rldicl 2, 3, 0, 4 # encoding: [0x78,0x62,0x01,0x00]
- clrldi 2, 3, 4
-# CHECK: rldicl. 2, 3, 0, 4 # encoding: [0x78,0x62,0x01,0x01]
- clrldi. 2, 3, 4
-# CHECK: rldicr 2, 3, 0, 59 # encoding: [0x78,0x62,0x06,0xe4]
- clrrdi 2, 3, 4
-# CHECK: rldicr. 2, 3, 0, 59 # encoding: [0x78,0x62,0x06,0xe5]
- clrrdi. 2, 3, 4
-# CHECK: rldic 2, 3, 4, 1 # encoding: [0x78,0x62,0x20,0x48]
- clrlsldi 2, 3, 5, 4
-# CHECK: rldic. 2, 3, 4, 1 # encoding: [0x78,0x62,0x20,0x49]
- clrlsldi. 2, 3, 5, 4
-
-# CHECK: rlwinm 2, 3, 5, 0, 3 # encoding: [0x54,0x62,0x28,0x06]
- extlwi 2, 3, 4, 5
-# CHECK: rlwinm. 2, 3, 5, 0, 3 # encoding: [0x54,0x62,0x28,0x07]
- extlwi. 2, 3, 4, 5
-# CHECK: rlwinm 2, 3, 9, 28, 31 # encoding: [0x54,0x62,0x4f,0x3e]
- extrwi 2, 3, 4, 5
-# CHECK: rlwinm. 2, 3, 9, 28, 31 # encoding: [0x54,0x62,0x4f,0x3f]
- extrwi. 2, 3, 4, 5
-# CHECK: rlwimi 2, 3, 27, 5, 8 # encoding: [0x50,0x62,0xd9,0x50]
- inslwi 2, 3, 4, 5
-# CHECK: rlwimi. 2, 3, 27, 5, 8 # encoding: [0x50,0x62,0xd9,0x51]
- inslwi. 2, 3, 4, 5
-# CHECK: rlwimi 2, 3, 23, 5, 8 # encoding: [0x50,0x62,0xb9,0x50]
- insrwi 2, 3, 4, 5
-# CHECK: rlwimi. 2, 3, 23, 5, 8 # encoding: [0x50,0x62,0xb9,0x51]
- insrwi. 2, 3, 4, 5
-# CHECK: rlwinm 2, 3, 4, 0, 31 # encoding: [0x54,0x62,0x20,0x3e]
- rotlwi 2, 3, 4
-# CHECK: rlwinm. 2, 3, 4, 0, 31 # encoding: [0x54,0x62,0x20,0x3f]
- rotlwi. 2, 3, 4
-# CHECK: rlwinm 2, 3, 28, 0, 31 # encoding: [0x54,0x62,0xe0,0x3e]
- rotrwi 2, 3, 4
-# CHECK: rlwinm. 2, 3, 28, 0, 31 # encoding: [0x54,0x62,0xe0,0x3f]
- rotrwi. 2, 3, 4
-# CHECK: rlwnm 2, 3, 4, 0, 31 # encoding: [0x5c,0x62,0x20,0x3e]
- rotlw 2, 3, 4
-# CHECK: rlwnm. 2, 3, 4, 0, 31 # encoding: [0x5c,0x62,0x20,0x3f]
- rotlw. 2, 3, 4
-# CHECK: slwi 2, 3, 4 # encoding: [0x54,0x62,0x20,0x36]
- slwi 2, 3, 4
-# CHECK: rlwinm. 2, 3, 4, 0, 27 # encoding: [0x54,0x62,0x20,0x37]
- slwi. 2, 3, 4
-# CHECK: srwi 2, 3, 4 # encoding: [0x54,0x62,0xe1,0x3e]
- srwi 2, 3, 4
-# CHECK: rlwinm. 2, 3, 28, 4, 31 # encoding: [0x54,0x62,0xe1,0x3f]
- srwi. 2, 3, 4
-# CHECK: rlwinm 2, 3, 0, 4, 31 # encoding: [0x54,0x62,0x01,0x3e]
- clrlwi 2, 3, 4
-# CHECK: rlwinm. 2, 3, 0, 4, 31 # encoding: [0x54,0x62,0x01,0x3f]
- clrlwi. 2, 3, 4
-# CHECK: rlwinm 2, 3, 0, 0, 27 # encoding: [0x54,0x62,0x00,0x36]
- clrrwi 2, 3, 4
-# CHECK: rlwinm. 2, 3, 0, 0, 27 # encoding: [0x54,0x62,0x00,0x37]
- clrrwi. 2, 3, 4
-# CHECK: rlwinm 2, 3, 4, 1, 27 # encoding: [0x54,0x62,0x20,0x76]
- clrlslwi 2, 3, 5, 4
-# CHECK: rlwinm. 2, 3, 4, 1, 27 # encoding: [0x54,0x62,0x20,0x77]
- clrlslwi. 2, 3, 5, 4
+# CHECK-BE: rldicr 2, 3, 5, 3 # encoding: [0x78,0x62,0x28,0xc4]
+# CHECK-LE: rldicr 2, 3, 5, 3 # encoding: [0xc4,0x28,0x62,0x78]
+ extldi 2, 3, 4, 5
+# CHECK-BE: rldicr. 2, 3, 5, 3 # encoding: [0x78,0x62,0x28,0xc5]
+# CHECK-LE: rldicr. 2, 3, 5, 3 # encoding: [0xc5,0x28,0x62,0x78]
+ extldi. 2, 3, 4, 5
+# CHECK-BE: rldicl 2, 3, 9, 60 # encoding: [0x78,0x62,0x4f,0x20]
+# CHECK-LE: rldicl 2, 3, 9, 60 # encoding: [0x20,0x4f,0x62,0x78]
+ extrdi 2, 3, 4, 5
+# CHECK-BE: rldicl. 2, 3, 9, 60 # encoding: [0x78,0x62,0x4f,0x21]
+# CHECK-LE: rldicl. 2, 3, 9, 60 # encoding: [0x21,0x4f,0x62,0x78]
+ extrdi. 2, 3, 4, 5
+# CHECK-BE: rldimi 2, 3, 55, 5 # encoding: [0x78,0x62,0xb9,0x4e]
+# CHECK-LE: rldimi 2, 3, 55, 5 # encoding: [0x4e,0xb9,0x62,0x78]
+ insrdi 2, 3, 4, 5
+# CHECK-BE: rldimi. 2, 3, 55, 5 # encoding: [0x78,0x62,0xb9,0x4f]
+# CHECK-LE: rldimi. 2, 3, 55, 5 # encoding: [0x4f,0xb9,0x62,0x78]
+ insrdi. 2, 3, 4, 5
+# CHECK-BE: rldicl 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x00]
+# CHECK-LE: rldicl 2, 3, 4, 0 # encoding: [0x00,0x20,0x62,0x78]
+ rotldi 2, 3, 4
+# CHECK-BE: rldicl. 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x01]
+# CHECK-LE: rldicl. 2, 3, 4, 0 # encoding: [0x01,0x20,0x62,0x78]
+ rotldi. 2, 3, 4
+# CHECK-BE: rldicl 2, 3, 60, 0 # encoding: [0x78,0x62,0xe0,0x02]
+# CHECK-LE: rldicl 2, 3, 60, 0 # encoding: [0x02,0xe0,0x62,0x78]
+ rotrdi 2, 3, 4
+# CHECK-BE: rldicl. 2, 3, 60, 0 # encoding: [0x78,0x62,0xe0,0x03]
+# CHECK-LE: rldicl. 2, 3, 60, 0 # encoding: [0x03,0xe0,0x62,0x78]
+ rotrdi. 2, 3, 4
+# CHECK-BE: rldcl 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x10]
+# CHECK-LE: rldcl 2, 3, 4, 0 # encoding: [0x10,0x20,0x62,0x78]
+ rotld 2, 3, 4
+# CHECK-BE: rldcl. 2, 3, 4, 0 # encoding: [0x78,0x62,0x20,0x11]
+# CHECK-LE: rldcl. 2, 3, 4, 0 # encoding: [0x11,0x20,0x62,0x78]
+ rotld. 2, 3, 4
+# CHECK-BE: sldi 2, 3, 4 # encoding: [0x78,0x62,0x26,0xe4]
+# CHECK-LE: sldi 2, 3, 4 # encoding: [0xe4,0x26,0x62,0x78]
+ sldi 2, 3, 4
+# CHECK-BE: rldicr. 2, 3, 4, 59 # encoding: [0x78,0x62,0x26,0xe5]
+# CHECK-LE: rldicr. 2, 3, 4, 59 # encoding: [0xe5,0x26,0x62,0x78]
+ sldi. 2, 3, 4
+# CHECK-BE: rldicl 2, 3, 60, 4 # encoding: [0x78,0x62,0xe1,0x02]
+# CHECK-LE: rldicl 2, 3, 60, 4 # encoding: [0x02,0xe1,0x62,0x78]
+ srdi 2, 3, 4
+# CHECK-BE: rldicl. 2, 3, 60, 4 # encoding: [0x78,0x62,0xe1,0x03]
+# CHECK-LE: rldicl. 2, 3, 60, 4 # encoding: [0x03,0xe1,0x62,0x78]
+ srdi. 2, 3, 4
+# CHECK-BE: rldicl 2, 3, 0, 4 # encoding: [0x78,0x62,0x01,0x00]
+# CHECK-LE: rldicl 2, 3, 0, 4 # encoding: [0x00,0x01,0x62,0x78]
+ clrldi 2, 3, 4
+# CHECK-BE: rldicl. 2, 3, 0, 4 # encoding: [0x78,0x62,0x01,0x01]
+# CHECK-LE: rldicl. 2, 3, 0, 4 # encoding: [0x01,0x01,0x62,0x78]
+ clrldi. 2, 3, 4
+# CHECK-BE: rldicr 2, 3, 0, 59 # encoding: [0x78,0x62,0x06,0xe4]
+# CHECK-LE: rldicr 2, 3, 0, 59 # encoding: [0xe4,0x06,0x62,0x78]
+ clrrdi 2, 3, 4
+# CHECK-BE: rldicr. 2, 3, 0, 59 # encoding: [0x78,0x62,0x06,0xe5]
+# CHECK-LE: rldicr. 2, 3, 0, 59 # encoding: [0xe5,0x06,0x62,0x78]
+ clrrdi. 2, 3, 4
+# CHECK-BE: rldic 2, 3, 4, 1 # encoding: [0x78,0x62,0x20,0x48]
+# CHECK-LE: rldic 2, 3, 4, 1 # encoding: [0x48,0x20,0x62,0x78]
+ clrlsldi 2, 3, 5, 4
+# CHECK-BE: rldic. 2, 3, 4, 1 # encoding: [0x78,0x62,0x20,0x49]
+# CHECK-LE: rldic. 2, 3, 4, 1 # encoding: [0x49,0x20,0x62,0x78]
+ clrlsldi. 2, 3, 5, 4
+
+# CHECK-BE: rlwinm 2, 3, 5, 0, 3 # encoding: [0x54,0x62,0x28,0x06]
+# CHECK-LE: rlwinm 2, 3, 5, 0, 3 # encoding: [0x06,0x28,0x62,0x54]
+ extlwi 2, 3, 4, 5
+# CHECK-BE: rlwinm. 2, 3, 5, 0, 3 # encoding: [0x54,0x62,0x28,0x07]
+# CHECK-LE: rlwinm. 2, 3, 5, 0, 3 # encoding: [0x07,0x28,0x62,0x54]
+ extlwi. 2, 3, 4, 5
+# CHECK-BE: rlwinm 2, 3, 9, 28, 31 # encoding: [0x54,0x62,0x4f,0x3e]
+# CHECK-LE: rlwinm 2, 3, 9, 28, 31 # encoding: [0x3e,0x4f,0x62,0x54]
+ extrwi 2, 3, 4, 5
+# CHECK-BE: rlwinm. 2, 3, 9, 28, 31 # encoding: [0x54,0x62,0x4f,0x3f]
+# CHECK-LE: rlwinm. 2, 3, 9, 28, 31 # encoding: [0x3f,0x4f,0x62,0x54]
+ extrwi. 2, 3, 4, 5
+# CHECK-BE: rlwimi 2, 3, 27, 5, 8 # encoding: [0x50,0x62,0xd9,0x50]
+# CHECK-LE: rlwimi 2, 3, 27, 5, 8 # encoding: [0x50,0xd9,0x62,0x50]
+ inslwi 2, 3, 4, 5
+# CHECK-BE: rlwimi. 2, 3, 27, 5, 8 # encoding: [0x50,0x62,0xd9,0x51]
+# CHECK-LE: rlwimi. 2, 3, 27, 5, 8 # encoding: [0x51,0xd9,0x62,0x50]
+ inslwi. 2, 3, 4, 5
+# CHECK-BE: rlwimi 2, 3, 23, 5, 8 # encoding: [0x50,0x62,0xb9,0x50]
+# CHECK-LE: rlwimi 2, 3, 23, 5, 8 # encoding: [0x50,0xb9,0x62,0x50]
+ insrwi 2, 3, 4, 5
+# CHECK-BE: rlwimi. 2, 3, 23, 5, 8 # encoding: [0x50,0x62,0xb9,0x51]
+# CHECK-LE: rlwimi. 2, 3, 23, 5, 8 # encoding: [0x51,0xb9,0x62,0x50]
+ insrwi. 2, 3, 4, 5
+# CHECK-BE: rlwinm 2, 3, 4, 0, 31 # encoding: [0x54,0x62,0x20,0x3e]
+# CHECK-LE: rlwinm 2, 3, 4, 0, 31 # encoding: [0x3e,0x20,0x62,0x54]
+ rotlwi 2, 3, 4
+# CHECK-BE: rlwinm. 2, 3, 4, 0, 31 # encoding: [0x54,0x62,0x20,0x3f]
+# CHECK-LE: rlwinm. 2, 3, 4, 0, 31 # encoding: [0x3f,0x20,0x62,0x54]
+ rotlwi. 2, 3, 4
+# CHECK-BE: rlwinm 2, 3, 28, 0, 31 # encoding: [0x54,0x62,0xe0,0x3e]
+# CHECK-LE: rlwinm 2, 3, 28, 0, 31 # encoding: [0x3e,0xe0,0x62,0x54]
+ rotrwi 2, 3, 4
+# CHECK-BE: rlwinm. 2, 3, 28, 0, 31 # encoding: [0x54,0x62,0xe0,0x3f]
+# CHECK-LE: rlwinm. 2, 3, 28, 0, 31 # encoding: [0x3f,0xe0,0x62,0x54]
+ rotrwi. 2, 3, 4
+# CHECK-BE: rlwnm 2, 3, 4, 0, 31 # encoding: [0x5c,0x62,0x20,0x3e]
+# CHECK-LE: rlwnm 2, 3, 4, 0, 31 # encoding: [0x3e,0x20,0x62,0x5c]
+ rotlw 2, 3, 4
+# CHECK-BE: rlwnm. 2, 3, 4, 0, 31 # encoding: [0x5c,0x62,0x20,0x3f]
+# CHECK-LE: rlwnm. 2, 3, 4, 0, 31 # encoding: [0x3f,0x20,0x62,0x5c]
+ rotlw. 2, 3, 4
+# CHECK-BE: slwi 2, 3, 4 # encoding: [0x54,0x62,0x20,0x36]
+# CHECK-LE: slwi 2, 3, 4 # encoding: [0x36,0x20,0x62,0x54]
+ slwi 2, 3, 4
+# CHECK-BE: rlwinm. 2, 3, 4, 0, 27 # encoding: [0x54,0x62,0x20,0x37]
+# CHECK-LE: rlwinm. 2, 3, 4, 0, 27 # encoding: [0x37,0x20,0x62,0x54]
+ slwi. 2, 3, 4
+# CHECK-BE: srwi 2, 3, 4 # encoding: [0x54,0x62,0xe1,0x3e]
+# CHECK-LE: srwi 2, 3, 4 # encoding: [0x3e,0xe1,0x62,0x54]
+ srwi 2, 3, 4
+# CHECK-BE: rlwinm. 2, 3, 28, 4, 31 # encoding: [0x54,0x62,0xe1,0x3f]
+# CHECK-LE: rlwinm. 2, 3, 28, 4, 31 # encoding: [0x3f,0xe1,0x62,0x54]
+ srwi. 2, 3, 4
+# CHECK-BE: rlwinm 2, 3, 0, 4, 31 # encoding: [0x54,0x62,0x01,0x3e]
+# CHECK-LE: rlwinm 2, 3, 0, 4, 31 # encoding: [0x3e,0x01,0x62,0x54]
+ clrlwi 2, 3, 4
+# CHECK-BE: rlwinm. 2, 3, 0, 4, 31 # encoding: [0x54,0x62,0x01,0x3f]
+# CHECK-LE: rlwinm. 2, 3, 0, 4, 31 # encoding: [0x3f,0x01,0x62,0x54]
+ clrlwi. 2, 3, 4
+# CHECK-BE: rlwinm 2, 3, 0, 0, 27 # encoding: [0x54,0x62,0x00,0x36]
+# CHECK-LE: rlwinm 2, 3, 0, 0, 27 # encoding: [0x36,0x00,0x62,0x54]
+ clrrwi 2, 3, 4
+# CHECK-BE: rlwinm. 2, 3, 0, 0, 27 # encoding: [0x54,0x62,0x00,0x37]
+# CHECK-LE: rlwinm. 2, 3, 0, 0, 27 # encoding: [0x37,0x00,0x62,0x54]
+ clrrwi. 2, 3, 4
+# CHECK-BE: rlwinm 2, 3, 4, 1, 27 # encoding: [0x54,0x62,0x20,0x76]
+# CHECK-LE: rlwinm 2, 3, 4, 1, 27 # encoding: [0x76,0x20,0x62,0x54]
+ clrlslwi 2, 3, 5, 4
+# CHECK-BE: rlwinm. 2, 3, 4, 1, 27 # encoding: [0x54,0x62,0x20,0x77]
+# CHECK-LE: rlwinm. 2, 3, 4, 1, 27 # encoding: [0x77,0x20,0x62,0x54]
+ clrlslwi. 2, 3, 5, 4
# Move to/from special purpose register mnemonics
-# CHECK: mtspr 1, 2 # encoding: [0x7c,0x41,0x03,0xa6]
- mtxer 2
-# CHECK: mfspr 2, 1 # encoding: [0x7c,0x41,0x02,0xa6]
- mfxer 2
-# CHECK: mtlr 2 # encoding: [0x7c,0x48,0x03,0xa6]
- mtlr 2
-# CHECK: mflr 2 # encoding: [0x7c,0x48,0x02,0xa6]
- mflr 2
-# CHECK: mtctr 2 # encoding: [0x7c,0x49,0x03,0xa6]
- mtctr 2
-# CHECK: mfctr 2 # encoding: [0x7c,0x49,0x02,0xa6]
- mfctr 2
+# CHECK-BE: mtspr 1, 2 # encoding: [0x7c,0x41,0x03,0xa6]
+# CHECK-LE: mtspr 1, 2 # encoding: [0xa6,0x03,0x41,0x7c]
+ mtxer 2
+# CHECK-BE: mfspr 2, 1 # encoding: [0x7c,0x41,0x02,0xa6]
+# CHECK-LE: mfspr 2, 1 # encoding: [0xa6,0x02,0x41,0x7c]
+ mfxer 2
+# CHECK-BE: mtlr 2 # encoding: [0x7c,0x48,0x03,0xa6]
+# CHECK-LE: mtlr 2 # encoding: [0xa6,0x03,0x48,0x7c]
+ mtlr 2
+# CHECK-BE: mflr 2 # encoding: [0x7c,0x48,0x02,0xa6]
+# CHECK-LE: mflr 2 # encoding: [0xa6,0x02,0x48,0x7c]
+ mflr 2
+# CHECK-BE: mtctr 2 # encoding: [0x7c,0x49,0x03,0xa6]
+# CHECK-LE: mtctr 2 # encoding: [0xa6,0x03,0x49,0x7c]
+ mtctr 2
+# CHECK-BE: mfctr 2 # encoding: [0x7c,0x49,0x02,0xa6]
+# CHECK-LE: mfctr 2 # encoding: [0xa6,0x02,0x49,0x7c]
+ mfctr 2
# Miscellaneous mnemonics
-# CHECK: nop # encoding: [0x60,0x00,0x00,0x00]
- nop
-# CHECK: xori 0, 0, 0 # encoding: [0x68,0x00,0x00,0x00]
- xnop
-# CHECK: li 2, 128 # encoding: [0x38,0x40,0x00,0x80]
- li 2, 128
-# CHECK: lis 2, 128 # encoding: [0x3c,0x40,0x00,0x80]
- lis 2, 128
-# CHECK: la 2, 128(4)
- la 2, 128(4)
-# CHECK: mr 2, 3 # encoding: [0x7c,0x62,0x1b,0x78]
- mr 2, 3
-# CHECK: or. 2, 3, 3 # encoding: [0x7c,0x62,0x1b,0x79]
- mr. 2, 3
-# CHECK: nor 2, 3, 3 # encoding: [0x7c,0x62,0x18,0xf8]
- not 2, 3
-# CHECK: nor. 2, 3, 3 # encoding: [0x7c,0x62,0x18,0xf9]
- not. 2, 3
-# CHECK: mtcrf 255, 2 # encoding: [0x7c,0x4f,0xf1,0x20]
- mtcr 2
+# CHECK-BE: nop # encoding: [0x60,0x00,0x00,0x00]
+# CHECK-LE: nop # encoding: [0x00,0x00,0x00,0x60]
+ nop
+# CHECK-BE: xori 0, 0, 0 # encoding: [0x68,0x00,0x00,0x00]
+# CHECK-LE: xori 0, 0, 0 # encoding: [0x00,0x00,0x00,0x68]
+ xnop
+# CHECK-BE: li 2, 128 # encoding: [0x38,0x40,0x00,0x80]
+# CHECK-LE: li 2, 128 # encoding: [0x80,0x00,0x40,0x38]
+ li 2, 128
+# CHECK-BE: lis 2, 128 # encoding: [0x3c,0x40,0x00,0x80]
+# CHECK-LE: lis 2, 128 # encoding: [0x80,0x00,0x40,0x3c]
+ lis 2, 128
+# CHECK-BE: la 2, 128(4)
+# CHECK-LE: la 2, 128(4)
+ la 2, 128(4)
+# CHECK-BE: mr 2, 3 # encoding: [0x7c,0x62,0x1b,0x78]
+# CHECK-LE: mr 2, 3 # encoding: [0x78,0x1b,0x62,0x7c]
+ mr 2, 3
+# CHECK-BE: or. 2, 3, 3 # encoding: [0x7c,0x62,0x1b,0x79]
+# CHECK-LE: or. 2, 3, 3 # encoding: [0x79,0x1b,0x62,0x7c]
+ mr. 2, 3
+# CHECK-BE: nor 2, 3, 3 # encoding: [0x7c,0x62,0x18,0xf8]
+# CHECK-LE: nor 2, 3, 3 # encoding: [0xf8,0x18,0x62,0x7c]
+ not 2, 3
+# CHECK-BE: nor. 2, 3, 3 # encoding: [0x7c,0x62,0x18,0xf9]
+# CHECK-LE: nor. 2, 3, 3 # encoding: [0xf9,0x18,0x62,0x7c]
+ not. 2, 3
+# CHECK-BE: mtcrf 255, 2 # encoding: [0x7c,0x4f,0xf1,0x20]
+# CHECK-LE: mtcrf 255, 2 # encoding: [0x20,0xf1,0x4f,0x7c]
+ mtcr 2
diff --git a/test/MC/PowerPC/ppc64-encoding-fp.s b/test/MC/PowerPC/ppc64-encoding-fp.s
index f9bdee14e157..c19f9b327642 100644
--- a/test/MC/PowerPC/ppc64-encoding-fp.s
+++ b/test/MC/PowerPC/ppc64-encoding-fp.s
@@ -1,269 +1,379 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# Floating-point facility
# Floating-point load instructions
-# CHECK: lfs 2, 128(4) # encoding: [0xc0,0x44,0x00,0x80]
- lfs 2, 128(4)
-# CHECK: lfsx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x2e]
- lfsx 2, 3, 4
-# CHECK: lfsu 2, 128(4) # encoding: [0xc4,0x44,0x00,0x80]
- lfsu 2, 128(4)
-# CHECK: lfsux 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x6e]
- lfsux 2, 3, 4
-# CHECK: lfd 2, 128(4) # encoding: [0xc8,0x44,0x00,0x80]
- lfd 2, 128(4)
-# CHECK: lfdx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0xae]
- lfdx 2, 3, 4
-# CHECK: lfdu 2, 128(4) # encoding: [0xcc,0x44,0x00,0x80]
- lfdu 2, 128(4)
-# CHECK: lfdux 2, 3, 4 # encoding: [0x7c,0x43,0x24,0xee]
- lfdux 2, 3, 4
-# CHECK: lfiwax 2, 3, 4 # encoding: [0x7c,0x43,0x26,0xae]
- lfiwax 2, 3, 4
-# CHECK: lfiwzx 2, 3, 4 # encoding: [0x7c,0x43,0x26,0xee]
- lfiwzx 2, 3, 4
+# CHECK-BE: lfs 2, 128(4) # encoding: [0xc0,0x44,0x00,0x80]
+# CHECK-LE: lfs 2, 128(4) # encoding: [0x80,0x00,0x44,0xc0]
+ lfs 2, 128(4)
+# CHECK-BE: lfsx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x2e]
+# CHECK-LE: lfsx 2, 3, 4 # encoding: [0x2e,0x24,0x43,0x7c]
+ lfsx 2, 3, 4
+# CHECK-BE: lfsu 2, 128(4) # encoding: [0xc4,0x44,0x00,0x80]
+# CHECK-LE: lfsu 2, 128(4) # encoding: [0x80,0x00,0x44,0xc4]
+ lfsu 2, 128(4)
+# CHECK-BE: lfsux 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x6e]
+# CHECK-LE: lfsux 2, 3, 4 # encoding: [0x6e,0x24,0x43,0x7c]
+ lfsux 2, 3, 4
+# CHECK-BE: lfd 2, 128(4) # encoding: [0xc8,0x44,0x00,0x80]
+# CHECK-LE: lfd 2, 128(4) # encoding: [0x80,0x00,0x44,0xc8]
+ lfd 2, 128(4)
+# CHECK-BE: lfdx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0xae]
+# CHECK-LE: lfdx 2, 3, 4 # encoding: [0xae,0x24,0x43,0x7c]
+ lfdx 2, 3, 4
+# CHECK-BE: lfdu 2, 128(4) # encoding: [0xcc,0x44,0x00,0x80]
+# CHECK-LE: lfdu 2, 128(4) # encoding: [0x80,0x00,0x44,0xcc]
+ lfdu 2, 128(4)
+# CHECK-BE: lfdux 2, 3, 4 # encoding: [0x7c,0x43,0x24,0xee]
+# CHECK-LE: lfdux 2, 3, 4 # encoding: [0xee,0x24,0x43,0x7c]
+ lfdux 2, 3, 4
+# CHECK-BE: lfiwax 2, 3, 4 # encoding: [0x7c,0x43,0x26,0xae]
+# CHECK-LE: lfiwax 2, 3, 4 # encoding: [0xae,0x26,0x43,0x7c]
+ lfiwax 2, 3, 4
+# CHECK-BE: lfiwzx 2, 3, 4 # encoding: [0x7c,0x43,0x26,0xee]
+# CHECK-LE: lfiwzx 2, 3, 4 # encoding: [0xee,0x26,0x43,0x7c]
+ lfiwzx 2, 3, 4
# Floating-point store instructions
-# CHECK: stfs 2, 128(4) # encoding: [0xd0,0x44,0x00,0x80]
- stfs 2, 128(4)
-# CHECK: stfsx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x2e]
- stfsx 2, 3, 4
-# CHECK: stfsu 2, 128(4) # encoding: [0xd4,0x44,0x00,0x80]
- stfsu 2, 128(4)
-# CHECK: stfsux 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x6e]
- stfsux 2, 3, 4
-# CHECK: stfd 2, 128(4) # encoding: [0xd8,0x44,0x00,0x80]
- stfd 2, 128(4)
-# CHECK: stfdx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0xae]
- stfdx 2, 3, 4
-# CHECK: stfdu 2, 128(4) # encoding: [0xdc,0x44,0x00,0x80]
- stfdu 2, 128(4)
-# CHECK: stfdux 2, 3, 4 # encoding: [0x7c,0x43,0x25,0xee]
- stfdux 2, 3, 4
-# CHECK: stfiwx 2, 3, 4 # encoding: [0x7c,0x43,0x27,0xae]
- stfiwx 2, 3, 4
+# CHECK-BE: stfs 2, 128(4) # encoding: [0xd0,0x44,0x00,0x80]
+# CHECK-LE: stfs 2, 128(4) # encoding: [0x80,0x00,0x44,0xd0]
+ stfs 2, 128(4)
+# CHECK-BE: stfsx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x2e]
+# CHECK-LE: stfsx 2, 3, 4 # encoding: [0x2e,0x25,0x43,0x7c]
+ stfsx 2, 3, 4
+# CHECK-BE: stfsu 2, 128(4) # encoding: [0xd4,0x44,0x00,0x80]
+# CHECK-LE: stfsu 2, 128(4) # encoding: [0x80,0x00,0x44,0xd4]
+ stfsu 2, 128(4)
+# CHECK-BE: stfsux 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x6e]
+# CHECK-LE: stfsux 2, 3, 4 # encoding: [0x6e,0x25,0x43,0x7c]
+ stfsux 2, 3, 4
+# CHECK-BE: stfd 2, 128(4) # encoding: [0xd8,0x44,0x00,0x80]
+# CHECK-LE: stfd 2, 128(4) # encoding: [0x80,0x00,0x44,0xd8]
+ stfd 2, 128(4)
+# CHECK-BE: stfdx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0xae]
+# CHECK-LE: stfdx 2, 3, 4 # encoding: [0xae,0x25,0x43,0x7c]
+ stfdx 2, 3, 4
+# CHECK-BE: stfdu 2, 128(4) # encoding: [0xdc,0x44,0x00,0x80]
+# CHECK-LE: stfdu 2, 128(4) # encoding: [0x80,0x00,0x44,0xdc]
+ stfdu 2, 128(4)
+# CHECK-BE: stfdux 2, 3, 4 # encoding: [0x7c,0x43,0x25,0xee]
+# CHECK-LE: stfdux 2, 3, 4 # encoding: [0xee,0x25,0x43,0x7c]
+ stfdux 2, 3, 4
+# CHECK-BE: stfiwx 2, 3, 4 # encoding: [0x7c,0x43,0x27,0xae]
+# CHECK-LE: stfiwx 2, 3, 4 # encoding: [0xae,0x27,0x43,0x7c]
+ stfiwx 2, 3, 4
# Floating-point move instructions
-# CHECK: fmr 2, 3 # encoding: [0xfc,0x40,0x18,0x90]
- fmr 2, 3
-# CHECK: fmr. 2, 3 # encoding: [0xfc,0x40,0x18,0x91]
- fmr. 2, 3
-# CHECK: fneg 2, 3 # encoding: [0xfc,0x40,0x18,0x50]
- fneg 2, 3
-# CHECK: fneg. 2, 3 # encoding: [0xfc,0x40,0x18,0x51]
- fneg. 2, 3
-# CHECK: fabs 2, 3 # encoding: [0xfc,0x40,0x1a,0x10]
- fabs 2, 3
-# CHECK: fabs. 2, 3 # encoding: [0xfc,0x40,0x1a,0x11]
- fabs. 2, 3
-# CHECK: fnabs 2, 3 # encoding: [0xfc,0x40,0x19,0x10]
- fnabs 2, 3
-# CHECK: fnabs. 2, 3 # encoding: [0xfc,0x40,0x19,0x11]
- fnabs. 2, 3
-# CHECK: fcpsgn 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x10]
- fcpsgn 2, 3, 4
-# CHECK: fcpsgn. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x11]
- fcpsgn. 2, 3, 4
+# CHECK-BE: fmr 2, 3 # encoding: [0xfc,0x40,0x18,0x90]
+# CHECK-LE: fmr 2, 3 # encoding: [0x90,0x18,0x40,0xfc]
+ fmr 2, 3
+# CHECK-BE: fmr. 2, 3 # encoding: [0xfc,0x40,0x18,0x91]
+# CHECK-LE: fmr. 2, 3 # encoding: [0x91,0x18,0x40,0xfc]
+ fmr. 2, 3
+# CHECK-BE: fneg 2, 3 # encoding: [0xfc,0x40,0x18,0x50]
+# CHECK-LE: fneg 2, 3 # encoding: [0x50,0x18,0x40,0xfc]
+ fneg 2, 3
+# CHECK-BE: fneg. 2, 3 # encoding: [0xfc,0x40,0x18,0x51]
+# CHECK-LE: fneg. 2, 3 # encoding: [0x51,0x18,0x40,0xfc]
+ fneg. 2, 3
+# CHECK-BE: fabs 2, 3 # encoding: [0xfc,0x40,0x1a,0x10]
+# CHECK-LE: fabs 2, 3 # encoding: [0x10,0x1a,0x40,0xfc]
+ fabs 2, 3
+# CHECK-BE: fabs. 2, 3 # encoding: [0xfc,0x40,0x1a,0x11]
+# CHECK-LE: fabs. 2, 3 # encoding: [0x11,0x1a,0x40,0xfc]
+ fabs. 2, 3
+# CHECK-BE: fnabs 2, 3 # encoding: [0xfc,0x40,0x19,0x10]
+# CHECK-LE: fnabs 2, 3 # encoding: [0x10,0x19,0x40,0xfc]
+ fnabs 2, 3
+# CHECK-BE: fnabs. 2, 3 # encoding: [0xfc,0x40,0x19,0x11]
+# CHECK-LE: fnabs. 2, 3 # encoding: [0x11,0x19,0x40,0xfc]
+ fnabs. 2, 3
+# CHECK-BE: fcpsgn 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x10]
+# CHECK-LE: fcpsgn 2, 3, 4 # encoding: [0x10,0x20,0x43,0xfc]
+ fcpsgn 2, 3, 4
+# CHECK-BE: fcpsgn. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x11]
+# CHECK-LE: fcpsgn. 2, 3, 4 # encoding: [0x11,0x20,0x43,0xfc]
+ fcpsgn. 2, 3, 4
# Floating-point arithmetic instructions
-# CHECK: fadd 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x2a]
- fadd 2, 3, 4
-# CHECK: fadd. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x2b]
- fadd. 2, 3, 4
-# CHECK: fadds 2, 3, 4 # encoding: [0xec,0x43,0x20,0x2a]
- fadds 2, 3, 4
-# CHECK: fadds. 2, 3, 4 # encoding: [0xec,0x43,0x20,0x2b]
- fadds. 2, 3, 4
-# CHECK: fsub 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x28]
- fsub 2, 3, 4
-# CHECK: fsub. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x29]
- fsub. 2, 3, 4
-# CHECK: fsubs 2, 3, 4 # encoding: [0xec,0x43,0x20,0x28]
- fsubs 2, 3, 4
-# CHECK: fsubs. 2, 3, 4 # encoding: [0xec,0x43,0x20,0x29]
- fsubs. 2, 3, 4
+# CHECK-BE: fadd 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x2a]
+# CHECK-LE: fadd 2, 3, 4 # encoding: [0x2a,0x20,0x43,0xfc]
+ fadd 2, 3, 4
+# CHECK-BE: fadd. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x2b]
+# CHECK-LE: fadd. 2, 3, 4 # encoding: [0x2b,0x20,0x43,0xfc]
+ fadd. 2, 3, 4
+# CHECK-BE: fadds 2, 3, 4 # encoding: [0xec,0x43,0x20,0x2a]
+# CHECK-LE: fadds 2, 3, 4 # encoding: [0x2a,0x20,0x43,0xec]
+ fadds 2, 3, 4
+# CHECK-BE: fadds. 2, 3, 4 # encoding: [0xec,0x43,0x20,0x2b]
+# CHECK-LE: fadds. 2, 3, 4 # encoding: [0x2b,0x20,0x43,0xec]
+ fadds. 2, 3, 4
+# CHECK-BE: fsub 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x28]
+# CHECK-LE: fsub 2, 3, 4 # encoding: [0x28,0x20,0x43,0xfc]
+ fsub 2, 3, 4
+# CHECK-BE: fsub. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x29]
+# CHECK-LE: fsub. 2, 3, 4 # encoding: [0x29,0x20,0x43,0xfc]
+ fsub. 2, 3, 4
+# CHECK-BE: fsubs 2, 3, 4 # encoding: [0xec,0x43,0x20,0x28]
+# CHECK-LE: fsubs 2, 3, 4 # encoding: [0x28,0x20,0x43,0xec]
+ fsubs 2, 3, 4
+# CHECK-BE: fsubs. 2, 3, 4 # encoding: [0xec,0x43,0x20,0x29]
+# CHECK-LE: fsubs. 2, 3, 4 # encoding: [0x29,0x20,0x43,0xec]
+ fsubs. 2, 3, 4
-# CHECK: fmul 2, 3, 4 # encoding: [0xfc,0x43,0x01,0x32]
- fmul 2, 3, 4
-# CHECK: fmul. 2, 3, 4 # encoding: [0xfc,0x43,0x01,0x33]
- fmul. 2, 3, 4
-# CHECK: fmuls 2, 3, 4 # encoding: [0xec,0x43,0x01,0x32]
- fmuls 2, 3, 4
-# CHECK: fmuls. 2, 3, 4 # encoding: [0xec,0x43,0x01,0x33]
- fmuls. 2, 3, 4
-# CHECK: fdiv 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x24]
- fdiv 2, 3, 4
-# CHECK: fdiv. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x25]
- fdiv. 2, 3, 4
-# CHECK: fdivs 2, 3, 4 # encoding: [0xec,0x43,0x20,0x24]
- fdivs 2, 3, 4
-# CHECK: fdivs. 2, 3, 4 # encoding: [0xec,0x43,0x20,0x25]
- fdivs. 2, 3, 4
-# CHECK: fsqrt 2, 3 # encoding: [0xfc,0x40,0x18,0x2c]
- fsqrt 2, 3
-# CHECK: fsqrt. 2, 3 # encoding: [0xfc,0x40,0x18,0x2d]
- fsqrt. 2, 3
-# CHECK: fsqrts 2, 3 # encoding: [0xec,0x40,0x18,0x2c]
- fsqrts 2, 3
-# CHECK: fsqrts. 2, 3 # encoding: [0xec,0x40,0x18,0x2d]
- fsqrts. 2, 3
+# CHECK-BE: fmul 2, 3, 4 # encoding: [0xfc,0x43,0x01,0x32]
+# CHECK-LE: fmul 2, 3, 4 # encoding: [0x32,0x01,0x43,0xfc]
+ fmul 2, 3, 4
+# CHECK-BE: fmul. 2, 3, 4 # encoding: [0xfc,0x43,0x01,0x33]
+# CHECK-LE: fmul. 2, 3, 4 # encoding: [0x33,0x01,0x43,0xfc]
+ fmul. 2, 3, 4
+# CHECK-BE: fmuls 2, 3, 4 # encoding: [0xec,0x43,0x01,0x32]
+# CHECK-LE: fmuls 2, 3, 4 # encoding: [0x32,0x01,0x43,0xec]
+ fmuls 2, 3, 4
+# CHECK-BE: fmuls. 2, 3, 4 # encoding: [0xec,0x43,0x01,0x33]
+# CHECK-LE: fmuls. 2, 3, 4 # encoding: [0x33,0x01,0x43,0xec]
+ fmuls. 2, 3, 4
+# CHECK-BE: fdiv 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x24]
+# CHECK-LE: fdiv 2, 3, 4 # encoding: [0x24,0x20,0x43,0xfc]
+ fdiv 2, 3, 4
+# CHECK-BE: fdiv. 2, 3, 4 # encoding: [0xfc,0x43,0x20,0x25]
+# CHECK-LE: fdiv. 2, 3, 4 # encoding: [0x25,0x20,0x43,0xfc]
+ fdiv. 2, 3, 4
+# CHECK-BE: fdivs 2, 3, 4 # encoding: [0xec,0x43,0x20,0x24]
+# CHECK-LE: fdivs 2, 3, 4 # encoding: [0x24,0x20,0x43,0xec]
+ fdivs 2, 3, 4
+# CHECK-BE: fdivs. 2, 3, 4 # encoding: [0xec,0x43,0x20,0x25]
+# CHECK-LE: fdivs. 2, 3, 4 # encoding: [0x25,0x20,0x43,0xec]
+ fdivs. 2, 3, 4
+# CHECK-BE: fsqrt 2, 3 # encoding: [0xfc,0x40,0x18,0x2c]
+# CHECK-LE: fsqrt 2, 3 # encoding: [0x2c,0x18,0x40,0xfc]
+ fsqrt 2, 3
+# CHECK-BE: fsqrt. 2, 3 # encoding: [0xfc,0x40,0x18,0x2d]
+# CHECK-LE: fsqrt. 2, 3 # encoding: [0x2d,0x18,0x40,0xfc]
+ fsqrt. 2, 3
+# CHECK-BE: fsqrts 2, 3 # encoding: [0xec,0x40,0x18,0x2c]
+# CHECK-LE: fsqrts 2, 3 # encoding: [0x2c,0x18,0x40,0xec]
+ fsqrts 2, 3
+# CHECK-BE: fsqrts. 2, 3 # encoding: [0xec,0x40,0x18,0x2d]
+# CHECK-LE: fsqrts. 2, 3 # encoding: [0x2d,0x18,0x40,0xec]
+ fsqrts. 2, 3
-# CHECK: fre 2, 3 # encoding: [0xfc,0x40,0x18,0x30]
- fre 2, 3
-# CHECK: fre. 2, 3 # encoding: [0xfc,0x40,0x18,0x31]
- fre. 2, 3
-# CHECK: fres 2, 3 # encoding: [0xec,0x40,0x18,0x30]
- fres 2, 3
-# CHECK: fres. 2, 3 # encoding: [0xec,0x40,0x18,0x31]
- fres. 2, 3
-# CHECK: frsqrte 2, 3 # encoding: [0xfc,0x40,0x18,0x34]
- frsqrte 2, 3
-# CHECK: frsqrte. 2, 3 # encoding: [0xfc,0x40,0x18,0x35]
- frsqrte. 2, 3
-# CHECK: frsqrtes 2, 3 # encoding: [0xec,0x40,0x18,0x34]
- frsqrtes 2, 3
-# CHECK: frsqrtes. 2, 3 # encoding: [0xec,0x40,0x18,0x35]
- frsqrtes. 2, 3
-# FIXME: ftdiv 2, 3, 4
-# FIXME: ftsqrt 2, 3, 4
+# CHECK-BE: fre 2, 3 # encoding: [0xfc,0x40,0x18,0x30]
+# CHECK-LE: fre 2, 3 # encoding: [0x30,0x18,0x40,0xfc]
+ fre 2, 3
+# CHECK-BE: fre. 2, 3 # encoding: [0xfc,0x40,0x18,0x31]
+# CHECK-LE: fre. 2, 3 # encoding: [0x31,0x18,0x40,0xfc]
+ fre. 2, 3
+# CHECK-BE: fres 2, 3 # encoding: [0xec,0x40,0x18,0x30]
+# CHECK-LE: fres 2, 3 # encoding: [0x30,0x18,0x40,0xec]
+ fres 2, 3
+# CHECK-BE: fres. 2, 3 # encoding: [0xec,0x40,0x18,0x31]
+# CHECK-LE: fres. 2, 3 # encoding: [0x31,0x18,0x40,0xec]
+ fres. 2, 3
+# CHECK-BE: frsqrte 2, 3 # encoding: [0xfc,0x40,0x18,0x34]
+# CHECK-LE: frsqrte 2, 3 # encoding: [0x34,0x18,0x40,0xfc]
+ frsqrte 2, 3
+# CHECK-BE: frsqrte. 2, 3 # encoding: [0xfc,0x40,0x18,0x35]
+# CHECK-LE: frsqrte. 2, 3 # encoding: [0x35,0x18,0x40,0xfc]
+ frsqrte. 2, 3
+# CHECK-BE: frsqrtes 2, 3 # encoding: [0xec,0x40,0x18,0x34]
+# CHECK-LE: frsqrtes 2, 3 # encoding: [0x34,0x18,0x40,0xec]
+ frsqrtes 2, 3
+# CHECK-BE: frsqrtes. 2, 3 # encoding: [0xec,0x40,0x18,0x35]
+# CHECK-LE: frsqrtes. 2, 3 # encoding: [0x35,0x18,0x40,0xec]
+ frsqrtes. 2, 3
+# FIXME: ftdiv 2, 3, 4
+# FIXME: ftsqrt 2, 3, 4
-# CHECK: fmadd 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3a]
- fmadd 2, 3, 4, 5
-# CHECK: fmadd. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3b]
- fmadd. 2, 3, 4, 5
-# CHECK: fmadds 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3a]
- fmadds 2, 3, 4, 5
-# CHECK: fmadds. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3b]
- fmadds. 2, 3, 4, 5
-# CHECK: fmsub 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x38]
- fmsub 2, 3, 4, 5
-# CHECK: fmsub. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x39]
- fmsub. 2, 3, 4, 5
-# CHECK: fmsubs 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x38]
- fmsubs 2, 3, 4, 5
-# CHECK: fmsubs. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x39]
- fmsubs. 2, 3, 4, 5
-# CHECK: fnmadd 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3e]
- fnmadd 2, 3, 4, 5
-# CHECK: fnmadd. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3f]
- fnmadd. 2, 3, 4, 5
-# CHECK: fnmadds 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3e]
- fnmadds 2, 3, 4, 5
-# CHECK: fnmadds. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3f]
- fnmadds. 2, 3, 4, 5
-# CHECK: fnmsub 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3c]
- fnmsub 2, 3, 4, 5
-# CHECK: fnmsub. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3d]
- fnmsub. 2, 3, 4, 5
-# CHECK: fnmsubs 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3c]
- fnmsubs 2, 3, 4, 5
-# CHECK: fnmsubs. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3d]
- fnmsubs. 2, 3, 4, 5
+# CHECK-BE: fmadd 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3a]
+# CHECK-LE: fmadd 2, 3, 4, 5 # encoding: [0x3a,0x29,0x43,0xfc]
+ fmadd 2, 3, 4, 5
+# CHECK-BE: fmadd. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3b]
+# CHECK-LE: fmadd. 2, 3, 4, 5 # encoding: [0x3b,0x29,0x43,0xfc]
+ fmadd. 2, 3, 4, 5
+# CHECK-BE: fmadds 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3a]
+# CHECK-LE: fmadds 2, 3, 4, 5 # encoding: [0x3a,0x29,0x43,0xec]
+ fmadds 2, 3, 4, 5
+# CHECK-BE: fmadds. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3b]
+# CHECK-LE: fmadds. 2, 3, 4, 5 # encoding: [0x3b,0x29,0x43,0xec]
+ fmadds. 2, 3, 4, 5
+# CHECK-BE: fmsub 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x38]
+# CHECK-LE: fmsub 2, 3, 4, 5 # encoding: [0x38,0x29,0x43,0xfc]
+ fmsub 2, 3, 4, 5
+# CHECK-BE: fmsub. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x39]
+# CHECK-LE: fmsub. 2, 3, 4, 5 # encoding: [0x39,0x29,0x43,0xfc]
+ fmsub. 2, 3, 4, 5
+# CHECK-BE: fmsubs 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x38]
+# CHECK-LE: fmsubs 2, 3, 4, 5 # encoding: [0x38,0x29,0x43,0xec]
+ fmsubs 2, 3, 4, 5
+# CHECK-BE: fmsubs. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x39]
+# CHECK-LE: fmsubs. 2, 3, 4, 5 # encoding: [0x39,0x29,0x43,0xec]
+ fmsubs. 2, 3, 4, 5
+# CHECK-BE: fnmadd 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3e]
+# CHECK-LE: fnmadd 2, 3, 4, 5 # encoding: [0x3e,0x29,0x43,0xfc]
+ fnmadd 2, 3, 4, 5
+# CHECK-BE: fnmadd. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3f]
+# CHECK-LE: fnmadd. 2, 3, 4, 5 # encoding: [0x3f,0x29,0x43,0xfc]
+ fnmadd. 2, 3, 4, 5
+# CHECK-BE: fnmadds 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3e]
+# CHECK-LE: fnmadds 2, 3, 4, 5 # encoding: [0x3e,0x29,0x43,0xec]
+ fnmadds 2, 3, 4, 5
+# CHECK-BE: fnmadds. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3f]
+# CHECK-LE: fnmadds. 2, 3, 4, 5 # encoding: [0x3f,0x29,0x43,0xec]
+ fnmadds. 2, 3, 4, 5
+# CHECK-BE: fnmsub 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3c]
+# CHECK-LE: fnmsub 2, 3, 4, 5 # encoding: [0x3c,0x29,0x43,0xfc]
+ fnmsub 2, 3, 4, 5
+# CHECK-BE: fnmsub. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x3d]
+# CHECK-LE: fnmsub. 2, 3, 4, 5 # encoding: [0x3d,0x29,0x43,0xfc]
+ fnmsub. 2, 3, 4, 5
+# CHECK-BE: fnmsubs 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3c]
+# CHECK-LE: fnmsubs 2, 3, 4, 5 # encoding: [0x3c,0x29,0x43,0xec]
+ fnmsubs 2, 3, 4, 5
+# CHECK-BE: fnmsubs. 2, 3, 4, 5 # encoding: [0xec,0x43,0x29,0x3d]
+# CHECK-LE: fnmsubs. 2, 3, 4, 5 # encoding: [0x3d,0x29,0x43,0xec]
+ fnmsubs. 2, 3, 4, 5
# Floating-point rounding and conversion instructions
-# CHECK: frsp 2, 3 # encoding: [0xfc,0x40,0x18,0x18]
- frsp 2, 3
-# CHECK: frsp. 2, 3 # encoding: [0xfc,0x40,0x18,0x19]
- frsp. 2, 3
+# CHECK-BE: frsp 2, 3 # encoding: [0xfc,0x40,0x18,0x18]
+# CHECK-LE: frsp 2, 3 # encoding: [0x18,0x18,0x40,0xfc]
+ frsp 2, 3
+# CHECK-BE: frsp. 2, 3 # encoding: [0xfc,0x40,0x18,0x19]
+# CHECK-LE: frsp. 2, 3 # encoding: [0x19,0x18,0x40,0xfc]
+ frsp. 2, 3
-# CHECK: fctid 2, 3 # encoding: [0xfc,0x40,0x1e,0x5c]
- fctid 2, 3
-# CHECK: fctid. 2, 3 # encoding: [0xfc,0x40,0x1e,0x5d]
- fctid. 2, 3
-# CHECK: fctidz 2, 3 # encoding: [0xfc,0x40,0x1e,0x5e]
- fctidz 2, 3
-# CHECK: fctidz. 2, 3 # encoding: [0xfc,0x40,0x1e,0x5f]
- fctidz. 2, 3
-# FIXME: fctidu 2, 3
-# FIXME: fctidu. 2, 3
-# CHECK: fctiduz 2, 3 # encoding: [0xfc,0x40,0x1f,0x5e]
- fctiduz 2, 3
-# CHECK: fctiduz. 2, 3 # encoding: [0xfc,0x40,0x1f,0x5f]
- fctiduz. 2, 3
-# CHECK: fctiw 2, 3 # encoding: [0xfc,0x40,0x18,0x1c]
- fctiw 2, 3
-# CHECK: fctiw. 2, 3 # encoding: [0xfc,0x40,0x18,0x1d]
- fctiw. 2, 3
-# CHECK: fctiwz 2, 3 # encoding: [0xfc,0x40,0x18,0x1e]
- fctiwz 2, 3
-# CHECK: fctiwz. 2, 3 # encoding: [0xfc,0x40,0x18,0x1f]
- fctiwz. 2, 3
-# FIXME: fctiwu 2, 3
-# FIXME: fctiwu. 2, 3
-# CHECK: fctiwuz 2, 3 # encoding: [0xfc,0x40,0x19,0x1e]
- fctiwuz 2, 3
-# CHECK: fctiwuz. 2, 3 # encoding: [0xfc,0x40,0x19,0x1f]
- fctiwuz. 2, 3
-# CHECK: fcfid 2, 3 # encoding: [0xfc,0x40,0x1e,0x9c]
- fcfid 2, 3
-# CHECK: fcfid. 2, 3 # encoding: [0xfc,0x40,0x1e,0x9d]
- fcfid. 2, 3
-# CHECK: fcfidu 2, 3 # encoding: [0xfc,0x40,0x1f,0x9c]
- fcfidu 2, 3
-# CHECK: fcfidu. 2, 3 # encoding: [0xfc,0x40,0x1f,0x9d]
- fcfidu. 2, 3
-# CHECK: fcfids 2, 3 # encoding: [0xec,0x40,0x1e,0x9c]
- fcfids 2, 3
-# CHECK: fcfids. 2, 3 # encoding: [0xec,0x40,0x1e,0x9d]
- fcfids. 2, 3
-# CHECK: fcfidus 2, 3 # encoding: [0xec,0x40,0x1f,0x9c]
- fcfidus 2, 3
-# CHECK: fcfidus. 2, 3 # encoding: [0xec,0x40,0x1f,0x9d]
- fcfidus. 2, 3
-# CHECK: frin 2, 3 # encoding: [0xfc,0x40,0x1b,0x10]
- frin 2, 3
-# CHECK: frin. 2, 3 # encoding: [0xfc,0x40,0x1b,0x11]
- frin. 2, 3
-# CHECK: frip 2, 3 # encoding: [0xfc,0x40,0x1b,0x90]
- frip 2, 3
-# CHECK: frip. 2, 3 # encoding: [0xfc,0x40,0x1b,0x91]
- frip. 2, 3
-# CHECK: friz 2, 3 # encoding: [0xfc,0x40,0x1b,0x50]
- friz 2, 3
-# CHECK: friz. 2, 3 # encoding: [0xfc,0x40,0x1b,0x51]
- friz. 2, 3
-# CHECK: frim 2, 3 # encoding: [0xfc,0x40,0x1b,0xd0]
- frim 2, 3
-# CHECK: frim. 2, 3 # encoding: [0xfc,0x40,0x1b,0xd1]
- frim. 2, 3
+# CHECK-BE: fctid 2, 3 # encoding: [0xfc,0x40,0x1e,0x5c]
+# CHECK-LE: fctid 2, 3 # encoding: [0x5c,0x1e,0x40,0xfc]
+ fctid 2, 3
+# CHECK-BE: fctid. 2, 3 # encoding: [0xfc,0x40,0x1e,0x5d]
+# CHECK-LE: fctid. 2, 3 # encoding: [0x5d,0x1e,0x40,0xfc]
+ fctid. 2, 3
+# CHECK-BE: fctidz 2, 3 # encoding: [0xfc,0x40,0x1e,0x5e]
+# CHECK-LE: fctidz 2, 3 # encoding: [0x5e,0x1e,0x40,0xfc]
+ fctidz 2, 3
+# CHECK-BE: fctidz. 2, 3 # encoding: [0xfc,0x40,0x1e,0x5f]
+# CHECK-LE: fctidz. 2, 3 # encoding: [0x5f,0x1e,0x40,0xfc]
+ fctidz. 2, 3
+# FIXME: fctidu 2, 3
+# FIXME: fctidu. 2, 3
+# CHECK-BE: fctiduz 2, 3 # encoding: [0xfc,0x40,0x1f,0x5e]
+# CHECK-LE: fctiduz 2, 3 # encoding: [0x5e,0x1f,0x40,0xfc]
+ fctiduz 2, 3
+# CHECK-BE: fctiduz. 2, 3 # encoding: [0xfc,0x40,0x1f,0x5f]
+# CHECK-LE: fctiduz. 2, 3 # encoding: [0x5f,0x1f,0x40,0xfc]
+ fctiduz. 2, 3
+# CHECK-BE: fctiw 2, 3 # encoding: [0xfc,0x40,0x18,0x1c]
+# CHECK-LE: fctiw 2, 3 # encoding: [0x1c,0x18,0x40,0xfc]
+ fctiw 2, 3
+# CHECK-BE: fctiw. 2, 3 # encoding: [0xfc,0x40,0x18,0x1d]
+# CHECK-LE: fctiw. 2, 3 # encoding: [0x1d,0x18,0x40,0xfc]
+ fctiw. 2, 3
+# CHECK-BE: fctiwz 2, 3 # encoding: [0xfc,0x40,0x18,0x1e]
+# CHECK-LE: fctiwz 2, 3 # encoding: [0x1e,0x18,0x40,0xfc]
+ fctiwz 2, 3
+# CHECK-BE: fctiwz. 2, 3 # encoding: [0xfc,0x40,0x18,0x1f]
+# CHECK-LE: fctiwz. 2, 3 # encoding: [0x1f,0x18,0x40,0xfc]
+ fctiwz. 2, 3
+# FIXME: fctiwu 2, 3
+# FIXME: fctiwu. 2, 3
+# CHECK-BE: fctiwuz 2, 3 # encoding: [0xfc,0x40,0x19,0x1e]
+# CHECK-LE: fctiwuz 2, 3 # encoding: [0x1e,0x19,0x40,0xfc]
+ fctiwuz 2, 3
+# CHECK-BE: fctiwuz. 2, 3 # encoding: [0xfc,0x40,0x19,0x1f]
+# CHECK-LE: fctiwuz. 2, 3 # encoding: [0x1f,0x19,0x40,0xfc]
+ fctiwuz. 2, 3
+# CHECK-BE: fcfid 2, 3 # encoding: [0xfc,0x40,0x1e,0x9c]
+# CHECK-LE: fcfid 2, 3 # encoding: [0x9c,0x1e,0x40,0xfc]
+ fcfid 2, 3
+# CHECK-BE: fcfid. 2, 3 # encoding: [0xfc,0x40,0x1e,0x9d]
+# CHECK-LE: fcfid. 2, 3 # encoding: [0x9d,0x1e,0x40,0xfc]
+ fcfid. 2, 3
+# CHECK-BE: fcfidu 2, 3 # encoding: [0xfc,0x40,0x1f,0x9c]
+# CHECK-LE: fcfidu 2, 3 # encoding: [0x9c,0x1f,0x40,0xfc]
+ fcfidu 2, 3
+# CHECK-BE: fcfidu. 2, 3 # encoding: [0xfc,0x40,0x1f,0x9d]
+# CHECK-LE: fcfidu. 2, 3 # encoding: [0x9d,0x1f,0x40,0xfc]
+ fcfidu. 2, 3
+# CHECK-BE: fcfids 2, 3 # encoding: [0xec,0x40,0x1e,0x9c]
+# CHECK-LE: fcfids 2, 3 # encoding: [0x9c,0x1e,0x40,0xec]
+ fcfids 2, 3
+# CHECK-BE: fcfids. 2, 3 # encoding: [0xec,0x40,0x1e,0x9d]
+# CHECK-LE: fcfids. 2, 3 # encoding: [0x9d,0x1e,0x40,0xec]
+ fcfids. 2, 3
+# CHECK-BE: fcfidus 2, 3 # encoding: [0xec,0x40,0x1f,0x9c]
+# CHECK-LE: fcfidus 2, 3 # encoding: [0x9c,0x1f,0x40,0xec]
+ fcfidus 2, 3
+# CHECK-BE: fcfidus. 2, 3 # encoding: [0xec,0x40,0x1f,0x9d]
+# CHECK-LE: fcfidus. 2, 3 # encoding: [0x9d,0x1f,0x40,0xec]
+ fcfidus. 2, 3
+# CHECK-BE: frin 2, 3 # encoding: [0xfc,0x40,0x1b,0x10]
+# CHECK-LE: frin 2, 3 # encoding: [0x10,0x1b,0x40,0xfc]
+ frin 2, 3
+# CHECK-BE: frin. 2, 3 # encoding: [0xfc,0x40,0x1b,0x11]
+# CHECK-LE: frin. 2, 3 # encoding: [0x11,0x1b,0x40,0xfc]
+ frin. 2, 3
+# CHECK-BE: frip 2, 3 # encoding: [0xfc,0x40,0x1b,0x90]
+# CHECK-LE: frip 2, 3 # encoding: [0x90,0x1b,0x40,0xfc]
+ frip 2, 3
+# CHECK-BE: frip. 2, 3 # encoding: [0xfc,0x40,0x1b,0x91]
+# CHECK-LE: frip. 2, 3 # encoding: [0x91,0x1b,0x40,0xfc]
+ frip. 2, 3
+# CHECK-BE: friz 2, 3 # encoding: [0xfc,0x40,0x1b,0x50]
+# CHECK-LE: friz 2, 3 # encoding: [0x50,0x1b,0x40,0xfc]
+ friz 2, 3
+# CHECK-BE: friz. 2, 3 # encoding: [0xfc,0x40,0x1b,0x51]
+# CHECK-LE: friz. 2, 3 # encoding: [0x51,0x1b,0x40,0xfc]
+ friz. 2, 3
+# CHECK-BE: frim 2, 3 # encoding: [0xfc,0x40,0x1b,0xd0]
+# CHECK-LE: frim 2, 3 # encoding: [0xd0,0x1b,0x40,0xfc]
+ frim 2, 3
+# CHECK-BE: frim. 2, 3 # encoding: [0xfc,0x40,0x1b,0xd1]
+# CHECK-LE: frim. 2, 3 # encoding: [0xd1,0x1b,0x40,0xfc]
+ frim. 2, 3
# Floating-point compare instructions
-# CHECK: fcmpu 2, 3, 4 # encoding: [0xfd,0x03,0x20,0x00]
- fcmpu 2, 3, 4
-# FIXME: fcmpo 2, 3, 4
+# CHECK-BE: fcmpu 2, 3, 4 # encoding: [0xfd,0x03,0x20,0x00]
+# CHECK-LE: fcmpu 2, 3, 4 # encoding: [0x00,0x20,0x03,0xfd]
+ fcmpu 2, 3, 4
+# FIXME: fcmpo 2, 3, 4
# Floating-point select instruction
-# CHECK: fsel 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x2e]
- fsel 2, 3, 4, 5
-# CHECK: fsel. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x2f]
- fsel. 2, 3, 4, 5
+# CHECK-BE: fsel 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x2e]
+# CHECK-LE: fsel 2, 3, 4, 5 # encoding: [0x2e,0x29,0x43,0xfc]
+ fsel 2, 3, 4, 5
+# CHECK-BE: fsel. 2, 3, 4, 5 # encoding: [0xfc,0x43,0x29,0x2f]
+# CHECK-LE: fsel. 2, 3, 4, 5 # encoding: [0x2f,0x29,0x43,0xfc]
+ fsel. 2, 3, 4, 5
# Floating-point status and control register instructions
-# CHECK: mffs 2 # encoding: [0xfc,0x40,0x04,0x8e]
- mffs 2
-# FIXME: mffs. 2
+# CHECK-BE: mffs 2 # encoding: [0xfc,0x40,0x04,0x8e]
+# CHECK-LE: mffs 2 # encoding: [0x8e,0x04,0x40,0xfc]
+ mffs 2
+# FIXME: mffs. 2
-# FIXME: mcrfs 2, 3
+# FIXME: mcrfs 2, 3
-# FIXME: mtfsfi 2, 3, 1
-# FIXME: mtfsfi. 2, 3, 1
-# FIXME: mtfsf 2, 3, 1, 1
-# FIXME: mtfsf. 2, 3, 1, 1
+# FIXME: mtfsfi 2, 3, 1
+# FIXME: mtfsfi. 2, 3, 1
+# FIXME: mtfsf 2, 3, 1, 1
+# FIXME: mtfsf. 2, 3, 1, 1
-# CHECK: mtfsb0 31 # encoding: [0xff,0xe0,0x00,0x8c]
- mtfsb0 31
-# FIXME: mtfsb0. 31
-# CHECK: mtfsb1 31 # encoding: [0xff,0xe0,0x00,0x4c]
- mtfsb1 31
-# FIXME: mtfsb1. 31
+# CHECK-BE: mtfsb0 31 # encoding: [0xff,0xe0,0x00,0x8c]
+# CHECK-LE: mtfsb0 31 # encoding: [0x8c,0x00,0xe0,0xff]
+ mtfsb0 31
+# FIXME: mtfsb0. 31
+# CHECK-BE: mtfsb1 31 # encoding: [0xff,0xe0,0x00,0x4c]
+# CHECK-LE: mtfsb1 31 # encoding: [0x4c,0x00,0xe0,0xff]
+ mtfsb1 31
+# FIXME: mtfsb1. 31
diff --git a/test/MC/PowerPC/ppc64-encoding-vmx.s b/test/MC/PowerPC/ppc64-encoding-vmx.s
index 0154076390bc..3d2df8414157 100644
--- a/test/MC/PowerPC/ppc64-encoding-vmx.s
+++ b/test/MC/PowerPC/ppc64-encoding-vmx.s
@@ -1,384 +1,554 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# Vector facility
# Vector storage access instructions
-# CHECK: lvebx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x0e]
- lvebx 2, 3, 4
-# CHECK: lvehx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x4e]
- lvehx 2, 3, 4
-# CHECK: lvewx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x8e]
- lvewx 2, 3, 4
-# CHECK: lvx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xce]
- lvx 2, 3, 4
-# CHECK: lvxl 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xce]
- lvxl 2, 3, 4
-# CHECK: stvebx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x0e]
- stvebx 2, 3, 4
-# CHECK: stvehx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x4e]
- stvehx 2, 3, 4
-# CHECK: stvewx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x8e]
- stvewx 2, 3, 4
-# CHECK: stvx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xce]
- stvx 2, 3, 4
-# CHECK: stvxl 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xce]
- stvxl 2, 3, 4
-# CHECK: lvsl 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x0c]
- lvsl 2, 3, 4
-# CHECK: lvsr 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x4c]
- lvsr 2, 3, 4
+# CHECK-BE: lvebx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x0e]
+# CHECK-LE: lvebx 2, 3, 4 # encoding: [0x0e,0x20,0x43,0x7c]
+ lvebx 2, 3, 4
+# CHECK-BE: lvehx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x4e]
+# CHECK-LE: lvehx 2, 3, 4 # encoding: [0x4e,0x20,0x43,0x7c]
+ lvehx 2, 3, 4
+# CHECK-BE: lvewx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x8e]
+# CHECK-LE: lvewx 2, 3, 4 # encoding: [0x8e,0x20,0x43,0x7c]
+ lvewx 2, 3, 4
+# CHECK-BE: lvx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xce]
+# CHECK-LE: lvx 2, 3, 4 # encoding: [0xce,0x20,0x43,0x7c]
+ lvx 2, 3, 4
+# CHECK-BE: lvxl 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xce]
+# CHECK-LE: lvxl 2, 3, 4 # encoding: [0xce,0x22,0x43,0x7c]
+ lvxl 2, 3, 4
+# CHECK-BE: stvebx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x0e]
+# CHECK-LE: stvebx 2, 3, 4 # encoding: [0x0e,0x21,0x43,0x7c]
+ stvebx 2, 3, 4
+# CHECK-BE: stvehx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x4e]
+# CHECK-LE: stvehx 2, 3, 4 # encoding: [0x4e,0x21,0x43,0x7c]
+ stvehx 2, 3, 4
+# CHECK-BE: stvewx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x8e]
+# CHECK-LE: stvewx 2, 3, 4 # encoding: [0x8e,0x21,0x43,0x7c]
+ stvewx 2, 3, 4
+# CHECK-BE: stvx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xce]
+# CHECK-LE: stvx 2, 3, 4 # encoding: [0xce,0x21,0x43,0x7c]
+ stvx 2, 3, 4
+# CHECK-BE: stvxl 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xce]
+# CHECK-LE: stvxl 2, 3, 4 # encoding: [0xce,0x23,0x43,0x7c]
+ stvxl 2, 3, 4
+# CHECK-BE: lvsl 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x0c]
+# CHECK-LE: lvsl 2, 3, 4 # encoding: [0x0c,0x20,0x43,0x7c]
+ lvsl 2, 3, 4
+# CHECK-BE: lvsr 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x4c]
+# CHECK-LE: lvsr 2, 3, 4 # encoding: [0x4c,0x20,0x43,0x7c]
+ lvsr 2, 3, 4
# Vector permute and formatting instructions
-# CHECK: vpkpx 2, 3, 4 # encoding: [0x10,0x43,0x23,0x0e]
- vpkpx 2, 3, 4
-# CHECK: vpkshss 2, 3, 4 # encoding: [0x10,0x43,0x21,0x8e]
- vpkshss 2, 3, 4
-# CHECK: vpkshus 2, 3, 4 # encoding: [0x10,0x43,0x21,0x0e]
- vpkshus 2, 3, 4
-# CHECK: vpkswss 2, 3, 4 # encoding: [0x10,0x43,0x21,0xce]
- vpkswss 2, 3, 4
-# CHECK: vpkswus 2, 3, 4 # encoding: [0x10,0x43,0x21,0x4e]
- vpkswus 2, 3, 4
-# CHECK: vpkuhum 2, 3, 4 # encoding: [0x10,0x43,0x20,0x0e]
- vpkuhum 2, 3, 4
-# CHECK: vpkuhus 2, 3, 4 # encoding: [0x10,0x43,0x20,0x8e]
- vpkuhus 2, 3, 4
-# CHECK: vpkuwum 2, 3, 4 # encoding: [0x10,0x43,0x20,0x4e]
- vpkuwum 2, 3, 4
-# CHECK: vpkuwus 2, 3, 4 # encoding: [0x10,0x43,0x20,0xce]
- vpkuwus 2, 3, 4
-
-# CHECK: vupkhpx 2, 3 # encoding: [0x10,0x40,0x1b,0x4e]
- vupkhpx 2, 3
-# CHECK: vupkhsb 2, 3 # encoding: [0x10,0x40,0x1a,0x0e]
- vupkhsb 2, 3
-# CHECK: vupkhsh 2, 3 # encoding: [0x10,0x40,0x1a,0x4e]
- vupkhsh 2, 3
-# CHECK: vupklpx 2, 3 # encoding: [0x10,0x40,0x1b,0xce]
- vupklpx 2, 3
-# CHECK: vupklsb 2, 3 # encoding: [0x10,0x40,0x1a,0x8e]
- vupklsb 2, 3
-# CHECK: vupklsh 2, 3 # encoding: [0x10,0x40,0x1a,0xce]
- vupklsh 2, 3
-
-# CHECK: vmrghb 2, 3, 4 # encoding: [0x10,0x43,0x20,0x0c]
- vmrghb 2, 3, 4
-# CHECK: vmrghh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x4c]
- vmrghh 2, 3, 4
-# CHECK: vmrghw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x8c]
- vmrghw 2, 3, 4
-# CHECK: vmrglb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x0c]
- vmrglb 2, 3, 4
-# CHECK: vmrglh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x4c]
- vmrglh 2, 3, 4
-# CHECK: vmrglw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x8c]
- vmrglw 2, 3, 4
-
-# CHECK: vspltb 2, 3, 1 # encoding: [0x10,0x41,0x1a,0x0c]
- vspltb 2, 3, 1
-# CHECK: vsplth 2, 3, 1 # encoding: [0x10,0x41,0x1a,0x4c]
- vsplth 2, 3, 1
-# CHECK: vspltw 2, 3, 1 # encoding: [0x10,0x41,0x1a,0x8c]
- vspltw 2, 3, 1
-# CHECK: vspltisb 2, 3 # encoding: [0x10,0x43,0x03,0x0c]
- vspltisb 2, 3
-# CHECK: vspltish 2, 3 # encoding: [0x10,0x43,0x03,0x4c]
- vspltish 2, 3
-# CHECK: vspltisw 2, 3 # encoding: [0x10,0x43,0x03,0x8c]
- vspltisw 2, 3
-
-# CHECK: vperm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x6b]
- vperm 2, 3, 4, 5
-# CHECK: vsel 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x6a]
- vsel 2, 3, 4, 5
-
-# CHECK: vsl 2, 3, 4 # encoding: [0x10,0x43,0x21,0xc4]
- vsl 2, 3, 4
-# CHECK: vsldoi 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x6c]
- vsldoi 2, 3, 4, 5
-# CHECK: vslo 2, 3, 4 # encoding: [0x10,0x43,0x24,0x0c]
- vslo 2, 3, 4
-# CHECK: vsr 2, 3, 4 # encoding: [0x10,0x43,0x22,0xc4]
- vsr 2, 3, 4
-# CHECK: vsro 2, 3, 4 # encoding: [0x10,0x43,0x24,0x4c]
- vsro 2, 3, 4
+# CHECK-BE: vpkpx 2, 3, 4 # encoding: [0x10,0x43,0x23,0x0e]
+# CHECK-LE: vpkpx 2, 3, 4 # encoding: [0x0e,0x23,0x43,0x10]
+ vpkpx 2, 3, 4
+# CHECK-BE: vpkshss 2, 3, 4 # encoding: [0x10,0x43,0x21,0x8e]
+# CHECK-LE: vpkshss 2, 3, 4 # encoding: [0x8e,0x21,0x43,0x10]
+ vpkshss 2, 3, 4
+# CHECK-BE: vpkshus 2, 3, 4 # encoding: [0x10,0x43,0x21,0x0e]
+# CHECK-LE: vpkshus 2, 3, 4 # encoding: [0x0e,0x21,0x43,0x10]
+ vpkshus 2, 3, 4
+# CHECK-BE: vpkswss 2, 3, 4 # encoding: [0x10,0x43,0x21,0xce]
+# CHECK-LE: vpkswss 2, 3, 4 # encoding: [0xce,0x21,0x43,0x10]
+ vpkswss 2, 3, 4
+# CHECK-BE: vpkswus 2, 3, 4 # encoding: [0x10,0x43,0x21,0x4e]
+# CHECK-LE: vpkswus 2, 3, 4 # encoding: [0x4e,0x21,0x43,0x10]
+ vpkswus 2, 3, 4
+# CHECK-BE: vpkuhum 2, 3, 4 # encoding: [0x10,0x43,0x20,0x0e]
+# CHECK-LE: vpkuhum 2, 3, 4 # encoding: [0x0e,0x20,0x43,0x10]
+ vpkuhum 2, 3, 4
+# CHECK-BE: vpkuhus 2, 3, 4 # encoding: [0x10,0x43,0x20,0x8e]
+# CHECK-LE: vpkuhus 2, 3, 4 # encoding: [0x8e,0x20,0x43,0x10]
+ vpkuhus 2, 3, 4
+# CHECK-BE: vpkuwum 2, 3, 4 # encoding: [0x10,0x43,0x20,0x4e]
+# CHECK-LE: vpkuwum 2, 3, 4 # encoding: [0x4e,0x20,0x43,0x10]
+ vpkuwum 2, 3, 4
+# CHECK-BE: vpkuwus 2, 3, 4 # encoding: [0x10,0x43,0x20,0xce]
+# CHECK-LE: vpkuwus 2, 3, 4 # encoding: [0xce,0x20,0x43,0x10]
+ vpkuwus 2, 3, 4
+
+# CHECK-BE: vupkhpx 2, 3 # encoding: [0x10,0x40,0x1b,0x4e]
+# CHECK-LE: vupkhpx 2, 3 # encoding: [0x4e,0x1b,0x40,0x10]
+ vupkhpx 2, 3
+# CHECK-BE: vupkhsb 2, 3 # encoding: [0x10,0x40,0x1a,0x0e]
+# CHECK-LE: vupkhsb 2, 3 # encoding: [0x0e,0x1a,0x40,0x10]
+ vupkhsb 2, 3
+# CHECK-BE: vupkhsh 2, 3 # encoding: [0x10,0x40,0x1a,0x4e]
+# CHECK-LE: vupkhsh 2, 3 # encoding: [0x4e,0x1a,0x40,0x10]
+ vupkhsh 2, 3
+# CHECK-BE: vupklpx 2, 3 # encoding: [0x10,0x40,0x1b,0xce]
+# CHECK-LE: vupklpx 2, 3 # encoding: [0xce,0x1b,0x40,0x10]
+ vupklpx 2, 3
+# CHECK-BE: vupklsb 2, 3 # encoding: [0x10,0x40,0x1a,0x8e]
+# CHECK-LE: vupklsb 2, 3 # encoding: [0x8e,0x1a,0x40,0x10]
+ vupklsb 2, 3
+# CHECK-BE: vupklsh 2, 3 # encoding: [0x10,0x40,0x1a,0xce]
+# CHECK-LE: vupklsh 2, 3 # encoding: [0xce,0x1a,0x40,0x10]
+ vupklsh 2, 3
+
+# CHECK-BE: vmrghb 2, 3, 4 # encoding: [0x10,0x43,0x20,0x0c]
+# CHECK-LE: vmrghb 2, 3, 4 # encoding: [0x0c,0x20,0x43,0x10]
+ vmrghb 2, 3, 4
+# CHECK-BE: vmrghh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x4c]
+# CHECK-LE: vmrghh 2, 3, 4 # encoding: [0x4c,0x20,0x43,0x10]
+ vmrghh 2, 3, 4
+# CHECK-BE: vmrghw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x8c]
+# CHECK-LE: vmrghw 2, 3, 4 # encoding: [0x8c,0x20,0x43,0x10]
+ vmrghw 2, 3, 4
+# CHECK-BE: vmrglb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x0c]
+# CHECK-LE: vmrglb 2, 3, 4 # encoding: [0x0c,0x21,0x43,0x10]
+ vmrglb 2, 3, 4
+# CHECK-BE: vmrglh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x4c]
+# CHECK-LE: vmrglh 2, 3, 4 # encoding: [0x4c,0x21,0x43,0x10]
+ vmrglh 2, 3, 4
+# CHECK-BE: vmrglw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x8c]
+# CHECK-LE: vmrglw 2, 3, 4 # encoding: [0x8c,0x21,0x43,0x10]
+ vmrglw 2, 3, 4
+
+# CHECK-BE: vspltb 2, 3, 1 # encoding: [0x10,0x41,0x1a,0x0c]
+# CHECK-LE: vspltb 2, 3, 1 # encoding: [0x0c,0x1a,0x41,0x10]
+ vspltb 2, 3, 1
+# CHECK-BE: vsplth 2, 3, 1 # encoding: [0x10,0x41,0x1a,0x4c]
+# CHECK-LE: vsplth 2, 3, 1 # encoding: [0x4c,0x1a,0x41,0x10]
+ vsplth 2, 3, 1
+# CHECK-BE: vspltw 2, 3, 1 # encoding: [0x10,0x41,0x1a,0x8c]
+# CHECK-LE: vspltw 2, 3, 1 # encoding: [0x8c,0x1a,0x41,0x10]
+ vspltw 2, 3, 1
+# CHECK-BE: vspltisb 2, 3 # encoding: [0x10,0x43,0x03,0x0c]
+# CHECK-LE: vspltisb 2, 3 # encoding: [0x0c,0x03,0x43,0x10]
+ vspltisb 2, 3
+# CHECK-BE: vspltish 2, 3 # encoding: [0x10,0x43,0x03,0x4c]
+# CHECK-LE: vspltish 2, 3 # encoding: [0x4c,0x03,0x43,0x10]
+ vspltish 2, 3
+# CHECK-BE: vspltisw 2, 3 # encoding: [0x10,0x43,0x03,0x8c]
+# CHECK-LE: vspltisw 2, 3 # encoding: [0x8c,0x03,0x43,0x10]
+ vspltisw 2, 3
+
+# CHECK-BE: vperm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x6b]
+# CHECK-LE: vperm 2, 3, 4, 5 # encoding: [0x6b,0x21,0x43,0x10]
+ vperm 2, 3, 4, 5
+# CHECK-BE: vsel 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x6a]
+# CHECK-LE: vsel 2, 3, 4, 5 # encoding: [0x6a,0x21,0x43,0x10]
+ vsel 2, 3, 4, 5
+
+# CHECK-BE: vsl 2, 3, 4 # encoding: [0x10,0x43,0x21,0xc4]
+# CHECK-LE: vsl 2, 3, 4 # encoding: [0xc4,0x21,0x43,0x10]
+ vsl 2, 3, 4
+# CHECK-BE: vsldoi 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x6c]
+# CHECK-LE: vsldoi 2, 3, 4, 5 # encoding: [0x6c,0x21,0x43,0x10]
+ vsldoi 2, 3, 4, 5
+# CHECK-BE: vslo 2, 3, 4 # encoding: [0x10,0x43,0x24,0x0c]
+# CHECK-LE: vslo 2, 3, 4 # encoding: [0x0c,0x24,0x43,0x10]
+ vslo 2, 3, 4
+# CHECK-BE: vsr 2, 3, 4 # encoding: [0x10,0x43,0x22,0xc4]
+# CHECK-LE: vsr 2, 3, 4 # encoding: [0xc4,0x22,0x43,0x10]
+ vsr 2, 3, 4
+# CHECK-BE: vsro 2, 3, 4 # encoding: [0x10,0x43,0x24,0x4c]
+# CHECK-LE: vsro 2, 3, 4 # encoding: [0x4c,0x24,0x43,0x10]
+ vsro 2, 3, 4
# Vector integer arithmetic instructions
-# CHECK: vaddcuw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x80]
- vaddcuw 2, 3, 4
-# CHECK: vaddsbs 2, 3, 4 # encoding: [0x10,0x43,0x23,0x00]
- vaddsbs 2, 3, 4
-# CHECK: vaddshs 2, 3, 4 # encoding: [0x10,0x43,0x23,0x40]
- vaddshs 2, 3, 4
-# CHECK: vaddsws 2, 3, 4 # encoding: [0x10,0x43,0x23,0x80]
- vaddsws 2, 3, 4
-# CHECK: vaddubm 2, 3, 4 # encoding: [0x10,0x43,0x20,0x00]
- vaddubm 2, 3, 4
-# CHECK: vadduhm 2, 3, 4 # encoding: [0x10,0x43,0x20,0x40]
- vadduhm 2, 3, 4
-# CHECK: vadduwm 2, 3, 4 # encoding: [0x10,0x43,0x20,0x80]
- vadduwm 2, 3, 4
-# CHECK: vaddubs 2, 3, 4 # encoding: [0x10,0x43,0x22,0x00]
- vaddubs 2, 3, 4
-# CHECK: vadduhs 2, 3, 4 # encoding: [0x10,0x43,0x22,0x40]
- vadduhs 2, 3, 4
-# CHECK: vadduws 2, 3, 4 # encoding: [0x10,0x43,0x22,0x80]
- vadduws 2, 3, 4
-
-# CHECK: vsubcuw 2, 3, 4 # encoding: [0x10,0x43,0x25,0x80]
- vsubcuw 2, 3, 4
-# CHECK: vsubsbs 2, 3, 4 # encoding: [0x10,0x43,0x27,0x00]
- vsubsbs 2, 3, 4
-# CHECK: vsubshs 2, 3, 4 # encoding: [0x10,0x43,0x27,0x40]
- vsubshs 2, 3, 4
-# CHECK: vsubsws 2, 3, 4 # encoding: [0x10,0x43,0x27,0x80]
- vsubsws 2, 3, 4
-# CHECK: vsububm 2, 3, 4 # encoding: [0x10,0x43,0x24,0x00]
- vsububm 2, 3, 4
-# CHECK: vsubuhm 2, 3, 4 # encoding: [0x10,0x43,0x24,0x40]
- vsubuhm 2, 3, 4
-# CHECK: vsubuwm 2, 3, 4 # encoding: [0x10,0x43,0x24,0x80]
- vsubuwm 2, 3, 4
-# CHECK: vsububs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x00]
- vsububs 2, 3, 4
-# CHECK: vsubuhs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x40]
- vsubuhs 2, 3, 4
-# CHECK: vsubuws 2, 3, 4 # encoding: [0x10,0x43,0x26,0x80]
- vsubuws 2, 3, 4
-
-# CHECK: vmulesb 2, 3, 4 # encoding: [0x10,0x43,0x23,0x08]
- vmulesb 2, 3, 4
-# CHECK: vmulesh 2, 3, 4 # encoding: [0x10,0x43,0x23,0x48]
- vmulesh 2, 3, 4
-# CHECK: vmuleub 2, 3, 4 # encoding: [0x10,0x43,0x22,0x08]
- vmuleub 2, 3, 4
-# CHECK: vmuleuh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x48]
- vmuleuh 2, 3, 4
-# CHECK: vmulosb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x08]
- vmulosb 2, 3, 4
-# CHECK: vmulosh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x48]
- vmulosh 2, 3, 4
-# CHECK: vmuloub 2, 3, 4 # encoding: [0x10,0x43,0x20,0x08]
- vmuloub 2, 3, 4
-# CHECK: vmulouh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x48]
- vmulouh 2, 3, 4
-
-# CHECK: vmhaddshs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x60]
- vmhaddshs 2, 3, 4, 5
-# CHECK: vmhraddshs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x61]
- vmhraddshs 2, 3, 4, 5
-# CHECK: vmladduhm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x62]
- vmladduhm 2, 3, 4, 5
-# CHECK: vmsumubm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x64]
- vmsumubm 2, 3, 4, 5
-# CHECK: vmsummbm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x65]
- vmsummbm 2, 3, 4, 5
-# CHECK: vmsumshm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x68]
- vmsumshm 2, 3, 4, 5
-# CHECK: vmsumshs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x69]
- vmsumshs 2, 3, 4, 5
-# CHECK: vmsumuhm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x66]
- vmsumuhm 2, 3, 4, 5
-# CHECK: vmsumuhs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x67]
- vmsumuhs 2, 3, 4, 5
-
-# CHECK: vsumsws 2, 3, 4 # encoding: [0x10,0x43,0x27,0x88]
- vsumsws 2, 3, 4
-# CHECK: vsum2sws 2, 3, 4 # encoding: [0x10,0x43,0x26,0x88]
- vsum2sws 2, 3, 4
-# CHECK: vsum4sbs 2, 3, 4 # encoding: [0x10,0x43,0x27,0x08]
- vsum4sbs 2, 3, 4
-# CHECK: vsum4shs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x48]
- vsum4shs 2, 3, 4
-# CHECK: vsum4ubs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x08]
- vsum4ubs 2, 3, 4
-
-# CHECK: vavgsb 2, 3, 4 # encoding: [0x10,0x43,0x25,0x02]
- vavgsb 2, 3, 4
-# CHECK: vavgsh 2, 3, 4 # encoding: [0x10,0x43,0x25,0x42]
- vavgsh 2, 3, 4
-# CHECK: vavgsw 2, 3, 4 # encoding: [0x10,0x43,0x25,0x82]
- vavgsw 2, 3, 4
-# CHECK: vavgub 2, 3, 4 # encoding: [0x10,0x43,0x24,0x02]
- vavgub 2, 3, 4
-# CHECK: vavguh 2, 3, 4 # encoding: [0x10,0x43,0x24,0x42]
- vavguh 2, 3, 4
-# CHECK: vavguw 2, 3, 4 # encoding: [0x10,0x43,0x24,0x82]
- vavguw 2, 3, 4
-
-# CHECK: vmaxsb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x02]
- vmaxsb 2, 3, 4
-# CHECK: vmaxsh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x42]
- vmaxsh 2, 3, 4
-# CHECK: vmaxsw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x82]
- vmaxsw 2, 3, 4
-# CHECK: vmaxub 2, 3, 4 # encoding: [0x10,0x43,0x20,0x02]
- vmaxub 2, 3, 4
-# CHECK: vmaxuh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x42]
- vmaxuh 2, 3, 4
-# CHECK: vmaxuw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x82]
- vmaxuw 2, 3, 4
-
-# CHECK: vminsb 2, 3, 4 # encoding: [0x10,0x43,0x23,0x02]
- vminsb 2, 3, 4
-# CHECK: vminsh 2, 3, 4 # encoding: [0x10,0x43,0x23,0x42]
- vminsh 2, 3, 4
-# CHECK: vminsw 2, 3, 4 # encoding: [0x10,0x43,0x23,0x82]
- vminsw 2, 3, 4
-# CHECK: vminub 2, 3, 4 # encoding: [0x10,0x43,0x22,0x02]
- vminub 2, 3, 4
-# CHECK: vminuh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x42]
- vminuh 2, 3, 4
-# CHECK: vminuw 2, 3, 4 # encoding: [0x10,0x43,0x22,0x82]
- vminuw 2, 3, 4
+# CHECK-BE: vaddcuw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x80]
+# CHECK-LE: vaddcuw 2, 3, 4 # encoding: [0x80,0x21,0x43,0x10]
+ vaddcuw 2, 3, 4
+# CHECK-BE: vaddsbs 2, 3, 4 # encoding: [0x10,0x43,0x23,0x00]
+# CHECK-LE: vaddsbs 2, 3, 4 # encoding: [0x00,0x23,0x43,0x10]
+ vaddsbs 2, 3, 4
+# CHECK-BE: vaddshs 2, 3, 4 # encoding: [0x10,0x43,0x23,0x40]
+# CHECK-LE: vaddshs 2, 3, 4 # encoding: [0x40,0x23,0x43,0x10]
+ vaddshs 2, 3, 4
+# CHECK-BE: vaddsws 2, 3, 4 # encoding: [0x10,0x43,0x23,0x80]
+# CHECK-LE: vaddsws 2, 3, 4 # encoding: [0x80,0x23,0x43,0x10]
+ vaddsws 2, 3, 4
+# CHECK-BE: vaddubm 2, 3, 4 # encoding: [0x10,0x43,0x20,0x00]
+# CHECK-LE: vaddubm 2, 3, 4 # encoding: [0x00,0x20,0x43,0x10]
+ vaddubm 2, 3, 4
+# CHECK-BE: vadduhm 2, 3, 4 # encoding: [0x10,0x43,0x20,0x40]
+# CHECK-LE: vadduhm 2, 3, 4 # encoding: [0x40,0x20,0x43,0x10]
+ vadduhm 2, 3, 4
+# CHECK-BE: vadduwm 2, 3, 4 # encoding: [0x10,0x43,0x20,0x80]
+# CHECK-LE: vadduwm 2, 3, 4 # encoding: [0x80,0x20,0x43,0x10]
+ vadduwm 2, 3, 4
+# CHECK-BE: vaddubs 2, 3, 4 # encoding: [0x10,0x43,0x22,0x00]
+# CHECK-LE: vaddubs 2, 3, 4 # encoding: [0x00,0x22,0x43,0x10]
+ vaddubs 2, 3, 4
+# CHECK-BE: vadduhs 2, 3, 4 # encoding: [0x10,0x43,0x22,0x40]
+# CHECK-LE: vadduhs 2, 3, 4 # encoding: [0x40,0x22,0x43,0x10]
+ vadduhs 2, 3, 4
+# CHECK-BE: vadduws 2, 3, 4 # encoding: [0x10,0x43,0x22,0x80]
+# CHECK-LE: vadduws 2, 3, 4 # encoding: [0x80,0x22,0x43,0x10]
+ vadduws 2, 3, 4
+
+# CHECK-BE: vsubcuw 2, 3, 4 # encoding: [0x10,0x43,0x25,0x80]
+# CHECK-LE: vsubcuw 2, 3, 4 # encoding: [0x80,0x25,0x43,0x10]
+ vsubcuw 2, 3, 4
+# CHECK-BE: vsubsbs 2, 3, 4 # encoding: [0x10,0x43,0x27,0x00]
+# CHECK-LE: vsubsbs 2, 3, 4 # encoding: [0x00,0x27,0x43,0x10]
+ vsubsbs 2, 3, 4
+# CHECK-BE: vsubshs 2, 3, 4 # encoding: [0x10,0x43,0x27,0x40]
+# CHECK-LE: vsubshs 2, 3, 4 # encoding: [0x40,0x27,0x43,0x10]
+ vsubshs 2, 3, 4
+# CHECK-BE: vsubsws 2, 3, 4 # encoding: [0x10,0x43,0x27,0x80]
+# CHECK-LE: vsubsws 2, 3, 4 # encoding: [0x80,0x27,0x43,0x10]
+ vsubsws 2, 3, 4
+# CHECK-BE: vsububm 2, 3, 4 # encoding: [0x10,0x43,0x24,0x00]
+# CHECK-LE: vsububm 2, 3, 4 # encoding: [0x00,0x24,0x43,0x10]
+ vsububm 2, 3, 4
+# CHECK-BE: vsubuhm 2, 3, 4 # encoding: [0x10,0x43,0x24,0x40]
+# CHECK-LE: vsubuhm 2, 3, 4 # encoding: [0x40,0x24,0x43,0x10]
+ vsubuhm 2, 3, 4
+# CHECK-BE: vsubuwm 2, 3, 4 # encoding: [0x10,0x43,0x24,0x80]
+# CHECK-LE: vsubuwm 2, 3, 4 # encoding: [0x80,0x24,0x43,0x10]
+ vsubuwm 2, 3, 4
+# CHECK-BE: vsububs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x00]
+# CHECK-LE: vsububs 2, 3, 4 # encoding: [0x00,0x26,0x43,0x10]
+ vsububs 2, 3, 4
+# CHECK-BE: vsubuhs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x40]
+# CHECK-LE: vsubuhs 2, 3, 4 # encoding: [0x40,0x26,0x43,0x10]
+ vsubuhs 2, 3, 4
+# CHECK-BE: vsubuws 2, 3, 4 # encoding: [0x10,0x43,0x26,0x80]
+# CHECK-LE: vsubuws 2, 3, 4 # encoding: [0x80,0x26,0x43,0x10]
+ vsubuws 2, 3, 4
+
+# CHECK-BE: vmulesb 2, 3, 4 # encoding: [0x10,0x43,0x23,0x08]
+# CHECK-LE: vmulesb 2, 3, 4 # encoding: [0x08,0x23,0x43,0x10]
+ vmulesb 2, 3, 4
+# CHECK-BE: vmulesh 2, 3, 4 # encoding: [0x10,0x43,0x23,0x48]
+# CHECK-LE: vmulesh 2, 3, 4 # encoding: [0x48,0x23,0x43,0x10]
+ vmulesh 2, 3, 4
+# CHECK-BE: vmuleub 2, 3, 4 # encoding: [0x10,0x43,0x22,0x08]
+# CHECK-LE: vmuleub 2, 3, 4 # encoding: [0x08,0x22,0x43,0x10]
+ vmuleub 2, 3, 4
+# CHECK-BE: vmuleuh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x48]
+# CHECK-LE: vmuleuh 2, 3, 4 # encoding: [0x48,0x22,0x43,0x10]
+ vmuleuh 2, 3, 4
+# CHECK-BE: vmulosb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x08]
+# CHECK-LE: vmulosb 2, 3, 4 # encoding: [0x08,0x21,0x43,0x10]
+ vmulosb 2, 3, 4
+# CHECK-BE: vmulosh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x48]
+# CHECK-LE: vmulosh 2, 3, 4 # encoding: [0x48,0x21,0x43,0x10]
+ vmulosh 2, 3, 4
+# CHECK-BE: vmuloub 2, 3, 4 # encoding: [0x10,0x43,0x20,0x08]
+# CHECK-LE: vmuloub 2, 3, 4 # encoding: [0x08,0x20,0x43,0x10]
+ vmuloub 2, 3, 4
+# CHECK-BE: vmulouh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x48]
+# CHECK-LE: vmulouh 2, 3, 4 # encoding: [0x48,0x20,0x43,0x10]
+ vmulouh 2, 3, 4
+
+# CHECK-BE: vmhaddshs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x60]
+# CHECK-LE: vmhaddshs 2, 3, 4, 5 # encoding: [0x60,0x21,0x43,0x10]
+ vmhaddshs 2, 3, 4, 5
+# CHECK-BE: vmhraddshs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x61]
+# CHECK-LE: vmhraddshs 2, 3, 4, 5 # encoding: [0x61,0x21,0x43,0x10]
+ vmhraddshs 2, 3, 4, 5
+# CHECK-BE: vmladduhm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x62]
+# CHECK-LE: vmladduhm 2, 3, 4, 5 # encoding: [0x62,0x21,0x43,0x10]
+ vmladduhm 2, 3, 4, 5
+# CHECK-BE: vmsumubm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x64]
+# CHECK-LE: vmsumubm 2, 3, 4, 5 # encoding: [0x64,0x21,0x43,0x10]
+ vmsumubm 2, 3, 4, 5
+# CHECK-BE: vmsummbm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x65]
+# CHECK-LE: vmsummbm 2, 3, 4, 5 # encoding: [0x65,0x21,0x43,0x10]
+ vmsummbm 2, 3, 4, 5
+# CHECK-BE: vmsumshm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x68]
+# CHECK-LE: vmsumshm 2, 3, 4, 5 # encoding: [0x68,0x21,0x43,0x10]
+ vmsumshm 2, 3, 4, 5
+# CHECK-BE: vmsumshs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x69]
+# CHECK-LE: vmsumshs 2, 3, 4, 5 # encoding: [0x69,0x21,0x43,0x10]
+ vmsumshs 2, 3, 4, 5
+# CHECK-BE: vmsumuhm 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x66]
+# CHECK-LE: vmsumuhm 2, 3, 4, 5 # encoding: [0x66,0x21,0x43,0x10]
+ vmsumuhm 2, 3, 4, 5
+# CHECK-BE: vmsumuhs 2, 3, 4, 5 # encoding: [0x10,0x43,0x21,0x67]
+# CHECK-LE: vmsumuhs 2, 3, 4, 5 # encoding: [0x67,0x21,0x43,0x10]
+ vmsumuhs 2, 3, 4, 5
+
+# CHECK-BE: vsumsws 2, 3, 4 # encoding: [0x10,0x43,0x27,0x88]
+# CHECK-LE: vsumsws 2, 3, 4 # encoding: [0x88,0x27,0x43,0x10]
+ vsumsws 2, 3, 4
+# CHECK-BE: vsum2sws 2, 3, 4 # encoding: [0x10,0x43,0x26,0x88]
+# CHECK-LE: vsum2sws 2, 3, 4 # encoding: [0x88,0x26,0x43,0x10]
+ vsum2sws 2, 3, 4
+# CHECK-BE: vsum4sbs 2, 3, 4 # encoding: [0x10,0x43,0x27,0x08]
+# CHECK-LE: vsum4sbs 2, 3, 4 # encoding: [0x08,0x27,0x43,0x10]
+ vsum4sbs 2, 3, 4
+# CHECK-BE: vsum4shs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x48]
+# CHECK-LE: vsum4shs 2, 3, 4 # encoding: [0x48,0x26,0x43,0x10]
+ vsum4shs 2, 3, 4
+# CHECK-BE: vsum4ubs 2, 3, 4 # encoding: [0x10,0x43,0x26,0x08]
+# CHECK-LE: vsum4ubs 2, 3, 4 # encoding: [0x08,0x26,0x43,0x10]
+ vsum4ubs 2, 3, 4
+
+# CHECK-BE: vavgsb 2, 3, 4 # encoding: [0x10,0x43,0x25,0x02]
+# CHECK-LE: vavgsb 2, 3, 4 # encoding: [0x02,0x25,0x43,0x10]
+ vavgsb 2, 3, 4
+# CHECK-BE: vavgsh 2, 3, 4 # encoding: [0x10,0x43,0x25,0x42]
+# CHECK-LE: vavgsh 2, 3, 4 # encoding: [0x42,0x25,0x43,0x10]
+ vavgsh 2, 3, 4
+# CHECK-BE: vavgsw 2, 3, 4 # encoding: [0x10,0x43,0x25,0x82]
+# CHECK-LE: vavgsw 2, 3, 4 # encoding: [0x82,0x25,0x43,0x10]
+ vavgsw 2, 3, 4
+# CHECK-BE: vavgub 2, 3, 4 # encoding: [0x10,0x43,0x24,0x02]
+# CHECK-LE: vavgub 2, 3, 4 # encoding: [0x02,0x24,0x43,0x10]
+ vavgub 2, 3, 4
+# CHECK-BE: vavguh 2, 3, 4 # encoding: [0x10,0x43,0x24,0x42]
+# CHECK-LE: vavguh 2, 3, 4 # encoding: [0x42,0x24,0x43,0x10]
+ vavguh 2, 3, 4
+# CHECK-BE: vavguw 2, 3, 4 # encoding: [0x10,0x43,0x24,0x82]
+# CHECK-LE: vavguw 2, 3, 4 # encoding: [0x82,0x24,0x43,0x10]
+ vavguw 2, 3, 4
+
+# CHECK-BE: vmaxsb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x02]
+# CHECK-LE: vmaxsb 2, 3, 4 # encoding: [0x02,0x21,0x43,0x10]
+ vmaxsb 2, 3, 4
+# CHECK-BE: vmaxsh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x42]
+# CHECK-LE: vmaxsh 2, 3, 4 # encoding: [0x42,0x21,0x43,0x10]
+ vmaxsh 2, 3, 4
+# CHECK-BE: vmaxsw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x82]
+# CHECK-LE: vmaxsw 2, 3, 4 # encoding: [0x82,0x21,0x43,0x10]
+ vmaxsw 2, 3, 4
+# CHECK-BE: vmaxub 2, 3, 4 # encoding: [0x10,0x43,0x20,0x02]
+# CHECK-LE: vmaxub 2, 3, 4 # encoding: [0x02,0x20,0x43,0x10]
+ vmaxub 2, 3, 4
+# CHECK-BE: vmaxuh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x42]
+# CHECK-LE: vmaxuh 2, 3, 4 # encoding: [0x42,0x20,0x43,0x10]
+ vmaxuh 2, 3, 4
+# CHECK-BE: vmaxuw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x82]
+# CHECK-LE: vmaxuw 2, 3, 4 # encoding: [0x82,0x20,0x43,0x10]
+ vmaxuw 2, 3, 4
+
+# CHECK-BE: vminsb 2, 3, 4 # encoding: [0x10,0x43,0x23,0x02]
+# CHECK-LE: vminsb 2, 3, 4 # encoding: [0x02,0x23,0x43,0x10]
+ vminsb 2, 3, 4
+# CHECK-BE: vminsh 2, 3, 4 # encoding: [0x10,0x43,0x23,0x42]
+# CHECK-LE: vminsh 2, 3, 4 # encoding: [0x42,0x23,0x43,0x10]
+ vminsh 2, 3, 4
+# CHECK-BE: vminsw 2, 3, 4 # encoding: [0x10,0x43,0x23,0x82]
+# CHECK-LE: vminsw 2, 3, 4 # encoding: [0x82,0x23,0x43,0x10]
+ vminsw 2, 3, 4
+# CHECK-BE: vminub 2, 3, 4 # encoding: [0x10,0x43,0x22,0x02]
+# CHECK-LE: vminub 2, 3, 4 # encoding: [0x02,0x22,0x43,0x10]
+ vminub 2, 3, 4
+# CHECK-BE: vminuh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x42]
+# CHECK-LE: vminuh 2, 3, 4 # encoding: [0x42,0x22,0x43,0x10]
+ vminuh 2, 3, 4
+# CHECK-BE: vminuw 2, 3, 4 # encoding: [0x10,0x43,0x22,0x82]
+# CHECK-LE: vminuw 2, 3, 4 # encoding: [0x82,0x22,0x43,0x10]
+ vminuw 2, 3, 4
# Vector integer compare instructions
-# CHECK: vcmpequb 2, 3, 4 # encoding: [0x10,0x43,0x20,0x06]
- vcmpequb 2, 3, 4
-# CHECK: vcmpequb. 2, 3, 4 # encoding: [0x10,0x43,0x24,0x06]
- vcmpequb. 2, 3, 4
-# CHECK: vcmpequh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x46]
- vcmpequh 2, 3, 4
-# CHECK: vcmpequh. 2, 3, 4 # encoding: [0x10,0x43,0x24,0x46]
- vcmpequh. 2, 3, 4
-# CHECK: vcmpequw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x86]
- vcmpequw 2, 3, 4
-# CHECK: vcmpequw. 2, 3, 4 # encoding: [0x10,0x43,0x24,0x86]
- vcmpequw. 2, 3, 4
-# CHECK: vcmpgtsb 2, 3, 4 # encoding: [0x10,0x43,0x23,0x06]
- vcmpgtsb 2, 3, 4
-# CHECK: vcmpgtsb. 2, 3, 4 # encoding: [0x10,0x43,0x27,0x06]
- vcmpgtsb. 2, 3, 4
-# CHECK: vcmpgtsh 2, 3, 4 # encoding: [0x10,0x43,0x23,0x46]
- vcmpgtsh 2, 3, 4
-# CHECK: vcmpgtsh. 2, 3, 4 # encoding: [0x10,0x43,0x27,0x46]
- vcmpgtsh. 2, 3, 4
-# CHECK: vcmpgtsw 2, 3, 4 # encoding: [0x10,0x43,0x23,0x86]
- vcmpgtsw 2, 3, 4
-# CHECK: vcmpgtsw. 2, 3, 4 # encoding: [0x10,0x43,0x27,0x86]
- vcmpgtsw. 2, 3, 4
-# CHECK: vcmpgtub 2, 3, 4 # encoding: [0x10,0x43,0x22,0x06]
- vcmpgtub 2, 3, 4
-# CHECK: vcmpgtub. 2, 3, 4 # encoding: [0x10,0x43,0x26,0x06]
- vcmpgtub. 2, 3, 4
-# CHECK: vcmpgtuh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x46]
- vcmpgtuh 2, 3, 4
-# CHECK: vcmpgtuh. 2, 3, 4 # encoding: [0x10,0x43,0x26,0x46]
- vcmpgtuh. 2, 3, 4
-# CHECK: vcmpgtuw 2, 3, 4 # encoding: [0x10,0x43,0x22,0x86]
- vcmpgtuw 2, 3, 4
-# CHECK: vcmpgtuw. 2, 3, 4 # encoding: [0x10,0x43,0x26,0x86]
- vcmpgtuw. 2, 3, 4
+# CHECK-BE: vcmpequb 2, 3, 4 # encoding: [0x10,0x43,0x20,0x06]
+# CHECK-LE: vcmpequb 2, 3, 4 # encoding: [0x06,0x20,0x43,0x10]
+ vcmpequb 2, 3, 4
+# CHECK-BE: vcmpequb. 2, 3, 4 # encoding: [0x10,0x43,0x24,0x06]
+# CHECK-LE: vcmpequb. 2, 3, 4 # encoding: [0x06,0x24,0x43,0x10]
+ vcmpequb. 2, 3, 4
+# CHECK-BE: vcmpequh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x46]
+# CHECK-LE: vcmpequh 2, 3, 4 # encoding: [0x46,0x20,0x43,0x10]
+ vcmpequh 2, 3, 4
+# CHECK-BE: vcmpequh. 2, 3, 4 # encoding: [0x10,0x43,0x24,0x46]
+# CHECK-LE: vcmpequh. 2, 3, 4 # encoding: [0x46,0x24,0x43,0x10]
+ vcmpequh. 2, 3, 4
+# CHECK-BE: vcmpequw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x86]
+# CHECK-LE: vcmpequw 2, 3, 4 # encoding: [0x86,0x20,0x43,0x10]
+ vcmpequw 2, 3, 4
+# CHECK-BE: vcmpequw. 2, 3, 4 # encoding: [0x10,0x43,0x24,0x86]
+# CHECK-LE: vcmpequw. 2, 3, 4 # encoding: [0x86,0x24,0x43,0x10]
+ vcmpequw. 2, 3, 4
+# CHECK-BE: vcmpgtsb 2, 3, 4 # encoding: [0x10,0x43,0x23,0x06]
+# CHECK-LE: vcmpgtsb 2, 3, 4 # encoding: [0x06,0x23,0x43,0x10]
+ vcmpgtsb 2, 3, 4
+# CHECK-BE: vcmpgtsb. 2, 3, 4 # encoding: [0x10,0x43,0x27,0x06]
+# CHECK-LE: vcmpgtsb. 2, 3, 4 # encoding: [0x06,0x27,0x43,0x10]
+ vcmpgtsb. 2, 3, 4
+# CHECK-BE: vcmpgtsh 2, 3, 4 # encoding: [0x10,0x43,0x23,0x46]
+# CHECK-LE: vcmpgtsh 2, 3, 4 # encoding: [0x46,0x23,0x43,0x10]
+ vcmpgtsh 2, 3, 4
+# CHECK-BE: vcmpgtsh. 2, 3, 4 # encoding: [0x10,0x43,0x27,0x46]
+# CHECK-LE: vcmpgtsh. 2, 3, 4 # encoding: [0x46,0x27,0x43,0x10]
+ vcmpgtsh. 2, 3, 4
+# CHECK-BE: vcmpgtsw 2, 3, 4 # encoding: [0x10,0x43,0x23,0x86]
+# CHECK-LE: vcmpgtsw 2, 3, 4 # encoding: [0x86,0x23,0x43,0x10]
+ vcmpgtsw 2, 3, 4
+# CHECK-BE: vcmpgtsw. 2, 3, 4 # encoding: [0x10,0x43,0x27,0x86]
+# CHECK-LE: vcmpgtsw. 2, 3, 4 # encoding: [0x86,0x27,0x43,0x10]
+ vcmpgtsw. 2, 3, 4
+# CHECK-BE: vcmpgtub 2, 3, 4 # encoding: [0x10,0x43,0x22,0x06]
+# CHECK-LE: vcmpgtub 2, 3, 4 # encoding: [0x06,0x22,0x43,0x10]
+ vcmpgtub 2, 3, 4
+# CHECK-BE: vcmpgtub. 2, 3, 4 # encoding: [0x10,0x43,0x26,0x06]
+# CHECK-LE: vcmpgtub. 2, 3, 4 # encoding: [0x06,0x26,0x43,0x10]
+ vcmpgtub. 2, 3, 4
+# CHECK-BE: vcmpgtuh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x46]
+# CHECK-LE: vcmpgtuh 2, 3, 4 # encoding: [0x46,0x22,0x43,0x10]
+ vcmpgtuh 2, 3, 4
+# CHECK-BE: vcmpgtuh. 2, 3, 4 # encoding: [0x10,0x43,0x26,0x46]
+# CHECK-LE: vcmpgtuh. 2, 3, 4 # encoding: [0x46,0x26,0x43,0x10]
+ vcmpgtuh. 2, 3, 4
+# CHECK-BE: vcmpgtuw 2, 3, 4 # encoding: [0x10,0x43,0x22,0x86]
+# CHECK-LE: vcmpgtuw 2, 3, 4 # encoding: [0x86,0x22,0x43,0x10]
+ vcmpgtuw 2, 3, 4
+# CHECK-BE: vcmpgtuw. 2, 3, 4 # encoding: [0x10,0x43,0x26,0x86]
+# CHECK-LE: vcmpgtuw. 2, 3, 4 # encoding: [0x86,0x26,0x43,0x10]
+ vcmpgtuw. 2, 3, 4
# Vector integer logical instructions
-# CHECK: vand 2, 3, 4 # encoding: [0x10,0x43,0x24,0x04]
- vand 2, 3, 4
-# CHECK: vandc 2, 3, 4 # encoding: [0x10,0x43,0x24,0x44]
- vandc 2, 3, 4
-# CHECK: vnor 2, 3, 4 # encoding: [0x10,0x43,0x25,0x04]
- vnor 2, 3, 4
-# CHECK: vor 2, 3, 4 # encoding: [0x10,0x43,0x24,0x84]
- vor 2, 3, 4
-# CHECK: vxor 2, 3, 4 # encoding: [0x10,0x43,0x24,0xc4]
- vxor 2, 3, 4
+# CHECK-BE: vand 2, 3, 4 # encoding: [0x10,0x43,0x24,0x04]
+# CHECK-LE: vand 2, 3, 4 # encoding: [0x04,0x24,0x43,0x10]
+ vand 2, 3, 4
+# CHECK-BE: vandc 2, 3, 4 # encoding: [0x10,0x43,0x24,0x44]
+# CHECK-LE: vandc 2, 3, 4 # encoding: [0x44,0x24,0x43,0x10]
+ vandc 2, 3, 4
+# CHECK-BE: vnor 2, 3, 4 # encoding: [0x10,0x43,0x25,0x04]
+# CHECK-LE: vnor 2, 3, 4 # encoding: [0x04,0x25,0x43,0x10]
+ vnor 2, 3, 4
+# CHECK-BE: vor 2, 3, 4 # encoding: [0x10,0x43,0x24,0x84]
+# CHECK-LE: vor 2, 3, 4 # encoding: [0x84,0x24,0x43,0x10]
+ vor 2, 3, 4
+# CHECK-BE: vxor 2, 3, 4 # encoding: [0x10,0x43,0x24,0xc4]
+# CHECK-LE: vxor 2, 3, 4 # encoding: [0xc4,0x24,0x43,0x10]
+ vxor 2, 3, 4
# Vector integer rotate and shift instructions
-# CHECK: vrlb 2, 3, 4 # encoding: [0x10,0x43,0x20,0x04]
- vrlb 2, 3, 4
-# CHECK: vrlh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x44]
- vrlh 2, 3, 4
-# CHECK: vrlw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x84]
- vrlw 2, 3, 4
-
-# CHECK: vslb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x04]
- vslb 2, 3, 4
-# CHECK: vslh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x44]
- vslh 2, 3, 4
-# CHECK: vslw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x84]
- vslw 2, 3, 4
-# CHECK: vsrb 2, 3, 4 # encoding: [0x10,0x43,0x22,0x04]
- vsrb 2, 3, 4
-# CHECK: vsrh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x44]
- vsrh 2, 3, 4
-# CHECK: vsrw 2, 3, 4 # encoding: [0x10,0x43,0x22,0x84]
- vsrw 2, 3, 4
-# CHECK: vsrab 2, 3, 4 # encoding: [0x10,0x43,0x23,0x04]
- vsrab 2, 3, 4
-# CHECK: vsrah 2, 3, 4 # encoding: [0x10,0x43,0x23,0x44]
- vsrah 2, 3, 4
-# CHECK: vsraw 2, 3, 4 # encoding: [0x10,0x43,0x23,0x84]
- vsraw 2, 3, 4
+# CHECK-BE: vrlb 2, 3, 4 # encoding: [0x10,0x43,0x20,0x04]
+# CHECK-LE: vrlb 2, 3, 4 # encoding: [0x04,0x20,0x43,0x10]
+ vrlb 2, 3, 4
+# CHECK-BE: vrlh 2, 3, 4 # encoding: [0x10,0x43,0x20,0x44]
+# CHECK-LE: vrlh 2, 3, 4 # encoding: [0x44,0x20,0x43,0x10]
+ vrlh 2, 3, 4
+# CHECK-BE: vrlw 2, 3, 4 # encoding: [0x10,0x43,0x20,0x84]
+# CHECK-LE: vrlw 2, 3, 4 # encoding: [0x84,0x20,0x43,0x10]
+ vrlw 2, 3, 4
+
+# CHECK-BE: vslb 2, 3, 4 # encoding: [0x10,0x43,0x21,0x04]
+# CHECK-LE: vslb 2, 3, 4 # encoding: [0x04,0x21,0x43,0x10]
+ vslb 2, 3, 4
+# CHECK-BE: vslh 2, 3, 4 # encoding: [0x10,0x43,0x21,0x44]
+# CHECK-LE: vslh 2, 3, 4 # encoding: [0x44,0x21,0x43,0x10]
+ vslh 2, 3, 4
+# CHECK-BE: vslw 2, 3, 4 # encoding: [0x10,0x43,0x21,0x84]
+# CHECK-LE: vslw 2, 3, 4 # encoding: [0x84,0x21,0x43,0x10]
+ vslw 2, 3, 4
+# CHECK-BE: vsrb 2, 3, 4 # encoding: [0x10,0x43,0x22,0x04]
+# CHECK-LE: vsrb 2, 3, 4 # encoding: [0x04,0x22,0x43,0x10]
+ vsrb 2, 3, 4
+# CHECK-BE: vsrh 2, 3, 4 # encoding: [0x10,0x43,0x22,0x44]
+# CHECK-LE: vsrh 2, 3, 4 # encoding: [0x44,0x22,0x43,0x10]
+ vsrh 2, 3, 4
+# CHECK-BE: vsrw 2, 3, 4 # encoding: [0x10,0x43,0x22,0x84]
+# CHECK-LE: vsrw 2, 3, 4 # encoding: [0x84,0x22,0x43,0x10]
+ vsrw 2, 3, 4
+# CHECK-BE: vsrab 2, 3, 4 # encoding: [0x10,0x43,0x23,0x04]
+# CHECK-LE: vsrab 2, 3, 4 # encoding: [0x04,0x23,0x43,0x10]
+ vsrab 2, 3, 4
+# CHECK-BE: vsrah 2, 3, 4 # encoding: [0x10,0x43,0x23,0x44]
+# CHECK-LE: vsrah 2, 3, 4 # encoding: [0x44,0x23,0x43,0x10]
+ vsrah 2, 3, 4
+# CHECK-BE: vsraw 2, 3, 4 # encoding: [0x10,0x43,0x23,0x84]
+# CHECK-LE: vsraw 2, 3, 4 # encoding: [0x84,0x23,0x43,0x10]
+ vsraw 2, 3, 4
# Vector floating-point instructions
-# CHECK: vaddfp 2, 3, 4 # encoding: [0x10,0x43,0x20,0x0a]
- vaddfp 2, 3, 4
-# CHECK: vsubfp 2, 3, 4 # encoding: [0x10,0x43,0x20,0x4a]
- vsubfp 2, 3, 4
-# CHECK: vmaddfp 2, 3, 4, 5 # encoding: [0x10,0x43,0x29,0x2e]
- vmaddfp 2, 3, 4, 5
-# CHECK: vnmsubfp 2, 3, 4, 5 # encoding: [0x10,0x43,0x29,0x2f]
- vnmsubfp 2, 3, 4, 5
-
-# CHECK: vmaxfp 2, 3, 4 # encoding: [0x10,0x43,0x24,0x0a]
- vmaxfp 2, 3, 4
-# CHECK: vminfp 2, 3, 4 # encoding: [0x10,0x43,0x24,0x4a]
- vminfp 2, 3, 4
-
-# CHECK: vctsxs 2, 3, 4 # encoding: [0x10,0x44,0x1b,0xca]
- vctsxs 2, 3, 4
-# CHECK: vctuxs 2, 3, 4 # encoding: [0x10,0x44,0x1b,0x8a]
- vctuxs 2, 3, 4
-# CHECK: vcfsx 2, 3, 4 # encoding: [0x10,0x44,0x1b,0x4a]
- vcfsx 2, 3, 4
-# CHECK: vcfux 2, 3, 4 # encoding: [0x10,0x44,0x1b,0x0a]
- vcfux 2, 3, 4
-# CHECK: vrfim 2, 3 # encoding: [0x10,0x40,0x1a,0xca]
- vrfim 2, 3
-# CHECK: vrfin 2, 3 # encoding: [0x10,0x40,0x1a,0x0a]
- vrfin 2, 3
-# CHECK: vrfip 2, 3 # encoding: [0x10,0x40,0x1a,0x8a]
- vrfip 2, 3
-# CHECK: vrfiz 2, 3 # encoding: [0x10,0x40,0x1a,0x4a]
- vrfiz 2, 3
-
-# CHECK: vcmpbfp 2, 3, 4 # encoding: [0x10,0x43,0x23,0xc6]
- vcmpbfp 2, 3, 4
-# CHECK: vcmpbfp. 2, 3, 4 # encoding: [0x10,0x43,0x27,0xc6]
- vcmpbfp. 2, 3, 4
-# CHECK: vcmpeqfp 2, 3, 4 # encoding: [0x10,0x43,0x20,0xc6]
- vcmpeqfp 2, 3, 4
-# CHECK: vcmpeqfp. 2, 3, 4 # encoding: [0x10,0x43,0x24,0xc6]
- vcmpeqfp. 2, 3, 4
-# CHECK: vcmpgefp 2, 3, 4 # encoding: [0x10,0x43,0x21,0xc6]
- vcmpgefp 2, 3, 4
-# CHECK: vcmpgefp. 2, 3, 4 # encoding: [0x10,0x43,0x25,0xc6]
- vcmpgefp. 2, 3, 4
-# CHECK: vcmpgtfp 2, 3, 4 # encoding: [0x10,0x43,0x22,0xc6]
- vcmpgtfp 2, 3, 4
-# CHECK: vcmpgtfp. 2, 3, 4 # encoding: [0x10,0x43,0x26,0xc6]
- vcmpgtfp. 2, 3, 4
-
-# CHECK: vexptefp 2, 3 # encoding: [0x10,0x40,0x19,0x8a]
- vexptefp 2, 3
-# CHECK: vlogefp 2, 3 # encoding: [0x10,0x40,0x19,0xca]
- vlogefp 2, 3
-# CHECK: vrefp 2, 3 # encoding: [0x10,0x40,0x19,0x0a]
- vrefp 2, 3
-# CHECK: vrsqrtefp 2, 3 # encoding: [0x10,0x40,0x19,0x4a]
- vrsqrtefp 2, 3
+# CHECK-BE: vaddfp 2, 3, 4 # encoding: [0x10,0x43,0x20,0x0a]
+# CHECK-LE: vaddfp 2, 3, 4 # encoding: [0x0a,0x20,0x43,0x10]
+ vaddfp 2, 3, 4
+# CHECK-BE: vsubfp 2, 3, 4 # encoding: [0x10,0x43,0x20,0x4a]
+# CHECK-LE: vsubfp 2, 3, 4 # encoding: [0x4a,0x20,0x43,0x10]
+ vsubfp 2, 3, 4
+# CHECK-BE: vmaddfp 2, 3, 4, 5 # encoding: [0x10,0x43,0x29,0x2e]
+# CHECK-LE: vmaddfp 2, 3, 4, 5 # encoding: [0x2e,0x29,0x43,0x10]
+ vmaddfp 2, 3, 4, 5
+# CHECK-BE: vnmsubfp 2, 3, 4, 5 # encoding: [0x10,0x43,0x29,0x2f]
+# CHECK-LE: vnmsubfp 2, 3, 4, 5 # encoding: [0x2f,0x29,0x43,0x10]
+ vnmsubfp 2, 3, 4, 5
+
+# CHECK-BE: vmaxfp 2, 3, 4 # encoding: [0x10,0x43,0x24,0x0a]
+# CHECK-LE: vmaxfp 2, 3, 4 # encoding: [0x0a,0x24,0x43,0x10]
+ vmaxfp 2, 3, 4
+# CHECK-BE: vminfp 2, 3, 4 # encoding: [0x10,0x43,0x24,0x4a]
+# CHECK-LE: vminfp 2, 3, 4 # encoding: [0x4a,0x24,0x43,0x10]
+ vminfp 2, 3, 4
+
+# CHECK-BE: vctsxs 2, 3, 4 # encoding: [0x10,0x44,0x1b,0xca]
+# CHECK-LE: vctsxs 2, 3, 4 # encoding: [0xca,0x1b,0x44,0x10]
+ vctsxs 2, 3, 4
+# CHECK-BE: vctuxs 2, 3, 4 # encoding: [0x10,0x44,0x1b,0x8a]
+# CHECK-LE: vctuxs 2, 3, 4 # encoding: [0x8a,0x1b,0x44,0x10]
+ vctuxs 2, 3, 4
+# CHECK-BE: vcfsx 2, 3, 4 # encoding: [0x10,0x44,0x1b,0x4a]
+# CHECK-LE: vcfsx 2, 3, 4 # encoding: [0x4a,0x1b,0x44,0x10]
+ vcfsx 2, 3, 4
+# CHECK-BE: vcfux 2, 3, 4 # encoding: [0x10,0x44,0x1b,0x0a]
+# CHECK-LE: vcfux 2, 3, 4 # encoding: [0x0a,0x1b,0x44,0x10]
+ vcfux 2, 3, 4
+# CHECK-BE: vrfim 2, 3 # encoding: [0x10,0x40,0x1a,0xca]
+# CHECK-LE: vrfim 2, 3 # encoding: [0xca,0x1a,0x40,0x10]
+ vrfim 2, 3
+# CHECK-BE: vrfin 2, 3 # encoding: [0x10,0x40,0x1a,0x0a]
+# CHECK-LE: vrfin 2, 3 # encoding: [0x0a,0x1a,0x40,0x10]
+ vrfin 2, 3
+# CHECK-BE: vrfip 2, 3 # encoding: [0x10,0x40,0x1a,0x8a]
+# CHECK-LE: vrfip 2, 3 # encoding: [0x8a,0x1a,0x40,0x10]
+ vrfip 2, 3
+# CHECK-BE: vrfiz 2, 3 # encoding: [0x10,0x40,0x1a,0x4a]
+# CHECK-LE: vrfiz 2, 3 # encoding: [0x4a,0x1a,0x40,0x10]
+ vrfiz 2, 3
+
+# CHECK-BE: vcmpbfp 2, 3, 4 # encoding: [0x10,0x43,0x23,0xc6]
+# CHECK-LE: vcmpbfp 2, 3, 4 # encoding: [0xc6,0x23,0x43,0x10]
+ vcmpbfp 2, 3, 4
+# CHECK-BE: vcmpbfp. 2, 3, 4 # encoding: [0x10,0x43,0x27,0xc6]
+# CHECK-LE: vcmpbfp. 2, 3, 4 # encoding: [0xc6,0x27,0x43,0x10]
+ vcmpbfp. 2, 3, 4
+# CHECK-BE: vcmpeqfp 2, 3, 4 # encoding: [0x10,0x43,0x20,0xc6]
+# CHECK-LE: vcmpeqfp 2, 3, 4 # encoding: [0xc6,0x20,0x43,0x10]
+ vcmpeqfp 2, 3, 4
+# CHECK-BE: vcmpeqfp. 2, 3, 4 # encoding: [0x10,0x43,0x24,0xc6]
+# CHECK-LE: vcmpeqfp. 2, 3, 4 # encoding: [0xc6,0x24,0x43,0x10]
+ vcmpeqfp. 2, 3, 4
+# CHECK-BE: vcmpgefp 2, 3, 4 # encoding: [0x10,0x43,0x21,0xc6]
+# CHECK-LE: vcmpgefp 2, 3, 4 # encoding: [0xc6,0x21,0x43,0x10]
+ vcmpgefp 2, 3, 4
+# CHECK-BE: vcmpgefp. 2, 3, 4 # encoding: [0x10,0x43,0x25,0xc6]
+# CHECK-LE: vcmpgefp. 2, 3, 4 # encoding: [0xc6,0x25,0x43,0x10]
+ vcmpgefp. 2, 3, 4
+# CHECK-BE: vcmpgtfp 2, 3, 4 # encoding: [0x10,0x43,0x22,0xc6]
+# CHECK-LE: vcmpgtfp 2, 3, 4 # encoding: [0xc6,0x22,0x43,0x10]
+ vcmpgtfp 2, 3, 4
+# CHECK-BE: vcmpgtfp. 2, 3, 4 # encoding: [0x10,0x43,0x26,0xc6]
+# CHECK-LE: vcmpgtfp. 2, 3, 4 # encoding: [0xc6,0x26,0x43,0x10]
+ vcmpgtfp. 2, 3, 4
+
+# CHECK-BE: vexptefp 2, 3 # encoding: [0x10,0x40,0x19,0x8a]
+# CHECK-LE: vexptefp 2, 3 # encoding: [0x8a,0x19,0x40,0x10]
+ vexptefp 2, 3
+# CHECK-BE: vlogefp 2, 3 # encoding: [0x10,0x40,0x19,0xca]
+# CHECK-LE: vlogefp 2, 3 # encoding: [0xca,0x19,0x40,0x10]
+ vlogefp 2, 3
+# CHECK-BE: vrefp 2, 3 # encoding: [0x10,0x40,0x19,0x0a]
+# CHECK-LE: vrefp 2, 3 # encoding: [0x0a,0x19,0x40,0x10]
+ vrefp 2, 3
+# CHECK-BE: vrsqrtefp 2, 3 # encoding: [0x10,0x40,0x19,0x4a]
+# CHECK-LE: vrsqrtefp 2, 3 # encoding: [0x4a,0x19,0x40,0x10]
+ vrsqrtefp 2, 3
# Vector status and control register instructions
-# CHECK: mtvscr 2 # encoding: [0x10,0x00,0x16,0x44]
- mtvscr 2
-# CHECK: mfvscr 2 # encoding: [0x10,0x40,0x06,0x04]
- mfvscr 2
+# CHECK-BE: mtvscr 2 # encoding: [0x10,0x00,0x16,0x44]
+# CHECK-LE: mtvscr 2 # encoding: [0x44,0x16,0x00,0x10]
+ mtvscr 2
+# CHECK-BE: mfvscr 2 # encoding: [0x10,0x40,0x06,0x04]
+# CHECK-LE: mfvscr 2 # encoding: [0x04,0x06,0x40,0x10]
+ mfvscr 2
diff --git a/test/MC/PowerPC/ppc64-encoding.s b/test/MC/PowerPC/ppc64-encoding.s
index d82d86fd0102..4c3530d9c628 100644
--- a/test/MC/PowerPC/ppc64-encoding.s
+++ b/test/MC/PowerPC/ppc64-encoding.s
@@ -1,551 +1,769 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# Branch facility
# Branch instructions
-# CHECK: b target # encoding: [0b010010AA,A,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
- b target
-# CHECK: ba target # encoding: [0b010010AA,A,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
- ba target
-# CHECK: bl target # encoding: [0b010010AA,A,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
- bl target
-# CHECK: bla target # encoding: [0b010010AA,A,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
- bla target
-
-# CHECK: bc 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bc 4, 10, target
-# CHECK: bca 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bca 4, 10, target
-# CHECK: bcl 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
- bcl 4, 10, target
-# CHECK: bcla 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA11]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
- bcla 4, 10, target
-
-# CHECK: bclr 4, 10, 3 # encoding: [0x4c,0x8a,0x18,0x20]
- bclr 4, 10, 3
-# CHECK: bclr 4, 10, 0 # encoding: [0x4c,0x8a,0x00,0x20]
- bclr 4, 10
-# CHECK: bclrl 4, 10, 3 # encoding: [0x4c,0x8a,0x18,0x21]
- bclrl 4, 10, 3
-# CHECK: bclrl 4, 10, 0 # encoding: [0x4c,0x8a,0x00,0x21]
- bclrl 4, 10
-# CHECK: bcctr 4, 10, 3 # encoding: [0x4c,0x8a,0x1c,0x20]
- bcctr 4, 10, 3
-# CHECK: bcctr 4, 10, 0 # encoding: [0x4c,0x8a,0x04,0x20]
- bcctr 4, 10
-# CHECK: bcctrl 4, 10, 3 # encoding: [0x4c,0x8a,0x1c,0x21]
- bcctrl 4, 10, 3
-# CHECK: bcctrl 4, 10, 0 # encoding: [0x4c,0x8a,0x04,0x21]
- bcctrl 4, 10
+# CHECK-BE: b target # encoding: [0b010010AA,A,A,0bAAAAAA00]
+# CHECK-LE: b target # encoding: [0bAAAAAA00,A,A,0b010010AA]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
+ b target
+# CHECK-BE: ba target # encoding: [0b010010AA,A,A,0bAAAAAA10]
+# CHECK-LE: ba target # encoding: [0bAAAAAA10,A,A,0b010010AA]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
+ ba target
+# CHECK-BE: bl target # encoding: [0b010010AA,A,A,0bAAAAAA01]
+# CHECK-LE: bl target # encoding: [0bAAAAAA01,A,A,0b010010AA]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
+ bl target
+# CHECK-BE: bla target # encoding: [0b010010AA,A,A,0bAAAAAA11]
+# CHECK-LE: bla target # encoding: [0bAAAAAA11,A,A,0b010010AA]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
+ bla target
+
+# CHECK-BE: bc 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA00]
+# CHECK-LE: bc 4, 10, target # encoding: [0bAAAAAA00,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bc 4, 10, target
+# CHECK-BE: bca 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA10]
+# CHECK-LE: bca 4, 10, target # encoding: [0bAAAAAA10,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bca 4, 10, target
+# CHECK-BE: bcl 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA01]
+# CHECK-LE: bcl 4, 10, target # encoding: [0bAAAAAA01,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+ bcl 4, 10, target
+# CHECK-BE: bcla 4, 10, target # encoding: [0x40,0x8a,A,0bAAAAAA11]
+# CHECK-LE: bcla 4, 10, target # encoding: [0bAAAAAA11,A,0x8a,0x40]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+ bcla 4, 10, target
+
+# CHECK-BE: bclr 4, 10, 3 # encoding: [0x4c,0x8a,0x18,0x20]
+# CHECK-LE: bclr 4, 10, 3 # encoding: [0x20,0x18,0x8a,0x4c]
+ bclr 4, 10, 3
+# CHECK-BE: bclr 4, 10, 0 # encoding: [0x4c,0x8a,0x00,0x20]
+# CHECK-LE: bclr 4, 10, 0 # encoding: [0x20,0x00,0x8a,0x4c]
+ bclr 4, 10
+# CHECK-BE: bclrl 4, 10, 3 # encoding: [0x4c,0x8a,0x18,0x21]
+# CHECK-LE: bclrl 4, 10, 3 # encoding: [0x21,0x18,0x8a,0x4c]
+ bclrl 4, 10, 3
+# CHECK-BE: bclrl 4, 10, 0 # encoding: [0x4c,0x8a,0x00,0x21]
+# CHECK-LE: bclrl 4, 10, 0 # encoding: [0x21,0x00,0x8a,0x4c]
+ bclrl 4, 10
+# CHECK-BE: bcctr 4, 10, 3 # encoding: [0x4c,0x8a,0x1c,0x20]
+# CHECK-LE: bcctr 4, 10, 3 # encoding: [0x20,0x1c,0x8a,0x4c]
+ bcctr 4, 10, 3
+# CHECK-BE: bcctr 4, 10, 0 # encoding: [0x4c,0x8a,0x04,0x20]
+# CHECK-LE: bcctr 4, 10, 0 # encoding: [0x20,0x04,0x8a,0x4c]
+ bcctr 4, 10
+# CHECK-BE: bcctrl 4, 10, 3 # encoding: [0x4c,0x8a,0x1c,0x21]
+# CHECK-LE: bcctrl 4, 10, 3 # encoding: [0x21,0x1c,0x8a,0x4c]
+ bcctrl 4, 10, 3
+# CHECK-BE: bcctrl 4, 10, 0 # encoding: [0x4c,0x8a,0x04,0x21]
+# CHECK-LE: bcctrl 4, 10, 0 # encoding: [0x21,0x04,0x8a,0x4c]
+ bcctrl 4, 10
# Condition register instructions
-# CHECK: crand 2, 3, 4 # encoding: [0x4c,0x43,0x22,0x02]
- crand 2, 3, 4
-# CHECK: crnand 2, 3, 4 # encoding: [0x4c,0x43,0x21,0xc2]
- crnand 2, 3, 4
-# CHECK: cror 2, 3, 4 # encoding: [0x4c,0x43,0x23,0x82]
- cror 2, 3, 4
-# CHECK: crxor 2, 3, 4 # encoding: [0x4c,0x43,0x21,0x82]
- crxor 2, 3, 4
-# CHECK: crnor 2, 3, 4 # encoding: [0x4c,0x43,0x20,0x42]
- crnor 2, 3, 4
-# CHECK: creqv 2, 3, 4 # encoding: [0x4c,0x43,0x22,0x42]
- creqv 2, 3, 4
-# CHECK: crandc 2, 3, 4 # encoding: [0x4c,0x43,0x21,0x02]
- crandc 2, 3, 4
-# CHECK: crorc 2, 3, 4 # encoding: [0x4c,0x43,0x23,0x42]
- crorc 2, 3, 4
-# CHECK: mcrf 2, 3 # encoding: [0x4d,0x0c,0x00,0x00]
- mcrf 2, 3
+# CHECK-BE: crand 2, 3, 4 # encoding: [0x4c,0x43,0x22,0x02]
+# CHECK-LE: crand 2, 3, 4 # encoding: [0x02,0x22,0x43,0x4c]
+ crand 2, 3, 4
+# CHECK-BE: crnand 2, 3, 4 # encoding: [0x4c,0x43,0x21,0xc2]
+# CHECK-LE: crnand 2, 3, 4 # encoding: [0xc2,0x21,0x43,0x4c]
+ crnand 2, 3, 4
+# CHECK-BE: cror 2, 3, 4 # encoding: [0x4c,0x43,0x23,0x82]
+# CHECK-LE: cror 2, 3, 4 # encoding: [0x82,0x23,0x43,0x4c]
+ cror 2, 3, 4
+# CHECK-BE: crxor 2, 3, 4 # encoding: [0x4c,0x43,0x21,0x82]
+# CHECK-LE: crxor 2, 3, 4 # encoding: [0x82,0x21,0x43,0x4c]
+ crxor 2, 3, 4
+# CHECK-BE: crnor 2, 3, 4 # encoding: [0x4c,0x43,0x20,0x42]
+# CHECK-LE: crnor 2, 3, 4 # encoding: [0x42,0x20,0x43,0x4c]
+ crnor 2, 3, 4
+# CHECK-BE: creqv 2, 3, 4 # encoding: [0x4c,0x43,0x22,0x42]
+# CHECK-LE: creqv 2, 3, 4 # encoding: [0x42,0x22,0x43,0x4c]
+ creqv 2, 3, 4
+# CHECK-BE: crandc 2, 3, 4 # encoding: [0x4c,0x43,0x21,0x02]
+# CHECK-LE: crandc 2, 3, 4 # encoding: [0x02,0x21,0x43,0x4c]
+ crandc 2, 3, 4
+# CHECK-BE: crorc 2, 3, 4 # encoding: [0x4c,0x43,0x23,0x42]
+# CHECK-LE: crorc 2, 3, 4 # encoding: [0x42,0x23,0x43,0x4c]
+ crorc 2, 3, 4
+# CHECK-BE: mcrf 2, 3 # encoding: [0x4d,0x0c,0x00,0x00]
+# CHECK-LE: mcrf 2, 3 # encoding: [0x00,0x00,0x0c,0x4d]
+ mcrf 2, 3
# System call instruction
-# CHECK: sc 1 # encoding: [0x44,0x00,0x00,0x22]
- sc 1
-# CHECK: sc 0 # encoding: [0x44,0x00,0x00,0x02]
- sc
+# CHECK-BE: sc 1 # encoding: [0x44,0x00,0x00,0x22]
+# CHECK-LE: sc 1 # encoding: [0x22,0x00,0x00,0x44]
+ sc 1
+# CHECK-BE: sc 0 # encoding: [0x44,0x00,0x00,0x02]
+# CHECK-LE: sc 0 # encoding: [0x02,0x00,0x00,0x44]
+ sc
# Fixed-point facility
# Fixed-point load instructions
-# CHECK: lbz 2, 128(4) # encoding: [0x88,0x44,0x00,0x80]
- lbz 2, 128(4)
-# CHECK: lbzx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xae]
- lbzx 2, 3, 4
-# CHECK: lbzu 2, 128(4) # encoding: [0x8c,0x44,0x00,0x80]
- lbzu 2, 128(4)
-# CHECK: lbzux 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xee]
- lbzux 2, 3, 4
-# CHECK: lhz 2, 128(4) # encoding: [0xa0,0x44,0x00,0x80]
- lhz 2, 128(4)
-# CHECK: lhzx 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x2e]
- lhzx 2, 3, 4
-# CHECK: lhzu 2, 128(4) # encoding: [0xa4,0x44,0x00,0x80]
- lhzu 2, 128(4)
-# CHECK: lhzux 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x6e]
- lhzux 2, 3, 4
-# CHECK: lha 2, 128(4) # encoding: [0xa8,0x44,0x00,0x80]
- lha 2, 128(4)
-# CHECK: lhax 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xae]
- lhax 2, 3, 4
-# CHECK: lhau 2, 128(4) # encoding: [0xac,0x44,0x00,0x80]
- lhau 2, 128(4)
-# CHECK: lhaux 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xee]
- lhaux 2, 3, 4
-# CHECK: lwz 2, 128(4) # encoding: [0x80,0x44,0x00,0x80]
- lwz 2, 128(4)
-# CHECK: lwzx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x2e]
- lwzx 2, 3, 4
-# CHECK: lwzu 2, 128(4) # encoding: [0x84,0x44,0x00,0x80]
- lwzu 2, 128(4)
-# CHECK: lwzux 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x6e]
- lwzux 2, 3, 4
-# CHECK: lwa 2, 128(4) # encoding: [0xe8,0x44,0x00,0x82]
- lwa 2, 128(4)
-# CHECK: lwax 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xaa]
- lwax 2, 3, 4
-# CHECK: lwaux 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xea]
- lwaux 2, 3, 4
-# CHECK: ld 2, 128(4) # encoding: [0xe8,0x44,0x00,0x80]
- ld 2, 128(4)
-# CHECK: ldx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x2a]
- ldx 2, 3, 4
-# CHECK: ldu 2, 128(4) # encoding: [0xe8,0x44,0x00,0x81]
- ldu 2, 128(4)
-# CHECK: ldux 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x6a]
- ldux 2, 3, 4
+# CHECK-BE: lbz 2, 128(4) # encoding: [0x88,0x44,0x00,0x80]
+# CHECK-LE: lbz 2, 128(4) # encoding: [0x80,0x00,0x44,0x88]
+ lbz 2, 128(4)
+# CHECK-BE: lbzx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xae]
+# CHECK-LE: lbzx 2, 3, 4 # encoding: [0xae,0x20,0x43,0x7c]
+ lbzx 2, 3, 4
+# CHECK-BE: lbzu 2, 128(4) # encoding: [0x8c,0x44,0x00,0x80]
+# CHECK-LE: lbzu 2, 128(4) # encoding: [0x80,0x00,0x44,0x8c]
+ lbzu 2, 128(4)
+# CHECK-BE: lbzux 2, 3, 4 # encoding: [0x7c,0x43,0x20,0xee]
+# CHECK-LE: lbzux 2, 3, 4 # encoding: [0xee,0x20,0x43,0x7c]
+ lbzux 2, 3, 4
+# CHECK-BE: lhz 2, 128(4) # encoding: [0xa0,0x44,0x00,0x80]
+# CHECK-LE: lhz 2, 128(4) # encoding: [0x80,0x00,0x44,0xa0]
+ lhz 2, 128(4)
+# CHECK-BE: lhzx 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x2e]
+# CHECK-LE: lhzx 2, 3, 4 # encoding: [0x2e,0x22,0x43,0x7c]
+ lhzx 2, 3, 4
+# CHECK-BE: lhzu 2, 128(4) # encoding: [0xa4,0x44,0x00,0x80]
+# CHECK-LE: lhzu 2, 128(4) # encoding: [0x80,0x00,0x44,0xa4]
+ lhzu 2, 128(4)
+# CHECK-BE: lhzux 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x6e]
+# CHECK-LE: lhzux 2, 3, 4 # encoding: [0x6e,0x22,0x43,0x7c]
+ lhzux 2, 3, 4
+# CHECK-BE: lha 2, 128(4) # encoding: [0xa8,0x44,0x00,0x80]
+# CHECK-LE: lha 2, 128(4) # encoding: [0x80,0x00,0x44,0xa8]
+ lha 2, 128(4)
+# CHECK-BE: lhax 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xae]
+# CHECK-LE: lhax 2, 3, 4 # encoding: [0xae,0x22,0x43,0x7c]
+ lhax 2, 3, 4
+# CHECK-BE: lhau 2, 128(4) # encoding: [0xac,0x44,0x00,0x80]
+# CHECK-LE: lhau 2, 128(4) # encoding: [0x80,0x00,0x44,0xac]
+ lhau 2, 128(4)
+# CHECK-BE: lhaux 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xee]
+# CHECK-LE: lhaux 2, 3, 4 # encoding: [0xee,0x22,0x43,0x7c]
+ lhaux 2, 3, 4
+# CHECK-BE: lwz 2, 128(4) # encoding: [0x80,0x44,0x00,0x80]
+# CHECK-LE: lwz 2, 128(4) # encoding: [0x80,0x00,0x44,0x80]
+ lwz 2, 128(4)
+# CHECK-BE: lwzx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x2e]
+# CHECK-LE: lwzx 2, 3, 4 # encoding: [0x2e,0x20,0x43,0x7c]
+ lwzx 2, 3, 4
+# CHECK-BE: lwzu 2, 128(4) # encoding: [0x84,0x44,0x00,0x80]
+# CHECK-LE: lwzu 2, 128(4) # encoding: [0x80,0x00,0x44,0x84]
+ lwzu 2, 128(4)
+# CHECK-BE: lwzux 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x6e]
+# CHECK-LE: lwzux 2, 3, 4 # encoding: [0x6e,0x20,0x43,0x7c]
+ lwzux 2, 3, 4
+# CHECK-BE: lwa 2, 128(4) # encoding: [0xe8,0x44,0x00,0x82]
+# CHECK-LE: lwa 2, 128(4) # encoding: [0x82,0x00,0x44,0xe8]
+ lwa 2, 128(4)
+# CHECK-BE: lwax 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xaa]
+# CHECK-LE: lwax 2, 3, 4 # encoding: [0xaa,0x22,0x43,0x7c]
+ lwax 2, 3, 4
+# CHECK-BE: lwaux 2, 3, 4 # encoding: [0x7c,0x43,0x22,0xea]
+# CHECK-LE: lwaux 2, 3, 4 # encoding: [0xea,0x22,0x43,0x7c]
+ lwaux 2, 3, 4
+# CHECK-BE: ld 2, 128(4) # encoding: [0xe8,0x44,0x00,0x80]
+# CHECK-LE: ld 2, 128(4) # encoding: [0x80,0x00,0x44,0xe8]
+ ld 2, 128(4)
+# CHECK-BE: ldx 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x2a]
+# CHECK-LE: ldx 2, 3, 4 # encoding: [0x2a,0x20,0x43,0x7c]
+ ldx 2, 3, 4
+# CHECK-BE: ldu 2, 128(4) # encoding: [0xe8,0x44,0x00,0x81]
+# CHECK-LE: ldu 2, 128(4) # encoding: [0x81,0x00,0x44,0xe8]
+ ldu 2, 128(4)
+# CHECK-BE: ldux 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x6a]
+# CHECK-LE: ldux 2, 3, 4 # encoding: [0x6a,0x20,0x43,0x7c]
+ ldux 2, 3, 4
# Fixed-point store instructions
-# CHECK: stb 2, 128(4) # encoding: [0x98,0x44,0x00,0x80]
- stb 2, 128(4)
-# CHECK: stbx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xae]
- stbx 2, 3, 4
-# CHECK: stbu 2, 128(4) # encoding: [0x9c,0x44,0x00,0x80]
- stbu 2, 128(4)
-# CHECK: stbux 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xee]
- stbux 2, 3, 4
-# CHECK: sth 2, 128(4) # encoding: [0xb0,0x44,0x00,0x80]
- sth 2, 128(4)
-# CHECK: sthx 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x2e]
- sthx 2, 3, 4
-# CHECK: sthu 2, 128(4) # encoding: [0xb4,0x44,0x00,0x80]
- sthu 2, 128(4)
-# CHECK: sthux 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x6e]
- sthux 2, 3, 4
-# CHECK: stw 2, 128(4) # encoding: [0x90,0x44,0x00,0x80]
- stw 2, 128(4)
-# CHECK: stwx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x2e]
- stwx 2, 3, 4
-# CHECK: stwu 2, 128(4) # encoding: [0x94,0x44,0x00,0x80]
- stwu 2, 128(4)
-# CHECK: stwux 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x6e]
- stwux 2, 3, 4
-# CHECK: std 2, 128(4) # encoding: [0xf8,0x44,0x00,0x80]
- std 2, 128(4)
-# CHECK: stdx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x2a]
- stdx 2, 3, 4
-# CHECK: stdu 2, 128(4) # encoding: [0xf8,0x44,0x00,0x81]
- stdu 2, 128(4)
-# CHECK: stdux 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x6a]
- stdux 2, 3, 4
+# CHECK-BE: stb 2, 128(4) # encoding: [0x98,0x44,0x00,0x80]
+# CHECK-LE: stb 2, 128(4) # encoding: [0x80,0x00,0x44,0x98]
+ stb 2, 128(4)
+# CHECK-BE: stbx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xae]
+# CHECK-LE: stbx 2, 3, 4 # encoding: [0xae,0x21,0x43,0x7c]
+ stbx 2, 3, 4
+# CHECK-BE: stbu 2, 128(4) # encoding: [0x9c,0x44,0x00,0x80]
+# CHECK-LE: stbu 2, 128(4) # encoding: [0x80,0x00,0x44,0x9c]
+ stbu 2, 128(4)
+# CHECK-BE: stbux 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xee]
+# CHECK-LE: stbux 2, 3, 4 # encoding: [0xee,0x21,0x43,0x7c]
+ stbux 2, 3, 4
+# CHECK-BE: sth 2, 128(4) # encoding: [0xb0,0x44,0x00,0x80]
+# CHECK-LE: sth 2, 128(4) # encoding: [0x80,0x00,0x44,0xb0]
+ sth 2, 128(4)
+# CHECK-BE: sthx 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x2e]
+# CHECK-LE: sthx 2, 3, 4 # encoding: [0x2e,0x23,0x43,0x7c]
+ sthx 2, 3, 4
+# CHECK-BE: sthu 2, 128(4) # encoding: [0xb4,0x44,0x00,0x80]
+# CHECK-LE: sthu 2, 128(4) # encoding: [0x80,0x00,0x44,0xb4]
+ sthu 2, 128(4)
+# CHECK-BE: sthux 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x6e]
+# CHECK-LE: sthux 2, 3, 4 # encoding: [0x6e,0x23,0x43,0x7c]
+ sthux 2, 3, 4
+# CHECK-BE: stw 2, 128(4) # encoding: [0x90,0x44,0x00,0x80]
+# CHECK-LE: stw 2, 128(4) # encoding: [0x80,0x00,0x44,0x90]
+ stw 2, 128(4)
+# CHECK-BE: stwx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x2e]
+# CHECK-LE: stwx 2, 3, 4 # encoding: [0x2e,0x21,0x43,0x7c]
+ stwx 2, 3, 4
+# CHECK-BE: stwu 2, 128(4) # encoding: [0x94,0x44,0x00,0x80]
+# CHECK-LE: stwu 2, 128(4) # encoding: [0x80,0x00,0x44,0x94]
+ stwu 2, 128(4)
+# CHECK-BE: stwux 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x6e]
+# CHECK-LE: stwux 2, 3, 4 # encoding: [0x6e,0x21,0x43,0x7c]
+ stwux 2, 3, 4
+# CHECK-BE: std 2, 128(4) # encoding: [0xf8,0x44,0x00,0x80]
+# CHECK-LE: std 2, 128(4) # encoding: [0x80,0x00,0x44,0xf8]
+ std 2, 128(4)
+# CHECK-BE: stdx 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x2a]
+# CHECK-LE: stdx 2, 3, 4 # encoding: [0x2a,0x21,0x43,0x7c]
+ stdx 2, 3, 4
+# CHECK-BE: stdu 2, 128(4) # encoding: [0xf8,0x44,0x00,0x81]
+# CHECK-LE: stdu 2, 128(4) # encoding: [0x81,0x00,0x44,0xf8]
+ stdu 2, 128(4)
+# CHECK-BE: stdux 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x6a]
+# CHECK-LE: stdux 2, 3, 4 # encoding: [0x6a,0x21,0x43,0x7c]
+ stdux 2, 3, 4
# Fixed-point load and store with byte reversal instructions
-# CHECK: lhbrx 2, 3, 4 # encoding: [0x7c,0x43,0x26,0x2c]
- lhbrx 2, 3, 4
-# CHECK: sthbrx 2, 3, 4 # encoding: [0x7c,0x43,0x27,0x2c]
- sthbrx 2, 3, 4
-# CHECK: lwbrx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x2c]
- lwbrx 2, 3, 4
-# CHECK: stwbrx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x2c]
- stwbrx 2, 3, 4
-# CHECK: ldbrx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x28]
- ldbrx 2, 3, 4
-# CHECK: stdbrx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x28]
- stdbrx 2, 3, 4
+# CHECK-BE: lhbrx 2, 3, 4 # encoding: [0x7c,0x43,0x26,0x2c]
+# CHECK-LE: lhbrx 2, 3, 4 # encoding: [0x2c,0x26,0x43,0x7c]
+ lhbrx 2, 3, 4
+# CHECK-BE: sthbrx 2, 3, 4 # encoding: [0x7c,0x43,0x27,0x2c]
+# CHECK-LE: sthbrx 2, 3, 4 # encoding: [0x2c,0x27,0x43,0x7c]
+ sthbrx 2, 3, 4
+# CHECK-BE: lwbrx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x2c]
+# CHECK-LE: lwbrx 2, 3, 4 # encoding: [0x2c,0x24,0x43,0x7c]
+ lwbrx 2, 3, 4
+# CHECK-BE: stwbrx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x2c]
+# CHECK-LE: stwbrx 2, 3, 4 # encoding: [0x2c,0x25,0x43,0x7c]
+ stwbrx 2, 3, 4
+# CHECK-BE: ldbrx 2, 3, 4 # encoding: [0x7c,0x43,0x24,0x28]
+# CHECK-LE: ldbrx 2, 3, 4 # encoding: [0x28,0x24,0x43,0x7c]
+ ldbrx 2, 3, 4
+# CHECK-BE: stdbrx 2, 3, 4 # encoding: [0x7c,0x43,0x25,0x28]
+# CHECK-LE: stdbrx 2, 3, 4 # encoding: [0x28,0x25,0x43,0x7c]
+ stdbrx 2, 3, 4
# Fixed-point load and store multiple instructions
-# CHECK: lmw 2, 128(1) # encoding: [0xb8,0x41,0x00,0x80]
- lmw 2, 128(1)
-# CHECK: stmw 2, 128(1) # encoding: [0xbc,0x41,0x00,0x80]
- stmw 2, 128(1)
+# CHECK-BE: lmw 2, 128(1) # encoding: [0xb8,0x41,0x00,0x80]
+# CHECK-LE: lmw 2, 128(1) # encoding: [0x80,0x00,0x41,0xb8]
+ lmw 2, 128(1)
+# CHECK-BE: stmw 2, 128(1) # encoding: [0xbc,0x41,0x00,0x80]
+# CHECK-LE: stmw 2, 128(1) # encoding: [0x80,0x00,0x41,0xbc]
+ stmw 2, 128(1)
# FIXME: Fixed-point move assist instructions
# Fixed-point arithmetic instructions
-# CHECK: addi 2, 3, 128 # encoding: [0x38,0x43,0x00,0x80]
- addi 2, 3, 128
-# CHECK: addis 2, 3, 128 # encoding: [0x3c,0x43,0x00,0x80]
- addis 2, 3, 128
-# CHECK: add 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x14]
- add 2, 3, 4
-# CHECK: add. 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x15]
- add. 2, 3, 4
-# FIXME: addo 2, 3, 4
-# FIXME: addo. 2, 3, 4
-# CHECK: subf 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x50]
- subf 2, 3, 4
-# CHECK: subf. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x51]
- subf. 2, 3, 4
-# FIXME: subfo 2, 3, 4
-# FIXME: subfo. 2, 3, 4
-# CHECK: addic 2, 3, 128 # encoding: [0x30,0x43,0x00,0x80]
- addic 2, 3, 128
-# CHECK: addic. 2, 3, 128 # encoding: [0x34,0x43,0x00,0x80]
- addic. 2, 3, 128
-# CHECK: subfic 2, 3, 4 # encoding: [0x20,0x43,0x00,0x04]
- subfic 2, 3, 4
-
-# CHECK: addc 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x14]
- addc 2, 3, 4
-# CHECK: addc. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x15]
- addc. 2, 3, 4
-# FIXME: addco 2, 3, 4
-# FIXME: addco. 2, 3, 4
-# CHECK: subfc 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x10]
- subfc 2, 3, 4
-# CHECK: subfc 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x10]
- subfc 2, 3, 4
-# FIXME: subfco 2, 3, 4
-# FIXME: subfco. 2, 3, 4
-
-# CHECK: adde 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x14]
- adde 2, 3, 4
-# CHECK: adde. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x15]
- adde. 2, 3, 4
-# FIXME: addeo 2, 3, 4
-# FIXME: addeo. 2, 3, 4
-# CHECK: subfe 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x10]
- subfe 2, 3, 4
-# CHECK: subfe. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x11]
- subfe. 2, 3, 4
-# FIXME: subfeo 2, 3, 4
-# FIXME: subfeo. 2, 3, 4
-
-# CHECK: addme 2, 3 # encoding: [0x7c,0x43,0x01,0xd4]
- addme 2, 3
-# CHECK: addme. 2, 3 # encoding: [0x7c,0x43,0x01,0xd5]
- addme. 2, 3
-# FIXME: addmeo 2, 3
-# FIXME: addmeo. 2, 3
-# CHECK: subfme 2, 3 # encoding: [0x7c,0x43,0x01,0xd0]
- subfme 2, 3
-# CHECK: subfme. 2, 3 # encoding: [0x7c,0x43,0x01,0xd1]
- subfme. 2, 3
-# FIXME: subfmeo 2, 3
-# FIXME: subfmeo. 2, 3
-
-# CHECK: addze 2, 3 # encoding: [0x7c,0x43,0x01,0x94]
- addze 2, 3
-# CHECK: addze. 2, 3 # encoding: [0x7c,0x43,0x01,0x95]
- addze. 2, 3
-# FIXME: addzeo 2, 3
-# FIXME: addzeo. 2, 3
-# CHECK: subfze 2, 3 # encoding: [0x7c,0x43,0x01,0x90]
- subfze 2, 3
-# CHECK: subfze. 2, 3 # encoding: [0x7c,0x43,0x01,0x91]
- subfze. 2, 3
-# FIXME: subfzeo 2, 3
-# FIXME: subfzeo. 2, 3
-
-# CHECK: neg 2, 3 # encoding: [0x7c,0x43,0x00,0xd0]
- neg 2, 3
-# CHECK: neg. 2, 3 # encoding: [0x7c,0x43,0x00,0xd1]
- neg. 2, 3
-# FIXME: nego 2, 3
-# FIXME: nego. 2, 3
-
-# CHECK: mulli 2, 3, 128 # encoding: [0x1c,0x43,0x00,0x80]
- mulli 2, 3, 128
-# CHECK: mulhw 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x96]
- mulhw 2, 3, 4
-# CHECK: mulhw. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x97]
- mulhw. 2, 3, 4
-# CHECK: mullw 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd6]
- mullw 2, 3, 4
-# CHECK: mullw. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd7]
- mullw. 2, 3, 4
-# FIXME: mullwo 2, 3, 4
-# FIXME: mullwo. 2, 3, 4
-# CHECK: mulhwu 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x16]
- mulhwu 2, 3, 4
-# CHECK: mulhwu. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x17]
- mulhwu. 2, 3, 4
-
-# CHECK: divw 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd6]
- divw 2, 3, 4
-# CHECK: divw. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd7]
- divw. 2, 3, 4
-# FIXME: divwo 2, 3, 4
-# FIXME: divwo. 2, 3, 4
-# CHECK: divwu 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x96]
- divwu 2, 3, 4
-# CHECK: divwu. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x97]
- divwu. 2, 3, 4
-# FIXME: divwuo 2, 3, 4
-# FIXME: divwuo. 2, 3, 4
-# FIXME: divwe 2, 3, 4
-# FIXME: divwe. 2, 3, 4
-# FIXME: divweo 2, 3, 4
-# FIXME: divweo. 2, 3, 4
-# FIXME: divweu 2, 3, 4
-# FIXME: divweu. 2, 3, 4
-# FIXME: divweuo 2, 3, 4
-# FIXME: divweuo. 2, 3, 4
-
-# CHECK: mulld 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd2]
- mulld 2, 3, 4
-# CHECK: mulld. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd3]
- mulld. 2, 3, 4
-# FIXME: mulldo 2, 3, 4
-# FIXME: mulldo. 2, 3, 4
-# CHECK: mulhd 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x92]
- mulhd 2, 3, 4
-# CHECK: mulhd. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x93]
- mulhd. 2, 3, 4
-# CHECK: mulhdu 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x12]
- mulhdu 2, 3, 4
-# CHECK: mulhdu. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x13]
- mulhdu. 2, 3, 4
-
-# CHECK: divd 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd2]
- divd 2, 3, 4
-# CHECK: divd. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd3]
- divd. 2, 3, 4
-# FIXME: divdo 2, 3, 4
-# FIXME: divdo. 2, 3, 4
-# CHECK: divdu 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x92]
- divdu 2, 3, 4
-# CHECK: divdu. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x93]
- divdu. 2, 3, 4
-# FIXME: divduo 2, 3, 4
-# FIXME: divduo. 2, 3, 4
-# FIXME: divde 2, 3, 4
-# FIXME: divde. 2, 3, 4
-# FIXME: divdeo 2, 3, 4
-# FIXME: divdeo. 2, 3, 4
-# FIXME: divdeu 2, 3, 4
-# FIXME: divdeu. 2, 3, 4
-# FIXME: divdeuo 2, 3, 4
-# FIXME: divdeuo. 2, 3, 4
+# CHECK-BE: addi 2, 3, 128 # encoding: [0x38,0x43,0x00,0x80]
+# CHECK-LE: addi 2, 3, 128 # encoding: [0x80,0x00,0x43,0x38]
+ addi 2, 3, 128
+# CHECK-BE: addis 2, 3, 128 # encoding: [0x3c,0x43,0x00,0x80]
+# CHECK-LE: addis 2, 3, 128 # encoding: [0x80,0x00,0x43,0x3c]
+ addis 2, 3, 128
+# CHECK-BE: add 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x14]
+# CHECK-LE: add 2, 3, 4 # encoding: [0x14,0x22,0x43,0x7c]
+ add 2, 3, 4
+# CHECK-BE: add. 2, 3, 4 # encoding: [0x7c,0x43,0x22,0x15]
+# CHECK-LE: add. 2, 3, 4 # encoding: [0x15,0x22,0x43,0x7c]
+ add. 2, 3, 4
+# FIXME: addo 2, 3, 4
+# FIXME: addo. 2, 3, 4
+# CHECK-BE: subf 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x50]
+# CHECK-LE: subf 2, 3, 4 # encoding: [0x50,0x20,0x43,0x7c]
+ subf 2, 3, 4
+# CHECK-BE: subf. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x51]
+# CHECK-LE: subf. 2, 3, 4 # encoding: [0x51,0x20,0x43,0x7c]
+ subf. 2, 3, 4
+# FIXME: subfo 2, 3, 4
+# FIXME: subfo. 2, 3, 4
+# CHECK-BE: addic 2, 3, 128 # encoding: [0x30,0x43,0x00,0x80]
+# CHECK-LE: addic 2, 3, 128 # encoding: [0x80,0x00,0x43,0x30]
+ addic 2, 3, 128
+# CHECK-BE: addic. 2, 3, 128 # encoding: [0x34,0x43,0x00,0x80]
+# CHECK-LE: addic. 2, 3, 128 # encoding: [0x80,0x00,0x43,0x34]
+ addic. 2, 3, 128
+# CHECK-BE: subfic 2, 3, 4 # encoding: [0x20,0x43,0x00,0x04]
+# CHECK-LE: subfic 2, 3, 4 # encoding: [0x04,0x00,0x43,0x20]
+ subfic 2, 3, 4
+
+# CHECK-BE: addc 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x14]
+# CHECK-LE: addc 2, 3, 4 # encoding: [0x14,0x20,0x43,0x7c]
+ addc 2, 3, 4
+# CHECK-BE: addc. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x15]
+# CHECK-LE: addc. 2, 3, 4 # encoding: [0x15,0x20,0x43,0x7c]
+ addc. 2, 3, 4
+# FIXME: addco 2, 3, 4
+# FIXME: addco. 2, 3, 4
+# CHECK-BE: subfc 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x10]
+# CHECK-LE: subfc 2, 3, 4 # encoding: [0x10,0x20,0x43,0x7c]
+ subfc 2, 3, 4
+# CHECK-BE: subfc. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x11]
+# CHECK-LE: subfc. 2, 3, 4 # encoding: [0x11,0x20,0x43,0x7c]
+ subfc. 2, 3, 4
+# FIXME: subfco 2, 3, 4
+# FIXME: subfco. 2, 3, 4
+
+# CHECK-BE: adde 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x14]
+# CHECK-LE: adde 2, 3, 4 # encoding: [0x14,0x21,0x43,0x7c]
+ adde 2, 3, 4
+# CHECK-BE: adde. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x15]
+# CHECK-LE: adde. 2, 3, 4 # encoding: [0x15,0x21,0x43,0x7c]
+ adde. 2, 3, 4
+# FIXME: addeo 2, 3, 4
+# FIXME: addeo. 2, 3, 4
+# CHECK-BE: subfe 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x10]
+# CHECK-LE: subfe 2, 3, 4 # encoding: [0x10,0x21,0x43,0x7c]
+ subfe 2, 3, 4
+# CHECK-BE: subfe. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0x11]
+# CHECK-LE: subfe. 2, 3, 4 # encoding: [0x11,0x21,0x43,0x7c]
+ subfe. 2, 3, 4
+# FIXME: subfeo 2, 3, 4
+# FIXME: subfeo. 2, 3, 4
+
+# CHECK-BE: addme 2, 3 # encoding: [0x7c,0x43,0x01,0xd4]
+# CHECK-LE: addme 2, 3 # encoding: [0xd4,0x01,0x43,0x7c]
+ addme 2, 3
+# CHECK-BE: addme. 2, 3 # encoding: [0x7c,0x43,0x01,0xd5]
+# CHECK-LE: addme. 2, 3 # encoding: [0xd5,0x01,0x43,0x7c]
+ addme. 2, 3
+# FIXME: addmeo 2, 3
+# FIXME: addmeo. 2, 3
+# CHECK-BE: subfme 2, 3 # encoding: [0x7c,0x43,0x01,0xd0]
+# CHECK-LE: subfme 2, 3 # encoding: [0xd0,0x01,0x43,0x7c]
+ subfme 2, 3
+# CHECK-BE: subfme. 2, 3 # encoding: [0x7c,0x43,0x01,0xd1]
+# CHECK-LE: subfme. 2, 3 # encoding: [0xd1,0x01,0x43,0x7c]
+ subfme. 2, 3
+# FIXME: subfmeo 2, 3
+# FIXME: subfmeo. 2, 3
+
+# CHECK-BE: addze 2, 3 # encoding: [0x7c,0x43,0x01,0x94]
+# CHECK-LE: addze 2, 3 # encoding: [0x94,0x01,0x43,0x7c]
+ addze 2, 3
+# CHECK-BE: addze. 2, 3 # encoding: [0x7c,0x43,0x01,0x95]
+# CHECK-LE: addze. 2, 3 # encoding: [0x95,0x01,0x43,0x7c]
+ addze. 2, 3
+# FIXME: addzeo 2, 3
+# FIXME: addzeo. 2, 3
+# CHECK-BE: subfze 2, 3 # encoding: [0x7c,0x43,0x01,0x90]
+# CHECK-LE: subfze 2, 3 # encoding: [0x90,0x01,0x43,0x7c]
+ subfze 2, 3
+# CHECK-BE: subfze. 2, 3 # encoding: [0x7c,0x43,0x01,0x91]
+# CHECK-LE: subfze. 2, 3 # encoding: [0x91,0x01,0x43,0x7c]
+ subfze. 2, 3
+# FIXME: subfzeo 2, 3
+# FIXME: subfzeo. 2, 3
+
+# CHECK-BE: neg 2, 3 # encoding: [0x7c,0x43,0x00,0xd0]
+# CHECK-LE: neg 2, 3 # encoding: [0xd0,0x00,0x43,0x7c]
+ neg 2, 3
+# CHECK-BE: neg. 2, 3 # encoding: [0x7c,0x43,0x00,0xd1]
+# CHECK-LE: neg. 2, 3 # encoding: [0xd1,0x00,0x43,0x7c]
+ neg. 2, 3
+# FIXME: nego 2, 3
+# FIXME: nego. 2, 3
+
+# CHECK-BE: mulli 2, 3, 128 # encoding: [0x1c,0x43,0x00,0x80]
+# CHECK-LE: mulli 2, 3, 128 # encoding: [0x80,0x00,0x43,0x1c]
+ mulli 2, 3, 128
+# CHECK-BE: mulhw 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x96]
+# CHECK-LE: mulhw 2, 3, 4 # encoding: [0x96,0x20,0x43,0x7c]
+ mulhw 2, 3, 4
+# CHECK-BE: mulhw. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x97]
+# CHECK-LE: mulhw. 2, 3, 4 # encoding: [0x97,0x20,0x43,0x7c]
+ mulhw. 2, 3, 4
+# CHECK-BE: mullw 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd6]
+# CHECK-LE: mullw 2, 3, 4 # encoding: [0xd6,0x21,0x43,0x7c]
+ mullw 2, 3, 4
+# CHECK-BE: mullw. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd7]
+# CHECK-LE: mullw. 2, 3, 4 # encoding: [0xd7,0x21,0x43,0x7c]
+ mullw. 2, 3, 4
+# FIXME: mullwo 2, 3, 4
+# FIXME: mullwo. 2, 3, 4
+# CHECK-BE: mulhwu 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x16]
+# CHECK-LE: mulhwu 2, 3, 4 # encoding: [0x16,0x20,0x43,0x7c]
+ mulhwu 2, 3, 4
+# CHECK-BE: mulhwu. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x17]
+# CHECK-LE: mulhwu. 2, 3, 4 # encoding: [0x17,0x20,0x43,0x7c]
+ mulhwu. 2, 3, 4
+
+# CHECK-BE: divw 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd6]
+# CHECK-LE: divw 2, 3, 4 # encoding: [0xd6,0x23,0x43,0x7c]
+ divw 2, 3, 4
+# CHECK-BE: divw. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd7]
+# CHECK-LE: divw. 2, 3, 4 # encoding: [0xd7,0x23,0x43,0x7c]
+ divw. 2, 3, 4
+# FIXME: divwo 2, 3, 4
+# FIXME: divwo. 2, 3, 4
+# CHECK-BE: divwu 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x96]
+# CHECK-LE: divwu 2, 3, 4 # encoding: [0x96,0x23,0x43,0x7c]
+ divwu 2, 3, 4
+# CHECK-BE: divwu. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x97]
+# CHECK-LE: divwu. 2, 3, 4 # encoding: [0x97,0x23,0x43,0x7c]
+ divwu. 2, 3, 4
+# FIXME: divwuo 2, 3, 4
+# FIXME: divwuo. 2, 3, 4
+# FIXME: divwe 2, 3, 4
+# FIXME: divwe. 2, 3, 4
+# FIXME: divweo 2, 3, 4
+# FIXME: divweo. 2, 3, 4
+# FIXME: divweu 2, 3, 4
+# FIXME: divweu. 2, 3, 4
+# FIXME: divweuo 2, 3, 4
+# FIXME: divweuo. 2, 3, 4
+
+# CHECK-BE: mulld 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd2]
+# CHECK-LE: mulld 2, 3, 4 # encoding: [0xd2,0x21,0x43,0x7c]
+ mulld 2, 3, 4
+# CHECK-BE: mulld. 2, 3, 4 # encoding: [0x7c,0x43,0x21,0xd3]
+# CHECK-LE: mulld. 2, 3, 4 # encoding: [0xd3,0x21,0x43,0x7c]
+ mulld. 2, 3, 4
+# FIXME: mulldo 2, 3, 4
+# FIXME: mulldo. 2, 3, 4
+# CHECK-BE: mulhd 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x92]
+# CHECK-LE: mulhd 2, 3, 4 # encoding: [0x92,0x20,0x43,0x7c]
+ mulhd 2, 3, 4
+# CHECK-BE: mulhd. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x93]
+# CHECK-LE: mulhd. 2, 3, 4 # encoding: [0x93,0x20,0x43,0x7c]
+ mulhd. 2, 3, 4
+# CHECK-BE: mulhdu 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x12]
+# CHECK-LE: mulhdu 2, 3, 4 # encoding: [0x12,0x20,0x43,0x7c]
+ mulhdu 2, 3, 4
+# CHECK-BE: mulhdu. 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x13]
+# CHECK-LE: mulhdu. 2, 3, 4 # encoding: [0x13,0x20,0x43,0x7c]
+ mulhdu. 2, 3, 4
+
+# CHECK-BE: divd 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd2]
+# CHECK-LE: divd 2, 3, 4 # encoding: [0xd2,0x23,0x43,0x7c]
+ divd 2, 3, 4
+# CHECK-BE: divd. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0xd3]
+# CHECK-LE: divd. 2, 3, 4 # encoding: [0xd3,0x23,0x43,0x7c]
+ divd. 2, 3, 4
+# FIXME: divdo 2, 3, 4
+# FIXME: divdo. 2, 3, 4
+# CHECK-BE: divdu 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x92]
+# CHECK-LE: divdu 2, 3, 4 # encoding: [0x92,0x23,0x43,0x7c]
+ divdu 2, 3, 4
+# CHECK-BE: divdu. 2, 3, 4 # encoding: [0x7c,0x43,0x23,0x93]
+# CHECK-LE: divdu. 2, 3, 4 # encoding: [0x93,0x23,0x43,0x7c]
+ divdu. 2, 3, 4
+# FIXME: divduo 2, 3, 4
+# FIXME: divduo. 2, 3, 4
+# FIXME: divde 2, 3, 4
+# FIXME: divde. 2, 3, 4
+# FIXME: divdeo 2, 3, 4
+# FIXME: divdeo. 2, 3, 4
+# FIXME: divdeu 2, 3, 4
+# FIXME: divdeu. 2, 3, 4
+# FIXME: divdeuo 2, 3, 4
+# FIXME: divdeuo. 2, 3, 4
# Fixed-point compare instructions
-# CHECK: cmpdi 2, 3, 128 # encoding: [0x2d,0x23,0x00,0x80]
- cmpi 2, 1, 3, 128
-# CHECK: cmpd 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x00]
- cmp 2, 1, 3, 4
-# CHECK: cmpldi 2, 3, 128 # encoding: [0x29,0x23,0x00,0x80]
- cmpli 2, 1, 3, 128
-# CHECK: cmpld 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x40]
- cmpl 2, 1, 3, 4
-
-# CHECK: cmpwi 2, 3, 128 # encoding: [0x2d,0x03,0x00,0x80]
- cmpi 2, 0, 3, 128
-# CHECK: cmpw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x00]
- cmp 2, 0, 3, 4
-# CHECK: cmplwi 2, 3, 128 # encoding: [0x29,0x03,0x00,0x80]
- cmpli 2, 0, 3, 128
-# CHECK: cmplw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x40]
- cmpl 2, 0, 3, 4
+# CHECK-BE: cmpdi 2, 3, 128 # encoding: [0x2d,0x23,0x00,0x80]
+# CHECK-LE: cmpdi 2, 3, 128 # encoding: [0x80,0x00,0x23,0x2d]
+ cmpi 2, 1, 3, 128
+# CHECK-BE: cmpd 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x00]
+# CHECK-LE: cmpd 2, 3, 4 # encoding: [0x00,0x20,0x23,0x7d]
+ cmp 2, 1, 3, 4
+# CHECK-BE: cmpldi 2, 3, 128 # encoding: [0x29,0x23,0x00,0x80]
+# CHECK-LE: cmpldi 2, 3, 128 # encoding: [0x80,0x00,0x23,0x29]
+ cmpli 2, 1, 3, 128
+# CHECK-BE: cmpld 2, 3, 4 # encoding: [0x7d,0x23,0x20,0x40]
+# CHECK-LE: cmpld 2, 3, 4 # encoding: [0x40,0x20,0x23,0x7d]
+ cmpl 2, 1, 3, 4
+
+# CHECK-BE: cmpwi 2, 3, 128 # encoding: [0x2d,0x03,0x00,0x80]
+# CHECK-LE: cmpwi 2, 3, 128 # encoding: [0x80,0x00,0x03,0x2d]
+ cmpi 2, 0, 3, 128
+# CHECK-BE: cmpw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x00]
+# CHECK-LE: cmpw 2, 3, 4 # encoding: [0x00,0x20,0x03,0x7d]
+ cmp 2, 0, 3, 4
+# CHECK-BE: cmplwi 2, 3, 128 # encoding: [0x29,0x03,0x00,0x80]
+# CHECK-LE: cmplwi 2, 3, 128 # encoding: [0x80,0x00,0x03,0x29]
+ cmpli 2, 0, 3, 128
+# CHECK-BE: cmplw 2, 3, 4 # encoding: [0x7d,0x03,0x20,0x40]
+# CHECK-LE: cmplw 2, 3, 4 # encoding: [0x40,0x20,0x03,0x7d]
+ cmpl 2, 0, 3, 4
# Fixed-point trap instructions
-# CHECK: twi 2, 3, 4 # encoding: [0x0c,0x43,0x00,0x04]
- twi 2, 3, 4
-# CHECK: tw 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x08]
- tw 2, 3, 4
-# CHECK: tdi 2, 3, 4 # encoding: [0x08,0x43,0x00,0x04]
- tdi 2, 3, 4
-# CHECK: td 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x88]
- td 2, 3, 4
+# CHECK-BE: twi 2, 3, 4 # encoding: [0x0c,0x43,0x00,0x04]
+# CHECK-LE: twi 2, 3, 4 # encoding: [0x04,0x00,0x43,0x0c]
+ twi 2, 3, 4
+# CHECK-BE: tw 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x08]
+# CHECK-LE: tw 2, 3, 4 # encoding: [0x08,0x20,0x43,0x7c]
+ tw 2, 3, 4
+# CHECK-BE: tdi 2, 3, 4 # encoding: [0x08,0x43,0x00,0x04]
+# CHECK-LE: tdi 2, 3, 4 # encoding: [0x04,0x00,0x43,0x08]
+ tdi 2, 3, 4
+# CHECK-BE: td 2, 3, 4 # encoding: [0x7c,0x43,0x20,0x88]
+# CHECK-LE: td 2, 3, 4 # encoding: [0x88,0x20,0x43,0x7c]
+ td 2, 3, 4
# Fixed-point select
-# CHECK: isel 2, 3, 4, 5 # encoding: [0x7c,0x43,0x21,0x5e]
- isel 2, 3, 4, 5
+# CHECK-BE: isel 2, 3, 4, 5 # encoding: [0x7c,0x43,0x21,0x5e]
+# CHECK-LE: isel 2, 3, 4, 5 # encoding: [0x5e,0x21,0x43,0x7c]
+ isel 2, 3, 4, 5
# Fixed-point logical instructions
-# CHECK: andi. 2, 3, 128 # encoding: [0x70,0x62,0x00,0x80]
- andi. 2, 3, 128
-# CHECK: andis. 2, 3, 128 # encoding: [0x74,0x62,0x00,0x80]
- andis. 2, 3, 128
-# CHECK: ori 2, 3, 128 # encoding: [0x60,0x62,0x00,0x80]
- ori 2, 3, 128
-# CHECK: oris 2, 3, 128 # encoding: [0x64,0x62,0x00,0x80]
- oris 2, 3, 128
-# CHECK: xori 2, 3, 128 # encoding: [0x68,0x62,0x00,0x80]
- xori 2, 3, 128
-# CHECK: xoris 2, 3, 128 # encoding: [0x6c,0x62,0x00,0x80]
- xoris 2, 3, 128
-# CHECK: and 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x38]
- and 2, 3, 4
-# CHECK: and. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x39]
- and. 2, 3, 4
-# CHECK: xor 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x78]
- xor 2, 3, 4
-# CHECK: xor. 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x79]
- xor. 2, 3, 4
-# CHECK: nand 2, 3, 4 # encoding: [0x7c,0x62,0x23,0xb8]
- nand 2, 3, 4
-# CHECK: nand. 2, 3, 4 # encoding: [0x7c,0x62,0x23,0xb9]
- nand. 2, 3, 4
-# CHECK: or 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x78]
- or 2, 3, 4
-# CHECK: or. 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x79]
- or. 2, 3, 4
-# CHECK: nor 2, 3, 4 # encoding: [0x7c,0x62,0x20,0xf8]
- nor 2, 3, 4
-# CHECK: nor. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0xf9]
- nor. 2, 3, 4
-# CHECK: eqv 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x38]
- eqv 2, 3, 4
-# CHECK: eqv. 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x39]
- eqv. 2, 3, 4
-# CHECK: andc 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x78]
- andc 2, 3, 4
-# CHECK: andc. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x79]
- andc. 2, 3, 4
-# CHECK: orc 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x38]
- orc 2, 3, 4
-# CHECK: orc. 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x39]
- orc. 2, 3, 4
-
-# CHECK: extsb 2, 3 # encoding: [0x7c,0x62,0x07,0x74]
- extsb 2, 3
-# CHECK: extsb. 2, 3 # encoding: [0x7c,0x62,0x07,0x75]
- extsb. 2, 3
-# CHECK: extsh 2, 3 # encoding: [0x7c,0x62,0x07,0x34]
- extsh 2, 3
-# CHECK: extsh. 2, 3 # encoding: [0x7c,0x62,0x07,0x35]
- extsh. 2, 3
-
-# CHECK: cntlzw 2, 3 # encoding: [0x7c,0x62,0x00,0x34]
- cntlzw 2, 3
-# CHECK: cntlzw. 2, 3 # encoding: [0x7c,0x62,0x00,0x35]
- cntlzw. 2, 3
-# FIXME: cmpb 2, 3, 4
-# FIXME: popcntb 2, 3
-# CHECK: popcntw 2, 3 # encoding: [0x7c,0x62,0x02,0xf4]
- popcntw 2, 3
-# FIXME: prtyd 2, 3
-# FIXME: prtyw 2, 3
-
-# CHECK: extsw 2, 3 # encoding: [0x7c,0x62,0x07,0xb4]
- extsw 2, 3
-# CHECK: extsw. 2, 3 # encoding: [0x7c,0x62,0x07,0xb5]
- extsw. 2, 3
-
-# CHECK: cntlzd 2, 3 # encoding: [0x7c,0x62,0x00,0x74]
- cntlzd 2, 3
-# CHECK: cntlzd. 2, 3 # encoding: [0x7c,0x62,0x00,0x75]
- cntlzd. 2, 3
-# CHECK: popcntd 2, 3 # encoding: [0x7c,0x62,0x03,0xf4]
- popcntd 2, 3
-# FIXME: bpermd 2, 3, 4
+# CHECK-BE: andi. 2, 3, 128 # encoding: [0x70,0x62,0x00,0x80]
+# CHECK-LE: andi. 2, 3, 128 # encoding: [0x80,0x00,0x62,0x70]
+ andi. 2, 3, 128
+# CHECK-BE: andis. 2, 3, 128 # encoding: [0x74,0x62,0x00,0x80]
+# CHECK-LE: andis. 2, 3, 128 # encoding: [0x80,0x00,0x62,0x74]
+ andis. 2, 3, 128
+# CHECK-BE: ori 2, 3, 128 # encoding: [0x60,0x62,0x00,0x80]
+# CHECK-LE: ori 2, 3, 128 # encoding: [0x80,0x00,0x62,0x60]
+ ori 2, 3, 128
+# CHECK-BE: oris 2, 3, 128 # encoding: [0x64,0x62,0x00,0x80]
+# CHECK-LE: oris 2, 3, 128 # encoding: [0x80,0x00,0x62,0x64]
+ oris 2, 3, 128
+# CHECK-BE: xori 2, 3, 128 # encoding: [0x68,0x62,0x00,0x80]
+# CHECK-LE: xori 2, 3, 128 # encoding: [0x80,0x00,0x62,0x68]
+ xori 2, 3, 128
+# CHECK-BE: xoris 2, 3, 128 # encoding: [0x6c,0x62,0x00,0x80]
+# CHECK-LE: xoris 2, 3, 128 # encoding: [0x80,0x00,0x62,0x6c]
+ xoris 2, 3, 128
+# CHECK-BE: and 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x38]
+# CHECK-LE: and 2, 3, 4 # encoding: [0x38,0x20,0x62,0x7c]
+ and 2, 3, 4
+# CHECK-BE: and. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x39]
+# CHECK-LE: and. 2, 3, 4 # encoding: [0x39,0x20,0x62,0x7c]
+ and. 2, 3, 4
+# CHECK-BE: xor 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x78]
+# CHECK-LE: xor 2, 3, 4 # encoding: [0x78,0x22,0x62,0x7c]
+ xor 2, 3, 4
+# CHECK-BE: xor. 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x79]
+# CHECK-LE: xor. 2, 3, 4 # encoding: [0x79,0x22,0x62,0x7c]
+ xor. 2, 3, 4
+# CHECK-BE: nand 2, 3, 4 # encoding: [0x7c,0x62,0x23,0xb8]
+# CHECK-LE: nand 2, 3, 4 # encoding: [0xb8,0x23,0x62,0x7c]
+ nand 2, 3, 4
+# CHECK-BE: nand. 2, 3, 4 # encoding: [0x7c,0x62,0x23,0xb9]
+# CHECK-LE: nand. 2, 3, 4 # encoding: [0xb9,0x23,0x62,0x7c]
+ nand. 2, 3, 4
+# CHECK-BE: or 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x78]
+# CHECK-LE: or 2, 3, 4 # encoding: [0x78,0x23,0x62,0x7c]
+ or 2, 3, 4
+# CHECK-BE: or. 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x79]
+# CHECK-LE: or. 2, 3, 4 # encoding: [0x79,0x23,0x62,0x7c]
+ or. 2, 3, 4
+# CHECK-BE: nor 2, 3, 4 # encoding: [0x7c,0x62,0x20,0xf8]
+# CHECK-LE: nor 2, 3, 4 # encoding: [0xf8,0x20,0x62,0x7c]
+ nor 2, 3, 4
+# CHECK-BE: nor. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0xf9]
+# CHECK-LE: nor. 2, 3, 4 # encoding: [0xf9,0x20,0x62,0x7c]
+ nor. 2, 3, 4
+# CHECK-BE: eqv 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x38]
+# CHECK-LE: eqv 2, 3, 4 # encoding: [0x38,0x22,0x62,0x7c]
+ eqv 2, 3, 4
+# CHECK-BE: eqv. 2, 3, 4 # encoding: [0x7c,0x62,0x22,0x39]
+# CHECK-LE: eqv. 2, 3, 4 # encoding: [0x39,0x22,0x62,0x7c]
+ eqv. 2, 3, 4
+# CHECK-BE: andc 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x78]
+# CHECK-LE: andc 2, 3, 4 # encoding: [0x78,0x20,0x62,0x7c]
+ andc 2, 3, 4
+# CHECK-BE: andc. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x79]
+# CHECK-LE: andc. 2, 3, 4 # encoding: [0x79,0x20,0x62,0x7c]
+ andc. 2, 3, 4
+# CHECK-BE: orc 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x38]
+# CHECK-LE: orc 2, 3, 4 # encoding: [0x38,0x23,0x62,0x7c]
+ orc 2, 3, 4
+# CHECK-BE: orc. 2, 3, 4 # encoding: [0x7c,0x62,0x23,0x39]
+# CHECK-LE: orc. 2, 3, 4 # encoding: [0x39,0x23,0x62,0x7c]
+ orc. 2, 3, 4
+
+# CHECK-BE: extsb 2, 3 # encoding: [0x7c,0x62,0x07,0x74]
+# CHECK-LE: extsb 2, 3 # encoding: [0x74,0x07,0x62,0x7c]
+ extsb 2, 3
+# CHECK-BE: extsb. 2, 3 # encoding: [0x7c,0x62,0x07,0x75]
+# CHECK-LE: extsb. 2, 3 # encoding: [0x75,0x07,0x62,0x7c]
+ extsb. 2, 3
+# CHECK-BE: extsh 2, 3 # encoding: [0x7c,0x62,0x07,0x34]
+# CHECK-LE: extsh 2, 3 # encoding: [0x34,0x07,0x62,0x7c]
+ extsh 2, 3
+# CHECK-BE: extsh. 2, 3 # encoding: [0x7c,0x62,0x07,0x35]
+# CHECK-LE: extsh. 2, 3 # encoding: [0x35,0x07,0x62,0x7c]
+ extsh. 2, 3
+
+# CHECK-BE: cntlzw 2, 3 # encoding: [0x7c,0x62,0x00,0x34]
+# CHECK-LE: cntlzw 2, 3 # encoding: [0x34,0x00,0x62,0x7c]
+ cntlzw 2, 3
+# CHECK-BE: cntlzw. 2, 3 # encoding: [0x7c,0x62,0x00,0x35]
+# CHECK-LE: cntlzw. 2, 3 # encoding: [0x35,0x00,0x62,0x7c]
+ cntlzw. 2, 3
+# FIXME: cmpb 2, 3, 4
+# FIXME: popcntb 2, 3
+# CHECK-BE: popcntw 2, 3 # encoding: [0x7c,0x62,0x02,0xf4]
+# CHECK-LE: popcntw 2, 3 # encoding: [0xf4,0x02,0x62,0x7c]
+ popcntw 2, 3
+# FIXME: prtyd 2, 3
+# FIXME: prtyw 2, 3
+
+# CHECK-BE: extsw 2, 3 # encoding: [0x7c,0x62,0x07,0xb4]
+# CHECK-LE: extsw 2, 3 # encoding: [0xb4,0x07,0x62,0x7c]
+ extsw 2, 3
+# CHECK-BE: extsw. 2, 3 # encoding: [0x7c,0x62,0x07,0xb5]
+# CHECK-LE: extsw. 2, 3 # encoding: [0xb5,0x07,0x62,0x7c]
+ extsw. 2, 3
+
+# CHECK-BE: cntlzd 2, 3 # encoding: [0x7c,0x62,0x00,0x74]
+# CHECK-LE: cntlzd 2, 3 # encoding: [0x74,0x00,0x62,0x7c]
+ cntlzd 2, 3
+# CHECK-BE: cntlzd. 2, 3 # encoding: [0x7c,0x62,0x00,0x75]
+# CHECK-LE: cntlzd. 2, 3 # encoding: [0x75,0x00,0x62,0x7c]
+ cntlzd. 2, 3
+# CHECK-BE: popcntd 2, 3 # encoding: [0x7c,0x62,0x03,0xf4]
+# CHECK-LE: popcntd 2, 3 # encoding: [0xf4,0x03,0x62,0x7c]
+ popcntd 2, 3
+# FIXME: bpermd 2, 3, 4
# Fixed-point rotate and shift instructions
-# CHECK: rlwinm 2, 3, 4, 5, 6 # encoding: [0x54,0x62,0x21,0x4c]
- rlwinm 2, 3, 4, 5, 6
-# CHECK: rlwinm. 2, 3, 4, 5, 6 # encoding: [0x54,0x62,0x21,0x4d]
- rlwinm. 2, 3, 4, 5, 6
-# CHECK: rlwnm 2, 3, 4, 5, 6 # encoding: [0x5c,0x62,0x21,0x4c]
- rlwnm 2, 3, 4, 5, 6
-# CHECK: rlwnm. 2, 3, 4, 5, 6 # encoding: [0x5c,0x62,0x21,0x4d]
- rlwnm. 2, 3, 4, 5, 6
-# CHECK: rlwimi 2, 3, 4, 5, 6 # encoding: [0x50,0x62,0x21,0x4c]
- rlwimi 2, 3, 4, 5, 6
-# CHECK: rlwimi. 2, 3, 4, 5, 6 # encoding: [0x50,0x62,0x21,0x4d]
- rlwimi. 2, 3, 4, 5, 6
-# CHECK: rldicl 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x40]
- rldicl 2, 3, 4, 5
-# CHECK: rldicl. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x41]
- rldicl. 2, 3, 4, 5
-# CHECK: rldicr 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x44]
- rldicr 2, 3, 4, 5
-# CHECK: rldicr. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x45]
- rldicr. 2, 3, 4, 5
-# CHECK: rldic 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x48]
- rldic 2, 3, 4, 5
-# CHECK: rldic. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x49]
- rldic. 2, 3, 4, 5
-# CHECK: rldcl 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x50]
- rldcl 2, 3, 4, 5
-# CHECK: rldcl. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x51]
- rldcl. 2, 3, 4, 5
-# CHECK: rldcr 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x52]
- rldcr 2, 3, 4, 5
-# CHECK: rldcr. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x53]
- rldcr. 2, 3, 4, 5
-# CHECK: rldimi 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x4c]
- rldimi 2, 3, 4, 5
-# CHECK: rldimi. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x4d]
- rldimi. 2, 3, 4, 5
-
-# CHECK: slw 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x30]
- slw 2, 3, 4
-# CHECK: slw. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x31]
- slw. 2, 3, 4
-# CHECK: srw 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x30]
- srw 2, 3, 4
-# CHECK: srw. 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x31]
- srw. 2, 3, 4
-# CHECK: srawi 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x70]
- srawi 2, 3, 4
-# CHECK: srawi. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x71]
- srawi. 2, 3, 4
-# CHECK: sraw 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x30]
- sraw 2, 3, 4
-# CHECK: sraw. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x31]
- sraw. 2, 3, 4
-# CHECK: sld 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x36]
- sld 2, 3, 4
-# CHECK: sld. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x37]
- sld. 2, 3, 4
-# CHECK: srd 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x36]
- srd 2, 3, 4
-# CHECK: srd. 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x37]
- srd. 2, 3, 4
-# CHECK: sradi 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x74]
- sradi 2, 3, 4
-# CHECK: sradi. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x75]
- sradi. 2, 3, 4
-# CHECK: srad 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x34]
- srad 2, 3, 4
-# CHECK: srad. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x35]
- srad. 2, 3, 4
+# CHECK-BE: rlwinm 2, 3, 4, 5, 6 # encoding: [0x54,0x62,0x21,0x4c]
+# CHECK-LE: rlwinm 2, 3, 4, 5, 6 # encoding: [0x4c,0x21,0x62,0x54]
+ rlwinm 2, 3, 4, 5, 6
+# CHECK-BE: rlwinm. 2, 3, 4, 5, 6 # encoding: [0x54,0x62,0x21,0x4d]
+# CHECK-LE: rlwinm. 2, 3, 4, 5, 6 # encoding: [0x4d,0x21,0x62,0x54]
+ rlwinm. 2, 3, 4, 5, 6
+# CHECK-BE: rlwnm 2, 3, 4, 5, 6 # encoding: [0x5c,0x62,0x21,0x4c]
+# CHECK-LE: rlwnm 2, 3, 4, 5, 6 # encoding: [0x4c,0x21,0x62,0x5c]
+ rlwnm 2, 3, 4, 5, 6
+# CHECK-BE: rlwnm. 2, 3, 4, 5, 6 # encoding: [0x5c,0x62,0x21,0x4d]
+# CHECK-LE: rlwnm. 2, 3, 4, 5, 6 # encoding: [0x4d,0x21,0x62,0x5c]
+ rlwnm. 2, 3, 4, 5, 6
+# CHECK-BE: rlwimi 2, 3, 4, 5, 6 # encoding: [0x50,0x62,0x21,0x4c]
+# CHECK-LE: rlwimi 2, 3, 4, 5, 6 # encoding: [0x4c,0x21,0x62,0x50]
+ rlwimi 2, 3, 4, 5, 6
+# CHECK-BE: rlwimi. 2, 3, 4, 5, 6 # encoding: [0x50,0x62,0x21,0x4d]
+# CHECK-LE: rlwimi. 2, 3, 4, 5, 6 # encoding: [0x4d,0x21,0x62,0x50]
+ rlwimi. 2, 3, 4, 5, 6
+# CHECK-BE: rldicl 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x40]
+# CHECK-LE: rldicl 2, 3, 4, 5 # encoding: [0x40,0x21,0x62,0x78]
+ rldicl 2, 3, 4, 5
+# CHECK-BE: rldicl. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x41]
+# CHECK-LE: rldicl. 2, 3, 4, 5 # encoding: [0x41,0x21,0x62,0x78]
+ rldicl. 2, 3, 4, 5
+# CHECK-BE: rldicr 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x44]
+# CHECK-LE: rldicr 2, 3, 4, 5 # encoding: [0x44,0x21,0x62,0x78]
+ rldicr 2, 3, 4, 5
+# CHECK-BE: rldicr. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x45]
+# CHECK-LE: rldicr. 2, 3, 4, 5 # encoding: [0x45,0x21,0x62,0x78]
+ rldicr. 2, 3, 4, 5
+# CHECK-BE: rldic 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x48]
+# CHECK-LE: rldic 2, 3, 4, 5 # encoding: [0x48,0x21,0x62,0x78]
+ rldic 2, 3, 4, 5
+# CHECK-BE: rldic. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x49]
+# CHECK-LE: rldic. 2, 3, 4, 5 # encoding: [0x49,0x21,0x62,0x78]
+ rldic. 2, 3, 4, 5
+# CHECK-BE: rldcl 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x50]
+# CHECK-LE: rldcl 2, 3, 4, 5 # encoding: [0x50,0x21,0x62,0x78]
+ rldcl 2, 3, 4, 5
+# CHECK-BE: rldcl. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x51]
+# CHECK-LE: rldcl. 2, 3, 4, 5 # encoding: [0x51,0x21,0x62,0x78]
+ rldcl. 2, 3, 4, 5
+# CHECK-BE: rldcr 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x52]
+# CHECK-LE: rldcr 2, 3, 4, 5 # encoding: [0x52,0x21,0x62,0x78]
+ rldcr 2, 3, 4, 5
+# CHECK-BE: rldcr. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x53]
+# CHECK-LE: rldcr. 2, 3, 4, 5 # encoding: [0x53,0x21,0x62,0x78]
+ rldcr. 2, 3, 4, 5
+# CHECK-BE: rldimi 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x4c]
+# CHECK-LE: rldimi 2, 3, 4, 5 # encoding: [0x4c,0x21,0x62,0x78]
+ rldimi 2, 3, 4, 5
+# CHECK-BE: rldimi. 2, 3, 4, 5 # encoding: [0x78,0x62,0x21,0x4d]
+# CHECK-LE: rldimi. 2, 3, 4, 5 # encoding: [0x4d,0x21,0x62,0x78]
+ rldimi. 2, 3, 4, 5
+
+# CHECK-BE: slw 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x30]
+# CHECK-LE: slw 2, 3, 4 # encoding: [0x30,0x20,0x62,0x7c]
+ slw 2, 3, 4
+# CHECK-BE: slw. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x31]
+# CHECK-LE: slw. 2, 3, 4 # encoding: [0x31,0x20,0x62,0x7c]
+ slw. 2, 3, 4
+# CHECK-BE: srw 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x30]
+# CHECK-LE: srw 2, 3, 4 # encoding: [0x30,0x24,0x62,0x7c]
+ srw 2, 3, 4
+# CHECK-BE: srw. 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x31]
+# CHECK-LE: srw. 2, 3, 4 # encoding: [0x31,0x24,0x62,0x7c]
+ srw. 2, 3, 4
+# CHECK-BE: srawi 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x70]
+# CHECK-LE: srawi 2, 3, 4 # encoding: [0x70,0x26,0x62,0x7c]
+ srawi 2, 3, 4
+# CHECK-BE: srawi. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x71]
+# CHECK-LE: srawi. 2, 3, 4 # encoding: [0x71,0x26,0x62,0x7c]
+ srawi. 2, 3, 4
+# CHECK-BE: sraw 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x30]
+# CHECK-LE: sraw 2, 3, 4 # encoding: [0x30,0x26,0x62,0x7c]
+ sraw 2, 3, 4
+# CHECK-BE: sraw. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x31]
+# CHECK-LE: sraw. 2, 3, 4 # encoding: [0x31,0x26,0x62,0x7c]
+ sraw. 2, 3, 4
+# CHECK-BE: sld 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x36]
+# CHECK-LE: sld 2, 3, 4 # encoding: [0x36,0x20,0x62,0x7c]
+ sld 2, 3, 4
+# CHECK-BE: sld. 2, 3, 4 # encoding: [0x7c,0x62,0x20,0x37]
+# CHECK-LE: sld. 2, 3, 4 # encoding: [0x37,0x20,0x62,0x7c]
+ sld. 2, 3, 4
+# CHECK-BE: srd 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x36]
+# CHECK-LE: srd 2, 3, 4 # encoding: [0x36,0x24,0x62,0x7c]
+ srd 2, 3, 4
+# CHECK-BE: srd. 2, 3, 4 # encoding: [0x7c,0x62,0x24,0x37]
+# CHECK-LE: srd. 2, 3, 4 # encoding: [0x37,0x24,0x62,0x7c]
+ srd. 2, 3, 4
+# CHECK-BE: sradi 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x74]
+# CHECK-LE: sradi 2, 3, 4 # encoding: [0x74,0x26,0x62,0x7c]
+ sradi 2, 3, 4
+# CHECK-BE: sradi. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x75]
+# CHECK-LE: sradi. 2, 3, 4 # encoding: [0x75,0x26,0x62,0x7c]
+ sradi. 2, 3, 4
+# CHECK-BE: srad 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x34]
+# CHECK-LE: srad 2, 3, 4 # encoding: [0x34,0x26,0x62,0x7c]
+ srad 2, 3, 4
+# CHECK-BE: srad. 2, 3, 4 # encoding: [0x7c,0x62,0x26,0x35]
+# CHECK-LE: srad. 2, 3, 4 # encoding: [0x35,0x26,0x62,0x7c]
+ srad. 2, 3, 4
# FIXME: BCD assist instructions
# Move to/from system register instructions
-# CHECK: mtspr 600, 2 # encoding: [0x7c,0x58,0x93,0xa6]
- mtspr 600, 2
-# CHECK: mfspr 2, 600 # encoding: [0x7c,0x58,0x92,0xa6]
- mfspr 2, 600
-# CHECK: mtcrf 123, 2 # encoding: [0x7c,0x47,0xb1,0x20]
- mtcrf 123, 2
-# CHECK: mfcr 2 # encoding: [0x7c,0x40,0x00,0x26]
- mfcr 2
-# CHECK: mtocrf 16, 2 # encoding: [0x7c,0x51,0x01,0x20]
- mtocrf 16, 2
-# CHECK: mfocrf 16, 8 # encoding: [0x7e,0x10,0x80,0x26]
- mfocrf 16, 8
+# CHECK-BE: mtspr 600, 2 # encoding: [0x7c,0x58,0x93,0xa6]
+# CHECK-LE: mtspr 600, 2 # encoding: [0xa6,0x93,0x58,0x7c]
+ mtspr 600, 2
+# CHECK-BE: mfspr 2, 600 # encoding: [0x7c,0x58,0x92,0xa6]
+# CHECK-LE: mfspr 2, 600 # encoding: [0xa6,0x92,0x58,0x7c]
+ mfspr 2, 600
+# CHECK-BE: mtcrf 123, 2 # encoding: [0x7c,0x47,0xb1,0x20]
+# CHECK-LE: mtcrf 123, 2 # encoding: [0x20,0xb1,0x47,0x7c]
+ mtcrf 123, 2
+# CHECK-BE: mfcr 2 # encoding: [0x7c,0x40,0x00,0x26]
+# CHECK-LE: mfcr 2 # encoding: [0x26,0x00,0x40,0x7c]
+ mfcr 2
+# CHECK-BE: mtocrf 16, 2 # encoding: [0x7c,0x51,0x01,0x20]
+# CHECK-LE: mtocrf 16, 2 # encoding: [0x20,0x01,0x51,0x7c]
+ mtocrf 16, 2
+# CHECK-BE: mfocrf 16, 8 # encoding: [0x7e,0x10,0x80,0x26]
+# CHECK-LE: mfocrf 16, 8 # encoding: [0x26,0x80,0x10,0x7e]
+ mfocrf 16, 8
diff --git a/test/MC/PowerPC/ppc64-errors.s b/test/MC/PowerPC/ppc64-errors.s
index 53197ba13476..ef5d9e8e1e12 100644
--- a/test/MC/PowerPC/ppc64-errors.s
+++ b/test/MC/PowerPC/ppc64-errors.s
@@ -1,6 +1,8 @@
# RUN: not llvm-mc -triple powerpc64-unknown-unknown < %s 2> %t
# RUN: FileCheck < %t %s
+# RUN: not llvm-mc -triple powerpc64le-unknown-unknown < %s 2> %t
+# RUN: FileCheck < %t %s
# Register operands
diff --git a/test/MC/PowerPC/ppc64-fixup-apply.s b/test/MC/PowerPC/ppc64-fixup-apply.s
index ba141e4227a1..169340524775 100644
--- a/test/MC/PowerPC/ppc64-fixup-apply.s
+++ b/test/MC/PowerPC/ppc64-fixup-apply.s
@@ -1,6 +1,8 @@
# RUN: llvm-mc -triple powerpc64-unknown-unknown -filetype=obj %s | \
-# RUN: llvm-readobj -s -sd | FileCheck %s
+# RUN: llvm-readobj -s -sd | FileCheck -check-prefix=CHECK -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -s -sd | FileCheck -check-prefix=CHECK -check-prefix=CHECK-LE %s
# This checks that fixups that can be resolved within the same
# object file are applied correctly.
@@ -72,10 +74,14 @@ addis 1, 1, target7@highesta
# CHECK-NEXT: AddressAlignment: 4
# CHECK-NEXT: EntrySize: 0
# CHECK-NEXT: SectionData (
-# CHECK-NEXT: 0000: 38211234 3C211234 38215678 3C211234
-# CHECK-NEXT: 0010: 38214444 3C211111 38218001 3C211001
-# CHECK-NEXT: 0020: 38210008 3C210000 38214321 3C214321
-# CHECK-NEXT: 0030: 3821FFFF 3C211234 38210000 3C211235
+# CHECK-BE-NEXT: 0000: 38211234 3C211234 38215678 3C211234
+# CHECK-LE-NEXT: 0000: 34122138 3412213C 78562138 3412213C
+# CHECK-BE-NEXT: 0010: 38214444 3C211111 38218001 3C211001
+# CHECK-LE-NEXT: 0010: 44442138 1111213C 01802138 0110213C
+# CHECK-BE-NEXT: 0020: 38210008 3C210000 38214321 3C214321
+# CHECK-LE-NEXT: 0020: 08002138 0000213C 21432138 2143213C
+# CHECK-BE-NEXT: 0030: 3821FFFF 3C211234 38210000 3C211235
+# CHECK-LE-NEXT: 0030: FFFF2138 3412213C 00002138 3512213C
# CHECK-NEXT: )
# CHECK-NEXT: }
@@ -94,7 +100,8 @@ addis 1, 1, target7@highesta
# CHECK-NEXT: AddressAlignment: 4
# CHECK-NEXT: EntrySize: 0
# CHECK-NEXT: SectionData (
-# CHECK-NEXT: 0000: 12345678 9ABCDEF0 87654321 BEEF42
+# CHECK-BE-NEXT: 0000: 12345678 9ABCDEF0 87654321 BEEF42
+# CHECK-LE-NEXT: 0000: F0DEBC9A 78563412 21436587 EFBE42
# CHECK-NEXT: )
# CHECK-NEXT: }
diff --git a/test/MC/PowerPC/ppc64-fixup-explicit.s b/test/MC/PowerPC/ppc64-fixup-explicit.s
index 7c56fe882809..e0720ab5ea0a 100644
--- a/test/MC/PowerPC/ppc64-fixup-explicit.s
+++ b/test/MC/PowerPC/ppc64-fixup-explicit.s
@@ -1,46 +1,70 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# RUN: llvm-mc -triple powerpc64-unknown-unknown -filetype=obj %s | \
-# RUN: llvm-readobj -r | FileCheck %s -check-prefix=CHECK-REL
+# RUN: llvm-readobj -r | FileCheck %s -check-prefix=CHECK-BE-REL
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -r | FileCheck %s -check-prefix=CHECK-LE-REL
# GOT references must result in explicit relocations
# even if the target symbol is local.
target:
-# CHECK: addi 4, 3, target@GOT # encoding: [0x38,0x83,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16 target 0x0
- addi 4, 3, target@got
-
-# CHECK: ld 1, target@GOT(2) # encoding: [0xe8,0x22,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_DS target 0x0
- ld 1, target@got(2)
-
-# CHECK: addis 3, 2, target@got@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HA target 0x0
- addis 3, 2, target@got@ha
-
-# CHECK: addi 4, 3, target@got@l # encoding: [0x38,0x83,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
- addi 4, 3, target@got@l
-
-# CHECK: addis 3, 2, target@got@h # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HI target 0x0
- addis 3, 2, target@got@h
-
-# CHECK: lwz 1, target@got@l(3) # encoding: [0x80,0x23,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
- lwz 1, target@got@l(3)
-
-# CHECK: ld 1, target@got@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO_DS target 0x0
- ld 1, target@got@l(3)
+# CHECK-BE: addi 4, 3, target@GOT # encoding: [0x38,0x83,A,A]
+# CHECK-LE: addi 4, 3, target@GOT # encoding: [A,A,0x83,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@GOT, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16 target 0x0
+ addi 4, 3, target@got
+
+# CHECK-BE: ld 1, target@GOT(2) # encoding: [0xe8,0x22,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@GOT(2) # encoding: [0bAAAAAA00,A,0x22,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@GOT, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_DS target 0x0
+ ld 1, target@got(2)
+
+# CHECK-BE: addis 3, 2, target@got@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_HA target 0x0
+ addis 3, 2, target@got@ha
+
+# CHECK-BE: addi 4, 3, target@got@l # encoding: [0x38,0x83,A,A]
+# CHECK-LE: addi 4, 3, target@got@l # encoding: [A,A,0x83,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_LO target 0x0
+ addi 4, 3, target@got@l
+
+# CHECK-BE: addis 3, 2, target@got@h # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@h # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_HI target 0x0
+ addis 3, 2, target@got@h
+
+# CHECK-BE: lwz 1, target@got@l(3) # encoding: [0x80,0x23,A,A]
+# CHECK-LE: lwz 1, target@got@l(3) # encoding: [A,A,0x23,0x80]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_LO target 0x0
+ lwz 1, target@got@l(3)
+
+# CHECK-BE: ld 1, target@got@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@got@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_LO_DS target 0x0
+ ld 1, target@got@l(3)
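
Throughout this file the CHECK-BE and CHECK-LE lines differ only in where the half16 fixup sits inside the 4-byte instruction (offset 2 on big-endian, offset 0 on little-endian) and, consequently, in the low nibble of the relocation offset ([26AE] versus [048C]): the 16-bit immediate is the low-order halfword, stored last on BE and first on LE. A short Python sketch of that offset rule (the function name is mine, for illustration only):

def half16_reloc_offset(insn_addr, big_endian):
    # The 16-bit immediate is the low-order halfword of the instruction, so
    # the relocation points at the last two bytes on big-endian and at the
    # first two bytes on little-endian.
    return insn_addr + (2 if big_endian else 0)

for addr in (0x0, 0x4, 0x8):
    print(hex(half16_reloc_offset(addr, True)),   # 0x2, 0x6, 0xa -> the [26AE] pattern
          hex(half16_reloc_offset(addr, False)))  # 0x0, 0x4, 0x8 -> the [048C] pattern
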
diff --git a/test/MC/PowerPC/ppc64-fixups.s b/test/MC/PowerPC/ppc64-fixups.s
index a0750664c2c7..d3769f525805 100644
--- a/test/MC/PowerPC/ppc64-fixups.s
+++ b/test/MC/PowerPC/ppc64-fixups.s
@@ -1,448 +1,713 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# RUN: llvm-mc -triple powerpc64-unknown-unknown -filetype=obj %s | \
-# RUN: llvm-readobj -r | FileCheck %s -check-prefix=CHECK-REL
-
-# CHECK: b target # encoding: [0b010010AA,A,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
-# CHECK-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 target 0x0
- b target
-
-# CHECK: ba target # encoding: [0b010010AA,A,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
-# CHECK-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR24 target 0x0
- ba target
-
-# CHECK: beq 0, target # encoding: [0x41,0x82,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
-# CHECK-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL14 target 0x0
- beq target
-
-# CHECK: beqa 0, target # encoding: [0x41,0x82,A,0bAAAAAA10]
-# CHECK-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
-# CHECK-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR14 target 0x0
- beqa target
-
-
-# CHECK: li 3, target@l # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
- li 3, target@l
-
-# CHECK: addis 3, 3, target@ha # encoding: [0x3c,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HA target 0x0
- addis 3, 3, target@ha
-
-# CHECK: lis 3, target@ha # encoding: [0x3c,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HA target 0x0
- lis 3, target@ha
-
-# CHECK: addi 4, 3, target@l # encoding: [0x38,0x83,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
- addi 4, 3, target@l
-
-# CHECK: li 3, target@ha # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HA target 0x0
- li 3, target@ha
-
-# CHECK: lis 3, target@l # encoding: [0x3c,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
- lis 3, target@l
-
-# CHECK: li 3, target@h # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HI target 0x0
- li 3, target@h
-
-# CHECK: lis 3, target@h # encoding: [0x3c,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HI target 0x0
- lis 3, target@h
-
-# CHECK: li 3, target@higher # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@higher, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHER target 0x0
- li 3, target@higher
-
-# CHECK: lis 3, target@highest # encoding: [0x3c,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@highest, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHEST target 0x0
- lis 3, target@highest
-
-# CHECK: li 3, target@highera # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@highera, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHERA target 0x0
- li 3, target@highera
-
-# CHECK: lis 3, target@highesta # encoding: [0x3c,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@highesta, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHESTA target 0x0
- lis 3, target@highesta
-
-# CHECK: lwz 1, target@l(3) # encoding: [0x80,0x23,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
- lwz 1, target@l(3)
-
-# CHECK: lwz 1, target(3) # encoding: [0x80,0x23,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16 target 0x0
- lwz 1, target(3)
-
-# CHECK: ld 1, target@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO_DS target 0x0
- ld 1, target@l(3)
-
-# CHECK: ld 1, target(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_DS target 0x0
- ld 1, target(3)
+# RUN: llvm-readobj -r | FileCheck %s -check-prefix=CHECK-BE-REL
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -r | FileCheck %s -check-prefix=CHECK-LE-REL
+
+# CHECK-BE: b target # encoding: [0b010010AA,A,A,0bAAAAAA00]
+# CHECK-LE: b target # encoding: [0bAAAAAA00,A,A,0b010010AA]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24
+# CHECK-BE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 target 0x0
+ b target
+
+# CHECK-BE: ba target # encoding: [0b010010AA,A,A,0bAAAAAA10]
+# CHECK-LE: ba target # encoding: [0bAAAAAA10,A,A,0b010010AA]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_br24abs
+# CHECK-BE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR24 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR24 target 0x0
+ ba target
+
+# CHECK-BE: beq 0, target # encoding: [0x41,0x82,A,0bAAAAAA00]
+# CHECK-LE: beq 0, target # encoding: [0bAAAAAA00,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14
+# CHECK-BE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL14 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL14 target 0x0
+ beq target
+
+# CHECK-BE: beqa 0, target # encoding: [0x41,0x82,A,0bAAAAAA10]
+# CHECK-LE: beqa 0, target # encoding: [0bAAAAAA10,A,0x82,0x41]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_brcond14abs
+# CHECK-BE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR14 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR14 target 0x0
+ beqa target
+
+
+# CHECK-BE: li 3, target@l # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target@l # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_LO target 0x0
+ li 3, target@l
+
+# CHECK-BE: addis 3, 3, target@ha # encoding: [0x3c,0x63,A,A]
+# CHECK-LE: addis 3, 3, target@ha # encoding: [A,A,0x63,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HA target 0x0
+ addis 3, 3, target@ha
+
+# CHECK-BE: lis 3, target@ha # encoding: [0x3c,0x60,A,A]
+# CHECK-LE: lis 3, target@ha # encoding: [A,A,0x60,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HA target 0x0
+ lis 3, target@ha
+
+# CHECK-BE: addi 4, 3, target@l # encoding: [0x38,0x83,A,A]
+# CHECK-LE: addi 4, 3, target@l # encoding: [A,A,0x83,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_LO target 0x0
+ addi 4, 3, target@l
+
+# CHECK-BE: li 3, target@ha # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target@ha # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HA target 0x0
+ li 3, target@ha
+
+# CHECK-BE: lis 3, target@l # encoding: [0x3c,0x60,A,A]
+# CHECK-LE: lis 3, target@l # encoding: [A,A,0x60,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_LO target 0x0
+ lis 3, target@l
+
+# CHECK-BE: li 3, target@h # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target@h # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HI target 0x0
+ li 3, target@h
+
+# CHECK-BE: lis 3, target@h # encoding: [0x3c,0x60,A,A]
+# CHECK-LE: lis 3, target@h # encoding: [A,A,0x60,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HI target 0x0
+ lis 3, target@h
+
+# CHECK-BE: li 3, target@higher # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target@higher # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@higher, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@higher, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHER target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HIGHER target 0x0
+ li 3, target@higher
+
+# CHECK-BE: lis 3, target@highest # encoding: [0x3c,0x60,A,A]
+# CHECK-LE: lis 3, target@highest # encoding: [A,A,0x60,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@highest, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@highest, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHEST target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HIGHEST target 0x0
+ lis 3, target@highest
+
+# CHECK-BE: li 3, target@highera # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target@highera # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@highera, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@highera, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHERA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HIGHERA target 0x0
+ li 3, target@highera
+
+# CHECK-BE: lis 3, target@highesta # encoding: [0x3c,0x60,A,A]
+# CHECK-LE: lis 3, target@highesta # encoding: [A,A,0x60,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@highesta, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@highesta, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HIGHESTA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HIGHESTA target 0x0
+ lis 3, target@highesta
+
+# CHECK-BE: lwz 1, target@l(3) # encoding: [0x80,0x23,A,A]
+# CHECK-LE: lwz 1, target@l(3) # encoding: [A,A,0x23,0x80]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_LO target 0x0
+ lwz 1, target@l(3)
+
+# CHECK-BE: lwz 1, target(3) # encoding: [0x80,0x23,A,A]
+# CHECK-LE: lwz 1, target(3) # encoding: [A,A,0x23,0x80]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16 target 0x0
+ lwz 1, target(3)
+
+# CHECK-BE: ld 1, target@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_LO_DS target 0x0
+ ld 1, target@l(3)
+
+# CHECK-BE: ld 1, target(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_DS target 0x0
+ ld 1, target(3)
base:
-# CHECK: lwz 1, target-base(3) # encoding: [0x80,0x23,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target-base, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16 target 0x2
- lwz 1, target-base(3)
-
-# CHECK: li 3, target-base@h # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target-base@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16_HI target 0x6
- li 3, target-base@h
-
-# CHECK: li 3, target-base@l # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target-base@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16_LO target 0xA
- li 3, target-base@l
-
-# CHECK: li 3, target-base@ha # encoding: [0x38,0x60,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target-base@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16_HA target 0xE
- li 3, target-base@ha
-
-# CHECK: ori 3, 3, target@l # encoding: [0x60,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
- ori 3, 3, target@l
-
-# CHECK: oris 3, 3, target@h # encoding: [0x64,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HI target 0x0
- oris 3, 3, target@h
-
-# CHECK: ld 1, target@toc(2) # encoding: [0xe8,0x22,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@toc, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_DS target 0x0
- ld 1, target@toc(2)
-
-# CHECK: addis 3, 2, target@toc@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@toc@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_HA target 0x0
- addis 3, 2, target@toc@ha
-
-# CHECK: addi 4, 3, target@toc@l # encoding: [0x38,0x83,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@toc@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_LO target 0x0
- addi 4, 3, target@toc@l
-
-# CHECK: addis 3, 2, target@toc@h # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@toc@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_HI target 0x0
- addis 3, 2, target@toc@h
-
-# CHECK: lwz 1, target@toc@l(3) # encoding: [0x80,0x23,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@toc@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_LO target 0x0
- lwz 1, target@toc@l(3)
-
-# CHECK: ld 1, target@toc@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@toc@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_LO_DS target 0x0
- ld 1, target@toc@l(3)
-
-# CHECK: addi 4, 3, target@GOT # encoding: [0x38,0x83,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16 target 0x0
- addi 4, 3, target@got
-
-# CHECK: ld 1, target@GOT(2) # encoding: [0xe8,0x22,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_DS target 0x0
- ld 1, target@got(2)
-
-# CHECK: addis 3, 2, target@got@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HA target 0x0
- addis 3, 2, target@got@ha
-
-# CHECK: addi 4, 3, target@got@l # encoding: [0x38,0x83,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
- addi 4, 3, target@got@l
-
-# CHECK: addis 3, 2, target@got@h # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HI target 0x0
- addis 3, 2, target@got@h
-
-# CHECK: lwz 1, target@got@l(3) # encoding: [0x80,0x23,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
- lwz 1, target@got@l(3)
-
-# CHECK: ld 1, target@got@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO_DS target 0x0
- ld 1, target@got@l(3)
-
-
-# CHECK: addis 3, 2, target@tprel@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HA target 0x0
- addis 3, 2, target@tprel@ha
-
-# CHECK: addi 3, 3, target@tprel@l # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_LO target 0x0
- addi 3, 3, target@tprel@l
-
-# CHECK: addi 3, 3, target@tprel # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16 target 0x0
- addi 3, 3, target@tprel
-
-# CHECK: addi 3, 3, target@tprel@h # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HI target 0x0
- addi 3, 3, target@tprel@h
-
-# CHECK: addi 3, 3, target@tprel@higher # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@higher, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHER target 0x0
- addi 3, 3, target@tprel@higher
-
-# CHECK: addis 3, 2, target@tprel@highest # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@highest, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHEST target 0x0
- addis 3, 2, target@tprel@highest
-
-# CHECK: addi 3, 3, target@tprel@highera # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@highera, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHERA target 0x0
- addi 3, 3, target@tprel@highera
-
-# CHECK: addis 3, 2, target@tprel@highesta # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@highesta, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHESTA target 0x0
- addis 3, 2, target@tprel@highesta
-
-# CHECK: ld 1, target@tprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_LO_DS target 0x0
- ld 1, target@tprel@l(3)
-
-# CHECK: ld 1, target@tprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@tprel, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_DS target 0x0
- ld 1, target@tprel(3)
-
-# CHECK: addis 3, 2, target@dtprel@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HA target 0x0
- addis 3, 2, target@dtprel@ha
-
-# CHECK: addi 3, 3, target@dtprel@l # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_LO target 0x0
- addi 3, 3, target@dtprel@l
-
-# CHECK: addi 3, 3, target@dtprel # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16 target 0x0
- addi 3, 3, target@dtprel
-
-# CHECK: addi 3, 3, target@dtprel@h # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HI target 0x0
- addi 3, 3, target@dtprel@h
-
-# CHECK: addi 3, 3, target@dtprel@higher # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@higher, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHER target 0x0
- addi 3, 3, target@dtprel@higher
-
-# CHECK: addis 3, 2, target@dtprel@highest # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@highest, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHEST target 0x0
- addis 3, 2, target@dtprel@highest
-
-# CHECK: addi 3, 3, target@dtprel@highera # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@highera, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHERA target 0x0
- addi 3, 3, target@dtprel@highera
-
-# CHECK: addis 3, 2, target@dtprel@highesta # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@highesta, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHESTA target 0x0
- addis 3, 2, target@dtprel@highesta
-
-# CHECK: ld 1, target@dtprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_LO_DS target 0x0
- ld 1, target@dtprel@l(3)
-
-# CHECK: ld 1, target@dtprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@dtprel, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_DS target 0x0
- ld 1, target@dtprel(3)
-
-
-# CHECK: addis 3, 2, target@got@tprel@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tprel@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_HA target 0x0
- addis 3, 2, target@got@tprel@ha
-
-# CHECK: ld 1, target@got@tprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tprel@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_LO_DS target 0x0
- ld 1, target@got@tprel@l(3)
-
-# CHECK: addis 3, 2, target@got@tprel@h # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tprel@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_HI target 0x0
- addis 3, 2, target@got@tprel@h
-
-# CHECK: addis 3, 2, target@got@tprel@l # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tprel@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_LO_DS target 0x0
- addis 3, 2, target@got@tprel@l
-
-# CHECK: addis 3, 2, target@got@tprel # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tprel, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_DS target 0x0
- addis 3, 2, target@got@tprel
-
-# CHECK: ld 1, target@got@tprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tprel, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_DS target 0x0
- ld 1, target@got@tprel(3)
-
-# CHECK: addis 3, 2, target@got@dtprel@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@dtprel@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_HA target 0x0
- addis 3, 2, target@got@dtprel@ha
-
-# CHECK: ld 1, target@got@dtprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@dtprel@l, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_LO_DS target 0x0
- ld 1, target@got@dtprel@l(3)
-
-# CHECK: addis 3, 2, target@got@dtprel@h # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@dtprel@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_HI target 0x0
- addis 3, 2, target@got@dtprel@h
-
-# CHECK: addis 3, 2, target@got@dtprel@l # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@dtprel@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_LO_DS target 0x0
- addis 3, 2, target@got@dtprel@l
-
-# CHECK: addis 3, 2, target@got@dtprel # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@dtprel, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_DS target 0x0
- addis 3, 2, target@got@dtprel
-
-# CHECK: ld 1, target@got@dtprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@dtprel, kind: fixup_ppc_half16ds
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_DS target 0x0
- ld 1, target@got@dtprel(3)
-
-# CHECK: addis 3, 2, target@got@tlsgd@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsgd@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16_HA target 0x0
- addis 3, 2, target@got@tlsgd@ha
-
-# CHECK: addi 3, 3, target@got@tlsgd@l # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsgd@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16_LO target 0x0
- addi 3, 3, target@got@tlsgd@l
-
-# CHECK: addi 3, 3, target@got@tlsgd@h # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsgd@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16_HI target 0x0
- addi 3, 3, target@got@tlsgd@h
-
-# CHECK: addi 3, 3, target@got@tlsgd # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsgd, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16 target 0x0
- addi 3, 3, target@got@tlsgd
-
-
-# CHECK: addis 3, 2, target@got@tlsld@ha # encoding: [0x3c,0x62,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsld@ha, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16_HA target 0x0
- addis 3, 2, target@got@tlsld@ha
-
-# CHECK: addi 3, 3, target@got@tlsld@l # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsld@l, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16_LO target 0x0
- addi 3, 3, target@got@tlsld@l
-
-# CHECK: addi 3, 3, target@got@tlsld@h # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsld@h, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16_HI target 0x0
- addi 3, 3, target@got@tlsld@h
-
-# CHECK: addi 3, 3, target@got@tlsld # encoding: [0x38,0x63,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: target@got@tlsld, kind: fixup_ppc_half16
-# CHECK-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16 target 0x0
- addi 3, 3, target@got@tlsld
-
-# CHECK: bl __tls_get_addr(target@tlsgd) # encoding: [0b010010BB,B,B,0bBBBBBB01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target@tlsgd, kind: fixup_ppc_nofixup
-# CHECK-NEXT: # fixup B - offset: 0, value: __tls_get_addr, kind: fixup_ppc_br24
-# CHECK-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLSGD target 0x0
-# CHECK-REL-NEXT: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 __tls_get_addr 0x0
- bl __tls_get_addr(target@tlsgd)
-
-# CHECK: bl __tls_get_addr(target@tlsld) # encoding: [0b010010BB,B,B,0bBBBBBB01]
-# CHECK-NEXT: # fixup A - offset: 0, value: target@tlsld, kind: fixup_ppc_nofixup
-# CHECK-NEXT: # fixup B - offset: 0, value: __tls_get_addr, kind: fixup_ppc_br24
-# CHECK-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLSLD target 0x0
-# CHECK-REL-NEXT: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 __tls_get_addr 0x0
- bl __tls_get_addr(target@tlsld)
-
-# CHECK: add 3, 4, target@tls # encoding: [0x7c,0x64,0x6a,0x14]
-# CHECK-NEXT: # fixup A - offset: 0, value: target@tls, kind: fixup_ppc_nofixup
-# CHECK-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLS target 0x0
- add 3, 4, target@tls
-
+# CHECK-BE: lwz 1, target-base(3) # encoding: [0x80,0x23,A,A]
+# CHECK-LE: lwz 1, target-base(3) # encoding: [A,A,0x23,0x80]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target-base, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target-base, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16 target 0x2
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL16 target 0x0
+ lwz 1, target-base(3)
+
+# CHECK-BE: li 3, target-base@h # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target-base@h # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target-base@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target-base@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16_HI target 0x6
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL16_HI target 0x4
+ li 3, target-base@h
+
+# CHECK-BE: li 3, target-base@l # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target-base@l # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target-base@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target-base@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16_LO target 0xA
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL16_LO target 0x8
+ li 3, target-base@l
+
+# CHECK-BE: li 3, target-base@ha # encoding: [0x38,0x60,A,A]
+# CHECK-LE: li 3, target-base@ha # encoding: [A,A,0x60,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target-base@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target-base@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_REL16_HA target 0xE
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL16_HA target 0xC
+ li 3, target-base@ha
+
+# CHECK-BE: ori 3, 3, target@l # encoding: [0x60,0x63,A,A]
+# CHECK-LE: ori 3, 3, target@l # encoding: [A,A,0x63,0x60]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_LO target 0x0
+ ori 3, 3, target@l
+
+# CHECK-BE: oris 3, 3, target@h # encoding: [0x64,0x63,A,A]
+# CHECK-LE: oris 3, 3, target@h # encoding: [A,A,0x63,0x64]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_ADDR16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_ADDR16_HI target 0x0
+ oris 3, 3, target@h
+
+# CHECK-BE: ld 1, target@toc(2) # encoding: [0xe8,0x22,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@toc(2) # encoding: [0bAAAAAA00,A,0x22,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@toc, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@toc, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TOC16_DS target 0x0
+ ld 1, target@toc(2)
+
+# CHECK-BE: addis 3, 2, target@toc@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@toc@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@toc@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@toc@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TOC16_HA target 0x0
+ addis 3, 2, target@toc@ha
+
+# CHECK-BE: addi 4, 3, target@toc@l # encoding: [0x38,0x83,A,A]
+# CHECK-LE: addi 4, 3, target@toc@l # encoding: [A,A,0x83,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@toc@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@toc@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TOC16_LO target 0x0
+ addi 4, 3, target@toc@l
+
+# CHECK-BE: addis 3, 2, target@toc@h # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@toc@h # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@toc@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@toc@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TOC16_HI target 0x0
+ addis 3, 2, target@toc@h
+
+# CHECK-BE: lwz 1, target@toc@l(3) # encoding: [0x80,0x23,A,A]
+# CHECK-LE: lwz 1, target@toc@l(3) # encoding: [A,A,0x23,0x80]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@toc@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@toc@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TOC16_LO target 0x0
+ lwz 1, target@toc@l(3)
+
+# CHECK-BE: ld 1, target@toc@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@toc@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@toc@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@toc@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TOC16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TOC16_LO_DS target 0x0
+ ld 1, target@toc@l(3)
+
+# CHECK-BE: addi 4, 3, target@GOT # encoding: [0x38,0x83,A,A]
+# CHECK-LE: addi 4, 3, target@GOT # encoding: [A,A,0x83,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@GOT, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16 target 0x0
+ addi 4, 3, target@got
+
+# CHECK-BE: ld 1, target@GOT(2) # encoding: [0xe8,0x22,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@GOT(2) # encoding: [0bAAAAAA00,A,0x22,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@GOT, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@GOT, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_DS target 0x0
+ ld 1, target@got(2)
+
+# CHECK-BE: addis 3, 2, target@got@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_HA target 0x0
+ addis 3, 2, target@got@ha
+
+# CHECK-BE: addi 4, 3, target@got@l # encoding: [0x38,0x83,A,A]
+# CHECK-LE: addi 4, 3, target@got@l # encoding: [A,A,0x83,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_LO target 0x0
+ addi 4, 3, target@got@l
+
+# CHECK-BE: addis 3, 2, target@got@h # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@h # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_HI target 0x0
+ addis 3, 2, target@got@h
+
+# CHECK-BE: lwz 1, target@got@l(3) # encoding: [0x80,0x23,A,A]
+# CHECK-LE: lwz 1, target@got@l(3) # encoding: [A,A,0x23,0x80]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_LO target 0x0
+ lwz 1, target@got@l(3)
+
+# CHECK-BE: ld 1, target@got@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@got@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT16_LO_DS target 0x0
+ ld 1, target@got@l(3)
+
+
+# CHECK-BE: addis 3, 2, target@tprel@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@tprel@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_HA target 0x0
+ addis 3, 2, target@tprel@ha
+
+# CHECK-BE: addi 3, 3, target@tprel@l # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@tprel@l # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_LO target 0x0
+ addi 3, 3, target@tprel@l
+
+# CHECK-BE: addi 3, 3, target@tprel # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@tprel # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16 target 0x0
+ addi 3, 3, target@tprel
+
+# CHECK-BE: addi 3, 3, target@tprel@h # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@tprel@h # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_HI target 0x0
+ addi 3, 3, target@tprel@h
+
+# CHECK-BE: addi 3, 3, target@tprel@higher # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@tprel@higher # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@higher, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@higher, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHER target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_HIGHER target 0x0
+ addi 3, 3, target@tprel@higher
+
+# CHECK-BE: addis 3, 2, target@tprel@highest # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@tprel@highest # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@highest, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@highest, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHEST target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_HIGHEST target 0x0
+ addis 3, 2, target@tprel@highest
+
+# CHECK-BE: addi 3, 3, target@tprel@highera # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@tprel@highera # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@highera, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@highera, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHERA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_HIGHERA target 0x0
+ addi 3, 3, target@tprel@highera
+
+# CHECK-BE: addis 3, 2, target@tprel@highesta # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@tprel@highesta # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@highesta, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@highesta, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_HIGHESTA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_HIGHESTA target 0x0
+ addis 3, 2, target@tprel@highesta
+
+# CHECK-BE: ld 1, target@tprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@tprel@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_LO_DS target 0x0
+ ld 1, target@tprel@l(3)
+
+# CHECK-BE: ld 1, target@tprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@tprel(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@tprel, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tprel, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_TPREL16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TPREL16_DS target 0x0
+ ld 1, target@tprel(3)
+
+# CHECK-BE: addis 3, 2, target@dtprel@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@dtprel@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_HA target 0x0
+ addis 3, 2, target@dtprel@ha
+
+# CHECK-BE: addi 3, 3, target@dtprel@l # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@dtprel@l # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_LO target 0x0
+ addi 3, 3, target@dtprel@l
+
+# CHECK-BE: addi 3, 3, target@dtprel # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@dtprel # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16 target 0x0
+ addi 3, 3, target@dtprel
+
+# CHECK-BE: addi 3, 3, target@dtprel@h # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@dtprel@h # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_HI target 0x0
+ addi 3, 3, target@dtprel@h
+
+# CHECK-BE: addi 3, 3, target@dtprel@higher # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@dtprel@higher # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@higher, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@higher, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHER target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_HIGHER target 0x0
+ addi 3, 3, target@dtprel@higher
+
+# CHECK-BE: addis 3, 2, target@dtprel@highest # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@dtprel@highest # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@highest, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@highest, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHEST target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_HIGHEST target 0x0
+ addis 3, 2, target@dtprel@highest
+
+# CHECK-BE: addi 3, 3, target@dtprel@highera # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@dtprel@highera # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@highera, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@highera, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHERA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_HIGHERA target 0x0
+ addi 3, 3, target@dtprel@highera
+
+# CHECK-BE: addis 3, 2, target@dtprel@highesta # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@dtprel@highesta # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@highesta, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@highesta, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_HIGHESTA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_HIGHESTA target 0x0
+ addis 3, 2, target@dtprel@highesta
+
+# CHECK-BE: ld 1, target@dtprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@dtprel@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_LO_DS target 0x0
+ ld 1, target@dtprel@l(3)
+
+# CHECK-BE: ld 1, target@dtprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@dtprel(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@dtprel, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@dtprel, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_DTPREL16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_DTPREL16_DS target 0x0
+ ld 1, target@dtprel(3)
+
+
+# CHECK-BE: addis 3, 2, target@got@tprel@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@tprel@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tprel@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tprel@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TPREL16_HA target 0x0
+ addis 3, 2, target@got@tprel@ha
+
+# CHECK-BE: ld 1, target@got@tprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@got@tprel@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tprel@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tprel@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TPREL16_LO_DS target 0x0
+ ld 1, target@got@tprel@l(3)
+
+# CHECK-BE: addis 3, 2, target@got@tprel@h # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@tprel@h # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tprel@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tprel@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TPREL16_HI target 0x0
+ addis 3, 2, target@got@tprel@h
+
+# CHECK-BE: addis 3, 2, target@got@tprel@l # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@tprel@l # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tprel@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tprel@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TPREL16_LO_DS target 0x0
+ addis 3, 2, target@got@tprel@l
+
+# CHECK-BE: addis 3, 2, target@got@tprel # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@tprel # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tprel, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tprel, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TPREL16_DS target 0x0
+ addis 3, 2, target@got@tprel
+
+# CHECK-BE: ld 1, target@got@tprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@got@tprel(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tprel, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tprel, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TPREL16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TPREL16_DS target 0x0
+ ld 1, target@got@tprel(3)
+
+# CHECK-BE: addis 3, 2, target@got@dtprel@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@dtprel@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@dtprel@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@dtprel@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_DTPREL16_HA target 0x0
+ addis 3, 2, target@got@dtprel@ha
+
+# CHECK-BE: ld 1, target@got@dtprel@l(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@got@dtprel@l(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@dtprel@l, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@dtprel@l, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_DTPREL16_LO_DS target 0x0
+ ld 1, target@got@dtprel@l(3)
+
+# CHECK-BE: addis 3, 2, target@got@dtprel@h # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@dtprel@h # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@dtprel@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@dtprel@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_DTPREL16_HI target 0x0
+ addis 3, 2, target@got@dtprel@h
+
+# CHECK-BE: addis 3, 2, target@got@dtprel@l # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@dtprel@l # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@dtprel@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@dtprel@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_LO_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_DTPREL16_LO_DS target 0x0
+ addis 3, 2, target@got@dtprel@l
+
+# CHECK-BE: addis 3, 2, target@got@dtprel # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@dtprel # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@dtprel, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@dtprel, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_DTPREL16_DS target 0x0
+ addis 3, 2, target@got@dtprel
+
+# CHECK-BE: ld 1, target@got@dtprel(3) # encoding: [0xe8,0x23,A,0bAAAAAA00]
+# CHECK-LE: ld 1, target@got@dtprel(3) # encoding: [0bAAAAAA00,A,0x23,0xe8]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@dtprel, kind: fixup_ppc_half16ds
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@dtprel, kind: fixup_ppc_half16ds
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_DTPREL16_DS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_DTPREL16_DS target 0x0
+ ld 1, target@got@dtprel(3)
+
+# CHECK-BE: addis 3, 2, target@got@tlsgd@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@tlsgd@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsgd@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsgd@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSGD16_HA target 0x0
+ addis 3, 2, target@got@tlsgd@ha
+
+# CHECK-BE: addi 3, 3, target@got@tlsgd@l # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@got@tlsgd@l # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsgd@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsgd@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSGD16_LO target 0x0
+ addi 3, 3, target@got@tlsgd@l
+
+# CHECK-BE: addi 3, 3, target@got@tlsgd@h # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@got@tlsgd@h # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsgd@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsgd@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSGD16_HI target 0x0
+ addi 3, 3, target@got@tlsgd@h
+
+# CHECK-BE: addi 3, 3, target@got@tlsgd # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@got@tlsgd # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsgd, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsgd, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSGD16 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSGD16 target 0x0
+ addi 3, 3, target@got@tlsgd
+
+
+# CHECK-BE: addis 3, 2, target@got@tlsld@ha # encoding: [0x3c,0x62,A,A]
+# CHECK-LE: addis 3, 2, target@got@tlsld@ha # encoding: [A,A,0x62,0x3c]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsld@ha, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsld@ha, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16_HA target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSLD16_HA target 0x0
+ addis 3, 2, target@got@tlsld@ha
+
+# CHECK-BE: addi 3, 3, target@got@tlsld@l # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@got@tlsld@l # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsld@l, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsld@l, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16_LO target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSLD16_LO target 0x0
+ addi 3, 3, target@got@tlsld@l
+
+# CHECK-BE: addi 3, 3, target@got@tlsld@h # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@got@tlsld@h # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsld@h, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsld@h, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16_HI target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSLD16_HI target 0x0
+ addi 3, 3, target@got@tlsld@h
+
+# CHECK-BE: addi 3, 3, target@got@tlsld # encoding: [0x38,0x63,A,A]
+# CHECK-LE: addi 3, 3, target@got@tlsld # encoding: [A,A,0x63,0x38]
+# CHECK-BE-NEXT: # fixup A - offset: 2, value: target@got@tlsld, kind: fixup_ppc_half16
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@got@tlsld, kind: fixup_ppc_half16
+# CHECK-BE-REL: 0x{{[0-9A-F]*[26AE]}} R_PPC64_GOT_TLSLD16 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_GOT_TLSLD16 target 0x0
+ addi 3, 3, target@got@tlsld
+
+# CHECK-BE: bl __tls_get_addr(target@tlsgd) # encoding: [0b010010BB,B,B,0bBBBBBB01]
+# CHECK-LE: bl __tls_get_addr(target@tlsgd) # encoding: [0bBBBBBB01,B,B,0b010010BB]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target@tlsgd, kind: fixup_ppc_nofixup
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tlsgd, kind: fixup_ppc_nofixup
+# CHECK-BE-NEXT: # fixup B - offset: 0, value: __tls_get_addr, kind: fixup_ppc_br24
+# CHECK-LE-NEXT: # fixup B - offset: 0, value: __tls_get_addr, kind: fixup_ppc_br24
+# CHECK-BE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLSGD target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLSGD target 0x0
+# CHECK-BE-REL-NEXT: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 __tls_get_addr 0x0
+# CHECK-LE-REL-NEXT: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 __tls_get_addr 0x0
+ bl __tls_get_addr(target@tlsgd)
+
+# CHECK-BE: bl __tls_get_addr(target@tlsld) # encoding: [0b010010BB,B,B,0bBBBBBB01]
+# CHECK-LE: bl __tls_get_addr(target@tlsld) # encoding: [0bBBBBBB01,B,B,0b010010BB]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target@tlsld, kind: fixup_ppc_nofixup
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tlsld, kind: fixup_ppc_nofixup
+# CHECK-BE-NEXT: # fixup B - offset: 0, value: __tls_get_addr, kind: fixup_ppc_br24
+# CHECK-LE-NEXT: # fixup B - offset: 0, value: __tls_get_addr, kind: fixup_ppc_br24
+# CHECK-BE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLSLD target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLSLD target 0x0
+# CHECK-BE-REL-NEXT: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 __tls_get_addr 0x0
+# CHECK-LE-REL-NEXT: 0x{{[0-9A-F]*[048C]}} R_PPC64_REL24 __tls_get_addr 0x0
+ bl __tls_get_addr(target@tlsld)
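+# (For reference: each __tls_get_addr call above carries two fixups and so
+#  produces two relocations at the same offset, as the CHECK-*-REL lines show:
+#  the R_PPC64_TLSGD / R_PPC64_TLSLD marker against target, plus the ordinary
+#  R_PPC64_REL24 against __tls_get_addr itself.)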
+
+# CHECK-BE: add 3, 4, target@tls # encoding: [0x7c,0x64,0x6a,0x14]
+# CHECK-LE: add 3, 4, target@tls # encoding: [0x14,0x6a,0x64,0x7c]
+# CHECK-BE-NEXT: # fixup A - offset: 0, value: target@tls, kind: fixup_ppc_nofixup
+# CHECK-LE-NEXT: # fixup A - offset: 0, value: target@tls, kind: fixup_ppc_nofixup
+# CHECK-BE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLS target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[048C]}} R_PPC64_TLS target 0x0
+ add 3, 4, target@tls
+
+# Verify that fixups on constants are resolved at assemble time
+
+# CHECK-BE: ori 1, 2, 65535 # encoding: [0x60,0x41,0xff,0xff]
+# CHECK-LE: ori 1, 2, 65535 # encoding: [0xff,0xff,0x41,0x60]
+ ori 1, 2, 131071@l
+# CHECK-BE: ori 1, 2, 1 # encoding: [0x60,0x41,0x00,0x01]
+# CHECK-LE: ori 1, 2, 1 # encoding: [0x01,0x00,0x41,0x60]
+ ori 1, 2, 131071@h
+# CHECK-BE: ori 1, 2, 2 # encoding: [0x60,0x41,0x00,0x02]
+# CHECK-LE: ori 1, 2, 2 # encoding: [0x02,0x00,0x41,0x60]
+ ori 1, 2, 131071@ha
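+# (For reference, 131071 = 0x1ffff, so the immediates checked above follow
+#  from the usual PowerPC modifier arithmetic:
+#    @l  = 0x1ffff & 0xffff                    = 65535
+#    @h  = (0x1ffff >> 16) & 0xffff            = 1
+#    @ha = ((0x1ffff + 0x8000) >> 16) & 0xffff = 2 )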
# Data relocs
# llvm-mc does not show any "encoding" string for data, so we just check the relocs
-# CHECK-REL: .rela.data
+# CHECK-BE-REL: .rela.data
+# CHECK-LE-REL: .rela.data
.data
-# CHECK-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_TOC - 0x0
+# CHECK-BE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_TOC - 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_TOC - 0x0
.quad .TOC.@tocbase
-# CHECK-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_DTPMOD64 target 0x0
+# CHECK-BE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_DTPMOD64 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_DTPMOD64 target 0x0
.quad target@dtpmod
-# CHECK-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_TPREL64 target 0x0
+# CHECK-BE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_TPREL64 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_TPREL64 target 0x0
.quad target@tprel
-# CHECK-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_DTPREL64 target 0x0
+# CHECK-BE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_DTPREL64 target 0x0
+# CHECK-LE-REL: 0x{{[0-9A-F]*[08]}} R_PPC64_DTPREL64 target 0x0
.quad target@dtprel
-# Constant fixup
- ori 1, 2, 131071@l
-# CHECK: ori 1, 2, 131071@l # encoding: [0x60,0x41,A,A]
-# CHECK-NEXT: # fixup A - offset: 2, value: 131071@l, kind: fixup_ppc_half16
diff --git a/test/MC/PowerPC/ppc64-initial-cfa.s b/test/MC/PowerPC/ppc64-initial-cfa.s
index f976ae9ffa31..d0bc6b3ecd74 100644
--- a/test/MC/PowerPC/ppc64-initial-cfa.s
+++ b/test/MC/PowerPC/ppc64-initial-cfa.s
@@ -1,8 +1,13 @@
# RUN: llvm-mc -triple=powerpc64-unknown-linux-gnu -filetype=obj -relocation-model=static %s | \
-# RUN: llvm-readobj -s -sr -sd | FileCheck %s -check-prefix=STATIC
+# RUN: llvm-readobj -s -sr -sd | FileCheck %s -check-prefix=STATIC -check-prefix=STATIC-BE
# RUN: llvm-mc -triple=powerpc64-unknown-linux-gnu -filetype=obj -relocation-model=pic %s | \
-# RUN: llvm-readobj -s -sr -sd | FileCheck %s -check-prefix=PIC
+# RUN: llvm-readobj -s -sr -sd | FileCheck %s -check-prefix=PIC -check-prefix=PIC-BE
+# RUN: llvm-mc -triple=powerpc64le-unknown-linux-gnu -filetype=obj -relocation-model=static %s | \
+# RUN: llvm-readobj -s -sr -sd | FileCheck %s -check-prefix=STATIC -check-prefix=STATIC-LE
+# RUN: llvm-mc -triple=powerpc64le-unknown-linux-gnu -filetype=obj -relocation-model=pic %s | \
+# RUN: llvm-readobj -s -sr -sd | FileCheck %s -check-prefix=PIC -check-prefix=PIC-LE
+_proc:
.cfi_startproc
nop
.cfi_endproc
@@ -23,9 +28,12 @@
# STATIC-NEXT: Relocations [
# STATIC-NEXT: ]
# STATIC-NEXT: SectionData (
-# STATIC-NEXT: 0000: 00000010 00000000 017A5200 04784101
-# STATIC-NEXT: 0010: 1B0C0100 00000010 00000018 00000000
-# STATIC-NEXT: 0020: 00000004 00000000
+# STATIC-BE-NEXT: 0000: 00000010 00000000 037A5200 04784101
+# STATIC-LE-NEXT: 0000: 10000000 00000000 037A5200 04784101
+# STATIC-BE-NEXT: 0010: 1B0C0100 00000010 00000018 00000000
+# STATIC-LE-NEXT: 0010: 1B0C0100 10000000 18000000 00000000
+# STATIC-BE-NEXT: 0020: 00000004 00000000
+# STATIC-LE-NEXT: 0020: 04000000 00000000
# STATIC-NEXT: )
# STATIC-NEXT: }
@@ -61,9 +69,12 @@
# PIC-NEXT: Relocations [
# PIC-NEXT: ]
# PIC-NEXT: SectionData (
-# PIC-NEXT: 0000: 00000010 00000000 017A5200 04784101
-# PIC-NEXT: 0010: 1B0C0100 00000010 00000018 00000000
-# PIC-NEXT: 0020: 00000004 00000000
+# PIC-BE-NEXT: 0000: 00000010 00000000 037A5200 04784101
+# PIC-LE-NEXT: 0000: 10000000 00000000 037A5200 04784101
+# PIC-BE-NEXT: 0010: 1B0C0100 00000010 00000018 00000000
+# PIC-LE-NEXT: 0010: 1B0C0100 10000000 18000000 00000000
+# PIC-BE-NEXT: 0020: 00000004 00000000
+# PIC-LE-NEXT: 0020: 04000000 00000000
# PIC-NEXT: )
# PIC-NEXT: }
diff --git a/test/MC/PowerPC/ppc64-localentry-error1.s b/test/MC/PowerPC/ppc64-localentry-error1.s
new file mode 100644
index 000000000000..e47640fbeb05
--- /dev/null
+++ b/test/MC/PowerPC/ppc64-localentry-error1.s
@@ -0,0 +1,11 @@
+
+# RUN: not llvm-mc -triple powerpc64-unknown-unknown -filetype=obj < %s 2> %t
+# RUN: FileCheck < %t %s
+# RUN: not llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj < %s 2> %t
+# RUN: FileCheck < %t %s
+
+sym:
+ .localentry sym, 123
+
+# CHECK: LLVM ERROR: .localentry expression cannot be encoded.
+
diff --git a/test/MC/PowerPC/ppc64-localentry-error2.s b/test/MC/PowerPC/ppc64-localentry-error2.s
new file mode 100644
index 000000000000..b05687fe7b6f
--- /dev/null
+++ b/test/MC/PowerPC/ppc64-localentry-error2.s
@@ -0,0 +1,12 @@
+
+# RUN: not llvm-mc -triple powerpc64-unknown-unknown -filetype=obj < %s 2> %t
+# RUN: FileCheck < %t %s
+# RUN: not llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj < %s 2> %t
+# RUN: FileCheck < %t %s
+
+ .globl remote_sym
+sym:
+ .localentry sym, remote_sym
+
+# CHECK: LLVM ERROR: .localentry expression must be absolute.
+
diff --git a/test/MC/PowerPC/ppc64-localentry.s b/test/MC/PowerPC/ppc64-localentry.s
new file mode 100644
index 000000000000..6d2c12072289
--- /dev/null
+++ b/test/MC/PowerPC/ppc64-localentry.s
@@ -0,0 +1,70 @@
+
+# RUN: llvm-mc -triple powerpc64-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -h -r -symbols | FileCheck %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown -filetype=obj %s | \
+# RUN: llvm-readobj -h -r -symbols | FileCheck %s
+
+ .type callee1, @function
+callee1:
+ nop
+ nop
+ .localentry callee1, .-callee1
+ nop
+ nop
+ .size callee1, .-callee1
+
+ .type callee2, @function
+callee2:
+ nop
+ nop
+ .size callee2, .-callee2
+
+ .type caller, @function
+caller:
+ bl callee1
+ nop
+ bl callee2
+ nop
+ .size caller, .-caller
+
+ .section .text.other
+caller_other:
+ bl callee1
+ nop
+ bl callee2
+ nop
+ .size caller_other, .-caller_other
+
+# Verify that use of .localentry implies ABI version 2
+# CHECK: ElfHeader {
+# CHECK: Flags [ (0x2)
+
+# Verify that fixups to local function symbols are performed only
+# if the target symbol does not use .localentry
+# CHECK: Relocations [
+# CHECK: Section ({{[0-9]*}}) .rela.text {
+# CHECK-NEXT: R_PPC64_REL24 callee1
+# CHECK-NEXT: }
+# CHECK-NOT: R_PPC64_REL24 callee2
+# CHECK: Section ({{[0-9]*}}) .rela.text.other {
+# CHECK-NEXT: R_PPC64_REL24 callee1
+# CHECK-NEXT: R_PPC64_REL24 .text
+# CHECK-NEXT: }
+
+# Verify that .localentry is encoded in the Other field.
+# CHECK: Symbols [
+# CHECK: Name: callee1
+# CHECK-NEXT: Value:
+# CHECK-NEXT: Size: 16
+# CHECK-NEXT: Binding: Local
+# CHECK-NEXT: Type: Function
+# CHECK-NEXT: Other: 96
+# CHECK-NEXT: Section: .text
+# CHECK: Name: callee2
+# CHECK-NEXT: Value:
+# CHECK-NEXT: Size: 8
+# CHECK-NEXT: Binding: Local
+# CHECK-NEXT: Type: Function
+# CHECK-NEXT: Other: 0
+# CHECK-NEXT: Section: .text
+
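+# (For reference: Other = 96 = 0x60 puts 0b011 in the top three bits of
+#  st_other, which under the ELFv2 local-entry encoding corresponds to
+#  (1 << 3) = 8 bytes, i.e. a local entry point two instructions past
+#  callee1, matching ".localentry callee1, .-callee1" after the two nops.)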
diff --git a/test/MC/PowerPC/ppc64-operands.s b/test/MC/PowerPC/ppc64-operands.s
index fc1cbeb94b94..392b3b7893e4 100644
--- a/test/MC/PowerPC/ppc64-operands.s
+++ b/test/MC/PowerPC/ppc64-operands.s
@@ -1,115 +1,158 @@
-# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
# Register operands
-# CHECK: add 1, 2, 3 # encoding: [0x7c,0x22,0x1a,0x14]
- add 1, 2, 3
+# CHECK-BE: add 1, 2, 3 # encoding: [0x7c,0x22,0x1a,0x14]
+# CHECK-LE: add 1, 2, 3 # encoding: [0x14,0x1a,0x22,0x7c]
+ add 1, 2, 3
-# CHECK: add 1, 2, 3 # encoding: [0x7c,0x22,0x1a,0x14]
- add %r1, %r2, %r3
+# CHECK-BE: add 1, 2, 3 # encoding: [0x7c,0x22,0x1a,0x14]
+# CHECK-LE: add 1, 2, 3 # encoding: [0x14,0x1a,0x22,0x7c]
+ add %r1, %r2, %r3
-# CHECK: add 0, 0, 0 # encoding: [0x7c,0x00,0x02,0x14]
- add 0, 0, 0
+# CHECK-BE: add 0, 0, 0 # encoding: [0x7c,0x00,0x02,0x14]
+# CHECK-LE: add 0, 0, 0 # encoding: [0x14,0x02,0x00,0x7c]
+ add 0, 0, 0
-# CHECK: add 31, 31, 31 # encoding: [0x7f,0xff,0xfa,0x14]
- add 31, 31, 31
+# CHECK-BE: add 31, 31, 31 # encoding: [0x7f,0xff,0xfa,0x14]
+# CHECK-LE: add 31, 31, 31 # encoding: [0x14,0xfa,0xff,0x7f]
+ add 31, 31, 31
-# CHECK: addi 1, 0, 0 # encoding: [0x38,0x20,0x00,0x00]
- addi 1, 0, 0
+# CHECK-BE: addi 1, 0, 0 # encoding: [0x38,0x20,0x00,0x00]
+# CHECK-LE: addi 1, 0, 0 # encoding: [0x00,0x00,0x20,0x38]
+ addi 1, 0, 0
-# CHECK: addi 1, 0, 0 # encoding: [0x38,0x20,0x00,0x00]
- addi 1, %r0, 0
+# CHECK-BE: addi 1, 0, 0 # encoding: [0x38,0x20,0x00,0x00]
+# CHECK-LE: addi 1, 0, 0 # encoding: [0x00,0x00,0x20,0x38]
+ addi 1, %r0, 0
# Signed 16-bit immediate operands
-# CHECK: addi 1, 2, 0 # encoding: [0x38,0x22,0x00,0x00]
- addi 1, 2, 0
+# CHECK-BE: addi 1, 2, 0 # encoding: [0x38,0x22,0x00,0x00]
+# CHECK-LE: addi 1, 2, 0 # encoding: [0x00,0x00,0x22,0x38]
+ addi 1, 2, 0
-# CHECK: addi 1, 0, -32768 # encoding: [0x38,0x20,0x80,0x00]
- addi 1, 0, -32768
+# CHECK-BE: addi 1, 0, -32768 # encoding: [0x38,0x20,0x80,0x00]
+# CHECK-LE: addi 1, 0, -32768 # encoding: [0x00,0x80,0x20,0x38]
+ addi 1, 0, -32768
-# CHECK: addi 1, 0, 32767 # encoding: [0x38,0x20,0x7f,0xff]
- addi 1, 0, 32767
+# CHECK-BE: addi 1, 0, 32767 # encoding: [0x38,0x20,0x7f,0xff]
+# CHECK-LE: addi 1, 0, 32767 # encoding: [0xff,0x7f,0x20,0x38]
+ addi 1, 0, 32767
# Unsigned 16-bit immediate operands
-# CHECK: ori 1, 2, 0 # encoding: [0x60,0x41,0x00,0x00]
- ori 1, 2, 0
+# CHECK-BE: ori 1, 2, 0 # encoding: [0x60,0x41,0x00,0x00]
+# CHECK-LE: ori 1, 2, 0 # encoding: [0x00,0x00,0x41,0x60]
+ ori 1, 2, 0
-# CHECK: ori 1, 2, 65535 # encoding: [0x60,0x41,0xff,0xff]
- ori 1, 2, 65535
+# CHECK-BE: ori 1, 2, 65535 # encoding: [0x60,0x41,0xff,0xff]
+# CHECK-LE: ori 1, 2, 65535 # encoding: [0xff,0xff,0x41,0x60]
+ ori 1, 2, 65535
# Signed 16-bit immediate operands (extended range for addis)
-# CHECK: addis 1, 0, 0 # encoding: [0x3c,0x20,0x00,0x00]
- addis 1, 0, -65536
+# CHECK-BE: addis 1, 0, 0 # encoding: [0x3c,0x20,0x00,0x00]
+# CHECK-LE: addis 1, 0, 0 # encoding: [0x00,0x00,0x20,0x3c]
+ addis 1, 0, -65536
-# CHECK: addis 1, 0, -1 # encoding: [0x3c,0x20,0xff,0xff]
- addis 1, 0, 65535
+# CHECK-BE: addis 1, 0, -1 # encoding: [0x3c,0x20,0xff,0xff]
+# CHECK-LE: addis 1, 0, -1 # encoding: [0xff,0xff,0x20,0x3c]
+ addis 1, 0, 65535
# D-Form memory operands
-# CHECK: lwz 1, 0(0) # encoding: [0x80,0x20,0x00,0x00]
- lwz 1, 0(0)
+# CHECK-BE: lwz 1, 0(0) # encoding: [0x80,0x20,0x00,0x00]
+# CHECK-LE: lwz 1, 0(0) # encoding: [0x00,0x00,0x20,0x80]
+ lwz 1, 0(0)
-# CHECK: lwz 1, 0(0) # encoding: [0x80,0x20,0x00,0x00]
- lwz 1, 0(%r0)
+# CHECK-BE: lwz 1, 0(0) # encoding: [0x80,0x20,0x00,0x00]
+# CHECK-LE: lwz 1, 0(0) # encoding: [0x00,0x00,0x20,0x80]
+ lwz 1, 0(%r0)
-# CHECK: lwz 1, 0(31) # encoding: [0x80,0x3f,0x00,0x00]
- lwz 1, 0(31)
+# CHECK-BE: lwz 1, 0(31) # encoding: [0x80,0x3f,0x00,0x00]
+# CHECK-LE: lwz 1, 0(31) # encoding: [0x00,0x00,0x3f,0x80]
+ lwz 1, 0(31)
-# CHECK: lwz 1, 0(31) # encoding: [0x80,0x3f,0x00,0x00]
- lwz 1, 0(%r31)
+# CHECK-BE: lwz 1, 0(31) # encoding: [0x80,0x3f,0x00,0x00]
+# CHECK-LE: lwz 1, 0(31) # encoding: [0x00,0x00,0x3f,0x80]
+ lwz 1, 0(%r31)
-# CHECK: lwz 1, -32768(2) # encoding: [0x80,0x22,0x80,0x00]
- lwz 1, -32768(2)
+# CHECK-BE: lwz 1, -32768(2) # encoding: [0x80,0x22,0x80,0x00]
+# CHECK-LE: lwz 1, -32768(2) # encoding: [0x00,0x80,0x22,0x80]
+ lwz 1, -32768(2)
-# CHECK: lwz 1, 32767(2) # encoding: [0x80,0x22,0x7f,0xff]
- lwz 1, 32767(2)
+# CHECK-BE: lwz 1, 32767(2) # encoding: [0x80,0x22,0x7f,0xff]
+# CHECK-LE: lwz 1, 32767(2) # encoding: [0xff,0x7f,0x22,0x80]
+ lwz 1, 32767(2)
-# CHECK: ld 1, 0(0) # encoding: [0xe8,0x20,0x00,0x00]
- ld 1, 0(0)
+# CHECK-BE: ld 1, 0(0) # encoding: [0xe8,0x20,0x00,0x00]
+# CHECK-LE: ld 1, 0(0) # encoding: [0x00,0x00,0x20,0xe8]
+ ld 1, 0(0)
-# CHECK: ld 1, 0(0) # encoding: [0xe8,0x20,0x00,0x00]
- ld 1, 0(%r0)
+# CHECK-BE: ld 1, 0(0) # encoding: [0xe8,0x20,0x00,0x00]
+# CHECK-LE: ld 1, 0(0) # encoding: [0x00,0x00,0x20,0xe8]
+ ld 1, 0(%r0)
-# CHECK: ld 1, 0(31) # encoding: [0xe8,0x3f,0x00,0x00]
- ld 1, 0(31)
+# CHECK-BE: ld 1, 0(31) # encoding: [0xe8,0x3f,0x00,0x00]
+# CHECK-LE: ld 1, 0(31) # encoding: [0x00,0x00,0x3f,0xe8]
+ ld 1, 0(31)
-# CHECK: ld 1, 0(31) # encoding: [0xe8,0x3f,0x00,0x00]
- ld 1, 0(%r31)
+# CHECK-BE: ld 1, 0(31) # encoding: [0xe8,0x3f,0x00,0x00]
+# CHECK-LE: ld 1, 0(31) # encoding: [0x00,0x00,0x3f,0xe8]
+ ld 1, 0(%r31)
-# CHECK: ld 1, -32768(2) # encoding: [0xe8,0x22,0x80,0x00]
- ld 1, -32768(2)
+# CHECK-BE: ld 1, -32768(2) # encoding: [0xe8,0x22,0x80,0x00]
+# CHECK-LE: ld 1, -32768(2) # encoding: [0x00,0x80,0x22,0xe8]
+ ld 1, -32768(2)
-# CHECK: ld 1, 32764(2) # encoding: [0xe8,0x22,0x7f,0xfc]
- ld 1, 32764(2)
+# CHECK-BE: ld 1, 32764(2) # encoding: [0xe8,0x22,0x7f,0xfc]
+# CHECK-LE: ld 1, 32764(2) # encoding: [0xfc,0x7f,0x22,0xe8]
+ ld 1, 32764(2)
-# CHECK: ld 1, 4(2) # encoding: [0xe8,0x22,0x00,0x04]
- ld 1, 4(2)
+# CHECK-BE: ld 1, 4(2) # encoding: [0xe8,0x22,0x00,0x04]
+# CHECK-LE: ld 1, 4(2) # encoding: [0x04,0x00,0x22,0xe8]
+ ld 1, 4(2)
-# CHECK: ld 1, -4(2) # encoding: [0xe8,0x22,0xff,0xfc]
- ld 1, -4(2)
+# CHECK-BE: ld 1, -4(2) # encoding: [0xe8,0x22,0xff,0xfc]
+# CHECK-LE: ld 1, -4(2) # encoding: [0xfc,0xff,0x22,0xe8]
+ ld 1, -4(2)
# Immediate branch operands
-# CHECK: b .+1024 # encoding: [0x48,0x00,0x04,0x00]
- b 1024
+# CHECK-BE: b .+1024 # encoding: [0x48,0x00,0x04,0x00]
+# CHECK-LE: b .+1024 # encoding: [0x00,0x04,0x00,0x48]
+ b 1024
-# CHECK: ba 1024 # encoding: [0x48,0x00,0x04,0x02]
- ba 1024
+# CHECK-BE: ba 1024 # encoding: [0x48,0x00,0x04,0x02]
+# CHECK-LE: ba 1024 # encoding: [0x02,0x04,0x00,0x48]
+ ba 1024
-# CHECK: beq 0, .+1024 # encoding: [0x41,0x82,0x04,0x00]
- beq 1024
+# CHECK-BE: beq 0, .+1024 # encoding: [0x41,0x82,0x04,0x00]
+# CHECK-LE: beq 0, .+1024 # encoding: [0x00,0x04,0x82,0x41]
+ beq 1024
-# CHECK: beqa 0, 1024 # encoding: [0x41,0x82,0x04,0x02]
- beqa 1024
+# CHECK-BE: beqa 0, 1024 # encoding: [0x41,0x82,0x04,0x02]
+# CHECK-LE: beqa 0, 1024 # encoding: [0x02,0x04,0x82,0x41]
+ beqa 1024
-# CHECK: # encoding: [0x42,0x9f,A,0bAAAAAA01]
- bcl 20, 31, $+4
+# CHECK-BE: # encoding: [0x42,0x9f,A,0bAAAAAA01]
+# CHECK-LE: # encoding: [0bAAAAAA01,A,0x9f,0x42]
+ bcl 20, 31, $+4
+
+# CHECK-BE: # encoding: [0x42,0x00,A,0bAAAAAA00]
+# CHECK-LE: # encoding: [0bAAAAAA00,A,0x00,0x42]
+ bdnz $-8
+
+# CHECK-BE: andi. 0, 3, 32767 # encoding: [0x70,0x60,0x7f,0xff]
+# CHECK-LE: andi. 0, 3, 32767 # encoding: [0xff,0x7f,0x60,0x70]
+ andi. %r0,%r3,~0x8000@l
+
+# CHECK-BE: andi. 0, 3, 0 # encoding: [0x70,0x60,0x00,0x00]
+# CHECK-LE: andi. 0, 3, 0 # encoding: [0x00,0x00,0x60,0x70]
+ andi. %r0,%r3,!0x8000@l
-# CHECK: # encoding: [0x42,0x00,A,0bAAAAAA00]
- bdnz $-8
diff --git a/test/MC/PowerPC/ppc64-regs.s b/test/MC/PowerPC/ppc64-regs.s
index 02b1fc5503d3..fb1775c86039 100644
--- a/test/MC/PowerPC/ppc64-regs.s
+++ b/test/MC/PowerPC/ppc64-regs.s
@@ -1,4 +1,5 @@
# RUN: llvm-mc -triple powerpc64-unknown-unknown --show-encoding %s | FileCheck %s
+# RUN: llvm-mc -triple powerpc64le-unknown-unknown --show-encoding %s | FileCheck %s
#CHECK: .cfi_startproc
#CHECK: .cfi_offset r0, 0
diff --git a/test/MC/PowerPC/vsx.s b/test/MC/PowerPC/vsx.s
new file mode 100644
index 000000000000..d292ddaf5cc4
--- /dev/null
+++ b/test/MC/PowerPC/vsx.s
@@ -0,0 +1,447 @@
+# RUN: llvm-mc -triple powerpc64-unknown-linux-gnu --show-encoding %s | FileCheck -check-prefix=CHECK-BE %s
+# RUN: llvm-mc -triple powerpc64le-unknown-linux-gnu --show-encoding %s | FileCheck -check-prefix=CHECK-LE %s
+
+# CHECK-BE: lxsdx 7, 5, 31 # encoding: [0x7c,0xe5,0xfc,0x98]
+# CHECK-LE: lxsdx 7, 5, 31 # encoding: [0x98,0xfc,0xe5,0x7c]
+ lxsdx 7, 5, 31
+# CHECK-BE: lxvd2x 7, 5, 31 # encoding: [0x7c,0xe5,0xfe,0x98]
+# CHECK-LE: lxvd2x 7, 5, 31 # encoding: [0x98,0xfe,0xe5,0x7c]
+ lxvd2x 7, 5, 31
+# CHECK-BE: lxvdsx 7, 5, 31 # encoding: [0x7c,0xe5,0xfa,0x98]
+# CHECK-LE: lxvdsx 7, 5, 31 # encoding: [0x98,0xfa,0xe5,0x7c]
+ lxvdsx 7, 5, 31
+# CHECK-BE: lxvw4x 7, 5, 31 # encoding: [0x7c,0xe5,0xfe,0x18]
+# CHECK-LE: lxvw4x 7, 5, 31 # encoding: [0x18,0xfe,0xe5,0x7c]
+ lxvw4x 7, 5, 31
+# CHECK-BE: stxsdx 8, 5, 31 # encoding: [0x7d,0x05,0xfd,0x98]
+# CHECK-LE: stxsdx 8, 5, 31 # encoding: [0x98,0xfd,0x05,0x7d]
+ stxsdx 8, 5, 31
+# CHECK-BE: stxvd2x 8, 5, 31 # encoding: [0x7d,0x05,0xff,0x98]
+# CHECK-LE: stxvd2x 8, 5, 31 # encoding: [0x98,0xff,0x05,0x7d]
+ stxvd2x 8, 5, 31
+# CHECK-BE: stxvw4x 8, 5, 31 # encoding: [0x7d,0x05,0xff,0x18]
+# CHECK-LE: stxvw4x 8, 5, 31 # encoding: [0x18,0xff,0x05,0x7d]
+ stxvw4x 8, 5, 31
+# CHECK-BE: xsabsdp 7, 27 # encoding: [0xf0,0xe0,0xdd,0x64]
+# CHECK-LE: xsabsdp 7, 27 # encoding: [0x64,0xdd,0xe0,0xf0]
+ xsabsdp 7, 27
+# CHECK-BE: xsadddp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x04]
+# CHECK-LE: xsadddp 7, 63, 27 # encoding: [0x04,0xd9,0xff,0xf0]
+ xsadddp 7, 63, 27
+# CHECK-BE: xscmpodp 6, 63, 27 # encoding: [0xf3,0x1f,0xd9,0x5c]
+# CHECK-LE: xscmpodp 6, 63, 27 # encoding: [0x5c,0xd9,0x1f,0xf3]
+ xscmpodp 6, 63, 27
+# CHECK-BE: xscmpudp 6, 63, 27 # encoding: [0xf3,0x1f,0xd9,0x1c]
+# CHECK-LE: xscmpudp 6, 63, 27 # encoding: [0x1c,0xd9,0x1f,0xf3]
+ xscmpudp 6, 63, 27
+# CHECK-BE: xscpsgndp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x84]
+# CHECK-LE: xscpsgndp 7, 63, 27 # encoding: [0x84,0xdd,0xff,0xf0]
+ xscpsgndp 7, 63, 27
+# CHECK-BE: xscvdpsp 7, 27 # encoding: [0xf0,0xe0,0xdc,0x24]
+# CHECK-LE: xscvdpsp 7, 27 # encoding: [0x24,0xdc,0xe0,0xf0]
+ xscvdpsp 7, 27
+# CHECK-BE: xscvdpsxds 7, 27 # encoding: [0xf0,0xe0,0xdd,0x60]
+# CHECK-LE: xscvdpsxds 7, 27 # encoding: [0x60,0xdd,0xe0,0xf0]
+ xscvdpsxds 7, 27
+# CHECK-BE: xscvdpsxws 7, 27 # encoding: [0xf0,0xe0,0xd9,0x60]
+# CHECK-LE: xscvdpsxws 7, 27 # encoding: [0x60,0xd9,0xe0,0xf0]
+ xscvdpsxws 7, 27
+# CHECK-BE: xscvdpuxds 7, 27 # encoding: [0xf0,0xe0,0xdd,0x20]
+# CHECK-LE: xscvdpuxds 7, 27 # encoding: [0x20,0xdd,0xe0,0xf0]
+ xscvdpuxds 7, 27
+# CHECK-BE: xscvdpuxws 7, 27 # encoding: [0xf0,0xe0,0xd9,0x20]
+# CHECK-LE: xscvdpuxws 7, 27 # encoding: [0x20,0xd9,0xe0,0xf0]
+ xscvdpuxws 7, 27
+# CHECK-BE: xscvspdp 7, 27 # encoding: [0xf0,0xe0,0xdd,0x24]
+# CHECK-LE: xscvspdp 7, 27 # encoding: [0x24,0xdd,0xe0,0xf0]
+ xscvspdp 7, 27
+# CHECK-BE: xscvsxddp 7, 27 # encoding: [0xf0,0xe0,0xdd,0xe0]
+# CHECK-LE: xscvsxddp 7, 27 # encoding: [0xe0,0xdd,0xe0,0xf0]
+ xscvsxddp 7, 27
+# CHECK-BE: xscvuxddp 7, 27 # encoding: [0xf0,0xe0,0xdd,0xa0]
+# CHECK-LE: xscvuxddp 7, 27 # encoding: [0xa0,0xdd,0xe0,0xf0]
+ xscvuxddp 7, 27
+# CHECK-BE: xsdivdp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0xc4]
+# CHECK-LE: xsdivdp 7, 63, 27 # encoding: [0xc4,0xd9,0xff,0xf0]
+ xsdivdp 7, 63, 27
+# CHECK-BE: xsmaddadp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x0c]
+# CHECK-LE: xsmaddadp 7, 63, 27 # encoding: [0x0c,0xd9,0xff,0xf0]
+ xsmaddadp 7, 63, 27
+# CHECK-BE: xsmaddmdp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x4c]
+# CHECK-LE: xsmaddmdp 7, 63, 27 # encoding: [0x4c,0xd9,0xff,0xf0]
+ xsmaddmdp 7, 63, 27
+# CHECK-BE: xsmaxdp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x04]
+# CHECK-LE: xsmaxdp 7, 63, 27 # encoding: [0x04,0xdd,0xff,0xf0]
+ xsmaxdp 7, 63, 27
+# CHECK-BE: xsmindp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x44]
+# CHECK-LE: xsmindp 7, 63, 27 # encoding: [0x44,0xdd,0xff,0xf0]
+ xsmindp 7, 63, 27
+# CHECK-BE: xsmsubadp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x8c]
+# CHECK-LE: xsmsubadp 7, 63, 27 # encoding: [0x8c,0xd9,0xff,0xf0]
+ xsmsubadp 7, 63, 27
+# CHECK-BE: xsmsubmdp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0xcc]
+# CHECK-LE: xsmsubmdp 7, 63, 27 # encoding: [0xcc,0xd9,0xff,0xf0]
+ xsmsubmdp 7, 63, 27
+# CHECK-BE: xsmuldp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x84]
+# CHECK-LE: xsmuldp 7, 63, 27 # encoding: [0x84,0xd9,0xff,0xf0]
+ xsmuldp 7, 63, 27
+# CHECK-BE: xsnabsdp 7, 27 # encoding: [0xf0,0xe0,0xdd,0xa4]
+# CHECK-LE: xsnabsdp 7, 27 # encoding: [0xa4,0xdd,0xe0,0xf0]
+ xsnabsdp 7, 27
+# CHECK-BE: xsnegdp 7, 27 # encoding: [0xf0,0xe0,0xdd,0xe4]
+# CHECK-LE: xsnegdp 7, 27 # encoding: [0xe4,0xdd,0xe0,0xf0]
+ xsnegdp 7, 27
+# CHECK-BE: xsnmaddadp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x0c]
+# CHECK-LE: xsnmaddadp 7, 63, 27 # encoding: [0x0c,0xdd,0xff,0xf0]
+ xsnmaddadp 7, 63, 27
+# CHECK-BE: xsnmaddmdp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x4c]
+# CHECK-LE: xsnmaddmdp 7, 63, 27 # encoding: [0x4c,0xdd,0xff,0xf0]
+ xsnmaddmdp 7, 63, 27
+# CHECK-BE: xsnmsubadp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x8c]
+# CHECK-LE: xsnmsubadp 7, 63, 27 # encoding: [0x8c,0xdd,0xff,0xf0]
+ xsnmsubadp 7, 63, 27
+# CHECK-BE: xsnmsubmdp 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0xcc]
+# CHECK-LE: xsnmsubmdp 7, 63, 27 # encoding: [0xcc,0xdd,0xff,0xf0]
+ xsnmsubmdp 7, 63, 27
+# CHECK-BE: xsrdpi 7, 27 # encoding: [0xf0,0xe0,0xd9,0x24]
+# CHECK-LE: xsrdpi 7, 27 # encoding: [0x24,0xd9,0xe0,0xf0]
+ xsrdpi 7, 27
+# CHECK-BE: xsrdpic 7, 27 # encoding: [0xf0,0xe0,0xd9,0xac]
+# CHECK-LE: xsrdpic 7, 27 # encoding: [0xac,0xd9,0xe0,0xf0]
+ xsrdpic 7, 27
+# CHECK-BE: xsrdpim 7, 27 # encoding: [0xf0,0xe0,0xd9,0xe4]
+# CHECK-LE: xsrdpim 7, 27 # encoding: [0xe4,0xd9,0xe0,0xf0]
+ xsrdpim 7, 27
+# CHECK-BE: xsrdpip 7, 27 # encoding: [0xf0,0xe0,0xd9,0xa4]
+# CHECK-LE: xsrdpip 7, 27 # encoding: [0xa4,0xd9,0xe0,0xf0]
+ xsrdpip 7, 27
+# CHECK-BE: xsrdpiz 7, 27 # encoding: [0xf0,0xe0,0xd9,0x64]
+# CHECK-LE: xsrdpiz 7, 27 # encoding: [0x64,0xd9,0xe0,0xf0]
+ xsrdpiz 7, 27
+# CHECK-BE: xsredp 7, 27 # encoding: [0xf0,0xe0,0xd9,0x68]
+# CHECK-LE: xsredp 7, 27 # encoding: [0x68,0xd9,0xe0,0xf0]
+ xsredp 7, 27
+# CHECK-BE: xsrsqrtedp 7, 27 # encoding: [0xf0,0xe0,0xd9,0x28]
+# CHECK-LE: xsrsqrtedp 7, 27 # encoding: [0x28,0xd9,0xe0,0xf0]
+ xsrsqrtedp 7, 27
+# CHECK-BE: xssqrtdp 7, 27 # encoding: [0xf0,0xe0,0xd9,0x2c]
+# CHECK-LE: xssqrtdp 7, 27 # encoding: [0x2c,0xd9,0xe0,0xf0]
+ xssqrtdp 7, 27
+# CHECK-BE: xssubdp 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x44]
+# CHECK-LE: xssubdp 7, 63, 27 # encoding: [0x44,0xd9,0xff,0xf0]
+ xssubdp 7, 63, 27
+# CHECK-BE: xstdivdp 6, 63, 27 # encoding: [0xf3,0x1f,0xd9,0xec]
+# CHECK-LE: xstdivdp 6, 63, 27 # encoding: [0xec,0xd9,0x1f,0xf3]
+ xstdivdp 6, 63, 27
+# CHECK-BE: xstsqrtdp 6, 27 # encoding: [0xf3,0x00,0xd9,0xa8]
+# CHECK-LE: xstsqrtdp 6, 27 # encoding: [0xa8,0xd9,0x00,0xf3]
+ xstsqrtdp 6, 27
+# CHECK-BE: xvabsdp 7, 27 # encoding: [0xf0,0xe0,0xdf,0x64]
+# CHECK-LE: xvabsdp 7, 27 # encoding: [0x64,0xdf,0xe0,0xf0]
+ xvabsdp 7, 27
+# CHECK-BE: xvabssp 7, 27 # encoding: [0xf0,0xe0,0xde,0x64]
+# CHECK-LE: xvabssp 7, 27 # encoding: [0x64,0xde,0xe0,0xf0]
+ xvabssp 7, 27
+# CHECK-BE: xvadddp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x04]
+# CHECK-LE: xvadddp 7, 63, 27 # encoding: [0x04,0xdb,0xff,0xf0]
+ xvadddp 7, 63, 27
+# CHECK-BE: xvaddsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x04]
+# CHECK-LE: xvaddsp 7, 63, 27 # encoding: [0x04,0xda,0xff,0xf0]
+ xvaddsp 7, 63, 27
+# CHECK-BE: xvcmpeqdp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x1c]
+# CHECK-LE: xvcmpeqdp 7, 63, 27 # encoding: [0x1c,0xdb,0xff,0xf0]
+ xvcmpeqdp 7, 63, 27
+# CHECK-BE: xvcmpeqdp. 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x1c]
+# CHECK-LE: xvcmpeqdp. 7, 63, 27 # encoding: [0x1c,0xdf,0xff,0xf0]
+ xvcmpeqdp. 7, 63, 27
+# CHECK-BE: xvcmpeqsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x1c]
+# CHECK-LE: xvcmpeqsp 7, 63, 27 # encoding: [0x1c,0xda,0xff,0xf0]
+ xvcmpeqsp 7, 63, 27
+# CHECK-BE: xvcmpeqsp. 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x1c]
+# CHECK-LE: xvcmpeqsp. 7, 63, 27 # encoding: [0x1c,0xde,0xff,0xf0]
+ xvcmpeqsp. 7, 63, 27
+# CHECK-BE: xvcmpgedp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x9c]
+# CHECK-LE: xvcmpgedp 7, 63, 27 # encoding: [0x9c,0xdb,0xff,0xf0]
+ xvcmpgedp 7, 63, 27
+# CHECK-BE: xvcmpgedp. 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x9c]
+# CHECK-LE: xvcmpgedp. 7, 63, 27 # encoding: [0x9c,0xdf,0xff,0xf0]
+ xvcmpgedp. 7, 63, 27
+# CHECK-BE: xvcmpgesp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x9c]
+# CHECK-LE: xvcmpgesp 7, 63, 27 # encoding: [0x9c,0xda,0xff,0xf0]
+ xvcmpgesp 7, 63, 27
+# CHECK-BE: xvcmpgesp. 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x9c]
+# CHECK-LE: xvcmpgesp. 7, 63, 27 # encoding: [0x9c,0xde,0xff,0xf0]
+ xvcmpgesp. 7, 63, 27
+# CHECK-BE: xvcmpgtdp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x5c]
+# CHECK-LE: xvcmpgtdp 7, 63, 27 # encoding: [0x5c,0xdb,0xff,0xf0]
+ xvcmpgtdp 7, 63, 27
+# CHECK-BE: xvcmpgtdp. 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x5c]
+# CHECK-LE: xvcmpgtdp. 7, 63, 27 # encoding: [0x5c,0xdf,0xff,0xf0]
+ xvcmpgtdp. 7, 63, 27
+# CHECK-BE: xvcmpgtsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x5c]
+# CHECK-LE: xvcmpgtsp 7, 63, 27 # encoding: [0x5c,0xda,0xff,0xf0]
+ xvcmpgtsp 7, 63, 27
+# CHECK-BE: xvcmpgtsp. 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x5c]
+# CHECK-LE: xvcmpgtsp. 7, 63, 27 # encoding: [0x5c,0xde,0xff,0xf0]
+ xvcmpgtsp. 7, 63, 27
+# CHECK-BE: xvcpsgndp 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x84]
+# CHECK-LE: xvcpsgndp 7, 63, 27 # encoding: [0x84,0xdf,0xff,0xf0]
+ xvcpsgndp 7, 63, 27
+# CHECK-BE: xvcpsgnsp 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x84]
+# CHECK-LE: xvcpsgnsp 7, 63, 27 # encoding: [0x84,0xde,0xff,0xf0]
+ xvcpsgnsp 7, 63, 27
+# CHECK-BE: xvcvdpsp 7, 27 # encoding: [0xf0,0xe0,0xde,0x24]
+# CHECK-LE: xvcvdpsp 7, 27 # encoding: [0x24,0xde,0xe0,0xf0]
+ xvcvdpsp 7, 27
+# CHECK-BE: xvcvdpsxds 7, 27 # encoding: [0xf0,0xe0,0xdf,0x60]
+# CHECK-LE: xvcvdpsxds 7, 27 # encoding: [0x60,0xdf,0xe0,0xf0]
+ xvcvdpsxds 7, 27
+# CHECK-BE: xvcvdpsxws 7, 27 # encoding: [0xf0,0xe0,0xdb,0x60]
+# CHECK-LE: xvcvdpsxws 7, 27 # encoding: [0x60,0xdb,0xe0,0xf0]
+ xvcvdpsxws 7, 27
+# CHECK-BE: xvcvdpuxds 7, 27 # encoding: [0xf0,0xe0,0xdf,0x20]
+# CHECK-LE: xvcvdpuxds 7, 27 # encoding: [0x20,0xdf,0xe0,0xf0]
+ xvcvdpuxds 7, 27
+# CHECK-BE: xvcvdpuxws 7, 27 # encoding: [0xf0,0xe0,0xdb,0x20]
+# CHECK-LE: xvcvdpuxws 7, 27 # encoding: [0x20,0xdb,0xe0,0xf0]
+ xvcvdpuxws 7, 27
+# CHECK-BE: xvcvspdp 7, 27 # encoding: [0xf0,0xe0,0xdf,0x24]
+# CHECK-LE: xvcvspdp 7, 27 # encoding: [0x24,0xdf,0xe0,0xf0]
+ xvcvspdp 7, 27
+# CHECK-BE: xvcvspsxds 7, 27 # encoding: [0xf0,0xe0,0xde,0x60]
+# CHECK-LE: xvcvspsxds 7, 27 # encoding: [0x60,0xde,0xe0,0xf0]
+ xvcvspsxds 7, 27
+# CHECK-BE: xvcvspsxws 7, 27 # encoding: [0xf0,0xe0,0xda,0x60]
+# CHECK-LE: xvcvspsxws 7, 27 # encoding: [0x60,0xda,0xe0,0xf0]
+ xvcvspsxws 7, 27
+# CHECK-BE: xvcvspuxds 7, 27 # encoding: [0xf0,0xe0,0xde,0x20]
+# CHECK-LE: xvcvspuxds 7, 27 # encoding: [0x20,0xde,0xe0,0xf0]
+ xvcvspuxds 7, 27
+# CHECK-BE: xvcvspuxws 7, 27 # encoding: [0xf0,0xe0,0xda,0x20]
+# CHECK-LE: xvcvspuxws 7, 27 # encoding: [0x20,0xda,0xe0,0xf0]
+ xvcvspuxws 7, 27
+# CHECK-BE: xvcvsxddp 7, 27 # encoding: [0xf0,0xe0,0xdf,0xe0]
+# CHECK-LE: xvcvsxddp 7, 27 # encoding: [0xe0,0xdf,0xe0,0xf0]
+ xvcvsxddp 7, 27
+# CHECK-BE: xvcvsxdsp 7, 27 # encoding: [0xf0,0xe0,0xde,0xe0]
+# CHECK-LE: xvcvsxdsp 7, 27 # encoding: [0xe0,0xde,0xe0,0xf0]
+ xvcvsxdsp 7, 27
+# CHECK-BE: xvcvsxwdp 7, 27 # encoding: [0xf0,0xe0,0xdb,0xe0]
+# CHECK-LE: xvcvsxwdp 7, 27 # encoding: [0xe0,0xdb,0xe0,0xf0]
+ xvcvsxwdp 7, 27
+# CHECK-BE: xvcvsxwsp 7, 27 # encoding: [0xf0,0xe0,0xda,0xe0]
+# CHECK-LE: xvcvsxwsp 7, 27 # encoding: [0xe0,0xda,0xe0,0xf0]
+ xvcvsxwsp 7, 27
+# CHECK-BE: xvcvuxddp 7, 27 # encoding: [0xf0,0xe0,0xdf,0xa0]
+# CHECK-LE: xvcvuxddp 7, 27 # encoding: [0xa0,0xdf,0xe0,0xf0]
+ xvcvuxddp 7, 27
+# CHECK-BE: xvcvuxdsp 7, 27 # encoding: [0xf0,0xe0,0xde,0xa0]
+# CHECK-LE: xvcvuxdsp 7, 27 # encoding: [0xa0,0xde,0xe0,0xf0]
+ xvcvuxdsp 7, 27
+# CHECK-BE: xvcvuxwdp 7, 27 # encoding: [0xf0,0xe0,0xdb,0xa0]
+# CHECK-LE: xvcvuxwdp 7, 27 # encoding: [0xa0,0xdb,0xe0,0xf0]
+ xvcvuxwdp 7, 27
+# CHECK-BE: xvcvuxwsp 7, 27 # encoding: [0xf0,0xe0,0xda,0xa0]
+# CHECK-LE: xvcvuxwsp 7, 27 # encoding: [0xa0,0xda,0xe0,0xf0]
+ xvcvuxwsp 7, 27
+# CHECK-BE: xvdivdp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0xc4]
+# CHECK-LE: xvdivdp 7, 63, 27 # encoding: [0xc4,0xdb,0xff,0xf0]
+ xvdivdp 7, 63, 27
+# CHECK-BE: xvdivsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0xc4]
+# CHECK-LE: xvdivsp 7, 63, 27 # encoding: [0xc4,0xda,0xff,0xf0]
+ xvdivsp 7, 63, 27
+# CHECK-BE: xvmaddadp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x0c]
+# CHECK-LE: xvmaddadp 7, 63, 27 # encoding: [0x0c,0xdb,0xff,0xf0]
+ xvmaddadp 7, 63, 27
+# CHECK-BE: xvmaddasp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x0c]
+# CHECK-LE: xvmaddasp 7, 63, 27 # encoding: [0x0c,0xda,0xff,0xf0]
+ xvmaddasp 7, 63, 27
+# CHECK-BE: xvmaddmdp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x4c]
+# CHECK-LE: xvmaddmdp 7, 63, 27 # encoding: [0x4c,0xdb,0xff,0xf0]
+ xvmaddmdp 7, 63, 27
+# CHECK-BE: xvmaddmsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x4c]
+# CHECK-LE: xvmaddmsp 7, 63, 27 # encoding: [0x4c,0xda,0xff,0xf0]
+ xvmaddmsp 7, 63, 27
+# CHECK-BE: xvmaxdp 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x04]
+# CHECK-LE: xvmaxdp 7, 63, 27 # encoding: [0x04,0xdf,0xff,0xf0]
+ xvmaxdp 7, 63, 27
+# CHECK-BE: xvmaxsp 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x04]
+# CHECK-LE: xvmaxsp 7, 63, 27 # encoding: [0x04,0xde,0xff,0xf0]
+ xvmaxsp 7, 63, 27
+# CHECK-BE: xvmindp 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x44]
+# CHECK-LE: xvmindp 7, 63, 27 # encoding: [0x44,0xdf,0xff,0xf0]
+ xvmindp 7, 63, 27
+# CHECK-BE: xvminsp 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x44]
+# CHECK-LE: xvminsp 7, 63, 27 # encoding: [0x44,0xde,0xff,0xf0]
+ xvminsp 7, 63, 27
+# CHECK-BE: xvcpsgndp 7, 63, 63 # encoding: [0xf0,0xff,0xff,0x86]
+# CHECK-LE: xvcpsgndp 7, 63, 63 # encoding: [0x86,0xff,0xff,0xf0]
+ xvmovdp 7, 63
+# CHECK-BE: xvcpsgnsp 7, 63, 63 # encoding: [0xf0,0xff,0xfe,0x86]
+# CHECK-LE: xvcpsgnsp 7, 63, 63 # encoding: [0x86,0xfe,0xff,0xf0]
+ xvmovsp 7, 63
+# CHECK-BE: xvmsubadp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x8c]
+# CHECK-LE: xvmsubadp 7, 63, 27 # encoding: [0x8c,0xdb,0xff,0xf0]
+ xvmsubadp 7, 63, 27
+# CHECK-BE: xvmsubasp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x8c]
+# CHECK-LE: xvmsubasp 7, 63, 27 # encoding: [0x8c,0xda,0xff,0xf0]
+ xvmsubasp 7, 63, 27
+# CHECK-BE: xvmsubmdp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0xcc]
+# CHECK-LE: xvmsubmdp 7, 63, 27 # encoding: [0xcc,0xdb,0xff,0xf0]
+ xvmsubmdp 7, 63, 27
+# CHECK-BE: xvmsubmsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0xcc]
+# CHECK-LE: xvmsubmsp 7, 63, 27 # encoding: [0xcc,0xda,0xff,0xf0]
+ xvmsubmsp 7, 63, 27
+# CHECK-BE: xvmuldp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x84]
+# CHECK-LE: xvmuldp 7, 63, 27 # encoding: [0x84,0xdb,0xff,0xf0]
+ xvmuldp 7, 63, 27
+# CHECK-BE: xvmulsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x84]
+# CHECK-LE: xvmulsp 7, 63, 27 # encoding: [0x84,0xda,0xff,0xf0]
+ xvmulsp 7, 63, 27
+# CHECK-BE: xvnabsdp 7, 27 # encoding: [0xf0,0xe0,0xdf,0xa4]
+# CHECK-LE: xvnabsdp 7, 27 # encoding: [0xa4,0xdf,0xe0,0xf0]
+ xvnabsdp 7, 27
+# CHECK-BE: xvnabssp 7, 27 # encoding: [0xf0,0xe0,0xde,0xa4]
+# CHECK-LE: xvnabssp 7, 27 # encoding: [0xa4,0xde,0xe0,0xf0]
+ xvnabssp 7, 27
+# CHECK-BE: xvnegdp 7, 27 # encoding: [0xf0,0xe0,0xdf,0xe4]
+# CHECK-LE: xvnegdp 7, 27 # encoding: [0xe4,0xdf,0xe0,0xf0]
+ xvnegdp 7, 27
+# CHECK-BE: xvnegsp 7, 27 # encoding: [0xf0,0xe0,0xde,0xe4]
+# CHECK-LE: xvnegsp 7, 27 # encoding: [0xe4,0xde,0xe0,0xf0]
+ xvnegsp 7, 27
+# CHECK-BE: xvnmaddadp 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x0c]
+# CHECK-LE: xvnmaddadp 7, 63, 27 # encoding: [0x0c,0xdf,0xff,0xf0]
+ xvnmaddadp 7, 63, 27
+# CHECK-BE: xvnmaddasp 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x0c]
+# CHECK-LE: xvnmaddasp 7, 63, 27 # encoding: [0x0c,0xde,0xff,0xf0]
+ xvnmaddasp 7, 63, 27
+# CHECK-BE: xvnmaddmdp 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x4c]
+# CHECK-LE: xvnmaddmdp 7, 63, 27 # encoding: [0x4c,0xdf,0xff,0xf0]
+ xvnmaddmdp 7, 63, 27
+# CHECK-BE: xvnmaddmsp 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x4c]
+# CHECK-LE: xvnmaddmsp 7, 63, 27 # encoding: [0x4c,0xde,0xff,0xf0]
+ xvnmaddmsp 7, 63, 27
+# CHECK-BE: xvnmsubadp 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0x8c]
+# CHECK-LE: xvnmsubadp 7, 63, 27 # encoding: [0x8c,0xdf,0xff,0xf0]
+ xvnmsubadp 7, 63, 27
+# CHECK-BE: xvnmsubasp 7, 63, 27 # encoding: [0xf0,0xff,0xde,0x8c]
+# CHECK-LE: xvnmsubasp 7, 63, 27 # encoding: [0x8c,0xde,0xff,0xf0]
+ xvnmsubasp 7, 63, 27
+# CHECK-BE: xvnmsubmdp 7, 63, 27 # encoding: [0xf0,0xff,0xdf,0xcc]
+# CHECK-LE: xvnmsubmdp 7, 63, 27 # encoding: [0xcc,0xdf,0xff,0xf0]
+ xvnmsubmdp 7, 63, 27
+# CHECK-BE: xvnmsubmsp 7, 63, 27 # encoding: [0xf0,0xff,0xde,0xcc]
+# CHECK-LE: xvnmsubmsp 7, 63, 27 # encoding: [0xcc,0xde,0xff,0xf0]
+ xvnmsubmsp 7, 63, 27
+# CHECK-BE: xvrdpi 7, 27 # encoding: [0xf0,0xe0,0xdb,0x24]
+# CHECK-LE: xvrdpi 7, 27 # encoding: [0x24,0xdb,0xe0,0xf0]
+ xvrdpi 7, 27
+# CHECK-BE: xvrdpic 7, 27 # encoding: [0xf0,0xe0,0xdb,0xac]
+# CHECK-LE: xvrdpic 7, 27 # encoding: [0xac,0xdb,0xe0,0xf0]
+ xvrdpic 7, 27
+# CHECK-BE: xvrdpim 7, 27 # encoding: [0xf0,0xe0,0xdb,0xe4]
+# CHECK-LE: xvrdpim 7, 27 # encoding: [0xe4,0xdb,0xe0,0xf0]
+ xvrdpim 7, 27
+# CHECK-BE: xvrdpip 7, 27 # encoding: [0xf0,0xe0,0xdb,0xa4]
+# CHECK-LE: xvrdpip 7, 27 # encoding: [0xa4,0xdb,0xe0,0xf0]
+ xvrdpip 7, 27
+# CHECK-BE: xvrdpiz 7, 27 # encoding: [0xf0,0xe0,0xdb,0x64]
+# CHECK-LE: xvrdpiz 7, 27 # encoding: [0x64,0xdb,0xe0,0xf0]
+ xvrdpiz 7, 27
+# CHECK-BE: xvredp 7, 27 # encoding: [0xf0,0xe0,0xdb,0x68]
+# CHECK-LE: xvredp 7, 27 # encoding: [0x68,0xdb,0xe0,0xf0]
+ xvredp 7, 27
+# CHECK-BE: xvresp 7, 27 # encoding: [0xf0,0xe0,0xda,0x68]
+# CHECK-LE: xvresp 7, 27 # encoding: [0x68,0xda,0xe0,0xf0]
+ xvresp 7, 27
+# CHECK-BE: xvrspi 7, 27 # encoding: [0xf0,0xe0,0xda,0x24]
+# CHECK-LE: xvrspi 7, 27 # encoding: [0x24,0xda,0xe0,0xf0]
+ xvrspi 7, 27
+# CHECK-BE: xvrspic 7, 27 # encoding: [0xf0,0xe0,0xda,0xac]
+# CHECK-LE: xvrspic 7, 27 # encoding: [0xac,0xda,0xe0,0xf0]
+ xvrspic 7, 27
+# CHECK-BE: xvrspim 7, 27 # encoding: [0xf0,0xe0,0xda,0xe4]
+# CHECK-LE: xvrspim 7, 27 # encoding: [0xe4,0xda,0xe0,0xf0]
+ xvrspim 7, 27
+# CHECK-BE: xvrspip 7, 27 # encoding: [0xf0,0xe0,0xda,0xa4]
+# CHECK-LE: xvrspip 7, 27 # encoding: [0xa4,0xda,0xe0,0xf0]
+ xvrspip 7, 27
+# CHECK-BE: xvrspiz 7, 27 # encoding: [0xf0,0xe0,0xda,0x64]
+# CHECK-LE: xvrspiz 7, 27 # encoding: [0x64,0xda,0xe0,0xf0]
+ xvrspiz 7, 27
+# CHECK-BE: xvrsqrtedp 7, 27 # encoding: [0xf0,0xe0,0xdb,0x28]
+# CHECK-LE: xvrsqrtedp 7, 27 # encoding: [0x28,0xdb,0xe0,0xf0]
+ xvrsqrtedp 7, 27
+# CHECK-BE: xvrsqrtesp 7, 27 # encoding: [0xf0,0xe0,0xda,0x28]
+# CHECK-LE: xvrsqrtesp 7, 27 # encoding: [0x28,0xda,0xe0,0xf0]
+ xvrsqrtesp 7, 27
+# CHECK-BE: xvsqrtdp 7, 27 # encoding: [0xf0,0xe0,0xdb,0x2c]
+# CHECK-LE: xvsqrtdp 7, 27 # encoding: [0x2c,0xdb,0xe0,0xf0]
+ xvsqrtdp 7, 27
+# CHECK-BE: xvsqrtsp 7, 27 # encoding: [0xf0,0xe0,0xda,0x2c]
+# CHECK-LE: xvsqrtsp 7, 27 # encoding: [0x2c,0xda,0xe0,0xf0]
+ xvsqrtsp 7, 27
+# CHECK-BE: xvsubdp 7, 63, 27 # encoding: [0xf0,0xff,0xdb,0x44]
+# CHECK-LE: xvsubdp 7, 63, 27 # encoding: [0x44,0xdb,0xff,0xf0]
+ xvsubdp 7, 63, 27
+# CHECK-BE: xvsubsp 7, 63, 27 # encoding: [0xf0,0xff,0xda,0x44]
+# CHECK-LE: xvsubsp 7, 63, 27 # encoding: [0x44,0xda,0xff,0xf0]
+ xvsubsp 7, 63, 27
+# CHECK-BE: xvtdivdp 6, 63, 27 # encoding: [0xf3,0x1f,0xdb,0xec]
+# CHECK-LE: xvtdivdp 6, 63, 27 # encoding: [0xec,0xdb,0x1f,0xf3]
+ xvtdivdp 6, 63, 27
+# CHECK-BE: xvtdivsp 6, 63, 27 # encoding: [0xf3,0x1f,0xda,0xec]
+# CHECK-LE: xvtdivsp 6, 63, 27 # encoding: [0xec,0xda,0x1f,0xf3]
+ xvtdivsp 6, 63, 27
+# CHECK-BE: xvtsqrtdp 6, 27 # encoding: [0xf3,0x00,0xdb,0xa8]
+# CHECK-LE: xvtsqrtdp 6, 27 # encoding: [0xa8,0xdb,0x00,0xf3]
+ xvtsqrtdp 6, 27
+# CHECK-BE: xvtsqrtsp 6, 27 # encoding: [0xf3,0x00,0xda,0xa8]
+# CHECK-LE: xvtsqrtsp 6, 27 # encoding: [0xa8,0xda,0x00,0xf3]
+ xvtsqrtsp 6, 27
+# CHECK-BE: xxland 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0x14]
+# CHECK-LE: xxland 7, 63, 27 # encoding: [0x14,0xdc,0xff,0xf0]
+ xxland 7, 63, 27
+# CHECK-BE: xxlandc 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0x54]
+# CHECK-LE: xxlandc 7, 63, 27 # encoding: [0x54,0xdc,0xff,0xf0]
+ xxlandc 7, 63, 27
+# CHECK-BE: xxlnor 7, 63, 27 # encoding: [0xf0,0xff,0xdd,0x14]
+# CHECK-LE: xxlnor 7, 63, 27 # encoding: [0x14,0xdd,0xff,0xf0]
+ xxlnor 7, 63, 27
+# CHECK-BE: xxlor 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0x94]
+# CHECK-LE: xxlor 7, 63, 27 # encoding: [0x94,0xdc,0xff,0xf0]
+ xxlor 7, 63, 27
+# CHECK-BE: xxlxor 7, 63, 27 # encoding: [0xf0,0xff,0xdc,0xd4]
+# CHECK-LE: xxlxor 7, 63, 27 # encoding: [0xd4,0xdc,0xff,0xf0]
+ xxlxor 7, 63, 27
+# CHECK-BE: xxpermdi 7, 63, 27, 0 # encoding: [0xf0,0xff,0xd8,0x54]
+# CHECK-LE: xxpermdi 7, 63, 27, 0 # encoding: [0x54,0xd8,0xff,0xf0]
+ xxmrghd 7, 63, 27
+# CHECK-BE: xxmrghw 7, 63, 27 # encoding: [0xf0,0xff,0xd8,0x94]
+# CHECK-LE: xxmrghw 7, 63, 27 # encoding: [0x94,0xd8,0xff,0xf0]
+ xxmrghw 7, 63, 27
+# CHECK-BE: xxpermdi 7, 63, 27, 3 # encoding: [0xf0,0xff,0xdb,0x54]
+# CHECK-LE: xxpermdi 7, 63, 27, 3 # encoding: [0x54,0xdb,0xff,0xf0]
+ xxmrgld 7, 63, 27
+# CHECK-BE: xxmrglw 7, 63, 27 # encoding: [0xf0,0xff,0xd9,0x94]
+# CHECK-LE: xxmrglw 7, 63, 27 # encoding: [0x94,0xd9,0xff,0xf0]
+ xxmrglw 7, 63, 27
+# CHECK-BE: xxpermdi 7, 63, 27, 2 # encoding: [0xf0,0xff,0xda,0x54]
+# CHECK-LE: xxpermdi 7, 63, 27, 2 # encoding: [0x54,0xda,0xff,0xf0]
+ xxpermdi 7, 63, 27, 2
+# CHECK-BE: xxsel 7, 63, 27, 14 # encoding: [0xf0,0xff,0xdb,0xb4]
+# CHECK-LE: xxsel 7, 63, 27, 14 # encoding: [0xb4,0xdb,0xff,0xf0]
+ xxsel 7, 63, 27, 14
+# CHECK-BE: xxsldwi 7, 63, 27, 1 # encoding: [0xf0,0xff,0xd9,0x14]
+# CHECK-LE: xxsldwi 7, 63, 27, 1 # encoding: [0x14,0xd9,0xff,0xf0]
+ xxsldwi 7, 63, 27, 1
+# CHECK-BE: xxpermdi 7, 63, 63, 3 # encoding: [0xf0,0xff,0xfb,0x56]
+# CHECK-LE: xxpermdi 7, 63, 63, 3 # encoding: [0x56,0xfb,0xff,0xf0]
+ xxspltd 7, 63, 1
+# CHECK-BE: xxspltw 7, 27, 3 # encoding: [0xf0,0xe3,0xda,0x90]
+# CHECK-LE: xxspltw 7, 27, 3 # encoding: [0x90,0xda,0xe3,0xf0]
+ xxspltw 7, 27, 3
+# CHECK-BE: xxpermdi 7, 63, 63, 2 # encoding: [0xf0,0xff,0xfa,0x56]
+# CHECK-LE: xxpermdi 7, 63, 63, 2 # encoding: [0x56,0xfa,0xff,0xf0]
+ xxswapd 7, 63
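+# (Note: xxmrghd, xxmrgld, xxspltd and xxswapd are extended mnemonics that
+#  assemble to xxpermdi with DM = 0, 3, 0 or 3, and 2 respectively, and
+#  xvmovdp / xvmovsp assemble to xvcpsgndp / xvcpsgnsp with both source
+#  operands equal, as the checked disassembly above shows.)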
diff --git a/test/MC/Sparc/lit.local.cfg b/test/MC/Sparc/lit.local.cfg
new file mode 100644
index 000000000000..fa6a54e50132
--- /dev/null
+++ b/test/MC/Sparc/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'Sparc' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/MC/Sparc/sparc-alu-instructions.s b/test/MC/Sparc/sparc-alu-instructions.s
new file mode 100644
index 000000000000..e2e5ef867252
--- /dev/null
+++ b/test/MC/Sparc/sparc-alu-instructions.s
@@ -0,0 +1,128 @@
+! RUN: llvm-mc %s -arch=sparc -show-encoding | FileCheck %s
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s
+
+ ! CHECK: add %g0, %g0, %g0 ! encoding: [0x80,0x00,0x00,0x00]
+ add %g0, %g0, %g0
+ ! CHECK: add %g1, %g2, %g3 ! encoding: [0x86,0x00,0x40,0x02]
+ add %g1, %g2, %g3
+ ! CHECK: add %o0, %o1, %l0 ! encoding: [0xa0,0x02,0x00,0x09]
+ add %r8, %r9, %l0
+ ! CHECK: add %o0, 10, %l0 ! encoding: [0xa0,0x02,0x20,0x0a]
+ add %o0, 10, %l0
+
+ ! CHECK: addcc %g1, %g2, %g3 ! encoding: [0x86,0x80,0x40,0x02]
+ addcc %g1, %g2, %g3
+
+ ! CHECK: addxcc %g1, %g2, %g3 ! encoding: [0x86,0xc0,0x40,0x02]
+ addxcc %g1, %g2, %g3
+
+ ! CHECK: udiv %g1, %g2, %g3 ! encoding: [0x86,0x70,0x40,0x02]
+ udiv %g1, %g2, %g3
+
+ ! CHECK: sdiv %g1, %g2, %g3 ! encoding: [0x86,0x78,0x40,0x02]
+ sdiv %g1, %g2, %g3
+
+ ! CHECK: and %g1, %g2, %g3 ! encoding: [0x86,0x08,0x40,0x02]
+ and %g1, %g2, %g3
+ ! CHECK: andn %g1, %g2, %g3 ! encoding: [0x86,0x28,0x40,0x02]
+ andn %g1, %g2, %g3
+ ! CHECK: or %g1, %g2, %g3 ! encoding: [0x86,0x10,0x40,0x02]
+ or %g1, %g2, %g3
+ ! CHECK: orn %g1, %g2, %g3 ! encoding: [0x86,0x30,0x40,0x02]
+ orn %g1, %g2, %g3
+ ! CHECK: xor %g1, %g2, %g3 ! encoding: [0x86,0x18,0x40,0x02]
+ xor %g1, %g2, %g3
+ ! CHECK: xnor %g1, %g2, %g3 ! encoding: [0x86,0x38,0x40,0x02]
+ xnor %g1, %g2, %g3
+
+ ! CHECK: umul %g1, %g2, %g3 ! encoding: [0x86,0x50,0x40,0x02]
+ umul %g1, %g2, %g3
+
+ ! CHECK: smul %g1, %g2, %g3 ! encoding: [0x86,0x58,0x40,0x02]
+ smul %g1, %g2, %g3
+
+ ! CHECK: nop ! encoding: [0x01,0x00,0x00,0x00]
+ nop
+
+ ! CHECK: sethi 10, %l0 ! encoding: [0x21,0x00,0x00,0x0a]
+ sethi 10, %l0
+
+ ! CHECK: sll %g1, %g2, %g3 ! encoding: [0x87,0x28,0x40,0x02]
+ sll %g1, %g2, %g3
+ ! CHECK: sll %g1, 31, %g3 ! encoding: [0x87,0x28,0x60,0x1f]
+ sll %g1, 31, %g3
+
+ ! CHECK: srl %g1, %g2, %g3 ! encoding: [0x87,0x30,0x40,0x02]
+ srl %g1, %g2, %g3
+ ! CHECK: srl %g1, 31, %g3 ! encoding: [0x87,0x30,0x60,0x1f]
+ srl %g1, 31, %g3
+
+ ! CHECK: sra %g1, %g2, %g3 ! encoding: [0x87,0x38,0x40,0x02]
+ sra %g1, %g2, %g3
+ ! CHECK: sra %g1, 31, %g3 ! encoding: [0x87,0x38,0x60,0x1f]
+ sra %g1, 31, %g3
+
+ ! CHECK: sub %g1, %g2, %g3 ! encoding: [0x86,0x20,0x40,0x02]
+ sub %g1, %g2, %g3
+ ! CHECK: subcc %g1, %g2, %g3 ! encoding: [0x86,0xa0,0x40,0x02]
+ subcc %g1, %g2, %g3
+
+ ! CHECK: subxcc %g1, %g2, %g3 ! encoding: [0x86,0xe0,0x40,0x02]
+ subxcc %g1, %g2, %g3
+
+ ! CHECK: mov %g1, %g3 ! encoding: [0x86,0x10,0x00,0x01]
+ mov %g1, %g3
+
+ ! CHECK: mov 255, %g3 ! encoding: [0x86,0x10,0x20,0xff]
+ mov 0xff, %g3
+
+ ! CHECK: restore ! encoding: [0x81,0xe8,0x00,0x00]
+ restore %g0, %g0, %g0
+
+ ! CHECK: addx %g2, %g1, %g3 ! encoding: [0x86,0x40,0x80,0x01]
+ addx %g2, %g1, %g3
+
+ ! CHECK: subx %g2, %g1, %g3 ! encoding: [0x86,0x60,0x80,0x01]
+ subx %g2, %g1, %g3
+
+ ! CHECK: umulcc %g2, %g1, %g3 ! encoding: [0x86,0xd0,0x80,0x01]
+ umulcc %g2, %g1, %g3
+
+ ! CHECK: smulcc %g2, %g1, %g3 ! encoding: [0x86,0xd8,0x80,0x01]
+ smulcc %g2, %g1, %g3
+
+ ! CHECK: udivcc %g2, %g1, %g3 ! encoding: [0x86,0xf0,0x80,0x01]
+ udivcc %g2, %g1, %g3
+
+ ! CHECK: sdivcc %g2, %g1, %g3 ! encoding: [0x86,0xf8,0x80,0x01]
+ sdivcc %g2, %g1, %g3
+
+ ! CHECK: andcc %g2, %g1, %g3 ! encoding: [0x86,0x88,0x80,0x01]
+ andcc %g2, %g1, %g3
+
+ ! CHECK: andncc %g2, %g1, %g3 ! encoding: [0x86,0xa8,0x80,0x01]
+ andncc %g2, %g1, %g3
+
+ ! CHECK: orcc %g2, %g1, %g3 ! encoding: [0x86,0x90,0x80,0x01]
+ orcc %g2, %g1, %g3
+
+ ! CHECK: orncc %g2, %g1, %g3 ! encoding: [0x86,0xb0,0x80,0x01]
+ orncc %g2, %g1, %g3
+
+ ! CHECK: xorcc %g2, %g1, %g3 ! encoding: [0x86,0x98,0x80,0x01]
+ xorcc %g2, %g1, %g3
+
+ ! CHECK: xnorcc %g2, %g1, %g3 ! encoding: [0x86,0xb8,0x80,0x01]
+ xnorcc %g2, %g1, %g3
+
+ ! CHECK: taddcc %g2, %g1, %g3 ! encoding: [0x87,0x00,0x80,0x01]
+ taddcc %g2, %g1, %g3
+
+ ! CHECK: tsubcc %g2, %g1, %g3 ! encoding: [0x87,0x08,0x80,0x01]
+ tsubcc %g2, %g1, %g3
+
+ ! CHECK: taddcctv %g2, %g1, %g3 ! encoding: [0x87,0x10,0x80,0x01]
+ taddcctv %g2, %g1, %g3
+
+ ! CHECK: tsubcctv %g2, %g1, %g3 ! encoding: [0x87,0x18,0x80,0x01]
+ tsubcctv %g2, %g1, %g3
diff --git a/test/MC/Sparc/sparc-atomic-instructions.s b/test/MC/Sparc/sparc-atomic-instructions.s
new file mode 100644
index 000000000000..5c46067ea3bc
--- /dev/null
+++ b/test/MC/Sparc/sparc-atomic-instructions.s
@@ -0,0 +1,19 @@
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s
+
+ ! CHECK: membar 15 ! encoding: [0x81,0x43,0xe0,0x0f]
+ membar 15
+
+ ! CHECK: stbar ! encoding: [0x81,0x43,0xc0,0x00]
+ stbar
+
+ ! CHECK: swap [%i0+%l6], %o2 ! encoding: [0xd4,0x7e,0x00,0x16]
+ swap [%i0+%l6], %o2
+
+ ! CHECK: swap [%i0+32], %o2 ! encoding: [0xd4,0x7e,0x20,0x20]
+ swap [%i0+32], %o2
+
+ ! CHECK: cas [%i0], %l6, %o2 ! encoding: [0xd5,0xe6,0x10,0x16]
+ cas [%i0], %l6, %o2
+
+ ! CHECK: casx [%i0], %l6, %o2 ! encoding: [0xd5,0xf6,0x10,0x16]
+ casx [%i0], %l6, %o2
diff --git a/test/MC/Sparc/sparc-ctrl-instructions.s b/test/MC/Sparc/sparc-ctrl-instructions.s
new file mode 100644
index 000000000000..cf92e7045d43
--- /dev/null
+++ b/test/MC/Sparc/sparc-ctrl-instructions.s
@@ -0,0 +1,278 @@
+! RUN: llvm-mc %s -arch=sparc -show-encoding | FileCheck %s
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s
+
+ ! CHECK: call foo ! encoding: [0b01AAAAAA,A,A,A]
+ ! CHECK: ! fixup A - offset: 0, value: foo, kind: fixup_sparc_call30
+ call foo
+
+ ! CHECK: call %g1+%i2 ! encoding: [0x9f,0xc0,0x40,0x1a]
+ call %g1 + %i2
+
+ ! CHECK: call %o1+8 ! encoding: [0x9f,0xc2,0x60,0x08]
+ call %o1 + 8
+
+ ! CHECK: call %g1 ! encoding: [0x9f,0xc0,0x60,0x00]
+ call %g1
+
+ ! CHECK: call %g1+%lo(sym) ! encoding: [0x9f,0xc0,0b011000AA,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %lo(sym), kind: fixup_sparc_lo10
+ call %g1+%lo(sym)
+
+ ! CHECK: jmp %g1+%i2 ! encoding: [0x81,0xc0,0x40,0x1a]
+ jmp %g1 + %i2
+
+ ! CHECK: jmp %o1+8 ! encoding: [0x81,0xc2,0x60,0x08]
+ jmp %o1 + 8
+
+ ! CHECK: jmp %g1 ! encoding: [0x81,0xc0,0x60,0x00]
+ jmp %g1
+
+ ! CHECK: jmp %g1+%lo(sym) ! encoding: [0x81,0xc0,0b011000AA,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %lo(sym), kind: fixup_sparc_lo10
+ jmp %g1+%lo(sym)
+
+ ! CHECK: jmpl %g1+%i2, %g2 ! encoding: [0x85,0xc0,0x40,0x1a]
+ jmpl %g1 + %i2, %g2
+
+ ! CHECK: jmpl %o1+8, %g2 ! encoding: [0x85,0xc2,0x60,0x08]
+ jmpl %o1 + 8, %g2
+
+ ! CHECK: jmpl %g1, %g2 ! encoding: [0x85,0xc0,0x60,0x00]
+ jmpl %g1, %g2
+
+ ! CHECK: jmpl %g1+%lo(sym), %g2 ! encoding: [0x85,0xc0,0b011000AA,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %lo(sym), kind: fixup_sparc_lo10
+ jmpl %g1+%lo(sym), %g2
+
+ ! CHECK: ba .BB0 ! encoding: [0x10,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ ba .BB0
+
+ ! CHECK: bne .BB0 ! encoding: [0x12,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bne .BB0
+
+ ! CHECK: be .BB0 ! encoding: [0x02,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ be .BB0
+
+ ! CHECK: bg .BB0 ! encoding: [0x14,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bg .BB0
+
+ ! CHECK: ble .BB0 ! encoding: [0x04,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ ble .BB0
+
+ ! CHECK: bge .BB0 ! encoding: [0x16,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bge .BB0
+
+ ! CHECK: bl .BB0 ! encoding: [0x06,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bl .BB0
+
+ ! CHECK: bgu .BB0 ! encoding: [0x18,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bgu .BB0
+
+ ! CHECK: bleu .BB0 ! encoding: [0x08,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bleu .BB0
+
+ ! CHECK: bcc .BB0 ! encoding: [0x1a,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bcc .BB0
+
+ ! CHECK: bcs .BB0 ! encoding: [0x0a,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bcs .BB0
+
+ ! CHECK: bpos .BB0 ! encoding: [0x1c,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bpos .BB0
+
+ ! CHECK: bneg .BB0 ! encoding: [0x0c,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bneg .BB0
+
+ ! CHECK: bvc .BB0 ! encoding: [0x1e,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bvc .BB0
+
+ ! CHECK: bvs .BB0 ! encoding: [0x0e,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bvs .BB0
+
+ ! CHECK: fbu .BB0 ! encoding: [0x0f,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbu .BB0
+
+ ! CHECK: fbg .BB0 ! encoding: [0x0d,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbg .BB0
+ ! CHECK: fbug .BB0 ! encoding: [0x0b,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbug .BB0
+
+ ! CHECK: fbl .BB0 ! encoding: [0x09,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbl .BB0
+
+ ! CHECK: fbul .BB0 ! encoding: [0x07,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbul .BB0
+
+ ! CHECK: fblg .BB0 ! encoding: [0x05,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fblg .BB0
+
+ ! CHECK: fbne .BB0 ! encoding: [0x03,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbne .BB0
+
+ ! CHECK: fbe .BB0 ! encoding: [0x13,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbe .BB0
+
+ ! CHECK: fbue .BB0 ! encoding: [0x15,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbue .BB0
+
+ ! CHECK: fbge .BB0 ! encoding: [0x17,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbge .BB0
+
+ ! CHECK: fbuge .BB0 ! encoding: [0x19,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbuge .BB0
+
+ ! CHECK: fble .BB0 ! encoding: [0x1b,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fble .BB0
+
+ ! CHECK: fbule .BB0 ! encoding: [0x1d,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbule .BB0
+
+ ! CHECK: fbo .BB0 ! encoding: [0x1f,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbo .BB0
+
+ ! CHECK: ba,a .BB0 ! encoding: [0x30,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ ba,a .BB0
+
+ ! CHECK: bne,a .BB0 ! encoding: [0x32,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bne,a .BB0
+
+ ! CHECK: be,a .BB0 ! encoding: [0x22,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ be,a .BB0
+
+ ! CHECK: bg,a .BB0 ! encoding: [0x34,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bg,a .BB0
+
+ ! CHECK: ble,a .BB0 ! encoding: [0x24,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ ble,a .BB0
+
+ ! CHECK: bge,a .BB0 ! encoding: [0x36,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bge,a .BB0
+
+ ! CHECK: bl,a .BB0 ! encoding: [0x26,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bl,a .BB0
+
+ ! CHECK: bgu,a .BB0 ! encoding: [0x38,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bgu,a .BB0
+
+ ! CHECK: bleu,a .BB0 ! encoding: [0x28,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bleu,a .BB0
+
+ ! CHECK: bcc,a .BB0 ! encoding: [0x3a,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bcc,a .BB0
+
+ ! CHECK: bcs,a .BB0 ! encoding: [0x2a,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bcs,a .BB0
+
+ ! CHECK: bpos,a .BB0 ! encoding: [0x3c,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bpos,a .BB0
+
+ ! CHECK: bneg,a .BB0 ! encoding: [0x2c,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bneg,a .BB0
+
+ ! CHECK: bvc,a .BB0 ! encoding: [0x3e,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bvc,a .BB0
+
+ ! CHECK: bvs,a .BB0 ! encoding: [0x2e,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ bvs,a .BB0
+
+ ! CHECK: fbu,a .BB0 ! encoding: [0x2f,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbu,a .BB0
+
+ ! CHECK: fbg,a .BB0 ! encoding: [0x2d,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbg,a .BB0
+ ! CHECK: fbug,a .BB0 ! encoding: [0x2b,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbug,a .BB0
+
+ ! CHECK: fbl,a .BB0 ! encoding: [0x29,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbl,a .BB0
+
+ ! CHECK: fbul,a .BB0 ! encoding: [0x27,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbul,a .BB0
+
+ ! CHECK: fblg,a .BB0 ! encoding: [0x25,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fblg,a .BB0
+
+ ! CHECK: fbne,a .BB0 ! encoding: [0x23,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbne,a .BB0
+
+ ! CHECK: fbe,a .BB0 ! encoding: [0x33,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbe,a .BB0
+
+ ! CHECK: fbue,a .BB0 ! encoding: [0x35,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbue,a .BB0
+
+ ! CHECK: fbge,a .BB0 ! encoding: [0x37,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbge,a .BB0
+
+ ! CHECK: fbuge,a .BB0 ! encoding: [0x39,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbuge,a .BB0
+
+ ! CHECK: fble,a .BB0 ! encoding: [0x3b,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fble,a .BB0
+
+ ! CHECK: fbule,a .BB0 ! encoding: [0x3d,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbule,a .BB0
+
+ ! CHECK: fbo,a .BB0 ! encoding: [0x3f,0b10AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br22
+ fbo,a .BB0
+
+ ! CHECK: rett %i7+8 ! encoding: [0x81,0xcf,0xe0,0x08]
+ rett %i7 + 8
diff --git a/test/MC/Sparc/sparc-directive-xword.s b/test/MC/Sparc/sparc-directive-xword.s
new file mode 100644
index 000000000000..0c9e249a6ad3
--- /dev/null
+++ b/test/MC/Sparc/sparc-directive-xword.s
@@ -0,0 +1,10 @@
+! RUN: not llvm-mc %s -arch=sparc -show-encoding 2>&1 | FileCheck %s --check-prefix=SPARC32
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s --check-prefix=SPARC64
+
+ ! SPARC32: error: unknown directive
+ ! SPARC32-NEXT: .xword 65536
+ ! SPARC32-NEXT: ^
+
+ ! SPARC64: .xword 65536
+ .xword 65536
+
diff --git a/test/MC/Sparc/sparc-directives.s b/test/MC/Sparc/sparc-directives.s
new file mode 100644
index 000000000000..9185e4bc9b2b
--- /dev/null
+++ b/test/MC/Sparc/sparc-directives.s
@@ -0,0 +1,19 @@
+! RUN: llvm-mc %s -arch=sparc -show-encoding | FileCheck %s --check-prefix=SPARC32
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s --check-prefix=SPARC64
+
+ ! SPARC32: .byte 24
+ ! SPARC64: .byte 24
+ .byte 24
+
+ ! SPARC32: .half 1024
+ ! SPARC64: .half 1024
+ .half 1024
+
+ ! SPARC32: .word 65536
+ ! SPARC64: .word 65536
+ .word 65536
+
+ ! SPARC32: .word 65536
+ ! SPARC64: .xword 65536
+ .nword 65536
+
diff --git a/test/MC/Sparc/sparc-fp-instructions.s b/test/MC/Sparc/sparc-fp-instructions.s
new file mode 100644
index 000000000000..f8c130f6e5f9
--- /dev/null
+++ b/test/MC/Sparc/sparc-fp-instructions.s
@@ -0,0 +1,140 @@
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s
+
+ ! CHECK: fitos %f0, %f4 ! encoding: [0x89,0xa0,0x18,0x80]
+ ! CHECK: fitod %f0, %f4 ! encoding: [0x89,0xa0,0x19,0x00]
+ ! CHECK: fitoq %f0, %f4 ! encoding: [0x89,0xa0,0x19,0x80]
+ fitos %f0, %f4
+ fitod %f0, %f4
+ fitoq %f0, %f4
+
+ ! CHECK: fstoi %f0, %f4 ! encoding: [0x89,0xa0,0x1a,0x20]
+ ! CHECK: fdtoi %f0, %f4 ! encoding: [0x89,0xa0,0x1a,0x40]
+ ! CHECK: fqtoi %f0, %f4 ! encoding: [0x89,0xa0,0x1a,0x60]
+ fstoi %f0, %f4
+ fdtoi %f0, %f4
+ fqtoi %f0, %f4
+
+ ! CHECK: fstod %f0, %f4 ! encoding: [0x89,0xa0,0x19,0x20]
+ ! CHECK: fstoq %f0, %f4 ! encoding: [0x89,0xa0,0x19,0xa0]
+ fstod %f0, %f4
+ fstoq %f0, %f4
+
+ ! CHECK: fdtos %f0, %f4 ! encoding: [0x89,0xa0,0x18,0xc0]
+ ! CHECK: fdtoq %f0, %f4 ! encoding: [0x89,0xa0,0x19,0xc0]
+ fdtos %f0, %f4
+ fdtoq %f0, %f4
+
+ ! CHECK: fqtos %f0, %f4 ! encoding: [0x89,0xa0,0x18,0xe0]
+ ! CHECK: fqtod %f0, %f4 ! encoding: [0x89,0xa0,0x19,0x60]
+ fqtos %f0, %f4
+ fqtod %f0, %f4
+
+ ! CHECK: fmovs %f0, %f4 ! encoding: [0x89,0xa0,0x00,0x20]
+ ! CHECK: fmovd %f0, %f4 ! encoding: [0x89,0xa0,0x00,0x40]
+ ! CHECK: fmovq %f0, %f4 ! encoding: [0x89,0xa0,0x00,0x60]
+ fmovs %f0, %f4
+ fmovd %f0, %f4
+ fmovq %f0, %f4
+
+ ! CHECK: fnegs %f0, %f4 ! encoding: [0x89,0xa0,0x00,0xa0]
+ ! CHECK: fnegd %f0, %f4 ! encoding: [0x89,0xa0,0x00,0xc0]
+ ! CHECK: fnegq %f0, %f4 ! encoding: [0x89,0xa0,0x00,0xe0]
+ fnegs %f0, %f4
+ fnegd %f0, %f4
+ fnegq %f0, %f4
+
+ ! CHECK: fabss %f0, %f4 ! encoding: [0x89,0xa0,0x01,0x20]
+ ! CHECK: fabsd %f0, %f4 ! encoding: [0x89,0xa0,0x01,0x40]
+ ! CHECK: fabsq %f0, %f4 ! encoding: [0x89,0xa0,0x01,0x60]
+ fabss %f0, %f4
+ fabsd %f0, %f4
+ fabsq %f0, %f4
+
+ ! CHECK: fsqrts %f0, %f4 ! encoding: [0x89,0xa0,0x05,0x20]
+ ! CHECK: fsqrtd %f0, %f4 ! encoding: [0x89,0xa0,0x05,0x40]
+ ! CHECK: fsqrtq %f0, %f4 ! encoding: [0x89,0xa0,0x05,0x60]
+ fsqrts %f0, %f4
+ fsqrtd %f0, %f4
+ fsqrtq %f0, %f4
+
+ ! CHECK: fadds %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x08,0x24]
+ ! CHECK: faddd %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x08,0x44]
+ ! CHECK: faddq %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x08,0x64]
+ fadds %f0, %f4, %f8
+ faddd %f0, %f4, %f8
+ faddq %f0, %f4, %f8
+
+ ! make sure we can handle V9 double registers and their aliased quad registers.
+ ! CHECK: faddd %f32, %f34, %f62 ! encoding: [0xbf,0xa0,0x48,0x43]
+ ! CHECK: faddq %f32, %f36, %f60 ! encoding: [0xbb,0xa0,0x48,0x65]
+ faddd %f32, %f34, %f62
+ faddq %f32, %f36, %f60
+
+ ! CHECK: fsubs %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x08,0xa4]
+ ! CHECK: fsubd %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x08,0xc4]
+ ! CHECK: fsubq %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x08,0xe4]
+ fsubs %f0, %f4, %f8
+ fsubd %f0, %f4, %f8
+ fsubq %f0, %f4, %f8
+
+ ! CHECK: fmuls %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x09,0x24]
+ ! CHECK: fmuld %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x09,0x44]
+ ! CHECK: fmulq %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x09,0x64]
+ fmuls %f0, %f4, %f8
+ fmuld %f0, %f4, %f8
+ fmulq %f0, %f4, %f8
+
+ ! CHECK: fsmuld %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x0d,0x24]
+ ! CHECK: fdmulq %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x0d,0xc4]
+ fsmuld %f0, %f4, %f8
+ fdmulq %f0, %f4, %f8
+
+ ! CHECK: fdivs %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x09,0xa4]
+ ! CHECK: fdivd %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x09,0xc4]
+ ! CHECK: fdivq %f0, %f4, %f8 ! encoding: [0x91,0xa0,0x09,0xe4]
+ fdivs %f0, %f4, %f8
+ fdivd %f0, %f4, %f8
+ fdivq %f0, %f4, %f8
+
+ ! CHECK: fcmps %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x24]
+ ! CHECK: fcmpd %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x44]
+ ! CHECK: fcmpq %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x64]
+ fcmps %f0, %f4
+ fcmpd %f0, %f4
+ fcmpq %f0, %f4
+
+ ! CHECK: fcmpes %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xa4]
+ ! CHECK: fcmped %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xc4]
+ ! CHECK: fcmpeq %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xe4]
+ fcmpes %f0, %f4
+ fcmped %f0, %f4
+ fcmpeq %f0, %f4
+
+ ! CHECK: fcmps %fcc2, %f0, %f4 ! encoding: [0x85,0xa8,0x0a,0x24]
+ ! CHECK: fcmpd %fcc2, %f0, %f4 ! encoding: [0x85,0xa8,0x0a,0x44]
+ ! CHECK: fcmpq %fcc2, %f0, %f4 ! encoding: [0x85,0xa8,0x0a,0x64]
+ fcmps %fcc2, %f0, %f4
+ fcmpd %fcc2, %f0, %f4
+ fcmpq %fcc2, %f0, %f4
+
+ ! CHECK: fcmpes %fcc2, %f0, %f4 ! encoding: [0x85,0xa8,0x0a,0xa4]
+ ! CHECK: fcmped %fcc2, %f0, %f4 ! encoding: [0x85,0xa8,0x0a,0xc4]
+ ! CHECK: fcmpeq %fcc2, %f0, %f4 ! encoding: [0x85,0xa8,0x0a,0xe4]
+ fcmpes %fcc2, %f0, %f4
+ fcmped %fcc2, %f0, %f4
+ fcmpeq %fcc2, %f0, %f4
+
+ ! CHECK: fxtos %f0, %f4 ! encoding: [0x89,0xa0,0x10,0x80]
+ ! CHECK: fxtod %f0, %f4 ! encoding: [0x89,0xa0,0x11,0x00]
+ ! CHECK: fxtoq %f0, %f4 ! encoding: [0x89,0xa0,0x11,0x80]
+ fxtos %f0, %f4
+ fxtod %f0, %f4
+ fxtoq %f0, %f4
+
+ ! CHECK: fstox %f0, %f4 ! encoding: [0x89,0xa0,0x10,0x20]
+ ! CHECK: fdtox %f0, %f4 ! encoding: [0x89,0xa0,0x10,0x40]
+ ! CHECK: fqtox %f0, %f4 ! encoding: [0x89,0xa0,0x10,0x60]
+ fstox %f0, %f4
+ fdtox %f0, %f4
+ fqtox %f0, %f4
+
diff --git a/test/MC/Sparc/sparc-mem-instructions.s b/test/MC/Sparc/sparc-mem-instructions.s
new file mode 100644
index 000000000000..e8eb5933c3a1
--- /dev/null
+++ b/test/MC/Sparc/sparc-mem-instructions.s
@@ -0,0 +1,58 @@
+! RUN: llvm-mc %s -arch=sparc -show-encoding | FileCheck %s
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s
+
+ ! CHECK: ldsb [%i0+%l6], %o2 ! encoding: [0xd4,0x4e,0x00,0x16]
+ ldsb [%i0 + %l6], %o2
+ ! CHECK: ldsb [%i0+32], %o2 ! encoding: [0xd4,0x4e,0x20,0x20]
+ ldsb [%i0 + 32], %o2
+ ! CHECK: ldsb [%g1], %o4 ! encoding: [0xd8,0x48,0x60,0x00]
+ ldsb [%g1], %o4
+
+ ! CHECK: ldsh [%i0+%l6], %o2 ! encoding: [0xd4,0x56,0x00,0x16]
+ ldsh [%i0 + %l6], %o2
+ ! CHECK: ldsh [%i0+32], %o2 ! encoding: [0xd4,0x56,0x20,0x20]
+ ldsh [%i0 + 32], %o2
+ ! CHECK: ldsh [%g1], %o4 ! encoding: [0xd8,0x50,0x60,0x00]
+ ldsh [%g1], %o4
+
+ ! CHECK: ldub [%i0+%l6], %o2 ! encoding: [0xd4,0x0e,0x00,0x16]
+ ldub [%i0 + %l6], %o2
+ ! CHECK: ldub [%i0+32], %o2 ! encoding: [0xd4,0x0e,0x20,0x20]
+ ldub [%i0 + 32], %o2
+ ! CHECK: ldub [%g1], %o2 ! encoding: [0xd4,0x08,0x60,0x00]
+ ldub [%g1], %o2
+
+ ! CHECK: lduh [%i0+%l6], %o2 ! encoding: [0xd4,0x16,0x00,0x16]
+ lduh [%i0 + %l6], %o2
+ ! CHECK: lduh [%i0+32], %o2 ! encoding: [0xd4,0x16,0x20,0x20]
+ lduh [%i0 + 32], %o2
+ ! CHECK: lduh [%g1], %o2 ! encoding: [0xd4,0x10,0x60,0x00]
+ lduh [%g1], %o2
+
+ ! CHECK: ld [%i0+%l6], %o2 ! encoding: [0xd4,0x06,0x00,0x16]
+ ld [%i0 + %l6], %o2
+ ! CHECK: ld [%i0+32], %o2 ! encoding: [0xd4,0x06,0x20,0x20]
+ ld [%i0 + 32], %o2
+ ! CHECK: ld [%g1], %o2 ! encoding: [0xd4,0x00,0x60,0x00]
+ ld [%g1], %o2
+
+ ! CHECK: stb %o2, [%i0+%l6] ! encoding: [0xd4,0x2e,0x00,0x16]
+ stb %o2, [%i0 + %l6]
+ ! CHECK: stb %o2, [%i0+32] ! encoding: [0xd4,0x2e,0x20,0x20]
+ stb %o2, [%i0 + 32]
+ ! CHECK: stb %o2, [%g1] ! encoding: [0xd4,0x28,0x60,0x00]
+ stb %o2, [%g1]
+
+ ! CHECK: sth %o2, [%i0+%l6] ! encoding: [0xd4,0x36,0x00,0x16]
+ sth %o2, [%i0 + %l6]
+ ! CHECK: sth %o2, [%i0+32] ! encoding: [0xd4,0x36,0x20,0x20]
+ sth %o2, [%i0 + 32]
+ ! CHECK: sth %o2, [%g1] ! encoding: [0xd4,0x30,0x60,0x00]
+ sth %o2, [%g1]
+
+ ! CHECK: st %o2, [%i0+%l6] ! encoding: [0xd4,0x26,0x00,0x16]
+ st %o2, [%i0 + %l6]
+ ! CHECK: st %o2, [%i0+32] ! encoding: [0xd4,0x26,0x20,0x20]
+ st %o2, [%i0 + 32]
+ ! CHECK: st %o2, [%g1] ! encoding: [0xd4,0x20,0x60,0x00]
+ st %o2, [%g1]
diff --git a/test/MC/Sparc/sparc-nop-data.s b/test/MC/Sparc/sparc-nop-data.s
new file mode 100644
index 000000000000..1538505af73f
--- /dev/null
+++ b/test/MC/Sparc/sparc-nop-data.s
@@ -0,0 +1,11 @@
+! RUN: llvm-mc %s -arch=sparc -filetype=obj | llvm-readobj -s -sd | FileCheck %s
+! RUN: llvm-mc %s -arch=sparcv9 -filetype=obj | llvm-readobj -s -sd | FileCheck %s
+
+! CHECK: 0000: BA1F401D 01000000 01000000 01000000
+! CHECK: 0010: BA1F401D
+
+foo:
+ xor %i5, %i5, %i5
+ .align 16
+ xor %i5, %i5, %i5
+
diff --git a/test/MC/Sparc/sparc-pic.s b/test/MC/Sparc/sparc-pic.s
new file mode 100644
index 000000000000..5a34d309899e
--- /dev/null
+++ b/test/MC/Sparc/sparc-pic.s
@@ -0,0 +1,49 @@
+! RUN: llvm-mc %s -arch=sparcv9 --relocation-model=pic -filetype=obj | llvm-readobj -r | FileCheck %s
+
+
+! CHECK: Relocations [
+! CHECK-NOT: 0x{{[0-9,A-F]+}} R_SPARC_WPLT30 .text 0xC
+! CHECK: 0x{{[0-9,A-F]+}} R_SPARC_PC22 _GLOBAL_OFFSET_TABLE_ 0x4
+! CHECK-NEXT: 0x{{[0-9,A-F]+}} R_SPARC_PC10 _GLOBAL_OFFSET_TABLE_ 0x8
+! CHECK-NEXT: 0x{{[0-9,A-F]+}} R_SPARC_GOT22 AGlobalVar 0x0
+! CHECK-NEXT: 0x{{[0-9,A-F]+}} R_SPARC_GOT10 AGlobalVar 0x0
+! CHECK-NEXT: 0x{{[0-9,A-F]+}} R_SPARC_WPLT30 bar 0x0
+! CHECK: ]
+
+ .text
+ .globl foo
+ .align 4
+ .type foo,@function
+foo:
+ .cfi_startproc
+ save %sp, -176, %sp
+ .cfi_def_cfa_register %fp
+ .cfi_window_save
+ .cfi_register 15, 31
+.Ltmp4:
+ call .Ltmp5
+.Ltmp6:
+ sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.Ltmp4)), %i1
+.Ltmp5:
+ or %i1, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.Ltmp4)), %i1
+ add %i1, %o7, %i1
+ sethi %hi(AGlobalVar), %i2
+ add %i2, %lo(AGlobalVar), %i2
+ ldx [%i1+%i2], %i1
+ ldx [%i1], %i1
+ call bar
+ add %i0, %i1, %o0
+ ret
+ restore %g0, %o0, %o0
+.Ltmp7:
+ .size foo, .Ltmp7-foo
+ .cfi_endproc
+
+ .type AGlobalVar,@object ! @AGlobalVar
+ .section .bss,#alloc,#write
+ .globl AGlobalVar
+ .align 8
+AGlobalVar:
+ .xword 0 ! 0x0
+ .size AGlobalVar, 8
+
diff --git a/test/MC/Sparc/sparc-relocations.s b/test/MC/Sparc/sparc-relocations.s
new file mode 100644
index 000000000000..a5b7bafa4f57
--- /dev/null
+++ b/test/MC/Sparc/sparc-relocations.s
@@ -0,0 +1,46 @@
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s
+! RUN: llvm-mc %s -arch=sparcv9 -filetype=obj | llvm-readobj -r | FileCheck %s --check-prefix=CHECK-OBJ
+
+ ! CHECK-OBJ: Format: ELF64-sparc
+ ! CHECK-OBJ: Relocations [
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_WDISP30 foo
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_LO10 sym
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_HI22 sym
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_H44 sym
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_M44 sym
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_L44 sym
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_HH22 sym
+ ! CHECK-OBJ: 0x{{[0-9,A-F]+}} R_SPARC_HM10 sym
+ ! CHECK-OBJ: ]
+
+ ! CHECK: call foo ! encoding: [0b01AAAAAA,A,A,A]
+ ! CHECK: ! fixup A - offset: 0, value: foo, kind: fixup_sparc_call30
+ call foo
+
+ ! CHECK: or %g1, %lo(sym), %g3 ! encoding: [0x86,0x10,0b011000AA,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %lo(sym), kind: fixup_sparc_lo10
+ or %g1, %lo(sym), %g3
+
+ ! CHECK: sethi %hi(sym), %l0 ! encoding: [0x21,0b00AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %hi(sym), kind: fixup_sparc_hi22
+ sethi %hi(sym), %l0
+
+ ! CHECK: sethi %h44(sym), %l0 ! encoding: [0x21,0b00AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %h44(sym), kind: fixup_sparc_h44
+ sethi %h44(sym), %l0
+
+ ! CHECK: or %g1, %m44(sym), %g3 ! encoding: [0x86,0x10,0b011000AA,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %m44(sym), kind: fixup_sparc_m44
+ or %g1, %m44(sym), %g3
+
+ ! CHECK: or %g1, %l44(sym), %g3 ! encoding: [0x86,0x10,0b0110AAAA,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %l44(sym), kind: fixup_sparc_l44
+ or %g1, %l44(sym), %g3
+
+ ! CHECK: sethi %hh(sym), %l0 ! encoding: [0x21,0b00AAAAAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %hh(sym), kind: fixup_sparc_hh
+ sethi %hh(sym), %l0
+
+ ! CHECK: or %g1, %hm(sym), %g3 ! encoding: [0x86,0x10,0b011000AA,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: %hm(sym), kind: fixup_sparc_hm
+ or %g1, %hm(sym), %g3
diff --git a/test/MC/Sparc/sparc-vis.s b/test/MC/Sparc/sparc-vis.s
new file mode 100644
index 000000000000..11ca564d48d1
--- /dev/null
+++ b/test/MC/Sparc/sparc-vis.s
@@ -0,0 +1,4 @@
+! RUN: llvm-mc %s -arch=sparcv9 -mcpu=niagara -show-encoding | FileCheck %s
+
+ ! CHECK: fzeros %f31 ! encoding: [0xbf,0xb0,0x0c,0x20]
+ fzeros %f31
diff --git a/test/MC/Sparc/sparc64-alu-instructions.s b/test/MC/Sparc/sparc64-alu-instructions.s
new file mode 100644
index 000000000000..d4acea43f72d
--- /dev/null
+++ b/test/MC/Sparc/sparc64-alu-instructions.s
@@ -0,0 +1,38 @@
+! RUN: llvm-mc %s -triple=sparc64-unknown-linux-gnu -show-encoding | FileCheck %s
+
+ ! CHECK: sllx %g1, %i2, %i0 ! encoding: [0xb1,0x28,0x50,0x1a]
+ sllx %g1, %i2, %i0
+
+ ! CHECK: sllx %g1, 63, %i0 ! encoding: [0xb1,0x28,0x70,0x3f]
+ sllx %g1, 63, %i0
+
+ ! CHECK: srlx %g1, %i2, %i0 ! encoding: [0xb1,0x30,0x50,0x1a]
+ srlx %g1, %i2, %i0
+
+ ! CHECK: srlx %g1, 63, %i0 ! encoding: [0xb1,0x30,0x70,0x3f]
+ srlx %g1, 63, %i0
+
+ ! CHECK: srax %g1, %i2, %i0 ! encoding: [0xb1,0x38,0x50,0x1a]
+ srax %g1, %i2, %i0
+
+ ! CHECK: srax %g1, 63, %i0 ! encoding: [0xb1,0x38,0x70,0x3f]
+ srax %g1, 63, %i0
+
+ ! CHECK: mulx %g1, %i2, %i0 ! encoding: [0xb0,0x48,0x40,0x1a]
+ mulx %g1, %i2, %i0
+
+ ! CHECK: mulx %g1, 63, %i0 ! encoding: [0xb0,0x48,0x60,0x3f]
+ mulx %g1, 63, %i0
+
+ ! CHECK: sdivx %g1, %i2, %i0 ! encoding: [0xb1,0x68,0x40,0x1a]
+ sdivx %g1, %i2, %i0
+
+ ! CHECK: sdivx %g1, 63, %i0 ! encoding: [0xb1,0x68,0x60,0x3f]
+ sdivx %g1, 63, %i0
+
+ ! CHECK: udivx %g1, %i2, %i0 ! encoding: [0xb0,0x68,0x40,0x1a]
+ udivx %g1, %i2, %i0
+
+ ! CHECK: udivx %g1, 63, %i0 ! encoding: [0xb0,0x68,0x60,0x3f]
+ udivx %g1, 63, %i0
+
diff --git a/test/MC/Sparc/sparc64-ctrl-instructions.s b/test/MC/Sparc/sparc64-ctrl-instructions.s
new file mode 100644
index 000000000000..65bca2990f15
--- /dev/null
+++ b/test/MC/Sparc/sparc64-ctrl-instructions.s
@@ -0,0 +1,1226 @@
+! RUN: llvm-mc %s -triple=sparc64-unknown-linux-gnu -show-encoding | FileCheck %s
+
+
+ ! CHECK: bne %xcc, .BB0 ! encoding: [0x12,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne %xcc, .BB0
+
+ ! CHECK: be %xcc, .BB0 ! encoding: [0x02,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be %xcc, .BB0
+
+ ! CHECK: bg %xcc, .BB0 ! encoding: [0x14,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg %xcc, .BB0
+
+ ! CHECK: ble %xcc, .BB0 ! encoding: [0x04,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble %xcc, .BB0
+
+ ! CHECK: bge %xcc, .BB0 ! encoding: [0x16,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge %xcc, .BB0
+
+ ! CHECK: bl %xcc, .BB0 ! encoding: [0x06,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl %xcc, .BB0
+
+ ! CHECK: bgu %xcc, .BB0 ! encoding: [0x18,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu %xcc, .BB0
+
+ ! CHECK: bleu %xcc, .BB0 ! encoding: [0x08,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu %xcc, .BB0
+
+ ! CHECK: bcc %xcc, .BB0 ! encoding: [0x1a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc %xcc, .BB0
+
+ ! CHECK: bcs %xcc, .BB0 ! encoding: [0x0a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs %xcc, .BB0
+
+ ! CHECK: bpos %xcc, .BB0 ! encoding: [0x1c,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos %xcc, .BB0
+
+ ! CHECK: bneg %xcc, .BB0 ! encoding: [0x0c,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg %xcc, .BB0
+
+ ! CHECK: bvc %xcc, .BB0 ! encoding: [0x1e,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc %xcc, .BB0
+
+ ! CHECK: bvs %xcc, .BB0 ! encoding: [0x0e,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs %xcc, .BB0
+
+
+ ! CHECK: movne %icc, %g1, %g2 ! encoding: [0x85,0x66,0x40,0x01]
+ ! CHECK: move %icc, %g1, %g2 ! encoding: [0x85,0x64,0x40,0x01]
+ ! CHECK: movg %icc, %g1, %g2 ! encoding: [0x85,0x66,0x80,0x01]
+ ! CHECK: movle %icc, %g1, %g2 ! encoding: [0x85,0x64,0x80,0x01]
+ ! CHECK: movge %icc, %g1, %g2 ! encoding: [0x85,0x66,0xc0,0x01]
+ ! CHECK: movl %icc, %g1, %g2 ! encoding: [0x85,0x64,0xc0,0x01]
+ ! CHECK: movgu %icc, %g1, %g2 ! encoding: [0x85,0x67,0x00,0x01]
+ ! CHECK: movleu %icc, %g1, %g2 ! encoding: [0x85,0x65,0x00,0x01]
+ ! CHECK: movcc %icc, %g1, %g2 ! encoding: [0x85,0x67,0x40,0x01]
+ ! CHECK: movcs %icc, %g1, %g2 ! encoding: [0x85,0x65,0x40,0x01]
+ ! CHECK: movpos %icc, %g1, %g2 ! encoding: [0x85,0x67,0x80,0x01]
+ ! CHECK: movneg %icc, %g1, %g2 ! encoding: [0x85,0x65,0x80,0x01]
+ ! CHECK: movvc %icc, %g1, %g2 ! encoding: [0x85,0x67,0xc0,0x01]
+ ! CHECK: movvs %icc, %g1, %g2 ! encoding: [0x85,0x65,0xc0,0x01]
+ movne %icc, %g1, %g2
+ move %icc, %g1, %g2
+ movg %icc, %g1, %g2
+ movle %icc, %g1, %g2
+ movge %icc, %g1, %g2
+ movl %icc, %g1, %g2
+ movgu %icc, %g1, %g2
+ movleu %icc, %g1, %g2
+ movcc %icc, %g1, %g2
+ movcs %icc, %g1, %g2
+ movpos %icc, %g1, %g2
+ movneg %icc, %g1, %g2
+ movvc %icc, %g1, %g2
+ movvs %icc, %g1, %g2
+
+ ! CHECK: movne %xcc, %g1, %g2 ! encoding: [0x85,0x66,0x50,0x01]
+ ! CHECK: move %xcc, %g1, %g2 ! encoding: [0x85,0x64,0x50,0x01]
+ ! CHECK: movg %xcc, %g1, %g2 ! encoding: [0x85,0x66,0x90,0x01]
+ ! CHECK: movle %xcc, %g1, %g2 ! encoding: [0x85,0x64,0x90,0x01]
+ ! CHECK: movge %xcc, %g1, %g2 ! encoding: [0x85,0x66,0xd0,0x01]
+ ! CHECK: movl %xcc, %g1, %g2 ! encoding: [0x85,0x64,0xd0,0x01]
+ ! CHECK: movgu %xcc, %g1, %g2 ! encoding: [0x85,0x67,0x10,0x01]
+ ! CHECK: movleu %xcc, %g1, %g2 ! encoding: [0x85,0x65,0x10,0x01]
+ ! CHECK: movcc %xcc, %g1, %g2 ! encoding: [0x85,0x67,0x50,0x01]
+ ! CHECK: movcs %xcc, %g1, %g2 ! encoding: [0x85,0x65,0x50,0x01]
+ ! CHECK: movpos %xcc, %g1, %g2 ! encoding: [0x85,0x67,0x90,0x01]
+ ! CHECK: movneg %xcc, %g1, %g2 ! encoding: [0x85,0x65,0x90,0x01]
+ ! CHECK: movvc %xcc, %g1, %g2 ! encoding: [0x85,0x67,0xd0,0x01]
+ ! CHECK: movvs %xcc, %g1, %g2 ! encoding: [0x85,0x65,0xd0,0x01]
+ movne %xcc, %g1, %g2
+ move %xcc, %g1, %g2
+ movg %xcc, %g1, %g2
+ movle %xcc, %g1, %g2
+ movge %xcc, %g1, %g2
+ movl %xcc, %g1, %g2
+ movgu %xcc, %g1, %g2
+ movleu %xcc, %g1, %g2
+ movcc %xcc, %g1, %g2
+ movcs %xcc, %g1, %g2
+ movpos %xcc, %g1, %g2
+ movneg %xcc, %g1, %g2
+ movvc %xcc, %g1, %g2
+ movvs %xcc, %g1, %g2
+
+ ! CHECK: movu %fcc0, %g1, %g2 ! encoding: [0x85,0x61,0xc0,0x01]
+ ! CHECK: movg %fcc0, %g1, %g2 ! encoding: [0x85,0x61,0x80,0x01]
+ ! CHECK: movug %fcc0, %g1, %g2 ! encoding: [0x85,0x61,0x40,0x01]
+ ! CHECK: movl %fcc0, %g1, %g2 ! encoding: [0x85,0x61,0x00,0x01]
+ ! CHECK: movul %fcc0, %g1, %g2 ! encoding: [0x85,0x60,0xc0,0x01]
+ ! CHECK: movlg %fcc0, %g1, %g2 ! encoding: [0x85,0x60,0x80,0x01]
+ ! CHECK: movne %fcc0, %g1, %g2 ! encoding: [0x85,0x60,0x40,0x01]
+ ! CHECK: move %fcc0, %g1, %g2 ! encoding: [0x85,0x62,0x40,0x01]
+ ! CHECK: movue %fcc0, %g1, %g2 ! encoding: [0x85,0x62,0x80,0x01]
+ ! CHECK: movge %fcc0, %g1, %g2 ! encoding: [0x85,0x62,0xc0,0x01]
+ ! CHECK: movuge %fcc0, %g1, %g2 ! encoding: [0x85,0x63,0x00,0x01]
+ ! CHECK: movle %fcc0, %g1, %g2 ! encoding: [0x85,0x63,0x40,0x01]
+ ! CHECK: movule %fcc0, %g1, %g2 ! encoding: [0x85,0x63,0x80,0x01]
+ ! CHECK: movo %fcc0, %g1, %g2 ! encoding: [0x85,0x63,0xc0,0x01]
+ movu %fcc0, %g1, %g2
+ movg %fcc0, %g1, %g2
+ movug %fcc0, %g1, %g2
+ movl %fcc0, %g1, %g2
+ movul %fcc0, %g1, %g2
+ movlg %fcc0, %g1, %g2
+ movne %fcc0, %g1, %g2
+ move %fcc0, %g1, %g2
+ movue %fcc0, %g1, %g2
+ movge %fcc0, %g1, %g2
+ movuge %fcc0, %g1, %g2
+ movle %fcc0, %g1, %g2
+ movule %fcc0, %g1, %g2
+ movo %fcc0, %g1, %g2
+
+
+ ! CHECK: fmovsne %icc, %f1, %f2 ! encoding: [0x85,0xaa,0x60,0x21]
+ ! CHECK: fmovse %icc, %f1, %f2 ! encoding: [0x85,0xa8,0x60,0x21]
+ ! CHECK: fmovsg %icc, %f1, %f2 ! encoding: [0x85,0xaa,0xa0,0x21]
+ ! CHECK: fmovsle %icc, %f1, %f2 ! encoding: [0x85,0xa8,0xa0,0x21]
+ ! CHECK: fmovsge %icc, %f1, %f2 ! encoding: [0x85,0xaa,0xe0,0x21]
+ ! CHECK: fmovsl %icc, %f1, %f2 ! encoding: [0x85,0xa8,0xe0,0x21]
+ ! CHECK: fmovsgu %icc, %f1, %f2 ! encoding: [0x85,0xab,0x20,0x21]
+ ! CHECK: fmovsleu %icc, %f1, %f2 ! encoding: [0x85,0xa9,0x20,0x21]
+ ! CHECK: fmovscc %icc, %f1, %f2 ! encoding: [0x85,0xab,0x60,0x21]
+ ! CHECK: fmovscs %icc, %f1, %f2 ! encoding: [0x85,0xa9,0x60,0x21]
+ ! CHECK: fmovspos %icc, %f1, %f2 ! encoding: [0x85,0xab,0xa0,0x21]
+ ! CHECK: fmovsneg %icc, %f1, %f2 ! encoding: [0x85,0xa9,0xa0,0x21]
+ ! CHECK: fmovsvc %icc, %f1, %f2 ! encoding: [0x85,0xab,0xe0,0x21]
+ ! CHECK: fmovsvs %icc, %f1, %f2 ! encoding: [0x85,0xa9,0xe0,0x21]
+ fmovsne %icc, %f1, %f2
+ fmovse %icc, %f1, %f2
+ fmovsg %icc, %f1, %f2
+ fmovsle %icc, %f1, %f2
+ fmovsge %icc, %f1, %f2
+ fmovsl %icc, %f1, %f2
+ fmovsgu %icc, %f1, %f2
+ fmovsleu %icc, %f1, %f2
+ fmovscc %icc, %f1, %f2
+ fmovscs %icc, %f1, %f2
+ fmovspos %icc, %f1, %f2
+ fmovsneg %icc, %f1, %f2
+ fmovsvc %icc, %f1, %f2
+ fmovsvs %icc, %f1, %f2
+
+ ! CHECK: fmovsne %xcc, %f1, %f2 ! encoding: [0x85,0xaa,0x70,0x21]
+ ! CHECK: fmovse %xcc, %f1, %f2 ! encoding: [0x85,0xa8,0x70,0x21]
+ ! CHECK: fmovsg %xcc, %f1, %f2 ! encoding: [0x85,0xaa,0xb0,0x21]
+ ! CHECK: fmovsle %xcc, %f1, %f2 ! encoding: [0x85,0xa8,0xb0,0x21]
+ ! CHECK: fmovsge %xcc, %f1, %f2 ! encoding: [0x85,0xaa,0xf0,0x21]
+ ! CHECK: fmovsl %xcc, %f1, %f2 ! encoding: [0x85,0xa8,0xf0,0x21]
+ ! CHECK: fmovsgu %xcc, %f1, %f2 ! encoding: [0x85,0xab,0x30,0x21]
+ ! CHECK: fmovsleu %xcc, %f1, %f2 ! encoding: [0x85,0xa9,0x30,0x21]
+ ! CHECK: fmovscc %xcc, %f1, %f2 ! encoding: [0x85,0xab,0x70,0x21]
+ ! CHECK: fmovscs %xcc, %f1, %f2 ! encoding: [0x85,0xa9,0x70,0x21]
+ ! CHECK: fmovspos %xcc, %f1, %f2 ! encoding: [0x85,0xab,0xb0,0x21]
+ ! CHECK: fmovsneg %xcc, %f1, %f2 ! encoding: [0x85,0xa9,0xb0,0x21]
+ ! CHECK: fmovsvc %xcc, %f1, %f2 ! encoding: [0x85,0xab,0xf0,0x21]
+ ! CHECK: fmovsvs %xcc, %f1, %f2 ! encoding: [0x85,0xa9,0xf0,0x21]
+ fmovsne %xcc, %f1, %f2
+ fmovse %xcc, %f1, %f2
+ fmovsg %xcc, %f1, %f2
+ fmovsle %xcc, %f1, %f2
+ fmovsge %xcc, %f1, %f2
+ fmovsl %xcc, %f1, %f2
+ fmovsgu %xcc, %f1, %f2
+ fmovsleu %xcc, %f1, %f2
+ fmovscc %xcc, %f1, %f2
+ fmovscs %xcc, %f1, %f2
+ fmovspos %xcc, %f1, %f2
+ fmovsneg %xcc, %f1, %f2
+ fmovsvc %xcc, %f1, %f2
+ fmovsvs %xcc, %f1, %f2
+
+ ! CHECK: fmovsu %fcc0, %f1, %f2 ! encoding: [0x85,0xa9,0xc0,0x21]
+ ! CHECK: fmovsg %fcc0, %f1, %f2 ! encoding: [0x85,0xa9,0x80,0x21]
+ ! CHECK: fmovsug %fcc0, %f1, %f2 ! encoding: [0x85,0xa9,0x40,0x21]
+ ! CHECK: fmovsl %fcc0, %f1, %f2 ! encoding: [0x85,0xa9,0x00,0x21]
+ ! CHECK: fmovsul %fcc0, %f1, %f2 ! encoding: [0x85,0xa8,0xc0,0x21]
+ ! CHECK: fmovslg %fcc0, %f1, %f2 ! encoding: [0x85,0xa8,0x80,0x21]
+ ! CHECK: fmovsne %fcc0, %f1, %f2 ! encoding: [0x85,0xa8,0x40,0x21]
+ ! CHECK: fmovse %fcc0, %f1, %f2 ! encoding: [0x85,0xaa,0x40,0x21]
+ ! CHECK: fmovsue %fcc0, %f1, %f2 ! encoding: [0x85,0xaa,0x80,0x21]
+ ! CHECK: fmovsge %fcc0, %f1, %f2 ! encoding: [0x85,0xaa,0xc0,0x21]
+ ! CHECK: fmovsuge %fcc0, %f1, %f2 ! encoding: [0x85,0xab,0x00,0x21]
+ ! CHECK: fmovsle %fcc0, %f1, %f2 ! encoding: [0x85,0xab,0x40,0x21]
+ ! CHECK: fmovsule %fcc0, %f1, %f2 ! encoding: [0x85,0xab,0x80,0x21]
+ ! CHECK: fmovso %fcc0, %f1, %f2 ! encoding: [0x85,0xab,0xc0,0x21]
+ fmovsu %fcc0, %f1, %f2
+ fmovsg %fcc0, %f1, %f2
+ fmovsug %fcc0, %f1, %f2
+ fmovsl %fcc0, %f1, %f2
+ fmovsul %fcc0, %f1, %f2
+ fmovslg %fcc0, %f1, %f2
+ fmovsne %fcc0, %f1, %f2
+ fmovse %fcc0, %f1, %f2
+ fmovsue %fcc0, %f1, %f2
+ fmovsge %fcc0, %f1, %f2
+ fmovsuge %fcc0, %f1, %f2
+ fmovsle %fcc0, %f1, %f2
+ fmovsule %fcc0, %f1, %f2
+ fmovso %fcc0, %f1, %f2
+
+ ! CHECK: bne,a %icc, .BB0 ! encoding: [0x32,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,a %icc, .BB0
+
+ ! CHECK: be,a %icc, .BB0 ! encoding: [0x22,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,a %icc, .BB0
+
+ ! CHECK: bg,a %icc, .BB0 ! encoding: [0x34,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,a %icc, .BB0
+
+ ! CHECK: ble,a %icc, .BB0 ! encoding: [0x24,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,a %icc, .BB0
+
+ ! CHECK: bge,a %icc, .BB0 ! encoding: [0x36,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,a %icc, .BB0
+
+ ! CHECK: bl,a %icc, .BB0 ! encoding: [0x26,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,a %icc, .BB0
+
+ ! CHECK: bgu,a %icc, .BB0 ! encoding: [0x38,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,a %icc, .BB0
+
+ ! CHECK: bleu,a %icc, .BB0 ! encoding: [0x28,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,a %icc, .BB0
+
+ ! CHECK: bcc,a %icc, .BB0 ! encoding: [0x3a,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,a %icc, .BB0
+
+ ! CHECK: bcs,a %icc, .BB0 ! encoding: [0x2a,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,a %icc, .BB0
+
+ ! CHECK: bpos,a %icc, .BB0 ! encoding: [0x3c,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,a %icc, .BB0
+
+ ! CHECK: bneg,a %icc, .BB0 ! encoding: [0x2c,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,a %icc, .BB0
+
+ ! CHECK: bvc,a %icc, .BB0 ! encoding: [0x3e,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,a %icc, .BB0
+
+ ! CHECK: bvs,a %icc, .BB0 ! encoding: [0x2e,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,a %icc, .BB0
+
+ ! CHECK: bne,pn %icc, .BB0 ! encoding: [0x12,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,pn %icc, .BB0
+
+ ! CHECK: be,pn %icc, .BB0 ! encoding: [0x02,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,pn %icc, .BB0
+
+ ! CHECK: bg,pn %icc, .BB0 ! encoding: [0x14,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,pn %icc, .BB0
+
+ ! CHECK: ble,pn %icc, .BB0 ! encoding: [0x04,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,pn %icc, .BB0
+
+ ! CHECK: bge,pn %icc, .BB0 ! encoding: [0x16,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,pn %icc, .BB0
+
+ ! CHECK: bl,pn %icc, .BB0 ! encoding: [0x06,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,pn %icc, .BB0
+
+ ! CHECK: bgu,pn %icc, .BB0 ! encoding: [0x18,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,pn %icc, .BB0
+
+ ! CHECK: bleu,pn %icc, .BB0 ! encoding: [0x08,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,pn %icc, .BB0
+
+ ! CHECK: bcc,pn %icc, .BB0 ! encoding: [0x1a,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,pn %icc, .BB0
+
+ ! CHECK: bcs,pn %icc, .BB0 ! encoding: [0x0a,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,pn %icc, .BB0
+
+ ! CHECK: bpos,pn %icc, .BB0 ! encoding: [0x1c,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,pn %icc, .BB0
+
+ ! CHECK: bneg,pn %icc, .BB0 ! encoding: [0x0c,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,pn %icc, .BB0
+
+ ! CHECK: bvc,pn %icc, .BB0 ! encoding: [0x1e,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,pn %icc, .BB0
+
+ ! CHECK: bvs,pn %icc, .BB0 ! encoding: [0x0e,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,pn %icc, .BB0
+
+ ! CHECK: bne,a,pn %icc, .BB0 ! encoding: [0x32,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,a,pn %icc, .BB0
+
+ ! CHECK: be,a,pn %icc, .BB0 ! encoding: [0x22,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,a,pn %icc, .BB0
+
+ ! CHECK: bg,a,pn %icc, .BB0 ! encoding: [0x34,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,a,pn %icc, .BB0
+
+ ! CHECK: ble,a,pn %icc, .BB0 ! encoding: [0x24,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,a,pn %icc, .BB0
+
+ ! CHECK: bge,a,pn %icc, .BB0 ! encoding: [0x36,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,a,pn %icc, .BB0
+
+ ! CHECK: bl,a,pn %icc, .BB0 ! encoding: [0x26,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,a,pn %icc, .BB0
+
+ ! CHECK: bgu,a,pn %icc, .BB0 ! encoding: [0x38,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,a,pn %icc, .BB0
+
+ ! CHECK: bleu,a,pn %icc, .BB0 ! encoding: [0x28,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,a,pn %icc, .BB0
+
+ ! CHECK: bcc,a,pn %icc, .BB0 ! encoding: [0x3a,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,a,pn %icc, .BB0
+
+ ! CHECK: bcs,a,pn %icc, .BB0 ! encoding: [0x2a,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,a,pn %icc, .BB0
+
+ ! CHECK: bpos,a,pn %icc, .BB0 ! encoding: [0x3c,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,a,pn %icc, .BB0
+
+ ! CHECK: bneg,a,pn %icc, .BB0 ! encoding: [0x2c,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,a,pn %icc, .BB0
+
+ ! CHECK: bvc,a,pn %icc, .BB0 ! encoding: [0x3e,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,a,pn %icc, .BB0
+
+ ! CHECK: bvs,a,pn %icc, .BB0 ! encoding: [0x2e,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,a,pn %icc, .BB0
+
+ ! CHECK: bne %icc, .BB0 ! encoding: [0x12,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,pt %icc, .BB0
+
+ ! CHECK: be %icc, .BB0 ! encoding: [0x02,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,pt %icc, .BB0
+
+ ! CHECK: bg %icc, .BB0 ! encoding: [0x14,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,pt %icc, .BB0
+
+ ! CHECK: ble %icc, .BB0 ! encoding: [0x04,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,pt %icc, .BB0
+
+ ! CHECK: bge %icc, .BB0 ! encoding: [0x16,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,pt %icc, .BB0
+
+ ! CHECK: bl %icc, .BB0 ! encoding: [0x06,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,pt %icc, .BB0
+
+ ! CHECK: bgu %icc, .BB0 ! encoding: [0x18,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,pt %icc, .BB0
+
+ ! CHECK: bleu %icc, .BB0 ! encoding: [0x08,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,pt %icc, .BB0
+
+ ! CHECK: bcc %icc, .BB0 ! encoding: [0x1a,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,pt %icc, .BB0
+
+ ! CHECK: bcs %icc, .BB0 ! encoding: [0x0a,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,pt %icc, .BB0
+
+ ! CHECK: bpos %icc, .BB0 ! encoding: [0x1c,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,pt %icc, .BB0
+
+ ! CHECK: bneg %icc, .BB0 ! encoding: [0x0c,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,pt %icc, .BB0
+
+ ! CHECK: bvc %icc, .BB0 ! encoding: [0x1e,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,pt %icc, .BB0
+
+ ! CHECK: bvs %icc, .BB0 ! encoding: [0x0e,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,pt %icc, .BB0
+
+ ! CHECK: bne,a %icc, .BB0 ! encoding: [0x32,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,a,pt %icc, .BB0
+
+ ! CHECK: be,a %icc, .BB0 ! encoding: [0x22,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,a,pt %icc, .BB0
+
+ ! CHECK: bg,a %icc, .BB0 ! encoding: [0x34,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,a,pt %icc, .BB0
+
+ ! CHECK: ble,a %icc, .BB0 ! encoding: [0x24,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,a,pt %icc, .BB0
+
+ ! CHECK: bge,a %icc, .BB0 ! encoding: [0x36,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,a,pt %icc, .BB0
+
+ ! CHECK: bl,a %icc, .BB0 ! encoding: [0x26,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,a,pt %icc, .BB0
+
+ ! CHECK: bgu,a %icc, .BB0 ! encoding: [0x38,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,a,pt %icc, .BB0
+
+ ! CHECK: bleu,a %icc, .BB0 ! encoding: [0x28,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,a,pt %icc, .BB0
+
+ ! CHECK: bcc,a %icc, .BB0 ! encoding: [0x3a,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,a,pt %icc, .BB0
+
+ ! CHECK: bcs,a %icc, .BB0 ! encoding: [0x2a,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,a,pt %icc, .BB0
+
+ ! CHECK: bpos,a %icc, .BB0 ! encoding: [0x3c,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,a,pt %icc, .BB0
+
+
+ ! CHECK: bne,a %xcc, .BB0 ! encoding: [0x32,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,a %xcc, .BB0
+
+ ! CHECK: be,a %xcc, .BB0 ! encoding: [0x22,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,a %xcc, .BB0
+
+ ! CHECK: bg,a %xcc, .BB0 ! encoding: [0x34,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,a %xcc, .BB0
+
+ ! CHECK: ble,a %xcc, .BB0 ! encoding: [0x24,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,a %xcc, .BB0
+
+ ! CHECK: bge,a %xcc, .BB0 ! encoding: [0x36,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,a %xcc, .BB0
+
+ ! CHECK: bl,a %xcc, .BB0 ! encoding: [0x26,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,a %xcc, .BB0
+
+ ! CHECK: bgu,a %xcc, .BB0 ! encoding: [0x38,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,a %xcc, .BB0
+
+ ! CHECK: bleu,a %xcc, .BB0 ! encoding: [0x28,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,a %xcc, .BB0
+
+ ! CHECK: bcc,a %xcc, .BB0 ! encoding: [0x3a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,a %xcc, .BB0
+
+ ! CHECK: bcs,a %xcc, .BB0 ! encoding: [0x2a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,a %xcc, .BB0
+
+ ! CHECK: bpos,a %xcc, .BB0 ! encoding: [0x3c,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,a %xcc, .BB0
+
+ ! CHECK: bneg,a %xcc, .BB0 ! encoding: [0x2c,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,a %xcc, .BB0
+
+ ! CHECK: bvc,a %xcc, .BB0 ! encoding: [0x3e,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,a %xcc, .BB0
+
+ ! CHECK: bvs,a %xcc, .BB0 ! encoding: [0x2e,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,a %xcc, .BB0
+
+ ! CHECK: bne,pn %xcc, .BB0 ! encoding: [0x12,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,pn %xcc, .BB0
+
+ ! CHECK: be,pn %xcc, .BB0 ! encoding: [0x02,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,pn %xcc, .BB0
+
+ ! CHECK: bg,pn %xcc, .BB0 ! encoding: [0x14,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,pn %xcc, .BB0
+
+ ! CHECK: ble,pn %xcc, .BB0 ! encoding: [0x04,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,pn %xcc, .BB0
+
+ ! CHECK: bge,pn %xcc, .BB0 ! encoding: [0x16,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,pn %xcc, .BB0
+
+ ! CHECK: bl,pn %xcc, .BB0 ! encoding: [0x06,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,pn %xcc, .BB0
+
+ ! CHECK: bgu,pn %xcc, .BB0 ! encoding: [0x18,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,pn %xcc, .BB0
+
+ ! CHECK: bleu,pn %xcc, .BB0 ! encoding: [0x08,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,pn %xcc, .BB0
+
+ ! CHECK: bcc,pn %xcc, .BB0 ! encoding: [0x1a,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,pn %xcc, .BB0
+
+ ! CHECK: bcs,pn %xcc, .BB0 ! encoding: [0x0a,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,pn %xcc, .BB0
+
+ ! CHECK: bpos,pn %xcc, .BB0 ! encoding: [0x1c,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,pn %xcc, .BB0
+
+ ! CHECK: bneg,pn %xcc, .BB0 ! encoding: [0x0c,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,pn %xcc, .BB0
+
+ ! CHECK: bvc,pn %xcc, .BB0 ! encoding: [0x1e,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,pn %xcc, .BB0
+
+ ! CHECK: bvs,pn %xcc, .BB0 ! encoding: [0x0e,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,pn %xcc, .BB0
+
+ ! CHECK: bne,a,pn %xcc, .BB0 ! encoding: [0x32,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,a,pn %xcc, .BB0
+
+ ! CHECK: be,a,pn %xcc, .BB0 ! encoding: [0x22,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,a,pn %xcc, .BB0
+
+ ! CHECK: bg,a,pn %xcc, .BB0 ! encoding: [0x34,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,a,pn %xcc, .BB0
+
+ ! CHECK: ble,a,pn %xcc, .BB0 ! encoding: [0x24,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,a,pn %xcc, .BB0
+
+ ! CHECK: bge,a,pn %xcc, .BB0 ! encoding: [0x36,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,a,pn %xcc, .BB0
+
+ ! CHECK: bl,a,pn %xcc, .BB0 ! encoding: [0x26,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,a,pn %xcc, .BB0
+
+ ! CHECK: bgu,a,pn %xcc, .BB0 ! encoding: [0x38,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,a,pn %xcc, .BB0
+
+ ! CHECK: bleu,a,pn %xcc, .BB0 ! encoding: [0x28,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,a,pn %xcc, .BB0
+
+ ! CHECK: bcc,a,pn %xcc, .BB0 ! encoding: [0x3a,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,a,pn %xcc, .BB0
+
+ ! CHECK: bcs,a,pn %xcc, .BB0 ! encoding: [0x2a,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,a,pn %xcc, .BB0
+
+ ! CHECK: bpos,a,pn %xcc, .BB0 ! encoding: [0x3c,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,a,pn %xcc, .BB0
+
+ ! CHECK: bneg,a,pn %xcc, .BB0 ! encoding: [0x2c,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,a,pn %xcc, .BB0
+
+ ! CHECK: bvc,a,pn %xcc, .BB0 ! encoding: [0x3e,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,a,pn %xcc, .BB0
+
+ ! CHECK: bvs,a,pn %xcc, .BB0 ! encoding: [0x2e,0b01100AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,a,pn %xcc, .BB0
+
+ ! CHECK: bne %xcc, .BB0 ! encoding: [0x12,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,pt %xcc, .BB0
+
+ ! CHECK: be %xcc, .BB0 ! encoding: [0x02,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,pt %xcc, .BB0
+
+ ! CHECK: bg %xcc, .BB0 ! encoding: [0x14,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,pt %xcc, .BB0
+
+ ! CHECK: ble %xcc, .BB0 ! encoding: [0x04,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,pt %xcc, .BB0
+
+ ! CHECK: bge %xcc, .BB0 ! encoding: [0x16,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,pt %xcc, .BB0
+
+ ! CHECK: bl %xcc, .BB0 ! encoding: [0x06,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,pt %xcc, .BB0
+
+ ! CHECK: bgu %xcc, .BB0 ! encoding: [0x18,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,pt %xcc, .BB0
+
+ ! CHECK: bleu %xcc, .BB0 ! encoding: [0x08,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,pt %xcc, .BB0
+
+ ! CHECK: bcc %xcc, .BB0 ! encoding: [0x1a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,pt %xcc, .BB0
+
+ ! CHECK: bcs %xcc, .BB0 ! encoding: [0x0a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,pt %xcc, .BB0
+
+ ! CHECK: bpos %xcc, .BB0 ! encoding: [0x1c,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,pt %xcc, .BB0
+
+ ! CHECK: bneg %xcc, .BB0 ! encoding: [0x0c,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bneg,pt %xcc, .BB0
+
+ ! CHECK: bvc %xcc, .BB0 ! encoding: [0x1e,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvc,pt %xcc, .BB0
+
+ ! CHECK: bvs %xcc, .BB0 ! encoding: [0x0e,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bvs,pt %xcc, .BB0
+
+ ! CHECK: bne,a %xcc, .BB0 ! encoding: [0x32,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bne,a,pt %xcc, .BB0
+
+ ! CHECK: be,a %xcc, .BB0 ! encoding: [0x22,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ be,a,pt %xcc, .BB0
+
+ ! CHECK: bg,a %xcc, .BB0 ! encoding: [0x34,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bg,a,pt %xcc, .BB0
+
+ ! CHECK: ble,a %xcc, .BB0 ! encoding: [0x24,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ ble,a,pt %xcc, .BB0
+
+ ! CHECK: bge,a %xcc, .BB0 ! encoding: [0x36,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bge,a,pt %xcc, .BB0
+
+ ! CHECK: bl,a %xcc, .BB0 ! encoding: [0x26,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bl,a,pt %xcc, .BB0
+
+ ! CHECK: bgu,a %xcc, .BB0 ! encoding: [0x38,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bgu,a,pt %xcc, .BB0
+
+ ! CHECK: bleu,a %xcc, .BB0 ! encoding: [0x28,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bleu,a,pt %xcc, .BB0
+
+ ! CHECK: bcc,a %xcc, .BB0 ! encoding: [0x3a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcc,a,pt %xcc, .BB0
+
+ ! CHECK: bcs,a %xcc, .BB0 ! encoding: [0x2a,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bcs,a,pt %xcc, .BB0
+
+ ! CHECK: bpos,a %xcc, .BB0 ! encoding: [0x3c,0b01101AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ bpos,a,pt %xcc, .BB0
+
+ ! CHECK: fbu %fcc0, .BB0 ! encoding: [0x0f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbu %fcc0, .BB0
+
+ ! CHECK: fbg %fcc0, .BB0 ! encoding: [0x0d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbg %fcc0, .BB0
+ ! CHECK: fbug %fcc0, .BB0 ! encoding: [0x0b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbug %fcc0, .BB0
+
+ ! CHECK: fbl %fcc0, .BB0 ! encoding: [0x09,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbl %fcc0, .BB0
+
+ ! CHECK: fbul %fcc0, .BB0 ! encoding: [0x07,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbul %fcc0, .BB0
+
+ ! CHECK: fblg %fcc0, .BB0 ! encoding: [0x05,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fblg %fcc0, .BB0
+
+ ! CHECK: fbne %fcc0, .BB0 ! encoding: [0x03,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbne %fcc0, .BB0
+
+ ! CHECK: fbe %fcc0, .BB0 ! encoding: [0x13,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbe %fcc0, .BB0
+
+ ! CHECK: fbue %fcc0, .BB0 ! encoding: [0x15,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbue %fcc0, .BB0
+
+ ! CHECK: fbge %fcc0, .BB0 ! encoding: [0x17,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbge %fcc0, .BB0
+
+ ! CHECK: fbuge %fcc0, .BB0 ! encoding: [0x19,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbuge %fcc0, .BB0
+
+ ! CHECK: fble %fcc0, .BB0 ! encoding: [0x1b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fble %fcc0, .BB0
+
+ ! CHECK: fbule %fcc0, .BB0 ! encoding: [0x1d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbule %fcc0, .BB0
+
+ ! CHECK: fbo %fcc0, .BB0 ! encoding: [0x1f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo %fcc0, .BB0
+
+ ! CHECK: fbu %fcc0, .BB0 ! encoding: [0x0f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbu,pt %fcc0, .BB0
+
+ ! CHECK: fbg %fcc0, .BB0 ! encoding: [0x0d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbg,pt %fcc0, .BB0
+ ! CHECK: fbug %fcc0, .BB0 ! encoding: [0x0b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbug,pt %fcc0, .BB0
+
+ ! CHECK: fbl %fcc0, .BB0 ! encoding: [0x09,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbl,pt %fcc0, .BB0
+
+ ! CHECK: fbul %fcc0, .BB0 ! encoding: [0x07,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbul,pt %fcc0, .BB0
+
+ ! CHECK: fblg %fcc0, .BB0 ! encoding: [0x05,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fblg,pt %fcc0, .BB0
+
+ ! CHECK: fbne %fcc0, .BB0 ! encoding: [0x03,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbne,pt %fcc0, .BB0
+
+ ! CHECK: fbe %fcc0, .BB0 ! encoding: [0x13,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbe,pt %fcc0, .BB0
+
+ ! CHECK: fbue %fcc0, .BB0 ! encoding: [0x15,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbue,pt %fcc0, .BB0
+
+ ! CHECK: fbge %fcc0, .BB0 ! encoding: [0x17,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbge,pt %fcc0, .BB0
+
+ ! CHECK: fbuge %fcc0, .BB0 ! encoding: [0x19,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbuge,pt %fcc0, .BB0
+
+ ! CHECK: fble %fcc0, .BB0 ! encoding: [0x1b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fble,pt %fcc0, .BB0
+
+ ! CHECK: fbule %fcc0, .BB0 ! encoding: [0x1d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbule,pt %fcc0, .BB0
+
+ ! CHECK: fbo %fcc0, .BB0 ! encoding: [0x1f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo,pt %fcc0, .BB0
+
+
+ ! CHECK: fbo,a %fcc0, .BB0 ! encoding: [0x3f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo,a %fcc0, .BB0
+
+ ! CHECK: fbu,a %fcc0, .BB0 ! encoding: [0x2f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbu,a %fcc0, .BB0
+
+ ! CHECK: fbg,a %fcc0, .BB0 ! encoding: [0x2d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbg,a %fcc0, .BB0
+ ! CHECK: fbug,a %fcc0, .BB0 ! encoding: [0x2b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbug,a %fcc0, .BB0
+
+ ! CHECK: fbl,a %fcc0, .BB0 ! encoding: [0x29,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbl,a %fcc0, .BB0
+
+ ! CHECK: fbul,a %fcc0, .BB0 ! encoding: [0x27,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbul,a %fcc0, .BB0
+
+ ! CHECK: fblg,a %fcc0, .BB0 ! encoding: [0x25,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fblg,a %fcc0, .BB0
+
+ ! CHECK: fbne,a %fcc0, .BB0 ! encoding: [0x23,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbne,a %fcc0, .BB0
+
+ ! CHECK: fbe,a %fcc0, .BB0 ! encoding: [0x33,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbe,a %fcc0, .BB0
+
+ ! CHECK: fbue,a %fcc0, .BB0 ! encoding: [0x35,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbue,a %fcc0, .BB0
+
+ ! CHECK: fbge,a %fcc0, .BB0 ! encoding: [0x37,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbge,a %fcc0, .BB0
+
+ ! CHECK: fbuge,a %fcc0, .BB0 ! encoding: [0x39,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbuge,a %fcc0, .BB0
+
+ ! CHECK: fble,a %fcc0, .BB0 ! encoding: [0x3b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fble,a %fcc0, .BB0
+
+ ! CHECK: fbule,a %fcc0, .BB0 ! encoding: [0x3d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbule,a %fcc0, .BB0
+
+ ! CHECK: fbo,a %fcc0, .BB0 ! encoding: [0x3f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo,a %fcc0, .BB0
+
+ ! CHECK: fbo,a %fcc0, .BB0 ! encoding: [0x3f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo,a %fcc0, .BB0
+
+ ! CHECK: fbu,a %fcc0, .BB0 ! encoding: [0x2f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbu,a,pt %fcc0, .BB0
+
+ ! CHECK: fbg,a %fcc0, .BB0 ! encoding: [0x2d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbg,a,pt %fcc0, .BB0
+
+ ! CHECK: fbug,a %fcc0, .BB0 ! encoding: [0x2b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbug,a,pt %fcc0, .BB0
+
+ ! CHECK: fbl,a %fcc0, .BB0 ! encoding: [0x29,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbl,a,pt %fcc0, .BB0
+
+ ! CHECK: fbul,a %fcc0, .BB0 ! encoding: [0x27,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbul,a,pt %fcc0, .BB0
+
+ ! CHECK: fblg,a %fcc0, .BB0 ! encoding: [0x25,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fblg,a,pt %fcc0, .BB0
+
+ ! CHECK: fbne,a %fcc0, .BB0 ! encoding: [0x23,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbne,a,pt %fcc0, .BB0
+
+ ! CHECK: fbe,a %fcc0, .BB0 ! encoding: [0x33,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbe,a,pt %fcc0, .BB0
+
+ ! CHECK: fbue,a %fcc0, .BB0 ! encoding: [0x35,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbue,a,pt %fcc0, .BB0
+
+ ! CHECK: fbge,a %fcc0, .BB0 ! encoding: [0x37,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbge,a,pt %fcc0, .BB0
+
+ ! CHECK: fbuge,a %fcc0, .BB0 ! encoding: [0x39,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbuge,a,pt %fcc0, .BB0
+
+ ! CHECK: fble,a %fcc0, .BB0 ! encoding: [0x3b,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fble,a,pt %fcc0, .BB0
+
+ ! CHECK: fbule,a %fcc0, .BB0 ! encoding: [0x3d,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbule,a,pt %fcc0, .BB0
+
+ ! CHECK: fbo,a %fcc0, .BB0 ! encoding: [0x3f,0b01001AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo,a,pt %fcc0, .BB0
+
+ ! CHECK: fbu,pn %fcc0, .BB0 ! encoding: [0x0f,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbu,pn %fcc0, .BB0
+
+ ! CHECK: fbg,pn %fcc0, .BB0 ! encoding: [0x0d,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbg,pn %fcc0, .BB0
+ ! CHECK: fbug,pn %fcc0, .BB0 ! encoding: [0x0b,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbug,pn %fcc0, .BB0
+
+ ! CHECK: fbl,pn %fcc0, .BB0 ! encoding: [0x09,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbl,pn %fcc0, .BB0
+
+ ! CHECK: fbul,pn %fcc0, .BB0 ! encoding: [0x07,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbul,pn %fcc0, .BB0
+
+ ! CHECK: fblg,pn %fcc0, .BB0 ! encoding: [0x05,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fblg,pn %fcc0, .BB0
+
+ ! CHECK: fbne,pn %fcc0, .BB0 ! encoding: [0x03,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbne,pn %fcc0, .BB0
+
+ ! CHECK: fbe,pn %fcc0, .BB0 ! encoding: [0x13,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbe,pn %fcc0, .BB0
+
+ ! CHECK: fbue,pn %fcc0, .BB0 ! encoding: [0x15,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbue,pn %fcc0, .BB0
+
+ ! CHECK: fbge,pn %fcc0, .BB0 ! encoding: [0x17,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbge,pn %fcc0, .BB0
+
+ ! CHECK: fbuge,pn %fcc0, .BB0 ! encoding: [0x19,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbuge,pn %fcc0, .BB0
+
+ ! CHECK: fble,pn %fcc0, .BB0 ! encoding: [0x1b,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fble,pn %fcc0, .BB0
+
+ ! CHECK: fbule,pn %fcc0, .BB0 ! encoding: [0x1d,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbule,pn %fcc0, .BB0
+
+ ! CHECK: fbo,pn %fcc0, .BB0 ! encoding: [0x1f,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo,pn %fcc0, .BB0
+
+ ! CHECK: fbu,a,pn %fcc0, .BB0 ! encoding: [0x2f,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbu,a,pn %fcc0, .BB0
+
+ ! CHECK: fbg,a,pn %fcc0, .BB0 ! encoding: [0x2d,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbg,a,pn %fcc0, .BB0
+
+ ! CHECK: fbug,a,pn %fcc0, .BB0 ! encoding: [0x2b,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbug,a,pn %fcc0, .BB0
+
+ ! CHECK: fbl,a,pn %fcc0, .BB0 ! encoding: [0x29,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbl,a,pn %fcc0, .BB0
+
+ ! CHECK: fbul,a,pn %fcc0, .BB0 ! encoding: [0x27,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbul,a,pn %fcc0, .BB0
+
+ ! CHECK: fblg,a,pn %fcc0, .BB0 ! encoding: [0x25,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fblg,a,pn %fcc0, .BB0
+
+ ! CHECK: fbne,a,pn %fcc0, .BB0 ! encoding: [0x23,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbne,a,pn %fcc0, .BB0
+
+ ! CHECK: fbe,a,pn %fcc0, .BB0 ! encoding: [0x33,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbe,a,pn %fcc0, .BB0
+
+ ! CHECK: fbue,a,pn %fcc0, .BB0 ! encoding: [0x35,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbue,a,pn %fcc0, .BB0
+
+ ! CHECK: fbge,a,pn %fcc0, .BB0 ! encoding: [0x37,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbge,a,pn %fcc0, .BB0
+
+ ! CHECK: fbuge,a,pn %fcc0, .BB0 ! encoding: [0x39,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbuge,a,pn %fcc0, .BB0
+
+ ! CHECK: fble,a,pn %fcc0, .BB0 ! encoding: [0x3b,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fble,a,pn %fcc0, .BB0
+
+ ! CHECK: fbule,a,pn %fcc0, .BB0 ! encoding: [0x3d,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbule,a,pn %fcc0, .BB0
+
+ ! CHECK: fbo,a,pn %fcc0, .BB0 ! encoding: [0x3f,0b01000AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbo,a,pn %fcc0, .BB0
+
+ ! CHECK: movu %fcc1, %g1, %g2 ! encoding: [0x85,0x61,0xc8,0x01]
+ movu %fcc1, %g1, %g2
+
+ ! CHECK: fmovsg %fcc2, %f1, %f2 ! encoding: [0x85,0xa9,0x90,0x21]
+ fmovsg %fcc2, %f1, %f2
+
+ ! CHECK: fbug %fcc3, .BB0 ! encoding: [0x0b,0b01111AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbug %fcc3, .BB0
+
+ ! CHECK: fbu %fcc3, .BB0 ! encoding: [0x0f,0b01111AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbu,pt %fcc3, .BB0
+
+ ! CHECK: fbl,a %fcc3, .BB0 ! encoding: [0x29,0b01111AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbl,a %fcc3, .BB0
+
+ ! CHECK: fbue,pn %fcc3, .BB0 ! encoding: [0x15,0b01110AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbue,pn %fcc3, .BB0
+
+ ! CHECK: fbne,a,pn %fcc3, .BB0 ! encoding: [0x23,0b01110AAA,A,A]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br19
+ fbne,a,pn %fcc3, .BB0
+
+
+ ! CHECK: brz %g1, .BB0 ! encoding: [0x02,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ ! CHECK: brlez %g1, .BB0 ! encoding: [0x04,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ ! CHECK: brlz %g1, .BB0 ! encoding: [0x06,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ ! CHECK: brnz %g1, .BB0 ! encoding: [0x0a,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ ! CHECK: brgz %g1, .BB0 ! encoding: [0x0c,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ ! CHECK: brgez %g1, .BB0 ! encoding: [0x0e,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+
+ brz %g1, .BB0
+ brlez %g1, .BB0
+ brlz %g1, .BB0
+ brnz %g1, .BB0
+ brgz %g1, .BB0
+ brgez %g1, .BB0
+
+ ! CHECK: brz %g1, .BB0 ! encoding: [0x02,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ brz,pt %g1, .BB0
+
+ ! CHECK: brz,a %g1, .BB0 ! encoding: [0x22,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ brz,a %g1, .BB0
+
+ ! CHECK: brz,a %g1, .BB0 ! encoding: [0x22,0b11AA1000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ brz,a,pt %g1, .BB0
+
+ ! CHECK: brz,pn %g1, .BB0 ! encoding: [0x02,0b11AA0000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ brz,pn %g1, .BB0
+
+ ! CHECK: brz,a,pn %g1, .BB0 ! encoding: [0x22,0b11AA0000,0b01BBBBBB,B]
+ ! CHECK-NEXT: ! fixup A - offset: 0, value: .BB0, kind: fixup_sparc_br16_2
+ ! CHECK-NEXT: ! fixup B - offset: 0, value: .BB0, kind: fixup_sparc_br16_14
+ brz,a,pn %g1, .BB0
+
+ ! CHECK: movrz %g1, %g2, %g3 ! encoding: [0x87,0x78,0x44,0x02]
+ ! CHECK: movrlez %g1, %g2, %g3 ! encoding: [0x87,0x78,0x48,0x02]
+ ! CHECK: movrlz %g1, %g2, %g3 ! encoding: [0x87,0x78,0x4c,0x02]
+ ! CHECK: movrnz %g1, %g2, %g3 ! encoding: [0x87,0x78,0x54,0x02]
+ ! CHECK: movrgz %g1, %g2, %g3 ! encoding: [0x87,0x78,0x58,0x02]
+ ! CHECK: movrgez %g1, %g2, %g3 ! encoding: [0x87,0x78,0x5c,0x02]
+ movrz %g1, %g2, %g3
+ movrlez %g1, %g2, %g3
+ movrlz %g1, %g2, %g3
+ movrnz %g1, %g2, %g3
+ movrgz %g1, %g2, %g3
+ movrgez %g1, %g2, %g3
+
+ ! CHECK: fmovrsz %g1, %f2, %f3 ! encoding: [0x87,0xa8,0x44,0xa2]
+ ! CHECK: fmovrslez %g1, %f2, %f3 ! encoding: [0x87,0xa8,0x48,0xa2]
+ ! CHECK: fmovrslz %g1, %f2, %f3 ! encoding: [0x87,0xa8,0x4c,0xa2]
+ ! CHECK: fmovrsnz %g1, %f2, %f3 ! encoding: [0x87,0xa8,0x54,0xa2]
+ ! CHECK: fmovrsgz %g1, %f2, %f3 ! encoding: [0x87,0xa8,0x58,0xa2]
+ ! CHECK: fmovrsgez %g1, %f2, %f3 ! encoding: [0x87,0xa8,0x5c,0xa2]
+ fmovrsz %g1, %f2, %f3
+ fmovrslez %g1, %f2, %f3
+ fmovrslz %g1, %f2, %f3
+ fmovrsnz %g1, %f2, %f3
+ fmovrsgz %g1, %f2, %f3
+ fmovrsgez %g1, %f2, %f3
+
+ ! CHECK: rett %i7+8 ! encoding: [0x81,0xcf,0xe0,0x08]
+ return %i7 + 8
+
+ ! CHECK: ta %icc, %g0 + 5 ! encoding: [0x91,0xd0,0x20,0x05]
+ ta 5
+
+ ! CHECK: te %xcc, %g0 + 3 ! encoding: [0x83,0xd0,0x30,0x03]
+ te %xcc, 3
+
diff --git a/test/MC/Sparc/sparcv8-instructions.s b/test/MC/Sparc/sparcv8-instructions.s
new file mode 100644
index 000000000000..9071b45740c9
--- /dev/null
+++ b/test/MC/Sparc/sparcv8-instructions.s
@@ -0,0 +1,15 @@
+! RUN: llvm-mc %s -arch=sparc -show-encoding | FileCheck %s
+
+ ! CHECK: fcmps %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x24]
+ ! CHECK: fcmpd %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x44]
+ ! CHECK: fcmpq %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x64]
+ fcmps %f0, %f4
+ fcmpd %f0, %f4
+ fcmpq %f0, %f4
+
+ ! CHECK: fcmpes %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xa4]
+ ! CHECK: fcmped %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xc4]
+ ! CHECK: fcmpeq %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xe4]
+ fcmpes %f0, %f4
+ fcmped %f0, %f4
+ fcmpeq %f0, %f4
diff --git a/test/MC/Sparc/sparcv9-instructions.s b/test/MC/Sparc/sparcv9-instructions.s
new file mode 100644
index 000000000000..37f4c8b2f6b9
--- /dev/null
+++ b/test/MC/Sparc/sparcv9-instructions.s
@@ -0,0 +1,23 @@
+! RUN: not llvm-mc %s -arch=sparc -show-encoding 2>&1 | FileCheck %s --check-prefix=V8
+! RUN: llvm-mc %s -arch=sparcv9 -show-encoding | FileCheck %s --check-prefix=V9
+
+ ! V8: error: invalid instruction mnemonic
+ ! V8-NEXT: addc %g2, %g1, %g3
+ ! V9: addx %g2, %g1, %g3 ! encoding: [0x86,0x40,0x80,0x01]
+ addc %g2, %g1, %g3
+
+ ! V8: error: invalid instruction mnemonic
+ ! V8-NEXT: addccc %g1, %g2, %g3
+ ! V9: addxcc %g1, %g2, %g3 ! encoding: [0x86,0xc0,0x40,0x02]
+ addccc %g1, %g2, %g3
+
+ ! V8: error: invalid instruction mnemonic
+ ! V8-NEXT: subc %g2, %g1, %g3
+ ! V9: subx %g2, %g1, %g3 ! encoding: [0x86,0x60,0x80,0x01]
+ subc %g2, %g1, %g3
+
+ ! V8: error: invalid instruction mnemonic
+ ! V8-NEXT: subccc %g1, %g2, %g3
+ ! V9: subxcc %g1, %g2, %g3 ! encoding: [0x86,0xe0,0x40,0x02]
+ subccc %g1, %g2, %g3
+
diff --git a/test/MC/SystemZ/insn-bad-z196.s b/test/MC/SystemZ/insn-bad-z196.s
index 089d9b5b3e14..47dbe08b2525 100644
--- a/test/MC/SystemZ/insn-bad-z196.s
+++ b/test/MC/SystemZ/insn-bad-z196.s
@@ -33,6 +33,62 @@
aih %r0, (1 << 31)
#CHECK: error: invalid operand
+#CHECK: cdlfbr %f0, 0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: cdlfbr %f0, 0, %r0, 16
+#CHECK: error: invalid operand
+#CHECK: cdlfbr %f0, -1, %r0, 0
+#CHECK: error: invalid operand
+#CHECK: cdlfbr %f0, 16, %r0, 0
+
+ cdlfbr %f0, 0, %r0, -1
+ cdlfbr %f0, 0, %r0, 16
+ cdlfbr %f0, -1, %r0, 0
+ cdlfbr %f0, 16, %r0, 0
+
+#CHECK: error: invalid operand
+#CHECK: cdlgbr %f0, 0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: cdlgbr %f0, 0, %r0, 16
+#CHECK: error: invalid operand
+#CHECK: cdlgbr %f0, -1, %r0, 0
+#CHECK: error: invalid operand
+#CHECK: cdlgbr %f0, 16, %r0, 0
+
+ cdlgbr %f0, 0, %r0, -1
+ cdlgbr %f0, 0, %r0, 16
+ cdlgbr %f0, -1, %r0, 0
+ cdlgbr %f0, 16, %r0, 0
+
+#CHECK: error: invalid operand
+#CHECK: celfbr %f0, 0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: celfbr %f0, 0, %r0, 16
+#CHECK: error: invalid operand
+#CHECK: celfbr %f0, -1, %r0, 0
+#CHECK: error: invalid operand
+#CHECK: celfbr %f0, 16, %r0, 0
+
+ celfbr %f0, 0, %r0, -1
+ celfbr %f0, 0, %r0, 16
+ celfbr %f0, -1, %r0, 0
+ celfbr %f0, 16, %r0, 0
+
+#CHECK: error: invalid operand
+#CHECK: celgbr %f0, 0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: celgbr %f0, 0, %r0, 16
+#CHECK: error: invalid operand
+#CHECK: celgbr %f0, -1, %r0, 0
+#CHECK: error: invalid operand
+#CHECK: celgbr %f0, 16, %r0, 0
+
+ celgbr %f0, 0, %r0, -1
+ celgbr %f0, 0, %r0, 16
+ celgbr %f0, -1, %r0, 0
+ celgbr %f0, 16, %r0, 0
+
+#CHECK: error: invalid operand
#CHECK: chf %r0, -524289
#CHECK: error: invalid operand
#CHECK: chf %r0, 524288
@@ -49,6 +105,96 @@
cih %r0, (1 << 31)
#CHECK: error: invalid operand
+#CHECK: clfdbr %r0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: clfdbr %r0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: clfdbr %r0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: clfdbr %r0, 16, %f0, 0
+
+ clfdbr %r0, 0, %f0, -1
+ clfdbr %r0, 0, %f0, 16
+ clfdbr %r0, -1, %f0, 0
+ clfdbr %r0, 16, %f0, 0
+
+#CHECK: error: invalid operand
+#CHECK: clfebr %r0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: clfebr %r0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: clfebr %r0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: clfebr %r0, 16, %f0, 0
+
+ clfebr %r0, 0, %f0, -1
+ clfebr %r0, 0, %f0, 16
+ clfebr %r0, -1, %f0, 0
+ clfebr %r0, 16, %f0, 0
+
+#CHECK: error: invalid operand
+#CHECK: clfxbr %r0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: clfxbr %r0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: clfxbr %r0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: clfxbr %r0, 16, %f0, 0
+#CHECK: error: invalid register pair
+#CHECK: clfxbr %r0, 0, %f14, 0
+
+ clfxbr %r0, 0, %f0, -1
+ clfxbr %r0, 0, %f0, 16
+ clfxbr %r0, -1, %f0, 0
+ clfxbr %r0, 16, %f0, 0
+ clfxbr %r0, 0, %f14, 0
+
+#CHECK: error: invalid operand
+#CHECK: clgdbr %r0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: clgdbr %r0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: clgdbr %r0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: clgdbr %r0, 16, %f0, 0
+
+ clgdbr %r0, 0, %f0, -1
+ clgdbr %r0, 0, %f0, 16
+ clgdbr %r0, -1, %f0, 0
+ clgdbr %r0, 16, %f0, 0
+
+#CHECK: error: invalid operand
+#CHECK: clgebr %r0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: clgebr %r0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: clgebr %r0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: clgebr %r0, 16, %f0, 0
+
+ clgebr %r0, 0, %f0, -1
+ clgebr %r0, 0, %f0, 16
+ clgebr %r0, -1, %f0, 0
+ clgebr %r0, 16, %f0, 0
+
+#CHECK: error: invalid operand
+#CHECK: clgxbr %r0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: clgxbr %r0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: clgxbr %r0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: clgxbr %r0, 16, %f0, 0
+#CHECK: error: invalid register pair
+#CHECK: clgxbr %r0, 0, %f14, 0
+
+ clgxbr %r0, 0, %f0, -1
+ clgxbr %r0, 0, %f0, 16
+ clgxbr %r0, -1, %f0, 0
+ clgxbr %r0, 16, %f0, 0
+ clgxbr %r0, 0, %f14, 0
+
+#CHECK: error: invalid operand
#CHECK: clhf %r0, -524289
#CHECK: error: invalid operand
#CHECK: clhf %r0, 524288
@@ -65,6 +211,40 @@
clih %r0, (1 << 32)
#CHECK: error: invalid operand
+#CHECK: cxlfbr %f0, 0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: cxlfbr %f0, 0, %r0, 16
+#CHECK: error: invalid operand
+#CHECK: cxlfbr %f0, -1, %r0, 0
+#CHECK: error: invalid operand
+#CHECK: cxlfbr %f0, 16, %r0, 0
+#CHECK: error: invalid register pair
+#CHECK: cxlfbr %f2, 0, %r0, 0
+
+ cxlfbr %f0, 0, %r0, -1
+ cxlfbr %f0, 0, %r0, 16
+ cxlfbr %f0, -1, %r0, 0
+ cxlfbr %f0, 16, %r0, 0
+ cxlfbr %f2, 0, %r0, 0
+
+#CHECK: error: invalid operand
+#CHECK: cxlgbr %f0, 0, %r0, -1
+#CHECK: error: invalid operand
+#CHECK: cxlgbr %f0, 0, %r0, 16
+#CHECK: error: invalid operand
+#CHECK: cxlgbr %f0, -1, %r0, 0
+#CHECK: error: invalid operand
+#CHECK: cxlgbr %f0, 16, %r0, 0
+#CHECK: error: invalid register pair
+#CHECK: cxlgbr %f2, 0, %r0, 0
+
+ cxlgbr %f0, 0, %r0, -1
+ cxlgbr %f0, 0, %r0, 16
+ cxlgbr %f0, -1, %r0, 0
+ cxlgbr %f0, 16, %r0, 0
+ cxlgbr %f2, 0, %r0, 0
+
+#CHECK: error: invalid operand
#CHECK: fidbra %f0, 0, %f0, -1
#CHECK: error: invalid operand
#CHECK: fidbra %f0, 0, %f0, 16
@@ -113,6 +293,116 @@
fixbra %f2, 0, %f0, 0
#CHECK: error: invalid operand
+#CHECK: laa %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: laa %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: laa %r0, %r0, 0(%r1,%r2)
+
+ laa %r0, %r0, -524289
+ laa %r0, %r0, 524288
+ laa %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: laag %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: laag %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: laag %r0, %r0, 0(%r1,%r2)
+
+ laag %r0, %r0, -524289
+ laag %r0, %r0, 524288
+ laag %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: laal %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: laal %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: laal %r0, %r0, 0(%r1,%r2)
+
+ laal %r0, %r0, -524289
+ laal %r0, %r0, 524288
+ laal %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: laalg %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: laalg %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: laalg %r0, %r0, 0(%r1,%r2)
+
+ laalg %r0, %r0, -524289
+ laalg %r0, %r0, 524288
+ laalg %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: lan %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lan %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lan %r0, %r0, 0(%r1,%r2)
+
+ lan %r0, %r0, -524289
+ lan %r0, %r0, 524288
+ lan %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: lang %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lang %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lang %r0, %r0, 0(%r1,%r2)
+
+ lang %r0, %r0, -524289
+ lang %r0, %r0, 524288
+ lang %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: lao %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lao %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lao %r0, %r0, 0(%r1,%r2)
+
+ lao %r0, %r0, -524289
+ lao %r0, %r0, 524288
+ lao %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: laog %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: laog %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: laog %r0, %r0, 0(%r1,%r2)
+
+ laog %r0, %r0, -524289
+ laog %r0, %r0, 524288
+ laog %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: lax %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: lax %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: lax %r0, %r0, 0(%r1,%r2)
+
+ lax %r0, %r0, -524289
+ lax %r0, %r0, 524288
+ lax %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
+#CHECK: laxg %r0, %r0, -524289
+#CHECK: error: invalid operand
+#CHECK: laxg %r0, %r0, 524288
+#CHECK: error: invalid use of indexed addressing
+#CHECK: laxg %r0, %r0, 0(%r1,%r2)
+
+ laxg %r0, %r0, -524289
+ laxg %r0, %r0, 524288
+ laxg %r0, %r0, 0(%r1,%r2)
+
+#CHECK: error: invalid operand
#CHECK: lbh %r0, -524289
#CHECK: error: invalid operand
#CHECK: lbh %r0, 524288
@@ -121,6 +411,60 @@
lbh %r0, 524288
#CHECK: error: invalid operand
+#CHECK: ldxbra %f0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: ldxbra %f0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: ldxbra %f0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: ldxbra %f0, 16, %f0, 0
+#CHECK: error: invalid register pair
+#CHECK: ldxbra %f0, 0, %f2, 0
+#CHECK: error: invalid register pair
+#CHECK: ldxbra %f2, 0, %f0, 0
+
+ ldxbra %f0, 0, %f0, -1
+ ldxbra %f0, 0, %f0, 16
+ ldxbra %f0, -1, %f0, 0
+ ldxbra %f0, 16, %f0, 0
+ ldxbra %f0, 0, %f2, 0
+ ldxbra %f2, 0, %f0, 0
+
+#CHECK: error: invalid operand
+#CHECK: ledbra %f0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: ledbra %f0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: ledbra %f0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: ledbra %f0, 16, %f0, 0
+
+ ledbra %f0, 0, %f0, -1
+ ledbra %f0, 0, %f0, 16
+ ledbra %f0, -1, %f0, 0
+ ledbra %f0, 16, %f0, 0
+
+#CHECK: error: invalid operand
+#CHECK: lexbra %f0, 0, %f0, -1
+#CHECK: error: invalid operand
+#CHECK: lexbra %f0, 0, %f0, 16
+#CHECK: error: invalid operand
+#CHECK: lexbra %f0, -1, %f0, 0
+#CHECK: error: invalid operand
+#CHECK: lexbra %f0, 16, %f0, 0
+#CHECK: error: invalid register pair
+#CHECK: lexbra %f0, 0, %f2, 0
+#CHECK: error: invalid register pair
+#CHECK: lexbra %f2, 0, %f0, 0
+
+ lexbra %f0, 0, %f0, -1
+ lexbra %f0, 0, %f0, 16
+ lexbra %f0, -1, %f0, 0
+ lexbra %f0, 16, %f0, 0
+ lexbra %f0, 0, %f2, 0
+ lexbra %f2, 0, %f0, 0
+
+#CHECK: error: invalid operand
#CHECK: lfh %r0, -524289
#CHECK: error: invalid operand
#CHECK: lfh %r0, 524288
diff --git a/test/MC/SystemZ/insn-bad.s b/test/MC/SystemZ/insn-bad.s
index 2a3fb98c2b14..a08cb34da832 100644
--- a/test/MC/SystemZ/insn-bad.s
+++ b/test/MC/SystemZ/insn-bad.s
@@ -386,6 +386,16 @@
cdb %f0, -1
cdb %f0, 4096
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: cdlfbr %f0, 0, %r0, 0
+
+ cdlfbr %f0, 0, %r0, 0
+
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: cdlgbr %f0, 0, %r0, 0
+
+ cdlgbr %f0, 0, %r0, 0
+
#CHECK: error: invalid operand
#CHECK: ceb %f0, -1
#CHECK: error: invalid operand
@@ -394,6 +404,16 @@
ceb %f0, -1
ceb %f0, 4096
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: celfbr %f0, 0, %r0, 0
+
+ celfbr %f0, 0, %r0, 0
+
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: celgbr %f0, 0, %r0, 0
+
+ celgbr %f0, 0, %r0, 0
+
#CHECK: error: invalid operand
#CHECK: cfdbr %r0, -1, %f0
#CHECK: error: invalid operand
@@ -784,6 +804,16 @@
clhf %r0, 0
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: clfdbr %r0, 0, %f0, 0
+
+ clfdbr %r0, 0, %f0, 0
+
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: clfebr %r0, 0, %f0, 0
+
+ clfebr %r0, 0, %f0, 0
+
#CHECK: error: invalid operand
#CHECK: clfhsi -1, 0
#CHECK: error: invalid operand
@@ -809,6 +839,11 @@
clfi %r0, -1
clfi %r0, (1 << 32)
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: clfxbr %r0, 0, %f0, 0
+
+ clfxbr %r0, 0, %f0, 0
+
#CHECK: error: invalid operand
#CHECK: clg %r0, -524289
#CHECK: error: invalid operand
@@ -817,6 +852,16 @@
clg %r0, -524289
clg %r0, 524288
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: clgdbr %r0, 0, %f0, 0
+
+ clgdbr %r0, 0, %f0, 0
+
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: clgebr %r0, 0, %f0, 0
+
+ clgebr %r0, 0, %f0, 0
+
#CHECK: error: invalid operand
#CHECK: clgf %r0, -524289
#CHECK: error: invalid operand
@@ -936,6 +981,11 @@
clgrl %r0, 1
clgrl %r0, 0x100000000
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: clgxbr %r0, 0, %f0, 0
+
+ clgxbr %r0, 0, %f0, 0
+
#CHECK: error: invalid operand
#CHECK: clhhsi -1, 0
#CHECK: error: invalid operand
@@ -1167,6 +1217,16 @@
cxgbr %f2, %r0
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: cxlfbr %f0, 0, %r0, 0
+
+ cxlfbr %f0, 0, %r0, 0
+
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: cxlgbr %f0, 0, %r0, 0
+
+ cxlgbr %f0, 0, %r0, 0
+
#CHECK: error: invalid operand
#CHECK: cy %r0, -524289
#CHECK: error: invalid operand
@@ -1393,6 +1453,46 @@
la %r0, -1
la %r0, 4096
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: laa %r1, %r2, 100(%r3)
+ laa %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: laag %r1, %r2, 100(%r3)
+ laag %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: laal %r1, %r2, 100(%r3)
+ laal %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: laalg %r1, %r2, 100(%r3)
+ laalg %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: lan %r1, %r2, 100(%r3)
+ lan %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: lang %r1, %r2, 100(%r3)
+ lang %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: lao %r1, %r2, 100(%r3)
+ lao %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: laog %r1, %r2, 100(%r3)
+ laog %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: lax %r1, %r2, 100(%r3)
+ lax %r1, %r2, 100(%r3)
+
+#CHECK: error: {{(instruction requires: interlocked-access1)?}}
+#CHECK: laxg %r1, %r2, 100(%r3)
+ laxg %r1, %r2, 100(%r3)
+
#CHECK: error: offset out of range
#CHECK: larl %r0, -0x1000000002
#CHECK: error: offset out of range
@@ -1460,6 +1560,11 @@
ldxbr %f0, %f2
ldxbr %f2, %f0
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: ldxbra %f0, 0, %f0, 0
+
+ ldxbra %f0, 0, %f0, 0
+
#CHECK: error: invalid operand
#CHECK: ldy %f0, -524289
#CHECK: error: invalid operand
@@ -1476,6 +1581,11 @@
le %f0, -1
le %f0, 4096
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: ledbra %f0, 0, %f0, 0
+
+ ledbra %f0, 0, %f0, 0
+
#CHECK: error: invalid register pair
#CHECK: lexbr %f0, %f2
#CHECK: error: invalid register pair
@@ -1484,6 +1594,11 @@
lexbr %f0, %f2
lexbr %f2, %f0
+#CHECK: error: {{(instruction requires: fp-extension)?}}
+#CHECK: lexbra %f0, 0, %f0, 0
+
+ lexbra %f0, 0, %f0, 0
+
#CHECK: error: invalid operand
#CHECK: ley %f0, -524289
#CHECK: error: invalid operand
diff --git a/test/MC/SystemZ/insn-good-z196.s b/test/MC/SystemZ/insn-good-z196.s
index 258e06f99dd1..db5ecdd238ca 100644
--- a/test/MC/SystemZ/insn-good-z196.s
+++ b/test/MC/SystemZ/insn-good-z196.s
@@ -135,6 +135,62 @@
ark %r15,%r0,%r0
ark %r7,%r8,%r9
+#CHECK: cdlfbr %f0, 0, %r0, 0 # encoding: [0xb3,0x91,0x00,0x00]
+#CHECK: cdlfbr %f0, 0, %r0, 15 # encoding: [0xb3,0x91,0x0f,0x00]
+#CHECK: cdlfbr %f0, 0, %r15, 0 # encoding: [0xb3,0x91,0x00,0x0f]
+#CHECK: cdlfbr %f0, 15, %r0, 0 # encoding: [0xb3,0x91,0xf0,0x00]
+#CHECK: cdlfbr %f4, 5, %r6, 7 # encoding: [0xb3,0x91,0x57,0x46]
+#CHECK: cdlfbr %f15, 0, %r0, 0 # encoding: [0xb3,0x91,0x00,0xf0]
+
+ cdlfbr %f0, 0, %r0, 0
+ cdlfbr %f0, 0, %r0, 15
+ cdlfbr %f0, 0, %r15, 0
+ cdlfbr %f0, 15, %r0, 0
+ cdlfbr %f4, 5, %r6, 7
+ cdlfbr %f15, 0, %r0, 0
+
+#CHECK: cdlgbr %f0, 0, %r0, 0 # encoding: [0xb3,0xa1,0x00,0x00]
+#CHECK: cdlgbr %f0, 0, %r0, 15 # encoding: [0xb3,0xa1,0x0f,0x00]
+#CHECK: cdlgbr %f0, 0, %r15, 0 # encoding: [0xb3,0xa1,0x00,0x0f]
+#CHECK: cdlgbr %f0, 15, %r0, 0 # encoding: [0xb3,0xa1,0xf0,0x00]
+#CHECK: cdlgbr %f4, 5, %r6, 7 # encoding: [0xb3,0xa1,0x57,0x46]
+#CHECK: cdlgbr %f15, 0, %r0, 0 # encoding: [0xb3,0xa1,0x00,0xf0]
+
+ cdlgbr %f0, 0, %r0, 0
+ cdlgbr %f0, 0, %r0, 15
+ cdlgbr %f0, 0, %r15, 0
+ cdlgbr %f0, 15, %r0, 0
+ cdlgbr %f4, 5, %r6, 7
+ cdlgbr %f15, 0, %r0, 0
+
+#CHECK: celfbr %f0, 0, %r0, 0 # encoding: [0xb3,0x90,0x00,0x00]
+#CHECK: celfbr %f0, 0, %r0, 15 # encoding: [0xb3,0x90,0x0f,0x00]
+#CHECK: celfbr %f0, 0, %r15, 0 # encoding: [0xb3,0x90,0x00,0x0f]
+#CHECK: celfbr %f0, 15, %r0, 0 # encoding: [0xb3,0x90,0xf0,0x00]
+#CHECK: celfbr %f4, 5, %r6, 7 # encoding: [0xb3,0x90,0x57,0x46]
+#CHECK: celfbr %f15, 0, %r0, 0 # encoding: [0xb3,0x90,0x00,0xf0]
+
+ celfbr %f0, 0, %r0, 0
+ celfbr %f0, 0, %r0, 15
+ celfbr %f0, 0, %r15, 0
+ celfbr %f0, 15, %r0, 0
+ celfbr %f4, 5, %r6, 7
+ celfbr %f15, 0, %r0, 0
+
+#CHECK: celgbr %f0, 0, %r0, 0 # encoding: [0xb3,0xa0,0x00,0x00]
+#CHECK: celgbr %f0, 0, %r0, 15 # encoding: [0xb3,0xa0,0x0f,0x00]
+#CHECK: celgbr %f0, 0, %r15, 0 # encoding: [0xb3,0xa0,0x00,0x0f]
+#CHECK: celgbr %f0, 15, %r0, 0 # encoding: [0xb3,0xa0,0xf0,0x00]
+#CHECK: celgbr %f4, 5, %r6, 7 # encoding: [0xb3,0xa0,0x57,0x46]
+#CHECK: celgbr %f15, 0, %r0, 0 # encoding: [0xb3,0xa0,0x00,0xf0]
+
+ celgbr %f0, 0, %r0, 0
+ celgbr %f0, 0, %r0, 15
+ celgbr %f0, 0, %r15, 0
+ celgbr %f0, 15, %r0, 0
+ celgbr %f4, 5, %r6, 7
+ celgbr %f15, 0, %r0, 0
+
#CHECK: chf %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0xcd]
#CHECK: chf %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0xcd]
#CHECK: chf %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0xcd]
@@ -171,6 +227,90 @@
cih %r0, (1 << 31) - 1
cih %r15, 0
+#CHECK: clfdbr %r0, 0, %f0, 0 # encoding: [0xb3,0x9d,0x00,0x00]
+#CHECK: clfdbr %r0, 0, %f0, 15 # encoding: [0xb3,0x9d,0x0f,0x00]
+#CHECK: clfdbr %r0, 0, %f15, 0 # encoding: [0xb3,0x9d,0x00,0x0f]
+#CHECK: clfdbr %r0, 15, %f0, 0 # encoding: [0xb3,0x9d,0xf0,0x00]
+#CHECK: clfdbr %r4, 5, %f6, 7 # encoding: [0xb3,0x9d,0x57,0x46]
+#CHECK: clfdbr %r15, 0, %f0, 0 # encoding: [0xb3,0x9d,0x00,0xf0]
+
+ clfdbr %r0, 0, %f0, 0
+ clfdbr %r0, 0, %f0, 15
+ clfdbr %r0, 0, %f15, 0
+ clfdbr %r0, 15, %f0, 0
+ clfdbr %r4, 5, %f6, 7
+ clfdbr %r15, 0, %f0, 0
+
+#CHECK: clfebr %r0, 0, %f0, 0 # encoding: [0xb3,0x9c,0x00,0x00]
+#CHECK: clfebr %r0, 0, %f0, 15 # encoding: [0xb3,0x9c,0x0f,0x00]
+#CHECK: clfebr %r0, 0, %f15, 0 # encoding: [0xb3,0x9c,0x00,0x0f]
+#CHECK: clfebr %r0, 15, %f0, 0 # encoding: [0xb3,0x9c,0xf0,0x00]
+#CHECK: clfebr %r4, 5, %f6, 7 # encoding: [0xb3,0x9c,0x57,0x46]
+#CHECK: clfebr %r15, 0, %f0, 0 # encoding: [0xb3,0x9c,0x00,0xf0]
+
+ clfebr %r0, 0, %f0, 0
+ clfebr %r0, 0, %f0, 15
+ clfebr %r0, 0, %f15, 0
+ clfebr %r0, 15, %f0, 0
+ clfebr %r4, 5, %f6, 7
+ clfebr %r15, 0, %f0, 0
+
+#CHECK: clfxbr %r0, 0, %f0, 0 # encoding: [0xb3,0x9e,0x00,0x00]
+#CHECK: clfxbr %r0, 0, %f0, 15 # encoding: [0xb3,0x9e,0x0f,0x00]
+#CHECK: clfxbr %r0, 0, %f13, 0 # encoding: [0xb3,0x9e,0x00,0x0d]
+#CHECK: clfxbr %r0, 15, %f0, 0 # encoding: [0xb3,0x9e,0xf0,0x00]
+#CHECK: clfxbr %r7, 5, %f8, 9 # encoding: [0xb3,0x9e,0x59,0x78]
+#CHECK: clfxbr %r15, 0, %f0, 0 # encoding: [0xb3,0x9e,0x00,0xf0]
+
+ clfxbr %r0, 0, %f0, 0
+ clfxbr %r0, 0, %f0, 15
+ clfxbr %r0, 0, %f13, 0
+ clfxbr %r0, 15, %f0, 0
+ clfxbr %r7, 5, %f8, 9
+ clfxbr %r15, 0, %f0, 0
+
+#CHECK: clgdbr %r0, 0, %f0, 0 # encoding: [0xb3,0xad,0x00,0x00]
+#CHECK: clgdbr %r0, 0, %f0, 15 # encoding: [0xb3,0xad,0x0f,0x00]
+#CHECK: clgdbr %r0, 0, %f15, 0 # encoding: [0xb3,0xad,0x00,0x0f]
+#CHECK: clgdbr %r0, 15, %f0, 0 # encoding: [0xb3,0xad,0xf0,0x00]
+#CHECK: clgdbr %r4, 5, %f6, 7 # encoding: [0xb3,0xad,0x57,0x46]
+#CHECK: clgdbr %r15, 0, %f0, 0 # encoding: [0xb3,0xad,0x00,0xf0]
+
+ clgdbr %r0, 0, %f0, 0
+ clgdbr %r0, 0, %f0, 15
+ clgdbr %r0, 0, %f15, 0
+ clgdbr %r0, 15, %f0, 0
+ clgdbr %r4, 5, %f6, 7
+ clgdbr %r15, 0, %f0, 0
+
+#CHECK: clgebr %r0, 0, %f0, 0 # encoding: [0xb3,0xac,0x00,0x00]
+#CHECK: clgebr %r0, 0, %f0, 15 # encoding: [0xb3,0xac,0x0f,0x00]
+#CHECK: clgebr %r0, 0, %f15, 0 # encoding: [0xb3,0xac,0x00,0x0f]
+#CHECK: clgebr %r0, 15, %f0, 0 # encoding: [0xb3,0xac,0xf0,0x00]
+#CHECK: clgebr %r4, 5, %f6, 7 # encoding: [0xb3,0xac,0x57,0x46]
+#CHECK: clgebr %r15, 0, %f0, 0 # encoding: [0xb3,0xac,0x00,0xf0]
+
+ clgebr %r0, 0, %f0, 0
+ clgebr %r0, 0, %f0, 15
+ clgebr %r0, 0, %f15, 0
+ clgebr %r0, 15, %f0, 0
+ clgebr %r4, 5, %f6, 7
+ clgebr %r15, 0, %f0, 0
+
+#CHECK: clgxbr %r0, 0, %f0, 0 # encoding: [0xb3,0xae,0x00,0x00]
+#CHECK: clgxbr %r0, 0, %f0, 15 # encoding: [0xb3,0xae,0x0f,0x00]
+#CHECK: clgxbr %r0, 0, %f13, 0 # encoding: [0xb3,0xae,0x00,0x0d]
+#CHECK: clgxbr %r0, 15, %f0, 0 # encoding: [0xb3,0xae,0xf0,0x00]
+#CHECK: clgxbr %r7, 5, %f8, 9 # encoding: [0xb3,0xae,0x59,0x78]
+#CHECK: clgxbr %r15, 0, %f0, 0 # encoding: [0xb3,0xae,0x00,0xf0]
+
+ clgxbr %r0, 0, %f0, 0
+ clgxbr %r0, 0, %f0, 15
+ clgxbr %r0, 0, %f13, 0
+ clgxbr %r0, 15, %f0, 0
+ clgxbr %r7, 5, %f8, 9
+ clgxbr %r15, 0, %f0, 0
+
#CHECK: clhf %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0xcf]
#CHECK: clhf %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0xcf]
#CHECK: clhf %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0xcf]
@@ -203,6 +343,34 @@
clih %r0, (1 << 32) - 1
clih %r15, 0
+#CHECK: cxlfbr %f0, 0, %r0, 0 # encoding: [0xb3,0x92,0x00,0x00]
+#CHECK: cxlfbr %f0, 0, %r0, 15 # encoding: [0xb3,0x92,0x0f,0x00]
+#CHECK: cxlfbr %f0, 0, %r15, 0 # encoding: [0xb3,0x92,0x00,0x0f]
+#CHECK: cxlfbr %f0, 15, %r0, 0 # encoding: [0xb3,0x92,0xf0,0x00]
+#CHECK: cxlfbr %f4, 5, %r9, 10 # encoding: [0xb3,0x92,0x5a,0x49]
+#CHECK: cxlfbr %f13, 0, %r0, 0 # encoding: [0xb3,0x92,0x00,0xd0]
+
+ cxlfbr %f0, 0, %r0, 0
+ cxlfbr %f0, 0, %r0, 15
+ cxlfbr %f0, 0, %r15, 0
+ cxlfbr %f0, 15, %r0, 0
+ cxlfbr %f4, 5, %r9, 10
+ cxlfbr %f13, 0, %r0, 0
+
+#CHECK: cxlgbr %f0, 0, %r0, 0 # encoding: [0xb3,0xa2,0x00,0x00]
+#CHECK: cxlgbr %f0, 0, %r0, 15 # encoding: [0xb3,0xa2,0x0f,0x00]
+#CHECK: cxlgbr %f0, 0, %r15, 0 # encoding: [0xb3,0xa2,0x00,0x0f]
+#CHECK: cxlgbr %f0, 15, %r0, 0 # encoding: [0xb3,0xa2,0xf0,0x00]
+#CHECK: cxlgbr %f4, 5, %r9, 10 # encoding: [0xb3,0xa2,0x5a,0x49]
+#CHECK: cxlgbr %f13, 0, %r0, 0 # encoding: [0xb3,0xa2,0x00,0xd0]
+
+ cxlgbr %f0, 0, %r0, 0
+ cxlgbr %f0, 0, %r0, 15
+ cxlgbr %f0, 0, %r15, 0
+ cxlgbr %f0, 15, %r0, 0
+ cxlgbr %f4, 5, %r9, 10
+ cxlgbr %f13, 0, %r0, 0
+
#CHECK: fidbra %f0, 0, %f0, 0 # encoding: [0xb3,0x5f,0x00,0x00]
#CHECK: fidbra %f0, 0, %f0, 15 # encoding: [0xb3,0x5f,0x0f,0x00]
#CHECK: fidbra %f0, 0, %f15, 0 # encoding: [0xb3,0x5f,0x00,0x0f]
@@ -245,6 +413,246 @@
fixbra %f4, 5, %f8, 9
fixbra %f13, 0, %f0, 0
+#CHECK: laa %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xf8]
+#CHECK: laa %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xf8]
+#CHECK: laa %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xf8]
+#CHECK: laa %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xf8]
+#CHECK: laa %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xf8]
+#CHECK: laa %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xf8]
+#CHECK: laa %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xf8]
+#CHECK: laa %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xf8]
+#CHECK: laa %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xf8]
+#CHECK: laa %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xf8]
+#CHECK: laa %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xf8]
+
+ laa %r0, %r0, -524288
+ laa %r0, %r0, -1
+ laa %r0, %r0, 0
+ laa %r0, %r0, 1
+ laa %r0, %r0, 524287
+ laa %r0, %r0, 0(%r1)
+ laa %r0, %r0, 0(%r15)
+ laa %r0, %r0, 524287(%r1)
+ laa %r0, %r0, 524287(%r15)
+ laa %r0, %r15, 0
+ laa %r15, %r0, 0
+
+#CHECK: laag %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe8]
+#CHECK: laag %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xe8]
+#CHECK: laag %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe8]
+#CHECK: laag %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xe8]
+#CHECK: laag %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe8]
+#CHECK: laag %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe8]
+#CHECK: laag %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe8]
+#CHECK: laag %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xe8]
+#CHECK: laag %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xe8]
+#CHECK: laag %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe8]
+#CHECK: laag %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe8]
+
+ laag %r0, %r0, -524288
+ laag %r0, %r0, -1
+ laag %r0, %r0, 0
+ laag %r0, %r0, 1
+ laag %r0, %r0, 524287
+ laag %r0, %r0, 0(%r1)
+ laag %r0, %r0, 0(%r15)
+ laag %r0, %r0, 524287(%r1)
+ laag %r0, %r0, 524287(%r15)
+ laag %r0, %r15, 0
+ laag %r15, %r0, 0
+
+#CHECK: laal %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xfa]
+#CHECK: laal %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xfa]
+#CHECK: laal %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xfa]
+#CHECK: laal %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xfa]
+#CHECK: laal %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xfa]
+#CHECK: laal %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xfa]
+#CHECK: laal %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xfa]
+#CHECK: laal %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xfa]
+#CHECK: laal %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xfa]
+#CHECK: laal %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xfa]
+#CHECK: laal %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xfa]
+
+ laal %r0, %r0, -524288
+ laal %r0, %r0, -1
+ laal %r0, %r0, 0
+ laal %r0, %r0, 1
+ laal %r0, %r0, 524287
+ laal %r0, %r0, 0(%r1)
+ laal %r0, %r0, 0(%r15)
+ laal %r0, %r0, 524287(%r1)
+ laal %r0, %r0, 524287(%r15)
+ laal %r0, %r15, 0
+ laal %r15, %r0, 0
+
+#CHECK: laalg %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xea]
+#CHECK: laalg %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xea]
+#CHECK: laalg %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xea]
+#CHECK: laalg %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xea]
+#CHECK: laalg %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xea]
+#CHECK: laalg %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xea]
+#CHECK: laalg %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xea]
+#CHECK: laalg %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xea]
+#CHECK: laalg %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xea]
+#CHECK: laalg %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xea]
+#CHECK: laalg %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xea]
+
+ laalg %r0, %r0, -524288
+ laalg %r0, %r0, -1
+ laalg %r0, %r0, 0
+ laalg %r0, %r0, 1
+ laalg %r0, %r0, 524287
+ laalg %r0, %r0, 0(%r1)
+ laalg %r0, %r0, 0(%r15)
+ laalg %r0, %r0, 524287(%r1)
+ laalg %r0, %r0, 524287(%r15)
+ laalg %r0, %r15, 0
+ laalg %r15, %r0, 0
+
+#CHECK: lan %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xf4]
+#CHECK: lan %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xf4]
+#CHECK: lan %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xf4]
+#CHECK: lan %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xf4]
+#CHECK: lan %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xf4]
+#CHECK: lan %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xf4]
+#CHECK: lan %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xf4]
+#CHECK: lan %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xf4]
+#CHECK: lan %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xf4]
+#CHECK: lan %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xf4]
+#CHECK: lan %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xf4]
+
+ lan %r0, %r0, -524288
+ lan %r0, %r0, -1
+ lan %r0, %r0, 0
+ lan %r0, %r0, 1
+ lan %r0, %r0, 524287
+ lan %r0, %r0, 0(%r1)
+ lan %r0, %r0, 0(%r15)
+ lan %r0, %r0, 524287(%r1)
+ lan %r0, %r0, 524287(%r15)
+ lan %r0, %r15, 0
+ lan %r15, %r0, 0
+
+#CHECK: lang %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe4]
+#CHECK: lang %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xe4]
+#CHECK: lang %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe4]
+#CHECK: lang %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xe4]
+#CHECK: lang %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe4]
+#CHECK: lang %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe4]
+#CHECK: lang %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe4]
+#CHECK: lang %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xe4]
+#CHECK: lang %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xe4]
+#CHECK: lang %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe4]
+#CHECK: lang %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe4]
+
+ lang %r0, %r0, -524288
+ lang %r0, %r0, -1
+ lang %r0, %r0, 0
+ lang %r0, %r0, 1
+ lang %r0, %r0, 524287
+ lang %r0, %r0, 0(%r1)
+ lang %r0, %r0, 0(%r15)
+ lang %r0, %r0, 524287(%r1)
+ lang %r0, %r0, 524287(%r15)
+ lang %r0, %r15, 0
+ lang %r15, %r0, 0
+
+#CHECK: lao %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xf6]
+#CHECK: lao %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xf6]
+#CHECK: lao %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xf6]
+#CHECK: lao %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xf6]
+#CHECK: lao %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xf6]
+#CHECK: lao %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xf6]
+#CHECK: lao %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xf6]
+#CHECK: lao %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xf6]
+#CHECK: lao %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xf6]
+#CHECK: lao %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xf6]
+#CHECK: lao %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xf6]
+
+ lao %r0, %r0, -524288
+ lao %r0, %r0, -1
+ lao %r0, %r0, 0
+ lao %r0, %r0, 1
+ lao %r0, %r0, 524287
+ lao %r0, %r0, 0(%r1)
+ lao %r0, %r0, 0(%r15)
+ lao %r0, %r0, 524287(%r1)
+ lao %r0, %r0, 524287(%r15)
+ lao %r0, %r15, 0
+ lao %r15, %r0, 0
+
+#CHECK: laog %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe6]
+#CHECK: laog %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xe6]
+#CHECK: laog %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe6]
+#CHECK: laog %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xe6]
+#CHECK: laog %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe6]
+#CHECK: laog %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe6]
+#CHECK: laog %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe6]
+#CHECK: laog %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xe6]
+#CHECK: laog %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xe6]
+#CHECK: laog %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe6]
+#CHECK: laog %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe6]
+
+ laog %r0, %r0, -524288
+ laog %r0, %r0, -1
+ laog %r0, %r0, 0
+ laog %r0, %r0, 1
+ laog %r0, %r0, 524287
+ laog %r0, %r0, 0(%r1)
+ laog %r0, %r0, 0(%r15)
+ laog %r0, %r0, 524287(%r1)
+ laog %r0, %r0, 524287(%r15)
+ laog %r0, %r15, 0
+ laog %r15, %r0, 0
+
+#CHECK: lax %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xf7]
+#CHECK: lax %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xf7]
+#CHECK: lax %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xf7]
+#CHECK: lax %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xf7]
+#CHECK: lax %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xf7]
+#CHECK: lax %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xf7]
+#CHECK: lax %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xf7]
+#CHECK: lax %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xf7]
+#CHECK: lax %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xf7]
+#CHECK: lax %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xf7]
+#CHECK: lax %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xf7]
+
+ lax %r0, %r0, -524288
+ lax %r0, %r0, -1
+ lax %r0, %r0, 0
+ lax %r0, %r0, 1
+ lax %r0, %r0, 524287
+ lax %r0, %r0, 0(%r1)
+ lax %r0, %r0, 0(%r15)
+ lax %r0, %r0, 524287(%r1)
+ lax %r0, %r0, 524287(%r15)
+ lax %r0, %r15, 0
+ lax %r15, %r0, 0
+
+#CHECK: laxg %r0, %r0, -524288 # encoding: [0xeb,0x00,0x00,0x00,0x80,0xe7]
+#CHECK: laxg %r0, %r0, -1 # encoding: [0xeb,0x00,0x0f,0xff,0xff,0xe7]
+#CHECK: laxg %r0, %r0, 0 # encoding: [0xeb,0x00,0x00,0x00,0x00,0xe7]
+#CHECK: laxg %r0, %r0, 1 # encoding: [0xeb,0x00,0x00,0x01,0x00,0xe7]
+#CHECK: laxg %r0, %r0, 524287 # encoding: [0xeb,0x00,0x0f,0xff,0x7f,0xe7]
+#CHECK: laxg %r0, %r0, 0(%r1) # encoding: [0xeb,0x00,0x10,0x00,0x00,0xe7]
+#CHECK: laxg %r0, %r0, 0(%r15) # encoding: [0xeb,0x00,0xf0,0x00,0x00,0xe7]
+#CHECK: laxg %r0, %r0, 524287(%r1) # encoding: [0xeb,0x00,0x1f,0xff,0x7f,0xe7]
+#CHECK: laxg %r0, %r0, 524287(%r15) # encoding: [0xeb,0x00,0xff,0xff,0x7f,0xe7]
+#CHECK: laxg %r0, %r15, 0 # encoding: [0xeb,0x0f,0x00,0x00,0x00,0xe7]
+#CHECK: laxg %r15, %r0, 0 # encoding: [0xeb,0xf0,0x00,0x00,0x00,0xe7]
+
+ laxg %r0, %r0, -524288
+ laxg %r0, %r0, -1
+ laxg %r0, %r0, 0
+ laxg %r0, %r0, 1
+ laxg %r0, %r0, 524287
+ laxg %r0, %r0, 0(%r1)
+ laxg %r0, %r0, 0(%r15)
+ laxg %r0, %r0, 524287(%r1)
+ laxg %r0, %r0, 524287(%r15)
+ laxg %r0, %r15, 0
+ laxg %r15, %r0, 0
+
#CHECK: lbh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0xc0]
#CHECK: lbh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0xc0]
#CHECK: lbh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0xc0]
@@ -267,6 +675,48 @@
lbh %r0, 524287(%r15,%r1)
lbh %r15, 0
+#CHECK: ldxbra %f0, 0, %f0, 0 # encoding: [0xb3,0x45,0x00,0x00]
+#CHECK: ldxbra %f0, 0, %f0, 15 # encoding: [0xb3,0x45,0x0f,0x00]
+#CHECK: ldxbra %f0, 0, %f13, 0 # encoding: [0xb3,0x45,0x00,0x0d]
+#CHECK: ldxbra %f0, 15, %f0, 0 # encoding: [0xb3,0x45,0xf0,0x00]
+#CHECK: ldxbra %f4, 5, %f8, 9 # encoding: [0xb3,0x45,0x59,0x48]
+#CHECK: ldxbra %f13, 0, %f0, 0 # encoding: [0xb3,0x45,0x00,0xd0]
+
+ ldxbra %f0, 0, %f0, 0
+ ldxbra %f0, 0, %f0, 15
+ ldxbra %f0, 0, %f13, 0
+ ldxbra %f0, 15, %f0, 0
+ ldxbra %f4, 5, %f8, 9
+ ldxbra %f13, 0, %f0, 0
+
+#CHECK: ledbra %f0, 0, %f0, 0 # encoding: [0xb3,0x44,0x00,0x00]
+#CHECK: ledbra %f0, 0, %f0, 15 # encoding: [0xb3,0x44,0x0f,0x00]
+#CHECK: ledbra %f0, 0, %f15, 0 # encoding: [0xb3,0x44,0x00,0x0f]
+#CHECK: ledbra %f0, 15, %f0, 0 # encoding: [0xb3,0x44,0xf0,0x00]
+#CHECK: ledbra %f4, 5, %f6, 7 # encoding: [0xb3,0x44,0x57,0x46]
+#CHECK: ledbra %f15, 0, %f0, 0 # encoding: [0xb3,0x44,0x00,0xf0]
+
+ ledbra %f0, 0, %f0, 0
+ ledbra %f0, 0, %f0, 15
+ ledbra %f0, 0, %f15, 0
+ ledbra %f0, 15, %f0, 0
+ ledbra %f4, 5, %f6, 7
+ ledbra %f15, 0, %f0, 0
+
+#CHECK: lexbra %f0, 0, %f0, 0 # encoding: [0xb3,0x46,0x00,0x00]
+#CHECK: lexbra %f0, 0, %f0, 15 # encoding: [0xb3,0x46,0x0f,0x00]
+#CHECK: lexbra %f0, 0, %f13, 0 # encoding: [0xb3,0x46,0x00,0x0d]
+#CHECK: lexbra %f0, 15, %f0, 0 # encoding: [0xb3,0x46,0xf0,0x00]
+#CHECK: lexbra %f4, 5, %f8, 9 # encoding: [0xb3,0x46,0x59,0x48]
+#CHECK: lexbra %f13, 0, %f0, 0 # encoding: [0xb3,0x46,0x00,0xd0]
+
+ lexbra %f0, 0, %f0, 0
+ lexbra %f0, 0, %f0, 15
+ lexbra %f0, 0, %f13, 0
+ lexbra %f0, 15, %f0, 0
+ lexbra %f4, 5, %f8, 9
+ lexbra %f13, 0, %f0, 0
+
#CHECK: lfh %r0, -524288 # encoding: [0xe3,0x00,0x00,0x00,0x80,0xca]
#CHECK: lfh %r0, -1 # encoding: [0xe3,0x00,0x0f,0xff,0xff,0xca]
#CHECK: lfh %r0, 0 # encoding: [0xe3,0x00,0x00,0x00,0x00,0xca]
diff --git a/test/MC/SystemZ/lit.local.cfg b/test/MC/SystemZ/lit.local.cfg
index b12af09434be..5c02dd3614a4 100644
--- a/test/MC/SystemZ/lit.local.cfg
+++ b/test/MC/SystemZ/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'SystemZ' in targets:
+if not 'SystemZ' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/X86/AlignedBundling/lit.local.cfg b/test/MC/X86/AlignedBundling/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/MC/X86/AlignedBundling/lit.local.cfg
+++ b/test/MC/X86/AlignedBundling/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/X86/address-size.s b/test/MC/X86/address-size.s
index b105b40ec568..c9d04c4af3f4 100644
--- a/test/MC/X86/address-size.s
+++ b/test/MC/X86/address-size.s
@@ -8,6 +8,20 @@
.code32
movb $0x0, (%si)
-// CHECK: encoding: [0x67,0xc6,0x06,0x00]
+// CHECK: encoding: [0x67,0xc6,0x04,0x00]
movb $0x0, (%esi)
// CHECK: encoding: [0xc6,0x06,0x00]
+ movw $0x1234, (%si)
+// CHECK: encoding: [0x67,0x66,0xc7,0x04,0x34,0x12]
+ movl $0x12345678, (%bx,%si,1)
+// CHECK: encoding: [0x67,0xc7,0x00,0x78,0x56,0x34,0x12]
+ movw $0x1234, 0x5678(%bp)
+// CHECK: encoding: [0x67,0x66,0xc7,0x86,0x78,0x56,0x34,0x12]
+
+ .code16
+ movb $0x0, (%si)
+// CHECK: encoding: [0xc6,0x04,0x00]
+ movb $0x0, (%esi)
+// CHECK: encoding: [0x67,0xc6,0x06,0x00]
+ movb $0x5a, (%di,%bp,1)
+// CHECK: encoding: [0xc6,0x03,0x5a]
diff --git a/test/MC/X86/avx512-encodings.s b/test/MC/X86/avx512-encodings.s
index 38f9190d949f..187b51264c41 100644
--- a/test/MC/X86/avx512-encodings.s
+++ b/test/MC/X86/avx512-encodings.s
@@ -1,4 +1,3093 @@
-// RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=knl --show-encoding %s | FileCheck %s
+// RUN: not llvm-mc -triple x86_64-unknown-unknown -mcpu=knl --show-encoding %s 2> %t.err | FileCheck %s
+// RUN: FileCheck --check-prefix=ERR < %t.err %s
+
+// CHECK: vaddpd %zmm6, %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x40,0x58,0xc6]
+ vaddpd %zmm6, %zmm27, %zmm8
+
+// CHECK: vaddpd %zmm6, %zmm27, %zmm8 {%k7}
+// CHECK: encoding: [0x62,0x71,0xa5,0x47,0x58,0xc6]
+ vaddpd %zmm6, %zmm27, %zmm8 {%k7}
+
+// CHECK: vaddpd %zmm6, %zmm27, %zmm8 {%k7} {z}
+// CHECK: encoding: [0x62,0x71,0xa5,0xc7,0x58,0xc6]
+ vaddpd %zmm6, %zmm27, %zmm8 {%k7} {z}
+
+// CHECK: vaddpd (%rcx), %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x40,0x58,0x01]
+ vaddpd (%rcx), %zmm27, %zmm8
+
+// CHECK: vaddpd 291(%rax,%r14,8), %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x31,0xa5,0x40,0x58,0x84,0xf0,0x23,0x01,0x00,0x00]
+ vaddpd 291(%rax,%r14,8), %zmm27, %zmm8
+
+// CHECK: vaddpd (%rcx){1to8}, %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x50,0x58,0x01]
+ vaddpd (%rcx){1to8}, %zmm27, %zmm8
+
+// CHECK: vaddpd 8128(%rdx), %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x40,0x58,0x42,0x7f]
+ vaddpd 8128(%rdx), %zmm27, %zmm8
+
+// CHECK: vaddpd 8192(%rdx), %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x40,0x58,0x82,0x00,0x20,0x00,0x00]
+ vaddpd 8192(%rdx), %zmm27, %zmm8
+
+// CHECK: vaddpd -8192(%rdx), %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x40,0x58,0x42,0x80]
+ vaddpd -8192(%rdx), %zmm27, %zmm8
+
+// CHECK: vaddpd -8256(%rdx), %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x40,0x58,0x82,0xc0,0xdf,0xff,0xff]
+ vaddpd -8256(%rdx), %zmm27, %zmm8
+
+// CHECK: vaddpd 1016(%rdx){1to8}, %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x50,0x58,0x42,0x7f]
+ vaddpd 1016(%rdx){1to8}, %zmm27, %zmm8
+
+// CHECK: vaddpd 1024(%rdx){1to8}, %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x50,0x58,0x82,0x00,0x04,0x00,0x00]
+ vaddpd 1024(%rdx){1to8}, %zmm27, %zmm8
+
+// CHECK: vaddpd -1024(%rdx){1to8}, %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x50,0x58,0x42,0x80]
+ vaddpd -1024(%rdx){1to8}, %zmm27, %zmm8
+
+// CHECK: vaddpd -1032(%rdx){1to8}, %zmm27, %zmm8
+// CHECK: encoding: [0x62,0x71,0xa5,0x50,0x58,0x82,0xf8,0xfb,0xff,0xff]
+ vaddpd -1032(%rdx){1to8}, %zmm27, %zmm8
+
+// CHECK: vaddps %zmm2, %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x48,0x58,0xd2]
+ vaddps %zmm2, %zmm13, %zmm18
+
+// CHECK: vaddps %zmm2, %zmm13, %zmm18 {%k4}
+// CHECK: encoding: [0x62,0xe1,0x14,0x4c,0x58,0xd2]
+ vaddps %zmm2, %zmm13, %zmm18 {%k4}
+
+// CHECK: vaddps %zmm2, %zmm13, %zmm18 {%k4} {z}
+// CHECK: encoding: [0x62,0xe1,0x14,0xcc,0x58,0xd2]
+ vaddps %zmm2, %zmm13, %zmm18 {%k4} {z}
+
+// CHECK: vaddps (%rcx), %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x48,0x58,0x11]
+ vaddps (%rcx), %zmm13, %zmm18
+
+// CHECK: vaddps 291(%rax,%r14,8), %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xa1,0x14,0x48,0x58,0x94,0xf0,0x23,0x01,0x00,0x00]
+ vaddps 291(%rax,%r14,8), %zmm13, %zmm18
+
+// CHECK: vaddps (%rcx){1to16}, %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x58,0x58,0x11]
+ vaddps (%rcx){1to16}, %zmm13, %zmm18
+
+// CHECK: vaddps 8128(%rdx), %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x48,0x58,0x52,0x7f]
+ vaddps 8128(%rdx), %zmm13, %zmm18
+
+// CHECK: vaddps 8192(%rdx), %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x48,0x58,0x92,0x00,0x20,0x00,0x00]
+ vaddps 8192(%rdx), %zmm13, %zmm18
+
+// CHECK: vaddps -8192(%rdx), %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x48,0x58,0x52,0x80]
+ vaddps -8192(%rdx), %zmm13, %zmm18
+
+// CHECK: vaddps -8256(%rdx), %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x48,0x58,0x92,0xc0,0xdf,0xff,0xff]
+ vaddps -8256(%rdx), %zmm13, %zmm18
+
+// CHECK: vaddps 508(%rdx){1to16}, %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x58,0x58,0x52,0x7f]
+ vaddps 508(%rdx){1to16}, %zmm13, %zmm18
+
+// CHECK: vaddps 512(%rdx){1to16}, %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x58,0x58,0x92,0x00,0x02,0x00,0x00]
+ vaddps 512(%rdx){1to16}, %zmm13, %zmm18
+
+// CHECK: vaddps -512(%rdx){1to16}, %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x58,0x58,0x52,0x80]
+ vaddps -512(%rdx){1to16}, %zmm13, %zmm18
+
+// CHECK: vaddps -516(%rdx){1to16}, %zmm13, %zmm18
+// CHECK: encoding: [0x62,0xe1,0x14,0x58,0x58,0x92,0xfc,0xfd,0xff,0xff]
+ vaddps -516(%rdx){1to16}, %zmm13, %zmm18
+
+// CHECK: vdivpd %zmm11, %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xc1,0xcd,0x48,0x5e,0xd3]
+ vdivpd %zmm11, %zmm6, %zmm18
+
+// CHECK: vdivpd %zmm11, %zmm6, %zmm18 {%k4}
+// CHECK: encoding: [0x62,0xc1,0xcd,0x4c,0x5e,0xd3]
+ vdivpd %zmm11, %zmm6, %zmm18 {%k4}
+
+// CHECK: vdivpd %zmm11, %zmm6, %zmm18 {%k4} {z}
+// CHECK: encoding: [0x62,0xc1,0xcd,0xcc,0x5e,0xd3]
+ vdivpd %zmm11, %zmm6, %zmm18 {%k4} {z}
+
+// CHECK: vdivpd (%rcx), %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x48,0x5e,0x11]
+ vdivpd (%rcx), %zmm6, %zmm18
+
+// CHECK: vdivpd 291(%rax,%r14,8), %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xa1,0xcd,0x48,0x5e,0x94,0xf0,0x23,0x01,0x00,0x00]
+ vdivpd 291(%rax,%r14,8), %zmm6, %zmm18
+
+// CHECK: vdivpd (%rcx){1to8}, %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x58,0x5e,0x11]
+ vdivpd (%rcx){1to8}, %zmm6, %zmm18
+
+// CHECK: vdivpd 8128(%rdx), %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x48,0x5e,0x52,0x7f]
+ vdivpd 8128(%rdx), %zmm6, %zmm18
+
+// CHECK: vdivpd 8192(%rdx), %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x48,0x5e,0x92,0x00,0x20,0x00,0x00]
+ vdivpd 8192(%rdx), %zmm6, %zmm18
+
+// CHECK: vdivpd -8192(%rdx), %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x48,0x5e,0x52,0x80]
+ vdivpd -8192(%rdx), %zmm6, %zmm18
+
+// CHECK: vdivpd -8256(%rdx), %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x48,0x5e,0x92,0xc0,0xdf,0xff,0xff]
+ vdivpd -8256(%rdx), %zmm6, %zmm18
+
+// CHECK: vdivpd 1016(%rdx){1to8}, %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x58,0x5e,0x52,0x7f]
+ vdivpd 1016(%rdx){1to8}, %zmm6, %zmm18
+
+// CHECK: vdivpd 1024(%rdx){1to8}, %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x58,0x5e,0x92,0x00,0x04,0x00,0x00]
+ vdivpd 1024(%rdx){1to8}, %zmm6, %zmm18
+
+// CHECK: vdivpd -1024(%rdx){1to8}, %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x58,0x5e,0x52,0x80]
+ vdivpd -1024(%rdx){1to8}, %zmm6, %zmm18
+
+// CHECK: vdivpd -1032(%rdx){1to8}, %zmm6, %zmm18
+// CHECK: encoding: [0x62,0xe1,0xcd,0x58,0x5e,0x92,0xf8,0xfb,0xff,0xff]
+ vdivpd -1032(%rdx){1to8}, %zmm6, %zmm18
+
+// CHECK: vdivps %zmm28, %zmm23, %zmm23
+// CHECK: encoding: [0x62,0x81,0x44,0x40,0x5e,0xfc]
+ vdivps %zmm28, %zmm23, %zmm23
+
+// CHECK: vdivps %zmm28, %zmm23, %zmm23 {%k2}
+// CHECK: encoding: [0x62,0x81,0x44,0x42,0x5e,0xfc]
+ vdivps %zmm28, %zmm23, %zmm23 {%k2}
+
+// CHECK: vdivps %zmm28, %zmm23, %zmm23 {%k2} {z}
+// CHECK: encoding: [0x62,0x81,0x44,0xc2,0x5e,0xfc]
+ vdivps %zmm28, %zmm23, %zmm23 {%k2} {z}
+
+// CHECK: vdivps (%rcx), %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x40,0x5e,0x39]
+ vdivps (%rcx), %zmm23, %zmm23
+
+// CHECK: vdivps 291(%rax,%r14,8), %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xa1,0x44,0x40,0x5e,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vdivps 291(%rax,%r14,8), %zmm23, %zmm23
+
+// CHECK: vdivps (%rcx){1to16}, %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x50,0x5e,0x39]
+ vdivps (%rcx){1to16}, %zmm23, %zmm23
+
+// CHECK: vdivps 8128(%rdx), %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x40,0x5e,0x7a,0x7f]
+ vdivps 8128(%rdx), %zmm23, %zmm23
+
+// CHECK: vdivps 8192(%rdx), %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x40,0x5e,0xba,0x00,0x20,0x00,0x00]
+ vdivps 8192(%rdx), %zmm23, %zmm23
+
+// CHECK: vdivps -8192(%rdx), %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x40,0x5e,0x7a,0x80]
+ vdivps -8192(%rdx), %zmm23, %zmm23
+
+// CHECK: vdivps -8256(%rdx), %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x40,0x5e,0xba,0xc0,0xdf,0xff,0xff]
+ vdivps -8256(%rdx), %zmm23, %zmm23
+
+// CHECK: vdivps 508(%rdx){1to16}, %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x50,0x5e,0x7a,0x7f]
+ vdivps 508(%rdx){1to16}, %zmm23, %zmm23
+
+// CHECK: vdivps 512(%rdx){1to16}, %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x50,0x5e,0xba,0x00,0x02,0x00,0x00]
+ vdivps 512(%rdx){1to16}, %zmm23, %zmm23
+
+// CHECK: vdivps -512(%rdx){1to16}, %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x50,0x5e,0x7a,0x80]
+ vdivps -512(%rdx){1to16}, %zmm23, %zmm23
+
+// CHECK: vdivps -516(%rdx){1to16}, %zmm23, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x44,0x50,0x5e,0xba,0xfc,0xfd,0xff,0xff]
+ vdivps -516(%rdx){1to16}, %zmm23, %zmm23
+
+// CHECK: vmaxpd %zmm20, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x21,0x9d,0x40,0x5f,0xf4]
+ vmaxpd %zmm20, %zmm28, %zmm30
+
+// CHECK: vmaxpd %zmm20, %zmm28, %zmm30 {%k1}
+// CHECK: encoding: [0x62,0x21,0x9d,0x41,0x5f,0xf4]
+ vmaxpd %zmm20, %zmm28, %zmm30 {%k1}
+
+// CHECK: vmaxpd %zmm20, %zmm28, %zmm30 {%k1} {z}
+// CHECK: encoding: [0x62,0x21,0x9d,0xc1,0x5f,0xf4]
+ vmaxpd %zmm20, %zmm28, %zmm30 {%k1} {z}
+
+// CHECK: vmaxpd (%rcx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0x5f,0x31]
+ vmaxpd (%rcx), %zmm28, %zmm30
+
+// CHECK: vmaxpd 291(%rax,%r14,8), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x21,0x9d,0x40,0x5f,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vmaxpd 291(%rax,%r14,8), %zmm28, %zmm30
+
+// CHECK: vmaxpd (%rcx){1to8}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0x5f,0x31]
+ vmaxpd (%rcx){1to8}, %zmm28, %zmm30
+
+// CHECK: vmaxpd 8128(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0x5f,0x72,0x7f]
+ vmaxpd 8128(%rdx), %zmm28, %zmm30
+
+// CHECK: vmaxpd 8192(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0x5f,0xb2,0x00,0x20,0x00,0x00]
+ vmaxpd 8192(%rdx), %zmm28, %zmm30
+
+// CHECK: vmaxpd -8192(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0x5f,0x72,0x80]
+ vmaxpd -8192(%rdx), %zmm28, %zmm30
+
+// CHECK: vmaxpd -8256(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0x5f,0xb2,0xc0,0xdf,0xff,0xff]
+ vmaxpd -8256(%rdx), %zmm28, %zmm30
+
+// CHECK: vmaxpd 1016(%rdx){1to8}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0x5f,0x72,0x7f]
+ vmaxpd 1016(%rdx){1to8}, %zmm28, %zmm30
+
+// CHECK: vmaxpd 1024(%rdx){1to8}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0x5f,0xb2,0x00,0x04,0x00,0x00]
+ vmaxpd 1024(%rdx){1to8}, %zmm28, %zmm30
+
+// CHECK: vmaxpd -1024(%rdx){1to8}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0x5f,0x72,0x80]
+ vmaxpd -1024(%rdx){1to8}, %zmm28, %zmm30
+
+// CHECK: vmaxpd -1032(%rdx){1to8}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0x5f,0xb2,0xf8,0xfb,0xff,0xff]
+ vmaxpd -1032(%rdx){1to8}, %zmm28, %zmm30
+
+// CHECK: vmaxps %zmm20, %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x21,0x4c,0x48,0x5f,0xcc]
+ vmaxps %zmm20, %zmm6, %zmm25
+
+// CHECK: vmaxps %zmm20, %zmm6, %zmm25 {%k1}
+// CHECK: encoding: [0x62,0x21,0x4c,0x49,0x5f,0xcc]
+ vmaxps %zmm20, %zmm6, %zmm25 {%k1}
+
+// CHECK: vmaxps %zmm20, %zmm6, %zmm25 {%k1} {z}
+// CHECK: encoding: [0x62,0x21,0x4c,0xc9,0x5f,0xcc]
+ vmaxps %zmm20, %zmm6, %zmm25 {%k1} {z}
+
+// CHECK: vmaxps (%rcx), %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x48,0x5f,0x09]
+ vmaxps (%rcx), %zmm6, %zmm25
+
+// CHECK: vmaxps 291(%rax,%r14,8), %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x21,0x4c,0x48,0x5f,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vmaxps 291(%rax,%r14,8), %zmm6, %zmm25
+
+// CHECK: vmaxps (%rcx){1to16}, %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x58,0x5f,0x09]
+ vmaxps (%rcx){1to16}, %zmm6, %zmm25
+
+// CHECK: vmaxps 8128(%rdx), %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x48,0x5f,0x4a,0x7f]
+ vmaxps 8128(%rdx), %zmm6, %zmm25
+
+// CHECK: vmaxps 8192(%rdx), %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x48,0x5f,0x8a,0x00,0x20,0x00,0x00]
+ vmaxps 8192(%rdx), %zmm6, %zmm25
+
+// CHECK: vmaxps -8192(%rdx), %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x48,0x5f,0x4a,0x80]
+ vmaxps -8192(%rdx), %zmm6, %zmm25
+
+// CHECK: vmaxps -8256(%rdx), %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x48,0x5f,0x8a,0xc0,0xdf,0xff,0xff]
+ vmaxps -8256(%rdx), %zmm6, %zmm25
+
+// CHECK: vmaxps 508(%rdx){1to16}, %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x58,0x5f,0x4a,0x7f]
+ vmaxps 508(%rdx){1to16}, %zmm6, %zmm25
+
+// CHECK: vmaxps 512(%rdx){1to16}, %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x58,0x5f,0x8a,0x00,0x02,0x00,0x00]
+ vmaxps 512(%rdx){1to16}, %zmm6, %zmm25
+
+// CHECK: vmaxps -512(%rdx){1to16}, %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x58,0x5f,0x4a,0x80]
+ vmaxps -512(%rdx){1to16}, %zmm6, %zmm25
+
+// CHECK: vmaxps -516(%rdx){1to16}, %zmm6, %zmm25
+// CHECK: encoding: [0x62,0x61,0x4c,0x58,0x5f,0x8a,0xfc,0xfd,0xff,0xff]
+ vmaxps -516(%rdx){1to16}, %zmm6, %zmm25
+
+// CHECK: vminpd %zmm22, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xb1,0xcd,0x48,0x5d,0xf6]
+ vminpd %zmm22, %zmm6, %zmm6
+
+// CHECK: vminpd %zmm22, %zmm6, %zmm6 {%k7}
+// CHECK: encoding: [0x62,0xb1,0xcd,0x4f,0x5d,0xf6]
+ vminpd %zmm22, %zmm6, %zmm6 {%k7}
+
+// CHECK: vminpd %zmm22, %zmm6, %zmm6 {%k7} {z}
+// CHECK: encoding: [0x62,0xb1,0xcd,0xcf,0x5d,0xf6]
+ vminpd %zmm22, %zmm6, %zmm6 {%k7} {z}
+
+// CHECK: vminpd (%rcx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x48,0x5d,0x31]
+ vminpd (%rcx), %zmm6, %zmm6
+
+// CHECK: vminpd 291(%rax,%r14,8), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xb1,0xcd,0x48,0x5d,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vminpd 291(%rax,%r14,8), %zmm6, %zmm6
+
+// CHECK: vminpd (%rcx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x58,0x5d,0x31]
+ vminpd (%rcx){1to8}, %zmm6, %zmm6
+
+// CHECK: vminpd 8128(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x48,0x5d,0x72,0x7f]
+ vminpd 8128(%rdx), %zmm6, %zmm6
+
+// CHECK: vminpd 8192(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x48,0x5d,0xb2,0x00,0x20,0x00,0x00]
+ vminpd 8192(%rdx), %zmm6, %zmm6
+
+// CHECK: vminpd -8192(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x48,0x5d,0x72,0x80]
+ vminpd -8192(%rdx), %zmm6, %zmm6
+
+// CHECK: vminpd -8256(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x48,0x5d,0xb2,0xc0,0xdf,0xff,0xff]
+ vminpd -8256(%rdx), %zmm6, %zmm6
+
+// CHECK: vminpd 1016(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x58,0x5d,0x72,0x7f]
+ vminpd 1016(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vminpd 1024(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x58,0x5d,0xb2,0x00,0x04,0x00,0x00]
+ vminpd 1024(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vminpd -1024(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x58,0x5d,0x72,0x80]
+ vminpd -1024(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vminpd -1032(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf1,0xcd,0x58,0x5d,0xb2,0xf8,0xfb,0xff,0xff]
+ vminpd -1032(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vminps %zmm7, %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x48,0x5d,0xdf]
+ vminps %zmm7, %zmm3, %zmm3
+
+// CHECK: vminps %zmm7, %zmm3, %zmm3 {%k3}
+// CHECK: encoding: [0x62,0xf1,0x64,0x4b,0x5d,0xdf]
+ vminps %zmm7, %zmm3, %zmm3 {%k3}
+
+// CHECK: vminps %zmm7, %zmm3, %zmm3 {%k3} {z}
+// CHECK: encoding: [0x62,0xf1,0x64,0xcb,0x5d,0xdf]
+ vminps %zmm7, %zmm3, %zmm3 {%k3} {z}
+
+// CHECK: vminps (%rcx), %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x48,0x5d,0x19]
+ vminps (%rcx), %zmm3, %zmm3
+
+// CHECK: vminps 291(%rax,%r14,8), %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xb1,0x64,0x48,0x5d,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vminps 291(%rax,%r14,8), %zmm3, %zmm3
+
+// CHECK: vminps (%rcx){1to16}, %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x58,0x5d,0x19]
+ vminps (%rcx){1to16}, %zmm3, %zmm3
+
+// CHECK: vminps 8128(%rdx), %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x48,0x5d,0x5a,0x7f]
+ vminps 8128(%rdx), %zmm3, %zmm3
+
+// CHECK: vminps 8192(%rdx), %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x48,0x5d,0x9a,0x00,0x20,0x00,0x00]
+ vminps 8192(%rdx), %zmm3, %zmm3
+
+// CHECK: vminps -8192(%rdx), %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x48,0x5d,0x5a,0x80]
+ vminps -8192(%rdx), %zmm3, %zmm3
+
+// CHECK: vminps -8256(%rdx), %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x48,0x5d,0x9a,0xc0,0xdf,0xff,0xff]
+ vminps -8256(%rdx), %zmm3, %zmm3
+
+// CHECK: vminps 508(%rdx){1to16}, %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x58,0x5d,0x5a,0x7f]
+ vminps 508(%rdx){1to16}, %zmm3, %zmm3
+
+// CHECK: vminps 512(%rdx){1to16}, %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x58,0x5d,0x9a,0x00,0x02,0x00,0x00]
+ vminps 512(%rdx){1to16}, %zmm3, %zmm3
+
+// CHECK: vminps -512(%rdx){1to16}, %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x58,0x5d,0x5a,0x80]
+ vminps -512(%rdx){1to16}, %zmm3, %zmm3
+
+// CHECK: vminps -516(%rdx){1to16}, %zmm3, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x64,0x58,0x5d,0x9a,0xfc,0xfd,0xff,0xff]
+ vminps -516(%rdx){1to16}, %zmm3, %zmm3
+
+// CHECK: vmulpd %zmm23, %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x21,0xdd,0x48,0x59,0xc7]
+ vmulpd %zmm23, %zmm4, %zmm24
+
+// CHECK: vmulpd %zmm23, %zmm4, %zmm24 {%k6}
+// CHECK: encoding: [0x62,0x21,0xdd,0x4e,0x59,0xc7]
+ vmulpd %zmm23, %zmm4, %zmm24 {%k6}
+
+// CHECK: vmulpd %zmm23, %zmm4, %zmm24 {%k6} {z}
+// CHECK: encoding: [0x62,0x21,0xdd,0xce,0x59,0xc7]
+ vmulpd %zmm23, %zmm4, %zmm24 {%k6} {z}
+
+// CHECK: vmulpd (%rcx), %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x48,0x59,0x01]
+ vmulpd (%rcx), %zmm4, %zmm24
+
+// CHECK: vmulpd 291(%rax,%r14,8), %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x21,0xdd,0x48,0x59,0x84,0xf0,0x23,0x01,0x00,0x00]
+ vmulpd 291(%rax,%r14,8), %zmm4, %zmm24
+
+// CHECK: vmulpd (%rcx){1to8}, %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x58,0x59,0x01]
+ vmulpd (%rcx){1to8}, %zmm4, %zmm24
+
+// CHECK: vmulpd 8128(%rdx), %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x48,0x59,0x42,0x7f]
+ vmulpd 8128(%rdx), %zmm4, %zmm24
+
+// CHECK: vmulpd 8192(%rdx), %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x48,0x59,0x82,0x00,0x20,0x00,0x00]
+ vmulpd 8192(%rdx), %zmm4, %zmm24
+
+// CHECK: vmulpd -8192(%rdx), %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x48,0x59,0x42,0x80]
+ vmulpd -8192(%rdx), %zmm4, %zmm24
+
+// CHECK: vmulpd -8256(%rdx), %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x48,0x59,0x82,0xc0,0xdf,0xff,0xff]
+ vmulpd -8256(%rdx), %zmm4, %zmm24
+
+// CHECK: vmulpd 1016(%rdx){1to8}, %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x58,0x59,0x42,0x7f]
+ vmulpd 1016(%rdx){1to8}, %zmm4, %zmm24
+
+// CHECK: vmulpd 1024(%rdx){1to8}, %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x58,0x59,0x82,0x00,0x04,0x00,0x00]
+ vmulpd 1024(%rdx){1to8}, %zmm4, %zmm24
+
+// CHECK: vmulpd -1024(%rdx){1to8}, %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x58,0x59,0x42,0x80]
+ vmulpd -1024(%rdx){1to8}, %zmm4, %zmm24
+
+// CHECK: vmulpd -1032(%rdx){1to8}, %zmm4, %zmm24
+// CHECK: encoding: [0x62,0x61,0xdd,0x58,0x59,0x82,0xf8,0xfb,0xff,0xff]
+ vmulpd -1032(%rdx){1to8}, %zmm4, %zmm24
+
+// CHECK: vmulps %zmm24, %zmm6, %zmm3
+// CHECK: encoding: [0x62,0x91,0x4c,0x48,0x59,0xd8]
+ vmulps %zmm24, %zmm6, %zmm3
+
+// CHECK: vmulps %zmm24, %zmm6, %zmm3 {%k4}
+// CHECK: encoding: [0x62,0x91,0x4c,0x4c,0x59,0xd8]
+ vmulps %zmm24, %zmm6, %zmm3 {%k4}
+
+// CHECK: vmulps %zmm24, %zmm6, %zmm3 {%k4} {z}
+// CHECK: encoding: [0x62,0x91,0x4c,0xcc,0x59,0xd8]
+ vmulps %zmm24, %zmm6, %zmm3 {%k4} {z}
+
+// CHECK: vmulps (%rcx), %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x48,0x59,0x19]
+ vmulps (%rcx), %zmm6, %zmm3
+
+// CHECK: vmulps 291(%rax,%r14,8), %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xb1,0x4c,0x48,0x59,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vmulps 291(%rax,%r14,8), %zmm6, %zmm3
+
+// CHECK: vmulps (%rcx){1to16}, %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x58,0x59,0x19]
+ vmulps (%rcx){1to16}, %zmm6, %zmm3
+
+// CHECK: vmulps 8128(%rdx), %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x48,0x59,0x5a,0x7f]
+ vmulps 8128(%rdx), %zmm6, %zmm3
+
+// CHECK: vmulps 8192(%rdx), %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x48,0x59,0x9a,0x00,0x20,0x00,0x00]
+ vmulps 8192(%rdx), %zmm6, %zmm3
+
+// CHECK: vmulps -8192(%rdx), %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x48,0x59,0x5a,0x80]
+ vmulps -8192(%rdx), %zmm6, %zmm3
+
+// CHECK: vmulps -8256(%rdx), %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x48,0x59,0x9a,0xc0,0xdf,0xff,0xff]
+ vmulps -8256(%rdx), %zmm6, %zmm3
+
+// CHECK: vmulps 508(%rdx){1to16}, %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x58,0x59,0x5a,0x7f]
+ vmulps 508(%rdx){1to16}, %zmm6, %zmm3
+
+// CHECK: vmulps 512(%rdx){1to16}, %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x58,0x59,0x9a,0x00,0x02,0x00,0x00]
+ vmulps 512(%rdx){1to16}, %zmm6, %zmm3
+
+// CHECK: vmulps -512(%rdx){1to16}, %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x58,0x59,0x5a,0x80]
+ vmulps -512(%rdx){1to16}, %zmm6, %zmm3
+
+// CHECK: vmulps -516(%rdx){1to16}, %zmm6, %zmm3
+// CHECK: encoding: [0x62,0xf1,0x4c,0x58,0x59,0x9a,0xfc,0xfd,0xff,0xff]
+ vmulps -516(%rdx){1to16}, %zmm6, %zmm3
+
+// CHECK: vpabsd %zmm14, %zmm15
+// CHECK: encoding: [0x62,0x52,0x7d,0x48,0x1e,0xfe]
+ vpabsd %zmm14, %zmm15
+
+// CHECK: vpabsd %zmm14, %zmm15 {%k6}
+// CHECK: encoding: [0x62,0x52,0x7d,0x4e,0x1e,0xfe]
+ vpabsd %zmm14, %zmm15 {%k6}
+
+// CHECK: vpabsd %zmm14, %zmm15 {%k6} {z}
+// CHECK: encoding: [0x62,0x52,0x7d,0xce,0x1e,0xfe]
+ vpabsd %zmm14, %zmm15 {%k6} {z}
+
+// CHECK: vpabsd (%rcx), %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x1e,0x39]
+ vpabsd (%rcx), %zmm15
+
+// CHECK: vpabsd (%rcx), %zmm15 {%k1}
+// CHECK: encoding: [0x62,0x72,0x7d,0x49,0x1e,0x39]
+ vpabsd (%rcx), %zmm15 {%k1}
+
+// CHECK: vpabsd (%rcx), %zmm15 {%k1} {z}
+// CHECK: encoding: [0x62,0x72,0x7d,0xc9,0x1e,0x39]
+ vpabsd (%rcx), %zmm15 {%k1} {z}
+
+// CHECK: vpabsd 291(%rax,%r14,8), %zmm15
+// CHECK: encoding: [0x62,0x32,0x7d,0x48,0x1e,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpabsd 291(%rax,%r14,8), %zmm15
+
+// CHECK: vpabsd (%rcx){1to16}, %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x58,0x1e,0x39]
+ vpabsd (%rcx){1to16}, %zmm15
+
+// CHECK: vpabsd 8128(%rdx), %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x1e,0x7a,0x7f]
+ vpabsd 8128(%rdx), %zmm15
+
+// CHECK: vpabsd 8192(%rdx), %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x1e,0xba,0x00,0x20,0x00,0x00]
+ vpabsd 8192(%rdx), %zmm15
+
+// CHECK: vpabsd -8192(%rdx), %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x1e,0x7a,0x80]
+ vpabsd -8192(%rdx), %zmm15
+
+// CHECK: vpabsd -8256(%rdx), %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x1e,0xba,0xc0,0xdf,0xff,0xff]
+ vpabsd -8256(%rdx), %zmm15
+
+// CHECK: vpabsd 508(%rdx){1to16}, %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x58,0x1e,0x7a,0x7f]
+ vpabsd 508(%rdx){1to16}, %zmm15
+
+// CHECK: vpabsd 512(%rdx){1to16}, %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x58,0x1e,0xba,0x00,0x02,0x00,0x00]
+ vpabsd 512(%rdx){1to16}, %zmm15
+
+// CHECK: vpabsd -512(%rdx){1to16}, %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x58,0x1e,0x7a,0x80]
+ vpabsd -512(%rdx){1to16}, %zmm15
+
+// CHECK: vpabsd -516(%rdx){1to16}, %zmm15
+// CHECK: encoding: [0x62,0x72,0x7d,0x58,0x1e,0xba,0xfc,0xfd,0xff,0xff]
+ vpabsd -516(%rdx){1to16}, %zmm15
+
+// CHECK: vpabsd (%rcx){1to16}, %zmm15 {%k2}
+// CHECK: encoding: [0x62,0x72,0x7d,0x5a,0x1e,0x39]
+ vpabsd (%rcx){1to16}, %zmm15 {%k2}
+
+// CHECK: vpabsd (%rcx){1to16}, %zmm15 {%k2} {z}
+// CHECK: encoding: [0x62,0x72,0x7d,0xda,0x1e,0x39]
+ vpabsd (%rcx){1to16}, %zmm15 {%k2} {z}
+
+// CHECK: vpabsq %zmm24, %zmm5
+// CHECK: encoding: [0x62,0x92,0xfd,0x48,0x1f,0xe8]
+ vpabsq %zmm24, %zmm5
+
+// CHECK: vpabsq %zmm24, %zmm5 {%k6}
+// CHECK: encoding: [0x62,0x92,0xfd,0x4e,0x1f,0xe8]
+ vpabsq %zmm24, %zmm5 {%k6}
+
+// CHECK: vpabsq %zmm24, %zmm5 {%k6} {z}
+// CHECK: encoding: [0x62,0x92,0xfd,0xce,0x1f,0xe8]
+ vpabsq %zmm24, %zmm5 {%k6} {z}
+
+// CHECK: vpabsq (%rcx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x48,0x1f,0x29]
+ vpabsq (%rcx), %zmm5
+
+// CHECK: vpabsq 291(%rax,%r14,8), %zmm5
+// CHECK: encoding: [0x62,0xb2,0xfd,0x48,0x1f,0xac,0xf0,0x23,0x01,0x00,0x00]
+ vpabsq 291(%rax,%r14,8), %zmm5
+
+// CHECK: vpabsq (%rcx){1to8}, %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x58,0x1f,0x29]
+ vpabsq (%rcx){1to8}, %zmm5
+
+// CHECK: vpabsq 8128(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x48,0x1f,0x6a,0x7f]
+ vpabsq 8128(%rdx), %zmm5
+
+// CHECK: vpabsq 8192(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x48,0x1f,0xaa,0x00,0x20,0x00,0x00]
+ vpabsq 8192(%rdx), %zmm5
+
+// CHECK: vpabsq -8192(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x48,0x1f,0x6a,0x80]
+ vpabsq -8192(%rdx), %zmm5
+
+// CHECK: vpabsq -8256(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x48,0x1f,0xaa,0xc0,0xdf,0xff,0xff]
+ vpabsq -8256(%rdx), %zmm5
+
+// CHECK: vpabsq 1016(%rdx){1to8}, %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x58,0x1f,0x6a,0x7f]
+ vpabsq 1016(%rdx){1to8}, %zmm5
+
+// CHECK: vpabsq 1024(%rdx){1to8}, %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x58,0x1f,0xaa,0x00,0x04,0x00,0x00]
+ vpabsq 1024(%rdx){1to8}, %zmm5
+
+// CHECK: vpabsq -1024(%rdx){1to8}, %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x58,0x1f,0x6a,0x80]
+ vpabsq -1024(%rdx){1to8}, %zmm5
+
+// CHECK: vpabsq -1032(%rdx){1to8}, %zmm5
+// CHECK: encoding: [0x62,0xf2,0xfd,0x58,0x1f,0xaa,0xf8,0xfb,0xff,0xff]
+ vpabsq -1032(%rdx){1to8}, %zmm5
+
+// CHECK: vpaddd %zmm20, %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x21,0x55,0x48,0xfe,0xd4]
+ vpaddd %zmm20, %zmm5, %zmm26
+
+// CHECK: vpaddd %zmm20, %zmm5, %zmm26 {%k1}
+// CHECK: encoding: [0x62,0x21,0x55,0x49,0xfe,0xd4]
+ vpaddd %zmm20, %zmm5, %zmm26 {%k1}
+
+// CHECK: vpaddd %zmm20, %zmm5, %zmm26 {%k1} {z}
+// CHECK: encoding: [0x62,0x21,0x55,0xc9,0xfe,0xd4]
+ vpaddd %zmm20, %zmm5, %zmm26 {%k1} {z}
+
+// CHECK: vpaddd (%rcx), %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x48,0xfe,0x11]
+ vpaddd (%rcx), %zmm5, %zmm26
+
+// CHECK: vpaddd (%rcx), %zmm5, %zmm26 {%k2}
+// CHECK: encoding: [0x62,0x61,0x55,0x4a,0xfe,0x11]
+ vpaddd (%rcx), %zmm5, %zmm26 {%k2}
+
+// CHECK: vpaddd (%rcx), %zmm5, %zmm26 {%k2} {z}
+// CHECK: encoding: [0x62,0x61,0x55,0xca,0xfe,0x11]
+ vpaddd (%rcx), %zmm5, %zmm26 {%k2} {z}
+
+// CHECK: vpaddd 291(%rax,%r14,8), %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x21,0x55,0x48,0xfe,0x94,0xf0,0x23,0x01,0x00,0x00]
+ vpaddd 291(%rax,%r14,8), %zmm5, %zmm26
+
+// CHECK: vpaddd (%rcx){1to16}, %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x58,0xfe,0x11]
+ vpaddd (%rcx){1to16}, %zmm5, %zmm26
+
+// CHECK: vpaddd 8128(%rdx), %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x48,0xfe,0x52,0x7f]
+ vpaddd 8128(%rdx), %zmm5, %zmm26
+
+// CHECK: vpaddd 8192(%rdx), %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x48,0xfe,0x92,0x00,0x20,0x00,0x00]
+ vpaddd 8192(%rdx), %zmm5, %zmm26
+
+// CHECK: vpaddd -8192(%rdx), %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x48,0xfe,0x52,0x80]
+ vpaddd -8192(%rdx), %zmm5, %zmm26
+
+// CHECK: vpaddd -8256(%rdx), %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x48,0xfe,0x92,0xc0,0xdf,0xff,0xff]
+ vpaddd -8256(%rdx), %zmm5, %zmm26
+
+// CHECK: vpaddd (%rcx){1to16}, %zmm5, %zmm26 {%k2}
+// CHECK: encoding: [0x62,0x61,0x55,0x5a,0xfe,0x11]
+ vpaddd (%rcx){1to16}, %zmm5, %zmm26 {%k2}
+
+// CHECK: vpaddd (%rcx){1to16}, %zmm5, %zmm26 {%k2} {z}
+// CHECK: encoding: [0x62,0x61,0x55,0xda,0xfe,0x11]
+ vpaddd (%rcx){1to16}, %zmm5, %zmm26 {%k2} {z}
+
+// CHECK: vpaddd 508(%rdx){1to16}, %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x58,0xfe,0x52,0x7f]
+ vpaddd 508(%rdx){1to16}, %zmm5, %zmm26
+
+// CHECK: vpaddd 512(%rdx){1to16}, %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x58,0xfe,0x92,0x00,0x02,0x00,0x00]
+ vpaddd 512(%rdx){1to16}, %zmm5, %zmm26
+
+// CHECK: vpaddd -512(%rdx){1to16}, %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x58,0xfe,0x52,0x80]
+ vpaddd -512(%rdx){1to16}, %zmm5, %zmm26
+
+// CHECK: vpaddd -516(%rdx){1to16}, %zmm5, %zmm26
+// CHECK: encoding: [0x62,0x61,0x55,0x58,0xfe,0x92,0xfc,0xfd,0xff,0xff]
+ vpaddd -516(%rdx){1to16}, %zmm5, %zmm26
+
+// CHECK: vpaddq %zmm14, %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x51,0xb5,0x40,0xd4,0xc6]
+ vpaddq %zmm14, %zmm25, %zmm8
+
+// CHECK: vpaddq %zmm14, %zmm25, %zmm8 {%k3}
+// CHECK: encoding: [0x62,0x51,0xb5,0x43,0xd4,0xc6]
+ vpaddq %zmm14, %zmm25, %zmm8 {%k3}
+
+// CHECK: vpaddq %zmm14, %zmm25, %zmm8 {%k3} {z}
+// CHECK: encoding: [0x62,0x51,0xb5,0xc3,0xd4,0xc6]
+ vpaddq %zmm14, %zmm25, %zmm8 {%k3} {z}
+
+// CHECK: vpaddq (%rcx), %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x40,0xd4,0x01]
+ vpaddq (%rcx), %zmm25, %zmm8
+
+// CHECK: vpaddq 291(%rax,%r14,8), %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x31,0xb5,0x40,0xd4,0x84,0xf0,0x23,0x01,0x00,0x00]
+ vpaddq 291(%rax,%r14,8), %zmm25, %zmm8
+
+// CHECK: vpaddq (%rcx){1to8}, %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x50,0xd4,0x01]
+ vpaddq (%rcx){1to8}, %zmm25, %zmm8
+
+// CHECK: vpaddq 8128(%rdx), %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x40,0xd4,0x42,0x7f]
+ vpaddq 8128(%rdx), %zmm25, %zmm8
+
+// CHECK: vpaddq 8192(%rdx), %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x40,0xd4,0x82,0x00,0x20,0x00,0x00]
+ vpaddq 8192(%rdx), %zmm25, %zmm8
+
+// CHECK: vpaddq -8192(%rdx), %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x40,0xd4,0x42,0x80]
+ vpaddq -8192(%rdx), %zmm25, %zmm8
+
+// CHECK: vpaddq -8256(%rdx), %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x40,0xd4,0x82,0xc0,0xdf,0xff,0xff]
+ vpaddq -8256(%rdx), %zmm25, %zmm8
+
+// CHECK: vpaddq 1016(%rdx){1to8}, %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x50,0xd4,0x42,0x7f]
+ vpaddq 1016(%rdx){1to8}, %zmm25, %zmm8
+
+// CHECK: vpaddq 1024(%rdx){1to8}, %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x50,0xd4,0x82,0x00,0x04,0x00,0x00]
+ vpaddq 1024(%rdx){1to8}, %zmm25, %zmm8
+
+// CHECK: vpaddq -1024(%rdx){1to8}, %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x50,0xd4,0x42,0x80]
+ vpaddq -1024(%rdx){1to8}, %zmm25, %zmm8
+
+// CHECK: vpaddq -1032(%rdx){1to8}, %zmm25, %zmm8
+// CHECK: encoding: [0x62,0x71,0xb5,0x50,0xd4,0x82,0xf8,0xfb,0xff,0xff]
+ vpaddq -1032(%rdx){1to8}, %zmm25, %zmm8
+
+// CHECK: vpandd %zmm25, %zmm22, %zmm19
+// CHECK: encoding: [0x62,0x81,0x4d,0x40,0xdb,0xd9]
+ vpandd %zmm25, %zmm22, %zmm19
+
+// CHECK: vpandd %zmm25, %zmm22, %zmm19 {%k1}
+// CHECK: encoding: [0x62,0x81,0x4d,0x41,0xdb,0xd9]
+ vpandd %zmm25, %zmm22, %zmm19 {%k1}
+
+// CHECK: vpandd %zmm25, %zmm22, %zmm19 {%k1} {z}
+// CHECK: encoding: [0x62,0x81,0x4d,0xc1,0xdb,0xd9]
+ vpandd %zmm25, %zmm22, %zmm19 {%k1} {z}
+
+// CHECK: vpandd (%rcx), %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x40,0xdb,0x19]
+ vpandd (%rcx), %zmm22, %zmm19
+
+// CHECK: vpandd 291(%rax,%r14,8), %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xa1,0x4d,0x40,0xdb,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vpandd 291(%rax,%r14,8), %zmm22, %zmm19
+
+// CHECK: vpandd (%rcx){1to16}, %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x50,0xdb,0x19]
+ vpandd (%rcx){1to16}, %zmm22, %zmm19
+
+// CHECK: vpandd 8128(%rdx), %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x40,0xdb,0x5a,0x7f]
+ vpandd 8128(%rdx), %zmm22, %zmm19
+
+// CHECK: vpandd 8192(%rdx), %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x40,0xdb,0x9a,0x00,0x20,0x00,0x00]
+ vpandd 8192(%rdx), %zmm22, %zmm19
+
+// CHECK: vpandd -8192(%rdx), %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x40,0xdb,0x5a,0x80]
+ vpandd -8192(%rdx), %zmm22, %zmm19
+
+// CHECK: vpandd -8256(%rdx), %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x40,0xdb,0x9a,0xc0,0xdf,0xff,0xff]
+ vpandd -8256(%rdx), %zmm22, %zmm19
+
+// CHECK: vpandd 508(%rdx){1to16}, %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x50,0xdb,0x5a,0x7f]
+ vpandd 508(%rdx){1to16}, %zmm22, %zmm19
+
+// CHECK: vpandd 512(%rdx){1to16}, %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x50,0xdb,0x9a,0x00,0x02,0x00,0x00]
+ vpandd 512(%rdx){1to16}, %zmm22, %zmm19
+
+// CHECK: vpandd -512(%rdx){1to16}, %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x50,0xdb,0x5a,0x80]
+ vpandd -512(%rdx){1to16}, %zmm22, %zmm19
+
+// CHECK: vpandd -516(%rdx){1to16}, %zmm22, %zmm19
+// CHECK: encoding: [0x62,0xe1,0x4d,0x50,0xdb,0x9a,0xfc,0xfd,0xff,0xff]
+ vpandd -516(%rdx){1to16}, %zmm22, %zmm19
+
+// CHECK: vpandnd %zmm15, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x41,0x1d,0x40,0xdf,0xf7]
+ vpandnd %zmm15, %zmm28, %zmm30
+
+// CHECK: vpandnd %zmm15, %zmm28, %zmm30 {%k3}
+// CHECK: encoding: [0x62,0x41,0x1d,0x43,0xdf,0xf7]
+ vpandnd %zmm15, %zmm28, %zmm30 {%k3}
+
+// CHECK: vpandnd %zmm15, %zmm28, %zmm30 {%k3} {z}
+// CHECK: encoding: [0x62,0x41,0x1d,0xc3,0xdf,0xf7]
+ vpandnd %zmm15, %zmm28, %zmm30 {%k3} {z}
+
+// CHECK: vpandnd (%rcx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x40,0xdf,0x31]
+ vpandnd (%rcx), %zmm28, %zmm30
+
+// CHECK: vpandnd 291(%rax,%r14,8), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x21,0x1d,0x40,0xdf,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpandnd 291(%rax,%r14,8), %zmm28, %zmm30
+
+// CHECK: vpandnd (%rcx){1to16}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x50,0xdf,0x31]
+ vpandnd (%rcx){1to16}, %zmm28, %zmm30
+
+// CHECK: vpandnd 8128(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x40,0xdf,0x72,0x7f]
+ vpandnd 8128(%rdx), %zmm28, %zmm30
+
+// CHECK: vpandnd 8192(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x40,0xdf,0xb2,0x00,0x20,0x00,0x00]
+ vpandnd 8192(%rdx), %zmm28, %zmm30
+
+// CHECK: vpandnd -8192(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x40,0xdf,0x72,0x80]
+ vpandnd -8192(%rdx), %zmm28, %zmm30
+
+// CHECK: vpandnd -8256(%rdx), %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x40,0xdf,0xb2,0xc0,0xdf,0xff,0xff]
+ vpandnd -8256(%rdx), %zmm28, %zmm30
+
+// CHECK: vpandnd 508(%rdx){1to16}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x50,0xdf,0x72,0x7f]
+ vpandnd 508(%rdx){1to16}, %zmm28, %zmm30
+
+// CHECK: vpandnd 512(%rdx){1to16}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x50,0xdf,0xb2,0x00,0x02,0x00,0x00]
+ vpandnd 512(%rdx){1to16}, %zmm28, %zmm30
+
+// CHECK: vpandnd -512(%rdx){1to16}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x50,0xdf,0x72,0x80]
+ vpandnd -512(%rdx){1to16}, %zmm28, %zmm30
+
+// CHECK: vpandnd -516(%rdx){1to16}, %zmm28, %zmm30
+// CHECK: encoding: [0x62,0x61,0x1d,0x50,0xdf,0xb2,0xfc,0xfd,0xff,0xff]
+ vpandnd -516(%rdx){1to16}, %zmm28, %zmm30
+
+// CHECK: vpandnq %zmm19, %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xa1,0xc5,0x48,0xdf,0xe3]
+ vpandnq %zmm19, %zmm7, %zmm20
+
+// CHECK: vpandnq %zmm19, %zmm7, %zmm20 {%k5}
+// CHECK: encoding: [0x62,0xa1,0xc5,0x4d,0xdf,0xe3]
+ vpandnq %zmm19, %zmm7, %zmm20 {%k5}
+
+// CHECK: vpandnq %zmm19, %zmm7, %zmm20 {%k5} {z}
+// CHECK: encoding: [0x62,0xa1,0xc5,0xcd,0xdf,0xe3]
+ vpandnq %zmm19, %zmm7, %zmm20 {%k5} {z}
+
+// CHECK: vpandnq (%rcx), %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x48,0xdf,0x21]
+ vpandnq (%rcx), %zmm7, %zmm20
+
+// CHECK: vpandnq 291(%rax,%r14,8), %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xa1,0xc5,0x48,0xdf,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpandnq 291(%rax,%r14,8), %zmm7, %zmm20
+
+// CHECK: vpandnq (%rcx){1to8}, %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x58,0xdf,0x21]
+ vpandnq (%rcx){1to8}, %zmm7, %zmm20
+
+// CHECK: vpandnq 8128(%rdx), %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x48,0xdf,0x62,0x7f]
+ vpandnq 8128(%rdx), %zmm7, %zmm20
+
+// CHECK: vpandnq 8192(%rdx), %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x48,0xdf,0xa2,0x00,0x20,0x00,0x00]
+ vpandnq 8192(%rdx), %zmm7, %zmm20
+
+// CHECK: vpandnq -8192(%rdx), %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x48,0xdf,0x62,0x80]
+ vpandnq -8192(%rdx), %zmm7, %zmm20
+
+// CHECK: vpandnq -8256(%rdx), %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x48,0xdf,0xa2,0xc0,0xdf,0xff,0xff]
+ vpandnq -8256(%rdx), %zmm7, %zmm20
+
+// CHECK: vpandnq 1016(%rdx){1to8}, %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x58,0xdf,0x62,0x7f]
+ vpandnq 1016(%rdx){1to8}, %zmm7, %zmm20
+
+// CHECK: vpandnq 1024(%rdx){1to8}, %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x58,0xdf,0xa2,0x00,0x04,0x00,0x00]
+ vpandnq 1024(%rdx){1to8}, %zmm7, %zmm20
+
+// CHECK: vpandnq -1024(%rdx){1to8}, %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x58,0xdf,0x62,0x80]
+ vpandnq -1024(%rdx){1to8}, %zmm7, %zmm20
+
+// CHECK: vpandnq -1032(%rdx){1to8}, %zmm7, %zmm20
+// CHECK: encoding: [0x62,0xe1,0xc5,0x58,0xdf,0xa2,0xf8,0xfb,0xff,0xff]
+ vpandnq -1032(%rdx){1to8}, %zmm7, %zmm20
+
+// CHECK: vpandq %zmm26, %zmm4, %zmm17
+// CHECK: encoding: [0x62,0x81,0xdd,0x48,0xdb,0xca]
+ vpandq %zmm26, %zmm4, %zmm17
+
+// CHECK: vpandq %zmm26, %zmm4, %zmm17 {%k7}
+// CHECK: encoding: [0x62,0x81,0xdd,0x4f,0xdb,0xca]
+ vpandq %zmm26, %zmm4, %zmm17 {%k7}
+
+// CHECK: vpandq %zmm26, %zmm4, %zmm17 {%k7} {z}
+// CHECK: encoding: [0x62,0x81,0xdd,0xcf,0xdb,0xca]
+ vpandq %zmm26, %zmm4, %zmm17 {%k7} {z}
+
+// CHECK: vpandq (%rcx), %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x48,0xdb,0x09]
+ vpandq (%rcx), %zmm4, %zmm17
+
+// CHECK: vpandq 291(%rax,%r14,8), %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xa1,0xdd,0x48,0xdb,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vpandq 291(%rax,%r14,8), %zmm4, %zmm17
+
+// CHECK: vpandq (%rcx){1to8}, %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x58,0xdb,0x09]
+ vpandq (%rcx){1to8}, %zmm4, %zmm17
+
+// CHECK: vpandq 8128(%rdx), %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x48,0xdb,0x4a,0x7f]
+ vpandq 8128(%rdx), %zmm4, %zmm17
+
+// CHECK: vpandq 8192(%rdx), %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x48,0xdb,0x8a,0x00,0x20,0x00,0x00]
+ vpandq 8192(%rdx), %zmm4, %zmm17
+
+// CHECK: vpandq -8192(%rdx), %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x48,0xdb,0x4a,0x80]
+ vpandq -8192(%rdx), %zmm4, %zmm17
+
+// CHECK: vpandq -8256(%rdx), %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x48,0xdb,0x8a,0xc0,0xdf,0xff,0xff]
+ vpandq -8256(%rdx), %zmm4, %zmm17
+
+// CHECK: vpandq 1016(%rdx){1to8}, %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x58,0xdb,0x4a,0x7f]
+ vpandq 1016(%rdx){1to8}, %zmm4, %zmm17
+
+// CHECK: vpandq 1024(%rdx){1to8}, %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x58,0xdb,0x8a,0x00,0x04,0x00,0x00]
+ vpandq 1024(%rdx){1to8}, %zmm4, %zmm17
+
+// CHECK: vpandq -1024(%rdx){1to8}, %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x58,0xdb,0x4a,0x80]
+ vpandq -1024(%rdx){1to8}, %zmm4, %zmm17
+
+// CHECK: vpandq -1032(%rdx){1to8}, %zmm4, %zmm17
+// CHECK: encoding: [0x62,0xe1,0xdd,0x58,0xdb,0x8a,0xf8,0xfb,0xff,0xff]
+ vpandq -1032(%rdx){1to8}, %zmm4, %zmm17
+
+// CHECK: vpmaxsd %zmm16, %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xb2,0x3d,0x48,0x3d,0xf0]
+ vpmaxsd %zmm16, %zmm8, %zmm6
+
+// CHECK: vpmaxsd %zmm16, %zmm8, %zmm6 {%k3}
+// CHECK: encoding: [0x62,0xb2,0x3d,0x4b,0x3d,0xf0]
+ vpmaxsd %zmm16, %zmm8, %zmm6 {%k3}
+
+// CHECK: vpmaxsd %zmm16, %zmm8, %zmm6 {%k3} {z}
+// CHECK: encoding: [0x62,0xb2,0x3d,0xcb,0x3d,0xf0]
+ vpmaxsd %zmm16, %zmm8, %zmm6 {%k3} {z}
+
+// CHECK: vpmaxsd (%rcx), %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x48,0x3d,0x31]
+ vpmaxsd (%rcx), %zmm8, %zmm6
+
+// CHECK: vpmaxsd 291(%rax,%r14,8), %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xb2,0x3d,0x48,0x3d,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpmaxsd 291(%rax,%r14,8), %zmm8, %zmm6
+
+// CHECK: vpmaxsd (%rcx){1to16}, %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x58,0x3d,0x31]
+ vpmaxsd (%rcx){1to16}, %zmm8, %zmm6
+
+// CHECK: vpmaxsd 8128(%rdx), %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x48,0x3d,0x72,0x7f]
+ vpmaxsd 8128(%rdx), %zmm8, %zmm6
+
+// CHECK: vpmaxsd 8192(%rdx), %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x48,0x3d,0xb2,0x00,0x20,0x00,0x00]
+ vpmaxsd 8192(%rdx), %zmm8, %zmm6
+
+// CHECK: vpmaxsd -8192(%rdx), %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x48,0x3d,0x72,0x80]
+ vpmaxsd -8192(%rdx), %zmm8, %zmm6
+
+// CHECK: vpmaxsd -8256(%rdx), %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x48,0x3d,0xb2,0xc0,0xdf,0xff,0xff]
+ vpmaxsd -8256(%rdx), %zmm8, %zmm6
+
+// CHECK: vpmaxsd 508(%rdx){1to16}, %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x58,0x3d,0x72,0x7f]
+ vpmaxsd 508(%rdx){1to16}, %zmm8, %zmm6
+
+// CHECK: vpmaxsd 512(%rdx){1to16}, %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x58,0x3d,0xb2,0x00,0x02,0x00,0x00]
+ vpmaxsd 512(%rdx){1to16}, %zmm8, %zmm6
+
+// CHECK: vpmaxsd -512(%rdx){1to16}, %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x58,0x3d,0x72,0x80]
+ vpmaxsd -512(%rdx){1to16}, %zmm8, %zmm6
+
+// CHECK: vpmaxsd -516(%rdx){1to16}, %zmm8, %zmm6
+// CHECK: encoding: [0x62,0xf2,0x3d,0x58,0x3d,0xb2,0xfc,0xfd,0xff,0xff]
+ vpmaxsd -516(%rdx){1to16}, %zmm8, %zmm6
+
+// CHECK: vpmaxsq %zmm1, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x48,0x3d,0xf1]
+ vpmaxsq %zmm1, %zmm6, %zmm6
+
+// CHECK: vpmaxsq %zmm1, %zmm6, %zmm6 {%k7}
+// CHECK: encoding: [0x62,0xf2,0xcd,0x4f,0x3d,0xf1]
+ vpmaxsq %zmm1, %zmm6, %zmm6 {%k7}
+
+// CHECK: vpmaxsq %zmm1, %zmm6, %zmm6 {%k7} {z}
+// CHECK: encoding: [0x62,0xf2,0xcd,0xcf,0x3d,0xf1]
+ vpmaxsq %zmm1, %zmm6, %zmm6 {%k7} {z}
+
+// CHECK: vpmaxsq (%rcx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x48,0x3d,0x31]
+ vpmaxsq (%rcx), %zmm6, %zmm6
+
+// CHECK: vpmaxsq 291(%rax,%r14,8), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xb2,0xcd,0x48,0x3d,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpmaxsq 291(%rax,%r14,8), %zmm6, %zmm6
+
+// CHECK: vpmaxsq (%rcx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x58,0x3d,0x31]
+ vpmaxsq (%rcx){1to8}, %zmm6, %zmm6
+
+// CHECK: vpmaxsq 8128(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x48,0x3d,0x72,0x7f]
+ vpmaxsq 8128(%rdx), %zmm6, %zmm6
+
+// CHECK: vpmaxsq 8192(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x48,0x3d,0xb2,0x00,0x20,0x00,0x00]
+ vpmaxsq 8192(%rdx), %zmm6, %zmm6
+
+// CHECK: vpmaxsq -8192(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x48,0x3d,0x72,0x80]
+ vpmaxsq -8192(%rdx), %zmm6, %zmm6
+
+// CHECK: vpmaxsq -8256(%rdx), %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x48,0x3d,0xb2,0xc0,0xdf,0xff,0xff]
+ vpmaxsq -8256(%rdx), %zmm6, %zmm6
+
+// CHECK: vpmaxsq 1016(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x58,0x3d,0x72,0x7f]
+ vpmaxsq 1016(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vpmaxsq 1024(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x58,0x3d,0xb2,0x00,0x04,0x00,0x00]
+ vpmaxsq 1024(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vpmaxsq -1024(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x58,0x3d,0x72,0x80]
+ vpmaxsq -1024(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vpmaxsq -1032(%rdx){1to8}, %zmm6, %zmm6
+// CHECK: encoding: [0x62,0xf2,0xcd,0x58,0x3d,0xb2,0xf8,0xfb,0xff,0xff]
+ vpmaxsq -1032(%rdx){1to8}, %zmm6, %zmm6
+
+// CHECK: vpmaxud %zmm25, %zmm7, %zmm17
+// CHECK: encoding: [0x62,0x82,0x45,0x48,0x3f,0xc9]
+ vpmaxud %zmm25, %zmm7, %zmm17
+
+// CHECK: vpmaxud %zmm25, %zmm7, %zmm17 {%k5}
+// CHECK: encoding: [0x62,0x82,0x45,0x4d,0x3f,0xc9]
+ vpmaxud %zmm25, %zmm7, %zmm17 {%k5}
+
+// CHECK: vpmaxud %zmm25, %zmm7, %zmm17 {%k5} {z}
+// CHECK: encoding: [0x62,0x82,0x45,0xcd,0x3f,0xc9]
+ vpmaxud %zmm25, %zmm7, %zmm17 {%k5} {z}
+
+// CHECK: vpmaxud (%rcx), %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x48,0x3f,0x09]
+ vpmaxud (%rcx), %zmm7, %zmm17
+
+// CHECK: vpmaxud 291(%rax,%r14,8), %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xa2,0x45,0x48,0x3f,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vpmaxud 291(%rax,%r14,8), %zmm7, %zmm17
+
+// CHECK: vpmaxud (%rcx){1to16}, %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x58,0x3f,0x09]
+ vpmaxud (%rcx){1to16}, %zmm7, %zmm17
+
+// CHECK: vpmaxud 8128(%rdx), %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x48,0x3f,0x4a,0x7f]
+ vpmaxud 8128(%rdx), %zmm7, %zmm17
+
+// CHECK: vpmaxud 8192(%rdx), %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x48,0x3f,0x8a,0x00,0x20,0x00,0x00]
+ vpmaxud 8192(%rdx), %zmm7, %zmm17
+
+// CHECK: vpmaxud -8192(%rdx), %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x48,0x3f,0x4a,0x80]
+ vpmaxud -8192(%rdx), %zmm7, %zmm17
+
+// CHECK: vpmaxud -8256(%rdx), %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x48,0x3f,0x8a,0xc0,0xdf,0xff,0xff]
+ vpmaxud -8256(%rdx), %zmm7, %zmm17
+
+// CHECK: vpmaxud 508(%rdx){1to16}, %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x58,0x3f,0x4a,0x7f]
+ vpmaxud 508(%rdx){1to16}, %zmm7, %zmm17
+
+// CHECK: vpmaxud 512(%rdx){1to16}, %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x58,0x3f,0x8a,0x00,0x02,0x00,0x00]
+ vpmaxud 512(%rdx){1to16}, %zmm7, %zmm17
+
+// CHECK: vpmaxud -512(%rdx){1to16}, %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x58,0x3f,0x4a,0x80]
+ vpmaxud -512(%rdx){1to16}, %zmm7, %zmm17
+
+// CHECK: vpmaxud -516(%rdx){1to16}, %zmm7, %zmm17
+// CHECK: encoding: [0x62,0xe2,0x45,0x58,0x3f,0x8a,0xfc,0xfd,0xff,0xff]
+ vpmaxud -516(%rdx){1to16}, %zmm7, %zmm17
+
+// CHECK: vpmaxuq %zmm25, %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x02,0xcd,0x40,0x3f,0xf1]
+ vpmaxuq %zmm25, %zmm22, %zmm30
+
+// CHECK: vpmaxuq %zmm25, %zmm22, %zmm30 {%k1}
+// CHECK: encoding: [0x62,0x02,0xcd,0x41,0x3f,0xf1]
+ vpmaxuq %zmm25, %zmm22, %zmm30 {%k1}
+
+// CHECK: vpmaxuq %zmm25, %zmm22, %zmm30 {%k1} {z}
+// CHECK: encoding: [0x62,0x02,0xcd,0xc1,0x3f,0xf1]
+ vpmaxuq %zmm25, %zmm22, %zmm30 {%k1} {z}
+
+// CHECK: vpmaxuq (%rcx), %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x40,0x3f,0x31]
+ vpmaxuq (%rcx), %zmm22, %zmm30
+
+// CHECK: vpmaxuq 291(%rax,%r14,8), %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x22,0xcd,0x40,0x3f,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpmaxuq 291(%rax,%r14,8), %zmm22, %zmm30
+
+// CHECK: vpmaxuq (%rcx){1to8}, %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x50,0x3f,0x31]
+ vpmaxuq (%rcx){1to8}, %zmm22, %zmm30
+
+// CHECK: vpmaxuq 8128(%rdx), %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x40,0x3f,0x72,0x7f]
+ vpmaxuq 8128(%rdx), %zmm22, %zmm30
+
+// CHECK: vpmaxuq 8192(%rdx), %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x40,0x3f,0xb2,0x00,0x20,0x00,0x00]
+ vpmaxuq 8192(%rdx), %zmm22, %zmm30
+
+// CHECK: vpmaxuq -8192(%rdx), %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x40,0x3f,0x72,0x80]
+ vpmaxuq -8192(%rdx), %zmm22, %zmm30
+
+// CHECK: vpmaxuq -8256(%rdx), %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x40,0x3f,0xb2,0xc0,0xdf,0xff,0xff]
+ vpmaxuq -8256(%rdx), %zmm22, %zmm30
+
+// CHECK: vpmaxuq 1016(%rdx){1to8}, %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x50,0x3f,0x72,0x7f]
+ vpmaxuq 1016(%rdx){1to8}, %zmm22, %zmm30
+
+// CHECK: vpmaxuq 1024(%rdx){1to8}, %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x50,0x3f,0xb2,0x00,0x04,0x00,0x00]
+ vpmaxuq 1024(%rdx){1to8}, %zmm22, %zmm30
+
+// CHECK: vpmaxuq -1024(%rdx){1to8}, %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x50,0x3f,0x72,0x80]
+ vpmaxuq -1024(%rdx){1to8}, %zmm22, %zmm30
+
+// CHECK: vpmaxuq -1032(%rdx){1to8}, %zmm22, %zmm30
+// CHECK: encoding: [0x62,0x62,0xcd,0x50,0x3f,0xb2,0xf8,0xfb,0xff,0xff]
+ vpmaxuq -1032(%rdx){1to8}, %zmm22, %zmm30
+
+// CHECK: vpminsd %zmm24, %zmm16, %zmm2
+// CHECK: encoding: [0x62,0x92,0x7d,0x40,0x39,0xd0]
+ vpminsd %zmm24, %zmm16, %zmm2
+
+// CHECK: vpminsd %zmm24, %zmm16, %zmm2 {%k3}
+// CHECK: encoding: [0x62,0x92,0x7d,0x43,0x39,0xd0]
+ vpminsd %zmm24, %zmm16, %zmm2 {%k3}
+
+// CHECK: vpminsd %zmm24, %zmm16, %zmm2 {%k3} {z}
+// CHECK: encoding: [0x62,0x92,0x7d,0xc3,0x39,0xd0]
+ vpminsd %zmm24, %zmm16, %zmm2 {%k3} {z}
+
+// CHECK: vpminsd (%rcx), %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x40,0x39,0x11]
+ vpminsd (%rcx), %zmm16, %zmm2
+
+// CHECK: vpminsd 291(%rax,%r14,8), %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xb2,0x7d,0x40,0x39,0x94,0xf0,0x23,0x01,0x00,0x00]
+ vpminsd 291(%rax,%r14,8), %zmm16, %zmm2
+
+// CHECK: vpminsd (%rcx){1to16}, %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x50,0x39,0x11]
+ vpminsd (%rcx){1to16}, %zmm16, %zmm2
+
+// CHECK: vpminsd 8128(%rdx), %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x40,0x39,0x52,0x7f]
+ vpminsd 8128(%rdx), %zmm16, %zmm2
+
+// CHECK: vpminsd 8192(%rdx), %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x40,0x39,0x92,0x00,0x20,0x00,0x00]
+ vpminsd 8192(%rdx), %zmm16, %zmm2
+
+// CHECK: vpminsd -8192(%rdx), %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x40,0x39,0x52,0x80]
+ vpminsd -8192(%rdx), %zmm16, %zmm2
+
+// CHECK: vpminsd -8256(%rdx), %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x40,0x39,0x92,0xc0,0xdf,0xff,0xff]
+ vpminsd -8256(%rdx), %zmm16, %zmm2
+
+// CHECK: vpminsd 508(%rdx){1to16}, %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x50,0x39,0x52,0x7f]
+ vpminsd 508(%rdx){1to16}, %zmm16, %zmm2
+
+// CHECK: vpminsd 512(%rdx){1to16}, %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x50,0x39,0x92,0x00,0x02,0x00,0x00]
+ vpminsd 512(%rdx){1to16}, %zmm16, %zmm2
+
+// CHECK: vpminsd -512(%rdx){1to16}, %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x50,0x39,0x52,0x80]
+ vpminsd -512(%rdx){1to16}, %zmm16, %zmm2
+
+// CHECK: vpminsd -516(%rdx){1to16}, %zmm16, %zmm2
+// CHECK: encoding: [0x62,0xf2,0x7d,0x50,0x39,0x92,0xfc,0xfd,0xff,0xff]
+ vpminsd -516(%rdx){1to16}, %zmm16, %zmm2
+
+// CHECK: vpminsq %zmm17, %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xa2,0xfd,0x40,0x39,0xe1]
+ vpminsq %zmm17, %zmm16, %zmm20
+
+// CHECK: vpminsq %zmm17, %zmm16, %zmm20 {%k6}
+// CHECK: encoding: [0x62,0xa2,0xfd,0x46,0x39,0xe1]
+ vpminsq %zmm17, %zmm16, %zmm20 {%k6}
+
+// CHECK: vpminsq %zmm17, %zmm16, %zmm20 {%k6} {z}
+// CHECK: encoding: [0x62,0xa2,0xfd,0xc6,0x39,0xe1]
+ vpminsq %zmm17, %zmm16, %zmm20 {%k6} {z}
+
+// CHECK: vpminsq (%rcx), %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x40,0x39,0x21]
+ vpminsq (%rcx), %zmm16, %zmm20
+
+// CHECK: vpminsq 291(%rax,%r14,8), %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xa2,0xfd,0x40,0x39,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpminsq 291(%rax,%r14,8), %zmm16, %zmm20
+
+// CHECK: vpminsq (%rcx){1to8}, %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x50,0x39,0x21]
+ vpminsq (%rcx){1to8}, %zmm16, %zmm20
+
+// CHECK: vpminsq 8128(%rdx), %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x40,0x39,0x62,0x7f]
+ vpminsq 8128(%rdx), %zmm16, %zmm20
+
+// CHECK: vpminsq 8192(%rdx), %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x40,0x39,0xa2,0x00,0x20,0x00,0x00]
+ vpminsq 8192(%rdx), %zmm16, %zmm20
+
+// CHECK: vpminsq -8192(%rdx), %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x40,0x39,0x62,0x80]
+ vpminsq -8192(%rdx), %zmm16, %zmm20
+
+// CHECK: vpminsq -8256(%rdx), %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x40,0x39,0xa2,0xc0,0xdf,0xff,0xff]
+ vpminsq -8256(%rdx), %zmm16, %zmm20
+
+// CHECK: vpminsq 1016(%rdx){1to8}, %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x50,0x39,0x62,0x7f]
+ vpminsq 1016(%rdx){1to8}, %zmm16, %zmm20
+
+// CHECK: vpminsq 1024(%rdx){1to8}, %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x50,0x39,0xa2,0x00,0x04,0x00,0x00]
+ vpminsq 1024(%rdx){1to8}, %zmm16, %zmm20
+
+// CHECK: vpminsq -1024(%rdx){1to8}, %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x50,0x39,0x62,0x80]
+ vpminsq -1024(%rdx){1to8}, %zmm16, %zmm20
+
+// CHECK: vpminsq -1032(%rdx){1to8}, %zmm16, %zmm20
+// CHECK: encoding: [0x62,0xe2,0xfd,0x50,0x39,0xa2,0xf8,0xfb,0xff,0xff]
+ vpminsq -1032(%rdx){1to8}, %zmm16, %zmm20
+
+// CHECK: vpminud %zmm20, %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xb2,0x45,0x40,0x3b,0xdc]
+ vpminud %zmm20, %zmm23, %zmm3
+
+// CHECK: vpminud %zmm20, %zmm23, %zmm3 {%k3}
+// CHECK: encoding: [0x62,0xb2,0x45,0x43,0x3b,0xdc]
+ vpminud %zmm20, %zmm23, %zmm3 {%k3}
+
+// CHECK: vpminud %zmm20, %zmm23, %zmm3 {%k3} {z}
+// CHECK: encoding: [0x62,0xb2,0x45,0xc3,0x3b,0xdc]
+ vpminud %zmm20, %zmm23, %zmm3 {%k3} {z}
+
+// CHECK: vpminud (%rcx), %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x40,0x3b,0x19]
+ vpminud (%rcx), %zmm23, %zmm3
+
+// CHECK: vpminud 291(%rax,%r14,8), %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xb2,0x45,0x40,0x3b,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vpminud 291(%rax,%r14,8), %zmm23, %zmm3
+
+// CHECK: vpminud (%rcx){1to16}, %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x50,0x3b,0x19]
+ vpminud (%rcx){1to16}, %zmm23, %zmm3
+
+// CHECK: vpminud 8128(%rdx), %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x40,0x3b,0x5a,0x7f]
+ vpminud 8128(%rdx), %zmm23, %zmm3
+
+// CHECK: vpminud 8192(%rdx), %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x40,0x3b,0x9a,0x00,0x20,0x00,0x00]
+ vpminud 8192(%rdx), %zmm23, %zmm3
+
+// CHECK: vpminud -8192(%rdx), %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x40,0x3b,0x5a,0x80]
+ vpminud -8192(%rdx), %zmm23, %zmm3
+
+// CHECK: vpminud -8256(%rdx), %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x40,0x3b,0x9a,0xc0,0xdf,0xff,0xff]
+ vpminud -8256(%rdx), %zmm23, %zmm3
+
+// CHECK: vpminud 508(%rdx){1to16}, %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x50,0x3b,0x5a,0x7f]
+ vpminud 508(%rdx){1to16}, %zmm23, %zmm3
+
+// CHECK: vpminud 512(%rdx){1to16}, %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x50,0x3b,0x9a,0x00,0x02,0x00,0x00]
+ vpminud 512(%rdx){1to16}, %zmm23, %zmm3
+
+// CHECK: vpminud -512(%rdx){1to16}, %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x50,0x3b,0x5a,0x80]
+ vpminud -512(%rdx){1to16}, %zmm23, %zmm3
+
+// CHECK: vpminud -516(%rdx){1to16}, %zmm23, %zmm3
+// CHECK: encoding: [0x62,0xf2,0x45,0x50,0x3b,0x9a,0xfc,0xfd,0xff,0xff]
+ vpminud -516(%rdx){1to16}, %zmm23, %zmm3
+
+// CHECK: vpminuq %zmm7, %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x40,0x3b,0xdf]
+ vpminuq %zmm7, %zmm26, %zmm11
+
+// CHECK: vpminuq %zmm7, %zmm26, %zmm11 {%k5}
+// CHECK: encoding: [0x62,0x72,0xad,0x45,0x3b,0xdf]
+ vpminuq %zmm7, %zmm26, %zmm11 {%k5}
+
+// CHECK: vpminuq %zmm7, %zmm26, %zmm11 {%k5} {z}
+// CHECK: encoding: [0x62,0x72,0xad,0xc5,0x3b,0xdf]
+ vpminuq %zmm7, %zmm26, %zmm11 {%k5} {z}
+
+// CHECK: vpminuq (%rcx), %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x40,0x3b,0x19]
+ vpminuq (%rcx), %zmm26, %zmm11
+
+// CHECK: vpminuq 291(%rax,%r14,8), %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x32,0xad,0x40,0x3b,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vpminuq 291(%rax,%r14,8), %zmm26, %zmm11
+
+// CHECK: vpminuq (%rcx){1to8}, %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x50,0x3b,0x19]
+ vpminuq (%rcx){1to8}, %zmm26, %zmm11
+
+// CHECK: vpminuq 8128(%rdx), %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x40,0x3b,0x5a,0x7f]
+ vpminuq 8128(%rdx), %zmm26, %zmm11
+
+// CHECK: vpminuq 8192(%rdx), %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x40,0x3b,0x9a,0x00,0x20,0x00,0x00]
+ vpminuq 8192(%rdx), %zmm26, %zmm11
+
+// CHECK: vpminuq -8192(%rdx), %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x40,0x3b,0x5a,0x80]
+ vpminuq -8192(%rdx), %zmm26, %zmm11
+
+// CHECK: vpminuq -8256(%rdx), %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x40,0x3b,0x9a,0xc0,0xdf,0xff,0xff]
+ vpminuq -8256(%rdx), %zmm26, %zmm11
+
+// CHECK: vpminuq 1016(%rdx){1to8}, %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x50,0x3b,0x5a,0x7f]
+ vpminuq 1016(%rdx){1to8}, %zmm26, %zmm11
+
+// CHECK: vpminuq 1024(%rdx){1to8}, %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x50,0x3b,0x9a,0x00,0x04,0x00,0x00]
+ vpminuq 1024(%rdx){1to8}, %zmm26, %zmm11
+
+// CHECK: vpminuq -1024(%rdx){1to8}, %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x50,0x3b,0x5a,0x80]
+ vpminuq -1024(%rdx){1to8}, %zmm26, %zmm11
+
+// CHECK: vpminuq -1032(%rdx){1to8}, %zmm26, %zmm11
+// CHECK: encoding: [0x62,0x72,0xad,0x50,0x3b,0x9a,0xf8,0xfb,0xff,0xff]
+ vpminuq -1032(%rdx){1to8}, %zmm26, %zmm11
+
+// CHECK: vpmovsxbd %xmm7, %zmm27
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x21,0xdf]
+ vpmovsxbd %xmm7, %zmm27
+
+// CHECK: vpmovsxbd %xmm7, %zmm27 {%k5}
+// CHECK: encoding: [0x62,0x62,0x7d,0x4d,0x21,0xdf]
+ vpmovsxbd %xmm7, %zmm27 {%k5}
+
+// CHECK: vpmovsxbd %xmm7, %zmm27 {%k5} {z}
+// CHECK: encoding: [0x62,0x62,0x7d,0xcd,0x21,0xdf]
+ vpmovsxbd %xmm7, %zmm27 {%k5} {z}
+
+// CHECK: vpmovsxbd (%rcx), %zmm27
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x21,0x19]
+ vpmovsxbd (%rcx), %zmm27
+
+// CHECK: vpmovsxbd 291(%rax,%r14,8), %zmm27
+// CHECK: encoding: [0x62,0x22,0x7d,0x48,0x21,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsxbd 291(%rax,%r14,8), %zmm27
+
+// CHECK: vpmovsxbd 2032(%rdx), %zmm27
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x21,0x5a,0x7f]
+ vpmovsxbd 2032(%rdx), %zmm27
+
+// CHECK: vpmovsxbd 2048(%rdx), %zmm27
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x21,0x9a,0x00,0x08,0x00,0x00]
+ vpmovsxbd 2048(%rdx), %zmm27
+
+// CHECK: vpmovsxbd -2048(%rdx), %zmm27
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x21,0x5a,0x80]
+ vpmovsxbd -2048(%rdx), %zmm27
+
+// CHECK: vpmovsxbd -2064(%rdx), %zmm27
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x21,0x9a,0xf0,0xf7,0xff,0xff]
+ vpmovsxbd -2064(%rdx), %zmm27
+
+// CHECK: vpmovsxbd (%rcx), %zmm27 {%k1}
+// CHECK: encoding: [0x62,0x62,0x7d,0x49,0x21,0x19]
+ vpmovsxbd (%rcx), %zmm27 {%k1}
+
+// CHECK: vpmovsxbd (%rcx), %zmm27 {%k2} {z}
+// CHECK: encoding: [0x62,0x62,0x7d,0xca,0x21,0x19]
+ vpmovsxbd (%rcx), %zmm27 {%k2} {z}
+
+// CHECK: vpmovsxbq %xmm11, %zmm11
+// CHECK: encoding: [0x62,0x52,0x7d,0x48,0x22,0xdb]
+ vpmovsxbq %xmm11, %zmm11
+
+// CHECK: vpmovsxbq %xmm11, %zmm11 {%k5}
+// CHECK: encoding: [0x62,0x52,0x7d,0x4d,0x22,0xdb]
+ vpmovsxbq %xmm11, %zmm11 {%k5}
+
+// CHECK: vpmovsxbq %xmm11, %zmm11 {%k5} {z}
+// CHECK: encoding: [0x62,0x52,0x7d,0xcd,0x22,0xdb]
+ vpmovsxbq %xmm11, %zmm11 {%k5} {z}
+
+// CHECK: vpmovsxbq (%rcx), %zmm11
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x22,0x19]
+ vpmovsxbq (%rcx), %zmm11
+
+// CHECK: vpmovsxbq 291(%rax,%r14,8), %zmm11
+// CHECK: encoding: [0x62,0x32,0x7d,0x48,0x22,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsxbq 291(%rax,%r14,8), %zmm11
+
+// CHECK: vpmovsxbq 1016(%rdx), %zmm11
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x22,0x5a,0x7f]
+ vpmovsxbq 1016(%rdx), %zmm11
+
+// CHECK: vpmovsxbq 1024(%rdx), %zmm11
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x22,0x9a,0x00,0x04,0x00,0x00]
+ vpmovsxbq 1024(%rdx), %zmm11
+
+// CHECK: vpmovsxbq -1024(%rdx), %zmm11
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x22,0x5a,0x80]
+ vpmovsxbq -1024(%rdx), %zmm11
+
+// CHECK: vpmovsxbq -1032(%rdx), %zmm11
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x22,0x9a,0xf8,0xfb,0xff,0xff]
+ vpmovsxbq -1032(%rdx), %zmm11
+
+// CHECK: vpmovsxdq %ymm29, %zmm26
+// CHECK: encoding: [0x62,0x02,0x7d,0x48,0x25,0xd5]
+ vpmovsxdq %ymm29, %zmm26
+
+// CHECK: vpmovsxdq %ymm29, %zmm26 {%k1}
+// CHECK: encoding: [0x62,0x02,0x7d,0x49,0x25,0xd5]
+ vpmovsxdq %ymm29, %zmm26 {%k1}
+
+// CHECK: vpmovsxdq %ymm29, %zmm26 {%k1} {z}
+// CHECK: encoding: [0x62,0x02,0x7d,0xc9,0x25,0xd5]
+ vpmovsxdq %ymm29, %zmm26 {%k1} {z}
+
+// CHECK: vpmovsxdq (%rcx), %zmm26
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x25,0x11]
+ vpmovsxdq (%rcx), %zmm26
+
+// CHECK: vpmovsxdq 291(%rax,%r14,8), %zmm26
+// CHECK: encoding: [0x62,0x22,0x7d,0x48,0x25,0x94,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsxdq 291(%rax,%r14,8), %zmm26
+
+// CHECK: vpmovsxdq 4064(%rdx), %zmm26
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x25,0x52,0x7f]
+ vpmovsxdq 4064(%rdx), %zmm26
+
+// CHECK: vpmovsxdq 4096(%rdx), %zmm26
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x25,0x92,0x00,0x10,0x00,0x00]
+ vpmovsxdq 4096(%rdx), %zmm26
+
+// CHECK: vpmovsxdq -4096(%rdx), %zmm26
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x25,0x52,0x80]
+ vpmovsxdq -4096(%rdx), %zmm26
+
+// CHECK: vpmovsxdq -4128(%rdx), %zmm26
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x25,0x92,0xe0,0xef,0xff,0xff]
+ vpmovsxdq -4128(%rdx), %zmm26
+
+// CHECK: vpmovsxwd %ymm11, %zmm23
+// CHECK: encoding: [0x62,0xc2,0x7d,0x48,0x23,0xfb]
+ vpmovsxwd %ymm11, %zmm23
+
+// CHECK: vpmovsxwd %ymm11, %zmm23 {%k2}
+// CHECK: encoding: [0x62,0xc2,0x7d,0x4a,0x23,0xfb]
+ vpmovsxwd %ymm11, %zmm23 {%k2}
+
+// CHECK: vpmovsxwd %ymm11, %zmm23 {%k2} {z}
+// CHECK: encoding: [0x62,0xc2,0x7d,0xca,0x23,0xfb]
+ vpmovsxwd %ymm11, %zmm23 {%k2} {z}
+
+// CHECK: vpmovsxwd (%rcx), %zmm23
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x23,0x39]
+ vpmovsxwd (%rcx), %zmm23
+
+// CHECK: vpmovsxwd 291(%rax,%r14,8), %zmm23
+// CHECK: encoding: [0x62,0xa2,0x7d,0x48,0x23,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsxwd 291(%rax,%r14,8), %zmm23
+
+// CHECK: vpmovsxwd 4064(%rdx), %zmm23
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x23,0x7a,0x7f]
+ vpmovsxwd 4064(%rdx), %zmm23
+
+// CHECK: vpmovsxwd 4096(%rdx), %zmm23
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x23,0xba,0x00,0x10,0x00,0x00]
+ vpmovsxwd 4096(%rdx), %zmm23
+
+// CHECK: vpmovsxwd -4096(%rdx), %zmm23
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x23,0x7a,0x80]
+ vpmovsxwd -4096(%rdx), %zmm23
+
+// CHECK: vpmovsxwd -4128(%rdx), %zmm23
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x23,0xba,0xe0,0xef,0xff,0xff]
+ vpmovsxwd -4128(%rdx), %zmm23
+
+// CHECK: vpmovsxwq %xmm25, %zmm25
+// CHECK: encoding: [0x62,0x02,0x7d,0x48,0x24,0xc9]
+ vpmovsxwq %xmm25, %zmm25
+
+// CHECK: vpmovsxwq %xmm25, %zmm25 {%k4}
+// CHECK: encoding: [0x62,0x02,0x7d,0x4c,0x24,0xc9]
+ vpmovsxwq %xmm25, %zmm25 {%k4}
+
+// CHECK: vpmovsxwq %xmm25, %zmm25 {%k4} {z}
+// CHECK: encoding: [0x62,0x02,0x7d,0xcc,0x24,0xc9]
+ vpmovsxwq %xmm25, %zmm25 {%k4} {z}
+
+// CHECK: vpmovsxwq (%rcx), %zmm25
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x24,0x09]
+ vpmovsxwq (%rcx), %zmm25
+
+// CHECK: vpmovsxwq 291(%rax,%r14,8), %zmm25
+// CHECK: encoding: [0x62,0x22,0x7d,0x48,0x24,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsxwq 291(%rax,%r14,8), %zmm25
+
+// CHECK: vpmovsxwq 2032(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x24,0x4a,0x7f]
+ vpmovsxwq 2032(%rdx), %zmm25
+
+// CHECK: vpmovsxwq 2048(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x24,0x8a,0x00,0x08,0x00,0x00]
+ vpmovsxwq 2048(%rdx), %zmm25
+
+// CHECK: vpmovsxwq -2048(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x24,0x4a,0x80]
+ vpmovsxwq -2048(%rdx), %zmm25
+
+// CHECK: vpmovsxwq -2064(%rdx), %zmm25
+// CHECK: encoding: [0x62,0x62,0x7d,0x48,0x24,0x8a,0xf0,0xf7,0xff,0xff]
+ vpmovsxwq -2064(%rdx), %zmm25
+
+// CHECK: vpmovzxbd %xmm25, %zmm18
+// CHECK: encoding: [0x62,0x82,0x7d,0x48,0x31,0xd1]
+ vpmovzxbd %xmm25, %zmm18
+
+// CHECK: vpmovzxbd %xmm25, %zmm18 {%k7}
+// CHECK: encoding: [0x62,0x82,0x7d,0x4f,0x31,0xd1]
+ vpmovzxbd %xmm25, %zmm18 {%k7}
+
+// CHECK: vpmovzxbd %xmm25, %zmm18 {%k7} {z}
+// CHECK: encoding: [0x62,0x82,0x7d,0xcf,0x31,0xd1]
+ vpmovzxbd %xmm25, %zmm18 {%k7} {z}
+
+// CHECK: vpmovzxbd (%rcx), %zmm18
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x31,0x11]
+ vpmovzxbd (%rcx), %zmm18
+
+// CHECK: vpmovzxbd 291(%rax,%r14,8), %zmm18
+// CHECK: encoding: [0x62,0xa2,0x7d,0x48,0x31,0x94,0xf0,0x23,0x01,0x00,0x00]
+ vpmovzxbd 291(%rax,%r14,8), %zmm18
+
+// CHECK: vpmovzxbd 2032(%rdx), %zmm18
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x31,0x52,0x7f]
+ vpmovzxbd 2032(%rdx), %zmm18
+
+// CHECK: vpmovzxbd 2048(%rdx), %zmm18
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x31,0x92,0x00,0x08,0x00,0x00]
+ vpmovzxbd 2048(%rdx), %zmm18
+
+// CHECK: vpmovzxbd -2048(%rdx), %zmm18
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x31,0x52,0x80]
+ vpmovzxbd -2048(%rdx), %zmm18
+
+// CHECK: vpmovzxbd -2064(%rdx), %zmm18
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x31,0x92,0xf0,0xf7,0xff,0xff]
+ vpmovzxbd -2064(%rdx), %zmm18
+
+// CHECK: vpmovzxbq %xmm15, %zmm5
+// CHECK: encoding: [0x62,0xd2,0x7d,0x48,0x32,0xef]
+ vpmovzxbq %xmm15, %zmm5
+
+// CHECK: vpmovzxbq %xmm15, %zmm5 {%k1}
+// CHECK: encoding: [0x62,0xd2,0x7d,0x49,0x32,0xef]
+ vpmovzxbq %xmm15, %zmm5 {%k1}
+
+// CHECK: vpmovzxbq %xmm15, %zmm5 {%k1} {z}
+// CHECK: encoding: [0x62,0xd2,0x7d,0xc9,0x32,0xef]
+ vpmovzxbq %xmm15, %zmm5 {%k1} {z}
+
+// CHECK: vpmovzxbq (%rcx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x32,0x29]
+ vpmovzxbq (%rcx), %zmm5
+
+// CHECK: vpmovzxbq 291(%rax,%r14,8), %zmm5
+// CHECK: encoding: [0x62,0xb2,0x7d,0x48,0x32,0xac,0xf0,0x23,0x01,0x00,0x00]
+ vpmovzxbq 291(%rax,%r14,8), %zmm5
+
+// CHECK: vpmovzxbq 1016(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x32,0x6a,0x7f]
+ vpmovzxbq 1016(%rdx), %zmm5
+
+// CHECK: vpmovzxbq 1024(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x32,0xaa,0x00,0x04,0x00,0x00]
+ vpmovzxbq 1024(%rdx), %zmm5
+
+// CHECK: vpmovzxbq -1024(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x32,0x6a,0x80]
+ vpmovzxbq -1024(%rdx), %zmm5
+
+// CHECK: vpmovzxbq -1032(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x32,0xaa,0xf8,0xfb,0xff,0xff]
+ vpmovzxbq -1032(%rdx), %zmm5
+
+// CHECK: vpmovzxdq %ymm4, %zmm20
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x35,0xe4]
+ vpmovzxdq %ymm4, %zmm20
+
+// CHECK: vpmovzxdq %ymm4, %zmm20 {%k3}
+// CHECK: encoding: [0x62,0xe2,0x7d,0x4b,0x35,0xe4]
+ vpmovzxdq %ymm4, %zmm20 {%k3}
+
+// CHECK: vpmovzxdq %ymm4, %zmm20 {%k3} {z}
+// CHECK: encoding: [0x62,0xe2,0x7d,0xcb,0x35,0xe4]
+ vpmovzxdq %ymm4, %zmm20 {%k3} {z}
+
+// CHECK: vpmovzxdq (%rcx), %zmm20
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x35,0x21]
+ vpmovzxdq (%rcx), %zmm20
+
+// CHECK: vpmovzxdq 291(%rax,%r14,8), %zmm20
+// CHECK: encoding: [0x62,0xa2,0x7d,0x48,0x35,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpmovzxdq 291(%rax,%r14,8), %zmm20
+
+// CHECK: vpmovzxdq 4064(%rdx), %zmm20
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x35,0x62,0x7f]
+ vpmovzxdq 4064(%rdx), %zmm20
+
+// CHECK: vpmovzxdq 4096(%rdx), %zmm20
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x35,0xa2,0x00,0x10,0x00,0x00]
+ vpmovzxdq 4096(%rdx), %zmm20
+
+// CHECK: vpmovzxdq -4096(%rdx), %zmm20
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x35,0x62,0x80]
+ vpmovzxdq -4096(%rdx), %zmm20
+
+// CHECK: vpmovzxdq -4128(%rdx), %zmm20
+// CHECK: encoding: [0x62,0xe2,0x7d,0x48,0x35,0xa2,0xe0,0xef,0xff,0xff]
+ vpmovzxdq -4128(%rdx), %zmm20
+
+// CHECK: vpmovzxwd %ymm6, %zmm8
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x33,0xc6]
+ vpmovzxwd %ymm6, %zmm8
+
+// CHECK: vpmovzxwd %ymm6, %zmm8 {%k7}
+// CHECK: encoding: [0x62,0x72,0x7d,0x4f,0x33,0xc6]
+ vpmovzxwd %ymm6, %zmm8 {%k7}
+
+// CHECK: vpmovzxwd %ymm6, %zmm8 {%k7} {z}
+// CHECK: encoding: [0x62,0x72,0x7d,0xcf,0x33,0xc6]
+ vpmovzxwd %ymm6, %zmm8 {%k7} {z}
+
+// CHECK: vpmovzxwd (%rcx), %zmm8
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x33,0x01]
+ vpmovzxwd (%rcx), %zmm8
+
+// CHECK: vpmovzxwd 291(%rax,%r14,8), %zmm8
+// CHECK: encoding: [0x62,0x32,0x7d,0x48,0x33,0x84,0xf0,0x23,0x01,0x00,0x00]
+ vpmovzxwd 291(%rax,%r14,8), %zmm8
+
+// CHECK: vpmovzxwd 4064(%rdx), %zmm8
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x33,0x42,0x7f]
+ vpmovzxwd 4064(%rdx), %zmm8
+
+// CHECK: vpmovzxwd 4096(%rdx), %zmm8
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x33,0x82,0x00,0x10,0x00,0x00]
+ vpmovzxwd 4096(%rdx), %zmm8
+
+// CHECK: vpmovzxwd -4096(%rdx), %zmm8
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x33,0x42,0x80]
+ vpmovzxwd -4096(%rdx), %zmm8
+
+// CHECK: vpmovzxwd -4128(%rdx), %zmm8
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x33,0x82,0xe0,0xef,0xff,0xff]
+ vpmovzxwd -4128(%rdx), %zmm8
+
+// CHECK: vpmovzxwq %xmm15, %zmm5
+// CHECK: encoding: [0x62,0xd2,0x7d,0x48,0x34,0xef]
+ vpmovzxwq %xmm15, %zmm5
+
+// CHECK: vpmovzxwq %xmm15, %zmm5 {%k7}
+// CHECK: encoding: [0x62,0xd2,0x7d,0x4f,0x34,0xef]
+ vpmovzxwq %xmm15, %zmm5 {%k7}
+
+// CHECK: vpmovzxwq %xmm15, %zmm5 {%k7} {z}
+// CHECK: encoding: [0x62,0xd2,0x7d,0xcf,0x34,0xef]
+ vpmovzxwq %xmm15, %zmm5 {%k7} {z}
+
+// CHECK: vpmovzxwq (%rcx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x34,0x29]
+ vpmovzxwq (%rcx), %zmm5
+
+// CHECK: vpmovzxwq 291(%rax,%r14,8), %zmm5
+// CHECK: encoding: [0x62,0xb2,0x7d,0x48,0x34,0xac,0xf0,0x23,0x01,0x00,0x00]
+ vpmovzxwq 291(%rax,%r14,8), %zmm5
+
+// CHECK: vpmovzxwq 2032(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x34,0x6a,0x7f]
+ vpmovzxwq 2032(%rdx), %zmm5
+
+// CHECK: vpmovzxwq 2048(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x34,0xaa,0x00,0x08,0x00,0x00]
+ vpmovzxwq 2048(%rdx), %zmm5
+
+// CHECK: vpmovzxwq -2048(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x34,0x6a,0x80]
+ vpmovzxwq -2048(%rdx), %zmm5
+
+// CHECK: vpmovzxwq -2064(%rdx), %zmm5
+// CHECK: encoding: [0x62,0xf2,0x7d,0x48,0x34,0xaa,0xf0,0xf7,0xff,0xff]
+ vpmovzxwq -2064(%rdx), %zmm5
+
+// CHECK: vpmuldq %zmm9, %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x42,0xb5,0x48,0x28,0xe9]
+ vpmuldq %zmm9, %zmm9, %zmm29
+
+// CHECK: vpmuldq %zmm9, %zmm9, %zmm29 {%k5}
+// CHECK: encoding: [0x62,0x42,0xb5,0x4d,0x28,0xe9]
+ vpmuldq %zmm9, %zmm9, %zmm29 {%k5}
+
+// CHECK: vpmuldq %zmm9, %zmm9, %zmm29 {%k5} {z}
+// CHECK: encoding: [0x62,0x42,0xb5,0xcd,0x28,0xe9]
+ vpmuldq %zmm9, %zmm9, %zmm29 {%k5} {z}
+
+// CHECK: vpmuldq (%rcx), %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x48,0x28,0x29]
+ vpmuldq (%rcx), %zmm9, %zmm29
+
+// CHECK: vpmuldq (%rcx), %zmm9, %zmm29 {%k6}
+// CHECK: encoding: [0x62,0x62,0xb5,0x4e,0x28,0x29]
+ vpmuldq (%rcx), %zmm9, %zmm29 {%k6}
+
+// CHECK: vpmuldq (%rcx), %zmm9, %zmm29 {%k6} {z}
+// CHECK: encoding: [0x62,0x62,0xb5,0xce,0x28,0x29]
+ vpmuldq (%rcx), %zmm9, %zmm29 {%k6} {z}
+
+// CHECK: vpmuldq 291(%rax,%r14,8), %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x22,0xb5,0x48,0x28,0xac,0xf0,0x23,0x01,0x00,0x00]
+ vpmuldq 291(%rax,%r14,8), %zmm9, %zmm29
+
+// CHECK: vpmuldq (%rcx){1to8}, %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x58,0x28,0x29]
+ vpmuldq (%rcx){1to8}, %zmm9, %zmm29
+
+// CHECK: vpmuldq (%rcx){1to8}, %zmm9, %zmm29 {%k3}
+// CHECK: encoding: [0x62,0x62,0xb5,0x5b,0x28,0x29]
+ vpmuldq (%rcx){1to8}, %zmm9, %zmm29 {%k3}
+
+// CHECK: vpmuldq (%rcx){1to8}, %zmm9, %zmm29 {%k3} {z}
+// CHECK: encoding: [0x62,0x62,0xb5,0xdb,0x28,0x29]
+ vpmuldq (%rcx){1to8}, %zmm9, %zmm29 {%k3} {z}
+
+// CHECK: vpmuldq 8128(%rdx), %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x48,0x28,0x6a,0x7f]
+ vpmuldq 8128(%rdx), %zmm9, %zmm29
+
+// CHECK: vpmuldq 8192(%rdx), %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x48,0x28,0xaa,0x00,0x20,0x00,0x00]
+ vpmuldq 8192(%rdx), %zmm9, %zmm29
+
+// CHECK: vpmuldq -8192(%rdx), %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x48,0x28,0x6a,0x80]
+ vpmuldq -8192(%rdx), %zmm9, %zmm29
+
+// CHECK: vpmuldq -8256(%rdx), %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x48,0x28,0xaa,0xc0,0xdf,0xff,0xff]
+ vpmuldq -8256(%rdx), %zmm9, %zmm29
+
+// CHECK: vpmuldq 1016(%rdx){1to8}, %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x58,0x28,0x6a,0x7f]
+ vpmuldq 1016(%rdx){1to8}, %zmm9, %zmm29
+
+// CHECK: vpmuldq 1024(%rdx){1to8}, %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x58,0x28,0xaa,0x00,0x04,0x00,0x00]
+ vpmuldq 1024(%rdx){1to8}, %zmm9, %zmm29
+
+// CHECK: vpmuldq -1024(%rdx){1to8}, %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x58,0x28,0x6a,0x80]
+ vpmuldq -1024(%rdx){1to8}, %zmm9, %zmm29
+
+// CHECK: vpmuldq -1032(%rdx){1to8}, %zmm9, %zmm29
+// CHECK: encoding: [0x62,0x62,0xb5,0x58,0x28,0xaa,0xf8,0xfb,0xff,0xff]
+ vpmuldq -1032(%rdx){1to8}, %zmm9, %zmm29
+
+// CHECK: vpmulld %zmm2, %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x48,0x40,0xe2]
+ vpmulld %zmm2, %zmm3, %zmm12
+
+// CHECK: vpmulld %zmm2, %zmm3, %zmm12 {%k6}
+// CHECK: encoding: [0x62,0x72,0x65,0x4e,0x40,0xe2]
+ vpmulld %zmm2, %zmm3, %zmm12 {%k6}
+
+// CHECK: vpmulld %zmm2, %zmm3, %zmm12 {%k6} {z}
+// CHECK: encoding: [0x62,0x72,0x65,0xce,0x40,0xe2]
+ vpmulld %zmm2, %zmm3, %zmm12 {%k6} {z}
+
+// CHECK: vpmulld (%rcx), %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x48,0x40,0x21]
+ vpmulld (%rcx), %zmm3, %zmm12
+
+// CHECK: vpmulld 291(%rax,%r14,8), %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x32,0x65,0x48,0x40,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpmulld 291(%rax,%r14,8), %zmm3, %zmm12
+
+// CHECK: vpmulld (%rcx){1to16}, %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x58,0x40,0x21]
+ vpmulld (%rcx){1to16}, %zmm3, %zmm12
+
+// CHECK: vpmulld 8128(%rdx), %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x48,0x40,0x62,0x7f]
+ vpmulld 8128(%rdx), %zmm3, %zmm12
+
+// CHECK: vpmulld 8192(%rdx), %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x48,0x40,0xa2,0x00,0x20,0x00,0x00]
+ vpmulld 8192(%rdx), %zmm3, %zmm12
+
+// CHECK: vpmulld -8192(%rdx), %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x48,0x40,0x62,0x80]
+ vpmulld -8192(%rdx), %zmm3, %zmm12
+
+// CHECK: vpmulld -8256(%rdx), %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x48,0x40,0xa2,0xc0,0xdf,0xff,0xff]
+ vpmulld -8256(%rdx), %zmm3, %zmm12
+
+// CHECK: vpmulld 508(%rdx){1to16}, %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x58,0x40,0x62,0x7f]
+ vpmulld 508(%rdx){1to16}, %zmm3, %zmm12
+
+// CHECK: vpmulld 512(%rdx){1to16}, %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x58,0x40,0xa2,0x00,0x02,0x00,0x00]
+ vpmulld 512(%rdx){1to16}, %zmm3, %zmm12
+
+// CHECK: vpmulld -512(%rdx){1to16}, %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x58,0x40,0x62,0x80]
+ vpmulld -512(%rdx){1to16}, %zmm3, %zmm12
+
+// CHECK: vpmulld -516(%rdx){1to16}, %zmm3, %zmm12
+// CHECK: encoding: [0x62,0x72,0x65,0x58,0x40,0xa2,0xfc,0xfd,0xff,0xff]
+ vpmulld -516(%rdx){1to16}, %zmm3, %zmm12
+
+// CHECK: vpmuludq %zmm9, %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xc1,0xd5,0x48,0xf4,0xf9]
+ vpmuludq %zmm9, %zmm5, %zmm23
+
+// CHECK: vpmuludq %zmm9, %zmm5, %zmm23 {%k4}
+// CHECK: encoding: [0x62,0xc1,0xd5,0x4c,0xf4,0xf9]
+ vpmuludq %zmm9, %zmm5, %zmm23 {%k4}
+
+// CHECK: vpmuludq %zmm9, %zmm5, %zmm23 {%k4} {z}
+// CHECK: encoding: [0x62,0xc1,0xd5,0xcc,0xf4,0xf9]
+ vpmuludq %zmm9, %zmm5, %zmm23 {%k4} {z}
+
+// CHECK: vpmuludq (%rcx), %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x48,0xf4,0x39]
+ vpmuludq (%rcx), %zmm5, %zmm23
+
+// CHECK: vpmuludq 291(%rax,%r14,8), %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xa1,0xd5,0x48,0xf4,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpmuludq 291(%rax,%r14,8), %zmm5, %zmm23
+
+// CHECK: vpmuludq (%rcx){1to8}, %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x58,0xf4,0x39]
+ vpmuludq (%rcx){1to8}, %zmm5, %zmm23
+
+// CHECK: vpmuludq 8128(%rdx), %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x48,0xf4,0x7a,0x7f]
+ vpmuludq 8128(%rdx), %zmm5, %zmm23
+
+// CHECK: vpmuludq 8192(%rdx), %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x48,0xf4,0xba,0x00,0x20,0x00,0x00]
+ vpmuludq 8192(%rdx), %zmm5, %zmm23
+
+// CHECK: vpmuludq -8192(%rdx), %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x48,0xf4,0x7a,0x80]
+ vpmuludq -8192(%rdx), %zmm5, %zmm23
+
+// CHECK: vpmuludq -8256(%rdx), %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x48,0xf4,0xba,0xc0,0xdf,0xff,0xff]
+ vpmuludq -8256(%rdx), %zmm5, %zmm23
+
+// CHECK: vpmuludq 1016(%rdx){1to8}, %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x58,0xf4,0x7a,0x7f]
+ vpmuludq 1016(%rdx){1to8}, %zmm5, %zmm23
+
+// CHECK: vpmuludq 1024(%rdx){1to8}, %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x58,0xf4,0xba,0x00,0x04,0x00,0x00]
+ vpmuludq 1024(%rdx){1to8}, %zmm5, %zmm23
+
+// CHECK: vpmuludq -1024(%rdx){1to8}, %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x58,0xf4,0x7a,0x80]
+ vpmuludq -1024(%rdx){1to8}, %zmm5, %zmm23
+
+// CHECK: vpmuludq -1032(%rdx){1to8}, %zmm5, %zmm23
+// CHECK: encoding: [0x62,0xe1,0xd5,0x58,0xf4,0xba,0xf8,0xfb,0xff,0xff]
+ vpmuludq -1032(%rdx){1to8}, %zmm5, %zmm23
+
+// CHECK: vpord %zmm20, %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xa1,0x6d,0x48,0xeb,0xfc]
+ vpord %zmm20, %zmm2, %zmm23
+
+// CHECK: vpord %zmm20, %zmm2, %zmm23 {%k2}
+// CHECK: encoding: [0x62,0xa1,0x6d,0x4a,0xeb,0xfc]
+ vpord %zmm20, %zmm2, %zmm23 {%k2}
+
+// CHECK: vpord %zmm20, %zmm2, %zmm23 {%k2} {z}
+// CHECK: encoding: [0x62,0xa1,0x6d,0xca,0xeb,0xfc]
+ vpord %zmm20, %zmm2, %zmm23 {%k2} {z}
+
+// CHECK: vpord (%rcx), %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x48,0xeb,0x39]
+ vpord (%rcx), %zmm2, %zmm23
+
+// CHECK: vpord 291(%rax,%r14,8), %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xa1,0x6d,0x48,0xeb,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpord 291(%rax,%r14,8), %zmm2, %zmm23
+
+// CHECK: vpord (%rcx){1to16}, %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x58,0xeb,0x39]
+ vpord (%rcx){1to16}, %zmm2, %zmm23
+
+// CHECK: vpord 8128(%rdx), %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x48,0xeb,0x7a,0x7f]
+ vpord 8128(%rdx), %zmm2, %zmm23
+
+// CHECK: vpord 8192(%rdx), %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x48,0xeb,0xba,0x00,0x20,0x00,0x00]
+ vpord 8192(%rdx), %zmm2, %zmm23
+
+// CHECK: vpord -8192(%rdx), %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x48,0xeb,0x7a,0x80]
+ vpord -8192(%rdx), %zmm2, %zmm23
+
+// CHECK: vpord -8256(%rdx), %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x48,0xeb,0xba,0xc0,0xdf,0xff,0xff]
+ vpord -8256(%rdx), %zmm2, %zmm23
+
+// CHECK: vpord 508(%rdx){1to16}, %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x58,0xeb,0x7a,0x7f]
+ vpord 508(%rdx){1to16}, %zmm2, %zmm23
+
+// CHECK: vpord 512(%rdx){1to16}, %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x58,0xeb,0xba,0x00,0x02,0x00,0x00]
+ vpord 512(%rdx){1to16}, %zmm2, %zmm23
+
+// CHECK: vpord -512(%rdx){1to16}, %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x58,0xeb,0x7a,0x80]
+ vpord -512(%rdx){1to16}, %zmm2, %zmm23
+
+// CHECK: vpord -516(%rdx){1to16}, %zmm2, %zmm23
+// CHECK: encoding: [0x62,0xe1,0x6d,0x58,0xeb,0xba,0xfc,0xfd,0xff,0xff]
+ vpord -516(%rdx){1to16}, %zmm2, %zmm23
+
+// CHECK: vporq %zmm6, %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x48,0xeb,0xce]
+ vporq %zmm6, %zmm10, %zmm1
+
+// CHECK: vporq %zmm6, %zmm10, %zmm1 {%k2}
+// CHECK: encoding: [0x62,0xf1,0xad,0x4a,0xeb,0xce]
+ vporq %zmm6, %zmm10, %zmm1 {%k2}
+
+// CHECK: vporq %zmm6, %zmm10, %zmm1 {%k2} {z}
+// CHECK: encoding: [0x62,0xf1,0xad,0xca,0xeb,0xce]
+ vporq %zmm6, %zmm10, %zmm1 {%k2} {z}
+
+// CHECK: vporq (%rcx), %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x48,0xeb,0x09]
+ vporq (%rcx), %zmm10, %zmm1
+
+// CHECK: vporq 291(%rax,%r14,8), %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xb1,0xad,0x48,0xeb,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vporq 291(%rax,%r14,8), %zmm10, %zmm1
+
+// CHECK: vporq (%rcx){1to8}, %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x58,0xeb,0x09]
+ vporq (%rcx){1to8}, %zmm10, %zmm1
+
+// CHECK: vporq 8128(%rdx), %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x48,0xeb,0x4a,0x7f]
+ vporq 8128(%rdx), %zmm10, %zmm1
+
+// CHECK: vporq 8192(%rdx), %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x48,0xeb,0x8a,0x00,0x20,0x00,0x00]
+ vporq 8192(%rdx), %zmm10, %zmm1
+
+// CHECK: vporq -8192(%rdx), %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x48,0xeb,0x4a,0x80]
+ vporq -8192(%rdx), %zmm10, %zmm1
+
+// CHECK: vporq -8256(%rdx), %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x48,0xeb,0x8a,0xc0,0xdf,0xff,0xff]
+ vporq -8256(%rdx), %zmm10, %zmm1
+
+// CHECK: vporq 1016(%rdx){1to8}, %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x58,0xeb,0x4a,0x7f]
+ vporq 1016(%rdx){1to8}, %zmm10, %zmm1
+
+// CHECK: vporq 1024(%rdx){1to8}, %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x58,0xeb,0x8a,0x00,0x04,0x00,0x00]
+ vporq 1024(%rdx){1to8}, %zmm10, %zmm1
+
+// CHECK: vporq -1024(%rdx){1to8}, %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x58,0xeb,0x4a,0x80]
+ vporq -1024(%rdx){1to8}, %zmm10, %zmm1
+
+// CHECK: vporq -1032(%rdx){1to8}, %zmm10, %zmm1
+// CHECK: encoding: [0x62,0xf1,0xad,0x58,0xeb,0x8a,0xf8,0xfb,0xff,0xff]
+ vporq -1032(%rdx){1to8}, %zmm10, %zmm1
+
+// CHECK: vpsubd %zmm7, %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x40,0xfa,0xff]
+ vpsubd %zmm7, %zmm28, %zmm7
+
+// CHECK: vpsubd %zmm7, %zmm28, %zmm7 {%k3}
+// CHECK: encoding: [0x62,0xf1,0x1d,0x43,0xfa,0xff]
+ vpsubd %zmm7, %zmm28, %zmm7 {%k3}
+
+// CHECK: vpsubd %zmm7, %zmm28, %zmm7 {%k3} {z}
+// CHECK: encoding: [0x62,0xf1,0x1d,0xc3,0xfa,0xff]
+ vpsubd %zmm7, %zmm28, %zmm7 {%k3} {z}
+
+// CHECK: vpsubd (%rcx), %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x40,0xfa,0x39]
+ vpsubd (%rcx), %zmm28, %zmm7
+
+// CHECK: vpsubd 291(%rax,%r14,8), %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xb1,0x1d,0x40,0xfa,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpsubd 291(%rax,%r14,8), %zmm28, %zmm7
+
+// CHECK: vpsubd (%rcx){1to16}, %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x50,0xfa,0x39]
+ vpsubd (%rcx){1to16}, %zmm28, %zmm7
+
+// CHECK: vpsubd 8128(%rdx), %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x40,0xfa,0x7a,0x7f]
+ vpsubd 8128(%rdx), %zmm28, %zmm7
+
+// CHECK: vpsubd 8192(%rdx), %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x40,0xfa,0xba,0x00,0x20,0x00,0x00]
+ vpsubd 8192(%rdx), %zmm28, %zmm7
+
+// CHECK: vpsubd -8192(%rdx), %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x40,0xfa,0x7a,0x80]
+ vpsubd -8192(%rdx), %zmm28, %zmm7
+
+// CHECK: vpsubd -8256(%rdx), %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x40,0xfa,0xba,0xc0,0xdf,0xff,0xff]
+ vpsubd -8256(%rdx), %zmm28, %zmm7
+
+// CHECK: vpsubd 508(%rdx){1to16}, %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x50,0xfa,0x7a,0x7f]
+ vpsubd 508(%rdx){1to16}, %zmm28, %zmm7
+
+// CHECK: vpsubd 512(%rdx){1to16}, %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x50,0xfa,0xba,0x00,0x02,0x00,0x00]
+ vpsubd 512(%rdx){1to16}, %zmm28, %zmm7
+
+// CHECK: vpsubd -512(%rdx){1to16}, %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x50,0xfa,0x7a,0x80]
+ vpsubd -512(%rdx){1to16}, %zmm28, %zmm7
+
+// CHECK: vpsubd -516(%rdx){1to16}, %zmm28, %zmm7
+// CHECK: encoding: [0x62,0xf1,0x1d,0x50,0xfa,0xba,0xfc,0xfd,0xff,0xff]
+ vpsubd -516(%rdx){1to16}, %zmm28, %zmm7
+
+// CHECK: vpsubq %zmm17, %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x21,0x9d,0x40,0xfb,0xe9]
+ vpsubq %zmm17, %zmm28, %zmm29
+
+// CHECK: vpsubq %zmm17, %zmm28, %zmm29 {%k2}
+// CHECK: encoding: [0x62,0x21,0x9d,0x42,0xfb,0xe9]
+ vpsubq %zmm17, %zmm28, %zmm29 {%k2}
+
+// CHECK: vpsubq %zmm17, %zmm28, %zmm29 {%k2} {z}
+// CHECK: encoding: [0x62,0x21,0x9d,0xc2,0xfb,0xe9]
+ vpsubq %zmm17, %zmm28, %zmm29 {%k2} {z}
+
+// CHECK: vpsubq (%rcx), %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0xfb,0x29]
+ vpsubq (%rcx), %zmm28, %zmm29
+
+// CHECK: vpsubq 291(%rax,%r14,8), %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x21,0x9d,0x40,0xfb,0xac,0xf0,0x23,0x01,0x00,0x00]
+ vpsubq 291(%rax,%r14,8), %zmm28, %zmm29
+
+// CHECK: vpsubq (%rcx){1to8}, %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0xfb,0x29]
+ vpsubq (%rcx){1to8}, %zmm28, %zmm29
+
+// CHECK: vpsubq 8128(%rdx), %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0xfb,0x6a,0x7f]
+ vpsubq 8128(%rdx), %zmm28, %zmm29
+
+// CHECK: vpsubq 8192(%rdx), %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0xfb,0xaa,0x00,0x20,0x00,0x00]
+ vpsubq 8192(%rdx), %zmm28, %zmm29
+
+// CHECK: vpsubq -8192(%rdx), %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0xfb,0x6a,0x80]
+ vpsubq -8192(%rdx), %zmm28, %zmm29
+
+// CHECK: vpsubq -8256(%rdx), %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x40,0xfb,0xaa,0xc0,0xdf,0xff,0xff]
+ vpsubq -8256(%rdx), %zmm28, %zmm29
+
+// CHECK: vpsubq 1016(%rdx){1to8}, %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0xfb,0x6a,0x7f]
+ vpsubq 1016(%rdx){1to8}, %zmm28, %zmm29
+
+// CHECK: vpsubq 1024(%rdx){1to8}, %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0xfb,0xaa,0x00,0x04,0x00,0x00]
+ vpsubq 1024(%rdx){1to8}, %zmm28, %zmm29
+
+// CHECK: vpsubq -1024(%rdx){1to8}, %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0xfb,0x6a,0x80]
+ vpsubq -1024(%rdx){1to8}, %zmm28, %zmm29
+
+// CHECK: vpsubq -1032(%rdx){1to8}, %zmm28, %zmm29
+// CHECK: encoding: [0x62,0x61,0x9d,0x50,0xfb,0xaa,0xf8,0xfb,0xff,0xff]
+ vpsubq -1032(%rdx){1to8}, %zmm28, %zmm29
+
+// CHECK: vpxord %zmm24, %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x11,0x1d,0x48,0xef,0xc0]
+ vpxord %zmm24, %zmm12, %zmm8
+
+// CHECK: vpxord %zmm24, %zmm12, %zmm8 {%k6}
+// CHECK: encoding: [0x62,0x11,0x1d,0x4e,0xef,0xc0]
+ vpxord %zmm24, %zmm12, %zmm8 {%k6}
+
+// CHECK: vpxord %zmm24, %zmm12, %zmm8 {%k6} {z}
+// CHECK: encoding: [0x62,0x11,0x1d,0xce,0xef,0xc0]
+ vpxord %zmm24, %zmm12, %zmm8 {%k6} {z}
+
+// CHECK: vpxord (%rcx), %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x48,0xef,0x01]
+ vpxord (%rcx), %zmm12, %zmm8
+
+// CHECK: vpxord 291(%rax,%r14,8), %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x31,0x1d,0x48,0xef,0x84,0xf0,0x23,0x01,0x00,0x00]
+ vpxord 291(%rax,%r14,8), %zmm12, %zmm8
+
+// CHECK: vpxord (%rcx){1to16}, %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x58,0xef,0x01]
+ vpxord (%rcx){1to16}, %zmm12, %zmm8
+
+// CHECK: vpxord 8128(%rdx), %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x48,0xef,0x42,0x7f]
+ vpxord 8128(%rdx), %zmm12, %zmm8
+
+// CHECK: vpxord 8192(%rdx), %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x48,0xef,0x82,0x00,0x20,0x00,0x00]
+ vpxord 8192(%rdx), %zmm12, %zmm8
+
+// CHECK: vpxord -8192(%rdx), %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x48,0xef,0x42,0x80]
+ vpxord -8192(%rdx), %zmm12, %zmm8
+
+// CHECK: vpxord -8256(%rdx), %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x48,0xef,0x82,0xc0,0xdf,0xff,0xff]
+ vpxord -8256(%rdx), %zmm12, %zmm8
+
+// CHECK: vpxord 508(%rdx){1to16}, %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x58,0xef,0x42,0x7f]
+ vpxord 508(%rdx){1to16}, %zmm12, %zmm8
+
+// CHECK: vpxord 512(%rdx){1to16}, %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x58,0xef,0x82,0x00,0x02,0x00,0x00]
+ vpxord 512(%rdx){1to16}, %zmm12, %zmm8
+
+// CHECK: vpxord -512(%rdx){1to16}, %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x58,0xef,0x42,0x80]
+ vpxord -512(%rdx){1to16}, %zmm12, %zmm8
+
+// CHECK: vpxord -516(%rdx){1to16}, %zmm12, %zmm8
+// CHECK: encoding: [0x62,0x71,0x1d,0x58,0xef,0x82,0xfc,0xfd,0xff,0xff]
+ vpxord -516(%rdx){1to16}, %zmm12, %zmm8
+
+// CHECK: vpxorq %zmm10, %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xd1,0xcd,0x40,0xef,0xfa]
+ vpxorq %zmm10, %zmm22, %zmm7
+
+// CHECK: vpxorq %zmm10, %zmm22, %zmm7 {%k6}
+// CHECK: encoding: [0x62,0xd1,0xcd,0x46,0xef,0xfa]
+ vpxorq %zmm10, %zmm22, %zmm7 {%k6}
+
+// CHECK: vpxorq %zmm10, %zmm22, %zmm7 {%k6} {z}
+// CHECK: encoding: [0x62,0xd1,0xcd,0xc6,0xef,0xfa]
+ vpxorq %zmm10, %zmm22, %zmm7 {%k6} {z}
+
+// CHECK: vpxorq (%rcx), %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x40,0xef,0x39]
+ vpxorq (%rcx), %zmm22, %zmm7
+
+// CHECK: vpxorq 291(%rax,%r14,8), %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xb1,0xcd,0x40,0xef,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpxorq 291(%rax,%r14,8), %zmm22, %zmm7
+
+// CHECK: vpxorq (%rcx){1to8}, %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x50,0xef,0x39]
+ vpxorq (%rcx){1to8}, %zmm22, %zmm7
+
+// CHECK: vpxorq 8128(%rdx), %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x40,0xef,0x7a,0x7f]
+ vpxorq 8128(%rdx), %zmm22, %zmm7
+
+// CHECK: vpxorq 8192(%rdx), %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x40,0xef,0xba,0x00,0x20,0x00,0x00]
+ vpxorq 8192(%rdx), %zmm22, %zmm7
+
+// CHECK: vpxorq -8192(%rdx), %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x40,0xef,0x7a,0x80]
+ vpxorq -8192(%rdx), %zmm22, %zmm7
+
+// CHECK: vpxorq -8256(%rdx), %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x40,0xef,0xba,0xc0,0xdf,0xff,0xff]
+ vpxorq -8256(%rdx), %zmm22, %zmm7
+
+// CHECK: vpxorq 1016(%rdx){1to8}, %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x50,0xef,0x7a,0x7f]
+ vpxorq 1016(%rdx){1to8}, %zmm22, %zmm7
+
+// CHECK: vpxorq 1024(%rdx){1to8}, %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x50,0xef,0xba,0x00,0x04,0x00,0x00]
+ vpxorq 1024(%rdx){1to8}, %zmm22, %zmm7
+
+// CHECK: vpxorq -1024(%rdx){1to8}, %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x50,0xef,0x7a,0x80]
+ vpxorq -1024(%rdx){1to8}, %zmm22, %zmm7
+
+// CHECK: vpxorq -1032(%rdx){1to8}, %zmm22, %zmm7
+// CHECK: encoding: [0x62,0xf1,0xcd,0x50,0xef,0xba,0xf8,0xfb,0xff,0xff]
+ vpxorq -1032(%rdx){1to8}, %zmm22, %zmm7
+
+// CHECK: vsubpd %zmm9, %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x51,0x9d,0x48,0x5c,0xc9]
+ vsubpd %zmm9, %zmm12, %zmm9
+
+// CHECK: vsubpd %zmm9, %zmm12, %zmm9 {%k7}
+// CHECK: encoding: [0x62,0x51,0x9d,0x4f,0x5c,0xc9]
+ vsubpd %zmm9, %zmm12, %zmm9 {%k7}
+
+// CHECK: vsubpd %zmm9, %zmm12, %zmm9 {%k7} {z}
+// CHECK: encoding: [0x62,0x51,0x9d,0xcf,0x5c,0xc9]
+ vsubpd %zmm9, %zmm12, %zmm9 {%k7} {z}
+
+// CHECK: vsubpd (%rcx), %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x48,0x5c,0x09]
+ vsubpd (%rcx), %zmm12, %zmm9
+
+// CHECK: vsubpd 291(%rax,%r14,8), %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x31,0x9d,0x48,0x5c,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vsubpd 291(%rax,%r14,8), %zmm12, %zmm9
+
+// CHECK: vsubpd (%rcx){1to8}, %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x58,0x5c,0x09]
+ vsubpd (%rcx){1to8}, %zmm12, %zmm9
+
+// CHECK: vsubpd 8128(%rdx), %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x48,0x5c,0x4a,0x7f]
+ vsubpd 8128(%rdx), %zmm12, %zmm9
+
+// CHECK: vsubpd 8192(%rdx), %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x48,0x5c,0x8a,0x00,0x20,0x00,0x00]
+ vsubpd 8192(%rdx), %zmm12, %zmm9
+
+// CHECK: vsubpd -8192(%rdx), %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x48,0x5c,0x4a,0x80]
+ vsubpd -8192(%rdx), %zmm12, %zmm9
+
+// CHECK: vsubpd -8256(%rdx), %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x48,0x5c,0x8a,0xc0,0xdf,0xff,0xff]
+ vsubpd -8256(%rdx), %zmm12, %zmm9
+
+// CHECK: vsubpd 1016(%rdx){1to8}, %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x58,0x5c,0x4a,0x7f]
+ vsubpd 1016(%rdx){1to8}, %zmm12, %zmm9
+
+// CHECK: vsubpd 1024(%rdx){1to8}, %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x58,0x5c,0x8a,0x00,0x04,0x00,0x00]
+ vsubpd 1024(%rdx){1to8}, %zmm12, %zmm9
+
+// CHECK: vsubpd -1024(%rdx){1to8}, %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x58,0x5c,0x4a,0x80]
+ vsubpd -1024(%rdx){1to8}, %zmm12, %zmm9
+
+// CHECK: vsubpd -1032(%rdx){1to8}, %zmm12, %zmm9
+// CHECK: encoding: [0x62,0x71,0x9d,0x58,0x5c,0x8a,0xf8,0xfb,0xff,0xff]
+ vsubpd -1032(%rdx){1to8}, %zmm12, %zmm9
+
+// CHECK: vsubps %zmm21, %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x31,0x24,0x40,0x5c,0xf5]
+ vsubps %zmm21, %zmm27, %zmm14
+
+// CHECK: vsubps %zmm21, %zmm27, %zmm14 {%k5}
+// CHECK: encoding: [0x62,0x31,0x24,0x45,0x5c,0xf5]
+ vsubps %zmm21, %zmm27, %zmm14 {%k5}
+
+// CHECK: vsubps %zmm21, %zmm27, %zmm14 {%k5} {z}
+// CHECK: encoding: [0x62,0x31,0x24,0xc5,0x5c,0xf5]
+ vsubps %zmm21, %zmm27, %zmm14 {%k5} {z}
+
+// CHECK: vsubps (%rcx), %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x40,0x5c,0x31]
+ vsubps (%rcx), %zmm27, %zmm14
+
+// CHECK: vsubps 291(%rax,%r14,8), %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x31,0x24,0x40,0x5c,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vsubps 291(%rax,%r14,8), %zmm27, %zmm14
+
+// CHECK: vsubps (%rcx){1to16}, %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x50,0x5c,0x31]
+ vsubps (%rcx){1to16}, %zmm27, %zmm14
+
+// CHECK: vsubps 8128(%rdx), %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x40,0x5c,0x72,0x7f]
+ vsubps 8128(%rdx), %zmm27, %zmm14
+
+// CHECK: vsubps 8192(%rdx), %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x40,0x5c,0xb2,0x00,0x20,0x00,0x00]
+ vsubps 8192(%rdx), %zmm27, %zmm14
+
+// CHECK: vsubps -8192(%rdx), %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x40,0x5c,0x72,0x80]
+ vsubps -8192(%rdx), %zmm27, %zmm14
+
+// CHECK: vsubps -8256(%rdx), %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x40,0x5c,0xb2,0xc0,0xdf,0xff,0xff]
+ vsubps -8256(%rdx), %zmm27, %zmm14
+
+// CHECK: vsubps 508(%rdx){1to16}, %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x50,0x5c,0x72,0x7f]
+ vsubps 508(%rdx){1to16}, %zmm27, %zmm14
+
+// CHECK: vsubps 512(%rdx){1to16}, %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x50,0x5c,0xb2,0x00,0x02,0x00,0x00]
+ vsubps 512(%rdx){1to16}, %zmm27, %zmm14
+
+// CHECK: vsubps -512(%rdx){1to16}, %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x50,0x5c,0x72,0x80]
+ vsubps -512(%rdx){1to16}, %zmm27, %zmm14
+
+// CHECK: vsubps -516(%rdx){1to16}, %zmm27, %zmm14
+// CHECK: encoding: [0x62,0x71,0x24,0x50,0x5c,0xb2,0xfc,0xfd,0xff,0xff]
+ vsubps -516(%rdx){1to16}, %zmm27, %zmm14
+
+// CHECK: vpmovqb %zmm2, %xmm3
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x32,0xd3]
+ vpmovqb %zmm2, %xmm3
+
+// CHECK: vpmovqb %zmm2, %xmm3 {%k1}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x49,0x32,0xd3]
+ vpmovqb %zmm2, %xmm3 {%k1}
+
+// CHECK: vpmovqb %zmm2, %xmm3 {%k1} {z}
+// CHECK: encoding: [0x62,0xf2,0x7e,0xc9,0x32,0xd3]
+ vpmovqb %zmm2, %xmm3 {%k1} {z}
+
+// CHECK: vpmovsqb %zmm29, %xmm30
+// CHECK: encoding: [0x62,0x02,0x7e,0x48,0x22,0xee]
+ vpmovsqb %zmm29, %xmm30
+
+// CHECK: vpmovsqb %zmm29, %xmm30 {%k5}
+// CHECK: encoding: [0x62,0x02,0x7e,0x4d,0x22,0xee]
+ vpmovsqb %zmm29, %xmm30 {%k5}
+
+// CHECK: vpmovsqb %zmm29, %xmm30 {%k5} {z}
+// CHECK: encoding: [0x62,0x02,0x7e,0xcd,0x22,0xee]
+ vpmovsqb %zmm29, %xmm30 {%k5} {z}
+
+// CHECK: vpmovusqb %zmm28, %xmm24
+// CHECK: encoding: [0x62,0x02,0x7e,0x48,0x12,0xe0]
+ vpmovusqb %zmm28, %xmm24
+
+// CHECK: vpmovusqb %zmm28, %xmm24 {%k7}
+// CHECK: encoding: [0x62,0x02,0x7e,0x4f,0x12,0xe0]
+ vpmovusqb %zmm28, %xmm24 {%k7}
+
+// CHECK: vpmovusqb %zmm28, %xmm24 {%k7} {z}
+// CHECK: encoding: [0x62,0x02,0x7e,0xcf,0x12,0xe0]
+ vpmovusqb %zmm28, %xmm24 {%k7} {z}
+
+// CHECK: vpmovqw %zmm18, %xmm6
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x34,0xd6]
+ vpmovqw %zmm18, %xmm6
+
+// CHECK: vpmovqw %zmm18, %xmm6 {%k1}
+// CHECK: encoding: [0x62,0xe2,0x7e,0x49,0x34,0xd6]
+ vpmovqw %zmm18, %xmm6 {%k1}
+
+// CHECK: vpmovqw %zmm18, %xmm6 {%k1} {z}
+// CHECK: encoding: [0x62,0xe2,0x7e,0xc9,0x34,0xd6]
+ vpmovqw %zmm18, %xmm6 {%k1} {z}
+
+// CHECK: vpmovsqw %zmm19, %xmm27
+// CHECK: encoding: [0x62,0x82,0x7e,0x48,0x24,0xdb]
+ vpmovsqw %zmm19, %xmm27
+
+// CHECK: vpmovsqw %zmm19, %xmm27 {%k6}
+// CHECK: encoding: [0x62,0x82,0x7e,0x4e,0x24,0xdb]
+ vpmovsqw %zmm19, %xmm27 {%k6}
+
+// CHECK: vpmovsqw %zmm19, %xmm27 {%k6} {z}
+// CHECK: encoding: [0x62,0x82,0x7e,0xce,0x24,0xdb]
+ vpmovsqw %zmm19, %xmm27 {%k6} {z}
+
+// CHECK: vpmovusqw %zmm10, %xmm28
+// CHECK: encoding: [0x62,0x12,0x7e,0x48,0x14,0xd4]
+ vpmovusqw %zmm10, %xmm28
+
+// CHECK: vpmovusqw %zmm10, %xmm28 {%k7}
+// CHECK: encoding: [0x62,0x12,0x7e,0x4f,0x14,0xd4]
+ vpmovusqw %zmm10, %xmm28 {%k7}
+
+// CHECK: vpmovusqw %zmm10, %xmm28 {%k7} {z}
+// CHECK: encoding: [0x62,0x12,0x7e,0xcf,0x14,0xd4]
+ vpmovusqw %zmm10, %xmm28 {%k7} {z}
+
+// CHECK: vpmovqd %zmm25, %ymm6
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x35,0xce]
+ vpmovqd %zmm25, %ymm6
+
+// CHECK: vpmovqd %zmm25, %ymm6 {%k5}
+// CHECK: encoding: [0x62,0x62,0x7e,0x4d,0x35,0xce]
+ vpmovqd %zmm25, %ymm6 {%k5}
+
+// CHECK: vpmovqd %zmm25, %ymm6 {%k5} {z}
+// CHECK: encoding: [0x62,0x62,0x7e,0xcd,0x35,0xce]
+ vpmovqd %zmm25, %ymm6 {%k5} {z}
+
+// CHECK: vpmovsqd %zmm2, %ymm15
+// CHECK: encoding: [0x62,0xd2,0x7e,0x48,0x25,0xd7]
+ vpmovsqd %zmm2, %ymm15
+
+// CHECK: vpmovsqd %zmm2, %ymm15 {%k2}
+// CHECK: encoding: [0x62,0xd2,0x7e,0x4a,0x25,0xd7]
+ vpmovsqd %zmm2, %ymm15 {%k2}
+
+// CHECK: vpmovsqd %zmm2, %ymm15 {%k2} {z}
+// CHECK: encoding: [0x62,0xd2,0x7e,0xca,0x25,0xd7]
+ vpmovsqd %zmm2, %ymm15 {%k2} {z}
+
+// CHECK: vpmovusqd %zmm4, %ymm8
+// CHECK: encoding: [0x62,0xd2,0x7e,0x48,0x15,0xe0]
+ vpmovusqd %zmm4, %ymm8
+
+// CHECK: vpmovusqd %zmm4, %ymm8 {%k4}
+// CHECK: encoding: [0x62,0xd2,0x7e,0x4c,0x15,0xe0]
+ vpmovusqd %zmm4, %ymm8 {%k4}
+
+// CHECK: vpmovusqd %zmm4, %ymm8 {%k4} {z}
+// CHECK: encoding: [0x62,0xd2,0x7e,0xcc,0x15,0xe0]
+ vpmovusqd %zmm4, %ymm8 {%k4} {z}
+
+// CHECK: vpmovdb %zmm5, %xmm2
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x31,0xea]
+ vpmovdb %zmm5, %xmm2
+
+// CHECK: vpmovdb %zmm5, %xmm2 {%k5}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x4d,0x31,0xea]
+ vpmovdb %zmm5, %xmm2 {%k5}
+
+// CHECK: vpmovdb %zmm5, %xmm2 {%k5} {z}
+// CHECK: encoding: [0x62,0xf2,0x7e,0xcd,0x31,0xea]
+ vpmovdb %zmm5, %xmm2 {%k5} {z}
+
+// CHECK: vpmovsdb %zmm2, %xmm21
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x21,0xd5]
+ vpmovsdb %zmm2, %xmm21
+
+// CHECK: vpmovsdb %zmm2, %xmm21 {%k4}
+// CHECK: encoding: [0x62,0xb2,0x7e,0x4c,0x21,0xd5]
+ vpmovsdb %zmm2, %xmm21 {%k4}
+
+// CHECK: vpmovsdb %zmm2, %xmm21 {%k4} {z}
+// CHECK: encoding: [0x62,0xb2,0x7e,0xcc,0x21,0xd5]
+ vpmovsdb %zmm2, %xmm21 {%k4} {z}
+
+// CHECK: vpmovusdb %zmm2, %xmm20
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x11,0xd4]
+ vpmovusdb %zmm2, %xmm20
+
+// CHECK: vpmovusdb %zmm2, %xmm20 {%k3}
+// CHECK: encoding: [0x62,0xb2,0x7e,0x4b,0x11,0xd4]
+ vpmovusdb %zmm2, %xmm20 {%k3}
+
+// CHECK: vpmovusdb %zmm2, %xmm20 {%k3} {z}
+// CHECK: encoding: [0x62,0xb2,0x7e,0xcb,0x11,0xd4]
+ vpmovusdb %zmm2, %xmm20 {%k3} {z}
+
+// CHECK: vpmovdw %zmm29, %ymm22
+// CHECK: encoding: [0x62,0x22,0x7e,0x48,0x33,0xee]
+ vpmovdw %zmm29, %ymm22
+
+// CHECK: vpmovdw %zmm29, %ymm22 {%k5}
+// CHECK: encoding: [0x62,0x22,0x7e,0x4d,0x33,0xee]
+ vpmovdw %zmm29, %ymm22 {%k5}
+
+// CHECK: vpmovdw %zmm29, %ymm22 {%k5} {z}
+// CHECK: encoding: [0x62,0x22,0x7e,0xcd,0x33,0xee]
+ vpmovdw %zmm29, %ymm22 {%k5} {z}
+
+// CHECK: vpmovsdw %zmm14, %ymm25
+// CHECK: encoding: [0x62,0x12,0x7e,0x48,0x23,0xf1]
+ vpmovsdw %zmm14, %ymm25
+
+// CHECK: vpmovsdw %zmm14, %ymm25 {%k4}
+// CHECK: encoding: [0x62,0x12,0x7e,0x4c,0x23,0xf1]
+ vpmovsdw %zmm14, %ymm25 {%k4}
+
+// CHECK: vpmovsdw %zmm14, %ymm25 {%k4} {z}
+// CHECK: encoding: [0x62,0x12,0x7e,0xcc,0x23,0xf1]
+ vpmovsdw %zmm14, %ymm25 {%k4} {z}
+
+// CHECK: vpmovusdw %zmm7, %ymm8
+// CHECK: encoding: [0x62,0xd2,0x7e,0x48,0x13,0xf8]
+ vpmovusdw %zmm7, %ymm8
+
+// CHECK: vpmovusdw %zmm7, %ymm8 {%k1}
+// CHECK: encoding: [0x62,0xd2,0x7e,0x49,0x13,0xf8]
+ vpmovusdw %zmm7, %ymm8 {%k1}
+
+// CHECK: vpmovusdw %zmm7, %ymm8 {%k1} {z}
+// CHECK: encoding: [0x62,0xd2,0x7e,0xc9,0x13,0xf8]
+ vpmovusdw %zmm7, %ymm8 {%k1} {z}
+
+// CHECK: vpmovqb %zmm3, (%rcx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x32,0x19]
+ vpmovqb %zmm3, (%rcx)
+
+// CHECK: vpmovqb %zmm3, (%rcx) {%k7}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x4f,0x32,0x19]
+ vpmovqb %zmm3, (%rcx) {%k7}
+
+// CHECK: vpmovqb %zmm3, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x32,0x9c,0xf0,0x23,0x01,0x00,0x00]
+ vpmovqb %zmm3, 291(%rax,%r14,8)
+
+// CHECK: vpmovqb %zmm3, 1016(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x32,0x5a,0x7f]
+ vpmovqb %zmm3, 1016(%rdx)
+
+// CHECK: vpmovqb %zmm3, 1024(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x32,0x9a,0x00,0x04,0x00,0x00]
+ vpmovqb %zmm3, 1024(%rdx)
+
+// CHECK: vpmovqb %zmm3, -1024(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x32,0x5a,0x80]
+ vpmovqb %zmm3, -1024(%rdx)
+
+// CHECK: vpmovqb %zmm3, -1032(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x32,0x9a,0xf8,0xfb,0xff,0xff]
+ vpmovqb %zmm3, -1032(%rdx)
+
+// CHECK: vpmovsqb %zmm16, (%rcx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x22,0x01]
+ vpmovsqb %zmm16, (%rcx)
+
+// CHECK: vpmovsqb %zmm16, (%rcx) {%k2}
+// CHECK: encoding: [0x62,0xe2,0x7e,0x4a,0x22,0x01]
+ vpmovsqb %zmm16, (%rcx) {%k2}
+
+// CHECK: vpmovsqb %zmm16, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xa2,0x7e,0x48,0x22,0x84,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsqb %zmm16, 291(%rax,%r14,8)
+
+// CHECK: vpmovsqb %zmm16, 1016(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x22,0x42,0x7f]
+ vpmovsqb %zmm16, 1016(%rdx)
+
+// CHECK: vpmovsqb %zmm16, 1024(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x22,0x82,0x00,0x04,0x00,0x00]
+ vpmovsqb %zmm16, 1024(%rdx)
+
+// CHECK: vpmovsqb %zmm16, -1024(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x22,0x42,0x80]
+ vpmovsqb %zmm16, -1024(%rdx)
+
+// CHECK: vpmovsqb %zmm16, -1032(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x22,0x82,0xf8,0xfb,0xff,0xff]
+ vpmovsqb %zmm16, -1032(%rdx)
+
+// CHECK: vpmovusqb %zmm28, (%rcx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x12,0x21]
+ vpmovusqb %zmm28, (%rcx)
+
+// CHECK: vpmovusqb %zmm28, (%rcx) {%k1}
+// CHECK: encoding: [0x62,0x62,0x7e,0x49,0x12,0x21]
+ vpmovusqb %zmm28, (%rcx) {%k1}
+
+// CHECK: vpmovusqb %zmm28, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x22,0x7e,0x48,0x12,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpmovusqb %zmm28, 291(%rax,%r14,8)
+
+// CHECK: vpmovusqb %zmm28, 1016(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x12,0x62,0x7f]
+ vpmovusqb %zmm28, 1016(%rdx)
+
+// CHECK: vpmovusqb %zmm28, 1024(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x12,0xa2,0x00,0x04,0x00,0x00]
+ vpmovusqb %zmm28, 1024(%rdx)
+
+// CHECK: vpmovusqb %zmm28, -1024(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x12,0x62,0x80]
+ vpmovusqb %zmm28, -1024(%rdx)
+
+// CHECK: vpmovusqb %zmm28, -1032(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x12,0xa2,0xf8,0xfb,0xff,0xff]
+ vpmovusqb %zmm28, -1032(%rdx)
+
+// CHECK: vpmovqw %zmm7, (%rcx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x34,0x39]
+ vpmovqw %zmm7, (%rcx)
+
+// CHECK: vpmovqw %zmm7, (%rcx) {%k6}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x4e,0x34,0x39]
+ vpmovqw %zmm7, (%rcx) {%k6}
+
+// CHECK: vpmovqw %zmm7, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x34,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpmovqw %zmm7, 291(%rax,%r14,8)
+
+// CHECK: vpmovqw %zmm7, 2032(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x34,0x7a,0x7f]
+ vpmovqw %zmm7, 2032(%rdx)
+
+// CHECK: vpmovqw %zmm7, 2048(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x34,0xba,0x00,0x08,0x00,0x00]
+ vpmovqw %zmm7, 2048(%rdx)
+
+// CHECK: vpmovqw %zmm7, -2048(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x34,0x7a,0x80]
+ vpmovqw %zmm7, -2048(%rdx)
+
+// CHECK: vpmovqw %zmm7, -2064(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x34,0xba,0xf0,0xf7,0xff,0xff]
+ vpmovqw %zmm7, -2064(%rdx)
+
+// CHECK: vpmovsqw %zmm1, (%rcx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x24,0x09]
+ vpmovsqw %zmm1, (%rcx)
+
+// CHECK: vpmovsqw %zmm1, (%rcx) {%k5}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x4d,0x24,0x09]
+ vpmovsqw %zmm1, (%rcx) {%k5}
+
+// CHECK: vpmovsqw %zmm1, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x24,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsqw %zmm1, 291(%rax,%r14,8)
+
+// CHECK: vpmovsqw %zmm1, 2032(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x24,0x4a,0x7f]
+ vpmovsqw %zmm1, 2032(%rdx)
+
+// CHECK: vpmovsqw %zmm1, 2048(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x24,0x8a,0x00,0x08,0x00,0x00]
+ vpmovsqw %zmm1, 2048(%rdx)
+
+// CHECK: vpmovsqw %zmm1, -2048(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x24,0x4a,0x80]
+ vpmovsqw %zmm1, -2048(%rdx)
+
+// CHECK: vpmovsqw %zmm1, -2064(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x24,0x8a,0xf0,0xf7,0xff,0xff]
+ vpmovsqw %zmm1, -2064(%rdx)
+
+// CHECK: vpmovusqw %zmm25, (%rcx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x14,0x09]
+ vpmovusqw %zmm25, (%rcx)
+
+// CHECK: vpmovusqw %zmm25, (%rcx) {%k3}
+// CHECK: encoding: [0x62,0x62,0x7e,0x4b,0x14,0x09]
+ vpmovusqw %zmm25, (%rcx) {%k3}
+
+// CHECK: vpmovusqw %zmm25, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x22,0x7e,0x48,0x14,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vpmovusqw %zmm25, 291(%rax,%r14,8)
+
+// CHECK: vpmovusqw %zmm25, 2032(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x14,0x4a,0x7f]
+ vpmovusqw %zmm25, 2032(%rdx)
+
+// CHECK: vpmovusqw %zmm25, 2048(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x14,0x8a,0x00,0x08,0x00,0x00]
+ vpmovusqw %zmm25, 2048(%rdx)
+
+// CHECK: vpmovusqw %zmm25, -2048(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x14,0x4a,0x80]
+ vpmovusqw %zmm25, -2048(%rdx)
+
+// CHECK: vpmovusqw %zmm25, -2064(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x14,0x8a,0xf0,0xf7,0xff,0xff]
+ vpmovusqw %zmm25, -2064(%rdx)
+
+// CHECK: vpmovqd %zmm28, (%rcx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x35,0x21]
+ vpmovqd %zmm28, (%rcx)
+
+// CHECK: vpmovqd %zmm28, (%rcx) {%k5}
+// CHECK: encoding: [0x62,0x62,0x7e,0x4d,0x35,0x21]
+ vpmovqd %zmm28, (%rcx) {%k5}
+
+// CHECK: vpmovqd %zmm28, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x22,0x7e,0x48,0x35,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpmovqd %zmm28, 291(%rax,%r14,8)
+
+// CHECK: vpmovqd %zmm28, 4064(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x35,0x62,0x7f]
+ vpmovqd %zmm28, 4064(%rdx)
+
+// CHECK: vpmovqd %zmm28, 4096(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x35,0xa2,0x00,0x10,0x00,0x00]
+ vpmovqd %zmm28, 4096(%rdx)
+
+// CHECK: vpmovqd %zmm28, -4096(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x35,0x62,0x80]
+ vpmovqd %zmm28, -4096(%rdx)
+
+// CHECK: vpmovqd %zmm28, -4128(%rdx)
+// CHECK: encoding: [0x62,0x62,0x7e,0x48,0x35,0xa2,0xe0,0xef,0xff,0xff]
+ vpmovqd %zmm28, -4128(%rdx)
+
+// CHECK: vpmovsqd %zmm9, (%rcx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x25,0x09]
+ vpmovsqd %zmm9, (%rcx)
+
+// CHECK: vpmovsqd %zmm9, (%rcx) {%k7}
+// CHECK: encoding: [0x62,0x72,0x7e,0x4f,0x25,0x09]
+ vpmovsqd %zmm9, (%rcx) {%k7}
+
+// CHECK: vpmovsqd %zmm9, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x32,0x7e,0x48,0x25,0x8c,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsqd %zmm9, 291(%rax,%r14,8)
+
+// CHECK: vpmovsqd %zmm9, 4064(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x25,0x4a,0x7f]
+ vpmovsqd %zmm9, 4064(%rdx)
+
+// CHECK: vpmovsqd %zmm9, 4096(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x25,0x8a,0x00,0x10,0x00,0x00]
+ vpmovsqd %zmm9, 4096(%rdx)
+
+// CHECK: vpmovsqd %zmm9, -4096(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x25,0x4a,0x80]
+ vpmovsqd %zmm9, -4096(%rdx)
+
+// CHECK: vpmovsqd %zmm9, -4128(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x25,0x8a,0xe0,0xef,0xff,0xff]
+ vpmovsqd %zmm9, -4128(%rdx)
+
+// CHECK: vpmovusqd %zmm22, (%rcx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x15,0x31]
+ vpmovusqd %zmm22, (%rcx)
+
+// CHECK: vpmovusqd %zmm22, (%rcx) {%k1}
+// CHECK: encoding: [0x62,0xe2,0x7e,0x49,0x15,0x31]
+ vpmovusqd %zmm22, (%rcx) {%k1}
+
+// CHECK: vpmovusqd %zmm22, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xa2,0x7e,0x48,0x15,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpmovusqd %zmm22, 291(%rax,%r14,8)
+
+// CHECK: vpmovusqd %zmm22, 4064(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x15,0x72,0x7f]
+ vpmovusqd %zmm22, 4064(%rdx)
+
+// CHECK: vpmovusqd %zmm22, 4096(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x15,0xb2,0x00,0x10,0x00,0x00]
+ vpmovusqd %zmm22, 4096(%rdx)
+
+// CHECK: vpmovusqd %zmm22, -4096(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x15,0x72,0x80]
+ vpmovusqd %zmm22, -4096(%rdx)
+
+// CHECK: vpmovusqd %zmm22, -4128(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x15,0xb2,0xe0,0xef,0xff,0xff]
+ vpmovusqd %zmm22, -4128(%rdx)
+
+// CHECK: vpmovdb %zmm12, (%rcx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x31,0x21]
+ vpmovdb %zmm12, (%rcx)
+
+// CHECK: vpmovdb %zmm12, (%rcx) {%k3}
+// CHECK: encoding: [0x62,0x72,0x7e,0x4b,0x31,0x21]
+ vpmovdb %zmm12, (%rcx) {%k3}
+
+// CHECK: vpmovdb %zmm12, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x32,0x7e,0x48,0x31,0xa4,0xf0,0x23,0x01,0x00,0x00]
+ vpmovdb %zmm12, 291(%rax,%r14,8)
+
+// CHECK: vpmovdb %zmm12, 2032(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x31,0x62,0x7f]
+ vpmovdb %zmm12, 2032(%rdx)
+
+// CHECK: vpmovdb %zmm12, 2048(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x31,0xa2,0x00,0x08,0x00,0x00]
+ vpmovdb %zmm12, 2048(%rdx)
+
+// CHECK: vpmovdb %zmm12, -2048(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x31,0x62,0x80]
+ vpmovdb %zmm12, -2048(%rdx)
+
+// CHECK: vpmovdb %zmm12, -2064(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x31,0xa2,0xf0,0xf7,0xff,0xff]
+ vpmovdb %zmm12, -2064(%rdx)
+
+// CHECK: vpmovsdb %zmm6, (%rcx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x21,0x31]
+ vpmovsdb %zmm6, (%rcx)
+
+// CHECK: vpmovsdb %zmm6, (%rcx) {%k1}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x49,0x21,0x31]
+ vpmovsdb %zmm6, (%rcx) {%k1}
+
+// CHECK: vpmovsdb %zmm6, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x21,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsdb %zmm6, 291(%rax,%r14,8)
+
+// CHECK: vpmovsdb %zmm6, 2032(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x21,0x72,0x7f]
+ vpmovsdb %zmm6, 2032(%rdx)
+
+// CHECK: vpmovsdb %zmm6, 2048(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x21,0xb2,0x00,0x08,0x00,0x00]
+ vpmovsdb %zmm6, 2048(%rdx)
+
+// CHECK: vpmovsdb %zmm6, -2048(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x21,0x72,0x80]
+ vpmovsdb %zmm6, -2048(%rdx)
+
+// CHECK: vpmovsdb %zmm6, -2064(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x21,0xb2,0xf0,0xf7,0xff,0xff]
+ vpmovsdb %zmm6, -2064(%rdx)
+
+// CHECK: vpmovusdb %zmm23, (%rcx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x11,0x39]
+ vpmovusdb %zmm23, (%rcx)
+
+// CHECK: vpmovusdb %zmm23, (%rcx) {%k3}
+// CHECK: encoding: [0x62,0xe2,0x7e,0x4b,0x11,0x39]
+ vpmovusdb %zmm23, (%rcx) {%k3}
+
+// CHECK: vpmovusdb %zmm23, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xa2,0x7e,0x48,0x11,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpmovusdb %zmm23, 291(%rax,%r14,8)
+
+// CHECK: vpmovusdb %zmm23, 2032(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x11,0x7a,0x7f]
+ vpmovusdb %zmm23, 2032(%rdx)
+
+// CHECK: vpmovusdb %zmm23, 2048(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x11,0xba,0x00,0x08,0x00,0x00]
+ vpmovusdb %zmm23, 2048(%rdx)
+
+// CHECK: vpmovusdb %zmm23, -2048(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x11,0x7a,0x80]
+ vpmovusdb %zmm23, -2048(%rdx)
+
+// CHECK: vpmovusdb %zmm23, -2064(%rdx)
+// CHECK: encoding: [0x62,0xe2,0x7e,0x48,0x11,0xba,0xf0,0xf7,0xff,0xff]
+ vpmovusdb %zmm23, -2064(%rdx)
+
+// CHECK: vpmovdw %zmm7, (%rcx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x33,0x39]
+ vpmovdw %zmm7, (%rcx)
+
+// CHECK: vpmovdw %zmm7, (%rcx) {%k7}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x4f,0x33,0x39]
+ vpmovdw %zmm7, (%rcx) {%k7}
+
+// CHECK: vpmovdw %zmm7, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x33,0xbc,0xf0,0x23,0x01,0x00,0x00]
+ vpmovdw %zmm7, 291(%rax,%r14,8)
+
+// CHECK: vpmovdw %zmm7, 4064(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x33,0x7a,0x7f]
+ vpmovdw %zmm7, 4064(%rdx)
+
+// CHECK: vpmovdw %zmm7, 4096(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x33,0xba,0x00,0x10,0x00,0x00]
+ vpmovdw %zmm7, 4096(%rdx)
+
+// CHECK: vpmovdw %zmm7, -4096(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x33,0x7a,0x80]
+ vpmovdw %zmm7, -4096(%rdx)
+
+// CHECK: vpmovdw %zmm7, -4128(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x33,0xba,0xe0,0xef,0xff,0xff]
+ vpmovdw %zmm7, -4128(%rdx)
+
+// CHECK: vpmovsdw %zmm14, (%rcx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x23,0x31]
+ vpmovsdw %zmm14, (%rcx)
+
+// CHECK: vpmovsdw %zmm14, (%rcx) {%k6}
+// CHECK: encoding: [0x62,0x72,0x7e,0x4e,0x23,0x31]
+ vpmovsdw %zmm14, (%rcx) {%k6}
+
+// CHECK: vpmovsdw %zmm14, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x32,0x7e,0x48,0x23,0xb4,0xf0,0x23,0x01,0x00,0x00]
+ vpmovsdw %zmm14, 291(%rax,%r14,8)
+
+// CHECK: vpmovsdw %zmm14, 4064(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x23,0x72,0x7f]
+ vpmovsdw %zmm14, 4064(%rdx)
+
+// CHECK: vpmovsdw %zmm14, 4096(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x23,0xb2,0x00,0x10,0x00,0x00]
+ vpmovsdw %zmm14, 4096(%rdx)
+
+// CHECK: vpmovsdw %zmm14, -4096(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x23,0x72,0x80]
+ vpmovsdw %zmm14, -4096(%rdx)
+
+// CHECK: vpmovsdw %zmm14, -4128(%rdx)
+// CHECK: encoding: [0x62,0x72,0x7e,0x48,0x23,0xb2,0xe0,0xef,0xff,0xff]
+ vpmovsdw %zmm14, -4128(%rdx)
+
+// CHECK: vpmovusdw %zmm5, (%rcx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x13,0x29]
+ vpmovusdw %zmm5, (%rcx)
+
+// CHECK: vpmovusdw %zmm5, (%rcx) {%k3}
+// CHECK: encoding: [0x62,0xf2,0x7e,0x4b,0x13,0x29]
+ vpmovusdw %zmm5, (%rcx) {%k3}
+
+// CHECK: vpmovusdw %zmm5, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xb2,0x7e,0x48,0x13,0xac,0xf0,0x23,0x01,0x00,0x00]
+ vpmovusdw %zmm5, 291(%rax,%r14,8)
+
+// CHECK: vpmovusdw %zmm5, 4064(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x13,0x6a,0x7f]
+ vpmovusdw %zmm5, 4064(%rdx)
+
+// CHECK: vpmovusdw %zmm5, 4096(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x13,0xaa,0x00,0x10,0x00,0x00]
+ vpmovusdw %zmm5, 4096(%rdx)
+
+// CHECK: vpmovusdw %zmm5, -4096(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x13,0x6a,0x80]
+ vpmovusdw %zmm5, -4096(%rdx)
+
+// CHECK: vpmovusdw %zmm5, -4128(%rdx)
+// CHECK: encoding: [0x62,0xf2,0x7e,0x48,0x13,0xaa,0xe0,0xef,0xff,0xff]
+ vpmovusdw %zmm5, -4128(%rdx)
// CHECK: vinserti32x4
// CHECK: encoding: [0x62,0xa3,0x55,0x48,0x38,0xcd,0x01]
@@ -40,6 +3129,93 @@ vpsrad 512(%rdi, %rsi, 4), %zmm12, %zmm25
// CHECK: encoding: [0x62,0xf2,0x7d,0xc9,0x58,0xc8]
vpbroadcastd %xmm0, %zmm1 {%k1} {z}
+// CHECK: vbroadcasti32x4 {{.*}} {%k7} {z}
+// CHECK: encoding: [0x67,0x62,0xf2,0x7d,0xcf,0x5a,0x52,0x02]
+vbroadcasti32x4 0x20(%edx), %zmm2 {%k7} {z}
+
+// CHECK: vbroadcasti64x4 {{.*}} %zmm22
+// CHECK: encoding: [0x62,0xe2,0xfd,0x48,0x5b,0x72,0x02]
+vbroadcasti64x4 0x40(%rdx), %zmm22
+
// CHECK: vmovdqu64 {{.*}} {%k3}
-// CHECK: encoding: [0x62,0xf1,0xfe,0x4b,0x6f,0xc8]
-vmovdqu64 %zmm0, %zmm1 {%k3}
+// CHECK: encoding: [0x62,0xf1,0xfe,0x4b,0x7f,0x07]
+vmovdqu64 %zmm0, (%rdi) {%k3}
+
+// CHECK: vmovdqa32 {{.*}} {%k4}
+// CHECK: encoding: [0x62,0x61,0x7d,0x4c,0x6f,0x1e]
+vmovdqa32 (%rsi), %zmm27 {%k4}
+
+// CHECK: vmovd
+// CHECK: encoding: [0x62,0xe1,0x7d,0x08,0x7e,0x74,0x24,0xeb]
+vmovd %xmm22, -84(%rsp)
+
+// CHECK: vextractps
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x17,0x61,0x1f,0x02]
+vextractps $2, %xmm20, 124(%rcx)
+
+// CHECK: vaddpd {{.*}}{1to8}
+// CHECK: encoding: [0x62,0x61,0xdd,0x50,0x58,0x74,0xf7,0x40]
+vaddpd 512(%rdi, %rsi, 8) {1to8}, %zmm20, %zmm30
+
+// CHECK: vaddps {{.*}}{1to16}
+// CHECK: encoding: [0x62,0x61,0x5c,0x50,0x58,0xb4,0xf7,0x00,0x02,0x00,0x00]
+vaddps 512(%rdi, %rsi, 8) {1to16}, %zmm20, %zmm30
+
+// CHECK: vmovntdqa
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x2a,0xab,0x78,0x56,0x34,0x12]
+vmovntdqa 0x12345678(%rbx), %zmm13
+
+// CHECK: vmovntdqa
+// CHECK: encoding: [0x62,0xc2,0x7d,0x48,0x2a,0x14,0x56]
+vmovntdqa (%r14,%rdx,2), %zmm18
+
+// CHECK: vmovntdqa
+// CHECK: encoding: [0x62,0xc2,0x7d,0x48,0x2a,0x7c,0x14,0x02]
+vmovntdqa 128(%r12,%rdx), %zmm23
+
+// CHECK: vmovntdq
+// CHECK: encoding: [0x62,0x21,0x7d,0x48,0xe7,0x24,0xa9]
+vmovntdq %zmm28, (%rcx,%r13,4)
+
+// CHECK: vmovntpd
+// CHECK: encoding: [0x62,0xf1,0xfd,0x48,0x2b,0xb2,0x04,0x00,0x00,0x00]
+vmovntpd %zmm6, 4(%rdx)
+
+// CHECK: vmovntps
+// CHECK: encoding: [0x62,0x51,0x7c,0x48,0x2b,0x5c,0x8d,0x00]
+vmovntps %zmm11, (%r13,%rcx,4)
+
+// CHECK: vcmpps $14
+// CHECK: encoding: [0x62,0xb1,0x54,0x48,0xc2,0xd1,0x0e]
+vcmpgtps %zmm17, %zmm5, %k2
+
+// CHECK: vcmppd $13
+// CHECK: encoding: [0x62,0xd1,0xf5,0x40,0xc2,0x76,0x02,0x0d]
+vcmpgepd 0x80(%r14), %zmm17, %k6
+
+// CHECK: vpcmpd $1,
+// CHECK: encoding: [0x62,0x93,0x45,0x4c,0x1f,0xe8,0x01]
+vpcmpd $1, %zmm24, %zmm7, %k5{%k4}
+
+// CHECK: vpcmpuq $2,
+// CHECK: encoding: [0x62,0xf3,0xf5,0x47,0x1e,0x72,0x01,0x02]
+vpcmpuq $2, 0x40(%rdx), %zmm17, %k6{%k7}
+
+// ERR: invalid operand for instruction
+vpcmpd $1, %zmm24, %zmm7, %k5{%k0}
+
+// CHECK: vpermi2d
+// CHECK: encoding: [0x62,0x42,0x6d,0x4b,0x76,0xd6]
+vpermi2d %zmm14, %zmm2, %zmm26 {%k3}
+
+// CHECK: vpermt2pd
+// CHECK: encoding: [0x62,0xf2,0xcd,0xc6,0x7f,0xf3]
+vpermt2pd %zmm3, %zmm22, %zmm6 {%k6} {z}
+
+// CHECK: vpermi2q
+// CHECK: encoding: [0x62,0x62,0xed,0x4b,0x76,0x54,0x58,0x02]
+vpermi2q 0x80(%rax,%rbx,2), %zmm2, %zmm26 {%k3}
+
+// CHECK: vpermt2d
+// CHECK: encoding: [0x62,0x32,0x4d,0xc2,0x7e,0x24,0xad,0x05,0x00,0x00,0x00]
+vpermt2d 5(,%r13,4), %zmm22, %zmm12 {%k2} {z}
diff --git a/test/MC/X86/fixup-cpu-mode.s b/test/MC/X86/fixup-cpu-mode.s
new file mode 100644
index 000000000000..13e0d462156c
--- /dev/null
+++ b/test/MC/X86/fixup-cpu-mode.s
@@ -0,0 +1,8 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu %s -o %t
+// RUN: llvm-objdump -d %t | FileCheck %s
+
+//PR18303
+.global edata
+sub $edata, %r12 // CHECK: subq $0, %r12
+.code32
+
diff --git a/test/MC/X86/index-operations.s b/test/MC/X86/index-operations.s
new file mode 100644
index 000000000000..9f69b0b78499
--- /dev/null
+++ b/test/MC/X86/index-operations.s
@@ -0,0 +1,146 @@
+// RUN: not llvm-mc -triple x86_64-unknown-unknown --show-encoding %s 2> %t.err | FileCheck --check-prefix=64 %s
+// RUN: FileCheck --check-prefix=ERR64 < %t.err %s
+// RUN: not llvm-mc -triple i386-unknown-unknown --show-encoding %s 2> %t.err | FileCheck --check-prefix=32 %s
+// RUN: FileCheck --check-prefix=ERR32 < %t.err %s
+// RUN: not llvm-mc -triple i386-unknown-unknown-code16 --show-encoding %s 2> %t.err | FileCheck --check-prefix=16 %s
+// RUN: FileCheck --check-prefix=ERR16 < %t.err %s
+
+lodsb
+// 64: lodsb (%rsi), %al # encoding: [0xac]
+// 32: lodsb (%esi), %al # encoding: [0xac]
+// 16: lodsb (%si), %al # encoding: [0xac]
+
+lodsb (%rsi), %al
+// 64: lodsb (%rsi), %al # encoding: [0xac]
+// ERR32: 64-bit
+// ERR16: 64-bit
+
+lodsb (%esi), %al
+// 64: lodsb (%esi), %al # encoding: [0x67,0xac]
+// 32: lodsb (%esi), %al # encoding: [0xac]
+// 16: lodsb (%esi), %al # encoding: [0x67,0xac]
+
+lodsb (%si), %al
+// ERR64: invalid 16-bit base register
+// 32: lodsb (%si), %al # encoding: [0x67,0xac]
+// 16: lodsb (%si), %al # encoding: [0xac]
+
+lodsl %gs:(%esi)
+// 64: lodsl %gs:(%esi), %eax # encoding: [0x65,0x67,0xad]
+// 32: lodsl %gs:(%esi), %eax # encoding: [0x65,0xad]
+// 16: lodsl %gs:(%esi), %eax # encoding: [0x66,0x65,0x67,0xad]
+
+lodsl (%edi), %eax
+// ERR64: invalid operand
+// ERR32: invalid operand
+// ERR16: invalid operand
+
+lodsl 44(%edi), %eax
+// ERR64: invalid operand
+// ERR32: invalid operand
+// ERR16: invalid operand
+
+lods (%esi), %ax
+// 64: lodsw (%esi), %ax # encoding: [0x66,0x67,0xad]
+// 32: lodsw (%esi), %ax # encoding: [0x66,0xad]
+// 16: lodsw (%esi), %ax # encoding: [0x67,0xad]
+
+stosw
+// 64: stosw %ax, %es:(%rdi) # encoding: [0x66,0xab]
+// 32: stosw %ax, %es:(%edi) # encoding: [0x66,0xab]
+// 16: stosw %ax, %es:(%di) # encoding: [0xab]
+
+stos %eax, (%edi)
+// 64: stosl %eax, %es:(%edi) # encoding: [0x67,0xab]
+// 32: stosl %eax, %es:(%edi) # encoding: [0xab]
+// 16: stosl %eax, %es:(%edi) # encoding: [0x66,0x67,0xab]
+
+stosb %al, %fs:(%edi)
+// ERR64: invalid operand for instruction
+// ERR32: invalid operand for instruction
+// ERR16: invalid operand for instruction
+
+stosb %al, %es:(%edi)
+// 64: stosb %al, %es:(%edi) # encoding: [0x67,0xaa]
+// 32: stosb %al, %es:(%edi) # encoding: [0xaa]
+// 16: stosb %al, %es:(%edi) # encoding: [0x67,0xaa]
+
+stosq
+// 64: stosq %rax, %es:(%rdi) # encoding: [0x48,0xab]
+// ERR32: 64-bit
+// ERR16: 64-bit
+
+stos %rax, (%edi)
+// 64: stosq %rax, %es:(%edi) # encoding: [0x48,0x67,0xab]
+// ERR32: only available in 64-bit mode
+// ERR16: only available in 64-bit mode
+
+scas %es:(%edi), %al
+// 64: scasb %es:(%edi), %al # encoding: [0x67,0xae]
+// 32: scasb %es:(%edi), %al # encoding: [0xae]
+// 16: scasb %es:(%edi), %al # encoding: [0x67,0xae]
+
+scasq %es:(%edi)
+// 64: scasq %es:(%edi), %rax # encoding: [0x48,0x67,0xaf]
+// ERR32: 64-bit
+// ERR16: 64-bit
+
+scasl %es:(%edi), %al
+// ERR64: invalid operand
+// ERR32: invalid operand
+// ERR16: invalid operand
+
+scas %es:(%di), %ax
+// ERR64: invalid 16-bit base register
+// 16: scasw %es:(%di), %ax # encoding: [0xaf]
+// 32: scasw %es:(%di), %ax # encoding: [0x66,0x67,0xaf]
+
+cmpsb
+// 64: cmpsb %es:(%rdi), (%rsi) # encoding: [0xa6]
+// 32: cmpsb %es:(%edi), (%esi) # encoding: [0xa6]
+// 16: cmpsb %es:(%di), (%si) # encoding: [0xa6]
+
+cmpsw (%edi), (%esi)
+// 64: cmpsw %es:(%edi), (%esi) # encoding: [0x66,0x67,0xa7]
+// 32: cmpsw %es:(%edi), (%esi) # encoding: [0x66,0xa7]
+// 16: cmpsw %es:(%edi), (%esi) # encoding: [0x67,0xa7]
+
+cmpsb (%di), (%esi)
+// ERR64: invalid 16-bit base register
+// ERR32: mismatching source and destination
+// ERR16: mismatching source and destination
+
+cmpsl %es:(%edi), %ss:(%esi)
+// 64: cmpsl %es:(%edi), %ss:(%esi) # encoding: [0x36,0x67,0xa7]
+// 32: cmpsl %es:(%edi), %ss:(%esi) # encoding: [0x36,0xa7]
+// 16: cmpsl %es:(%edi), %ss:(%esi) # encoding: [0x66,0x36,0x67,0xa7]
+
+cmpsq (%rdi), (%rsi)
+// 64: cmpsq %es:(%rdi), (%rsi) # encoding: [0x48,0xa7]
+// ERR32: 64-bit
+// ERR16: 64-bit
+
+movsb (%esi), (%edi)
+// 64: movsb (%esi), %es:(%edi) # encoding: [0x67,0xa4]
+// 32: movsb (%esi), %es:(%edi) # encoding: [0xa4]
+// 16: movsb (%esi), %es:(%edi) # encoding: [0x67,0xa4]
+
+movsl %gs:(%esi), (%edi)
+// 64: movsl %gs:(%esi), %es:(%edi) # encoding: [0x65,0x67,0xa5]
+// 32: movsl %gs:(%esi), %es:(%edi) # encoding: [0x65,0xa5]
+// 16: movsl %gs:(%esi), %es:(%edi) # encoding: [0x66,0x65,0x67,0xa5]
+
+outsb
+// 64: outsb (%rsi), %dx # encoding: [0x6e]
+// 32: outsb (%esi), %dx # encoding: [0x6e]
+// 16: outsb (%si), %dx # encoding: [0x6e]
+
+outsw %fs:(%esi), %dx
+// 64: outsw %fs:(%esi), %dx # encoding: [0x66,0x64,0x67,0x6f]
+// 32: outsw %fs:(%esi), %dx # encoding: [0x66,0x64,0x6f]
+// 16: outsw %fs:(%esi), %dx # encoding: [0x64,0x67,0x6f]
+
+insw %dx, (%edi)
+// 64: insw %dx, %es:(%edi) # encoding: [0x66,0x67,0x6d]
+// 32: insw %dx, %es:(%edi) # encoding: [0x66,0x6d]
+// 16: insw %dx, %es:(%edi) # encoding: [0x67,0x6d]
diff --git a/test/MC/X86/intel-syntax-avx512.s b/test/MC/X86/intel-syntax-avx512.s
new file mode 100644
index 000000000000..b382994a097a
--- /dev/null
+++ b/test/MC/X86/intel-syntax-avx512.s
@@ -0,0 +1,5 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -x86-asm-syntax=intel -mcpu=knl --show-encoding %s | FileCheck %s
+
+// CHECK: vaddps (%rax), %zmm1, %zmm1
+// CHECK: encoding: [0x62,0xf1,0x74,0x48,0x58,0x08]
+vaddps zmm1, zmm1, zmmword ptr [rax]
diff --git a/test/MC/X86/intel-syntax-bitwise-ops.s b/test/MC/X86/intel-syntax-bitwise-ops.s
new file mode 100644
index 000000000000..c9c9b1d17b2b
--- /dev/null
+++ b/test/MC/X86/intel-syntax-bitwise-ops.s
@@ -0,0 +1,22 @@
+// RUN: llvm-mc -triple x86_64-unknown-unknown -x86-asm-syntax=att %s | FileCheck %s
+
+.intel_syntax
+
+// CHECK: andl $3, %ecx
+ and ecx, 1+2
+// CHECK: andl $3, %ecx
+ and ecx, 1|2
+// CHECK: andl $3, %ecx
+ and ecx, 1*3
+// CHECK: andl $1, %ecx
+ and ecx, 1&3
+// CHECK: andl $0, %ecx
+ and ecx, (1&2)
+// CHECK: andl $3, %ecx
+ and ecx, ((1)|2)
+// CHECK: andl $1, %ecx
+ and ecx, 1&2+3
+// CHECK: addl $4938, %eax
+ add eax, 9876 >> 1
+// CHECK: addl $19752, %eax
+ add eax, 9876 << 1
diff --git a/test/MC/X86/intel-syntax-directional-label.s b/test/MC/X86/intel-syntax-directional-label.s
new file mode 100644
index 000000000000..c1aa90f1923b
--- /dev/null
+++ b/test/MC/X86/intel-syntax-directional-label.s
@@ -0,0 +1,17 @@
+// RUN: llvm-mc -triple x86_64-apple-darwin -x86-asm-syntax=intel %s | FileCheck %s
+// rdar://14961158
+ .text
+ .align 16
+ .globl FUNCTION_NAME
+ .private_extern FUNCTION_NAME
+FUNCTION_NAME:
+ .intel_syntax
+ cmp rdi, 1
+ jge 1f
+// CHECK: jge Ltmp0
+ add rdi, 2
+// CHECK: addq $2, %rdi
+1:
+// CHECK: Ltmp0:
+ add rdi, 1
+ ret
diff --git a/test/MC/X86/intel-syntax-invalid-basereg.s b/test/MC/X86/intel-syntax-invalid-basereg.s
new file mode 100644
index 000000000000..fe026e184086
--- /dev/null
+++ b/test/MC/X86/intel-syntax-invalid-basereg.s
@@ -0,0 +1,7 @@
+// RUN: not llvm-mc -triple x86_64-unknown-unknown %s 2> %t.err
+// RUN: FileCheck < %t.err %s
+
+.intel_syntax
+
+// CHECK: error: base register is 64-bit, but index register is not
+ lea rax, [rdi + edx]
diff --git a/test/MC/X86/intel-syntax-invalid-scale.s b/test/MC/X86/intel-syntax-invalid-scale.s
new file mode 100644
index 000000000000..69c6add06d5b
--- /dev/null
+++ b/test/MC/X86/intel-syntax-invalid-scale.s
@@ -0,0 +1,11 @@
+// RUN: not llvm-mc -triple x86_64-unknown-unknown %s 2> %t.err
+// RUN: FileCheck < %t.err %s
+
+.intel_syntax
+
+// CHECK: error: scale factor in address must be 1, 2, 4 or 8
+ lea rax, [rdi + rdx*64]
+// CHECK: error: scale factor in address must be 1, 2, 4 or 8
+ lea rax, [rdi + rdx*32]
+// CHECK: error: scale factor in address must be 1, 2, 4 or 8
+ lea rax, [rdi + rdx*16]
diff --git a/test/MC/X86/intel-syntax.s b/test/MC/X86/intel-syntax.s
index 50f29e0a64c9..796891880b12 100644
--- a/test/MC/X86/intel-syntax.s
+++ b/test/MC/X86/intel-syntax.s
@@ -585,6 +585,12 @@ fsubr ST(1)
fdiv ST(1)
fdivr ST(1)
+
+// CHECK: fxsaveq (%rax)
+// CHECK: fxrstorq (%rax)
+fxsave64 opaque ptr [rax]
+fxrstor64 opaque ptr [rax]
+
.bss
.globl _g0
.text
@@ -593,3 +599,11 @@ fdivr ST(1)
// CHECK: movq _g0+8, %rcx
mov rbx, qword ptr [_g0]
mov rcx, qword ptr [_g0 + 8]
+
+"?half@?0??bar@@YAXXZ@4NA":
+ .quad 4602678819172646912
+
+fadd "?half@?0??bar@@YAXXZ@4NA"
+fadd "?half@?0??bar@@YAXXZ@4NA"@IMGREL
+// CHECK: fadds "?half@?0??bar@@YAXXZ@4NA"
+// CHECK: fadds "?half@?0??bar@@YAXXZ@4NA"@IMGREL32
diff --git a/test/MC/X86/lit.local.cfg b/test/MC/X86/lit.local.cfg
index 19840aa7574c..c8625f4d9d24 100644
--- a/test/MC/X86/lit.local.cfg
+++ b/test/MC/X86/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/MC/X86/no-elf-compact-unwind.s b/test/MC/X86/no-elf-compact-unwind.s
new file mode 100644
index 000000000000..4e9236b87971
--- /dev/null
+++ b/test/MC/X86/no-elf-compact-unwind.s
@@ -0,0 +1,16 @@
+// RUN: llvm-mc < %s -filetype=obj -triple x86_64-apple-macosx10.8.0 | llvm-readobj -s | FileCheck -check-prefix=MACHO %s
+// RUN: llvm-mc < %s -filetype=obj -triple x86_64-apple-ios7.0.0 | llvm-readobj -s | FileCheck -check-prefix=MACHO %s
+// RUN: llvm-mc < %s -filetype=obj -triple x86_64-unknown-linux | llvm-readobj -s | FileCheck -check-prefix=ELF %s
+
+ .globl __Z3barv
+ .align 4, 0x90
+__Z3barv:
+ .cfi_startproc
+ pushq %rax
+ .cfi_def_cfa_offset 16
+ popq %rax
+ retq
+ .cfi_endproc
+
+// MACHO: Name: __compact_unwind
+// ELF-NOT: __compact_unwind
diff --git a/test/MC/X86/padlock.s b/test/MC/X86/padlock.s
index 5c523e7f68c0..698581759f51 100644
--- a/test/MC/X86/padlock.s
+++ b/test/MC/X86/padlock.s
@@ -3,55 +3,3 @@
xstore
// CHECK: xstore
// CHECK: encoding: [0x0f,0xa7,0xc0]
-
- xstorerng
-// CHECK: xstore
-// CHECK: encoding: [0x0f,0xa7,0xc0]
-
- rep xcryptecb
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: xcryptecb
-// CHECK: encoding: [0x0f,0xa7,0xc8]
-
- rep xcryptcbc
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: xcryptcbc
-// CHECK: encoding: [0x0f,0xa7,0xd0]
-
- rep xcryptctr
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: xcryptctr
-// CHECK: encoding: [0x0f,0xa7,0xd8]
-
- rep xcryptcfb
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: xcryptcfb
-// CHECK: encoding: [0x0f,0xa7,0xe0]
-
- rep xcryptofb
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: xcryptofb
-// CHECK: encoding: [0x0f,0xa7,0xe8]
-
- rep xsha1
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: xsha1
-// CHECK: encoding: [0x0f,0xa6,0xc8]
-
- rep xsha256
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: xsha256
-// CHECK: encoding: [0x0f,0xa6,0xd0]
-
- rep montmul
-// CHECK: rep
-// CHECK: encoding: [0xf3]
-// CHECK: montmul
-// CHECK: encoding: [0x0f,0xa6,0xc0]
diff --git a/test/MC/X86/relax-insn.s b/test/MC/X86/relax-insn.s
new file mode 100644
index 000000000000..510766c82840
--- /dev/null
+++ b/test/MC/X86/relax-insn.s
@@ -0,0 +1,5 @@
+// RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o %t
+// RUN: llvm-objdump -d %t | FileCheck %s
+
+.global foo
+pushw $foo // CHECK: pushw
diff --git a/test/MC/X86/reloc-undef-global.s b/test/MC/X86/reloc-undef-global.s
new file mode 100644
index 000000000000..a4854d4a3593
--- /dev/null
+++ b/test/MC/X86/reloc-undef-global.s
@@ -0,0 +1,20 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux %s -o - | llvm-readobj -r | FileCheck --check-prefix=ELF %s
+// RUN: llvm-mc -filetype=obj -triple x86_64-apple-darwin %s -o - | llvm-readobj -r | FileCheck --check-prefix=MACHO %s
+
+
+bar = foo + 4
+ .globl bar
+ .long bar
+
+// ELF: Relocations [
+// ELF-NEXT: Section (2) .rela.text {
+// ELF-NEXT: 0x0 R_X86_64_32 foo 0x4
+// ELF-NEXT: }
+// ELF-NEXT: ]
+
+
+// MACHO: Relocations [
+// MACHO: Section __text {
+// MACHO: 0x0 0 2 1 X86_64_RELOC_UNSIGNED 0 bar
+// MACHO: }
+// MACHO: ]
diff --git a/test/MC/X86/ret.s b/test/MC/X86/ret.s
new file mode 100644
index 000000000000..bac669b2561b
--- /dev/null
+++ b/test/MC/X86/ret.s
@@ -0,0 +1,114 @@
+// RUN: not llvm-mc -triple x86_64-unknown-unknown --show-encoding %s 2> %t.err | FileCheck --check-prefix=64 %s
+// RUN: FileCheck --check-prefix=ERR64 < %t.err %s
+// RUN: not llvm-mc -triple i386-unknown-unknown --show-encoding %s 2> %t.err | FileCheck --check-prefix=32 %s
+// RUN: FileCheck --check-prefix=ERR32 < %t.err %s
+// RUN: not llvm-mc -triple i386-unknown-unknown-code16 --show-encoding %s 2> %t.err | FileCheck --check-prefix=16 %s
+// RUN: FileCheck --check-prefix=ERR16 < %t.err %s
+
+ ret
+// 64: retq
+// 64: encoding: [0xc3]
+// 32: retl
+// 32: encoding: [0xc3]
+// 16: retw
+// 16: encoding: [0xc3]
+ retw
+// 64: retw
+// 64: encoding: [0x66,0xc3]
+// 32: retw
+// 32: encoding: [0x66,0xc3]
+// 16: retw
+// 16: encoding: [0xc3]
+ retl
+// ERR64: error: instruction requires: Not 64-bit mode
+// 32: retl
+// 32: encoding: [0xc3]
+// 16: retl
+// 16: encoding: [0x66,0xc3]
+ retq
+// 64: retq
+// 64: encoding: [0xc3]
+// ERR32: error: instruction requires: 64-bit mode
+// ERR16: error: instruction requires: 64-bit mode
+
+ ret $0
+// 64: retq $0
+// 64: encoding: [0xc2,0x00,0x00]
+// 32: retl $0
+// 32: encoding: [0xc2,0x00,0x00]
+// 16: retw $0
+// 16: encoding: [0xc2,0x00,0x00]
+ retw $0
+// 64: retw $0
+// 64: encoding: [0x66,0xc2,0x00,0x00]
+// 32: retw $0
+// 32: encoding: [0x66,0xc2,0x00,0x00]
+// 16: retw $0
+// 16: encoding: [0xc2,0x00,0x00]
+ retl $0
+// ERR64: error: instruction requires: Not 64-bit mode
+// 32: retl $0
+// 32: encoding: [0xc2,0x00,0x00]
+// 16: retl $0
+// 16: encoding: [0x66,0xc2,0x00,0x00]
+ retq $0
+// 64: retq $0
+// 64: encoding: [0xc2,0x00,0x00]
+// ERR32: error: instruction requires: 64-bit mode
+// ERR16: error: instruction requires: 64-bit mode
+
+ lret
+// 64: lretl
+// 64: encoding: [0xcb]
+// 32: lretl
+// 32: encoding: [0xcb]
+// 16: lretw
+// 16: encoding: [0xcb]
+ lretw
+// 64: lretw
+// 64: encoding: [0x66,0xcb]
+// 32: lretw
+// 32: encoding: [0x66,0xcb]
+// 16: lretw
+// 16: encoding: [0xcb]
+ lretl
+// 64: lretl
+// 64: encoding: [0xcb]
+// 32: lretl
+// 32: encoding: [0xcb]
+// 16: lretl
+// 16: encoding: [0x66,0xcb]
+ lretq
+// 64: lretq
+// 64: encoding: [0x48,0xcb]
+// ERR32: error: instruction requires: 64-bit mode
+// ERR16: error: instruction requires: 64-bit mode
+
+ lret $0
+// 64: lretl $0
+// 64: encoding: [0xca,0x00,0x00]
+// 32: lretl $0
+// 32: encoding: [0xca,0x00,0x00]
+// 16: lretw $0
+// 16: encoding: [0xca,0x00,0x00]
+ lretw $0
+// 64: lretw $0
+// 64: encoding: [0x66,0xca,0x00,0x00]
+// 32: lretw $0
+// 32: encoding: [0x66,0xca,0x00,0x00]
+// 16: lretw $0
+// 16: encoding: [0xca,0x00,0x00]
+ lretl $0
+// 64: lretl $0
+// 64: encoding: [0xca,0x00,0x00]
+// 32: lretl $0
+// 32: encoding: [0xca,0x00,0x00]
+// 16: lretl $0
+// 16: encoding: [0x66,0xca,0x00,0x00]
+ lretq $0
+// 64: lretq $0
+// 64: encoding: [0x48,0xca,0x00,0x00]
+// ERR32: error: instruction requires: 64-bit mode
+// ERR16: error: instruction requires: 64-bit mode
+
+
diff --git a/test/MC/X86/stackmap-nops.ll b/test/MC/X86/stackmap-nops.ll
new file mode 100644
index 000000000000..98d17ea6a62b
--- /dev/null
+++ b/test/MC/X86/stackmap-nops.ll
@@ -0,0 +1,47 @@
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim -filetype=obj %s -o - | llvm-objdump -d - | FileCheck %s
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim -filetype=asm %s -o - | llvm-mc -triple=x86_64-apple-darwin -mcpu=corei7 -filetype=obj - | llvm-objdump -d - | FileCheck %s
+
+define void @nop_test() {
+entry:
+; CHECK: 0: 55
+; CHECK: 1: 48 89 e5
+
+; CHECK: 4: 90
+; CHECK: 5: 66 90
+; CHECK: 7: 0f 1f 00
+; CHECK: a: 0f 1f 40 08
+; CHECK: e: 0f 1f 44 00 08
+; CHECK: 13: 66 0f 1f 44 00 08
+; CHECK: 19: 0f 1f 80 00 02 00 00
+; CHECK: 20: 0f 1f 84 00 00 02 00 00
+; CHECK: 28: 66 0f 1f 84 00 00 02 00 00
+; CHECK: 31: 2e 66 0f 1f 84 00 00 02 00 00
+; CHECK: 3b: 66 2e 66 0f 1f 84 00 00 02 00 00
+; CHECK: 46: 66 66 2e 66 0f 1f 84 00 00 02 00 00
+; CHECK: 52: 66 66 66 2e 66 0f 1f 84 00 00 02 00 00
+; CHECK: 5f: 66 66 66 66 2e 66 0f 1f 84 00 00 02 00 00
+; CHECK: 6d: 66 66 66 66 66 2e 66 0f 1f 84 00 00 02 00 00
+
+; CHECK: 7c: 5d
+; CHECK: 7d: c3
+
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 0, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 1)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 2, i32 2)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 3)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 4)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 5, i32 5)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 6, i32 6)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 7, i32 7)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 8, i32 8)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 9, i32 9)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 10, i32 10)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 11, i32 11)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 12)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 13, i32 13)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 14)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 15)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
diff --git a/test/MC/X86/variant-diagnostics.s b/test/MC/X86/variant-diagnostics.s
new file mode 100644
index 000000000000..bf6a7c772b47
--- /dev/null
+++ b/test/MC/X86/variant-diagnostics.s
@@ -0,0 +1,11 @@
+# RUN: not llvm-mc -triple i386-linux-gnu -filetype asm -o /dev/null 2>&1 %s \
+# RUN: | FileCheck %s
+
+ .text
+
+function:
+ call external@invalid
+
+# CHECK: error: invalid variant 'invalid'
+# CHECK: call external@invalid
+# CHECK: ^
diff --git a/test/MC/X86/x86-16.s b/test/MC/X86/x86-16.s
new file mode 100644
index 000000000000..1f87c8159f67
--- /dev/null
+++ b/test/MC/X86/x86-16.s
@@ -0,0 +1,949 @@
+// RUN: llvm-mc -triple i386-unknown-unknown-code16 --show-encoding %s | FileCheck %s
+
+ movl $0x12345678, %ebx
+// CHECK: movl
+// CHECK: encoding: [0x66,0xbb,0x78,0x56,0x34,0x12]
+ pause
+// CHECK: pause
+// CHECK: encoding: [0xf3,0x90]
+ sfence
+// CHECK: sfence
+// CHECK: encoding: [0x0f,0xae,0xf8]
+ lfence
+// CHECK: lfence
+// CHECK: encoding: [0x0f,0xae,0xe8]
+ mfence
+ stgi
+// CHECK: stgi
+// CHECK: encoding: [0x0f,0x01,0xdc]
+ clgi
+// CHECK: clgi
+// CHECK: encoding: [0x0f,0x01,0xdd]
+
+ rdtscp
+// CHECK: rdtscp
+// CHECK: encoding: [0x0f,0x01,0xf9]
+
+
+// CHECK: movl %eax, 16(%ebp) # encoding: [0x67,0x66,0x89,0x45,0x10]
+ movl %eax, 16(%ebp)
+// CHECK: movl %eax, -16(%ebp) # encoding: [0x67,0x66,0x89,0x45,0xf0]
+ movl %eax, -16(%ebp)
+
+// CHECK: testb %bl, %cl # encoding: [0x84,0xcb]
+ testb %bl, %cl
+
+// CHECK: cmpl %eax, %ebx # encoding: [0x66,0x39,0xc3]
+ cmpl %eax, %ebx
+
+// CHECK: addw %ax, %ax # encoding: [0x01,0xc0]
+ addw %ax, %ax
+
+// CHECK: shrl %eax # encoding: [0x66,0xd1,0xe8]
+ shrl $1, %eax
+
+// CHECK: shll %eax # encoding: [0x66,0xd1,0xe0]
+ sall $1, %eax
+// CHECK: shll %eax # encoding: [0x66,0xd1,0xe0]
+ sal $1, %eax
+
+// moffset forms of moves
+
+// CHECK: movb 0, %al # encoding: [0xa0,0x00,0x00]
+movb 0, %al
+
+// CHECK: movw 0, %ax # encoding: [0xa1,0x00,0x00]
+movw 0, %ax
+
+// CHECK: movl 0, %eax # encoding: [0x66,0xa1,0x00,0x00]
+movl 0, %eax
+
+into
+// CHECK: into
+// CHECK: encoding: [0xce]
+int3
+// CHECK: int3
+// CHECK: encoding: [0xcc]
+int $4
+// CHECK: int $4
+// CHECK: encoding: [0xcd,0x04]
+int $255
+// CHECK: int $255
+// CHECK: encoding: [0xcd,0xff]
+
+// CHECK: pushfw # encoding: [0x9c]
+ pushf
+// CHECK: pushfl # encoding: [0x66,0x9c]
+ pushfl
+// CHECK: popfw # encoding: [0x9d]
+ popf
+// CHECK: popfl # encoding: [0x66,0x9d]
+ popfl
+
+retl
+// CHECK: ret
+// CHECK: encoding: [0x66,0xc3]
+
+// CHECK: cmoval %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x47,0xd0]
+ cmoval %eax,%edx
+
+// CHECK: cmovael %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x43,0xd0]
+ cmovael %eax,%edx
+
+// CHECK: cmovbel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x46,0xd0]
+ cmovbel %eax,%edx
+
+// CHECK: cmovbl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x42,0xd0]
+ cmovbl %eax,%edx
+
+// CHECK: cmovbw %bx, %bx
+cmovnae %bx,%bx
+
+
+// CHECK: cmovbel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x46,0xd0]
+ cmovbel %eax,%edx
+
+// CHECK: cmovbl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x42,0xd0]
+ cmovcl %eax,%edx
+
+// CHECK: cmovel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x44,0xd0]
+ cmovel %eax,%edx
+
+// CHECK: cmovgl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4f,0xd0]
+ cmovgl %eax,%edx
+
+// CHECK: cmovgel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4d,0xd0]
+ cmovgel %eax,%edx
+
+// CHECK: cmovll %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4c,0xd0]
+ cmovll %eax,%edx
+
+// CHECK: cmovlel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4e,0xd0]
+ cmovlel %eax,%edx
+
+// CHECK: cmovbel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x46,0xd0]
+ cmovnal %eax,%edx
+
+// CHECK: cmovnel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
+ cmovnel %eax,%edx
+
+// CHECK: cmovael %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x43,0xd0]
+ cmovnbl %eax,%edx
+
+// CHECK: cmoval %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x47,0xd0]
+ cmovnbel %eax,%edx
+
+// CHECK: cmovael %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x43,0xd0]
+ cmovncl %eax,%edx
+
+// CHECK: cmovnel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
+ cmovnel %eax,%edx
+
+// CHECK: cmovlel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4e,0xd0]
+ cmovngl %eax,%edx
+
+// CHECK: cmovgel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4d,0xd0]
+ cmovnl %eax,%edx
+
+// CHECK: cmovnel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
+ cmovnel %eax,%edx
+
+// CHECK: cmovlel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4e,0xd0]
+ cmovngl %eax,%edx
+
+// CHECK: cmovll %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4c,0xd0]
+ cmovngel %eax,%edx
+
+// CHECK: cmovgel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4d,0xd0]
+ cmovnll %eax,%edx
+
+// CHECK: cmovgl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4f,0xd0]
+ cmovnlel %eax,%edx
+
+// CHECK: cmovnol %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x41,0xd0]
+ cmovnol %eax,%edx
+
+// CHECK: cmovnpl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4b,0xd0]
+ cmovnpl %eax,%edx
+
+// CHECK: cmovnsl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x49,0xd0]
+ cmovnsl %eax,%edx
+
+// CHECK: cmovnel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x45,0xd0]
+ cmovnzl %eax,%edx
+
+// CHECK: cmovol %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x40,0xd0]
+ cmovol %eax,%edx
+
+// CHECK: cmovpl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x4a,0xd0]
+ cmovpl %eax,%edx
+
+// CHECK: cmovsl %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x48,0xd0]
+ cmovsl %eax,%edx
+
+// CHECK: cmovel %eax, %edx
+// CHECK: encoding: [0x66,0x0f,0x44,0xd0]
+ cmovzl %eax,%edx
+
+// CHECK: fmul %st(0)
+// CHECK: encoding: [0xd8,0xc8]
+ fmul %st(0), %st
+
+// CHECK: fadd %st(0)
+// CHECK: encoding: [0xd8,0xc0]
+ fadd %st(0), %st
+
+// CHECK: fsub %st(0)
+// CHECK: encoding: [0xd8,0xe0]
+ fsub %st(0), %st
+
+// CHECK: fsubr %st(0)
+// CHECK: encoding: [0xd8,0xe8]
+ fsubr %st(0), %st
+
+// CHECK: fdivr %st(0)
+// CHECK: encoding: [0xd8,0xf8]
+ fdivr %st(0), %st
+
+// CHECK: fdiv %st(0)
+// CHECK: encoding: [0xd8,0xf0]
+ fdiv %st(0), %st
+
+// CHECK: movl %cs, %eax
+// CHECK: encoding: [0x66,0x8c,0xc8]
+ movl %cs, %eax
+
+// CHECK: movw %cs, %ax
+// CHECK: encoding: [0x8c,0xc8]
+ movw %cs, %ax
+
+// CHECK: movl %cs, (%eax)
+// CHECK: encoding: [0x67,0x66,0x8c,0x08]
+ movl %cs, (%eax)
+
+// CHECK: movw %cs, (%eax)
+// CHECK: encoding: [0x67,0x8c,0x08]
+ movw %cs, (%eax)
+
+// CHECK: movl %eax, %cs
+// CHECK: encoding: [0x66,0x8e,0xc8]
+ movl %eax, %cs
+
+// CHECK: movl (%eax), %cs
+// CHECK: encoding: [0x67,0x66,0x8e,0x08]
+ movl (%eax), %cs
+
+// CHECK: movw (%eax), %cs
+// CHECK: encoding: [0x67,0x8e,0x08]
+ movw (%eax), %cs
+
+// CHECK: movl %cr0, %eax
+// CHECK: encoding: [0x0f,0x20,0xc0]
+ movl %cr0,%eax
+
+// CHECK: movl %cr1, %eax
+// CHECK: encoding: [0x0f,0x20,0xc8]
+ movl %cr1,%eax
+
+// CHECK: movl %cr2, %eax
+// CHECK: encoding: [0x0f,0x20,0xd0]
+ movl %cr2,%eax
+
+// CHECK: movl %cr3, %eax
+// CHECK: encoding: [0x0f,0x20,0xd8]
+ movl %cr3,%eax
+
+// CHECK: movl %cr4, %eax
+// CHECK: encoding: [0x0f,0x20,0xe0]
+ movl %cr4,%eax
+
+// CHECK: movl %dr0, %eax
+// CHECK: encoding: [0x0f,0x21,0xc0]
+ movl %dr0,%eax
+
+// CHECK: movl %dr1, %eax
+// CHECK: encoding: [0x0f,0x21,0xc8]
+ movl %dr1,%eax
+
+// CHECK: movl %dr1, %eax
+// CHECK: encoding: [0x0f,0x21,0xc8]
+ movl %dr1,%eax
+
+// CHECK: movl %dr2, %eax
+// CHECK: encoding: [0x0f,0x21,0xd0]
+ movl %dr2,%eax
+
+// CHECK: movl %dr3, %eax
+// CHECK: encoding: [0x0f,0x21,0xd8]
+ movl %dr3,%eax
+
+// CHECK: movl %dr4, %eax
+// CHECK: encoding: [0x0f,0x21,0xe0]
+ movl %dr4,%eax
+
+// CHECK: movl %dr5, %eax
+// CHECK: encoding: [0x0f,0x21,0xe8]
+ movl %dr5,%eax
+
+// CHECK: movl %dr6, %eax
+// CHECK: encoding: [0x0f,0x21,0xf0]
+ movl %dr6,%eax
+
+// CHECK: movl %dr7, %eax
+// CHECK: encoding: [0x0f,0x21,0xf8]
+ movl %dr7,%eax
+
+// CHECK: wait
+// CHECK: encoding: [0x9b]
+ fwait
+
+// CHECK: [0x66,0x65,0xa1,0x7c,0x00]
+ movl %gs:124, %eax
+
+// CHECK: pusha
+// CHECK: encoding: [0x60]
+ pusha
+
+// CHECK: popa
+// CHECK: encoding: [0x61]
+ popa
+
+// CHECK: pushaw
+// CHECK: encoding: [0x60]
+ pushaw
+
+// CHECK: popaw
+// CHECK: encoding: [0x61]
+ popaw
+
+// CHECK: pushal
+// CHECK: encoding: [0x66,0x60]
+ pushal
+
+// CHECK: popal
+// CHECK: encoding: [0x66,0x61]
+ popal
+
+// CHECK: jmpw *8(%eax)
+// CHECK: encoding: [0x67,0xff,0x60,0x08]
+ jmp *8(%eax)
+
+// CHECK: jmpl *8(%eax)
+// CHECK: encoding: [0x67,0x66,0xff,0x60,0x08]
+ jmpl *8(%eax)
+
+// CHECK: lcalll $2, $4660
+// CHECK: encoding: [0x66,0x9a,0x34,0x12,0x00,0x00,0x02,0x00]
+lcalll $0x2, $0x1234
+
+
+L1:
+ jcxz L1
+// CHECK: jcxz L1
+// CHECK: encoding: [0xe3,A]
+ jecxz L1
+// CHECK: jecxz L1
+// CHECK: encoding: [0x67,0xe3,A]
+
+iret
+// CHECK: iretw
+// CHECK: encoding: [0xcf]
+iretw
+// CHECK: iretw
+// CHECK: encoding: [0xcf]
+iretl
+// CHECK: iretl
+// CHECK: encoding: [0x66,0xcf]
+
+sysret
+// CHECK: sysretl
+// CHECK: encoding: [0x0f,0x07]
+sysretl
+// CHECK: sysretl
+// CHECK: encoding: [0x0f,0x07]
+
+testl %ecx, -24(%ebp)
+// CHECK: testl -24(%ebp), %ecx
+testl -24(%ebp), %ecx
+// CHECK: testl -24(%ebp), %ecx
+
+
+push %cs
+// CHECK: pushw %cs
+// CHECK: encoding: [0x0e]
+push %ds
+// CHECK: pushw %ds
+// CHECK: encoding: [0x1e]
+push %ss
+// CHECK: pushw %ss
+// CHECK: encoding: [0x16]
+push %es
+// CHECK: pushw %es
+// CHECK: encoding: [0x06]
+push %fs
+// CHECK: pushw %fs
+// CHECK: encoding: [0x0f,0xa0]
+push %gs
+// CHECK: pushw %gs
+// CHECK: encoding: [0x0f,0xa8]
+
+pushw %cs
+// CHECK: pushw %cs
+// CHECK: encoding: [0x0e]
+pushw %ds
+// CHECK: pushw %ds
+// CHECK: encoding: [0x1e]
+pushw %ss
+// CHECK: pushw %ss
+// CHECK: encoding: [0x16]
+pushw %es
+// CHECK: pushw %es
+// CHECK: encoding: [0x06]
+pushw %fs
+// CHECK: pushw %fs
+// CHECK: encoding: [0x0f,0xa0]
+pushw %gs
+// CHECK: pushw %gs
+// CHECK: encoding: [0x0f,0xa8]
+
+pushl %cs
+// CHECK: pushl %cs
+// CHECK: encoding: [0x66,0x0e]
+pushl %ds
+// CHECK: pushl %ds
+// CHECK: encoding: [0x66,0x1e]
+pushl %ss
+// CHECK: pushl %ss
+// CHECK: encoding: [0x66,0x16]
+pushl %es
+// CHECK: pushl %es
+// CHECK: encoding: [0x66,0x06]
+pushl %fs
+// CHECK: pushl %fs
+// CHECK: encoding: [0x66,0x0f,0xa0]
+pushl %gs
+// CHECK: pushl %gs
+// CHECK: encoding: [0x66,0x0f,0xa8]
+
+pop %ss
+// CHECK: popw %ss
+// CHECK: encoding: [0x17]
+pop %ds
+// CHECK: popw %ds
+// CHECK: encoding: [0x1f]
+pop %es
+// CHECK: popw %es
+// CHECK: encoding: [0x07]
+
+popl %ss
+// CHECK: popl %ss
+// CHECK: encoding: [0x66,0x17]
+popl %ds
+// CHECK: popl %ds
+// CHECK: encoding: [0x66,0x1f]
+popl %es
+// CHECK: popl %es
+// CHECK: encoding: [0x66,0x07]
+
+pushfd
+// CHECK: pushfl
+popfd
+// CHECK: popfl
+pushfl
+// CHECK: pushfl
+popfl
+// CHECK: popfl
+
+
+ setc %bl
+ setnae %bl
+ setnb %bl
+ setnc %bl
+ setna %bl
+ setnbe %bl
+ setpe %bl
+ setpo %bl
+ setnge %bl
+ setnl %bl
+ setng %bl
+ setnle %bl
+
+ setneb %cl // CHECK: setne %cl
+ setcb %bl // CHECK: setb %bl
+ setnaeb %bl // CHECK: setb %bl
+
+
+// CHECK: lcalll $31438, $31438
+// CHECK: lcalll $31438, $31438
+// CHECK: ljmpl $31438, $31438
+// CHECK: ljmpl $31438, $31438
+
+calll $0x7ace,$0x7ace
+lcalll $0x7ace,$0x7ace
+jmpl $0x7ace,$0x7ace
+ljmpl $0x7ace,$0x7ace
+
+// CHECK: lcallw $31438, $31438
+// CHECK: lcallw $31438, $31438
+// CHECK: ljmpw $31438, $31438
+// CHECK: ljmpw $31438, $31438
+
+callw $0x7ace,$0x7ace
+lcallw $0x7ace,$0x7ace
+jmpw $0x7ace,$0x7ace
+ljmpw $0x7ace,$0x7ace
+
+// CHECK: lcallw $31438, $31438
+// CHECK: lcallw $31438, $31438
+// CHECK: ljmpw $31438, $31438
+// CHECK: ljmpw $31438, $31438
+
+call $0x7ace,$0x7ace
+lcall $0x7ace,$0x7ace
+jmp $0x7ace,$0x7ace
+ljmp $0x7ace,$0x7ace
+
+// CHECK: calll a
+ calll a
+
+// CHECK: incb %al # encoding: [0xfe,0xc0]
+ incb %al
+
+// CHECK: incw %ax # encoding: [0x40]
+ incw %ax
+
+// CHECK: incl %eax # encoding: [0x66,0x40]
+ incl %eax
+
+// CHECK: decb %al # encoding: [0xfe,0xc8]
+ decb %al
+
+// CHECK: decw %ax # encoding: [0x48]
+ decw %ax
+
+// CHECK: decl %eax # encoding: [0x66,0x48]
+ decl %eax
+
+// CHECK: pshufw $14, %mm4, %mm0 # encoding: [0x0f,0x70,0xc4,0x0e]
+pshufw $14, %mm4, %mm0
+
+// CHECK: pshufw $90, %mm4, %mm0 # encoding: [0x0f,0x70,0xc4,0x5a]
+pshufw $90, %mm4, %mm0
+
+// CHECK: aaa
+// CHECK: encoding: [0x37]
+ aaa
+
+// CHECK: aad $1
+// CHECK: encoding: [0xd5,0x01]
+ aad $1
+
+// CHECK: aad
+// CHECK: encoding: [0xd5,0x0a]
+ aad $0xA
+
+// CHECK: aad
+// CHECK: encoding: [0xd5,0x0a]
+ aad
+
+// CHECK: aam $2
+// CHECK: encoding: [0xd4,0x02]
+ aam $2
+
+// CHECK: aam
+// CHECK: encoding: [0xd4,0x0a]
+ aam $0xA
+
+// CHECK: aam
+// CHECK: encoding: [0xd4,0x0a]
+ aam
+
+// CHECK: aas
+// CHECK: encoding: [0x3f]
+ aas
+
+// CHECK: daa
+// CHECK: encoding: [0x27]
+ daa
+
+// CHECK: das
+// CHECK: encoding: [0x2f]
+ das
+
+// CHECK: retw $31438
+// CHECK: encoding: [0xc2,0xce,0x7a]
+ retw $0x7ace
+
+// CHECK: lretw $31438
+// CHECK: encoding: [0xca,0xce,0x7a]
+ lretw $0x7ace
+
+// CHECK: retw $31438
+// CHECK: encoding: [0xc2,0xce,0x7a]
+ ret $0x7ace
+
+// CHECK: lretw $31438
+// CHECK: encoding: [0xca,0xce,0x7a]
+ lret $0x7ace
+
+// CHECK: retl $31438
+// CHECK: encoding: [0x66,0xc2,0xce,0x7a]
+ retl $0x7ace
+
+// CHECK: lretl $31438
+// CHECK: encoding: [0x66,0xca,0xce,0x7a]
+ lretl $0x7ace
+
+// CHECK: bound 2(%eax), %bx
+// CHECK: encoding: [0x67,0x62,0x58,0x02]
+ bound 2(%eax),%bx
+
+// CHECK: bound 4(%ebx), %ecx
+// CHECK: encoding: [0x67,0x66,0x62,0x4b,0x04]
+ bound 4(%ebx),%ecx
+
+// CHECK: arpl %bx, %bx
+// CHECK: encoding: [0x63,0xdb]
+ arpl %bx,%bx
+
+// CHECK: arpl %bx, 6(%ecx)
+// CHECK: encoding: [0x67,0x63,0x59,0x06]
+ arpl %bx,6(%ecx)
+
+// CHECK: lgdtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x50,0x04]
+ lgdtw 4(%eax)
+
+// CHECK: lgdtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x50,0x04]
+ lgdt 4(%eax)
+
+// CHECK: lgdtl 4(%eax)
+// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x50,0x04]
+ lgdtl 4(%eax)
+
+// CHECK: lidtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x58,0x04]
+ lidtw 4(%eax)
+
+// CHECK: lidtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x58,0x04]
+ lidt 4(%eax)
+
+// CHECK: lidtl 4(%eax)
+// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x58,0x04]
+ lidtl 4(%eax)
+
+// CHECK: sgdtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x40,0x04]
+ sgdtw 4(%eax)
+
+// CHECK: sgdtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x40,0x04]
+ sgdt 4(%eax)
+
+// CHECK: sgdtl 4(%eax)
+// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x40,0x04]
+ sgdtl 4(%eax)
+
+// CHECK: sidtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x48,0x04]
+ sidtw 4(%eax)
+
+// CHECK: sidtw 4(%eax)
+// CHECK: encoding: [0x67,0x0f,0x01,0x48,0x04]
+ sidt 4(%eax)
+
+// CHECK: sidtl 4(%eax)
+// CHECK: encoding: [0x67,0x66,0x0f,0x01,0x48,0x04]
+ sidtl 4(%eax)
+
+// CHECK: fcompi %st(2)
+// CHECK: encoding: [0xdf,0xf2]
+ fcompi %st(2), %st
+
+// CHECK: fcompi %st(2)
+// CHECK: encoding: [0xdf,0xf2]
+ fcompi %st(2)
+
+// CHECK: fcompi
+// CHECK: encoding: [0xdf,0xf1]
+ fcompi
+
+// CHECK: fucompi %st(2)
+// CHECK: encoding: [0xdf,0xea]
+ fucompi %st(2),%st
+
+// CHECK: fucompi %st(2)
+// CHECK: encoding: [0xdf,0xea]
+ fucompi %st(2)
+
+// CHECK: fucompi
+// CHECK: encoding: [0xdf,0xe9]
+ fucompi
+
+// CHECK: fldcw 32493
+// CHECK: encoding: [0xd9,0x2e,0xed,0x7e]
+ fldcww 0x7eed
+
+// CHECK: fldcw 32493
+// CHECK: encoding: [0xd9,0x2e,0xed,0x7e]
+ fldcw 0x7eed
+
+// CHECK: fnstcw 32493
+// CHECK: encoding: [0xd9,0x3e,0xed,0x7e]
+ fnstcww 0x7eed
+
+// CHECK: fnstcw 32493
+// CHECK: encoding: [0xd9,0x3e,0xed,0x7e]
+ fnstcw 0x7eed
+
+// CHECK: wait
+// CHECK: encoding: [0x9b]
+ fstcww 0x7eed
+
+// CHECK: wait
+// CHECK: encoding: [0x9b]
+ fstcw 0x7eed
+
+// CHECK: fnstsw 32493
+// CHECK: encoding: [0xdd,0x3e,0xed,0x7e]
+ fnstsww 0x7eed
+
+// CHECK: fnstsw 32493
+// CHECK: encoding: [0xdd,0x3e,0xed,0x7e]
+ fnstsw 0x7eed
+
+// CHECK: wait
+// CHECK: encoding: [0x9b]
+ fstsww 0x7eed
+
+// CHECK: wait
+// CHECK: encoding: [0x9b]
+ fstsw 0x7eed
+
+// CHECK: verr 32493
+// CHECK: encoding: [0x0f,0x00,0x26,0xed,0x7e]
+ verrw 0x7eed
+
+// CHECK: verr 32493
+// CHECK: encoding: [0x0f,0x00,0x26,0xed,0x7e]
+ verr 0x7eed
+
+// CHECK: wait
+// CHECK: encoding: [0x9b]
+ fclex
+
+// CHECK: fnclex
+// CHECK: encoding: [0xdb,0xe2]
+ fnclex
+
+// CHECK: ud2
+// CHECK: encoding: [0x0f,0x0b]
+ ud2
+
+// CHECK: ud2
+// CHECK: encoding: [0x0f,0x0b]
+ ud2a
+
+// CHECK: ud2b
+// CHECK: encoding: [0x0f,0xb9]
+ ud2b
+
+// CHECK: loope 0
+// CHECK: encoding: [0xe1,A]
+ loopz 0
+
+// CHECK: loopne 0
+// CHECK: encoding: [0xe0,A]
+ loopnz 0
+
+// CHECK: outsb (%si), %dx # encoding: [0x6e]
+// CHECK: outsb
+// CHECK: outsb
+ outsb
+ outsb %ds:(%si), %dx
+ outsb (%si), %dx
+
+// CHECK: outsw (%si), %dx # encoding: [0x6f]
+// CHECK: outsw
+// CHECK: outsw
+ outsw
+ outsw %ds:(%si), %dx
+ outsw (%si), %dx
+
+// CHECK: outsl (%si), %dx # encoding: [0x66,0x6f]
+// CHECK: outsl
+ outsl
+ outsl %ds:(%si), %dx
+ outsl (%si), %dx
+
+// CHECK: insb %dx, %es:(%di) # encoding: [0x6c]
+// CHECK: insb
+ insb
+ insb %dx, %es:(%di)
+
+// CHECK: insw %dx, %es:(%di) # encoding: [0x6d]
+// CHECK: insw
+ insw
+ insw %dx, %es:(%di)
+
+// CHECK: insl %dx, %es:(%di) # encoding: [0x66,0x6d]
+// CHECK: insl
+ insl
+ insl %dx, %es:(%di)
+
+// CHECK: movsb (%si), %es:(%di) # encoding: [0xa4]
+// CHECK: movsb
+// CHECK: movsb
+ movsb
+ movsb %ds:(%si), %es:(%di)
+ movsb (%si), %es:(%di)
+
+// CHECK: movsw (%si), %es:(%di) # encoding: [0xa5]
+// CHECK: movsw
+// CHECK: movsw
+ movsw
+ movsw %ds:(%si), %es:(%di)
+ movsw (%si), %es:(%di)
+
+// CHECK: movsl (%si), %es:(%di) # encoding: [0x66,0xa5]
+// CHECK: movsl
+// CHECK: movsl
+ movsl
+ movsl %ds:(%si), %es:(%di)
+ movsl (%si), %es:(%di)
+
+// CHECK: lodsb (%si), %al # encoding: [0xac]
+// CHECK: lodsb
+// CHECK: lodsb
+// CHECK: lodsb
+// CHECK: lodsb
+ lodsb
+ lodsb %ds:(%si), %al
+ lodsb (%si), %al
+ lods %ds:(%si), %al
+ lods (%si), %al
+
+// CHECK: lodsw (%si), %ax # encoding: [0xad]
+// CHECK: lodsw
+// CHECK: lodsw
+// CHECK: lodsw
+// CHECK: lodsw
+ lodsw
+ lodsw %ds:(%si), %ax
+ lodsw (%si), %ax
+ lods %ds:(%si), %ax
+ lods (%si), %ax
+
+// CHECK: lodsl (%si), %eax # encoding: [0x66,0xad]
+// CHECK: lodsl
+// CHECK: lodsl
+// CHECK: lodsl
+// CHECK: lodsl
+ lodsl
+ lodsl %ds:(%si), %eax
+ lodsl (%si), %eax
+ lods %ds:(%si), %eax
+ lods (%si), %eax
+
+// CHECK: stosb %al, %es:(%di) # encoding: [0xaa]
+// CHECK: stosb
+// CHECK: stosb
+ stosb
+ stosb %al, %es:(%di)
+ stos %al, %es:(%di)
+
+// CHECK: stosw %ax, %es:(%di) # encoding: [0xab]
+// CHECK: stosw
+// CHECK: stosw
+ stosw
+ stosw %ax, %es:(%di)
+ stos %ax, %es:(%di)
+
+// CHECK: stosl %eax, %es:(%di) # encoding: [0x66,0xab]
+// CHECK: stosl
+// CHECK: stosl
+ stosl
+ stosl %eax, %es:(%di)
+ stos %eax, %es:(%di)
+
+// CHECK: strw
+// CHECK: encoding: [0x0f,0x00,0xc8]
+ str %ax
+
+// CHECK: strl
+// CHECK: encoding: [0x66,0x0f,0x00,0xc8]
+ str %eax
+
+
+// CHECK: fsubp
+// CHECK: encoding: [0xde,0xe1]
+fsubp %st,%st(1)
+
+// CHECK: fsubp %st(2)
+// CHECK: encoding: [0xde,0xe2]
+fsubp %st, %st(2)
+
+// CHECK: xchgl %eax, %eax
+// CHECK: encoding: [0x66,0x90]
+xchgl %eax, %eax
+
+// CHECK: xchgw %ax, %ax
+// CHECK: encoding: [0x90]
+xchgw %ax, %ax
+
+// CHECK: xchgl %ecx, %eax
+// CHECK: encoding: [0x66,0x91]
+xchgl %ecx, %eax
+
+// CHECK: xchgl %ecx, %eax
+// CHECK: encoding: [0x66,0x91]
+xchgl %eax, %ecx
+
+// CHECK: retw
+// CHECK: encoding: [0xc3]
+retw
+
+// CHECK: retl
+// CHECK: encoding: [0x66,0xc3]
+retl
+
+// CHECK: lretw
+// CHECK: encoding: [0xcb]
+lretw
+
+// CHECK: lretl
+// CHECK: encoding: [0x66,0xcb]
+lretl
diff --git a/test/MC/X86/x86-32.s b/test/MC/X86/x86-32.s
index 99136bd19cdf..bebaa65227f1 100644
--- a/test/MC/X86/x86-32.s
+++ b/test/MC/X86/x86-32.s
@@ -438,18 +438,28 @@ cmovnae %bx,%bx
fwait
// rdar://7873482
-// CHECK: [0x65,0x8b,0x05,0x7c,0x00,0x00,0x00]
-// FIXME: This is a correct bug poor encoding: Use 65 a1 7c 00 00 00
+// CHECK: [0x65,0xa1,0x7c,0x00,0x00,0x00]
movl %gs:124, %eax
-// CHECK: pusha
+// CHECK: [0x65,0xa3,0x7c,0x00,0x00,0x00]
+ movl %eax, %gs:124
+
+// CHECK: pushal
// CHECK: encoding: [0x60]
pusha
-// CHECK: popa
+// CHECK: popal
// CHECK: encoding: [0x61]
popa
+// CHECK: pushaw
+// CHECK: encoding: [0x66,0x60]
+ pushaw
+
+// CHECK: popaw
+// CHECK: encoding: [0x66,0x61]
+ popaw
+
// CHECK: pushal
// CHECK: encoding: [0x60]
pushal
@@ -593,6 +603,16 @@ lcalll $0x7ace,$0x7ace
jmpl $0x7ace,$0x7ace
ljmpl $0x7ace,$0x7ace
+// CHECK: lcallw $31438, $31438
+// CHECK: lcallw $31438, $31438
+// CHECK: ljmpw $31438, $31438
+// CHECK: ljmpw $31438, $31438
+
+callw $0x7ace,$0x7ace
+lcallw $0x7ace,$0x7ace
+jmpw $0x7ace,$0x7ace
+ljmpw $0x7ace,$0x7ace
+
// CHECK: lcalll $31438, $31438
// CHECK: lcalll $31438, $31438
// CHECK: ljmpl $31438, $31438
@@ -701,11 +721,11 @@ pshufw $90, %mm4, %mm0
// CHECK: encoding: [0x66,0x0f,0x01,0x50,0x04]
lgdtw 4(%eax)
-// CHECK: lgdt 4(%eax)
+// CHECK: lgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdt 4(%eax)
-// CHECK: lgdt 4(%eax)
+// CHECK: lgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdtl 4(%eax)
@@ -713,11 +733,11 @@ pshufw $90, %mm4, %mm0
// CHECK: encoding: [0x66,0x0f,0x01,0x58,0x04]
lidtw 4(%eax)
-// CHECK: lidt 4(%eax)
+// CHECK: lidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidt 4(%eax)
-// CHECK: lidt 4(%eax)
+// CHECK: lidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidtl 4(%eax)
@@ -725,11 +745,11 @@ pshufw $90, %mm4, %mm0
// CHECK: encoding: [0x66,0x0f,0x01,0x40,0x04]
sgdtw 4(%eax)
-// CHECK: sgdt 4(%eax)
+// CHECK: sgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdt 4(%eax)
-// CHECK: sgdt 4(%eax)
+// CHECK: sgdtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdtl 4(%eax)
@@ -737,11 +757,11 @@ pshufw $90, %mm4, %mm0
// CHECK: encoding: [0x66,0x0f,0x01,0x48,0x04]
sidtw 4(%eax)
-// CHECK: sidt 4(%eax)
+// CHECK: sidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidt 4(%eax)
-// CHECK: sidt 4(%eax)
+// CHECK: sidtl 4(%eax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidtl 4(%eax)
@@ -845,63 +865,63 @@ pshufw $90, %mm4, %mm0
// CHECK: encoding: [0xe0,A]
loopnz 0
-// CHECK: outsb # encoding: [0x6e]
+// CHECK: outsb (%esi), %dx # encoding: [0x6e]
// CHECK: outsb
// CHECK: outsb
outsb
outsb %ds:(%esi), %dx
outsb (%esi), %dx
-// CHECK: outsw # encoding: [0x66,0x6f]
+// CHECK: outsw (%esi), %dx # encoding: [0x66,0x6f]
// CHECK: outsw
// CHECK: outsw
outsw
outsw %ds:(%esi), %dx
outsw (%esi), %dx
-// CHECK: outsl # encoding: [0x6f]
+// CHECK: outsl (%esi), %dx # encoding: [0x6f]
// CHECK: outsl
outsl
outsl %ds:(%esi), %dx
outsl (%esi), %dx
-// CHECK: insb # encoding: [0x6c]
+// CHECK: insb %dx, %es:(%edi) # encoding: [0x6c]
// CHECK: insb
insb
insb %dx, %es:(%edi)
-// CHECK: insw # encoding: [0x66,0x6d]
+// CHECK: insw %dx, %es:(%edi) # encoding: [0x66,0x6d]
// CHECK: insw
insw
insw %dx, %es:(%edi)
-// CHECK: insl # encoding: [0x6d]
+// CHECK: insl %dx, %es:(%edi) # encoding: [0x6d]
// CHECK: insl
insl
insl %dx, %es:(%edi)
-// CHECK: movsb # encoding: [0xa4]
+// CHECK: movsb (%esi), %es:(%edi) # encoding: [0xa4]
// CHECK: movsb
// CHECK: movsb
movsb
movsb %ds:(%esi), %es:(%edi)
movsb (%esi), %es:(%edi)
-// CHECK: movsw # encoding: [0x66,0xa5]
+// CHECK: movsw (%esi), %es:(%edi) # encoding: [0x66,0xa5]
// CHECK: movsw
// CHECK: movsw
movsw
movsw %ds:(%esi), %es:(%edi)
movsw (%esi), %es:(%edi)
-// CHECK: movsl # encoding: [0xa5]
+// CHECK: movsl (%esi), %es:(%edi) # encoding: [0xa5]
// CHECK: movsl
// CHECK: movsl
movsl
movsl %ds:(%esi), %es:(%edi)
movsl (%esi), %es:(%edi)
-// CHECK: lodsb # encoding: [0xac]
+// CHECK: lodsb (%esi), %al # encoding: [0xac]
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
@@ -912,7 +932,7 @@ pshufw $90, %mm4, %mm0
lods %ds:(%esi), %al
lods (%esi), %al
-// CHECK: lodsw # encoding: [0x66,0xad]
+// CHECK: lodsw (%esi), %ax # encoding: [0x66,0xad]
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
@@ -923,7 +943,7 @@ pshufw $90, %mm4, %mm0
lods %ds:(%esi), %ax
lods (%esi), %ax
-// CHECK: lodsl # encoding: [0xad]
+// CHECK: lodsl (%esi), %eax # encoding: [0xad]
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
@@ -934,21 +954,21 @@ pshufw $90, %mm4, %mm0
lods %ds:(%esi), %eax
lods (%esi), %eax
-// CHECK: stosb # encoding: [0xaa]
+// CHECK: stosb %al, %es:(%edi) # encoding: [0xaa]
// CHECK: stosb
// CHECK: stosb
stosb
stosb %al, %es:(%edi)
stos %al, %es:(%edi)
-// CHECK: stosw # encoding: [0x66,0xab]
+// CHECK: stosw %ax, %es:(%edi) # encoding: [0x66,0xab]
// CHECK: stosw
// CHECK: stosw
stosw
stosw %ax, %es:(%edi)
stos %ax, %es:(%edi)
-// CHECK: stosl # encoding: [0xab]
+// CHECK: stosl %eax, %es:(%edi) # encoding: [0xab]
// CHECK: stosl
// CHECK: stosl
stosl
diff --git a/test/MC/X86/x86-64.s b/test/MC/X86/x86-64.s
index 6b41f485f165..10d420aa447b 100644
--- a/test/MC/X86/x86-64.s
+++ b/test/MC/X86/x86-64.s
@@ -203,7 +203,7 @@ int $3
// CHECK-STDERR: warning: scale factor without index register is ignored
movaps %xmm3, (%esi, 2)
-// CHECK: imull $12, %eax, %eax
+// CHECK: imull $12, %eax
imul $12, %eax
// CHECK: imull %ecx, %eax
@@ -951,35 +951,35 @@ decw %ax // CHECK: decw %ax # encoding: [0x66,0xff,0xc8]
decl %eax // CHECK: decl %eax # encoding: [0xff,0xc8]
// rdar://8416805
-// CHECK: lgdt 4(%rax)
+// CHECK: lgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdt 4(%rax)
-// CHECK: lgdt 4(%rax)
+// CHECK: lgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x50,0x04]
lgdtq 4(%rax)
-// CHECK: lidt 4(%rax)
+// CHECK: lidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidt 4(%rax)
-// CHECK: lidt 4(%rax)
+// CHECK: lidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x58,0x04]
lidtq 4(%rax)
-// CHECK: sgdt 4(%rax)
+// CHECK: sgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdt 4(%rax)
-// CHECK: sgdt 4(%rax)
+// CHECK: sgdtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x40,0x04]
sgdtq 4(%rax)
-// CHECK: sidt 4(%rax)
+// CHECK: sidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidt 4(%rax)
-// CHECK: sidt 4(%rax)
+// CHECK: sidtq 4(%rax)
// CHECK: encoding: [0x0f,0x01,0x48,0x04]
sidtq 4(%rax)
@@ -1050,56 +1050,56 @@ xsetbv // CHECK: xsetbv # encoding: [0x0f,0x01,0xd1]
// CHECK: encoding: [0xe0,A]
loopnz 0
-// CHECK: outsb # encoding: [0x6e]
+// CHECK: outsb (%rsi), %dx # encoding: [0x6e]
// CHECK: outsb
// CHECK: outsb
outsb
outsb %ds:(%rsi), %dx
outsb (%rsi), %dx
-// CHECK: outsw # encoding: [0x66,0x6f]
+// CHECK: outsw (%rsi), %dx # encoding: [0x66,0x6f]
// CHECK: outsw
// CHECK: outsw
outsw
outsw %ds:(%rsi), %dx
outsw (%rsi), %dx
-// CHECK: outsl # encoding: [0x6f]
+// CHECK: outsl (%rsi), %dx # encoding: [0x6f]
// CHECK: outsl
outsl
outsl %ds:(%rsi), %dx
outsl (%rsi), %dx
-// CHECK: insb # encoding: [0x6c]
+// CHECK: insb %dx, %es:(%rdi) # encoding: [0x6c]
// CHECK: insb
insb
insb %dx, %es:(%rdi)
-// CHECK: insw # encoding: [0x66,0x6d]
+// CHECK: insw %dx, %es:(%rdi) # encoding: [0x66,0x6d]
// CHECK: insw
insw
insw %dx, %es:(%rdi)
-// CHECK: insl # encoding: [0x6d]
+// CHECK: insl %dx, %es:(%rdi) # encoding: [0x6d]
// CHECK: insl
insl
insl %dx, %es:(%rdi)
-// CHECK: movsb # encoding: [0xa4]
+// CHECK: movsb (%rsi), %es:(%rdi) # encoding: [0xa4]
// CHECK: movsb
// CHECK: movsb
movsb
movsb %ds:(%rsi), %es:(%rdi)
movsb (%rsi), %es:(%rdi)
-// CHECK: movsw # encoding: [0x66,0xa5]
+// CHECK: movsw (%rsi), %es:(%rdi) # encoding: [0x66,0xa5]
// CHECK: movsw
// CHECK: movsw
movsw
movsw %ds:(%rsi), %es:(%rdi)
movsw (%rsi), %es:(%rdi)
-// CHECK: movsl # encoding: [0xa5]
+// CHECK: movsl (%rsi), %es:(%rdi) # encoding: [0xa5]
// CHECK: movsl
// CHECK: movsl
movsl
@@ -1109,14 +1109,14 @@ xsetbv // CHECK: xsetbv # encoding: [0x0f,0x01,0xd1]
// CHECK: movsl
movsl (%rsi), (%rdi)
-// CHECK: movsq # encoding: [0x48,0xa5]
+// CHECK: movsq (%rsi), %es:(%rdi) # encoding: [0x48,0xa5]
// CHECK: movsq
// CHECK: movsq
movsq
movsq %ds:(%rsi), %es:(%rdi)
movsq (%rsi), %es:(%rdi)
-// CHECK: lodsb # encoding: [0xac]
+// CHECK: lodsb (%rsi), %al # encoding: [0xac]
// CHECK: lodsb
// CHECK: lodsb
// CHECK: lodsb
@@ -1127,7 +1127,7 @@ xsetbv // CHECK: xsetbv # encoding: [0x0f,0x01,0xd1]
lods %ds:(%rsi), %al
lods (%rsi), %al
-// CHECK: lodsw # encoding: [0x66,0xad]
+// CHECK: lodsw (%rsi), %ax # encoding: [0x66,0xad]
// CHECK: lodsw
// CHECK: lodsw
// CHECK: lodsw
@@ -1138,7 +1138,7 @@ xsetbv // CHECK: xsetbv # encoding: [0x0f,0x01,0xd1]
lods %ds:(%rsi), %ax
lods (%rsi), %ax
-// CHECK: lodsl # encoding: [0xad]
+// CHECK: lodsl (%rsi), %eax # encoding: [0xad]
// CHECK: lodsl
// CHECK: lodsl
// CHECK: lodsl
@@ -1149,7 +1149,7 @@ xsetbv // CHECK: xsetbv # encoding: [0x0f,0x01,0xd1]
lods %ds:(%rsi), %eax
lods (%rsi), %eax
-// CHECK: lodsq # encoding: [0x48,0xad]
+// CHECK: lodsq (%rsi), %rax # encoding: [0x48,0xad]
// CHECK: lodsq
// CHECK: lodsq
// CHECK: lodsq
@@ -1160,28 +1160,28 @@ xsetbv // CHECK: xsetbv # encoding: [0x0f,0x01,0xd1]
lods %ds:(%rsi), %rax
lods (%rsi), %rax
-// CHECK: stosb # encoding: [0xaa]
+// CHECK: stosb %al, %es:(%rdi) # encoding: [0xaa]
// CHECK: stosb
// CHECK: stosb
stosb
stosb %al, %es:(%rdi)
stos %al, %es:(%rdi)
-// CHECK: stosw # encoding: [0x66,0xab]
+// CHECK: stosw %ax, %es:(%rdi) # encoding: [0x66,0xab]
// CHECK: stosw
// CHECK: stosw
stosw
stosw %ax, %es:(%rdi)
stos %ax, %es:(%rdi)
-// CHECK: stosl # encoding: [0xab]
+// CHECK: stosl %eax, %es:(%rdi) # encoding: [0xab]
// CHECK: stosl
// CHECK: stosl
stosl
stosl %eax, %es:(%rdi)
stos %eax, %es:(%rdi)
-// CHECK: stosq # encoding: [0x48,0xab]
+// CHECK: stosq %rax, %es:(%rdi) # encoding: [0x48,0xab]
// CHECK: stosq
// CHECK: stosq
stosq
@@ -1388,3 +1388,7 @@ movq %xmm0, %rax
vmovd %xmm0, %eax
vmovd %xmm0, %rax
vmovq %xmm0, %rax
+
+// CHECK: seto 3735928559(%r10,%r9,8)
+// CHECK: encoding: [0x43,0x0f,0x90,0x84,0xca,0xef,0xbe,0xad,0xde]
+ seto 0xdeadbeef(%r10,%r9,8)
diff --git a/test/MC/X86/x86-itanium.ll b/test/MC/X86/x86-itanium.ll
new file mode 100644
index 000000000000..1d8308dca75a
--- /dev/null
+++ b/test/MC/X86/x86-itanium.ll
@@ -0,0 +1,6 @@
+; RUN: llc -mtriple i686-windows-itanium -filetype asm -o - %s | FileCheck %s
+
+@var = common global i32 0, align 4
+
+; CHECK-NOT: .type _var,@object
+
diff --git a/test/MC/X86/x86-target-directives.s b/test/MC/X86/x86-target-directives.s
new file mode 100644
index 000000000000..b6c7e78406f5
--- /dev/null
+++ b/test/MC/X86/x86-target-directives.s
@@ -0,0 +1,7 @@
+# RUN: not llvm-mc -triple i386 -filetype asm -o - %s 2>&1 | FileCheck %s
+
+ .code42
+
+# CHECK: unknown directive .code42
+# CHECK-NOT: unknown directive
+
diff --git a/test/MC/X86/x86-windows-itanium-libcalls.ll b/test/MC/X86/x86-windows-itanium-libcalls.ll
new file mode 100644
index 000000000000..773d03b4def6
--- /dev/null
+++ b/test/MC/X86/x86-windows-itanium-libcalls.ll
@@ -0,0 +1,16 @@
+; RUN: opt -mtriple i686-windows-itanium -O2 -o - %s | llvm-dis | FileCheck %s
+
+target triple = "i686-windows-itanium"
+
+declare dllimport double @floor(double)
+
+define dllexport float @test(float %f) {
+ %conv = fpext float %f to double
+ %call = tail call double @floor(double %conv)
+ %cast = fptrunc double %call to float
+ ret float %cast
+}
+
+; CHECK-NOT: floorf
+; CHECK: floor
+
diff --git a/test/MC/X86/x86_64-avx-encoding.s b/test/MC/X86/x86_64-avx-encoding.s
index 5ba8064ff48a..1704b94a7298 100644
--- a/test/MC/X86/x86_64-avx-encoding.s
+++ b/test/MC/X86/x86_64-avx-encoding.s
@@ -1557,7 +1557,7 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
vcvtdq2ps %xmm13, %xmm10
// CHECK: vcvtdq2ps (%ecx), %xmm13
-// CHECK: encoding: [0xc5,0x78,0x5b,0x29]
+// CHECK: encoding: [0x67,0xc5,0x78,0x5b,0x29]
vcvtdq2ps (%ecx), %xmm13
// CHECK: vcvttps2dq %xmm12, %xmm11
diff --git a/test/MC/X86/x86_64-signed-reloc.s b/test/MC/X86/x86_64-signed-reloc.s
new file mode 100644
index 000000000000..e8a2720df1d3
--- /dev/null
+++ b/test/MC/X86/x86_64-signed-reloc.s
@@ -0,0 +1,16 @@
+// RUN: llvm-mc -triple x86_64-linux-gnu -filetype=obj %s | llvm-readobj -r | FileCheck %s
+
+
+ // CHECK: Relocations [
+ // CHECK-NEXT: Section ({{[0-9]+}}) .rela.text {
+
+pushq $foo // CHECK-NEXT: R_X86_64_32S
+addq $foo, %rax // CHECK-NEXT: R_X86_64_32S
+andq $foo, %rax // CHECK-NEXT: R_X86_64_32S
+movq $foo, %rax // CHECK-NEXT: R_X86_64_32S
+bextr $foo, (%edi), %eax // CHECK-NEXT: R_X86_64_32
+bextr $foo, (%rdi), %rax // CHECK-NEXT: R_X86_64_32S
+imul $foo, %rax // CHECK-NEXT: R_X86_64_32S
+
+ // CHECK-NEXT: }
+ // CHECK-NEXT: ]
diff --git a/test/MC/X86/x86_64-tbm-encoding.s b/test/MC/X86/x86_64-tbm-encoding.s
index 180578bfc01c..a9b8f34caa42 100644
--- a/test/MC/X86/x86_64-tbm-encoding.s
+++ b/test/MC/X86/x86_64-tbm-encoding.s
@@ -194,3 +194,9 @@
// CHECK: tzmsk (%rdi), %rax
// CHECK: encoding: [0x8f,0xe9,0xf8,0x01,0x27]
tzmsk (%rdi), %rax
+
+// CHECK: encoding: [0x67,0xc4,0xe2,0x60,0xf7,0x07]
+ bextr %ebx, (%edi), %eax
+
+// CHECK: encoding: [0x67,0x8f,0xea,0x78,0x10,0x07,A,A,A,A]
+ bextr $foo, (%edi), %eax
diff --git a/test/MC/X86/x86_errors.s b/test/MC/X86/x86_errors.s
index a974233d2f49..51f2e8e14685 100644
--- a/test/MC/X86/x86_errors.s
+++ b/test/MC/X86/x86_errors.s
@@ -26,8 +26,23 @@ sysexitq
lea (%rsp, %rbp, $4), %rax
// rdar://10423777
-// 64: error: index register is 32-bit, but base register is 64-bit
+// 64: error: base register is 64-bit, but index register is not
movq (%rsi,%ecx),%xmm0
+// 64: error: invalid 16-bit base register
+movl %eax,(%bp,%si)
+
+// 32: error: scale factor in 16-bit address must be 1
+movl %eax,(%bp,%si,2)
+
+// 32: error: invalid 16-bit base register
+movl %eax,(%cx)
+
+// 32: error: invalid 16-bit base/index register combination
+movl %eax,(%bp,%bx)
+
+// 32: error: 16-bit memory operand may not include only index register
+movl %eax,(,%bx)
+
// 32: error: invalid operand for instruction
outb al, 4
diff --git a/test/MC/X86/x86_long_nop.s b/test/MC/X86/x86_long_nop.s
index ac1bc08ff38b..eee840c5a977 100644
--- a/test/MC/X86/x86_long_nop.s
+++ b/test/MC/X86/x86_long_nop.s
@@ -2,6 +2,7 @@
# RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu %s | llvm-objdump -d -no-show-raw-insn - | FileCheck %s
# RUN: llvm-mc -filetype=obj -arch=x86 -triple=x86_64-apple-darwin10.0 %s | llvm-objdump -d -no-show-raw-insn - | FileCheck %s
# RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-apple-darwin8 %s | llvm-objdump -d -no-show-raw-insn - | FileCheck %s
+# RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu -mcpu=slm %s | llvm-objdump -d -no-show-raw-insn - | FileCheck --check-prefix=SLM %s
# Ensure alignment directives also emit sequences of 15-byte NOPs on processors
# capable of using long NOPs.
@@ -13,3 +14,12 @@ inc %eax
# CHECK-NEXT: 10: nop
# CHECK-NEXT: 1f: nop
# CHECK-NEXT: 20: inc
+
+# On Silvermont we emit only 7 byte NOPs since longer NOPs are not profitable
+# SLM: 0: inc
+# SLM-NEXT: 1: nop
+# SLM-NEXT: 8: nop
+# SLM-NEXT: f: nop
+# SLM-NEXT: 16: nop
+# SLM-NEXT: 1d: nop
+# SLM-NEXT: 20: inc
diff --git a/test/MC/X86/x86_nop.s b/test/MC/X86/x86_nop.s
index 059f591168d5..572487bfdaca 100644
--- a/test/MC/X86/x86_nop.s
+++ b/test/MC/X86/x86_nop.s
@@ -14,6 +14,7 @@
# RUN: llvm-mc -filetype=obj -triple=i686-pc-linux -mcpu=c3 %s | llvm-objdump -d - | FileCheck %s
# RUN: llvm-mc -filetype=obj -triple=i686-pc-linux -mcpu=c3-2 %s | llvm-objdump -d - | FileCheck %s
# RUN: llvm-mc -filetype=obj -triple=i686-pc-linux -mcpu=core2 %s | llvm-objdump -d - | FileCheck --check-prefix=NOPL %s
+# RUN: llvm-mc -filetype=obj -triple=i686-pc-linux -mcpu=slm %s | llvm-objdump -d - | FileCheck --check-prefix=NOPL %s
inc %eax
diff --git a/test/Makefile b/test/Makefile
index d3227dd5a347..c78c2561c34b 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -61,6 +61,15 @@ clang-tools-site-cfg: FORCE
extra-site-cfgs:: clang-tools-site-cfg
endif
+ifeq ($(shell test -f $(PROJ_OBJ_DIR)/../tools/lld/Makefile && echo OK), OK)
+LIT_ALL_TESTSUITES += $(PROJ_OBJ_DIR)/../tools/lld/test
+
+# Force creation of lld's lit.site.cfg.
+lld-site-cfg: FORCE
+ $(MAKE) -C $(PROJ_OBJ_DIR)/../tools/lld/test lit.site.cfg Unit/lit.site.cfg
+extra-site-cfgs:: lld-site-cfg
+endif
+
ifeq ($(shell test -f $(PROJ_OBJ_DIR)/../tools/polly/Makefile && echo OK), OK)
LIT_ALL_TESTSUITES += $(PROJ_OBJ_DIR)/../tools/polly/test
@@ -125,6 +134,7 @@ lit.site.cfg: FORCE
@$(ECHOPATH) s=@LLVM_TOOLS_DIR@=$(ToolDir)=g >> lit.tmp
@$(ECHOPATH) s=@SHLIBDIR@=$(SharedLibDir)=g >> lit.tmp
@$(ECHOPATH) s=@SHLIBEXT@=$(SHLIBEXT)=g >> lit.tmp
+ @$(ECHOPATH) s=@EXEEXT@=$(EXEEXT)=g >> lit.tmp
@$(ECHOPATH) s=@PYTHON_EXECUTABLE@=$(PYTHON)=g >> lit.tmp
@$(ECHOPATH) s=@OCAMLOPT@=$(OCAMLOPT) -cc $(subst *,'\\\"',*$(subst =,"\\=",$(CXX_FOR_OCAMLOPT))*) -cclib -L$(LibDir) -I $(LibDir)/ocaml=g >> lit.tmp
@$(ECHOPATH) s=@ENABLE_SHARED@=$(ENABLE_SHARED)=g >> lit.tmp
@@ -145,7 +155,6 @@ Unit/lit.site.cfg: $(PROJ_OBJ_DIR)/Unit/.dir FORCE
@$(ECHOPATH) s=@LLVM_BUILD_MODE@=$(BuildMode)=g >> unit.tmp
@$(ECHOPATH) s=@ENABLE_SHARED@=$(ENABLE_SHARED)=g >> unit.tmp
@$(ECHOPATH) s=@SHLIBDIR@=$(SharedLibDir)=g >> unit.tmp
- @$(ECHOPATH) s=@SHLIBPATH_VAR@=$(SHLIBPATH_VAR)=g >> unit.tmp
@$(ECHOPATH) s=@HOST_OS@=$(HOST_OS)=g >> unit.tmp
@$(ECHOPATH) s=@HOST_ARCH@=$(HOST_ARCH)=g >> lit.tmp
@sed -f unit.tmp $(PROJ_SRC_DIR)/Unit/lit.site.cfg.in > $@
diff --git a/test/Object/ARM/lit.local.cfg b/test/Object/ARM/lit.local.cfg
index 5fc35d80541d..236e1d344166 100644
--- a/test/Object/ARM/lit.local.cfg
+++ b/test/Object/ARM/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/Object/Inputs/COFF/i386.yaml b/test/Object/Inputs/COFF/i386.yaml
index 1badad8d5915..7873df0a7e25 100644
--- a/test/Object/Inputs/COFF/i386.yaml
+++ b/test/Object/Inputs/COFF/i386.yaml
@@ -39,8 +39,12 @@ symbols:
SimpleType: IMAGE_SYM_TYPE_NULL # (0)
ComplexType: IMAGE_SYM_DTYPE_NULL # (0)
StorageClass: IMAGE_SYM_CLASS_STATIC # (3)
- NumberOfAuxSymbols: 1
- AuxiliaryData: !hex "240000000300000000000000010000000000" # |$.................|
+ SectionDefinition:
+ Length: 36
+ NumberOfRelocations: 3
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 1
- !Symbol
Name: .data
@@ -49,8 +53,12 @@ symbols:
SimpleType: IMAGE_SYM_TYPE_NULL # (0)
ComplexType: IMAGE_SYM_DTYPE_NULL # (0)
StorageClass: IMAGE_SYM_CLASS_STATIC # (3)
- NumberOfAuxSymbols: 1
- AuxiliaryData: !hex "0D0000000000000000000000020000000000" # |..................|
+ SectionDefinition:
+ Length: 13
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 2
- !Symbol
Name: _main
diff --git a/test/Object/Inputs/COFF/long-file-symbol.yaml b/test/Object/Inputs/COFF/long-file-symbol.yaml
new file mode 100644
index 000000000000..37caad796208
--- /dev/null
+++ b/test/Object/Inputs/COFF/long-file-symbol.yaml
@@ -0,0 +1,14 @@
+---
+header:
+ Machine: IMAGE_FILE_MACHINE_AMD64
+ Characteristics: [ IMAGE_FILE_RELOCS_STRIPPED, IMAGE_FILE_LINE_NUMS_STRIPPED ]
+sections:
+symbols:
+ - Name: .file
+ Value: 0
+ SectionNumber: 65534
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_FILE
+ File: filename_with_22_chars
+...
diff --git a/test/Object/Inputs/COFF/weak-external.yaml b/test/Object/Inputs/COFF/weak-external.yaml
new file mode 100644
index 000000000000..064b44a4a38f
--- /dev/null
+++ b/test/Object/Inputs/COFF/weak-external.yaml
@@ -0,0 +1,43 @@
+---
+header:
+ Machine: IMAGE_FILE_MACHINE_I386
+ Characteristics: [ IMAGE_FILE_LINE_NUMS_STRIPPED, IMAGE_FILE_32BIT_MACHINE ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ Alignment: 4
+ SectionData: 5589E583E4F0E800000000B800000000C9C39090
+ Relocations:
+ - VirtualAddress: 7
+ SymbolName: ___main
+ Type: IMAGE_REL_I386_REL32
+symbols:
+ - Name: .file
+ Value: 0
+ SectionNumber: 65534
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_FILE
+ File: 'file'
+ - Name: .text
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 18
+ NumberOfRelocations: 1
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 0
+ - Name: ___main
+ Value: 0
+ SectionNumber: 0
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+ WeakExternal:
+ TagIndex: 0
+ Characteristics: 0
+...
diff --git a/test/Object/Inputs/COFF/x86-64.yaml b/test/Object/Inputs/COFF/x86-64.yaml
index b775ae9cdfbd..b8a863a429cd 100644
--- a/test/Object/Inputs/COFF/x86-64.yaml
+++ b/test/Object/Inputs/COFF/x86-64.yaml
@@ -30,6 +30,16 @@ sections:
Characteristics: [IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ, IMAGE_SCN_MEM_WRITE, ] # 0xc0100040
SectionData: !hex "48656C6C6F20576F726C642100" # |Hello World!.|
+ - !Section
+ Name: '.CRT$XCU'
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ Alignment: 8
+ SectionData: !hex "0000000000000000"
+ Relocations:
+ - VirtualAddress: 0
+ SymbolName: '??__Ex@@YAXXZ'
+ Type: IMAGE_REL_AMD64_ADDR64
+
symbols:
- !Symbol
Name: .text
@@ -38,8 +48,12 @@ symbols:
SimpleType: IMAGE_SYM_TYPE_NULL # (0)
ComplexType: IMAGE_SYM_DTYPE_NULL # (0)
StorageClass: IMAGE_SYM_CLASS_STATIC # (3)
- NumberOfAuxSymbols: 1
- AuxiliaryData: !hex "260000000300000000000000010000000000" # |&.................|
+ SectionDefinition:
+ Length: 38
+ NumberOfRelocations: 3
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 1
- !Symbol
Name: .data
@@ -48,8 +62,12 @@ symbols:
SimpleType: IMAGE_SYM_TYPE_NULL # (0)
ComplexType: IMAGE_SYM_DTYPE_NULL # (0)
StorageClass: IMAGE_SYM_CLASS_STATIC # (3)
- NumberOfAuxSymbols: 1
- AuxiliaryData: !hex "0D0000000000000000000000020000000000" # |..................|
+ SectionDefinition:
+ Length: 13
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 2
- !Symbol
Name: main
@@ -83,3 +101,10 @@ symbols:
ComplexType: IMAGE_SYM_DTYPE_NULL # (0)
StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2)
+ - !Symbol
+ Name: '??__Ex@@YAXXZ'
+ Value: 0
+ SectionNumber: 3
+ SimpleType: IMAGE_SYM_TYPE_NULL # (0)
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION # (2)
+ StorageClass: IMAGE_SYM_CLASS_STATIC # (3)
diff --git a/test/Object/Inputs/absolute.elf-x86-64 b/test/Object/Inputs/absolute.elf-x86-64
new file mode 100644
index 000000000000..863ab5892253
--- /dev/null
+++ b/test/Object/Inputs/absolute.elf-x86-64
Binary files differ
diff --git a/test/Object/Inputs/common.coff-i386 b/test/Object/Inputs/common.coff-i386
new file mode 100644
index 000000000000..a61d76c1f9d0
--- /dev/null
+++ b/test/Object/Inputs/common.coff-i386
Binary files differ
diff --git a/test/Object/Inputs/corrupt-archive.a b/test/Object/Inputs/corrupt-archive.a
new file mode 100644
index 000000000000..f8940ff6420c
--- /dev/null
+++ b/test/Object/Inputs/corrupt-archive.a
Binary files differ
diff --git a/test/Object/Inputs/darwin-m-test1.mach0-armv7 b/test/Object/Inputs/darwin-m-test1.mach0-armv7
new file mode 100644
index 000000000000..2ce3a18cfa76
--- /dev/null
+++ b/test/Object/Inputs/darwin-m-test1.mach0-armv7
Binary files differ
diff --git a/test/Object/Inputs/darwin-m-test2.macho-i386 b/test/Object/Inputs/darwin-m-test2.macho-i386
new file mode 100644
index 000000000000..dc0e86525f40
--- /dev/null
+++ b/test/Object/Inputs/darwin-m-test2.macho-i386
Binary files differ
diff --git a/test/Object/Inputs/darwin-m-test3.macho-x86-64 b/test/Object/Inputs/darwin-m-test3.macho-x86-64
new file mode 100755
index 000000000000..18960c4f6aab
--- /dev/null
+++ b/test/Object/Inputs/darwin-m-test3.macho-x86-64
Binary files differ
diff --git a/test/Object/Inputs/hello-world.macho-x86_64 b/test/Object/Inputs/hello-world.macho-x86_64
new file mode 100755
index 000000000000..d004bedf6ad4
--- /dev/null
+++ b/test/Object/Inputs/hello-world.macho-x86_64
Binary files differ
diff --git a/test/Object/Inputs/macho-archive-x86_64.a b/test/Object/Inputs/macho-archive-x86_64.a
new file mode 100644
index 000000000000..9979ba9dd1e3
--- /dev/null
+++ b/test/Object/Inputs/macho-archive-x86_64.a
Binary files differ
diff --git a/test/Object/Inputs/macho-hello-g.macho-x86_64 b/test/Object/Inputs/macho-hello-g.macho-x86_64
new file mode 100755
index 000000000000..41be03a0e2af
--- /dev/null
+++ b/test/Object/Inputs/macho-hello-g.macho-x86_64
Binary files differ
diff --git a/test/Object/Inputs/macho-text-data-bss.macho-x86_64 b/test/Object/Inputs/macho-text-data-bss.macho-x86_64
new file mode 100644
index 000000000000..b7628c83799a
--- /dev/null
+++ b/test/Object/Inputs/macho-text-data-bss.macho-x86_64
Binary files differ
diff --git a/test/Object/Inputs/macho-universal-archive.x86_64.i386 b/test/Object/Inputs/macho-universal-archive.x86_64.i386
new file mode 100644
index 000000000000..1660714c68ea
--- /dev/null
+++ b/test/Object/Inputs/macho-universal-archive.x86_64.i386
Binary files differ
diff --git a/test/Object/Inputs/no-sections.elf-x86-64 b/test/Object/Inputs/no-sections.elf-x86-64
new file mode 100755
index 000000000000..9b8ca2e46c13
--- /dev/null
+++ b/test/Object/Inputs/no-sections.elf-x86-64
Binary files differ
diff --git a/test/Object/Inputs/program-headers.mips64 b/test/Object/Inputs/program-headers.mips64
new file mode 100644
index 000000000000..ad21c7db08d2
--- /dev/null
+++ b/test/Object/Inputs/program-headers.mips64
Binary files differ
diff --git a/test/Object/Inputs/relocatable-with-section-address.elf-x86-64 b/test/Object/Inputs/relocatable-with-section-address.elf-x86-64
new file mode 100644
index 000000000000..7bee9a9835f1
--- /dev/null
+++ b/test/Object/Inputs/relocatable-with-section-address.elf-x86-64
Binary files differ
diff --git a/test/Object/Inputs/relocation-dynamic.elf-i386 b/test/Object/Inputs/relocation-dynamic.elf-i386
new file mode 100755
index 000000000000..1548f13f7232
--- /dev/null
+++ b/test/Object/Inputs/relocation-dynamic.elf-i386
Binary files differ
diff --git a/test/Object/Inputs/relocation-relocatable.elf-i386 b/test/Object/Inputs/relocation-relocatable.elf-i386
new file mode 100644
index 000000000000..b8f375b22b5c
--- /dev/null
+++ b/test/Object/Inputs/relocation-relocatable.elf-i386
Binary files differ
diff --git a/test/Object/Inputs/thumb-symbols.elf.arm b/test/Object/Inputs/thumb-symbols.elf.arm
new file mode 100644
index 000000000000..923eef6cede2
--- /dev/null
+++ b/test/Object/Inputs/thumb-symbols.elf.arm
Binary files differ
diff --git a/test/Object/Inputs/trivial-object-test.coff-x86-64 b/test/Object/Inputs/trivial-object-test.coff-x86-64
index 077591482cea..ed144d1265f3 100644
--- a/test/Object/Inputs/trivial-object-test.coff-x86-64
+++ b/test/Object/Inputs/trivial-object-test.coff-x86-64
Binary files differ
diff --git a/test/Object/Inputs/trivial-object-test.elf-mipsel b/test/Object/Inputs/trivial-object-test.elf-mipsel
new file mode 100644
index 000000000000..e72e02b9b253
--- /dev/null
+++ b/test/Object/Inputs/trivial-object-test.elf-mipsel
Binary files differ
diff --git a/test/Object/Inputs/trivial.ll b/test/Object/Inputs/trivial.ll
index 25ece7611a31..463442eb90ac 100644
--- a/test/Object/Inputs/trivial.ll
+++ b/test/Object/Inputs/trivial.ll
@@ -10,3 +10,7 @@ entry:
declare i32 @puts(i8* nocapture) nounwind
declare void @SomeOtherFunction(...)
+
+@var = global i32 0
+@llvm.used = appending global [1 x i8*] [i8* bitcast (i32* @var to i8*)], section "llvm.metadata"
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* null, i8* null }]
diff --git a/test/Object/Inputs/unwind-section.elf-x86-64 b/test/Object/Inputs/unwind-section.elf-x86-64
new file mode 100644
index 000000000000..3a845086510a
--- /dev/null
+++ b/test/Object/Inputs/unwind-section.elf-x86-64
Binary files differ
diff --git a/test/Object/Inputs/weak.elf-x86-64 b/test/Object/Inputs/weak.elf-x86-64
new file mode 100644
index 000000000000..e45f9880f9db
--- /dev/null
+++ b/test/Object/Inputs/weak.elf-x86-64
Binary files differ
diff --git a/test/Object/Mips/lit.local.cfg b/test/Object/Mips/lit.local.cfg
index 88262fb1d323..7d12f7a9c564 100644
--- a/test/Object/Mips/lit.local.cfg
+++ b/test/Object/Mips/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Mips' in targets:
+if not 'Mips' in config.root.targets:
config.unsupported = True
diff --git a/test/Object/X86/archive-ir-asm.ll b/test/Object/X86/archive-ir-asm.ll
new file mode 100644
index 000000000000..560ac176945d
--- /dev/null
+++ b/test/Object/X86/archive-ir-asm.ll
@@ -0,0 +1,20 @@
+; RUN: llvm-as %s -o=%t1
+; RUN: rm -f %t2
+; RUN: llvm-ar rcs %t2 %t1
+; RUN: llvm-nm -M %t2 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm ".global global_asm_sym"
+module asm "global_asm_sym:"
+module asm "local_asm_sym:"
+module asm ".long undef_asm_sym"
+
+; CHECK: Archive map
+; CHECK-NEXT: global_asm_sym in archive-ir-asm.ll
+
+; CHECK: archive-ir-asm.ll
+; CHECK-NEXT: T global_asm_sym
+; CHECK-NEXT: t local_asm_sym
+; CHECK-NEXT: U undef_asm_sym
diff --git a/test/Object/X86/lit.local.cfg b/test/Object/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Object/X86/lit.local.cfg
+++ b/test/Object/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Object/X86/nm-ir.ll b/test/Object/X86/nm-ir.ll
new file mode 100644
index 000000000000..6bb7e2323a22
--- /dev/null
+++ b/test/Object/X86/nm-ir.ll
@@ -0,0 +1,45 @@
+; RUN: llvm-as %s -o - | llvm-nm - | FileCheck %s
+
+; CHECK: D a1
+; CHECK-NEXT: d a2
+; CHECK-NEXT: T f1
+; CHECK-NEXT: t f2
+; CHECK-NEXT: W f3
+; CHECK-NEXT: U f4
+; CHECK-NEXT: D g1
+; CHECK-NEXT: d g2
+; CHECK-NEXT: C g3
+; CHECK-NOT: g4
+; CHECK-NEXT: T global_asm_sym
+; CHECK-NEXT: t local_asm_sym
+; CHECK-NEXT: U undef_asm_sy
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm ".global global_asm_sym"
+module asm "global_asm_sym:"
+module asm "local_asm_sym:"
+module asm ".long undef_asm_sym"
+
+@g1 = global i32 42
+@g2 = internal global i32 42
+@g3 = common global i32 0
+@g4 = private global i32 42
+
+@a1 = alias i32* @g1
+@a2 = alias internal i32* @g1
+
+define void @f1() {
+ ret void
+}
+
+define internal void @f2() {
+ ret void
+}
+
+define linkonce_odr void @f3() {
+ ret void
+}
+
+declare void @f4()
diff --git a/test/Object/X86/objdump-cfg-invalid-opcode.yaml b/test/Object/X86/objdump-cfg-invalid-opcode.yaml
index 56ab1d274eef..d0a29be8697d 100644
--- a/test/Object/X86/objdump-cfg-invalid-opcode.yaml
+++ b/test/Object/X86/objdump-cfg-invalid-opcode.yaml
@@ -38,7 +38,7 @@ Sections:
#CFG: Type: Data
## 4: 06 (bad)
-#CFG: Content: 06
+#CFG: Content: '06'
#CFG: - StartAddress: 0x0000000000000005
#CFG: Size: 1
diff --git a/test/Object/X86/objdump-disassembly-inline-relocations.test b/test/Object/X86/objdump-disassembly-inline-relocations.test
index 2ef1a435d7d4..78615765046d 100644
--- a/test/Object/X86/objdump-disassembly-inline-relocations.test
+++ b/test/Object/X86/objdump-disassembly-inline-relocations.test
@@ -6,6 +6,10 @@ RUN: llvm-objdump -d -r %p/../Inputs/trivial-object-test.macho-i386 \
RUN: | FileCheck %s -check-prefix MACHO-i386
RUN: llvm-objdump -d -r %p/../Inputs/trivial-object-test.macho-x86-64 \
RUN: | FileCheck %s -check-prefix MACHO-x86-64
+RUN: llvm-objdump -d -r %p/../Inputs/trivial-object-test.elf-i386 \
+RUN: | FileCheck %s -check-prefix ELF-i386
+RUN: llvm-objdump -d -r %p/../Inputs/trivial-object-test.elf-x86-64 \
+RUN: | FileCheck %s -check-prefix ELF-x86-64
COFF-i386: file format COFF-i386
COFF-i386: Disassembly of section .text:
@@ -65,3 +69,34 @@ MACHO-x86-64: 1b: X86_64_RELOC_BRANCH _SomeOther
MACHO-x86-64: 1f: 8b 44 24 04 movl 4(%rsp), %eax
MACHO-x86-64: 23: 48 83 c4 08 addq $8, %rsp
MACHO-x86-64: 27: c3 ret
+
+ELF-i386: file format ELF32-i386
+ELF-i386: Disassembly of section .text:
+ELF-i386: main:
+ELF-i386: 0: 83 ec 0c subl $12, %esp
+ELF-i386: 3: c7 44 24 08 00 00 00 00 movl $0, 8(%esp)
+ELF-i386: b: c7 04 24 00 00 00 00 movl $0, (%esp)
+ELF-i386: e: R_386_32 Unknown
+ELF-i386: 12: e8 fc ff ff ff calll -4
+ELF-i386: 13: R_386_PC32 Unknown
+ELF-i386: 17: e8 fc ff ff ff calll -4
+ELF-i386: 18: R_386_PC32 Unknown
+ELF-i386: 1c: 8b 44 24 08 movl 8(%esp), %eax
+ELF-i386: 20: 83 c4 0c addl $12, %esp
+ELF-i386: 23: c3 ret
+
+ELF-x86-64: file format ELF64-x86-64
+ELF-x86-64: Disassembly of section .text:
+ELF-x86-64: main:
+ELF-x86-64: 0: 48 83 ec 08 subq $8, %rsp
+ELF-x86-64: 4: c7 44 24 04 00 00 00 00 movl $0, 4(%rsp)
+ELF-x86-64: c: bf 00 00 00 00 movl $0, %edi
+ELF-x86-64: d: R_X86_64_32S .rodata.str1.1+0
+ELF-x86-64: 11: e8 00 00 00 00 callq 0
+ELF-x86-64: 12: R_X86_64_PC32 puts-4-P
+ELF-x86-64: 16: 30 c0 xorb %al, %al
+ELF-x86-64: 18: e8 00 00 00 00 callq 0
+ELF-x86-64: 19: R_X86_64_PC32 SomeOtherFunction-4-P
+ELF-x86-64: 1d: 8b 44 24 04 movl 4(%rsp), %eax
+ELF-x86-64: 21: 48 83 c4 08 addq $8, %rsp
+ELF-x86-64: 25: c3 ret
diff --git a/test/Object/X86/objdump-disassembly-symbolic.test b/test/Object/X86/objdump-disassembly-symbolic.test
index 858653e95ebc..95a5fc8e70ac 100644
--- a/test/Object/X86/objdump-disassembly-symbolic.test
+++ b/test/Object/X86/objdump-disassembly-symbolic.test
@@ -46,3 +46,23 @@ MACHO-STUBS-x86-64: 1faa: e8 09 00 00 00
MACHO-STUBS-x86-64: 1faf: 8b 44 24 04 movl 4(%rsp), %eax
MACHO-STUBS-x86-64: 1fb3: 48 83 c4 08 addq $8, %rsp
MACHO-STUBS-x86-64: 1fb7: c3 ret
+
+
+RUN: llvm-objdump -d -symbolize %p/../Inputs/relocation-relocatable.elf-i386 \
+RUN: | FileCheck %s -check-prefix ELF-i386-REL
+
+ELF-i386-REL: Disassembly of section .text:
+ELF-i386-REL-NEXT: f:
+ELF-i386-REL-NEXT: 0: e9 fc ff ff ff jmp h
+ELF-i386-REL: g:
+ELF-i386-REL-NEXT: 5: e9 fc ff ff ff jmp f
+
+
+RUN: llvm-objdump -d -symbolize %p/../Inputs/relocation-dynamic.elf-i386 \
+RUN: | FileCheck %s -check-prefix ELF-i386-DYN
+
+ELF-i386-DYN: Disassembly of section .text:
+ELF-i386-DYN-NEXT: f:
+ELF-i386-DYN-NEXT: 1a4: e9 fc ff ff ff jmp h
+ELF-i386-DYN: g:
+ELF-i386-DYN-NEXT: 1a9: e9 fc ff ff ff jmp f
diff --git a/test/Object/X86/yaml2obj-elf-x86-rel.yaml b/test/Object/X86/yaml2obj-elf-x86-rel.yaml
new file mode 100644
index 000000000000..5ca6614d2380
--- /dev/null
+++ b/test/Object/X86/yaml2obj-elf-x86-rel.yaml
@@ -0,0 +1,41 @@
+# RUN: yaml2obj -format=elf %s > %t
+# RUN: llvm-readobj -r %t | FileCheck %s
+
+# CHECK: Relocations [
+# CHECK-NEXT: Section (2) .rel.text {
+# CHECK-NEXT: 0x0 R_386_32 main 0x0
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_386
+Sections:
+ - Type: SHT_PROGBITS
+ Name: .text
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ AddressAlign: 0x04
+ Content: 0000000000000000
+ - Type: SHT_REL
+ Name: .rel.text
+ Link: .symtab
+ Info: .text
+ AddressAlign: 0x04
+ Relocations:
+ - Offset: 0
+ Symbol: main
+ Type: R_386_32
+
+Symbols:
+ Local:
+ - Name: .text
+ Type: STT_SECTION
+ Section: .text
+
+ Global:
+ - Name: main
+ Type: STT_FUNC
+ Section: .text
+ Size: 0x08
diff --git a/test/Object/ar-error.test b/test/Object/ar-error.test
new file mode 100644
index 000000000000..7add9b448e19
--- /dev/null
+++ b/test/Object/ar-error.test
@@ -0,0 +1,6 @@
+Test if we get a proper error with a filename that doesn't exist
+
+RUN: not llvm-ar r %t.out.a sparkle.o %t 2>&1 | FileCheck %s
+
+# Don't check the message "No such file or directory".
+CHECK: llvm-ar{{(.exe|.EXE)?}}: sparkle.o:
diff --git a/test/Object/archive-long-index.test b/test/Object/archive-long-index.test
index bd530edbf418..6feb69e6d264 100644
--- a/test/Object/archive-long-index.test
+++ b/test/Object/archive-long-index.test
@@ -1,7 +1,7 @@
#
# Check if the index is appearing properly in the output file
#
-RUN: llvm-nm -s %p/Inputs/liblong_filenames.a | FileCheck -check-prefix=CHECKIDX %s
+RUN: llvm-nm -M %p/Inputs/liblong_filenames.a | FileCheck -check-prefix=CHECKIDX %s
CHECKIDX: Archive map
CHECKIDX: abcdefghijklmnopqrstuvwxyz12345678 in 1.o
@@ -17,24 +17,24 @@ CHECKIDX: b in abcdefghijklmnopqrstuvwxyz2.o
CHECKIDX: bda in abcdefghijklmnopqrstuvwxyz2.o
CHECKIDX: b in abcdefghijklmnopq.o
CHECKIDX: 1.o:
-CHECKIDX: 00000000 D abcdefghijklmnopqrstuvwxyz12345678
-CHECKIDX: U bda
-CHECKIDX: 00000000 T main
+CHECKIDX: 0000000000000000 D abcdefghijklmnopqrstuvwxyz12345678
+CHECKIDX: U bda
+CHECKIDX: 0000000000000000 T main
CHECKIDX: 2.o:
-CHECKIDX: 00000000 T fn1
+CHECKIDX: 0000000000000000 T fn1
CHECKIDX: 3.o:
-CHECKIDX: 0000000b T fn1
-CHECKIDX: 00000000 T fn3
+CHECKIDX: 000000000000000b T fn1
+CHECKIDX: 0000000000000000 T fn3
CHECKIDX: 4.o:
-CHECKIDX: C shankar
+CHECKIDX: C shankar
CHECKIDX: 5.o:
-CHECKIDX: C a
+CHECKIDX: C a
CHECKIDX: 6.o:
-CHECKIDX: C b
+CHECKIDX: C b
CHECKIDX: abcdefghijklmnopqrstuvwxyz1.o:
-CHECKIDX: C a
+CHECKIDX: C a
CHECKIDX: abcdefghijklmnopqrstuvwxyz2.o:
-CHECKIDX: C b
-CHECKIDX: 00000000 T bda
+CHECKIDX: C b
+CHECKIDX: 0000000000000000 T bda
CHECKIDX: abcdefghijklmnopq.o:
-CHECKIDX: C b
+CHECKIDX: C b
diff --git a/test/Object/archive-symtab.test b/test/Object/archive-symtab.test
index 6379504318a6..0899828bdfbe 100644
--- a/test/Object/archive-symtab.test
+++ b/test/Object/archive-symtab.test
@@ -1,6 +1,6 @@
RUN: rm -f %t.a
RUN: llvm-ar rcs %t.a %p/Inputs/trivial-object-test.elf-x86-64 %p/Inputs/trivial-object-test2.elf-x86-64
-RUN: llvm-nm -s %t.a | FileCheck %s
+RUN: llvm-nm -M %t.a | FileCheck %s
CHECK: Archive map
CHECK-NEXT: main in trivial-object-test.elf-x86-64
@@ -9,27 +9,28 @@ CHECK-NEXT: main in trivial-object-test2.elf-x86-64
CHECK-NOT: bar
CHECK: trivial-object-test.elf-x86-64:
-CHECK-NEXT: U SomeOtherFunction
-CHECK-NEXT: 00000000 T main
-CHECK-NEXT: U puts
-CHECK-NEXT: trivial-object-test2.elf-x86-64:
-CHECK-NEXT: 00000000 t bar
-CHECK-NEXT: 00000006 T foo
-CHECK-NEXT: 00000016 T main
+CHECK-NEXT: U SomeOtherFunction
+CHECK-NEXT: 0000000000000000 T main
+CHECK-NEXT: U puts
+
+CHECK: trivial-object-test2.elf-x86-64:
+CHECK-NEXT: 0000000000000000 t bar
+CHECK-NEXT: 0000000000000006 T foo
+CHECK-NEXT: 0000000000000016 T main
RUN: rm -f %t.a
RUN: llvm-ar rcS %t.a %p/Inputs/trivial-object-test.elf-x86-64 %p/Inputs/trivial-object-test2.elf-x86-64
-RUN: llvm-nm -s %t.a | FileCheck %s --check-prefix=NOMAP
+RUN: llvm-nm -M %t.a | FileCheck %s --check-prefix=NOMAP
NOMAP-NOT: Archive map
RUN: llvm-ar s %t.a
-RUN: llvm-nm -s %t.a | FileCheck %s
+RUN: llvm-nm -M %t.a | FileCheck %s
check that the archive does have a corrupt symbol table.
RUN: rm -f %t.a
RUN: cp %p/Inputs/archive-test.a-corrupt-symbol-table %t.a
-RUN: llvm-nm -s %t.a | FileCheck %s --check-prefix=CORRUPT
+RUN: llvm-nm -M %t.a | FileCheck %s --check-prefix=CORRUPT
CORRUPT: Archive map
CORRUPT-NEXT: mbin in trivial-object-test.elf-x86-64
@@ -37,23 +38,30 @@ CORRUPT-NEXT: foo in trivial-object-test2.elf-x86-64
CORRUPT-NEXT: main in trivial-object-test2.elf-x86-64
CORRUPT: trivial-object-test.elf-x86-64:
-CORRUPT-NEXT: U SomeOtherFunction
-CORRUPT-NEXT: 00000000 T main
-CORRUPT-NEXT: U puts
-CORRUPT-NEXT: trivial-object-test2.elf-x86-64:
-CORRUPT-NEXT: 00000000 t bar
-CORRUPT-NEXT: 00000006 T foo
-CORRUPT-NEXT: 00000016 T main
+CORRUPT-NEXT: U SomeOtherFunction
+CORRUPT-NEXT: 0000000000000000 T main
+CORRUPT-NEXT: U puts
+
+CORRUPT: trivial-object-test2.elf-x86-64:
+CORRUPT-NEXT: 0000000000000000 t bar
+CORRUPT-NEXT: 0000000000000006 T foo
+CORRUPT-NEXT: 0000000000000016 T main
check that we *don't* update the symbol table.
RUN: llvm-ar s %t.a
-RUN: llvm-nm -s %t.a | FileCheck %s --check-prefix=CORRUPT
+RUN: llvm-nm -M %t.a | FileCheck %s --check-prefix=CORRUPT
repeat the test with llvm-ranlib
RUN: rm -f %t.a
RUN: llvm-ar rcS %t.a %p/Inputs/trivial-object-test.elf-x86-64 %p/Inputs/trivial-object-test2.elf-x86-64
-RUN: llvm-nm -s %t.a | FileCheck %s --check-prefix=NOMAP
+RUN: llvm-nm -M %t.a | FileCheck %s --check-prefix=NOMAP
RUN: llvm-ranlib %t.a
-RUN: llvm-nm -s %t.a | FileCheck %s
+RUN: llvm-nm -M %t.a | FileCheck %s
+
+RUN: llvm-nm -M %p/Inputs/macho-archive-x86_64.a | FileCheck %s --check-prefix=BSD-MachO
+
+BSD-MachO: Archive map
+BSD-MachO: _bar in bar.o
+BSD-MachO: _foo in foo.o
diff --git a/test/Object/archive-toc.test b/test/Object/archive-toc.test
index 0a5e72b61dce..4195c4000649 100644
--- a/test/Object/archive-toc.test
+++ b/test/Object/archive-toc.test
@@ -1,20 +1,20 @@
Test reading an archive created by gnu ar
RUN: env TZ=GMT llvm-ar tv %p/Inputs/GNU.a | FileCheck %s --check-prefix=GNU -strict-whitespace
-GNU: rw-r--r-- 500/500 8 Nov 19 02:57 2004 evenlen
-GNU-NEXT: rw-r--r-- 500/500 7 Nov 19 02:57 2004 oddlen
-GNU-NEXT: rwxr-xr-x 500/500 1465 Nov 19 03:01 2004 very_long_bytecode_file_name.bc
-GNU-NEXT: rw-r--r-- 500/500 2280 Nov 19 03:04 2004 IsNAN.o
+GNU: rw-r--r-- 500/500 8 2004-11-19 02:57:37.000000000 evenlen
+GNU-NEXT: rw-r--r-- 500/500 7 2004-11-19 02:57:21.000000000 oddlen
+GNU-NEXT: rwxr-xr-x 500/500 1465 2004-11-19 03:01:31.000000000 very_long_bytecode_file_name.bc
+GNU-NEXT: rw-r--r-- 500/500 2280 2004-11-19 03:04:30.000000000 IsNAN.o
Test reading an archive created by Mac OS X ar
RUN: env TZ=GMT llvm-ar tv %p/Inputs/MacOSX.a | FileCheck %s --check-prefix=OSX -strict-whitespace
OSX-NOT: __.SYMDEF
-OSX: rw-r--r-- 501/501 8 Nov 19 02:57 2004 evenlen
-OSX-NEXT: rw-r--r-- 501/501 8 Nov 19 02:57 2004 oddlen
-OSX-NEXT: rw-r--r-- 502/502 1465 Feb 4 06:59 2010 very_long_bytecode_file_name.bc
-OSX-NEXT: rw-r--r-- 501/501 2280 Nov 19 04:32 2004 IsNAN.o
+OSX: rw-r--r-- 501/501 8 2004-11-19 02:57:37.000000000 evenlen
+OSX-NEXT: rw-r--r-- 501/501 8 2004-11-19 02:57:21.000000000 oddlen
+OSX-NEXT: rw-r--r-- 502/502 1465 2010-02-04 06:59:14.000000000 very_long_bytecode_file_name.bc
+OSX-NEXT: rw-r--r-- 501/501 2280 2004-11-19 04:32:06.000000000 IsNAN.o
Test reading an archive created on Solaris by /usr/ccs/bin/ar
RUN: env TZ=GMT llvm-ar tv %p/Inputs/SVR4.a | FileCheck %s -strict-whitespace
@@ -22,7 +22,7 @@ RUN: env TZ=GMT llvm-ar tv %p/Inputs/SVR4.a | FileCheck %s -strict-whitespace
Test reading an archive created on Solaris by /usr/xpg4/bin/ar
RUN: env TZ=GMT llvm-ar tv %p/Inputs/xpg4.a | FileCheck %s -strict-whitespace
-CHECK: rw-r--r-- 1002/102 8 Nov 19 03:24 2004 evenlen
-CHECK-NEXT: rw-r--r-- 1002/102 7 Nov 19 03:24 2004 oddlen
-CHECK-NEXT: rwxr-xr-x 1002/102 1465 Nov 19 03:24 2004 very_long_bytecode_file_name.bc
-CHECK-NEXT: rw-r--r-- 1002/102 2280 Nov 19 03:24 2004 IsNAN.o
+CHECK: rw-r--r-- 1002/102 8 2004-11-19 03:24:02.000000000 evenlen
+CHECK-NEXT: rw-r--r-- 1002/102 7 2004-11-19 03:24:02.000000000 oddlen
+CHECK-NEXT: rwxr-xr-x 1002/102 1465 2004-11-19 03:24:02.000000000 very_long_bytecode_file_name.bc
+CHECK-NEXT: rw-r--r-- 1002/102 2280 2004-11-19 03:24:02.000000000 IsNAN.o
diff --git a/test/Object/coff-archive-short.test b/test/Object/coff-archive-short.test
index fa531b3b6314..2aee95699b50 100644
--- a/test/Object/coff-archive-short.test
+++ b/test/Object/coff-archive-short.test
@@ -5,7 +5,7 @@
# than 15 characters, thus, unlike coff_archive.lib, it has no string
# table as the third member.
#
-RUN: llvm-nm --numeric-sort -s %p/Inputs/coff_archive_short.lib | FileCheck -check-prefix=CHECKIDX %s
+RUN: llvm-nm --numeric-sort -M %p/Inputs/coff_archive_short.lib | FileCheck -check-prefix=CHECKIDX %s
CHECKIDX: Archive map
CHECKIDX: _shortfn1 in short1.obj
diff --git a/test/Object/coff-archive.test b/test/Object/coff-archive.test
index 768fe1c4b129..3b0aa0ca0634 100644
--- a/test/Object/coff-archive.test
+++ b/test/Object/coff-archive.test
@@ -1,7 +1,7 @@
#
# Check if the index is appearing properly in the output file
#
-RUN: llvm-nm --numeric-sort -s %p/Inputs/coff_archive.lib | FileCheck -check-prefix=CHECKIDX %s
+RUN: llvm-nm --numeric-sort -M %p/Inputs/coff_archive.lib | FileCheck -check-prefix=CHECKIDX %s
CHECKIDX: Archive map
CHECKIDX: ??0invalid_argument@std@@QAE@PBD@Z in Debug\mymath.obj
diff --git a/test/Object/directory.ll b/test/Object/directory.ll
index 48eefcb6ecb8..c4b0bbf20efb 100644
--- a/test/Object/directory.ll
+++ b/test/Object/directory.ll
@@ -1,6 +1,6 @@
;RUN: rm -f %T/test.a
;RUN: not llvm-ar r %T/test.a . 2>&1 | FileCheck %s
-;CHECK: .: Is a directory
+;CHECK: .: {{I|i}}s a directory
;RUN: rm -f %T/test.a
;RUN: touch %T/a-very-long-file-name
diff --git a/test/Object/extract.ll b/test/Object/extract.ll
index 4e519aea7505..a4e76499ca6c 100644
--- a/test/Object/extract.ll
+++ b/test/Object/extract.ll
@@ -40,7 +40,8 @@
; new archive and checking that date.
; RUN: rm -f very_long_bytecode_file_name.bc
; RUN: llvm-ar xo %p/Inputs/GNU.a very_long_bytecode_file_name.bc
+; RUN: rm -f %t.a
; RUN: llvm-ar rc %t.a very_long_bytecode_file_name.bc
; RUN: env TZ=GMT llvm-ar tv %t.a | FileCheck %s
-CHECK: 1465 Nov 19 03:01 2004 very_long_bytecode_file_name.bc
+CHECK: 1465 2004-11-19 03:01:31.000000000 very_long_bytecode_file_name.bc
diff --git a/test/Object/mangle-ir.ll b/test/Object/mangle-ir.ll
new file mode 100644
index 000000000000..5b3cd09cb7ce
--- /dev/null
+++ b/test/Object/mangle-ir.ll
@@ -0,0 +1,14 @@
+; RUN: llvm-as %s -o - | llvm-nm - | FileCheck %s
+
+target datalayout = "m:o"
+
+; CHECK-NOT: memcpy
+; CHECK: T _f
+; CHECK-NOT: memcpy
+
+define void @f() {
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* null, i8* null, i64 0, i32 1, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
diff --git a/test/Object/nm-archive.test b/test/Object/nm-archive.test
index 0d43cc701550..7dbc22a1e8cf 100644
--- a/test/Object/nm-archive.test
+++ b/test/Object/nm-archive.test
@@ -18,6 +18,7 @@ RUN: llvm-nm %t2 | FileCheck %s -check-prefix BITCODE
BITCODE: U SomeOtherFunction
BITCODE-NEXT: T main
BITCODE-NEXT: U puts
+BITCODE-NEXT: D var
Test that we don't error with an archive with no symtab.
@@ -28,8 +29,14 @@ Or in an archive with no symtab or string table.
RUN: llvm-nm %p/Inputs/archive-test.a-gnu-minimal
-And don't crash when asked to print a non existing symtab.
-RUN: llvm-nm -s %p/Inputs/archive-test.a-gnu-minimal
+And don't crash when asked to print a non-existing symtab.
+RUN: llvm-nm -M %p/Inputs/archive-test.a-gnu-minimal
Don't reject an empty archive.
RUN: llvm-nm %p/Inputs/archive-test.a-empty
+
+This archive has an unaligned member and an unknown format member.
+GNU ar is able to parse the unaligned member and warns about the member with
+the unknown format. We should probably simply warn on both. For now, just check
+that we don't produce an error.
+RUN: llvm-nm %p/Inputs/corrupt-archive.a
diff --git a/test/Object/nm-darwin-m.test b/test/Object/nm-darwin-m.test
new file mode 100644
index 000000000000..5bb19dcacd37
--- /dev/null
+++ b/test/Object/nm-darwin-m.test
@@ -0,0 +1,53 @@
+RUN: llvm-nm -format darwin %p/Inputs/darwin-m-test1.mach0-armv7 \
+RUN: | FileCheck %s -check-prefix test1
+RUN: llvm-nm -format darwin %p/Inputs/darwin-m-test2.macho-i386 \
+RUN: | FileCheck %s -check-prefix test2
+RUN: llvm-nm -m %p/Inputs/darwin-m-test3.macho-x86-64 \
+RUN: | FileCheck %s -check-prefix test3
+
+# This tests that the various bits in the n_desc field are correct
+test1: 00000001 (absolute) non-external _a
+test1: 00000008 (common) (alignment 2^2) external _c
+test1: 0000000a (__DATA,__data) non-external [no dead strip] _d
+test1: 00000004 (__TEXT,__text) non-external [alt entry] _e
+test1: 00000000 (__TEXT,__text) non-external [symbol resolver] _r
+test1: 00000008 (__TEXT,__text) non-external [Thumb] _t
+
+# This tests that an N_INDR symbol gets its alias name, the "(for ...)" part
+test2: (undefined) external __i
+test2: (indirect) external _i (for __i)
+
+# This test uses darwin-m-test3.macho-x86-64, which is linked with
+# dylibs that have the following set of -install_names:
+# Foo.framework/Foo
+# /System/Library/Frameworks/FooPath.framework/FooPath
+# FooSuffix.framework/FooSuffix_debug
+# /System/Library/Frameworks/FooPathSuffix.framework/FooPathSuffix_profile
+# FooVers.framework/Versions/A/FooVers
+# /System/Library/Frameworks/FooPathVers.framework/Versions/B/FooPathVers
+# libx.dylib
+# libxSuffix_profile.dylib
+# /usr/local/lib/libxPathSuffix_debug.dylib
+# libATS.A_profile.dylib
+# /usr/lib/libPathATS.A_profile.dylib
+# QT.A.qtx
+# /lib/QTPath.qtx
+# /usr/lib/libSystem.B.dylib
+# to test that MachOObjectFile::guessLibraryShortName() is correctly parsing
+# them into their short names.
+test3: 0000000100000000 (__TEXT,__text) [referenced dynamically] external __mh_execute_header
+test3: (undefined) external _atsPathVersSuffix (from libPathATS)
+test3: (undefined) external _atsVersSuffix (from libATS)
+test3: (undefined) external _foo (from Foo)
+test3: (undefined) external _fooPath (from FooPath)
+test3: (undefined) external _fooPathSuffix (from FooPathSuffix)
+test3: (undefined) external _fooPathVers (from FooPathVers)
+test3: (undefined) external _fooSuffix (from FooSuffix)
+test3: (undefined) external _fooVers (from FooVers)
+test3: 0000000100000e60 (__TEXT,__text) external _main
+test3: (undefined) external _qt (from QT)
+test3: (undefined) external _qtPath (from QTPath)
+test3: (undefined) external _x (from libx)
+test3: (undefined) external _xPathSuffix (from libxPathSuffix)
+test3: (undefined) external _xSuffix (from libxSuffix)
+test3: (undefined) external dyld_stub_binder (from libSystem)
diff --git a/test/Object/nm-error.test b/test/Object/nm-error.test
index 146b88713f87..bed9ccbd1f21 100644
--- a/test/Object/nm-error.test
+++ b/test/Object/nm-error.test
@@ -1,12 +1,10 @@
Test that llvm-nm returns an error because of the unknown file type, but
keeps processing subsequent files.
-Note: We use a temporary file since the tests don't run with pipefail.
-
RUN: touch %t
RUN: not llvm-nm %p/Inputs/trivial-object-test.elf-i386 %t \
-RUN: %p/Inputs/trivial-object-test.elf-i386 > %t.log
-RUN: FileCheck %s < %t.log
+RUN: %p/Inputs/trivial-object-test.elf-i386 | \
+RUN: FileCheck %s
CHECK: U SomeOtherFunction
CHECK: 00000000 T main
diff --git a/test/Object/nm-shared-object.test b/test/Object/nm-shared-object.test
index a57b9401ad42..32ae6a861529 100644
--- a/test/Object/nm-shared-object.test
+++ b/test/Object/nm-shared-object.test
@@ -1,23 +1,31 @@
RUN: llvm-nm -D %p/Inputs/shared-object-test.elf-i386 \
RUN: | FileCheck %s -check-prefix ELF-32
+
+ELF-32-NOT: U
+ELF-32: 000012c8 A __bss_start
+ELF-32: 000012c8 A _edata
+ELF-32: 000012cc A _end
+ELF-32: 000012c8 B common_sym
+ELF-32: 000012c4 D defined_sym
+ELF-32: 000001f0 T global_func
+ELF-32: 00000000 D tls_sym
+
RUN: llvm-nm -D %p/Inputs/shared-object-test.elf-x86-64 \
RUN: | FileCheck %s -check-prefix ELF-64
-; Note: tls_sym should be 'D' (not '?'), but TLS is not
-; yet recognized by ObjectFile.
+ELF-64-NOT: U
+ELF-64: 0000000000200454 A __bss_start
+ELF-64: 0000000000200454 A _edata
+ELF-64: 0000000000200458 A _end
+ELF-64: 0000000000200454 B common_sym
+ELF-64: 0000000000200450 D defined_sym
+ELF-64: 00000000000002f0 T global_func
+ELF-64: 0000000000000000 D tls_sym
+
+RUN: not llvm-nm -D %p/Inputs/weak-global-symbol.macho-i386 2>&1 \
+RUN: | FileCheck %s -check-prefix ERROR
-ELF-32: 0012c8 A __bss_start
-ELF-32: 0012c8 A _edata
-ELF-32: 0012cc A _end
-ELF-32: 0012c8 B common_sym
-ELF-32: 0012c4 D defined_sym
-ELF-32: 0001f0 T global_func
-ELF-32: ? tls_sym
+RUN: not llvm-nm -D %p/Inputs/trivial-object-test.coff-i386 2>&1 \
+RUN: | FileCheck %s -check-prefix ERROR
-ELF-64: 200454 A __bss_start
-ELF-64: 200454 A _edata
-ELF-64: 200458 A _end
-ELF-64: 200454 B common_sym
-ELF-64: 200450 D defined_sym
-ELF-64: 0002f0 T global_func
-ELF-64: ? tls_sym
+ERROR: File format has no dynamic symbol table.
diff --git a/test/Object/nm-trivial-object.test b/test/Object/nm-trivial-object.test
index 748d6f20d510..49c7683c80d4 100644
--- a/test/Object/nm-trivial-object.test
+++ b/test/Object/nm-trivial-object.test
@@ -1,15 +1,45 @@
-RUN: yaml2obj %p/Inputs/COFF/i386.yaml | llvm-nm \
+RUN: yaml2obj %p/Inputs/COFF/i386.yaml | llvm-nm - \
RUN: | FileCheck %s -check-prefix COFF
-RUN: yaml2obj %p/Inputs/COFF/x86-64.yaml | llvm-nm \
+RUN: yaml2obj %p/Inputs/COFF/x86-64.yaml | llvm-nm - \
RUN: | FileCheck %s -check-prefix COFF
RUN: llvm-nm %p/Inputs/trivial-object-test.elf-i386 \
RUN: | FileCheck %s -check-prefix ELF
RUN: llvm-nm %p/Inputs/trivial-object-test.elf-x86-64 \
-RUN: | FileCheck %s -check-prefix ELF
+RUN: | FileCheck %s -check-prefix ELF64
+RUN: llvm-nm %p/Inputs/weak.elf-x86-64 \
+RUN: | FileCheck %s -check-prefix WEAK-ELF64
+RUN: llvm-nm %p/Inputs/absolute.elf-x86-64 \
+RUN: | FileCheck %s -check-prefix ABSOLUTE-ELF64
RUN: llvm-nm %p/Inputs/trivial-object-test.macho-i386 \
RUN: | FileCheck %s -check-prefix macho
+RUN: llvm-nm -U %p/Inputs/trivial-object-test.macho-i386 \
+RUN: | FileCheck %s -check-prefix macho-U
RUN: llvm-nm %p/Inputs/trivial-object-test.macho-x86-64 \
RUN: | FileCheck %s -check-prefix macho64
+RUN: llvm-nm %p/Inputs/macho-text-data-bss.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix macho-tdb
+RUN: llvm-nm -j %p/Inputs/macho-text-data-bss.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix macho-j
+RUN: llvm-nm -r %p/Inputs/macho-text-data-bss.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix macho-r
+RUN: llvm-nm %p/Inputs/macho-text-data-bss.macho-x86_64 -s __DATA __data \
+RUN: | FileCheck %s -check-prefix macho-s
+RUN: llvm-nm -x %p/Inputs/macho-text-data-bss.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix macho-x
+RUN: llvm-nm -p -a %p/Inputs/macho-hello-g.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix macho-pa
+RUN: llvm-nm %p/Inputs/common.coff-i386 \
+RUN: | FileCheck %s -check-prefix COFF-COMMON
+RUN: llvm-nm %p/Inputs/relocatable-with-section-address.elf-x86-64 \
+RUN: | FileCheck %s -check-prefix ELF-SEC-ADDR64
+RUN: llvm-nm %p/Inputs/thumb-symbols.elf.arm \
+RUN: | FileCheck %s -check-prefix ELF-THUMB
+RUN: mkdir -p %t
+RUN: cd %t
+RUN: cp %p/Inputs/trivial-object-test.macho-i386 a.out
+RUN: llvm-nm | FileCheck %s -check-prefix A-OUT
+REQUIRES: shell
+
COFF: 00000000 d .data
COFF: 00000000 t .text
@@ -18,17 +48,100 @@ COFF: U {{_?}}SomeOtherFunction
COFF: 00000000 T {{_?}}main
COFF: U {{_?}}puts
+COFF-COMMON: 00000000 b .bss
+COFF-COMMON-NEXT: 00000000 d .data
+COFF-COMMON-NEXT: 00000000 d .drectve
+COFF-COMMON-NEXT: 00000000 n .file
+COFF-COMMON-NEXT: 00000000 r .rdata$zzz
+COFF-COMMON-NEXT: 00000000 t .text
+COFF-COMMON-NEXT: C _a
+
+
ELF-NOT: U
ELF: U SomeOtherFunction
ELF: 00000000 T main
ELF: U puts
+ELF64: U SomeOtherFunction
+ELF64: 0000000000000000 T main
+ELF64: U puts
+
+WEAK-ELF64: w f1
+WEAK-ELF64: 0000000000000000 W f2
+WEAK-ELF64: v x1
+WEAK-ELF64: 0000000000000000 V x2
-macho: 00000000 U _SomeOtherFunction
+ABSOLUTE-ELF64: 0000000000000123 a a1
+ABSOLUTE-ELF64: 0000000000000123 A a2
+
+macho: U _SomeOtherFunction
macho: 00000000 T _main
-macho: 00000000 U _puts
+macho: U _puts
+
+macho-U-NOT: U _SomeOtherFunction
+macho-U: 00000000 T _main
+macho-U-NOT: U _puts
+
+macho64: 0000000000000028 s L_.str
+macho64: U _SomeOtherFunction
+macho64: 0000000000000000 T _main
+macho64: U _puts
+
+macho-tdb: 0000000000000030 s EH_frame0
+macho-tdb: 0000000000000070 b _b
+macho-tdb: 000000000000000c D _d
+macho-tdb: 0000000000000000 T _t
+macho-tdb: 0000000000000048 S _t.eh
+
+macho-j: EH_frame0
+macho-j: _b
+macho-j: _d
+macho-j: _t
+macho-j: _t.eh
+
+macho-r: 0000000000000048 S _t.eh
+macho-r-NEXT: 0000000000000000 T _t
+macho-r-NEXT: 000000000000000c D _d
+macho-r-NEXT: 0000000000000070 b _b
+macho-r-NEXT: 0000000000000030 s EH_frame0
+
+macho-s: 000000000000000c D _d
+macho-s-NOT: 0000000000000048 S _t.eh
+macho-s-NOT: 0000000000000000 T _t
+macho-s-NOT: 0000000000000070 b _b
+macho-s-NOT: 0000000000000030 s EH_frame0
+
+macho-x: 0000000000000030 0e 05 0000 00000010 EH_frame0
+macho-x: 0000000000000070 0e 03 0000 0000000d _b
+macho-x: 000000000000000c 0f 02 0000 00000004 _d
+macho-x: 0000000000000000 0f 01 0000 00000001 _t
+macho-x: 0000000000000048 0f 05 0000 00000007 _t.eh
+
+macho-pa: 0000000000000000 - 00 0000 SO /Volumes/SandBox/
+macho-pa: 0000000000000000 - 00 0000 SO hello.c
+macho-pa: 0000000053c8408d - 03 0001 OSO /Volumes/SandBox/hello.o
+macho-pa: 0000000100000f30 - 01 0000 BNSYM
+macho-pa: 0000000100000f30 - 01 0000 FUN _main
+macho-pa: 000000000000003b - 00 0000 FUN
+macho-pa: 000000000000003b - 01 0000 ENSYM
+macho-pa: 0000000000000000 - 01 0000 SO
+macho-pa: 0000000100000000 T __mh_execute_header
+macho-pa: 0000000100000f30 T _main
+macho-pa: U _printf
+macho-pa: U dyld_stub_binder
+
+Test that nm uses addresses even with ELF .o files.
+ELF-SEC-ADDR64: 0000000000000058 D a
+ELF-SEC-ADDR64-NEXT: 000000000000005c D b
+ELF-SEC-ADDR64-NEXT: 0000000000000040 T f
+ELF-SEC-ADDR64-NEXT: 0000000000000050 T g
+ELF-SEC-ADDR64-NEXT: 0000000000000060 D p
+
+
+Test that we drop the thumb bit only from function addresses.
+ELF-THUMB: 00000000 t f
+ELF-THUMB: 00000003 t g
-macho64: 00000028 s L_.str
-macho64: 00000000 U _SomeOtherFunction
-macho64: 00000000 T _main
-macho64: 00000000 U _puts
+A-OUT: U _SomeOtherFunction
+A-OUT: 00000000 T _main
+A-OUT: U _puts
diff --git a/test/Object/nm-universal-binary.test b/test/Object/nm-universal-binary.test
index 8febfdfab39c..889377b5b35b 100644
--- a/test/Object/nm-universal-binary.test
+++ b/test/Object/nm-universal-binary.test
@@ -1,6 +1,31 @@
-RUN: llvm-nm %p/Inputs/macho-universal.x86_64.i386 | FileCheck %s
+RUN: llvm-nm -arch all %p/Inputs/macho-universal.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix CHECK-OBJ
+RUN: llvm-nm -arch x86_64 %p/Inputs/macho-universal.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix CHECK-OBJ-x86_64
+RUN: llvm-nm -arch all %p/Inputs/macho-universal-archive.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix CHECK-AR
+RUN: llvm-nm -arch i386 %p/Inputs/macho-universal-archive.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix CHECK-AR-i386
-CHECK: macho-universal.x86_64.i386:x86_64
-CHECK: main
-CHECK: macho-universal.x86_64.i386:i386
-CHECK: main
+CHECK-OBJ: macho-universal.x86_64.i386 (for architecture x86_64):
+CHECK-OBJ: 0000000100000f60 T _main
+CHECK-OBJ: macho-universal.x86_64.i386 (for architecture i386):
+CHECK-OBJ: 00001fa0 T _main
+
+CHECK-OBJ-x86_64: 0000000100000000 T __mh_execute_header
+CHECK-OBJ-x86_64: 0000000100000f60 T _main
+CHECK-OBJ-x86_64: U dyld_stub_binder
+
+CHECK-AR: macho-universal-archive.x86_64.i386(hello.o) (for architecture x86_64):
+CHECK-AR: 0000000000000068 s EH_frame0
+CHECK-AR: 000000000000003b s L_.str
+CHECK-AR: 0000000000000000 T _main
+CHECK-AR: 0000000000000080 S _main.eh
+CHECK-AR: U _printf
+CHECK-AR: macho-universal-archive.x86_64.i386(foo.o) (for architecture i386):
+CHECK-AR: 00000008 D _bar
+CHECK-AR: 00000000 T _foo
+
+CHECK-AR-i386: macho-universal-archive.x86_64.i386(foo.o):
+CHECK-AR-i386: 00000008 D _bar
+CHECK-AR-i386: 00000000 T _foo
diff --git a/test/Object/obj2yaml-coff-long-file-symbol.test b/test/Object/obj2yaml-coff-long-file-symbol.test
new file mode 100644
index 000000000000..3a4d380bd8e3
--- /dev/null
+++ b/test/Object/obj2yaml-coff-long-file-symbol.test
@@ -0,0 +1,3 @@
+RUN: yaml2obj %p/Inputs/COFF/long-file-symbol.yaml | obj2yaml | FileCheck %s --check-prefix COFF-I386
+
+COFF-I386: File: filename_with_22_chars
diff --git a/test/Object/obj2yaml-coff-weak-external.test b/test/Object/obj2yaml-coff-weak-external.test
new file mode 100644
index 000000000000..4ecdc1bf6f54
--- /dev/null
+++ b/test/Object/obj2yaml-coff-weak-external.test
@@ -0,0 +1,3 @@
+RUN: yaml2obj %p/Inputs/COFF/weak-external.yaml | obj2yaml | FileCheck %s --check-prefix COFF-I386
+
+COFF-I386: Characteristics: 0
diff --git a/test/Object/obj2yaml.test b/test/Object/obj2yaml.test
index 49541336c682..1c79e98fdc3b 100644
--- a/test/Object/obj2yaml.test
+++ b/test/Object/obj2yaml.test
@@ -1,6 +1,10 @@
RUN: obj2yaml %p/Inputs/trivial-object-test.coff-i386 | FileCheck %s --check-prefix COFF-I386
RUN: obj2yaml %p/Inputs/trivial-object-test.coff-x86-64 | FileCheck %s --check-prefix COFF-X86-64
-
+RUN: obj2yaml %p/Inputs/trivial-object-test.elf-mipsel | FileCheck %s --check-prefix ELF-MIPSEL
+RUN: obj2yaml %p/Inputs/trivial-object-test.elf-mips64el | FileCheck %s --check-prefix ELF-MIPS64EL
+RUN: obj2yaml %p/Inputs/trivial-object-test.elf-x86-64 | FileCheck %s --check-prefix ELF-X86-64
+RUN: obj2yaml %p/Inputs/unwind-section.elf-x86-64 \
+RUN: | FileCheck %s --check-prefix ELF-X86-64-UNWIND
COFF-I386: header:
COFF-I386-NEXT: Machine: IMAGE_FILE_MACHINE_I386
@@ -36,8 +40,12 @@ COFF-I386-NEXT: SectionNumber: 1
COFF-I386-NEXT: SimpleType: IMAGE_SYM_TYPE_NULL
COFF-I386-NEXT: ComplexType: IMAGE_SYM_DTYPE_NULL
COFF-I386-NEXT: StorageClass: IMAGE_SYM_CLASS_STATIC
-COFF-I386-NEXT: NumberOfAuxSymbols: 1
-COFF-I386-NEXT: AuxiliaryData: 240000000300000000000000010000000000
+COFF-I386-NEXT: SectionDefinition:
+COFF-I386-NEXT: Length: 36
+COFF-I386-NEXT: NumberOfRelocations: 3
+COFF-I386-NEXT: NumberOfLinenumbers: 0
+COFF-I386-NEXT: CheckSum: 0
+COFF-I386-NEXT: Number: 1
COFF-I386: - Name: .data
COFF-I386-NEXT: Value: 0
@@ -45,8 +53,12 @@ COFF-I386-NEXT: SectionNumber: 2
COFF-I386-NEXT: SimpleType: IMAGE_SYM_TYPE_NULL
COFF-I386-NEXT: ComplexType: IMAGE_SYM_DTYPE_NULL
COFF-I386-NEXT: StorageClass: IMAGE_SYM_CLASS_STATIC
-COFF-I386-NEXT: NumberOfAuxSymbols: 1
-COFF-I386-NEXT: AuxiliaryData: 0D0000000000000000000000020000000000
+COFF-I386-NEXT: SectionDefinition:
+COFF-I386-NEXT: Length: 13
+COFF-I386-NEXT: NumberOfRelocations: 0
+COFF-I386-NEXT: NumberOfLinenumbers: 0
+COFF-I386-NEXT: CheckSum: 0
+COFF-I386-NEXT: Number: 2
COFF-I386: - Name: _main
COFF-I386-NEXT: Value: 0
@@ -104,6 +116,16 @@ COFF-X86-64-NEXT: Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_S
COFF-X86-64-NEXT: Alignment: 1
COFF-X86-64-NEXT: SectionData: 48656C6C6F20576F726C642100
+COFF-X86-64: - Name: '.CRT$XCU'
+COFF-X86-64-NEXT: Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+COFF-X86-64-NEXT: Alignment: 8
+COFF-X86-64-NEXT: SectionData: '0000000000000000'
+
+COFF-X86-64: Relocations:
+COFF-X86-64-NEXT: - VirtualAddress: 0
+COFF-X86-64-NEXT: SymbolName: '??__Ex@@YAXXZ'
+COFF-X86-64-NEXT: Type: IMAGE_REL_AMD64_ADDR64
+
COFF-X86-64: symbols:
COFF-X86-64-NEXT: - Name: .text
COFF-X86-64-NEXT: Value: 0
@@ -111,8 +133,12 @@ COFF-X86-64-NEXT: SectionNumber: 1
COFF-X86-64-NEXT: SimpleType: IMAGE_SYM_TYPE_NULL
COFF-X86-64-NEXT: ComplexType: IMAGE_SYM_DTYPE_NULL
COFF-X86-64-NEXT: StorageClass: IMAGE_SYM_CLASS_STATIC
-COFF-X86-64-NEXT: NumberOfAuxSymbols: 1
-COFF-X86-64-NEXT: AuxiliaryData: 260000000300000000000000010000000000
+COFF-X86-64-NEXT: SectionDefinition:
+COFF-X86-64-NEXT: Length: 38
+COFF-X86-64-NEXT: NumberOfRelocations: 3
+COFF-X86-64-NEXT: NumberOfLinenumbers: 0
+COFF-X86-64-NEXT: CheckSum: 0
+COFF-X86-64-NEXT: Number: 1
COFF-X86-64: - Name: .data
COFF-X86-64-NEXT: Value: 0
@@ -120,14 +146,18 @@ COFF-X86-64-NEXT: SectionNumber: 2
COFF-X86-64-NEXT: SimpleType: IMAGE_SYM_TYPE_NULL
COFF-X86-64-NEXT: ComplexType: IMAGE_SYM_DTYPE_NULL
COFF-X86-64-NEXT: StorageClass: IMAGE_SYM_CLASS_STATIC
-COFF-X86-64-NEXT: NumberOfAuxSymbols: 1
-COFF-X86-64-NEXT: AuxiliaryData: 0D0000000000000000000000020000000000
+COFF-X86-64-NEXT: SectionDefinition:
+COFF-X86-64-NEXT: Length: 13
+COFF-X86-64-NEXT: NumberOfRelocations: 0
+COFF-X86-64-NEXT: NumberOfLinenumbers: 0
+COFF-X86-64-NEXT: CheckSum: 0
+COFF-X86-64-NEXT: Number: 2
COFF-X86-64: - Name: main
COFF-X86-64-NEXT: Value: 0
COFF-X86-64-NEXT: SectionNumber: 1
COFF-X86-64-NEXT: SimpleType: IMAGE_SYM_TYPE_NULL
-COFF-X86-64-NEXT: ComplexType: IMAGE_SYM_DTYPE_NULL
+COFF-X86-64-NEXT: ComplexType: IMAGE_SYM_DTYPE_FUNCTION
COFF-X86-64-NEXT: StorageClass: IMAGE_SYM_CLASS_EXTERNAL
COFF-X86-64: - Name: L.str
@@ -150,4 +180,256 @@ COFF-X86-64-NEXT: SectionNumber: 0
COFF-X86-64-NEXT: SimpleType: IMAGE_SYM_TYPE_NULL
COFF-X86-64-NEXT: ComplexType: IMAGE_SYM_DTYPE_NULL
COFF-X86-64-NEXT: StorageClass: IMAGE_SYM_CLASS_EXTERNAL
-COFF-X86-64-NOT: NumberOfAuxSymbols
+
+COFF-X86-64: - Name: '??__Ex@@YAXXZ'
+COFF-X86-64-NEXT: Value: 0
+COFF-X86-64-NEXT: SectionNumber: 3
+COFF-X86-64-NEXT: SimpleType: IMAGE_SYM_TYPE_NULL
+COFF-X86-64-NEXT: ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+COFF-X86-64-NEXT: StorageClass: IMAGE_SYM_CLASS_STATIC
+
+ELF-MIPSEL: FileHeader:
+ELF-MIPSEL-NEXT: Class: ELFCLASS32
+ELF-MIPSEL-NEXT: Data: ELFDATA2LSB
+ELF-MIPSEL-NEXT: OSABI: ELFOSABI_GNU
+ELF-MIPSEL-NEXT: Type: ET_REL
+ELF-MIPSEL-NEXT: Machine: EM_MIPS
+ELF-MIPSEL-NEXT: Flags: [ EF_MIPS_NOREORDER, EF_MIPS_PIC, EF_MIPS_CPIC, EF_MIPS_ABI_O32, EF_MIPS_ARCH_32 ]
+ELF-MIPSEL-NEXT: Sections:
+ELF-MIPSEL-NEXT: - Name: .text
+ELF-MIPSEL-NEXT: Type: SHT_PROGBITS
+ELF-MIPSEL-NEXT: Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000004
+ELF-MIPSEL-NEXT: Content: 0000023C00004224E8FFBD271400BFAF1000B0AF218059000000018E000024240000198E09F8200321E000020000198E09F8200321E00002000002241000B08F1400BF8F0800E0031800BD27
+ELF-MIPSEL-NEXT: - Name: .rel.text
+ELF-MIPSEL-NEXT: Type: SHT_REL
+ELF-MIPSEL-NEXT: Link: .symtab
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000004
+ELF-MIPSEL-NEXT: Info: .text
+ELF-MIPSEL-NEXT: Relocations:
+ELF-MIPSEL-NEXT: - Offset: 0x0000000000000000
+ELF-MIPSEL-NEXT: Symbol: _gp_disp
+ELF-MIPSEL-NEXT: Type: R_MIPS_HI16
+ELF-MIPSEL-NEXT: Addend: 0
+ELF-MIPSEL-NEXT: - Offset: 0x0000000000000004
+ELF-MIPSEL-NEXT: Symbol: _gp_disp
+ELF-MIPSEL-NEXT: Type: R_MIPS_LO16
+ELF-MIPSEL-NEXT: Addend: 0
+ELF-MIPSEL-NEXT: - Offset: 0x0000000000000018
+ELF-MIPSEL-NEXT: Symbol: '$.str'
+ELF-MIPSEL-NEXT: Type: R_MIPS_GOT16
+ELF-MIPSEL-NEXT: Addend: 0
+ELF-MIPSEL-NEXT: - Offset: 0x000000000000001C
+ELF-MIPSEL-NEXT: Symbol: '$.str'
+ELF-MIPSEL-NEXT: Type: R_MIPS_LO16
+ELF-MIPSEL-NEXT: Addend: 0
+ELF-MIPSEL-NEXT: - Offset: 0x0000000000000020
+ELF-MIPSEL-NEXT: Symbol: puts
+ELF-MIPSEL-NEXT: Type: R_MIPS_CALL16
+ELF-MIPSEL-NEXT: Addend: 0
+ELF-MIPSEL-NEXT: - Offset: 0x000000000000002C
+ELF-MIPSEL-NEXT: Symbol: SomeOtherFunction
+ELF-MIPSEL-NEXT: Type: R_MIPS_CALL16
+ELF-MIPSEL-NEXT: Addend: 0
+ELF-MIPSEL-NEXT: - Name: .data
+ELF-MIPSEL-NEXT: Type: SHT_PROGBITS
+ELF-MIPSEL-NEXT: Flags: [ SHF_WRITE, SHF_ALLOC ]
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000004
+ELF-MIPSEL-NEXT: Content: ''
+ELF-MIPSEL-NEXT: - Name: .bss
+ELF-MIPSEL-NEXT: Type: SHT_NOBITS
+ELF-MIPSEL-NEXT: Flags: [ SHF_WRITE, SHF_ALLOC ]
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000004
+ELF-MIPSEL-NEXT: Content: 48656C6C
+ELF-MIPSEL-NEXT: - Name: .mdebug.abi32
+ELF-MIPSEL-NEXT: Type: SHT_PROGBITS
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000001
+ELF-MIPSEL-NEXT: Content: ''
+ELF-MIPSEL-NEXT: - Name: .rodata.str1.1
+ELF-MIPSEL-NEXT: Type: SHT_PROGBITS
+ELF-MIPSEL-NEXT: Flags: [ SHF_ALLOC, SHF_MERGE, SHF_STRINGS ]
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000001
+ELF-MIPSEL-NEXT: Content: 48656C6C6F20576F726C640A00
+ELF-MIPSEL-NEXT: - Name: .reginfo
+ELF-MIPSEL-NEXT: Type: SHT_MIPS_REGINFO
+ELF-MIPSEL-NEXT: Flags: [ SHF_ALLOC ]
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000001
+ELF-MIPSEL-NEXT: Content: '000000000000000000000000000000000000000000000000'
+ELF-MIPSEL-NEXT: - Name: .MIPS.abiflags
+ELF-MIPSEL-NEXT: Type: SHT_MIPS_ABIFLAGS
+ELF-MIPSEL-NEXT: Flags: [ SHF_ALLOC ]
+ELF-MIPSEL-NEXT: AddressAlign: 0x0000000000000008
+ELF-MIPSEL-NEXT: Content: '000020010101000100000000000000000100000000000000'
+ELF-MIPSEL-NEXT: Symbols:
+ELF-MIPSEL-NEXT: Local:
+ELF-MIPSEL-NEXT: - Name: trivial.ll
+ELF-MIPSEL-NEXT: Type: STT_FILE
+ELF-MIPSEL-NEXT: - Name: '$.str'
+ELF-MIPSEL-NEXT: Type: STT_OBJECT
+ELF-MIPSEL-NEXT: Section: .rodata.str1.1
+ELF-MIPSEL-NEXT: Size: 0x000000000000000D
+ELF-MIPSEL-NEXT: - Name: .text
+ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: Section: .text
+ELF-MIPSEL-NEXT: - Name: .data
+ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: Section: .data
+ELF-MIPSEL-NEXT: - Name: .bss
+ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: Section: .bss
+ELF-MIPSEL-NEXT: - Name: .mdebug.abi32
+ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: Section: .mdebug.abi32
+ELF-MIPSEL-NEXT: - Name: .rodata.str1.1
+ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: Section: .rodata.str1.1
+ELF-MIPSEL-NEXT: - Name: .reginfo
+ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: Section: .reginfo
+ELF-MIPSEL-NEXT: - Name: .MIPS.abiflags
+ELF-MIPSEL-NEXT: Type: STT_SECTION
+ELF-MIPSEL-NEXT: Section: .MIPS.abiflags
+ELF-MIPSEL-NEXT: Global:
+ELF-MIPSEL-NEXT: - Name: main
+ELF-MIPSEL-NEXT: Type: STT_FUNC
+ELF-MIPSEL-NEXT: Section: .text
+ELF-MIPSEL-NEXT: Size: 0x000000000000004C
+ELF-MIPSEL-NEXT: - Name: var
+ELF-MIPSEL-NEXT: Type: STT_OBJECT
+ELF-MIPSEL-NEXT: Section: .bss
+ELF-MIPSEL-NEXT: Size: 0x0000000000000004
+ELF-MIPSEL-NEXT: - Name: SomeOtherFunction
+ELF-MIPSEL-NEXT: - Name: _gp_disp
+ELF-MIPSEL-NEXT: - Name: puts
+
+ELF-MIPS64EL: FileHeader:
+ELF-MIPS64EL-NEXT: Class: ELFCLASS64
+ELF-MIPS64EL-NEXT: Data: ELFDATA2LSB
+ELF-MIPS64EL-NEXT: Type: ET_REL
+ELF-MIPS64EL-NEXT: Machine: EM_MIPS
+ELF-MIPS64EL-NEXT: Flags: [ EF_MIPS_ARCH_3 ]
+ELF-MIPS64EL-NEXT: Sections:
+ELF-MIPS64EL-NEXT: - Name: .text
+ELF-MIPS64EL-NEXT: Type: SHT_PROGBITS
+ELF-MIPS64EL-NEXT: Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ELF-MIPS64EL-NEXT: AddressAlign: 0x0000000000000010
+ELF-MIPS64EL-NEXT: Content: ''
+ELF-MIPS64EL-NEXT: - Name: .data
+ELF-MIPS64EL-NEXT: Type: SHT_PROGBITS
+ELF-MIPS64EL-NEXT: Flags: [ SHF_WRITE, SHF_ALLOC ]
+ELF-MIPS64EL-NEXT: AddressAlign: 0x0000000000000010
+ELF-MIPS64EL-NEXT: Content: '00000000000000000000000000000000'
+ELF-MIPS64EL-NEXT: - Name: .rela.data
+ELF-MIPS64EL-NEXT: Type: SHT_RELA
+ELF-MIPS64EL-NEXT: Link: .symtab
+ELF-MIPS64EL-NEXT: AddressAlign: 0x0000000000000008
+ELF-MIPS64EL-NEXT: Info: .data
+ELF-MIPS64EL-NEXT: Relocations:
+ELF-MIPS64EL-NEXT: - Offset: 0
+ELF-MIPS64EL-NEXT: Symbol: zed
+ELF-MIPS64EL-NEXT: Type: R_MIPS_64
+ELF-MIPS64EL-NEXT: Addend: 0
+ELF-MIPS64EL-NEXT: - Name: .bss
+ELF-MIPS64EL-NEXT: Type: SHT_NOBITS
+ELF-MIPS64EL-NEXT: Flags: [ SHF_WRITE, SHF_ALLOC ]
+ELF-MIPS64EL-NEXT: AddressAlign: 0x0000000000000010
+ELF-MIPS64EL-NEXT: Content: ''
+ELF-MIPS64EL-NEXT: - Name: .MIPS.options
+ELF-MIPS64EL-NEXT: Type: SHT_MIPS_OPTIONS
+ELF-MIPS64EL-NEXT: Flags: [ SHF_ALLOC ]
+ELF-MIPS64EL-NEXT: AddressAlign: 0x0000000000000008
+ELF-MIPS64EL-NEXT: Content: '01280000000000000000000000000000000000000000000000000000000000000000000000000000'
+ELF-MIPS64EL-NEXT: - Name: .pdr
+ELF-MIPS64EL-NEXT: Type: SHT_PROGBITS
+ELF-MIPS64EL-NEXT: AddressAlign: 0x0000000000000004
+ELF-MIPS64EL-NEXT: Content: ''
+ELF-MIPS64EL-NEXT: Symbols:
+ELF-MIPS64EL-NEXT: Local:
+ELF-MIPS64EL-NEXT: - Name: .text
+ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: Section: .text
+ELF-MIPS64EL-NEXT: - Name: .data
+ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: Section: .data
+ELF-MIPS64EL-NEXT: - Name: .bss
+ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: Section: .bss
+ELF-MIPS64EL-NEXT: - Name: bar
+ELF-MIPS64EL-NEXT: Section: .data
+ELF-MIPS64EL-NEXT: - Name: .MIPS.options
+ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: Section: .MIPS.options
+ELF-MIPS64EL-NEXT: - Name: .pdr
+ELF-MIPS64EL-NEXT: Type: STT_SECTION
+ELF-MIPS64EL-NEXT: Section: .pdr
+ELF-MIPS64EL-NEXT: Global:
+ELF-MIPS64EL-NEXT: - Name: zed
+
+ELF-X86-64: FileHeader:
+ELF-X86-64-NEXT: Class: ELFCLASS64
+ELF-X86-64-NEXT: Data: ELFDATA2LSB
+ELF-X86-64-NEXT: OSABI: ELFOSABI_GNU
+ELF-X86-64-NEXT: Type: ET_REL
+ELF-X86-64-NEXT: Machine: EM_X86_64
+ELF-X86-64-NEXT: Sections:
+ELF-X86-64-NEXT: - Name: .text
+ELF-X86-64-NEXT: Type: SHT_PROGBITS
+ELF-X86-64-NEXT: Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ELF-X86-64-NEXT: AddressAlign: 0x0000000000000010
+ELF-X86-64-NEXT: Content: 4883EC08C744240400000000BF00000000E80000000030C0E8000000008B4424044883C408C3
+ELF-X86-64-NEXT: - Name: .rodata.str1.1
+ELF-X86-64-NEXT: Type: SHT_PROGBITS
+ELF-X86-64-NEXT: Flags: [ SHF_ALLOC, SHF_MERGE, SHF_STRINGS ]
+ELF-X86-64-NEXT: Address: 0x0000000000000026
+ELF-X86-64-NEXT: AddressAlign: 0x0000000000000001
+ELF-X86-64-NEXT: Content: 48656C6C6F20576F726C642100
+ELF-X86-64-NEXT: - Name: .note.GNU-stack
+ELF-X86-64-NEXT: Type: SHT_PROGBITS
+ELF-X86-64-NEXT: Address: 0x0000000000000033
+ELF-X86-64-NEXT: AddressAlign: 0x0000000000000001
+ELF-X86-64-NEXT: Content: ''
+ELF-X86-64-NEXT: - Name: .rela.text
+ELF-X86-64-NEXT: Type: SHT_RELA
+ELF-X86-64-NEXT: Address: 0x0000000000000038
+ELF-X86-64-NEXT: Link: .symtab
+ELF-X86-64-NEXT: AddressAlign: 0x0000000000000008
+ELF-X86-64-NEXT: Info: .text
+ELF-X86-64-NEXT: Relocations:
+ELF-X86-64-NEXT: - Offset: 0x000000000000000D
+ELF-X86-64-NEXT: Symbol: .rodata.str1.1
+ELF-X86-64-NEXT: Type: R_X86_64_32S
+ELF-X86-64-NEXT: Addend: 0
+ELF-X86-64-NEXT: - Offset: 0x0000000000000012
+ELF-X86-64-NEXT: Symbol: puts
+ELF-X86-64-NEXT: Type: R_X86_64_PC32
+ELF-X86-64-NEXT: Addend: -4
+ELF-X86-64-NEXT: - Offset: 0x0000000000000019
+ELF-X86-64-NEXT: Symbol: SomeOtherFunction
+ELF-X86-64-NEXT: Type: R_X86_64_PC32
+ELF-X86-64-NEXT: Addend: -4
+ELF-X86-64-NEXT: Symbols:
+ELF-X86-64-NEXT: Local:
+ELF-X86-64-NEXT: - Name: trivial-object-test.s
+ELF-X86-64-NEXT: Type: STT_FILE
+ELF-X86-64-NEXT: - Name: .text
+ELF-X86-64-NEXT: Type: STT_SECTION
+ELF-X86-64-NEXT: Section: .text
+ELF-X86-64-NEXT: - Name: .rodata.str1.1
+ELF-X86-64-NEXT: Type: STT_SECTION
+ELF-X86-64-NEXT: Section: .rodata.str1.1
+ELF-X86-64-NEXT: - Name: .note.GNU-stack
+ELF-X86-64-NEXT: Type: STT_SECTION
+ELF-X86-64-NEXT: Section: .note.GNU-stack
+ELF-X86-64-NEXT: Global:
+ELF-X86-64-NEXT: - Name: main
+ELF-X86-64-NEXT: Type: STT_FUNC
+ELF-X86-64-NEXT: Section: .text
+ELF-X86-64-NEXT: Size: 0x0000000000000026
+ELF-X86-64-NEXT: - Name: SomeOtherFunction
+ELF-X86-64-NEXT: - Name: puts
+
+ELF-X86-64-UNWIND: - Name: .eh_frame
+ELF-X86-64-UNWIND-NEXT: Type: SHT_X86_64_UNWIND
+ELF-X86-64-UNWIND-NEXT: Flags: [ SHF_ALLOC ]
+ELF-X86-64-UNWIND-NEXT: AddressAlign: 0x0000000000000001
+ELF-X86-64-UNWIND-NEXT: Content: ''
diff --git a/test/Object/objdump-no-sectionheaders.test b/test/Object/objdump-no-sectionheaders.test
new file mode 100644
index 000000000000..5130100d8ee7
--- /dev/null
+++ b/test/Object/objdump-no-sectionheaders.test
@@ -0,0 +1,6 @@
+; RUN: llvm-objdump -h %p/Inputs/no-sections.elf-x86-64 \
+; RUN: | FileCheck %s
+
+; CHECK: Sections:
+; CHECK: Idx Name Size Address Type
+; CHECK-NOT: {{.}}
diff --git a/test/Object/objdump-relocations.test b/test/Object/objdump-relocations.test
index 95c4c4dcaedf..28cac104c7b7 100644
--- a/test/Object/objdump-relocations.test
+++ b/test/Object/objdump-relocations.test
@@ -10,6 +10,8 @@ RUN: llvm-objdump -r %p/Inputs/trivial-object-test.elf-hexagon \
RUN: | FileCheck %s -check-prefix ELF-hexagon
RUN: llvm-objdump -r %p/Inputs/trivial-object-test.elf-mips64el \
RUN: | FileCheck %s -check-prefix ELF-MIPS64EL
+RUN: llvm-objdump -r %p/Inputs/trivial-object-test.elf-mipsel \
+RUN: | FileCheck %s -check-prefix ELF-MIPSEL
RUN: llvm-objdump -r %p/Inputs/relocations.elf-x86-64 \
RUN: | FileCheck %s -check-prefix ELF-complex-x86-64
@@ -45,7 +47,15 @@ ELF-hexagon: R_HEX_B22_PCREL puts
// Note: this file was produced with gas to make sure we don't end up in a
// situation where LLVM produces and accepts a broken file.
ELF-MIPS64EL: .data
-ELF-MIPS64EL: R_MIPS_64
+ELF-MIPS64EL: R_MIPS_64/R_MIPS_NONE/R_MIPS_NONE zed
+
+ELF-MIPSEL: .rel.text
+ELF-MIPSEL: R_MIPS_HI16 _gp_disp
+ELF-MIPSEL: R_MIPS_LO16 _gp_disp
+ELF-MIPSEL: R_MIPS_GOT16 $.str
+ELF-MIPSEL: R_MIPS_LO16 $.str
+ELF-MIPSEL: R_MIPS_CALL16 puts
+ELF-MIPSEL: R_MIPS_CALL16 SomeOtherFunction
ELF-complex-x86-64: .text
ELF-complex-x86-64-NEXT: R_X86_64_8 .data-4
diff --git a/test/Object/readobj-elf-versioning.test b/test/Object/readobj-elf-versioning.test
index 1f09ef32a11a..40852877457d 100644
--- a/test/Object/readobj-elf-versioning.test
+++ b/test/Object/readobj-elf-versioning.test
@@ -33,7 +33,7 @@ ELF32: Symbol {
ELF32: Name: puts@GLIBC_2.0
ELF32: Binding: Global
ELF32: Type: Function
-ELF32: Section: (0x0)
+ELF32: Section: Undefined (0x0)
ELF32: }
ELF32: ]
ELF64: DynamicSymbols [
@@ -41,6 +41,6 @@ ELF64: Symbol {
ELF64: Name: puts@GLIBC_2.2.5
ELF64: Binding: Global
ELF64: Type: Function
-ELF64: Section: (0x0)
+ELF64: Section: Undefined (0x0)
ELF64: }
ELF64: ]
diff --git a/test/Object/readobj-shared-object.test b/test/Object/readobj-shared-object.test
index 72dbd32ea9d5..516d4c699e42 100644
--- a/test/Object/readobj-shared-object.test
+++ b/test/Object/readobj-shared-object.test
@@ -191,7 +191,7 @@ ELF: Symbol {
ELF: Name: shared.ll
ELF: Binding: Local
ELF: Type: File
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: Symbol {
ELF: Name: local_func
@@ -203,13 +203,13 @@ ELF: Symbol {
ELF: Name: _GLOBAL_OFFSET_TABLE_
ELF: Binding: Local
ELF: Type: Object
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: Symbol {
ELF: Name: _DYNAMIC
ELF: Binding: Local
ELF: Type: Object
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: Symbol {
ELF: Name: common_sym
@@ -233,13 +233,13 @@ ELF: Symbol {
ELF: Name: __bss_start
ELF: Binding: Global
ELF: Type: None
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: Symbol {
ELF: Name: _end
ELF: Binding: Global
ELF: Type: None
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: Symbol {
ELF: Name: global_func
@@ -251,7 +251,7 @@ ELF: Symbol {
ELF: Name: _edata
ELF: Binding: Global
ELF: Type: None
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: ]
@@ -278,13 +278,13 @@ ELF: Symbol {
ELF: Name: __bss_start
ELF: Binding: Global
ELF: Type: None
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: Symbol {
ELF: Name: _end
ELF: Binding: Global
ELF: Type: None
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: Symbol {
ELF: Name: global_func
@@ -296,7 +296,7 @@ ELF: Symbol {
ELF: Name: _edata
ELF: Binding: Global
ELF: Type: None
-ELF: Section: (0xFFF1)
+ELF: Section: Absolute (0xFFF1)
ELF: }
ELF: ]
diff --git a/test/Object/simple-archive.test b/test/Object/simple-archive.test
index 3e6760ed97a4..085a91e7bd80 100644
--- a/test/Object/simple-archive.test
+++ b/test/Object/simple-archive.test
@@ -1,7 +1,7 @@
#
# Check if the index is appearing properly in the output file
#
-RUN: llvm-nm -s %p/Inputs/libsimple_archive.a | FileCheck -check-prefix=CHECKIDX %s
+RUN: llvm-nm -M %p/Inputs/libsimple_archive.a | FileCheck -check-prefix=CHECKIDX %s
CHECKIDX: Archive map
CHECKIDX: abcdefghijklmnopqrstuvwxyz12345678 in 1.o
diff --git a/test/Object/size-trivial-macho.test b/test/Object/size-trivial-macho.test
new file mode 100644
index 000000000000..a6d3d1c43304
--- /dev/null
+++ b/test/Object/size-trivial-macho.test
@@ -0,0 +1,89 @@
+RUN: llvm-size -A %p/Inputs/macho-text-data-bss.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix A
+RUN: llvm-size -B %p/Inputs/macho-text-data-bss.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix B
+RUN: llvm-size -format darwin %p/Inputs/macho-text-data-bss.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix m
+RUN: llvm-size %p/Inputs/macho-archive-x86_64.a \
+RUN: | FileCheck %s -check-prefix AR
+RUN: llvm-size -format darwin %p/Inputs/macho-archive-x86_64.a \
+RUN: | FileCheck %s -check-prefix mAR
+RUN: llvm-size -m -x -l %p/Inputs/hello-world.macho-x86_64 \
+RUN: | FileCheck %s -check-prefix mxl
+RUN: llvm-size -arch all %p/Inputs/macho-universal.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix u
+RUN: llvm-size -arch i386 %p/Inputs/macho-universal.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix u-i386
+RUN: llvm-size -arch all %p/Inputs/macho-universal-archive.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix uAR
+RUN: llvm-size -arch x86_64 %p/Inputs/macho-universal-archive.x86_64.i386 \
+RUN: | FileCheck %s -check-prefix uAR-x86_64
+
+A: section size addr
+A: __text 12 0
+A: __data 4 12
+A: __bss 4 112
+A: __compact_unwind 32 16
+A: __eh_frame 64 48
+A: Total 116
+
+B: __TEXT __DATA __OBJC others dec hex
+B: 76 8 0 32 116 74
+
+m: Segment : 116
+m: Section (__TEXT, __text): 12
+m: Section (__DATA, __data): 4
+m: Section (__DATA, __bss): 4
+m: Section (__LD, __compact_unwind): 32
+m: Section (__TEXT, __eh_frame): 64
+m: total 116
+m: total 116
+
+AR: __TEXT __DATA __OBJC others dec hex
+AR: 70 0 0 32 102 66 {{.*}}/macho-archive-x86_64.a(foo.o)
+AR: 0 4 0 0 4 4 {{.*}}/macho-archive-x86_64.a(bar.o)
+
+mAR: {{.*}}/macho-archive-x86_64.a(foo.o):
+mAR: Segment : 104
+mAR: Section (__TEXT, __text): 6
+mAR: Section (__LD, __compact_unwind): 32
+mAR: Section (__TEXT, __eh_frame): 64
+mAR: total 102
+mAR: total 104
+mAR: {{.*}}/macho-archive-x86_64.a(bar.o):
+mAR: Segment : 4
+mAR: Section (__TEXT, __text): 0
+mAR: Section (__DATA, __data): 4
+mAR: total 4
+mAR: total 4
+
+
+mxl: Segment __PAGEZERO: 0x100000000 (vmaddr 0x0 fileoff 0)
+mxl: Segment __TEXT: 0x1000 (vmaddr 0x100000000 fileoff 0)
+mxl: Section __text: 0x3b (addr 0x100000f30 offset 3888)
+mxl: Section __stubs: 0x6 (addr 0x100000f6c offset 3948)
+mxl: Section __stub_helper: 0x1a (addr 0x100000f74 offset 3956)
+mxl: Section __cstring: 0xd (addr 0x100000f8e offset 3982)
+mxl: Section __unwind_info: 0x48 (addr 0x100000f9b offset 3995)
+mxl: Section __eh_frame: 0x18 (addr 0x100000fe8 offset 4072)
+mxl: total 0xc8
+mxl: Segment __DATA: 0x1000 (vmaddr 0x100001000 fileoff 4096)
+mxl: Section __nl_symbol_ptr: 0x10 (addr 0x100001000 offset 4096)
+mxl: Section __la_symbol_ptr: 0x8 (addr 0x100001010 offset 4112)
+mxl: total 0x18
+mxl: Segment __LINKEDIT: 0x1000 (vmaddr 0x100002000 fileoff 8192)
+mxl: total 0x100003000
+
+u: __TEXT __DATA __OBJC others dec hex
+u: 4096 0 0 4294971392 4294975488 100002000 {{.*}}/macho-universal.x86_64.i386 (for architecture x86_64)
+u: 4096 0 0 8192 12288 3000 {{.*}}/macho-universal.x86_64.i386 (for architecture i386)
+
+u-i386: __TEXT __DATA __OBJC others dec hex
+u-i386: 4096 0 0 8192 12288 3000
+
+uAR: __TEXT __DATA __OBJC others dec hex
+uAR: 136 0 0 32 168 a8 {{.*}}/macho-universal-archive.x86_64.i386(hello.o) (for architecture x86_64)
+uAR: 5 4 0 0 9 9 {{.*}}/macho-universal-archive.x86_64.i386(foo.o) (for architecture i386)
+
+uAR-x86_64: __TEXT __DATA __OBJC others dec hex
+uAR-x86_64: 136 0 0 32 168 a8 {{.*}}/macho-universal-archive.x86_64.i386(hello.o)
diff --git a/test/Object/yaml2obj-coff-multi-doc.test b/test/Object/yaml2obj-coff-multi-doc.test
new file mode 100644
index 000000000000..1cf720306bf7
--- /dev/null
+++ b/test/Object/yaml2obj-coff-multi-doc.test
@@ -0,0 +1,91 @@
+# RUN: yaml2obj -format=coff -docnum=1 %s \
+# RUN: | llvm-readobj -symbols - | FileCheck -check-prefix=DOC1 %s
+# RUN: yaml2obj -format=coff -docnum=2 %s \
+# RUN: | llvm-readobj -symbols - | FileCheck -check-prefix=DOC2 %s
+# RUN: not yaml2obj -format=coff -docnum=3 %s 2>&1 \
+# RUN: | FileCheck -check-prefix=DOC3 %s
+
+# DOC1: Name: _sym1
+# DOC2: Name: _sym2
+# DOC3: yaml2obj: Cannot find the 3rd document
+
+---
+header:
+ Machine: IMAGE_FILE_MACHINE_I386
+ Characteristics: [ IMAGE_FILE_DEBUG_STRIPPED ]
+
+sections:
+ - Name: .text
+ Alignment: 16
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE,
+ IMAGE_SCN_MEM_READ ]
+ SectionData: "00000000"
+
+symbols:
+ - Name: .text
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 36
+ NumberOfRelocations: 3
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 1
+
+ - Name: _main
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+
+ - Name: _sym1
+ Value: 0
+ SectionNumber: 0
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+
+---
+header:
+ Machine: IMAGE_FILE_MACHINE_I386
+ Characteristics: [ IMAGE_FILE_DEBUG_STRIPPED ]
+
+sections:
+ - Name: .text
+ Alignment: 16
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE,
+ IMAGE_SCN_MEM_READ ]
+ SectionData: "00000000"
+
+symbols:
+ - Name: .text
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 36
+ NumberOfRelocations: 3
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 1
+
+ - Name: _main
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+
+ - Name: _sym2
+ Value: 0
+ SectionNumber: 0
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/test/Object/yaml2obj-elf-file-headers-with-e_flags.yaml b/test/Object/yaml2obj-elf-file-headers-with-e_flags.yaml
new file mode 100644
index 000000000000..7d098077dd5e
--- /dev/null
+++ b/test/Object/yaml2obj-elf-file-headers-with-e_flags.yaml
@@ -0,0 +1,17 @@
+# RUN: yaml2obj -format=elf %s | llvm-readobj -file-headers - | FileCheck %s
+!ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_MIPS
+ Flags: [ EF_MIPS_NOREORDER, EF_MIPS_ABI_O32, EF_MIPS_ARCH_32R2 ]
+
+# CHECK: Format: ELF32-mips
+# CHECK: Arch: mipsel
+# CHECK: Machine: EM_MIPS
+# CHECK: Flags [ (0x70001001)
+# CHECK-NEXT: EF_MIPS_ABI_O32 (0x1000)
+# CHECK-NEXT: EF_MIPS_ARCH_32R2 (0x70000000)
+# CHECK-NEXT: EF_MIPS_NOREORDER (0x1)
+# CHECK-NEXT: ]
diff --git a/test/Object/yaml2obj-elf-multi-doc.test b/test/Object/yaml2obj-elf-multi-doc.test
new file mode 100644
index 000000000000..c51f803300ec
--- /dev/null
+++ b/test/Object/yaml2obj-elf-multi-doc.test
@@ -0,0 +1,56 @@
+# RUN: yaml2obj -format=elf -docnum=1 %s \
+# RUN: | llvm-readobj -symbols - | FileCheck -check-prefix=DOC1 %s
+# RUN: yaml2obj -format=elf -docnum=2 %s \
+# RUN: | llvm-readobj -symbols - | FileCheck -check-prefix=DOC2 %s
+# RUN: not yaml2obj -format=elf -docnum=3 %s 2>&1 \
+# RUN: | FileCheck -check-prefix=DOC3 %s
+
+# DOC1: Name: T1 (1)
+# DOC2: Name: T2 (1)
+# DOC3: yaml2obj: Cannot find the 3rd document
+
+--- !ELF
+FileHeader: !FileHeader
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_MIPS
+ Flags: [EF_MIPS_CPIC]
+
+Sections:
+- Name: .text
+ Type: SHT_PROGBITS
+ Content: "0000000000000000"
+ AddressAlign: 16
+ Flags: [SHF_EXECINSTR, SHF_ALLOC]
+
+Symbols:
+ Global:
+ - Name: T1
+ Section: .text
+ Type: STT_FUNC
+ Value: 0x0
+ Size: 8
+
+--- !ELF
+FileHeader: !FileHeader
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_MIPS
+
+Sections:
+- Name: .text
+ Type: SHT_PROGBITS
+ Content: "00000000"
+ AddressAlign: 16
+ Flags: [SHF_EXECINSTR, SHF_ALLOC]
+
+Symbols:
+ Global:
+ - Name: T2
+ Section: .text
+ Type: STT_FUNC
+ Value: 0x0
+ Size: 4
+...
diff --git a/test/Object/yaml2obj-elf-rel.yaml b/test/Object/yaml2obj-elf-rel.yaml
new file mode 100644
index 000000000000..6a7ed459eff2
--- /dev/null
+++ b/test/Object/yaml2obj-elf-rel.yaml
@@ -0,0 +1,118 @@
+# RUN: yaml2obj -format=elf %s | llvm-readobj -sections -relocations - | FileCheck %s
+
+!ELF
+FileHeader: !FileHeader
+ Class: ELFCLASS32
+ Data: ELFDATA2MSB
+ Type: ET_REL
+ Machine: EM_MIPS
+
+Sections:
+- Name: .text
+ Type: SHT_PROGBITS
+ Content: "0000000000000000"
+ AddressAlign: 16
+ Flags: [SHF_ALLOC]
+
+- Name: .rel.text
+ Type: SHT_REL
+ Info: .text
+ AddressAlign: 4
+ Relocations:
+ - Offset: 0x1
+ Symbol: glob1
+ Type: R_MIPS_32
+ - Offset: 0x1
+ Symbol: glob2
+ Type: R_MIPS_CALL16
+ - Offset: 0x2
+ Symbol: loc1
+ Type: R_MIPS_LO16
+
+- Name: .rela.text
+ Type: SHT_RELA
+ Link: .symtab
+ Info: .text
+ AddressAlign: 4
+ Relocations:
+ - Offset: 0x1
+ Addend: 1
+ Symbol: glob1
+ Type: R_MIPS_32
+ - Offset: 0x1
+ Addend: 2
+ Symbol: glob2
+ Type: R_MIPS_CALL16
+ - Offset: 0x2
+ Addend: 3
+ Symbol: loc1
+ Type: R_MIPS_LO16
+
+Symbols:
+ Local:
+ - Name: loc1
+ - Name: loc2
+ Global:
+ - Name: glob1
+ Section: .text
+ Value: 0x0
+ Size: 4
+ - Name: glob2
+ Weak:
+ - Name: weak1
+
+# CHECK: Section {
+# CHECK-NEXT: Index: 0
+# CHECK: }
+# CHECK: Section {
+# CHECK-NEXT: Index: 1
+# CHECK-NEXT: Name: .text (16)
+# CHECK: }
+# CHECK-NEXT: Section {
+# CHECK-NEXT: Index: 2
+# CHECK-NEXT: Name: .rel.text (1)
+# CHECK-NEXT: Type: SHT_REL (0x9)
+# CHECK-NEXT: Flags [ (0x0)
+# CHECK-NEXT: ]
+# CHECK-NEXT: Address: 0x0
+# CHECK-NEXT: Offset: 0x160
+# CHECK-NEXT: Size: 24
+# CHECK-NEXT: Link: 4
+# CHECK-NEXT: Info: 1
+# CHECK-NEXT: AddressAlignment: 4
+# CHECK-NEXT: EntrySize: 8
+# CHECK-NEXT: }
+# CHECK-NEXT: Section {
+# CHECK-NEXT: Index: 3
+# CHECK-NEXT: Name: .rela.text (11)
+# CHECK-NEXT: Type: SHT_RELA (0x4)
+# CHECK-NEXT: Flags [ (0x0)
+# CHECK-NEXT: ]
+# CHECK-NEXT: Address: 0x0
+# CHECK-NEXT: Offset: 0x180
+# CHECK-NEXT: Size: 36
+# CHECK-NEXT: Link: 4
+# CHECK-NEXT: Info: 1
+# CHECK-NEXT: AddressAlignment: 4
+# CHECK-NEXT: EntrySize: 12
+# CHECK-NEXT: }
+# CHECK-NEXT: Section {
+# CHECK-NEXT: Index: 4
+# CHECK-NEXT: Name: .symtab (40)
+# CHECK: }
+# CHECK-NEXT: Section {
+# CHECK-NEXT: Index: 5
+# CHECK-NEXT: Name: .strtab (32)
+# CHECK: }
+# CHECK: Relocations [
+# CHECK-NEXT: Section (2) .rel.text {
+# CHECK-NEXT: 0x1 R_MIPS_32 glob1 0x0
+# CHECK-NEXT: 0x1 R_MIPS_CALL16 glob2 0x0
+# CHECK-NEXT: 0x2 R_MIPS_LO16 loc1 0x0
+# CHECK-NEXT: }
+# CHECK-NEXT: Section (3) .rela.text {
+# CHECK-NEXT: 0x1 R_MIPS_32 glob1 0x1
+# CHECK-NEXT: 0x1 R_MIPS_CALL16 glob2 0x2
+# CHECK-NEXT: 0x2 R_MIPS_LO16 loc1 0x3
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
diff --git a/test/Object/yaml2obj-elf-section-basic.yaml b/test/Object/yaml2obj-elf-section-basic.yaml
index 34be11d3658c..56a3fd6e5f18 100644
--- a/test/Object/yaml2obj-elf-section-basic.yaml
+++ b/test/Object/yaml2obj-elf-section-basic.yaml
@@ -1,4 +1,7 @@
# RUN: yaml2obj -format=elf %s | llvm-readobj -sections -section-data - | FileCheck %s
+# RUN: yaml2obj -format=elf -o %t %s
+# RUN: llvm-readobj -sections -section-data %t | FileCheck %s
+
!ELF
FileHeader:
Class: ELFCLASS64
@@ -14,6 +17,14 @@ Sections:
Content: EBFE
AddressAlign: 2
+ - Name: .data
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Address: 0xCAFECAFE
+ Content: FEBF
+ Size: 8
+ AddressAlign: 2
+
# CHECK: Section {
# CHECK: Index: 0
# CHECK: Type: SHT_NULL (0x0)
@@ -33,3 +44,33 @@ Sections:
# CHECK: SectionData (
# CHECK-NEXT: 0000: EBFE
# CHECK-NEXT: )
+#
+# CHECK: Section {
+# CHECK: Name: .data
+# CHECK-NEXT: Type: SHT_PROGBITS (0x1)
+# CHECK-NEXT: Flags [ (0x2)
+# CHECK-NEXT: SHF_ALLOC (0x2)
+# CHECK-NEXT: ]
+# CHECK-NEXT: Address: 0xCAFECAFE
+# CHECK-NEXT: Offset: 0x1D0
+# CHECK-NEXT: Size: 8
+# CHECK-NEXT: Link: 0
+# CHECK-NEXT: Info: 0
+# CHECK-NEXT: AddressAlignment: 2
+# CHECK-NEXT: EntrySize: 0
+# CHECK-NEXT: SectionData (
+# CHECK-NEXT: 0000: FEBF0000 00000000 |........|
+# CHECK-NEXT: )
+#
+# CHECK: Section {
+# CHECK: Name: .symtab (25)
+# CHECK: Type: SHT_SYMTAB (0x2)
+# CHECK: }
+# CHECK: Section {
+# CHECK: Name: .strtab (17)
+# CHECK: Type: SHT_STRTAB (0x3)
+# CHECK: }
+# CHECK: Section {
+# CHECK: Name: .shstrtab (7)
+# CHECK: Type: SHT_STRTAB (0x3)
+# CHECK: }
diff --git a/test/Object/yaml2obj-elf-section-invalid-size.yaml b/test/Object/yaml2obj-elf-section-invalid-size.yaml
new file mode 100644
index 000000000000..d0cb370072dd
--- /dev/null
+++ b/test/Object/yaml2obj-elf-section-invalid-size.yaml
@@ -0,0 +1,26 @@
+# RUN: not yaml2obj -format=elf -o %t %s 2>&1 | FileCheck %s
+
+!ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Content: EBFE
+ AddressAlign: 2
+
+ - Name: .data
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Content: 0000000000000000
+ Size: 2
+
+# CHECK: YAML:17:5: error: Section size must be greater or equal to the content size
+# CHECK-NEXT: - Name: .data
+# CHECK-NEXT: ^
+# CHECK-NEXT: yaml2obj: Failed to parse YAML file!
diff --git a/test/Object/yaml2obj-elf-symbol-basic.yaml b/test/Object/yaml2obj-elf-symbol-basic.yaml
index 3fb9b17655fd..6d49ddd1c422 100644
--- a/test/Object/yaml2obj-elf-symbol-basic.yaml
+++ b/test/Object/yaml2obj-elf-symbol-basic.yaml
@@ -37,4 +37,4 @@ Symbols:
# CHECK: Section: .text
# CHECK: Symbol {
# CHECK: Name: undefined_symbol
-# CHECK: Section: (0x0)
+# CHECK: Section: Undefined (0x0)
diff --git a/test/Object/yaml2obj-elf-symbol-visibility.yaml b/test/Object/yaml2obj-elf-symbol-visibility.yaml
new file mode 100644
index 000000000000..113354a05e3a
--- /dev/null
+++ b/test/Object/yaml2obj-elf-symbol-visibility.yaml
@@ -0,0 +1,126 @@
+# RUN: yaml2obj -format=elf %s | llvm-readobj -symbols - | \
+# RUN: FileCheck --check-prefix OBJ %s
+# RUN: yaml2obj -format=elf %s | obj2yaml - | FileCheck --check-prefix YAML %s
+
+# OBJ: Symbol {
+# OBJ: Name: default1 (36)
+# OBJ-NEXT: Value: 0x0
+# OBJ-NEXT: Size: 4
+# OBJ-NEXT: Binding: Global (0x1)
+# OBJ-NEXT: Type: Object (0x1)
+# OBJ-NEXT: Other: 0
+# OBJ-NEXT: Section: .data (0x1)
+# OBJ-NEXT: }
+# OBJ-NEXT: Symbol {
+# OBJ-NEXT: Name: default2 (27)
+# OBJ-NEXT: Value: 0x4
+# OBJ-NEXT: Size: 4
+# OBJ-NEXT: Binding: Global (0x1)
+# OBJ-NEXT: Type: Object (0x1)
+# OBJ-NEXT: Other: 0
+# OBJ-NEXT: Section: .data (0x1)
+# OBJ-NEXT: }
+# OBJ-NEXT: Symbol {
+# OBJ-NEXT: Name: internal (8)
+# OBJ-NEXT: Value: 0x8
+# OBJ-NEXT: Size: 4
+# OBJ-NEXT: Binding: Global (0x1)
+# OBJ-NEXT: Type: Object (0x1)
+# OBJ-NEXT: Other: 1
+# OBJ-NEXT: Section: .data (0x1)
+# OBJ-NEXT: }
+# OBJ-NEXT: Symbol {
+# OBJ-NEXT: Name: hidden (1)
+# OBJ-NEXT: Value: 0xC
+# OBJ-NEXT: Size: 4
+# OBJ-NEXT: Binding: Global (0x1)
+# OBJ-NEXT: Type: Object (0x1)
+# OBJ-NEXT: Other: 2
+# OBJ-NEXT: Section: .data (0x1)
+# OBJ-NEXT: }
+# OBJ-NEXT: Symbol {
+# OBJ-NEXT: Name: protected (17)
+# OBJ-NEXT: Value: 0x10
+# OBJ-NEXT: Size: 4
+# OBJ-NEXT: Binding: Global (0x1)
+# OBJ-NEXT: Type: Object (0x1)
+# OBJ-NEXT: Other: 3
+# OBJ-NEXT: Section: .data (0x1)
+# OBJ-NEXT: }
+
+# YAML: Symbols:
+# YAML-NEXT: Global:
+# YAML-NEXT: - Name: default1
+# YAML-NEXT: Type: STT_OBJECT
+# YAML-NEXT: Section: .data
+# YAML-NEXT: Size: 0x0000000000000004
+# YAML-NEXT: - Name: default2
+# YAML-NEXT: Type: STT_OBJECT
+# YAML-NEXT: Section: .data
+# YAML-NEXT: Value: 0x0000000000000004
+# YAML-NEXT: Size: 0x0000000000000004
+# YAML-NEXT: - Name: internal
+# YAML-NEXT: Type: STT_OBJECT
+# YAML-NEXT: Section: .data
+# YAML-NEXT: Value: 0x0000000000000008
+# YAML-NEXT: Size: 0x0000000000000004
+# YAML-NEXT: Visibility: STV_INTERNAL
+# YAML-NEXT: - Name: hidden
+# YAML-NEXT: Type: STT_OBJECT
+# YAML-NEXT: Section: .data
+# YAML-NEXT: Value: 0x000000000000000C
+# YAML-NEXT: Size: 0x0000000000000004
+# YAML-NEXT: Visibility: STV_HIDDEN
+# YAML-NEXT: - Name: protected
+# YAML-NEXT: Type: STT_OBJECT
+# YAML-NEXT: Section: .data
+# YAML-NEXT: Value: 0x0000000000000010
+# YAML-NEXT: Size: 0x0000000000000004
+# YAML-NEXT: Visibility: STV_PROTECTED
+
+---
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_MIPS
+ Flags: [ EF_MIPS_ABI_O32, EF_MIPS_ARCH_32 ]
+
+Sections:
+ - Name: .data
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_WRITE ]
+ AddressAlign: 0x04
+ Size: 0x14
+
+Symbols:
+ Global:
+ - Name: default1
+ Type: STT_OBJECT
+ Visibility: STV_DEFAULT
+ Section: .data
+ Value: 0x00
+ Size: 0x04
+ - Name: default2
+ Type: STT_OBJECT
+ Section: .data
+ Value: 0x04
+ Size: 0x04
+ - Name: internal
+ Type: STT_OBJECT
+ Visibility: STV_INTERNAL
+ Section: .data
+ Value: 0x08
+ Size: 0x04
+ - Name: hidden
+ Type: STT_OBJECT
+ Visibility: STV_HIDDEN
+ Section: .data
+ Value: 0x0C
+ Size: 0x04
+ - Name: protected
+ Type: STT_OBJECT
+ Visibility: STV_PROTECTED
+ Section: .data
+ Value: 0x10
+ Size: 0x04
diff --git a/test/Object/yaml2obj-readobj.test b/test/Object/yaml2obj-readobj.test
index 3031f5ed31bc..3bd0c6b0269a 100644
--- a/test/Object/yaml2obj-readobj.test
+++ b/test/Object/yaml2obj-readobj.test
@@ -1,4 +1,7 @@
RUN: yaml2obj %p/Inputs/COFF/i386.yaml | llvm-readobj -file-headers -relocations -expand-relocs - | FileCheck %s --check-prefix COFF-I386
+RUN: yaml2obj -o %t %p/Inputs/COFF/i386.yaml
+RUN: llvm-readobj -file-headers -relocations -expand-relocs %t \
+RUN: | FileCheck %s --check-prefix COFF-I386
// COFF-I386: Characteristics [ (0x200)
// COFF-I386-NEXT: IMAGE_FILE_DEBUG_STRIPPED (0x200)
diff --git a/test/Other/Inputs/llvm_cov.gcda b/test/Other/Inputs/llvm_cov.gcda
deleted file mode 100644
index 9ae2286ea2f4..000000000000
--- a/test/Other/Inputs/llvm_cov.gcda
+++ /dev/null
Binary files differ
diff --git a/test/Other/Inputs/llvm_cov.gcno b/test/Other/Inputs/llvm_cov.gcno
deleted file mode 100644
index 25e202386a89..000000000000
--- a/test/Other/Inputs/llvm_cov.gcno
+++ /dev/null
Binary files differ
diff --git a/test/Other/X86/lit.local.cfg b/test/Other/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Other/X86/lit.local.cfg
+++ b/test/Other/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Other/constant-fold-gep.ll b/test/Other/constant-fold-gep.ll
index aed4145c5507..387489820b26 100644
--- a/test/Other/constant-fold-gep.ll
+++ b/test/Other/constant-fold-gep.ll
@@ -457,7 +457,7 @@ define i8* @different_addrspace() nounwind noinline {
%p = getelementptr inbounds i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*),
i32 2
ret i8* %p
-; OPT: ret i8* getelementptr (i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*), i32 2)
+; OPT: ret i8* getelementptr (i8* addrspacecast (i8 addrspace(12)* getelementptr inbounds ([4 x i8] addrspace(12)* @p12, i32 0, i32 0) to i8*), i32 2)
}
define i8* @same_addrspace() nounwind noinline {
@@ -467,4 +467,21 @@ define i8* @same_addrspace() nounwind noinline {
; OPT: ret i8* getelementptr inbounds ([4 x i8]* @p0, i32 0, i32 2)
}
+@gv1 = internal global i32 1
+@gv2 = internal global [1 x i32] [ i32 2 ]
+@gv3 = internal global [1 x i32] [ i32 2 ]
+
+; Handled by TI-independent constant folder
+define i1 @gv_gep_vs_gv() {
+ ret i1 icmp eq (i32* getelementptr inbounds ([1 x i32]* @gv2, i32 0, i32 0), i32* @gv1)
+}
+; PLAIN: gv_gep_vs_gv
+; PLAIN: ret i1 false
+
+define i1 @gv_gep_vs_gv_gep() {
+ ret i1 icmp eq (i32* getelementptr inbounds ([1 x i32]* @gv2, i32 0, i32 0), i32* getelementptr inbounds ([1 x i32]* @gv3, i32 0, i32 0))
+}
+; PLAIN: gv_gep_vs_gv_gep
+; PLAIN: ret i1 false
+
; CHECK: attributes #0 = { nounwind }
diff --git a/test/Other/extract-alias.ll b/test/Other/extract-alias.ll
index d1e4af545625..dbc650ec6903 100644
--- a/test/Other/extract-alias.ll
+++ b/test/Other/extract-alias.ll
@@ -14,7 +14,7 @@
; DELETE: @zed = global i32 0
; DELETE: @zeda0 = alias i32* @zed
; DELETE-NEXT: @a0foo = alias i32* ()* @foo
-; DELETE-NEXT: @a0a0bar = alias void ()* @a0bar
+; DELETE-NEXT: @a0a0bar = alias void ()* @bar
; DELETE-NEXT: @a0bar = alias void ()* @bar
; DELETE: declare i32* @foo()
; DELETE: define void @bar() {
@@ -25,7 +25,7 @@
; ALIAS: @zed = external global i32
; ALIAS: @zeda0 = alias i32* @zed
-; ALIASRE: @a0a0bar = alias void ()* @a0bar
+; ALIASRE: @a0a0bar = alias void ()* @bar
; ALIASRE: @a0bar = alias void ()* @bar
; ALIASRE: declare void @bar()
@@ -39,7 +39,7 @@ define i32* @foo() {
ret i32* @zeda0
}
-@a0a0bar = alias void ()* @a0bar
+@a0a0bar = alias void ()* @bar
@a0bar = alias void ()* @bar
diff --git a/test/Other/llvm-cov.test b/test/Other/llvm-cov.test
deleted file mode 100644
index 2ac4e9e866b6..000000000000
--- a/test/Other/llvm-cov.test
+++ /dev/null
@@ -1,4 +0,0 @@
-PR11760
-RUN: llvm-cov -gcda=%S/Inputs/llvm_cov.gcda -gcno=%S/Inputs/llvm_cov.gcno
-REQUIRES: asserts
-XFAIL: *
diff --git a/test/Other/llvm-nm-without-aliases.ll b/test/Other/llvm-nm-without-aliases.ll
index 9d9408c13b6d..6ef72c742328 100644
--- a/test/Other/llvm-nm-without-aliases.ll
+++ b/test/Other/llvm-nm-without-aliases.ll
@@ -1,6 +1,6 @@
; RUN: llvm-as < %s > %t
-; RUN: llvm-nm -without-aliases < %t | FileCheck %s
-; RUN: llvm-nm < %t | FileCheck --check-prefix=WITH %s
+; RUN: llvm-nm -without-aliases - < %t | FileCheck %s
+; RUN: llvm-nm - < %t | FileCheck --check-prefix=WITH %s
; CHECK-NOT: T a0bar
; CHECK-NOT: T a0foo
diff --git a/test/Other/new-pass-manager.ll b/test/Other/new-pass-manager.ll
new file mode 100644
index 000000000000..cec01b54ff0c
--- /dev/null
+++ b/test/Other/new-pass-manager.ll
@@ -0,0 +1,69 @@
+; This test is essentially doing very basic things with the opt tool and the
+; new pass manager pipeline. It will be used to flesh out the feature
+; completeness of the opt tool when the new pass manager is engaged. The tests
+; may not be useful once it becomes the default or may get spread out into other
+; files, but for now this is just going to step the new process through its
+; paces.
+
+; RUN: opt -disable-output -debug-pass-manager -passes=print %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-MODULE-PRINT
+; CHECK-MODULE-PRINT: Starting module pass manager
+; CHECK-MODULE-PRINT: Running module pass: VerifierPass
+; CHECK-MODULE-PRINT: Running module pass: PrintModulePass
+; CHECK-MODULE-PRINT: ModuleID
+; CHECK-MODULE-PRINT: define void @foo()
+; CHECK-MODULE-PRINT: Running module pass: VerifierPass
+; CHECK-MODULE-PRINT: Finished module pass manager
+
+; RUN: opt -disable-output -debug-pass-manager -passes='function(print)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-FUNCTION-PRINT
+; CHECK-FUNCTION-PRINT: Starting module pass manager
+; CHECK-FUNCTION-PRINT: Running module pass: VerifierPass
+; CHECK-FUNCTION-PRINT: Starting function pass manager
+; CHECK-FUNCTION-PRINT: Running function pass: PrintFunctionPass
+; CHECK-FUNCTION-PRINT-NOT: ModuleID
+; CHECK-FUNCTION-PRINT: define void @foo()
+; CHECK-FUNCTION-PRINT: Finished function pass manager
+; CHECK-FUNCTION-PRINT: Running module pass: VerifierPass
+; CHECK-FUNCTION-PRINT: Finished module pass manager
+
+; RUN: opt -S -o - -passes='no-op-module,no-op-module' %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-NOOP
+; CHECK-NOOP: define void @foo() {
+; CHECK-NOOP: ret void
+; CHECK-NOOP: }
+
+; Round trip through bitcode.
+; RUN: opt -f -o - -passes='no-op-module,no-op-module' %s \
+; RUN: | llvm-dis \
+; RUN: | FileCheck %s --check-prefix=CHECK-NOOP
+
+; RUN: opt -disable-output -debug-pass-manager -verify-each -passes='no-op-module,function(no-op-function)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-VERIFY-EACH
+; CHECK-VERIFY-EACH: Starting module pass manager
+; CHECK-VERIFY-EACH: Running module pass: VerifierPass
+; CHECK-VERIFY-EACH: Running module pass: NoOpModulePass
+; CHECK-VERIFY-EACH: Running module pass: VerifierPass
+; CHECK-VERIFY-EACH: Starting function pass manager
+; CHECK-VERIFY-EACH: Running function pass: NoOpFunctionPass
+; CHECK-VERIFY-EACH: Running function pass: VerifierPass
+; CHECK-VERIFY-EACH: Finished function pass manager
+; CHECK-VERIFY-EACH: Running module pass: VerifierPass
+; CHECK-VERIFY-EACH: Finished module pass manager
+
+; RUN: opt -disable-output -debug-pass-manager -disable-verify -passes='no-op-module,function(no-op-function)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-NO-VERIFY
+; CHECK-NO-VERIFY: Starting module pass manager
+; CHECK-NO-VERIFY-NOT: VerifierPass
+; CHECK-NO-VERIFY: Running module pass: NoOpModulePass
+; CHECK-NO-VERIFY-NOT: VerifierPass
+; CHECK-NO-VERIFY: Starting function pass manager
+; CHECK-NO-VERIFY: Running function pass: NoOpFunctionPass
+; CHECK-NO-VERIFY-NOT: VerifierPass
+; CHECK-NO-VERIFY: Finished function pass manager
+; CHECK-NO-VERIFY-NOT: VerifierPass
+; CHECK-NO-VERIFY: Finished module pass manager
+
+define void @foo() {
+ ret void
+}
diff --git a/test/Other/optimization-remarks-inline.ll b/test/Other/optimization-remarks-inline.ll
new file mode 100644
index 000000000000..566b206919e1
--- /dev/null
+++ b/test/Other/optimization-remarks-inline.ll
@@ -0,0 +1,40 @@
+; RUN: opt < %s -inline -pass-remarks='inline' -S 2>&1 | FileCheck %s
+; RUN: opt < %s -inline -pass-remarks='inl.*' -S 2>&1 | FileCheck %s
+; RUN: opt < %s -inline -pass-remarks='vector' -pass-remarks='inl' -S 2>&1 | FileCheck %s
+
+; These two should not yield an inline remark for the same reason.
+; In the first command, we only ask for vectorizer remarks, in the
+; second one we ask for the inliner, but we then ask for the vectorizer
+; (thus overriding the first flag).
+; RUN: opt < %s -inline -pass-remarks='vector' -S 2>&1 | FileCheck --check-prefix=REMARKS %s
+; RUN: opt < %s -inline -pass-remarks='inl' -pass-remarks='vector' -S 2>&1 | FileCheck --check-prefix=REMARKS %s
+
+; RUN: opt < %s -inline -S 2>&1 | FileCheck --check-prefix=REMARKS %s
+; RUN: not opt < %s -pass-remarks='(' 2>&1 | FileCheck --check-prefix=BAD-REGEXP %s
+
+define i32 @foo(i32 %x, i32 %y) #0 {
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ %0 = load i32* %x.addr, align 4
+ %1 = load i32* %y.addr, align 4
+ %add = add nsw i32 %0, %1
+ ret i32 %add
+}
+
+define i32 @bar(i32 %j) #0 {
+entry:
+ %j.addr = alloca i32, align 4
+ store i32 %j, i32* %j.addr, align 4
+ %0 = load i32* %j.addr, align 4
+ %1 = load i32* %j.addr, align 4
+ %sub = sub nsw i32 %1, 2
+ %call = call i32 @foo(i32 %0, i32 %sub)
+; CHECK: foo inlined into bar
+; REMARKS-NOT: foo inlined into bar
+ ret i32 %call
+}
+
+; BAD-REGEXP: Invalid regular expression '(' in -pass-remarks:
diff --git a/test/Other/pass-pipeline-parsing.ll b/test/Other/pass-pipeline-parsing.ll
new file mode 100644
index 000000000000..4ec4162cd4c9
--- /dev/null
+++ b/test/Other/pass-pipeline-parsing.ll
@@ -0,0 +1,146 @@
+; RUN: opt -disable-output -debug-pass-manager \
+; RUN: -passes=no-op-module,no-op-module %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-TWO-NOOP-MP
+; CHECK-TWO-NOOP-MP: Starting module pass manager
+; CHECK-TWO-NOOP-MP: Running module pass: NoOpModulePass
+; CHECK-TWO-NOOP-MP: Running module pass: NoOpModulePass
+; CHECK-TWO-NOOP-MP: Finished module pass manager
+
+; RUN: opt -disable-output -debug-pass-manager \
+; RUN: -passes='module(no-op-module,no-op-module)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-NESTED-TWO-NOOP-MP
+; CHECK-NESTED-TWO-NOOP-MP: Starting module pass manager
+; CHECK-NESTED-TWO-NOOP-MP: Running module pass: ModulePassManager
+; CHECK-NESTED-TWO-NOOP-MP: Starting module pass manager
+; CHECK-NESTED-TWO-NOOP-MP: Running module pass: NoOpModulePass
+; CHECK-NESTED-TWO-NOOP-MP: Running module pass: NoOpModulePass
+; CHECK-NESTED-TWO-NOOP-MP: Finished module pass manager
+; CHECK-NESTED-TWO-NOOP-MP: Finished module pass manager
+
+; RUN: opt -disable-output -debug-pass-manager \
+; RUN: -passes=no-op-function,no-op-function %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-TWO-NOOP-FP
+; CHECK-TWO-NOOP-FP: Starting module pass manager
+; CHECK-TWO-NOOP-FP: Running module pass: ModuleToFunctionPassAdaptor
+; CHECK-TWO-NOOP-FP: Starting function pass manager
+; CHECK-TWO-NOOP-FP: Running function pass: NoOpFunctionPass
+; CHECK-TWO-NOOP-FP: Running function pass: NoOpFunctionPass
+; CHECK-TWO-NOOP-FP: Finished function pass manager
+; CHECK-TWO-NOOP-FP: Finished module pass manager
+
+; RUN: opt -disable-output -debug-pass-manager \
+; RUN: -passes='function(no-op-function,no-op-function)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-NESTED-TWO-NOOP-FP
+; CHECK-NESTED-TWO-NOOP-FP: Starting module pass manager
+; CHECK-NESTED-TWO-NOOP-FP: Running module pass: ModuleToFunctionPassAdaptor
+; CHECK-NESTED-TWO-NOOP-FP: Starting function pass manager
+; CHECK-NESTED-TWO-NOOP-FP: Running function pass: FunctionPassManager
+; CHECK-NESTED-TWO-NOOP-FP: Starting function pass manager
+; CHECK-NESTED-TWO-NOOP-FP: Running function pass: NoOpFunctionPass
+; CHECK-NESTED-TWO-NOOP-FP: Running function pass: NoOpFunctionPass
+; CHECK-NESTED-TWO-NOOP-FP: Finished function pass manager
+; CHECK-NESTED-TWO-NOOP-FP: Finished function pass manager
+; CHECK-NESTED-TWO-NOOP-FP: Finished module pass manager
+
+; RUN: opt -disable-output -debug-pass-manager \
+; RUN: -passes='no-op-module,function(no-op-function,no-op-function),no-op-module' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-MIXED-FP-AND-MP
+; CHECK-MIXED-FP-AND-MP: Starting module pass manager
+; CHECK-MIXED-FP-AND-MP: Running module pass: NoOpModulePass
+; CHECK-MIXED-FP-AND-MP: Running module pass: ModuleToFunctionPassAdaptor
+; CHECK-MIXED-FP-AND-MP: Starting function pass manager
+; CHECK-MIXED-FP-AND-MP: Running function pass: NoOpFunctionPass
+; CHECK-MIXED-FP-AND-MP: Running function pass: NoOpFunctionPass
+; CHECK-MIXED-FP-AND-MP: Finished function pass manager
+; CHECK-MIXED-FP-AND-MP: Running module pass: NoOpModulePass
+; CHECK-MIXED-FP-AND-MP: Finished module pass manager
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='no-op-module)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED1
+; CHECK-UNBALANCED1: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='module(no-op-module))' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED2
+; CHECK-UNBALANCED2: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='module(no-op-module' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED3
+; CHECK-UNBALANCED3: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='no-op-function)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED4
+; CHECK-UNBALANCED4: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='function(no-op-function))' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED5
+; CHECK-UNBALANCED5: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='function(function(no-op-function)))' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED6
+; CHECK-UNBALANCED6: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='function(no-op-function' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED7
+; CHECK-UNBALANCED7: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='function(function(no-op-function)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED8
+; CHECK-UNBALANCED8: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='no-op-module,)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED9
+; CHECK-UNBALANCED9: unable to parse pass pipeline description
+
+; RUN: not opt -disable-output -debug-pass-manager \
+; RUN: -passes='no-op-function,)' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNBALANCED10
+; CHECK-UNBALANCED10: unable to parse pass pipeline description
+
+; RUN: opt -disable-output -debug-pass-manager -debug-cgscc-pass-manager \
+; RUN: -passes=no-op-cgscc,no-op-cgscc %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-TWO-NOOP-CG
+; CHECK-TWO-NOOP-CG: Starting module pass manager
+; CHECK-TWO-NOOP-CG: Running module pass: ModuleToPostOrderCGSCCPassAdaptor
+; CHECK-TWO-NOOP-CG: Starting CGSCC pass manager
+; CHECK-TWO-NOOP-CG: Running CGSCC pass: NoOpCGSCCPass
+; CHECK-TWO-NOOP-CG: Running CGSCC pass: NoOpCGSCCPass
+; CHECK-TWO-NOOP-CG: Finished CGSCC pass manager
+; CHECK-TWO-NOOP-CG: Finished module pass manager
+
+; RUN: opt -disable-output -debug-pass-manager -debug-cgscc-pass-manager \
+; RUN: -passes='module(function(no-op-function),cgscc(no-op-cgscc,function(no-op-function),no-op-cgscc),function(no-op-function))' %s 2>&1 \
+; RUN: | FileCheck %s --check-prefix=CHECK-NESTED-MP-CG-FP
+; CHECK-NESTED-MP-CG-FP: Starting module pass manager
+; CHECK-NESTED-MP-CG-FP: Starting module pass manager
+; CHECK-NESTED-MP-CG-FP: Running module pass: ModuleToFunctionPassAdaptor
+; CHECK-NESTED-MP-CG-FP: Starting function pass manager
+; CHECK-NESTED-MP-CG-FP: Running function pass: NoOpFunctionPass
+; CHECK-NESTED-MP-CG-FP: Finished function pass manager
+; CHECK-NESTED-MP-CG-FP: Running module pass: ModuleToPostOrderCGSCCPassAdaptor
+; CHECK-NESTED-MP-CG-FP: Starting CGSCC pass manager
+; CHECK-NESTED-MP-CG-FP: Running CGSCC pass: NoOpCGSCCPass
+; CHECK-NESTED-MP-CG-FP: Running CGSCC pass: CGSCCToFunctionPassAdaptor
+; CHECK-NESTED-MP-CG-FP: Starting function pass manager
+; CHECK-NESTED-MP-CG-FP: Running function pass: NoOpFunctionPass
+; CHECK-NESTED-MP-CG-FP: Finished function pass manager
+; CHECK-NESTED-MP-CG-FP: Running CGSCC pass: NoOpCGSCCPass
+; CHECK-NESTED-MP-CG-FP: Finished CGSCC pass manager
+; CHECK-NESTED-MP-CG-FP: Running module pass: ModuleToFunctionPassAdaptor
+; CHECK-NESTED-MP-CG-FP: Starting function pass manager
+; CHECK-NESTED-MP-CG-FP: Running function pass: NoOpFunctionPass
+; CHECK-NESTED-MP-CG-FP: Finished function pass manager
+; CHECK-NESTED-MP-CG-FP: Finished module pass manager
+; CHECK-NESTED-MP-CG-FP: Finished module pass manager
+
+define void @f() {
+ ret void
+}
diff --git a/test/TableGen/ForeachLoop.td b/test/TableGen/ForeachLoop.td
index 4aacc74d8aa2..25208fae227e 100644
--- a/test/TableGen/ForeachLoop.td
+++ b/test/TableGen/ForeachLoop.td
@@ -51,8 +51,10 @@ foreach i = [0, 1, 2, 3, 4, 5, 6, 7] in
// CHECK: string Name = "R7";
// CHECK: int Index = 7;
-foreach i = {0-3,9-7} in
+foreach i = {0-3,9-7} in {
def S#i : Register<"Q"#i, i>;
+ def : Register<"T"#i, i>;
+}
// CHECK: def S0
// CHECK: def S1
@@ -61,3 +63,25 @@ foreach i = {0-3,9-7} in
// CHECK: def S7
// CHECK: def S8
// CHECK: def S9
+
+// CHECK: def
+// CHECK: string Name = "T0";
+
+// CHECK: def
+// CHECK: string Name = "T1";
+
+// CHECK: def
+// CHECK: string Name = "T2";
+
+// CHECK: def
+// CHECK: string Name = "T3";
+
+// CHECK: def
+// CHECK: string Name = "T9";
+
+// CHECK: def
+// CHECK: string Name = "T8";
+
+// CHECK: def
+// CHECK: string Name = "T7";
+
diff --git a/test/TableGen/GeneralList.td b/test/TableGen/GeneralList.td
index 9e0c7df552b3..17cc9a5cb035 100644
--- a/test/TableGen/GeneralList.td
+++ b/test/TableGen/GeneralList.td
@@ -1,5 +1,4 @@
// RUN: llvm-tblgen %s
-// XFAIL: vg_leak
//
// Test to make sure that lists work with any data-type
diff --git a/test/TableGen/MultiClassDefName.td b/test/TableGen/MultiClassDefName.td
index d3c6de7e8421..811d92667eb2 100644
--- a/test/TableGen/MultiClassDefName.td
+++ b/test/TableGen/MultiClassDefName.td
@@ -14,3 +14,28 @@ multiclass Names<string n, string m> {
}
defm Hello : Names<"hello", "world">;
+
+// Ensure that the same anonymous name is used as the prefix for all defs in an
+// anonymous multiclass.
+
+class Outer<C i> {
+ C Inner = i;
+}
+
+multiclass MC<string name> {
+ def hi : C<name>;
+ def there : Outer<!cast<C>(!strconcat(NAME, "hi"))>;
+}
+
+defm : MC<"foo">;
+
+multiclass MC2<string name> {
+ def there : Outer<C<name> >;
+}
+
+// Ensure that we've correctly captured the reference to name from the implicit
+// anonymous C def in the template parameter list of Outer.
+// CHECK-NOT: MC2::name
+
+defm : MC2<"bar">;
+
diff --git a/test/TableGen/ValidIdentifiers.td b/test/TableGen/ValidIdentifiers.td
new file mode 100644
index 000000000000..333ede0c62bc
--- /dev/null
+++ b/test/TableGen/ValidIdentifiers.td
@@ -0,0 +1,16 @@
+// RUN: llvm-tblgen -gen-ctags %s | FileCheck %s
+// XFAIL: vg_leak
+
+// Ensure that generated names for anonymous records are valid identifiers via the ctags index.
+
+class foo<int X> { int THEVAL = X; }
+// CHECK: {{^X }}
+
+def : foo<2>;
+// CHECK: {{^anonymous_0 }}
+
+def X {
+ foo Y = foo<1>;
+}
+// CHECK: {{^anonymous_1 }}
+// CHECK: {{^foo }}
diff --git a/test/TableGen/if-empty-list-arg.td b/test/TableGen/if-empty-list-arg.td
new file mode 100644
index 000000000000..39edf58ff29a
--- /dev/null
+++ b/test/TableGen/if-empty-list-arg.td
@@ -0,0 +1,7 @@
+// RUN: llvm-tblgen %s
+// XFAIL: vg_leak
+
+class C<bit cond> {
+ list<int> X = !if(cond, [1, 2, 3], []);
+ list<int> Y = !if(cond, [], [4, 5, 6]);
+}
diff --git a/test/TableGen/intrinsic-long-name.td b/test/TableGen/intrinsic-long-name.td
new file mode 100644
index 000000000000..6b9ba018e383
--- /dev/null
+++ b/test/TableGen/intrinsic-long-name.td
@@ -0,0 +1,32 @@
+// RUN: llvm-tblgen -gen-intrinsic %s | FileCheck %s
+// XFAIL: vg_leak
+
+class IntrinsicProperty;
+
+class ValueType<int size, int value> {
+ string Namespace = "MVT";
+ int Size = size;
+ int Value = value;
+}
+
+class LLVMType<ValueType vt> {
+ ValueType VT = vt;
+}
+
+class Intrinsic<string name, list<LLVMType> param_types = []> {
+ string LLVMName = name;
+ bit isTarget = 0;
+ string TargetPrefix = "";
+ list<LLVMType> RetTypes = [];
+ list<LLVMType> ParamTypes = param_types;
+ list<IntrinsicProperty> Properties = [];
+}
+
+def iAny : ValueType<0, 254>;
+def llvm_anyint_ty : LLVMType<iAny>;
+
+// Make sure we generate the long name without crashing
+// CHECK: this_is_a_really_long_intrinsic_name_but_we_should_still_not_crash // llvm.this.is.a.really.long.intrinsic.name.but.we.should.still.not.crash
+def int_foo : Intrinsic<"llvm.foo", [llvm_anyint_ty]>;
+def int_this_is_a_really_long_intrinsic_name_but_we_should_still_not_crash : Intrinsic<"llvm.this.is.a.really.long.intrinsic.name.but.we.should.still.not.crash", [llvm_anyint_ty]>;
+
diff --git a/test/TableGen/lisp.td b/test/TableGen/lisp.td
index 9e586055ff9d..d753fbd299ce 100644
--- a/test/TableGen/lisp.td
+++ b/test/TableGen/lisp.td
@@ -1,5 +1,4 @@
// RUN: llvm-tblgen %s
-// XFAIL: vg_leak
// CHECK: def One {
// CHECK-NEXT: list<string> names = ["Jeffrey Sinclair"];
diff --git a/test/TableGen/listconcat.td b/test/TableGen/listconcat.td
new file mode 100644
index 000000000000..870e649d41dc
--- /dev/null
+++ b/test/TableGen/listconcat.td
@@ -0,0 +1,18 @@
+// RUN: llvm-tblgen %s | FileCheck %s
+
+// CHECK: class Y<list<string> Y:S = ?> {
+// CHECK: list<string> T1 = !listconcat(Y:S, ["foo"]);
+// CHECK: list<string> T2 = !listconcat(Y:S, !listconcat(["foo"], !listconcat(Y:S, ["bar", "baz"])));
+// CHECK: }
+
+// CHECK: def Z {
+// CHECK: list<string> T1 = ["fu", "foo"];
+// CHECK: list<string> T2 = ["fu", "foo", "fu", "bar", "baz"];
+// CHECK: }
+
+class Y<list<string> S> {
+ list<string> T1 = !listconcat(S, ["foo"]);
+ list<string> T2 = !listconcat(S, ["foo"], S, ["bar", "baz"]);
+}
+
+def Z : Y<["fu"]>;
diff --git a/test/TableGen/math.td b/test/TableGen/math.td
index 59d16ae908e2..71c60579de21 100644
--- a/test/TableGen/math.td
+++ b/test/TableGen/math.td
@@ -1,6 +1,16 @@
// RUN: llvm-tblgen %s | FileCheck %s
// XFAIL: vg_leak
+def shifts {
+ bits<2> b = 0b10;
+ int i = 2;
+ int shifted_b = !shl(b, 2);
+ int shifted_i = !shl(i, 2);
+}
+// CHECK: def shifts
+// CHECK: shifted_b = 8
+// CHECK: shifted_i = 8
+
class Int<int value> {
int Value = value;
}
diff --git a/test/TableGen/strconcat.td b/test/TableGen/strconcat.td
index dfb1a94d82c8..f5d7512fd695 100644
--- a/test/TableGen/strconcat.td
+++ b/test/TableGen/strconcat.td
@@ -1,9 +1,21 @@
// RUN: llvm-tblgen %s | FileCheck %s
-// CHECK: fufoo
+// CHECK: class Y<string Y:S = ?> {
+// CHECK: string T = !strconcat(Y:S, "foo");
+// CHECK: string T2 = !strconcat(Y:S, !strconcat("foo", !strconcat(Y:S, "bar")));
+// CHECK: string S = "foobar";
+// CHECK: }
+
+// CHECK: def Z {
+// CHECK: string T = "fufoo";
+// CHECK: string T2 = "fufoofubar";
+// CHECK: string S = "foobar";
+// CHECK: }
class Y<string S> {
string T = !strconcat(S, "foo");
+ // More than two arguments is equivalent to nested calls
+ string T2 = !strconcat(S, "foo", S, "bar");
// String values concatenate lexically, as in C.
string S = "foo" "bar";
diff --git a/test/Transforms/AddDiscriminators/basic.ll b/test/Transforms/AddDiscriminators/basic.ll
new file mode 100644
index 000000000000..b12cbee6adb5
--- /dev/null
+++ b/test/Transforms/AddDiscriminators/basic.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -add-discriminators -S | FileCheck %s
+
+; Basic DWARF discriminator test. All the instructions in block
+; 'if.then' should have a different discriminator value than
+; the conditional branch at the end of block 'entry'.
+;
+; Original code:
+;
+; void foo(int i) {
+; int x;
+; if (i < 10) x = i;
+; }
+
+define void @foo(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ %x = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32* %i.addr, align 4, !dbg !10
+ %cmp = icmp slt i32 %0, 10, !dbg !10
+ br i1 %cmp, label %if.then, label %if.end, !dbg !10
+
+if.then: ; preds = %entry
+ %1 = load i32* %i.addr, align 4, !dbg !10
+; CHECK: %1 = load i32* %i.addr, align 4, !dbg !12
+
+ store i32 %1, i32* %x, align 4, !dbg !10
+; CHECK: store i32 %1, i32* %x, align 4, !dbg !12
+
+ br label %if.end, !dbg !10
+; CHECK: br label %if.end, !dbg !12
+
+if.end: ; preds = %if.then, %entry
+ ret void, !dbg !12
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [basic.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"basic.c", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [basic.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5 "}
+!10 = metadata !{i32 3, i32 0, metadata !11, null}
+!11 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [basic.c]
+!12 = metadata !{i32 4, i32 0, metadata !4, null}
+
+; CHECK: !12 = metadata !{i32 3, i32 0, metadata !13, null}
+; CHECK: !13 = metadata !{i32 786443, metadata !1, metadata !11, i32 3, i32 0, i32 1, i32 0} ; [ DW_TAG_lexical_block ] [./basic.c]
+; CHECK: !14 = metadata !{i32 4, i32 0, metadata !4, null}
diff --git a/test/Transforms/AddDiscriminators/first-only.ll b/test/Transforms/AddDiscriminators/first-only.ll
new file mode 100644
index 000000000000..f3b0357e5766
--- /dev/null
+++ b/test/Transforms/AddDiscriminators/first-only.ll
@@ -0,0 +1,82 @@
+; RUN: opt < %s -add-discriminators -S | FileCheck %s
+
+; Test that the only instructions that receive a new discriminator in
+; the block 'if.then' are those that share the same line number as
+; the branch in 'entry'.
+;
+; Original code:
+;
+; void foo(int i) {
+; int x, y;
+; if (i < 10) { x = i;
+; y = -i;
+; }
+; }
+
+define void @foo(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32* %i.addr, align 4, !dbg !10
+ %cmp = icmp slt i32 %0, 10, !dbg !10
+ br i1 %cmp, label %if.then, label %if.end, !dbg !10
+
+if.then: ; preds = %entry
+ %1 = load i32* %i.addr, align 4, !dbg !12
+ store i32 %1, i32* %x, align 4, !dbg !12
+
+ %2 = load i32* %i.addr, align 4, !dbg !14
+; CHECK: %2 = load i32* %i.addr, align 4, !dbg !15
+
+ %sub = sub nsw i32 0, %2, !dbg !14
+; CHECK: %sub = sub nsw i32 0, %2, !dbg !15
+
+ store i32 %sub, i32* %y, align 4, !dbg !14
+; CHECK: store i32 %sub, i32* %y, align 4, !dbg !15
+
+ br label %if.end, !dbg !15
+; CHECK: br label %if.end, !dbg !16
+
+if.end: ; preds = %if.then, %entry
+ ret void, !dbg !16
+; CHECK: ret void, !dbg !17
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 (trunk 199750) (llvm/trunk 199751)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [first-only.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"first-only.c", metadata !"."}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [first-only.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5 (trunk 199750) (llvm/trunk 199751)"}
+!10 = metadata !{i32 3, i32 0, metadata !11, null}
+
+!11 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [first-only.c]
+; CHECK: !11 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 0, i32 0}
+
+!12 = metadata !{i32 3, i32 0, metadata !13, null}
+
+!13 = metadata !{i32 786443, metadata !1, metadata !11, i32 3, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [first-only.c]
+; CHECK: !13 = metadata !{i32 786443, metadata !1, metadata !14, i32 3, i32 0, i32 1, i32 0} ; [ DW_TAG_lexical_block ] [./first-only.c]
+
+!14 = metadata !{i32 4, i32 0, metadata !13, null}
+; CHECK: !14 = metadata !{i32 786443, metadata !1, metadata !11, i32 3, i32 0, i32 1}
+
+!15 = metadata !{i32 5, i32 0, metadata !13, null}
+; CHECK: !15 = metadata !{i32 4, i32 0, metadata !14, null}
+
+!16 = metadata !{i32 6, i32 0, metadata !4, null}
+; CHECK: !16 = metadata !{i32 5, i32 0, metadata !14, null}
+; CHECK: !17 = metadata !{i32 6, i32 0, metadata !4, null}
+
diff --git a/test/Transforms/AddDiscriminators/multiple.ll b/test/Transforms/AddDiscriminators/multiple.ll
new file mode 100644
index 000000000000..0241a0c1a0b1
--- /dev/null
+++ b/test/Transforms/AddDiscriminators/multiple.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -add-discriminators -S | FileCheck %s
+
+; Discriminator support for multiple CFG paths on the same line.
+;
+; void foo(int i) {
+; int x;
+; if (i < 10) x = i; else x = -i;
+; }
+;
+; The two stores inside the if-then-else line must have different discriminator
+; values.
+
+define void @foo(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ %x = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32* %i.addr, align 4, !dbg !10
+ %cmp = icmp slt i32 %0, 10, !dbg !10
+ br i1 %cmp, label %if.then, label %if.else, !dbg !10
+
+if.then: ; preds = %entry
+ %1 = load i32* %i.addr, align 4, !dbg !10
+; CHECK: %1 = load i32* %i.addr, align 4, !dbg !12
+
+ store i32 %1, i32* %x, align 4, !dbg !10
+; CHECK: store i32 %1, i32* %x, align 4, !dbg !12
+
+ br label %if.end, !dbg !10
+; CHECK: br label %if.end, !dbg !12
+
+if.else: ; preds = %entry
+ %2 = load i32* %i.addr, align 4, !dbg !10
+; CHECK: %2 = load i32* %i.addr, align 4, !dbg !14
+
+ %sub = sub nsw i32 0, %2, !dbg !10
+; CHECK: %sub = sub nsw i32 0, %2, !dbg !14
+
+ store i32 %sub, i32* %x, align 4, !dbg !10
+; CHECK: store i32 %sub, i32* %x, align 4, !dbg !14
+
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void, !dbg !12
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 (trunk 199750) (llvm/trunk 199751)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [multiple.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"multiple.c", metadata !"."}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [multiple.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5 (trunk 199750) (llvm/trunk 199751)"}
+!10 = metadata !{i32 3, i32 0, metadata !11, null}
+!11 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [multiple.c]
+!12 = metadata !{i32 4, i32 0, metadata !4, null}
+
+; CHECK: !12 = metadata !{i32 3, i32 0, metadata !13, null}
+; CHECK: !13 = metadata !{i32 786443, metadata !1, metadata !11, i32 3, i32 0, i32 1, i32 0} ; [ DW_TAG_lexical_block ] [./multiple.c]
+; CHECK: !14 = metadata !{i32 3, i32 0, metadata !15, null}
+; CHECK: !15 = metadata !{i32 786443, metadata !1, metadata !11, i32 3, i32 0, i32 2, i32 1} ; [ DW_TAG_lexical_block ] [./multiple.c]
diff --git a/test/Transforms/AddDiscriminators/no-discriminators.ll b/test/Transforms/AddDiscriminators/no-discriminators.ll
new file mode 100644
index 000000000000..f7b45e295551
--- /dev/null
+++ b/test/Transforms/AddDiscriminators/no-discriminators.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -add-discriminators -S | FileCheck %s
+
+; We should not generate discriminators for DWARF versions prior to 4.
+;
+; Original code:
+;
+; int foo(long i) {
+; if (i < 5) return 2; else return 90;
+; }
+;
+; None of the !dbg nodes associated with the if() statement should be
+; altered. If they are, it means that the discriminators pass added a
+; new lexical scope.
+
+define i32 @foo(i64 %i) #0 {
+entry:
+ %retval = alloca i32, align 4
+ %i.addr = alloca i64, align 8
+ store i64 %i, i64* %i.addr, align 8
+ call void @llvm.dbg.declare(metadata !{i64* %i.addr}, metadata !13), !dbg !14
+ %0 = load i64* %i.addr, align 8, !dbg !15
+; CHECK: %0 = load i64* %i.addr, align 8, !dbg !15
+ %cmp = icmp slt i64 %0, 5, !dbg !15
+; CHECK: %cmp = icmp slt i64 %0, 5, !dbg !15
+ br i1 %cmp, label %if.then, label %if.else, !dbg !15
+; CHECK: br i1 %cmp, label %if.then, label %if.else, !dbg !15
+
+if.then: ; preds = %entry
+ store i32 2, i32* %retval, !dbg !15
+ br label %return, !dbg !15
+
+if.else: ; preds = %entry
+ store i32 90, i32* %retval, !dbg !15
+ br label %return, !dbg !15
+
+return: ; preds = %if.else, %if.then
+ %1 = load i32* %retval, !dbg !17
+ ret i32 %1, !dbg !17
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.ident = !{!12}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [./no-discriminators] [DW_LANG_C99]
+!1 = metadata !{metadata !"no-discriminators", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i64)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [./no-discriminators]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8, metadata !9}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{i32 786468, null, null, metadata !"long int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [long int] [line 0, size 64, align 64, offset 0, enc DW_ATE_signed]
+!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+; CHECK: !10 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{metadata !"clang version 3.5.0 "}
+!13 = metadata !{i32 786689, metadata !4, metadata !"i", metadata !5, i32 16777217, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [i] [line 1]
+!14 = metadata !{i32 1, i32 0, metadata !4, null}
+!15 = metadata !{i32 2, i32 0, metadata !16, null}
+; CHECK: !15 = metadata !{i32 2, i32 0, metadata !16, null}
+!16 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./no-discriminators]
+; CHECK: !16 = metadata !{i32 786443, metadata !1, metadata !4, i32 2, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./no-discriminators]
+!17 = metadata !{i32 3, i32 0, metadata !4, null}
diff --git a/test/Transforms/ArgumentPromotion/basictest.ll b/test/Transforms/ArgumentPromotion/basictest.ll
index d3d21fcabee1..8f78b98437bc 100644
--- a/test/Transforms/ArgumentPromotion/basictest.ll
+++ b/test/Transforms/ArgumentPromotion/basictest.ll
@@ -1,23 +1,29 @@
-; RUN: opt < %s -basicaa -argpromotion -mem2reg -S | not grep alloca
+; RUN: opt < %s -basicaa -argpromotion -mem2reg -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
define internal i32 @test(i32* %X, i32* %Y) {
- %A = load i32* %X ; <i32> [#uses=1]
- %B = load i32* %Y ; <i32> [#uses=1]
- %C = add i32 %A, %B ; <i32> [#uses=1]
- ret i32 %C
+; CHECK-LABEL: define internal i32 @test(i32 %X.val, i32 %Y.val)
+ %A = load i32* %X
+ %B = load i32* %Y
+ %C = add i32 %A, %B
+ ret i32 %C
}
define internal i32 @caller(i32* %B) {
- %A = alloca i32 ; <i32*> [#uses=2]
- store i32 1, i32* %A
- %C = call i32 @test( i32* %A, i32* %B ) ; <i32> [#uses=1]
- ret i32 %C
+; CHECK-LABEL: define internal i32 @caller(i32 %B.val1)
+ %A = alloca i32
+ store i32 1, i32* %A
+ %C = call i32 @test(i32* %A, i32* %B)
+; CHECK: call i32 @test(i32 1, i32 %B.val1)
+ ret i32 %C
}
define i32 @callercaller() {
- %B = alloca i32 ; <i32*> [#uses=2]
- store i32 2, i32* %B
- %X = call i32 @caller( i32* %B ) ; <i32> [#uses=1]
- ret i32 %X
+; CHECK-LABEL: define i32 @callercaller()
+ %B = alloca i32
+ store i32 2, i32* %B
+ %X = call i32 @caller(i32* %B)
+; CHECK: call i32 @caller(i32 2)
+ ret i32 %X
}
diff --git a/test/Transforms/ArgumentPromotion/byval-2.ll b/test/Transforms/ArgumentPromotion/byval-2.ll
index 368c6896cf82..b412f5ef0856 100644
--- a/test/Transforms/ArgumentPromotion/byval-2.ll
+++ b/test/Transforms/ArgumentPromotion/byval-2.ll
@@ -1,26 +1,31 @@
-; RUN: opt < %s -argpromotion -S | grep -F "i32* byval" | count 2
-; Argpromote + scalarrepl should change this to passing the two integers by value.
+; RUN: opt < %s -argpromotion -S | FileCheck %s
- %struct.ss = type { i32, i64 }
+; Arg promotion eliminates the struct argument.
+; FIXME: Should it eliminate the i32* argument?
+
+%struct.ss = type { i32, i64 }
define internal void @f(%struct.ss* byval %b, i32* byval %X) nounwind {
+; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1, i32* byval %X)
entry:
- %tmp = getelementptr %struct.ss* %b, i32 0, i32 0
- %tmp1 = load i32* %tmp, align 4
- %tmp2 = add i32 %tmp1, 1
- store i32 %tmp2, i32* %tmp, align 4
+ %tmp = getelementptr %struct.ss* %b, i32 0, i32 0
+ %tmp1 = load i32* %tmp, align 4
+ %tmp2 = add i32 %tmp1, 1
+ store i32 %tmp2, i32* %tmp, align 4
- store i32 0, i32* %X
- ret void
+ store i32 0, i32* %X
+ ret void
}
define i32 @test(i32* %X) {
+; CHECK-LABEL: define i32 @test
entry:
- %S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
- %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
- store i64 2, i64* %tmp4, align 4
- call void @f( %struct.ss* byval %S, i32* byval %X)
- ret i32 0
+ %S = alloca %struct.ss
+ %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0
+ store i32 1, i32* %tmp1, align 8
+ %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1
+ store i64 2, i64* %tmp4, align 4
+ call void @f( %struct.ss* byval %S, i32* byval %X)
+; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}}, i32* byval %{{.*}})
+ ret i32 0
}
diff --git a/test/Transforms/ArgumentPromotion/byval.ll b/test/Transforms/ArgumentPromotion/byval.ll
index 44b26fc2f30c..27305e92068d 100644
--- a/test/Transforms/ArgumentPromotion/byval.ll
+++ b/test/Transforms/ArgumentPromotion/byval.ll
@@ -1,25 +1,28 @@
-; RUN: opt < %s -argpromotion -scalarrepl -S | not grep load
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-; Argpromote + scalarrepl should change this to passing the two integers by value.
- %struct.ss = type { i32, i64 }
+%struct.ss = type { i32, i64 }
define internal void @f(%struct.ss* byval %b) nounwind {
+; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1)
entry:
- %tmp = getelementptr %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
- %tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
- %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
- store i32 %tmp2, i32* %tmp, align 4
- ret void
+ %tmp = getelementptr %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
+ store i32 %tmp2, i32* %tmp, align 4
+ ret void
}
define i32 @main() nounwind {
+; CHECK-LABEL: define i32 @main
entry:
- %S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
- %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
- store i64 2, i64* %tmp4, align 4
- call void @f( %struct.ss* byval %S ) nounwind
- ret i32 0
+ %S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
+ %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 1, i32* %tmp1, align 8
+ %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
+ store i64 2, i64* %tmp4, align 4
+ call void @f( %struct.ss* byval %S ) nounwind
+; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}})
+ ret i32 0
}
diff --git a/test/Transforms/ArgumentPromotion/dbg.ll b/test/Transforms/ArgumentPromotion/dbg.ll
new file mode 100644
index 000000000000..70503afb5870
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/dbg.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; CHECK: call void @test(), !dbg [[DBG_LOC:![0-9]]]
+; CHECK: [[TEST_FN:.*]] = {{.*}} void ()* @test
+; CHECK: [[DBG_LOC]] = metadata !{i32 8, i32 0, metadata [[TEST_FN]], null}
+
+define internal void @test(i32* %X) {
+ ret void
+}
+
+define void @caller() {
+ call void @test(i32* null), !dbg !1
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!llvm.dbg.cu = !{!3}
+
+!0 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!1 = metadata !{i32 8, i32 0, metadata !2, null}
+!2 = metadata !{i32 786478, null, null, metadata !"test", metadata !"test", metadata !"", i32 3, null, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i32*)* @test, null, null, null, i32 3}
+!3 = metadata !{i32 786449, null, i32 4, metadata !"clang version 3.5.0 ", i1 false, metadata !"", i32 0, null, null, metadata !4, null, null, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [/usr/local/google/home/blaikie/dev/scratch/pr20038/reduce/<stdin>] [DW_LANG_C_plus_plus]
+!4 = metadata !{metadata !2}
diff --git a/test/Transforms/ArgumentPromotion/inalloca.ll b/test/Transforms/ArgumentPromotion/inalloca.ll
new file mode 100644
index 000000000000..089a78f6b319
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/inalloca.ll
@@ -0,0 +1,49 @@
+; RUN: opt %s -argpromotion -scalarrepl -S | FileCheck %s
+
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+%struct.ss = type { i32, i32 }
+
+; Argpromote + scalarrepl should change this to passing the two integers by value.
+define internal i32 @f(%struct.ss* inalloca %s) {
+entry:
+ %f0 = getelementptr %struct.ss* %s, i32 0, i32 0
+ %f1 = getelementptr %struct.ss* %s, i32 0, i32 1
+ %a = load i32* %f0, align 4
+ %b = load i32* %f1, align 4
+ %r = add i32 %a, %b
+ ret i32 %r
+}
+; CHECK-LABEL: define internal i32 @f
+; CHECK-NOT: load
+; CHECK: ret
+
+define i32 @main() {
+entry:
+ %S = alloca inalloca %struct.ss
+ %f0 = getelementptr %struct.ss* %S, i32 0, i32 0
+ %f1 = getelementptr %struct.ss* %S, i32 0, i32 1
+ store i32 1, i32* %f0, align 4
+ store i32 2, i32* %f1, align 4
+ %r = call i32 @f(%struct.ss* inalloca %S)
+ ret i32 %r
+}
+; CHECK-LABEL: define i32 @main
+; CHECK-NOT: load
+; CHECK: ret
+
+; Argpromote can't promote %a because of the icmp use.
+define internal i1 @g(%struct.ss* %a, %struct.ss* inalloca %b) nounwind {
+; CHECK: define internal i1 @g(%struct.ss* %a, %struct.ss* inalloca %b)
+entry:
+ %c = icmp eq %struct.ss* %a, %b
+ ret i1 %c
+}
+
+define i32 @test() {
+entry:
+ %S = alloca inalloca %struct.ss
+ %c = call i1 @g(%struct.ss* %S, %struct.ss* inalloca %S)
+; CHECK: call i1 @g(%struct.ss* %S, %struct.ss* inalloca %S)
+ ret i32 0
+}
diff --git a/test/Transforms/ArgumentPromotion/tail.ll b/test/Transforms/ArgumentPromotion/tail.ll
new file mode 100644
index 000000000000..43b8996ca18a
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/tail.ll
@@ -0,0 +1,20 @@
+; RUN: opt %s -argpromotion -S -o - | FileCheck %s
+; PR14710
+
+%pair = type { i32, i32 }
+
+declare i8* @foo(%pair*)
+
+define internal void @bar(%pair* byval %Data) {
+; CHECK: define internal void @bar(i32 %Data.0, i32 %Data.1)
+; CHECK: %Data = alloca %pair
+; CHECK-NOT: tail
+; CHECK: call i8* @foo(%pair* %Data)
+ tail call i8* @foo(%pair* %Data)
+ ret void
+}
+
+define void @zed(%pair* byval %Data) {
+ call void @bar(%pair* byval %Data)
+ ret void
+}
diff --git a/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll
new file mode 100644
index 000000000000..6a93016fc26e
--- /dev/null
+++ b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v7.ll
@@ -0,0 +1,364 @@
+; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-ll-sc %s | FileCheck %s
+
+define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
+; CHECK-LABEL: @test_atomic_xchg_i8
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
+ ret i8 %res
+}
+
+define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
+; CHECK-LABEL: @test_atomic_add_i16
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
+; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
+; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i16 [[OLDVAL]]
+ %res = atomicrmw add i16* %ptr, i16 %addend seq_cst
+ ret i16 %res
+}
+
+define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
+; CHECK-LABEL: @test_atomic_sub_i32
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
+; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence acquire
+; CHECK: ret i32 [[OLDVAL]]
+ %res = atomicrmw sub i32* %ptr, i32 %subend acquire
+ ret i32 %res
+}
+
+define i8 @test_atomic_and_i8(i8* %ptr, i8 %andend) {
+; CHECK-LABEL: @test_atomic_and_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL:%.*]] = and i8 [[OLDVAL]], %andend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw and i8* %ptr, i8 %andend release
+ ret i8 %res
+}
+
+define i16 @test_atomic_nand_i16(i16* %ptr, i16 %nandend) {
+; CHECK-LABEL: @test_atomic_nand_i16
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
+; CHECK: [[NEWVAL_TMP:%.*]] = and i16 [[OLDVAL]], %nandend
+; CHECK: [[NEWVAL:%.*]] = xor i16 [[NEWVAL_TMP]], -1
+; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i16 [[OLDVAL]]
+ %res = atomicrmw nand i16* %ptr, i16 %nandend seq_cst
+ ret i16 %res
+}
+
+define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
+; CHECK-LABEL: @test_atomic_or_i64
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
+; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i64 [[OLDVAL]]
+ %res = atomicrmw or i64* %ptr, i64 %orend seq_cst
+ ret i64 %res
+}
+
+define i8 @test_atomic_xor_i8(i8* %ptr, i8 %xorend) {
+; CHECK-LABEL: @test_atomic_xor_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL:%.*]] = xor i8 [[OLDVAL]], %xorend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw xor i8* %ptr, i8 %xorend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_max_i8(i8* %ptr, i8 %maxend) {
+; CHECK-LABEL: @test_atomic_max_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp sgt i8 [[OLDVAL]], %maxend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %maxend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw max i8* %ptr, i8 %maxend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_min_i8(i8* %ptr, i8 %minend) {
+; CHECK-LABEL: @test_atomic_min_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp sle i8 [[OLDVAL]], %minend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %minend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw min i8* %ptr, i8 %minend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_umax_i8(i8* %ptr, i8 %umaxend) {
+; CHECK-LABEL: @test_atomic_umax_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp ugt i8 [[OLDVAL]], %umaxend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %umaxend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw umax i8* %ptr, i8 %umaxend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_atomic_umin_i8(i8* %ptr, i8 %uminend) {
+; CHECK-LABEL: @test_atomic_umin_i8
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[WANT_OLD:%.*]] = icmp ule i8 [[OLDVAL]], %uminend
+; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %uminend
+; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK: fence seq_cst
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw umin i8* %ptr, i8 %uminend seq_cst
+ ret i8 %res
+}
+
+define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i8
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i8 [[OLDVAL]]
+
+ %pairold = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
+ %old = extractvalue { i8, i1 } %pairold, 0
+ ret i8 %old
+}
+
+define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
+; CHECK: fence release
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i16
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i16 [[OLDVAL]]
+
+ %pairold = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
+ %old = extractvalue { i16, i1 } %pairold, 0
+ ret i16 %old
+}
+
+define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK: fence acquire
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK: fence acquire
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i32 [[OLDVAL]]
+
+ %pairold = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
+ %old = extractvalue { i32, i1 } %pairold, 0
+ ret i32 %old
+}
+
+define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i64 [[OLDVAL]]
+
+ %pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
+ %old = extractvalue { i64, i1 } %pairold, 0
+ ret i64 %old
+}
\ No newline at end of file
diff --git a/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll
new file mode 100644
index 000000000000..8092c1010ff5
--- /dev/null
+++ b/test/Transforms/AtomicExpandLoadLinked/ARM/atomic-expansion-v8.ll
@@ -0,0 +1,226 @@
+; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-ll-sc %s | FileCheck %s
+
+define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
+; CHECK-LABEL: @test_atomic_xchg_i8
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i8 [[OLDVAL]]
+ %res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
+ ret i8 %res
+}
+
+define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
+; CHECK-LABEL: @test_atomic_add_i16
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
+; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
+; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i16 [[OLDVAL]]
+ %res = atomicrmw add i16* %ptr, i16 %addend seq_cst
+ ret i16 %res
+}
+
+define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
+; CHECK-LABEL: @test_atomic_sub_i32
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
+; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i32 [[OLDVAL]]
+ %res = atomicrmw sub i32* %ptr, i32 %subend acquire
+ ret i32 %res
+}
+
+define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
+; CHECK-LABEL: @test_atomic_or_i64
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldaexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
+; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
+; CHECK-NOT: fence
+; CHECK: ret i64 [[OLDVAL]]
+ %res = atomicrmw or i64* %ptr, i64 %orend seq_cst
+ ret i64 %res
+}
+
+define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i8(i8* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i8
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK-NOT: fence_cst
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence_cst
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i8 [[OLDVAL]]
+
+ %pairold = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
+ %old = extractvalue { i8, i1 } %pairold, 0
+ ret i8 %old
+}
+
+define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
+; CHECK: [[OLDVAL:%.*]] = trunc i32 %1 to i16
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i16 [[OLDVAL]]
+
+ %pairold = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
+ %old = extractvalue { i16, i1 } %pairold, 0
+ ret i16 %old
+}
+
+define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK-NOT: fence_cst
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence_cst
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i32 [[OLDVAL]]
+
+ %pairold = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
+ %old = extractvalue { i32, i1 } %pairold, 0
+ ret i32 %old
+}
+
+define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
+; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
+; CHECK-NOT: fence
+; CHECK: br label %[[LOOP:.*]]
+
+; CHECK: [[LOOP]]:
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
+; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
+; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
+; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
+; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
+; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
+; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
+; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
+; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
+; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
+; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
+; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
+; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK-NOT: fence_cst
+; CHECK: br label %[[DONE:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence_cst
+; CHECK: br label %[[DONE]]
+
+; CHECK: [[DONE]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i64 [[OLDVAL]]
+
+ %pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
+ %old = extractvalue { i64, i1 } %pairold, 0
+ ret i64 %old
+}
\ No newline at end of file
diff --git a/test/Transforms/AtomicExpandLoadLinked/ARM/cmpxchg-weak.ll b/test/Transforms/AtomicExpandLoadLinked/ARM/cmpxchg-weak.ll
new file mode 100644
index 000000000000..07a4a7f26e62
--- /dev/null
+++ b/test/Transforms/AtomicExpandLoadLinked/ARM/cmpxchg-weak.ll
@@ -0,0 +1,97 @@
+; RUN: opt -atomic-ll-sc -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
+
+define i32 @test_cmpxchg_seq_cst(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: @test_cmpxchg_seq_cst
+; CHECK: fence release
+; CHECK: br label %[[START:.*]]
+
+; CHECK: [[START]]:
+; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
+; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
+; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[FAILURE_BB]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[END:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[END]]
+
+; CHECK: [[END]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i32 [[LOADED]]
+
+ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %oldval = extractvalue { i32, i1 } %pair, 0
+ ret i32 %oldval
+}
+
+define i1 @test_cmpxchg_weak_fail(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: @test_cmpxchg_weak_fail
+; CHECK: fence release
+; CHECK: br label %[[START:.*]]
+
+; CHECK: [[START]]:
+; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
+; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
+; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK: fence seq_cst
+; CHECK: br label %[[END:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[END]]
+
+; CHECK: [[END]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i1 [[SUCCESS]]
+
+ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+ %oldval = extractvalue { i32, i1 } %pair, 1
+ ret i1 %oldval
+}
+
+define i32 @test_cmpxchg_monotonic(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: @test_cmpxchg_monotonic
+; CHECK-NOT: fence
+; CHECK: br label %[[START:.*]]
+
+; CHECK: [[START]]:
+; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
+; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[TRY_STORE]]:
+; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
+; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
+; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[FAILURE_BB:.*]]
+
+; CHECK: [[SUCCESS_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[END:.*]]
+
+; CHECK: [[FAILURE_BB]]:
+; CHECK-NOT: fence
+; CHECK: br label %[[END]]
+
+; CHECK: [[END]]:
+; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
+; CHECK: ret i32 [[LOADED]]
+
+ %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new monotonic monotonic
+ %oldval = extractvalue { i32, i1 } %pair, 0
+ ret i32 %oldval
+}
diff --git a/test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg b/test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg
new file mode 100644
index 000000000000..98c6700c209d
--- /dev/null
+++ b/test/Transforms/AtomicExpandLoadLinked/ARM/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'ARM' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/BBVectorize/lit.local.cfg b/test/Transforms/BBVectorize/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Transforms/BBVectorize/lit.local.cfg
+++ b/test/Transforms/BBVectorize/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/BBVectorize/simple-int.ll b/test/Transforms/BBVectorize/simple-int.ll
index e33ac612edc2..e0c1efa5b974 100644
--- a/test/Transforms/BBVectorize/simple-int.ll
+++ b/test/Transforms/BBVectorize/simple-int.ll
@@ -5,6 +5,18 @@ declare double @llvm.fma.f64(double, double, double)
declare double @llvm.fmuladd.f64(double, double, double)
declare double @llvm.cos.f64(double)
declare double @llvm.powi.f64(double, i32)
+declare double @llvm.round.f64(double)
+declare double @llvm.copysign.f64(double, double)
+declare double @llvm.ceil.f64(double)
+declare double @llvm.nearbyint.f64(double)
+declare double @llvm.rint.f64(double)
+declare double @llvm.trunc.f64(double)
+declare double @llvm.floor.f64(double)
+declare double @llvm.fabs.f64(double)
+declare i64 @llvm.bswap.i64(i64)
+declare i64 @llvm.ctpop.i64(i64)
+declare i64 @llvm.ctlz.i64(i64, i1)
+declare i64 @llvm.cttz.i64(i64, i1)
; Basic depth-3 chain with fma
define double @test1(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) {
@@ -124,10 +136,371 @@ define double @test4(double %A1, double %A2, double %B1, double %B2, i32 %P) {
; CHECK: ret double %R
}
+; Basic depth-3 chain with round
+define double @testround(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.round.f64(double %X1)
+ %Y2 = call double @llvm.round.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testround
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.round.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with copysign
+define double @testcopysign(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.copysign.f64(double %X1, double %A1)
+ %Y2 = call double @llvm.copysign.f64(double %X2, double %A1)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testcopysign
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1.v.i1.2 = insertelement <2 x double> %X1.v.i0.1, double %A1, i32 1
+; CHECK: %Y1 = call <2 x double> @llvm.copysign.v2f64(<2 x double> %X1, <2 x double> %Y1.v.i1.2)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with ceil
+define double @testceil(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.ceil.f64(double %X1)
+ %Y2 = call double @llvm.ceil.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testceil
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with nearbyint
+define double @testnearbyint(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.nearbyint.f64(double %X1)
+ %Y2 = call double @llvm.nearbyint.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testnearbyint
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with rint
+define double @testrint(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.rint.f64(double %X1)
+ %Y2 = call double @llvm.rint.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testrint
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with trunc
+define double @testtrunc(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.trunc.f64(double %X1)
+ %Y2 = call double @llvm.trunc.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testtrunc
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with floor
+define double @testfloor(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.floor.f64(double %X1)
+ %Y2 = call double @llvm.floor.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testfloor
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with fabs
+define double @testfabs(double %A1, double %A2, double %B1, double %B2) {
+ %X1 = fsub double %A1, %B1
+ %X2 = fsub double %A2, %B2
+ %Y1 = call double @llvm.fabs.f64(double %X1)
+ %Y2 = call double @llvm.fabs.f64(double %X2)
+ %Z1 = fadd double %Y1, %B1
+ %Z2 = fadd double %Y2, %B2
+ %R = fmul double %Z1, %Z2
+ ret double %R
+; CHECK: @testfabs
+; CHECK: %X1.v.i1.1 = insertelement <2 x double> undef, double %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x double> %X1.v.i1.1, double %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x double> undef, double %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x double> %X1.v.i0.1, double %A2, i32 1
+; CHECK: %X1 = fsub <2 x double> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x double> @llvm.fabs.v2f64(<2 x double> %X1)
+; CHECK: %Z1 = fadd <2 x double> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x double> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x double> %Z1, i32 1
+; CHECK: %R = fmul double %Z1.v.r1, %Z1.v.r2
+; CHECK: ret double %R
+
+}
+
+; Basic depth-3 chain with bswap
+define i64 @testbswap(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.bswap.i64(i64 %X1)
+ %Y2 = call i64 @llvm.bswap.i64(i64 %X2)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testbswap
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %X1)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with ctpop
+define i64 @testctpop(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.ctpop.i64(i64 %X1)
+ %Y2 = call i64 @llvm.ctpop.i64(i64 %X2)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testctpop
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %X1)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with ctlz
+define i64 @testctlz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 true)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testctlz
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %X1, i1 true)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with ctlz
+define i64 @testctlzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testctlzneg
+; CHECK: %X1 = sub i64 %A1, %B1
+; CHECK: %X2 = sub i64 %A2, %B2
+; CHECK: %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
+; CHECK: %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
+; CHECK: %Z1 = add i64 %Y1, %B1
+; CHECK: %Z2 = add i64 %Y2, %B2
+; CHECK: %R = mul i64 %Z1, %Z2
+; CHECK: ret i64 %R
+}
+
+; Basic depth-3 chain with cttz
+define i64 @testcttz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 true)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testcttz
+; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
+; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
+; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
+; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
+; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
+; CHECK: %Y1 = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %X1, i1 true)
+; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
+; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
+; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
+; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
+; CHECK: ret i64 %R
+
+}
+
+; Basic depth-3 chain with cttz
+define i64 @testcttzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
+ %X1 = sub i64 %A1, %B1
+ %X2 = sub i64 %A2, %B2
+ %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
+ %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
+ %Z1 = add i64 %Y1, %B1
+ %Z2 = add i64 %Y2, %B2
+ %R = mul i64 %Z1, %Z2
+ ret i64 %R
+
+; CHECK: @testcttzneg
+; CHECK: %X1 = sub i64 %A1, %B1
+; CHECK: %X2 = sub i64 %A2, %B2
+; CHECK: %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
+; CHECK: %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
+; CHECK: %Z1 = add i64 %Y1, %B1
+; CHECK: %Z2 = add i64 %Y2, %B2
+; CHECK: %R = mul i64 %Z1, %Z2
+; CHECK: ret i64 %R
+}
+
+
+
; CHECK: declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
; CHECK: declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
-; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #1
-; CHECK: declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32) #1
-
+; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32) #0
+; CHECK: declare <2 x double> @llvm.round.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) #0
+; CHECK: declare <2 x double> @llvm.ceil.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.rint.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.trunc.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.floor.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #0
+; CHECK: declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) #0
+; CHECK: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) #0
+; CHECK: declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) #0
+; CHECK: declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1) #0
; CHECK: attributes #0 = { nounwind readnone }
-; CHECK: attributes #1 = { nounwind readonly }
diff --git a/test/Transforms/BranchFolding/2007-10-19-InlineAsmDirectives.ll b/test/Transforms/BranchFolding/2007-10-19-InlineAsmDirectives.ll
index 9d82819f9db4..598ea0e354e1 100644
--- a/test/Transforms/BranchFolding/2007-10-19-InlineAsmDirectives.ll
+++ b/test/Transforms/BranchFolding/2007-10-19-InlineAsmDirectives.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -std-compile-opts -o - | llc -o - | grep bork_directive | wc -l | grep 2
+; RUN: opt < %s -std-compile-opts -o - | llc -no-integrated-as -o - | grep bork_directive | wc -l | grep 2
;; We don't want branch folding to fold asm directives.
diff --git a/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll b/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll
new file mode 100644
index 000000000000..430b99299d82
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll
@@ -0,0 +1,64 @@
+; RUN: opt -codegenprepare -disable-cgp-branch-opts -S < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; The first cast should be sunk into block2, in order that the
+; instruction selector can form an efficient
+; i64 * i64 -> i128 multiplication.
+define i128 @sink(i64* %mem1, i64* %mem2) {
+; CHECK-LABEL: block1:
+; CHECK-NEXT: load
+block1:
+ %l1 = load i64* %mem1
+ %s1 = sext i64 %l1 to i128
+ br label %block2
+
+; CHECK-LABEL: block2:
+; CHECK-NEXT: sext
+; CHECK-NEXT: load
+; CHECK-NEXT: sext
+block2:
+ %l2 = load i64* %mem2
+ %s2 = sext i64 %l2 to i128
+ %res = mul i128 %s1, %s2
+ ret i128 %res
+}
+
+; The first cast should be hoisted into block1, in order that the
+; instruction selector can form an extend-load.
+define i64 @hoist(i32* %mem1, i32* %mem2) {
+; CHECK-LABEL: block1:
+; CHECK-NEXT: load
+; CHECK-NEXT: sext
+block1:
+ %l1 = load i32* %mem1
+ br label %block2
+
+; CHECK-LABEL: block2:
+; CHECK-NEXT: load
+; CHECK-NEXT: sext
+block2:
+ %s1 = sext i32 %l1 to i64
+ %l2 = load i32* %mem2
+ %s2 = sext i32 %l2 to i64
+ %res = mul i64 %s1, %s2
+ ret i64 %res
+}
+
+; Make sure the cast sink logic and OptimizeExtUses don't end up in an infinite
+; loop.
+define i128 @use_ext_source() {
+block1:
+ %v1 = or i64 undef, undef
+ %v2 = zext i64 %v1 to i128
+ br i1 undef, label %block2, label %block3
+
+block2:
+ %v3 = add i64 %v1, 1
+ %v4 = zext i64 %v3 to i128
+ br label %block3
+
+block3:
+ %res = phi i128 [ %v2, %block1 ], [ %v4, %block2 ]
+ ret i128 %res
+}
diff --git a/test/Transforms/CodeGenPrepare/X86/lit.local.cfg b/test/Transforms/CodeGenPrepare/X86/lit.local.cfg
new file mode 100644
index 000000000000..e71f3cc4c41e
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/X86/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'X86' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll b/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
new file mode 100644
index 000000000000..a985c36707a9
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
@@ -0,0 +1,37 @@
+; RUN: opt -S -codegenprepare < %s | FileCheck %s
+
+target datalayout =
+"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: @load_cast_gep
+; CHECK: add i64 %sunkaddr, 40
+define void @load_cast_gep(i1 %cond, i64* %base) {
+entry:
+ %addr = getelementptr inbounds i64* %base, i64 5
+ %casted = addrspacecast i64* %addr to i32 addrspace(1)*
+ br i1 %cond, label %if.then, label %fallthrough
+
+if.then:
+ %v = load i32 addrspace(1)* %casted, align 4
+ br label %fallthrough
+
+fallthrough:
+ ret void
+}
+
+; CHECK-LABEL: @store_gep_cast
+; CHECK: add i64 %sunkaddr, 20
+define void @store_gep_cast(i1 %cond, i64* %base) {
+entry:
+ %casted = addrspacecast i64* %base to i32 addrspace(1)*
+ %addr = getelementptr inbounds i32 addrspace(1)* %casted, i64 5
+ br i1 %cond, label %if.then, label %fallthrough
+
+if.then:
+ store i32 0, i32 addrspace(1)* %addr, align 4
+ br label %fallthrough
+
+fallthrough:
+ ret void
+}
diff --git a/test/Transforms/CodeGenPrepare/X86/x86-shuffle-sink.ll b/test/Transforms/CodeGenPrepare/X86/x86-shuffle-sink.ll
new file mode 100644
index 000000000000..e945b03c33ae
--- /dev/null
+++ b/test/Transforms/CodeGenPrepare/X86/x86-shuffle-sink.ll
@@ -0,0 +1,105 @@
+; RUN: opt -S -codegenprepare -mcpu=core-avx2 %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AVX2
+; RUN: opt -S -codegenprepare -mcpu=corei7 %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SSE2
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-darwin10.9.0"
+
+define <16 x i8> @test_8bit(<16 x i8> %lhs, <16 x i8> %tmp, i1 %tst) {
+; CHECK-LABEL: @test_8bit
+; CHECK: if_true:
+; CHECK-NOT: shufflevector
+
+; CHECK: if_false:
+; CHECK-NOT: shufflevector
+; CHECK: shl <16 x i8> %lhs, %mask
+ %mask = shufflevector <16 x i8> %tmp, <16 x i8> undef, <16 x i32> zeroinitializer
+ br i1 %tst, label %if_true, label %if_false
+
+if_true:
+ ret <16 x i8> %mask
+
+if_false:
+ %res = shl <16 x i8> %lhs, %mask
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @test_16bit(<8 x i16> %lhs, <8 x i16> %tmp, i1 %tst) {
+; CHECK-LABEL: @test_16bit
+; CHECK: if_true:
+; CHECK-NOT: shufflevector
+
+; CHECK: if_false:
+; CHECK: [[SPLAT:%[0-9a-zA-Z_]+]] = shufflevector
+; CHECK: shl <8 x i16> %lhs, [[SPLAT]]
+ %mask = shufflevector <8 x i16> %tmp, <8 x i16> undef, <8 x i32> zeroinitializer
+ br i1 %tst, label %if_true, label %if_false
+
+if_true:
+ ret <8 x i16> %mask
+
+if_false:
+ %res = shl <8 x i16> %lhs, %mask
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @test_notsplat(<4 x i32> %lhs, <4 x i32> %tmp, i1 %tst) {
+; CHECK-LABEL: @test_notsplat
+; CHECK: if_true:
+; CHECK-NOT: shufflevector
+
+; CHECK: if_false:
+; CHECK-NOT: shufflevector
+; CHECK: shl <4 x i32> %lhs, %mask
+ %mask = shufflevector <4 x i32> %tmp, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 1, i32 0>
+ br i1 %tst, label %if_true, label %if_false
+
+if_true:
+ ret <4 x i32> %mask
+
+if_false:
+ %res = shl <4 x i32> %lhs, %mask
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @test_32bit(<4 x i32> %lhs, <4 x i32> %tmp, i1 %tst) {
+; CHECK-AVX2-LABEL: @test_32bit
+; CHECK-AVX2: if_false:
+; CHECK-AVX2-NOT: shufflevector
+; CHECK-AVX2: ashr <4 x i32> %lhs, %mask
+
+; CHECK-SSE2-LABEL: @test_32bit
+; CHECK-SSE2: if_false:
+; CHECK-SSE2: [[SPLAT:%[0-9a-zA-Z_]+]] = shufflevector
+; CHECK-SSE2: ashr <4 x i32> %lhs, [[SPLAT]]
+ %mask = shufflevector <4 x i32> %tmp, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 0, i32 0>
+ br i1 %tst, label %if_true, label %if_false
+
+if_true:
+ ret <4 x i32> %mask
+
+if_false:
+ %res = ashr <4 x i32> %lhs, %mask
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @test_64bit(<2 x i64> %lhs, <2 x i64> %tmp, i1 %tst) {
+; CHECK-AVX2-LABEL: @test_64bit
+; CHECK-AVX2: if_false:
+; CHECK-AVX2-NOT: shufflevector
+; CHECK-AVX2: lshr <2 x i64> %lhs, %mask
+
+; CHECK-SSE2-LABEL: @test_64bit
+; CHECK-SSE2: if_false:
+; CHECK-SSE2: [[SPLAT:%[0-9a-zA-Z_]+]] = shufflevector
+; CHECK-SSE2: lshr <2 x i64> %lhs, [[SPLAT]]
+
+ %mask = shufflevector <2 x i64> %tmp, <2 x i64> undef, <2 x i32> zeroinitializer
+ br i1 %tst, label %if_true, label %if_false
+
+if_true:
+ ret <2 x i64> %mask
+
+if_false:
+ %res = lshr <2 x i64> %lhs, %mask
+ ret <2 x i64> %res
+}
diff --git a/test/Transforms/ConstProp/loads.ll b/test/Transforms/ConstProp/loads.ll
index d05db47dcaaa..5a23dad87f3a 100644
--- a/test/Transforms/ConstProp/loads.ll
+++ b/test/Transforms/ConstProp/loads.ll
@@ -36,6 +36,19 @@ define i16 @test2() {
; BE: ret i16 -8531
}
+define i16 @test2_addrspacecast() {
+ %r = load i16 addrspace(1)* addrspacecast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16 addrspace(1)*)
+ ret i16 %r
+
+; 0xBEEF
+; LE-LABEL: @test2_addrspacecast(
+; LE: ret i16 -16657
+
+; 0xDEAD
+; BE-LABEL: @test2_addrspacecast(
+; BE: ret i16 -8531
+}
+
; Load of second 16 bits of 32-bit value.
define i16 @test3() {
%r = load i16* getelementptr(i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 1)
@@ -219,3 +232,37 @@ entry:
; BE-LABEL: @test15(
; BE: ret i64 2
}
+
+@gv7 = constant [4 x i8*] [i8* null, i8* inttoptr (i64 -14 to i8*), i8* null, i8* null]
+define i64 @test16.1() {
+ %v = load i64* bitcast ([4 x i8*]* @gv7 to i64*), align 8
+ ret i64 %v
+
+; LE-LABEL: @test16.1(
+; LE: ret i64 0
+
+; BE-LABEL: @test16.1(
+; BE: ret i64 0
+}
+
+define i64 @test16.2() {
+ %v = load i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 1) to i64*), align 8
+ ret i64 %v
+
+; LE-LABEL: @test16.2(
+; LE: ret i64 -14
+
+; BE-LABEL: @test16.2(
+; BE: ret i64 -14
+}
+
+define i64 @test16.3() {
+ %v = load i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 2) to i64*), align 8
+ ret i64 %v
+
+; LE-LABEL: @test16.3(
+; LE: ret i64 0
+
+; BE-LABEL: @test16.3(
+; BE: ret i64 0
+}
diff --git a/test/Transforms/ConstantHoisting/AArch64/const-addr.ll b/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
new file mode 100644
index 000000000000..89d596055c44
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
@@ -0,0 +1,23 @@
+; RUN: opt -mtriple=arm64-darwin-unknown -S -consthoist < %s | FileCheck %s
+
+%T = type { i32, i32, i32, i32 }
+
+define i32 @test1() nounwind {
+; CHECK-LABEL: test1
+; CHECK: %const = bitcast i64 68141056 to i64
+; CHECK: %1 = inttoptr i64 %const to %T*
+; CHECK: %o1 = getelementptr %T* %1, i32 0, i32 1
+; CHECK: %o2 = getelementptr %T* %1, i32 0, i32 2
+; CHECK: %o3 = getelementptr %T* %1, i32 0, i32 3
+ %at = inttoptr i64 68141056 to %T*
+ %o1 = getelementptr %T* %at, i32 0, i32 1
+ %t1 = load i32* %o1
+ %o2 = getelementptr %T* %at, i32 0, i32 2
+ %t2 = load i32* %o2
+ %a1 = add i32 %t1, %t2
+ %o3 = getelementptr %T* %at, i32 0, i32 3
+ %t3 = load i32* %o3
+ %a2 = add i32 %a1, %t3
+ ret i32 %a2
+}
+
diff --git a/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll b/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll
new file mode 100644
index 000000000000..575be791d9b1
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/AArch64/large-immediate.ll
@@ -0,0 +1,27 @@
+; RUN: opt -mtriple=arm64-darwin-unknown -S -consthoist < %s | FileCheck %s
+
+define i128 @test1(i128 %a) nounwind {
+; CHECK-LABEL: test1
+; CHECK: %const = bitcast i128 12297829382473034410122878 to i128
+ %1 = add i128 %a, 12297829382473034410122878
+ %2 = add i128 %1, 12297829382473034410122878
+ ret i128 %2
+}
+
+; Check that we don't hoist large but cheap constants.
+define i512 @test2(i512 %a) nounwind {
+; CHECK-LABEL: test2
+; CHECK-NOT: %const = bitcast i512 7 to i512
+ %1 = and i512 %a, 7
+ %2 = or i512 %1, 7
+ ret i512 %2
+}
+
+; Check that we don't hoist the shift value of a shift instruction.
+define i512 @test3(i512 %a) nounwind {
+; CHECK-LABEL: test3
+; CHECK-NOT: %const = bitcast i512 504 to i512
+ %1 = shl i512 %a, 504
+ %2 = ashr i512 %1, 504
+ ret i512 %2
+}
diff --git a/test/Transforms/ConstantHoisting/AArch64/lit.local.cfg b/test/Transforms/ConstantHoisting/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..7184443994b6
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/AArch64/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'AArch64' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll b/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
new file mode 100644
index 000000000000..b4337eeda653
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
@@ -0,0 +1,23 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%T = type { i32, i32, i32, i32 }
+
+; Test if even cheap base addresses are hoisted.
+define i32 @test1() nounwind {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i32 12345678 to i32
+; CHECK: %1 = inttoptr i32 %const to %T*
+; CHECK: %addr1 = getelementptr %T* %1, i32 0, i32 1
+ %addr1 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
+ %tmp1 = load i32* %addr1
+ %addr2 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
+ %tmp2 = load i32* %addr2
+ %addr3 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
+ %tmp3 = load i32* %addr3
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+}
+
diff --git a/test/Transforms/ConstantHoisting/PowerPC/lit.local.cfg b/test/Transforms/ConstantHoisting/PowerPC/lit.local.cfg
new file mode 100644
index 000000000000..5d33887ff0a4
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/PowerPC/lit.local.cfg
@@ -0,0 +1,3 @@
+if 'PowerPC' not in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/ConstantHoisting/PowerPC/masks.ll b/test/Transforms/ConstantHoisting/PowerPC/masks.ll
new file mode 100644
index 000000000000..d55318201136
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/PowerPC/masks.ll
@@ -0,0 +1,66 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Here the masks are all contiguous, and should not be hoisted.
+define i32 @test1() nounwind {
+entry:
+; CHECK-LABEL: @test1
+; CHECK-NOT: bitcast i32 65535 to i32
+; CHECK: and i32 undef, 65535
+ %conv121 = and i32 undef, 65535
+ br i1 undef, label %if.then152, label %if.end167
+
+if.then152:
+; CHECK: and i32 undef, 65535
+ %conv153 = and i32 undef, 65535
+ br i1 undef, label %if.end167, label %end2
+
+if.end167:
+; CHECK: and i32 {{.*}}, 32768
+ %shl161 = shl nuw nsw i32 %conv121, 15
+ %0 = load i8* undef, align 1
+ %conv169 = zext i8 %0 to i32
+ %shl170 = shl nuw nsw i32 %conv169, 7
+ %shl161.masked = and i32 %shl161, 32768
+ %conv174 = or i32 %shl170, %shl161.masked
+ %cmp178 = icmp ugt i32 %conv174, 32767
+ br i1 %cmp178, label %end1, label %end2
+
+end1:
+ unreachable
+
+end2:
+ unreachable
+}
+
+; Here the masks are not contiguous, and should be hoisted.
+define i32 @test2() nounwind {
+entry:
+; CHECK-LABEL: @test2
+; CHECK: bitcast i32 65531 to i32
+ %conv121 = and i32 undef, 65531
+ br i1 undef, label %if.then152, label %if.end167
+
+if.then152:
+ %conv153 = and i32 undef, 65531
+ br i1 undef, label %if.end167, label %end2
+
+if.end167:
+; CHECK: add i32 {{.*}}, -32758
+ %shl161 = shl nuw nsw i32 %conv121, 15
+ %0 = load i8* undef, align 1
+ %conv169 = zext i8 %0 to i32
+ %shl170 = shl nuw nsw i32 %conv169, 7
+ %shl161.masked = and i32 %shl161, 32773
+ %conv174 = or i32 %shl170, %shl161.masked
+ %cmp178 = icmp ugt i32 %conv174, 32767
+ br i1 %cmp178, label %end1, label %end2
+
+end1:
+ unreachable
+
+end2:
+ unreachable
+}
+
diff --git a/test/Transforms/ConstantHoisting/X86/cast-inst.ll b/test/Transforms/ConstantHoisting/X86/cast-inst.ll
new file mode 100644
index 000000000000..f490f4a37236
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/cast-inst.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Check if the materialization of the constant and the cast instruction are
+; inserted in the correct order.
+define i32 @cast_inst_test() {
+; CHECK-LABEL: @cast_inst_test
+; CHECK: %const = bitcast i64 4646526064 to i64
+; CHECK: %1 = inttoptr i64 %const to i32*
+; CHECK: %v0 = load i32* %1, align 16
+; CHECK: %const_mat = add i64 %const, 16
+; CHECK-NEXT: %2 = inttoptr i64 %const_mat to i32*
+; CHECK-NEXT: %v1 = load i32* %2, align 16
+; CHECK: %const_mat1 = add i64 %const, 32
+; CHECK-NEXT: %3 = inttoptr i64 %const_mat1 to i32*
+; CHECK-NEXT: %v2 = load i32* %3, align 16
+ %a0 = inttoptr i64 4646526064 to i32*
+ %v0 = load i32* %a0, align 16
+ %a1 = inttoptr i64 4646526080 to i32*
+ %v1 = load i32* %a1, align 16
+ %a2 = inttoptr i64 4646526096 to i32*
+ %v2 = load i32* %a2, align 16
+ %r0 = add i32 %v0, %v1
+ %r1 = add i32 %r0, %v2
+ ret i32 %r1
+}
+
diff --git a/test/Transforms/ConstantHoisting/X86/const-base-addr.ll b/test/Transforms/ConstantHoisting/X86/const-base-addr.ll
new file mode 100644
index 000000000000..01e6cdfd2a5b
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/const-base-addr.ll
@@ -0,0 +1,24 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%T = type { i32, i32, i32, i32 }
+
+; Test if even cheap base addresses are hoisted.
+define i32 @test1() nounwind {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i32 12345678 to i32
+; CHECK: %1 = inttoptr i32 %const to %T*
+; CHECK: %addr1 = getelementptr %T* %1, i32 0, i32 1
+ %addr1 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
+ %tmp1 = load i32* %addr1
+ %addr2 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
+ %tmp2 = load i32* %addr2
+ %addr3 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
+ %tmp3 = load i32* %addr3
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+}
+
diff --git a/test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll b/test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll
new file mode 100644
index 000000000000..d35238692078
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/delete-dead-cast-inst.ll
@@ -0,0 +1,22 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%T = type { i32, i32, i32, i32 }
+
+define i32 @test1() nounwind {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i32 12345678 to i32
+; CHECK-NOT: %base = inttoptr i32 12345678 to %T*
+; CHECK-NEXT: %1 = inttoptr i32 %const to %T*
+; CHECK-NEXT: %addr1 = getelementptr %T* %1, i32 0, i32 1
+; CHECK-NEXT: %addr2 = getelementptr %T* %1, i32 0, i32 2
+; CHECK-NEXT: %addr3 = getelementptr %T* %1, i32 0, i32 3
+ %base = inttoptr i32 12345678 to %T*
+ %addr1 = getelementptr %T* %base, i32 0, i32 1
+ %addr2 = getelementptr %T* %base, i32 0, i32 2
+ %addr3 = getelementptr %T* %base, i32 0, i32 3
+ ret i32 12345678
+}
+
diff --git a/test/Transforms/ConstantHoisting/X86/large-immediate.ll b/test/Transforms/ConstantHoisting/X86/large-immediate.ll
new file mode 100644
index 000000000000..b8c04f38b12f
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/large-immediate.ll
@@ -0,0 +1,36 @@
+; RUN: opt -mtriple=x86_64-darwin-unknown -S -consthoist < %s | FileCheck %s
+
+define i128 @test1(i128 %a) nounwind {
+; CHECK-LABEL: test1
+; CHECK: %const = bitcast i128 12297829382473034410122878 to i128
+ %1 = add i128 %a, 12297829382473034410122878
+ %2 = add i128 %1, 12297829382473034410122878
+ ret i128 %2
+}
+
+; Check that we don't hoist the shift value of a shift instruction.
+define i512 @test2(i512 %a) nounwind {
+; CHECK-LABEL: test2
+; CHECK-NOT: %const = bitcast i512 504 to i512
+ %1 = shl i512 %a, 504
+ %2 = ashr i512 %1, 504
+ ret i512 %2
+}
+
+; Check that we don't hoist constants with a type larger than i128.
+define i196 @test3(i196 %a) nounwind {
+; CHECK-LABEL: test3
+; CHECK-NOT: %const = bitcast i196 2 to i196
+ %1 = mul i196 %a, 2
+ %2 = mul i196 %1, 2
+ ret i196 %2
+}
+
+; Check that we don't hoist immediates with small values.
+define i96 @test4(i96 %a) nounwind {
+; CHECK-LABEL: test4
+; CHECK-NOT: %const = bitcast i96 2 to i96
+ %1 = mul i96 %a, 2
+ %2 = add i96 %1, 2
+ ret i96 %2
+}
diff --git a/test/Transforms/ConstantHoisting/X86/lit.local.cfg b/test/Transforms/ConstantHoisting/X86/lit.local.cfg
new file mode 100644
index 000000000000..e71f3cc4c41e
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/lit.local.cfg
@@ -0,0 +1,3 @@
+if 'X86' not in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/ConstantHoisting/X86/phi.ll b/test/Transforms/ConstantHoisting/X86/phi.ll
new file mode 100644
index 000000000000..086df1404703
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/phi.ll
@@ -0,0 +1,116 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; PR18626
+define i8* @test1(i1 %cmp, i64* %tmp) {
+entry:
+ call void @foo(i8* inttoptr (i64 68719476735 to i8*))
+ br i1 %cmp, label %if.end, label %return
+
+if.end: ; preds = %bb1
+ call void @foo(i8* inttoptr (i64 68719476736 to i8*))
+ br label %return
+
+return:
+ %retval.0 = phi i8* [ null, %entry ], [ inttoptr (i64 68719476736 to i8*), %if.end ]
+ store i64 1172321806, i64* %tmp
+ ret i8* %retval.0
+
+; CHECK-LABEL: @test1
+; CHECK: if.end:
+; CHECK: %2 = inttoptr i64 %const to i8*
+; CHECK-NEXT: br
+; CHECK: return:
+; CHECK-NEXT: %retval.0 = phi i8* [ null, %entry ], [ %2, %if.end ]
+}
+
+define void @test2(i1 %cmp, i64** %tmp) {
+entry:
+ call void @foo(i8* inttoptr (i64 68719476736 to i8*))
+ br i1 %cmp, label %if.end, label %return
+
+if.end: ; preds = %bb1
+ call void @foo(i8* inttoptr (i64 68719476736 to i8*))
+ br label %return
+
+return:
+ store i64* inttoptr (i64 68719476735 to i64*), i64** %tmp
+ ret void
+
+; CHECK-LABEL: @test2
+; CHECK: return:
+; CHECK-NEXT: %const_mat = add i64 %const, -1
+; CHECK-NEXT: inttoptr i64 %const_mat to i64*
+}
+
+declare void @foo(i8*)
+
+; PR18768
+define i32 @test3(i1 %c) {
+entry:
+ br i1 %c, label %if.then, label %if.end3
+
+if.then: ; preds = %entry
+ br label %if.end3
+
+if.end3: ; preds = %if.then, %entry
+ %d.0 = phi i32* [ inttoptr (i64 985162435264511 to i32*), %entry ], [ null, %if.then ]
+ %cmp4 = icmp eq i32* %d.0, inttoptr (i64 985162435264511 to i32*)
+ %cmp6 = icmp eq i32* %d.0, inttoptr (i64 985162418487296 to i32*)
+ %or = or i1 %cmp4, %cmp6
+ br i1 %or, label %if.then8, label %if.end9
+
+if.then8: ; preds = %if.end3
+ ret i32 1
+
+if.end9: ; preds = %if.then8, %if.end3
+ ret i32 undef
+}
+
+; <rdar://problem/16394449>
+define i64 @switch_test1(i64 %a) {
+; CHECK-LABEL: @switch_test1
+; CHECK: %0 = phi i64 [ %const, %case2 ], [ %const_mat, %Entry ], [ %const_mat, %Entry ]
+Entry:
+ %sel = add i64 %a, 4519019440
+ switch i64 %sel, label %fail [
+ i64 462, label %continuation
+ i64 449, label %case2
+ i64 443, label %continuation
+ ]
+
+case2:
+ br label %continuation
+
+continuation:
+ %0 = phi i64 [ 4519019440, %case2 ], [ 4519019460, %Entry ], [ 4519019460, %Entry ]
+ ret i64 0;
+
+fail:
+ ret i64 -1;
+}
+
+define i64 @switch_test2(i64 %a) {
+; CHECK-LABEL: @switch_test2
+; CHECK: %2 = phi i64* [ %1, %case2 ], [ %0, %Entry ], [ %0, %Entry ]
+Entry:
+ %sel = add i64 %a, 4519019440
+ switch i64 %sel, label %fail [
+ i64 462, label %continuation
+ i64 449, label %case2
+ i64 443, label %continuation
+ ]
+
+case2:
+ br label %continuation
+
+continuation:
+ %0 = phi i64* [ inttoptr(i64 4519019440 to i64*), %case2 ], [ inttoptr(i64 4519019460 to i64*), %Entry ], [ inttoptr(i64 4519019460 to i64*), %Entry ]
+ ret i64 0;
+
+fail:
+ ret i64 -1;
+}
+
diff --git a/test/Transforms/ConstantHoisting/X86/stackmap.ll b/test/Transforms/ConstantHoisting/X86/stackmap.ll
new file mode 100644
index 000000000000..9df44177820e
--- /dev/null
+++ b/test/Transforms/ConstantHoisting/X86/stackmap.ll
@@ -0,0 +1,17 @@
+; RUN: opt -S -consthoist < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Test if the 3rd argument of a stackmap is hoisted.
+define i128 @test1(i128 %a) {
+; CHECK-LABEL: @test1
+; CHECK: %const = bitcast i128 134646182756734033220 to i128
+; CHECK: tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 24, i128 %const)
+entry:
+ %0 = add i128 %a, 134646182756734033220
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 24, i128 134646182756734033220)
+ ret i128 %0
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
diff --git a/test/Transforms/ConstantMerge/linker-private.ll b/test/Transforms/ConstantMerge/linker-private.ll
deleted file mode 100644
index eba7880e8af7..000000000000
--- a/test/Transforms/ConstantMerge/linker-private.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: opt < %s -constmerge -S | FileCheck %s
-; <rdar://problem/10564621>
-
-%0 = type opaque
-%struct.NSConstantString = type { i32*, i32, i8*, i32 }
-
-; CHECK: @.str3 = linker_private unnamed_addr constant [1 x i8] zeroinitializer, align 1
-
-@isLogVisible = global i8 0, align 1
-@__CFConstantStringClassReference = external global [0 x i32]
-@.str3 = linker_private unnamed_addr constant [1 x i8] zeroinitializer, align 1
-@_unnamed_cfstring_4 = private constant %struct.NSConstantString { i32* getelementptr inbounds ([0 x i32]* @__CFConstantStringClassReference, i32 0, i32 0), i32 1992, i8* getelementptr inbounds ([1 x i8]* @.str3, i32 0, i32 0), i32 0 }, section "__DATA,__cfstring"
-@null.array = weak_odr constant [1 x i8] zeroinitializer, align 1
-
-define linkonce_odr void @bar() nounwind ssp align 2 {
-entry:
- %stack = alloca i8*, align 4
- %call = call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*, %0*)*)(i8* null, i8* null, %0* bitcast (%struct.NSConstantString* @_unnamed_cfstring_4 to %0*))
- store i8* getelementptr inbounds ([1 x i8]* @null.array, i32 0, i32 0), i8** %stack, align 4
- ret void
-}
-
-declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
diff --git a/test/Transforms/DeadArgElim/deadexternal.ll b/test/Transforms/DeadArgElim/deadexternal.ll
index acbcf75ee495..665d7dbf4937 100644
--- a/test/Transforms/DeadArgElim/deadexternal.ll
+++ b/test/Transforms/DeadArgElim/deadexternal.ll
@@ -32,7 +32,7 @@ entry:
%i = alloca i32, align 4
store volatile i32 10, i32* %i, align 4
; CHECK: %tmp = load volatile i32* %i, align 4
-; CHECK-next: call void @f(i32 undef)
+; CHECK-NEXT: call void @f(i32 undef)
%tmp = load volatile i32* %i, align 4
call void @f(i32 %tmp)
ret void
diff --git a/test/Transforms/DeadArgElim/keepalive.ll b/test/Transforms/DeadArgElim/keepalive.ll
index 82e01f225843..16569db4d38f 100644
--- a/test/Transforms/DeadArgElim/keepalive.ll
+++ b/test/Transforms/DeadArgElim/keepalive.ll
@@ -28,4 +28,20 @@ define void @caller() {
ret void
}
+; We can't remove 'this' here, as that would put argmem in ecx instead of
+; memory.
+define internal x86_thiscallcc i32 @unused_this(i32* %this, i32* inalloca %argmem) {
+ %v = load i32* %argmem
+ ret i32 %v
+}
+; CHECK-LABEL: define internal x86_thiscallcc i32 @unused_this(i32* %this, i32* inalloca %argmem)
+
+define i32 @caller2() {
+ %t = alloca i32
+ %m = alloca inalloca i32
+ store i32 42, i32* %m
+ %v = call x86_thiscallcc i32 @unused_this(i32* %t, i32* inalloca %m)
+ ret i32 %v
+}
+
; CHECK: attributes #0 = { nounwind }
diff --git a/test/Transforms/DeadStoreElimination/PartialStore.ll b/test/Transforms/DeadStoreElimination/PartialStore.ll
index 4799ef3383bc..80c2bfae846e 100644
--- a/test/Transforms/DeadStoreElimination/PartialStore.ll
+++ b/test/Transforms/DeadStoreElimination/PartialStore.ll
@@ -45,9 +45,9 @@ define void @test4(i8* %P) {
store i8 19, i8* %P ;; dead
%A = getelementptr i8* %P, i32 3
-
+
store i8 42, i8* %A ;; dead
-
+
%Q = bitcast i8* %P to double*
store double 0.0, double* %Q
ret void
@@ -61,7 +61,7 @@ define void @test5(i32 %i) nounwind ssp {
%C = getelementptr i8* %B, i32 %i
store i8 10, i8* %C ;; Dead store to variable index.
store i32 20, i32* %A
-
+
call void @test5a(i32* %A)
ret void
; CHECK-LABEL: @test5(
@@ -69,3 +69,19 @@ define void @test5(i32 %i) nounwind ssp {
; CHECK-NEXT: store i32 20
; CHECK-NEXT: call void @test5a
}
+
+declare void @test5a_as1(i32*)
+define void @test5_addrspacecast(i32 %i) nounwind ssp {
+ %A = alloca i32
+ %B = addrspacecast i32* %A to i8 addrspace(1)*
+ %C = getelementptr i8 addrspace(1)* %B, i32 %i
+ store i8 10, i8 addrspace(1)* %C ;; Dead store to variable index.
+ store i32 20, i32* %A
+
+ call void @test5a(i32* %A)
+ ret void
+; CHECK-LABEL: @test5_addrspacecast(
+; CHECK-NEXT: alloca
+; CHECK-NEXT: store i32 20
+; CHECK-NEXT: call void @test5a
+}
diff --git a/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll b/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll
new file mode 100644
index 000000000000..8953f9ce0834
--- /dev/null
+++ b/test/Transforms/DeadStoreElimination/cs-cs-aliasing.ll
@@ -0,0 +1,74 @@
+; RUN: opt -basicaa -dse -S < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%class.basic_string = type { %"class.__gnu_cxx::__versa_string" }
+%"class.__gnu_cxx::__versa_string" = type { %"class.__gnu_cxx::__sso_string_base" }
+%"class.__gnu_cxx::__sso_string_base" = type { %"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider", i64, %union.anon }
+%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider" = type { i8* }
+%union.anon = type { i64, [8 x i8] }
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #0
+
+; Function Attrs: noinline nounwind readonly uwtable
+declare zeroext i1 @callee_takes_string(%class.basic_string* nonnull) #1 align 2
+
+; Function Attrs: nounwind uwtable
+define weak_odr zeroext i1 @test() #2 align 2 {
+
+; CHECK-LABEL: @test
+
+bb:
+ %tmp = alloca %class.basic_string, align 8
+ %tmp1 = alloca %class.basic_string, align 8
+ %tmp3 = getelementptr inbounds %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 2
+ %tmp4 = bitcast %union.anon* %tmp3 to i8*
+ %tmp5 = getelementptr inbounds %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 0, i32 0
+ %tmp6 = getelementptr inbounds %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 1
+ %tmp7 = getelementptr inbounds i8* %tmp4, i64 1
+ %tmp8 = bitcast %class.basic_string* %tmp to i8*
+ %tmp9 = bitcast i64 0 to i64
+ %tmp10 = getelementptr inbounds %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 2
+ %tmp11 = bitcast %union.anon* %tmp10 to i8*
+ %tmp12 = getelementptr inbounds %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 0, i32 0
+ %tmp13 = getelementptr inbounds %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 1
+ %tmp14 = getelementptr inbounds i8* %tmp11, i64 1
+ %tmp15 = bitcast %class.basic_string* %tmp1 to i8*
+ br label %_ZN12basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS2_.exit
+
+_ZN12basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS2_.exit: ; preds = %bb
+ store i8* %tmp4, i8** %tmp5, align 8
+ store i8 62, i8* %tmp4, align 8
+ store i64 1, i64* %tmp6, align 8
+ store i8 0, i8* %tmp7, align 1
+ %tmp16 = call zeroext i1 @callee_takes_string(%class.basic_string* nonnull %tmp)
+ br label %_ZN9__gnu_cxx17__sso_string_baseIcSt11char_traitsIcESaIcEED2Ev.exit3
+
+_ZN9__gnu_cxx17__sso_string_baseIcSt11char_traitsIcESaIcEED2Ev.exit3: ; preds = %_ZN12basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS2_.exit
+
+; CHECK: _ZN9__gnu_cxx17__sso_string_baseIcSt11char_traitsIcESaIcEED2Ev.exit3:
+
+; The following can be read through the call %tmp17:
+ store i8* %tmp11, i8** %tmp12, align 8
+ store i8 125, i8* %tmp11, align 8
+ store i64 1, i64* %tmp13, align 8
+ store i8 0, i8* %tmp14, align 1
+
+; CHECK: store i8* %tmp11, i8** %tmp12, align 8
+; CHECK: store i8 125, i8* %tmp11, align 8
+; CHECK: store i64 1, i64* %tmp13, align 8
+; CHECK: store i8 0, i8* %tmp14, align 1
+
+ %tmp17 = call zeroext i1 @callee_takes_string(%class.basic_string* nonnull %tmp1)
+ call void @llvm.memset.p0i8.i64(i8* %tmp11, i8 -51, i64 16, i32 8, i1 false) #0
+ call void @llvm.memset.p0i8.i64(i8* %tmp15, i8 -51, i64 32, i32 8, i1 false) #0
+ call void @llvm.memset.p0i8.i64(i8* %tmp4, i8 -51, i64 16, i32 8, i1 false) #0
+ call void @llvm.memset.p0i8.i64(i8* %tmp8, i8 -51, i64 32, i32 8, i1 false) #0
+ ret i1 %tmp17
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { noinline nounwind readonly uwtable }
+attributes #2 = { nounwind uwtable }
+
diff --git a/test/Transforms/DeadStoreElimination/simple.ll b/test/Transforms/DeadStoreElimination/simple.ll
index ec98466d33b9..1e813852b04c 100644
--- a/test/Transforms/DeadStoreElimination/simple.ll
+++ b/test/Transforms/DeadStoreElimination/simple.ll
@@ -105,6 +105,15 @@ define void @test9(%struct.x* byval %a) nounwind {
; CHECK-NEXT: ret void
}
+; Test for inalloca handling.
+define void @test9_2(%struct.x* inalloca %a) nounwind {
+ %tmp2 = getelementptr %struct.x* %a, i32 0, i32 0
+ store i32 1, i32* %tmp2, align 4
+ ret void
+; CHECK-LABEL: @test9_2(
+; CHECK-NEXT: ret void
+}
+
; va_arg has fuzzy dependence, the store shouldn't be zapped.
define double @test10(i8* %X) {
%X_addr = alloca i8*
@@ -163,6 +172,23 @@ define i32* @test13() {
; CHECK-NEXT: call void
}
+define i32 addrspace(1)* @test13_addrspacecast() {
+ %p = tail call i8* @malloc(i32 4)
+ %p.bc = bitcast i8* %p to i32*
+ %P = addrspacecast i32* %p.bc to i32 addrspace(1)*
+ %DEAD = load i32 addrspace(1)* %P
+ %DEAD2 = add i32 %DEAD, 1
+ store i32 %DEAD2, i32 addrspace(1)* %P
+ call void @test13f( )
+ store i32 0, i32 addrspace(1)* %P
+ ret i32 addrspace(1)* %P
+; CHECK: @test13_addrspacecast()
+; CHECK-NEXT: malloc
+; CHECK-NEXT: bitcast
+; CHECK-NEXT: addrspacecast
+; CHECK-NEXT: call void
+}
+
declare noalias i8* @malloc(i32)
declare noalias i8* @calloc(i32, i32)
diff --git a/test/Transforms/FunctionAttrs/nocapture.ll b/test/Transforms/FunctionAttrs/nocapture.ll
index 110bd03dac73..d3842c8415ca 100644
--- a/test/Transforms/FunctionAttrs/nocapture.ll
+++ b/test/Transforms/FunctionAttrs/nocapture.ll
@@ -68,7 +68,7 @@ define i1* @lookup_bit(i32* %q, i32 %bitno) readnone nounwind {
ret i1* %lookup
}
-; CHECK: define i1 @c7(i32* readnone %q, i32 %bitno)
+; CHECK: define i1 @c7(i32* readonly %q, i32 %bitno)
define i1 @c7(i32* %q, i32 %bitno) {
%ptr = call i1* @lookup_bit(i32* %q, i32 %bitno)
%val = load i1* %ptr
@@ -91,6 +91,21 @@ l:
ret i32 %val
}
+; CHECK: define i32 @nc1_addrspace(i32* %q, i32 addrspace(1)* nocapture %p, i1 %b)
+define i32 @nc1_addrspace(i32* %q, i32 addrspace(1)* %p, i1 %b) {
+e:
+ br label %l
+l:
+ %x = phi i32 addrspace(1)* [ %p, %e ]
+ %y = phi i32* [ %q, %e ]
+ %tmp = addrspacecast i32 addrspace(1)* %x to i32* ; <i32*> [#uses=2]
+ %tmp2 = select i1 %b, i32* %tmp, i32* %y
+ %val = load i32* %tmp2 ; <i32> [#uses=1]
+ store i32 0, i32* %tmp
+ store i32* %y, i32** @g
+ ret i32 %val
+}
+
; CHECK: define void @nc2(i32* nocapture %p, i32* %q)
define void @nc2(i32* %p, i32* %q) {
%1 = call i32 @nc1(i32* %q, i32* %p, i1 0) ; <i32> [#uses=0]
diff --git a/test/Transforms/FunctionAttrs/readattrs.ll b/test/Transforms/FunctionAttrs/readattrs.ll
index 0842f566d124..b4e904cf9b04 100644
--- a/test/Transforms/FunctionAttrs/readattrs.ll
+++ b/test/Transforms/FunctionAttrs/readattrs.ll
@@ -45,3 +45,23 @@ define void @test6_2(i8** %p, i8* %q) {
call void @test6_1()
ret void
}
+
+; CHECK: define void @test7_1(i32* inalloca nocapture %a)
+; inalloca parameters are always considered written
+define void @test7_1(i32* inalloca %a) {
+ ret void
+}
+
+; CHECK: define i32* @test8_1(i32* readnone %p)
+define i32* @test8_1(i32* %p) {
+entry:
+ ret i32* %p
+}
+
+; CHECK: define void @test8_2(i32* %p)
+define void @test8_2(i32* %p) {
+entry:
+ %call = call i32* @test8_1(i32* %p)
+ store i32 10, i32* %call, align 4
+ ret void
+}
diff --git a/test/Transforms/GCOVProfiling/global-ctor.ll b/test/Transforms/GCOVProfiling/global-ctor.ll
new file mode 100644
index 000000000000..722a0962ec57
--- /dev/null
+++ b/test/Transforms/GCOVProfiling/global-ctor.ll
@@ -0,0 +1,58 @@
+; RUN: echo '!16 = metadata !{metadata !"%T/global-ctor.ll", metadata !0}' > %t1
+; RUN: cat %s %t1 > %t2
+; RUN: opt -insert-gcov-profiling -disable-output < %t2
+; RUN: not grep '_GLOBAL__sub_I_global-ctor' %T/global-ctor.gcno
+; RUN: rm %T/global-ctor.gcno
+
+; REQUIRES: shell
+
+@x = global i32 0, align 4
+@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__sub_I_global-ctor.ll, i8* null }]
+
+; Function Attrs: nounwind
+define internal void @__cxx_global_var_init() #0 section ".text.startup" {
+entry:
+ br label %0
+
+; <label>:0 ; preds = %entry
+ %call = call i32 @_Z1fv(), !dbg !13
+ store i32 %call, i32* @x, align 4, !dbg !13
+ ret void, !dbg !13
+}
+
+declare i32 @_Z1fv() #1
+
+; Function Attrs: nounwind
+define internal void @_GLOBAL__sub_I_global-ctor.ll() #0 section ".text.startup" {
+entry:
+ br label %0
+
+; <label>:0 ; preds = %entry
+ call void @__cxx_global_var_init(), !dbg !14
+ ret void, !dbg !14
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!10, !11}
+!llvm.gcov = !{!16}
+!llvm.ident = !{!12}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (trunk 210217)", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2} ; [ DW_TAG_compile_unit ] [/home/nlewycky/<stdin>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<stdin>", metadata !"/home/nlewycky"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !8}
+!4 = metadata !{i32 786478, metadata !5, metadata !6, metadata !"__cxx_global_var_init", metadata !"__cxx_global_var_init", metadata !"", i32 2, metadata !7, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @__cxx_global_var_init, null, null, metadata !2, i32 2} ; [ DW_TAG_subprogram ] [line 2] [local] [def] [__cxx_global_var_init]
+!5 = metadata !{metadata !"global-ctor.ll", metadata !"/home/nlewycky"}
+!6 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [/home/nlewycky/global-ctor.ll]
+!7 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = metadata !{i32 786478, metadata !1, metadata !9, metadata !"", metadata !"", metadata !"_GLOBAL__sub_I_global-ctor.ll", i32 0, metadata !7, i1 true, i1 true, i32 0, i32 0, null, i32 64, i1 false, void ()* @_GLOBAL__sub_I_global-ctor.ll, null, null, metadata !2, i32 0} ; [ DW_TAG_subprogram ] [line 0] [local] [def]
+!9 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/home/nlewycky/<stdin>]
+!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!11 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!12 = metadata !{metadata !"clang version 3.5.0 (trunk 210217)"}
+!13 = metadata !{i32 2, i32 0, metadata !4, null}
+!14 = metadata !{i32 0, i32 0, metadata !15, null}
+!15 = metadata !{i32 786443, metadata !5, metadata !8} ; [ DW_TAG_lexical_block ] [/home/nlewycky/global-ctor.ll]
diff --git a/test/Transforms/GCOVProfiling/linezero.ll b/test/Transforms/GCOVProfiling/linezero.ll
new file mode 100644
index 000000000000..e2f832498428
--- /dev/null
+++ b/test/Transforms/GCOVProfiling/linezero.ll
@@ -0,0 +1,143 @@
+; RUN: sed -e 's@PATTERN@\%T@g' < %s > %t1
+; RUN: opt -insert-gcov-profiling -disable-output < %t1
+; RUN: rm %T/linezero.gcno %t1
+; REQUIRES: shell
+
+; This is a crash test.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.vector = type { i8 }
+
+; Function Attrs: nounwind
+define i32 @_Z4testv() #0 {
+entry:
+ %retval = alloca i32, align 4
+ %__range = alloca %struct.vector*, align 8
+ %ref.tmp = alloca %struct.vector, align 1
+ %undef.agg.tmp = alloca %struct.vector, align 1
+ %__begin = alloca i8*, align 8
+ %__end = alloca i8*, align 8
+ %spec = alloca i8, align 1
+ call void @llvm.dbg.declare(metadata !{%struct.vector** %__range}, metadata !27), !dbg !30
+ br label %0
+
+; <label>:0 ; preds = %entry
+ call void @_Z13TagFieldSpecsv(), !dbg !31
+ store %struct.vector* %ref.tmp, %struct.vector** %__range, align 8, !dbg !31
+ call void @llvm.dbg.declare(metadata !{i8** %__begin}, metadata !32), !dbg !30
+ %1 = load %struct.vector** %__range, align 8, !dbg !31
+ %call = call i8* @_ZN6vector5beginEv(%struct.vector* %1), !dbg !31
+ store i8* %call, i8** %__begin, align 8, !dbg !31
+ call void @llvm.dbg.declare(metadata !{i8** %__end}, metadata !33), !dbg !30
+ %2 = load %struct.vector** %__range, align 8, !dbg !31
+ %call1 = call i8* @_ZN6vector3endEv(%struct.vector* %2), !dbg !31
+ store i8* %call1, i8** %__end, align 8, !dbg !31
+ br label %for.cond, !dbg !31
+
+for.cond: ; preds = %for.inc, %0
+ %3 = load i8** %__begin, align 8, !dbg !34
+ %4 = load i8** %__end, align 8, !dbg !34
+ %cmp = icmp ne i8* %3, %4, !dbg !34
+ br i1 %cmp, label %for.body, label %for.end, !dbg !34
+
+for.body: ; preds = %for.cond
+ call void @llvm.dbg.declare(metadata !{i8* %spec}, metadata !37), !dbg !31
+ %5 = load i8** %__begin, align 8, !dbg !38
+ %6 = load i8* %5, align 1, !dbg !38
+ store i8 %6, i8* %spec, align 1, !dbg !38
+ br label %for.inc, !dbg !38
+
+for.inc: ; preds = %for.body
+ %7 = load i8** %__begin, align 8, !dbg !40
+ %incdec.ptr = getelementptr inbounds i8* %7, i32 1, !dbg !40
+ store i8* %incdec.ptr, i8** %__begin, align 8, !dbg !40
+ br label %for.cond, !dbg !40
+
+for.end: ; preds = %for.cond
+ call void @llvm.trap(), !dbg !42
+ unreachable, !dbg !42
+
+return: ; No predecessors!
+ %8 = load i32* %retval, !dbg !44
+ ret i32 %8, !dbg !44
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata) #1
+
+declare void @_Z13TagFieldSpecsv() #2
+
+declare i8* @_ZN6vector5beginEv(%struct.vector*) #2
+
+declare i8* @_ZN6vector3endEv(%struct.vector*) #2
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #3
+
+; Function Attrs: nounwind
+define void @_Z2f1v() #0 {
+entry:
+ br label %0
+
+; <label>:0 ; preds = %entry
+ ret void, !dbg !45
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { noreturn nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!23, !24}
+!llvm.gcov = !{!25}
+!llvm.ident = !{!26}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 (trunk 209871)", i1 false, metadata !"", i32 0, metadata !2, metadata !3, metadata !14, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [<stdin>] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"<stdin>", metadata !"PATTERN"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !5, null, metadata !"vector", i32 21, i64 8, i64 8, i32 0, i32 0, null, metadata !6, i32 0, null, null, metadata !"_ZTS6vector"} ; [ DW_TAG_structure_type ] [vector] [line 21, size 8, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !"linezero.cc", metadata !"PATTERN"}
+!6 = metadata !{metadata !7, metadata !13}
+!7 = metadata !{i32 786478, metadata !5, metadata !"_ZTS6vector", metadata !"begin", metadata !"begin", metadata !"_ZN6vector5beginEv", i32 25, metadata !8, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 25} ; [ DW_TAG_subprogram ] [line 25] [begin]
+!8 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !9, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!9 = metadata !{metadata !10, metadata !12}
+!10 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !11} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from char]
+!11 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!12 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS6vector"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS6vector]
+!13 = metadata !{i32 786478, metadata !5, metadata !"_ZTS6vector", metadata !"end", metadata !"end", metadata !"_ZN6vector3endEv", i32 26, metadata !8, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, null, i32 26} ; [ DW_TAG_subprogram ] [line 26] [end]
+!14 = metadata !{metadata !15, metadata !20}
+!15 = metadata !{i32 786478, metadata !5, metadata !16, metadata !"test", metadata !"test", metadata !"_Z4testv", i32 50, metadata !17, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @_Z4testv, null, null, metadata !2, i32 50} ; [ DW_TAG_subprogram ] [line 50] [def] [test]
+!16 = metadata !{i32 786473, metadata !5} ; [ DW_TAG_file_type ] [./linezero.cc]
+!17 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !18, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!18 = metadata !{metadata !19}
+!19 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!20 = metadata !{i32 786478, metadata !5, metadata !16, metadata !"f1", metadata !"f1", metadata !"_Z2f1v", i32 54, metadata !21, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z2f1v, null, null, metadata !2, i32 54} ; [ DW_TAG_subprogram ] [line 54] [def] [f1]
+!21 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !22, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!22 = metadata !{null}
+!23 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!24 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!25 = metadata !{metadata !"PATTERN/linezero.o", metadata !0}
+!26 = metadata !{metadata !"clang version 3.5.0 (trunk 209871)"}
+!27 = metadata !{i32 786688, metadata !28, metadata !"__range", null, i32 0, metadata !29, i32 64, i32 0} ; [ DW_TAG_auto_variable ] [__range] [line 0]
+!28 = metadata !{i32 786443, metadata !5, metadata !15, i32 51, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./linezero.cc]
+!29 = metadata !{i32 786498, null, null, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !"_ZTS6vector"} ; [ DW_TAG_rvalue_reference_type ] [line 0, size 0, align 0, offset 0] [from _ZTS6vector]
+!30 = metadata !{i32 0, i32 0, metadata !28, null}
+!31 = metadata !{i32 51, i32 0, metadata !28, null}
+!32 = metadata !{i32 786688, metadata !28, metadata !"__begin", null, i32 0, metadata !10, i32 64, i32 0} ; [ DW_TAG_auto_variable ] [__begin] [line 0]
+!33 = metadata !{i32 786688, metadata !28, metadata !"__end", null, i32 0, metadata !10, i32 64, i32 0} ; [ DW_TAG_auto_variable ] [__end] [line 0]
+!34 = metadata !{i32 51, i32 0, metadata !35, null}
+!35 = metadata !{i32 786443, metadata !5, metadata !36, i32 51, i32 0, i32 5, i32 5} ; [ DW_TAG_lexical_block ] [./linezero.cc]
+!36 = metadata !{i32 786443, metadata !5, metadata !28, i32 51, i32 0, i32 1, i32 1} ; [ DW_TAG_lexical_block ] [./linezero.cc]
+!37 = metadata !{i32 786688, metadata !28, metadata !"spec", metadata !16, i32 51, metadata !11, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [spec] [line 51]
+!38 = metadata !{i32 51, i32 0, metadata !39, null}
+!39 = metadata !{i32 786443, metadata !5, metadata !28, i32 51, i32 0, i32 2, i32 2} ; [ DW_TAG_lexical_block ] [./linezero.cc]
+!40 = metadata !{i32 51, i32 0, metadata !41, null}
+!41 = metadata !{i32 786443, metadata !5, metadata !28, i32 51, i32 0, i32 4, i32 4} ; [ DW_TAG_lexical_block ] [./linezero.cc]
+!42 = metadata !{i32 51, i32 0, metadata !43, null}
+!43 = metadata !{i32 786443, metadata !5, metadata !28, i32 51, i32 0, i32 3, i32 3} ; [ DW_TAG_lexical_block ] [./linezero.cc]
+!44 = metadata !{i32 52, i32 0, metadata !15, null}
+!45 = metadata !{i32 54, i32 0, metadata !20, null}
diff --git a/test/Transforms/GCOVProfiling/version.ll b/test/Transforms/GCOVProfiling/version.ll
index 2f1bd70f6df9..04f3f992b836 100644
--- a/test/Transforms/GCOVProfiling/version.ll
+++ b/test/Transforms/GCOVProfiling/version.ll
@@ -1,11 +1,11 @@
; RUN: echo '!9 = metadata !{metadata !"%T/version.ll", metadata !0}' > %t1
; RUN: cat %s %t1 > %t2
; RUN: opt -insert-gcov-profiling -disable-output < %t2
-; RUN: head -c12 %T/version.gcno | grep '^oncg\*204MVLL$'
+; RUN: head -c8 %T/version.gcno | grep '^oncg\*204'
; RUN: rm %T/version.gcno
; RUN: not opt -insert-gcov-profiling -default-gcov-version=asdfasdf -disable-output < %t2
; RUN: opt -insert-gcov-profiling -default-gcov-version=407* -disable-output < %t2
-; RUN: head -c12 %T/version.gcno | grep '^oncg\*704MVLL$'
+; RUN: head -c8 %T/version.gcno | grep '^oncg\*704'
; RUN: rm %T/version.gcno
define void @test() {
diff --git a/test/Transforms/GVN/2009-03-10-PREOnVoid.ll b/test/Transforms/GVN/2009-03-10-PREOnVoid.ll
index 89d6a5f982b8..fd31fce59a84 100644
--- a/test/Transforms/GVN/2009-03-10-PREOnVoid.ll
+++ b/test/Transforms/GVN/2009-03-10-PREOnVoid.ll
@@ -53,30 +53,58 @@ bb11: ; preds = %bb7, %bb5
unreachable
}
-declare i32 @pthread_once(i32*, void ()*)
+define i32 @pthread_once(i32*, void ()*) {
+ ret i32 0
+}
-declare i8* @pthread_getspecific(i32)
+define i8* @pthread_getspecific(i32) {
+ ret i8* null
+}
-declare i32 @pthread_setspecific(i32, i8*)
+define i32 @pthread_setspecific(i32, i8*) {
+ ret i32 0
+}
-declare i32 @pthread_create(i32*, %struct.pthread_attr_t*, i8* (i8*)*, i8*)
+define i32 @pthread_create(i32*, %struct.pthread_attr_t*, i8* (i8*)*, i8*) {
+ ret i32 0
+}
-declare i32 @pthread_cancel(i32)
+define i32 @pthread_cancel(i32) {
+ ret i32 0
+}
-declare i32 @pthread_mutex_lock(%struct.pthread_mutex_t*)
+define i32 @pthread_mutex_lock(%struct.pthread_mutex_t*) {
+ ret i32 0
+}
-declare i32 @pthread_mutex_trylock(%struct.pthread_mutex_t*)
+define i32 @pthread_mutex_trylock(%struct.pthread_mutex_t*) {
+ ret i32 0
+}
-declare i32 @pthread_mutex_unlock(%struct.pthread_mutex_t*)
+define i32 @pthread_mutex_unlock(%struct.pthread_mutex_t*) {
+ ret i32 0
+}
-declare i32 @pthread_mutex_init(%struct.pthread_mutex_t*, %struct.__sched_param*)
+define i32 @pthread_mutex_init(%struct.pthread_mutex_t*, %struct.__sched_param*) {
+ ret i32 0
+}
-declare i32 @pthread_key_create(i32*, void (i8*)*)
+define i32 @pthread_key_create(i32*, void (i8*)*) {
+ ret i32 0
+}
-declare i32 @pthread_key_delete(i32)
+define i32 @pthread_key_delete(i32) {
+ ret i32 0
+}
-declare i32 @pthread_mutexattr_init(%struct.__sched_param*)
+define i32 @pthread_mutexattr_init(%struct.__sched_param*) {
+ ret i32 0
+}
-declare i32 @pthread_mutexattr_settype(%struct.__sched_param*, i32)
+define i32 @pthread_mutexattr_settype(%struct.__sched_param*, i32) {
+ ret i32 0
+}
-declare i32 @pthread_mutexattr_destroy(%struct.__sched_param*)
+define i32 @pthread_mutexattr_destroy(%struct.__sched_param*) {
+ ret i32 0
+}
diff --git a/test/Transforms/GVN/calloc-load-removal.ll b/test/Transforms/GVN/calloc-load-removal.ll
new file mode 100644
index 000000000000..2dde5b7b4146
--- /dev/null
+++ b/test/Transforms/GVN/calloc-load-removal.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -basicaa -gvn < %s | FileCheck %s
+; RUN: opt -S -basicaa -gvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
+; Check that loads from calloc are recognized as being zero.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+; Function Attrs: nounwind uwtable
+define i32 @test1() {
+ %1 = tail call noalias i8* @calloc(i64 1, i64 4)
+ %2 = bitcast i8* %1 to i32*
+ ; This load is trivially constant zero
+ %3 = load i32* %2, align 4
+ ret i32 %3
+
+; CHECK-LABEL: @test1(
+; CHECK-NOT: %3 = load i32* %2, align 4
+; CHECK: ret i32 0
+
+; CHECK_NO_LIBCALLS-LABEL: @test1(
+; CHECK_NO_LIBCALLS: load
+; CHECK_NO_LIBCALLS: ret i32 %
+
+}
+
+declare noalias i8* @calloc(i64, i64)
diff --git a/test/Transforms/GVN/invariant-load.ll b/test/Transforms/GVN/invariant-load.ll
new file mode 100644
index 000000000000..80e2226b5fae
--- /dev/null
+++ b/test/Transforms/GVN/invariant-load.ll
@@ -0,0 +1,31 @@
+; Test if the !invariant.load metadata is maintained by GVN.
+; RUN: opt -basicaa -gvn -S < %s | FileCheck %s
+
+define i32 @test1(i32* nocapture %p, i8* nocapture %q) {
+; CHECK-LABEL: test1
+; CHECK: %x = load i32* %p, align 4, !invariant.load !0
+; CHECK-NOT: %y = load
+entry:
+ %x = load i32* %p, align 4, !invariant.load !0
+ %conv = trunc i32 %x to i8
+ store i8 %conv, i8* %q, align 1
+ %y = load i32* %p, align 4, !invariant.load !0
+ %add = add i32 %y, 1
+ ret i32 %add
+}
+
+define i32 @test2(i32* nocapture %p, i8* nocapture %q) {
+; CHECK-LABEL: test2
+; CHECK-NOT: !invariant.load
+; CHECK-NOT: %y = load
+entry:
+ %x = load i32* %p, align 4
+ %conv = trunc i32 %x to i8
+ store i8 %conv, i8* %q, align 1
+ %y = load i32* %p, align 4, !invariant.load !0
+ %add = add i32 %y, 1
+ ret i32 %add
+}
+
+!0 = metadata !{ }
+
diff --git a/test/Transforms/GVN/load-pre-nonlocal.ll b/test/Transforms/GVN/load-pre-nonlocal.ll
new file mode 100644
index 000000000000..7bac1b78e6a2
--- /dev/null
+++ b/test/Transforms/GVN/load-pre-nonlocal.ll
@@ -0,0 +1,87 @@
+; RUN: opt -S -o - -basicaa -domtree -gvn %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+%struct.S1 = type { i32, i32 }
+
+@a2 = common global i32* null, align 8
+@a = common global i32* null, align 8
+@s1 = common global %struct.S1 zeroinitializer, align 8
+
+; Check that GVN doesn't determine %2 is partially redundant.
+
+; CHECK-LABEL: define i32 @volatile_load
+; CHECK: for.body:
+; CHECK: %2 = load i32*
+; CHECK: %3 = load volatile i32*
+; CHECK: for.cond.for.end_crit_edge:
+
+define i32 @volatile_load(i32 %n) {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph:
+ %0 = load i32** @a2, align 8, !tbaa !1
+ %1 = load i32** @a, align 8, !tbaa !1
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
+ %s.09 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
+ %p.08 = phi i32* [ %0, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+ %2 = load i32* %p.08, align 4, !tbaa !5
+ %arrayidx = getelementptr inbounds i32* %1, i64 %indvars.iv
+ store i32 %2, i32* %arrayidx, align 4, !tbaa !5
+ %3 = load volatile i32* %p.08, align 4, !tbaa !5
+ %add = add nsw i32 %3, %s.09
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %incdec.ptr = getelementptr inbounds i32* %p.08, i64 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp ne i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge:
+ %add.lcssa = phi i32 [ %add, %for.body ]
+ br label %for.end
+
+for.end:
+ %s.0.lcssa = phi i32 [ %add.lcssa, %for.cond.for.end_crit_edge ], [ 0, %entry ]
+ ret i32 %s.0.lcssa
+}
+
+; %1 is partially redundant if %0 can be widened to a 64-bit load.
+
+; CHECK-LABEL: define i32 @overaligned_load
+; CHECK: if.end:
+; CHECK-NOT: %1 = load i32*
+
+define i32 @overaligned_load(i32 %a, i32* nocapture %b) {
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %0 = load i32* getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), align 8, !tbaa !5
+ br label %if.end
+
+if.else:
+ %arrayidx = getelementptr inbounds i32* %b, i64 2
+ store i32 10, i32* %arrayidx, align 4, !tbaa !5
+ br label %if.end
+
+if.end:
+ %i.0 = phi i32 [ %0, %if.then ], [ 0, %if.else ]
+ %p.0 = phi i32* [ getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), %if.then ], [ %b, %if.else ]
+ %add.ptr = getelementptr inbounds i32* %p.0, i64 1
+ %1 = load i32* %add.ptr, align 4, !tbaa !5
+ %add1 = add nsw i32 %1, %i.0
+ ret i32 %add1
+}
+
+!1 = metadata !{metadata !2, metadata !2, i64 0}
+!2 = metadata !{metadata !"any pointer", metadata !3, i64 0}
+!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
+!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!5 = metadata !{metadata !6, metadata !6, i64 0}
+!6 = metadata !{metadata !"int", metadata !3, i64 0}
diff --git a/test/Transforms/GVN/rle.ll b/test/Transforms/GVN/rle.ll
index 8d289b06997c..6aac93e75a78 100644
--- a/test/Transforms/GVN/rle.ll
+++ b/test/Transforms/GVN/rle.ll
@@ -318,6 +318,19 @@ define i8 @coerce_offset0(i32 %V, i32* %P) {
; CHECK: ret i8
}
+define i8 @coerce_offset0_addrspacecast(i32 %V, i32* %P) {
+ store i32 %V, i32* %P
+
+ %P2 = addrspacecast i32* %P to i8 addrspace(1)*
+ %P3 = getelementptr i8 addrspace(1)* %P2, i32 2
+
+ %A = load i8 addrspace(1)* %P3
+ ret i8 %A
+; CHECK-LABEL: @coerce_offset0_addrspacecast(
+; CHECK-NOT: load
+; CHECK: ret i8
+}
+
;; non-local i32/float -> i8 load forwarding.
define i8 @coerce_offset_nonlocal0(i32* %P, i1 %cond) {
%P2 = bitcast i32* %P to float*
diff --git a/test/Transforms/GVN/unreachable_block_infinite_loop.ll b/test/Transforms/GVN/unreachable_block_infinite_loop.ll
index fe335ced5c37..fca5a28b38dd 100644
--- a/test/Transforms/GVN/unreachable_block_infinite_loop.ll
+++ b/test/Transforms/GVN/unreachable_block_infinite_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt -memdep -gvn -disable-output
+; RUN: opt -memdep -gvn -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0"
diff --git a/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll b/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll
index 6658cee12239..0bdced5114d3 100644
--- a/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll
+++ b/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll
@@ -1,8 +1,18 @@
-; RUN: opt < %s -globaldce -S | not grep @D
-; RUN: opt < %s -globaldce -S | grep @L | count 3
+; RUN: opt < %s -globaldce -S > %t
+; RUN: FileCheck %s < %t
+; RUN: FileCheck --check-prefix=DEAD %s < %t
@A = global i32 0
+; CHECK: @A = global i32 0
+
@D = alias internal i32* @A
+; DEAD-NOT: @D
+
@L1 = alias i32* @A
+; CHECK: @L1 = alias i32* @A
+
@L2 = alias internal i32* @L1
+; CHECK: @L2 = alias internal i32* @L1
+
@L3 = alias i32* @L2
+; CHECK: @L3 = alias i32* @L2
diff --git a/test/Transforms/GlobalDCE/global_ctors.ll b/test/Transforms/GlobalDCE/global_ctors.ll
new file mode 100644
index 000000000000..91bb9ab7f6bc
--- /dev/null
+++ b/test/Transforms/GlobalDCE/global_ctors.ll
@@ -0,0 +1,14 @@
+; RUN: opt -S -globaldce < %s | FileCheck %s
+
+; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_notremovable }]
+; CHECK-NOT: @_GLOBAL__I_a
+
+declare void @_notremovable()
+
+@llvm.global_ctors = appending global [2 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }, { i32, void ()* } { i32 65535, void ()* @_notremovable }]
+
+; Function Attrs: nounwind readnone
+define internal void @_GLOBAL__I_a() #1 section "__TEXT,__StaticInit,regular,pure_instructions" {
+entry:
+ ret void
+}
diff --git a/test/Transforms/GlobalDCE/global_ctors_integration.ll b/test/Transforms/GlobalDCE/global_ctors_integration.ll
new file mode 100644
index 000000000000..5e6cc79f103c
--- /dev/null
+++ b/test/Transforms/GlobalDCE/global_ctors_integration.ll
@@ -0,0 +1,45 @@
+; RUN: opt -S -O2 < %s | FileCheck %s
+
+; This test checks that -O2 is able to delete constructors that become empty
+; only after some optimization passes have run, even if the pass structure
+; changes.
+; CHECK-NOT: @_GLOBAL__I_a
+
+%class.Foo = type { i32 }
+
+@foo = global %class.Foo zeroinitializer, align 4
+@_ZN3Bar18LINKER_INITIALIZEDE = external constant i32
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
+define internal void @__cxx_global_var_init() section "__TEXT,__StaticInit,regular,pure_instructions" {
+ %1 = load i32* @_ZN3Bar18LINKER_INITIALIZEDE, align 4
+ call void @_ZN3FooC1E17LinkerInitialized(%class.Foo* @foo, i32 %1)
+ ret void
+}
+
+; Function Attrs: ssp uwtable
+define linkonce_odr void @_ZN3FooC1E17LinkerInitialized(%class.Foo* %this, i32) unnamed_addr #0 align 2 {
+ %2 = alloca %class.Foo*, align 8
+ %3 = alloca i32, align 4
+ store %class.Foo* %this, %class.Foo** %2, align 8
+ store i32 %0, i32* %3, align 4
+ %4 = load %class.Foo** %2
+ %5 = load i32* %3, align 4
+ call void @_ZN3FooC2E17LinkerInitialized(%class.Foo* %4, i32 %5)
+ ret void
+}
+
+; Function Attrs: nounwind ssp uwtable
+define linkonce_odr void @_ZN3FooC2E17LinkerInitialized(%class.Foo* %this, i32) unnamed_addr #1 align 2 {
+ %2 = alloca %class.Foo*, align 8
+ %3 = alloca i32, align 4
+ store %class.Foo* %this, %class.Foo** %2, align 8
+ store i32 %0, i32* %3, align 4
+ %4 = load %class.Foo** %2
+ ret void
+}
+
+define internal void @_GLOBAL__I_a() section "__TEXT,__StaticInit,regular,pure_instructions" {
+ call void @__cxx_global_var_init()
+ ret void
+}
diff --git a/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll b/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
index a1b69efe1a76..d6a565ad10a8 100644
--- a/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
+++ b/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -globalopt
-@g = external global i32
+@g = global i32 0
@a = alias bitcast (i32* @g to i8*)
diff --git a/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll b/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll
index 62f75e123be7..930a96e2182f 100644
--- a/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll
+++ b/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll
@@ -1,11 +1,23 @@
-; RUN: opt < %s -globalopt -S | grep internal | count 2
+; RUN: opt < %s -globalopt -S | FileCheck %s
global i32 0
-define i32* @1() {
+; CHECK-DAG: @0 = internal global i32 0
+
+private global i32 0
+; CHECK-DAG: @1 = private global i32 0
+
+define i32* @2() {
ret i32* @0
}
+; CHECK-DAG: define internal fastcc i32* @2()
+
define i32* @f() {
entry:
- call i32* @1()
+ call i32* @2()
ret i32* %0
}
+
+define i32* @g() {
+entry:
+ ret i32* @1
+}
diff --git a/test/Transforms/GlobalOpt/alias-resolve.ll b/test/Transforms/GlobalOpt/alias-resolve.ll
index 32f4bf8ebe25..9d70c708aad8 100644
--- a/test/Transforms/GlobalOpt/alias-resolve.ll
+++ b/test/Transforms/GlobalOpt/alias-resolve.ll
@@ -1,31 +1,39 @@
-; We use a temporary file so that the test fails when opt crashes.
-
-; RUN: opt < %s -globalopt -S > %t
-; RUN: FileCheck %s < %t
+; RUN: opt < %s -globalopt -S | FileCheck %s
@foo1 = alias void ()* @foo2
-; CHECK: @foo1 = alias void ()* @foo2
+; CHECK: @foo1 = alias void ()* @bar2
-@foo2 = alias weak void()* @bar1
-; CHECK: @foo2 = alias weak void ()* @bar2
+@foo2 = alias void()* @bar1
+; CHECK: @foo2 = alias void ()* @bar2
@bar1 = alias void ()* @bar2
; CHECK: @bar1 = alias void ()* @bar2
-declare void @bar2()
-; CHECK: declare void @bar2()
+@weak1 = alias weak void ()* @bar2
+; CHECK: @weak1 = alias weak void ()* @bar2
+
+@bar4 = private unnamed_addr constant [2 x i8*] zeroinitializer
+@foo4 = unnamed_addr alias linkonce_odr getelementptr inbounds ([2 x i8*]* @bar4, i32 0, i32 1)
+; CHECK: @foo4 = unnamed_addr alias linkonce_odr getelementptr inbounds ([2 x i8*]* @bar4, i32 0, i32 1)
+
+define void @bar2() {
+ ret void
+}
+; CHECK: define void @bar2()
define void @baz() {
entry:
call void @foo1()
-; CHECK: call void @foo2()
+; CHECK: call void @bar2()
call void @foo2()
-; CHECK: call void @foo2()
+; CHECK: call void @bar2()
call void @bar1()
; CHECK: call void @bar2()
+ call void @weak1()
+; CHECK: call void @weak1()
ret void
}
diff --git a/test/Transforms/GlobalOpt/alias-used-address-space.ll b/test/Transforms/GlobalOpt/alias-used-address-space.ll
new file mode 100644
index 000000000000..633cd344a796
--- /dev/null
+++ b/test/Transforms/GlobalOpt/alias-used-address-space.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S -globalopt < %s | FileCheck %s
+
+target datalayout = "p:32:32:32-p1:16:16:16"
+
+@c = addrspace(1) global i8 42
+
+@i = internal addrspace(1) global i8 42
+
+; CHECK: @ia = internal addrspace(1) global i8 42
+@ia = alias internal i8 addrspace(1)* @i
+
+@llvm.used = appending global [1 x i8*] [i8* addrspacecast (i8 addrspace(1)* @ca to i8*)], section "llvm.metadata"
+; CHECK-DAG: @llvm.used = appending global [1 x i8*] [i8* addrspacecast (i8 addrspace(1)* @ca to i8*)], section "llvm.metadata"
+
+@llvm.compiler.used = appending global [2 x i8*] [i8* addrspacecast(i8 addrspace(1)* @ia to i8*), i8* addrspacecast (i8 addrspace(1)* @i to i8*)], section "llvm.metadata"
+; CHECK-DAG: @llvm.compiler.used = appending global [1 x i8*] [i8* addrspacecast (i8 addrspace(1)* @ia to i8*)], section "llvm.metadata"
+
+@sameAsUsed = global [1 x i8*] [i8* addrspacecast(i8 addrspace(1)* @ca to i8*)]
+; CHECK-DAG: @sameAsUsed = global [1 x i8*] [i8* addrspacecast (i8 addrspace(1)* @c to i8*)]
+
+@ca = alias internal i8 addrspace(1)* @c
+; CHECK: @ca = alias internal i8 addrspace(1)* @c
+
+define i8 addrspace(1)* @h() {
+ ret i8 addrspace(1)* @ca
+}
diff --git a/test/Transforms/GlobalOpt/alias-used-section.ll b/test/Transforms/GlobalOpt/alias-used-section.ll
new file mode 100644
index 000000000000..121793724d72
--- /dev/null
+++ b/test/Transforms/GlobalOpt/alias-used-section.ll
@@ -0,0 +1,8 @@
+; RUN: opt -S -globalopt < %s | FileCheck %s
+
+@_Z17in_custom_section = internal global i8 42, section "CUSTOM"
+@in_custom_section = dllexport alias internal i8* @_Z17in_custom_section
+
+; CHECK: @in_custom_section = internal dllexport global i8 42, section "CUSTOM"
+
+@llvm.used = appending global [1 x i8*] [i8* @in_custom_section], section "llvm.metadata"
diff --git a/test/Transforms/GlobalOpt/atexit.ll b/test/Transforms/GlobalOpt/atexit.ll
index dbcd0d7b00bd..55c2dab1c1d7 100644
--- a/test/Transforms/GlobalOpt/atexit.ll
+++ b/test/Transforms/GlobalOpt/atexit.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -globalopt -S | FileCheck %s
; CHECK: ModuleID
-define internal hidden i32 @__cxa_atexit(void (i8*)* nocapture %func, i8* nocapture %arg, i8* nocapture %dso_handle) nounwind readnone optsize noimplicitfloat {
+define internal i32 @__cxa_atexit(void (i8*)* nocapture %func, i8* nocapture %arg, i8* nocapture %dso_handle) nounwind readnone optsize noimplicitfloat {
unreachable
}
diff --git a/test/Transforms/GlobalOpt/constantfold-initializers.ll b/test/Transforms/GlobalOpt/constantfold-initializers.ll
index ce6e2c46d627..4a25d661edcf 100644
--- a/test/Transforms/GlobalOpt/constantfold-initializers.ll
+++ b/test/Transforms/GlobalOpt/constantfold-initializers.ll
@@ -50,7 +50,41 @@ entry:
ret void
}
+; PR19955
+
+@dllimportptr = global i32* null, align 4
+; CHECK: @dllimportptr = global i32* null, align 4
+@dllimportvar = external dllimport global i32
+define internal void @test3() {
+entry:
+ store i32* @dllimportvar, i32** @dllimportptr, align 4
+ ret void
+}
+
+@dllexportptr = global i32* null, align 4
+; CHECK: @dllexportptr = global i32* @dllexportvar, align 4
+@dllexportvar = dllexport global i32 0, align 4
+; CHECK: @dllexportvar = dllexport global i32 20, align 4
+define internal void @test4() {
+entry:
+ store i32 20, i32* @dllexportvar, align 4
+ store i32* @dllexportvar, i32** @dllexportptr, align 4
+ ret void
+}
+
+@threadlocalptr = global i32* null, align 4
+; CHECK: @threadlocalptr = global i32* null, align 4
+@threadlocalvar = external thread_local global i32
+define internal void @test5() {
+entry:
+ store i32* @threadlocalvar, i32** @threadlocalptr, align 4
+ ret void
+}
+
@llvm.global_ctors = appending constant
- [2 x { i32, void ()* }]
+ [5 x { i32, void ()* }]
[{ i32, void ()* } { i32 65535, void ()* @test1 },
- { i32, void ()* } { i32 65535, void ()* @test2 }]
+ { i32, void ()* } { i32 65535, void ()* @test2 },
+ { i32, void ()* } { i32 65535, void ()* @test3 },
+ { i32, void ()* } { i32 65535, void ()* @test4 },
+ { i32, void ()* } { i32 65535, void ()* @test5 }]
diff --git a/test/Transforms/GlobalOpt/ctor-list-opt.ll b/test/Transforms/GlobalOpt/ctor-list-opt.ll
index 542c786762ea..450bdb830284 100644
--- a/test/Transforms/GlobalOpt/ctor-list-opt.ll
+++ b/test/Transforms/GlobalOpt/ctor-list-opt.ll
@@ -1,5 +1,20 @@
-; RUN: opt < %s -globalopt -S | not grep CTOR
-@llvm.global_ctors = appending global [11 x { i32, void ()* }] [ { i32, void ()* } { i32 65535, void ()* @CTOR1 }, { i32, void ()* } { i32 65535, void ()* @CTOR1 }, { i32, void ()* } { i32 65535, void ()* @CTOR2 }, { i32, void ()* } { i32 65535, void ()* @CTOR3 }, { i32, void ()* } { i32 65535, void ()* @CTOR4 }, { i32, void ()* } { i32 65535, void ()* @CTOR5 }, { i32, void ()* } { i32 65535, void ()* @CTOR6 }, { i32, void ()* } { i32 65535, void ()* @CTOR7 }, { i32, void ()* } { i32 65535, void ()* @CTOR8 }, { i32, void ()* } { i32 65535, void ()* @CTOR9 }, { i32, void ()* } { i32 2147483647, void ()* null } ] ; <[10 x { i32, void ()* }]*> [#uses=0]
+; RUN: opt < %s -globalopt -S | FileCheck %s
+; CHECK-NOT: CTOR
+%ini = type { i32, void()*, i8* }
+@llvm.global_ctors = appending global [11 x %ini] [
+ %ini { i32 65535, void ()* @CTOR1, i8* null },
+ %ini { i32 65535, void ()* @CTOR1, i8* null },
+ %ini { i32 65535, void ()* @CTOR2, i8* null },
+ %ini { i32 65535, void ()* @CTOR3, i8* null },
+ %ini { i32 65535, void ()* @CTOR4, i8* null },
+ %ini { i32 65535, void ()* @CTOR5, i8* null },
+ %ini { i32 65535, void ()* @CTOR6, i8* null },
+ %ini { i32 65535, void ()* @CTOR7, i8* null },
+ %ini { i32 65535, void ()* @CTOR8, i8* null },
+ %ini { i32 65535, void ()* @CTOR9, i8* null },
+ %ini { i32 2147483647, void ()* null, i8* null }
+]
+
@G = global i32 0 ; <i32*> [#uses=1]
@G2 = global i32 0 ; <i32*> [#uses=1]
@G3 = global i32 -123 ; <i32*> [#uses=2]
diff --git a/test/Transforms/GlobalOpt/fastcc.ll b/test/Transforms/GlobalOpt/fastcc.ll
new file mode 100644
index 000000000000..76122b203d79
--- /dev/null
+++ b/test/Transforms/GlobalOpt/fastcc.ll
@@ -0,0 +1,46 @@
+; RUN: opt < %s -globalopt -S | FileCheck %s
+
+define internal i32 @f(i32* %m) {
+; CHECK-LABEL: define internal fastcc i32 @f
+ %v = load i32* %m
+ ret i32 %v
+}
+
+define internal x86_thiscallcc i32 @g(i32* %m) {
+; CHECK-LABEL: define internal fastcc i32 @g
+ %v = load i32* %m
+ ret i32 %v
+}
+
+; Leave this one alone, because the user went out of their way to request this
+; convention.
+define internal coldcc i32 @h(i32* %m) {
+; CHECK-LABEL: define internal coldcc i32 @h
+ %v = load i32* %m
+ ret i32 %v
+}
+
+define internal i32 @j(i32* %m) {
+; CHECK-LABEL: define internal i32 @j
+ %v = load i32* %m
+ ret i32 %v
+}
+
+define void @call_things() {
+ %m = alloca i32
+ call i32 @f(i32* %m)
+ call x86_thiscallcc i32 @g(i32* %m)
+ call coldcc i32 @h(i32* %m)
+ call i32 @j(i32* %m)
+ ret void
+}
+
+@llvm.used = appending global [1 x i8*] [
+ i8* bitcast (i32(i32*)* @j to i8*)
+], section "llvm.metadata"
+
+; CHECK-LABEL: define void @call_things()
+; CHECK: call fastcc i32 @f
+; CHECK: call fastcc i32 @g
+; CHECK: call coldcc i32 @h
+; CHECK: call i32 @j
diff --git a/test/Transforms/GlobalOpt/memset.ll b/test/Transforms/GlobalOpt/memset.ll
index 3bb5ce92d082..85320b7f2461 100644
--- a/test/Transforms/GlobalOpt/memset.ll
+++ b/test/Transforms/GlobalOpt/memset.ll
@@ -1,6 +1,8 @@
-; both globals are write only, delete them.
+; RUN: opt -S -globalopt < %s | FileCheck %s
-; RUN: opt < %s -globalopt -S | not grep internal
+; CHECK-NOT: internal
+
+; Both globals are write only, delete them.
@G0 = internal global [58 x i8] c"asdlfkajsdlfkajsd;lfkajds;lfkjasd;flkajsd;lkfja;sdlkfjasd\00" ; <[58 x i8]*> [#uses=1]
@G1 = internal global [4 x i32] [ i32 1, i32 2, i32 3, i32 4 ] ; <[4 x i32]*> [#uses=1]
@@ -13,6 +15,17 @@ define void @foo() {
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+@G0_as1 = internal addrspace(1) global [58 x i8] c"asdlfkajsdlfkajsd;lfkajds;lfkjasd;flkajsd;lkfja;sdlkfjasd\00" ; <[58 x i8]*> [#uses=1]
+@G1_as1 = internal addrspace(1) global [4 x i32] [ i32 1, i32 2, i32 3, i32 4 ] ; <[4 x i32]*> [#uses=1]
+
+define void @foo_as1() {
+ %Blah = alloca [58 x i8]
+ %tmp3 = bitcast [58 x i8]* %Blah to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* addrspacecast ([4 x i32] addrspace(1)* @G1_as1 to i8*), i8* %tmp3, i32 16, i32 1, i1 false)
+ call void @llvm.memset.p1i8.i32(i8 addrspace(1)* getelementptr inbounds ([58 x i8] addrspace(1)* @G0_as1, i32 0, i32 0), i8 17, i32 58, i32 1, i1 false)
+ ret void
+}
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
declare void @llvm.memset.p1i8.i32(i8 addrspace(1)* nocapture, i8, i32, i32, i1) nounwind
\ No newline at end of file
diff --git a/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll b/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
index bd174a8be3ff..4ea0b88fd0de 100644
--- a/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
+++ b/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as <%s | opt -ipsccp | llvm-dis | FileCheck %s
+; RUN: opt < %s -ipsccp -S | FileCheck %s
; Don't constant-propagate byval pointers, since they are not pointers!
; PR5038
%struct.MYstr = type { i8, i32 }
diff --git a/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll b/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll
new file mode 100644
index 000000000000..2c738de32e0a
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll
@@ -0,0 +1,57 @@
+; RUN: opt -S -loop-unswitch -instcombine -indvars < %s | FileCheck %s
+
+; This used to crash in SCEVExpander when there were congruent phis with and
+; undef incoming value from the loop header. The -loop-unswitch -instcombine is
+; necessary to create just this pattern, which is essentially a nop and gets
+; folded away aggressively if spelled out in IR directly.
+; PR 20093
+
+@c = external global i32**, align 8
+
+define void @test1() {
+entry:
+ br i1 undef, label %for.end12, label %for.cond.preheader
+
+for.cond.preheader: ; preds = %entry
+ %0 = load i32*** @c, align 8
+ %1 = load i32** %0, align 8
+ %2 = load i32* %1, align 4
+ br label %for.body
+
+for.body: ; preds = %for.cond.backedge, %for.body9.us, %for.cond.preheader
+ %3 = phi i32* [ %1, %for.cond.preheader ], [ %3, %for.cond.backedge ], [ %6, %for.body9.us ]
+ %4 = phi i32 [ %2, %for.cond.preheader ], [ undef, %for.cond.backedge ], [ %7, %for.body9.us ]
+ %i.024 = phi i32 [ 0, %for.cond.preheader ], [ %inc, %for.cond.backedge ], [ 0, %for.body9.us ]
+ %tobool1 = icmp eq i32 %4, 0
+ br i1 %tobool1, label %if.end, label %for.cond.backedge
+
+if.end: ; preds = %for.body
+ %5 = load i32* %3, align 4
+ %tobool4 = icmp eq i32 %5, 0
+ br i1 %tobool4, label %for.cond3, label %for.body9.preheader
+
+for.body9.preheader: ; preds = %if.end
+ %tobool8 = icmp eq i32 %i.024, 1
+ br i1 %tobool8, label %for.body9.us, label %for.body9
+
+for.body9.us: ; preds = %for.body9.preheader
+ %6 = load i32** undef, align 8
+ %7 = load i32* %6, align 4
+ br label %for.body
+
+for.cond3: ; preds = %for.cond3, %if.end
+ br label %for.cond3
+
+for.body9: ; preds = %for.body9, %for.body9.preheader
+ br label %for.body9
+
+for.cond.backedge: ; preds = %for.body
+ %inc = add nsw i32 %i.024, 1
+ br i1 false, label %for.body, label %for.end12
+
+for.end12: ; preds = %for.cond.backedge, %entry
+ ret void
+
+; CHECK-LABEL: @test1
+; CHECK-NOT: phi
+}
diff --git a/test/Transforms/IndVarSimplify/iv-widen.ll b/test/Transforms/IndVarSimplify/iv-widen.ll
new file mode 100644
index 000000000000..c899e2f0a580
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/iv-widen.ll
@@ -0,0 +1,40 @@
+; RUN: opt < %s -indvars -S | FileCheck %s
+
+target triple = "x86_64-apple-darwin"
+
+; CHECK-LABEL: @sloop
+; CHECK-LABEL: B18:
+; Only one phi now.
+; CHECK: phi
+; CHECK-NOT: phi
+; One trunc for the gep.
+; CHECK: trunc i64 %indvars.iv to i32
+; One trunc for the dummy() call.
+; CHECK-LABEL: exit24:
+; CHECK: trunc i64 {{.*}}lcssa.wide to i32
+define void @sloop(i32* %a) {
+Prologue:
+ br i1 undef, label %B18, label %B6
+
+B18: ; preds = %B24, %Prologue
+ %.02 = phi i32 [ 0, %Prologue ], [ %tmp33, %B24 ]
+ %tmp23 = zext i32 %.02 to i64
+ %tmp33 = add i32 %.02, 1
+ %o = getelementptr i32* %a, i32 %.02
+ %v = load i32* %o
+ %t = icmp eq i32 %v, 0
+ br i1 %t, label %exit24, label %B24
+
+B24: ; preds = %B18
+ %t2 = icmp eq i32 %tmp33, 20
+ br i1 %t2, label %B6, label %B18
+
+B6: ; preds = %Prologue
+ ret void
+
+exit24: ; preds = %B18
+ call void @dummy(i32 %.02)
+ unreachable
+}
+
+declare void @dummy(i32)
diff --git a/test/Transforms/IndVarSimplify/lcssa-preservation.ll b/test/Transforms/IndVarSimplify/lcssa-preservation.ll
new file mode 100644
index 000000000000..f69c96ce0210
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/lcssa-preservation.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -indvars -S | FileCheck %s
+;
+; Make sure IndVars preserves LCSSA form, especially across loop nests.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+define void @PR18642(i32 %x) {
+; CHECK-LABEL: @PR18642(
+entry:
+ br label %outer.header
+; CHECK: br label %outer.header
+
+outer.header:
+; CHECK: outer.header:
+ %outer.iv = phi i32 [ 0, %entry ], [ %x, %outer.latch ]
+ br label %inner.header
+; CHECK: %[[SCEV_EXPANDED:.*]] = add i32
+; CHECK: br label %inner.header
+
+inner.header:
+; CHECK: inner.header:
+ %inner.iv = phi i32 [ undef, %outer.header ], [ %inc, %inner.latch ]
+ %cmp1 = icmp slt i32 %inner.iv, %outer.iv
+ br i1 %cmp1, label %inner.latch, label %outer.latch
+; CHECK: br i1 {{.*}}, label %inner.latch, label %outer.latch
+
+inner.latch:
+; CHECK: inner.latch:
+ %inc = add nsw i32 %inner.iv, 1
+ %cmp2 = icmp slt i32 %inner.iv, %outer.iv
+ br i1 %cmp2, label %inner.header, label %exit
+; CHECK: br i1 {{.*}}, label %inner.header, label %[[EXIT_FROM_INNER:.*]]
+
+outer.latch:
+; CHECK: outer.latch:
+ br i1 undef, label %outer.header, label %exit
+; CHECK: br i1 {{.*}}, label %outer.header, label %[[EXIT_FROM_OUTER:.*]]
+
+; CHECK: [[EXIT_FROM_INNER]]:
+; CHECK-NEXT: %[[LCSSA:.*]] = phi i32 [ %[[SCEV_EXPANDED]], %inner.latch ]
+; CHECK-NEXT: br label %exit
+
+; CHECK: [[EXIT_FROM_OUTER]]:
+; CHECK-NEXT: br label %exit
+
+exit:
+; CHECK: exit:
+ %exit.phi = phi i32 [ %inc, %inner.latch ], [ undef, %outer.latch ]
+; CHECK-NEXT: phi i32 [ %[[LCSSA]], %[[EXIT_FROM_INNER]] ], [ undef, %[[EXIT_FROM_OUTER]] ]
+ ret void
+}
diff --git a/test/Transforms/IndVarSimplify/lftr-extend-const.ll b/test/Transforms/IndVarSimplify/lftr-extend-const.ll
index 2fac4a797e29..4736f857bcce 100644
--- a/test/Transforms/IndVarSimplify/lftr-extend-const.ll
+++ b/test/Transforms/IndVarSimplify/lftr-extend-const.ll
@@ -1,6 +1,6 @@
;RUN: opt -S %s -indvars | FileCheck %s
-; CHECK-LABEL-LABEL: @foo(
+; CHECK-LABEL: @foo(
; CHECK-NOT: %lftr.wideiv = trunc i32 %indvars.iv.next to i16
; CHECK: %exitcond = icmp ne i32 %indvars.iv.next, 512
define void @foo() #0 {
@@ -20,7 +20,7 @@ for.end: ; preds = %for.body
}
; Check that post-incrementing the backedge taken count does not overflow.
-; CHECK-LABEL-LABEL: @postinc(
+; CHECK-LABEL: @postinc(
; CHECK: icmp eq i32 %indvars.iv.next, 256
define i32 @postinc() #0 {
entry:
diff --git a/test/Transforms/IndVarSimplify/lftr-reuse.ll b/test/Transforms/IndVarSimplify/lftr-reuse.ll
index fe3df5cfa88c..1fdcdd1ec3a4 100644
--- a/test/Transforms/IndVarSimplify/lftr-reuse.ll
+++ b/test/Transforms/IndVarSimplify/lftr-reuse.ll
@@ -38,17 +38,16 @@ for.end:
ret void
}
-; It would be nice if SCEV and any loop analysis could assume that
-; preheaders exist. Unfortunately it is not always the case. This test
-; checks that SCEVExpander can handle an outer loop that has not yet
-; been simplified. As a result, the inner loop's exit test will not be
-; rewritten.
+; This test checks that SCEVExpander can handle an outer loop that has been
+; simplified, and as a result the inner loop's exit test will be rewritten.
define void @expandOuterRecurrence(i32 %arg) nounwind {
entry:
%sub1 = sub nsw i32 %arg, 1
%cmp1 = icmp slt i32 0, %sub1
br i1 %cmp1, label %outer, label %exit
+; CHECK: outer:
+; CHECK: icmp slt
outer:
%i = phi i32 [ 0, %entry ], [ %i.inc, %outer.inc ]
%sub2 = sub nsw i32 %arg, %i
@@ -60,7 +59,6 @@ inner.ph:
br label %inner
; CHECK: inner:
-; CHECK: icmp slt
; CHECK: br i1
inner:
%j = phi i32 [ 0, %inner.ph ], [ %j.inc, %inner ]
diff --git a/test/Transforms/IndVarSimplify/overflowcheck.ll b/test/Transforms/IndVarSimplify/overflowcheck.ll
new file mode 100644
index 000000000000..2603f363ab60
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/overflowcheck.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -indvars -liv-reduce -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+; CHECK-LABEL: @addwithoverflow
+; CHECK-LABEL: loop1:
+; CHECK-NOT: zext
+; CHECK: add nsw
+; CHECK: @llvm.sadd.with.overflow
+; CHECK-LABEL: loop2:
+; CHECK-NOT: extractvalue
+; CHECK: add nuw nsw
+; CHECK: @llvm.sadd.with.overflow
+; CHECK-LABEL: loop3:
+; CHECK-NOT: extractvalue
+; CHECK: ret
+define i64 @addwithoverflow(i32 %n, i64* %a) {
+entry:
+ br label %loop0
+
+loop0:
+ %i = phi i32 [ 0, %entry ], [ %i1val, %loop3 ]
+ %s = phi i32 [ 0, %entry ], [ %addsval, %loop3 ]
+ %bc = icmp ult i32 %i, %n
+ br i1 %bc, label %loop1, label %exit
+
+loop1:
+ %zxt = zext i32 %i to i64
+ %ofs = shl nuw nsw i64 %zxt, 3
+ %gep = getelementptr i64* %a, i64 %zxt
+ %v = load i64* %gep, align 8
+ %truncv = trunc i64 %v to i32
+ %adds = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %s, i32 %truncv)
+ %ovflows = extractvalue { i32, i1 } %adds, 1
+ br i1 %ovflows, label %exit, label %loop2
+
+loop2:
+ %addsval = extractvalue { i32, i1 } %adds, 0
+ %i1 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i, i32 1)
+ %i1check = extractvalue { i32, i1 } %i1, 1
+ br i1 %i1check, label %exit, label %loop3
+
+loop3:
+ %i1val = extractvalue { i32, i1 } %i1, 0
+ %test = icmp slt i32 %i1val, %n
+ br i1 %test, label %return, label %loop0
+
+return:
+ %ret = zext i32 %addsval to i64
+ ret i64 %ret
+
+exit:
+ unreachable
+}
+
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
diff --git a/test/Transforms/IndVarSimplify/pr18223.ll b/test/Transforms/IndVarSimplify/pr18223.ll
new file mode 100644
index 000000000000..738f75c0fe03
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/pr18223.ll
@@ -0,0 +1,30 @@
+; RUN: opt -indvars -S < %s | FileCheck %s
+
+; indvars should transform the phi node pair from the for-loop
+; CHECK-LABEL: @main(
+; CHECK: ret = phi i32 [ 0, %entry ], [ 0, {{.*}} ]
+
+@c = common global i32 0, align 4
+
+define i32 @main() #0 {
+entry:
+ %0 = load i32* @c, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %for.body, label %exit
+
+for.body:
+ %inc2 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %sub = add i32 %inc2, -1
+ %cmp1 = icmp uge i32 %sub, %inc2
+ %conv = zext i1 %cmp1 to i32
+ br label %for.inc
+
+for.inc:
+ %inc = add nsw i32 %inc2, 1
+ %cmp = icmp slt i32 %inc, 5
+ br i1 %cmp, label %for.body, label %exit
+
+exit:
+ %ret = phi i32 [ 0, %entry ], [ %conv, %for.inc ]
+ ret i32 %ret
+}
diff --git a/test/Transforms/IndVarSimplify/tripcount_compute.ll b/test/Transforms/IndVarSimplify/tripcount_compute.ll
index 626a29b20b7e..966d152cda69 100644
--- a/test/Transforms/IndVarSimplify/tripcount_compute.ll
+++ b/test/Transforms/IndVarSimplify/tripcount_compute.ll
@@ -160,3 +160,34 @@ loop9: ; preds = %loop2, %loopexit
loopexit9: ; preds = %loop2
ret i32 %l.next
}
+
+; PR18449. Check that the early exit is reduced to never taken.
+;
+; CHECK-LABEL: @twoexit
+; CHECK-LABEL: loop:
+; CHECK: phi
+; CHECK: br i1 false
+; CHECK: br
+; CHECK: ret
+define void @twoexit() {
+"function top level":
+ br label %loop
+
+loop: ; preds = %body, %"function top level"
+ %0 = phi i64 [ 0, %"function top level" ], [ %2, %body ]
+ %1 = icmp ugt i64 %0, 2
+ br i1 %1, label %fail, label %body
+
+fail: ; preds = %loop
+ tail call void @bounds_fail()
+ unreachable
+
+body: ; preds = %loop
+ %2 = add i64 %0, 1
+ %3 = icmp slt i64 %2, 3
+ br i1 %3, label %loop, label %out
+
+out: ; preds = %body
+ ret void
+}
+declare void @bounds_fail()
diff --git a/test/Transforms/Inline/2010-05-31-ByvalTailcall.ll b/test/Transforms/Inline/2010-05-31-ByvalTailcall.ll
deleted file mode 100644
index b37b9f2ffa28..000000000000
--- a/test/Transforms/Inline/2010-05-31-ByvalTailcall.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: opt < %s -tailcallelim -inline -instcombine -dse -S | FileCheck %s
-; PR7272
-
-; When inlining through a byval call site, the inliner creates allocas which may
-; be used by inlined calls, so any inlined calls need to have their 'tail' flags
-; cleared. If not then you can get nastiness like with this testcase, where the
-; (inlined) call to 'ext' in 'foo' was being passed an uninitialized value.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-pc-linux-gnu"
-
-declare void @ext(i32*)
-
-define void @bar(i32* byval %x) {
- call void @ext(i32* %x)
- ret void
-}
-
-define void @foo(i32* %x) {
-; CHECK-LABEL: define void @foo(
-; CHECK: store i32 %1, i32* %x
- call void @bar(i32* byval %x)
- ret void
-}
diff --git a/test/Transforms/Inline/always-inline.ll b/test/Transforms/Inline/always-inline.ll
index a8703b898777..5ad1bde3e2d2 100644
--- a/test/Transforms/Inline/always-inline.ll
+++ b/test/Transforms/Inline/always-inline.ll
@@ -122,3 +122,14 @@ entry:
ret void
}
+define i32 @inner7() {
+ ret i32 1
+}
+define i32 @outer7() {
+; CHECK-LABEL: @outer7(
+; CHECK-NOT: call
+; CHECK: ret
+
+ %r = call i32 @inner7() alwaysinline
+ ret i32 %r
+}
diff --git a/test/Transforms/Inline/blockaddress.ll b/test/Transforms/Inline/blockaddress.ll
index 4206312d7743..8eb307250330 100644
--- a/test/Transforms/Inline/blockaddress.ll
+++ b/test/Transforms/Inline/blockaddress.ll
@@ -1,8 +1,9 @@
; RUN: opt -inline -S < %s | FileCheck %s
; PR10162
-; Make sure the blockaddress is mapped correctly when doit is inlined
-; CHECK: store i8* blockaddress(@f, %here.i), i8** @ptr1, align 8
+; Make sure doit is not inlined since the blockaddress is taken
+; which could be unsafe
+; CHECK: store i8* blockaddress(@doit, %here), i8** %pptr, align 8
@i = global i32 1, align 4
@ptr1 = common global i8* null, align 8
diff --git a/test/Transforms/Inline/byval-tail-call.ll b/test/Transforms/Inline/byval-tail-call.ll
new file mode 100644
index 000000000000..3a8906aa210f
--- /dev/null
+++ b/test/Transforms/Inline/byval-tail-call.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -tailcallelim -inline -instcombine -dse -S | FileCheck %s
+; PR7272
+
+; Calls that capture byval parameters cannot be marked as tail calls. Other
+; tail calls that don't capture byval parameters can still be tail calls.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+target triple = "i386-pc-linux-gnu"
+
+declare void @ext(i32*)
+
+define void @bar(i32* byval %x) {
+ call void @ext(i32* %x)
+ ret void
+}
+
+define void @foo(i32* %x) {
+; CHECK-LABEL: define void @foo(
+; CHECK: llvm.lifetime.start
+; CHECK: store i32 %2, i32* %x
+ call void @bar(i32* byval %x)
+ ret void
+}
+
+define internal void @qux(i32* byval %x) {
+ call void @ext(i32* %x)
+ tail call void @ext(i32* null)
+ ret void
+}
+define void @frob(i32* %x) {
+; CHECK-LABEL: define void @frob(
+; CHECK: alloca i32
+; CHECK: {{^ *}}call void @ext(
+; CHECK: tail call void @ext(i32* null)
+; CHECK: ret void
+ tail call void @qux(i32* byval %x)
+ ret void
+}
diff --git a/test/Transforms/Inline/byval_lifetime.ll b/test/Transforms/Inline/byval_lifetime.ll
new file mode 100644
index 000000000000..e8dff2aa711d
--- /dev/null
+++ b/test/Transforms/Inline/byval_lifetime.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S -inline < %s | FileCheck %s
+; END.
+
+; By inlining foo, an alloca is created in main to hold the byval argument, so
+; a lifetime marker should be generated as well by default.
+
+%struct.foo = type { i32, [16 x i32] }
+
+@gFoo = global %struct.foo zeroinitializer, align 8
+
+define i32 @foo(%struct.foo* byval align 8 %f, i32 %a) {
+entry:
+ %a1 = getelementptr inbounds %struct.foo* %f, i32 0, i32 1
+ %arrayidx = getelementptr inbounds [16 x i32]* %a1, i32 0, i32 %a
+ %tmp2 = load i32* %arrayidx, align 1
+ ret i32 %tmp2
+}
+
+define i32 @main(i32 %argc, i8** %argv) {
+; CHECK-LABEL: @main
+; CHECK: llvm.lifetime.start
+; CHECK: memcpy
+entry:
+ %call = call i32 @foo(%struct.foo* byval align 8 @gFoo, i32 %argc)
+ ret i32 %call
+}
diff --git a/test/Transforms/Inline/debug-invoke.ll b/test/Transforms/Inline/debug-invoke.ll
new file mode 100644
index 000000000000..41d60742fd29
--- /dev/null
+++ b/test/Transforms/Inline/debug-invoke.ll
@@ -0,0 +1,37 @@
+; RUN: opt < %s -always-inline -S | FileCheck %s
+
+; Test that the debug location is preserved when rewriting an inlined call as an invoke
+
+; CHECK: invoke void @test()
+; CHECK-NEXT: to label {{.*}} unwind label {{.*}}, !dbg [[INL_LOC:!.*]]
+; CHECK: [[EMPTY:.*]] = metadata !{}
+; CHECK: [[INL_LOC]] = metadata !{i32 1, i32 0, metadata [[EMPTY]], metadata [[INL_AT:.*]]}
+; CHECK: [[INL_AT]] = metadata !{i32 2, i32 0, metadata [[EMPTY]], null}
+
+declare void @test()
+declare i32 @__gxx_personality_v0(...)
+
+attributes #0 = { alwaysinline }
+define void @inl() #0 {
+ call void @test(), !dbg !3
+ ret void
+}
+
+define void @caller() {
+ invoke void @inl()
+ to label %cont unwind label %lpad, !dbg !4
+
+cont:
+ ret void
+
+lpad:
+ landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ ret void
+}
+
+!llvm.module.flags = !{!1}
+!1 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!2 = metadata !{}
+!3 = metadata !{i32 1, i32 0, metadata !2, null}
+!4 = metadata !{i32 2, i32 0, metadata !2, null}
diff --git a/test/Transforms/Inline/ignore-debug-info.ll b/test/Transforms/Inline/ignore-debug-info.ll
new file mode 100644
index 000000000000..543a89be0214
--- /dev/null
+++ b/test/Transforms/Inline/ignore-debug-info.ll
@@ -0,0 +1,55 @@
+; RUN: opt < %s -S -inline -inline-threshold=2 | FileCheck %s
+; RUN: opt < %s -S -strip-debug -inline -inline-threshold=2 | FileCheck %s
+;
+; The purpose of this test is to check that debug info doesn't influence
+; inlining decisions.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @llvm.dbg.declare(metadata, metadata) #1
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+define <4 x float> @inner_vectors(<4 x float> %a, <4 x float> %b) {
+entry:
+ call void @llvm.dbg.value(metadata !{}, i64 0, metadata !{})
+ %mul = fmul <4 x float> %a, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ call void @llvm.dbg.value(metadata !{}, i64 0, metadata !{})
+ %mul1 = fmul <4 x float> %b, <float 5.000000e+00, float 5.000000e+00, float 5.000000e+00, float 5.000000e+00>
+ call void @llvm.dbg.value(metadata !{}, i64 0, metadata !{})
+ %add = fadd <4 x float> %mul, %mul1
+ ret <4 x float> %add
+}
+
+define float @outer_vectors(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: @outer_vectors(
+; CHECK-NOT: call <4 x float> @inner_vectors(
+; CHECK: ret float
+
+entry:
+ call void @llvm.dbg.value(metadata !{}, i64 0, metadata !{})
+ call void @llvm.dbg.value(metadata !{}, i64 0, metadata !{})
+ %call = call <4 x float> @inner_vectors(<4 x float> %a, <4 x float> %b)
+ call void @llvm.dbg.value(metadata !{}, i64 0, metadata !{})
+ %vecext = extractelement <4 x float> %call, i32 0
+ %vecext1 = extractelement <4 x float> %call, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecext2 = extractelement <4 x float> %call, i32 2
+ %add3 = fadd float %add, %vecext2
+ %vecext4 = extractelement <4 x float> %call, i32 3
+ %add5 = fadd float %add3, %vecext4
+ ret float %add5
+}
+
+attributes #0 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !{}, metadata !2, metadata !2, metadata !""}
+!1 = metadata !{metadata !"", metadata !""}
+!2 = metadata !{i32 0}
+!3 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!4 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!5 = metadata !{metadata !""}
diff --git a/test/Transforms/Inline/inline-cold.ll b/test/Transforms/Inline/inline-cold.ll
new file mode 100644
index 000000000000..57433771961e
--- /dev/null
+++ b/test/Transforms/Inline/inline-cold.ll
@@ -0,0 +1,200 @@
+; RUN: opt < %s -inline -S -inlinecold-threshold=75 | FileCheck %s
+; Test that functions with attribute Cold are not inlined while the
+; same function without attribute Cold will be inlined.
+
+; RUN: opt < %s -inline -S -inline-threshold=600 | FileCheck %s -check-prefix=OVERRIDE
+; The command line argument for inline-threshold should override
+; the default cold threshold, so a cold function with size bigger
+; than the default cold threshold (225) will be inlined.
+
+; RUN: opt < %s -inline -S | FileCheck %s -check-prefix=DEFAULT
+; The same cold function will not be inlined with the default behavior.
+
+@a = global i32 4
+
+; This function should be larger than the cold threshold (75), but smaller
+; than the regular threshold.
+; Function Attrs: nounwind readnone uwtable
+define i32 @simpleFunction(i32 %a) #0 {
+entry:
+ %a1 = load volatile i32* @a
+ %x1 = add i32 %a1, %a1
+ %a2 = load volatile i32* @a
+ %x2 = add i32 %x1, %a2
+ %a3 = load volatile i32* @a
+ %x3 = add i32 %x2, %a3
+ %a4 = load volatile i32* @a
+ %x4 = add i32 %x3, %a4
+ %a5 = load volatile i32* @a
+ %x5 = add i32 %x4, %a5
+ %a6 = load volatile i32* @a
+ %x6 = add i32 %x5, %a6
+ %a7 = load volatile i32* @a
+ %x7 = add i32 %x6, %a6
+ %a8 = load volatile i32* @a
+ %x8 = add i32 %x7, %a8
+ %a9 = load volatile i32* @a
+ %x9 = add i32 %x8, %a9
+ %a10 = load volatile i32* @a
+ %x10 = add i32 %x9, %a10
+ %a11 = load volatile i32* @a
+ %x11 = add i32 %x10, %a11
+ %a12 = load volatile i32* @a
+ %x12 = add i32 %x11, %a12
+ %add = add i32 %x12, %a
+ ret i32 %add
+}
+
+; Function Attrs: nounwind cold readnone uwtable
+define i32 @ColdFunction(i32 %a) #1 {
+; CHECK-LABEL: @ColdFunction
+; CHECK: ret
+; OVERRIDE-LABEL: @ColdFunction
+; OVERRIDE: ret
+; DEFAULT-LABEL: @ColdFunction
+; DEFAULT: ret
+entry:
+ %a1 = load volatile i32* @a
+ %x1 = add i32 %a1, %a1
+ %a2 = load volatile i32* @a
+ %x2 = add i32 %x1, %a2
+ %a3 = load volatile i32* @a
+ %x3 = add i32 %x2, %a3
+ %a4 = load volatile i32* @a
+ %x4 = add i32 %x3, %a4
+ %a5 = load volatile i32* @a
+ %x5 = add i32 %x4, %a5
+ %a6 = load volatile i32* @a
+ %x6 = add i32 %x5, %a6
+ %a7 = load volatile i32* @a
+ %x7 = add i32 %x6, %a6
+ %a8 = load volatile i32* @a
+ %x8 = add i32 %x7, %a8
+ %a9 = load volatile i32* @a
+ %x9 = add i32 %x8, %a9
+ %a10 = load volatile i32* @a
+ %x10 = add i32 %x9, %a10
+ %a11 = load volatile i32* @a
+ %x11 = add i32 %x10, %a11
+ %a12 = load volatile i32* @a
+ %x12 = add i32 %x11, %a12
+ %add = add i32 %x12, %a
+ ret i32 %add
+}
+
+; This function should be larger than the default cold threshold (225).
+define i32 @ColdFunction2(i32 %a) #1 {
+; CHECK-LABEL: @ColdFunction2
+; CHECK: ret
+; OVERRIDE-LABEL: @ColdFunction2
+; OVERRIDE: ret
+; DEFAULT-LABEL: @ColdFunction2
+; DEFAULT: ret
+entry:
+ %a1 = load volatile i32* @a
+ %x1 = add i32 %a1, %a1
+ %a2 = load volatile i32* @a
+ %x2 = add i32 %x1, %a2
+ %a3 = load volatile i32* @a
+ %x3 = add i32 %x2, %a3
+ %a4 = load volatile i32* @a
+ %x4 = add i32 %x3, %a4
+ %a5 = load volatile i32* @a
+ %x5 = add i32 %x4, %a5
+ %a6 = load volatile i32* @a
+ %x6 = add i32 %x5, %a6
+ %a7 = load volatile i32* @a
+ %x7 = add i32 %x6, %a7
+ %a8 = load volatile i32* @a
+ %x8 = add i32 %x7, %a8
+ %a9 = load volatile i32* @a
+ %x9 = add i32 %x8, %a9
+ %a10 = load volatile i32* @a
+ %x10 = add i32 %x9, %a10
+ %a11 = load volatile i32* @a
+ %x11 = add i32 %x10, %a11
+ %a12 = load volatile i32* @a
+ %x12 = add i32 %x11, %a12
+
+ %a21 = load volatile i32* @a
+ %x21 = add i32 %x12, %a21
+ %a22 = load volatile i32* @a
+ %x22 = add i32 %x21, %a22
+ %a23 = load volatile i32* @a
+ %x23 = add i32 %x22, %a23
+ %a24 = load volatile i32* @a
+ %x24 = add i32 %x23, %a24
+ %a25 = load volatile i32* @a
+ %x25 = add i32 %x24, %a25
+ %a26 = load volatile i32* @a
+ %x26 = add i32 %x25, %a26
+ %a27 = load volatile i32* @a
+ %x27 = add i32 %x26, %a27
+ %a28 = load volatile i32* @a
+ %x28 = add i32 %x27, %a28
+ %a29 = load volatile i32* @a
+ %x29 = add i32 %x28, %a29
+ %a30 = load volatile i32* @a
+ %x30 = add i32 %x29, %a30
+ %a31 = load volatile i32* @a
+ %x31 = add i32 %x30, %a31
+ %a32 = load volatile i32* @a
+ %x32 = add i32 %x31, %a32
+
+ %a41 = load volatile i32* @a
+ %x41 = add i32 %x32, %a41
+ %a42 = load volatile i32* @a
+ %x42 = add i32 %x41, %a42
+ %a43 = load volatile i32* @a
+ %x43 = add i32 %x42, %a43
+ %a44 = load volatile i32* @a
+ %x44 = add i32 %x43, %a44
+ %a45 = load volatile i32* @a
+ %x45 = add i32 %x44, %a45
+ %a46 = load volatile i32* @a
+ %x46 = add i32 %x45, %a46
+ %a47 = load volatile i32* @a
+ %x47 = add i32 %x46, %a47
+ %a48 = load volatile i32* @a
+ %x48 = add i32 %x47, %a48
+ %a49 = load volatile i32* @a
+ %x49 = add i32 %x48, %a49
+ %a50 = load volatile i32* @a
+ %x50 = add i32 %x49, %a50
+ %a51 = load volatile i32* @a
+ %x51 = add i32 %x50, %a51
+ %a52 = load volatile i32* @a
+ %x52 = add i32 %x51, %a52
+
+ %add = add i32 %x52, %a
+ ret i32 %add
+}
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @bar(i32 %a) #0 {
+; CHECK-LABEL: @bar
+; CHECK: call i32 @ColdFunction(i32 5)
+; CHECK-NOT: call i32 @simpleFunction(i32 6)
+; CHECK: call i32 @ColdFunction2(i32 5)
+; CHECK: ret
+; OVERRIDE-LABEL: @bar
+; OVERRIDE-NOT: call i32 @ColdFunction(i32 5)
+; OVERRIDE-NOT: call i32 @simpleFunction(i32 6)
+; OVERRIDE-NOT: call i32 @ColdFunction2(i32 5)
+; OVERRIDE: ret
+; DEFAULT-LABEL: @bar
+; DEFAULT-NOT: call i32 @ColdFunction(i32 5)
+; DEFAULT-NOT: call i32 @simpleFunction(i32 6)
+; DEFAULT: call i32 @ColdFunction2(i32 5)
+; DEFAULT: ret
+entry:
+ %0 = tail call i32 @ColdFunction(i32 5)
+ %1 = tail call i32 @simpleFunction(i32 6)
+ %2 = tail call i32 @ColdFunction2(i32 5)
+ %3 = add i32 %0, %1
+ %add = add i32 %2, %3
+ ret i32 %add
+}
+
+attributes #0 = { nounwind readnone uwtable }
+attributes #1 = { nounwind cold readnone uwtable }
diff --git a/test/Transforms/Inline/inline-tail.ll b/test/Transforms/Inline/inline-tail.ll
index 8bb059d01a0c..b40328e0a272 100644
--- a/test/Transforms/Inline/inline-tail.ll
+++ b/test/Transforms/Inline/inline-tail.ll
@@ -1,15 +1,182 @@
-; RUN: opt < %s -inline -S | not grep tail
+; RUN: opt < %s -inline -S | FileCheck %s
-declare void @bar(i32*)
+; We have to apply the less restrictive TailCallKind of the call site being
+; inlined and any call sites cloned into the caller.
-define internal void @foo(i32* %P) {
- tail call void @bar( i32* %P )
- ret void
+; No tail marker after inlining, since test_capture_c captures an alloca.
+; CHECK: define void @test_capture_a(
+; CHECK-NOT: tail
+; CHECK: call void @test_capture_c(
+
+declare void @test_capture_c(i32*)
+define internal void @test_capture_b(i32* %P) {
+ tail call void @test_capture_c(i32* %P)
+ ret void
+}
+define void @test_capture_a() {
+ %A = alloca i32 ; captured by test_capture_b
+ call void @test_capture_b(i32* %A)
+ ret void
+}
+
+; No musttail marker after inlining, since the prototypes don't match.
+; CHECK: define void @test_proto_mismatch_a(
+; CHECK-NOT: musttail
+; CHECK: call void @test_proto_mismatch_c(
+
+declare void @test_proto_mismatch_c(i32*)
+define internal void @test_proto_mismatch_b(i32* %p) {
+ musttail call void @test_proto_mismatch_c(i32* %p)
+ ret void
+}
+define void @test_proto_mismatch_a() {
+ call void @test_proto_mismatch_b(i32* null)
+ ret void
+}
+
+; After inlining through a musttail call site, we need to keep musttail markers
+; to prevent unbounded stack growth.
+; CHECK: define void @test_musttail_basic_a(
+; CHECK: musttail call void @test_musttail_basic_c(
+
+declare void @test_musttail_basic_c(i32* %p)
+define internal void @test_musttail_basic_b(i32* %p) {
+ musttail call void @test_musttail_basic_c(i32* %p)
+ ret void
+}
+define void @test_musttail_basic_a(i32* %p) {
+ musttail call void @test_musttail_basic_b(i32* %p)
+ ret void
+}
+
+; Don't insert lifetime end markers here; the lifetime is trivially over due
+; to the return.
+; CHECK: define void @test_byval_a(
+; CHECK: musttail call void @test_byval_c(
+; CHECK-NEXT: ret void
+
+declare void @test_byval_c(i32* byval %p)
+define internal void @test_byval_b(i32* byval %p) {
+ musttail call void @test_byval_c(i32* byval %p)
+ ret void
+}
+define void @test_byval_a(i32* byval %p) {
+ musttail call void @test_byval_b(i32* byval %p)
+ ret void
}
-define void @caller() {
- %A = alloca i32 ; <i32*> [#uses=1]
- call void @foo( i32* %A )
- ret void
+; Don't insert a stack restore; we're about to return.
+; CHECK: define void @test_dynalloca_a(
+; CHECK: call i8* @llvm.stacksave(
+; CHECK: alloca i8, i32 %n
+; CHECK: musttail call void @test_dynalloca_c(
+; CHECK-NEXT: ret void
+
+declare void @escape(i8* %buf)
+declare void @test_dynalloca_c(i32* byval %p, i32 %n)
+define internal void @test_dynalloca_b(i32* byval %p, i32 %n) alwaysinline {
+ %buf = alloca i8, i32 %n ; dynamic alloca
+ call void @escape(i8* %buf) ; escape it
+ musttail call void @test_dynalloca_c(i32* byval %p, i32 %n)
+ ret void
+}
+define void @test_dynalloca_a(i32* byval %p, i32 %n) {
+ musttail call void @test_dynalloca_b(i32* byval %p, i32 %n)
+ ret void
}
+; We can't merge the returns.
+; CHECK: define void @test_multiret_a(
+; CHECK: musttail call void @test_multiret_c(
+; CHECK-NEXT: ret void
+; CHECK: musttail call void @test_multiret_d(
+; CHECK-NEXT: ret void
+
+declare void @test_multiret_c(i1 zeroext %b)
+declare void @test_multiret_d(i1 zeroext %b)
+define internal void @test_multiret_b(i1 zeroext %b) {
+ br i1 %b, label %c, label %d
+c:
+ musttail call void @test_multiret_c(i1 zeroext %b)
+ ret void
+d:
+ musttail call void @test_multiret_d(i1 zeroext %b)
+ ret void
+}
+define void @test_multiret_a(i1 zeroext %b) {
+ musttail call void @test_multiret_b(i1 zeroext %b)
+ ret void
+}
+
+; We have to avoid bitcast chains.
+; CHECK: define i32* @test_retptr_a(
+; CHECK: musttail call i8* @test_retptr_c(
+; CHECK-NEXT: bitcast i8* {{.*}} to i32*
+; CHECK-NEXT: ret i32*
+
+declare i8* @test_retptr_c()
+define internal i16* @test_retptr_b() {
+ %rv = musttail call i8* @test_retptr_c()
+ %v = bitcast i8* %rv to i16*
+ ret i16* %v
+}
+define i32* @test_retptr_a() {
+ %rv = musttail call i16* @test_retptr_b()
+ %v = bitcast i16* %rv to i32*
+ ret i32* %v
+}
+
+; Combine the last two cases: multiple returns with pointer bitcasts.
+; CHECK: define i32* @test_multiptrret_a(
+; CHECK: musttail call i8* @test_multiptrret_c(
+; CHECK-NEXT: bitcast i8* {{.*}} to i32*
+; CHECK-NEXT: ret i32*
+; CHECK: musttail call i8* @test_multiptrret_d(
+; CHECK-NEXT: bitcast i8* {{.*}} to i32*
+; CHECK-NEXT: ret i32*
+
+declare i8* @test_multiptrret_c(i1 zeroext %b)
+declare i8* @test_multiptrret_d(i1 zeroext %b)
+define internal i16* @test_multiptrret_b(i1 zeroext %b) {
+ br i1 %b, label %c, label %d
+c:
+ %c_rv = musttail call i8* @test_multiptrret_c(i1 zeroext %b)
+ %c_v = bitcast i8* %c_rv to i16*
+ ret i16* %c_v
+d:
+ %d_rv = musttail call i8* @test_multiptrret_d(i1 zeroext %b)
+ %d_v = bitcast i8* %d_rv to i16*
+ ret i16* %d_v
+}
+define i32* @test_multiptrret_a(i1 zeroext %b) {
+ %rv = musttail call i16* @test_multiptrret_b(i1 zeroext %b)
+ %v = bitcast i16* %rv to i32*
+ ret i32* %v
+}
+
+; Inline a musttail call site which contains a normal return and a musttail call.
+; CHECK: define i32 @test_mixedret_a(
+; CHECK: br i1 %b
+; CHECK: musttail call i32 @test_mixedret_c(
+; CHECK-NEXT: ret i32
+; CHECK: call i32 @test_mixedret_d(i1 zeroext %b)
+; CHECK: add i32 1,
+; CHECK-NOT: br
+; CHECK: ret i32
+
+declare i32 @test_mixedret_c(i1 zeroext %b)
+declare i32 @test_mixedret_d(i1 zeroext %b)
+define internal i32 @test_mixedret_b(i1 zeroext %b) {
+ br i1 %b, label %c, label %d
+c:
+ %c_rv = musttail call i32 @test_mixedret_c(i1 zeroext %b)
+ ret i32 %c_rv
+d:
+ %d_rv = call i32 @test_mixedret_d(i1 zeroext %b)
+ %d_rv1 = add i32 1, %d_rv
+ ret i32 %d_rv1
+}
+define i32 @test_mixedret_a(i1 zeroext %b) {
+ %rv = musttail call i32 @test_mixedret_b(i1 zeroext %b)
+ ret i32 %rv
+}
diff --git a/test/Transforms/Inline/inline-vla.ll b/test/Transforms/Inline/inline-vla.ll
new file mode 100644
index 000000000000..dc9deaafe743
--- /dev/null
+++ b/test/Transforms/Inline/inline-vla.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -inline %s -o - | FileCheck %s
+
+; Check that memcpy2 is completely inlined away.
+; CHECK-NOT: memcpy2
+
+@.str = private unnamed_addr constant [2 x i8] c"a\00", align 1
+@.str1 = private unnamed_addr constant [3 x i8] c"ab\00", align 1
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 {
+entry:
+ %data = alloca [2 x i8], align 1
+ %arraydecay = getelementptr inbounds [2 x i8]* %data, i64 0, i64 0
+ call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([2 x i8]* @.str, i64 0, i64 0), i64 1)
+ call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([3 x i8]* @.str1, i64 0, i64 0), i64 2)
+ ret i32 0
+}
+
+; Function Attrs: inlinehint nounwind ssp uwtable
+define internal fastcc void @memcpy2(i8* nocapture %dst, i8* nocapture readonly %src, i64 %size) #1 {
+entry:
+ %vla = alloca i64, i64 %size, align 16
+ %0 = bitcast i64* %vla to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %src, i64 %size, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %0, i64 %size, i32 1, i1 false)
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #2
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { inlinehint nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 (trunk 205695) (llvm/trunk 205706)"}
diff --git a/test/Transforms/Inline/inline_invoke.ll b/test/Transforms/Inline/inline_invoke.ll
index c3941388f937..c53bb5aa17be 100644
--- a/test/Transforms/Inline/inline_invoke.ll
+++ b/test/Transforms/Inline/inline_invoke.ll
@@ -96,7 +96,6 @@ eh.resume:
; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
; CHECK-NEXT: cleanup
; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
; CHECK-NEXT: invoke void @_ZN1AD1Ev(%struct.A* [[A]])
; CHECK-NEXT: to label %[[LBL:[^\s]+]] unwind
; CHECK: [[LBL]]:
@@ -167,7 +166,6 @@ eh.resume:
; CHECK-NEXT: [[LPADVAL1:%.*]] = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
; CHECK-NEXT: cleanup
; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
; CHECK-NEXT: invoke void @_ZN1AD1Ev(%struct.A* [[A1]])
; CHECK-NEXT: to label %[[RESUME1:[^\s]+]] unwind
; CHECK: [[RESUME1]]:
@@ -187,7 +185,6 @@ eh.resume:
; CHECK-NEXT: [[LPADVAL2:%.*]] = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
; CHECK-NEXT: cleanup
; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
; CHECK-NEXT: invoke void @_ZN1AD1Ev(%struct.A* [[A2]])
; CHECK-NEXT: to label %[[RESUME2:[^\s]+]] unwind
; CHECK: [[RESUME2]]:
@@ -275,7 +272,6 @@ lpad.cont:
; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
; CHECK-NEXT: cleanup
; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
; CHECK-NEXT: invoke void @_ZN1AD1Ev(
; CHECK-NEXT: to label %[[L:[^\s]+]] unwind
; CHECK: [[L]]:
@@ -322,7 +318,6 @@ terminate:
; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
; CHECK-NEXT: cleanup
; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
; CHECK-NEXT: invoke void @_ZN1AD1Ev(
; CHECK-NEXT: to label %[[L:[^\s]+]] unwind
; CHECK: [[L]]:
diff --git a/test/Transforms/Inline/inline_returns_twice.ll b/test/Transforms/Inline/inline_returns_twice.ll
index 678ee82f4b84..36042640cc00 100644
--- a/test/Transforms/Inline/inline_returns_twice.ll
+++ b/test/Transforms/Inline/inline_returns_twice.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -inline -S | FileCheck %s
; Check that functions with "returns_twice" calls are only inlined,
-; if they are themselve marked as such.
+; if they are themselves marked as such.
declare i32 @a() returns_twice
diff --git a/test/Transforms/Inline/invoke-cleanup.ll b/test/Transforms/Inline/invoke-cleanup.ll
new file mode 100644
index 000000000000..457ae2addeb3
--- /dev/null
+++ b/test/Transforms/Inline/invoke-cleanup.ll
@@ -0,0 +1,39 @@
+; RUN: opt %s -inline -S | FileCheck %s
+
+declare void @external_func()
+
+@exception_type1 = external global i8
+@exception_type2 = external global i8
+
+
+define internal void @inner() {
+ invoke void @external_func()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ catch i8* @exception_type1
+ resume i32 %lp
+}
+
+; Test that the "cleanup" clause is kept when inlining @inner() into
+; this call site (PR17872); otherwise C++ destructors will not be
+; called when they should be.
+
+define void @outer() {
+ invoke void @inner()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ cleanup
+ catch i8* @exception_type2
+ resume i32 %lp
+}
+; CHECK: define void @outer
+; CHECK: landingpad
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: catch i8* @exception_type1
+; CHECK-NEXT: catch i8* @exception_type2
diff --git a/test/Transforms/Inline/invoke-combine-clauses.ll b/test/Transforms/Inline/invoke-combine-clauses.ll
new file mode 100644
index 000000000000..5f06039b9edc
--- /dev/null
+++ b/test/Transforms/Inline/invoke-combine-clauses.ll
@@ -0,0 +1,117 @@
+; RUN: opt %s -inline -S | FileCheck %s
+
+declare void @external_func()
+declare void @abort()
+
+@exception_inner = external global i8
+@exception_outer = external global i8
+@condition = external global i1
+
+
+; Check for a bug in which multiple "resume" instructions in the
+; inlined function caused "catch i8* @exception_outer" to appear
+; multiple times in the resulting landingpad.
+
+define internal void @inner_multiple_resume() {
+ invoke void @external_func()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ catch i8* @exception_inner
+ %cond = load i1* @condition
+ br i1 %cond, label %resume1, label %resume2
+resume1:
+ resume i32 1
+resume2:
+ resume i32 2
+}
+
+define void @outer_multiple_resume() {
+ invoke void @inner_multiple_resume()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ catch i8* @exception_outer
+ resume i32 %lp
+}
+; CHECK: define void @outer_multiple_resume()
+; CHECK: %lp.i = landingpad
+; CHECK-NEXT: catch i8* @exception_inner
+; CHECK-NEXT: catch i8* @exception_outer
+; Check that there isn't another "catch" clause:
+; CHECK-NEXT: load
+
+
+; Check for a bug in which having a "resume" and a "call" in the
+; inlined function caused "catch i8* @exception_outer" to appear
+; multiple times in the resulting landingpad.
+
+define internal void @inner_resume_and_call() {
+ call void @external_func()
+ invoke void @external_func()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ catch i8* @exception_inner
+ resume i32 %lp
+}
+
+define void @outer_resume_and_call() {
+ invoke void @inner_resume_and_call()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ catch i8* @exception_outer
+ resume i32 %lp
+}
+; CHECK: define void @outer_resume_and_call()
+; CHECK: %lp.i = landingpad
+; CHECK-NEXT: catch i8* @exception_inner
+; CHECK-NEXT: catch i8* @exception_outer
+; Check that there isn't another "catch" clause:
+; CHECK-NEXT: br
+
+
+; Check what happens if the inlined function contains an "invoke" but
+; no "resume". In this case, the inlined landingpad does not need to
+; include the "catch i8* @exception_outer" clause from the outer
+; function (since the outer function's landingpad will not be
+; reachable), but it's OK to include this clause.
+
+define internal void @inner_no_resume_or_call() {
+ invoke void @external_func()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ catch i8* @exception_inner
+ ; A landingpad might have no "resume" if a C++ destructor aborts.
+ call void @abort() noreturn nounwind
+ unreachable
+}
+
+define void @outer_no_resume_or_call() {
+ invoke void @inner_no_resume_or_call()
+ to label %cont unwind label %lpad
+cont:
+ ret void
+lpad:
+ %lp = landingpad i32 personality i8* null
+ catch i8* @exception_outer
+ resume i32 %lp
+}
+; CHECK: define void @outer_no_resume_or_call()
+; CHECK: %lp.i = landingpad
+; CHECK-NEXT: catch i8* @exception_inner
+; CHECK-NEXT: catch i8* @exception_outer
+; Check that there isn't another "catch" clause:
+; CHECK-NEXT: call void @abort()
diff --git a/test/Transforms/Inline/null-function.ll b/test/Transforms/Inline/null-function.ll
new file mode 100644
index 000000000000..2aecfa85cd8f
--- /dev/null
+++ b/test/Transforms/Inline/null-function.ll
@@ -0,0 +1,9 @@
+; RUN: opt -print-before=always-inline -always-inline < %s -o /dev/null 2>&1 | FileCheck %s
+
+define i32 @main() #0 {
+entry:
+ ret i32 0
+}
+
+; CHECK: *** IR Dump Before Inliner for always_inline functions ***
+; CHECK: Printing <null> Function
diff --git a/test/Transforms/Inline/optimization-remarks.ll b/test/Transforms/Inline/optimization-remarks.ll
new file mode 100644
index 000000000000..9108f3ab14d6
--- /dev/null
+++ b/test/Transforms/Inline/optimization-remarks.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -inline -pass-remarks=inline -pass-remarks-missed=inline -pass-remarks-analysis=inline -S 2>&1 | FileCheck %s
+
+; CHECK: foo should always be inlined (cost=always)
+; CHECK: foo inlined into bar
+; CHECK: foz should never be inlined (cost=never)
+; CHECK: foz will not be inlined into bar
+
+; Function Attrs: alwaysinline nounwind uwtable
+define i32 @foo(i32 %x, i32 %y) #0 {
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ %0 = load i32* %x.addr, align 4
+ %1 = load i32* %y.addr, align 4
+ %add = add nsw i32 %0, %1
+ ret i32 %add
+}
+
+; Function Attrs: noinline nounwind uwtable
+define float @foz(i32 %x, i32 %y) #1 {
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ %0 = load i32* %x.addr, align 4
+ %1 = load i32* %y.addr, align 4
+ %mul = mul nsw i32 %0, %1
+ %conv = sitofp i32 %mul to float
+ ret float %conv
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @bar(i32 %j) #2 {
+entry:
+ %j.addr = alloca i32, align 4
+ store i32 %j, i32* %j.addr, align 4
+ %0 = load i32* %j.addr, align 4
+ %1 = load i32* %j.addr, align 4
+ %sub = sub nsw i32 %1, 2
+ %call = call i32 @foo(i32 %0, i32 %sub)
+ %conv = sitofp i32 %call to float
+ %2 = load i32* %j.addr, align 4
+ %sub1 = sub nsw i32 %2, 2
+ %3 = load i32* %j.addr, align 4
+ %call2 = call float @foz(i32 %sub1, i32 %3)
+ %mul = fmul float %conv, %call2
+ %conv3 = fptosi float %mul to i32
+ ret i32 %conv3
+}
+
+attributes #0 = { alwaysinline nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { noinline nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/Transforms/Inline/ptr-diff.ll b/test/Transforms/Inline/ptr-diff.ll
index af42bc7cedeb..46c3bcd4dc53 100644
--- a/test/Transforms/Inline/ptr-diff.ll
+++ b/test/Transforms/Inline/ptr-diff.ll
@@ -31,7 +31,7 @@ else:
define i32 @outer2(i32* %ptr) {
; Test that an inbounds GEP disables this -- it isn't safe in general as
-; wrapping changes the behavior of lessthan and greaterthan comparisions.
+; wrapping changes the behavior of lessthan and greaterthan comparisons.
; CHECK-LABEL: @outer2(
; CHECK: call i32 @inner2
; CHECK: ret i32
diff --git a/test/Transforms/Inline/switch.ll b/test/Transforms/Inline/switch.ll
new file mode 100644
index 000000000000..c5dab53e8b64
--- /dev/null
+++ b/test/Transforms/Inline/switch.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -inline -inline-threshold=20 -S | FileCheck %s
+
+define i32 @callee(i32 %a) {
+ switch i32 %a, label %sw.default [
+ i32 0, label %sw.bb0
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ i32 4, label %sw.bb4
+ i32 5, label %sw.bb5
+ i32 6, label %sw.bb6
+ i32 7, label %sw.bb7
+ i32 8, label %sw.bb8
+ i32 9, label %sw.bb9
+ ]
+
+sw.default:
+ br label %return
+
+sw.bb0:
+ br label %return
+
+sw.bb1:
+ br label %return
+
+sw.bb2:
+ br label %return
+
+sw.bb3:
+ br label %return
+
+sw.bb4:
+ br label %return
+
+sw.bb5:
+ br label %return
+
+sw.bb6:
+ br label %return
+
+sw.bb7:
+ br label %return
+
+sw.bb8:
+ br label %return
+
+sw.bb9:
+ br label %return
+
+return:
+ ret i32 42
+}
+
+define i32 @caller(i32 %a) {
+; CHECK-LABEL: @caller(
+; CHECK: call i32 @callee(
+
+ %result = call i32 @callee(i32 %a)
+ ret i32 %result
+}
diff --git a/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll b/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
index c27fe0ab6a6d..7f9bd9e40dcb 100644
--- a/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
+++ b/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
@@ -3,7 +3,9 @@
@__gthrw_pthread_cancel = alias weak i32 (i32)* @pthread_cancel ; <i32 (i32)*> [#uses=1]
@__gthread_active_ptr.5335 = internal constant i8* bitcast (i32 (i32)* @__gthrw_pthread_cancel to i8*) ; <i8**> [#uses=1]
-declare extern_weak i32 @pthread_cancel(i32)
+define weak i32 @pthread_cancel(i32) {
+ ret i32 0
+}
define i1 @__gthread_active_p() {
entry:
diff --git a/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll b/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll
index 23ee12ba754f..c7cef752dcc9 100644
--- a/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll
+++ b/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll
@@ -3,7 +3,9 @@
@A = alias weak void ()* @B ; <void ()*> [#uses=1]
-declare extern_weak void @B()
+define weak void @B() {
+ ret void
+}
define i32 @active() {
entry:
diff --git a/test/Transforms/InstCombine/2010-03-03-ExtElim.ll b/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
index b1384ec00209..e0def997d974 100644
--- a/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
+++ b/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
@@ -22,11 +22,11 @@ define i1 @PR6486() nounwind {
define i1 @PR16462_1() nounwind {
; CHECK-LABEL: @PR16462_1(
ret i1 icmp sgt (i32 sext (i16 trunc (i32 select (i1 icmp eq (i32* getelementptr inbounds ([1 x i32]* @a, i32 0, i32 0), i32* @d), i32 0, i32 1) to i16) to i32), i32 65535)
-; CHECK: ret i1 icmp sgt (i32 sext (i16 trunc (i32 select (i1 icmp eq (i32* getelementptr inbounds ([1 x i32]* @a, i32 0, i32 0), i32* @d), i32 0, i32 1) to i16) to i32), i32 65535)
+; CHECK: ret i1 false
}
define i1 @PR16462_2() nounwind {
; CHECK-LABEL: @PR16462_2(
ret i1 icmp sgt (i32 sext (i16 trunc (i32 select (i1 icmp eq (i32* getelementptr inbounds ([1 x i32]* @a, i32 0, i32 0), i32* @d), i32 0, i32 1) to i16) to i32), i32 42)
-; CHECK: ret i1 icmp sgt (i16 trunc (i32 select (i1 icmp eq (i32* getelementptr inbounds ([1 x i32]* @a, i32 0, i32 0), i32* @d), i32 0, i32 1) to i16), i16 42)
+; CHECK: ret i1 false
}
diff --git a/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll b/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll
index 2dedd44e2be1..39408a2d394c 100644
--- a/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll
+++ b/test/Transforms/InstCombine/2012-04-23-Neon-Intrinsics.ll
@@ -1,6 +1,3 @@
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
-target triple = "thumbv7-apple-ios0"
-
; RUN: opt -S -instcombine < %s | FileCheck %s
define <4 x i32> @mulByZero(<4 x i16> %x) nounwind readnone ssp {
@@ -67,6 +64,72 @@ entry:
declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+; ARM64 variants - <rdar://problem/12349617>
+
+define <4 x i32> @mulByZeroARM64(<4 x i16> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %x, <4 x i16> zeroinitializer) nounwind
+ ret <4 x i32> %a
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> zeroinitializer
+}
+
+define <4 x i32> @mulByOneARM64(<4 x i16> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %x, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ ret <4 x i32> %a
+; CHECK: entry:
+; CHECK-NEXT: %a = sext <4 x i16> %x to <4 x i32>
+; CHECK-NEXT: ret <4 x i32> %a
+}
+
+define <4 x i32> @constantMulARM64() nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
+ ret <4 x i32> %a
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> <i32 6, i32 6, i32 6, i32 6>
+}
+
+define <4 x i32> @constantMulSARM64() nounwind readnone ssp {
+entry:
+ %b = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+}
+
+define <4 x i32> @constantMulUARM64() nounwind readnone ssp {
+entry:
+ %b = tail call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> <i16 1, i16 1, i16 1, i16 1>) nounwind
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: ret <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
+}
+
+define <4 x i32> @complex1ARM64(<4 x i16> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) nounwind
+ %b = add <4 x i32> zeroinitializer, %a
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 2, i16 2, i16 2, i16 2>, <4 x i16> %x) [[NUW:#[0-9]+]]
+; CHECK-NEXT: ret <4 x i32> %a
+}
+
+define <4 x i32> @complex2ARM64(<4 x i32> %x) nounwind readnone ssp {
+entry:
+ %a = tail call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> <i16 3, i16 3, i16 3, i16 3>, <4 x i16> <i16 2, i16 2, i16 2, i16 2>) nounwind
+ %b = add <4 x i32> %x, %a
+ ret <4 x i32> %b
+; CHECK: entry:
+; CHECK-NEXT: %b = add <4 x i32> %x, <i32 6, i32 6, i32 6, i32 6>
+; CHECK-NEXT: ret <4 x i32> %b
+}
+
+declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
+
; CHECK: attributes #0 = { nounwind readnone ssp }
; CHECK: attributes #1 = { nounwind readnone }
; CHECK: attributes [[NUW]] = { nounwind }
diff --git a/test/Transforms/InstCombine/2012-07-30-addrsp-bitcast.ll b/test/Transforms/InstCombine/2012-07-30-addrsp-bitcast.ll
index 4d185bf7e06e..ac9c555020d0 100644
--- a/test/Transforms/InstCombine/2012-07-30-addrsp-bitcast.ll
+++ b/test/Transforms/InstCombine/2012-07-30-addrsp-bitcast.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
; CHECK: addrspacecast
-@base = internal addrspace(3) unnamed_addr global [16 x i32] zeroinitializer, align 16
+@base = internal unnamed_addr addrspace(3) global [16 x i32] zeroinitializer, align 16
declare void @foo(i32*)
define void @test() nounwind {
diff --git a/test/Transforms/InstCombine/AddOverFlow.ll b/test/Transforms/InstCombine/AddOverFlow.ll
new file mode 100644
index 000000000000..8f3d429c8d60
--- /dev/null
+++ b/test/Transforms/InstCombine/AddOverFlow.ll
@@ -0,0 +1,118 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; CHECK-LABEL: @oppositesign
+; CHECK: add nsw i16 %a, %b
+define i16 @oppositesign(i16 %x, i16 %y) {
+; %a is negative, %b is non-negative
+ %a = or i16 %x, 32768
+ %b = and i16 %y, 32767
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+define i16 @zero_sign_bit(i16 %a) {
+; CHECK-LABEL: @zero_sign_bit(
+; CHECK-NEXT: and
+; CHECK-NEXT: add nuw
+; CHECK-NEXT: ret
+ %1 = and i16 %a, 32767
+ %2 = add i16 %1, 512
+ ret i16 %2
+}
+
+define i16 @zero_sign_bit2(i16 %a, i16 %b) {
+; CHECK-LABEL: @zero_sign_bit2(
+; CHECK-NEXT: and
+; CHECK-NEXT: and
+; CHECK-NEXT: add nuw
+; CHECK-NEXT: ret
+ %1 = and i16 %a, 32767
+ %2 = and i16 %b, 32767
+ %3 = add i16 %1, %2
+ ret i16 %3
+}
+
+declare i16 @bounded(i16 %input);
+declare i32 @__gxx_personality_v0(...);
+!0 = metadata !{i16 0, i16 32768} ; [0, 32767]
+!1 = metadata !{i16 0, i16 32769} ; [0, 32768]
+
+define i16 @add_bounded_values(i16 %a, i16 %b) {
+; CHECK-LABEL: @add_bounded_values(
+entry:
+ %c = call i16 @bounded(i16 %a), !range !0
+ %d = invoke i16 @bounded(i16 %b) to label %cont unwind label %lpad, !range !0
+cont:
+; %c and %d are in [0, 32767]. Therefore, %c + %d cannot overflow in unsigned arithmetic.
+ %e = add i16 %c, %d
+; CHECK: add nuw i16 %c, %d
+ ret i16 %e
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ filter [0 x i8*] zeroinitializer
+ ret i16 42
+}
+
+define i16 @add_bounded_values_2(i16 %a, i16 %b) {
+; CHECK-LABEL: @add_bounded_values_2(
+entry:
+ %c = call i16 @bounded(i16 %a), !range !1
+ %d = invoke i16 @bounded(i16 %b) to label %cont unwind label %lpad, !range !1
+cont:
+; Similar to add_bounded_values, but %c and %d are in [0, 32768]. Therefore,
+; %c + %d may overflow in unsigned arithmetic and we cannot add NUW.
+ %e = add i16 %c, %d
+; CHECK: add i16 %c, %d
+ ret i16 %e
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ filter [0 x i8*] zeroinitializer
+ ret i16 42
+}
+
+; CHECK-LABEL: @ripple_nsw1
+; CHECK: add nsw i16 %a, %b
+define i16 @ripple_nsw1(i16 %x, i16 %y) {
+; %a has at most one bit set
+ %a = and i16 %y, 1
+
+; %b has a 0 bit other than the sign bit
+ %b = and i16 %x, 49151
+
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+; Like the previous test, but flip %a and %b
+; CHECK-LABEL: @ripple_nsw2
+; CHECK: add nsw i16 %b, %a
+define i16 @ripple_nsw2(i16 %x, i16 %y) {
+ %a = and i16 %y, 1
+ %b = and i16 %x, 49151
+ %c = add i16 %b, %a
+ ret i16 %c
+}
+
+; CHECK-LABEL: @ripple_no_nsw1
+; CHECK: add i32 %a, %x
+define i32 @ripple_no_nsw1(i32 %x, i32 %y) {
+; We know nothing about %x
+ %a = and i32 %y, 1
+ %b = add i32 %a, %x
+ ret i32 %b
+}
+
+; CHECK-LABEL: @ripple_no_nsw2
+; CHECK: add nuw i16 %a, %b
+define i16 @ripple_no_nsw2(i16 %x, i16 %y) {
+; %a has at most one bit set
+ %a = and i16 %y, 1
+
+; %b has a 0 bit, but it is the sign bit
+ %b = and i16 %x, 32767
+
+ %c = add i16 %a, %b
+ ret i16 %c
+}
diff --git a/test/Transforms/InstCombine/OverlappingInsertvalues.ll b/test/Transforms/InstCombine/OverlappingInsertvalues.ll
new file mode 100644
index 000000000000..9248aecdf575
--- /dev/null
+++ b/test/Transforms/InstCombine/OverlappingInsertvalues.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that we can find and remove redundant insertvalues
+; CHECK-LABEL: foo_simple
+; CHECK-NOT: i8* %x, 0
+define { i8*, i64, i32 } @foo_simple(i8* %x, i8* %y) nounwind {
+entry:
+ %0 = insertvalue { i8*, i64, i32 } undef, i8* %x, 0
+ %1 = insertvalue { i8*, i64, i32 } %0, i8* %y, 0
+ ret { i8*, i64, i32 } %1
+}
+; Check that we can find and remove redundant nodes in an insertvalues chain
+; CHECK-LABEL: foo_ovwrt_chain
+; CHECK-NOT: i64 %y, 1
+; CHECK-NOT: i32 555, 2
+define { i8*, i64, i32 } @foo_ovwrt_chain(i8* %x, i64 %y, i64 %z) nounwind {
+entry:
+ %0 = insertvalue { i8*, i64, i32 } undef, i8* %x, 0
+ %1 = insertvalue { i8*, i64, i32 } %0, i64 %y, 1
+ %2 = insertvalue { i8*, i64, i32 } %1, i32 555, 2
+ %3 = insertvalue { i8*, i64, i32 } %2, i64 %z, 1
+ %4 = insertvalue { i8*, i64, i32 } %3, i32 777, 2
+ ret { i8*, i64, i32 } %4
+}
+; Check that we propagate insertvalues only if they are used as the first
+; operand (as the initial value of the aggregate)
+; CHECK-LABEL: foo_use_as_second_operand
+; CHECK: i16 %x, 0
+; CHECK: %0, 1
+define { i8, {i16, i32} } @foo_use_as_second_operand(i16 %x) nounwind {
+entry:
+ %0 = insertvalue { i16, i32 } undef, i16 %x, 0
+ %1 = insertvalue { i8, {i16, i32} } undef, { i16, i32 } %0, 1
+ ret { i8, {i16, i32} } %1
+}
diff --git a/test/Transforms/InstCombine/abs_abs.ll b/test/Transforms/InstCombine/abs_abs.ll
new file mode 100644
index 000000000000..de10fd180a79
--- /dev/null
+++ b/test/Transforms/InstCombine/abs_abs.ll
@@ -0,0 +1,961 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @abs_abs_x01(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x01(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x02(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x02(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x03(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x03(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x04(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x04(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x05(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x05(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x06(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x06(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x07(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x07(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x08(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x08(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x09(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x09(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x10(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x10(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x11(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x11(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x12(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x12(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x13(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x13(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x14(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x14(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x15(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x15(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_abs_x16(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_abs_x16(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x01(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x01(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x02(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x02(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x03(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x03(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x04(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x04(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x05(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x05(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x06(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x06(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x07(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x07(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x08(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x08(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x09(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x09(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x10(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x10(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x11(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x11(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x12(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x12(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x13(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x13(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x14(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x14(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x15(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x15(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_nabs_x16(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_nabs_x16(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x01(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x01(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x02(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x02(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x03(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x03(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x04(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x04(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x05(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x05(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x06(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x06(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x07(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x07(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x08(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x08(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x09(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x09(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x10(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x10(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x11(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x11(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x12(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x12(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x13(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x13(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x14(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x14(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x15(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x15(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @abs_nabs_x16(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @abs_nabs_x16(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x01(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x01(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x02(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x02(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x03(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x03(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x04(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, -1
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x04(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x05(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x05(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x06(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x06(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x07(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x07(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x08(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp sgt i32 %cond, 0
+ %sub9 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %sub9, i32 %cond
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x08(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x09(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x09(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x10(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x10(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x11(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x11(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x12(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 0
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x12(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x13(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x13(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, -1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x14(i32 %x) {
+ %cmp = icmp sgt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %x, i32 %sub
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x14(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[NEG]], i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x15(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x15(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 0
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+define i32 @nabs_abs_x16(i32 %x) {
+ %cmp = icmp slt i32 %x, 1
+ %sub = sub nsw i32 0, %x
+ %cond = select i1 %cmp, i32 %sub, i32 %x
+ %cmp1 = icmp slt i32 %cond, 1
+ %sub16 = sub nsw i32 0, %cond
+ %cond18 = select i1 %cmp1, i32 %cond, i32 %sub16
+ ret i32 %cond18
+; CHECK-LABEL: @nabs_abs_x16(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 1
+; CHECK-NEXT: [[NEG:%[a-z0-9]+]] = sub nsw i32 0, %x
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 %x, i32 [[NEG]]
+; CHECK-NEXT: ret i32 [[SEL]]
+} \ No newline at end of file
diff --git a/test/Transforms/InstCombine/add-shrink.ll b/test/Transforms/InstCombine/add-shrink.ll
index 3edb392ed184..67a990fcb105 100644
--- a/test/Transforms/InstCombine/add-shrink.ll
+++ b/test/Transforms/InstCombine/add-shrink.ll
@@ -1,9 +1,11 @@
-; RUN: opt < %s -instcombine -S | grep "add nsw i32"
-; RUN: opt < %s -instcombine -S | grep sext | count 1
-
-; Should only have one sext and the add should be i32 instead of i64.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; CHECK-LABEL: define i64 @test
define i64 @test1(i32 %A) {
+; CHECK: %[[ADD:.*]] = add nsw i32 %B, %C
+; CHECK: %F = sext i32 %[[ADD]] to i64
+; CHECK: ret i64 %F
+
%B = ashr i32 %A, 7 ; <i32> [#uses=1]
%C = ashr i32 %A, 9 ; <i32> [#uses=1]
%D = sext i32 %B to i64 ; <i64> [#uses=1]
diff --git a/test/Transforms/InstCombine/add-sitofp.ll b/test/Transforms/InstCombine/add-sitofp.ll
index 40edf7114a06..3b5485e00528 100644
--- a/test/Transforms/InstCombine/add-sitofp.ll
+++ b/test/Transforms/InstCombine/add-sitofp.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep "add nsw i32"
+; RUN: opt < %s -instcombine -S | grep "add nuw nsw i32"
define double @x(i32 %a, i32 %b) nounwind {
%m = lshr i32 %a, 24
diff --git a/test/Transforms/InstCombine/add2.ll b/test/Transforms/InstCombine/add2.ll
index 0964bc00d1cb..d7eac4b0fd21 100644
--- a/test/Transforms/InstCombine/add2.ll
+++ b/test/Transforms/InstCombine/add2.ll
@@ -41,3 +41,275 @@ define i32 @test4(i32 %A) {
; CHECK-NEXT: ret i32 %B
}
+define <2 x i1> @test5(<2 x i1> %A, <2 x i1> %B) {
+ %add = add <2 x i1> %A, %B
+ ret <2 x i1> %add
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: %add = xor <2 x i1> %A, %B
+; CHECK-NEXT: ret <2 x i1> %add
+}
+
+define <2 x i64> @test6(<2 x i64> %A) {
+ %shl = shl <2 x i64> %A, <i64 2, i64 3>
+ %add = add <2 x i64> %shl, %A
+ ret <2 x i64> %add
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: %add = mul <2 x i64> %A, <i64 5, i64 9>
+; CHECK-NEXT: ret <2 x i64> %add
+}
+
+define <2 x i64> @test7(<2 x i64> %A) {
+ %shl = shl <2 x i64> %A, <i64 2, i64 3>
+ %mul = mul <2 x i64> %A, <i64 3, i64 4>
+ %add = add <2 x i64> %shl, %mul
+ ret <2 x i64> %add
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: %add = mul <2 x i64> %A, <i64 7, i64 12>
+; CHECK-NEXT: ret <2 x i64> %add
+}
+
+define <2 x i64> @test8(<2 x i64> %A) {
+ %xor = xor <2 x i64> %A, <i64 -1, i64 -1>
+ %add = add <2 x i64> %xor, <i64 2, i64 3>
+ ret <2 x i64> %add
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: %add = sub <2 x i64> <i64 1, i64 2>, %A
+; CHECK-NEXT: ret <2 x i64> %add
+}
+
+define i16 @test9(i16 %a) {
+ %b = mul i16 %a, 2
+ %c = mul i16 %a, 32767
+ %d = add i16 %b, %c
+ ret i16 %d
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: %d = mul i16 %a, -32767
+; CHECK-NEXT: ret i16 %d
+}
+
+; y + (~((x >> 3) & 0x55555555) + 1) -> y - ((x >> 3) & 0x55555555)
+define i32 @test10(i32 %x, i32 %y) {
+ %shr = ashr i32 %x, 3
+ %shr.not = or i32 %shr, -1431655766
+ %neg = xor i32 %shr.not, 1431655765
+ %add = add i32 %y, 1
+ %add1 = add i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[SHR:%[a-z0-9]+]] = ashr i32 %x, 3
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[SHR]], 1431655765
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; y + (~(x & 0x55555555) + 1) -> y - (x & 0x55555555)
+define i32 @test11(i32 %x, i32 %y) {
+ %x.not = or i32 %x, -1431655766
+ %neg = xor i32 %x.not, 1431655765
+ %add = add i32 %y, 1
+ %add1 = add i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, 1431655765
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; (y + 1) + ~(x & 0x55555555) -> y - (x & 0x55555555)
+define i32 @test12(i32 %x, i32 %y) {
+ %add = add nsw i32 %y, 1
+ %x.not = or i32 %x, -1431655766
+ %neg = xor i32 %x.not, 1431655765
+ %add1 = add nsw i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, 1431655765
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; y + (~(x & 0x55555556) + 1) -> y - (x & 0x55555556)
+define i32 @test13(i32 %x, i32 %y) {
+ %x.not = or i32 %x, -1431655767
+ %neg = xor i32 %x.not, 1431655766
+ %add = add i32 %y, 1
+ %add1 = add i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, 1431655766
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; (y + 1) + ~(x & 0x55555556) -> y - (x & 0x55555556)
+define i32 @test14(i32 %x, i32 %y) {
+ %add = add nsw i32 %y, 1
+ %x.not = or i32 %x, -1431655767
+ %neg = xor i32 %x.not, 1431655766
+ %add1 = add nsw i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test14(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, 1431655766
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; y + (~(x | 0x55555556) + 1) -> y - (x | 0x55555556)
+define i32 @test15(i32 %x, i32 %y) {
+ %x.not = and i32 %x, -1431655767
+ %neg = xor i32 %x.not, -1431655767
+ %add = add i32 %y, 1
+ %add1 = add i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = or i32 %x, 1431655766
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; (y + 1) + ~(x | 0x55555556) -> y - (x | 0x55555556)
+define i32 @test16(i32 %x, i32 %y) {
+ %add = add nsw i32 %y, 1
+ %x.not = and i32 %x, -1431655767
+ %neg = xor i32 %x.not, -1431655767
+ %add1 = add nsw i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = or i32 %x, 1431655766
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; y + (~(x | 0x55555555) + 1) -> y - (x | 0x55555555)
+define i32 @test17(i32 %x, i32 %y) {
+ %x.not = and i32 %x, -1431655766
+ %add2 = xor i32 %x.not, -1431655765
+ %add1 = add nsw i32 %add2, %y
+ ret i32 %add1
+; CHECK-LABEL: @test17(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = or i32 %x, 1431655765
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+; (y + 1) + ~(x | 0x55555555) -> y - (x | 0x55555555)
+define i32 @test18(i32 %x, i32 %y) {
+ %add = add nsw i32 %y, 1
+ %x.not = and i32 %x, -1431655766
+ %neg = xor i32 %x.not, -1431655766
+ %add1 = add nsw i32 %add, %neg
+ ret i32 %add1
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = or i32 %x, 1431655765
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[AND]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+define i16 @add_nsw_mul_nsw(i16 %x) {
+ %add1 = add nsw i16 %x, %x
+ %add2 = add nsw i16 %add1, %x
+ ret i16 %add2
+; CHECK-LABEL: @add_nsw_mul_nsw(
+; CHECK-NEXT: %add2 = mul nsw i16 %x, 3
+; CHECK-NEXT: ret i16 %add2
+}
+
+define i16 @mul_add_to_mul_1(i16 %x) {
+ %mul1 = mul nsw i16 %x, 8
+ %add2 = add nsw i16 %x, %mul1
+ ret i16 %add2
+; CHECK-LABEL: @mul_add_to_mul_1(
+; CHECK-NEXT: %add2 = mul nsw i16 %x, 9
+; CHECK-NEXT: ret i16 %add2
+}
+
+define i16 @mul_add_to_mul_2(i16 %x) {
+ %mul1 = mul nsw i16 %x, 8
+ %add2 = add nsw i16 %mul1, %x
+ ret i16 %add2
+; CHECK-LABEL: @mul_add_to_mul_2(
+; CHECK-NEXT: %add2 = mul nsw i16 %x, 9
+; CHECK-NEXT: ret i16 %add2
+}
+
+define i16 @mul_add_to_mul_3(i16 %a) {
+ %mul1 = mul i16 %a, 2
+ %mul2 = mul i16 %a, 3
+ %add = add nsw i16 %mul1, %mul2
+ ret i16 %add
+; CHECK-LABEL: @mul_add_to_mul_3(
+; CHECK-NEXT: %add = mul i16 %a, 5
+; CHECK-NEXT: ret i16 %add
+}
+
+define i16 @mul_add_to_mul_4(i16 %a) {
+ %mul1 = mul nsw i16 %a, 2
+ %mul2 = mul nsw i16 %a, 7
+ %add = add nsw i16 %mul1, %mul2
+ ret i16 %add
+; CHECK-LABEL: @mul_add_to_mul_4(
+; CHECK-NEXT: %add = mul nsw i16 %a, 9
+; CHECK-NEXT: ret i16 %add
+}
+
+define i16 @mul_add_to_mul_5(i16 %a) {
+ %mul1 = mul nsw i16 %a, 3
+ %mul2 = mul nsw i16 %a, 7
+ %add = add nsw i16 %mul1, %mul2
+ ret i16 %add
+; CHECK-LABEL: @mul_add_to_mul_5(
+; CHECK-NEXT: %add = mul nsw i16 %a, 10
+; CHECK-NEXT: ret i16 %add
+}
+
+define i32 @mul_add_to_mul_6(i32 %x, i32 %y) {
+ %mul1 = mul nsw i32 %x, %y
+ %mul2 = mul nsw i32 %mul1, 5
+ %add = add nsw i32 %mul1, %mul2
+ ret i32 %add
+; CHECK-LABEL: @mul_add_to_mul_6(
+; CHECK-NEXT: %mul1 = mul nsw i32 %x, %y
+; CHECK-NEXT: %add = mul nsw i32 %mul1, 6
+; CHECK-NEXT: ret i32 %add
+}
+
+; This test and the next test verify that when range metadata is attached to
+; llvm.cttz, ValueTracking correctly intersects the range specified by the
+; metadata and the range implied by the intrinsic.
+;
+; In this test, the range specified by the metadata is more strict. Therefore,
+; ValueTracking uses that range.
+define i16 @add_cttz(i16 %a) {
+; CHECK-LABEL: @add_cttz(
+ ; llvm.cttz.i16(..., /*is_zero_undefined=*/true) implies the value returned
+ ; is in [0, 16). The range metadata indicates the value returned is in [0, 8).
+ ; Intersecting these ranges, we know the value returned is in [0, 8).
+ ; Therefore, InstCombine will transform
+ ; add %cttz, 1111 1111 1111 1000 ; decimal -8
+ ; to
+ ; or %cttz, 1111 1111 1111 1000
+ %cttz = call i16 @llvm.cttz.i16(i16 %a, i1 true), !range !0
+ %b = add i16 %cttz, -8
+; CHECK: or i16 %cttz, -8
+ ret i16 %b
+}
+declare i16 @llvm.cttz.i16(i16, i1)
+!0 = metadata !{i16 0, i16 8}
+
+; Similar to @add_cttz, but in this test, the range implied by the
+; intrinsic is more strict. Therefore, ValueTracking uses that range.
+define i16 @add_cttz_2(i16 %a) {
+; CHECK-LABEL: @add_cttz_2(
+ ; llvm.cttz.i16(..., /*is_zero_undefined=*/true) implies the value returned
+ ; is in [0, 16). The range metadata indicates the value returned is in
+ ; [0, 32). Intersecting these ranges, we know the value returned is in
+ ; [0, 16). Therefore, InstCombine will transform
+ ; add %cttz, 1111 1111 1111 0000 ; decimal -16
+ ; to
+ ; or %cttz, 1111 1111 1111 0000
+ %cttz = call i16 @llvm.cttz.i16(i16 %a, i1 true), !range !1
+ %b = add i16 %cttz, -16
+; CHECK: or i16 %cttz, -16
+ ret i16 %b
+}
+!1 = metadata !{i16 0, i16 32}
diff --git a/test/Transforms/InstCombine/add4.ll b/test/Transforms/InstCombine/add4.ll
index 208c7f03200e..f9b7e3b5a079 100644
--- a/test/Transforms/InstCombine/add4.ll
+++ b/test/Transforms/InstCombine/add4.ll
@@ -77,3 +77,26 @@ define float @test7(float %A, float %B, i32 %C) {
; CHECK: uitofp
}
+define <4 x float> @test8(<4 x float> %A, <4 x float> %B, <4 x i1> %C) {
+  ;; B*(uitofp i1 C) + A*(1 - uitofp i1 C) -> select C, B, A
+ %cf = uitofp <4 x i1> %C to <4 x float>
+ %mc = fsub fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %cf
+ %p1 = fmul fast <4 x float> %A, %mc
+ %p2 = fmul fast <4 x float> %B, %cf
+ %s1 = fadd fast <4 x float> %p2, %p1
+ ret <4 x float> %s1
+; CHECK-LABEL: @test8(
+; CHECK: select <4 x i1> %C, <4 x float> %B, <4 x float> %A
+}
+
+define <4 x float> @test9(<4 x float> %A, <4 x float> %B, <4 x i1> %C) {
+  ;; A*(1 - uitofp i1 C) + B*(uitofp i1 C) -> select C, B, A
+ %cf = uitofp <4 x i1> %C to <4 x float>
+ %mc = fsub fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %cf
+ %p1 = fmul fast <4 x float> %A, %mc
+ %p2 = fmul fast <4 x float> %B, %cf
+ %s1 = fadd fast <4 x float> %p1, %p2
+ ret <4 x float> %s1
+; CHECK-LABEL: @test9
+; CHECK: select <4 x i1> %C, <4 x float> %B, <4 x float> %A
+}
diff --git a/test/Transforms/InstCombine/addrspacecast.ll b/test/Transforms/InstCombine/addrspacecast.ll
index d908b556e195..c1684361f99f 100644
--- a/test/Transforms/InstCombine/addrspacecast.ll
+++ b/test/Transforms/InstCombine/addrspacecast.ll
@@ -28,13 +28,91 @@ define <4 x i32*> @combine_redundant_addrspacecast_vector(<4 x i32 addrspace(1)*
define float* @combine_redundant_addrspacecast_types(i32 addrspace(1)* %x) nounwind {
; CHECK-LABEL: @combine_redundant_addrspacecast_types(
-; CHECK: addrspacecast i32 addrspace(1)* %x to float*
+; CHECK-NEXT: bitcast i32 addrspace(1)* %x to float addrspace(1)*
+; CHECK-NEXT: addrspacecast float addrspace(1)* %1 to float*
; CHECK-NEXT: ret
%y = addrspacecast i32 addrspace(1)* %x to i32 addrspace(3)*
%z = addrspacecast i32 addrspace(3)* %y to float*
ret float* %z
}
+define <4 x float*> @combine_redundant_addrspacecast_types_vector(<4 x i32 addrspace(1)*> %x) nounwind {
+; CHECK-LABEL: @combine_redundant_addrspacecast_types_vector(
+; CHECK-NEXT: bitcast <4 x i32 addrspace(1)*> %x to <4 x float addrspace(1)*>
+; CHECK-NEXT: addrspacecast <4 x float addrspace(1)*> %1 to <4 x float*>
+; CHECK-NEXT: ret
+ %y = addrspacecast <4 x i32 addrspace(1)*> %x to <4 x i32 addrspace(3)*>
+ %z = addrspacecast <4 x i32 addrspace(3)*> %y to <4 x float*>
+ ret <4 x float*> %z
+}
+
+define float addrspace(2)* @combine_addrspacecast_bitcast_1(i32 addrspace(1)* %x) nounwind {
+; CHECK-LABEL: @combine_addrspacecast_bitcast_1(
+; CHECK-NEXT: bitcast i32 addrspace(1)* %x to float addrspace(1)*
+; CHECK-NEXT: addrspacecast float addrspace(1)* %1 to float addrspace(2)*
+; CHECK-NEXT: ret
+ %y = addrspacecast i32 addrspace(1)* %x to i32 addrspace(2)*
+ %z = bitcast i32 addrspace(2)* %y to float addrspace(2)*
+ ret float addrspace(2)* %z
+}
+
+define i32 addrspace(2)* @combine_addrspacecast_bitcast_2(i32 addrspace(1)* %x) nounwind {
+; CHECK-LABEL: @combine_addrspacecast_bitcast_2(
+; CHECK: addrspacecast i32 addrspace(1)* %x to i32 addrspace(2)*
+; CHECK-NEXT: ret
+ %y = addrspacecast i32 addrspace(1)* %x to float addrspace(2)*
+ %z = bitcast float addrspace(2)* %y to i32 addrspace(2)*
+ ret i32 addrspace(2)* %z
+}
+
+define i32 addrspace(2)* @combine_bitcast_addrspacecast_1(i32 addrspace(1)* %x) nounwind {
+; CHECK-LABEL: @combine_bitcast_addrspacecast_1(
+; CHECK: addrspacecast i32 addrspace(1)* %x to i32 addrspace(2)*
+; CHECK-NEXT: ret
+ %y = bitcast i32 addrspace(1)* %x to i8 addrspace(1)*
+ %z = addrspacecast i8 addrspace(1)* %y to i32 addrspace(2)*
+ ret i32 addrspace(2)* %z
+}
+
+define float addrspace(2)* @combine_bitcast_addrspacecast_2(i32 addrspace(1)* %x) nounwind {
+; CHECK-LABEL: @combine_bitcast_addrspacecast_2(
+; CHECK: bitcast i32 addrspace(1)* %x to float addrspace(1)*
+; CHECK: addrspacecast float addrspace(1)* %1 to float addrspace(2)*
+; CHECK-NEXT: ret
+ %y = bitcast i32 addrspace(1)* %x to i8 addrspace(1)*
+ %z = addrspacecast i8 addrspace(1)* %y to float addrspace(2)*
+ ret float addrspace(2)* %z
+}
+
+define float addrspace(2)* @combine_addrspacecast_types(i32 addrspace(1)* %x) nounwind {
+; CHECK-LABEL: @combine_addrspacecast_types(
+; CHECK-NEXT: bitcast i32 addrspace(1)* %x to float addrspace(1)*
+; CHECK-NEXT: addrspacecast float addrspace(1)* %1 to float addrspace(2)*
+; CHECK-NEXT: ret
+ %y = addrspacecast i32 addrspace(1)* %x to float addrspace(2)*
+ ret float addrspace(2)* %y
+}
+
+define <4 x float addrspace(2)*> @combine_addrspacecast_types_vector(<4 x i32 addrspace(1)*> %x) nounwind {
+; CHECK-LABEL: @combine_addrspacecast_types_vector(
+; CHECK-NEXT: bitcast <4 x i32 addrspace(1)*> %x to <4 x float addrspace(1)*>
+; CHECK-NEXT: addrspacecast <4 x float addrspace(1)*> %1 to <4 x float addrspace(2)*>
+; CHECK-NEXT: ret
+ %y = addrspacecast <4 x i32 addrspace(1)*> %x to <4 x float addrspace(2)*>
+ ret <4 x float addrspace(2)*> %y
+}
+
+define i32 @canonicalize_addrspacecast([16 x i32] addrspace(1)* %arr) {
+; CHECK-LABEL: @canonicalize_addrspacecast(
+; CHECK-NEXT: getelementptr inbounds [16 x i32] addrspace(1)* %arr, i32 0, i32 0
+; CHECK-NEXT: addrspacecast i32 addrspace(1)* %{{[a-zA-Z0-9]+}} to i32*
+; CHECK-NEXT: load i32*
+; CHECK-NEXT: ret i32
+ %p = addrspacecast [16 x i32] addrspace(1)* %arr to i32*
+ %v = load i32* %p
+ ret i32 %v
+}
+
@const_array = addrspace(2) constant [60 x i8] [i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
diff --git a/test/Transforms/InstCombine/align-2d-gep.ll b/test/Transforms/InstCombine/align-2d-gep.ll
index 5bca46d5a21d..f6a877684ce4 100644
--- a/test/Transforms/InstCombine/align-2d-gep.ll
+++ b/test/Transforms/InstCombine/align-2d-gep.ll
@@ -31,7 +31,7 @@ bb1:
store <2 x double><double 0.0, double 0.0>, <2 x double>* %r, align 8
%indvar.next = add i64 %j, 2
- %exitcond = icmp eq i64 %indvar.next, 557
+ %exitcond = icmp eq i64 %indvar.next, 556
br i1 %exitcond, label %bb11, label %bb1
bb11:
diff --git a/test/Transforms/InstCombine/alloca.ll b/test/Transforms/InstCombine/alloca.ll
index ae1cfa1ed2fc..6d0c131c51ed 100644
--- a/test/Transforms/InstCombine/alloca.ll
+++ b/test/Transforms/InstCombine/alloca.ll
@@ -129,3 +129,24 @@ define void @test8() {
call void (...)* @use(i32* %x)
ret void
}
+
+; PR19569
+%struct_type = type { i32, i32 }
+declare void @test9_aux(<{ %struct_type }>* inalloca)
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+
+define void @test9(%struct_type* %a) {
+; CHECK-LABEL: @test9(
+entry:
+ %inalloca.save = call i8* @llvm.stacksave()
+ %argmem = alloca inalloca <{ %struct_type }>
+; CHECK: alloca inalloca i64, align 8
+ %0 = getelementptr inbounds <{ %struct_type }>* %argmem, i32 0, i32 0
+ %1 = bitcast %struct_type* %0 to i8*
+ %2 = bitcast %struct_type* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 8, i32 4, i1 false)
+ call void @test9_aux(<{ %struct_type }>* inalloca %argmem)
+ call void @llvm.stackrestore(i8* %inalloca.save)
+ ret void
+}
diff --git a/test/Transforms/InstCombine/ashr-nop.ll b/test/Transforms/InstCombine/ashr-nop.ll
deleted file mode 100644
index 870ede38cd88..000000000000
--- a/test/Transforms/InstCombine/ashr-nop.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: opt < %s -instcombine -S | not grep ashr
-
-define i32 @foo(i32 %x) {
- %o = and i32 %x, 1
- %n = add i32 %o, -1
- %t = ashr i32 %n, 17
- ret i32 %t
-}
diff --git a/test/Transforms/InstCombine/bitcast-store.ll b/test/Transforms/InstCombine/bitcast-store.ll
index e4a61e98e4f5..e46b5c82d9ff 100644
--- a/test/Transforms/InstCombine/bitcast-store.ll
+++ b/test/Transforms/InstCombine/bitcast-store.ll
@@ -3,14 +3,14 @@
; Instcombine should preserve metadata and alignment while
; folding a bitcast into a store.
-; CHECK: store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*]* @G, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 16, !tag !0
-
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
%struct.A = type { i32 (...)** }
@G = external constant [5 x i8*]
+; CHECK-LABEL: @foo
+; CHECK: store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*]* @G, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 16, !tag !0
define void @foo(%struct.A* %a) nounwind {
entry:
%0 = bitcast %struct.A* %a to i8***
@@ -18,4 +18,18 @@ entry:
ret void
}
+; Check instcombine doesn't try to fold the following bitcast into the store.
+; This transformation would not be safe since we would need to use addrspacecast
+; and addrspacecast is not guaranteed to be a no-op cast.
+
+; CHECK-LABEL: @bar
+; CHECK: %cast = bitcast i8** %b to i8 addrspace(1)**
+; CHECK: store i8 addrspace(1)* %a, i8 addrspace(1)** %cast
+define void @bar(i8 addrspace(1)* %a, i8** %b) nounwind {
+entry:
+ %cast = bitcast i8** %b to i8 addrspace(1)**
+ store i8 addrspace(1)* %a, i8 addrspace(1)** %cast
+ ret void
+}
+
!0 = metadata !{metadata !"hello"}
diff --git a/test/Transforms/InstCombine/blend_x86.ll b/test/Transforms/InstCombine/blend_x86.ll
new file mode 100644
index 000000000000..778d44ba342c
--- /dev/null
+++ b/test/Transforms/InstCombine/blend_x86.ll
@@ -0,0 +1,55 @@
+; RUN: opt < %s -instcombine -mtriple=x86_64-apple-macosx -mcpu=core-avx2 -S | FileCheck %s
+
+define <2 x double> @constant_blendvpd(<2 x double> %xy, <2 x double> %ab) {
+; CHECK-LABEL: @constant_blendvpd
+; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x double> %ab, <2 x double> %xy
+ %1 = tail call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %xy, <2 x double> %ab, <2 x double> <double 0xFFFFFFFFE0000000, double 0.000000e+00>)
+ ret <2 x double> %1
+}
+
+define <4 x float> @constant_blendvps(<4 x float> %xyzw, <4 x float> %abcd) {
+; CHECK-LABEL: @constant_blendvps
+; CHECK: select <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> %abcd, <4 x float> %xyzw
+ %1 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %xyzw, <4 x float> %abcd, <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0xFFFFFFFFE0000000>)
+ ret <4 x float> %1
+}
+
+define <16 x i8> @constant_pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd) {
+; CHECK-LABEL: @constant_pblendvb
+; CHECK: select <16 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x i8> %abcd, <16 x i8> %xyzw
+ %1 = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd, <16 x i8> <i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0>)
+ ret <16 x i8> %1
+}
+
+define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) {
+; CHECK-LABEL: @constant_blendvpd_avx
+; CHECK: select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %ab, <4 x double> %xy
+ %1 = tail call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %xy, <4 x double> %ab, <4 x double> <double 0xFFFFFFFFE0000000, double 0.000000e+00, double 0xFFFFFFFFE0000000, double 0.000000e+00>)
+ ret <4 x double> %1
+}
+
+define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) {
+; CHECK-LABEL: @constant_blendvps_avx
+; CHECK: select <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x float> %abcd, <8 x float> %xyzw
+ %1 = tail call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %xyzw, <8 x float> %abcd, <8 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0xFFFFFFFFE0000000>)
+ ret <8 x float> %1
+}
+
+define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
+; CHECK-LABEL: @constant_pblendvb_avx2
+; CHECK: select <32 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <32 x i8> %abcd, <32 x i8> %xyzw
+ %1 = tail call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %xyzw, <32 x i8> %abcd,
+ <32 x i8> <i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0,
+ i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0,
+ i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0,
+ i8 0, i8 0, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0>)
+ ret <32 x i8> %1
+}
+
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
+
+declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>)
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>)
diff --git a/test/Transforms/InstCombine/call-cast-target-inalloca.ll b/test/Transforms/InstCombine/call-cast-target-inalloca.ll
new file mode 100644
index 000000000000..90289e2468f8
--- /dev/null
+++ b/test/Transforms/InstCombine/call-cast-target-inalloca.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+
+declare void @takes_i32(i32)
+declare void @takes_i32_inalloca(i32* inalloca)
+
+define void @f() {
+; CHECK-LABEL: define void @f()
+ %args = alloca inalloca i32
+ call void bitcast (void (i32)* @takes_i32 to void (i32*)*)(i32* inalloca %args)
+; CHECK: call void bitcast
+ ret void
+}
+
+define void @g() {
+; CHECK-LABEL: define void @g()
+ call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
+; CHECK: call void bitcast
+ ret void
+}
diff --git a/test/Transforms/InstCombine/call-cast-target.ll b/test/Transforms/InstCombine/call-cast-target.ll
index 315c51683fd2..1af3317a398b 100644
--- a/test/Transforms/InstCombine/call-cast-target.ll
+++ b/test/Transforms/InstCombine/call-cast-target.ll
@@ -13,3 +13,15 @@ entry:
declare i8* @ctime(i32*)
+define internal { i8 } @foo(i32*) {
+entry:
+ ret { i8 } { i8 0 }
+}
+
+define void @test_struct_ret() {
+; CHECK-LABEL: @test_struct_ret
+; CHECK-NOT: bitcast
+entry:
+ %0 = call { i8 } bitcast ({ i8 } (i32*)* @foo to { i8 } (i16*)*)(i16* null)
+ ret void
+}
diff --git a/test/Transforms/InstCombine/cast-call-combine.ll b/test/Transforms/InstCombine/cast-call-combine.ll
new file mode 100644
index 000000000000..be70a8763ea8
--- /dev/null
+++ b/test/Transforms/InstCombine/cast-call-combine.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -always-inline -instcombine -S | FileCheck %s
+
+define internal void @foo(i16*) alwaysinline {
+ ret void
+}
+
+define void @bar() noinline noreturn {
+ unreachable
+}
+
+define void @test() {
+ br i1 false, label %then, label %else
+
+then:
+ call void @bar()
+ unreachable
+
+else:
+ ; CHECK-NOT: call
+ call void bitcast (void (i16*)* @foo to void (i8*)*) (i8* null)
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/cast-set.ll b/test/Transforms/InstCombine/cast-set.ll
index 893440424c4c..47ba920d9286 100644
--- a/test/Transforms/InstCombine/cast-set.ll
+++ b/test/Transforms/InstCombine/cast-set.ll
@@ -10,6 +10,7 @@ define i1 @test1(i32 %X) {
; Convert to setne int %X, 12
%c = icmp ne i32 %A, 12 ; <i1> [#uses=1]
ret i1 %c
+; CHECK-LABEL: @test1(
; CHECK: %c = icmp ne i32 %X, 12
; CHECK: ret i1 %c
}
@@ -20,6 +21,7 @@ define i1 @test2(i32 %X, i32 %Y) {
; Convert to setne int %X, %Y
%c = icmp ne i32 %A, %B ; <i1> [#uses=1]
ret i1 %c
+; CHECK-LABEL: @test2(
; CHECK: %c = icmp ne i32 %X, %Y
; CHECK: ret i1 %c
}
@@ -29,6 +31,7 @@ define i32 @test4(i32 %A) {
%C = shl i32 %B, 2 ; <i32> [#uses=1]
%D = bitcast i32 %C to i32 ; <i32> [#uses=1]
ret i32 %D
+; CHECK-LABEL: @test4(
; CHECK: %C = shl i32 %A, 2
; CHECK: ret i32 %C
}
@@ -38,6 +41,7 @@ define i16 @test5(i16 %A) {
%C = and i32 %B, 15 ; <i32> [#uses=1]
%D = trunc i32 %C to i16 ; <i16> [#uses=1]
ret i16 %D
+; CHECK-LABEL: @test5(
; CHECK: %C = and i16 %A, 15
; CHECK: ret i16 %C
}
@@ -46,6 +50,7 @@ define i1 @test6(i1 %A) {
%B = zext i1 %A to i32 ; <i32> [#uses=1]
%C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
ret i1 %C
+; CHECK-LABEL: @test6(
; CHECK: ret i1 %A
}
@@ -53,6 +58,7 @@ define i1 @test6a(i1 %A) {
%B = zext i1 %A to i32 ; <i32> [#uses=1]
%C = icmp ne i32 %B, -1 ; <i1> [#uses=1]
ret i1 %C
+; CHECK-LABEL: @test6a(
; CHECK: ret i1 true
}
@@ -60,6 +66,7 @@ define i1 @test7(i8* %A) {
%B = bitcast i8* %A to i32* ; <i32*> [#uses=1]
%C = icmp eq i32* %B, null ; <i1> [#uses=1]
ret i1 %C
+; CHECK-LABEL: @test7(
; CHECK: %C = icmp eq i8* %A, null
; CHECK: ret i1 %C
}
diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll
index cac0ec109163..0cbfbb071bb7 100644
--- a/test/Transforms/InstCombine/cast.ll
+++ b/test/Transforms/InstCombine/cast.ll
@@ -1,6 +1,6 @@
; Tests to make sure elimination of casts is working correctly
; RUN: opt < %s -instcombine -S | FileCheck %s
-target datalayout = "E-p:64:64:64-p1:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
+target datalayout = "E-p:64:64:64-p1:32:32:32-p2:64:64:64-p3:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
@inbuf = external global [32832 x i8] ; <[32832 x i8]*> [#uses=1]
@@ -370,7 +370,7 @@ define zeroext i64 @test43(i8 zeroext %on_off) nounwind readonly {
ret i64 %C ;; Should be (add (zext i8 -> i64), -1)
; CHECK-LABEL: @test43(
; CHECK-NEXT: %A = zext i8 %on_off to i64
-; CHECK-NEXT: %B = add i64 %A, -1
+; CHECK-NEXT: %B = add nsw i64 %A, -1
; CHECK-NEXT: ret i64 %B
}
@@ -708,6 +708,34 @@ define %s @test68(%s *%p, i64 %i) {
; CHECK-NEXT: ret %s
}
+; addrspacecasts should be eliminated.
+define %s @test68_addrspacecast(%s* %p, i64 %i) {
+; CHECK-LABEL: @test68_addrspacecast(
+; CHECK-NEXT: getelementptr %s*
+; CHECK-NEXT: load %s*
+; CHECK-NEXT: ret %s
+ %o = mul i64 %i, 12
+ %q = addrspacecast %s* %p to i8 addrspace(2)*
+ %pp = getelementptr inbounds i8 addrspace(2)* %q, i64 %o
+ %r = addrspacecast i8 addrspace(2)* %pp to %s*
+ %l = load %s* %r
+ ret %s %l
+}
+
+define %s @test68_addrspacecast_2(%s* %p, i64 %i) {
+; CHECK-LABEL: @test68_addrspacecast_2(
+; CHECK-NEXT: getelementptr %s* %p
+; CHECK-NEXT: addrspacecast
+; CHECK-NEXT: load %s addrspace(1)*
+; CHECK-NEXT: ret %s
+ %o = mul i64 %i, 12
+ %q = addrspacecast %s* %p to i8 addrspace(2)*
+ %pp = getelementptr inbounds i8 addrspace(2)* %q, i64 %o
+ %r = addrspacecast i8 addrspace(2)* %pp to %s addrspace(1)*
+ %l = load %s addrspace(1)* %r
+ ret %s %l
+}
+
define %s @test68_as1(%s addrspace(1)* %p, i32 %i) {
; CHECK-LABEL: @test68_as1(
%o = mul i32 %i, 12
@@ -903,6 +931,33 @@ define double @test80([100 x double]* %p, i32 %i) {
; CHECK-NEXT: ret double
}
+define double @test80_addrspacecast([100 x double] addrspace(1)* %p, i32 %i) {
+; CHECK-LABEL: @test80_addrspacecast(
+; CHECK-NEXT: getelementptr [100 x double] addrspace(1)* %p
+; CHECK-NEXT: load double addrspace(1)*
+; CHECK-NEXT: ret double
+ %tmp = mul nsw i32 %i, 8
+ %q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
+ %pp = getelementptr i8 addrspace(2)* %q, i32 %tmp
+ %r = addrspacecast i8 addrspace(2)* %pp to double addrspace(1)*
+ %l = load double addrspace(1)* %r
+ ret double %l
+}
+
+define double @test80_addrspacecast_2([100 x double] addrspace(1)* %p, i32 %i) {
+; CHECK-LABEL: @test80_addrspacecast_2(
+; CHECK-NEXT: getelementptr [100 x double] addrspace(1)*
+; CHECK-NEXT: addrspacecast double addrspace(1)*
+; CHECK-NEXT: load double addrspace(3)*
+; CHECK-NEXT: ret double
+ %tmp = mul nsw i32 %i, 8
+ %q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
+ %pp = getelementptr i8 addrspace(2)* %q, i32 %tmp
+ %r = addrspacecast i8 addrspace(2)* %pp to double addrspace(3)*
+ %l = load double addrspace(3)* %r
+ ret double %l
+}
+
define double @test80_as1([100 x double] addrspace(1)* %p, i16 %i) {
; CHECK-LABEL: @test80_as1(
%tmp = mul nsw i16 %i, 8
diff --git a/test/Transforms/InstCombine/ceil.ll b/test/Transforms/InstCombine/ceil.ll
new file mode 100644
index 000000000000..9f965a3c34b4
--- /dev/null
+++ b/test/Transforms/InstCombine/ceil.ll
@@ -0,0 +1,56 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare float @llvm.ceil.f32(float) #0
+declare double @llvm.ceil.f64(double) #0
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) #0
+
+; CHECK-LABEL: @constant_fold_ceil_f32_01
+; CHECK-NEXT: ret float 1.000000e+00
+define float @constant_fold_ceil_f32_01() #0 {
+ %x = call float @llvm.ceil.f32(float 1.00) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_ceil_f32_02
+; CHECK-NEXT: ret float 2.000000e+00
+define float @constant_fold_ceil_f32_02() #0 {
+ %x = call float @llvm.ceil.f32(float 1.25) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_ceil_f32_03
+; CHECK-NEXT: ret float -1.000000e+00
+define float @constant_fold_ceil_f32_03() #0 {
+ %x = call float @llvm.ceil.f32(float -1.25) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_ceil_v4f32_01
+; CHECK-NEXT: ret <4 x float> <float 1.000000e+00, float 2.000000e+00, float -1.000000e+00, float -1.000000e+00>
+define <4 x float> @constant_fold_ceil_v4f32_01() #0 {
+ %x = call <4 x float> @llvm.ceil.v4f32(<4 x float> <float 1.00, float 1.25, float -1.25, float -1.00>)
+ ret <4 x float> %x
+}
+
+; CHECK-LABEL: @constant_fold_ceil_f64_01
+; CHECK-NEXT: ret double 1.000000e+00
+define double @constant_fold_ceil_f64_01() #0 {
+ %x = call double @llvm.ceil.f64(double 1.0) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_ceil_f64_02
+; CHECK-NEXT: ret double 2.000000e+00
+define double @constant_fold_ceil_f64_02() #0 {
+ %x = call double @llvm.ceil.f64(double 1.3) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_ceil_f64_03
+; CHECK-NEXT: ret double -1.000000e+00
+define double @constant_fold_ceil_f64_03() #0 {
+ %x = call double @llvm.ceil.f64(double -1.75) #0
+ ret double %x
+}
+
+attributes #0 = { nounwind readnone }
diff --git a/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll b/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
index 9f21d5419b72..7fac78a40f56 100644
--- a/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
+++ b/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
@@ -230,3 +230,13 @@ define i32 @constant_through_array_as_ptrs() {
%b = load i32 addrspace(1)* %a, align 4
ret i32 %b
}
+
+@shared_mem = external addrspace(3) global [0 x i8]
+
+define float @canonicalize_addrspacecast(i32 %i) {
+; CHECK-LABEL: @canonicalize_addrspacecast
+; CHECK-NEXT: getelementptr inbounds float* addrspacecast (float addrspace(3)* bitcast ([0 x i8] addrspace(3)* @shared_mem to float addrspace(3)*) to float*), i32 %i
+ %p = getelementptr inbounds float* addrspacecast ([0 x i8] addrspace(3)* @shared_mem to float*), i32 %i
+ %v = load float* %p
+ ret float %v
+}
diff --git a/test/Transforms/InstCombine/constant-fold-math.ll b/test/Transforms/InstCombine/constant-fold-math.ll
new file mode 100644
index 000000000000..14377df37299
--- /dev/null
+++ b/test/Transforms/InstCombine/constant-fold-math.ll
@@ -0,0 +1,47 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare float @llvm.fma.f32(float, float, float) #0
+declare float @llvm.fmuladd.f32(float, float, float) #0
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #0
+
+declare double @llvm.fma.f64(double, double, double) #0
+declare double @llvm.fmuladd.f64(double, double, double) #0
+
+
+
+; CHECK-LABEL: @constant_fold_fma_f32
+; CHECK-NEXT: ret float 6.000000e+00
+define float @constant_fold_fma_f32() #0 {
+ %x = call float @llvm.fma.f32(float 1.0, float 2.0, float 4.0) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_fma_v4f32
+; CHECK-NEXT: ret <4 x float> <float 1.200000e+01, float 1.400000e+01, float 1.600000e+01, float 1.800000e+01>
+define <4 x float> @constant_fold_fma_v4f32() #0 {
+ %x = call <4 x float> @llvm.fma.v4f32(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>, <4 x float> <float 10.0, float 10.0, float 10.0, float 10.0>)
+ ret <4 x float> %x
+}
+
+; CHECK-LABEL: @constant_fold_fmuladd_f32
+; CHECK-NEXT: ret float 6.000000e+00
+define float @constant_fold_fmuladd_f32() #0 {
+ %x = call float @llvm.fmuladd.f32(float 1.0, float 2.0, float 4.0) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_fma_f64
+; CHECK-NEXT: ret double 6.000000e+00
+define double @constant_fold_fma_f64() #0 {
+ %x = call double @llvm.fma.f64(double 1.0, double 2.0, double 4.0) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_fmuladd_f64
+; CHECK-NEXT: ret double 6.000000e+00
+define double @constant_fold_fmuladd_f64() #0 {
+ %x = call double @llvm.fmuladd.f64(double 1.0, double 2.0, double 4.0) #0
+ ret double %x
+}
+
+attributes #0 = { nounwind readnone }
diff --git a/test/Transforms/InstCombine/copysign.ll b/test/Transforms/InstCombine/copysign.ll
new file mode 100644
index 000000000000..556b79999b02
--- /dev/null
+++ b/test/Transforms/InstCombine/copysign.ll
@@ -0,0 +1,49 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare float @llvm.copysign.f32(float, float) #0
+declare double @llvm.copysign.f64(double, double) #0
+
+; CHECK-LABEL: @constant_fold_copysign_f32_01
+; CHECK-NEXT: ret float -1.000000e+00
+define float @constant_fold_copysign_f32_01() #0 {
+ %x = call float @llvm.copysign.f32(float 1.0, float -2.0) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_copysign_f32_02
+; CHECK-NEXT: ret float 2.000000e+00
+define float @constant_fold_copysign_f32_02() #0 {
+ %x = call float @llvm.copysign.f32(float -2.0, float 1.0) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_copysign_f32_03
+; CHECK-NEXT: ret float -2.000000e+00
+define float @constant_fold_copysign_f32_03() #0 {
+ %x = call float @llvm.copysign.f32(float -2.0, float -1.0) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_copysign_f64_01
+; CHECK-NEXT: ret double -1.000000e+00
+define double @constant_fold_copysign_f64_01() #0 {
+ %x = call double @llvm.copysign.f64(double 1.0, double -2.0) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_copysign_f64_02
+; CHECK-NEXT: ret double 1.000000e+00
+define double @constant_fold_copysign_f64_02() #0 {
+ %x = call double @llvm.copysign.f64(double -1.0, double 2.0) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_copysign_f64_03
+; CHECK-NEXT: ret double -1.000000e+00
+define double @constant_fold_copysign_f64_03() #0 {
+ %x = call double @llvm.copysign.f64(double -1.0, double -2.0) #0
+ ret double %x
+}
+
+
+attributes #0 = { nounwind readnone }
diff --git a/test/Transforms/InstCombine/descale-zero.ll b/test/Transforms/InstCombine/descale-zero.ll
new file mode 100644
index 000000000000..7990fdb3eca3
--- /dev/null
+++ b/test/Transforms/InstCombine/descale-zero.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define internal i8* @descale_zero() {
+entry:
+; CHECK: load i16** inttoptr (i64 48 to i16**), align 16
+; CHECK-NEXT: bitcast i16*
+; CHECK-NEXT: ret i8*
+ %i16_ptr = load i16** inttoptr (i64 48 to i16**), align 16
+ %num = load i64* inttoptr (i64 64 to i64*), align 64
+ %num_times_2 = shl i64 %num, 1
+ %num_times_2_plus_4 = add i64 %num_times_2, 4
+ %i8_ptr = bitcast i16* %i16_ptr to i8*
+ %i8_ptr_num_times_2_plus_4 = getelementptr i8* %i8_ptr, i64 %num_times_2_plus_4
+ %num_times_neg2 = mul i64 %num, -2
+ %num_times_neg2_minus_4 = add i64 %num_times_neg2, -4
+ %addr = getelementptr i8* %i8_ptr_num_times_2_plus_4, i64 %num_times_neg2_minus_4
+ ret i8* %addr
+}
diff --git a/test/Transforms/InstCombine/distribute.ll b/test/Transforms/InstCombine/distribute.ll
new file mode 100644
index 000000000000..e6360f8ba64e
--- /dev/null
+++ b/test/Transforms/InstCombine/distribute.ll
@@ -0,0 +1,68 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @factorize(i32 %x, i32 %y) {
+; CHECK-LABEL: @factorize(
+; (X | 1) & (X | 2) -> X | (1 & 2) -> X
+ %l = or i32 %x, 1
+ %r = or i32 %x, 2
+ %z = and i32 %l, %r
+ ret i32 %z
+; CHECK: ret i32 %x
+}
+
+define i32 @factorize2(i32 %x) {
+; CHECK-LABEL: @factorize2(
+; 3*X - 2*X -> X
+ %l = mul i32 3, %x
+ %r = mul i32 2, %x
+ %z = sub i32 %l, %r
+ ret i32 %z
+; CHECK: ret i32 %x
+}
+
+define i32 @factorize3(i32 %x, i32 %a, i32 %b) {
+; CHECK-LABEL: @factorize3(
+; (X | (A|B)) & (X | B) -> X | ((A|B) & B) -> X | B
+ %aORb = or i32 %a, %b
+ %l = or i32 %x, %aORb
+ %r = or i32 %x, %b
+ %z = and i32 %l, %r
+ ret i32 %z
+; CHECK: %z = or i32 %b, %x
+; CHECK: ret i32 %z
+}
+
+define i32 @factorize4(i32 %x, i32 %y) {
+; CHECK-LABEL: @factorize4(
+; ((Y << 1) * X) - (X * Y) -> (X * (Y * 2 - Y)) -> (X * Y)
+ %sh = shl i32 %y, 1
+ %ml = mul i32 %sh, %x
+ %mr = mul i32 %x, %y
+ %s = sub i32 %ml, %mr
+ ret i32 %s
+; CHECK: %s = mul i32 %y, %x
+; CHECK: ret i32 %s
+}
+
+define i32 @factorize5(i32 %x, i32 %y) {
+; CHECK-LABEL: @factorize5(
+; ((Y * 2) * X) - (X * Y) -> (X * Y)
+ %sh = mul i32 %y, 2
+ %ml = mul i32 %sh, %x
+ %mr = mul i32 %x, %y
+ %s = sub i32 %ml, %mr
+ ret i32 %s
+; CHECK: %s = mul i32 %y, %x
+; CHECK: ret i32 %s
+}
+
+define i32 @expand(i32 %x) {
+; CHECK-LABEL: @expand(
+; ((X & 1) | 2) & 1 -> ((X & 1) & 1) | (2 & 1) -> (X & 1) | 0 -> X & 1
+ %a = and i32 %x, 1
+ %b = or i32 %a, 2
+ %c = and i32 %b, 1
+ ret i32 %c
+; CHECK: %a = and i32 %x, 1
+; CHECK: ret i32 %a
+}
diff --git a/test/Transforms/InstCombine/div.ll b/test/Transforms/InstCombine/div.ll
index f67fd1c51be3..9c7ba9b03059 100644
--- a/test/Transforms/InstCombine/div.ll
+++ b/test/Transforms/InstCombine/div.ll
@@ -131,4 +131,47 @@ define i32 @test15(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: ret i32
}
+define <2 x i64> @test16(<2 x i64> %x) nounwind {
+ %shr = lshr <2 x i64> %x, <i64 3, i64 5>
+ %div = udiv <2 x i64> %shr, <i64 4, i64 6>
+ ret <2 x i64> %div
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: udiv <2 x i64> %x, <i64 32, i64 192>
+; CHECK-NEXT: ret <2 x i64>
+}
+
+define <2 x i64> @test17(<2 x i64> %x) nounwind {
+ %neg = sub nsw <2 x i64> zeroinitializer, %x
+ %div = sdiv <2 x i64> %neg, <i64 3, i64 4>
+ ret <2 x i64> %div
+; CHECK-LABEL: @test17(
+; CHECK-NEXT: sdiv <2 x i64> %x, <i64 -3, i64 -4>
+; CHECK-NEXT: ret <2 x i64>
+}
+
+define <2 x i64> @test18(<2 x i64> %x) nounwind {
+ %div = sdiv <2 x i64> %x, <i64 -1, i64 -1>
+ ret <2 x i64> %div
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: sub <2 x i64> zeroinitializer, %x
+; CHECK-NEXT: ret <2 x i64>
+}
+define i32 @test19(i32 %x) {
+ %A = udiv i32 1, %x
+ ret i32 %A
+; CHECK-LABEL: @test19(
+; CHECK-NEXT: icmp eq i32 %x, 1
+; CHECK-NEXT: zext i1 %{{.*}} to i32
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test20(i32 %x) {
+ %A = sdiv i32 1, %x
+ ret i32 %A
+; CHECK-LABEL: @test20(
+; CHECK-NEXT: add i32 %x, 1
+; CHECK-NEXT: icmp ult i32 %{{.*}}, 3
+; CHECK-NEXT: select i1 %{{.*}}, i32 %x, i32 {{.*}}
+; CHECK-NEXT: ret i32
+}
diff --git a/test/Transforms/InstCombine/double-float-shrink-1.ll b/test/Transforms/InstCombine/double-float-shrink-1.ll
index 5cacb591e006..d958470f1baa 100644
--- a/test/Transforms/InstCombine/double-float-shrink-1.ll
+++ b/test/Transforms/InstCombine/double-float-shrink-1.ll
@@ -157,7 +157,10 @@ define float @exp10_test(float %f) nounwind readnone {
%call = call double @exp10(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
-; CHECK: call float @exp10f(float %f)
+; FIXME: Re-enable this when Linux allows transforming this again, or when we
+; can use builtin attributes to test the transform regardless of OS.
+; DISABLED-CHECK: call float @exp10f(float %f)
+; CHECK: call double @exp10(double %conv)
}
define double @exp10_test2(float %f) nounwind readnone {
diff --git a/test/Transforms/InstCombine/exp2-1.ll b/test/Transforms/InstCombine/exp2-1.ll
index 99fb9ecfd2b2..8e6a0e0d93f6 100644
--- a/test/Transforms/InstCombine/exp2-1.ll
+++ b/test/Transforms/InstCombine/exp2-1.ll
@@ -1,6 +1,7 @@
; Test that the exp2 library call simplifier works correctly.
;
; RUN: opt < %s -instcombine -S | FileCheck %s
+; RUN: opt < %s -instcombine -S -mtriple=i386-pc-win32 | FileCheck %s -check-prefix=CHECK-WIN
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -74,3 +75,26 @@ define float @test_simplify8(i8 zeroext %x) {
; CHECK: call float @ldexpf
ret float %ret
}
+
+declare double @llvm.exp2.f64(double)
+declare float @llvm.exp2.f32(float)
+
+define double @test_simplify9(i8 zeroext %x) {
+; CHECK-LABEL: @test_simplify9(
+; CHECK-WIN-LABEL: @test_simplify9(
+ %conv = uitofp i8 %x to double
+ %ret = call double @llvm.exp2.f64(double %conv)
+; CHECK: call double @ldexp
+; CHECK-WIN: call double @ldexp
+ ret double %ret
+}
+
+define float @test_simplify10(i8 zeroext %x) {
+; CHECK-LABEL: @test_simplify10(
+; CHECK-WIN-LABEL: @test_simplify10(
+ %conv = uitofp i8 %x to float
+ %ret = call float @llvm.exp2.f32(float %conv)
+; CHECK: call float @ldexpf
+; CHECK-WIN-NOT: call float @ldexpf
+ ret float %ret
+}
diff --git a/test/Transforms/InstCombine/fast-math.ll b/test/Transforms/InstCombine/fast-math.ll
index d8ba2a59ff5e..2ee4b0f2c381 100644
--- a/test/Transforms/InstCombine/fast-math.ll
+++ b/test/Transforms/InstCombine/fast-math.ll
@@ -140,6 +140,42 @@ define float @fold13(float %x) {
; CHECK: ret
}
+; -x + y => y - x
+define float @fold14(float %x, float %y) {
+ %neg = fsub fast float -0.0, %x
+ %add = fadd fast float %neg, %y
+ ret float %add
+; CHECK: fold14
+; CHECK: fsub fast float %y, %x
+; CHECK: ret
+}
+
+; x + -y => x - y
+define float @fold15(float %x, float %y) {
+ %neg = fsub fast float -0.0, %y
+ %add = fadd fast float %x, %neg
+ ret float %add
+; CHECK: fold15
+; CHECK: fsub fast float %x, %y
+; CHECK: ret
+}
+
+; (select X+Y, X-Y) => X + (select Y, -Y)
+define float @fold16(float %x, float %y) {
+ %cmp = fcmp ogt float %x, %y
+ %plus = fadd fast float %x, %y
+ %minus = fsub fast float %x, %y
+ %r = select i1 %cmp, float %plus, float %minus
+ ret float %r
+; CHECK: fold16
+; CHECK: fsub fast float
+; CHECK: select
+; CHECK: fadd fast float
+; CHECK: ret
+}
+
+
+
; =========================================================================
;
; Testing-cases about fmul begin
@@ -223,6 +259,14 @@ define float @fmul3(float %f1, float %f2) {
; CHECK: fmul fast float %f1, 3.000000e+00
}
+define <4 x float> @fmul3_vec(<4 x float> %f1, <4 x float> %f2) {
+ %t1 = fdiv <4 x float> %f1, <float 2.0e+3, float 3.0e+3, float 2.0e+3, float 1.0e+3>
+ %t3 = fmul fast <4 x float> %t1, <float 6.0e+3, float 6.0e+3, float 2.0e+3, float 1.0e+3>
+ ret <4 x float> %t3
+; CHECK-LABEL: @fmul3_vec(
+; CHECK: fmul fast <4 x float> %f1, <float 3.000000e+00, float 2.000000e+00, float 1.000000e+00, float 1.000000e+00>
+}
+
; Rule "X/C1 * C2 => X * (C2/C1) is not applicable if C2/C1 is either a special
; value of a denormal. The 0x3810000000000000 here take value FLT_MIN
;
@@ -309,6 +353,15 @@ define float @fdiv2(float %x) {
; CHECK: fmul fast float %x, 0x3FE0B21660000000
}
+define <2 x float> @fdiv2_vec(<2 x float> %x) {
+ %mul = fmul <2 x float> %x, <float 6.0, float 9.0>
+ %div1 = fdiv fast <2 x float> %mul, <float 2.0, float 3.0>
+ ret <2 x float> %div1
+
+; CHECK-LABEL: @fdiv2_vec(
+; CHECK: fmul fast <2 x float> %x, <float 3.000000e+00, float 3.000000e+00>
+}
+
; "X/C1 / C2 => X * (1/(C2*C1))" is disabled (for now) is C2/C1 is a denormal
;
define float @fdiv3(float %x) {
diff --git a/test/Transforms/InstCombine/fdiv.ll b/test/Transforms/InstCombine/fdiv.ll
index 1edbc5ecd60b..af6a2401a8fc 100644
--- a/test/Transforms/InstCombine/fdiv.ll
+++ b/test/Transforms/InstCombine/fdiv.ll
@@ -23,3 +23,29 @@ define float @test3(float %x) nounwind readnone ssp {
; CHECK-LABEL: @test3(
; CHECK-NEXT: fdiv float %x, 0x36A0000000000000
}
+
+define float @test4(float %x) nounwind readnone ssp {
+ %div = fdiv fast float %x, 8.0
+ ret float %div
+
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: fmul fast float %x, 1.250000e-01
+}
+
+define float @test5(float %x, float %y, float %z) nounwind readnone ssp {
+ %div1 = fdiv fast float %x, %y
+ %div2 = fdiv fast float %div1, %z
+ ret float %div2
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: fmul fast
+; CHECK-NEXT: fdiv fast
+}
+
+define float @test6(float %x, float %y, float %z) nounwind readnone ssp {
+ %div1 = fdiv fast float %x, %y
+ %div2 = fdiv fast float %z, %div1
+ ret float %div2
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: fmul fast
+; CHECK-NEXT: fdiv fast
+}
diff --git a/test/Transforms/InstCombine/ffs-1.ll b/test/Transforms/InstCombine/ffs-1.ll
index 1dec11da0eb0..c8763dc199a9 100644
--- a/test/Transforms/InstCombine/ffs-1.ll
+++ b/test/Transforms/InstCombine/ffs-1.ll
@@ -103,7 +103,7 @@ define i32 @test_simplify13(i32 %x) {
; CHECK-LABEL: @test_simplify13(
%ret = call i32 @ffs(i32 %x)
; CHECK-NEXT: [[CTTZ:%[a-z0-9]+]] = call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-NEXT: [[INC:%[a-z0-9]+]] = add i32 [[CTTZ]], 1
+; CHECK-NEXT: [[INC:%[a-z0-9]+]] = add nuw nsw i32 [[CTTZ]], 1
; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ne i32 %x, 0
; CHECK-NEXT: [[RET:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[INC]], i32 0
ret i32 %ret
@@ -114,7 +114,7 @@ define i32 @test_simplify14(i32 %x) {
; CHECK-LINUX-LABEL: @test_simplify14(
%ret = call i32 @ffsl(i32 %x)
; CHECK-LINUX-NEXT: [[CTTZ:%[a-z0-9]+]] = call i32 @llvm.cttz.i32(i32 %x, i1 false)
-; CHECK-LINUX-NEXT: [[INC:%[a-z0-9]+]] = add i32 [[CTTZ]], 1
+; CHECK-LINUX-NEXT: [[INC:%[a-z0-9]+]] = add nuw nsw i32 [[CTTZ]], 1
; CHECK-LINUX-NEXT: [[CMP:%[a-z0-9]+]] = icmp ne i32 %x, 0
; CHECK-LINUX-NEXT: [[RET:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[INC]], i32 0
ret i32 %ret
@@ -125,7 +125,7 @@ define i32 @test_simplify15(i64 %x) {
; CHECK-LINUX-LABEL: @test_simplify15(
%ret = call i32 @ffsll(i64 %x)
; CHECK-LINUX-NEXT: [[CTTZ:%[a-z0-9]+]] = call i64 @llvm.cttz.i64(i64 %x, i1 false)
-; CHECK-LINUX-NEXT: [[INC:%[a-z0-9]+]] = add i64 [[CTTZ]], 1
+; CHECK-LINUX-NEXT: [[INC:%[a-z0-9]+]] = add nuw nsw i64 [[CTTZ]], 1
; CHECK-LINUX-NEXT: [[TRUNC:%[a-z0-9]+]] = trunc i64 [[INC]] to i32
; CHECK-LINUX-NEXT: [[CMP:%[a-z0-9]+]] = icmp ne i64 %x, 0
; CHECK-LINUX-NEXT: [[RET:%[a-z0-9]+]] = select i1 [[CMP]], i32 [[TRUNC]], i32 0
diff --git a/test/Transforms/InstCombine/float-shrink-compare.ll b/test/Transforms/InstCombine/float-shrink-compare.ll
index 26f77a7f702b..e50046790da8 100644
--- a/test/Transforms/InstCombine/float-shrink-compare.ll
+++ b/test/Transforms/InstCombine/float-shrink-compare.ll
@@ -170,6 +170,58 @@ define i32 @test14(float %x, float %y) nounwind uwtable {
; CHECK-NEXT: fcmp oeq float %truncf, %y
}
+define i32 @test15(float %x, float %y, float %z) nounwind uwtable {
+ %1 = fpext float %x to double
+ %2 = fpext float %y to double
+ %3 = call double @fmin(double %1, double %2) nounwind
+ %4 = fpext float %z to double
+ %5 = fcmp oeq double %3, %4
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: %fminf = call float @fminf(float %x, float %y)
+; CHECK-NEXT: fcmp oeq float %fminf, %z
+}
+
+define i32 @test16(float %x, float %y, float %z) nounwind uwtable {
+ %1 = fpext float %z to double
+ %2 = fpext float %x to double
+ %3 = fpext float %y to double
+ %4 = call double @fmin(double %2, double %3) nounwind
+ %5 = fcmp oeq double %1, %4
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: %fminf = call float @fminf(float %x, float %y)
+; CHECK-NEXT: fcmp oeq float %fminf, %z
+}
+
+define i32 @test17(float %x, float %y, float %z) nounwind uwtable {
+ %1 = fpext float %x to double
+ %2 = fpext float %y to double
+ %3 = call double @fmax(double %1, double %2) nounwind
+ %4 = fpext float %z to double
+ %5 = fcmp oeq double %3, %4
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+; CHECK-LABEL: @test17(
+; CHECK-NEXT: %fmaxf = call float @fmaxf(float %x, float %y)
+; CHECK-NEXT: fcmp oeq float %fmaxf, %z
+}
+
+define i32 @test18(float %x, float %y, float %z) nounwind uwtable {
+ %1 = fpext float %z to double
+ %2 = fpext float %x to double
+ %3 = fpext float %y to double
+ %4 = call double @fmax(double %2, double %3) nounwind
+ %5 = fcmp oeq double %1, %4
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: %fmaxf = call float @fmaxf(float %x, float %y)
+; CHECK-NEXT: fcmp oeq float %fmaxf, %z
+}
+
declare double @fabs(double) nounwind readnone
declare double @ceil(double) nounwind readnone
declare double @floor(double) nounwind readnone
@@ -177,3 +229,5 @@ declare double @nearbyint(double) nounwind readnone
declare double @rint(double) nounwind readnone
declare double @round(double) nounwind readnone
declare double @trunc(double) nounwind readnone
+declare double @fmin(double, double) nounwind readnone
+declare double @fmax(double, double) nounwind readnone
diff --git a/test/Transforms/InstCombine/fmul.ll b/test/Transforms/InstCombine/fmul.ll
index 402ee52e624a..18cbf9da5361 100644
--- a/test/Transforms/InstCombine/fmul.ll
+++ b/test/Transforms/InstCombine/fmul.ll
@@ -24,10 +24,10 @@ define float @test2(float %x) {
define float @test3(float %x, float %y) {
%sub1 = fsub float -0.000000e+00, %x
%sub2 = fsub float -0.000000e+00, %y
- %mul = fmul float %sub1, %sub2
+ %mul = fmul fast float %sub1, %sub2
ret float %mul
; CHECK-LABEL: @test3(
-; CHECK: fmul float %x, %y
+; CHECK: fmul fast float %x, %y
}
; (0.0 - X) * (0.0 - Y) => X * Y
@@ -93,3 +93,33 @@ for.body: ; preds = %for.cond
for.end: ; preds = %for.cond
ret void
}
+
+; X * -1.0 => -0.0 - X
+define float @test9(float %x) {
+ %mul = fmul float %x, -1.0
+ ret float %mul
+
+; CHECK-LABEL: @test9(
+; CHECK-NOT: fmul
+; CHECK: fsub
+}
+
+; PR18532
+define <4 x float> @test10(<4 x float> %x) {
+ %mul = fmul <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
+ ret <4 x float> %mul
+
+; CHECK-LABEL: @test10(
+; CHECK-NOT: fmul
+; CHECK: fsub
+}
+
+define float @test11(float %x, float %y) {
+ %a = fadd fast float %x, 1.0
+ %b = fadd fast float %y, 2.0
+ %c = fadd fast float %a, %b
+ ret float %c
+; CHECK-LABEL: @test11(
+; CHECK-NOT: fadd float
+; CHECK: fadd fast float
+}
diff --git a/test/Transforms/InstCombine/fpcast.ll b/test/Transforms/InstCombine/fpcast.ll
index 05d1b48d5996..9be66fd42c68 100644
--- a/test/Transforms/InstCombine/fpcast.ll
+++ b/test/Transforms/InstCombine/fpcast.ll
@@ -31,6 +31,15 @@ define half @test4(float %a) {
ret half %c
}
+; CHECK: test4-fast
+define half @test4-fast(float %a) {
+; CHECK: fptrunc
+; CHECK: fsub fast
+ %b = fsub fast float -0.0, %a
+ %c = fptrunc float %b to half
+ ret half %c
+}
+
; CHECK: test5
define half @test5(float %a, float %b, float %c) {
; CHECK: fcmp ogt
diff --git a/test/Transforms/InstCombine/fpextend.ll b/test/Transforms/InstCombine/fpextend.ll
index 70e0c62dd70e..8640cd2b6f5c 100644
--- a/test/Transforms/InstCombine/fpextend.ll
+++ b/test/Transforms/InstCombine/fpextend.ll
@@ -1,3 +1,4 @@
+
; RUN: opt < %s -instcombine -S | not grep fpext
@X = external global float
@Y = external global float
@@ -12,6 +13,18 @@ entry:
ret void
}
+define void @test2() nounwind {
+entry:
+ %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
+ %tmp2 = load float* @Y, align 4 ; <float> [#uses=1]
+ %tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
+ %tmp5 = fmul double %tmp1, %tmp23 ; <double> [#uses=1]
+ %tmp56 = fptrunc double %tmp5 to float ; <float> [#uses=1]
+ store float %tmp56, float* @X, align 4
+ ret void
+}
+
define void @test3() nounwind {
entry:
%tmp = load float* @X, align 4 ; <float> [#uses=1]
@@ -33,4 +46,3 @@ entry:
store float %tmp34, float* @X, align 4
ret void
}
-
diff --git a/test/Transforms/InstCombine/fpextend_x86.ll b/test/Transforms/InstCombine/fpextend_x86.ll
new file mode 100644
index 000000000000..e012551ebdf4
--- /dev/null
+++ b/test/Transforms/InstCombine/fpextend_x86.ll
@@ -0,0 +1,57 @@
+; RUN: opt < %s -instcombine -mtriple=x86_64-apple-macosx -S | FileCheck %s
+target triple = "x86_64-apple-macosx"
+
+define double @test1(double %a, double %b) nounwind {
+ %wa = fpext double %a to x86_fp80
+ %wb = fpext double %b to x86_fp80
+ %wr = fadd x86_fp80 %wa, %wb
+ %r = fptrunc x86_fp80 %wr to double
+ ret double %r
+; CHECK: test1
+; CHECK: fadd x86_fp80
+; CHECK: ret
+}
+
+define double @test2(double %a, double %b) nounwind {
+ %wa = fpext double %a to x86_fp80
+ %wb = fpext double %b to x86_fp80
+ %wr = fsub x86_fp80 %wa, %wb
+ %r = fptrunc x86_fp80 %wr to double
+ ret double %r
+; CHECK: test2
+; CHECK: fsub x86_fp80
+; CHECK: ret
+}
+
+define double @test3(double %a, double %b) nounwind {
+ %wa = fpext double %a to x86_fp80
+ %wb = fpext double %b to x86_fp80
+ %wr = fmul x86_fp80 %wa, %wb
+ %r = fptrunc x86_fp80 %wr to double
+ ret double %r
+; CHECK: test3
+; CHECK: fmul x86_fp80
+; CHECK: ret
+}
+
+define double @test4(double %a, half %b) nounwind {
+ %wa = fpext double %a to x86_fp80
+ %wb = fpext half %b to x86_fp80
+ %wr = fmul x86_fp80 %wa, %wb
+ %r = fptrunc x86_fp80 %wr to double
+ ret double %r
+; CHECK: test4
+; CHECK: fmul double
+; CHECK: ret
+}
+
+define double @test5(double %a, double %b) nounwind {
+ %wa = fpext double %a to x86_fp80
+ %wb = fpext double %b to x86_fp80
+ %wr = fdiv x86_fp80 %wa, %wb
+ %r = fptrunc x86_fp80 %wr to double
+ ret double %r
+; CHECK: test5
+; CHECK: fdiv x86_fp80
+; CHECK: ret
+}
diff --git a/test/Transforms/InstCombine/fprintf-1.ll b/test/Transforms/InstCombine/fprintf-1.ll
index 3f6a314e3472..6741345839a0 100644
--- a/test/Transforms/InstCombine/fprintf-1.ll
+++ b/test/Transforms/InstCombine/fprintf-1.ll
@@ -56,18 +56,18 @@ define void @test_simplify4(%FILE* %fp) {
; CHECK-IPRINTF-LABEL: @test_simplify4(
%fmt = getelementptr [3 x i8]* @percent_d, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt, i32 187)
-; CHECK-NEXT-IPRINTF: call i32 (%FILE*, i8*, ...)* @fiprintf(%FILE* %fp, i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
+; CHECK-IPRINTF-NEXT: call i32 (%FILE*, i8*, ...)* @fiprintf(%FILE* %fp, i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
ret void
-; CHECK-NEXT-IPRINTF: ret void
+; CHECK-IPRINTF-NEXT: ret void
}
define void @test_no_simplify1(%FILE* %fp) {
; CHECK-IPRINTF-LABEL: @test_no_simplify1(
%fmt = getelementptr [3 x i8]* @percent_f, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt, double 1.87)
-; CHECK-NEXT-IPRINTF: call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
+; CHECK-IPRINTF-NEXT: call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
ret void
-; CHECK-NEXT-IPRINTF: ret void
+; CHECK-IPRINTF-NEXT: ret void
}
define void @test_no_simplify2(%FILE* %fp, double %d) {
diff --git a/test/Transforms/InstCombine/gep-addrspace.ll b/test/Transforms/InstCombine/gep-addrspace.ll
index 24c355d817e5..29511a3c6677 100644
--- a/test/Transforms/InstCombine/gep-addrspace.ll
+++ b/test/Transforms/InstCombine/gep-addrspace.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S
+; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-pc-win32"
@@ -17,3 +17,18 @@ ST:
ret void
}
+@array = internal addrspace(3) global [256 x float] zeroinitializer, align 4
+@scalar = internal addrspace(3) global float 0.000000e+00, align 4
+
+define void @keep_necessary_addrspacecast(i64 %i, float** %out0, float** %out1) {
+entry:
+; CHECK-LABEL: @keep_necessary_addrspacecast
+ %0 = getelementptr [256 x float]* addrspacecast ([256 x float] addrspace(3)* @array to [256 x float]*), i64 0, i64 %i
+; CHECK: addrspacecast float addrspace(3)* %{{[0-9]+}} to float*
+ %1 = getelementptr [0 x float]* addrspacecast (float addrspace(3)* @scalar to [0 x float]*), i64 0, i64 %i
+; CHECK: addrspacecast float addrspace(3)* %{{[0-9]+}} to float*
+ store float* %0, float** %out0, align 4
+ store float* %1, float** %out1, align 4
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/gepphigep.ll b/test/Transforms/InstCombine/gepphigep.ll
new file mode 100644
index 000000000000..9aab609901e2
--- /dev/null
+++ b/test/Transforms/InstCombine/gepphigep.ll
@@ -0,0 +1,56 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+%struct1 = type { %struct2*, i32, i32, i32 }
+%struct2 = type { i32, i32 }
+
+define i32 @test1(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
+bb:
+ %tmp = getelementptr inbounds %struct1* %dm, i64 0, i32 0
+ %tmp1 = load %struct2** %tmp, align 8
+ br i1 %tmp4, label %bb1, label %bb2
+
+bb1:
+ %tmp10 = getelementptr inbounds %struct2* %tmp1, i64 %tmp9
+ %tmp11 = getelementptr inbounds %struct2* %tmp10, i64 0, i32 0
+ store i32 0, i32* %tmp11, align 4
+ br label %bb3
+
+bb2:
+ %tmp20 = getelementptr inbounds %struct2* %tmp1, i64 %tmp19
+ %tmp21 = getelementptr inbounds %struct2* %tmp20, i64 0, i32 0
+ store i32 0, i32* %tmp21, align 4
+ br label %bb3
+
+bb3:
+ %phi = phi %struct2* [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
+ %tmp24 = getelementptr inbounds %struct2* %phi, i64 0, i32 1
+ %tmp25 = load i32* %tmp24, align 4
+ ret i32 %tmp25
+
+; CHECK-LABEL: @test1(
+; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp9, i32 0
+; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp19, i32 0
+; CHECK: %[[PHI:[0-9A-Za-z]+]] = phi i64 [ %tmp9, %bb1 ], [ %tmp19, %bb2 ]
+; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %[[PHI]], i32 1
+
+}
+
+define i32 @test2(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
+bb:
+ %tmp = getelementptr inbounds %struct1* %dm, i64 0, i32 0
+ %tmp1 = load %struct2** %tmp, align 8
+ %tmp10 = getelementptr inbounds %struct2* %tmp1, i64 %tmp9
+ %tmp11 = getelementptr inbounds %struct2* %tmp10, i64 0, i32 0
+ store i32 0, i32* %tmp11, align 4
+ %tmp20 = getelementptr inbounds %struct2* %tmp1, i64 %tmp19
+ %tmp21 = getelementptr inbounds %struct2* %tmp20, i64 0, i32 0
+ store i32 0, i32* %tmp21, align 4
+ %tmp24 = getelementptr inbounds %struct2* %tmp10, i64 0, i32 1
+ %tmp25 = load i32* %tmp24, align 4
+ ret i32 %tmp25
+
+; CHECK-LABEL: @test2(
+; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp9, i32 0
+; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp19, i32 0
+; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp9, i32 1
+}
diff --git a/test/Transforms/InstCombine/getelementptr.ll b/test/Transforms/InstCombine/getelementptr.ll
index c29a7dccb8ee..3240c6d2a4d0 100644
--- a/test/Transforms/InstCombine/getelementptr.ll
+++ b/test/Transforms/InstCombine/getelementptr.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:16:16-p2:32:32:32"
+target datalayout = "e-p:64:64-p1:16:16-p2:32:32:32-p3:64:64:64"
%intstruct = type { i32 }
%pair = type { i32, i32 }
@@ -728,6 +728,20 @@ define i64 @test_gep_bitcast_array_same_size_element([100 x double]* %arr, i64 %
ret i64 %x
}
+; gep should be done in the original address space.
+define i64 @test_gep_bitcast_array_same_size_element_addrspacecast([100 x double]* %arr, i64 %N) {
+; CHECK-LABEL: @test_gep_bitcast_array_same_size_element_addrspacecast(
+; CHECK: getelementptr [100 x double]* %arr, i64 0, i64 %V
+; CHECK-NEXT: bitcast double*
+; CHECK-NEXT: %t = addrspacecast i64*
+; CHECK: load i64 addrspace(3)* %t
+ %cast = addrspacecast [100 x double]* %arr to i64 addrspace(3)*
+ %V = mul i64 %N, 8
+ %t = getelementptr i64 addrspace(3)* %cast, i64 %V
+ %x = load i64 addrspace(3)* %t
+ ret i64 %x
+}
+
; The element size of the array is different from the element size of the pointer
define i8 @test_gep_bitcast_array_different_size_element([100 x double]* %arr, i64 %N) {
; CHECK-LABEL: @test_gep_bitcast_array_different_size_element(
@@ -789,4 +803,25 @@ define i16 @test41([3 x i32] addrspace(1)* %array) {
; CHECK-NEXT: ret i16 8
}
+define i32 addrspace(1)* @ascast_0_gep(i32* %p) nounwind {
+; CHECK-LABEL: @ascast_0_gep(
+; CHECK-NOT: getelementptr
+; CHECK: ret
+ %gep = getelementptr i32* %p, i32 0
+ %x = addrspacecast i32* %gep to i32 addrspace(1)*
+ ret i32 addrspace(1)* %x
+}
+
+; Do not merge the GEP and the addrspacecast, because it would undo the
+; addrspacecast canonicalization.
+define i32 addrspace(1)* @ascast_0_0_gep([128 x i32]* %p) nounwind {
+; CHECK-LABEL: @ascast_0_0_gep(
+; CHECK-NEXT: getelementptr [128 x i32]
+; CHECK-NEXT: addrspacecast i32*
+; CHECK-NEXT: ret i32 addrspace(1)*
+ %gep = getelementptr [128 x i32]* %p, i32 0, i32 0
+ %x = addrspacecast i32* %gep to i32 addrspace(1)*
+ ret i32 addrspace(1)* %x
+}
+
; CHECK: attributes [[NUW]] = { nounwind }
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index 12a4744cc0fe..26e144f93a57 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -1,7 +1,6 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
-target datalayout =
-"e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target datalayout = "e-p:64:64:64-p1:16:16:16-p2:32:32:32-p3:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define i32 @test1(i32 %X) {
entry:
@@ -166,6 +165,14 @@ define i1 @test17(i32 %x) nounwind {
; CHECK-NEXT: %cmp = icmp ne i32 %x, 3
}
+define i1 @test17a(i32 %x) nounwind {
+ %shl = shl i32 1, %x
+ %and = and i32 %shl, 7
+ %cmp = icmp eq i32 %and, 0
+ ret i1 %cmp
+; CHECK-LABEL: @test17a(
+; CHECK-NEXT: %cmp = icmp ugt i32 %x, 2
+}
define i1 @test18(i32 %x) nounwind {
%sh = lshr i32 8, %x
@@ -194,6 +201,15 @@ define i1 @test20(i32 %x) nounwind {
; CHECK-NEXT: %cmp = icmp eq i32 %x, 3
}
+define i1 @test20a(i32 %x) nounwind {
+ %shl = shl i32 1, %x
+ %and = and i32 %shl, 7
+ %cmp = icmp ne i32 %and, 0
+ ret i1 %cmp
+; CHECK-LABEL: @test20a(
+; CHECK-NEXT: %cmp = icmp ult i32 %x, 3
+}
+
define i1 @test21(i8 %x, i8 %y) {
; CHECK-LABEL: @test21(
; CHECK-NOT: or i8
@@ -657,6 +673,49 @@ define i1 @test60_as1(i8 addrspace(1)* %foo, i64 %i, i64 %j) {
; CHECK-NEXT: ret i1
}
+; Same as test60, but look through an addrspacecast instead of a
+; bitcast. This uses the same sized addrspace.
+define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
+ %bit = addrspacecast i8* %foo to i32 addrspace(3)*
+ %gep1 = getelementptr inbounds i32 addrspace(3)* %bit, i64 %i
+ %gep2 = getelementptr inbounds i8* %foo, i64 %j
+ %cast1 = addrspacecast i32 addrspace(3)* %gep1 to i8*
+ %cmp = icmp ult i8* %cast1, %gep2
+ ret i1 %cmp
+; CHECK-LABEL: @test60_addrspacecast(
+; CHECK-NEXT: %gep1.idx = shl nuw i64 %i, 2
+; CHECK-NEXT: icmp slt i64 %gep1.idx, %j
+; CHECK-NEXT: ret i1
+}
+
+define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
+ %bit = addrspacecast i8* %foo to i32 addrspace(1)*
+ %gep1 = getelementptr inbounds i32 addrspace(1)* %bit, i16 %i
+ %gep2 = getelementptr inbounds i8* %foo, i64 %j
+ %cast1 = addrspacecast i32 addrspace(1)* %gep1 to i8*
+ %cmp = icmp ult i8* %cast1, %gep2
+ ret i1 %cmp
+; CHECK-LABEL: @test60_addrspacecast_smaller(
+; CHECK-NEXT: %gep1.idx = shl nuw i16 %i, 2
+; CHECK-NEXT: trunc i64 %j to i16
+; CHECK-NEXT: icmp sgt i16 %1, %gep1.idx
+; CHECK-NEXT: ret i1
+}
+
+define i1 @test60_addrspacecast_larger(i8 addrspace(1)* %foo, i32 %i, i16 %j) {
+ %bit = addrspacecast i8 addrspace(1)* %foo to i32 addrspace(2)*
+ %gep1 = getelementptr inbounds i32 addrspace(2)* %bit, i32 %i
+ %gep2 = getelementptr inbounds i8 addrspace(1)* %foo, i16 %j
+ %cast1 = addrspacecast i32 addrspace(2)* %gep1 to i8 addrspace(1)*
+ %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
+ ret i1 %cmp
+; CHECK-LABEL: @test60_addrspacecast_larger(
+; CHECK-NEXT: %gep1.idx = shl nuw i32 %i, 2
+; CHECK-NEXT: trunc i32 %gep1.idx to i16
+; CHECK-NEXT: icmp slt i16 %1, %j
+; CHECK-NEXT: ret i1
+}
+
define i1 @test61(i8* %foo, i64 %i, i64 %j) {
%bit = bitcast i8* %foo to i32*
%gep1 = getelementptr i32* %bit, i64 %i
@@ -1356,3 +1415,12 @@ define i1 @icmp_ashr_ashr_ne(i32 %a, i32 %b) nounwind {
%z = icmp ne i32 %x, %y
ret i1 %z
}
+
+; CHECK-LABEL: @icmp_neg_cst_slt
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %a, 10
+; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @icmp_neg_cst_slt(i32 %a) {
+ %1 = sub nsw i32 0, %a
+ %2 = icmp slt i32 %1, -10
+ ret i1 %2
+}
diff --git a/test/Transforms/InstCombine/insert-extract-shuffle.ll b/test/Transforms/InstCombine/insert-extract-shuffle.ll
new file mode 100644
index 000000000000..8929c82def7b
--- /dev/null
+++ b/test/Transforms/InstCombine/insert-extract-shuffle.ll
@@ -0,0 +1,37 @@
+; RUN: opt -S -instcombine %s | FileCheck %s
+
+define <1 x i8> @test1(<8 x i8> %in) {
+; CHECK-LABEL: @test1
+; CHECK: shufflevector <8 x i8> %in, <8 x i8> undef, <1 x i32> <i32 5>
+ %val = extractelement <8 x i8> %in, i32 5
+ %vec = insertelement <1 x i8> undef, i8 %val, i32 0
+ ret <1 x i8> %vec
+}
+
+define <4 x i16> @test2(<8 x i16> %in, <8 x i16> %in2) {
+; CHECK-LABEL: @test2
+; CHECK: shufflevector <8 x i16> %in2, <8 x i16> %in, <4 x i32> <i32 11, i32 9, i32 0, i32 10>
+ %elt0 = extractelement <8 x i16> %in, i32 3
+ %elt1 = extractelement <8 x i16> %in, i32 1
+ %elt2 = extractelement <8 x i16> %in2, i32 0
+ %elt3 = extractelement <8 x i16> %in, i32 2
+
+ %vec.0 = insertelement <4 x i16> undef, i16 %elt0, i32 0
+ %vec.1 = insertelement <4 x i16> %vec.0, i16 %elt1, i32 1
+ %vec.2 = insertelement <4 x i16> %vec.1, i16 %elt2, i32 2
+ %vec.3 = insertelement <4 x i16> %vec.2, i16 %elt3, i32 3
+
+ ret <4 x i16> %vec.3
+}
+
+define <2 x i64> @test_vcopyq_lane_p64(<2 x i64> %a, <1 x i64> %b) #0 {
+; CHECK-LABEL: @test_vcopyq_lane_p64
+; CHECK: extractelement
+; CHECK: insertelement
+; CHECK-NOT: shufflevector
+entry:
+ %elt = extractelement <1 x i64> %b, i32 0
+ %res = insertelement <2 x i64> %a, i64 %elt, i32 1
+ ret <2 x i64> %res
+}
+
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index 91c44704ce78..9b58d9386f58 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -3,6 +3,7 @@
%overflow.result = type {i8, i1}
declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
declare double @llvm.powi.f64(double, i32) nounwind readonly
declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
@@ -89,6 +90,18 @@ define i8 @uaddtest7(i8 %A, i8 %B) {
; CHECK-NEXT: ret i8 %z
}
+; PR20194
+define { i32, i1 } @saddtest1(i8 %a, i8 %b) {
+ %A = sext i8 %a to i32
+ %B = sext i8 %b to i32
+ %x = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %A, i32 %B)
+ ret { i32, i1 } %x
+; CHECK-LABEL: @saddtest1
+; CHECK: %x = add nsw i32 %A, %B
+; CHECK-NEXT: %1 = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 %x, 0
+; CHECK-NEXT: ret { i32, i1 } %1
+}
+
define i8 @umultest1(i8 %A, i1* %overflowPtr) {
%x = call %overflow.result @llvm.umul.with.overflow.i8(i8 0, i8 %A)
diff --git a/test/Transforms/InstCombine/load-addrspace-cast.ll b/test/Transforms/InstCombine/load-addrspace-cast.ll
new file mode 100644
index 000000000000..fd6339cc9262
--- /dev/null
+++ b/test/Transforms/InstCombine/load-addrspace-cast.ll
@@ -0,0 +1,12 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+target datalayout = "e-p:64:64:64-n8:16:32:64"
+
+define i32* @pointer_to_addrspace_pointer(i32 addrspace(1)** %x) nounwind {
+; CHECK-LABEL: @pointer_to_addrspace_pointer(
+; CHECK: load
+; CHECK: addrspacecast
+ %y = bitcast i32 addrspace(1)** %x to i32**
+ %z = load i32** %y
+ ret i32* %z
+}
+
diff --git a/test/Transforms/InstCombine/load.ll b/test/Transforms/InstCombine/load.ll
index d11e08e10de9..c8ce70a5c03a 100644
--- a/test/Transforms/InstCombine/load.ll
+++ b/test/Transforms/InstCombine/load.ll
@@ -1,6 +1,7 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
; This test makes sure that these instructions are properly eliminated.
-;
-; RUN: opt < %s -instcombine -S | not grep load
+
@X = constant i32 42 ; <i32*> [#uses=2]
@X2 = constant i32 47 ; <i32*> [#uses=1]
@@ -10,47 +11,63 @@
@GLOBAL = internal constant [4 x i32] zeroinitializer
+; CHECK-LABEL: @test1(
+; CHECK-NOT: load
define i32 @test1() {
%B = load i32* @X ; <i32> [#uses=1]
ret i32 %B
}
+; CHECK-LABEL: @test2(
+; CHECK-NOT: load
define float @test2() {
%A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
%B = load float* %A ; <float> [#uses=1]
ret float %B
}
+; CHECK-LABEL: @test3(
+; CHECK-NOT: load
define i32 @test3() {
%A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
%B = load i32* %A ; <i32> [#uses=1]
ret i32 %B
}
+; CHECK-LABEL: @test4(
+; CHECK-NOT: load
define i32 @test4() {
%A = getelementptr [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
%B = load i32* %A ; <i32> [#uses=1]
ret i32 %B
}
+; CHECK-LABEL: @test5(
+; CHECK-NOT: load
define i32 @test5(i1 %C) {
%Y = select i1 %C, i32* @X, i32* @X2 ; <i32*> [#uses=1]
%Z = load i32* %Y ; <i32> [#uses=1]
ret i32 %Z
}
+; CHECK-LABEL: @test7(
+; CHECK-NOT: load
define i32 @test7(i32 %X) {
%V = getelementptr i32* null, i32 %X ; <i32*> [#uses=1]
%R = load i32* %V ; <i32> [#uses=1]
ret i32 %R
}
+; CHECK-LABEL: @test8(
+; CHECK-NOT: load
define i32 @test8(i32* %P) {
store i32 1, i32* %P
%X = load i32* %P ; <i32> [#uses=1]
ret i32 %X
}
+; CHECK-LABEL: @test9(
+; CHECK-NOT: load
define i32 @test9(i32* %P) {
%X = load i32* %P ; <i32> [#uses=1]
%Y = load i32* %P ; <i32> [#uses=1]
@@ -58,6 +75,8 @@ define i32 @test9(i32* %P) {
ret i32 %Z
}
+; CHECK-LABEL: @test10(
+; CHECK-NOT: load
define i32 @test10(i1 %C.upgrd.1, i32* %P, i32* %Q) {
br i1 %C.upgrd.1, label %T, label %F
T: ; preds = %0
@@ -72,6 +91,8 @@ C: ; preds = %F, %T
ret i32 %V
}
+; CHECK-LABEL: @test11(
+; CHECK-NOT: load
define double @test11(double* %p) {
%t0 = getelementptr double* %p, i32 1
store double 2.0, double* %t0
@@ -80,19 +101,20 @@ define double @test11(double* %p) {
ret double %x
}
+; CHECK-LABEL: @test12(
+; CHECK-NOT: load
define i32 @test12(i32* %P) {
- %A = alloca i32
- store i32 123, i32* %A
- ; Cast the result of the load not the source
- %Q = bitcast i32* %A to i32*
- %V = load i32* %Q
- ret i32 %V
+ %A = alloca i32
+ store i32 123, i32* %A
+ ; Cast the result of the load not the source
+ %Q = bitcast i32* %A to i32*
+ %V = load i32* %Q
+ ret i32 %V
}
+; CHECK-LABEL: @test13(
+; CHECK-NOT: load
define <16 x i8> @test13(<2 x i64> %x) {
-entry:
- %tmp = load <16 x i8> * bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
- ret <16 x i8> %tmp
+ %tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
+ ret <16 x i8> %tmp
}
-
-
diff --git a/test/Transforms/InstCombine/loadstore-alignment.ll b/test/Transforms/InstCombine/loadstore-alignment.ll
index 2263cb20ec5e..e90bdb7cdd99 100644
--- a/test/Transforms/InstCombine/loadstore-alignment.ll
+++ b/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -1,67 +1,117 @@
-; RUN: opt < %s -instcombine -S | grep ", align 16" | count 14
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+; RUN: opt -instcombine -S < %s | FileCheck %s
+target datalayout = "E-p:64:64:64-p1:64:64:64-p2:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
@x = external global <2 x i64>, align 16
@xx = external global [13 x <2 x i64>], align 16
+@x.as2 = external addrspace(2) global <2 x i64>, align 16
+
+; CHECK-LABEL: @static_hem(
+; CHECK: , align 16
define <2 x i64> @static_hem() {
- %t = getelementptr <2 x i64>* @x, i32 7
- %tmp1 = load <2 x i64>* %t, align 1
- ret <2 x i64> %tmp1
+ %t = getelementptr <2 x i64>* @x, i32 7
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+; CHECK-LABEL: @static_hem_addrspacecast(
+; CHECK: , align 16
+define <2 x i64> @static_hem_addrspacecast() {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ %t.asc = addrspacecast <2 x i64>* %t to <2 x i64> addrspace(1)*
+ %tmp1 = load <2 x i64> addrspace(1)* %t.asc, align 1
+ ret <2 x i64> %tmp1
+}
+
+; CHECK-LABEL: @static_hem_addrspacecast_smaller_ptr(
+; CHECK: , align 16
+define <2 x i64> @static_hem_addrspacecast_smaller_ptr() {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ %t.asc = addrspacecast <2 x i64>* %t to <2 x i64> addrspace(2)*
+ %tmp1 = load <2 x i64> addrspace(2)* %t.asc, align 1
+ ret <2 x i64> %tmp1
+}
+
+; CHECK-LABEL: @static_hem_addrspacecast_larger_ptr(
+; CHECK: , align 16
+define <2 x i64> @static_hem_addrspacecast_larger_ptr() {
+ %t = getelementptr <2 x i64> addrspace(2)* @x.as2, i32 7
+ %t.asc = addrspacecast <2 x i64> addrspace(2)* %t to <2 x i64> addrspace(1)*
+ %tmp1 = load <2 x i64> addrspace(1)* %t.asc, align 1
+ ret <2 x i64> %tmp1
}
+; CHECK-LABEL: @hem(
+; CHECK: , align 16
define <2 x i64> @hem(i32 %i) {
- %t = getelementptr <2 x i64>* @x, i32 %i
- %tmp1 = load <2 x i64>* %t, align 1
- ret <2 x i64> %tmp1
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
}
+; CHECK-LABEL: @hem_2d(
+; CHECK: , align 16
define <2 x i64> @hem_2d(i32 %i, i32 %j) {
- %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
- %tmp1 = load <2 x i64>* %t, align 1
- ret <2 x i64> %tmp1
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
}
+; CHECK-LABEL: @foo(
+; CHECK: , align 16
define <2 x i64> @foo() {
- %tmp1 = load <2 x i64>* @x, align 1
- ret <2 x i64> %tmp1
+ %tmp1 = load <2 x i64>* @x, align 1
+ ret <2 x i64> %tmp1
}
+; CHECK-LABEL: @bar(
+; CHECK: , align 16
+; CHECK: , align 16
define <2 x i64> @bar() {
- %t = alloca <2 x i64>
- call void @kip(<2 x i64>* %t)
- %tmp1 = load <2 x i64>* %t, align 1
- ret <2 x i64> %tmp1
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
}
+; CHECK-LABEL: @static_hem_store(
+; CHECK: , align 16
define void @static_hem_store(<2 x i64> %y) {
- %t = getelementptr <2 x i64>* @x, i32 7
- store <2 x i64> %y, <2 x i64>* %t, align 1
- ret void
+ %t = getelementptr <2 x i64>* @x, i32 7
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
}
+; CHECK-LABEL: @hem_store(
+; CHECK: , align 16
define void @hem_store(i32 %i, <2 x i64> %y) {
- %t = getelementptr <2 x i64>* @x, i32 %i
- store <2 x i64> %y, <2 x i64>* %t, align 1
- ret void
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
}
+; CHECK-LABEL: @hem_2d_store(
+; CHECK: , align 16
define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
- %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
- store <2 x i64> %y, <2 x i64>* %t, align 1
- ret void
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
}
+; CHECK-LABEL: @foo_store(
+; CHECK: , align 16
define void @foo_store(<2 x i64> %y) {
- store <2 x i64> %y, <2 x i64>* @x, align 1
- ret void
+ store <2 x i64> %y, <2 x i64>* @x, align 1
+ ret void
}
+; CHECK-LABEL: @bar_store(
+; CHECK: , align 16
define void @bar_store(<2 x i64> %y) {
- %t = alloca <2 x i64>
- call void @kip(<2 x i64>* %t)
- store <2 x i64> %y, <2 x i64>* %t, align 1
- ret void
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
}
declare void @kip(<2 x i64>* %t)
diff --git a/test/Transforms/InstCombine/memcpy-from-global.ll b/test/Transforms/InstCombine/memcpy-from-global.ll
index 58793ab431d1..3bc1d36f486d 100644
--- a/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -7,11 +7,11 @@ entry:
%lookupTable = alloca [128 x float], align 16 ; <[128 x float]*> [#uses=5]
%lookupTable1 = bitcast [128 x float]* %lookupTable to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %lookupTable1, i8* bitcast ([128 x float]* @C.0.1248 to i8*), i64 512, i32 16, i1 false)
-
+
; CHECK-LABEL: @test1(
; CHECK-NOT: alloca
; CHECK-NOT: call{{.*}}@llvm.memcpy
-
+
%tmp3 = shl i32 %hash, 2 ; <i32> [#uses=1]
%tmp5 = and i32 %tmp3, 124 ; <i32> [#uses=4]
%tmp753 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp5 ; <float*> [#uses=1]
@@ -37,6 +37,9 @@ entry:
}
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
%T = type { i8, [123 x i8] }
%U = type { i32, i32, i32, i32, i32 }
@@ -64,7 +67,31 @@ define void @test2() {
ret void
}
+define void @test2_addrspacecast() {
+ %A = alloca %T
+ %B = alloca %T
+ %a = addrspacecast %T* %A to i8 addrspace(1)*
+ %b = addrspacecast %T* %B to i8 addrspace(1)*
+
+; CHECK-LABEL: @test2_addrspacecast(
+
+; %A alloca is deleted
+; This doesn't exactly match what test2 does, because folding the type
+; cast into the alloca doesn't work for the addrspacecast yet.
+; CHECK-NEXT: alloca [124 x i8]
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: addrspacecast
+
+; use @G instead of %A
+; CHECK-NEXT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %{{.*}},
+ call void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %b, i8 addrspace(1)* %a, i64 124, i32 4, i1 false)
+ call void @bar_as1(i8 addrspace(1)* %b)
+ ret void
+}
+
declare void @bar(i8*)
+declare void @bar_as1(i8 addrspace(1)*)
;; Should be able to eliminate the alloca.
@@ -78,11 +105,22 @@ define void @test3() {
ret void
}
+define void @test3_addrspacecast() {
+ %A = alloca %T
+ %a = bitcast %T* %A to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i64(i8* %a, i8 addrspace(1)* addrspacecast (%T* @G to i8 addrspace(1)*), i64 124, i32 4, i1 false)
+ call void @bar(i8* %a) readonly
+; CHECK-LABEL: @test3_addrspacecast(
+; CHECK-NEXT: call void @bar(i8* getelementptr inbounds (%T* @G, i64 0, i32 0))
+ ret void
+}
+
+
define void @test4() {
%A = alloca %T
%a = bitcast %T* %A to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
- call void @baz(i8* byval %a)
+ call void @baz(i8* byval %a)
; CHECK-LABEL: @test4(
; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T* @G, i64 0, i32 0))
ret void
@@ -94,7 +132,7 @@ define void @test5() {
%a = bitcast %T* %A to i8*
call void @llvm.lifetime.start(i64 -1, i8* %a)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
- call void @baz(i8* byval %a)
+ call void @baz(i8* byval %a)
; CHECK-LABEL: @test5(
; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T* @G, i64 0, i32 0))
ret void
@@ -135,6 +173,18 @@ define void @test8() {
ret void
}
+
+define void @test8_addrspacecast() {
+ %A = alloca %U, align 16
+ %a = bitcast %U* %A to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i64(i8* %a, i8 addrspace(1)* addrspacecast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 1) to i8 addrspace(1)*), i64 20, i32 4, i1 false)
+ call void @bar(i8* %a) readonly
+; CHECK-LABEL: @test8_addrspacecast(
+; CHECK: llvm.memcpy
+; CHECK: bar
+ ret void
+}
+
define void @test9() {
%A = alloca %U, align 4
%a = bitcast %U* %A to i8*
@@ -144,3 +194,13 @@ define void @test9() {
; CHECK-NEXT: call void @bar(i8* bitcast (%U* getelementptr inbounds ([2 x %U]* @H, i64 0, i64 1) to i8*))
ret void
}
+
+define void @test9_addrspacecast() {
+ %A = alloca %U, align 4
+ %a = bitcast %U* %A to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i64(i8* %a, i8 addrspace(1)* addrspacecast (%U* getelementptr ([2 x %U]* @H, i64 0, i32 1) to i8 addrspace(1)*), i64 20, i32 4, i1 false)
+ call void @bar(i8* %a) readonly
+; CHECK-LABEL: @test9_addrspacecast(
+; CHECK-NEXT: call void @bar(i8* bitcast (%U* getelementptr inbounds ([2 x %U]* @H, i64 0, i64 1) to i8*))
+ ret void
+}
diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
index 94fc1183c55a..d19bedc7a10c 100644
--- a/test/Transforms/InstCombine/mul.ll
+++ b/test/Transforms/InstCombine/mul.ll
@@ -181,3 +181,19 @@ define i32 @test19(i32 %A, i32 %B) {
ret i32 %H
; CHECK: ret i32 0
}
+
+define <2 x i64> @test20(<2 x i64> %A) {
+; CHECK-LABEL: @test20(
+ %B = add <2 x i64> %A, <i64 12, i64 14>
+ %C = mul <2 x i64> %B, <i64 3, i64 2>
+ ret <2 x i64> %C
+; CHECK: mul <2 x i64> %A, <i64 3, i64 2>
+; CHECK: add <2 x i64> %{{.}}, <i64 36, i64 28>
+}
+
+define <2 x i1> @test21(<2 x i1> %A, <2 x i1> %B) {
+; CHECK-LABEL: @test21(
+ %C = mul <2 x i1> %A, %B
+ ret <2 x i1> %C
+; CHECK: %C = and <2 x i1> %A, %B
+}
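+
+; @test21 works because, over i1, multiplication and logical AND agree
+; on all four input combinations (1 * 1 = 1, everything else = 0), so a
+; mul of i1 vectors canonicalizes to an and.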
diff --git a/test/Transforms/InstCombine/onehot_merge.ll b/test/Transforms/InstCombine/onehot_merge.ll
index 51f955c2c248..496d847b5321 100644
--- a/test/Transforms/InstCombine/onehot_merge.ll
+++ b/test/Transforms/InstCombine/onehot_merge.ll
@@ -16,7 +16,7 @@ bb:
;CHECK: @foo1_and
;CHECK: shl i32 1, %c1
-;CHECK-NEXT: shl i32 1, %c2
+;CHECK-NEXT: lshr i32 -2147483648, %c2
;CHECK-NEXT: or i32
;CHECK-NEXT: and i32
;CHECK-NEXT: icmp ne i32 %1, %0
@@ -24,7 +24,7 @@ bb:
define i1 @foo1_and(i32 %k, i32 %c1, i32 %c2) {
bb:
%tmp = shl i32 1, %c1
- %tmp4 = shl i32 1, %c2
+ %tmp4 = lshr i32 -2147483648, %c2
%tmp1 = and i32 %tmp, %k
%tmp2 = icmp eq i32 %tmp1, 0
%tmp5 = and i32 %tmp4, %k
diff --git a/test/Transforms/InstCombine/overflow-mul.ll b/test/Transforms/InstCombine/overflow-mul.ll
new file mode 100644
index 000000000000..cbb2f5f95003
--- /dev/null
+++ b/test/Transforms/InstCombine/overflow-mul.ll
@@ -0,0 +1,175 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+; return mul(zext x, zext y) > MAX
+define i32 @pr4917_1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_1(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ugt i64 %mul64, 4294967295
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
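+
+; The fold in @pr4917_1 rests on a simple equivalence: for unsigned
+; 32-bit %x and %y the 64-bit product (zext %x) * (zext %y) is exact,
+; and it exceeds 4294967295 (UINT32_MAX) if and only if the 32-bit
+; multiply %x * %y wraps. The comparison is therefore exactly the
+; overflow bit of @llvm.umul.with.overflow.i32, and the wide multiply
+; can be dropped.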
+
+; return mul(zext x, zext y) >= MAX+1
+define i32 @pr4917_1a(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_1a(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp uge i64 %mul64, 4294967296
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; mul(zext x, zext y) > MAX
+; the truncated 32-bit product mul(x, y) is also used
+define i32 @pr4917_2(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_2(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ugt i64 %mul64, 4294967295
+; CHECK-DAG: [[VAL:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+ %mul32 = trunc i64 %mul64 to i32
+; CHECK-DAG: [[OVFL:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+ %retval = select i1 %overflow, i32 %mul32, i32 111
+; CHECK: select i1 [[OVFL]], i32 [[VAL]]
+ ret i32 %retval
+}
+
+; return mul(zext x, zext y) > MAX
+; the 64-bit mul result is used by a non-truncate instruction, so no fold
+define i64 @pr4917_3(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_3(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK-NOT: umul.with.overflow.i32
+ %overflow = icmp ugt i64 %mul64, 4294967295
+ %retval = select i1 %overflow, i64 %mul64, i64 111
+ ret i64 %retval
+}
+
+; return mul(zext x, zext y) <= MAX
+define i32 @pr4917_4(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_4(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ule i64 %mul64, 4294967295
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+; CHECK: xor
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; return mul(zext x, zext y) < MAX+1
+define i32 @pr4917_4a(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4917_4a(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+; CHECK-NOT: zext i32
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %overflow = icmp ult i64 %mul64, 4294967296
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+; CHECK: xor
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; the mul operands are zero-extended from different source widths
+define i32 @pr4917_5(i32 %x, i8 %y) nounwind {
+; CHECK-LABEL: @pr4917_5(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i8 %y to i64
+; CHECK: [[Y:%.*]] = zext i8 %y to i32
+ %mul64 = mul i64 %l, %r
+ %overflow = icmp ugt i64 %mul64, 4294967295
+ %mul32 = trunc i64 %mul64 to i32
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 [[Y]])
+; CHECK-DAG: [[VAL:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+; CHECK-DAG: [[OVFL:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+ %retval = select i1 %overflow, i32 %mul32, i32 111
+; CHECK: select i1 [[OVFL]], i32 [[VAL]]
+ ret i32 %retval
+}
+
+; mul(zext x, zext y) != zext trunc mul
+define i32 @pr4918_1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4918_1(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %part32 = trunc i64 %mul64 to i32
+ %part64 = zext i32 %part32 to i64
+ %overflow = icmp ne i64 %mul64, %part64
+; CHECK: [[OVFL:%.*]] = extractvalue { i32, i1 } [[MUL:%.*]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+; mul(zext x, zext y) == zext trunc mul
+define i32 @pr4918_2(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4918_2(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %part32 = trunc i64 %mul64 to i32
+ %part64 = zext i32 %part32 to i64
+ %overflow = icmp eq i64 %mul64, %part64
+; CHECK: extractvalue { i32, i1 } [[MUL]]
+ %retval = zext i1 %overflow to i32
+; CHECK: xor
+ ret i32 %retval
+}
+
+; zext trunc mul != mul(zext x, zext y)
+define i32 @pr4918_3(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: @pr4918_3(
+entry:
+ %l = zext i32 %x to i64
+ %r = zext i32 %y to i64
+ %mul64 = mul i64 %l, %r
+; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+ %part32 = trunc i64 %mul64 to i32
+ %part64 = zext i32 %part32 to i64
+ %overflow = icmp ne i64 %part64, %mul64
+; CHECK: extractvalue { i32, i1 } [[MUL]], 1
+ %retval = zext i1 %overflow to i32
+ ret i32 %retval
+}
+
+define <4 x i32> @pr20113(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: @pr20113
+; CHECK-NOT: mul.with.overflow
+; CHECK: ret
+ %vmovl.i.i726 = zext <4 x i16> %a to <4 x i32>
+ %vmovl.i.i712 = zext <4 x i16> %b to <4 x i32>
+ %mul.i703 = mul <4 x i32> %vmovl.i.i712, %vmovl.i.i726
+ %tmp = icmp sge <4 x i32> %mul.i703, zeroinitializer
+ %vcgez.i = sext <4 x i1> %tmp to <4 x i32>
+ ret <4 x i32> %vcgez.i
+}
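+
+; @pr20113 pins down the vector case: the same zext/mul/icmp pattern on
+; <4 x i32> is left untouched (CHECK-NOT: mul.with.overflow), since this
+; transform only forms the with-overflow intrinsic for scalar integer
+; multiplies.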
diff --git a/test/Transforms/InstCombine/pow-1.ll b/test/Transforms/InstCombine/pow-1.ll
index 9f1d073fe760..fb3b7d796160 100644
--- a/test/Transforms/InstCombine/pow-1.ll
+++ b/test/Transforms/InstCombine/pow-1.ll
@@ -1,6 +1,11 @@
; Test that the pow library call simplifier works correctly.
;
; RUN: opt < %s -instcombine -S | FileCheck %s
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.9 | FileCheck %s --check-prefix=CHECK-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios7.0 | FileCheck %s --check-prefix=CHECK-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.8 | FileCheck %s --check-prefix=CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios6.0 | FileCheck %s --check-prefix=CHECK-NO-EXP10
+; RUN: opt -instcombine -S < %s -mtriple=x86_64-netbsd | FileCheck %s --check-prefix=CHECK-NO-EXP10
; rdar://7251832
; NOTE: The readonly attribute on the pow call should be preserved
@@ -155,13 +160,33 @@ declare double @llvm.pow.f64(double %Val, double %Power)
define double @test_simplify17(double %x) {
; CHECK-LABEL: @test_simplify17(
%retval = call double @llvm.pow.f64(double %x, double 0.5)
-; CHECK-NEXT: [[SQRT:%[a-z0-9]+]] = call double @sqrt(double %x) [[NUW_RO]]
-; CHECK-NEXT: [[FABS:%[a-z0-9]+]] = call double @fabs(double [[SQRT]]) [[NUW_RO]]
+; CHECK-NEXT: [[SQRT:%[a-z0-9]+]] = call double @sqrt(double %x)
+; CHECK-NEXT: [[FABS:%[a-z0-9]+]] = call double @fabs(double [[SQRT]])
; CHECK-NEXT: [[FCMP:%[a-z0-9]+]] = fcmp oeq double %x, 0xFFF0000000000000
; CHECK-NEXT: [[SELECT:%[a-z0-9]+]] = select i1 [[FCMP]], double 0x7FF0000000000000, double [[FABS]]
ret double %retval
; CHECK-NEXT: ret double [[SELECT]]
}
+; Check pow(10.0, x) -> __exp10(x) on OS X 10.9+ and iOS 7.0+.
+
+define float @test_simplify18(float %x) {
+; CHECK-LABEL: @test_simplify18(
+ %retval = call float @powf(float 10.0, float %x)
+; CHECK-EXP10: [[EXP10F:%[_a-z0-9]+]] = call float @__exp10f(float %x) [[NUW_RO:#[0-9]+]]
+ ret float %retval
+; CHECK-EXP10: ret float [[EXP10F]]
+; CHECK-NO-EXP10: call float @powf
+}
+
+define double @test_simplify19(double %x) {
+; CHECK-LABEL: @test_simplify19(
+ %retval = call double @pow(double 10.0, double %x)
+; CHECK-EXP10: [[EXP10:%[_a-z0-9]+]] = call double @__exp10(double %x) [[NUW_RO]]
+ ret double %retval
+; CHECK-EXP10: ret double [[EXP10]]
+; CHECK-NO-EXP10: call double @pow
+}
+
; CHECK: attributes [[NUW_RO]] = { nounwind readonly }
diff --git a/test/Transforms/InstCombine/pr19420.ll b/test/Transforms/InstCombine/pr19420.ll
new file mode 100644
index 000000000000..23fa0a409745
--- /dev/null
+++ b/test/Transforms/InstCombine/pr19420.ll
@@ -0,0 +1,67 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL
+; CHECK: mul <4 x i32> %in, <i32 0, i32 -32, i32 0, i32 -32>
+; CHECK-NEXT: ret
+define <4 x i32> @test_FoldShiftByConstant_CreateSHL(<4 x i32> %in) {
+ %mul.i = mul <4 x i32> %in, <i32 0, i32 -1, i32 0, i32 -1>
+ %vshl_n = shl <4 x i32> %mul.i, <i32 5, i32 5, i32 5, i32 5>
+ ret <4 x i32> %vshl_n
+}
+
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateSHL2
+; CHECK: mul <8 x i16> %in, <i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32, i16 0, i16 -32>
+; CHECK-NEXT: ret
+define <8 x i16> @test_FoldShiftByConstant_CreateSHL2(<8 x i16> %in) {
+ %mul.i = mul <8 x i16> %in, <i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1>
+ %vshl_n = shl <8 x i16> %mul.i, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+ ret <8 x i16> %vshl_n
+}
+
+; CHECK-LABEL: @test_FoldShiftByConstant_CreateAnd
+; CHECK: mul <16 x i8> %in0, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
+; CHECK-NEXT: and <16 x i8> %vsra_n2, <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
+; CHECK-NEXT: ret
+define <16 x i8> @test_FoldShiftByConstant_CreateAnd(<16 x i8> %in0) {
+ %vsra_n = ashr <16 x i8> %in0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ %tmp = add <16 x i8> %in0, %vsra_n
+ %vshl_n = shl <16 x i8> %tmp, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+ ret <16 x i8> %vshl_n
+}
+
+
+define i32 @bar(i32 %x, i32 %y) {
+ %a = lshr i32 %x, 4
+ %b = add i32 %a, %y
+ %c = shl i32 %b, 4
+ ret i32 %c
+}
+
+define <2 x i32> @bar_v2i32(<2 x i32> %x, <2 x i32> %y) {
+ %a = lshr <2 x i32> %x, <i32 5, i32 5>
+ %b = add <2 x i32> %a, %y
+ %c = shl <2 x i32> %b, <i32 5, i32 5>
+ ret <2 x i32> %c
+}
+
+
+
+
+define i32 @foo(i32 %x, i32 %y) {
+ %a = lshr i32 %x, 4
+ %b = and i32 %a, 8
+ %c = add i32 %b, %y
+ %d = shl i32 %c, 4
+ ret i32 %d
+}
+
+define <2 x i32> @foo_v2i32(<2 x i32> %x, <2 x i32> %y) {
+ %a = lshr <2 x i32> %x, <i32 4, i32 4>
+ %b = and <2 x i32> %a, <i32 8, i32 8>
+ %c = add <2 x i32> %b, %y
+ %d = shl <2 x i32> %c, <i32 4, i32 4>
+ ret <2 x i32> %d
+}
+
+
+
diff --git a/test/Transforms/InstCombine/pr20059.ll b/test/Transforms/InstCombine/pr20059.ll
new file mode 100644
index 000000000000..0ef315936ff2
--- /dev/null
+++ b/test/Transforms/InstCombine/pr20059.ll
@@ -0,0 +1,16 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+; In PR20059 ( http://llvm.org/pr20059 ), the shufflevector operations feeding an srem
+; were reordered/removed. That is not a valid optimization because it may introduce a
+; trap on division by zero.
+
+; CHECK-LABEL: @do_not_reorder
+; CHECK: %splat1 = shufflevector <4 x i32> %p1, <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: %splat2 = shufflevector <4 x i32> %p2, <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: %retval = srem <4 x i32> %splat1, %splat2
+define <4 x i32> @do_not_reorder(<4 x i32> %p1, <4 x i32> %p2) {
+ %splat1 = shufflevector <4 x i32> %p1, <4 x i32> undef, <4 x i32> zeroinitializer
+ %splat2 = shufflevector <4 x i32> %p2, <4 x i32> undef, <4 x i32> zeroinitializer
+ %retval = srem <4 x i32> %splat1, %splat2
+ ret <4 x i32> %retval
+}
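+
+; A concrete (hypothetical) example of why the reordering is unsafe:
+; with %p2 = <i32 7, i32 0, i32 0, i32 0> the splat of lane 0 is
+; <7, 7, 7, 7> and the srem above never divides by zero, but computing
+; srem %p1, %p2 first and splatting afterwards would divide by the zero
+; lanes and may trap.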
diff --git a/test/Transforms/InstCombine/pr20079.ll b/test/Transforms/InstCombine/pr20079.ll
new file mode 100644
index 000000000000..ce9c4deb06a0
--- /dev/null
+++ b/test/Transforms/InstCombine/pr20079.ll
@@ -0,0 +1,9 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+@b = internal global [1 x i32] zeroinitializer, align 4
+@c = internal global i32 0, align 4
+
+; CHECK-LABEL: @fn1
+; CHECK-NEXT: ret i32 0
+define i32 @fn1(i32 %a) {
+ ret i32 0
+}
diff --git a/test/Transforms/InstCombine/printf-1.ll b/test/Transforms/InstCombine/printf-1.ll
index c98ddd55df10..483bc7a6b079 100644
--- a/test/Transforms/InstCombine/printf-1.ll
+++ b/test/Transforms/InstCombine/printf-1.ll
@@ -87,18 +87,18 @@ define void @test_simplify7() {
; CHECK-IPRINTF-LABEL: @test_simplify7(
%fmt = getelementptr [3 x i8]* @percent_d, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt, i32 187)
-; CHECK-NEXT-IPRINTF: call i32 (i8*, ...)* @iprintf(i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
+; CHECK-IPRINTF-NEXT: call i32 (i8*, ...)* @iprintf(i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
ret void
-; CHECK-NEXT-IPRINTF: ret void
+; CHECK-IPRINTF-NEXT: ret void
}
define void @test_no_simplify1() {
; CHECK-IPRINTF-LABEL: @test_no_simplify1(
%fmt = getelementptr [3 x i8]* @percent_f, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt, double 1.87)
-; CHECK-NEXT-IPRINTF: call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
+; CHECK-IPRINTF-NEXT: call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
ret void
-; CHECK-NEXT-IPRINTF: ret void
+; CHECK-IPRINTF-NEXT: ret void
}
define void @test_no_simplify2(i8* %fmt, double %d) {
diff --git a/test/Transforms/InstCombine/r600-intrinsics.ll b/test/Transforms/InstCombine/r600-intrinsics.ll
new file mode 100644
index 000000000000..1db6b0d28bf5
--- /dev/null
+++ b/test/Transforms/InstCombine/r600-intrinsics.ll
@@ -0,0 +1,47 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+declare float @llvm.AMDGPU.rcp.f32(float) nounwind readnone
+declare double @llvm.AMDGPU.rcp.f64(double) nounwind readnone
+
+; CHECK-LABEL: @test_constant_fold_rcp_f32_1
+; CHECK-NEXT: ret float 1.000000e+00
+define float @test_constant_fold_rcp_f32_1() nounwind {
+ %val = call float @llvm.AMDGPU.rcp.f32(float 1.0) nounwind readnone
+ ret float %val
+}
+
+; CHECK-LABEL: @test_constant_fold_rcp_f64_1
+; CHECK-NEXT: ret double 1.000000e+00
+define double @test_constant_fold_rcp_f64_1() nounwind {
+ %val = call double @llvm.AMDGPU.rcp.f64(double 1.0) nounwind readnone
+ ret double %val
+}
+
+; CHECK-LABEL: @test_constant_fold_rcp_f32_half
+; CHECK-NEXT: ret float 2.000000e+00
+define float @test_constant_fold_rcp_f32_half() nounwind {
+ %val = call float @llvm.AMDGPU.rcp.f32(float 0.5) nounwind readnone
+ ret float %val
+}
+
+; CHECK-LABEL: @test_constant_fold_rcp_f64_half
+; CHECK-NEXT: ret double 2.000000e+00
+define double @test_constant_fold_rcp_f64_half() nounwind {
+ %val = call double @llvm.AMDGPU.rcp.f64(double 0.5) nounwind readnone
+ ret double %val
+}
+
+; CHECK-LABEL: @test_constant_fold_rcp_f32_43
+; CHECK-NEXT: call float @llvm.AMDGPU.rcp.f32(float 4.300000e+01)
+define float @test_constant_fold_rcp_f32_43() nounwind {
+ %val = call float @llvm.AMDGPU.rcp.f32(float 4.300000e+01) nounwind readnone
+ ret float %val
+}
+
+; CHECK-LABEL: @test_constant_fold_rcp_f64_43
+; CHECK-NEXT: call double @llvm.AMDGPU.rcp.f64(double 4.300000e+01)
+define double @test_constant_fold_rcp_f64_43() nounwind {
+ %val = call double @llvm.AMDGPU.rcp.f64(double 4.300000e+01) nounwind readnone
+ ret double %val
+}
+
diff --git a/test/Transforms/InstCombine/rem.ll b/test/Transforms/InstCombine/rem.ll
index 22fd90bf7a75..0595a67393a6 100644
--- a/test/Transforms/InstCombine/rem.ll
+++ b/test/Transforms/InstCombine/rem.ll
@@ -127,7 +127,7 @@ define i64 @test14(i64 %x, i32 %y) {
; CHECK-LABEL: @test14(
; CHECK-NEXT: [[SHL:%.*]] = shl i32 1, %y
; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[SHL]] to i64
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ZEXT]], -1
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[ZEXT]], -1
; CHECK-NEXT: [[AND:%.*]] = and i64 [[ADD]], %x
; CHECK-NEXT: ret i64 [[AND]]
%shl = shl i32 1, %y
@@ -204,3 +204,12 @@ define i32 @test19(i32 %x, i32 %y) {
%E = urem i32 %y, %D
ret i32 %E
}
+
+define <2 x i64> @test20(<2 x i64> %X, <2 x i1> %C) {
+; CHECK-LABEL: @test20(
+; CHECK-NEXT: select <2 x i1> %C, <2 x i64> <i64 1, i64 2>, <2 x i64> zeroinitializer
+; CHECK-NEXT: ret <2 x i64>
+ %V = select <2 x i1> %C, <2 x i64> <i64 1, i64 2>, <2 x i64> <i64 8, i64 9>
+ %R = urem <2 x i64> %V, <i64 2, i64 3>
+ ret <2 x i64> %R
+}
diff --git a/test/Transforms/InstCombine/round.ll b/test/Transforms/InstCombine/round.ll
new file mode 100644
index 000000000000..ecc62dda36ba
--- /dev/null
+++ b/test/Transforms/InstCombine/round.ll
@@ -0,0 +1,90 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare float @llvm.round.f32(float) #0
+declare double @llvm.round.f64(double) #0
+
+; CHECK-LABEL: @constant_fold_round_f32_01
+; CHECK-NEXT: ret float 1.000000e+00
+define float @constant_fold_round_f32_01() #0 {
+ %x = call float @llvm.round.f32(float 1.25) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f32_02
+; CHECK-NEXT: ret float -1.000000e+00
+define float @constant_fold_round_f32_02() #0 {
+ %x = call float @llvm.round.f32(float -1.25) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f32_03
+; CHECK-NEXT: ret float 2.000000e+00
+define float @constant_fold_round_f32_03() #0 {
+ %x = call float @llvm.round.f32(float 1.5) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f32_04
+; CHECK-NEXT: ret float -2.000000e+00
+define float @constant_fold_round_f32_04() #0 {
+ %x = call float @llvm.round.f32(float -1.5) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f32_05
+; CHECK-NEXT: ret float 3.000000e+00
+define float @constant_fold_round_f32_05() #0 {
+ %x = call float @llvm.round.f32(float 2.75) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f32_06
+; CHECK-NEXT: ret float -3.000000e+00
+define float @constant_fold_round_f32_06() #0 {
+ %x = call float @llvm.round.f32(float -2.75) #0
+ ret float %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f64_01
+; CHECK-NEXT: ret double 1.000000e+00
+define double @constant_fold_round_f64_01() #0 {
+ %x = call double @llvm.round.f64(double 1.3) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f64_02
+; CHECK-NEXT: ret double -1.000000e+00
+define double @constant_fold_round_f64_02() #0 {
+ %x = call double @llvm.round.f64(double -1.3) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f64_03
+; CHECK-NEXT: ret double 2.000000e+00
+define double @constant_fold_round_f64_03() #0 {
+ %x = call double @llvm.round.f64(double 1.5) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f64_04
+; CHECK-NEXT: ret double -2.000000e+00
+define double @constant_fold_round_f64_04() #0 {
+ %x = call double @llvm.round.f64(double -1.5) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f64_05
+; CHECK-NEXT: ret double 3.000000e+00
+define double @constant_fold_round_f64_05() #0 {
+ %x = call double @llvm.round.f64(double 2.7) #0
+ ret double %x
+}
+
+; CHECK-LABEL: @constant_fold_round_f64_06
+; CHECK-NEXT: ret double -3.000000e+00
+define double @constant_fold_round_f64_06() #0 {
+ %x = call double @llvm.round.f64(double -2.7) #0
+ ret double %x
+}
+
+attributes #0 = { nounwind readnone }
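+
+; Note: llvm.round rounds to the nearest integer with halfway cases
+; going away from zero (the semantics of libm round()), which is why
+; 1.5 folds to 2.0 and -1.5 to -2.0 above rather than to the even
+; neighbour.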
diff --git a/test/Transforms/InstCombine/select-2.ll b/test/Transforms/InstCombine/select-2.ll
index 5b9deb4515a8..832d958c5f34 100644
--- a/test/Transforms/InstCombine/select-2.ll
+++ b/test/Transforms/InstCombine/select-2.ll
@@ -19,3 +19,13 @@ define i32 @t2(i32 %c, i32 %x) nounwind {
%t3 = select i1 %t1, i32 %t2, i32 %x
ret i32 %t3
}
+
+define float @t3(float %x, float %y) nounwind {
+ %t1 = fcmp ogt float %x, %y
+ %t2 = select i1 %t1, float %x, float 1.0
+ %t3 = fadd fast float %t2, 1.0
+ ret float %t3
+; CHECK-LABEL: @t3(
+; CHECK: fadd fast
+; CHECK: select
+}
diff --git a/test/Transforms/InstCombine/select-select.ll b/test/Transforms/InstCombine/select-select.ll
new file mode 100644
index 000000000000..65820acf07b8
--- /dev/null
+++ b/test/Transforms/InstCombine/select-select.ll
@@ -0,0 +1,24 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+; CHECK: @foo1
+define float @foo1(float %a) #0 {
+; CHECK-NOT: xor
+ %b = fcmp ogt float %a, 0.000000e+00
+ %c = select i1 %b, float %a, float 0.000000e+00
+ %d = fcmp olt float %c, 1.000000e+00
+ %f = select i1 %d, float %c, float 1.000000e+00
+ ret float %f
+}
+
+; CHECK: @foo2
+define float @foo2(float %a) #0 {
+; CHECK-NOT: xor
+ %b = fcmp ogt float %a, 0.000000e+00
+ %c = select i1 %b, float %a, float 0.000000e+00
+ %d = fcmp olt float %c, 1.000000e+00
+ %e = select i1 %b, float %a, float 0.000000e+00
+ %f = select i1 %d, float %e, float 1.000000e+00
+ ret float %f
+}
+
+attributes #0 = { nounwind readnone ssp uwtable }
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
index 1458bde82124..d625f3b1b33d 100644
--- a/test/Transforms/InstCombine/select.ll
+++ b/test/Transforms/InstCombine/select.ll
@@ -281,7 +281,7 @@ define i32 @test15i(i32 %X) {
; CHECK-NEXT: %t1 = shl i32 %X, 8
; CHECK-NEXT: %1 = and i32 %t1, 512
; CHECK-NEXT: %2 = xor i32 %1, 512
-; CHECK-NEXT: %3 = add i32 %2, 577
+; CHECK-NEXT: %3 = add nuw nsw i32 %2, 577
; CHECK-NEXT: ret i32 %3
}
@@ -294,7 +294,7 @@ define i32 @test15j(i32 %X) {
; CHECK-LABEL: @test15j(
; CHECK-NEXT: %t1 = shl i32 %X, 8
; CHECK-NEXT: %1 = and i32 %t1, 512
-; CHECK-NEXT: %2 = add i32 %1, 577
+; CHECK-NEXT: %2 = add nuw nsw i32 %1, 577
; CHECK-NEXT: ret i32 %2
}
@@ -521,7 +521,7 @@ define i32 @test35(i32 %x) {
; CHECK-LABEL: @test35(
; CHECK: ashr i32 %x, 31
; CHECK: and i32 {{.*}}, 40
-; CHECK: add i32 {{.*}}, 60
+; CHECK: add nuw nsw i32 {{.*}}, 60
; CHECK: ret
}
@@ -532,7 +532,7 @@ define i32 @test36(i32 %x) {
; CHECK-LABEL: @test36(
; CHECK: ashr i32 %x, 31
; CHECK: and i32 {{.*}}, -40
-; CHECK: add i32 {{.*}}, 100
+; CHECK: add nsw i32 {{.*}}, 100
; CHECK: ret
}
@@ -996,6 +996,111 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_vector_of_2s(i32 %x, <2 x i32> %y) {
ret <2 x i32> %select
}
+; CHECK-LABEL: @select_icmp_and_8_eq_0_or_8(
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 %x, 8
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_and_8_eq_0_or_8(i32 %x) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %or = or i32 %x, 8
+ %or.x = select i1 %cmp, i32 %or, i32 %x
+ ret i32 %or.x
+}
+
+; CHECK-LABEL: @select_icmp_and_8_ne_0_xor_8(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, -9
+; CHECK-NEXT: ret i32 [[AND]]
+define i32 @select_icmp_and_8_ne_0_xor_8(i32 %x) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %xor = xor i32 %x, 8
+ %x.xor = select i1 %cmp, i32 %x, i32 %xor
+ ret i32 %x.xor
+}
+
+; CHECK-LABEL: @select_icmp_and_8_eq_0_xor_8(
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 %x, 8
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_and_8_eq_0_xor_8(i32 %x) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %xor = xor i32 %x, 8
+ %xor.x = select i1 %cmp, i32 %xor, i32 %x
+ ret i32 %xor.x
+}
+
+; CHECK-LABEL: @select_icmp_and_8_ne_0_and_not_8(
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, -9
+; CHECK-NEXT: ret i32 [[AND]]
+define i32 @select_icmp_and_8_ne_0_and_not_8(i32 %x) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %and1 = and i32 %x, -9
+ %x.and1 = select i1 %cmp, i32 %x, i32 %and1
+ ret i32 %x.and1
+}
+
+; CHECK-LABEL: @select_icmp_and_8_eq_0_and_not_8(
+; CHECK-NEXT: ret i32 %x
+define i32 @select_icmp_and_8_eq_0_and_not_8(i32 %x) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %and1 = and i32 %x, -9
+ %and1.x = select i1 %cmp, i32 %and1, i32 %x
+ ret i32 %and1.x
+}
+
+; CHECK-LABEL: @select_icmp_x_and_8_eq_0_y_xor_8(
+; CHECK: select i1 %cmp, i64 %y, i64 %xor
+define i64 @select_icmp_x_and_8_eq_0_y_xor_8(i32 %x, i64 %y) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %xor = xor i64 %y, 8
+ %y.xor = select i1 %cmp, i64 %y, i64 %xor
+ ret i64 %y.xor
+}
+
+; CHECK-LABEL: @select_icmp_x_and_8_eq_0_y_and_not_8(
+; CHECK: select i1 %cmp, i64 %y, i64 %and1
+define i64 @select_icmp_x_and_8_eq_0_y_and_not_8(i32 %x, i64 %y) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %and1 = and i64 %y, -9
+ %y.and1 = select i1 %cmp, i64 %y, i64 %and1
+ ret i64 %y.and1
+}
+
+; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y_xor_8(
+; CHECK: select i1 %cmp, i64 %xor, i64 %y
+define i64 @select_icmp_x_and_8_ne_0_y_xor_8(i32 %x, i64 %y) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %xor = xor i64 %y, 8
+ %xor.y = select i1 %cmp, i64 %xor, i64 %y
+ ret i64 %xor.y
+}
+
+; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y_and_not_8(
+; CHECK: select i1 %cmp, i64 %and1, i64 %y
+define i64 @select_icmp_x_and_8_ne_0_y_and_not_8(i32 %x, i64 %y) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %and1 = and i64 %y, -9
+ %and1.y = select i1 %cmp, i64 %and1, i64 %y
+ ret i64 %and1.y
+}
+
+; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y_or_8(
+; CHECK: xor i64 %1, 8
+; CHECK: or i64 %2, %y
+define i64 @select_icmp_x_and_8_ne_0_y_or_8(i32 %x, i64 %y) {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %or = or i64 %y, 8
+ %or.y = select i1 %cmp, i64 %or, i64 %y
+ ret i64 %or.y
+}
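+
+; The or case above is folded without a select: (x & 8) ^ 8 is 8 exactly
+; when bit 3 of %x is clear and 0 otherwise, so or'ing that value
+; (widened to i64) into %y reproduces the original select.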
+
define i32 @test65(i64 %x) {
%1 = and i64 %x, 16
%2 = icmp ne i64 %1, 0
@@ -1031,3 +1136,103 @@ define i32 @test67(i16 %x) {
; CHECK: lshr exact i32 %2, 1
; CHECK: xor i32 %3, 42
}
+
+; SMIN(SMIN(X, 11), 92) -> SMIN(X, 11)
+define i32 @test68(i32 %x) {
+entry:
+ %cmp = icmp slt i32 11, %x
+ %cond = select i1 %cmp, i32 11, i32 %x
+ %cmp3 = icmp slt i32 92, %cond
+ %retval = select i1 %cmp3, i32 92, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test68(
+; CHECK: ret i32 %cond
+}
+
+; MIN(MIN(X, 24), 83) -> MIN(X, 24)
+define i32 @test69(i32 %x) {
+entry:
+ %cmp = icmp ult i32 24, %x
+ %cond = select i1 %cmp, i32 24, i32 %x
+ %cmp3 = icmp ult i32 83, %cond
+ %retval = select i1 %cmp3, i32 83, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test69(
+; CHECK: ret i32 %cond
+}
+
+; SMAX(SMAX(X, 75), 36) -> SMAX(X, 75)
+define i32 @test70(i32 %x) {
+entry:
+ %cmp = icmp slt i32 %x, 75
+ %cond = select i1 %cmp, i32 75, i32 %x
+ %cmp3 = icmp slt i32 %cond, 36
+ %retval = select i1 %cmp3, i32 36, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test70(
+; CHECK: ret i32 %cond
+}
+
+; MAX(MAX(X, 68), 47) -> MAX(X, 68)
+define i32 @test71(i32 %x) {
+entry:
+ %cmp = icmp ult i32 %x, 68
+ %cond = select i1 %cmp, i32 68, i32 %x
+ %cmp3 = icmp ult i32 %cond, 47
+ %retval = select i1 %cmp3, i32 47, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test71(
+; CHECK: ret i32 %cond
+}
+
+; SMIN(SMIN(X, 92), 11) -> SMIN(X, 11)
+define i32 @test72(i32 %x) {
+ %cmp = icmp sgt i32 %x, 92
+ %cond = select i1 %cmp, i32 92, i32 %x
+ %cmp3 = icmp sgt i32 %cond, 11
+ %retval = select i1 %cmp3, i32 11, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test72(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp sgt i32 %x, 11
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 11, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+; MIN(MIN(X, 83), 24) -> MIN(X, 24)
+define i32 @test73(i32 %x) {
+ %cmp = icmp ugt i32 %x, 83
+ %cond = select i1 %cmp, i32 83, i32 %x
+ %cmp3 = icmp ugt i32 %cond, 24
+ %retval = select i1 %cmp3, i32 24, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test73(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ugt i32 %x, 24
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 24, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+; SMAX(SMAX(X, 36), 75) -> SMAX(X, 75)
+define i32 @test74(i32 %x) {
+ %cmp = icmp slt i32 %x, 36
+ %cond = select i1 %cmp, i32 36, i32 %x
+ %cmp3 = icmp slt i32 %cond, 75
+ %retval = select i1 %cmp3, i32 75, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test74(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp slt i32 %x, 75
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 75, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
+
+; MAX(MAX(X, 47), 68) -> MAX(X, 68)
+define i32 @test75(i32 %x) {
+ %cmp = icmp ult i32 %x, 47
+ %cond = select i1 %cmp, i32 47, i32 %x
+ %cmp3 = icmp ult i32 %cond, 68
+ %retval = select i1 %cmp3, i32 68, i32 %cond
+ ret i32 %retval
+; CHECK-LABEL: @test75(
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ult i32 %x, 68
+; CHECK-NEXT: [[SEL:%[a-z0-9]+]] = select i1 [[CMP]], i32 68, i32 %x
+; CHECK-NEXT: ret i32 [[SEL]]
+}
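+
+; Summary of the nested min/max folds in @test68 through @test75:
+; MIN(MIN(X, a), b) == MIN(X, min(a, b)), and likewise for MAX with
+; max(a, b). When the inner constant is already the tighter bound
+; (tests 68-71) the outer select is simply dropped; otherwise
+; (tests 72-75) a new compare and select against the tighter constant
+; are emitted.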
diff --git a/test/Transforms/InstCombine/sext.ll b/test/Transforms/InstCombine/sext.ll
index b8dfe2257b18..f04afcc747b8 100644
--- a/test/Transforms/InstCombine/sext.ll
+++ b/test/Transforms/InstCombine/sext.ll
@@ -145,7 +145,7 @@ define i32 @test13(i32 %x) nounwind {
; CHECK-LABEL: @test13(
; CHECK-NEXT: %and = lshr i32 %x, 3
; CHECK-NEXT: %1 = and i32 %and, 1
-; CHECK-NEXT: %sext = add i32 %1, -1
+; CHECK-NEXT: %sext = add nsw i32 %1, -1
; CHECK-NEXT: ret i32 %sext
}
@@ -157,7 +157,7 @@ define i32 @test14(i16 %x) nounwind {
; CHECK-LABEL: @test14(
; CHECK-NEXT: %and = lshr i16 %x, 4
; CHECK-NEXT: %1 = and i16 %and, 1
-; CHECK-NEXT: %sext = add i16 %1, -1
+; CHECK-NEXT: %sext = add nsw i16 %1, -1
; CHECK-NEXT: %ext = sext i16 %sext to i32
; CHECK-NEXT: ret i32 %ext
}
diff --git a/test/Transforms/InstCombine/shift.ll b/test/Transforms/InstCombine/shift.ll
index b1082f06ef74..5586bb652783 100644
--- a/test/Transforms/InstCombine/shift.ll
+++ b/test/Transforms/InstCombine/shift.ll
@@ -36,17 +36,52 @@ define i32 @test4(i8 %A) {
define i32 @test5(i32 %A) {
; CHECK-LABEL: @test5(
; CHECK: ret i32 undef
- %B = lshr i32 %A, 32 ;; shift all bits out
+ %B = lshr i32 %A, 32 ;; shift all bits out
ret i32 %B
}
+define <4 x i32> @test5_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5_splat_vector(
+; CHECK: ret <4 x i32> undef
+ %B = lshr <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
+ ret <4 x i32> %B
+}
+
+define <4 x i32> @test5_zero_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5_zero_vector(
+; CHECK-NEXT: ret <4 x i32> %A
+ %B = lshr <4 x i32> %A, zeroinitializer
+ ret <4 x i32> %B
+}
+
+define <4 x i32> @test5_non_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5_non_splat_vector(
+; CHECK-NOT: ret <4 x i32> undef
+ %B = shl <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
+ ret <4 x i32> %B
+}
+
define i32 @test5a(i32 %A) {
; CHECK-LABEL: @test5a(
; CHECK: ret i32 undef
- %B = shl i32 %A, 32 ;; shift all bits out
+ %B = shl i32 %A, 32 ;; shift all bits out
ret i32 %B
}
+define <4 x i32> @test5a_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5a_splat_vector(
+; CHECK: ret <4 x i32> undef
+ %B = shl <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
+ ret <4 x i32> %B
+}
+
+define <4 x i32> @test5a_non_splat_vector(<4 x i32> %A) {
+; CHECK-LABEL: @test5a_non_splat_vector(
+; CHECK-NOT: ret <4 x i32> undef
+ %B = shl <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
+ ret <4 x i32> %B
+}
+
define i32 @test5b() {
; CHECK-LABEL: @test5b(
; CHECK: ret i32 -1
@@ -82,7 +117,7 @@ define i32 @test6a(i32 %A) {
define i32 @test7(i8 %A) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: ret i32 -1
- %shift.upgrd.3 = zext i8 %A to i32
+ %shift.upgrd.3 = zext i8 %A to i32
%B = ashr i32 -1, %shift.upgrd.3 ;; Always equal to -1
ret i32 %B
}
@@ -232,7 +267,7 @@ define i1 @test16(i32 %X) {
; CHECK-NEXT: and i32 %X, 16
; CHECK-NEXT: icmp ne i32
; CHECK-NEXT: ret i1
- %tmp.3 = ashr i32 %X, 4
+ %tmp.3 = ashr i32 %X, 4
%tmp.6 = and i32 %tmp.3, 1
%tmp.7 = icmp ne i32 %tmp.6, 0
ret i1 %tmp.7
@@ -344,6 +379,20 @@ define i32 @test25(i32 %tmp.2, i32 %AA) {
ret i32 %tmp.6
}
+define <2 x i32> @test25_vector(<2 x i32> %tmp.2, <2 x i32> %AA) {
+; CHECK-LABEL: @test25_vector(
+; CHECK: %tmp.3 = lshr <2 x i32> %tmp.2, <i32 17, i32 17>
+; CHECK-NEXT: shl <2 x i32> %tmp.3, <i32 17, i32 17>
+; CHECK-NEXT: add <2 x i32> %tmp.51, %AA
+; CHECK-NEXT: and <2 x i32> %x2, <i32 -131072, i32 -131072>
+; CHECK-NEXT: ret <2 x i32>
+ %x = lshr <2 x i32> %AA, <i32 17, i32 17>
+ %tmp.3 = lshr <2 x i32> %tmp.2, <i32 17, i32 17>
+ %tmp.5 = add <2 x i32> %tmp.3, %x
+ %tmp.6 = shl <2 x i32> %tmp.5, <i32 17, i32 17>
+ ret <2 x i32> %tmp.6
+}
+
;; handle casts between shifts.
define i32 @test26(i32 %A) {
; CHECK-LABEL: @test26(
@@ -365,12 +414,12 @@ define i1 @test27(i32 %x) nounwind {
%z = trunc i32 %y to i1
ret i1 %z
}
-
+
define i8 @test28(i8 %x) {
entry:
; CHECK-LABEL: @test28(
; CHECK: icmp slt i8 %x, 0
-; CHECK-NEXT: br i1
+; CHECK-NEXT: br i1
%tmp1 = lshr i8 %x, 7
%cond1 = icmp ne i8 %tmp1, 0
br i1 %cond1, label %bb1, label %bb2
@@ -476,7 +525,7 @@ entry:
%ins = or i128 %tmp23, %tmp27
%tmp45 = lshr i128 %ins, 64
ret i128 %tmp45
-
+
; CHECK-LABEL: @test36(
; CHECK: %tmp231 = or i128 %B, %A
; CHECK: %ins = and i128 %tmp231, 18446744073709551615
@@ -492,7 +541,7 @@ entry:
%tmp45 = lshr i128 %ins, 64
%tmp46 = trunc i128 %tmp45 to i64
ret i64 %tmp46
-
+
; CHECK-LABEL: @test37(
; CHECK: %tmp23 = shl nuw nsw i128 %tmp22, 32
; CHECK: %ins = or i128 %tmp23, %A
@@ -780,3 +829,32 @@ bb11: ; preds = %bb8
bb12: ; preds = %bb11, %bb8, %bb
ret void
}
+
+define i32 @test64(i32 %a) {
+; CHECK-LABEL: @test64(
+; CHECK-NEXT: ret i32 undef
+ %b = ashr i32 %a, 32 ; shift all bits out
+ ret i32 %b
+}
+
+define <4 x i32> @test64_splat_vector(<4 x i32> %a) {
+; CHECK-LABEL: @test64_splat_vector
+; CHECK-NEXT: ret <4 x i32> undef
+ %b = ashr <4 x i32> %a, <i32 32, i32 32, i32 32, i32 32> ; shift all bits out
+ ret <4 x i32> %b
+}
+
+define <4 x i32> @test64_non_splat_vector(<4 x i32> %a) {
+; CHECK-LABEL: @test64_non_splat_vector
+; CHECK-NOT: ret <4 x i32> undef
+ %b = ashr <4 x i32> %a, <i32 32, i32 0, i32 1, i32 2> ; shift all bits out
+ ret <4 x i32> %b
+}
+
+define <2 x i65> @test_65(<2 x i64> %t) {
+; CHECK-LABEL: @test_65
+ %a = zext <2 x i64> %t to <2 x i65>
+ %sext = shl <2 x i65> %a, <i65 33, i65 33>
+ %b = ashr <2 x i65> %sext, <i65 33, i65 33>
+ ret <2 x i65> %b
+}
diff --git a/test/Transforms/InstCombine/sign-test-and-or.ll b/test/Transforms/InstCombine/sign-test-and-or.ll
index 95ed9b976ba3..aa23d933b09e 100644
--- a/test/Transforms/InstCombine/sign-test-and-or.ll
+++ b/test/Transforms/InstCombine/sign-test-and-or.ll
@@ -177,3 +177,41 @@ if.then:
if.end:
ret void
}
+
+define void @test10(i32 %a) nounwind {
+ %1 = and i32 %a, 2
+ %2 = icmp eq i32 %1, 0
+ %3 = icmp ult i32 %a, 4
+ %or.cond = and i1 %2, %3
+ br i1 %or.cond, label %if.then, label %if.end
+
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: %1 = icmp ult i32 %a, 2
+; CHECK-NEXT: br i1 %1, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo() nounwind
+ ret void
+
+if.end:
+ ret void
+}
+
+define void @test11(i32 %a) nounwind {
+ %1 = and i32 %a, 2
+ %2 = icmp ne i32 %1, 0
+ %3 = icmp ugt i32 %a, 3
+ %or.cond = or i1 %2, %3
+ br i1 %or.cond, label %if.then, label %if.end
+
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: %1 = icmp ugt i32 %a, 1
+; CHECK-NEXT: br i1 %1, label %if.then, label %if.end
+
+if.then:
+ tail call void @foo() nounwind
+ ret void
+
+if.end:
+ ret void
+}
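+
+; The folds in @test10 and @test11 are small range arguments:
+; (a & 2) == 0 && a <u 4 holds exactly for a in {0, 1}, i.e. a <u 2,
+; and the complementary (a & 2) != 0 || a >u 3 holds exactly for
+; a >=u 2, i.e. a >u 1, which is what the simplified branches test.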
diff --git a/test/Transforms/InstCombine/sincospi.ll b/test/Transforms/InstCombine/sincospi.ll
index 0d1a6027a00a..739827f1962a 100644
--- a/test/Transforms/InstCombine/sincospi.ll
+++ b/test/Transforms/InstCombine/sincospi.ll
@@ -1,5 +1,6 @@
; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.9 | FileCheck %s --check-prefix=CHECK-FLOAT-IN-VEC
; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios7.0 | FileCheck %s
+; RUN: opt -instcombine -S < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
; RUN: opt -instcombine -S < %s -mtriple=x86_64-apple-macosx10.8 | FileCheck %s --check-prefix=CHECK-NO-SINCOS
; RUN: opt -instcombine -S < %s -mtriple=arm-apple-ios6.0 | FileCheck %s --check-prefix=CHECK-NO-SINCOS
; RUN: opt -instcombine -S < %s -mtriple=x86_64-none-linux-gnu | FileCheck %s --check-prefix=CHECK-NO-SINCOS
@@ -23,12 +24,12 @@ define float @test_instbased_f32() {
%res = fadd float %sin, %cos
ret float %res
; CHECK-FLOAT-IN-VEC: [[VAL:%[a-z0-9]+]] = load float* @var32
-; CHECK-FLOAT-IN-VEC: [[SINCOS:%[a-z0-9]+]] = call <2 x float> @__sincospi_stretf(float [[VAL]])
+; CHECK-FLOAT-IN-VEC: [[SINCOS:%[a-z0-9]+]] = call <2 x float> @__sincospif_stret(float [[VAL]])
; CHECK-FLOAT-IN-VEC: extractelement <2 x float> [[SINCOS]], i32 0
; CHECK-FLOAT-IN-VEC: extractelement <2 x float> [[SINCOS]], i32 1
; CHECK: [[VAL:%[a-z0-9]+]] = load float* @var32
-; CHECK: [[SINCOS:%[a-z0-9]+]] = call { float, float } @__sincospi_stretf(float [[VAL]])
+; CHECK: [[SINCOS:%[a-z0-9]+]] = call { float, float } @__sincospif_stret(float [[VAL]])
; CHECK: extractvalue { float, float } [[SINCOS]], 0
; CHECK: extractvalue { float, float } [[SINCOS]], 1
@@ -41,11 +42,11 @@ define float @test_constant_f32() {
%cos = call float @__cospif(float 1.0) #0
%res = fadd float %sin, %cos
ret float %res
-; CHECK-FLOAT-IN-VEC: [[SINCOS:%[a-z0-9]+]] = call <2 x float> @__sincospi_stretf(float 1.000000e+00)
+; CHECK-FLOAT-IN-VEC: [[SINCOS:%[a-z0-9]+]] = call <2 x float> @__sincospif_stret(float 1.000000e+00)
; CHECK-FLOAT-IN-VEC: extractelement <2 x float> [[SINCOS]], i32 0
; CHECK-FLOAT-IN-VEC: extractelement <2 x float> [[SINCOS]], i32 1
-; CHECK: [[SINCOS:%[a-z0-9]+]] = call { float, float } @__sincospi_stretf(float 1.000000e+00)
+; CHECK: [[SINCOS:%[a-z0-9]+]] = call { float, float } @__sincospif_stret(float 1.000000e+00)
; CHECK: extractvalue { float, float } [[SINCOS]], 0
; CHECK: extractvalue { float, float } [[SINCOS]], 1
diff --git a/test/Transforms/InstCombine/sprintf-1.ll b/test/Transforms/InstCombine/sprintf-1.ll
index 78dd7aa7df47..afa38f35ae83 100644
--- a/test/Transforms/InstCombine/sprintf-1.ll
+++ b/test/Transforms/InstCombine/sprintf-1.ll
@@ -77,18 +77,18 @@ define void @test_simplify6(i8* %dst) {
; CHECK-IPRINTF-LABEL: @test_simplify6(
%fmt = getelementptr [3 x i8]* @percent_d, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt, i32 187)
-; CHECK-NEXT-IPRINTF: call i32 (i8*, i8*, ...)* @siprintf(i8* %dst, i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
+; CHECK-IPRINTF-NEXT: call i32 (i8*, i8*, ...)* @siprintf(i8* %dst, i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
ret void
-; CHECK-NEXT-IPRINTF: ret void
+; CHECK-IPRINTF-NEXT: ret void
}
define void @test_no_simplify1(i8* %dst) {
; CHECK-IPRINTF-LABEL: @test_no_simplify1(
%fmt = getelementptr [3 x i8]* @percent_f, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt, double 1.87)
-; CHECK-NEXT-IPRINTF: call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
+; CHECK-IPRINTF-NEXT: call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
ret void
-; CHECK-NEXT-IPRINTF: ret void
+; CHECK-IPRINTF-NEXT: ret void
}
define void @test_no_simplify2(i8* %dst, i8* %fmt, double %d) {
diff --git a/test/Transforms/InstCombine/strchr-1.ll b/test/Transforms/InstCombine/strchr-1.ll
index d2c989462156..66b3e2e51c7a 100644
--- a/test/Transforms/InstCombine/strchr-1.ll
+++ b/test/Transforms/InstCombine/strchr-1.ll
@@ -63,3 +63,16 @@ define void @test_simplify5() {
store i8* %dst, i8** @chp
ret void
}
+
+; Check transformation strchr(p, 0) -> p + strlen(p)
+define void @test_simplify6(i8* %str) {
+; CHECK: %strlen = call i32 @strlen(i8* %str)
+; CHECK-NOT: call i8* @strchr
+; CHECK: %strchr = getelementptr i8* %str, i32 %strlen
+; CHECK: store i8* %strchr, i8** @chp, align 4
+; CHECK: ret void
+
+ %dst = call i8* @strchr(i8* %str, i32 0)
+ store i8* %dst, i8** @chp
+ ret void
+}
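+
+; Intuition for the fold above: searching for the terminating NUL always
+; succeeds and returns a pointer to that NUL byte, so for any string p,
+; strchr(p, '\0') == p + strlen(p), which is exactly the GEP the CHECK
+; lines expect.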
diff --git a/test/Transforms/InstCombine/strlen-1.ll b/test/Transforms/InstCombine/strlen-1.ll
index 4fa5b4fdb62f..4a3caf28a0fe 100644
--- a/test/Transforms/InstCombine/strlen-1.ll
+++ b/test/Transforms/InstCombine/strlen-1.ll
@@ -5,6 +5,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@hello = constant [6 x i8] c"hello\00"
+@longer = constant [7 x i8] c"longer\00"
@null = constant [1 x i8] zeroinitializer
@null_hello = constant [7 x i8] c"\00hello\00"
@nullstring = constant i8 0
@@ -85,6 +86,17 @@ define i1 @test_simplify8() {
; CHECK-NEXT: ret i1 false
}
+define i32 @test_simplify9(i1 %x) {
+; CHECK-LABEL: @test_simplify9
+ %hello = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %longer = getelementptr [7 x i8]* @longer, i32 0, i32 0
+ %s = select i1 %x, i8* %hello, i8* %longer
+ %l = call i32 @strlen(i8* %s)
+; CHECK-NEXT: select i1 %x, i32 5, i32 6
+ ret i32 %l
+; CHECK-NEXT: ret
+}
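+
+; @test_simplify9 shows strlen folding through a select: both operands
+; are constant strings of statically known length (5 for @hello, 6 for
+; @longer), so the call collapses to a select between the two lengths.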
+
; Check cases that shouldn't be simplified.
define i32 @test_no_simplify1() {
diff --git a/test/Transforms/InstCombine/sub.ll b/test/Transforms/InstCombine/sub.ll
index 36c523bd7b75..67b7c4996b07 100644
--- a/test/Transforms/InstCombine/sub.ll
+++ b/test/Transforms/InstCombine/sub.ll
@@ -391,4 +391,76 @@ define i16 @test30_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
ret i16 %sub
}
-
+define <2 x i64> @test31(<2 x i64> %A) {
+ %xor = xor <2 x i64> %A, <i64 -1, i64 -1>
+ %sub = sub <2 x i64> <i64 2, i64 3>, %xor
+ ret <2 x i64> %sub
+; CHECK-LABEL: @test31(
+; CHECK-NEXT: %sub = add <2 x i64> %A, <i64 3, i64 4>
+; CHECK-NEXT: ret <2 x i64> %sub
+}
+
+define <2 x i64> @test32(<2 x i64> %A) {
+ %add = add <2 x i64> %A, <i64 -1, i64 -1>
+ %sub = sub <2 x i64> <i64 2, i64 3>, %add
+ ret <2 x i64> %sub
+; CHECK-LABEL: @test32(
+; CHECK-NEXT: %sub = sub <2 x i64> <i64 3, i64 4>
+; CHECK-NEXT: ret <2 x i64> %sub
+}
+
+define <2 x i64> @test33(<2 x i1> %A) {
+ %ext = zext <2 x i1> %A to <2 x i64>
+ %sub = sub <2 x i64> zeroinitializer, %ext
+ ret <2 x i64> %sub
+; CHECK-LABEL: @test33(
+; CHECK-NEXT: %sub = sext <2 x i1> %A to <2 x i64>
+; CHECK-NEXT: ret <2 x i64> %sub
+}
+
+define <2 x i64> @test34(<2 x i1> %A) {
+ %ext = sext <2 x i1> %A to <2 x i64>
+ %sub = sub <2 x i64> zeroinitializer, %ext
+ ret <2 x i64> %sub
+; CHECK-LABEL: @test34(
+; CHECK-NEXT: %sub = zext <2 x i1> %A to <2 x i64>
+; CHECK-NEXT: ret <2 x i64> %sub
+}
+
+define <2 x i64> @test35(<2 x i64> %A) {
+ %mul = mul <2 x i64> %A, <i64 3, i64 4>
+ %sub = sub <2 x i64> %A, %mul
+ ret <2 x i64> %sub
+; CHECK-LABEL: @test35(
+; CHECK-NEXT: %sub = mul <2 x i64> %A, <i64 -2, i64 -3>
+; CHECK-NEXT: ret <2 x i64> %sub
+}
+
+define <2 x i64> @test36(<2 x i64> %A) {
+ %shl = shl <2 x i64> %A, <i64 3, i64 4>
+ %sub = sub <2 x i64> %shl, %A
+ ret <2 x i64> %sub
+; CHECK-LABEL: @test36(
+; CHECK-NEXT: %sub = mul <2 x i64> %A, <i64 7, i64 15>
+; CHECK-NEXT: ret <2 x i64> %sub
+}
+
+define <2 x i32> @test37(<2 x i32> %A) {
+ %div = sdiv <2 x i32> %A, <i32 -2147483648, i32 -2147483648>
+ %sub = sub nsw <2 x i32> zeroinitializer, %div
+ ret <2 x i32> %sub
+; CHECK-LABEL: @test37(
+; CHECK-NEXT: [[ICMP:%.*]] = icmp eq <2 x i32> %A, <i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[ICMP]] to <2 x i32>
+; CHECK-NEXT: ret <2 x i32> [[SEXT]]
+}
+
+define i32 @test38(i32 %A) {
+ %div = sdiv i32 %A, -2147483648
+ %sub = sub nsw i32 0, %div
+ ret i32 %sub
+; CHECK-LABEL: @test38(
+; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i32 %A, -2147483648
+; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[ICMP]] to i32
+; CHECK-NEXT: ret i32 [[SEXT]]
+}
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
index d12412a92977..41d2b292eeff 100644
--- a/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -1,4 +1,5 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define i16 @test1(float %f) {
entry:
@@ -209,4 +210,369 @@ define <4 x float> @test_select(float %f, float %g) {
ret <4 x float> %ret
}
+; We should optimize these two redundant insertqi calls into one
+; CHECK: define <2 x i64> @testInsertTwice(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertTwice(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 32)
+; CHECK-NOT: insertqi
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 32)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 32)
+ ret <2 x i64> %2
+}
+
+; The result of this insert is the second arg, since the top 64 bits of
+; the result are undefined, and we copy the bottom 64 bits from the
+; second arg
+; CHECK: define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsert64Bits(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: ret <2 x i64> %i
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 64, i8 0)
+ ret <2 x i64> %1
+}
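A bit-level sketch of why this call folds to %i, using only the immediates in the call (length 64, index 0) and the top-half-is-undefined rule stated in the comment above:

    result[ 63:  0] = %i[63:0]    (length 64 inserted at bit 0 copies the whole low half of %i)
    result[127: 64] = undef       (the upper 64 bits of an insertqi result are undefined)

Every defined bit of the result equals the corresponding bit of %i, so returning %i is a valid choice for the undefined half.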
+
+; Test the several types of ranges and orderings that exist for two insertqi calls
+; CHECK: define <2 x i64> @testInsertContainedRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertContainedRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 16)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertContainedRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertContainedRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 16)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 0)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertOverlappingRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertOverlappingRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 16)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertOverlappingRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertOverlappingRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 16)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 0)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertAdjacentRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertAdjacentRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 32, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertAdjacentRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertAdjacentRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: %[[RES:.*]] = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 48, i8 0)
+; CHECK: ret <2 x i64> %[[RES]]
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 32)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 32, i8 0)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertDisjointRange(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertDisjointRange(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ ret <2 x i64> %2
+}
+
+; CHECK: define <2 x i64> @testInsertDisjointRange_2(<2 x i64> %v, <2 x i64> %i)
+define <2 x i64> @testInsertDisjointRange_2(<2 x i64> %v, <2 x i64> %i) {
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+; CHECK: tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %v, <2 x i64> %i, i8 16, i8 0)
+ %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %i, i8 16, i8 32)
+ ret <2 x i64> %2
+}
+
+
+; CHECK: declare <2 x i64> @llvm.x86.sse4a.insertqi
+declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind
+
+declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
+define <4 x float> @test_vpermilvar_ps(<4 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps(
+; CHECK: shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
+ ret <4 x float> %a
+}
+
+declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
+define <8 x float> @test_vpermilvar_ps_256(<8 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps_256(
+; CHECK: shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
+ ret <8 x float> %a
+}
+
+declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i32>)
+define <2 x double> @test_vpermilvar_pd(<2 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd(
+; CHECK: shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i32> <i32 2, i32 0>)
+ ret <2 x double> %a
+}
+
+declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i32>)
+define <4 x double> @test_vpermilvar_pd_256(<4 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd_256(
+; CHECK: shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i32> <i32 3, i32 1, i32 2, i32 0>)
+ ret <4 x double> %a
+}
+
+define <4 x float> @test_vpermilvar_ps_zero(<4 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps_zero(
+; CHECK: shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> zeroinitializer)
+ ret <4 x float> %a
+}
+
+define <8 x float> @test_vpermilvar_ps_256_zero(<8 x float> %v) {
+; CHECK-LABEL: @test_vpermilvar_ps_256_zero(
+; CHECK: shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> zeroinitializer)
+ ret <8 x float> %a
+}
+
+define <2 x double> @test_vpermilvar_pd_zero(<2 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd_zero(
+; CHECK: shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i32> zeroinitializer)
+ ret <2 x double> %a
+}
+
+define <4 x double> @test_vpermilvar_pd_256_zero(<4 x double> %v) {
+; CHECK-LABEL: @test_vpermilvar_pd_256_zero(
+; CHECK: shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+ %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i32> zeroinitializer)
+ ret <4 x double> %a
+}
+
+define <2 x i64> @test_sse2_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_1
+; CHECK: ret <2 x i64> <i64 72058418680037440, i64 144117112246370624>
+}
+
+define <4 x i64> @test_avx2_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> <i16 1, i16 0, i16 0, i16 0, i16 2, i16 0, i16 0, i16 0, i16 3, i16 0, i16 0, i16 0, i16 4, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_1
+; CHECK: ret <4 x i64> <i64 64, i64 128, i64 192, i64 256>
+}
+
+define <2 x i64> @test_sse2_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_0
+; CHECK: ret <2 x i64> zeroinitializer
+}
+
+define <4 x i64> @test_avx2_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> <i16 1, i16 0, i16 0, i16 0, i16 2, i16 0, i16 0, i16 0, i16 3, i16 0, i16 0, i16 0, i16 4, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_0
+; CHECK: ret <4 x i64> zeroinitializer
+}
+define <2 x i64> @test_sse2_psrl_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> <i16 16, i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_psrl_1
+; CHECK: ret <2 x i64> <i64 562954248421376, i64 9007267974742020>
+}
+
+define <4 x i64> @test_avx2_psrl_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> <i16 1024, i16 0, i16 0, i16 0, i16 2048, i16 0, i16 0, i16 0, i16 4096, i16 0, i16 0, i16 0, i16 8192, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_psrl_1
+; CHECK: ret <4 x i64> <i64 16, i64 32, i64 64, i64 128>
+}
+
+define <2 x i64> @test_sse2_psrl_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> <i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048, i16 4096>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_psrl_0
+; CHECK: ret <2 x i64> zeroinitializer
+}
+
+define <4 x i64> @test_avx2_psrl_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> <i16 1024, i16 0, i16 0, i16 0, i16 2048, i16 0, i16 0, i16 0, i16 4096, i16 0, i16 0, i16 0, i16 8192, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_psrl_0
+; CHECK: ret <4 x i64> zeroinitializer
+}
+
+declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) #1
+declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) #1
+declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) #1
+declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) #1
+declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) #1
+declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) #1
+declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) #1
+declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) #1
+declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) #1
+declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) #1
+declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) #1
+declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) #1
+declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) #1
+declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) #1
+declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) #1
+declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) #1
+declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) #1
+declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) #1
+declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) #1
+declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) #1
+declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) #1
+declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) #1
+declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) #1
+declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) #1
+attributes #1 = { nounwind readnone }
diff --git a/test/Transforms/InstCombine/vec_phi_extract.ll b/test/Transforms/InstCombine/vec_phi_extract.ll
index 73ec1f1bb04f..1d778a000fb7 100644
--- a/test/Transforms/InstCombine/vec_phi_extract.ll
+++ b/test/Transforms/InstCombine/vec_phi_extract.ll
@@ -36,10 +36,10 @@ for.cond:
%input_1.addr.1 = phi <3 x i32> [ undef, %entry ], [ %dec43, %for.body ]
br i1 undef, label %for.end, label %for.body
-; CHECK extractelement
+; CHECK: extractelement
for.body:
%dec43 = add <3 x i32> %input_1.addr.1, <i32 -1, i32 -1, i32 -1>
- %sub44 = sub <3 x i32> zeroinitializer, %dec43
+ %sub44 = sub <3 x i32> <i32 -1, i32 -1, i32 -1>, %dec43
%div45 = sdiv <3 x i32> %input_2.addr.0, %sub44
br label %for.cond
diff --git a/test/Transforms/InstCombine/vec_sext.ll b/test/Transforms/InstCombine/vec_sext.ll
index d7ab96b9cfd8..6f0d21476e3e 100644
--- a/test/Transforms/InstCombine/vec_sext.ll
+++ b/test/Transforms/InstCombine/vec_sext.ll
@@ -13,6 +13,7 @@ entry:
%cond = or <4 x i32> %2, %3
ret <4 x i32> %cond
+; CHECK-LABEL: @psignd_3
; CHECK: ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
; CHECK: sub nsw <4 x i32> zeroinitializer, %a
; CHECK: xor <4 x i32> %b.lobit, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -20,3 +21,25 @@ entry:
; CHECK: and <4 x i32> %b.lobit, %sub
; CHECK: or <4 x i32> %1, %2
}
+
+define <4 x i32> @test1(<4 x i32> %a, <4 x i32> %b) nounwind ssp {
+entry:
+ %cmp = icmp sgt <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %sub = sub nsw <4 x i32> zeroinitializer, %a
+ %0 = icmp slt <4 x i32> %sext, zeroinitializer
+ %sext3 = sext <4 x i1> %0 to <4 x i32>
+ %1 = xor <4 x i32> %sext3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %2 = and <4 x i32> %a, %1
+ %3 = and <4 x i32> %sext3, %sub
+ %cond = or <4 x i32> %2, %3
+ ret <4 x i32> %cond
+
+; CHECK-LABEL: @test1
+; CHECK: ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
+; CHECK: xor <4 x i32> %b.lobit, <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK: sub nsw <4 x i32> zeroinitializer, %a
+; CHECK: and <4 x i32> %b.lobit, %a
+; CHECK: and <4 x i32> %b.lobit.not, %sub
+; CHECK: or <4 x i32> %0, %1
+}
diff --git a/test/Transforms/InstCombine/vec_shuffle.ll b/test/Transforms/InstCombine/vec_shuffle.ll
index 3ee43dc63a6f..eb4e9d6f8c30 100644
--- a/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/test/Transforms/InstCombine/vec_shuffle.ll
@@ -228,3 +228,189 @@ define <4 x float> @test15b(<4 x float> %LHS, <4 x float> %RHS) {
ret <4 x float> %tmp5
}
+define <1 x i32> @test16a(i32 %ele) {
+; CHECK-LABEL: @test16a(
+; CHECK-NEXT: ret <1 x i32> <i32 2>
+ %tmp0 = insertelement <2 x i32> <i32 1, i32 undef>, i32 %ele, i32 1
+ %tmp1 = shl <2 x i32> %tmp0, <i32 1, i32 1>
+ %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <1 x i32> <i32 0>
+ ret <1 x i32> %tmp2
+}
+
+define <4 x i8> @test16b(i8 %ele) {
+; CHECK-LABEL: @test16b(
+; CHECK-NEXT: ret <4 x i8> <i8 2, i8 2, i8 2, i8 2>
+ %tmp0 = insertelement <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 undef, i8 1>, i8 %ele, i32 6
+ %tmp1 = shl <8 x i8> %tmp0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i8> %tmp2
+}
+
+; If the composition of two shuffles is the identity, the shuffles can be removed.
+define <4 x i32> @shuffle_17ident(<4 x i32> %v) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17ident(
+; CHECK-NOT: shufflevector
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %shuffle2 = shufflevector <4 x i32> %shuffle, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x i32> %shuffle2
+}
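A quick index-arithmetic check of the comment above, using only the two masks in @shuffle_17ident: with m1 = <1, 2, 3, 0> and m2 = <3, 0, 1, 2>, the combined result is %shuffle2[j] = %v[m1[m2[j]]], and working the lanes gives

    j = 0:  m1[m2[0]] = m1[3] = 0
    j = 1:  m1[m2[1]] = m1[0] = 1
    j = 2:  m1[m2[2]] = m1[1] = 2
    j = 3:  m1[m2[3]] = m1[2] = 3

so the composed mask is the identity <0, 1, 2, 3> and both shufflevectors can be dropped.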
+
+; The swizzle can be moved after the operation.
+define <4 x i32> @shuffle_17and(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17and(
+; CHECK-NOT: shufflevector
+; CHECK: and <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = and <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17add(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17add(
+; CHECK-NOT: shufflevector
+; CHECK: add <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17addnsw(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17addnsw(
+; CHECK-NOT: shufflevector
+; CHECK: add nsw <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add nsw <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17addnuw(<4 x i32> %v1, <4 x i32> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17addnuw(
+; CHECK-NOT: shufflevector
+; CHECK: add nuw <4 x i32> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x i32> %v2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add nuw <4 x i32> %t1, %t2
+ ret <4 x i32> %r
+}
+
+define <4 x float> @shuffle_17fsub(<4 x float> %v1, <4 x float> %v2) nounwind uwtable {
+; CHECK-LABEL: @shuffle_17fsub(
+; CHECK-NOT: shufflevector
+; CHECK: fsub <4 x float> %v1, %v2
+; CHECK: shufflevector
+ %t1 = shufflevector <4 x float> %v1, <4 x float> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %t2 = shufflevector <4 x float> %v2, <4 x float> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = fsub <4 x float> %t1, %t2
+ ret <4 x float> %r
+}
+
+define <4 x i32> @shuffle_17addconst(<4 x i32> %v1, <4 x i32> %v2) {
+; CHECK-LABEL: @shuffle_17addconst(
+; CHECK-NOT: shufflevector
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = add <4 x i32> %v1, <i32 4, i32 1, i32 2, i32 3>
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = shufflevector <4 x i32> [[VAR1]], <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+; CHECK: ret <4 x i32> [[VAR2]]
+ %t1 = shufflevector <4 x i32> %v1, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %r = add <4 x i32> %t1, <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17add2(<4 x i32> %v) {
+; CHECK-LABEL: @shuffle_17add2(
+; CHECK-NOT: shufflevector
+; CHECK: [[VAR:%[a-zA-Z0-9.]+]] = shl <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
+; CHECK: ret <4 x i32> [[VAR]]
+ %t1 = shufflevector <4 x i32> %v, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %t2 = add <4 x i32> %t1, %t1
+ %r = shufflevector <4 x i32> %t2, <4 x i32> zeroinitializer,
+ <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %r
+}
+
+define <4 x i32> @shuffle_17mulsplat(<4 x i32> %v) {
+; CHECK-LABEL: @shuffle_17mulsplat(
+; CHECK-NOT: shufflevector
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = mul <4 x i32> %v, %v
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = shufflevector <4 x i32> [[VAR1]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK: ret <4 x i32> [[VAR2]]
+ %s1 = shufflevector <4 x i32> %v,
+ <4 x i32> zeroinitializer,
+ <4 x i32> zeroinitializer
+ %m1 = mul <4 x i32> %s1, %s1
+ %s2 = shufflevector <4 x i32> %m1,
+ <4 x i32> zeroinitializer,
+ <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %s2
+}
+
+; Do not reorder a shuffle and a binop if the LHS operands of the shuffles have different sizes
+define <2 x i32> @pr19717(<4 x i32> %in0, <2 x i32> %in1) {
+; CHECK-LABEL: @pr19717(
+; CHECK: shufflevector
+; CHECK: shufflevector
+; CHECK: mul
+ %shuffle = shufflevector <4 x i32> %in0, <4 x i32> %in0, <2 x i32> zeroinitializer
+ %shuffle4 = shufflevector <2 x i32> %in1, <2 x i32> %in1, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %shuffle4
+ ret <2 x i32> %mul
+}
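A type-level sketch of why the reorder is blocked here, reading the types straight off the IR above:

    %shuffle  : shufflevector of <4 x i32> %in0  ->  <2 x i32>   (LHS has 4 elements)
    %shuffle4 : shufflevector of <2 x i32> %in1  ->  <2 x i32>   (LHS has 2 elements)

Moving the mul above the shuffles would require multiplying a <4 x i32> by a <2 x i32>, which is not a well-formed instruction, so the mul must stay below the narrowing shuffles and all three instructions survive, as the CHECK lines expect.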
+
+define <4 x i16> @pr19717a(<8 x i16> %in0, <8 x i16> %in1) {
+; CHECK-LABEL: @pr19717a(
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = mul <8 x i16> %in0, %in1
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = shufflevector <8 x i16> [[VAR1]], <8 x i16> undef, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+; CHECK: ret <4 x i16> [[VAR2]]
+ %shuffle = shufflevector <8 x i16> %in0, <8 x i16> %in0, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+ %shuffle1 = shufflevector <8 x i16> %in1, <8 x i16> %in1, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+ %mul = mul <4 x i16> %shuffle, %shuffle1
+ ret <4 x i16> %mul
+}
+
+define <8 x i8> @pr19730(<16 x i8> %in0) {
+; CHECK-LABEL: @pr19730(
+; CHECK: shufflevector
+ %shuffle = shufflevector <16 x i8> %in0, <16 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %shuffle1 = shufflevector <8 x i8> %shuffle, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %shuffle1
+}
+
+define i32 @pr19737(<4 x i32> %in0) {
+; CHECK-LABEL: @pr19737(
+; CHECK: [[VAR:%[a-zA-Z0-9.]+]] = extractelement <4 x i32> %in0, i32 0
+; CHECK: ret i32 [[VAR]]
+ %shuffle.i = shufflevector <4 x i32> zeroinitializer, <4 x i32> %in0, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %neg.i = xor <4 x i32> %shuffle.i, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and.i = and <4 x i32> %in0, %neg.i
+ %rv = extractelement <4 x i32> %and.i, i32 0
+ ret i32 %rv
+}
+
+define <4 x i32> @pr20114(<4 x i32> %__mask) {
+; CHECK-LABEL: @pr20114
+; CHECK: shufflevector
+; CHECK: and
+ %mask01.i = shufflevector <4 x i32> %__mask, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
+ %masked_new.i.i.i = and <4 x i32> bitcast (<2 x i64> <i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64), i64 ptrtoint (<4 x i32> (<4 x i32>)* @pr20114 to i64)> to <4 x i32>), %mask01.i
+ ret <4 x i32> %masked_new.i.i.i
+}
diff --git a/test/Transforms/InstCombine/zext-bool-add-sub.ll b/test/Transforms/InstCombine/zext-bool-add-sub.ll
index d7f338b659b4..6fa4d70d6360 100644
--- a/test/Transforms/InstCombine/zext-bool-add-sub.ll
+++ b/test/Transforms/InstCombine/zext-bool-add-sub.ll
@@ -6,7 +6,7 @@ entry:
; CHECK-LABEL: @a(
; CHECK: [[TMP1:%.*]] = sext i1 %y to i32
; CHECK: [[TMP2:%.*]] = select i1 %x, i32 2, i32 1
-; CHECK-NEXT: add i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: add nsw i32 [[TMP2]], [[TMP1]]
%conv = zext i1 %x to i32
%conv3 = zext i1 %y to i32
%conv3.neg = sub i32 0, %conv3
diff --git a/test/Transforms/InstCombine/zext.ll b/test/Transforms/InstCombine/zext.ll
index 10eabf7aed46..b62c626c95c0 100644
--- a/test/Transforms/InstCombine/zext.ll
+++ b/test/Transforms/InstCombine/zext.ll
@@ -5,7 +5,41 @@ define i64 @test_sext_zext(i16 %A) {
%c1 = zext i16 %A to i32 ; <i32> [#uses=1]
%c2 = sext i32 %c1 to i64 ; <i64> [#uses=1]
ret i64 %c2
+
+; CHECK-LABEL: @test_sext_zext
; CHECK-NOT: %c1
; CHECK: %c2 = zext i16 %A to i64
; CHECK: ret i64 %c2
}
+
+define <2 x i64> @test2(<2 x i1> %A) {
+ %xor = xor <2 x i1> %A, <i1 true, i1 true>
+ %zext = zext <2 x i1> %xor to <2 x i64>
+ ret <2 x i64> %zext
+
+; CHECK-LABEL: @test2
+; CHECK-NEXT: zext <2 x i1> %A to <2 x i64>
+; CHECK-NEXT: xor <2 x i64> %1, <i64 1, i64 1>
+}
+
+define <2 x i64> @test3(<2 x i64> %A) {
+ %trunc = trunc <2 x i64> %A to <2 x i32>
+ %and = and <2 x i32> %trunc, <i32 23, i32 42>
+ %zext = zext <2 x i32> %and to <2 x i64>
+ ret <2 x i64> %zext
+
+; CHECK-LABEL: @test3
+; CHECK-NEXT: and <2 x i64> %A, <i64 23, i64 42>
+}
+
+define <2 x i64> @test4(<2 x i64> %A) {
+ %trunc = trunc <2 x i64> %A to <2 x i32>
+ %and = and <2 x i32> %trunc, <i32 23, i32 42>
+ %xor = xor <2 x i32> %and, <i32 23, i32 42>
+ %zext = zext <2 x i32> %xor to <2 x i64>
+ ret <2 x i64> %zext
+
+; CHECK-LABEL: @test4
+; CHECK-NEXT: xor <2 x i64> %A, <i64 4294967295, i64 4294967295>
+; CHECK-NEXT: and <2 x i64> %1, <i64 23, i64 42>
+}
diff --git a/test/Transforms/InstMerge/ld_hoist_st_sink.ll b/test/Transforms/InstMerge/ld_hoist_st_sink.ll
new file mode 100644
index 000000000000..978160aa002e
--- /dev/null
+++ b/test/Transforms/InstMerge/ld_hoist_st_sink.ll
@@ -0,0 +1,84 @@
+; Tests to make sure that loads and stores in a diamond get merged
+; Loads are hoisted into the header. Stores are sunk into the footer.
+; RUN: opt -basicaa -memdep -mldst-motion -S < %s | FileCheck %s
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+%struct.node = type { i64, %struct.node*, %struct.node*, %struct.node*, i64, %struct.arc*, i64, i64, i64 }
+%struct.arc = type { i64, i64, i64 }
+
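For orientation, the loop body below forms the diamond described in the header comment (a sketch of the CFG only, with block names taken from the IR):

                 while.body               (diamond header: the loads shared by both arms are hoisted out of the arms)
                 /        \
           if.then        if.else         (each arm loads through the node's arc and pred pointers
                 \        /                and stores to the same node field)
                 if.end                   (diamond footer: the shared store is sunk here)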
+define i64 @foo(%struct.node* nocapture readonly %r) nounwind {
+entry:
+ %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.017 = load %struct.node** %node.0.in16, align 8
+ %tobool18 = icmp eq %struct.node* %node.017, null
+ br i1 %tobool18, label %while.end, label %while.body.preheader
+
+; CHECK-LABEL: while.body.preheader
+while.body.preheader: ; preds = %entry
+; CHECK: load
+ br label %while.body
+
+while.body: ; preds = %while.body.preheader, %if.end
+ %node.020 = phi %struct.node* [ %node.0, %if.end ], [ %node.017, %while.body.preheader ]
+ %sum.019 = phi i64 [ %inc, %if.end ], [ 0, %while.body.preheader ]
+ %orientation = getelementptr inbounds %struct.node* %node.020, i64 0, i32 4
+ %0 = load i64* %orientation, align 8
+ %cmp = icmp eq i64 %0, 1
+ br i1 %cmp, label %if.then, label %if.else
+; CHECK: if.then
+if.then: ; preds = %while.body
+ %a = getelementptr inbounds %struct.node* %node.020, i64 0, i32 5
+; CHECK-NOT: load %struct.arc
+ %1 = load %struct.arc** %a, align 8
+ %cost = getelementptr inbounds %struct.arc* %1, i64 0, i32 0
+; CHECK-NOT: load i64*
+ %2 = load i64* %cost, align 8
+ %pred = getelementptr inbounds %struct.node* %node.020, i64 0, i32 1
+; CHECK-NOT: load %struct.node**
+ %3 = load %struct.node** %pred, align 8
+ %p = getelementptr inbounds %struct.node* %3, i64 0, i32 6
+; CHECK-NOT: load i64*
+ %4 = load i64* %p, align 8
+ %add = add nsw i64 %4, %2
+ %p1 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 6
+; CHECK-NOT: store i64
+ store i64 %add, i64* %p1, align 8
+ br label %if.end
+
+; CHECK: if.else
+if.else: ; preds = %while.body
+ %pred2 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 1
+; CHECK-NOT: load %struct.node**
+ %5 = load %struct.node** %pred2, align 8
+ %p3 = getelementptr inbounds %struct.node* %5, i64 0, i32 6
+; CHECK-NOT: load i64*
+ %6 = load i64* %p3, align 8
+ %a4 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 5
+; CHECK-NOT: load %struct.arc**
+ %7 = load %struct.arc** %a4, align 8
+ %cost5 = getelementptr inbounds %struct.arc* %7, i64 0, i32 0
+; CHECK-NOT: load i64*
+ %8 = load i64* %cost5, align 8
+ %sub = sub nsw i64 %6, %8
+ %p6 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 6
+; CHECK-NOT: store i64
+ store i64 %sub, i64* %p6, align 8
+ br label %if.end
+
+; CHECK: if.end
+if.end: ; preds = %if.else, %if.then
+; CHECK: store
+ %inc = add nsw i64 %sum.019, 1
+ %node.0.in = getelementptr inbounds %struct.node* %node.020, i64 0, i32 2
+ %node.0 = load %struct.node** %node.0.in, align 8
+ %tobool = icmp eq %struct.node* %node.0, null
+ br i1 %tobool, label %while.end.loopexit, label %while.body
+
+while.end.loopexit: ; preds = %if.end
+ %inc.lcssa = phi i64 [ %inc, %if.end ]
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %sum.0.lcssa = phi i64 [ 0, %entry ], [ %inc.lcssa, %while.end.loopexit ]
+ ret i64 %sum.0.lcssa
+}
diff --git a/test/Transforms/InstSimplify/2010-12-20-Distribute.ll b/test/Transforms/InstSimplify/2010-12-20-Distribute.ll
deleted file mode 100644
index 9ea0a5e10708..000000000000
--- a/test/Transforms/InstSimplify/2010-12-20-Distribute.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; RUN: opt < %s -instsimplify -S | FileCheck %s
-
-define i32 @factorize(i32 %x, i32 %y) {
-; CHECK-LABEL: @factorize(
-; (X | 1) & (X | 2) -> X | (1 & 2) -> X
- %l = or i32 %x, 1
- %r = or i32 %x, 2
- %z = and i32 %l, %r
- ret i32 %z
-; CHECK: ret i32 %x
-}
-
-define i32 @factorize2(i32 %x) {
-; CHECK-LABEL: @factorize2(
-; 3*X - 2*X -> X
- %l = mul i32 3, %x
- %r = mul i32 2, %x
- %z = sub i32 %l, %r
- ret i32 %z
-; CHECK: ret i32 %x
-}
-
-define i32 @factorize3(i32 %x, i32 %a, i32 %b) {
-; CHECK-LABEL: @factorize3(
-; (X | (A|B)) & (X | B) -> X | ((A|B) & B) -> X | B
- %aORb = or i32 %a, %b
- %l = or i32 %x, %aORb
- %r = or i32 %x, %b
- %z = and i32 %l, %r
- ret i32 %z
-; CHECK: ret i32 %r
-}
-
-define i32 @factorize4(i32 %x, i32 %y) {
-; CHECK-LABEL: @factorize4(
- %sh = shl i32 %y, 1
- %ml = mul i32 %sh, %x
- %mr = mul i32 %x, %y
- %s = sub i32 %ml, %mr
- ret i32 %s
-; CHECK: ret i32 %mr
-}
-
-define i32 @factorize5(i32 %x, i32 %y) {
-; CHECK-LABEL: @factorize5(
- %sh = mul i32 %y, 2
- %ml = mul i32 %sh, %x
- %mr = mul i32 %x, %y
- %s = sub i32 %ml, %mr
- ret i32 %s
-; CHECK: ret i32 %mr
-}
-
-define i32 @expand(i32 %x) {
-; CHECK-LABEL: @expand(
-; ((X & 1) | 2) & 1 -> ((X & 1) & 1) | (2 & 1) -> (X & 1) | 0 -> X & 1
- %a = and i32 %x, 1
- %b = or i32 %a, 2
- %c = and i32 %b, 1
- ret i32 %c
-; CHECK: ret i32 %a
-}
diff --git a/test/Transforms/InstSimplify/apint-or.ll b/test/Transforms/InstSimplify/apint-or.ll
new file mode 100644
index 000000000000..5d314db7133d
--- /dev/null
+++ b/test/Transforms/InstSimplify/apint-or.ll
@@ -0,0 +1,37 @@
+; RUN: opt < %s -instsimplify -S | not grep or
+
+; Test the case where integer BitWidth <= 64 && BitWidth % 2 != 0.
+define i39 @test1(i39 %V, i39 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i39 274877906943, -1 ;; C2 = 274877906943
+ %N = and i39 %M, 274877906944
+ %A = add i39 %V, %N
+ %B = and i39 %A, %C1
+ %D = and i39 %V, 274877906943
+ %R = or i39 %B, %D
+ ret i39 %R
+; CHECK-LABEL: @test1
+; CHECK-NEXT: and {{.*}}, -274877906944
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+}
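To make the conditions in the comment concrete: C2 = 274877906943 is 2^38 - 1 (the "0+1+" low-bit mask), C1 = ~C2, and %N = %M & 274877906944 can only have bit 38 set, so (N & C2) == 0 holds. An 8-bit analogue of the same identity, with values chosen purely for illustration:

    C2 = 0x0f, C1 = 0xf0, V = 0x53, N = 0x30        (N & C2 == 0)
    V + N        = 0x83
    (V+N) & C1   = 0x80
    V & C2       = 0x03
    or           = 0x83  ==  V + N

Because N has no bits under C2, adding N cannot disturb the low bits of V, so OR-ing those low bits back in simply reconstructs V + N, which is the replacement the test expects.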
+
+; Test the case where integer BitWidth > 64 && BitWidth <= 1024.
+define i399 @test2(i399 %V, i399 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i399 274877906943, -1 ;; C2 = 274877906943
+ %N = and i399 %M, 18446742974197923840
+ %A = add i399 %V, %N
+ %B = and i399 %A, %C1
+ %D = and i399 %V, 274877906943
+ %R = or i399 %B, %D
+ ret i399 %R
+; CHECK-LABEL: @test2
+; CHECK-NEXT: and {{.*}}, 18446742974197923840
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+}
diff --git a/test/Transforms/InstSimplify/ashr-nop.ll b/test/Transforms/InstSimplify/ashr-nop.ll
new file mode 100644
index 000000000000..0914d725e40d
--- /dev/null
+++ b/test/Transforms/InstSimplify/ashr-nop.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+; CHECK-LABEL: @foo
+; CHECK-NOT: ashr
+define i32 @foo(i32 %x) {
+ %o = and i32 %x, 1
+ %n = add i32 %o, -1
+ %t = ashr i32 %n, 17
+ ret i32 %t
+}
diff --git a/test/Transforms/InstSimplify/compare.ll b/test/Transforms/InstSimplify/compare.ll
index abb38695e710..94872d35bb97 100644
--- a/test/Transforms/InstSimplify/compare.ll
+++ b/test/Transforms/InstSimplify/compare.ll
@@ -739,3 +739,233 @@ define i1 @non_inbounds_gep_compare2(i64* %a) {
ret i1 %cmp
; CHECK-NEXT: ret i1 true
}
+
+define <4 x i8> @vectorselectfold(<4 x i8> %a, <4 x i8> %b) {
+ %false = icmp ne <4 x i8> zeroinitializer, zeroinitializer
+ %sel = select <4 x i1> %false, <4 x i8> %a, <4 x i8> %b
+ ret <4 x i8> %sel
+
+; CHECK-LABEL: @vectorselectfold
+; CHECK-NEXT: ret <4 x i8> %b
+}
+
+define <4 x i8> @vectorselectfold2(<4 x i8> %a, <4 x i8> %b) {
+ %true = icmp eq <4 x i8> zeroinitializer, zeroinitializer
+ %sel = select <4 x i1> %true, <4 x i8> %a, <4 x i8> %b
+ ret <4 x i8> %sel
+
+; CHECK-LABEL: @vectorselectfold2
+; CHECK-NEXT: ret <4 x i8> %a
+}
+
+define i1 @compare_always_true_slt(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp slt i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_true_slt
+; CHECK-NEXT: ret i1 true
+}
+
+define i1 @compare_always_true_sle(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp sle i32 %2, 0
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_true_sle
+; CHECK-NEXT: ret i1 true
+}
+
+define i1 @compare_always_false_sgt(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp sgt i32 %2, 0
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_sgt
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @compare_always_false_sge(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp sge i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_sge
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @compare_always_false_eq(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp eq i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_eq
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @compare_always_false_ne(i16 %a) {
+ %1 = zext i16 %a to i32
+ %2 = sub nsw i32 0, %1
+ %3 = icmp ne i32 %2, 1
+ ret i1 %3
+
+; CHECK-LABEL: @compare_always_false_ne
+; CHECK-NEXT: ret i1 true
+}
+
+define i1 @compare_dividend(i32 %a) {
+ %div = sdiv i32 2, %a
+ %cmp = icmp eq i32 %div, 3
+ ret i1 %cmp
+
+; CHECK-LABEL: @compare_dividend
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @lshr_ugt_false(i32 %a) {
+ %shr = lshr i32 1, %a
+ %cmp = icmp ugt i32 %shr, 1
+ ret i1 %cmp
+; CHECK-LABEL: @lshr_ugt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @exact_lshr_ugt_false(i32 %a) {
+ %shr = lshr exact i32 30, %a
+ %cmp = icmp ult i32 %shr, 15
+ ret i1 %cmp
+; CHECK-LABEL: @exact_lshr_ugt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @lshr_sgt_false(i32 %a) {
+ %shr = lshr i32 1, %a
+ %cmp = icmp sgt i32 %shr, 1
+ ret i1 %cmp
+; CHECK-LABEL: @lshr_sgt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @ashr_sgt_false(i32 %a) {
+ %shr = ashr i32 -30, %a
+ %cmp = icmp sgt i32 %shr, -1
+ ret i1 %cmp
+; CHECK-LABEL: @ashr_sgt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @exact_ashr_sgt_false(i32 %a) {
+ %shr = ashr exact i32 -30, %a
+ %cmp = icmp sgt i32 %shr, -15
+ ret i1 %cmp
+; CHECK-LABEL: @exact_ashr_sgt_false
+; CHECK-NEXT: ret i1 false
+}
+
+define i1 @nonnull_arg(i32* nonnull %i) {
+ %cmp = icmp eq i32* %i, null
+ ret i1 %cmp
+; CHECK-LABEL: @nonnull_arg
+; CHECK: ret i1 false
+}
+
+define i1 @nonnull_deref_arg(i32* dereferenceable(4) %i) {
+ %cmp = icmp eq i32* %i, null
+ ret i1 %cmp
+; CHECK-LABEL: @nonnull_deref_arg
+; CHECK: ret i1 false
+}
+
+define i1 @nonnull_deref_as_arg(i32 addrspace(1)* dereferenceable(4) %i) {
+ %cmp = icmp eq i32 addrspace(1)* %i, null
+ ret i1 %cmp
+; CHECK-LABEL: @nonnull_deref_as_arg
+; CHECK: icmp
+; CHECK: ret
+}
+
+declare nonnull i32* @returns_nonnull_helper()
+define i1 @returns_nonnull() {
+ %call = call nonnull i32* @returns_nonnull_helper()
+ %cmp = icmp eq i32* %call, null
+ ret i1 %cmp
+; CHECK-LABEL: @returns_nonnull
+; CHECK: ret i1 false
+}
+
+declare dereferenceable(4) i32* @returns_nonnull_deref_helper()
+define i1 @returns_nonnull_deref() {
+ %call = call dereferenceable(4) i32* @returns_nonnull_deref_helper()
+ %cmp = icmp eq i32* %call, null
+ ret i1 %cmp
+; CHECK-LABEL: @returns_nonnull_deref
+; CHECK: ret i1 false
+}
+
+declare dereferenceable(4) i32 addrspace(1)* @returns_nonnull_deref_as_helper()
+define i1 @returns_nonnull_as_deref() {
+ %call = call dereferenceable(4) i32 addrspace(1)* @returns_nonnull_deref_as_helper()
+ %cmp = icmp eq i32 addrspace(1)* %call, null
+ ret i1 %cmp
+; CHECK-LABEL: @returns_nonnull_as_deref
+; CHECK: icmp
+; CHECK: ret
+}
+
+; If a bit is known to be zero for A and known to be one for B,
+; then A and B cannot be equal.
+define i1 @icmp_eq_const(i32 %a) nounwind {
+ %b = mul nsw i32 %a, -2
+ %c = icmp eq i32 %b, 1
+ ret i1 %c
+
+; CHECK-LABEL: @icmp_eq_const
+; CHECK-NEXT: ret i1 false
+}
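A worked instance of the known-bits argument in the comment above, using nothing beyond the constants in the test: %b = mul nsw i32 %a, -2 is always even, so bit 0 of %b is known zero, while bit 0 of the constant 1 is one. For example:

    %a = 7   ->   %b = 7 * -2 = -14 = 0xfffffff2    (bit 0 = 0)
    constant               1       =  0x00000001    (bit 0 = 1)

The two values disagree in bit 0 for every %a, so the icmp eq folds to false, and the icmp ne in the following test folds to true.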
+
+define i1 @icmp_ne_const(i32 %a) nounwind {
+ %b = mul nsw i32 %a, -2
+ %c = icmp ne i32 %b, 1
+ ret i1 %c
+
+; CHECK-LABEL: @icmp_ne_const
+; CHECK-NEXT: ret i1 true
+}
+
+define i1 @icmp_sdiv_int_min(i32 %a) {
+ %div = sdiv i32 -2147483648, %a
+ %cmp = icmp ne i32 %div, -1073741824
+ ret i1 %cmp
+
+; CHECK-LABEL: @icmp_sdiv_int_min
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 -2147483648, %a
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[DIV]], -1073741824
+; CHECK-NEXT: ret i1 [[CMP]]
+}
+
+define i1 @icmp_sdiv_pr20288(i64 %a) {
+ %div = sdiv i64 %a, -8589934592
+ %cmp = icmp ne i64 %div, 1073741824
+ ret i1 %cmp
+
+; CHECK-LABEL: @icmp_sdiv_pr20288
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 %a, -8589934592
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[DIV]], 1073741824
+; CHECK-NEXT: ret i1 [[CMP]]
+}
+
+define i1 @icmp_sdiv_neg1(i64 %a) {
+ %div = sdiv i64 %a, -1
+ %cmp = icmp ne i64 %div, 1073741824
+ ret i1 %cmp
+
+; CHECK-LABEL: @icmp_sdiv_neg1
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 %a, -1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[DIV]], 1073741824
+; CHECK-NEXT: ret i1 [[CMP]]
+}
diff --git a/test/Transforms/InstSimplify/dead-code-removal.ll b/test/Transforms/InstSimplify/dead-code-removal.ll
new file mode 100644
index 000000000000..e181f3b60d5b
--- /dev/null
+++ b/test/Transforms/InstSimplify/dead-code-removal.ll
@@ -0,0 +1,15 @@
+; RUN: opt -instsimplify -S < %s | FileCheck %s
+
+define void @foo() nounwind {
+ br i1 undef, label %1, label %4
+
+; <label>:1 ; preds = %1, %0
+; CHECK-NOT: phi
+; CHECK-NOT: sub
+ %2 = phi i32 [ %3, %1 ], [ undef, %0 ]
+ %3 = sub i32 0, undef
+ br label %1
+
+; <label>:4 ; preds = %0
+ ret void
+}
diff --git a/test/Transforms/InstSimplify/undef.ll b/test/Transforms/InstSimplify/undef.ll
index 23cd50f92b40..181c2efa2dc0 100644
--- a/test/Transforms/InstSimplify/undef.ll
+++ b/test/Transforms/InstSimplify/undef.ll
@@ -153,3 +153,10 @@ define i64 @test18(i64 %a) {
%r = call i64 (i64)* undef(i64 %a)
ret i64 %r
}
+
+; CHECK-LABEL: @test19
+; CHECK: ret <4 x i8> undef
+define <4 x i8> @test19(<4 x i8> %a) {
+ %b = shl <4 x i8> %a, <i8 8, i8 9, i8 undef, i8 -1>
+ ret <4 x i8> %b
+}
diff --git a/test/Transforms/InstSimplify/vector_gep.ll b/test/Transforms/InstSimplify/vector_gep.ll
index 5ac1ddef64f8..17814637b3dd 100644
--- a/test/Transforms/InstSimplify/vector_gep.ll
+++ b/test/Transforms/InstSimplify/vector_gep.ll
@@ -1,4 +1,7 @@
-;RUN: opt -instsimplify -disable-output < %s
+; RUN: opt -S -instsimplify < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
declare void @helper(<2 x i8*>)
define void @test(<2 x i8*> %a) {
%A = getelementptr <2 x i8*> %a, <2 x i32> <i32 0, i32 0>
@@ -6,3 +9,47 @@ define void @test(<2 x i8*> %a) {
ret void
}
+define <4 x i8*> @test1(<4 x i8*> %a) {
+ %gep = getelementptr <4 x i8*> %a, <4 x i32> zeroinitializer
+ ret <4 x i8*> %gep
+
+; CHECK-LABEL: @test1
+; CHECK-NEXT: ret <4 x i8*> %a
+}
+
+define <4 x i8*> @test2(<4 x i8*> %a) {
+ %gep = getelementptr <4 x i8*> %a
+ ret <4 x i8*> %gep
+
+; CHECK-LABEL: @test2
+; CHECK-NEXT: ret <4 x i8*> %a
+}
+
+%struct = type { double, float }
+
+define <4 x float*> @test3() {
+ %gep = getelementptr <4 x %struct*> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x float*> %gep
+
+; CHECK-LABEL: @test3
+; CHECK-NEXT: ret <4 x float*> undef
+}
+
+%struct.empty = type { }
+
+define <4 x %struct.empty*> @test4(<4 x %struct.empty*> %a) {
+ %gep = getelementptr <4 x %struct.empty*> %a, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x %struct.empty*> %gep
+
+; CHECK-LABEL: @test4
+; CHECK-NEXT: ret <4 x %struct.empty*> %a
+}
+
+define <4 x i8*> @test5() {
+ %c = inttoptr <4 x i64> <i64 1, i64 2, i64 3, i64 4> to <4 x i8*>
+ %gep = getelementptr <4 x i8*> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i8*> %gep
+
+; CHECK-LABEL: @test5
+; CHECK-NEXT: ret <4 x i8*> getelementptr (<4 x i8*> <i8* inttoptr (i64 1 to i8*), i8* inttoptr (i64 2 to i8*), i8* inttoptr (i64 3 to i8*), i8* inttoptr (i64 4 to i8*)>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
+}
diff --git a/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll b/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
index 47cf3f0373e4..16bfe2a46091 100644
--- a/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
+++ b/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
@@ -1,10 +1,17 @@
-; RUN: opt < %s -internalize -internalize-public-api-list main -S | grep internal | count 3
+; RUN: opt < %s -internalize -internalize-public-api-list main -S | FileCheck %s
@A = global i32 0
+; CHECK: @A = internal global i32 0
+
@B = alias i32* @A
-@C = alias i32* @B
+; CHECK: @B = alias internal i32* @A
+
+@C = alias i32* @A
+; CHECK: @C = alias internal i32* @A
define i32 @main() {
%tmp = load i32* @C
ret i32 %tmp
}
+
+; CHECK: define i32 @main() {
diff --git a/test/Transforms/Internalize/lists.ll b/test/Transforms/Internalize/lists.ll
index 83e441a2dfe7..548c8aa267b5 100644
--- a/test/Transforms/Internalize/lists.ll
+++ b/test/Transforms/Internalize/lists.ll
@@ -1,7 +1,7 @@
; No arguments means internalize everything
; RUN: opt < %s -internalize -S | FileCheck --check-prefix=ALL %s
-; Non existent files should be treated as if they were empty (so internalize
+; Non-existent files should be treated as if they were empty (so internalize
; everything)
; RUN: opt < %s -internalize -internalize-public-api-file /nonexistent/file 2> /dev/null -S | FileCheck --check-prefix=ALL %s
@@ -48,3 +48,12 @@ define void @foo() {
define available_externally void @bar() {
ret void
}
+
+; ALL: define dllexport void @export_foo() {
+; FOO_AND_J: define dllexport void @export_foo() {
+; FOO_AND_BAR: define dllexport void @export_foo() {
+; FOO_J_AND_BAR: define dllexport void @export_foo() {
+define dllexport void @export_foo() {
+ ret void
+}
+
diff --git a/test/Transforms/Internalize/local-visibility.ll b/test/Transforms/Internalize/local-visibility.ll
new file mode 100644
index 000000000000..c24d4b7f32a0
--- /dev/null
+++ b/test/Transforms/Internalize/local-visibility.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -internalize -S | FileCheck %s
+; Internalized symbols should have default visibility.
+
+; CHECK: @global = global i32 0
+@global = global i32 0
+@llvm.used = appending global [1 x i32*] [i32* @global]
+
+; CHECK: @hidden.variable = internal global i32 0
+@hidden.variable = hidden global i32 0
+; CHECK: @protected.variable = internal global i32 0
+@protected.variable = protected global i32 0
+
+; CHECK: @hidden.alias = alias internal i32* @global
+@hidden.alias = hidden alias i32* @global
+; CHECK: @protected.alias = alias internal i32* @global
+@protected.alias = protected alias i32* @global
+
+; CHECK: define internal void @hidden.function() {
+define hidden void @hidden.function() {
+ ret void
+}
+; CHECK: define internal void @protected.function() {
+define protected void @protected.function() {
+ ret void
+}
diff --git a/test/Transforms/JumpThreading/phi-eq.ll b/test/Transforms/JumpThreading/phi-eq.ll
index 40d3c7edd05d..e05d5ee7c974 100644
--- a/test/Transforms/JumpThreading/phi-eq.ll
+++ b/test/Transforms/JumpThreading/phi-eq.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | opt -jump-threading | llvm-dis | FileCheck %s
+; RUN: opt < %s -jump-threading -S | FileCheck %s
; Test whether two consecutive switches with identical structures assign the
; proper value to the proper variable. This is really testing
; Instruction::isIdenticalToWhenDefined, as previously that function was
diff --git a/test/Transforms/JumpThreading/pr15851_hang.ll b/test/Transforms/JumpThreading/pr15851_hang.ll
new file mode 100644
index 000000000000..0484bc9f9dc9
--- /dev/null
+++ b/test/Transforms/JumpThreading/pr15851_hang.ll
@@ -0,0 +1,22 @@
+; RUN: opt -S -jump-threading < %s | FileCheck %s
+
+; CHECK-LABEL: @f(
+; CHECK-LABEL: entry
+; CHECK: ret void
+; CHECK-NOT: for.cond1
+; CHECK-NOT: for.body
+
+define void @f() {
+entry:
+ ret void
+
+for.cond1:
+ %i.025 = phi i32 [ %inc, %for.body ], [ %inc, %for.body ], [ 1, %for.cond1 ]
+ %cmp = icmp slt i32 %i.025, 2
+ br i1 %cmp, label %for.body, label %for.cond1
+
+for.body:
+ %inc = add nsw i32 %i.025, 0
+ %a = icmp ugt i32 %inc, 2
+ br i1 %a, label %for.cond1, label %for.cond1
+}
diff --git a/test/Transforms/JumpThreading/select.ll b/test/Transforms/JumpThreading/select.ll
index 201e604e0c5e..545e86c082f4 100644
--- a/test/Transforms/JumpThreading/select.ll
+++ b/test/Transforms/JumpThreading/select.ll
@@ -127,7 +127,7 @@ L4:
; CHECK: test_switch_default
; CHECK: entry:
; CHECK: load
-; CHECK: switch
+; CHECK: icmp
; CHECK: [[THREADED:[A-Za-z.0-9]+]]:
; CHECK: store
; CHECK: br
diff --git a/test/Transforms/LICM/extra-copies.ll b/test/Transforms/LICM/extra-copies.ll
new file mode 100644
index 000000000000..ef52f9f404c1
--- /dev/null
+++ b/test/Transforms/LICM/extra-copies.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -licm -S | FileCheck %s
+; PR19835
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @f(i32 %x) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %storemerge4 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %mul = mul nsw i32 %x, %x
+ %add2 = add nsw i32 %mul, %x
+ %mul3 = add nsw i32 %add2, %mul
+ %inc = add nsw i32 %storemerge4, 1
+ %cmp = icmp slt i32 %inc, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ %a9.0.lcssa = phi i32 [ %mul3, %for.body ]
+ ret i32 %a9.0.lcssa
+}
+
+; Test that there is exactly one copy of mul nsw i32 %x, %x in the exit block.
+; CHECK: define i32 @f(i32 [[X:%.*]])
+; CHECK: for.end:
+; CHECK-NOT: mul nsw i32 [[X]], [[X]]
+; CHECK: mul nsw i32 [[X]], [[X]]
+; CHECK-NOT: mul nsw i32 [[X]], [[X]]
diff --git a/test/Transforms/LICM/hoist-bitcast-load.ll b/test/Transforms/LICM/hoist-bitcast-load.ll
new file mode 100644
index 000000000000..fa61eaf5b46f
--- /dev/null
+++ b/test/Transforms/LICM/hoist-bitcast-load.ll
@@ -0,0 +1,239 @@
+; RUN: opt -S -basicaa -licm < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Make sure the basic alloca pointer hoisting works:
+; CHECK-LABEL: @test1
+; CHECK: load i32* %c, align 4
+; CHECK: for.body:
+
+; Function Attrs: nounwind uwtable
+define void @test1(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ %c = alloca i32
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i32* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; Make sure the basic alloca pointer hoisting works through a bitcast to a
+; pointer to a smaller type:
+; CHECK-LABEL: @test2
+; CHECK: load i32* %c, align 4
+; CHECK: for.body:
+
+; Function Attrs: nounwind uwtable
+define void @test2(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ %ca = alloca i64
+ %c = bitcast i64* %ca to i32*
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i32* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; Make sure the basic alloca pointer hoisting works through an addrspacecast
+; CHECK-LABEL: @test2_addrspacecast
+; CHECK: load i32 addrspace(1)* %c, align 4
+; CHECK: for.body:
+
+; Function Attrs: nounwind uwtable
+define void @test2_addrspacecast(i32 addrspace(1)* nocapture %a, i32 addrspace(1)* nocapture readonly %b, i32 %n) #0 {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ %ca = alloca i64
+ %c = addrspacecast i64* %ca to i32 addrspace(1)*
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32 addrspace(1)* %a, i64 %indvars.iv
+ %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i32 addrspace(1)* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32 addrspace(1)* %b, i64 %indvars.iv
+ %2 = load i32 addrspace(1)* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32 addrspace(1)* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; Make sure the basic alloca pointer hoisting works through a bitcast to a
+; pointer to a smaller type (where the bitcast also needs to be hoisted):
+; CHECK-LABEL: @test3
+; CHECK: load i32* %c, align 4
+; CHECK: for.body:
+
+; Function Attrs: nounwind uwtable
+define void @test3(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ %ca = alloca i64
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %c = bitcast i64* %ca to i32*
+ %1 = load i32* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; Make sure the basic alloca pointer hoisting does not happen through a bitcast
+; to a pointer to a larger type:
+; CHECK-LABEL: @test4
+; CHECK: for.body:
+; CHECK: load i32* %c, align 4
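+; (Here an i32 load through %c would read 4 bytes from the 2-byte alloca %ca,
+; past the end of the object, so it is not known dereferenceable and has to
+; stay under the %cmp1 guard inside the loop.)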
+
+; Function Attrs: nounwind uwtable
+define void @test4(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ %ca = alloca i16
+ %c = bitcast i16* %ca to i32*
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i32* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; Don't crash on bitcasts to unsized types.
+; CHECK-LABEL: @test5
+; CHECK: for.body:
+; CHECK: load i32* %c, align 4
+
+%atype = type opaque
+
+; Function Attrs: nounwind uwtable
+define void @test5(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ %ca = alloca i16
+ %cab = bitcast i16* %ca to %atype*
+ %c = bitcast %atype* %cab to i32*
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i32* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+attributes #0 = { nounwind uwtable }
+
diff --git a/test/Transforms/LICM/hoist-deref-load.ll b/test/Transforms/LICM/hoist-deref-load.ll
new file mode 100644
index 000000000000..c230d1dcd43b
--- /dev/null
+++ b/test/Transforms/LICM/hoist-deref-load.ll
@@ -0,0 +1,168 @@
+; RUN: opt -S -basicaa -licm < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; This test represents the following function:
+; void test1(int * __restrict__ a, int * __restrict__ b, int &c, int n) {
+; for (int i = 0; i < n; ++i)
+; if (a[i] > 0)
+; a[i] = c*b[i];
+; }
+; and we want to hoist the load of %c out of the loop. This can be done only
+; because the dereferenceable attribute is on %c.
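+; Conceptually, the hoisted form corresponds to source along the lines of
+; (a rough sketch only; the CHECK lines just pin the position of the load):
+;   int t = c;
+;   for (int i = 0; i < n; ++i)
+;     if (a[i] > 0)
+;       a[i] = t * b[i];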
+
+; CHECK-LABEL: @test1
+; CHECK: load i32* %c, align 4
+; CHECK: for.body:
+
+define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull dereferenceable(4) %c, i32 %n) #0 {
+entry:
+ %cmp11 = icmp sgt i32 %n, 0
+ br i1 %cmp11, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i32* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; This is the same as @test1, but without the dereferenceable attribute on %c.
+; Without this attribute, we should not hoist the load of %c.
+
+; CHECK-LABEL: @test2
+; CHECK: if.then:
+; CHECK: load i32* %c, align 4
+
+define void @test2(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull %c, i32 %n) #0 {
+entry:
+ %cmp11 = icmp sgt i32 %n, 0
+ br i1 %cmp11, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %1 = load i32* %c, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; This test represents the following function:
+; void test3(int * restrict a, int * restrict b, int c[static 3], int n) {
+; for (int i = 0; i < n; ++i)
+; if (a[i] > 0)
+; a[i] = c[2]*b[i];
+; }
+; and we want to hoist the load of c[2] out of the loop. This can be done only
+; because the dereferenceable attribute is on %c.
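+; Here c[static 3] promises at least 3 * sizeof(int) = 12 dereferenceable
+; bytes, encoded below as dereferenceable(12); c[2] occupies bytes 8..11,
+; which is covered, so the speculative load is safe (contrast @test4, where
+; dereferenceable(11) is one byte short).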
+
+; CHECK-LABEL: @test3
+; CHECK: load i32* %c2, align 4
+; CHECK: for.body:
+
+define void @test3(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(12) %c, i32 %n) #0 {
+entry:
+ %cmp11 = icmp sgt i32 %n, 0
+ br i1 %cmp11, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %c2 = getelementptr inbounds i32* %c, i64 2
+ %1 = load i32* %c2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+; This is the same as @test3, but with a dereferenceable attribute on %c with a
+; size too small to cover c[2] (and so we should not hoist it).
+
+; CHECK-LABEL: @test4
+; CHECK: if.then:
+; CHECK: load i32* %c2, align 4
+
+define void @test4(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(11) %c, i32 %n) #0 {
+entry:
+ %cmp11 = icmp sgt i32 %n, 0
+ br i1 %cmp11, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 0
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %c2 = getelementptr inbounds i32* %c, i64 2
+ %1 = load i32* %c2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = load i32* %arrayidx3, align 4
+ %mul = mul nsw i32 %2, %1
+ store i32 %mul, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc, %entry
+ ret void
+}
+
+attributes #0 = { nounwind uwtable }
+
diff --git a/test/Transforms/LICM/lcssa-ssa-promoter.ll b/test/Transforms/LICM/lcssa-ssa-promoter.ll
new file mode 100644
index 000000000000..5df3ef12181b
--- /dev/null
+++ b/test/Transforms/LICM/lcssa-ssa-promoter.ll
@@ -0,0 +1,76 @@
+; RUN: opt -S -basicaa -licm < %s | FileCheck %s
+;
+; Manually validate that LCSSA form is preserved even after SSAUpdater is used
+; to promote values in the loop bodies.
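+; In LCSSA form, every value defined inside a loop and used outside of it is
+; used only through phi nodes in the loop's exit blocks; the CHECK lines below
+; pin those phis (the *_LCSSA patterns) after promotion.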
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@x = common global i32 0, align 4
+@y = common global i32 0, align 4
+
+define void @PR18688() {
+; CHECK-LABEL: @PR18688(
+
+entry:
+ br i1 undef, label %return, label %outer.preheader
+
+outer.preheader:
+ br label %outer.header
+; CHECK: outer.preheader:
+; CHECK: br label %outer.header
+
+outer.header:
+ store i32 0, i32* @x, align 4
+ br i1 undef, label %outer.latch, label %inner.preheader
+; CHECK: outer.header:
+; CHECK-NEXT: br i1 undef, label %outer.latch, label %inner.preheader
+
+inner.preheader:
+ br label %inner.header
+; CHECK: inner.preheader:
+; CHECK-NEXT: br label %inner.header
+
+inner.header:
+ br i1 undef, label %inner.body.rhs, label %inner.latch
+; CHECK: inner.header:
+; CHECK-NEXT: %[[PHI0:[^,]+]] = phi i32 [ %{{[^,]+}}, %inner.latch ], [ 0, %inner.preheader ]
+; CHECK-NEXT: br i1 undef, label %inner.body.rhs, label %inner.latch
+
+inner.body.rhs:
+ store i32 0, i32* @x, align 4
+ br label %inner.latch
+; CHECK: inner.body.rhs:
+; CHECK-NEXT: br label %inner.latch
+
+inner.latch:
+ %y_val = load i32* @y, align 4
+ %icmp = icmp eq i32 %y_val, 0
+ br i1 %icmp, label %inner.exit, label %inner.header
+; CHECK: inner.latch:
+; CHECK-NEXT: %[[PHI1:[^,]+]] = phi i32 [ 0, %inner.body.rhs ], [ %[[PHI0]], %inner.header ]
+; CHECK-NEXT: br i1 %{{[^,]+}}, label %inner.exit, label %inner.header
+
+inner.exit:
+ br label %outer.latch
+; CHECK: inner.exit:
+; CHECK-NEXT: %[[INNER_LCSSA:[^,]+]] = phi i32 [ %[[PHI1]], %inner.latch ]
+; CHECK-NEXT: br label %outer.latch
+
+outer.latch:
+ br i1 undef, label %outer.exit, label %outer.header
+; CHECK: outer.latch:
+; CHECK-NEXT: %[[PHI2:[^,]+]] = phi i32 [ %[[INNER_LCSSA]], %inner.exit ], [ 0, %outer.header ]
+; CHECK-NEXT: br i1 {{.*}}, label %outer.exit, label %outer.header
+
+outer.exit:
+ br label %return
+; CHECK: outer.exit:
+; CHECK-NEXT: %[[OUTER_LCSSA:[^,]+]] = phi i32 [ %[[PHI2]], %outer.latch ]
+; CHECK-NEXT: store i32 %[[OUTER_LCSSA]]
+; CHECK-NEXT: br label %return
+
+return:
+ ret void
+}
+
diff --git a/test/Transforms/LICM/scalar_promote.ll b/test/Transforms/LICM/scalar_promote.ll
index 92ef15581ce0..d7e7c6e9a316 100644
--- a/test/Transforms/LICM/scalar_promote.ll
+++ b/test/Transforms/LICM/scalar_promote.ll
@@ -24,7 +24,8 @@ Loop: ; preds = %Loop, %0
Out:
ret void
; CHECK: Out:
-; CHECK-NEXT: store i32 %x2, i32* @X
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %x2
+; CHECK-NEXT: store i32 %[[LCSSAPHI]], i32* @X
; CHECK-NEXT: ret void
}
@@ -48,7 +49,8 @@ Loop: ; preds = %Loop, %0
Exit: ; preds = %Loop
ret void
; CHECK: Exit:
-; CHECK-NEXT: store i32 %V, i32* getelementptr inbounds (i32* @X, i64 1)
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %V
+; CHECK-NEXT: store i32 %[[LCSSAPHI]], i32* getelementptr inbounds (i32* @X, i64 1)
; CHECK-NEXT: ret void
}
@@ -142,7 +144,8 @@ Loop: ; preds = %Loop, %0
Out:
ret void
; CHECK: Out:
-; CHECK-NEXT: store i32 %x2, i32* @X
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %x2
+; CHECK-NEXT: store i32 %[[LCSSAPHI]], i32* @X
; CHECK-NEXT: ret void
}
@@ -178,7 +181,8 @@ for.end: ; preds = %for.cond.for.end_cr
; CHECK: for.body.lr.ph:
; CHECK-NEXT: %gi.promoted = load i32* %gi, align 4, !tbaa !0
; CHECK: for.cond.for.end_crit_edge:
-; CHECK-NEXT: store i32 %inc, i32* %gi, align 4, !tbaa !0
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %inc
+; CHECK-NEXT: store i32 %[[LCSSAPHI]], i32* %gi, align 4, !tbaa !0
}
!0 = metadata !{metadata !4, metadata !4, i64 0}
diff --git a/test/Transforms/LICM/sinking.ll b/test/Transforms/LICM/sinking.ll
index b503f96e42c0..ccc9186f7a48 100644
--- a/test/Transforms/LICM/sinking.ll
+++ b/test/Transforms/LICM/sinking.ll
@@ -53,7 +53,7 @@ Exit:
; CHECK-LABEL: @test3(
; CHECK: Exit.loopexit:
-; CHECK-NEXT: %X = add i32 0, 1
+; CHECK-NEXT: %X.le = add i32 0, 1
; CHECK-NEXT: br label %Exit
}
@@ -76,8 +76,9 @@ Out: ; preds = %Loop
ret i32 %tmp.7
; CHECK-LABEL: @test4(
; CHECK: Out:
-; CHECK-NEXT: mul i32 %N, %N_addr.0.pn
-; CHECK-NEXT: sub i32 %tmp.6, %N
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %N_addr.0.pn
+; CHECK-NEXT: mul i32 %N, %[[LCSSAPHI]]
+; CHECK-NEXT: sub i32 %tmp.6.le, %N
; CHECK-NEXT: ret i32
}
@@ -100,8 +101,8 @@ Out: ; preds = %Loop
ret i32 %tmp.6
; CHECK-LABEL: @test5(
; CHECK: Out:
-; CHECK-NEXT: %tmp.6 = load i32* @X
-; CHECK-NEXT: ret i32 %tmp.6
+; CHECK-NEXT: %tmp.6.le = load i32* @X
+; CHECK-NEXT: ret i32 %tmp.6.le
}
@@ -124,9 +125,9 @@ Out: ; preds = %Loop
ret i32 %sunk2
; CHECK-LABEL: @test6(
; CHECK: Out:
-; CHECK-NEXT: %dead = getelementptr %Ty* @X2, i64 0, i32 0
-; CHECK-NEXT: %sunk2 = load i32* %dead
-; CHECK-NEXT: ret i32 %sunk2
+; CHECK-NEXT: %dead.le = getelementptr %Ty* @X2, i64 0, i32 0
+; CHECK-NEXT: %sunk2.le = load i32* %dead.le
+; CHECK-NEXT: ret i32 %sunk2.le
}
@@ -152,12 +153,14 @@ Out2: ; preds = %ContLoop
ret i32 %tmp.7
; CHECK-LABEL: @test7(
; CHECK: Out1:
-; CHECK-NEXT: mul i32 %N, %N_addr.0.pn
-; CHECK-NEXT: sub i32 %tmp.6, %N
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %N_addr.0.pn
+; CHECK-NEXT: mul i32 %N, %[[LCSSAPHI]]
+; CHECK-NEXT: sub i32 %tmp.6.le, %N
; CHECK-NEXT: ret
; CHECK: Out2:
-; CHECK-NEXT: mul i32 %N, %N_addr.0.pn
-; CHECK-NEXT: sub i32 %tmp.6
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %N_addr.0.pn
+; CHECK-NEXT: mul i32 %N, %[[LCSSAPHI]]
+; CHECK-NEXT: sub i32 %tmp.6.le4, %N
; CHECK-NEXT: ret
}
@@ -183,8 +186,9 @@ exit2: ; preds = %Cont
; CHECK: exit1:
; CHECK-NEXT: ret i32 0
; CHECK: exit2:
-; CHECK-NEXT: %V = add i32 %X, 1
-; CHECK-NEXT: ret i32 %V
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %X
+; CHECK-NEXT: %V.le = add i32 %[[LCSSAPHI]], 1
+; CHECK-NEXT: ret i32 %V.le
}
@@ -208,7 +212,7 @@ return.i: ; preds = %no_exit.1.i
; CHECK-LABEL: @test9(
; CHECK: loopentry.3.i.preheader.loopexit:
-; CHECK-NEXT: %inc.1.i = add i32 0, 1
+; CHECK-NEXT: %inc.1.i.le = add i32 0, 1
; CHECK-NEXT: br label %loopentry.3.i.preheader
}
@@ -229,8 +233,9 @@ Out: ; preds = %Loop
; CHECK-LABEL: @test10(
; CHECK: Out:
-; CHECK-NEXT: %tmp.6 = sdiv i32 %N, %N_addr.0.pn
-; CHECK-NEXT: ret i32 %tmp.6
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %N_addr.0.pn
+; CHECK-NEXT: %tmp.6.le = sdiv i32 %N, %[[LCSSAPHI]]
+; CHECK-NEXT: ret i32 %tmp.6.le
}
; Should delete, not sink, dead instructions.
@@ -246,4 +251,69 @@ Out:
; CHECK-NEXT: ret void
}
+@c = common global [1 x i32] zeroinitializer, align 4
+; Test a *many*-way nested loop with multiple exit blocks, both of which exit
+; multiple loop nests. This exercises LCSSA corner cases.
+define i32 @PR18753(i1* %a, i1* %b, i1* %c, i1* %d) {
+entry:
+ br label %l1.header
+
+l1.header:
+ %iv = phi i64 [ %iv.next, %l1.latch ], [ 0, %entry ]
+ %arrayidx.i = getelementptr inbounds [1 x i32]* @c, i64 0, i64 %iv
+ br label %l2.header
+
+l2.header:
+ %x0 = load i1* %c, align 4
+ br i1 %x0, label %l1.latch, label %l3.preheader
+
+l3.preheader:
+ br label %l3.header
+
+l3.header:
+ %x1 = load i1* %d, align 4
+ br i1 %x1, label %l2.latch, label %l4.preheader
+
+l4.preheader:
+ br label %l4.header
+
+l4.header:
+ %x2 = load i1* %a
+ br i1 %x2, label %l3.latch, label %l4.body
+
+l4.body:
+ call void @f(i32* %arrayidx.i)
+ %x3 = load i1* %b
+ %l = trunc i64 %iv to i32
+ br i1 %x3, label %l4.latch, label %exit
+
+l4.latch:
+ call void @g()
+ %x4 = load i1* %b, align 4
+ br i1 %x4, label %l4.header, label %exit
+
+l3.latch:
+ br label %l3.header
+
+l2.latch:
+ br label %l2.header
+
+l1.latch:
+ %iv.next = add nsw i64 %iv, 1
+ br label %l1.header
+
+exit:
+ %lcssa = phi i32 [ %l, %l4.latch ], [ %l, %l4.body ]
+; CHECK-LABEL: @PR18753(
+; CHECK: exit:
+; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i64 [ %iv, %l4.latch ], [ %iv, %l4.body ]
+; CHECK-NEXT: %l.le = trunc i64 %[[LCSSAPHI]] to i32
+; CHECK-NEXT: ret i32 %l.le
+
+ ret i32 %lcssa
+}
+
+declare void @f(i32*)
+
+declare void @g()
diff --git a/test/Transforms/LICM/volatile-alias.ll b/test/Transforms/LICM/volatile-alias.ll
index 886d7f2f8074..df7f0a931eb8 100644
--- a/test/Transforms/LICM/volatile-alias.ll
+++ b/test/Transforms/LICM/volatile-alias.ll
@@ -4,7 +4,7 @@
; out of the loop.
; CHECK: load i32* %p
; CHECK: for.body:
-; CHECK; load volatile i32* %q
+; CHECK: load volatile i32* %q
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Transforms/LoadCombine/load-combine.ll b/test/Transforms/LoadCombine/load-combine.ll
new file mode 100644
index 000000000000..c4d9241764d9
--- /dev/null
+++ b/test/Transforms/LoadCombine/load-combine.ll
@@ -0,0 +1,190 @@
+; RUN: opt < %s -load-combine -instcombine -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Combine read from char* idiom.
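+; The function below is the fully unrolled form of roughly:
+;   uint64_t v = 0;
+;   for (int i = 0; i < 8; ++i)
+;     v |= (uint64_t)((const uint8_t *)pData)[i] << (56 - 8 * i);
+;   return v;
+; i.e. the first byte ends up in the most significant position, and the whole
+; sequence should collapse into a single i64 load.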
+define i64 @LoadU64_x64_0(i64* %pData) {
+ %1 = bitcast i64* %pData to i8*
+ %2 = load i8* %1, align 1
+ %3 = zext i8 %2 to i64
+ %4 = shl nuw i64 %3, 56
+ %5 = getelementptr inbounds i8* %1, i64 1
+ %6 = load i8* %5, align 1
+ %7 = zext i8 %6 to i64
+ %8 = shl nuw nsw i64 %7, 48
+ %9 = or i64 %8, %4
+ %10 = getelementptr inbounds i8* %1, i64 2
+ %11 = load i8* %10, align 1
+ %12 = zext i8 %11 to i64
+ %13 = shl nuw nsw i64 %12, 40
+ %14 = or i64 %9, %13
+ %15 = getelementptr inbounds i8* %1, i64 3
+ %16 = load i8* %15, align 1
+ %17 = zext i8 %16 to i64
+ %18 = shl nuw nsw i64 %17, 32
+ %19 = or i64 %14, %18
+ %20 = getelementptr inbounds i8* %1, i64 4
+ %21 = load i8* %20, align 1
+ %22 = zext i8 %21 to i64
+ %23 = shl nuw nsw i64 %22, 24
+ %24 = or i64 %19, %23
+ %25 = getelementptr inbounds i8* %1, i64 5
+ %26 = load i8* %25, align 1
+ %27 = zext i8 %26 to i64
+ %28 = shl nuw nsw i64 %27, 16
+ %29 = or i64 %24, %28
+ %30 = getelementptr inbounds i8* %1, i64 6
+ %31 = load i8* %30, align 1
+ %32 = zext i8 %31 to i64
+ %33 = shl nuw nsw i64 %32, 8
+ %34 = or i64 %29, %33
+ %35 = getelementptr inbounds i8* %1, i64 7
+ %36 = load i8* %35, align 1
+ %37 = zext i8 %36 to i64
+ %38 = or i64 %34, %37
+ ret i64 %38
+; CHECK-LABEL: @LoadU64_x64_0(
+; CHECK: load i64* %{{.*}}, align 1
+; CHECK-NOT: load
+}
+
+; Combine simple adjacent loads.
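+; i.e. roughly (uint32_t)x[0] | ((uint32_t)x[1] << 16), which is expected to
+; become a single i32 load.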
+define i32 @"2xi16_i32"(i16* %x) {
+ %1 = load i16* %x, align 2
+ %2 = getelementptr inbounds i16* %x, i64 1
+ %3 = load i16* %2, align 2
+ %4 = zext i16 %3 to i32
+ %5 = shl nuw i32 %4, 16
+ %6 = zext i16 %1 to i32
+ %7 = or i32 %5, %6
+ ret i32 %7
+; CHECK-LABEL: @"2xi16_i32"(
+; CHECK: load i32* %{{.*}}, align 2
+; CHECK-NOT: load
+}
+
+; Don't combine loads across stores.
+define i32 @"2xi16_i32_store"(i16* %x, i16* %y) {
+ %1 = load i16* %x, align 2
+ store i16 0, i16* %y, align 2
+ %2 = getelementptr inbounds i16* %x, i64 1
+ %3 = load i16* %2, align 2
+ %4 = zext i16 %3 to i32
+ %5 = shl nuw i32 %4, 16
+ %6 = zext i16 %1 to i32
+ %7 = or i32 %5, %6
+ ret i32 %7
+; CHECK-LABEL: @"2xi16_i32_store"(
+; CHECK: load i16* %{{.*}}, align 2
+; CHECK: store
+; CHECK: load i16* %{{.*}}, align 2
+}
+
+; Don't combine loads with a gap.
+define i32 @"2xi16_i32_gap"(i16* %x) {
+ %1 = load i16* %x, align 2
+ %2 = getelementptr inbounds i16* %x, i64 2
+ %3 = load i16* %2, align 2
+ %4 = zext i16 %3 to i32
+ %5 = shl nuw i32 %4, 16
+ %6 = zext i16 %1 to i32
+ %7 = or i32 %5, %6
+ ret i32 %7
+; CHECK-LABEL: @"2xi16_i32_gap"(
+; CHECK: load i16* %{{.*}}, align 2
+; CHECK: load i16* %{{.*}}, align 2
+}
+
+; Combine out of order loads.
+define i32 @"2xi16_i32_order"(i16* %x) {
+ %1 = getelementptr inbounds i16* %x, i64 1
+ %2 = load i16* %1, align 2
+ %3 = zext i16 %2 to i32
+ %4 = load i16* %x, align 2
+ %5 = shl nuw i32 %3, 16
+ %6 = zext i16 %4 to i32
+ %7 = or i32 %5, %6
+ ret i32 %7
+; CHECK-LABEL: @"2xi16_i32_order"(
+; CHECK: load i32* %{{.*}}, align 2
+; CHECK-NOT: load
+}
+
+; Don't combine overlapping loads.
+define i32 @"2xi16_i32_overlap"(i8* %x) {
+ %1 = bitcast i8* %x to i16*
+ %2 = load i16* %1, align 2
+ %3 = getelementptr inbounds i8* %x, i64 1
+ %4 = bitcast i8* %3 to i16*
+ %5 = load i16* %4, align 2
+ %6 = zext i16 %5 to i32
+ %7 = shl nuw i32 %6, 16
+ %8 = zext i16 %2 to i32
+ %9 = or i32 %7, %8
+ ret i32 %9
+; CHECK-LABEL: @"2xi16_i32_overlap"(
+; CHECK: load i16* %{{.*}}, align 2
+; CHECK: load i16* %{{.*}}, align 2
+}
+
+; Combine valid alignments.
+define i64 @"2xi16_i64_align"(i8* %x) {
+ %1 = bitcast i8* %x to i32*
+ %2 = load i32* %1, align 4
+ %3 = getelementptr inbounds i8* %x, i64 4
+ %4 = bitcast i8* %3 to i16*
+ %5 = load i16* %4, align 2
+ %6 = getelementptr inbounds i8* %x, i64 6
+ %7 = bitcast i8* %6 to i16*
+ %8 = load i16* %7, align 2
+ %9 = zext i16 %8 to i64
+ %10 = shl nuw i64 %9, 48
+ %11 = zext i16 %5 to i64
+ %12 = shl nuw nsw i64 %11, 32
+ %13 = zext i32 %2 to i64
+ %14 = or i64 %12, %13
+ %15 = or i64 %14, %10
+ ret i64 %15
+; CHECK-LABEL: @"2xi16_i64_align"(
+; CHECK: load i64* %{{.*}}, align 4
+}
+
+; Non power of two.
+define i64 @"2xi16_i64_npo2"(i8* %x) {
+ %1 = load i8* %x, align 1
+ %2 = zext i8 %1 to i64
+ %3 = getelementptr inbounds i8* %x, i64 1
+ %4 = load i8* %3, align 1
+ %5 = zext i8 %4 to i64
+ %6 = shl nuw nsw i64 %5, 8
+ %7 = or i64 %6, %2
+ %8 = getelementptr inbounds i8* %x, i64 2
+ %9 = load i8* %8, align 1
+ %10 = zext i8 %9 to i64
+ %11 = shl nuw nsw i64 %10, 16
+ %12 = or i64 %11, %7
+ %13 = getelementptr inbounds i8* %x, i64 3
+ %14 = load i8* %13, align 1
+ %15 = zext i8 %14 to i64
+ %16 = shl nuw nsw i64 %15, 24
+ %17 = or i64 %16, %12
+ %18 = getelementptr inbounds i8* %x, i64 4
+ %19 = load i8* %18, align 1
+ %20 = zext i8 %19 to i64
+ %21 = shl nuw nsw i64 %20, 32
+ %22 = or i64 %21, %17
+ %23 = getelementptr inbounds i8* %x, i64 5
+ %24 = load i8* %23, align 1
+ %25 = zext i8 %24 to i64
+ %26 = shl nuw nsw i64 %25, 40
+ %27 = or i64 %26, %22
+ %28 = getelementptr inbounds i8* %x, i64 6
+ %29 = load i8* %28, align 1
+ %30 = zext i8 %29 to i64
+ %31 = shl nuw nsw i64 %30, 48
+ %32 = or i64 %31, %27
+ ret i64 %32
+; CHECK-LABEL: @"2xi16_i64_npo2"(
+; CHECK: load i32* %{{.*}}, align 1
+}
diff --git a/test/Transforms/LoopIdiom/R600/lit.local.cfg b/test/Transforms/LoopIdiom/R600/lit.local.cfg
new file mode 100644
index 000000000000..4086e8d681c3
--- /dev/null
+++ b/test/Transforms/LoopIdiom/R600/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'R600' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopIdiom/R600/popcnt.ll b/test/Transforms/LoopIdiom/R600/popcnt.ll
new file mode 100644
index 000000000000..e4301bbb06d3
--- /dev/null
+++ b/test/Transforms/LoopIdiom/R600/popcnt.ll
@@ -0,0 +1,104 @@
+; RUN: opt -loop-idiom -mtriple=r600-- -mcpu=SI -S < %s | FileCheck %s
+
+; Mostly copied from x86 version.
+
+;To recognize this pattern:
+;int popcount(unsigned long long a) {
+; int c = 0;
+; while (a) {
+; c++;
+; a &= a - 1;
+; }
+; return c;
+;}
+;
+
+; CHECK-LABEL: @popcount_i64
+; CHECK: entry
+; CHECK: llvm.ctpop.i64
+; CHECK: ret
+define i32 @popcount_i64(i64 %a) nounwind uwtable readnone ssp {
+entry:
+ %tobool3 = icmp eq i64 %a, 0
+ br i1 %tobool3, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %c.05 = phi i32 [ %inc, %while.body ], [ 0, %entry ]
+ %a.addr.04 = phi i64 [ %and, %while.body ], [ %a, %entry ]
+ %inc = add nsw i32 %c.05, 1
+ %sub = add i64 %a.addr.04, -1
+ %and = and i64 %sub, %a.addr.04
+ %tobool = icmp eq i64 %and, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ %c.0.lcssa = phi i32 [ 0, %entry ], [ %inc, %while.body ]
+ ret i32 %c.0.lcssa
+}
+
+; CHECK-LABEL: @popcount_i32
+; CHECK: entry
+; CHECK: llvm.ctpop.i32
+; CHECK: ret
+define i32 @popcount_i32(i32 %a) nounwind uwtable readnone ssp {
+entry:
+ %tobool3 = icmp eq i32 %a, 0
+ br i1 %tobool3, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %c.05 = phi i32 [ %inc, %while.body ], [ 0, %entry ]
+ %a.addr.04 = phi i32 [ %and, %while.body ], [ %a, %entry ]
+ %inc = add nsw i32 %c.05, 1
+ %sub = add i32 %a.addr.04, -1
+ %and = and i32 %sub, %a.addr.04
+ %tobool = icmp eq i32 %and, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ %c.0.lcssa = phi i32 [ 0, %entry ], [ %inc, %while.body ]
+ ret i32 %c.0.lcssa
+}
+
+; To recognize this pattern:
+;int popcount(unsigned long long a, int mydata1, int mydata2) {
+; int c = 0;
+; while (a) {
+; c++;
+; a &= a - 1;
+; mydata1 *= c;
+; mydata2 *= (int)a;
+; }
+; return c + mydata1 + mydata2;
+;}
+
+; CHECK-LABEL: @popcount2
+; CHECK: entry
+; CHECK: llvm.ctpop.i64
+; CHECK: ret
+define i32 @popcount2(i64 %a, i32 %mydata1, i32 %mydata2) nounwind uwtable readnone ssp {
+entry:
+ %tobool9 = icmp eq i64 %a, 0
+ br i1 %tobool9, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %c.013 = phi i32 [ %inc, %while.body ], [ 0, %entry ]
+ %mydata2.addr.012 = phi i32 [ %mul1, %while.body ], [ %mydata2, %entry ]
+ %mydata1.addr.011 = phi i32 [ %mul, %while.body ], [ %mydata1, %entry ]
+ %a.addr.010 = phi i64 [ %and, %while.body ], [ %a, %entry ]
+ %inc = add nsw i32 %c.013, 1
+ %sub = add i64 %a.addr.010, -1
+ %and = and i64 %sub, %a.addr.010
+ %mul = mul nsw i32 %inc, %mydata1.addr.011
+ %conv = trunc i64 %and to i32
+ %mul1 = mul nsw i32 %conv, %mydata2.addr.012
+ %tobool = icmp eq i64 %and, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ %c.0.lcssa = phi i32 [ 0, %entry ], [ %inc, %while.body ]
+ %mydata2.addr.0.lcssa = phi i32 [ %mydata2, %entry ], [ %mul1, %while.body ]
+ %mydata1.addr.0.lcssa = phi i32 [ %mydata1, %entry ], [ %mul, %while.body ]
+ %add = add i32 %mydata2.addr.0.lcssa, %mydata1.addr.0.lcssa
+ %add2 = add i32 %add, %c.0.lcssa
+ ret i32 %add2
+}
diff --git a/test/Transforms/LoopIdiom/X86/lit.local.cfg b/test/Transforms/LoopIdiom/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Transforms/LoopIdiom/X86/lit.local.cfg
+++ b/test/Transforms/LoopIdiom/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/LoopRotate/PhiSelfReference-1.ll b/test/Transforms/LoopRotate/PhiSelfReference-1.ll
new file mode 100644
index 000000000000..aa1708e8475c
--- /dev/null
+++ b/test/Transforms/LoopRotate/PhiSelfReference-1.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -loop-rotate -verify-dom-info -verify-loop-info -disable-output
+; ModuleID = 'PhiSelfReference-1.bc'
+
+define void @snrm2(i32 %incx) {
+entry:
+ br i1 false, label %START, label %return
+
+START: ; preds = %entry
+ br i1 false, label %bb85, label %cond_false93
+
+bb52: ; preds = %bb85
+ br i1 false, label %bb307, label %cond_next71
+
+cond_next71: ; preds = %bb52
+ ret void
+
+bb85: ; preds = %START
+ br i1 false, label %bb52, label %bb88
+
+bb88: ; preds = %bb85
+ ret void
+
+cond_false93: ; preds = %START
+ ret void
+
+bb243: ; preds = %bb307
+ br label %bb307
+
+bb307: ; preds = %bb243, %bb52
+ %sx_addr.2.pn = phi float* [ %sx_addr.5, %bb243 ], [ null, %bb52 ] ; <float*> [#uses=1]
+ %sx_addr.5 = getelementptr float* %sx_addr.2.pn, i32 %incx ; <float*> [#uses=1]
+ br i1 false, label %bb243, label %bb310
+
+bb310: ; preds = %bb307
+ ret void
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/test/Transforms/LoopRotate/PhiSelfRefernce-1.ll b/test/Transforms/LoopRotate/PhiSelfRefernce-1.ll
deleted file mode 100644
index a1aa21beeef3..000000000000
--- a/test/Transforms/LoopRotate/PhiSelfRefernce-1.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: opt < %s -loop-rotate -verify-dom-info -verify-loop-info -disable-output
-; ModuleID = 'PhiSelfRefernce-1.bc'
-
-define void @snrm2(i32 %incx) {
-entry:
- br i1 false, label %START, label %return
-
-START: ; preds = %entry
- br i1 false, label %bb85, label %cond_false93
-
-bb52: ; preds = %bb85
- br i1 false, label %bb307, label %cond_next71
-
-cond_next71: ; preds = %bb52
- ret void
-
-bb85: ; preds = %START
- br i1 false, label %bb52, label %bb88
-
-bb88: ; preds = %bb85
- ret void
-
-cond_false93: ; preds = %START
- ret void
-
-bb243: ; preds = %bb307
- br label %bb307
-
-bb307: ; preds = %bb243, %bb52
- %sx_addr.2.pn = phi float* [ %sx_addr.5, %bb243 ], [ null, %bb52 ] ; <float*> [#uses=1]
- %sx_addr.5 = getelementptr float* %sx_addr.2.pn, i32 %incx ; <float*> [#uses=1]
- br i1 false, label %bb243, label %bb310
-
-bb310: ; preds = %bb307
- ret void
-
-return: ; preds = %entry
- ret void
-}
diff --git a/test/Transforms/LoopRotate/dbgvalue.ll b/test/Transforms/LoopRotate/dbgvalue.ll
index 9461980ac08d..50fc9659a804 100644
--- a/test/Transforms/LoopRotate/dbgvalue.ll
+++ b/test/Transforms/LoopRotate/dbgvalue.ll
@@ -46,7 +46,11 @@ define void @FindFreeHorzSeg(i64 %startCol, i64 %row, i64* %rowStart) {
; CHECK-LABEL: define void @FindFreeHorzSeg(
; CHECK: %dec = add
; CHECK-NEXT: tail call void @llvm.dbg.value
-; CHECK-NEXT: br i1 %tobool, label %for.cond, label %for.end
+; CHECK-NEXT: br i1 %tobool, label %for.cond, label %[[LOOP_EXIT:[^,]*]]
+; CHECK: [[LOOP_EXIT]]:
+; CHECK-NEXT: phi i64 [ %{{[^,]*}}, %{{[^,]*}} ]
+; CHECK-NEXT: br label %for.end
+
entry:
br label %for.cond
diff --git a/test/Transforms/LoopRotate/preserve-loop-simplify.ll b/test/Transforms/LoopRotate/preserve-loop-simplify.ll
new file mode 100644
index 000000000000..53fa02a42a8e
--- /dev/null
+++ b/test/Transforms/LoopRotate/preserve-loop-simplify.ll
@@ -0,0 +1,65 @@
+; RUN: opt -S -loop-rotate < %s -verify-loop-info | FileCheck %s
+;
+; Verify that LoopRotate preserves LoopSimplify form even in very peculiar loop
+; structures. We manually validate the CFG with FileCheck because currently we
+; can't cause a failure when LoopSimplify fails to be preserved.
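+; (For reference: loop-simplify form requires a preheader, a single backedge,
+; and dedicated exit blocks whose predecessors are all inside the loop.)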
+
+define void @PR18643() {
+; CHECK-LABEL: @PR18643(
+entry:
+ br label %outer.header
+; CHECK: br label %outer.header
+
+outer.header:
+; CHECK: outer.header:
+ br i1 undef, label %inner.header, label %outer.body
+; CHECK-NEXT: br i1 {{[^,]*}}, label %[[INNER_PREROTATE_PREHEADER:[^,]*]], label %outer.body
+
+; CHECK: [[INNER_PREROTATE_PREHEADER]]:
+; CHECK-NEXT: br i1 {{[^,]*}}, label %[[INNER_PREROTATE_PREHEADER_SPLIT_RETURN:[^,]*]], label %[[INNER_ROTATED_PREHEADER:[^,]*]]
+
+; CHECK: [[INNER_ROTATED_PREHEADER]]:
+; CHECK-NEXT: br label %inner.body
+
+inner.header:
+; Now the latch!
+; CHECK: inner.header:
+ br i1 undef, label %return, label %inner.body
+; CHECK-NEXT: br i1 {{[^,]*}}, label %[[INNER_SPLIT_RETURN:[^,]*]], label %inner.body
+
+inner.body:
+; Now the header!
+; CHECK: inner.body:
+ br i1 undef, label %outer.latch, label %inner.latch
+; CHECK-NEXT: br i1 {{[^,]*}}, label %[[INNER_SPLIT_OUTER_LATCH:[^,]*]], label %inner.header
+
+inner.latch:
+; Dead!
+ br label %inner.header
+
+outer.body:
+; CHECK: outer.body:
+ br label %outer.latch
+; CHECK-NEXT: br label %outer.latch
+
+; L2 -> L1 exit edge needs a simplified exit block.
+; CHECK: [[INNER_SPLIT_OUTER_LATCH]]:
+; CHECK-NEXT: br label %outer.latch
+
+outer.latch:
+; CHECK: outer.latch:
+ br label %outer.header
+; CHECK-NEXT: br label %outer.header
+
+; L1 -> L0 exit edge needs a simplified exit block.
+; CHECK: [[INNER_PREROTATE_PREHEADER_SPLIT_RETURN]]:
+; CHECK-NEXT: br label %return
+
+; L2 -> L0 exit edge needs a simplified exit block.
+; CHECK: [[INNER_SPLIT_RETURN]]:
+; CHECK-NEXT: br label %return
+
+return:
+; CHECK: return:
+ unreachable
+}
diff --git a/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll b/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll
index e91d141cc6ff..0534a0bf7d06 100644
--- a/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll
+++ b/test/Transforms/LoopSimplify/2007-10-28-InvokeCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | opt -loop-simplify -disable-output
+; RUN: opt < %s -loop-simplify -disable-output
; PR1752
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-s0:0:64-f80:32:32"
target triple = "i686-pc-mingw32"
diff --git a/test/Transforms/LoopSimplify/ashr-crash.ll b/test/Transforms/LoopSimplify/ashr-crash.ll
new file mode 100644
index 000000000000..c58903d49d55
--- /dev/null
+++ b/test/Transforms/LoopSimplify/ashr-crash.ll
@@ -0,0 +1,80 @@
+; RUN: opt -basicaa -loop-rotate -licm -instcombine -indvars -loop-unroll -S %s | FileCheck %s
+;
+; PR18361: ScalarEvolution::getAddRecExpr():
+; Assertion `isLoopInvariant(Operands[i],...
+;
+; After a series of loop optimizations, SCEV's LoopDispositions grow stale.
+; In particular, LoopSimplify hoists %cmp4, resulting in this SCEV for %add:
+; {(zext i1 %cmp4 to i32),+,1}<nw><%for.cond1.preheader>
+;
+; When recomputing the SCEV for %ashr, we truncate the operands to get:
+; (zext i1 %cmp4 to i16)
+;
+; This SCEV was never mapped to a value, so it was never invalidated. Its
+; loop disposition is still marked as non-loop-invariant, which is
+; inconsistent with the AddRec.
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+@d = common global i32 0, align 4
+@a = common global i32 0, align 4
+@c = common global i32 0, align 4
+@b = common global i32 0, align 4
+
+; Check that the def-use chain that leads to the bad SCEV is still
+; there.
+;
+; CHECK-LABEL: @foo
+; CHECK-LABEL: entry:
+; CHECK-LABEL: for.cond1.preheader:
+; CHECK-LABEL: for.body3:
+; CHECK: %cmp4.le.le
+; CHECK: %conv.le.le = zext i1 %cmp4.le.le to i32
+; CHECK: %xor.le.le = xor i32 %conv6.le.le, 1
+define void @foo() {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.inc7, %entry
+ %storemerge = phi i32 [ 0, %entry ], [ %inc8, %for.inc7 ]
+ %f.0 = phi i32 [ undef, %entry ], [ %f.1, %for.inc7 ]
+ store i32 %storemerge, i32* @d, align 4
+ %cmp = icmp slt i32 %storemerge, 1
+ br i1 %cmp, label %for.cond1, label %for.end9
+
+for.cond1: ; preds = %for.cond, %for.body3
+ %storemerge1 = phi i32 [ %inc, %for.body3 ], [ 0, %for.cond ]
+ %f.1 = phi i32 [ %xor, %for.body3 ], [ %f.0, %for.cond ]
+ store i32 %storemerge1, i32* @a, align 4
+ %cmp2 = icmp slt i32 %storemerge1, 1
+ br i1 %cmp2, label %for.body3, label %for.inc7
+
+for.body3: ; preds = %for.cond1
+ %0 = load i32* @c, align 4
+ %cmp4 = icmp sge i32 %storemerge1, %0
+ %conv = zext i1 %cmp4 to i32
+ %1 = load i32* @d, align 4
+ %add = add nsw i32 %conv, %1
+ %sext = shl i32 %add, 16
+ %conv6 = ashr exact i32 %sext, 16
+ %xor = xor i32 %conv6, 1
+ %inc = add nsw i32 %storemerge1, 1
+ br label %for.cond1
+
+for.inc7: ; preds = %for.cond1
+ %2 = load i32* @d, align 4
+ %inc8 = add nsw i32 %2, 1
+ br label %for.cond
+
+for.end9: ; preds = %for.cond
+ %cmp10 = icmp sgt i32 %f.0, 0
+ br i1 %cmp10, label %if.then, label %if.end
+
+if.then: ; preds = %for.end9
+ store i32 0, i32* @b, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %for.end9
+ ret void
+}
diff --git a/test/Transforms/LoopSimplify/notify-scev.ll b/test/Transforms/LoopSimplify/notify-scev.ll
new file mode 100644
index 000000000000..ee8e2eec9b3f
--- /dev/null
+++ b/test/Transforms/LoopSimplify/notify-scev.ll
@@ -0,0 +1,110 @@
+; RUN: opt -indvars -S %s | FileCheck %s
+;
+; PR18384: ValueHandleBase::ValueIsDeleted.
+;
+; Ensure that LoopSimplify calls ScalarEvolution::forgetLoop before
+; deleting a block, regardless of whether any values were hoisted out
+; of the block.
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-darwin"
+
+%struct.Params = type { [2 x [4 x [16 x i16]]] }
+
+; Verify that the loop tail is deleted, and we don't crash!
+;
+; CHECK-LABEL: @t
+; CHECK-LABEL: for.cond127.preheader:
+; CHECK-NOT: for.cond127:
+; CHECK-LABEL: for.body129:
+define void @t() {
+entry:
+ br label %for.body102
+
+for.body102:
+ br i1 undef, label %for.cond127.preheader, label %for.inc203
+
+for.cond127.preheader:
+ br label %for.body129
+
+for.cond127:
+ %cmp128 = icmp slt i32 %inc191, 2
+ br i1 %cmp128, label %for.body129, label %for.end192
+
+for.body129:
+ %uv.013 = phi i32 [ 0, %for.cond127.preheader ], [ %inc191, %for.cond127 ]
+ %idxprom130 = sext i32 %uv.013 to i64
+ br i1 undef, label %for.cond135.preheader.lr.ph, label %for.end185
+
+for.cond135.preheader.lr.ph:
+ br i1 undef, label %for.cond135.preheader.lr.ph.split.us, label %for.cond135.preheader.lr.ph.split_crit_edge
+
+for.cond135.preheader.lr.ph.split_crit_edge:
+ br label %for.cond135.preheader.lr.ph.split
+
+for.cond135.preheader.lr.ph.split.us:
+ br label %for.cond135.preheader.us
+
+for.cond135.preheader.us:
+ %block_y.09.us = phi i32 [ 0, %for.cond135.preheader.lr.ph.split.us ], [ %add184.us, %for.cond132.us ]
+ br i1 true, label %for.cond138.preheader.lr.ph.us, label %for.end178.us
+
+for.end178.us:
+ %add184.us = add nsw i32 %block_y.09.us, 4
+ br i1 undef, label %for.end185split.us-lcssa.us, label %for.cond132.us
+
+for.end174.us:
+ br i1 undef, label %for.cond138.preheader.us, label %for.cond135.for.end178_crit_edge.us
+
+for.inc172.us:
+ br i1 undef, label %for.cond142.preheader.us, label %for.end174.us
+
+for.body145.us:
+ %arrayidx163.us = getelementptr inbounds %struct.Params* undef, i64 0, i32 0, i64 %idxprom130, i64 %idxprom146.us
+ br i1 undef, label %for.body145.us, label %for.inc172.us
+
+for.cond142.preheader.us:
+ %j.04.us = phi i32 [ %block_y.09.us, %for.cond138.preheader.us ], [ undef, %for.inc172.us ]
+ %idxprom146.us = sext i32 %j.04.us to i64
+ br label %for.body145.us
+
+for.cond138.preheader.us:
+ br label %for.cond142.preheader.us
+
+for.cond132.us:
+ br i1 undef, label %for.cond135.preheader.us, label %for.cond132.for.end185_crit_edge.us-lcssa.us
+
+for.cond138.preheader.lr.ph.us:
+ br label %for.cond138.preheader.us
+
+for.cond135.for.end178_crit_edge.us:
+ br label %for.end178.us
+
+for.end185split.us-lcssa.us:
+ br label %for.end185split
+
+for.cond132.for.end185_crit_edge.us-lcssa.us:
+ br label %for.cond132.for.end185_crit_edge
+
+for.cond135.preheader.lr.ph.split:
+ br label %for.end185split
+
+for.end185split:
+ br label %for.end185
+
+for.cond132.for.end185_crit_edge:
+ br label %for.end185
+
+for.end185:
+ %inc191 = add nsw i32 %uv.013, 1
+ br i1 false, label %for.end192, label %for.cond127
+
+for.end192:
+ br label %for.inc203
+
+for.inc203:
+ br label %for.end205
+
+for.end205:
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg b/test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..675f48e199a0
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AArch64/lit.local.cfg
@@ -0,0 +1,4 @@
+config.suffixes = ['.ll']
+
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
new file mode 100644
index 000000000000..9a175ad8d355
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=arm64-unknown-unknown -mcpu=cyclone -pre-RA-sched=list-hybrid < %s | FileCheck %s
+; rdar://10232252
+; Prevent LSR from making a poor choice that cannot be folded into an
+; addressing mode.
+
+; Remove the -pre-RA-sched=list-hybrid option after fixing:
+; <rdar://problem/12702735> [ARM64][coalescer] need better register
+; coalescing for simple unit tests.
+
+; CHECK: testCase
+; CHECK: %while.body{{$}}
+; CHECK: ldr [[STREG:x[0-9]+]], [{{x[0-9]+}}], #8
+; CHECK-NEXT: str [[STREG]], [{{x[0-9]+}}], #8
+; CHECK: %while.end
+define i32 @testCase() nounwind ssp {
+entry:
+ br label %while.body
+
+while.body: ; preds = %while.body, %entry
+ %len.06 = phi i64 [ 1288, %entry ], [ %sub, %while.body ]
+ %pDst.05 = phi i64* [ inttoptr (i64 6442450944 to i64*), %entry ], [ %incdec.ptr1, %while.body ]
+ %pSrc.04 = phi i64* [ inttoptr (i64 4294967296 to i64*), %entry ], [ %incdec.ptr, %while.body ]
+ %incdec.ptr = getelementptr inbounds i64* %pSrc.04, i64 1
+ %tmp = load volatile i64* %pSrc.04, align 8
+ %incdec.ptr1 = getelementptr inbounds i64* %pDst.05, i64 1
+ store volatile i64 %tmp, i64* %pDst.05, align 8
+ %sub = add i64 %len.06, -8
+ %cmp = icmp sgt i64 %sub, -1
+ br i1 %cmp, label %while.body, label %while.end
+
+while.end: ; preds = %while.body
+ tail call void inttoptr (i64 6442450944 to void ()*)() nounwind
+ ret i32 0
+}
diff --git a/test/Transforms/LoopStrengthReduce/AArch64/lsr-memset.ll b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memset.ll
new file mode 100644
index 000000000000..10b2c3a403cc
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AArch64/lsr-memset.ll
@@ -0,0 +1,101 @@
+; RUN: llc < %s -O3 -mtriple=arm64-unknown-unknown -mcpu=cyclone -pre-RA-sched=list-hybrid | FileCheck %s
+; <rdar://problem/11635990> [arm64] [lsr] Inefficient EA/loop-exit calc in bzero_phys
+;
+; LSR on loop %while.cond should reassociate non-address mode
+; expressions at use %cmp16 to avoid sinking computation into %while.body18.
+;
+; Remove the -pre-RA-sched=list-hybrid option after fixing:
+; <rdar://problem/12702735> [ARM64][coalescer] need better register
+; coalescing for simple unit tests.
+
+; CHECK: @memset
+; CHECK: %while.body18{{$}}
+; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #8
+; First set the IVREG variable, then use it
+; CHECK-NEXT: sub [[IVREG:x[0-9]+]],
+; CHECK: [[IVREG]], #8
+; CHECK-NEXT: cmp [[IVREG]], #7
+; CHECK-NEXT: b.hi
+define i8* @memset(i8* %dest, i32 %val, i64 %len) nounwind ssp noimplicitfloat {
+entry:
+ %cmp = icmp eq i64 %len, 0
+ br i1 %cmp, label %done, label %while.cond.preheader
+
+while.cond.preheader: ; preds = %entry
+ %conv = trunc i32 %val to i8
+ br label %while.cond
+
+while.cond: ; preds = %while.body, %while.cond.preheader
+ %ptr.0 = phi i8* [ %incdec.ptr, %while.body ], [ %dest, %while.cond.preheader ]
+ %len.addr.0 = phi i64 [ %dec, %while.body ], [ %len, %while.cond.preheader ]
+ %cond = icmp eq i64 %len.addr.0, 0
+ br i1 %cond, label %done, label %land.rhs
+
+land.rhs: ; preds = %while.cond
+ %0 = ptrtoint i8* %ptr.0 to i64
+ %and = and i64 %0, 7
+ %cmp5 = icmp eq i64 %and, 0
+ br i1 %cmp5, label %if.end9, label %while.body
+
+while.body: ; preds = %land.rhs
+ %incdec.ptr = getelementptr inbounds i8* %ptr.0, i64 1
+ store i8 %conv, i8* %ptr.0, align 1, !tbaa !0
+ %dec = add i64 %len.addr.0, -1
+ br label %while.cond
+
+if.end9: ; preds = %land.rhs
+ %conv.mask = and i32 %val, 255
+ %1 = zext i32 %conv.mask to i64
+ %2 = shl nuw nsw i64 %1, 8
+ %ins18 = or i64 %2, %1
+ %3 = shl nuw nsw i64 %1, 16
+ %ins15 = or i64 %ins18, %3
+ %4 = shl nuw nsw i64 %1, 24
+ %5 = shl nuw nsw i64 %1, 32
+ %mask8 = or i64 %ins15, %4
+ %6 = shl nuw nsw i64 %1, 40
+ %mask5 = or i64 %mask8, %5
+ %7 = shl nuw nsw i64 %1, 48
+ %8 = shl nuw i64 %1, 56
+ %mask2.masked = or i64 %mask5, %6
+ %mask = or i64 %mask2.masked, %7
+ %ins = or i64 %mask, %8
+ %9 = bitcast i8* %ptr.0 to i64*
+ %cmp1636 = icmp ugt i64 %len.addr.0, 7
+ br i1 %cmp1636, label %while.body18, label %while.body29.lr.ph
+
+while.body18: ; preds = %if.end9, %while.body18
+ %wideptr.038 = phi i64* [ %incdec.ptr19, %while.body18 ], [ %9, %if.end9 ]
+ %len.addr.137 = phi i64 [ %sub, %while.body18 ], [ %len.addr.0, %if.end9 ]
+ %incdec.ptr19 = getelementptr inbounds i64* %wideptr.038, i64 1
+ store i64 %ins, i64* %wideptr.038, align 8, !tbaa !2
+ %sub = add i64 %len.addr.137, -8
+ %cmp16 = icmp ugt i64 %sub, 7
+ br i1 %cmp16, label %while.body18, label %while.end20
+
+while.end20: ; preds = %while.body18
+ %cmp21 = icmp eq i64 %sub, 0
+ br i1 %cmp21, label %done, label %while.body29.lr.ph
+
+while.body29.lr.ph: ; preds = %while.end20, %if.end9
+ %len.addr.1.lcssa49 = phi i64 [ %sub, %while.end20 ], [ %len.addr.0, %if.end9 ]
+ %wideptr.0.lcssa48 = phi i64* [ %incdec.ptr19, %while.end20 ], [ %9, %if.end9 ]
+ %10 = bitcast i64* %wideptr.0.lcssa48 to i8*
+ br label %while.body29
+
+while.body29: ; preds = %while.body29, %while.body29.lr.ph
+ %len.addr.235 = phi i64 [ %len.addr.1.lcssa49, %while.body29.lr.ph ], [ %dec26, %while.body29 ]
+ %ptr.134 = phi i8* [ %10, %while.body29.lr.ph ], [ %incdec.ptr31, %while.body29 ]
+ %dec26 = add i64 %len.addr.235, -1
+ %incdec.ptr31 = getelementptr inbounds i8* %ptr.134, i64 1
+ store i8 %conv, i8* %ptr.134, align 1, !tbaa !0
+ %cmp27 = icmp eq i64 %dec26, 0
+ br i1 %cmp27, label %done, label %while.body29
+
+done: ; preds = %while.cond, %while.body29, %while.end20, %entry
+ ret i8* %dest
+}
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}
+!2 = metadata !{metadata !"long long", metadata !0}
diff --git a/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll b/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
new file mode 100644
index 000000000000..217896e55c66
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
@@ -0,0 +1,70 @@
+; RUN: llc -mcpu=cyclone -debug-only=loop-reduce < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; LSR used to fail here due to a bug in the ReqRegs test.
+; CHECK: The chosen solution requires
+; CHECK-NOT: No Satisfactory Solution
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios"
+
+define void @do_integer_add(i64 %iterations, i8* nocapture readonly %cookie) {
+entry:
+ %N = bitcast i8* %cookie to i32*
+ %0 = load i32* %N, align 4
+ %add = add nsw i32 %0, 57
+ %cmp56 = icmp eq i64 %iterations, 0
+ br i1 %cmp56, label %while.end, label %for.cond.preheader.preheader
+
+for.cond.preheader.preheader: ; preds = %entry
+ br label %for.cond.preheader
+
+while.cond.loopexit: ; preds = %for.body
+ %add21.lcssa = phi i32 [ %add21, %for.body ]
+ %dec58 = add i64 %dec58.in, -1
+ %cmp = icmp eq i64 %dec58, 0
+ br i1 %cmp, label %while.end.loopexit, label %for.cond.preheader
+
+for.cond.preheader: ; preds = %for.cond.preheader.preheader, %while.cond.loopexit
+ %dec58.in = phi i64 [ %dec58, %while.cond.loopexit ], [ %iterations, %for.cond.preheader.preheader ]
+ %a.057 = phi i32 [ %add21.lcssa, %while.cond.loopexit ], [ %add, %for.cond.preheader.preheader ]
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.cond.preheader
+ %a.154 = phi i32 [ %a.057, %for.cond.preheader ], [ %add21, %for.body ]
+ %i.053 = phi i32 [ 1, %for.cond.preheader ], [ %inc, %for.body ]
+ %inc = add nsw i32 %i.053, 1
+ %add2 = shl i32 %a.154, 1
+ %add3 = add nsw i32 %add2, %i.053
+ %add4 = shl i32 %add3, 1
+ %add5 = add nsw i32 %add4, %i.053
+ %add6 = shl i32 %add5, 1
+ %add7 = add nsw i32 %add6, %i.053
+ %add8 = shl i32 %add7, 1
+ %add9 = add nsw i32 %add8, %i.053
+ %add10 = shl i32 %add9, 1
+ %add11 = add nsw i32 %add10, %i.053
+ %add12 = shl i32 %add11, 1
+ %add13 = add nsw i32 %add12, %i.053
+ %add14 = shl i32 %add13, 1
+ %add15 = add nsw i32 %add14, %i.053
+ %add16 = shl i32 %add15, 1
+ %add17 = add nsw i32 %add16, %i.053
+ %add18 = shl i32 %add17, 1
+ %add19 = add nsw i32 %add18, %i.053
+ %add20 = shl i32 %add19, 1
+ %add21 = add nsw i32 %add20, %i.053
+ %exitcond = icmp eq i32 %inc, 1001
+ br i1 %exitcond, label %while.cond.loopexit, label %for.body
+
+while.end.loopexit: ; preds = %while.cond.loopexit
+ %add21.lcssa.lcssa = phi i32 [ %add21.lcssa, %while.cond.loopexit ]
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ %a.0.lcssa = phi i32 [ %add, %entry ], [ %add21.lcssa.lcssa, %while.end.loopexit ]
+ tail call void @use_int(i32 %a.0.lcssa)
+ ret void
+}
+
+declare void @use_int(i32)
diff --git a/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll b/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
index 5d728b528ea5..1d56ddea2446 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
+++ b/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -march=thumb -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - -arm-atomic-cfg-tidy=0 | FileCheck %s
;
; LSR should only check for valid address modes when the IV user is a
; memory address.
diff --git a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
index ab7f20f0129b..f4edf092641f 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
+++ b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -O3 -march=thumb -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9
+; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
+; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 -addr-sink-using-gep=1 %s -o - | FileCheck %s -check-prefix=A9
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg b/test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg
+++ b/test/Transforms/LoopStrengthReduce/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index e42b67fd35af..937791dca413 100644
--- a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X32
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg b/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg
+++ b/test/Transforms/LoopStrengthReduce/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll b/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll
new file mode 100644
index 000000000000..55069947240d
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll
@@ -0,0 +1,50 @@
+; RUN: opt -S -loop-reduce -mcpu=corei7-avx -mtriple=x86_64-apple-macosx < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @indvar_expansion(i8* nocapture readonly %rowsptr) {
+entry:
+ br label %for.cond
+
+; SCEVExpander used to create induction variables in the loop %for.cond while
+; expanding the recurrence start value of loop-strength-reduced values from
+; %vector.body.
+
+; CHECK-LABEL: indvar_expansion
+; CHECK: for.cond:
+; CHECK-NOT: phi i3
+; CHECK: br i1 {{.+}}, label %for.cond
+
+for.cond:
+ %indvars.iv44 = phi i64 [ %indvars.iv.next45, %for.cond ], [ 0, %entry ]
+ %cmp = icmp eq i8 undef, 0
+ %indvars.iv.next45 = add nuw nsw i64 %indvars.iv44, 1
+ br i1 %cmp, label %for.cond, label %for.cond2
+
+for.cond2:
+ br i1 undef, label %for.cond2, label %for.body14.lr.ph
+
+for.body14.lr.ph:
+ %sext = shl i64 %indvars.iv44, 32
+ %0 = ashr exact i64 %sext, 32
+ %1 = sub i64 undef, %indvars.iv44
+ %2 = and i64 %1, 4294967295
+ %3 = add i64 %2, 1
+ %fold = add i64 %1, 1
+ %n.mod.vf = and i64 %fold, 7
+ %n.vec = sub i64 %3, %n.mod.vf
+ %end.idx.rnd.down = add i64 %n.vec, %0
+ br label %vector.body
+
+vector.body:
+ %index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body14.lr.ph ]
+ %4 = getelementptr inbounds i8* %rowsptr, i64 %index
+ %5 = bitcast i8* %4 to <4 x i8>*
+ %wide.load = load <4 x i8>* %5, align 1
+ %index.next = add i64 %index, 8
+ %6 = icmp eq i64 %index.next, %end.idx.rnd.down
+ br i1 %6, label %for.end24, label %vector.body
+
+for.end24:
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/pr17473.ll b/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
index 4204abc7ca22..e7ebaa8ea61b 100644
--- a/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -loop-reduce -S | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
; LSR shouldn't normalize IV if it can't be denormalized to the original
diff --git a/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll b/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll
index 90051e3542c2..16bb508d2e27 100644
--- a/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll
+++ b/test/Transforms/LoopStrengthReduce/dont_insert_redundant_ops.ll
@@ -1,5 +1,9 @@
; Check that this test makes INDVAR and related stuff dead.
-; RUN: opt < %s -loop-reduce -S | grep phi | count 2
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+
+; CHECK: phi
+; CHECK: phi
+; CHECK-NOT: phi
declare i1 @pred()
diff --git a/test/Transforms/LoopStrengthReduce/lsr-expand-quadratic.ll b/test/Transforms/LoopStrengthReduce/lsr-expand-quadratic.ll
index 255cf41a8174..aa688d999e60 100644
--- a/test/Transforms/LoopStrengthReduce/lsr-expand-quadratic.ll
+++ b/test/Transforms/LoopStrengthReduce/lsr-expand-quadratic.ll
@@ -13,7 +13,7 @@ target triple = "x86_64-apple-macosx"
; CHECK: %lsr.iv = phi i32 [ %lsr.iv.next, %test2.loop ], [ -16777216, %entry ]
; CHECK: %lsr.iv.next = add nsw i32 %lsr.iv, 16777216
;
-; CHECK=LABEL: for.end:
+; CHECK-LABEL: for.end:
; CHECK: %sub.cond.us = sub nsw i32 %inc1115.us, %sub.us
; CHECK: %sext.us = mul i32 %lsr.iv.next, %sub.cond.us
; CHECK: %f = ashr i32 %sext.us, 24
diff --git a/test/Transforms/LoopStrengthReduce/pr18165.ll b/test/Transforms/LoopStrengthReduce/pr18165.ll
index 89adef7bd49d..c38d6a625e88 100644
--- a/test/Transforms/LoopStrengthReduce/pr18165.ll
+++ b/test/Transforms/LoopStrengthReduce/pr18165.ll
@@ -1,5 +1,5 @@
; RUN: opt < %s -loop-reduce -S | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
; LSR shouldn't reuse IV if the resultant offset is not valid for the operand type.
diff --git a/test/Transforms/LoopUnroll/PowerPC/lit.local.cfg b/test/Transforms/LoopUnroll/PowerPC/lit.local.cfg
index 2e463005586f..5d33887ff0a4 100644
--- a/test/Transforms/LoopUnroll/PowerPC/lit.local.cfg
+++ b/test/Transforms/LoopUnroll/PowerPC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'PowerPC' in targets:
+if not 'PowerPC' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/LoopUnroll/X86/lit.local.cfg b/test/Transforms/LoopUnroll/X86/lit.local.cfg
new file mode 100644
index 000000000000..e71f3cc4c41e
--- /dev/null
+++ b/test/Transforms/LoopUnroll/X86/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'X86' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopUnroll/X86/partial.ll b/test/Transforms/LoopUnroll/X86/partial.ll
new file mode 100644
index 000000000000..a2b04c7d85f8
--- /dev/null
+++ b/test/Transforms/LoopUnroll/X86/partial.ll
@@ -0,0 +1,127 @@
+; RUN: opt < %s -S -loop-unroll -mcpu=nehalem | FileCheck %s
+; RUN: opt < %s -S -loop-unroll -mcpu=core -unroll-runtime=0 | FileCheck -check-prefix=CHECK-NOUNRL %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(i32* noalias nocapture readnone %ip, double %alpha, double* noalias nocapture %a, double* noalias nocapture readonly %b) #0 {
+entry:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds double* %b, i64 %index
+ %1 = bitcast double* %0 to <2 x double>*
+ %wide.load = load <2 x double>* %1, align 8
+ %.sum9 = or i64 %index, 2
+ %2 = getelementptr double* %b, i64 %.sum9
+ %3 = bitcast double* %2 to <2 x double>*
+ %wide.load8 = load <2 x double>* %3, align 8
+ %4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
+ %5 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
+ %6 = getelementptr inbounds double* %a, i64 %index
+ %7 = bitcast double* %6 to <2 x double>*
+ store <2 x double> %4, <2 x double>* %7, align 8
+ %.sum10 = or i64 %index, 2
+ %8 = getelementptr double* %a, i64 %.sum10
+ %9 = bitcast double* %8 to <2 x double>*
+ store <2 x double> %5, <2 x double>* %9, align 8
+ %index.next = add i64 %index, 4
+ %10 = icmp eq i64 %index.next, 1600
+ br i1 %10, label %for.end, label %vector.body
+
+; FIXME: We should probably unroll this loop by a factor of 2, but the cost
+; model needs to be fixed to account for instructions likely to be folded
+; as part of an addressing mode.
+; CHECK-LABEL: @foo
+; CHECK-NOUNRL-LABEL: @foo
+
+for.end: ; preds = %vector.body
+ ret void
+}
+
+define void @bar(i32* noalias nocapture readnone %ip, double %alpha, double* noalias nocapture %a, double* noalias nocapture readonly %b) #0 {
+entry:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %v0 = getelementptr inbounds double* %b, i64 %index
+ %v1 = bitcast double* %v0 to <2 x double>*
+ %wide.load = load <2 x double>* %v1, align 8
+ %v4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
+ %v5 = fmul <2 x double> %v4, <double 8.000000e+00, double 8.000000e+00>
+ %v6 = getelementptr inbounds double* %a, i64 %index
+ %v7 = bitcast double* %v6 to <2 x double>*
+ store <2 x double> %v5, <2 x double>* %v7, align 8
+ %index.next = add i64 %index, 2
+ %v10 = icmp eq i64 %index.next, 1600
+ br i1 %v10, label %for.end, label %vector.body
+
+; FIXME: We should probably unroll this loop by a factor of 2, but the cost
+; model needs to first be fixed to account for instructions likely to be folded
+; as part of an addressing mode.
+
+; CHECK-LABEL: @bar
+; CHECK: fadd
+; CHECK-NEXT: fmul
+; CHECK: fadd
+; CHECK-NEXT: fmul
+
+; CHECK-NOUNRL-LABEL: @bar
+; CHECK-NOUNRL: fadd
+; CHECK-NOUNRL-NEXT: fmul
+; CHECK-NOUNRL-NOT: fadd
+
+for.end: ; preds = %vector.body
+ ret void
+}
+
+define zeroext i16 @test1(i16* nocapture readonly %arr, i32 %n) #0 {
+entry:
+ %cmp25 = icmp eq i32 %n, 0
+ br i1 %cmp25, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i16* %arr, i64 %indvars.iv
+ %0 = load i16* %arrayidx, align 2
+ %add = add i16 %0, %reduction.026
+ %sext = mul i64 %indvars.iv, 12884901888
+ %idxprom3 = ashr exact i64 %sext, 32
+ %arrayidx4 = getelementptr inbounds i16* %arr, i64 %idxprom3
+ %1 = load i16* %arrayidx4, align 2
+ %add7 = add i16 %add, %1
+ %sext28 = mul i64 %indvars.iv, 21474836480
+ %idxprom10 = ashr exact i64 %sext28, 32
+ %arrayidx11 = getelementptr inbounds i16* %arr, i64 %idxprom10
+ %2 = load i16* %arrayidx11, align 2
+ %add14 = add i16 %add7, %2
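+; Each iteration sums arr[i], arr[3*i] and arr[5*i]: the multiplies by
+; 12884901888 (3 << 32) and 21474836480 (5 << 32) followed by 'ashr exact ... 32'
+; are sign-extended strides of 3 and 5.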
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %reduction.0.lcssa = phi i16 [ 0, %entry ], [ %add14, %for.body ]
+ ret i16 %reduction.0.lcssa
+
+; This loop is too large to be partially unrolled (size=16)
+
+; CHECK-LABEL: @test1
+; CHECK: br
+; CHECK: br
+; CHECK: br
+; CHECK: br
+; CHECK-NOT: br
+
+; CHECK-NOUNRL-LABEL: @test1
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL: br
+; CHECK-NOUNRL-NOT: br
+}
+
+attributes #0 = { nounwind uwtable }
+
diff --git a/test/Transforms/LoopUnroll/loop-remarks.ll b/test/Transforms/LoopUnroll/loop-remarks.ll
new file mode 100644
index 000000000000..ff3ac178363a
--- /dev/null
+++ b/test/Transforms/LoopUnroll/loop-remarks.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -S -loop-unroll -pass-remarks=loop-unroll -unroll-count=16 2>&1 | FileCheck -check-prefix=COMPLETE-UNROLL %s
+; RUN: opt < %s -S -loop-unroll -pass-remarks=loop-unroll -unroll-count=4 2>&1 | FileCheck -check-prefix=PARTIAL-UNROLL %s
+
+; COMPLETE-UNROLL: remark: {{.*}}: completely unrolled loop with 16 iterations
+; PARTIAL-UNROLL: remark: {{.*}}: unrolled loop by a factor of 4
+
+define i32 @sum() {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %s.06 = phi i32 [ 0, %entry ], [ %add1, %for.body ]
+ %i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %add = add nsw i32 %i.05, 4
+ %call = tail call i32 @baz(i32 %add) #2
+ %add1 = add nsw i32 %call, %s.06
+ %inc = add nsw i32 %i.05, 1
+ %exitcond = icmp eq i32 %inc, 16
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 %add1
+}
+
+declare i32 @baz(i32)
diff --git a/test/Transforms/LoopUnroll/pr18861.ll b/test/Transforms/LoopUnroll/pr18861.ll
new file mode 100644
index 000000000000..62f26106afb2
--- /dev/null
+++ b/test/Transforms/LoopUnroll/pr18861.ll
@@ -0,0 +1,43 @@
+; RUN: opt < %s -loop-unroll -indvars -disable-output
+
+@b = external global i32, align 4
+
+; Function Attrs: nounwind uwtable
+define void @fn1() #0 {
+entry:
+ br label %for.cond1thread-pre-split
+
+for.cond1thread-pre-split: ; preds = %for.inc8, %entry
+ %storemerge1 = phi i32 [ 0, %entry ], [ %inc9, %for.inc8 ]
+ br i1 undef, label %for.inc8, label %for.cond2.preheader.lr.ph
+
+for.cond2.preheader.lr.ph: ; preds = %for.cond1thread-pre-split
+ br label %for.cond2.preheader
+
+for.cond2.preheader: ; preds = %for.inc5, %for.cond2.preheader.lr.ph
+ br label %for.cond2
+
+for.cond2: ; preds = %for.body3, %for.cond2.preheader
+ %storemerge = phi i32 [ %add, %for.body3 ], [ 0, %for.cond2.preheader ]
+ %cmp = icmp slt i32 %storemerge, 1
+ br i1 %cmp, label %for.body3, label %for.inc5
+
+for.body3: ; preds = %for.cond2
+ %tobool4 = icmp eq i32 %storemerge, 0
+ %add = add nsw i32 %storemerge, 1
+ br i1 %tobool4, label %for.cond2, label %if.then
+
+if.then: ; preds = %for.body3
+ store i32 %storemerge1, i32* @b, align 4
+ ret void
+
+for.inc5: ; preds = %for.cond2
+ br i1 undef, label %for.cond1.for.inc8_crit_edge, label %for.cond2.preheader
+
+for.cond1.for.inc8_crit_edge: ; preds = %for.inc5
+ br label %for.inc8
+
+for.inc8: ; preds = %for.cond1.for.inc8_crit_edge, %for.cond1thread-pre-split
+ %inc9 = add nsw i32 %storemerge1, 1
+ br label %for.cond1thread-pre-split
+}
diff --git a/test/Transforms/LoopUnroll/runtime-loop.ll b/test/Transforms/LoopUnroll/runtime-loop.ll
index d8bbea9f1073..a14087dcdce7 100644
--- a/test/Transforms/LoopUnroll/runtime-loop.ll
+++ b/test/Transforms/LoopUnroll/runtime-loop.ll
@@ -2,6 +2,12 @@
; Tests for unrolling loops with run-time trip counts
+; CHECK: %xtraiter = and i32 %n
+; CHECK: %lcmp.mod = icmp ne i32 %xtraiter, 0
+; CHECK: %lcmp.overflow = icmp eq i32 %n, 0
+; CHECK: %lcmp.or = or i1 %lcmp.overflow, %lcmp.mod
+; CHECK: br i1 %lcmp.or, label %unr.cmp
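+; The checks above cover the runtime-unrolling prologue: %xtraiter holds the
+; remainder of the trip count modulo the unroll factor (the CHECK does not pin
+; down the mask constant), %lcmp.overflow guards against %n being zero, and the
+; combined condition branches to the block that runs those leftover iterations.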
+
; CHECK: unr.cmp{{.*}}:
; CHECK: for.body.unr{{.*}}:
; CHECK: for.body:
diff --git a/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll b/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
new file mode 100644
index 000000000000..39da7fa70ade
--- /dev/null
+++ b/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
@@ -0,0 +1,69 @@
+; RUN: opt < %s -loop-unroll -S | FileCheck %s
+;
+; Verify that the unrolling pass removes existing loop unrolling metadata
+; and adds a node that disables further unrolling after unrolling is complete.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; #pragma clang loop vectorize(enable) unroll(enable) unroll_count(4) vectorize_width(8)
+;
+; Unroll metadata should be replaced with unroll(disable). Vectorize
+; metadata should be untouched.
+;
+; CHECK-LABEL: @loop1(
+; CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[LOOP_1:.*]]
+define void @loop1(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 64
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
+
+for.end: ; preds = %for.body
+ ret void
+}
+!1 = metadata !{metadata !1, metadata !2, metadata !3, metadata !4, metadata !5}
+!2 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+!3 = metadata !{metadata !"llvm.loop.unroll.enable", i1 true}
+!4 = metadata !{metadata !"llvm.loop.unroll.count", i32 4}
+!5 = metadata !{metadata !"llvm.loop.vectorize.width", i32 8}
+
+; #pragma clang loop unroll(disable)
+;
+; Unroll metadata should not change.
+;
+; CHECK-LABEL: @loop2(
+; CHECK: br i1 {{.*}}, label {{.*}}, label {{.*}}, !llvm.loop ![[LOOP_2:.*]]
+define void @loop2(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 64
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !6
+
+for.end: ; preds = %for.body
+ ret void
+}
+!6 = metadata !{metadata !6, metadata !7}
+!7 = metadata !{metadata !"llvm.loop.unroll.enable", i1 false}
+
+; CHECK: ![[LOOP_1]] = metadata !{metadata ![[LOOP_1]], metadata ![[VEC_ENABLE:.*]], metadata ![[WIDTH_8:.*]], metadata ![[UNROLL_DISABLE:.*]]}
+; CHECK: ![[VEC_ENABLE]] = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+; CHECK: ![[WIDTH_8]] = metadata !{metadata !"llvm.loop.vectorize.width", i32 8}
+; CHECK: ![[UNROLL_DISABLE]] = metadata !{metadata !"llvm.loop.unroll.enable", i1 false}
+; CHECK: ![[LOOP_2]] = metadata !{metadata ![[LOOP_2]], metadata ![[UNROLL_DISABLE:.*]]}
diff --git a/test/Transforms/LoopUnroll/unroll-pragmas.ll b/test/Transforms/LoopUnroll/unroll-pragmas.ll
new file mode 100644
index 000000000000..e1b24e44b5ab
--- /dev/null
+++ b/test/Transforms/LoopUnroll/unroll-pragmas.ll
@@ -0,0 +1,289 @@
+; RUN: opt < %s -loop-unroll -S | FileCheck %s
+; RUN: opt < %s -loop-unroll -loop-unroll -S | FileCheck %s
+;
+; Run loop unrolling twice to verify that loop unrolling metadata is properly
+; removed and further unrolling is disabled after the pass is run once.
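+;
+; The pragmas quoted in the comments below are encoded as !llvm.loop metadata
+; on each loop's latch branch; for example, unroll(disable) is represented as
+; metadata !{metadata !"llvm.loop.unroll.enable", i1 false} and unroll_count(4)
+; as metadata !{metadata !"llvm.loop.unroll.count", i32 4}, matching the !N
+; nodes at the end of each function.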
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; loop4 contains a small loop which should be completely unrolled by
+; the default unrolling heuristics. It serves as a control for the
+; unroll(disable) pragma test loop4_with_disable.
+;
+; CHECK-LABEL: @loop4(
+; CHECK-NOT: br i1
+define void @loop4(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 4
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; #pragma clang loop unroll(disable)
+;
+; CHECK-LABEL: @loop4_with_disable(
+; CHECK: store i32
+; CHECK-NOT: store i32
+; CHECK: br i1
+define void @loop4_with_disable(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 4
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
+
+for.end: ; preds = %for.body
+ ret void
+}
+!1 = metadata !{metadata !1, metadata !2}
+!2 = metadata !{metadata !"llvm.loop.unroll.enable", i1 false}
+
+; loop64 has a high enough count that it should *not* be unrolled by
+; the default unrolling heuristic. It serves as the control for the
+; unroll(enable) pragma tests (loop64_with_.*) below.
+;
+; CHECK-LABEL: @loop64(
+; CHECK: store i32
+; CHECK-NOT: store i32
+; CHECK: br i1
+define void @loop64(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 64
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; #pragma clang loop unroll(enable)
+; Loop should be fully unrolled.
+;
+; CHECK-LABEL: @loop64_with_enable(
+; CHECK-NOT: br i1
+define void @loop64_with_enable(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 64
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
+
+for.end: ; preds = %for.body
+ ret void
+}
+!3 = metadata !{metadata !3, metadata !4}
+!4 = metadata !{metadata !"llvm.loop.unroll.enable", i1 true}
+
+; #pragma clang loop unroll_count(4)
+; Loop should be unrolled 4 times.
+;
+; CHECK-LABEL: @loop64_with_count4(
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK-NOT: store i32
+; CHECK: br i1
+define void @loop64_with_count4(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 64
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !5
+
+for.end: ; preds = %for.body
+ ret void
+}
+!5 = metadata !{metadata !5, metadata !6}
+!6 = metadata !{metadata !"llvm.loop.unroll.count", i32 4}
+
+
+; #pragma clang loop unroll(enable) unroll_count(4)
+; Loop should be unrolled 4 times.
+;
+; CHECK-LABEL: @loop64_with_enable_and_count4(
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK-NOT: store i32
+; CHECK: br i1
+define void @loop64_with_enable_and_count4(i32* nocapture %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 64
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !7
+
+for.end: ; preds = %for.body
+ ret void
+}
+!7 = metadata !{metadata !7, metadata !6, metadata !4}
+
+; #pragma clang loop unroll(enable)
+; Full unrolling is requested, but the loop has a dynamic trip count, so
+; no unrolling should occur.
+;
+; CHECK-LABEL: @dynamic_loop_with_enable(
+; CHECK: store i32
+; CHECK-NOT: store i32
+; CHECK: br i1
+define void @dynamic_loop_with_enable(i32* nocapture %a, i32 %b) {
+entry:
+ %cmp3 = icmp sgt i32 %b, 0
+ br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !8
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %b
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !8
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+!8 = metadata !{metadata !8, metadata !4}
+
+; #pragma clang loop unroll_count(4)
+; Loop has a dynamic trip count. Unrolling should occur, but no
+; conditional branches can be removed.
+;
+; CHECK-LABEL: @dynamic_loop_with_count4(
+; CHECK-NOT: store
+; CHECK: br i1
+; CHECK: store
+; CHECK: br i1
+; CHECK: store
+; CHECK: br i1
+; CHECK: store
+; CHECK: br i1
+; CHECK: store
+; CHECK: br i1
+; CHECK-NOT: br i1
+define void @dynamic_loop_with_count4(i32* nocapture %a, i32 %b) {
+entry:
+ %cmp3 = icmp sgt i32 %b, 0
+ br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !9
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %b
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !9
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+!9 = metadata !{metadata !9, metadata !6}
+
+; #pragma clang loop unroll_count(1)
+; Loop should not be unrolled
+;
+; CHECK-LABEL: @unroll_1(
+; CHECK: store i32
+; CHECK-NOT: store i32
+; CHECK: br i1
+define void @unroll_1(i32* nocapture %a, i32 %b) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 4
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !10
+
+for.end: ; preds = %for.body
+ ret void
+}
+!10 = metadata !{metadata !10, metadata !11}
+!11 = metadata !{metadata !"llvm.loop.unroll.count", i32 1}
+
+; #pragma clang loop unroll(enable)
+; Loop has a very high trip count (1 million) and full unrolling was requested.
+; The loop should be unrolled up to the pragma threshold, but not completely.
+;
+; CHECK-LABEL: @unroll_1M(
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: br i1
+define void @unroll_1M(i32* nocapture %a, i32 %b) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !12
+
+for.end: ; preds = %for.body
+ ret void
+}
+!12 = metadata !{metadata !12, metadata !4}
diff --git a/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll b/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
new file mode 100644
index 000000000000..9962c3d76a66
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -loop-vectorize -mtriple=aarch64-none-linux-gnu -mattr=+neon -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; Function Attrs: nounwind
+define i32* @array_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* %c, i32 %size) {
+;CHECK-LABEL: array_add
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+entry:
+ %cmp10 = icmp sgt i32 %size, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4
+ %add = add nsw i32 %1, %0
+ %arrayidx4 = getelementptr inbounds i32* %c, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret i32* %c
+}
diff --git a/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll b/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
new file mode 100644
index 000000000000..f8eb3ed1f35f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -loop-vectorize -mtriple=arm64-none-linux-gnu -mattr=+neon -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+
+; Function Attrs: nounwind
+define i32* @array_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* %c, i32 %size) {
+;CHECK-LABEL: array_add
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+entry:
+ %cmp10 = icmp sgt i32 %size, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4
+ %add = add nsw i32 %1, %0
+ %arrayidx4 = getelementptr inbounds i32* %c, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret i32* %c
+}
diff --git a/test/Transforms/LoopVectorize/AArch64/gather-cost.ll b/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
new file mode 100644
index 000000000000..bb285382e53c
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
@@ -0,0 +1,85 @@
+; RUN: opt -loop-vectorize -mtriple=arm64-apple-ios -S -mcpu=cyclone < %s | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
+
+@kernel = global [512 x float] zeroinitializer, align 16
+@kernel2 = global [512 x float] zeroinitializer, align 16
+@kernel3 = global [512 x float] zeroinitializer, align 16
+@kernel4 = global [512 x float] zeroinitializer, align 16
+@src_data = global [1536 x float] zeroinitializer, align 16
+@r_ = global i8 0, align 1
+@g_ = global i8 0, align 1
+@b_ = global i8 0, align 1
+
+; We don't want to vectorize most loops containing gathers because they are
+; expensive.
+; Make sure we don't vectorize this loop.
+; CHECK-NOT: x float>
+
+define void @_Z4testmm(i64 %size, i64 %offset) {
+entry:
+ %cmp53 = icmp eq i64 %size, 0
+ br i1 %cmp53, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph:
+ br label %for.body
+
+for.body:
+ %r.057 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add10, %for.body ]
+ %g.056 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add20, %for.body ]
+ %v.055 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
+ %add = add i64 %v.055, %offset
+ %mul = mul i64 %add, 3
+ %arrayidx = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %mul
+ %0 = load float* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds [512 x float]* @kernel, i64 0, i64 %v.055
+ %1 = load float* %arrayidx2, align 4
+ %mul3 = fmul fast float %0, %1
+ %arrayidx4 = getelementptr inbounds [512 x float]* @kernel2, i64 0, i64 %v.055
+ %2 = load float* %arrayidx4, align 4
+ %mul5 = fmul fast float %mul3, %2
+ %arrayidx6 = getelementptr inbounds [512 x float]* @kernel3, i64 0, i64 %v.055
+ %3 = load float* %arrayidx6, align 4
+ %mul7 = fmul fast float %mul5, %3
+ %arrayidx8 = getelementptr inbounds [512 x float]* @kernel4, i64 0, i64 %v.055
+ %4 = load float* %arrayidx8, align 4
+ %mul9 = fmul fast float %mul7, %4
+ %add10 = fadd fast float %r.057, %mul9
+ %arrayidx.sum = add i64 %mul, 1
+ %arrayidx11 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
+ %5 = load float* %arrayidx11, align 4
+ %mul13 = fmul fast float %1, %5
+ %mul15 = fmul fast float %2, %mul13
+ %mul17 = fmul fast float %3, %mul15
+ %mul19 = fmul fast float %4, %mul17
+ %add20 = fadd fast float %g.056, %mul19
+ %arrayidx.sum52 = add i64 %mul, 2
+ %arrayidx21 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
+ %6 = load float* %arrayidx21, align 4
+ %mul23 = fmul fast float %1, %6
+ %mul25 = fmul fast float %2, %mul23
+ %mul27 = fmul fast float %3, %mul25
+ %mul29 = fmul fast float %4, %mul27
+ %add30 = fadd fast float %b.054, %mul29
+ %inc = add i64 %v.055, 1
+ %exitcond = icmp ne i64 %inc, %size
+ br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge:
+ %add30.lcssa = phi float [ %add30, %for.body ]
+ %add20.lcssa = phi float [ %add20, %for.body ]
+ %add10.lcssa = phi float [ %add10, %for.body ]
+ %phitmp = fptoui float %add10.lcssa to i8
+ %phitmp60 = fptoui float %add20.lcssa to i8
+ %phitmp61 = fptoui float %add30.lcssa to i8
+ br label %for.end
+
+for.end:
+ %r.0.lcssa = phi i8 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
+ %g.0.lcssa = phi i8 [ %phitmp60, %for.cond.for.end_crit_edge ], [ 0, %entry ]
+ %b.0.lcssa = phi i8 [ %phitmp61, %for.cond.for.end_crit_edge ], [ 0, %entry ]
+ store i8 %r.0.lcssa, i8* @r_, align 1
+ store i8 %g.0.lcssa, i8* @g_, align 1
+ store i8 %b.0.lcssa, i8* @b_, align 1
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/AArch64/lit.local.cfg b/test/Transforms/LoopVectorize/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..937cffb2c119
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/lit.local.cfg
@@ -0,0 +1,5 @@
+config.suffixes = ['.ll']
+
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopVectorize/ARM/arm-unroll.ll b/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
index 39363ab2d802..8843fc2d2b1a 100644
--- a/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
+++ b/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
@@ -1,5 +1,6 @@
; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift -S | FileCheck %s --check-prefix=SWIFT
+; RUN: opt < %s -loop-vectorize -force-vector-width=1 -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift -S | FileCheck %s --check-prefix=SWIFTUNROLL
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios3.0.0"
@@ -30,3 +31,41 @@ define i32 @foo(i32* nocapture %A, i32 %n) nounwind readonly ssp {
%sum.0.lcssa = phi i32 [ 0, %0 ], [ %4, %.lr.ph ]
ret i32 %sum.0.lcssa
}
+
+; Verify the register limit. On ARM we don't have 16 allocatable registers.
+;SWIFTUNROLL-LABEL: @register_limit(
+;SWIFTUNROLL: load i32
+;SWIFTUNROLL-NOT: load i32
+define i32 @register_limit(i32* nocapture %A, i32 %n) {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph:
+ %i.02 = phi i32 [ %5, %.lr.ph ], [ 0, %0 ]
+ %sum.01 = phi i32 [ %4, %.lr.ph ], [ 0, %0 ]
+ %sum.02 = phi i32 [ %6, %.lr.ph ], [ 0, %0 ]
+ %sum.03 = phi i32 [ %7, %.lr.ph ], [ 0, %0 ]
+ %sum.04 = phi i32 [ %8, %.lr.ph ], [ 0, %0 ]
+ %sum.05 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
+ %sum.06 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i32* %A, i32 %i.02
+ %3 = load i32* %2, align 4
+ %4 = add nsw i32 %3, %sum.01
+ %5 = add nsw i32 %i.02, 1
+ %6 = add nsw i32 %3, %sum.02
+ %7 = add nsw i32 %3, %sum.03
+ %8 = add nsw i32 %3, %sum.04
+ %9 = add nsw i32 %3, %sum.05
+ %10 = add nsw i32 %3, %sum.05
+ %exitcond = icmp eq i32 %5, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %sum.0.lcssa = phi i32 [ 0, %0 ], [ %4, %.lr.ph ]
+ %sum.1.lcssa = phi i32 [ 0, %0 ], [ %6, %.lr.ph ]
+ %sum.2.lcssa = phi i32 [ 0, %0 ], [ %7, %.lr.ph ]
+ %sum.4.lcssa = phi i32 [ 0, %0 ], [ %8, %.lr.ph ]
+ %sum.5.lcssa = phi i32 [ 0, %0 ], [ %9, %.lr.ph ]
+ %sum.6.lcssa = phi i32 [ 0, %0 ], [ %10, %.lr.ph ]
+ ret i32 %sum.0.lcssa
+}
diff --git a/test/Transforms/LoopVectorize/ARM/lit.local.cfg b/test/Transforms/LoopVectorize/ARM/lit.local.cfg
index 8a3ba96497e7..98c6700c209d 100644
--- a/test/Transforms/LoopVectorize/ARM/lit.local.cfg
+++ b/test/Transforms/LoopVectorize/ARM/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/LoopVectorize/PowerPC/lit.local.cfg b/test/Transforms/LoopVectorize/PowerPC/lit.local.cfg
new file mode 100644
index 000000000000..5d33887ff0a4
--- /dev/null
+++ b/test/Transforms/LoopVectorize/PowerPC/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'PowerPC' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll b/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
new file mode 100644
index 000000000000..6cd9c4d610b5
--- /dev/null
+++ b/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -mcpu=pwr7 -mattr=+vsx -loop-vectorize -instcombine -S | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.GlobalData = type { [32000 x float], [3 x i32], [4 x i8], [32000 x float], [5 x i32], [12 x i8], [32000 x float], [7 x i32], [4 x i8], [32000 x float], [11 x i32], [4 x i8], [32000 x float], [13 x i32], [12 x i8], [256 x [256 x float]], [17 x i32], [12 x i8], [256 x [256 x float]], [19 x i32], [4 x i8], [256 x [256 x float]], [23 x i32], [4 x i8], [256 x [256 x float]] }
+
+@global_data = external global %struct.GlobalData, align 16
+@ntimes = external hidden unnamed_addr global i32, align 4
+
+define signext i32 @s173() #0 {
+entry:
+ %0 = load i32* @ntimes, align 4
+ %cmp21 = icmp sgt i32 %0, 0
+ br i1 %cmp21, label %for.cond1.preheader, label %for.end12
+
+for.cond1.preheader: ; preds = %for.end, %entry
+ %nl.022 = phi i32 [ %inc11, %for.end ], [ 0, %entry ]
+ br label %for.body3
+
+for.body3: ; preds = %for.body3, %for.cond1.preheader
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx = getelementptr inbounds %struct.GlobalData* @global_data, i64 0, i32 0, i64 %indvars.iv
+ %1 = load float* %arrayidx, align 4
+ %arrayidx5 = getelementptr inbounds %struct.GlobalData* @global_data, i64 0, i32 3, i64 %indvars.iv
+ %2 = load float* %arrayidx5, align 4
+ %add = fadd float %1, %2
+ %3 = add nsw i64 %indvars.iv, 16000
+ %arrayidx8 = getelementptr inbounds %struct.GlobalData* @global_data, i64 0, i32 0, i64 %3
+ store float %add, float* %arrayidx8, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 16000
+ br i1 %exitcond, label %for.end, label %for.body3
+
+for.end: ; preds = %for.body3
+ %inc11 = add nsw i32 %nl.022, 1
+ %4 = load i32* @ntimes, align 4
+ %mul = mul nsw i32 %4, 10
+ %cmp = icmp slt i32 %inc11, %mul
+ br i1 %cmp, label %for.cond1.preheader, label %for.end12
+
+for.end12: ; preds = %for.end, %entry
+ ret i32 0
+
+; CHECK-LABEL: @s173
+; CHECK: load <4 x float>*
+; CHECK: add i64 %index, 16000
+; CHECK: ret i32 0
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/Transforms/LoopVectorize/X86/already-vectorized.ll b/test/Transforms/LoopVectorize/X86/already-vectorized.ll
index 885418c0fdd9..9c69ba87f392 100644
--- a/test/Transforms/LoopVectorize/X86/already-vectorized.ll
+++ b/test/Transforms/LoopVectorize/X86/already-vectorized.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -debug-only=loop-vectorize -O3 -S 2>&1 | FileCheck %s
+; RUN: opt < %s -disable-loop-unrolling -debug-only=loop-vectorize -O3 -S 2>&1 | FileCheck %s
; REQUIRES: asserts
; We want to make sure that we don't even try to vectorize loops again
; The vectorizer used to mark the un-vectorized loop only as already vectorized
@@ -40,7 +40,7 @@ for.end: ; preds = %for.body
; Now, we check for the Hint metadata
; CHECK: [[vect]] = metadata !{metadata [[vect]], metadata [[width:![0-9]+]], metadata [[unroll:![0-9]+]]}
-; CHECK: [[width]] = metadata !{metadata !"llvm.vectorizer.width", i32 1}
-; CHECK: [[unroll]] = metadata !{metadata !"llvm.vectorizer.unroll", i32 1}
+; CHECK: [[width]] = metadata !{metadata !"llvm.loop.vectorize.width", i32 1}
+; CHECK: [[unroll]] = metadata !{metadata !"llvm.loop.interleave.count", i32 1}
; CHECK: [[scalar]] = metadata !{metadata [[scalar]], metadata [[width]], metadata [[unroll]]}
diff --git a/test/Transforms/LoopVectorize/X86/avx512.ll b/test/Transforms/LoopVectorize/X86/avx512.ll
new file mode 100644
index 000000000000..a2208668177d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/avx512.ll
@@ -0,0 +1,35 @@
+; RUN: opt -mattr=+avx512f --loop-vectorize -S < %s | llc -mattr=+avx512f | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Verify that we generate 512-bit wide vectors for a basic integer memset
+; loop.
+
+; CHECK-LABEL: f:
+; CHECK: vmovdqu32 %zmm{{.}}, (
+; CHECK-NOT: %ymm
+
+define void @f(i32* %a, i32 %n) {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.end
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ store i32 %n, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll b/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll
new file mode 100644
index 000000000000..529ed883c3b4
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -mcpu=core-avx2 -loop-vectorize -S | llc -mcpu=core-avx2 | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+@float_array = common global [10000 x float] zeroinitializer, align 16
+@unsigned_array = common global [10000 x i32] zeroinitializer, align 16
+
+; If we need to scalarize the fptoui and then use inserts to build up the
+; vector again, then there is certainly no value in going 256-bit wide.
+; CHECK-NOT: vinserti128
+
+define void @convert(i32 %N) {
+entry:
+ %0 = icmp eq i32 %N, 0
+ br i1 %0, label %for.end, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds [10000 x float]* @float_array, i64 0, i64 %indvars.iv
+ %1 = load float* %arrayidx, align 4
+ %conv = fptoui float %1 to i32
+ %arrayidx2 = getelementptr inbounds [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %N
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll b/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
new file mode 100644
index 000000000000..ef3e3bec793a
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
@@ -0,0 +1,40 @@
+; RUN: opt < %s -mcpu=core-avx2 -loop-vectorize -S | llc -mcpu=core-avx2 | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+@n = global i32 10000, align 4
+@double_array = common global [10000 x double] zeroinitializer, align 16
+@unsigned_array = common global [10000 x i32] zeroinitializer, align 16
+
+; If we need to scalarize the fptoui and then use inserts to build up the
+; vector again, then there is certainly no value in going 256-bit wide.
+; CHECK-NOT: vpinsrd
+
+define void @convert() {
+entry:
+ %0 = load i32* @n, align 4
+ %cmp4 = icmp eq i32 %0, 0
+ br i1 %cmp4, label %for.end, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds [10000 x double]* @double_array, i64 0, i64 %indvars.iv
+ %1 = load double* %arrayidx, align 8
+ %conv = fptoui double %1 to i32
+ %arrayidx2 = getelementptr inbounds [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %2 = trunc i64 %indvars.iv.next to i32
+ %cmp = icmp ult i32 %2, %0
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll b/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll
new file mode 100644
index 000000000000..23e62275ce04
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -S -debug-only=loop-vectorize 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+
+; CHECK: cost of 7 for VF 8 For instruction: %conv = fptosi float %tmp to i8
+define void @float_to_sint8_cost(i8* noalias nocapture %a, float* noalias nocapture readonly %b) nounwind {
+entry:
+ br label %for.body
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %tmp = load float* %arrayidx, align 4
+ %conv = fptosi float %tmp to i8
+ %arrayidx2 = getelementptr inbounds i8* %a, i64 %indvars.iv
+ store i8 %conv, i8* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 256
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/lit.local.cfg b/test/Transforms/LoopVectorize/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Transforms/LoopVectorize/X86/lit.local.cfg
+++ b/test/Transforms/LoopVectorize/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/test/Transforms/LoopVectorize/X86/metadata-enable.ll
new file mode 100644
index 000000000000..8e0ca417b404
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/metadata-enable.ll
@@ -0,0 +1,176 @@
+; RUN: opt < %s -mcpu=corei7 -O1 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1
+; RUN: opt < %s -mcpu=corei7 -O2 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O2
+; RUN: opt < %s -mcpu=corei7 -O3 -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3
+; RUN: opt < %s -mcpu=corei7 -Os -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=Os
+; RUN: opt < %s -mcpu=corei7 -Oz -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=Oz
+; RUN: opt < %s -mcpu=corei7 -O1 -vectorize-loops -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1VEC
+; RUN: opt < %s -mcpu=corei7 -Oz -vectorize-loops -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=OzVEC
+; RUN: opt < %s -mcpu=corei7 -O1 -loop-vectorize -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O1VEC2
+; RUN: opt < %s -mcpu=corei7 -Oz -loop-vectorize -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=OzVEC2
+; RUN: opt < %s -mcpu=corei7 -O3 -disable-loop-vectorization -S -unroll-allow-partial=0 | FileCheck %s --check-prefix=O3DIS
+
+; This file tests the llvm.loop.vectorize.enable metadata forcing
+; vectorization even when optimization levels are too low, or when
+; vectorization is disabled.
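+;
+; For illustration (the original C source is not part of this test), the
+; forcing metadata below corresponds to a loop written roughly as:
+;
+;   void enabled(int *restrict a, const int *restrict b, int N) {
+;   #pragma clang loop vectorize(enable)
+;     for (int i = 0; i < 32; ++i)
+;       a[i] = b[i] + N;
+;   }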
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; O1-LABEL: @enabled(
+; O1: store <4 x i32>
+; O1: ret i32
+; O2-LABEL: @enabled(
+; O2: store <4 x i32>
+; O2: ret i32
+; O3-LABEL: @enabled(
+; O3: store <4 x i32>
+; O3: ret i32
+; Pragma always wins!
+; O3DIS-LABEL: @enabled(
+; O3DIS: store <4 x i32>
+; O3DIS: ret i32
+; Os-LABEL: @enabled(
+; Os: store <4 x i32>
+; Os: ret i32
+; Oz-LABEL: @enabled(
+; Oz: store <4 x i32>
+; Oz: ret i32
+; O1VEC-LABEL: @enabled(
+; O1VEC: store <4 x i32>
+; O1VEC: ret i32
+; OzVEC-LABEL: @enabled(
+; OzVEC: store <4 x i32>
+; OzVEC: ret i32
+; O1VEC2-LABEL: @enabled(
+; O1VEC2: store <4 x i32>
+; O1VEC2: ret i32
+; OzVEC2-LABEL: @enabled(
+; OzVEC2: store <4 x i32>
+; OzVEC2: ret i32
+
+define i32 @enabled(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32 %N) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %N
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 32
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
+
+for.end: ; preds = %for.body
+ %1 = load i32* %a, align 4
+ ret i32 %1
+}
+
+; O1-LABEL: @nopragma(
+; O1-NOT: store <4 x i32>
+; O1: ret i32
+; O2-LABEL: @nopragma(
+; O2: store <4 x i32>
+; O2: ret i32
+; O3-LABEL: @nopragma(
+; O3: store <4 x i32>
+; O3: ret i32
+; O3DIS-LABEL: @nopragma(
+; O3DIS-NOT: store <4 x i32>
+; O3DIS: ret i32
+; Os-LABEL: @nopragma(
+; Os: store <4 x i32>
+; Os: ret i32
+; Oz-LABEL: @nopragma(
+; Oz-NOT: store <4 x i32>
+; Oz: ret i32
+; O1VEC-LABEL: @nopragma(
+; O1VEC: store <4 x i32>
+; O1VEC: ret i32
+; OzVEC-LABEL: @nopragma(
+; OzVEC: store <4 x i32>
+; OzVEC: ret i32
+; O1VEC2-LABEL: @nopragma(
+; O1VEC2: store <4 x i32>
+; O1VEC2: ret i32
+; OzVEC2-LABEL: @nopragma(
+; OzVEC2: store <4 x i32>
+; OzVEC2: ret i32
+
+define i32 @nopragma(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32 %N) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %N
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 32
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ %1 = load i32* %a, align 4
+ ret i32 %1
+}
+
+; O1-LABEL: @disabled(
+; O1-NOT: store <4 x i32>
+; O1: ret i32
+; O2-LABEL: @disabled(
+; O2-NOT: store <4 x i32>
+; O2: ret i32
+; O3-LABEL: @disabled(
+; O3-NOT: store <4 x i32>
+; O3: ret i32
+; O3DIS-LABEL: @disabled(
+; O3DIS-NOT: store <4 x i32>
+; O3DIS: ret i32
+; Os-LABEL: @disabled(
+; Os-NOT: store <4 x i32>
+; Os: ret i32
+; Oz-LABEL: @disabled(
+; Oz-NOT: store <4 x i32>
+; Oz: ret i32
+; O1VEC-LABEL: @disabled(
+; O1VEC-NOT: store <4 x i32>
+; O1VEC: ret i32
+; OzVEC-LABEL: @disabled(
+; OzVEC-NOT: store <4 x i32>
+; OzVEC: ret i32
+; O1VEC2-LABEL: @disabled(
+; O1VEC2-NOT: store <4 x i32>
+; O1VEC2: ret i32
+; OzVEC2-LABEL: @disabled(
+; OzVEC2-NOT: store <4 x i32>
+; OzVEC2: ret i32
+
+define i32 @disabled(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32 %N) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %N
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 32
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !2
+
+for.end: ; preds = %for.body
+ %1 = load i32* %a, align 4
+ ret i32 %1
+}
+
+!0 = metadata !{metadata !0, metadata !1}
+!1 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 1}
+!2 = metadata !{metadata !2, metadata !3}
+!3 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 0}
diff --git a/test/Transforms/LoopVectorize/X86/small-size.ll b/test/Transforms/LoopVectorize/X86/small-size.ll
index 14ac417bb573..bcf16aa5db13 100644
--- a/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -loop-vectorize-with-block-frequency -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -115,6 +115,31 @@ define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
ret void
}
+; N is unknown, so we need a tail. Can't vectorize because the loop is cold.
+;CHECK-LABEL: @example4(
+;CHECK-NOT: <4 x i32>
+;CHECK: ret void
+define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture %q) {
+ %1 = icmp eq i32 %n, 0
+ br i1 %1, label %._crit_edge, label %.lr.ph, !prof !0
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %.05 = phi i32 [ %2, %.lr.ph ], [ %n, %0 ]
+ %.014 = phi i32* [ %5, %.lr.ph ], [ %p, %0 ]
+ %.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
+ %2 = add nsw i32 %.05, -1
+ %3 = getelementptr inbounds i32* %.023, i64 1
+ %4 = load i32* %.023, align 16
+ %5 = getelementptr inbounds i32* %.014, i64 1
+ store i32 %4, i32* %.014, align 16
+ %6 = icmp eq i32 %2, 0
+ br i1 %6, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret void
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
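+; The 64:4 weights above make the loop-skipping edge 16 times more likely than
+; entering %.lr.ph, i.e. the body of @example4 runs with probability 4/68
+; (roughly 6%), which is what makes the loop cold for block-frequency purposes.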
; We can't vectorize this one because we need a runtime ptr check.
;CHECK-LABEL: @example23(
diff --git a/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll b/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll
new file mode 100644
index 000000000000..86c32b2d2ee4
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -S -debug-only=loop-vectorize 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+
+; CHECK: cost of 20 for VF 2 For instruction: %conv = uitofp i64 %tmp to double
+; CHECK: cost of 40 for VF 4 For instruction: %conv = uitofp i64 %tmp to double
+define void @uint64_to_double_cost(i64* noalias nocapture %a, double* noalias nocapture readonly %b) nounwind {
+entry:
+ br label %for.body
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i64* %a, i64 %indvars.iv
+ %tmp = load i64* %arrayidx, align 4
+ %conv = uitofp i64 %tmp to double
+ %arrayidx2 = getelementptr inbounds double* %b, i64 %indvars.iv
+ store double %conv, double* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 256
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll b/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
index ea107dc4dc51..d5024bb13210 100644
--- a/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
+++ b/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
@@ -1,13 +1,26 @@
-; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=4 -force-vector-unroll=0 -dce -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=4 -force-vector-unroll=0 -dce -S \
+; RUN: | FileCheck %s --check-prefix=CHECK-VECTOR
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=1 -force-vector-unroll=0 -dce -S \
+; RUN: | FileCheck %s --check-prefix=CHECK-SCALAR
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
-;CHECK-LABEL: @foo(
-;CHECK: load <4 x i32>
-;CHECK-NOT: load <4 x i32>
-;CHECK: store <4 x i32>
-;CHECK-NOT: store <4 x i32>
-;CHECK: ret
+
+; We don't unroll this loop because it has a small constant trip count.
+;
+; CHECK-VECTOR-LABEL: @foo(
+; CHECK-VECTOR: load <4 x i32>
+; CHECK-VECTOR-NOT: load <4 x i32>
+; CHECK-VECTOR: store <4 x i32>
+; CHECK-VECTOR-NOT: store <4 x i32>
+; CHECK-VECTOR: ret
+;
+; CHECK-SCALAR-LABEL: @foo(
+; CHECK-SCALAR: load i32*
+; CHECK-SCALAR-NOT: load i32*
+; CHECK-SCALAR: store i32
+; CHECK-SCALAR-NOT: store i32
+; CHECK-SCALAR: ret
define i32 @foo(i32* nocapture %A) nounwind uwtable ssp {
br label %1
@@ -26,10 +39,18 @@ define i32 @foo(i32* nocapture %A) nounwind uwtable ssp {
ret i32 undef
}
-;CHECK-LABEL: @bar(
-;CHECK: store <4 x i32>
-;CHECK: store <4 x i32>
-;CHECK: ret
+; But this is a good small loop to unroll as we don't know of a bound on its
+; trip count.
+;
+; CHECK-VECTOR-LABEL: @bar(
+; CHECK-VECTOR: store <4 x i32>
+; CHECK-VECTOR: store <4 x i32>
+; CHECK-VECTOR: ret
+;
+; CHECK-SCALAR-LABEL: @bar(
+; CHECK-SCALAR: store i32
+; CHECK-SCALAR: store i32
+; CHECK-SCALAR: ret
define i32 @bar(i32* nocapture %A, i32 %n) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 0
br i1 %1, label %.lr.ph, label %._crit_edge
@@ -48,3 +69,32 @@ define i32 @bar(i32* nocapture %A, i32 %n) nounwind uwtable ssp {
._crit_edge: ; preds = %.lr.ph, %0
ret i32 undef
}
+
+; Also unroll if we need a runtime check but it was going to be added for
+; vectorization anyway.
+; CHECK-VECTOR-LABEL: @runtime_chk(
+; CHECK-VECTOR: store <4 x float>
+; CHECK-VECTOR: store <4 x float>
+;
+; But not if the unrolling would introduce the runtime check.
+; CHECK-SCALAR-LABEL: @runtime_chk(
+; CHECK-SCALAR: store float
+; CHECK-SCALAR-NOT: store float
+define void @runtime_chk(float* %A, float* %B, float %N) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %mul = fmul float %0, %N
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ store float %mul, float* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 256
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/vect.omp.force.ll b/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
new file mode 100644
index 000000000000..074313bde6f5
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
@@ -0,0 +1,93 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: LV: Loop hints: force=enabled
+; CHECK: LV: Loop hints: force=?
+; No more loops in the module
+; CHECK-NOT: LV: Loop hints: force=
+; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
+; CHECK: 1 loop-vectorize - Number of loops vectorized
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;
+; The source code for the test:
+;
+; #include <math.h>
+; void foo(float* restrict A, float * restrict B, int size)
+; {
+; for (int i = 0; i < size; ++i) A[i] = sinf(B[i]);
+; }
+;
+
+;
+; This loop will be vectorized even though the scalar cost is lower than any of the vector costs, because vectorization is explicitly forced in metadata.
+;
+
+define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+entry:
+ %cmp6 = icmp sgt i32 %size, 0
+ br i1 %cmp6, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %call = tail call float @llvm.sin.f32(float %0)
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+!1 = metadata !{metadata !1, metadata !2}
+!2 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+
+;
+; This loop will not be vectorized, as the scalar cost is lower than any of the vector costs.
+;
+
+define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+entry:
+ %cmp6 = icmp sgt i32 %size, 0
+ br i1 %cmp6, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %call = tail call float @llvm.sin.f32(float %0)
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %size
+ br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+declare float @llvm.sin.f32(float) nounwind readnone
+
+; Dummy metadata
+!3 = metadata !{metadata !3}
+
diff --git a/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
new file mode 100644
index 000000000000..97c31a148e3a
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -0,0 +1,73 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -debug-only=loop-vectorize -stats -S -vectorizer-min-trip-count=21 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: LV: Loop hints: force=enabled
+; CHECK: LV: Loop hints: force=?
+; No more loops in the module
+; CHECK-NOT: LV: Loop hints: force=
+; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
+; CHECK: 1 loop-vectorize - Number of loops vectorized
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;
+; The source code for the test:
+;
+; void foo(float* restrict A, float* restrict B)
+; {
+; for (int i = 0; i < 20; ++i) A[i] += B[i];
+; }
+;
+
+;
+; This loop will be vectorized even though the trip count is below the threshold, because vectorization is explicitly forced in metadata.
+;
+define void @vectorized(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %add = fadd fast float %0, %1
+ store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 20
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
+
+for.end:
+ ret void
+}
+
+!1 = metadata !{metadata !1, metadata !2}
+!2 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+
+;
+; This loop will not be vectorized as the trip count is below the threshold.
+;
+define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %add = fadd fast float %0, %1
+ store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 20
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
+
+for.end:
+ ret void
+}
+
+!3 = metadata !{metadata !3}
+
diff --git a/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll b/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
index 59bb8d0054c5..e57cfefec07c 100644
--- a/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
+++ b/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
@@ -1,4 +1,4 @@
-; RUN: opt -loop-vectorize -mcpu=corei7-avx -debug -S < %s 2>&1 | FileCheck %s
+; RUN: opt -basicaa -loop-vectorize -mcpu=corei7-avx -debug -S < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll b/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
new file mode 100644
index 000000000000..93f24cbf4909
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
@@ -0,0 +1,161 @@
+; RUN: opt < %s -loop-vectorize -S -pass-remarks-missed='loop-vectorize' -pass-remarks-analysis='loop-vectorize' 2>&1 | FileCheck %s
+
+; C/C++ code for tests
+; void test(int *A, int Length) {
+; #pragma clang loop vectorize(enable) interleave(enable)
+; for (int i = 0; i < Length; i++) {
+; A[i] = i;
+; if (A[i] > Length)
+; break;
+; }
+; }
+
+; void test_disabled(int *A, int Length) {
+; #pragma clang loop vectorize(disable) interleave(disable)
+; for (int i = 0; i < Length; i++)
+; A[i] = i;
+; }
+
+; void test_array_bounds(int *A, int *B, int Length) {
+; #pragma clang loop vectorize(enable)
+; for (int i = 0; i < Length; i++)
+; A[i] = A[B[i]];
+; }
+
+; File, line, and column should match those specified in the metadata
+; CHECK: remark: source.cpp:4:5: loop not vectorized: could not determine number of loop iterations
+; CHECK: remark: source.cpp:4:5: loop not vectorized: vectorization was not specified
+; CHECK: remark: source.cpp:13:5: loop not vectorized: vector width and interleave count are explicitly set to 1
+; CHECK: remark: source.cpp:19:5: loop not vectorized: cannot identify array bounds
+; CHECK: remark: source.cpp:19:5: loop not vectorized: vectorization is explicitly enabled
+; CHECK: warning: source.cpp:19:5: loop not vectorized: failed explicitly specified loop vectorization
+
+; CHECK: _Z4testPii
+; CHECK-NOT: x i32>
+; CHECK: ret
+
+; CHECK: _Z13test_disabledPii
+; CHECK-NOT: x i32>
+; CHECK: ret
+
+; CHECK: _Z17test_array_boundsPiS_i
+; CHECK-NOT: x i32>
+; CHECK: ret
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: nounwind optsize ssp uwtable
+define void @_Z4testPii(i32* nocapture %A, i32 %Length) #0 {
+entry:
+ %cmp10 = icmp sgt i32 %Length, 0, !dbg !12
+ br i1 %cmp10, label %for.body, label %for.end, !dbg !12, !llvm.loop !14
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !16
+ %0 = trunc i64 %indvars.iv to i32, !dbg !16
+ store i32 %0, i32* %arrayidx, align 4, !dbg !16, !tbaa !18
+ %cmp3 = icmp sle i32 %0, %Length, !dbg !22
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !12
+ %1 = trunc i64 %indvars.iv.next to i32
+ %cmp = icmp slt i32 %1, %Length, !dbg !12
+ %or.cond = and i1 %cmp3, %cmp, !dbg !22
+ br i1 %or.cond, label %for.body, label %for.end, !dbg !22
+
+for.end: ; preds = %for.body, %entry
+ ret void, !dbg !24
+}
+
+; Function Attrs: nounwind optsize ssp uwtable
+define void @_Z13test_disabledPii(i32* nocapture %A, i32 %Length) #0 {
+entry:
+ %cmp4 = icmp sgt i32 %Length, 0, !dbg !25
+ br i1 %cmp4, label %for.body, label %for.end, !dbg !25, !llvm.loop !27
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !30
+ %0 = trunc i64 %indvars.iv to i32, !dbg !30
+ store i32 %0, i32* %arrayidx, align 4, !dbg !30, !tbaa !18
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !25
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !25
+ %exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !25
+ br i1 %exitcond, label %for.end, label %for.body, !dbg !25, !llvm.loop !27
+
+for.end: ; preds = %for.body, %entry
+ ret void, !dbg !31
+}
+
+; Function Attrs: nounwind optsize ssp uwtable
+define void @_Z17test_array_boundsPiS_i(i32* nocapture %A, i32* nocapture readonly %B, i32 %Length) #0 {
+entry:
+ %cmp9 = icmp sgt i32 %Length, 0, !dbg !32
+ br i1 %cmp9, label %for.body.preheader, label %for.end, !dbg !32, !llvm.loop !34
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !35
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv, !dbg !35
+ %0 = load i32* %arrayidx, align 4, !dbg !35, !tbaa !18
+ %idxprom1 = sext i32 %0 to i64, !dbg !35
+ %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1, !dbg !35
+ %1 = load i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
+ %arrayidx4 = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !35
+ store i32 %1, i32* %arrayidx4, align 4, !dbg !35, !tbaa !18
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !32
+ %exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !32
+ br i1 %exitcond, label %for.end.loopexit, label %for.body, !dbg !32, !llvm.loop !34
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void, !dbg !36
+}
+
+attributes #0 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2}
+!1 = metadata !{metadata !"source.cpp", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !7, metadata !8}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test", metadata !"test", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*, i32)* @_Z4testPii, null, null, metadata !2, i32 1}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null}
+!7 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test_disabled", metadata !"test_disabled", metadata !"", i32 10, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*, i32)* @_Z13test_disabledPii, null, null, metadata !2, i32 10}
+!8 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test_array_bounds", metadata !"test_array_bounds", metadata !"", i32 16, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*, i32*, i32)* @_Z17test_array_boundsPiS_i, null, null, metadata !2, i32 16}
+!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!10 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!11 = metadata !{metadata !"clang version 3.5.0"}
+!12 = metadata !{i32 3, i32 8, metadata !13, null}
+!13 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 3, i32 0, i32 0}
+!14 = metadata !{metadata !14, metadata !15, metadata !15}
+!15 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+!16 = metadata !{i32 4, i32 5, metadata !17, null}
+!17 = metadata !{i32 786443, metadata !1, metadata !13, i32 3, i32 36, i32 0, i32 1}
+!18 = metadata !{metadata !19, metadata !19, i64 0}
+!19 = metadata !{metadata !"int", metadata !20, i64 0}
+!20 = metadata !{metadata !"omnipotent char", metadata !21, i64 0}
+!21 = metadata !{metadata !"Simple C/C++ TBAA"}
+!22 = metadata !{i32 5, i32 9, metadata !23, null}
+!23 = metadata !{i32 786443, metadata !1, metadata !17, i32 5, i32 9, i32 0, i32 2}
+!24 = metadata !{i32 8, i32 1, metadata !4, null}
+!25 = metadata !{i32 12, i32 8, metadata !26, null}
+!26 = metadata !{i32 786443, metadata !1, metadata !7, i32 12, i32 3, i32 0, i32 3}
+!27 = metadata !{metadata !27, metadata !28, metadata !29}
+!28 = metadata !{metadata !"llvm.loop.interleave.count", i32 1}
+!29 = metadata !{metadata !"llvm.loop.vectorize.width", i32 1}
+!30 = metadata !{i32 13, i32 5, metadata !26, null}
+!31 = metadata !{i32 14, i32 1, metadata !7, null}
+!32 = metadata !{i32 18, i32 8, metadata !33, null}
+!33 = metadata !{i32 786443, metadata !1, metadata !8, i32 18, i32 3, i32 0, i32 4}
+!34 = metadata !{metadata !34, metadata !15}
+!35 = metadata !{i32 19, i32 5, metadata !33, null}
+!36 = metadata !{i32 20, i32 1, metadata !8, null}
diff --git a/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll b/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
new file mode 100644
index 000000000000..f6834477ff51
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
@@ -0,0 +1,74 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-unknown-linux -S -pass-remarks='loop-vectorize' 2>&1 | FileCheck -check-prefix=VECTORIZED %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-unroll=4 -mtriple=x86_64-unknown-linux -S -pass-remarks='loop-vectorize' 2>&1 | FileCheck -check-prefix=UNROLLED %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-unroll=1 -mtriple=x86_64-unknown-linux -S -pass-remarks-analysis='loop-vectorize' 2>&1 | FileCheck -check-prefix=NONE %s
+
+; This code has all the !dbg annotations needed to track source line information,
+; but is missing the llvm.dbg.cu annotation. This prevents code generation from
+; emitting debug info in the final output.
+; RUN: llc -mtriple x86_64-pc-linux-gnu %s -o - | FileCheck -check-prefix=DEBUG-OUTPUT %s
+; DEBUG-OUTPUT-NOT: .loc
+; DEBUG-OUTPUT-NOT: {{.*}}.debug_info
+
+; VECTORIZED: remark: vectorization-remarks.c:17:8: vectorized loop (vectorization factor: 4, unrolling interleave factor: 1)
+; UNROLLED: remark: vectorization-remarks.c:17:8: unrolled with interleaving factor 4 (vectorization not beneficial)
+; NONE: remark: vectorization-remarks.c:17:8: loop not vectorized: vector width and interleave count are explicitly set to 1
+
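+; For reference, the IR below corresponds roughly to this reduced C source (a
+; sketch only: the original vectorization-remarks.c presumably initializes cb and
+; cc before this loop, and the line/column numbers in the remarks refer to that
+; original file rather than to this sketch):
+;
+;   void ibar(int *);
+;   int foo(int n) {
+;     int diff = 0;
+;     char cb[16], cc[16];
+;     for (int i = 0; i < 16; ++i)
+;       diff += cb[i] - cc[i];
+;     ibar(&diff);
+;     return 0;
+;   }
+;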
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define i32 @foo(i32 %n) #0 {
+entry:
+ %diff = alloca i32, align 4
+ %cb = alloca [16 x i8], align 16
+ %cc = alloca [16 x i8], align 16
+ store i32 0, i32* %diff, align 4, !dbg !10, !tbaa !11
+ br label %for.body, !dbg !15
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %add8 = phi i32 [ 0, %entry ], [ %add, %for.body ], !dbg !19
+ %arrayidx = getelementptr inbounds [16 x i8]* %cb, i64 0, i64 %indvars.iv, !dbg !19
+ %0 = load i8* %arrayidx, align 1, !dbg !19, !tbaa !21
+ %conv = sext i8 %0 to i32, !dbg !19
+ %arrayidx2 = getelementptr inbounds [16 x i8]* %cc, i64 0, i64 %indvars.iv, !dbg !19
+ %1 = load i8* %arrayidx2, align 1, !dbg !19, !tbaa !21
+ %conv3 = sext i8 %1 to i32, !dbg !19
+ %sub = sub i32 %conv, %conv3, !dbg !19
+ %add = add nsw i32 %sub, %add8, !dbg !19
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !15
+ %exitcond = icmp eq i64 %indvars.iv.next, 16, !dbg !15
+ br i1 %exitcond, label %for.end, label %for.body, !dbg !15
+
+for.end: ; preds = %for.body
+ store i32 %add, i32* %diff, align 4, !dbg !19, !tbaa !11
+ call void @ibar(i32* %diff) #2, !dbg !22
+ ret i32 0, !dbg !23
+}
+
+declare void @ibar(i32*) #1
+
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!1 = metadata !{metadata !"vectorization-remarks.c", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 5, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @foo, null, null, metadata !2, i32 6} ; [ DW_TAG_subprogram ] [line 5] [def] [scope 6] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [./vectorization-remarks.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5.0 "}
+!10 = metadata !{i32 8, i32 3, metadata !4, null} ; [ DW_TAG_imported_declaration ]
+!11 = metadata !{metadata !12, metadata !12, i64 0}
+!12 = metadata !{metadata !"int", metadata !13, i64 0}
+!13 = metadata !{metadata !"omnipotent char", metadata !14, i64 0}
+!14 = metadata !{metadata !"Simple C/C++ TBAA"}
+!15 = metadata !{i32 17, i32 8, metadata !16, null}
+!16 = metadata !{i32 786443, metadata !1, metadata !17, i32 17, i32 8, i32 2, i32 3} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!17 = metadata !{i32 786443, metadata !1, metadata !18, i32 17, i32 8, i32 1, i32 2} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!18 = metadata !{i32 786443, metadata !1, metadata !4, i32 17, i32 3, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!19 = metadata !{i32 18, i32 5, metadata !20, null}
+!20 = metadata !{i32 786443, metadata !1, metadata !18, i32 17, i32 27, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [./vectorization-remarks.c]
+!21 = metadata !{metadata !13, metadata !13, i64 0}
+!22 = metadata !{i32 20, i32 3, metadata !4, null}
+!23 = metadata !{i32 21, i32 3, metadata !4, null}
diff --git a/test/Transforms/LoopVectorize/XCore/lit.local.cfg b/test/Transforms/LoopVectorize/XCore/lit.local.cfg
index 4d17d4642045..bb48713fe33e 100644
--- a/test/Transforms/LoopVectorize/XCore/lit.local.cfg
+++ b/test/Transforms/LoopVectorize/XCore/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'XCore' in targets:
+if not 'XCore' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/LoopVectorize/calloc.ll b/test/Transforms/LoopVectorize/calloc.ll
index 7e7991616459..55c0a605450f 100644
--- a/test/Transforms/LoopVectorize/calloc.ll
+++ b/test/Transforms/LoopVectorize/calloc.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
diff --git a/test/Transforms/LoopVectorize/control-flow.ll b/test/Transforms/LoopVectorize/control-flow.ll
new file mode 100644
index 000000000000..e4ba77fa3daa
--- /dev/null
+++ b/test/Transforms/LoopVectorize/control-flow.ll
@@ -0,0 +1,78 @@
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -S -pass-remarks-missed='loop-vectorize' -pass-remarks-analysis='loop-vectorize' 2>&1 | FileCheck %s
+
+; C/C++ code for control flow test
+; int test(int *A, int Length) {
+; for (int i = 0; i < Length; i++) {
+; if (A[i] > 10.0) goto end;
+; A[i] = 0;
+; }
+; end:
+; return 0;
+; }
+
+; CHECK: remark: source.cpp:5:9: loop not vectorized: loop control flow is not understood by vectorizer
+; CHECK: remark: source.cpp:5:9: loop not vectorized: vectorization was not specified
+
+; CHECK: _Z4testPii
+; CHECK-NOT: x i32>
+; CHECK: ret
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: nounwind optsize ssp uwtable
+define i32 @_Z4testPii(i32* nocapture %A, i32 %Length) #0 {
+entry:
+ %cmp8 = icmp sgt i32 %Length, 0, !dbg !10
+ br i1 %cmp8, label %for.body.preheader, label %end, !dbg !10
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !12
+
+for.body: ; preds = %for.body.preheader, %if.else
+ %indvars.iv = phi i64 [ %indvars.iv.next, %if.else ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !12
+ %0 = load i32* %arrayidx, align 4, !dbg !12, !tbaa !15
+ %cmp1 = icmp sgt i32 %0, 10, !dbg !12
+ br i1 %cmp1, label %end.loopexit, label %if.else, !dbg !12
+
+if.else: ; preds = %for.body
+ store i32 0, i32* %arrayidx, align 4, !dbg !19, !tbaa !15
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !10
+ %1 = trunc i64 %indvars.iv.next to i32, !dbg !10
+ %cmp = icmp slt i32 %1, %Length, !dbg !10
+ br i1 %cmp, label %for.body, label %end.loopexit, !dbg !10
+
+end.loopexit: ; preds = %if.else, %for.body
+ br label %end
+
+end: ; preds = %end.loopexit, %entry
+ ret i32 0, !dbg !20
+}
+
+attributes #0 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2}
+!1 = metadata !{metadata !"source.cpp", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test", metadata !"test", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32*, i32)* @_Z4testPii, null, null, metadata !2, i32 2}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null}
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!8 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5.0"}
+!10 = metadata !{i32 3, i32 8, metadata !11, null}
+!11 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 3, i32 0, i32 0}
+!12 = metadata !{i32 5, i32 9, metadata !13, null}
+!13 = metadata !{i32 786443, metadata !1, metadata !14, i32 5, i32 9, i32 0, i32 2}
+!14 = metadata !{i32 786443, metadata !1, metadata !11, i32 4, i32 3, i32 0, i32 1}
+!15 = metadata !{metadata !16, metadata !16, i64 0}
+!16 = metadata !{metadata !"int", metadata !17, i64 0}
+!17 = metadata !{metadata !"omnipotent char", metadata !18, i64 0}
+!18 = metadata !{metadata !"Simple C/C++ TBAA"}
+!19 = metadata !{i32 8, i32 7, metadata !13, null}
+!20 = metadata !{i32 12, i32 3, metadata !4, null}
diff --git a/test/Transforms/LoopVectorize/flags.ll b/test/Transforms/LoopVectorize/flags.ll
index a4ebb4284881..21d09372d546 100644
--- a/test/Transforms/LoopVectorize/flags.ll
+++ b/test/Transforms/LoopVectorize/flags.ll
@@ -51,3 +51,29 @@ define i32 @flags2(i32 %n, i32* nocapture %A) nounwind uwtable ssp {
._crit_edge: ; preds = %.lr.ph, %0
ret i32 undef
}
+
+; Make sure we copy fast math flags and use them for the final reduction.
+; CHECK-LABEL: fast_math
+; CHECK: load <4 x float>
+; CHECK: fadd fast <4 x float>
+; CHECK: br
+; CHECK: fadd fast <4 x float>
+; CHECK: fadd fast <4 x float>
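+;
+; Roughly the C source this corresponds to (a sketch reconstructed from the IR;
+; it assumes compilation with fast-math, which is what puts the 'fast' flag on
+; the fadds):
+;
+;   float fast_math(float *s) {
+;     float q = 0.0f;
+;     for (int i = 0; i < 256; ++i)
+;       q += s[i];
+;     return q;
+;   }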
+define float @fast_math(float* noalias %s) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %q.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds float* %s, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %add = fadd fast float %q.04, %0
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 256
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ %add.lcssa = phi float [ %add, %for.body ]
+ ret float %add.lcssa
+}
diff --git a/test/Transforms/LoopVectorize/float-reduction.ll b/test/Transforms/LoopVectorize/float-reduction.ll
index c45098dd2c3b..0dfbab07279a 100644
--- a/test/Transforms/LoopVectorize/float-reduction.ll
+++ b/test/Transforms/LoopVectorize/float-reduction.ll
@@ -3,7 +3,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
;CHECK-LABEL: @foo(
-;CHECK: fadd <4 x float>
+;CHECK: fadd fast <4 x float>
;CHECK: ret
define float @foo(float* nocapture %A, i32* nocapture %n) nounwind uwtable readonly ssp {
entry:
diff --git a/test/Transforms/LoopVectorize/gcc-examples.ll b/test/Transforms/LoopVectorize/gcc-examples.ll
index d8959d4c106a..b6cde5d00f5e 100644
--- a/test/Transforms/LoopVectorize/gcc-examples.ll
+++ b/test/Transforms/LoopVectorize/gcc-examples.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 -dce -instcombine -S | FileCheck %s
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=4 -dce -instcombine -S | FileCheck %s -check-prefix=UNROLL
+; RUN: opt < %s -basicaa -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -basicaa -loop-vectorize -force-vector-width=4 -force-vector-unroll=4 -dce -instcombine -S | FileCheck %s -check-prefix=UNROLL
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/global_alias.ll b/test/Transforms/LoopVectorize/global_alias.ll
index 0118fb47412a..d64d67f6a5b1 100644
--- a/test/Transforms/LoopVectorize/global_alias.ll
+++ b/test/Transforms/LoopVectorize/global_alias.ll
@@ -387,7 +387,7 @@ for.end: ; preds = %for.cond
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias08(
-; CHECK: sub nsw <4 x i32>
+; CHECK: sub <4 x i32>
; CHECK: ret
define i32 @noAlias08(i32 %a) #0 {
@@ -439,7 +439,7 @@ for.end: ; preds = %for.cond
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias09(
-; CHECK: sub nsw <4 x i32>
+; CHECK: sub <4 x i32>
; CHECK: ret
define i32 @noAlias09(i32 %a) #0 {
@@ -491,7 +491,7 @@ for.end: ; preds = %for.cond
; return *(PA+a);
; }
; CHECK-LABEL: define i32 @noAlias10(
-; CHECK-NOT: sub nsw <4 x i32>
+; CHECK-NOT: sub {{.*}} <4 x i32>
; CHECK: ret
;
; TODO: This test vectorizes (with run-time check) on real targets with -O3.
@@ -721,7 +721,7 @@ for.end: ; preds = %for.cond
; return Foo.A[a];
; }
; CHECK-LABEL: define i32 @noAlias14(
-; CHECK: sub nsw <4 x i32>
+; CHECK: sub <4 x i32>
; CHECK: ret
define i32 @noAlias14(i32 %a) #0 {
diff --git a/test/Transforms/LoopVectorize/if-conversion.ll b/test/Transforms/LoopVectorize/if-conversion.ll
index dbe0243a8110..6e3e8ed27853 100644
--- a/test/Transforms/LoopVectorize/if-conversion.ll
+++ b/test/Transforms/LoopVectorize/if-conversion.ll
@@ -156,7 +156,7 @@ for.body:
br i1 icmp eq (i32** getelementptr inbounds ([1 x i32*]* @a, i64 0, i64 0), i32** @c), label %cond.false, label %cond.end
cond.false:
- %cond.1 = or i32 %inc3, sdiv (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*]* @a, i64 0, i64 0), i32** @c) to i32))
+ %cond.1 = or i32 %inc3, sdiv (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*]* @a, i64 0, i64 1), i32** @c) to i32))
br label %cond.end
cond.end:
diff --git a/test/Transforms/LoopVectorize/if-pred-stores.ll b/test/Transforms/LoopVectorize/if-pred-stores.ll
new file mode 100644
index 000000000000..7b0e181c845f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/if-pred-stores.ll
@@ -0,0 +1,126 @@
+; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-unroll=2 -loop-vectorize < %s | FileCheck %s --check-prefix=UNROLL
+; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=2 -force-vector-unroll=1 -loop-vectorize -enable-cond-stores-vec < %s | FileCheck %s --check-prefix=VEC
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Test predication of stores.
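+;
+; A C-level sketch of the predicated store below (reconstructed from the IR; the
+; constants 100 and 20 and the trip count of 128 come from the IR, the rest is
+; assumed):
+;
+;   int test(int *f) {
+;     for (int i = 0; i < 128; ++i)
+;       if (f[i] > 100)
+;         f[i] += 20;
+;     return 0;
+;   }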
+define i32 @test(i32* nocapture %f) #0 {
+entry:
+ br label %for.body
+
+; VEC-LABEL: test
+; VEC: %[[v8:.+]] = icmp sgt <2 x i32> %{{.*}}, <i32 100, i32 100>
+; VEC: %[[v9:.+]] = add nsw <2 x i32> %{{.*}}, <i32 20, i32 20>
+; VEC: %[[v10:.+]] = and <2 x i1> %[[v8]], <i1 true, i1 true>
+; VEC: %[[v11:.+]] = extractelement <2 x i1> %[[v10]], i32 0
+; VEC: %[[v12:.+]] = icmp eq i1 %[[v11]], true
+; VEC: br i1 %[[v12]], label %[[cond:.+]], label %[[else:.+]]
+;
+; VEC: [[cond]]:
+; VEC: %[[v13:.+]] = extractelement <2 x i32> %[[v9]], i32 0
+; VEC: %[[v14:.+]] = extractelement <2 x i32*> %{{.*}}, i32 0
+; VEC: store i32 %[[v13]], i32* %[[v14]], align 4
+; VEC: br label %[[else:.+]]
+;
+; VEC: [[else]]:
+; VEC: %[[v15:.+]] = extractelement <2 x i1> %[[v10]], i32 1
+; VEC: %[[v16:.+]] = icmp eq i1 %[[v15]], true
+; VEC: br i1 %[[v16]], label %[[cond2:.+]], label %[[else2:.+]]
+;
+; VEC: [[cond2]]:
+; VEC: %[[v17:.+]] = extractelement <2 x i32> %[[v9]], i32 1
+; VEC: %[[v18:.+]] = extractelement <2 x i32*> %{{.+}} i32 1
+; VEC: store i32 %[[v17]], i32* %[[v18]], align 4
+; VEC: br label %[[else2:.+]]
+;
+; VEC: [[else2]]:
+
+; UNROLL-LABEL: test
+; UNROLL: vector.body:
+; UNROLL: %[[IND:[a-zA-Z0-9]+]] = add i64 %{{.*}}, 0
+; UNROLL: %[[IND1:[a-zA-Z0-9]+]] = add i64 %{{.*}}, 1
+; UNROLL: %[[v0:[a-zA-Z0-9]+]] = getelementptr inbounds i32* %f, i64 %[[IND]]
+; UNROLL: %[[v1:[a-zA-Z0-9]+]] = getelementptr inbounds i32* %f, i64 %[[IND1]]
+; UNROLL: %[[v2:[a-zA-Z0-9]+]] = load i32* %[[v0]], align 4
+; UNROLL: %[[v3:[a-zA-Z0-9]+]] = load i32* %[[v1]], align 4
+; UNROLL: %[[v4:[a-zA-Z0-9]+]] = icmp sgt i32 %[[v2]], 100
+; UNROLL: %[[v5:[a-zA-Z0-9]+]] = icmp sgt i32 %[[v3]], 100
+; UNROLL: %[[v6:[a-zA-Z0-9]+]] = add nsw i32 %[[v2]], 20
+; UNROLL: %[[v7:[a-zA-Z0-9]+]] = add nsw i32 %[[v3]], 20
+; UNROLL: %[[v8:[a-zA-Z0-9]+]] = icmp eq i1 %[[v4]], true
+; UNROLL: br i1 %[[v8]], label %[[cond:[a-zA-Z0-9.]+]], label %[[else:[a-zA-Z0-9.]+]]
+;
+; UNROLL: [[cond]]:
+; UNROLL: store i32 %[[v6]], i32* %[[v0]], align 4
+; UNROLL: br label %[[else]]
+;
+; UNROLL: [[else]]:
+; UNROLL: %[[v9:[a-zA-Z0-9]+]] = icmp eq i1 %[[v5]], true
+; UNROLL: br i1 %[[v9]], label %[[cond2:[a-zA-Z0-9.]+]], label %[[else2:[a-zA-Z0-9.]+]]
+;
+; UNROLL: [[cond2]]:
+; UNROLL: store i32 %[[v7]], i32* %[[v1]], align 4
+; UNROLL: br label %[[else2]]
+;
+; UNROLL: [[else2]]:
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+ %arrayidx = getelementptr inbounds i32* %f, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp1 = icmp sgt i32 %0, 100
+ br i1 %cmp1, label %if.then, label %for.inc
+
+if.then:
+ %add = add nsw i32 %0, 20
+ store i32 %add, i32* %arrayidx, align 4
+ br label %for.inc
+
+for.inc:
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 128
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 0
+}
+
+; Track basic blocks when unrolling conditional blocks. This code used to assert
+; because we did not update the phi nodes with the proper predecessor in the
+; vectorized loop body.
+; PR18724
+
+; UNROLL-LABEL: bug18724
+; UNROLL: store i32
+; UNROLL: store i32
+
+define void @bug18724() {
+entry:
+ br label %for.body9
+
+for.body9:
+ br i1 undef, label %for.inc26, label %for.body14
+
+for.body14:
+ %indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ undef, %for.body9 ]
+ %iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ undef, %for.body9 ]
+ %arrayidx16 = getelementptr inbounds [768 x i32]* undef, i64 0, i64 %indvars.iv3
+ %tmp = load i32* %arrayidx16, align 4
+ br i1 undef, label %if.then18, label %for.inc23
+
+if.then18:
+ store i32 2, i32* %arrayidx16, align 4
+ %inc21 = add nsw i32 %iNewChunks.120, 1
+ br label %for.inc23
+
+for.inc23:
+ %iNewChunks.2 = phi i32 [ %inc21, %if.then18 ], [ %iNewChunks.120, %for.body14 ]
+ %indvars.iv.next4 = add nsw i64 %indvars.iv3, 1
+ %tmp1 = trunc i64 %indvars.iv3 to i32
+ %cmp13 = icmp slt i32 %tmp1, 0
+ br i1 %cmp13, label %for.body14, label %for.inc26
+
+for.inc26:
+ %iNewChunks.1.lcssa = phi i32 [ undef, %for.body9 ], [ %iNewChunks.2, %for.inc23 ]
+ unreachable
+}
diff --git a/test/Transforms/LoopVectorize/increment.ll b/test/Transforms/LoopVectorize/increment.ll
index d35bd58a0281..71bedb7334ac 100644
--- a/test/Transforms/LoopVectorize/increment.ll
+++ b/test/Transforms/LoopVectorize/increment.ll
@@ -34,7 +34,7 @@ define void @inc(i32 %n) nounwind uwtable noinline ssp {
ret void
}
-; Can't vectorize this loop because the access to A[X] is non linear.
+; Can't vectorize this loop because the access to A[X] is non-linear.
;
; for (i = 0; i < n; ++i) {
; A[B[i]]++;
diff --git a/test/Transforms/LoopVectorize/induction.ll b/test/Transforms/LoopVectorize/induction.ll
index 50c3b6b6e79b..7dabcb2ba04f 100644
--- a/test/Transforms/LoopVectorize/induction.ll
+++ b/test/Transforms/LoopVectorize/induction.ll
@@ -75,7 +75,7 @@ loopexit:
; PR17532
; CHECK-LABEL: i8_loop
-; CHECK; icmp eq i32 {{.*}}, 256
+; CHECK: icmp eq i32 {{.*}}, 256
define i32 @i8_loop() nounwind readnone ssp uwtable {
br label %1
@@ -92,7 +92,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable {
}
; CHECK-LABEL: i16_loop
-; CHECK; icmp eq i32 {{.*}}, 65536
+; CHECK: icmp eq i32 {{.*}}, 65536
define i32 @i16_loop() nounwind readnone ssp uwtable {
br label %1
@@ -108,3 +108,64 @@ define i32 @i16_loop() nounwind readnone ssp uwtable {
; <label>:5 ; preds = %1
ret i32 %2
}
+
+; This loop has a backedge-taken count of 0xFFFFFFFF (i32 max): %b.0 starts at 0
+; and is decremented each iteration, so it wraps all the way around before hitting
+; zero. We need to check for this condition and branch directly to the scalar loop.
+
+; CHECK-LABEL: max_i32_backedgetaken
+; CHECK: %backedge.overflow = icmp eq i32 -1, -1
+; CHECK: br i1 %backedge.overflow, label %scalar.ph, label %overflow.checked
+
+; CHECK: scalar.ph:
+; CHECK: %bc.resume.val = phi i32 [ %resume.val, %middle.block ], [ 0, %0 ]
+; CHECK: %bc.merge.rdx = phi i32 [ 1, %0 ], [ %5, %middle.block ]
+
+define i32 @max_i32_backedgetaken() nounwind readnone ssp uwtable {
+
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
+ %b.0 = phi i32 [ 0, %0 ], [ %3, %1 ]
+ %2 = and i32 %a.0, 4
+ %3 = add i32 %b.0, -1
+ %4 = icmp eq i32 %3, 0
+ br i1 %4, label %5, label %1
+
+; <label>:5 ; preds = %1
+ ret i32 %2
+}
+
+; When generating the overflow check we must make sure that the induction start value
+; is defined before the branch to the scalar preheader.
+
+; CHECK-LABEL: testoverflowcheck
+; CHECK: entry
+; CHECK: %[[LOAD:.*]] = load i8
+; CHECK: %[[VAL:.*]] = zext i8 %[[LOAD]] to i32
+; CHECK: br
+
+; CHECK: scalar.ph
+; CHECK: phi i32 [ %{{.*}}, %middle.block ], [ %[[VAL]], %entry ]
+
+@e = global i8 1, align 1
+@d = common global i32 0, align 4
+@c = common global i32 0, align 4
+define i32 @testoverflowcheck() {
+entry:
+ %.pr.i = load i8* @e, align 1
+ %0 = load i32* @d, align 4
+ %c.promoted.i = load i32* @c, align 4
+ br label %cond.end.i
+
+cond.end.i:
+ %inc4.i = phi i8 [ %.pr.i, %entry ], [ %inc.i, %cond.end.i ]
+ %and3.i = phi i32 [ %c.promoted.i, %entry ], [ %and.i, %cond.end.i ]
+ %and.i = and i32 %0, %and3.i
+ %inc.i = add i8 %inc4.i, 1
+ %tobool.i = icmp eq i8 %inc.i, 0
+ br i1 %tobool.i, label %loopexit, label %cond.end.i
+
+loopexit:
+ ret i32 %and.i
+}
diff --git a/test/Transforms/LoopVectorize/intrinsic.ll b/test/Transforms/LoopVectorize/intrinsic.ll
index c3d570c03a77..7dfaf03b0f2d 100644
--- a/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/test/Transforms/LoopVectorize/intrinsic.ll
@@ -1090,3 +1090,105 @@ for.end: ; preds = %for.body
ret void
}
+declare double @llvm.powi.f64(double %Val, i32 %power) nounwind readnone
+
+;CHECK-LABEL: @powi_f64(
+;CHECK: llvm.powi.v4f64
+;CHECK: ret void
+define void @powi_f64(i32 %n, double* noalias %y, double* noalias %x, i32 %P) nounwind uwtable {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8
+ %call = tail call double @llvm.powi.f64(double %0, i32 %P) nounwind readnone
+ %arrayidx4 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx4, align 8
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+;CHECK-LABEL: @powi_f64_neg(
+;CHECK-NOT: llvm.powi.v4f64
+;CHECK: ret void
+define void @powi_f64_neg(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8
+ %1 = trunc i64 %indvars.iv to i32
+ %call = tail call double @llvm.powi.f64(double %0, i32 %1) nounwind readnone
+ %arrayidx4 = getelementptr inbounds double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx4, align 8
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.cttz.i64 (i64, i1) nounwind readnone
+
+;CHECK-LABEL: @cttz_f64(
+;CHECK: llvm.cttz.v4i64
+;CHECK: ret void
+define void @cttz_f64(i32 %n, i64* noalias %y, i64* noalias %x) nounwind uwtable {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i64* %y, i64 %indvars.iv
+ %0 = load i64* %arrayidx, align 8
+ %call = tail call i64 @llvm.cttz.i64(i64 %0, i1 true) nounwind readnone
+ %arrayidx4 = getelementptr inbounds i64* %x, i64 %indvars.iv
+ store i64 %call, i64* %arrayidx4, align 8
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.ctlz.i64 (i64, i1) nounwind readnone
+
+;CHECK-LABEL: @ctlz_f64(
+;CHECK: llvm.ctlz.v4i64
+;CHECK: ret void
+define void @ctlz_f64(i32 %n, i64* noalias %y, i64* noalias %x) nounwind uwtable {
+entry:
+ %cmp9 = icmp sgt i32 %n, 0
+ br i1 %cmp9, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i64* %y, i64 %indvars.iv
+ %0 = load i64* %arrayidx, align 8
+ %call = tail call i64 @llvm.ctlz.i64(i64 %0, i1 true) nounwind readnone
+ %arrayidx4 = getelementptr inbounds i64* %x, i64 %indvars.iv
+ store i64 %call, i64* %arrayidx4, align 8
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/metadata-unroll.ll b/test/Transforms/LoopVectorize/metadata-unroll.ll
index 7f1037200659..848f1f9601b9 100644
--- a/test/Transforms/LoopVectorize/metadata-unroll.ll
+++ b/test/Transforms/LoopVectorize/metadata-unroll.ll
@@ -38,4 +38,4 @@ define void @inc(i32 %n) nounwind uwtable noinline ssp {
}
!0 = metadata !{metadata !0, metadata !1}
-!1 = metadata !{metadata !"llvm.vectorizer.unroll", i32 2}
+!1 = metadata !{metadata !"llvm.loop.interleave.count", i32 2}
diff --git a/test/Transforms/LoopVectorize/metadata-width.ll b/test/Transforms/LoopVectorize/metadata-width.ll
index 1960c0bad6bc..87de655da6f2 100644
--- a/test/Transforms/LoopVectorize/metadata-width.ll
+++ b/test/Transforms/LoopVectorize/metadata-width.ll
@@ -28,4 +28,4 @@ for.end: ; preds = %for.body, %entry
attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
!0 = metadata !{metadata !0, metadata !1}
-!1 = metadata !{metadata !"llvm.vectorizer.width", i32 8}
+!1 = metadata !{metadata !"llvm.loop.vectorize.width", i32 8}
diff --git a/test/Transforms/LoopVectorize/metadata.ll b/test/Transforms/LoopVectorize/metadata.ll
new file mode 100644
index 000000000000..bdcf1c9fb229
--- /dev/null
+++ b/test/Transforms/LoopVectorize/metadata.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define i32 @test1(i32* nocapture %a, float* nocapture readonly %b) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %conv = fptosi float %0 to i32
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx2, align 4, !tbaa !4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1600
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 0
+}
+
+; CHECK-LABEL: @test1
+; CHECK: load <4 x float>* %{{.*}}, align 4, !tbaa ![[TFLT:[0-9]+]]
+; CHECK: store <4 x i32> %{{.*}}, <4 x i32>* %{{.*}}, align 4, !tbaa ![[TINT:[0-9]+]]
+; CHECK: ret i32 0
+
+; CHECK-DAG: ![[TFLT]] = metadata !{metadata ![[TFLT1:[0-9]+]]
+; CHECK-DAG: ![[TFLT1]] = metadata !{metadata !"float"
+
+; CHECK-DAG: ![[TINT]] = metadata !{metadata ![[TINT1:[0-9]+]]
+; CHECK-DAG: ![[TINT1]] = metadata !{metadata !"int"
+
+attributes #0 = { nounwind uwtable }
+
+!0 = metadata !{metadata !1, metadata !1, i64 0}
+!1 = metadata !{metadata !"float", metadata !2, i64 0}
+!2 = metadata !{metadata !"omnipotent char", metadata !3, i64 0}
+!3 = metadata !{metadata !"Simple C/C++ TBAA"}
+!4 = metadata !{metadata !5, metadata !5, i64 0}
+!5 = metadata !{metadata !"int", metadata !2, i64 0}
+
diff --git a/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll b/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll
index 5fc5ed55a99d..88a29c50df5a 100644
--- a/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll
+++ b/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll
@@ -1,6 +1,6 @@
; RUN: opt -indvars -loop-vectorize -force-vector-width=2 -force-vector-unroll=1 -S < %s | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
; We must not vectorize this loop. %add55 is not a reduction. Its value is used
diff --git a/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/test/Transforms/LoopVectorize/multiple-address-spaces.ll
index 7d836dedbdbb..d64662838e00 100644
--- a/test/Transforms/LoopVectorize/multiple-address-spaces.ll
+++ b/test/Transforms/LoopVectorize/multiple-address-spaces.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
; From a simple program with two address spaces:
; char Y[4*10000] __attribute__((address_space(1)));
diff --git a/test/Transforms/LoopVectorize/no_array_bounds.ll b/test/Transforms/LoopVectorize/no_array_bounds.ll
new file mode 100644
index 000000000000..240b1b5d49dc
--- /dev/null
+++ b/test/Transforms/LoopVectorize/no_array_bounds.ll
@@ -0,0 +1,101 @@
+; RUN: opt < %s -loop-vectorize -S 2>&1 | FileCheck %s
+
+; Verify that a warning is generated when vectorization/interleaving is explicitly
+; specified but fails to occur.
+; CHECK: warning: no_array_bounds.cpp:5:5: loop not vectorized: failed explicitly specified loop vectorization
+; CHECK: warning: no_array_bounds.cpp:10:5: loop not interleaved: failed explicitly specified loop interleaving
+
+; #pragma clang loop vectorize(enable)
+; for (int i = 0; i < number; i++) {
+; A[B[i]]++;
+; }
+
+; #pragma clang loop vectorize(disable) interleave(enable)
+; for (int i = 0; i < number; i++) {
+; B[A[i]]++;
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: nounwind ssp uwtable
+define void @_Z4testPiS_i(i32* nocapture %A, i32* nocapture %B, i32 %number) #0 {
+entry:
+ %cmp25 = icmp sgt i32 %number, 0, !dbg !10
+ br i1 %cmp25, label %for.body.preheader, label %for.end15, !dbg !10, !llvm.loop !12
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !14
+
+for.cond5.preheader: ; preds = %for.body
+ br i1 %cmp25, label %for.body7.preheader, label %for.end15, !dbg !16, !llvm.loop !18
+
+for.body7.preheader: ; preds = %for.cond5.preheader
+ br label %for.body7, !dbg !20
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv27 = phi i64 [ %indvars.iv.next28, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv27, !dbg !14
+ %0 = load i32* %arrayidx, align 4, !dbg !14, !tbaa !22
+ %idxprom1 = sext i32 %0 to i64, !dbg !14
+ %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1, !dbg !14
+ %1 = load i32* %arrayidx2, align 4, !dbg !14, !tbaa !22
+ %inc = add nsw i32 %1, 1, !dbg !14
+ store i32 %inc, i32* %arrayidx2, align 4, !dbg !14, !tbaa !22
+ %indvars.iv.next28 = add nuw nsw i64 %indvars.iv27, 1, !dbg !10
+ %lftr.wideiv29 = trunc i64 %indvars.iv.next28 to i32, !dbg !10
+ %exitcond30 = icmp eq i32 %lftr.wideiv29, %number, !dbg !10
+ br i1 %exitcond30, label %for.cond5.preheader, label %for.body, !dbg !10, !llvm.loop !12
+
+for.body7: ; preds = %for.body7.preheader, %for.body7
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body7 ], [ 0, %for.body7.preheader ]
+ %arrayidx9 = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !20
+ %2 = load i32* %arrayidx9, align 4, !dbg !20, !tbaa !22
+ %idxprom10 = sext i32 %2 to i64, !dbg !20
+ %arrayidx11 = getelementptr inbounds i32* %B, i64 %idxprom10, !dbg !20
+ %3 = load i32* %arrayidx11, align 4, !dbg !20, !tbaa !22
+ %inc12 = add nsw i32 %3, 1, !dbg !20
+ store i32 %inc12, i32* %arrayidx11, align 4, !dbg !20, !tbaa !22
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !16
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !16
+ %exitcond = icmp eq i32 %lftr.wideiv, %number, !dbg !16
+ br i1 %exitcond, label %for.end15.loopexit, label %for.body7, !dbg !16, !llvm.loop !18
+
+for.end15.loopexit: ; preds = %for.body7
+ br label %for.end15
+
+for.end15: ; preds = %for.end15.loopexit, %entry, %for.cond5.preheader
+ ret void, !dbg !26
+}
+
+attributes #0 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2}
+!1 = metadata !{metadata !"no_array_bounds.cpp", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test", metadata !"test", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*, i32*, i32)* @_Z4testPiS_i, null, null, metadata !2, i32 2}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null}
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!8 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5.0"}
+!10 = metadata !{i32 4, i32 8, metadata !11, null}
+!11 = metadata !{i32 786443, metadata !1, metadata !4, i32 4, i32 3, i32 0, i32 0}
+!12 = metadata !{metadata !12, metadata !13}
+!13 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+!14 = metadata !{i32 5, i32 5, metadata !15, null}
+!15 = metadata !{i32 786443, metadata !1, metadata !11, i32 4, i32 36, i32 0, i32 1}
+!16 = metadata !{i32 9, i32 8, metadata !17, null}
+!17 = metadata !{i32 786443, metadata !1, metadata !4, i32 9, i32 3, i32 0, i32 2}
+!18 = metadata !{metadata !18, metadata !13, metadata !19}
+!19 = metadata !{metadata !"llvm.loop.vectorize.width", i32 1}
+!20 = metadata !{i32 10, i32 5, metadata !21, null}
+!21 = metadata !{i32 786443, metadata !1, metadata !17, i32 9, i32 36, i32 0, i32 3}
+!22 = metadata !{metadata !23, metadata !23, i64 0}
+!23 = metadata !{metadata !"int", metadata !24, i64 0}
+!24 = metadata !{metadata !"omnipotent char", metadata !25, i64 0}
+!25 = metadata !{metadata !"Simple C/C++ TBAA"}
+!26 = metadata !{i32 12, i32 1, metadata !4, null}
diff --git a/test/Transforms/LoopVectorize/no_switch.ll b/test/Transforms/LoopVectorize/no_switch.ll
new file mode 100644
index 000000000000..8f654e41d4c8
--- /dev/null
+++ b/test/Transforms/LoopVectorize/no_switch.ll
@@ -0,0 +1,86 @@
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -S -pass-remarks-missed='loop-vectorize' -pass-remarks-analysis='loop-vectorize' 2>&1 | FileCheck %s
+
+; CHECK: remark: source.cpp:4:5: loop not vectorized: loop contains a switch statement
+; CHECK: remark: source.cpp:4:5: loop not vectorized: vectorization is explicitly enabled with width 4
+; CHECK: warning: source.cpp:4:5: loop not vectorized: failed explicitly specified loop vectorization
+
+; CHECK: _Z11test_switchPii
+; CHECK-NOT: x i32>
+; CHECK: ret
+
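+; A rough C-level sketch of the loop below (case bodies approximated from the IR;
+; the only point that matters is that the loop body contains a switch, which is
+; what blocks vectorization):
+;
+;   void test_switch(int *A, int Length) {
+;   #pragma clang loop vectorize(enable)
+;     for (int i = 0; i < Length; i++) {
+;       switch (A[i]) {
+;       case 0:  A[i] = i * 2; break;
+;       case 1:  A[i] = i;     // falls through
+;       default: A[i] = 0;
+;       }
+;     }
+;   }
+;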
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: nounwind optsize ssp uwtable
+define void @_Z11test_switchPii(i32* nocapture %A, i32 %Length) #0 {
+entry:
+ %cmp18 = icmp sgt i32 %Length, 0, !dbg !10
+ br i1 %cmp18, label %for.body.preheader, label %for.end, !dbg !10, !llvm.loop !12
+
+for.body.preheader: ; preds = %entry
+ br label %for.body, !dbg !14
+
+for.body: ; preds = %for.body.preheader, %for.inc
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !14
+ %0 = load i32* %arrayidx, align 4, !dbg !14, !tbaa !16
+ switch i32 %0, label %for.inc [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb3
+ ], !dbg !14
+
+sw.bb: ; preds = %for.body
+ %1 = trunc i64 %indvars.iv to i32, !dbg !20
+ %mul = shl nsw i32 %1, 1, !dbg !20
+ br label %for.inc, !dbg !22
+
+sw.bb3: ; preds = %for.body
+ %2 = trunc i64 %indvars.iv to i32, !dbg !23
+ store i32 %2, i32* %arrayidx, align 4, !dbg !23, !tbaa !16
+ br label %for.inc, !dbg !23
+
+for.inc: ; preds = %sw.bb3, %for.body, %sw.bb
+ %storemerge = phi i32 [ %mul, %sw.bb ], [ 0, %for.body ], [ 0, %sw.bb3 ]
+ store i32 %storemerge, i32* %arrayidx, align 4, !dbg !20, !tbaa !16
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !10
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !10
+ %exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !10
+ br i1 %exitcond, label %for.end.loopexit, label %for.body, !dbg !10, !llvm.loop !12
+
+for.end.loopexit: ; preds = %for.inc
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void, !dbg !24
+}
+
+attributes #0 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !"", i32 2}
+!1 = metadata !{metadata !"source.cpp", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"test_switch", metadata !"test_switch", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32*, i32)* @_Z11test_switchPii, null, null, metadata !2, i32 1}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null}
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
+!8 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5.0"}
+!10 = metadata !{i32 3, i32 8, metadata !11, null}
+!11 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 3, i32 0, i32 0}
+!12 = metadata !{metadata !12, metadata !13, metadata !13}
+!13 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+!14 = metadata !{i32 4, i32 5, metadata !15, null}
+!15 = metadata !{i32 786443, metadata !1, metadata !11, i32 3, i32 36, i32 0, i32 1}
+!16 = metadata !{metadata !17, metadata !17, i64 0}
+!17 = metadata !{metadata !"int", metadata !18, i64 0}
+!18 = metadata !{metadata !"omnipotent char", metadata !19, i64 0}
+!19 = metadata !{metadata !"Simple C/C++ TBAA"}
+!20 = metadata !{i32 6, i32 7, metadata !21, null}
+!21 = metadata !{i32 786443, metadata !1, metadata !15, i32 4, i32 18, i32 0, i32 2}
+!22 = metadata !{i32 7, i32 5, metadata !21, null}
+!23 = metadata !{i32 9, i32 7, metadata !21, null}
+!24 = metadata !{i32 14, i32 1, metadata !4, null}
diff --git a/test/Transforms/LoopVectorize/ptr_loops.ll b/test/Transforms/LoopVectorize/ptr_loops.ll
index 15983f068556..1259e21ebf2e 100644
--- a/test/Transforms/LoopVectorize/ptr_loops.ll
+++ b/test/Transforms/LoopVectorize/ptr_loops.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
+; RUN: opt < %s -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/runtime-check-address-space.ll b/test/Transforms/LoopVectorize/runtime-check-address-space.ll
index 6c86561a1c7e..5bf7020a475a 100644
--- a/test/Transforms/LoopVectorize/runtime-check-address-space.ll
+++ b/test/Transforms/LoopVectorize/runtime-check-address-space.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -march=r600 -mcpu=cayman -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine < %s | FileCheck %s
+; RUN: opt -S -march=r600 -mcpu=cayman -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine < %s | FileCheck %s
; Check vectorization that would ordinarily require a runtime bounds
; check on the pointers when mixing address spaces. For now we cannot
diff --git a/test/Transforms/LoopVectorize/runtime-check-readonly.ll b/test/Transforms/LoopVectorize/runtime-check-readonly.ll
index a2b9ad94c837..73b28301b7e7 100644
--- a/test/Transforms/LoopVectorize/runtime-check-readonly.ll
+++ b/test/Transforms/LoopVectorize/runtime-check-readonly.ll
@@ -5,13 +5,16 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK-LABEL: @add_ints(
;CHECK: br
+;CHECK: br
;CHECK: getelementptr
-;CHECK-NEXT: getelementptr
-;CHECK-NEXT: icmp uge
-;CHECK-NEXT: icmp uge
-;CHECK-NEXT: icmp uge
-;CHECK-NEXT: icmp uge
-;CHECK-NEXT: and
+;CHECK-DAG: getelementptr
+;CHECK-DAG: icmp uge
+;CHECK-DAG: icmp uge
+;CHECK-DAG: icmp uge
+;CHECK-DAG: icmp uge
+;CHECK-DAG: and
+;CHECK-DAG: and
+;CHECK: br
;CHECK: ret
define void @add_ints(i32* nocapture %A, i32* nocapture %B, i32* nocapture %C) {
entry:
diff --git a/test/Transforms/LoopVectorize/store-shuffle-bug.ll b/test/Transforms/LoopVectorize/store-shuffle-bug.ll
index 0ec8010756d1..26f4d156df61 100644
--- a/test/Transforms/LoopVectorize/store-shuffle-bug.ll
+++ b/test/Transforms/LoopVectorize/store-shuffle-bug.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine < %s | FileCheck %s
+; RUN: opt -S -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -19,18 +19,13 @@ entry:
; CHECK-LABEL: @t(
; CHECK: vector.body:
-; CHECK: load <4 x i32>
-; CHECK: [[VAR1:%[a-zA-Z0-9]+]] = shufflevector
-; CHECK: load <4 x i32>
-; CHECK: [[VAR2:%[a-zA-Z0-9]+]] = shufflevector
+; CHECK: [[VAR1:%[a-zA-Z0-9.]+]] = load <4 x i32>
+; CHECK: [[VAR2:%[a-zA-Z0-9.]+]] = load <4 x i32>
; CHECK: [[VAR3:%[a-zA-Z0-9]+]] = add nsw <4 x i32> [[VAR2]], [[VAR1]]
-; CHECK: [[VAR4:%[a-zA-Z0-9]+]] = shufflevector <4 x i32> [[VAR3]]
-; CHECK: store <4 x i32> [[VAR4]]
-; CHECK: load <4 x i32>
-; CHECK: [[VAR5:%[a-zA-Z0-9]+]] = shufflevector
-; CHECK-NOT: add nsw <4 x i32> [[VAR4]], [[VAR5]]
-; CHECK-NOT: add nsw <4 x i32> [[VAR5]], [[VAR4]]
-; CHECK: add nsw <4 x i32> [[VAR3]], [[VAR5]]
+; CHECK: store <4 x i32> [[VAR3]]
+; CHECK: [[VAR4:%[a-zA-Z0-9.]+]] = load <4 x i32>
+; CHECK: add nsw <4 x i32> [[VAR3]], [[VAR4]]
+; CHECK-NOT: shufflevector
for.body:
%indvars.iv = phi i64 [ 93, %entry ], [ %indvars.iv.next, %for.body ]
diff --git a/test/Transforms/LoopVectorize/tbaa-nodep.ll b/test/Transforms/LoopVectorize/tbaa-nodep.ll
new file mode 100644
index 000000000000..f31b3072bc6c
--- /dev/null
+++ b/test/Transforms/LoopVectorize/tbaa-nodep.ll
@@ -0,0 +1,102 @@
+; RUN: opt < %s -tbaa -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -simplifycfg -S | FileCheck %s
+; RUN: opt < %s -basicaa -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -simplifycfg -S | FileCheck %s --check-prefix=CHECK-NOTBAA
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define i32 @test1(i32* nocapture %a, float* nocapture readonly %b) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %conv = fptosi float %0 to i32
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ store i32 %conv, i32* %arrayidx2, align 4, !tbaa !4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1600
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 0
+
+; TBAA partitions the accesses in this loop, so it can be vectorized without
+; runtime checks.
+
+; CHECK-LABEL: @test1
+; CHECK: entry:
+; CHECK-NEXT: br label %vector.body
+; CHECK: vector.body:
+
+; CHECK: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK: store <4 x i32> %{{.*}}, <4 x i32>* %{{.*}}, align 4, !tbaa
+
+; CHECK: ret i32 0
+
+; CHECK-NOTBAA-LABEL: @test1
+; CHECK-NOTBAA: icmp uge i32*
+
+; CHECK-NOTBAA: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK-NOTBAA: store <4 x i32> %{{.*}}, <4 x i32>* %{{.*}}, align 4, !tbaa
+
+; CHECK-NOTBAA: ret i32 0
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @test2(i32* nocapture readonly %a, float* nocapture readonly %b, float* nocapture %c) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4, !tbaa !4
+ %conv = sitofp i32 %1 to float
+ %mul = fmul float %0, %conv
+ %arrayidx4 = getelementptr inbounds float* %c, i64 %indvars.iv
+ store float %mul, float* %arrayidx4, align 4, !tbaa !0
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1600
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 0
+
+; This test is like the first, except here there is still one runtime check
+; required. Without TBAA, however, two checks are required.
+
+; CHECK-LABEL: @test2
+; CHECK: icmp uge float*
+; CHECK: icmp uge float*
+; CHECK-NOT: icmp uge i32*
+
+; CHECK: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 4, !tbaa
+
+; CHECK: ret i32 0
+
+; CHECK-NOTBAA-LABEL: @test2
+; CHECK-NOTBAA: icmp uge float*
+; CHECK-NOTBAA: icmp uge float*
+; CHECK-NOTBAA-DAG: icmp uge float*
+; CHECK-NOTBAA-DAG: icmp uge i32*
+
+; CHECK-NOTBAA: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK-NOTBAA: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 4, !tbaa
+
+; CHECK-NOTBAA: ret i32 0
+}
+
+attributes #0 = { nounwind uwtable }
+
+!0 = metadata !{metadata !1, metadata !1, i64 0}
+!1 = metadata !{metadata !"float", metadata !2, i64 0}
+!2 = metadata !{metadata !"omnipotent char", metadata !3, i64 0}
+!3 = metadata !{metadata !"Simple C/C++ TBAA"}
+!4 = metadata !{metadata !5, metadata !5, i64 0}
+!5 = metadata !{metadata !"int", metadata !2, i64 0}
+
diff --git a/test/Transforms/LoopVectorize/unroll_novec.ll b/test/Transforms/LoopVectorize/unroll_novec.ll
index 33f128da905d..89f4678526de 100644
--- a/test/Transforms/LoopVectorize/unroll_novec.ll
+++ b/test/Transforms/LoopVectorize/unroll_novec.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-unroll=2 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-target-num-scalar-regs=16 -force-target-max-scalar-unroll=8 -force-target-instruction-cost=1 -small-loop-cost=40 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -12,10 +12,20 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK-LABEL: @inc(
;CHECK: load i32*
;CHECK: load i32*
+;CHECK: load i32*
+;CHECK: load i32*
+;CHECK-NOT: load i32*
+;CHECK: add nsw i32
;CHECK: add nsw i32
;CHECK: add nsw i32
+;CHECK: add nsw i32
+;CHECK-NOT: add nsw i32
+;CHECK: store i32
+;CHECK: store i32
;CHECK: store i32
;CHECK: store i32
+;CHECK-NOT: store i32
+;CHECK: add i64 %{{.*}}, 4
;CHECK: ret void
define void @inc(i32 %n) nounwind uwtable noinline ssp {
%1 = icmp sgt i32 %n, 0
diff --git a/test/Transforms/LoopVectorize/value-ptr-bug.ll b/test/Transforms/LoopVectorize/value-ptr-bug.ll
index e8d37285f803..6b06afaf0de2 100644
--- a/test/Transforms/LoopVectorize/value-ptr-bug.ll
+++ b/test/Transforms/LoopVectorize/value-ptr-bug.ll
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; PR16073
-; Because we were caching value pointers accross a function call that could RAUW
+; Because we were caching value pointers across a function call that could RAUW
; we would generate an undefined value store below:
; SCEVExpander::expandCodeFor would change a value (the start value of an
; induction) that we cached in the induction variable list.
diff --git a/test/Transforms/LoopVectorize/vect.omp.persistence.ll b/test/Transforms/LoopVectorize/vect.omp.persistence.ll
new file mode 100644
index 000000000000..f6465677839e
--- /dev/null
+++ b/test/Transforms/LoopVectorize/vect.omp.persistence.ll
@@ -0,0 +1,88 @@
+; RUN: opt < %s -O2 -force-vector-unroll=2 -force-vector-width=4 -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; Loop from "rotated"
+; CHECK: LV: Loop hints: force=enabled
+; Loop from "nonrotated"
+; CHECK: LV: Loop hints: force=enabled
+; No more loops in the module
+; CHECK-NOT: LV: Loop hints: force=
+; In total only 1 loop should be rotated.
+; CHECK: 1 loop-rotate
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; See http://reviews.llvm.org/D3348 for details.
+
+;
+; Test #1
+;
+; Ensure that "llvm.loop.vectorize.enable" metadata was not lost prior to LoopVectorize pass.
+; In past LoopRotate was clearing that metadata.
+;
+; The source C code is:
+; void rotated(float *a, int size)
+; {
+; int t = 0;
+; #pragma omp simd
+; for (int i = 0; i < size; ++i) {
+; a[i] = a[i-5] * a[i+2];
+; ++t;
+; }
+;}
+
+define void @rotated(float* nocapture %a, i64 %size) {
+entry:
+ %cmp1 = icmp sgt i64 %size, 0
+ br i1 %cmp1, label %for.header, label %for.end
+
+for.header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %cmp2 = icmp sgt i64 %indvars.iv, %size
+ br i1 %cmp2, label %for.end, label %for.body
+
+for.body:
+
+ %0 = add nsw i64 %indvars.iv, -5
+ %arrayidx = getelementptr inbounds float* %a, i64 %0
+ %1 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %2 = add nsw i64 %indvars.iv, 2
+ %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+ %3 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %mul = fmul float %1, %3
+ %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
+ store float %mul, float* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !1
+
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br label %for.header, !llvm.loop !1
+
+for.end:
+ ret void
+}
+
+!1 = metadata !{metadata !1, metadata !2}
+!2 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
+
+;
+; Test #2
+;
+; Ensure that "llvm.loop.vectorize.enable" metadata was not lost even
+; if loop was not rotated (see http://reviews.llvm.org/D3348#comment-4).
+;
+define i32 @nonrotated(i32 %a) {
+entry:
+ br label %loop_cond
+loop_cond:
+ %indx = phi i32 [ 1, %entry ], [ %inc, %loop_inc ]
+ %cmp = icmp ne i32 %indx, %a
+ br i1 %cmp, label %return, label %loop_inc
+loop_inc:
+ %inc = add i32 %indx, 1
+ br label %loop_cond, !llvm.loop !3
+return:
+ ret i32 0
+}
+
+!3 = metadata !{metadata !3, metadata !4}
+!4 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
diff --git a/test/Transforms/LoopVectorize/vect.stats.ll b/test/Transforms/LoopVectorize/vect.stats.ll
new file mode 100644
index 000000000000..92ec24f726ee
--- /dev/null
+++ b/test/Transforms/LoopVectorize/vect.stats.ll
@@ -0,0 +1,65 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=4 -force-vector-width=4 -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+;
+; We have two loops: one of them is vectorizable and the other is not.
+;
+
+; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
+; CHECK: 1 loop-vectorize - Number of loops vectorized
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @vectorized(float* nocapture %a, i64 %size) {
+entry:
+ %cmp1 = icmp sgt i64 %size, 0
+ br i1 %cmp1, label %for.header, label %for.end
+
+for.header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %cmp2 = icmp sgt i64 %indvars.iv, %size
+ br i1 %cmp2, label %for.end, label %for.body
+
+for.body:
+
+ %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %mul = fmul float %0, %0
+ store float %mul, float* %arrayidx, align 4
+
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br label %for.header
+
+for.end:
+ ret void
+}
+
+define void @not_vectorized(float* nocapture %a, i64 %size) {
+entry:
+ %cmp1 = icmp sgt i64 %size, 0
+ br i1 %cmp1, label %for.header, label %for.end
+
+for.header:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %cmp2 = icmp sgt i64 %indvars.iv, %size
+ br i1 %cmp2, label %for.end, label %for.body
+
+for.body:
+
+ %0 = add nsw i64 %indvars.iv, -5
+ %arrayidx = getelementptr inbounds float* %a, i64 %0
+ %1 = load float* %arrayidx, align 4
+ %2 = add nsw i64 %indvars.iv, 2
+ %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+ %3 = load float* %arrayidx2, align 4
+ %mul = fmul float %1, %3
+ %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
+ store float %mul, float* %arrayidx4, align 4
+
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ br label %for.header
+
+for.end:
+ ret void
+} \ No newline at end of file
diff --git a/test/Transforms/LoopVectorize/vectorize-once.ll b/test/Transforms/LoopVectorize/vectorize-once.ll
index 780046930e1b..97654f495018 100644
--- a/test/Transforms/LoopVectorize/vectorize-once.ll
+++ b/test/Transforms/LoopVectorize/vectorize-once.ll
@@ -69,9 +69,9 @@ _ZSt10accumulateIPiiET0_T_S2_S1_.exit: ; preds = %for.body.i, %entry
attributes #0 = { nounwind readonly ssp uwtable "fp-contract-model"="standard" "no-frame-pointer-elim" "no-frame-pointer-elim-non-leaf" "realign-stack" "relocation-model"="pic" "ssp-buffers-size"="8" }
; CHECK: !0 = metadata !{metadata !0, metadata !1, metadata !2}
-; CHECK: !1 = metadata !{metadata !"llvm.vectorizer.width", i32 1}
-; CHECK: !2 = metadata !{metadata !"llvm.vectorizer.unroll", i32 1}
+; CHECK: !1 = metadata !{metadata !"llvm.loop.vectorize.width", i32 1}
+; CHECK: !2 = metadata !{metadata !"llvm.loop.interleave.count", i32 1}
; CHECK: !3 = metadata !{metadata !3, metadata !1, metadata !2}
!0 = metadata !{metadata !0, metadata !1}
-!1 = metadata !{metadata !"llvm.vectorizer.width", i32 1}
+!1 = metadata !{metadata !"llvm.loop.vectorize.width", i32 1}
diff --git a/test/Transforms/LoopVectorize/version-mem-access.ll b/test/Transforms/LoopVectorize/version-mem-access.ll
new file mode 100644
index 000000000000..51d20e227ddf
--- /dev/null
+++ b/test/Transforms/LoopVectorize/version-mem-access.ll
@@ -0,0 +1,87 @@
+; RUN: opt -basicaa -loop-vectorize -enable-mem-access-versioning -force-vector-width=2 -force-vector-unroll=1 < %s -S | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; CHECK-LABEL: test
+define void @test(i32* noalias %A, i64 %AStride,
+ i32* noalias %B, i32 %BStride,
+ i32* noalias %C, i64 %CStride, i32 %N) {
+entry:
+ %cmp13 = icmp eq i32 %N, 0
+ br i1 %cmp13, label %for.end, label %for.body.preheader
+
+; CHECK-DAG: icmp ne i64 %AStride, 1
+; CHECK-DAG: icmp ne i32 %BStride, 1
+; CHECK-DAG: icmp ne i64 %CStride, 1
+; CHECK: or
+; CHECK: or
+; CHECK: br
+
+; CHECK: vector.body
+; CHECK: load <2 x i32>
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %iv.trunc = trunc i64 %indvars.iv to i32
+ %mul = mul i32 %iv.trunc, %BStride
+ %mul64 = zext i32 %mul to i64
+ %arrayidx = getelementptr inbounds i32* %B, i64 %mul64
+ %0 = load i32* %arrayidx, align 4
+ %mul2 = mul nsw i64 %indvars.iv, %CStride
+ %arrayidx3 = getelementptr inbounds i32* %C, i64 %mul2
+ %1 = load i32* %arrayidx3, align 4
+ %mul4 = mul nsw i32 %1, %0
+ %mul3 = mul nsw i64 %indvars.iv, %AStride
+ %arrayidx7 = getelementptr inbounds i32* %A, i64 %mul3
+ store i32 %mul4, i32* %arrayidx7, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %N
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; We used to crash on this function because we removed the fptosi cast when
+; replacing the symbolic stride '%conv'.
+; PR18480
+
+; CHECK-LABEL: fn1
+; CHECK: load <2 x double>
+
+define void @fn1(double* noalias %x, double* noalias %c, double %a) {
+entry:
+ %conv = fptosi double %a to i32
+ %cmp8 = icmp sgt i32 %conv, 0
+ br i1 %cmp8, label %for.body.preheader, label %for.end
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %0 = trunc i64 %indvars.iv to i32
+ %mul = mul nsw i32 %0, %conv
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds double* %x, i64 %idxprom
+ %1 = load double* %arrayidx, align 8
+ %arrayidx3 = getelementptr inbounds double* %c, i64 %indvars.iv
+ store double %1, double* %arrayidx3, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %conv
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LowerAtomic/atomic-swap.ll b/test/Transforms/LowerAtomic/atomic-swap.ll
index 4331677764ba..cb1124136f86 100644
--- a/test/Transforms/LowerAtomic/atomic-swap.ll
+++ b/test/Transforms/LowerAtomic/atomic-swap.ll
@@ -3,15 +3,20 @@
define i8 @cmpswap() {
; CHECK-LABEL: @cmpswap(
%i = alloca i8
- %j = cmpxchg i8* %i, i8 0, i8 42 monotonic
-; CHECK: [[INST:%[a-z0-9]+]] = load
-; CHECK-NEXT: icmp
-; CHECK-NEXT: select
-; CHECK-NEXT: store
+ %pair = cmpxchg i8* %i, i8 0, i8 42 monotonic monotonic
+ %j = extractvalue { i8, i1 } %pair, 0
+; CHECK: [[OLDVAL:%[a-z0-9]+]] = load i8* [[ADDR:%[a-z0-9]+]]
+; CHECK-NEXT: [[SAME:%[a-z0-9]+]] = icmp eq i8 [[OLDVAL]], 0
+; CHECK-NEXT: [[TO_STORE:%[a-z0-9]+]] = select i1 [[SAME]], i8 42, i8 [[OLDVAL]]
+; CHECK-NEXT: store i8 [[TO_STORE]], i8* [[ADDR]]
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = insertvalue { i8, i1 } undef, i8 [[OLDVAL]], 0
+; CHECK-NEXT: [[RES:%[a-z0-9]+]] = insertvalue { i8, i1 } [[TMP]], i1 [[SAME]], 1
+; CHECK-NEXT: [[VAL:%[a-z0-9]+]] = extractvalue { i8, i1 } [[RES]], 0
ret i8 %j
-; CHECK: ret i8 [[INST]]
+; CHECK: ret i8 [[VAL]]
}
+
define i8 @swap() {
; CHECK-LABEL: @swap(
%i = alloca i8
diff --git a/test/Transforms/LowerExpectIntrinsic/basic.ll b/test/Transforms/LowerExpectIntrinsic/basic.ll
index 955209af14a6..e184cb05b94a 100644
--- a/test/Transforms/LowerExpectIntrinsic/basic.ll
+++ b/test/Transforms/LowerExpectIntrinsic/basic.ll
@@ -245,6 +245,35 @@ return: ; preds = %if.end, %if.then
declare i32 @llvm.expect.i32(i32, i32) nounwind readnone
+; CHECK-LABEL: @test9(
+define i32 @test9(i32 %x) nounwind uwtable ssp {
+entry:
+ %retval = alloca i32, align 4
+ %x.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ %tmp = load i32* %x.addr, align 4
+ %cmp = icmp sgt i32 %tmp, 1
+ %expval = call i1 @llvm.expect.i1(i1 %cmp, i1 1)
+; CHECK: !prof !0
+; CHECK-NOT: @llvm.expect
+ br i1 %expval, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ %call = call i32 (...)* @f()
+ store i32 %call, i32* %retval
+ br label %return
+
+if.end: ; preds = %entry
+ store i32 1, i32* %retval
+ br label %return
+
+return: ; preds = %if.end, %if.then
+ %0 = load i32* %retval
+ ret i32 %0
+}
+
+declare i1 @llvm.expect.i1(i1, i1) nounwind readnone
+
; CHECK: !0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
; CHECK: !1 = metadata !{metadata !"branch_weights", i32 4, i32 64}
; CHECK: !2 = metadata !{metadata !"branch_weights", i32 4, i32 64, i32 4}
diff --git a/test/Transforms/LowerInvoke/2004-02-29-PHICrash.ll b/test/Transforms/LowerInvoke/2004-02-29-PHICrash.ll
deleted file mode 100644
index bddb70248ed8..000000000000
--- a/test/Transforms/LowerInvoke/2004-02-29-PHICrash.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: opt < %s -lowerinvoke -enable-correct-eh-support -disable-output
-
-define void @_ZNKSt11__use_cacheISt16__numpunct_cacheIcEEclERKSt6locale() {
-entry:
- br i1 false, label %then, label %UnifiedReturnBlock
-then: ; preds = %entry
- invoke void @_Znwj( )
- to label %UnifiedReturnBlock unwind label %UnifiedReturnBlock
-UnifiedReturnBlock: ; preds = %then, %then, %entry
- %UnifiedRetVal = phi i32* [ null, %entry ], [ null, %then ], [ null, %then ] ; <i32*> [#uses=0]
- ret void
-}
-
-declare void @_Znwj()
-
diff --git a/test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHI.ll b/test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHI.ll
deleted file mode 100644
index 1057ad7057cc..000000000000
--- a/test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHI.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: opt < %s -lowerinvoke -enable-correct-eh-support -disable-output
-
-declare void @ll_listnext__listiterPtr()
-
-define void @WorkTask.fn() {
-block0:
- invoke void @ll_listnext__listiterPtr( )
- to label %block9 unwind label %block8_exception_handling
-block8_exception_handling: ; preds = %block0
- ret void
-block9: ; preds = %block0
- %w_2690 = phi { i32, i32 }* [ null, %block0 ] ; <{ i32, i32 }*> [#uses=1]
- %tmp.129 = getelementptr { i32, i32 }* %w_2690, i32 0, i32 1 ; <i32*> [#uses=1]
- %v2769 = load i32* %tmp.129 ; <i32> [#uses=0]
- ret void
-}
-
diff --git a/test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHIUse.ll b/test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHIUse.ll
deleted file mode 100644
index 940204649c74..000000000000
--- a/test/Transforms/LowerInvoke/2005-08-03-InvokeWithPHIUse.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: opt < %s -lowerinvoke -enable-correct-eh-support -disable-output
-
-declare fastcc i32 @ll_listnext__listiterPtr()
-
-define fastcc i32 @WorkTask.fn() {
-block0:
- %v2679 = invoke fastcc i32 @ll_listnext__listiterPtr( )
- to label %block9 unwind label %block8_exception_handling ; <i32> [#uses=1]
-block8_exception_handling: ; preds = %block0
- ret i32 0
-block9: ; preds = %block0
- %i_2689 = phi i32 [ %v2679, %block0 ] ; <i32> [#uses=1]
- ret i32 %i_2689
-}
-
diff --git a/test/Transforms/LowerInvoke/2008-02-14-CritEdgePhiCrash.ll b/test/Transforms/LowerInvoke/2008-02-14-CritEdgePhiCrash.ll
deleted file mode 100644
index b46ccfbb79a8..000000000000
--- a/test/Transforms/LowerInvoke/2008-02-14-CritEdgePhiCrash.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: opt < %s -lowerinvoke -enable-correct-eh-support -disable-output
-; PR2029
-define i32 @main(i32 %argc, i8** %argv) {
-bb470:
- invoke i32 @main(i32 0, i8** null) to label %invcont474 unwind label
-%lpad902
-
-invcont474: ; preds = %bb470
- ret i32 0
-
-lpad902: ; preds = %bb470
- %tmp471.lcssa = phi i8* [ null, %bb470 ] ; <i8*>
- ret i32 0
-}
diff --git a/test/Transforms/LowerInvoke/basictest.ll b/test/Transforms/LowerInvoke/basictest.ll
deleted file mode 100644
index f0ca5f425311..000000000000
--- a/test/Transforms/LowerInvoke/basictest.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: opt < %s -lowerinvoke -disable-output -enable-correct-eh-support
-
-
-define i32 @foo() {
- invoke i32 @foo( )
- to label %Ok unwind label %Crap ; <i32>:1 [#uses=0]
-Ok: ; preds = %0
- invoke i32 @foo( )
- to label %Ok2 unwind label %Crap ; <i32>:2 [#uses=0]
-Ok2: ; preds = %Ok
- ret i32 2
-Crap: ; preds = %Ok, %0
- ret i32 1
-}
-
-define i32 @bar(i32 %blah) {
- br label %doit
-doit: ; preds = %0
- ;; Value live across an unwind edge.
- %B2 = add i32 %blah, 1 ; <i32> [#uses=1]
- invoke i32 @foo( )
- to label %Ok unwind label %Crap ; <i32>:1 [#uses=0]
-Ok: ; preds = %doit
- invoke i32 @foo( )
- to label %Ok2 unwind label %Crap ; <i32>:2 [#uses=0]
-Ok2: ; preds = %Ok
- ret i32 2
-Crap: ; preds = %Ok, %doit
- ret i32 %B2
-}
diff --git a/test/Transforms/LowerInvoke/lowerinvoke.ll b/test/Transforms/LowerInvoke/lowerinvoke.ll
new file mode 100644
index 000000000000..05c19be7a9ac
--- /dev/null
+++ b/test/Transforms/LowerInvoke/lowerinvoke.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -lowerinvoke -S | FileCheck %s
+
+declare i32 @external_func(i64 %arg)
+
+define i32 @invoke_test(i64 %arg) {
+entry:
+ %result = invoke fastcc i32 @external_func(i64 inreg %arg)
+ to label %cont unwind label %lpad
+cont:
+ ret i32 %result
+lpad:
+ %phi = phi i32 [ 99, %entry ]
+ %lp = landingpad { i8*, i32 } personality i8* null cleanup
+ ret i32 %phi
+}
+
+; The "invoke" should be converted to a "call".
+; CHECK-LABEL: define i32 @invoke_test
+; CHECK: %result = call fastcc i32 @external_func(i64 inreg %arg)
+; CHECK-NEXT: br label %cont
+
+; Note that this pass does not remove dead landingpad blocks.
+; CHECK: lpad:
+; CHECK-NOT: phi
+; CHECK: landingpad
diff --git a/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll b/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll
new file mode 100644
index 000000000000..3673c04a8b81
--- /dev/null
+++ b/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -lowerswitch -S | FileCheck %s
+; CHECK-NOT: icmp eq i32 %0, 1
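+;
+; The switch below has a contiguous set of cases (0, 1, 2) plus a default, so
+; -lowerswitch should be able to lower it with range comparisons rather than
+; one equality compare per case; in particular, no compare against the middle
+; case value 1 is expected to survive.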
+
+define i32 @foo(i32 %a) #0 {
+entry:
+ %retval = alloca i32, align 4
+ %a.addr = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ %0 = load i32* %a.addr, align 4
+ switch i32 %0, label %sw.default [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ ]
+
+sw.bb:
+ ret i32 12
+
+sw.bb1:
+ ret i32 4
+
+sw.bb2:
+ ret i32 2
+
+sw.default:
+ ret i32 9
+}
diff --git a/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll b/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll
new file mode 100644
index 000000000000..0f737211f598
--- /dev/null
+++ b/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -lowerswitch -S | FileCheck %s
+; CHECK-NOT: {{.*}}icmp eq{{.*}}
+;
+;int foo(int a) {
+;
+; switch (a) {
+; case 0:
+; return 10;
+; case 1:
+; return 3;
+; default:
+; __builtin_unreachable();
+; }
+;
+;}
+
+define i32 @foo(i32 %a) nounwind ssp uwtable {
+ %1 = alloca i32, align 4
+ %2 = alloca i32, align 4
+ store i32 %a, i32* %2, align 4
+ %3 = load i32* %2, align 4
+ switch i32 %3, label %6 [
+ i32 0, label %4
+ i32 1, label %5
+ ]
+
+; <label>:4
+ store i32 10, i32* %1
+ br label %7
+
+; <label>:5
+ store i32 3, i32* %1
+ br label %7
+
+; <label>:6
+ unreachable
+
+; <label>:7
+ %8 = load i32* %1
+ ret i32 %8
+}
diff --git a/test/Transforms/LowerSwitch/2014-06-23-PHIlowering.ll b/test/Transforms/LowerSwitch/2014-06-23-PHIlowering.ll
new file mode 100644
index 000000000000..c6cddf636a88
--- /dev/null
+++ b/test/Transforms/LowerSwitch/2014-06-23-PHIlowering.ll
@@ -0,0 +1,40 @@
+; RUN: opt < %s -lowerswitch -S | FileCheck %s
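+;
+; When -lowerswitch replaces the switch with a compare tree, the PHI nodes in
+; the former switch successors need their incoming blocks rewritten to the
+; newly created NodeBlock/NewDefault predecessors; the CHECK lines below
+; verify that rewriting.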
+
+define i32 @test(i32 %arg) #0 {
+; CHECK-LABEL: @test
+; CHECK: ; <label>:2
+; CHECK-NEXT: %res.0 = phi i32 [ 1, %NodeBlock ], [ 2, %1 ]
+; CHECK-NEXT: br label %3
+; CHECK: ; <label>:5
+; CHECK-NEXT: %res.3 = phi i32 [ 0, %NewDefault ], [ %res.2, %4 ]
+; CHECK-NEXT: %6 = add nsw i32 %res.3, 1
+; CHECK-NEXT: ret i32 %6
+
+ switch i32 %arg, label %5 [
+ i32 1, label %1
+ i32 2, label %2
+ i32 3, label %3
+ i32 4, label %4
+ ]
+
+; <label>:1
+ br label %2
+
+; <label>:2
+ %res.0 = phi i32 [ 1, %0 ], [ 2, %1 ]
+ br label %3
+
+; <label>:3
+ %res.1 = phi i32 [ 0, %0 ], [ %res.0, %2 ]
+ %phitmp = add nsw i32 %res.1, 2
+ br label %4
+
+; <label>:4
+ %res.2 = phi i32 [ 1, %0 ], [ %phitmp, %3 ]
+ br label %5
+
+; <label>:5
+ %res.3 = phi i32 [ 0, %0 ], [ %res.2, %4 ]
+ %6 = add nsw i32 %res.3, 1
+ ret i32 %6
+}
diff --git a/test/Transforms/LowerSwitch/feature.ll b/test/Transforms/LowerSwitch/feature.ll
index e85f03ee5c78..09d25f0b06d4 100644
--- a/test/Transforms/LowerSwitch/feature.ll
+++ b/test/Transforms/LowerSwitch/feature.ll
@@ -3,93 +3,57 @@
; We have a switch on the input.
; On output we should get a binary comparison tree. Check that all is fine.
-;CHECK: entry:
-;CHECK-NEXT: br label %NodeBlock37
+;CHECK: entry:
+;CHECK-NEXT: br label %NodeBlock19
-;CHECK: NodeBlock37: ; preds = %entry
-;CHECK-NEXT: %Pivot38 = icmp slt i32 %tmp158, 10
-;CHECK-NEXT: br i1 %Pivot38, label %NodeBlock13, label %NodeBlock35
+;CHECK: NodeBlock19: ; preds = %entry
+;CHECK-NEXT: %Pivot20 = icmp slt i32 %tmp158, 10
+;CHECK-NEXT: br i1 %Pivot20, label %NodeBlock5, label %NodeBlock17
-;CHECK: NodeBlock35: ; preds = %NodeBlock37
-;CHECK-NEXT: %Pivot36 = icmp slt i32 %tmp158, 13
-;CHECK-NEXT: br i1 %Pivot36, label %NodeBlock23, label %NodeBlock33
+;CHECK: NodeBlock17: ; preds = %NodeBlock19
+;CHECK-NEXT: %Pivot18 = icmp slt i32 %tmp158, 13
+;CHECK-NEXT: br i1 %Pivot18, label %NodeBlock9, label %NodeBlock15
-;CHECK: NodeBlock33: ; preds = %NodeBlock35
-;CHECK-NEXT: %Pivot34 = icmp slt i32 %tmp158, 14
-;CHECK-NEXT: br i1 %Pivot34, label %LeafBlock25, label %NodeBlock31
+;CHECK: NodeBlock15: ; preds = %NodeBlock17
+;CHECK-NEXT: %Pivot16 = icmp slt i32 %tmp158, 14
+;CHECK-NEXT: br i1 %Pivot16, label %bb330, label %NodeBlock13
-;CHECK: NodeBlock31: ; preds = %NodeBlock33
-;CHECK-NEXT: %Pivot32 = icmp slt i32 %tmp158, 15
-;CHECK-NEXT: br i1 %Pivot32, label %LeafBlock27, label %LeafBlock29
+;CHECK: NodeBlock13: ; preds = %NodeBlock15
+;CHECK-NEXT: %Pivot14 = icmp slt i32 %tmp158, 15
+;CHECK-NEXT: br i1 %Pivot14, label %bb332, label %LeafBlock11
-;CHECK: LeafBlock29: ; preds = %NodeBlock31
-;CHECK-NEXT: %SwitchLeaf30 = icmp eq i32 %tmp158, 15
-;CHECK-NEXT: br i1 %SwitchLeaf30, label %bb334, label %NewDefault
+;CHECK: LeafBlock11: ; preds = %NodeBlock13
+;CHECK-NEXT: %SwitchLeaf12 = icmp eq i32 %tmp158, 15
+;CHECK-NEXT: br i1 %SwitchLeaf12, label %bb334, label %NewDefault
-;CHECK: LeafBlock27: ; preds = %NodeBlock31
-;CHECK-NEXT: %SwitchLeaf28 = icmp eq i32 %tmp158, 14
-;CHECK-NEXT: br i1 %SwitchLeaf28, label %bb332, label %NewDefault
+;CHECK: NodeBlock9: ; preds = %NodeBlock17
+;CHECK-NEXT: %Pivot10 = icmp slt i32 %tmp158, 11
+;CHECK-NEXT: br i1 %Pivot10, label %bb324, label %NodeBlock7
-;CHECK: LeafBlock25: ; preds = %NodeBlock33
-;CHECK-NEXT: %SwitchLeaf26 = icmp eq i32 %tmp158, 13
-;CHECK-NEXT: br i1 %SwitchLeaf26, label %bb330, label %NewDefault
+;CHECK: NodeBlock7: ; preds = %NodeBlock9
+;CHECK-NEXT: %Pivot8 = icmp slt i32 %tmp158, 12
+;CHECK-NEXT: br i1 %Pivot8, label %bb326, label %bb328
-;CHECK: NodeBlock23: ; preds = %NodeBlock35
-;CHECK-NEXT: %Pivot24 = icmp slt i32 %tmp158, 11
-;CHECK-NEXT: br i1 %Pivot24, label %LeafBlock15, label %NodeBlock21
+;CHECK: NodeBlock5: ; preds = %NodeBlock19
+;CHECK-NEXT: %Pivot6 = icmp slt i32 %tmp158, 7
+;CHECK-NEXT: br i1 %Pivot6, label %NodeBlock, label %NodeBlock3
-;CHECK: NodeBlock21: ; preds = %NodeBlock23
-;CHECK-NEXT: %Pivot22 = icmp slt i32 %tmp158, 12
-;CHECK-NEXT: br i1 %Pivot22, label %LeafBlock17, label %LeafBlock19
+;CHECK: NodeBlock3: ; preds = %NodeBlock5
+;CHECK-NEXT: %Pivot4 = icmp slt i32 %tmp158, 8
+;CHECK-NEXT: br i1 %Pivot4, label %bb, label %NodeBlock1
-;CHECK: LeafBlock19: ; preds = %NodeBlock21
-;CHECK-NEXT: %SwitchLeaf20 = icmp eq i32 %tmp158, 12
-;CHECK-NEXT: br i1 %SwitchLeaf20, label %bb328, label %NewDefault
+;CHECK: NodeBlock1: ; preds = %NodeBlock3
+;CHECK-NEXT: %Pivot2 = icmp slt i32 %tmp158, 9
+;CHECK-NEXT: br i1 %Pivot2, label %bb338, label %bb322
-;CHECK: LeafBlock17: ; preds = %NodeBlock21
-;CHECK-NEXT: %SwitchLeaf18 = icmp eq i32 %tmp158, 11
-;CHECK-NEXT: br i1 %SwitchLeaf18, label %bb326, label %NewDefault
+;CHECK: NodeBlock: ; preds = %NodeBlock5
+;CHECK-NEXT: %Pivot = icmp slt i32 %tmp158, 0
+;CHECK-NEXT: br i1 %Pivot, label %LeafBlock, label %bb338
-;CHECK: LeafBlock15: ; preds = %NodeBlock23
-;CHECK-NEXT: %SwitchLeaf16 = icmp eq i32 %tmp158, 10
-;CHECK-NEXT: br i1 %SwitchLeaf16, label %bb324, label %NewDefault
-
-;CHECK: NodeBlock13: ; preds = %NodeBlock37
-;CHECK-NEXT: %Pivot14 = icmp slt i32 %tmp158, 7
-;CHECK-NEXT: br i1 %Pivot14, label %NodeBlock, label %NodeBlock11
-
-;CHECK: NodeBlock11: ; preds = %NodeBlock13
-;CHECK-NEXT: %Pivot12 = icmp slt i32 %tmp158, 8
-;CHECK-NEXT: br i1 %Pivot12, label %LeafBlock3, label %NodeBlock9
-
-;CHECK: NodeBlock9: ; preds = %NodeBlock11
-;CHECK-NEXT: %Pivot10 = icmp slt i32 %tmp158, 9
-;CHECK-NEXT: br i1 %Pivot10, label %LeafBlock5, label %LeafBlock7
-
-;CHECK: LeafBlock7: ; preds = %NodeBlock9
-;CHECK-NEXT: %SwitchLeaf8 = icmp eq i32 %tmp158, 9
-;CHECK-NEXT: br i1 %SwitchLeaf8, label %bb322, label %NewDefault
-
-;CHECK: LeafBlock5: ; preds = %NodeBlock9
-;CHECK-NEXT: %SwitchLeaf6 = icmp eq i32 %tmp158, 8
-;CHECK-NEXT: br i1 %SwitchLeaf6, label %bb338, label %NewDefault
-
-;CHECK: LeafBlock3: ; preds = %NodeBlock11
-;CHECK-NEXT: %SwitchLeaf4 = icmp eq i32 %tmp158, 7
-;CHECK-NEXT: br i1 %SwitchLeaf4, label %bb, label %NewDefault
-
-;CHECK: NodeBlock: ; preds = %NodeBlock13
-;CHECK-NEXT: %Pivot = icmp slt i32 %tmp158, 0
-;CHECK-NEXT: br i1 %Pivot, label %LeafBlock, label %LeafBlock1
-
-;CHECK: LeafBlock1: ; preds = %NodeBlock
-;CHECK-NEXT: %SwitchLeaf2 = icmp ule i32 %tmp158, 6
-;CHECK-NEXT: br i1 %SwitchLeaf2, label %bb338, label %NewDefault
-
-;CHECK: LeafBlock: ; preds = %NodeBlock
-;CHECK-NEXT: %tmp158.off = add i32 %tmp158, 6
-;CHECK-NEXT: %SwitchLeaf = icmp ule i32 %tmp158.off, 4
-;CHECK-NEXT: br i1 %SwitchLeaf, label %bb338, label %NewDefault
+;CHECK: LeafBlock: ; preds = %NodeBlock
+;CHECK-NEXT: %tmp158.off = add i32 %tmp158, 6
+;CHECK-NEXT: %SwitchLeaf = icmp ule i32 %tmp158.off, 4
+;CHECK-NEXT: br i1 %SwitchLeaf, label %bb338, label %NewDefault
define i32 @main(i32 %tmp158) {
entry:
diff --git a/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll b/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll
index d124be5f9029..00ac34d93e79 100644
--- a/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll
+++ b/test/Transforms/MemCpyOpt/2008-02-24-MultipleUseofSRet.ll
@@ -6,7 +6,7 @@ target triple = "i386-pc-linux-gnu"
%0 = type { x86_fp80, x86_fp80 }
-define internal fastcc void @initialize(%0* noalias sret %agg.result) nounwind {
+define internal fastcc void @initialize(%0* noalias nocapture sret %agg.result) nounwind {
entry:
%agg.result.03 = getelementptr %0* %agg.result, i32 0, i32 0
store x86_fp80 0xK00000000000000000000, x86_fp80* %agg.result.03
@@ -15,7 +15,7 @@ entry:
ret void
}
-declare fastcc x86_fp80 @passed_uninitialized(%0*) nounwind
+declare fastcc x86_fp80 @passed_uninitialized(%0* nocapture) nounwind
define fastcc void @badly_optimized() nounwind {
entry:
diff --git a/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll b/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
index 597b69dee3d4..6982c8bf2f85 100644
--- a/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
+++ b/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
@@ -4,7 +4,7 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
%a = type { i32 }
%b = type { float }
-declare void @g(%a*)
+declare void @g(%a* nocapture)
define float @f() {
entry:
diff --git a/test/Transforms/MemCpyOpt/capturing-func.ll b/test/Transforms/MemCpyOpt/capturing-func.ll
new file mode 100644
index 000000000000..17614fd181d4
--- /dev/null
+++ b/test/Transforms/MemCpyOpt/capturing-func.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -basicaa -memcpyopt -S | FileCheck %s
+
+target datalayout = "e"
+
+declare void @foo(i8*)
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+
+define void @test() {
+ %ptr1 = alloca i8
+ %ptr2 = alloca i8
+ call void @foo(i8* %ptr2)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr1, i8* %ptr2, i32 1, i32 1, i1 false)
+ call void @foo(i8* %ptr1)
+ ret void
+
+ ; Check that the transformation isn't applied if the called function can
+ ; capture the pointer argument (i.e. the nocapture attribute isn't present)
+ ; CHECK-LABEL: @test(
+ ; CHECK: call void @foo(i8* %ptr2)
+ ; CHECK-NEXT: call void @llvm.memcpy
+ ; CHECK-NEXT: call void @foo(i8* %ptr1)
+}
diff --git a/test/Transforms/MemCpyOpt/form-memset.ll b/test/Transforms/MemCpyOpt/form-memset.ll
index 7c7b4fc08809..d980b7fdbd69 100644
--- a/test/Transforms/MemCpyOpt/form-memset.ll
+++ b/test/Transforms/MemCpyOpt/form-memset.ll
@@ -272,3 +272,15 @@ define void @test9() nounwind {
; CHECK-LABEL: @test9(
; CHECK: call void @llvm.memset.p0i8.i64(i8* bitcast ([16 x i64]* @test9buf to i8*), i8 -1, i64 16, i32 16, i1 false)
}
+
+; PR19092
+define void @test10(i8* nocapture %P) nounwind {
+ tail call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 42, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 23, i32 1, i1 false)
+ ret void
+; CHECK-LABEL: @test10(
+; CHECK-NOT: memset
+; CHECK: call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 42, i32 1, i1 false)
+; CHECK-NOT: memset
+; CHECK: ret void
+}
diff --git a/test/Transforms/MemCpyOpt/loadstore-sret.ll b/test/Transforms/MemCpyOpt/loadstore-sret.ll
index 89eabca21bf9..d4a700d03115 100644
--- a/test/Transforms/MemCpyOpt/loadstore-sret.ll
+++ b/test/Transforms/MemCpyOpt/loadstore-sret.ll
@@ -22,4 +22,4 @@ _ZNSt8auto_ptrIiED1Ev.exit:
ret void
}
-declare void @_Z3barv(%"class.std::auto_ptr"* sret)
+declare void @_Z3barv(%"class.std::auto_ptr"* nocapture sret)
diff --git a/test/Transforms/MemCpyOpt/memcpy-undef.ll b/test/Transforms/MemCpyOpt/memcpy-undef.ll
new file mode 100644
index 000000000000..663b8dcfd3df
--- /dev/null
+++ b/test/Transforms/MemCpyOpt/memcpy-undef.ll
@@ -0,0 +1,46 @@
+; RUN: opt < %s -basicaa -memcpyopt -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+%struct.foo = type { i8, [7 x i8], i32 }
+
+define i32 @test1(%struct.foo* nocapture %foobie) nounwind noinline ssp uwtable {
+ %bletch.sroa.1 = alloca [7 x i8], align 1
+ %1 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 0
+ store i8 98, i8* %1, align 4
+ %2 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 1, i64 0
+ %3 = getelementptr inbounds [7 x i8]* %bletch.sroa.1, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 7, i32 1, i1 false)
+ %4 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 2
+ store i32 20, i32* %4, align 4
+ ret i32 undef
+
+; Check that the memcpy is removed.
+; CHECK-LABEL: @test1(
+; CHECK-NOT: call void @llvm.memcpy
+}
+
+define void @test2(i8* sret noalias nocapture %out, i8* %in) nounwind noinline ssp uwtable {
+ call void @llvm.lifetime.start(i64 8, i8* %in)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 8, i32 1, i1 false)
+ ret void
+
+; Check that the memcpy is removed.
+; CHECK-LABEL: @test2(
+; CHECK-NOT: call void @llvm.memcpy
+}
+
+define void @test3(i8* sret noalias nocapture %out, i8* %in) nounwind noinline ssp uwtable {
+ call void @llvm.lifetime.start(i64 4, i8* %in)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 8, i32 1, i1 false)
+ ret void
+
+; Check that the memcpy is not removed.
+; CHECK-LABEL: @test3(
+; CHECK: call void @llvm.memcpy
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+
+declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
diff --git a/test/Transforms/MemCpyOpt/memcpy.ll b/test/Transforms/MemCpyOpt/memcpy.ll
index 2417cd11f7ff..ee04f1951164 100644
--- a/test/Transforms/MemCpyOpt/memcpy.ll
+++ b/test/Transforms/MemCpyOpt/memcpy.ll
@@ -29,7 +29,7 @@ entry:
; CHECK: ret void
}
-declare void @ccoshl(%0* sret , x86_fp80, x86_fp80) nounwind
+declare void @ccoshl(%0* nocapture sret, x86_fp80, x86_fp80) nounwind
; The intermediate alloca and one of the memcpy's should be eliminated, the
@@ -78,6 +78,7 @@ define void @test4(i8 *%P) {
declare void @test4a(i8* align 1 byval)
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
%struct.S = type { i128, [4 x i8]}
@@ -152,6 +153,22 @@ declare noalias i8* @malloc(i32)
; rdar://11341081
%struct.big = type { [50 x i32] }
+define void @test9_addrspacecast() nounwind ssp uwtable {
+entry:
+; CHECK-LABEL: @test9_addrspacecast(
+; CHECK: f1
+; CHECK-NOT: memcpy
+; CHECK: f2
+ %b = alloca %struct.big, align 4
+ %tmp = alloca %struct.big, align 4
+ call void @f1(%struct.big* sret %tmp)
+ %0 = addrspacecast %struct.big* %b to i8 addrspace(1)*
+ %1 = addrspacecast %struct.big* %tmp to i8 addrspace(1)*
+ call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %0, i8 addrspace(1)* %1, i64 200, i32 4, i1 false)
+ call void @f2(%struct.big* %b)
+ ret void
+}
+
define void @test9() nounwind ssp uwtable {
entry:
; CHECK: test9
@@ -185,7 +202,7 @@ define void @test10(%opaque* noalias nocapture sret %x, i32 %y) {
ret void
}
-declare void @f1(%struct.big* sret)
+declare void @f1(%struct.big* nocapture sret)
declare void @f2(%struct.big*)
; CHECK: attributes [[NUW]] = { nounwind }
diff --git a/test/Transforms/MemCpyOpt/sret.ll b/test/Transforms/MemCpyOpt/sret.ll
index 1bbb5fe8651b..bfe5e0fbb993 100644
--- a/test/Transforms/MemCpyOpt/sret.ll
+++ b/test/Transforms/MemCpyOpt/sret.ll
@@ -25,6 +25,6 @@ entry:
ret void
}
-declare void @ccoshl(%0* noalias sret, %0* byval) nounwind
+declare void @ccoshl(%0* noalias nocapture sret, %0* byval) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll b/test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll
new file mode 100644
index 000000000000..9878b471500e
--- /dev/null
+++ b/test/Transforms/MergeFunc/call-and-invoke-with-ranges.ll
@@ -0,0 +1,91 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
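+;
+; Calls and invokes carrying !range metadata should only be merged with
+; functions whose range metadata is identical; only the *_same_range
+; functions below are expected to be folded into their !0-annotated
+; counterparts.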
+
+define i8 @call_with_range() {
+ bitcast i8 0 to i8 ; dummy to make the function large enough
+ %out = call i8 @dummy(), !range !0
+ ret i8 %out
+}
+
+define i8 @call_no_range() {
+; CHECK-LABEL: @call_no_range
+; CHECK-NEXT: bitcast i8 0 to i8
+; CHECK-NEXT: %out = call i8 @dummy()
+; CHECK-NEXT: ret i8 %out
+ bitcast i8 0 to i8
+ %out = call i8 @dummy()
+ ret i8 %out
+}
+
+define i8 @call_different_range() {
+; CHECK-LABEL: @call_different_range
+; CHECK-NEXT: bitcast i8 0 to i8
+; CHECK-NEXT: %out = call i8 @dummy(), !range !1
+; CHECK-NEXT: ret i8 %out
+ bitcast i8 0 to i8
+ %out = call i8 @dummy(), !range !1
+ ret i8 %out
+}
+
+define i8 @invoke_with_range() {
+ %out = invoke i8 @dummy() to label %next unwind label %lpad, !range !0
+
+next:
+ ret i8 %out
+
+lpad:
+ %pad = landingpad { i8*, i32 } personality i8* undef cleanup
+ resume { i8*, i32 } zeroinitializer
+}
+
+define i8 @invoke_no_range() {
+; CHECK-LABEL: @invoke_no_range()
+; CHECK-NEXT: invoke i8 @dummy
+ %out = invoke i8 @dummy() to label %next unwind label %lpad
+
+next:
+ ret i8 %out
+
+lpad:
+ %pad = landingpad { i8*, i32 } personality i8* undef cleanup
+ resume { i8*, i32 } zeroinitializer
+}
+
+define i8 @invoke_different_range() {
+; CHECK-LABEL: @invoke_different_range()
+; CHECK-NEXT: invoke i8 @dummy
+ %out = invoke i8 @dummy() to label %next unwind label %lpad, !range !1
+
+next:
+ ret i8 %out
+
+lpad:
+ %pad = landingpad { i8*, i32 } personality i8* undef cleanup
+ resume { i8*, i32 } zeroinitializer
+}
+
+define i8 @call_same_range() {
+; CHECK-LABEL: @call_same_range
+; CHECK: tail call i8 @call_with_range
+ bitcast i8 0 to i8
+ %out = call i8 @dummy(), !range !0
+ ret i8 %out
+}
+
+define i8 @invoke_same_range() {
+; CHECK-LABEL: @invoke_same_range()
+; CHECK: tail call i8 @invoke_with_range()
+ %out = invoke i8 @dummy() to label %next unwind label %lpad, !range !0
+
+next:
+ ret i8 %out
+
+lpad:
+ %pad = landingpad { i8*, i32 } personality i8* undef cleanup
+ resume { i8*, i32 } zeroinitializer
+}
+
+declare i8 @dummy()
+declare i32 @__gxx_personality_v0(...)
+
+!0 = metadata !{i8 0, i8 2}
+!1 = metadata !{i8 5, i8 7} \ No newline at end of file
diff --git a/test/Transforms/MergeFunc/crash.ll b/test/Transforms/MergeFunc/crash.ll
index 0897ba289337..3475e28a6565 100644
--- a/test/Transforms/MergeFunc/crash.ll
+++ b/test/Transforms/MergeFunc/crash.ll
@@ -8,9 +8,9 @@ target triple = "i386-pc-linux-gnu"
%.qux.2585 = type { i32, i32, i8* }
@g2 = external unnamed_addr constant [9 x i8], align 1
-@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
+@g3 = internal unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
-define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
+define internal i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
br label %1
; <label>:1
@@ -20,26 +20,26 @@ define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) al
ret i32 undef
}
-define internal hidden i32 @func10(%.qux.2496* nocapture %this) align 2 {
+define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
%2 = load i32* %1, align 4
ret i32 %2
}
-define internal hidden i8* @func29(i32* nocapture %this) align 2 {
+define internal i8* @func29(i32* nocapture %this) align 2 {
ret i8* getelementptr inbounds ([9 x i8]* @g2, i32 0, i32 0)
}
-define internal hidden i32* @func33(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func33(%.qux.2585* nocapture %this) align 2 {
ret i32* undef
}
-define internal hidden i32* @func34(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2585* %this, i32 0
ret i32* undef
}
-define internal hidden i8* @func35(%.qux.2585* nocapture %this) align 2 {
+define internal i8* @func35(%.qux.2585* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
%2 = load i8** %1, align 4
ret i8* %2
diff --git a/test/Transforms/MergeFunc/functions.ll b/test/Transforms/MergeFunc/functions.ll
new file mode 100644
index 000000000000..006fdf523670
--- /dev/null
+++ b/test/Transforms/MergeFunc/functions.ll
@@ -0,0 +1,27 @@
+; RUN: opt -S -mergefunc < %s | FileCheck %s
+
+; Be sure we don't merge cross-referenced functions of the same type.
+
+; CHECK-LABEL: @left
+; CHECK-LABEL: entry-block
+; CHECK-LABEL: call void @right(i64 %p)
+define void @left(i64 %p) {
+entry-block:
+ call void @right(i64 %p)
+ call void @right(i64 %p)
+ call void @right(i64 %p)
+ call void @right(i64 %p)
+ ret void
+}
+
+; CHECK-LABEL: @right
+; CHECK-LABEL: entry-block
+; CHECK-LABEL: call void @left(i64 %p)
+define void @right(i64 %p) {
+entry-block:
+ call void @left(i64 %p)
+ call void @left(i64 %p)
+ call void @left(i64 %p)
+ call void @left(i64 %p)
+ ret void
+}
diff --git a/test/Transforms/MergeFunc/inttoptr-address-space.ll b/test/Transforms/MergeFunc/inttoptr-address-space.ll
index 0d834bc3b437..2e5e2fcc4865 100644
--- a/test/Transforms/MergeFunc/inttoptr-address-space.ll
+++ b/test/Transforms/MergeFunc/inttoptr-address-space.ll
@@ -6,10 +6,10 @@ target datalayout = "e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
%.qux.2585 = type { i32, i32, i8* }
@g2 = external addrspace(1) constant [9 x i8], align 1
-@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585 addrspace(1)*)* @func35 to i8*)]
+@g3 = internal unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585 addrspace(1)*)* @func35 to i8*)]
-define internal hidden i32 @func10(%.qux.2496 addrspace(1)* nocapture %this) align 2 {
+define internal i32 @func10(%.qux.2496 addrspace(1)* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2496 addrspace(1)* %this, i32 0, i32 1, i32 1
%tmp1 = load i32 addrspace(1)* %tmp, align 4
@@ -17,7 +17,7 @@ bb:
}
; Check for pointer bitwidth equal assertion failure
-define internal hidden i8* @func35(%.qux.2585 addrspace(1)* nocapture %this) align 2 {
+define internal i8* @func35(%.qux.2585 addrspace(1)* nocapture %this) align 2 {
bb:
; CHECK-LABEL: @func35(
; CHECK: %[[V2:.+]] = bitcast %.qux.2585 addrspace(1)* %{{.*}} to %.qux.2496 addrspace(1)*
diff --git a/test/Transforms/MergeFunc/inttoptr.ll b/test/Transforms/MergeFunc/inttoptr.ll
index 6a69e3fcfd86..86c18a0c122b 100644
--- a/test/Transforms/MergeFunc/inttoptr.ll
+++ b/test/Transforms/MergeFunc/inttoptr.ll
@@ -8,9 +8,9 @@ target triple = "i386-pc-linux-gnu"
%.qux.2585 = type { i32, i32, i8* }
@g2 = external unnamed_addr constant [9 x i8], align 1
-@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
+@g3 = internal unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
-define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
+define internal i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
bb:
br label %bb1
@@ -21,30 +21,30 @@ bb2: ; preds = %bb1
ret i32 undef
}
-define internal hidden i32 @func10(%.qux.2496* nocapture %this) align 2 {
+define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
%tmp1 = load i32* %tmp, align 4
ret i32 %tmp1
}
-define internal hidden i8* @func29(i32* nocapture %this) align 2 {
+define internal i8* @func29(i32* nocapture %this) align 2 {
bb:
ret i8* getelementptr inbounds ([9 x i8]* @g2, i32 0, i32 0)
}
-define internal hidden i32* @func33(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func33(%.qux.2585* nocapture %this) align 2 {
bb:
ret i32* undef
}
-define internal hidden i32* @func34(%.qux.2585* nocapture %this) align 2 {
+define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2585* %this, i32 0
ret i32* undef
}
-define internal hidden i8* @func35(%.qux.2585* nocapture %this) align 2 {
+define internal i8* @func35(%.qux.2585* nocapture %this) align 2 {
bb:
; CHECK-LABEL: @func35(
; CHECK: %[[V2:.+]] = bitcast %.qux.2585* %{{.*}} to %.qux.2496*
diff --git a/test/Transforms/MergeFunc/mergefunc-struct-return.ll b/test/Transforms/MergeFunc/mergefunc-struct-return.ll
new file mode 100644
index 000000000000..d2cbe43da0c4
--- /dev/null
+++ b/test/Transforms/MergeFunc/mergefunc-struct-return.ll
@@ -0,0 +1,40 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+; This test makes sure that the mergefunc pass uses extractvalue and insertvalue
+; to convert the struct result type, since struct types cannot be bitcast.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+%kv1 = type { i32*, i32* }
+%kv2 = type { i8*, i8* }
+
+declare void @noop()
+
+define %kv1 @fn1() {
+; CHECK-LABEL: @fn1(
+ %tmp = alloca %kv1
+ %v1 = getelementptr %kv1* %tmp, i32 0, i32 0
+ store i32* null, i32** %v1
+ %v2 = getelementptr %kv1* %tmp, i32 0, i32 0
+ store i32* null, i32** %v2
+ call void @noop()
+ %v3 = load %kv1* %tmp
+ ret %kv1 %v3
+}
+
+define %kv2 @fn2() {
+; CHECK-LABEL: @fn2(
+; CHECK: %1 = tail call %kv1 @fn1()
+; CHECK: %2 = extractvalue %kv1 %1, 0
+; CHECK: %3 = bitcast i32* %2 to i8*
+; CHECK: %4 = insertvalue %kv2 undef, i8* %3, 0
+ %tmp = alloca %kv2
+ %v1 = getelementptr %kv2* %tmp, i32 0, i32 0
+ store i8* null, i8** %v1
+ %v2 = getelementptr %kv2* %tmp, i32 0, i32 0
+ store i8* null, i8** %v2
+ call void @noop()
+
+ %v3 = load %kv2* %tmp
+ ret %kv2 %v3
+}
diff --git a/test/Transforms/MergeFunc/ranges.ll b/test/Transforms/MergeFunc/ranges.ll
new file mode 100644
index 000000000000..e25ff1d3acb1
--- /dev/null
+++ b/test/Transforms/MergeFunc/ranges.ll
@@ -0,0 +1,43 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
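+;
+; Loads that differ only in their !range metadata should keep functions from
+; being merged unless the ranges match exactly; only @cmp_with_same_range is
+; expected to be folded into a tail call to @cmp_with_range.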
+define i1 @cmp_with_range(i8*, i8*) {
+ %v1 = load i8* %0, !range !0
+ %v2 = load i8* %1, !range !0
+ %out = icmp eq i8 %v1, %v2
+ ret i1 %out
+}
+
+define i1 @cmp_no_range(i8*, i8*) {
+; CHECK-LABEL: @cmp_no_range
+; CHECK-NEXT: %v1 = load i8* %0
+; CHECK-NEXT: %v2 = load i8* %1
+; CHECK-NEXT: %out = icmp eq i8 %v1, %v2
+; CHECK-NEXT: ret i1 %out
+ %v1 = load i8* %0
+ %v2 = load i8* %1
+ %out = icmp eq i8 %v1, %v2
+ ret i1 %out
+}
+
+define i1 @cmp_different_range(i8*, i8*) {
+; CHECK-LABEL: @cmp_different_range
+; CHECK-NEXT: %v1 = load i8* %0, !range !1
+; CHECK-NEXT: %v2 = load i8* %1, !range !1
+; CHECK-NEXT: %out = icmp eq i8 %v1, %v2
+; CHECK-NEXT: ret i1 %out
+ %v1 = load i8* %0, !range !1
+ %v2 = load i8* %1, !range !1
+ %out = icmp eq i8 %v1, %v2
+ ret i1 %out
+}
+
+define i1 @cmp_with_same_range(i8*, i8*) {
+; CHECK-LABEL: @cmp_with_same_range
+; CHECK: tail call i1 @cmp_with_range
+ %v1 = load i8* %0, !range !0
+ %v2 = load i8* %1, !range !0
+ %out = icmp eq i8 %v1, %v2
+ ret i1 %out
+}
+
+!0 = metadata !{i8 0, i8 2}
+!1 = metadata !{i8 5, i8 7}
diff --git a/test/Transforms/MetaRenamer/metarenamer.ll b/test/Transforms/MetaRenamer/metarenamer.ll
index 4020e1045081..6297af62ff0c 100644
--- a/test/Transforms/MetaRenamer/metarenamer.ll
+++ b/test/Transforms/MetaRenamer/metarenamer.ll
@@ -14,7 +14,9 @@ target triple = "x86_64-pc-linux-gnu"
@func_7_xxx = alias weak i32 (...)* @aliased_func_7_xxx
-declare i32 @aliased_func_7_xxx(...)
+define i32 @aliased_func_7_xxx(...) {
+ ret i32 0
+}
define i32 @func_3_xxx() nounwind uwtable ssp {
ret i32 3
diff --git a/test/Transforms/ObjCARC/allocas.ll b/test/Transforms/ObjCARC/allocas.ll
index 50656739ae71..7347a8fd4447 100644
--- a/test/Transforms/ObjCARC/allocas.ll
+++ b/test/Transforms/ObjCARC/allocas.ll
@@ -28,7 +28,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata)
declare i8* @objc_msgSend(i8*, i8*, ...)
-; In the presense of allocas, unconditionally remove retain/release pairs only
+; In the presence of allocas, unconditionally remove retain/release pairs only
; if they are known safe in both directions. This prevents matching up an inner
; retain with the boundary guarding release in the following situation:
;
@@ -336,7 +336,7 @@ bb3:
ret void
}
-; Make sure in the presense of allocas, if we find a cfghazard we do not perform
+; Make sure in the presence of allocas, if we find a cfghazard we do not perform
; code motion even if we are known safe. These two concepts are separate and
; should be treated as such.
;
diff --git a/test/Transforms/ObjCARC/contract-end-of-use-list.ll b/test/Transforms/ObjCARC/contract-end-of-use-list.ll
new file mode 100644
index 000000000000..a38cd8a1da17
--- /dev/null
+++ b/test/Transforms/ObjCARC/contract-end-of-use-list.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S < %s -objc-arc-expand -objc-arc-contract | FileCheck %s
+; Don't crash. Reproducer for a use_iterator bug from r203364.
+; rdar://problem/16333235
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-darwin13.2.0"
+
+%struct = type { i8*, i8* }
+
+; CHECK-LABEL: @foo() {
+define internal i8* @foo() {
+entry:
+ %call = call i8* @bar()
+; CHECK: %retained1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
+ %retained1 = call i8* @objc_retain(i8* %call)
+ %isnull = icmp eq i8* %retained1, null
+ br i1 %isnull, label %cleanup, label %if.end
+
+if.end:
+; CHECK: %retained2 = call i8* @objc_retain(i8* %retained1)
+ %retained2 = call i8* @objc_retain(i8* %retained1)
+ br label %cleanup
+
+cleanup:
+ %retval = phi i8* [ %retained2, %if.end ], [ null, %entry ]
+ ret i8* %retval
+}
+
+declare i8* @bar()
+
+declare extern_weak i8* @objc_retain(i8*)
diff --git a/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll b/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
index 072861720e63..79e300cb6b43 100644
--- a/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
+++ b/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
@@ -24,11 +24,11 @@ target triple = "x86_64-apple-macosx10.9.0"
@"\01L_OBJC_METH_VAR_NAME_" = internal global [4 x i8] c"new\00", section "__TEXT,__objc_methname,cstring_literals", align 1
@"\01L_OBJC_SELECTOR_REFERENCES_" = internal global i8* getelementptr inbounds ([4 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i64 0, i64 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
@__CFConstantStringClassReference = external global [0 x i32]
-@.str = linker_private unnamed_addr constant [11 x i8] c"Failed: %@\00", align 1
+@.str = private unnamed_addr constant [11 x i8] c"Failed: %@\00", align 1
@_unnamed_cfstring_ = private constant %struct.NSConstantString { i32* getelementptr inbounds ([0 x i32]* @__CFConstantStringClassReference, i32 0, i32 0), i32 1992, i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i64 10 }, section "__DATA,__cfstring"
@"OBJC_CLASS_$_NSException" = external global %struct._class_t
@"\01L_OBJC_CLASSLIST_REFERENCES_$_1" = internal global %struct._class_t* @"OBJC_CLASS_$_NSException", section "__DATA, __objc_classrefs, regular, no_dead_strip", align 8
-@.str2 = linker_private unnamed_addr constant [4 x i8] c"Foo\00", align 1
+@.str2 = private unnamed_addr constant [4 x i8] c"Foo\00", align 1
@_unnamed_cfstring_3 = private constant %struct.NSConstantString { i32* getelementptr inbounds ([0 x i32]* @__CFConstantStringClassReference, i32 0, i32 0), i32 1992, i8* getelementptr inbounds ([4 x i8]* @.str2, i32 0, i32 0), i64 3 }, section "__DATA,__cfstring"
@"\01L_OBJC_METH_VAR_NAME_4" = internal global [14 x i8] c"raise:format:\00", section "__TEXT,__objc_methname,cstring_literals", align 1
@"\01L_OBJC_SELECTOR_REFERENCES_5" = internal global i8* getelementptr inbounds ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_4", i64 0, i64 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
diff --git a/test/Transforms/Reassociate/2002-05-15-AgressiveSubMove.ll b/test/Transforms/Reassociate/2002-05-15-AgressiveSubMove.ll
index 5780990c7618..24300352a62c 100644
--- a/test/Transforms/Reassociate/2002-05-15-AgressiveSubMove.ll
+++ b/test/Transforms/Reassociate/2002-05-15-AgressiveSubMove.ll
@@ -1,9 +1,10 @@
-; RUN: opt < %s -reassociate -instcombine -constprop -dce -S | not grep add
+; RUN: opt < %s -reassociate -S | FileCheck %s
-define i32 @test(i32 %A) {
- %X = add i32 %A, 1 ; <i32> [#uses=1]
- %Y = add i32 %A, 1 ; <i32> [#uses=1]
- %r = sub i32 %X, %Y ; <i32> [#uses=1]
- ret i32 %r
+define i32 @test1(i32 %A) {
+; CHECK-LABEL: test1
+; CHECK: ret i32 0
+ %X = add i32 %A, 1
+ %Y = add i32 %A, 1
+ %r = sub i32 %X, %Y
+ ret i32 %r
}
-
diff --git a/test/Transforms/Reassociate/2002-05-15-MissedTree.ll b/test/Transforms/Reassociate/2002-05-15-MissedTree.ll
index e8bccbde28e2..5f3c9209aeda 100644
--- a/test/Transforms/Reassociate/2002-05-15-MissedTree.ll
+++ b/test/Transforms/Reassociate/2002-05-15-MissedTree.ll
@@ -1,9 +1,11 @@
-; RUN: opt < %s -reassociate -instcombine -constprop -die -S | not grep 5
+; RUN: opt < %s -reassociate -instcombine -S | FileCheck %s
-define i32 @test(i32 %A, i32 %B) {
- %W = add i32 %B, -5 ; <i32> [#uses=1]
- %Y = add i32 %A, 5 ; <i32> [#uses=1]
- %Z = add i32 %W, %Y ; <i32> [#uses=1]
+define i32 @test1(i32 %A, i32 %B) {
+; CHECK-LABEL: test1
+; CHECK: %Z = add i32 %B, %A
+; CHECK: ret i32 %Z
+ %W = add i32 %B, -5
+ %Y = add i32 %A, 5
+ %Z = add i32 %W, %Y
ret i32 %Z
}
-
diff --git a/test/Transforms/Reassociate/2002-05-15-SubReassociate.ll b/test/Transforms/Reassociate/2002-05-15-SubReassociate.ll
index c18af5e07efd..29c178ffec3e 100644
--- a/test/Transforms/Reassociate/2002-05-15-SubReassociate.ll
+++ b/test/Transforms/Reassociate/2002-05-15-SubReassociate.ll
@@ -1,12 +1,30 @@
+; RUN: opt < %s -reassociate -constprop -instcombine -dce -S | FileCheck %s
+
; With sub reassociation, constant folding can eliminate all of the constants.
-;
-; RUN: opt < %s -reassociate -constprop -instcombine -dce -S | not grep add
+define i32 @test1(i32 %A, i32 %B) {
+; CHECK-LABEL: test1
+; CHECK-NEXT: %Z = sub i32 %A, %B
+; CHECK-NEXT: ret i32 %Z
-define i32 @test(i32 %A, i32 %B) {
- %W = add i32 5, %B ; <i32> [#uses=1]
- %X = add i32 -7, %A ; <i32> [#uses=1]
- %Y = sub i32 %X, %W ; <i32> [#uses=1]
- %Z = add i32 %Y, 12 ; <i32> [#uses=1]
- ret i32 %Z
+ %W = add i32 5, %B
+ %X = add i32 -7, %A
+ %Y = sub i32 %X, %W
+ %Z = add i32 %Y, 12
+ ret i32 %Z
}
+
+; With sub reassociation, constant folding can eliminate the two 12 constants.
+define i32 @test2(i32 %A, i32 %B, i32 %C, i32 %D) {
+; CHECK-LABEL: test2
+; CHECK-NEXT: %sum = add i32 %B, %A
+; CHECK-NEXT: %sum1 = add i32 %sum, %C
+; CHECK-NEXT: %Q = sub i32 %D, %sum1
+; CHECK-NEXT: ret i32 %Q
+ %M = add i32 %A, 12
+ %N = add i32 %M, %B
+ %O = add i32 %N, %C
+ %P = sub i32 %D, %O
+ %Q = add i32 %P, 12
+ ret i32 %Q
+}
diff --git a/test/Transforms/Reassociate/2002-05-15-SubReassociate2.ll b/test/Transforms/Reassociate/2002-05-15-SubReassociate2.ll
deleted file mode 100644
index 5848821e10fd..000000000000
--- a/test/Transforms/Reassociate/2002-05-15-SubReassociate2.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; With sub reassociation, constant folding can eliminate the two 12 constants.
-;
-; RUN: opt < %s -reassociate -constprop -dce -S | not grep 12
-
-define i32 @test(i32 %A, i32 %B, i32 %C, i32 %D) {
- %M = add i32 %A, 12 ; <i32> [#uses=1]
- %N = add i32 %M, %B ; <i32> [#uses=1]
- %O = add i32 %N, %C ; <i32> [#uses=1]
- %P = sub i32 %D, %O ; <i32> [#uses=1]
- %Q = add i32 %P, 12 ; <i32> [#uses=1]
- ret i32 %Q
-}
-
diff --git a/test/Transforms/Reassociate/2005-09-01-ArrayOutOfBounds.ll b/test/Transforms/Reassociate/2005-09-01-ArrayOutOfBounds.ll
index f66148bb4abc..f6cef35b1779 100644
--- a/test/Transforms/Reassociate/2005-09-01-ArrayOutOfBounds.ll
+++ b/test/Transforms/Reassociate/2005-09-01-ArrayOutOfBounds.ll
@@ -1,23 +1,24 @@
-; RUN: opt < %s -reassociate -instcombine -S |\
-; RUN: grep "ret i32 0"
+; RUN: opt < %s -reassociate -instcombine -S | FileCheck %s
-define i32 @f(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
- %tmp.2 = add i32 %a4, %a3 ; <i32> [#uses=1]
- %tmp.4 = add i32 %tmp.2, %a2 ; <i32> [#uses=1]
- %tmp.6 = add i32 %tmp.4, %a1 ; <i32> [#uses=1]
- %tmp.8 = add i32 %tmp.6, %a0 ; <i32> [#uses=1]
- %tmp.11 = add i32 %a3, %a2 ; <i32> [#uses=1]
- %tmp.13 = add i32 %tmp.11, %a1 ; <i32> [#uses=1]
- %tmp.15 = add i32 %tmp.13, %a0 ; <i32> [#uses=1]
- %tmp.18 = add i32 %a2, %a1 ; <i32> [#uses=1]
- %tmp.20 = add i32 %tmp.18, %a0 ; <i32> [#uses=1]
- %tmp.23 = add i32 %a1, %a0 ; <i32> [#uses=1]
- %tmp.26 = sub i32 %tmp.8, %tmp.15 ; <i32> [#uses=1]
- %tmp.28 = add i32 %tmp.26, %tmp.20 ; <i32> [#uses=1]
- %tmp.30 = sub i32 %tmp.28, %tmp.23 ; <i32> [#uses=1]
- %tmp.32 = sub i32 %tmp.30, %a4 ; <i32> [#uses=1]
- %tmp.34 = sub i32 %tmp.32, %a2 ; <i32> [#uses=2]
- %T = mul i32 %tmp.34, %tmp.34 ; <i32> [#uses=1]
- ret i32 %T
-}
+define i32 @f1(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
+; CHECK-LABEL: f1
+; CHECK-NEXT: ret i32 0
+ %tmp.2 = add i32 %a4, %a3
+ %tmp.4 = add i32 %tmp.2, %a2
+ %tmp.6 = add i32 %tmp.4, %a1
+ %tmp.8 = add i32 %tmp.6, %a0
+ %tmp.11 = add i32 %a3, %a2
+ %tmp.13 = add i32 %tmp.11, %a1
+ %tmp.15 = add i32 %tmp.13, %a0
+ %tmp.18 = add i32 %a2, %a1
+ %tmp.20 = add i32 %tmp.18, %a0
+ %tmp.23 = add i32 %a1, %a0
+ %tmp.26 = sub i32 %tmp.8, %tmp.15
+ %tmp.28 = add i32 %tmp.26, %tmp.20
+ %tmp.30 = sub i32 %tmp.28, %tmp.23
+ %tmp.32 = sub i32 %tmp.30, %a4
+ %tmp.34 = sub i32 %tmp.32, %a2
+ %T = mul i32 %tmp.34, %tmp.34
+ ret i32 %T
+}
diff --git a/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll b/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
index 384cbc90a744..f78395540026 100644
--- a/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
+++ b/test/Transforms/Reassociate/2006-04-27-ReassociateVector.ll
@@ -1,8 +1,12 @@
-; RUN: opt < %s -reassociate -disable-output
+; RUN: opt < %s -reassociate -S | FileCheck %s
-define void @foo() {
- %tmp162 = fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>> [#uses=1]
- %tmp164 = fmul <4 x float> zeroinitializer, %tmp162 ; <<4 x float>> [#uses=0]
- ret void
-}
+define <4 x float> @test1() {
+; CHECK-LABEL: test1
+; CHECK-NEXT: %tmp1 = fsub <4 x float> zeroinitializer, zeroinitializer
+; CHECK-NEXT: %tmp2 = fmul <4 x float> zeroinitializer, %tmp1
+; CHECK-NEXT: ret <4 x float> %tmp2
+ %tmp1 = fsub <4 x float> zeroinitializer, zeroinitializer
+ %tmp2 = fmul <4 x float> zeroinitializer, %tmp1
+ ret <4 x float> %tmp2
+}
diff --git a/test/Transforms/Reassociate/basictest.ll b/test/Transforms/Reassociate/basictest.ll
index fda0ca6be1aa..d70bfcbb72c6 100644
--- a/test/Transforms/Reassociate/basictest.ll
+++ b/test/Transforms/Reassociate/basictest.ll
@@ -1,46 +1,47 @@
-; With reassociation, constant folding can eliminate the 12 and -12 constants.
-;
-; RUN: opt < %s -reassociate -gvn -instcombine -S | FileCheck %s
+; RUN: opt < %s -reassociate -gvn -instcombine -S | FileCheck %s
define i32 @test1(i32 %arg) {
- %tmp1 = sub i32 -12, %arg
- %tmp2 = add i32 %tmp1, 12
- ret i32 %tmp2
-; CHECK-LABEL: @test1(
+ %tmp1 = sub i32 -12, %arg
+ %tmp2 = add i32 %tmp1, 12
+ ret i32 %tmp2
+
+; CHECK-LABEL: @test1
; CHECK-NEXT: sub i32 0, %arg
; CHECK-NEXT: ret i32
}
define i32 @test2(i32 %reg109, i32 %reg1111) {
- %reg115 = add i32 %reg109, -30 ; <i32> [#uses=1]
- %reg116 = add i32 %reg115, %reg1111 ; <i32> [#uses=1]
- %reg117 = add i32 %reg116, 30 ; <i32> [#uses=1]
- ret i32 %reg117
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: add i32 %reg1111, %reg109
-; CHECK-NEXT: ret i32
+ %reg115 = add i32 %reg109, -30
+ %reg116 = add i32 %reg115, %reg1111
+ %reg117 = add i32 %reg116, 30
+ ret i32 %reg117
+
+; CHECK-LABEL: @test2
+; CHECK-NEXT: %reg117 = add i32 %reg1111, %reg109
+; CHECK-NEXT: ret i32 %reg117
}
-@e = external global i32 ; <i32*> [#uses=3]
-@a = external global i32 ; <i32*> [#uses=3]
-@b = external global i32 ; <i32*> [#uses=3]
-@c = external global i32 ; <i32*> [#uses=3]
-@f = external global i32 ; <i32*> [#uses=3]
+@e = external global i32
+@a = external global i32
+@b = external global i32
+@c = external global i32
+@f = external global i32
define void @test3() {
- %A = load i32* @a ; <i32> [#uses=2]
- %B = load i32* @b ; <i32> [#uses=2]
- %C = load i32* @c ; <i32> [#uses=2]
- %t1 = add i32 %A, %B ; <i32> [#uses=1]
- %t2 = add i32 %t1, %C ; <i32> [#uses=1]
- %t3 = add i32 %C, %A ; <i32> [#uses=1]
- %t4 = add i32 %t3, %B ; <i32> [#uses=1]
- ; e = (a+b)+c;
- store i32 %t2, i32* @e
- ; f = (a+c)+b
- store i32 %t4, i32* @f
- ret void
-; CHECK-LABEL: @test3(
+ %A = load i32* @a
+ %B = load i32* @b
+ %C = load i32* @c
+ %t1 = add i32 %A, %B
+ %t2 = add i32 %t1, %C
+ %t3 = add i32 %C, %A
+ %t4 = add i32 %t3, %B
+ ; e = (a+b)+c;
+ store i32 %t2, i32* @e
+ ; f = (a+c)+b
+ store i32 %t4, i32* @f
+ ret void
+
+; CHECK-LABEL: @test3
; CHECK: add i32
; CHECK: add i32
; CHECK-NOT: add i32
@@ -48,19 +49,20 @@ define void @test3() {
}
define void @test4() {
- %A = load i32* @a ; <i32> [#uses=2]
- %B = load i32* @b ; <i32> [#uses=2]
- %C = load i32* @c ; <i32> [#uses=2]
- %t1 = add i32 %A, %B ; <i32> [#uses=1]
- %t2 = add i32 %t1, %C ; <i32> [#uses=1]
- %t3 = add i32 %C, %A ; <i32> [#uses=1]
- %t4 = add i32 %t3, %B ; <i32> [#uses=1]
- ; e = c+(a+b)
- store i32 %t2, i32* @e
- ; f = (c+a)+b
- store i32 %t4, i32* @f
- ret void
-; CHECK-LABEL: @test4(
+ %A = load i32* @a
+ %B = load i32* @b
+ %C = load i32* @c
+ %t1 = add i32 %A, %B
+ %t2 = add i32 %t1, %C
+ %t3 = add i32 %C, %A
+ %t4 = add i32 %t3, %B
+ ; e = c+(a+b)
+ store i32 %t2, i32* @e
+ ; f = (c+a)+b
+ store i32 %t4, i32* @f
+ ret void
+
+; CHECK-LABEL: @test4
; CHECK: add i32
; CHECK: add i32
; CHECK-NOT: add i32
@@ -68,19 +70,20 @@ define void @test4() {
}
define void @test5() {
- %A = load i32* @a ; <i32> [#uses=2]
- %B = load i32* @b ; <i32> [#uses=2]
- %C = load i32* @c ; <i32> [#uses=2]
- %t1 = add i32 %B, %A ; <i32> [#uses=1]
- %t2 = add i32 %t1, %C ; <i32> [#uses=1]
- %t3 = add i32 %C, %A ; <i32> [#uses=1]
- %t4 = add i32 %t3, %B ; <i32> [#uses=1]
- ; e = c+(b+a)
- store i32 %t2, i32* @e
- ; f = (c+a)+b
- store i32 %t4, i32* @f
- ret void
-; CHECK-LABEL: @test5(
+ %A = load i32* @a
+ %B = load i32* @b
+ %C = load i32* @c
+ %t1 = add i32 %B, %A
+ %t2 = add i32 %t1, %C
+ %t3 = add i32 %C, %A
+ %t4 = add i32 %t3, %B
+ ; e = c+(b+a)
+ store i32 %t2, i32* @e
+ ; f = (c+a)+b
+ store i32 %t4, i32* @f
+ ret void
+
+; CHECK-LABEL: @test5
; CHECK: add i32
; CHECK: add i32
; CHECK-NOT: add i32
@@ -88,60 +91,61 @@ define void @test5() {
}
define i32 @test6() {
- %tmp.0 = load i32* @a
- %tmp.1 = load i32* @b
- ; (a+b)
- %tmp.2 = add i32 %tmp.0, %tmp.1
- %tmp.4 = load i32* @c
- ; (a+b)+c
- %tmp.5 = add i32 %tmp.2, %tmp.4
- ; (a+c)
- %tmp.8 = add i32 %tmp.0, %tmp.4
- ; (a+c)+b
- %tmp.11 = add i32 %tmp.8, %tmp.1
- ; X ^ X = 0
- %RV = xor i32 %tmp.5, %tmp.11
- ret i32 %RV
-; CHECK-LABEL: @test6(
+ %tmp.0 = load i32* @a
+ %tmp.1 = load i32* @b
+ ; (a+b)
+ %tmp.2 = add i32 %tmp.0, %tmp.1
+ %tmp.4 = load i32* @c
+ ; (a+b)+c
+ %tmp.5 = add i32 %tmp.2, %tmp.4
+ ; (a+c)
+ %tmp.8 = add i32 %tmp.0, %tmp.4
+ ; (a+c)+b
+ %tmp.11 = add i32 %tmp.8, %tmp.1
+ ; X ^ X = 0
+ %RV = xor i32 %tmp.5, %tmp.11
+ ret i32 %RV
+
+; CHECK-LABEL: @test6
; CHECK: ret i32 0
}
; This should be one add and two multiplies.
define i32 @test7(i32 %A, i32 %B, i32 %C) {
- ; A*A*B + A*C*A
- %aa = mul i32 %A, %A
- %aab = mul i32 %aa, %B
- %ac = mul i32 %A, %C
- %aac = mul i32 %ac, %A
- %r = add i32 %aab, %aac
- ret i32 %r
-; CHECK-LABEL: @test7(
+ ; A*A*B + A*C*A
+ %aa = mul i32 %A, %A
+ %aab = mul i32 %aa, %B
+ %ac = mul i32 %A, %C
+ %aac = mul i32 %ac, %A
+ %r = add i32 %aab, %aac
+ ret i32 %r
+
+; CHECK-LABEL: @test7
; CHECK-NEXT: add i32 %C, %B
; CHECK-NEXT: mul i32
; CHECK-NEXT: mul i32
; CHECK-NEXT: ret i32
}
-
define i32 @test8(i32 %X, i32 %Y, i32 %Z) {
- %A = sub i32 0, %X
- %B = mul i32 %A, %Y
- ; (-X)*Y + Z -> Z-X*Y
- %C = add i32 %B, %Z
- ret i32 %C
-; CHECK-LABEL: @test8(
+ %A = sub i32 0, %X
+ %B = mul i32 %A, %Y
+ ; (-X)*Y + Z -> Z-X*Y
+ %C = add i32 %B, %Z
+ ret i32 %C
+
+; CHECK-LABEL: @test8
; CHECK-NEXT: %A = mul i32 %Y, %X
; CHECK-NEXT: %C = sub i32 %Z, %A
; CHECK-NEXT: ret i32 %C
}
-
; PR5458
define i32 @test9(i32 %X) {
%Y = mul i32 %X, 47
%Z = add i32 %Y, %Y
ret i32 %Z
-; CHECK-LABEL: @test9(
+; CHECK-LABEL: @test9
; CHECK-NEXT: mul i32 %X, 94
; CHECK-NEXT: ret i32
}
@@ -150,7 +154,7 @@ define i32 @test10(i32 %X) {
%Y = add i32 %X ,%X
%Z = add i32 %Y, %X
ret i32 %Z
-; CHECK-LABEL: @test10(
+; CHECK-LABEL: @test10
; CHECK-NEXT: mul i32 %X, 3
; CHECK-NEXT: ret i32
}
@@ -160,7 +164,7 @@ define i32 @test11(i32 %W) {
%Y = add i32 %X ,%X
%Z = add i32 %Y, %X
ret i32 %Z
-; CHECK-LABEL: @test11(
+; CHECK-LABEL: @test11
; CHECK-NEXT: mul i32 %W, 381
; CHECK-NEXT: ret i32
}
@@ -169,11 +173,10 @@ define i32 @test12(i32 %X) {
%A = sub i32 1, %X
%B = sub i32 2, %X
%C = sub i32 3, %X
-
%Y = add i32 %A ,%B
%Z = add i32 %Y, %C
ret i32 %Z
-; CHECK-LABEL: @test12(
+; CHECK-LABEL: @test12
; CHECK-NEXT: mul i32 %X, -3
; CHECK-NEXT: add i32{{.*}}, 6
; CHECK-NEXT: ret i32
@@ -185,7 +188,7 @@ define i32 @test13(i32 %X1, i32 %X2, i32 %X3) {
%C = mul i32 %X1, %X3 ; X1*X3
%D = add i32 %B, %C ; -X1*X2 + X1*X3 -> X1*(X3-X2)
ret i32 %D
-; CHECK-LABEL: @test13(
+; CHECK-LABEL: @test13
; CHECK-NEXT: sub i32 %X3, %X2
; CHECK-NEXT: mul i32 {{.*}}, %X1
; CHECK-NEXT: ret i32
@@ -197,9 +200,10 @@ define i32 @test14(i32 %X1, i32 %X2) {
%C = mul i32 %X2, -47 ; X2*-47
%D = add i32 %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
ret i32 %D
-; CHECK-LABEL: @test14(
+
+; CHECK-LABEL: @test14
; CHECK-NEXT: sub i32 %X1, %X2
-; CHECK-NEXT: mul i32 {{.*}}, 47
+; CHECK-NEXT: mul i32 %tmp, 47
; CHECK-NEXT: ret i32
}
@@ -210,7 +214,6 @@ define i32 @test15(i32 %X1, i32 %X2, i32 %X3) {
%C = and i1 %A, %B
%D = select i1 %C, i32 %X1, i32 0
ret i32 %D
-; CHECK-LABEL: @test15(
+; CHECK-LABEL: @test15
; CHECK: and i1 %A, %B
}
-
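As a quick orientation for the Reassociate tests above, here is a minimal standalone sketch (hypothetical function name, not part of the imported test files) of the folding the new CHECK lines expect: reassociation groups the constants of an expression tree together so that a follow-up instcombine run can cancel them.

define i32 @reassoc_demo(i32 %x) {
  %a = add i32 %x, 7           ; x + 7
  %b = add i32 %x, -7          ; x - 7
  %s = add i32 %a, %b          ; (x + 7) + (x - 7) == 2*x
  ret i32 %s
}
; Running "opt -reassociate -instcombine -S" over this is expected to leave
; roughly "%s = shl i32 %x, 1" followed by "ret i32 %s".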
diff --git a/test/Transforms/Reassociate/fp-commute.ll b/test/Transforms/Reassociate/fp-commute.ll
index 025689bb0c10..eac5b5920ac1 100644
--- a/test/Transforms/Reassociate/fp-commute.ll
+++ b/test/Transforms/Reassociate/fp-commute.ll
@@ -1,18 +1,19 @@
; RUN: opt -reassociate -S < %s | FileCheck %s
-target triple = "armv7-apple-ios"
-
declare void @use(float)
-; CHECK: test
-define void @test(float %x, float %y) {
-entry:
+define void @test1(float %x, float %y) {
+; CHECK-LABEL: test1
; CHECK: fmul float %x, %y
; CHECK: fmul float %x, %y
- %0 = fmul float %x, %y
- %1 = fmul float %y, %x
- %2 = fsub float %0, %1
- call void @use(float %0)
- call void @use(float %2)
+; CHECK: fsub float %1, %2
+; CHECK: call void @use(float %{{.*}})
+; CHECK: call void @use(float %{{.*}})
+
+ %1 = fmul float %x, %y
+ %2 = fmul float %y, %x
+ %3 = fsub float %1, %2
+ call void @use(float %1)
+ call void @use(float %3)
ret void
}
diff --git a/test/Transforms/Reassociate/inverses.ll b/test/Transforms/Reassociate/inverses.ll
index afe076caea92..8500cd867fd3 100644
--- a/test/Transforms/Reassociate/inverses.ll
+++ b/test/Transforms/Reassociate/inverses.ll
@@ -32,3 +32,15 @@ define i32 @test3(i32 %b, i32 %a) {
; CHECK: %tmp.5 = add i32 %b, 1234
; CHECK: ret i32 %tmp.5
}
+
+define i32 @test4(i32 %b, i32 %a) {
+ %tmp.1 = add i32 %a, 1234
+ %tmp.2 = add i32 %b, %tmp.1
+ %tmp.4 = xor i32 %a, -1
+ ; (b+(a+1234))+~a -> b+1233
+ %tmp.5 = add i32 %tmp.2, %tmp.4
+ ret i32 %tmp.5
+; CHECK-LABEL: @test4(
+; CHECK: %tmp.5 = add i32 %b, 1233
+; CHECK: ret i32 %tmp.5
+}
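The new test4 relies on the two's-complement identity ~a == -a - 1, so (b + (a + 1234)) + ~a == b + 1234 - 1 == b + 1233. A minimal sketch of the same cancellation (hypothetical function name; assuming an -instcombine run after -reassociate, as the neighbouring tests in this directory use):

define i32 @not_cancel_demo(i32 %a, i32 %b) {
  %na = xor i32 %a, -1         ; ~a, i.e. -a - 1
  %t1 = add i32 %a, %na        ; a + ~a is always -1
  %t2 = add i32 %t1, %b        ; hence b - 1
  ret i32 %t2
}
; Instcombine alone is expected to fold %t1 to the constant -1, leaving an
; add of %b and -1.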
diff --git a/test/Transforms/Reassociate/looptest.ll b/test/Transforms/Reassociate/looptest.ll
index 91723bc37b01..aad3b206f69f 100644
--- a/test/Transforms/Reassociate/looptest.ll
+++ b/test/Transforms/Reassociate/looptest.ll
@@ -18,6 +18,7 @@
declare i32 @printf(i8*, ...)
+; FIXME: No longer works.
define void @test(i32 %Num, i32* %Array) {
bb0:
%cond221 = icmp eq i32 0, %Num ; <i1> [#uses=3]
diff --git a/test/Transforms/Reassociate/mightymul.ll b/test/Transforms/Reassociate/mightymul.ll
index cfbc485ffa03..ae915da8e576 100644
--- a/test/Transforms/Reassociate/mightymul.ll
+++ b/test/Transforms/Reassociate/mightymul.ll
@@ -1,7 +1,7 @@
-; RUN: opt < %s -reassociate
+; RUN: opt < %s -reassociate -disable-output
; PR13021
-define i32 @foo(i32 %x) {
+define i32 @test1(i32 %x) {
%t0 = mul i32 %x, %x
%t1 = mul i32 %t0, %t0
%t2 = mul i32 %t1, %t1
diff --git a/test/Transforms/Reassociate/multistep.ll b/test/Transforms/Reassociate/multistep.ll
index d79464753f12..12eaeeea7b7b 100644
--- a/test/Transforms/Reassociate/multistep.ll
+++ b/test/Transforms/Reassociate/multistep.ll
@@ -28,4 +28,3 @@ define i64 @multistep2(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-NEXT: ret
ret i64 %t3
}
-
diff --git a/test/Transforms/Reassociate/negation.ll b/test/Transforms/Reassociate/negation.ll
index 6a3dfd3b8206..12d2c86192bb 100644
--- a/test/Transforms/Reassociate/negation.ll
+++ b/test/Transforms/Reassociate/negation.ll
@@ -1,21 +1,31 @@
-; RUN: opt < %s -reassociate -instcombine -S | not grep sub
+; RUN: opt < %s -reassociate -instcombine -S | FileCheck %s
; Test that we can turn things like X*-(Y*Z) -> X*-1*Y*Z.
define i32 @test1(i32 %a, i32 %b, i32 %z) {
- %c = sub i32 0, %z ; <i32> [#uses=1]
- %d = mul i32 %a, %b ; <i32> [#uses=1]
- %e = mul i32 %c, %d ; <i32> [#uses=1]
- %f = mul i32 %e, 12345 ; <i32> [#uses=1]
- %g = sub i32 0, %f ; <i32> [#uses=1]
- ret i32 %g
+; CHECK-LABEL: test1
+; CHECK-NEXT: %e = mul i32 %a, 12345
+; CHECK-NEXT: %f = mul i32 %e, %b
+; CHECK-NEXT: %g = mul i32 %f, %z
+; CHECK-NEXT: ret i32 %g
+
+ %c = sub i32 0, %z
+ %d = mul i32 %a, %b
+ %e = mul i32 %c, %d
+ %f = mul i32 %e, 12345
+ %g = sub i32 0, %f
+ ret i32 %g
}
define i32 @test2(i32 %a, i32 %b, i32 %z) {
- %d = mul i32 %z, 40 ; <i32> [#uses=1]
- %c = sub i32 0, %d ; <i32> [#uses=1]
- %e = mul i32 %a, %c ; <i32> [#uses=1]
- %f = sub i32 0, %e ; <i32> [#uses=1]
- ret i32 %f
-}
+; CHECK-LABEL: test2
+; CHECK-NEXT: %e = mul i32 %a, 40
+; CHECK-NEXT: %f = mul i32 %e, %z
+; CHECK-NEXT: ret i32 %f
+ %d = mul i32 %z, 40
+ %c = sub i32 0, %d
+ %e = mul i32 %a, %c
+ %f = sub i32 0, %e
+ ret i32 %f
+}
diff --git a/test/Transforms/Reassociate/otherops.ll b/test/Transforms/Reassociate/otherops.ll
index d68d00818cb8..7718881d8e51 100644
--- a/test/Transforms/Reassociate/otherops.ll
+++ b/test/Transforms/Reassociate/otherops.ll
@@ -1,28 +1,42 @@
; Reassociation should apply to Add, Mul, And, Or, & Xor
;
-; RUN: opt < %s -reassociate -constprop -instcombine -die -S | not grep 12
+; RUN: opt < %s -reassociate -constprop -instcombine -die -S | FileCheck %s
define i32 @test_mul(i32 %arg) {
- %tmp1 = mul i32 12, %arg ; <i32> [#uses=1]
- %tmp2 = mul i32 %tmp1, 12 ; <i32> [#uses=1]
- ret i32 %tmp2
+; CHECK-LABEL: test_mul
+; CHECK-NEXT: %tmp2 = mul i32 %arg, 144
+; CHECK-NEXT: ret i32 %tmp2
+
+ %tmp1 = mul i32 12, %arg
+ %tmp2 = mul i32 %tmp1, 12
+ ret i32 %tmp2
}
define i32 @test_and(i32 %arg) {
- %tmp1 = and i32 14, %arg ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 14 ; <i32> [#uses=1]
- ret i32 %tmp2
+; CHECK-LABEL: test_and
+; CHECK-NEXT: %tmp2 = and i32 %arg, 14
+; CHECK-NEXT: ret i32 %tmp2
+
+ %tmp1 = and i32 14, %arg
+ %tmp2 = and i32 %tmp1, 14
+ ret i32 %tmp2
}
define i32 @test_or(i32 %arg) {
- %tmp1 = or i32 14, %arg ; <i32> [#uses=1]
- %tmp2 = or i32 %tmp1, 14 ; <i32> [#uses=1]
- ret i32 %tmp2
+; CHECK-LABEL: test_or
+; CHECK-NEXT: %tmp2 = or i32 %arg, 14
+; CHECK-NEXT: ret i32 %tmp2
+
+ %tmp1 = or i32 14, %arg
+ %tmp2 = or i32 %tmp1, 14
+ ret i32 %tmp2
}
define i32 @test_xor(i32 %arg) {
- %tmp1 = xor i32 12, %arg ; <i32> [#uses=1]
- %tmp2 = xor i32 %tmp1, 12 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
+; CHECK-LABEL: test_xor
+; CHECK-NEXT: ret i32 %arg
+ %tmp1 = xor i32 12, %arg
+ %tmp2 = xor i32 %tmp1, 12
+ ret i32 %tmp2
+}
diff --git a/test/Transforms/Reassociate/shift-factor.ll b/test/Transforms/Reassociate/shift-factor.ll
index 73af5e5304ee..8fbf1b9d4bc5 100644
--- a/test/Transforms/Reassociate/shift-factor.ll
+++ b/test/Transforms/Reassociate/shift-factor.ll
@@ -1,12 +1,14 @@
; There should be exactly one shift and one add left.
-; RUN: opt < %s -reassociate -instcombine -S > %t
-; RUN: grep shl %t | count 1
-; RUN: grep add %t | count 1
+; RUN: opt < %s -reassociate -instcombine -S | FileCheck %s
-define i32 @test(i32 %X, i32 %Y) {
- %tmp.2 = shl i32 %X, 1 ; <i32> [#uses=1]
- %tmp.6 = shl i32 %Y, 1 ; <i32> [#uses=1]
- %tmp.4 = add i32 %tmp.6, %tmp.2 ; <i32> [#uses=1]
- ret i32 %tmp.4
-}
+define i32 @test1(i32 %X, i32 %Y) {
+; CHECK-LABEL: test1
+; CHECK-NEXT: %tmp = add i32 %Y, %X
+; CHECK-NEXT: %tmp1 = shl i32 %tmp, 1
+; CHECK-NEXT: ret i32 %tmp1
+ %tmp.2 = shl i32 %X, 1
+ %tmp.6 = shl i32 %Y, 1
+ %tmp.4 = add i32 %tmp.6, %tmp.2
+ ret i32 %tmp.4
+}
diff --git a/test/Transforms/Reassociate/subtest.ll b/test/Transforms/Reassociate/subtest.ll
index 4c63d1238a67..e6263d85522c 100644
--- a/test/Transforms/Reassociate/subtest.ll
+++ b/test/Transforms/Reassociate/subtest.ll
@@ -1,11 +1,26 @@
-; With sub reassociation, constant folding can eliminate the 12 and -12 constants.
-;
-; RUN: opt < %s -reassociate -instcombine -S | not grep 12
+; RUN: opt < %s -reassociate -instcombine -S | FileCheck %s
-define i32 @test(i32 %A, i32 %B) {
- %X = add i32 -12, %A ; <i32> [#uses=1]
- %Y = sub i32 %X, %B ; <i32> [#uses=1]
- %Z = add i32 %Y, 12 ; <i32> [#uses=1]
- ret i32 %Z
+; With sub reassociation, constant folding can eliminate the 12 and -12 constants.
+define i32 @test1(i32 %A, i32 %B) {
+; CHECK-LABEL: @test1
+; CHECK-NEXT: %Z = sub i32 %A, %B
+; CHECK-NEXT: ret i32 %Z
+ %X = add i32 -12, %A
+ %Y = sub i32 %X, %B
+ %Z = add i32 %Y, 12
+ ret i32 %Z
}
+; PR2047
+; With sub reassociation, constant folding can eliminate the uses of %a.
+define i32 @test2(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: @test2
+; CHECK-NEXT: %sum = add i32 %c, %b
+; CHECK-NEXT: %tmp7 = sub i32 0, %sum
+; CHECK-NEXT: ret i32 %tmp7
+
+ %tmp3 = sub i32 %a, %b
+ %tmp5 = sub i32 %tmp3, %c
+ %tmp7 = sub i32 %tmp5, %a
+ ret i32 %tmp7
+}
diff --git a/test/Transforms/Reassociate/subtest2.ll b/test/Transforms/Reassociate/subtest2.ll
deleted file mode 100644
index 0513c5fc1b63..000000000000
--- a/test/Transforms/Reassociate/subtest2.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; With sub reassociation, constant folding can eliminate the uses of %a.
-;
-; RUN: opt < %s -reassociate -instcombine -S | grep %a | count 1
-; PR2047
-
-define i32 @test(i32 %a, i32 %b, i32 %c) nounwind {
-entry:
- %tmp3 = sub i32 %a, %b ; <i32> [#uses=1]
- %tmp5 = sub i32 %tmp3, %c ; <i32> [#uses=1]
- %tmp7 = sub i32 %tmp5, %a ; <i32> [#uses=1]
- ret i32 %tmp7
-}
-
diff --git a/test/Transforms/SCCP/atomic.ll b/test/Transforms/SCCP/atomic.ll
new file mode 100644
index 000000000000..60d4896ec2b0
--- /dev/null
+++ b/test/Transforms/SCCP/atomic.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -sccp -S | FileCheck %s
+
+define i1 @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: @test_cmpxchg
+; CHECK: cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %val = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %res = extractvalue { i32, i1 } %val, 1
+ ret i1 %res
+}
diff --git a/test/Transforms/SLPVectorizer/AArch64/lit.local.cfg b/test/Transforms/SLPVectorizer/AArch64/lit.local.cfg
new file mode 100644
index 000000000000..7184443994b6
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/AArch64/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True
diff --git a/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll b/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll
new file mode 100644
index 000000000000..3d6da124fc45
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/AArch64/mismatched-intrinsics.ll
@@ -0,0 +1,18 @@
+; RUN: opt -S -slp-vectorizer %s | FileCheck %s
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios5.0.0"
+
+define i64 @mismatched_intrinsics(<4 x i32> %in1, <2 x i32> %in2) nounwind {
+; CHECK-LABEL: @mismatched_intrinsics
+; CHECK: call i64 @llvm.arm64.neon.saddlv.i64.v4i32
+; CHECK: call i64 @llvm.arm64.neon.saddlv.i64.v2i32
+
+ %vaddlvq_s32.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> %in1) #2
+ %vaddlv_s32.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> %in2) #2
+ %tst = icmp sgt i64 %vaddlvq_s32.i, %vaddlv_s32.i
+ %equal = sext i1 %tst to i64
+ ret i64 %equal
+}
+
+declare i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> %in1)
+declare i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> %in1)
diff --git a/test/Transforms/SLPVectorizer/ARM/lit.local.cfg b/test/Transforms/SLPVectorizer/ARM/lit.local.cfg
index 5fc35d80541d..236e1d344166 100644
--- a/test/Transforms/SLPVectorizer/ARM/lit.local.cfg
+++ b/test/Transforms/SLPVectorizer/ARM/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'ARM' in targets:
+if not 'ARM' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/SLPVectorizer/R600/lit.local.cfg b/test/Transforms/SLPVectorizer/R600/lit.local.cfg
index 9e0ab99235e0..4086e8d681c3 100644
--- a/test/Transforms/SLPVectorizer/R600/lit.local.cfg
+++ b/test/Transforms/SLPVectorizer/R600/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'R600' in targets:
+if not 'R600' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/SLPVectorizer/X86/addsub.ll b/test/Transforms/SLPVectorizer/X86/addsub.ll
new file mode 100644
index 000000000000..8303bc8181ef
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/addsub.ll
@@ -0,0 +1,181 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@b = common global [4 x i32] zeroinitializer, align 16
+@c = common global [4 x i32] zeroinitializer, align 16
+@d = common global [4 x i32] zeroinitializer, align 16
+@e = common global [4 x i32] zeroinitializer, align 16
+@a = common global [4 x i32] zeroinitializer, align 16
+@fb = common global [4 x float] zeroinitializer, align 16
+@fc = common global [4 x float] zeroinitializer, align 16
+@fa = common global [4 x float] zeroinitializer, align 16
+
+; CHECK-LABEL: @addsub
+; CHECK: %5 = add <4 x i32> %3, %4
+; CHECK: %6 = add <4 x i32> %2, %5
+; CHECK: %7 = sub <4 x i32> %2, %5
+; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+
+; Function Attrs: nounwind uwtable
+define void @addsub() #0 {
+entry:
+ %0 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
+ %1 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
+ %add = add nsw i32 %0, %1
+ %2 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
+ %3 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
+ %add1 = add nsw i32 %2, %3
+ %add2 = add nsw i32 %add, %add1
+ store i32 %add2, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 0), align 4
+ %4 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
+ %5 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
+ %add3 = add nsw i32 %4, %5
+ %6 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
+ %7 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
+ %add4 = add nsw i32 %6, %7
+ %sub = sub nsw i32 %add3, %add4
+ store i32 %sub, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 1), align 4
+ %8 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
+ %9 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
+ %add5 = add nsw i32 %8, %9
+ %10 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
+ %11 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
+ %add6 = add nsw i32 %10, %11
+ %add7 = add nsw i32 %add5, %add6
+ store i32 %add7, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 2), align 4
+ %12 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
+ %13 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
+ %add8 = add nsw i32 %12, %13
+ %14 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
+ %15 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
+ %add9 = add nsw i32 %14, %15
+ %sub10 = sub nsw i32 %add8, %add9
+ store i32 %sub10, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 3), align 4
+ ret void
+}
+
+; CHECK-LABEL: @subadd
+; CHECK: %5 = add <4 x i32> %3, %4
+; CHECK: %6 = sub <4 x i32> %2, %5
+; CHECK: %7 = add <4 x i32> %2, %5
+; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+
+; Function Attrs: nounwind uwtable
+define void @subadd() #0 {
+entry:
+ %0 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
+ %1 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
+ %add = add nsw i32 %0, %1
+ %2 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
+ %3 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
+ %add1 = add nsw i32 %2, %3
+ %sub = sub nsw i32 %add, %add1
+ store i32 %sub, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 0), align 4
+ %4 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
+ %5 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
+ %add2 = add nsw i32 %4, %5
+ %6 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
+ %7 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
+ %add3 = add nsw i32 %6, %7
+ %add4 = add nsw i32 %add2, %add3
+ store i32 %add4, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 1), align 4
+ %8 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
+ %9 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
+ %add5 = add nsw i32 %8, %9
+ %10 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
+ %11 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
+ %add6 = add nsw i32 %10, %11
+ %sub7 = sub nsw i32 %add5, %add6
+ store i32 %sub7, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 2), align 4
+ %12 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
+ %13 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
+ %add8 = add nsw i32 %12, %13
+ %14 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
+ %15 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
+ %add9 = add nsw i32 %14, %15
+ %add10 = add nsw i32 %add8, %add9
+ store i32 %add10, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 3), align 4
+ ret void
+}
+
+; CHECK-LABEL: @faddfsub
+; CHECK: %2 = fadd <4 x float> %0, %1
+; CHECK: %3 = fsub <4 x float> %0, %1
+; CHECK: %4 = shufflevector <4 x float> %2, <4 x float> %3, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; Function Attrs: nounwind uwtable
+define void @faddfsub() #0 {
+entry:
+ %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %add = fadd float %0, %1
+ store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ %sub = fsub float %2, %3
+ store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+ %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ %add1 = fadd float %4, %5
+ store float %add1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+ %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ %sub2 = fsub float %6, %7
+ store float %sub2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ ret void
+}
+
+; CHECK-LABEL: @fsubfadd
+; CHECK: %2 = fsub <4 x float> %0, %1
+; CHECK: %3 = fadd <4 x float> %0, %1
+; CHECK: %4 = shufflevector <4 x float> %2, <4 x float> %3, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; Function Attrs: nounwind uwtable
+define void @fsubfadd() #0 {
+entry:
+ %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %sub = fsub float %0, %1
+ store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ %add = fadd float %2, %3
+ store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+ %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ %sub1 = fsub float %4, %5
+ store float %sub1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+ %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ %add2 = fadd float %6, %7
+ store float %add2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ ret void
+}
+
+; CHECK-LABEL: @No_faddfsub
+; CHECK-NOT: fadd <4 x float>
+; CHECK-NOT: fsub <4 x float>
+; CHECK-NOT: shufflevector
+; Function Attrs: nounwind uwtable
+define void @No_faddfsub() #0 {
+entry:
+ %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %add = fadd float %0, %1
+ store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ %add1 = fadd float %2, %3
+ store float %add1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+ %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ %add2 = fadd float %4, %5
+ store float %add2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+ %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ %sub = fsub float %6, %7
+ store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/Transforms/SLPVectorizer/X86/align.ll b/test/Transforms/SLPVectorizer/X86/align.ll
new file mode 100644
index 000000000000..f5865733ccb5
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/align.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Simple 3-pair chain with loads and stores
+; CHECK: test1
+define void @test1(double* %a, double* %b, double* %c) {
+entry:
+ %agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
+; CHECK: %[[V0:[0-9]+]] = load <2 x double>* %[[V2:[0-9]+]], align 8
+ %i0 = load double* %a
+ %i1 = load double* %b
+ %mul = fmul double %i0, %i1
+ %store1 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
+ %store2 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
+ store double %mul, double* %store1
+ store double %mul5, double* %store2, align 16
+; CHECK: ret
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/call.ll b/test/Transforms/SLPVectorizer/X86/call.ll
new file mode 100644
index 000000000000..83d45c0a9d72
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/call.ll
@@ -0,0 +1,128 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-999 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+declare double @sin(double)
+declare double @cos(double)
+declare double @pow(double, double)
+declare double @exp2(double)
+declare i64 @round(i64)
+
+
+; CHECK: sin_libm
+; CHECK: call <2 x double> @llvm.sin.v2f64
+; CHECK: ret void
+define void @sin_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @sin(double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @sin(double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+; CHECK: cos_libm
+; CHECK: call <2 x double> @llvm.cos.v2f64
+; CHECK: ret void
+define void @cos_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @cos(double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @cos(double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+; CHECK: pow_libm
+; CHECK: call <2 x double> @llvm.pow.v2f64
+; CHECK: ret void
+define void @pow_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @pow(double %mul,double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+
+; CHECK: exp2_libm
+; CHECK: call <2 x double> @llvm.exp2.v2f64
+; CHECK: ret void
+define void @exp2_libm(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @exp2(double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @exp2(double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+
+; Negative test case
+; CHECK: round_custom
+; CHECK-NOT: load <4 x i64>
+; CHECK: ret void
+define void @round_custom(i64* %a, i64* %b, i64* %c) {
+entry:
+ %i0 = load i64* %a, align 8
+ %i1 = load i64* %b, align 8
+ %mul = mul i64 %i0, %i1
+ %call = tail call i64 @round(i64 %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds i64* %a, i64 1
+ %i3 = load i64* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds i64* %b, i64 1
+ %i4 = load i64* %arrayidx4, align 8
+ %mul5 = mul i64 %i3, %i4
+ %call5 = tail call i64 @round(i64 %mul5) nounwind readnone
+ store i64 %call, i64* %c, align 8
+ %arrayidx5 = getelementptr inbounds i64* %c, i64 1
+ store i64 %call5, i64* %arrayidx5, align 8
+ ret void
+}
+
+
+; CHECK: declare <2 x double> @llvm.sin.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) #0
+; CHECK: declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>) #0
+; CHECK: declare <2 x double> @llvm.exp2.v2f64(<2 x double>) #0
+
+; CHECK: attributes #0 = { nounwind readnone }
+
diff --git a/test/Transforms/SLPVectorizer/X86/consecutive-access.ll b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
new file mode 100644
index 000000000000..f4f112fe32c6
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -0,0 +1,175 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+@A = common global [2000 x double] zeroinitializer, align 16
+@B = common global [2000 x double] zeroinitializer, align 16
+@C = common global [2000 x float] zeroinitializer, align 16
+@D = common global [2000 x float] zeroinitializer, align 16
+
+; Currently SCEV isn't smart enough to figure out that accesses
+; A[3*i], A[3*i+1] and A[3*i+2] are consecutive, but in future
+; that would hopefully be fixed. For now, check that this isn't
+; vectorized.
+; CHECK-LABEL: foo_3double
+; CHECK-NOT: x double>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_3double(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul nsw i32 %u, 3
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double* %arrayidx4, align 8
+ %add5 = fadd double %0, %1
+ store double %add5, double* %arrayidx, align 8
+ %add11 = add nsw i32 %mul, 1
+ %idxprom12 = sext i32 %add11 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double* %arrayidx17, align 8
+ %add18 = fadd double %2, %3
+ store double %add18, double* %arrayidx13, align 8
+ %add24 = add nsw i32 %mul, 2
+ %idxprom25 = sext i32 %add24 to i64
+ %arrayidx26 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom25
+ %4 = load double* %arrayidx26, align 8
+ %arrayidx30 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom25
+ %5 = load double* %arrayidx30, align 8
+ %add31 = fadd double %4, %5
+ store double %add31, double* %arrayidx26, align 8
+ ret void
+}
+
+; SCEV should be able to tell that accesses A[C1 + C2*i], A[C1 + C2*i], ...
+; A[C1 + C2*i] are consecutive, if C2 is a power of 2, and C2 > C1 > 0.
+; Thus, the following code should be vectorized.
+; CHECK-LABEL: foo_2double
+; CHECK: x double>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_2double(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul nsw i32 %u, 2
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double* %arrayidx4, align 8
+ %add5 = fadd double %0, %1
+ store double %add5, double* %arrayidx, align 8
+ %add11 = add nsw i32 %mul, 1
+ %idxprom12 = sext i32 %add11 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double* %arrayidx17, align 8
+ %add18 = fadd double %2, %3
+ store double %add18, double* %arrayidx13, align 8
+ ret void
+}
+
+; Similar to the previous test, but with different datatype.
+; CHECK-LABEL: foo_4float
+; CHECK: x float>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_4float(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul nsw i32 %u, 4
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom
+ %0 = load float* %arrayidx, align 4
+ %arrayidx4 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom
+ %1 = load float* %arrayidx4, align 4
+ %add5 = fadd float %0, %1
+ store float %add5, float* %arrayidx, align 4
+ %add11 = add nsw i32 %mul, 1
+ %idxprom12 = sext i32 %add11 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom12
+ %2 = load float* %arrayidx13, align 4
+ %arrayidx17 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom12
+ %3 = load float* %arrayidx17, align 4
+ %add18 = fadd float %2, %3
+ store float %add18, float* %arrayidx13, align 4
+ %add24 = add nsw i32 %mul, 2
+ %idxprom25 = sext i32 %add24 to i64
+ %arrayidx26 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom25
+ %4 = load float* %arrayidx26, align 4
+ %arrayidx30 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom25
+ %5 = load float* %arrayidx30, align 4
+ %add31 = fadd float %4, %5
+ store float %add31, float* %arrayidx26, align 4
+ %add37 = add nsw i32 %mul, 3
+ %idxprom38 = sext i32 %add37 to i64
+ %arrayidx39 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom38
+ %6 = load float* %arrayidx39, align 4
+ %arrayidx43 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom38
+ %7 = load float* %arrayidx43, align 4
+ %add44 = fadd float %6, %7
+ store float %add44, float* %arrayidx39, align 4
+ ret void
+}
+
+; Similar to the previous tests, but now we are dealing with AddRec SCEV.
+; CHECK-LABEL: foo_loop
+; CHECK: x double>
+; Function Attrs: nounwind ssp uwtable
+define i32 @foo_loop(double* %A, i32 %n) #0 {
+entry:
+ %A.addr = alloca double*, align 8
+ %n.addr = alloca i32, align 4
+ %sum = alloca double, align 8
+ %i = alloca i32, align 4
+ store double* %A, double** %A.addr, align 8
+ store i32 %n, i32* %n.addr, align 4
+ store double 0.000000e+00, double* %sum, align 8
+ store i32 0, i32* %i, align 4
+ %cmp1 = icmp slt i32 0, %n
+ br i1 %cmp1, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.lr.ph, %for.body
+ %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
+ %mul = mul nsw i32 %0, 2
+ %idxprom = sext i32 %mul to i64
+ %arrayidx = getelementptr inbounds double* %A, i64 %idxprom
+ %2 = load double* %arrayidx, align 8
+ %mul1 = fmul double 7.000000e+00, %2
+ %add = add nsw i32 %mul, 1
+ %idxprom3 = sext i32 %add to i64
+ %arrayidx4 = getelementptr inbounds double* %A, i64 %idxprom3
+ %3 = load double* %arrayidx4, align 8
+ %mul5 = fmul double 7.000000e+00, %3
+ %add6 = fadd double %mul1, %mul5
+ %add7 = fadd double %1, %add6
+ store double %add7, double* %sum, align 8
+ %inc = add nsw i32 %0, 1
+ store i32 %inc, i32* %i, align 4
+ %cmp = icmp slt i32 %inc, %n
+ br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge: ; preds = %for.body
+ %split = phi double [ %add7, %for.body ]
+ br label %for.end
+
+for.end: ; preds = %for.cond.for.end_crit_edge, %entry
+ %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
+ %conv = fptosi double %.lcssa to i32
+ ret i32 %conv
+}
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = metadata !{metadata !"clang version 3.5.0 "}
diff --git a/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
new file mode 100644
index 000000000000..ed225743a6e2
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; We will keep trying to vectorize the basic block even if we have already found a vectorized store.
+; CHECK: test1
+; CHECK: store <2 x double>
+; CHECK: ret
+define void @test1(double* %a, double* %b, double* %c, double* %d) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ store double %mul, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %mul5, double* %arrayidx5, align 8
+ %0 = bitcast double* %a to <4 x i32>*
+ %1 = load <4 x i32>* %0, align 8
+ %2 = bitcast double* %b to <4 x i32>*
+ %3 = load <4 x i32>* %2, align 8
+ %4 = mul <4 x i32> %1, %3
+ %5 = bitcast double* %d to <4 x i32>*
+ store <4 x i32> %4, <4 x i32>* %5, align 8
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
new file mode 100644
index 000000000000..c7ec98adf135
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
@@ -0,0 +1,65 @@
+; RUN: opt -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -S < %s | FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+
+; This test used to crash because we were following phi chains incorrectly.
+; We used indices to get the incoming value of two phi nodes rather than
+; incoming block lookup.
+; This can give wrong results when the ordering of incoming
+; edges in the two phi nodes doesn't match.
+;CHECK-LABEL: bar
+
+%0 = type { %1, %2 }
+%1 = type { double, double }
+%2 = type { double, double }
+
+
+;define fastcc void @bar() {
+define void @bar() {
+ %1 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
+ %2 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
+ %3 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
+ %4 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
+ %5 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
+ %6 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
+ br label %7
+
+; <label>:7 ; preds = %18, %17, %17, %0
+ %8 = phi double [ 2.800000e+01, %0 ], [ %11, %18 ], [ %11, %17 ], [ %11, %17 ]
+ %9 = phi double [ 1.800000e+01, %0 ], [ %10, %18 ], [ %10, %17 ], [ %10, %17 ]
+ store double %9, double* %1, align 8
+ store double %8, double* %2, align 8
+ %10 = load double* %3, align 8
+ %11 = load double* %4, align 8
+ br i1 undef, label %12, label %13
+
+; <label>:12 ; preds = %7
+ ret void
+
+; <label>:13 ; preds = %7
+ store double %10, double* %5, align 8
+ store double %11, double* %6, align 8
+ br i1 undef, label %14, label %15
+
+; <label>:14 ; preds = %13
+ br label %15
+
+; <label>:15 ; preds = %14, %13
+ br i1 undef, label %16, label %17
+
+; <label>:16 ; preds = %15
+ unreachable
+
+; <label>:17 ; preds = %15
+ switch i32 undef, label %18 [
+ i32 32, label %7
+ i32 103, label %7
+ ]
+
+; <label>:18 ; preds = %17
+ br i1 undef, label %7, label %19
+
+; <label>:19 ; preds = %18
+ unreachable
+}
diff --git a/test/Transforms/SLPVectorizer/X86/cse.ll b/test/Transforms/SLPVectorizer/X86/cse.ll
index bbfd6f28ea97..d2ad7eb1a82a 100644
--- a/test/Transforms/SLPVectorizer/X86/cse.ll
+++ b/test/Transforms/SLPVectorizer/X86/cse.ll
@@ -217,3 +217,33 @@ return: ; preds = %entry, %if.end
ret i32 0
}
+%class.B.53.55 = type { %class.A.52.54, double }
+%class.A.52.54 = type { double, double, double }
+
+@a = external global double, align 8
+
+define void @PR19646(%class.B.53.55* %this) {
+entry:
+ br i1 undef, label %if.end13, label %if.end13
+
+sw.epilog7: ; No predecessors!
+ %.in = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 1
+ %0 = load double* %.in, align 8
+ %add = fadd double undef, 0.000000e+00
+ %add6 = fadd double %add, %0
+ %1 = load double* @a, align 8
+ %add8 = fadd double %1, 0.000000e+00
+ %_dy = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 2
+ %2 = load double* %_dy, align 8
+ %add10 = fadd double %add8, %2
+ br i1 undef, label %if.then12, label %if.end13
+
+if.then12: ; preds = %sw.epilog7
+ %3 = load double* undef, align 8
+ br label %if.end13
+
+if.end13: ; preds = %if.then12, %sw.epilog7, %entry
+ %x.1 = phi double [ 0.000000e+00, %if.then12 ], [ %add6, %sw.epilog7 ], [ undef, %entry ], [ undef, %entry ]
+ %b.0 = phi double [ %3, %if.then12 ], [ %add10, %sw.epilog7 ], [ undef, %entry], [ undef, %entry ]
+ unreachable
+}
diff --git a/test/Transforms/SLPVectorizer/X86/extractcost.ll b/test/Transforms/SLPVectorizer/X86/extractcost.ll
new file mode 100644
index 000000000000..01baf66e2f95
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/extractcost.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK-LABEL: @foo(
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n, i32 %m) {
+entry:
+ %mul = mul nsw i32 %n, 5
+ %add = add nsw i32 %mul, 9
+ store i32 %add, i32* %A, align 4
+ %mul1 = mul nsw i32 %n, 9
+ %add2 = add nsw i32 %mul1, 9
+ %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+ store i32 %add2, i32* %arrayidx3, align 4
+ %mul4 = shl i32 %n, 3
+ %add5 = add nsw i32 %mul4, 9
+ %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+ store i32 %add5, i32* %arrayidx6, align 4
+ %mul7 = mul nsw i32 %n, 10
+ %add8 = add nsw i32 %mul7, 9
+ %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+ store i32 %add8, i32* %arrayidx9, align 4
+ %externaluse1 = add nsw i32 %add, %m
+ %externaluse2 = mul nsw i32 %add, %m ; we should add the extract cost only once and the store will be vectorized
+ %add10 = add nsw i32 %externaluse1, %externaluse2
+ ret i32 %add10
+}
diff --git a/test/Transforms/SLPVectorizer/X86/gep.ll b/test/Transforms/SLPVectorizer/X86/gep.ll
new file mode 100644
index 000000000000..9e105ec98489
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/gep.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S |FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; Test if SLP can handle GEP expressions.
+; The test performs the following actions:
+; x->first = y->first + 16
+; x->second = y->second + 16
+
+; CHECK-LABEL: foo1
+; CHECK: <2 x i32*>
+define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
+ %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
+ %2 = load i32** %1, align 8
+ %3 = getelementptr inbounds i32* %2, i64 16
+ %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+ store i32* %3, i32** %4, align 8
+ %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
+ %6 = load i32** %5, align 8
+ %7 = getelementptr inbounds i32* %6, i64 16
+ %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+ store i32* %7, i32** %8, align 8
+ ret void
+}
+
+; Test that we don't vectorize GEP expressions if the indices are not constants.
+; We can't produce efficient code in that case.
+; CHECK-LABEL: foo2
+; CHECK-NOT: <2 x i32*>
+define void @foo2 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y, i32 %i) {
+ %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
+ %2 = load i32** %1, align 8
+ %3 = getelementptr inbounds i32* %2, i32 %i
+ %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+ store i32* %3, i32** %4, align 8
+ %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
+ %6 = load i32** %5, align 8
+ %7 = getelementptr inbounds i32* %6, i32 %i
+ %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+ store i32* %7, i32** %8, align 8
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll b/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
index 43f7aed9f519..9eda29f101ac 100644
--- a/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
+++ b/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
@@ -1,4 +1,5 @@
; RUN: opt -S -slp-vectorizer -slp-threshold=-10000 < %s | FileCheck %s
+; RUN: opt -S -slp-vectorizer -slp-threshold=0 < %s | FileCheck %s -check-prefix=ZEROTHRESH
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -194,4 +195,88 @@ define <4 x float> @simple_select_partial_vector(<4 x float> %a, <4 x float> %b,
ret <4 x float> %rb
}
+; Make sure that vectorization happens even if insertelement operations
+; must be rescheduled. The case here is from compiling Julia.
+define <4 x float> @reschedule_extract(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: @reschedule_extract(
+; CHECK: %1 = fadd <4 x float> %a, %b
+ %a0 = extractelement <4 x float> %a, i32 0
+ %b0 = extractelement <4 x float> %b, i32 0
+ %c0 = fadd float %a0, %b0
+ %v0 = insertelement <4 x float> undef, float %c0, i32 0
+ %a1 = extractelement <4 x float> %a, i32 1
+ %b1 = extractelement <4 x float> %b, i32 1
+ %c1 = fadd float %a1, %b1
+ %v1 = insertelement <4 x float> %v0, float %c1, i32 1
+ %a2 = extractelement <4 x float> %a, i32 2
+ %b2 = extractelement <4 x float> %b, i32 2
+ %c2 = fadd float %a2, %b2
+ %v2 = insertelement <4 x float> %v1, float %c2, i32 2
+ %a3 = extractelement <4 x float> %a, i32 3
+ %b3 = extractelement <4 x float> %b, i32 3
+ %c3 = fadd float %a3, %b3
+ %v3 = insertelement <4 x float> %v2, float %c3, i32 3
+ ret <4 x float> %v3
+}
+
+; Check that the cost model for vectorization takes credit for
+; instructions that are erased.
+define <4 x float> @take_credit(<4 x float> %a, <4 x float> %b) {
+; ZEROTHRESH-LABEL: @take_credit(
+; ZEROTHRESH: %1 = fadd <4 x float> %a, %b
+ %a0 = extractelement <4 x float> %a, i32 0
+ %b0 = extractelement <4 x float> %b, i32 0
+ %c0 = fadd float %a0, %b0
+ %a1 = extractelement <4 x float> %a, i32 1
+ %b1 = extractelement <4 x float> %b, i32 1
+ %c1 = fadd float %a1, %b1
+ %a2 = extractelement <4 x float> %a, i32 2
+ %b2 = extractelement <4 x float> %b, i32 2
+ %c2 = fadd float %a2, %b2
+ %a3 = extractelement <4 x float> %a, i32 3
+ %b3 = extractelement <4 x float> %b, i32 3
+ %c3 = fadd float %a3, %b3
+ %v0 = insertelement <4 x float> undef, float %c0, i32 0
+ %v1 = insertelement <4 x float> %v0, float %c1, i32 1
+ %v2 = insertelement <4 x float> %v1, float %c2, i32 2
+ %v3 = insertelement <4 x float> %v2, float %c3, i32 3
+ ret <4 x float> %v3
+}
+
+; Make sure we handle multiple trees that feed one build vector correctly.
+define <4 x double> @multi_tree(double %w, double %x, double %y, double %z) {
+entry:
+ %t0 = fadd double %w , 0.000000e+00
+ %t1 = fadd double %x , 1.000000e+00
+ %t2 = fadd double %y , 2.000000e+00
+ %t3 = fadd double %z , 3.000000e+00
+ %t4 = fmul double %t0, 1.000000e+00
+ %i1 = insertelement <4 x double> undef, double %t4, i32 3
+ %t5 = fmul double %t1, 1.000000e+00
+ %i2 = insertelement <4 x double> %i1, double %t5, i32 2
+ %t6 = fmul double %t2, 1.000000e+00
+ %i3 = insertelement <4 x double> %i2, double %t6, i32 1
+ %t7 = fmul double %t3, 1.000000e+00
+ %i4 = insertelement <4 x double> %i3, double %t7, i32 0
+ ret <4 x double> %i4
+}
+; CHECK-LABEL: @multi_tree
+; CHECK-DAG: %[[V0:.+]] = insertelement <2 x double> undef, double %w, i32 0
+; CHECK-DAG: %[[V1:.+]] = insertelement <2 x double> %[[V0]], double %x, i32 1
+; CHECK-DAG: %[[V2:.+]] = fadd <2 x double> %[[V1]], <double 0.000000e+00, double 1.000000e+00>
+; CHECK-DAG: %[[V3:.+]] = insertelement <2 x double> undef, double %y, i32 0
+; CHECK-DAG: %[[V4:.+]] = insertelement <2 x double> %[[V3]], double %z, i32 1
+; CHECK-DAG: %[[V5:.+]] = fadd <2 x double> %[[V4]], <double 2.000000e+00, double 3.000000e+00>
+; CHECK-DAG: %[[V6:.+]] = fmul <2 x double> <double 1.000000e+00, double 1.000000e+00>, %[[V2]]
+; CHECK-DAG: %[[V7:.+]] = extractelement <2 x double> %[[V6]], i32 0
+; CHECK-DAG: %[[I1:.+]] = insertelement <4 x double> undef, double %[[V7]], i32 3
+; CHECK-DAG: %[[V8:.+]] = extractelement <2 x double> %[[V6]], i32 1
+; CHECK-DAG: %[[I2:.+]] = insertelement <4 x double> %[[I1]], double %[[V8]], i32 2
+; CHECK-DAG: %[[V9:.+]] = fmul <2 x double> <double 1.000000e+00, double 1.000000e+00>, %[[V5]]
+; CHECK-DAG: %[[V10:.+]] = extractelement <2 x double> %[[V9]], i32 0
+; CHECK-DAG: %[[I3:.+]] = insertelement <4 x double> %i2, double %[[V10]], i32 1
+; CHECK-DAG: %[[V11:.+]] = extractelement <2 x double> %[[V9]], i32 1
+; CHECK-DAG: %[[I4:.+]] = insertelement <4 x double> %i3, double %[[V11]], i32 0
+; CHECK: ret <4 x double> %[[I4]]
+
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/Transforms/SLPVectorizer/X86/intrinsic.ll b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
new file mode 100644
index 000000000000..937252f4146b
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
@@ -0,0 +1,386 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-999 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+declare double @llvm.fabs.f64(double) nounwind readnone
+
+;CHECK-LABEL: @vec_fabs_f64(
+;CHECK: load <2 x double>
+;CHECK: load <2 x double>
+;CHECK: call <2 x double> @llvm.fabs.v2f64
+;CHECK: store <2 x double>
+;CHECK: ret
+define void @vec_fabs_f64(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %call = tail call double @llvm.fabs.f64(double %mul) nounwind readnone
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ %call5 = tail call double @llvm.fabs.f64(double %mul5) nounwind readnone
+ store double %call, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %call5, double* %arrayidx5, align 8
+ ret void
+}
+
+declare float @llvm.copysign.f32(float, float) nounwind readnone
+
+;CHECK-LABEL: @vec_copysign_f32(
+;CHECK: load <4 x float>
+;CHECK: load <4 x float>
+;CHECK: call <4 x float> @llvm.copysign.v4f32
+;CHECK: store <4 x float>
+;CHECK: ret
+define void @vec_copysign_f32(float* %a, float* %b, float* noalias %c) {
+entry:
+ %0 = load float* %a, align 4
+ %1 = load float* %b, align 4
+ %call0 = tail call float @llvm.copysign.f32(float %0, float %1) nounwind readnone
+ store float %call0, float* %c, align 4
+
+ %ix2 = getelementptr inbounds float* %a, i64 1
+ %2 = load float* %ix2, align 4
+ %ix3 = getelementptr inbounds float* %b, i64 1
+ %3 = load float* %ix3, align 4
+ %call1 = tail call float @llvm.copysign.f32(float %2, float %3) nounwind readnone
+ %c1 = getelementptr inbounds float* %c, i64 1
+ store float %call1, float* %c1, align 4
+
+ %ix4 = getelementptr inbounds float* %a, i64 2
+ %4 = load float* %ix4, align 4
+ %ix5 = getelementptr inbounds float* %b, i64 2
+ %5 = load float* %ix5, align 4
+ %call2 = tail call float @llvm.copysign.f32(float %4, float %5) nounwind readnone
+ %c2 = getelementptr inbounds float* %c, i64 2
+ store float %call2, float* %c2, align 4
+
+ %ix6 = getelementptr inbounds float* %a, i64 3
+ %6 = load float* %ix6, align 4
+ %ix7 = getelementptr inbounds float* %b, i64 3
+ %7 = load float* %ix7, align 4
+ %call3 = tail call float @llvm.copysign.f32(float %6, float %7) nounwind readnone
+ %c3 = getelementptr inbounds float* %c, i64 3
+ store float %call3, float* %c3, align 4
+
+ ret void
+}
+
+declare i32 @llvm.bswap.i32(i32) nounwind readnone
+
+define void @vec_bswap_i32(i32* %a, i32* %b, i32* %c) {
+entry:
+ %i0 = load i32* %a, align 4
+ %i1 = load i32* %b, align 4
+ %add1 = add i32 %i0, %i1
+ %call1 = tail call i32 @llvm.bswap.i32(i32 %add1) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %i2 = load i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %i3 = load i32* %arrayidx3, align 4
+ %add2 = add i32 %i2, %i3
+ %call2 = tail call i32 @llvm.bswap.i32(i32 %add2) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %i4 = load i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %i5 = load i32* %arrayidx5, align 4
+ %add3 = add i32 %i4, %i5
+ %call3 = tail call i32 @llvm.bswap.i32(i32 %add3) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %i6 = load i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %i7 = load i32* %arrayidx7, align 4
+ %add4 = add i32 %i6, %i7
+ %call4 = tail call i32 @llvm.bswap.i32(i32 %add4) nounwind readnone
+
+ store i32 %call1, i32* %c, align 4
+ %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ store i32 %call2, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ store i32 %call3, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ store i32 %call4, i32* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_bswap_i32(
+; CHECK: load <4 x i32>
+; CHECK: load <4 x i32>
+; CHECK: call <4 x i32> @llvm.bswap.v4i32
+; CHECK: store <4 x i32>
+; CHECK: ret
+}
+
+declare i32 @llvm.ctlz.i32(i32,i1) nounwind readnone
+
+define void @vec_ctlz_i32(i32* %a, i32* %b, i32* %c, i1) {
+entry:
+ %i0 = load i32* %a, align 4
+ %i1 = load i32* %b, align 4
+ %add1 = add i32 %i0, %i1
+ %call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %i2 = load i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %i3 = load i32* %arrayidx3, align 4
+ %add2 = add i32 %i2, %i3
+ %call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 true) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %i4 = load i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %i5 = load i32* %arrayidx5, align 4
+ %add3 = add i32 %i4, %i5
+ %call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %i6 = load i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %i7 = load i32* %arrayidx7, align 4
+ %add4 = add i32 %i6, %i7
+ %call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 true) nounwind readnone
+
+ store i32 %call1, i32* %c, align 4
+ %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ store i32 %call2, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ store i32 %call3, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ store i32 %call4, i32* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_ctlz_i32(
+; CHECK: load <4 x i32>
+; CHECK: load <4 x i32>
+; CHECK: call <4 x i32> @llvm.ctlz.v4i32
+; CHECK: store <4 x i32>
+; CHECK: ret
+}
+
+define void @vec_ctlz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
+entry:
+ %i0 = load i32* %a, align 4
+ %i1 = load i32* %b, align 4
+ %add1 = add i32 %i0, %i1
+ %call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %i2 = load i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %i3 = load i32* %arrayidx3, align 4
+ %add2 = add i32 %i2, %i3
+ %call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 false) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %i4 = load i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %i5 = load i32* %arrayidx5, align 4
+ %add3 = add i32 %i4, %i5
+ %call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %i6 = load i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %i7 = load i32* %arrayidx7, align 4
+ %add4 = add i32 %i6, %i7
+ %call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 false) nounwind readnone
+
+ store i32 %call1, i32* %c, align 4
+ %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ store i32 %call2, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ store i32 %call3, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ store i32 %call4, i32* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_ctlz_i32_neg(
+; CHECK-NOT: call <4 x i32> @llvm.ctlz.v4i32
+
+}
+
+
+declare i32 @llvm.cttz.i32(i32,i1) nounwind readnone
+
+define void @vec_cttz_i32(i32* %a, i32* %b, i32* %c, i1) {
+entry:
+ %i0 = load i32* %a, align 4
+ %i1 = load i32* %b, align 4
+ %add1 = add i32 %i0, %i1
+ %call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %i2 = load i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %i3 = load i32* %arrayidx3, align 4
+ %add2 = add i32 %i2, %i3
+ %call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 true) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %i4 = load i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %i5 = load i32* %arrayidx5, align 4
+ %add3 = add i32 %i4, %i5
+ %call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %i6 = load i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %i7 = load i32* %arrayidx7, align 4
+ %add4 = add i32 %i6, %i7
+ %call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 true) nounwind readnone
+
+ store i32 %call1, i32* %c, align 4
+ %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ store i32 %call2, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ store i32 %call3, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ store i32 %call4, i32* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_cttz_i32(
+; CHECK: load <4 x i32>
+; CHECK: load <4 x i32>
+; CHECK: call <4 x i32> @llvm.cttz.v4i32
+; CHECK: store <4 x i32>
+; CHECK: ret
+}
+
+define void @vec_cttz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
+entry:
+ %i0 = load i32* %a, align 4
+ %i1 = load i32* %b, align 4
+ %add1 = add i32 %i0, %i1
+ %call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %i2 = load i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %i3 = load i32* %arrayidx3, align 4
+ %add2 = add i32 %i2, %i3
+ %call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 false) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %i4 = load i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %i5 = load i32* %arrayidx5, align 4
+ %add3 = add i32 %i4, %i5
+ %call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %i6 = load i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %i7 = load i32* %arrayidx7, align 4
+ %add4 = add i32 %i6, %i7
+ %call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 false) nounwind readnone
+
+ store i32 %call1, i32* %c, align 4
+ %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ store i32 %call2, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ store i32 %call3, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ store i32 %call4, i32* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_cttz_i32_neg(
+; CHECK-NOT: call <4 x i32> @llvm.cttz.v4i32
+}
+
+
+declare float @llvm.powi.f32(float, i32)
+define void @vec_powi_f32(float* %a, float* %b, float* %c, i32 %P) {
+entry:
+ %i0 = load float* %a, align 4
+ %i1 = load float* %b, align 4
+ %add1 = fadd float %i0, %i1
+ %call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds float* %a, i32 1
+ %i2 = load float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float* %b, i32 1
+ %i3 = load float* %arrayidx3, align 4
+ %add2 = fadd float %i2, %i3
+ %call2 = tail call float @llvm.powi.f32(float %add2,i32 %P) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds float* %a, i32 2
+ %i4 = load float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float* %b, i32 2
+ %i5 = load float* %arrayidx5, align 4
+ %add3 = fadd float %i4, %i5
+ %call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds float* %a, i32 3
+ %i6 = load float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float* %b, i32 3
+ %i7 = load float* %arrayidx7, align 4
+ %add4 = fadd float %i6, %i7
+ %call4 = tail call float @llvm.powi.f32(float %add4,i32 %P) nounwind readnone
+
+ store float %call1, float* %c, align 4
+ %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ store float %call2, float* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ store float %call3, float* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ store float %call4, float* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_powi_f32(
+; CHECK: load <4 x float>
+; CHECK: load <4 x float>
+; CHECK: call <4 x float> @llvm.powi.v4f32
+; CHECK: store <4 x float>
+; CHECK: ret
+}
+
+
+define void @vec_powi_f32_neg(float* %a, float* %b, float* %c, i32 %P, i32 %Q) {
+entry:
+ %i0 = load float* %a, align 4
+ %i1 = load float* %b, align 4
+ %add1 = fadd float %i0, %i1
+ %call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
+
+ %arrayidx2 = getelementptr inbounds float* %a, i32 1
+ %i2 = load float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float* %b, i32 1
+ %i3 = load float* %arrayidx3, align 4
+ %add2 = fadd float %i2, %i3
+ %call2 = tail call float @llvm.powi.f32(float %add2,i32 %Q) nounwind readnone
+
+ %arrayidx4 = getelementptr inbounds float* %a, i32 2
+ %i4 = load float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float* %b, i32 2
+ %i5 = load float* %arrayidx5, align 4
+ %add3 = fadd float %i4, %i5
+ %call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
+
+ %arrayidx6 = getelementptr inbounds float* %a, i32 3
+ %i6 = load float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float* %b, i32 3
+ %i7 = load float* %arrayidx7, align 4
+ %add4 = fadd float %i6, %i7
+ %call4 = tail call float @llvm.powi.f32(float %add4,i32 %Q) nounwind readnone
+
+ store float %call1, float* %c, align 4
+ %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ store float %call2, float* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ store float %call3, float* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ store float %call4, float* %arrayidx10, align 4
+ ret void
+
+; CHECK-LABEL: @vec_powi_f32_neg(
+; CHECK-NOT: call <4 x float> @llvm.powi.v4f32
+}
diff --git a/test/Transforms/SLPVectorizer/X86/lit.local.cfg b/test/Transforms/SLPVectorizer/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Transforms/SLPVectorizer/X86/lit.local.cfg
+++ b/test/Transforms/SLPVectorizer/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/SLPVectorizer/X86/metadata.ll b/test/Transforms/SLPVectorizer/X86/metadata.ll
new file mode 100644
index 000000000000..5bd2fa4ea684
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/metadata.ll
@@ -0,0 +1,61 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK-LABEL: test1
+;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA:[0-9]+]]
+;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA]]
+;CHECK: fmul <2 x double>{{.*}}!fpmath ![[FP1:[0-9]+]]
+;CHECK: store <2 x double>{{.*}}!tbaa ![[TBAA]]
+;CHECK: ret void
+
+define void @test1(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8, !tbaa !4
+ %i1 = load double* %b, align 8, !tbaa !4
+ %mul = fmul double %i0, %i1, !fpmath !0
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8, !tbaa !4
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8, !tbaa !4
+ %mul5 = fmul double %i3, %i4, !fpmath !0
+ store double %mul, double* %c, align 8, !tbaa !4
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %mul5, double* %arrayidx5, align 8, !tbaa !4
+ ret void
+}
+
+;CHECK-LABEL: test2
+;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA]]
+;CHECK: load <2 x double>{{.*}}!tbaa ![[TBAA]]
+;CHECK: fmul <2 x double>{{.*}}!fpmath ![[FP2:[0-9]+]]
+;CHECK: store <2 x double>{{.*}}!tbaa ![[TBAA]]
+;CHECK: ret void
+
+define void @test2(double* %a, double* %b, i8* %e) {
+entry:
+ %i0 = load double* %a, align 8, !tbaa !4
+ %i1 = load double* %b, align 8, !tbaa !4
+ %mul = fmul double %i0, %i1, !fpmath !1
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8, !tbaa !4
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8, !tbaa !4
+ %mul5 = fmul double %i3, %i4, !fpmath !1
+ %c = bitcast i8* %e to double*
+ store double %mul, double* %c, align 8, !tbaa !4
+ %carrayidx5 = getelementptr inbounds i8* %e, i64 8
+ %arrayidx5 = bitcast i8* %carrayidx5 to double*
+ store double %mul5, double* %arrayidx5, align 8, !tbaa !4
+ ret void
+}
+
+;CHECK-DAG: ![[TBAA]] = metadata !{metadata [[TYPEC:!.*]], metadata [[TYPEC]], i64 0}
+;CHECK-DAG: ![[FP1]] = metadata !{float 5.000000e+00}
+;CHECK-DAG: ![[FP2]] = metadata !{float 2.500000e+00}
+!0 = metadata !{ float 5.0 }
+!1 = metadata !{ float 2.5 }
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"omnipotent char", metadata !2}
+!4 = metadata !{metadata !"double", metadata !3}
diff --git a/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll b/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll
new file mode 100644
index 000000000000..b250735874c5
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/non-vectorizable-intrinsic.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -slp-vectorizer -o - -S -slp-threshold=-1000
+
+target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx--nvidiacl"
+
+; CTLZ cannot be vectorized currently because the second argument is a scalar
+; for both the scalar and vector forms of the intrinsic. In the future it
+; should be possible to vectorize such functions.
+; This test causes an assert if LLVM tries to vectorize CTLZ.
+
+define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
+entry:
+ %0 = extractelement <2 x i8> %x, i32 0
+ %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+ %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+ %1 = extractelement <2 x i8> %x, i32 1
+ %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+ %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+ ret <2 x i8> %vecinit2
+}
+
+define <2 x i8> @cltz_test2(<2 x i8> %x) #1 {
+entry:
+ %0 = extractelement <2 x i8> %x, i32 0
+ %1 = extractelement <2 x i8> %x, i32 1
+ %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+ %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+ %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+ %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+ ret <2 x i8> %vecinit2
+}
+
+declare i8 @llvm.ctlz.i8(i8, i1) #3
+
+attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
diff --git a/test/Transforms/SLPVectorizer/X86/phi.ll b/test/Transforms/SLPVectorizer/X86/phi.ll
index 964e0e4efee7..0c53b60f3df1 100644
--- a/test/Transforms/SLPVectorizer/X86/phi.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi.ll
@@ -221,7 +221,7 @@ entry:
; CHECK: load x86_fp80*
; CHECK: load x86_fp80*
; CHECK-NOT: insertelement <2 x x86_fp80>
-; CHECK_NOT: insertelement <2 x x86_fp80>
+; CHECK-NOT: insertelement <2 x x86_fp80>
br i1 undef, label %then, label %end
then:
diff --git a/test/Transforms/SLPVectorizer/X86/pr19657.ll b/test/Transforms/SLPVectorizer/X86/pr19657.ll
new file mode 100644
index 000000000000..93523089c28b
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/pr19657.ll
@@ -0,0 +1,73 @@
+; RUN: opt < %s -O1 -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;CHECK: load <2 x double>*
+;CHECK: fadd <2 x double>
+;CHECK: store <2 x double>
+
+; Function Attrs: nounwind uwtable
+define void @foo(double* %x) #0 {
+ %1 = alloca double*, align 8
+ store double* %x, double** %1, align 8
+ %2 = load double** %1, align 8
+ %3 = getelementptr inbounds double* %2, i64 0
+ %4 = load double* %3, align 8
+ %5 = load double** %1, align 8
+ %6 = getelementptr inbounds double* %5, i64 0
+ %7 = load double* %6, align 8
+ %8 = fadd double %4, %7
+ %9 = load double** %1, align 8
+ %10 = getelementptr inbounds double* %9, i64 0
+ %11 = load double* %10, align 8
+ %12 = fadd double %8, %11
+ %13 = load double** %1, align 8
+ %14 = getelementptr inbounds double* %13, i64 0
+ store double %12, double* %14, align 8
+ %15 = load double** %1, align 8
+ %16 = getelementptr inbounds double* %15, i64 1
+ %17 = load double* %16, align 8
+ %18 = load double** %1, align 8
+ %19 = getelementptr inbounds double* %18, i64 1
+ %20 = load double* %19, align 8
+ %21 = fadd double %17, %20
+ %22 = load double** %1, align 8
+ %23 = getelementptr inbounds double* %22, i64 1
+ %24 = load double* %23, align 8
+ %25 = fadd double %21, %24
+ %26 = load double** %1, align 8
+ %27 = getelementptr inbounds double* %26, i64 1
+ store double %25, double* %27, align 8
+ %28 = load double** %1, align 8
+ %29 = getelementptr inbounds double* %28, i64 2
+ %30 = load double* %29, align 8
+ %31 = load double** %1, align 8
+ %32 = getelementptr inbounds double* %31, i64 2
+ %33 = load double* %32, align 8
+ %34 = fadd double %30, %33
+ %35 = load double** %1, align 8
+ %36 = getelementptr inbounds double* %35, i64 2
+ %37 = load double* %36, align 8
+ %38 = fadd double %34, %37
+ %39 = load double** %1, align 8
+ %40 = getelementptr inbounds double* %39, i64 2
+ store double %38, double* %40, align 8
+ %41 = load double** %1, align 8
+ %42 = getelementptr inbounds double* %41, i64 3
+ %43 = load double* %42, align 8
+ %44 = load double** %1, align 8
+ %45 = getelementptr inbounds double* %44, i64 3
+ %46 = load double* %45, align 8
+ %47 = fadd double %43, %46
+ %48 = load double** %1, align 8
+ %49 = getelementptr inbounds double* %48, i64 3
+ %50 = load double* %49, align 8
+ %51 = fadd double %47, %50
+ %52 = load double** %1, align 8
+ %53 = getelementptr inbounds double* %52, i64 3
+ store double %51, double* %53, align 8
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Transforms/SLPVectorizer/X86/tiny-tree.ll b/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
index 2747a1f48997..10c3130b424c 100644
--- a/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
+++ b/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
@@ -138,3 +138,18 @@ for.body: ; preds = %entry, %for.body
for.end: ; preds = %for.body, %entry
ret void
}
+
+
+; CHECK-LABEL: store_splat
+; CHECK: store <4 x float>
+define void @store_splat(float*, float) {
+ %3 = getelementptr inbounds float* %0, i64 0
+ store float %1, float* %3, align 4
+ %4 = getelementptr inbounds float* %0, i64 1
+ store float %1, float* %4, align 4
+ %5 = getelementptr inbounds float* %0, i64 2
+ store float %1, float* %5, align 4
+ %6 = getelementptr inbounds float* %0, i64 3
+ store float %1, float* %6, align 4
+ ret void
+}
diff --git a/test/Transforms/SLPVectorizer/X86/value-bug.ll b/test/Transforms/SLPVectorizer/X86/value-bug.ll
new file mode 100644
index 000000000000..64d2ae1c7d79
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/value-bug.ll
@@ -0,0 +1,80 @@
+; RUN: opt -slp-vectorizer < %s -S -mtriple="x86_64-grtev3-linux-gnu" -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-grtev3-linux-gnu"
+
+; We used to crash on this example because we were building a constant
+; expression during vectorization and the vectorizer expects instructions
+; as elements of the vectorized tree.
+; CHECK-LABEL: @test
+; PR19621
+
+define void @test() {
+bb279:
+ br label %bb283
+
+bb283:
+ %Av.sroa.8.0 = phi float [ undef, %bb279 ], [ %tmp315, %exit ]
+ %Av.sroa.5.0 = phi float [ undef, %bb279 ], [ %tmp319, %exit ]
+ %Av.sroa.3.0 = phi float [ undef, %bb279 ], [ %tmp307, %exit ]
+ %Av.sroa.0.0 = phi float [ undef, %bb279 ], [ %tmp317, %exit ]
+ br label %bb284
+
+bb284:
+ %tmp7.i = fpext float %Av.sroa.3.0 to double
+ %tmp8.i = fsub double %tmp7.i, undef
+ %tmp9.i = fsub double %tmp8.i, undef
+ %tmp17.i = fpext float %Av.sroa.8.0 to double
+ %tmp19.i = fsub double %tmp17.i, undef
+ %tmp20.i = fsub double %tmp19.i, undef
+ br label %bb21.i
+
+bb21.i:
+ br i1 undef, label %bb22.i, label %exit
+
+bb22.i:
+ %tmp24.i = fadd double undef, %tmp9.i
+ %tmp26.i = fadd double undef, %tmp20.i
+ br label %bb32.i
+
+bb32.i:
+ %xs.0.i = phi double [ %tmp24.i, %bb22.i ], [ 0.000000e+00, %bb32.i ]
+ %ys.0.i = phi double [ %tmp26.i, %bb22.i ], [ 0.000000e+00, %bb32.i ]
+ br i1 undef, label %bb32.i, label %bb21.i
+
+exit:
+ %tmp303 = fpext float %Av.sroa.0.0 to double
+ %tmp304 = fmul double %tmp303, undef
+ %tmp305 = fadd double undef, %tmp304
+ %tmp306 = fadd double %tmp305, undef
+ %tmp307 = fptrunc double %tmp306 to float
+ %tmp311 = fpext float %Av.sroa.5.0 to double
+ %tmp312 = fmul double %tmp311, 0.000000e+00
+ %tmp313 = fadd double undef, %tmp312
+ %tmp314 = fadd double %tmp313, undef
+ %tmp315 = fptrunc double %tmp314 to float
+ %tmp317 = fptrunc double undef to float
+ %tmp319 = fptrunc double undef to float
+ br label %bb283
+}
+
+; Make sure that we properly handle constant-folded vectorized trees. The
+; vectorizer starts at the type (%t2, %t3) and will constant-fold the tree.
+; The code that handles insertelement instructions must handle this.
+define <4 x double> @constant_folding() {
+entry:
+ %t0 = fadd double 1.000000e+00 , 0.000000e+00
+ %t1 = fadd double 1.000000e+00 , 1.000000e+00
+ %t2 = fmul double %t0, 1.000000e+00
+ %i1 = insertelement <4 x double> undef, double %t2, i32 1
+ %t3 = fmul double %t1, 1.000000e+00
+ %i2 = insertelement <4 x double> %i1, double %t3, i32 0
+ ret <4 x double> %i2
+}
+
+; CHECK-LABEL: @constant_folding
+; CHECK: %[[V0:.+]] = extractelement <2 x double> <double 1.000000e+00, double 2.000000e+00>, i32 0
+; CHECK: %[[V1:.+]] = insertelement <4 x double> undef, double %[[V0]], i32 1
+; CHECK: %[[V2:.+]] = extractelement <2 x double> <double 1.000000e+00, double 2.000000e+00>, i32 1
+; CHECK: %[[V3:.+]] = insertelement <4 x double> %[[V1]], double %[[V2]], i32 0
+; CHECK: ret <4 x double> %[[V3]]
diff --git a/test/Transforms/SLPVectorizer/XCore/lit.local.cfg b/test/Transforms/SLPVectorizer/XCore/lit.local.cfg
index 4d17d4642045..bb48713fe33e 100644
--- a/test/Transforms/SLPVectorizer/XCore/lit.local.cfg
+++ b/test/Transforms/SLPVectorizer/XCore/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'XCore' in targets:
+if not 'XCore' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/SROA/address-spaces.ll b/test/Transforms/SROA/address-spaces.ll
new file mode 100644
index 000000000000..847f2851bbbf
--- /dev/null
+++ b/test/Transforms/SROA/address-spaces.ll
@@ -0,0 +1,68 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
+declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture readonly, i32, i32, i1)
+declare void @llvm.memcpy.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture readonly, i32, i32, i1)
+declare void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i32, i32, i1)
+
+
+; Make sure an illegal bitcast isn't introduced
+define void @test_address_space_1_1(<2 x i64> addrspace(1)* %a, i16 addrspace(1)* %b) {
+; CHECK-LABEL: @test_address_space_1_1(
+; CHECK: load <2 x i64> addrspace(1)* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64> addrspace(1)* {{.*}}, align 2
+; CHECK: ret void
+ %aa = alloca <2 x i64>, align 16
+ %aptr = bitcast <2 x i64> addrspace(1)* %a to i8 addrspace(1)*
+ %aaptr = bitcast <2 x i64>* %aa to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i32(i8* %aaptr, i8 addrspace(1)* %aptr, i32 16, i32 2, i1 false)
+ %bptr = bitcast i16 addrspace(1)* %b to i8 addrspace(1)*
+ call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
+ ret void
+}
+
+define void @test_address_space_1_0(<2 x i64> addrspace(1)* %a, i16* %b) {
+; CHECK-LABEL: @test_address_space_1_0(
+; CHECK: load <2 x i64> addrspace(1)* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
+; CHECK: ret void
+ %aa = alloca <2 x i64>, align 16
+ %aptr = bitcast <2 x i64> addrspace(1)* %a to i8 addrspace(1)*
+ %aaptr = bitcast <2 x i64>* %aa to i8*
+ call void @llvm.memcpy.p0i8.p1i8.i32(i8* %aaptr, i8 addrspace(1)* %aptr, i32 16, i32 2, i1 false)
+ %bptr = bitcast i16* %b to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
+ ret void
+}
+
+define void @test_address_space_0_1(<2 x i64>* %a, i16 addrspace(1)* %b) {
+; CHECK-LABEL: @test_address_space_0_1(
+; CHECK: load <2 x i64>* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64> addrspace(1)* {{.*}}, align 2
+; CHECK: ret void
+ %aa = alloca <2 x i64>, align 16
+ %aptr = bitcast <2 x i64>* %a to i8*
+ %aaptr = bitcast <2 x i64>* %aa to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
+ %bptr = bitcast i16 addrspace(1)* %b to i8 addrspace(1)*
+ call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
+ ret void
+}
+
+%struct.struct_test_27.0.13 = type { i32, float, i64, i8, [4 x i32] }
+
+; Function Attrs: nounwind
+define void @copy_struct([5 x i64] %in.coerce) {
+; CHECK-LABEL: @copy_struct(
+; CHECK-NOT: memcpy
+for.end:
+ %in = alloca %struct.struct_test_27.0.13, align 8
+ %0 = bitcast %struct.struct_test_27.0.13* %in to [5 x i64]*
+ store [5 x i64] %in.coerce, [5 x i64]* %0, align 8
+ %scevgep9 = getelementptr %struct.struct_test_27.0.13* %in, i32 0, i32 4, i32 0
+ %scevgep910 = bitcast i32* %scevgep9 to i8*
+ call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* undef, i8* %scevgep910, i32 16, i32 4, i1 false)
+ ret void
+}
+
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
index 5d3e4b5d8b2c..dc2b16550a06 100644
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -sroa -S | FileCheck %s
; RUN: opt < %s -sroa -force-ssa-updater -S | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
declare void @llvm.lifetime.start(i64, i8* nocapture)
declare void @llvm.lifetime.end(i64, i8* nocapture)
@@ -404,6 +404,7 @@ entry:
}
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i32, i1) nounwind
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
@@ -1150,6 +1151,24 @@ entry:
; CHECK: ret
}
+define void @PR14105_as1({ [16 x i8] } addrspace(1)* %ptr) {
+; Make sure that the right address space pointer is used for the type check.
+; CHECK-LABEL: @PR14105_as1(
+
+entry:
+ %a = alloca { [16 x i8] }, align 8
+; CHECK: alloca [16 x i8], align 8
+
+ %gep = getelementptr inbounds { [16 x i8] } addrspace(1)* %ptr, i64 -1
+; CHECK-NEXT: getelementptr inbounds { [16 x i8] } addrspace(1)* %ptr, i16 -1, i32 0, i16 0
+
+ %cast1 = bitcast { [16 x i8 ] } addrspace(1)* %gep to i8 addrspace(1)*
+ %cast2 = bitcast { [16 x i8 ] }* %a to i8*
+ call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* %cast1, i8* %cast2, i32 16, i32 8, i1 true)
+ ret void
+; CHECK: ret
+}
+
define void @PR14465() {
; Ensure that we don't crash when analyzing a alloca larger than the maximum
; integer type width (MAX_INT_BITS) supported by llvm (1048576*32 > (1<<23)-1).
@@ -1317,6 +1336,28 @@ define void @PR15805(i1 %a, i1 %b) {
ret void
}
+define void @PR15805.1(i1 %a, i1 %b) {
+; Same as the normal PR15805, but rigged to place the use before the def inside
+; of looping unreachable code. This helps ensure that we aren't sensitive to the
+; order in which the uses of the alloca are visited.
+;
+; CHECK-LABEL: @PR15805.1(
+; CHECK-NOT: alloca
+; CHECK: ret void
+
+ %c = alloca i64, align 8
+ br label %exit
+
+loop:
+ %cond.in = select i1 undef, i64* %c, i64* %p.0.c
+ %p.0.c = select i1 undef, i64* %c, i64* %c
+ %cond = load i64* %cond.in, align 8
+ br i1 undef, label %loop, label %exit
+
+exit:
+ ret void
+}
+
define void @PR16651.1(i8* %a) {
; This test case caused a crash due to the volatile memcpy in combination with
; lowering to integer loads and stores of a width other than that of the original
@@ -1356,3 +1397,46 @@ entry:
%cond105.i.i = load float* %cond105.in.i.i, align 8
ret void
}
+
+define void @test23(i32 %x) {
+; CHECK-LABEL: @test23(
+; CHECK-NOT: alloca
+; CHECK: ret void
+entry:
+ %a = alloca i32, align 4
+ store i32 %x, i32* %a, align 4
+ %gep1 = getelementptr inbounds i32* %a, i32 1
+ %gep0 = getelementptr inbounds i32* %a, i32 0
+ %cast1 = bitcast i32* %gep1 to i8*
+ %cast0 = bitcast i32* %gep0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast0, i32 4, i32 1, i1 false)
+ ret void
+}
+
+define void @PR18615() {
+; CHECK-LABEL: @PR18615(
+; CHECK-NOT: alloca
+; CHECK: ret void
+entry:
+ %f = alloca i8
+ %gep = getelementptr i8* %f, i64 -1
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %gep, i32 1, i32 1, i1 false)
+ ret void
+}
+
+define void @test24(i8* %src, i8* %dst) {
+; CHECK-LABEL: @test24(
+; CHECK: alloca i64, align 16
+; CHECK: load volatile i64* %{{[^,]*}}, align 1
+; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 16
+; CHECK: load volatile i64* %{{[^,]*}}, align 16
+; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 1
+
+entry:
+ %a = alloca i64, align 16
+ %ptr = bitcast i64* %a to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i32 1, i1 true)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i32 1, i1 true)
+ ret void
+}
+
diff --git a/test/Transforms/SROA/slice-order-independence.ll b/test/Transforms/SROA/slice-order-independence.ll
new file mode 100644
index 000000000000..364ef85f1d14
--- /dev/null
+++ b/test/Transforms/SROA/slice-order-independence.ll
@@ -0,0 +1,37 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+
+; Check that the chosen type for a split is independent of the order of the
+; slices, even in the case of types that are skipped because their width is
+; not a byte-width multiple.
+define void @skipped_inttype_first({ i16*, i32 }*) {
+; CHECK-LABEL: @skipped_inttype_first
+; CHECK: alloca i8*
+ %arg = alloca { i16*, i32 }, align 8
+ %2 = bitcast { i16*, i32 }* %0 to i8*
+ %3 = bitcast { i16*, i32 }* %arg to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
+ %b = getelementptr inbounds { i16*, i32 }* %arg, i64 0, i32 0
+ %pb0 = bitcast i16** %b to i63*
+ %b0 = load i63* %pb0
+ %pb1 = bitcast i16** %b to i8**
+ %b1 = load i8** %pb1
+ ret void
+}
+
+define void @skipped_inttype_last({ i16*, i32 }*) {
+; CHECK-LABEL: @skipped_inttype_last
+; CHECK: alloca i8*
+ %arg = alloca { i16*, i32 }, align 8
+ %2 = bitcast { i16*, i32 }* %0 to i8*
+ %3 = bitcast { i16*, i32 }* %arg to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
+ %b = getelementptr inbounds { i16*, i32 }* %arg, i64 0, i32 0
+ %pb1 = bitcast i16** %b to i8**
+ %b1 = load i8** %pb1
+ %pb0 = bitcast i16** %b to i63*
+ %b0 = load i63* %pb0
+ ret void
+}
diff --git a/test/Transforms/SROA/slice-width.ll b/test/Transforms/SROA/slice-width.ll
new file mode 100644
index 000000000000..179780b4afee
--- /dev/null
+++ b/test/Transforms/SROA/slice-width.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+
+define void @no_split_on_non_byte_width(i32) {
+; This tests that allocas are not split into slices whose width is not a byte-width multiple.
+ %arg = alloca i32 , align 8
+ store i32 %0, i32* %arg
+ br label %load_i32
+
+load_i32:
+; CHECK-LABEL: load_i32:
+; CHECK-NOT: bitcast {{.*}} to i1
+; CHECK-NOT: zext i1
+ %r0 = load i32* %arg
+ br label %load_i1
+
+load_i1:
+; CHECK-LABEL: load_i1:
+; CHECK: bitcast {{.*}} to i1
+ %p1 = bitcast i32* %arg to i1*
+ %t1 = load i1* %p1
+ ret void
+}
diff --git a/test/Transforms/SROA/vector-promotion.ll b/test/Transforms/SROA/vector-promotion.ll
index 4f084214d396..9c9f6a1d08d4 100644
--- a/test/Transforms/SROA/vector-promotion.ll
+++ b/test/Transforms/SROA/vector-promotion.ll
@@ -150,6 +150,53 @@ entry:
; CHECK-NEXT: ret
}
+declare void @llvm.memcpy.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i32, i1) nounwind
+
+; Same as test4, but with a source pointer in a different-sized address space.
+define i32 @test4_as1(<4 x i32> %x, <4 x i32> %y, <4 x i32> addrspace(1)* %z) {
+; CHECK-LABEL: @test4_as1(
+entry:
+ %a = alloca [2 x <4 x i32>]
+; CHECK-NOT: alloca
+
+ %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ store <4 x i32> %x, <4 x i32>* %a.x
+ %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ store <4 x i32> %y, <4 x i32>* %a.y
+; CHECK-NOT: store
+
+ %a.y.cast = bitcast <4 x i32>* %a.y to i8*
+ %z.cast = bitcast <4 x i32> addrspace(1)* %z to i8 addrspace(1)*
+ call void @llvm.memcpy.p0i8.p1i8.i32(i8* %a.y.cast, i8 addrspace(1)* %z.cast, i32 16, i32 1, i1 false)
+; CHECK-NOT: memcpy
+
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
+ %z.tmp1 = getelementptr inbounds <4 x i32> addrspace(1)* %z, i16 0, i16 2
+ %z.tmp1.cast = bitcast i32 addrspace(1)* %z.tmp1 to i8 addrspace(1)*
+ call void @llvm.memcpy.p0i8.p1i8.i32(i8* %a.tmp1.cast, i8 addrspace(1)* %z.tmp1.cast, i32 4, i32 1, i1 false)
+ %tmp1 = load i32* %a.tmp1
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %tmp2 = load i32* %a.tmp2
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %tmp3 = load i32* %a.tmp3
+; CHECK-NOT: memcpy
+; CHECK: %[[load:.*]] = load <4 x i32> addrspace(1)* %z
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32> addrspace(1)* %z, i64 0, i64 2
+; CHECK-NEXT: %[[element_load:.*]] = load i32 addrspace(1)* %[[gep]]
+; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
+; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
+; CHECK-NEXT: extractelement <4 x i32> %[[load]], i32 3
+; CHECK-NEXT: extractelement <4 x i32> %[[load]], i32 0
+
+ %tmp4 = add i32 %tmp1, %tmp2
+ %tmp5 = add i32 %tmp3, %tmp4
+ ret i32 %tmp5
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+}
+
define i32 @test5(<4 x i32> %x, <4 x i32> %y, <4 x i32>* %z) {
; CHECK-LABEL: @test5(
; The same as the above, but with reversed source and destination for the
diff --git a/test/Transforms/SampleProfile/Inputs/bad_discriminator_value.prof b/test/Transforms/SampleProfile/Inputs/bad_discriminator_value.prof
new file mode 100644
index 000000000000..cc7f0d4f2773
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/bad_discriminator_value.prof
@@ -0,0 +1,2 @@
+empty:100:0
+1.-3: 10
diff --git a/test/Transforms/SampleProfile/Inputs/bad_fn_header.prof b/test/Transforms/SampleProfile/Inputs/bad_fn_header.prof
new file mode 100644
index 000000000000..abcb0ba38415
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/bad_fn_header.prof
@@ -0,0 +1,3 @@
+3empty:100:BAD
+0: 0
+1: 100
diff --git a/test/Transforms/SampleProfile/Inputs/bad_line_values.prof b/test/Transforms/SampleProfile/Inputs/bad_line_values.prof
new file mode 100644
index 000000000000..61ba7c01591e
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/bad_line_values.prof
@@ -0,0 +1,2 @@
+empty:100:0
+-1: 10
diff --git a/test/Transforms/SampleProfile/Inputs/bad_mangle.prof b/test/Transforms/SampleProfile/Inputs/bad_mangle.prof
new file mode 100644
index 000000000000..50fe86119b71
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/bad_mangle.prof
@@ -0,0 +1,3 @@
+double convert<std::string, float>(float):2909472:181842
+0: 181842
+1: 181842
diff --git a/test/Transforms/SampleProfile/Inputs/bad_sample_line.prof b/test/Transforms/SampleProfile/Inputs/bad_sample_line.prof
new file mode 100644
index 000000000000..038c45f77e30
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/bad_sample_line.prof
@@ -0,0 +1,3 @@
+empty:100:0
+0: 0
+1: BAD
diff --git a/test/Transforms/SampleProfile/Inputs/bad_samples.prof b/test/Transforms/SampleProfile/Inputs/bad_samples.prof
new file mode 100644
index 000000000000..a121d8c1ac40
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/bad_samples.prof
@@ -0,0 +1,2 @@
+empty:100:0
+1.3: -10
diff --git a/test/Transforms/SampleProfile/Inputs/branch.prof b/test/Transforms/SampleProfile/Inputs/branch.prof
index d19894d428ce..cd1cb5b1f16b 100644
--- a/test/Transforms/SampleProfile/Inputs/branch.prof
+++ b/test/Transforms/SampleProfile/Inputs/branch.prof
@@ -1,7 +1,4 @@
-symbol table
-1
-main
-main:15680:0:7
+main:15680:0
0: 0
4: 0
7: 0
diff --git a/test/Transforms/SampleProfile/Inputs/calls.prof b/test/Transforms/SampleProfile/Inputs/calls.prof
new file mode 100644
index 000000000000..57d3887dfb65
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/calls.prof
@@ -0,0 +1,10 @@
+_Z3sumii:105580:5279
+0: 5279
+1: 5279
+2: 5279
+main:225715:0
+2.1: 5553
+3: 5391
+# This indicates that at line 3 of this function, the 'then' branch
+# of the conditional is taken (discriminator '1').
+3.1: 5752 _Z3sumii:5860
diff --git a/test/Transforms/SampleProfile/Inputs/discriminator.prof b/test/Transforms/SampleProfile/Inputs/discriminator.prof
new file mode 100644
index 000000000000..a6bcbc511a16
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/discriminator.prof
@@ -0,0 +1,8 @@
+foo:1000:0
+1: 1
+2: 1
+2.1: 100
+3: 100
+3.1: 5
+4: 100
+5: 1
diff --git a/test/Transforms/SampleProfile/Inputs/propagate.prof b/test/Transforms/SampleProfile/Inputs/propagate.prof
new file mode 100644
index 000000000000..b28609be66c1
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/propagate.prof
@@ -0,0 +1,17 @@
+_Z3fooiil:58139:0
+0: 0
+1: 0
+2: 0
+4: 1
+5: 10
+6: 0
+7: 5
+8: 3
+9: 0
+10: 0
+11: 6339
+12: 16191
+13: 8141
+16: 1
+18: 0
+19: 0
diff --git a/test/Transforms/SampleProfile/Inputs/syntax.prof b/test/Transforms/SampleProfile/Inputs/syntax.prof
new file mode 100644
index 000000000000..f3738912a9dc
--- /dev/null
+++ b/test/Transforms/SampleProfile/Inputs/syntax.prof
@@ -0,0 +1,3 @@
+empty:100:0
+0: 0
+1: 100
diff --git a/test/Transforms/SampleProfile/branch.ll b/test/Transforms/SampleProfile/branch.ll
index 516762763d7b..65f1f1769934 100644
--- a/test/Transforms/SampleProfile/branch.ll
+++ b/test/Transforms/SampleProfile/branch.ll
@@ -46,8 +46,8 @@ if.end: ; preds = %entry
tail call void @llvm.dbg.value(metadata !{i32 %call}, i64 0, metadata !17), !dbg !30
%cmp1 = icmp sgt i32 %call, 100, !dbg !35
br i1 %cmp1, label %for.body, label %if.end6, !dbg !35
-; CHECK: edge if.end -> for.body probability is 2243 / 2244 = 99.9554% [HOT edge]
-; CHECK: edge if.end -> if.end6 probability is 1 / 2244 = 0.0445633%
+; CHECK: edge if.end -> for.body probability is 1 / 2 = 50%
+; CHECK: edge if.end -> if.end6 probability is 1 / 2 = 50%
for.body: ; preds = %if.end, %for.body
%u.016 = phi i32 [ %inc, %for.body ], [ 0, %if.end ]
@@ -65,8 +65,8 @@ for.body: ; preds = %if.end, %for.body
tail call void @llvm.dbg.value(metadata !{i32 %inc}, i64 0, metadata !21), !dbg !38
%exitcond = icmp eq i32 %inc, %call, !dbg !38
br i1 %exitcond, label %if.end6, label %for.body, !dbg !38
-; CHECK: edge for.body -> if.end6 probability is 1 / 2244 = 0.0445633%
-; CHECK: edge for.body -> for.body probability is 2243 / 2244 = 99.9554% [HOT edge]
+; CHECK: edge for.body -> if.end6 probability is 1 / 10227 = 0.00977804
+; CHECK: edge for.body -> for.body probability is 10226 / 10227 = 99.9902% [HOT edge]
if.end6: ; preds = %for.body, %if.end
%result.0 = phi double [ 0.000000e+00, %if.end ], [ %sub, %for.body ]
@@ -117,17 +117,17 @@ attributes #4 = { nounwind readonly }
!16 = metadata !{i32 786468, null, null, metadata !"double", i32 0, i64 64, i64 64, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
!17 = metadata !{i32 786688, metadata !4, metadata !"limit", metadata !5, i32 8, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [limit] [line 8]
!18 = metadata !{i32 786688, metadata !19, metadata !"s", metadata !5, i32 10, metadata !16, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [s] [line 10]
-!19 = metadata !{i32 786443, metadata !1, metadata !20, i32 9, i32 0, i32 2} ; [ DW_TAG_lexical_block ] [./branch.cc]
-!20 = metadata !{i32 786443, metadata !1, metadata !4, i32 9, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [./branch.cc]
+!19 = metadata !{i32 786443, metadata !1, metadata !20, i32 9, i32 0, i32 0, i32 2} ; [ DW_TAG_lexical_block ] [./branch.cc]
+!20 = metadata !{i32 786443, metadata !1, metadata !4, i32 9, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [./branch.cc]
!21 = metadata !{i32 786688, metadata !22, metadata !"u", metadata !5, i32 11, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [u] [line 11]
-!22 = metadata !{i32 786443, metadata !1, metadata !19, i32 11, i32 0, i32 3} ; [ DW_TAG_lexical_block ] [./branch.cc]
+!22 = metadata !{i32 786443, metadata !1, metadata !19, i32 11, i32 0, i32 0, i32 3} ; [ DW_TAG_lexical_block ] [./branch.cc]
!23 = metadata !{i32 786688, metadata !24, metadata !"x", metadata !5, i32 12, metadata !16, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [x] [line 12]
-!24 = metadata !{i32 786443, metadata !1, metadata !22, i32 11, i32 0, i32 4} ; [ DW_TAG_lexical_block ] [./branch.cc]
+!24 = metadata !{i32 786443, metadata !1, metadata !22, i32 11, i32 0, i32 0, i32 4} ; [ DW_TAG_lexical_block ] [./branch.cc]
!25 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
!26 = metadata !{metadata !"clang version 3.4 (trunk 192896) (llvm/trunk 192895)"}
!27 = metadata !{i32 4, i32 0, metadata !4, null}
!28 = metadata !{i32 5, i32 0, metadata !29, null}
-!29 = metadata !{i32 786443, metadata !1, metadata !4, i32 5, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./branch.cc]
+!29 = metadata !{i32 786443, metadata !1, metadata !4, i32 5, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./branch.cc]
!30 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ]
!31 = metadata !{metadata !32, metadata !32, i64 0}
!32 = metadata !{metadata !"any pointer", metadata !33, i64 0}
diff --git a/test/Transforms/SampleProfile/calls.ll b/test/Transforms/SampleProfile/calls.ll
new file mode 100644
index 000000000000..381be8714eb6
--- /dev/null
+++ b/test/Transforms/SampleProfile/calls.ll
@@ -0,0 +1,116 @@
+; RUN: opt < %s -sample-profile -sample-profile-file=%S/Inputs/calls.prof | opt -analyze -branch-prob | FileCheck %s
+
+; Original C++ test case
+;
+; #include <stdio.h>
+;
+; int sum(int x, int y) {
+; return x + y;
+; }
+;
+; int main() {
+; int s, i = 0;
+; while (i++ < 20000 * 20000)
+; if (i != 100) s = sum(i, s); else s = 30;
+; printf("sum is %d\n", s);
+; return 0;
+; }
+
+@.str = private unnamed_addr constant [11 x i8] c"sum is %d\0A\00", align 1
+
+; Function Attrs: nounwind uwtable
+define i32 @_Z3sumii(i32 %x, i32 %y) {
+entry:
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ %0 = load i32* %x.addr, align 4, !dbg !11
+ %1 = load i32* %y.addr, align 4, !dbg !11
+ %add = add nsw i32 %0, %1, !dbg !11
+ ret i32 %add, !dbg !11
+}
+
+; Function Attrs: uwtable
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ %s = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 0, i32* %retval
+ store i32 0, i32* %i, align 4, !dbg !12
+ br label %while.cond, !dbg !13
+
+while.cond: ; preds = %if.end, %entry
+ %0 = load i32* %i, align 4, !dbg !14
+ %inc = add nsw i32 %0, 1, !dbg !14
+ store i32 %inc, i32* %i, align 4, !dbg !14
+ %cmp = icmp slt i32 %0, 400000000, !dbg !14
+ br i1 %cmp, label %while.body, label %while.end, !dbg !14
+; CHECK: edge while.cond -> while.body probability is 5391 / 5392 = 99.9815% [HOT edge]
+; CHECK: edge while.cond -> while.end probability is 1 / 5392 = 0.018546%
+
+while.body: ; preds = %while.cond
+ %1 = load i32* %i, align 4, !dbg !16
+ %cmp1 = icmp ne i32 %1, 100, !dbg !16
+ br i1 %cmp1, label %if.then, label %if.else, !dbg !16
+; Without discriminator information, the profiler used to think that
+; both branches out of while.body had the same weight. In reality,
+; the edge while.body->if.then is taken most of the time.
+;
+; CHECK: edge while.body -> if.then probability is 5752 / 5753 = 99.9826% [HOT edge]
+; CHECK: edge while.body -> if.else probability is 1 / 5753 = 0.0173822%
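+; A rough restatement of the arithmetic in the CHECK lines above: branch
+; probabilities are printed as weight / total-weight, so a 5752:1 split of
+; the profile weight between the two successors gives
+;   5752 / 5753 = 99.9826%   and   1 / 5753 = 0.0173822%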
+
+
+if.then: ; preds = %while.body
+ %2 = load i32* %i, align 4, !dbg !18
+ %3 = load i32* %s, align 4, !dbg !18
+ %call = call i32 @_Z3sumii(i32 %2, i32 %3), !dbg !18
+ store i32 %call, i32* %s, align 4, !dbg !18
+ br label %if.end, !dbg !18
+
+if.else: ; preds = %while.body
+ store i32 30, i32* %s, align 4, !dbg !20
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ br label %while.cond, !dbg !22
+
+while.end: ; preds = %while.cond
+ %4 = load i32* %s, align 4, !dbg !24
+ %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i32 %4), !dbg !24
+ ret i32 0, !dbg !25
+}
+
+declare i32 @printf(i8*, ...) #2
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [./calls.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"calls.cc", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4, metadata !7}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"sum", metadata !"sum", metadata !"", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32, i32)* @_Z3sumii, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [sum]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [./calls.cc]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 7, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [main]
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5 "}
+!11 = metadata !{i32 4, i32 0, metadata !4, null}
+!12 = metadata !{i32 8, i32 0, metadata !7, null} ; [ DW_TAG_imported_declaration ]
+!13 = metadata !{i32 9, i32 0, metadata !7, null}
+!14 = metadata !{i32 9, i32 0, metadata !15, null}
+!15 = metadata !{i32 786443, metadata !1, metadata !7, i32 9, i32 0, i32 1, i32 1} ; [ DW_TAG_lexical_block ] [./calls.cc]
+!16 = metadata !{i32 10, i32 0, metadata !17, null}
+!17 = metadata !{i32 786443, metadata !1, metadata !7, i32 10, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [./calls.cc]
+!18 = metadata !{i32 10, i32 0, metadata !19, null}
+!19 = metadata !{i32 786443, metadata !1, metadata !17, i32 10, i32 0, i32 1, i32 2} ; [ DW_TAG_lexical_block ] [./calls.cc]
+!20 = metadata !{i32 10, i32 0, metadata !21, null}
+!21 = metadata !{i32 786443, metadata !1, metadata !17, i32 10, i32 0, i32 2, i32 3} ; [ DW_TAG_lexical_block ] [./calls.cc]
+!22 = metadata !{i32 10, i32 0, metadata !23, null}
+!23 = metadata !{i32 786443, metadata !1, metadata !17, i32 10, i32 0, i32 3, i32 4} ; [ DW_TAG_lexical_block ] [./calls.cc]
+!24 = metadata !{i32 11, i32 0, metadata !7, null}
+!25 = metadata !{i32 12, i32 0, metadata !7, null}
diff --git a/test/Transforms/SampleProfile/discriminator.ll b/test/Transforms/SampleProfile/discriminator.ll
new file mode 100644
index 000000000000..0f773a541a4e
--- /dev/null
+++ b/test/Transforms/SampleProfile/discriminator.ll
@@ -0,0 +1,90 @@
+; RUN: opt < %s -sample-profile -sample-profile-file=%S/Inputs/discriminator.prof | opt -analyze -branch-prob | FileCheck %s
+
+; Original code
+;
+; 1 int foo(int i) {
+; 2 int x = 0;
+; 3 while (i < 100) {
+; 4 if (i < 5) x--;
+; 5 i++;
+; 6 }
+; 7 return x;
+; 8 }
+;
+; In this test, if the loop is executed 100 times, the decrement operation
+; at line 4 should only execute 5 times. This is reflected in the profile
+; data for line offset 3. In Inputs/discriminator.prof, we have:
+;
+; 3: 100
+; 3.1: 5
+;
+; This means that the predicate 'i < 5' (line offset 3) is executed 100 times,
+; but the then branch (line offset 3.1) is only executed 5 times.
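+;
+; As a rough sketch of that mapping (the exact discriminator numbering is an
+; assumption here, not something the test checks):
+;
+;   if (i < 5)     // line 4 = offset 3 from foo()  -> "3: 100"
+;     x--;         // same line, discriminator 1    -> "3.1: 5"
+;
+; i.e. each profile entry has the form '<line offset>[.<discriminator>]: <samples>'.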
+
+define i32 @foo(i32 %i) #0 {
+; CHECK: Printing analysis 'Branch Probability Analysis' for function 'foo':
+entry:
+ %i.addr = alloca i32, align 4
+ %x = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ store i32 0, i32* %x, align 4, !dbg !10
+ br label %while.cond, !dbg !11
+
+while.cond: ; preds = %if.end, %entry
+ %0 = load i32* %i.addr, align 4, !dbg !12
+ %cmp = icmp slt i32 %0, 100, !dbg !12
+ br i1 %cmp, label %while.body, label %while.end, !dbg !12
+; CHECK: edge while.cond -> while.body probability is 100 / 101 = 99.0099% [HOT edge]
+; CHECK: edge while.cond -> while.end probability is 1 / 101 = 0.990099%
+
+while.body: ; preds = %while.cond
+ %1 = load i32* %i.addr, align 4, !dbg !14
+ %cmp1 = icmp slt i32 %1, 50, !dbg !14
+ br i1 %cmp1, label %if.then, label %if.end, !dbg !14
+; CHECK: edge while.body -> if.then probability is 5 / 100 = 5%
+; CHECK: edge while.body -> if.end probability is 95 / 100 = 95% [HOT edge]
+
+if.then: ; preds = %while.body
+ %2 = load i32* %x, align 4, !dbg !17
+ %dec = add nsw i32 %2, -1, !dbg !17
+ store i32 %dec, i32* %x, align 4, !dbg !17
+ br label %if.end, !dbg !17
+
+if.end: ; preds = %if.then, %while.body
+ %3 = load i32* %i.addr, align 4, !dbg !19
+ %inc = add nsw i32 %3, 1, !dbg !19
+ store i32 %inc, i32* %i.addr, align 4, !dbg !19
+ br label %while.cond, !dbg !20
+
+while.end: ; preds = %while.cond
+ %4 = load i32* %x, align 4, !dbg !21
+ ret i32 %4, !dbg !21
+}
+
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8}
+!llvm.ident = !{!9}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [discriminator.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"discriminator.c", metadata !"."}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 (i32)* @foo, null, null, metadata !2, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [discriminator.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!9 = metadata !{metadata !"clang version 3.5 "}
+!10 = metadata !{i32 2, i32 0, metadata !4, null}
+!11 = metadata !{i32 3, i32 0, metadata !4, null}
+!12 = metadata !{i32 3, i32 0, metadata !13, null}
+!13 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 0, i32 1, i32 2} ; [ DW_TAG_lexical_block ] [discriminator.c]
+!14 = metadata !{i32 4, i32 0, metadata !15, null}
+!15 = metadata !{i32 786443, metadata !1, metadata !16, i32 4, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [discriminator.c]
+!16 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [discriminator.c]
+!17 = metadata !{i32 4, i32 0, metadata !18, null}
+!18 = metadata !{i32 786443, metadata !1, metadata !15, i32 4, i32 0, i32 1, i32 3} ; [ DW_TAG_lexical_block ] [discriminator.c]
+!19 = metadata !{i32 5, i32 0, metadata !16, null}
+!20 = metadata !{i32 6, i32 0, metadata !16, null}
+!21 = metadata !{i32 7, i32 0, metadata !4, null}
diff --git a/test/Transforms/SampleProfile/propagate.ll b/test/Transforms/SampleProfile/propagate.ll
new file mode 100644
index 000000000000..939361b23703
--- /dev/null
+++ b/test/Transforms/SampleProfile/propagate.ll
@@ -0,0 +1,243 @@
+; RUN: opt < %s -sample-profile -sample-profile-file=%S/Inputs/propagate.prof | opt -analyze -branch-prob | FileCheck %s
+
+; Original C++ code for this test case:
+;
+; #include <stdio.h>
+;
+; long foo(int x, int y, long N) {
+; if (x < y) {
+; return y - x;
+; } else {
+; for (long i = 0; i < N; i++) {
+; if (i > N / 3)
+; x--;
+; if (i > N / 4) {
+; y++;
+; x += 3;
+; } else {
+; for (unsigned j = 0; j < i; j++) {
+; x += j;
+; y -= 3;
+; }
+; }
+; }
+; }
+; return y * x;
+; }
+;
+; int main() {
+; int x = 5678;
+; int y = 1234;
+; long N = 999999;
+; printf("foo(%d, %d, %ld) = %ld\n", x, y, N, foo(x, y, N));
+; return 0;
+; }
+
+; ModuleID = 'propagate.cc'
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@.str = private unnamed_addr constant [24 x i8] c"foo(%d, %d, %ld) = %ld\0A\00", align 1
+
+; Function Attrs: nounwind uwtable
+define i64 @_Z3fooiil(i32 %x, i32 %y, i64 %N) #0 {
+entry:
+ %retval = alloca i64, align 8
+ %x.addr = alloca i32, align 4
+ %y.addr = alloca i32, align 4
+ %N.addr = alloca i64, align 8
+ %i = alloca i64, align 8
+ %j = alloca i32, align 4
+ store i32 %x, i32* %x.addr, align 4
+ store i32 %y, i32* %y.addr, align 4
+ store i64 %N, i64* %N.addr, align 8
+ %0 = load i32* %x.addr, align 4, !dbg !11
+ %1 = load i32* %y.addr, align 4, !dbg !11
+ %cmp = icmp slt i32 %0, %1, !dbg !11
+ br i1 %cmp, label %if.then, label %if.else, !dbg !11
+
+if.then: ; preds = %entry
+ %2 = load i32* %y.addr, align 4, !dbg !13
+ %3 = load i32* %x.addr, align 4, !dbg !13
+ %sub = sub nsw i32 %2, %3, !dbg !13
+ %conv = sext i32 %sub to i64, !dbg !13
+ store i64 %conv, i64* %retval, !dbg !13
+ br label %return, !dbg !13
+
+if.else: ; preds = %entry
+ store i64 0, i64* %i, align 8, !dbg !15
+ br label %for.cond, !dbg !15
+
+for.cond: ; preds = %for.inc16, %if.else
+ %4 = load i64* %i, align 8, !dbg !15
+ %5 = load i64* %N.addr, align 8, !dbg !15
+ %cmp1 = icmp slt i64 %4, %5, !dbg !15
+ br i1 %cmp1, label %for.body, label %for.end18, !dbg !15
+; CHECK: edge for.cond -> for.body probability is 10 / 11 = 90.9091% [HOT edge]
+; CHECK: edge for.cond -> for.end18 probability is 1 / 11 = 9.09091%
+
+for.body: ; preds = %for.cond
+ %6 = load i64* %i, align 8, !dbg !18
+ %7 = load i64* %N.addr, align 8, !dbg !18
+ %div = sdiv i64 %7, 3, !dbg !18
+ %cmp2 = icmp sgt i64 %6, %div, !dbg !18
+ br i1 %cmp2, label %if.then3, label %if.end, !dbg !18
+; CHECK: edge for.body -> if.then3 probability is 1 / 5 = 20%
+; CHECK: edge for.body -> if.end probability is 4 / 5 = 80%
+
+if.then3: ; preds = %for.body
+ %8 = load i32* %x.addr, align 4, !dbg !21
+ %dec = add nsw i32 %8, -1, !dbg !21
+ store i32 %dec, i32* %x.addr, align 4, !dbg !21
+ br label %if.end, !dbg !21
+
+if.end: ; preds = %if.then3, %for.body
+ %9 = load i64* %i, align 8, !dbg !22
+ %10 = load i64* %N.addr, align 8, !dbg !22
+ %div4 = sdiv i64 %10, 4, !dbg !22
+ %cmp5 = icmp sgt i64 %9, %div4, !dbg !22
+ br i1 %cmp5, label %if.then6, label %if.else7, !dbg !22
+; CHECK: edge if.end -> if.then6 probability is 3 / 6342 = 0.0473037%
+; CHECK: edge if.end -> if.else7 probability is 6339 / 6342 = 99.9527% [HOT edge]
+
+if.then6: ; preds = %if.end
+ %11 = load i32* %y.addr, align 4, !dbg !24
+ %inc = add nsw i32 %11, 1, !dbg !24
+ store i32 %inc, i32* %y.addr, align 4, !dbg !24
+ %12 = load i32* %x.addr, align 4, !dbg !26
+ %add = add nsw i32 %12, 3, !dbg !26
+ store i32 %add, i32* %x.addr, align 4, !dbg !26
+ br label %if.end15, !dbg !27
+
+if.else7: ; preds = %if.end
+ store i32 0, i32* %j, align 4, !dbg !28
+ br label %for.cond8, !dbg !28
+
+for.cond8: ; preds = %for.inc, %if.else7
+ %13 = load i32* %j, align 4, !dbg !28
+ %conv9 = zext i32 %13 to i64, !dbg !28
+ %14 = load i64* %i, align 8, !dbg !28
+ %cmp10 = icmp slt i64 %conv9, %14, !dbg !28
+ br i1 %cmp10, label %for.body11, label %for.end, !dbg !28
+; CHECK: edge for.cond8 -> for.body11 probability is 16191 / 16192 = 99.9938% [HOT edge]
+; CHECK: edge for.cond8 -> for.end probability is 1 / 16192 = 0.00617589%
+
+for.body11: ; preds = %for.cond8
+ %15 = load i32* %j, align 4, !dbg !31
+ %16 = load i32* %x.addr, align 4, !dbg !31
+ %add12 = add i32 %16, %15, !dbg !31
+ store i32 %add12, i32* %x.addr, align 4, !dbg !31
+ %17 = load i32* %y.addr, align 4, !dbg !33
+ %sub13 = sub nsw i32 %17, 3, !dbg !33
+ store i32 %sub13, i32* %y.addr, align 4, !dbg !33
+ br label %for.inc, !dbg !34
+
+for.inc: ; preds = %for.body11
+ %18 = load i32* %j, align 4, !dbg !28
+ %inc14 = add i32 %18, 1, !dbg !28
+ store i32 %inc14, i32* %j, align 4, !dbg !28
+ br label %for.cond8, !dbg !28
+
+for.end: ; preds = %for.cond8
+ br label %if.end15
+
+if.end15: ; preds = %for.end, %if.then6
+ br label %for.inc16, !dbg !35
+
+for.inc16: ; preds = %if.end15
+ %19 = load i64* %i, align 8, !dbg !15
+ %inc17 = add nsw i64 %19, 1, !dbg !15
+ store i64 %inc17, i64* %i, align 8, !dbg !15
+ br label %for.cond, !dbg !15
+
+for.end18: ; preds = %for.cond
+ br label %if.end19
+
+if.end19: ; preds = %for.end18
+ %20 = load i32* %y.addr, align 4, !dbg !36
+ %21 = load i32* %x.addr, align 4, !dbg !36
+ %mul = mul nsw i32 %20, %21, !dbg !36
+ %conv20 = sext i32 %mul to i64, !dbg !36
+ store i64 %conv20, i64* %retval, !dbg !36
+ br label %return, !dbg !36
+
+return: ; preds = %if.end19, %if.then
+ %22 = load i64* %retval, !dbg !37
+ ret i64 %22, !dbg !37
+}
+
+; Function Attrs: uwtable
+define i32 @main() #1 {
+entry:
+ %retval = alloca i32, align 4
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %N = alloca i64, align 8
+ store i32 0, i32* %retval
+ store i32 5678, i32* %x, align 4, !dbg !38
+ store i32 1234, i32* %y, align 4, !dbg !39
+ store i64 999999, i64* %N, align 8, !dbg !40
+ %0 = load i32* %x, align 4, !dbg !41
+ %1 = load i32* %y, align 4, !dbg !41
+ %2 = load i64* %N, align 8, !dbg !41
+ %3 = load i32* %x, align 4, !dbg !41
+ %4 = load i32* %y, align 4, !dbg !41
+ %5 = load i64* %N, align 8, !dbg !41
+ %call = call i64 @_Z3fooiil(i32 %3, i32 %4, i64 %5), !dbg !41
+ %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8]* @.str, i32 0, i32 0), i32 %0, i32 %1, i64 %2, i64 %call), !dbg !41
+ ret i32 0, !dbg !42
+}
+
+declare i32 @printf(i8*, ...) #2
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+!llvm.ident = !{!10}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5 ", i1 false, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [propagate.cc] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"propagate.cc", metadata !"."}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4, metadata !7}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"foo", metadata !"foo", metadata !"", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i64 (i32, i32, i64)* @_Z3fooiil, null, null, metadata !2, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [foo]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [propagate.cc]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !2, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"main", metadata !"main", metadata !"", i32 24, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, i32 ()* @main, null, null, metadata !2, i32 24} ; [ DW_TAG_subprogram ] [line 24] [def] [main]
+!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
+!10 = metadata !{metadata !"clang version 3.5 "}
+!11 = metadata !{i32 4, i32 0, metadata !12, null}
+!12 = metadata !{i32 786443, metadata !1, metadata !4, i32 4, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!13 = metadata !{i32 5, i32 0, metadata !14, null}
+!14 = metadata !{i32 786443, metadata !1, metadata !12, i32 4, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!15 = metadata !{i32 7, i32 0, metadata !16, null}
+!16 = metadata !{i32 786443, metadata !1, metadata !17, i32 7, i32 0, i32 0, i32 3} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!17 = metadata !{i32 786443, metadata !1, metadata !12, i32 6, i32 0, i32 0, i32 2} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!18 = metadata !{i32 8, i32 0, metadata !19, null} ; [ DW_TAG_imported_declaration ]
+!19 = metadata !{i32 786443, metadata !1, metadata !20, i32 8, i32 0, i32 0, i32 5} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!20 = metadata !{i32 786443, metadata !1, metadata !16, i32 7, i32 0, i32 0, i32 4} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!21 = metadata !{i32 9, i32 0, metadata !19, null}
+!22 = metadata !{i32 10, i32 0, metadata !23, null}
+!23 = metadata !{i32 786443, metadata !1, metadata !20, i32 10, i32 0, i32 0, i32 6} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!24 = metadata !{i32 11, i32 0, metadata !25, null}
+!25 = metadata !{i32 786443, metadata !1, metadata !23, i32 10, i32 0, i32 0, i32 7} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!26 = metadata !{i32 12, i32 0, metadata !25, null}
+!27 = metadata !{i32 13, i32 0, metadata !25, null}
+!28 = metadata !{i32 14, i32 0, metadata !29, null}
+!29 = metadata !{i32 786443, metadata !1, metadata !30, i32 14, i32 0, i32 0, i32 9} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!30 = metadata !{i32 786443, metadata !1, metadata !23, i32 13, i32 0, i32 0, i32 8} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!31 = metadata !{i32 15, i32 0, metadata !32, null}
+!32 = metadata !{i32 786443, metadata !1, metadata !29, i32 14, i32 0, i32 0, i32 10} ; [ DW_TAG_lexical_block ] [propagate.cc]
+!33 = metadata !{i32 16, i32 0, metadata !32, null}
+!34 = metadata !{i32 17, i32 0, metadata !32, null}
+!35 = metadata !{i32 19, i32 0, metadata !20, null}
+!36 = metadata !{i32 21, i32 0, metadata !4, null}
+!37 = metadata !{i32 22, i32 0, metadata !4, null}
+!38 = metadata !{i32 25, i32 0, metadata !7, null}
+!39 = metadata !{i32 26, i32 0, metadata !7, null}
+!40 = metadata !{i32 27, i32 0, metadata !7, null}
+!41 = metadata !{i32 28, i32 0, metadata !7, null}
+!42 = metadata !{i32 29, i32 0, metadata !7, null}
diff --git a/test/Transforms/SampleProfile/syntax.ll b/test/Transforms/SampleProfile/syntax.ll
new file mode 100644
index 000000000000..53c65f44239c
--- /dev/null
+++ b/test/Transforms/SampleProfile/syntax.ll
@@ -0,0 +1,20 @@
+; RUN: not opt < %s -sample-profile -sample-profile-file=%S/Inputs/syntax.prof 2>&1 | FileCheck -check-prefix=NO-DEBUG %s
+; RUN: not opt < %s -sample-profile -sample-profile-file=missing.prof 2>&1 | FileCheck -check-prefix=MISSING-FILE %s
+; RUN: not opt < %s -sample-profile -sample-profile-file=%S/Inputs/bad_fn_header.prof 2>&1 | FileCheck -check-prefix=BAD-FN-HEADER %s
+; RUN: not opt < %s -sample-profile -sample-profile-file=%S/Inputs/bad_sample_line.prof 2>&1 | FileCheck -check-prefix=BAD-SAMPLE-LINE %s
+; RUN: not opt < %s -sample-profile -sample-profile-file=%S/Inputs/bad_line_values.prof 2>&1 | FileCheck -check-prefix=BAD-LINE-VALUES %s
+; RUN: not opt < %s -sample-profile -sample-profile-file=%S/Inputs/bad_discriminator_value.prof 2>&1 | FileCheck -check-prefix=BAD-DISCRIMINATOR-VALUE %s
+; RUN: not opt < %s -sample-profile -sample-profile-file=%S/Inputs/bad_samples.prof 2>&1 | FileCheck -check-prefix=BAD-SAMPLES %s
+; RUN: opt < %s -sample-profile -sample-profile-file=%S/Inputs/bad_mangle.prof 2>&1 >/dev/null
+
+define void @empty() {
+entry:
+ ret void
+}
+; NO-DEBUG: error: No debug information found in function empty
+; MISSING-FILE: error: missing.prof:
+; BAD-FN-HEADER: error: {{.*}}bad_fn_header.prof:1: Expected 'mangled_name:NUM:NUM', found 3empty:100:BAD
+; BAD-SAMPLE-LINE: error: {{.*}}bad_sample_line.prof:3: Expected 'NUM[.NUM]: NUM[ mangled_name:NUM]*', found 1: BAD
+; BAD-LINE-VALUES: error: {{.*}}bad_line_values.prof:2: Expected 'mangled_name:NUM:NUM', found -1: 10
+; BAD-DISCRIMINATOR-VALUE: error: {{.*}}bad_discriminator_value.prof:2: Expected 'NUM[.NUM]: NUM[ mangled_name:NUM]*', found 1.-3: 10
+; BAD-SAMPLES: error: {{.*}}bad_samples.prof:2: Expected 'NUM[.NUM]: NUM[ mangled_name:NUM]*', found 1.3: -10
diff --git a/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll b/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
index 3510dfc24d9a..8ac1d2570a1c 100644
--- a/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
+++ b/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
@@ -1,6 +1,6 @@
; PR1226
; RUN: opt < %s -scalarrepl -S | \
-; RUN: not grep "call void @llvm.memcpy.i32"
+; RUN: not grep "call void @llvm.memcpy.p0i8.p0i8.i32"
; RUN: opt < %s -scalarrepl -S | grep getelementptr
; END.
@@ -14,10 +14,10 @@ entry:
%L = alloca %struct.foo, align 2 ; <%struct.foo*> [#uses=1]
%L2 = getelementptr %struct.foo* %L, i32 0, i32 0 ; <i8*> [#uses=2]
%tmp13 = getelementptr %struct.foo* %P, i32 0, i32 0 ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32( i8* %L2, i8* %tmp13, i32 2, i32 1 )
+ call void @llvm.memcpy.p0i8.p0i8.i32( i8* %L2, i8* %tmp13, i32 2, i32 1, i1 false)
%tmp5 = load i8* %L2 ; <i8> [#uses=1]
%tmp56 = sext i8 %tmp5 to i32 ; <i32> [#uses=1]
ret i32 %tmp56
}
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1)
diff --git a/test/Transforms/ScalarRepl/vector_memcpy.ll b/test/Transforms/ScalarRepl/vector_memcpy.ll
index 33e8034f5734..dfba9e2c8074 100644
--- a/test/Transforms/ScalarRepl/vector_memcpy.ll
+++ b/test/Transforms/ScalarRepl/vector_memcpy.ll
@@ -9,8 +9,7 @@ define <16 x float> @foo(<16 x float> %A) nounwind {
store <16 x float> %A, <16 x float>* %tmp
%s = bitcast <16 x float>* %tmp to i8*
%s2 = bitcast <16 x float>* %tmp2 to i8*
- call void @llvm.memcpy.i64(i8* %s2, i8* %s, i64 64, i32 16)
-
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %s2, i8* %s, i64 64, i32 16, i1 false)
%R = load <16 x float>* %tmp2
ret <16 x float> %R
}
@@ -19,12 +18,11 @@ define <16 x float> @foo2(<16 x float> %A) nounwind {
%tmp2 = alloca <16 x float>, align 16
%s2 = bitcast <16 x float>* %tmp2 to i8*
- call void @llvm.memset.i64(i8* %s2, i8 0, i64 64, i32 16)
+ call void @llvm.memset.p0i8.i64(i8* %s2, i8 0, i64 64, i32 16, i1 false)
%R = load <16 x float>* %tmp2
ret <16 x float> %R
}
-
-declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
-declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
diff --git a/test/Transforms/Scalarizer/basic.ll b/test/Transforms/Scalarizer/basic.ll
new file mode 100644
index 000000000000..1cfc0dd29017
--- /dev/null
+++ b/test/Transforms/Scalarizer/basic.ll
@@ -0,0 +1,451 @@
+; RUN: opt %s -scalarizer -scalarize-load-store -dce -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+declare <4 x float> @ext(<4 x float>)
+@g = global <4 x float> zeroinitializer
+
+define void @f1(<4 x float> %init, <4 x float> *%base, i32 %count) {
+; CHECK-LABEL: @f1(
+; CHECK: entry:
+; CHECK: %init.i0 = extractelement <4 x float> %init, i32 0
+; CHECK: %init.i1 = extractelement <4 x float> %init, i32 1
+; CHECK: %init.i2 = extractelement <4 x float> %init, i32 2
+; CHECK: %init.i3 = extractelement <4 x float> %init, i32 3
+; CHECK: br label %loop
+; CHECK: loop:
+; CHECK: %i = phi i32 [ %count, %entry ], [ %nexti, %loop ]
+; CHECK: %acc.i0 = phi float [ %init.i0, %entry ], [ %sel.i0, %loop ]
+; CHECK: %acc.i1 = phi float [ %init.i1, %entry ], [ %sel.i1, %loop ]
+; CHECK: %acc.i2 = phi float [ %init.i2, %entry ], [ %sel.i2, %loop ]
+; CHECK: %acc.i3 = phi float [ %init.i3, %entry ], [ %sel.i3, %loop ]
+; CHECK: %nexti = sub i32 %i, 1
+; CHECK: %ptr = getelementptr <4 x float>* %base, i32 %i
+; CHECK: %ptr.i0 = bitcast <4 x float>* %ptr to float*
+; CHECK: %val.i0 = load float* %ptr.i0, align 16
+; CHECK: %ptr.i1 = getelementptr float* %ptr.i0, i32 1
+; CHECK: %val.i1 = load float* %ptr.i1, align 4
+; CHECK: %ptr.i2 = getelementptr float* %ptr.i0, i32 2
+; CHECK: %val.i2 = load float* %ptr.i2, align 8
+; CHECK: %ptr.i3 = getelementptr float* %ptr.i0, i32 3
+; CHECK: %val.i3 = load float* %ptr.i3, align 4
+; CHECK: %add.i0 = fadd float %val.i0, %val.i2
+; CHECK: %add.i1 = fadd float %val.i1, %val.i3
+; CHECK: %add.i2 = fadd float %acc.i0, %acc.i2
+; CHECK: %add.i3 = fadd float %acc.i1, %acc.i3
+; CHECK: %add.upto0 = insertelement <4 x float> undef, float %add.i0, i32 0
+; CHECK: %add.upto1 = insertelement <4 x float> %add.upto0, float %add.i1, i32 1
+; CHECK: %add.upto2 = insertelement <4 x float> %add.upto1, float %add.i2, i32 2
+; CHECK: %add = insertelement <4 x float> %add.upto2, float %add.i3, i32 3
+; CHECK: %call = call <4 x float> @ext(<4 x float> %add)
+; CHECK: %call.i0 = extractelement <4 x float> %call, i32 0
+; CHECK: %cmp.i0 = fcmp ogt float %call.i0, 1.0
+; CHECK: %call.i1 = extractelement <4 x float> %call, i32 1
+; CHECK: %cmp.i1 = fcmp ogt float %call.i1, 2.0
+; CHECK: %call.i2 = extractelement <4 x float> %call, i32 2
+; CHECK: %cmp.i2 = fcmp ogt float %call.i2, 3.0
+; CHECK: %call.i3 = extractelement <4 x float> %call, i32 3
+; CHECK: %cmp.i3 = fcmp ogt float %call.i3, 4.0
+; CHECK: %sel.i0 = select i1 %cmp.i0, float %call.i0, float 5.0
+; CHECK: %sel.i1 = select i1 %cmp.i1, float %call.i1, float 6.0
+; CHECK: %sel.i2 = select i1 %cmp.i2, float %call.i2, float 7.0
+; CHECK: %sel.i3 = select i1 %cmp.i3, float %call.i3, float 8.0
+; CHECK: store float %sel.i0, float* %ptr.i0
+; CHECK: store float %sel.i1, float* %ptr.i1
+; CHECK: store float %sel.i2, float* %ptr.i2
+; CHECK: store float %sel.i3, float* %ptr.i3
+; CHECK: %test = icmp eq i32 %nexti, 0
+; CHECK: br i1 %test, label %loop, label %exit
+; CHECK: exit:
+; CHECK: ret void
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ %count, %entry ], [ %nexti, %loop ]
+ %acc = phi <4 x float> [ %init, %entry ], [ %sel, %loop ]
+ %nexti = sub i32 %i, 1
+
+ %ptr = getelementptr <4 x float> *%base, i32 %i
+ %val = load <4 x float> *%ptr
+ %dval = bitcast <4 x float> %val to <2 x double>
+ %dacc = bitcast <4 x float> %acc to <2 x double>
+ %shuffle1 = shufflevector <2 x double> %dval, <2 x double> %dacc,
+ <2 x i32> <i32 0, i32 2>
+ %shuffle2 = shufflevector <2 x double> %dval, <2 x double> %dacc,
+ <2 x i32> <i32 1, i32 3>
+ %f1 = bitcast <2 x double> %shuffle1 to <4 x float>
+ %f2 = bitcast <2 x double> %shuffle2 to <4 x float>
+ %add = fadd <4 x float> %f1, %f2
+ %call = call <4 x float> @ext(<4 x float> %add)
+ %cmp = fcmp ogt <4 x float> %call,
+ <float 1.0, float 2.0, float 3.0, float 4.0>
+ %sel = select <4 x i1> %cmp, <4 x float> %call,
+ <4 x float> <float 5.0, float 6.0, float 7.0, float 8.0>
+ store <4 x float> %sel, <4 x float> *%ptr
+
+ %test = icmp eq i32 %nexti, 0
+ br i1 %test, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+define void @f2(<4 x i32> %init, <4 x i8> *%base, i32 %count) {
+; CHECK-LABEL: define void @f2(<4 x i32> %init, <4 x i8>* %base, i32 %count) {
+; CHECK: entry:
+; CHECK: %init.i0 = extractelement <4 x i32> %init, i32 0
+; CHECK: %init.i1 = extractelement <4 x i32> %init, i32 1
+; CHECK: %init.i2 = extractelement <4 x i32> %init, i32 2
+; CHECK: %init.i3 = extractelement <4 x i32> %init, i32 3
+; CHECK: br label %loop
+; CHECK: loop:
+; CHECK: %i = phi i32 [ %count, %entry ], [ %nexti, %loop ]
+; CHECK: %acc.i0 = phi i32 [ %init.i0, %entry ], [ %sel.i0, %loop ]
+; CHECK: %acc.i1 = phi i32 [ %init.i1, %entry ], [ %sel.i1, %loop ]
+; CHECK: %acc.i2 = phi i32 [ %init.i2, %entry ], [ %sel.i2, %loop ]
+; CHECK: %acc.i3 = phi i32 [ %init.i3, %entry ], [ %sel.i3, %loop ]
+; CHECK: %nexti = sub i32 %i, 1
+; CHECK: %ptr = getelementptr <4 x i8>* %base, i32 %i
+; CHECK: %ptr.i0 = bitcast <4 x i8>* %ptr to i8*
+; CHECK: %val.i0 = load i8* %ptr.i0, align 4
+; CHECK: %ptr.i1 = getelementptr i8* %ptr.i0, i32 1
+; CHECK: %val.i1 = load i8* %ptr.i1, align 1
+; CHECK: %ptr.i2 = getelementptr i8* %ptr.i0, i32 2
+; CHECK: %val.i2 = load i8* %ptr.i2, align 2
+; CHECK: %ptr.i3 = getelementptr i8* %ptr.i0, i32 3
+; CHECK: %val.i3 = load i8* %ptr.i3, align 1
+; CHECK: %ext.i0 = sext i8 %val.i0 to i32
+; CHECK: %ext.i1 = sext i8 %val.i1 to i32
+; CHECK: %ext.i2 = sext i8 %val.i2 to i32
+; CHECK: %ext.i3 = sext i8 %val.i3 to i32
+; CHECK: %add.i0 = add i32 %ext.i0, %acc.i0
+; CHECK: %add.i1 = add i32 %ext.i1, %acc.i1
+; CHECK: %add.i2 = add i32 %ext.i2, %acc.i2
+; CHECK: %add.i3 = add i32 %ext.i3, %acc.i3
+; CHECK: %cmp.i0 = icmp slt i32 %add.i0, -10
+; CHECK: %cmp.i1 = icmp slt i32 %add.i1, -11
+; CHECK: %cmp.i2 = icmp slt i32 %add.i2, -12
+; CHECK: %cmp.i3 = icmp slt i32 %add.i3, -13
+; CHECK: %sel.i0 = select i1 %cmp.i0, i32 %add.i0, i32 %i
+; CHECK: %sel.i1 = select i1 %cmp.i1, i32 %add.i1, i32 %i
+; CHECK: %sel.i2 = select i1 %cmp.i2, i32 %add.i2, i32 %i
+; CHECK: %sel.i3 = select i1 %cmp.i3, i32 %add.i3, i32 %i
+; CHECK: %trunc.i0 = trunc i32 %sel.i0 to i8
+; CHECK: %trunc.i1 = trunc i32 %sel.i1 to i8
+; CHECK: %trunc.i2 = trunc i32 %sel.i2 to i8
+; CHECK: %trunc.i3 = trunc i32 %sel.i3 to i8
+; CHECK: store i8 %trunc.i0, i8* %ptr.i0, align 4
+; CHECK: store i8 %trunc.i1, i8* %ptr.i1, align 1
+; CHECK: store i8 %trunc.i2, i8* %ptr.i2, align 2
+; CHECK: store i8 %trunc.i3, i8* %ptr.i3, align 1
+; CHECK: %test = icmp eq i32 %nexti, 0
+; CHECK: br i1 %test, label %loop, label %exit
+; CHECK: exit:
+; CHECK: ret void
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ %count, %entry ], [ %nexti, %loop ]
+ %acc = phi <4 x i32> [ %init, %entry ], [ %sel, %loop ]
+ %nexti = sub i32 %i, 1
+
+ %ptr = getelementptr <4 x i8> *%base, i32 %i
+ %val = load <4 x i8> *%ptr
+ %ext = sext <4 x i8> %val to <4 x i32>
+ %add = add <4 x i32> %ext, %acc
+ %cmp = icmp slt <4 x i32> %add, <i32 -10, i32 -11, i32 -12, i32 -13>
+ %single = insertelement <4 x i32> undef, i32 %i, i32 0
+ %limit = shufflevector <4 x i32> %single, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ %sel = select <4 x i1> %cmp, <4 x i32> %add, <4 x i32> %limit
+ %trunc = trunc <4 x i32> %sel to <4 x i8>
+ store <4 x i8> %trunc, <4 x i8> *%ptr
+
+ %test = icmp eq i32 %nexti, 0
+ br i1 %test, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+; Check that !tbaa information is preserved.
+define void @f3(<4 x i32> *%src, <4 x i32> *%dst) {
+; CHECK-LABEL: @f3(
+; CHECK: %val.i0 = load i32* %src.i0, align 16, !tbaa ![[TAG:[0-9]*]]
+; CHECK: %val.i1 = load i32* %src.i1, align 4, !tbaa ![[TAG]]
+; CHECK: %val.i2 = load i32* %src.i2, align 8, !tbaa ![[TAG]]
+; CHECK: %val.i3 = load i32* %src.i3, align 4, !tbaa ![[TAG]]
+; CHECK: store i32 %add.i0, i32* %dst.i0, align 16, !tbaa ![[TAG:[0-9]*]]
+; CHECK: store i32 %add.i1, i32* %dst.i1, align 4, !tbaa ![[TAG]]
+; CHECK: store i32 %add.i2, i32* %dst.i2, align 8, !tbaa ![[TAG]]
+; CHECK: store i32 %add.i3, i32* %dst.i3, align 4, !tbaa ![[TAG]]
+; CHECK: ret void
+ %val = load <4 x i32> *%src, !tbaa !1
+ %add = add <4 x i32> %val, %val
+ store <4 x i32> %add, <4 x i32> *%dst, !tbaa !2
+ ret void
+}
+
+; Check that !tbaa.struct information is preserved.
+define void @f4(<4 x i32> *%src, <4 x i32> *%dst) {
+; CHECK-LABEL: @f4(
+; CHECK: %val.i0 = load i32* %src.i0, align 16, !tbaa.struct ![[TAG:[0-9]*]]
+; CHECK: %val.i1 = load i32* %src.i1, align 4, !tbaa.struct ![[TAG]]
+; CHECK: %val.i2 = load i32* %src.i2, align 8, !tbaa.struct ![[TAG]]
+; CHECK: %val.i3 = load i32* %src.i3, align 4, !tbaa.struct ![[TAG]]
+; CHECK: store i32 %add.i0, i32* %dst.i0, align 16, !tbaa.struct ![[TAG]]
+; CHECK: store i32 %add.i1, i32* %dst.i1, align 4, !tbaa.struct ![[TAG]]
+; CHECK: store i32 %add.i2, i32* %dst.i2, align 8, !tbaa.struct ![[TAG]]
+; CHECK: store i32 %add.i3, i32* %dst.i3, align 4, !tbaa.struct ![[TAG]]
+; CHECK: ret void
+ %val = load <4 x i32> *%src, !tbaa.struct !5
+ %add = add <4 x i32> %val, %val
+ store <4 x i32> %add, <4 x i32> *%dst, !tbaa.struct !5
+ ret void
+}
+
+; Check that llvm.mem.parallel_loop_access information is preserved.
+define void @f5(i32 %count, <4 x i32> *%src, <4 x i32> *%dst) {
+; CHECK-LABEL: @f5(
+; CHECK: %val.i0 = load i32* %this_src.i0, align 16, !llvm.mem.parallel_loop_access ![[TAG:[0-9]*]]
+; CHECK: %val.i1 = load i32* %this_src.i1, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: %val.i2 = load i32* %this_src.i2, align 8, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: %val.i3 = load i32* %this_src.i3, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: store i32 %add.i0, i32* %this_dst.i0, align 16, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: store i32 %add.i1, i32* %this_dst.i1, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: store i32 %add.i2, i32* %this_dst.i2, align 8, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: store i32 %add.i3, i32* %this_dst.i3, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: ret void
+entry:
+ br label %loop
+
+loop:
+ %index = phi i32 [ 0, %entry ], [ %next_index, %loop ]
+ %this_src = getelementptr <4 x i32> *%src, i32 %index
+ %this_dst = getelementptr <4 x i32> *%dst, i32 %index
+ %val = load <4 x i32> *%this_src, !llvm.mem.parallel_loop_access !3
+ %add = add <4 x i32> %val, %val
+ store <4 x i32> %add, <4 x i32> *%this_dst, !llvm.mem.parallel_loop_access !3
+ %next_index = add i32 %index, -1
+ %continue = icmp ne i32 %next_index, %count
+ br i1 %continue, label %loop, label %end, !llvm.loop !3
+
+end:
+ ret void
+}
+
+; Check that fpmath information is preserved.
+define <4 x float> @f6(<4 x float> %x) {
+; CHECK-LABEL: @f6(
+; CHECK: %x.i0 = extractelement <4 x float> %x, i32 0
+; CHECK: %res.i0 = fadd float %x.i0, 1.0{{[e+0]*}}, !fpmath ![[TAG:[0-9]*]]
+; CHECK: %x.i1 = extractelement <4 x float> %x, i32 1
+; CHECK: %res.i1 = fadd float %x.i1, 2.0{{[e+0]*}}, !fpmath ![[TAG]]
+; CHECK: %x.i2 = extractelement <4 x float> %x, i32 2
+; CHECK: %res.i2 = fadd float %x.i2, 3.0{{[e+0]*}}, !fpmath ![[TAG]]
+; CHECK: %x.i3 = extractelement <4 x float> %x, i32 3
+; CHECK: %res.i3 = fadd float %x.i3, 4.0{{[e+0]*}}, !fpmath ![[TAG]]
+; CHECK: %res.upto0 = insertelement <4 x float> undef, float %res.i0, i32 0
+; CHECK: %res.upto1 = insertelement <4 x float> %res.upto0, float %res.i1, i32 1
+; CHECK: %res.upto2 = insertelement <4 x float> %res.upto1, float %res.i2, i32 2
+; CHECK: %res = insertelement <4 x float> %res.upto2, float %res.i3, i32 3
+; CHECK: ret <4 x float> %res
+ %res = fadd <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>,
+ !fpmath !4
+ ret <4 x float> %res
+}
+
+; Check that random metadata isn't kept.
+define void @f7(<4 x i32> *%src, <4 x i32> *%dst) {
+; CHECK-LABEL: @f7(
+; CHECK-NOT: !foo
+; CHECK: ret void
+ %val = load <4 x i32> *%src, !foo !5
+ %add = add <4 x i32> %val, %val
+ store <4 x i32> %add, <4 x i32> *%dst, !foo !5
+ ret void
+}
+
+; Test GEP with vectors.
+define void @f8(<4 x float *> *%dest, <4 x float *> %ptr0, <4 x i32> %i0,
+ float *%other) {
+; CHECK-LABEL: @f8(
+; CHECK: %dest.i0 = bitcast <4 x float*>* %dest to float**
+; CHECK: %dest.i1 = getelementptr float** %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float** %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float** %dest.i0, i32 3
+; CHECK: %i0.i1 = extractelement <4 x i32> %i0, i32 1
+; CHECK: %i0.i3 = extractelement <4 x i32> %i0, i32 3
+; CHECK: %ptr0.i0 = extractelement <4 x float*> %ptr0, i32 0
+; CHECK: %val.i0 = getelementptr float* %ptr0.i0, i32 100
+; CHECK: %val.i1 = getelementptr float* %other, i32 %i0.i1
+; CHECK: %ptr0.i2 = extractelement <4 x float*> %ptr0, i32 2
+; CHECK: %val.i2 = getelementptr float* %ptr0.i2, i32 100
+; CHECK: %ptr0.i3 = extractelement <4 x float*> %ptr0, i32 3
+; CHECK: %val.i3 = getelementptr float* %ptr0.i3, i32 %i0.i3
+; CHECK: store float* %val.i0, float** %dest.i0, align 32
+; CHECK: store float* %val.i1, float** %dest.i1, align 8
+; CHECK: store float* %val.i2, float** %dest.i2, align 16
+; CHECK: store float* %val.i3, float** %dest.i3, align 8
+; CHECK: ret void
+ %i1 = insertelement <4 x i32> %i0, i32 100, i32 0
+ %i2 = insertelement <4 x i32> %i1, i32 100, i32 2
+ %ptr1 = insertelement <4 x float *> %ptr0, float *%other, i32 1
+ %val = getelementptr <4 x float *> %ptr1, <4 x i32> %i2
+ store <4 x float *> %val, <4 x float *> *%dest
+ ret void
+}
+
+; Test the handling of unaligned loads.
+define void @f9(<4 x float> *%dest, <4 x float> *%src) {
+; CHECK: @f9(
+; CHECK: %dest.i0 = bitcast <4 x float>* %dest to float*
+; CHECK: %dest.i1 = getelementptr float* %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float* %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float* %dest.i0, i32 3
+; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
+; CHECK: %val.i0 = load float* %src.i0, align 4
+; CHECK: %src.i1 = getelementptr float* %src.i0, i32 1
+; CHECK: %val.i1 = load float* %src.i1, align 4
+; CHECK: %src.i2 = getelementptr float* %src.i0, i32 2
+; CHECK: %val.i2 = load float* %src.i2, align 4
+; CHECK: %src.i3 = getelementptr float* %src.i0, i32 3
+; CHECK: %val.i3 = load float* %src.i3, align 4
+; CHECK: store float %val.i0, float* %dest.i0, align 8
+; CHECK: store float %val.i1, float* %dest.i1, align 4
+; CHECK: store float %val.i2, float* %dest.i2, align 8
+; CHECK: store float %val.i3, float* %dest.i3, align 4
+; CHECK: ret void
+ %val = load <4 x float> *%src, align 4
+ store <4 x float> %val, <4 x float> *%dest, align 8
+ ret void
+}
+
+; ...and again with subelement alignment.
+define void @f10(<4 x float> *%dest, <4 x float> *%src) {
+; CHECK: @f10(
+; CHECK: %dest.i0 = bitcast <4 x float>* %dest to float*
+; CHECK: %dest.i1 = getelementptr float* %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float* %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float* %dest.i0, i32 3
+; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
+; CHECK: %val.i0 = load float* %src.i0, align 1
+; CHECK: %src.i1 = getelementptr float* %src.i0, i32 1
+; CHECK: %val.i1 = load float* %src.i1, align 1
+; CHECK: %src.i2 = getelementptr float* %src.i0, i32 2
+; CHECK: %val.i2 = load float* %src.i2, align 1
+; CHECK: %src.i3 = getelementptr float* %src.i0, i32 3
+; CHECK: %val.i3 = load float* %src.i3, align 1
+; CHECK: store float %val.i0, float* %dest.i0, align 2
+; CHECK: store float %val.i1, float* %dest.i1, align 2
+; CHECK: store float %val.i2, float* %dest.i2, align 2
+; CHECK: store float %val.i3, float* %dest.i3, align 2
+; CHECK: ret void
+ %val = load <4 x float> *%src, align 1
+ store <4 x float> %val, <4 x float> *%dest, align 2
+ ret void
+}
+
+; Test that sub-byte loads aren't scalarized.
+define void @f11(<32 x i1> *%dest, <32 x i1> *%src0) {
+; CHECK: @f11(
+; CHECK: %val0 = load <32 x i1>* %src0
+; CHECK: %val1 = load <32 x i1>* %src1
+; CHECK: store <32 x i1> %and, <32 x i1>* %dest
+; CHECK: ret void
+ %src1 = getelementptr <32 x i1> *%src0, i32 1
+ %val0 = load <32 x i1> *%src0
+ %val1 = load <32 x i1> *%src1
+ %and = and <32 x i1> %val0, %val1
+ store <32 x i1> %and, <32 x i1> *%dest
+ ret void
+}
+
+; Test that variable inserts aren't scalarized.
+define void @f12(<4 x i32> *%dest, <4 x i32> *%src, i32 %index) {
+; CHECK: @f12(
+; CHECK: %val1 = insertelement <4 x i32> %val0, i32 1, i32 %index
+; CHECK-DAG: %val1.i0 = extractelement <4 x i32> %val1, i32 0
+; CHECK-DAG: %val1.i1 = extractelement <4 x i32> %val1, i32 1
+; CHECK-DAG: %val1.i2 = extractelement <4 x i32> %val1, i32 2
+; CHECK-DAG: %val1.i3 = extractelement <4 x i32> %val1, i32 3
+; CHECK-DAG: %val2.i0 = shl i32 1, %val1.i0
+; CHECK-DAG: %val2.i1 = shl i32 2, %val1.i1
+; CHECK-DAG: %val2.i2 = shl i32 3, %val1.i2
+; CHECK-DAG: %val2.i3 = shl i32 4, %val1.i3
+; CHECK: ret void
+ %val0 = load <4 x i32> *%src
+ %val1 = insertelement <4 x i32> %val0, i32 1, i32 %index
+ %val2 = shl <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %val1
+ store <4 x i32> %val2, <4 x i32> *%dest
+ ret void
+}
+
+; Test vector GEPs with more than one index.
+define void @f13(<4 x float *> *%dest, <4 x [4 x float] *> %ptr, <4 x i32> %i,
+ float *%other) {
+; CHECK-LABEL: @f13(
+; CHECK: %dest.i0 = bitcast <4 x float*>* %dest to float**
+; CHECK: %dest.i1 = getelementptr float** %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float** %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float** %dest.i0, i32 3
+; CHECK: %i.i0 = extractelement <4 x i32> %i, i32 0
+; CHECK: %ptr.i0 = extractelement <4 x [4 x float]*> %ptr, i32 0
+; CHECK: %val.i0 = getelementptr inbounds [4 x float]* %ptr.i0, i32 0, i32 %i.i0
+; CHECK: %i.i1 = extractelement <4 x i32> %i, i32 1
+; CHECK: %ptr.i1 = extractelement <4 x [4 x float]*> %ptr, i32 1
+; CHECK: %val.i1 = getelementptr inbounds [4 x float]* %ptr.i1, i32 1, i32 %i.i1
+; CHECK: %i.i2 = extractelement <4 x i32> %i, i32 2
+; CHECK: %ptr.i2 = extractelement <4 x [4 x float]*> %ptr, i32 2
+; CHECK: %val.i2 = getelementptr inbounds [4 x float]* %ptr.i2, i32 2, i32 %i.i2
+; CHECK: %i.i3 = extractelement <4 x i32> %i, i32 3
+; CHECK: %ptr.i3 = extractelement <4 x [4 x float]*> %ptr, i32 3
+; CHECK: %val.i3 = getelementptr inbounds [4 x float]* %ptr.i3, i32 3, i32 %i.i3
+; CHECK: store float* %val.i0, float** %dest.i0, align 32
+; CHECK: store float* %val.i1, float** %dest.i1, align 8
+; CHECK: store float* %val.i2, float** %dest.i2, align 16
+; CHECK: store float* %val.i3, float** %dest.i3, align 8
+; CHECK: ret void
+ %val = getelementptr inbounds <4 x [4 x float] *> %ptr,
+ <4 x i32> <i32 0, i32 1, i32 2, i32 3>,
+ <4 x i32> %i
+ store <4 x float *> %val, <4 x float *> *%dest
+ ret void
+}
+
+; Test combinations of vector and non-vector PHIs.
+define <4 x float> @f14(<4 x float> %acc, i32 %count) {
+; CHECK-LABEL: @f14(
+; CHECK: %this_acc.i0 = phi float [ %acc.i0, %entry ], [ %next_acc.i0, %loop ]
+; CHECK: %this_acc.i1 = phi float [ %acc.i1, %entry ], [ %next_acc.i1, %loop ]
+; CHECK: %this_acc.i2 = phi float [ %acc.i2, %entry ], [ %next_acc.i2, %loop ]
+; CHECK: %this_acc.i3 = phi float [ %acc.i3, %entry ], [ %next_acc.i3, %loop ]
+; CHECK: %this_count = phi i32 [ %count, %entry ], [ %next_count, %loop ]
+; CHECK: %this_acc.upto0 = insertelement <4 x float> undef, float %this_acc.i0, i32 0
+; CHECK: %this_acc.upto1 = insertelement <4 x float> %this_acc.upto0, float %this_acc.i1, i32 1
+; CHECK: %this_acc.upto2 = insertelement <4 x float> %this_acc.upto1, float %this_acc.i2, i32 2
+; CHECK: %this_acc = insertelement <4 x float> %this_acc.upto2, float %this_acc.i3, i32 3
+; CHECK: ret <4 x float> %next_acc
+entry:
+ br label %loop
+
+loop:
+ %this_acc = phi <4 x float> [ %acc, %entry ], [ %next_acc, %loop ]
+ %this_count = phi i32 [ %count, %entry ], [ %next_count, %loop ]
+ %foo = call <4 x float> @ext(<4 x float> %this_acc)
+ %next_acc = fadd <4 x float> %this_acc, %foo
+ %next_count = sub i32 %this_count, 1
+ %cmp = icmp eq i32 %next_count, 0
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret <4 x float> %next_acc
+}
+
+!0 = metadata !{ metadata !"root" }
+!1 = metadata !{ metadata !"set1", metadata !0 }
+!2 = metadata !{ metadata !"set2", metadata !0 }
+!3 = metadata !{ metadata !3 }
+!4 = metadata !{ float 4.0 }
+!5 = metadata !{ i64 0, i64 8, null }
diff --git a/test/Transforms/Scalarizer/dbginfo.ll b/test/Transforms/Scalarizer/dbginfo.ll
new file mode 100644
index 000000000000..546e89da80cb
--- /dev/null
+++ b/test/Transforms/Scalarizer/dbginfo.ll
@@ -0,0 +1,86 @@
+; RUN: opt %s -scalarizer -scalarize-load-store -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+; Function Attrs: nounwind uwtable
+define void @f1(<4 x i32>* nocapture %a, <4 x i32>* nocapture readonly %b, <4 x i32>* nocapture readonly %c) #0 {
+; CHECK: @f1(
+; CHECK: %a.i0 = bitcast <4 x i32>* %a to i32*
+; CHECK: %a.i1 = getelementptr i32* %a.i0, i32 1
+; CHECK: %a.i2 = getelementptr i32* %a.i0, i32 2
+; CHECK: %a.i3 = getelementptr i32* %a.i0, i32 3
+; CHECK: %c.i0 = bitcast <4 x i32>* %c to i32*
+; CHECK: %c.i1 = getelementptr i32* %c.i0, i32 1
+; CHECK: %c.i2 = getelementptr i32* %c.i0, i32 2
+; CHECK: %c.i3 = getelementptr i32* %c.i0, i32 3
+; CHECK: %b.i0 = bitcast <4 x i32>* %b to i32*
+; CHECK: %b.i1 = getelementptr i32* %b.i0, i32 1
+; CHECK: %b.i2 = getelementptr i32* %b.i0, i32 2
+; CHECK: %b.i3 = getelementptr i32* %b.i0, i32 3
+; CHECK: tail call void @llvm.dbg.value(metadata !{<4 x i32>* %a}, i64 0, metadata !{{[0-9]+}}), !dbg !{{[0-9]+}}
+; CHECK: tail call void @llvm.dbg.value(metadata !{<4 x i32>* %b}, i64 0, metadata !{{[0-9]+}}), !dbg !{{[0-9]+}}
+; CHECK: tail call void @llvm.dbg.value(metadata !{<4 x i32>* %c}, i64 0, metadata !{{[0-9]+}}), !dbg !{{[0-9]+}}
+; CHECK: %bval.i0 = load i32* %b.i0, align 16, !dbg ![[TAG1:[0-9]+]], !tbaa ![[TAG2:[0-9]+]]
+; CHECK: %bval.i1 = load i32* %b.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %bval.i2 = load i32* %b.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %bval.i3 = load i32* %b.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i0 = load i32* %c.i0, align 16, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i1 = load i32* %c.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i2 = load i32* %c.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i3 = load i32* %c.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %add.i0 = add i32 %bval.i0, %cval.i0, !dbg ![[TAG1]]
+; CHECK: %add.i1 = add i32 %bval.i1, %cval.i1, !dbg ![[TAG1]]
+; CHECK: %add.i2 = add i32 %bval.i2, %cval.i2, !dbg ![[TAG1]]
+; CHECK: %add.i3 = add i32 %bval.i3, %cval.i3, !dbg ![[TAG1]]
+; CHECK: store i32 %add.i0, i32* %a.i0, align 16, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: store i32 %add.i1, i32* %a.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: store i32 %add.i2, i32* %a.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: store i32 %add.i3, i32* %a.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: ret void
+entry:
+ tail call void @llvm.dbg.value(metadata !{<4 x i32>* %a}, i64 0, metadata !15), !dbg !20
+ tail call void @llvm.dbg.value(metadata !{<4 x i32>* %b}, i64 0, metadata !16), !dbg !20
+ tail call void @llvm.dbg.value(metadata !{<4 x i32>* %c}, i64 0, metadata !17), !dbg !20
+ %bval = load <4 x i32>* %b, align 16, !dbg !21, !tbaa !22
+ %cval = load <4 x i32>* %c, align 16, !dbg !21, !tbaa !22
+ %add = add <4 x i32> %bval, %cval, !dbg !21
+ store <4 x i32> %add, <4 x i32>* %a, align 16, !dbg !21, !tbaa !22
+ ret void, !dbg !25
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #1
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!18, !26}
+!llvm.ident = !{!19}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 194134) (llvm/trunk 194126)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/home/richards/llvm/build//tmp/add.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"/tmp/add.c", metadata !"/home/richards/llvm/build"}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"f1", metadata !"f1", metadata !"", i32 3, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (<4 x i32>*, <4 x i32>*, <4 x i32>*)* @f1, null, null, metadata !14, i32 4} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 4] [f]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/home/richards/llvm/build//tmp/add.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{null, metadata !8, metadata !8, metadata !8}
+!8 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from V4SI]
+!9 = metadata !{i32 786454, metadata !1, null, metadata !"V4SI", i32 1, i64 0, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_typedef ] [V4SI] [line 1, size 0, align 0, offset 0] [from ]
+!10 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 128, i64 128, i32 0, i32 2048, metadata !11, metadata !12, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [vector] [from int]
+!11 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!12 = metadata !{metadata !13}
+!13 = metadata !{i32 786465, i64 0, i64 4} ; [ DW_TAG_subrange_type ] [0, 3]
+!14 = metadata !{metadata !15, metadata !16, metadata !17}
+!15 = metadata !{i32 786689, metadata !4, metadata !"a", metadata !5, i32 16777219, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [a] [line 3]
+!16 = metadata !{i32 786689, metadata !4, metadata !"b", metadata !5, i32 33554435, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [b] [line 3]
+!17 = metadata !{i32 786689, metadata !4, metadata !"c", metadata !5, i32 50331651, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [c] [line 3]
+!18 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!19 = metadata !{metadata !"clang version 3.4 (trunk 194134) (llvm/trunk 194126)"}
+!20 = metadata !{i32 3, i32 0, metadata !4, null}
+!21 = metadata !{i32 5, i32 0, metadata !4, null}
+!22 = metadata !{metadata !23, metadata !23, i64 0}
+!23 = metadata !{metadata !"omnipotent char", metadata !24, i64 0}
+!24 = metadata !{metadata !"Simple C/C++ TBAA"}
+!25 = metadata !{i32 6, i32 0, metadata !4, null}
+!26 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/Transforms/Scalarizer/no-data-layout.ll b/test/Transforms/Scalarizer/no-data-layout.ll
new file mode 100644
index 000000000000..3eaf669ef9dd
--- /dev/null
+++ b/test/Transforms/Scalarizer/no-data-layout.ll
@@ -0,0 +1,25 @@
+; RUN: opt %s -scalarizer -scalarize-load-store -S | FileCheck %s
+
+; Test the handling of loads and stores when no data layout is available.
+define void @f1(<4 x float> *%dest, <4 x float> *%src) {
+; CHECK: @f1(
+; CHECK: %val = load <4 x float>* %src, align 4
+; CHECK: %val.i0 = extractelement <4 x float> %val, i32 0
+; CHECK: %add.i0 = fadd float %val.i0, %val.i0
+; CHECK: %val.i1 = extractelement <4 x float> %val, i32 1
+; CHECK: %add.i1 = fadd float %val.i1, %val.i1
+; CHECK: %val.i2 = extractelement <4 x float> %val, i32 2
+; CHECK: %add.i2 = fadd float %val.i2, %val.i2
+; CHECK: %val.i3 = extractelement <4 x float> %val, i32 3
+; CHECK: %add.i3 = fadd float %val.i3, %val.i3
+; CHECK: %add.upto0 = insertelement <4 x float> undef, float %add.i0, i32 0
+; CHECK: %add.upto1 = insertelement <4 x float> %add.upto0, float %add.i1, i32 1
+; CHECK: %add.upto2 = insertelement <4 x float> %add.upto1, float %add.i2, i32 2
+; CHECK: %add = insertelement <4 x float> %add.upto2, float %add.i3, i32 3
+; CHECK: store <4 x float> %add, <4 x float>* %dest, align 8
+; CHECK: ret void
+ %val = load <4 x float> *%src, align 4
+ %add = fadd <4 x float> %val, %val
+ store <4 x float> %add, <4 x float> *%dest, align 8
+ ret void
+}
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
new file mode 100644
index 000000000000..a5e90f8e3c1d
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'NVPTX' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
new file mode 100644
index 000000000000..d054a3b1b9f2
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -0,0 +1,196 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
+; RUN: opt < %s -S -separate-const-offset-from-gep -gvn -dce | FileCheck %s --check-prefix=IR
+
+; Verifies the SeparateConstOffsetFromGEP pass.
+; The following code computes
+; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
+;
+; We expect SeparateConstOffsetFromGEP to transform it to
+;
+; float *base = &array[x][y];
+; *output = base[0] + base[1] + base[32] + base[33];
+;
+; so the backend can emit PTX that uses fewer virtual registers.
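+;
+; Rough offset arithmetic behind the checks below (using the layout of @array:
+; rows of 32 floats, 4 bytes per float):
+;   base[1]  -> +1  element  =   +4 bytes
+;   base[32] -> +32 elements = +128 bytes (one full row)
+;   base[33] -> +33 elements = +132 bytes
+; which is where the +4/+128/+132 addresses in the PTX CHECK lines come from.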
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+@array = internal addrspace(3) constant [32 x [32 x float]] zeroinitializer, align 4
+
+define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = sext i32 %y to i64
+ %1 = sext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add i32 %y, 1
+ %7 = sext i32 %6 to i64
+ %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+ %9 = addrspacecast float addrspace(3)* %8 to float*
+ %10 = load float* %9, align 4
+ %11 = fadd float %5, %10
+ %12 = add i32 %x, 1
+ %13 = sext i32 %12 to i64
+ %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+ %15 = addrspacecast float addrspace(3)* %14 to float*
+ %16 = load float* %15, align 4
+ %17 = fadd float %11, %16
+ %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+ %19 = addrspacecast float addrspace(3)* %18 to float*
+ %20 = load float* %19, align 4
+ %21 = fadd float %17, %20
+ store float %21, float* %output, align 4
+ ret void
+}
+; PTX-LABEL: sum_of_array(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+
+; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
+; the order of "sext" and "add" when computing the array indices. @sum_of_array
+; computes add before sext, e.g., array[sext(x + 1)][sext(y + 1)], while
+; @sum_of_array2 computes sext before add,
+; e.g., array[sext(x) + 1][sext(y) + 1]. SeparateConstOffsetFromGEP should be
+; able to extract constant offsets from both forms.
+define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = sext i32 %y to i64
+ %1 = sext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add i64 %0, 1
+ %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+ %8 = addrspacecast float addrspace(3)* %7 to float*
+ %9 = load float* %8, align 4
+ %10 = fadd float %5, %9
+ %11 = add i64 %1, 1
+ %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+ %13 = addrspacecast float addrspace(3)* %12 to float*
+ %14 = load float* %13, align 4
+ %15 = fadd float %10, %14
+ %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+ %17 = addrspacecast float addrspace(3)* %16 to float*
+ %18 = load float* %17, align 4
+ %19 = fadd float %15, %18
+ store float %19, float* %output, align 4
+ ret void
+}
+; PTX-LABEL: sum_of_array2(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array2(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+
+
+; This function loads
+; array[zext(x)][zext(y)]
+; array[zext(x)][zext(y +nuw 1)]
+; array[zext(x +nuw 1)][zext(y)]
+; array[zext(x +nuw 1)][zext(y +nuw 1)].
+;
+; This function is similar to @sum_of_array, but it
+; 1) extends array indices using zext instead of sext;
+; 2) annotates the addition with "nuw"; otherwise, zext(x + 1) => zext(x) + 1
+; may be invalid.
+define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = zext i32 %y to i64
+ %1 = zext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add nuw i32 %y, 1
+ %7 = zext i32 %6 to i64
+ %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+ %9 = addrspacecast float addrspace(3)* %8 to float*
+ %10 = load float* %9, align 4
+ %11 = fadd float %5, %10
+ %12 = add nuw i32 %x, 1
+ %13 = zext i32 %12 to i64
+ %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+ %15 = addrspacecast float addrspace(3)* %14 to float*
+ %16 = load float* %15, align 4
+ %17 = fadd float %11, %16
+ %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+ %19 = addrspacecast float addrspace(3)* %18 to float*
+ %20 = load float* %19, align 4
+ %21 = fadd float %17, %20
+ store float %21, float* %output, align 4
+ ret void
+}
+; PTX-LABEL: sum_of_array3(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array3(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+
+
+; This function loads
+; array[zext(x)][zext(y)]
+; array[zext(x)][zext(y) + 1]
+; array[zext(x) + 1][zext(y)]
+; array[zext(x) + 1][zext(y) + 1].
+;
+; We expect the generated code to reuse the computation of
+; &array[zext(x)][zext(y)]. See the expected IR and PTX for details.
+define void @sum_of_array4(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = zext i32 %y to i64
+ %1 = zext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add i64 %0, 1
+ %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+ %8 = addrspacecast float addrspace(3)* %7 to float*
+ %9 = load float* %8, align 4
+ %10 = fadd float %5, %9
+ %11 = add i64 %1, 1
+ %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+ %13 = addrspacecast float addrspace(3)* %12 to float*
+ %14 = load float* %13, align 4
+ %15 = fadd float %10, %14
+ %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+ %17 = addrspacecast float addrspace(3)* %16 to float*
+ %18 = load float* %17, align 4
+ %19 = fadd float %15, %18
+ store float %19, float* %output, align 4
+ ret void
+}
+; PTX-LABEL: sum_of_array4(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array4(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
new file mode 100644
index 000000000000..1784171454d6
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
@@ -0,0 +1,236 @@
+; RUN: opt < %s -separate-const-offset-from-gep -dce -S | FileCheck %s
+
+; Several unit tests for -separate-const-offset-from-gep. The transformation
+; heavily relies on TargetTransformInfo, so we put these tests under
+; target-specific folders.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+; target triple is necessary; otherwise TargetTransformInfo rejects any
+; addressing mode.
+target triple = "nvptx64-unknown-unknown"
+
+%struct.S = type { float, double }
+
+@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
+@float_2d_array = global [32 x [32 x float]] zeroinitializer, align 4
+
+; We should not extract any struct field indices, because fields in a struct
+; may have different types.
+define double* @struct(i32 %i) {
+entry:
+ %add = add nsw i32 %i, 5
+ %idxprom = sext i32 %add to i64
+ %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ ret double* %p
+}
+; CHECK-LABEL: @struct(
+; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
+
+; We should be able to trace into sext(a + b) if a + b is non-negative
+; (e.g., used as an index of an inbounds GEP) and one of a and b is
+; non-negative.
+define float* @sext_add(i32 %i, i32 %j) {
+entry:
+ %0 = add i32 %i, 1
+ %1 = sext i32 %0 to i64 ; inbounds sext(i + 1) = sext(i) + 1
+ %2 = add i32 %j, -2
+ ; However, inbounds sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
+ %3 = sext i32 %2 to i64
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
+ ret float* %p
+}
+; CHECK-LABEL: @sext_add(
+; CHECK-NOT: = add
+; CHECK: add i32 %j, -2
+; CHECK: sext
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32
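+; Only the "+ 1" on %i is extracted: it is one full row of the 32 x 32 float
+; array, i.e. 32 elements, which is the i64 32 offset above. The "-2" on %j
+; stays, since sext(j + -2) need not equal sext(j) + -2.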
+
+; We should be able to trace into sext/zext if it can be distributed to both
+; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
+;
+; This test verifies we can transform
+; gep base, a + sext(b +nsw 1), c + zext(d +nuw 1)
+; to
+; gep base, a + sext(b), c + zext(d); gep ..., 1 * 32 + 1
+define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
+ %b1 = add nsw i32 %b, 1
+ %b2 = sext i32 %b1 to i64
+ %i = add i64 %a, %b2 ; i = a + sext(b +nsw 1)
+ %d1 = add nuw i32 %d, 1
+ %d2 = zext i32 %d1 to i64
+ %j = add i64 %c, %d2 ; j = c + zext(d +nuw 1)
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+ ret float* %p
+}
+; CHECK-LABEL: @ext_add_no_overflow(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 33
+
+; Verifies we handle nested sext/zext correctly.
+define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
+entry:
+ %0 = add nsw nuw i32 %a, 1
+ %1 = sext i32 %0 to i48
+ %2 = zext i48 %1 to i64 ; zext(sext(a +nsw nuw 1)) = zext(sext(a)) + 1
+ %3 = add nsw i32 %b, 2
+ %4 = sext i32 %3 to i48
+ %5 = zext i48 %4 to i64 ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
+ %p1 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
+ store float* %p1, float** %out1
+ %6 = add nuw i32 %a, 3
+ %7 = zext i32 %6 to i48
+ %8 = sext i48 %7 to i64 ; sext(zext(a +nuw 3)) = zext(a +nuw 3) = zext(a) + 3
+ %9 = add nsw i32 %b, 4
+ %10 = zext i32 %9 to i48
+ %11 = sext i48 %10 to i64 ; sext(zext(b +nsw 4)) != zext(b) + 4
+ %p2 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
+ store float* %p2, float** %out2
+ ret void
+}
+; CHECK-LABEL: @sext_zext(
+; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR_1]], i64 32
+; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR_2]], i64 96
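+; The i64 32 offset comes from the "+nsw nuw 1" on %a (one row of 32 floats);
+; the "+nsw 2" on %b cannot be extracted through its zext/sext chain. The
+; i64 96 offset comes from the "+nuw 3" on %a (three rows); the "+nsw 4" on
+; %b likewise stays put.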
+
+; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
+; its operand is an OR and the two operands of the OR have no common bits.
+define float* @sext_or(i64 %a, i32 %b) {
+entry:
+ %b1 = shl i32 %b, 2
+ %b2 = or i32 %b1, 1 ; (b << 2) and 1 have no common bits
+ %b3 = or i32 %b1, 4 ; (b << 2) and 4 may have common bits
+ %b2.ext = zext i32 %b2 to i64
+ %b3.ext = sext i32 %b3 to i64
+ %i = add i64 %a, %b2.ext
+ %j = add i64 %a, %b3.ext
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+ ret float* %p
+}
+; CHECK-LABEL: @sext_or(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 32
+
+; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
+; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
+; affected.
+define float* @expr(i64 %a, i64 %b, i64* %out) {
+entry:
+ %b5 = add i64 %b, 5
+ %i = add i64 %b5, %a
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
+ store i64 %b5, i64* %out
+ ret float* %p
+}
+; CHECK-LABEL: @expr(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
+; CHECK: getelementptr float* [[BASE_PTR]], i64 160
+; CHECK: store i64 %b5, i64* %out
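+; The extracted 5 sits in the row index, so it contributes 5 * 32 = 160 float
+; elements (the i64 160 above), while the original "add i64 %b, 5" is kept
+; alive for the store.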
+
+; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
+define float* @sext_expr(i32 %a, i32 %b, i32 %c, i64 %d) {
+entry:
+ %0 = add nsw i32 %c, 8
+ %1 = add nsw i32 %b, %0
+ %2 = add nsw i32 %a, %1
+ %3 = sext i32 %2 to i64
+ %i = add i64 %d, %3
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+ ret float* %p
+}
+; CHECK-LABEL: @sext_expr(
+; CHECK: sext i32
+; CHECK: sext i32
+; CHECK: sext i32
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 8
+
+; Verifies we handle "sub" correctly.
+define float* @sub(i64 %i, i64 %j) {
+ %i2 = sub i64 %i, 5 ; i - 5
+ %j2 = sub i64 5, %j ; 5 - j
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
+ ret float* %p
+}
+; CHECK-LABEL: @sub(
+; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK: getelementptr float* [[BASE_PTR]], i64 -155
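+; The extracted constant is (-5) * 32 + 5 = -155 elements: -5 rows from
+; "i - 5" plus 5 columns from "5 - j", which is rewritten as "sub i64 0, %j".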
+
+%struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed
+
+; Verifies we can emit correct uglygep if the address is not naturally aligned.
+define i64* @packed_struct(i32 %i, i32 %j) {
+entry:
+ %s = alloca [1024 x %struct.Packed], align 16
+ %add = add nsw i32 %j, 3
+ %idxprom = sext i32 %add to i64
+ %add1 = add nsw i32 %i, 1
+ %idxprom2 = sext i32 %add1 to i64
+ %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
+ ret i64* %arrayidx3
+}
+; CHECK-LABEL: @packed_struct(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
+; CHECK: %uglygep = getelementptr i8* [[CASTED_PTR]], i64 100
+; CHECK: bitcast i8* %uglygep to i64*
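+; The packed struct is 3*4 + 8*8 = 76 bytes, so the extracted offset is
+; 1 * 76 (from "i + 1") plus 3 * 8 (from "j + 3") = 100 bytes, the uglygep
+; offset above. Since 76 is not a multiple of 8, the resulting address may
+; not be naturally aligned for i64, hence the byte-wise gep through i8*.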
+
+; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
+; because "zext(b + 8) != zext(b) + 8"
+define float* @zext_expr(i32 %a, i32 %b) {
+entry:
+ %0 = add i32 %b, 8
+ %1 = add nuw i32 %a, %0
+ %i = zext i32 %1 to i64
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+ ret float* %p
+}
+; CHECK-LABEL: zext_expr(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+
+; Per http://llvm.org/docs/LangRef.html#id181, the indices of an off-bound gep
+; should be considered sign-extended to the pointer size. Therefore,
+; gep base, (add i32 a, b) != gep (gep base, i32 a), i32 b
+; because
+; sext(a + b) != sext(a) + sext(b)
+;
+; This test verifies we do not illegitimately extract the 8 from
+; gep base, (i32 a + 8)
+define float* @i32_add(i32 %a) {
+entry:
+ %i = add i32 %a, 8
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
+ ret float* %p
+}
+; CHECK-LABEL: @i32_add(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK-NOT: getelementptr
+
+; Verifies that we compute the correct constant offset when the index is
+; sign-extended and then zero-extended. The old version of our code failed to
+; handle this case because it simply computed the constant offset as the
+; sign-extended value of the constant part of the GEP index.
+define float* @apint(i1 %a) {
+entry:
+ %0 = add nsw nuw i1 %a, 1
+ %1 = sext i1 %0 to i4
+ %2 = zext i4 %1 to i64 ; zext (sext i1 1 to i4) to i64 = 15
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
+ ret float* %p
+}
+; CHECK-LABEL: @apint(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 15
+
+; Do not trace into binary operators other than ADD, SUB, and OR.
+define float* @and(i64 %a) {
+entry:
+ %0 = shl i64 %a, 2
+ %1 = and i64 %0, 1
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
+ ret float* %p
+}
+; CHECK-LABEL: @and(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array
+; CHECK-NOT: getelementptr
diff --git a/test/Transforms/SimplifyCFG/PR17073.ll b/test/Transforms/SimplifyCFG/PR17073.ll
new file mode 100644
index 000000000000..8dc9fb28d617
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/PR17073.ll
@@ -0,0 +1,73 @@
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+
+; In PR17073 ( http://llvm.org/pr17073 ), we illegally hoisted an operation that can trap.
+; The first test confirms that we don't do that when the trapping op is reached by the current BB (block1).
+; The second test confirms that we don't do that when the trapping op is reached by the previous BB (entry).
+; The third test confirms that we can still do this optimization for an operation (add) that doesn't trap.
+; The tests must be complicated enough to prevent previous SimplifyCFG actions from optimizing away
+; the instructions that we're checking for.
+
+target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.9.0"
+
+@a = common global i32 0, align 4
+@b = common global i8 0, align 1
+
+; CHECK-LABEL: can_trap1
+; CHECK-NOT: or i1 %tobool, icmp eq (i32* bitcast (i8* @b to i32*), i32* @a)
+; CHECK-NOT: select i1 %tobool, i32* null, i32* select (i1 icmp eq (i64 urem (i64 2, i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64)), i64 0), i32* null, i32* @a)
+define i32* @can_trap1() {
+entry:
+ %0 = load i32* @a, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %exit, label %block1
+
+block1:
+ br i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a), label %exit, label %block2
+
+block2:
+ br label %exit
+
+exit:
+ %storemerge = phi i32* [ null, %entry ],[ null, %block2 ], [ select (i1 icmp eq (i64 urem (i64 2, i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64)), i64 0), i32* null, i32* @a), %block1 ]
+ ret i32* %storemerge
+}
+
+; CHECK-LABEL: can_trap2
+; CHECK-NOT: or i1 %tobool, icmp eq (i32* bitcast (i8* @b to i32*), i32* @a)
+; CHECK-NOT: select i1 %tobool, i32* select (i1 icmp eq (i64 urem (i64 2, i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64)), i64 0), i32* null, i32* @a), i32* null
+define i32* @can_trap2() {
+entry:
+ %0 = load i32* @a, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %exit, label %block1
+
+block1:
+ br i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a), label %exit, label %block2
+
+block2:
+ br label %exit
+
+exit:
+ %storemerge = phi i32* [ select (i1 icmp eq (i64 urem (i64 2, i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64)), i64 0), i32* null, i32* @a), %entry ],[ null, %block2 ], [ null, %block1 ]
+ ret i32* %storemerge
+}
+
+; CHECK-LABEL: cannot_trap
+; CHECK: select i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a), i32* select (i1 icmp eq (i64 add (i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64), i64 2), i64 0), i32* null, i32* @a), i32* null
+define i32* @cannot_trap() {
+entry:
+ %0 = load i32* @a, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %exit, label %block1
+
+block1:
+ br i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a), label %exit, label %block2
+
+block2:
+ br label %exit
+
+exit:
+ %storemerge = phi i32* [ null, %entry ],[ null, %block2 ], [ select (i1 icmp eq (i64 add (i64 2, i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64)), i64 0), i32* null, i32* @a), %block1 ]
+ ret i32* %storemerge
+}
diff --git a/test/Transforms/SimplifyCFG/SPARC/lit.local.cfg b/test/Transforms/SimplifyCFG/SPARC/lit.local.cfg
index 4d344fa91a9e..fa6a54e50132 100644
--- a/test/Transforms/SimplifyCFG/SPARC/lit.local.cfg
+++ b/test/Transforms/SimplifyCFG/SPARC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'Sparc' in targets:
+if not 'Sparc' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/SimplifyCFG/X86/lit.local.cfg b/test/Transforms/SimplifyCFG/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Transforms/SimplifyCFG/X86/lit.local.cfg
+++ b/test/Transforms/SimplifyCFG/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll b/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
new file mode 100644
index 000000000000..d0b8ab29d26b
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
@@ -0,0 +1,41 @@
+; RUN: opt -S -simplifycfg < %s -mtriple=x86_64-apple-darwin12.0.0 | FileCheck %s
+; rdar://17735071
+target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin12.0.0"
+
+; When the table index can't fit into i2, we should extend the type to i3.
+; CHECK-LABEL: @_TFO6reduce1E5toRawfS0_FT_Si
+; CHECK: entry:
+; CHECK-NEXT: sub i2 %0, -2
+; CHECK-NEXT: zext i2 %switch.tableidx to i3
+; CHECK-NEXT: getelementptr inbounds [4 x i64]* @switch.table, i32 0, i3 %switch.tableidx.zext
+; CHECK-NEXT: load i64* %switch.gep
+; CHECK-NEXT: ret i64 %switch.load
+define i64 @_TFO6reduce1E5toRawfS0_FT_Si(i2) {
+entry:
+ switch i2 %0, label %1 [
+ i2 0, label %2
+ i2 1, label %3
+ i2 -2, label %4
+ i2 -1, label %5
+ ]
+
+; <label>:1 ; preds = %entry
+ unreachable
+
+; <label>:2 ; preds = %2
+ br label %6
+
+; <label>:3 ; preds = %4
+ br label %6
+
+; <label>:4 ; preds = %6
+ br label %6
+
+; <label>:5 ; preds = %8
+ br label %6
+
+; <label>:6 ; preds = %3, %5, %7, %9
+ %7 = phi i64 [ 3, %5 ], [ 2, %4 ], [ 1, %3 ], [ 0, %2 ]
+ ret i64 %7
+}
diff --git a/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll b/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
index 368732711a70..51ced4099ac9 100644
--- a/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
+++ b/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
@@ -806,3 +806,170 @@ return:
; CHECK-NOT: @switch.table
; CHECK: switch i32 %c
}
+
+; If we can build a lookup table without any holes, we don't need a default result.
+declare void @exit(i32)
+define i32 @nodefaultnoholes(i32 %c) {
+entry:
+ switch i32 %c, label %sw.default [
+ i32 0, label %return
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ ]
+
+sw.bb1: br label %return
+sw.bb2: br label %return
+sw.bb3: br label %return
+sw.default: call void @exit(i32 1)
+ unreachable
+return:
+ %x = phi i32 [ -1, %sw.bb3 ], [ 0, %sw.bb2 ], [ 123, %sw.bb1 ], [ 55, %entry ]
+ ret i32 %x
+
+; CHECK-LABEL: @nodefaultnoholes(
+; CHECK: @switch.table
+; CHECK-NOT: switch i32
+}
+
+; This lookup table will have holes, so we need to test for the holes.
+define i32 @nodefaultwithholes(i32 %c) {
+entry:
+ switch i32 %c, label %sw.default [
+ i32 0, label %return
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ i32 5, label %sw.bb3
+ ]
+
+sw.bb1: br label %return
+sw.bb2: br label %return
+sw.bb3: br label %return
+sw.default: call void @exit(i32 1)
+ unreachable
+return:
+ %x = phi i32 [ -1, %sw.bb3 ], [ 0, %sw.bb2 ], [ 123, %sw.bb1 ], [ 55, %entry ]
+ ret i32 %x
+
+; CHECK-LABEL: @nodefaultwithholes(
+; CHECK: entry:
+; CHECK: br i1 %{{.*}}, label %switch.hole_check, label %sw.default
+; CHECK: switch.hole_check:
+; CHECK-NEXT: %switch.maskindex = trunc i32 %switch.tableidx to i6
+; CHECK-NEXT: %switch.shifted = lshr i6 -17, %switch.maskindex
+; The mask is binary 101111: bit k is set iff case k has a table entry (cases
+; 0, 1, 2, 3, and 5), and 101111 as a signed i6 is -17, the lshr constant above.
+; CHECK-NEXT: %switch.lobit = trunc i6 %switch.shifted to i1
+; CHECK-NEXT: br i1 %switch.lobit, label %switch.lookup, label %sw.default
+; CHECK-NOT: switch i32
+}
+
+; We don't build lookup tables with holes for switches with fewer than four cases.
+define i32 @threecasesholes(i32 %c) {
+entry:
+ switch i32 %c, label %sw.default [
+ i32 0, label %return
+ i32 1, label %sw.bb1
+ i32 3, label %sw.bb2
+ ]
+sw.bb1: br label %return
+sw.bb2: br label %return
+sw.default: br label %return
+return:
+ %x = phi i32 [ %c, %sw.default ], [ 5, %sw.bb2 ], [ 7, %sw.bb1 ], [ 9, %entry ]
+ ret i32 %x
+; CHECK-LABEL: @threecasesholes(
+; CHECK: switch i32
+; CHECK-NOT: @switch.table
+}
+
+; We build lookup tables for switches with three or more cases.
+define i32 @threecases(i32 %c) {
+entry:
+ switch i32 %c, label %sw.default [
+ i32 0, label %return
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ ]
+sw.bb1: br label %return
+sw.bb2: br label %return
+sw.default: br label %return
+return:
+ %x = phi i32 [ 3, %sw.default ], [ 5, %sw.bb2 ], [ 7, %sw.bb1 ], [ 9, %entry ]
+ ret i32 %x
+; CHECK-LABEL: @threecases(
+; CHECK-NOT: switch i32
+; CHECK: @switch.table
+}
+
+; We don't build tables for switches with two cases.
+define i32 @twocases(i32 %c) {
+entry:
+ switch i32 %c, label %sw.default [
+ i32 0, label %return
+ i32 1, label %sw.bb1
+ ]
+sw.bb1: br label %return
+sw.default: br label %return
+return:
+ %x = phi i32 [ 3, %sw.default ], [ 7, %sw.bb1 ], [ 9, %entry ]
+ ret i32 %x
+; CHECK-LABEL: @twocases(
+; CHECK: switch i32
+; CHECK-NOT: @switch.table
+}
+
+; Don't build tables for switches with TLS variables.
+@tls_a = thread_local global i32 0
+@tls_b = thread_local global i32 0
+@tls_c = thread_local global i32 0
+@tls_d = thread_local global i32 0
+define i32* @tls(i32 %x) {
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %return
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ ]
+sw.bb1:
+ br label %return
+sw.bb2:
+ br label %return
+sw.default:
+ br label %return
+return:
+ %retval.0 = phi i32* [ @tls_d, %sw.default ], [ @tls_c, %sw.bb2 ], [ @tls_b, %sw.bb1 ], [ @tls_a, %entry ]
+ ret i32* %retval.0
+; CHECK-LABEL: @tls(
+; CHECK: switch i32
+; CHECK-NOT: @switch.table
+}
+
+; Don't build tables for switches with dllimport variables.
+@dllimport_a = external dllimport global [3 x i32]
+@dllimport_b = external dllimport global [3 x i32]
+@dllimport_c = external dllimport global [3 x i32]
+@dllimport_d = external dllimport global [3 x i32]
+define i32* @dllimport(i32 %x) {
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %return
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ ]
+sw.bb1:
+ br label %return
+sw.bb2:
+ br label %return
+sw.default:
+ br label %return
+return:
+ %retval.0 = phi i32* [ getelementptr inbounds ([3 x i32]* @dllimport_d, i32 0, i32 0), %sw.default ],
+ [ getelementptr inbounds ([3 x i32]* @dllimport_c, i32 0, i32 0), %sw.bb2 ],
+ [ getelementptr inbounds ([3 x i32]* @dllimport_b, i32 0, i32 0), %sw.bb1 ],
+ [ getelementptr inbounds ([3 x i32]* @dllimport_a, i32 0, i32 0), %entry ]
+ ret i32* %retval.0
+; CHECK-LABEL: @dllimport(
+; CHECK: switch i32
+; CHECK-NOT: @switch.table
+}
diff --git a/test/Transforms/SimplifyCFG/basictest.ll b/test/Transforms/SimplifyCFG/basictest.ll
index 9c4edd68b800..d6958a9c111a 100644
--- a/test/Transforms/SimplifyCFG/basictest.ll
+++ b/test/Transforms/SimplifyCFG/basictest.ll
@@ -41,3 +41,33 @@ return: ; preds = %entry
; CHECK-LABEL: @test5(
; CHECK-NEXT: ret void
}
+
+
+; PR14893
+define i8 @test6f() {
+; CHECK-LABEL: @test6f
+; CHECK: alloca i8, align 1
+; CHECK-NEXT: call i8 @test6g
+; CHECK-NEXT: icmp eq i8 %tmp, 0
+; CHECK-NEXT: load i8* %r, align 1{{$}}
+
+bb0:
+ %r = alloca i8, align 1
+ %tmp = call i8 @test6g(i8* %r)
+ %tmp1 = icmp eq i8 %tmp, 0
+ br i1 %tmp1, label %bb2, label %bb1
+bb1:
+ %tmp3 = load i8* %r, align 1, !range !2, !tbaa !1
+ %tmp4 = icmp eq i8 %tmp3, 1
+ br i1 %tmp4, label %bb2, label %bb3
+bb2:
+ br label %bb3
+bb3:
+ %tmp6 = phi i8 [ 0, %bb2 ], [ 1, %bb1 ]
+ ret i8 %tmp6
+}
+declare i8 @test6g(i8*)
+
+!0 = metadata !{metadata !1, metadata !1, i64 0}
+!1 = metadata !{metadata !"foo"}
+!2 = metadata !{i8 0, i8 2}
diff --git a/test/Transforms/SimplifyCFG/extract-cost.ll b/test/Transforms/SimplifyCFG/extract-cost.ll
new file mode 100644
index 000000000000..9c867256e783
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/extract-cost.ll
@@ -0,0 +1,22 @@
+; RUN: opt -simplifycfg -S < %s | FileCheck %s
+
+declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) #1
+
+define i32 @f(i32 %a, i32 %b) #0 {
+entry:
+ %uadd = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+ %cmp = extractvalue { i32, i1 } %uadd, 1
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %0 = extractvalue { i32, i1 } %uadd, 0
+ br label %return
+
+return: ; preds = %entry, %if.end
+ %retval.0 = phi i32 [ %0, %if.end ], [ 0, %entry ]
+ ret i32 %retval.0
+
+; CHECK-LABEL: @f(
+; CHECK-NOT: phi
+; CHECK: select
+}
diff --git a/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll b/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll
new file mode 100644
index 000000000000..b388cc5830f4
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll
@@ -0,0 +1,40 @@
+; RUN: opt -simplifycfg -S %s | FileCheck %s
+; Make sure we don't speculate loads under ThreadSanitizer.
+@g = global i32 0, align 4
+
+define i32 @TestNoTsan(i32 %cond) nounwind readonly uwtable {
+entry:
+ %tobool = icmp eq i32 %cond, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then: ; preds = %entry
+ %0 = load i32* @g, align 4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval = phi i32 [ %0, %if.then ], [ 0, %entry ]
+ ret i32 %retval
+; CHECK-LABEL: @TestNoTsan
+; CHECK: %[[LOAD:[^ ]*]] = load
+; CHECK: select{{.*}}[[LOAD]]
+; CHECK: ret i32
+}
+
+define i32 @TestTsan(i32 %cond) nounwind readonly uwtable sanitize_thread {
+entry:
+ %tobool = icmp eq i32 %cond, 0
+ br i1 %tobool, label %return, label %if.then
+
+if.then: ; preds = %entry
+ %0 = load i32* @g, align 4
+ br label %return
+
+return: ; preds = %entry, %if.then
+ %retval = phi i32 [ %0, %if.then ], [ 0, %entry ]
+ ret i32 %retval
+; CHECK-LABEL: @TestTsan
+; CHECK: br i1
+; CHECK: load i32* @g
+; CHECK: br label
+; CHECK: ret i32
+}
diff --git a/test/Transforms/SimplifyCFG/preserve-branchweights.ll b/test/Transforms/SimplifyCFG/preserve-branchweights.ll
index 4022ed6927ee..bdd25ba80585 100644
--- a/test/Transforms/SimplifyCFG/preserve-branchweights.ll
+++ b/test/Transforms/SimplifyCFG/preserve-branchweights.ll
@@ -87,7 +87,7 @@ entry:
i32 2, label %sw.bb
i32 3, label %sw.bb1
], !prof !3
-; CHECK: test5
+; CHECK-LABEL: @test5(
; CHECK: switch i32 %N, label %sw2 [
; CHECK: i32 3, label %sw.bb1
; CHECK: i32 2, label %sw.bb
@@ -119,7 +119,7 @@ entry:
i32 2, label %sw.bb
i32 3, label %sw.bb1
], !prof !4
-; CHECK: test6
+; CHECK-LABEL: @test6(
; CHECK: switch i32 %N, label %sw.epilog
; CHECK: i32 3, label %sw.bb1
; CHECK: i32 2, label %sw.bb
@@ -266,7 +266,7 @@ lor.end:
call void @helper(i32 0) nounwind
ret void
-; CHECK: test10
+; CHECK-LABEL: @test10(
; CHECK: %x.off = add i32 %x, -1
; CHECK: %switch = icmp ult i32 %x.off, 3
; CHECK: br i1 %switch, label %lor.end, label %lor.rhs, !prof !8
@@ -279,6 +279,7 @@ define void @test11(i32 %x) nounwind {
i32 21, label %b
i32 24, label %c
], !prof !8
+; CHECK-LABEL: @test11(
; CHECK: %cond = icmp eq i32 %i, 24
; CHECK: br i1 %cond, label %c, label %a, !prof !9
@@ -293,6 +294,76 @@ c:
ret void
}
+;; test12 - Don't crash if the whole switch is removed
+define void @test12(i32 %M, i32 %N) nounwind uwtable {
+entry:
+ switch i32 %N, label %sw.bb [
+ i32 1, label %sw.bb
+ ], !prof !9
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @helper
+; CHECK-NEXT: ret void
+
+sw.bb:
+ call void @helper(i32 0)
+ br label %sw.epilog
+
+sw.epilog:
+ ret void
+}
+
+;; If every case is dead, make sure they are all removed. This used to
+;; crash trying to merge the metadata.
+define void @test13(i32 %x) nounwind {
+entry:
+ %i = shl i32 %x, 1
+ switch i32 %i, label %a [
+ i32 21, label %b
+ i32 25, label %c
+ ], !prof !8
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @helper
+; CHECK-NEXT: ret void
+
+a:
+ call void @helper(i32 0) nounwind
+ ret void
+b:
+ call void @helper(i32 1) nounwind
+ ret void
+c:
+ call void @helper(i32 2) nounwind
+ ret void
+}
+
+;; When folding branches to a common destination, the updated branch weights
+;; can exceed uint32 by more than a factor of 2. We should keep halving the
+;; weights until they fit into uint32.
+@max_regno = common global i32 0, align 4
+define void @test14(i32* %old, i32 %final) {
+; CHECK-LABEL: @test14
+; CHECK: br i1 %or.cond, label %for.exit, label %for.inc, !prof !10
+for.cond:
+ br label %for.cond2
+for.cond2:
+ %i.1 = phi i32 [ %inc19, %for.inc ], [ 0, %for.cond ]
+ %bit.0 = phi i32 [ %shl, %for.inc ], [ 1, %for.cond ]
+ %tobool = icmp eq i32 %bit.0, 0
+ br i1 %tobool, label %for.exit, label %for.body3, !prof !10
+for.body3:
+ %v3 = load i32* @max_regno, align 4
+ %cmp4 = icmp eq i32 %i.1, %v3
+ br i1 %cmp4, label %for.exit, label %for.inc, !prof !11
+for.inc:
+ %shl = shl i32 %bit.0, 1
+ %inc19 = add nsw i32 %i.1, 1
+ br label %for.cond2
+for.exit:
+ ret void
+}
+
!0 = metadata !{metadata !"branch_weights", i32 3, i32 5}
!1 = metadata !{metadata !"branch_weights", i32 1, i32 1}
!2 = metadata !{metadata !"branch_weights", i32 1, i32 2}
@@ -302,6 +373,9 @@ c:
!6 = metadata !{metadata !"branch_weights", i32 1, i32 3}
!7 = metadata !{metadata !"branch_weights", i32 33, i32 9, i32 8, i32 7}
!8 = metadata !{metadata !"branch_weights", i32 33, i32 9, i32 8}
+!9 = metadata !{metadata !"branch_weights", i32 7, i32 6}
+!10 = metadata !{metadata !"branch_weights", i32 672646, i32 21604207}
+!11 = metadata !{metadata !"branch_weights", i32 6960, i32 21597248}
; CHECK: !0 = metadata !{metadata !"branch_weights", i32 5, i32 11}
; CHECK: !1 = metadata !{metadata !"branch_weights", i32 1, i32 5}
@@ -313,4 +387,6 @@ c:
; CHECK: !7 = metadata !{metadata !"branch_weights", i32 17, i32 9, i32 8, i32 7, i32 17}
; CHECK: !8 = metadata !{metadata !"branch_weights", i32 24, i32 33}
; CHECK: !9 = metadata !{metadata !"branch_weights", i32 8, i32 33}
-; CHECK-NOT: !9
+;; The false weight prints out as a negative integer here, but inside llvm, we
+;; treat the weight as an unsigned integer.
+; CHECK: !10 = metadata !{metadata !"branch_weights", i32 112017436, i32 -735157296}
diff --git a/test/Transforms/SimplifyCFG/speculate-math.ll b/test/Transforms/SimplifyCFG/speculate-math.ll
new file mode 100644
index 000000000000..fa7976d0689f
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/speculate-math.ll
@@ -0,0 +1,58 @@
+; RUN: opt -S -simplifycfg -phi-node-folding-threshold=2 < %s | FileCheck %s
+
+declare float @llvm.sqrt.f32(float) nounwind readonly
+declare float @llvm.fma.f32(float, float, float) nounwind readonly
+declare float @llvm.fmuladd.f32(float, float, float) nounwind readonly
+
+; CHECK-LABEL: @sqrt_test(
+; CHECK: select
+define void @sqrt_test(float addrspace(1)* noalias nocapture %out, float %a) nounwind {
+entry:
+ %cmp.i = fcmp olt float %a, 0.000000e+00
+ br i1 %cmp.i, label %test_sqrt.exit, label %cond.else.i
+
+cond.else.i: ; preds = %entry
+ %0 = tail call float @llvm.sqrt.f32(float %a) nounwind readnone
+ br label %test_sqrt.exit
+
+test_sqrt.exit: ; preds = %cond.else.i, %entry
+ %cond.i = phi float [ %0, %cond.else.i ], [ 0x7FF8000000000000, %entry ]
+ store float %cond.i, float addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; CHECK-LABEL: @fma_test(
+; CHECK: select
+define void @fma_test(float addrspace(1)* noalias nocapture %out, float %a, float %b, float %c) nounwind {
+entry:
+ %cmp.i = fcmp olt float %a, 0.000000e+00
+ br i1 %cmp.i, label %test_fma.exit, label %cond.else.i
+
+cond.else.i: ; preds = %entry
+ %0 = tail call float @llvm.fma.f32(float %a, float %b, float %c) nounwind readnone
+ br label %test_fma.exit
+
+test_fma.exit: ; preds = %cond.else.i, %entry
+ %cond.i = phi float [ %0, %cond.else.i ], [ 0x7FF8000000000000, %entry ]
+ store float %cond.i, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; CHECK-LABEL: @fmuladd_test(
+; CHECK: select
+define void @fmuladd_test(float addrspace(1)* noalias nocapture %out, float %a, float %b, float %c) nounwind {
+entry:
+ %cmp.i = fcmp olt float %a, 0.000000e+00
+ br i1 %cmp.i, label %test_fmuladd.exit, label %cond.else.i
+
+cond.else.i: ; preds = %entry
+ %0 = tail call float @llvm.fmuladd.f32(float %a, float %b, float %c) nounwind readnone
+ br label %test_fmuladd.exit
+
+test_fmuladd.exit: ; preds = %cond.else.i, %entry
+ %cond.i = phi float [ %0, %cond.else.i ], [ 0x7FF8000000000000, %entry ]
+ store float %cond.i, float addrspace(1)* %out, align 4
+ ret void
+}
+
diff --git a/test/Transforms/SimplifyCFG/speculate-vector-ops.ll b/test/Transforms/SimplifyCFG/speculate-vector-ops.ll
new file mode 100644
index 000000000000..91972eb5dd9f
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/speculate-vector-ops.ll
@@ -0,0 +1,60 @@
+; RUN: opt -S -simplifycfg < %s | FileCheck %s
+
+define i32 @speculate_vector_extract(i32 %d, <4 x i32> %v) #0 {
+; CHECK-LABEL: @speculate_vector_extract(
+; CHECK-NOT: br
+entry:
+ %conv = insertelement <4 x i32> undef, i32 %d, i32 0
+ %conv2 = insertelement <4 x i32> %conv, i32 %d, i32 1
+ %conv3 = insertelement <4 x i32> %conv2, i32 %d, i32 2
+ %conv4 = insertelement <4 x i32> %conv3, i32 %d, i32 3
+ %tmp6 = add nsw <4 x i32> %conv4, <i32 0, i32 -1, i32 -2, i32 -3>
+ %cmp = icmp eq <4 x i32> %tmp6, zeroinitializer
+ %cmp.ext = sext <4 x i1> %cmp to <4 x i32>
+ %tmp8 = extractelement <4 x i32> %cmp.ext, i32 0
+ %tobool = icmp eq i32 %tmp8, 0
+ br i1 %tobool, label %cond.else, label %cond.then
+
+return: ; preds = %cond.end28
+ ret i32 %cond32
+
+cond.then: ; preds = %entry
+ %tmp10 = extractelement <4 x i32> %v, i32 0
+ br label %cond.end
+
+cond.else: ; preds = %entry
+ %tmp12 = extractelement <4 x i32> %v, i32 3
+ br label %cond.end
+
+cond.end: ; preds = %cond.else, %cond.then
+ %cond = phi i32 [ %tmp10, %cond.then ], [ %tmp12, %cond.else ]
+ %tmp14 = extractelement <4 x i32> %cmp.ext, i32 1
+ %tobool15 = icmp eq i32 %tmp14, 0
+ br i1 %tobool15, label %cond.else17, label %cond.then16
+
+cond.then16: ; preds = %cond.end
+ %tmp20 = extractelement <4 x i32> %v, i32 1
+ br label %cond.end18
+
+cond.else17: ; preds = %cond.end
+ br label %cond.end18
+
+cond.end18: ; preds = %cond.else17, %cond.then16
+ %cond22 = phi i32 [ %tmp20, %cond.then16 ], [ %cond, %cond.else17 ]
+ %tmp24 = extractelement <4 x i32> %cmp.ext, i32 2
+ %tobool25 = icmp eq i32 %tmp24, 0
+ br i1 %tobool25, label %cond.else27, label %cond.then26
+
+cond.then26: ; preds = %cond.end18
+ %tmp30 = extractelement <4 x i32> %v, i32 2
+ br label %cond.end28
+
+cond.else27: ; preds = %cond.end18
+ br label %cond.end28
+
+cond.end28: ; preds = %cond.else27, %cond.then26
+ %cond32 = phi i32 [ %tmp30, %cond.then26 ], [ %cond22, %cond.else27 ]
+ br label %return
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
index e9d93e834a50..5ae62af54581 100644
--- a/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
+++ b/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
@@ -65,7 +65,7 @@ define void @test5(i1 %C, i32* %P) {
entry:
br i1 %C, label %T, label %F
T:
- cmpxchg volatile i32* %P, i32 0, i32 1 seq_cst
+ cmpxchg volatile i32* %P, i32 0, i32 1 seq_cst seq_cst
unreachable
F:
ret void
diff --git a/test/Transforms/Sink/basic.ll b/test/Transforms/Sink/basic.ll
index 85ab3766002d..4aac6d613a8c 100644
--- a/test/Transforms/Sink/basic.ll
+++ b/test/Transforms/Sink/basic.ll
@@ -62,3 +62,82 @@ X: ; preds = %5, %3
ret i32 %R
}
+; We shouldn't sink constant-sized allocas from the entry block, since CodeGen
+; interprets allocas outside the entry block as dynamically sized stack objects.
+
+; CHECK-LABEL: @alloca_nosink
+; CHECK: entry:
+; CHECK-NEXT: alloca
+define i32 @alloca_nosink(i32 %a, i32 %b) {
+entry:
+ %0 = alloca i32
+ %1 = icmp ne i32 %a, 0
+ br i1 %1, label %if, label %endif
+
+if:
+ %2 = getelementptr i32* %0, i32 1
+ store i32 0, i32* %0
+ store i32 1, i32* %2
+ %3 = getelementptr i32* %0, i32 %b
+ %4 = load i32* %3
+ ret i32 %4
+
+endif:
+ ret i32 0
+}
+
+; Make sure we sink dynamically sized allocas.
+
+; CHECK-LABEL: @alloca_sink_dynamic
+; CHECK: entry:
+; CHECK-NOT: alloca
+; CHECK: if:
+; CHECK-NEXT: alloca
+define i32 @alloca_sink_dynamic(i32 %a, i32 %b, i32 %size) {
+entry:
+ %0 = alloca i32, i32 %size
+ %1 = icmp ne i32 %a, 0
+ br i1 %1, label %if, label %endif
+
+if:
+ %2 = getelementptr i32* %0, i32 1
+ store i32 0, i32* %0
+ store i32 1, i32* %2
+ %3 = getelementptr i32* %0, i32 %b
+ %4 = load i32* %3
+ ret i32 %4
+
+endif:
+ ret i32 0
+}
+
+; We also want to sink allocas that are not in the entry block. These are
+; already treated as dynamically sized stack objects, so sinking them does
+; no further damage.
+
+; CHECK-LABEL: @alloca_sink_nonentry
+; CHECK: if0:
+; CHECK-NOT: alloca
+; CHECK: if:
+; CHECK-NEXT: alloca
+define i32 @alloca_sink_nonentry(i32 %a, i32 %b, i32 %c) {
+entry:
+ %cmp = icmp ne i32 %c, 0
+ br i1 %cmp, label %endif, label %if0
+
+if0:
+ %0 = alloca i32
+ %1 = icmp ne i32 %a, 0
+ br i1 %1, label %if, label %endif
+
+if:
+ %2 = getelementptr i32* %0, i32 1
+ store i32 0, i32* %0
+ store i32 1, i32* %2
+ %3 = getelementptr i32* %0, i32 %b
+ %4 = load i32* %3
+ ret i32 %4
+
+endif:
+ ret i32 0
+}
diff --git a/test/Transforms/StripSymbols/2010-08-25-crash.ll b/test/Transforms/StripSymbols/2010-08-25-crash.ll
index 28784686bb68..b55ac3c08245 100644
--- a/test/Transforms/StripSymbols/2010-08-25-crash.ll
+++ b/test/Transforms/StripSymbols/2010-08-25-crash.ll
@@ -9,7 +9,7 @@ entry:
!0 = metadata !{i32 524334, metadata !10, metadata !1, metadata !"foo", metadata !"foo", metadata !"foo", i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, i32 ()* @foo, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 524329, metadata !10} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 524305, metadata !10, i32 12, metadata !"clang version 2.8 (trunk 112062)", i1 true, metadata !"", i32 0, metadata !11, metadata !11, metadata !12, metadata !13, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 524305, metadata !10, i32 12, metadata !"clang version 2.8 (trunk 112062)", i1 true, metadata !"", i32 0, metadata !11, metadata !11, metadata !12, metadata !13, null, metadata !"", i32 1} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 524309, metadata !10, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 524324, metadata !10, metadata !1, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
diff --git a/test/Transforms/StripSymbols/strip-dead-debug-info.ll b/test/Transforms/StripSymbols/strip-dead-debug-info.ll
index 2d687ae65470..8ce7b87c8250 100644
--- a/test/Transforms/StripSymbols/strip-dead-debug-info.ll
+++ b/test/Transforms/StripSymbols/strip-dead-debug-info.ll
@@ -30,7 +30,7 @@ attributes #2 = { nounwind readonly ssp }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!25}
-!0 = metadata !{i32 524305, metadata !1, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !23, metadata !24, null, metadata !""} ; [ DW_TAG_compile_unit ] [/tmp//g.c] [DW_LANG_C89]
+!0 = metadata !{i32 524305, metadata !1, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !23, metadata !24, null, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp//g.c] [DW_LANG_C89]
!1 = metadata !{metadata !"g.c", metadata !"/tmp/"}
!2 = metadata !{null}
!3 = metadata !{i32 524334, metadata !1, null, metadata !"bar", metadata !"bar", metadata !"", i32 5, metadata !4, i1 true, i1 true, i32 0, i32 0, null, i1 false, i1 true, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ] [line 5] [local] [def] [scope 0] [bar]
diff --git a/test/Transforms/TailCallElim/basic.ll b/test/Transforms/TailCallElim/basic.ll
index 35420ab08c33..8e9814b52bbc 100644
--- a/test/Transforms/TailCallElim/basic.ll
+++ b/test/Transforms/TailCallElim/basic.ll
@@ -143,3 +143,48 @@ cond_false:
call void @noarg()
ret i32* null
}
+
+; Don't tail call if a byval arg is captured.
+define void @test9(i32* byval %a) {
+; CHECK-LABEL: define void @test9(
+; CHECK: {{^ *}}call void @use(
+ call void @use(i32* %a)
+ ret void
+}
+
+%struct.X = type { i8* }
+
+declare void @ctor(%struct.X*)
+define void @test10(%struct.X* noalias sret %agg.result, i1 zeroext %b) {
+; CHECK-LABEL: @test10
+entry:
+ %x = alloca %struct.X, align 8
+ br i1 %b, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @ctor(%struct.X* %agg.result)
+; CHECK: tail call void @ctor
+ br label %return
+
+if.end:
+ call void @ctor(%struct.X* %x)
+; CHECK: call void @ctor
+ br label %return
+
+return:
+ ret void
+}
+
+declare void @test11_helper1(i8** nocapture, i8*)
+declare void @test11_helper2(i8*)
+define void @test11() {
+; CHECK-LABEL: @test11
+; CHECK-NOT: tail
+ %a = alloca i8*
+ %b = alloca i8
+ call void @test11_helper1(i8** %a, i8* %b) ; a = &b
+ %c = load i8** %a
+ call void @test11_helper2(i8* %c)
+; CHECK: call void @test11_helper2
+ ret void
+}
diff --git a/test/Transforms/TailDup/X86/lit.local.cfg b/test/Transforms/TailDup/X86/lit.local.cfg
index ba763cf03ffc..e71f3cc4c41e 100644
--- a/test/Transforms/TailDup/X86/lit.local.cfg
+++ b/test/Transforms/TailDup/X86/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Transforms/TailDup/lit.local.cfg b/test/Transforms/TailDup/lit.local.cfg
index 19840aa7574c..c8625f4d9d24 100644
--- a/test/Transforms/TailDup/lit.local.cfg
+++ b/test/Transforms/TailDup/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/Unit/lit.cfg b/test/Unit/lit.cfg
index 68ba0b36c4d0..e481dcc6a7bc 100644
--- a/test/Unit/lit.cfg
+++ b/test/Unit/lit.cfg
@@ -35,6 +35,11 @@ for symbolizer in ['ASAN_SYMBOLIZER_PATH', 'MSAN_SYMBOLIZER_PATH']:
if symbolizer in os.environ:
config.environment[symbolizer] = os.environ[symbolizer]
+# Win32 seeks DLLs along %PATH%.
+if sys.platform in ['win32', 'cygwin'] and os.path.isdir(config.shlibdir):
+ config.environment['PATH'] = os.path.pathsep.join((
+ config.shlibdir, config.environment['PATH']))
+
###
# Check that the object root is known.
@@ -80,11 +85,3 @@ if config.test_exec_root is None:
lit_config.note('using out-of-tree build at %r' % llvm_obj_root)
lit_config.load_config(config, site_cfg)
raise SystemExit
-
-# If necessary, point the dynamic loader at libLLVM.so.
-if config.enable_shared:
- shlibpath = config.environment.get(config.shlibpath_var,'')
- if shlibpath:
- shlibpath = os.pathsep + shlibpath
- shlibpath = config.shlibdir + shlibpath
- config.environment[config.shlibpath_var] = shlibpath
diff --git a/test/Unit/lit.site.cfg.in b/test/Unit/lit.site.cfg.in
index 7ff8155dff60..dd8de4895133 100644
--- a/test/Unit/lit.site.cfg.in
+++ b/test/Unit/lit.site.cfg.in
@@ -8,7 +8,6 @@ config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
config.llvm_build_mode = "@LLVM_BUILD_MODE@"
config.enable_shared = @ENABLE_SHARED@
config.shlibdir = "@SHLIBDIR@"
-config.shlibpath_var = "@SHLIBPATH_VAR@"
# Support substitution of the tools_dir and build_mode with user parameters.
# This is used when we can't determine the tool dir at configuration time.
diff --git a/test/Verifier/2010-08-07-PointerIntrinsic.ll b/test/Verifier/2010-08-07-PointerIntrinsic.ll
index a668d04213fa..427eb66aa7b6 100644
--- a/test/Verifier/2010-08-07-PointerIntrinsic.ll
+++ b/test/Verifier/2010-08-07-PointerIntrinsic.ll
@@ -1,6 +1,5 @@
-; RUN: not llvm-as < %s 2> %t
-; RUN: FileCheck %s --input-file=%t
-; CHECK: Broken module
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+; CHECK: assembly parsed, but does not verify as correct
; PR7316
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
diff --git a/test/Verifier/alias.ll b/test/Verifier/alias.ll
new file mode 100644
index 000000000000..ff02a37bab95
--- /dev/null
+++ b/test/Verifier/alias.ll
@@ -0,0 +1,27 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+
+declare void @f()
+@fa = alias void ()* @f
+; CHECK: Alias must point to a definition
+; CHECK-NEXT: @fa
+
+@g = external global i32
+@ga = alias i32* @g
+; CHECK: Alias must point to a definition
+; CHECK-NEXT: @ga
+
+
+@test2_a = alias i32* @test2_b
+@test2_b = alias i32* @test2_a
+; CHECK: Aliases cannot form a cycle
+; CHECK-NEXT: i32* @test2_a
+; CHECK-NEXT: Aliases cannot form a cycle
+; CHECK-NEXT: i32* @test2_b
+
+
+@test3_a = global i32 42
+@test3_b = alias weak i32* @test3_a
+@test3_c = alias i32* @test3_b
+; CHECK: Alias cannot point to a weak alias
+; CHECK-NEXT: i32* @test3_c
diff --git a/test/Verifier/aliasing-chain.ll b/test/Verifier/aliasing-chain.ll
deleted file mode 100644
index ae0b77fdc395..000000000000
--- a/test/Verifier/aliasing-chain.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
-; CHECK: Aliasing chain should end with function or global variable
-; Test that alising chain does not create a cycle
-
-@b1 = alias i32* @c1
-@c1 = alias i32* @b1
diff --git a/test/Verifier/bitcast-address-space-nested-global-cycle.ll b/test/Verifier/bitcast-address-space-nested-global-cycle.ll
index 0cee726a95cf..3c67aa9e7f2d 100644
--- a/test/Verifier/bitcast-address-space-nested-global-cycle.ll
+++ b/test/Verifier/bitcast-address-space-nested-global-cycle.ll
@@ -1,4 +1,6 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
+
+; CHECK: error: invalid cast opcode for cast from '%struct.Self1*' to '%struct.Self1 addrspace(1)*'
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
diff --git a/test/Verifier/bitcast-address-space-nested-global.ll b/test/Verifier/bitcast-address-space-nested-global.ll
index abe9d947f7ba..a79669058e58 100644
--- a/test/Verifier/bitcast-address-space-nested-global.ll
+++ b/test/Verifier/bitcast-address-space-nested-global.ll
@@ -1,4 +1,6 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
+
+; CHECK: error: invalid cast opcode for cast from '%struct.Self1*' to '%struct.Self1 addrspace(1)*'
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
diff --git a/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll b/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll
index ed71afaef9a9..ef38d37068ba 100644
--- a/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll
+++ b/test/Verifier/bitcast-address-space-through-constant-inttoptr-inside-gep-instruction.ll
@@ -1,6 +1,9 @@
-; RUN: not llvm-as -verify -disable-output < %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
+
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
+; CHECK: error: invalid cast opcode for cast from 'i32 addrspace(1)*' to 'i32 addrspace(2)*'
+
; Check that we can find inttoptr -> illegal bitcasts when hidden
; inside constantexpr pointer operands
define i32 addrspace(2)* @illegal_bitcast_inttoptr_as_1_to_2_inside_gep() {
diff --git a/test/Verifier/bitcast-address-space-through-constant-inttoptr.ll b/test/Verifier/bitcast-address-space-through-constant-inttoptr.ll
index e65c71e8be0e..1affda418aa3 100644
--- a/test/Verifier/bitcast-address-space-through-constant-inttoptr.ll
+++ b/test/Verifier/bitcast-address-space-through-constant-inttoptr.ll
@@ -1,10 +1,12 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
%struct.Foo = type { i32 addrspace(1)* }
+; CHECK: error: invalid cast opcode for cast from 'i32 addrspace(2)*' to 'i32 addrspace(1)*'
+
; Make sure we still reject the bitcast when the source is a inttoptr (constant int) in a global initializer
@bitcast_after_constant_inttoptr_initializer = global %struct.Foo { i32 addrspace(1)* bitcast (i32 addrspace(2)* inttoptr (i8 7 to i32 addrspace(2)*) to i32 addrspace(1)*) }
diff --git a/test/Verifier/bitcast-address-space-through-gep-2.ll b/test/Verifier/bitcast-address-space-through-gep-2.ll
index 3b77d9a30223..2ee394206aef 100644
--- a/test/Verifier/bitcast-address-space-through-gep-2.ll
+++ b/test/Verifier/bitcast-address-space-through-gep-2.ll
@@ -1,4 +1,6 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
+
+; CHECK: error: invalid cast opcode for cast from 'i32 addrspace(2)*' to 'i32 addrspace(3)*'
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-p3:8:8:8-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
diff --git a/test/Verifier/bitcast-address-space-through-gep.ll b/test/Verifier/bitcast-address-space-through-gep.ll
index 8e950dc1e6bd..9494420841dc 100644
--- a/test/Verifier/bitcast-address-space-through-gep.ll
+++ b/test/Verifier/bitcast-address-space-through-gep.ll
@@ -1,4 +1,4 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
@@ -7,6 +7,8 @@ target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-i1:8:32-i8:8:32-i16:16:32
@as2_array = addrspace(2) global [32 x i32] zeroinitializer
+; CHECK: error: invalid cast opcode for cast from 'i32 addrspace(2)*' to 'i32 addrspace(1)*'
+
; Make sure we still reject the bitcast after the value is accessed through a GEP
@bitcast_after_gep = global %struct.Foo { i32 addrspace(1)* bitcast (i32 addrspace(2)* getelementptr ([32 x i32] addrspace(2)* @as2_array, i32 0, i32 8) to i32 addrspace(1)*) }
diff --git a/test/Verifier/bitcast-address-space-through-inttoptr.ll b/test/Verifier/bitcast-address-space-through-inttoptr.ll
index bec40488a13c..5e7b66feca5f 100644
--- a/test/Verifier/bitcast-address-space-through-inttoptr.ll
+++ b/test/Verifier/bitcast-address-space-through-inttoptr.ll
@@ -1,7 +1,8 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
+; CHECK: error: invalid cast opcode for cast from 'i32 addrspace(1)*' to 'i32 addrspace(2)*'
define i32 addrspace(2)* @illegal_bitcast_as_1_to_2_inttoptr() {
%cast = bitcast i32 addrspace(1)* inttoptr (i32 5 to i32 addrspace(1)*) to i32 addrspace(2)*
ret i32 addrspace(2)* %cast
diff --git a/test/Verifier/bitcast-address-spaces.ll b/test/Verifier/bitcast-address-spaces.ll
index 450841740b1d..7f37df66b306 100644
--- a/test/Verifier/bitcast-address-spaces.ll
+++ b/test/Verifier/bitcast-address-spaces.ll
@@ -1,7 +1,8 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
+; CHECK: error: invalid cast opcode for cast from 'i32*' to 'i32 addrspace(1)*'
define i32 addrspace(1)* @illegal_bitcast_as_0_to_1(i32 addrspace(0) *%p) {
%cast = bitcast i32 addrspace(0)* %p to i32 addrspace(1)*
ret i32 addrspace(1)* %cast
diff --git a/test/Verifier/bitcast-alias-address-space.ll b/test/Verifier/bitcast-alias-address-space.ll
index 9cad8ab3779a..d9794d9e338a 100644
--- a/test/Verifier/bitcast-alias-address-space.ll
+++ b/test/Verifier/bitcast-alias-address-space.ll
@@ -1,4 +1,6 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
+
+; CHECK: error: invalid cast opcode for cast from 'i32 addrspace(2)*' to 'i32 addrspace(1)*'
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
diff --git a/test/Verifier/bitcast-vector-pointer-as.ll b/test/Verifier/bitcast-vector-pointer-as.ll
index 89070e5a8900..bbf6ace4e770 100644
--- a/test/Verifier/bitcast-vector-pointer-as.ll
+++ b/test/Verifier/bitcast-vector-pointer-as.ll
@@ -1,7 +1,9 @@
-; RUN: not llvm-as -verify -disable-output %s
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-p1:16:16:16-p2:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n8:16:32"
+; CHECK: error: invalid cast opcode for cast from '<4 x i32*>' to '<4 x i32 addrspace(1)*>'
+
define <4 x i32 addrspace(1)*> @vector_illegal_bitcast_as_0_to_1(<4 x i32 addrspace(0)*> %p) {
%cast = bitcast <4 x i32 addrspace(0)*> %p to <4 x i32 addrspace(1)*>
ret <4 x i32 addrspace(1)*> %cast
diff --git a/test/Verifier/comdat.ll b/test/Verifier/comdat.ll
new file mode 100644
index 000000000000..ca47429b1086
--- /dev/null
+++ b/test/Verifier/comdat.ll
@@ -0,0 +1,5 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+$v = comdat any
+@v = common global i32 0, comdat $v
+; CHECK: 'common' global may not be in a Comdat!
diff --git a/test/Verifier/comdat2.ll b/test/Verifier/comdat2.ll
new file mode 100644
index 000000000000..d78030c12af8
--- /dev/null
+++ b/test/Verifier/comdat2.ll
@@ -0,0 +1,5 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+$v = comdat any
+@v = private global i32 0, comdat $v
+; CHECK: comdat global value has private linkage
diff --git a/test/Verifier/global-ctors.ll b/test/Verifier/global-ctors.ll
new file mode 100644
index 000000000000..76570c516f10
--- /dev/null
+++ b/test/Verifier/global-ctors.ll
@@ -0,0 +1,11 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+@llvm.global_ctors = appending global [1 x { i32, void()*, i8 } ] [
+ { i32, void()*, i8 } { i32 65535, void ()* null, i8 0 }
+]
+; CHECK: wrong type for intrinsic global variable
+
+@llvm.global_dtors = appending global [1 x { i32, void()*, i8*, i8 } ] [
+ { i32, void()*, i8*, i8 } { i32 65535, void ()* null, i8* null, i8 0}
+]
+; CHECK: wrong type for intrinsic global variable
diff --git a/test/Verifier/inalloca-vararg.ll b/test/Verifier/inalloca-vararg.ll
new file mode 100755
index 000000000000..5099fd19927e
--- /dev/null
+++ b/test/Verifier/inalloca-vararg.ll
@@ -0,0 +1,9 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+declare void @h(i32, ...)
+define void @i() {
+ %args = alloca inalloca i32
+ call void (i32, ...)* @h(i32 1, i32* inalloca %args, i32 3)
+; CHECK: inalloca isn't on the last argument!
+ ret void
+}
diff --git a/test/Verifier/inalloca1.ll b/test/Verifier/inalloca1.ll
new file mode 100644
index 000000000000..38b5507abba5
--- /dev/null
+++ b/test/Verifier/inalloca1.ll
@@ -0,0 +1,22 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+declare void @a(i64* byval inalloca %p)
+; CHECK: Attributes {{.*}} are incompatible
+
+declare void @b(i64* inreg inalloca %p)
+; CHECK: Attributes {{.*}} are incompatible
+
+declare void @c(i64* sret inalloca %p)
+; CHECK: Attributes {{.*}} are incompatible
+
+declare void @d(i64* nest inalloca %p)
+; CHECK: Attributes {{.*}} are incompatible
+
+declare void @e(i64* readonly inalloca %p)
+; CHECK: Attributes {{.*}} are incompatible
+
+declare void @f(void ()* inalloca %p)
+; CHECK: do not support unsized types
+
+declare void @g(i32* inalloca %p, i32 %p2)
+; CHECK: inalloca isn't on the last parameter!
diff --git a/test/Verifier/inalloca2.ll b/test/Verifier/inalloca2.ll
new file mode 100644
index 000000000000..12a454999285
--- /dev/null
+++ b/test/Verifier/inalloca2.ll
@@ -0,0 +1,39 @@
+; This used to be invalid, but now it's valid. Ensure the verifier
+; doesn't reject it.
+; RUN: llvm-as %s -o /dev/null
+
+declare void @doit(i64* inalloca %a)
+
+define void @a() {
+entry:
+ %a = alloca inalloca [2 x i32]
+ %b = bitcast [2 x i32]* %a to i64*
+ call void @doit(i64* inalloca %b)
+ ret void
+}
+
+define void @b() {
+entry:
+ %a = alloca inalloca i64
+ call void @doit(i64* inalloca %a)
+ call void @doit(i64* inalloca %a)
+ ret void
+}
+
+define void @c(i1 %cond) {
+entry:
+ br i1 %cond, label %if, label %else
+
+if:
+ %a = alloca inalloca i64
+ br label %call
+
+else:
+ %b = alloca inalloca i64
+ br label %call
+
+call:
+ %args = phi i64* [ %a, %if ], [ %b, %else ]
+ call void @doit(i64* inalloca %args)
+ ret void
+}
diff --git a/test/Verifier/inalloca3.ll b/test/Verifier/inalloca3.ll
new file mode 100644
index 000000000000..c09ce100849b
--- /dev/null
+++ b/test/Verifier/inalloca3.ll
@@ -0,0 +1,13 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+
+declare void @doit(i64* inalloca %a)
+
+define void @a() {
+entry:
+ %a = alloca [2 x i32]
+ %b = bitcast [2 x i32]* %a to i64*
+ call void @doit(i64* inalloca %b)
+; CHECK: inalloca argument for call has mismatched alloca
+ ret void
+}
diff --git a/test/Verifier/jumptable.ll b/test/Verifier/jumptable.ll
new file mode 100644
index 000000000000..81984eeb187f
--- /dev/null
+++ b/test/Verifier/jumptable.ll
@@ -0,0 +1,8 @@
+; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
+
+define i32 @f() jumptable {
+ ret i32 0
+}
+
+; CHECK: Attribute 'jumptable' requires 'unnamed_addr'
+; CHECK: i32 ()* @f
diff --git a/test/Verifier/musttail-invalid.ll b/test/Verifier/musttail-invalid.ll
new file mode 100644
index 000000000000..e5f9a404b9e3
--- /dev/null
+++ b/test/Verifier/musttail-invalid.ll
@@ -0,0 +1,82 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+; Each musttail call should fail to validate.
+
+declare x86_stdcallcc void @cc_mismatch_callee()
+define void @cc_mismatch() {
+; CHECK: mismatched calling conv
+ musttail call x86_stdcallcc void @cc_mismatch_callee()
+ ret void
+}
+
+declare void @more_parms_callee(i32)
+define void @more_parms() {
+; CHECK: mismatched parameter counts
+ musttail call void @more_parms_callee(i32 0)
+ ret void
+}
+
+declare void @mismatched_intty_callee(i8)
+define void @mismatched_intty(i32) {
+; CHECK: mismatched parameter types
+ musttail call void @mismatched_intty_callee(i8 0)
+ ret void
+}
+
+declare void @mismatched_vararg_callee(i8*, ...)
+define void @mismatched_vararg(i8*) {
+; CHECK: mismatched varargs
+ musttail call void (i8*, ...)* @mismatched_vararg_callee(i8* null)
+ ret void
+}
+
+; We would make this an implicit sret parameter, which would disturb the
+; tail call.
+declare { i32, i32, i32 } @mismatched_retty_callee(i32)
+define void @mismatched_retty(i32) {
+; CHECK: mismatched return types
+ musttail call { i32, i32, i32 } @mismatched_retty_callee(i32 0)
+ ret void
+}
+
+declare void @mismatched_byval_callee({ i32 }*)
+define void @mismatched_byval({ i32 }* byval %a) {
+; CHECK: mismatched ABI impacting function attributes
+ musttail call void @mismatched_byval_callee({ i32 }* %a)
+ ret void
+}
+
+declare void @mismatched_inreg_callee(i32 inreg)
+define void @mismatched_inreg(i32 %a) {
+; CHECK: mismatched ABI impacting function attributes
+ musttail call void @mismatched_inreg_callee(i32 inreg %a)
+ ret void
+}
+
+declare void @mismatched_sret_callee(i32* sret)
+define void @mismatched_sret(i32* %a) {
+; CHECK: mismatched ABI impacting function attributes
+ musttail call void @mismatched_sret_callee(i32* sret %a)
+ ret void
+}
+
+declare void @mismatched_alignment_callee(i32* byval align 8)
+define void @mismatched_alignment(i32* byval align 4 %a) {
+; CHECK: mismatched ABI impacting function attributes
+ musttail call void @mismatched_alignment_callee(i32* byval align 8 %a)
+ ret void
+}
+
+declare i32 @not_tail_pos_callee()
+define i32 @not_tail_pos() {
+; CHECK: musttail call must be precede a ret with an optional bitcast
+ %v = musttail call i32 @not_tail_pos_callee()
+ %w = add i32 %v, 1
+ ret i32 %w
+}
+
+define void @inline_asm() {
+; CHECK: cannot use musttail call with inline asm
+ musttail call void asm "ret", ""()
+ ret void
+}
diff --git a/test/Verifier/musttail-valid.ll b/test/Verifier/musttail-valid.ll
new file mode 100644
index 000000000000..815d77a13e35
--- /dev/null
+++ b/test/Verifier/musttail-valid.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as %s -o /dev/null
+
+; Should assemble without error.
+
+declare void @similar_param_ptrty_callee(i8*)
+define void @similar_param_ptrty(i32*) {
+ musttail call void @similar_param_ptrty_callee(i8* null)
+ ret void
+}
+
+declare i8* @similar_ret_ptrty_callee()
+define i32* @similar_ret_ptrty() {
+ %v = musttail call i8* @similar_ret_ptrty_callee()
+ %w = bitcast i8* %v to i32*
+ ret i32* %w
+}
diff --git a/test/Verifier/range-1.ll b/test/Verifier/range-1.ll
index b6a75d13bba0..f15ca3f74065 100644
--- a/test/Verifier/range-1.ll
+++ b/test/Verifier/range-1.ll
@@ -6,7 +6,7 @@ entry:
ret void
}
!0 = metadata !{i8 0, i8 1}
-; CHECK: Ranges are only for loads!
+; CHECK: Ranges are only for loads, calls and invokes!
; CHECK-NEXT: store i8 0, i8* %x, align 1, !range !0
define i8 @f2(i8* %x) {
diff --git a/test/Verifier/range-2.ll b/test/Verifier/range-2.ll
index 8d85d1915195..1d2e0575d76a 100644
--- a/test/Verifier/range-2.ll
+++ b/test/Verifier/range-2.ll
@@ -34,3 +34,33 @@ entry:
ret i8 %y
}
!4 = metadata !{i8 -1, i8 0, i8 1, i8 -2}
+
+; We can annotate the range of the return value of a CALL.
+define void @call_all(i8* %x) {
+entry:
+ %v1 = call i8 @f1(i8* %x), !range !0
+ %v2 = call i8 @f2(i8* %x), !range !1
+ %v3 = call i8 @f3(i8* %x), !range !2
+ %v4 = call i8 @f4(i8* %x), !range !3
+ %v5 = call i8 @f5(i8* %x), !range !4
+ ret void
+}
+
+; We can annotate the range of the return value of an INVOKE.
+define void @invoke_all(i8* %x) {
+entry:
+ %v1 = invoke i8 @f1(i8* %x) to label %cont unwind label %lpad, !range !0
+ %v2 = invoke i8 @f2(i8* %x) to label %cont unwind label %lpad, !range !1
+ %v3 = invoke i8 @f3(i8* %x) to label %cont unwind label %lpad, !range !2
+ %v4 = invoke i8 @f4(i8* %x) to label %cont unwind label %lpad, !range !3
+ %v5 = invoke i8 @f5(i8* %x) to label %cont unwind label %lpad, !range !4
+
+cont:
+ ret void
+
+lpad:
+ %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ filter [0 x i8*] zeroinitializer
+ ret void
+}
+declare i32 @__gxx_personality_v0(...)
diff --git a/test/Verifier/recursive-type-1.ll b/test/Verifier/recursive-type-1.ll
new file mode 100644
index 000000000000..4a3995759562
--- /dev/null
+++ b/test/Verifier/recursive-type-1.ll
@@ -0,0 +1,12 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+%rt2 = type { i32, { i8, %rt2, i8 }, i32 }
+
+define i32 @main() nounwind {
+entry:
+ ; Check that recursive types trigger an error instead of segfaulting, when
+ ; the recursion isn't through a pointer to the type.
+ ; CHECK: Cannot allocate unsized type
+ %0 = alloca %rt2
+ ret i32 0
+}
diff --git a/test/Verifier/recursive-type-2.ll b/test/Verifier/recursive-type-2.ll
new file mode 100644
index 000000000000..5f2f66fa1b11
--- /dev/null
+++ b/test/Verifier/recursive-type-2.ll
@@ -0,0 +1,14 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+%rt1 = type { i32, { i8, %rt2, i8 }, i32 }
+%rt2 = type { i64, { i6, %rt3 } }
+%rt3 = type { %rt1 }
+
+define i32 @main() nounwind {
+entry:
+ ; Check that mutually recursive types trigger an error instead of segfaulting,
+ ; when the recursion isn't through a pointer to the type.
+ ; CHECK: Cannot allocate unsized type
+ %0 = alloca %rt2
+ ret i32 0
+}
diff --git a/test/Verifier/recursive-type-3.ll b/test/Verifier/recursive-type-3.ll
new file mode 100644
index 000000000000..8968fb5eb610
--- /dev/null
+++ b/test/Verifier/recursive-type-3.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-as %s -o /dev/null 2>&1
+
+%rt2 = type { i32, { i8, %rt2*, i8 }, i32 }
+
+define i32 @main() nounwind {
+entry:
+ ; Check that linked-list-style recursive types, where the recursion is through
+ ; a pointer to the type, are valid for an alloca.
+ %0 = alloca %rt2
+ ret i32 0
+}
diff --git a/test/Verifier/sret.ll b/test/Verifier/sret.ll
new file mode 100644
index 000000000000..1ddbf1f1a3bc
--- /dev/null
+++ b/test/Verifier/sret.ll
@@ -0,0 +1,7 @@
+; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s
+
+declare void @a(i32* sret %a, i32* sret %b)
+; CHECK: Cannot have multiple 'sret' parameters!
+
+declare void @b(i32* %a, i32* %b, i32* sret %c)
+; CHECK: Attribute 'sret' is not on first or second parameter!
diff --git a/test/Verifier/varargs-intrinsic.ll b/test/Verifier/varargs-intrinsic.ll
index f6d0a7084c72..2fff1db7d0ce 100644
--- a/test/Verifier/varargs-intrinsic.ll
+++ b/test/Verifier/varargs-intrinsic.ll
@@ -1,16 +1,16 @@
; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
-declare void @llvm.experimental.stackmap(i32, i32)
+declare void @llvm.experimental.stackmap(i64, i32)
declare void @llvm.donothing(...)
define void @foo1() {
- call void @llvm.experimental.stackmap(i32 0, i32 12)
+ call void @llvm.experimental.stackmap(i64 0, i32 12)
; CHECK: Callsite was not defined with variable arguments!
ret void
}
define void @foo2() {
- call void (...)* @llvm.donothing(i32 0, i64 1)
+ call void (...)* @llvm.donothing(i64 0, i64 1)
; CHECK: Intrinsic was not defined with variable arguments!
ret void
}
diff --git a/test/lit.cfg b/test/lit.cfg
index df1f4a101965..4f015267c619 100644
--- a/test/lit.cfg
+++ b/test/lit.cfg
@@ -95,6 +95,11 @@ for symbolizer in ['ASAN_SYMBOLIZER_PATH', 'MSAN_SYMBOLIZER_PATH']:
if symbolizer in os.environ:
config.environment[symbolizer] = os.environ[symbolizer]
+# Propagate options for sanitizers.
+for options in ['ASAN_OPTIONS']:
+ if options in os.environ:
+ config.environment[options] = os.environ[options]
+
###
import os
@@ -156,6 +161,12 @@ if re.search(r'cygwin|mingw32|win32', config.host_triple):
lli_mcjit += ' -mtriple='+config.host_triple+'-elf'
config.substitutions.append( ('%lli_mcjit', lli_mcjit) )
+# Similarly, have a macro to use llc with DWARF even when the host is win32.
+llc_dwarf = 'llc'
+if re.search(r'win32', config.target_triple):
+ llc_dwarf += ' -mtriple='+config.target_triple.replace('-win32', '-mingw32')
+config.substitutions.append( ('%llc_dwarf', llc_dwarf) )
+
# Provide a substition for those tests that need to run the jit to obtain data
# but simply want use the currently considered most reliable jit for platform
# FIXME: ppc32 is not ready for mcjit.
@@ -188,6 +199,8 @@ else:
config.substitutions.append( ('%ocamlopt', config.ocamlopt_executable) )
config.substitutions.append( ('%llvmshlibdir', config.llvm_shlib_dir) )
config.substitutions.append( ('%shlibext', config.llvm_shlib_ext) )
+config.substitutions.append( ('%exeext', config.llvm_exe_ext) )
+config.substitutions.append( ('%python', config.python_executable) )
# For each occurrence of an llvm tool name as its own word, replace it
# with the full path to the build directory holding that tool. This
@@ -195,22 +208,17 @@ config.substitutions.append( ('%shlibext', config.llvm_shlib_ext) )
# tools that might happen to be in the user's PATH. Thus this list
# includes every tool placed in $(LLVM_OBJ_ROOT)/$(BuildMode)/bin
# (llvm_tools_dir in lit parlance).
- # Don't match 'bugpoint-' or 'clang-'.
- # Don't match '/clang' or '-clang'.
-if os.pathsep == ';':
- pathext = os.environ.get('PATHEXT', '').split(';')
-else:
- pathext = ['']
-# Regex to reject matching a hyphen
-NOHYPHEN = r"(?<!-)"
+
+# Avoid matching RUN line fragments that are actually part of
+# path names or options or whatever.
+# The regex is a pre-assertion to avoid matching a preceding
+# dot, hyphen, caret, or slash (.foo, -foo, etc.). Some patterns
+# also have a post-assertion to not match a trailing hyphen (foo-).
+NOJUNK = r"(?<!\.|-|\^|/)"
for pattern in [r"\bbugpoint\b(?!-)",
- r"(?<!/|-)\bclang\b(?!-)",
- r"\bgold\b",
- # Match llc but not -llc
- NOHYPHEN + r"\bllc\b",
+ NOJUNK + r"\bllc\b",
r"\blli\b",
- r"\bllvm-PerfectShuffle\b",
r"\bllvm-ar\b",
r"\bllvm-as\b",
r"\bllvm-bcanalyzer\b",
@@ -220,31 +228,22 @@ for pattern in [r"\bbugpoint\b(?!-)",
r"\bllvm-dis\b",
r"\bllvm-dwarfdump\b",
r"\bllvm-extract\b",
- r"\bllvm-jistlistener\b",
r"\bllvm-link\b",
r"\bllvm-lto\b",
r"\bllvm-mc\b",
r"\bllvm-mcmarkup\b",
r"\bllvm-nm\b",
r"\bllvm-objdump\b",
+ r"\bllvm-profdata\b",
r"\bllvm-ranlib\b",
r"\bllvm-readobj\b",
r"\bllvm-rtdyld\b",
- r"\bllvm-shlib\b",
r"\bllvm-size\b",
r"\bllvm-tblgen\b",
r"\bllvm-c-test\b",
- # Match llvmc but not -llvmc
- NOHYPHEN + r"\bllvmc\b",
- # Match lto but not -lto
- NOHYPHEN + r"\blto\b",
r"\bmacho-dump\b",
- # Don't match '.opt', '-opt', '^opt' or '/opt'.
- r"(?<!\.|-|\^|/)\bopt\b",
+ NOJUNK + r"\bopt\b",
r"\bFileCheck\b",
- r"\bFileUpdate\b",
- r"\bc-index-test\b",
- r"\bfpcmp\b",
r"\bobj2yaml\b",
r"\byaml2obj\b",
# Handle these specially as they are strings searched
@@ -255,15 +254,20 @@ for pattern in [r"\bbugpoint\b(?!-)",
# name being surrounded by \b word match operators. If the
# pattern starts with "| ", include it in the string to be
# substituted.
- substitution = re.sub(r"^(\\)?((\| )?)\W+b([0-9A-Za-z-_]+)\\b\W*$",
- r"\2" + llvm_tools_dir + "/" + r"\4",
+ tool_match = re.match(r"^(\\)?((\| )?)\W+b([0-9A-Za-z-_]+)\\b\W*$",
pattern)
- for ext in pathext:
- substitution_ext = substitution + ext
- if os.path.exists(substitution_ext):
- substitution = substitution_ext
- break
- config.substitutions.append((pattern, substitution))
+ tool_pipe = tool_match.group(2)
+ tool_name = tool_match.group(4)
+ tool_path = lit.util.which(tool_name, llvm_tools_dir)
+ if not tool_path:
+ # Warn, but still provide a substitution.
+ lit_config.note('Did not find ' + tool_name + ' in ' + llvm_tools_dir)
+ tool_path = llvm_tools_dir + '/' + tool_name
+ config.substitutions.append((pattern, tool_pipe + tool_path))
+
+### Targets
+
+config.targets = frozenset(config.targets_to_build.split())
### Features
@@ -298,6 +302,8 @@ if not 'hexagon' in config.target_triple:
if config.have_zlib == "1":
config.available_features.add("zlib")
+else:
+ config.available_features.add("nozlib")
# Native compilation: host arch == target arch
# FIXME: Consider cases that target can be executed
@@ -305,18 +311,19 @@ if config.have_zlib == "1":
if config.host_triple == config.target_triple:
config.available_features.add("native")
-# llc knows whether he is compiled with -DNDEBUG.
+# Ask llvm-config about assertion mode.
import subprocess
try:
- llc_cmd = subprocess.Popen([os.path.join(llvm_tools_dir, 'llc'), '-version'],
- stdout = subprocess.PIPE)
+ llvm_config_cmd = subprocess.Popen(
+ [os.path.join(llvm_tools_dir, 'llvm-config'), '--assertion-mode'],
+ stdout = subprocess.PIPE)
except OSError:
- print("Could not find llc in " + llvm_tools_dir)
+ print("Could not find llvm-config in " + llvm_tools_dir)
exit(42)
-if re.search(r'with assertions', llc_cmd.stdout.read().decode('ascii')):
+if re.search(r'ON', llvm_config_cmd.stdout.read().decode('ascii')):
config.available_features.add('asserts')
-llc_cmd.wait()
+llvm_config_cmd.wait()
if 'darwin' == sys.platform:
try:
@@ -329,6 +336,10 @@ if 'darwin' == sys.platform:
config.available_features.add('fma3')
sysctl_cmd.wait()
+# .debug_frame is not emitted for targeting Windows x64.
+if not re.match(r'^x86_64.*-(mingw32|win32)', config.target_triple):
+ config.available_features.add('debug_frame')
+
# Check if we should use gmalloc.
use_gmalloc_str = lit_config.params.get('use_gmalloc', None)
if use_gmalloc_str is not None:
diff --git a/test/lit.site.cfg.in b/test/lit.site.cfg.in
index 72fd9c9ff785..4c0bb2e47d36 100644
--- a/test/lit.site.cfg.in
+++ b/test/lit.site.cfg.in
@@ -9,6 +9,7 @@ config.llvm_obj_root = "@LLVM_BINARY_DIR@"
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
config.llvm_shlib_dir = "@SHLIBDIR@"
config.llvm_shlib_ext = "@SHLIBEXT@"
+config.llvm_exe_ext = "@EXEEXT@"
config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@"
config.python_executable = "@PYTHON_EXECUTABLE@"
config.ocamlopt_executable = "@OCAMLOPT@"
@@ -26,6 +27,7 @@ config.have_zlib = "@HAVE_LIBZ@"
# used when we can't determine the tool dir at configuration time.
try:
config.llvm_tools_dir = config.llvm_tools_dir % lit_config.params
+ config.llvm_shlib_dir = config.llvm_shlib_dir % lit_config.params
except KeyError:
e = sys.exc_info()[1]
key, = e.args
diff --git a/test/tools/llvm-cov/Inputs/copy_block_helper.gcda b/test/tools/llvm-cov/Inputs/copy_block_helper.gcda
new file mode 100644
index 000000000000..d7ff4696df76
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/copy_block_helper.gcda
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/copy_block_helper.gcno b/test/tools/llvm-cov/Inputs/copy_block_helper.gcno
new file mode 100644
index 000000000000..a9d10840646e
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/copy_block_helper.gcno
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/range_based_for.gcda b/test/tools/llvm-cov/Inputs/range_based_for.gcda
new file mode 100644
index 000000000000..df51888d48dd
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/range_based_for.gcda
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/range_based_for.gcno b/test/tools/llvm-cov/Inputs/range_based_for.gcno
new file mode 100644
index 000000000000..5f30acf12f84
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/range_based_for.gcno
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/test.cpp b/test/tools/llvm-cov/Inputs/test.cpp
index 07bc3f294c50..e580e5ae0c12 100644
--- a/test/tools/llvm-cov/Inputs/test.cpp
+++ b/test/tools/llvm-cov/Inputs/test.cpp
@@ -1,3 +1,4 @@
+#include "test.h"
#include <cstdlib>
bool on = false;
@@ -6,10 +7,6 @@ double grid[10][10] = {0};
const char * hello = "world";
const char * world = "hello";
-struct A {
- virtual void B();
-};
-
void A::B() {}
void useless() {}
diff --git a/test/tools/llvm-cov/Inputs/test.cpp.gcov b/test/tools/llvm-cov/Inputs/test.cpp.gcov
deleted file mode 100644
index a3dacc269ead..000000000000
--- a/test/tools/llvm-cov/Inputs/test.cpp.gcov
+++ /dev/null
@@ -1,82 +0,0 @@
- -: 0:Source:test.cpp
- -: 0:Graph:test.gcno
- -: 0:Data:test.gcda
- -: 0:Runs:2
- -: 0:Programs:1
- -: 1:#include <cstdlib>
- -: 2:
- -: 3:bool on = false;
- -: 4:int len = 42;
- -: 5:double grid[10][10] = {0};
- -: 6:const char * hello = "world";
- -: 7:const char * world = "hello";
- -: 8:
- 4: 9:struct A {
- -: 10: virtual void B();
- -: 11:};
- -: 12:
-8589934592: 13:void A::B() {}
- -: 14:
- #####: 15:void useless() {}
- -: 16:
- -: 17:double more_useless() {
- #####: 18: return 0;
- -: 19:}
- -: 20:
- -: 21:int foo() {
- 2: 22: on = true;
- 2: 23: return 3;
- -: 24:}
- -: 25:
- -: 26:int bar() {
- #####: 27: len--;
- #####: 28: return foo() + 45;
- -: 29:}
- -: 30:
- 8: 31:void assign(int ii, int jj) {
- 8: 32: grid[ii][jj] = (ii+1) * (jj+1);
- 8: 33:}
- -: 34:
- -: 35:void initialize_grid() {
- 12: 36: for (int ii = 0; ii < 2; ii++)
- 24: 37: for (int jj = 0; jj < 2; jj++)
- 12: 38: assign(ii, jj);
- 2: 39:}
- -: 40:
- -: 41:int main() {
- 2: 42: initialize_grid();
- -: 43:
- 2: 44: int a = 2;
- 2: 45: on = rand() % 2;
- 2: 46: if (on) {
- 2: 47: foo();
- 2: 48: ++a;
- 2: 49: } else {
- #####: 50: bar();
- #####: 51: a += rand();
- -: 52: }
- -: 53:
- 44: 54: for (int ii = 0; ii < 10; ++ii) {
- 20: 55: switch (rand() % 5) {
- -: 56: case 0:
- 4: 57: a += rand();
- 4: 58: break;
- -: 59: case 1:
- -: 60: case 2:
- 2: 61: a += rand() / rand();
- 2: 62: break;
- -: 63: case 3:
- 6: 64: a -= rand();
- 6: 65: break;
- -: 66: default:
- 8: 67: a = -1;
- 8: 68: }
- 20: 69: }
- -: 70:
- 2: 71: A thing;
-17179869188: 72: for (uint64_t ii = 0; ii < 4294967296; ++ii)
-8589934592: 73: thing.B();
- -: 74:
- 2: 75: return a + 8 + grid[2][3] + len;
- -: 76: return more_useless();
- -: 77:}
diff --git a/test/tools/llvm-cov/Inputs/test.gcda b/test/tools/llvm-cov/Inputs/test.gcda
index 23d03bdd1fd2..613e3a85c026 100644
--- a/test/tools/llvm-cov/Inputs/test.gcda
+++ b/test/tools/llvm-cov/Inputs/test.gcda
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/test.gcno b/test/tools/llvm-cov/Inputs/test.gcno
index 6162604e7449..24f1c82476b2 100644
--- a/test/tools/llvm-cov/Inputs/test.gcno
+++ b/test/tools/llvm-cov/Inputs/test.gcno
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/test.h b/test/tools/llvm-cov/Inputs/test.h
new file mode 100644
index 000000000000..55d9c6aa8c42
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test.h
@@ -0,0 +1,3 @@
+struct A {
+ virtual void B();
+};
diff --git a/test/tools/llvm-cov/Inputs/test_-a.cpp.gcov b/test/tools/llvm-cov/Inputs/test_-a.cpp.gcov
new file mode 100644
index 000000000000..c2210d5eca18
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a.cpp.gcov
@@ -0,0 +1,111 @@
+ -: 0:Source:test.cpp
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+8589934592: 10:void A::B() {}
+8589934592: 10-block 0
+ -: 11:
+ #####: 12:void useless() {}
+ $$$$$: 12-block 0
+ -: 13:
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ $$$$$: 15-block 0
+ -: 16:}
+ -: 17:
+ -: 18:int foo() {
+ 2: 19: on = true;
+ 2: 20: return 3;
+ 2: 20-block 0
+ -: 21:}
+ -: 22:
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ $$$$$: 25-block 0
+ -: 26:}
+ -: 27:
+ 8: 28:void assign(int ii, int jj) {
+ 8: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ 8: 30:}
+ 8: 30-block 0
+ -: 31:
+ -: 32:void initialize_grid() {
+ 6: 33: for (int ii = 0; ii < 2; ii++)
+ 2: 33-block 0
+ 6: 33-block 1
+ 4: 33-block 2
+ 12: 34: for (int jj = 0; jj < 2; jj++)
+ 4: 34-block 0
+ 12: 34-block 1
+ 8: 34-block 2
+ 8: 35: assign(ii, jj);
+ 8: 35-block 0
+ 4: 35-block 1
+ 2: 36:}
+ 2: 36-block 0
+ -: 37:
+ -: 38:int main() {
+ 2: 39: initialize_grid();
+ -: 40:
+ 2: 41: int a = 2;
+ 2: 42: on = rand() % 2;
+ 2: 43: if (on) {
+ 2: 43-block 0
+ 2: 44: foo();
+ 2: 45: ++a;
+ 2: 46: } else {
+ 2: 46-block 0
+ #####: 47: bar();
+ #####: 48: a += rand();
+ $$$$$: 48-block 0
+ -: 49: }
+ -: 50:
+ 22: 51: for (int ii = 0; ii < 10; ++ii) {
+ 2: 51-block 0
+ 22: 51-block 1
+ 20: 51-block 2
+ 20: 52: switch (rand() % 5) {
+ 20: 52-block 0
+ -: 53: case 0:
+ 4: 54: a += rand();
+ 4: 55: break;
+ 4: 55-block 0
+ -: 56: case 1:
+ -: 57: case 2:
+ 2: 58: a += rand() / rand();
+ 2: 59: break;
+ 2: 59-block 0
+ -: 60: case 3:
+ 6: 61: a -= rand();
+ 6: 62: break;
+ 6: 62-block 0
+ -: 63: default:
+ 8: 64: a = -1;
+ 8: 65: }
+ 8: 65-block 0
+ 20: 66: }
+ 20: 66-block 0
+ -: 67:
+ 2: 68: A thing;
+8589934594: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+ 2: 69-block 0
+8589934594: 69-block 1
+8589934592: 69-block 2
+8589934592: 70: thing.B();
+8589934592: 70-block 0
+ -: 71:
+ 2: 72: return a + 8 + grid[2][3] + len;
+ 2: 72-block 0
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_-a.h.gcov b/test/tools/llvm-cov/Inputs/test_-a.h.gcov
new file mode 100644
index 000000000000..a5fe62b1cec9
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a.h.gcov
@@ -0,0 +1,10 @@
+ -: 0:Source:./test.h
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ 2: 1:struct A {
+ 2: 1-block 0
+ 2: 1-block 1
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_-a_-b.cpp.gcov b/test/tools/llvm-cov/Inputs/test_-a_-b.cpp.gcov
new file mode 100644
index 000000000000..ae21037401fd
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a_-b.cpp.gcov
@@ -0,0 +1,134 @@
+ -: 0:Source:test.cpp
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+function _ZN1A1BEv called 8589934592 returned 100% blocks executed 100%
+8589934592: 10:void A::B() {}
+8589934592: 10-block 0
+ -: 11:
+function _Z7uselessv called 0 returned 0% blocks executed 0%
+ #####: 12:void useless() {}
+ $$$$$: 12-block 0
+ -: 13:
+function _Z12more_uselessv called 0 returned 0% blocks executed 0%
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ $$$$$: 15-block 0
+ -: 16:}
+ -: 17:
+function _Z3foov called 2 returned 100% blocks executed 100%
+ -: 18:int foo() {
+ 2: 19: on = true;
+ 2: 20: return 3;
+ 2: 20-block 0
+ -: 21:}
+ -: 22:
+function _Z3barv called 0 returned 0% blocks executed 0%
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ $$$$$: 25-block 0
+ -: 26:}
+ -: 27:
+function _Z6assignii called 8 returned 100% blocks executed 100%
+ 8: 28:void assign(int ii, int jj) {
+ 8: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ 8: 30:}
+ 8: 30-block 0
+ -: 31:
+function _Z15initialize_gridv called 2 returned 100% blocks executed 100%
+ -: 32:void initialize_grid() {
+ 6: 33: for (int ii = 0; ii < 2; ii++)
+ 2: 33-block 0
+ 6: 33-block 1
+branch 0 taken 67%
+branch 1 taken 33%
+ 4: 33-block 2
+ 12: 34: for (int jj = 0; jj < 2; jj++)
+ 4: 34-block 0
+ 12: 34-block 1
+branch 0 taken 67%
+branch 1 taken 33%
+ 8: 34-block 2
+ 8: 35: assign(ii, jj);
+ 8: 35-block 0
+ 4: 35-block 1
+ 2: 36:}
+ 2: 36-block 0
+ -: 37:
+function main called 2 returned 100% blocks executed 94%
+ -: 38:int main() {
+ 2: 39: initialize_grid();
+ -: 40:
+ 2: 41: int a = 2;
+ 2: 42: on = rand() % 2;
+ 2: 43: if (on) {
+ 2: 43-block 0
+branch 0 taken 100%
+branch 1 taken 0%
+ 2: 44: foo();
+ 2: 45: ++a;
+ 2: 46: } else {
+ 2: 46-block 0
+ #####: 47: bar();
+ #####: 48: a += rand();
+ $$$$$: 48-block 0
+ -: 49: }
+ -: 50:
+ 22: 51: for (int ii = 0; ii < 10; ++ii) {
+ 2: 51-block 0
+ 22: 51-block 1
+branch 0 taken 91%
+branch 1 taken 9%
+ 20: 51-block 2
+ 20: 52: switch (rand() % 5) {
+ 20: 52-block 0
+branch 0 taken 20%
+branch 1 taken 0%
+branch 2 taken 10%
+branch 3 taken 30%
+branch 4 taken 40%
+ -: 53: case 0:
+ 4: 54: a += rand();
+ 4: 55: break;
+ 4: 55-block 0
+ -: 56: case 1:
+ -: 57: case 2:
+ 2: 58: a += rand() / rand();
+ 2: 59: break;
+ 2: 59-block 0
+ -: 60: case 3:
+ 6: 61: a -= rand();
+ 6: 62: break;
+ 6: 62-block 0
+ -: 63: default:
+ 8: 64: a = -1;
+ 8: 65: }
+ 8: 65-block 0
+ 20: 66: }
+ 20: 66-block 0
+ -: 67:
+ 2: 68: A thing;
+8589934594: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+ 2: 69-block 0
+8589934594: 69-block 1
+branch 0 taken 99%
+branch 1 taken 1%
+8589934592: 69-block 2
+8589934592: 70: thing.B();
+8589934592: 70-block 0
+ -: 71:
+ 2: 72: return a + 8 + grid[2][3] + len;
+ 2: 72-block 0
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_-a_-b.h.gcov b/test/tools/llvm-cov/Inputs/test_-a_-b.h.gcov
new file mode 100644
index 000000000000..f3dabcb727c5
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a_-b.h.gcov
@@ -0,0 +1,12 @@
+ -: 0:Source:./test.h
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+function _ZN1AC1Ev called 2 returned 100% blocks executed 100%
+function _ZN1AC2Ev called 2 returned 100% blocks executed 100%
+ 2: 1:struct A {
+ 2: 1-block 0
+ 2: 1-block 1
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.cpp.gcov b/test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.cpp.gcov
new file mode 100644
index 000000000000..cc5940f8b92a
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.cpp.gcov
@@ -0,0 +1,160 @@
+ -: 0:Source:test.cpp
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+function _ZN1A1BEv called 8589934592 returned 100% blocks executed 100%
+8589934592: 10:void A::B() {}
+8589934592: 10-block 0
+unconditional 0 taken 8589934592
+ -: 11:
+function _Z7uselessv called 0 returned 0% blocks executed 0%
+ #####: 12:void useless() {}
+ $$$$$: 12-block 0
+unconditional 0 never executed
+ -: 13:
+function _Z12more_uselessv called 0 returned 0% blocks executed 0%
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ $$$$$: 15-block 0
+unconditional 0 never executed
+ -: 16:}
+ -: 17:
+function _Z3foov called 2 returned 100% blocks executed 100%
+ -: 18:int foo() {
+ 2: 19: on = true;
+ 2: 20: return 3;
+ 2: 20-block 0
+unconditional 0 taken 2
+ -: 21:}
+ -: 22:
+function _Z3barv called 0 returned 0% blocks executed 0%
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ $$$$$: 25-block 0
+unconditional 0 never executed
+ -: 26:}
+ -: 27:
+function _Z6assignii called 8 returned 100% blocks executed 100%
+ 8: 28:void assign(int ii, int jj) {
+ 8: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ 8: 30:}
+ 8: 30-block 0
+unconditional 0 taken 8
+ -: 31:
+function _Z15initialize_gridv called 2 returned 100% blocks executed 100%
+ -: 32:void initialize_grid() {
+ 6: 33: for (int ii = 0; ii < 2; ii++)
+ 2: 33-block 0
+unconditional 0 taken 2
+ 6: 33-block 1
+branch 1 taken 4
+branch 2 taken 2
+ 4: 33-block 2
+unconditional 3 taken 4
+ 12: 34: for (int jj = 0; jj < 2; jj++)
+ 4: 34-block 0
+unconditional 0 taken 4
+ 12: 34-block 1
+branch 1 taken 8
+branch 2 taken 4
+ 8: 34-block 2
+unconditional 3 taken 8
+ 8: 35: assign(ii, jj);
+ 8: 35-block 0
+unconditional 0 taken 8
+ 4: 35-block 1
+unconditional 1 taken 4
+ 2: 36:}
+ 2: 36-block 0
+unconditional 0 taken 2
+ -: 37:
+function main called 2 returned 100% blocks executed 94%
+ -: 38:int main() {
+ 2: 39: initialize_grid();
+ -: 40:
+ 2: 41: int a = 2;
+ 2: 42: on = rand() % 2;
+ 2: 43: if (on) {
+ 2: 43-block 0
+branch 0 taken 2
+branch 1 taken 0
+ 2: 44: foo();
+ 2: 45: ++a;
+ 2: 46: } else {
+ 2: 46-block 0
+unconditional 0 taken 2
+ #####: 47: bar();
+ #####: 48: a += rand();
+ $$$$$: 48-block 0
+unconditional 0 never executed
+ -: 49: }
+ -: 50:
+ 22: 51: for (int ii = 0; ii < 10; ++ii) {
+ 2: 51-block 0
+unconditional 0 taken 2
+ 22: 51-block 1
+branch 1 taken 20
+branch 2 taken 2
+ 20: 51-block 2
+unconditional 3 taken 20
+ 20: 52: switch (rand() % 5) {
+ 20: 52-block 0
+branch 0 taken 4
+branch 1 taken 0
+branch 2 taken 2
+branch 3 taken 6
+branch 4 taken 8
+ -: 53: case 0:
+ 4: 54: a += rand();
+ 4: 55: break;
+ 4: 55-block 0
+unconditional 0 taken 4
+ -: 56: case 1:
+ -: 57: case 2:
+ 2: 58: a += rand() / rand();
+ 2: 59: break;
+ 2: 59-block 0
+unconditional 0 taken 2
+ -: 60: case 3:
+ 6: 61: a -= rand();
+ 6: 62: break;
+ 6: 62-block 0
+unconditional 0 taken 6
+ -: 63: default:
+ 8: 64: a = -1;
+ 8: 65: }
+ 8: 65-block 0
+unconditional 0 taken 8
+ 20: 66: }
+ 20: 66-block 0
+unconditional 0 taken 20
+ -: 67:
+ 2: 68: A thing;
+8589934594: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+ 2: 69-block 0
+unconditional 0 taken 2
+8589934594: 69-block 1
+branch 1 taken 8589934592
+branch 2 taken 2
+8589934592: 69-block 2
+unconditional 3 taken 8589934592
+8589934592: 70: thing.B();
+8589934592: 70-block 0
+unconditional 0 taken 8589934592
+ -: 71:
+ 2: 72: return a + 8 + grid[2][3] + len;
+ 2: 72-block 0
+unconditional 0 taken 2
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.h.gcov b/test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.h.gcov
new file mode 100644
index 000000000000..840324e9f9f2
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a_-b_-c_-u.h.gcov
@@ -0,0 +1,14 @@
+ -: 0:Source:./test.h
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+function _ZN1AC1Ev called 2 returned 100% blocks executed 100%
+function _ZN1AC2Ev called 2 returned 100% blocks executed 100%
+ 2: 1:struct A {
+ 2: 1-block 0
+unconditional 0 taken 2
+ 2: 1-block 1
+unconditional 1 taken 2
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_-a_-b_-u.cpp.gcov b/test/tools/llvm-cov/Inputs/test_-a_-b_-u.cpp.gcov
new file mode 100644
index 000000000000..0d2c6b393561
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a_-b_-u.cpp.gcov
@@ -0,0 +1,160 @@
+ -: 0:Source:test.cpp
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+function _ZN1A1BEv called 8589934592 returned 100% blocks executed 100%
+8589934592: 10:void A::B() {}
+8589934592: 10-block 0
+unconditional 0 taken 100%
+ -: 11:
+function _Z7uselessv called 0 returned 0% blocks executed 0%
+ #####: 12:void useless() {}
+ $$$$$: 12-block 0
+unconditional 0 never executed
+ -: 13:
+function _Z12more_uselessv called 0 returned 0% blocks executed 0%
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ $$$$$: 15-block 0
+unconditional 0 never executed
+ -: 16:}
+ -: 17:
+function _Z3foov called 2 returned 100% blocks executed 100%
+ -: 18:int foo() {
+ 2: 19: on = true;
+ 2: 20: return 3;
+ 2: 20-block 0
+unconditional 0 taken 100%
+ -: 21:}
+ -: 22:
+function _Z3barv called 0 returned 0% blocks executed 0%
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ $$$$$: 25-block 0
+unconditional 0 never executed
+ -: 26:}
+ -: 27:
+function _Z6assignii called 8 returned 100% blocks executed 100%
+ 8: 28:void assign(int ii, int jj) {
+ 8: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ 8: 30:}
+ 8: 30-block 0
+unconditional 0 taken 100%
+ -: 31:
+function _Z15initialize_gridv called 2 returned 100% blocks executed 100%
+ -: 32:void initialize_grid() {
+ 6: 33: for (int ii = 0; ii < 2; ii++)
+ 2: 33-block 0
+unconditional 0 taken 100%
+ 6: 33-block 1
+branch 1 taken 67%
+branch 2 taken 33%
+ 4: 33-block 2
+unconditional 3 taken 100%
+ 12: 34: for (int jj = 0; jj < 2; jj++)
+ 4: 34-block 0
+unconditional 0 taken 100%
+ 12: 34-block 1
+branch 1 taken 67%
+branch 2 taken 33%
+ 8: 34-block 2
+unconditional 3 taken 100%
+ 8: 35: assign(ii, jj);
+ 8: 35-block 0
+unconditional 0 taken 100%
+ 4: 35-block 1
+unconditional 1 taken 100%
+ 2: 36:}
+ 2: 36-block 0
+unconditional 0 taken 100%
+ -: 37:
+function main called 2 returned 100% blocks executed 94%
+ -: 38:int main() {
+ 2: 39: initialize_grid();
+ -: 40:
+ 2: 41: int a = 2;
+ 2: 42: on = rand() % 2;
+ 2: 43: if (on) {
+ 2: 43-block 0
+branch 0 taken 100%
+branch 1 taken 0%
+ 2: 44: foo();
+ 2: 45: ++a;
+ 2: 46: } else {
+ 2: 46-block 0
+unconditional 0 taken 100%
+ #####: 47: bar();
+ #####: 48: a += rand();
+ $$$$$: 48-block 0
+unconditional 0 never executed
+ -: 49: }
+ -: 50:
+ 22: 51: for (int ii = 0; ii < 10; ++ii) {
+ 2: 51-block 0
+unconditional 0 taken 100%
+ 22: 51-block 1
+branch 1 taken 91%
+branch 2 taken 9%
+ 20: 51-block 2
+unconditional 3 taken 100%
+ 20: 52: switch (rand() % 5) {
+ 20: 52-block 0
+branch 0 taken 20%
+branch 1 taken 0%
+branch 2 taken 10%
+branch 3 taken 30%
+branch 4 taken 40%
+ -: 53: case 0:
+ 4: 54: a += rand();
+ 4: 55: break;
+ 4: 55-block 0
+unconditional 0 taken 100%
+ -: 56: case 1:
+ -: 57: case 2:
+ 2: 58: a += rand() / rand();
+ 2: 59: break;
+ 2: 59-block 0
+unconditional 0 taken 100%
+ -: 60: case 3:
+ 6: 61: a -= rand();
+ 6: 62: break;
+ 6: 62-block 0
+unconditional 0 taken 100%
+ -: 63: default:
+ 8: 64: a = -1;
+ 8: 65: }
+ 8: 65-block 0
+unconditional 0 taken 100%
+ 20: 66: }
+ 20: 66-block 0
+unconditional 0 taken 100%
+ -: 67:
+ 2: 68: A thing;
+8589934594: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+ 2: 69-block 0
+unconditional 0 taken 100%
+8589934594: 69-block 1
+branch 1 taken 99%
+branch 2 taken 1%
+8589934592: 69-block 2
+unconditional 3 taken 100%
+8589934592: 70: thing.B();
+8589934592: 70-block 0
+unconditional 0 taken 100%
+ -: 71:
+ 2: 72: return a + 8 + grid[2][3] + len;
+ 2: 72-block 0
+unconditional 0 taken 100%
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_-a_-b_-u.h.gcov b/test/tools/llvm-cov/Inputs/test_-a_-b_-u.h.gcov
new file mode 100644
index 000000000000..e7fa658b2900
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-a_-b_-u.h.gcov
@@ -0,0 +1,14 @@
+ -: 0:Source:./test.h
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+function _ZN1AC1Ev called 2 returned 100% blocks executed 100%
+function _ZN1AC2Ev called 2 returned 100% blocks executed 100%
+ 2: 1:struct A {
+ 2: 1-block 0
+unconditional 0 taken 100%
+ 2: 1-block 1
+unconditional 1 taken 100%
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_-b.output b/test/tools/llvm-cov/Inputs/test_-b.output
new file mode 100644
index 000000000000..515987d45ea6
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-b.output
@@ -0,0 +1,13 @@
+File 'test.cpp'
+Lines executed:84.21% of 38
+Branches executed:100.00% of 15
+Taken at least once:86.67% of 15
+No calls
+test.cpp:creating 'test.cpp.gcov'
+
+File './test.h'
+Lines executed:100.00% of 1
+No branches
+No calls
+./test.h:creating 'test.h.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_-b_-f.output b/test/tools/llvm-cov/Inputs/test_-b_-f.output
new file mode 100644
index 000000000000..c3ccd05b1710
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-b_-f.output
@@ -0,0 +1,65 @@
+Function '_ZN1A1BEv'
+Lines executed:100.00% of 1
+No branches
+No calls
+
+Function '_Z7uselessv'
+Lines executed:0.00% of 1
+No branches
+No calls
+
+Function '_Z12more_uselessv'
+Lines executed:0.00% of 1
+No branches
+No calls
+
+Function '_Z3foov'
+Lines executed:100.00% of 2
+No branches
+No calls
+
+Function '_Z3barv'
+Lines executed:0.00% of 2
+No branches
+No calls
+
+Function '_Z6assignii'
+Lines executed:100.00% of 3
+No branches
+No calls
+
+Function '_Z15initialize_gridv'
+Lines executed:100.00% of 4
+Branches executed:100.00% of 4
+Taken at least once:100.00% of 4
+No calls
+
+Function 'main'
+Lines executed:91.67% of 24
+Branches executed:100.00% of 11
+Taken at least once:81.82% of 11
+No calls
+
+Function '_ZN1AC1Ev'
+Lines executed:100.00% of 1
+No branches
+No calls
+
+Function '_ZN1AC2Ev'
+No executable lines
+No branches
+No calls
+
+File 'test.cpp'
+Lines executed:84.21% of 38
+Branches executed:100.00% of 15
+Taken at least once:86.67% of 15
+No calls
+test.cpp:creating 'test.cpp.gcov'
+
+File './test.h'
+Lines executed:100.00% of 1
+No branches
+No calls
+./test.h:creating 'test.h.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_-f.output b/test/tools/llvm-cov/Inputs/test_-f.output
new file mode 100644
index 000000000000..d97aa18174a0
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_-f.output
@@ -0,0 +1,38 @@
+Function '_ZN1A1BEv'
+Lines executed:100.00% of 1
+
+Function '_Z7uselessv'
+Lines executed:0.00% of 1
+
+Function '_Z12more_uselessv'
+Lines executed:0.00% of 1
+
+Function '_Z3foov'
+Lines executed:100.00% of 2
+
+Function '_Z3barv'
+Lines executed:0.00% of 2
+
+Function '_Z6assignii'
+Lines executed:100.00% of 3
+
+Function '_Z15initialize_gridv'
+Lines executed:100.00% of 4
+
+Function 'main'
+Lines executed:91.67% of 24
+
+Function '_ZN1AC1Ev'
+Lines executed:100.00% of 1
+
+Function '_ZN1AC2Ev'
+Lines executed:100.00% of 1
+
+File 'test.cpp'
+Lines executed:84.21% of 38
+test.cpp:creating 'test.cpp.gcov'
+
+File './test.h'
+Lines executed:100.00% of 1
+./test.h:creating 'test.h.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_file_checksum_fail.gcda b/test/tools/llvm-cov/Inputs/test_file_checksum_fail.gcda
new file mode 100644
index 000000000000..8bfd82c7b248
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_file_checksum_fail.gcda
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/test_func_checksum_fail.gcda b/test/tools/llvm-cov/Inputs/test_func_checksum_fail.gcda
new file mode 100644
index 000000000000..4c729a850d86
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_func_checksum_fail.gcda
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/test_long_file_names.output b/test/tools/llvm-cov/Inputs/test_long_file_names.output
new file mode 100644
index 000000000000..e09f4cb9857f
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_long_file_names.output
@@ -0,0 +1,8 @@
+File 'srcdir/./nested_dir/../test.h'
+Lines executed:100.00% of 1
+srcdir/./nested_dir/../test.h:creating 'test_paths.cpp##test.h.gcov'
+
+File 'srcdir/./nested_dir/../test.cpp'
+Lines executed:84.21% of 38
+srcdir/./nested_dir/../test.cpp:creating 'test_paths.cpp##test.cpp.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_long_paths.output b/test/tools/llvm-cov/Inputs/test_long_paths.output
new file mode 100644
index 000000000000..376ee5b78dfb
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_long_paths.output
@@ -0,0 +1,8 @@
+File 'srcdir/./nested_dir/../test.h'
+Lines executed:100.00% of 1
+srcdir/./nested_dir/../test.h:creating 'srcdir#^#test_paths.cpp##srcdir#nested_dir#^#test.h.gcov'
+
+File 'srcdir/./nested_dir/../test.cpp'
+Lines executed:84.21% of 38
+srcdir/./nested_dir/../test.cpp:creating 'srcdir#^#test_paths.cpp##srcdir#nested_dir#^#test.cpp.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_missing.cpp.gcov b/test/tools/llvm-cov/Inputs/test_missing.cpp.gcov
new file mode 100644
index 000000000000..1c138e42581e
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_missing.cpp.gcov
@@ -0,0 +1,77 @@
+ -: 0:Source:srcdir/./nested_dir/../test.cpp
+ -: 0:Graph:test_paths.gcno
+ -: 0:Data:test_paths.gcda
+ -: 0:Runs:3
+ -: 0:Programs:1
+ -: 1:/*EOF*/
+ -: 2:/*EOF*/
+ -: 3:/*EOF*/
+ -: 4:/*EOF*/
+ -: 5:/*EOF*/
+ -: 6:/*EOF*/
+ -: 7:/*EOF*/
+ -: 8:/*EOF*/
+ -: 9:/*EOF*/
+12884901888: 10:/*EOF*/
+ -: 11:/*EOF*/
+ #####: 12:/*EOF*/
+ -: 13:/*EOF*/
+ -: 14:/*EOF*/
+ #####: 15:/*EOF*/
+ -: 16:/*EOF*/
+ -: 17:/*EOF*/
+ -: 18:/*EOF*/
+ 3: 19:/*EOF*/
+ 3: 20:/*EOF*/
+ -: 21:/*EOF*/
+ -: 22:/*EOF*/
+ -: 23:/*EOF*/
+ #####: 24:/*EOF*/
+ #####: 25:/*EOF*/
+ -: 26:/*EOF*/
+ -: 27:/*EOF*/
+ 12: 28:/*EOF*/
+ 12: 29:/*EOF*/
+ 12: 30:/*EOF*/
+ -: 31:/*EOF*/
+ -: 32:/*EOF*/
+ 21: 33:/*EOF*/
+ 36: 34:/*EOF*/
+ 18: 35:/*EOF*/
+ 3: 36:/*EOF*/
+ -: 37:/*EOF*/
+ -: 38:/*EOF*/
+ 3: 39:/*EOF*/
+ -: 40:/*EOF*/
+ 3: 41:/*EOF*/
+ 3: 42:/*EOF*/
+ 3: 43:/*EOF*/
+ 3: 44:/*EOF*/
+ 3: 45:/*EOF*/
+ 3: 46:/*EOF*/
+ #####: 47:/*EOF*/
+ #####: 48:/*EOF*/
+ -: 49:/*EOF*/
+ -: 50:/*EOF*/
+ 66: 51:/*EOF*/
+ 30: 52:/*EOF*/
+ -: 53:/*EOF*/
+ 6: 54:/*EOF*/
+ 6: 55:/*EOF*/
+ -: 56:/*EOF*/
+ -: 57:/*EOF*/
+ 3: 58:/*EOF*/
+ 3: 59:/*EOF*/
+ -: 60:/*EOF*/
+ 9: 61:/*EOF*/
+ 9: 62:/*EOF*/
+ -: 63:/*EOF*/
+ 12: 64:/*EOF*/
+ 12: 65:/*EOF*/
+ 30: 66:/*EOF*/
+ -: 67:/*EOF*/
+ 3: 68:/*EOF*/
+25769803782: 69:/*EOF*/
+12884901888: 70:/*EOF*/
+ -: 71:/*EOF*/
+ 3: 72:/*EOF*/
diff --git a/test/tools/llvm-cov/Inputs/test_missing.h.gcov b/test/tools/llvm-cov/Inputs/test_missing.h.gcov
new file mode 100644
index 000000000000..d500e8619682
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_missing.h.gcov
@@ -0,0 +1,6 @@
+ -: 0:Source:srcdir/./nested_dir/../test.h
+ -: 0:Graph:test_paths.gcno
+ -: 0:Data:test_paths.gcda
+ -: 0:Runs:3
+ -: 0:Programs:1
+ 6: 1:/*EOF*/
diff --git a/test/tools/llvm-cov/Inputs/test_missing.output b/test/tools/llvm-cov/Inputs/test_missing.output
new file mode 100644
index 000000000000..ada0c3603097
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_missing.output
@@ -0,0 +1,8 @@
+File 'srcdir/./nested_dir/../test.h'
+Lines executed:100.00% of 1
+srcdir/./nested_dir/../test.h:creating 'test.h.gcov'
+
+File 'srcdir/./nested_dir/../test.cpp'
+Lines executed:84.21% of 38
+srcdir/./nested_dir/../test.cpp:creating 'test.cpp.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_no_gcda.cpp.gcov b/test/tools/llvm-cov/Inputs/test_no_gcda.cpp.gcov
new file mode 100644
index 000000000000..31353ca0c0eb
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_gcda.cpp.gcov
@@ -0,0 +1,79 @@
+ -: 0:Source:test.cpp
+ -: 0:Graph:test.gcno
+ -: 0:Data:-
+ -: 0:Runs:0
+ -: 0:Programs:0
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+ #####: 10:void A::B() {}
+ -: 11:
+ #####: 12:void useless() {}
+ -: 13:
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ -: 16:}
+ -: 17:
+ -: 18:int foo() {
+ #####: 19: on = true;
+ #####: 20: return 3;
+ -: 21:}
+ -: 22:
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ -: 26:}
+ -: 27:
+ #####: 28:void assign(int ii, int jj) {
+ #####: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ #####: 30:}
+ -: 31:
+ -: 32:void initialize_grid() {
+ #####: 33: for (int ii = 0; ii < 2; ii++)
+ #####: 34: for (int jj = 0; jj < 2; jj++)
+ #####: 35: assign(ii, jj);
+ #####: 36:}
+ -: 37:
+ -: 38:int main() {
+ #####: 39: initialize_grid();
+ -: 40:
+ #####: 41: int a = 2;
+ #####: 42: on = rand() % 2;
+ #####: 43: if (on) {
+ #####: 44: foo();
+ #####: 45: ++a;
+ #####: 46: } else {
+ #####: 47: bar();
+ #####: 48: a += rand();
+ -: 49: }
+ -: 50:
+ #####: 51: for (int ii = 0; ii < 10; ++ii) {
+ #####: 52: switch (rand() % 5) {
+ -: 53: case 0:
+ #####: 54: a += rand();
+ #####: 55: break;
+ -: 56: case 1:
+ -: 57: case 2:
+ #####: 58: a += rand() / rand();
+ #####: 59: break;
+ -: 60: case 3:
+ #####: 61: a -= rand();
+ #####: 62: break;
+ -: 63: default:
+ #####: 64: a = -1;
+ #####: 65: }
+ #####: 66: }
+ -: 67:
+ #####: 68: A thing;
+ #####: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+ #####: 70: thing.B();
+ -: 71:
+ #####: 72: return a + 8 + grid[2][3] + len;
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_no_gcda.h.gcov b/test/tools/llvm-cov/Inputs/test_no_gcda.h.gcov
new file mode 100644
index 000000000000..c0a45c61ce06
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_gcda.h.gcov
@@ -0,0 +1,8 @@
+ -: 0:Source:./test.h
+ -: 0:Graph:test.gcno
+ -: 0:Data:-
+ -: 0:Runs:0
+ -: 0:Programs:0
+ #####: 1:struct A {
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_no_gcda.output b/test/tools/llvm-cov/Inputs/test_no_gcda.output
new file mode 100644
index 000000000000..e994be729109
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_gcda.output
@@ -0,0 +1,8 @@
+File 'test.cpp'
+Lines executed:0.00% of 38
+test.cpp:creating 'test.cpp.gcov'
+
+File './test.h'
+Lines executed:0.00% of 1
+./test.h:creating 'test.h.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_no_options.cpp.gcov b/test/tools/llvm-cov/Inputs/test_no_options.cpp.gcov
new file mode 100644
index 000000000000..871e3ba64456
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_options.cpp.gcov
@@ -0,0 +1,79 @@
+ -: 0:Source:test.cpp
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+8589934592: 10:void A::B() {}
+ -: 11:
+ #####: 12:void useless() {}
+ -: 13:
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ -: 16:}
+ -: 17:
+ -: 18:int foo() {
+ 2: 19: on = true;
+ 2: 20: return 3;
+ -: 21:}
+ -: 22:
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ -: 26:}
+ -: 27:
+ 8: 28:void assign(int ii, int jj) {
+ 8: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ 8: 30:}
+ -: 31:
+ -: 32:void initialize_grid() {
+ 12: 33: for (int ii = 0; ii < 2; ii++)
+ 24: 34: for (int jj = 0; jj < 2; jj++)
+ 12: 35: assign(ii, jj);
+ 2: 36:}
+ -: 37:
+ -: 38:int main() {
+ 2: 39: initialize_grid();
+ -: 40:
+ 2: 41: int a = 2;
+ 2: 42: on = rand() % 2;
+ 2: 43: if (on) {
+ 2: 44: foo();
+ 2: 45: ++a;
+ 2: 46: } else {
+ #####: 47: bar();
+ #####: 48: a += rand();
+ -: 49: }
+ -: 50:
+ 44: 51: for (int ii = 0; ii < 10; ++ii) {
+ 20: 52: switch (rand() % 5) {
+ -: 53: case 0:
+ 4: 54: a += rand();
+ 4: 55: break;
+ -: 56: case 1:
+ -: 57: case 2:
+ 2: 58: a += rand() / rand();
+ 2: 59: break;
+ -: 60: case 3:
+ 6: 61: a -= rand();
+ 6: 62: break;
+ -: 63: default:
+ 8: 64: a = -1;
+ 8: 65: }
+ 20: 66: }
+ -: 67:
+ 2: 68: A thing;
+17179869188: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+8589934592: 70: thing.B();
+ -: 71:
+ 2: 72: return a + 8 + grid[2][3] + len;
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_no_options.h.gcov b/test/tools/llvm-cov/Inputs/test_no_options.h.gcov
new file mode 100644
index 000000000000..4ba58c9fdc4e
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_options.h.gcov
@@ -0,0 +1,8 @@
+ -: 0:Source:./test.h
+ -: 0:Graph:test.gcno
+ -: 0:Data:test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ 4: 1:struct A {
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_no_options.output b/test/tools/llvm-cov/Inputs/test_no_options.output
new file mode 100644
index 000000000000..8be8c1c210af
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_options.output
@@ -0,0 +1,8 @@
+File 'test.cpp'
+Lines executed:84.21% of 38
+test.cpp:creating 'test.cpp.gcov'
+
+File './test.h'
+Lines executed:100.00% of 1
+./test.h:creating 'test.h.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_no_output.output b/test/tools/llvm-cov/Inputs/test_no_output.output
new file mode 100644
index 000000000000..74286b9a2fca
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_output.output
@@ -0,0 +1,6 @@
+File 'test.cpp'
+Lines executed:84.21% of 38
+
+File './test.h'
+Lines executed:100.00% of 1
+
diff --git a/test/tools/llvm-cov/Inputs/test_no_preserve_paths.output b/test/tools/llvm-cov/Inputs/test_no_preserve_paths.output
new file mode 100644
index 000000000000..ada0c3603097
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_no_preserve_paths.output
@@ -0,0 +1,8 @@
+File 'srcdir/./nested_dir/../test.h'
+Lines executed:100.00% of 1
+srcdir/./nested_dir/../test.h:creating 'test.h.gcov'
+
+File 'srcdir/./nested_dir/../test.cpp'
+Lines executed:84.21% of 38
+srcdir/./nested_dir/../test.cpp:creating 'test.cpp.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_objdir.cpp.gcov b/test/tools/llvm-cov/Inputs/test_objdir.cpp.gcov
new file mode 100644
index 000000000000..abf88567801e
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_objdir.cpp.gcov
@@ -0,0 +1,79 @@
+ -: 0:Source:test.cpp
+ -: 0:Graph:objdir/test.gcno
+ -: 0:Data:objdir/test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+8589934592: 10:void A::B() {}
+ -: 11:
+ #####: 12:void useless() {}
+ -: 13:
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ -: 16:}
+ -: 17:
+ -: 18:int foo() {
+ 2: 19: on = true;
+ 2: 20: return 3;
+ -: 21:}
+ -: 22:
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ -: 26:}
+ -: 27:
+ 8: 28:void assign(int ii, int jj) {
+ 8: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ 8: 30:}
+ -: 31:
+ -: 32:void initialize_grid() {
+ 12: 33: for (int ii = 0; ii < 2; ii++)
+ 24: 34: for (int jj = 0; jj < 2; jj++)
+ 12: 35: assign(ii, jj);
+ 2: 36:}
+ -: 37:
+ -: 38:int main() {
+ 2: 39: initialize_grid();
+ -: 40:
+ 2: 41: int a = 2;
+ 2: 42: on = rand() % 2;
+ 2: 43: if (on) {
+ 2: 44: foo();
+ 2: 45: ++a;
+ 2: 46: } else {
+ #####: 47: bar();
+ #####: 48: a += rand();
+ -: 49: }
+ -: 50:
+ 44: 51: for (int ii = 0; ii < 10; ++ii) {
+ 20: 52: switch (rand() % 5) {
+ -: 53: case 0:
+ 4: 54: a += rand();
+ 4: 55: break;
+ -: 56: case 1:
+ -: 57: case 2:
+ 2: 58: a += rand() / rand();
+ 2: 59: break;
+ -: 60: case 3:
+ 6: 61: a -= rand();
+ 6: 62: break;
+ -: 63: default:
+ 8: 64: a = -1;
+ 8: 65: }
+ 20: 66: }
+ -: 67:
+ 2: 68: A thing;
+17179869188: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+8589934592: 70: thing.B();
+ -: 71:
+ 2: 72: return a + 8 + grid[2][3] + len;
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_objdir.h.gcov b/test/tools/llvm-cov/Inputs/test_objdir.h.gcov
new file mode 100644
index 000000000000..8208d2541805
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_objdir.h.gcov
@@ -0,0 +1,8 @@
+ -: 0:Source:./test.h
+ -: 0:Graph:objdir/test.gcno
+ -: 0:Data:objdir/test.gcda
+ -: 0:Runs:2
+ -: 0:Programs:1
+ 4: 1:struct A {
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_paths.cpp.gcov b/test/tools/llvm-cov/Inputs/test_paths.cpp.gcov
new file mode 100644
index 000000000000..3982ddf4e5f7
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_paths.cpp.gcov
@@ -0,0 +1,79 @@
+ -: 0:Source:srcdir/./nested_dir/../test.cpp
+ -: 0:Graph:test_paths.gcno
+ -: 0:Data:test_paths.gcda
+ -: 0:Runs:3
+ -: 0:Programs:1
+ -: 1:#include "test.h"
+ -: 2:#include <cstdlib>
+ -: 3:
+ -: 4:bool on = false;
+ -: 5:int len = 42;
+ -: 6:double grid[10][10] = {0};
+ -: 7:const char * hello = "world";
+ -: 8:const char * world = "hello";
+ -: 9:
+12884901888: 10:void A::B() {}
+ -: 11:
+ #####: 12:void useless() {}
+ -: 13:
+ -: 14:double more_useless() {
+ #####: 15: return 0;
+ -: 16:}
+ -: 17:
+ -: 18:int foo() {
+ 3: 19: on = true;
+ 3: 20: return 3;
+ -: 21:}
+ -: 22:
+ -: 23:int bar() {
+ #####: 24: len--;
+ #####: 25: return foo() + 45;
+ -: 26:}
+ -: 27:
+ 12: 28:void assign(int ii, int jj) {
+ 12: 29: grid[ii][jj] = (ii+1) * (jj+1);
+ 12: 30:}
+ -: 31:
+ -: 32:void initialize_grid() {
+ 21: 33: for (int ii = 0; ii < 2; ii++)
+ 36: 34: for (int jj = 0; jj < 2; jj++)
+ 18: 35: assign(ii, jj);
+ 3: 36:}
+ -: 37:
+ -: 38:int main() {
+ 3: 39: initialize_grid();
+ -: 40:
+ 3: 41: int a = 2;
+ 3: 42: on = rand() % 2;
+ 3: 43: if (on) {
+ 3: 44: foo();
+ 3: 45: ++a;
+ 3: 46: } else {
+ #####: 47: bar();
+ #####: 48: a += rand();
+ -: 49: }
+ -: 50:
+ 66: 51: for (int ii = 0; ii < 10; ++ii) {
+ 30: 52: switch (rand() % 5) {
+ -: 53: case 0:
+ 6: 54: a += rand();
+ 6: 55: break;
+ -: 56: case 1:
+ -: 57: case 2:
+ 3: 58: a += rand() / rand();
+ 3: 59: break;
+ -: 60: case 3:
+ 9: 61: a -= rand();
+ 9: 62: break;
+ -: 63: default:
+ 12: 64: a = -1;
+ 12: 65: }
+ 30: 66: }
+ -: 67:
+ 3: 68: A thing;
+25769803782: 69: for (uint64_t ii = 0; ii < 4294967296; ++ii)
+12884901888: 70: thing.B();
+ -: 71:
+ 3: 72: return a + 8 + grid[2][3] + len;
+ -: 73: return more_useless();
+ -: 74:}
diff --git a/test/tools/llvm-cov/Inputs/test_paths.gcda b/test/tools/llvm-cov/Inputs/test_paths.gcda
new file mode 100644
index 000000000000..7e2cf9ef20fe
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_paths.gcda
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/test_paths.gcno b/test/tools/llvm-cov/Inputs/test_paths.gcno
new file mode 100644
index 000000000000..aada974bc54a
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_paths.gcno
Binary files differ
diff --git a/test/tools/llvm-cov/Inputs/test_paths.h.gcov b/test/tools/llvm-cov/Inputs/test_paths.h.gcov
new file mode 100644
index 000000000000..95e90ca664c0
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_paths.h.gcov
@@ -0,0 +1,8 @@
+ -: 0:Source:srcdir/./nested_dir/../test.h
+ -: 0:Graph:test_paths.gcno
+ -: 0:Data:test_paths.gcda
+ -: 0:Runs:3
+ -: 0:Programs:1
+ 6: 1:struct A {
+ -: 2: virtual void B();
+ -: 3:};
diff --git a/test/tools/llvm-cov/Inputs/test_preserve_paths.output b/test/tools/llvm-cov/Inputs/test_preserve_paths.output
new file mode 100644
index 000000000000..533197255631
--- /dev/null
+++ b/test/tools/llvm-cov/Inputs/test_preserve_paths.output
@@ -0,0 +1,8 @@
+File 'srcdir/./nested_dir/../test.h'
+Lines executed:100.00% of 1
+srcdir/./nested_dir/../test.h:creating 'srcdir#nested_dir#^#test.h.gcov'
+
+File 'srcdir/./nested_dir/../test.cpp'
+Lines executed:84.21% of 38
+srcdir/./nested_dir/../test.cpp:creating 'srcdir#nested_dir#^#test.cpp.gcov'
+
diff --git a/test/tools/llvm-cov/Inputs/test_read_fail.gcno b/test/tools/llvm-cov/Inputs/test_read_fail.gcno
index 63b5d71e6951..88073f15dac6 100644
--- a/test/tools/llvm-cov/Inputs/test_read_fail.gcno
+++ b/test/tools/llvm-cov/Inputs/test_read_fail.gcno
Binary files differ
diff --git a/test/tools/llvm-cov/copy_block_helper.m b/test/tools/llvm-cov/copy_block_helper.m
new file mode 100644
index 000000000000..64973f11009a
--- /dev/null
+++ b/test/tools/llvm-cov/copy_block_helper.m
@@ -0,0 +1,32 @@
+// Make sure that compiler-added functions (whose line number is zero) don't
+// crash llvm-cov.
+
+// We need shell for cd
+// REQUIRES: shell
+
+// RUN: rm -rf %t
+// RUN: mkdir %t
+// RUN: cd %t
+// RUN: cp %s %p/Inputs/copy_block_helper.gc* .
+
+// RUN: llvm-cov copy_block_helper.m | FileCheck %s --check-prefix=STDOUT
+// STDOUT: File 'copy_block_helper.m'
+// STDOUT: Lines executed:100.00% of 5
+// STDOUT: copy_block_helper.m:creating 'copy_block_helper.m.gcov'
+
+// RUN: FileCheck %s --check-prefix=GCOV < %t/copy_block_helper.m.gcov
+// GCOV: -: 0:Runs:1
+// GCOV: -: 0:Programs:1
+
+id test_helper(id (^foo)(void)) { return foo(); } // GCOV: 1: [[@LINE]]:id
+void test(id x) { // GCOV: -: [[@LINE]]:void test
+ test_helper(^{ // GCOV: 2: [[@LINE]]: test_helper
+ return x; // GCOV: 1: [[@LINE]]: return
+ }); // GCOV: -: [[@LINE]]:
+} // GCOV: 1: [[@LINE]]:}
+
+// GCOV: 1: [[@LINE+1]]:int main
+int main(int argc, const char *argv[]) { test(0); }
+
+// llvm-cov doesn't work on big endian yet
+// XFAIL: powerpc64-, s390x, mips-, mips64-, sparc
diff --git a/test/tools/llvm-cov/lit.local.cfg b/test/tools/llvm-cov/lit.local.cfg
index df9b335dd131..56c6f1f5fac2 100644
--- a/test/tools/llvm-cov/lit.local.cfg
+++ b/test/tools/llvm-cov/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.test']
+config.suffixes = ['.test', '.m', '.cpp']
diff --git a/test/tools/llvm-cov/llvm-cov.test b/test/tools/llvm-cov/llvm-cov.test
index 28738a78d160..0d3eb6b8f81b 100644
--- a/test/tools/llvm-cov/llvm-cov.test
+++ b/test/tools/llvm-cov/llvm-cov.test
@@ -1,10 +1,113 @@
-RUN: cd %p/Inputs
-# "cd" is unsupported in lit internal runner.
+# Tests for compatibility between llvm-cov and gcov. These work by
+# comparing llvm-cov against reference outputs generated by gcov 4.2.
+
+# "cd" and globbing are unsupported in lit internal runner.
REQUIRES: shell
-RUN: llvm-cov -gcno=test.gcno -gcda=test.gcda \
-RUN: | diff -aub test.cpp.gcov -
+RUN: rm -rf %t
+RUN: mkdir %t
+RUN: cd %t
+RUN: cp %p/Inputs/test* .
+
+# Basic behaviour with no flags
+RUN: llvm-cov test.c | diff -u test_no_options.output -
+RUN: diff -aub test_no_options.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_no_options.h.gcov test.h.gcov
+
+# Same, but specifying the object directory
+RUN: mkdir -p %t/objdir
+RUN: cp test.gcno test.gcda %t/objdir
+RUN: llvm-cov -o objdir test.c | diff -u test_no_options.output -
+RUN: diff -aub test_objdir.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_objdir.h.gcov test.h.gcov
+
+# Specifying an object file
+RUN: llvm-cov -o objdir/test.o test.c | diff -u test_no_options.output -
+RUN: diff -aub test_objdir.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_objdir.h.gcov test.h.gcov
+
+# Specifying an object file that could be ambiguous with a directory
+RUN: llvm-cov -o objdir/test test.c | diff -u test_no_options.output -
+RUN: diff -aub test_objdir.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_objdir.h.gcov test.h.gcov
+
+# With gcov output disabled
+RUN: llvm-cov -n test.c | diff -u test_no_output.output -
+
+# Missing source files. This test is fragile, as it depends on being
+# run before we copy some sources into place in the next test.
+RUN: llvm-cov test_paths.cpp 2>/dev/null | diff -u test_missing.output -
+RUN: diff -aub test_missing.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_missing.h.gcov test.h.gcov
+
+# Preserve paths. This mangles the output filenames.
+RUN: mkdir -p %t/srcdir/nested_dir
+RUN: cp test.cpp test.h %t/srcdir
+RUN: llvm-cov -p test_paths.cpp | diff -u test_preserve_paths.output -
+RUN: diff -aub test_paths.cpp.gcov srcdir#nested_dir#^#test.cpp.gcov
+RUN: diff -aub test_paths.h.gcov srcdir#nested_dir#^#test.h.gcov
+
+# Don't preserve paths. Same results as preserve paths, but no mangling.
+RUN: llvm-cov test_paths.cpp | diff -u test_no_preserve_paths.output -
+RUN: diff -aub test_paths.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_paths.h.gcov test.h.gcov
+
+# Long file names.
+RUN: llvm-cov -l test_paths.cpp | diff -u test_long_file_names.output -
+RUN: diff -aub test_paths.cpp.gcov test_paths.cpp##test.cpp.gcov
+RUN: diff -aub test_paths.h.gcov test_paths.cpp##test.h.gcov
+
+# Long file names and preserve paths.
+RUN: llvm-cov -lp -gcno test_paths.gcno -gcda test_paths.gcda srcdir/../test_paths.cpp | diff -u test_long_paths.output -
+RUN: diff -aub test_paths.cpp.gcov srcdir#^#test_paths.cpp##srcdir#nested_dir#^#test.cpp.gcov
+RUN: diff -aub test_paths.h.gcov srcdir#^#test_paths.cpp##srcdir#nested_dir#^#test.h.gcov
+
+# Function summaries. This changes stdout, but not the gcov files.
+RUN: llvm-cov test.c -f | diff -u test_-f.output -
+RUN: diff -aub test_no_options.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_no_options.h.gcov test.h.gcov
+
+# All blocks. This doesn't affect stdout, only the gcov files.
+RUN: llvm-cov test.c -a | diff -u test_no_options.output -
+RUN: diff -aub test_-a.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_-a.h.gcov test.h.gcov
+
+# Branch probabilities.
+RUN: llvm-cov test.c -a -b | diff -u test_-b.output -
+RUN: diff -aub test_-a_-b.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_-a_-b.h.gcov test.h.gcov
+
+# Function summaries including branch probabilities.
+
+# FIXME: We don't correctly handle calls when -b and -f are used
+# together, so our output differs from gcov. Remove the 'not' from
+# this test once this is fixed.
+RUN: llvm-cov test.c -a -b -f | not diff -u test_-b_-f.output - >/dev/null
+RUN: diff -aub test_-a_-b.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_-a_-b.h.gcov test.h.gcov
+
+# Summarize unconditional branches too.
+RUN: llvm-cov test.c -a -b -u | diff -u test_-b.output -
+RUN: diff -aub test_-a_-b_-u.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_-a_-b_-u.h.gcov test.h.gcov
+
+# Absolute counts for branches.
+RUN: llvm-cov test.c -a -b -c -u | diff -u test_-b.output -
+RUN: diff -aub test_-a_-b_-c_-u.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_-a_-b_-c_-u.h.gcov test.h.gcov
+
+# Missing gcda file just gives 0 counts.
+RUN: llvm-cov test.c -gcda=no_such_gcda_file | diff -u test_no_gcda.output -
+RUN: diff -aub test_no_gcda.cpp.gcov test.cpp.gcov
+RUN: diff -aub test_no_gcda.h.gcov test.h.gcov
+
+# Invalid gcno file.
+RUN: llvm-cov test.c -gcno=test_read_fail.gcno
+
+# Bad file checksum on gcda.
+RUN: llvm-cov test.c -gcda=test_file_checksum_fail.gcda
-RUN: not llvm-cov -gcno=test_read_fail.gcno -gcda=test.gcda
+# Bad function checksum on gcda
+RUN: llvm-cov test.c -gcda=test_func_checksum_fail.gcda
-XFAIL: powerpc64, s390x
+XFAIL: powerpc64-, s390x, mips-, mips64-, sparc
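The -p and -l runs above exercise gcov-style output-name mangling, which can be read directly off the expected filenames (srcdir/./nested_dir/../test.h becomes srcdir#nested_dir#^#test.h.gcov). A minimal C++ sketch of that mangling, assuming only what those filenames show ('/' turns into '#', '..' into '^', and '.' components are dropped); the helper name manglePath is made up for illustration and is not an llvm-cov API:

// Illustrative sketch only: reproduces the filename mangling implied by the
// expected outputs above (srcdir/./nested_dir/../test.h -> srcdir#nested_dir#^#test.h).
// manglePath is a hypothetical helper, not llvm-cov code.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::string manglePath(const std::string &Path) {
  std::vector<std::string> Parts;
  std::stringstream SS(Path);
  std::string Part;
  while (std::getline(SS, Part, '/')) {
    if (Part == ".")                              // "./" components disappear
      continue;
    Parts.push_back(Part == ".." ? "^" : Part);   // ".." is spelled "^"
  }
  std::string Out;
  for (size_t I = 0; I < Parts.size(); ++I) {
    if (I)
      Out += "#";                                 // '/' separators become '#'
    Out += Parts[I];
  }
  return Out;
}

int main() {
  // Prints "srcdir#nested_dir#^#test.h.gcov", matching the expectation above.
  std::cout << manglePath("srcdir/./nested_dir/../test.h") << ".gcov\n";
}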
diff --git a/test/tools/llvm-cov/range_based_for.cpp b/test/tools/llvm-cov/range_based_for.cpp
new file mode 100644
index 000000000000..3fdb2441399c
--- /dev/null
+++ b/test/tools/llvm-cov/range_based_for.cpp
@@ -0,0 +1,29 @@
+// Make sure that compiler-added local variables (whose line number is zero)
+// don't crash llvm-cov.
+
+// We need shell for cd
+// REQUIRES: shell
+
+// RUN: rm -rf %t
+// RUN: mkdir %t
+// RUN: cd %t
+// RUN: cp %s %p/Inputs/range_based_for.gc* .
+
+// RUN: llvm-cov range_based_for.cpp | FileCheck %s --check-prefix=STDOUT
+// STDOUT: File 'range_based_for.cpp'
+// STDOUT: Lines executed:100.00% of 5
+// STDOUT: range_based_for.cpp:creating 'range_based_for.cpp.gcov'
+
+// RUN: FileCheck %s --check-prefix=GCOV < %t/range_based_for.cpp.gcov
+// GCOV: -: 0:Runs:1
+// GCOV: -: 0:Programs:1
+
+int main(int argc, const char *argv[]) { // GCOV: 1: [[@LINE]]:int main(
+ int V[] = {1, 2}; // GCOV: 1: [[@LINE]]: int V[]
+ for (int &I : V) { // GCOV: 10: [[@LINE]]: for (
+ } // GCOV: 2: [[@LINE]]: }
+ return 0; // GCOV: 1: [[@LINE]]: return
+} // GCOV: -: [[@LINE]]:}
+
+// llvm-cov doesn't work on big endian yet
+// XFAIL: powerpc64-, s390x, mips-, mips64-, sparc
diff --git a/test/tools/llvm-objdump/Inputs/export.dll.coff-i386 b/test/tools/llvm-objdump/Inputs/export.dll.coff-i386
new file mode 100644
index 000000000000..7b9371668dea
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/export.dll.coff-i386
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/file-aux-record.yaml b/test/tools/llvm-objdump/Inputs/file-aux-record.yaml
new file mode 100644
index 000000000000..d19afaf68a85
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/file-aux-record.yaml
@@ -0,0 +1,21 @@
+header: !Header
+ Machine: IMAGE_FILE_MACHINE_I386 # (0x14c)
+ Characteristics: [ IMAGE_FILE_DEBUG_STRIPPED ]
+sections:
+symbols:
+ - !Symbol
+ Name: .file
+ Value: 0
+ SectionNumber: 65534
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_FILE
+ File: eighteen-chars.obj
+ - !Symbol
+ Name: '@comp.id'
+ Value: 13485607
+ SectionNumber: 65535
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+
diff --git a/test/tools/llvm-objdump/Inputs/file.obj.coff-arm b/test/tools/llvm-objdump/Inputs/file.obj.coff-arm
new file mode 100755
index 000000000000..a333a87929b6
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/file.obj.coff-arm
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/large-bss.obj.coff-i386 b/test/tools/llvm-objdump/Inputs/large-bss.obj.coff-i386
new file mode 100644
index 000000000000..79311d346888
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/large-bss.obj.coff-i386
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/many-relocs.obj-i386 b/test/tools/llvm-objdump/Inputs/many-relocs.obj-i386
new file mode 100644
index 000000000000..c13e2356587f
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/many-relocs.obj-i386
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/nop.exe.coff-i386 b/test/tools/llvm-objdump/Inputs/nop.exe.coff-i386
index 68c9d3db0f8f..2cda30ff3d2b 100644
--- a/test/tools/llvm-objdump/Inputs/nop.exe.coff-i386
+++ b/test/tools/llvm-objdump/Inputs/nop.exe.coff-i386
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/out-of-section-sym.elf-i386 b/test/tools/llvm-objdump/Inputs/out-of-section-sym.elf-i386
new file mode 100644
index 000000000000..4c7158498baf
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/out-of-section-sym.elf-i386
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/out-of-section-sym.s b/test/tools/llvm-objdump/Inputs/out-of-section-sym.s
new file mode 100644
index 000000000000..9a1349c426f3
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/out-of-section-sym.s
@@ -0,0 +1,15 @@
+// $ cat out-of-section-sym.ld
+// SECTIONS
+// {
+// . = 0x10;
+// .text : { _ftext = . ; *(.text) }
+// . = 0x20;
+// .data : { _fdata = . ; *(.data) }
+// }
+// as --32 out-of-section-sym.s -o out-of-section-sym.o
+// ld -m elf_i386 -Tout-of-section-sym.ld -o out-of-section-sym.elf-i386 \
+// out-of-section-sym.o
+
+.text
+_start:
+ ret
diff --git a/test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64.exe b/test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64.exe
new file mode 100644
index 000000000000..c701c24fb82d
--- /dev/null
+++ b/test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64.exe
Binary files differ
diff --git a/test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64 b/test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64.obj
index 63460e7826ef..63460e7826ef 100644
--- a/test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64
+++ b/test/tools/llvm-objdump/Inputs/win64-unwind.exe.coff-x86_64.obj
Binary files differ
diff --git a/test/tools/llvm-objdump/coff-file.test b/test/tools/llvm-objdump/coff-file.test
new file mode 100644
index 000000000000..75d02b8a4950
--- /dev/null
+++ b/test/tools/llvm-objdump/coff-file.test
@@ -0,0 +1,6 @@
+RUN: llvm-objdump -t %p/Inputs/file.obj.coff-arm | FileCheck %s
+
+CHECK: .file
+CHECK-NEXT: AUX /Users/compnerd/work/llvm/test/tools/llvm-readobj/Inputs/file.asm
+CHECK-NEXT: [{{[ 0-9]+}}]
+
diff --git a/test/tools/llvm-objdump/coff-large-bss.test b/test/tools/llvm-objdump/coff-large-bss.test
new file mode 100644
index 000000000000..2d7643eb61a4
--- /dev/null
+++ b/test/tools/llvm-objdump/coff-large-bss.test
@@ -0,0 +1,6 @@
+RUN: llvm-objdump -s %p/Inputs/large-bss.obj.coff-i386 | FileCheck %s
+
+; CHECK: Contents of section .text:
+; CHECK-NEXT: Contents of section .data:
+; CHECK-NEXT: Contents of section .bss:
+; CHECK-NEXT: <skipping contents of bss section at [0000, 010f)>
diff --git a/test/tools/llvm-objdump/coff-many-relocs.test b/test/tools/llvm-objdump/coff-many-relocs.test
new file mode 100644
index 000000000000..d6d0d608b3b9
--- /dev/null
+++ b/test/tools/llvm-objdump/coff-many-relocs.test
@@ -0,0 +1,14 @@
+// Test that llvm-objdump can handle IMAGE_SCN_LNK_NRELOC_OVFL.
+// RUN: llvm-objdump -r %p/Inputs/many-relocs.obj-i386 | FileCheck %s
+
+CHECK: RELOCATION RECORDS FOR [.text]:
+CHECK-NEXT: IMAGE_REL_I386_DIR16 foo
+CHECK-NEXT: IMAGE_REL_I386_REL16 foo
+CHECK-NEXT: IMAGE_REL_I386_DIR32 foo
+CHECK-NEXT: IMAGE_REL_I386_DIR32NB foo
+CHECK-NEXT: IMAGE_REL_I386_SEG12 foo
+CHECK-NEXT: IMAGE_REL_I386_SECTION foo
+CHECK-NEXT: IMAGE_REL_I386_SECREL foo
+CHECK-NEXT: IMAGE_REL_I386_TOKEN foo
+CHECK-NEXT: IMAGE_REL_I386_SECREL7 foo
+CHECK-NEXT: IMAGE_REL_I386_REL32 foo
diff --git a/test/tools/llvm-objdump/coff-non-null-terminated-file.test b/test/tools/llvm-objdump/coff-non-null-terminated-file.test
new file mode 100644
index 000000000000..125994ff086b
--- /dev/null
+++ b/test/tools/llvm-objdump/coff-non-null-terminated-file.test
@@ -0,0 +1,5 @@
+RUN: yaml2obj %p/Inputs/file-aux-record.yaml | llvm-objdump -t - | FileCheck %s
+
+CHECK: .file
+CHECK: AUX eighteen-chars.obj{{$}}
+
diff --git a/test/tools/llvm-objdump/coff-private-headers.test b/test/tools/llvm-objdump/coff-private-headers.test
index d36c148cec6e..51bf4435389c 100644
--- a/test/tools/llvm-objdump/coff-private-headers.test
+++ b/test/tools/llvm-objdump/coff-private-headers.test
@@ -1,9 +1,67 @@
-// RUN: llvm-objdump -p %p/Inputs/nop.exe.coff-i386 | FileCheck %s
+// RUN: llvm-objdump -p %p/Inputs/nop.exe.coff-i386 | \
+// RUN: FileCheck -check-prefix=IMPORT %s
-CHECK: The Import Tables:
-CHECK-NEXT: lookup 00005028 time 00000000 fwd 00000000 name 00005096 addr 00005058
-CHECK: DLL Name: KERNEL32.dll
-CHECK-NEXT: Hint/Ord Name
-CHECK-NEXT: 365 ExitProcess
+IMPORT: The Import Tables:
+IMPORT: lookup 000021e4 time 00000000 fwd 00000000 name 0000234a addr 00002024
+IMPORT: DLL Name: MSVCR110.dll
+IMPORT-NEXT: Hint/Ord Name
+IMPORT-NEXT: 767 _initterm_e
+IMPORT-NEXT: 766 _initterm
+IMPORT-NEXT: 437 __initenv
+IMPORT-NEXT: 660 _fmode
+IMPORT-NEXT: 571 _commode
+IMPORT-NEXT: 315 ?terminate@@YAXXZ
+IMPORT-NEXT: 424 __crtSetUnhandledExceptionFilter
+IMPORT-NEXT: 892 _lock
+IMPORT-NEXT: 1254 _unlock
+IMPORT-NEXT: 498 __setusermatherr
+IMPORT-NEXT: 428 __dllonexit
+IMPORT-NEXT: 1058 _onexit
+IMPORT-NEXT: 774 _invoke_watson
+IMPORT-NEXT: 575 _controlfp_s
+IMPORT-NEXT: 624 _except_handler4_common
+IMPORT-NEXT: 587 _crt_debugger_hook
+IMPORT-NEXT: 426 __crtUnhandledException
+IMPORT-NEXT: 425 __crtTerminateProcess
+IMPORT-NEXT: 572 _configthreadlocale
+IMPORT-NEXT: 556 _cexit
+IMPORT-NEXT: 633 _exit
+IMPORT-NEXT: 1484 exit
+IMPORT-NEXT: 496 __set_app_type
+IMPORT-NEXT: 436 __getmainargs
+IMPORT-NEXT: 533 _amsg_exit
+IMPORT-NEXT: 555 _calloc_crt
+IMPORT-NEXT: 367 _XcptFilter
+// RUN: llvm-objdump -p %p/Inputs/export.dll.coff-i386 | \
+// RUN: FileCheck -check-prefix=EXPORT %s
+EXPORT: Export Table:
+EXPORT-NEXT: DLL name: export.test.tmp3.dll
+EXPORT-NEXT: Ordinal base: 5
+EXPORT-NEXT: Ordinal RVA Name
+EXPORT-NEXT: 5 0x2008
+EXPORT-NEXT: 6 0x2010 exportfn2
+
+// RUN: llvm-objdump -p %p/Inputs/nop.exe.coff-i386 | \
+// RUN: FileCheck -check-prefix=LOADCFG %s
+
+LOADCFG: Load configuration:
+LOADCFG-NEXT: Timestamp: 0
+LOADCFG-NEXT: Major Version: 0
+LOADCFG-NEXT: Minor Version: 0
+LOADCFG-NEXT: GlobalFlags Clear: 0
+LOADCFG-NEXT: GlobalFlags Set: 0
+LOADCFG-NEXT: Critical Section Default Timeout: 0
+LOADCFG-NEXT: Decommit Free Block Threshold: 0
+LOADCFG-NEXT: Decommit Total Free Threshold: 0
+LOADCFG-NEXT: Lock Prefix Table: 0
+LOADCFG-NEXT: Maximum Allocation Size: 0
+LOADCFG-NEXT: Virtual Memory Threshold: 0
+LOADCFG-NEXT: Process Affinity Mask: 0
+LOADCFG-NEXT: Process Heap Flags: 0
+LOADCFG-NEXT: CSD Version: 0
+LOADCFG-NEXT: Security Cookie: 4206616
+LOADCFG-NEXT: SEH Table: 4202768
+LOADCFG-NEXT: SEH Count: 1
+LOADCFG: SEH Table: 0x401689
diff --git a/test/tools/llvm-objdump/hex-relocation-addr.test b/test/tools/llvm-objdump/hex-relocation-addr.test
new file mode 100644
index 000000000000..7e7e97bc4124
--- /dev/null
+++ b/test/tools/llvm-objdump/hex-relocation-addr.test
@@ -0,0 +1,17 @@
+// This test checks that relocation addresses are printed in hex
+// RUN: llvm-objdump -r %p/Inputs/win64-unwind.exe.coff-x86_64.obj | FileCheck %s
+
+CHECK: RELOCATION RECORDS FOR [.pdata]:
+CHECK-NEXT: 0 IMAGE_REL_AMD64_ADDR32NB func
+CHECK-NEXT: 4 IMAGE_REL_AMD64_ADDR32NB func
+CHECK-NEXT: 8 IMAGE_REL_AMD64_ADDR32NB .xdata
+CHECK-NEXT: c IMAGE_REL_AMD64_ADDR32NB func
+CHECK-NEXT: 10 IMAGE_REL_AMD64_ADDR32NB func
+CHECK-NEXT: 14 IMAGE_REL_AMD64_ADDR32NB .xdata
+CHECK-NEXT: 18 IMAGE_REL_AMD64_ADDR32NB smallFunc
+CHECK-NEXT: 1c IMAGE_REL_AMD64_ADDR32NB smallFunc
+CHECK-NEXT: 20 IMAGE_REL_AMD64_ADDR32NB .xdata
+CHECK-NEXT: 24 IMAGE_REL_AMD64_ADDR32NB allocFunc
+CHECK-NEXT: 28 IMAGE_REL_AMD64_ADDR32NB allocFunc
+CHECK-NEXT: 2c IMAGE_REL_AMD64_ADDR32NB .xdata
+
diff --git a/test/tools/llvm-objdump/lit.local.cfg b/test/tools/llvm-objdump/lit.local.cfg
index 19840aa7574c..c8625f4d9d24 100644
--- a/test/tools/llvm-objdump/lit.local.cfg
+++ b/test/tools/llvm-objdump/lit.local.cfg
@@ -1,3 +1,2 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/tools/llvm-objdump/out-of-section-sym.test b/test/tools/llvm-objdump/out-of-section-sym.test
new file mode 100644
index 000000000000..f70dce6a8ed8
--- /dev/null
+++ b/test/tools/llvm-objdump/out-of-section-sym.test
@@ -0,0 +1,13 @@
+// Check that llvm-objdump does not attempt to disassemble symbols outside
+// of section boundaries.
+// RUN: llvm-objdump -d -t %p/Inputs/out-of-section-sym.elf-i386 | FileCheck %s
+
+CHECK: Disassembly of section .text:
+CHECK-NEXT: _start:
+CHECK-NEXT: 10: c3 retl
+CHECK-NEXT: SYMBOL TABLE:
+CHECK-NEXT: 00000000 *UND* 00000000
+CHECK-NEXT: 00000010 l d .text 00000000 .text
+CHECK-NEXT: 00000010 .text 00000000 _start
+CHECK-NEXT: 00000020 .text 00000000 _fdata
+CHECK-NEXT: 00000010 .text 00000000 _ftext
diff --git a/test/tools/llvm-objdump/win64-unwind-data.test b/test/tools/llvm-objdump/win64-unwind-data.test
index a723ffed3e2b..d39d08f20c20 100644
--- a/test/tools/llvm-objdump/win64-unwind-data.test
+++ b/test/tools/llvm-objdump/win64-unwind-data.test
@@ -1,52 +1,107 @@
// This test checks that the unwind data is dumped by llvm-objdump.
-// RUN: llvm-objdump -u %p/Inputs/win64-unwind.exe.coff-x86_64 | FileCheck %s
+// RUN: llvm-objdump -u %p/Inputs/win64-unwind.exe.coff-x86_64.obj \
+// RUN: | FileCheck -check-prefix=OBJ %s
+// RUN: llvm-objdump -u %p/Inputs/win64-unwind.exe.coff-x86_64.exe \
+// RUN: | FileCheck -check-prefix=EXE %s
-CHECK: Unwind info:
-CHECK: Function Table:
-CHECK-NEXT: Start Address: func
-CHECK-NEXT: End Address: func + 0x001b
-CHECK-NEXT: Unwind Info Address: .xdata
-CHECK-NEXT: Version: 1
-CHECK-NEXT: Flags: 1 UNW_ExceptionHandler
-CHECK-NEXT: Size of prolog: 18
-CHECK-NEXT: Number of Codes: 8
-CHECK-NEXT: Frame register: RBX
-CHECK-NEXT: Frame offset: 0
-CHECK-NEXT: Unwind Codes:
-CHECK-NEXT: 0x12: UOP_SetFPReg
-CHECK-NEXT: 0x0f: UOP_PushNonVol RBX
-CHECK-NEXT: 0x0e: UOP_SaveXMM128 XMM8 [0x0000]
-CHECK-NEXT: 0x09: UOP_SaveNonVol RSI [0x0010]
-CHECK-NEXT: 0x04: UOP_AllocSmall 24
-CHECK-NEXT: 0x00: UOP_PushMachFrame w/o error code
-CHECK: Function Table:
-CHECK-NEXT: Start Address: func + 0x0012
-CHECK-NEXT: End Address: func + 0x0012
-CHECK-NEXT: Unwind Info Address: .xdata + 0x001c
-CHECK-NEXT: Version: 1
-CHECK-NEXT: Flags: 4 UNW_ChainInfo
-CHECK-NEXT: Size of prolog: 0
-CHECK-NEXT: Number of Codes: 0
-CHECK-NEXT: No frame pointer used
-CHECK: Function Table:
-CHECK-NEXT: Start Address: smallFunc
-CHECK-NEXT: End Address: smallFunc + 0x0001
-CHECK-NEXT: Unwind Info Address: .xdata + 0x002c
-CHECK-NEXT: Version: 1
-CHECK-NEXT: Flags: 0
-CHECK-NEXT: Size of prolog: 0
-CHECK-NEXT: Number of Codes: 0
-CHECK-NEXT: No frame pointer used
-CHECK: Function Table:
-CHECK-NEXT: Start Address: allocFunc
-CHECK-NEXT: End Address: allocFunc + 0x001d
-CHECK-NEXT: Unwind Info Address: .xdata + 0x0034
-CHECK-NEXT: Version: 1
-CHECK-NEXT: Flags: 0
-CHECK-NEXT: Size of prolog: 14
-CHECK-NEXT: Number of Codes: 6
-CHECK-NEXT: No frame pointer used
-CHECK-NEXT: Unwind Codes:
-CHECK-NEXT: 0x0e: UOP_AllocLarge 8454128
-CHECK-NEXT: 0x07: UOP_AllocLarge 8190
-CHECK-NEXT: 0x00: UOP_PushMachFrame w/o error code
+OBJ: Unwind info:
+OBJ: Function Table:
+OBJ-NEXT: Start Address: func
+OBJ-NEXT: End Address: func + 0x001b
+OBJ-NEXT: Unwind Info Address: .xdata
+OBJ-NEXT: Version: 1
+OBJ-NEXT: Flags: 1 UNW_ExceptionHandler
+OBJ-NEXT: Size of prolog: 18
+OBJ-NEXT: Number of Codes: 8
+OBJ-NEXT: Frame register: RBX
+OBJ-NEXT: Frame offset: 0
+OBJ-NEXT: Unwind Codes:
+OBJ-NEXT: 0x12: UOP_SetFPReg
+OBJ-NEXT: 0x0f: UOP_PushNonVol RBX
+OBJ-NEXT: 0x0e: UOP_SaveXMM128 XMM8 [0x0000]
+OBJ-NEXT: 0x09: UOP_SaveNonVol RSI [0x0010]
+OBJ-NEXT: 0x04: UOP_AllocSmall 24
+OBJ-NEXT: 0x00: UOP_PushMachFrame w/o error code
+OBJ: Function Table:
+OBJ-NEXT: Start Address: func + 0x0012
+OBJ-NEXT: End Address: func + 0x0012
+OBJ-NEXT: Unwind Info Address: .xdata + 0x001c
+OBJ-NEXT: Version: 1
+OBJ-NEXT: Flags: 4 UNW_ChainInfo
+OBJ-NEXT: Size of prolog: 0
+OBJ-NEXT: Number of Codes: 0
+OBJ-NEXT: No frame pointer used
+OBJ: Function Table:
+OBJ-NEXT: Start Address: smallFunc
+OBJ-NEXT: End Address: smallFunc + 0x0001
+OBJ-NEXT: Unwind Info Address: .xdata + 0x002c
+OBJ-NEXT: Version: 1
+OBJ-NEXT: Flags: 0
+OBJ-NEXT: Size of prolog: 0
+OBJ-NEXT: Number of Codes: 0
+OBJ-NEXT: No frame pointer used
+OBJ: Function Table:
+OBJ-NEXT: Start Address: allocFunc
+OBJ-NEXT: End Address: allocFunc + 0x001d
+OBJ-NEXT: Unwind Info Address: .xdata + 0x0034
+OBJ-NEXT: Version: 1
+OBJ-NEXT: Flags: 0
+OBJ-NEXT: Size of prolog: 14
+OBJ-NEXT: Number of Codes: 6
+OBJ-NEXT: No frame pointer used
+OBJ-NEXT: Unwind Codes:
+OBJ-NEXT: 0x0e: UOP_AllocLarge 8454128
+OBJ-NEXT: 0x07: UOP_AllocLarge 8190
+OBJ-NEXT: 0x00: UOP_PushMachFrame w/o error code
+
+EXE: Function Table:
+EXE-NEXT: Start Address: 0x1000
+EXE-NEXT: End Address: 0x101b
+EXE-NEXT: Unwind Info Address: 0x2000
+EXE-NEXT: Version: 1
+EXE-NEXT: Flags: 1 UNW_ExceptionHandler
+EXE-NEXT: Size of prolog: 18
+EXE-NEXT: Number of Codes: 8
+EXE-NEXT: Frame register: RBX
+EXE-NEXT: Frame offset: 0
+EXE-NEXT: Unwind Codes:
+EXE-NEXT: 0x12: UOP_SetFPReg
+EXE-NEXT: 0x0f: UOP_PushNonVol RBX
+EXE-NEXT: 0x0e: UOP_SaveXMM128 XMM8 [0x0000]
+EXE-NEXT: 0x09: UOP_SaveNonVol RSI [0x0010]
+EXE-NEXT: 0x04: UOP_AllocSmall 24
+EXE-NEXT: 0x00: UOP_PushMachFrame w/o error code
+
+EXE: Function Table:
+EXE-NEXT: Start Address: 0x1012
+EXE-NEXT: End Address: 0x1012
+EXE-NEXT: Unwind Info Address: 0x201c
+EXE-NEXT: Version: 1
+EXE-NEXT: Flags: 4 UNW_ChainInfo
+EXE-NEXT: Size of prolog: 0
+EXE-NEXT: Number of Codes: 0
+EXE-NEXT: No frame pointer used
+
+EXE: Function Table:
+EXE-NEXT: Start Address: 0x101b
+EXE-NEXT: End Address: 0x101c
+EXE-NEXT: Unwind Info Address: 0x202c
+EXE-NEXT: Version: 1
+EXE-NEXT: Flags: 0
+EXE-NEXT: Size of prolog: 0
+EXE-NEXT: Number of Codes: 0
+EXE-NEXT: No frame pointer used
+
+EXE: Function Table:
+EXE-NEXT: Start Address: 0x101c
+EXE-NEXT: End Address: 0x1039
+EXE-NEXT: Unwind Info Address: 0x2034
+EXE-NEXT: Version: 1
+EXE-NEXT: Flags: 0
+EXE-NEXT: Size of prolog: 14
+EXE-NEXT: Number of Codes: 6
+EXE-NEXT: No frame pointer used
+EXE-NEXT: Unwind Codes:
+EXE-NEXT: 0x0e: UOP_AllocLarge 8454128
+EXE-NEXT: 0x07: UOP_AllocLarge 8190
+EXE-NEXT: 0x00: UOP_PushMachFrame w/o error code
diff --git a/test/tools/llvm-profdata/Inputs/bad-hash.profdata b/test/tools/llvm-profdata/Inputs/bad-hash.profdata
new file mode 100644
index 000000000000..faa6f4015253
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/bad-hash.profdata
@@ -0,0 +1,4 @@
+function_count_not
+badhash
+1
+1
diff --git a/test/tools/llvm-profdata/Inputs/bar3-1.profdata b/test/tools/llvm-profdata/Inputs/bar3-1.profdata
new file mode 100644
index 000000000000..5486e9d84d72
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/bar3-1.profdata
@@ -0,0 +1,6 @@
+bar
+3
+3
+1
+2
+3
diff --git a/test/tools/llvm-profdata/Inputs/c-general.profdata b/test/tools/llvm-profdata/Inputs/c-general.profdata
new file mode 100644
index 000000000000..e8cef21de5f2
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/c-general.profdata
Binary files differ
diff --git a/test/tools/llvm-profdata/Inputs/empty.profdata b/test/tools/llvm-profdata/Inputs/empty.profdata
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/empty.profdata
diff --git a/test/tools/llvm-profdata/Inputs/extra-word.profdata b/test/tools/llvm-profdata/Inputs/extra-word.profdata
new file mode 100644
index 000000000000..67a662909cf0
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/extra-word.profdata
@@ -0,0 +1,2 @@
+extra 1 word
+1
diff --git a/test/tools/llvm-profdata/Inputs/foo3-1.profdata b/test/tools/llvm-profdata/Inputs/foo3-1.profdata
new file mode 100644
index 000000000000..14a620043576
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/foo3-1.profdata
@@ -0,0 +1,6 @@
+foo
+3
+3
+1
+2
+3
diff --git a/test/tools/llvm-profdata/Inputs/foo3-2.profdata b/test/tools/llvm-profdata/Inputs/foo3-2.profdata
new file mode 100644
index 000000000000..801846e44045
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/foo3-2.profdata
@@ -0,0 +1,6 @@
+foo
+3
+3
+7
+5
+3
diff --git a/test/tools/llvm-profdata/Inputs/foo3bar3-1.profdata b/test/tools/llvm-profdata/Inputs/foo3bar3-1.profdata
new file mode 100644
index 000000000000..12157b9f9ada
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/foo3bar3-1.profdata
@@ -0,0 +1,13 @@
+foo
+3
+3
+2
+3
+5
+
+bar
+3
+3
+7
+11
+13
diff --git a/test/tools/llvm-profdata/Inputs/foo3bar3-2.profdata b/test/tools/llvm-profdata/Inputs/foo3bar3-2.profdata
new file mode 100644
index 000000000000..f1f10bd6f3df
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/foo3bar3-2.profdata
@@ -0,0 +1,13 @@
+foo
+3
+3
+17
+19
+23
+
+bar
+3
+3
+29
+31
+37
diff --git a/test/tools/llvm-profdata/Inputs/foo4-1.profdata b/test/tools/llvm-profdata/Inputs/foo4-1.profdata
new file mode 100644
index 000000000000..31d2a2ce7569
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/foo4-1.profdata
@@ -0,0 +1,7 @@
+foo
+4
+4
+11
+22
+33
+44
diff --git a/test/tools/llvm-profdata/Inputs/foo4-2.profdata b/test/tools/llvm-profdata/Inputs/foo4-2.profdata
new file mode 100644
index 000000000000..01d8309b5ce2
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/foo4-2.profdata
@@ -0,0 +1,7 @@
+foo
+4
+4
+7
+6
+5
+4
diff --git a/test/tools/llvm-profdata/Inputs/invalid-count-later.profdata b/test/tools/llvm-profdata/Inputs/invalid-count-later.profdata
new file mode 100644
index 000000000000..2b61c55a6766
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/invalid-count-later.profdata
@@ -0,0 +1,4 @@
+invalid_count
+1
+1
+1later
diff --git a/test/tools/llvm-profdata/Inputs/no-counts.profdata b/test/tools/llvm-profdata/Inputs/no-counts.profdata
new file mode 100644
index 000000000000..5c1fa15c086f
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/no-counts.profdata
@@ -0,0 +1,3 @@
+no_counts
+0
+0
diff --git a/test/tools/llvm-profdata/Inputs/overflow.profdata b/test/tools/llvm-profdata/Inputs/overflow.profdata
new file mode 100644
index 000000000000..c9a9d697ecfb
--- /dev/null
+++ b/test/tools/llvm-profdata/Inputs/overflow.profdata
@@ -0,0 +1,4 @@
+overflow
+1
+1
+9223372036854775808
diff --git a/test/tools/llvm-profdata/c-general.test b/test/tools/llvm-profdata/c-general.test
new file mode 100644
index 000000000000..9b6cd7f4828c
--- /dev/null
+++ b/test/tools/llvm-profdata/c-general.test
@@ -0,0 +1,24 @@
+REGENERATE: You need a checkout of clang with compiler-rt to generate the
+REGENERATE: binary file here. These shell commands can be used to regenerate
+REGENERATE: it.
+REGENERATE:
+REGENERATE: $ SRC=path/to/llvm
+REGENERATE: $ CFE=$SRC/tools/clang
+REGENERATE: $ TESTDIR=$SRC/test/tools/llvm-profdata
+REGENERATE: $ CFE_TESTDIR=$CFE/test/Profile
+REGENERATE: $ clang -o a.out -fprofile-instr-generate $CFE_TESTDIR/c-general.c
+REGENERATE: $ LLVM_PROFILE_FILE=$TESTDIR/Inputs/c-general.profdata ./a.out
+
+RUN: llvm-profdata show %p/Inputs/c-general.profdata -o - | FileCheck %s -check-prefix=CHECK
+RUN: llvm-profdata show %p/Inputs/c-general.profdata -o - --function=switches | FileCheck %s -check-prefix=SWITCHES -check-prefix=CHECK
+
+SWITCHES-LABEL: Counters:
+SWITCHES-NEXT: switches:
+SWITCHES-NEXT: Hash: 0x0000000000000013
+SWITCHES-NEXT: Counters: 19
+SWITCHES-NEXT: Function count: 1
+SWITCHES-LABEL: Functions shown: 1
+
+CHECK-LABEL: Total functions: 11
+CHECK-NEXT: Maximum function count: 1
+CHECK-NEXT: Maximum internal block count: 100
diff --git a/test/tools/llvm-profdata/errors.test b/test/tools/llvm-profdata/errors.test
new file mode 100644
index 000000000000..28262efe0638
--- /dev/null
+++ b/test/tools/llvm-profdata/errors.test
@@ -0,0 +1,16 @@
+RUN: llvm-profdata merge %p/Inputs/foo3-1.profdata %p/Inputs/foo4-1.profdata -o %t.out 2>&1 | FileCheck %s --check-prefix=HASH
+HASH: foo4-1.profdata: foo: Function hash mismatch
+
+RUN: llvm-profdata merge %p/Inputs/overflow.profdata %p/Inputs/overflow.profdata -o %t.out 2>&1 | FileCheck %s --check-prefix=OVERFLOW
+OVERFLOW: overflow.profdata: overflow: Counter overflow
+
+RUN: not llvm-profdata show %p/Inputs/invalid-count-later.profdata 2>&1 | FileCheck %s --check-prefix=INVALID-COUNT-LATER
+RUN: not llvm-profdata merge %p/Inputs/invalid-count-later.profdata %p/Inputs/invalid-count-later.profdata -o %t.out 2>&1 | FileCheck %s --check-prefix=INVALID-COUNT-LATER
+INVALID-COUNT-LATER: error: {{.*}}invalid-count-later.profdata: Malformed profile data
+
+RUN: not llvm-profdata show %p/Inputs/bad-hash.profdata 2>&1 | FileCheck %s --check-prefix=BAD-HASH
+RUN: not llvm-profdata merge %p/Inputs/bad-hash.profdata %p/Inputs/bad-hash.profdata -o %t.out 2>&1 | FileCheck %s --check-prefix=BAD-HASH
+BAD-HASH: error: {{.*}}bad-hash.profdata: Malformed profile data
+
+RUN: not llvm-profdata show %p/Inputs/no-counts.profdata 2>&1 | FileCheck %s --check-prefix=NO-COUNTS
+NO-COUNTS: error: {{.*}}no-counts.profdata: Malformed profile data
diff --git a/test/tools/llvm-profdata/raw-32-bits-be.test b/test/tools/llvm-profdata/raw-32-bits-be.test
new file mode 100644
index 000000000000..86ac56d39f26
--- /dev/null
+++ b/test/tools/llvm-profdata/raw-32-bits-be.test
@@ -0,0 +1,42 @@
+RUN: printf '\377lprofR\201' > %t
+RUN: printf '\0\0\0\0\0\0\0\1' >> %t
+RUN: printf '\0\0\0\0\0\0\0\2' >> %t
+RUN: printf '\0\0\0\0\0\0\0\3' >> %t
+RUN: printf '\0\0\0\0\0\0\0\6' >> %t
+RUN: printf '\0\0\0\0\1\0\0\0' >> %t
+RUN: printf '\0\0\0\0\2\0\0\0' >> %t
+
+RUN: printf '\0\0\0\3' >> %t
+RUN: printf '\0\0\0\1' >> %t
+RUN: printf '\0\0\0\0\0\0\0\1' >> %t
+RUN: printf '\2\0\0\0' >> %t
+RUN: printf '\1\0\0\0' >> %t
+
+RUN: printf '\0\0\0\3' >> %t
+RUN: printf '\0\0\0\2' >> %t
+RUN: printf '\0\0\0\0\0\0\0\2' >> %t
+RUN: printf '\2\0\0\03' >> %t
+RUN: printf '\1\0\0\10' >> %t
+
+RUN: printf '\0\0\0\0\0\0\0\023' >> %t
+RUN: printf '\0\0\0\0\0\0\0\067' >> %t
+RUN: printf '\0\0\0\0\0\0\0\101' >> %t
+RUN: printf 'foobar' >> %t
+
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s
+
+CHECK: Counters:
+CHECK: foo:
+CHECK: Hash: 0x0000000000000001
+CHECK: Counters: 1
+CHECK: Function count: 19
+CHECK: Block counts: []
+CHECK: bar:
+CHECK: Hash: 0x0000000000000002
+CHECK: Counters: 2
+CHECK: Function count: 55
+CHECK: Block counts: [65]
+CHECK: Functions shown: 2
+CHECK: Total functions: 2
+CHECK: Maximum function count: 55
+CHECK: Maximum internal block count: 65
diff --git a/test/tools/llvm-profdata/raw-32-bits-le.test b/test/tools/llvm-profdata/raw-32-bits-le.test
new file mode 100644
index 000000000000..9325e7eb0f52
--- /dev/null
+++ b/test/tools/llvm-profdata/raw-32-bits-le.test
@@ -0,0 +1,42 @@
+RUN: printf '\201Rforpl\377' > %t
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t
+RUN: printf '\2\0\0\0\0\0\0\0' >> %t
+RUN: printf '\3\0\0\0\0\0\0\0' >> %t
+RUN: printf '\6\0\0\0\0\0\0\0' >> %t
+RUN: printf '\0\0\0\1\0\0\0\0' >> %t
+RUN: printf '\0\0\0\2\0\0\0\0' >> %t
+
+RUN: printf '\3\0\0\0' >> %t
+RUN: printf '\1\0\0\0' >> %t
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t
+RUN: printf '\0\0\0\2' >> %t
+RUN: printf '\0\0\0\1' >> %t
+
+RUN: printf '\3\0\0\0' >> %t
+RUN: printf '\2\0\0\0' >> %t
+RUN: printf '\02\0\0\0\0\0\0\0' >> %t
+RUN: printf '\03\0\0\2' >> %t
+RUN: printf '\10\0\0\1' >> %t
+
+RUN: printf '\023\0\0\0\0\0\0\0' >> %t
+RUN: printf '\067\0\0\0\0\0\0\0' >> %t
+RUN: printf '\101\0\0\0\0\0\0\0' >> %t
+RUN: printf 'foobar' >> %t
+
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s
+
+CHECK: Counters:
+CHECK: foo:
+CHECK: Hash: 0x0000000000000001
+CHECK: Counters: 1
+CHECK: Function count: 19
+CHECK: Block counts: []
+CHECK: bar:
+CHECK: Hash: 0x0000000000000002
+CHECK: Counters: 2
+CHECK: Function count: 55
+CHECK: Block counts: [65]
+CHECK: Functions shown: 2
+CHECK: Total functions: 2
+CHECK: Maximum function count: 55
+CHECK: Maximum internal block count: 65
diff --git a/test/tools/llvm-profdata/raw-64-bits-be.test b/test/tools/llvm-profdata/raw-64-bits-be.test
new file mode 100644
index 000000000000..b97d8b5dac6d
--- /dev/null
+++ b/test/tools/llvm-profdata/raw-64-bits-be.test
@@ -0,0 +1,42 @@
+RUN: printf '\377lprofr\201' > %t
+RUN: printf '\0\0\0\0\0\0\0\1' >> %t
+RUN: printf '\0\0\0\0\0\0\0\2' >> %t
+RUN: printf '\0\0\0\0\0\0\0\3' >> %t
+RUN: printf '\0\0\0\0\0\0\0\6' >> %t
+RUN: printf '\0\0\0\1\0\4\0\0' >> %t
+RUN: printf '\0\0\0\2\0\4\0\0' >> %t
+
+RUN: printf '\0\0\0\3' >> %t
+RUN: printf '\0\0\0\1' >> %t
+RUN: printf '\0\0\0\0\0\0\0\1' >> %t
+RUN: printf '\0\0\0\2\0\4\0\0' >> %t
+RUN: printf '\0\0\0\1\0\4\0\0' >> %t
+
+RUN: printf '\0\0\0\3' >> %t
+RUN: printf '\0\0\0\2' >> %t
+RUN: printf '\0\0\0\0\0\0\0\02' >> %t
+RUN: printf '\0\0\0\2\0\4\0\03' >> %t
+RUN: printf '\0\0\0\1\0\4\0\10' >> %t
+
+RUN: printf '\0\0\0\0\0\0\0\023' >> %t
+RUN: printf '\0\0\0\0\0\0\0\067' >> %t
+RUN: printf '\0\0\0\0\0\0\0\101' >> %t
+RUN: printf 'foobar' >> %t
+
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s
+
+CHECK: Counters:
+CHECK: foo:
+CHECK: Hash: 0x0000000000000001
+CHECK: Counters: 1
+CHECK: Function count: 19
+CHECK: Block counts: []
+CHECK: bar:
+CHECK: Hash: 0x0000000000000002
+CHECK: Counters: 2
+CHECK: Function count: 55
+CHECK: Block counts: [65]
+CHECK: Functions shown: 2
+CHECK: Total functions: 2
+CHECK: Maximum function count: 55
+CHECK: Maximum internal block count: 65
diff --git a/test/tools/llvm-profdata/raw-64-bits-le.test b/test/tools/llvm-profdata/raw-64-bits-le.test
new file mode 100644
index 000000000000..0e6853811ec4
--- /dev/null
+++ b/test/tools/llvm-profdata/raw-64-bits-le.test
@@ -0,0 +1,42 @@
+RUN: printf '\201rforpl\377' > %t
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t
+RUN: printf '\2\0\0\0\0\0\0\0' >> %t
+RUN: printf '\3\0\0\0\0\0\0\0' >> %t
+RUN: printf '\6\0\0\0\0\0\0\0' >> %t
+RUN: printf '\0\0\4\0\1\0\0\0' >> %t
+RUN: printf '\0\0\4\0\2\0\0\0' >> %t
+
+RUN: printf '\3\0\0\0' >> %t
+RUN: printf '\1\0\0\0' >> %t
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t
+RUN: printf '\0\0\4\0\2\0\0\0' >> %t
+RUN: printf '\0\0\4\0\1\0\0\0' >> %t
+
+RUN: printf '\03\0\0\0' >> %t
+RUN: printf '\02\0\0\0' >> %t
+RUN: printf '\02\0\0\0\0\0\0\0' >> %t
+RUN: printf '\03\0\4\0\2\0\0\0' >> %t
+RUN: printf '\10\0\4\0\1\0\0\0' >> %t
+
+RUN: printf '\023\0\0\0\0\0\0\0' >> %t
+RUN: printf '\067\0\0\0\0\0\0\0' >> %t
+RUN: printf '\101\0\0\0\0\0\0\0' >> %t
+RUN: printf 'foobar' >> %t
+
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s
+
+CHECK: Counters:
+CHECK: foo:
+CHECK: Hash: 0x0000000000000001
+CHECK: Counters: 1
+CHECK: Function count: 19
+CHECK: Block counts: []
+CHECK: bar:
+CHECK: Hash: 0x0000000000000002
+CHECK: Counters: 2
+CHECK: Function count: 55
+CHECK: Block counts: [65]
+CHECK: Functions shown: 2
+CHECK: Total functions: 2
+CHECK: Maximum function count: 55
+CHECK: Maximum internal block count: 65
diff --git a/test/tools/llvm-profdata/raw-magic-but-no-header.test b/test/tools/llvm-profdata/raw-magic-but-no-header.test
new file mode 100644
index 000000000000..6db723c3e253
--- /dev/null
+++ b/test/tools/llvm-profdata/raw-magic-but-no-header.test
@@ -0,0 +1,6 @@
+RUN: printf '\201rforpl\377' > %t
+RUN: not llvm-profdata show %t 2>&1 | FileCheck %s
+RUN: printf '\377lprofr\201' > %t
+RUN: not llvm-profdata show %t 2>&1 | FileCheck %s
+
+CHECK: error: {{.+}}: Invalid header
diff --git a/test/tools/llvm-profdata/raw-two-profiles.test b/test/tools/llvm-profdata/raw-two-profiles.test
new file mode 100644
index 000000000000..3260836ba666
--- /dev/null
+++ b/test/tools/llvm-profdata/raw-two-profiles.test
@@ -0,0 +1,64 @@
+RUN: printf '\201rforpl\377' > %t-foo.profraw
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t-foo.profraw
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t-foo.profraw
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t-foo.profraw
+RUN: printf '\3\0\0\0\0\0\0\0' >> %t-foo.profraw
+RUN: printf '\0\0\4\0\1\0\0\0' >> %t-foo.profraw
+RUN: printf '\0\0\4\0\2\0\0\0' >> %t-foo.profraw
+
+RUN: printf '\3\0\0\0' >> %t-foo.profraw
+RUN: printf '\1\0\0\0' >> %t-foo.profraw
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t-foo.profraw
+RUN: printf '\0\0\4\0\2\0\0\0' >> %t-foo.profraw
+RUN: printf '\0\0\4\0\1\0\0\0' >> %t-foo.profraw
+
+RUN: printf '\023\0\0\0\0\0\0\0' >> %t-foo.profraw
+RUN: printf 'foo' >> %t-foo.profraw
+
+RUN: printf '\201rforpl\377' > %t-bar.profraw
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t-bar.profraw
+RUN: printf '\1\0\0\0\0\0\0\0' >> %t-bar.profraw
+RUN: printf '\2\0\0\0\0\0\0\0' >> %t-bar.profraw
+RUN: printf '\3\0\0\0\0\0\0\0' >> %t-bar.profraw
+RUN: printf '\0\0\6\0\1\0\0\0' >> %t-bar.profraw
+RUN: printf '\0\0\6\0\2\0\0\0' >> %t-bar.profraw
+
+RUN: printf '\3\0\0\0' >> %t-bar.profraw
+RUN: printf '\2\0\0\0' >> %t-bar.profraw
+RUN: printf '\2\0\0\0\0\0\0\0' >> %t-bar.profraw
+RUN: printf '\0\0\6\0\2\0\0\0' >> %t-bar.profraw
+RUN: printf '\0\0\6\0\1\0\0\0' >> %t-bar.profraw
+
+RUN: printf '\067\0\0\0\0\0\0\0' >> %t-bar.profraw
+RUN: printf '\101\0\0\0\0\0\0\0' >> %t-bar.profraw
+RUN: printf 'bar' >> %t-bar.profraw
+
+Versions of the profiles that are padded to eight byte alignment.
+RUN: cat %t-foo.profraw > %t-foo-padded.profraw
+RUN: printf '\0\0\0\0\0' >> %t-foo-padded.profraw
+RUN: cat %t-bar.profraw > %t-bar-padded.profraw
+RUN: printf '\0\0\0\0\0' >> %t-bar-padded.profraw
+
+RUN: cat %t-foo.profraw %t-bar.profraw > %t-nopad.profraw
+RUN: cat %t-foo-padded.profraw %t-bar.profraw > %t-pad-between.profraw
+RUN: cat %t-foo-padded.profraw %t-bar-padded.profraw > %t-pad.profraw
+
+RUN: llvm-profdata show %t-nopad.profraw -all-functions -counts | FileCheck %s
+RUN: llvm-profdata show %t-pad-between.profraw -all-functions -counts | FileCheck %s
+RUN: llvm-profdata show %t-pad.profraw -all-functions -counts | FileCheck %s
+
+CHECK: Counters:
+CHECK: foo:
+CHECK: Hash: 0x0000000000000001
+CHECK: Counters: 1
+CHECK: Function count: 19
+CHECK: Block counts: []
+CHECK: bar:
+CHECK: Hash: 0x0000000000000002
+CHECK: Counters: 2
+CHECK: Function count: 55
+CHECK: Block counts: [65]
+CHECK: Functions shown: 2
+CHECK: Total functions: 2
+CHECK: Maximum function count: 55
+CHECK: Maximum internal block count: 65
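The padded variants above append five NUL bytes so that each raw profile ends on an 8-byte boundary (each profile otherwise ends with a 3-byte function name after 4- and 8-byte fields). A minimal sketch of that round-up, assuming only padding to the next multiple of 8; paddingFor is a made-up helper, not llvm-profdata code:

// Sketch of the 8-byte padding used for the *-padded.profraw files above;
// assumes only that a raw profile is padded with NULs to the next multiple of 8.
#include <cstdint>
#include <iostream>

static uint64_t paddingFor(uint64_t Size, uint64_t Align = 8) {
  return (Align - Size % Align) % Align; // 0 when Size is already aligned
}

int main() {
  // By a straight count of the printf payloads above, the foo profile is
  // 99 bytes, so 5 NUL bytes -- exactly what printf '\0\0\0\0\0' appends.
  std::cout << paddingFor(99) << "\n"; // prints 5
}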
diff --git a/test/tools/llvm-profdata/simple.test b/test/tools/llvm-profdata/simple.test
new file mode 100644
index 000000000000..18741dd2ba86
--- /dev/null
+++ b/test/tools/llvm-profdata/simple.test
@@ -0,0 +1,77 @@
+RUN: llvm-profdata merge %p/Inputs/foo3-1.profdata %p/Inputs/foo3-2.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO3
+RUN: llvm-profdata merge %p/Inputs/foo3-2.profdata %p/Inputs/foo3-1.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO3
+FOO3: foo:
+FOO3: Counters: 3
+FOO3: Function count: 8
+FOO3: Block counts: [7, 6]
+FOO3: Total functions: 1
+FOO3: Maximum function count: 8
+FOO3: Maximum internal block count: 7
+
+RUN: llvm-profdata merge %p/Inputs/foo4-1.profdata %p/Inputs/foo4-2.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO4
+RUN: llvm-profdata merge %p/Inputs/foo4-2.profdata %p/Inputs/foo4-1.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO4
+FOO4: foo:
+FOO4: Counters: 4
+FOO4: Function count: 18
+FOO4: Block counts: [28, 38, 48]
+FOO4: Total functions: 1
+FOO4: Maximum function count: 18
+FOO4: Maximum internal block count: 48
+
+RUN: llvm-profdata merge %p/Inputs/foo3bar3-1.profdata %p/Inputs/foo3bar3-2.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO3BAR3
+RUN: llvm-profdata merge %p/Inputs/foo3bar3-2.profdata %p/Inputs/foo3bar3-1.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO3BAR3
+FOO3BAR3: foo:
+FOO3BAR3: Counters: 3
+FOO3BAR3: Function count: 19
+FOO3BAR3: Block counts: [22, 28]
+FOO3BAR3: bar:
+FOO3BAR3: Counters: 3
+FOO3BAR3: Function count: 36
+FOO3BAR3: Block counts: [42, 50]
+FOO3BAR3: Total functions: 2
+FOO3BAR3: Maximum function count: 36
+FOO3BAR3: Maximum internal block count: 50
+
+RUN: llvm-profdata merge %p/Inputs/empty.profdata %p/Inputs/foo3-1.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO3EMPTY
+FOO3EMPTY: foo:
+FOO3EMPTY: Counters: 3
+FOO3EMPTY: Function count: 1
+FOO3EMPTY: Block counts: [2, 3]
+FOO3EMPTY: Total functions: 1
+FOO3EMPTY: Maximum function count: 1
+FOO3EMPTY: Maximum internal block count: 3
+
+RUN: llvm-profdata merge %p/Inputs/foo3-1.profdata %p/Inputs/foo3bar3-1.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=FOO3FOO3BAR3
+FOO3FOO3BAR3: foo:
+FOO3FOO3BAR3: Counters: 3
+FOO3FOO3BAR3: Function count: 3
+FOO3FOO3BAR3: Block counts: [5, 8]
+FOO3FOO3BAR3: bar:
+FOO3FOO3BAR3: Counters: 3
+FOO3FOO3BAR3: Function count: 7
+FOO3FOO3BAR3: Block counts: [11, 13]
+FOO3FOO3BAR3: Total functions: 2
+FOO3FOO3BAR3: Maximum function count: 7
+FOO3FOO3BAR3: Maximum internal block count: 13
+
+RUN: llvm-profdata merge %p/Inputs/foo3-1.profdata %p/Inputs/bar3-1.profdata -o %t
+RUN: llvm-profdata show %t -all-functions -counts | FileCheck %s --check-prefix=DISJOINT
+DISJOINT: foo:
+DISJOINT: Counters: 3
+DISJOINT: Function count: 1
+DISJOINT: Block counts: [2, 3]
+DISJOINT: bar:
+DISJOINT: Counters: 3
+DISJOINT: Function count: 1
+DISJOINT: Block counts: [2, 3]
+DISJOINT: Total functions: 2
+DISJOINT: Maximum function count: 1
+DISJOINT: Maximum internal block count: 3
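The merged values checked in simple.test above are element-wise sums of the text inputs shown earlier: for FOO3, foo3-1.profdata's counts 1, 2, 3 plus foo3-2.profdata's 7, 5, 3 give a function count of 8 and block counts [7, 6]. A minimal sketch of that summation, shown only to make the expected arithmetic explicit; mergeCounts is a made-up helper, not llvm-profdata's merge implementation:

// Illustration of the element-wise counter merge the FOO3 checks above expect;
// not llvm-profdata code.
#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<uint64_t> mergeCounts(const std::vector<uint64_t> &A,
                                         const std::vector<uint64_t> &B) {
  assert(A.size() == B.size() && "profiles for one function must agree");
  std::vector<uint64_t> Out(A.size());
  for (size_t I = 0; I < A.size(); ++I)
    Out[I] = A[I] + B[I]; // counts simply add across runs
  return Out;
}

int main() {
  // foo3-1.profdata has counts 1, 2, 3 and foo3-2.profdata has 7, 5, 3;
  // the first count is the function count, the rest are block counts.
  std::vector<uint64_t> Merged = mergeCounts({1, 2, 3}, {7, 5, 3});
  assert(Merged[0] == 8 && Merged[1] == 7 && Merged[2] == 6); // matches FOO3
  return 0;
}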
diff --git a/test/tools/llvm-readobj/ARM/attributes.s b/test/tools/llvm-readobj/ARM/attributes.s
new file mode 100644
index 000000000000..594bab85a09e
--- /dev/null
+++ b/test/tools/llvm-readobj/ARM/attributes.s
@@ -0,0 +1,287 @@
+@ RUN: llvm-mc -triple armv7-eabi -filetype obj -o - %s \
+@ RUN: | llvm-readobj -arm-attributes - | FileCheck %s
+
+ .syntax unified
+
+ .cpu cortex-a8
+ .fpu neon
+
+ .eabi_attribute Tag_CPU_raw_name, "Cortex-A9"
+ .eabi_attribute Tag_CPU_name, "cortex-a9"
+ .eabi_attribute Tag_CPU_arch, 10
+ .eabi_attribute Tag_CPU_arch_profile, 'A'
+ .eabi_attribute Tag_ARM_ISA_use, 0
+ .eabi_attribute Tag_THUMB_ISA_use, 2
+ .eabi_attribute Tag_FP_arch, 3
+ .eabi_attribute Tag_WMMX_arch, 0
+ .eabi_attribute Tag_Advanced_SIMD_arch, 1
+ .eabi_attribute Tag_PCS_config, 2
+ .eabi_attribute Tag_ABI_PCS_R9_use, 0
+ .eabi_attribute Tag_ABI_PCS_RW_data, 0
+ .eabi_attribute Tag_ABI_PCS_RO_data, 0
+ .eabi_attribute Tag_ABI_PCS_GOT_use, 0
+ .eabi_attribute Tag_ABI_PCS_wchar_t, 4
+ .eabi_attribute Tag_ABI_FP_rounding, 1
+ .eabi_attribute Tag_ABI_FP_denormal, 2
+ .eabi_attribute Tag_ABI_FP_exceptions, 1
+ .eabi_attribute Tag_ABI_FP_user_exceptions, 1
+ .eabi_attribute Tag_ABI_FP_number_model, 3
+ .eabi_attribute Tag_ABI_align_needed, 1
+ .eabi_attribute Tag_ABI_align_preserved, 2
+ .eabi_attribute Tag_ABI_enum_size, 3
+ .eabi_attribute Tag_ABI_HardFP_use, 0
+ .eabi_attribute Tag_ABI_VFP_args, 1
+ .eabi_attribute Tag_ABI_WMMX_args, 0
+ .eabi_attribute Tag_ABI_optimization_goals, 2
+ .eabi_attribute Tag_ABI_FP_optimization_goals, 2
+ .eabi_attribute Tag_compatibility, 1
+ .eabi_attribute Tag_compatibility, 1, "aeabi"
+ .eabi_attribute Tag_CPU_unaligned_access, 0
+ .eabi_attribute Tag_FP_HP_extension, 0
+ .eabi_attribute Tag_ABI_FP_16bit_format, 0
+ .eabi_attribute Tag_MPextension_use, 0
+ .eabi_attribute Tag_DIV_use, 0
+ .eabi_attribute Tag_nodefaults, 0
+ .eabi_attribute Tag_also_compatible_with, "gnu"
+ .eabi_attribute Tag_T2EE_use, 0
+ .eabi_attribute Tag_conformance, "2.09"
+ .eabi_attribute Tag_Virtualization_use, 0
+
+@ CHECK: BuildAttributes {
+@ CHECK: Section 1 {
+@ CHECK: Tag: Tag_File (0x1)
+@ CHECK: FileAttributes {
+@ CHECK: Attribute {
+@ CHECK: Tag: 4
+@ CHECK: TagName: CPU_raw_name
+@ CHECK: Value: CORTEX-A9
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 5
+@ CHECK: TagName: CPU_name
+@ CHECK: Value: CORTEX-A9
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 6
+@ CHECK: Value: 10
+@ CHECK: TagName: CPU_arch
+@ CHECK: Description: ARM v7
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 7
+@ CHECK: Value: 65
+@ CHECK: TagName: CPU_arch_profile
+@ CHECK: Description: Application
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 8
+@ CHECK: Value: 0
+@ CHECK: TagName: ARM_ISA_use
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 9
+@ CHECK: Value: 2
+@ CHECK: TagName: THUMB_ISA_use
+@ CHECK: Description: Thumb-2
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 10
+@ CHECK: Value: 3
+@ CHECK: TagName: FP_arch
+@ CHECK: Description: VFPv3
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 11
+@ CHECK: Value: 0
+@ CHECK: TagName: WMMX_arch
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 12
+@ CHECK: Value: 1
+@ CHECK: TagName: Advanced_SIMD_arch
+@ CHECK: Description: NEONv1
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 13
+@ CHECK: Value: 2
+@ CHECK: TagName: PCS_config
+@ CHECK: Description: Linux Application
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 14
+@ CHECK: Value: 0
+@ CHECK: TagName: ABI_PCS_R9_use
+@ CHECK: Description: v6
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 15
+@ CHECK: Value: 0
+@ CHECK: TagName: ABI_PCS_RW_data
+@ CHECK: Description: Absolute
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 16
+@ CHECK: Value: 0
+@ CHECK: TagName: ABI_PCS_RO_data
+@ CHECK: Description: Absolute
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 17
+@ CHECK: Value: 0
+@ CHECK: TagName: ABI_PCS_GOT_use
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 18
+@ CHECK: Value: 4
+@ CHECK: TagName: ABI_PCS_wchar_t
+@ CHECK: Description: 4-byte
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 19
+@ CHECK: Value: 1
+@ CHECK: TagName: ABI_FP_rounding
+@ CHECK: Description: Runtime
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 20
+@ CHECK: Value: 2
+@ CHECK: TagName: ABI_FP_denormal
+@ CHECK: Description: Sign Only
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 21
+@ CHECK: Value: 1
+@ CHECK: TagName: ABI_FP_exceptions
+@ CHECK: Description: IEEE-754
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 22
+@ CHECK: Value: 1
+@ CHECK: TagName: ABI_FP_user_exceptions
+@ CHECK: Description: IEEE-754
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 23
+@ CHECK: Value: 3
+@ CHECK: TagName: ABI_FP_number_model
+@ CHECK: Description: IEEE-754
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 24
+@ CHECK: Value: 1
+@ CHECK: TagName: ABI_align_needed
+@ CHECK: Description: 8-byte alignment
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 25
+@ CHECK: Value: 2
+@ CHECK: TagName: ABI_align_preserved
+@ CHECK: Description: 8-byte data and code alignment
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 26
+@ CHECK: Value: 3
+@ CHECK: TagName: ABI_enum_size
+@ CHECK: Description: External Int32
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 27
+@ CHECK: Value: 0
+@ CHECK: TagName: ABI_HardFP_use
+@ CHECK: Description: Tag_FP_arch
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 28
+@ CHECK: Value: 1
+@ CHECK: TagName: ABI_VFP_args
+@ CHECK: Description: AAPCS VFP
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 29
+@ CHECK: Value: 0
+@ CHECK: TagName: ABI_WMMX_args
+@ CHECK: Description: AAPCS
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 30
+@ CHECK: Value: 2
+@ CHECK: TagName: ABI_optimization_goals
+@ CHECK: Description: Aggressive Speed
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 31
+@ CHECK: Value: 2
+@ CHECK: TagName: ABI_FP_optimization_goals
+@ CHECK: Description: Aggressive Speed
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 32
+@ CHECK: Value: 1, AEABI
+@ CHECK: TagName: compatibility
+@ CHECK: Description: AEABI Conformant
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 34
+@ CHECK: Value: 0
+@ CHECK: TagName: CPU_unaligned_access
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 36
+@ CHECK: Value: 0
+@ CHECK: TagName: FP_HP_extension
+@ CHECK: Description: If Available
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 38
+@ CHECK: Value: 0
+@ CHECK: TagName: ABI_FP_16bit_format
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 42
+@ CHECK: Value: 0
+@ CHECK: TagName: MPextension_use
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 44
+@ CHECK: Value: 0
+@ CHECK: TagName: DIV_use
+@ CHECK: Description: If Available
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 64
+@ CHECK: Value: 0
+@ CHECK: TagName: nodefaults
+@ CHECK: Description: Unspecified Tags UNDEFINED
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 65
+@ CHECK: TagName: also_compatible_with
+@ CHECK: Value: GNU
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 66
+@ CHECK: Value: 0
+@ CHECK: TagName: T2EE_use
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 67
+@ CHECK: TagName: conformance
+@ CHECK: Value: 2.09
+@ CHECK: }
+@ CHECK: Attribute {
+@ CHECK: Tag: 68
+@ CHECK: Value: 0
+@ CHECK: TagName: Virtualization_use
+@ CHECK: Description: Not Permitted
+@ CHECK: }
+@ CHECK: }
+@ CHECK: }
+@ CHECK: }
+
diff --git a/test/tools/llvm-readobj/ARM/lit.local.cfg b/test/tools/llvm-readobj/ARM/lit.local.cfg
new file mode 100644
index 000000000000..98c6700c209d
--- /dev/null
+++ b/test/tools/llvm-readobj/ARM/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'ARM' in config.root.targets:
+ config.unsupported = True
+
diff --git a/test/tools/llvm-readobj/ARM/unwind.s b/test/tools/llvm-readobj/ARM/unwind.s
new file mode 100644
index 000000000000..afabeb7cd2e7
--- /dev/null
+++ b/test/tools/llvm-readobj/ARM/unwind.s
@@ -0,0 +1,326 @@
+@ RUN: llvm-mc -triple armv7-linux-eabi -filetype obj -o - %s \
+@ RUN: | llvm-readobj -u | FileCheck %s
+
+ .syntax unified
+
+ .cpu cortex-a8
+ .fpu neon
+
+ .section .personality
+
+ .type __personality,%function
+__personality:
+ .fnstart
+ bkpt
+ .fnend
+
+
+ .section .personality0
+
+ .type personality0,%function
+personality0:
+ .fnstart
+ bx lr
+ .fnend
+
+
+ .section .personality1
+
+ .type personality1,%function
+personality1:
+ .fnstart
+ .pad #0x100
+ sub sp, sp, #0x100
+ .save {r0-r11}
+ push {r0-r11}
+ pop {r0-r11}
+ add sp, sp, #0x100
+ bx lr
+ .fnend
+
+
+ .section .custom_personality
+
+ .type custom_personality,%function
+custom_personality:
+ .fnstart
+ .personality __personality
+ bx lr
+ .fnend
+
+
+ .section .opcodes
+
+ .type opcodes,%function
+opcodes:
+ .fnstart
+ .vsave {d8-d12}
+ vpush {d8-d12}
+ vpop {d8-d12}
+ bx lr
+ .fnend
+
+
+ .section .multiple
+
+ .type function0,%function
+function0:
+ .fnstart
+ bx lr
+ .fnend
+
+ .type function1,%function
+function1:
+ .fnstart
+ .personality __personality
+ bx lr
+ .fnend
+
+ .type function2,%function
+function2:
+ .fnstart
+ bx lr
+ .fnend
+
+ .section .raw
+
+ .type raw,%function
+ .thumb_func
+raw:
+ .fnstart
+ .unwind_raw 12, 0x02
+ .unwind_raw -12, 0x42
+ .unwind_raw 0, 0x80, 0x00
+ .unwind_raw 4, 0x81, 0x00
+ .unwind_raw 4, 0x80, 0x01
+ .unwind_raw 8, 0x80, 0xc0
+ .unwind_raw 12, 0x84, 0xc0
+ .unwind_raw 0, 0x91
+ .unwind_raw 8, 0xa1
+ .unwind_raw 12, 0xa9
+ .unwind_raw 0, 0xb0
+ .unwind_raw 4, 0xb1, 0x01
+ .unwind_raw 0xa04, 0xb2, 0x80, 0x04
+ .unwind_raw 24, 0xb3, 0x12
+ .unwind_raw 24, 0xba
+ .unwind_raw 24, 0xc2
+ .unwind_raw 24, 0xc6, 0x02
+ .unwind_raw 8, 0xc7, 0x03
+ .unwind_raw 24, 0xc8, 0x02
+ .unwind_raw 24, 0xc9, 0x02
+ .unwind_raw 64, 0xd7
+ .fnend
+
+ .section .spare
+
+ .type spare,%function
+spare:
+ .fnstart
+ .unwind_raw 4, 0x00
+ .unwind_raw -4, 0x40
+ .unwind_raw 0, 0x80, 0x00
+ .unwind_raw 4, 0x88, 0x00
+ .unwind_raw 0, 0x91
+ .unwind_raw 0, 0x9d
+ .unwind_raw 0, 0x9f
+ .unwind_raw 0, 0xa0
+ .unwind_raw 0, 0xa8
+ .unwind_raw 0, 0xb0
+ .unwind_raw 0, 0xb1, 0x00
+ .unwind_raw 4, 0xb1, 0x01
+ .unwind_raw 0, 0xb1, 0x10
+ .unwind_raw 0x204, 0xb2, 0x00
+ .unwind_raw 16, 0xb3, 0x00
+ .unwind_raw 0, 0xb4
+ .unwind_raw 16, 0xb8
+ .unwind_raw 4, 0xc0
+ .unwind_raw 4, 0xc6, 0x00
+ .unwind_raw 4, 0xc7, 0x00
+ .unwind_raw 4, 0xc7, 0x01
+ .unwind_raw 0, 0xc7, 0x10
+ .unwind_raw 16, 0xc8, 0x00
+ .unwind_raw 16, 0xc9, 0x00
+ .unwind_raw 0, 0xca
+ .unwind_raw 16, 0xd0
+ .unwind_raw 0, 0xd8
+ .fnend
+
+@ CHECK: UnwindInformation {
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.personality
+@ CHECK: Entries [
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x0
+@ CHECK: FunctionName: __personality
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.personality0
+@ CHECK: Entries [
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x0
+@ CHECK: FunctionName: personality0
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.personality1
+@ CHECK: Entries [
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x0
+@ CHECK: FunctionName: personality1
+@ CHECK: ExceptionHandlingTable: .ARM.extab.personality1
+@ CHECK: TableEntryOffset: 0x0
+@ CHECK: Model: Compact
+@ CHECK: PersonalityIndex: 1
+@ CHECK: Opcodes [
+@ CHECK: 0xB1 0x0F ; pop {r0, r1, r2, r3}
+@ CHECK: 0xA7 ; pop {r4, r5, r6, r7, r8, r9, r10, fp}
+@ CHECK: 0x3F ; vsp = vsp + 256
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.custom_personality
+@ CHECK: Entries [
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x0
+@ CHECK: FunctionName: custom_personality
+@ CHECK: ExceptionHandlingTable: .ARM.extab.custom_personality
+@ CHECK: TableEntryOffset: 0x0
+@ CHECK: Model: Generic
+@ CHECK: PersonalityRoutineAddress: 0x0
+@ CHECK: }
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.opcodes
+@ CHECK: Entries [
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x0
+@ CHECK: FunctionName: opcodes
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0xC9 0x84 ; pop {d8, d9, d10, d11, d12}
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.multiple
+@ CHECK: Entries [
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x0
+@ CHECK: FunctionName: function0
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x4
+@ CHECK: FunctionName: function1
+@ CHECK: ExceptionHandlingTable: .ARM.extab.multiple
+@ CHECK: Model: Generic
+@ CHECK: PersonalityRoutineAddress: 0x0
+@ CHECK: }
+@ CHECK: Entry {
+@ CHECK: FunctionAddress: 0x8
+@ CHECK: FunctionName: function2
+@ CHECK: Model: Compact (Inline)
+@ CHECK: PersonalityIndex: 0
+@ CHECK: Opcodes [
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xB0 ; finish
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.raw
+@ CHECK: Entries [
+@ CHECK: Opcodes [
+@ CHECK: 0xD7 ; pop {d8, d9, d10, d11, d12, d13, d14, d15}
+@ CHECK: 0xC9 0x02 ; pop {d0, d1, d2}
+@ CHECK: 0xC8 0x02 ; pop {d16, d17, d18}
+@ CHECK: 0xC7 0x03 ; pop {wCGR0, wCGR1}
+@ CHECK: 0xC6 0x02 ; pop {wR0, wR1, wR2}
+@ CHECK: 0xC2 ; pop {wR10, wR11, wR12}
+@ CHECK: 0xBA ; pop {d8, d9, d10}
+@ CHECK: 0xB3 0x12 ; pop {d1, d2, d3}
+@ CHECK: 0xB2 0x80 0x04 ; vsp = vsp + 2564
+@ CHECK: 0xB1 0x01 ; pop {r0}
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xA9 ; pop {r4, r5, lr}
+@ CHECK: 0xA1 ; pop {r4, r5}
+@ CHECK: 0x91 ; vsp = r1
+@ CHECK: 0x84 0xC0 ; pop {r10, fp, lr}
+@ CHECK: 0x80 0xC0 ; pop {r10, fp}
+@ CHECK: 0x80 0x01 ; pop {r4}
+@ CHECK: 0x81 0x00 ; pop {ip}
+@ CHECK: 0x80 0x00 ; refuse to unwind
+@ CHECK: 0x42 ; vsp = vsp - 12
+@ CHECK: 0x02 ; vsp = vsp + 12
+@ CHECK: ]
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: UnwindIndexTable {
+@ CHECK: SectionName: .ARM.exidx.spare
+@ CHECK: Entries [
+@ CHECK: Opcodes [
+@ CHECK: 0xD8 ; spare
+@ CHECK: 0xD0 ; pop {d8}
+@ CHECK: 0xCA ; spare
+@ CHECK: 0xC9 0x00 ; pop {d0}
+@ CHECK: 0xC8 0x00 ; pop {d16}
+@ CHECK: 0xC7 0x10 ; spare
+@ CHECK: 0xC7 0x01 ; pop {wCGR0}
+@ CHECK: 0xC7 0x00 ; spare
+@ CHECK: 0xC6 0x00 ; pop {wR0}
+@ CHECK: 0xC0 ; pop {wR10}
+@ CHECK: 0xB8 ; pop {d8}
+@ CHECK: 0xB4 ; spare
+@ CHECK: 0xB3 0x00 ; pop {d0}
+@ CHECK: 0xB2 0x00 ; vsp = vsp + 516
+@ CHECK: 0xB1 0x10 ; spare
+@ CHECK: 0xB1 0x01 ; pop {r0}
+@ CHECK: 0xB1 0x00 ; spare
+@ CHECK: 0xB0 ; finish
+@ CHECK: 0xA8 ; pop {r4, lr}
+@ CHECK: 0xA0 ; pop {r4}
+@ CHECK: 0x9F ; reserved (WiMMX MOVrr)
+@ CHECK: 0x9D ; reserved (ARM MOVrr)
+@ CHECK: 0x91 ; vsp = r1
+@ CHECK: 0x88 0x00 ; pop {pc}
+@ CHECK: 0x80 0x00 ; refuse to unwind
+@ CHECK: 0x40 ; vsp = vsp - 4
+@ CHECK: 0x00 ; vsp = vsp + 4
+@ CHECK: ]
+@ CHECK: ]
+@ CHECK: }
+@ CHECK: }
+
diff --git a/test/tools/llvm-readobj/Inputs/cxx-cli-aux.cpp b/test/tools/llvm-readobj/Inputs/cxx-cli-aux.cpp
new file mode 100644
index 000000000000..6386cb2e0151
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/cxx-cli-aux.cpp
@@ -0,0 +1,2 @@
+// cl.exe -clr -c t.cpp -Fo"cxx-cli-aux.obj.coff-i386"
+__declspec(appdomain) int PerAppDomain = 0;
diff --git a/test/tools/llvm-readobj/Inputs/cxx-cli-aux.obj.coff-i386 b/test/tools/llvm-readobj/Inputs/cxx-cli-aux.obj.coff-i386
new file mode 100644
index 000000000000..a88c670643af
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/cxx-cli-aux.obj.coff-i386
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/dynamic-table-exe.mips b/test/tools/llvm-readobj/Inputs/dynamic-table-exe.mips
new file mode 100755
index 000000000000..28d8e33752c5
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/dynamic-table-exe.mips
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/dynamic-table-exe.x86 b/test/tools/llvm-readobj/Inputs/dynamic-table-exe.x86
new file mode 100755
index 000000000000..4edbe5869957
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/dynamic-table-exe.x86
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/dynamic-table.mips b/test/tools/llvm-readobj/Inputs/dynamic-table-so.mips
index ab36ceeb5a00..ab36ceeb5a00 100644
--- a/test/tools/llvm-readobj/Inputs/dynamic-table.mips
+++ b/test/tools/llvm-readobj/Inputs/dynamic-table-so.mips
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/dynamic-table.c b/test/tools/llvm-readobj/Inputs/dynamic-table.c
index 6d36e8a71186..6b01ba5388be 100644
--- a/test/tools/llvm-readobj/Inputs/dynamic-table.c
+++ b/test/tools/llvm-readobj/Inputs/dynamic-table.c
@@ -1,7 +1,9 @@
// clang -target mipsel-linux-gnu -shared -fPIC -lc dynamic-table.c \
-// -o dynamic-table.mips
+// -o dynamic-table-so.mips
+// clang -target mipsel-linux-gnu -lc dynamic-table.c \
+// -o dynamic-table-exe.mips
int puts(const char *);
-void foo(void) {
+int main(void) {
puts("Hello, World");
}
diff --git a/test/tools/llvm-readobj/Inputs/file-aux-record.yaml b/test/tools/llvm-readobj/Inputs/file-aux-record.yaml
new file mode 100644
index 000000000000..d19afaf68a85
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/file-aux-record.yaml
@@ -0,0 +1,21 @@
+header: !Header
+ Machine: IMAGE_FILE_MACHINE_I386 # (0x14c)
+ Characteristics: [ IMAGE_FILE_DEBUG_STRIPPED ]
+sections:
+symbols:
+ - !Symbol
+ Name: .file
+ Value: 0
+ SectionNumber: 65534
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_FILE
+ File: eighteen-chars.obj
+ - !Symbol
+ Name: '@comp.id'
+ Value: 13485607
+ SectionNumber: 65535
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+
diff --git a/test/tools/llvm-readobj/Inputs/file-multiple-aux-records.yaml b/test/tools/llvm-readobj/Inputs/file-multiple-aux-records.yaml
new file mode 100644
index 000000000000..8d8f68447d47
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/file-multiple-aux-records.yaml
@@ -0,0 +1,21 @@
+header: !Header
+ Machine: IMAGE_FILE_MACHINE_I386 # (0x14c)
+ Characteristics: [ IMAGE_FILE_DEBUG_STRIPPED ]
+sections:
+symbols:
+ - !Symbol
+ Name: .file
+ Value: 0
+ SectionNumber: 65534
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_FILE
+ File: first-section-has-eighteen-characters.asm
+ - !Symbol
+ Name: '@comp.id'
+ Value: 13485607
+ SectionNumber: 65535
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+
diff --git a/test/tools/llvm-readobj/Inputs/got-empty.exe.mipsel b/test/tools/llvm-readobj/Inputs/got-empty.exe.mipsel
new file mode 100755
index 000000000000..b57874557c87
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/got-empty.exe.mipsel
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/got-tls.so.elf-mips64el b/test/tools/llvm-readobj/Inputs/got-tls.so.elf-mips64el
new file mode 100755
index 000000000000..3afc567f85d5
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/got-tls.so.elf-mips64el
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-i368 b/test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-i368
new file mode 100644
index 000000000000..1672d3a54242
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-i368
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-x86_64 b/test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-x86_64
new file mode 100644
index 000000000000..30bfe79bc308
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/multifile-linetables.obj.coff-2012-x86_64
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-i368 b/test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-i368
new file mode 100644
index 000000000000..a0196ff2d737
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-i368
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-x86_64 b/test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-x86_64
new file mode 100644
index 000000000000..14f65ab2f6d7
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/multifunction-linetables.obj.coff-2012-x86_64
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/nop.exe.coff-x86-64 b/test/tools/llvm-readobj/Inputs/nop.exe.coff-x86-64
new file mode 100644
index 000000000000..62b3b46c369c
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/nop.exe.coff-x86-64
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/trivial.obj.coff-arm b/test/tools/llvm-readobj/Inputs/trivial.obj.coff-arm
new file mode 100755
index 000000000000..e3b5df4c7784
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/trivial.obj.coff-arm
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/trivial.obj.coff-i386 b/test/tools/llvm-readobj/Inputs/trivial.obj.coff-i386
index 282e5699a767..7486562c9787 100644
--- a/test/tools/llvm-readobj/Inputs/trivial.obj.coff-i386
+++ b/test/tools/llvm-readobj/Inputs/trivial.obj.coff-i386
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/trivial.obj.elf-mipsel b/test/tools/llvm-readobj/Inputs/trivial.obj.elf-mipsel
new file mode 100644
index 000000000000..c523908605cf
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/trivial.obj.elf-mipsel
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/zero-string-table.obj.coff-i386 b/test/tools/llvm-readobj/Inputs/zero-string-table.obj.coff-i386
new file mode 100644
index 000000000000..f41f224f6097
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/zero-string-table.obj.coff-i386
Binary files differ
diff --git a/test/tools/llvm-readobj/codeview-linetables.test b/test/tools/llvm-readobj/codeview-linetables.test
new file mode 100644
index 000000000000..4854d7ac6a39
--- /dev/null
+++ b/test/tools/llvm-readobj/codeview-linetables.test
@@ -0,0 +1,282 @@
+RUN: llvm-readobj -s -codeview-linetables %p/Inputs/multifunction-linetables.obj.coff-2012-i368 \
+RUN: | FileCheck %s -check-prefix MFUN32
+RUN: llvm-readobj -s -codeview-linetables %p/Inputs/multifunction-linetables.obj.coff-2012-x86_64 \
+RUN: | FileCheck %s -check-prefix MFUN64
+RUN: llvm-readobj -s -codeview-linetables %p/Inputs/multifile-linetables.obj.coff-2012-i368 \
+RUN: | FileCheck %s -check-prefix MFILE32
+RUN: llvm-readobj -s -codeview-linetables %p/Inputs/multifile-linetables.obj.coff-2012-x86_64 \
+RUN: | FileCheck %s -check-prefix MFILE64
+
+MFUN32: CodeViewLineTables [
+MFUN32-NEXT: Magic: 0x4
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF1
+MFUN32-NEXT: PayloadSize: 0x52
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF5
+MFUN32-NEXT: PayloadSize: 0x24
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF1
+MFUN32-NEXT: PayloadSize: 0x4B
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF2
+MFUN32-NEXT: PayloadSize: 0x30
+MFUN32: FunctionName: _x
+MFUN32-NEXT: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF5
+MFUN32-NEXT: PayloadSize: 0x24
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF1
+MFUN32-NEXT: PayloadSize: 0x4B
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF2
+MFUN32-NEXT: PayloadSize: 0x30
+MFUN32: FunctionName: _y
+MFUN32-NEXT: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF5
+MFUN32-NEXT: PayloadSize: 0x24
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF1
+MFUN32-NEXT: PayloadSize: 0x4B
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF2
+MFUN32-NEXT: PayloadSize: 0x40
+MFUN32: FunctionName: _f
+MFUN32-NEXT: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF4
+MFUN32-NEXT: PayloadSize: 0x18
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF3
+MFUN32-NEXT: PayloadSize: 0x46
+MFUN32: ]
+MFUN32-NEXT: Subsection [
+MFUN32-NEXT: Type: 0xF1
+MFUN32-NEXT: PayloadSize: 0x8
+MFUN32: ]
+MFUN32-NEXT: FunctionLineTable [
+MFUN32-NEXT: FunctionName: _x
+MFUN32-NEXT: CodeSize: 0xA
+MFUN32-NEXT: FilenameSegment [
+MFUN32-NEXT: Filename: d:\source.c
+MFUN32-NEXT: +0x0: 3
+MFUN32-NEXT: +0x3: 4
+MFUN32-NEXT: +0x8: 5
+MFUN32-NEXT: ]
+MFUN32-NEXT: ]
+MFUN32-NEXT: FunctionLineTable [
+MFUN32-NEXT: FunctionName: _y
+MFUN32-NEXT: CodeSize: 0xA
+MFUN32-NEXT: FilenameSegment [
+MFUN32-NEXT: Filename: d:\source.c
+MFUN32-NEXT: +0x0: 7
+MFUN32-NEXT: +0x3: 8
+MFUN32-NEXT: +0x8: 9
+MFUN32-NEXT: ]
+MFUN32-NEXT: ]
+MFUN32-NEXT: FunctionLineTable [
+MFUN32-NEXT: FunctionName: _f
+MFUN32-NEXT: CodeSize: 0x14
+MFUN32-NEXT: FilenameSegment [
+MFUN32-NEXT: Filename: d:\source.c
+MFUN32-NEXT: +0x0: 11
+MFUN32-NEXT: +0x3: 12
+MFUN32-NEXT: +0x8: 13
+MFUN32-NEXT: +0xD: 14
+MFUN32-NEXT: +0x12: 15
+MFUN32-NEXT: ]
+MFUN32-NEXT: ]
+MFUN32-NEXT: ]
+
+MFUN64: CodeViewLineTables [
+MFUN64-NEXT: Magic: 0x4
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF1
+MFUN64-NEXT: PayloadSize: 0x52
+MFUN64: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF1
+MFUN64-NEXT: PayloadSize: 0x4B
+MFUN64: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF2
+MFUN64-NEXT: PayloadSize: 0x30
+MFUN64: FunctionName: x
+MFUN64-NEXT: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF1
+MFUN64-NEXT: PayloadSize: 0x4B
+MFUN64: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF2
+MFUN64-NEXT: PayloadSize: 0x30
+MFUN64: FunctionName: y
+MFUN64-NEXT: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF1
+MFUN64-NEXT: PayloadSize: 0x4B
+MFUN64: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF2
+MFUN64-NEXT: PayloadSize: 0x40
+MFUN64: FunctionName: f
+MFUN64-NEXT: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF4
+MFUN64-NEXT: PayloadSize: 0x18
+MFUN64: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF3
+MFUN64-NEXT: PayloadSize: 0xD
+MFUN64: ]
+MFUN64-NEXT: Subsection [
+MFUN64-NEXT: Type: 0xF1
+MFUN64-NEXT: PayloadSize: 0x8
+MFUN64: ]
+MFUN64-NEXT: FunctionLineTable [
+MFUN64-NEXT: FunctionName: x
+MFUN64-NEXT: CodeSize: 0xE
+MFUN64-NEXT: FilenameSegment [
+MFUN64-NEXT: Filename: d:\source.c
+MFUN64-NEXT: +0x0: 3
+MFUN64-NEXT: +0x4: 4
+MFUN64-NEXT: +0x9: 5
+MFUN64-NEXT: ]
+MFUN64-NEXT: ]
+MFUN64-NEXT: FunctionLineTable [
+MFUN64-NEXT: FunctionName: y
+MFUN64-NEXT: CodeSize: 0xE
+MFUN64-NEXT: FilenameSegment [
+MFUN64-NEXT: Filename: d:\source.c
+MFUN64-NEXT: +0x0: 7
+MFUN64-NEXT: +0x4: 8
+MFUN64-NEXT: +0x9: 9
+MFUN64-NEXT: ]
+MFUN64-NEXT: ]
+MFUN64-NEXT: FunctionLineTable [
+MFUN64-NEXT: FunctionName: f
+MFUN64-NEXT: CodeSize: 0x18
+MFUN64-NEXT: FilenameSegment [
+MFUN64-NEXT: Filename: d:\source.c
+MFUN64-NEXT: +0x0: 11
+MFUN64-NEXT: +0x4: 12
+MFUN64-NEXT: +0x9: 13
+MFUN64-NEXT: +0xE: 14
+MFUN64-NEXT: +0x13: 15
+MFUN64-NEXT: ]
+MFUN64-NEXT: ]
+MFUN64-NEXT: ]
+
+MFILE32: CodeViewLineTables [
+MFILE32-NEXT: Magic: 0x4
+MFILE32-NEXT: Subsection [
+MFILE32-NEXT: Type: 0xF1
+MFILE32-NEXT: PayloadSize: 0x51
+MFILE32: ]
+MFILE32-NEXT: Subsection [
+MFILE32-NEXT: Type: 0xF5
+MFILE32-NEXT: PayloadSize: 0x24
+MFILE32: ]
+MFILE32-NEXT: Subsection [
+MFILE32-NEXT: Type: 0xF1
+MFILE32-NEXT: PayloadSize: 0x4B
+MFILE32: ]
+MFILE32-NEXT: Subsection [
+MFILE32-NEXT: Type: 0xF2
+MFILE32-NEXT: PayloadSize: 0x64
+MFILE32: FunctionName: _f
+MFILE32-NEXT: ]
+MFILE32-NEXT: Subsection [
+MFILE32-NEXT: Type: 0xF4
+MFILE32-NEXT: PayloadSize: 0x28
+MFILE32: ]
+MFILE32-NEXT: Subsection [
+MFILE32-NEXT: Type: 0xF3
+MFILE32-NEXT: PayloadSize: 0x57
+MFILE32: ]
+MFILE32-NEXT: Subsection [
+MFILE32-NEXT: Type: 0xF1
+MFILE32-NEXT: PayloadSize: 0x8
+MFILE32: ]
+MFILE32-NEXT: FunctionLineTable [
+MFILE32-NEXT: FunctionName: _f
+MFILE32-NEXT: CodeSize: 0x14
+MFILE32-NEXT: FilenameSegment [
+MFILE32-NEXT: Filename: d:\input.c
+MFILE32-NEXT: +0x0: 3
+MFILE32-NEXT: ]
+MFILE32-NEXT: FilenameSegment [
+MFILE32-NEXT: Filename: d:\one.c
+MFILE32-NEXT: +0x3: 1
+MFILE32-NEXT: ]
+MFILE32-NEXT: FilenameSegment [
+MFILE32-NEXT: Filename: d:\two.c
+MFILE32-NEXT: +0x8: 2
+MFILE32-NEXT: ]
+MFILE32-NEXT: FilenameSegment [
+MFILE32-NEXT: Filename: d:\one.c
+MFILE32-NEXT: +0xD: 7
+MFILE32-NEXT: +0x12: 8
+MFILE32-NEXT: ]
+MFILE32-NEXT: ]
+MFILE32-NEXT: ]
+
+MFILE64: CodeViewLineTables [
+MFILE64-NEXT: Magic: 0x4
+MFILE64-NEXT: Subsection [
+MFILE64-NEXT: Type: 0xF1
+MFILE64-NEXT: PayloadSize: 0x51
+MFILE64: ]
+MFILE64-NEXT: Subsection [
+MFILE64-NEXT: Type: 0xF1
+MFILE64-NEXT: PayloadSize: 0x4B
+MFILE64: ]
+MFILE64-NEXT: Subsection [
+MFILE64-NEXT: Type: 0xF2
+MFILE64-NEXT: PayloadSize: 0x64
+MFILE64: FunctionName: f
+MFILE64-NEXT: ]
+MFILE64-NEXT: Subsection [
+MFILE64-NEXT: Type: 0xF4
+MFILE64-NEXT: PayloadSize: 0x28
+MFILE64: ]
+MFILE64-NEXT: Subsection [
+MFILE64-NEXT: Type: 0xF3
+MFILE64-NEXT: PayloadSize: 0x1E
+MFILE64: ]
+MFILE64-NEXT: Subsection [
+MFILE64-NEXT: Type: 0xF1
+MFILE64-NEXT: PayloadSize: 0x8
+MFILE64: ]
+MFILE64-NEXT: FunctionLineTable [
+MFILE64-NEXT: FunctionName: f
+MFILE64-NEXT: CodeSize: 0x18
+MFILE64-NEXT: FilenameSegment [
+MFILE64-NEXT: Filename: d:\input.c
+MFILE64-NEXT: +0x0: 3
+MFILE64-NEXT: ]
+MFILE64-NEXT: FilenameSegment [
+MFILE64-NEXT: Filename: d:\one.c
+MFILE64-NEXT: +0x4: 1
+MFILE64-NEXT: ]
+MFILE64-NEXT: FilenameSegment [
+MFILE64-NEXT: Filename: d:\two.c
+MFILE64-NEXT: +0x9: 2
+MFILE64-NEXT: ]
+MFILE64-NEXT: FilenameSegment [
+MFILE64-NEXT: Filename: d:\one.c
+MFILE64-NEXT: +0xE: 7
+MFILE64-NEXT: +0x13: 8
+MFILE64-NEXT: ]
+MFILE64-NEXT: ]
+MFILE64-NEXT: ]
diff --git a/test/tools/llvm-readobj/coff-file-sections-reading.test b/test/tools/llvm-readobj/coff-file-sections-reading.test
new file mode 100644
index 000000000000..5c44c16f0058
--- /dev/null
+++ b/test/tools/llvm-readobj/coff-file-sections-reading.test
@@ -0,0 +1,18 @@
+RUN: yaml2obj %p/Inputs/file-multiple-aux-records.yaml | llvm-readobj -t - | FileCheck %s
+
+CHECK: Symbols [
+CHECK: Symbol {
+CHECK: Name: .file
+CHECK: Value: 0
+CHECK: Section: (65534)
+CHECK: BaseType: Null (0x0)
+CHECK: ComplexType: Null (0x0)
+CHECK: StorageClass: File (0x67)
+CHECK: AuxSymbolCount: 3
+CHECK: AuxFileRecord {
+CHECK: FileName: first-section-has-eighteen-characters.asm
+CHECK: }
+CHECK-NOT: AuxFileRecord {
+CHECK: }
+CHECK: ]
+
diff --git a/test/tools/llvm-readobj/coff-non-null-terminated-file.test b/test/tools/llvm-readobj/coff-non-null-terminated-file.test
new file mode 100644
index 000000000000..8bd88f3708ea
--- /dev/null
+++ b/test/tools/llvm-readobj/coff-non-null-terminated-file.test
@@ -0,0 +1,20 @@
+RUN: yaml2obj %p/Inputs/file-aux-record.yaml | llvm-readobj -t - | FileCheck %s
+
+CHECK: Symbols [
+CHECK: Symbol {
+CHECK: Name: .file
+CHECK: Value: 0
+CHECK: StorageClass: File
+CHECK: AuxSymbolCount: 1
+CHECK: AuxFileRecord {
+CHECK: FileName: eighteen-chars.obj{{$}}
+CHECK: }
+CHECK: }
+CHECK: Symbol {
+CHECK: Name: @comp.id
+CHECK: Value: 13485607
+CHECK: StorageClass: Static
+CHECK: AuxSymbolCount: 0
+CHECK: }
+CHECK: ]
+
diff --git a/test/tools/llvm-readobj/coff-zero-string-table.test b/test/tools/llvm-readobj/coff-zero-string-table.test
new file mode 100644
index 000000000000..dfcf79e2477a
--- /dev/null
+++ b/test/tools/llvm-readobj/coff-zero-string-table.test
@@ -0,0 +1,8 @@
+Ensure that we can read COFF objects with a string table size of 0 (instead
+of 4) for empty string tables.
+
+RUN: llvm-readobj -t %p/Inputs/zero-string-table.obj.coff-i386 | FileCheck %s
+
+CHECK: Symbols [
+CHECK: Symbol {
+CHECK: Name: $R000000
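The quirk exercised above comes from the COFF string table's 4-byte size prefix, which counts itself, so an empty table is canonically written as 4 while some producers write 0. As a minimal sketch of a tolerant reader (not llvm-readobj's actual code; readStringTable and its inputs are hypothetical), both encodings can be treated as "empty":

// Illustrative only: tolerate a zero-size COFF string table. The 4-byte size
// prefix normally counts itself, so an empty table is written as 4; a size of
// 0 should be accepted as meaning the same thing.
#include <cstdint>
#include <cstring>
#include <vector>

// Returns the string table payload (bytes after the size prefix), treating a
// size of 0 exactly like the canonical empty-table size of 4.
static std::vector<char> readStringTable(const uint8_t *Data, size_t Size) {
  if (Size < 4)
    return {};                        // no size field present at all
  uint32_t TableSize;
  std::memcpy(&TableSize, Data, 4);   // assumes a little-endian host, like COFF itself
  if (TableSize == 0 || TableSize == 4)
    return {};                        // both encodings mean an empty table
  if (TableSize < 4 || TableSize > Size)
    return {};                        // malformed; real code would report an error
  return std::vector<char>(Data + 4, Data + TableSize);
}

int main() {
  const uint8_t ZeroSized[4] = {0, 0, 0, 0};  // size field says 0
  const uint8_t Canonical[4] = {4, 0, 0, 0};  // size field says 4
  // Both inputs should yield an empty payload.
  return readStringTable(ZeroSized, 4).size() ==
                 readStringTable(Canonical, 4).size()
             ? 0
             : 1;
}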
diff --git a/test/tools/llvm-readobj/cxx-cli-aux.test b/test/tools/llvm-readobj/cxx-cli-aux.test
new file mode 100644
index 000000000000..90e73c033a86
--- /dev/null
+++ b/test/tools/llvm-readobj/cxx-cli-aux.test
@@ -0,0 +1,42 @@
+Ensure that we can read the COFF auxiliary symbols 'section definition' and
+'CLR token definition' as used in C++/CLI object files. Auxiliary section
+definitions usually only follow a symbol with static storage class, but
+non-const appdomain globals (external ABS) also get one.
+
+RUN: llvm-readobj -t %p/Inputs/cxx-cli-aux.obj.coff-i386 | FileCheck %s
+
+CHECK: Symbols [
+CHECK: Symbol {
+CHECK: Name: ?PerAppDomain@@$$Q3HA
+CHECK-NEXT: Value: 4
+CHECK-NEXT: Section: (65535)
+CHECK-NEXT: BaseType: Null (0x0)
+CHECK-NEXT: ComplexType: Null (0x0)
+CHECK-NEXT: StorageClass: External (0x2)
+CHECK-NEXT: AuxSymbolCount: 1
+CHECK-NEXT: AuxSectionDef {
+CHECK-NEXT: Length: 0
+CHECK-NEXT: RelocationCount: 0
+CHECK-NEXT: LineNumberCount: 0
+CHECK-NEXT: Checksum: 0x0
+CHECK-NEXT: Number: 0
+CHECK-NEXT: Selection: NoDuplicates (0x1)
+CHECK-NEXT: Unused: (00 00 00)
+CHECK-NEXT: }
+CHECK-NEXT: }
+
+CHECK: Symbol {
+CHECK: Name: 04000001
+CHECK-NEXT: Value: 4
+CHECK-NEXT: Section: (65535)
+CHECK-NEXT: BaseType: Null (0x0)
+CHECK-NEXT: ComplexType: Null (0x0)
+CHECK-NEXT: StorageClass: CLRToken (0x6B)
+CHECK-NEXT: AuxSymbolCount: 1
+CHECK-NEXT: AuxCLRToken {
+CHECK-NEXT: AuxType: 1
+CHECK-NEXT: Reserved: 0
+CHECK-NEXT: SymbolTableIndex: ?PerAppDomain@@$$Q3HA (19)
+CHECK-NEXT: Unused: (00 00 00 00 00 00 00 00 00 00 00 00)
+CHECK-NEXT: }
+CHECK-NEXT: }
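The AuxSectionDef fields checked above (Length, RelocationCount, LineNumberCount, Checksum, Number, Selection, three unused bytes) follow the PE/COFF auxiliary section-definition record, which fills one 18-byte symbol-table slot. A rough sketch of that layout, assuming a packed little-endian representation (the struct name and pragma-pack approach are illustrative, not LLVM's own types):

// Sketch of the 18-byte COFF auxiliary section-definition record printed by
// the CHECK lines above. Field names mirror the dump; layout is packed.
#include <cstdint>

#pragma pack(push, 1)
struct AuxSectionDefinition {
  uint32_t Length;              // size of the section data
  uint16_t NumberOfRelocations; // RelocationCount in the dump above
  uint16_t NumberOfLinenumbers; // LineNumberCount in the dump above
  uint32_t CheckSum;            // COMDAT checksum
  uint16_t Number;              // 1-based index of the associated section
  uint8_t  Selection;           // COMDAT selection, e.g. 1 = NoDuplicates
  uint8_t  Unused[3];           // padding up to the 18-byte symbol record size
};
#pragma pack(pop)

static_assert(sizeof(AuxSectionDefinition) == 18,
              "aux records occupy one 18-byte symbol table entry");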
diff --git a/test/tools/llvm-readobj/dynamic.test b/test/tools/llvm-readobj/dynamic.test
index 78a9b3bd937e..08f29fcd40a9 100644
--- a/test/tools/llvm-readobj/dynamic.test
+++ b/test/tools/llvm-readobj/dynamic.test
@@ -1,4 +1,5 @@
-RUN: llvm-readobj -dynamic-table %p/Inputs/dynamic-table.mips \
+// Check dynamic section tags for a shared library file.
+RUN: llvm-readobj -dynamic-table %p/Inputs/dynamic-table-so.mips \
RUN: | FileCheck %s -check-prefix ELF-MIPS
ELF-MIPS: Format: ELF32-mips
@@ -20,7 +21,7 @@ ELF-MIPS: 0x00000011 REL 0x518
ELF-MIPS: 0x00000012 RELSZ 16 (bytes)
ELF-MIPS: 0x00000013 RELENT 8 (bytes)
ELF-MIPS: 0x70000001 MIPS_RLD_VERSION 1
-ELF-MIPS: 0x70000005 MIPS_FLAGS 0x2
+ELF-MIPS: 0x70000005 MIPS_FLAGS NOTPOT
ELF-MIPS: 0x70000006 MIPS_BASE_ADDRESS 0x0
ELF-MIPS: 0x7000000A MIPS_LOCAL_GOTNO 10
ELF-MIPS: 0x70000011 MIPS_SYMTABNO 19
@@ -31,3 +32,82 @@ ELF-MIPS: 0x6FFFFFFF VERNEEDNUM 1
ELF-MIPS: 0x6FFFFFF0 VERSYM 0x4C0
ELF-MIPS: 0x00000000 NULL 0x0
ELF-MIPS: ]
+
+// Check dynamic section tags for a non-PIC executable file.
+RUN: llvm-readobj -dynamic-table %p/Inputs/dynamic-table-exe.mips \
+RUN: | FileCheck %s -check-prefix ELF-MIPS-EXE
+
+ELF-MIPS-EXE: Format: ELF32-mips
+ELF-MIPS-EXE: Arch: mipsel
+ELF-MIPS-EXE: AddressSize: 32bit
+ELF-MIPS-EXE: LoadName:
+ELF-MIPS-EXE: DynamicSection [ (26 entries)
+ELF-MIPS-EXE: Tag Type Name/Value
+ELF-MIPS-EXE: 0x00000001 NEEDED SharedLibrary (libc.so.6)
+ELF-MIPS-EXE: 0x0000000C INIT 0x400418
+ELF-MIPS-EXE: 0x0000000D FINI 0x4007B0
+ELF-MIPS-EXE: 0x00000004 HASH 0x4002B8
+ELF-MIPS-EXE: 0x00000005 STRTAB 0x40036C
+ELF-MIPS-EXE: 0x00000006 SYMTAB 0x4002EC
+ELF-MIPS-EXE: 0x0000000A STRSZ 107 (bytes)
+ELF-MIPS-EXE: 0x0000000B SYMENT 16 (bytes)
+ELF-MIPS-EXE: 0x70000016 MIPS_RLD_MAP 0x410880
+ELF-MIPS-EXE: 0x00000015 DEBUG 0x0
+ELF-MIPS-EXE: 0x00000003 PLTGOT 0x410890
+ELF-MIPS-EXE: 0x70000001 MIPS_RLD_VERSION 1
+ELF-MIPS-EXE: 0x70000005 MIPS_FLAGS NOTPOT
+ELF-MIPS-EXE: 0x70000006 MIPS_BASE_ADDRESS 0x400000
+ELF-MIPS-EXE: 0x7000000A MIPS_LOCAL_GOTNO 5
+ELF-MIPS-EXE: 0x70000011 MIPS_SYMTABNO 8
+ELF-MIPS-EXE: 0x70000012 MIPS_UNREFEXTNO 32
+ELF-MIPS-EXE: 0x70000013 MIPS_GOTSYM 0x7
+ELF-MIPS-EXE: 0x00000014 PLTREL REL
+ELF-MIPS-EXE: 0x00000017 JMPREL 0x400408
+ELF-MIPS-EXE: 0x00000002 PLTRELSZ 16 (bytes)
+ELF-MIPS-EXE: 0x70000032 MIPS_PLTGOT 0x410854
+ELF-MIPS-EXE: 0x6FFFFFFE VERNEED 0x4003E8
+ELF-MIPS-EXE: 0x6FFFFFFF VERNEEDNUM 1
+ELF-MIPS-EXE: 0x6FFFFFF0 VERSYM 0x4003D8
+ELF-MIPS-EXE: 0x00000000 NULL 0x0
+ELF-MIPS-EXE: ]
+
+RUN: llvm-readobj -dynamic-table %p/Inputs/dynamic-table-exe.x86 \
+RUN: | FileCheck %s -check-prefix ELF-X86-EXE
+
+ELF-X86-EXE: Format: ELF32-i386
+ELF-X86-EXE: Arch: i386
+ELF-X86-EXE: AddressSize: 32bit
+ELF-X86-EXE: LoadName:
+ELF-X86-EXE: DynamicSection [ (30 entries)
+ELF-X86-EXE: Tag Type Name/Value
+ELF-X86-EXE: 0x00000001 NEEDED SharedLibrary (libstdc++.so.6)
+ELF-X86-EXE: 0x00000001 NEEDED SharedLibrary (libgcc_s.so.1)
+ELF-X86-EXE: 0x00000001 NEEDED SharedLibrary (libc.so.6)
+ELF-X86-EXE: 0x0000000C INIT 0x62C
+ELF-X86-EXE: 0x0000000D FINI 0x920
+ELF-X86-EXE: 0x00000019 INIT_ARRAY 0x19FC
+ELF-X86-EXE: 0x0000001B INIT_ARRAYSZ 4 (bytes)
+ELF-X86-EXE: 0x0000001A FINI_ARRAY 0x1A00
+ELF-X86-EXE: 0x0000001C FINI_ARRAYSZ 4 (bytes)
+ELF-X86-EXE: 0x00000004 HASH 0x18C
+ELF-X86-EXE: 0x6FFFFEF5 GNU_HASH 0x1E4
+ELF-X86-EXE: 0x00000005 STRTAB 0x328
+ELF-X86-EXE: 0x00000006 SYMTAB 0x218
+ELF-X86-EXE: 0x0000000A STRSZ 408 (bytes)
+ELF-X86-EXE: 0x0000000B SYMENT 16 (bytes)
+ELF-X86-EXE: 0x00000015 DEBUG 0x0
+ELF-X86-EXE: 0x00000003 PLTGOT 0x1B30
+ELF-X86-EXE: 0x00000002 PLTRELSZ 64 (bytes)
+ELF-X86-EXE: 0x00000014 PLTREL REL
+ELF-X86-EXE: 0x00000017 JMPREL 0x5EC
+ELF-X86-EXE: 0x00000011 REL 0x564
+ELF-X86-EXE: 0x00000012 RELSZ 136 (bytes)
+ELF-X86-EXE: 0x00000013 RELENT 8 (bytes)
+ELF-X86-EXE: 0x00000016 TEXTREL
+ELF-X86-EXE: 0x0000001E FLAGS TEXTREL
+ELF-X86-EXE: 0x6FFFFFFE VERNEED 0x4E4
+ELF-X86-EXE: 0x6FFFFFFF VERNEEDNUM 3
+ELF-X86-EXE: 0x6FFFFFF0 VERSYM 0x4C0
+ELF-X86-EXE: 0x6FFFFFFA RELCOUNT 6
+ELF-X86-EXE: 0x00000000 NULL 0x0
+ELF-X86-EXE: ]
diff --git a/test/tools/llvm-readobj/file-headers.test b/test/tools/llvm-readobj/file-headers.test
index b2b454772d6a..39a8c0ef8991 100644
--- a/test/tools/llvm-readobj/file-headers.test
+++ b/test/tools/llvm-readobj/file-headers.test
@@ -1,3 +1,5 @@
+RUN: llvm-readobj -h %p/Inputs/trivial.obj.coff-arm \
+RUN: | FileCheck %s -check-prefix COFF-ARM
RUN: llvm-readobj -h %p/Inputs/trivial.obj.coff-i386 \
RUN: | FileCheck %s -check-prefix COFF32
RUN: llvm-readobj -h %p/Inputs/trivial.obj.coff-x86-64 \
@@ -13,6 +15,21 @@ RUN: | FileCheck %s -check-prefix COFF-UNKNOWN
RUN: llvm-readobj -h %p/Inputs/magic.coff-importlib \
RUN: | FileCheck %s -check-prefix COFF-IMPORTLIB
+COFF-ARM: File: {{(.*[/\\])?}}trivial.obj.coff-arm
+COFF-ARM-NEXT: Format: COFF-ARM
+COFF-ARM-NEXT: Arch: thumb
+COFF-ARM-NEXT: AddressSize: 32bit
+COFF-ARM-NEXT: ImageFileHeader {
+COFF-ARM-NEXT: Machine: IMAGE_FILE_MACHINE_ARMNT (0x1C4)
+COFF-ARM-NEXT: SectionCount: 2
+COFF-ARM-NEXT: TimeDateStamp: 2014-03-13 02:48:34 (0x53211C82)
+COFF-ARM-NEXT: PointerToSymbolTable: 0xFF
+COFF-ARM-NEXT: SymbolCount: 6
+COFF-ARM-NEXT: OptionalHeaderSize: 0
+COFF-ARM-NEXT: Characteristics [ (0x0)
+COFF-ARM-NEXT: ]
+COFF-ARM-NEXT: }
+
COFF32: File: {{(.*[/\\])?}}trivial.obj.coff-i386
COFF32-NEXT: Format: COFF-i386
COFF32-NEXT: Arch: i386
@@ -22,7 +39,7 @@ COFF32-NEXT: Machine: IMAGE_FILE_MACHINE_I386 (0x14C)
COFF32-NEXT: SectionCount: 2
COFF32-NEXT: TimeDateStamp: 2013-03-20 17:56:46 (0x5149F85E)
COFF32-NEXT: PointerToSymbolTable: 0xA5
-COFF32-NEXT: SymbolCount: 7
+COFF32-NEXT: SymbolCount: 9
COFF32-NEXT: OptionalHeaderSize: 0
COFF32-NEXT: Characteristics [ (0x0)
COFF32-NEXT: ]
diff --git a/test/tools/llvm-readobj/mips-got.test b/test/tools/llvm-readobj/mips-got.test
new file mode 100644
index 000000000000..20215871ef0b
--- /dev/null
+++ b/test/tools/llvm-readobj/mips-got.test
@@ -0,0 +1,331 @@
+RUN: llvm-readobj -mips-plt-got %p/Inputs/relocs.obj.elf-mips | \
+RUN: FileCheck %s -check-prefix GOT-OBJ
+RUN: llvm-readobj -mips-plt-got %p/Inputs/dynamic-table-exe.mips | \
+RUN: FileCheck %s -check-prefix GOT-EXE
+RUN: llvm-readobj -mips-plt-got %p/Inputs/dynamic-table-so.mips | \
+RUN: FileCheck %s -check-prefix GOT-SO
+RUN: llvm-readobj -mips-plt-got %p/Inputs/got-tls.so.elf-mips64el | \
+RUN: FileCheck %s -check-prefix GOT-TLS
+RUN: llvm-readobj -mips-plt-got %p/Inputs/got-empty.exe.mipsel | \
+RUN: FileCheck %s -check-prefix GOT-EMPTY
+
+GOT-OBJ: Cannot find PLTGOT dynamic table tag.
+
+GOT-EXE: Primary GOT {
+GOT-EXE-NEXT: Canonical gp value: 0x418880
+GOT-EXE-NEXT: Reserved entries [
+GOT-EXE-NEXT: Entry {
+GOT-EXE-NEXT: Address: 0x410890
+GOT-EXE-NEXT: Access: -32752
+GOT-EXE-NEXT: Initial: 0x0
+GOT-EXE-NEXT: Purpose: Lazy resolver
+GOT-EXE-NEXT: }
+GOT-EXE-NEXT: Entry {
+GOT-EXE-NEXT: Address: 0x410894
+GOT-EXE-NEXT: Access: -32748
+GOT-EXE-NEXT: Initial: 0x80000000
+GOT-EXE-NEXT: Purpose: Module pointer (GNU extension)
+GOT-EXE-NEXT: }
+GOT-EXE-NEXT: ]
+GOT-EXE-NEXT: Local entries [
+GOT-EXE-NEXT: Entry {
+GOT-EXE-NEXT: Address: 0x410898
+GOT-EXE-NEXT: Access: -32744
+GOT-EXE-NEXT: Initial: 0x400418
+GOT-EXE-NEXT: }
+GOT-EXE-NEXT: Entry {
+GOT-EXE-NEXT: Address: 0x41089C
+GOT-EXE-NEXT: Access: -32740
+GOT-EXE-NEXT: Initial: 0x410840
+GOT-EXE-NEXT: }
+GOT-EXE-NEXT: Entry {
+GOT-EXE-NEXT: Address: 0x4108A0
+GOT-EXE-NEXT: Access: -32736
+GOT-EXE-NEXT: Initial: 0x0
+GOT-EXE-NEXT: }
+GOT-EXE-NEXT: ]
+GOT-EXE-NEXT: Global entries [
+GOT-EXE-NEXT: Entry {
+GOT-EXE-NEXT: Address: 0x4108A4
+GOT-EXE-NEXT: Access: -32732
+GOT-EXE-NEXT: Initial: 0x0
+GOT-EXE-NEXT: Value: 0x0
+GOT-EXE-NEXT: Type: Function (0x2)
+GOT-EXE-NEXT: Section: Undefined (0x0)
+GOT-EXE-NEXT: Name: __gmon_start__@ (1)
+GOT-EXE-NEXT: }
+GOT-EXE-NEXT: ]
+GOT-EXE-NEXT: Number of TLS and multi-GOT entries: 0
+GOT-EXE-NEXT: }
+
+GOT-SO: Primary GOT {
+GOT-SO-NEXT: Canonical gp value: 0x188D0
+GOT-SO-NEXT: Reserved entries [
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108E0
+GOT-SO-NEXT: Access: -32752
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: Purpose: Lazy resolver
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108E4
+GOT-SO-NEXT: Access: -32748
+GOT-SO-NEXT: Initial: 0x80000000
+GOT-SO-NEXT: Purpose: Module pointer (GNU extension)
+GOT-SO-NEXT: }
+GOT-SO-NEXT: ]
+GOT-SO-NEXT: Local entries [
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108E8
+GOT-SO-NEXT: Access: -32744
+GOT-SO-NEXT: Initial: 0x108E0
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108EC
+GOT-SO-NEXT: Access: -32740
+GOT-SO-NEXT: Initial: 0x10000
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108F0
+GOT-SO-NEXT: Access: -32736
+GOT-SO-NEXT: Initial: 0x10920
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108F4
+GOT-SO-NEXT: Access: -32732
+GOT-SO-NEXT: Initial: 0x108CC
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108F8
+GOT-SO-NEXT: Access: -32728
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x108FC
+GOT-SO-NEXT: Access: -32724
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x10900
+GOT-SO-NEXT: Access: -32720
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x10904
+GOT-SO-NEXT: Access: -32716
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: }
+GOT-SO-NEXT: ]
+GOT-SO-NEXT: Global entries [
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x10908
+GOT-SO-NEXT: Access: -32712
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: Value: 0x0
+GOT-SO-NEXT: Type: None (0x0)
+GOT-SO-NEXT: Section: Undefined (0x0)
+GOT-SO-NEXT: Name: _ITM_registerTMCloneTable@ (87)
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x1090C
+GOT-SO-NEXT: Access: -32708
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: Value: 0x0
+GOT-SO-NEXT: Type: None (0x0)
+GOT-SO-NEXT: Section: Undefined (0x0)
+GOT-SO-NEXT: Name: _Jv_RegisterClasses@ (128)
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x10910
+GOT-SO-NEXT: Access: -32704
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: Value: 0x0
+GOT-SO-NEXT: Type: Function (0x2)
+GOT-SO-NEXT: Section: Undefined (0x0)
+GOT-SO-NEXT: Name: __gmon_start__@ (23)
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x10914
+GOT-SO-NEXT: Access: -32700
+GOT-SO-NEXT: Initial: 0x840
+GOT-SO-NEXT: Value: 0x840
+GOT-SO-NEXT: Type: Function (0x2)
+GOT-SO-NEXT: Section: Undefined (0x0)
+GOT-SO-NEXT: Name: puts@GLIBC_2.0 (162)
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x10918
+GOT-SO-NEXT: Access: -32696
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: Value: 0x0
+GOT-SO-NEXT: Type: None (0x0)
+GOT-SO-NEXT: Section: Undefined (0x0)
+GOT-SO-NEXT: Name: _ITM_deregisterTMCloneTable@ (59)
+GOT-SO-NEXT: }
+GOT-SO-NEXT: Entry {
+GOT-SO-NEXT: Address: 0x1091C
+GOT-SO-NEXT: Access: -32692
+GOT-SO-NEXT: Initial: 0x0
+GOT-SO-NEXT: Value: 0x0
+GOT-SO-NEXT: Type: Function (0x2)
+GOT-SO-NEXT: Section: Undefined (0x0)
+GOT-SO-NEXT: Name: __cxa_finalize@GLIBC_2.2 (113)
+GOT-SO-NEXT: }
+GOT-SO-NEXT: ]
+GOT-SO-NEXT: Number of TLS and multi-GOT entries: 0
+GOT-SO-NEXT: }
+
+GOT-TLS: Primary GOT {
+GOT-TLS-NEXT: Canonical gp value: 0x18BF0
+GOT-TLS-NEXT: Reserved entries [
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C00
+GOT-TLS-NEXT: Access: -32752
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: Purpose: Lazy resolver
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C08
+GOT-TLS-NEXT: Access: -32744
+GOT-TLS-NEXT: Initial: 0x8000000000000000
+GOT-TLS-NEXT: Purpose: Module pointer (GNU extension)
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: ]
+GOT-TLS-NEXT: Local entries [
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C10
+GOT-TLS-NEXT: Access: -32736
+GOT-TLS-NEXT: Initial: 0x10000
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C18
+GOT-TLS-NEXT: Access: -32728
+GOT-TLS-NEXT: Initial: 0x10C00
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C20
+GOT-TLS-NEXT: Access: -32720
+GOT-TLS-NEXT: Initial: 0x10CB8
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C28
+GOT-TLS-NEXT: Access: -32712
+GOT-TLS-NEXT: Initial: 0x10BF0
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C30
+GOT-TLS-NEXT: Access: -32704
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C38
+GOT-TLS-NEXT: Access: -32696
+GOT-TLS-NEXT: Initial: 0x948
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C40
+GOT-TLS-NEXT: Access: -32688
+GOT-TLS-NEXT: Initial: 0xA20
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C48
+GOT-TLS-NEXT: Access: -32680
+GOT-TLS-NEXT: Initial: 0xAF0
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C50
+GOT-TLS-NEXT: Access: -32672
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C58
+GOT-TLS-NEXT: Access: -32664
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C60
+GOT-TLS-NEXT: Access: -32656
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: ]
+GOT-TLS-NEXT: Global entries [
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C68
+GOT-TLS-NEXT: Access: -32648
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: Value: 0x0
+GOT-TLS-NEXT: Type: None (0x0)
+GOT-TLS-NEXT: Section: Undefined (0x0)
+GOT-TLS-NEXT: Name: _ITM_registerTMCloneTable@ (78)
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C70
+GOT-TLS-NEXT: Access: -32640
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: Value: 0x0
+GOT-TLS-NEXT: Type: None (0x0)
+GOT-TLS-NEXT: Section: Undefined (0x0)
+GOT-TLS-NEXT: Name: _Jv_RegisterClasses@ (119)
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C78
+GOT-TLS-NEXT: Access: -32632
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: Value: 0x0
+GOT-TLS-NEXT: Type: Function (0x2)
+GOT-TLS-NEXT: Section: Undefined (0x0)
+GOT-TLS-NEXT: Name: __gmon_start__@ (23)
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C80
+GOT-TLS-NEXT: Access: -32624
+GOT-TLS-NEXT: Initial: 0xB60
+GOT-TLS-NEXT: Value: 0xB60
+GOT-TLS-NEXT: Type: Function (0x2)
+GOT-TLS-NEXT: Section: Undefined (0x0)
+GOT-TLS-NEXT: Name: __tls_get_addr@GLIBC_2.3 (150)
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C88
+GOT-TLS-NEXT: Access: -32616
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: Value: 0x0
+GOT-TLS-NEXT: Type: None (0x0)
+GOT-TLS-NEXT: Section: Undefined (0x0)
+GOT-TLS-NEXT: Name: _ITM_deregisterTMCloneTable@ (50)
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: Entry {
+GOT-TLS-NEXT: Address: 0x10C90
+GOT-TLS-NEXT: Access: -32608
+GOT-TLS-NEXT: Initial: 0x0
+GOT-TLS-NEXT: Value: 0x0
+GOT-TLS-NEXT: Type: Function (0x2)
+GOT-TLS-NEXT: Section: Undefined (0x0)
+GOT-TLS-NEXT: Name: __cxa_finalize@GLIBC_2.2 (104)
+GOT-TLS-NEXT: }
+GOT-TLS-NEXT: ]
+GOT-TLS-NEXT: Number of TLS and multi-GOT entries: 4
+GOT-TLS-NEXT: }
+
+GOT-EMPTY: Primary GOT {
+GOT-EMPTY-NEXT: Canonical gp value: 0x409FF0
+GOT-EMPTY-NEXT: Reserved entries [
+GOT-EMPTY-NEXT: Entry {
+GOT-EMPTY-NEXT: Address: 0x402000
+GOT-EMPTY-NEXT: Access: -32752
+GOT-EMPTY-NEXT: Initial: 0x0
+GOT-EMPTY-NEXT: Purpose: Lazy resolver
+GOT-EMPTY-NEXT: }
+GOT-EMPTY-NEXT: Entry {
+GOT-EMPTY-NEXT: Address: 0x402004
+GOT-EMPTY-NEXT: Access: -32748
+GOT-EMPTY-NEXT: Initial: 0x80000000
+GOT-EMPTY-NEXT: Purpose: Module pointer (GNU extension)
+GOT-EMPTY-NEXT: }
+GOT-EMPTY-NEXT: ]
+GOT-EMPTY-NEXT: Local entries [
+GOT-EMPTY-NEXT: ]
+GOT-EMPTY-NEXT: Global entries [
+GOT-EMPTY-NEXT: ]
+GOT-EMPTY-NEXT: Number of TLS and multi-GOT entries: 2
+GOT-EMPTY-NEXT: }
diff --git a/test/tools/llvm-readobj/peplus.test b/test/tools/llvm-readobj/peplus.test
new file mode 100644
index 000000000000..8e6f55085475
--- /dev/null
+++ b/test/tools/llvm-readobj/peplus.test
@@ -0,0 +1,83 @@
+RUN: llvm-readobj -file-headers %p/Inputs/nop.exe.coff-x86-64 | FileCheck %s
+
+CHECK: Format: COFF-x86-64
+CHECK: Arch: x86_64
+CHECK: AddressSize: 64bit
+CHECK: ImageFileHeader {
+CHECK: Machine: IMAGE_FILE_MACHINE_AMD64 (0x8664)
+CHECK: SectionCount: 1
+CHECK: TimeDateStamp: 2014-01-26 03:43:56 (0x52E4847C)
+CHECK: PointerToSymbolTable: 0x0
+CHECK: SymbolCount: 0
+CHECK: OptionalHeaderSize: 240
+CHECK: Characteristics [ (0x22)
+CHECK: IMAGE_FILE_EXECUTABLE_IMAGE (0x2)
+CHECK: IMAGE_FILE_LARGE_ADDRESS_AWARE (0x20)
+CHECK: ]
+CHECK: }
+CHECK: ImageOptionalHeader {
+CHECK: MajorLinkerVersion: 11
+CHECK: MinorLinkerVersion: 0
+CHECK: SizeOfCode: 512
+CHECK: SizeOfInitializedData: 0
+CHECK: SizeOfUninitializedData: 0
+CHECK: AddressOfEntryPoint: 0x1000
+CHECK: BaseOfCode: 0x1000
+CHECK: ImageBase: 0x140000000
+CHECK: SectionAlignment: 4096
+CHECK: FileAlignment: 512
+CHECK: MajorOperatingSystemVersion: 6
+CHECK: MinorOperatingSystemVersion: 0
+CHECK: MajorImageVersion: 0
+CHECK: MinorImageVersion: 0
+CHECK: MajorSubsystemVersion: 6
+CHECK: MinorSubsystemVersion: 0
+CHECK: SizeOfImage: 8192
+CHECK: SizeOfHeaders: 512
+CHECK: Subsystem: IMAGE_SUBSYSTEM_WINDOWS_CUI (0x3)
+CHECK: Subsystem [ (0x8160)
+CHECK: IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE (0x40)
+CHECK: IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA (0x20)
+CHECK: IMAGE_DLL_CHARACTERISTICS_NX_COMPAT (0x100)
+CHECK: IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE (0x8000)
+CHECK: ]
+CHECK: SizeOfStackReserve: 1048576
+CHECK: SizeOfStackCommit: 4096
+CHECK: SizeOfHeapReserve: 1048576
+CHECK: SizeOfHeapCommit: 4096
+CHECK: NumberOfRvaAndSize: 16
+CHECK: DataDirectory {
+CHECK: ExportTableRVA: 0x0
+CHECK: ExportTableSize: 0x0
+CHECK: ImportTableRVA: 0x0
+CHECK: ImportTableSize: 0x0
+CHECK: ResourceTableRVA: 0x0
+CHECK: ResourceTableSize: 0x0
+CHECK: ExceptionTableRVA: 0x0
+CHECK: ExceptionTableSize: 0x0
+CHECK: CertificateTableRVA: 0x0
+CHECK: CertificateTableSize: 0x0
+CHECK: BaseRelocationTableRVA: 0x0
+CHECK: BaseRelocationTableSize: 0x0
+CHECK: DebugRVA: 0x0
+CHECK: DebugSize: 0x0
+CHECK: ArchitectureRVA: 0x0
+CHECK: ArchitectureSize: 0x0
+CHECK: GlobalPtrRVA: 0x0
+CHECK: GlobalPtrSize: 0x0
+CHECK: TLSTableRVA: 0x0
+CHECK: TLSTableSize: 0x0
+CHECK: LoadConfigTableRVA: 0x0
+CHECK: LoadConfigTableSize: 0x0
+CHECK: BoundImportRVA: 0x0
+CHECK: BoundImportSize: 0x0
+CHECK: IATRVA: 0x0
+CHECK: IATSize: 0x0
+CHECK: DelayImportDescriptorRVA: 0x0
+CHECK: DelayImportDescriptorSize: 0x0
+CHECK: CLRRuntimeHeaderRVA: 0x0
+CHECK: CLRRuntimeHeaderSize: 0x0
+CHECK: ReservedRVA: 0x0
+CHECK: ReservedSize: 0x0
+CHECK: }
+CHECK: }
diff --git a/test/tools/llvm-readobj/program-headers.test b/test/tools/llvm-readobj/program-headers.test
index 7c22f2b529b1..f014c0390420 100644
--- a/test/tools/llvm-readobj/program-headers.test
+++ b/test/tools/llvm-readobj/program-headers.test
@@ -4,6 +4,8 @@ RUN: llvm-readobj -program-headers %p/../../Object/Inputs/program-headers.elf-x8
RUN: | FileCheck %s -check-prefix ELF-X86-64
RUN: llvm-readobj -program-headers %p/../../Object/Inputs/program-headers.mips \
RUN: | FileCheck %s -check-prefix ELF-MIPS
+RUN: llvm-readobj -program-headers %p/../../Object/Inputs/program-headers.mips64 \
+RUN: | FileCheck %s -check-prefix ELF-MIPS64
ELF-I386: ProgramHeaders [
ELF-I386-NEXT: ProgramHeader {
@@ -75,7 +77,11 @@ ELF-X86-64-NEXT: Alignment: 8
ELF-X86-64-NEXT: }
ELF-X86-64-NEXT: ]
-ELF-MIPS: ProgramHeaders [
+ELF-MIPS: Format: ELF32-mips
+ELF-MIPS-NEXT: Arch: mips
+ELF-MIPS-NEXT: AddressSize: 32bit
+ELF-MIPS-NEXT: LoadName:
+ELF-MIPS-NEXT: ProgramHeaders [
ELF-MIPS-NEXT: ProgramHeader {
ELF-MIPS-NEXT: Type: PT_MIPS_REGINFO (0x70000000)
ELF-MIPS-NEXT: Offset: 0x74
@@ -102,3 +108,23 @@ ELF-MIPS-NEXT: ]
ELF-MIPS-NEXT: Alignment: 65536
ELF-MIPS-NEXT: }
ELF-MIPS-NEXT: ]
+
+ELF-MIPS64: Format: ELF64-mips
+ELF-MIPS64-NEXT: Arch: mips64
+ELF-MIPS64-NEXT: AddressSize: 64bit
+ELF-MIPS64-NEXT: LoadName:
+ELF-MIPS64-NEXT: ProgramHeaders [
+ELF-MIPS64-NEXT: ProgramHeader {
+ELF-MIPS64-NEXT: Type: PT_LOAD (0x1)
+ELF-MIPS64-NEXT: Offset: 0x0
+ELF-MIPS64-NEXT: VirtualAddress: 0x120000000
+ELF-MIPS64-NEXT: PhysicalAddress: 0x120000000
+ELF-MIPS64-NEXT: FileSize: 136
+ELF-MIPS64-NEXT: MemSize: 136
+ELF-MIPS64-NEXT: Flags [ (0x5)
+ELF-MIPS64-NEXT: PF_R (0x4)
+ELF-MIPS64-NEXT: PF_X (0x1)
+ELF-MIPS64-NEXT: ]
+ELF-MIPS64-NEXT: Alignment: 65536
+ELF-MIPS64-NEXT: }
+ELF-MIPS64-NEXT: ]
diff --git a/test/tools/llvm-readobj/relocations.test b/test/tools/llvm-readobj/relocations.test
index 3a87ff548e39..864ded35a4e4 100644
--- a/test/tools/llvm-readobj/relocations.test
+++ b/test/tools/llvm-readobj/relocations.test
@@ -34,8 +34,8 @@ MACHO-I386: Relocations [
MACHO-I386-NEXT: Section __text {
MACHO-I386-NEXT: 0x18 1 2 1 GENERIC_RELOC_VANILLA 0 _SomeOtherFunction
MACHO-I386-NEXT: 0x13 1 2 1 GENERIC_RELOC_VANILLA 0 _puts
-MACHO-I386-NEXT: 0xB 0 2 n/a GENERIC_RELOC_LOCAL_SECTDIFF 1 -
-MACHO-I386-NEXT: 0x0 0 2 n/a GENERIC_RELOC_PAIR 1 -
+MACHO-I386-NEXT: 0xB 0 2 n/a GENERIC_RELOC_LOCAL_SECTDIFF 1 0x22
+MACHO-I386-NEXT: 0x0 0 2 n/a GENERIC_RELOC_PAIR 1 0x8
MACHO-I386-NEXT: }
MACHO-I386-NEXT: ]
@@ -49,17 +49,17 @@ MACHO-X86-64-NEXT:]
MACHO-PPC: Relocations [
MACHO-PPC-NEXT: Section __text {
-MACHO-PPC-NEXT: 0x24 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 -
-MACHO-PPC-NEXT: 0x1C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x58 0 2 n/a PPC_RELOC_PAIR 1 -
-MACHO-PPC-NEXT: 0x18 1 2 0 PPC_RELOC_BR24 0 -
+MACHO-PPC-NEXT: 0x24 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x64
+MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0xC
+MACHO-PPC-NEXT: 0x1C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x64
+MACHO-PPC-NEXT: 0x58 0 2 n/a PPC_RELOC_PAIR 1 0xC
+MACHO-PPC-NEXT: 0x18 1 2 0 PPC_RELOC_BR24 0 0x2
MACHO-PPC-NEXT: }
MACHO-PPC-NEXT: Section __picsymbolstub1 {
-MACHO-PPC-NEXT: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 -
-MACHO-PPC-NEXT: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x20 0 2 n/a PPC_RELOC_PAIR 1 -
+MACHO-PPC-NEXT: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x68
+MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0x48
+MACHO-PPC-NEXT: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x68
+MACHO-PPC-NEXT: 0x20 0 2 n/a PPC_RELOC_PAIR 1 0x48
MACHO-PPC-NEXT: }
MACHO-PPC-NEXT: Section __la_symbol_ptr {
MACHO-PPC-NEXT: 0x0 0 2 1 PPC_RELOC_VANILLA 0 dyld_stub_binding_helper
@@ -68,17 +68,17 @@ MACHO-PPC-NEXT: ]
MACHO-PPC64: Relocations [
MACHO-PPC64-NEXT: Section __text {
-MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x1C 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x58 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x18 1 2 0 0 -
+MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 0x64
+MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 0xC
+MACHO-PPC64-NEXT: 0x1C 0 2 n/a 1 0x64
+MACHO-PPC64-NEXT: 0x58 0 2 n/a 1 0xC
+MACHO-PPC64-NEXT: 0x18 1 2 0 0 0x2
MACHO-PPC64-NEXT: }
MACHO-PPC64-NEXT: Section __picsymbolstub1 {
-MACHO-PPC64-NEXT: 0x14 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0xC 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 -
+MACHO-PPC64-NEXT: 0x14 0 2 n/a 1 0x6C
+MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 0x48
+MACHO-PPC64-NEXT: 0xC 0 2 n/a 1 0x6C
+MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 0x48
MACHO-PPC64-NEXT: }
MACHO-PPC64-NEXT: Section __la_symbol_ptr {
MACHO-PPC64-NEXT: 0x0 0 3 1 0 dyld_stub_binding_helper
@@ -94,7 +94,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_SECTDIFF (2)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x40
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -103,7 +103,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x28
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -130,7 +130,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 1
MACHO-ARM-NEXT: Extern: 0
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0xFFFFFF
MACHO-ARM-NEXT: Scattered: 0
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -148,7 +148,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 0
MACHO-ARM-NEXT: Extern: 0
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0xFFFFFF
MACHO-ARM-NEXT: Scattered: 0
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -157,7 +157,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_SECTDIFF (2)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x44
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -166,7 +166,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x4
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: }
diff --git a/test/tools/llvm-readobj/sections-ext.test b/test/tools/llvm-readobj/sections-ext.test
index e3a40c3eb192..972d8e6f4ef7 100644
--- a/test/tools/llvm-readobj/sections-ext.test
+++ b/test/tools/llvm-readobj/sections-ext.test
@@ -183,13 +183,14 @@ MACHO-I386-NEXT: Reserved2: 0x0
MACHO-I386-NEXT: Relocations [
MACHO-I386-NEXT: 0x18 1 2 1 GENERIC_RELOC_VANILLA 0 _SomeOtherFunction
MACHO-I386-NEXT: 0x13 1 2 1 GENERIC_RELOC_VANILLA 0 _puts
-MACHO-I386-NEXT: 0xB 0 2 n/a GENERIC_RELOC_LOCAL_SECTDIFF 1 -
-MACHO-I386-NEXT: 0x0 0 2 n/a GENERIC_RELOC_PAIR 1 -
+MACHO-I386-NEXT: 0xB 0 2 n/a GENERIC_RELOC_LOCAL_SECTDIFF 1 0x22
+MACHO-I386-NEXT: 0x0 0 2 n/a GENERIC_RELOC_PAIR 1 0x8
MACHO-I386-NEXT: ]
MACHO-I386-NEXT: Symbols [
MACHO-I386-NEXT: Symbol {
MACHO-I386-NEXT: Name: _main (1)
-MACHO-I386-NEXT: Type: 0xF
+MACHO-I386-NEXT: Extern
+MACHO-I386-NEXT: Type: Section (0xE)
MACHO-I386-NEXT: Section: __text (0x1)
MACHO-I386-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-I386-NEXT: Flags [ (0x0)
@@ -231,7 +232,8 @@ MACHO-X86-64-NEXT: ]
MACHO-X86-64-NEXT: Symbols [
MACHO-X86-64-NEXT: Symbol {
MACHO-X86-64-NEXT: Name: _main (1)
-MACHO-X86-64-NEXT: Type: 0xF
+MACHO-X86-64-NEXT: Extern
+MACHO-X86-64-NEXT: Type: Section (0xE)
MACHO-X86-64-NEXT: Section: __text (0x1)
MACHO-X86-64-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-X86-64-NEXT: Flags [ (0x0)
@@ -297,16 +299,17 @@ MACHO-PPC-NEXT: ]
MACHO-PPC-NEXT: Reserved1: 0x0
MACHO-PPC-NEXT: Reserved2: 0x0
MACHO-PPC-NEXT: Relocations [
-MACHO-PPC-NEXT: 0x24 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 -
-MACHO-PPC-NEXT: 0x1C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x58 0 2 n/a PPC_RELOC_PAIR 1 -
-MACHO-PPC-NEXT: 0x18 1 2 0 PPC_RELOC_BR24 0 -
+MACHO-PPC-NEXT: 0x24 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x64
+MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0xC
+MACHO-PPC-NEXT: 0x1C 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x64
+MACHO-PPC-NEXT: 0x58 0 2 n/a PPC_RELOC_PAIR 1 0xC
+MACHO-PPC-NEXT: 0x18 1 2 0 PPC_RELOC_BR24 0 0x2
MACHO-PPC-NEXT: ]
MACHO-PPC-NEXT: Symbols [
MACHO-PPC-NEXT: Symbol {
MACHO-PPC-NEXT: Name: _f (4)
-MACHO-PPC-NEXT: Type: 0xF
+MACHO-PPC-NEXT: Extern
+MACHO-PPC-NEXT: Type: Section (0xE)
MACHO-PPC-NEXT: Section: __text (0x1)
MACHO-PPC-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-PPC-NEXT: Flags [ (0x0)
@@ -339,10 +342,10 @@ MACHO-PPC-NEXT: ]
MACHO-PPC-NEXT: Reserved1: 0x0
MACHO-PPC-NEXT: Reserved2: 0x20
MACHO-PPC-NEXT: Relocations [
-MACHO-PPC-NEXT: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 -
-MACHO-PPC-NEXT: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 -
-MACHO-PPC-NEXT: 0x20 0 2 n/a PPC_RELOC_PAIR 1 -
+MACHO-PPC-NEXT: 0x14 0 2 n/a PPC_RELOC_LO16_SECTDIFF 1 0x68
+MACHO-PPC-NEXT: 0x0 0 2 n/a PPC_RELOC_PAIR 1 0x48
+MACHO-PPC-NEXT: 0xC 0 2 n/a PPC_RELOC_HA16_SECTDIFF 1 0x68
+MACHO-PPC-NEXT: 0x20 0 2 n/a PPC_RELOC_PAIR 1 0x48
MACHO-PPC-NEXT: ]
MACHO-PPC-NEXT: Symbols [
MACHO-PPC-NEXT: ]
@@ -371,7 +374,8 @@ MACHO-PPC-NEXT: ]
MACHO-PPC-NEXT: Symbols [
MACHO-PPC-NEXT: Symbol {
MACHO-PPC-NEXT: Name: _b (1)
-MACHO-PPC-NEXT: Type: 0xF
+MACHO-PPC-NEXT: Extern
+MACHO-PPC-NEXT: Type: Section (0xE)
MACHO-PPC-NEXT: Section: __data (0x3)
MACHO-PPC-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-PPC-NEXT: Flags [ (0x0)
@@ -452,16 +456,17 @@ MACHO-PPC64-NEXT: ]
MACHO-PPC64-NEXT: Reserved1: 0x0
MACHO-PPC64-NEXT: Reserved2: 0x0
MACHO-PPC64-NEXT: Relocations [
-MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x1C 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x58 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x18 1 2 0 0 -
+MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 0x64
+MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 0xC
+MACHO-PPC64-NEXT: 0x1C 0 2 n/a 1 0x64
+MACHO-PPC64-NEXT: 0x58 0 2 n/a 1 0xC
+MACHO-PPC64-NEXT: 0x18 1 2 0 0 0x2
MACHO-PPC64-NEXT: ]
MACHO-PPC64-NEXT: Symbols [
MACHO-PPC64-NEXT: Symbol {
MACHO-PPC64-NEXT: Name: _f (4)
-MACHO-PPC64-NEXT: Type: 0xF
+MACHO-PPC64-NEXT: Extern
+MACHO-PPC64-NEXT: Type: Section (0xE)
MACHO-PPC64-NEXT: Section: __text (0x1)
MACHO-PPC64-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-PPC64-NEXT: Flags [ (0x0)
@@ -494,10 +499,10 @@ MACHO-PPC64-NEXT: ]
MACHO-PPC64-NEXT: Reserved1: 0x0
MACHO-PPC64-NEXT: Reserved2: 0x20
MACHO-PPC64-NEXT: Relocations [
-MACHO-PPC64-NEXT: 0x14 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0xC 0 2 n/a 1 -
-MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 -
+MACHO-PPC64-NEXT: 0x14 0 2 n/a 1 0x6C
+MACHO-PPC64-NEXT: 0x0 0 2 n/a 1 0x48
+MACHO-PPC64-NEXT: 0xC 0 2 n/a 1 0x6C
+MACHO-PPC64-NEXT: 0x24 0 2 n/a 1 0x48
MACHO-PPC64-NEXT: ]
MACHO-PPC64-NEXT: Symbols [
MACHO-PPC64-NEXT: ]
@@ -526,7 +531,8 @@ MACHO-PPC64-NEXT: ]
MACHO-PPC64-NEXT: Symbols [
MACHO-PPC64-NEXT: Symbol {
MACHO-PPC64-NEXT: Name: _b (1)
-MACHO-PPC64-NEXT: Type: 0xF
+MACHO-PPC64-NEXT: Extern
+MACHO-PPC64-NEXT: Type: Section (0xE)
MACHO-PPC64-NEXT: Section: __data (0x3)
MACHO-PPC64-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-PPC64-NEXT: Flags [ (0x0)
@@ -612,7 +618,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_SECTDIFF (2)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x40
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -621,7 +627,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x28
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -648,7 +654,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 1
MACHO-ARM-NEXT: Extern: 0
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0xFFFFFF
MACHO-ARM-NEXT: Scattered: 0
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -666,7 +672,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 0
MACHO-ARM-NEXT: Extern: 0
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0xFFFFFF
MACHO-ARM-NEXT: Scattered: 0
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -675,7 +681,7 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_SECTDIFF (2)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x44
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Relocation {
@@ -684,14 +690,15 @@ MACHO-ARM-NEXT: PCRel: 0
MACHO-ARM-NEXT: Length: 2
MACHO-ARM-NEXT: Extern: N/A
MACHO-ARM-NEXT: Type: ARM_RELOC_PAIR (1)
-MACHO-ARM-NEXT: Symbol: -
+MACHO-ARM-NEXT: Symbol: 0x4
MACHO-ARM-NEXT: Scattered: 1
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: ]
MACHO-ARM-NEXT: Symbols [
MACHO-ARM-NEXT: Symbol {
MACHO-ARM-NEXT: Name: _f (4)
-MACHO-ARM-NEXT: Type: 0xF
+MACHO-ARM-NEXT: Extern
+MACHO-ARM-NEXT: Type: Section (0xE)
MACHO-ARM-NEXT: Section: __text (0x1)
MACHO-ARM-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-ARM-NEXT: Flags [ (0x0)
@@ -700,7 +707,8 @@ MACHO-ARM-NEXT: Value: 0x10
MACHO-ARM-NEXT: }
MACHO-ARM-NEXT: Symbol {
MACHO-ARM-NEXT: Name: _h (1)
-MACHO-ARM-NEXT: Type: 0xF
+MACHO-ARM-NEXT: Extern
+MACHO-ARM-NEXT: Type: Section (0xE)
MACHO-ARM-NEXT: Section: __text (0x1)
MACHO-ARM-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-ARM-NEXT: Flags [ (0x0)
@@ -825,7 +833,8 @@ MACHO-ARM-NEXT: ]
MACHO-ARM-NEXT: Symbols [
MACHO-ARM-NEXT: Symbol {
MACHO-ARM-NEXT: Name: _b (10)
-MACHO-ARM-NEXT: Type: 0xF
+MACHO-ARM-NEXT: Extern
+MACHO-ARM-NEXT: Type: Section (0xE)
MACHO-ARM-NEXT: Section: __data (0x6)
MACHO-ARM-NEXT: RefType: UndefinedNonLazy (0x0)
MACHO-ARM-NEXT: Flags [ (0x0)
diff --git a/test/tools/llvm-readobj/sections.test b/test/tools/llvm-readobj/sections.test
index 16f1131e05bd..fe734d77e34b 100644
--- a/test/tools/llvm-readobj/sections.test
+++ b/test/tools/llvm-readobj/sections.test
@@ -2,6 +2,8 @@ RUN: llvm-readobj -s %p/Inputs/trivial.obj.coff-i386 \
RUN: | FileCheck %s -check-prefix COFF
RUN: llvm-readobj -s %p/Inputs/trivial.obj.elf-i386 \
RUN: | FileCheck %s -check-prefix ELF
+RUN: llvm-readobj -s %p/Inputs/trivial.obj.elf-mipsel \
+RUN: | FileCheck %s -check-prefix ELF-MIPSEL
RUN: llvm-readobj -s %p/Inputs/trivial.obj.macho-i386 \
RUN: | FileCheck %s -check-prefix MACHO-I386
RUN: llvm-readobj -s %p/Inputs/trivial.obj.macho-x86-64 \
@@ -84,6 +86,37 @@ ELF-NEXT: AddressAlignment: 16
ELF-NEXT: EntrySize: 0
ELF-NEXT: }
+ELF-MIPSEL: Section {
+ELF-MIPSEL: Index: 4
+ELF-MIPSEL-NEXT: Name: .reginfo (27)
+ELF-MIPSEL-NEXT: Type: SHT_MIPS_REGINFO (0x70000006)
+ELF-MIPSEL-NEXT: Flags [ (0x2)
+ELF-MIPSEL-NEXT: SHF_ALLOC (0x2)
+ELF-MIPSEL-NEXT: ]
+ELF-MIPSEL-NEXT: Address: 0x0
+ELF-MIPSEL-NEXT: Offset: 0x34
+ELF-MIPSEL-NEXT: Size: 24
+ELF-MIPSEL-NEXT: Link: 0
+ELF-MIPSEL-NEXT: Info: 0
+ELF-MIPSEL-NEXT: AddressAlignment: 1
+ELF-MIPSEL-NEXT: EntrySize: 0
+ELF-MIPSEL-NEXT: }
+ELF-MIPSEL-NEXT: Section {
+ELF-MIPSEL-NEXT: Index: 5
+ELF-MIPSEL-NEXT: Name: .MIPS.abiflags (12)
+ELF-MIPSEL-NEXT: Type: SHT_MIPS_ABIFLAGS (0x7000002A)
+ELF-MIPSEL-NEXT: Flags [ (0x2)
+ELF-MIPSEL-NEXT: SHF_ALLOC (0x2)
+ELF-MIPSEL-NEXT: ]
+ELF-MIPSEL-NEXT: Address: 0x0
+ELF-MIPSEL-NEXT: Offset: 0x50
+ELF-MIPSEL-NEXT: Size: 24
+ELF-MIPSEL-NEXT: Link: 0
+ELF-MIPSEL-NEXT: Info: 0
+ELF-MIPSEL-NEXT: AddressAlignment: 8
+ELF-MIPSEL-NEXT: EntrySize: 0
+ELF-MIPSEL-NEXT: }
+
MACHO-I386: Sections [
MACHO-I386-NEXT: Section {
MACHO-I386-NEXT: Index: 0
diff --git a/test/tools/llvm-readobj/symbols.test b/test/tools/llvm-readobj/symbols.test
index e014377e586f..26830ac46a8a 100644
--- a/test/tools/llvm-readobj/symbols.test
+++ b/test/tools/llvm-readobj/symbols.test
@@ -5,6 +5,24 @@ RUN: | FileCheck %s -check-prefix ELF
COFF: Symbols [
COFF-NEXT: Symbol {
+COFF-NEXT: Name: @comp.id
+COFF-NEXT: Value: 14766605
+COFF-NEXT: Section: (65535)
+COFF-NEXT: BaseType: Null (0x0)
+COFF-NEXT: ComplexType: Null (0x0)
+COFF-NEXT: StorageClass: Static (0x3)
+COFF-NEXT: AuxSymbolCount: 0
+COFF-NEXT: }
+COFF-NEXT: Symbol {
+COFF-NEXT: Name: @feat.00
+COFF-NEXT: Value: 2147484049
+COFF-NEXT: Section: (65535)
+COFF-NEXT: BaseType: Null (0x0)
+COFF-NEXT: ComplexType: Null (0x0)
+COFF-NEXT: StorageClass: Static (0x3)
+COFF-NEXT: AuxSymbolCount: 0
+COFF-NEXT: }
+COFF-NEXT: Symbol {
COFF-NEXT: Name: .text
COFF-NEXT: Value: 0
COFF-NEXT: Section: .text (1)
@@ -31,7 +49,7 @@ ELF-NEXT: Size: 0
ELF-NEXT: Binding: Local (0x0)
ELF-NEXT: Type: None (0x0)
ELF-NEXT: Other: 0
-ELF-NEXT: Section: (0x0)
+ELF-NEXT: Section: Undefined (0x0)
ELF-NEXT: }
ELF-NEXT: Symbol {
ELF-NEXT: Name: trivial.ll (1)
@@ -40,7 +58,7 @@ ELF-NEXT: Size: 0
ELF-NEXT: Binding: Local (0x0)
ELF-NEXT: Type: File (0x4)
ELF-NEXT: Other: 0
-ELF-NEXT: Section: (0xFFF1)
+ELF-NEXT: Section: Absolute (0xFFF1)
ELF-NEXT: }
ELF-NEXT: Symbol {
ELF-NEXT: Name: .L.str (39)